Diffstat (limited to 'drivers'; columns: file mode, path, lines changed)
-rw-r--r--drivers/Kconfig3
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accessibility/Kconfig1
-rw-r--r--drivers/acpi/Kconfig28
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/ac.c1
-rw-r--r--drivers/acpi/acpi_apd.c5
-rw-r--r--drivers/acpi/acpi_configfs.c6
-rw-r--r--drivers/acpi/acpi_lpit.c162
-rw-r--r--drivers/acpi/acpi_lpss.c97
-rw-r--r--drivers/acpi/acpi_processor.c1
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/achware.h4
-rw-r--r--drivers/acpi/acpica/acinterp.h6
-rw-r--r--drivers/acpi/acpica/acutils.h33
-rw-r--r--drivers/acpi/acpica/dbconvert.c5
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c6
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c30
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c72
-rw-r--r--drivers/acpi/acpica/hwtimer.c10
-rw-r--r--drivers/acpi/acpica/hwxface.c118
-rw-r--r--drivers/acpi/acpica/nsconvert.c4
-rw-r--r--drivers/acpi/acpica/tbxface.c9
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c438
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c442
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/Makefile1
-rw-r--r--drivers/acpi/apei/apei-internal.h1
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/apei/ghes.c124
-rw-r--r--drivers/acpi/arm64/gtdt.c2
-rw-r--r--drivers/acpi/arm64/iort.c258
-rw-r--r--drivers/acpi/battery.h1
-rw-r--r--drivers/acpi/button.c32
-rw-r--r--drivers/acpi/cppc_acpi.c240
-rw-r--r--drivers/acpi/device_pm.c277
-rw-r--r--drivers/acpi/dock.c1
-rw-r--r--drivers/acpi/dptf/Kconfig1
-rw-r--r--drivers/acpi/ec.c18
-rw-r--r--drivers/acpi/event.c1
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/nfit/Kconfig1
-rw-r--r--drivers/acpi/nfit/core.c274
-rw-r--r--drivers/acpi/nfit/mce.c2
-rw-r--r--drivers/acpi/nfit/nfit.h37
-rw-r--r--drivers/acpi/osl.c42
-rw-r--r--drivers/acpi/pmic/intel_pmic.h1
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c137
-rw-r--r--drivers/acpi/pmic/tps68470_pmic.c455
-rw-r--r--drivers/acpi/proc.c1
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/processor_pdc.c1
-rw-r--r--drivers/acpi/reboot.c1
-rw-r--r--drivers/acpi/resource.c1
-rw-r--r--drivers/acpi/sbshc.h1
-rw-r--r--drivers/acpi/scan.c38
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/acpi/sleep.h1
-rw-r--r--drivers/acpi/sysfs.c12
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/acpi/wakeup.c1
-rw-r--r--drivers/acpi/x86/utils.c18
-rw-r--r--drivers/amba/Kconfig1
-rw-r--r--drivers/amba/bus.c1
-rw-r--r--drivers/android/Kconfig1
-rw-r--r--drivers/android/binder.c4
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_ceva.c197
-rw-r--r--drivers/ata/ahci_imx.c2
-rw-r--r--drivers/ata/libahci.c11
-rw-r--r--drivers/ata/libahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c13
-rw-r--r--drivers/ata/libata-eh.c18
-rw-r--r--drivers/ata/libata-scsi.c19
-rw-r--r--drivers/ata/libata-transport.h1
-rw-r--r--drivers/ata/libata-zpodd.c1
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_gemini.h1
-rw-r--r--drivers/ata/sata_mv.c8
-rw-r--r--drivers/ata/sata_rcar.c7
-rw-r--r--drivers/ata/sis.h1
-rw-r--r--drivers/atm/Kconfig1
-rw-r--r--drivers/atm/Makefile1
-rw-r--r--drivers/atm/ambassador.c9
-rw-r--r--drivers/atm/eni.h1
-rw-r--r--drivers/atm/firestream.c8
-rw-r--r--drivers/atm/fore200e.c8
-rw-r--r--drivers/atm/fore200e.h1
-rw-r--r--drivers/atm/horizon.c10
-rw-r--r--drivers/atm/idt77105.c14
-rw-r--r--drivers/atm/idt77105.h1
-rw-r--r--drivers/atm/idt77252.c27
-rw-r--r--drivers/atm/idt77252.h3
-rw-r--r--drivers/atm/idt77252_tables.h1
-rw-r--r--drivers/atm/iphase.c12
-rw-r--r--drivers/atm/lanai.c8
-rw-r--r--drivers/atm/midway.h1
-rw-r--r--drivers/atm/nicstar.c8
-rw-r--r--drivers/atm/nicstar.h1
-rw-r--r--drivers/atm/nicstarmac.c1
-rw-r--r--drivers/atm/solos-attrlist.c1
-rw-r--r--drivers/atm/suni.c10
-rw-r--r--drivers/atm/suni.h1
-rw-r--r--drivers/atm/tonga.h1
-rw-r--r--drivers/atm/uPD98401.h1
-rw-r--r--drivers/atm/uPD98402.h1
-rw-r--r--drivers/atm/zatm.h1
-rw-r--r--drivers/atm/zeprom.h1
-rw-r--r--drivers/auxdisplay/Kconfig2
-rw-r--r--drivers/auxdisplay/Makefile1
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c12
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/Kconfig1
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/arch_topology.c31
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/cpu.c11
-rw-r--r--drivers/base/dd.c18
-rw-r--r--drivers/base/devtmpfs.c1
-rw-r--r--drivers/base/dma-coherent.c1
-rw-r--r--drivers/base/memory.c1
-rw-r--r--drivers/base/node.c1
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/domain.c226
-rw-r--r--drivers/base/power/domain_governor.c86
-rw-r--r--drivers/base/power/generic_ops.c23
-rw-r--r--drivers/base/power/main.c61
-rw-r--r--drivers/base/power/power.h1
-rw-r--r--drivers/base/power/qos.c3
-rw-r--r--drivers/base/power/runtime.c42
-rw-r--r--drivers/base/power/sysfs.c28
-rw-r--r--drivers/base/power/wakeup.c11
-rw-r--r--drivers/base/property.c9
-rw-r--r--drivers/base/regmap/Kconfig5
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regmap-spi.c2
-rw-r--r--drivers/base/regmap/regmap-spmi.c4
-rw-r--r--drivers/base/regmap/regmap.c111
-rw-r--r--drivers/base/regmap/trace.h1
-rw-r--r--drivers/base/test/Kconfig1
-rw-r--r--drivers/base/test/test_async_driver_probe.c6
-rw-r--r--drivers/bcma/Kconfig19
-rw-r--r--drivers/bcma/Makefile1
-rw-r--r--drivers/bcma/bcma_private.h1
-rw-r--r--drivers/bcma/driver_gpio.c2
-rw-r--r--drivers/bcma/driver_mips.c12
-rw-r--r--drivers/bcma/scan.h1
-rw-r--r--drivers/block/DAC960.c9
-rw-r--r--drivers/block/DAC960.h2
-rw-r--r--drivers/block/Kconfig18
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c58
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/aoe/aoedev.c9
-rw-r--r--drivers/block/aoe/aoemain.c44
-rw-r--r--drivers/block/ataflop.c24
-rw-r--r--drivers/block/brd.c68
-rw-r--r--drivers/block/cryptoloop.c2
-rw-r--r--drivers/block/drbd/Kconfig1
-rw-r--r--drivers/block/drbd/Makefile1
-rw-r--r--drivers/block/drbd/drbd_debugfs.c1
-rw-r--r--drivers/block/drbd/drbd_debugfs.h1
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_interval.c1
-rw-r--r--drivers/block/drbd/drbd_interval.h1
-rw-r--r--drivers/block/drbd/drbd_main.c18
-rw-r--r--drivers/block/drbd/drbd_nla.c1
-rw-r--r--drivers/block/drbd/drbd_nla.h1
-rw-r--r--drivers/block/drbd/drbd_protocol.h1
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/drbd/drbd_state.h1
-rw-r--r--drivers/block/drbd/drbd_state_change.h1
-rw-r--r--drivers/block/drbd/drbd_strings.h1
-rw-r--r--drivers/block/drbd/drbd_worker.c8
-rw-r--r--drivers/block/floppy.c14
-rw-r--r--drivers/block/loop.c13
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/Kconfig1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c7
-rw-r--r--drivers/block/nbd.c26
-rw-r--r--drivers/block/null_blk.c14
-rw-r--r--drivers/block/paride/Kconfig2
-rw-r--r--drivers/block/paride/Makefile1
-rw-r--r--drivers/block/paride/mkd1
-rw-r--r--drivers/block/rbd.c69
-rw-r--r--drivers/block/rsxx/cregs.c7
-rw-r--r--drivers/block/rsxx/dma.c7
-rw-r--r--drivers/block/skd_main.c9
-rw-r--r--drivers/block/sunvdc.c9
-rw-r--r--drivers/block/swim3.c29
-rw-r--r--drivers/block/umem.c5
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/xsysace.c6
-rw-r--r--drivers/block/zram/Kconfig1
-rw-r--r--drivers/block/zram/zcomp.c6
-rw-r--r--drivers/block/zram/zram_drv.c18
-rw-r--r--drivers/bluetooth/Kconfig4
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bcm203x.c2
-rw-r--r--drivers/bluetooth/bluecard_cs.c7
-rw-r--r--drivers/bluetooth/bpa10x.c15
-rw-r--r--drivers/bluetooth/bt3c_cs.c2
-rw-r--r--drivers/bluetooth/btbcm.c106
-rw-r--r--drivers/bluetooth/btintel.c42
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c6
-rw-r--r--drivers/bluetooth/btqca.c6
-rw-r--r--drivers/bluetooth/btqcomsmd.c45
-rw-r--r--drivers/bluetooth/btrtl.c21
-rw-r--r--drivers/bluetooth/btusb.c232
-rw-r--r--drivers/bluetooth/hci_ath.c51
-rw-r--r--drivers/bluetooth/hci_bcm.c263
-rw-r--r--drivers/bluetooth/hci_bcsp.c10
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_h5.c10
-rw-r--r--drivers/bluetooth/hci_ldisc.c49
-rw-r--r--drivers/bluetooth/hci_ll.c2
-rw-r--r--drivers/bluetooth/hci_qca.c29
-rw-r--r--drivers/bluetooth/hci_serdev.c9
-rw-r--r--drivers/bluetooth/hci_uart.h2
-rw-r--r--drivers/bus/Kconfig16
-rw-r--r--drivers/bus/Makefile3
-rw-r--r--drivers/bus/arm-ccn.c1
-rw-r--r--drivers/bus/ti-sysc.c583
-rw-r--r--drivers/bus/ts-nbus.c375
-rw-r--r--drivers/cdrom/Makefile16
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/Kconfig1
-rw-r--r--drivers/char/agp/Makefile1
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/isoch.c1
-rw-r--r--drivers/char/applicom.h1
-rw-r--r--drivers/char/ds1302.c1
-rw-r--r--drivers/char/dtlk.c6
-rw-r--r--drivers/char/hangcheck-timer.c6
-rw-r--r--drivers/char/hw_random/Kconfig6
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/core.c53
-rw-r--r--drivers/char/hw_random/iproc-rng200.c1
-rw-r--r--drivers/char/hw_random/n2-asm.S1
-rw-r--r--drivers/char/hw_random/n2rng.h1
-rw-r--r--drivers/char/hw_random/pseries-rng.c2
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c7
-rw-r--r--drivers/char/hw_random/virtio-rng.c21
-rw-r--r--drivers/char/hw_random/xgene-rng.c8
-rw-r--r--drivers/char/ipmi/Kconfig35
-rw-r--r--drivers/char/ipmi/Makefile11
-rw-r--r--drivers/char/ipmi/bt-bmc.c13
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c77
-rw-r--r--drivers/char/ipmi/ipmi_dmi.h9
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1279
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c4
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c4
-rw-r--r--drivers/char/ipmi/ipmi_si.h49
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c146
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c242
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2128
-rw-r--r--drivers/char/ipmi/ipmi_si_mem_io.c144
-rw-r--r--drivers/char/ipmi/ipmi_si_parisc.c58
-rw-r--r--drivers/char/ipmi/ipmi_si_pci.c166
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c593
-rw-r--r--drivers/char/ipmi/ipmi_si_port_io.c112
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h23
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c119
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c11
-rw-r--r--drivers/char/mem.c5
-rw-r--r--drivers/char/misc.c1
-rw-r--r--drivers/char/nwbutton.c6
-rw-r--r--drivers/char/nwbutton.h3
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c6
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c7
-rw-r--r--drivers/char/pcmcia/cm4040_cs.h1
-rw-r--r--drivers/char/pcmcia/synclink_cs.c8
-rw-r--r--drivers/char/random.c5
-rw-r--r--drivers/char/rtc.c6
-rw-r--r--drivers/char/tlclk.c12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/st33zp24/Makefile1
-rw-r--r--drivers/char/tpm/tpm-dev-common.c13
-rw-r--r--drivers/char/tpm/tpm-dev.h1
-rw-r--r--drivers/char/tpm/tpm-sysfs.c87
-rw-r--r--drivers/char/tpm/tpm.h15
-rw-r--r--drivers/char/tpm/tpm2-cmd.c73
-rw-r--r--drivers/char/tpm/tpm2-space.c4
-rw-r--r--drivers/char/tpm/tpm_crb.c59
-rw-r--r--drivers/char/tpm/tpm_eventlog.h1
-rw-r--r--drivers/char/tpm/tpm_tis.c5
-rw-r--r--drivers/char/tpm/tpm_tis_core.c6
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi.c73
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/Makefile1
-rw-r--r--drivers/clk/at91/clk-utmi.c95
-rw-r--r--drivers/clk/bcm/Kconfig9
-rw-r--r--drivers/clk/bcm/Makefile2
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c30
-rw-r--r--drivers/clk/bcm/clk-hr2.c27
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c7
-rw-r--r--drivers/clk/clk-cdce925.c2
-rw-r--r--drivers/clk/clk-gemini.c1
-rw-r--r--drivers/clk/clk-gpio.c84
-rw-r--r--drivers/clk/clk-hsdk-pll.c4
-rw-r--r--drivers/clk/clk-mux.c6
-rw-r--r--drivers/clk/clk-stm32h7.c4
-rw-r--r--drivers/clk/clk-tango4.c1
-rw-r--r--drivers/clk/clk-twl6040.c2
-rw-r--r--drivers/clk/clk-u300.c84
-rw-r--r--drivers/clk/clk-wm831x.c6
-rw-r--r--drivers/clk/clk-xgene.c20
-rw-r--r--drivers/clk/clk.c178
-rw-r--r--drivers/clk/h8300/clk-div.c1
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c1
-rw-r--r--drivers/clk/hisilicon/Makefile1
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c2
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c4
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c6
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c12
-rw-r--r--drivers/clk/imgtec/clk-boston.c2
-rw-r--r--drivers/clk/imx/Makefile1
-rw-r--r--drivers/clk/imx/clk-busy.c4
-rw-r--r--drivers/clk/imx/clk-gate2.c2
-rw-r--r--drivers/clk/imx/clk-imx27.c1
-rw-r--r--drivers/clk/imx/clk-imx6q.c2
-rw-r--r--drivers/clk/imx/clk-imx6ul.c2
-rw-r--r--drivers/clk/imx/clk-imx7d.c11
-rw-r--r--drivers/clk/imx/clk-pllv1.c3
-rw-r--r--drivers/clk/imx/clk-pllv2.c3
-rw-r--r--drivers/clk/imx/clk.c1
-rw-r--r--drivers/clk/imx/clk.h1
-rw-r--r--drivers/clk/ingenic/cgu.c2
-rw-r--r--drivers/clk/ingenic/cgu.h2
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c2
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c2
-rw-r--r--drivers/clk/mediatek/Kconfig80
-rw-r--r--drivers/clk/mediatek/Makefile13
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c2
-rw-r--r--drivers/clk/mediatek/clk-mt2712-bdp.c102
-rw-r--r--drivers/clk/mediatek/clk-mt2712-img.c80
-rw-r--r--drivers/clk/mediatek/clk-mt2712-jpgdec.c76
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mfg.c75
-rw-r--r--drivers/clk/mediatek/clk-mt2712-mm.c170
-rw-r--r--drivers/clk/mediatek/clk-mt2712-vdec.c94
-rw-r--r--drivers/clk/mediatek/clk-mt2712-venc.c77
-rw-r--r--drivers/clk/mediatek/clk-mt2712.c1435
-rw-r--r--drivers/clk/mediatek/clk-mt7622-aud.c195
-rw-r--r--drivers/clk/mediatek/clk-mt7622-eth.c156
-rw-r--r--drivers/clk/mediatek/clk-mt7622-hif.c169
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c780
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-pll.c18
-rw-r--r--drivers/clk/meson/gxbb.c292
-rw-r--r--drivers/clk/meson/gxbb.h6
-rw-r--r--drivers/clk/mmp/Makefile1
-rw-r--r--drivers/clk/mmp/clk-apbc.c2
-rw-r--r--drivers/clk/mmp/clk-apmu.c2
-rw-r--r--drivers/clk/mmp/clk-frac.c6
-rw-r--r--drivers/clk/mmp/clk-gate.c4
-rw-r--r--drivers/clk/mmp/clk-mix.c27
-rw-r--r--drivers/clk/mmp/clk-mmp2.c6
-rw-r--r--drivers/clk/mmp/clk-pxa168.c6
-rw-r--r--drivers/clk/mmp/clk-pxa910.c8
-rw-r--r--drivers/clk/mmp/clk.c1
-rw-r--r--drivers/clk/mmp/clk.h1
-rw-r--r--drivers/clk/mmp/reset.c1
-rw-r--r--drivers/clk/mmp/reset.h1
-rw-r--r--drivers/clk/mvebu/Makefile1
-rw-r--r--drivers/clk/mvebu/dove-divider.c1
-rw-r--r--drivers/clk/mvebu/dove-divider.h1
-rw-r--r--drivers/clk/mxs/clk-div.c2
-rw-r--r--drivers/clk/mxs/clk-frac.c2
-rw-r--r--drivers/clk/pxa/clk-pxa.c4
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-rcg.h3
-rw-r--r--drivers/clk/qcom/clk-rcg2.c79
-rw-r--r--drivers/clk/qcom/clk-rpm.c93
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c82
-rw-r--r--drivers/clk/qcom/common.c32
-rw-r--r--drivers/clk/renesas/Kconfig5
-rw-r--r--drivers/clk/renesas/Makefile2
-rw-r--r--drivers/clk/renesas/clk-div6.c38
-rw-r--r--drivers/clk/renesas/clk-div6.h4
-rw-r--r--drivers/clk/renesas/clk-mstp.c5
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c1
-rw-r--r--drivers/clk/renesas/clk-rz.c2
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c199
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.c7
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h6
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c79
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h3
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c105
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h4
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-cpu.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c12
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c2
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-cpu.c2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c76
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c111
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c179
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c18
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c409
-rw-r--r--drivers/clk/samsung/clk-exynos5440.c12
-rw-r--r--drivers/clk/samsung/clk-pll.c11
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c16
-rw-r--r--drivers/clk/samsung/clk.c45
-rw-r--r--drivers/clk/samsung/clk.h80
-rw-r--r--drivers/clk/sirf/atlas6.h1
-rw-r--r--drivers/clk/sirf/clk-atlas6.c2
-rw-r--r--drivers/clk/sirf/clk-atlas7.c18
-rw-r--r--drivers/clk/sirf/clk-common.c92
-rw-r--r--drivers/clk/sirf/clk-prima2.c2
-rw-r--r--drivers/clk/sirf/prima2.h1
-rw-r--r--drivers/clk/socfpga/Makefile1
-rw-r--r--drivers/clk/spear/Makefile1
-rw-r--r--drivers/clk/spear/clk-aux-synth.c10
-rw-r--r--drivers/clk/spear/clk-frac-synth.c6
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c6
-rw-r--r--drivers/clk/spear/clk-vco-pll.c12
-rw-r--r--drivers/clk/spear/clk.h4
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/st/clkgen.h1
-rw-r--r--drivers/clk/sunxi-ng/Makefile2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c28
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c27
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c40
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.h8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c38
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c21
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c56
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.h1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.h25
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c158
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.h80
-rw-r--r--drivers/clk/sunxi/Makefile1
-rw-r--r--drivers/clk/sunxi/clk-factors.c2
-rw-r--r--drivers/clk/sunxi/clk-factors.h1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-cpus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/clk/tegra/Makefile1
-rw-r--r--drivers/clk/tegra/clk-bpmp.c15
-rw-r--r--drivers/clk/tegra/clk-dfll.c10
-rw-r--r--drivers/clk/tegra/clk-dfll.h2
-rw-r--r--drivers/clk/tegra/clk-id.h2
-rw-r--r--drivers/clk/tegra/clk-periph.c8
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c24
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c4
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c12
-rw-r--r--drivers/clk/tegra/clk-tegra20.c13
-rw-r--r--drivers/clk/tegra/clk-tegra210.c51
-rw-r--r--drivers/clk/tegra/clk-tegra30.c23
-rw-r--r--drivers/clk/tegra/clk.h3
-rw-r--r--drivers/clk/ti/Makefile1
-rw-r--r--drivers/clk/ti/clk-dra7-atl.c3
-rw-r--r--drivers/clk/ti/divider.c4
-rw-r--r--drivers/clk/ti/mux.c4
-rw-r--r--drivers/clk/uniphier/Makefile1
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mio.c7
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c8
-rw-r--r--drivers/clk/ux500/Makefile1
-rw-r--r--drivers/clk/ux500/clk-prcc.c6
-rw-r--r--drivers/clk/ux500/clk-prcmu.c6
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c6
-rw-r--r--drivers/clk/versatile/clk-icst.c7
-rw-r--r--drivers/clk/versatile/clk-icst.h1
-rw-r--r--drivers/clocksource/Kconfig50
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c101
-rw-r--r--drivers/clocksource/bcm2835_timer.c2
-rw-r--r--drivers/clocksource/h8300_timer16.c1
-rw-r--r--drivers/clocksource/h8300_timer8.c1
-rw-r--r--drivers/clocksource/h8300_tpu.c1
-rw-r--r--drivers/clocksource/i8253.c1
-rw-r--r--drivers/clocksource/mips-gic-timer.c12
-rw-r--r--drivers/clocksource/owl-timer.c4
-rw-r--r--drivers/clocksource/rockchip_timer.c2
-rw-r--r--drivers/clocksource/sh_cmt.c76
-rw-r--r--drivers/clocksource/tango_xtal.c1
-rw-r--r--drivers/clocksource/tcb_clksrc.c1
-rw-r--r--drivers/clocksource/timer-fttmr010.c5
-rw-r--r--drivers/clocksource/timer-of.c19
-rw-r--r--drivers/clocksource/timer-of.h4
-rw-r--r--drivers/clocksource/timer-sp.h1
-rw-r--r--drivers/connector/cn_queue.c4
-rw-r--r--drivers/connector/connector.c2
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/arm_big_little.c16
-rw-r--r--drivers/cpufreq/arm_big_little.h4
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt.c12
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/cpufreq_stats.c7
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c1
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c1
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c85
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c13
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h1
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c191
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/cpufreq/spear-cpufreq.c4
-rw-r--r--drivers/cpufreq/speedstep-lib.c2
-rw-r--r--drivers/cpufreq/ti-cpufreq.c6
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c2
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-arm.c153
-rw-r--r--drivers/cpuidle/cpuidle-cps.c2
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c5
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c1
-rw-r--r--drivers/cpuidle/cpuidle.c14
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/dt_idle_states.h1
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/crypto/Kconfig40
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c512
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c831
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h199
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h3
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.c85
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.h99
-rw-r--r--drivers/crypto/atmel-aes-regs.h1
-rw-r--r--drivers/crypto/atmel-aes.c80
-rw-r--r--drivers/crypto/atmel-sha-regs.h1
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes-regs.h1
-rw-r--r--drivers/crypto/atmel-tdes.c23
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c6
-rw-r--r--drivers/crypto/bcm/cipher.c116
-rw-r--r--drivers/crypto/bcm/cipher.h3
-rw-r--r--drivers/crypto/bcm/util.c14
-rw-r--r--drivers/crypto/caam/Makefile1
-rw-r--r--drivers/crypto/caam/caamalg.c10
-rw-r--r--drivers/crypto/caam/caamalg_desc.h1
-rw-r--r--drivers/crypto/caam/caamalg_qi.c7
-rw-r--r--drivers/crypto/caam/caamhash.c12
-rw-r--r--drivers/crypto/caam/caampkc.h1
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.h1
-rw-r--r--drivers/crypto/caam/desc.h3
-rw-r--r--drivers/crypto/caam/desc_constr.h1
-rw-r--r--drivers/crypto/caam/error.c1
-rw-r--r--drivers/crypto/caam/error.h1
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/jr.c4
-rw-r--r--drivers/crypto/caam/jr.h1
-rw-r--r--drivers/crypto/caam/key_gen.c1
-rw-r--r--drivers/crypto/caam/key_gen.h1
-rw-r--r--drivers/crypto/caam/pdb.h1
-rw-r--r--drivers/crypto/caam/pkc_desc.c1
-rw-r--r--drivers/crypto/caam/qi.c1
-rw-r--r--drivers/crypto/caam/qi.h1
-rw-r--r--drivers/crypto/caam/regs.h1
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h1
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_algs.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c1
-rw-r--r--drivers/crypto/cavium/zip/Makefile1
-rw-r--r--drivers/crypto/ccp/Makefile1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c9
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c8
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c3
-rw-r--r--drivers/crypto/ccp/ccp-dev.c7
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c5
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c1798
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h57
-rw-r--r--drivers/crypto/chelsio/chcr_core.c10
-rw-r--r--drivers/crypto/chelsio/chcr_core.h2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h121
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c6
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/marvell/cesa.c29
-rw-r--r--drivers/crypto/marvell/cesa.h28
-rw-r--r--drivers/crypto/marvell/cipher.c476
-rw-r--r--drivers/crypto/marvell/tdma.c5
-rw-r--r--drivers/crypto/mediatek/mtk-aes.c39
-rw-r--r--drivers/crypto/mv_cesa.c1216
-rw-r--r--drivers/crypto/mv_cesa.h150
-rw-r--r--drivers/crypto/n2_asm.S1
-rw-r--r--drivers/crypto/n2_core.c12
-rw-r--r--drivers/crypto/n2_core.h1
-rw-r--r--drivers/crypto/nx/Makefile1
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c169
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c2
-rw-r--r--drivers/crypto/nx/nx-842.c2
-rw-r--r--drivers/crypto/nx/nx-842.h1
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c9
-rw-r--r--drivers/crypto/nx/nx.c2
-rw-r--r--drivers/crypto/nx/nx.h1
-rw-r--r--drivers/crypto/nx/nx_csbcpb.h1
-rw-r--r--drivers/crypto/omap-aes-gcm.c11
-rw-r--r--drivers/crypto/omap-aes.c12
-rw-r--r--drivers/crypto/omap-des.c7
-rw-r--r--drivers/crypto/omap-sham.c7
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/crypto/padlock-sha.c2
-rw-r--r--drivers/crypto/picoxcell_crypto.c7
-rw-r--r--drivers/crypto/qat/Makefile1
-rw-r--r--drivers/crypto/qat/qat_common/Makefile1
-rw-r--r--drivers/crypto/qat/qat_common/adf_dev_mgr.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c18
-rw-r--r--drivers/crypto/qat/qat_common/qat_uclo.c15
-rw-r--r--drivers/crypto/qce/Makefile1
-rw-r--r--drivers/crypto/qce/ablkcipher.c5
-rw-r--r--drivers/crypto/qce/sha.c30
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h1
-rw-r--r--drivers/crypto/s5p-sss.c1596
-rw-r--r--drivers/crypto/stm32/stm32-hash.c20
-rw-r--r--drivers/crypto/talitos.c582
-rw-r--r--drivers/crypto/talitos.h7
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c1
-rw-r--r--drivers/crypto/virtio/Makefile1
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c2
-rw-r--r--drivers/crypto/vmx/Makefile1
-rw-r--r--drivers/crypto/vmx/aes_ctr.c33
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.h1
-rw-r--r--drivers/crypto/vmx/ppc-xlate.pl1
-rw-r--r--drivers/dax/Makefile1
-rw-r--r--drivers/dax/device.c3
-rw-r--r--drivers/dax/super.c14
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c139
-rw-r--r--drivers/devfreq/exynos-bus.c5
-rw-r--r--drivers/devfreq/governor_passive.c2
-rw-r--r--drivers/devfreq/governor_performance.c2
-rw-r--r--drivers/devfreq/governor_powersave.c2
-rw-r--r--drivers/devfreq/governor_simpleondemand.c2
-rw-r--r--drivers/devfreq/governor_userspace.c2
-rw-r--r--drivers/devfreq/rk3399_dmc.c2
-rw-r--r--drivers/dio/dio.c1
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/dma-buf/sync_trace.h1
-rw-r--r--drivers/dma/Kconfig31
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_hdmac_regs.h2
-rw-r--r--drivers/dma/bcm-sba-raid.c117
-rw-r--r--drivers/dma/bestcomm/Makefile1
-rw-r--r--drivers/dma/coh901318.c6
-rw-r--r--drivers/dma/dma-axi-dmac.c75
-rw-r--r--drivers/dma/dmaengine.h1
-rw-r--r--drivers/dma/dmatest.c1
-rw-r--r--drivers/dma/dw/Makefile1
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/img-mdc-dma.c98
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c14
-rw-r--r--drivers/dma/ioat/dma.c6
-rw-r--r--drivers/dma/ioat/dma.h3
-rw-r--r--drivers/dma/ioat/init.c2
-rw-r--r--drivers/dma/nbpfaxi.c5
-rw-r--r--drivers/dma/omap-dma.c5
-rw-r--r--drivers/dma/pch_dma.c12
-rw-r--r--drivers/dma/pl330.c39
-rw-r--r--drivers/dma/qcom/Makefile1
-rw-r--r--drivers/dma/qcom/bam_dma.c169
-rw-r--r--drivers/dma/sa11x0-dma.c11
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sprd-dma.c988
-rw-r--r--drivers/dma/stm32-dmamux.c327
-rw-r--r--drivers/dma/stm32-mdma.c1682
-rw-r--r--drivers/dma/sun6i-dma.c257
-rw-r--r--drivers/dma/ti-dma-crossbar.c8
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/edac/altera_edac.c10
-rw-r--r--drivers/edac/amd64_edac.c5
-rw-r--r--drivers/edac/amd64_edac_dbg.c1
-rw-r--r--drivers/edac/amd64_edac_inj.c1
-rw-r--r--drivers/edac/edac_mc.c7
-rw-r--r--drivers/edac/edac_mc.h8
-rw-r--r--drivers/edac/edac_mc_sysfs.c2
-rw-r--r--drivers/edac/edac_module.c3
-rw-r--r--drivers/edac/edac_module.h1
-rw-r--r--drivers/edac/ghes_edac.c137
-rw-r--r--drivers/edac/i7core_edac.c11
-rw-r--r--drivers/edac/mce_amd.h1
-rw-r--r--drivers/edac/pnd2_edac.c9
-rw-r--r--drivers/edac/sb_edac.c43
-rw-r--r--drivers/edac/skx_edac.c30
-rw-r--r--drivers/edac/thunderx_edac.c25
-rw-r--r--drivers/eisa/Makefile1
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/extcon-adc-jack.c2
-rw-r--r--drivers/extcon/extcon-arizona.c2
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c2
-rw-r--r--drivers/extcon/extcon-intel-int3496.c2
-rw-r--r--drivers/extcon/extcon-max14577.c4
-rw-r--r--drivers/extcon/extcon-max3355.c2
-rw-r--r--drivers/extcon/extcon-max77693.c2
-rw-r--r--drivers/extcon/extcon-max77843.c95
-rw-r--r--drivers/extcon/extcon-max8997.c2
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/extcon/extcon-rt8973a.c2
-rw-r--r--drivers/extcon/extcon-sm5502.c2
-rw-r--r--drivers/extcon/extcon-usb-gpio.c2
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c2
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/extcon/extcon.h3
-rw-r--r--drivers/firewire/Makefile1
-rw-r--r--drivers/firewire/core-transaction.c10
-rw-r--r--drivers/firewire/core.h1
-rw-r--r--drivers/firewire/nosy-user.h1
-rw-r--r--drivers/firewire/nosy.h1
-rw-r--r--drivers/firewire/ohci.c10
-rw-r--r--drivers/firewire/ohci.h1
-rw-r--r--drivers/firmware/Kconfig11
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_scpi.c216
-rw-r--r--drivers/firmware/efi/Makefile1
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c7
-rw-r--r--drivers/firmware/efi/libstub/efistub.h1
-rw-r--r--drivers/firmware/efi/libstub/string.c1
-rw-r--r--drivers/firmware/efi/memmap.c1
-rw-r--r--drivers/firmware/efi/reboot.c1
-rw-r--r--drivers/firmware/efi/test/efi_test.h1
-rw-r--r--drivers/firmware/google/Makefile1
-rw-r--r--drivers/firmware/psci_checker.c5
-rw-r--r--drivers/firmware/qcom_scm-32.c31
-rw-r--r--drivers/firmware/qcom_scm-64.c71
-rw-r--r--drivers/firmware/qcom_scm.c182
-rw-r--r--drivers/firmware/qcom_scm.h13
-rw-r--r--drivers/firmware/qemu_fw_cfg.c8
-rw-r--r--drivers/firmware/tegra/Makefile4
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c444
-rw-r--r--drivers/firmware/tegra/bpmp.c31
-rw-r--r--drivers/firmware/tegra/ivc.c24
-rw-r--r--drivers/firmware/ti_sci.c2
-rw-r--r--drivers/fmc/Makefile1
-rw-r--r--drivers/fpga/Makefile1
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c2
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/gpio/Kconfig41
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c2
-rw-r--r--drivers/gpio/gpio-104-idi-48.c2
-rw-r--r--drivers/gpio/gpio-104-idio-16.c2
-rw-r--r--drivers/gpio/gpio-adnp.c31
-rw-r--r--drivers/gpio/gpio-altera.c4
-rw-r--r--drivers/gpio/gpio-aspeed.c13
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-brcmstb.c422
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-dln2.c2
-rw-r--r--drivers/gpio/gpio-dwapb.c36
-rw-r--r--drivers/gpio/gpio-em.c4
-rw-r--r--drivers/gpio/gpio-etraxfs.c1
-rw-r--r--drivers/gpio/gpio-ftgpio010.c3
-rw-r--r--drivers/gpio/gpio-grgpio.c6
-rw-r--r--drivers/gpio/gpio-ingenic.c2
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-loongson1.c7
-rw-r--r--drivers/gpio/gpio-lynxpoint.c2
-rw-r--r--drivers/gpio/gpio-max3191x.c492
-rw-r--r--drivers/gpio/gpio-max732x.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c12
-rw-r--r--drivers/gpio/gpio-merrifield.c2
-rw-r--r--drivers/gpio/gpio-mmio.c130
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c23
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c2
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c14
-rw-r--r--drivers/gpio/gpio-rcar.c28
-rw-r--r--drivers/gpio/gpio-reg.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c10
-rw-r--r--drivers/gpio/gpio-tb10x.c3
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-tegra186.c620
-rw-r--r--drivers/gpio/gpio-thunderx.c13
-rw-r--r--drivers/gpio/gpio-tz1090.c4
-rw-r--r--drivers/gpio/gpio-uniphier.c508
-rw-r--r--drivers/gpio/gpio-vf610.c2
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpio-ws16c48.c2
-rw-r--r--drivers/gpio/gpio-xgene-sb.c23
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpio-zx.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpiolib-of.c4
-rw-r--r--drivers/gpio/gpiolib.c599
-rw-r--r--drivers/gpio/gpiolib.h6
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig45
-rw-r--r--drivers/gpu/drm/amd/display/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/TODO107
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4925
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h259
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c498
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c755
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h102
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c446
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c379
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c161
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3871
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c1934
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2424
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c290
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c364
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/custom_float.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c3257
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c1899
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c1626
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1677
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2367
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c775
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c2587
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c331
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2795
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c398
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h467
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c171
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h652
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c945
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h631
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h238
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1379
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c700
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h347
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h310
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c1119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1617
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h733
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1463
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c933
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c522
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2987
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1052
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c738
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c555
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1327
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1966
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h273
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c688
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c716
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c854
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1283
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1004
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c481
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h1386
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c816
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c702
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c960
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h683
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2958
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h167
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c351
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1466
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c1203
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h374
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h387
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h282
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h559
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h557
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1763
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c392
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c1905
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile58
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c591
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c571
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c570
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h210
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c875
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c601
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h122
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h283
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/custom_float.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h481
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h635
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h175
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h289
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h183
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h311
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h197
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h392
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile48
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c289
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c356
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c170
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h193
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h310
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h143
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h49
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h154
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h149
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h466
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed32_32.h129
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h445
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h294
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h170
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h188
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h166
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h107
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h95
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1483
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h167
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile1
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h1
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c9
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.h1
-rw-r--r--drivers/gpu/drm/armada/Makefile1
-rw-r--r--drivers/gpu/drm/armada/armada_trace.c1
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h1
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c1
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Makefile1
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c32
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c17
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c48
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c25
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c73
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c8
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c4
-rw-r--r--drivers/gpu/drm/drm_trace.h1
-rw-r--r--drivers/gpu/drm/drm_vblank.c11
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h1
-rw-r--r--drivers/gpu/drm/etnaviv/state_3d.xml.h1
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h1
-rw-r--r--drivers/gpu/drm/exynos/Makefile1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c8
-rw-r--r--drivers/gpu/drm/i2c/Makefile1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c7
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h1
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c1
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c19
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c10
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c51
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_object.h1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c11
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h1
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lib/drm_random.h1
-rw-r--r--drivers/gpu/drm/mediatek/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0002.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0046.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl006b.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507a.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507b.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507d.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl9097.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/event.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0001.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0004.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0005.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/debug.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/event.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/notify.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/option.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c23
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_common.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c3
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c99
-rw-r--r--drivers/gpu/drm/r128/r128_state.c6
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c1
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c6
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h1
-rw-r--r--drivers/gpu/drm/shmobile/Makefile1
-rw-r--r--drivers/gpu/drm/sti/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile1
-rw-r--r--drivers/gpu/drm/ttm/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c10
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c13
-rw-r--r--drivers/gpu/drm/virtio/Makefile1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile1
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/zte/Makefile1
-rw-r--r--drivers/gpu/host1x/Makefile1
-rw-r--r--drivers/gpu/host1x/bus.c1
-rw-r--r--drivers/gpu/host1x/dev.c3
-rw-r--r--drivers/gpu/ipu-v3/Makefile1
-rw-r--r--drivers/hid/Kconfig5
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-alps.c520
-rw-r--r--drivers/hid/hid-appleir.c7
-rw-r--r--drivers/hid/hid-asus.c31
-rw-r--r--drivers/hid/hid-core.c8
-rw-r--r--drivers/hid/hid-cp2112.c10
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h10
-rw-r--r--drivers/hid/hid-input.c9
-rw-r--r--drivers/hid/hid-lg.c4
-rw-r--r--drivers/hid/hid-lg.h1
-rw-r--r--drivers/hid/hid-lg4ff.c4
-rw-r--r--drivers/hid/hid-lg4ff.h1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-magicmouse.c3
-rw-r--r--drivers/hid/hid-multitouch.c52
-rw-r--r--drivers/hid/hid-prodikeys.c7
-rw-r--r--drivers/hid/hid-rmi.c13
-rw-r--r--drivers/hid/hid-sony.c14
-rw-r--r--drivers/hid/hid-tmff.c2
-rw-r--r--drivers/hid/hid-wiimote-core.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c7
-rw-r--r--drivers/hid/intel-ish-hid/Makefile1
-rw-r--r--drivers/hid/usbhid/Makefile1
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hid/wacom_wac.c44
-rw-r--r--drivers/hid/wacom_wac.h3
-rw-r--r--drivers/hsi/Makefile1
-rw-r--r--drivers/hsi/clients/hsi_char.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c32
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c6
-rw-r--r--drivers/hv/Makefile5
-rw-r--r--drivers/hv/channel.c23
-rw-r--r--drivers/hv/channel_mgmt.c36
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv_trace.c4
-rw-r--r--drivers/hv/hv_trace.h327
-rw-r--r--drivers/hv/hyperv_vmbus.h4
-rw-r--r--drivers/hv/vmbus_drv.c211
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adt7x10.h1
-rw-r--r--drivers/hwmon/asc7621.c1
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c8
-rw-r--r--drivers/hwmon/gpio-fan.c224
-rw-r--r--drivers/hwmon/k10temp.c108
-rw-r--r--drivers/hwmon/max1619.c10
-rw-r--r--drivers/hwmon/max6621.c593
-rw-r--r--drivers/hwmon/pmbus/Kconfig10
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/max31785.c116
-rw-r--r--drivers/hwmon/pmbus/pmbus.h6
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c25
-rw-r--r--drivers/hwmon/sht15.c175
-rw-r--r--drivers/hwmon/stts751.c18
-rw-r--r--drivers/hwmon/w83781d.c12
-rw-r--r--drivers/hwmon/w83791d.c15
-rw-r--r--drivers/hwmon/w83792d.c15
-rw-r--r--drivers/hwmon/w83793.c19
-rw-r--r--drivers/hwmon/xgene-hwmon.c39
-rw-r--r--drivers/hwspinlock/Kconfig2
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c8
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c4
-rw-r--r--drivers/hwtracing/intel_th/Makefile1
-rw-r--r--drivers/hwtracing/stm/Makefile1
-rw-r--r--drivers/hwtracing/stm/policy.c10
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig5
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c12
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c51
-rw-r--r--drivers/i2c/busses/i2c-davinci.c69
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c71
-rw-r--r--drivers/i2c/busses/i2c-gpio.c212
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c104
-rw-r--r--drivers/i2c/busses/i2c-mpc.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h1
-rw-r--r--drivers/i2c/busses/i2c-omap.c25
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c1
-rw-r--r--drivers/i2c/busses/i2c-parport.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-riic.c115
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c8
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c3
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c9
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c30
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c44
-rw-r--r--drivers/i2c/i2c-core-base.c34
-rw-r--r--drivers/i2c/i2c-core-smbus.c55
-rw-r--r--drivers/i2c/i2c-dev.c268
-rw-r--r--drivers/i2c/i2c-smbus.c81
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c95
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c9
-rw-r--r--drivers/ide/Kconfig2
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/ide-acpi.c1
-rw-r--r--drivers/ide/ide-atapi.c6
-rw-r--r--drivers/ide/ide-cd.c9
-rw-r--r--drivers/ide/ide-cd.h1
-rw-r--r--drivers/ide/ide-cd_ioctl.c1
-rw-r--r--drivers/ide/ide-cd_verbose.c1
-rw-r--r--drivers/ide/ide-devsets.c1
-rw-r--r--drivers/ide/ide-disk.c1
-rw-r--r--drivers/ide/ide-disk.h1
-rw-r--r--drivers/ide/ide-disk_ioctl.c1
-rw-r--r--drivers/ide/ide-disk_proc.c1
-rw-r--r--drivers/ide/ide-floppy.c1
-rw-r--r--drivers/ide/ide-floppy.h1
-rw-r--r--drivers/ide/ide-floppy_ioctl.c1
-rw-r--r--drivers/ide/ide-floppy_proc.c1
-rw-r--r--drivers/ide/ide-gd.h1
-rw-r--r--drivers/ide/ide-io.c4
-rw-r--r--drivers/ide/ide-park.c1
-rw-r--r--drivers/ide/ide-pio-blacklist.c1
-rw-r--r--drivers/ide/ide-pm.c5
-rw-r--r--drivers/ide/ide-pnp.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide-sysfs.c1
-rw-r--r--drivers/ide/ide.c4
-rw-r--r--drivers/ide/qd65xx.h1
-rw-r--r--drivers/idle/intel_idle.c34
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig15
-rw-r--r--drivers/iio/accel/Makefile3
-rw-r--r--drivers/iio/accel/adxl345_core.c1
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bma220_spi.c1
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c3
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c423
-rw-r--r--drivers/iio/accel/da280.c1
-rw-r--r--drivers/iio/accel/da311.c1
-rw-r--r--drivers/iio/accel/dmard06.c1
-rw-r--r--drivers/iio/accel/dmard09.c1
-rw-r--r--drivers/iio/accel/dmard10.c1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c356
-rw-r--r--drivers/iio/accel/kxsd9-i2c.c1
-rw-r--r--drivers/iio/accel/kxsd9.c1
-rw-r--r--drivers/iio/accel/kxsd9.h1
-rw-r--r--drivers/iio/accel/mc3230.c1
-rw-r--r--drivers/iio/accel/mma7455_core.c1
-rw-r--r--drivers/iio/accel/mma7660.c1
-rw-r--r--drivers/iio/accel/mma8452.c383
-rw-r--r--drivers/iio/accel/mma9551.c1
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c2
-rw-r--r--drivers/iio/accel/mxc6255.c1
-rw-r--r--drivers/iio/accel/sca3000.c1
-rw-r--r--drivers/iio/accel/st_accel.h4
-rw-r--r--drivers/iio/accel/st_accel_core.c248
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c10
-rw-r--r--drivers/iio/accel/stk8312.c2
-rw-r--r--drivers/iio/accel/stk8ba50.c2
-rw-r--r--drivers/iio/adc/Kconfig7
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7266.c1
-rw-r--r--drivers/iio/adc/ad7291.c1
-rw-r--r--drivers/iio/adc/ad7298.c1
-rw-r--r--drivers/iio/adc/ad7476.c1
-rw-r--r--drivers/iio/adc/ad7766.c2
-rw-r--r--drivers/iio/adc/ad7791.c2
-rw-r--r--drivers/iio/adc/ad7793.c2
-rw-r--r--drivers/iio/adc/ad7887.c1
-rw-r--r--drivers/iio/adc/ad7923.c1
-rw-r--r--drivers/iio/adc/ad799x.c3
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c1
-rw-r--r--drivers/iio/adc/aspeed_adc.c1
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c2
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/axp20x_adc.c2
-rw-r--r--drivers/iio/adc/axp288_adc.c1
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c1
-rw-r--r--drivers/iio/adc/berlin2-adc.c1
-rw-r--r--drivers/iio/adc/cc10001_adc.c1
-rw-r--r--drivers/iio/adc/cpcap-adc.c1
-rw-r--r--drivers/iio/adc/da9150-gpadc.c1
-rw-r--r--drivers/iio/adc/dln2-adc.c6
-rw-r--r--drivers/iio/adc/envelope-detector.c1
-rw-r--r--drivers/iio/adc/ep93xx_adc.c1
-rw-r--r--drivers/iio/adc/exynos_adc.c1
-rw-r--r--drivers/iio/adc/hi8435.c1
-rw-r--r--drivers/iio/adc/hx711.c1
-rw-r--r--drivers/iio/adc/imx7d_adc.c1
-rw-r--r--drivers/iio/adc/ina2xx-adc.c20
-rw-r--r--drivers/iio/adc/lp8788_adc.c1
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c1
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c1
-rw-r--r--drivers/iio/adc/ltc2471.c1
-rw-r--r--drivers/iio/adc/ltc2485.c1
-rw-r--r--drivers/iio/adc/ltc2497.c1
-rw-r--r--drivers/iio/adc/max1027.c2
-rw-r--r--drivers/iio/adc/max11100.c1
-rw-r--r--drivers/iio/adc/max1118.c1
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/adc/max9611.c2
-rw-r--r--drivers/iio/adc/mcp320x.c235
-rw-r--r--drivers/iio/adc/mcp3422.c1
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c1
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c2
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c2
-rw-r--r--drivers/iio/adc/nau7802.c1
-rw-r--r--drivers/iio/adc/palmas_gpadc.c1
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c1
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c1
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c1
-rw-r--r--drivers/iio/adc/qcom-vadc-common.c1
-rw-r--r--drivers/iio/adc/qcom-vadc-common.h1
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c8
-rw-r--r--drivers/iio/adc/rockchip_saradc.c1
-rw-r--r--drivers/iio/adc/spear_adc.c1
-rw-r--r--drivers/iio/adc/stm32-adc-core.c13
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/stx104.c1
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c45
-rw-r--r--drivers/iio/adc/ti-adc081c.c1
-rw-r--r--drivers/iio/adc/ti-adc0832.c1
-rw-r--r--drivers/iio/adc/ti-adc084s021.c1
-rw-r--r--drivers/iio/adc/ti-adc108s102.c1
-rw-r--r--drivers/iio/adc/ti-adc12138.c3
-rw-r--r--drivers/iio/adc/ti-adc128s052.c1
-rw-r--r--drivers/iio/adc/ti-adc161s626.c1
-rw-r--r--drivers/iio/adc/ti-ads1015.c22
-rw-r--r--drivers/iio/adc/ti-ads7950.c1
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/adc/ti-tlc4541.c1
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c1
-rw-r--r--drivers/iio/adc/twl4030-madc.c1
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c1
-rw-r--r--drivers/iio/adc/vf610_adc.c1
-rw-r--r--drivers/iio/adc/viperboard_adc.c1
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c2
-rw-r--r--drivers/iio/amplifiers/ad8366.c1
-rw-r--r--drivers/iio/buffer/Makefile1
-rw-r--r--drivers/iio/chemical/ams-iaq-core.c1
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c2
-rw-r--r--drivers/iio/chemical/ccs811.c87
-rw-r--r--drivers/iio/chemical/vz89x.c1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c1
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c13
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c6
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio_sensor.h1
-rw-r--r--drivers/iio/common/st_sensors/Makefile1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c59
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.h1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c16
-rw-r--r--drivers/iio/counter/104-quad-8.c1
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c1
-rw-r--r--drivers/iio/dac/Kconfig22
-rw-r--r--drivers/iio/dac/Makefile3
-rw-r--r--drivers/iio/dac/ad5064.c1
-rw-r--r--drivers/iio/dac/ad5360.c1
-rw-r--r--drivers/iio/dac/ad5380.c1
-rw-r--r--drivers/iio/dac/ad5421.c1
-rw-r--r--drivers/iio/dac/ad5446.c14
-rw-r--r--drivers/iio/dac/ad5449.c1
-rw-r--r--drivers/iio/dac/ad5504.c1
-rw-r--r--drivers/iio/dac/ad5592r-base.c1
-rw-r--r--drivers/iio/dac/ad5624r_spi.c1
-rw-r--r--drivers/iio/dac/ad5686.c1
-rw-r--r--drivers/iio/dac/ad5755.c1
-rw-r--r--drivers/iio/dac/ad5761.c1
-rw-r--r--drivers/iio/dac/ad5764.c1
-rw-r--r--drivers/iio/dac/ad5791.c1
-rw-r--r--drivers/iio/dac/ad7303.c1
-rw-r--r--drivers/iio/dac/ad8801.c1
-rw-r--r--drivers/iio/dac/cio-dac.c1
-rw-r--r--drivers/iio/dac/dpot-dac.c1
-rw-r--r--drivers/iio/dac/ds4424.c341
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c1
-rw-r--r--drivers/iio/dac/ltc2632.c1
-rw-r--r--drivers/iio/dac/m62332.c1
-rw-r--r--drivers/iio/dac/max517.c1
-rw-r--r--drivers/iio/dac/max5821.c1
-rw-r--r--drivers/iio/dac/mcp4725.c1
-rw-r--r--drivers/iio/dac/mcp4922.c1
-rw-r--r--drivers/iio/dac/stm32-dac.c1
-rw-r--r--drivers/iio/dac/ti-dac082s085.c368
-rw-r--r--drivers/iio/dac/vf610_dac.c1
-rw-r--r--drivers/iio/dummy/Kconfig2
-rw-r--r--drivers/iio/dummy/Makefile1
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c89
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.h1
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c3
-rw-r--r--drivers/iio/frequency/ad9523.c1
-rw-r--r--drivers/iio/frequency/adf4350.c1
-rw-r--r--drivers/iio/gyro/Makefile1
-rw-r--r--drivers/iio/gyro/adis16080.c1
-rw-r--r--drivers/iio/gyro/adis16130.c1
-rw-r--r--drivers/iio/gyro/adis16136.c1
-rw-r--r--drivers/iio/gyro/adis16260.c1
-rw-r--r--drivers/iio/gyro/adxrs450.c1
-rw-r--r--drivers/iio/gyro/bmg160.h1
-rw-r--r--drivers/iio/gyro/bmg160_core.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c1
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c1
-rw-r--r--drivers/iio/gyro/itg3200_core.c1
-rw-r--r--drivers/iio/gyro/mpu3050-core.c2
-rw-r--r--drivers/iio/gyro/mpu3050.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c47
-rw-r--r--drivers/iio/health/afe4403.c2
-rw-r--r--drivers/iio/health/afe4404.c2
-rw-r--r--drivers/iio/health/max30100.c1
-rw-r--r--drivers/iio/health/max30102.c1
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c1
-rw-r--r--drivers/iio/humidity/dht11.c1
-rw-r--r--drivers/iio/humidity/hdc100x.c1
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c1
-rw-r--r--drivers/iio/humidity/hts221_buffer.c1
-rw-r--r--drivers/iio/humidity/hts221_core.c1
-rw-r--r--drivers/iio/humidity/htu21.c1
-rw-r--r--drivers/iio/humidity/si7005.c1
-rw-r--r--drivers/iio/humidity/si7020.c1
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis16400_buffer.c1
-rw-r--r--drivers/iio/imu/adis16400_core.c1
-rw-r--r--drivers/iio/imu/adis16480.c1
-rw-r--r--drivers/iio/imu/adis_trigger.c1
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h1
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c1
-rw-r--r--drivers/iio/imu/inv_mpu6050/Makefile1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c13
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c3
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h32
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c72
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c98
-rw-r--r--drivers/iio/industrialio-configfs.c2
-rw-r--r--drivers/iio/industrialio-core.c35
-rw-r--r--drivers/iio/industrialio-sw-device.c6
-rw-r--r--drivers/iio/industrialio-sw-trigger.c6
-rw-r--r--drivers/iio/industrialio-trigger.c35
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/acpi-als.c1
-rw-r--r--drivers/iio/light/adjd_s311.c1
-rw-r--r--drivers/iio/light/al3320a.c1
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/light/bh1750.c1
-rw-r--r--drivers/iio/light/bh1780.c1
-rw-r--r--drivers/iio/light/cm32181.c1
-rw-r--r--drivers/iio/light/cm3232.c1
-rw-r--r--drivers/iio/light/cm3323.c1
-rw-r--r--drivers/iio/light/cm3605.c1
-rw-r--r--drivers/iio/light/cm36651.c1
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c1
-rw-r--r--drivers/iio/light/gp2ap020a00f.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c1
-rw-r--r--drivers/iio/light/hid-sensor-prox.c1
-rw-r--r--drivers/iio/light/isl29018.c2
-rw-r--r--drivers/iio/light/isl29028.c1
-rw-r--r--drivers/iio/light/isl29125.c1
-rw-r--r--drivers/iio/light/jsa1212.c1
-rw-r--r--drivers/iio/light/lm3533-als.c1
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/light/max44000.c1
-rw-r--r--drivers/iio/light/opt3001.c1
-rw-r--r--drivers/iio/light/pa12203001.c1
-rw-r--r--drivers/iio/light/rpr0521.c2
-rw-r--r--drivers/iio/light/si1145.c3
-rw-r--r--drivers/iio/light/stk3310.c1
-rw-r--r--drivers/iio/light/tcs3414.c1
-rw-r--r--drivers/iio/light/tcs3472.c263
-rw-r--r--drivers/iio/light/tsl2563.c2
-rw-r--r--drivers/iio/light/tsl2583.c1
-rw-r--r--drivers/iio/light/tsl4531.c1
-rw-r--r--drivers/iio/light/us5182d.c1
-rw-r--r--drivers/iio/light/vcnl4000.c1
-rw-r--r--drivers/iio/light/veml6070.c1
-rw-r--r--drivers/iio/light/vl6180.c127
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/ak8974.c1
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c2
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.h1
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c1
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c1
-rw-r--r--drivers/iio/magnetometer/mag3110.c1
-rw-r--r--drivers/iio/magnetometer/mmc35240.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c22
-rw-r--r--drivers/iio/multiplexer/iio-mux.c7
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c1
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c1
-rw-r--r--drivers/iio/potentiometer/Makefile1
-rw-r--r--drivers/iio/potentiometer/ds1803.c1
-rw-r--r--drivers/iio/potentiometer/max5481.c2
-rw-r--r--drivers/iio/potentiometer/max5487.c2
-rw-r--r--drivers/iio/potentiometer/mcp4131.c1
-rw-r--r--drivers/iio/potentiometer/mcp4531.c1
-rw-r--r--drivers/iio/potentiometer/tpl0102.c1
-rw-r--r--drivers/iio/potentiostat/lmp91000.c2
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/abp060mg.c1
-rw-r--r--drivers/iio/pressure/bmp280-core.c1
-rw-r--r--drivers/iio/pressure/bmp280-regmap.c1
-rw-r--r--drivers/iio/pressure/bmp280.h1
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c3
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c1
-rw-r--r--drivers/iio/pressure/hp03.c1
-rw-r--r--drivers/iio/pressure/hp206c.c1
-rw-r--r--drivers/iio/pressure/mpl115.c1
-rw-r--r--drivers/iio/pressure/mpl3115.c1
-rw-r--r--drivers/iio/pressure/ms5611_core.c1
-rw-r--r--drivers/iio/pressure/ms5637.c1
-rw-r--r--drivers/iio/pressure/st_pressure.h4
-rw-r--r--drivers/iio/pressure/st_pressure_core.c78
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c10
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c10
-rw-r--r--drivers/iio/pressure/t5403.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c2
-rw-r--r--drivers/iio/proximity/Kconfig10
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/as3935.c2
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c1
-rw-r--r--drivers/iio/proximity/rfd77402.c352
-rw-r--r--drivers/iio/proximity/srf04.c1
-rw-r--r--drivers/iio/proximity/srf08.c2
-rw-r--r--drivers/iio/proximity/sx9500.c2
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c1
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c1
-rw-r--r--drivers/iio/temperature/mlx90614.c1
-rw-r--r--drivers/iio/temperature/tmp006.c1
-rw-r--r--drivers/iio/temperature/tmp007.c1
-rw-r--r--drivers/iio/temperature/tsys01.c1
-rw-r--r--drivers/iio/temperature/tsys02d.c1
-rw-r--r--drivers/iio/trigger/Makefile1
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c3
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c1
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c3
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c1
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c1
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile3
-rw-r--r--drivers/infiniband/core/addr.c29
-rw-r--r--drivers/infiniband/core/cm.c38
-rw-r--r--drivers/infiniband/core/cma.c19
-rw-r--r--drivers/infiniband/core/cma_configfs.c8
-rw-r--r--drivers/infiniband/core/iwcm.c3
-rw-r--r--drivers/infiniband/core/mad.c3
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rw.c24
-rw-r--r--drivers/infiniband/core/security.c66
-rw-r--r--drivers/infiniband/core/sysfs.c16
-rw-r--r--drivers/infiniband/core/umem_odp.c72
-rw-r--r--drivers/infiniband/core/umem_rbtree.c109
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs.h36
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c189
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c13
-rw-r--r--drivers/infiniband/core/uverbs_ioctl_merge.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c23
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c13
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c20
-rw-r--r--drivers/infiniband/core/verbs.c52
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c78
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c19
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c39
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c18
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c18
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c330
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c127
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c69
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h95
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c268
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c66
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c186
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c46
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h29
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h4
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile1
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c385
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h6
-rw-r--r--drivers/infiniband/hw/hfi1/common.h1
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c80
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c22
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c486
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c113
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h35
-rw-r--r--drivers/infiniband/hw/hfi1/init.c53
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c57
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c144
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h4
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c24
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c23
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h6
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c13
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c44
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h2
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c2
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c27
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h10
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h49
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h11
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c7
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c12
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c9
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c100
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h29
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c65
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h2
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c44
-rw-r--r--drivers/infiniband/hw/hns/Kconfig25
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c107
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h54
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h23
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h134
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c719
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c609
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c3296
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h1177
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c384
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c692
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c226
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig1
-rw-r--r--drivers/infiniband/hw/i40iw/Makefile1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c76
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c48
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c19
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c76
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h23
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c30
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c47
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c8
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c23
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h19
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c284
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c26
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c38
-rw-r--r--drivers/infiniband/hw/mlx5/main.c57
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h18
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c10
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c149
-rw-r--r--drivers/infiniband/hw/mthca/Makefile1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c10
-rw-r--r--drivers/infiniband/hw/nes/nes.c33
-rw-r--r--drivers/infiniband/hw/nes/nes.h6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c9
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c24
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c20
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c15
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig2
-rw-r--r--drivers/infiniband/hw/qedr/Makefile2
-rw-r--r--drivers/infiniband/hw/qedr/main.c118
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h31
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h6
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c749
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.h49
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c (renamed from drivers/infiniband/hw/qedr/qedr_cm.c)31
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.h (renamed from drivers/infiniband/hw/qedr/qedr_cm.h)0
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c359
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/Kconfig1
-rw-r--r--drivers/infiniband/hw/qib/Makefile1
-rw-r--r--drivers/infiniband/hw/qib/qib.h30
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c81
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c95
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c200
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c34
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c128
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_tx.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c9
-rw-r--r--drivers/infiniband/hw/usnic/Makefile1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h25
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c25
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/Makefile2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h25
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h54
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c59
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c55
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c319
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h18
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig1
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c21
-rw-r--r--drivers/infiniband/sw/rxe/Makefile1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c11
-rw-r--r--drivers/infiniband/ulp/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c56
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c150
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c17
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c14
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c42
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h22
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h7
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c44
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c22
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c90
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c333
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h9
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/ff-memless.c8
-rw-r--r--drivers/input/gameport/Makefile1
-rw-r--r--drivers/input/gameport/gameport.c7
-rw-r--r--drivers/input/input.c9
-rw-r--r--drivers/input/joystick/Makefile1
-rw-r--r--drivers/input/joystick/db9.c6
-rw-r--r--drivers/input/joystick/gamecon.c9
-rw-r--r--drivers/input/joystick/sidewinder.c10
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/joystick/turbografx.c6
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/bf54x-keys.c7
-rw-r--r--drivers/input/keyboard/gpio_keys.c7
-rw-r--r--drivers/input/keyboard/imx_keypad.c8
-rw-r--r--drivers/input/keyboard/locomokbd.c7
-rw-r--r--drivers/input/keyboard/omap-keypad.c6
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c7
-rw-r--r--drivers/input/keyboard/tegra-kbc.c6
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/misc/regulator-haptic.c2
-rw-r--r--drivers/input/misc/uinput.c305
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/byd.c10
-rw-r--r--drivers/input/mouse/byd.h1
-rw-r--r--drivers/input/mouse/cypress_ps2.h1
-rw-r--r--drivers/input/mouse/elan_i2c_core.c12
-rw-r--r--drivers/input/mouse/gpio_mouse.c206
-rw-r--r--drivers/input/mouse/hgpk.h1
-rw-r--r--drivers/input/mouse/psmouse.h1
-rw-r--r--drivers/input/mouse/vmmouse.c10
-rw-r--r--drivers/input/rmi4/Makefile1
-rw-r--r--drivers/input/rmi4/rmi_f54.c2
-rw-r--r--drivers/input/rmi4/rmi_smbus.c4
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/hil_mlc.c4
-rw-r--r--drivers/input/serio/hp_sdc.c5
-rw-r--r--drivers/input/serio/i8042-sparcio.h1
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/input/serio/sa1111ps2.c69
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/Makefile1
-rw-r--r--drivers/input/touchscreen/Kconfig36
-rw-r--r--drivers/input/touchscreen/Makefile4
-rw-r--r--drivers/input/touchscreen/ad7877.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c19
-rw-r--r--drivers/input/touchscreen/ar1021_i2c.c1
-rw-r--r--drivers/input/touchscreen/atmel-wm97xx.c10
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c59
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c173
-rw-r--r--drivers/input/touchscreen/elants_i2c.c19
-rw-r--r--drivers/input/touchscreen/exc3000.c223
-rw-r--r--drivers/input/touchscreen/goodix.c125
-rw-r--r--drivers/input/touchscreen/hideep.c1120
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c17
-rw-r--r--drivers/input/touchscreen/mxs-lradc-ts.c2
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c18
-rw-r--r--drivers/input/touchscreen/rohm_bu21023.c17
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c4
-rw-r--r--drivers/input/touchscreen/s6sy761.c559
-rw-r--r--drivers/input/touchscreen/st1232.c16
-rw-r--r--drivers/input/touchscreen/stmfts.c4
-rw-r--r--drivers/input/touchscreen/tsc2007_iio.c1
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c7
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h1
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c7
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c10
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c252
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu-v3.c214
-rw-r--r--drivers/iommu/arm-smmu.c31
-rw-r--r--drivers/iommu/dma-iommu.c24
-rw-r--r--drivers/iommu/dmar.c10
-rw-r--r--drivers/iommu/exynos-iommu.c23
-rw-r--r--drivers/iommu/intel-iommu.c28
-rw-r--r--drivers/iommu/intel-svm.c4
-rw-r--r--drivers/iommu/intel_irq_remapping.c44
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c7
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/io-pgtable.h1
-rw-r--r--drivers/iommu/iommu-traces.c1
-rw-r--r--drivers/iommu/iova.c228
-rw-r--r--drivers/iommu/ipmmu-vmsa.c527
-rw-r--r--drivers/iommu/mtk_iommu.c7
-rw-r--r--drivers/iommu/mtk_iommu_v1.c2
-rw-r--r--drivers/iommu/omap-iommu.c375
-rw-r--r--drivers/iommu/omap-iommu.h30
-rw-r--r--drivers/iommu/qcom_iommu.c33
-rw-r--r--drivers/iommu/s390-iommu.c1
-rw-r--r--drivers/irqchip/Kconfig23
-rw-r--r--drivers/irqchip/Makefile7
-rw-r--r--drivers/irqchip/irq-aspeed-i2c-ic.c4
-rw-r--r--drivers/irqchip/irq-bcm2836.c79
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c171
-rw-r--r--drivers/irqchip/irq-ftintc010.c1
-rw-r--r--drivers/irqchip/irq-gic-common.c5
-rw-r--r--drivers/irqchip/irq-gic-common.h2
-rw-r--r--drivers/irqchip/irq-gic-realview.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c390
-rw-r--r--drivers/irqchip/irq-gic-v3.c69
-rw-r--r--drivers/irqchip/irq-gic-v4.c7
-rw-r--r--drivers/irqchip/irq-gic.c77
-rw-r--r--drivers/irqchip/irq-imgpdc.c3
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c1
-rw-r--r--drivers/irqchip/irq-meson-gpio.c419
-rw-r--r--drivers/irqchip/irq-metag-ext.c1
-rw-r--r--drivers/irqchip/irq-metag.c1
-rw-r--r--drivers/irqchip/irq-mips-gic.c226
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.h1
-rw-r--r--drivers/irqchip/irq-omap-intc.c16
-rw-r--r--drivers/irqchip/irq-ompic.c202
-rw-r--r--drivers/irqchip/irq-renesas-h8300h.c1
-rw-r--r--drivers/irqchip/irq-renesas-h8s.c1
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c9
-rw-r--r--drivers/irqchip/irq-s3c24xx.c4
-rw-r--r--drivers/irqchip/irq-sni-exiu.c227
-rw-r--r--drivers/irqchip/irq-stm32-exti.c206
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c1
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c2
-rw-r--r--drivers/isdn/Makefile1
-rw-r--r--drivers/isdn/capi/Makefile1
-rw-r--r--drivers/isdn/capi/capidrv.c6
-rw-r--r--drivers/isdn/capi/capilib.c1
-rw-r--r--drivers/isdn/divert/isdn_divert.c9
-rw-r--r--drivers/isdn/gigaset/Makefile1
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c44
-rw-r--r--drivers/isdn/gigaset/common.c7
-rw-r--r--drivers/isdn/hardware/avm/Makefile1
-rw-r--r--drivers/isdn/hardware/eicon/Makefile1
-rw-r--r--drivers/isdn/hardware/eicon/adapter.h1
-rw-r--r--drivers/isdn/hardware/eicon/debug.c1
-rw-r--r--drivers/isdn/hardware/eicon/diva.c1
-rw-r--r--drivers/isdn/hardware/eicon/diva.h1
-rw-r--r--drivers/isdn/hardware/eicon/diva_pci.h1
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c9
-rw-r--r--drivers/isdn/hardware/eicon/dqueue.h1
-rw-r--r--drivers/isdn/hardware/eicon/dsp_tst.h1
-rw-r--r--drivers/isdn/hardware/eicon/entity.h1
-rw-r--r--drivers/isdn/hardware/eicon/message.c70
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.c1
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.h1
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.c1
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.h1
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.c1
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.h1
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.c1
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.h1
-rw-r--r--drivers/isdn/hardware/eicon/um_xdi.h1
-rw-r--r--drivers/isdn/hardware/eicon/xdi_adapter.h1
-rw-r--r--drivers/isdn/hardware/eicon/xdi_msg.h1
-rw-r--r--drivers/isdn/hardware/mISDN/Makefile1
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi.h1
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi_8xx.h1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c8
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c20
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c7
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c10
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c9
-rw-r--r--drivers/isdn/hisax/Makefile1
-rw-r--r--drivers/isdn/hisax/amd7930_fn.c6
-rw-r--r--drivers/isdn/hisax/arcofi.c6
-rw-r--r--drivers/isdn/hisax/asuscom.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c2
-rw-r--r--drivers/isdn/hisax/diva.c11
-rw-r--r--drivers/isdn/hisax/elsa.c12
-rw-r--r--drivers/isdn/hisax/fsm.c7
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c6
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.h1
-rw-r--r--drivers/isdn/hisax/hfc_2bds0.c4
-rw-r--r--drivers/isdn/hisax/hfc_pci.c9
-rw-r--r--drivers/isdn/hisax/hfc_sx.c11
-rw-r--r--drivers/isdn/hisax/hfc_usb.c10
-rw-r--r--drivers/isdn/hisax/hfc_usb.h1
-rw-r--r--drivers/isdn/hisax/hfcscard.c7
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.h1
-rw-r--r--drivers/isdn/hisax/hisax_isac.h1
-rw-r--r--drivers/isdn/hisax/icc.c6
-rw-r--r--drivers/isdn/hisax/ipacx.c8
-rw-r--r--drivers/isdn/hisax/isac.c6
-rw-r--r--drivers/isdn/hisax/isar.c9
-rw-r--r--drivers/isdn/hisax/isdnl3.c6
-rw-r--r--drivers/isdn/hisax/isurf.c2
-rw-r--r--drivers/isdn/hisax/ix1_micro.c2
-rw-r--r--drivers/isdn/hisax/niccy.c2
-rw-r--r--drivers/isdn/hisax/saphir.c7
-rw-r--r--drivers/isdn/hisax/sedlbauer.c2
-rw-r--r--drivers/isdn/hisax/teleint.c5
-rw-r--r--drivers/isdn/hisax/teles3.c2
-rw-r--r--drivers/isdn/hisax/w6692.c7
-rw-r--r--drivers/isdn/i4l/Makefile1
-rw-r--r--drivers/isdn/i4l/isdn_common.c5
-rw-r--r--drivers/isdn/i4l/isdn_net.c9
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/isdn/i4l/isdn_tty.c7
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c33
-rw-r--r--drivers/isdn/mISDN/Makefile1
-rw-r--r--drivers/isdn/mISDN/dsp.h2
-rw-r--r--drivers/isdn/mISDN/dsp_core.c6
-rw-r--r--drivers/isdn/mISDN/dsp_hwec.h1
-rw-r--r--drivers/isdn/mISDN/dsp_tones.c6
-rw-r--r--drivers/isdn/mISDN/fsm.c7
-rw-r--r--drivers/isdn/mISDN/l1oip.h1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c25
-rw-r--r--drivers/isdn/mISDN/timerdev.c6
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/leds/leds-apu.c278
-rw-r--r--drivers/leds/leds-lp5523.c2
-rw-r--r--drivers/leds/leds-pca955x.c17
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile2
-rw-r--r--drivers/leds/trigger/ledtrig-activity.c275
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c16
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c12
-rw-r--r--drivers/lightnvm/Kconfig3
-rw-r--r--drivers/lightnvm/Makefile1
-rw-r--r--drivers/lightnvm/core.c176
-rw-r--r--drivers/lightnvm/pblk-cache.c24
-rw-r--r--drivers/lightnvm/pblk-core.c516
-rw-r--r--drivers/lightnvm/pblk-gc.c295
-rw-r--r--drivers/lightnvm/pblk-init.c199
-rw-r--r--drivers/lightnvm/pblk-map.c28
-rw-r--r--drivers/lightnvm/pblk-rb.c30
-rw-r--r--drivers/lightnvm/pblk-read.c274
-rw-r--r--drivers/lightnvm/pblk-recovery.c129
-rw-r--r--drivers/lightnvm/pblk-rl.c49
-rw-r--r--drivers/lightnvm/pblk-sysfs.c2
-rw-r--r--drivers/lightnvm/pblk-write.c229
-rw-r--r--drivers/lightnvm/pblk.h134
-rw-r--r--drivers/lightnvm/rrpc.c6
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/macintosh/adb-iop.c5
-rw-r--r--drivers/macintosh/adb.c1
-rw-r--r--drivers/macintosh/adbhid.c1
-rw-r--r--drivers/macintosh/ams/ams.h1
-rw-r--r--drivers/macintosh/ans-lcd.c1
-rw-r--r--drivers/macintosh/ans-lcd.h1
-rw-r--r--drivers/macintosh/macio-adb.c1
-rw-r--r--drivers/macintosh/macio_sysfs.c1
-rw-r--r--drivers/macintosh/smu.c10
-rw-r--r--drivers/macintosh/via-cuda.c1
-rw-r--r--drivers/macintosh/via-macii.c1
-rw-r--r--drivers/macintosh/via-pmu-backlight.c1
-rw-r--r--drivers/macintosh/via-pmu-event.h1
-rw-r--r--drivers/macintosh/via-pmu.c1
-rw-r--r--drivers/macintosh/via-pmu68k.c1
-rw-r--r--drivers/mailbox/Kconfig3
-rw-r--r--drivers/mailbox/Makefile1
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c66
-rw-r--r--drivers/mailbox/mailbox-altera.c12
-rw-r--r--drivers/mailbox/mailbox-test.c11
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/pcc.c5
-rw-r--r--drivers/mcb/Makefile1
-rw-r--r--drivers/mcb/mcb-internal.h1
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/Makefile6
-rw-r--r--drivers/md/bcache/Makefile1
-rw-r--r--drivers/md/bcache/alloc.c16
-rw-r--r--drivers/md/bcache/bcache.h20
-rw-r--r--drivers/md/bcache/bset.c1
-rw-r--r--drivers/md/bcache/bset.h1
-rw-r--r--drivers/md/bcache/btree.c18
-rw-r--r--drivers/md/bcache/btree.h3
-rw-r--r--drivers/md/bcache/closure.h7
-rw-r--r--drivers/md/bcache/debug.c1
-rw-r--r--drivers/md/bcache/debug.h1
-rw-r--r--drivers/md/bcache/extents.c1
-rw-r--r--drivers/md/bcache/extents.h1
-rw-r--r--drivers/md/bcache/io.c1
-rw-r--r--drivers/md/bcache/journal.c1
-rw-r--r--drivers/md/bcache/journal.h1
-rw-r--r--drivers/md/bcache/movinggc.c1
-rw-r--r--drivers/md/bcache/request.c37
-rw-r--r--drivers/md/bcache/request.h1
-rw-r--r--drivers/md/bcache/stats.c9
-rw-r--r--drivers/md/bcache/stats.h1
-rw-r--r--drivers/md/bcache/super.c52
-rw-r--r--drivers/md/bcache/sysfs.c29
-rw-r--r--drivers/md/bcache/sysfs.h1
-rw-r--r--drivers/md/bcache/trace.c1
-rw-r--r--drivers/md/bcache/util.c10
-rw-r--r--drivers/md/bcache/util.h5
-rw-r--r--drivers/md/bcache/writeback.c118
-rw-r--r--drivers/md/bcache/writeback.h7
-rw-r--r--drivers/md/dm-bufio.c25
-rw-r--r--drivers/md/dm-builtin.c1
-rw-r--r--drivers/md/dm-cache-background-tracker.c18
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-cache-policy-smq.c42
-rw-r--r--drivers/md/dm-cache-target.c326
-rw-r--r--drivers/md/dm-core.h3
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-delay.c6
-rw-r--r--drivers/md/dm-era-target.c1
-rw-r--r--drivers/md/dm-integrity.c23
-rw-r--r--drivers/md/dm-kcopyd.c4
-rw-r--r--drivers/md/dm-log-writes.c175
-rw-r--r--drivers/md/dm-mpath.c44
-rw-r--r--drivers/md/dm-raid.c39
-rw-r--r--drivers/md/dm-raid1.c8
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-stats.c37
-rw-r--r--drivers/md/dm-stats.h1
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c64
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c83
-rw-r--r--drivers/md/dm-verity.h5
-rw-r--r--drivers/md/dm-zoned-target.c13
-rw-r--r--drivers/md/dm.c60
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md-bitmap.c (renamed from drivers/md/bitmap.c)29
-rw-r--r--drivers/md/md-bitmap.h (renamed from drivers/md/bitmap.h)1
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-cluster.h1
-rw-r--r--drivers/md/md-faulty.c (renamed from drivers/md/faulty.c)0
-rw-r--r--drivers/md/md-linear.c (renamed from drivers/md/linear.c)2
-rw-r--r--drivers/md/md-linear.h (renamed from drivers/md/linear.h)1
-rw-r--r--drivers/md/md-multipath.c (renamed from drivers/md/multipath.c)4
-rw-r--r--drivers/md/md-multipath.h (renamed from drivers/md/multipath.h)1
-rw-r--r--drivers/md/md.c164
-rw-r--r--drivers/md/md.h20
-rw-r--r--drivers/md/persistent-data/Makefile1
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c3
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid0.h1
-rw-r--r--drivers/md/raid1-10.c1
-rw-r--r--drivers/md/raid1.c78
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c169
-rw-r--r--drivers/md/raid10.h7
-rw-r--r--drivers/md/raid5-cache.c44
-rw-r--r--drivers/md/raid5-log.h3
-rw-r--r--drivers/md/raid5-ppl.c6
-rw-r--r--drivers/md/raid5.c81
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/media/Makefile1
-rw-r--r--drivers/media/cec/Makefile1
-rw-r--r--drivers/media/cec/cec-adap.c18
-rw-r--r--drivers/media/cec/cec-api.c19
-rw-r--r--drivers/media/cec/cec-core.c9
-rw-r--r--drivers/media/cec/cec-pin-priv.h133
-rw-r--r--drivers/media/cec/cec-pin.c40
-rw-r--r--drivers/media/common/b2c2/Makefile1
-rw-r--r--drivers/media/common/b2c2/flexcop-common.h1
-rw-r--r--drivers/media/common/b2c2/flexcop-eeprom.c1
-rw-r--r--drivers/media/common/b2c2/flexcop-fe-tuner.c1
-rw-r--r--drivers/media/common/b2c2/flexcop-hw-filter.c1
-rw-r--r--drivers/media/common/b2c2/flexcop-i2c.c1
-rw-r--r--drivers/media/common/b2c2/flexcop-misc.c1
-rw-r--r--drivers/media/common/b2c2/flexcop-reg.h1
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c1
-rw-r--r--drivers/media/common/b2c2/flexcop.h1
-rw-r--r--drivers/media/common/b2c2/flexcop_ibi_value_be.h1
-rw-r--r--drivers/media/common/b2c2/flexcop_ibi_value_le.h1
-rw-r--r--drivers/media/common/btcx-risc.h1
-rw-r--r--drivers/media/common/cypress_firmware.c6
-rw-r--r--drivers/media/common/cypress_firmware.h1
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c6
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c1
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c13
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c5
-rw-r--r--drivers/media/common/siano/Makefile1
-rw-r--r--drivers/media/common/siano/smscoreapi.c39
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c12
-rw-r--r--drivers/media/dvb-core/Makefile1
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dmxdev.h90
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_demux.c17
-rw-r--r--drivers/media/dvb-core/dvb_demux.h248
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c518
-rw-r--r--drivers/media/dvb-core/dvb_frontend.h117
-rw-r--r--drivers/media/dvb-core/dvb_net.h34
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c8
-rw-r--r--drivers/media/dvb-core/dvbdev.c32
-rw-r--r--drivers/media/dvb-core/dvbdev.h137
-rw-r--r--drivers/media/dvb-frontends/Kconfig6
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c7
-rw-r--r--drivers/media/dvb-frontends/cx24113.c10
-rw-r--r--drivers/media/dvb-frontends/cx24116.c22
-rw-r--r--drivers/media/dvb-frontends/dib7000m.h1
-rw-r--r--drivers/media/dvb-frontends/dib7000p.h1
-rw-r--r--drivers/media/dvb-frontends/dib8000.h1
-rw-r--r--drivers/media/dvb-frontends/dib9000.h1
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.h1
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb-frontends/drxk.h1
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.h1
-rw-r--r--drivers/media/dvb-frontends/drxk_map.h1
-rw-r--r--drivers/media/dvb-frontends/ds3000.c22
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.h1
-rw-r--r--drivers/media/dvb-frontends/lg2160.c14
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c3
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c23
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/nxt6000_priv.h1
-rw-r--r--drivers/media/dvb-frontends/si2168.c1
-rw-r--r--drivers/media/dvb-frontends/si21xx.h1
-rw-r--r--drivers/media/dvb-frontends/sp2.c9
-rw-r--r--drivers/media/dvb-frontends/sp887x.h1
-rw-r--r--drivers/media/dvb-frontends/stv0288.c7
-rw-r--r--drivers/media/dvb-frontends/stv6110.c9
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.h1
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd_maps.h1
-rw-r--r--drivers/media/i2c/Kconfig16
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c7
-rw-r--r--drivers/media/i2c/adv7604.c10
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c2
-rw-r--r--drivers/media/i2c/dw9714.c7
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c2
-rw-r--r--drivers/media/i2c/imx274.c1811
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/msp3400-driver.h1
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/ov13858.c61
-rw-r--r--drivers/media/i2c/ov2640.c17
-rw-r--r--drivers/media/i2c/ov5640.c2
-rw-r--r--drivers/media/i2c/ov5647.c51
-rw-r--r--drivers/media/i2c/ov5670.c37
-rw-r--r--drivers/media/i2c/ov6650.c5
-rw-r--r--drivers/media/i2c/ov7670.c129
-rw-r--r--drivers/media/i2c/ov9650.c1
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c149
-rw-r--r--drivers/media/i2c/smiapp/smiapp-regs.c3
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h1
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/ov9640.c11
-rw-r--r--drivers/media/i2c/soc_camera/ov9740.c6
-rw-r--r--drivers/media/i2c/tc358743.c220
-rw-r--r--drivers/media/i2c/tc358743_regs.h94
-rw-r--r--drivers/media/i2c/tea6415c.h1
-rw-r--r--drivers/media/i2c/tea6420.h1
-rw-r--r--drivers/media/i2c/tvaudio.c8
-rw-r--r--drivers/media/media-entity.c13
-rw-r--r--drivers/media/mmc/Makefile1
-rw-r--r--drivers/media/pci/Makefile1
-rw-r--r--drivers/media/pci/b2c2/Kconfig4
-rw-r--r--drivers/media/pci/b2c2/Makefile1
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c1
-rw-r--r--drivers/media/pci/bt8xx/Makefile1
-rw-r--r--drivers/media/pci/bt8xx/btcx-risc.h1
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c19
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv.h1
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h3
-rw-r--r--drivers/media/pci/bt8xx/dst_priv.h1
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c5
-rw-r--r--drivers/media/pci/cx18/Makefile1
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c28
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx23885/Makefile1
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-f300.h1
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-vbi.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx25821/Makefile1
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx88/Makefile1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c4
-rw-r--r--drivers/media/pci/cx88/cx88-vbi.c1
-rw-r--r--drivers/media/pci/ddbridge/Makefile1
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h4
-rw-r--r--drivers/media/pci/ivtv/Makefile1
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/mantis/Makefile1
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c9
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c8
-rw-r--r--drivers/media/pci/meye/meye.c20
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig12
-rw-r--r--drivers/media/pci/netup_unidvb/Makefile1
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c7
-rw-r--r--drivers/media/pci/ngene/Makefile1
-rw-r--r--drivers/media/pci/pt3/Makefile1
-rw-r--r--drivers/media/pci/saa7134/Makefile1
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c9
-rw-r--r--drivers/media/pci/saa7134/saa7134-reg.h1
-rw-r--r--drivers/media/pci/saa7134/saa7134-ts.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-vbi.c5
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134.h4
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c7
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c10
-rw-r--r--drivers/media/pci/saa7164/Makefile1
-rw-r--r--drivers/media/pci/saa7164/saa7164-buffer.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/smipcie/Makefile1
-rw-r--r--drivers/media/pci/ttpci/Makefile1
-rw-r--r--drivers/media/pci/ttpci/av7110.c8
-rw-r--r--drivers/media/pci/ttpci/av7110.h3
-rw-r--r--drivers/media/pci/ttpci/av7110_av.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_hw.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_ipack.c1
-rw-r--r--drivers/media/pci/ttpci/av7110_ipack.h1
-rw-r--r--drivers/media/pci/ttpci/av7110_ir.c56
-rw-r--r--drivers/media/pci/ttpci/budget-core.c2
-rw-r--r--drivers/media/pci/ttpci/budget.h1
-rw-r--r--drivers/media/pci/ttpci/dvb_filter.c1
-rw-r--r--drivers/media/pci/tw5864/tw5864-util.c1
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c11
-rw-r--r--drivers/media/pci/tw686x/tw686x-regs.h1
-rw-r--r--drivers/media/pci/zoran/Makefile1
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig36
-rw-r--r--drivers/media/platform/Makefile7
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c8
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h2
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c652
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c24
-rw-r--r--drivers/media/platform/blackfin/ppi.c1
-rw-r--r--drivers/media/platform/cec-gpio/Makefile1
-rw-r--r--drivers/media/platform/cec-gpio/cec-gpio.c239
-rw-r--r--drivers/media/platform/coda/coda-bit.c4
-rw-r--r--drivers/media/platform/coda/trace.h1
-rw-r--r--drivers/media/platform/davinci/Makefile1
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h4
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c2
-rw-r--r--drivers/media/platform/davinci/isif.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c37
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c6
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c8
-rw-r--r--drivers/media/platform/davinci/vpif_display.c8
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c127
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/Makefile1
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/fsl-viu.c7
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c1
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h1
-rw-r--r--drivers/media/platform/mtk-mdp/Makefile1
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile1
-rw-r--r--drivers/media/platform/omap/omap_vout.c3
-rw-r--r--drivers/media/platform/omap3isp/Makefile1
-rw-r--r--drivers/media/platform/omap3isp/isp.c133
-rw-r--r--drivers/media/platform/omap3isp/isp.h5
-rw-r--r--drivers/media/platform/pxa_camera.c8
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c1
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c8
-rw-r--r--drivers/media/platform/qcom/venus/Makefile1
-rw-r--r--drivers/media/platform/qcom/venus/core.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c9
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c12
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c34
-rw-r--r--drivers/media/platform/qcom/venus/venc.c7
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c117
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c10
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c14
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h4
-rw-r--r--drivers/media/platform/rcar_drif.c12
-rw-r--r--drivers/media/platform/rockchip/rga/Makefile3
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c154
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c421
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.h437
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c1010
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h125
-rw-r--r--drivers/media/platform/s5p-mfc/Makefile1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c26
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c14
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Makefile1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c7
-rw-r--r--drivers/media/platform/sti/hva/hva-h264.c5
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c10
-rw-r--r--drivers/media/platform/tegra-cec/Makefile1
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c495
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.h127
-rw-r--r--drivers/media/platform/ti-vpe/Makefile1
-rw-r--r--drivers/media/platform/ti-vpe/cal.c8
-rw-r--r--drivers/media/platform/via-camera.h1
-rw-r--r--drivers/media/platform/vim2m.c6
-rw-r--r--drivers/media/platform/vimc/Makefile1
-rw-r--r--drivers/media/platform/vimc/vimc-core.c5
-rw-r--r--drivers/media/platform/vivid/Makefile1
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c16
-rw-r--r--drivers/media/platform/vsp1/Makefile1
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c8
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/lm7000.h1
-rw-r--r--drivers/media/radio/radio-cadet.c7
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h2
-rw-r--r--drivers/media/radio/wl128x/Kconfig10
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c7
-rw-r--r--drivers/media/rc/Kconfig16
-rw-r--r--drivers/media/rc/Makefile2
-rw-r--r--drivers/media/rc/ati_remote.c2
-rw-r--r--drivers/media/rc/ene_ir.c7
-rw-r--r--drivers/media/rc/gpio-ir-recv.c192
-rw-r--r--drivers/media/rc/igorplugusb.c8
-rw-r--r--drivers/media/rc/img-ir/Makefile1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c5
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c13
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/imon.c30
-rw-r--r--drivers/media/rc/ir-lirc-codec.c65
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c7
-rw-r--r--drivers/media/rc/ir-nec-decoder.c29
-rw-r--r--drivers/media/rc/keymaps/Makefile5
-rw-r--r--drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c70
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c3
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-poplar.c69
-rw-r--r--drivers/media/rc/keymaps/rc-hisi-tv-demo.c81
-rw-r--r--drivers/media/rc/keymaps/rc-tango.c92
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c2
-rw-r--r--drivers/media/rc/lirc_dev.c515
-rw-r--r--drivers/media/rc/mceusb.c20
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-ir-raw.c8
-rw-r--r--drivers/media/rc/rc-main.c79
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/serial_ir.c5
-rw-r--r--drivers/media/rc/sir_ir.c4
-rw-r--r--drivers/media/rc/streamzap.c2
-rw-r--r--drivers/media/rc/tango-ir.c281
-rw-r--r--drivers/media/tuners/Makefile1
-rw-r--r--drivers/media/tuners/fc0011.h1
-rw-r--r--drivers/media/tuners/mt2063.h1
-rw-r--r--drivers/media/usb/Makefile1
-rw-r--r--drivers/media/usb/as102/Makefile1
-rw-r--r--drivers/media/usb/as102/as102_fw.c28
-rw-r--r--drivers/media/usb/au0828/Makefile1
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c8
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c2
-rw-r--r--drivers/media/usb/au0828/au0828-input.c2
-rw-r--r--drivers/media/usb/au0828/au0828-vbi.c2
-rw-r--r--drivers/media/usb/au0828/au0828-video.c18
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/b2c2/Kconfig6
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.h1
-rw-r--r--drivers/media/usb/cx231xx/Makefile1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-vbi.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/Makefile1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c1
-rw-r--r--drivers/media/usb/dvb-usb/Makefile1
-rw-r--r--drivers/media/usb/dvb-usb/a800.c65
-rw-r--r--drivers/media/usb/dvb-usb/af9005-script.h1
-rw-r--r--drivers/media/usb/dvb-usb/az6027.h1
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.h1
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c24
-rw-r--r--drivers/media/usb/dvb-usb/dib07x0.h1
-rw-r--r--drivers/media/usb/dvb-usb/digitv.h1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-common.h1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-firmware.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-i2c.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-urb.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.h1
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c24
-rw-r--r--drivers/media/usb/dvb-usb/m920x.h1
-rw-r--r--drivers/media/usb/dvb-usb/usb-urb.c1
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.h1
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c88
-rw-r--r--drivers/media/usb/em28xx/Makefile1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h1
-rw-r--r--drivers/media/usb/em28xx/em28xx-v4l.h2
-rw-r--r--drivers/media/usb/em28xx/em28xx-vbi.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c4
-rw-r--r--drivers/media/usb/go7007/Makefile1
-rw-r--r--drivers/media/usb/gspca/Kconfig16
-rw-r--r--drivers/media/usb/gspca/Makefile1
-rw-r--r--drivers/media/usb/gspca/gl860/Makefile1
-rw-r--r--drivers/media/usb/gspca/gspca.c1
-rw-r--r--drivers/media/usb/gspca/gspca.h1
-rw-r--r--drivers/media/usb/gspca/m5602/Makefile1
-rw-r--r--drivers/media/usb/gspca/ov519.c22
-rw-r--r--drivers/media/usb/gspca/stv06xx/Makefile1
-rw-r--r--drivers/media/usb/msi2500/msi2500.c2
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig1
-rw-r--r--drivers/media/usb/pvrusb2/Makefile1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c64
-rw-r--r--drivers/media/usb/pwc/pwc-if.c3
-rw-r--r--drivers/media/usb/pwc/pwc-nala.h1
-rw-r--r--drivers/media/usb/s2255/s2255drv.c7
-rw-r--r--drivers/media/usb/stk1160/Makefile1
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c4
-rw-r--r--drivers/media/usb/tm6000/Makefile1
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c27
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c15
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c21
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c4
-rw-r--r--drivers/media/usb/usbvision/usbvision-cards.h1
-rw-r--r--drivers/media/usb/uvc/Makefile1
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c4
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h1
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c32
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c516
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c22
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c702
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-trace.c1
-rw-r--r--drivers/media/v4l2-core/vb2-trace.c1
-rw-r--r--drivers/memory/Makefile2
-rw-r--r--drivers/memory/brcmstb_dpfe.c722
-rw-r--r--drivers/memory/omap-gpmc.c54
-rw-r--r--drivers/memory/tegra/Makefile1
-rw-r--r--drivers/memstick/core/ms_block.c7
-rw-r--r--drivers/memstick/host/Makefile1
-rw-r--r--drivers/memstick/host/jmb38x_ms.c10
-rw-r--r--drivers/memstick/host/r592.c7
-rw-r--r--drivers/memstick/host/tifm_ms.c6
-rw-r--r--drivers/message/fusion/Makefile1
-rw-r--r--drivers/message/fusion/lsi/mpi.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_fc.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_init.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_lan.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_log_fc.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_log_sas.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_raid.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_sas.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_targ.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_tool.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_type.h1
-rw-r--r--drivers/message/fusion/mptbase.c4
-rw-r--r--drivers/message/fusion/mptdebug.h1
-rw-r--r--drivers/mfd/Kconfig50
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/arizona-core.c132
-rw-r--r--drivers/mfd/axp20x.c2
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c17
-rw-r--r--drivers/mfd/intel-lpss.h7
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c184
-rw-r--r--drivers/mfd/lpc_ich.c1
-rw-r--r--drivers/mfd/max77693.c5
-rw-r--r--drivers/mfd/mxs-lradc.c6
-rw-r--r--drivers/mfd/rts5249.c155
-rw-r--r--drivers/mfd/rtsx_pcr.c142
-rw-r--r--drivers/mfd/rtsx_pcr.h14
-rw-r--r--drivers/mfd/rtsx_usb.c6
-rw-r--r--drivers/mfd/sm501.c49
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c259
-rw-r--r--drivers/mfd/ssbi.c2
-rw-r--r--drivers/mfd/stw481x.c10
-rw-r--r--drivers/mfd/tps65217.c28
-rw-r--r--drivers/mfd/tps65218.c8
-rw-r--r--drivers/mfd/twl-core.h1
-rw-r--r--drivers/mfd/wm97xx-core.c366
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/altera-stapl/Kconfig3
-rw-r--r--drivers/misc/c2port/core.c2
-rw-r--r--drivers/misc/cxl/Makefile1
-rw-r--r--drivers/misc/cxl/api.c16
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h22
-rw-r--r--drivers/misc/cxl/debugfs.c29
-rw-r--r--drivers/misc/cxl/fault.c15
-rw-r--r--drivers/misc/cxl/file.c24
-rw-r--r--drivers/misc/cxl/flash.c1
-rw-r--r--drivers/misc/cxl/native.c27
-rw-r--r--drivers/misc/cxl/pci.c88
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at24.c112
-rw-r--r--drivers/misc/genwqe/card_base.h7
-rw-r--r--drivers/misc/genwqe/card_dev.c6
-rw-r--r--drivers/misc/genwqe/card_utils.c43
-rw-r--r--drivers/misc/ibmasm/Makefile1
-rw-r--r--drivers/misc/ibmasm/event.c2
-rw-r--r--drivers/misc/ibmasm/module.c6
-rw-r--r--drivers/misc/kgdbts.c3
-rw-r--r--drivers/misc/lkdtm.h1
-rw-r--r--drivers/misc/lkdtm_bugs.c5
-rw-r--r--drivers/misc/lkdtm_core.c172
-rw-r--r--drivers/misc/lkdtm_heap.c1
-rw-r--r--drivers/misc/lkdtm_perms.c1
-rw-r--r--drivers/misc/lkdtm_rodata.c1
-rw-r--r--drivers/misc/lkdtm_usercopy.c1
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/mei-trace.c1
-rw-r--r--drivers/misc/mei/mei-trace.h19
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/pci-txe.c2
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/mic/Makefile1
-rw-r--r--drivers/misc/mic/card/Makefile1
-rw-r--r--drivers/misc/mic/cosm/Makefile1
-rw-r--r--drivers/misc/mic/host/Makefile1
-rw-r--r--drivers/misc/mic/scif/Makefile1
-rw-r--r--drivers/misc/mic/scif/scif_rb.c8
-rw-r--r--drivers/misc/mic/scif/scif_rma.c3
-rw-r--r--drivers/misc/mic/scif/scif_rma_list.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c33
-rw-r--r--drivers/misc/sgi-xp/Makefile1
-rw-r--r--drivers/misc/sgi-xp/xpc_main.c15
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c15
-rw-r--r--drivers/misc/ti_dac7512.c103
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/block.c346
-rw-r--r--drivers/mmc/core/block.h1
-rw-r--r--drivers/mmc/core/bus.c7
-rw-r--r--drivers/mmc/core/core.c262
-rw-r--r--drivers/mmc/core/core.h16
-rw-r--r--drivers/mmc/core/host.c26
-rw-r--r--drivers/mmc/core/host.h7
-rw-r--r--drivers/mmc/core/mmc.c46
-rw-r--r--drivers/mmc/core/mmc_ops.c6
-rw-r--r--drivers/mmc/core/queue.c41
-rw-r--r--drivers/mmc/core/queue.h5
-rw-r--r--drivers/mmc/core/quirks.h1
-rw-r--r--drivers/mmc/core/sd.c51
-rw-r--r--drivers/mmc/core/sd.h1
-rw-r--r--drivers/mmc/core/sdio_irq.c3
-rw-r--r--drivers/mmc/host/Kconfig28
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/atmel-mci.c13
-rw-r--r--drivers/mmc/host/cavium.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c2
-rw-r--r--drivers/mmc/host/dw_mmc-zx.h1
-rw-r--r--drivers/mmc/host/dw_mmc.c191
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c7
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c768
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c285
-rw-r--r--drivers/mmc/host/mvsdio.c6
-rw-r--r--drivers/mmc/host/mxcmmc.c11
-rw-r--r--drivers/mmc/host/omap.c20
-rw-r--r--drivers/mmc/host/omap_hsmmc.c35
-rw-r--r--drivers/mmc/host/pxamci.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c18
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c38
-rw-r--r--drivers/mmc/host/sdhci-acpi.c174
-rw-r--r--drivers/mmc/host/sdhci-cadence.c28
-rw-r--r--drivers/mmc/host/sdhci-msm.c326
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c3
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c58
-rw-r--r--drivers/mmc/host/sdhci-omap.c607
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c11
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c35
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.h73
-rw-r--r--drivers/mmc/host/sdhci-pci.h14
-rw-r--r--drivers/mmc/host/sdhci-s3c.c18
-rw-r--r--drivers/mmc/host/sdhci-tegra.c10
-rw-r--r--drivers/mmc/host/sdhci.c15
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c14
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/mmc/host/tifm_sd.c6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c49
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c8
-rw-r--r--drivers/mmc/host/vub300.c41
-rw-r--r--drivers/mmc/host/wbsd.c8
-rw-r--r--drivers/mtd/Kconfig1
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/Makefile1
-rw-r--r--drivers/mtd/chips/fwh_lock.h1
-rw-r--r--drivers/mtd/chips/map_ram.c34
-rw-r--r--drivers/mtd/chips/map_rom.c34
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h1
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/devices/docg3.c7
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtdram.c36
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/devices/powernv_flash.c83
-rw-r--r--drivers/mtd/devices/slram.c9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/cfi_flagadm.c2
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/netsc520.c2
-rw-r--r--drivers/mtd/maps/nettel.c2
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c1
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.h1
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.h1
-rw-r--r--drivers/mtd/maps/plat-ram.c38
-rw-r--r--drivers/mtd/maps/sbc_gxx.c2
-rw-r--r--drivers/mtd/maps/ts5500_flash.c2
-rw-r--r--drivers/mtd/maps/tsunami_flash.c1
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/mtdchar.c24
-rw-r--r--drivers/mtd/mtdconcat.c27
-rw-r--r--drivers/mtd/mtdcore.c61
-rw-r--r--drivers/mtd/mtdcore.h1
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/mtdsuper.c6
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig5
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c7
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c17
-rw-r--r--drivers/mtd/nand/atmel/pmecc.h1
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h1
-rw-r--r--drivers/mtd/nand/brcmnand/Makefile1
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/denali.c291
-rw-r--r--drivers/mtd/nand/denali.h44
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/denali_pci.c5
-rw-r--r--drivers/mtd/nand/diskonchip.c3
-rw-r--r--drivers/mtd/nand/gpio.c112
-rw-r--r--drivers/mtd/nand/hisi504_nand.c3
-rw-r--r--drivers/mtd/nand/mtk_ecc.c13
-rw-r--r--drivers/mtd/nand/mxc_nand.c19
-rw-r--r--drivers/mtd/nand/nand_base.c34
-rw-r--r--drivers/mtd/nand/nandsim.c13
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c377
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c41
-rw-r--r--drivers/mtd/nand/qcom_nandc.c127
-rw-r--r--drivers/mtd/nand/sh_flctl.c9
-rw-r--r--drivers/mtd/onenand/Makefile1
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c1
-rw-r--r--drivers/mtd/parsers/Kconfig8
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/sharpslpart.c398
-rw-r--r--drivers/mtd/sm_ftl.c6
-rw-r--r--drivers/mtd/spi-nor/Kconfig6
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c3
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c209
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c70
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c105
-rw-r--r--drivers/mtd/spi-nor/stm32-quadspi.c35
-rw-r--r--drivers/mtd/tests/Makefile1
-rw-r--r--drivers/mtd/tests/mtd_test.c1
-rw-r--r--drivers/mtd/tests/mtd_test.h1
-rw-r--r--drivers/mtd/ubi/Makefile1
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/wl.h1
-rw-r--r--drivers/net/Kconfig12
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/appletalk/cops.c13
-rw-r--r--drivers/net/appletalk/cops.h1
-rw-r--r--drivers/net/appletalk/ipddp.h1
-rw-r--r--drivers/net/appletalk/ltpc.c18
-rw-r--r--drivers/net/appletalk/ltpc.h1
-rw-r--r--drivers/net/arcnet/Makefile1
-rw-r--r--drivers/net/arcnet/arcnet.c9
-rw-r--r--drivers/net/arcnet/com9026.h1
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_debugfs.c1
-rw-r--r--drivers/net/bonding/bond_main.c51
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/caif/Makefile1
-rw-r--r--drivers/net/caif/caif_hsi.c24
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can_pci.c1
-rw-r--r--drivers/net/can/c_can/c_can_platform.c1
-rw-r--r--drivers/net/can/grcan.c21
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c6
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c14
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/softing/softing.h1
-rw-r--r--drivers/net/can/softing/softing_platform.h1
-rw-r--r--drivers/net/can/sun4i_can.c12
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c11
-rw-r--r--drivers/net/cris/eth_v10.c37
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/Kconfig2
-rw-r--r--drivers/net/dsa/b53/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c235
-rw-r--r--drivers/net/dsa/b53/b53_priv.h146
-rw-r--r--drivers/net/dsa/b53/b53_regs.h48
-rw-r--r--drivers/net/dsa/bcm_sf2.c242
-rw-r--r--drivers/net/dsa/bcm_sf2.h3
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c1130
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h49
-rw-r--r--drivers/net/dsa/dsa_loop.c11
-rw-r--r--drivers/net/dsa/dsa_loop.h1
-rw-r--r--drivers/net/dsa/lan9303-core.c557
-rw-r--r--drivers/net/dsa/lan9303.h22
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c7
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c3
-rw-r--r--drivers/net/dsa/mt7530.c21
-rw-r--r--drivers/net/dsa/mv88e6060.c51
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c210
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c35
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c7
-rw-r--r--drivers/net/dsa/qca8k.c15
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/eql.c8
-rw-r--r--drivers/net/ethernet/3com/3c515.c10
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c13
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c20
-rw-r--r--drivers/net/ethernet/3com/Makefile1
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c10
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c11
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c10
-rw-r--r--drivers/net/ethernet/aeroflex/greth.h1
-rw-r--r--drivers/net/ethernet/agere/et131x.c8
-rw-r--r--drivers/net/ethernet/alacritech/slic.h1
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c2
-rw-r--r--drivers/net/ethernet/alteon/acenic.h1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c16
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c5
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c217
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h15
-rw-r--r--drivers/net/ethernet/amd/7990.h1
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/a2065.c14
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c11
-rw-r--r--drivers/net/ethernet/amd/am79c961a.h1
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c12
-rw-r--r--drivers/net/ethernet/amd/hplance.h1
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c11
-rw-r--r--drivers/net/ethernet/amd/sunlance.c9
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c16
-rw-r--r--drivers/net/ethernet/apple/bmac.c12
-rw-r--r--drivers/net/ethernet/apple/mace.c12
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c3
-rw-r--r--drivers/net/ethernet/arc/emac.h1
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c1
-rw-r--r--drivers/net/ethernet/atheros/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c8
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c15
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/b44.h1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c108
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c140
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h12
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c16
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c235
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h49
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c65
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c120
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h420
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c946
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h88
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c96
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c272
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c34
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c43
-rw-r--r--drivers/net/ethernet/cadence/Makefile1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c11
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/Makefile3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c22
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c382
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c28
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c695
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h49
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h68
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_image.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c32
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c15
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h24
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_iq.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.c5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_nic.h3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c20
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c24
-rw-r--r--drivers/net/ethernet/cavium/thunder/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c86
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/fpga_defs.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/my3126.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/regs.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c130
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge_defs.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3cdev.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h384
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h90
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c1929
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h169
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h87
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h105
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c403
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c103
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c1055
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c135
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c876
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h120
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/smt.c247
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/smt.h76
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c637
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h118
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h69
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h81
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c58
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c6
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c77
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c16
-rw-r--r--drivers/net/ethernet/davicom/dm9000.h1
-rw-r--r--drivers/net/ethernet/dec/tulip/Makefile1
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c21
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic2.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h12
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c14
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c12
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c12
-rw-r--r--drivers/net/ethernet/dlink/sundance.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c26
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c5
-rw-r--r--drivers/net/ethernet/fealnx.c30
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c156
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c31
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c82
-rw-r--r--drivers/net/ethernet/freescale/fman/Makefile13
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c145
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c5
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig15
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c64
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h150
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c347
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1412
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h53
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c260
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c100
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c504
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c485
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c14
-rw-r--r--drivers/net/ethernet/i825xx/Makefile1
-rw-r--r--drivers/net/ethernet/ibm/emac/Makefile1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c520
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h60
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e100.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c92
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c16
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h60
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c176
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c209
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c482
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h215
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h210
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c459
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c907
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2911
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c55
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c280
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h182
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c69
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h9
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h11
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h23
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h34
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.c39
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_client.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c38
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c203
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c45
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h23
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h10
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c371
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c9
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c333
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/korina.c238
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c13
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c13
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c419
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/skge.h1
-rw-r--r--drivers/net/ethernet/marvell/sky2.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.h1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c619
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c265
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c184
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c899
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h291
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c998
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c272
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c525
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h842
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c343
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c434
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c291
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c1012
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h134
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c839
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c276
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1546
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c514
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h6
-rw-r--r--drivers/net/ethernet/micrel/Makefile1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.h1
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c18
-rw-r--r--drivers/net/ethernet/microchip/enc28j60_hw.h1
-rw-r--r--drivers/net/ethernet/microchip/encx24j600_hw.h1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h1
-rw-r--r--drivers/net/ethernet/natsemi/Makefile1
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c12
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c10
-rw-r--r--drivers/net/ethernet/natsemi/sonic.h1
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c60
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile9
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c1299
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c128
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h114
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c282
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c123
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c420
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h146
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c47
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h40
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c114
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c188
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c804
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c257
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.h307
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c28
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c77
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c148
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h36
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c87
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c9
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c193
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h3
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c50
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c16
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c7
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig3
-rw-r--r--drivers/net/ethernet/qlogic/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c749
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.h44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c103
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c16
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c27
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Kconfig1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c168
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h35
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c155
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c120
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h7
-rw-r--r--drivers/net/ethernet/realtek/atp.c14
-rw-r--r--drivers/net/ethernet/realtek/atp.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c295
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c90
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c32
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c7
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c16
-rw-r--r--drivers/net/ethernet/seeq/ether3.c11
-rw-r--r--drivers/net/ethernet/seeq/ether3.h1
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.h1
-rw-r--r--drivers/net/ethernet/sfc/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c28
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/Makefile1
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c15
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.h8
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c8
-rw-r--r--drivers/net/ethernet/sfc/farch.c12
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c9
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.h6
-rw-r--r--drivers/net/ethernet/sfc/ptp.c12
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/sfc/tx.c8
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c8
-rw-r--r--drivers/net/ethernet/sis/sis190.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c12
-rw-r--r--drivers/net/ethernet/sis/sis900.h1
-rw-r--r--drivers/net/ethernet/smsc/Makefile1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c12
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c361
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c20
-rw-r--r--drivers/net/ethernet/sun/Makefile1
-rw-r--r--drivers/net/ethernet/sun/cassini.c8
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c16
-rw-r--r--drivers/net/ethernet/sun/niu.h1
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h1
-rw-r--r--drivers/net/ethernet/sun/sungem.c8
-rw-r--r--drivers/net/ethernet/sun/sungem.h1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c10
-rw-r--r--drivers/net/ethernet/sun/sunhme.h1
-rw-r--r--drivers/net/ethernet/sun/sunqe.h1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h3
-rw-r--r--drivers/net/ethernet/synopsys/Makefile1
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c26
-rw-r--r--drivers/net/ethernet/tile/Makefile1
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/ethernet/tile/tilepro.c9
-rw-r--r--drivers/net/ethernet/toshiba/Makefile1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c18
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c10
-rw-r--r--drivers/net/ethernet/xilinx/Makefile1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/fddi/skfp/Makefile1
-rw-r--r--drivers/net/geneve.c60
-rw-r--r--drivers/net/hamradio/6pack.c34
-rw-r--r--drivers/net/hamradio/Makefile1
-rw-r--r--drivers/net/hamradio/baycom_epp.c50
-rw-r--r--drivers/net/hamradio/baycom_par.c48
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/scc.c61
-rw-r--r--drivers/net/hamradio/yam.c6
-rw-r--r--drivers/net/hamradio/z8530.h1
-rw-r--r--drivers/net/hippi/rrunner.c10
-rw-r--r--drivers/net/hippi/rrunner.h1
-rw-r--r--drivers/net/hyperv/hyperv_net.h21
-rw-r--r--drivers/net/hyperv/netvsc.c88
-rw-r--r--drivers/net/hyperv/netvsc_drv.c116
-rw-r--r--drivers/net/hyperv/rndis_filter.c149
-rw-r--r--drivers/net/ieee802154/Makefile1
-rw-r--r--drivers/net/ieee802154/adf7242.c8
-rw-r--r--drivers/net/ieee802154/atusb.c392
-rw-r--r--drivers/net/ieee802154/atusb.h8
-rw-r--r--drivers/net/ieee802154/ca8210.c22
-rw-r--r--drivers/net/ieee802154/cc2520.c21
-rw-r--r--drivers/net/ieee802154/mrf24j40.c2
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipvlan/ipvlan.h31
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c62
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c164
-rw-r--r--drivers/net/macsec.c29
-rw-r--r--drivers/net/macvlan.c31
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/netconsole.c4
-rw-r--r--drivers/net/ntb_netdev.c8
-rw-r--r--drivers/net/phy/Kconfig17
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/at803x.c2
-rw-r--r--drivers/net/phy/broadcom.c6
-rw-r--r--drivers/net/phy/cortina.c4
-rw-r--r--drivers/net/phy/dp83640_reg.h1
-rw-r--r--drivers/net/phy/dp83822.c344
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/phy/marvell.c8
-rw-r--r--drivers/net/phy/mdio-boardinfo.h1
-rw-r--r--drivers/net/phy/phy_led_triggers.c63
-rw-r--r--drivers/net/phy/phylink.c16
-rw-r--r--drivers/net/phy/realtek.c56
-rw-r--r--drivers/net/phy/sfp-bus.c11
-rw-r--r--drivers/net/phy/sfp.c29
-rw-r--r--drivers/net/phy/swphy.h1
-rw-r--r--drivers/net/phy/uPD60620.c109
-rw-r--r--drivers/net/plip/plip.c13
-rw-r--r--drivers/net/ppp/Makefile1
-rw-r--r--drivers/net/ppp/ppp_async.c10
-rw-r--r--drivers/net/ppp/ppp_generic.c26
-rw-r--r--drivers/net/ppp/ppp_mppe.h1
-rw-r--r--drivers/net/ppp/ppp_synctty.c11
-rw-r--r--drivers/net/slip/slip.c20
-rw-r--r--drivers/net/slip/slip.h1
-rw-r--r--drivers/net/tap.c6
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c5
-rw-r--r--drivers/net/team/team_mode_loadbalance.c8
-rw-r--r--drivers/net/thunderbolt.c1363
-rw-r--r--drivers/net/tun.c327
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/catc.c8
-rw-r--r--drivers/net/usb/cdc_ether.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/ipheth.c33
-rw-r--r--drivers/net/usb/lan78xx.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/sierra_net.c12
-rw-r--r--drivers/net/usb/usbnet.c12
-rw-r--r--drivers/net/virtio_net.c91
-rw-r--r--drivers/net/vrf.c26
-rw-r--r--drivers/net/vxlan.c43
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/dscc4.c22
-rw-r--r--drivers/net/wan/hd64570.h1
-rw-r--r--drivers/net/wan/hdlc_cisco.c15
-rw-r--r--drivers/net/wan/hdlc_fr.c13
-rw-r--r--drivers/net/wan/hdlc_ppp.c8
-rw-r--r--drivers/net/wan/lapbether.c1
-rw-r--r--drivers/net/wan/lmc/lmc.h1
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c1
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h1
-rw-r--r--drivers/net/wan/lmc/lmc_main.c12
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h1
-rw-r--r--drivers/net/wan/sbni.c21
-rw-r--r--drivers/net/wan/sdla.c14
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c1
-rw-r--r--drivers/net/wan/x25_asy.h1
-rw-r--r--drivers/net/wan/z85230.h1
-rw-r--r--drivers/net/wimax/i2400m/Makefile1
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/admtek/adm8211.h1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c165
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c168
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c46
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/spectral.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c13
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h6
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c45
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c15
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/atmel/atmel.c11
-rw-r--r--drivers/net/wireless/broadcom/b43/Makefile1
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/bus.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/debugfs.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/lo.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_a.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ac.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_g.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lcn.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/ppr.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/radio_2055.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/radio_2057.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/radio_2059.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/rfkill.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/sdio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/tables.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_nphy.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_phy_ht.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_phy_lcn.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/wa.h1
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/Makefile1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/b43legacy.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/debugfs.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/ilt.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/pio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/rfkill.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c170
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c157
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h31
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c19
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c1675
-rw-r--r--drivers/net/wireless/cisco/airo.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c11
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c210
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/paging.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c103
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c242
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c280
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c31
-rw-r--r--drivers/net/wireless/intersil/hostap/Makefile1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap.h1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211.h1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_rx.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_80211_tx.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c9
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.h1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_common.h1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_config.h1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c17
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_info.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c1
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h1
-rw-r--r--drivers/net/wireless/intersil/orinoco/Makefile1
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c8
-rw-r--r--drivers/net/wireless/intersil/p54/Makefile1
-rw-r--r--drivers/net/wireless/intersil/p54/main.c7
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c203
-rw-r--r--drivers/net/wireless/marvell/libertas/Makefile1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/defs.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c21
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h1
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/deb_defs.h1
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c6
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c76
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c257
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c476
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h39
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c131
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h276
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c113
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dump.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/ray_cs.c41
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/rayctl.h1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c132
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c339
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c43
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h140
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c33
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h72
-rw-r--r--drivers/net/wireless/rsi/Makefile1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c41
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c19
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c94
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c577
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c33
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c111
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c16
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c269
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c16
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h6
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h36
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h54
-rw-r--r--drivers/net/wireless/rsi/rsi_ps.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h2
-rw-r--r--drivers/net/wireless/st/cw1200/Makefile1
-rw-r--r--drivers/net/wireless/st/cw1200/main.c3
-rw-r--r--drivers/net/wireless/st/cw1200/pm.c5
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c6
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c5
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h2
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/zydas/zd1201.c3
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/Makefile1
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netback/netback.c6
-rw-r--r--drivers/net/xen-netfront.c7
-rw-r--r--drivers/nfc/Makefile1
-rw-r--r--drivers/nfc/fdp/fdp.c2
-rw-r--r--drivers/nfc/mei_phy.h1
-rw-r--r--drivers/nfc/microread/Makefile1
-rw-r--r--drivers/nfc/microread/i2c.c2
-rw-r--r--drivers/nfc/nfcmrvl/Makefile1
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c7
-rw-r--r--drivers/nfc/nfcmrvl/i2c.c2
-rw-r--r--drivers/nfc/nxp-nci/i2c.c2
-rw-r--r--drivers/nfc/pn533/i2c.c2
-rw-r--r--drivers/nfc/pn533/pn533.c8
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c2
-rw-r--r--drivers/nfc/st-nci/Kconfig4
-rw-r--r--drivers/nfc/st-nci/Makefile1
-rw-r--r--drivers/nfc/st-nci/i2c.c2
-rw-r--r--drivers/nfc/st-nci/ndlc.c17
-rw-r--r--drivers/nfc/st-nci/se.c19
-rw-r--r--drivers/nfc/st21nfca/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/se.c19
-rw-r--r--drivers/ntb/hw/Kconfig1
-rw-r--r--drivers/ntb/hw/Makefile1
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c16
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c75
-rw-r--r--drivers/ntb/hw/mscc/Kconfig9
-rw-r--r--drivers/ntb/hw/mscc/Makefile1
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c1216
-rw-r--r--drivers/ntb/ntb_transport.c20
-rw-r--r--drivers/ntb/test/ntb_perf.c18
-rw-r--r--drivers/ntb/test/ntb_pingpong.c8
-rw-r--r--drivers/ntb/test/ntb_tool.c6
-rw-r--r--drivers/nubus/nubus.c14
-rw-r--r--drivers/nubus/proc.c1
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/Makefile2
-rw-r--r--drivers/nvdimm/badrange.c293
-rw-r--r--drivers/nvdimm/btt.c3
-rw-r--r--drivers/nvdimm/bus.c24
-rw-r--r--drivers/nvdimm/core.c260
-rw-r--r--drivers/nvdimm/dimm.c3
-rw-r--r--drivers/nvdimm/dimm_devs.c19
-rw-r--r--drivers/nvdimm/label.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c6
-rw-r--r--drivers/nvdimm/nd-core.h3
-rw-r--r--drivers/nvdimm/nd.h7
-rw-r--r--drivers/nvdimm/pfn_devs.c8
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvdimm/pmem.h1
-rw-r--r--drivers/nvdimm/region_devs.c8
-rw-r--r--drivers/nvme/Kconfig4
-rw-r--r--drivers/nvme/host/Kconfig9
-rw-r--r--drivers/nvme/host/Makefile2
-rw-r--r--drivers/nvme/host/core.c1302
-rw-r--r--drivers/nvme/host/fabrics.c16
-rw-r--r--drivers/nvme/host/fabrics.h14
-rw-r--r--drivers/nvme/host/fc.c793
-rw-r--r--drivers/nvme/host/lightnvm.c86
-rw-r--r--drivers/nvme/host/multipath.c291
-rw-r--r--drivers/nvme/host/nvme.h169
-rw-r--r--drivers/nvme/host/pci.c243
-rw-r--r--drivers/nvme/host/rdma.c257
-rw-r--r--drivers/nvme/target/Makefile1
-rw-r--r--drivers/nvme/target/admin-cmd.c21
-rw-r--r--drivers/nvme/target/configfs.c30
-rw-r--r--drivers/nvme/target/core.c23
-rw-r--r--drivers/nvme/target/fc.c50
-rw-r--r--drivers/nvme/target/io-cmd.c20
-rw-r--r--drivers/nvme/target/loop.c66
-rw-r--r--drivers/nvme/target/nvmet.h6
-rw-r--r--drivers/nvme/target/rdma.c16
-rw-r--r--drivers/nvmem/Kconfig35
-rw-r--r--drivers/nvmem/Makefile7
-rw-r--r--drivers/nvmem/bcm-ocotp.c1
-rw-r--r--drivers/nvmem/core.c13
-rw-r--r--drivers/nvmem/imx-iim.c24
-rw-r--r--drivers/nvmem/imx-ocotp.c193
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c1
-rw-r--r--drivers/nvmem/lpc18xx_otp.c1
-rw-r--r--drivers/nvmem/meson-efuse.c5
-rw-r--r--drivers/nvmem/meson-mx-efuse.c265
-rw-r--r--drivers/nvmem/mtk-efuse.c47
-rw-r--r--drivers/nvmem/mxs-ocotp.c1
-rw-r--r--drivers/nvmem/qfprom.c27
-rw-r--r--drivers/nvmem/rockchip-efuse.c5
-rw-r--r--drivers/nvmem/snvs_lpgpr.c156
-rw-r--r--drivers/nvmem/sunxi_sid.c7
-rw-r--r--drivers/nvmem/uniphier-efuse.c97
-rw-r--r--drivers/nvmem/vf610-ocotp.c1
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c19
-rw-r--r--drivers/of/base.c141
-rw-r--r--drivers/of/device.c8
-rw-r--r--drivers/of/dynamic.c190
-rw-r--r--drivers/of/fdt.c91
-rw-r--r--drivers/of/kobj.c164
-rw-r--r--drivers/of/of_pci.c2
-rw-r--r--drivers/of/of_private.h51
-rw-r--r--drivers/of/of_reserved_mem.c26
-rw-r--r--drivers/of/overlay.c1049
-rw-r--r--drivers/of/platform.c19
-rw-r--r--drivers/of/property.c4
-rw-r--r--drivers/of/resolver.c15
-rw-r--r--drivers/of/unittest-data/.gitignore2
-rw-r--r--drivers/of/unittest-data/Makefile2
-rw-r--r--drivers/of/unittest-data/overlay.dts1
-rw-r--r--drivers/of/unittest-data/overlay_bad_phandle.dts1
-rw-r--r--drivers/of/unittest-data/overlay_base.dts1
-rw-r--r--drivers/of/unittest-data/testcases.dts66
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-match.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-phandle.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi1
-rw-r--r--drivers/of/unittest.c84
-rw-r--r--drivers/opp/Kconfig13
-rw-r--r--drivers/opp/Makefile (renamed from drivers/base/power/opp/Makefile)0
-rw-r--r--drivers/opp/core.c (renamed from drivers/base/power/opp/core.c)143
-rw-r--r--drivers/opp/cpu.c (renamed from drivers/base/power/opp/cpu.c)0
-rw-r--r--drivers/opp/debugfs.c (renamed from drivers/base/power/opp/debugfs.c)10
-rw-r--r--drivers/opp/of.c (renamed from drivers/base/power/opp/of.c)6
-rw-r--r--drivers/opp/opp.h (renamed from drivers/base/power/opp/opp.h)6
-rw-r--r--drivers/oprofile/nmi_timer_int.c1
-rw-r--r--drivers/oprofile/oprofile_perf.c1
-rw-r--r--drivers/parisc/Makefile1
-rw-r--r--drivers/parisc/iommu-helpers.h1
-rw-r--r--drivers/parport/Makefile1
-rw-r--r--drivers/parport/ieee1284.c21
-rw-r--r--drivers/parport/ieee1284_ops.c1
-rw-r--r--drivers/parport/multiface.h1
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/probe.c1
-rw-r--r--drivers/parport/procfs.c1
-rw-r--r--drivers/pci/Kconfig26
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/dwc/Kconfig10
-rw-r--r--drivers/pci/dwc/Makefile2
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c33
-rw-r--r--drivers/pci/dwc/pci-layerscape.c12
-rw-r--r--drivers/pci/dwc/pcie-histb.c470
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c12
-rw-r--r--drivers/pci/host/Kconfig6
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-ftpci100.c23
-rw-r--r--drivers/pci/host/pci-host-generic.c43
-rw-r--r--drivers/pci/host/pci-hyperv.c8
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c20
-rw-r--r--drivers/pci/host/pci-tegra.c158
-rw-r--r--drivers/pci/host/pci-v3-semi.c959
-rw-r--r--drivers/pci/host/pci-xgene.c24
-rw-r--r--drivers/pci/host/pcie-altera.c8
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c19
-rw-r--r--drivers/pci/host/pcie-iproc.c20
-rw-r--r--drivers/pci/host/pcie-rcar.c20
-rw-r--r--drivers/pci/host/pcie-tango.c206
-rw-r--r--drivers/pci/host/pcie-xilinx.c6
-rw-r--r--drivers/pci/hotplug-pci.c29
-rw-r--r--drivers/pci/hotplug/Makefile1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c15
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c7
-rw-r--r--drivers/pci/hotplug/cpqphp.h2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c3
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c19
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c19
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c7
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c25
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c11
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c9
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c6
-rw-r--r--drivers/pci/htirq.c134
-rw-r--r--drivers/pci/iov.c34
-rw-r--r--drivers/pci/irq.c1
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-driver.c135
-rw-r--r--drivers/pci/pci-label.c1
-rw-r--r--drivers/pci/pci-sysfs.c36
-rw-r--r--drivers/pci/pci.c157
-rw-r--r--drivers/pci/pci.h12
-rw-r--r--drivers/pci/pcie/Makefile1
-rw-r--r--drivers/pci/pcie/aer/Makefile1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c9
-rw-r--r--drivers/pci/pcie/aspm.c52
-rw-r--r--drivers/pci/pcie/pme.c5
-rw-r--r--drivers/pci/pcie/portdrv.h1
-rw-r--r--drivers/pci/pcie/portdrv_core.c172
-rw-r--r--drivers/pci/pcie/portdrv_pci.c2
-rw-r--r--drivers/pci/probe.c187
-rw-r--r--drivers/pci/proc.c1
-rw-r--r--drivers/pci/quirks.c43
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/rom.c19
-rw-r--r--drivers/pci/setup-bus.c299
-rw-r--r--drivers/pci/setup-res.c59
-rw-r--r--drivers/pci/switch/switchtec.c318
-rw-r--r--drivers/pci/syscall.c1
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c6
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.h1
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c6
-rw-r--r--drivers/pcmcia/cardbus.c5
-rw-r--r--drivers/pcmcia/cistpl.c2
-rw-r--r--drivers/pcmcia/cs_internal.h2
-rw-r--r--drivers/pcmcia/electra_cf.c12
-rw-r--r--drivers/pcmcia/i82092aa.h1
-rw-r--r--drivers/pcmcia/i82365.c6
-rw-r--r--drivers/pcmcia/m32r_cfc.c7
-rw-r--r--drivers/pcmcia/m32r_cfc.h1
-rw-r--r--drivers/pcmcia/m32r_pcc.c7
-rw-r--r--drivers/pcmcia/m32r_pcc.h1
-rw-r--r--drivers/pcmcia/omap_cf.c10
-rw-r--r--drivers/pcmcia/pd6729.c7
-rw-r--r--drivers/pcmcia/pd6729.h1
-rw-r--r--drivers/pcmcia/sa1100_assabet.c1
-rw-r--r--drivers/pcmcia/sa1100_cerf.c1
-rw-r--r--drivers/pcmcia/sa1100_generic.h1
-rw-r--r--drivers/pcmcia/sa1100_h3600.c1
-rw-r--r--drivers/pcmcia/sa1100_shannon.c1
-rw-r--r--drivers/pcmcia/sa1100_simpad.c1
-rw-r--r--drivers/pcmcia/sa1111_badge4.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c33
-rw-r--r--drivers/pcmcia/sa1111_generic.h1
-rw-r--r--drivers/pcmcia/sa1111_jornada720.c1
-rw-r--r--drivers/pcmcia/sa1111_neponset.c1
-rw-r--r--drivers/pcmcia/soc_common.c7
-rw-r--r--drivers/pcmcia/soc_common.h1
-rw-r--r--drivers/pcmcia/tcic.c8
-rw-r--r--drivers/pcmcia/yenta_socket.c7
-rw-r--r--drivers/pcmcia/yenta_socket.h1
-rw-r--r--drivers/perf/Kconfig15
-rw-r--r--drivers/perf/Makefile3
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/perf/arm_pmu_acpi.c3
-rw-r--r--drivers/perf/arm_pmu_platform.c5
-rw-r--r--drivers/perf/arm_spe_pmu.c1249
-rw-r--r--drivers/perf/hisilicon/Makefile1
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c463
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c473
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c463
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c447
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h102
-rw-r--r--drivers/perf/qcom_l2_pmu.c54
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/broadcom/Kconfig13
-rw-r--r--drivers/phy/broadcom/Makefile4
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c74
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c1017
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.h50
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c459
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c39
-rw-r--r--drivers/phy/phy-core.c15
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c29
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c29
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c42
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c72
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c2
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c217
-rw-r--r--drivers/phy/samsung/Makefile1
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c101
-rw-r--r--drivers/pinctrl/Kconfig32
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/bcm/Makefile1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c21
-rw-r--r--drivers/pinctrl/core.c12
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/intel/Kconfig11
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c375
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6397.h1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8127.h1
-rw-r--r--drivers/pinctrl/meson/Kconfig41
-rw-r--r--drivers/pinctrl/meson/Makefile9
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c892
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c852
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c200
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h47
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.c108
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8-pmx.h48
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c992
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c808
-rw-r--r--drivers/pinctrl/mvebu/Makefile1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c70
-rw-r--r--drivers/pinctrl/nomadik/Makefile1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.h1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h1
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-coh901.h1
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c333
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c21
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c22
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c2
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c43
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c134
-rw-r--r--drivers/pinctrl/pinctrl-single.c4
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c4
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c6
-rw-r--r--drivers/pinctrl/samsung/Kconfig2
-rw-r--r--drivers/pinctrl/samsung/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c131
-rw-r--r--drivers/pinctrl/sh-pfc/core.h11
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c4
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c403
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c542
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1904
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c573
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c394
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c1
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c12
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h24
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c6
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c6
-rw-r--r--drivers/pinctrl/spear/Makefile1
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c8
-rw-r--r--drivers/pinctrl/stm32/Makefile1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c9
-rw-r--r--drivers/pinctrl/sunxi/Makefile1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c3
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c13
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h1
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/Makefile1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c4
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c4
-rw-r--r--drivers/pinctrl/vt8500/Makefile1
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/x86/Kconfig63
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/asus-wmi.c63
-rw-r--r--drivers/platform/x86/dell-laptop.c286
-rw-r--r--drivers/platform/x86/dell-smbios-smm.c196
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c285
-rw-r--r--drivers/platform/x86/dell-smbios.c512
-rw-r--r--drivers/platform/x86/dell-smbios.h49
-rw-r--r--drivers/platform/x86/dell-smo8800.c3
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.c213
-rw-r--r--drivers/platform/x86/dell-wmi-descriptor.h28
-rw-r--r--drivers/platform/x86/dell-wmi.c97
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c14
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/hp_accel.c1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel-hid.c18
-rw-r--r--drivers/platform/x86/intel-wmi-thunderbolt.c98
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c114
-rw-r--r--drivers/platform/x86/intel_int0002_vgpio.c6
-rw-r--r--drivers/platform/x86/intel_ips.c160
-rw-r--r--drivers/platform/x86/intel_ips.h4
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c8
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c3
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c24
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c25
-rw-r--r--drivers/platform/x86/intel_turbo_max_3.c1
-rw-r--r--drivers/platform/x86/mlx-platform.c4
-rw-r--r--drivers/platform/x86/peaq-wmi.c19
-rw-r--r--drivers/platform/x86/silead_dmi.c52
-rw-r--r--drivers/platform/x86/sony-laptop.c20
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c134
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/platform/x86/wmi.c254
-rw-r--r--drivers/pnp/Makefile1
-rw-r--r--drivers/pnp/base.h1
-rw-r--r--drivers/pnp/card.c1
-rw-r--r--drivers/pnp/core.c1
-rw-r--r--drivers/pnp/driver.c1
-rw-r--r--drivers/pnp/interface.c1
-rw-r--r--drivers/pnp/isapnp/compat.c1
-rw-r--r--drivers/pnp/manager.c1
-rw-r--r--drivers/pnp/pnpacpi/pnpacpi.h1
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c1
-rw-r--r--drivers/pnp/pnpbios/proc.c1
-rw-r--r--drivers/pnp/pnpbios/rsparser.c1
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/pnp/resource.c1
-rw-r--r--drivers/pnp/support.c1
-rw-r--r--drivers/pnp/system.c1
-rw-r--r--drivers/power/avs/smartreflex.c10
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/gemini-poweroff.c1
-rw-r--r--drivers/power/reset/piix4-poweroff.c4
-rw-r--r--drivers/power/supply/Kconfig16
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/ab8500_bmdata.c1
-rw-r--r--drivers/power/supply/cpcap-charger.c2
-rw-r--r--drivers/power/supply/generic-adc-battery.c2
-rw-r--r--drivers/power/supply/max8997_charger.c3
-rw-r--r--drivers/power/supply/pcf50633-charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c2
-rw-r--r--drivers/power/supply/qcom_smbb.c2
-rw-r--r--drivers/power/supply/sbs-battery.c35
-rw-r--r--drivers/power/supply/sbs-manager.c445
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/pps/clients/pps-ktimer.c4
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_kvm.c5
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c63
-rw-r--r--drivers/pwm/pwm-img.c160
-rw-r--r--drivers/pwm/pwm-mediatek.c53
-rw-r--r--drivers/pwm/pwm-stm32-lp.c3
-rw-r--r--drivers/pwm/pwm-sun4i.c8
-rw-r--r--drivers/rapidio/Makefile1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c11
-rw-r--r--drivers/rapidio/switches/Makefile1
-rw-r--r--drivers/rapidio/switches/idt_gen2.c2
-rw-r--r--drivers/rapidio/switches/idt_gen3.c2
-rw-r--r--drivers/rapidio/switches/idtcps.c2
-rw-r--r--drivers/rapidio/switches/tsi568.c2
-rw-r--r--drivers/rapidio/switches/tsi57x.c2
-rw-r--r--drivers/ras/cec.c9
-rw-r--r--drivers/ras/debugfs.h1
-rw-r--r--drivers/ras/ras.c1
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/axp20x-regulator.c92
-rw-r--r--drivers/regulator/da9211-regulator.c14
-rw-r--r--drivers/regulator/da9211-regulator.h2
-rw-r--r--drivers/regulator/fixed-helper.c1
-rw-r--r--drivers/regulator/pbias-regulator.c21
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c48
-rw-r--r--drivers/regulator/tps65217-regulator.c5
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig4
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/qcom_common.h1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c291
-rw-r--r--drivers/remoteproc/qcom_wcnss.h1
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c130
-rw-r--r--drivers/reset/Kconfig30
-rw-r--r--drivers/reset/Makefile6
-rw-r--r--drivers/reset/reset-axs10x.c83
-rw-r--r--drivers/reset/reset-meson.c65
-rw-r--r--drivers/reset/reset-simple.c186
-rw-r--r--drivers/reset/reset-simple.h45
-rw-r--r--drivers/reset/reset-socfpga.c157
-rw-r--r--drivers/reset/reset-stm32.c108
-rw-r--r--drivers/reset/reset-sunxi.c104
-rw-r--r--drivers/reset/reset-uniphier.c30
-rw-r--r--drivers/reset/reset-zx2967.c99
-rw-r--r--drivers/rpmsg/Kconfig3
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_native.c51
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/class.c3
-rw-r--r--drivers/rtc/interface.c6
-rw-r--r--drivers/rtc/rtc-abx80x.c12
-rw-r--r--drivers/rtc/rtc-armada38x.c101
-rw-r--r--drivers/rtc/rtc-at91rm9200.c19
-rw-r--r--drivers/rtc/rtc-core.h1
-rw-r--r--drivers/rtc/rtc-dev.c6
-rw-r--r--drivers/rtc/rtc-ds1305.c70
-rw-r--r--drivers/rtc/rtc-ds1307.c57
-rw-r--r--drivers/rtc/rtc-ds1390.c7
-rw-r--r--drivers/rtc/rtc-ds1511.c75
-rw-r--r--drivers/rtc/rtc-efi-platform.c1
-rw-r--r--drivers/rtc/rtc-jz4740.c6
-rw-r--r--drivers/rtc/rtc-m41t80.c84
-rw-r--r--drivers/rtc/rtc-m48t86.c58
-rw-r--r--drivers/rtc/rtc-mt7622.c422
-rw-r--r--drivers/rtc/rtc-omap.c57
-rw-r--r--drivers/rtc/rtc-pcf8523.c40
-rw-r--r--drivers/rtc/rtc-pcf85363.c220
-rw-r--r--drivers/rtc/rtc-pcf8563.c4
-rw-r--r--drivers/rtc/rtc-pl031.c48
-rw-r--r--drivers/rtc/rtc-rv3029c2.c18
-rw-r--r--drivers/rtc/rtc-rx8010.c7
-rw-r--r--drivers/rtc/rtc-sa1100.h1
-rw-r--r--drivers/rtc/rtc-sc27xx.c662
-rw-r--r--drivers/rtc/rtc-sysfs.c25
-rw-r--r--drivers/rtc/rtc-xgene.c47
-rw-r--r--drivers/rtc/systohc.c53
-rw-r--r--drivers/s390/block/Makefile1
-rw-r--r--drivers/s390/block/dasd.c20
-rw-r--r--drivers/s390/block/dasd_3990_erp.c1
-rw-r--r--drivers/s390/block/dasd_alias.c1
-rw-r--r--drivers/s390/block/dasd_diag.h1
-rw-r--r--drivers/s390/block/dasd_eckd.h1
-rw-r--r--drivers/s390/block/dasd_eer.c17
-rw-r--r--drivers/s390/block/dasd_erp.c1
-rw-r--r--drivers/s390/block/dasd_fba.h1
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_int.h17
-rw-r--r--drivers/s390/block/dasd_ioctl.c1
-rw-r--r--drivers/s390/block/dasd_proc.c1
-rw-r--r--drivers/s390/block/scm_blk.h9
-rw-r--r--drivers/s390/block/scm_drv.c1
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/con3215.c7
-rw-r--r--drivers/s390/char/con3270.c11
-rw-r--r--drivers/s390/char/ctrlchar.c1
-rw-r--r--drivers/s390/char/ctrlchar.h1
-rw-r--r--drivers/s390/char/defkeymap.c1
-rw-r--r--drivers/s390/char/diag_ftp.c1
-rw-r--r--drivers/s390/char/diag_ftp.h1
-rw-r--r--drivers/s390/char/hmcdrv_cache.c1
-rw-r--r--drivers/s390/char/hmcdrv_cache.h1
-rw-r--r--drivers/s390/char/hmcdrv_dev.c1
-rw-r--r--drivers/s390/char/hmcdrv_dev.h1
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c1
-rw-r--r--drivers/s390/char/hmcdrv_ftp.h1
-rw-r--r--drivers/s390/char/keyboard.c1
-rw-r--r--drivers/s390/char/keyboard.h1
-rw-r--r--drivers/s390/char/raw3270.h1
-rw-r--r--drivers/s390/char/sclp.c46
-rw-r--r--drivers/s390/char/sclp.h1
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/sclp_con.c11
-rw-r--r--drivers/s390/char/sclp_config.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.h1
-rw-r--r--drivers/s390/char/sclp_ctl.c1
-rw-r--r--drivers/s390/char/sclp_diag.h1
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/char/sclp_early_core.c1
-rw-r--r--drivers/s390/char/sclp_ftp.c1
-rw-r--r--drivers/s390/char/sclp_ftp.h1
-rw-r--r--drivers/s390/char/sclp_ocf.c1
-rw-r--r--drivers/s390/char/sclp_pci.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c1
-rw-r--r--drivers/s390/char/sclp_rw.c1
-rw-r--r--drivers/s390/char/sclp_rw.h1
-rw-r--r--drivers/s390/char/sclp_sdias.c1
-rw-r--r--drivers/s390/char/sclp_sdias.h1
-rw-r--r--drivers/s390/char/sclp_tty.c11
-rw-r--r--drivers/s390/char/sclp_tty.h1
-rw-r--r--drivers/s390/char/sclp_vt220.c7
-rw-r--r--drivers/s390/char/tape.h2
-rw-r--r--drivers/s390/char/tape_3590.h1
-rw-r--r--drivers/s390/char/tape_char.c1
-rw-r--r--drivers/s390/char/tape_class.c3
-rw-r--r--drivers/s390/char/tape_class.h1
-rw-r--r--drivers/s390/char/tape_core.c14
-rw-r--r--drivers/s390/char/tape_proc.c1
-rw-r--r--drivers/s390/char/tape_std.c19
-rw-r--r--drivers/s390/char/tape_std.h1
-rw-r--r--drivers/s390/char/tty3270.c8
-rw-r--r--drivers/s390/char/tty3270.h1
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/char/vmlogrdr.c3
-rw-r--r--drivers/s390/char/vmur.c11
-rw-r--r--drivers/s390/char/vmur.h5
-rw-r--r--drivers/s390/cio/Makefile1
-rw-r--r--drivers/s390/cio/airq.c1
-rw-r--r--drivers/s390/cio/blacklist.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/ccwreq.c1
-rw-r--r--drivers/s390/cio/chp.h1
-rw-r--r--drivers/s390/cio/chsc.h1
-rw-r--r--drivers/s390/cio/chsc_sch.c6
-rw-r--r--drivers/s390/cio/chsc_sch.h1
-rw-r--r--drivers/s390/cio/cio.h1
-rw-r--r--drivers/s390/cio/cio_debug.h9
-rw-r--r--drivers/s390/cio/cmf.c278
-rw-r--r--drivers/s390/cio/crw.c1
-rw-r--r--drivers/s390/cio/css.h1
-rw-r--r--drivers/s390/cio/device.c8
-rw-r--r--drivers/s390/cio/device.h3
-rw-r--r--drivers/s390/cio/device_fsm.c10
-rw-r--r--drivers/s390/cio/device_id.c1
-rw-r--r--drivers/s390/cio/device_pgid.c1
-rw-r--r--drivers/s390/cio/device_status.c1
-rw-r--r--drivers/s390/cio/eadm_sch.c17
-rw-r--r--drivers/s390/cio/eadm_sch.h1
-rw-r--r--drivers/s390/cio/fcx.c1
-rw-r--r--drivers/s390/cio/idset.c1
-rw-r--r--drivers/s390/cio/idset.h1
-rw-r--r--drivers/s390/cio/io_sch.h1
-rw-r--r--drivers/s390/cio/ioasm.c1
-rw-r--r--drivers/s390/cio/ioasm.h1
-rw-r--r--drivers/s390/cio/itcw.c1
-rw-r--r--drivers/s390/cio/orb.h1
-rw-r--r--drivers/s390/cio/qdio.h3
-rw-r--r--drivers/s390/cio/qdio_debug.c1
-rw-r--r--drivers/s390/cio/qdio_debug.h19
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c3
-rw-r--r--drivers/s390/cio/qdio_thinint.c11
-rw-r--r--drivers/s390/cio/trace.c1
-rw-r--r--drivers/s390/cio/trace.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c25
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c1
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h1
-rw-r--r--drivers/s390/crypto/Makefile1
-rw-r--r--drivers/s390/crypto/ap_asm.h44
-rw-r--r--drivers/s390/crypto/ap_bus.c84
-rw-r--r--drivers/s390/crypto/ap_bus.h6
-rw-r--r--drivers/s390/crypto/ap_card.c13
-rw-r--r--drivers/s390/crypto/ap_debug.h1
-rw-r--r--drivers/s390/crypto/ap_queue.c7
-rw-r--r--drivers/s390/crypto/pkey_api.c3
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c48
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.h1
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c6
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/s390/net/Makefile1
-rw-r--r--drivers/s390/net/ctcm_dbug.c1
-rw-r--r--drivers/s390/net/ctcm_dbug.h1
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_main.c1
-rw-r--r--drivers/s390/net/ctcm_main.h1
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/ctcm_mpc.h1
-rw-r--r--drivers/s390/net/ctcm_sysfs.c1
-rw-r--r--drivers/s390/net/fsm.c15
-rw-r--r--drivers/s390/net/fsm.h1
-rw-r--r--drivers/s390/net/lcs.c18
-rw-r--r--drivers/s390/net/lcs.h2
-rw-r--r--drivers/s390/net/qeth_core.h39
-rw-r--r--drivers/s390/net/qeth_core_main.c211
-rw-r--r--drivers/s390/net/qeth_core_mpc.c5
-rw-r--r--drivers/s390/net/qeth_core_mpc.h74
-rw-r--r--drivers/s390/net/qeth_core_sys.c4
-rw-r--r--drivers/s390/net/qeth_l2.h7
-rw-r--r--drivers/s390/net/qeth_l2_main.c557
-rw-r--r--drivers/s390/net/qeth_l2_sys.c216
-rw-r--r--drivers/s390/net/qeth_l3.h1
-rw-r--r--drivers/s390/net/qeth_l3_main.c37
-rw-r--r--drivers/s390/net/qeth_l3_sys.c46
-rw-r--r--drivers/s390/net/smsgiucv.h1
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c1
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c1
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h1
-rw-r--r--drivers/s390/scsi/zfcp_def.h1
-rw-r--r--drivers/s390/scsi/zfcp_erp.c16
-rw-r--r--drivers/s390/scsi/zfcp_ext.h3
-rw-r--r--drivers/s390/scsi/zfcp_fc.c1
-rw-r--r--drivers/s390/scsi/zfcp_fc.h1
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c11
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h1
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c1
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h1
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c1
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c1
-rw-r--r--drivers/s390/scsi/zfcp_unit.c1
-rw-r--r--drivers/s390/virtio/Makefile6
-rw-r--r--drivers/s390/virtio/kvm_virtio.c515
-rw-r--r--drivers/sbus/char/Makefile1
-rw-r--r--drivers/sbus/char/bbc_envctrl.c1
-rw-r--r--drivers/sbus/char/bbc_i2c.h1
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/max1617.h1
-rw-r--r--drivers/scsi/.gitignore1
-rw-r--r--drivers/scsi/53c700.h1
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile9
-rw-r--r--drivers/scsi/NCR5380.c19
-rw-r--r--drivers/scsi/NCR5380.h1
-rw-r--r--drivers/scsi/NCR_D700.h1
-rw-r--r--drivers/scsi/NCR_Q720.h1
-rw-r--r--drivers/scsi/a2091.h1
-rw-r--r--drivers/scsi/a3000.h1
-rw-r--r--drivers/scsi/aacraid/commsup.c26
-rw-r--r--drivers/scsi/aha152x.h1
-rw-r--r--drivers/scsi/aha1542.h1
-rw-r--r--drivers/scsi/aha1740.h1
-rw-r--r--drivers/scsi/aic7xxx/Makefile1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h5
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c29
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h7
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.h5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c6
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c11
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c14
-rw-r--r--drivers/scsi/arm/Makefile1
-rw-r--r--drivers/scsi/arm/fas216.c8
-rw-r--r--drivers/scsi/atp870u.h1
-rw-r--r--drivers/scsi/be2iscsi/be.h19
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c55
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h48
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c54
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c128
-rw-r--r--drivers/scsi/be2iscsi/be_main.h51
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c278
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h10
-rw-r--r--drivers/scsi/bfa/Makefile1
-rw-r--r--drivers/scsi/bfa/bfad.c8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c7
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c11
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c40
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c15
-rw-r--r--drivers/scsi/constants.c1
-rw-r--r--drivers/scsi/csiostor/Makefile1
-rw-r--r--drivers/scsi/csiostor/csio_hw.c15
-rw-r--r--drivers/scsi/csiostor/csio_hw.h3
-rw-r--r--drivers/scsi/csiostor/csio_init.c3
-rw-r--r--drivers/scsi/csiostor/csio_mb.c12
-rw-r--r--drivers/scsi/csiostor/csio_mb.h3
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c52
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/cxlflash/main.c30
-rw-r--r--drivers/scsi/cxlflash/main.h3
-rw-r--r--drivers/scsi/cxlflash/sislite.h3
-rw-r--r--drivers/scsi/cxlflash/superpipe.c6
-rw-r--r--drivers/scsi/cxlflash/vlun.c6
-rw-r--r--drivers/scsi/dc395x.c13
-rw-r--r--drivers/scsi/dc395x.h1
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c10
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c6
-rw-r--r--drivers/scsi/eata_generic.h1
-rw-r--r--drivers/scsi/eata_pio.h1
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c10
-rw-r--r--drivers/scsi/esp_scsi.h1
-rw-r--r--drivers/scsi/fcoe/fcoe.c83
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c26
-rw-r--r--drivers/scsi/fcoe/libfcoe.h1
-rw-r--r--drivers/scsi/fnic/Makefile1
-rw-r--r--drivers/scsi/fnic/fnic_main.c14
-rw-r--r--drivers/scsi/gdth.c6
-rw-r--r--drivers/scsi/gdth.h1
-rw-r--r--drivers/scsi/gdth_ioctl.h1
-rw-r--r--drivers/scsi/gdth_proc.c1
-rw-r--r--drivers/scsi/gdth_proc.h1
-rw-r--r--drivers/scsi/gvp11.h1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c56
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c271
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c259
-rw-r--r--drivers/scsi/hpsa.c356
-rw-r--r--drivers/scsi/hpsa_cmd.h3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c14
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c7
-rw-r--r--drivers/scsi/ibmvscsi_tgt/libsrp.h1
-rw-r--r--drivers/scsi/imm.h1
-rw-r--r--drivers/scsi/ipr.c30
-rw-r--r--drivers/scsi/isci/Makefile1
-rw-r--r--drivers/scsi/isci/host.c12
-rw-r--r--drivers/scsi/isci/isci.h6
-rw-r--r--drivers/scsi/isci/phy.c4
-rw-r--r--drivers/scsi/isci/port.c4
-rw-r--r--drivers/scsi/isci/port_config.c8
-rw-r--r--drivers/scsi/libfc/Makefile1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c21
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libiscsi.c16
-rw-r--r--drivers/scsi/libsas/sas_dump.c10
-rw-r--r--drivers/scsi/libsas/sas_dump.h1
-rw-r--r--drivers/scsi/libsas/sas_event.c26
-rw-r--r--drivers/scsi/libsas/sas_expander.c6
-rw-r--r--drivers/scsi/libsas/sas_init.c15
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h16
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c15
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c174
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c148
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c240
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c9
-rw-r--r--drivers/scsi/mac53c94.h1
-rw-r--r--drivers/scsi/megaraid.h1
-rw-r--r--drivers/scsi/megaraid/Makefile1
-rw-r--r--drivers/scsi/megaraid/megaraid_ioctl.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c26
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c27
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h64
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1060
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c691
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h29
-rw-r--r--drivers/scsi/mesh.h1
-rw-r--r--drivers/scsi/mpt3sas/Makefile1
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h44
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h565
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h12
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h283
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_pci.h111
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h1
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h1
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h15
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c660
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h177
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c100
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c164
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c2219
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c2
-rw-r--r--drivers/scsi/mvme147.c1
-rw-r--r--drivers/scsi/mvme147.h1
-rw-r--r--drivers/scsi/mvsas/mv_init.c3
-rw-r--r--drivers/scsi/mvsas/mv_sas.c11
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/ncr53c8xx.c8
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/osst.h1
-rw-r--r--drivers/scsi/osst_detect.h1
-rw-r--r--drivers/scsi/osst_options.h1
-rw-r--r--drivers/scsi/pcmcia/Makefile1
-rw-r--r--drivers/scsi/pm8001/Makefile1
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c54
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c13
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c131
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c62
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h102
-rw-r--r--drivers/scsi/pmcraid.c33
-rw-r--r--drivers/scsi/ppa.h1
-rw-r--r--drivers/scsi/qedi/Kconfig1
-rw-r--r--drivers/scsi/qedi/qedi_fw.c17
-rw-r--r--drivers/scsi/qla1280.c14
-rw-r--r--drivers/scsi/qla1280.h1
-rw-r--r--drivers/scsi/qla2xxx/Makefile1
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h29
-rw-r--r--drivers/scsi/qla2xxx/qla_devtbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c144
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c195
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c132
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/qlogicfas408.h1
-rw-r--r--drivers/scsi/qlogicpti.h1
-rw-r--r--drivers/scsi/scsi.h1
-rw-r--r--drivers/scsi/scsi_common.c1
-rw-r--r--drivers/scsi/scsi_debug.c31
-rw-r--r--drivers/scsi/scsi_debugfs.c1
-rw-r--r--drivers/scsi/scsi_devinfo.c91
-rw-r--r--drivers/scsi/scsi_dh.c36
-rw-r--r--drivers/scsi/scsi_error.c13
-rw-r--r--drivers/scsi/scsi_lib.c111
-rw-r--r--drivers/scsi/scsi_lib_dma.c1
-rw-r--r--drivers/scsi/scsi_logging.h9
-rw-r--r--drivers/scsi/scsi_priv.h20
-rw-r--r--drivers/scsi/scsi_proc.c1
-rw-r--r--drivers/scsi/scsi_sas_internal.h1
-rw-r--r--drivers/scsi/scsi_scan.c4
-rw-r--r--drivers/scsi/scsi_sysfs.c45
-rw-r--r--drivers/scsi/scsi_transport_api.h1
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/scsi_transport_srp.c5
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c32
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_zbc.c169
-rw-r--r--drivers/scsi/sense_codes.h1
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c27
-rw-r--r--drivers/scsi/snic/Makefile1
-rw-r--r--drivers/scsi/sr.h1
-rw-r--r--drivers/scsi/sr_ioctl.c1
-rw-r--r--drivers/scsi/sr_vendor.c1
-rw-r--r--drivers/scsi/st.c6
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/st_options.h1
-rw-r--r--drivers/scsi/storvsc_drv.c52
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c8
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c10
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c43
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h7
-rw-r--r--drivers/scsi/ufs/ufshcd.c40
-rw-r--r--drivers/scsi/ufs/ufshcd.h16
-rw-r--r--drivers/scsi/ufs/ufshci.h70
-rw-r--r--drivers/scsi/wd719x.h1
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/intc/internals.h1
-rw-r--r--drivers/sh/maple/maple.c5
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/amlogic/Kconfig21
-rw-r--r--drivers/soc/amlogic/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c243
-rw-r--r--drivers/soc/amlogic/meson-mx-socinfo.c175
-rw-r--r--drivers/soc/atmel/soc.c8
-rw-r--r--drivers/soc/atmel/soc.h4
-rw-r--r--drivers/soc/bcm/Kconfig2
-rw-r--r--drivers/soc/bcm/brcmstb/Kconfig10
-rw-r--r--drivers/soc/bcm/brcmstb/Makefile1
-rw-r--r--drivers/soc/bcm/brcmstb/common.c12
-rw-r--r--drivers/soc/bcm/brcmstb/pm/Makefile3
-rw-r--r--drivers/soc/bcm/brcmstb/pm/aon_defs.h113
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c822
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-mips.c461
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h89
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s2-arm.S76
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s2-mips.S200
-rw-r--r--drivers/soc/bcm/brcmstb/pm/s3-mips.S146
-rw-r--r--drivers/soc/dove/pmu.c1
-rw-r--r--drivers/soc/fsl/guts.c1
-rw-r--r--drivers/soc/fsl/qbman/Kconfig2
-rw-r--r--drivers/soc/fsl/qbman/Makefile3
-rw-r--r--drivers/soc/fsl/qbman/bman.c42
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c15
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c23
-rw-r--r--drivers/soc/fsl/qbman/bman_priv.h8
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c78
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h25
-rw-r--r--drivers/soc/fsl/qbman/qman.c83
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c95
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c23
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h11
-rw-r--r--drivers/soc/fsl/qbman/qman_test.h2
-rw-r--r--drivers/soc/fsl/qe/Makefile1
-rw-r--r--drivers/soc/mediatek/Kconfig8
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c511
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c14
-rw-r--r--drivers/soc/qcom/Kconfig11
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c269
-rw-r--r--drivers/soc/qcom/smem.c335
-rw-r--r--drivers/soc/renesas/Kconfig8
-rw-r--r--drivers/soc/renesas/Makefile2
-rw-r--r--drivers/soc/renesas/r8a77970-sysc.c39
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/rockchip/pm_domains.c14
-rw-r--r--drivers/soc/samsung/exynos-pmu.c9
-rw-r--r--drivers/soc/samsung/exynos-pmu.h2
-rw-r--r--drivers/soc/samsung/exynos4-pmu.c13
-rw-r--r--drivers/soc/tegra/Makefile1
-rw-r--r--drivers/soc/tegra/fuse/Makefile1
-rw-r--r--drivers/soc/tegra/powergate-bpmp.c15
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-armada-3700.c17
-rw-r--r--drivers/spi/spi-axi-spi-engine.c4
-rw-r--r--drivers/spi/spi-bcm53xx.h1
-rw-r--r--drivers/spi/spi-bitbang-txrx.h1
-rw-r--r--drivers/spi/spi-cavium.h1
-rw-r--r--drivers/spi/spi-dw.h1
-rw-r--r--drivers/spi/spi-fsl-dspi.c66
-rw-r--r--drivers/spi/spi-imx.c256
-rw-r--r--drivers/spi/spi-mxs.c120
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-rspi.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c3
-rw-r--r--drivers/spi/spi-sh-msiof.c12
-rw-r--r--drivers/spi/spi-sprd-adi.c418
-rw-r--r--drivers/spi/spi-tegra114.c6
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/ssb_private.h1
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/TODO1
-rw-r--r--drivers/staging/android/ion/Makefile1
-rw-r--r--drivers/staging/android/ion/ion-ioctl.c11
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/android/ion/ion.h2
-rw-r--r--drivers/staging/board/board.h1
-rw-r--r--drivers/staging/board/kzm9d.c1
-rw-r--r--drivers/staging/ccree/cc_hal.h33
-rw-r--r--drivers/staging/ccree/cc_lli_defs.h2
-rw-r--r--drivers/staging/ccree/cc_regs.h42
-rw-r--r--drivers/staging/ccree/dx_reg_base_host.h25
-rw-r--r--drivers/staging/ccree/ssi_aead.c258
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c438
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.h5
-rw-r--r--drivers/staging/ccree/ssi_cipher.c189
-rw-r--r--drivers/staging/ccree/ssi_cipher.h13
-rw-r--r--drivers/staging/ccree/ssi_driver.c381
-rw-r--r--drivers/staging/ccree/ssi_driver.h55
-rw-r--r--drivers/staging/ccree/ssi_fips.c26
-rw-r--r--drivers/staging/ccree/ssi_fips.h4
-rw-r--r--drivers/staging/ccree/ssi_hash.c377
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c18
-rw-r--r--drivers/staging/ccree/ssi_pm.c35
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c195
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c33
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c282
-rw-r--r--drivers/staging/comedi/Kconfig4
-rw-r--r--drivers/staging/comedi/Makefile1
-rw-r--r--drivers/staging/comedi/comedi_internal.h1
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/addi_tcw.h1
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.h1
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c26
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h1
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c18
-rw-r--r--drivers/staging/comedi/drivers/das16.c17
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c2
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c10
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h1
-rw-r--r--drivers/staging/comedi/drivers/s526.c5
-rw-r--r--drivers/staging/comedi/drivers/z8536.h1
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c8
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h16
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c11
-rw-r--r--drivers/staging/dgnc/dgnc_utils.c1
-rw-r--r--drivers/staging/dgnc/dgnc_utils.h1
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c10
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c8
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c2
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c2
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c25
-rw-r--r--drivers/staging/fbtft/fbtft-io.c1
-rw-r--r--drivers/staging/fbtft/fbtft-sysfs.c1
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c281
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h54
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c14
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h5
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c32
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.h5
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/dpio-service.c4
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c2
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h6
-rw-r--r--drivers/staging/fwserial/fwserial.c16
-rw-r--r--drivers/staging/fwserial/fwserial.h1
-rw-r--r--drivers/staging/gdm724x/Makefile1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c19
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c1
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c1
-rw-r--r--drivers/staging/greybus/Makefile1
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c3
-rw-r--r--drivers/staging/greybus/arche-platform.c3
-rw-r--r--drivers/staging/greybus/arche_platform.h3
-rw-r--r--drivers/staging/greybus/arpc.h1
-rw-r--r--drivers/staging/greybus/audio_apbridgea.c3
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h1
-rw-r--r--drivers/staging/greybus/audio_codec.c3
-rw-r--r--drivers/staging/greybus/audio_codec.h3
-rw-r--r--drivers/staging/greybus/audio_gb.c3
-rw-r--r--drivers/staging/greybus/audio_manager.c5
-rw-r--r--drivers/staging/greybus/audio_manager.h3
-rw-r--r--drivers/staging/greybus/audio_manager_module.c3
-rw-r--r--drivers/staging/greybus/audio_manager_private.h3
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c3
-rw-r--r--drivers/staging/greybus/audio_module.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c3
-rw-r--r--drivers/staging/greybus/authentication.c3
-rw-r--r--drivers/staging/greybus/bootrom.c3
-rw-r--r--drivers/staging/greybus/bundle.c3
-rw-r--r--drivers/staging/greybus/bundle.h3
-rw-r--r--drivers/staging/greybus/camera.c3
-rw-r--r--drivers/staging/greybus/connection.c3
-rw-r--r--drivers/staging/greybus/connection.h3
-rw-r--r--drivers/staging/greybus/control.c3
-rw-r--r--drivers/staging/greybus/control.h3
-rw-r--r--drivers/staging/greybus/core.c3
-rw-r--r--drivers/staging/greybus/debugfs.c3
-rw-r--r--drivers/staging/greybus/es2.c4
-rw-r--r--drivers/staging/greybus/firmware.h3
-rw-r--r--drivers/staging/greybus/fw-core.c3
-rw-r--r--drivers/staging/greybus/fw-download.c3
-rw-r--r--drivers/staging/greybus/fw-management.c3
-rw-r--r--drivers/staging/greybus/gb-camera.h3
-rw-r--r--drivers/staging/greybus/gbphy.c3
-rw-r--r--drivers/staging/greybus/gbphy.h3
-rw-r--r--drivers/staging/greybus/gpio.c3
-rw-r--r--drivers/staging/greybus/greybus.h3
-rw-r--r--drivers/staging/greybus/greybus_authentication.h1
-rw-r--r--drivers/staging/greybus/greybus_firmware.h1
-rw-r--r--drivers/staging/greybus/greybus_id.h1
-rw-r--r--drivers/staging/greybus/greybus_manifest.h1
-rw-r--r--drivers/staging/greybus/greybus_protocols.h1
-rw-r--r--drivers/staging/greybus/greybus_trace.h3
-rw-r--r--drivers/staging/greybus/hd.c3
-rw-r--r--drivers/staging/greybus/hd.h3
-rw-r--r--drivers/staging/greybus/hid.c3
-rw-r--r--drivers/staging/greybus/i2c.c3
-rw-r--r--drivers/staging/greybus/interface.c3
-rw-r--r--drivers/staging/greybus/interface.h3
-rw-r--r--drivers/staging/greybus/light.c11
-rw-r--r--drivers/staging/greybus/log.c3
-rw-r--r--drivers/staging/greybus/loopback.c242
-rw-r--r--drivers/staging/greybus/manifest.c3
-rw-r--r--drivers/staging/greybus/manifest.h3
-rw-r--r--drivers/staging/greybus/module.c3
-rw-r--r--drivers/staging/greybus/module.h3
-rw-r--r--drivers/staging/greybus/operation.c10
-rw-r--r--drivers/staging/greybus/operation.h16
-rw-r--r--drivers/staging/greybus/power_supply.c3
-rw-r--r--drivers/staging/greybus/pwm.c3
-rw-r--r--drivers/staging/greybus/raw.c3
-rw-r--r--drivers/staging/greybus/sdio.c3
-rw-r--r--drivers/staging/greybus/spi.c3
-rw-r--r--drivers/staging/greybus/spilib.c11
-rw-r--r--drivers/staging/greybus/spilib.h1
-rw-r--r--drivers/staging/greybus/svc.c3
-rw-r--r--drivers/staging/greybus/svc.h3
-rw-r--r--drivers/staging/greybus/svc_watchdog.c3
-rw-r--r--drivers/staging/greybus/tools/Makefile1
-rwxr-xr-xdrivers/staging/greybus/tools/lbtest1
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c1
-rw-r--r--drivers/staging/greybus/uart.c3
-rw-r--r--drivers/staging/greybus/usb.c4
-rw-r--r--drivers/staging/greybus/vibrator.c3
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt3
-rw-r--r--drivers/staging/iio/Documentation/trigger.txt4
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16201.c1
-rw-r--r--drivers/staging/iio/accel/adis16203.c1
-rw-r--r--drivers/staging/iio/accel/adis16209.c1
-rw-r--r--drivers/staging/iio/accel/adis16240.c1
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c11
-rw-r--r--drivers/staging/iio/adc/ad7280a.c1
-rw-r--r--drivers/staging/iio/adc/ad7606.c4
-rw-r--r--drivers/staging/iio/adc/ad7780.c1
-rw-r--r--drivers/staging/iio/adc/ad7816.c1
-rw-r--r--drivers/staging/iio/addac/adt7316.c2
-rw-r--r--drivers/staging/iio/cdc/ad7150.c1
-rw-r--r--drivers/staging/iio/cdc/ad7152.c1
-rw-r--r--drivers/staging/iio/cdc/ad7746.c1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c1
-rw-r--r--drivers/staging/iio/frequency/ad9834.c2
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c1
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c665
-rw-r--r--drivers/staging/iio/light/tsl2x7x.h13
-rw-r--r--drivers/staging/iio/meter/Makefile1
-rw-r--r--drivers/staging/iio/meter/ade7753.c27
-rw-r--r--drivers/staging/iio/meter/ade7754.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c1
-rw-r--r--drivers/staging/iio/meter/ade7758_trigger.c1
-rw-r--r--drivers/staging/iio/meter/ade7759.c28
-rw-r--r--drivers/staging/iio/meter/ade7854.c1
-rw-r--r--drivers/staging/iio/meter/ade7854.h1
-rw-r--r--drivers/staging/iio/meter/meter.h1
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c1
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.h1
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.c3
-rw-r--r--drivers/staging/irda/drivers/au1k_ir.c40
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.c12
-rw-r--r--drivers/staging/irda/drivers/esi-sir.c22
-rw-r--r--drivers/staging/irda/drivers/irda-usb.c24
-rw-r--r--drivers/staging/irda/drivers/irda-usb.h1
-rw-r--r--drivers/staging/irda/drivers/mcs7780.c9
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.c18
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_event.h6
-rw-r--r--drivers/staging/irda/include/net/irda/qos.h20
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h11
-rw-r--r--drivers/staging/irda/net/af_irda.c7
-rw-r--r--drivers/staging/irda/net/discovery.c4
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty.c2
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_attach.c8
-rw-r--r--drivers/staging/irda/net/irda_device.c36
-rw-r--r--drivers/staging/irda/net/iriap.c10
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client.c6
-rw-r--r--drivers/staging/irda/net/irlan/irlan_common.c4
-rw-r--r--drivers/staging/irda/net/irlap.c16
-rw-r--r--drivers/staging/irda/net/irlap_event.c6
-rw-r--r--drivers/staging/irda/net/irlmp.c8
-rw-r--r--drivers/staging/irda/net/irlmp_event.c10
-rw-r--r--drivers/staging/irda/net/irqueue.c3
-rw-r--r--drivers/staging/irda/net/irttp.c11
-rw-r--r--drivers/staging/irda/net/timer.c54
-rw-r--r--drivers/staging/ks7010/eap_packet.h1
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_time.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h1
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h29
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h1
-rw-r--r--drivers/staging/lustre/lnet/Kconfig2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c21
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h10
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c158
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c100
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h1
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile1
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c8
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c14
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h1
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c1
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c4
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h1
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h1
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_acl.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h7
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h51
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ha.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_intent.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_patchless_compat.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h1
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c215
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c34
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c1
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c7
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c44
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c21
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c44
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h1
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c1
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c1
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c1
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c1
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c1
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c1
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c1
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/Kconfig11
-rw-r--r--drivers/staging/media/atomisp/TODO24
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig100
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile20
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c1255
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h198
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc0310.c (renamed from drivers/staging/media/atomisp/i2c/gc0310.c)53
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c (renamed from drivers/staging/media/atomisp/i2c/gc2235.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c (renamed from drivers/staging/media/atomisp/i2c/libmsrlisthelper.c)4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c (renamed from drivers/staging/media/atomisp/i2c/lm3554.c)47
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c (renamed from drivers/staging/media/atomisp/i2c/mt9m114.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c (renamed from drivers/staging/media/atomisp/i2c/ov2680.c)51
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c (renamed from drivers/staging/media/atomisp/i2c/ov2722.c)54
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h7
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile13
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c216
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.h49
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/common.h65
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c209
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.h38
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c223
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.h63
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c233
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.h64
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c198
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.h58
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c2480
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h737
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx132.h566
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx134.h2464
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx135.h3374
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx175.h1959
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx208.h550
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx219.h227
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx227.h726
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp.c39
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c80
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c89
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/otp_imx.c191
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/vcm.c45
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.h9
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h14
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ad5823.h4
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c (renamed from drivers/staging/media/atomisp/i2c/ov5693/ov5693.c)59
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h11
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c65
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h5
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h5
-rw-r--r--drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h37
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h4
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h3
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h25
-rw-r--r--drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h4
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3554.h5
-rw-r--r--drivers/staging/media/atomisp/include/media/lm3642.h153
-rw-r--r--drivers/staging/media/atomisp/pci/Kconfig17
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c38
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c33
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c20
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c67
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h39
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c80
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h64
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h60
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h5
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h18
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h25
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h42
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c12
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c28
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c133
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c54
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c48
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c4
-rw-r--r--drivers/staging/media/atomisp/platform/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/clock/Makefile6
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c40
-rw-r--r--drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h27
-rw-r--r--drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c247
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/Makefile1
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c141
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c297
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c60
-rw-r--r--drivers/staging/media/imx/Makefile1
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c5
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c7
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c7
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c8
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c231
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c7
-rw-r--r--drivers/staging/netlogic/xlr_net.c82
-rw-r--r--drivers/staging/nvec/Makefile1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h1
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt24
-rw-r--r--drivers/staging/pi433/pi433_if.c153
-rw-r--r--drivers/staging/pi433/rf69.c236
-rw-r--r--drivers/staging/rtl8188eu/Makefile1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c89
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c35
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c144
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c64
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c1
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c1
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h33
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h3
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h29
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h14
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h6
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h114
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h46
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h2
-rw-r--r--drivers/staging/rtl8188eu/include/phy.h1
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rf.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h29
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h157
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h222
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h40
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h7
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h33
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h19
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h22
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h3
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h82
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h27
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h12
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h12
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h25
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c34
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/dot11d.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c25
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c15
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c56
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c23
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c3
-rw-r--r--drivers/staging/rtl8192u/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/Makefile1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h118
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c43
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c15
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c17
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c47
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c20
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c10
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.h3
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware_img.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.h1
-rw-r--r--drivers/staging/rtl8192u/r819xU_phyreg.h1
-rw-r--r--drivers/staging/rtl8712/Makefile1
-rw-r--r--drivers/staging/rtl8712/hal_init.c4
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c48
-rw-r--r--drivers/staging/rtl8712/os_intfs.c8
-rw-r--r--drivers/staging/rtl8712/recv_linux.c9
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_event.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c9
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c3
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_security.h2
-rw-r--r--drivers/staging/rtl8712/usb_intf.c3
-rw-r--r--drivers/staging/rtl8723bs/Makefile1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c144
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c21
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c140
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c34
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c33
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_rf.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c10
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/Hal8723BReg.h1
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c7
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c10
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c69
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_CfoTracking.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c10
-rw-r--r--drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h1
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h3
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h6
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service_linux.h13
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h10
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h3
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_wifi_regd.h1
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c16
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c45
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c62
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c243
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c24
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/wifi_regd.c1
-rw-r--r--drivers/staging/rtlwifi/base.c58
-rw-r--r--drivers/staging/rtlwifi/base.h4
-rw-r--r--drivers/staging/rtlwifi/core.c72
-rw-r--r--drivers/staging/rtlwifi/core.h4
-rw-r--r--drivers/staging/rtlwifi/debug.c36
-rw-r--r--drivers/staging/rtlwifi/efuse.c5
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c2
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c50
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c2
-rw-r--r--drivers/staging/rtlwifi/pci.c2
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c2
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c8
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c4
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c12
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c24
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c23
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h11
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c10
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c4
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c13
-rw-r--r--drivers/staging/rtlwifi/ps.c2
-rw-r--r--drivers/staging/rtlwifi/rc.c16
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c9
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c23
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c24
-rw-r--r--drivers/staging/rts5208/sd.c6
-rw-r--r--drivers/staging/rts5208/trace.c1
-rw-r--r--drivers/staging/skein/Makefile1
-rw-r--r--drivers/staging/skein/skein_iv.h1
-rw-r--r--drivers/staging/skein/threefish_api.c1
-rw-r--r--drivers/staging/skein/threefish_api.h1
-rw-r--r--drivers/staging/skein/threefish_block.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c5
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h5
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h8
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c4
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h5
-rw-r--r--drivers/staging/sm750fb/sm750.h1
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c1
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h1
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c5
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.h1
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c1
-rw-r--r--drivers/staging/speakup/Makefile1
-rw-r--r--drivers/staging/speakup/buffers.c4
-rw-r--r--drivers/staging/speakup/devsynth.c1
-rw-r--r--drivers/staging/speakup/i18n.c1
-rw-r--r--drivers/staging/speakup/i18n.h1
-rw-r--r--drivers/staging/speakup/main.c10
-rw-r--r--drivers/staging/speakup/selection.c1
-rw-r--r--drivers/staging/speakup/serialio.h1
-rw-r--r--drivers/staging/speakup/speakup.h1
-rw-r--r--drivers/staging/speakup/speakup_acnt.h1
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c2
-rw-r--r--drivers/staging/speakup/speakup_apollo.c2
-rw-r--r--drivers/staging/speakup/speakup_audptr.c2
-rw-r--r--drivers/staging/speakup/speakup_bns.c2
-rw-r--r--drivers/staging/speakup/speakup_decext.c2
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c2
-rw-r--r--drivers/staging/speakup/speakup_dtlk.h1
-rw-r--r--drivers/staging/speakup/speakup_dummy.c2
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c2
-rw-r--r--drivers/staging/speakup/speakup_spkout.c2
-rw-r--r--drivers/staging/speakup/speakup_txprt.c2
-rw-r--r--drivers/staging/speakup/speakupmap.h1
-rw-r--r--drivers/staging/speakup/spk_ttyio.c10
-rw-r--r--drivers/staging/speakup/spk_types.h1
-rw-r--r--drivers/staging/speakup/synth.c4
-rw-r--r--drivers/staging/typec/Kconfig10
-rw-r--r--drivers/staging/typec/Makefile2
-rw-r--r--drivers/staging/typec/TODO10
-rw-r--r--drivers/staging/typec/fusb302/TODO10
-rw-r--r--drivers/staging/typec/pd.h293
-rw-r--r--drivers/staging/typec/pd_bdo.h31
-rw-r--r--drivers/staging/typec/pd_vdo.h251
-rw-r--r--drivers/staging/typec/tcpci.c5
-rw-r--r--drivers/staging/typec/tcpm.h161
-rw-r--r--drivers/staging/unisys/MAINTAINERS2
-rw-r--r--drivers/staging/unisys/include/iochannel.h9
-rw-r--r--drivers/staging/unisys/include/visorbus.h159
-rw-r--r--drivers/staging/unisys/include/visorchannel.h (renamed from drivers/staging/unisys/include/channel.h)12
-rw-r--r--drivers/staging/unisys/visorbus/Makefile1
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h3
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h12
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c277
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h8
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c81
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c298
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c12
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c6
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c18
-rw-r--r--drivers/staging/vboxvideo/Makefile1
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h8
-rw-r--r--drivers/staging/vboxvideo/vbox_irq.c4
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c26
-rw-r--r--drivers/staging/vc04_services/Kconfig12
-rw-r--r--drivers/staging/vc04_services/Makefile1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c11
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c11
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile1
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h24
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h2
-rw-r--r--drivers/staging/vc04_services/interface/vchi/connections/connection.h3
-rw-r--r--drivers/staging/vc04_services/interface/vchi/message_drivers/message.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h12
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c77
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c177
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c13
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c47
-rw-r--r--drivers/staging/vme/devices/Kconfig13
-rw-r--r--drivers/staging/vme/devices/Makefile3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h244
-rw-r--r--drivers/staging/vme/devices/vme_pio2_cntr.c71
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c493
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c220
-rw-r--r--drivers/staging/vme/devices/vme_user.h1
-rw-r--r--drivers/staging/vt6655/Makefile1
-rw-r--r--drivers/staging/vt6655/device_main.c3
-rw-r--r--drivers/staging/vt6655/key.c4
-rw-r--r--drivers/staging/vt6656/Makefile1
-rw-r--r--drivers/staging/vt6656/card.c2
-rw-r--r--drivers/staging/wilc1000/Makefile1
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c1
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h1
-rw-r--r--drivers/staging/wilc1000/host_interface.c46
-rw-r--r--drivers/staging/wilc1000/host_interface.h6
-rw-r--r--drivers/staging/wilc1000/linux_mon.c1
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c14
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c2
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h1
-rw-r--r--drivers/staging/wlan-ng/Makefile1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c1
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c33
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c78
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c6
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c1
-rw-r--r--drivers/staging/xgifb/XGI_main.h1
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c4
-rw-r--r--drivers/staging/xgifb/XGIfb.h1
-rw-r--r--drivers/staging/xgifb/vb_def.h1
-rw-r--r--drivers/staging/xgifb/vb_init.c1
-rw-r--r--drivers/staging/xgifb/vb_init.h1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c8
-rw-r--r--drivers/staging/xgifb/vb_setmode.h1
-rw-r--r--drivers/staging/xgifb/vb_struct.h1
-rw-r--r--drivers/staging/xgifb/vb_table.h1
-rw-r--r--drivers/staging/xgifb/vb_util.h1
-rw-r--r--drivers/staging/xgifb/vgatypes.h1
-rw-r--r--drivers/target/Makefile1
-rw-r--r--drivers/target/iscsi/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c45
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c82
-rw-r--r--drivers/target/iscsi/iscsi_target.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c25
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c39
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c33
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h3
-rw-r--r--drivers/target/loopback/tcm_loop.h1
-rw-r--r--drivers/target/sbp/sbp_target.h1
-rw-r--r--drivers/target/target_core_alua.c51
-rw-r--r--drivers/target/target_core_alua.h10
-rw-r--r--drivers/target/target_core_configfs.c28
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h2
-rw-r--r--drivers/target/target_core_pr.c41
-rw-r--r--drivers/target/target_core_pr.h1
-rw-r--r--drivers/target/target_core_pscsi.h1
-rw-r--r--drivers/target/target_core_rd.h1
-rw-r--r--drivers/target/target_core_stat.c16
-rw-r--r--drivers/target/target_core_tmr.c12
-rw-r--r--drivers/target/target_core_transport.c84
-rw-r--r--drivers/target/target_core_ua.h1
-rw-r--r--drivers/target/target_core_user.c217
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/target/tcm_fc/Makefile1
-rw-r--r--drivers/tee/Makefile1
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/thermal/Kconfig3
-rw-r--r--drivers/thermal/Makefile3
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/broadcom/Kconfig7
-rw-r--r--drivers/thermal/broadcom/Makefile1
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c387
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/hisi_thermal.c612
-rw-r--r--drivers/thermal/imx_thermal.c104
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.h1
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c6
-rw-r--r--drivers/thermal/intel_bxt_pmic_thermal.c3
-rw-r--r--drivers/thermal/intel_pch_thermal.c11
-rw-r--r--drivers/thermal/intel_powerclamp.c4
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c43
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c34
-rw-r--r--drivers/thermal/rockchip_thermal.c67
-rw-r--r--drivers/thermal/step_wise.c11
-rw-r--r--drivers/thermal/tegra/Kconfig7
-rw-r--r--drivers/thermal/tegra/Makefile4
-rw-r--r--drivers/thermal/tegra/soctherm.c2
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c263
-rw-r--r--drivers/thermal/thermal-generic-adc.c24
-rw-r--r--drivers/thermal/ti-soc-thermal/Makefile1
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c3
-rw-r--r--drivers/thunderbolt/Makefile2
-rw-r--r--drivers/thunderbolt/cap.c1
-rw-r--r--drivers/thunderbolt/ctl.c47
-rw-r--r--drivers/thunderbolt/ctl.h4
-rw-r--r--drivers/thunderbolt/domain.c197
-rw-r--r--drivers/thunderbolt/eeprom.c1
-rw-r--r--drivers/thunderbolt/icm.c218
-rw-r--r--drivers/thunderbolt/nhi.c412
-rw-r--r--drivers/thunderbolt/nhi.h142
-rw-r--r--drivers/thunderbolt/nhi_regs.h12
-rw-r--r--drivers/thunderbolt/path.c1
-rw-r--r--drivers/thunderbolt/property.c670
-rw-r--r--drivers/thunderbolt/switch.c8
-rw-r--r--drivers/thunderbolt/tb.c2
-rw-r--r--drivers/thunderbolt/tb.h89
-rw-r--r--drivers/thunderbolt/tb_msgs.h168
-rw-r--r--drivers/thunderbolt/tb_regs.h1
-rw-r--r--drivers/thunderbolt/tunnel_pci.c1
-rw-r--r--drivers/thunderbolt/tunnel_pci.h1
-rw-r--r--drivers/thunderbolt/xdomain.c1570
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/amiserial.c1
-rw-r--r--drivers/tty/bfin_jtag_comm.c3
-rw-r--r--drivers/tty/cyclades.c23
-rw-r--r--drivers/tty/ehv_bytechan.c9
-rw-r--r--drivers/tty/goldfish.c11
-rw-r--r--drivers/tty/hvc/Makefile1
-rw-r--r--drivers/tty/hvc/hvc_bfin_jtag.c3
-rw-r--r--drivers/tty/hvc/hvc_console.c15
-rw-r--r--drivers/tty/hvc/hvc_console.h15
-rw-r--r--drivers/tty/hvc/hvc_dcc.c13
-rw-r--r--drivers/tty/hvc/hvc_irq.c1
-rw-r--r--drivers/tty/hvc/hvc_iucv.c1
-rw-r--r--drivers/tty/hvc/hvc_opal.c16
-rw-r--r--drivers/tty/hvc/hvc_rtas.c15
-rw-r--r--drivers/tty/hvc/hvc_tile.c11
-rw-r--r--drivers/tty/hvc/hvc_udbg.c15
-rw-r--r--drivers/tty/hvc/hvc_vio.c15
-rw-r--r--drivers/tty/hvc/hvc_xen.c15
-rw-r--r--drivers/tty/hvc/hvcs.c15
-rw-r--r--drivers/tty/hvc/hvsi.c15
-rw-r--r--drivers/tty/hvc/hvsi_lib.c1
-rw-r--r--drivers/tty/ipwireless/hardware.c12
-rw-r--r--drivers/tty/ipwireless/hardware.h1
-rw-r--r--drivers/tty/ipwireless/main.c1
-rw-r--r--drivers/tty/ipwireless/main.h1
-rw-r--r--drivers/tty/ipwireless/network.c1
-rw-r--r--drivers/tty/ipwireless/network.h1
-rw-r--r--drivers/tty/ipwireless/setup_protocol.h1
-rw-r--r--drivers/tty/ipwireless/tty.c1
-rw-r--r--drivers/tty/ipwireless/tty.h1
-rw-r--r--drivers/tty/isicom.c12
-rw-r--r--drivers/tty/metag_da.c16
-rw-r--r--drivers/tty/mips_ejtag_fdc.c13
-rw-r--r--drivers/tty/moxa.c12
-rw-r--r--drivers/tty/moxa.h1
-rw-r--r--drivers/tty/mxser.c22
-rw-r--r--drivers/tty/mxser.h1
-rw-r--r--drivers/tty/n_gsm.c28
-rw-r--r--drivers/tty/n_hdlc.c3
-rw-r--r--drivers/tty/n_null.c14
-rw-r--r--drivers/tty/n_r3964.c12
-rw-r--r--drivers/tty/n_tracerouter.c12
-rw-r--r--drivers/tty/n_tracesink.c12
-rw-r--r--drivers/tty/n_tracesink.h12
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/nozomi.c15
-rw-r--r--drivers/tty/pty.c1
-rw-r--r--drivers/tty/rocket.c21
-rw-r--r--drivers/tty/rocket.h1
-rw-r--r--drivers/tty/rocket_int.h1
-rw-r--r--drivers/tty/serdev/Kconfig8
-rw-r--r--drivers/tty/serdev/core.c152
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c31
-rw-r--r--drivers/tty/serial/21285.c1
-rw-r--r--drivers/tty/serial/8250/8250.h6
-rw-r--r--drivers/tty/serial/8250/8250_accent.c5
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c5
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c6
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c6
-rw-r--r--drivers/tty/serial/8250/8250_boca.c5
-rw-r--r--drivers/tty/serial/8250/8250_core.c19
-rw-r--r--drivers/tty/serial/8250/8250_dma.c6
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_early.c5
-rw-r--r--drivers/tty/serial/8250/8250_em.c14
-rw-r--r--drivers/tty/serial/8250/8250_exar.c5
-rw-r--r--drivers/tty/serial/8250/8250_exar_st16c554.c5
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c123
-rw-r--r--drivers/tty/serial/8250/8250_fourport.c5
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c5
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c6
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c1
-rw-r--r--drivers/tty/serial/8250/8250_hub6.c5
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c10
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c6
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c5
-rw-r--r--drivers/tty/serial/8250/8250_mid.c11
-rw-r--r--drivers/tty/serial/8250/8250_moxa.c5
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c13
-rw-r--r--drivers/tty/serial/8250/8250_of.c7
-rw-r--r--drivers/tty/serial/8250/8250_omap.c5
-rw-r--r--drivers/tty/serial/8250/8250_pci.c6
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c5
-rw-r--r--drivers/tty/serial/8250/8250_port.c16
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c7
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c11
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/8250/serial_cs.c1
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/altera_jtaguart.c6
-rw-r--r--drivers/tty/serial/altera_uart.c14
-rw-r--r--drivers/tty/serial/amba-pl010.c15
-rw-r--r--drivers/tty/serial/amba-pl011.c32
-rw-r--r--drivers/tty/serial/amba-pl011.h1
-rw-r--r--drivers/tty/serial/apbuart.c1
-rw-r--r--drivers/tty/serial/apbuart.h1
-rw-r--r--drivers/tty/serial/ar933x_uart.c5
-rw-r--r--drivers/tty/serial/arc_uart.c5
-rw-r--r--drivers/tty/serial/atmel_serial.c65
-rw-r--r--drivers/tty/serial/atmel_serial.h6
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c11
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c5
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h3
-rw-r--r--drivers/tty/serial/bfin_uart.c10
-rw-r--r--drivers/tty/serial/clps711x.c6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart.h6
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c16
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c16
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h1
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c16
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h1
-rw-r--r--drivers/tty/serial/crisv10.c5
-rw-r--r--drivers/tty/serial/crisv10.h1
-rw-r--r--drivers/tty/serial/digicolor-usart.c6
-rw-r--r--drivers/tty/serial/dz.c1
-rw-r--r--drivers/tty/serial/dz.h1
-rw-r--r--drivers/tty/serial/earlycon-arm-semihost.c13
-rw-r--r--drivers/tty/serial/earlycon.c5
-rw-r--r--drivers/tty/serial/efm32-uart.c1
-rw-r--r--drivers/tty/serial/etraxfs-uart.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c40
-rw-r--r--drivers/tty/serial/icom.c16
-rw-r--r--drivers/tty/serial/icom.h15
-rw-r--r--drivers/tty/serial/ifx6x60.c23
-rw-r--r--drivers/tty/serial/ifx6x60.h18
-rw-r--r--drivers/tty/serial/imx.c107
-rw-r--r--drivers/tty/serial/ioc3_serial.c5
-rw-r--r--drivers/tty/serial/ioc4_serial.c5
-rw-r--r--drivers/tty/serial/ip22zilog.c1
-rw-r--r--drivers/tty/serial/ip22zilog.h1
-rw-r--r--drivers/tty/serial/jsm/jsm.h11
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_neo.c11
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c27
-rw-r--r--drivers/tty/serial/kgdb_nmi.c11
-rw-r--r--drivers/tty/serial/kgdboc.c8
-rw-r--r--drivers/tty/serial/lantiq.c14
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c11
-rw-r--r--drivers/tty/serial/m32r_sio.c14
-rw-r--r--drivers/tty/serial/m32r_sio_reg.h4
-rw-r--r--drivers/tty/serial/max3100.c19
-rw-r--r--drivers/tty/serial/max310x.c6
-rw-r--r--drivers/tty/serial/mcf.c6
-rw-r--r--drivers/tty/serial/men_z135_uart.c5
-rw-r--r--drivers/tty/serial/meson_uart.c31
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c5
-rw-r--r--drivers/tty/serial/mps2-uart.c5
-rw-r--r--drivers/tty/serial/mpsc.c6
-rw-r--r--drivers/tty/serial/msm_serial.c10
-rw-r--r--drivers/tty/serial/mux.c11
-rw-r--r--drivers/tty/serial/mvebu-uart.c495
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/netx-serial.c14
-rw-r--r--drivers/tty/serial/omap-serial.c23
-rw-r--r--drivers/tty/serial/owl-uart.c14
-rw-r--r--drivers/tty/serial/pch_uart.c14
-rw-r--r--drivers/tty/serial/pic32_uart.c3
-rw-r--r--drivers/tty/serial/pic32_uart.h3
-rw-r--r--drivers/tty/serial/pmac_zilog.c15
-rw-r--r--drivers/tty/serial/pmac_zilog.h1
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c14
-rw-r--r--drivers/tty/serial/pxa.c6
-rw-r--r--drivers/tty/serial/rp2.c5
-rw-r--r--drivers/tty/serial/sa1100.c23
-rw-r--r--drivers/tty/serial/samsung.c5
-rw-r--r--drivers/tty/serial/samsung.h5
-rw-r--r--drivers/tty/serial/sb1250-duart.c6
-rw-r--r--drivers/tty/serial/sc16is7xx.c7
-rw-r--r--drivers/tty/serial/sccnxp.c13
-rw-r--r--drivers/tty/serial/serial-tegra.c13
-rw-r--r--drivers/tty/serial/serial_core.c54
-rw-r--r--drivers/tty/serial/serial_ks8695.c7
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c11
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h12
-rw-r--r--drivers/tty/serial/serial_txx9.c5
-rw-r--r--drivers/tty/serial/sh-sci.c115
-rw-r--r--drivers/tty/serial/sh-sci.h4
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c3
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h3
-rw-r--r--drivers/tty/serial/sn_console.c27
-rw-r--r--drivers/tty/serial/sprd_serial.c10
-rw-r--r--drivers/tty/serial/st-asc.c7
-rw-r--r--drivers/tty/serial/stm32-usart.c5
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/suncore.c1
-rw-r--r--drivers/tty/serial/sunhv.c1
-rw-r--r--drivers/tty/serial/sunsab.c1
-rw-r--r--drivers/tty/serial/sunsab.h1
-rw-r--r--drivers/tty/serial/sunsu.c1
-rw-r--r--drivers/tty/serial/sunzilog.c1
-rw-r--r--drivers/tty/serial/sunzilog.h1
-rw-r--r--drivers/tty/serial/tilegx.c11
-rw-r--r--drivers/tty/serial/timbuart.c14
-rw-r--r--drivers/tty/serial/timbuart.h14
-rw-r--r--drivers/tty/serial/uartlite.c7
-rw-r--r--drivers/tty/serial/ucc_uart.c6
-rw-r--r--drivers/tty/serial/vr41xx_siu.c15
-rw-r--r--drivers/tty/serial/vt8500_serial.c10
-rw-r--r--drivers/tty/serial/xilinx_uartps.c9
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/serial/zs.h1
-rw-r--r--drivers/tty/synclink.c14
-rw-r--r--drivers/tty/synclink_gt.c19
-rw-r--r--drivers/tty/synclinkmp.c19
-rw-r--r--drivers/tty/sysrq.c19
-rw-r--r--drivers/tty/tty_audit.c6
-rw-r--r--drivers/tty/tty_baudrate.c1
-rw-r--r--drivers/tty/tty_buffer.c3
-rw-r--r--drivers/tty/tty_io.c1
-rw-r--r--drivers/tty/tty_ioctl.c1
-rw-r--r--drivers/tty/tty_jobctrl.c1
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/tty/tty_ldsem.c4
-rw-r--r--drivers/tty/tty_mutex.c1
-rw-r--r--drivers/tty/tty_port.c5
-rw-r--r--drivers/tty/vcc.c25
-rw-r--r--drivers/tty/vt/Makefile1
-rw-r--r--drivers/tty/vt/consolemap.c1
-rw-r--r--drivers/tty/vt/keyboard.c5
-rw-r--r--drivers/tty/vt/selection.c51
-rw-r--r--drivers/tty/vt/vc_screen.c1
-rw-r--r--drivers/tty/vt/vt.c113
-rw-r--r--drivers/tty/vt/vt_ioctl.c69
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/usb/Kconfig9
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c43
-rw-r--r--drivers/usb/atm/speedtch.c35
-rw-r--r--drivers/usb/atm/ueagle-atm.c1
-rw-r--r--drivers/usb/atm/usbatm.c26
-rw-r--r--drivers/usb/atm/usbatm.h16
-rw-r--r--drivers/usb/atm/xusbatm.c16
-rw-r--r--drivers/usb/c67x00/Makefile1
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c16
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c16
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.h16
-rw-r--r--drivers/usb/c67x00/c67x00-ll-hpi.c16
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c18
-rw-r--r--drivers/usb/c67x00/c67x00.h16
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/bits.h5
-rw-r--r--drivers/usb/chipidea/ci.h5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c5
-rw-r--r--drivers/usb/chipidea/ci_hdrc_zevio.c6
-rw-r--r--drivers/usb/chipidea/core.c5
-rw-r--r--drivers/usb/chipidea/debug.c1
-rw-r--r--drivers/usb/chipidea/host.c14
-rw-r--r--drivers/usb/chipidea/host.h1
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/chipidea/otg.h5
-rw-r--r--drivers/usb/chipidea/otg_fsm.c5
-rw-r--r--drivers/usb/chipidea/otg_fsm.h5
-rw-r--r--drivers/usb/chipidea/udc.c9
-rw-r--r--drivers/usb/chipidea/udc.h5
-rw-r--r--drivers/usb/chipidea/ulpi.c10
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c8
-rw-r--r--drivers/usb/class/Makefile1
-rw-r--r--drivers/usb/class/cdc-acm.c15
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/cdc-wdm.c3
-rw-r--r--drivers/usb/class/usblp.c17
-rw-r--r--drivers/usb/class/usbtmc.c15
-rw-r--r--drivers/usb/common/Makefile1
-rw-r--r--drivers/usb/common/common.c5
-rw-r--r--drivers/usb/common/led.c6
-rw-r--r--drivers/usb/common/ulpi.c5
-rw-r--r--drivers/usb/common/usb-otg-fsm.c15
-rw-r--r--drivers/usb/core/Makefile1
-rw-r--r--drivers/usb/core/buffer.c2
-rw-r--r--drivers/usb/core/config.c2
-rw-r--r--drivers/usb/core/devices.c15
-rw-r--r--drivers/usb/core/devio.c31
-rw-r--r--drivers/usb/core/driver.c16
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd-pci.c15
-rw-r--r--drivers/usb/core/hcd.c25
-rw-r--r--drivers/usb/core/hub.c23
-rw-r--r--drivers/usb/core/hub.h10
-rw-r--r--drivers/usb/core/ledtrig-usbport.c5
-rw-r--r--drivers/usb/core/message.c53
-rw-r--r--drivers/usb/core/notify.c2
-rw-r--r--drivers/usb/core/of.c13
-rw-r--r--drivers/usb/core/otg_whitelist.h6
-rw-r--r--drivers/usb/core/port.c11
-rw-r--r--drivers/usb/core/quirks.c16
-rw-r--r--drivers/usb/core/sysfs.c9
-rw-r--r--drivers/usb/core/urb.c33
-rw-r--r--drivers/usb/core/usb-acpi.c6
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/dwc2/Makefile1
-rw-r--r--drivers/usb/dwc2/core.c1
-rw-r--r--drivers/usb/dwc2/core.h5
-rw-r--r--drivers/usb/dwc2/core_intr.c1
-rw-r--r--drivers/usb/dwc2/debug.h10
-rw-r--r--drivers/usb/dwc2/debugfs.c10
-rw-r--r--drivers/usb/dwc2/gadget.c12
-rw-r--r--drivers/usb/dwc2/hcd.c20
-rw-r--r--drivers/usb/dwc2/hcd.h1
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c1
-rw-r--r--drivers/usb/dwc2/hcd_intr.c1
-rw-r--r--drivers/usb/dwc2/hcd_queue.c8
-rw-r--r--drivers/usb/dwc2/hw.h1
-rw-r--r--drivers/usb/dwc2/params.c15
-rw-r--r--drivers/usb/dwc2/pci.c1
-rw-r--r--drivers/usb/dwc2/platform.c1
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c74
-rw-r--r--drivers/usb/dwc3/core.h12
-rw-r--r--drivers/usb/dwc3/debug.h10
-rw-r--r--drivers/usb/dwc3/debugfs.c10
-rw-r--r--drivers/usb/dwc3/drd.c13
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c10
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c10
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c39
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c39
-rw-r--r--drivers/usb/dwc3/dwc3-st.c6
-rw-r--r--drivers/usb/dwc3/ep0.c30
-rw-r--r--drivers/usb/dwc3/gadget.c72
-rw-r--r--drivers/usb/dwc3/gadget.h10
-rw-r--r--drivers/usb/dwc3/host.c10
-rw-r--r--drivers/usb/dwc3/io.h10
-rw-r--r--drivers/usb/dwc3/trace.c10
-rw-r--r--drivers/usb/dwc3/trace.h10
-rw-r--r--drivers/usb/dwc3/ulpi.c5
-rw-r--r--drivers/usb/early/Makefile1
-rw-r--r--drivers/usb/early/ehci-dbgp.c1
-rw-r--r--drivers/usb/early/xhci-dbc.c5
-rw-r--r--drivers/usb/early/xhci-dbc.h11
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/composite.c13
-rw-r--r--drivers/usb/gadget/config.c6
-rw-r--r--drivers/usb/gadget/configfs.c11
-rw-r--r--drivers/usb/gadget/configfs.h1
-rw-r--r--drivers/usb/gadget/epautoconf.c6
-rw-r--r--drivers/usb/gadget/function/Makefile1
-rw-r--r--drivers/usb/gadget/function/f_acm.c7
-rw-r--r--drivers/usb/gadget/function/f_ecm.c8
-rw-r--r--drivers/usb/gadget/function/f_eem.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/f_hid.c8
-rw-r--r--drivers/usb/gadget/function/f_loopback.c8
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c5
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h1
-rw-r--r--drivers/usb/gadget/function/f_midi.c5
-rw-r--r--drivers/usb/gadget/function/f_ncm.c8
-rw-r--r--drivers/usb/gadget/function/f_obex.c8
-rw-r--r--drivers/usb/gadget/function/f_phonet.c9
-rw-r--r--drivers/usb/gadget/function/f_printer.c8
-rw-r--r--drivers/usb/gadget/function/f_rndis.c8
-rw-r--r--drivers/usb/gadget/function/f_serial.c7
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c8
-rw-r--r--drivers/usb/gadget/function/f_subset.c8
-rw-r--r--drivers/usb/gadget/function/f_tcm.c5
-rw-r--r--drivers/usb/gadget/function/f_uac1.c8
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c5
-rw-r--r--drivers/usb/gadget/function/f_uac2.c8
-rw-r--r--drivers/usb/gadget/function/f_uvc.c6
-rw-r--r--drivers/usb/gadget/function/f_uvc.h6
-rw-r--r--drivers/usb/gadget/function/g_zero.h1
-rw-r--r--drivers/usb/gadget/function/rndis.c5
-rw-r--r--drivers/usb/gadget/function/rndis.h5
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/function/storage_common.h1
-rw-r--r--drivers/usb/gadget/function/tcm.h1
-rw-r--r--drivers/usb/gadget/function/u_audio.c11
-rw-r--r--drivers/usb/gadget/function/u_audio.h12
-rw-r--r--drivers/usb/gadget/function/u_ecm.h5
-rw-r--r--drivers/usb/gadget/function/u_eem.h5
-rw-r--r--drivers/usb/gadget/function/u_ether.c6
-rw-r--r--drivers/usb/gadget/function/u_ether.h6
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h5
-rw-r--r--drivers/usb/gadget/function/u_fs.h5
-rw-r--r--drivers/usb/gadget/function/u_gether.h5
-rw-r--r--drivers/usb/gadget/function/u_hid.h5
-rw-r--r--drivers/usb/gadget/function/u_midi.h5
-rw-r--r--drivers/usb/gadget/function/u_ncm.h5
-rw-r--r--drivers/usb/gadget/function/u_phonet.h5
-rw-r--r--drivers/usb/gadget/function/u_printer.h5
-rw-r--r--drivers/usb/gadget/function/u_rndis.h5
-rw-r--r--drivers/usb/gadget/function/u_serial.c6
-rw-r--r--drivers/usb/gadget/function/u_serial.h5
-rw-r--r--drivers/usb/gadget/function/u_tcm.h5
-rw-r--r--drivers/usb/gadget/function/u_uac1.h5
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c3
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.h3
-rw-r--r--drivers/usb/gadget/function/u_uac2.h5
-rw-r--r--drivers/usb/gadget/function/u_uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc.h6
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c55
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h5
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c6
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h1
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.h7
-rw-r--r--drivers/usb/gadget/function/uvc_video.c6
-rw-r--r--drivers/usb/gadget/function/uvc_video.h5
-rw-r--r--drivers/usb/gadget/functions.c1
-rw-r--r--drivers/usb/gadget/legacy/Makefile1
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c6
-rw-r--r--drivers/usb/gadget/legacy/audio.c3
-rw-r--r--drivers/usb/gadget/legacy/cdc2.c6
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c1
-rw-r--r--drivers/usb/gadget/legacy/ether.c6
-rw-r--r--drivers/usb/gadget/legacy/g_ffs.c6
-rw-r--r--drivers/usb/gadget/legacy/gmidi.c4
-rw-r--r--drivers/usb/gadget/legacy/hid.c6
-rw-r--r--drivers/usb/gadget/legacy/inode.c6
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c6
-rw-r--r--drivers/usb/gadget/legacy/multi.c6
-rw-r--r--drivers/usb/gadget/legacy/ncm.c6
-rw-r--r--drivers/usb/gadget/legacy/nokia.c5
-rw-r--r--drivers/usb/gadget/legacy/printer.c6
-rw-r--r--drivers/usb/gadget/legacy/serial.c5
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c6
-rw-r--r--drivers/usb/gadget/legacy/zero.c14
-rw-r--r--drivers/usb/gadget/u_f.c5
-rw-r--r--drivers/usb/gadget/u_f.h5
-rw-r--r--drivers/usb/gadget/u_os_desc.h5
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h6
-rw-r--r--drivers/usb/gadget/udc/amd5536udc_pci.c6
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c13
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h6
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c5
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h5
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c6
-rw-r--r--drivers/usb/gadget/udc/bdc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_cmd.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.h7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_pci.c7
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c7
-rw-r--r--drivers/usb/gadget/udc/core.c15
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c86
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c5
-rw-r--r--drivers/usb/gadget/udc/fotg210.h6
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.h6
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h6
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c5
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.h5
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c19
-rw-r--r--drivers/usb/gadget/udc/goku_udc.h5
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c12
-rw-r--r--drivers/usb/gadget/udc/gr_udc.h6
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c15
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c13
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.h5
-rw-r--r--drivers/usb/gadget/udc/mv_u3d.h5
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c5
-rw-r--r--drivers/usb/gadget/udc/mv_udc.h6
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c15
-rw-r--r--drivers/usb/gadget/udc/net2272.h15
-rw-r--r--drivers/usb/gadget/udc/net2280.c6
-rw-r--r--drivers/usb/gadget/udc/net2280.h6
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c14
-rw-r--r--drivers/usb/gadget/udc/omap_udc.h1
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c5
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c14
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h7
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.h6
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c13
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.h5
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c80
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c7
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c6
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h6
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c29
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c10
-rw-r--r--drivers/usb/gadget/udc/trace.c10
-rw-r--r--drivers/usb/gadget/udc/trace.h13
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c7
-rw-r--r--drivers/usb/gadget/usbstring.c6
-rw-r--r--drivers/usb/host/Kconfig16
-rw-r--r--drivers/usb/host/Makefile6
-rw-r--r--drivers/usb/host/bcma-hcd.c3
-rw-r--r--drivers/usb/host/ehci-atmel.c8
-rw-r--r--drivers/usb/host/ehci-dbg.c12
-rw-r--r--drivers/usb/host/ehci-exynos.c7
-rw-r--r--drivers/usb/host/ehci-fsl.c15
-rw-r--r--drivers/usb/host/ehci-fsl.h15
-rw-r--r--drivers/usb/host/ehci-grlib.c15
-rw-r--r--drivers/usb/host/ehci-hcd.c17
-rw-r--r--drivers/usb/host/ehci-hub.c15
-rw-r--r--drivers/usb/host/ehci-mem.c15
-rw-r--r--drivers/usb/host/ehci-msm.c265
-rw-r--r--drivers/usb/host/ehci-mv.c6
-rw-r--r--drivers/usb/host/ehci-mxc.c15
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-orion.c5
-rw-r--r--drivers/usb/host/ehci-pci.c15
-rw-r--r--drivers/usb/host/ehci-platform.c38
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c6
-rw-r--r--drivers/usb/host/ehci-ppc-of.c1
-rw-r--r--drivers/usb/host/ehci-ps3.c14
-rw-r--r--drivers/usb/host/ehci-q.c15
-rw-r--r--drivers/usb/host/ehci-sched.c15
-rw-r--r--drivers/usb/host/ehci-sh.c5
-rw-r--r--drivers/usb/host/ehci-spear.c5
-rw-r--r--drivers/usb/host/ehci-st.c5
-rw-r--r--drivers/usb/host/ehci-sysfs.c15
-rw-r--r--drivers/usb/host/ehci-tegra.c12
-rw-r--r--drivers/usb/host/ehci-tilegx.c11
-rw-r--r--drivers/usb/host/ehci-timer.c11
-rw-r--r--drivers/usb/host/ehci-w90x900.c6
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c16
-rw-r--r--drivers/usb/host/ehci.h15
-rw-r--r--drivers/usb/host/fhci-dbg.c6
-rw-r--r--drivers/usb/host/fhci-hcd.c6
-rw-r--r--drivers/usb/host/fhci-hub.c6
-rw-r--r--drivers/usb/host/fhci-mem.c6
-rw-r--r--drivers/usb/host/fhci-q.c6
-rw-r--r--drivers/usb/host/fhci-sched.c6
-rw-r--r--drivers/usb/host/fhci-tds.c6
-rw-r--r--drivers/usb/host/fhci.h6
-rw-r--r--drivers/usb/host/fotg210-hcd.c17
-rw-r--r--drivers/usb/host/fotg210.h1
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c6
-rw-r--r--drivers/usb/host/hwa-hc.c16
-rw-r--r--drivers/usb/host/imx21-dbg.c15
-rw-r--r--drivers/usb/host/imx21-hcd.c15
-rw-r--r--drivers/usb/host/imx21-hcd.h15
-rw-r--r--drivers/usb/host/isp116x-hcd.c4
-rw-r--r--drivers/usb/host/isp116x.h1
-rw-r--r--drivers/usb/host/isp1362-hcd.c5
-rw-r--r--drivers/usb/host/isp1362.h1
-rw-r--r--drivers/usb/host/max3421-hcd.c81
-rw-r--r--drivers/usb/host/ohci-at91.c1
-rw-r--r--drivers/usb/host/ohci-da8xx.c5
-rw-r--r--drivers/usb/host/ohci-dbg.c1
-rw-r--r--drivers/usb/host/ohci-exynos.c7
-rw-r--r--drivers/usb/host/ohci-hcd.c14
-rw-r--r--drivers/usb/host/ohci-hub.c1
-rw-r--r--drivers/usb/host/ohci-mem.c1
-rw-r--r--drivers/usb/host/ohci-nxp.c6
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-pci.c1
-rw-r--r--drivers/usb/host/ohci-platform.c40
-rw-r--r--drivers/usb/host/ohci-ppc-of.c1
-rw-r--r--drivers/usb/host/ohci-ps3.c14
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/ohci-q.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/host/ohci-sa1111.c26
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c5
-rw-r--r--drivers/usb/host/ohci-st.c5
-rw-r--r--drivers/usb/host/ohci-tilegx.c11
-rw-r--r--drivers/usb/host/ohci-tmio.c5
-rw-r--r--drivers/usb/host/ohci.h1
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c23
-rw-r--r--drivers/usb/host/oxu210hp.h1
-rw-r--r--drivers/usb/host/pci-quirks.c3
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/r8a66597-hcd.c49
-rw-r--r--drivers/usb/host/r8a66597.h26
-rw-r--r--drivers/usb/host/sl811-hcd.c7
-rw-r--r--drivers/usb/host/sl811.h1
-rw-r--r--drivers/usb/host/sl811_cs.c1
-rw-r--r--drivers/usb/host/ssb-hcd.c3
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/uhci-debug.c1
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c4
-rw-r--r--drivers/usb/host/uhci-hcd.h5
-rw-r--r--drivers/usb/host/uhci-hub.c1
-rw-r--r--drivers/usb/host/uhci-pci.c1
-rw-r--r--drivers/usb/host/uhci-platform.c1
-rw-r--r--drivers/usb/host/uhci-q.c5
-rw-r--r--drivers/usb/host/whci/asl.c13
-rw-r--r--drivers/usb/host/whci/debug.c13
-rw-r--r--drivers/usb/host/whci/hcd.c13
-rw-r--r--drivers/usb/host/whci/hw.c13
-rw-r--r--drivers/usb/host/whci/init.c13
-rw-r--r--drivers/usb/host/whci/int.c13
-rw-r--r--drivers/usb/host/whci/pzl.c13
-rw-r--r--drivers/usb/host/whci/qset.c13
-rw-r--r--drivers/usb/host/whci/whcd.h15
-rw-r--r--drivers/usb/host/whci/whci-hc.h15
-rw-r--r--drivers/usb/host/whci/wusb.c13
-rw-r--r--drivers/usb/host/xhci-dbg.c14
-rw-r--r--drivers/usb/host/xhci-debugfs.c523
-rw-r--r--drivers/usb/host/xhci-debugfs.h134
-rw-r--r--drivers/usb/host/xhci-ext-caps.h14
-rw-r--r--drivers/usb/host/xhci-hub.c20
-rw-r--r--drivers/usb/host/xhci-mem.c24
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c14
-rw-r--r--drivers/usb/host/xhci-mtk.c144
-rw-r--r--drivers/usb/host/xhci-mtk.h16
-rw-r--r--drivers/usb/host/xhci-mvebu.c5
-rw-r--r--drivers/usb/host/xhci-mvebu.h5
-rw-r--r--drivers/usb/host/xhci-pci.c14
-rw-r--r--drivers/usb/host/xhci-plat.c16
-rw-r--r--drivers/usb/host/xhci-plat.h5
-rw-r--r--drivers/usb/host/xhci-rcar.c5
-rw-r--r--drivers/usb/host/xhci-rcar.h5
-rw-r--r--drivers/usb/host/xhci-ring.c35
-rw-r--r--drivers/usb/host/xhci-tegra.c9
-rw-r--r--drivers/usb/host/xhci-trace.c5
-rw-r--r--drivers/usb/host/xhci-trace.h10
-rw-r--r--drivers/usb/host/xhci.c111
-rw-r--r--drivers/usb/host/xhci.h34
-rw-r--r--drivers/usb/image/Makefile1
-rw-r--r--drivers/usb/image/mdc800.c16
-rw-r--r--drivers/usb/image/microtek.c1
-rw-r--r--drivers/usb/image/microtek.h1
-rw-r--r--drivers/usb/isp1760/Makefile1
-rw-r--r--drivers/usb/isp1760/isp1760-core.c5
-rw-r--r--drivers/usb/isp1760/isp1760-core.h5
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c9
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.h1
-rw-r--r--drivers/usb/isp1760/isp1760-if.c1
-rw-r--r--drivers/usb/isp1760/isp1760-regs.h5
-rw-r--r--drivers/usb/isp1760/isp1760-udc.c12
-rw-r--r--drivers/usb/isp1760/isp1760-udc.h5
-rw-r--r--drivers/usb/misc/Kconfig4
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/adutux.c8
-rw-r--r--drivers/usb/misc/appledisplay.c15
-rw-r--r--drivers/usb/misc/chaoskey.c10
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c5
-rw-r--r--drivers/usb/misc/cytherm.c6
-rw-r--r--drivers/usb/misc/ehset.c10
-rw-r--r--drivers/usb/misc/emi26.c5
-rw-r--r--drivers/usb/misc/emi62.c5
-rw-r--r--drivers/usb/misc/ezusb.c5
-rw-r--r--drivers/usb/misc/ftdi-elan.c6
-rw-r--r--drivers/usb/misc/idmouse.c6
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/isight_firmware.c5
-rw-r--r--drivers/usb/misc/ldusb.c6
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/misc/lvstest.c5
-rw-r--r--drivers/usb/misc/rio500.c15
-rw-r--r--drivers/usb/misc/rio500_usb.h19
-rw-r--r--drivers/usb/misc/sisusbvga/Makefile1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.h1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.h1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_struct.h1
-rw-r--r--drivers/usb/misc/trancevibrator.c15
-rw-r--r--drivers/usb/misc/usb251xb.c177
-rw-r--r--drivers/usb/misc/usb3503.c15
-rw-r--r--drivers/usb/misc/usb4604.c11
-rw-r--r--drivers/usb/misc/usb_u132.h6
-rw-r--r--drivers/usb/misc/usblcd.c1
-rw-r--r--drivers/usb/misc/usbsevseg.c6
-rw-r--r--drivers/usb/misc/usbtest.c33
-rw-r--r--drivers/usb/misc/uss720.c15
-rw-r--r--drivers/usb/misc/yurex.c6
-rw-r--r--drivers/usb/mon/Makefile1
-rw-r--r--drivers/usb/mon/mon_bin.c1
-rw-r--r--drivers/usb/mon/mon_main.c1
-rw-r--r--drivers/usb/mon/mon_stat.c1
-rw-r--r--drivers/usb/mon/mon_text.c1
-rw-r--r--drivers/usb/mon/usb_mon.h1
-rw-r--r--drivers/usb/mtu3/Makefile1
-rw-r--r--drivers/usb/mtu3/mtu3.h59
-rw-r--r--drivers/usb/mtu3/mtu3_core.c72
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c72
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h17
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c14
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c27
-rw-r--r--drivers/usb/mtu3/mtu3_host.c89
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h24
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c176
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c113
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h11
-rw-r--r--drivers/usb/musb/Makefile1
-rw-r--r--drivers/usb/musb/am35x.c42
-rw-r--r--drivers/usb/musb/blackfin.c16
-rw-r--r--drivers/usb/musb/blackfin.h8
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/cppi_dma.h1
-rw-r--r--drivers/usb/musb/da8xx.c47
-rw-r--r--drivers/usb/musb/davinci.c38
-rw-r--r--drivers/usb/musb/davinci.h6
-rw-r--r--drivers/usb/musb/jz4740.c10
-rw-r--r--drivers/usb/musb/musb_am335x.c1
-rw-r--r--drivers/usb/musb/musb_core.c34
-rw-r--r--drivers/usb/musb/musb_core.h28
-rw-r--r--drivers/usb/musb/musb_cppi41.c1
-rw-r--r--drivers/usb/musb/musb_debug.h27
-rw-r--r--drivers/usb/musb/musb_debugfs.c27
-rw-r--r--drivers/usb/musb/musb_dma.h27
-rw-r--r--drivers/usb/musb/musb_dsps.c35
-rw-r--r--drivers/usb/musb/musb_gadget.c27
-rw-r--r--drivers/usb/musb/musb_gadget.h27
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c27
-rw-r--r--drivers/usb/musb/musb_host.c27
-rw-r--r--drivers/usb/musb/musb_host.h27
-rw-r--r--drivers/usb/musb/musb_io.h27
-rw-r--r--drivers/usb/musb/musb_regs.h27
-rw-r--r--drivers/usb/musb/musb_trace.c10
-rw-r--r--drivers/usb/musb/musb_trace.h10
-rw-r--r--drivers/usb/musb/musb_virthub.c27
-rw-r--r--drivers/usb/musb/musbhsdma.c27
-rw-r--r--drivers/usb/musb/musbhsdma.h27
-rw-r--r--drivers/usb/musb/omap2430.c18
-rw-r--r--drivers/usb/musb/omap2430.h6
-rw-r--r--drivers/usb/musb/sunxi.c11
-rw-r--r--drivers/usb/musb/tusb6010.c25
-rw-r--r--drivers/usb/musb/tusb6010.h5
-rw-r--r--drivers/usb/musb/tusb6010_omap.c5
-rw-r--r--drivers/usb/musb/ux500.c15
-rw-r--r--drivers/usb/musb/ux500_dma.c14
-rw-r--r--drivers/usb/phy/Kconfig29
-rw-r--r--drivers/usb/phy/Makefile3
-rw-r--r--drivers/usb/phy/of.c6
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c16
-rw-r--r--drivers/usb/phy/phy-am335x-control.c1
-rw-r--r--drivers/usb/phy/phy-am335x-control.h1
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c15
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h18
-rw-r--r--drivers/usb/phy/phy-generic.c20
-rw-r--r--drivers/usb/phy/phy-generic.h1
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c5
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c26
-rw-r--r--drivers/usb/phy/phy-isp1301.c5
-rw-r--r--drivers/usb/phy/phy-keystone.c11
-rw-r--r--drivers/usb/phy/phy-msm-usb.c2085
-rw-r--r--drivers/usb/phy/phy-mv-usb.c23
-rw-r--r--drivers/usb/phy/phy-mv-usb.h6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c162
-rw-r--r--drivers/usb/phy/phy-omap-otg.c10
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c366
-rw-r--r--drivers/usb/phy/phy-tahvo.c15
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c11
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c15
-rw-r--r--drivers/usb/phy/phy-ulpi-viewport.c11
-rw-r--r--drivers/usb/phy/phy-ulpi.c15
-rw-r--r--drivers/usb/phy/phy.c6
-rw-r--r--drivers/usb/renesas_usbhs/Makefile1
-rw-r--r--drivers/usb/renesas_usbhs/common.c39
-rw-r--r--drivers/usb/renesas_usbhs/common.h11
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c11
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h11
-rw-r--r--drivers/usb/renesas_usbhs/mod.c11
-rw-r--r--drivers/usb/renesas_usbhs/mod.h11
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c11
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c11
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c11
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h11
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.c7
-rw-r--r--drivers/usb/renesas_usbhs/rcar2.h1
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c54
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h2
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/Makefile-keyspan_pda_fw1
-rw-r--r--drivers/usb/serial/aircable.c7
-rw-r--r--drivers/usb/serial/ark3116.c6
-rw-r--r--drivers/usb/serial/belkin_sa.c6
-rw-r--r--drivers/usb/serial/belkin_sa.h6
-rw-r--r--drivers/usb/serial/bus.c5
-rw-r--r--drivers/usb/serial/ch341.c7
-rw-r--r--drivers/usb/serial/console.c5
-rw-r--r--drivers/usb/serial/cp210x.c7
-rw-r--r--drivers/usb/serial/cyberjack.c6
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/cypress_m8.h1
-rw-r--r--drivers/usb/serial/digi_acceleport.c6
-rw-r--r--drivers/usb/serial/empeg.c7
-rw-r--r--drivers/usb/serial/ezusb_convert.pl1
-rw-r--r--drivers/usb/serial/f81232.c6
-rw-r--r--drivers/usb/serial/f81534.c96
-rw-r--r--drivers/usb/serial/ftdi_sio.c6
-rw-r--r--drivers/usb/serial/ftdi_sio.h1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/garmin_gps.c82
-rw-r--r--drivers/usb/serial/generic.c5
-rw-r--r--drivers/usb/serial/io_16654.h5
-rw-r--r--drivers/usb/serial/io_edgeport.c6
-rw-r--r--drivers/usb/serial/io_edgeport.h7
-rw-r--r--drivers/usb/serial/io_ionsp.h6
-rw-r--r--drivers/usb/serial/io_ti.c6
-rw-r--r--drivers/usb/serial/io_ti.h7
-rw-r--r--drivers/usb/serial/io_usbvend.h5
-rw-r--r--drivers/usb/serial/ipaq.c6
-rw-r--r--drivers/usb/serial/ipw.c6
-rw-r--r--drivers/usb/serial/ir-usb.c6
-rw-r--r--drivers/usb/serial/iuu_phoenix.c7
-rw-r--r--drivers/usb/serial/iuu_phoenix.h7
-rw-r--r--drivers/usb/serial/keyspan.c6
-rw-r--r--drivers/usb/serial/keyspan_pda.c6
-rw-r--r--drivers/usb/serial/kl5kusb105.c6
-rw-r--r--drivers/usb/serial/kl5kusb105.h1
-rw-r--r--drivers/usb/serial/kobil_sct.c7
-rw-r--r--drivers/usb/serial/kobil_sct.h1
-rw-r--r--drivers/usb/serial/mct_u232.c6
-rw-r--r--drivers/usb/serial/mct_u232.h6
-rw-r--r--drivers/usb/serial/metro-usb.c44
-rw-r--r--drivers/usb/serial/mos7720.c7
-rw-r--r--drivers/usb/serial/mos7840.c30
-rw-r--r--drivers/usb/serial/mxuport.c6
-rw-r--r--drivers/usb/serial/navman.c7
-rw-r--r--drivers/usb/serial/omninet.c7
-rw-r--r--drivers/usb/serial/opticon.c7
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/oti6858.c7
-rw-r--r--drivers/usb/serial/oti6858.h6
-rw-r--r--drivers/usb/serial/pl2303.c7
-rw-r--r--drivers/usb/serial/pl2303.h7
-rw-r--r--drivers/usb/serial/qcaux.c7
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/usb/serial/quatech2.c8
-rw-r--r--drivers/usb/serial/safe_serial.c6
-rw-r--r--drivers/usb/serial/sierra.c7
-rw-r--r--drivers/usb/serial/spcp8x5.c6
-rw-r--r--drivers/usb/serial/ssu100.c3
-rw-r--r--drivers/usb/serial/symbolserial.c7
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c6
-rw-r--r--drivers/usb/serial/upd78f0730.c5
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/usb-serial.c29
-rw-r--r--drivers/usb/serial/usb-wwan.h1
-rw-r--r--drivers/usb/serial/usb_debug.c11
-rw-r--r--drivers/usb/serial/usb_wwan.c7
-rw-r--r--drivers/usb/serial/visor.c7
-rw-r--r--drivers/usb/serial/visor.h6
-rw-r--r--drivers/usb/serial/whiteheat.c6
-rw-r--r--drivers/usb/serial/whiteheat.h6
-rw-r--r--drivers/usb/serial/wishbone-serial.c6
-rw-r--r--drivers/usb/serial/xsens_mt.c7
-rw-r--r--drivers/usb/storage/Makefile1
-rw-r--r--drivers/usb/storage/alauda.c15
-rw-r--r--drivers/usb/storage/cypress_atacb.c15
-rw-r--r--drivers/usb/storage/datafab.c15
-rw-r--r--drivers/usb/storage/debug.c15
-rw-r--r--drivers/usb/storage/debug.h15
-rw-r--r--drivers/usb/storage/ene_ub6250.c17
-rw-r--r--drivers/usb/storage/freecom.c15
-rw-r--r--drivers/usb/storage/initializers.c15
-rw-r--r--drivers/usb/storage/initializers.h15
-rw-r--r--drivers/usb/storage/isd200.c15
-rw-r--r--drivers/usb/storage/jumpshot.c15
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c19
-rw-r--r--drivers/usb/storage/option_ms.c19
-rw-r--r--drivers/usb/storage/option_ms.h1
-rw-r--r--drivers/usb/storage/protocol.c15
-rw-r--r--drivers/usb/storage/protocol.h15
-rw-r--r--drivers/usb/storage/realtek_cr.c21
-rw-r--r--drivers/usb/storage/scsiglue.c15
-rw-r--r--drivers/usb/storage/scsiglue.h15
-rw-r--r--drivers/usb/storage/sddr09.c15
-rw-r--r--drivers/usb/storage/sddr55.c16
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/sierra_ms.c1
-rw-r--r--drivers/usb/storage/sierra_ms.h1
-rw-r--r--drivers/usb/storage/transport.c15
-rw-r--r--drivers/usb/storage/transport.h15
-rw-r--r--drivers/usb/storage/uas-detect.h1
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_alauda.h15
-rw-r--r--drivers/usb/storage/unusual_cypress.h15
-rw-r--r--drivers/usb/storage/unusual_datafab.h15
-rw-r--r--drivers/usb/storage/unusual_devs.h15
-rw-r--r--drivers/usb/storage/unusual_ene_ub6250.h18
-rw-r--r--drivers/usb/storage/unusual_freecom.h15
-rw-r--r--drivers/usb/storage/unusual_isd200.h15
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h15
-rw-r--r--drivers/usb/storage/unusual_karma.h15
-rw-r--r--drivers/usb/storage/unusual_onetouch.h15
-rw-r--r--drivers/usb/storage/unusual_realtek.h14
-rw-r--r--drivers/usb/storage/unusual_sddr09.h15
-rw-r--r--drivers/usb/storage/unusual_sddr55.h15
-rw-r--r--drivers/usb/storage/unusual_uas.h15
-rw-r--r--drivers/usb/storage/unusual_usbat.h15
-rw-r--r--drivers/usb/storage/usb.c49
-rw-r--r--drivers/usb/storage/usb.h15
-rw-r--r--drivers/usb/storage/usual-tables.c15
-rw-r--r--drivers/usb/typec/Kconfig25
-rw-r--r--drivers/usb/typec/Makefile4
-rw-r--r--drivers/usb/typec/fusb302/Kconfig (renamed from drivers/staging/typec/fusb302/Kconfig)0
-rw-r--r--drivers/usb/typec/fusb302/Makefile (renamed from drivers/staging/typec/fusb302/Makefile)1
-rw-r--r--drivers/usb/typec/fusb302/fusb302.c (renamed from drivers/staging/typec/fusb302/fusb302.c)15
-rw-r--r--drivers/usb/typec/fusb302/fusb302_reg.h (renamed from drivers/staging/typec/fusb302/fusb302_reg.h)11
-rw-r--r--drivers/usb/typec/tcpm.c (renamed from drivers/staging/typec/tcpm.c)60
-rw-r--r--drivers/usb/typec/tps6598x.c473
-rw-r--r--drivers/usb/typec/typec.c5
-rw-r--r--drivers/usb/typec/typec_wcove.c599
-rw-r--r--drivers/usb/typec/ucsi/Makefile1
-rw-r--r--drivers/usb/typec/ucsi/debug.h1
-rw-r--r--drivers/usb/typec/ucsi/trace.c1
-rw-r--r--drivers/usb/typec/ucsi/trace.h1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c5
-rw-r--r--drivers/usb/usb-skeleton.c6
-rw-r--r--drivers/usb/usbip/Makefile1
-rw-r--r--drivers/usb/usbip/stub.h16
-rw-r--r--drivers/usb/usbip/stub_dev.c16
-rw-r--r--drivers/usb/usbip/stub_main.c16
-rw-r--r--drivers/usb/usbip/stub_rx.c16
-rw-r--r--drivers/usb/usbip/stub_tx.c16
-rw-r--r--drivers/usb/usbip/usbip_common.c16
-rw-r--r--drivers/usb/usbip/usbip_common.h16
-rw-r--r--drivers/usb/usbip/usbip_event.c16
-rw-r--r--drivers/usb/usbip/vhci.h7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c16
-rw-r--r--drivers/usb/usbip/vhci_rx.c16
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c16
-rw-r--r--drivers/usb/usbip/vhci_tx.c16
-rw-r--r--drivers/usb/usbip/vudc.h16
-rw-r--r--drivers/usb/usbip/vudc_dev.c23
-rw-r--r--drivers/usb/usbip/vudc_main.c14
-rw-r--r--drivers/usb/usbip/vudc_rx.c14
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c16
-rw-r--r--drivers/usb/usbip/vudc_transfer.c20
-rw-r--r--drivers/usb/usbip/vudc_tx.c14
-rw-r--r--drivers/usb/wusbcore/Makefile1
-rw-r--r--drivers/usb/wusbcore/cbaf.c16
-rw-r--r--drivers/usb/wusbcore/crypto.c16
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c16
-rw-r--r--drivers/usb/wusbcore/devconnect.c16
-rw-r--r--drivers/usb/wusbcore/mmc.c16
-rw-r--r--drivers/usb/wusbcore/pal.c13
-rw-r--r--drivers/usb/wusbcore/reservation.c13
-rw-r--r--drivers/usb/wusbcore/rh.c16
-rw-r--r--drivers/usb/wusbcore/security.c25
-rw-r--r--drivers/usb/wusbcore/wa-hc.c16
-rw-r--r--drivers/usb/wusbcore/wa-hc.h16
-rw-r--r--drivers/usb/wusbcore/wa-nep.c16
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c16
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c17
-rw-r--r--drivers/usb/wusbcore/wusbhc.c16
-rw-r--r--drivers/usb/wusbcore/wusbhc.h16
-rw-r--r--drivers/uwb/Makefile1
-rw-r--r--drivers/uwb/drp.c6
-rw-r--r--drivers/uwb/i1480/dfu/Makefile1
-rw-r--r--drivers/uwb/neh.c8
-rw-r--r--drivers/uwb/rsv.c15
-rw-r--r--drivers/uwb/uwb-internal.h2
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c31
-rw-r--r--drivers/vfio/platform/Makefile1
-rw-r--r--drivers/vfio/platform/reset/Kconfig9
-rw-r--r--drivers/vfio/platform/reset/Makefile2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c113
-rw-r--r--drivers/vfio/vfio.c2
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c3
-rw-r--r--drivers/vhost/Makefile1
-rw-r--r--drivers/vhost/net.c14
-rw-r--r--drivers/vhost/scsi.c79
-rw-r--r--drivers/vhost/test.h1
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vhost.h1
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/ili922x.c3
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/backlight/tps65217_bl.c17
-rw-r--r--drivers/video/console/Makefile1
-rw-r--r--drivers/video/fbdev/Kconfig10
-rw-r--r--drivers/video/fbdev/Makefile2
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.h1
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.h1
-rw-r--r--drivers/video/fbdev/atafb.h1
-rw-r--r--drivers/video/fbdev/atafb_utils.h1
-rw-r--r--drivers/video/fbdev/aty/Makefile1
-rw-r--r--drivers/video/fbdev/aty/ati_ids.h1
-rw-r--r--drivers/video/fbdev/aty/atyfb.h1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/aty/mach64_accel.c1
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c1
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c1
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_accel.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c10
-rw-r--r--drivers/video/fbdev/aty/radeon_i2c.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c4
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h1
-rw-r--r--drivers/video/fbdev/au1200fb.c43
-rw-r--r--drivers/video/fbdev/carminefb.h1
-rw-r--r--drivers/video/fbdev/carminefb_regs.h1
-rw-r--r--drivers/video/fbdev/cirrusfb.c6
-rw-r--r--drivers/video/fbdev/controlfb.h2
-rw-r--r--drivers/video/fbdev/core/Makefile1
-rw-r--r--drivers/video/fbdev/core/fb_draw.h1
-rw-r--r--drivers/video/fbdev/core/fbcon.c11
-rw-r--r--drivers/video/fbdev/core/fbcon.h1
-rw-r--r--drivers/video/fbdev/dnfb.c15
-rw-r--r--drivers/video/fbdev/efifb.c1
-rw-r--r--drivers/video/fbdev/geode/Makefile1
-rw-r--r--drivers/video/fbdev/goldfishfb.c8
-rw-r--r--drivers/video/fbdev/i810/Makefile1
-rw-r--r--drivers/video/fbdev/igafb.c579
-rw-r--r--drivers/video/fbdev/intelfb/Makefile1
-rw-r--r--drivers/video/fbdev/intelfb/intelfb.h1
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c9
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_crtc2.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.h1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.h1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx_reg.h1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb.h1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.h1
-rw-r--r--drivers/video/fbdev/mbx/mbxdebugfs.c1
-rw-r--r--drivers/video/fbdev/mbx/reg_bits.h1
-rw-r--r--drivers/video/fbdev/mbx/regs.h1
-rw-r--r--drivers/video/fbdev/mmp/panel/Kconfig1
-rw-r--r--drivers/video/fbdev/mxsfb.c13
-rw-r--r--drivers/video/fbdev/nvidia/Makefile1
-rw-r--r--drivers/video/fbdev/nvidia/nv_proto.h1
-rw-r--r--drivers/video/fbdev/nvidia/nv_type.h1
-rw-r--r--drivers/video/fbdev/omap/Makefile1
-rw-r--r--drivers/video/fbdev/omap/hwa742.c6
-rw-r--r--drivers/video/fbdev/omap/lcdc.h1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Makefile1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Makefile1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Makefile1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.h1
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c24
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.h1
-rw-r--r--drivers/video/fbdev/riva/Makefile1
-rw-r--r--drivers/video/fbdev/riva/nv_type.h1
-rw-r--r--drivers/video/fbdev/riva/rivafb.h1
-rw-r--r--drivers/video/fbdev/sa1100fb.c75
-rw-r--r--drivers/video/fbdev/sa1100fb.h2
-rw-r--r--drivers/video/fbdev/sbuslib.c1
-rw-r--r--drivers/video/fbdev/sbuslib.h1
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.h1
-rw-r--r--drivers/video/fbdev/sis/init301.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c4
-rw-r--r--drivers/video/fbdev/sm501fb.c22
-rw-r--r--drivers/video/fbdev/sticore.h1
-rw-r--r--drivers/video/fbdev/udlfb.c10
-rw-r--r--drivers/video/fbdev/via/Makefile1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.h1
-rw-r--r--drivers/video/logo/Makefile1
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_balloon.c24
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c73
-rw-r--r--drivers/vme/bridges/vme_fake.c35
-rw-r--r--drivers/vme/bridges/vme_tsi148.c83
-rw-r--r--drivers/vme/vme.c214
-rw-r--r--drivers/vme/vme_bridge.h1
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/slaves/Kconfig15
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c771
-rw-r--r--drivers/w1/slaves/w1_therm.c59
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/alim7101_wdt.c6
-rw-r--r--drivers/watchdog/at91sam9_wdt.c6
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c9
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c4
-rw-r--r--drivers/watchdog/cpu5wdt.c4
-rw-r--r--drivers/watchdog/cpwd.c8
-rw-r--r--drivers/watchdog/iTCO_vendor.h1
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c13
-rw-r--r--drivers/watchdog/machzwd.c6
-rw-r--r--drivers/watchdog/mixcomwd.c6
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c7
-rw-r--r--drivers/watchdog/mtx-1_wdt.c4
-rw-r--r--drivers/watchdog/nuc900_wdt.c4
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pika_wdt.c4
-rw-r--r--drivers/watchdog/rdc321x_wdt.c4
-rw-r--r--drivers/watchdog/sbc60xxwdt.c6
-rw-r--r--drivers/watchdog/sc520_wdt.c6
-rw-r--r--drivers/watchdog/shwdt.c6
-rw-r--r--drivers/watchdog/sp5100_tco.h1
-rw-r--r--drivers/watchdog/via_wdt.c6
-rw-r--r--drivers/watchdog/w83877f_wdt.c6
-rw-r--r--drivers/watchdog/watchdog_core.c35
-rw-r--r--drivers/watchdog/watchdog_dev.c32
-rw-r--r--drivers/watchdog/watchdog_pretimeout.h1
-rw-r--r--drivers/xen/Kconfig11
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/biomerge.c1
-rw-r--r--drivers/xen/cpu_hotplug.c1
-rw-r--r--drivers/xen/events/events_2l.c1
-rw-r--r--drivers/xen/grant-table.c250
-rw-r--r--drivers/xen/manage.c7
-rw-r--r--drivers/xen/privcmd.c3
-rw-r--r--drivers/xen/pvcalls-back.c20
-rw-r--r--drivers/xen/pvcalls-front.c1278
-rw-r--r--drivers/xen/pvcalls-front.h28
-rw-r--r--drivers/xen/time.c73
-rw-r--r--drivers/xen/xen-pciback/Makefile1
-rw-r--r--drivers/xen/xen-pciback/conf_space.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space.h1
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c1
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.h1
-rw-r--r--drivers/xen/xen-pciback/passthrough.c1
-rw-r--r--drivers/xen/xen-pciback/pciback.h1
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c1
-rw-r--r--drivers/xen/xen-pciback/vpci.c1
-rw-r--r--drivers/xen/xen-pciback/xenbus.c1
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/xen/xenbus/Makefile1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--drivers/xen/xenfs/xenfs.h1
-rw-r--r--drivers/xen/xenfs/xenstored.c1
-rw-r--r--drivers/xen/xenfs/xensyms.c1
-rw-r--r--drivers/zorro/Makefile1
-rw-r--r--drivers/zorro/gen-devlist.c1
-rw-r--r--drivers/zorro/names.c1
-rw-r--r--drivers/zorro/proc.c1
-rw-r--r--drivers/zorro/zorro.h1
7707 files changed, 317229 insertions, 93204 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 505c676fa9c7..152744c5ef0f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Device Drivers"
source "drivers/amba/Kconfig"
@@ -208,4 +209,6 @@ source "drivers/tee/Kconfig"
source "drivers/mux/Kconfig"
+source "drivers/opp/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index d90fdc413648..1d034b680431 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux kernel device drivers.
#
@@ -125,6 +126,7 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_EISA) += eisa/
+obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
index ef3b65bfdd0a..00f7512c9cf4 100644
--- a/drivers/accessibility/Kconfig
+++ b/drivers/accessibility/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig ACCESSIBILITY
bool "Accessibility support"
---help---
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1ce52f84dc23..46505396869e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ACPI Configuration
#
@@ -80,6 +81,11 @@ endif
config ACPI_SPCR_TABLE
bool
+config ACPI_LPIT
+ bool
+ depends on X86_64
+ default y
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
@@ -521,6 +527,12 @@ config CHT_WC_PMIC_OPREGION
help
This config adds ACPI operation region support for CHT Whiskey Cove PMIC.
+config CHT_DC_TI_PMIC_OPREGION
+ bool "ACPI operation region support for Dollar Cove TI PMIC"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ This config adds ACPI operation region support for Dollar Cove TI PMIC.
+
endif
config ACPI_CONFIGFS
@@ -535,4 +547,20 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+config TPS68470_PMIC_OPREGION
+ bool "ACPI operation region support for TPS68470 PMIC"
+ depends on MFD_TPS68470
+ help
+ This config adds ACPI operation region support for TI TPS68470 PMIC.
+ The TPS68470 device is an advanced power management unit that powers
+ a Compact Camera Module (CCM), generates clocks for image sensors,
+ drives a dual LED for flash and incorporates two LED drivers for
+ general purpose indicators.
+ This driver enables ACPI operation region support to control voltage
+ regulators and clocks.
+
+ This option is a bool as it provides an ACPI operation
+ region, which must be available before any of the devices
+ using this are probed.
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 90265ab4437a..41954a601989 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux ACPI interpreter
#
@@ -56,6 +57,7 @@ acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
@@ -104,9 +106,12 @@ obj-$(CONFIG_CRC_PMIC_OPREGION) += pmic/intel_pmic_crc.o
obj-$(CONFIG_XPOWER_PMIC_OPREGION) += pmic/intel_pmic_xpower.o
obj-$(CONFIG_BXT_WC_PMIC_OPREGION) += pmic/intel_pmic_bxtwc.o
obj-$(CONFIG_CHT_WC_PMIC_OPREGION) += pmic/intel_pmic_chtwc.o
+obj-$(CONFIG_CHT_DC_TI_PMIC_OPREGION) += pmic/intel_pmic_chtdc_ti.o
obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
+obj-$(CONFIG_TPS68470_PMIC_OPREGION) += pmic/tps68470_pmic.o
+
video-objs += acpi_video.o video_detect.o
obj-y += dptf/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8f52483219ba..47a7ed557bd6 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -265,6 +265,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
+ /* fall through */
case ACPI_AC_NOTIFY_STATUS:
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d5999eb41c00..d553b0087947 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -116,6 +116,10 @@ static const struct apd_device_desc hip08_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 250000000,
};
+static const struct apd_device_desc thunderx2_i2c_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 125000000,
+};
#endif
#else
@@ -180,6 +184,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
+ { "CAV9007", APD_ADDR(thunderx2_i2c_desc) },
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
#endif
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 853bc7fc673f..b58850389094 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -204,7 +204,7 @@ struct configfs_attribute *acpi_table_attrs[] = {
NULL,
};
-static struct config_item_type acpi_table_type = {
+static const struct config_item_type acpi_table_type = {
.ct_owner = THIS_MODULE,
.ct_bin_attrs = acpi_table_bin_attrs,
.ct_attrs = acpi_table_attrs,
@@ -237,12 +237,12 @@ struct configfs_group_operations acpi_table_group_ops = {
.drop_item = acpi_table_drop_item,
};
-static struct config_item_type acpi_tables_type = {
+static const struct config_item_type acpi_tables_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &acpi_table_group_ops,
};
-static struct config_item_type acpi_root_group_type = {
+static const struct config_item_type acpi_root_group_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
new file mode 100644
index 000000000000..e94e478dd18b
--- /dev/null
+++ b/drivers/acpi/acpi_lpit.c
@@ -0,0 +1,162 @@
+
+/*
+ * acpi_lpit.c - LPIT table processing functions
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/acpi.h>
+#include <asm/msr.h>
+#include <asm/tsc.h>
+
+struct lpit_residency_info {
+ struct acpi_generic_address gaddr;
+ u64 frequency;
+ void __iomem *iomem_addr;
+};
+
+/* Storage for memory mapped and FFH based entries */
+static struct lpit_residency_info residency_info_mem;
+static struct lpit_residency_info residency_info_ffh;
+
+static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
+{
+ int err;
+
+ if (io_mem) {
+ u64 count = 0;
+ int error;
+
+ error = acpi_os_read_iomem(residency_info_mem.iomem_addr, &count,
+ residency_info_mem.gaddr.bit_width);
+ if (error)
+ return error;
+
+ *counter = div64_u64(count * 1000000ULL, residency_info_mem.frequency);
+ return 0;
+ }
+
+ err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
+ if (!err) {
+ u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
+ residency_info_ffh.gaddr.bit_width - 1,
+ residency_info_ffh.gaddr.bit_offset);
+
+ *counter &= mask;
+ *counter >>= residency_info_ffh.gaddr.bit_offset;
+ *counter = div64_u64(*counter * 1000000ULL, residency_info_ffh.frequency);
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static ssize_t low_power_idle_system_residency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+ int ret;
+
+ ret = lpit_read_residency_counter_us(&counter, true);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%llu\n", counter);
+}
+static DEVICE_ATTR_RO(low_power_idle_system_residency_us);
+
+static ssize_t low_power_idle_cpu_residency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+ int ret;
+
+ ret = lpit_read_residency_counter_us(&counter, false);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%llu\n", counter);
+}
+static DEVICE_ATTR_RO(low_power_idle_cpu_residency_us);
+
+int lpit_read_residency_count_address(u64 *address)
+{
+ if (!residency_info_mem.gaddr.address)
+ return -EINVAL;
+
+ *address = residency_info_mem.gaddr.address;
+
+ return 0;
+}
+
+static void lpit_update_residency(struct lpit_residency_info *info,
+ struct acpi_lpit_native *lpit_native)
+{
+ info->frequency = lpit_native->counter_frequency ?
+ lpit_native->counter_frequency : tsc_khz * 1000;
+ if (!info->frequency)
+ info->frequency = 1;
+
+ info->gaddr = lpit_native->residency_counter;
+ if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ info->iomem_addr = ioremap_nocache(info->gaddr.address,
+ info->gaddr.bit_width / 8);
+ if (!info->iomem_addr)
+ return;
+
+ /* Silently fail if cpuidle attribute group is not present */
+ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ &dev_attr_low_power_idle_system_residency_us.attr,
+ "cpuidle");
+ } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+ /* Silently fail if cpuidle attribute group is not present */
+ sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
+ &dev_attr_low_power_idle_cpu_residency_us.attr,
+ "cpuidle");
+ }
+}
+
+static void lpit_process(u64 begin, u64 end)
+{
+ while (begin + sizeof(struct acpi_lpit_native) < end) {
+ struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;
+
+ if (!lpit_native->header.type && !lpit_native->header.flags) {
+ if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ !residency_info_mem.gaddr.address) {
+ lpit_update_residency(&residency_info_mem, lpit_native);
+ } else if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
+ !residency_info_ffh.gaddr.address) {
+ lpit_update_residency(&residency_info_ffh, lpit_native);
+ }
+ }
+ begin += lpit_native->header.length;
+ }
+}
+
+void acpi_init_lpit(void)
+{
+ acpi_status status;
+ u64 lpit_begin;
+ struct acpi_table_lpit *lpit;
+
+ status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);
+
+ if (ACPI_FAILURE(status))
+ return;
+
+ lpit_begin = (u64)lpit + sizeof(*lpit);
+ lpit_process(lpit_begin, lpit_begin + lpit->header.length);
+}
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 032ae44710e5..7f2b02cc8ea1 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -362,7 +362,7 @@ static int register_device_clock(struct acpi_device *adev,
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
const char *devname = dev_name(&adev->dev);
- struct clk *clk = ERR_PTR(-ENODEV);
+ struct clk *clk;
struct lpss_clk_data *clk_data;
const char *parent, *clk_name;
void __iomem *prv_base;
@@ -693,7 +693,7 @@ static int acpi_lpss_activate(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- ret = acpi_dev_runtime_resume(dev);
+ ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -713,42 +713,8 @@ static int acpi_lpss_activate(struct device *dev)
static void acpi_lpss_dismiss(struct device *dev)
{
- acpi_dev_runtime_suspend(dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_lpss_suspend_late(struct device *dev)
-{
- struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- int ret;
-
- ret = pm_generic_suspend_late(dev);
- if (ret)
- return ret;
-
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
- acpi_lpss_save_ctx(dev, pdata);
-
- return acpi_dev_suspend_late(dev);
-}
-
-static int acpi_lpss_resume_early(struct device *dev)
-{
- struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
- int ret;
-
- ret = acpi_dev_resume_early(dev);
- if (ret)
- return ret;
-
- acpi_lpss_d3_to_d0_delay(pdata);
-
- if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
- acpi_lpss_restore_ctx(dev, pdata);
-
- return pm_generic_resume_early(dev);
+ acpi_dev_suspend(dev, false);
}
-#endif /* CONFIG_PM_SLEEP */
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
@@ -835,19 +801,15 @@ static void lpss_iosf_exit_d3_state(void)
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_runtime_suspend(struct device *dev)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
- ret = pm_generic_runtime_suspend(dev);
- if (ret)
- return ret;
-
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
- ret = acpi_dev_runtime_suspend(dev);
+ ret = acpi_dev_suspend(dev, wakeup);
/*
* This call must be last in the sequence, otherwise PMC will return
@@ -860,7 +822,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
return ret;
}
-static int acpi_lpss_runtime_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -872,7 +834,7 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
- ret = acpi_dev_runtime_resume(dev);
+ ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -881,7 +843,41 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_restore_ctx(dev, pdata);
- return pm_generic_runtime_resume(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_lpss_suspend_late(struct device *dev)
+{
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+}
+
+static int acpi_lpss_resume_early(struct device *dev)
+{
+ int ret = acpi_lpss_resume(dev);
+
+ return ret ? ret : pm_generic_resume_early(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int acpi_lpss_runtime_suspend(struct device *dev)
+{
+ int ret = pm_generic_runtime_suspend(dev);
+
+ return ret ? ret : acpi_lpss_suspend(dev, true);
+}
+
+static int acpi_lpss_runtime_resume(struct device *dev)
+{
+ int ret = acpi_lpss_resume(dev);
+
+ return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
@@ -894,13 +890,20 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
- .complete = pm_complete_with_resume_check,
+ .complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
+ .suspend_noirq = acpi_subsys_suspend_noirq,
+ .resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
+ .freeze_late = acpi_subsys_freeze_late,
+ .freeze_noirq = acpi_subsys_freeze_noirq,
+ .thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
+ .poweroff_noirq = acpi_subsys_suspend_noirq,
+ .restore_noirq = acpi_subsys_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 86c10599d9f8..449d86d39965 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -82,6 +82,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
* PIIX4 models.
*/
errata.piix4.throttle = 1;
+ /* fall through */
case 2: /* PIIX4E */
case 3: /* PIIX4M */
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 1709551bc4aa..71f6f2624deb 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ACPICA Core interpreter
#
@@ -177,6 +178,7 @@ acpi-y += \
utresrc.o \
utstate.o \
utstring.o \
+ utstrsuppt.o \
utstrtoul64.o \
utxface.o \
utxfinit.o \
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index fd4f3cacb356..cd722d8edacb 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -66,9 +66,9 @@ acpi_status
acpi_hw_validate_register(struct acpi_generic_address *reg,
u8 max_bit_width, u64 *address);
-acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg);
+acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg);
-acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg);
+acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg);
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 29a863c85318..29555c8789a3 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -101,7 +101,8 @@ typedef const struct acpi_exdump_info {
*/
acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
- union acpi_operand_object **result_desc, u32 flags);
+ union acpi_operand_object **result_desc,
+ u32 implicit_conversion);
acpi_status
acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
@@ -424,9 +425,6 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
struct acpi_walk_state *walk_state,
u8 implicit_conversion);
-#define ACPI_IMPLICIT_CONVERSION TRUE
-#define ACPI_NO_IMPLICIT_CONVERSION FALSE
-
/*
* exstoren - resolve/store object
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 745134ade35f..83b75e9db7ef 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -141,6 +141,11 @@ extern const char *acpi_gbl_ptyp_decode[];
#define ACPI_MSG_SUFFIX \
acpi_os_printf (" (%8.8X/%s-%u)\n", ACPI_CA_VERSION, module_name, line_number)
+/* Flags to indicate implicit or explicit string-to-integer conversion */
+
+#define ACPI_IMPLICIT_CONVERSION TRUE
+#define ACPI_NO_IMPLICIT_CONVERSION FALSE
+
/* Types for Resource descriptor entries */
#define ACPI_INVALID_RESOURCE 0
@@ -197,15 +202,31 @@ void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
-acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *ret_integer);
+/*
+ * utstrsuppt - string-to-integer conversion support functions
+ */
+acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value);
+
+acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr);
+
+acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr);
+
+char acpi_ut_remove_whitespace(char **string);
+
+char acpi_ut_remove_leading_zeros(char **string);
+
+u8 acpi_ut_detect_hex_prefix(char **string);
+
+u8 acpi_ut_detect_octal_prefix(char **string);
/*
- * Values for Flags above
- * Note: LIMIT values correspond to acpi_gbl_integer_byte_width values (4/8)
+ * utstrtoul64 - string-to-integer conversion functions
*/
-#define ACPI_STRTOUL_32BIT 0x04 /* 4 bytes */
-#define ACPI_STRTOUL_64BIT 0x08 /* 8 bytes */
-#define ACPI_STRTOUL_BASE16 0x10 /* Default: Base10/16 */
+acpi_status acpi_ut_strtoul64(char *string, u64 *ret_integer);
+
+u64 acpi_ut_explicit_strtoul64(char *string);
+
+u64 acpi_ut_implicit_strtoul64(char *string);
/*
* utglobal - Global data structures and procedures
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 857dbc43a9b1..32d546f0db2f 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -277,10 +277,7 @@ acpi_db_convert_to_object(acpi_object_type type,
default:
object->type = ACPI_TYPE_INTEGER;
- status = acpi_ut_strtoul64(string,
- (acpi_gbl_integer_byte_width |
- ACPI_STRTOUL_BASE16),
- &object->integer.value);
+ status = acpi_ut_strtoul64(string, &object->integer.value);
break;
}
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 20d7744b06ae..22f45d090733 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -134,7 +134,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
* object. Implicitly convert the argument if necessary.
*/
status = acpi_ex_convert_to_integer(obj_desc, &local_obj_desc,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 229382035550..263d8fc4a9e2 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -390,8 +390,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
struct acpi_gpe_handler_info *gpe_handler_info;
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
- u32 status_reg;
- u32 enable_reg;
+ u64 status_reg;
+ u64 enable_reg;
acpi_cpu_flags flags;
u32 i;
u32 j;
@@ -472,7 +472,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
gpe_register_info->base_gpe_number,
gpe_register_info->base_gpe_number +
(ACPI_GPE_REGISTER_WIDTH - 1),
- status_reg, enable_reg,
+ (u32)status_reg, (u32)enable_reg,
gpe_register_info->enable_for_run,
gpe_register_info->enable_for_wake));
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 76bfb7dcae2f..59b8de2f07d3 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -156,7 +156,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
status =
acpi_ex_convert_to_integer(local_operand1, &temp_operand1,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_BUFFER:
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index f71028e334ee..23ebadb06a95 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -57,10 +57,10 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
*
* FUNCTION: acpi_ex_convert_to_integer
*
- * PARAMETERS: obj_desc - Object to be converted. Must be an
- * Integer, Buffer, or String
- * result_desc - Where the new Integer object is returned
- * flags - Used for string conversion
+ * PARAMETERS: obj_desc - Object to be converted. Must be an
+ * Integer, Buffer, or String
+ * result_desc - Where the new Integer object is returned
+ * implicit_conversion - Used for string conversion
*
* RETURN: Status
*
@@ -70,14 +70,14 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
acpi_status
acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
- union acpi_operand_object **result_desc, u32 flags)
+ union acpi_operand_object **result_desc,
+ u32 implicit_conversion)
{
union acpi_operand_object *return_desc;
u8 *pointer;
u64 result;
u32 i;
u32 count;
- acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ex_convert_to_integer, obj_desc);
@@ -123,12 +123,18 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
* hexadecimal as per the ACPI specification. The only exception (as
* of ACPI 3.0) is that the to_integer() operator allows both decimal
* and hexadecimal strings (hex prefixed with "0x").
+ *
+ * Explicit conversion is used only by to_integer.
+ * All other string-to-integer conversions are implicit conversions.
*/
- status = acpi_ut_strtoul64(ACPI_CAST_PTR(char, pointer),
- (acpi_gbl_integer_byte_width |
- flags), &result);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (implicit_conversion) {
+ result =
+ acpi_ut_implicit_strtoul64(ACPI_CAST_PTR
+ (char, pointer));
+ } else {
+ result =
+ acpi_ut_explicit_strtoul64(ACPI_CAST_PTR
+ (char, pointer));
}
break;
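
For reference, a minimal hypothetical caller showing the behavioral difference this argument selects (per the conversion rules documented in utstrtoul64.c later in this patch): the same digit string yields different values on the two paths.

/* Illustration only; not part of the patch. */
static void convert_to_integer_example(void)
{
	u64 a = acpi_ut_implicit_strtoul64("123");  /* implicit: always hex -> 0x123 (291) */
	u64 b = acpi_ut_explicit_strtoul64("123");  /* explicit (to_integer): decimal -> 123 */

	(void)a;
	(void)b;
}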
@@ -631,7 +637,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
*/
status =
acpi_ex_convert_to_integer(source_desc, result_desc,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 1e7649ce0a7b..dbad3ebd7df5 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -330,7 +330,7 @@ acpi_ex_do_logical_op(u16 opcode,
case ACPI_TYPE_INTEGER:
status = acpi_ex_convert_to_integer(operand1, &local_operand1,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
break;
case ACPI_TYPE_STRING:
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c4852429e2ff..1c7c9962b0de 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -415,7 +415,7 @@ acpi_ex_resolve_operands(u16 opcode,
* Known as "Implicit Source Operand Conversion"
*/
status = acpi_ex_convert_to_integer(obj_desc, stack_ptr,
- ACPI_STRTOUL_BASE16);
+ ACPI_IMPLICIT_CONVERSION);
if (ACPI_FAILURE(status)) {
if (status == AE_TYPE) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 5eb11b30a79e..09b6822aa5cc 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -99,7 +99,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status = AE_OK;
- u32 enable_mask;
+ u64 enable_mask;
u32 register_bit;
ACPI_FUNCTION_ENTRY();
@@ -214,7 +214,7 @@ acpi_status
acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status)
{
- u32 in_byte;
+ u64 in_byte;
u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
acpi_event_status local_event_status = 0;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index acb417b58bbb..aa6e00081915 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -220,16 +220,15 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
*
* RETURN: Status
*
- * DESCRIPTION: Read from either memory or IO space. This is a 32-bit max
- * version of acpi_read, used internally since the overhead of
- * 64-bit values is not needed.
+ * DESCRIPTION: Read from either memory or IO space. This is a 64-bit max
+ * version of acpi_read.
*
* LIMITATIONS: <These limitations also apply to acpi_hw_write>
* space_ID must be system_memory or system_IO.
*
******************************************************************************/
-acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
+acpi_status acpi_hw_read(u64 *value, struct acpi_generic_address *reg)
{
u64 address;
u8 access_width;
@@ -244,17 +243,17 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
/* Validate contents of the GAS register */
- status = acpi_hw_validate_register(reg, 32, &address);
+ status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
- * Initialize entire 32-bit return value to zero, convert access_width
+ * Initialize entire 64-bit return value to zero, convert access_width
* into number of bits based
*/
*value = 0;
- access_width = acpi_hw_get_access_bit_width(address, reg, 32);
+ access_width = acpi_hw_get_access_bit_width(address, reg, 64);
bit_width = reg->bit_offset + reg->bit_width;
bit_offset = reg->bit_offset;
@@ -265,7 +264,7 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
index = 0;
while (bit_width) {
if (bit_offset >= access_width) {
- value32 = 0;
+ value64 = 0;
bit_offset -= access_width;
} else {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -276,7 +275,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
ACPI_DIV_8
(access_width),
&value64, access_width);
- value32 = (u32)value64;
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
status = acpi_hw_read_port((acpi_io_address)
@@ -286,15 +284,16 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
(access_width),
&value32,
access_width);
+ value64 = (u64)value32;
}
}
/*
* Use offset style bit writes because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
+ * ensured to be less than 64-bits by acpi_hw_validate_register().
*/
ACPI_SET_BITS(value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32(access_width), value32);
+ ACPI_MASK_BITS_ABOVE_64(access_width), value64);
bit_width -=
bit_width > access_width ? access_width : bit_width;
@@ -302,8 +301,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
- *value, access_width, ACPI_FORMAT_UINT64(address),
+ "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(*value), access_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
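
The loop above assembles the 64-bit result from access_width-sized chunks via ACPI_SET_BITS; a simplified standalone equivalent is sketched below (hypothetical helper using standard C types, assuming index * access_width stays below 64 bits, as acpi_hw_validate_register() guarantees in the driver).

#include <stdint.h>

static uint64_t assemble_chunks(const uint64_t *chunks, uint8_t access_width,
				unsigned int nchunks)
{
	uint64_t mask = (access_width >= 64) ? ~0ULL
					     : (((uint64_t)1 << access_width) - 1);
	uint64_t value = 0;
	unsigned int i;

	for (i = 0; i < nchunks; i++) {
		/* Chunk 0 is least significant; the shift stays below 64 bits */
		value |= (chunks[i] & mask) << (i * access_width);
	}
	return value;
}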
@@ -318,20 +318,18 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
*
* RETURN: Status
*
- * DESCRIPTION: Write to either memory or IO space. This is a 32-bit max
- * version of acpi_write, used internally since the overhead of
- * 64-bit values is not needed.
+ * DESCRIPTION: Write to either memory or IO space. This is a 64-bit max
+ * version of acpi_write.
*
******************************************************************************/
-acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
+acpi_status acpi_hw_write(u64 value, struct acpi_generic_address *reg)
{
u64 address;
u8 access_width;
u32 bit_width;
u8 bit_offset;
u64 value64;
- u32 value32;
u8 index;
acpi_status status;
@@ -339,14 +337,14 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
/* Validate contents of the GAS register */
- status = acpi_hw_validate_register(reg, 32, &address);
+ status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/* Convert access_width into number of bits based */
- access_width = acpi_hw_get_access_bit_width(address, reg, 32);
+ access_width = acpi_hw_get_access_bit_width(address, reg, 64);
bit_width = reg->bit_offset + reg->bit_width;
bit_offset = reg->bit_offset;
@@ -358,16 +356,15 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
while (bit_width) {
/*
* Use offset style bit reads because "Index * AccessWidth" is
- * ensured to be less than 32-bits by acpi_hw_validate_register().
+ * ensured to be less than 64-bits by acpi_hw_validate_register().
*/
- value32 = ACPI_GET_BITS(&value, index * access_width,
- ACPI_MASK_BITS_ABOVE_32(access_width));
+ value64 = ACPI_GET_BITS(&value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_64(access_width));
if (bit_offset >= access_width) {
bit_offset -= access_width;
} else {
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- value64 = (u64)value32;
status =
acpi_os_write_memory((acpi_physical_address)
address +
@@ -382,7 +379,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
index *
ACPI_DIV_8
(access_width),
- value32,
+ (u32)value64,
access_width);
}
}
@@ -397,8 +394,9 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n",
- value, access_width, ACPI_FORMAT_UINT64(address),
+ "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
+ ACPI_FORMAT_UINT64(value), access_width,
+ ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
@@ -526,6 +524,7 @@ acpi_status acpi_hw_write_pm1_control(u32 pm1a_control, u32 pm1b_control)
acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
{
u32 value = 0;
+ u64 value64;
acpi_status status;
ACPI_FUNCTION_TRACE(hw_register_read);
@@ -564,12 +563,14 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
case ACPI_REGISTER_PM2_CONTROL: /* 8-bit access */
status =
- acpi_hw_read(&value, &acpi_gbl_FADT.xpm2_control_block);
+ acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
+ value = (u32)value64;
break;
case ACPI_REGISTER_PM_TIMER: /* 32-bit access */
- status = acpi_hw_read(&value, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
+ value = (u32)value64;
break;
case ACPI_REGISTER_SMI_COMMAND_BLOCK: /* 8-bit access */
@@ -586,7 +587,7 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
}
if (ACPI_SUCCESS(status)) {
- *return_value = value;
+ *return_value = (u32)value;
}
return_ACPI_STATUS(status);
@@ -622,6 +623,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
{
acpi_status status;
u32 read_value;
+ u64 read_value64;
ACPI_FUNCTION_TRACE(hw_register_write);
@@ -685,11 +687,12 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
* as per the ACPI spec.
*/
status =
- acpi_hw_read(&read_value,
+ acpi_hw_read(&read_value64,
&acpi_gbl_FADT.xpm2_control_block);
if (ACPI_FAILURE(status)) {
goto exit;
}
+ read_value = (u32)read_value64;
/* Insert the bits to be preserved */
@@ -745,22 +748,25 @@ acpi_hw_read_multiple(u32 *value,
{
u32 value_a = 0;
u32 value_b = 0;
+ u64 value64;
acpi_status status;
/* The first register is always required */
- status = acpi_hw_read(&value_a, register_a);
+ status = acpi_hw_read(&value64, register_a);
if (ACPI_FAILURE(status)) {
return (status);
}
+ value_a = (u32)value64;
/* Second register is optional */
if (register_b->address) {
- status = acpi_hw_read(&value_b, register_b);
+ status = acpi_hw_read(&value64, register_b);
if (ACPI_FAILURE(status)) {
return (status);
}
+ value_b = (u32)value64;
}
/*
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index b3c5d8c754bb..a2f4e25d45b1 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -94,6 +94,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
acpi_status acpi_get_timer(u32 * ticks)
{
acpi_status status;
+ u64 timer_value;
ACPI_FUNCTION_TRACE(acpi_get_timer);
@@ -107,7 +108,14 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_SUPPORT);
}
- status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+ status = acpi_hw_read(&timer_value, &acpi_gbl_FADT.xpm_timer_block);
+ if (ACPI_SUCCESS(status)) {
+
+ /* ACPI PM Timer is defined to be 32 bits (PM_TMR_LEN) */
+
+ *ticks = (u32)timer_value;
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 34684ae89981..b3c6e439933c 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -125,76 +125,12 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
- u32 value_lo;
- u32 value_hi;
- u32 width;
- u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_read);
- if (!return_value) {
- return (AE_BAD_PARAMETER);
- }
-
- /* Validate contents of the GAS register. Allow 64-bit transfers */
-
- status = acpi_hw_validate_register(reg, 64, &address);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Two address spaces supported: Memory or I/O. PCI_Config is
- * not supported here because the GAS structure is insufficient
- */
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_read_memory((acpi_physical_address)
- address, return_value,
- reg->bit_width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- value_lo = 0;
- value_hi = 0;
-
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
- status = acpi_hw_read_port((acpi_io_address)
- address, &value_lo, width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- if (reg->bit_width == 64) {
-
- /* Read the top 32 bits */
-
- status = acpi_hw_read_port((acpi_io_address)
- (address + 4), &value_hi,
- 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
-
- /* Set the return value only if status is AE_OK */
-
- *return_value = (value_lo | ((u64)value_hi << 32));
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
- ACPI_FORMAT_UINT64(*return_value), reg->bit_width,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(reg->space_id)));
-
- return (AE_OK);
+ status = acpi_hw_read(return_value, reg);
+ return (status);
}
ACPI_EXPORT_SYMBOL(acpi_read)
@@ -213,59 +149,11 @@ ACPI_EXPORT_SYMBOL(acpi_read)
******************************************************************************/
acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
{
- u32 width;
- u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_write);
- /* Validate contents of the GAS register. Allow 64-bit transfers */
-
- status = acpi_hw_validate_register(reg, 64, &address);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- /*
- * Two address spaces supported: Memory or IO. PCI_Config is
- * not supported here because the GAS structure is insufficient
- */
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_write_memory((acpi_physical_address)
- address, value, reg->bit_width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
- width = reg->bit_width;
- if (width == 64) {
- width = 32; /* Break into two 32-bit transfers */
- }
-
- status = acpi_hw_write_port((acpi_io_address)
- address, ACPI_LODWORD(value),
- width);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
-
- if (reg->bit_width == 64) {
- status = acpi_hw_write_port((acpi_io_address)
- (address + 4),
- ACPI_HIDWORD(value), 32);
- if (ACPI_FAILURE(status)) {
- return (status);
- }
- }
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
- ACPI_FORMAT_UINT64(value), reg->bit_width,
- ACPI_FORMAT_UINT64(address),
- acpi_ut_get_region_name(reg->space_id)));
-
+ status = acpi_hw_write(value, reg);
return (status);
}
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index e4a7da8a11f0..539d775bbc92 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -78,8 +78,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
/* String-to-Integer conversion */
- status = acpi_ut_strtoul64(original_object->string.pointer,
- acpi_gbl_integer_byte_width, &value);
+ status =
+ acpi_ut_strtoul64(original_object->string.pointer, &value);
if (ACPI_FAILURE(status)) {
return (status);
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 26ad596c973e..5ecb8d2e6834 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -173,10 +173,13 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
/*
- * Only reallocate the root table if the host provided a static buffer
- * for the table array in the call to acpi_initialize_tables.
+ * If any tables remain unverified, the root table list must be reallocated
+ * to clean up invalid table entries. Otherwise, only
+ * reallocate the root table list if the host provided a static buffer
+ * for the table array in the call to acpi_initialize_tables().
*/
- if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
+ if ((acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) &&
+ acpi_gbl_enable_table_validation) {
return_ACPI_STATUS(AE_SUPPORT);
}
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
new file mode 100644
index 000000000000..965fb5cec94f
--- /dev/null
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -0,0 +1,438 @@
+/*******************************************************************************
+ *
+ * Module Name: utstrsuppt - Support functions for string-to-integer conversion
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utstrsuppt")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
+
+static acpi_status
+acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+
+static acpi_status
+acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_octal_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 8 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned octal value is 01777777777777777777777
+ * Maximum 32-bit unsigned octal value is 037777777777
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_octal_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Character must be ASCII 0-7, otherwise terminate with no error */
+
+ if (!(ACPI_IS_OCTAL_DIGIT(*string))) {
+ break;
+ }
+
+ /* Convert and insert this octal digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 8, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_OCTAL_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_decimal_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 10 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned decimal value is 18446744073709551615
+ * Maximum 32-bit unsigned decimal value is 4294967295
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_decimal_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Character must be ASCII 0-9, otherwise terminate with no error */
+
+ if (!isdigit(*string)) {
+ break;
+ }
+
+ /* Convert and insert this decimal digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 10, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_DECIMAL_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_hex_string
+ *
+ * PARAMETERS: string - Null terminated input string
+ * return_value_ptr - Where the converted value is returned
+ *
+ * RETURN: Status and 64-bit converted integer
+ *
+ * DESCRIPTION: Performs a base 16 conversion of the input string to an
+ * integer value, either 32 or 64 bits.
+ *
+ * NOTE: Maximum 64-bit unsigned hex value is 0xFFFFFFFFFFFFFFFF
+ * Maximum 32-bit unsigned hex value is 0xFFFFFFFF
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_convert_hex_string(char *string, u64 *return_value_ptr)
+{
+ u64 accumulated_value = 0;
+ acpi_status status = AE_OK;
+
+ /* Convert each ASCII byte in the input string */
+
+ while (*string) {
+
+ /* Must be ASCII A-F, a-f, or 0-9, otherwise terminate with no error */
+
+ if (!isxdigit(*string)) {
+ break;
+ }
+
+ /* Convert and insert this hex digit into the accumulator */
+
+ status = acpi_ut_insert_digit(&accumulated_value, 16, *string);
+ if (ACPI_FAILURE(status)) {
+ status = AE_HEX_OVERFLOW;
+ break;
+ }
+
+ string++;
+ }
+
+ /* Always return the value that has been accumulated */
+
+ *return_value_ptr = accumulated_value;
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_leading_zeros
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: Next character after any leading zeros. This character may be
+ * used by the caller to detect end-of-string.
+ *
+ * DESCRIPTION: Remove any leading zeros in the input string. Return the
+ * next character after the final ASCII zero to enable the caller
+ * to check for the end of the string (NULL terminator).
+ *
+ ******************************************************************************/
+
+char acpi_ut_remove_leading_zeros(char **string)
+{
+
+ while (**string == ACPI_ASCII_ZERO) {
+ *string += 1;
+ }
+
+ return (**string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_whitespace
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: Next character after any whitespace. This character may be
+ * used by the caller to detect end-of-string.
+ *
+ * DESCRIPTION: Remove any leading whitespace in the input string. Return the
+ * next character after the final whitespace character to enable the caller
+ * to check for the end of the string (NULL terminator).
+ *
+ ******************************************************************************/
+
+char acpi_ut_remove_whitespace(char **string)
+{
+
+ while (isspace((u8)**string)) {
+ *string += 1;
+ }
+
+ return (**string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_detect_hex_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: TRUE if a "0x" prefix was found at the start of the string
+ *
+ * DESCRIPTION: Detect and remove a hex "0x" prefix
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_detect_hex_prefix(char **string)
+{
+
+ if ((**string == ACPI_ASCII_ZERO) &&
+ (tolower((int)*(*string + 1)) == 'x')) {
+ *string += 2; /* Go past the leading 0x */
+ return (TRUE);
+ }
+
+ return (FALSE); /* Not a hex string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_detect_octal_prefix
+ *
+ * PARAMETERS: string - Pointer to input ASCII string
+ *
+ * RETURN: True if an octal "0" prefix was found at the start of the
+ * string
+ *
+ * DESCRIPTION: Detect and remove an octal prefix (zero)
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_detect_octal_prefix(char **string)
+{
+
+ if (**string == ACPI_ASCII_ZERO) {
+ *string += 1; /* Go past the leading 0 */
+ return (TRUE);
+ }
+
+ return (FALSE); /* Not an octal string */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_insert_digit
+ *
+ * PARAMETERS: accumulated_value - Current value of the integer value
+ * accumulator. The new value is
+ * returned here.
+ * base - Radix, either 8/10/16
+ * ascii_digit - ASCII single digit to be inserted
+ *
+ * RETURN: Status and result of the convert/insert operation. The only
+ * possible returned exception code is numeric overflow of
+ * either the multiply or add conversion operations.
+ *
+ * DESCRIPTION: Generic conversion and insertion function for all bases:
+ *
+ * 1) Multiply the current accumulated/converted value by the
+ * base in order to make room for the new character.
+ *
+ * 2) Convert the new character to binary and add it to the
+ * current accumulated value.
+ *
+ * Note: The only possible exception indicates an integer
+ * overflow (AE_NUMERIC_OVERFLOW)
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
+{
+ acpi_status status;
+ u64 product;
+
+ /* Make room in the accumulated value for the incoming digit */
+
+ status = acpi_ut_strtoul_multiply64(*accumulated_value, base, &product);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Add in the new digit, and store the sum to the accumulated value */
+
+ status =
+ acpi_ut_strtoul_add64(product,
+ acpi_ut_ascii_char_to_hex(ascii_digit),
+ accumulated_value);
+
+ return (status);
+}
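+
+A short worked trace of this routine for the hexadecimal string "2A" (illustration only):
+
+/*
+ * accumulated_value = 0
+ * insert '2':  (0x0 * 16) + 0x2 = 0x02
+ * insert 'A':  (0x2 * 16) + 0xA = 0x2A   (42 decimal)
+ */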
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul_multiply64
+ *
+ * PARAMETERS: multiplicand - Current accumulated converted integer
+ * multiplier - Base/Radix
+ * out_product - Where the product is returned
+ *
+ * RETURN: Status and 64-bit product
+ *
+ * DESCRIPTION: Multiply two 64-bit values, with checking for 64-bit overflow as
+ * well as 32-bit overflow if necessary (if the current global
+ * integer width is 32).
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+{
+ u64 val;
+
+ /* Exit if either operand is zero */
+
+ *out_product = 0;
+ if (!multiplicand || !multiplier) {
+ return (AE_OK);
+ }
+
+ /* Check for 64-bit overflow before the actual multiplication */
+
+ acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
+ if (multiplicand > val) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ val = multiplicand * multiplier;
+
+ /* Check for 32-bit overflow if necessary */
+
+ if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ *out_product = val;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_strtoul_add64
+ *
+ * PARAMETERS: addend1 - Current accumulated converted integer
+ * addend2 - New hex value/char
+ * out_sum - Where sum is returned (Accumulator)
+ *
+ * RETURN: Status and 64-bit sum
+ *
+ * DESCRIPTION: Add two 64-bit values, with checking for 64-bit overflow as
+ * well as 32-bit overflow if necessary (if the current global
+ * integer width is 32).
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+{
+ u64 sum;
+
+ /* Check for 64-bit overflow before the actual addition */
+
+ if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ sum = addend1 + addend2;
+
+ /* Check for 32-bit overflow if necessary */
+
+ if ((acpi_gbl_integer_bit_width == 32) && (sum > ACPI_UINT32_MAX)) {
+ return (AE_NUMERIC_OVERFLOW);
+ }
+
+ *out_sum = sum;
+ return (AE_OK);
+}
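
Both helpers test for overflow before performing the operation rather than inspecting the result afterwards; a standalone sketch of the same pattern (hypothetical, standard C types):

#include <stdint.h>

/* Returns 0 on success, -1 on 64-bit overflow (same pre-check pattern as above) */
static int checked_mul_add(uint64_t acc, uint64_t base, uint64_t digit,
			   uint64_t *out)
{
	if (base && acc > UINT64_MAX / base)	/* multiply would overflow */
		return -1;
	acc *= base;

	if (digit > UINT64_MAX - acc)		/* addition would overflow */
		return -1;

	*out = acc + digit;
	return 0;
}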
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index 9633ee142855..e2067dcb9389 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -1,6 +1,7 @@
/*******************************************************************************
*
- * Module Name: utstrtoul64 - string to 64-bit integer support
+ * Module Name: utstrtoul64 - String-to-integer conversion support for both
+ * 64-bit and 32-bit integers
*
******************************************************************************/
@@ -44,304 +45,319 @@
#include <acpi/acpi.h>
#include "accommon.h"
-/*******************************************************************************
- *
- * The functions in this module satisfy the need for 64-bit string-to-integer
- * conversions on both 32-bit and 64-bit platforms.
- *
- ******************************************************************************/
-
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utstrtoul64")
-/* Local prototypes */
-static u64 acpi_ut_strtoul_base10(char *string, u32 flags);
-
-static u64 acpi_ut_strtoul_base16(char *string, u32 flags);
-
/*******************************************************************************
*
- * String conversion rules as written in the ACPI specification. The error
- * conditions and behavior are different depending on the type of conversion.
- *
- *
- * Implicit data type conversion: string-to-integer
- * --------------------------------------------------
- *
- * Base is always 16. This is the ACPI_STRTOUL_BASE16 case.
- *
- * Example:
- * Add ("BA98", Arg0, Local0)
- *
- * The integer is initialized to the value zero.
- * The ASCII string is interpreted as a hexadecimal constant.
+ * This module contains the top-level string to 64/32-bit unsigned integer
+ * conversion functions:
*
- * 1) A "0x" prefix is not allowed. However, ACPICA allows this for
- * compatibility with previous ACPICA. (NO ERROR)
+ * 1) A standard strtoul() function that supports 64-bit integers, base
+ * 8/10/16, with integer overflow support. This is used mainly by the
+ * iASL compiler, which implements tighter constraints on integer
+ * constants than the runtime (interpreter) string-to-integer conversions.
+ * 2) Runtime "Explicit conversion" as defined in the ACPI specification.
+ * 3) Runtime "Implicit conversion" as defined in the ACPI specification.
*
- * 2) Terminates when the size of an integer is reached (32 or 64 bits).
- * (NO ERROR)
+ * Current users of this module:
*
- * 3) The first non-hex character terminates the conversion without error.
- * (NO ERROR)
- *
- * 4) Conversion of a null (zero-length) string to an integer is not
- * allowed. However, ACPICA allows this for compatibility with previous
- * ACPICA. This conversion returns the value 0. (NO ERROR)
- *
- *
- * Explicit data type conversion: to_integer() with string operand
- * ---------------------------------------------------------------
- *
- * Base is either 10 (default) or 16 (with 0x prefix)
- *
- * Examples:
- * to_integer ("1000")
- * to_integer ("0xABCD")
- *
- * 1) Can be (must be) either a decimal or hexadecimal numeric string.
- * A hex value must be prefixed by "0x" or it is interpreted as a decimal.
+ * iASL - Preprocessor (constants and math expressions)
+ * iASL - Main parser, conversion of constants to integers
+ * iASL - Data Table Compiler parser (constants and math expressions)
+ * interpreter - Implicit and explicit conversions, GPE method names
+ * interpreter - Repair code for return values from predefined names
+ * debugger - Command line input string conversion
+ * acpi_dump - ACPI table physical addresses
+ * acpi_exec - Support for namespace overrides
*
- * 2) The value must not exceed the maximum of an integer value. ACPI spec
- * states the behavior is "unpredictable", so ACPICA matches the behavior
- * of the implicit conversion case.(NO ERROR)
+ * Notes concerning users of these interfaces:
*
- * 3) Behavior on the first non-hex character is not specified by the ACPI
- * spec, so ACPICA matches the behavior of the implicit conversion case
- * and terminates. (NO ERROR)
+ * acpi_gbl_integer_byte_width is used to set the 32/64 bit limit for explicit
+ * and implicit conversions. This global must be set to the proper width.
+ * For the core ACPICA code, the width depends on the DSDT version. For the
+ * acpi_ut_strtoul64 interface, all conversions are 64 bits. This interface is
+ * used primarily for iASL, where the default width is 64 bits for all parsers,
+ * but error checking is performed later to flag cases where a 64-bit constant
+ * is wrongly defined in a 32-bit DSDT/SSDT.
*
- * 4) A null (zero-length) string is illegal.
- * However, ACPICA allows this for compatibility with previous ACPICA.
- * This conversion returns the value 0. (NO ERROR)
+ * In ACPI, the only place where octal numbers are supported is within
+ * the ASL language itself. This is implemented via the main acpi_ut_strtoul64
+ * interface. According to the ACPI specification, there is no ACPI runtime
+ * support (explicit/implicit) for octal string conversions.
*
******************************************************************************/
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_strtoul64
*
- * PARAMETERS: string - Null terminated input string
- * flags - Conversion info, see below
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
* return_value - Where the converted integer is
- * returned
- *
- * RETURN: Status and Converted value
+ * returned. Must be a valid pointer
*
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the input integer
- * size in Flags (often the current mode of the interpreter).
+ * RETURN: Status and converted integer. Returns an exception on a
+ * 64-bit numeric overflow
*
- * Values for Flags:
- * ACPI_STRTOUL_32BIT - Max integer value is 32 bits
- * ACPI_STRTOUL_64BIT - Max integer value is 64 bits
- * ACPI_STRTOUL_BASE16 - Input string is hexadecimal. Default
- * is 10/16 based on string prefix (0x).
+ * DESCRIPTION: Convert a string into an unsigned integer. Always performs a
+ * full 64-bit conversion, regardless of the current global
+ * integer width. Supports Decimal, Hex, and Octal strings.
*
- * NOTES:
- * Negative numbers are not supported, as they are not supported by ACPI.
+ * Current users of this function:
*
- * Supports only base 16 or base 10 strings/values. Does not
- * support Octal strings, as these are not supported by ACPI.
- *
- * Current users of this support:
- *
- * interpreter - Implicit and explicit conversions, GPE method names
- * debugger - Command line input string conversion
- * iASL - Main parser, conversion of constants to integers
- * iASL - Data Table Compiler parser (constant math expressions)
- * iASL - Preprocessor (constant math expressions)
- * acpi_dump - Input table addresses
- * acpi_exec - Testing of the acpi_ut_strtoul64 function
- *
- * Note concerning callers:
- * acpi_gbl_integer_byte_width can be used to set the 32/64 limit. If used,
- * this global should be set to the proper width. For the core ACPICA code,
- * this width depends on the DSDT version. For iASL, the default byte
- * width is always 8 for the parser, but error checking is performed later
- * to flag cases where a 64-bit constant is defined in a 32-bit DSDT/SSDT.
+ * iASL - Preprocessor (constants and math expressions)
+ * iASL - Main ASL parser, conversion of ASL constants to integers
+ * iASL - Data Table Compiler parser (constants and math expressions)
+ * interpreter - Repair code for return values from predefined names
+ * acpi_dump - ACPI table physical addresses
+ * acpi_exec - Support for namespace overrides
*
******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 flags, u64 *return_value)
+acpi_status acpi_ut_strtoul64(char *string, u64 *return_value)
{
acpi_status status = AE_OK;
- u32 base;
+ u8 original_bit_width;
+ u32 base = 10; /* Default is decimal */
ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
- /* Parameter validation */
-
- if (!string || !return_value) {
- return_ACPI_STATUS(AE_BAD_PARAMETER);
- }
-
*return_value = 0;
- /* Check for zero-length string, returns 0 */
+ /* A null (zero-length) input string returns a value of zero */
if (*string == 0) {
return_ACPI_STATUS(AE_OK);
}
- /* Skip over any white space at start of string */
-
- while (isspace((int)*string)) {
- string++;
- }
-
- /* End of string? return 0 */
-
- if (*string == 0) {
+ if (!acpi_ut_remove_whitespace(&string)) {
return_ACPI_STATUS(AE_OK);
}
/*
- * 1) The "0x" prefix indicates base 16. Per the ACPI specification,
- * the "0x" prefix is only allowed for implicit (non-strict) conversions.
- * However, we always allow it for compatibility with older ACPICA.
+ * 1) Check for a hex constant. A "0x" prefix indicates base 16.
*/
- if ((*string == ACPI_ASCII_ZERO) &&
- (tolower((int)*(string + 1)) == 'x')) {
- string += 2; /* Go past the 0x */
- if (*string == 0) {
- return_ACPI_STATUS(AE_OK); /* Return value 0 */
- }
-
+ if (acpi_ut_detect_hex_prefix(&string)) {
base = 16;
}
- /* 2) Force to base 16 (implicit conversion case) */
-
- else if (flags & ACPI_STRTOUL_BASE16) {
- base = 16;
+ /*
+ * 2) Check for an octal constant, defined to be a leading zero
+ * followed by sequence of octal digits (0-7)
+ */
+ else if (acpi_ut_detect_octal_prefix(&string)) {
+ base = 8;
}
- /* 3) Default fallback is to Base 10 */
-
- else {
- base = 10;
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_ACPI_STATUS(AE_OK); /* Return value 0 */
}
- /* Skip all leading zeros */
+ /*
+ * Force a full 64-bit conversion. The caller (usually iASL) must
+ * check for a 32-bit overflow later as necessary (if the current mode
+ * is 32-bit, meaning a 32-bit DSDT).
+ */
+ original_bit_width = acpi_gbl_integer_bit_width;
+ acpi_gbl_integer_bit_width = 64;
- while (*string == ACPI_ASCII_ZERO) {
- string++;
- if (*string == 0) {
- return_ACPI_STATUS(AE_OK); /* Return value 0 */
- }
+ /*
+ * Perform the base 8, 10, or 16 conversion. A 64-bit numeric overflow
+ * will return an exception (to allow iASL to flag the statement).
+ */
+ switch (base) {
+ case 8:
+ status = acpi_ut_convert_octal_string(string, return_value);
+ break;
+
+ case 10:
+ status = acpi_ut_convert_decimal_string(string, return_value);
+ break;
+
+ case 16:
+ default:
+ status = acpi_ut_convert_hex_string(string, return_value);
+ break;
}
- /* Perform the base 16 or 10 conversion */
-
- if (base == 16) {
- *return_value = acpi_ut_strtoul_base16(string, flags);
- } else {
- *return_value = acpi_ut_strtoul_base10(string, flags);
- }
+ /* Only possible exception from above is a 64-bit overflow */
+ acpi_gbl_integer_bit_width = original_bit_width;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
- * FUNCTION: acpi_ut_strtoul_base10
+ * FUNCTION: acpi_ut_implicit_strtoul64
+ *
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
+ *
+ * RETURN: Converted integer
+ *
+ * DESCRIPTION: Perform a 64-bit conversion with restrictions placed upon
+ * an "implicit conversion" by the ACPI specification. Used by
+ * many ASL operators that require an integer operand, and support
+ * an automatic (implicit) conversion from a string operand
+ * to the final integer operand. The major restriction is that
+ * only hex strings are supported.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Base is always 16, either with or without the 0x prefix. Decimal and
+ * Octal strings are not supported, as per the ACPI specification.
+ *
+ * Examples (both are hex values):
+ * Add ("BA98", Arg0, Local0)
+ * Subtract ("0x12345678", Arg1, Local1)
+ *
+ * Conversion rules as extracted from the ACPI specification:
+ *
+ * The converted integer is initialized to the value zero.
+ * The ASCII string is always interpreted as a hexadecimal constant.
+ *
+ * 1) According to the ACPI specification, a "0x" prefix is not allowed.
+ * However, ACPICA allows this as an ACPI extension on general
+ * principle. (NO ERROR)
+ *
+ * 2) The conversion terminates when the size of an integer is reached
+ * (32 or 64 bits). There are no numeric overflow conditions. (NO ERROR)
+ *
+ * 3) The first non-hex character terminates the conversion and returns
+ * the current accumulated value of the converted integer (NO ERROR).
*
- * PARAMETERS: string - Null terminated input string
- * flags - Conversion info
+ * 4) Conversion of a null (zero-length) string to an integer is
+ * technically not allowed. However, ACPICA allows this as an ACPI
+ * extension. The conversion returns the value 0. (NO ERROR)
*
- * RETURN: 64-bit converted integer
+ * NOTE: There are no error conditions returned by this function. At
+ * the minimum, a value of zero is returned.
*
- * DESCRIPTION: Performs a base 10 conversion of the input string to an
- * integer value, either 32 or 64 bits.
- * Note: String must be valid and non-null.
+ * Current users of this function:
+ *
+ * interpreter - All runtime implicit conversions, as per ACPI specification
+ * iASL - Data Table Compiler parser (constants and math expressions)
*
******************************************************************************/
-static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
+u64 acpi_ut_implicit_strtoul64(char *string)
{
- int ascii_digit;
- u64 next_value;
- u64 return_value = 0;
-
- /* Main loop: convert each ASCII byte in the input string */
-
- while (*string) {
- ascii_digit = *string;
- if (!isdigit(ascii_digit)) {
-
- /* Not ASCII 0-9, terminate */
-
- goto exit;
- }
-
- /* Convert and insert (add) the decimal digit */
+ u64 converted_integer = 0;
- acpi_ut_short_multiply(return_value, 10, &next_value);
- next_value += (ascii_digit - ACPI_ASCII_ZERO);
+ ACPI_FUNCTION_TRACE_STR(ut_implicit_strtoul64, string);
- /* Check for overflow (32 or 64 bit) - return current converted value */
+ if (!acpi_ut_remove_whitespace(&string)) {
+ return_VALUE(0);
+ }
- if (((flags & ACPI_STRTOUL_32BIT) && (next_value > ACPI_UINT32_MAX)) || (next_value < return_value)) { /* 64-bit overflow case */
- goto exit;
- }
+ /*
+ * Per the ACPI specification, only hexadecimal is supported for
+ * implicit conversions, and the "0x" prefix is "not allowed".
+ * However, allow a "0x" prefix as an ACPI extension.
+ */
+ acpi_ut_detect_hex_prefix(&string);
- return_value = next_value;
- string++;
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_VALUE(0);
}
-exit:
- return (return_value);
+ /*
+ * Ignore overflow as per the ACPI specification. This is implemented by
+ * ignoring the return status from the conversion function called below.
+ * On overflow, the input string is simply truncated.
+ */
+ acpi_ut_convert_hex_string(string, &converted_integer);
+ return_VALUE(converted_integer);
}
/*******************************************************************************
*
- * FUNCTION: acpi_ut_strtoul_base16
+ * FUNCTION: acpi_ut_explicit_strtoul64
+ *
+ * PARAMETERS: string - Null terminated input string,
+ * must be a valid pointer
*
- * PARAMETERS: string - Null terminated input string
- * flags - conversion info
+ * RETURN: Converted integer
*
- * RETURN: 64-bit converted integer
+ * DESCRIPTION: Perform a 64-bit conversion with the restrictions placed upon
+ * an "explicit conversion" by the ACPI specification. The
+ * main restriction is that only hex and decimal are supported.
*
- * DESCRIPTION: Performs a base 16 conversion of the input string to an
- * integer value, either 32 or 64 bits.
- * Note: String must be valid and non-null.
+ * -----------------------------------------------------------------------------
+ *
+ * Base is either 10 (default) or 16 (with 0x prefix). Octal (base 8) strings
+ * are not supported, as per the ACPI specification.
+ *
+ * Examples:
+ * to_integer ("1000") Decimal
+ * to_integer ("0xABCD") Hex
+ *
+ * Conversion rules as extracted from the ACPI specification:
+ *
+ * 1) The input string is either a decimal or hexadecimal numeric string.
+ * A hex value must be prefixed by "0x" or it is interpreted as decimal.
+ *
+ * 2) The value must not exceed the maximum of an integer value
+ * (32 or 64 bits). The ACPI specification states the behavior is
+ * "unpredictable", so ACPICA matches the behavior of the implicit
+ * conversion case. There are no numeric overflow conditions. (NO ERROR)
+ *
+ * 3) Behavior on the first non-hex character is not defined by the ACPI
+ * specification (for the to_integer operator), so ACPICA matches the
+ * behavior of the implicit conversion case. It terminates the
+ * conversion and returns the current accumulated value of the converted
+ * integer. (NO ERROR)
+ *
+ * 4) Conversion of a null (zero-length) string to an integer is
+ * technically not allowed. However, ACPICA allows this as an ACPI
+ * extension. The conversion returns the value 0. (NO ERROR)
+ *
+ * NOTE: There are no error conditions returned by this function. At the
+ * minimum, a value of zero is returned.
+ *
+ * Current users of this function:
+ *
+ * interpreter - Runtime ASL to_integer operator, as per the ACPI specification
*
******************************************************************************/
-static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
+u64 acpi_ut_explicit_strtoul64(char *string)
{
- int ascii_digit;
- u32 valid_digits = 1;
- u64 return_value = 0;
-
- /* Main loop: convert each ASCII byte in the input string */
+ u64 converted_integer = 0;
+ u32 base = 10; /* Default is decimal */
- while (*string) {
+ ACPI_FUNCTION_TRACE_STR(ut_explicit_strtoul64, string);
- /* Check for overflow (32 or 64 bit) - return current converted value */
-
- if ((valid_digits > 16) ||
- ((valid_digits > 8) && (flags & ACPI_STRTOUL_32BIT))) {
- goto exit;
- }
-
- ascii_digit = *string;
- if (!isxdigit(ascii_digit)) {
-
- /* Not Hex ASCII A-F, a-f, or 0-9, terminate */
-
- goto exit;
- }
+ if (!acpi_ut_remove_whitespace(&string)) {
+ return_VALUE(0);
+ }
- /* Convert and insert the hex digit */
+ /*
+ * Only Hex and Decimal are supported, as per the ACPI specification.
+ * A "0x" prefix indicates hex; otherwise decimal is assumed.
+ */
+ if (acpi_ut_detect_hex_prefix(&string)) {
+ base = 16;
+ }
- acpi_ut_short_shift_left(return_value, 4, &return_value);
- return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
+ if (!acpi_ut_remove_leading_zeros(&string)) {
+ return_VALUE(0);
+ }
- string++;
- valid_digits++;
+ /*
+ * Ignore overflow as per the ACPI specification. This is implemented by
+ * ignoring the return status from the conversion functions called below.
+ * On overflow, the input string is simply truncated.
+ */
+ switch (base) {
+ case 10:
+ default:
+ acpi_ut_convert_decimal_string(string, &converted_integer);
+ break;
+
+ case 16:
+ acpi_ut_convert_hex_string(string, &converted_integer);
+ break;
}
-exit:
- return (return_value);
+ return_VALUE(converted_integer);
}
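
A hypothetical caller exercising the rewritten acpi_ut_strtoul64() under the rules documented above (octal via a leading zero, hex via "0x", decimal otherwise; a 64-bit numeric overflow is the only possible exception):

static void strtoul64_example(void)
{
	acpi_status status;
	u64 value;

	status = acpi_ut_strtoul64("017", &value);   /* octal:   value == 15   */
	status = acpi_ut_strtoul64("0x2A", &value);  /* hex:     value == 0x2A */
	status = acpi_ut_strtoul64("42", &value);    /* decimal: value == 42   */
	(void)status;
}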
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index de14d49a5c90..52ae5438edeb 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config HAVE_ACPI_APEI
bool
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
index e50573de25f1..4dfac2128737 100644
--- a/drivers/acpi/apei/Makefile
+++ b/drivers/acpi/apei/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cb4126051f62..1d6ef9654725 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* apei-internal.h - ACPI Platform Error Interface internal
* definitions.
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 2c462beee551..6742f6c68034 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1061,7 +1061,7 @@ static int erst_writer(struct pstore_record *record)
rcd->hdr.error_severity = CPER_SEV_FATAL;
/* timestamp valid. platform_id, partition_id are invalid */
rcd->hdr.validation_bits = CPER_VALID_TIMESTAMP;
- rcd->hdr.timestamp = get_seconds();
+ rcd->hdr.timestamp = ktime_get_real_seconds();
rcd->hdr.record_length = sizeof(*rcd) + record->size;
rcd->hdr.creator_id = CPER_CREATOR_PSTORE;
rcd->hdr.notification_type = CPER_NOTIFY_MCE;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 3c3a37b8503b..6402f7fad3bb 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -51,6 +51,7 @@
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
#include <acpi/apei.h>
+#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <ras/ras_event.h>
@@ -112,22 +113,10 @@ static DEFINE_MUTEX(ghes_list_mutex);
* Because the memory area used to transfer hardware error information
* from BIOS to Linux can be determined only in NMI, IRQ or timer
* handler, but general ioremap can not be used in atomic context, so
- * a special version of atomic ioremap is implemented for that.
- */
-
-/*
- * Two virtual pages are used, one for IRQ/PROCESS context, the other for
- * NMI context (optionally).
- */
-#define GHES_IOREMAP_PAGES 2
-#define GHES_IOREMAP_IRQ_PAGE(base) (base)
-#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE)
-
-/* virtual memory area for atomic ioremap */
-static struct vm_struct *ghes_ioremap_area;
-/*
- * These 2 spinlock is used to prevent atomic ioremap virtual memory
- * area from being mapped simultaneously.
+ * the fixmap is used instead.
+ *
+ * These 2 spinlocks are used to prevent the fixmap entries from being used
+ * simultaneously.
*/
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
@@ -140,71 +129,38 @@ static atomic_t ghes_estatus_cache_alloced;
static int ghes_panic_timeout __read_mostly = 30;
-static int ghes_ioremap_init(void)
-{
- ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
- VM_IOREMAP, VMALLOC_START, VMALLOC_END);
- if (!ghes_ioremap_area) {
- pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ghes_ioremap_exit(void)
-{
- free_vm_area(ghes_ioremap_area);
-}
-
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
- unsigned long vaddr;
phys_addr_t paddr;
pgprot_t prot;
- vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
-
paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
+ __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot);
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
}
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
- unsigned long vaddr, paddr;
+ phys_addr_t paddr;
pgprot_t prot;
- vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
-
paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr);
+ __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot);
- ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
-
- return (void __iomem *)vaddr;
+ return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
}
-static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
+static void ghes_iounmap_nmi(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- arch_apei_flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_NMI);
}
-static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
+static void ghes_iounmap_irq(void)
{
- unsigned long vaddr = (unsigned long __force)vaddr_ptr;
- void *base = ghes_ioremap_area->addr;
-
- BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
- unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
- arch_apei_flush_tlb_one(vaddr);
+ clear_fixmap(FIX_APEI_GHES_IRQ);
}
static int ghes_estatus_pool_init(void)
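
A simplified fragment showing how the fixmap helpers are used together, mirroring ghes_copy_tofrom_phys() in the following hunk (read direction only; error handling omitted; vaddr, paddr, buffer and trunk are assumed to be declared by the caller; illustrative only):

/* NMI path: map one PFN through the fixmap, copy, then unmap. */
raw_spin_lock(&ghes_ioremap_lock_nmi);
vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);	/* __set_fixmap() */
memcpy_fromio(buffer, vaddr + (paddr & ~PAGE_MASK), trunk);
ghes_iounmap_nmi();					/* clear_fixmap() */
raw_spin_unlock(&ghes_ioremap_lock_nmi);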
@@ -360,10 +316,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
paddr += trunk;
buffer += trunk;
if (in_nmi) {
- ghes_iounmap_nmi(vaddr);
+ ghes_iounmap_nmi();
raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else {
- ghes_iounmap_irq(vaddr);
+ ghes_iounmap_irq();
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}
}
@@ -774,9 +730,9 @@ static void ghes_add_timer(struct ghes *ghes)
add_timer(&ghes->timer);
}
-static void ghes_poll_func(unsigned long data)
+static void ghes_poll_func(struct timer_list *t)
{
- struct ghes *ghes = (void *)data;
+ struct ghes *ghes = from_timer(ghes, t, timer);
ghes_proc(ghes);
if (!(ghes->flags & GHES_EXITING))
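
The timer conversion above follows the standard timer_setup()/from_timer() migration pattern; a minimal self-contained sketch (hypothetical driver, not GHES code):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_poller {
	struct timer_list timer;
};

static void my_poll_func(struct timer_list *t)
{
	struct my_poller *p = from_timer(p, t, timer);

	/* ... poll the device, then re-arm the deferrable timer ... */
	mod_timer(&p->timer, jiffies + HZ);
}

static void my_poller_start(struct my_poller *p)
{
	timer_setup(&p->timer, my_poll_func, TIMER_DEFERRABLE);
	mod_timer(&p->timer, jiffies + HZ);
}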
@@ -851,17 +807,8 @@ static void ghes_sea_remove(struct ghes *ghes)
synchronize_rcu();
}
#else /* CONFIG_ACPI_APEI_SEA */
-static inline void ghes_sea_add(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to add SEA notification which is not supported\n",
- ghes->generic->header.source_id);
-}
-
-static inline void ghes_sea_remove(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to remove SEA notification which is not supported\n",
- ghes->generic->header.source_id);
-}
+static inline void ghes_sea_add(struct ghes *ghes) { }
+static inline void ghes_sea_remove(struct ghes *ghes) { }
#endif /* CONFIG_ACPI_APEI_SEA */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
@@ -1063,23 +1010,9 @@ static void ghes_nmi_init_cxt(void)
init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
-static inline void ghes_nmi_add(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
- ghes->generic->header.source_id);
- BUG();
-}
-
-static inline void ghes_nmi_remove(struct ghes *ghes)
-{
- pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
- ghes->generic->header.source_id);
- BUG();
-}
-
-static inline void ghes_nmi_init_cxt(void)
-{
-}
+static inline void ghes_nmi_add(struct ghes *ghes) { }
+static inline void ghes_nmi_remove(struct ghes *ghes) { }
+static inline void ghes_nmi_init_cxt(void) { }
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
static int ghes_probe(struct platform_device *ghes_dev)
@@ -1147,8 +1080,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
- setup_deferrable_timer(&ghes->timer, ghes_poll_func,
- (unsigned long)ghes);
+ timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
@@ -1285,13 +1217,9 @@ static int __init ghes_init(void)
ghes_nmi_init_cxt();
- rc = ghes_ioremap_init();
- if (rc)
- goto err;
-
rc = ghes_estatus_pool_init();
if (rc)
- goto err_ioremap_exit;
+ goto err;
rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
GHES_ESTATUS_CACHE_ALLOCED_MAX);
@@ -1315,8 +1243,6 @@ static int __init ghes_init(void)
return 0;
err_pool_exit:
ghes_estatus_pool_exit();
-err_ioremap_exit:
- ghes_ioremap_exit();
err:
return rc;
}
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 597a737d538f..92f9edf9d11e 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -199,7 +199,7 @@ static int __init gtdt_parse_timer_block(struct acpi_gtdt_timer_block *block,
struct acpi_gtdt_timer_entry *gtdt_frame;
if (!block->timer_count) {
- pr_err(FW_BUG "GT block present, but frame count is zero.");
+ pr_err(FW_BUG "GT block present, but frame count is zero.\n");
return -ENODEV;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index de56394dd161..95255ecfae7c 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -88,8 +88,8 @@ static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
*
* Returns: fwnode_handle pointer on success, NULL on failure
*/
-static inline
-struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
+static inline struct fwnode_handle *iort_get_fwnode(
+ struct acpi_iort_node *node)
{
struct iort_fwnode *curr;
struct fwnode_handle *fwnode = NULL;
@@ -126,6 +126,31 @@ static inline void iort_delete_fwnode(struct acpi_iort_node *node)
spin_unlock(&iort_fwnode_lock);
}
+/**
+ * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
+ *
+ * @fwnode: fwnode associated with device to be looked-up
+ *
+ * Returns: iort_node pointer on success, NULL on failure
+ */
+static inline struct acpi_iort_node *iort_get_iort_node(
+ struct fwnode_handle *fwnode)
+{
+ struct iort_fwnode *curr;
+ struct acpi_iort_node *iort_node = NULL;
+
+ spin_lock(&iort_fwnode_lock);
+ list_for_each_entry(curr, &iort_fwnode_list, list) {
+ if (curr->fwnode == fwnode) {
+ iort_node = curr->iort_node;
+ break;
+ }
+ }
+ spin_unlock(&iort_fwnode_lock);
+
+ return iort_node;
+}
+
typedef acpi_status (*iort_find_node_callback)
(struct acpi_iort_node *node, void *context);
@@ -306,9 +331,8 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
return 0;
}
-static
-struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
- u32 *id_out, int index)
+static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
+ u32 *id_out, int index)
{
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
@@ -332,7 +356,8 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
- node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
+ node->type == ACPI_IORT_NODE_SMMU_V3) {
*id_out = map->output_base;
return parent;
}
@@ -341,6 +366,47 @@ struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
return NULL;
}
+#if (ACPI_CA_VERSION > 0x20170929)
+static int iort_get_id_mapping_index(struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ switch (node->type) {
+ case ACPI_IORT_NODE_SMMU_V3:
+ /*
+ * The SMMUv3 dev ID mapping index was introduced in the revision 1
+ * table; it is not available in revision 0.
+ */
+ if (node->revision < 1)
+ return -EINVAL;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ /*
+ * ID mapping index is only ignored if all interrupts are
+ * GSIV based
+ */
+ if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
+ && smmu->sync_gsiv)
+ return -EINVAL;
+
+ if (smmu->id_mapping_index >= node->mapping_count) {
+ pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
+ node, node->type);
+ return -EINVAL;
+ }
+
+ return smmu->id_mapping_index;
+ default:
+ return -EINVAL;
+ }
+}
+#else
+static inline int iort_get_id_mapping_index(struct acpi_iort_node *node)
+{
+ return -EINVAL;
+}
+#endif
+
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
u32 id_in, u32 *id_out,
u8 type_mask)
@@ -350,7 +416,7 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
/* Parse the ID mapping tree to find specified node type */
while (node) {
struct acpi_iort_id_mapping *map;
- int i;
+ int i, index;
if (IORT_TYPE_MASK(node->type) & type_mask) {
if (id_out)
@@ -371,8 +437,19 @@ static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
goto fail_map;
}
+ /*
+ * Get the special ID mapping index (if any) and skip its
+ * associated ID map to prevent erroneous multi-stage
+ * IORT ID translations.
+ */
+ index = iort_get_id_mapping_index(node);
+
/* Do the ID translation */
for (i = 0; i < node->mapping_count; i++, map++) {
+ /* if it is the special mapping index, skip it */
+ if (i == index)
+ continue;
+
if (!iort_id_map(map, node->type, id, &id))
break;
}
@@ -392,10 +469,9 @@ fail_map:
return NULL;
}
-static
-struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
- u32 *id_out, u8 type_mask,
- int index)
+static struct acpi_iort_node *iort_node_map_platform_id(
+ struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
+ int index)
{
struct acpi_iort_node *parent;
u32 id;
@@ -424,9 +500,25 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
struct pci_bus *pbus;
- if (!dev_is_pci(dev))
+ if (!dev_is_pci(dev)) {
+ struct acpi_iort_node *node;
+ /*
+ * Scan iort_fwnode_list to see if the device is an IORT platform
+ * device (such as an SMMU or PMCG); its IORT node was already cached
+ * and associated with the fwnode when the IORT platform devices were
+ * initialized.
+ */
+ node = iort_get_iort_node(dev->fwnode);
+ if (node)
+ return node;
+
+ /*
+ * If not, it should be a platform device defined in DSDT/SSDT
+ * (with a Named Component node in IORT).
+ */
return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
+ }
/* Find a PCI root bus */
pbus = to_pci_dev(dev)->bus;
@@ -466,16 +558,24 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
*/
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
- int i;
+ int i, index;
struct acpi_iort_node *node;
node = iort_find_dev_node(dev);
if (!node)
return -ENODEV;
- for (i = 0; i < node->mapping_count; i++) {
- if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
+ index = iort_get_id_mapping_index(node);
+ /* if there is a valid index, go get the dev_id directly */
+ if (index >= 0) {
+ if (iort_node_get_id(node, dev_id, index))
return 0;
+ } else {
+ for (i = 0; i < node->mapping_count; i++) {
+ if (iort_node_map_platform_id(node, dev_id,
+ IORT_MSI_TYPE, i))
+ return 0;
+ }
}
return -ENODEV;
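
The special index handled here is the SMMUv3 node's own MSI device ID mapping, which is distinct from the stream ID mappings. A hypothetical worked example (values invented) of how the functions above and the iort_set_device_domain() added below use it:

/*
 * SMMUv3 node, revision 1, mapping_count = 2, id_mapping_index = 1:
 *
 *   mapping[0]: stream IDs 0x0000-0xffff            -> ITS group (translated)
 *   mapping[1]: SINGLE_MAPPING, output_base 0x20000 -> ITS group
 *
 * iort_get_id_mapping_index() returns 1, so iort_node_map_id() skips
 * mapping[1] when translating device stream IDs, while
 * iort_pmsi_get_dev_id() reads it directly and reports dev_id 0x20000
 * for the SMMU's own MSIs.
 */
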
@@ -538,6 +638,49 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}
+static void iort_set_device_domain(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_its_group *its;
+ struct acpi_iort_node *msi_parent;
+ struct acpi_iort_id_mapping *map;
+ struct fwnode_handle *iort_fwnode;
+ struct irq_domain *domain;
+ int index;
+
+ index = iort_get_id_mapping_index(node);
+ if (index < 0)
+ return;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+ node->mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->output_reference ||
+ !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
+ pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
+ node, node->type);
+ return;
+ }
+
+ msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+ map->output_reference);
+
+ if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
+ return;
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)msi_parent->node_data;
+
+ iort_fwnode = iort_find_domain_token(its->identifiers[0]);
+ if (!iort_fwnode)
+ return;
+
+ domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
+ if (domain)
+ dev_set_msi_domain(dev, domain);
+}
+
/**
* iort_get_platform_device_domain() - Find MSI domain related to a
* platform device
@@ -623,14 +766,14 @@ static inline bool iort_iommu_driver_enabled(u8 type)
}
#ifdef CONFIG_IOMMU_API
-static inline
-const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(
+ struct iommu_fwspec *fwspec)
{
return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}
-static inline
-int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+static inline int iort_add_device_replay(const struct iommu_ops *ops,
+ struct device *dev)
{
int err = 0;
@@ -640,11 +783,11 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
return err;
}
#else
-static inline
-const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(
+ struct iommu_fwspec *fwspec)
{ return NULL; }
-static inline
-int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
+static inline int iort_add_device_replay(const struct iommu_ops *ops,
+ struct device *dev)
{ return 0; }
#endif
@@ -968,7 +1111,7 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
-#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
+#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
@@ -1051,34 +1194,34 @@ static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}
-struct iort_iommu_config {
+struct iort_dev_config {
const char *name;
- int (*iommu_init)(struct acpi_iort_node *node);
- bool (*iommu_is_coherent)(struct acpi_iort_node *node);
- int (*iommu_count_resources)(struct acpi_iort_node *node);
- void (*iommu_init_resources)(struct resource *res,
+ int (*dev_init)(struct acpi_iort_node *node);
+ bool (*dev_is_coherent)(struct acpi_iort_node *node);
+ int (*dev_count_resources)(struct acpi_iort_node *node);
+ void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
- void (*iommu_set_proximity)(struct device *dev,
+ void (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
};
-static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
+static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
- .iommu_is_coherent = arm_smmu_v3_is_coherent,
- .iommu_count_resources = arm_smmu_v3_count_resources,
- .iommu_init_resources = arm_smmu_v3_init_resources,
- .iommu_set_proximity = arm_smmu_v3_set_proximity,
+ .dev_is_coherent = arm_smmu_v3_is_coherent,
+ .dev_count_resources = arm_smmu_v3_count_resources,
+ .dev_init_resources = arm_smmu_v3_init_resources,
+ .dev_set_proximity = arm_smmu_v3_set_proximity,
};
-static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
+static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
- .iommu_is_coherent = arm_smmu_is_coherent,
- .iommu_count_resources = arm_smmu_count_resources,
- .iommu_init_resources = arm_smmu_init_resources
+ .dev_is_coherent = arm_smmu_is_coherent,
+ .dev_count_resources = arm_smmu_count_resources,
+ .dev_init_resources = arm_smmu_init_resources
};
-static __init
-const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
+static __init const struct iort_dev_config *iort_get_dev_cfg(
+ struct acpi_iort_node *node)
{
switch (node->type) {
case ACPI_IORT_NODE_SMMU_V3:
@@ -1091,31 +1234,28 @@ const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
}
/**
- * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
- * @node: Pointer to SMMU ACPI IORT node
+ * iort_add_platform_device() - Allocate a platform device for IORT node
+ * @node: Pointer to device ACPI IORT node
*
* Returns: 0 on success, <0 failure
*/
-static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
+static int __init iort_add_platform_device(struct acpi_iort_node *node,
+ const struct iort_dev_config *ops)
{
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
enum dev_dma_attr attr;
int ret, count;
- const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
-
- if (!ops)
- return -ENODEV;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
- if (ops->iommu_set_proximity)
- ops->iommu_set_proximity(&pdev->dev, node);
+ if (ops->dev_set_proximity)
+ ops->dev_set_proximity(&pdev->dev, node);
- count = ops->iommu_count_resources(node);
+ count = ops->dev_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
if (!r) {
@@ -1123,7 +1263,7 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
goto dev_put;
}
- ops->iommu_init_resources(r, node);
+ ops->dev_init_resources(r, node);
ret = platform_device_add_resources(pdev, r, count);
/*
@@ -1158,12 +1298,14 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
pdev->dev.fwnode = fwnode;
- attr = ops->iommu_is_coherent(node) ?
- DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+ attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
/* Configure DMA for the page table walker */
acpi_dma_configure(&pdev->dev, attr);
+ iort_set_device_domain(&pdev->dev, node);
+
ret = platform_device_add(pdev);
if (ret)
goto dma_deconfigure;
@@ -1216,6 +1358,7 @@ static void __init iort_init_platform_devices(void)
struct fwnode_handle *fwnode;
int i, ret;
bool acs_enabled = false;
+ const struct iort_dev_config *ops;
/*
* iort_table and iort both point to the start of IORT table, but
@@ -1238,16 +1381,15 @@ static void __init iort_init_platform_devices(void)
if (!acs_enabled)
acs_enabled = iort_enable_acs(iort_node);
- if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
- (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
-
+ ops = iort_get_dev_cfg(iort_node);
+ if (ops) {
fwnode = acpi_alloc_fwnode_static();
if (!fwnode)
return;
iort_set_fwnode(iort_node, fwnode);
- ret = iort_add_smmu_platform_device(iort_node);
+ ret = iort_add_platform_device(iort_node, ops);
if (ret) {
iort_delete_fwnode(iort_node);
acpi_free_fwnode_static(fwnode);
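
With iort_iommu_config generalised into iort_dev_config and iort_init_platform_devices() keyed off iort_get_dev_cfg(), supporting another IORT node type only needs a new config entry plus a switch case. A sketch of how the dispatcher could look with such an extension; the PMCG node type constant and the pmcg_* helpers are assumptions for illustration, not part of this diff:

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name			= "arm-smmu-v3-pmcg",
	.dev_count_resources	= pmcg_count_resources,	/* assumed helper */
	.dev_init_resources	= pmcg_init_resources,	/* assumed helper */
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
	struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:	/* assumed node type constant */
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}
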
diff --git a/drivers/acpi/battery.h b/drivers/acpi/battery.h
index 6c084976987d..225f493d4c27 100644
--- a/drivers/acpi/battery.h
+++ b/drivers/acpi/battery.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index ef1856b15488..bf8e4d371fa7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -390,6 +390,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
{
struct acpi_button *button = acpi_driver_data(device);
struct input_dev *input;
+ int users;
switch (event) {
case ACPI_FIXED_HARDWARE_EVENT:
@@ -398,7 +399,11 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
case ACPI_BUTTON_NOTIFY_STATUS:
input = button->input;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_update_state(device);
+ mutex_lock(&button->input->mutex);
+ users = button->input->users;
+ mutex_unlock(&button->input->mutex);
+ if (users)
+ acpi_lid_update_state(device);
} else {
int keycode;
@@ -442,12 +447,24 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID)
+ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
acpi_lid_initialize_state(device);
return 0;
}
#endif
+static int acpi_lid_input_open(struct input_dev *input)
+{
+ struct acpi_device *device = input_get_drvdata(input);
+ struct acpi_button *button = acpi_driver_data(device);
+
+ button->last_state = !!acpi_lid_evaluate_state(device);
+ button->last_time = ktime_get();
+ acpi_lid_initialize_state(device);
+
+ return 0;
+}
+
static int acpi_button_add(struct acpi_device *device)
{
struct acpi_button *button;
@@ -488,8 +505,7 @@ static int acpi_button_add(struct acpi_device *device)
strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID);
sprintf(class, "%s/%s",
ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID);
- button->last_state = !!acpi_lid_evaluate_state(device);
- button->last_time = ktime_get();
+ input->open = acpi_lid_input_open;
} else {
printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid);
error = -ENODEV;
@@ -522,11 +538,11 @@ static int acpi_button_add(struct acpi_device *device)
break;
}
+ input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
goto err_remove_fs;
if (button->type == ACPI_BUTTON_TYPE_LID) {
- acpi_lid_initialize_state(device);
/*
* This assumes there's only one lid device, or if there are
* more we only care about the last one...
@@ -557,7 +573,8 @@ static int acpi_button_remove(struct acpi_device *device)
return 0;
}
-static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
+static int param_set_lid_init_state(const char *val,
+ const struct kernel_param *kp)
{
int result = 0;
@@ -575,7 +592,8 @@ static int param_set_lid_init_state(const char *val, struct kernel_param *kp)
return result;
}
-static int param_get_lid_init_state(char *buffer, struct kernel_param *kp)
+static int param_get_lid_init_state(char *buffer,
+ const struct kernel_param *kp)
{
switch (lid_init_state) {
case ACPI_BUTTON_LID_INIT_OPEN:
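
The accessor prototype changes above follow the module parameter core, which now hands accessors a const struct kernel_param pointer. A minimal sketch of wiring such accessors to a parameter (the example_state parameter and its handlers are hypothetical; button.c keeps using its existing lid_init_state parameter):

static int param_set_example(const char *val, const struct kernel_param *kp)
{
	/* parse "val" and update the backing state here */
	return 0;
}

static int param_get_example(char *buffer, const struct kernel_param *kp)
{
	/* format the current state for sysfs */
	return sprintf(buffer, "open\n");
}

module_param_call(example_state, param_set_example, param_get_example,
		  NULL, 0644);
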
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e5b47f032d9a..21c28433c590 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -48,7 +48,6 @@
struct cppc_pcc_data {
struct mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
- int pcc_subspace_idx;
bool pcc_channel_acquired;
ktime_t deadline;
unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -75,13 +74,16 @@ struct cppc_pcc_data {
/* Wait queue for CPUs whose requests were batched */
wait_queue_head_t pcc_write_wait_q;
+ ktime_t last_cmd_cmpl_time;
+ ktime_t last_mpar_reset;
+ int mpar_count;
+ int refcount;
};
-/* Structure to represent the single PCC channel */
-static struct cppc_pcc_data pcc_data = {
- .pcc_subspace_idx = -1,
- .platform_owns_pcc = true,
-};
+/* Array to represent the PCC channel per subspace id */
+static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
+/* The cpu_pcc_subspace_idx contains the per-CPU subspace id */
+static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
* The cpc_desc structure contains the ACPI register details
@@ -93,7 +95,8 @@ static struct cppc_pcc_data pcc_data = {
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+ 0x8 + (offs))
/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
@@ -188,13 +191,16 @@ static struct kobj_type cppc_ktype = {
.default_attrs = cppc_attrs,
};
-static int check_pcc_chan(bool chk_err_bit)
+static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
int ret = -EIO, status = 0;
- struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
- ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+ pcc_ss_data->pcc_comm_addr;
+ ktime_t next_deadline = ktime_add(ktime_get(),
+ pcc_ss_data->deadline);
- if (!pcc_data.platform_owns_pcc)
+ if (!pcc_ss_data->platform_owns_pcc)
return 0;
/* Retry in case the remote processor was too slow to catch up. */
@@ -219,7 +225,7 @@ static int check_pcc_chan(bool chk_err_bit)
}
if (likely(!ret))
- pcc_data.platform_owns_pcc = false;
+ pcc_ss_data->platform_owns_pcc = false;
else
pr_err("PCC check channel failed. Status=%x\n", status);
@@ -230,13 +236,12 @@ static int check_pcc_chan(bool chk_err_bit)
* This function transfers the ownership of the PCC to the platform
* So it must be called while holding write_lock(pcc_lock)
*/
-static int send_pcc_cmd(u16 cmd)
+static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
int ret = -EIO, i;
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
struct acpi_pcct_shared_memory *generic_comm_base =
- (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
- static ktime_t last_cmd_cmpl_time, last_mpar_reset;
- static int mpar_count;
+ (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
unsigned int time_delta;
/*
@@ -249,24 +254,25 @@ static int send_pcc_cmd(u16 cmd)
* before write completion, so first send a WRITE command to
* platform
*/
- if (pcc_data.pending_pcc_write_cmd)
- send_pcc_cmd(CMD_WRITE);
+ if (pcc_ss_data->pending_pcc_write_cmd)
+ send_pcc_cmd(pcc_ss_id, CMD_WRITE);
- ret = check_pcc_chan(false);
+ ret = check_pcc_chan(pcc_ss_id, false);
if (ret)
goto end;
} else /* CMD_WRITE */
- pcc_data.pending_pcc_write_cmd = FALSE;
+ pcc_ss_data->pending_pcc_write_cmd = FALSE;
/*
* Handle the Minimum Request Turnaround Time(MRTT)
* "The minimum amount of time that OSPM must wait after the completion
* of a command before issuing the next command, in microseconds"
*/
- if (pcc_data.pcc_mrtt) {
- time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
- if (pcc_data.pcc_mrtt > time_delta)
- udelay(pcc_data.pcc_mrtt - time_delta);
+ if (pcc_ss_data->pcc_mrtt) {
+ time_delta = ktime_us_delta(ktime_get(),
+ pcc_ss_data->last_cmd_cmpl_time);
+ if (pcc_ss_data->pcc_mrtt > time_delta)
+ udelay(pcc_ss_data->pcc_mrtt - time_delta);
}
/*
@@ -280,18 +286,19 @@ static int send_pcc_cmd(u16 cmd)
* not send the request to the platform after hitting the MPAR limit in
* any 60s window
*/
- if (pcc_data.pcc_mpar) {
- if (mpar_count == 0) {
- time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
- if (time_delta < 60 * MSEC_PER_SEC) {
+ if (pcc_ss_data->pcc_mpar) {
+ if (pcc_ss_data->mpar_count == 0) {
+ time_delta = ktime_ms_delta(ktime_get(),
+ pcc_ss_data->last_mpar_reset);
+ if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
pr_debug("PCC cmd not sent due to MPAR limit");
ret = -EIO;
goto end;
}
- last_mpar_reset = ktime_get();
- mpar_count = pcc_data.pcc_mpar;
+ pcc_ss_data->last_mpar_reset = ktime_get();
+ pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
}
- mpar_count--;
+ pcc_ss_data->mpar_count--;
}
/* Write to the shared comm region. */
@@ -300,10 +307,10 @@ static int send_pcc_cmd(u16 cmd)
/* Flip CMD COMPLETE bit */
writew_relaxed(0, &generic_comm_base->status);
- pcc_data.platform_owns_pcc = true;
+ pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
- ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
+ ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
cmd, ret);
@@ -311,15 +318,15 @@ static int send_pcc_cmd(u16 cmd)
}
/* wait for completion and check for PCC error bit */
- ret = check_pcc_chan(true);
+ ret = check_pcc_chan(pcc_ss_id, true);
- if (pcc_data.pcc_mrtt)
- last_cmd_cmpl_time = ktime_get();
+ if (pcc_ss_data->pcc_mrtt)
+ pcc_ss_data->last_cmd_cmpl_time = ktime_get();
- if (pcc_data.pcc_channel->mbox->txdone_irq)
- mbox_chan_txdone(pcc_data.pcc_channel, ret);
+ if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
+ mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
else
- mbox_client_txdone(pcc_data.pcc_channel, ret);
+ mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
end:
if (cmd == CMD_WRITE) {
@@ -329,12 +336,12 @@ end:
if (!desc)
continue;
- if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
+ if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
desc->write_cmd_status = ret;
}
}
- pcc_data.pcc_write_cnt++;
- wake_up_all(&pcc_data.pcc_write_wait_q);
+ pcc_ss_data->pcc_write_cnt++;
+ wake_up_all(&pcc_ss_data->pcc_write_wait_q);
}
return ret;
@@ -536,16 +543,16 @@ err_ret:
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
-static int register_pcc_channel(int pcc_subspace_idx)
+static int register_pcc_channel(int pcc_ss_idx)
{
struct acpi_pcct_hw_reduced *cppc_ss;
u64 usecs_lat;
- if (pcc_subspace_idx >= 0) {
- pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
- pcc_subspace_idx);
+ if (pcc_ss_idx >= 0) {
+ pcc_data[pcc_ss_idx]->pcc_channel =
+ pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
- if (IS_ERR(pcc_data.pcc_channel)) {
+ if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
pr_err("Failed to find PCC communication channel\n");
return -ENODEV;
}
@@ -556,7 +563,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
* PCC channels) and stored pointers to the
* subspace communication region in con_priv.
*/
- cppc_ss = (pcc_data.pcc_channel)->con_priv;
+ cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
if (!cppc_ss) {
pr_err("No PCC subspace found for CPPC\n");
@@ -569,19 +576,20 @@ static int register_pcc_channel(int pcc_subspace_idx)
* So add an arbitrary amount of wait on top of Nominal.
*/
usecs_lat = NUM_RETRIES * cppc_ss->latency;
- pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
- pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
- pcc_data.pcc_mpar = cppc_ss->max_access_rate;
- pcc_data.pcc_nominal = cppc_ss->latency;
-
- pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
- if (!pcc_data.pcc_comm_addr) {
+ pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+ pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
+ pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
+ pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
+
+ pcc_data[pcc_ss_idx]->pcc_comm_addr =
+ acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+ if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem\n");
return -ENOMEM;
}
/* Set flag so that we don't come here for each CPU. */
- pcc_data.pcc_channel_acquired = true;
+ pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
return 0;
@@ -600,6 +608,34 @@ bool __weak cpc_ffh_supported(void)
return false;
}
+
+/**
+ * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ *
+ * Check and allocate the cppc_pcc_data memory.
+ * In some processor configurations the same subspace can be shared by
+ * multiple CPUs; this is seen especially on CPUs with hardware
+ * multi-threading support.
+ *
+ * Return: 0 for success, errno for failure
+ */
+int pcc_data_alloc(int pcc_ss_id)
+{
+ if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
+ return -EINVAL;
+
+ if (pcc_data[pcc_ss_id]) {
+ pcc_data[pcc_ss_id]->refcount++;
+ } else {
+ pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
+ GFP_KERNEL);
+ if (!pcc_data[pcc_ss_id])
+ return -ENOMEM;
+ pcc_data[pcc_ss_id]->refcount++;
+ }
+
+ return 0;
+}
/*
* An example CPC table looks like the following.
*
@@ -661,6 +697,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
struct device *cpu_dev;
acpi_handle handle = pr->handle;
unsigned int num_ent, i, cpc_rev;
+ int pcc_subspace_id = -1;
acpi_status status;
int ret = -EFAULT;
@@ -733,9 +770,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
* so extract it only once.
*/
if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
- if (pcc_data.pcc_subspace_idx < 0)
- pcc_data.pcc_subspace_idx = gas_t->access_width;
- else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
+ if (pcc_subspace_id < 0) {
+ pcc_subspace_id = gas_t->access_width;
+ if (pcc_data_alloc(pcc_subspace_id))
+ goto out_free;
+ } else if (pcc_subspace_id != gas_t->access_width) {
pr_debug("Mismatched PCC ids.\n");
goto out_free;
}
@@ -763,6 +802,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
}
+ per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
/* Store CPU Logical ID */
cpc_ptr->cpu_id = pr->id;
@@ -771,14 +811,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (ret)
goto out_free;
- /* Register PCC channel once for all CPUs. */
- if (!pcc_data.pcc_channel_acquired) {
- ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
+ /* Register PCC channel once for all PCC subspace id. */
+ if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
+ ret = register_pcc_channel(pcc_subspace_id);
if (ret)
goto out_free;
- init_rwsem(&pcc_data.pcc_lock);
- init_waitqueue_head(&pcc_data.pcc_write_wait_q);
+ init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
+ init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
}
/* Everything looks okay */
@@ -831,6 +871,18 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
struct cpc_desc *cpc_ptr;
unsigned int i;
void __iomem *addr;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
+
+ if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
+ if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
+ pcc_data[pcc_ss_id]->refcount--;
+ if (!pcc_data[pcc_ss_id]->refcount) {
+ pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+ pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+ kfree(pcc_data[pcc_ss_id]);
+ }
+ }
+ }
cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
if (!cpc_ptr)
@@ -888,6 +940,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
int ret_val = 0;
void __iomem *vaddr = 0;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg_res->type == ACPI_TYPE_INTEGER) {
@@ -897,7 +950,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- vaddr = GET_PCC_VADDR(reg->address);
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -932,10 +985,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
void __iomem *vaddr = 0;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- vaddr = GET_PCC_VADDR(reg->address);
+ vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -980,6 +1034,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
struct cpc_register_resource *highest_reg, *lowest_reg,
*lowest_non_linear_reg, *nominal_reg;
u64 high, low, nom, min_nonlinear;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
int ret = 0, regs_in_pcc = 0;
if (!cpc_desc) {
@@ -996,9 +1052,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
regs_in_pcc = 1;
- down_write(&pcc_data.pcc_lock);
+ down_write(&pcc_ss_data->pcc_lock);
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ) < 0) {
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -1021,7 +1077,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
out_err:
if (regs_in_pcc)
- up_write(&pcc_data.pcc_lock);
+ up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
@@ -1038,6 +1094,8 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
struct cpc_register_resource *delivered_reg, *reference_reg,
*ref_perf_reg, *ctr_wrap_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
u64 delivered, reference, ref_perf, ctr_wrap_time;
int ret = 0, regs_in_pcc = 0;
@@ -1061,10 +1119,10 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
/* Are any of the regs PCC? */
if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
- down_write(&pcc_data.pcc_lock);
+ down_write(&pcc_ss_data->pcc_lock);
regs_in_pcc = 1;
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ) < 0) {
+ if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -1094,7 +1152,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
if (regs_in_pcc)
- up_write(&pcc_data.pcc_lock);
+ up_write(&pcc_ss_data->pcc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
@@ -1110,6 +1168,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
struct cpc_register_resource *desired_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
int ret = 0;
if (!cpc_desc) {
@@ -1127,11 +1187,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* achieve that goal here
*/
if (CPC_IN_PCC(desired_reg)) {
- down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */
- if (pcc_data.platform_owns_pcc) {
- ret = check_pcc_chan(false);
+ down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
+ if (pcc_ss_data->platform_owns_pcc) {
+ ret = check_pcc_chan(pcc_ss_id, false);
if (ret) {
- up_read(&pcc_data.pcc_lock);
+ up_read(&pcc_ss_data->pcc_lock);
return ret;
}
}
@@ -1139,8 +1199,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* Update the pending_write to make sure a PCC CMD_READ will not
* arrive and steal the channel during the switch to write lock
*/
- pcc_data.pending_pcc_write_cmd = true;
- cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
+ pcc_ss_data->pending_pcc_write_cmd = true;
+ cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
cpc_desc->write_cmd_status = 0;
}
@@ -1151,7 +1211,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
if (CPC_IN_PCC(desired_reg))
- up_read(&pcc_data.pcc_lock); /* END Phase-I */
+ up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */
/*
* This is Phase-II where we transfer the ownership of PCC to Platform
*
@@ -1199,15 +1259,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* the write command before servicing the read command
*/
if (CPC_IN_PCC(desired_reg)) {
- if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */
+ if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
/* Update only if there are pending write commands */
- if (pcc_data.pending_pcc_write_cmd)
- send_pcc_cmd(CMD_WRITE);
- up_write(&pcc_data.pcc_lock); /* END Phase-II */
+ if (pcc_ss_data->pending_pcc_write_cmd)
+ send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */
} else
/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
- wait_event(pcc_data.pcc_write_wait_q,
- cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);
+ wait_event(pcc_ss_data->pcc_write_wait_q,
+ cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
/* send_pcc_cmd updates the status in case of failure */
ret = cpc_desc->write_cmd_status;
@@ -1240,6 +1300,8 @@ unsigned int cppc_get_transition_latency(int cpu_num)
unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
+ struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
@@ -1249,11 +1311,11 @@ unsigned int cppc_get_transition_latency(int cpu_num)
if (!CPC_IN_PCC(desired_reg))
return CPUFREQ_ETERNAL;
- if (pcc_data.pcc_mpar)
- latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);
+ if (pcc_ss_data->pcc_mpar)
+ latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);
+ latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
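
The latency calculation itself is unchanged; only its inputs now come from the per-subspace data. A worked example with hypothetical firmware values:

/*
 * Assume pcc_mpar = 600 commands/minute, pcc_nominal = 500 us and
 * pcc_mrtt = 60 us for this subspace:
 *
 *   MPAR limit : 60 * (1000000000 / 600) = 100,000,000 ns (100 ms)
 *   nominal    : 500 * 1000              =     500,000 ns
 *   MRTT       : 60 * 1000               =      60,000 ns
 *
 * cppc_get_transition_latency() returns the maximum, 100 ms, keeping
 * cpufreq's request rate within the platform's MPAR budget.
 */
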
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index fbcc73f7a099..e4ffaeec9ec2 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
+static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev)
{
@@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
if (!dev && !func)
return AE_BAD_PARAMETER;
- mutex_lock(&acpi_pm_notifier_lock);
+ mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
- adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
- adev->wakeup.context.dev = dev;
- adev->wakeup.context.func = func;
-
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
+ mutex_lock(&acpi_pm_notifier_lock);
+ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+ adev->wakeup.context.dev = dev;
+ adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
+ mutex_unlock(&acpi_pm_notifier_lock);
out:
- mutex_unlock(&acpi_pm_notifier_lock);
+ mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
- mutex_lock(&acpi_pm_notifier_lock);
+ mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present)
goto out;
@@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
+ mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws);
-
adev->wakeup.flags.notifier_present = false;
+ mutex_unlock(&acpi_pm_notifier_lock);
out:
- mutex_unlock(&acpi_pm_notifier_lock);
+ mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -581,8 +584,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state;
- } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
- PM_QOS_FLAGS_NONE) {
+ } else {
wakeup = adev->wakeup.flags.valid;
}
@@ -848,48 +850,48 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
}
/**
- * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
+ * acpi_dev_suspend - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
+ * @wakeup: Whether or not to enable wakeup for the device.
*
- * Put the given device into a runtime low-power state using the standard ACPI
+ * Put the given device into a low-power state using the standard ACPI
* mechanism. Set up remote wakeup if desired, choose the state to put the
* device into (this checks if remote wakeup is expected to work too), and set
* the power state of the device.
*/
-int acpi_dev_runtime_suspend(struct device *dev)
+int acpi_dev_suspend(struct device *dev, bool wakeup)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- bool remote_wakeup;
+ u32 target_state = acpi_target_system_state();
int error;
if (!adev)
return 0;
- remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
- PM_QOS_FLAGS_NONE;
- if (remote_wakeup) {
- error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (wakeup && acpi_device_can_wakeup(adev)) {
+ error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return -EAGAIN;
+ } else {
+ wakeup = false;
}
- error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error && remote_wakeup)
+ error = acpi_dev_pm_low_power(dev, adev, target_state);
+ if (error && wakeup)
acpi_device_wakeup_disable(adev);
return error;
}
-EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
+EXPORT_SYMBOL_GPL(acpi_dev_suspend);
/**
- * acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
+ * acpi_dev_resume - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
- * mechanism at run time. Set the power state of the device to ACPI D0 and
- * disable remote wakeup.
+ * mechanism. Set the power state of the device to ACPI D0 and disable wakeup.
*/
-int acpi_dev_runtime_resume(struct device *dev)
+int acpi_dev_resume(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
@@ -901,7 +903,7 @@ int acpi_dev_runtime_resume(struct device *dev)
acpi_device_wakeup_disable(adev);
return error;
}
-EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
+EXPORT_SYMBOL_GPL(acpi_dev_resume);
/**
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
@@ -913,7 +915,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
int acpi_subsys_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
- return ret ? ret : acpi_dev_runtime_suspend(dev);
+ return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
@@ -926,68 +928,33 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
*/
int acpi_subsys_runtime_resume(struct device *dev)
{
- int ret = acpi_dev_runtime_resume(dev);
+ int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
#ifdef CONFIG_PM_SLEEP
-/**
- * acpi_dev_suspend_late - Put device into a low-power state using ACPI.
- * @dev: Device to put into a low-power state.
- *
- * Put the given device into a low-power state during system transition to a
- * sleep state using the standard ACPI mechanism. Set up system wakeup if
- * desired, choose the state to put the device into (this checks if system
- * wakeup is expected to work too), and set the power state of the device.
- */
-int acpi_dev_suspend_late(struct device *dev)
+static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- u32 target_state;
- bool wakeup;
- int error;
-
- if (!adev)
- return 0;
-
- target_state = acpi_target_system_state();
- wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- if (wakeup) {
- error = acpi_device_wakeup_enable(adev, target_state);
- if (error)
- return error;
- }
+ u32 sys_target = acpi_target_system_state();
+ int ret, state;
- error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error && wakeup)
- acpi_device_wakeup_disable(adev);
+ if (!pm_runtime_suspended(dev) || !adev ||
+ device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
+ return true;
- return error;
-}
-EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
+ if (sys_target == ACPI_STATE_S0)
+ return false;
-/**
- * acpi_dev_resume_early - Put device into the full-power state using ACPI.
- * @dev: Device to put into the full-power state.
- *
- * Put the given device into the full-power state using the standard ACPI
- * mechanism during system transition to the working state. Set the power
- * state of the device to ACPI D0 and disable remote wakeup.
- */
-int acpi_dev_resume_early(struct device *dev)
-{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- int error;
+ if (adev->power.flags.dsw_present)
+ return true;
- if (!adev)
- return 0;
+ ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
+ if (ret)
+ return true;
- error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup_disable(adev);
- return error;
+ return state != adev->power.state;
}
-EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
/**
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
@@ -996,39 +963,53 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- u32 sys_target;
- int ret, state;
- ret = pm_generic_prepare(dev);
- if (ret < 0)
- return ret;
-
- if (!adev || !pm_runtime_suspended(dev)
- || device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
- return 0;
+ if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
+ int ret = dev->driver->pm->prepare(dev);
- sys_target = acpi_target_system_state();
- if (sys_target == ACPI_STATE_S0)
- return 1;
+ if (ret < 0)
+ return ret;
- if (adev->power.flags.dsw_present)
- return 0;
+ if (!ret && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+ return 0;
+ }
- ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
- return !ret && state == adev->power.state;
+ return !acpi_dev_needs_resume(dev, adev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
+ * acpi_subsys_complete - Finalize device's resume during system resume.
+ * @dev: Device to handle.
+ */
+void acpi_subsys_complete(struct device *dev)
+{
+ pm_generic_complete(dev);
+ /*
+ * If the device was runtime-suspended before the system entered the
+ * sleep state it is now leaving, and it has not been resumed yet,
+ * resume it in case the firmware powered it up.
+ */
+ if (dev->power.direct_complete && pm_resume_via_firmware())
+ pm_request_resume(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_complete);
+
+/**
* acpi_subsys_suspend - Run the device driver's suspend callback.
* @dev: Device to handle.
*
- * Follow PCI and resume devices suspended at run time before running their
- * system suspend callbacks.
+ * Follow PCI and resume devices from runtime suspend before running their
+ * system suspend callbacks, unless the driver can cope with runtime-suspended
+ * devices during system suspend and there are no ACPI-specific reasons for
+ * resuming them.
*/
int acpi_subsys_suspend(struct device *dev)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
+ pm_runtime_resume(dev);
+
return pm_generic_suspend(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
@@ -1042,12 +1023,48 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
*/
int acpi_subsys_suspend_late(struct device *dev)
{
- int ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_dev_suspend_late(dev);
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ ret = pm_generic_suspend_late(dev);
+ return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
/**
+ * acpi_subsys_suspend_noirq - Run the device driver's "noirq" suspend callback.
+ * @dev: Device to suspend.
+ */
+int acpi_subsys_suspend_noirq(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_suspend_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
+
+/**
+ * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_resume_noirq(struct device *dev)
+{
+ /*
+ * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+ * during system suspend, so update their runtime PM status to "active"
+ * as they will be put into D0 going forward.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
+ return pm_generic_resume_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
+
+/**
* acpi_subsys_resume_early - Resume device using ACPI.
* @dev: Device to Resume.
*
@@ -1057,7 +1074,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_resume_early(struct device *dev)
{
- int ret = acpi_dev_resume_early(dev);
+ int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
@@ -1074,11 +1091,60 @@ int acpi_subsys_freeze(struct device *dev)
* runtime-suspended devices should not be touched during freeze/thaw
* transitions.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return pm_generic_freeze(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+/**
+ * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_late(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);
+
+/**
+ * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze_noirq(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
+
+/**
+ * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_thaw_noirq(struct device *dev)
+{
+ /*
+ * If the device is in runtime suspend, the "thaw" code may not work
+ * correctly with it, so skip the driver callback and make the PM core
+ * skip all of the subsequent "thaw" callbacks for the device.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.direct_complete = true;
+ return 0;
+ }
+
+ return pm_generic_thaw_noirq(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_domain acpi_general_pm_domain = {
@@ -1087,13 +1153,20 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
- .complete = pm_complete_with_resume_check,
+ .complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_subsys_suspend_late,
+ .suspend_noirq = acpi_subsys_suspend_noirq,
+ .resume_noirq = acpi_subsys_resume_noirq,
.resume_early = acpi_subsys_resume_early,
.freeze = acpi_subsys_freeze,
+ .freeze_late = acpi_subsys_freeze_late,
+ .freeze_noirq = acpi_subsys_freeze_noirq,
+ .thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_subsys_suspend_late,
+ .poweroff_noirq = acpi_subsys_suspend_noirq,
+ .restore_noirq = acpi_subsys_resume_noirq,
.restore_early = acpi_subsys_resume_early,
#endif
},
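
The new noirq/late callbacks only short-circuit for devices whose drivers have opted in, so existing drivers see no behaviour change. A sketch of the opt-in from a driver's probe path, assuming the dev_pm_set_driver_flags() helper and the DPM_FLAG_* constants from the same PM series; the driver itself is hypothetical:

static int example_probe(struct platform_device *pdev)
{
	/*
	 * The driver's system suspend/resume callbacks can cope with the
	 * device left in runtime suspend, so let the ACPI PM domain skip
	 * resuming it on the way into system sleep.
	 */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
	return 0;
}
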
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 2305e1ab978e..e3fc1f045e1c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -482,6 +482,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
surprise_removal = 1;
event = ACPI_NOTIFY_EJECT_REQUEST;
/* Fall back */
+ /* fall through */
case ACPI_NOTIFY_EJECT_REQUEST:
begin_undock(ds);
if ((immediate_undock && !(ds->flags & DOCK_IS_ATA))
diff --git a/drivers/acpi/dptf/Kconfig b/drivers/acpi/dptf/Kconfig
index ac0a6ed0cf46..90a2fd979282 100644
--- a/drivers/acpi/dptf/Kconfig
+++ b/drivers/acpi/dptf/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DPTF_POWER
tristate "DPTF Platform Power Participant"
depends on X86
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 236b14324780..da176c95aa2c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -486,8 +486,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
{
if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
ec_log_drv("event unblocked");
- if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- advance_transaction(ec);
+ /*
+ * Unconditionally invoke this once after enabling the event
+ * handling mechanism to detect the pending events.
+ */
+ advance_transaction(ec);
}
static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
@@ -1456,11 +1459,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_ec_enable_gpe(ec, true);
-
- /* EC is fully operational, allow queries */
- acpi_ec_enable_event(ec);
}
}
+ /* EC is fully operational, allow queries */
+ acpi_ec_enable_event(ec);
return 0;
}
@@ -1939,7 +1941,8 @@ static const struct dev_pm_ops acpi_ec_pm = {
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
-static int param_set_event_clearing(const char *val, struct kernel_param *kp)
+static int param_set_event_clearing(const char *val,
+ const struct kernel_param *kp)
{
int result = 0;
@@ -1957,7 +1960,8 @@ static int param_set_event_clearing(const char *val, struct kernel_param *kp)
return result;
}
-static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
+static int param_get_event_clearing(char *buffer,
+ const struct kernel_param *kp)
{
switch (ec_event_clearing) {
case ACPI_EC_EVT_TIMING_STATUS:
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 7fceb3b4691b..5a127f3f2d5c 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* event.c - exporting ACPI events via procfs
*
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4361c4415b4f..fc8c43e76707 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -248,4 +248,10 @@ void acpi_watchdog_init(void);
static inline void acpi_watchdog_init(void) {}
#endif
+#ifdef CONFIG_ACPI_LPIT
+void acpi_init_lpit(void);
+#else
+static inline void acpi_init_lpit(void) { }
+#endif
+
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
index 929ba4da0b30..f7c57e33499e 100644
--- a/drivers/acpi/nfit/Kconfig
+++ b/drivers/acpi/nfit/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config ACPI_NFIT
tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
depends on PHYS_ADDR_T_64BIT
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 9c2c49b6a240..ff2580e7611d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -183,13 +183,33 @@ static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
return 0;
}
-static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
+#define ACPI_LABELS_LOCKED 3
+
+static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
+ u32 status)
{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
+ /*
+ * In the _LSI, _LSR, _LSW case the locked status is
+ * communicated via the read/write commands
+ */
+ if (nfit_mem->has_lsi)
+ break;
+
if (status >> 16 & ND_CONFIG_LOCKED)
return -EACCES;
break;
+ case ND_CMD_GET_CONFIG_DATA:
+ if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+ return -EACCES;
+ break;
+ case ND_CMD_SET_CONFIG_DATA:
+ if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+ return -EACCES;
+ break;
default:
break;
}
@@ -205,13 +225,182 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
{
if (!nvdimm)
return xlat_bus_status(buf, cmd, status);
- return xlat_nvdimm_status(buf, cmd, status);
+ return xlat_nvdimm_status(nvdimm, buf, cmd, status);
+}
+
+/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
+static union acpi_object *pkg_to_buf(union acpi_object *pkg)
+{
+ int i;
+ void *dst;
+ size_t size = 0;
+ union acpi_object *buf = NULL;
+
+ if (pkg->type != ACPI_TYPE_PACKAGE) {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ pkg->type);
+ goto err;
+ }
+
+ for (i = 0; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+
+ if (obj->type == ACPI_TYPE_INTEGER)
+ size += 4;
+ else if (obj->type == ACPI_TYPE_BUFFER)
+ size += obj->buffer.length;
+ else {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ obj->type);
+ goto err;
+ }
+ }
+
+ buf = ACPI_ALLOCATE(sizeof(*buf) + size);
+ if (!buf)
+ goto err;
+
+ dst = buf + 1;
+ buf->type = ACPI_TYPE_BUFFER;
+ buf->buffer.length = size;
+ buf->buffer.pointer = dst;
+ for (i = 0; i < pkg->package.count; i++) {
+ union acpi_object *obj = &pkg->package.elements[i];
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ memcpy(dst, &obj->integer.value, 4);
+ dst += 4;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ memcpy(dst, obj->buffer.pointer, obj->buffer.length);
+ dst += obj->buffer.length;
+ }
+ }
+err:
+ ACPI_FREE(pkg);
+ return buf;
+}
+
+static union acpi_object *int_to_buf(union acpi_object *integer)
+{
+ union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ void *dst = NULL;
+
+ if (!buf)
+ goto err;
+
+ if (integer->type != ACPI_TYPE_INTEGER) {
+ WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
+ integer->type);
+ goto err;
+ }
+
+ dst = buf + 1;
+ buf->type = ACPI_TYPE_BUFFER;
+ buf->buffer.length = 4;
+ buf->buffer.pointer = dst;
+ memcpy(dst, &integer->integer.value, 4);
+err:
+ ACPI_FREE(integer);
+ return buf;
+}
+
+static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
+ u32 len, void *data)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input = {
+ .count = 3,
+ .pointer = (union acpi_object []) {
+ [0] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = offset,
+ },
+ [1] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = len,
+ },
+ [2] = {
+ .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.pointer = data,
+ .buffer.length = len,
+ },
+ },
+ };
+
+ rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return int_to_buf(buf.pointer);
+}
+
+static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
+ u32 len)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input = {
+ .count = 2,
+ .pointer = (union acpi_object []) {
+ [0] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = offset,
+ },
+ [1] = {
+ .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = len,
+ },
+ },
+ };
+
+ rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return pkg_to_buf(buf.pointer);
+}
+
+static union acpi_object *acpi_label_info(acpi_handle handle)
+{
+ acpi_status rc;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
+ if (ACPI_FAILURE(rc))
+ return NULL;
+ return pkg_to_buf(buf.pointer);
+}
+
+static u8 nfit_dsm_revid(unsigned family, unsigned func)
+{
+ static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+ [NVDIMM_FAMILY_INTEL] = {
+ [NVDIMM_INTEL_GET_MODES] = 2,
+ [NVDIMM_INTEL_GET_FWINFO] = 2,
+ [NVDIMM_INTEL_START_FWUPDATE] = 2,
+ [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
+ [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
+ [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
+ [NVDIMM_INTEL_SET_THRESHOLD] = 2,
+ [NVDIMM_INTEL_INJECT_ERROR] = 2,
+ },
+ };
+ u8 id;
+
+ if (family > NVDIMM_FAMILY_MAX)
+ return 0;
+ if (func > 31)
+ return 0;
+ id = revid_table[family][func];
+ if (id == 0)
+ return 1; /* default */
+ return id;
}
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
union acpi_object in_obj, in_buf, *out_obj;
const struct nd_cmd_desc *desc = NULL;
struct device *dev = acpi_desc->dev;
@@ -235,7 +424,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
}
if (nvdimm) {
- struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
@@ -294,7 +482,29 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
in_buf.buffer.pointer,
min_t(u32, 256, in_buf.buffer.length), true);
- out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
+ /* call the BIOS, prefer the named methods over _DSM if available */
+ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
+ out_obj = acpi_label_info(handle);
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ struct nd_cmd_get_config_data_hdr *p = buf;
+
+ out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
+ } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
+ && nfit_mem->has_lsw) {
+ struct nd_cmd_set_config_hdr *p = buf;
+
+ out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
+ p->in_buf);
+ } else {
+ u8 revid;
+
+ if (nvdimm)
+ revid = nfit_dsm_revid(nfit_mem->family, func);
+ else
+ revid = 1;
+ out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+ }
+
if (!out_obj) {
dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
cmd_name);
@@ -356,8 +566,10 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* Set fw_status for all the commands with a known format to be
* later interpreted by xlat_status().
*/
- if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
- || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+ if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
+ && cmd <= ND_CMD_CLEAR_ERROR)
+ || (nvdimm && cmd >= ND_CMD_SMART
+ && cmd <= ND_CMD_VENDOR)))
fw_status = *(u32 *) out_obj->buffer.pointer;
if (offset + in_buf.buffer.length < buf_len) {
@@ -1431,6 +1643,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
{
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
+ union acpi_object *obj;
unsigned long dsm_mask;
const guid_t *guid;
int i;
@@ -1463,7 +1676,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
* different command sets. Note that checking for function0 (bit0)
* tells us if any commands are reachable through this GUID.
*/
- for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
+ for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
if (family < 0 || i == default_dsm_family)
family = i;
@@ -1473,7 +1686,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (override_dsm_mask && !disable_vendor_specific)
dsm_mask = override_dsm_mask;
else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
- dsm_mask = 0x3fe;
+ dsm_mask = NVDIMM_INTEL_CMDMASK;
if (disable_vendor_specific)
dsm_mask &= ~(1 << ND_CMD_VENDOR);
} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
@@ -1493,9 +1706,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
guid = to_nfit_uuid(nfit_mem->family);
for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
- if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
+ if (acpi_check_dsm(adev_dimm->handle, guid,
+ nfit_dsm_revid(nfit_mem->family, i),
+ 1ULL << i))
set_bit(i, &nfit_mem->dsm_mask);
+ obj = acpi_label_info(adev_dimm->handle);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsi = 1;
+ dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
+ }
+
+ obj = acpi_label_read(adev_dimm->handle, 0, 0);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsr = 1;
+ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+ }
+
+ obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
+ if (obj) {
+ ACPI_FREE(obj);
+ nfit_mem->has_lsw = 1;
+ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+ }
+
return 0;
}
@@ -1571,8 +1807,21 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
* userspace interface.
*/
cmd_mask = 1UL << ND_CMD_CALL;
- if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
- cmd_mask |= nfit_mem->dsm_mask;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+ /*
+ * These commands have a 1:1 correspondence
+ * between DSM payload and libnvdimm ioctl
+ * payload format.
+ */
+ cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
+ }
+
+ if (nfit_mem->has_lsi)
+ set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ if (nfit_mem->has_lsr)
+ set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ if (nfit_mem->has_lsw)
+ set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
: NULL;
@@ -1645,6 +1894,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
int i;
nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
+ nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
@@ -2239,7 +2489,7 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
if (ars_status->out_length
< 44 + sizeof(struct nd_ars_record) * (i + 1))
break;
- rc = nvdimm_bus_add_poison(nvdimm_bus,
+ rc = nvdimm_bus_add_badrange(nvdimm_bus,
ars_status->records[i].err_address,
ars_status->records[i].length);
if (rc)
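For reference, the revision selection introduced above can be modelled in
isolation. A minimal user-space sketch, assuming NVDIMM_FAMILY_INTEL is 0 as
in the uapi header and using the function numbers added in the nfit.h hunk
below; unlisted functions fall back to revision 1:

#include <assert.h>

#define NVDIMM_FAMILY_INTEL		0	/* assumed uapi value */
#define NVDIMM_FAMILY_MAX		3	/* NVDIMM_FAMILY_MSFT */
#define NVDIMM_INTEL_GET_MODES		11
#define NVDIMM_INTEL_INJECT_ERROR	18

static unsigned char dsm_revid(unsigned int family, unsigned int func)
{
	/* same shape as nfit_dsm_revid(): sparse table, 0 means "default" */
	static const unsigned char revid_table[NVDIMM_FAMILY_MAX + 1][32] = {
		[NVDIMM_FAMILY_INTEL] = {
			[NVDIMM_INTEL_GET_MODES] = 2,
			[NVDIMM_INTEL_INJECT_ERROR] = 2,
		},
	};

	if (family > NVDIMM_FAMILY_MAX || func > 31)
		return 0;
	return revid_table[family][func] ? revid_table[family][func] : 1;
}

int main(void)
{
	assert(dsm_revid(NVDIMM_FAMILY_INTEL, NVDIMM_INTEL_GET_MODES) == 2);
	assert(dsm_revid(NVDIMM_FAMILY_INTEL, 4) == 1);  /* unlisted function */
	assert(dsm_revid(NVDIMM_FAMILY_MAX + 1, 0) == 0); /* unknown family */
	return 0;
}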
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index feeb95d574fa..b92921439657 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -67,7 +67,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
continue;
/* If this fails due to an -ENOMEM, there is little we can do */
- nvdimm_bus_add_poison(acpi_desc->nvdimm_bus,
+ nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus,
ALIGN(mce->addr, L1_CACHE_BYTES),
L1_CACHE_BYTES);
nvdimm_region_notify(nfit_spa->nd_region,
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 54292db61262..f0cf18b2da8b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -24,7 +24,7 @@
/* ACPI 6.1 */
#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
-/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */
+/* http://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
@@ -38,6 +38,37 @@
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+
+#define NVDIMM_STANDARD_CMDMASK \
+(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
+ | 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA \
+ | 1 << ND_CMD_SET_CONFIG_DATA | 1 << ND_CMD_VENDOR_EFFECT_LOG_SIZE \
+ | 1 << ND_CMD_VENDOR_EFFECT_LOG | 1 << ND_CMD_VENDOR)
+
+/*
+ * Command numbers that the kernel needs to know about to handle
+ * non-default DSM revision ids
+ */
+enum nvdimm_family_cmds {
+ NVDIMM_INTEL_LATCH_SHUTDOWN = 10,
+ NVDIMM_INTEL_GET_MODES = 11,
+ NVDIMM_INTEL_GET_FWINFO = 12,
+ NVDIMM_INTEL_START_FWUPDATE = 13,
+ NVDIMM_INTEL_SEND_FWUPDATE = 14,
+ NVDIMM_INTEL_FINISH_FWUPDATE = 15,
+ NVDIMM_INTEL_QUERY_FWUPDATE = 16,
+ NVDIMM_INTEL_SET_THRESHOLD = 17,
+ NVDIMM_INTEL_INJECT_ERROR = 18,
+};
+
+#define NVDIMM_INTEL_CMDMASK \
+(NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \
+ | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \
+ | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \
+ | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \
+ | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN)
+
enum nfit_uuids {
/* for simplicity alias the uuid index with the family id */
NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
@@ -140,6 +171,9 @@ struct nfit_mem {
struct resource *flush_wpq;
unsigned long dsm_mask;
int family;
+ u32 has_lsi:1;
+ u32 has_lsr:1;
+ u32 has_lsw:1;
};
struct acpi_nfit_desc {
@@ -167,6 +201,7 @@ struct acpi_nfit_desc {
unsigned int init_complete:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
+ unsigned long bus_nfit_cmd_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
};
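As a quick check of the mask arithmetic above: the Intel-specific functions
occupy the nine contiguous bit positions 10..18, so NVDIMM_INTEL_CMDMASK is
NVDIMM_STANDARD_CMDMASK plus 0x7fc00. A standalone illustration:

#include <assert.h>

int main(void)
{
	unsigned long intel_only = 0;
	int func;

	/* NVDIMM_INTEL_LATCH_SHUTDOWN (10) .. NVDIMM_INTEL_INJECT_ERROR (18) */
	for (func = 10; func <= 18; func++)
		intel_only |= 1UL << func;

	assert(intel_only == 0x7fc00);
	return 0;
}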
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index db78d353bab1..3bb46cb24a99 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -663,6 +663,29 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
EXPORT_SYMBOL(acpi_os_write_port);
+int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
+{
+
+ switch (width) {
+ case 8:
+ *(u8 *) value = readb(virt_addr);
+ break;
+ case 16:
+ *(u16 *) value = readw(virt_addr);
+ break;
+ case 32:
+ *(u32 *) value = readl(virt_addr);
+ break;
+ case 64:
+ *(u64 *) value = readq(virt_addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
@@ -670,6 +693,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
unsigned int size = width / 8;
bool unmap = false;
u64 dummy;
+ int error;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
@@ -684,22 +708,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
if (!value)
value = &dummy;
- switch (width) {
- case 8:
- *(u8 *) value = readb(virt_addr);
- break;
- case 16:
- *(u16 *) value = readw(virt_addr);
- break;
- case 32:
- *(u32 *) value = readl(virt_addr);
- break;
- case 64:
- *(u64 *) value = readq(virt_addr);
- break;
- default:
- BUG();
- }
+ error = acpi_os_read_iomem(virt_addr, value, width);
+ BUG_ON(error);
if (unmap)
iounmap(virt_addr);
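The refactoring above keeps acpi_os_read_memory() behaviour intact (a bad
width still hits BUG_ON()) while letting other code reuse the width switch
and handle failure gracefully. A hypothetical caller, sketch only and not
part of this patch ('reg_phys' is a placeholder physical address):

	void __iomem *base = ioremap(reg_phys, sizeof(u32));
	u64 val;

	if (base) {
		if (acpi_os_read_iomem(base, &val, 32))
			pr_warn("unsupported MMIO access width\n");
		iounmap(base);
	}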
diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h
index e8bfa7b865a5..095afc96952e 100644
--- a/drivers/acpi/pmic/intel_pmic.h
+++ b/drivers/acpi/pmic/intel_pmic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INTEL_PMIC_H
#define __INTEL_PMIC_H
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..109c1e9c9c7a
--- /dev/null
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -0,0 +1,137 @@
+/*
+ * Dollar Cove TI PMIC operation region driver
+ * Copyright (C) 2014 Intel Corporation. All rights reserved.
+ *
+ * Rewritten and cleaned up
+ * Copyright (C) 2017 Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/platform_device.h>
+#include "intel_pmic.h"
+
+/* registers stored in 16bit BE (high:low, total 10bit) */
+#define CHTDC_TI_VBAT 0x54
+#define CHTDC_TI_DIETEMP 0x56
+#define CHTDC_TI_BPTHERM 0x58
+#define CHTDC_TI_GPADC 0x5a
+
+static struct pmic_table chtdc_ti_power_table[] = {
+ { .address = 0x00, .reg = 0x41 },
+ { .address = 0x04, .reg = 0x42 },
+ { .address = 0x08, .reg = 0x43 },
+ { .address = 0x0c, .reg = 0x45 },
+ { .address = 0x10, .reg = 0x46 },
+ { .address = 0x14, .reg = 0x47 },
+ { .address = 0x18, .reg = 0x48 },
+ { .address = 0x1c, .reg = 0x49 },
+ { .address = 0x20, .reg = 0x4a },
+ { .address = 0x24, .reg = 0x4b },
+ { .address = 0x28, .reg = 0x4c },
+ { .address = 0x2c, .reg = 0x4d },
+ { .address = 0x30, .reg = 0x4e },
+};
+
+static struct pmic_table chtdc_ti_thermal_table[] = {
+ {
+ .address = 0x00,
+ .reg = CHTDC_TI_GPADC
+ },
+ {
+ .address = 0x0c,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP2 -> SYSTEMP */
+ {
+ .address = 0x18,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP3 -> BPTHERM */
+ {
+ .address = 0x24,
+ .reg = CHTDC_TI_BPTHERM
+ },
+ {
+ .address = 0x30,
+ .reg = CHTDC_TI_GPADC
+ },
+ /* TMP5 -> DIETEMP */
+ {
+ .address = 0x3c,
+ .reg = CHTDC_TI_DIETEMP
+ },
+};
+
+static int chtdc_ti_pmic_get_power(struct regmap *regmap, int reg, int bit,
+ u64 *value)
+{
+ int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & 1;
+ return 0;
+}
+
+static int chtdc_ti_pmic_update_power(struct regmap *regmap, int reg, int bit,
+ bool on)
+{
+ return regmap_update_bits(regmap, reg, 1, on);
+}
+
+static int chtdc_ti_pmic_get_raw_temp(struct regmap *regmap, int reg)
+{
+ u8 buf[2];
+
+ if (regmap_bulk_read(regmap, reg, buf, 2))
+ return -EIO;
+
+ /* stored in big-endian */
+ return ((buf[0] & 0x03) << 8) | buf[1];
+}
+
+static struct intel_pmic_opregion_data chtdc_ti_pmic_opregion_data = {
+ .get_power = chtdc_ti_pmic_get_power,
+ .update_power = chtdc_ti_pmic_update_power,
+ .get_raw_temp = chtdc_ti_pmic_get_raw_temp,
+ .power_table = chtdc_ti_power_table,
+ .power_table_count = ARRAY_SIZE(chtdc_ti_power_table),
+ .thermal_table = chtdc_ti_thermal_table,
+ .thermal_table_count = ARRAY_SIZE(chtdc_ti_thermal_table),
+};
+
+static int chtdc_ti_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
+ int err;
+
+ err = intel_pmic_install_opregion_handler(&pdev->dev,
+ ACPI_HANDLE(pdev->dev.parent), pmic->regmap,
+ &chtdc_ti_pmic_opregion_data);
+ if (err < 0)
+ return err;
+
+ /* Re-enumerate devices depending on PMIC */
+ acpi_walk_dep_device_list(ACPI_HANDLE(pdev->dev.parent));
+ return 0;
+}
+
+static const struct platform_device_id chtdc_ti_pmic_opregion_id_table[] = {
+ { .name = "chtdc_ti_region" },
+ {},
+};
+
+static struct platform_driver chtdc_ti_pmic_opregion_driver = {
+ .probe = chtdc_ti_pmic_opregion_probe,
+ .driver = {
+ .name = "cht_dollar_cove_ti_pmic",
+ },
+ .id_table = chtdc_ti_pmic_opregion_id_table,
+};
+module_platform_driver(chtdc_ti_pmic_opregion_driver);
+
+MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
+MODULE_LICENSE("GPL v2");
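The ADC registers behind chtdc_ti_pmic_get_raw_temp() hold a 10-bit value
split across two big-endian bytes, with only the low two bits of the high
byte significant. A standalone worked example of the decode (the sample
bytes are made up):

#include <assert.h>
#include <stdint.h>

static int decode_raw(const uint8_t buf[2])
{
	return ((buf[0] & 0x03) << 8) | buf[1];
}

int main(void)
{
	const uint8_t sample[2] = { 0x02, 0x9a };

	assert(decode_raw(sample) == 0x29a);				/* 666 */
	assert(decode_raw((const uint8_t[2]){ 0xff, 0xff }) == 0x3ff);	/* 10-bit max */
	return 0;
}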
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
new file mode 100644
index 000000000000..7f3c567e8168
--- /dev/null
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -0,0 +1,455 @@
+/*
+ * TI TPS68470 PMIC operation region driver
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * Author: Rajmohan Mani <rajmohan.mani@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based on drivers/acpi/pmic/intel_pmic* drivers
+ */
+
+#include <linux/acpi.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct tps68470_pmic_table {
+ u32 address; /* operation region address */
+ u32 reg; /* corresponding register */
+ u32 bitmask; /* bit mask for power, clock */
+};
+
+#define TI_PMIC_POWER_OPREGION_ID 0xB0
+#define TI_PMIC_VR_VAL_OPREGION_ID 0xB1
+#define TI_PMIC_CLOCK_OPREGION_ID 0xB2
+#define TI_PMIC_CLKFREQ_OPREGION_ID 0xB3
+
+struct tps68470_pmic_opregion {
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+#define S_IO_I2C_EN (BIT(0) | BIT(1))
+
+static const struct tps68470_pmic_table power_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_S_I2C_CTL,
+ .bitmask = S_IO_I2C_EN,
+ /* S_I2C_CTL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_VCMCTL,
+ .bitmask = BIT(0),
+ /* VCMCTL */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_VAUX1CTL,
+ .bitmask = BIT(0),
+ /* VAUX1_CTL */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_VAUX2CTL,
+ .bitmask = BIT(0),
+ /* VAUX2CTL */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_VACTL,
+ .bitmask = BIT(0),
+ /* VACTL */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_VDCTL,
+ .bitmask = BIT(0),
+ /* VDCTL */
+ },
+};
+
+/* Table to set voltage regulator value */
+static const struct tps68470_pmic_table vr_val_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_VSIOVAL,
+ .bitmask = TPS68470_VSIOVAL_IOVOLT_MASK,
+ /* TPS68470_REG_VSIOVAL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_VIOVAL,
+ .bitmask = TPS68470_VIOVAL_IOVOLT_MASK,
+ /* TPS68470_REG_VIOVAL */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_VCMVAL,
+ .bitmask = TPS68470_VCMVAL_VCVOLT_MASK,
+ /* TPS68470_REG_VCMVAL */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_VAUX1VAL,
+ .bitmask = TPS68470_VAUX1VAL_AUX1VOLT_MASK,
+ /* TPS68470_REG_VAUX1VAL */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_VAUX2VAL,
+ .bitmask = TPS68470_VAUX2VAL_AUX2VOLT_MASK,
+ /* TPS68470_REG_VAUX2VAL */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_VAVAL,
+ .bitmask = TPS68470_VAVAL_AVOLT_MASK,
+ /* TPS68470_REG_VAVAL */
+ },
+ {
+ .address = 0x18,
+ .reg = TPS68470_REG_VDVAL,
+ .bitmask = TPS68470_VDVAL_DVOLT_MASK,
+ /* TPS68470_REG_VDVAL */
+ },
+};
+
+/* Table to configure clock frequency */
+static const struct tps68470_pmic_table clk_freq_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_POSTDIV2,
+ .bitmask = BIT(0) | BIT(1),
+ /* TPS68470_REG_POSTDIV2 */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_BOOSTDIV,
+ .bitmask = 0x1F,
+ /* TPS68470_REG_BOOSTDIV */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_BUCKDIV,
+ .bitmask = 0x0F,
+ /* TPS68470_REG_BUCKDIV */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_PLLSWR,
+ .bitmask = 0x13,
+ /* TPS68470_REG_PLLSWR */
+ },
+ {
+ .address = 0x10,
+ .reg = TPS68470_REG_XTALDIV,
+ .bitmask = 0xFF,
+ /* TPS68470_REG_XTALDIV */
+ },
+ {
+ .address = 0x14,
+ .reg = TPS68470_REG_PLLDIV,
+ .bitmask = 0xFF,
+ /* TPS68470_REG_PLLDIV */
+ },
+ {
+ .address = 0x18,
+ .reg = TPS68470_REG_POSTDIV,
+ .bitmask = 0x83,
+ /* TPS68470_REG_POSTDIV */
+ },
+};
+
+/* Table to configure and enable clocks */
+static const struct tps68470_pmic_table clk_table[] = {
+ {
+ .address = 0x00,
+ .reg = TPS68470_REG_PLLCTL,
+ .bitmask = 0xF5,
+ /* TPS68470_REG_PLLCTL */
+ },
+ {
+ .address = 0x04,
+ .reg = TPS68470_REG_PLLCTL2,
+ .bitmask = BIT(0),
+ /* TPS68470_REG_PLLCTL2 */
+ },
+ {
+ .address = 0x08,
+ .reg = TPS68470_REG_CLKCFG1,
+ .bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
+ TPS68470_CLKCFG1_MODE_B_MASK,
+ /* TPS68470_REG_CLKCFG1 */
+ },
+ {
+ .address = 0x0C,
+ .reg = TPS68470_REG_CLKCFG2,
+ .bitmask = TPS68470_CLKCFG1_MODE_A_MASK |
+ TPS68470_CLKCFG1_MODE_B_MASK,
+ /* TPS68470_REG_CLKCFG2 */
+ },
+};
+
+static int pmic_get_reg_bit(u64 address,
+ const struct tps68470_pmic_table *table,
+ const unsigned int table_size, int *reg,
+ int *bitmask)
+{
+ u64 i;
+
+ i = address / 4;
+ if (i >= table_size)
+ return -ENOENT;
+
+ if (!reg || !bitmask)
+ return -EINVAL;
+
+ *reg = table[i].reg;
+ *bitmask = table[i].bitmask;
+
+ return 0;
+}
+
+static int tps68470_pmic_get_power(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bitmask) ? 1 : 0;
+ return 0;
+}
+
+static int tps68470_pmic_get_vr_val(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & bitmask;
+ return 0;
+}
+
+static int tps68470_pmic_get_clk(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = (data & bitmask) ? 1 : 0;
+ return 0;
+}
+
+static int tps68470_pmic_get_clk_freq(struct regmap *regmap, int reg,
+ int bitmask, u64 *value)
+{
+ unsigned int data;
+
+ if (regmap_read(regmap, reg, &data))
+ return -EIO;
+
+ *value = data & bitmask;
+ return 0;
+}
+
+static int ti_tps68470_regmap_update_bits(struct regmap *regmap, int reg,
+ int bitmask, u64 value)
+{
+ return regmap_update_bits(regmap, reg, bitmask, value);
+}
+
+static acpi_status tps68470_pmic_common_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *region_context,
+ int (*get)(struct regmap *,
+ int, int, u64 *),
+ int (*update)(struct regmap *,
+ int, int, u64),
+ const struct tps68470_pmic_table *tbl,
+ unsigned int tbl_size)
+{
+ struct tps68470_pmic_opregion *opregion = region_context;
+ struct regmap *regmap = opregion->regmap;
+ int reg, ret, bitmask;
+
+ if (bits != 32)
+ return AE_BAD_PARAMETER;
+
+ ret = pmic_get_reg_bit(address, tbl, tbl_size, &reg, &bitmask);
+ if (ret < 0)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_WRITE && *value > bitmask)
+ return AE_BAD_PARAMETER;
+
+ mutex_lock(&opregion->lock);
+
+ ret = (function == ACPI_READ) ?
+ get(regmap, reg, bitmask, value) :
+ update(regmap, reg, bitmask, *value);
+
+ mutex_unlock(&opregion->lock);
+
+ return ret ? AE_ERROR : AE_OK;
+}
+
+static acpi_status tps68470_pmic_cfreq_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_clk_freq,
+ ti_tps68470_regmap_update_bits,
+ clk_freq_table,
+ ARRAY_SIZE(clk_freq_table));
+}
+
+static acpi_status tps68470_pmic_clk_handler(u32 function,
+ acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_clk,
+ ti_tps68470_regmap_update_bits,
+ clk_table,
+ ARRAY_SIZE(clk_table));
+}
+
+static acpi_status tps68470_pmic_vrval_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_vr_val,
+ ti_tps68470_regmap_update_bits,
+ vr_val_table,
+ ARRAY_SIZE(vr_val_table));
+}
+
+static acpi_status tps68470_pmic_pwr_handler(u32 function,
+ acpi_physical_address address,
+ u32 bits, u64 *value,
+ void *handler_context,
+ void *region_context)
+{
+ if (bits != 32)
+ return AE_BAD_PARAMETER;
+
+ /* set/clear for bit 0, bits 0 and 1 together */
+ if (function == ACPI_WRITE &&
+ !(*value == 0 || *value == 1 || *value == 3)) {
+ return AE_BAD_PARAMETER;
+ }
+
+ return tps68470_pmic_common_handler(function, address, bits, value,
+ region_context,
+ tps68470_pmic_get_power,
+ ti_tps68470_regmap_update_bits,
+ power_table,
+ ARRAY_SIZE(power_table));
+}
+
+static int tps68470_pmic_opregion_probe(struct platform_device *pdev)
+{
+ struct regmap *tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
+ acpi_handle handle = ACPI_HANDLE(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct tps68470_pmic_opregion *opregion;
+ acpi_status status;
+
+ if (!dev || !tps68470_regmap) {
+ dev_warn(dev, "dev or regmap is NULL\n");
+ return -EINVAL;
+ }
+
+ if (!handle) {
+ dev_warn(dev, "acpi handle is NULL\n");
+ return -ENODEV;
+ }
+
+ opregion = devm_kzalloc(dev, sizeof(*opregion), GFP_KERNEL);
+ if (!opregion)
+ return -ENOMEM;
+
+ mutex_init(&opregion->lock);
+ opregion->regmap = tps68470_regmap;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_POWER_OPREGION_ID,
+ tps68470_pmic_pwr_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_mutex_destroy;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_VR_VAL_OPREGION_ID,
+ tps68470_pmic_vrval_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_power_handler;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_CLOCK_OPREGION_ID,
+ tps68470_pmic_clk_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_vr_val_handler;
+
+ status = acpi_install_address_space_handler(handle,
+ TI_PMIC_CLKFREQ_OPREGION_ID,
+ tps68470_pmic_cfreq_handler,
+ NULL, opregion);
+ if (ACPI_FAILURE(status))
+ goto out_remove_clk_handler;
+
+ return 0;
+
+out_remove_clk_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_CLOCK_OPREGION_ID,
+ tps68470_pmic_clk_handler);
+out_remove_vr_val_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_VR_VAL_OPREGION_ID,
+ tps68470_pmic_vrval_handler);
+out_remove_power_handler:
+ acpi_remove_address_space_handler(handle, TI_PMIC_POWER_OPREGION_ID,
+ tps68470_pmic_pwr_handler);
+out_mutex_destroy:
+ mutex_destroy(&opregion->lock);
+ return -ENODEV;
+}
+
+static struct platform_driver tps68470_pmic_opregion_driver = {
+ .probe = tps68470_pmic_opregion_probe,
+ .driver = {
+ .name = "tps68470_pmic_opregion",
+ },
+};
+
+builtin_platform_driver(tps68470_pmic_opregion_driver)
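pmic_get_reg_bit() above maps an operation-region address to a table row by
dividing by the 4-byte field width, so address 0x08 selects the third entry.
A standalone model with placeholder register numbers (not the real TPS68470
values):

#include <assert.h>
#include <stddef.h>

struct entry { unsigned int address, reg, bitmask; };

static const struct entry table[] = {
	{ 0x00, 0x11, 0x03 },
	{ 0x04, 0x12, 0x01 },
	{ 0x08, 0x13, 0x01 },
};

static int lookup(unsigned long long address, unsigned int *reg,
		  unsigned int *bitmask)
{
	unsigned long long i = address / 4;

	if (i >= sizeof(table) / sizeof(table[0]))
		return -1;
	*reg = table[i].reg;
	*bitmask = table[i].bitmask;
	return 0;
}

int main(void)
{
	unsigned int reg, mask;

	assert(lookup(0x08, &reg, &mask) == 0 && reg == 0x13);
	assert(lookup(0x0c, &reg, &mask) == -1);	/* past the table */
	return 0;
}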
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 85ac848ac6ab..652f19e6c541 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2736e25e9dc6..d50a7b6ccddd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -710,6 +710,8 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
{
+ acpi_unlazy_tlb(smp_processor_id());
+
/*
* Must be done before busmaster disable as we might need to
* access HPET !
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 74f738cb6073..813f1b78c16a 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Intel Corporation
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6c77e8b37bd..71769fd687b2 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/acpi.h>
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d85e010ee2cc..316a0fc785e3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -381,6 +381,7 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
case ACPI_ACTIVE_BOTH:
if (triggering == ACPI_EDGE_SENSITIVE)
return IRQ_TYPE_EDGE_BOTH;
+ /* fall through */
default:
return IRQ_TYPE_NONE;
}
diff --git a/drivers/acpi/sbshc.h b/drivers/acpi/sbshc.h
index a57b0762dd7f..06372a37df10 100644
--- a/drivers/acpi/sbshc.h
+++ b/drivers/acpi/sbshc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
struct acpi_smb_hc;
enum acpi_smb_protocol {
SMBUS_WRITE_QUICK = 2,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 602f8ff212f2..e14e964bfe6d 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1505,41 +1505,38 @@ static void acpi_init_coherency(struct acpi_device *adev)
adev->flags.coherent_dma = cca;
}
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
{
- bool *is_spi_i2c_slave_p = data;
+ bool *is_serial_bus_slave_p = data;
if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
return 1;
- /*
- * devices that are connected to UART still need to be enumerated to
- * platform bus
- */
- if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
- *is_spi_i2c_slave_p = true;
+ *is_serial_bus_slave_p = true;
/* no need to do more checking */
return -1;
}
-static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+static bool acpi_is_serial_bus_slave(struct acpi_device *device)
{
struct list_head resource_list;
- bool is_spi_i2c_slave = false;
+ bool is_serial_bus_slave = false;
/* Macs use device properties in lieu of _CRS resources */
if (x86_apple_machine &&
(fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
- fwnode_property_present(&device->fwnode, "i2cAddress")))
+ fwnode_property_present(&device->fwnode, "i2cAddress") ||
+ fwnode_property_present(&device->fwnode, "baud")))
return true;
INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
- &is_spi_i2c_slave);
+ acpi_dev_get_resources(device, &resource_list,
+ acpi_check_serial_bus_slave,
+ &is_serial_bus_slave);
acpi_dev_free_resource_list(&resource_list);
- return is_spi_i2c_slave;
+ return is_serial_bus_slave;
}
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
@@ -1557,7 +1554,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
+ device->flags.serial_bus_slave = acpi_is_serial_bus_slave(device);
acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
@@ -1841,10 +1838,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
- * Do not enumerate SPI/I2C slaves as they will be enumerated by their
- * respective parents.
+ * Do not enumerate SPI/I2C/UART slaves as they will be enumerated by
+ * their respective parents.
*/
- if (!device->flags.spi_i2c_slave) {
+ if (!device->flags.serial_bus_slave) {
acpi_create_platform_device(device, NULL);
acpi_device_set_enumerated(device);
} else {
@@ -1941,7 +1938,7 @@ static void acpi_bus_attach(struct acpi_device *device)
return;
device->flags.match_driver = true;
- if (ret > 0 && !device->flags.spi_i2c_slave) {
+ if (ret > 0 && !device->flags.serial_bus_slave) {
acpi_device_set_enumerated(device);
goto ok;
}
@@ -1950,7 +1947,7 @@ static void acpi_bus_attach(struct acpi_device *device)
if (ret < 0)
return;
- if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
+ if (!device->pnp.type.platform_id && !device->flags.serial_bus_slave)
acpi_device_set_enumerated(device);
else
acpi_default_enumeration(device);
@@ -2122,6 +2119,7 @@ int __init acpi_scan_init(void)
acpi_int340x_thermal_init();
acpi_amba_init();
acpi_watchdog_init();
+ acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6804ddab3052..8082871b409a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
+static bool acpi_sleep_no_lps0;
+
+static int __init init_no_lps0(const struct dmi_system_id *d)
+{
+ acpi_sleep_no_lps0 = true;
+ return 0;
+}
+
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
@@ -343,6 +351,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196907
+ * Some Dell XPS13 9360 machines cannot do suspend-to-idle using the Low Power
+ * S0 Idle firmware interface.
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "Dell XPS13 9360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ },
+ },
{},
};
@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+#define acpi_sleep_no_lps0 (false)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
@@ -863,6 +885,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
+ if (acpi_sleep_no_lps0) {
+ acpi_handle_info(adev->handle,
+ "Low Power S0 Idle interface disabled\n");
+ return 0;
+ }
+
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0;
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index a82ff74faf7a..41675d24a9bc 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 78a5a23010ab..06a150bb35bf 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sysfs.c - ACPI sysfs interface to userspace.
*/
@@ -168,7 +169,8 @@ module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
static char trace_method_name[1024];
-int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
+static int param_set_trace_method_name(const char *val,
+ const struct kernel_param *kp)
{
u32 saved_flags = 0;
bool is_abs_path = true;
@@ -229,7 +231,8 @@ module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name,
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
-static int param_set_trace_state(const char *val, struct kernel_param *kp)
+static int param_set_trace_state(const char *val,
+ const struct kernel_param *kp)
{
acpi_status status;
const char *method = trace_method_name;
@@ -265,7 +268,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
return 0;
}
-static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
return sprintf(buffer, "disable");
@@ -294,7 +297,8 @@ MODULE_PARM_DESC(aml_debug_output,
"To enable/disable the ACPI Debug Object output.");
/* /sys/module/acpi/parameters/acpica_version */
-static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
+static int param_get_acpica_version(char *buffer,
+ const struct kernel_param *kp)
{
int result;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0a9e5979aaa9..9d49a1acebe3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -355,6 +355,7 @@ acpi_evaluate_reference(acpi_handle handle,
}
if (package->package.count > ACPI_MAX_HANDLES) {
+ kfree(package);
return AE_NO_MEMORY;
}
list->count = package->package.count;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 1638401ab282..9614126bf56e 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* wakeup.c - support wakeup devices
* Copyright (C) 2004 Li Shaohua <shaohua.li@intel.com>
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index b4fbb9929482..ec5b0f190231 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -71,18 +71,34 @@ static const struct always_present_id always_present_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
}),
/*
- * The GPD win BIOS dated 20170320 has disabled the accelerometer, the
+ * The GPD win BIOS dated 20170221 has disabled the accelerometer, the
* drivers sometimes cause crashes under Windows and this is how the
* manufacturer has solved this :| Note that the DMI data is less
* generic than it seems, a board_vendor of "AMI Corporation" is quite
* rare and a board_name of "Default String" also is rare.
+ *
+ * Unfortunately the GPD pocket also uses these strings and its BIOS
+ * was copy-pasted from the GPD win, so it has a disabled KIOX000A
+ * node which we should not enable, thus we also check the BIOS date.
*/
ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
+ }),
+ ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
}),
+ ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BIOS_DATE, "05/25/2017")
+ }),
};
bool acpi_device_always_present(struct acpi_device *adev)
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
index 294ba6f36396..fb6c7e0b4cce 100644
--- a/drivers/amba/Kconfig
+++ b/drivers/amba/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config ARM_AMBA
bool
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index e0f74ddc22b7..594c228d2f02 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -195,6 +195,7 @@ struct bus_type amba_bustype = {
.match = amba_match,
.uevent = amba_uevent,
.pm = &amba_pm,
+ .force_dma = true,
};
static int __init amba_init(void)
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 9801d852bd56..7dce3795b887 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Android"
config ANDROID
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fddf76ef5bd6..a73596a4f804 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -150,7 +150,7 @@ static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
- struct kernel_param *kp)
+ const struct kernel_param *kp)
{
int ret;
@@ -2192,7 +2192,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
off_start,
offp - off_start);
if (!parent) {
- pr_err("transaction release %d bad parent offset",
+ pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c2819a3d58a6..6f6f745605af 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
@@ -215,7 +215,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
@@ -437,7 +437,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
@@ -478,7 +478,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- end_page_addr, NULL);
+ end_page_addr);
return ERR_PTR(-ENOMEM);
}
@@ -562,8 +562,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
alloc->pid, buffer->data,
prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE,
- NULL);
+ buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
@@ -600,8 +599,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
@@ -984,7 +982,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 488c93724220..cb5339166563 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# SATA/PATA driver configuration
#
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index ff9cd2e37458..8daec3e657f8 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ATA) += libata.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9f78bb03bb76..5443cb71d7ba 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -57,6 +57,7 @@ enum {
AHCI_PCI_BAR_STA2X11 = 0,
AHCI_PCI_BAR_CAVIUM = 0,
AHCI_PCI_BAR_ENMOTUS = 2,
+ AHCI_PCI_BAR_CAVIUM_GEN5 = 4,
AHCI_PCI_BAR_STANDARD = 5,
};
@@ -1570,8 +1571,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
- else if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
- ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
+ else if (pdev->vendor == PCI_VENDOR_ID_CAVIUM) {
+ if (pdev->device == 0xa01c)
+ ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
+ if (pdev->device == 0xa084)
+ ahci_pci_bar = AHCI_PCI_BAR_CAVIUM_GEN5;
+ }
/* acquire resources */
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8b61123d2c3c..749fd94441b0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -303,6 +303,7 @@ struct ahci_em_priv {
unsigned long saved_activity;
unsigned long activity;
unsigned long led_state;
+ struct ata_link *link;
};
struct ahci_port_priv {
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 207649d323c5..5ecc9d46cb54 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -32,15 +32,27 @@
#define AHCI_VEND_PP3C 0xB0
#define AHCI_VEND_PP4C 0xB4
#define AHCI_VEND_PP5C 0xB8
+#define AHCI_VEND_AXICC 0xBC
#define AHCI_VEND_PAXIC 0xC0
#define AHCI_VEND_PTC 0xC8
/* Vendor Specific Register bit definitions */
#define PAXIC_ADBW_BW64 0x1
-#define PAXIC_MAWIDD (1 << 8)
-#define PAXIC_MARIDD (1 << 16)
+#define PAXIC_MAWID(i) (((i) * 2) << 4)
+#define PAXIC_MARID(i) (((i) * 2) << 12)
+#define PAXIC_MARIDD(i) ((((i) * 2) + 1) << 16)
+#define PAXIC_MAWIDD(i) ((((i) * 2) + 1) << 8)
#define PAXIC_OTL (0x4 << 20)
+/* Register bit definitions for cache control */
+#define AXICC_ARCA_VAL (0xF << 0)
+#define AXICC_ARCF_VAL (0xF << 4)
+#define AXICC_ARCH_VAL (0xF << 8)
+#define AXICC_ARCP_VAL (0xF << 12)
+#define AXICC_AWCFD_VAL (0xF << 16)
+#define AXICC_AWCD_VAL (0xF << 20)
+#define AXICC_AWCF_VAL (0xF << 24)
+
#define PCFG_TPSS_VAL (0x32 << 16)
#define PCFG_TPRS_VAL (0x2 << 12)
#define PCFG_PAD_VAL 0x2
@@ -50,21 +62,6 @@
#define PPCFG_PSS_EN (1 << 29)
#define PPCFG_ESDF_EN (1 << 31)
-#define PP2C_CIBGMN 0x0F
-#define PP2C_CIBGMX (0x25 << 8)
-#define PP2C_CIBGN (0x18 << 16)
-#define PP2C_CINMP (0x29 << 24)
-
-#define PP3C_CWBGMN 0x04
-#define PP3C_CWBGMX (0x0B << 8)
-#define PP3C_CWBGN (0x08 << 16)
-#define PP3C_CWNMP (0x0F << 24)
-
-#define PP4C_BMX 0x0a
-#define PP4C_BNM (0x08 << 8)
-#define PP4C_SFD (0x4a << 16)
-#define PP4C_PTST (0x06 << 24)
-
#define PP5C_RIT 0x60216
#define PP5C_RCT (0x7f0 << 20)
@@ -75,6 +72,7 @@
#define PORT1_BASE 0x180
/* Port Control Register Bit Definitions */
+#define PORT_SCTL_SPD_GEN3 (0x3 << 4)
#define PORT_SCTL_SPD_GEN2 (0x2 << 4)
#define PORT_SCTL_SPD_GEN1 (0x1 << 4)
#define PORT_SCTL_IPM (0x3 << 8)
@@ -85,13 +83,43 @@
#define DRV_NAME "ahci-ceva"
#define CEVA_FLAG_BROKEN_GEN2 1
+static unsigned int rx_watermark = PTC_RX_WM_VAL;
+module_param(rx_watermark, uint, 0644);
+MODULE_PARM_DESC(rx_watermark, "RxWaterMark value (0 - 0x80)");
+
struct ceva_ahci_priv {
struct platform_device *ahci_pdev;
+ /* Port Phy2Cfg Register */
+ u32 pp2c[NR_PORTS];
+ u32 pp3c[NR_PORTS];
+ u32 pp4c[NR_PORTS];
+ u32 pp5c[NR_PORTS];
+ /* Axi Cache Control Register */
+ u32 axicc;
+ bool is_cci_enabled;
int flags;
};
+static unsigned int ceva_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ u32 err_mask;
+
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (err_mask)
+ return err_mask;
+ /*
+ * Since the CEVA controller does not support the device sleep feature, we
+ * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
+ */
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+
+ return 0;
+}
+
static struct ata_port_operations ahci_ceva_ops = {
.inherits = &ahci_platform_ops,
+ .read_id = ceva_ahci_read_id,
};
static const struct ata_port_info ahci_ceva_port_info = {
@@ -108,14 +136,6 @@ static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
u32 tmp;
int i;
- /*
- * AXI Data bus width to 64
- * Set Mem Addr Read, Write ID for data transfers
- * Transfer limit to 72 DWord
- */
- tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD | PAXIC_MARIDD | PAXIC_OTL;
- writel(tmp, mmio + AHCI_VEND_PAXIC);
-
/* Set AHCI Enable */
tmp = readl(mmio + HOST_CTL);
tmp |= HOST_AHCI_EN;
@@ -126,32 +146,48 @@ static void ahci_ceva_setup(struct ahci_host_priv *hpriv)
tmp = PCFG_TPSS_VAL | PCFG_TPRS_VAL | (PCFG_PAD_VAL + i);
writel(tmp, mmio + AHCI_VEND_PCFG);
+ /*
+ * AXI Data bus width to 64
+ * Set Mem Addr Read, Write ID for data transfers
+ * Set Mem Addr Read ID, Write ID for non-data transfers
+ * Transfer limit to 72 DWord
+ */
+ tmp = PAXIC_ADBW_BW64 | PAXIC_MAWIDD(i) | PAXIC_MARIDD(i) |
+ PAXIC_MAWID(i) | PAXIC_MARID(i) | PAXIC_OTL;
+ writel(tmp, mmio + AHCI_VEND_PAXIC);
+
+ /* Set AXI cache control register if CCi is enabled */
+ if (cevapriv->is_cci_enabled) {
+ tmp = readl(mmio + AHCI_VEND_AXICC);
+ tmp |= AXICC_ARCA_VAL | AXICC_ARCF_VAL |
+ AXICC_ARCH_VAL | AXICC_ARCP_VAL |
+ AXICC_AWCFD_VAL | AXICC_AWCD_VAL |
+ AXICC_AWCF_VAL;
+ writel(tmp, mmio + AHCI_VEND_AXICC);
+ }
+
/* Port Phy Cfg register enables */
tmp = PPCFG_TTA | PPCFG_PSS_EN | PPCFG_ESDF_EN;
writel(tmp, mmio + AHCI_VEND_PPCFG);
/* Phy Control OOB timing parameters COMINIT */
- tmp = PP2C_CIBGMN | PP2C_CIBGMX | PP2C_CIBGN | PP2C_CINMP;
- writel(tmp, mmio + AHCI_VEND_PP2C);
+ writel(cevapriv->pp2c[i], mmio + AHCI_VEND_PP2C);
/* Phy Control OOB timing parameters COMWAKE */
- tmp = PP3C_CWBGMN | PP3C_CWBGMX | PP3C_CWBGN | PP3C_CWNMP;
- writel(tmp, mmio + AHCI_VEND_PP3C);
+ writel(cevapriv->pp3c[i], mmio + AHCI_VEND_PP3C);
/* Phy Control Burst timing setting */
- tmp = PP4C_BMX | PP4C_BNM | PP4C_SFD | PP4C_PTST;
- writel(tmp, mmio + AHCI_VEND_PP4C);
+ writel(cevapriv->pp4c[i], mmio + AHCI_VEND_PP4C);
/* Rate Change Timer and Retry Interval Timer setting */
- tmp = PP5C_RIT | PP5C_RCT;
- writel(tmp, mmio + AHCI_VEND_PP5C);
+ writel(cevapriv->pp5c[i], mmio + AHCI_VEND_PP5C);
/* Rx Watermark setting */
- tmp = PTC_RX_WM_VAL | PTC_RSVD;
+ tmp = rx_watermark | PTC_RSVD;
writel(tmp, mmio + AHCI_VEND_PTC);
- /* Default to Gen 2 Speed and Gen 1 if Gen2 is broken */
- tmp = PORT_SCTL_SPD_GEN2 | PORT_SCTL_IPM;
+ /* Default to Gen 3 Speed and Gen 1 if Gen2 is broken */
+ tmp = PORT_SCTL_SPD_GEN3 | PORT_SCTL_IPM;
if (cevapriv->flags & CEVA_FLAG_BROKEN_GEN2)
tmp = PORT_SCTL_SPD_GEN1 | PORT_SCTL_IPM;
writel(tmp, mmio + PORT_SCR_CTL + PORT_BASE + PORT_OFFSET * i);
@@ -168,6 +204,7 @@ static int ceva_ahci_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ceva_ahci_priv *cevapriv;
+ enum dev_dma_attr attr;
int rc;
cevapriv = devm_kzalloc(dev, sizeof(*cevapriv), GFP_KERNEL);
@@ -187,6 +224,65 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
+ /* Read OOB timing value for COMINIT from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
+ (u8 *)&cevapriv->pp2c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
+ (u8 *)&cevapriv->pp2c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read OOB timing value for COMWAKE from device-tree*/
+ if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
+ (u8 *)&cevapriv->pp3c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
+ (u8 *)&cevapriv->pp3c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read phy BURST timing value from device-tree */
+ if (of_property_read_u8_array(np, "ceva,p0-burst-params",
+ (u8 *)&cevapriv->pp4c[0], 4) < 0) {
+ dev_warn(dev, "ceva,p0-burst-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u8_array(np, "ceva,p1-burst-params",
+ (u8 *)&cevapriv->pp4c[1], 4) < 0) {
+ dev_warn(dev, "ceva,p1-burst-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /* Read phy RETRY interval timing value from device-tree */
+ if (of_property_read_u16_array(np, "ceva,p0-retry-params",
+ (u16 *)&cevapriv->pp5c[0], 2) < 0) {
+ dev_warn(dev, "ceva,p0-retry-params property not defined\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u16_array(np, "ceva,p1-retry-params",
+ (u16 *)&cevapriv->pp5c[1], 2) < 0) {
+ dev_warn(dev, "ceva,p1-retry-params property not defined\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if CCI is enabled for SATA. The DEV_DMA_COHERENT is returned
+ * if CCI is enabled, so check for DEV_DMA_COHERENT.
+ */
+ attr = device_get_dma_attr(dev);
+ cevapriv->is_cci_enabled = (attr == DEV_DMA_COHERENT);
+
hpriv->plat_data = cevapriv;
/* CEVA specific initialization */
@@ -206,12 +302,37 @@ disable_resources:
static int __maybe_unused ceva_ahci_suspend(struct device *dev)
{
- return ahci_platform_suspend_host(dev);
+ return ahci_platform_suspend(dev);
}
static int __maybe_unused ceva_ahci_resume(struct device *dev)
{
- return ahci_platform_resume_host(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
+
+ /* Configure CEVA specific config before resuming HBA */
+ ahci_ceva_setup(hpriv);
+
+ rc = ahci_platform_resume_host(dev);
+ if (rc)
+ goto disable_resources;
+
+ /* We resumed so update PM runtime state */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+
+ return rc;
}
static SIMPLE_DEV_PM_OPS(ahci_ceva_pm_ops, ceva_ahci_suspend, ceva_ahci_resume);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 787567e840bd..a58bcc069c54 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -230,7 +230,7 @@ static int read_adc_sum(void *dev, u16 rtune_ctl_reg, void __iomem * mmio)
{
u16 adc_out_reg, read_sum;
u32 index, read_attempt;
- const u32 attempt_limit = 100;
+ const u32 attempt_limit = 200;
imx_phy_reg_addressing(SATA_PHY_CR_CLOCK_RTUNE_CTL, mmio);
imx_phy_reg_write(rtune_ctl_reg, mmio);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3e286d86ab42..a0de7a38430c 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -968,12 +968,12 @@ static void ahci_sw_activity(struct ata_link *link)
mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}
-static void ahci_sw_activity_blink(unsigned long arg)
+static void ahci_sw_activity_blink(struct timer_list *t)
{
- struct ata_link *link = (struct ata_link *)arg;
+ struct ahci_em_priv *emp = from_timer(emp, t, timer);
+ struct ata_link *link = emp->link;
struct ata_port *ap = link->ap;
- struct ahci_port_priv *pp = ap->private_data;
- struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
+
unsigned long led_message = emp->led_state;
u32 activity_led_state;
unsigned long flags;
@@ -1020,7 +1020,8 @@ static void ahci_init_sw_activity(struct ata_link *link)
/* init activity stats, setup timer */
emp->saved_activity = emp->activity = 0;
- setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
+ emp->link = link;
+ timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
/* check our blink policy and set flag for link if it's enabled */
if (emp->blink_policy)
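The conversion above follows the usual timer_setup()/from_timer() pattern:
the callback now receives the timer_list pointer and recovers its container,
so any extra context (here the ata_link) has to live next to the timer
instead of being smuggled through an unsigned long. A generic sketch of the
pattern, not taken from this driver:

	struct foo {
		struct timer_list timer;
		struct ata_link *link;	/* context formerly passed as the timer arg */
	};

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo *foo = from_timer(foo, t, timer);

		/* use foo->link ... */
	}

	/* at init time */
	timer_setup(&foo->timer, foo_timer_fn, 0);
	mod_timer(&foo->timer, jiffies + msecs_to_jiffies(10));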
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index a270a1173c8c..341d0ef82cbd 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -295,6 +295,7 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
node->name);
break;
}
+ /* fall through */
case -ENODEV:
/* continue normally */
hpriv->phys[port] = NULL;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ee4c1ec9dca0..2a882929de4a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1879,6 +1879,7 @@ retry:
switch (class) {
case ATA_DEV_SEMB:
class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
+ /* fall through */
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
tf.command = ATA_CMD_ID_ATA;
@@ -2975,6 +2976,7 @@ int ata_bus_probe(struct ata_port *ap)
case -ENODEV:
/* give it just one more chance */
tries[dev->devno] = min(tries[dev->devno], 1);
+ /* fall through */
case -EIO:
if (tries[dev->devno] == 1) {
/* This is the last chance, better to slow
@@ -3462,6 +3464,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
case ATA_DNXFER_FORCE_PIO0:
pio_mask &= 1;
+ /* fall through */
case ATA_DNXFER_FORCE_PIO:
mwdma_mask = 0;
udma_mask = 0;
@@ -3964,6 +3967,7 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
scontrol &= ~(0x1 << 8);
scontrol |= (0x6 << 8);
break;
+ case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0)
/* no restrictions on LPM transitions */
@@ -5823,7 +5827,7 @@ void ata_host_resume(struct ata_host *host)
}
#endif
-struct device_type ata_port_type = {
+const struct device_type ata_port_type = {
.name = "ata_port",
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
@@ -5979,9 +5983,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
INIT_LIST_HEAD(&ap->eh_done_q);
init_waitqueue_head(&ap->eh_wait_q);
init_completion(&ap->park_req_pending);
- setup_deferrable_timer(&ap->fastdrain_timer,
- ata_eh_fastdrain_timerfn,
- (unsigned long)ap);
+ timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
+ TIMER_DEFERRABLE);
ap->cbl = ATA_CBL_NONE;
@@ -6904,7 +6907,7 @@ static int __init ata_parse_force_one(char **cur,
return -EINVAL;
}
if (nr_matches > 1) {
- *reason = "ambigious value";
+ *reason = "ambiguous value";
return -EINVAL;
}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e4effef0c83f..11c3137d7b0a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,9 +879,9 @@ static int ata_eh_nr_in_flight(struct ata_port *ap)
return nr;
}
-void ata_eh_fastdrain_timerfn(unsigned long arg)
+void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
- struct ata_port *ap = (void *)arg;
+ struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
unsigned long flags;
int cnt;
@@ -2264,8 +2264,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
eflags |= ATA_EFLAG_DUBIOUS_XFER;
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
+ trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
- trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
DPRINTK("EXIT\n");
}
@@ -3454,9 +3454,9 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
* @r_failed_dev: out parameter for failed device
*
* Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power
- * policy, and then call driver specific callbacks for
- * enabling Host Initiated Power management.
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
*
* LOCKING:
* EH context.
@@ -3502,7 +3502,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
hints &= ~ATA_LPM_HIPM;
/* disable DIPM before changing link config */
- if (policy != ATA_LPM_MIN_POWER && dipm) {
+ if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
@@ -3545,7 +3545,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
ata_id_has_dipm(dev->id)) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
@@ -3711,9 +3711,11 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
case -ENODEV:
/* device missing or wrong IDENTIFY data, schedule probing */
ehc->i.probe_mask |= (1 << dev->devno);
+ /* fall through */
case -EINVAL:
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
+ /* fall through */
case -EIO:
if (ehc->tries[dev->devno] == 1) {
/* This is the last chance, better to slow
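
A minimal sketch of the timer-API conversion these libata hunks (and the ATM driver hunks below) apply: the callback now takes a struct timer_list * and recovers its container via from_timer(), so the old unsigned long data argument and its casts disappear. All names below are illustrative, not taken from the patch.

/* Illustrative only: "struct foo" and its fields are hypothetical. */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list poll_timer;
	unsigned long polls;
};

static void foo_poll(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its containing struct foo */
	struct foo *f = from_timer(f, t, poll_timer);

	f->polls++;
	mod_timer(&f->poll_timer, jiffies + HZ);
}

static void foo_start(struct foo *f)
{
	/* flags such as TIMER_DEFERRABLE go in the third argument */
	timer_setup(&f->poll_timer, foo_poll, 0);
	mod_timer(&f->poll_timer, jiffies + HZ);
}
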
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 44ba292f2cd7..66be961c93a4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -106,10 +106,11 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
};
static const char *ata_lpm_policy_names[] = {
- [ATA_LPM_UNKNOWN] = "max_performance",
- [ATA_LPM_MAX_POWER] = "max_performance",
- [ATA_LPM_MED_POWER] = "medium_power",
- [ATA_LPM_MIN_POWER] = "min_power",
+ [ATA_LPM_UNKNOWN] = "max_performance",
+ [ATA_LPM_MAX_POWER] = "max_performance",
+ [ATA_LPM_MED_POWER] = "medium_power",
+ [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm",
+ [ATA_LPM_MIN_POWER] = "min_power",
};
static ssize_t ata_scsi_lpm_store(struct device *device,
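
The DIPM checks in the libata-eh.c hunks above switch from equality tests to ordering comparisons (policy < ATA_LPM_MED_POWER_WITH_DIPM, policy >= ATA_LPM_MED_POWER_WITH_DIPM), which relies on the policy enum being ordered from least to most aggressive power saving, with the new value placed between medium_power and min_power. A sketch of that assumed ordering; the real enum lives in include/linux/libata.h and is not part of this diff.

/* Sketch only -- do not compile next to libata.h, which defines the real enum. */
enum ata_lpm_policy_sketch {
	ATA_LPM_UNKNOWN,
	ATA_LPM_MAX_POWER,
	ATA_LPM_MED_POWER,
	ATA_LPM_MED_POWER_WITH_DIPM,	/* new policy sits between MED and MIN */
	ATA_LPM_MIN_POWER,
};

static int policy_wants_dipm(enum ata_lpm_policy_sketch policy)
{
	/* DIPM is enabled for med_power_with_dipm and anything more aggressive */
	return policy >= ATA_LPM_MED_POWER_WITH_DIPM;
}
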
@@ -2145,7 +2146,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
*/
static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
{
- const u8 versions[] = {
+ static const u8 versions[] = {
0x00,
0x60, /* SAM-3 (no version claimed) */
@@ -2155,7 +2156,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x03,
0x00 /* SPC-3 (no version claimed) */
};
- const u8 versions_zbc[] = {
+ static const u8 versions_zbc[] = {
0x00,
0xA0, /* SAM-5 (no version claimed) */
@@ -2227,7 +2228,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
int num_pages;
- const u8 pages[] = {
+ static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
0x83, /* page 0x83, device ident page */
@@ -2258,7 +2259,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
{
- const u8 hdr[] = {
+ static const u8 hdr[] = {
0,
0x80, /* this page code */
0,
@@ -2580,7 +2581,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
- const u8 sat_blk_desc[] = {
+ static const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
0,
0, 0x2, 0x0 /* block length: 512 bytes */
diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h
index 2820cf864f11..08a57fb9dc61 100644
--- a/drivers/ata/libata-transport.h
+++ b/drivers/ata/libata-transport.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LIBATA_TRANSPORT_H
#define _LIBATA_TRANSPORT_H
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 23a62e4015d0..de4ddd0e8550 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/libata.h>
#include <linux/cdrom.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 839d487394b7..f953cb4bb1ba 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -51,7 +51,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
-extern struct device_type ata_port_type;
+extern const struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
@@ -154,7 +154,7 @@ extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
extern void ata_eh_acquire(struct ata_port *ap);
extern void ata_eh_release(struct ata_port *ap);
extern void ata_scsi_error(struct Scsi_Host *host);
-extern void ata_eh_fastdrain_timerfn(unsigned long arg);
+extern void ata_eh_fastdrain_timerfn(struct timer_list *t);
extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
extern void ata_dev_disable(struct ata_device *dev);
extern void ata_eh_detach_dev(struct ata_device *dev);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 96c05c9494fa..6b3355343542 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -242,7 +242,7 @@ static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
+ unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 3ea50dc5ea47..3729e2448eb6 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -171,6 +171,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
default:
printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
+ /* fall through */
case 9 ... 12:
clocks = 7; /* 12 clk */
break;
@@ -203,6 +204,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
default:
printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
+ /* fall through */
case 12: /* default 12 clk */
clocks = 0;
break;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 82bfd51692f3..ffd8d33c6e0f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -84,7 +84,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
*/
static struct pdc2027x_pio_timing {
u8 value0, value1, value2;
-} pdc2027x_pio_timing_tbl [] = {
+} pdc2027x_pio_timing_tbl[] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
@@ -94,7 +94,7 @@ static struct pdc2027x_pio_timing {
static struct pdc2027x_mdma_timing {
u8 value0, value1;
-} pdc2027x_mdma_timing_tbl [] = {
+} pdc2027x_mdma_timing_tbl[] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
{ 0x6b, 0x27 }, /* MDMA mode 1 */
{ 0x69, 0x25 }, /* MDMA mode 2 */
@@ -102,7 +102,7 @@ static struct pdc2027x_mdma_timing {
static struct pdc2027x_udma_timing {
u8 value0, value1, value2;
-} pdc2027x_udma_timing_tbl [] = {
+} pdc2027x_udma_timing_tbl[] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ce128d5a6ded..6af4ec3c88c3 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -248,6 +248,7 @@ static int sata_dwc_dma_init_old(struct platform_device *pdev,
return -ENOMEM;
hsdev->dma->dev = &pdev->dev;
+ hsdev->dma->id = pdev->id;
/* Get SATA DMA interrupt number */
hsdev->dma->irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
index ca1837a394c8..6f6e691d6007 100644
--- a/drivers/ata/sata_gemini.h
+++ b/drivers/ata/sata_gemini.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Header for the Gemini SATA bridge */
#ifndef SATA_GEMINI_H
#define SATA_GEMINI_H
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3b2246dded74..cc208b72b199 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2387,7 +2387,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
": attempting PIO w/multiple DRQ: "
"this may fail due to h/w errata\n");
}
- /* drop through */
+ /* fall through */
case ATA_PROT_NODATA:
case ATAPI_PROT_PIO:
case ATAPI_PROT_NODATA:
@@ -2478,20 +2478,18 @@ static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
- struct ata_eh_info *ehi;
unsigned int pmp;
/*
* Initialize EH info for PMPs which saw device errors
*/
- ehi = &ap->link.eh_info;
for (pmp = 0; pmp_map != 0; pmp++) {
unsigned int this_pmp = (1 << pmp);
if (pmp_map & this_pmp) {
struct ata_link *link = &ap->pmp_link[pmp];
+ struct ata_eh_info *ehi = &link->eh_info;
pmp_map &= ~this_pmp;
- ehi = &link->eh_info;
ata_ehi_clear_desc(ehi);
ata_ehi_push_desc(ehi, "dev err");
ehi->err_mask |= AC_ERR_DEV;
@@ -3877,7 +3875,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
}
- /* drop through */
+ /* fall through */
case chip_6042:
hpriv->ops = &mv6xxx_ops;
hp_flags |= MV_HP_GEN_IIE;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 537d11869069..80ee2f2a50d0 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -872,7 +872,6 @@ MODULE_DEVICE_TABLE(of, sata_rcar_match);
static int sata_rcar_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct ata_host *host;
struct sata_rcar_priv *priv;
struct resource *mem;
@@ -888,11 +887,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- of_id = of_match_device(sata_rcar_match, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
- priv->type = (enum sata_rcar_type)of_id->data;
+ priv->type = (enum sata_rcar_type)of_device_get_match_data(&pdev->dev);
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "failed to get access to sata clock\n");
diff --git a/drivers/ata/sis.h b/drivers/ata/sis.h
index f7f3eebe666c..0be49691fb24 100644
--- a/drivers/ata/sis.h
+++ b/drivers/ata/sis.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
struct ata_port_info;
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 31c60101a69a..2e2efa577437 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# ATM device configuration
#
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index c6c9ee9f5da2..aa191616a72e 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux network (ATM) device drivers.
#
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index acf16c323e38..dd286ad404f8 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -293,7 +293,7 @@ static inline void __init show_version (void) {
*/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
/********** globals **********/
static unsigned short debug = 0;
@@ -1493,8 +1493,8 @@ static const struct atmdev_ops amb_ops = {
};
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
- amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+ amb_dev * dev = from_timer(dev, t, housekeeping);
// could collect device-specific (not driver/atm-linux) stats here
@@ -2267,8 +2267,7 @@ static int amb_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
- setup_timer(&dev->housekeeping, do_housekeeping,
- (unsigned long)dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index 565e53a5cb78..de1ed802cbf8 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/eni.h - Efficient Networks ENI155P device driver declarations */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 6b6368a56526..d97c05690faa 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1656,9 +1656,9 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
#ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
{
- struct fs_dev *dev = (struct fs_dev *) data;
+ struct fs_dev *dev = from_timer(dev, t, timer);
fs_irq (0, dev);
dev->timer.expires = jiffies + FS_POLL_FREQ;
@@ -1885,9 +1885,7 @@ static int fs_init(struct fs_dev *dev)
}
#ifdef FS_POLL_FREQ
- init_timer (&dev->timer);
- dev->timer.data = (unsigned long) dev;
- dev->timer.function = fs_poll;
+ timer_setup(&dev->timer, fs_poll, 0);
dev->timer.expires = jiffies + FS_POLL_FREQ;
add_timer (&dev->timer);
#endif
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f8b7e86907cc..126855e6cb7d 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -358,26 +358,33 @@ fore200e_shutdown(struct fore200e* fore200e)
case FORE200E_STATE_COMPLETE:
kfree(fore200e->stats);
+ /* fall through */
case FORE200E_STATE_IRQ:
free_irq(fore200e->irq, fore200e->atm_dev);
+ /* fall through */
case FORE200E_STATE_ALLOC_BUF:
fore200e_free_rx_buf(fore200e);
+ /* fall through */
case FORE200E_STATE_INIT_BSQ:
fore200e_uninit_bs_queue(fore200e);
+ /* fall through */
case FORE200E_STATE_INIT_RXQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
+ /* fall through */
case FORE200E_STATE_INIT_TXQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
+ /* fall through */
case FORE200E_STATE_INIT_CMDQ:
fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
+ /* fall through */
case FORE200E_STATE_INITIALIZE:
/* nothing to do for that state */
@@ -390,6 +397,7 @@ fore200e_shutdown(struct fore200e* fore200e)
case FORE200E_STATE_MAP:
fore200e->bus->unmap(fore200e);
+ /* fall through */
case FORE200E_STATE_CONFIGURE:
/* nothing to do for that state */
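
The fore200e_shutdown() hunks annotate an intentional fall-through ladder: each case undoes one initialization step and then falls through to undo the earlier ones, so the switch tears the device down in reverse order of setup. A minimal sketch of the idiom with made-up names.

/* Illustrative sketch only: all names here are hypothetical. */
struct fctx { int state; };
enum { S_NONE, S_GOT_IRQ, S_GOT_BUF, S_DONE };

static void put_rest(struct fctx *c) { /* undo the final init step */ }
static void put_bufs(struct fctx *c) { /* free the buffers */ }
static void put_irq(struct fctx *c)  { /* release the interrupt */ }

static void teardown(struct fctx *c)
{
	switch (c->state) {
	case S_DONE:
		put_rest(c);
		/* fall through */
	case S_GOT_BUF:
		put_bufs(c);
		/* fall through */
	case S_GOT_IRQ:
		put_irq(c);
		/* fall through */
	case S_NONE:
		break;
	}
}
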
diff --git a/drivers/atm/fore200e.h b/drivers/atm/fore200e.h
index ba34a02b717d..c8a02c8fba15 100644
--- a/drivers/atm/fore200e.h
+++ b/drivers/atm/fore200e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FORE200E_H
#define _FORE200E_H
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 7e76b35f422c..5ddc203206b8 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -357,7 +357,7 @@ static inline void __init show_version (void) {
/********** globals **********/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
static unsigned short debug = 0;
static unsigned short vpi_bits = 0;
@@ -1418,9 +1418,9 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
// just stats at the moment
- hrz_dev * dev = (hrz_dev *) arg;
+ hrz_dev * dev = from_timer(dev, t, housekeeping);
// collect device-specific (not driver/atm-linux) stats here
dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
@@ -2796,14 +2796,14 @@ static int hrz_probe(struct pci_dev *pci_dev,
dev->atm_dev->ci_range.vpi_bits = vpi_bits;
dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
- setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
out:
return err;
out_free_irq:
- free_irq(dev->irq, dev);
+ free_irq(irq, dev);
out_free:
kfree(dev);
out_release:
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 082aa02abc57..0a67487c0b1d 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -45,12 +45,12 @@ static DEFINE_SPINLOCK(idt77105_priv_lock);
#define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
#define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
-static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func, 0, 0);
-static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func, 0, 0);
+static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
+static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func);
static int start_timer = 1;
static struct idt77105_priv *idt77105_all = NULL;
@@ -80,7 +80,7 @@ static u16 get_counter(struct atm_dev *dev, int counter)
* a separate copy of the stats allows implementation of
* an ioctl which gathers the stats *without* zero'ing them.
*/
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
@@ -109,7 +109,7 @@ static void idt77105_stats_timer_func(unsigned long dummy)
* interrupts need to be disabled when the cable is pulled out
* to avoid lots of spurious cell error interrupts.
*/
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
@@ -306,11 +306,9 @@ static int idt77105_start(struct atm_dev *dev)
if (start_timer) {
start_timer = 0;
- setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL);
stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD;
add_timer(&stats_timer);
- setup_timer(&restart_timer, idt77105_restart_timer_func, 0UL);
restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD;
add_timer(&restart_timer);
}
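
The idt77105.c hunks also move to the two-argument DEFINE_TIMER(): the expires and data arguments are gone, the callback takes a struct timer_list *, and the setup_timer() calls in idt77105_start() become redundant because DEFINE_TIMER() already initializes the timer. A sketch of the resulting shape with hypothetical names.

/* Illustrative only: sample_timer and its callback are hypothetical. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void sample_timer_fn(struct timer_list *unused);
static DEFINE_TIMER(sample_timer, sample_timer_fn);	/* no expires/data arguments */

static void sample_timer_fn(struct timer_list *unused)
{
	/* shared static state replaces the old 'data' argument */
	mod_timer(&sample_timer, jiffies + HZ);
}

static void sample_start(void)
{
	/* no setup_timer() needed: DEFINE_TIMER() already initialized the timer */
	sample_timer.expires = jiffies + HZ;
	add_timer(&sample_timer);
}
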
diff --git a/drivers/atm/idt77105.h b/drivers/atm/idt77105.h
index 3fd2bc899761..8dfea9e361de 100644
--- a/drivers/atm/idt77105.h
+++ b/drivers/atm/idt77105.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/idt77105.h - IDT77105 (PHY) declarations */
/* Written 1999 by Greg Banks, NEC Australia <gnb@linuxfan.com>. Based on suni.h */
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 47f3c4ae0594..0277f36be85b 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1528,9 +1528,9 @@ idt77252_tx(struct idt77252_dev *card)
static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = (struct idt77252_dev *)data;
+ struct idt77252_dev *card = from_timer(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
@@ -2073,21 +2073,19 @@ idt77252_rate_logindex(struct idt77252_dev *card, int pcr)
}
static void
-idt77252_est_timer(unsigned long data)
+idt77252_est_timer(struct timer_list *t)
{
- struct vc_map *vc = (struct vc_map *)data;
+ struct rate_estimator *est = from_timer(est, t, timer);
+ struct vc_map *vc = est->vc;
struct idt77252_dev *card = vc->card;
- struct rate_estimator *est;
unsigned long flags;
u32 rate, cps;
u64 ncells;
u8 lacr;
spin_lock_irqsave(&vc->lock, flags);
- est = vc->estimator;
- if (!est)
+ if (!vc->estimator)
goto out;
-
ncells = est->cells;
rate = ((u32)(ncells - est->last_cells)) << (7 - est->interval);
@@ -2126,10 +2124,11 @@ idt77252_init_est(struct vc_map *vc, int pcr)
est->maxcps = pcr < 0 ? -pcr : pcr;
est->cps = est->maxcps;
est->avcps = est->cps << 5;
+ est->vc = vc;
est->interval = 2; /* XXX: make this configurable */
est->ewma_log = 2; /* XXX: make this configurable */
- setup_timer(&est->timer, idt77252_est_timer, (unsigned long)vc);
+ timer_setup(&est->timer, idt77252_est_timer, 0);
mod_timer(&est->timer, jiffies + ((HZ / 4) << est->interval));
return est;
@@ -2209,16 +2208,20 @@ static int
idt77252_init_ubr(struct idt77252_dev *card, struct vc_map *vc,
struct atm_vcc *vcc, struct atm_qos *qos)
{
+ struct rate_estimator *est = NULL;
unsigned long flags;
int tcr;
spin_lock_irqsave(&vc->lock, flags);
if (vc->estimator) {
- del_timer(&vc->estimator->timer);
- kfree(vc->estimator);
+ est = vc->estimator;
vc->estimator = NULL;
}
spin_unlock_irqrestore(&vc->lock, flags);
+ if (est) {
+ del_timer_sync(&est->timer);
+ kfree(est);
+ }
tcr = atm_pcr_goal(&qos->txtp);
if (tcr == 0)
@@ -3631,7 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
spin_lock_init(&card->cmd_lock);
spin_lock_init(&card->tst_lock);
- setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+ timer_setup(&card->tst_timer, tst_timer, 0);
/* Do the I/O remapping... */
card->membase = ioremap(membase, 1024);
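
The idt77252_init_ubr() hunk detaches the estimator under vc->lock but defers del_timer_sync() and kfree() until the lock is dropped: del_timer_sync() waits for a running callback, and idt77252_est_timer() takes vc->lock itself, so synchronizing under the lock could deadlock. A minimal sketch of that detach-then-teardown shape with hypothetical names.

/* Illustrative only: vc_sketch/est_sketch are hypothetical stand-ins. */
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct est_sketch { struct timer_list timer; };
struct vc_sketch  { spinlock_t lock; struct est_sketch *est; };

static void stop_estimator(struct vc_sketch *vc)
{
	struct est_sketch *est = NULL;
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	if (vc->est) {
		est = vc->est;		/* detach while holding the lock */
		vc->est = NULL;
	}
	spin_unlock_irqrestore(&vc->lock, flags);

	if (est) {
		del_timer_sync(&est->timer);	/* may wait for the callback; not under vc->lock */
		kfree(est);
	}
}
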
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 3a82cc23a053..9339197d701c 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -184,6 +184,8 @@ struct aal1 {
unsigned char sequence;
};
+struct vc_map;
+
struct rate_estimator {
struct timer_list timer;
unsigned int interval;
@@ -193,6 +195,7 @@ struct rate_estimator {
long avcps;
u32 cps;
u32 maxcps;
+ struct vc_map *vc;
};
struct vc_map {
diff --git a/drivers/atm/idt77252_tables.h b/drivers/atm/idt77252_tables.h
index b6c8ee512fb4..12b81e046a7b 100644
--- a/drivers/atm/idt77252_tables.h
+++ b/drivers/atm/idt77252_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Do not edit, automatically generated by `./genrtbl'.
*
* Cell Line Rate: 353207.55 (155520000 bps)
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index fc72b763fdd7..98a3a43484c8 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -75,8 +75,8 @@ static void desc_dbg(IADEV *iadev);
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
-static void ia_led_timer(unsigned long arg);
-static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
+static void ia_led_timer(struct timer_list *unused);
+static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
@@ -880,7 +880,7 @@ static void ia_phy_write(struct iadev_priv *iadev,
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
- static const struct ia_reg suni_ds3_init [] = {
+ static const struct ia_reg suni_ds3_init[] = {
{ SUNI_DS3_FRM_INTR_ENBL, 0x17 },
{ SUNI_DS3_FRM_CFG, 0x01 },
{ SUNI_DS3_TRAN_CFG, 0x01 },
@@ -898,7 +898,7 @@ static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
- static const struct ia_reg suni_e3_init [] = {
+ static const struct ia_reg suni_e3_init[] = {
{ SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
{ SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
{ SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
@@ -918,7 +918,7 @@ static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
- static const struct ia_reg suni_init [] = {
+ static const struct ia_reg suni_init[] = {
/* Enable RSOP loss of signal interrupt. */
{ SUNI_INTR_ENBL, 0x28 },
/* Clear error counters. */
@@ -2432,7 +2432,7 @@ static void ia_update_stats(IADEV *iadev) {
return;
}
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
unsigned long flags;
static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
u_char i;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 2351dad78ff5..6664aa50789e 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1761,9 +1761,9 @@ static void iter_dequeue(struct lanai_dev *lanai, vci_t vci)
}
#endif /* !DEBUG_RW */
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = (struct lanai_dev *) arg;
+ struct lanai_dev *lanai = from_timer(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
@@ -1790,10 +1790,8 @@ static void lanai_timed_poll(unsigned long arg)
static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
{
- init_timer(&lanai->timer);
+ timer_setup(&lanai->timer, lanai_timed_poll, 0);
lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
- lanai->timer.data = (unsigned long) lanai;
- lanai->timer.function = lanai_timed_poll;
add_timer(&lanai->timer);
}
diff --git a/drivers/atm/midway.h b/drivers/atm/midway.h
index d8bec0f2a71c..d47307adc0c9 100644
--- a/drivers/atm/midway.h
+++ b/drivers/atm/midway.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/midway.h - Efficient Networks Midway (SAR) description */
/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index a9702836cbae..cbec9adc01c7 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -145,7 +145,7 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -284,10 +284,8 @@ static int __init nicstar_init(void)
XPRINTK("nicstar: nicstar_init() returned.\n");
if (!error) {
- init_timer(&ns_timer);
+ timer_setup(&ns_timer, ns_poll, 0);
ns_timer.expires = jiffies + NS_POLL_PERIOD;
- ns_timer.data = 0UL;
- ns_timer.function = ns_poll;
add_timer(&ns_timer);
}
@@ -2681,7 +2679,7 @@ static void which_list(ns_dev * card, struct sk_buff *skb)
}
#endif /* EXTRA_DEBUG */
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
{
int i;
ns_dev *card;
diff --git a/drivers/atm/nicstar.h b/drivers/atm/nicstar.h
index 9bc27ea5088e..1b7f1dfc1735 100644
--- a/drivers/atm/nicstar.h
+++ b/drivers/atm/nicstar.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nicstar.h
*
diff --git a/drivers/atm/nicstarmac.c b/drivers/atm/nicstarmac.c
index f594526f8c6d..e0dda9062e6b 100644
--- a/drivers/atm/nicstarmac.c
+++ b/drivers/atm/nicstarmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* this file included by nicstar.c
*/
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 9a676ee30824..1830d1b8619f 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
SOLOS_ATTR_RO(DriverVersion)
SOLOS_ATTR_RO(APIVersion)
SOLOS_ATTR_RO(FirmwareVersion)
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index b0363149b2fd..b8825f2d79e0 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -53,7 +53,7 @@ static DEFINE_SPINLOCK(sunis_lock);
if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
-static void suni_hz(unsigned long from_timer)
+static void suni_hz(struct timer_list *timer)
{
struct suni_priv *walk;
struct atm_dev *dev;
@@ -85,7 +85,7 @@ static void suni_hz(unsigned long from_timer)
((GET(TACP_TCC) & 0xff) << 8) |
((GET(TACP_TCCM) & 7) << 16));
}
- if (from_timer) mod_timer(&poll_timer,jiffies+HZ);
+ if (timer) mod_timer(&poll_timer,jiffies+HZ);
}
@@ -322,13 +322,11 @@ static int suni_start(struct atm_dev *dev)
printk(KERN_WARNING "%s(itf %d): no signal\n",dev->type,
dev->number);
PRIV(dev)->loop_mode = ATM_LM_NONE;
- suni_hz(0); /* clear SUNI counters */
+ suni_hz(NULL); /* clear SUNI counters */
(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
if (first) {
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, suni_hz, 0);
poll_timer.expires = jiffies+HZ;
- poll_timer.function = suni_hz;
- poll_timer.data = 1;
#if 0
printk(KERN_DEBUG "[u] p=0x%lx,n=0x%lx\n",(unsigned long) poll_timer.list.prev,
(unsigned long) poll_timer.list.next);
diff --git a/drivers/atm/suni.h b/drivers/atm/suni.h
index 7e3e656b3993..d28a50d47d8b 100644
--- a/drivers/atm/suni.h
+++ b/drivers/atm/suni.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* drivers/atm/suni.h - S/UNI PHY driver
*/
diff --git a/drivers/atm/tonga.h b/drivers/atm/tonga.h
index 672da96243ca..771b3f95246c 100644
--- a/drivers/atm/tonga.h
+++ b/drivers/atm/tonga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/tonga.h - Efficient Networks Tonga (PCI bridge) declarations */
/* Written 1995 by Werner Almesberger, EPFL LRC */
diff --git a/drivers/atm/uPD98401.h b/drivers/atm/uPD98401.h
index 0ab36503a4b7..f766a5ef0c5d 100644
--- a/drivers/atm/uPD98401.h
+++ b/drivers/atm/uPD98401.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/uPD98401.h - NEC uPD98401 (SAR) declarations */
/* Written 1995 by Werner Almesberger, EPFL LRC */
diff --git a/drivers/atm/uPD98402.h b/drivers/atm/uPD98402.h
index c947214db7e3..437cfaa20c96 100644
--- a/drivers/atm/uPD98402.h
+++ b/drivers/atm/uPD98402.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/uPD98402.h - NEC uPD98402 (PHY) declarations */
/* Written 1995 by Werner Almesberger, EPFL LRC */
diff --git a/drivers/atm/zatm.h b/drivers/atm/zatm.h
index ae9165ce15a0..8204369fe825 100644
--- a/drivers/atm/zatm.h
+++ b/drivers/atm/zatm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/zatm.h - ZeitNet ZN122x device driver declarations */
/* Written 1995-1998 by Werner Almesberger, EPFL LRC/ICA */
diff --git a/drivers/atm/zeprom.h b/drivers/atm/zeprom.h
index 019bb82490e9..88e01f808a86 100644
--- a/drivers/atm/zeprom.h
+++ b/drivers/atm/zeprom.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* drivers/atm/zeprom.h - ZeitNet ZN122x EEPROM (NM93C46) declarations */
/* Written 1995,1996 by Werner Almesberger, EPFL LRC */
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 9ae6681c90ad..2c2ed9cf8796 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
@@ -135,6 +136,7 @@ config CFAG12864B_RATE
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
default y if MIPS_MALTA || MIPS_SEAD3
select SYSCON
help
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 2b8af3dc5e42..7ac6776ca3f6 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel auxiliary displays device drivers.
#
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 25306fa27251..db040b378224 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -229,9 +229,9 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
* Scroll the current message along the LCD by one character, rearming the
* timer if required.
*/
-static void img_ascii_lcd_scroll(unsigned long arg)
+static void img_ascii_lcd_scroll(struct timer_list *t)
{
- struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+ struct img_ascii_lcd_ctx *ctx = from_timer(ctx, t, timer);
unsigned int i, ch = ctx->scroll_pos;
unsigned int num_chars = ctx->cfg->num_chars;
@@ -299,7 +299,7 @@ static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
ctx->scroll_pos = 0;
/* update the LCD */
- img_ascii_lcd_scroll((unsigned long)ctx);
+ img_ascii_lcd_scroll(&ctx->timer);
return 0;
}
@@ -395,9 +395,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
ctx->scroll_rate = HZ / 2;
/* initialise a timer for scrolling the message */
- init_timer(&ctx->timer);
- ctx->timer.function = img_ascii_lcd_scroll;
- ctx->timer.data = (unsigned long)ctx;
+ timer_setup(&ctx->timer, img_ascii_lcd_scroll, 0);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 6911acd896d9..ea7869c0d7f9 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1396,7 +1396,7 @@ static void panel_process_inputs(void)
}
}
-static void panel_scan_timer(void)
+static void panel_scan_timer(struct timer_list *unused)
{
if (keypad.enabled && keypad_initialized) {
if (spin_trylock_irq(&pprt_lock)) {
@@ -1421,7 +1421,7 @@ static void init_scan_timer(void)
if (scan_timer.function)
return; /* already started */
- setup_timer(&scan_timer, (void *)&panel_scan_timer, 0);
+ timer_setup(&scan_timer, panel_scan_timer, 0);
scan_timer.expires = jiffies + INPUT_POLL_TIME;
add_timer(&scan_timer);
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 1a5f6a157a57..2f6614c9a229 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Generic Driver Options"
config UEVENT_HELPER
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 397e5c344e6a..e32a52490051 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux device tree
obj-y := component.o core.o bus.o dd.o syscore.o \
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 6df7d6676a48..4de87b0b53c8 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,14 +22,23 @@
#include <linux/string.h>
#include <linux/sched/topology.h>
-static DEFINE_MUTEX(cpu_scale_mutex);
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
{
- return per_cpu(cpu_scale, cpu);
+ unsigned long scale;
+ int i;
+
+ scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+ for_each_cpu(i, cpus)
+ per_cpu(freq_scale, i) = scale;
}
+static DEFINE_MUTEX(cpu_scale_mutex);
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
per_cpu(cpu_scale, cpu) = capacity;
@@ -96,7 +105,7 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static int __init free_raw_capacity(void)
+static int free_raw_capacity(void)
{
kfree(raw_capacity);
raw_capacity = NULL;
@@ -212,6 +221,8 @@ static struct notifier_block init_cpu_capacity_notifier __initdata = {
static int __init register_cpufreq_notifier(void)
{
+ int ret;
+
/*
* on ACPI-based systems we need to use the default cpu capacity
* until we have the necessary code to parse the cpu capacity, so
@@ -227,8 +238,13 @@ static int __init register_cpufreq_notifier(void)
cpumask_copy(cpus_to_visit, cpu_possible_mask);
- return cpufreq_register_notifier(&init_cpu_capacity_notifier,
- CPUFREQ_POLICY_NOTIFIER);
+ ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (ret)
+ free_cpumask_var(cpus_to_visit);
+
+ return ret;
}
core_initcall(register_cpufreq_notifier);
@@ -236,6 +252,7 @@ static void __init parsing_done_workfn(struct work_struct *work)
{
cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
CPUFREQ_POLICY_NOTIFIER);
+ free_cpumask_var(cpus_to_visit);
}
#else
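
arch_set_freq_scale() expresses the current frequency as a fraction of the maximum in the scheduler's 1024-based capacity units. A worked example of that arithmetic; the helper below is illustrative, and SCHED_CAPACITY_SHIFT is assumed to be the kernel's value of 10.

/* Illustrative helper mirroring the computation above. */
#define SCHED_CAPACITY_SHIFT	10	/* assumed: the kernel's 1024-based scale */

static unsigned long freq_scale_of(unsigned long cur_khz, unsigned long max_khz)
{
	return (cur_khz << SCHED_CAPACITY_SHIFT) / max_khz;
}

/* e.g. freq_scale_of(1200000, 2400000) == 512, half of the 1024 capacity scale */
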
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 539432a14b5c..d800de650fa5 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/notifier.h>
/**
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 12ebd055724c..110230d86527 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
- drv = ACCESS_ONCE(dev->driver);
+ drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
@@ -1571,7 +1571,7 @@ static int device_add_class_symlinks(struct device *dev)
int error;
if (of_node) {
- error = sysfs_create_link(&dev->kobj, &of_node->kobj,"of_node");
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
if (error)
dev_warn(dev, "Error %d creating of_node link\n",error);
/* An error here doesn't warrant bringing down the device */
@@ -1958,7 +1958,6 @@ void device_del(struct device *dev)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
- device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
@@ -1986,6 +1985,7 @@ void device_del(struct device *dev)
device_pm_remove(dev);
driver_deferred_probe_del(dev);
device_remove_properties(dev);
+ device_links_purge(dev);
/* Notify the platform of the removal, in case they
* need to do anything...
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 227bac5f1191..58a9b608d821 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -18,6 +18,7 @@
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
+#include <linux/sched/isolation.h>
#include "base.h"
@@ -271,8 +272,16 @@ static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
+ cpumask_var_t isolated;
- n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map));
+ if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_andnot(isolated, cpu_possible_mask,
+ housekeeping_cpumask(HK_FLAG_DOMAIN));
+ n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));
+
+ free_cpumask_var(isolated);
return n;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index ad44b40fe284..2c964f56dafe 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -350,6 +350,15 @@ EXPORT_SYMBOL_GPL(device_bind_driver);
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+static void driver_deferred_probe_add_trigger(struct device *dev,
+ int local_trigger_count)
+{
+ driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
+}
+
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = -EPROBE_DEFER;
@@ -369,6 +378,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
}
ret = device_links_check_suppliers(dev);
+ if (ret == -EPROBE_DEFER)
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
if (ret)
return ret;
@@ -464,15 +475,13 @@ pinctrl_bind_failed:
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
switch (ret) {
case -EPROBE_DEFER:
/* Driver requested deferred probing */
dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
- driver_deferred_probe_add(dev);
- /* Did a trigger occur while probing? Need to re-trigger if yes */
- if (local_trigger_count != atomic_read(&deferred_trigger_count))
- driver_deferred_probe_trigger();
+ driver_deferred_probe_add_trigger(dev, local_trigger_count);
break;
case -ENODEV:
case -ENXIO:
@@ -869,6 +878,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index d2fb9c8ed205..50025d7959cb 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* devtmpfs - kernel-maintained tmpfs-based /dev
*
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 744f64f43454..1e6396bb807b 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Coherent per-device memory handling.
* Borrowed from i386
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4e3b61cda520..1d60b58a8c19 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Memory subsystem support
*
diff --git a/drivers/base/node.c b/drivers/base/node.c
index aae2402f3791..ee090ab9171c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Basic Node interface support
*/
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9045c5f3734e..c203fb90c1a0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = {
.match = platform_match,
.uevent = platform_uevent,
.pm = &platform_dev_pm_ops,
+ .force_dma = true,
};
EXPORT_SYMBOL_GPL(platform_bus_type);
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 5998c53280f5..e1bb691cf8f1 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e8ca5e2cf1e5..0c80bea05bcb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -124,6 +124,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -237,6 +238,95 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+/**
+ * dev_pm_genpd_set_performance_state - Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. This can be set to 0 when
+ * the device no longer has any performance-state constraints (the device then
+ * stops influencing the target performance state of the genpd).
+ *
+ * It is assumed that the caller guarantees that the genpd will not be
+ * detached while this routine is running.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data, *pd_data;
+ struct pm_domain_data *pdd;
+ unsigned int prev;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -ENODEV;
+
+ if (unlikely(!genpd->set_performance_state))
+ return -EINVAL;
+
+ if (unlikely(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ genpd_lock(genpd);
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ prev = gpd_data->performance_state;
+ gpd_data->performance_state = state;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ goto update_state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ if (state == genpd->performance_state)
+ goto unlock;
+
+ /*
+ * We aren't propagating performance state changes of a subdomain to its
+ * masters as we don't have hardware that needs it. Moreover, the
+ * performance states of a subdomain and its masters may not have a
+ * one-to-one mapping and would require additional information. We can
+ * get back to this once we have hardware that needs it. For that
+ * reason, we don't have to consider the performance states of the
+ * subdomains of the genpd here.
+ */
+
+update_state:
+ if (genpd_status_on(genpd)) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret) {
+ gpd_data->performance_state = prev;
+ goto unlock;
+ }
+ }
+
+ genpd->performance_state = state;
+
+unlock:
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
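
A hypothetical consumer sketch of the new API: a driver whose device sits in such a genpd requests a minimum performance state and drops the request with 0. The state numbering is domain-specific, and the declaration is assumed to come from the matching linux/pm_domain.h change accompanying this patch.

/* Hypothetical consumer; '3' is an arbitrary domain-specific state. */
#include <linux/device.h>
#include <linux/pm_domain.h>

static int my_dev_set_high_perf(struct device *dev, bool high)
{
	/* 0 drops this device's constraint; non-zero requests at least that state */
	return dev_pm_genpd_set_performance_state(dev, high ? 3 : 0);
}
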
@@ -256,6 +346,15 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+
+ if (unlikely(genpd->set_performance_state)) {
+ ret = genpd->set_performance_state(genpd, genpd->performance_state);
+ if (ret) {
+ pr_warn("%s: Failed to set performance state %d (%d)\n",
+ genpd->name, genpd->performance_state, ret);
+ }
+ }
+
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -346,9 +445,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
enum pm_qos_flags_status stat;
- stat = dev_pm_qos_flags(pdd->dev,
- PM_QOS_FLAG_NO_POWER_OFF
- | PM_QOS_FLAG_REMOTE_WAKEUP);
+ stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
if (stat > PM_QOS_FLAGS_NONE)
return -EBUSY;
@@ -749,11 +846,7 @@ late_initcall(genpd_power_off_unused);
#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)
-/**
- * pm_genpd_present - Check if the given PM domain has been initialized.
- * @genpd: PM domain to check.
- */
-static bool pm_genpd_present(const struct generic_pm_domain *genpd)
+static bool genpd_present(const struct generic_pm_domain *genpd)
{
const struct generic_pm_domain *gpd;
@@ -771,12 +864,6 @@ static bool pm_genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP
-static bool genpd_dev_active_wakeup(const struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
-}
-
/**
* genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -863,7 +950,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
* @genpd: PM domain the device belongs to.
*
* There are two cases in which a device that can wake up the system from sleep
- * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * states should be resumed by genpd_prepare(): (1) if the device is enabled
* to wake up the system and it has to remain active for this purpose while the
* system is in the sleep state and (2) if the device is not enabled to wake up
* the system from sleep states and it generally doesn't generate wakeup signals
@@ -881,12 +968,12 @@ static bool resume_needed(struct device *dev,
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd_dev_active_wakeup(genpd, dev);
+ active_wakeup = genpd_is_active_wakeup(genpd);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
/**
- * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * genpd_prepare - Start power transition of a device in a PM domain.
* @dev: Device to start the transition of.
*
* Start a power transition of a device (during a system-wide power transition)
@@ -894,7 +981,7 @@ static bool resume_needed(struct device *dev,
* an object of type struct generic_pm_domain representing a PM domain
* consisting of I/O devices.
*/
-static int pm_genpd_prepare(struct device *dev)
+static int genpd_prepare(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
@@ -921,7 +1008,7 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
ret = pm_generic_prepare(dev);
- if (ret) {
+ if (ret < 0) {
genpd_lock(genpd);
genpd->prepared_count--;
@@ -929,7 +1016,8 @@ static int pm_genpd_prepare(struct device *dev)
genpd_unlock(genpd);
}
- return ret;
+ /* Never return 1, as genpd doesn't cope with the direct_complete path. */
+ return ret >= 0 ? 0 : ret;
}
/**
@@ -950,7 +1038,7 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
if (poweroff)
@@ -975,13 +1063,13 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
}
/**
- * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int genpd_suspend_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -989,12 +1077,12 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Restore power to the device's PM domain, if necessary, and start the device.
*/
-static int pm_genpd_resume_noirq(struct device *dev)
+static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1005,7 +1093,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
return 0;
genpd_lock(genpd);
@@ -1024,7 +1112,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -1032,7 +1120,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_freeze_noirq(struct device *dev)
+static int genpd_freeze_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1054,13 +1142,13 @@ static int pm_genpd_freeze_noirq(struct device *dev)
}
/**
- * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int genpd_thaw_noirq(struct device *dev)
{
const struct generic_pm_domain *genpd;
int ret = 0;
@@ -1081,14 +1169,14 @@ static int pm_genpd_thaw_noirq(struct device *dev)
}
/**
- * pm_genpd_poweroff_noirq - Completion of hibernation of device in an
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
* I/O PM domain.
* @dev: Device to poweroff.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
-static int pm_genpd_poweroff_noirq(struct device *dev)
+static int genpd_poweroff_noirq(struct device *dev)
{
dev_dbg(dev, "%s()\n", __func__);
@@ -1096,13 +1184,13 @@ static int pm_genpd_poweroff_noirq(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
* Make sure the domain will be in the same power state as before the
* hibernation the system is resuming from and start the device if necessary.
*/
-static int pm_genpd_restore_noirq(struct device *dev)
+static int genpd_restore_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -1139,7 +1227,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
}
/**
- * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * genpd_complete - Complete power transition of a device in a power domain.
* @dev: Device to complete the transition of.
*
* Complete a power transition of a device (during a system-wide power
@@ -1147,7 +1235,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
* domain member of an object of type struct generic_pm_domain representing
* a power domain consisting of I/O devices.
*/
-static void pm_genpd_complete(struct device *dev)
+static void genpd_complete(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -1180,7 +1268,7 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
struct generic_pm_domain *genpd;
genpd = dev_to_genpd(dev);
- if (!pm_genpd_present(genpd))
+ if (!genpd_present(genpd))
return;
if (suspend) {
@@ -1206,14 +1294,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else /* !CONFIG_PM_SLEEP */
-#define pm_genpd_prepare NULL
-#define pm_genpd_suspend_noirq NULL
-#define pm_genpd_resume_noirq NULL
-#define pm_genpd_freeze_noirq NULL
-#define pm_genpd_thaw_noirq NULL
-#define pm_genpd_poweroff_noirq NULL
-#define pm_genpd_restore_noirq NULL
-#define pm_genpd_complete NULL
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
@@ -1239,7 +1327,7 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
- gpd_data->td.effective_constraint_ns = -1;
+ gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
spin_lock_irq(&dev->power.lock);
@@ -1574,14 +1662,14 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
- genpd->domain.ops.prepare = pm_genpd_prepare;
- genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
- genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
- genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
- genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
- genpd->domain.ops.poweroff_noirq = pm_genpd_poweroff_noirq;
- genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -1795,7 +1883,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
mutex_lock(&gpd_list_lock);
- if (pm_genpd_present(genpd)) {
+ if (genpd_present(genpd)) {
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (!ret) {
genpd->provider = &np->fwnode;
@@ -1831,7 +1919,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
for (i = 0; i < data->num_domains; i++) {
if (!data->domains[i])
continue;
- if (!pm_genpd_present(data->domains[i]))
+ if (!genpd_present(data->domains[i]))
goto error;
data->domains[i]->provider = &np->fwnode;
@@ -2274,7 +2362,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
-static struct dentry *pm_genpd_debugfs_dir;
+static struct dentry *genpd_debugfs_dir;
/*
* TODO: This function is a slightly modified version of rtpm_status_show
@@ -2302,8 +2390,8 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev)
seq_puts(s, p);
}
-static int pm_genpd_summary_one(struct seq_file *s,
- struct generic_pm_domain *genpd)
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2373,7 +2461,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return -ERESTARTSYS;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- ret = pm_genpd_summary_one(s, genpd);
+ ret = genpd_summary_one(s, genpd);
if (ret)
break;
}
@@ -2559,23 +2647,23 @@ define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
-static int __init pm_genpd_debug_init(void)
+static int __init genpd_debug_init(void)
{
struct dentry *d;
struct generic_pm_domain *genpd;
- pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
- if (!pm_genpd_debugfs_dir)
+ if (!genpd_debugfs_dir)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
if (!d)
return -ENOMEM;
@@ -2595,11 +2683,11 @@ static int __init pm_genpd_debug_init(void)
return 0;
}
-late_initcall(pm_genpd_debug_init);
+late_initcall(genpd_debug_init);
-static void __exit pm_genpd_debug_exit(void)
+static void __exit genpd_debug_exit(void)
{
- debugfs_remove_recursive(pm_genpd_debugfs_dir);
+ debugfs_remove_recursive(genpd_debugfs_dir);
}
-__exitcall(pm_genpd_debug_exit);
+__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 51751cc8c9e6..99896fbf18e4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,20 +14,29 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s64 constraint_ns = -1;
+ s64 constraint_ns;
- if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
-
- if (constraint_ns < 0)
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
constraint_ns = dev_pm_qos_read_value(dev);
+ constraint_ns *= NSEC_PER_USEC;
+ }
- if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
- return 0;
-
- constraint_ns *= NSEC_PER_USEC;
-
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
+ if (constraint_ns < *constraint_ns_p)
*constraint_ns_p = constraint_ns;
return 0;
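Both branches above feed the same reduction: every child's requirement is expressed in nanoseconds and the smallest one wins, with PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS standing in for "no limit". The following stand-alone sketch (plain user-space C; the sentinel value and the child numbers are invented for illustration) models that aggregation step outside the kernel:

/* Stand-alone sketch of the aggregation done by dev_update_qos_constraint().
 * The sentinel and the child values below are illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC		1000LL
#define NO_CONSTRAINT_NS	INT64_MAX	/* assumed stand-in for the kernel sentinel */

static void update_constraint(int64_t child_ns, int64_t *constraint_ns_p)
{
	/* keep the tightest (smallest) requirement seen so far */
	if (child_ns < *constraint_ns_p)
		*constraint_ns_p = child_ns;
}

int main(void)
{
	/* children outside a PM domain report their constraint in microseconds */
	int64_t child_a_us = 2500, child_b_us = 800;
	int64_t constraint_ns = NO_CONSTRAINT_NS;	/* start out unconstrained */

	update_constraint(child_a_us * NSEC_PER_USEC, &constraint_ns);
	update_constraint(child_b_us * NSEC_PER_USEC, &constraint_ns);

	printf("effective constraint: %lld ns\n", (long long)constraint_ns);
	return 0;
}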
@@ -55,7 +64,7 @@ static bool default_suspend_ok(struct device *dev)
}
td->constraint_changed = false;
td->cached_suspend_ok = false;
- td->effective_constraint_ns = -1;
+ td->effective_constraint_ns = 0;
constraint_ns = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -63,11 +72,7 @@ static bool default_suspend_ok(struct device *dev)
if (constraint_ns == 0)
return false;
- if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
- constraint_ns = -1;
- else
- constraint_ns *= NSEC_PER_USEC;
-
+ constraint_ns *= NSEC_PER_USEC;
/*
* We can walk the children without any additional locking, because
* they all have been suspended at this point and their
@@ -77,18 +82,31 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns < 0) {
- /* The children have no constraints. */
- td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
td->cached_suspend_ok = true;
+ } else if (constraint_ns == 0) {
+ /*
+ * This triggers if one of the children that don't belong to a
+ * domain has a zero PM QoS constraint and it's better not to
+ * suspend then. effective_constraint_ns is zero already and
+ * cached_suspend_ok is false, so bail out.
+ */
+ return false;
} else {
- constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
- if (constraint_ns > 0) {
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = true;
- } else {
- td->effective_constraint_ns = 0;
- }
+ constraint_ns -= td->suspend_latency_ns +
+ td->resume_latency_ns;
+ /*
+ * effective_constraint_ns is zero already and cached_suspend_ok
+ * is false, so if the computed value is not positive, return
+ * right away.
+ */
+ if (constraint_ns <= 0)
+ return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
}
/*
@@ -150,19 +168,13 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0)
- constraint_ns = dev_pm_qos_read_value(pdd->dev);
-
- if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
- continue;
-
- constraint_ns *= NSEC_PER_USEC;
-
/*
- * constraint_ns cannot be negative here, because the device has
- * been suspended.
+ * Zero means "no suspend at all" and this runs only when all
+ * devices in the domain are suspended, so it must be positive.
*/
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+ continue;
+
if (constraint_ns <= off_on_time_ns)
return false;
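Taken together, the governor's decision is plain arithmetic: the tightest constraint minus the device's own suspend and resume latencies must stay positive for default_suspend_ok(), and the remaining budget must also exceed the domain's power-off plus power-on time before __default_power_down_ok() lets the whole domain go down. A stand-alone sketch of that arithmetic (user-space C; all numbers are invented):

/* The comparison structure mirrors the governor; the values are made up. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static bool suspend_ok(int64_t constraint_ns, int64_t suspend_lat_ns,
		       int64_t resume_lat_ns, int64_t *effective_ns)
{
	int64_t budget = constraint_ns - (suspend_lat_ns + resume_lat_ns);

	if (budget <= 0)
		return false;	/* the device-level transition alone blows the budget */
	*effective_ns = budget;
	return true;
}

static bool power_down_ok(int64_t effective_ns, int64_t off_on_time_ns)
{
	/* the domain may power off only if every member's remaining budget
	 * still covers the domain's own off+on transition time */
	return effective_ns > off_on_time_ns;
}

int main(void)
{
	int64_t effective_ns = 0;

	if (suspend_ok(800000, 150000, 250000, &effective_ns))
		printf("suspend ok, %lld ns left for the domain\n",
		       (long long)effective_ns);

	printf("domain power-off %s\n",
	       power_down_ok(effective_ns, 300000) ? "allowed" : "denied");
	return 0;
}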
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 07c3c4a9522d..b2ed606265a8 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,7 +9,6 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -298,26 +297,4 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
-
-/**
- * pm_complete_with_resume_check - Complete a device power transition.
- * @dev: Device to handle.
- *
- * Complete a device power transition during a system-wide power transition and
- * optionally schedule a runtime resume of the device if the system resume in
- * progress has been initated by the platform firmware and the device had its
- * power.direct_complete flag set.
- */
-void pm_complete_with_resume_check(struct device *dev)
-{
- pm_generic_complete(dev);
- /*
- * If the device had been runtime-suspended before the system went into
- * the sleep state it is going out of and it has never been resumed till
- * now, resume it in case the firmware powered it up.
- */
- if (dev->power.direct_complete && pm_resume_via_firmware())
- pm_request_resume(dev);
-}
-EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 770b1539a083..db2f04415927 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -478,9 +478,9 @@ struct dpm_watchdog {
* There's not much we can do here to recover so panic() to
* capture a crash-dump in pstore.
*/
-static void dpm_watchdog_handler(unsigned long data)
+static void dpm_watchdog_handler(struct timer_list *t)
{
- struct dpm_watchdog *wd = (void *)data;
+ struct dpm_watchdog *wd = from_timer(wd, t, timer);
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL);
@@ -500,11 +500,9 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
wd->dev = dev;
wd->tsk = current;
- init_timer_on_stack(timer);
+ timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
/* use same timeout value for both suspend and resume */
timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
- timer->function = dpm_watchdog_handler;
- timer->data = (unsigned long)wd;
add_timer(timer);
}
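This hunk is one instance of the tree-wide timer API conversion that recurs through the rest of this series: the callback now receives a struct timer_list * instead of an unsigned long cookie, and the owning object is recovered with from_timer() (container_of() underneath) rather than by casting ->data. A minimal sketch of the pattern as a self-contained module; the demo_watchdog structure and all names are invented and do not belong to any driver shown here:

/* Hedged sketch of the timer_setup()/from_timer() pattern, not part of any driver. */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_watchdog {
	struct timer_list timer;
	int barks;
};

static struct demo_watchdog demo_wd;

static void demo_timeout(struct timer_list *t)
{
	/* recover the owning structure from the embedded timer_list */
	struct demo_watchdog *wd = from_timer(wd, t, timer);

	pr_info("demo watchdog fired, bark %d\n", ++wd->barks);
	mod_timer(&wd->timer, jiffies + HZ);	/* re-arm one second out */
}

static int __init demo_init(void)
{
	timer_setup(&demo_wd.timer, demo_timeout, 0);
	mod_timer(&demo_wd.timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_wd.timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");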
@@ -528,7 +526,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
/*------------------------- Resume routines -------------------------*/
/**
- * device_resume_noirq - Execute an "early resume" callback for given device.
+ * device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being resumed asynchronously.
@@ -848,16 +846,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Driver;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Driver;
- } else if (dev->class->resume) {
- info = "legacy class ";
- callback = dev->class->resume;
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
}
if (dev->bus) {
@@ -1083,7 +1075,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
- * device_suspend_noirq - Execute a "late suspend" callback for given device.
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1243,7 +1235,7 @@ int dpm_suspend_noirq(pm_message_t state)
}
/**
- * device_suspend_late - Execute a "late suspend" callback for given device.
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1445,7 +1437,7 @@ static void dpm_clear_suppliers_direct_complete(struct device *dev)
}
/**
- * device_suspend - Execute "suspend" callbacks for given device.
+ * __device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@@ -1508,17 +1500,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Run;
}
- if (dev->class) {
- if (dev->class->pm) {
- info = "class ";
- callback = pm_op(dev->class->pm, state);
- goto Run;
- } else if (dev->class->suspend) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend,
- "legacy class ");
- goto End;
- }
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
}
if (dev->bus) {
@@ -1665,6 +1650,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
+ WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ !pm_runtime_enabled(dev));
+
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1713,7 +1701,9 @@ unlock:
* applies to suspend transitions, however.
*/
spin_lock_irq(&dev->power.lock);
- dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+ dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+ pm_runtime_suspended(dev) && ret > 0 &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
spin_unlock_irq(&dev->power.lock);
return 0;
}
@@ -1862,11 +1852,16 @@ void device_pm_check_callbacks(struct device *dev)
dev->power.no_pm_callbacks =
(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
!dev->bus->suspend && !dev->bus->resume)) &&
- (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
- !dev->class->suspend && !dev->class->resume)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
!dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
+
+bool dev_pm_smart_suspend_and_suspended(struct device *dev)
+{
+ return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ pm_runtime_status_suspended(dev);
+}
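Both the tightened direct_complete test in device_prepare() and the new dev_pm_smart_suspend_and_suspended() helper are driven by flags a driver declares about itself. A hedged fragment of how a probe routine might opt in, assuming the dev_pm_set_driver_flags() helper introduced alongside these flags; the function name is invented and this is not a complete driver:

/* Fragment only: shows the flag declaration, not driver registration. */
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	/*
	 * DPM_FLAG_SMART_SUSPEND: if the device is already runtime-suspended
	 * when a system sleep starts, its suspend callbacks may be skipped.
	 * DPM_FLAG_NEVER_SKIP would instead forbid the direct_complete
	 * shortcut for this device altogether.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND);

	pm_runtime_enable(&pdev->dev);
	return 0;
}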
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a46e97e515c5..7beee75399d4 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/pm_qos.h>
static inline void device_pm_init_common(struct device *dev)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7d29286d9313..3382542b39b7 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -139,6 +139,9 @@ static int apply_constraint(struct dev_pm_qos_request *req,
switch(req->type) {
case DEV_PM_QOS_RESUME_LATENCY:
+ if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+ value = 0;
+
ret = pm_qos_update_target(&qos->resume_latency,
&req->data.pnode, action, value);
break;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 13e015905543..027d159ac381 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
@@ -894,9 +894,9 @@ static void pm_runtime_work(struct work_struct *work)
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(unsigned long data)
+static void pm_suspend_timer_fn(struct timer_list *t)
{
- struct device *dev = (struct device *)data;
+ struct device *dev = from_timer(dev, t, power.suspend_timer);
unsigned long flags;
unsigned long expires;
@@ -1101,29 +1101,13 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
goto out;
}
- if (dev->power.runtime_status == status)
+ if (dev->power.runtime_status == status || !parent)
goto out_set;
if (status == RPM_SUSPENDED) {
- /*
- * It is invalid to suspend a device with an active child,
- * unless it has been set to ignore its children.
- */
- if (!dev->power.ignore_children &&
- atomic_read(&dev->power.child_count)) {
- dev_err(dev, "runtime PM trying to suspend device but active child\n");
- error = -EBUSY;
- goto out;
- }
-
- if (parent) {
- atomic_add_unless(&parent->power.child_count, -1, 0);
- notify_parent = !parent->power.ignore_children;
- }
- goto out_set;
- }
-
- if (parent) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ } else {
spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
/*
@@ -1307,6 +1291,13 @@ void pm_runtime_enable(struct device *dev)
else
dev_warn(dev, "Unbalanced %s!\n", __func__);
+ WARN(!dev->power.disable_depth &&
+ dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0,
+ "Enabling runtime PM for inactive device (%s) with active children\n",
+ dev_name(dev));
+
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
@@ -1499,8 +1490,7 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
- (unsigned long)dev);
+ timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
init_waitqueue_head(&dev->power.wait_queue);
}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 632077f05c57..e153e28b1857 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -326,33 +326,6 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
static DEVICE_ATTR(pm_qos_no_power_off, 0644,
pm_qos_no_power_off_show, pm_qos_no_power_off_store);
-static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
- & PM_QOS_FLAG_REMOTE_WAKEUP));
-}
-
-static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
-{
- int ret;
-
- if (kstrtoint(buf, 0, &ret))
- return -EINVAL;
-
- if (ret != 0 && ret != 1)
- return -EINVAL;
-
- ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
- return ret < 0 ? ret : n;
-}
-
-static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
- pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
-
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
@@ -688,7 +661,6 @@ static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
static struct attribute *pm_qos_flags_attrs[] = {
&dev_attr_pm_qos_no_power_off.attr,
- &dev_attr_pm_qos_remote_wakeup.attr,
NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cdd6f256da59..38559f04db2c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -54,7 +54,7 @@ static unsigned int saved_count;
static DEFINE_SPINLOCK(events_lock);
-static void pm_wakeup_timer_fn(unsigned long data);
+static void pm_wakeup_timer_fn(struct timer_list *t);
static LIST_HEAD(wakeup_sources);
@@ -176,7 +176,7 @@ void wakeup_source_add(struct wakeup_source *ws)
return;
spin_lock_init(&ws->lock);
- setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
+ timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
ws->active = false;
ws->last_time = ktime_get();
@@ -481,8 +481,7 @@ static bool wakeup_source_not_registered(struct wakeup_source *ws)
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != pm_wakeup_timer_fn ||
- ws->timer.data != (unsigned long)ws;
+ return ws->timer.function != pm_wakeup_timer_fn;
}
/*
@@ -724,9 +723,9 @@ EXPORT_SYMBOL_GPL(pm_relax);
* in @data if it is currently active and its timer has not been canceled and
* the expiration time of the timer is not in future.
*/
-static void pm_wakeup_timer_fn(unsigned long data)
+static void pm_wakeup_timer_fn(struct timer_list *t)
{
- struct wakeup_source *ws = (struct wakeup_source *)data;
+ struct wakeup_source *ws = from_timer(ws, t, timer);
unsigned long flags;
spin_lock_irqsave(&ws->lock, flags);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7ed99c1b2a8b..851b1b6596a4 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1044,10 +1044,15 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node);
/**
* fwnode_handle_get - Obtain a reference to a device node
* @fwnode: Pointer to the device node to obtain the reference to.
+ *
+ * Returns the fwnode handle.
*/
-void fwnode_handle_get(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
{
- fwnode_call_void_op(fwnode, get);
+ if (!fwnode_has_op(fwnode, get))
+ return fwnode;
+
+ return fwnode_call_ptr_op(fwnode, get);
}
EXPORT_SYMBOL_GPL(fwnode_handle_get);
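With the non-void return, "take a reference and hand it on" collapses into a single statement, which is the point of the change. A hedged fragment; the wrapper name is invented:

#include <linux/property.h>

/* Keep a node alive for the caller; release it later with fwnode_handle_put(). */
static struct fwnode_handle *demo_grab_node(struct fwnode_handle *fwnode)
{
	return fwnode_handle_get(fwnode);
}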
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 073c0b77e5b3..3a1535d812d8 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Generic register map support. There are no user servicable options here,
# this is an API intended to be used by other kernel subsystems. These
# subsystems should select the appropriate symbols.
@@ -5,6 +6,7 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
+ select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -36,3 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+
+config REGMAP_HWSPINLOCK
+ bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0cf4abc8fbf1..0d298c446108 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# For include/trace/define_trace.h to include trace.h
CFLAGS_regmap.o := -I$(src)
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d76028..8641183cac2f 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -157,6 +157,8 @@ struct regmap {
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index edd9a839d004..c7150dd264d5 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -102,7 +102,7 @@ static int regmap_spi_read(void *context,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
-static struct regmap_bus regmap_spi = {
+static const struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.async_write = regmap_spi_async_write,
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 4a36e415e938..0bfb8ed244d5 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -83,7 +83,7 @@ static int regmap_spmi_base_write(void *context, const void *data,
count - 1);
}
-static struct regmap_bus regmap_spmi_base = {
+static const struct regmap_bus regmap_spmi_base = {
.read = regmap_spmi_base_read,
.write = regmap_spmi_base_write,
.gather_write = regmap_spmi_base_gather_write,
@@ -203,7 +203,7 @@ static int regmap_spmi_ext_write(void *context, const void *data,
count - 2);
}
-static struct regmap_bus regmap_spmi_ext = {
+static const struct regmap_bus regmap_spmi_ext = {
.read = regmap_spmi_ext_read,
.write = regmap_spmi_ext_write,
.gather_write = regmap_spmi_ext_gather_write,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b9a779a4a739..8d516a9bfc01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/hwspinlock.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+static void regmap_lock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+ &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+#endif
+
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
@@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev,
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
+ } else if (config->hwlock_id) {
+#ifdef CONFIG_REGMAP_HWSPINLOCK
+ map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+ if (!map->hwlock) {
+ ret = -ENXIO;
+ goto err_map;
+ }
+
+ switch (config->hwlock_mode) {
+ case HWLOCK_IRQSTATE:
+ map->lock = regmap_lock_hwlock_irqsave;
+ map->unlock = regmap_unlock_hwlock_irqrestore;
+ break;
+ case HWLOCK_IRQ:
+ map->lock = regmap_lock_hwlock_irq;
+ map->unlock = regmap_unlock_hwlock_irq;
+ break;
+ default:
+ map->lock = regmap_lock_hwlock;
+ map->unlock = regmap_unlock_hwlock;
+ break;
+ }
+
+ map->lock_arg = map;
+#else
+ ret = -EINVAL;
+ goto err_map;
+#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_2_6_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_4_12_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_7_9_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_10_14_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (reg_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_reg = regmap_format_24;
break;
@@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -818,13 +892,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
default:
- goto err_map;
+ goto err_hwlock;
}
if (val_endian == REGMAP_ENDIAN_NATIVE)
@@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (val_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
@@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#ifdef CONFIG_64BIT
@@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
@@ -909,18 +983,18 @@ struct regmap *__regmap_init(struct device *dev,
if (map->format.format_write) {
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
- goto err_map;
+ goto err_hwlock;
map->use_single_write = true;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
- goto err_map;
+ goto err_hwlock;
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_map;
+ goto err_hwlock;
}
if (map->format.format_write) {
@@ -1041,6 +1115,9 @@ err_regcache:
err_range:
regmap_range_exit(map);
kfree(map->work_buf);
+err_hwlock:
+ if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
err_map:
kfree(map);
err:
@@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
+ if (IS_ENABLED(CONFIG_REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
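From the consumer side the new locking scheme is selected entirely through regmap_config. A hedged sketch of an MMIO regmap guarded by a hardware spinlock, assuming the hwlock_id/hwlock_mode fields added by this series; the lock index, register layout, and names are invented:

/* Fragment only: illustrates the new regmap_config fields, not a full driver. */
#include <linux/hwspinlock.h>
#include <linux/regmap.h>

static const struct regmap_config demo_syscon_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0x400,
	/* serialize against a coprocessor sharing these registers */
	.hwlock_id	= 3,			/* invented lock index; 0 means no hwlock */
	.hwlock_mode	= HWLOCK_IRQSTATE,	/* take the lock with IRQs saved/restored */
};

/* in probe: map = devm_regmap_init_mmio(dev, base, &demo_syscon_config); */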
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 64586a1c5a42..d4066fa079ab 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM regmap
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 9aa0d45a60db..86e85daa80bf 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config TEST_ASYNC_DRIVER_PROBE
tristate "Build kernel module to test asynchronous driver probing"
depends on m
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 304d5c2bd5e9..a3355d66bc12 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -64,7 +64,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_1)) {
error = PTR_ERR(async_dev_1);
- pr_err("failed to create async_dev_1: %d", error);
+ pr_err("failed to create async_dev_1: %d\n", error);
return error;
}
@@ -91,7 +91,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(async_dev_2)) {
error = PTR_ERR(async_dev_2);
- pr_err("failed to create async_dev_2: %d", error);
+ pr_err("failed to create async_dev_2: %d\n", error);
goto err_unregister_async_driver;
}
@@ -118,7 +118,7 @@ static int __init test_async_probe_init(void)
NULL, 0);
if (IS_ERR(sync_dev_1)) {
error = PTR_ERR(sync_dev_1);
- pr_err("failed to create sync_dev_1: %d", error);
+ pr_err("failed to create sync_dev_1: %d\n", error);
goto err_unregister_sync_driver;
}
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 54f81c554815..02d78f6cecbb 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config BCMA_POSSIBLE
bool
depends on HAS_IOMEM && HAS_DMA
@@ -10,14 +11,15 @@ menuconfig BCMA
Bus driver for Broadcom specific Advanced Microcontroller Bus
Architecture.
+if BCMA
+
# Support for Block-I/O. SELECT this from the driver that needs it.
config BCMA_BLOCKIO
bool
- depends on BCMA
config BCMA_HOST_PCI_POSSIBLE
bool
- depends on BCMA && PCI = y
+ depends on PCI = y
default y
config BCMA_HOST_PCI
@@ -28,7 +30,6 @@ config BCMA_HOST_PCI
config BCMA_HOST_SOC
bool "Support for BCMA in a SoC"
- depends on BCMA
help
Host interface for a Broadcom AIX bus directly mapped into
the memory. This only works with the Broadcom SoCs from the
@@ -38,7 +39,7 @@ config BCMA_HOST_SOC
config BCMA_DRIVER_PCI
bool "BCMA Broadcom PCI core driver"
- depends on BCMA && PCI
+ depends on PCI
default y
help
BCMA bus may have many versions of PCIe core. This driver
@@ -54,13 +55,13 @@ config BCMA_DRIVER_PCI
config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode"
- depends on BCMA && MIPS && BCMA_DRIVER_PCI
+ depends on MIPS && BCMA_DRIVER_PCI
help
PCI core hostmode operation (external PCI bus).
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
- depends on BCMA && MIPS
+ depends on MIPS
help
Driver for the Broadcom MIPS core attached to Broadcom specific
Advanced Microcontroller Bus.
@@ -91,7 +92,6 @@ config BCMA_NFLASH
config BCMA_DRIVER_GMAC_CMN
bool "BCMA Broadcom GBIT MAC COMMON core driver"
- depends on BCMA
help
Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
specific Advanced Microcontroller Bus.
@@ -100,7 +100,7 @@ config BCMA_DRIVER_GMAC_CMN
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
- depends on BCMA && GPIOLIB
+ depends on GPIOLIB
select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
help
Driver to provide access to the GPIO pins of the bcma bus.
@@ -109,8 +109,9 @@ config BCMA_DRIVER_GPIO
config BCMA_DEBUG
bool "BCMA debugging"
- depends on BCMA
help
This turns on additional debugging messages.
If unsure, say N
+
+endif # BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 087948a1d20d..f8c37de35da2 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_chipcommon_b.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 168fa175d65a..a4aac370f21f 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_BCMA_PRIVATE_H_
#define LINUX_BCMA_PRIVATE_H_
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 982d5781d3ce..2c0ffb77d738 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -113,7 +113,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &irqs, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
return IRQ_HANDLED;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 89af807cf29c..f040aba48d50 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -184,10 +184,14 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
{
int i;
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- printk(KERN_DEBUG KBUILD_MODNAME ": core 0x%04x, irq :", dev->id.id);
- for (i = 0; i <= 6; i++)
- printk(" %s%s", irq_name[i], i == irq ? "*" : " ");
- printk("\n");
+ char interrupts[25];
+ char *ints = interrupts;
+
+ for (i = 0; i < ARRAY_SIZE(irq_name); i++)
+ ints += sprintf(ints, " %s%c",
+ irq_name[i], i == irq ? '*' : ' ');
+
+ bcma_debug(dev->bus, "core 0x%04x, irq:%s\n", dev->id.id, interrupts);
}
static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
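One sizing note on the buffer above: each irq_name[] entry contributes a space, the name, and either '*' or ' ', so the worst case is 6 characters for "2(S)" plus 3 for each of the six single-character names, 24 in total, and the array therefore needs 25 bytes including the terminating NUL. A stand-alone sketch of that count, using the values from the array above:

/* Counts the worst-case length produced by the " %s%c" format above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
	size_t i, len = 0;

	for (i = 0; i < sizeof(irq_name) / sizeof(irq_name[0]); i++)
		len += 1 + strlen(irq_name[i]) + 1;	/* space, name, '*' or ' ' */

	printf("%zu characters plus NUL\n", len);	/* prints 24, so 25 bytes */
	return 0;
}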
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
index 30eb475e4d19..e53079a3daf7 100644
--- a/drivers/bcma/scan.h
+++ b/drivers/bcma/scan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BCMA_SCAN_H_
#define BCMA_SCAN_H_
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 255591ab3716..442e777bdfb2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3079,11 +3079,10 @@ DAC960_InitializeController(DAC960_Controller_T *Controller)
/*
Initialize the Monitoring Timer.
*/
- init_timer(&Controller->MonitoringTimer);
+ timer_setup(&Controller->MonitoringTimer,
+ DAC960_MonitoringTimerFunction, 0);
Controller->MonitoringTimer.expires =
jiffies + DAC960_MonitoringTimerInterval;
- Controller->MonitoringTimer.data = (unsigned long) Controller;
- Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
add_timer(&Controller->MonitoringTimer);
Controller->ControllerInitialized = true;
return true;
@@ -5620,9 +5619,9 @@ static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
the status of DAC960 Controllers.
*/
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+ DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
DAC960_Command_T *Command;
unsigned long flags;
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
index 85fa9bb63759..6a6226a2b932 100644
--- a/drivers/block/DAC960.h
+++ b/drivers/block/DAC960.h
@@ -4406,7 +4406,7 @@ static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
static irqreturn_t DAC960_P_InterruptHandler(int, void *);
static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
DAC960_Controller_T *, ...);
static void DAC960_CreateProcEntries(DAC960_Controller_T *);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 2dfe99b328f8..40579d0cb3d1 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Block device driver configuration
#
@@ -67,9 +68,13 @@ config AMIGA_Z2RAM
To compile this driver as a module, choose M here: the
module will be called z2ram.
+config CDROM
+ tristate
+
config GDROM
tristate "SEGA Dreamcast GD-ROM drive"
depends on SH_DREAMCAST
+ select CDROM
select BLK_SCSI_REQUEST # only for the generic cdrom code
help
A standard SEGA Dreamcast comes with a modified CD ROM drive called a
@@ -297,7 +302,6 @@ config BLK_DEV_SX8
config BLK_DEV_RAM
tristate "RAM block device support"
- select DAX if BLK_DEV_RAM_DAX
---help---
Saying Y here will allow you to use a portion of your RAM memory as
a block device, so that you can make file systems on it, read and
@@ -333,20 +337,10 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config BLK_DEV_RAM_DAX
- bool "Support Direct Access (DAX) to RAM block devices"
- depends on BLK_DEV_RAM && FS_DAX
- default n
- help
- Support filesystems using DAX to access RAM block devices. This
- avoids double-buffering data in the page cache before copying it
- to the block device. Answering Y will slightly enlarge the kernel,
- and will prevent RAM block device backing store memory from being
- allocated from highmem (only a problem for highmem systems).
-
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ select CDROM
select BLK_SCSI_REQUEST
help
Note: This driver is deprecated and will be removed from the
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1f456d86a190..dc061158b403 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel block device drivers.
#
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49908c74bfcb..e5aa62fcf5a8 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -146,6 +146,7 @@ static struct amiga_floppy_struct unit[FD_MAX_UNITS];
static struct timer_list flush_track_timer[FD_MAX_UNITS];
static struct timer_list post_write_timer;
+static unsigned long post_write_timer_drive;
static struct timer_list motor_on_timer;
static struct timer_list motor_off_timer[FD_MAX_UNITS];
static int on_attempts;
@@ -323,7 +324,7 @@ static void fd_deselect (int drive)
}
-static void motor_on_callback(unsigned long nr)
+static void motor_on_callback(struct timer_list *unused)
{
if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
complete_all(&motor_on_completion);
@@ -344,7 +345,6 @@ static int fd_motor_on(int nr)
fd_select(nr);
reinit_completion(&motor_on_completion);
- motor_on_timer.data = nr;
mod_timer(&motor_on_timer, jiffies + HZ/2);
on_attempts = 10;
@@ -356,7 +356,7 @@ static int fd_motor_on(int nr)
on_attempts = -1;
#if 0
printk (KERN_ERR "motor_on failed, turning motor off\n");
- fd_motor_off (nr);
+ fd_motor_off (motor_off_timer + nr);
return 0;
#else
printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n");
@@ -366,20 +366,17 @@ static int fd_motor_on(int nr)
return 1;
}
-static void fd_motor_off(unsigned long drive)
+static void fd_motor_off(struct timer_list *timer)
{
- long calledfromint;
-#ifdef MODULE
- long decusecount;
+ unsigned long drive = ((unsigned long)timer -
+ (unsigned long)&motor_off_timer[0]) /
+ sizeof(motor_off_timer[0]);
- decusecount = drive & 0x40000000;
-#endif
- calledfromint = drive & 0x80000000;
drive&=3;
- if (calledfromint && !try_fdc(drive)) {
+ if (!try_fdc(drive)) {
/* We would be blocked in an interrupt, so try again later */
- motor_off_timer[drive].expires = jiffies + 1;
- add_timer(motor_off_timer + drive);
+ timer->expires = jiffies + 1;
+ add_timer(timer);
return;
}
unit[drive].motor = 0;
@@ -393,8 +390,6 @@ static void floppy_off (unsigned int nr)
int drive;
drive = nr & 3;
- /* called this way it is always from interrupt */
- motor_off_timer[drive].data = nr | 0x80000000;
mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
}
@@ -436,7 +431,7 @@ static int fd_calibrate(int drive)
break;
if (--n == 0) {
printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
- fd_motor_off (drive);
+ fd_motor_off (motor_off_timer + drive);
unit[drive].track = -1;
rel_fdc();
return 0;
@@ -565,7 +560,7 @@ static irqreturn_t fd_block_done(int irq, void *dummy)
if (block_flag == 2) { /* writing */
writepending = 2;
post_write_timer.expires = jiffies + 1; /* at least 2 ms */
- post_write_timer.data = selected;
+ post_write_timer_drive = selected;
add_timer(&post_write_timer);
}
else { /* reading */
@@ -652,6 +647,10 @@ static void post_write (unsigned long drive)
rel_fdc(); /* corresponds to get_fdc() in raw_write */
}
+static void post_write_callback(struct timer_list *timer)
+{
+ post_write(post_write_timer_drive);
+}
/*
* The following functions are to convert the block contents into raw data
@@ -1245,8 +1244,12 @@ static void dos_write(int disk)
/* FIXME: this assumes the drive is still spinning -
* which is only true if we complete writing a track within three seconds
*/
-static void flush_track_callback(unsigned long nr)
+static void flush_track_callback(struct timer_list *timer)
{
+ unsigned long nr = ((unsigned long)timer -
+ (unsigned long)&flush_track_timer[0]) /
+ sizeof(flush_track_timer[0]);
+
nr&=3;
writefromint = 1;
if (!try_fdc(nr)) {
@@ -1650,8 +1653,7 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
fd_ref[drive] = 0;
}
#ifdef MODULE
-/* the mod_use counter is handled this way */
- floppy_off (drive | 0x40000000);
+ floppy_off (drive);
#endif
mutex_unlock(&amiflop_mutex);
}
@@ -1792,27 +1794,19 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
floppy_find, NULL, NULL);
/* initialize variables */
- init_timer(&motor_on_timer);
+ timer_setup(&motor_on_timer, motor_on_callback, 0);
motor_on_timer.expires = 0;
- motor_on_timer.data = 0;
- motor_on_timer.function = motor_on_callback;
for (i = 0; i < FD_MAX_UNITS; i++) {
- init_timer(&motor_off_timer[i]);
+ timer_setup(&motor_off_timer[i], fd_motor_off, 0);
motor_off_timer[i].expires = 0;
- motor_off_timer[i].data = i|0x80000000;
- motor_off_timer[i].function = fd_motor_off;
- init_timer(&flush_track_timer[i]);
+ timer_setup(&flush_track_timer[i], flush_track_callback, 0);
flush_track_timer[i].expires = 0;
- flush_track_timer[i].data = i;
- flush_track_timer[i].function = flush_track_callback;
unit[i].track = -1;
}
- init_timer(&post_write_timer);
+ timer_setup(&post_write_timer, post_write_callback, 0);
post_write_timer.expires = 0;
- post_write_timer.data = 0;
- post_write_timer.function = post_write;
for (i = 0; i < 128; i++)
mfmdecode[i]=255;
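Because the per-drive timers in this driver live in bare arrays rather than inside a per-drive structure, from_timer() cannot recover the drive number directly; the callbacks instead compute the array index from the timer pointer. A stand-alone sketch of that recovery with invented names and types:

/* Models the "which array slot fired?" computation used by fd_motor_off(). */
#include <stdio.h>
#include <stdint.h>

#define FD_MAX_UNITS 4

struct fake_timer { long expires; };

static struct fake_timer motor_off_timer[FD_MAX_UNITS];

static unsigned long drive_from_timer(struct fake_timer *timer)
{
	return ((uintptr_t)timer - (uintptr_t)&motor_off_timer[0]) /
	       sizeof(motor_off_timer[0]);
}

int main(void)
{
	/* pretend the timer for drive 2 fired */
	printf("drive %lu\n", drive_from_timer(&motor_off_timer[2]));
	return 0;
}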
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index dc43254e05a4..812fed069708 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -744,7 +744,7 @@ count_targets(struct aoedev *d, int *untainted)
}
static void
-rexmit_timer(ulong vp)
+rexmit_timer(struct timer_list *timer)
{
struct aoedev *d;
struct aoetgt *t;
@@ -758,7 +758,7 @@ rexmit_timer(ulong vp)
int utgts; /* number of aoetgt descriptors (not slots) */
int since;
- d = (struct aoedev *) vp;
+ d = from_timer(d, timer, timer);
spin_lock_irqsave(&d->lock, flags);
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index b28fefb90391..697f735b07a4 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include "aoe.h"
-static void dummy_timer(ulong);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);
@@ -146,11 +145,11 @@ aoedev_put(struct aoedev *d)
}
static void
-dummy_timer(ulong vp)
+dummy_timer(struct timer_list *t)
{
struct aoedev *d;
- d = (struct aoedev *)vp;
+ d = from_timer(d, t, timer);
if (d->flags & DEVFL_TKILL)
return;
d->timer.expires = jiffies + HZ;
@@ -466,9 +465,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
skb_queue_head_init(&d->skbpool);
- init_timer(&d->timer);
- d->timer.data = (ulong) d;
- d->timer.function = dummy_timer;
+ timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
add_timer(&d->timer);
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 4b987c2fefbe..251482066977 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -15,49 +15,19 @@ MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>");
MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels");
MODULE_VERSION(VERSION);
-enum { TINIT, TRUN, TKILL };
+static struct timer_list timer;
-static void
-discover_timer(ulong vp)
+static void discover_timer(struct timer_list *t)
{
- static struct timer_list t;
- static volatile ulong die;
- static spinlock_t lock;
- ulong flags;
- enum { DTIMERTICK = HZ * 60 }; /* one minute */
-
- switch (vp) {
- case TINIT:
- init_timer(&t);
- spin_lock_init(&lock);
- t.data = TRUN;
- t.function = discover_timer;
- die = 0;
- case TRUN:
- spin_lock_irqsave(&lock, flags);
- if (!die) {
- t.expires = jiffies + DTIMERTICK;
- add_timer(&t);
- }
- spin_unlock_irqrestore(&lock, flags);
-
- aoecmd_cfg(0xffff, 0xff);
- return;
- case TKILL:
- spin_lock_irqsave(&lock, flags);
- die = 1;
- spin_unlock_irqrestore(&lock, flags);
+ mod_timer(t, jiffies + HZ * 60); /* one minute */
- del_timer_sync(&t);
- default:
- return;
- }
+ aoecmd_cfg(0xffff, 0xff);
}
static void
aoe_exit(void)
{
- discover_timer(TKILL);
+ del_timer_sync(&timer);
aoenet_exit();
unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
@@ -93,7 +63,9 @@ aoe_init(void)
goto blkreg_fail;
}
printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION);
- discover_timer(TINIT);
+
+ timer_setup(&timer, discover_timer, 0);
+ discover_timer(&timer);
return 0;
blkreg_fail:
aoecmd_exit();
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 92da886180aa..8bc3b9fd8dd2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -342,8 +342,8 @@ static int NeedSeek = 0;
static void fd_select_side( int side );
static void fd_select_drive( int drive );
static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
static irqreturn_t floppy_irq (int irq, void *dummy);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
@@ -353,12 +353,12 @@ static void fd_calibrate_done( int status );
static void fd_seek( void );
static void fd_seek_done( int status );
static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
static void fd_rwsec_done( int status );
static void fd_rwsec_done1(int status);
static void fd_writetrack( void );
static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
@@ -373,10 +373,10 @@ static void floppy_release(struct gendisk *disk, fmode_t mode);
/************************* End of Prototypes **************************/
-static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0);
-static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
-static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
-static DEFINE_TIMER(fd_timer, check_change, 0, 0);
+static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer);
+static DEFINE_TIMER(readtrack_timer, fd_readtrack_check);
+static DEFINE_TIMER(timeout_timer, fd_times_out);
+static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
@@ -479,7 +479,7 @@ static void fd_deselect( void )
* counts the index signals, which arrive only if one drive is selected.
*/
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
{
unsigned char status;
@@ -515,7 +515,7 @@ static void fd_motor_off_timer( unsigned long dummy )
* as possible) and keep track of the current state of the write protection.
*/
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
{
static int drive = 0;
@@ -966,7 +966,7 @@ static void fd_rwsec( void )
}
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
{
unsigned long flags, addr, addr2;
@@ -1237,7 +1237,7 @@ static void fd_writetrack_done( int status )
fd_error();
}
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
{
atari_disable_irq( IRQ_MFP_FDC );
if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2d7178f7754e..8028a3a7e7fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -20,11 +20,7 @@
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-#include <linux/pfn_t.h>
-#include <linux/dax.h>
-#include <linux/uio.h>
-#endif
+#include <linux/backing-dev.h>
#include <linux/uaccess.h>
@@ -44,9 +40,6 @@ struct brd_device {
struct request_queue *brd_queue;
struct gendisk *brd_disk;
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- struct dax_device *dax_dev;
-#endif
struct list_head brd_list;
/*
@@ -60,7 +53,6 @@ struct brd_device {
/*
* Look up and return a brd's page for a given sector.
*/
-static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
pgoff_t idx;
@@ -112,9 +104,6 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
* restriction might be able to be lifted.
*/
gfp_flags = GFP_NOIO | __GFP_ZERO;
-#ifndef CONFIG_BLK_DEV_RAM_DAX
- gfp_flags |= __GFP_HIGHMEM;
-#endif
page = alloc_page(gfp_flags);
if (!page)
return NULL;
@@ -334,43 +323,6 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
return err;
}
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
-{
- struct page *page;
-
- if (!brd)
- return -ENODEV;
- page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
- if (!page)
- return -ENOSPC;
- *kaddr = page_address(page);
- *pfn = page_to_pfn_t(page);
-
- return 1;
-}
-
-static long brd_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
-{
- struct brd_device *brd = dax_get_private(dax_dev);
-
- return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
-}
-
-static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static const struct dax_operations brd_dax_ops = {
- .direct_access = brd_dax_direct_access,
- .copy_from_iter = brd_dax_copy_from_iter,
-};
-#endif
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.rw_page = brd_rw_page,
@@ -449,22 +401,10 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
-
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
- brd->dax_dev = alloc_dax(brd, disk->disk_name, &brd_dax_ops);
- if (!brd->dax_dev)
- goto out_free_inode;
-#endif
-
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
return brd;
-#ifdef CONFIG_BLK_DEV_RAM_DAX
-out_free_inode:
- kill_dax(brd->dax_dev);
- put_dax(brd->dax_dev);
-#endif
out_free_queue:
blk_cleanup_queue(brd->brd_queue);
out_free_dev:
@@ -504,10 +444,6 @@ out:
static void brd_del_one(struct brd_device *brd)
{
list_del(&brd->brd_list);
-#ifdef CONFIG_BLK_DEV_RAM_DAX
- kill_dax(brd->dax_dev);
- put_dax(brd->dax_dev);
-#endif
del_gendisk(brd->brd_disk);
brd_free(brd);
}
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 74e03aa537ad..7033a4beda66 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -43,7 +43,6 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
int cipher_len;
int mode_len;
char cms[LO_NAME_SIZE]; /* cipher-mode string */
- char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
struct crypto_skcipher *tfm;
@@ -56,7 +55,6 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
cms[LO_NAME_SIZE - 1] = 0;
- cipher = cmsp;
cipher_len = strcspn(cmsp, "-");
mode = cmsp + cipher_len;
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 7845bd6ee414..87aab6910d2d 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# DRBD device driver configuration
#
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index 4464e353c1e8..8bd534697d1b 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 494837e59f23..ab21976a87b2 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "drbd debugfs: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/block/drbd/drbd_debugfs.h b/drivers/block/drbd/drbd_debugfs.h
index 8bee21340dce..4ecfbb3358d7 100644
--- a/drivers/block/drbd/drbd_debugfs.h
+++ b/drivers/block/drbd/drbd_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 7e8589ce631c..06ecee1b528e 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1551,8 +1551,8 @@ extern int w_restart_disk_io(struct drbd_work *, int);
extern int w_send_out_of_sync(struct drbd_work *, int);
extern int w_start_resync(struct drbd_work *, int);
-extern void resync_timer_fn(unsigned long data);
-extern void start_resync_timer_fn(unsigned long data);
+extern void resync_timer_fn(struct timer_list *t);
+extern void start_resync_timer_fn(struct timer_list *t);
extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index 51b25ad85251..c58986556161 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/rbtree_augmented.h>
#include "drbd_interval.h"
diff --git a/drivers/block/drbd/drbd_interval.h b/drivers/block/drbd/drbd_interval.h
index 23c5a94428d2..b8c2dee5edc8 100644
--- a/drivers/block/drbd/drbd_interval.h
+++ b/drivers/block/drbd/drbd_interval.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRBD_INTERVAL_H
#define __DRBD_INTERVAL_H
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 8cb3791898ae..4b4697a1f963 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -64,7 +64,7 @@
static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
-static void md_sync_timer_fn(unsigned long data);
+static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
@@ -2023,14 +2023,10 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->unplug_work.cb = w_send_write_hint;
device->bm_io_work.w.cb = w_bitmap_io;
- setup_timer(&device->resync_timer, resync_timer_fn,
- (unsigned long)device);
- setup_timer(&device->md_sync_timer, md_sync_timer_fn,
- (unsigned long)device);
- setup_timer(&device->start_resync_timer, start_resync_timer_fn,
- (unsigned long)device);
- setup_timer(&device->request_timer, request_timer_fn,
- (unsigned long)device);
+ timer_setup(&device->resync_timer, resync_timer_fn, 0);
+ timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
+ timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
+ timer_setup(&device->request_timer, request_timer_fn, 0);
init_waitqueue_head(&device->misc_wait);
init_waitqueue_head(&device->state_wait);
@@ -3721,9 +3717,9 @@ int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
return (bdev->md.flags & flag) != 0;
}
-static void md_sync_timer_fn(unsigned long data)
+static void md_sync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_device *device = from_timer(device, t, md_sync_timer);
drbd_device_post_work(device, MD_SYNC);
}
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
index 6bf806df60dc..8e261cb5198b 100644
--- a/drivers/block/drbd/drbd_nla.c
+++ b/drivers/block/drbd/drbd_nla.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/drbd_genl_api.h>
diff --git a/drivers/block/drbd/drbd_nla.h b/drivers/block/drbd/drbd_nla.h
index 679c2d5b4535..f5eaffb6474e 100644
--- a/drivers/block/drbd/drbd_nla.h
+++ b/drivers/block/drbd/drbd_nla.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRBD_NLA_H
#define __DRBD_NLA_H
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index 4d296800f706..c3081f93051c 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRBD_PROTOCOL_H
#define __DRBD_PROTOCOL_H
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 796eaf347dc0..cb2fa63f6bc0 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5056,7 +5056,7 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
wake_up(&device->misc_wait);
del_timer_sync(&device->resync_timer);
- resync_timer_fn((unsigned long)device);
+ resync_timer_fn(&device->resync_timer);
/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
* w_make_resync_request etc. which may still be on the worker queue
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index de8566e55334..a500e738d929 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1714,9 +1714,9 @@ static bool net_timeout_reached(struct drbd_request *net_req,
* to expire twice (worst case) to become effective. Good enough.
*/
-void request_timer_fn(unsigned long data)
+void request_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_device *device = from_timer(device, t, request_timer);
struct drbd_connection *connection = first_peer_device(device)->connection;
struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
struct net_conf *nc;
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index a2254f825601..cb97b3b30962 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -294,7 +294,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m);
-extern void request_timer_fn(unsigned long data);
+extern void request_timer_fn(struct timer_list *t);
extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void tl_abort_disk_io(struct drbd_device *device);
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 0276c98fbbdd..ea58301d0895 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRBD_STATE_H
#define DRBD_STATE_H
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index 9e503a1a0bfb..ba80f612d6ab 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRBD_STATE_CHANGE_H
#define DRBD_STATE_CHANGE_H
diff --git a/drivers/block/drbd/drbd_strings.h b/drivers/block/drbd/drbd_strings.h
index f9923cc88afb..87b94a27358a 100644
--- a/drivers/block/drbd/drbd_strings.h
+++ b/drivers/block/drbd/drbd_strings.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRBD_STRINGS_H
#define __DRBD_STRINGS_H
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 03471b3fce86..1476cb3439f4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -457,9 +457,9 @@ int w_resync_timer(struct drbd_work *w, int cancel)
return 0;
}
-void resync_timer_fn(unsigned long data)
+void resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_device *device = from_timer(device, t, resync_timer);
drbd_queue_work_if_unqueued(
&first_peer_device(device)->connection->sender_work,
@@ -1705,9 +1705,9 @@ void drbd_rs_controller_reset(struct drbd_device *device)
rcu_read_unlock();
}
-void start_resync_timer_fn(unsigned long data)
+void start_resync_timer_fn(struct timer_list *t)
{
- struct drbd_device *device = (struct drbd_device *) data;
+ struct drbd_device *device = from_timer(device, t, start_resync_timer);
drbd_device_post_work(device, RS_START);
}
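
The drbd hunks above are part of the tree-wide timer API conversion: timer callbacks now receive the struct timer_list * that fired instead of an opaque unsigned long, and the owning structure is recovered with from_timer(). A minimal sketch of the pattern (the structure and field names here are illustrative, not drbd's):

#include <linux/timer.h>

struct my_dev {
	struct timer_list my_timer;
	/* ... driver state ... */
};

static void my_timer_fn(struct timer_list *t)
{
	/* container_of() on the timer field, so no cast from a stashed pointer */
	struct my_dev *dev = from_timer(dev, t, my_timer);

	/* ... handle expiry for dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	/* replaces setup_timer(&dev->my_timer, my_timer_fn, (unsigned long)dev) */
	timer_setup(&dev->my_timer, my_timer_fn, 0);
}
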
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 60c086a53609..eae484acfbbc 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -275,6 +275,10 @@ static int set_next_request(void);
#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
#endif
+#ifndef fd_cacheflush
+#define fd_cacheflush(addr, size) /* nothing... */
+#endif
+
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
@@ -899,10 +903,14 @@ static void unlock_fdc(void)
}
/* switches the motor off after a given timeout */
-static void motor_off_callback(unsigned long nr)
+static void motor_off_callback(struct timer_list *t)
{
+ unsigned long nr = t - motor_off_timer;
unsigned char mask = ~(0x10 << UNIT(nr));
+ if (WARN_ON_ONCE(nr >= N_DRIVE))
+ return;
+
set_dor(FDC(nr), mask, 0);
}
@@ -3043,7 +3051,7 @@ static void raw_cmd_done(int flag)
else
raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
- motor_off_callback(current_drive);
+ motor_off_callback(&motor_off_timer[current_drive]);
if (raw_cmd->next &&
(!(raw_cmd->flags & FD_RAW_FAILURE) ||
@@ -4538,7 +4546,7 @@ static int __init do_floppy_init(void)
disks[drive]->fops = &floppy_fops;
sprintf(disks[drive]->disk_name, "fd%d", drive);
- setup_timer(&motor_off_timer[drive], motor_off_callback, drive);
+ timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
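
floppy.c is a variant of the same conversion where the timers live in a static array rather than inside a per-device structure, so from_timer() does not apply and the drive number is recovered by pointer arithmetic on the array, as the hunk above shows. A sketch of that shape (array and bound names are illustrative):

static struct timer_list motor_timers[N_DRIVE];

static void motor_off_fn(struct timer_list *t)
{
	unsigned long nr = t - motor_timers;	/* index of the timer that fired */

	if (WARN_ON_ONCE(nr >= N_DRIVE))
		return;
	/* ... switch off the motor of drive nr ... */
}
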
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 85de67334695..bc8e61506968 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -476,6 +476,8 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
+ if (cmd->css)
+ css_put(cmd->css);
cmd->ret = ret;
lo_rw_aio_do_completion(cmd);
}
@@ -535,6 +537,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
+ if (cmd->css)
+ kthread_associate_blkcg(cmd->css);
if (rw == WRITE)
ret = call_write_iter(file, &cmd->iocb, &iter);
@@ -542,6 +546,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
ret = call_read_iter(file, &cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
+ kthread_associate_blkcg(NULL);
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
@@ -1686,6 +1691,14 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
}
+ /* always use the first bio's css */
+#ifdef CONFIG_BLK_CGROUP
+ if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
+ cmd->css = cmd->rq->bio->bi_css;
+ css_get(cmd->css);
+ } else
+#endif
+ cmd->css = NULL;
kthread_queue_work(&lo->worker, &cmd->work);
return BLK_STS_OK;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 1f3956702993..0f45416e4fcf 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -72,6 +72,7 @@ struct loop_cmd {
long ret;
struct kiocb iocb;
struct bio_vec *bvec;
+ struct cgroup_subsys_state *css;
};
/* Support for loadable transfer modules */
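
The loop changes above make the backing-file I/O issued from the loop worker thread get charged to the cgroup that submitted the original bio: loop_queue_rq() pins the first bio's blkcg css, lo_rw_aio() associates the worker kthread with it around the read/write submission, and the completion path drops the reference. A compressed sketch of that lifecycle, using the names from the patch (the three steps live in three different functions in the real driver):

	/* 1. at queue time: pin the submitting bio's blkcg */
	cmd->css = rq->bio->bi_css;
	css_get(cmd->css);

	/* 2. in the worker kthread: charge the backing-file I/O to that cgroup */
	kthread_associate_blkcg(cmd->css);
	ret = call_write_iter(file, &cmd->iocb, &iter);
	kthread_associate_blkcg(NULL);

	/* 3. at completion: drop the reference */
	if (cmd->css)
		css_put(cmd->css);
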
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index 0ba837fc62a8..bf221358567e 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# mtip32xx device driver configuration
#
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a3cfc7940de..b8af7352a18f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -887,12 +887,9 @@ static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
static bool mtip_pause_ncq(struct mtip_port *port,
struct host_to_dev_fis *fis)
{
- struct host_to_dev_fis *reply;
unsigned long task_file_data;
- reply = port->rxfis + RX_FIS_D2H_REG;
task_file_data = readl(port->mmio+PORT_TFDATA);
-
if ((task_file_data & 1))
return false;
@@ -1020,7 +1017,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
.opts = opts
};
int rv = 0;
- unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1057,7 +1053,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Copy the command to the command table */
memcpy(int_cmd->command, fis, fis_len*4);
- start = jiffies;
rq->timeout = timeout;
/* insert request and run queue */
@@ -3015,7 +3010,6 @@ static int mtip_hw_init(struct driver_data *dd)
{
int i;
int rv;
- unsigned int num_command_slots;
unsigned long timeout, timetaken;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
@@ -3025,7 +3019,6 @@ static int mtip_hw_init(struct driver_data *dd)
rv = -EIO;
goto out1;
}
- num_command_slots = dd->slot_groups * 32;
hba_setup(dd);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9adfb5445f8d..5f2a4240a204 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,15 +288,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
-
- /* If we are waiting on our dead timer then we could get timeout
- * callbacks for our request. For this we just want to reset the timer
- * and let the queue side take care of everything.
- */
- if (!completion_done(&cmd->send_complete)) {
- nbd_config_put(nbd);
- return BLK_EH_RESET_TIMER;
- }
config = nbd->config;
if (config->num_connections > 1) {
@@ -723,9 +714,9 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return 0;
if (test_bit(NBD_DISCONNECTED, &config->runtime_flags))
return 0;
- wait_event_interruptible_timeout(config->conn_wait,
- atomic_read(&config->live_connections),
- config->dead_conn_timeout);
+ wait_event_timeout(config->conn_wait,
+ atomic_read(&config->live_connections),
+ config->dead_conn_timeout);
return atomic_read(&config->live_connections);
}
@@ -740,6 +731,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
+ blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@@ -748,6 +740,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
+ blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@@ -771,6 +764,7 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
+ blk_mq_start_request(req);
return -EIO;
}
goto again;
@@ -781,6 +775,7 @@ again:
* here so that it gets put _after_ the request that is already on the
* dispatch list.
*/
+ blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
blk_mq_requeue_request(req, true);
ret = 0;
@@ -793,10 +788,10 @@ again:
ret = nbd_send_cmd(nbd, cmd, index);
if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
- "Request send failed trying another connection\n");
+ "Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
- mutex_unlock(&nsock->tx_lock);
- goto again;
+ blk_mq_requeue_request(req, true);
+ ret = 0;
}
out:
mutex_unlock(&nsock->tx_lock);
@@ -820,7 +815,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* done sending everything over the wire.
*/
init_completion(&cmd->send_complete);
- blk_mq_start_request(bd->rq);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
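
The nbd rework above turns on one blk-mq rule: a request must have been started with blk_mq_start_request() before it may be completed or requeued, but starting it also arms its timeout. nbd therefore no longer starts the request in nbd_queue_rq(), where it could sit waiting for a dead connection and trip the timeout handler; it is started in nbd_handle_cmd() just before any path that can fail, requeue, or actually send it, which in turn lets the reset-timer workaround in nbd_xmit_timeout() go away. An illustrative queue_rq skeleton that follows the same rule (helper names are hypothetical, not nbd code):

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	if (!my_resources_ready(req))		/* hypothetical readiness check */
		return BLK_STS_RESOURCE;	/* not started yet: block layer retries later */

	blk_mq_start_request(req);		/* from here on the timeout may fire */

	if (my_issue(req) == -EAGAIN) {		/* hypothetical submission */
		blk_mq_requeue_request(req, true);	/* legal only after start */
		return BLK_STS_OK;
	}
	return BLK_STS_OK;
}
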
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8042c26ea9e6..c61960deb74a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -154,6 +154,10 @@ enum {
NULL_Q_MQ = 2,
};
+static int g_no_sched;
+module_param_named(no_sched, g_no_sched, int, S_IRUGO);
+MODULE_PARM_DESC(no_sched, "No io scheduler");
+
static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -476,7 +480,7 @@ static struct configfs_item_operations nullb_device_ops = {
.release = nullb_device_release,
};
-static struct config_item_type nullb_device_type = {
+static const struct config_item_type nullb_device_type = {
.ct_item_ops = &nullb_device_ops,
.ct_attrs = nullb_device_attrs,
.ct_owner = THIS_MODULE,
@@ -528,7 +532,7 @@ static struct configfs_group_operations nullb_group_ops = {
.drop_item = nullb_group_drop_item,
};
-static struct config_item_type nullb_group_type = {
+static const struct config_item_type nullb_group_type = {
.ct_group_ops = &nullb_group_ops,
.ct_attrs = nullb_group_attrs,
.ct_owner = THIS_MODULE,
@@ -1754,6 +1758,8 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (g_no_sched)
+ set->flags |= BLK_MQ_F_NO_SCHED;
set->driver_data = NULL;
if ((nullb && nullb->dev->blocking) || g_blocking)
@@ -1985,8 +1991,10 @@ static int __init null_init(void)
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
- if (!dev)
+ if (!dev) {
+ ret = -ENOMEM;
goto err_dev;
+ }
ret = null_add_dev(dev);
if (ret) {
null_free_dev(dev);
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 3a15247942e4..f8bd6ef3605a 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# PARIDE configuration
#
@@ -25,6 +26,7 @@ config PARIDE_PD
config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
+ select CDROM
select BLK_SCSI_REQUEST # only for the generic cdrom code
---help---
This option enables the high-level driver for ATAPI CD-ROM devices
diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile
index a539e004bb7a..cf1742a8475e 100644
--- a/drivers/block/paride/Makefile
+++ b/drivers/block/paride/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Parallel port IDE device drivers.
#
diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd
index 971f099b40aa..6d0d802479ea 100644
--- a/drivers/block/paride/mkd
+++ b/drivers/block/paride/mkd
@@ -1,4 +1,5 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
#
# mkd -- a script to create the device special files for the PARIDE subsystem
#
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b640ad8a6d20..38fc5f397fde 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -348,7 +348,6 @@ struct rbd_client_id {
struct rbd_mapping {
u64 size;
u64 features;
- bool read_only;
};
/*
@@ -450,12 +449,11 @@ static DEFINE_IDA(rbd_dev_id_ida);
static struct workqueue_struct *rbd_wq;
/*
- * Default to false for now, as single-major requires >= 0.75 version of
- * userspace rbd utility.
+ * single-major requires >= 0.75 version of userspace rbd utility.
*/
-static bool single_major = false;
+static bool single_major = true;
module_param(single_major, bool, S_IRUGO);
-MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
+MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static int rbd_img_request_submit(struct rbd_img_request *img_request);
@@ -608,9 +606,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bool removing = false;
- if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
- return -EROFS;
-
spin_lock_irq(&rbd_dev->lock);
if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
removing = true;
@@ -640,46 +635,24 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
- int ret = 0;
- int val;
- bool ro;
- bool ro_changed = false;
+ int ro;
- /* get_user() may sleep, so call it before taking rbd_dev->lock */
- if (get_user(val, (int __user *)(arg)))
+ if (get_user(ro, (int __user *)arg))
return -EFAULT;
- ro = val ? true : false;
- /* Snapshot doesn't allow to write*/
+ /* Snapshots can't be marked read-write */
if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
return -EROFS;
- spin_lock_irq(&rbd_dev->lock);
- /* prevent others open this device */
- if (rbd_dev->open_count > 1) {
- ret = -EBUSY;
- goto out;
- }
-
- if (rbd_dev->mapping.read_only != ro) {
- rbd_dev->mapping.read_only = ro;
- ro_changed = true;
- }
-
-out:
- spin_unlock_irq(&rbd_dev->lock);
- /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
- if (ret == 0 && ro_changed)
- set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
-
- return ret;
+ /* Let blkdev_roset() handle it */
+ return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
- int ret = 0;
+ int ret;
switch (cmd) {
case BLKROSET:
@@ -2692,7 +2665,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2827,7 +2800,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
@@ -4050,15 +4023,8 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- /* Only reads are allowed to a read-only device */
-
- if (op_type != OBJ_OP_READ) {
- if (rbd_dev->mapping.read_only) {
- result = -EROFS;
- goto err_rq;
- }
- rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
- }
+ rbd_assert(op_type == OBJ_OP_READ ||
+ rbd_dev->spec->snap_id == CEPH_NOSNAP);
/*
* Quit early if the mapped snapshot no longer exists. It's
@@ -4423,7 +4389,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
/* enable the discard support */
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = segment_size;
- q->limits.discard_alignment = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
@@ -5994,7 +5959,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
goto err_out_disk;
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
- set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+ set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
@@ -6145,7 +6110,6 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_options *rbd_opts = NULL;
struct rbd_spec *spec = NULL;
struct rbd_client *rbdc;
- bool read_only;
int rc;
if (!try_module_get(THIS_MODULE))
@@ -6194,11 +6158,8 @@ static ssize_t do_rbd_add(struct bus_type *bus,
}
/* If we are mapping a snapshot it must be marked read-only */
-
- read_only = rbd_dev->opts->read_only;
if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
- read_only = true;
- rbd_dev->mapping.read_only = read_only;
+ rbd_dev->opts->read_only = true;
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
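
Two independent threads run through the rbd hunks above. The read-only bookkeeping is dropped from struct rbd_mapping: BLKROSET now falls through to the generic blkdev_roset() handling (the driver ioctl helper returns -ENOTTY), and the disk is marked read-only from rbd_dev->opts->read_only at setup time. Separately, the page-vector allocations in the object-request paths switch from GFP_KERNEL to GFP_NOIO, since an allocation made while servicing block I/O must not recurse into reclaim that issues more I/O. A sketch of the scope-based way to express the same constraint (these helpers exist in the kernel but are not what this patch uses; the allocation helper is hypothetical):

#include <linux/sched/mm.h>

	unsigned int noio_flags = memalloc_noio_save();	/* allocations below behave as GFP_NOIO */
	pages = my_alloc_reply_pages(page_count);		/* hypothetical allocation in the I/O path */
	memalloc_noio_restore(noio_flags);
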
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 926dce9c452f..c148e83e4ed7 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -203,9 +203,9 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
return 0;
}
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
{
- struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
struct creg_cmd *cmd;
spin_lock(&card->creg_ctrl.lock);
@@ -745,8 +745,7 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
spin_lock_init(&card->creg_ctrl.lock);
- setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
- (unsigned long) card);
+ timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
return 0;
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 6a1b2177951c..beaccf197a5a 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -354,9 +354,9 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
rsxx_complete_dma(ctrl, dma, status);
}
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
{
- struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+ struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
int cnt;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
@@ -838,8 +838,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
mutex_init(&ctrl->work_lock);
INIT_LIST_HEAD(&ctrl->queue);
- setup_timer(&ctrl->activity_timer, dma_engine_stalled,
- (unsigned long)ctrl);
+ timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
if (!ctrl->issue_wq)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 64d0fc17c174..de0d08133c7e 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -707,9 +707,9 @@ static void skd_start_queue(struct work_struct *work)
blk_mq_start_hw_queues(skdev->queue);
}
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
{
- struct skd_device *skdev = (struct skd_device *)arg;
+ struct skd_device *skdev = from_timer(skdev, t, timer);
unsigned long reqflags;
u32 state;
@@ -857,7 +857,7 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+ timer_setup(&skdev->timer, skd_timer_tick, 0);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
@@ -1967,7 +1967,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
break;
case FIT_MTD_CMD_LOG_HOST_ID:
- skdev->connect_time_stamp = get_seconds();
+ /* hardware interface overflows in y2106 */
+ skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
data = skdev->connect_time_stamp & 0xFFFF;
mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index ad9749463d4f..5ca56bfae63c 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -81,7 +81,7 @@ struct vdc_port {
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
@@ -974,8 +974,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
- (unsigned long)port);
+ timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1087,9 +1086,9 @@ static void vdc_queue_drain(struct vdc_port *port)
__blk_end_request_all(req, BLK_STS_IOERR);
}
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
{
- struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
struct vio_driver_state *vio = &port->vio;
unsigned long flags;
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 9f931f8f6b4c..af51015d056e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -239,10 +239,10 @@ static unsigned short write_postamble[] = {
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void act(struct floppy_state *fs);
-static void scan_timeout(unsigned long data);
-static void seek_timeout(unsigned long data);
-static void settle_timeout(unsigned long data);
-static void xfer_timeout(unsigned long data);
+static void scan_timeout(struct timer_list *t);
+static void seek_timeout(struct timer_list *t);
+static void settle_timeout(struct timer_list *t);
+static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
@@ -392,13 +392,12 @@ static void do_fd_request(struct request_queue * q)
}
static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long))
+ void (*proc)(struct timer_list *t))
{
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.function = proc;
- fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
@@ -569,9 +568,9 @@ static void act(struct floppy_state *fs)
}
}
-static void scan_timeout(unsigned long data)
+static void scan_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -594,9 +593,9 @@ static void scan_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void seek_timeout(unsigned long data)
+static void seek_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -614,9 +613,9 @@ static void seek_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void settle_timeout(unsigned long data)
+static void settle_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
@@ -644,9 +643,9 @@ static void settle_timeout(unsigned long data)
spin_unlock_irqrestore(&swim3_lock, flags);
}
-static void xfer_timeout(unsigned long data)
+static void xfer_timeout(struct timer_list *t)
{
- struct floppy_state *fs = (struct floppy_state *) data;
+ struct floppy_state *fs = from_timer(fs, t, timeout);
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
@@ -1182,7 +1181,7 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
return -EBUSY;
}
- init_timer(&fs->timeout);
+ timer_setup(&fs->timeout, NULL, 0);
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 0677d2514665..8077123678ad 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -718,7 +718,7 @@ static void check_batteries(struct cardinfo *card)
set_fault_to_battery_status(card);
}
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
{
int i;
@@ -738,8 +738,7 @@ static void check_all_batteries(unsigned long ptr)
static void init_battery_timer(void)
{
- init_timer(&battery_timer);
- battery_timer.function = check_all_batteries;
+ timer_setup(&battery_timer, check_all_batteries, 0);
battery_timer.expires = jiffies + (HZ * 60);
add_timer(&battery_timer);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 34e17ee799be..68846897d213 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ scsi_req_init(&vbr->sreq);
+}
+#endif
+
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+ .initialize_rq_fn = virtblk_initialize_rq,
+#endif
.map_queues = virtblk_map_queues,
};
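
The virtio_blk hook above serves the SG_IO/SCSI passthrough path: .initialize_rq_fn is invoked by the block layer when a passthrough request is allocated (the blk_get_request() path), so the scsi_request embedded in the driver's per-request data is set up once at allocation time rather than in the hot queue_rq() path. A sketch of the shape of such a hook (driver and pdu names are illustrative):

static void mydrv_initialize_rq(struct request *req)
{
	struct mydrv_req *r = blk_mq_rq_to_pdu(req);	/* driver pdu sits behind the request */

	scsi_req_init(&r->sreq);	/* reset the embedded scsi_request (cmd pointer, sense length) */
}
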
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 14459d66ef0c..c24589414c75 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -770,9 +770,9 @@ static void ace_fsm_tasklet(unsigned long data)
spin_unlock_irqrestore(&ace->lock, flags);
}
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
{
- struct ace_device *ace = (void *)data;
+ struct ace_device *ace = from_timer(ace, t, stall_timer);
unsigned long flags;
dev_warn(ace->dev,
@@ -984,7 +984,7 @@ static int ace_setup(struct ace_device *ace)
* Initialize the state machine tasklet and stall timer
*/
tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
- setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+ timer_setup(&ace->stall_timer, ace_stall_timer, 0);
/*
* Initialize the request queue
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 7cd4a8ec3c8f..ac3a31d433b2 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 5b8992beffec..4ed0a78fdc09 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -23,15 +23,15 @@ static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
"lz4",
#endif
-#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE)
- "deflate",
-#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
"842",
#endif
+#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
+ "zstd",
+#endif
NULL
};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index f149d3e61234..d70eba30003a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -122,14 +122,6 @@ static inline bool is_partial_io(struct bio_vec *bvec)
}
#endif
-static void zram_revalidate_disk(struct zram *zram)
-{
- revalidate_disk(zram->disk);
- /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_STABLE_WRITES;
-}
-
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -436,7 +428,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
WARN_ON_ONCE(!was_set);
}
-void zram_page_end_io(struct bio *bio)
+static void zram_page_end_io(struct bio *bio)
{
struct page *page = bio->bi_io_vec[0].bv_page;
@@ -1373,7 +1365,8 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- zram_revalidate_disk(zram);
+
+ revalidate_disk(zram->disk);
up_write(&zram->init_lock);
return len;
@@ -1420,7 +1413,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- zram_revalidate_disk(zram);
+ revalidate_disk(zram->disk);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
@@ -1539,6 +1532,7 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+
/*
* To ensure that we always get PAGE_SIZE aligned
* and n*PAGE_SIZED sized I/O requests.
@@ -1563,6 +1557,8 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
+ zram->disk->queue->backing_dev_info->capabilities |=
+ (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
add_disk(zram->disk);
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fae5a74dc737..60e1c7d6986d 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Bluetooth device drivers"
depends on BT
@@ -65,6 +66,7 @@ config BT_HCIBTSDIO
config BT_HCIUART
tristate "HCI UART driver"
+ depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
depends on TTY
help
Bluetooth HCI UART driver.
@@ -79,7 +81,6 @@ config BT_HCIUART
config BT_HCIUART_SERDEV
bool
depends on SERIAL_DEV_BUS && BT_HCIUART
- depends on SERIAL_DEV_BUS=y || SERIAL_DEV_BUS=BT_HCIUART
default y
config BT_HCIUART_H4
@@ -169,6 +170,7 @@ config BT_HCIUART_BCM
bool "Broadcom protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index e693ca6eeed9..4e4e44d09796 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Bluetooth HCI device drivers.
#
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5ce6d4176dc3..8e9547f195ef 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -121,7 +121,7 @@ static void bcm203x_complete(struct urb *urb)
}
data->state = BCM203X_LOAD_FIRMWARE;
-
+ /* fall through */
case BCM203X_LOAD_FIRMWARE:
if (data->fw_sent == data->fw_size) {
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP),
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index b07ca9565291..d513ef4743dc 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -156,9 +156,9 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
/* ======================== LED handling routines ======================== */
-static void bluecard_activity_led_timeout(u_long arg)
+static void bluecard_activity_led_timeout(struct timer_list *t)
{
- struct bluecard_info *info = (struct bluecard_info *)arg;
+ struct bluecard_info *info = from_timer(info, t, timer);
unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
@@ -691,8 +691,7 @@ static int bluecard_open(struct bluecard_info *info)
spin_lock_init(&(info->lock));
- setup_timer(&(info->timer), &bluecard_activity_led_timeout,
- (u_long)info);
+ timer_setup(&info->timer, bluecard_activity_led_timeout, 0);
skb_queue_head_init(&(info->txq));
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 48d10cb5c9a1..7971bfbd4321 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -117,7 +117,7 @@ static void bpa10x_rx_complete(struct urb *urb)
bpa10x_recv_pkts,
ARRAY_SIZE(bpa10x_recv_pkts));
if (IS_ERR(data->rx_skb[idx])) {
- BT_ERR("%s corrupted event packet", hdev->name);
+ bt_dev_err(hdev, "corrupted event packet");
hdev->stat.err_rx++;
data->rx_skb[idx] = NULL;
}
@@ -127,8 +127,7 @@ static void bpa10x_rx_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -164,8 +163,7 @@ static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err);
usb_unanchor_urb(urb);
}
@@ -205,8 +203,7 @@ static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err);
usb_unanchor_urb(urb);
}
@@ -272,7 +269,7 @@ static int bpa10x_setup(struct hci_dev *hdev)
if (IS_ERR(skb))
return PTR_ERR(skb);
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
hci_set_fw_info(hdev, "%s", skb->data + 1);
@@ -348,7 +345,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p submission failed", hdev->name, urb);
+ bt_dev_err(hdev, "urb %p submission failed", urb);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 194788739a83..25b0cf952b91 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -355,7 +355,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
} else if ((stat & 0xff) != 0xff) {
if (stat & 0x0020) {
int status = bt3c_read(iobase, 0x7002) & 0x10;
- BT_INFO("%s: Antenna %s", info->hdev->name,
+ bt_dev_info(info->hdev, "Antenna %s",
status ? "out" : "in");
}
if (stat & 0x0001)
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index cc4bdefa6648..afa4cb3b16e3 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -45,13 +45,12 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
- BT_ERR("%s: BCM: Reading device address failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "BCM: Reading device address failed (%d)", err);
return err;
}
if (skb->len != sizeof(*bda)) {
- BT_ERR("%s: BCM: Device address length mismatch", hdev->name);
+ bt_dev_err(hdev, "BCM: Device address length mismatch");
kfree_skb(skb);
return -EIO;
}
@@ -74,8 +73,8 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
- BT_INFO("%s: BCM: Using default device address (%pMR)",
- hdev->name, &bda->bdaddr);
+ bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
+ &bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
@@ -93,8 +92,7 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: BCM: Change address command failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "BCM: Change address command failed (%d)", err);
return err;
}
kfree_skb(skb);
@@ -116,8 +114,8 @@ int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "BCM: Download Minidrv command failed (%d)",
+ err);
goto done;
}
kfree_skb(skb);
@@ -136,7 +134,7 @@ int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
fw_size -= sizeof(*cmd);
if (fw_size < cmd->plen) {
- BT_ERR("%s: BCM: Patch is corrupted", hdev->name);
+ bt_dev_err(hdev, "BCM: Patch is corrupted");
err = -EINVAL;
goto done;
}
@@ -151,8 +149,8 @@ int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: BCM: Patch command %04x failed (%d)",
- hdev->name, opcode, err);
+ bt_dev_err(hdev, "BCM: Patch command %04x failed (%d)",
+ opcode, err);
goto done;
}
kfree_skb(skb);
@@ -173,7 +171,7 @@ static int btbcm_reset(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
- BT_ERR("%s: BCM: Reset failed (%d)", hdev->name, err);
+ bt_dev_err(hdev, "BCM: Reset failed (%d)", err);
return err;
}
kfree_skb(skb);
@@ -191,13 +189,13 @@ static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: BCM: Reading local name failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM: Reading local name failed (%ld)",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_name)) {
- BT_ERR("%s: BCM: Local name length mismatch", hdev->name);
+ bt_dev_err(hdev, "BCM: Local name length mismatch");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -212,13 +210,13 @@ static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: BCM: Reading local version info failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM: Reading local version info failed (%ld)",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
- BT_ERR("%s: BCM: Local version length mismatch", hdev->name);
+ bt_dev_err(hdev, "BCM: Local version length mismatch");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -232,13 +230,13 @@ static struct sk_buff *btbcm_read_verbose_config(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: BCM: Read verbose config info failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM: Read verbose config info failed (%ld)",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != 7) {
- BT_ERR("%s: BCM: Verbose config length mismatch", hdev->name);
+ bt_dev_err(hdev, "BCM: Verbose config length mismatch");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -252,14 +250,13 @@ static struct sk_buff *btbcm_read_controller_features(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc6e, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: BCM: Read controller features failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM: Read controller features failed (%ld)",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != 9) {
- BT_ERR("%s: BCM: Controller features length mismatch",
- hdev->name);
+ bt_dev_err(hdev, "BCM: Controller features length mismatch");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -273,13 +270,13 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: BCM: Read USB product info failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM: Read USB product info failed (%ld)",
+ PTR_ERR(skb));
return skb;
}
if (skb->len != 5) {
- BT_ERR("%s: BCM: USB product length mismatch", hdev->name);
+ bt_dev_err(hdev, "BCM: USB product length mismatch");
kfree_skb(skb);
return ERR_PTR(-EIO);
}
@@ -296,7 +293,7 @@ static int btbcm_read_info(struct hci_dev *hdev)
if (IS_ERR(skb))
return PTR_ERR(skb);
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+ bt_dev_info(hdev, "BCM: chip id %u", skb->data[1]);
kfree_skb(skb);
/* Read Controller Features */
@@ -304,7 +301,7 @@ static int btbcm_read_info(struct hci_dev *hdev)
if (IS_ERR(skb))
return PTR_ERR(skb);
- BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
/* Read Local Name */
@@ -312,7 +309,7 @@ static int btbcm_read_info(struct hci_dev *hdev)
if (IS_ERR(skb))
return PTR_ERR(skb);
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
kfree_skb(skb);
return 0;
@@ -327,6 +324,8 @@ static const struct {
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ 0x2209, "BCM43430A1" }, /* 001.002.009 */
+ { 0x6119, "BCM4345C0" }, /* 003.001.025 */
+ { 0x230f, "BCM4356A2" }, /* 001.003.015 */
{ }
};
@@ -361,6 +360,7 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
switch ((rev & 0xf000) >> 12) {
case 0:
case 1:
+ case 2:
case 3:
for (i = 0; bcm_uart_subver_table[i].name; i++) {
if (subver == bcm_uart_subver_table[i].subver) {
@@ -375,9 +375,9 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
return 0;
}
- BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
return 0;
}
@@ -405,9 +405,9 @@ int btbcm_finalize(struct hci_dev *hdev)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
- (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
- (subver & 0x00ff), rev & 0x0fff);
+ bt_dev_info(hdev, "BCM (%3.3u.%3.3u.%3.3u) build %4.4u",
+ (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
+ (subver & 0x00ff), rev & 0x0fff);
btbcm_check_bdaddr(hdev);
@@ -502,13 +502,13 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
return 0;
}
- BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
- BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
+ bt_dev_info(hdev, "BCM: Patch %s not found", fw_name);
goto done;
}
@@ -531,16 +531,16 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
- hw_name ? : "BCM", (subver & 0xe000) >> 13,
- (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
+ bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u",
+ hw_name ? : "BCM", (subver & 0xe000) >> 13,
+ (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (IS_ERR(skb))
return PTR_ERR(skb);
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
kfree_skb(skb);
done:
@@ -565,31 +565,31 @@ int btbcm_setup_apple(struct hci_dev *hdev)
/* Read Verbose Config Version Info */
skb = btbcm_read_verbose_config(hdev);
if (!IS_ERR(skb)) {
- BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name,
- skb->data[1], get_unaligned_le16(skb->data + 5));
+ bt_dev_info(hdev, "BCM: chip id %u build %4.4u",
+ skb->data[1], get_unaligned_le16(skb->data + 5));
kfree_skb(skb);
}
/* Read USB Product Info */
skb = btbcm_read_usb_product(hdev);
if (!IS_ERR(skb)) {
- BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name,
- get_unaligned_le16(skb->data + 1),
- get_unaligned_le16(skb->data + 3));
+ bt_dev_info(hdev, "BCM: product %4.4x:%4.4x",
+ get_unaligned_le16(skb->data + 1),
+ get_unaligned_le16(skb->data + 3));
kfree_skb(skb);
}
/* Read Controller Features */
skb = btbcm_read_controller_features(hdev);
if (!IS_ERR(skb)) {
- BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]);
kfree_skb(skb);
}
/* Read Local Name */
skb = btbcm_read_local_name(hdev);
if (!IS_ERR(skb)) {
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ bt_dev_info(hdev, "%s", (char *)(skb->data + 1));
kfree_skb(skb);
}
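
The btbcm conversion above (and the btintel, btqca and btrtl ones that follow) replaces BT_INFO()/BT_ERR() calls that manually printed hdev->name with the bt_dev_info()/bt_dev_err() helpers, which take the hci_dev itself and add the device prefix automatically. The before/after shape, as a fragment assuming a valid struct hci_dev *hdev:

	/* before */
	BT_ERR("%s: BCM: Reset failed (%d)", hdev->name, err);

	/* after */
	bt_dev_err(hdev, "BCM: Reset failed (%d)", err);
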
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d32e109bd5cb..4459555c9d88 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -43,13 +43,13 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
- BT_ERR("%s: Reading Intel device address failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Reading Intel device address failed (%d)",
+ err);
return err;
}
if (skb->len != sizeof(*bda)) {
- BT_ERR("%s: Intel device address length mismatch", hdev->name);
+ bt_dev_err(hdev, "Intel device address length mismatch");
kfree_skb(skb);
return -EIO;
}
@@ -62,8 +62,8 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
* and that in turn can cause problems with Bluetooth operation.
*/
if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
- BT_ERR("%s: Found Intel default device address (%pMR)",
- hdev->name, &bda->bdaddr);
+ bt_dev_err(hdev, "Found Intel default device address (%pMR)",
+ &bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
@@ -123,8 +123,8 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Changing Intel device address failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Changing Intel device address failed (%d)",
+ err);
return err;
}
kfree_skb(skb);
@@ -154,8 +154,8 @@ int btintel_set_diag(struct hci_dev *hdev, bool enable)
err = PTR_ERR(skb);
if (err == -ENODATA)
goto done;
- BT_ERR("%s: Changing Intel diagnostic mode failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)",
+ err);
return err;
}
kfree_skb(skb);
@@ -189,30 +189,30 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code)
struct sk_buff *skb;
u8 type = 0x00;
- BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+ bt_dev_err(hdev, "Hardware error 0x%2.2x", code);
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Reset after hardware error failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "Reset after hardware error failed (%ld)",
+ PTR_ERR(skb));
return;
}
kfree_skb(skb);
skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)",
+ PTR_ERR(skb));
return;
}
if (skb->len != 13) {
- BT_ERR("%s: Exception info size mismatch", hdev->name);
+ bt_dev_err(hdev, "Exception info size mismatch");
kfree_skb(skb);
return;
}
- BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+ bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1));
kfree_skb(skb);
}
@@ -233,9 +233,10 @@ void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
return;
}
- BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
- variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
- ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+ bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u",
+ variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+ ver->fw_build_num, ver->fw_build_ww,
+ 2000 + ver->fw_build_yy);
}
EXPORT_SYMBOL_GPL(btintel_version_info);
@@ -321,8 +322,7 @@ int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- BT_ERR("%s: Setting Intel event mask failed (%d)",
- hdev->name, err);
+ bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err);
return err;
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 03341ce98c32..7dbb4463b539 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -64,7 +64,7 @@ static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv)
struct btmrvl_sdio_card *card = priv;
struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg;
- pr_info("%s: wake by bt", __func__);
+ pr_info("%s: wake by bt\n", __func__);
cfg->wake_by_bt = true;
disable_irq_nosync(irq);
@@ -87,7 +87,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
if (!dev->of_node ||
!of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) {
- pr_err("sdio platform data not available");
+ pr_err("sdio platform data not available\n");
return -1;
}
@@ -99,7 +99,7 @@ static int btmrvl_sdio_probe_of(struct device *dev,
if (cfg && card->plt_of_node) {
cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
if (!cfg->irq_bt) {
- dev_err(dev, "fail to parse irq_bt from device tree");
+ dev_err(dev, "fail to parse irq_bt from device tree\n");
cfg->irq_bt = -1;
} else {
ret = devm_request_irq(dev, cfg->irq_bt,
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 0bbdfcef2aa8..2793d4180d2f 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -287,7 +287,7 @@ static int rome_download_firmware(struct hci_dev *hdev,
const struct firmware *fw;
int ret;
- BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+ bt_dev_info(hdev, "ROME Downloading %s", config->fwname);
ret = request_firmware(&fw, config->fwname, &hdev->dev);
if (ret) {
@@ -351,7 +351,7 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
return err;
}
- BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+ bt_dev_info(hdev, "ROME controller version 0x%08x", rome_ver);
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
@@ -380,7 +380,7 @@ int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
return err;
}
- BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+ bt_dev_info(hdev, "ROME setup on UART is completed");
return 0;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index d00c4fdae924..663bed63b871 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
+#include <linux/of.h>
+
#include <linux/soc/qcom/wcnss_ctrl.h>
#include <linux/platform_device.h>
@@ -26,6 +28,7 @@
struct btqcomsmd {
struct hci_dev *hdev;
+ bdaddr_t bdaddr;
struct rpmsg_endpoint *acl_channel;
struct rpmsg_endpoint *cmd_channel;
};
@@ -100,6 +103,38 @@ static int btqcomsmd_close(struct hci_dev *hdev)
return 0;
}
+static int btqcomsmd_setup(struct hci_dev *hdev)
+{
+ struct btqcomsmd *btq = hci_get_drvdata(hdev);
+ struct sk_buff *skb;
+ int err;
+
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+ kfree_skb(skb);
+
+ /* Devices do not have persistent storage for BD address. If no
+ * BD address has been retrieved during probe, mark the device
+ * as having an invalid BD address.
+ */
+ if (!bacmp(&btq->bdaddr, BDADDR_ANY)) {
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ return 0;
+ }
+
+ /* When setting a configured BD address fails, mark the device
+ * as having an invalid BD address.
+ */
+ err = qca_set_bdaddr_rome(hdev, &btq->bdaddr);
+ if (err) {
+ set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ return 0;
+ }
+
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -123,6 +158,15 @@ static int btqcomsmd_probe(struct platform_device *pdev)
if (IS_ERR(btq->cmd_channel))
return PTR_ERR(btq->cmd_channel);
+ /* The local-bd-address property is usually injected by the
+ * bootloader which has access to the allocated BD address.
+ */
+ if (!of_property_read_u8_array(pdev->dev.of_node, "local-bd-address",
+ (u8 *)&btq->bdaddr, sizeof(bdaddr_t))) {
+ dev_info(&pdev->dev, "BD address %pMR retrieved from device-tree",
+ &btq->bdaddr);
+ }
+
hdev = hci_alloc_dev();
if (!hdev)
return -ENOMEM;
@@ -135,6 +179,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->open = btqcomsmd_open;
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
+ hdev->setup = btqcomsmd_setup;
hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev);
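The new btqcomsmd_setup() treats a missing or unprogrammable BD address as non-fatal: the controller still registers, but HCI_QUIRK_INVALID_BDADDR forces user space to set an address before use, and the probe path fills btq->bdaddr from the bootloader-provided local-bd-address property when present. A minimal sketch of the same lookup and quirk handling, with illustrative names:

#include <linux/of.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Copy an optional bootloader-provided address out of the device tree;
 * when the property is absent the address stays BDADDR_ANY.
 */
static void example_read_bdaddr(struct device_node *np, bdaddr_t *bdaddr)
{
	bacpy(bdaddr, BDADDR_ANY);
	of_property_read_u8_array(np, "local-bd-address",
				  (u8 *)bdaddr, sizeof(*bdaddr));
}

/* At setup time, an all-zero address means "no persistent storage":
 * keep going, but require user space to program one.
 */
static void example_flag_missing_bdaddr(struct hci_dev *hdev,
					const bdaddr_t *bdaddr)
{
	if (!bacmp(bdaddr, BDADDR_ANY))
		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}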
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index d9a99b4302ea..6e2ad748abba 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -55,8 +55,8 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
}
rom_version = (struct rtl_rom_version_evt *)skb->data;
- BT_INFO("%s: rom_version status=%x version=%x",
- hdev->name, rom_version->status, rom_version->version);
+ bt_dev_info(hdev, "rom_version status=%x version=%x",
+ rom_version->status, rom_version->version);
*version = rom_version->version;
@@ -273,7 +273,7 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
const struct firmware *fw;
int ret;
- BT_INFO("%s: rtl: loading %s", hdev->name, name);
+ bt_dev_info(hdev, "rtl: loading %s", name);
ret = request_firmware(&fw, name, &hdev->dev);
if (ret < 0)
return ret;
@@ -292,7 +292,7 @@ static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
const struct firmware *fw;
int ret;
- BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+ bt_dev_info(hdev, "rtl: loading rtl_bt/rtl8723a_fw.bin");
ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
@@ -363,7 +363,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
} else
cfg_sz = 0;
- BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+ bt_dev_info(hdev, "rtl: loading %s", fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
@@ -390,7 +390,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
fw_data = tbuff;
}
- BT_INFO("cfg_sz %d, total size %d", cfg_sz, ret);
+ bt_dev_info(hdev, "cfg_sz %d, total size %d", cfg_sz, ret);
ret = rtl_download_firmware(hdev, fw_data, ret);
@@ -436,9 +436,10 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
return -PTR_ERR(skb);
resp = (struct hci_rp_read_local_version *)skb->data;
- BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
- "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
- resp->lmp_ver, resp->lmp_subver);
+ bt_dev_info(hdev, "rtl: examining hci_ver=%02x hci_rev=%04x "
+ "lmp_ver=%02x lmp_subver=%04x",
+ resp->hci_ver, resp->hci_rev,
+ resp->lmp_ver, resp->lmp_subver);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
@@ -466,7 +467,7 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
return btrtl_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8822b_fw.bin");
default:
- BT_INFO("rtl: assuming no firmware upload needed.");
+ bt_dev_info(hdev, "rtl: assuming no firmware upload needed");
return 0;
}
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7a5c06aaa181..f7120c9eb9bd 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,7 +66,6 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
-#define BTUSB_BCM_NO_PRODID 0x200000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -171,10 +170,6 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
- /* Broadcom devices with missing product id */
- { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01),
- .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID },
-
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
@@ -272,6 +267,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
@@ -401,6 +397,7 @@ struct btusb_data {
struct usb_interface *intf;
struct usb_interface *isoc;
struct usb_interface *diag;
+ unsigned isoc_ifnum;
unsigned long flags;
@@ -647,7 +644,7 @@ static void btusb_intr_complete(struct urb *urb)
if (btusb_recv_intr(data, urb->transfer_buffer,
urb->actual_length) < 0) {
- BT_ERR("%s corrupted event packet", hdev->name);
+ bt_dev_err(hdev, "corrupted event packet");
hdev->stat.err_rx++;
}
} else if (urb->status == -ENOENT) {
@@ -667,8 +664,8 @@ static void btusb_intr_complete(struct urb *urb)
* -ENODEV: device got disconnected
*/
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -710,8 +707,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
@@ -737,7 +734,7 @@ static void btusb_bulk_complete(struct urb *urb)
if (data->recv_bulk(data, urb->transfer_buffer,
urb->actual_length) < 0) {
- BT_ERR("%s corrupted ACL packet", hdev->name);
+ bt_dev_err(hdev, "corrupted ACL packet");
hdev->stat.err_rx++;
}
} else if (urb->status == -ENOENT) {
@@ -757,8 +754,8 @@ static void btusb_bulk_complete(struct urb *urb)
* -ENODEV: device got disconnected
*/
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -799,8 +796,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
@@ -833,7 +830,7 @@ static void btusb_isoc_complete(struct urb *urb)
if (btusb_recv_isoc(data, urb->transfer_buffer + offset,
length) < 0) {
- BT_ERR("%s corrupted SCO packet", hdev->name);
+ bt_dev_err(hdev, "corrupted SCO packet");
hdev->stat.err_rx++;
}
}
@@ -853,8 +850,8 @@ static void btusb_isoc_complete(struct urb *urb)
* -ENODEV: device got disconnected
*/
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -921,8 +918,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
@@ -966,8 +963,8 @@ static void btusb_diag_complete(struct urb *urb)
* -ENODEV: device got disconnected
*/
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p failed to resubmit (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
}
@@ -1008,8 +1005,8 @@ static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
usb_unanchor_urb(urb);
}
@@ -1270,8 +1267,8 @@ static int submit_tx_urb(struct hci_dev *hdev, struct urb *urb)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
- BT_ERR("%s urb %p submission failed (%d)",
- hdev->name, urb, -err);
+ bt_dev_err(hdev, "urb %p submission failed (%d)",
+ urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
@@ -1364,9 +1361,9 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
if (!data->isoc)
return -ENODEV;
- err = usb_set_interface(data->udev, 1, altsetting);
+ err = usb_set_interface(data->udev, data->isoc_ifnum, altsetting);
if (err < 0) {
- BT_ERR("%s setting interface failed (%d)", hdev->name, -err);
+ bt_dev_err(hdev, "setting interface failed (%d)", -err);
return err;
}
@@ -1390,7 +1387,7 @@ static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting)
}
if (!data->isoc_tx_ep || !data->isoc_rx_ep) {
- BT_ERR("%s invalid SCO descriptors", hdev->name);
+ bt_dev_err(hdev, "invalid SCO descriptors");
return -ENODEV;
}
@@ -1485,7 +1482,7 @@ static int btusb_setup_bcm92035(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
- BT_ERR("BCM92035 command failed (%ld)", -PTR_ERR(skb));
+ bt_dev_err(hdev, "BCM92035 command failed (%ld)", PTR_ERR(skb));
else
kfree_skb(skb);
@@ -1503,12 +1500,12 @@ static int btusb_setup_csr(struct hci_dev *hdev)
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
int err = PTR_ERR(skb);
- BT_ERR("%s: CSR: Local version failed (%d)", hdev->name, err);
+ bt_dev_err(hdev, "CSR: Local version failed (%d)", err);
return err;
}
if (skb->len != sizeof(struct hci_rp_read_local_version)) {
- BT_ERR("%s: CSR: Local version length mismatch", hdev->name);
+ bt_dev_err(hdev, "CSR: Local version length mismatch");
kfree_skb(skb);
return -EIO;
}
@@ -1570,7 +1567,7 @@ static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
}
}
- BT_INFO("%s: Intel Bluetooth firmware file: %s", hdev->name, fwname);
+ bt_dev_info(hdev, "Intel Bluetooth firmware file: %s", fwname);
return fw;
}
@@ -1726,18 +1723,18 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (err)
return err;
- BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
- hdev->name, ver.hw_platform, ver.hw_variant, ver.hw_revision,
- ver.fw_variant, ver.fw_revision, ver.fw_build_num,
- ver.fw_build_ww, ver.fw_build_yy, ver.fw_patch_num);
+ bt_dev_info(hdev, "read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
+ ver.hw_platform, ver.hw_variant, ver.hw_revision,
+ ver.fw_variant, ver.fw_revision, ver.fw_build_num,
+ ver.fw_build_ww, ver.fw_build_yy, ver.fw_patch_num);
/* fw_patch_num indicates the version of patch the device currently
* have. If there is no patch data in the device, it is always 0x00.
* So, if it is other than 0x00, no need to patch the device again.
*/
if (ver.fw_patch_num) {
- BT_INFO("%s: Intel device is already patched. patch num: %02x",
- hdev->name, ver.fw_patch_num);
+ bt_dev_info(hdev, "Intel device is already patched. "
+ "patch num: %02x", ver.fw_patch_num);
goto complete;
}
@@ -1805,8 +1802,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (err)
return err;
- BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
- hdev->name);
+ bt_dev_info(hdev, "Intel firmware patch completed and activated");
goto complete;
@@ -1816,7 +1812,7 @@ exit_mfg_disable:
if (err)
return err;
- BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
+ bt_dev_info(hdev, "Intel firmware patch completed");
goto complete;
@@ -1830,8 +1826,7 @@ exit_mfg_deactivate:
if (err)
return err;
- BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
- hdev->name);
+ bt_dev_info(hdev, "Intel firmware patch completed and deactivated");
complete:
/* Set the event mask for Intel specific vendor events. This enables
@@ -2112,24 +2107,24 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
params = (struct intel_boot_params *)skb->data;
- BT_INFO("%s: Device revision is %u", hdev->name,
- le16_to_cpu(params->dev_revid));
+ bt_dev_info(hdev, "Device revision is %u",
+ le16_to_cpu(params->dev_revid));
- BT_INFO("%s: Secure boot is %s", hdev->name,
- params->secure_boot ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Secure boot is %s",
+ params->secure_boot ? "enabled" : "disabled");
- BT_INFO("%s: OTP lock is %s", hdev->name,
- params->otp_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "OTP lock is %s",
+ params->otp_lock ? "enabled" : "disabled");
- BT_INFO("%s: API lock is %s", hdev->name,
- params->api_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "API lock is %s",
+ params->api_lock ? "enabled" : "disabled");
- BT_INFO("%s: Debug lock is %s", hdev->name,
- params->debug_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Debug lock is %s",
+ params->debug_lock ? "enabled" : "disabled");
- BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
- params->min_fw_build_nn, params->min_fw_build_cw,
- 2000 + params->min_fw_build_yy);
+ bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+ params->min_fw_build_nn, params->min_fw_build_cw,
+ 2000 + params->min_fw_build_yy);
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
@@ -2146,24 +2141,49 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* also be no valid address for the operational firmware.
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
- BT_INFO("%s: No device address configured", hdev->name);
+ bt_dev_info(hdev, "No device address configured");
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
/* With this Intel bootloader only the hardware variant and device
- * revision information are used to select the right firmware.
+ * revision information are used to select the right firmware for SfP
+ * and WsP.
*
* The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
*
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT3.0 (LnP/SfP)
* 12 (0x0c) for iBT3.5 (WsP)
+ *
+ * For ThP/JfP and for future SKU's, the FW name varies based on HW
+ * variant, HW revision and FW revision, as these are dependent on CNVi
+ * and RF Combination.
+ *
* 17 (0x11) for iBT3.5 (JfP)
* 18 (0x12) for iBT3.5 (ThP)
+ *
+ * The firmware file name for these will be
+ * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
+ *
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params->dev_revid));
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ return -EINVAL;
+ }
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@@ -2173,14 +2193,29 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return err;
}
- BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+ bt_dev_info(hdev, "Found device firmware: %s", fwname);
/* Save the DDC file name for later use to apply once the firmware
* downloading is done.
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params->dev_revid));
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ return -EINVAL;
+ }
kfree_skb(skb);
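Both switches above encode one rule twice: for SfP/WsP (hw_variant 0x0b/0x0c) the firmware and DDC file names are built from hw_variant and dev_revid, while for JfP/ThP (0x11/0x12) they are built from hw_variant, hw_revision and fw_revision. A hypothetical helper that states the rule once (the function, its parameters and host-order types are assumptions, not part of the patch, which converts the little-endian fields with le16_to_cpu() at the call sites):

#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch of the naming rule shared by the .sfi and .ddc files. */
static int example_intel_fw_name(char *buf, size_t len, const char *suffix,
				 u8 hw_variant, u16 hw_revision,
				 u16 fw_revision, u16 dev_revid)
{
	switch (hw_variant) {
	case 0x0b:	/* SfP */
	case 0x0c:	/* WsP */
		snprintf(buf, len, "intel/ibt-%u-%u.%s",
			 hw_variant, dev_revid, suffix);
		return 0;
	case 0x11:	/* JfP */
	case 0x12:	/* ThP */
		snprintf(buf, len, "intel/ibt-%u-%u-%u.%s",
			 hw_variant, hw_revision, fw_revision, suffix);
		return 0;
	default:
		return -EINVAL;
	}
}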
@@ -2254,7 +2289,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
- BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+ bt_dev_info(hdev, "Waiting for firmware download to complete");
/* Before switching the device into operational mode and with that
* booting the loaded firmware, wait for the bootloader notification
@@ -2291,7 +2326,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+ bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
done:
release_firmware(fw);
@@ -2317,7 +2352,7 @@ done:
* 1 second. However if that happens, then just fail the setup
* since something went wrong.
*/
- BT_INFO("%s: Waiting for device to boot", hdev->name);
+ bt_dev_info(hdev, "Waiting for device to boot");
err = wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
TASK_INTERRUPTIBLE,
@@ -2337,7 +2372,7 @@ done:
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+ bt_dev_info(hdev, "Device booted in %llu usecs", duration);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
@@ -2440,8 +2475,8 @@ static int btusb_set_bdaddr_marvell(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc22, sizeof(buf), buf, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: changing Marvell device address failed (%ld)",
- hdev->name, ret);
+ bt_dev_err(hdev, "changing Marvell device address failed (%ld)",
+ ret);
return ret;
}
kfree_skb(skb);
@@ -2465,8 +2500,7 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%ld)",
- hdev->name, ret);
+ bt_dev_err(hdev, "Change address command failed (%ld)", ret);
return ret;
}
kfree_skb(skb);
@@ -2532,7 +2566,7 @@ static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request,
err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN,
0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
- BT_ERR("%s: Failed to access otp area (%d)", hdev->name, err);
+ bt_dev_err(hdev, "Failed to access otp area (%d)", err);
goto done;
}
@@ -2572,7 +2606,7 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR,
0, 0, buf, size, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
- BT_ERR("%s: Failed to send headers (%d)", hdev->name, err);
+ bt_dev_err(hdev, "Failed to send headers (%d)", err);
goto done;
}
@@ -2588,13 +2622,13 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
err = usb_bulk_msg(udev, pipe, buf, size, &len,
QCA_DFU_TIMEOUT);
if (err < 0) {
- BT_ERR("%s: Failed to send body at %zd of %zd (%d)",
- hdev->name, sent, firmware->size, err);
+ bt_dev_err(hdev, "Failed to send body at %zd of %zd (%d)",
+ sent, firmware->size, err);
break;
}
if (size != len) {
- BT_ERR("%s: Failed to get bulk buffer", hdev->name);
+ bt_dev_err(hdev, "Failed to get bulk buffer");
err = -EILSEQ;
break;
}
@@ -2626,24 +2660,23 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
- BT_ERR("%s: failed to request rampatch file: %s (%d)",
- hdev->name, fwname, err);
+ bt_dev_err(hdev, "failed to request rampatch file: %s (%d)",
+ fwname, err);
return err;
}
- BT_INFO("%s: using rampatch file: %s", hdev->name, fwname);
+ bt_dev_info(hdev, "using rampatch file: %s", fwname);
rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
rver_rom = le16_to_cpu(rver->rom_version);
rver_patch = le16_to_cpu(rver->patch_version);
- BT_INFO("%s: QCA: patch rome 0x%x build 0x%x, firmware rome 0x%x "
- "build 0x%x", hdev->name, rver_rom, rver_patch, ver_rom,
- ver_patch);
+ bt_dev_info(hdev, "QCA: patch rome 0x%x build 0x%x, "
+ "firmware rome 0x%x build 0x%x",
+ rver_rom, rver_patch, ver_rom, ver_patch);
if (rver_rom != ver_rom || rver_patch <= ver_patch) {
- BT_ERR("%s: rampatch file version did not match with firmware",
- hdev->name);
+ bt_dev_err(hdev, "rampatch file version did not match with firmware");
err = -EINVAL;
goto done;
}
@@ -2669,12 +2702,12 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
err = request_firmware(&fw, fwname, &hdev->dev);
if (err) {
- BT_ERR("%s: failed to request NVM file: %s (%d)",
- hdev->name, fwname, err);
+ bt_dev_err(hdev, "failed to request NVM file: %s (%d)",
+ fwname, err);
return err;
}
- BT_INFO("%s: using NVM file: %s", hdev->name, fwname);
+ bt_dev_info(hdev, "using NVM file: %s", fwname);
err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr);
@@ -2702,8 +2735,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
info = &qca_devices_table[i];
}
if (!info) {
- BT_ERR("%s: don't support firmware rome 0x%x", hdev->name,
- ver_rom);
+ bt_dev_err(hdev, "don't support firmware rome 0x%x", ver_rom);
return -ENODEV;
}
@@ -2757,7 +2789,7 @@ static inline int __set_diag_interface(struct hci_dev *hdev)
}
if (!data->diag_tx_ep || !data->diag_rx_ep) {
- BT_ERR("%s invalid diagnostic descriptors", hdev->name);
+ bt_dev_err(hdev, "invalid diagnostic descriptors");
return -ENODEV;
}
@@ -2909,19 +2941,6 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info == BTUSB_IGNORE)
return -ENODEV;
- if (id->driver_info & BTUSB_BCM_NO_PRODID) {
- struct usb_device *udev = interface_to_usbdev(intf);
-
- /* For the broken Broadcom devices that show 0000:0000
- * as USB vendor and product information, check that the
- * manufacturer string identifies them as Broadcom based
- * devices.
- */
- if (!udev->manufacturer ||
- strcmp(udev->manufacturer, "Broadcom Corp"))
- return -ENODEV;
- }
-
if (id->driver_info & BTUSB_ATH3012) {
struct usb_device *udev = interface_to_usbdev(intf);
@@ -3124,6 +3143,7 @@ static int btusb_probe(struct usb_interface *intf,
} else {
/* Interface orders are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
+ data->isoc_ifnum = ifnum_base + 1;
}
if (!reset)
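The btusb change above stops assuming the SCO endpoints always live on interface 1: probe now records the real interface number (ifnum_base + 1) in data->isoc_ifnum and __set_isoc_interface() hands that to usb_set_interface(). A small sketch of the idea with hypothetical names:

#include <linux/usb.h>

struct example_data {
	struct usb_device *udev;
	unsigned int isoc_ifnum;	/* recorded once at probe time */
};

/* Switch the isoc alternate setting on whichever interface was probed,
 * rather than on a hard-coded interface number.
 */
static int example_set_isoc_alt(struct example_data *data, int altsetting)
{
	return usb_set_interface(data->udev, data->isoc_ifnum, altsetting);
}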
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 0ccf6bf01ed4..14ae7ee88acb 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -50,6 +50,17 @@ struct ath_struct {
struct work_struct ctxtsw;
};
+#define OP_WRITE_TAG 0x01
+
+#define INDEX_BDADDR 0x01
+
+struct ath_vendor_cmd {
+ __u8 opcode;
+ __le16 index;
+ __u8 len;
+ __u8 data[251];
+} __packed;
+
static int ath_wakeup_ar3k(struct tty_struct *tty)
{
int status = tty->driver->ops->tiocmget(tty);
@@ -144,30 +155,34 @@ static int ath_flush(struct hci_uart *hu)
return 0;
}
-static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+static int ath_vendor_cmd(struct hci_dev *hdev, uint8_t opcode, uint16_t index,
+ const void *data, size_t dlen)
{
struct sk_buff *skb;
- u8 buf[10];
- int err;
-
- buf[0] = 0x01;
- buf[1] = 0x01;
- buf[2] = 0x00;
- buf[3] = sizeof(bdaddr_t);
- memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
-
- skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- BT_ERR("%s: Change address command failed (%d)",
- hdev->name, err);
- return err;
- }
+ struct ath_vendor_cmd cmd;
+
+ if (dlen > sizeof(cmd.data))
+ return -EINVAL;
+
+ cmd.opcode = opcode;
+ cmd.index = cpu_to_le16(index);
+ cmd.len = dlen;
+ memcpy(cmd.data, data, dlen);
+
+ skb = __hci_cmd_sync(hdev, 0xfc0b, dlen + 4, &cmd, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
kfree_skb(skb);
return 0;
}
+static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ return ath_vendor_cmd(hdev, OP_WRITE_TAG, INDEX_BDADDR, bdaddr,
+ sizeof(*bdaddr));
+}
+
static int ath_setup(struct hci_uart *hu)
{
BT_DBG("hu %p", hu);
@@ -191,7 +206,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
- BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
ath->rx_skb = NULL;
return err;
}
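The hci_ath rework replaces the hand-built 10-byte buffer in ath_set_bdaddr() with an explicit ath_vendor_cmd frame: one opcode byte, a little-endian tag index, a length byte, then up to 251 data bytes, of which only dlen + 4 bytes are sent with HCI opcode 0xfc0b. A standalone sketch of the same framing (struct and function names here are illustrative, not the driver's):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct example_tag_cmd {
	__u8	opcode;		/* e.g. 0x01 for a tag write */
	__le16	index;		/* tag index, e.g. 0x01 for the BD address */
	__u8	len;		/* valid bytes in data[] */
	__u8	data[251];
} __packed;

static int example_write_tag(struct hci_dev *hdev, u8 opcode, u16 index,
			     const void *data, size_t dlen)
{
	struct example_tag_cmd cmd;
	struct sk_buff *skb;

	if (dlen > sizeof(cmd.data))
		return -EINVAL;

	cmd.opcode = opcode;
	cmd.index = cpu_to_le16(index);
	cmd.len = dlen;
	memcpy(cmd.data, data, dlen);

	/* Only the 4-byte header plus the payload goes on the wire. */
	skb = __hci_cmd_sync(hdev, 0xfc0b, dlen + 4, &cmd, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);
	return 0;
}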
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index e2540113d0da..707c2d1b84c7 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -52,11 +52,13 @@
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
-/* platform device driver resources */
+/* device driver resources */
struct bcm_device {
+ /* Must be the first member, hci_serdev.c expects this. */
+ struct hci_uart serdev_hu;
struct list_head list;
- struct platform_device *pdev;
+ struct device *dev;
const char *name;
struct gpio_desc *device_wakeup;
@@ -68,7 +70,7 @@ struct bcm_device {
u32 init_speed;
u32 oper_speed;
int irq;
- u8 irq_polarity;
+ bool irq_active_low;
#ifdef CONFIG_PM
struct hci_uart *hu;
@@ -76,11 +78,6 @@ struct bcm_device {
#endif
};
-/* serdev driver resources */
-struct bcm_serdev {
- struct hci_uart hu;
-};
-
/* generic bcm uart resources */
struct bcm_data {
struct sk_buff *rx_skb;
@@ -155,6 +152,12 @@ static bool bcm_device_exists(struct bcm_device *device)
{
struct list_head *p;
+#ifdef CONFIG_PM
+ /* Devices using serdev always exist */
+ if (device && device->hu && device->hu->serdev)
+ return true;
+#endif
+
list_for_each(p, &bcm_device_list) {
struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -188,9 +191,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
- pm_runtime_get(&bdev->pdev->dev);
- pm_runtime_mark_last_busy(&bdev->pdev->dev);
- pm_runtime_put_autosuspend(&bdev->pdev->dev);
+ pm_runtime_get(bdev->dev);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
return IRQ_HANDLED;
}
@@ -200,7 +203,6 @@ static int bcm_request_irq(struct bcm_data *bcm)
struct bcm_device *bdev = bcm->dev;
int err;
- /* If this is not a platform device, do not enable PM functionalities */
mutex_lock(&bcm_device_lock);
if (!bcm_device_exists(bdev)) {
err = -ENODEV;
@@ -212,18 +214,20 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
- err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake,
- IRQF_TRIGGER_RISING, "host_wake", bdev);
+ err = devm_request_irq(bdev->dev, bdev->irq, bcm_host_wake,
+ bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
+ IRQF_TRIGGER_RISING,
+ "host_wake", bdev);
if (err)
goto unlock;
- device_init_wakeup(&bdev->pdev->dev, true);
+ device_init_wakeup(bdev->dev, true);
- pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+ pm_runtime_set_autosuspend_delay(bdev->dev,
BCM_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&bdev->pdev->dev);
- pm_runtime_set_active(&bdev->pdev->dev);
- pm_runtime_enable(&bdev->pdev->dev);
+ pm_runtime_use_autosuspend(bdev->dev);
+ pm_runtime_set_active(bdev->dev);
+ pm_runtime_enable(bdev->dev);
unlock:
mutex_unlock(&bcm_device_lock);
@@ -253,7 +257,7 @@ static int bcm_setup_sleep(struct hci_uart *hu)
struct sk_buff *skb;
struct bcm_set_sleep_mode sleep_params = default_sleep_params;
- sleep_params.host_wake_active = !bcm->dev->irq_polarity;
+ sleep_params.host_wake_active = !bcm->dev->irq_active_low;
skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params),
&sleep_params, HCI_INIT_TIMEOUT);
@@ -311,18 +315,17 @@ static int bcm_open(struct hci_uart *hu)
hu->priv = bcm;
- /* If this is a serdev defined device, then only use
- * serdev open primitive and skip the rest.
- */
+ mutex_lock(&bcm_device_lock);
+
if (hu->serdev) {
serdev_device_open(hu->serdev);
+ bcm->dev = serdev_device_get_drvdata(hu->serdev);
goto out;
}
if (!hu->tty->dev)
goto out;
- mutex_lock(&bcm_device_lock);
list_for_each(p, &bcm_device_list) {
struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -330,50 +333,56 @@ static int bcm_open(struct hci_uart *hu)
* platform device (saved during device probe) and
* parent of tty device used by hci_uart
*/
- if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+ if (hu->tty->dev->parent == dev->dev->parent) {
bcm->dev = dev;
- hu->init_speed = dev->init_speed;
- hu->oper_speed = dev->oper_speed;
#ifdef CONFIG_PM
dev->hu = hu;
#endif
- bcm_gpio_set_power(bcm->dev, true);
break;
}
}
- mutex_unlock(&bcm_device_lock);
out:
+ if (bcm->dev) {
+ hu->init_speed = bcm->dev->init_speed;
+ hu->oper_speed = bcm->dev->oper_speed;
+ bcm_gpio_set_power(bcm->dev, true);
+ }
+
+ mutex_unlock(&bcm_device_lock);
return 0;
}
static int bcm_close(struct hci_uart *hu)
{
struct bcm_data *bcm = hu->priv;
- struct bcm_device *bdev = bcm->dev;
+ struct bcm_device *bdev = NULL;
bt_dev_dbg(hu->hdev, "hu %p", hu);
- /* If this is a serdev defined device, only use serdev
- * close primitive and then continue as usual.
- */
- if (hu->serdev)
- serdev_device_close(hu->serdev);
-
/* Protect bcm->dev against removal of the device or driver */
mutex_lock(&bcm_device_lock);
- if (bcm_device_exists(bdev)) {
+
+ if (hu->serdev) {
+ serdev_device_close(hu->serdev);
+ bdev = serdev_device_get_drvdata(hu->serdev);
+ } else if (bcm_device_exists(bcm->dev)) {
+ bdev = bcm->dev;
+#ifdef CONFIG_PM
+ bdev->hu = NULL;
+#endif
+ }
+
+ if (bdev) {
bcm_gpio_set_power(bdev, false);
#ifdef CONFIG_PM
- pm_runtime_disable(&bdev->pdev->dev);
- pm_runtime_set_suspended(&bdev->pdev->dev);
+ pm_runtime_disable(bdev->dev);
+ pm_runtime_set_suspended(bdev->dev);
- if (device_can_wakeup(&bdev->pdev->dev)) {
- devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev);
- device_init_wakeup(&bdev->pdev->dev, false);
+ if (device_can_wakeup(bdev->dev)) {
+ devm_free_irq(bdev->dev, bdev->irq, bdev);
+ device_init_wakeup(bdev->dev, false);
}
-
- bdev->hu = NULL;
#endif
}
mutex_unlock(&bcm_device_lock);
@@ -504,9 +513,9 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
/* Delay auto-suspend when receiving completed packet */
mutex_lock(&bcm_device_lock);
if (bcm->dev && bcm_device_exists(bcm->dev)) {
- pm_runtime_get(&bcm->dev->pdev->dev);
- pm_runtime_mark_last_busy(&bcm->dev->pdev->dev);
- pm_runtime_put_autosuspend(&bcm->dev->pdev->dev);
+ pm_runtime_get(bcm->dev->dev);
+ pm_runtime_mark_last_busy(bcm->dev->dev);
+ pm_runtime_put_autosuspend(bcm->dev->dev);
}
mutex_unlock(&bcm_device_lock);
}
@@ -537,15 +546,15 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
if (bcm_device_exists(bcm->dev)) {
bdev = bcm->dev;
- pm_runtime_get_sync(&bdev->pdev->dev);
+ pm_runtime_get_sync(bdev->dev);
/* Shall be resumed here */
}
skb = skb_dequeue(&bcm->txq);
if (bdev) {
- pm_runtime_mark_last_busy(&bdev->pdev->dev);
- pm_runtime_put_autosuspend(&bdev->pdev->dev);
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -556,7 +565,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
#ifdef CONFIG_PM
static int bcm_suspend_device(struct device *dev)
{
- struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+ struct bcm_device *bdev = dev_get_drvdata(dev);
bt_dev_dbg(bdev, "");
@@ -579,7 +588,7 @@ static int bcm_suspend_device(struct device *dev)
static int bcm_resume_device(struct device *dev)
{
- struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+ struct bcm_device *bdev = dev_get_drvdata(dev);
bt_dev_dbg(bdev, "");
@@ -601,16 +610,18 @@ static int bcm_resume_device(struct device *dev)
#endif
#ifdef CONFIG_PM_SLEEP
-/* Platform suspend callback */
+/* suspend callback */
static int bcm_suspend(struct device *dev)
{
- struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+ struct bcm_device *bdev = dev_get_drvdata(dev);
int error;
bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
- /* bcm_suspend can be called at any time as long as platform device is
- * bound, so it should use bcm_device_lock to protect access to hci_uart
+ /*
+ * When used with a device instantiated as platform_device, bcm_suspend
+ * can be called at any time as long as the platform device is bound,
+ * so it should use bcm_device_lock to protect access to hci_uart
* and device_wake-up GPIO.
*/
mutex_lock(&bcm_device_lock);
@@ -621,7 +632,7 @@ static int bcm_suspend(struct device *dev)
if (pm_runtime_active(dev))
bcm_suspend_device(dev);
- if (device_may_wakeup(&bdev->pdev->dev)) {
+ if (device_may_wakeup(dev)) {
error = enable_irq_wake(bdev->irq);
if (!error)
bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -633,15 +644,17 @@ unlock:
return 0;
}
-/* Platform resume callback */
+/* resume callback */
static int bcm_resume(struct device *dev)
{
- struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+ struct bcm_device *bdev = dev_get_drvdata(dev);
bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
- /* bcm_resume can be called at any time as long as platform device is
- * bound, so it should use bcm_device_lock to protect access to hci_uart
+ /*
+ * When used with a device instantiated as platform_device, bcm_resume
+ * can be called at any time as long as platform device is bound,
+ * so it should use bcm_device_lock to protect access to hci_uart
* and device_wake-up GPIO.
*/
mutex_lock(&bcm_device_lock);
@@ -649,7 +662,7 @@ static int bcm_resume(struct device *dev)
if (!bdev->hu)
goto unlock;
- if (device_may_wakeup(&bdev->pdev->dev)) {
+ if (device_may_wakeup(dev)) {
disable_irq_wake(bdev->irq);
bt_dev_dbg(bdev, "BCM irq: disabled");
}
@@ -690,10 +703,8 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
};
#ifdef CONFIG_ACPI
-static u8 acpi_active_low = ACPI_ACTIVE_LOW;
-
/* IRQ polarity of some chipsets are not defined correctly in ACPI table. */
-static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
+static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = {
{
.ident = "Asus T100TA",
.matches = {
@@ -701,7 +712,6 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
"ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
},
- .driver_data = &acpi_active_low,
},
{
.ident = "Asus T100CHI",
@@ -710,7 +720,6 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
"ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
},
- .driver_data = &acpi_active_low,
},
{ /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
.ident = "Lenovo ThinkPad 8",
@@ -718,7 +727,13 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
},
- .driver_data = &acpi_active_low,
+ },
+ {
+ .ident = "MINIX Z83-4",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+ },
},
{ }
};
@@ -733,13 +748,13 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
irq = &ares->data.extended_irq;
- dev->irq_polarity = irq->polarity;
+ dev->irq_active_low = irq->polarity == ACPI_ACTIVE_LOW;
break;
case ACPI_RESOURCE_TYPE_GPIO:
gpio = &ares->data.gpio;
if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
- dev->irq_polarity = gpio->polarity;
+ dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW;
break;
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
@@ -754,36 +769,32 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
break;
}
- /* Always tell the ACPI core to skip this resource */
- return 1;
+ return 0;
}
#endif /* CONFIG_ACPI */
-static int bcm_platform_probe(struct bcm_device *dev)
+static int bcm_get_resources(struct bcm_device *dev)
{
- struct platform_device *pdev = dev->pdev;
-
- dev->name = dev_name(&pdev->dev);
+ dev->name = dev_name(dev->dev);
- dev->clk = devm_clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get(dev->dev, NULL);
- dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+ dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
"device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
return PTR_ERR(dev->device_wakeup);
- dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+ dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
GPIOD_OUT_LOW);
if (IS_ERR(dev->shutdown))
return PTR_ERR(dev->shutdown);
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
- dev->irq = platform_get_irq(pdev, 0);
if (dev->irq <= 0) {
struct gpio_desc *gpio;
- gpio = devm_gpiod_get_optional(&pdev->dev, "host-wakeup",
+ gpio = devm_gpiod_get_optional(dev->dev, "host-wakeup",
GPIOD_IN);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
@@ -791,54 +802,48 @@ static int bcm_platform_probe(struct bcm_device *dev)
dev->irq = gpiod_to_irq(gpio);
}
- dev_info(&pdev->dev, "BCM irq: %d\n", dev->irq);
-
- /* Make sure at-least one of the GPIO is defined and that
- * a name is specified for this instance
- */
- if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
- dev_err(&pdev->dev, "invalid platform data\n");
- return -EINVAL;
- }
-
+ dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
return 0;
}
#ifdef CONFIG_ACPI
static int bcm_acpi_probe(struct bcm_device *dev)
{
- struct platform_device *pdev = dev->pdev;
LIST_HEAD(resources);
const struct dmi_system_id *dmi_id;
const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios;
const struct acpi_device_id *id;
+ struct resource_entry *entry;
int ret;
/* Retrieve GPIO data */
- id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ id = acpi_match_device(dev->dev->driver->acpi_match_table, dev->dev);
if (id)
gpio_mapping = (const struct acpi_gpio_mapping *) id->driver_data;
- ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, gpio_mapping);
- if (ret)
- return ret;
-
- ret = bcm_platform_probe(dev);
+ ret = devm_acpi_dev_add_driver_gpios(dev->dev, gpio_mapping);
if (ret)
return ret;
/* Retrieve UART ACPI info */
- ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev),
+ ret = acpi_dev_get_resources(ACPI_COMPANION(dev->dev),
&resources, bcm_resource, dev);
if (ret < 0)
return ret;
+
+ resource_list_for_each_entry(entry, &resources) {
+ if (resource_type(entry->res) == IORESOURCE_IRQ) {
+ dev->irq = entry->res->start;
+ break;
+ }
+ }
acpi_dev_free_resource_list(&resources);
- dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table);
+ dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
if (dmi_id) {
- bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low",
+ dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
dmi_id->ident);
- dev->irq_polarity = *(u8 *)dmi_id->driver_data;
+ dev->irq_active_low = true;
}
return 0;
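bcm_resource() now returns 0 instead of 1, so the ACPI core no longer skips each resource; the converted entries end up on the list that bcm_acpi_probe() walks with resource_list_for_each_entry() to find the host-wake interrupt. A minimal sketch of that walk, assuming the device has an ACPI companion (names are illustrative):

#include <linux/acpi.h>
#include <linux/ioport.h>

/* Keep every resource (return 0) so it lands on the list for later walking. */
static int example_keep_resource(struct acpi_resource *ares, void *data)
{
	return 0;
}

static int example_find_irq(struct device *dev)
{
	struct resource_entry *entry;
	LIST_HEAD(resources);
	int irq = -ENOENT;
	int ret;

	ret = acpi_dev_get_resources(ACPI_COMPANION(dev), &resources,
				     example_keep_resource, NULL);
	if (ret < 0)
		return ret;

	resource_list_for_each_entry(entry, &resources) {
		if (resource_type(entry->res) == IORESOURCE_IRQ) {
			irq = entry->res->start;
			break;
		}
	}

	acpi_dev_free_resource_list(&resources);
	return irq;
}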
@@ -850,6 +855,12 @@ static int bcm_acpi_probe(struct bcm_device *dev)
}
#endif /* CONFIG_ACPI */
+static int bcm_of_probe(struct bcm_device *bdev)
+{
+ device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
+ return 0;
+}
+
static int bcm_probe(struct platform_device *pdev)
{
struct bcm_device *dev;
@@ -859,12 +870,16 @@ static int bcm_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+ dev->irq = platform_get_irq(pdev, 0);
- if (has_acpi_companion(&pdev->dev))
+ if (has_acpi_companion(&pdev->dev)) {
ret = bcm_acpi_probe(dev);
- else
- ret = bcm_platform_probe(dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = bcm_get_resources(dev);
if (ret)
return ret;
@@ -926,14 +941,16 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ { "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
{ "BCM2E95", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
{ "BCM2E96", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+ { "BCM2EA4", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
#endif
-/* Platform suspend and resume callbacks */
+/* suspend and resume callbacks */
static const struct dev_pm_ops bcm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume)
SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL)
@@ -951,29 +968,41 @@ static struct platform_driver bcm_driver = {
static int bcm_serdev_probe(struct serdev_device *serdev)
{
- struct bcm_serdev *bcmdev;
- u32 speed;
+ struct bcm_device *bcmdev;
int err;
bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
if (!bcmdev)
return -ENOMEM;
- bcmdev->hu.serdev = serdev;
+ bcmdev->dev = &serdev->dev;
+#ifdef CONFIG_PM
+ bcmdev->hu = &bcmdev->serdev_hu;
+#endif
+ bcmdev->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, bcmdev);
- err = device_property_read_u32(&serdev->dev, "max-speed", &speed);
- if (!err)
- bcmdev->hu.oper_speed = speed;
+ if (has_acpi_companion(&serdev->dev))
+ err = bcm_acpi_probe(bcmdev);
+ else
+ err = bcm_of_probe(bcmdev);
+ if (err)
+ return err;
+
+ err = bcm_get_resources(bcmdev);
+ if (err)
+ return err;
+
+ bcm_gpio_set_power(bcmdev, false);
- return hci_uart_register_device(&bcmdev->hu, &bcm_proto);
+ return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
}
static void bcm_serdev_remove(struct serdev_device *serdev)
{
- struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev);
+ struct bcm_device *bcmdev = serdev_device_get_drvdata(serdev);
- hci_uart_unregister_device(&bcmdev->hu);
+ hci_uart_unregister_device(&bcmdev->serdev_hu);
}
#ifdef CONFIG_OF
@@ -990,6 +1019,8 @@ static struct serdev_device_driver bcm_serdev_driver = {
.driver = {
.name = "hci_uart_bcm",
.of_match_table = of_match_ptr(bcm_bluetooth_of_match),
+ .acpi_match_table = ACPI_PTR(bcm_acpi_match),
+ .pm = &bcm_pm_ops,
},
};
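Because bcm_device now stores a plain struct device * and the driver data is attached to that device in both the platform and serdev probe paths, the same dev_pm_ops (bcm_pm_ops) can serve both bus types: the callbacks retrieve their state with dev_get_drvdata() instead of going through to_platform_device(). A hedged sketch of that shape with illustrative names:

#include <linux/device.h>
#include <linux/pm.h>

struct example_dev {
	struct device *dev;
	bool is_suspended;
};

/* Works no matter which bus instantiated the device, because the driver
 * data hangs off the generic struct device rather than a platform_device.
 */
static int example_suspend(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	edev->is_suspended = true;
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	edev->is_suspended = false;
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};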
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index d880f4e33c75..1a7f0c82fb36 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -65,6 +65,7 @@ struct bcsp_struct {
u8 rxseq_txack; /* rxseq == txack. */
u8 rxack; /* Last packet sent by us that the peer ack'ed */
struct timer_list tbcsp;
+ struct hci_uart *hu;
enum {
BCSP_W4_PKT_DELIMITER,
@@ -697,10 +698,10 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
}
/* Arrange to retransmit all messages in the relq. */
-static void bcsp_timed_event(unsigned long arg)
+static void bcsp_timed_event(struct timer_list *t)
{
- struct hci_uart *hu = (struct hci_uart *)arg;
- struct bcsp_struct *bcsp = hu->priv;
+ struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp);
+ struct hci_uart *hu = bcsp->hu;
struct sk_buff *skb;
unsigned long flags;
@@ -729,11 +730,12 @@ static int bcsp_open(struct hci_uart *hu)
return -ENOMEM;
hu->priv = bcsp;
+ bcsp->hu = hu;
skb_queue_head_init(&bcsp->unack);
skb_queue_head_init(&bcsp->rel);
skb_queue_head_init(&bcsp->unrel);
- setup_timer(&bcsp->tbcsp, bcsp_timed_event, (u_long)hu);
+ timer_setup(&bcsp->tbcsp, bcsp_timed_event, 0);
bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
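The bcsp hunks (and the matching h5 and qca ones further down) convert setup_timer(&t, fn, (unsigned long)hu) to timer_setup(): the callback now receives the struct timer_list itself and recovers its container with from_timer(), which is why the state struct gains a back-pointer to the hci_uart it previously obtained through the cast. A standalone sketch of the pattern with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_link {
	struct timer_list retrans_timer;
	void *owner;			/* back-pointer kept by the open path */
};

static void example_timed_event(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer field name. */
	struct example_link *link = from_timer(link, t, retrans_timer);

	(void)link->owner;		/* act on the owning device here */
}

static void example_open(struct example_link *link, void *owner)
{
	link->owner = owner;
	timer_setup(&link->retrans_timer, example_timed_event, 0);
	mod_timer(&link->retrans_timer, jiffies + HZ / 4);
}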
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 3b82a87224a9..fb97a3bf069b 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -132,7 +132,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
- BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
h4->rx_skb = NULL;
return err;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index c0e4e26dc30d..6a8d0d06aba7 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -78,6 +78,7 @@ struct h5 {
int (*rx_func)(struct hci_uart *hu, u8 c);
struct timer_list timer; /* Retransmission timer */
+ struct hci_uart *hu; /* Parent HCI UART */
u8 tx_seq; /* Next seq number to send */
u8 tx_ack; /* Next ack number to send */
@@ -120,12 +121,12 @@ static u8 h5_cfg_field(struct h5 *h5)
return h5->tx_win & 0x07;
}
-static void h5_timed_event(unsigned long arg)
+static void h5_timed_event(struct timer_list *t)
{
const unsigned char sync_req[] = { 0x01, 0x7e };
unsigned char conf_req[3] = { 0x03, 0xfc };
- struct hci_uart *hu = (struct hci_uart *)arg;
- struct h5 *h5 = hu->priv;
+ struct h5 *h5 = from_timer(h5, t, timer);
+ struct hci_uart *hu = h5->hu;
struct sk_buff *skb;
unsigned long flags;
@@ -197,6 +198,7 @@ static int h5_open(struct hci_uart *hu)
return -ENOMEM;
hu->priv = h5;
+ h5->hu = hu;
skb_queue_head_init(&h5->unack);
skb_queue_head_init(&h5->rel);
@@ -204,7 +206,7 @@ static int h5_open(struct hci_uart *hu)
h5_reset_rx(h5);
- setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);
+ timer_setup(&h5->timer, h5_timed_event, 0);
h5->tx_win = H5_TX_WIN_MAX;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index a746627e784e..c823914b3a80 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -41,6 +41,7 @@
#include <linux/ioctl.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -114,12 +115,12 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
struct sk_buff *skb = hu->tx_skb;
if (!skb) {
- read_lock(&hu->proto_lock);
+ percpu_down_read(&hu->proto_lock);
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
skb = hu->proto->dequeue(hu);
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
} else {
hu->tx_skb = NULL;
}
@@ -129,7 +130,14 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
int hci_uart_tx_wakeup(struct hci_uart *hu)
{
- read_lock(&hu->proto_lock);
+ /* This may be called in an IRQ context, so we can't sleep. Therefore
+ * we try to acquire the lock only, and if that fails we assume the
+ * tty is being closed because that is the only time the write lock is
+ * acquired. If, however, at some point in the future the write lock
+ * is also acquired in other situations, then this must be revisited.
+ */
+ if (!percpu_down_read_trylock(&hu->proto_lock))
+ return 0;
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
goto no_schedule;
@@ -144,7 +152,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
schedule_work(&hu->write_work);
no_schedule:
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
return 0;
}
@@ -246,12 +254,12 @@ static int hci_uart_flush(struct hci_dev *hdev)
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
- read_lock(&hu->proto_lock);
+ percpu_down_read(&hu->proto_lock);
if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
hu->proto->flush(hu);
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
return 0;
}
@@ -274,15 +282,15 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
skb->len);
- read_lock(&hu->proto_lock);
+ percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
hu->proto->enqueue(hu, skb);
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
hci_uart_tx_wakeup(hu);
@@ -298,6 +306,12 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
unsigned int set = 0;
unsigned int clear = 0;
+ if (hu->serdev) {
+ serdev_device_set_flow_control(hu->serdev, !enable);
+ serdev_device_set_rts(hu->serdev, !enable);
+ return;
+ }
+
if (enable) {
/* Disable hardware flow control */
ktermios = tty->termios;
@@ -479,7 +493,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- rwlock_init(&hu->proto_lock);
+ percpu_init_rwsem(&hu->proto_lock);
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
@@ -496,7 +510,6 @@ static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = tty->disc_data;
struct hci_dev *hdev;
- unsigned long flags;
BT_DBG("tty %p", tty);
@@ -510,12 +523,12 @@ static void hci_uart_tty_close(struct tty_struct *tty)
if (hdev)
hci_uart_close(hdev);
- cancel_work_sync(&hu->write_work);
-
if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
- write_lock_irqsave(&hu->proto_lock, flags);
+ percpu_down_write(&hu->proto_lock);
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
- write_unlock_irqrestore(&hu->proto_lock, flags);
+ percpu_up_write(&hu->proto_lock);
+
+ cancel_work_sync(&hu->write_work);
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
@@ -575,10 +588,10 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
if (!hu || tty != hu->tty)
return;
- read_lock(&hu->proto_lock);
+ percpu_down_read(&hu->proto_lock);
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
return;
}
@@ -586,7 +599,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
* tty caller
*/
hu->proto->recv(hu, data, count);
- read_unlock(&hu->proto_lock);
+ percpu_up_read(&hu->proto_lock);
if (hu->hdev)
hu->hdev->stat.byte_rx += count;
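The hci_ldisc hunks replace the proto_lock rwlock with a percpu_rw_semaphore: readers on the data paths become very cheap, the writer in tty close excludes them all while clearing HCI_UART_PROTO_READY, and hci_uart_tx_wakeup(), which may run in IRQ context, has to use percpu_down_read_trylock() since the semaphore can sleep. A condensed sketch of the three roles, with illustrative names (percpu_init_rwsem() is assumed to have run at open time):

#include <linux/bitops.h>
#include <linux/percpu-rwsem.h>

#define EXAMPLE_PROTO_READY	0

struct example_uart {
	struct percpu_rw_semaphore proto_lock;	/* percpu_init_rwsem() at open */
	unsigned long flags;
};

/* Ordinary reader on a path that may sleep. */
static bool example_ready(struct example_uart *hu)
{
	bool ready;

	percpu_down_read(&hu->proto_lock);
	ready = test_bit(EXAMPLE_PROTO_READY, &hu->flags);
	percpu_up_read(&hu->proto_lock);
	return ready;
}

/* Reader that may run in IRQ context: never block, bail out instead. */
static bool example_ready_atomic(struct example_uart *hu)
{
	bool ready;

	if (!percpu_down_read_trylock(&hu->proto_lock))
		return false;
	ready = test_bit(EXAMPLE_PROTO_READY, &hu->flags);
	percpu_up_read(&hu->proto_lock);
	return ready;
}

/* Writer at teardown: waits for all readers before flipping the bit. */
static void example_close(struct example_uart *hu)
{
	percpu_down_write(&hu->proto_lock);
	clear_bit(EXAMPLE_PROTO_READY, &hu->flags);
	percpu_up_write(&hu->proto_lock);
}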
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 424c15aa7bb7..e2c078d61730 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -242,7 +242,7 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu)
* perfectly safe to always send one.
*/
BT_DBG("dual wake-up-indication");
- /* deliberate fall-through - do not add break */
+ /* fall through */
case HCILL_ASLEEP:
/* acknowledge device wake up */
if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 392f412b4575..bbd7db7384e6 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -307,10 +307,10 @@ static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
}
-static void hci_ibs_tx_idle_timeout(unsigned long arg)
+static void hci_ibs_tx_idle_timeout(struct timer_list *t)
{
- struct hci_uart *hu = (struct hci_uart *)arg;
- struct qca_data *qca = hu->priv;
+ struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
+ struct hci_uart *hu = qca->hu;
unsigned long flags;
BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
@@ -342,10 +342,10 @@ static void hci_ibs_tx_idle_timeout(unsigned long arg)
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
-static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
- struct hci_uart *hu = (struct hci_uart *)arg;
- struct qca_data *qca = hu->priv;
+ struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
+ struct hci_uart *hu = qca->hu;
unsigned long flags, retrans_delay;
bool retransmit = false;
@@ -438,11 +438,10 @@ static int qca_open(struct hci_uart *hu)
hu->priv = qca;
- setup_timer(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout,
- (u_long)hu);
+ timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
- setup_timer(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, (u_long)hu);
+ timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
@@ -801,7 +800,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
- BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
qca->rx_skb = NULL;
return err;
}
@@ -864,7 +863,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
if (!skb) {
- BT_ERR("Failed to allocate memory for baudrate packet");
+ bt_dev_err(hdev, "Failed to allocate baudrate packet");
return -ENOMEM;
}
@@ -893,7 +892,7 @@ static int qca_setup(struct hci_uart *hu)
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
int ret;
- BT_INFO("%s: ROME setup", hdev->name);
+ bt_dev_info(hdev, "ROME setup");
/* Patch downloading has to be done without IBS mode */
clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
@@ -918,11 +917,11 @@ static int qca_setup(struct hci_uart *hu)
if (speed) {
qca_baudrate = qca_get_baudrate_value(speed);
- BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+ bt_dev_info(hdev, "Set UART speed to %d", speed);
ret = qca_set_baudrate(hdev, qca_baudrate);
if (ret) {
- BT_ERR("%s: Failed to change the baud rate (%d)",
- hdev->name, ret);
+ bt_dev_err(hdev, "Failed to change the baud rate (%d)",
+ ret);
return ret;
}
hci_uart_set_baudrate(hu, speed);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index b725ac4f7ff6..71664b22ec9d 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -185,7 +185,7 @@ static int hci_uart_setup(struct hci_dev *hdev)
if (hu->proto->set_baudrate && speed) {
err = hu->proto->set_baudrate(hu, speed);
if (err)
- BT_ERR("%s: failed to set baudrate", hdev->name);
+ bt_dev_err(hdev, "Failed to set baudrate");
else
serdev_device_set_baudrate(hu->serdev, speed);
}
@@ -199,14 +199,13 @@ static int hci_uart_setup(struct hci_dev *hdev)
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
- BT_ERR("%s: Reading local version information failed (%ld)",
- hdev->name, PTR_ERR(skb));
+ bt_dev_err(hdev, "Reading local version info failed (%ld)",
+ PTR_ERR(skb));
return 0;
}
if (skb->len != sizeof(*ver)) {
- BT_ERR("%s: Event length mismatch for version information",
- hdev->name);
+ bt_dev_err(hdev, "Event length mismatch for version info");
}
kfree_skb(skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index d9cd95d81149..66e8c68e4607 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -87,7 +87,7 @@ struct hci_uart {
struct work_struct write_work;
const struct hci_uart_proto *proto;
- rwlock_t proto_lock; /* Stop work for proto close */
+ struct percpu_rw_semaphore proto_lock; /* Stop work for proto close */
void *priv;
struct sk_buff *tx_skb;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index ae3d8f3444b9..dc7b3c7b7d42 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Bus Devices
#
@@ -157,6 +158,21 @@ config TEGRA_GMI
Driver for the Tegra Generic Memory Interface bus which can be used
to attach devices such as NOR, UART, FPGA and more.
+config TI_SYSC
+ bool "TI sysc interconnect target module driver"
+ depends on ARCH_OMAP2PLUS
+ help
+ Generic driver for Texas Instruments interconnect target module
+ found on many TI SoCs.
+
+config TS_NBUS
+ tristate "Technologic Systems NBUS Driver"
+ depends on SOC_IMX28
+ depends on OF_GPIO && PWM
+ help
+ Driver for the Technologic Systems NBUS which is used to interface
+ with the peripherals in the FPGA of the TS-4600 SoM.
+
config UNIPHIER_SYSTEM_BUS
tristate "UniPhier System Bus driver"
depends on ARCH_UNIPHIER && OF
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index cc6364bec054..9bcd0bf3954b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the bus drivers.
#
@@ -20,6 +21,8 @@ obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
+obj-$(CONFIG_TI_SYSC) += ti-sysc.o
+obj-$(CONFIG_TS_NBUS) += ts-nbus.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index e8c6946fed9d..3063f5312397 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1276,6 +1276,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
+ .module = THIS_MODULE,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
new file mode 100644
index 000000000000..c3c76a1ea8a8
--- /dev/null
+++ b/drivers/bus/ti-sysc.c
@@ -0,0 +1,583 @@
+/*
+ * ti-sysc.c - Texas Instruments sysc interconnect target driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+enum sysc_registers {
+ SYSC_REVISION,
+ SYSC_SYSCONFIG,
+ SYSC_SYSSTATUS,
+ SYSC_MAX_REGS,
+};
+
+static const char * const reg_names[] = { "rev", "sysc", "syss", };
+
+enum sysc_clocks {
+ SYSC_FCK,
+ SYSC_ICK,
+ SYSC_MAX_CLOCKS,
+};
+
+static const char * const clock_names[] = { "fck", "ick", };
+
+/**
+ * struct sysc - TI sysc interconnect target module registers and capabilities
+ * @dev: struct device pointer
+ * @module_pa: physical address of the interconnect target module
+ * @module_size: size of the interconnect target module
+ * @module_va: virtual address of the interconnect target module
+ * @offsets: register offsets from module base
+ * @clocks: clocks used by the interconnect target module
+ * @legacy_mode: configured for legacy mode if set
+ */
+struct sysc {
+ struct device *dev;
+ u64 module_pa;
+ u32 module_size;
+ void __iomem *module_va;
+ int offsets[SYSC_MAX_REGS];
+ struct clk *clocks[SYSC_MAX_CLOCKS];
+ const char *legacy_mode;
+};
+
+static u32 sysc_read_revision(struct sysc *ddata)
+{
+ return readl_relaxed(ddata->module_va +
+ ddata->offsets[SYSC_REVISION]);
+}
+
+static int sysc_get_one_clock(struct sysc *ddata,
+ enum sysc_clocks index)
+{
+ const char *name;
+ int error;
+
+ switch (index) {
+ case SYSC_FCK:
+ break;
+ case SYSC_ICK:
+ break;
+ default:
+ return -EINVAL;
+ }
+ name = clock_names[index];
+
+ ddata->clocks[index] = devm_clk_get(ddata->dev, name);
+ if (IS_ERR(ddata->clocks[index])) {
+ if (PTR_ERR(ddata->clocks[index]) == -ENOENT)
+ return 0;
+
+ dev_err(ddata->dev, "clock get error for %s: %li\n",
+ name, PTR_ERR(ddata->clocks[index]));
+
+ return PTR_ERR(ddata->clocks[index]);
+ }
+
+ error = clk_prepare(ddata->clocks[index]);
+ if (error) {
+ dev_err(ddata->dev, "clock prepare error for %s: %i\n",
+ name, error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+static int sysc_get_clocks(struct sysc *ddata)
+{
+ int i, error;
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ error = sysc_get_one_clock(ddata, i);
+ if (error && error != -ENOENT)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_parse_and_check_child_range - parses module IO region from ranges
+ * @ddata: device driver data
+ *
+ * In general we only need rev, syss, and sysc registers and not the whole
+ * module range. But we do want the offsets for these registers from the
+ * module base. This allows us to check them against the legacy hwmod
+ * platform data. Let's also check the ranges are configured properly.
+ */
+static int sysc_parse_and_check_child_range(struct sysc *ddata)
+{
+ struct device_node *np = ddata->dev->of_node;
+ const __be32 *ranges;
+ u32 nr_addr, nr_size;
+ int len, error;
+
+ ranges = of_get_property(np, "ranges", &len);
+ if (!ranges) {
+ dev_err(ddata->dev, "missing ranges for %pOF\n", np);
+
+ return -ENOENT;
+ }
+
+ len /= sizeof(*ranges);
+
+ if (len < 3) {
+ dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ error = of_property_read_u32(np, "#address-cells", &nr_addr);
+ if (error)
+ return -ENOENT;
+
+ error = of_property_read_u32(np, "#size-cells", &nr_size);
+ if (error)
+ return -ENOENT;
+
+ if (nr_addr != 1 || nr_size != 1) {
+ dev_err(ddata->dev, "invalid ranges for %pOF\n", np);
+
+ return -EINVAL;
+ }
+
+ ranges++;
+ ddata->module_pa = of_translate_address(np, ranges++);
+ ddata->module_size = be32_to_cpup(ranges);
+
+ dev_dbg(ddata->dev, "interconnect target 0x%llx size 0x%x for %pOF\n",
+ ddata->module_pa, ddata->module_size, np);
+
+ return 0;
+}
+
+/**
+ * sysc_check_one_child - check child configuration
+ * @ddata: device driver data
+ * @np: child device node
+ *
+ * Let's avoid messy situations where we have a new interconnect target
+ * node but the children have "ti,hwmods". These belong to the interconnect
+ * target node and are managed by this driver.
+ */
+static int sysc_check_one_child(struct sysc *ddata,
+ struct device_node *np)
+{
+ const char *name;
+
+ name = of_get_property(np, "ti,hwmods", NULL);
+ if (name)
+ dev_warn(ddata->dev, "really a child ti,hwmods property?");
+
+ return 0;
+}
+
+static int sysc_check_children(struct sysc *ddata)
+{
+ struct device_node *child;
+ int error;
+
+ for_each_child_of_node(ddata->dev->of_node, child) {
+ error = sysc_check_one_child(ddata, child);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_parse_one - parses the interconnect target module registers
+ * @ddata: device driver data
+ * @reg: register to parse
+ */
+static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
+{
+ struct resource *res;
+ const char *name;
+
+ switch (reg) {
+ case SYSC_REVISION:
+ case SYSC_SYSCONFIG:
+ case SYSC_SYSSTATUS:
+ name = reg_names[reg];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(to_platform_device(ddata->dev),
+ IORESOURCE_MEM, name);
+ if (!res) {
+ dev_dbg(ddata->dev, "has no %s register\n", name);
+ ddata->offsets[reg] = -ENODEV;
+
+ return 0;
+ }
+
+ ddata->offsets[reg] = res->start - ddata->module_pa;
+
+ return 0;
+}
+
+static int sysc_parse_registers(struct sysc *ddata)
+{
+ int i, error;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ error = sysc_parse_one(ddata, i);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_check_registers - check for misconfigured register overlaps
+ * @ddata: device driver data
+ */
+static int sysc_check_registers(struct sysc *ddata)
+{
+ int i, j, nr_regs = 0, nr_matches = 0;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++) {
+ if (ddata->offsets[i] < 0)
+ continue;
+
+ if (ddata->offsets[i] > (ddata->module_size - 4)) {
+ dev_err(ddata->dev, "register outside module range");
+
+ return -EINVAL;
+ }
+
+ for (j = 0; j < SYSC_MAX_REGS; j++) {
+ if (ddata->offsets[j] < 0)
+ continue;
+
+ if (ddata->offsets[i] == ddata->offsets[j])
+ nr_matches++;
+ }
+ nr_regs++;
+ }
+
+ if (nr_regs < 1) {
+ dev_err(ddata->dev, "missing registers\n");
+
+ return -EINVAL;
+ }
+
+ if (nr_matches > nr_regs) {
+ dev_err(ddata->dev, "overlapping registers: (%i/%i)",
+ nr_regs, nr_matches);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sysc_ioremap - ioremap register space for the interconnect target module
+ * @ddata: device driver data
+ *
+ * Note that the interconnect target module registers can be anywhere
+ * within the first child device address space. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. We just
+ * ioremap what we need around the interconnect target module registers.
+ */
+static int sysc_ioremap(struct sysc *ddata)
+{
+ u32 size = 0;
+
+ if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
+ size = ddata->offsets[SYSC_SYSSTATUS];
+ else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
+ size = ddata->offsets[SYSC_SYSCONFIG];
+ else if (ddata->offsets[SYSC_REVISION] >= 0)
+ size = ddata->offsets[SYSC_REVISION];
+ else
+ return -EINVAL;
+
+ size &= 0xfff00;
+ size += SZ_256;
+
+ ddata->module_va = devm_ioremap(ddata->dev,
+ ddata->module_pa,
+ size);
+ if (!ddata->module_va)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * sysc_map_and_check_registers - ioremap and check device registers
+ * @ddata: device driver data
+ */
+static int sysc_map_and_check_registers(struct sysc *ddata)
+{
+ int error;
+
+ error = sysc_parse_and_check_child_range(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_children(ddata);
+ if (error)
+ return error;
+
+ error = sysc_parse_registers(ddata);
+ if (error)
+ return error;
+
+ error = sysc_ioremap(ddata);
+ if (error)
+ return error;
+
+ error = sysc_check_registers(ddata);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+/**
+ * sysc_show_rev - read and show interconnect target module revision
+ * @bufp: buffer to print the information to
+ * @ddata: device driver data
+ */
+static int sysc_show_rev(char *bufp, struct sysc *ddata)
+{
+ int error, len;
+
+ if (ddata->offsets[SYSC_REVISION] < 0)
+ return sprintf(bufp, ":NA");
+
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return 0;
+ }
+
+ len = sprintf(bufp, ":%08x", sysc_read_revision(ddata));
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return len;
+}
+
+static int sysc_show_reg(struct sysc *ddata,
+ char *bufp, enum sysc_registers reg)
+{
+ if (ddata->offsets[reg] < 0)
+ return sprintf(bufp, ":NA");
+
+ return sprintf(bufp, ":%x", ddata->offsets[reg]);
+}
+
+/**
+ * sysc_show_registers - show information about interconnect target module
+ * @ddata: device driver data
+ */
+static void sysc_show_registers(struct sysc *ddata)
+{
+ char buf[128];
+ char *bufp = buf;
+ int i;
+
+ for (i = 0; i < SYSC_MAX_REGS; i++)
+ bufp += sysc_show_reg(ddata, bufp, i);
+
+ bufp += sysc_show_rev(bufp, ddata);
+
+ dev_dbg(ddata->dev, "%llx:%x%s\n",
+ ddata->module_pa, ddata->module_size,
+ buf);
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int i;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (IS_ERR_OR_NULL(ddata->clocks[i]))
+ continue;
+ clk_disable(ddata->clocks[i]);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int i, error;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (ddata->legacy_mode)
+ return 0;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (IS_ERR_OR_NULL(ddata->clocks[i]))
+ continue;
+ error = clk_enable(ddata->clocks[i]);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops sysc_pm_ops = {
+ SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
+ sysc_runtime_resume,
+ NULL)
+};
+
+static void sysc_unprepare(struct sysc *ddata)
+{
+ int i;
+
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!IS_ERR_OR_NULL(ddata->clocks[i]))
+ clk_unprepare(ddata->clocks[i]);
+ }
+}
+
+static int sysc_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sysc *ddata;
+ int error;
+
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->dev = &pdev->dev;
+ ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
+ error = sysc_map_and_check_registers(ddata);
+ if (error)
+ goto unprepare;
+
+ platform_set_drvdata(pdev, ddata);
+
+ pm_runtime_enable(ddata->dev);
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ pm_runtime_use_autosuspend(ddata->dev);
+
+ sysc_show_registers(ddata);
+
+ error = of_platform_populate(ddata->dev->of_node,
+ NULL, NULL, ddata->dev);
+ if (error)
+ goto err;
+
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
+
+ return 0;
+
+err:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+unprepare:
+ sysc_unprepare(ddata);
+
+ return error;
+}
+
+static int sysc_remove(struct platform_device *pdev)
+{
+ struct sysc *ddata = platform_get_drvdata(pdev);
+ int error;
+
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ goto unprepare;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+unprepare:
+ sysc_unprepare(ddata);
+
+ return 0;
+}
+
+static const struct of_device_id sysc_match[] = {
+ { .compatible = "ti,sysc-omap2" },
+ { .compatible = "ti,sysc-omap4" },
+ { .compatible = "ti,sysc-omap4-simple" },
+ { .compatible = "ti,sysc-omap3430-sr" },
+ { .compatible = "ti,sysc-omap3630-sr" },
+ { .compatible = "ti,sysc-omap4-sr" },
+ { .compatible = "ti,sysc-omap3-sham" },
+ { .compatible = "ti,sysc-omap-aes" },
+ { .compatible = "ti,sysc-mcasp" },
+ { .compatible = "ti,sysc-usb-host-fs" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sysc_match);
+
+static struct platform_driver sysc_driver = {
+ .probe = sysc_probe,
+ .remove = sysc_remove,
+ .driver = {
+ .name = "ti-sysc",
+ .of_match_table = sysc_match,
+ .pm = &sysc_pm_ops,
+ },
+};
+module_platform_driver(sysc_driver);
+
+MODULE_DESCRIPTION("TI sysc interconnect target driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
new file mode 100644
index 000000000000..073fd9011154
--- /dev/null
+++ b/drivers/bus/ts-nbus.c
@@ -0,0 +1,375 @@
+/*
+ * NBUS driver for TS-4600 based boards
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ * Author: Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This driver implements a GPIO bit-banged bus, called the NBUS by Technologic
+ * Systems. It is used to communicate with the peripherals in the FPGA on the
+ * TS-4600 SoM.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/ts-nbus.h>
+
+#define TS_NBUS_DIRECTION_IN 0
+#define TS_NBUS_DIRECTION_OUT 1
+#define TS_NBUS_WRITE_ADR 0
+#define TS_NBUS_WRITE_VAL 1
+
+struct ts_nbus {
+ struct pwm_device *pwm;
+ struct gpio_descs *data;
+ struct gpio_desc *csn;
+ struct gpio_desc *txrx;
+ struct gpio_desc *strobe;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct mutex lock;
+};
+
+/*
+ * request all gpios required by the bus.
+ */
+static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
+ *ts_nbus)
+{
+ ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->data)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
+ return PTR_ERR(ts_nbus->data);
+ }
+
+ ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->csn)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
+ return PTR_ERR(ts_nbus->csn);
+ }
+
+ ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->txrx)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
+ return PTR_ERR(ts_nbus->txrx);
+ }
+
+ ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->strobe)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
+ return PTR_ERR(ts_nbus->strobe);
+ }
+
+ ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts_nbus->ale)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
+ return PTR_ERR(ts_nbus->ale);
+ }
+
+ ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
+ if (IS_ERR(ts_nbus->rdy)) {
+ dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
+ return PTR_ERR(ts_nbus->rdy);
+ }
+
+ return 0;
+}
+
+/*
+ * the data gpios are used for reading and writing values, so their
+ * directions should be adjusted accordingly.
+ */
+static void ts_nbus_set_direction(struct ts_nbus *ts_nbus, int direction)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (direction == TS_NBUS_DIRECTION_IN)
+ gpiod_direction_input(ts_nbus->data->desc[i]);
+ else
+ /* when used as outputs, the default state of the
+ * data lines is high */
+ gpiod_direction_output(ts_nbus->data->desc[i], 1);
+ }
+}
+
+/*
+ * reset the bus to its initial state.
+ * The data, csn, strobe and ale lines must be zeroed to let the FPGA know a
+ * new transaction can be processed.
+ */
+static void ts_nbus_reset_bus(struct ts_nbus *ts_nbus)
+{
+ int i;
+ int values[8];
+
+ for (i = 0; i < 8; i++)
+ values[i] = 0;
+
+ gpiod_set_array_value_cansleep(8, ts_nbus->data->desc, values);
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->strobe, 0);
+ gpiod_set_value_cansleep(ts_nbus->ale, 0);
+}
+
+/*
+ * let the FPGA know it can process the transaction.
+ */
+static void ts_nbus_start_transaction(struct ts_nbus *ts_nbus)
+{
+ gpiod_set_value_cansleep(ts_nbus->strobe, 1);
+}
+
+/*
+ * read a byte value from the data gpios.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_byte(struct ts_nbus *ts_nbus, u8 *val)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int ret, i;
+
+ *val = 0;
+ for (i = 0; i < 8; i++) {
+ ret = gpiod_get_value_cansleep(gpios->desc[i]);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ *val |= BIT(i);
+ }
+
+ return 0;
+}
+
+/*
+ * set the data gpios according to the byte value.
+ */
+static void ts_nbus_write_byte(struct ts_nbus *ts_nbus, u8 byte)
+{
+ struct gpio_descs *gpios = ts_nbus->data;
+ int i;
+ int values[8];
+
+ for (i = 0; i < 8; i++)
+ if (byte & BIT(i))
+ values[i] = 1;
+ else
+ values[i] = 0;
+
+ gpiod_set_array_value_cansleep(8, gpios->desc, values);
+}
+
+/*
+ * reading the bus consists of resetting the bus, then notifying the FPGA to
+ * send the data on the data gpios, and reading back the value.
+ * return 0 on success or negative errno on failure.
+ */
+static int ts_nbus_read_bus(struct ts_nbus *ts_nbus, u8 *val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+ ts_nbus_start_transaction(ts_nbus);
+
+ return ts_nbus_read_byte(ts_nbus, val);
+}
+
+/*
+ * writing to the bus consists of resetting the bus, then defining the type of
+ * command (address/value), writing the data and notifying the FPGA to retrieve
+ * the value from the data gpios.
+ */
+static void ts_nbus_write_bus(struct ts_nbus *ts_nbus, int cmd, u8 val)
+{
+ ts_nbus_reset_bus(ts_nbus);
+
+ if (cmd == TS_NBUS_WRITE_ADR)
+ gpiod_set_value_cansleep(ts_nbus->ale, 1);
+
+ ts_nbus_write_byte(ts_nbus, val);
+ ts_nbus_start_transaction(ts_nbus);
+}
+
+/*
+ * read the value in the FPGA register at the given address.
+ * return 0 on success or negative errno on failure.
+ */
+int ts_nbus_read(struct ts_nbus *ts_nbus, u8 adr, u16 *val)
+{
+ int ret, i;
+ u8 byte;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in read mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 0);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* set the data gpios direction as input before reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_IN);
+
+ /* reading value MSB first */
+ do {
+ *val = 0;
+ byte = 0;
+ for (i = 1; i >= 0; i--) {
+ /* read a byte from the bus, leave on error */
+ ret = ts_nbus_read_bus(ts_nbus, &byte);
+ if (ret < 0)
+ goto err;
+
+ /* append the byte read to the final value */
+ *val |= byte << (i * 8);
+ }
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ ret = gpiod_get_value_cansleep(ts_nbus->rdy);
+ } while (ret);
+
+err:
+ /* restore the data gpios direction as output after reading */
+ ts_nbus_set_direction(ts_nbus, TS_NBUS_DIRECTION_OUT);
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_read);
+
+/*
+ * write the desired value in the FPGA register at the given address.
+ */
+int ts_nbus_write(struct ts_nbus *ts_nbus, u8 adr, u16 val)
+{
+ int i;
+
+ /* bus access must be atomic */
+ mutex_lock(&ts_nbus->lock);
+
+ /* set the bus in write mode */
+ gpiod_set_value_cansleep(ts_nbus->txrx, 1);
+
+ /* write address */
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_ADR, adr);
+
+ /* writing value MSB first */
+ for (i = 1; i >= 0; i--)
+ ts_nbus_write_bus(ts_nbus, TS_NBUS_WRITE_VAL, (u8)(val >> (i * 8)));
+
+ /* wait for completion */
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ while (gpiod_get_value_cansleep(ts_nbus->rdy) != 0) {
+ gpiod_set_value_cansleep(ts_nbus->csn, 0);
+ gpiod_set_value_cansleep(ts_nbus->csn, 1);
+ }
+
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ts_nbus_write);
+
+static int ts_nbus_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct pwm_args pargs;
+ struct device *dev = &pdev->dev;
+ struct ts_nbus *ts_nbus;
+ int ret;
+
+ ts_nbus = devm_kzalloc(dev, sizeof(*ts_nbus), GFP_KERNEL);
+ if (!ts_nbus)
+ return -ENOMEM;
+
+ mutex_init(&ts_nbus->lock);
+
+ ret = ts_nbus_init_pdata(pdev, ts_nbus);
+ if (ret < 0)
+ return ret;
+
+ pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(pwm)) {
+ ret = PTR_ERR(pwm);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "unable to request PWM\n");
+ return ret;
+ }
+
+ pwm_get_args(pwm, &pargs);
+ if (!pargs.period) {
+ dev_err(&pdev->dev, "invalid PWM period\n");
+ return -EINVAL;
+ }
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
+ ret = pwm_config(pwm, pargs.period, pargs.period);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * we can now start the FPGA and populate the peripherals.
+ */
+ pwm_enable(pwm);
+ ts_nbus->pwm = pwm;
+
+ /*
+ * let the child nodes retrieve this instance of the ts-nbus.
+ */
+ dev_set_drvdata(dev, ts_nbus);
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "initialized\n");
+
+ return 0;
+}
+
+static int ts_nbus_remove(struct platform_device *pdev)
+{
+ struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev);
+
+ /* shutdown the FPGA */
+ mutex_lock(&ts_nbus->lock);
+ pwm_disable(ts_nbus->pwm);
+ mutex_unlock(&ts_nbus->lock);
+
+ return 0;
+}
+
+static const struct of_device_id ts_nbus_of_match[] = {
+ { .compatible = "technologic,ts-nbus", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
+
+static struct platform_driver ts_nbus_driver = {
+ .probe = ts_nbus_probe,
+ .remove = ts_nbus_remove,
+ .driver = {
+ .name = "ts_nbus",
+ .of_match_table = ts_nbus_of_match,
+ },
+};
+
+module_platform_driver(ts_nbus_driver);
+
+MODULE_ALIAS("platform:ts_nbus");
+MODULE_AUTHOR("Sebastien Bourdelin <sebastien.bourdelin@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems NBUS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cdrom/Makefile b/drivers/cdrom/Makefile
index 8ffde4f8ab9a..0f3664b45f48 100644
--- a/drivers/cdrom/Makefile
+++ b/drivers/cdrom/Makefile
@@ -1,13 +1,3 @@
-# Makefile for the kernel cdrom device drivers.
-#
-# 30 Jan 1998, Michael Elizabeth Chastain, <mailto:mec@shout.net>
-# Rewritten to use lists instead of if-statements.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_BLK_DEV_IDECD) += cdrom.o
-obj-$(CONFIG_BLK_DEV_SR) += cdrom.o
-obj-$(CONFIG_PARIDE_PCD) += cdrom.o
-obj-$(CONFIG_CDROM_PKTCDVD) += cdrom.o
-
-obj-$(CONFIG_GDROM) += gdrom.o cdrom.o
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CDROM) += cdrom.o
+obj-$(CONFIG_GDROM) += gdrom.o
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 623714344600..c28dca0c613d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Character device configuration
#
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 53e33720818c..7dc3abe66464 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel character device drivers.
#
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index c528f96ee204..6231714ef3c8 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menuconfig AGP
tristate "/dev/agpgart (AGP Support)"
depends on ALPHA || IA64 || PARISC || PPC || X86
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 604489bcdbf9..4a786ffd9dee 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
agpgart-y := backend.o frontend.o generic.o isoch.o
agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index fda073dcd967..164bf651953f 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common Intel AGPGART and GTT definitions.
*/
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index c73385cc4b8a..fc8e1bc3347d 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Setup routines for AGP 3.5 compliant bridges.
*/
diff --git a/drivers/char/applicom.h b/drivers/char/applicom.h
index 35530b3d9bd6..282e08f159d5 100644
--- a/drivers/char/applicom.h
+++ b/drivers/char/applicom.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: applicom.h,v 1.2 1999/08/28 15:09:49 dwmw2 Exp $ */
diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c
index c614a56e68cc..8e16ad5d6d89 100644
--- a/drivers/char/ds1302.c
+++ b/drivers/char/ds1302.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*!***************************************************************************
*!
*! FILE NAME : ds1302.c
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 58471394beb9..839ee61d352a 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -74,7 +74,7 @@
#endif /* TRACING */
static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
static int dtlk_major;
static int dtlk_port_lpc;
@@ -84,7 +84,7 @@ static int dtlk_has_indexing;
static unsigned int dtlk_portlist[] =
{0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0};
static wait_queue_head_t dtlk_process_list;
-static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick, 0, 0);
+static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick);
/* prototypes for file_operations struct */
static ssize_t dtlk_read(struct file *, char __user *,
@@ -259,7 +259,7 @@ static unsigned int dtlk_poll(struct file *file, poll_table * wait)
return mask;
}
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
{
TRACE_TEXT(" dtlk_timer_tick");
wake_up_interruptible(&dtlk_process_list);
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 5406b90bf626..7700280717f2 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -122,11 +122,11 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
/* Last time scheduled */
static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
-static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0);
+static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
{
unsigned long long cur_tsc, tsc_diff;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 95a031e9eced..f6e3e5abc117 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -100,12 +100,12 @@ config HW_RANDOM_BCM2835
If unsure, say Y.
config HW_RANDOM_IPROC_RNG200
- tristate "Broadcom iProc RNG200 support"
- depends on ARCH_BCM_IPROC
+ tristate "Broadcom iProc/STB RNG200 support"
+ depends on ARCH_BCM_IPROC || ARCH_BRCMSTB
default HW_RANDOM
---help---
This driver provides kernel-side support for the RNG200
- hardware found on the Broadcom iProc SoCs.
+ hardware found on the Broadcom iProc and STB SoCs.
To compile this driver as a module, choose M here: the
module will be called iproc-rng200
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 39a67defac67..f3728d008fff 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for HW Random Number Generator (RNG) device drivers.
#
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 9701ac7d8b47..657b8770b6b9 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -292,26 +292,48 @@ static struct miscdevice rng_miscdev = {
.groups = rng_dev_groups,
};
+static int enable_best_rng(void)
+{
+ int ret = -ENODEV;
+
+ BUG_ON(!mutex_is_locked(&rng_mutex));
+
+ /* rng_list is sorted by quality, use the best (=first) one */
+ if (!list_empty(&rng_list)) {
+ struct hwrng *new_rng;
+
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+ }
+
+ return ret;
+}
+
static ssize_t hwrng_attr_current_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- int err;
+ int err = -ENODEV;
struct hwrng *rng;
err = mutex_lock_interruptible(&rng_mutex);
if (err)
return -ERESTARTSYS;
- err = -ENODEV;
- list_for_each_entry(rng, &rng_list, list) {
- if (sysfs_streq(rng->name, buf)) {
- err = 0;
- cur_rng_set_by_user = 1;
- if (rng != current_rng)
+
+ if (sysfs_streq(buf, "")) {
+ err = enable_best_rng();
+ } else {
+ list_for_each_entry(rng, &rng_list, list) {
+ if (sysfs_streq(rng->name, buf)) {
+ cur_rng_set_by_user = 1;
err = set_current_rng(rng);
- break;
+ break;
+ }
}
}
+
mutex_unlock(&rng_mutex);
return err ? : len;
@@ -423,7 +445,7 @@ static void start_khwrngd(void)
{
hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed");
+ pr_err("hwrng_fill thread creation failed\n");
hwrng_fill = NULL;
}
}
@@ -493,17 +515,8 @@ void hwrng_unregister(struct hwrng *rng)
mutex_lock(&rng_mutex);
list_del(&rng->list);
- if (current_rng == rng) {
- drop_current_rng();
- cur_rng_set_by_user = 0;
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- set_current_rng(new_rng);
- }
- }
+ if (current_rng == rng)
+ enable_best_rng();
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 3eaf7cb96d36..8b5a20b35293 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -220,6 +220,7 @@ static int iproc_rng200_probe(struct platform_device *pdev)
}
static const struct of_device_id iproc_rng200_of_match[] = {
+ { .compatible = "brcm,bcm7278-rng200", },
{ .compatible = "brcm,iproc-rng200", },
{},
};
diff --git a/drivers/char/hw_random/n2-asm.S b/drivers/char/hw_random/n2-asm.S
index 9b6eb5cd59f6..c205df43d5ae 100644
--- a/drivers/char/hw_random/n2-asm.S
+++ b/drivers/char/hw_random/n2-asm.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 6bad6cc634e8..9a870f5dc371 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* n2rng.h: Niagara2 RNG defines.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index d9f46b437cc2..4e2a3f635277 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -72,7 +72,7 @@ static int pseries_rng_remove(struct vio_dev *dev)
return 0;
}
-static struct vio_device_id pseries_rng_driver_ids[] = {
+static const struct vio_device_id pseries_rng_driver_ids[] = {
{ "ibm,random-v1", "ibm,random"},
{ "", "" }
};
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 03ff5483d865..f615684028af 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -53,13 +53,6 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
int period_us = ktime_to_us(priv->period);
/*
- * The RNG provides 32-bits per read. Ensure there is enough space for
- * at minimum one read.
- */
- if (max < sizeof(u32))
- return 0;
-
- /*
* There may not have been enough time for new data to be generated
* since the last request. If the caller doesn't want to wait, let them
* bail out. Otherwise, wait for the completion. If the new data has
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 3fa2f8a009b3..b89df66ea1ae 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -184,7 +184,26 @@ static int virtrng_freeze(struct virtio_device *vdev)
static int virtrng_restore(struct virtio_device *vdev)
{
- return probe_common(vdev);
+ int err;
+
+ err = probe_common(vdev);
+ if (!err) {
+ struct virtrng_info *vi = vdev->priv;
+
+ /*
+ * Set hwrng_removed to ensure that virtio_read()
+ * does not block waiting for data before the
+ * registration is complete.
+ */
+ vi->hwrng_removed = true;
+ err = hwrng_register(&vi->hwrng);
+ if (!err) {
+ vi->hwrng_register_done = true;
+ vi->hwrng_removed = false;
+ }
+ }
+
+ return err;
}
#endif
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 3c77645405e5..71755790c32b 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -100,9 +100,9 @@ struct xgene_rng_dev {
struct clk *clk;
};
-static void xgene_rng_expired_timer(unsigned long arg)
+static void xgene_rng_expired_timer(struct timer_list *t)
{
- struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg;
+ struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
/* Clear failure counter as timer expired */
disable_irq(ctx->irq);
@@ -113,8 +113,6 @@ static void xgene_rng_expired_timer(unsigned long arg)
static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
{
- ctx->failure_timer.data = (unsigned long) ctx;
- ctx->failure_timer.function = xgene_rng_expired_timer;
ctx->failure_timer.expires = jiffies + 120 * HZ;
add_timer(&ctx->failure_timer);
}
@@ -292,7 +290,7 @@ static int xgene_rng_init(struct hwrng *rng)
struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
ctx->failure_cnt = 0;
- init_timer(&ctx->failure_timer);
+ timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0);
ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f6fa056a52fc..3544abc0f9f9 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -22,24 +22,39 @@ config IPMI_DMI_DECODE
if IPMI_HANDLER
+config IPMI_PROC_INTERFACE
+ bool 'Provide an interface for IPMI stats in /proc (deprecated)'
+ depends on PROC_FS
+ default y
+ help
+ Do not use this any more; use sysfs for this info instead. It will
+ be removed in future kernel versions.
+
config IPMI_PANIC_EVENT
bool 'Generate a panic event to all BMCs on a panic'
help
- When a panic occurs, this will cause the IPMI message handler to
- generate an IPMI event describing the panic to each interface
- registered with the message handler.
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate an IPMI event describing the panic to each
+ interface registered with the message handler. This is always
+ available; the module parameter for ipmi_msghandler named
+ panic_op can be set to "event" to choose this value. This config
+ simply causes the default value to be set to "event".
config IPMI_PANIC_STRING
bool 'Generate OEM events containing the panic string'
depends on IPMI_PANIC_EVENT
help
- When a panic occurs, this will cause the IPMI message handler to
- generate IPMI OEM type f0 events holding the IPMB address of the
- panic generator (byte 4 of the event), a sequence number for the
- string (byte 5 of the event) and part of the string (the rest of the
- event). Bytes 1, 2, and 3 are the normal usage for an OEM event.
- You can fetch these events and use the sequence numbers to piece the
- string together.
+ When a panic occurs, this will cause the IPMI message handler to,
+ by default, generate IPMI OEM type f0 events holding the IPMB
+ address of the panic generator (byte 4 of the event), a sequence
+ number for the string (byte 5 of the event) and part of the
+ string (the rest of the event). Bytes 1, 2, and 3 are the normal
+ usage for an OEM event. You can fetch these events and use the
+ sequence numbers to piece the string together. This config
+ parameter sets the default value to generate these events;
+ the module parameter for ipmi_msghandler named panic_op can
+ be set to "string" to choose this value. This config simply
+ causes the default value to be set to "string".
config IPMI_DEVICE_INTERFACE
tristate 'Device interface for IPMI'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index eefb0b301e83..33b899fcf14a 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -1,8 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the ipmi drivers.
#
-ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
+ ipmi_si_hotmod.o ipmi_si_hardcode.o ipmi_si_platform.o \
+ ipmi_si_port_io.o ipmi_si_mem_io.o
+ifdef CONFIG_PCI
+ipmi_si-y += ipmi_si_pci.o
+endif
+ifdef CONFIG_PARISC
+ipmi_si-y += ipmi_si_parisc.o
+endif
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 70d434bc1cbf..6edfaa72b98b 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -204,9 +204,6 @@ static ssize_t bt_bmc_read(struct file *file, char __user *buf,
ssize_t ret = 0;
ssize_t nread;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
if (wait_event_interruptible(bt_bmc->queue,
@@ -277,9 +274,6 @@ static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
if (count < 5)
return -EINVAL;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
WARN_ON(*ppos);
/*
@@ -373,9 +367,9 @@ static const struct file_operations bt_bmc_fops = {
.unlocked_ioctl = bt_bmc_ioctl,
};
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = (void *)data;
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
@@ -493,8 +487,7 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
- setup_timer(&bt_bmc->poll_timer, poll_timer,
- (unsigned long)bt_bmc);
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
add_timer(&bt_bmc->poll_timer);
}
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index 2a84401dea05..ab78b3be7e33 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* A hack to create a platform device from a DMI entry. This will
 * allow autoloading of the IPMI driver based on SMBIOS entries.
@@ -8,10 +9,16 @@
#include <linux/dmi.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include "ipmi_si_sm.h"
#include "ipmi_dmi.h"
+#define IPMI_DMI_TYPE_KCS 0x01
+#define IPMI_DMI_TYPE_SMIC 0x02
+#define IPMI_DMI_TYPE_BT 0x03
+#define IPMI_DMI_TYPE_SSIF 0x04
+
struct ipmi_dmi_info {
- int type;
+ enum si_type si_type;
u32 flags;
unsigned long addr;
u8 slave_addr;
@@ -22,6 +29,15 @@ static struct ipmi_dmi_info *ipmi_dmi_infos;
static int ipmi_dmi_nr __initdata;
+#define set_prop_entry(_p_, _name_, type, val) \
+do { \
+ struct property_entry *_p = &_p_; \
+ _p->name = _name_; \
+ _p->length = sizeof(type); \
+ _p->is_string = false; \
+ _p->value.type##_data = val; \
+} while (0)
+
static void __init dmi_add_platform_ipmi(unsigned long base_addr,
u32 flags,
u8 slave_addr,
@@ -32,27 +48,14 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
struct platform_device *pdev;
struct resource r[4];
unsigned int num_r = 1, size;
- struct property_entry p[4] = {
- PROPERTY_ENTRY_U8("slave-addr", slave_addr),
- PROPERTY_ENTRY_U8("ipmi-type", type),
- PROPERTY_ENTRY_U16("i2c-addr", base_addr),
- { }
- };
+ struct property_entry p[5];
+ unsigned int pidx = 0;
char *name, *override;
int rv;
+ enum si_type si_type;
struct ipmi_dmi_info *info;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- pr_warn("ipmi:dmi: Could not allocate dmi info\n");
- } else {
- info->type = type;
- info->flags = flags;
- info->addr = base_addr;
- info->slave_addr = slave_addr;
- info->next = ipmi_dmi_infos;
- ipmi_dmi_infos = info;
- }
+ memset(p, 0, sizeof(p));
name = "dmi-ipmi-si";
override = "ipmi_si";
@@ -62,28 +65,53 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
override = "ipmi_ssif";
offset = 1;
size = 1;
+ si_type = SI_TYPE_INVALID;
break;
case IPMI_DMI_TYPE_BT:
size = 3;
+ si_type = SI_BT;
break;
case IPMI_DMI_TYPE_KCS:
+ size = 2;
+ si_type = SI_KCS;
+ break;
case IPMI_DMI_TYPE_SMIC:
size = 2;
+ si_type = SI_SMIC;
break;
default:
- pr_err("ipmi:dmi: Invalid IPMI type: %d", type);
+ pr_err("ipmi:dmi: Invalid IPMI type: %d\n", type);
return;
}
+ if (si_type != SI_TYPE_INVALID)
+ set_prop_entry(p[pidx++], "ipmi-type", u8, si_type);
+ set_prop_entry(p[pidx++], "slave-addr", u8, slave_addr);
+ set_prop_entry(p[pidx++], "addr-source", u8, SI_SMBIOS);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ pr_warn("ipmi:dmi: Could not allocate dmi info\n");
+ } else {
+ info->si_type = si_type;
+ info->flags = flags;
+ info->addr = base_addr;
+ info->slave_addr = slave_addr;
+ info->next = ipmi_dmi_infos;
+ ipmi_dmi_infos = info;
+ }
+
pdev = platform_device_alloc(name, ipmi_dmi_nr);
if (!pdev) {
- pr_err("ipmi:dmi: Error allocation IPMI platform device");
+ pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
return;
}
pdev->driver_override = override;
- if (type == IPMI_DMI_TYPE_SSIF)
+ if (type == IPMI_DMI_TYPE_SSIF) {
+ set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
goto add_properties;
+ }
memset(r, 0, sizeof(r));
@@ -151,12 +179,13 @@ err:
* This function allows an ACPI-specified IPMI device to look up the
* slave address from the DMI table.
*/
-int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr)
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+ unsigned long base_addr)
{
struct ipmi_dmi_info *info = ipmi_dmi_infos;
while (info) {
- if (info->type == type &&
+ if (info->si_type == si_type &&
info->flags == flags &&
info->addr == base_addr)
return info->slave_addr;
@@ -239,7 +268,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm)
offset = 16;
break;
default:
- pr_err("ipmi:dmi: Invalid offset: 0");
+ pr_err("ipmi:dmi: Invalid offset: 0\n");
return;
}
}
diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h
index 0a1afe5ceb1e..6c21018e3668 100644
--- a/drivers/char/ipmi/ipmi_dmi.h
+++ b/drivers/char/ipmi/ipmi_dmi.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* DMI defines for use by IPMI
*/
-#define IPMI_DMI_TYPE_KCS 0x01
-#define IPMI_DMI_TYPE_SMIC 0x02
-#define IPMI_DMI_TYPE_BT 0x03
-#define IPMI_DMI_TYPE_SSIF 0x04
-
#ifdef CONFIG_IPMI_DMI_DECODE
-int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr);
+int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags,
+ unsigned long base_addr);
#endif
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 810b138f5897..f45732a2cb3e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -46,6 +46,9 @@
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/uuid.h>
#define PFX "IPMI message handler: "
@@ -61,9 +64,77 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
static int initialized;
-#ifdef CONFIG_PROC_FS
+enum ipmi_panic_event_op {
+ IPMI_SEND_PANIC_EVENT_NONE,
+ IPMI_SEND_PANIC_EVENT,
+ IPMI_SEND_PANIC_EVENT_STRING
+};
+#ifdef CONFIG_IPMI_PANIC_STRING
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
+#elif defined(CONFIG_IPMI_PANIC_EVENT)
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
+#else
+#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
+#endif
+static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
+
+static int panic_op_write_handler(const char *val,
+ const struct kernel_param *kp)
+{
+ char valcp[16];
+ char *s;
+
+ strncpy(valcp, val, 16);
+ valcp[15] = '\0';
+
+ s = strstrip(valcp);
+
+ if (strcmp(s, "none") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
+ else if (strcmp(s, "event") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
+ else if (strcmp(s, "string") == 0)
+ ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
+{
+ switch (ipmi_send_panic_event) {
+ case IPMI_SEND_PANIC_EVENT_NONE:
+ strcpy(buffer, "none");
+ break;
+
+ case IPMI_SEND_PANIC_EVENT:
+ strcpy(buffer, "event");
+ break;
+
+ case IPMI_SEND_PANIC_EVENT_STRING:
+ strcpy(buffer, "string");
+ break;
+
+ default:
+ strcpy(buffer, "???");
+ break;
+ }
+
+ return strlen(buffer);
+}
+
+static const struct kernel_param_ops panic_op_ops = {
+ .set = panic_op_write_handler,
+ .get = panic_op_read_handler
+};
+module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
+MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
+
+
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static struct proc_dir_entry *proc_ipmi_root;
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
@@ -90,6 +161,9 @@ static struct proc_dir_entry *proc_ipmi_root;
*/
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+/* How long should we cache dynamic device IDs? */
+#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
+
/*
* The main "user" data structure.
*/
@@ -169,10 +243,17 @@ struct seq_table {
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
+#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
unsigned char medium;
unsigned char protocol;
+};
+struct ipmi_channel_set {
+ struct ipmi_channel c[IPMI_MAX_CHANNELS];
+};
+
+struct ipmi_my_addrinfo {
/*
* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
* but may be changed by the user.
@@ -186,23 +267,38 @@ struct ipmi_channel {
unsigned char lun;
};
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
struct ipmi_proc_entry {
char *name;
struct ipmi_proc_entry *next;
};
#endif
+/*
+ * Note that the product id, manufacturer id, guid, and device id are
+ * immutable in this structure, so dyn_mutex is not required for
+ * accessing those. If those change on a BMC, a new BMC is allocated.
+ */
struct bmc_device {
struct platform_device pdev;
+ struct list_head intfs; /* Interfaces on this BMC. */
struct ipmi_device_id id;
- unsigned char guid[16];
- int guid_set;
- char name[16];
+ struct ipmi_device_id fetch_id;
+ int dyn_id_set;
+ unsigned long dyn_id_expiry;
+ struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
+ guid_t guid;
+ guid_t fetch_guid;
+ int dyn_guid_set;
struct kref usecount;
+ struct work_struct remove_work;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
+static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid);
+
/*
* Various statistics for IPMI, these index stats[] in the ipmi_smi
* structure.
@@ -308,7 +404,6 @@ enum ipmi_stat_indexes {
#define IPMI_IPMB_NUM_SEQ 64
-#define IPMI_MAX_CHANNELS 16
struct ipmi_smi {
/* What interface number are we? */
int intf_num;
@@ -327,15 +422,23 @@ struct ipmi_smi {
*/
struct list_head users;
- /* Information to supply to users. */
- unsigned char ipmi_version_major;
- unsigned char ipmi_version_minor;
-
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
+ /*
+ * Prevents the interface from being unregistered when the
+ * interface is used by being looked up through the BMC
+ * structure.
+ */
+ struct mutex bmc_reg_mutex;
+
+ struct bmc_device tmp_bmc;
struct bmc_device *bmc;
+ bool bmc_registered;
+ struct list_head bmc_link;
char *my_dev_name;
+ bool in_bmc_register; /* Handle recursive situations. Yuck. */
+ struct work_struct bmc_reg_work;
/*
* This is the lower-layer's sender routine. Note that you
@@ -346,10 +449,13 @@ struct ipmi_smi {
const struct ipmi_smi_handlers *handlers;
void *send_info;
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
/* A list of proc entries for this interface. */
struct mutex proc_entry_lock;
struct ipmi_proc_entry *proc_entries;
+
+ struct proc_dir_entry *proc_dir;
+ char proc_dir_name[10];
#endif
/* Driver-model device for the system interface. */
@@ -421,6 +527,8 @@ struct ipmi_smi {
* interface comes in with a NULL user, call this routine with
* it. Note that the message will still be freed by the
* caller. This only works on the system interface.
+ *
+ * Protected by bmc_reg_mutex.
*/
void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
@@ -431,11 +539,11 @@ struct ipmi_smi {
int curr_channel;
/* Channel information */
- struct ipmi_channel channels[IPMI_MAX_CHANNELS];
-
- /* Proc FS stuff. */
- struct proc_dir_entry *proc_dir;
- char proc_dir_name[10];
+ struct ipmi_channel_set *channel_list;
+ unsigned int curr_working_cset; /* First index into the following. */
+ struct ipmi_channel_set wchannels[2];
+ struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
+ bool channels_ready;
atomic_t stats[IPMI_NUM_STATS];
@@ -448,6 +556,14 @@ struct ipmi_smi {
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
+static void __get_guid(ipmi_smi_t intf);
+static void __ipmi_bmc_unregister(ipmi_smi_t intf);
+static int __ipmi_bmc_register(ipmi_smi_t intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num);
+static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id);
+
+
/**
* The driver model view of the IPMI messaging driver.
*/
@@ -457,6 +573,9 @@ static struct platform_driver ipmidriver = {
.bus = &platform_bus_type
}
};
+/*
+ * This mutex keeps us from adding the same BMC twice.
+ */
static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
@@ -475,7 +594,7 @@ static DEFINE_MUTEX(smi_watchers_mutex);
static const char * const addr_src_to_str[] = {
"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
- "device-tree"
+ "device-tree", "platform"
};
const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
@@ -1119,12 +1238,21 @@ int ipmi_destroy_user(ipmi_user_t user)
}
EXPORT_SYMBOL(ipmi_destroy_user);
-void ipmi_get_version(ipmi_user_t user,
- unsigned char *major,
- unsigned char *minor)
+int ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor)
{
- *major = user->intf->ipmi_version_major;
- *minor = user->intf->ipmi_version_minor;
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ *major = ipmi_version_major(&id);
+ *minor = ipmi_version_minor(&id);
+
+ return 0;
}
EXPORT_SYMBOL(ipmi_get_version);
@@ -1134,7 +1262,7 @@ int ipmi_set_my_address(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- user->intf->channels[channel].address = address;
+ user->intf->addrinfo[channel].address = address;
return 0;
}
EXPORT_SYMBOL(ipmi_set_my_address);
@@ -1145,7 +1273,7 @@ int ipmi_get_my_address(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *address = user->intf->channels[channel].address;
+ *address = user->intf->addrinfo[channel].address;
return 0;
}
EXPORT_SYMBOL(ipmi_get_my_address);
@@ -1156,7 +1284,7 @@ int ipmi_set_my_LUN(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- user->intf->channels[channel].lun = LUN & 0x3;
+ user->intf->addrinfo[channel].lun = LUN & 0x3;
return 0;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);
@@ -1167,7 +1295,7 @@ int ipmi_get_my_LUN(ipmi_user_t user,
{
if (channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *address = user->intf->channels[channel].lun;
+ *address = user->intf->addrinfo[channel].lun;
return 0;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);
@@ -1264,8 +1392,8 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val)
list_move_tail(&msg->link, &msgs);
intf->waiting_events_count = 0;
if (intf->event_msg_printed) {
- printk(KERN_WARNING PFX "Event queue no longer"
- " full\n");
+ dev_warn(intf->si_dev,
+ PFX "Event queue no longer full\n");
intf->event_msg_printed = 0;
}
@@ -1655,6 +1783,7 @@ static int i_ipmi_request(ipmi_user_t user,
unsigned char ipmb_seq;
long seqid;
int broadcast = 0;
+ struct ipmi_channel *chans;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
@@ -1662,8 +1791,9 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
- if (intf->channels[addr->channel].medium
- != IPMI_CHANNEL_MEDIUM_IPMB) {
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
goto out_err;
@@ -1785,6 +1915,7 @@ static int i_ipmi_request(ipmi_user_t user,
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
+ struct ipmi_channel *chans;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands);
@@ -1792,9 +1923,11 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
- if ((intf->channels[addr->channel].medium
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ if ((chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_8023LAN)
- && (intf->channels[addr->channel].medium
+ && (chans[addr->channel].medium
!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
ipmi_inc_stat(intf, sent_invalid_commands);
rv = -EINVAL;
@@ -1928,8 +2061,8 @@ static int check_addr(ipmi_smi_t intf,
{
if (addr->channel >= IPMI_MAX_CHANNELS)
return -EINVAL;
- *lun = intf->channels[addr->channel].lun;
- *saddr = intf->channels[addr->channel].address;
+ *lun = intf->addrinfo[addr->channel].lun;
+ *saddr = intf->addrinfo[addr->channel].address;
return 0;
}
@@ -1997,15 +2130,249 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);
-#ifdef CONFIG_PROC_FS
+static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+{
+ int rv;
+
+ if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
+ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
+ || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
+ dev_warn(intf->si_dev,
+ PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+ msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+ return;
+ }
+
+ rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
+ msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ PFX "device id demangle failed: %d\n", rv);
+ intf->bmc->dyn_id_set = 0;
+ } else {
+ /*
+ * Make sure the id data is available before setting
+ * dyn_id_set.
+ */
+ smp_wmb();
+ intf->bmc->dyn_id_set = 1;
+ }
+
+ wake_up(&intf->waitq);
+}
+
+static int
+send_get_device_id_cmd(ipmi_smi_t intf)
+{
+ struct ipmi_system_interface_addr si;
+ struct kernel_ipmi_msg msg;
+
+ si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ si.channel = IPMI_BMC_CHANNEL;
+ si.lun = 0;
+
+ msg.netfn = IPMI_NETFN_APP_REQUEST;
+ msg.cmd = IPMI_GET_DEVICE_ID_CMD;
+ msg.data = NULL;
+ msg.data_len = 0;
+
+ return i_ipmi_request(NULL,
+ intf,
+ (struct ipmi_addr *) &si,
+ 0,
+ &msg,
+ intf,
+ NULL,
+ NULL,
+ 0,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
+ -1, 0);
+}
+
+static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc)
+{
+ int rv;
+
+ bmc->dyn_id_set = 2;
+
+ intf->null_user_handler = bmc_device_id_handler;
+
+ rv = send_get_device_id_cmd(intf);
+ if (rv)
+ return rv;
+
+ wait_event(intf->waitq, bmc->dyn_id_set != 2);
+
+ if (!bmc->dyn_id_set)
+ rv = -EIO; /* Something went wrong in the fetch. */
+
+ /* dyn_id_set makes the id data available. */
+ smp_rmb();
+
+ intf->null_user_handler = NULL;
+
+ return rv;
+}
+
+/*
+ * Fetch the device id for the bmc/interface. You must pass in either
+ * bmc or intf; this code will get the other one. If the data has
+ * been recently fetched, this will just use the cached data. Otherwise
+ * it will run a new fetch.
+ *
+ * Except for the first time this is called (in ipmi_register_smi()),
+ * this will always return good data.
+ */
+static int __bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid, int intf_num)
+{
+ int rv = 0;
+ int prev_dyn_id_set, prev_guid_set;
+ bool intf_set = intf != NULL;
+
+ if (!intf) {
+ mutex_lock(&bmc->dyn_mutex);
+retry_bmc_lock:
+ if (list_empty(&bmc->intfs)) {
+ mutex_unlock(&bmc->dyn_mutex);
+ return -ENOENT;
+ }
+ intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link);
+ kref_get(&intf->refcount);
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
+ bmc_link)) {
+ mutex_unlock(&intf->bmc_reg_mutex);
+ kref_put(&intf->refcount, intf_free);
+ goto retry_bmc_lock;
+ }
+ } else {
+ mutex_lock(&intf->bmc_reg_mutex);
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ kref_get(&intf->refcount);
+ }
+
+ /* If we have a valid and current ID, just return that. */
+ if (intf->in_bmc_register ||
+ (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
+ goto out_noprocessing;
+
+ prev_guid_set = bmc->dyn_guid_set;
+ __get_guid(intf);
+
+ prev_dyn_id_set = bmc->dyn_id_set;
+ rv = __get_device_id(intf, bmc);
+ if (rv)
+ goto out;
+
+ /*
+ * The guid, device id, manufacturer id, and product id should
+ * not change on a BMC. If it does we have to do some dancing.
+ */
+ if (!intf->bmc_registered
+ || (!prev_guid_set && bmc->dyn_guid_set)
+ || (!prev_dyn_id_set && bmc->dyn_id_set)
+ || (prev_guid_set && bmc->dyn_guid_set
+ && !guid_equal(&bmc->guid, &bmc->fetch_guid))
+ || bmc->id.device_id != bmc->fetch_id.device_id
+ || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
+ || bmc->id.product_id != bmc->fetch_id.product_id) {
+ struct ipmi_device_id id = bmc->fetch_id;
+ int guid_set = bmc->dyn_guid_set;
+ guid_t guid;
+
+ guid = bmc->fetch_guid;
+ mutex_unlock(&bmc->dyn_mutex);
+
+ __ipmi_bmc_unregister(intf);
+ /* Fill in the temporary BMC for good measure. */
+ intf->bmc->id = id;
+ intf->bmc->dyn_guid_set = guid_set;
+ intf->bmc->guid = guid;
+ if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
+ need_waiter(intf); /* Retry later on an error. */
+ else
+ __scan_channels(intf, &id);
+
+
+ if (!intf_set) {
+ /*
+ * We weren't given the interface on the
+ * command line, so restart the operation on
+ * the next interface for the BMC.
+ */
+ mutex_unlock(&intf->bmc_reg_mutex);
+ mutex_lock(&bmc->dyn_mutex);
+ goto retry_bmc_lock;
+ }
+
+ /* We have a new BMC, set it up. */
+ bmc = intf->bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ goto out_noprocessing;
+ } else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
+ /* Version info changed, scan the channels again. */
+ __scan_channels(intf, &bmc->fetch_id);
+
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+out:
+ if (rv && prev_dyn_id_set) {
+ rv = 0; /* Ignore failures if we have previous data. */
+ bmc->dyn_id_set = prev_dyn_id_set;
+ }
+ if (!rv) {
+ bmc->id = bmc->fetch_id;
+ if (bmc->dyn_guid_set)
+ bmc->guid = bmc->fetch_guid;
+ else if (prev_guid_set)
+ /*
+ * The guid used to be valid and it failed to fetch,
+ * just use the cached value.
+ */
+ bmc->dyn_guid_set = prev_guid_set;
+ }
+out_noprocessing:
+ if (!rv) {
+ if (id)
+ *id = bmc->id;
+
+ if (guid_set)
+ *guid_set = bmc->dyn_guid_set;
+
+ if (guid && bmc->dyn_guid_set)
+ *guid = bmc->guid;
+ }
+
+ mutex_unlock(&bmc->dyn_mutex);
+ mutex_unlock(&intf->bmc_reg_mutex);
+
+ kref_put(&intf->refcount, intf_free);
+ return rv;
+}
+
+static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
+ struct ipmi_device_id *id,
+ bool *guid_set, guid_t *guid)
+{
+ return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
+}
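+
+/*
+ * Call-pattern sketch (taken from the callers below, values are
+ * illustrative): the bmc sysfs attributes only have the bmc, so they
+ * call
+ *   bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ * while interface-side code such as redo_bmc_reg() only has the intf
+ * and calls
+ *   bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+ */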
+
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_ipmb_proc_show(struct seq_file *m, void *v)
{
ipmi_smi_t intf = m->private;
int i;
- seq_printf(m, "%x", intf->channels[0].address);
+ seq_printf(m, "%x", intf->addrinfo[0].address);
for (i = 1; i < IPMI_MAX_CHANNELS; i++)
- seq_printf(m, " %x", intf->channels[i].address);
+ seq_printf(m, " %x", intf->addrinfo[i].address);
seq_putc(m, '\n');
return 0;
@@ -2026,10 +2393,16 @@ static const struct file_operations smi_ipmb_proc_ops = {
static int smi_version_proc_show(struct seq_file *m, void *v)
{
ipmi_smi_t intf = m->private;
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL);
+ if (rv)
+ return rv;
seq_printf(m, "%u.%u\n",
- ipmi_version_major(&intf->bmc->id),
- ipmi_version_minor(&intf->bmc->id));
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
return 0;
}
@@ -2120,14 +2493,12 @@ static const struct file_operations smi_stats_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
-#endif /* CONFIG_PROC_FS */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
const struct file_operations *proc_ops,
void *data)
{
int rv = 0;
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry *file;
struct ipmi_proc_entry *entry;
@@ -2153,7 +2524,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
smi->proc_entries = entry;
mutex_unlock(&smi->proc_entry_lock);
}
-#endif /* CONFIG_PROC_FS */
return rv;
}
@@ -2163,7 +2533,6 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
{
int rv = 0;
-#ifdef CONFIG_PROC_FS
sprintf(smi->proc_dir_name, "%d", num);
smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
if (!smi->proc_dir)
@@ -2183,14 +2552,12 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
rv = ipmi_smi_add_proc_entry(smi, "version",
&smi_version_proc_ops,
smi);
-#endif /* CONFIG_PROC_FS */
return rv;
}
static void remove_proc_entries(ipmi_smi_t smi)
{
-#ifdef CONFIG_PROC_FS
struct ipmi_proc_entry *entry;
mutex_lock(&smi->proc_entry_lock);
@@ -2204,66 +2571,22 @@ static void remove_proc_entries(ipmi_smi_t smi)
}
mutex_unlock(&smi->proc_entry_lock);
remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
-#endif /* CONFIG_PROC_FS */
-}
-
-static int __find_bmc_guid(struct device *dev, void *data)
-{
- unsigned char *id = data;
- struct bmc_device *bmc = to_bmc_device(dev);
- return memcmp(bmc->guid, id, 16) == 0;
-}
-
-static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
- unsigned char *guid)
-{
- struct device *dev;
-
- dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
- if (dev)
- return to_bmc_device(dev);
- else
- return NULL;
-}
-
-struct prod_dev_id {
- unsigned int product_id;
- unsigned char device_id;
-};
-
-static int __find_bmc_prod_dev_id(struct device *dev, void *data)
-{
- struct prod_dev_id *id = data;
- struct bmc_device *bmc = to_bmc_device(dev);
-
- return (bmc->id.product_id == id->product_id
- && bmc->id.device_id == id->device_id);
-}
-
-static struct bmc_device *ipmi_find_bmc_prod_dev_id(
- struct device_driver *drv,
- unsigned int product_id, unsigned char device_id)
-{
- struct prod_dev_id id = {
- .product_id = product_id,
- .device_id = device_id,
- };
- struct device *dev;
-
- dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
- if (dev)
- return to_bmc_device(dev);
- else
- return NULL;
}
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
static ssize_t device_id_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "%u\n", bmc->id.device_id);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "%u\n", id.device_id);
}
static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
@@ -2272,9 +2595,14 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "%u\n",
- (bmc->id.device_revision & 0x80) >> 7);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
NULL);
@@ -2283,9 +2611,14 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "%u\n",
- bmc->id.device_revision & 0x0F);
+ return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
@@ -2294,9 +2627,15 @@ static ssize_t firmware_revision_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
- bmc->id.firmware_revision_2);
+ return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+ id.firmware_revision_2);
}
static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
@@ -2305,10 +2644,16 @@ static ssize_t ipmi_version_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
return snprintf(buf, 20, "%u.%u\n",
- ipmi_version_major(&bmc->id),
- ipmi_version_minor(&bmc->id));
+ ipmi_version_major(&id),
+ ipmi_version_minor(&id));
}
static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
@@ -2317,9 +2662,14 @@ static ssize_t add_dev_support_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
- return snprintf(buf, 10, "0x%02x\n",
- bmc->id.additional_device_support);
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
+
+ return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
@@ -2329,8 +2679,14 @@ static ssize_t manufacturer_id_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
+ return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
@@ -2339,8 +2695,14 @@ static ssize_t product_id_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
- return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
+ return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
@@ -2349,12 +2711,18 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ struct ipmi_device_id id;
+ int rv;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ if (rv)
+ return rv;
return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
- bmc->id.aux_firmware_revision[3],
- bmc->id.aux_firmware_revision[2],
- bmc->id.aux_firmware_revision[1],
- bmc->id.aux_firmware_revision[0]);
+ id.aux_firmware_revision[3],
+ id.aux_firmware_revision[2],
+ id.aux_firmware_revision[1],
+ id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
@@ -2362,10 +2730,17 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct bmc_device *bmc = to_bmc_device(dev);
+ bool guid_set;
+ guid_t guid;
+ int rv;
- return snprintf(buf, 100, "%Lx%Lx\n",
- (long long) bmc->guid[0],
- (long long) bmc->guid[8]);
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
+ if (rv)
+ return rv;
+ if (!guid_set)
+ return -ENOENT;
+
+ return snprintf(buf, 38, "%pUl\n", guid.b);
}
static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
@@ -2389,11 +2764,20 @@ static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct bmc_device *bmc = to_bmc_device(dev);
umode_t mode = attr->mode;
+ int rv;
- if (attr == &dev_attr_aux_firmware_revision.attr)
- return bmc->id.aux_firmware_revision_set ? mode : 0;
- if (attr == &dev_attr_guid.attr)
- return bmc->guid_set ? mode : 0;
+ if (attr == &dev_attr_aux_firmware_revision.attr) {
+ struct ipmi_device_id id;
+
+ rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
+ return (!rv && id.aux_firmware_revision_set) ? mode : 0;
+ }
+ if (attr == &dev_attr_guid.attr) {
+ bool guid_set;
+
+ rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
+ return (!rv && guid_set) ? mode : 0;
+ }
return mode;
}
@@ -2411,127 +2795,239 @@ static const struct device_type bmc_device_type = {
.groups = bmc_dev_attr_groups,
};
+static int __find_bmc_guid(struct device *dev, void *data)
+{
+ guid_t *guid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
+ guid_t *guid)
+{
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+struct prod_dev_id {
+ unsigned int product_id;
+ unsigned char device_id;
+};
+
+static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+{
+ struct prod_dev_id *cid = data;
+ struct bmc_device *bmc;
+ int rv;
+
+ if (dev->type != &bmc_device_type)
+ return 0;
+
+ bmc = to_bmc_device(dev);
+ rv = (bmc->id.product_id == cid->product_id
+ && bmc->id.device_id == cid->device_id);
+ if (rv)
+ rv = kref_get_unless_zero(&bmc->usecount);
+ return rv;
+}
+
+/*
+ * Returns with the bmc's usecount incremented, if it is non-NULL.
+ */
+static struct bmc_device *ipmi_find_bmc_prod_dev_id(
+ struct device_driver *drv,
+ unsigned int product_id, unsigned char device_id)
+{
+ struct prod_dev_id id = {
+ .product_id = product_id,
+ .device_id = device_id,
+ };
+ struct device *dev;
+ struct bmc_device *bmc = NULL;
+
+ dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
+ if (dev) {
+ bmc = to_bmc_device(dev);
+ put_device(dev);
+ }
+ return bmc;
+}
+
+static DEFINE_IDA(ipmi_bmc_ida);
+
static void
release_bmc_device(struct device *dev)
{
kfree(to_bmc_device(dev));
}
+static void cleanup_bmc_work(struct work_struct *work)
+{
+ struct bmc_device *bmc = container_of(work, struct bmc_device,
+ remove_work);
+ int id = bmc->pdev.id; /* Unregister overwrites id */
+
+ platform_device_unregister(&bmc->pdev);
+ ida_simple_remove(&ipmi_bmc_ida, id);
+}
+
static void
cleanup_bmc_device(struct kref *ref)
{
struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
- platform_device_unregister(&bmc->pdev);
+ /*
+ * Remove the platform device in a work queue to avoid issues
+ * with removing the device attributes while reading a device
+ * attribute.
+ */
+ schedule_work(&bmc->remove_work);
}
-static void ipmi_bmc_unregister(ipmi_smi_t intf)
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static void __ipmi_bmc_unregister(ipmi_smi_t intf)
{
struct bmc_device *bmc = intf->bmc;
- sysfs_remove_link(&intf->si_dev->kobj, "bmc");
- if (intf->my_dev_name) {
- sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
- kfree(intf->my_dev_name);
- intf->my_dev_name = NULL;
- }
+ if (!intf->bmc_registered)
+ return;
- mutex_lock(&ipmidriver_mutex);
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+ sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
kref_put(&bmc->usecount, cleanup_bmc_device);
- intf->bmc = NULL;
- mutex_unlock(&ipmidriver_mutex);
+ intf->bmc_registered = false;
}
-static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
+static void ipmi_bmc_unregister(ipmi_smi_t intf)
+{
+ mutex_lock(&intf->bmc_reg_mutex);
+ __ipmi_bmc_unregister(intf);
+ mutex_unlock(&intf->bmc_reg_mutex);
+}
+
+/*
+ * Must be called with intf->bmc_reg_mutex held.
+ */
+static int __ipmi_bmc_register(ipmi_smi_t intf,
+ struct ipmi_device_id *id,
+ bool guid_set, guid_t *guid, int intf_num)
{
int rv;
- struct bmc_device *bmc = intf->bmc;
+ struct bmc_device *bmc;
struct bmc_device *old_bmc;
- mutex_lock(&ipmidriver_mutex);
+ /*
+ * platform_device_register() can cause bmc_reg_mutex to
+ * be claimed because of the is_visible functions of
+ * the attributes. Eliminate possible recursion and
+ * release the lock.
+ */
+ intf->in_bmc_register = true;
+ mutex_unlock(&intf->bmc_reg_mutex);
/*
* Try to find if there is a bmc_device struct
* representing the interfaced BMC already
*/
- if (bmc->guid_set)
- old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, bmc->guid);
+ mutex_lock(&ipmidriver_mutex);
+ if (guid_set)
+ old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
else
old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
- bmc->id.product_id,
- bmc->id.device_id);
+ id->product_id,
+ id->device_id);
/*
* If there is already a bmc_device, free the new one,
* otherwise register the new BMC device
*/
if (old_bmc) {
- kfree(bmc);
- intf->bmc = old_bmc;
bmc = old_bmc;
+ /*
+ * Note: old_bmc already has usecount incremented by
+ * the BMC find functions.
+ */
+ intf->bmc = old_bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
- kref_get(&bmc->usecount);
- mutex_unlock(&ipmidriver_mutex);
-
- printk(KERN_INFO
- "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev,
+ "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
+ " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
} else {
- unsigned char orig_dev_id = bmc->id.device_id;
- int warn_printed = 0;
-
- snprintf(bmc->name, sizeof(bmc->name),
- "ipmi_bmc.%4.4x", bmc->id.product_id);
- bmc->pdev.name = bmc->name;
-
- while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
- bmc->id.product_id,
- bmc->id.device_id)) {
- if (!warn_printed) {
- printk(KERN_WARNING PFX
- "This machine has two different BMCs"
- " with the same product id and device"
- " id. This is an error in the"
- " firmware, but incrementing the"
- " device id to work around the problem."
- " Prod ID = 0x%x, Dev ID = 0x%x\n",
- bmc->id.product_id, bmc->id.device_id);
- warn_printed = 1;
- }
- bmc->id.device_id++; /* Wraps at 255 */
- if (bmc->id.device_id == orig_dev_id) {
- printk(KERN_ERR PFX
- "Out of device ids!\n");
- break;
- }
+ bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
+ if (!bmc) {
+ rv = -ENOMEM;
+ goto out;
}
+ INIT_LIST_HEAD(&bmc->intfs);
+ mutex_init(&bmc->dyn_mutex);
+ INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
+
+ bmc->id = *id;
+ bmc->dyn_id_set = 1;
+ bmc->dyn_guid_set = guid_set;
+ bmc->guid = *guid;
+ bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
+
+ bmc->pdev.name = "ipmi_bmc";
+ rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+ if (rv < 0)
+ goto out;
bmc->pdev.dev.driver = &ipmidriver.driver;
- bmc->pdev.id = bmc->id.device_id;
+ bmc->pdev.id = rv;
bmc->pdev.dev.release = release_bmc_device;
bmc->pdev.dev.type = &bmc_device_type;
kref_init(&bmc->usecount);
+ intf->bmc = bmc;
+ mutex_lock(&bmc->dyn_mutex);
+ list_add_tail(&intf->bmc_link, &bmc->intfs);
+ mutex_unlock(&bmc->dyn_mutex);
+
rv = platform_device_register(&bmc->pdev);
- mutex_unlock(&ipmidriver_mutex);
if (rv) {
- put_device(&bmc->pdev.dev);
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to register bmc device: %d\n",
- rv);
- /*
- * Don't go to out_err, you can only do that if
- * the device is registered already.
- */
- return rv;
+ dev_err(intf->si_dev,
+ PFX " Unable to register bmc device: %d\n",
+ rv);
+ goto out_list_del;
}
- dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
- "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ dev_info(intf->si_dev,
+ "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
bmc->id.manufacturer_id,
bmc->id.product_id,
bmc->id.device_id);
@@ -2543,19 +3039,19 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
*/
rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
if (rv) {
- printk(KERN_ERR
- "ipmi_msghandler: Unable to create bmc symlink: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to create bmc symlink: %d\n", rv);
+ goto out_put_bmc;
}
- intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
+ if (intf_num == -1)
+ intf_num = intf->intf_num;
+ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
if (!intf->my_dev_name) {
rv = -ENOMEM;
- printk(KERN_ERR
- "ipmi_msghandler: allocate link from BMC: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to allocate link from BMC: %d\n", rv);
+ goto out_unlink1;
}
rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
@@ -2563,18 +3059,42 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
if (rv) {
kfree(intf->my_dev_name);
intf->my_dev_name = NULL;
- printk(KERN_ERR
- "ipmi_msghandler:"
- " Unable to create symlink to bmc: %d\n",
- rv);
- goto out_err;
+ dev_err(intf->si_dev,
+ PFX "Unable to create symlink to bmc: %d\n", rv);
+ goto out_free_my_dev_name;
}
- return 0;
+ intf->bmc_registered = true;
-out_err:
- ipmi_bmc_unregister(intf);
+out:
+ mutex_unlock(&ipmidriver_mutex);
+ mutex_lock(&intf->bmc_reg_mutex);
+ intf->in_bmc_register = false;
return rv;
+
+
+out_free_my_dev_name:
+ kfree(intf->my_dev_name);
+ intf->my_dev_name = NULL;
+
+out_unlink1:
+ sysfs_remove_link(&intf->si_dev->kobj, "bmc");
+
+out_put_bmc:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ kref_put(&bmc->usecount, cleanup_bmc_device);
+ goto out;
+
+out_list_del:
+ mutex_lock(&bmc->dyn_mutex);
+ list_del(&intf->bmc_link);
+ mutex_unlock(&bmc->dyn_mutex);
+ intf->bmc = &intf->tmp_bmc;
+ put_device(&bmc->pdev.dev);
+ goto out;
}
static int
@@ -2600,14 +3120,15 @@ send_guid_cmd(ipmi_smi_t intf, int chan)
NULL,
NULL,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
-1, 0);
}
-static void
-guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
+static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
+ struct bmc_device *bmc = intf->bmc;
+
if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
|| (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
@@ -2616,38 +3137,46 @@ guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
if (msg->msg.data[0] != 0) {
/* Error from getting the GUID, the BMC doesn't have one. */
- intf->bmc->guid_set = 0;
+ bmc->dyn_guid_set = 0;
goto out;
}
if (msg->msg.data_len < 17) {
- intf->bmc->guid_set = 0;
- printk(KERN_WARNING PFX
- "guid_handler: The GUID response from the BMC was too"
- " short, it was %d but should have been 17. Assuming"
- " GUID is not available.\n",
- msg->msg.data_len);
+ bmc->dyn_guid_set = 0;
+ dev_warn(intf->si_dev,
+ PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
+ msg->msg.data_len);
goto out;
}
- memcpy(intf->bmc->guid, msg->msg.data, 16);
- intf->bmc->guid_set = 1;
+ memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
+ /*
+ * Make sure the guid data is available before setting
+ * dyn_guid_set.
+ */
+ smp_wmb();
+ bmc->dyn_guid_set = 1;
out:
wake_up(&intf->waitq);
}
-static void
-get_guid(ipmi_smi_t intf)
+static void __get_guid(ipmi_smi_t intf)
{
int rv;
+ struct bmc_device *bmc = intf->bmc;
- intf->bmc->guid_set = 0x2;
+ bmc->dyn_guid_set = 2;
intf->null_user_handler = guid_handler;
rv = send_guid_cmd(intf, 0);
if (rv)
/* Send failed, no GUID available. */
- intf->bmc->guid_set = 0;
- wait_event(intf->waitq, intf->bmc->guid_set != 2);
+ bmc->dyn_guid_set = 0;
+
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+
+ /* dyn_guid_set makes the guid data available. */
+ smp_rmb();
+
intf->null_user_handler = NULL;
}
@@ -2676,8 +3205,8 @@ send_channel_info_cmd(ipmi_smi_t intf, int chan)
NULL,
NULL,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
-1, 0);
}
@@ -2685,7 +3214,9 @@ static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
int rv = 0;
- int chan;
+ int ch;
+ unsigned int set = intf->curr_working_cset;
+ struct ipmi_channel *chans;
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
&& (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
@@ -2701,12 +3232,13 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
* assume it has one IPMB at channel
* zero.
*/
- intf->channels[0].medium
+ intf->wchannels[set].c[0].medium
= IPMI_CHANNEL_MEDIUM_IPMB;
- intf->channels[0].protocol
+ intf->wchannels[set].c[0].protocol
= IPMI_CHANNEL_PROTOCOL_IPMB;
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
goto out;
}
@@ -2716,24 +3248,31 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
/* Message not big enough, just go on. */
goto next_channel;
}
- chan = intf->curr_channel;
- intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
- intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
+ ch = intf->curr_channel;
+ chans = intf->wchannels[set].c;
+ chans[ch].medium = msg->msg.data[2] & 0x7f;
+ chans[ch].protocol = msg->msg.data[3] & 0x1f;
next_channel:
intf->curr_channel++;
- if (intf->curr_channel >= IPMI_MAX_CHANNELS)
+ if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
- else
+ } else {
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
+ }
if (rv) {
/* Got an error somehow, just give up. */
- printk(KERN_WARNING PFX
- "Error sending channel information for channel"
- " %d: %d\n", intf->curr_channel, rv);
+ dev_warn(intf->si_dev,
+ PFX "Error sending channel information for channel %d: %d\n",
+ intf->curr_channel, rv);
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
wake_up(&intf->waitq);
}
}
@@ -2741,6 +3280,53 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
+/*
+ * Must be holding intf->bmc_reg_mutex to call this.
+ */
+static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id)
+{
+ int rv;
+
+ if (ipmi_version_major(id) > 1
+ || (ipmi_version_major(id) == 1
+ && ipmi_version_minor(id) >= 5)) {
+ unsigned int set;
+
+ /*
+ * Start scanning the channels to see what is
+ * available.
+ */
+ set = !intf->curr_working_cset;
+ intf->curr_working_cset = set;
+ memset(&intf->wchannels[set], 0,
+ sizeof(struct ipmi_channel_set));
+
+ intf->null_user_handler = channel_handler;
+ intf->curr_channel = 0;
+ rv = send_channel_info_cmd(intf, 0);
+ if (rv) {
+ dev_warn(intf->si_dev,
+ "Error sending channel information for channel 0, %d\n",
+ rv);
+ return -EIO;
+ }
+
+ /* Wait for the channel info to be read. */
+ wait_event(intf->waitq, intf->channels_ready);
+ intf->null_user_handler = NULL;
+ } else {
+ unsigned int set = intf->curr_working_cset;
+
+ /* Assume a single IPMB channel at zero. */
+ intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+ intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
+ intf->channel_list = intf->wchannels + set;
+ intf->channels_ready = true;
+ }
+
+ return 0;
+}
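+
+/*
+ * Reader-side sketch (as done in i_ipmi_request() and
+ * handle_one_recv_msg()): once channels_ready is set, readers pick up
+ * the active set with
+ *   chans = READ_ONCE(intf->channel_list)->c;
+ * and index it by channel number; a later rescan fills the other
+ * working set and swaps channel_list, so readers are never left with a
+ * half-updated table.
+ */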
+
static void ipmi_poll(ipmi_smi_t intf)
{
if (intf->handlers->poll)
@@ -2755,9 +3341,18 @@ void ipmi_poll_interface(ipmi_user_t user)
}
EXPORT_SYMBOL(ipmi_poll_interface);
+static void redo_bmc_reg(struct work_struct *work)
+{
+ ipmi_smi_t intf = container_of(work, struct ipmi_smi, bmc_reg_work);
+
+ if (!intf->in_shutdown)
+ bmc_get_device_id(intf, NULL, NULL, NULL, NULL);
+
+ kref_put(&intf->refcount, intf_free);
+}
+
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
void *send_info,
- struct ipmi_device_id *device_id,
struct device *si_dev,
unsigned char slave_addr)
{
@@ -2766,6 +3361,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
ipmi_smi_t intf;
ipmi_smi_t tintf;
struct list_head *link;
+ struct ipmi_device_id id;
/*
* Make sure the driver is actually initialized, this handles
@@ -2787,24 +3383,21 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
if (!intf)
return -ENOMEM;
- intf->ipmi_version_major = ipmi_version_major(device_id);
- intf->ipmi_version_minor = ipmi_version_minor(device_id);
-
- intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
- if (!intf->bmc) {
- kfree(intf);
- return -ENOMEM;
- }
+ intf->bmc = &intf->tmp_bmc;
+ INIT_LIST_HEAD(&intf->bmc->intfs);
+ mutex_init(&intf->bmc->dyn_mutex);
+ INIT_LIST_HEAD(&intf->bmc_link);
+ mutex_init(&intf->bmc_reg_mutex);
intf->intf_num = -1; /* Mark it invalid for now. */
kref_init(&intf->refcount);
- intf->bmc->id = *device_id;
+ INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
intf->si_dev = si_dev;
for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
- intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
- intf->channels[j].lun = 2;
+ intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
+ intf->addrinfo[j].lun = 2;
}
if (slave_addr != 0)
- intf->channels[0].address = slave_addr;
+ intf->addrinfo[0].address = slave_addr;
INIT_LIST_HEAD(&intf->users);
intf->handlers = handlers;
intf->send_info = send_info;
@@ -2814,7 +3407,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->seq_table[j].seqid = 0;
}
intf->curr_seq = 0;
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
mutex_init(&intf->proc_entry_lock);
#endif
spin_lock_init(&intf->waiting_rcv_msgs_lock);
@@ -2838,7 +3431,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
for (i = 0; i < IPMI_NUM_STATS; i++)
atomic_set(&intf->stats[i], 0);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
intf->proc_dir = NULL;
+#endif
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
@@ -2862,45 +3457,29 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
if (rv)
goto out;
- get_guid(intf);
-
- if ((intf->ipmi_version_major > 1)
- || ((intf->ipmi_version_major == 1)
- && (intf->ipmi_version_minor >= 5))) {
- /*
- * Start scanning the channels to see what is
- * available.
- */
- intf->null_user_handler = channel_handler;
- intf->curr_channel = 0;
- rv = send_channel_info_cmd(intf, 0);
- if (rv) {
- printk(KERN_WARNING PFX
- "Error sending channel information for channel"
- " 0, %d\n", rv);
- goto out;
- }
-
- /* Wait for the channel info to be read. */
- wait_event(intf->waitq,
- intf->curr_channel >= IPMI_MAX_CHANNELS);
- intf->null_user_handler = NULL;
- } else {
- /* Assume a single IPMB channel at zero. */
- intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
- intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
- intf->curr_channel = IPMI_MAX_CHANNELS;
+ rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
+ if (rv) {
+ dev_err(si_dev, "Unable to get the device id: %d\n", rv);
+ goto out;
}
- rv = ipmi_bmc_register(intf, i);
+ mutex_lock(&intf->bmc_reg_mutex);
+ rv = __scan_channels(intf, &id);
+ mutex_unlock(&intf->bmc_reg_mutex);
+ if (rv)
+ goto out;
- if (rv == 0)
- rv = add_proc_entries(intf, i);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
+ rv = add_proc_entries(intf, i);
+#endif
out:
if (rv) {
+ ipmi_bmc_unregister(intf);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
if (intf->proc_dir)
remove_proc_entries(intf);
+#endif
intf->handlers = NULL;
list_del_rcu(&intf->link);
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3005,7 +3584,9 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
intf->handlers = NULL;
mutex_unlock(&ipmi_interfaces_mutex);
+#ifdef CONFIG_IPMI_PROC_INTERFACE
remove_proc_entries(intf);
+#endif
ipmi_bmc_unregister(intf);
/*
@@ -3130,7 +3711,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
msg->data[3] = msg->rsp[6];
msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
- msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
+ msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
/* rqseq/lun */
msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
msg->data[8] = msg->rsp[8]; /* cmd */
@@ -3584,8 +4165,8 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
* There's too many things in the queue, discard this
* message.
*/
- printk(KERN_WARNING PFX "Event queue full, discarding"
- " incoming events\n");
+ dev_warn(intf->si_dev,
+ PFX "Event queue full, discarding incoming events\n");
intf->event_msg_printed = 1;
}
@@ -3603,11 +4184,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
recv_msg = (struct ipmi_recv_msg *) msg->user_data;
if (recv_msg == NULL) {
- printk(KERN_WARNING
- "IPMI message received with no owner. This\n"
- "could be because of a malformed message, or\n"
- "because of a hardware error. Contact your\n"
- "hardware vender for assistance\n");
+ dev_warn(intf->si_dev,
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
return 0;
}
@@ -3661,9 +4239,9 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
#endif
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- printk(KERN_WARNING PFX "BMC returned to small a message"
- " for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn(intf->si_dev,
+ PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
@@ -3676,10 +4254,10 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
* The NetFN and Command in the response is not even
* marginally correct.
*/
- printk(KERN_WARNING PFX "BMC returned incorrect response,"
- " expected netfn %x cmd %x, got netfn %x cmd %x\n",
- (msg->data[0] >> 2) | 1, msg->data[1],
- msg->rsp[0] >> 2, msg->rsp[1]);
+ dev_warn(intf->si_dev,
+ PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+ (msg->data[0] >> 2) | 1, msg->data[1],
+ msg->rsp[0] >> 2, msg->rsp[1]);
/* Generate an error response for the message. */
msg->rsp[0] = msg->data[0] | (1 << 2);
@@ -3721,6 +4299,8 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
deliver_response(recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
+ struct ipmi_channel *chans;
+
/* It's from the receive queue. */
chan = msg->rsp[3] & 0xf;
if (chan >= IPMI_MAX_CHANNELS) {
@@ -3735,12 +4315,14 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
* equal to or greater than IPMI_MAX_CHANNELS when all the
* channels for this interface have been initialized.
*/
- if (intf->curr_channel < IPMI_MAX_CHANNELS) {
+ if (!intf->channels_ready) {
requeue = 0; /* Throw the message away */
goto out;
}
- switch (intf->channels[chan].medium) {
+ chans = READ_ONCE(intf->channel_list)->c;
+
+ switch (chans[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
/*
@@ -3777,9 +4359,8 @@ static int handle_one_recv_msg(ipmi_smi_t intf,
default:
/* Check for OEM Channels. Clients had better
register for these commands. */
- if ((intf->channels[chan].medium
- >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
- && (intf->channels[chan].medium
+ if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
+ && (chans[chan].medium
<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
requeue = handle_oem_get_msg_cmd(intf, msg);
} else {
@@ -3941,15 +4522,14 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
&& (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
&& (msg->rsp[2] != IPMI_BUS_ERR)
&& (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
- int chan = msg->rsp[3] & 0xf;
+ int ch = msg->rsp[3] & 0xf;
+ struct ipmi_channel *chans;
/* Got an error sending the message, handle it. */
- if (chan >= IPMI_MAX_CHANNELS)
- ; /* This shouldn't happen */
- else if ((intf->channels[chan].medium
- == IPMI_CHANNEL_MEDIUM_8023LAN)
- || (intf->channels[chan].medium
- == IPMI_CHANNEL_MEDIUM_ASYNC))
+
+ chans = READ_ONCE(intf->channel_list)->c;
+ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
ipmi_inc_stat(intf, sent_lan_command_errs);
else
ipmi_inc_stat(intf, sent_ipmb_command_errs);
@@ -4030,7 +4610,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
}
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
- struct list_head *timeouts, long timeout_period,
+ struct list_head *timeouts,
+ unsigned long timeout_period,
int slot, unsigned long *flags,
unsigned int *waiting_msgs)
{
@@ -4043,8 +4624,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
if (!ent->inuse)
return;
- ent->timeout -= timeout_period;
- if (ent->timeout > 0) {
+ if (timeout_period < ent->timeout) {
+ ent->timeout -= timeout_period;
(*waiting_msgs)++;
return;
}
@@ -4110,7 +4691,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
}
}
-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
+ unsigned long timeout_period)
{
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
@@ -4118,6 +4700,14 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
int i;
unsigned int waiting_msgs = 0;
+ if (!intf->bmc_registered) {
+ kref_get(&intf->refcount);
+ if (!schedule_work(&intf->bmc_reg_work)) {
+ kref_put(&intf->refcount, intf_free);
+ waiting_msgs++;
+ }
+ }
+
/*
* Go through the seq table and find any messages that
* have timed out, putting them in the timeouts
@@ -4176,7 +4766,7 @@ static struct timer_list ipmi_timer;
static atomic_t stop_operation;
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
{
ipmi_smi_t intf;
int nt = 0;
@@ -4269,8 +4859,6 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
-#ifdef CONFIG_IPMI_PANIC_EVENT
-
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
@@ -4306,8 +4894,8 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
&smi_msg,
&recv_msg,
0,
- intf->channels[0].address,
- intf->channels[0].lun,
+ intf->addrinfo[0].address,
+ intf->addrinfo[0].lun,
0, 1); /* Don't retry, and don't wait. */
if (rv)
atomic_sub(2, &panic_done_count);
@@ -4318,7 +4906,6 @@ static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
ipmi_poll(intf);
}
-#ifdef CONFIG_IPMI_PANIC_STRING
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
@@ -4345,7 +4932,6 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
}
}
-#endif
static void send_panic_events(char *str)
{
@@ -4355,6 +4941,9 @@ static void send_panic_events(char *str)
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
+ if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
+ return;
+
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
si->channel = IPMI_BMC_CHANNEL;
@@ -4383,20 +4972,19 @@ static void send_panic_events(char *str)
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- if (!intf->handlers)
- /* Interface is not ready. */
+ if (!intf->handlers || !intf->handlers->poll)
+ /* Interface is not ready or can't run at panic time. */
continue;
/* Send the event announcing the panic. */
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
-#ifdef CONFIG_IPMI_PANIC_STRING
/*
* On every interface, dump a bunch of OEM event holding the
* string.
*/
- if (!str)
+ if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
return;
/* For every registered interface, send the event. */
@@ -4456,7 +5044,7 @@ static void send_panic_events(char *str)
*/
if (((intf->event_receiver & 1) == 0)
&& (intf->event_receiver != 0)
- && (intf->event_receiver != intf->channels[0].address)) {
+ && (intf->event_receiver != intf->addrinfo[0].address)) {
/*
* The event receiver is valid, send an IPMB
* message.
@@ -4493,7 +5081,7 @@ static void send_panic_events(char *str)
data[0] = 0;
data[1] = 0;
data[2] = 0xf0; /* OEM event without timestamp. */
- data[3] = intf->channels[0].address;
+ data[3] = intf->addrinfo[0].address;
data[4] = j++; /* sequence # */
/*
* Always give 11 bytes, so strncpy will fill
@@ -4505,9 +5093,7 @@ static void send_panic_events(char *str)
ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
-#endif /* CONFIG_IPMI_PANIC_STRING */
}
-#endif /* CONFIG_IPMI_PANIC_EVENT */
static int has_panicked;
@@ -4545,12 +5131,12 @@ static int panic_event(struct notifier_block *this,
spin_unlock(&intf->waiting_rcv_msgs_lock);
intf->run_to_completion = 1;
- intf->handlers->set_run_to_completion(intf->send_info, 1);
+ if (intf->handlers->set_run_to_completion)
+ intf->handlers->set_run_to_completion(intf->send_info,
+ 1);
}
-#ifdef CONFIG_IPMI_PANIC_EVENT
send_panic_events(ptr);
-#endif
return NOTIFY_DONE;
}
@@ -4570,24 +5156,23 @@ static int ipmi_init_msghandler(void)
rv = driver_register(&ipmidriver.driver);
if (rv) {
- printk(KERN_ERR PFX "Could not register IPMI driver\n");
+ pr_err(PFX "Could not register IPMI driver\n");
return rv;
}
- printk(KERN_INFO "ipmi message handler version "
- IPMI_DRIVER_VERSION "\n");
+ pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
proc_ipmi_root = proc_mkdir("ipmi", NULL);
if (!proc_ipmi_root) {
- printk(KERN_ERR PFX "Unable to create IPMI proc dir");
+ pr_err(PFX "Unable to create IPMI proc dir");
driver_unregister(&ipmidriver.driver);
return -ENOMEM;
}
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
- setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
@@ -4625,9 +5210,9 @@ static void __exit cleanup_ipmi(void)
atomic_inc(&stop_operation);
del_timer_sync(&ipmi_timer);
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_IPMI_PROC_INTERFACE
proc_remove(proc_ipmi_root);
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_IPMI_PROC_INTERFACE */
driver_unregister(&ipmidriver.driver);
@@ -4636,12 +5221,10 @@ static void __exit cleanup_ipmi(void)
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
if (count != 0)
- printk(KERN_WARNING PFX "SMI message count %d at exit\n",
- count);
+ pr_warn(PFX "SMI message count %d at exit\n", count);
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
- printk(KERN_WARNING PFX "recv message count %d at exit\n",
- count);
+ pr_warn(PFX "recv message count %d at exit\n", count);
}
module_exit(cleanup_ipmi);
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index b338a4becbf8..07fddbefefe4 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -23,7 +23,6 @@
struct ipmi_smi_powernv {
u64 interface_id;
- struct ipmi_device_id ipmi_id;
ipmi_smi_t intf;
unsigned int irq;
@@ -266,8 +265,7 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
}
/* todo: query actual ipmi_device_id */
- rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
- &ipmi->ipmi_id, dev, 0);
+ rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
goto err_free_msg;
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 9f2e3be2c5b8..38e6af1c8e38 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -66,7 +66,7 @@ static void (*specific_poweroff_func)(ipmi_user_t user);
/* Holds the old poweroff function so we can restore it on removal. */
static void (*old_poweroff_func)(void);
-static int set_param_ifnum(const char *val, struct kernel_param *kp)
+static int set_param_ifnum(const char *val, const struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
@@ -133,7 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
complete(comp);
}
-static struct ipmi_user_hndl ipmi_poweroff_handler = {
+static const struct ipmi_user_hndl ipmi_poweroff_handler = {
.ipmi_recv_hndl = receive_handler
};
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
new file mode 100644
index 000000000000..17ce5f7b89ab
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -0,0 +1,49 @@
+/*
+ * ipmi_si.h
+ *
+ * Interface from the device-specific interfaces (OF, DMI, ACPI, PCI,
+ * etc) to the base ipmi system interface code.
+ */
+
+#include <linux/interrupt.h>
+#include "ipmi_si_sm.h"
+
+#define IPMI_IO_ADDR_SPACE 0
+#define IPMI_MEM_ADDR_SPACE 1
+
+#define DEFAULT_REGSPACING 1
+#define DEFAULT_REGSIZE 1
+
+#define DEVICE_NAME "ipmi_si"
+
+int ipmi_si_add_smi(struct si_sm_io *io);
+irqreturn_t ipmi_si_irq_handler(int irq, void *data);
+void ipmi_irq_start_cleanup(struct si_sm_io *io);
+int ipmi_std_irq_setup(struct si_sm_io *io);
+void ipmi_irq_finish_setup(struct si_sm_io *io);
+int ipmi_si_remove_by_dev(struct device *dev);
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr);
+int ipmi_si_hardcode_find_bmc(void);
+void ipmi_si_platform_init(void);
+void ipmi_si_platform_shutdown(void);
+
+extern struct platform_driver ipmi_platform_driver;
+
+#ifdef CONFIG_PCI
+void ipmi_si_pci_init(void);
+void ipmi_si_pci_shutdown(void);
+#else
+static inline void ipmi_si_pci_init(void) { }
+static inline void ipmi_si_pci_shutdown(void) { }
+#endif
+#ifdef CONFIG_PARISC
+void ipmi_si_parisc_init(void);
+void ipmi_si_parisc_shutdown(void);
+#else
+static inline void ipmi_si_parisc_init(void) { }
+static inline void ipmi_si_parisc_shutdown(void) { }
+#endif
+
+int ipmi_si_port_setup(struct si_sm_io *io);
+int ipmi_si_mem_setup(struct si_sm_io *io);
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
new file mode 100644
index 000000000000..fa9a4780de36
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -0,0 +1,146 @@
+
+#include <linux/moduleparam.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hardcode: "
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
+ */
+
+#define SI_MAX_PARMS 4
+
+static char *si_type[SI_MAX_PARMS];
+#define MAX_SI_TYPE_STR 30
+static char si_type_str[MAX_SI_TYPE_STR];
+static unsigned long addrs[SI_MAX_PARMS];
+static unsigned int num_addrs;
+static unsigned int ports[SI_MAX_PARMS];
+static unsigned int num_ports;
+static int irqs[SI_MAX_PARMS];
+static unsigned int num_irqs;
+static int regspacings[SI_MAX_PARMS];
+static unsigned int num_regspacings;
+static int regsizes[SI_MAX_PARMS];
+static unsigned int num_regsizes;
+static int regshifts[SI_MAX_PARMS];
+static unsigned int num_regshifts;
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
+static unsigned int num_slave_addrs;
+
+module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
+MODULE_PARM_DESC(type, "Defines the type of each interface, each"
+ " interface separated by commas. The types are 'kcs',"
+ " 'smic', and 'bt'. For example si_type=kcs,bt will set"
+ " the first interface to kcs and the second to bt");
+module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
+MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is in memory. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(ports, uint, ioport, &num_ports, 0);
+MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " is a port. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(irqs, int, irq, &num_irqs, 0);
+MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
+ " addresses separated by commas. Only use if an interface"
+ " has an interrupt. Otherwise, set it to zero or leave"
+ " it blank.");
+module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
+MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
+ " and each successive register used by the interface. For"
+ " instance, if the start address is 0xca2 and the spacing"
+ " is 2, then the second address is at 0xca4. Defaults"
+ " to 1.");
+module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
+MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
+ " This should generally be 1, 2, 4, or 8 for an 8-bit,"
+ " 16-bit, 32-bit, or 64-bit register. Use this if you"
+ " the 8-bit IPMI register has to be read from a larger"
+ " register.");
+module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
+MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
+ " IPMI register, in bits. For instance, if the data"
+ " is read from a 32-bit word and the IPMI data is in"
+ " bits 8-15, then the shift would be 8.");
+module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
+MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
+ " the controller. Normally this is 0x20, but can be"
+ " overridden by this parm. This is an array indexed"
+ " by interface number.");
+
+int ipmi_si_hardcode_find_bmc(void)
+{
+ int ret = -ENODEV;
+ int i;
+ struct si_sm_io io;
+ char *str;
+
+ /* Parse out the si_type string into its components. */
+ str = si_type_str;
+ if (*str != '\0') {
+ for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
+ si_type[i] = str;
+ str = strchr(str, ',');
+ if (str) {
+ *str = '\0';
+ str++;
+ } else {
+ break;
+ }
+ }
+ }
+
+ memset(&io, 0, sizeof(io));
+ for (i = 0; i < SI_MAX_PARMS; i++) {
+ if (!ports[i] && !addrs[i])
+ continue;
+
+ io.addr_source = SI_HARDCODED;
+ pr_info(PFX "probing via hardcoded address\n");
+
+ if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
+ io.si_type = SI_KCS;
+ } else if (strcmp(si_type[i], "smic") == 0) {
+ io.si_type = SI_SMIC;
+ } else if (strcmp(si_type[i], "bt") == 0) {
+ io.si_type = SI_BT;
+ } else {
+ pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+ i, si_type[i]);
+ continue;
+ }
+
+ if (ports[i]) {
+ /* An I/O port */
+ io.addr_data = ports[i];
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else if (addrs[i]) {
+ /* A memory port */
+ io.addr_data = addrs[i];
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else {
+ pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+ i);
+ continue;
+ }
+
+ io.addr = NULL;
+ io.regspacing = regspacings[i];
+ if (!io.regspacing)
+ io.regspacing = DEFAULT_REGSPACING;
+ io.regsize = regsizes[i];
+ if (!io.regsize)
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = regshifts[i];
+ io.irq = irqs[i];
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+ io.slave_addr = slave_addrs[i];
+
+ ret = ipmi_si_add_smi(&io);
+ }
+ return ret;
+}
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
new file mode 100644
index 000000000000..fc03b9be2f3d
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -0,0 +1,242 @@
+/*
+ * ipmi_si_hotmod.c
+ *
+ * Handling for dynamically adding/removing IPMI devices through
+ * a module parameter (and thus sysfs).
+ */
+#include <linux/moduleparam.h>
+#include <linux/ipmi.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_hotmod: "
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp);
+
+module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
+ " Documentation/IPMI.txt in the kernel sources for the"
+ " gory details.");
+
+/*
+ * Parms come in as <op1>[:op2[:op3...]]. ops are:
+ * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
+ * Options are:
+ * rsp=<regspacing>
+ * rsi=<regsize>
+ * rsh=<regshift>
+ * irq=<irq>
+ * ipmb=<ipmb addr>
+ */
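+/*
+ * Example (illustrative values only): adding a KCS interface at I/O
+ * port 0xca2 with IRQ 9, and later removing it, would look like
+ *   echo "add,kcs,i/o,0xca2,irq=9" > /sys/module/ipmi_si/parameters/hotmod
+ *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
+ * assuming the parameter is exposed under the ipmi_si module via the
+ * module_param_call() below.
+ */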
+enum hotmod_op { HM_ADD, HM_REMOVE };
+struct hotmod_vals {
+ const char *name;
+ const int val;
+};
+
+static const struct hotmod_vals hotmod_ops[] = {
+ { "add", HM_ADD },
+ { "remove", HM_REMOVE },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_si[] = {
+ { "kcs", SI_KCS },
+ { "smic", SI_SMIC },
+ { "bt", SI_BT },
+ { NULL }
+};
+
+static const struct hotmod_vals hotmod_as[] = {
+ { "mem", IPMI_MEM_ADDR_SPACE },
+ { "i/o", IPMI_IO_ADDR_SPACE },
+ { NULL }
+};
+
+static int parse_str(const struct hotmod_vals *v, int *val, char *name,
+ char **curr)
+{
+ char *s;
+ int i;
+
+ s = strchr(*curr, ',');
+ if (!s) {
+ pr_warn(PFX "No hotmod %s given.\n", name);
+ return -EINVAL;
+ }
+ *s = '\0';
+ s++;
+ for (i = 0; v[i].name; i++) {
+ if (strcmp(*curr, v[i].name) == 0) {
+ *val = v[i].val;
+ *curr = s;
+ return 0;
+ }
+ }
+
+ pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
+ return -EINVAL;
+}
+
+static int check_hotmod_int_op(const char *curr, const char *option,
+ const char *name, int *val)
+{
+ char *n;
+
+ if (strcmp(curr, name) == 0) {
+ if (!option) {
+ pr_warn(PFX "No option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ *val = simple_strtoul(option, &n, 0);
+ if ((*n != '\0') || (*option == '\0')) {
+ pr_warn(PFX "Bad option given for '%s'\n", curr);
+ return -EINVAL;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int hotmod_handler(const char *val, const struct kernel_param *kp)
+{
+ char *str = kstrdup(val, GFP_KERNEL);
+ int rv;
+ char *next, *curr, *s, *n, *o;
+ enum hotmod_op op;
+ enum si_type si_type;
+ int addr_space;
+ unsigned long addr;
+ int regspacing;
+ int regsize;
+ int regshift;
+ int irq;
+ int ipmb;
+ int ival;
+ int len;
+
+ if (!str)
+ return -ENOMEM;
+
+ /* Kill any trailing spaces, as we can get a "\n" from echo. */
+ len = strlen(str);
+ ival = len - 1;
+ while ((ival >= 0) && isspace(str[ival])) {
+ str[ival] = '\0';
+ ival--;
+ }
+
+ for (curr = str; curr; curr = next) {
+ regspacing = 1;
+ regsize = 1;
+ regshift = 0;
+ irq = 0;
+ ipmb = 0; /* Choose the default if not specified */
+
+ next = strchr(curr, ':');
+ if (next) {
+ *next = '\0';
+ next++;
+ }
+
+ rv = parse_str(hotmod_ops, &ival, "operation", &curr);
+ if (rv)
+ break;
+ op = ival;
+
+ rv = parse_str(hotmod_si, &ival, "interface type", &curr);
+ if (rv)
+ break;
+ si_type = ival;
+
+ rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
+ if (rv)
+ break;
+
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ addr = simple_strtoul(curr, &n, 0);
+ if ((*n != '\0') || (*curr == '\0')) {
+ pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
+ break;
+ }
+
+ while (s) {
+ curr = s;
+ s = strchr(curr, ',');
+ if (s) {
+ *s = '\0';
+ s++;
+ }
+ o = strchr(curr, '=');
+ if (o) {
+ *o = '\0';
+ o++;
+ }
+ rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "irq", &irq);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+ rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
+ if (rv < 0)
+ goto out;
+ else if (rv)
+ continue;
+
+ rv = -EINVAL;
+ pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
+ goto out;
+ }
+
+ if (op == HM_ADD) {
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_HOTMOD;
+ io.si_type = si_type;
+ io.addr_data = addr;
+ io.addr_type = addr_space;
+
+ io.addr = NULL;
+ io.regspacing = regspacing;
+ if (!io.regspacing)
+ io.regspacing = DEFAULT_REGSPACING;
+ io.regsize = regsize;
+ if (!io.regsize)
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = regshift;
+ io.irq = irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+ io.slave_addr = ipmb;
+
+ rv = ipmi_si_add_smi(&io);
+ if (rv)
+ goto out;
+ } else {
+ ipmi_si_remove_by_data(addr_space, si_type, addr);
+ }
+ }
+ rv = len;
+out:
+ kfree(str);
+ return rv;
+}
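Because hotmod is registered through module_param_call() with mode 0200, the handler above is normally exercised by writing an op string to the module's parameter file, typically /sys/module/ipmi_si/parameters/hotmod (path assumed here; writable by root only). A minimal user-space sketch of such a write, with purely illustrative values:

#include <stdio.h>

int main(void)
{
	/* Hot-add a KCS interface at I/O port 0xca2. */
	FILE *f = fopen("/sys/module/ipmi_si/parameters/hotmod", "w");

	if (!f) {
		perror("hotmod");
		return 1;
	}
	fputs("add,kcs,i/o,0xca2,rsp=2,irq=10\n", f);
	return fclose(f) ? 1 : 0;
}

The trailing newline is harmless, since the handler strips trailing whitespace before parsing.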
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 36f47e8d06a3..779869ed32b1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -49,8 +49,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
@@ -59,22 +57,9 @@
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
-#include <asm/io.h>
-#include "ipmi_si_sm.h"
-#include "ipmi_dmi.h"
-#include <linux/dmi.h>
+#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/acpi.h>
-
-#ifdef CONFIG_PARISC
-#include <asm/hardware.h> /* for register_parisc_driver() stuff */
-#include <asm/parisc-device.h>
-#endif
#define PFX "ipmi_si: "
@@ -104,15 +89,9 @@ enum si_intf_state {
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
-enum si_type {
- SI_KCS, SI_SMIC, SI_BT
-};
-
-static const char * const si_to_str[] = { "kcs", "smic", "bt" };
+static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };
-#define DEVICE_NAME "ipmi_si"
-
-static struct platform_driver ipmi_driver;
+static int initialized;
/*
* Indexes into stats[] in smi_info below.
@@ -167,7 +146,6 @@ struct smi_info {
ipmi_smi_t intf;
struct si_sm_data *si_sm;
const struct si_sm_handlers *handlers;
- enum si_type si_type;
spinlock_t si_lock;
struct ipmi_smi_msg *waiting_msg;
struct ipmi_smi_msg *curr_msg;
@@ -178,14 +156,6 @@ struct smi_info {
* IPMI
*/
struct si_sm_io io;
- int (*io_setup)(struct smi_info *info);
- void (*io_cleanup)(struct smi_info *info);
- int (*irq_setup)(struct smi_info *info);
- void (*irq_cleanup)(struct smi_info *info);
- unsigned int io_size;
- enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
- void (*addr_source_cleanup)(struct smi_info *info);
- void *addr_source_data;
/*
* Per-OEM handler, called from handle_flags(). Returns 1
@@ -226,19 +196,6 @@ struct smi_info {
*/
bool run_to_completion;
- /* The I/O port of an SI interface. */
- int port;
-
- /*
- * The space between start addresses of the two ports. For
- * instance, if the first port is 0xca2 and the spacing is 4, then
- * the second port is 0xca6.
- */
- unsigned int spacing;
-
- /* zero if no irq; */
- int irq;
-
/* The timer for this si. */
struct timer_list si_timer;
@@ -289,26 +246,15 @@ struct smi_info {
/* From the get device id response... */
struct ipmi_device_id device_id;
- /* Driver model stuff. */
- struct device *dev;
+ /* Default driver model device. */
struct platform_device *pdev;
- /*
- * True if we allocated the device, false if it came from
- * someplace else (like PCI).
- */
- bool dev_registered;
-
- /* Slave address, could be reported from DMI. */
- unsigned char slave_addr;
-
/* Counters and things for the proc filesystem. */
atomic_t stats[SI_NUM_STATS];
struct task_struct *thread;
struct list_head link;
- union ipmi_smi_info_union addr_info;
};
#define smi_inc_stat(smi, stat) \
@@ -316,23 +262,15 @@ struct smi_info {
#define smi_get_stat(smi, stat) \
((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
-#define SI_MAX_PARMS 4
-
-static int force_kipmid[SI_MAX_PARMS];
+#define IPMI_MAX_INTFS 4
+static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
-#ifdef CONFIG_PCI
-static bool pci_registered;
-#endif
-#ifdef CONFIG_PARISC
-static bool parisc_registered;
-#endif
-static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;
static bool unload_when_empty = true;
-static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static void cleanup_ipmi_si(void);
@@ -499,7 +437,7 @@ static void start_getting_events(struct smi_info *smi_info)
*/
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
{
- if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = true;
start_check_enables(smi_info, start_timer);
return true;
@@ -509,7 +447,7 @@ static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
static inline bool enable_si_irq(struct smi_info *smi_info)
{
- if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
smi_info->interrupt_disabled = false;
start_check_enables(smi_info, true);
return true;
@@ -585,13 +523,13 @@ static u8 current_global_enables(struct smi_info *smi_info, u8 base,
if (smi_info->supports_event_msg_buff)
enables |= IPMI_BMC_EVT_MSG_BUFF;
- if (((smi_info->irq && !smi_info->interrupt_disabled) ||
+ if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
smi_info->cannot_disable_irq) &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_RCV_MSG_INTR;
if (smi_info->supports_event_msg_buff &&
- smi_info->irq && !smi_info->interrupt_disabled &&
+ smi_info->io.irq && !smi_info->interrupt_disabled &&
!smi_info->irq_enable_broken)
enables |= IPMI_BMC_EVT_MSG_INTR;
@@ -673,7 +611,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error clearing flags: %2.2x\n", msg[2]);
}
smi_info->si_state = SI_NORMAL;
@@ -765,15 +703,15 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Couldn't get irq info: %x.\n", msg[2]);
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
break;
}
enables = current_global_enables(smi_info, 0, &irq_on);
- if (smi_info->si_type == SI_BT)
+ if (smi_info->io.si_type == SI_BT)
/* BT has its own interrupt enable bit. */
check_bt_irq(smi_info, irq_on);
if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
@@ -803,7 +741,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0)
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Could not set the global enables: 0x%x.\n",
msg[2]);
@@ -927,7 +865,7 @@ restart:
* asynchronously reset, and may thus get interrupts
* disable and messages disabled.
*/
- if (smi_info->supports_event_msg_buff || smi_info->irq) {
+ if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
start_check_enables(smi_info, true);
} else {
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
@@ -1153,11 +1091,9 @@ static void set_need_watch(void *send_info, bool enable)
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static int initialized;
-
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = (struct smi_info *) data;
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
@@ -1172,7 +1108,7 @@ static void smi_timeout(unsigned long data)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
- if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
@@ -1199,11 +1135,17 @@ do_mod_timer:
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
-static irqreturn_t si_irq_handler(int irq, void *data)
+irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
+ if (smi_info->io.si_type == SI_BT)
+ /* We need to clear the IRQ flag for the BT interface. */
+ smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_CLEAR_IRQ_BIT
+ | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
+
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_inc_stat(smi_info, interrupts);
@@ -1215,16 +1157,6 @@ static irqreturn_t si_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t si_bt_irq_handler(int irq, void *data)
-{
- struct smi_info *smi_info = data;
- /* We need to clear the IRQ flag for the BT interface. */
- smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
- IPMI_BT_INTMASK_CLEAR_IRQ_BIT
- | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
- return si_irq_handler(irq, data);
-}
-
static int smi_start_processing(void *send_info,
ipmi_smi_t intf)
{
@@ -1234,12 +1166,14 @@ static int smi_start_processing(void *send_info,
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
- setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
- if (new_smi->irq_setup)
- new_smi->irq_setup(new_smi);
+ if (new_smi->io.irq_setup) {
+ new_smi->io.irq_handler_data = new_smi;
+ new_smi->io.irq_setup(&new_smi->io);
+ }
/*
* Check if the user forcefully enabled the daemon.
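The smi_timeout()/smi_start_processing() hunks above convert the driver from setup_timer() with an unsigned long cookie to the timer_list-based callback API. A minimal sketch of that pattern, with hypothetical my_dev names and assuming a kernel that provides timer_setup() and from_timer():

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list poll_timer;
};

static void my_dev_poll(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer. */
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	/* ... do the periodic work, then re-arm ... */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	timer_setup(&dev->poll_timer, my_dev_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}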
@@ -1250,14 +1184,14 @@ static int smi_start_processing(void *send_info,
* The BT interface is efficient enough to not need a thread,
* and there is no need for a thread if we have interrupts.
*/
- else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+ else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
enable = 1;
if (enable) {
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- dev_notice(new_smi->dev, "Could not start"
+ dev_notice(new_smi->io.dev, "Could not start"
" kernel thread due to error %ld, only using"
" timers to drive the interface\n",
PTR_ERR(new_smi->thread));
@@ -1272,10 +1206,10 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
struct smi_info *smi = send_info;
- data->addr_src = smi->addr_source;
- data->dev = smi->dev;
- data->addr_info = smi->addr_info;
- get_device(smi->dev);
+ data->addr_src = smi->io.addr_source;
+ data->dev = smi->io.dev;
+ data->addr_info = smi->io.addr_info;
+ get_device(smi->io.dev);
return 0;
}
@@ -1301,118 +1235,12 @@ static const struct ipmi_smi_handlers handlers = {
.poll = poll,
};
-/*
- * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
- * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
- */
-
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
-#define DEFAULT_REGSPACING 1
-#define DEFAULT_REGSIZE 1
-
-#ifdef CONFIG_ACPI
-static bool si_tryacpi = true;
-#endif
-#ifdef CONFIG_DMI
-static bool si_trydmi = true;
-#endif
-static bool si_tryplatform = true;
-#ifdef CONFIG_PCI
-static bool si_trypci = true;
-#endif
-static char *si_type[SI_MAX_PARMS];
-#define MAX_SI_TYPE_STR 30
-static char si_type_str[MAX_SI_TYPE_STR];
-static unsigned long addrs[SI_MAX_PARMS];
-static unsigned int num_addrs;
-static unsigned int ports[SI_MAX_PARMS];
-static unsigned int num_ports;
-static int irqs[SI_MAX_PARMS];
-static unsigned int num_irqs;
-static int regspacings[SI_MAX_PARMS];
-static unsigned int num_regspacings;
-static int regsizes[SI_MAX_PARMS];
-static unsigned int num_regsizes;
-static int regshifts[SI_MAX_PARMS];
-static unsigned int num_regshifts;
-static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
-static unsigned int num_slave_addrs;
-
-#define IPMI_IO_ADDR_SPACE 0
-#define IPMI_MEM_ADDR_SPACE 1
static const char * const addr_space_to_str[] = { "i/o", "mem" };
-static int hotmod_handler(const char *val, struct kernel_param *kp);
-
-module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
-MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
- " Documentation/IPMI.txt in the kernel sources for the"
- " gory details.");
-
-#ifdef CONFIG_ACPI
-module_param_named(tryacpi, si_tryacpi, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
- " default scan of the interfaces identified via ACPI");
-#endif
-#ifdef CONFIG_DMI
-module_param_named(trydmi, si_trydmi, bool, 0);
-MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
- " default scan of the interfaces identified via DMI");
-#endif
-module_param_named(tryplatform, si_tryplatform, bool, 0);
-MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
- " default scan of the interfaces identified via platform"
- " interfaces like openfirmware");
-#ifdef CONFIG_PCI
-module_param_named(trypci, si_trypci, bool, 0);
-MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
- " default scan of the interfaces identified via pci");
-#endif
-module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
-MODULE_PARM_DESC(type, "Defines the type of each interface, each"
- " interface separated by commas. The types are 'kcs',"
- " 'smic', and 'bt'. For example si_type=kcs,bt will set"
- " the first interface to kcs and the second to bt");
-module_param_hw_array(addrs, ulong, iomem, &num_addrs, 0);
-MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " is in memory. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(ports, uint, ioport, &num_ports, 0);
-MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " is a port. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(irqs, int, irq, &num_irqs, 0);
-MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
- " addresses separated by commas. Only use if an interface"
- " has an interrupt. Otherwise, set it to zero or leave"
- " it blank.");
-module_param_hw_array(regspacings, int, other, &num_regspacings, 0);
-MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
- " and each successive register used by the interface. For"
- " instance, if the start address is 0xca2 and the spacing"
- " is 2, then the second address is at 0xca4. Defaults"
- " to 1.");
-module_param_hw_array(regsizes, int, other, &num_regsizes, 0);
-MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
- " This should generally be 1, 2, 4, or 8 for an 8-bit,"
- " 16-bit, 32-bit, or 64-bit register. Use this if you"
- " the 8-bit IPMI register has to be read from a larger"
- " register.");
-module_param_hw_array(regshifts, int, other, &num_regshifts, 0);
-MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
- " IPMI register, in bits. For instance, if the data"
- " is read from a 32-bit word and the IPMI data is in"
- " bit 8-15, then the shift would be 8");
-module_param_hw_array(slave_addrs, int, other, &num_slave_addrs, 0);
-MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
- " the controller. Normally this is 0x20, but can be"
- " overridden by this parm. This is an array indexed"
- " by interface number.");
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled(0). Normally the IPMI driver auto-detects"
@@ -1427,1450 +1255,53 @@ MODULE_PARM_DESC(kipmid_max_busy_us,
" sleeping. 0 (default) means to wait forever. Set to 100-500"
" if kipmid is using up a lot of CPU time.");
-
-static void std_irq_cleanup(struct smi_info *info)
-{
- if (info->si_type == SI_BT)
- /* Disable the interrupt in the BT interface. */
- info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
- free_irq(info->irq, info);
-}
-
-static int std_irq_setup(struct smi_info *info)
-{
- int rv;
-
- if (!info->irq)
- return 0;
-
- if (info->si_type == SI_BT) {
- rv = request_irq(info->irq,
- si_bt_irq_handler,
- IRQF_SHARED,
- DEVICE_NAME,
- info);
- if (!rv)
- /* Enable the interrupt in the BT interface. */
- info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
- IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
- } else
- rv = request_irq(info->irq,
- si_irq_handler,
- IRQF_SHARED,
- DEVICE_NAME,
- info);
- if (rv) {
- dev_warn(info->dev, "%s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
- info->irq = 0;
- } else {
- info->irq_cleanup = std_irq_cleanup;
- dev_info(info->dev, "Using irq %d\n", info->irq);
- }
-
- return rv;
-}
-
-static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return inb(addr + (offset * io->regspacing));
-}
-
-static void port_outb(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outb(b, addr + (offset * io->regspacing));
-}
-
-static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
-}
-
-static void port_outw(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outw(b << io->regshift, addr + (offset * io->regspacing));
-}
-
-static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
-{
- unsigned int addr = io->addr_data;
-
- return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
-}
-
-static void port_outl(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- unsigned int addr = io->addr_data;
-
- outl(b << io->regshift, addr+(offset * io->regspacing));
-}
-
-static void port_cleanup(struct smi_info *info)
-{
- unsigned int addr = info->io.addr_data;
- int idx;
-
- if (addr) {
- for (idx = 0; idx < info->io_size; idx++)
- release_region(addr + idx * info->io.regspacing,
- info->io.regsize);
- }
-}
-
-static int port_setup(struct smi_info *info)
-{
- unsigned int addr = info->io.addr_data;
- int idx;
-
- if (!addr)
- return -ENODEV;
-
- info->io_cleanup = port_cleanup;
-
- /*
- * Figure out the actual inb/inw/inl/etc routine to use based
- * upon the register size.
- */
- switch (info->io.regsize) {
- case 1:
- info->io.inputb = port_inb;
- info->io.outputb = port_outb;
- break;
- case 2:
- info->io.inputb = port_inw;
- info->io.outputb = port_outw;
- break;
- case 4:
- info->io.inputb = port_inl;
- info->io.outputb = port_outl;
- break;
- default:
- dev_warn(info->dev, "Invalid register size: %d\n",
- info->io.regsize);
- return -EINVAL;
- }
-
- /*
- * Some BIOSes reserve disjoint I/O regions in their ACPI
- * tables. This causes problems when trying to register the
- * entire I/O region. Therefore we must register each I/O
- * port separately.
- */
- for (idx = 0; idx < info->io_size; idx++) {
- if (request_region(addr + idx * info->io.regspacing,
- info->io.regsize, DEVICE_NAME) == NULL) {
- /* Undo allocations */
- while (idx--)
- release_region(addr + idx * info->io.regspacing,
- info->io.regsize);
- return -EIO;
- }
- }
- return 0;
-}
-
-static unsigned char intf_mem_inb(const struct si_sm_io *io,
- unsigned int offset)
-{
- return readb((io->addr)+(offset * io->regspacing));
-}
-
-static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeb(b, (io->addr)+(offset * io->regspacing));
-}
-
-static unsigned char intf_mem_inw(const struct si_sm_io *io,
- unsigned int offset)
-{
- return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-
-static unsigned char intf_mem_inl(const struct si_sm_io *io,
- unsigned int offset)
-{
- return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-
-#ifdef readq
-static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
-{
- return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
- & 0xff;
-}
-
-static void mem_outq(const struct si_sm_io *io, unsigned int offset,
- unsigned char b)
-{
- writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
-}
-#endif
-
-static void mem_region_cleanup(struct smi_info *info, int num)
-{
- unsigned long addr = info->io.addr_data;
- int idx;
-
- for (idx = 0; idx < num; idx++)
- release_mem_region(addr + idx * info->io.regspacing,
- info->io.regsize);
-}
-
-static void mem_cleanup(struct smi_info *info)
-{
- if (info->io.addr) {
- iounmap(info->io.addr);
- mem_region_cleanup(info, info->io_size);
- }
-}
-
-static int mem_setup(struct smi_info *info)
-{
- unsigned long addr = info->io.addr_data;
- int mapsize, idx;
-
- if (!addr)
- return -ENODEV;
-
- info->io_cleanup = mem_cleanup;
-
- /*
- * Figure out the actual readb/readw/readl/etc routine to use based
- * upon the register size.
- */
- switch (info->io.regsize) {
- case 1:
- info->io.inputb = intf_mem_inb;
- info->io.outputb = intf_mem_outb;
- break;
- case 2:
- info->io.inputb = intf_mem_inw;
- info->io.outputb = intf_mem_outw;
- break;
- case 4:
- info->io.inputb = intf_mem_inl;
- info->io.outputb = intf_mem_outl;
- break;
-#ifdef readq
- case 8:
- info->io.inputb = mem_inq;
- info->io.outputb = mem_outq;
- break;
-#endif
- default:
- dev_warn(info->dev, "Invalid register size: %d\n",
- info->io.regsize);
- return -EINVAL;
- }
-
- /*
- * Some BIOSes reserve disjoint memory regions in their ACPI
- * tables. This causes problems when trying to request the
- * entire region. Therefore we must request each register
- * separately.
- */
- for (idx = 0; idx < info->io_size; idx++) {
- if (request_mem_region(addr + idx * info->io.regspacing,
- info->io.regsize, DEVICE_NAME) == NULL) {
- /* Undo allocations */
- mem_region_cleanup(info, idx);
- return -EIO;
- }
- }
-
- /*
- * Calculate the total amount of memory to claim. This is an
- * unusual looking calculation, but it avoids claiming any
- * more memory than it has to. It will claim everything
- * between the first address to the end of the last full
- * register.
- */
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
- info->io.addr = ioremap(addr, mapsize);
- if (info->io.addr == NULL) {
- mem_region_cleanup(info, info->io_size);
- return -EIO;
- }
- return 0;
-}
-
-/*
- * Parms come in as <op1>[:op2[:op3...]]. ops are:
- * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
- * Options are:
- * rsp=<regspacing>
- * rsi=<regsize>
- * rsh=<regshift>
- * irq=<irq>
- * ipmb=<ipmb addr>
- */
-enum hotmod_op { HM_ADD, HM_REMOVE };
-struct hotmod_vals {
- const char *name;
- const int val;
-};
-
-static const struct hotmod_vals hotmod_ops[] = {
- { "add", HM_ADD },
- { "remove", HM_REMOVE },
- { NULL }
-};
-
-static const struct hotmod_vals hotmod_si[] = {
- { "kcs", SI_KCS },
- { "smic", SI_SMIC },
- { "bt", SI_BT },
- { NULL }
-};
-
-static const struct hotmod_vals hotmod_as[] = {
- { "mem", IPMI_MEM_ADDR_SPACE },
- { "i/o", IPMI_IO_ADDR_SPACE },
- { NULL }
-};
-
-static int parse_str(const struct hotmod_vals *v, int *val, char *name,
- char **curr)
-{
- char *s;
- int i;
-
- s = strchr(*curr, ',');
- if (!s) {
- pr_warn(PFX "No hotmod %s given.\n", name);
- return -EINVAL;
- }
- *s = '\0';
- s++;
- for (i = 0; v[i].name; i++) {
- if (strcmp(*curr, v[i].name) == 0) {
- *val = v[i].val;
- *curr = s;
- return 0;
- }
- }
-
- pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
- return -EINVAL;
-}
-
-static int check_hotmod_int_op(const char *curr, const char *option,
- const char *name, int *val)
-{
- char *n;
-
- if (strcmp(curr, name) == 0) {
- if (!option) {
- pr_warn(PFX "No option given for '%s'\n", curr);
- return -EINVAL;
- }
- *val = simple_strtoul(option, &n, 0);
- if ((*n != '\0') || (*option == '\0')) {
- pr_warn(PFX "Bad option given for '%s'\n", curr);
- return -EINVAL;
- }
- return 1;
- }
- return 0;
-}
-
-static struct smi_info *smi_info_alloc(void)
-{
- struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
-
- if (info)
- spin_lock_init(&info->si_lock);
- return info;
-}
-
-static int hotmod_handler(const char *val, struct kernel_param *kp)
-{
- char *str = kstrdup(val, GFP_KERNEL);
- int rv;
- char *next, *curr, *s, *n, *o;
- enum hotmod_op op;
- enum si_type si_type;
- int addr_space;
- unsigned long addr;
- int regspacing;
- int regsize;
- int regshift;
- int irq;
- int ipmb;
- int ival;
- int len;
- struct smi_info *info;
-
- if (!str)
- return -ENOMEM;
-
- /* Kill any trailing spaces, as we can get a "\n" from echo. */
- len = strlen(str);
- ival = len - 1;
- while ((ival >= 0) && isspace(str[ival])) {
- str[ival] = '\0';
- ival--;
- }
-
- for (curr = str; curr; curr = next) {
- regspacing = 1;
- regsize = 1;
- regshift = 0;
- irq = 0;
- ipmb = 0; /* Choose the default if not specified */
-
- next = strchr(curr, ':');
- if (next) {
- *next = '\0';
- next++;
- }
-
- rv = parse_str(hotmod_ops, &ival, "operation", &curr);
- if (rv)
- break;
- op = ival;
-
- rv = parse_str(hotmod_si, &ival, "interface type", &curr);
- if (rv)
- break;
- si_type = ival;
-
- rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
- if (rv)
- break;
-
- s = strchr(curr, ',');
- if (s) {
- *s = '\0';
- s++;
- }
- addr = simple_strtoul(curr, &n, 0);
- if ((*n != '\0') || (*curr == '\0')) {
- pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
- break;
- }
-
- while (s) {
- curr = s;
- s = strchr(curr, ',');
- if (s) {
- *s = '\0';
- s++;
- }
- o = strchr(curr, '=');
- if (o) {
- *o = '\0';
- o++;
- }
- rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "irq", &irq);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
- rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
- if (rv < 0)
- goto out;
- else if (rv)
- continue;
-
- rv = -EINVAL;
- pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
- goto out;
- }
-
- if (op == HM_ADD) {
- info = smi_info_alloc();
- if (!info) {
- rv = -ENOMEM;
- goto out;
- }
-
- info->addr_source = SI_HOTMOD;
- info->si_type = si_type;
- info->io.addr_data = addr;
- info->io.addr_type = addr_space;
- if (addr_space == IPMI_MEM_ADDR_SPACE)
- info->io_setup = mem_setup;
- else
- info->io_setup = port_setup;
-
- info->io.addr = NULL;
- info->io.regspacing = regspacing;
- if (!info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsize;
- if (!info->io.regsize)
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = regshift;
- info->irq = irq;
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->slave_addr = ipmb;
-
- rv = add_smi(info);
- if (rv) {
- kfree(info);
- goto out;
- }
- mutex_lock(&smi_infos_lock);
- rv = try_smi_init(info);
- mutex_unlock(&smi_infos_lock);
- if (rv) {
- cleanup_one_si(info);
- goto out;
- }
- } else {
- /* remove */
- struct smi_info *e, *tmp_e;
-
- mutex_lock(&smi_infos_lock);
- list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
- if (e->io.addr_type != addr_space)
- continue;
- if (e->si_type != si_type)
- continue;
- if (e->io.addr_data == addr)
- cleanup_one_si(e);
- }
- mutex_unlock(&smi_infos_lock);
- }
- }
- rv = len;
-out:
- kfree(str);
- return rv;
-}
-
-static int hardcode_find_bmc(void)
-{
- int ret = -ENODEV;
- int i;
- struct smi_info *info;
-
- for (i = 0; i < SI_MAX_PARMS; i++) {
- if (!ports[i] && !addrs[i])
- continue;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_HARDCODED;
- pr_info(PFX "probing via hardcoded address\n");
-
- if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
- info->si_type = SI_KCS;
- } else if (strcmp(si_type[i], "smic") == 0) {
- info->si_type = SI_SMIC;
- } else if (strcmp(si_type[i], "bt") == 0) {
- info->si_type = SI_BT;
- } else {
- pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
- i, si_type[i]);
- kfree(info);
- continue;
- }
-
- if (ports[i]) {
- /* An I/O port */
- info->io_setup = port_setup;
- info->io.addr_data = ports[i];
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else if (addrs[i]) {
- /* A memory port */
- info->io_setup = mem_setup;
- info->io.addr_data = addrs[i];
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- } else {
- pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
- i);
- kfree(info);
- continue;
- }
-
- info->io.addr = NULL;
- info->io.regspacing = regspacings[i];
- if (!info->io.regspacing)
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = regsizes[i];
- if (!info->io.regsize)
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = regshifts[i];
- info->irq = irqs[i];
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->slave_addr = slave_addrs[i];
-
- if (!add_smi(info)) {
- mutex_lock(&smi_infos_lock);
- if (try_smi_init(info))
- cleanup_one_si(info);
- mutex_unlock(&smi_infos_lock);
- ret = 0;
- } else {
- kfree(info);
- }
- }
- return ret;
-}
-
-#ifdef CONFIG_ACPI
-
-/*
- * Once we get an ACPI failure, we don't try any more, because we go
- * through the tables sequentially. Once we don't find a table, there
- * are no more.
- */
-static int acpi_failure;
-
-/* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
- u32 gpe_number, void *context)
-{
- struct smi_info *smi_info = context;
- unsigned long flags;
-
- spin_lock_irqsave(&(smi_info->si_lock), flags);
-
- smi_inc_stat(smi_info, interrupts);
-
- debug_timestamp("ACPI_GPE");
-
- smi_event_handler(smi_info, 0);
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
- return ACPI_INTERRUPT_HANDLED;
-}
-
-static void acpi_gpe_irq_cleanup(struct smi_info *info)
-{
- if (!info->irq)
- return;
-
- acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
-}
-
-static int acpi_gpe_irq_setup(struct smi_info *info)
-{
- acpi_status status;
-
- if (!info->irq)
- return 0;
-
- status = acpi_install_gpe_handler(NULL,
- info->irq,
- ACPI_GPE_LEVEL_TRIGGERED,
- &ipmi_acpi_gpe,
- info);
- if (status != AE_OK) {
- dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
- " running polled\n", DEVICE_NAME, info->irq);
- info->irq = 0;
- return -EINVAL;
- } else {
- info->irq_cleanup = acpi_gpe_irq_cleanup;
- dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
- return 0;
- }
-}
-
-/*
- * Defined at
- * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
- */
-struct SPMITable {
- s8 Signature[4];
- u32 Length;
- u8 Revision;
- u8 Checksum;
- s8 OEMID[6];
- s8 OEMTableID[8];
- s8 OEMRevision[4];
- s8 CreatorID[4];
- s8 CreatorRevision[4];
- u8 InterfaceType;
- u8 IPMIlegacy;
- s16 SpecificationRevision;
-
- /*
- * Bit 0 - SCI interrupt supported
- * Bit 1 - I/O APIC/SAPIC
- */
- u8 InterruptType;
-
- /*
- * If bit 0 of InterruptType is set, then this is the SCI
- * interrupt in the GPEx_STS register.
- */
- u8 GPE;
-
- s16 Reserved;
-
- /*
- * If bit 1 of InterruptType is set, then this is the I/O
- * APIC/SAPIC interrupt.
- */
- u32 GlobalSystemInterrupt;
-
- /* The actual register address. */
- struct acpi_generic_address addr;
-
- u8 UID[4];
-
- s8 spmi_id[1]; /* A '\0' terminated array starts here. */
-};
-
-static int try_init_spmi(struct SPMITable *spmi)
-{
- struct smi_info *info;
- int rv;
-
- if (spmi->IPMIlegacy != 1) {
- pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
- }
-
- info = smi_info_alloc();
- if (!info) {
- pr_err(PFX "Could not allocate SI data (3)\n");
- return -ENOMEM;
- }
-
- info->addr_source = SI_SPMI;
- pr_info(PFX "probing via SPMI\n");
-
- /* Figure out the interface type. */
- switch (spmi->InterfaceType) {
- case 1: /* KCS */
- info->si_type = SI_KCS;
- break;
- case 2: /* SMIC */
- info->si_type = SI_SMIC;
- break;
- case 3: /* BT */
- info->si_type = SI_BT;
- break;
- case 4: /* SSIF, just ignore */
- kfree(info);
- return -EIO;
- default:
- pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
- kfree(info);
- return -EIO;
- }
-
- if (spmi->InterruptType & 1) {
- /* We've got a GPE interrupt. */
- info->irq = spmi->GPE;
- info->irq_setup = acpi_gpe_irq_setup;
- } else if (spmi->InterruptType & 2) {
- /* We've got an APIC/SAPIC interrupt. */
- info->irq = spmi->GlobalSystemInterrupt;
- info->irq_setup = std_irq_setup;
- } else {
- /* Use the default interrupt setting. */
- info->irq = 0;
- info->irq_setup = NULL;
- }
-
- if (spmi->addr.bit_width) {
- /* A (hopefully) properly formed register bit width. */
- info->io.regspacing = spmi->addr.bit_width / 8;
- } else {
- info->io.regspacing = DEFAULT_REGSPACING;
- }
- info->io.regsize = info->io.regspacing;
- info->io.regshift = spmi->addr.bit_offset;
-
- if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- kfree(info);
- pr_warn(PFX "Unknown ACPI I/O Address type\n");
- return -EIO;
- }
- info->io.addr_data = spmi->addr.address;
-
- pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- rv = add_smi(info);
- if (rv)
- kfree(info);
-
- return rv;
-}
-
-static void spmi_find_bmc(void)
-{
- acpi_status status;
- struct SPMITable *spmi;
- int i;
-
- if (acpi_disabled)
- return;
-
- if (acpi_failure)
- return;
-
- for (i = 0; ; i++) {
- status = acpi_get_table(ACPI_SIG_SPMI, i+1,
- (struct acpi_table_header **)&spmi);
- if (status != AE_OK)
- return;
-
- try_init_spmi(spmi);
- }
-}
-#endif
-
-#if defined(CONFIG_DMI) || defined(CONFIG_ACPI)
-struct resource *ipmi_get_info_from_resources(struct platform_device *pdev,
- struct smi_info *info)
-{
- struct resource *res, *res_second;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (res) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
- }
- if (!res) {
- dev_err(&pdev->dev, "no I/O or memory address\n");
- return NULL;
- }
- info->io.addr_data = res->start;
-
- info->io.regspacing = DEFAULT_REGSPACING;
- res_second = platform_get_resource(pdev,
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
- IORESOURCE_IO : IORESOURCE_MEM,
- 1);
- if (res_second) {
- if (res_second->start > info->io.addr_data)
- info->io.regspacing =
- res_second->start - info->io.addr_data;
- }
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
-
- return res;
-}
-
-#endif
-
-#ifdef CONFIG_DMI
-static int dmi_ipmi_probe(struct platform_device *pdev)
-{
- struct smi_info *info;
- u8 type, slave_addr;
- int rv;
-
- if (!si_trydmi)
- return -ENODEV;
-
- rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
- if (rv)
- return -ENODEV;
-
- info = smi_info_alloc();
- if (!info) {
- pr_err(PFX "Could not allocate SI data\n");
- return -ENOMEM;
- }
-
- info->addr_source = SI_SMBIOS;
- pr_info(PFX "probing via SMBIOS\n");
-
- switch (type) {
- case IPMI_DMI_TYPE_KCS:
- info->si_type = SI_KCS;
- break;
- case IPMI_DMI_TYPE_SMIC:
- info->si_type = SI_SMIC;
- break;
- case IPMI_DMI_TYPE_BT:
- info->si_type = SI_BT;
- break;
- default:
- kfree(info);
- return -EINVAL;
- }
-
- if (!ipmi_get_info_from_resources(pdev, info)) {
- rv = -EINVAL;
- goto err_free;
- }
-
- rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
- if (rv) {
- dev_warn(&pdev->dev, "device has no slave-addr property");
- info->slave_addr = 0x20;
- } else {
- info->slave_addr = slave_addr;
- }
-
- info->irq = platform_get_irq(pdev, 0);
- if (info->irq > 0)
- info->irq_setup = std_irq_setup;
- else
- info->irq = 0;
-
- info->dev = &pdev->dev;
-
- pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
- (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- if (add_smi(info))
- kfree(info);
-
- return 0;
-
-err_free:
- kfree(info);
- return rv;
-}
-#else
-static int dmi_ipmi_probe(struct platform_device *pdev)
+void ipmi_irq_finish_setup(struct si_sm_io *io)
{
- return -ENODEV;
+ if (io->si_type == SI_BT)
+ /* Enable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG,
+ IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
-#endif /* CONFIG_DMI */
-
-#ifdef CONFIG_PCI
-#define PCI_ERMC_CLASSCODE 0x0C0700
-#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
-#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
-#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
-#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
-#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
-
-#define PCI_HP_VENDOR_ID 0x103C
-#define PCI_MMC_DEVICE_ID 0x121A
-#define PCI_MMC_ADDR_CW 0x10
-
-static void ipmi_pci_cleanup(struct smi_info *info)
+void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
- struct pci_dev *pdev = info->addr_source_data;
-
- pci_disable_device(pdev);
+ if (io->si_type == SI_BT)
+ /* Disable the interrupt in the BT interface. */
+ io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
-static int ipmi_pci_probe_regspacing(struct smi_info *info)
+static void std_irq_cleanup(struct si_sm_io *io)
{
- if (info->si_type == SI_KCS) {
- unsigned char status;
- int regspacing;
-
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
- info->io_size = 2;
- info->handlers = &kcs_smi_handlers;
-
- /* detect 1, 4, 16byte spacing */
- for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
- info->io.regspacing = regspacing;
- if (info->io_setup(info)) {
- dev_err(info->dev,
- "Could not setup I/O space\n");
- return DEFAULT_REGSPACING;
- }
- /* write invalid cmd */
- info->io.outputb(&info->io, 1, 0x10);
- /* read status back */
- status = info->io.inputb(&info->io, 1);
- info->io_cleanup(info);
- if (status)
- return regspacing;
- regspacing *= 4;
- }
- }
- return DEFAULT_REGSPACING;
+ ipmi_irq_start_cleanup(io);
+ free_irq(io->irq, io->irq_handler_data);
}
-static int ipmi_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int ipmi_std_irq_setup(struct si_sm_io *io)
{
int rv;
- int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
- struct smi_info *info;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_PCI;
- dev_info(&pdev->dev, "probing via PCI");
-
- switch (class_type) {
- case PCI_ERMC_CLASSCODE_TYPE_SMIC:
- info->si_type = SI_SMIC;
- break;
-
- case PCI_ERMC_CLASSCODE_TYPE_KCS:
- info->si_type = SI_KCS;
- break;
-
- case PCI_ERMC_CLASSCODE_TYPE_BT:
- info->si_type = SI_BT;
- break;
-
- default:
- kfree(info);
- dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
- return -ENOMEM;
- }
-
- rv = pci_enable_device(pdev);
- if (rv) {
- dev_err(&pdev->dev, "couldn't enable PCI device\n");
- kfree(info);
- return rv;
- }
-
- info->addr_source_cleanup = ipmi_pci_cleanup;
- info->addr_source_data = pdev;
-
- if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
- info->io.addr_data = pci_resource_start(pdev, 0);
-
- info->io.regspacing = ipmi_pci_probe_regspacing(info);
- info->io.regsize = DEFAULT_REGSIZE;
- info->io.regshift = 0;
-
- info->irq = pdev->irq;
- if (info->irq)
- info->irq_setup = std_irq_setup;
- info->dev = &pdev->dev;
- pci_set_drvdata(pdev, info);
-
- dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
- &pdev->resource[0], info->io.regsize, info->io.regspacing,
- info->irq);
+ if (!io->irq)
+ return 0;
- rv = add_smi(info);
+ rv = request_irq(io->irq,
+ ipmi_si_irq_handler,
+ IRQF_SHARED,
+ DEVICE_NAME,
+ io->irq_handler_data);
if (rv) {
- kfree(info);
- pci_disable_device(pdev);
- }
-
- return rv;
-}
-
-static void ipmi_pci_remove(struct pci_dev *pdev)
-{
- struct smi_info *info = pci_get_drvdata(pdev);
- cleanup_one_si(info);
-}
-
-static const struct pci_device_id ipmi_pci_devices[] = {
- { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
- { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
-
-static struct pci_driver ipmi_pci_driver = {
- .name = DEVICE_NAME,
- .id_table = ipmi_pci_devices,
- .probe = ipmi_pci_probe,
- .remove = ipmi_pci_remove,
-};
-#endif /* CONFIG_PCI */
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_ipmi_match[] = {
- { .type = "ipmi", .compatible = "ipmi-kcs",
- .data = (void *)(unsigned long) SI_KCS },
- { .type = "ipmi", .compatible = "ipmi-smic",
- .data = (void *)(unsigned long) SI_SMIC },
- { .type = "ipmi", .compatible = "ipmi-bt",
- .data = (void *)(unsigned long) SI_BT },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
-
-static int of_ipmi_probe(struct platform_device *dev)
-{
- const struct of_device_id *match;
- struct smi_info *info;
- struct resource resource;
- const __be32 *regsize, *regspacing, *regshift;
- struct device_node *np = dev->dev.of_node;
- int ret;
- int proplen;
-
- dev_info(&dev->dev, "probing via device tree\n");
-
- match = of_match_device(of_ipmi_match, &dev->dev);
- if (!match)
- return -ENODEV;
-
- if (!of_device_is_available(np))
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &resource);
- if (ret) {
- dev_warn(&dev->dev, PFX "invalid address from OF\n");
- return ret;
- }
-
- regsize = of_get_property(np, "reg-size", &proplen);
- if (regsize && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
- return -EINVAL;
- }
-
- regspacing = of_get_property(np, "reg-spacing", &proplen);
- if (regspacing && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
- return -EINVAL;
- }
-
- regshift = of_get_property(np, "reg-shift", &proplen);
- if (regshift && proplen != 4) {
- dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
- return -EINVAL;
- }
-
- info = smi_info_alloc();
-
- if (!info) {
- dev_err(&dev->dev,
- "could not allocate memory for OF probe\n");
- return -ENOMEM;
- }
-
- info->si_type = (enum si_type) match->data;
- info->addr_source = SI_DEVICETREE;
- info->irq_setup = std_irq_setup;
-
- if (resource.flags & IORESOURCE_IO) {
- info->io_setup = port_setup;
- info->io.addr_type = IPMI_IO_ADDR_SPACE;
- } else {
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- }
-
- info->io.addr_data = resource.start;
-
- info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
- info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
- info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
-
- info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- info->dev = &dev->dev;
-
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
- info->io.addr_data, info->io.regsize, info->io.regspacing,
- info->irq);
-
- dev_set_drvdata(&dev->dev, info);
-
- ret = add_smi(info);
- if (ret) {
- kfree(info);
- return ret;
- }
- return 0;
-}
-#else
-#define of_ipmi_match NULL
-static int of_ipmi_probe(struct platform_device *dev)
-{
- return -ENODEV;
-}
-#endif
-
-#ifdef CONFIG_ACPI
-static int find_slave_address(struct smi_info *info, int slave_addr)
-{
-#ifdef CONFIG_IPMI_DMI_DECODE
- if (!slave_addr) {
- int type = -1;
- u32 flags = IORESOURCE_IO;
-
- switch (info->si_type) {
- case SI_KCS:
- type = IPMI_DMI_TYPE_KCS;
- break;
- case SI_BT:
- type = IPMI_DMI_TYPE_BT;
- break;
- case SI_SMIC:
- type = IPMI_DMI_TYPE_SMIC;
- break;
- }
-
- if (info->io.addr_type == IPMI_MEM_ADDR_SPACE)
- flags = IORESOURCE_MEM;
-
- slave_addr = ipmi_dmi_get_slave_addr(type, flags,
- info->io.addr_data);
- }
-#endif
-
- return slave_addr;
-}
-
-static int acpi_ipmi_probe(struct platform_device *dev)
-{
- struct smi_info *info;
- acpi_handle handle;
- acpi_status status;
- unsigned long long tmp;
- struct resource *res;
- int rv = -EINVAL;
-
- if (!si_tryacpi)
- return -ENODEV;
-
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle)
- return -ENODEV;
-
- info = smi_info_alloc();
- if (!info)
- return -ENOMEM;
-
- info->addr_source = SI_ACPI;
- dev_info(&dev->dev, PFX "probing via ACPI\n");
-
- info->addr_info.acpi_info.acpi_handle = handle;
-
- /* _IFT tells us the interface type: KCS, BT, etc */
- status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
- goto err_free;
- }
-
- switch (tmp) {
- case 1:
- info->si_type = SI_KCS;
- break;
- case 2:
- info->si_type = SI_SMIC;
- break;
- case 3:
- info->si_type = SI_BT;
- break;
- case 4: /* SSIF, just ignore */
- rv = -ENODEV;
- goto err_free;
- default:
- dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
- goto err_free;
- }
-
- res = ipmi_get_info_from_resources(dev, info);
- if (!res) {
- rv = -EINVAL;
- goto err_free;
- }
-
- /* If _GPE exists, use it; otherwise use standard interrupts */
- status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
- if (ACPI_SUCCESS(status)) {
- info->irq = tmp;
- info->irq_setup = acpi_gpe_irq_setup;
+ dev_warn(io->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, io->irq);
+ io->irq = 0;
} else {
- int irq = platform_get_irq(dev, 0);
-
- if (irq > 0) {
- info->irq = irq;
- info->irq_setup = std_irq_setup;
- }
+ io->irq_cleanup = std_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using irq %d\n", io->irq);
}
- info->slave_addr = find_slave_address(info, info->slave_addr);
-
- info->dev = &dev->dev;
- platform_set_drvdata(dev, info);
-
- dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
- res, info->io.regsize, info->io.regspacing,
- info->irq);
-
- rv = add_smi(info);
- if (rv)
- kfree(info);
-
- return rv;
-
-err_free:
- kfree(info);
return rv;
}
-static const struct acpi_device_id acpi_ipmi_match[] = {
- { "IPI0001", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
-#else
-static int acpi_ipmi_probe(struct platform_device *dev)
-{
- return -ENODEV;
-}
-#endif
-
-static int ipmi_probe(struct platform_device *dev)
-{
- if (of_ipmi_probe(dev) == 0)
- return 0;
-
- if (acpi_ipmi_probe(dev) == 0)
- return 0;
-
- return dmi_ipmi_probe(dev);
-}
-
-static int ipmi_remove(struct platform_device *dev)
-{
- struct smi_info *info = dev_get_drvdata(&dev->dev);
-
- cleanup_one_si(info);
- return 0;
-}
-
-static struct platform_driver ipmi_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .of_match_table = of_ipmi_match,
- .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
- },
- .probe = ipmi_probe,
- .remove = ipmi_remove,
-};
-
-#ifdef CONFIG_PARISC
-static int __init ipmi_parisc_probe(struct parisc_device *dev)
-{
- struct smi_info *info;
- int rv;
-
- info = smi_info_alloc();
-
- if (!info) {
- dev_err(&dev->dev,
- "could not allocate memory for PARISC probe\n");
- return -ENOMEM;
- }
-
- info->si_type = SI_KCS;
- info->addr_source = SI_DEVICETREE;
- info->io_setup = mem_setup;
- info->io.addr_type = IPMI_MEM_ADDR_SPACE;
- info->io.addr_data = dev->hpa.start;
- info->io.regsize = 1;
- info->io.regspacing = 1;
- info->io.regshift = 0;
- info->irq = 0; /* no interrupt */
- info->irq_setup = NULL;
- info->dev = &dev->dev;
-
- dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
-
- dev_set_drvdata(&dev->dev, info);
-
- rv = add_smi(info);
- if (rv) {
- kfree(info);
- return rv;
- }
-
- return 0;
-}
-
-static int __exit ipmi_parisc_remove(struct parisc_device *dev)
-{
- cleanup_one_si(dev_get_drvdata(&dev->dev));
- return 0;
-}
-
-static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
- { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
-
-static struct parisc_driver ipmi_parisc_driver __refdata = {
- .name = "ipmi",
- .id_table = ipmi_parisc_tbl,
- .probe = ipmi_parisc_probe,
- .remove = __exit_p(ipmi_parisc_remove),
-};
-#endif /* CONFIG_PARISC */
-
static int wait_for_msg_done(struct smi_info *smi_info)
{
enum si_sm_result smi_result;
@@ -2925,7 +1356,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
resp, IPMI_MAX_MSG_LENGTH);
/* Check and record info from the get device id, in case we need it. */
- rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
+ rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
+ resp + 2, resp_len - 2, &smi_info->device_id);
out:
kfree(resp);
@@ -2949,7 +1381,7 @@ static int get_global_enables(struct smi_info *smi_info, u8 *enables)
rv = wait_for_msg_done(smi_info);
if (rv) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error getting response from get global enables command: %d\n",
rv);
goto out;
@@ -2962,7 +1394,7 @@ static int get_global_enables(struct smi_info *smi_info, u8 *enables)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Invalid return from get global enables command: %ld %x %x %x\n",
resp_len, resp[0], resp[1], resp[2]);
rv = -EINVAL;
@@ -2997,7 +1429,7 @@ static int set_global_enables(struct smi_info *smi_info, u8 enables)
rv = wait_for_msg_done(smi_info);
if (rv) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Error getting response from set global enables command: %d\n",
rv);
goto out;
@@ -3009,7 +1441,7 @@ static int set_global_enables(struct smi_info *smi_info, u8 enables)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"Invalid return from set global enables command: %ld %x %x\n",
resp_len, resp[0], resp[1]);
rv = -EINVAL;
@@ -3045,7 +1477,7 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
}
if (rv < 0) {
- dev_err(smi_info->dev,
+ dev_err(smi_info->io.dev,
"Cannot check clearing the rcv irq: %d\n", rv);
return;
}
@@ -3055,7 +1487,7 @@ static void check_clr_rcv_irq(struct smi_info *smi_info)
* An error when setting the event buffer bit means
* clearing the bit is not supported.
*/
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
}
@@ -3071,7 +1503,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
u8 enables = 0;
int rv;
- if (!smi_info->irq)
+ if (!smi_info->io.irq)
return;
rv = get_global_enables(smi_info, &enables);
@@ -3081,7 +1513,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
}
if (rv < 0) {
- dev_err(smi_info->dev,
+ dev_err(smi_info->io.dev,
"Cannot check setting the rcv irq: %d\n", rv);
return;
}
@@ -3091,7 +1523,7 @@ static void check_set_rcv_irq(struct smi_info *smi_info)
* An error when setting the event buffer bit means
* setting the bit is not supported.
*/
- dev_warn(smi_info->dev,
+ dev_warn(smi_info->io.dev,
"The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
smi_info->cannot_disable_irq = true;
smi_info->irq_enable_broken = true;
@@ -3173,11 +1605,12 @@ out:
return rv;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
{
struct smi_info *smi = m->private;
- seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+ seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);
return 0;
}
@@ -3199,7 +1632,7 @@ static int smi_si_stats_proc_show(struct seq_file *m, void *v)
struct smi_info *smi = m->private;
seq_printf(m, "interrupts_enabled: %d\n",
- smi->irq && !smi->interrupt_disabled);
+ smi->io.irq && !smi->interrupt_disabled);
seq_printf(m, "short_timeouts: %u\n",
smi_get_stat(smi, short_timeouts));
seq_printf(m, "long_timeouts: %u\n",
@@ -3243,14 +1676,14 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
seq_printf(m,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
- si_to_str[smi->si_type],
+ si_to_str[smi->io.si_type],
addr_space_to_str[smi->io.addr_type],
smi->io.addr_data,
smi->io.regspacing,
smi->io.regsize,
smi->io.regshift,
- smi->irq,
- smi->slave_addr);
+ smi->io.irq,
+ smi->io.slave_addr);
return 0;
}
@@ -3266,6 +1699,93 @@ static const struct file_operations smi_params_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
+
+#define IPMI_SI_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct smi_info *smi_info = dev_get_drvdata(dev); \
+ \
+ return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+ int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
+
+ return snprintf(buf, 10, "%d\n", enabled);
+}
+static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
+ ipmi_interrupts_enabled_show, NULL);
+
+IPMI_SI_ATTR(short_timeouts);
+IPMI_SI_ATTR(long_timeouts);
+IPMI_SI_ATTR(idles);
+IPMI_SI_ATTR(interrupts);
+IPMI_SI_ATTR(attentions);
+IPMI_SI_ATTR(flag_fetches);
+IPMI_SI_ATTR(hosed_count);
+IPMI_SI_ATTR(complete_transactions);
+IPMI_SI_ATTR(events);
+IPMI_SI_ATTR(watchdog_pretimeouts);
+IPMI_SI_ATTR(incoming_messages);
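For reference, the first of those invocations, IPMI_SI_ATTR(short_timeouts), expands to roughly the following, reusing the smi_info and smi_get_stat() definitions already in this file:

static ssize_t ipmi_short_timeouts_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%u\n",
			smi_get_stat(smi_info, short_timeouts));
}
static DEVICE_ATTR(short_timeouts, S_IRUGO, ipmi_short_timeouts_show, NULL);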
+
+static ssize_t ipmi_params_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smi_info *smi_info = dev_get_drvdata(dev);
+
+ return snprintf(buf, 200,
+ "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+ si_to_str[smi_info->io.si_type],
+ addr_space_to_str[smi_info->io.addr_type],
+ smi_info->io.addr_data,
+ smi_info->io.regspacing,
+ smi_info->io.regsize,
+ smi_info->io.regshift,
+ smi_info->io.irq,
+ smi_info->io.slave_addr);
+}
+static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);
+
+static struct attribute *ipmi_si_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_interrupts_enabled.attr,
+ &dev_attr_short_timeouts.attr,
+ &dev_attr_long_timeouts.attr,
+ &dev_attr_idles.attr,
+ &dev_attr_interrupts.attr,
+ &dev_attr_attentions.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed_count.attr,
+ &dev_attr_complete_transactions.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_incoming_messages.attr,
+ &dev_attr_params.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_si_dev_attr_group = {
+ .attrs = ipmi_si_dev_attrs,
+};
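Once this group is attached to the interface's struct device (that registration is outside the hunks shown here), each attribute becomes a read-only sysfs file alongside the device: type reports kcs/smic/bt, interrupts_enabled mirrors the irq state, the counter files expose the stats[] array, and params prints the same hotmod-style line that smi_params_proc_show() produces above.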
/*
* oem_data_avail_to_receive_msg_avail
@@ -3388,7 +1908,7 @@ setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
struct ipmi_device_id *id = &smi_info->device_id;
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
- smi_info->si_type == SI_BT)
+ smi_info->io.si_type == SI_BT)
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
@@ -3424,7 +1944,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
del_timer_sync(&smi_info->si_timer);
}
-static int is_new_interface(struct smi_info *info)
+static struct smi_info *find_dup_si(struct smi_info *info)
{
struct smi_info *e;
@@ -3437,31 +1957,61 @@ static int is_new_interface(struct smi_info *info)
* slave address but SMBIOS does. Pick it up from
* any source that has it available.
*/
- if (info->slave_addr && !e->slave_addr)
- e->slave_addr = info->slave_addr;
- return 0;
+ if (info->io.slave_addr && !e->io.slave_addr)
+ e->io.slave_addr = info->io.slave_addr;
+ return e;
}
}
- return 1;
+ return NULL;
}
-static int add_smi(struct smi_info *new_smi)
+int ipmi_si_add_smi(struct si_sm_io *io)
{
int rv = 0;
+ struct smi_info *new_smi, *dup;
+
+ if (!io->io_setup) {
+ if (io->addr_type == IPMI_IO_ADDR_SPACE) {
+ io->io_setup = ipmi_si_port_setup;
+ } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
+ io->io_setup = ipmi_si_mem_setup;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
+ if (!new_smi)
+ return -ENOMEM;
+ spin_lock_init(&new_smi->si_lock);
+
+ new_smi->io = *io;
mutex_lock(&smi_infos_lock);
- if (!is_new_interface(new_smi)) {
- pr_info(PFX "%s-specified %s state machine: duplicate\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
- rv = -EBUSY;
- goto out_err;
+ dup = find_dup_si(new_smi);
+ if (dup) {
+ if (new_smi->io.addr_source == SI_ACPI &&
+ dup->io.addr_source == SI_SMBIOS) {
+ /* We prefer ACPI over SMBIOS. */
+ dev_info(dup->io.dev,
+ "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
+ si_to_str[new_smi->io.si_type]);
+ cleanup_one_si(dup);
+ } else {
+ dev_info(new_smi->io.dev,
+ "%s-specified %s state machine: duplicate\n",
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
+ rv = -EBUSY;
+ kfree(new_smi);
+ goto out_err;
+ }
}
pr_info(PFX "Adding %s-specified %s state machine\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type]);
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type]);
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
@@ -3470,6 +2020,14 @@ static int add_smi(struct smi_info *new_smi)
list_add_tail(&new_smi->link, &smi_infos);
+ if (initialized) {
+ rv = try_smi_init(new_smi);
+ if (rv) {
+ mutex_unlock(&smi_infos_lock);
+ cleanup_one_si(new_smi);
+ return rv;
+ }
+ }
out_err:
mutex_unlock(&smi_infos_lock);
return rv;
@@ -3487,13 +2045,13 @@ static int try_smi_init(struct smi_info *new_smi)
char *init_name = NULL;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
- ipmi_addr_src_to_str(new_smi->addr_source),
- si_to_str[new_smi->si_type],
+ ipmi_addr_src_to_str(new_smi->io.addr_source),
+ si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_type],
new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
+ new_smi->io.slave_addr, new_smi->io.irq);
- switch (new_smi->si_type) {
+ switch (new_smi->io.si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
@@ -3515,7 +2073,7 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->intf_num = smi_num;
/* Do this early so it's available for logs. */
- if (!new_smi->dev) {
+ if (!new_smi->io.dev) {
init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
new_smi->intf_num);
@@ -3529,33 +2087,33 @@ static int try_smi_init(struct smi_info *new_smi)
pr_err(PFX "Unable to allocate platform device\n");
goto out_err;
}
- new_smi->dev = &new_smi->pdev->dev;
- new_smi->dev->driver = &ipmi_driver.driver;
+ new_smi->io.dev = &new_smi->pdev->dev;
+ new_smi->io.dev->driver = &ipmi_platform_driver.driver;
/* Nulled by device_add() */
- new_smi->dev->init_name = init_name;
+ new_smi->io.dev->init_name = init_name;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- pr_err(PFX "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
- new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
- &new_smi->io);
+ new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
+ &new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
- rv = new_smi->io_setup(new_smi);
+ rv = new_smi->io.io_setup(&new_smi->io);
if (rv) {
- dev_err(new_smi->dev, "Could not set up I/O space\n");
+ dev_err(new_smi->io.dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
- if (new_smi->addr_source)
- dev_err(new_smi->dev, "Interface detection failed\n");
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -3566,8 +2124,9 @@ static int try_smi_init(struct smi_info *new_smi)
*/
rv = try_get_dev_id(new_smi);
if (rv) {
- if (new_smi->addr_source)
- dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
+ if (new_smi->io.addr_source)
+ dev_err(new_smi->io.dev,
+ "There appears to be no BMC at this location\n");
goto out_err;
}
@@ -3599,7 +2158,7 @@ static int try_smi_init(struct smi_info *new_smi)
* IRQ is defined to be set when non-zero. req_events will
* cause a global flags check that will enable interrupts.
*/
- if (new_smi->irq) {
+ if (new_smi->io.irq) {
new_smi->interrupt_disabled = false;
atomic_set(&new_smi->req_events, 1);
}
@@ -3607,30 +2166,40 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->pdev) {
rv = platform_device_add(new_smi->pdev);
if (rv) {
- dev_err(new_smi->dev,
+ dev_err(new_smi->io.dev,
"Unable to register system interface device: %d\n",
rv);
goto out_err;
}
- new_smi->dev_registered = true;
+ }
+
+ dev_set_drvdata(new_smi->io.dev, new_smi);
+ rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ if (rv) {
+ dev_err(new_smi->io.dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out_err_stop_timer;
}
rv = ipmi_register_smi(&handlers,
new_smi,
- &new_smi->device_id,
- new_smi->dev,
- new_smi->slave_addr);
+ new_smi->io.dev,
+ new_smi->io.slave_addr);
if (rv) {
- dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ dev_err(new_smi->io.dev,
+ "Unable to register device: error %d\n",
rv);
- goto out_err_stop_timer;
+ goto out_err_remove_attrs;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
&smi_type_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3638,7 +2207,8 @@ static int try_smi_init(struct smi_info *new_smi)
&smi_si_stats_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
@@ -3646,21 +2216,27 @@ static int try_smi_init(struct smi_info *new_smi)
&smi_params_proc_ops,
new_smi);
if (rv) {
- dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
+ dev_err(new_smi->io.dev,
+ "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
+#endif
/* Don't increment till we know we have succeeded. */
smi_num++;
- dev_info(new_smi->dev, "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->io.si_type]);
- WARN_ON(new_smi->dev->init_name != NULL);
+ WARN_ON(new_smi->io.dev->init_name != NULL);
kfree(init_name);
return 0;
+out_err_remove_attrs:
+ device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+ dev_set_drvdata(new_smi->io.dev, NULL);
+
out_err_stop_timer:
wait_for_timer_and_thread(new_smi);
@@ -3673,9 +2249,9 @@ out_err:
ipmi_unregister_smi(intf);
}
- if (new_smi->irq_cleanup) {
- new_smi->irq_cleanup(new_smi);
- new_smi->irq_cleanup = NULL;
+ if (new_smi->io.irq_cleanup) {
+ new_smi->io.irq_cleanup(&new_smi->io);
+ new_smi->io.irq_cleanup = NULL;
}
/*
@@ -3691,22 +2267,20 @@ out_err:
kfree(new_smi->si_sm);
new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup) {
- new_smi->addr_source_cleanup(new_smi);
- new_smi->addr_source_cleanup = NULL;
+ if (new_smi->io.addr_source_cleanup) {
+ new_smi->io.addr_source_cleanup(&new_smi->io);
+ new_smi->io.addr_source_cleanup = NULL;
}
- if (new_smi->io_cleanup) {
- new_smi->io_cleanup(new_smi);
- new_smi->io_cleanup = NULL;
+ if (new_smi->io.io_cleanup) {
+ new_smi->io.io_cleanup(&new_smi->io);
+ new_smi->io.io_cleanup = NULL;
}
- if (new_smi->dev_registered) {
+ if (new_smi->pdev) {
platform_device_unregister(new_smi->pdev);
- new_smi->dev_registered = false;
new_smi->pdev = NULL;
} else if (new_smi->pdev) {
platform_device_put(new_smi->pdev);
- new_smi->pdev = NULL;
}
kfree(init_name);
@@ -3716,97 +2290,57 @@ out_err:
static int init_ipmi_si(void)
{
- int i;
- char *str;
- int rv;
struct smi_info *e;
enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
- initialized = 1;
-
- if (si_tryplatform) {
- rv = platform_driver_register(&ipmi_driver);
- if (rv) {
- pr_err(PFX "Unable to register driver: %d\n", rv);
- return rv;
- }
- }
-
- /* Parse out the si_type string into its components. */
- str = si_type_str;
- if (*str != '\0') {
- for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
- si_type[i] = str;
- str = strchr(str, ',');
- if (str) {
- *str = '\0';
- str++;
- } else {
- break;
- }
- }
- }
pr_info("IPMI System Interface driver.\n");
/* If the user gave us a device, they presumably want us to use it */
- if (!hardcode_find_bmc())
- return 0;
+ if (!ipmi_si_hardcode_find_bmc())
+ goto do_scan;
-#ifdef CONFIG_PCI
- if (si_trypci) {
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- pr_err(PFX "Unable to register PCI driver: %d\n", rv);
- else
- pci_registered = true;
- }
-#endif
+ ipmi_si_platform_init();
-#ifdef CONFIG_ACPI
- if (si_tryacpi)
- spmi_find_bmc();
-#endif
+ ipmi_si_pci_init();
-#ifdef CONFIG_PARISC
- register_parisc_driver(&ipmi_parisc_driver);
- parisc_registered = true;
-#endif
+ ipmi_si_parisc_init();
/* We prefer devices with interrupts, but in the case of a machine
with multiple BMCs we assume that there will be several instances
of a given type so if we succeed in registering a type then also
try to register everything else of the same type */
-
+do_scan:
mutex_lock(&smi_infos_lock);
list_for_each_entry(e, &smi_infos, link) {
/* Try to register a device if it has an IRQ and we either
haven't successfully registered a device yet or this
device has the same type as one we successfully registered */
- if (e->irq && (!type || e->addr_source == type)) {
+ if (e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
- type = e->addr_source;
+ type = e->io.addr_source;
}
}
}
/* type will only have been set if we successfully registered an si */
- if (type) {
- mutex_unlock(&smi_infos_lock);
- return 0;
- }
+ if (type)
+ goto skip_fallback_noirq;
/* Fall back to the preferred device */
list_for_each_entry(e, &smi_infos, link) {
- if (!e->irq && (!type || e->addr_source == type)) {
+ if (!e->io.irq && (!type || e->io.addr_source == type)) {
if (!try_smi_init(e)) {
- type = e->addr_source;
+ type = e->io.addr_source;
}
}
}
+
+skip_fallback_noirq:
+ initialized = 1;
mutex_unlock(&smi_infos_lock);
if (type)
@@ -3843,8 +2377,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
}
}
- if (to_clean->dev)
- dev_set_drvdata(to_clean->dev, NULL);
+ device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
+ dev_set_drvdata(to_clean->io.dev, NULL);
list_del(&to_clean->link);
@@ -3852,8 +2386,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
* Make sure that interrupts, the timer and the thread are
* stopped and will not run again.
*/
- if (to_clean->irq_cleanup)
- to_clean->irq_cleanup(to_clean);
+ if (to_clean->io.irq_cleanup)
+ to_clean->io.irq_cleanup(&to_clean->io);
wait_for_timer_and_thread(to_clean);
/*
@@ -3865,7 +2399,8 @@ static void cleanup_one_si(struct smi_info *to_clean)
poll(to_clean);
schedule_timeout_uninterruptible(1);
}
- disable_si_irq(to_clean, false);
+ if (to_clean->handlers)
+ disable_si_irq(to_clean, false);
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
poll(to_clean);
schedule_timeout_uninterruptible(1);
@@ -3876,17 +2411,53 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean->si_sm);
- if (to_clean->addr_source_cleanup)
- to_clean->addr_source_cleanup(to_clean);
- if (to_clean->io_cleanup)
- to_clean->io_cleanup(to_clean);
+ if (to_clean->io.addr_source_cleanup)
+ to_clean->io.addr_source_cleanup(&to_clean->io);
+ if (to_clean->io.io_cleanup)
+ to_clean->io.io_cleanup(&to_clean->io);
- if (to_clean->dev_registered)
+ if (to_clean->pdev)
platform_device_unregister(to_clean->pdev);
kfree(to_clean);
}
+int ipmi_si_remove_by_dev(struct device *dev)
+{
+ struct smi_info *e;
+ int rv = -ENOENT;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ if (e->io.dev == dev) {
+ cleanup_one_si(e);
+ rv = 0;
+ break;
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ return rv;
+}
+
+void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
+ unsigned long addr)
+{
+ /* remove */
+ struct smi_info *e, *tmp_e;
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
+ if (e->io.addr_type != addr_space)
+ continue;
+ if (e->io.si_type != si_type)
+ continue;
+ if (e->io.addr_data == addr)
+ cleanup_one_si(e);
+ }
+ mutex_unlock(&smi_infos_lock);
+}
+
static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
@@ -3894,16 +2465,11 @@ static void cleanup_ipmi_si(void)
if (!initialized)
return;
-#ifdef CONFIG_PCI
- if (pci_registered)
- pci_unregister_driver(&ipmi_pci_driver);
-#endif
-#ifdef CONFIG_PARISC
- if (parisc_registered)
- unregister_parisc_driver(&ipmi_parisc_driver);
-#endif
+ ipmi_si_pci_shutdown();
+
+ ipmi_si_parisc_shutdown();
- platform_driver_unregister(&ipmi_driver);
+ ipmi_si_platform_shutdown();
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
new file mode 100644
index 000000000000..8796396ecd0f
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -0,0 +1,144 @@
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char intf_mem_inb(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inl(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+
+#ifdef readq
+static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset)
+{
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static void mem_outq(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
+}
+#endif
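
A worked example of the addressing math used by these accessors (values are illustrative, not from any particular board):

/*
 * With regsize = 2, regspacing = 4 and regshift = 8, virtual register 1
 * is fetched as
 *
 *	(readw(io->addr + 1 * 4) >> 8) & 0xff
 *
 * i.e. consecutive IPMI registers sit 4 bytes apart and the interesting
 * byte lives in bits 15:8 of each 16-bit hardware register.
 */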
+
+static void mem_region_cleanup(struct si_sm_io *io, int num)
+{
+ unsigned long addr = io->addr_data;
+ int idx;
+
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * io->regspacing,
+ io->regsize);
+}
+
+static void mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr) {
+ iounmap(io->addr);
+ mem_region_cleanup(io, io->io_size);
+ }
+}
+
+int ipmi_si_mem_setup(struct si_sm_io *io)
+{
+ unsigned long addr = io->addr_data;
+ int mapsize, idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ io->io_cleanup = mem_cleanup;
+
+ /*
+ * Figure out the actual readb/readw/readl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = intf_mem_inb;
+ io->outputb = intf_mem_outb;
+ break;
+ case 2:
+ io->inputb = intf_mem_inw;
+ io->outputb = intf_mem_outw;
+ break;
+ case 4:
+ io->inputb = intf_mem_inl;
+ io->outputb = intf_mem_outl;
+ break;
+#ifdef readq
+ case 8:
+ io->inputb = mem_inq;
+ io->outputb = mem_outq;
+ break;
+#endif
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_mem_region(addr + idx * io->regspacing,
+ io->regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(io, idx);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Calculate the total amount of memory to claim. This is an
+ * unusual looking calculation, but it avoids claiming any
+ * more memory than it has to. It will claim everything
+ * from the first address to the end of the last full
+ * register.
+ */
+ mapsize = ((io->io_size * io->regspacing)
+ - (io->regspacing - io->regsize));
+ io->addr = ioremap(addr, mapsize);
+ if (io->addr == NULL) {
+ mem_region_cleanup(io, io->io_size);
+ return -EIO;
+ }
+ return 0;
+}
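
To make the mapsize expression above concrete, here is the arithmetic for one illustrative layout:

/*
 * io_size = 3, regspacing = 4, regsize = 1:
 *
 *	mapsize = 3 * 4 - (4 - 1) = 9
 *
 * Registers start at offsets 0, 4 and 8 and the last one ends at offset 9,
 * so 9 bytes cover everything without claiming the padding that follows
 * the final register.
 */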
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
new file mode 100644
index 000000000000..090b073ab441
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -0,0 +1,58 @@
+
+#include <linux/module.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#include "ipmi_si.h"
+
+static bool parisc_registered;
+
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct si_sm_io io;
+
+ io.si_type = SI_KCS;
+ io.addr_source = SI_DEVICETREE;
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = dev->hpa.start;
+ io.regsize = 1;
+ io.regspacing = 1;
+ io.regshift = 0;
+ io.irq = 0; /* no interrupt */
+ io.irq_setup = NULL;
+ io.dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", io.addr_data);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
+{
+ return ipmi_si_remove_by_dev(&dev->dev);
+}
+
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = __exit_p(ipmi_parisc_remove),
+};
+
+void ipmi_si_parisc_init(void)
+{
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = true;
+}
+
+void ipmi_si_parisc_shutdown(void)
+{
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
new file mode 100644
index 000000000000..99771f5cad07
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -0,0 +1,166 @@
+/*
+ * ipmi_si_pci.c
+ *
+ * Handling for IPMI devices on the PCI bus.
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "ipmi_si.h"
+
+#define PFX "ipmi_pci: "
+
+static bool pci_registered;
+
+static bool si_trypci = true;
+
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via pci");
+
+#define PCI_ERMC_CLASSCODE 0x0C0700
+#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
+#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
+#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
+#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
+#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
+
+#define PCI_HP_VENDOR_ID 0x103C
+#define PCI_MMC_DEVICE_ID 0x121A
+#define PCI_MMC_ADDR_CW 0x10
+
+static void ipmi_pci_cleanup(struct si_sm_io *io)
+{
+ struct pci_dev *pdev = io->addr_source_data;
+
+ pci_disable_device(pdev);
+}
+
+static int ipmi_pci_probe_regspacing(struct si_sm_io *io)
+{
+ if (io->si_type == SI_KCS) {
+ unsigned char status;
+ int regspacing;
+
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ io->regspacing = regspacing;
+ if (io->io_setup(io)) {
+ dev_err(io->dev,
+ "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
+ }
+ /* write invalid cmd */
+ io->outputb(io, 1, 0x10);
+ /* read status back */
+ status = io->inputb(io, 1);
+ io->io_cleanup(io);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
+ }
+ }
+ return DEFAULT_REGSPACING;
+}
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv;
+ int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_PCI;
+ dev_info(&pdev->dev, "probing via PCI");
+
+ switch (class_type) {
+ case PCI_ERMC_CLASSCODE_TYPE_SMIC:
+ io.si_type = SI_SMIC;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_KCS:
+ io.si_type = SI_KCS;
+ break;
+
+ case PCI_ERMC_CLASSCODE_TYPE_BT:
+ io.si_type = SI_BT;
+ break;
+
+ default:
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
+ return -ENOMEM;
+ }
+
+ rv = pci_enable_device(pdev);
+ if (rv) {
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
+ return rv;
+ }
+
+ io.addr_source_cleanup = ipmi_pci_cleanup;
+ io.addr_source_data = pdev;
+
+ if (pci_resource_flags(pdev, 0) & IORESOURCE_IO)
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ else
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ io.addr_data = pci_resource_start(pdev, 0);
+
+ io.regspacing = ipmi_pci_probe_regspacing(&io);
+ io.regsize = DEFAULT_REGSIZE;
+ io.regshift = 0;
+
+ io.irq = pdev->irq;
+ if (io.irq)
+ io.irq_setup = ipmi_std_irq_setup;
+
+ io.dev = &pdev->dev;
+
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], io.regsize, io.regspacing, io.irq);
+
+ rv = ipmi_si_add_smi(&io);
+ if (rv)
+ pci_disable_device(pdev);
+
+ return rv;
+}
+
+static void ipmi_pci_remove(struct pci_dev *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+static const struct pci_device_id ipmi_pci_devices[] = {
+ { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
+ { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
+
+static struct pci_driver ipmi_pci_driver = {
+ .name = DEVICE_NAME,
+ .id_table = ipmi_pci_devices,
+ .probe = ipmi_pci_probe,
+ .remove = ipmi_pci_remove,
+};
+
+void ipmi_si_pci_init(void)
+{
+ if (si_trypci) {
+ int rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ pr_err(PFX "Unable to register PCI driver: %d\n", rv);
+ else
+ pci_registered = true;
+ }
+}
+
+void ipmi_si_pci_shutdown(void)
+{
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
new file mode 100644
index 000000000000..9573f1116450
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -0,0 +1,593 @@
+/*
+ * ipmi_si_platform.c
+ *
+ * Handling for platform devices in IPMI (ACPI, OF, and things
+ * coming from the platform).
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/acpi.h>
+#include "ipmi_si.h"
+#include "ipmi_dmi.h"
+
+#define PFX "ipmi_platform: "
+
+static bool si_tryplatform = true;
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = true;
+#endif
+#ifdef CONFIG_OF
+static bool si_tryopenfirmware = true;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = true;
+#else
+static bool si_trydmi = false;
+#endif
+
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via platform"
+ " interfaces besides ACPI, OpenFirmware, and DMI");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_OF
+module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via OpenFirmware");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via DMI");
+#endif
+
+#ifdef CONFIG_ACPI
+
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially. Once we don't find a table, there
+ * are no more.
+ */
+static int acpi_failure;
+
+/* For GPE-type interrupts. */
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
+{
+ struct si_sm_io *io = context;
+
+ ipmi_si_irq_handler(io->irq, io->irq_handler_data);
+ return ACPI_INTERRUPT_HANDLED;
+}
+
+static void acpi_gpe_irq_cleanup(struct si_sm_io *io)
+{
+ if (!io->irq)
+ return;
+
+ ipmi_irq_start_cleanup(io);
+ acpi_remove_gpe_handler(NULL, io->irq, &ipmi_acpi_gpe);
+}
+
+static int acpi_gpe_irq_setup(struct si_sm_io *io)
+{
+ acpi_status status;
+
+ if (!io->irq)
+ return 0;
+
+ status = acpi_install_gpe_handler(NULL,
+ io->irq,
+ ACPI_GPE_LEVEL_TRIGGERED,
+ &ipmi_acpi_gpe,
+ io);
+ if (status != AE_OK) {
+ dev_warn(io->dev,
+ "Unable to claim ACPI GPE %d, running polled\n",
+ io->irq);
+ io->irq = 0;
+ return -EINVAL;
+ } else {
+ io->irq_cleanup = acpi_gpe_irq_cleanup;
+ ipmi_irq_finish_setup(io);
+ dev_info(io->dev, "Using ACPI GPE %d\n", io->irq);
+ return 0;
+ }
+}
+
+/*
+ * Defined at
+ * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
+ */
+struct SPMITable {
+ s8 Signature[4];
+ u32 Length;
+ u8 Revision;
+ u8 Checksum;
+ s8 OEMID[6];
+ s8 OEMTableID[8];
+ s8 OEMRevision[4];
+ s8 CreatorID[4];
+ s8 CreatorRevision[4];
+ u8 InterfaceType;
+ u8 IPMIlegacy;
+ s16 SpecificationRevision;
+
+ /*
+ * Bit 0 - SCI interrupt supported
+ * Bit 1 - I/O APIC/SAPIC
+ */
+ u8 InterruptType;
+
+ /*
+ * If bit 0 of InterruptType is set, then this is the SCI
+ * interrupt in the GPEx_STS register.
+ */
+ u8 GPE;
+
+ s16 Reserved;
+
+ /*
+ * If bit 1 of InterruptType is set, then this is the I/O
+ * APIC/SAPIC interrupt.
+ */
+ u32 GlobalSystemInterrupt;
+
+ /* The actual register address. */
+ struct acpi_generic_address addr;
+
+ u8 UID[4];
+
+ s8 spmi_id[1]; /* A '\0' terminated array starts here. */
+};
+
+static int try_init_spmi(struct SPMITable *spmi)
+{
+ struct si_sm_io io;
+
+ if (spmi->IPMIlegacy != 1) {
+ pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_SPMI;
+ pr_info(PFX "probing via SPMI\n");
+
+ /* Figure out the interface type. */
+ switch (spmi->InterfaceType) {
+ case 1: /* KCS */
+ io.si_type = SI_KCS;
+ break;
+ case 2: /* SMIC */
+ io.si_type = SI_SMIC;
+ break;
+ case 3: /* BT */
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ return -EIO;
+ default:
+ pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
+ return -EIO;
+ }
+
+ if (spmi->InterruptType & 1) {
+ /* We've got a GPE interrupt. */
+ io.irq = spmi->GPE;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else if (spmi->InterruptType & 2) {
+ /* We've got an APIC/SAPIC interrupt. */
+ io.irq = spmi->GlobalSystemInterrupt;
+ io.irq_setup = ipmi_std_irq_setup;
+ } else {
+ /* Use the default interrupt setting. */
+ io.irq = 0;
+ io.irq_setup = NULL;
+ }
+
+ if (spmi->addr.bit_width) {
+ /* A (hopefully) properly formed register bit width. */
+ io.regspacing = spmi->addr.bit_width / 8;
+ } else {
+ io.regspacing = DEFAULT_REGSPACING;
+ }
+ io.regsize = io.regspacing;
+ io.regshift = spmi->addr.bit_offset;
+
+ if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+ } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ pr_warn(PFX "Unknown ACPI I/O Address type\n");
+ return -EIO;
+ }
+ io.addr_data = spmi->addr.address;
+
+ pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+ (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void spmi_find_bmc(void)
+{
+ acpi_status status;
+ struct SPMITable *spmi;
+ int i;
+
+ if (acpi_disabled)
+ return;
+
+ if (acpi_failure)
+ return;
+
+ for (i = 0; ; i++) {
+ status = acpi_get_table(ACPI_SIG_SPMI, i+1,
+ (struct acpi_table_header **)&spmi);
+ if (status != AE_OK)
+ return;
+
+ try_init_spmi(spmi);
+ }
+}
+#endif
+
+static struct resource *
+ipmi_get_info_from_resources(struct platform_device *pdev,
+ struct si_sm_io *io)
+{
+ struct resource *res, *res_second;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res) {
+ io->addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ io->addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ if (!res) {
+ dev_err(&pdev->dev, "no I/O or memory address\n");
+ return NULL;
+ }
+ io->addr_data = res->start;
+
+ io->regspacing = DEFAULT_REGSPACING;
+ res_second = platform_get_resource(pdev,
+ (io->addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > io->addr_data)
+ io->regspacing = res_second->start - io->addr_data;
+ }
+ io->regsize = DEFAULT_REGSIZE;
+ io->regshift = 0;
+
+ return res;
+}
+
+static int platform_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ u8 type, slave_addr, addr_source;
+ int rv;
+
+ rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source);
+ if (rv)
+ addr_source = SI_PLATFORM;
+ if (addr_source >= SI_LAST)
+ return -EINVAL;
+
+ if (addr_source == SI_SMBIOS) {
+ if (!si_trydmi)
+ return -ENODEV;
+ } else {
+ if (!si_tryplatform)
+ return -ENODEV;
+ }
+
+ rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
+ if (rv)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = addr_source;
+ dev_info(&pdev->dev, PFX "probing via %s\n",
+ ipmi_addr_src_to_str(addr_source));
+
+ switch (type) {
+ case SI_KCS:
+ case SI_SMIC:
+ case SI_BT:
+ io.si_type = type;
+ break;
+ default:
+ dev_err(&pdev->dev, "ipmi-type property is invalid\n");
+ return -EINVAL;
+ }
+
+ if (!ipmi_get_info_from_resources(pdev, &io))
+ return -EINVAL;
+
+ rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
+ if (rv) {
+ dev_warn(&pdev->dev, "device has no slave-addr property\n");
+ io.slave_addr = 0x20;
+ } else {
+ io.slave_addr = slave_addr;
+ }
+
+ io.irq = platform_get_irq(pdev, 0);
+ if (io.irq > 0)
+ io.irq_setup = ipmi_std_irq_setup;
+ else
+ io.irq = 0;
+
+ io.dev = &pdev->dev;
+
+ pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+ (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ ipmi_si_add_smi(&io);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_match[] = {
+ { .type = "ipmi", .compatible = "ipmi-kcs",
+ .data = (void *)(unsigned long) SI_KCS },
+ { .type = "ipmi", .compatible = "ipmi-smic",
+ .data = (void *)(unsigned long) SI_SMIC },
+ { .type = "ipmi", .compatible = "ipmi-bt",
+ .data = (void *)(unsigned long) SI_BT },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
+
+static int of_ipmi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct si_sm_io io;
+ struct resource resource;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+ int proplen;
+
+ if (!si_tryopenfirmware)
+ return -ENODEV;
+
+ dev_info(&pdev->dev, "probing via device tree\n");
+
+ match = of_match_device(of_ipmi_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ if (!of_device_is_available(np))
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_warn(&pdev->dev, PFX "invalid address from OF\n");
+ return ret;
+ }
+
+ regsize = of_get_property(np, "reg-size", &proplen);
+ if (regsize && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regsize from OF\n");
+ return -EINVAL;
+ }
+
+ regspacing = of_get_property(np, "reg-spacing", &proplen);
+ if (regspacing && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regspacing from OF\n");
+ return -EINVAL;
+ }
+
+ regshift = of_get_property(np, "reg-shift", &proplen);
+ if (regshift && proplen != 4) {
+ dev_warn(&pdev->dev, PFX "invalid regshift from OF\n");
+ return -EINVAL;
+ }
+
+ memset(&io, 0, sizeof(io));
+ io.si_type = (enum si_type) match->data;
+ io.addr_source = SI_DEVICETREE;
+ io.irq_setup = ipmi_std_irq_setup;
+
+ if (resource.flags & IORESOURCE_IO)
+ io.addr_type = IPMI_IO_ADDR_SPACE;
+ else
+ io.addr_type = IPMI_MEM_ADDR_SPACE;
+
+ io.addr_data = resource.start;
+
+ io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ io.regshift = regshift ? be32_to_cpup(regshift) : 0;
+
+ io.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
+ io.addr_data, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+}
+#else
+#define of_ipmi_match NULL
+static int of_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_ACPI
+static int find_slave_address(struct si_sm_io *io, int slave_addr)
+{
+#ifdef CONFIG_IPMI_DMI_DECODE
+ if (!slave_addr) {
+ u32 flags = IORESOURCE_IO;
+
+ if (io->addr_type == IPMI_MEM_ADDR_SPACE)
+ flags = IORESOURCE_MEM;
+
+ slave_addr = ipmi_dmi_get_slave_addr(io->si_type, flags,
+ io->addr_data);
+ }
+#endif
+
+ return slave_addr;
+}
+
+static int acpi_ipmi_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ struct resource *res;
+ int rv = -EINVAL;
+
+ if (!si_tryacpi)
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ memset(&io, 0, sizeof(io));
+ io.addr_source = SI_ACPI;
+ dev_info(&pdev->dev, PFX "probing via ACPI\n");
+
+ io.addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev,
+ "Could not find ACPI IPMI interface type\n");
+ goto err_free;
+ }
+
+ switch (tmp) {
+ case 1:
+ io.si_type = SI_KCS;
+ break;
+ case 2:
+ io.si_type = SI_SMIC;
+ break;
+ case 3:
+ io.si_type = SI_BT;
+ break;
+ case 4: /* SSIF, just ignore */
+ rv = -ENODEV;
+ goto err_free;
+ default:
+ dev_info(&pdev->dev, "unknown IPMI type %lld\n", tmp);
+ goto err_free;
+ }
+
+ res = ipmi_get_info_from_resources(pdev, &io);
+ if (!res) {
+ rv = -EINVAL;
+ goto err_free;
+ }
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ io.irq = tmp;
+ io.irq_setup = acpi_gpe_irq_setup;
+ } else {
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq > 0) {
+ io.irq = irq;
+ io.irq_setup = ipmi_std_irq_setup;
+ }
+ }
+
+ io.slave_addr = find_slave_address(&io, io.slave_addr);
+
+ io.dev = &pdev->dev;
+
+ dev_info(io.dev, "%pR regsize %d spacing %d irq %d\n",
+ res, io.regsize, io.regspacing, io.irq);
+
+ return ipmi_si_add_smi(&io);
+
+err_free:
+ return rv;
+}
+
+static const struct acpi_device_id acpi_ipmi_match[] = {
+ { "IPI0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ipmi_match);
+#else
+static int acpi_ipmi_probe(struct platform_device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+static int ipmi_probe(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node && of_ipmi_probe(pdev) == 0)
+ return 0;
+
+ if (acpi_ipmi_probe(pdev) == 0)
+ return 0;
+
+ return platform_ipmi_probe(pdev);
+}
+
+static int ipmi_remove(struct platform_device *pdev)
+{
+ return ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_platform_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_match,
+ .acpi_match_table = ACPI_PTR(acpi_ipmi_match),
+ },
+ .probe = ipmi_probe,
+ .remove = ipmi_remove,
+};
+
+void ipmi_si_platform_init(void)
+{
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err(PFX "Unable to register driver: %d\n", rv);
+
+#ifdef CONFIG_ACPI
+ if (si_tryacpi)
+ spmi_find_bmc();
+#endif
+
+}
+
+void ipmi_si_platform_shutdown(void)
+{
+ platform_driver_unregister(&ipmi_platform_driver);
+}
diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c
new file mode 100644
index 000000000000..e5ce174fbeeb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_port_io.c
@@ -0,0 +1,112 @@
+
+#include <linux/io.h>
+#include "ipmi_si.h"
+
+static unsigned char port_inb(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return inb(addr + (offset * io->regspacing));
+}
+
+static void port_outb(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outb(b, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inw(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outw(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outw(b << io->regshift, addr + (offset * io->regspacing));
+}
+
+static unsigned char port_inl(const struct si_sm_io *io, unsigned int offset)
+{
+ unsigned int addr = io->addr_data;
+
+ return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
+}
+
+static void port_outl(const struct si_sm_io *io, unsigned int offset,
+ unsigned char b)
+{
+ unsigned int addr = io->addr_data;
+
+ outl(b << io->regshift, addr+(offset * io->regspacing));
+}
+
+static void port_cleanup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (addr) {
+ for (idx = 0; idx < io->io_size; idx++)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ }
+}
+
+int ipmi_si_port_setup(struct si_sm_io *io)
+{
+ unsigned int addr = io->addr_data;
+ int idx;
+
+ if (!addr)
+ return -ENODEV;
+
+ io->io_cleanup = port_cleanup;
+
+ /*
+ * Figure out the actual inb/inw/inl/etc routine to use based
+ * upon the register size.
+ */
+ switch (io->regsize) {
+ case 1:
+ io->inputb = port_inb;
+ io->outputb = port_outb;
+ break;
+ case 2:
+ io->inputb = port_inw;
+ io->outputb = port_outw;
+ break;
+ case 4:
+ io->inputb = port_inl;
+ io->outputb = port_outl;
+ break;
+ default:
+ dev_warn(io->dev, "Invalid register size: %d\n",
+ io->regsize);
+ return -EINVAL;
+ }
+
+ /*
+ * Some BIOSes reserve disjoint I/O regions in their ACPI
+ * tables. This causes problems when trying to register the
+ * entire I/O region. Therefore we must register each I/O
+ * port separately.
+ */
+ for (idx = 0; idx < io->io_size; idx++) {
+ if (request_region(addr + idx * io->regspacing,
+ io->regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ while (idx--)
+ release_region(addr + idx * io->regspacing,
+ io->regsize);
+ return -EIO;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index a705027c0493..aa8d88ab4433 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,12 +34,18 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/ipmi.h>
+
/*
* This is defined by the state machines themselves, it is an opaque
* data type for them to use.
*/
struct si_sm_data;
+enum si_type {
+ SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT
+};
+
/*
* The structure for doing I/O in the state machine. The state
* machine doesn't have the actual I/O routines, they are done through
@@ -61,6 +67,23 @@ struct si_sm_io {
int regshift;
int addr_type;
long addr_data;
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
+ void (*addr_source_cleanup)(struct si_sm_io *io);
+ void *addr_source_data;
+ union ipmi_smi_info_union addr_info;
+
+ int (*io_setup)(struct si_sm_io *info);
+ void (*io_cleanup)(struct si_sm_io *info);
+ unsigned int io_size;
+
+ int irq;
+ int (*irq_setup)(struct si_sm_io *io);
+ void *irq_handler_data;
+ void (*irq_cleanup)(struct si_sm_io *io);
+
+ u8 slave_addr;
+ enum si_type si_type;
+ struct device *dev;
};
/* Results of SMI events. */
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0aea3bcb6158..3cfaec728604 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -53,6 +53,7 @@
#include <linux/acpi.h>
#include <linux/ctype.h>
#include <linux/time64.h>
+#include "ipmi_si_sm.h"
#include "ipmi_dmi.h"
#define PFX "ipmi_ssif: "
@@ -267,9 +268,6 @@ struct ssif_info {
unsigned char *i2c_data;
unsigned int i2c_size;
- /* From the device id response. */
- struct ipmi_device_id device_id;
-
struct timer_list retry_timer;
int retries_left;
@@ -553,9 +551,9 @@ static void start_get(struct ssif_info *ssif_info)
}
}
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = (void *) data;
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
bool waiting;
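
This is the first of many identical timer-API conversions in this section; the general shape, as a minimal sketch with illustrative names, is:

#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	/* ... */
};

static void foo_timeout(struct timer_list *t)
{
	/* from_timer() is a container_of() wrapper keyed on the member name */
	struct foo *foo = from_timer(foo, t, timer);

	/* handle the timeout using foo */
}

static void foo_start(struct foo *foo)
{
	/* replaces setup_timer(&foo->timer, fn, (unsigned long)foo) */
	timer_setup(&foo->timer, foo_timeout, 0);
	mod_timer(&foo->timer, jiffies + HZ);
}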
@@ -1176,6 +1174,61 @@ MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of
static DEFINE_MUTEX(ssif_infos_mutex);
static LIST_HEAD(ssif_infos);
+#define IPMI_SSIF_ATTR(name) \
+static ssize_t ipmi_##name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct ssif_info *ssif_info = dev_get_drvdata(dev); \
+ \
+ return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+} \
+static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
+
+static ssize_t ipmi_type_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 10, "ssif\n");
+}
+static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
+
+IPMI_SSIF_ATTR(sent_messages);
+IPMI_SSIF_ATTR(sent_messages_parts);
+IPMI_SSIF_ATTR(send_retries);
+IPMI_SSIF_ATTR(send_errors);
+IPMI_SSIF_ATTR(received_messages);
+IPMI_SSIF_ATTR(received_message_parts);
+IPMI_SSIF_ATTR(receive_retries);
+IPMI_SSIF_ATTR(receive_errors);
+IPMI_SSIF_ATTR(flag_fetches);
+IPMI_SSIF_ATTR(hosed);
+IPMI_SSIF_ATTR(events);
+IPMI_SSIF_ATTR(watchdog_pretimeouts);
+IPMI_SSIF_ATTR(alerts);
+
+static struct attribute *ipmi_ssif_dev_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_sent_messages.attr,
+ &dev_attr_sent_messages_parts.attr,
+ &dev_attr_send_retries.attr,
+ &dev_attr_send_errors.attr,
+ &dev_attr_received_messages.attr,
+ &dev_attr_received_message_parts.attr,
+ &dev_attr_receive_retries.attr,
+ &dev_attr_receive_errors.attr,
+ &dev_attr_flag_fetches.attr,
+ &dev_attr_hosed.attr,
+ &dev_attr_events.attr,
+ &dev_attr_watchdog_pretimeouts.attr,
+ &dev_attr_alerts.attr,
+ NULL
+};
+
+static const struct attribute_group ipmi_ssif_dev_attr_group = {
+ .attrs = ipmi_ssif_dev_attrs,
+};
+
static int ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
@@ -1196,6 +1249,9 @@ static int ssif_remove(struct i2c_client *client)
}
ssif_info->intf = NULL;
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
+
/* make sure the driver is not looking for flags any more. */
while (ssif_info->ssif_state != SSIF_NORMAL)
schedule_timeout(1);
@@ -1289,6 +1345,7 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
return rv;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
{
seq_puts(m, "ssif\n");
@@ -1352,6 +1409,7 @@ static const struct file_operations smi_stats_proc_ops = {
.llseek = seq_lseek,
.release = single_release,
};
+#endif
static int strcmp_nospace(char *s1, char *s2)
{
@@ -1425,7 +1483,7 @@ static int find_slave_address(struct i2c_client *client, int slave_addr)
#ifdef CONFIG_IPMI_DMI_DECODE
if (!slave_addr)
slave_addr = ipmi_dmi_get_slave_addr(
- IPMI_DMI_TYPE_SSIF,
+ SI_TYPE_INVALID,
i2c_adapter_id(client->adapter),
client->addr);
#endif
@@ -1481,20 +1539,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
- /*
- * Do a Get Device ID command, since it comes back with some
- * useful info.
- */
- msg[0] = IPMI_NETFN_APP_REQUEST << 2;
- msg[1] = IPMI_GET_DEVICE_ID_CMD;
- rv = do_cmd(client, 2, msg, &len, resp);
- if (rv)
- goto out;
-
- rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
- if (rv)
- goto out;
-
ssif_info->client = client;
i2c_set_clientdata(client, ssif_info);
@@ -1647,8 +1691,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
spin_lock_init(&ssif_info->lock);
ssif_info->ssif_state = SSIF_NORMAL;
- setup_timer(&ssif_info->retry_timer, retry_timeout,
- (unsigned long)ssif_info);
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
for (i = 0; i < SSIF_NUM_STATS; i++)
atomic_set(&ssif_info->stats[i], 0);
@@ -1682,16 +1725,26 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ dev_set_drvdata(&ssif_info->client->dev, ssif_info);
+ rv = device_add_group(&ssif_info->client->dev,
+ &ipmi_ssif_dev_attr_group);
+ if (rv) {
+ dev_err(&ssif_info->client->dev,
+ "Unable to add device attributes: error %d\n",
+ rv);
+ goto out;
+ }
+
rv = ipmi_register_smi(&ssif_info->handlers,
ssif_info,
- &ssif_info->device_id,
&ssif_info->client->dev,
slave_addr);
if (rv) {
pr_err(PFX "Unable to register device: error %d\n", rv);
- goto out;
+ goto out_remove_attr;
}
+#ifdef CONFIG_IPMI_PROC_INTERFACE
rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
&smi_type_proc_ops,
ssif_info);
@@ -1707,6 +1760,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
pr_err(PFX "Unable to create proc entry: %d\n", rv);
goto out_err_unreg;
}
+#endif
out:
if (rv) {
@@ -1725,8 +1779,14 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
kfree(resp);
return rv;
- out_err_unreg:
+#ifdef CONFIG_IPMI_PROC_INTERFACE
+out_err_unreg:
ipmi_unregister_smi(ssif_info->intf);
+#endif
+
+out_remove_attr:
+ device_remove_group(&ssif_info->client->dev, &ipmi_ssif_dev_attr_group);
+ dev_set_drvdata(&ssif_info->client->dev, NULL);
goto out;
}
@@ -1953,20 +2013,13 @@ static void spmi_find_bmc(void) { }
#ifdef CONFIG_DMI
static int dmi_ipmi_probe(struct platform_device *pdev)
{
- u8 type, slave_addr = 0;
+ u8 slave_addr = 0;
u16 i2c_addr;
int rv;
if (!ssif_trydmi)
return -ENODEV;
- rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type);
- if (rv)
- return -ENODEV;
-
- if (type != IPMI_DMI_TYPE_SSIF)
- return -ENODEV;
-
rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr);
if (rv) {
dev_warn(&pdev->dev, PFX "No i2c-addr property\n");
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 3d832d0362a4..76b270678b50 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1009,9 +1009,14 @@ static void ipmi_register_watchdog(int ipmi_intf)
goto out;
}
- ipmi_get_version(watchdog_user,
- &ipmi_version_major,
- &ipmi_version_minor);
+ rv = ipmi_get_version(watchdog_user,
+ &ipmi_version_major,
+ &ipmi_version_minor);
+ if (rv) {
+ pr_warn(PFX "Unable to get IPMI version, assuming 1.0\n");
+ ipmi_version_major = 1;
+ ipmi_version_minor = 0;
+ }
rv = misc_register(&ipmi_wdog_miscdev);
if (rv < 0) {
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 593a8818aca9..6aefe5370e5b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/char/mem.c
*
@@ -342,6 +343,10 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ /* Does it even fit in phys_addr_t? */
+ if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ return -EINVAL;
+
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
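
A worked example of what the added check catches (illustrative 32-bit configuration):

/*
 * With a 32-bit phys_addr_t and PAGE_SHIFT == 12, vm_pgoff == 0x100000
 * yields offset = 0x100000 << 12 = 0x100000000, which does not fit and
 * truncates to 0.  Then (offset >> PAGE_SHIFT) == 0 != vm_pgoff, so the
 * mmap is refused instead of the later range checks running against a
 * wrapped offset.
 */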
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index c9cd1ea6844a..1bb9e7cc82e3 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/char/misc.c
*
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index e6d0d271c58c..a7113b78251a 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -23,11 +23,11 @@
#define __NWBUTTON_C /* Tell the header file who we are */
#include "nwbutton.h"
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static int button_press_count; /* The count of button presses */
/* Times for the end of a sequence */
-static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0);
+static DEFINE_TIMER(button_timer, button_sequence_finished);
static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */
static char button_output_buffer[32]; /* Stores data to write out of device */
static int bcount; /* The number of bytes in the buffer */
@@ -127,7 +127,7 @@ static void button_consume_callbacks (int bpcount)
* any matching registered function callbacks, initiate reboot, etc.).
*/
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
{
if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
button_press_count == reboot_count)
diff --git a/drivers/char/nwbutton.h b/drivers/char/nwbutton.h
index c3ebc16ce8a7..9dedfd7adc0e 100644
--- a/drivers/char/nwbutton.h
+++ b/drivers/char/nwbutton.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NWBUTTON_H
#define __NWBUTTON_H
@@ -24,7 +25,7 @@ struct button_callback {
/* Function prototypes: */
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static irqreturn_t button_handler (int irq, void *dev_id);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index cd53771b9ae7..370e0a64ead1 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -659,9 +659,9 @@ static void terminate_monitor(struct cm4000_dev *dev)
* is already doing that for you.
*/
-static void monitor_card(unsigned long p)
+static void monitor_card(struct timer_list *t)
{
- struct cm4000_dev *dev = (struct cm4000_dev *) p;
+ struct cm4000_dev *dev = from_timer(dev, t, timer);
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
@@ -1374,7 +1374,7 @@ static void start_monitor(struct cm4000_dev *dev)
DEBUGP(3, dev, "-> start_monitor\n");
if (!dev->monitor_running) {
DEBUGP(5, dev, "create, init and add timer\n");
- setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
+ timer_setup(&dev->timer, monitor_card, 0);
dev->monitor_running = 1;
mod_timer(&dev->timer, jiffies);
} else
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 382c864814d9..9a1aaf538758 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -104,9 +104,9 @@ static inline unsigned char xinb(unsigned short port)
/* poll the device fifo status register. not to be confused with
* the poll syscall. */
-static void cm4040_do_poll(unsigned long dummy)
+static void cm4040_do_poll(struct timer_list *t)
{
- struct reader_dev *dev = (struct reader_dev *) dummy;
+ struct reader_dev *dev = from_timer(dev, t, poll_timer);
unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ REG_OFFSET_BUFFER_STATUS);
@@ -465,7 +465,6 @@ static int cm4040_open(struct inode *inode, struct file *filp)
link->open = 1;
- dev->poll_timer.data = (unsigned long) dev;
mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
@@ -585,7 +584,7 @@ static int reader_probe(struct pcmcia_device *link)
init_waitqueue_head(&dev->poll_wait);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
- setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
+ timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
ret = reader_config(link, i);
if (ret) {
diff --git a/drivers/char/pcmcia/cm4040_cs.h b/drivers/char/pcmcia/cm4040_cs.h
index 9a8b805c5095..e2ffff995d51 100644
--- a/drivers/char/pcmcia/cm4040_cs.h
+++ b/drivers/char/pcmcia/cm4040_cs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CM4040_H_
#define _CM4040_H_
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 62be953e5fb0..aa502e9fb7fa 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -375,7 +375,7 @@ static void reset_device(MGSLPC_INFO *info);
static void hdlc_mode(MGSLPC_INFO *info);
static void async_mode(MGSLPC_INFO *info);
-static void tx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
static int carrier_raised(struct tty_port *port);
static void dtr_rts(struct tty_port *port, int onoff);
@@ -1289,7 +1289,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = claim_resources(info);
@@ -3846,9 +3846,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- MGSLPC_INFO *info = (MGSLPC_INFO*)context;
+ MGSLPC_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..ec42c8bb9b0d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,7 +259,6 @@
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
-#include <linux/kmemcheck.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/syscalls.h>
@@ -641,7 +640,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
return;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -1265,7 +1264,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 974d48927b07..5542a438bbd0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -135,9 +135,9 @@ static struct fasync_struct *rtc_async_queue;
static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
#ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
-static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0);
+static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
#endif
static ssize_t rtc_read(struct file *file, char __user *buf,
@@ -1171,7 +1171,7 @@ module_exit(rtc_exit);
* for something that requires a steady > 1KHz signal anyways.)
*/
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
{
unsigned long freq;
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 6210bff46341..8eeb4190207d 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -184,9 +184,8 @@ static unsigned int telclk_interrupt;
static int int_events; /* Event that generate a interrupt */
static int got_event; /* if events processing have been done */
-static void switchover_timeout(unsigned long data);
-static struct timer_list switchover_timer =
- TIMER_INITIALIZER(switchover_timeout , 0, 0);
+static void switchover_timeout(struct timer_list *t);
+static struct timer_list switchover_timer;
static unsigned long tlclk_timer_data;
static struct tlclk_alarms *alarm_events;
@@ -805,7 +804,7 @@ static int __init tlclk_init(void)
goto out3;
}
- init_timer(&switchover_timer);
+ timer_setup(&switchover_timer, switchover_timeout, 0);
ret = misc_register(&tlclk_miscdev);
if (ret < 0) {
@@ -855,9 +854,9 @@ static void __exit tlclk_cleanup(void)
}
-static void switchover_timeout(unsigned long data)
+static void switchover_timeout(struct timer_list *unused)
{
- unsigned long flags = *(unsigned long *) data;
+ unsigned long flags = tlclk_timer_data;
if ((flags & 1)) {
if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
@@ -922,7 +921,6 @@ static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
/* TIMEOUT in ~10ms */
switchover_timer.expires = jiffies + msecs_to_jiffies(10);
tlclk_timer_data = inb(TLCLK_REG1);
- switchover_timer.data = (unsigned long) &tlclk_timer_data;
mod_timer(&switchover_timer, switchover_timer.expires);
} else {
got_event = 1;
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 23681f01f95a..34b4bcf46f43 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel tpm device drivers.
#
diff --git a/drivers/char/tpm/st33zp24/Makefile b/drivers/char/tpm/st33zp24/Makefile
index 74a722e5e068..649e41107de9 100644
--- a/drivers/char/tpm/st33zp24/Makefile
+++ b/drivers/char/tpm/st33zp24/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ST33ZP24 TPM 1.2 driver
#
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 610638a80383..230b99288024 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -22,9 +22,9 @@
#include "tpm.h"
#include "tpm-dev.h"
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = (struct file_priv *)ptr;
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
@@ -48,8 +48,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
- setup_timer(&priv->user_read_timer, user_reader_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
@@ -110,6 +109,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
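
The added check rejects writes that are shorter than the fixed TPM command header or shorter than the total length the command itself declares; that length is a big-endian 32-bit field at offset 2, right after the 2-byte tag. A hedged sketch of the same validation as a hypothetical helper:

#include <linux/types.h>
#include <linux/kernel.h>

static bool tpm_req_len_ok(const u8 *buf, size_t in_size)
{
	u32 declared;

	/* tag (2 bytes) + total length (4 bytes) must be present */
	if (in_size < 6)
		return false;

	/* total length is big-endian and counts the whole command */
	declared = be32_to_cpu(*(const __be32 *)(buf + 2));
	return in_size >= declared;
}
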
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index ff15cf719bad..ba3b6f9dacf7 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TPM_DEV_H
#define _TPM_DEV_H
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 86f38d239476..83a77a445538 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -20,44 +20,48 @@
#include <linux/device.h>
#include "tpm.h"
-#define READ_PUBEK_RESULT_SIZE 314
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
#define TPM_ORD_READPUBEK 124
-static const struct tpm_input_header tpm_readpubek_header = {
- .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
- .length = cpu_to_be32(30),
- .ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
-};
+
static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
- struct tpm_cmd_t tpm_cmd;
- ssize_t err;
- int i, rc;
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ ssize_t rc;
+ int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
- memset(&tpm_cmd, 0, sizeof(tpm_cmd));
-
- tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
- "attempting to read the PUBEK");
- if (err)
- goto out;
-
- /*
- ignore header 10 bytes
- algorithm 32 bits (1 == RSA )
- encscheme 16 bits
- sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
- keylenbytes 32 bits
- 256 byte modulus
- ignore checksum 20 bytes
- */
- data = tpm_cmd.params.readpubek_out_buffer;
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
+ if (rc)
+ return rc;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+ READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+ "attempting to read the PUBEK");
+ if (rc) {
+ tpm_buf_destroy(&tpm_buf);
+ return 0;
+ }
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\n"
@@ -68,21 +72,26 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
"%02X %02X %02X %02X\n"
"Modulus length: %d\n"
"Modulus:\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5],
- data[6], data[7],
- data[12], data[13], data[14], data[15],
- data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23],
- be32_to_cpu(*((__be32 *) (data + 24))));
+ out->algorithm[0], out->algorithm[1], out->algorithm[2],
+ out->algorithm[3],
+ out->encscheme[0], out->encscheme[1],
+ out->sigscheme[0], out->sigscheme[1],
+ out->parameters[0], out->parameters[1],
+ out->parameters[2], out->parameters[3],
+ out->parameters[4], out->parameters[5],
+ out->parameters[6], out->parameters[7],
+ out->parameters[8], out->parameters[9],
+ out->parameters[10], out->parameters[11],
+ be32_to_cpu(out->keysize));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 28]);
+ str += sprintf(str, "%02X ", out->modulus[i]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
-out:
+
rc = str - buf;
+ tpm_buf_destroy(&tpm_buf);
return rc;
}
static DEVICE_ATTR_RO(pubek);
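
The rewritten pubek_show() overlays the __packed output structure at &tpm_buf.data[10] because every TPM 1.2 response starts with a 10-byte header (tag, total length, return code) ahead of the payload. A sketch of that split with illustrative names (the driver's own header type lives in tpm.h):

#include <linux/types.h>

struct tpm_resp_header {
	__be16 tag;
	__be32 length;
	__be32 return_code;
} __packed;		/* sizeof() == 10, i.e. TPM_HEADER_SIZE */

static const void *tpm_resp_payload(const u8 *data)
{
	/* the READ_PUBEK body starts right after the response header */
	return data + sizeof(struct tpm_resp_header);
}
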
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2d5466a72e40..528cffbd49d3 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -345,17 +345,6 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
-struct tpm_readpubek_params_out {
- u8 algorithm[4];
- u8 encscheme[2];
- u8 sigscheme[2];
- __be32 paramsize;
- u8 parameters[12]; /*assuming RSA*/
- __be32 keysize;
- u8 modulus[256];
- u8 checksum[20];
-} __packed;
-
typedef union {
struct tpm_input_header in;
struct tpm_output_header out;
@@ -385,8 +374,6 @@ struct tpm_getrandom_in {
} __packed;
typedef union {
- struct tpm_readpubek_params_out readpubek_out;
- u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
struct tpm_pcrread_in pcrread_in;
struct tpm_pcrread_out pcrread_out;
struct tpm_getrandom_in getrandom_in;
@@ -557,7 +544,7 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline inline u32 tpm2_rc_value(u32 rc)
+static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index e1a41b788f08..f40d20671a78 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -834,72 +834,43 @@ static const struct tpm_input_header tpm2_selftest_header = {
};
/**
- * tpm2_continue_selftest() - start a self test
- *
- * @chip: TPM chip to use
- * @full: test all commands instead of testing only those that were not
- * previously tested.
- *
- * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING.
- */
-static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
-{
- int rc;
- struct tpm2_cmd cmd;
-
- cmd.header.in = tpm2_selftest_header;
- cmd.params.selftest_in.full_test = full;
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0,
- "continue selftest");
-
- /* At least some prototype chips seem to give RC_TESTING error
- * immediately. This is a workaround for that.
- */
- if (rc == TPM2_RC_TESTING) {
- dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
- rc = 0;
- }
-
- return rc;
-}
-
-/**
- * tpm2_do_selftest() - run a full self test
+ * tpm2_do_selftest() - ensure that all self tests have passed
*
* @chip: TPM chip to use
*
* Return: Same as with tpm_transmit_cmd.
*
- * During the self test TPM2 commands return with the error code RC_TESTING.
- * Waiting is done by issuing PCR read until it executes successfully.
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests have passed, or it can run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests
+ * still execute in the background. This function handles both cases and
+ * waits until all tests have completed.
*/
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int loops;
- unsigned int delay_msec = 100;
- unsigned long duration;
- int i;
-
- duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
+ unsigned int delay_msec = 20;
+ long duration;
+ struct tpm2_cmd cmd;
- loops = jiffies_to_msecs(duration) / delay_msec;
+ duration = jiffies_to_msecs(
+ tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- rc = tpm2_start_selftest(chip, true);
- if (rc)
- return rc;
+ while (duration > 0) {
+ cmd.header.in = tpm2_selftest_header;
+ cmd.params.selftest_in.full_test = 0;
- for (i = 0; i < loops; i++) {
- /* Attempt to read a PCR value */
- rc = tpm2_pcr_read(chip, 0, NULL);
- if (rc < 0)
- break;
+ rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ 0, 0, "continue selftest");
if (rc != TPM2_RC_TESTING)
break;
tpm_msleep(delay_msec);
+ duration -= delay_msec;
+
+ /* wait longer the next round */
+ delay_msec *= 2;
}
return rc;
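
The rewritten loop resends TPM2_CC_SELF_TEST and backs off exponentially for as long as the chip answers RC_TESTING, bounded by the ordinal's worst-case duration. A standalone sketch of that back-off, where poll_once() is a hypothetical stand-in for transmitting the command and returning its TPM return code:

static int poll_once(void);	/* hypothetical: resend the self-test command */

static int poll_until_done(long budget_msec)
{
	unsigned int delay_msec = 20;
	int rc = TPM2_RC_TESTING;

	while (budget_msec > 0) {
		rc = poll_once();
		if (rc != TPM2_RC_TESTING)
			break;		/* finished, or a real error */

		tpm_msleep(delay_msec);
		budget_msec -= delay_msec;
		delay_msec *= 2;	/* wait longer each round */
	}

	return rc;
}
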
@@ -1009,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
{
struct tpm_buf buf;
u32 nr_commands;
- u32 *attrs;
+ __be32 *attrs;
u32 cc;
int i;
int rc;
@@ -1049,7 +1020,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->nr_commands = nr_commands;
- attrs = (u32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
for (i = 0; i < nr_commands; i++, attrs++) {
chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
cc = chip->cc_attrs_tbl[i] & 0xFFFF;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index e2e059d8ffec..4e4014eabdb9 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -242,7 +242,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
struct tpm_space *space = &chip->work_space;
unsigned int nr_handles;
u32 attrs;
- u32 *handle;
+ __be32 *handle;
int i;
i = tpm2_find_cc(chip, cc);
@@ -252,7 +252,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
attrs = chip->cc_attrs_tbl[i];
nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
- handle = (u32 *)&cmd[TPM_HEADER_SIZE];
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
for (i = 0; i < nr_handles; i++, handle++) {
if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
if (!tpm2_map_to_phandle(space, handle))
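
Both __be32 changes are sparse endianness annotations: the handles and attribute words are wire-format big-endian, so typing the pointers as __be32 * lets sparse flag any access that skips the byte-order conversion. A minimal sketch of the annotated access pattern (read_handle() is illustrative):

#include <linux/types.h>
#include <linux/kernel.h>

static u32 read_handle(const u8 *cmd, unsigned int header_size)
{
	/* wire format is big-endian; keep the pointer typed accordingly */
	const __be32 *handle = (const __be32 *)&cmd[header_size];

	return be32_to_cpup(handle);	/* explicit conversion to CPU order */
}
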
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8f0a98dea327..7b3c2a8aa9de 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -92,14 +92,9 @@ enum crb_status {
CRB_DRV_STS_COMPLETE = BIT(0),
};
-enum crb_flags {
- CRB_FL_ACPI_START = BIT(0),
- CRB_FL_CRB_START = BIT(1),
- CRB_FL_CRB_SMC_START = BIT(2),
-};
-
struct crb_priv {
- unsigned int flags;
+ u32 sm;
+ const char *hid;
void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
@@ -128,14 +123,16 @@ struct tpm2_crb_smc {
* Anyhow, we do not wait here as a consequent CMD_READY request
* will be handled correctly even if idle was not completed.
*
- * The function does nothing for devices with ACPI-start method.
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 always
*/
static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -174,14 +171,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
* The device should respond within TIMEOUT_C.
*
* The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 on success -ETIME on timeout;
*/
static int __maybe_unused crb_cmd_ready(struct device *dev,
struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -325,13 +324,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Make sure that cmd is populated before issuing start. */
wmb();
- if (priv->flags & CRB_FL_CRB_START)
+	/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+	 * reports only ACPI start but in practice seems to require both
+	 * ACPI start and CRB start, hence the CRB start method is also
+	 * invoked when hid == MSFT0101.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if (priv->flags & CRB_FL_ACPI_START)
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
rc = crb_do_acpi_start(chip);
- if (priv->flags & CRB_FL_CRB_SMC_START) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
@@ -345,7 +351,9 @@ static void crb_cancel(struct tpm_chip *chip)
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
}
@@ -458,7 +466,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if (!(priv->flags & CRB_FL_ACPI_START)) {
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
if (buf->control_address == io_res.start +
sizeof(*priv->regs_h))
priv->regs_h = priv->iobase;
@@ -552,18 +561,6 @@ static int crb_acpi_add(struct acpi_device *device)
if (!priv)
return -ENOMEM;
- /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
- * report only ACPI start but in practice seems to require both
- * ACPI start and CRB start.
- */
- if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
- !strcmp(acpi_device_hid(device), "MSFT0101"))
- priv->flags |= CRB_FL_CRB_START;
-
- if (sm == ACPI_TPM2_START_METHOD ||
- sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
- priv->flags |= CRB_FL_ACPI_START;
-
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
dev_err(dev,
@@ -574,9 +571,11 @@ static int crb_acpi_add(struct acpi_device *device)
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
- priv->flags |= CRB_FL_CRB_SMC_START;
}
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
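
With the flags replaced by the raw start method from the ACPI TPM2 table, the same pair of start-method comparisons now appears at several call sites; a hypothetical predicate, shown only as a sketch and not part of the patch, could centralise it:

static bool crb_uses_acpi_start(const struct crb_priv *priv)
{
	return priv->sm == ACPI_TPM2_START_METHOD ||
	       priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD;
}
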
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index b4b549559203..204466cc4d05 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TPM_EVENTLOG_H__
#define __TPM_EVENTLOG_H__
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7e55aa9ce680..e2d1055fb814 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -30,6 +30,7 @@
#include <linux/freezer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/kernel.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -223,7 +224,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
@@ -365,7 +366,7 @@ static struct pnp_driver tis_pnp_driver = {
},
};
-#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 63bc6c3b949e..fdde971bc810 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -252,7 +252,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -343,7 +343,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
@@ -445,7 +445,7 @@ static int probe_itpm(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
- u8 cmd_getticks[] = {
+ static const u8 cmd_getticks[] = {
0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0xf1
};
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e2212f021a02..6bbac319ff3b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 88fe72ae967f..424ff2fde1f2 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -46,9 +46,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};
static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;
@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}
@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;
- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}
spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}
exit:
@@ -139,40 +141,51 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
+ __le16 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le);
if (!rc)
- *result = le16_to_cpu(*result);
+ *result = le16_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
+ __le32 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le);
if (!rc)
- *result = le32_to_cpu(*result);
+ *result = le32_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
- value = cpu_to_le32(value);
- return data->phy_ops->write_bytes(data, addr, sizeof(u32),
- (u8 *)&value);
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le);
+
+ return rc;
}
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
@@ -194,6 +207,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
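
The transfer loop above builds the standard 4-byte TIS-over-SPI frame header in the shared iobuf: bit 7 of the first byte selects a read, its low bits encode the transfer length minus one, and the remaining bytes carry 0xd4 plus the 16-bit register address. A small sketch of just that header, with an illustrative helper name:

#include <linux/types.h>

static void tis_spi_fill_header(u8 hdr[4], bool is_read, u16 len, u32 addr)
{
	hdr[0] = (is_read ? 0x80 : 0x00) | (len - 1);	/* len capped to MAX_SPI_FRAMESIZE elsewhere */
	hdr[1] = 0xd4;					/* TIS register space */
	hdr[2] = addr >> 8;
	hdr[3] = addr;
}
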
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index c99f363826f0..f7f761b02bed 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index c68947b65a4c..082596f37c1d 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for at91 specific clk
#
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index aadabd9d1e2b..cd8d689138ff 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -14,14 +14,20 @@
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <soc/at91/atmel-sfr.h>
#include "pmc.h"
-#define UTMI_FIXED_MUL 40
+/*
+ * The purpose of this clock is to generate a 480 MHz signal. A different
+ * rate can't be configured.
+ */
+#define UTMI_RATE 480000000
struct clk_utmi {
struct clk_hw hw;
- struct regmap *regmap;
+ struct regmap *regmap_pmc;
+ struct regmap *regmap_sfr;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
@@ -37,13 +43,54 @@ static inline bool clk_utmi_ready(struct regmap *regmap)
static int clk_utmi_prepare(struct clk_hw *hw)
{
+ struct clk_hw *hw_parent;
struct clk_utmi *utmi = to_clk_utmi(hw);
unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
AT91_PMC_BIASEN;
+ unsigned int utmi_ref_clk_freq;
+ unsigned long parent_rate;
+
+ /*
+ * If mainck rate is different from 12 MHz, we have to configure the
+	 * FREQ field of the SFR_UTMICKTRIM register to properly generate
+	 * the utmi clock.
+ */
+ hw_parent = clk_hw_get_parent(hw);
+ parent_rate = clk_hw_get_rate(hw_parent);
+
+ switch (parent_rate) {
+ case 12000000:
+ utmi_ref_clk_freq = 0;
+ break;
+ case 16000000:
+ utmi_ref_clk_freq = 1;
+ break;
+ case 24000000:
+ utmi_ref_clk_freq = 2;
+ break;
+ /*
+ * Not supported on SAMA5D2 but it's not an issue since MAINCK
+ * maximum value is 24 MHz.
+ */
+ case 48000000:
+ utmi_ref_clk_freq = 3;
+ break;
+ default:
+ pr_err("UTMICK: unsupported mainck rate\n");
+ return -EINVAL;
+ }
- regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
+ if (utmi->regmap_sfr) {
+ regmap_update_bits(utmi->regmap_sfr, AT91_SFR_UTMICKTRIM,
+ AT91_UTMICKTRIM_FREQ, utmi_ref_clk_freq);
+ } else if (utmi_ref_clk_freq) {
+ pr_err("UTMICK: sfr node required\n");
+ return -EINVAL;
+ }
- while (!clk_utmi_ready(utmi->regmap))
+ regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR, uckr, uckr);
+
+ while (!clk_utmi_ready(utmi->regmap_pmc))
cpu_relax();
return 0;
@@ -53,21 +100,22 @@ static int clk_utmi_is_prepared(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- return clk_utmi_ready(utmi->regmap);
+ return clk_utmi_ready(utmi->regmap_pmc);
}
static void clk_utmi_unprepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
+ regmap_update_bits(utmi->regmap_pmc, AT91_CKGR_UCKR,
+ AT91_PMC_UPLLEN, 0);
}
static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- /* UTMI clk is a fixed clk multiplier */
- return parent_rate * UTMI_FIXED_MUL;
+ /* UTMI clk rate is fixed. */
+ return UTMI_RATE;
}
static const struct clk_ops utmi_ops = {
@@ -78,7 +126,7 @@ static const struct clk_ops utmi_ops = {
};
static struct clk_hw * __init
-at91_clk_register_utmi(struct regmap *regmap,
+at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr,
const char *name, const char *parent_name)
{
struct clk_utmi *utmi;
@@ -97,7 +145,8 @@ at91_clk_register_utmi(struct regmap *regmap,
init.flags = CLK_SET_RATE_GATE;
utmi->hw.init = &init;
- utmi->regmap = regmap;
+ utmi->regmap_pmc = regmap_pmc;
+ utmi->regmap_sfr = regmap_sfr;
hw = &utmi->hw;
ret = clk_hw_register(NULL, &utmi->hw);
@@ -114,17 +163,35 @@ static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
struct clk_hw *hw;
const char *parent_name;
const char *name = np->name;
- struct regmap *regmap;
+ struct regmap *regmap_pmc, *regmap_sfr;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(regmap))
+ regmap_pmc = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap_pmc))
return;
- hw = at91_clk_register_utmi(regmap, name, parent_name);
+ /*
+ * If the device supports different mainck rates, this value has to be
+ * set in the UTMI Clock Trimming register.
+	 * - 9x5: mainck supports several rates, but it is indicated that a
+	 *   12 MHz rate is needed for USB.
+ * - sama5d3 and sama5d2: mainck supports several rates. Configuring
+ * the FREQ field of the UTMI Clock Trimming register is mandatory.
+ * - sama5d4: mainck is at 12 MHz.
+ *
+ * We only need to retrieve sama5d3 or sama5d2 sfr regmap.
+ */
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d3-sfr");
+ if (IS_ERR(regmap_sfr)) {
+ regmap_sfr = syscon_regmap_lookup_by_compatible("atmel,sama5d2-sfr");
+ if (IS_ERR(regmap_sfr))
+ regmap_sfr = NULL;
+ }
+
+ hw = at91_clk_register_utmi(regmap_pmc, regmap_sfr, name, parent_name);
if (IS_ERR(hw))
return;
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 1d9187df167b..4c4bd85f707c 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -30,6 +30,15 @@ config CLK_BCM_CYGNUS
help
Enable common clock framework support for the Broadcom Cygnus SoC
+config CLK_BCM_HR2
+ bool "Broadcom Hurricane 2 clock support"
+ depends on ARCH_BCM_HR2 || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_HR2
+ help
+ Enable common clock framework support for the Broadcom Hurricane 2
+ SoC
+
config CLK_BCM_NSP
bool "Broadcom Northstar/Northstar Plus clock support"
depends on ARCH_BCM_5301X || ARCH_BCM_NSP || COMPILE_TEST
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index a0c14fa4aa1e..002661d39128 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CLK_BCM_63XX) += clk-bcm63xx.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o
obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o
obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o
obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o
+obj-$(CONFIG_CLK_BCM_HR2) += clk-hr2.o
obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o
obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o
obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index bd750cf2238d..77e276d61702 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -14,7 +14,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/bcm2835.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 58ce6af8452d..44301a3d9963 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -37,7 +37,6 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
-#include <linux/clk/bcm2835.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -416,35 +415,6 @@ static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
return regdump ? 0 : -ENOMEM;
}
-/*
- * These are fixed clocks. They're probably not all root clocks and it may
- * be possible to turn them on and off but until this is mapped out better
- * it's the only way they can be used.
- */
-void __init bcm2835_init_clocks(void)
-{
- struct clk_hw *hw;
- int ret;
-
- hw = clk_hw_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
- if (IS_ERR(hw))
- pr_err("apb_pclk not registered\n");
-
- hw = clk_hw_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
- if (IS_ERR(hw))
- pr_err("uart0_pclk not registered\n");
- ret = clk_hw_register_clkdev(hw, NULL, "20201000.uart");
- if (ret)
- pr_err("uart0_pclk alias not registered\n");
-
- hw = clk_hw_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
- if (IS_ERR(hw))
- pr_err("uart1_pclk not registered\n");
- ret = clk_hw_register_clkdev(hw, NULL, "20215000.uart");
- if (ret)
- pr_err("uart1_pclk alias not registered\n");
-}
-
struct bcm2835_pll_data {
const char *name;
u32 cm_ctrl_reg;
diff --git a/drivers/clk/bcm/clk-hr2.c b/drivers/clk/bcm/clk-hr2.c
new file mode 100644
index 000000000000..f7c5b7379475
--- /dev/null
+++ b/drivers/clk/bcm/clk-hr2.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk-iproc.h"
+
+static void __init hr2_armpll_init(struct device_node *node)
+{
+ iproc_armpll_setup(node);
+}
+CLK_OF_DECLARE(hr2_armpll, "brcm,hr2-armpll", hr2_armpll_init);
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c37a7f0e83aa..281f4322355c 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -579,18 +579,13 @@ static u32 *parent_process(const char *clocks[],
*/
parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
GFP_KERNEL);
- if (!parent_names) {
- pr_err("%s: error allocating %u parent names\n", __func__,
- parent_count);
+ if (!parent_names)
return ERR_PTR(-ENOMEM);
- }
/* There is at least one parent, so allocate a selector array */
parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
GFP_KERNEL);
if (!parent_sel) {
- pr_err("%s: error allocating %u parent selectors\n", __func__,
- parent_count);
kfree(parent_names);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index c933be01c7db..0a7e7d5a7506 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -665,7 +665,7 @@ static int cdce925_probe(struct i2c_client *client,
init.ops = &cdce925_pll_ops;
init.flags = 0;
init.parent_names = &parent_name;
- init.num_parents = parent_name ? 1 : 0;
+ init.num_parents = 1;
/* Register PLL clocks */
for (i = 0; i < data->chip_info->num_plls; ++i) {
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index f940e5af845b..5e66e6c0205e 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cortina Gemini SoC Clock Controller driver
* Copyright (c) 2017 Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 86b245746a6b..151513c655c3 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -15,9 +15,7 @@
#include <linux/clk-provider.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -95,14 +93,12 @@ const struct clk_ops clk_gpio_mux_ops = {
EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags,
- const struct clk_ops *clk_gpio_ops)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags, const struct clk_ops *clk_gpio_ops)
{
struct clk_gpio *clk_gpio;
struct clk_hw *hw;
struct clk_init_data init = {};
- unsigned long gpio_flags;
int err;
if (dev)
@@ -113,32 +109,13 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
if (!clk_gpio)
return ERR_PTR(-ENOMEM);
- if (active_low)
- gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
- else
- gpio_flags = GPIOF_OUT_INIT_LOW;
-
- if (dev)
- err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
- else
- err = gpio_request_one(gpio, gpio_flags, name);
- if (err) {
- if (err != -EPROBE_DEFER)
- pr_err("%s: %s: Error requesting clock control gpio %u\n",
- __func__, name, gpio);
- if (!dev)
- kfree(clk_gpio);
-
- return ERR_PTR(err);
- }
-
init.name = name;
init.ops = clk_gpio_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
- clk_gpio->gpiod = gpio_to_desc(gpio);
+ clk_gpio->gpiod = gpiod;
clk_gpio->hw.init = &init;
hw = &clk_gpio->hw;
@@ -151,7 +128,6 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
return hw;
if (!dev) {
- gpiod_put(clk_gpio->gpiod);
kfree(clk_gpio);
}
@@ -164,29 +140,27 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of this clock's parent
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
* @flags: clock flags
*/
struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+ const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
return clk_register_gpio(dev, name,
(parent_name ? &parent_name : NULL),
- (parent_name ? 1 : 0), gpio, active_low, flags,
+ (parent_name ? 1 : 0), gpiod, flags,
&clk_gpio_gate_ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned gpio, bool active_low,
+ const char *parent_name, struct gpio_desc *gpiod,
unsigned long flags)
{
struct clk_hw *hw;
- hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpio, active_low,
- flags);
+ hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpiod, flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
@@ -199,13 +173,12 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
* @name: name of this clock
* @parent_names: names of this clock's parents
* @num_parents: number of parents listed in @parent_names
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
+ * @gpiod: gpio descriptor to gate this clock
* @flags: clock flags
*/
struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags)
{
if (num_parents != 2) {
pr_err("mux-clock %s must have 2 parents\n", name);
@@ -213,18 +186,18 @@ struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
}
return clk_register_gpio(dev, name, parent_names, num_parents,
- gpio, active_low, flags, &clk_gpio_mux_ops);
+ gpiod, flags, &clk_gpio_mux_ops);
}
EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low, unsigned long flags)
+ const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+ unsigned long flags)
{
struct clk_hw *hw;
hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
- gpio, active_low, flags);
+ gpiod, flags);
if (IS_ERR(hw))
return ERR_CAST(hw);
return hw->clk;
@@ -236,10 +209,10 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const char **parent_names, *gpio_name;
unsigned int num_parents;
- int gpio;
- enum of_gpio_flags of_flags;
+ struct gpio_desc *gpiod;
struct clk *clk;
- bool active_low, is_mux;
+ bool is_mux;
+ int ret;
num_parents = of_clk_get_parent_count(node);
if (num_parents) {
@@ -255,28 +228,27 @@ static int gpio_clk_driver_probe(struct platform_device *pdev)
is_mux = of_device_is_compatible(node, "gpio-mux-clock");
- gpio_name = is_mux ? "select-gpios" : "enable-gpios";
- gpio = of_get_named_gpio_flags(node, gpio_name, 0, &of_flags);
- if (gpio < 0) {
- if (gpio == -EPROBE_DEFER)
+ gpio_name = is_mux ? "select" : "enable";
+ gpiod = devm_gpiod_get(&pdev->dev, gpio_name, GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret == -EPROBE_DEFER)
pr_debug("%s: %s: GPIOs not yet available, retry later\n",
node->name, __func__);
else
- pr_err("%s: %s: Can't get '%s' DT property\n",
+ pr_err("%s: %s: Can't get '%s' named GPIO property\n",
node->name, __func__,
gpio_name);
- return gpio;
+ return ret;
}
- active_low = of_flags & OF_GPIO_ACTIVE_LOW;
-
if (is_mux)
clk = clk_register_gpio_mux(&pdev->dev, node->name,
- parent_names, num_parents, gpio, active_low, 0);
+ parent_names, num_parents, gpiod, 0);
else
clk = clk_register_gpio_gate(&pdev->dev, node->name,
- parent_names ? parent_names[0] : NULL, gpio,
- active_low, 0);
+ parent_names ? parent_names[0] : NULL, gpiod,
+ 0);
if (IS_ERR(clk))
return PTR_ERR(clk);
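
The probe path now asks gpiolib for a descriptor by consumer name, so the "enable"/"select" lookup resolves the enable-gpios/select-gpios DT property and any GPIO_ACTIVE_LOW flag is handled inside gpiolib rather than by the clock driver. A minimal sketch of the lookup (get_clock_gate_gpio() is an illustrative wrapper):

#include <linux/gpio/consumer.h>

static struct gpio_desc *get_clock_gate_gpio(struct device *dev)
{
	/* "enable" maps to the "enable-gpios" property of dev's DT node */
	return devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
}
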
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index bbf237173b37..c4ee280f454d 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -139,7 +139,7 @@ static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
- dev_dbg(clk->dev, "write configurarion: %#x\n", val);
+ dev_dbg(clk->dev, "write configuration: %#x\n", val);
hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}
@@ -169,7 +169,7 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
- dev_dbg(clk->dev, "current configurarion: %#x\n", val);
+ dev_dbg(clk->dev, "current configuration: %#x\n", val);
/* Check if PLL is disabled */
if (val & CGU_PLL_CTRL_PD)
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 16a3d5717f4e..39cabe157163 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -134,11 +134,9 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
}
/* allocate the mux */
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- pr_err("%s: could not allocate mux clk\n", __func__);
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
if (clk_mux_flags & CLK_MUX_READ_ONLY)
diff --git a/drivers/clk/clk-stm32h7.c b/drivers/clk/clk-stm32h7.c
index a94c3f56c590..61c3e40507d3 100644
--- a/drivers/clk/clk-stm32h7.c
+++ b/drivers/clk/clk-stm32h7.c
@@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
mux_ops = div_ops = gate_ops = NULL;
mux_hw = div_hw = gate_hw = NULL;
- if (gcfg->mux && gcfg->mux) {
+ if (gcfg->mux && cfg->mux) {
mux = _get_cmux(base + cfg->mux->offset,
cfg->mux->shift,
cfg->mux->width,
@@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
}
}
- if (gcfg->gate && gcfg->gate) {
+ if (gcfg->gate && cfg->gate) {
gate = _get_cgate(base + cfg->gate->offset,
cfg->gate->bit_idx,
gcfg->gate->flags, lock);
diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c
index eef75e305a59..34b22b7930fb 100644
--- a/drivers/clk/clk-tango4.c
+++ b/drivers/clk/clk-tango4.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 7b222a5db931..25dfe050ae9f 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -82,7 +82,7 @@ static const struct clk_ops twl6040_pdmclk_ops = {
.recalc_rate = twl6040_pdmclk_recalc_rate,
};
-static struct clk_init_data twl6040_pdmclk_init = {
+static const struct clk_init_data twl6040_pdmclk_init = {
.name = "pdmclk",
.ops = &twl6040_pdmclk_ops,
.flags = CLK_GET_RATE_NOCACHE,
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index ec8aafda6e24..7b3e1921771f 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -229,15 +229,15 @@
#define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S0CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S0CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6<<1)
+#define U300_SYSCON_S0CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S0CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S0CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S0CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S0CCR_SEL_APP_PLL208_CLK (0x6 << 1)
/* SYS_1_CLK_CONTROL second clock control 16 bit (R/W) */
#define U300_SYSCON_S1CCR (0x124)
#define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF)
@@ -247,16 +247,16 @@
#define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S1CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S1CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6<<1)
-/* SYS_2_CLK_CONTROL third clock contol 16 bit (R/W) */
+#define U300_SYSCON_S1CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S1CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S1CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S1CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S1CCR_SEL_APP_PLL208_CLK (0x6 << 1)
+/* SYS_2_CLK_CONTROL third clock control 16 bit (R/W) */
#define U300_SYSCON_S2CCR (0x128)
#define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF)
#define U300_SYSCON_S2CCR_CLK_STEAL (0x8000)
@@ -266,15 +266,15 @@
#define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0)
#define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E)
#define U300_SYSCON_S2CCR_CLOCK_ENABLE (0x0001)
-#define U300_SYSCON_S2CCR_SEL_MCLK (0x8<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC<<1)
-#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE<<1)
-#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2<<1)
-#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4<<1)
-#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6<<1)
+#define U300_SYSCON_S2CCR_SEL_MCLK (0x8 << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_FSM_CLK (0xA << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_48_CLK (0xC << 1)
+#define U300_SYSCON_S2CCR_SEL_PLL60_60_CLK (0xD << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL208_CLK (0xE << 1)
+#define U300_SYSCON_S2CCR_SEL_ACC_PLL13_CLK (0x0 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_FSM_CLK (0x2 << 1)
+#define U300_SYSCON_S2CCR_SEL_RTC_CLK (0x4 << 1)
+#define U300_SYSCON_S2CCR_SEL_APP_PLL208_CLK (0x6 << 1)
/* SC_PLL_IRQ_CONTROL 16bit (R/W) */
#define U300_SYSCON_PICR (0x0130)
#define U300_SYSCON_PICR_MASK (0x00FF)
@@ -378,7 +378,7 @@
* +- ISP Image Signal Processor (U335 only)
* +- CDS (U335 only)
* +- DMA Direct Memory Access Controller
- * +- AAIF APP/ACC Inteface (Mobile Scalable Link, MSL)
+ * +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
* +- APEX
* +- VIDEO_ENC AVE2/3 Video Encoder
* +- XGAM Graphics Accelerator Controller
@@ -568,14 +568,14 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
struct clk_syscon *sclk = to_syscon(hw);
u16 perf = syscon_get_perf();
- switch(sclk->clk_val) {
+ switch (sclk->clk_val) {
case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
case U300_SYSCON_SBCER_I2C0_CLK_EN:
case U300_SYSCON_SBCER_I2C1_CLK_EN:
case U300_SYSCON_SBCER_MMC_CLK_EN:
case U300_SYSCON_SBCER_SPI_CLK_EN:
/* The FAST clocks have one progression */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -586,7 +586,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
case U300_SYSCON_SBCER_NANDIF_CLK_EN:
case U300_SYSCON_SBCER_XGAM_CLK_EN:
/* AMBA interconnect peripherals */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 6500000;
@@ -598,7 +598,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
case U300_SYSCON_SBCER_SEMI_CLK_EN:
case U300_SYSCON_SBCER_EMIF_CLK_EN:
/* EMIF speeds */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -609,7 +609,7 @@ syscon_clk_recalc_rate(struct clk_hw *hw,
}
case U300_SYSCON_SBCER_CPU_CLK_EN:
/* And the fast CPU clock */
- switch(perf) {
+ switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
@@ -702,12 +702,10 @@ syscon_clk_register(struct device *dev, const char *name,
struct clk_init_data init;
int ret;
- sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
- if (!sclk) {
- pr_err("could not allocate syscon clock %s\n",
- name);
+ sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
+ if (!sclk)
return ERR_PTR(-ENOMEM);
- }
+
init.name = name;
init.ops = &syscon_clk_ops;
init.flags = flags;
@@ -1123,12 +1121,10 @@ mclk_clk_register(struct device *dev, const char *name,
struct clk_init_data init;
int ret;
- mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
- if (!mclk) {
- pr_err("could not allocate MMC/SD clock %s\n",
- name);
+ mclk = kzalloc(sizeof(*mclk), GFP_KERNEL);
+ if (!mclk)
return ERR_PTR(-ENOMEM);
- }
+
init.name = "mclk";
init.ops = &mclk_ops;
init.flags = 0;
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index a47960aacfa5..146769532325 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -52,7 +52,7 @@ static const struct clk_ops wm831x_xtal_ops = {
.recalc_rate = wm831x_xtal_recalc_rate,
};
-static struct clk_init_data wm831x_xtal_init = {
+static const struct clk_init_data wm831x_xtal_init = {
.name = "xtal",
.ops = &wm831x_xtal_ops,
};
@@ -225,7 +225,7 @@ static const struct clk_ops wm831x_fll_ops = {
.get_parent = wm831x_fll_get_parent,
};
-static struct clk_init_data wm831x_fll_init = {
+static const struct clk_init_data wm831x_fll_init = {
.name = "fll",
.ops = &wm831x_fll_ops,
.parent_names = wm831x_fll_parents,
@@ -338,7 +338,7 @@ static const struct clk_ops wm831x_clkout_ops = {
.set_parent = wm831x_clkout_set_parent,
};
-static struct clk_init_data wm831x_clkout_init = {
+static const struct clk_init_data wm831x_clkout_init = {
.name = "clkout",
.ops = &wm831x_clkout_ops,
.parent_names = wm831x_clkout_parents,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 4c75821a3933..531b030d4d4e 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -146,10 +146,8 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
- if (!apmclk) {
- pr_err("%s: could not allocate APM clk\n", __func__);
+ if (!apmclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &xgene_clk_pll_ops;
@@ -191,7 +189,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
int version = xgene_pllclk_version(np);
reg = of_iomap(np, 0);
- if (reg == NULL) {
+ if (!reg) {
pr_err("Unable to map CSR register for %pOF\n", np);
return;
}
@@ -467,7 +465,7 @@ static int xgene_clk_enable(struct clk_hw *hw)
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
/* First enable the clock */
data = xgene_clk_read(pclk->param.csr_reg +
@@ -507,7 +505,7 @@ static void xgene_clk_disable(struct clk_hw *hw)
if (pclk->lock)
spin_lock_irqsave(pclk->lock, flags);
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
/* First put the CSR in reset */
data = xgene_clk_read(pclk->param.csr_reg +
@@ -533,7 +531,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
struct xgene_clk *pclk = to_xgene_clk(hw);
u32 data = 0;
- if (pclk->param.csr_reg != NULL) {
+ if (pclk->param.csr_reg) {
pr_debug("%s clock checking\n", clk_hw_get_name(hw));
data = xgene_clk_read(pclk->param.csr_reg +
pclk->param.reg_clk_offset);
@@ -542,7 +540,7 @@ static int xgene_clk_is_enabled(struct clk_hw *hw)
"disabled");
}
- if (pclk->param.csr_reg == NULL)
+ if (!pclk->param.csr_reg)
return 1;
return data & pclk->param.reg_clk_mask ? 1 : 0;
}
@@ -650,10 +648,8 @@ static struct clk *xgene_register_clk(struct device *dev,
/* allocate the APM clock structure */
apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
- if (!apmclk) {
- pr_err("%s: could not allocate APM clk\n", __func__);
+ if (!apmclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &xgene_clk_ops;
@@ -709,7 +705,7 @@ static void __init xgene_devclk_init(struct device_node *np)
break;
}
map_res = of_iomap(np, i);
- if (map_res == NULL) {
+ if (!map_res) {
pr_err("Unable to map resource %d for %pOF\n", i, np);
goto err;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c8d83acda006..647d056df88c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
@@ -46,6 +47,7 @@ struct clk_core {
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
+ struct device *dev;
struct clk_core *parent;
const char **parent_names;
struct clk_core **parents;
@@ -87,6 +89,26 @@ struct clk {
struct hlist_node clks_node;
};
+/*** runtime pm ***/
+static int clk_pm_runtime_get(struct clk_core *core)
+{
+ int ret = 0;
+
+ if (!core->dev)
+ return 0;
+
+ ret = pm_runtime_get_sync(core->dev);
+ return ret < 0 ? ret : 0;
+}
+
+static void clk_pm_runtime_put(struct clk_core *core)
+{
+ if (!core->dev)
+ return;
+
+ pm_runtime_put_sync(core->dev);
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -150,6 +172,8 @@ static void clk_enable_unlock(unsigned long flags)
static bool clk_core_is_prepared(struct clk_core *core)
{
+ bool ret = false;
+
/*
* .is_prepared is optional for clocks that can prepare
* fall back to software usage counter if it is missing
@@ -157,11 +181,18 @@ static bool clk_core_is_prepared(struct clk_core *core)
if (!core->ops->is_prepared)
return core->prepare_count;
- return core->ops->is_prepared(core->hw);
+ if (!clk_pm_runtime_get(core)) {
+ ret = core->ops->is_prepared(core->hw);
+ clk_pm_runtime_put(core);
+ }
+
+ return ret;
}
static bool clk_core_is_enabled(struct clk_core *core)
{
+ bool ret = false;
+
/*
* .is_enabled is only mandatory for clocks that gate
* fall back to software usage counter if .is_enabled is missing
@@ -169,7 +200,29 @@ static bool clk_core_is_enabled(struct clk_core *core)
if (!core->ops->is_enabled)
return core->enable_count;
- return core->ops->is_enabled(core->hw);
+ /*
+	 * Check if the clock controller's device is runtime active before
+	 * calling the .is_enabled callback. If not, assume that the clock is
+	 * disabled, because we might be called from atomic context, from
+	 * which pm_runtime_get() is not allowed.
+	 * This function is called mainly from clk_disable_unused_subtree,
+	 * which ensures proper runtime pm activation of the controller before
+	 * taking the enable spinlock, but the check below is needed if one
+	 * tries to call it from other places.
+ */
+ if (core->dev) {
+ pm_runtime_get_noresume(core->dev);
+ if (!pm_runtime_active(core->dev)) {
+ ret = false;
+ goto done;
+ }
+ }
+
+ ret = core->ops->is_enabled(core->hw);
+done:
+ clk_pm_runtime_put(core);
+
+ return ret;
}
/*** helper functions ***/
@@ -489,6 +542,8 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
+ clk_pm_runtime_put(core);
+
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
}
@@ -530,10 +585,14 @@ static int clk_core_prepare(struct clk_core *core)
return 0;
if (core->prepare_count == 0) {
- ret = clk_core_prepare(core->parent);
+ ret = clk_pm_runtime_get(core);
if (ret)
return ret;
+ ret = clk_core_prepare(core->parent);
+ if (ret)
+ goto runtime_put;
+
trace_clk_prepare(core);
if (core->ops->prepare)
@@ -541,15 +600,18 @@ static int clk_core_prepare(struct clk_core *core)
trace_clk_prepare_complete(core);
- if (ret) {
- clk_core_unprepare(core->parent);
- return ret;
- }
+ if (ret)
+ goto unprepare;
}
core->prepare_count++;
return 0;
+unprepare:
+ clk_core_unprepare(core->parent);
+runtime_put:
+ clk_pm_runtime_put(core);
+ return ret;
}
static int clk_core_prepare_lock(struct clk_core *core)
@@ -745,6 +807,9 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
+ if (clk_pm_runtime_get(core))
+ return;
+
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -753,6 +818,8 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
+
+ clk_pm_runtime_put(core);
}
static void clk_disable_unused_subtree(struct clk_core *core)
@@ -768,6 +835,9 @@ static void clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
+ if (clk_pm_runtime_get(core))
+ goto unprepare_out;
+
flags = clk_enable_lock();
if (core->enable_count)
@@ -792,6 +862,8 @@ static void clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
+ clk_pm_runtime_put(core);
+unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1038,9 +1110,13 @@ EXPORT_SYMBOL_GPL(clk_get_accuracy);
static unsigned long clk_recalc(struct clk_core *core,
unsigned long parent_rate)
{
- if (core->ops->recalc_rate)
- return core->ops->recalc_rate(core->hw, parent_rate);
- return parent_rate;
+ unsigned long rate = parent_rate;
+
+ if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
+ rate = core->ops->recalc_rate(core->hw, parent_rate);
+ clk_pm_runtime_put(core);
+ }
+ return rate;
}
/**
@@ -1565,6 +1641,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
{
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
+ int ret = 0;
if (!core)
return 0;
@@ -1581,21 +1658,28 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (!top)
return -EINVAL;
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ return ret;
+
/* notify that we are about to change rates */
fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
if (fail_clk) {
pr_debug("%s: failed to set %s rate\n", __func__,
fail_clk->name);
clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
/* change the rates */
clk_change_rate(top);
core->req_rate = req_rate;
+err:
+ clk_pm_runtime_put(core);
- return 0;
+ return ret;
}
/**
@@ -1826,12 +1910,16 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
p_rate = parent->rate;
}
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ goto out;
+
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(core, p_rate);
/* abort if a driver objects */
if (ret & NOTIFY_STOP_MASK)
- goto out;
+ goto runtime_put;
/* do the re-parent */
ret = __clk_set_parent(core, parent, p_index);
@@ -1844,6 +1932,8 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
__clk_recalc_accuracies(core);
}
+runtime_put:
+ clk_pm_runtime_put(core);
out:
clk_prepare_unlock();
@@ -2350,7 +2440,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
*/
static int __clk_core_init(struct clk_core *core)
{
- int i, ret = 0;
+ int i, ret;
struct clk_core *orphan;
struct hlist_node *tmp2;
unsigned long rate;
@@ -2360,6 +2450,10 @@ static int __clk_core_init(struct clk_core *core)
clk_prepare_lock();
+ ret = clk_pm_runtime_get(core);
+ if (ret)
+ goto unlock;
+
/* check to see if a clock with this name is already registered */
if (clk_core_lookup(core->name)) {
pr_debug("%s: clk %s already initialized\n",
@@ -2512,6 +2606,8 @@ static int __clk_core_init(struct clk_core *core)
kref_init(&core->ref);
out:
+ clk_pm_runtime_put(core);
+unlock:
clk_prepare_unlock();
if (!ret)
@@ -2583,6 +2679,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
goto fail_name;
}
core->ops = hw->init->ops;
+ if (dev && pm_runtime_enabled(dev))
+ core->dev = dev;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3177,6 +3275,37 @@ int of_clk_add_hw_provider(struct device_node *np,
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
+static void devm_of_clk_release_provider(struct device *dev, void *res)
+{
+ of_clk_del_provider(*(struct device_node **)res);
+}
+
+int devm_of_clk_add_hw_provider(struct device *dev,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ struct device_node **ptr, *np;
+ int ret;
+
+ ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ np = dev->of_node;
+ ret = of_clk_add_hw_provider(np, get, data);
+ if (!ret) {
+ *ptr = np;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
+
/**
* of_clk_del_provider() - Remove a previously registered clock provider
* @np: Device node pointer associated with clock provider
@@ -3198,6 +3327,27 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
+static int devm_clk_provider_match(struct device *dev, void *res, void *data)
+{
+ struct device_node **np = res;
+
+ if (WARN_ON(!np || !*np))
+ return 0;
+
+ return *np == data;
+}
+
+void devm_of_clk_del_provider(struct device *dev)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_of_clk_release_provider,
+ devm_clk_provider_match, dev->of_node);
+
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_of_clk_del_provider);
+
static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
struct of_phandle_args *clkspec)
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index 715b882205a8..4ae624425e9d 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* H8/300 divide clock driver
*
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index a26312460621..fc24b0b55a3d 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* H8S2678 clock driver
*
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 1e4c3ddbad84..0e55612112af 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Hisilicon Clock specific Makefile
#
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index fa0fba653898..77072c7778b9 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -415,7 +415,7 @@ static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return mmc_clk_set_timing(hw, rate);
}
-static struct clk_ops clk_mmc_ops = {
+static const struct clk_ops clk_mmc_ops = {
.prepare = mmc_clk_prepare,
.determine_rate = mmc_clk_determine_rate,
.set_rate = mmc_clk_set_rate,
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index a18258eb89cb..f40419959656 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
/* crgctrl */
static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
- { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, },
+ { HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
{ HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
{ HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
{ HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index e786d717f75d..a87809d4bd52 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -145,7 +145,7 @@ static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = {
{ HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, },
{ HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, },
{ HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, },
- { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, },
+ { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IS_CRITICAL, 0x270, 12, 0, },
};
static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = {
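The only functional change above is the flag swap on cs_atb_syspll: CLK_IGNORE_UNUSED merely exempts a clock from the clk_disable_unused() sweep, whereas CLK_IS_CRITICAL makes the core prepare and enable the clock as soon as it is registered, so it stays on even with no consumer holding it. A hypothetical, self-contained sketch of the same flag applied to a plain gate (the names, register offset and bit are illustrative only and do not model the hi6220 separated-gate hardware):

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_gate_lock);

/*
 * With CLK_IS_CRITICAL, __clk_core_init() calls clk_core_prepare() and
 * clk_core_enable() on this gate at registration time.
 */
static struct clk_hw *example_register_critical_gate(void __iomem *base)
{
	return clk_hw_register_gate(NULL, "cs_atb_syspll", "syspll",
				    CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
				    base + 0x270, 12, 0, &example_gate_lock);
}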
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 14b05efa3c2a..9584f0c32dda 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -208,7 +208,7 @@ static void clk_ether_unprepare(struct clk_hw *hw)
writel_relaxed(val, clk->ctrl_reg);
}
-static struct clk_ops clk_ether_ops = {
+static const struct clk_ops clk_ether_ops = {
.prepare = clk_ether_prepare,
.unprepare = clk_ether_unprepare,
};
@@ -247,7 +247,7 @@ static void clk_complex_disable(struct clk_hw *hw)
writel_relaxed(val, clk->phy_reg);
}
-static struct clk_ops clk_complex_ops = {
+static const struct clk_ops clk_complex_ops = {
.enable = clk_complex_enable,
.disable = clk_complex_disable,
};
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index 7908bc3c9ec7..f36bdef91831 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -88,7 +88,7 @@ static int clkgate_separated_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
-static struct clk_ops clkgate_separated_ops = {
+static const struct clk_ops clkgate_separated_ops = {
.enable = clkgate_separated_enable,
.disable = clkgate_separated_disable,
.is_enabled = clkgate_separated_is_enabled,
@@ -105,10 +105,8 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
struct clk_init_data init;
sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
- if (!sclk) {
- pr_err("%s: fail to allocate separated gated clk\n", __func__);
+ if (!sclk)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &clkgate_separated_ops;
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index ed8bb5f7507f..8478948e858e 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -47,6 +47,8 @@
#define HI3798CV200_FIXED_12M 81
#define HI3798CV200_FIXED_48M 82
#define HI3798CV200_FIXED_60M 83
+#define HI3798CV200_FIXED_166P5M 84
+#define HI3798CV200_SDIO0_MUX 85
#define HI3798CV200_CRG_NR_CLKS 128
@@ -63,6 +65,7 @@ static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
{ HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
{ HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
{ HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HI3798CV200_FIXED_166P5M, "166p5m", NULL, 0, 165000000, },
{ HI3798CV200_FIXED_200M, "200m", NULL, 0, 200000000, },
{ HI3798CV200_FIXED_250M, "250m", NULL, 0, 250000000, },
};
@@ -75,12 +78,19 @@ static const char *const comphy1_mux_p[] = {
"100m", "25m"};
static u32 comphy1_mux_table[] = {2, 3};
+static const char *const sdio_mux_p[] = {
+ "100m", "50m", "150m", "166p5m" };
+static u32 sdio_mux_table[] = {0, 1, 2, 3};
+
static struct hisi_mux_clock hi3798cv200_mux_clks[] = {
{ HI3798CV200_MMC_MUX, "mmc_mux", mmc_mux_p, ARRAY_SIZE(mmc_mux_p),
CLK_SET_RATE_PARENT, 0xa0, 8, 3, 0, mmc_mux_table, },
{ HI3798CV200_COMBPHY1_MUX, "combphy1_mux",
comphy1_mux_p, ARRAY_SIZE(comphy1_mux_p),
CLK_SET_RATE_PARENT, 0x188, 10, 2, 0, comphy1_mux_table, },
+ { HI3798CV200_SDIO0_MUX, "sdio0_mux", sdio_mux_p,
+ ARRAY_SIZE(sdio_mux_p), CLK_SET_RATE_PARENT,
+ 0x9c, 8, 2, 0, sdio_mux_table, },
};
static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
@@ -104,7 +114,7 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
/* SDIO */
{ HISTB_SDIO0_BIU_CLK, "clk_sdio0_biu", "200m",
CLK_SET_RATE_PARENT, 0x9c, 0, 0, },
- { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "mmc_mux",
+ { HISTB_SDIO0_CIU_CLK, "clk_sdio0_ciu", "sdio0_mux",
CLK_SET_RATE_PARENT, 0x9c, 1, 0, },
/* EMMC */
{ HISTB_MMC_BIU_CLK, "clk_mmc_biu", "200m",
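The hunks above add a 166.5 MHz fixed clock and an sdio0_mux selecting between 100m/50m/150m/166p5m, and move clk_sdio0_ciu from mmc_mux onto the new mux. Because both the CIU gate and the mux carry CLK_SET_RATE_PARENT, an ordinary rate request from the SDIO host driver can propagate up to the mux. A hypothetical consumer-side sketch, assuming the usual dw_mmc "ciu" clock-connection name and the common-framework reparent-on-set-rate behaviour for the mux:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_sdio_setup_ciu(struct platform_device *pdev)
{
	struct clk *ciu = devm_clk_get(&pdev->dev, "ciu");

	if (IS_ERR(ciu))
		return PTR_ERR(ciu);

	/*
	 * With CLK_SET_RATE_PARENT on both clk_sdio0_ciu and sdio0_mux,
	 * this request may be satisfied by switching the mux to its
	 * "150m" input rather than leaving it on a slower parent.
	 */
	return clk_set_rate(ciu, 150000000);
}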
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index f18f10351785..15af423cc0c9 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016-2017 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 1ada68abb158..f91f2b2e11cd 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += \
clk.o \
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 5cc99590f9a3..6df3389687bc 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -72,7 +72,7 @@ static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static struct clk_ops clk_busy_divider_ops = {
+static const struct clk_ops clk_busy_divider_ops = {
.recalc_rate = clk_busy_divider_recalc_rate,
.round_rate = clk_busy_divider_round_rate,
.set_rate = clk_busy_divider_set_rate,
@@ -147,7 +147,7 @@ static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
return ret;
}
-static struct clk_ops clk_busy_mux_ops = {
+static const struct clk_ops clk_busy_mux_ops = {
.get_parent = clk_busy_mux_get_parent,
.set_parent = clk_busy_mux_set_parent,
};
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index db44a198a0d9..60fc9d7a9723 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -118,7 +118,7 @@ static void clk_gate2_disable_unused(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
}
-static struct clk_ops clk_gate2_ops = {
+static const struct clk_ops clk_gate2_ops = {
.enable = clk_gate2_enable,
.disable = clk_gate2_disable,
.disable_unused = clk_gate2_disable_unused,
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index cf5cf75a4848..0a0ab95d16fe 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index c07df719b8a3..8d518ad5dc13 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -761,7 +761,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
- clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4);
+ clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4);
clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 5e8c18afce9a..85c118164469 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -267,7 +267,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6ULL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels));
}
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
- clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+ clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 2305699db467..80dc211eb74b 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -54,11 +54,6 @@ static const char *arm_m4_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
"pll_usb_main_clk", };
-static const char *arm_m0_sel[] = { "osc", "pll_sys_main_120m_clk",
- "pll_enet_125m_clk", "pll_sys_pfd2_135m_clk",
- "pll_dram_533m_clk", "pll_audio_post_div", "pll_video_main_clk",
- "pll_usb_main_clk", };
-
static const char *axi_sel[] = { "osc", "pll_sys_pfd1_332m_clk",
"pll_dram_533m_clk", "pll_enet_250m_clk", "pll_sys_pfd5_clk",
"pll_audio_post_div", "pll_video_main_clk", "pll_sys_pfd7_clk", };
@@ -510,7 +505,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel));
clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel));
- clks[IMX7D_ARM_M0_ROOT_SRC] = imx_clk_mux2("arm_m0_src", base + 0x8100, 24, 3, arm_m0_sel, ARRAY_SIZE(arm_m0_sel));
clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel));
clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel));
clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel));
@@ -582,7 +576,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28);
clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28);
- clks[IMX7D_ARM_M0_ROOT_CG] = imx_clk_gate3("arm_m0_cg", "arm_m0_src", base + 0x8100, 28);
clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28);
clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28);
clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28);
@@ -721,7 +714,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3);
clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3);
- clks[IMX7D_ARM_M0_ROOT_DIV] = imx_clk_divider2("arm_m0_div", "arm_m0_cg", base + 0x8100, 0, 3);
clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6);
clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6);
clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6);
@@ -793,11 +785,10 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate4("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0);
clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0);
- clks[IMX7D_ARM_M0_ROOT_CLK] = imx_clk_gate4("arm_m0_root_clk", "arm_m0_div", base + 0x4020, 0);
clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate4("main_axi_root_clk", "axi_post_div", base + 0x4040, 0);
clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0);
clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0);
- clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "axi_post_div", base + 0x4110, 0);
+ clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0);
clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0);
clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate4("dram_root_clk", "dram_post_div", base + 0x4130, 0);
clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate4("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0);
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 82fe3662b5f6..4ba9973d4c18 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -106,7 +107,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
return ull;
}
-static struct clk_ops clk_pllv1_ops = {
+static const struct clk_ops clk_pllv1_ops = {
.recalc_rate = clk_pllv1_recalc_rate,
};
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index 4aeda56ce372..85b5cbe9744c 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -226,7 +227,7 @@ static void clk_pllv2_unprepare(struct clk_hw *hw)
__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
}
-static struct clk_ops clk_pllv2_ops = {
+static const struct clk_ops clk_pllv2_ops = {
.prepare = clk_pllv2_prepare,
.unprepare = clk_pllv2_unprepare,
.recalc_rate = clk_pllv2_recalc_rate,
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index a634b1185be3..9074e6974b6d 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index d54f0720afba..d69c4bbf3597 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MACH_IMX_CLK_H
#define __MACH_IMX_CLK_H
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index e8248f9185f7..ab393637f7b0 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -2,7 +2,7 @@
* Ingenic SoC CGU driver
*
* Copyright (c) 2013-2015 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 09700b2c555d..e78b586536ea 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -2,7 +2,7 @@
* Ingenic SoC CGU driver
*
* Copyright (c) 2013-2015 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 510fe7e0c8f1..32fcc75f6f77 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -2,7 +2,7 @@
* Ingenic JZ4740 SoC CGU driver
*
* Copyright (c) 2015 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b35d6d9dd5aa..ac3585ed8228 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -2,7 +2,7 @@
* Ingenic JZ4780 SoC CGU driver
*
* Copyright (c) 2013-2015 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 28739a9a6e37..59dc0aad553c 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -50,6 +50,56 @@ config COMMON_CLK_MT2701_BDPSYS
---help---
This driver supports Mediatek MT2701 bdpsys clocks.
+config COMMON_CLK_MT2712
+ bool "Clock driver for Mediatek MT2712"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ ---help---
+ This driver supports Mediatek MT2712 basic clocks.
+
+config COMMON_CLK_MT2712_BDPSYS
+ bool "Clock driver for Mediatek MT2712 bdpsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 bdpsys clocks.
+
+config COMMON_CLK_MT2712_IMGSYS
+ bool "Clock driver for Mediatek MT2712 imgsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 imgsys clocks.
+
+config COMMON_CLK_MT2712_JPGDECSYS
+ bool "Clock driver for Mediatek MT2712 jpgdecsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 jpgdecsys clocks.
+
+config COMMON_CLK_MT2712_MFGCFG
+ bool "Clock driver for Mediatek MT2712 mfgcfg"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 mfgcfg clocks.
+
+config COMMON_CLK_MT2712_MMSYS
+ bool "Clock driver for Mediatek MT2712 mmsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 mmsys clocks.
+
+config COMMON_CLK_MT2712_VDECSYS
+ bool "Clock driver for Mediatek MT2712 vdecsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 vdecsys clocks.
+
+config COMMON_CLK_MT2712_VENCSYS
+ bool "Clock driver for Mediatek MT2712 vencsys"
+ depends on COMMON_CLK_MT2712
+ ---help---
+ This driver supports Mediatek MT2712 vencsys clocks.
+
config COMMON_CLK_MT6797
bool "Clock driver for Mediatek MT6797"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
@@ -82,6 +132,36 @@ config COMMON_CLK_MT6797_VENCSYS
---help---
This driver supports Mediatek MT6797 vencsys clocks.
+config COMMON_CLK_MT7622
+ bool "Clock driver for MediaTek MT7622"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ ---help---
+ This driver supports MediaTek MT7622 basic clocks and clocks
+ required for various peripherals found on the MediaTek MT7622 SoC.
+
+config COMMON_CLK_MT7622_ETHSYS
+ bool "Clock driver for MediaTek MT7622 ETHSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver adds support for the Ethernet and SGMII clocks
+ required on MediaTek MT7622 SoC.
+
+config COMMON_CLK_MT7622_HIFSYS
+ bool "Clock driver for MediaTek MT7622 HIFSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver supports the MediaTek MT7622 HIFSYS clocks provided
+ to PCI-E and USB.
+
+config COMMON_CLK_MT7622_AUDSYS
+ bool "Clock driver for MediaTek MT7622 AUDSYS"
+ depends on COMMON_CLK_MT7622
+ ---help---
+ This driver supports the MediaTek MT7622 AUDSYS clocks provided
+ to audio consumers such as I2S and TDM.
+
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 2a755b5fb51b..c421ffcd49ff 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
@@ -12,5 +13,17 @@ obj-$(CONFIG_COMMON_CLK_MT2701_HIFSYS) += clk-mt2701-hif.o
obj-$(CONFIG_COMMON_CLK_MT2701_IMGSYS) += clk-mt2701-img.o
obj-$(CONFIG_COMMON_CLK_MT2701_MMSYS) += clk-mt2701-mm.o
obj-$(CONFIG_COMMON_CLK_MT2701_VDECSYS) += clk-mt2701-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712) += clk-mt2712.o
+obj-$(CONFIG_COMMON_CLK_MT2712_BDPSYS) += clk-mt2712-bdp.o
+obj-$(CONFIG_COMMON_CLK_MT2712_IMGSYS) += clk-mt2712-img.o
+obj-$(CONFIG_COMMON_CLK_MT2712_JPGDECSYS) += clk-mt2712-jpgdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MFGCFG) += clk-mt2712-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT2712_MMSYS) += clk-mt2712-mm.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VDECSYS) += clk-mt2712-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT2712_VENCSYS) += clk-mt2712-venc.o
+obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622.o
+obj-$(CONFIG_COMMON_CLK_MT7622_ETHSYS) += clk-mt7622-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 9598889f972b..8e7f16fd87c9 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -750,7 +750,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = {
static struct clk_onecell_data *infra_clk_data;
-static void mtk_infrasys_init_early(struct device_node *node)
+static void __init mtk_infrasys_init_early(struct device_node *node)
{
int r, i;
diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
new file mode 100644
index 000000000000..5fe4728c076e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs bdp_cg_regs = {
+ .set_ofs = 0x100,
+ .clr_ofs = 0x100,
+ .sta_ofs = 0x100,
+};
+
+#define GATE_BDP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &bdp_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate bdp_clks[] = {
+ GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
+ GATE_BDP(CLK_BDP_BRIDGE_DRAM, "bdp_bridge_d", "mm_sel", 1),
+ GATE_BDP(CLK_BDP_LARB_DRAM, "bdp_larb_d", "mm_sel", 2),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_PXL, "bdp_vdi_pxl", "tvd_sel", 3),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_DRAM, "bdp_vdi_d", "mm_sel", 4),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_VDI_B, "bdp_vdi_b", "mm_sel", 5),
+ GATE_BDP(CLK_BDP_MT_B, "bdp_fmt_b", "mm_sel", 9),
+ GATE_BDP(CLK_BDP_DISPFMT_27M, "bdp_27m", "di_sel", 10),
+ GATE_BDP(CLK_BDP_DISPFMT_27M_VDOUT, "bdp_27m_vdout", "di_sel", 11),
+ GATE_BDP(CLK_BDP_DISPFMT_27_74_74, "bdp_27_74_74", "di_sel", 12),
+ GATE_BDP(CLK_BDP_DISPFMT_2FS, "bdp_2fs", "di_sel", 13),
+ GATE_BDP(CLK_BDP_DISPFMT_2FS_2FS74_148, "bdp_2fs74_148", "di_sel", 14),
+ GATE_BDP(CLK_BDP_DISPFMT_B, "bdp_b", "mm_sel", 15),
+ GATE_BDP(CLK_BDP_VDO_DRAM, "bdp_vdo_d", "mm_sel", 16),
+ GATE_BDP(CLK_BDP_VDO_2FS, "bdp_vdo_2fs", "di_sel", 17),
+ GATE_BDP(CLK_BDP_VDO_B, "bdp_vdo_b", "mm_sel", 18),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_PXL, "bdp_di_pxl", "di_sel", 19),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_DRAM, "bdp_di_d", "mm_sel", 20),
+ GATE_BDP(CLK_BDP_WR_CHANNEL_DI_B, "bdp_di_b", "mm_sel", 21),
+ GATE_BDP(CLK_BDP_NR_AGENT, "bdp_nr_agent", "nr_sel", 22),
+ GATE_BDP(CLK_BDP_NR_DRAM, "bdp_nr_d", "mm_sel", 23),
+ GATE_BDP(CLK_BDP_NR_B, "bdp_nr_b", "mm_sel", 24),
+ GATE_BDP(CLK_BDP_BRIDGE_RT_B, "bdp_bridge_rt_b", "mm_sel", 25),
+ GATE_BDP(CLK_BDP_BRIDGE_RT_DRAM, "bdp_bridge_rt_d", "mm_sel", 26),
+ GATE_BDP(CLK_BDP_LARB_RT_DRAM, "bdp_larb_rt_d", "mm_sel", 27),
+ GATE_BDP(CLK_BDP_TVD_TDC, "bdp_tvd_tdc", "mm_sel", 28),
+ GATE_BDP(CLK_BDP_TVD_54, "bdp_tvd_clk_54", "tvd_sel", 29),
+ GATE_BDP(CLK_BDP_TVD_CBUS, "bdp_tvd_cbus", "mm_sel", 30),
+};
+
+static int clk_mt2712_bdp_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_BDP_NR_CLK);
+
+ mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_bdp[] = {
+ { .compatible = "mediatek,mt2712-bdpsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_bdp_drv = {
+ .probe = clk_mt2712_bdp_probe,
+ .driver = {
+ .name = "clk-mt2712-bdp",
+ .of_match_table = of_match_clk_mt2712_bdp,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_bdp_drv);
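The remaining MT2712 subsystem drivers added below (img, jpgdec, mfg, mm, vdec, venc) follow the same template as clk-mt2712-bdp.c above: allocate a clk_onecell_data array sized by the dt-binding count, register the gate table, and expose it with of_clk_add_provider(). On the consumer side, a peripheral in one of these subsystems takes the gate by phandle and index; a hypothetical sketch (the consumer device and the "bridge" clock-connection name are made up, while CLK_BDP_BRIDGE_B comes from dt-bindings/clock/mt2712-clk.h):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_bdp_consumer_probe(struct platform_device *pdev)
{
	struct clk *bridge;
	int ret;

	/*
	 * Resolved through the consumer node's "clocks"/"clock-names"
	 * properties, e.g. clocks = <&bdpsys CLK_BDP_BRIDGE_B>;
	 *                  clock-names = "bridge";
	 */
	bridge = devm_clk_get(&pdev->dev, "bridge");
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* Ungates the CLK_BDP_BRIDGE_B gate registered by clk-mt2712-bdp.c. */
	ret = clk_prepare_enable(bridge);
	if (ret)
		return ret;

	clk_disable_unprepare(bridge);
	return 0;
}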
diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
new file mode 100644
index 000000000000..139ff55d495e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-img.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &img_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
+ GATE_IMG(CLK_IMG_SENINF_SCAM_EN, "img_scam_en", "csi0", 3),
+ GATE_IMG(CLK_IMG_SENINF_CAM_EN, "img_cam_en", "mm_sel", 8),
+ GATE_IMG(CLK_IMG_CAM_SV_EN, "img_cam_sv_en", "mm_sel", 9),
+ GATE_IMG(CLK_IMG_CAM_SV1_EN, "img_cam_sv1_en", "mm_sel", 10),
+ GATE_IMG(CLK_IMG_CAM_SV2_EN, "img_cam_sv2_en", "mm_sel", 11),
+};
+
+static int clk_mt2712_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_img[] = {
+ { .compatible = "mediatek,mt2712-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_img_drv = {
+ .probe = clk_mt2712_img_probe,
+ .driver = {
+ .name = "clk-mt2712-img",
+ .of_match_table = of_match_clk_mt2712_img,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
new file mode 100644
index 000000000000..c7d4aada4892
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs jpgdec_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_JPGDEC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &jpgdec_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate jpgdec_clks[] = {
+ GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
+ GATE_JPGDEC(CLK_JPGDEC_JPGDEC, "jpgdec_jpgdec", "jpgdec_sel", 4),
+};
+
+static int clk_mt2712_jpgdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_JPGDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_jpgdec[] = {
+ { .compatible = "mediatek,mt2712-jpgdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_jpgdec_drv = {
+ .probe = clk_mt2712_jpgdec_probe,
+ .driver = {
+ .name = "clk-mt2712-jpgdec",
+ .of_match_table = of_match_clk_mt2712_jpgdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_jpgdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
new file mode 100644
index 000000000000..570f72d48d4d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mfg_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+};
+
+static int clk_mt2712_mfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+ mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mfg[] = {
+ { .compatible = "mediatek,mt2712-mfgcfg", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_mfg_drv = {
+ .probe = clk_mt2712_mfg_probe,
+ .driver = {
+ .name = "clk-mt2712-mfg",
+ .of_match_table = of_match_clk_mt2712_mfg,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
new file mode 100644
index 000000000000..a8b4b6d42488
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm2_cg_regs = {
+ .set_ofs = 0x224,
+ .clr_ofs = 0x228,
+ .sta_ofs = 0x220,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MM2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_CAM_MDP, "mm_cam_mdp", "mm_sel", 2),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 5),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 6),
+ GATE_MM0(CLK_MM_MDP_RSZ2, "mm_mdp_rsz2", "mm_sel", 7),
+ GATE_MM0(CLK_MM_MDP_TDSHP0, "mm_mdp_tdshp0", "mm_sel", 8),
+ GATE_MM0(CLK_MM_MDP_TDSHP1, "mm_mdp_tdshp1", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_CROP, "mm_mdp_crop", "mm_sel", 10),
+ GATE_MM0(CLK_MM_MDP_WDMA, "mm_mdp_wdma", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_WROT1, "mm_mdp_wrot1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MUTEX_32K, "mm_mutex_32k", "clk32k", 15),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 16),
+ GATE_MM0(CLK_MM_DISP_OVL1, "mm_disp_ovl1", "mm_sel", 17),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_RDMA2, "mm_disp_rdma2", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_WDMA1, "mm_disp_wdma1", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_AAL, "mm_disp_aal", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_GAMMA, "mm_disp_gamma", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_UFOE, "mm_disp_ufoe", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_SPLIT0, "mm_disp_split0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_OD, "mm_disp_od", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DISP_PWM0_MM, "mm_pwm0_mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DISP_PWM0_26M, "mm_pwm0_26m", "pwm_sel", 1),
+ GATE_MM1(CLK_MM_DISP_PWM1_MM, "mm_pwm1_mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DISP_PWM1_26M, "mm_pwm1_26m", "pwm_sel", 3),
+ GATE_MM1(CLK_MM_DSI0_ENGINE, "mm_dsi0_engine", "mm_sel", 4),
+ GATE_MM1(CLK_MM_DSI0_DIGITAL, "mm_dsi0_digital", "dsi0_lntc", 5),
+ GATE_MM1(CLK_MM_DSI1_ENGINE, "mm_dsi1_engine", "mm_sel", 6),
+ GATE_MM1(CLK_MM_DSI1_DIGITAL, "mm_dsi1_digital", "dsi1_lntc", 7),
+ GATE_MM1(CLK_MM_DPI_PIXEL, "mm_dpi_pixel", "vpll_dpix", 8),
+ GATE_MM1(CLK_MM_DPI_ENGINE, "mm_dpi_engine", "mm_sel", 9),
+ GATE_MM1(CLK_MM_DPI1_PIXEL, "mm_dpi1_pixel", "vpll3_dpix", 10),
+ GATE_MM1(CLK_MM_DPI1_ENGINE, "mm_dpi1_engine", "mm_sel", 11),
+ GATE_MM1(CLK_MM_LVDS_PIXEL, "mm_lvds_pixel", "vpll_dpix", 16),
+ GATE_MM1(CLK_MM_LVDS_CTS, "mm_lvds_cts", "lvdstx", 17),
+ GATE_MM1(CLK_MM_SMI_LARB4, "mm_smi_larb4", "mm_sel", 18),
+ GATE_MM1(CLK_MM_SMI_COMMON1, "mm_smi_common1", "mm_sel", 21),
+ GATE_MM1(CLK_MM_SMI_LARB5, "mm_smi_larb5", "mm_sel", 22),
+ GATE_MM1(CLK_MM_MDP_RDMA2, "mm_mdp_rdma2", "mm_sel", 23),
+ GATE_MM1(CLK_MM_MDP_TDSHP2, "mm_mdp_tdshp2", "mm_sel", 24),
+ GATE_MM1(CLK_MM_DISP_OVL2, "mm_disp_ovl2", "mm_sel", 25),
+ GATE_MM1(CLK_MM_DISP_WDMA2, "mm_disp_wdma2", "mm_sel", 26),
+ GATE_MM1(CLK_MM_DISP_COLOR2, "mm_disp_color2", "mm_sel", 27),
+ GATE_MM1(CLK_MM_DISP_AAL1, "mm_disp_aal1", "mm_sel", 28),
+ GATE_MM1(CLK_MM_DISP_OD1, "mm_disp_od1", "mm_sel", 29),
+ GATE_MM1(CLK_MM_LVDS1_PIXEL, "mm_lvds1_pixel", "vpll3_dpix", 30),
+ GATE_MM1(CLK_MM_LVDS1_CTS, "mm_lvds1_cts", "lvdstx3", 31),
+ /* MM2 */
+ GATE_MM2(CLK_MM_SMI_LARB7, "mm_smi_larb7", "mm_sel", 0),
+ GATE_MM2(CLK_MM_MDP_RDMA3, "mm_mdp_rdma3", "mm_sel", 1),
+ GATE_MM2(CLK_MM_MDP_WROT2, "mm_mdp_wrot2", "mm_sel", 2),
+ GATE_MM2(CLK_MM_DSI2, "mm_dsi2", "mm_sel", 3),
+ GATE_MM2(CLK_MM_DSI2_DIGITAL, "mm_dsi2_digital", "dsi0_lntc", 4),
+ GATE_MM2(CLK_MM_DSI3, "mm_dsi3", "mm_sel", 5),
+ GATE_MM2(CLK_MM_DSI3_DIGITAL, "mm_dsi3_digital", "dsi1_lntc", 6),
+};
+
+static int clk_mt2712_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_mm[] = {
+ { .compatible = "mediatek,mt2712-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_mm_drv = {
+ .probe = clk_mt2712_mm_probe,
+ .driver = {
+ .name = "clk-mt2712-mm",
+ .of_match_table = of_match_clk_mt2712_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
new file mode 100644
index 000000000000..55c64ee8cc91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vdec1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "vdec_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "vdec_sel", 0),
+ GATE_VDEC1(CLK_VDEC_IMGRZ_CKEN, "vdec_imgrz_cken", "vdec_sel", 1),
+};
+
+static int clk_mt2712_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_vdec[] = {
+ { .compatible = "mediatek,mt2712-vdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_vdec_drv = {
+ .probe = clk_mt2712_vdec_probe,
+ .driver = {
+ .name = "clk-mt2712-vdec",
+ .of_match_table = of_match_clk_mt2712_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
new file mode 100644
index 000000000000..ccbfe98777c8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712-venc.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &venc_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "venc_sel", 4),
+ GATE_VENC(CLK_VENC_SMI_LARB6, "venc_smi_larb6", "jpgdec_sel", 12),
+};
+
+static int clk_mt2712_venc_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt2712_venc[] = {
+ { .compatible = "mediatek,mt2712-vencsys", },
+ {}
+};
+
+static struct platform_driver clk_mt2712_venc_drv = {
+ .probe = clk_mt2712_venc_probe,
+ .driver = {
+ .name = "clk-mt2712-venc",
+ .of_match_table = of_match_clk_mt2712_venc,
+ },
+};
+
+builtin_platform_driver(clk_mt2712_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
new file mode 100644
index 000000000000..498d13799388
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt2712-clk.h>
+
+static DEFINE_SPINLOCK(mt2712_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_VPLL3_DPIX, "vpll3_dpix", NULL, 200000000),
+ FIXED_CLK(CLK_TOP_VPLL_DPIX, "vpll_dpix", NULL, 200000000),
+ FIXED_CLK(CLK_TOP_LTEPLL_FS26M, "ltepll_fs26m", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_DMPLL, "dmpll_ck", NULL, 350000000),
+ FIXED_CLK(CLK_TOP_DSI0_LNTC, "dsi0_lntc", NULL, 143000000),
+ FIXED_CLK(CLK_TOP_DSI1_LNTC, "dsi1_lntc", NULL, 143000000),
+ FIXED_CLK(CLK_TOP_LVDSTX3_CLKDIG_CTS, "lvdstx3", NULL, 140000000),
+ FIXED_CLK(CLK_TOP_LVDSTX_CLKDIG_CTS, "lvdstx", NULL, 140000000),
+ FIXED_CLK(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", NULL, 32768),
+ FIXED_CLK(CLK_TOP_CLKRTC_INT, "clkrtc_int", NULL, 32747),
+ FIXED_CLK(CLK_TOP_CSI0, "csi0", NULL, 26000000),
+ FIXED_CLK(CLK_TOP_CVBSPLL, "cvbspll", NULL, 108000000),
+};
+
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_SYS_26M, "sys_26m", "clk26m", 1,
+ 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "sys_26m", 1,
+ 2),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_ARMCA35PLL, "armca35pll_ck", "armca35pll", 1,
+ 1),
+ FACTOR(CLK_TOP_ARMCA35PLL_600M, "armca35pll_600m", "armca35pll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_ARMCA35PLL_400M, "armca35pll_400m", "armca35pll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_ARMCA72PLL, "armca72pll_ck", "armca72pll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL, "syspll_ck", "mainpll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "syspll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "syspll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "syspll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "syspll_d2", 1,
+ 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "syspll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "syspll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "syspll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "syspll_ck", 1,
+ 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "syspll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "syspll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "syspll_ck", 1,
+ 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "syspll_d7", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "syspll_d7", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll_ck", "univpll", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_ck", 1,
+ 7),
+ FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_ck", 1,
+ 26),
+ FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univpll_ck", 1,
+ 52),
+ FACTOR(CLK_TOP_UNIVPLL_D104, "univpll_d104", "univpll_ck", 1,
+ 104),
+ FACTOR(CLK_TOP_UNIVPLL_D208, "univpll_d208", "univpll_ck", 1,
+ 208),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_ck", 1,
+ 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_ck", 1,
+ 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univpll_d5", 1,
+ 8),
+ FACTOR(CLK_TOP_F_MP0_PLL1, "f_mp0_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_MP0_PLL2, "f_mp0_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BIG_PLL1, "f_big_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BIG_PLL2, "f_big_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BUS_PLL1, "f_bus_pll1_ck", "univpll_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_F_BUS_PLL2, "f_bus_pll2_ck", "univpll1_d2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL1_D16, "apll1_d16", "apll1_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL2_D16, "apll2_d16", "apll2_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_LVDSPLL, "lvdspll_ck", "lvdspll", 1,
+ 1),
+ FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_LVDSPLL2, "lvdspll2_ck", "lvdspll2", 1,
+ 1),
+ FACTOR(CLK_TOP_LVDSPLL2_D2, "lvdspll2_d2", "lvdspll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_LVDSPLL2_D4, "lvdspll2_d4", "lvdspll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_LVDSPLL2_D8, "lvdspll2_d8", "lvdspll2_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_ETHERPLL_125M, "etherpll_125m", "etherpll", 1,
+ 1),
+ FACTOR(CLK_TOP_ETHERPLL_50M, "etherpll_50m", "etherpll", 1,
+ 1),
+ FACTOR(CLK_TOP_CVBS, "cvbs", "cvbspll", 1,
+ 1),
+ FACTOR(CLK_TOP_CVBS_D2, "cvbs_d2", "cvbs", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1,
+ 1),
+ FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1,
+ 1),
+ FACTOR(CLK_TOP_VCODECPLL_D2, "vcodecpll_d2", "vcodecpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_429M, "tvdpll_429m", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_429M_D2, "tvdpll_429m_d2", "tvdpll_429m", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_429M_D4, "tvdpll_429m_d4", "tvdpll_429m", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_D2A_ULCLK_6P5M, "d2a_ulclk_6p5m", "clk26m", 1,
+ 4),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "msdcpll2_ck"
+};
+
+static const char * const mem_parents[] = {
+ "clk26m",
+ "dmpll_ck"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll1_d2",
+ "univpll2_d2"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "vcodecpll_ck",
+ "tvdpll_429m",
+ "univpll_d3",
+ "vencpll_ck",
+ "syspll_d3",
+ "univpll1_d2",
+ "mmpll_d2",
+ "syspll3_d2",
+ "tvdpll_ck"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "univpll1_d2",
+ "mmpll_d2",
+ "tvdpll_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "vcodecpll_d2",
+ "univpll2_d2",
+ "syspll3_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "univpll_d3",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll_d5",
+ "univpll_d3",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll2_d2"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_d52",
+ "univpll_d208",
+ "univpll_d104",
+ "clk26m_d2",
+ "univpll_d26",
+ "univpll2_d8",
+ "syspll3_d4",
+ "syspll3_d2",
+ "univpll1_d4",
+ "univpll2_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll2_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll1_d4",
+ "univpll2_d2",
+ "univpll3_d2",
+ "univpll1_d8"
+};
+
+static const char * const usb20_parents[] = {
+ "clk26m",
+ "univpll1_d8",
+ "univpll3_d4"
+};
+
+static const char * const usb30_parents[] = {
+ "clk26m",
+ "univpll3_d2",
+ "univpll3_d4",
+ "univpll2_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "syspll2_d2",
+ "syspll4_d2",
+ "univpll_d5",
+ "univpll1_d4"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "msdcpll_d4",
+ "vencpll_d2",
+ "univpll1_d2",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "msdcpll2_d4"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll2_d2",
+ "msdcpll_d2",
+ "univpll1_d4",
+ "syspll2_d2",
+ "univpll_d7",
+ "vencpll_d2"
+};
+
+static const char * const msdc30_3_parents[] = {
+ "clk26m",
+ "msdcpll2_ck",
+ "msdcpll2_d2",
+ "univpll2_d2",
+ "msdcpll2_d4",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll_d7",
+ "univpll_d7",
+ "vencpll_d2",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "msdcpll_d4"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll1_d4",
+ "syspll4_d2",
+ "univpll3_d2",
+ "univpll2_d8",
+ "syspll3_d2",
+ "syspll3_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "univpll_d26",
+ "syspll3_d4"
+};
+
+static const char * const dpilvds1_parents[] = {
+ "clk26m",
+ "lvdspll2_ck",
+ "lvdspll2_d2",
+ "lvdspll2_d4",
+ "lvdspll2_d8",
+ "clkfpc"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5"
+};
+
+static const char * const nr_parents[] = {
+ "clk26m",
+ "univpll1_d4",
+ "syspll2_d2",
+ "syspll1_d4",
+ "univpll1_d8",
+ "univpll3_d2",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const nfi2x_parents[] = {
+ "clk26m",
+ "syspll4_d4",
+ "univpll3_d4",
+ "univpll1_d8",
+ "syspll2_d4",
+ "univpll3_d2",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2",
+ "syspll_d5",
+ "syspll1_d2"
+};
+
+static const char * const irda_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "syspll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const cci400_parents[] = {
+ "clk26m",
+ "vencpll_ck",
+ "armca35pll_600m",
+ "armca35pll_400m",
+ "univpll_d2",
+ "syspll_d2",
+ "msdcpll_ck",
+ "univpll_d3"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "univpll2_d4",
+ "univpll2_d8"
+};
+
+static const char * const mem_mfg_parents[] = {
+ "clk26m",
+ "mmpll_ck",
+ "univpll_d3"
+};
+
+static const char * const axi_mfg_parents[] = {
+ "clk26m",
+ "axi_sel",
+ "univpll_d5"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll3_d2",
+ "univpll2_d4",
+ "syspll2_d4"
+};
+
+static const char * const nfiecc_parents[] = {
+ "clk26m",
+ "nfi2x_sel",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2",
+ "univpll_d5",
+ "syspll1_d2"
+};
+
+static const char * const pe2_mac_p0_parents[] = {
+ "clk26m",
+ "syspll1_d8",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4",
+ "syspll3_d2"
+};
+
+static const char * const dpilvds_parents[] = {
+ "clk26m",
+ "lvdspll_ck",
+ "lvdspll_d2",
+ "lvdspll_d4",
+ "lvdspll_d8",
+ "clkfpc"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "syspll4_d2",
+ "syspll3_d4",
+ "univpll2_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll_d52",
+ "univpll2_d8"
+};
+
+static const char * const rtc_parents[] = {
+ "clkrtc_int",
+ "clkrtc_ext",
+ "clk26m",
+ "univpll3_d8"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "syspll4_d4",
+ "univpll2_d8",
+ "univpll3_d4",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4",
+ "etherpll_125m",
+ "syspll1_d4"
+};
+
+static const char * const apll_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8",
+ "apll1_d16",
+ "apll2_ck",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8",
+ "apll2_d16",
+ "clk26m",
+ "clk26m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const a2sys_hp_parents[] = {
+ "clk26m",
+ "apll2_ck",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const asm_l_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll2_d2",
+ "syspll_d5"
+};
+
+static const char * const i2so1_parents[] = {
+ "clk26m",
+ "apll1_ck",
+ "apll2_ck"
+};
+
+static const char * const ether_125m_parents[] = {
+ "clk26m",
+ "etherpll_125m",
+ "univpll3_d2"
+};
+
+static const char * const ether_50m_parents[] = {
+ "clk26m",
+ "etherpll_50m",
+ "univpll_d26",
+ "univpll3_d4"
+};
+
+static const char * const jpgdec_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "tvdpll_429m",
+ "vencpll_ck",
+ "syspll_d3",
+ "vcodecpll_ck",
+ "univpll1_d2",
+ "armca35pll_400m",
+ "tvdpll_429m_d2",
+ "tvdpll_429m_d4"
+};
+
+static const char * const spislv_parents[] = {
+ "clk26m",
+ "univpll2_d4",
+ "univpll1_d4",
+ "univpll2_d2",
+ "univpll3_d2",
+ "univpll1_d8",
+ "univpll1_d2",
+ "univpll_d5"
+};
+
+static const char * const ether_parents[] = {
+ "clk26m",
+ "etherpll_50m",
+ "univpll_d26"
+};
+
+static const char * const di_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "vencpll_ck",
+ "vencpll_d2",
+ "cvbs",
+ "cvbs_d2"
+};
+
+static const char * const tvd_parents[] = {
+ "clk26m",
+ "cvbs_d2",
+ "univpll2_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "univpll_d26",
+ "univpll2_d4",
+ "univpll3_d2",
+ "univpll1_d4"
+};
+
+static const char * const msdc0p_aes_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "univpll_d3",
+ "vcodecpll_ck"
+};
+
+static const char * const cmsys_parents[] = {
+ "clk26m",
+ "univpll_d3",
+ "syspll_d3",
+ "syspll1_d2",
+ "syspll2_d2"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "syspll_d3",
+ "syspll1_d2",
+ "univpll1_d2",
+ "univpll_d5",
+ "univpll3_d2",
+ "univpll_d3"
+};
+
+static const char * const aud_apll1_parents[] = {
+ "apll1",
+ "clkaud_ext_i_1"
+};
+
+static const char * const aud_apll2_parents[] = {
+ "apll2",
+ "clkaud_ext_i_2"
+};
+
+static const char * const audull_vtx_parents[] = {
+ "d2a_ulclk_6p5m",
+ "clkaud_ext_i_0"
+};
+
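+/*
+ * MUX_GATE(id, name, parents, reg, mux shift, mux width, gate bit);
+ * the axi, mem and rtc selectors keep CLK_IS_CRITICAL so the framework
+ * never gates them.
+ */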
+static struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x040, 0, 3,
+ 7, CLK_IS_CRITICAL),
+ MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x040, 8, 1,
+ 15, CLK_IS_CRITICAL),
+ MUX_GATE(CLK_TOP_MM_SEL, "mm_sel",
+ mm_parents, 0x040, 24, 3, 31),
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel",
+ pwm_parents, 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel",
+ vdec_parents, 0x050, 8, 4, 15),
+ MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel",
+ venc_parents, 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel",
+ mfg_parents, 0x050, 24, 4, 31),
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel",
+ camtg_parents, 0x060, 0, 4, 7),
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel",
+ uart_parents, 0x060, 8, 1, 15),
+ MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel",
+ spi_parents, 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel",
+ usb20_parents, 0x060, 24, 2, 31),
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel",
+ usb30_parents, 0x070, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MSDC50_0_HCLK_SEL, "msdc50_0_h_sel",
+ msdc50_0_h_parents, 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel",
+ msdc50_0_parents, 0x070, 16, 4, 23),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel",
+ msdc30_1_parents, 0x070, 24, 3, 31),
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel",
+ msdc30_1_parents, 0x080, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel",
+ msdc30_3_parents, 0x080, 8, 4, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel",
+ audio_parents, 0x080, 16, 2, 23),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel",
+ aud_intbus_parents, 0x080, 24, 3, 31),
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel",
+ pmicspi_parents, 0x090, 0, 3, 7),
+ MUX_GATE(CLK_TOP_DPILVDS1_SEL, "dpilvds1_sel",
+ dpilvds1_parents, 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel",
+ atb_parents, 0x090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_NR_SEL, "nr_sel",
+ nr_parents, 0x090, 24, 3, 31),
+ /* CLK_CFG_6 */
+ MUX_GATE(CLK_TOP_NFI2X_SEL, "nfi2x_sel",
+ nfi2x_parents, 0x0a0, 0, 4, 7),
+ MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel",
+ irda_parents, 0x0a0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel",
+ cci400_parents, 0x0a0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel",
+ aud_1_parents, 0x0a0, 24, 2, 31),
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel",
+ aud_2_parents, 0x0b0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_MEM_MFG_IN_AS_SEL, "mem_mfg_sel",
+ mem_mfg_parents, 0x0b0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_AXI_MFG_IN_AS_SEL, "axi_mfg_sel",
+ axi_mfg_parents, 0x0b0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel",
+ scam_parents, 0x0b0, 24, 2, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE(CLK_TOP_NFIECC_SEL, "nfiecc_sel",
+ nfiecc_parents, 0x0c0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PE2_MAC_P0_SEL, "pe2_mac_p0_sel",
+ pe2_mac_p0_parents, 0x0c0, 8, 3, 15),
+ MUX_GATE(CLK_TOP_PE2_MAC_P1_SEL, "pe2_mac_p1_sel",
+ pe2_mac_p0_parents, 0x0c0, 16, 3, 23),
+ MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel",
+ dpilvds_parents, 0x0c0, 24, 3, 31),
+ /* CLK_CFG_9 */
+ MUX_GATE(CLK_TOP_MSDC50_3_HCLK_SEL, "msdc50_3_h_sel",
+ msdc50_0_h_parents, 0x0d0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel",
+ hdcp_parents, 0x0d0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel",
+ hdcp_24m_parents, 0x0d0, 16, 2, 23),
+ MUX_GATE_FLAGS(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x0d0, 24, 2,
+ 31, CLK_IS_CRITICAL),
+ /* CLK_CFG_10 */
+ MUX_GATE(CLK_TOP_SPINOR_SEL, "spinor_sel",
+ spinor_parents, 0x500, 0, 4, 7),
+ MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel",
+ apll_parents, 0x500, 8, 4, 15),
+ MUX_GATE(CLK_TOP_APLL2_SEL, "apll2_sel",
+ apll_parents, 0x500, 16, 4, 23),
+ MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel",
+ a1sys_hp_parents, 0x500, 24, 3, 31),
+ /* CLK_CFG_11 */
+ MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel",
+ a2sys_hp_parents, 0x510, 0, 3, 7),
+ MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel",
+ asm_l_parents, 0x510, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel",
+ asm_l_parents, 0x510, 16, 2, 23),
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel",
+ asm_l_parents, 0x510, 24, 2, 31),
+ /* CLK_CFG_12 */
+ MUX_GATE(CLK_TOP_I2SO1_SEL, "i2so1_sel",
+ i2so1_parents, 0x520, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SO2_SEL, "i2so2_sel",
+ i2so1_parents, 0x520, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SO3_SEL, "i2so3_sel",
+ i2so1_parents, 0x520, 16, 2, 23),
+ MUX_GATE(CLK_TOP_TDMO0_SEL, "tdmo0_sel",
+ i2so1_parents, 0x520, 24, 2, 31),
+ /* CLK_CFG_13 */
+ MUX_GATE(CLK_TOP_TDMO1_SEL, "tdmo1_sel",
+ i2so1_parents, 0x530, 0, 2, 7),
+ MUX_GATE(CLK_TOP_I2SI1_SEL, "i2si1_sel",
+ i2so1_parents, 0x530, 8, 2, 15),
+ MUX_GATE(CLK_TOP_I2SI2_SEL, "i2si2_sel",
+ i2so1_parents, 0x530, 16, 2, 23),
+ MUX_GATE(CLK_TOP_I2SI3_SEL, "i2si3_sel",
+ i2so1_parents, 0x530, 24, 2, 31),
+ /* CLK_CFG_14 */
+ MUX_GATE(CLK_TOP_ETHER_125M_SEL, "ether_125m_sel",
+ ether_125m_parents, 0x540, 0, 2, 7),
+ MUX_GATE(CLK_TOP_ETHER_50M_SEL, "ether_50m_sel",
+ ether_50m_parents, 0x540, 8, 2, 15),
+ MUX_GATE(CLK_TOP_JPGDEC_SEL, "jpgdec_sel",
+ jpgdec_parents, 0x540, 16, 4, 23),
+ MUX_GATE(CLK_TOP_SPISLV_SEL, "spislv_sel",
+ spislv_parents, 0x540, 24, 3, 31),
+ /* CLK_CFG_15 */
+ MUX_GATE(CLK_TOP_ETHER_50M_RMII_SEL, "ether_sel",
+ ether_parents, 0x550, 0, 2, 7),
+ MUX_GATE(CLK_TOP_CAM2TG_SEL, "cam2tg_sel",
+ camtg_parents, 0x550, 8, 4, 15),
+ MUX_GATE(CLK_TOP_DI_SEL, "di_sel",
+ di_parents, 0x550, 16, 3, 23),
+ MUX_GATE(CLK_TOP_TVD_SEL, "tvd_sel",
+ tvd_parents, 0x550, 24, 2, 31),
+ /* CLK_CFG_16 */
+ MUX_GATE(CLK_TOP_I2C_SEL, "i2c_sel",
+ i2c_parents, 0x560, 0, 3, 7),
+ MUX_GATE(CLK_TOP_PWM_INFRA_SEL, "pwm_infra_sel",
+ pwm_parents, 0x560, 8, 2, 15),
+ MUX_GATE(CLK_TOP_MSDC0P_AES_SEL, "msdc0p_aes_sel",
+ msdc0p_aes_parents, 0x560, 16, 2, 23),
+ MUX_GATE(CLK_TOP_CMSYS_SEL, "cmsys_sel",
+ cmsys_parents, 0x560, 24, 3, 31),
+ /* CLK_CFG_17 */
+ MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel",
+ gcpu_parents, 0x570, 0, 3, 7),
+ /* CLK_AUDDIV_4 */
+ MUX(CLK_TOP_AUD_APLL1_SEL, "aud_apll1_sel",
+ aud_apll1_parents, 0x134, 0, 1),
+ MUX(CLK_TOP_AUD_APLL2_SEL, "aud_apll2_sel",
+ aud_apll2_parents, 0x134, 1, 1),
+ MUX(CLK_TOP_DA_AUDULL_VTX_6P5M_SEL, "audull_vtx_sel",
+ audull_vtx_parents, 0x134, 31, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+ "clk26m",
+ "armca35pll_ck",
+ "f_mp0_pll1_ck",
+ "f_mp0_pll2_ck"
+};
+
+static const char * const mcu_mp2_parents[] = {
+ "clk26m",
+ "armca72pll_ck",
+ "f_big_pll1_ck",
+ "f_big_pll2_ck"
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "cci400_sel",
+ "f_bus_pll1_ck",
+ "f_bus_pll2_ck"
+};
+
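+/* CPU and bus muxes in mcucfg; all are critical so the cores never lose their clock. */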
+static struct mtk_composite mcu_muxes[] = {
+ /* mp0_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0,
+ 9, 2, -1, CLK_IS_CRITICAL),
+ /* mp2_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8,
+ 9, 2, -1, CLK_IS_CRITICAL),
+ /* bus_pll_divider_cfg */
+ MUX_GATE_FLAGS(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0,
+ 9, 2, -1, CLK_IS_CRITICAL),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL_DIV0, "apll_div0", "i2so1_sel", 0x124, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV1, "apll_div1", "i2so2_sel", 0x124, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV2, "apll_div2", "i2so3_sel", 0x124, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV3, "apll_div3", "tdmo0_sel", 0x124, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV4, "apll_div4", "tdmo1_sel", 0x128, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV5, "apll_div5", "i2si1_sel", 0x128, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV6, "apll_div6", "i2si2_sel", 0x128, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL_DIV7, "apll_div7", "i2si3_sel", 0x128, 24, 8),
+};
+
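+/* set/clr/sta all map to the same register, so these gates use the no_setclr ops. */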
+static const struct mtk_gate_regs top_cg_regs = {
+ .set_ofs = 0x120,
+ .clr_ofs = 0x120,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] = {
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN0, "apll_div_pdn0", "i2so1_sel", 0),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN1, "apll_div_pdn1", "i2so2_sel", 1),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN2, "apll_div_pdn2", "i2so3_sel", 2),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN3, "apll_div_pdn3", "tdmo0_sel", 3),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN4, "apll_div_pdn4", "tdmo1_sel", 4),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN5, "apll_div_pdn5", "i2si1_sel", 5),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN6, "apll_div_pdn6", "i2si2_sel", 6),
+ GATE_TOP(CLK_TOP_APLL_DIV_PDN7, "apll_div_pdn7", "i2si3_sel", 7),
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x40,
+};
+
+#define GATE_INFRA(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+ GATE_INFRA(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6),
+ GATE_INFRA(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8),
+ GATE_INFRA(CLK_INFRA_KP, "infra_kp", "axi_sel", 16),
+ GATE_INFRA(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "spi_sel", 24),
+ GATE_INFRA(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "spislv_sel", 25),
+ GATE_INFRA(CLK_INFRA_AO_UART5, "infra_ao_uart5", "axi_sel", 26),
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0xc,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs peri2_cg_regs = {
+ .set_ofs = 0x42c,
+ .clr_ofs = 0x42c,
+ .sta_ofs = 0x42c,
+};
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate peri_clks[] = {
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_NFI, "per_nfi",
+ "axi_sel", 0),
+ GATE_PERI0(CLK_PERI_THERM, "per_therm",
+ "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM0, "per_pwm0",
+ "pwm_sel", 2),
+ GATE_PERI0(CLK_PERI_PWM1, "per_pwm1",
+ "pwm_sel", 3),
+ GATE_PERI0(CLK_PERI_PWM2, "per_pwm2",
+ "pwm_sel", 4),
+ GATE_PERI0(CLK_PERI_PWM3, "per_pwm3",
+ "pwm_sel", 5),
+ GATE_PERI0(CLK_PERI_PWM4, "per_pwm4",
+ "pwm_sel", 6),
+ GATE_PERI0(CLK_PERI_PWM5, "per_pwm5",
+ "pwm_sel", 7),
+ GATE_PERI0(CLK_PERI_PWM6, "per_pwm6",
+ "pwm_sel", 8),
+ GATE_PERI0(CLK_PERI_PWM7, "per_pwm7",
+ "pwm_sel", 9),
+ GATE_PERI0(CLK_PERI_PWM, "per_pwm",
+ "pwm_sel", 10),
+ GATE_PERI0(CLK_PERI_AP_DMA, "per_ap_dma",
+ "axi_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_0, "per_msdc30_0",
+ "msdc50_0_sel", 14),
+ GATE_PERI0(CLK_PERI_MSDC30_1, "per_msdc30_1",
+ "msdc30_1_sel", 15),
+ GATE_PERI0(CLK_PERI_MSDC30_2, "per_msdc30_2",
+ "msdc30_2_sel", 16),
+ GATE_PERI0(CLK_PERI_MSDC30_3, "per_msdc30_3",
+ "msdc30_3_sel", 17),
+ GATE_PERI0(CLK_PERI_UART0, "per_uart0",
+ "uart_sel", 20),
+ GATE_PERI0(CLK_PERI_UART1, "per_uart1",
+ "uart_sel", 21),
+ GATE_PERI0(CLK_PERI_UART2, "per_uart2",
+ "uart_sel", 22),
+ GATE_PERI0(CLK_PERI_UART3, "per_uart3",
+ "uart_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C0, "per_i2c0",
+ "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C1, "per_i2c1",
+ "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_I2C2, "per_i2c2",
+ "axi_sel", 26),
+ GATE_PERI0(CLK_PERI_I2C3, "per_i2c3",
+ "axi_sel", 27),
+ GATE_PERI0(CLK_PERI_I2C4, "per_i2c4",
+ "axi_sel", 28),
+ GATE_PERI0(CLK_PERI_AUXADC, "per_auxadc",
+ "ltepll_fs26m", 29),
+ GATE_PERI0(CLK_PERI_SPI0, "per_spi0",
+ "spi_sel", 30),
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_SPI, "per_spi",
+ "spinor_sel", 1),
+ GATE_PERI1(CLK_PERI_I2C5, "per_i2c5",
+ "axi_sel", 3),
+ GATE_PERI1(CLK_PERI_SPI2, "per_spi2",
+ "spi_sel", 5),
+ GATE_PERI1(CLK_PERI_SPI3, "per_spi3",
+ "spi_sel", 6),
+ GATE_PERI1(CLK_PERI_SPI5, "per_spi5",
+ "spi_sel", 8),
+ GATE_PERI1(CLK_PERI_UART4, "per_uart4",
+ "uart_sel", 9),
+ GATE_PERI1(CLK_PERI_SFLASH, "per_sflash",
+ "uart_sel", 11),
+ GATE_PERI1(CLK_PERI_GMAC, "per_gmac",
+ "uart_sel", 12),
+ GATE_PERI1(CLK_PERI_PCIE0, "per_pcie0",
+ "uart_sel", 14),
+ GATE_PERI1(CLK_PERI_PCIE1, "per_pcie1",
+ "uart_sel", 15),
+ GATE_PERI1(CLK_PERI_GMAC_PCLK, "per_gmac_pclk",
+ "uart_sel", 16),
+ /* PERI2 */
+ GATE_PERI2(CLK_PERI_MSDC50_0_EN, "per_msdc50_0_en",
+ "msdc50_0_sel", 0),
+ GATE_PERI2(CLK_PERI_MSDC30_1_EN, "per_msdc30_1_en",
+ "msdc30_1_sel", 1),
+ GATE_PERI2(CLK_PERI_MSDC30_2_EN, "per_msdc30_2_en",
+ "msdc30_2_sel", 2),
+ GATE_PERI2(CLK_PERI_MSDC30_3_EN, "per_msdc30_3_en",
+ "msdc30_3_sel", 3),
+ GATE_PERI2(CLK_PERI_MSDC50_0_HCLK_EN, "per_msdc50_0_h",
+ "msdc50_0_h_sel", 4),
+ GATE_PERI2(CLK_PERI_MSDC50_3_HCLK_EN, "per_msdc50_3_h",
+ "msdc50_3_h_sel", 5),
+};
+
+#define MT2712_PLL_FMAX (3000UL * MHZ)
+
+#define CON0_MT2712_RST_BAR BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT2712_RST_BAR, \
+ .fmax = MT2712_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _pcwbits, _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, _pcw_reg, \
+ _pcw_shift, NULL)
+
+static const struct mtk_pll_div_table armca35pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1202500000 },
+ { .div = 2, .freq = 500500000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table armca72pll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 994500000 },
+ { .div = 2, .freq = 520000000 },
+ { .div = 3, .freq = 315250000 },
+ { .div = 4, .freq = 157625000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT2712_PLL_FMAX },
+ { .div = 1, .freq = 1001000000 },
+ { .div = 2, .freq = 601250000 },
+ { .div = 3, .freq = 250250000 },
+ { .div = 4, .freq = 125125000 },
+ { } /* sentinel */
+};
+
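+/*
+ * PLL()/PLL_B() arguments follow the macros above: con0 reg, pwr reg,
+ * enable mask, flags, pcw bits, post-div reg/shift, tuner regs/bit and
+ * pcw reg/shift; PLL_B() additionally takes a post-divider freq table.
+ */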
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000101,
+ HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000101,
+ HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000101,
+ 0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0),
+ PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000101,
+ 0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000101,
+ 0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000101,
+ 0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0),
+ PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000101,
+ 0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0),
+ PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000101,
+ 0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000101,
+ 0, 31, 0x0270, 4, 0, 0, 0, 0x0274, 0),
+ PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000101,
+ 0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000101,
+ 0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000101,
+ 0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0,
+ mmpll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000101,
+ HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0,
+ armca35pll_div_table),
+ PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000101,
+ 0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0,
+ armca72pll_div_table),
+ PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000101,
+ 0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0),
+};
+
+static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt2712_top_init_early(struct device_node *node)
+{
+ int r, i;
+
+ if (!top_clk_data) {
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
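+  /* Mark every slot deferred until the platform driver probe fills in the real clocks. */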
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+
+CLK_OF_DECLARE_DRIVER(mt2712_topckgen, "mediatek,mt2712-topckgen",
+ clk_mt2712_top_init_early);
+
+static int clk_mt2712_top_probe(struct platform_device *pdev)
+{
+ int r, i;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ if (!top_clk_data) {
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ } else {
+ for (i = 0; i < CLK_TOP_NR_CLK; i++) {
+ if (top_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER))
+ top_clk_data->clks[i] = ERR_PTR(-ENOENT);
+ }
+ }
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ top_clk_data);
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt2712_clk_lock, top_clk_data);
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ &mt2712_clk_lock, top_clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ top_clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
+static int clk_mt2712_infra_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ mtk_register_reset_controller(node, 2, 0x30);
+
+ return r;
+}
+
+static int clk_mt2712_peri_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ mtk_register_reset_controller(node, 2, 0);
+
+ return r;
+}
+
+static int clk_mt2712_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return PTR_ERR(base);
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+ mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ &mt2712_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r != 0)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+ return r;
+}
+
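+/* Each compatible carries its sub-probe as match data; clk_mt2712_probe() dispatches on it. */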
+static const struct of_device_id of_match_clk_mt2712[] = {
+ {
+ .compatible = "mediatek,mt2712-apmixedsys",
+ .data = clk_mt2712_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt2712-topckgen",
+ .data = clk_mt2712_top_probe,
+ }, {
+ .compatible = "mediatek,mt2712-infracfg",
+ .data = clk_mt2712_infra_probe,
+ }, {
+ .compatible = "mediatek,mt2712-pericfg",
+ .data = clk_mt2712_peri_probe,
+ }, {
+ .compatible = "mediatek,mt2712-mcucfg",
+ .data = clk_mt2712_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt2712_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r != 0)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt2712_drv = {
+ .probe = clk_mt2712_probe,
+ .driver = {
+ .name = "clk-mt2712",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_clk_mt2712,
+ },
+};
+
+static int __init clk_mt2712_init(void)
+{
+ return platform_driver_register(&clk_mt2712_drv);
+}
+
+arch_initcall(clk_mt2712_init);
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
new file mode 100644
index 000000000000..fad7d9fc53ba
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_AUDIO3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &audio3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
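+/* Each audio gate bank is a single register (no set/clr pair), hence the no_setclr ops above. */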
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs audio2_cg_regs = {
+ .set_ofs = 0x14,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs audio3_cg_regs = {
+ .set_ofs = 0x634,
+ .clr_ofs = 0x634,
+ .sta_ofs = 0x634,
+};
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "audio_afe", "rtc", 2),
+ GATE_AUDIO0(CLK_AUDIO_HDMI, "audio_hdmi", "apll1_ck_sel", 20),
+ GATE_AUDIO0(CLK_AUDIO_SPDF, "audio_spdf", "apll1_ck_sel", 21),
+ GATE_AUDIO0(CLK_AUDIO_APLL, "audio_apll", "apll1_ck_sel", 23),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2SIN1, "audio_i2sin1", "a1sys_hp_sel", 0),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN2, "audio_i2sin2", "a1sys_hp_sel", 1),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN3, "audio_i2sin3", "a1sys_hp_sel", 2),
+ GATE_AUDIO1(CLK_AUDIO_I2SIN4, "audio_i2sin4", "a1sys_hp_sel", 3),
+ GATE_AUDIO1(CLK_AUDIO_I2SO1, "audio_i2so1", "a1sys_hp_sel", 6),
+ GATE_AUDIO1(CLK_AUDIO_I2SO2, "audio_i2so2", "a1sys_hp_sel", 7),
+ GATE_AUDIO1(CLK_AUDIO_I2SO3, "audio_i2so3", "a1sys_hp_sel", 8),
+ GATE_AUDIO1(CLK_AUDIO_I2SO4, "audio_i2so4", "a1sys_hp_sel", 9),
+ GATE_AUDIO1(CLK_AUDIO_ASRCI1, "audio_asrci1", "asm_h_sel", 12),
+ GATE_AUDIO1(CLK_AUDIO_ASRCI2, "audio_asrci2", "asm_h_sel", 13),
+ GATE_AUDIO1(CLK_AUDIO_ASRCO1, "audio_asrco1", "asm_h_sel", 14),
+ GATE_AUDIO1(CLK_AUDIO_ASRCO2, "audio_asrco2", "asm_h_sel", 15),
+ GATE_AUDIO1(CLK_AUDIO_INTDIR, "audio_intdir", "intdir_sel", 20),
+ GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
+ GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
+ /* AUDIO2 */
+ GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
+ GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
+ GATE_AUDIO2(CLK_AUDIO_UL3, "audio_ul3", "a1sys_hp_sel", 2),
+ GATE_AUDIO2(CLK_AUDIO_UL4, "audio_ul4", "a1sys_hp_sel", 3),
+ GATE_AUDIO2(CLK_AUDIO_UL5, "audio_ul5", "a1sys_hp_sel", 4),
+ GATE_AUDIO2(CLK_AUDIO_UL6, "audio_ul6", "a1sys_hp_sel", 5),
+ GATE_AUDIO2(CLK_AUDIO_DL1, "audio_dl1", "a1sys_hp_sel", 6),
+ GATE_AUDIO2(CLK_AUDIO_DL2, "audio_dl2", "a1sys_hp_sel", 7),
+ GATE_AUDIO2(CLK_AUDIO_DL3, "audio_dl3", "a1sys_hp_sel", 8),
+ GATE_AUDIO2(CLK_AUDIO_DL4, "audio_dl4", "a1sys_hp_sel", 9),
+ GATE_AUDIO2(CLK_AUDIO_DL5, "audio_dl5", "a1sys_hp_sel", 10),
+ GATE_AUDIO2(CLK_AUDIO_DL6, "audio_dl6", "a1sys_hp_sel", 11),
+ GATE_AUDIO2(CLK_AUDIO_DLMCH, "audio_dlmch", "a1sys_hp_sel", 12),
+ GATE_AUDIO2(CLK_AUDIO_ARB1, "audio_arb1", "a1sys_hp_sel", 13),
+ GATE_AUDIO2(CLK_AUDIO_AWB, "audio_awb", "a1sys_hp_sel", 14),
+ GATE_AUDIO2(CLK_AUDIO_AWB2, "audio_awb2", "a1sys_hp_sel", 15),
+ GATE_AUDIO2(CLK_AUDIO_DAI, "audio_dai", "a1sys_hp_sel", 16),
+ GATE_AUDIO2(CLK_AUDIO_MOD, "audio_mod", "a1sys_hp_sel", 17),
+ /* AUDIO3 */
+ GATE_AUDIO3(CLK_AUDIO_ASRCI3, "audio_asrci3", "asm_h_sel", 2),
+ GATE_AUDIO3(CLK_AUDIO_ASRCI4, "audio_asrci4", "asm_h_sel", 3),
+ GATE_AUDIO3(CLK_AUDIO_ASRCO3, "audio_asrco3", "asm_h_sel", 6),
+ GATE_AUDIO3(CLK_AUDIO_ASRCO4, "audio_asrco4", "asm_h_sel", 7),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC1, "audio_mem_asrc1", "asm_h_sel", 10),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC2, "audio_mem_asrc2", "asm_h_sel", 11),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC3, "audio_mem_asrc3", "asm_h_sel", 12),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC4, "audio_mem_asrc4", "asm_h_sel", 13),
+ GATE_AUDIO3(CLK_AUDIO_MEM_ASRC5, "audio_mem_asrc5", "asm_h_sel", 14),
+};
+
+static int clk_mt7622_audiosys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_aud[] = {
+ {
+ .compatible = "mediatek,mt7622-audsys",
+ .data = clk_mt7622_audiosys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_aud_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_aud_drv = {
+ .probe = clk_mt7622_aud_probe,
+ .driver = {
+ .name = "clk-mt7622-aud",
+ .of_match_table = of_match_clk_mt7622_aud,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_aud_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
new file mode 100644
index 000000000000..6328127bbb3c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate eth_clks[] = {
+ GATE_ETH(CLK_ETH_HSDMA_EN, "eth_hsdma_en", "eth_sel", 5),
+ GATE_ETH(CLK_ETH_ESW_EN, "eth_esw_en", "eth_500m", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "txclk_src_pre", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "txclk_src_pre", 8),
+ GATE_ETH(CLK_ETH_GP0_EN, "eth_gp0_en", "txclk_src_pre", 9),
+};
+
+static const struct mtk_gate_regs sgmii_cg_regs = {
+ .set_ofs = 0xE4,
+ .clr_ofs = 0xE4,
+ .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii_clks[] = {
+ GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
+ "ssusb_tx250m", 2),
+ GATE_SGMII(CLK_SGMII_RX250M_EN, "sgmii_rx250m_en",
+ "ssusb_eq_rx250m", 3),
+ GATE_SGMII(CLK_SGMII_CDR_REF, "sgmii_cdr_ref",
+ "ssusb_cdr_ref", 4),
+ GATE_SGMII(CLK_SGMII_CDR_FB, "sgmii_cdr_fb",
+ "ssusb_cdr_fb", 5),
+};
+
+static int clk_mt7622_ethsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
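+ /* ethsys also exposes a reset controller, hosted at register offset 0x34. */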
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+
+ mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7622_eth[] = {
+ {
+ .compatible = "mediatek,mt7622-ethsys",
+ .data = clk_mt7622_ethsys_init,
+ }, {
+ .compatible = "mediatek,mt7622-sgmiisys",
+ .data = clk_mt7622_sgmiisys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_eth_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_eth_drv = {
+ .probe = clk_mt7622_eth_probe,
+ .driver = {
+ .name = "clk-mt7622-eth",
+ .of_match_table = of_match_clk_mt7622_eth,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
new file mode 100644
index 000000000000..a6e8534276c6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+
+#define GATE_PCIE(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pcie_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_SSUSB(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ssusb_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs pcie_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate_regs ssusb_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate ssusb_clks[] = {
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_1P_EN, "ssusb_u2_phy_1p",
+ "to_u2_phy_1p", 0),
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_EN, "ssusb_u2_phy_en", "to_u2_phy", 1),
+ GATE_SSUSB(CLK_SSUSB_REF_EN, "ssusb_ref_en", "to_usb3_ref", 5),
+ GATE_SSUSB(CLK_SSUSB_SYS_EN, "ssusb_sys_en", "to_usb3_sys", 6),
+ GATE_SSUSB(CLK_SSUSB_MCU_EN, "ssusb_mcu_en", "axi_sel", 7),
+ GATE_SSUSB(CLK_SSUSB_DMA_EN, "ssusb_dma_en", "hif_sel", 8),
+};
+
+static const struct mtk_gate pcie_clks[] = {
+ GATE_PCIE(CLK_PCIE_P1_AUX_EN, "pcie_p1_aux_en", "p1_1mhz", 12),
+ GATE_PCIE(CLK_PCIE_P1_OBFF_EN, "pcie_p1_obff_en", "free_run_4mhz", 13),
+ GATE_PCIE(CLK_PCIE_P1_AHB_EN, "pcie_p1_ahb_en", "axi_sel", 14),
+ GATE_PCIE(CLK_PCIE_P1_AXI_EN, "pcie_p1_axi_en", "hif_sel", 15),
+ GATE_PCIE(CLK_PCIE_P1_MAC_EN, "pcie_p1_mac_en", "pcie1_mac_en", 16),
+ GATE_PCIE(CLK_PCIE_P1_PIPE_EN, "pcie_p1_pipe_en", "pcie1_pipe_en", 17),
+ GATE_PCIE(CLK_PCIE_P0_AUX_EN, "pcie_p0_aux_en", "p0_1mhz", 18),
+ GATE_PCIE(CLK_PCIE_P0_OBFF_EN, "pcie_p0_obff_en", "free_run_4mhz", 19),
+ GATE_PCIE(CLK_PCIE_P0_AHB_EN, "pcie_p0_ahb_en", "axi_sel", 20),
+ GATE_PCIE(CLK_PCIE_P0_AXI_EN, "pcie_p0_axi_en", "hif_sel", 21),
+ GATE_PCIE(CLK_PCIE_P0_MAC_EN, "pcie_p0_mac_en", "pcie0_mac_en", 22),
+ GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
+ GATE_PCIE(CLK_SATA_AHB_EN, "sata_ahb_en", "axi_sel", 26),
+ GATE_PCIE(CLK_SATA_AXI_EN, "sata_axi_en", "hif_sel", 27),
+ GATE_PCIE(CLK_SATA_ASIC_EN, "sata_asic_en", "sata_asic", 28),
+ GATE_PCIE(CLK_SATA_RBC_EN, "sata_rbc_en", "sata_rbc", 29),
+ GATE_PCIE(CLK_SATA_PM_EN, "sata_pm_en", "univpll2_d4", 30),
+};
+
+static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+
+ mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7622_pciesys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+
+ mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
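+/* One driver covers both the pciesys and ssusbsys instances of the HIF clock block. */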
+static const struct of_device_id of_match_clk_mt7622_hif[] = {
+ {
+ .compatible = "mediatek,mt7622-pciesys",
+ .data = clk_mt7622_pciesys_init,
+ }, {
+ .compatible = "mediatek,mt7622-ssusbsys",
+ .data = clk_mt7622_ssusbsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_hif_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_hif_drv = {
+ .probe = clk_mt7622_hif_probe,
+ .driver = {
+ .name = "clk-mt7622-hif",
+ .of_match_table = of_match_clk_mt7622_hif,
+ },
+};
+
+builtin_platform_driver(clk_mt7622_hif_drv);
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
new file mode 100644
index 000000000000..92f7e32770c6
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -0,0 +1,780 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chen Zhong <chen.zhong@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-cpumux.h"
+
+#include <dt-bindings/clock/mt7622-clk.h>
+#include <linux/clk.h> /* for consumer */
+
+#define MT7622_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7622_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table, _parent_name) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT7622_RST_BAR, \
+ .fmax = MT7622_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,\
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL, "clkxtal")
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &apmixed_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_INFRA(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static DEFINE_SPINLOCK(mt7622_clk_lock);
+
+static const char * const infra_mux1_parents[] = {
+ "clkxtal",
+ "armpll",
+ "main_core_en",
+ "armpll"
+};
+
+static const char * const axi_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "univpll_d7"
+};
+
+static const char * const mem_parents[] = {
+ "clkxtal",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clkxtal",
+ "syspll1_d8"
+};
+
+static const char * const eth_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "clk_null",
+ "univpll_d7"
+};
+
+static const char * const pwm_parents[] = {
+ "clkxtal",
+ "univpll2_d4"
+};
+
+static const char * const f10m_ref_parents[] = {
+ "clkxtal",
+ "syspll4_d16"
+};
+
+static const char * const nfi_infra_parents[] = {
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "univpll2_d8",
+ "syspll1_d8",
+ "univpll1_d8",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll3_d2",
+ "syspll1_d4"
+};
+
+static const char * const flash_parents[] = {
+ "clkxtal",
+ "univpll_d80_d4",
+ "syspll2_d8",
+ "syspll3_d4",
+ "univpll3_d4",
+ "univpll1_d8",
+ "syspll2_d4",
+ "univpll2_d4"
+};
+
+static const char * const uart_parents[] = {
+ "clkxtal",
+ "univpll2_d8"
+};
+
+static const char * const spi0_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const spi1_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll4_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const msdc30_0_parents[] = {
+ "clkxtal",
+ "univpll2_d16",
+ "univ48m"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clkxtal",
+ "aud1pll_ck",
+ "aud2pll_ck",
+ "clkxtal"
+};
+
+static const char * const intdir_parents[] = {
+ "clkxtal",
+ "syspll_d2",
+ "univpll_d2",
+ "sgmiipll_ck"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clkxtal",
+ "syspll1_d4",
+ "syspll4_d2",
+ "syspll3_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clkxtal",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "univpll2_d16"
+};
+
+static const char * const atb_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5"
+};
+
+static const char * const audio_parents[] = {
+ "clkxtal",
+ "syspll3_d4",
+ "syspll4_d4",
+ "univpll1_d16"
+};
+
+static const char * const usb20_parents[] = {
+ "clkxtal",
+ "univpll3_d4",
+ "syspll1_d8",
+ "clkxtal"
+};
+
+static const char * const aud1_parents[] = {
+ "clkxtal",
+ "aud1pll_ck"
+};
+
+static const char * const aud2_parents[] = {
+ "clkxtal",
+ "aud2pll_ck"
+};
+
+static const char * const asm_l_parents[] = {
+ "clkxtal",
+ "syspll_d5",
+ "univpll2_d2",
+ "univpll2_d4"
+};
+
+static const char * const apll1_ck_parents[] = {
+ "aud1_sel",
+ "aud2_sel"
+};
+
+static const char * const peribus_ck_parents[] = {
+ "syspll1_d8",
+ "syspll1_d4"
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x120,
+ .clr_ofs = 0x120,
+ .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x128,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x128,
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0xC,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x1C,
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+ PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+ HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+ 0, 21, 0x0300, 1, 0, 0x0304, 0),
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+ 0, 21, 0x0314, 1, 0, 0x0318, 0),
+ PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0x00000001,
+ 0, 31, 0x0324, 1, 0, 0x0328, 0),
+ PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0x00000001,
+ 0, 31, 0x0334, 1, 0, 0x0338, 0),
+ PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0x00000001,
+ 0, 21, 0x0344, 1, 0, 0x0348, 0),
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+ 0, 21, 0x0358, 1, 0, 0x035C, 0),
+};
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+};
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "axi_sel", 0),
+ GATE_INFRA(CLK_INFRA_TRNG, "trng_ck", "axi_sel", 2),
+ GATE_INFRA(CLK_INFRA_AUDIO_PD, "infra_audio_pd", "aud_intbus_sel", 5),
+ GATE_INFRA(CLK_INFRA_IRRX_PD, "infra_irrx_pd", "irrx_sel", 16),
+ GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "f10m_ref_sel", 18),
+ GATE_INFRA(CLK_INFRA_PMIC_PD, "infra_pmic_pd", "pmicspi_sel", 22),
+};
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_TO_U2_PHY, "to_u2_phy", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_TO_U2_PHY_1P, "to_u2_phy_1p", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_PCIE0_PIPE_EN, "pcie0_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_PCIE1_PIPE_EN, "pcie1_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_SSUSB_TX250M, "ssusb_tx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_EQ_RX250M, "ssusb_eq_rx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_REF, "ssusb_cdr_ref", "clkxtal",
+ 33333333),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_FB, "ssusb_cdr_fb", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_ASIC, "sata_asic", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_RBC, "sata_rbc", "clkxtal",
+ 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_TO_USB3_SYS, "to_usb3_sys", "eth1pll", 1, 4),
+ FACTOR(CLK_TOP_P1_1MHZ, "p1_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_4MHZ, "free_run_4mhz", "eth1pll", 1, 125),
+ FACTOR(CLK_TOP_P0_1MHZ, "p0_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_TXCLK_SRC_PRE, "txclk_src_pre", "sgmiipll_d2", 1, 1),
+ FACTOR(CLK_TOP_RTC, "rtc", "clkxtal", 1, 1024),
+ FACTOR(CLK_TOP_MEMPLL, "mempll", "clkxtal", 32, 1),
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_SYSPLL4_D16, "syspll4_d16", "mainpll", 1, 112),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL1_D16, "univpll1_d16", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL3_D16, "univpll3_d16", "univpll", 1, 80),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D80_D4, "univpll_d80_d4", "univpll", 1, 320),
+ FACTOR(CLK_TOP_UNIV48M, "univ48m", "univpll", 1, 25),
+ FACTOR(CLK_TOP_SGMIIPLL, "sgmiipll_ck", "sgmipll", 1, 1),
+ FACTOR(CLK_TOP_SGMIIPLL_D2, "sgmiipll_d2", "sgmipll", 1, 2),
+ FACTOR(CLK_TOP_AUD1PLL, "aud1pll_ck", "aud1pll", 1, 1),
+ FACTOR(CLK_TOP_AUD2PLL, "aud2pll_ck", "aud2pll", 1, 1),
+ FACTOR(CLK_TOP_AUD_I2S2_MCK, "aud_i2s2_mck", "i2s2_mck_sel", 1, 2),
+ FACTOR(CLK_TOP_TO_USB3_REF, "to_usb3_ref", "univpll2_d4", 1, 4),
+ FACTOR(CLK_TOP_PCIE1_MAC_EN, "pcie1_mac_en", "univpll1_d4", 1, 1),
+ FACTOR(CLK_TOP_PCIE0_MAC_EN, "pcie0_mac_en", "univpll1_d4", 1, 1),
+ FACTOR(CLK_TOP_ETH_500M, "eth_500m", "eth1pll", 1, 1),
+};
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_APLL1_DIV_PD, "apll1_ck_div_pd", "apll1_ck_div", 0),
+ GATE_TOP0(CLK_TOP_APLL2_DIV_PD, "apll2_ck_div_pd", "apll2_ck_div", 1),
+ GATE_TOP0(CLK_TOP_I2S0_MCK_DIV_PD, "i2s0_mck_div_pd", "i2s0_mck_div",
+ 2),
+ GATE_TOP0(CLK_TOP_I2S1_MCK_DIV_PD, "i2s1_mck_div_pd", "i2s1_mck_div",
+ 3),
+ GATE_TOP0(CLK_TOP_I2S2_MCK_DIV_PD, "i2s2_mck_div_pd", "i2s2_mck_div",
+ 4),
+ GATE_TOP0(CLK_TOP_I2S3_MCK_DIV_PD, "i2s3_mck_div_pd", "i2s3_mck_div",
+ 5),
+
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_A1SYS_HP_DIV_PD, "a1sys_div_pd", "a1sys_div", 0),
+ GATE_TOP1(CLK_TOP_A2SYS_HP_DIV_PD, "a2sys_div_pd", "a2sys_div", 16),
+};
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL1_DIV, "apll1_ck_div", "apll1_ck_sel",
+ 0x120, 24, 3),
+ DIV_ADJ(CLK_TOP_APLL2_DIV, "apll2_ck_div", "apll2_ck_sel",
+ 0x120, 28, 3),
+ DIV_ADJ(CLK_TOP_I2S0_MCK_DIV, "i2s0_mck_div", "i2s0_mck_sel",
+ 0x124, 0, 7),
+ DIV_ADJ(CLK_TOP_I2S1_MCK_DIV, "i2s1_mck_div", "i2s1_mck_sel",
+ 0x124, 8, 7),
+ DIV_ADJ(CLK_TOP_I2S2_MCK_DIV, "i2s2_mck_div", "aud_i2s2_mck",
+ 0x124, 16, 7),
+ DIV_ADJ(CLK_TOP_I2S3_MCK_DIV, "i2s3_mck_div", "i2s3_mck_sel",
+ 0x124, 24, 7),
+ DIV_ADJ(CLK_TOP_A1SYS_HP_DIV, "a1sys_div", "a1sys_hp_sel",
+ 0x128, 8, 7),
+ DIV_ADJ(CLK_TOP_A2SYS_HP_DIV, "a2sys_div", "a2sys_hp_sel",
+ 0x128, 24, 7),
+};
+
+static const struct mtk_gate peri_clks[] = {
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_THERM_PD, "peri_therm_pd", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_PWM1_PD, "peri_pwm1_pd", "clkxtal", 2),
+ GATE_PERI0(CLK_PERI_PWM2_PD, "peri_pwm2_pd", "clkxtal", 3),
+ GATE_PERI0(CLK_PERI_PWM3_PD, "peri_pwm3_pd", "clkxtal", 4),
+ GATE_PERI0(CLK_PERI_PWM4_PD, "peri_pwm4_pd", "clkxtal", 5),
+ GATE_PERI0(CLK_PERI_PWM5_PD, "peri_pwm5_pd", "clkxtal", 6),
+ GATE_PERI0(CLK_PERI_PWM6_PD, "peri_pwm6_pd", "clkxtal", 7),
+ GATE_PERI0(CLK_PERI_PWM7_PD, "peri_pwm7_pd", "clkxtal", 8),
+ GATE_PERI0(CLK_PERI_PWM_PD, "peri_pwm_pd", "clkxtal", 9),
+ GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
+ GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
+ GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
+ GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
+ GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
+ GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
+ GATE_PERI0(CLK_PERI_UART4_PD, "peri_uart4_pd", "axi_sel", 21),
+ GATE_PERI0(CLK_PERI_BTIF_PD, "peri_btif_pd", "axi_sel", 22),
+ GATE_PERI0(CLK_PERI_I2C0_PD, "peri_i2c0_pd", "axi_sel", 23),
+ GATE_PERI0(CLK_PERI_I2C1_PD, "peri_i2c1_pd", "axi_sel", 24),
+ GATE_PERI0(CLK_PERI_I2C2_PD, "peri_i2c2_pd", "axi_sel", 25),
+ GATE_PERI0(CLK_PERI_SPI1_PD, "peri_spi1_pd", "spi1_sel", 26),
+ GATE_PERI0(CLK_PERI_AUXADC_PD, "peri_auxadc_pd", "clkxtal", 27),
+ GATE_PERI0(CLK_PERI_SPI0_PD, "peri_spi0_pd", "spi0_sel", 28),
+ GATE_PERI0(CLK_PERI_SNFI_PD, "peri_snfi_pd", "nfi_infra_sel", 29),
+ GATE_PERI0(CLK_PERI_NFI_PD, "peri_nfi_pd", "axi_sel", 30),
+ GATE_PERI0(CLK_PERI_NFIECC_PD, "peri_nfiecc_pd", "axi_sel", 31),
+
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_FLASH_PD, "peri_flash_pd", "flash_sel", 1),
+ GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2),
+};
+
+static struct mtk_composite infra_muxes[] __initdata = {
+ MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents,
+ 0x000, 2, 2),
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x040, 8, 1, 15),
+ MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x040, 16, 1, 23),
+ MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 24, 3, 31),
+
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_F10M_REF_SEL, "f10m_ref_sel", f10m_ref_parents,
+ 0x050, 8, 1, 15),
+ MUX_GATE(CLK_TOP_NFI_INFRA_SEL, "nfi_infra_sel", nfi_infra_parents,
+ 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+ 0x050, 24, 3, 31),
+
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x060, 0, 1, 7),
+ MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi0_parents,
+ 0x060, 8, 3, 15),
+ MUX_GATE(CLK_TOP_SPI1_SEL, "spi1_sel", spi1_parents,
+ 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", uart_parents,
+ 0x060, 24, 3, 31),
+
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_parents,
+ 0x070, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_0_parents,
+ 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_A1SYS_HP_SEL, "a1sys_hp_sel", a1sys_hp_parents,
+ 0x070, 16, 2, 23),
+ MUX_GATE(CLK_TOP_A2SYS_HP_SEL, "a2sys_hp_sel", a1sys_hp_parents,
+ 0x070, 24, 2, 31),
+
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_INTDIR_SEL, "intdir_sel", intdir_parents,
+ 0x080, 0, 2, 7),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x080, 8, 2, 15),
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x080, 16, 3, 23),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", ddrphycfg_parents,
+ 0x080, 24, 2, 31),
+
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents,
+ 0x090, 0, 2, 7),
+ MUX_GATE(CLK_TOP_HIF_SEL, "hif_sel", eth_parents,
+ 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x090, 16, 2, 23),
+ MUX_GATE(CLK_TOP_U2_SEL, "usb20_sel", usb20_parents,
+ 0x090, 24, 2, 31),
+
+ /* CLK_CFG_6 */
+ MUX_GATE(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x0A0, 0, 1, 7),
+ MUX_GATE(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+ 0x0A0, 8, 1, 15),
+ MUX_GATE(CLK_TOP_IRRX_SEL, "irrx_sel", f10m_ref_parents,
+ 0x0A0, 16, 1, 23),
+ MUX_GATE(CLK_TOP_IRTX_SEL, "irtx_sel", f10m_ref_parents,
+ 0x0A0, 24, 1, 31),
+
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_ASM_L_SEL, "asm_l_sel", asm_l_parents,
+ 0x0B0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_ASM_M_SEL, "asm_m_sel", asm_l_parents,
+ 0x0B0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_ASM_H_SEL, "asm_h_sel", asm_l_parents,
+ 0x0B0, 16, 2, 23),
+
+ /* CLK_AUDDIV_0 */
+ MUX(CLK_TOP_APLL1_SEL, "apll1_ck_sel", apll1_ck_parents,
+ 0x120, 6, 1),
+ MUX(CLK_TOP_APLL2_SEL, "apll2_ck_sel", apll1_ck_parents,
+ 0x120, 7, 1),
+ MUX(CLK_TOP_I2S0_MCK_SEL, "i2s0_mck_sel", apll1_ck_parents,
+ 0x120, 8, 1),
+ MUX(CLK_TOP_I2S1_MCK_SEL, "i2s1_mck_sel", apll1_ck_parents,
+ 0x120, 9, 1),
+ MUX(CLK_TOP_I2S2_MCK_SEL, "i2s2_mck_sel", apll1_ck_parents,
+ 0x120, 10, 1),
+ MUX(CLK_TOP_I2S3_MCK_SEL, "i2s3_mck_sel", apll1_ck_parents,
+ 0x120, 11, 1),
+};
+
+static struct mtk_composite peri_muxes[] = {
+ /* PERI_GLOBALCON_CKSEL */
+ MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+ base, &mt7622_clk_lock, clk_data);
+
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt7622_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int __init mtk_infrasys_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get,
+ clk_data);
+ if (r)
+ return r;
+
+ mtk_register_reset_controller(node, 1, 0x30);
+
+ return 0;
+}
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+ clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ &mt7622_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+
+ mtk_register_reset_controller(node, 2, 0x0);
+
+ return 0;
+}
+
+static const struct of_device_id of_match_clk_mt7622[] = {
+ {
+ .compatible = "mediatek,mt7622-apmixedsys",
+ .data = mtk_apmixedsys_init,
+ }, {
+ .compatible = "mediatek,mt7622-infracfg",
+ .data = mtk_infrasys_init,
+ }, {
+ .compatible = "mediatek,mt7622-topckgen",
+ .data = mtk_topckgen_init,
+ }, {
+ .compatible = "mediatek,mt7622-pericfg",
+ .data = mtk_pericfg_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7622_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7622_drv = {
+ .probe = clk_mt7622_probe,
+ .driver = {
+ .name = "clk-mt7622",
+ .of_match_table = of_match_clk_mt7622,
+ },
+};
+
+static int clk_mt7622_init(void)
+{
+ return platform_driver_register(&clk_mt7622_drv);
+}
+
+arch_initcall(clk_mt7622_init);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index f5d6b70ce189..f10250dcece4 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -207,6 +207,8 @@ struct mtk_pll_data {
uint32_t en_mask;
uint32_t pd_reg;
uint32_t tuner_reg;
+ uint32_t tuner_en_reg;
+ uint8_t tuner_en_bit;
int pd_shift;
unsigned int flags;
const struct clk_ops *ops;
@@ -216,6 +218,7 @@ struct mtk_pll_data {
uint32_t pcw_reg;
int pcw_shift;
const struct mtk_pll_div_table *div_table;
+ const char *parent_name;
};
void mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index a409142e9346..f54e4015b0b1 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -47,6 +47,7 @@ struct mtk_clk_pll {
void __iomem *pd_addr;
void __iomem *pwr_addr;
void __iomem *tuner_addr;
+ void __iomem *tuner_en_addr;
void __iomem *pcw_addr;
const struct mtk_pll_data *data;
};
@@ -227,7 +228,10 @@ static int mtk_pll_prepare(struct clk_hw *hw)
r |= pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
- if (pll->tuner_addr) {
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
writel(r, pll->tuner_addr);
}
@@ -254,7 +258,10 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->base_addr + REG_CON0);
}
- if (pll->tuner_addr) {
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
writel(r, pll->tuner_addr);
}
@@ -297,13 +304,18 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pcw_addr = base + data->pcw_reg;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
+ if (data->tuner_en_reg)
+ pll->tuner_en_addr = base + data->tuner_en_reg;
pll->hw.init = &init;
pll->data = data;
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = &mtk_pll_ops;
- init.parent_names = &parent_name;
+ if (data->parent_name)
+ init.parent_names = &data->parent_name;
+ else
+ init.parent_names = &parent_name;
init.num_parents = 1;
clk = clk_register(NULL, &pll->hw);
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index b2d1e8ed7152..ae385310e980 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -1131,6 +1131,253 @@ static struct clk_gate gxbb_sd_emmc_c_clk0 = {
},
};
+/* VPU Clock */
+
+static u32 mux_table_vpu[] = {0, 1, 2, 3};
+static const char * const gxbb_vpu_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vpu_0_sel = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .lock = &clk_lock,
+ .table = mux_table_vpu,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 9:10 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vpu_0_div = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vpu_0 = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vpu_1_sel = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .lock = &clk_lock,
+ .table = mux_table_vpu,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 25:26 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vpu_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vpu_1_div = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vpu_1 = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vpu = {
+ .reg = (void *)HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vpu_0 or vpu_1
+ */
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static u32 mux_table_vapb[] = {0, 1, 2, 3};
+static const char * const gxbb_vapb_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static struct clk_mux gxbb_vapb_0_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .lock = &clk_lock,
+ .table = mux_table_vapb,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 9:10 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vapb_0_div = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vapb_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb_0 = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vapb_1_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .lock = &clk_lock,
+ .table = mux_table_vapb,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bits 25:26 selects from 4 possible parents:
+ * fclk_div4, fclk_div3, fclk_div5, fclk_div7,
+ */
+ .parent_names = gxbb_vapb_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_divider gxbb_vapb_1_div = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "vapb_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb_1 = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_mux gxbb_vapb_sel = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_mux_ops,
+ /*
+ * bit 31 selects from 2 possible parents:
+ * vapb_0 or vapb_1
+ */
+ .parent_names = (const char *[]){ "vapb_0", "vapb_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_gate gxbb_vapb = {
+ .reg = (void *)HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "vapb_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1349,6 +1596,21 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [CLKID_VPU_0_SEL] = &gxbb_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &gxbb_vpu_0_div.hw,
+ [CLKID_VPU_0] = &gxbb_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &gxbb_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &gxbb_vpu_1_div.hw,
+ [CLKID_VPU_1] = &gxbb_vpu_1.hw,
+ [CLKID_VPU] = &gxbb_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &gxbb_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &gxbb_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &gxbb_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &gxbb_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &gxbb_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &gxbb_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
+ [CLKID_VAPB] = &gxbb_vapb.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1481,6 +1743,21 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_SD_EMMC_C_CLK0_SEL] = &gxbb_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &gxbb_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &gxbb_sd_emmc_c_clk0.hw,
+ [CLKID_VPU_0_SEL] = &gxbb_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &gxbb_vpu_0_div.hw,
+ [CLKID_VPU_0] = &gxbb_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &gxbb_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &gxbb_vpu_1_div.hw,
+ [CLKID_VPU_1] = &gxbb_vpu_1.hw,
+ [CLKID_VPU] = &gxbb_vpu.hw,
+ [CLKID_VAPB_0_SEL] = &gxbb_vapb_0_sel.hw,
+ [CLKID_VAPB_0_DIV] = &gxbb_vapb_0_div.hw,
+ [CLKID_VAPB_0] = &gxbb_vapb_0.hw,
+ [CLKID_VAPB_1_SEL] = &gxbb_vapb_1_sel.hw,
+ [CLKID_VAPB_1_DIV] = &gxbb_vapb_1_div.hw,
+ [CLKID_VAPB_1] = &gxbb_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
+ [CLKID_VAPB] = &gxbb_vapb.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1600,6 +1877,11 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_sd_emmc_a_clk0,
&gxbb_sd_emmc_b_clk0,
&gxbb_sd_emmc_c_clk0,
+ &gxbb_vpu_0,
+ &gxbb_vpu_1,
+ &gxbb_vapb_0,
+ &gxbb_vapb_1,
+ &gxbb_vapb,
};
static struct clk_mux *const gxbb_clk_muxes[] = {
@@ -1615,6 +1897,12 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_sd_emmc_a_clk0_sel,
&gxbb_sd_emmc_b_clk0_sel,
&gxbb_sd_emmc_c_clk0_sel,
+ &gxbb_vpu_0_sel,
+ &gxbb_vpu_1_sel,
+ &gxbb_vpu,
+ &gxbb_vapb_0_sel,
+ &gxbb_vapb_1_sel,
+ &gxbb_vapb_sel,
};
static struct clk_divider *const gxbb_clk_dividers[] = {
@@ -1627,6 +1915,10 @@ static struct clk_divider *const gxbb_clk_dividers[] = {
&gxbb_sd_emmc_a_clk0_div,
&gxbb_sd_emmc_b_clk0_div,
&gxbb_sd_emmc_c_clk0_div,
+ &gxbb_vpu_0_div,
+ &gxbb_vpu_1_div,
+ &gxbb_vapb_0_div,
+ &gxbb_vapb_1_div,
};
static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 5b1d4b374d1c..aee6fbba2004 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -190,8 +190,12 @@
#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_C_CLK0_SEL 123
#define CLKID_SD_EMMC_C_CLK0_DIV 124
+#define CLKID_VPU_0_DIV 127
+#define CLKID_VPU_1_DIV 130
+#define CLKID_VAPB_0_DIV 134
+#define CLKID_VAPB_1_DIV 137
-#define NR_CLKS 126
+#define NR_CLKS 141
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 9d4bc41e4239..7bc7ac69391e 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for mmp specific clk
#
diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c
index 4c717db05f2d..fb294ada0b03 100644
--- a/drivers/clk/mmp/clk-apbc.c
+++ b/drivers/clk/mmp/clk-apbc.c
@@ -114,7 +114,7 @@ static void clk_apbc_unprepare(struct clk_hw *hw)
spin_unlock_irqrestore(apbc->lock, flags);
}
-static struct clk_ops clk_apbc_ops = {
+static const struct clk_ops clk_apbc_ops = {
.prepare = clk_apbc_prepare,
.unprepare = clk_apbc_unprepare,
};
diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c
index 47b5542ce50f..b7ce8f52026e 100644
--- a/drivers/clk/mmp/clk-apmu.c
+++ b/drivers/clk/mmp/clk-apmu.c
@@ -60,7 +60,7 @@ static void clk_apmu_disable(struct clk_hw *hw)
spin_unlock_irqrestore(apmu->lock, flags);
}
-static struct clk_ops clk_apmu_ops = {
+static const struct clk_ops clk_apmu_ops = {
.enable = clk_apmu_enable,
.disable = clk_apmu_disable,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 584a9927993b..cb43d54735b0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -149,7 +149,7 @@ static void clk_factor_init(struct clk_hw *hw)
spin_unlock_irqrestore(factor->lock, flags);
}
-static struct clk_ops clk_factor_ops = {
+static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
.round_rate = clk_factor_round_rate,
.set_rate = clk_factor_set_rate,
@@ -172,10 +172,8 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
}
factor = kzalloc(sizeof(*factor), GFP_KERNEL);
- if (!factor) {
- pr_err("%s: could not allocate factor clk\n", __func__);
+ if (!factor)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_aux assignments */
factor->base = base;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index d20cd3431ac2..7355595c42e2 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -103,10 +103,8 @@ struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
/* allocate the gate */
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
- if (!gate) {
- pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+ if (!gate)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &mmp_clk_gate_ops;
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index c554833cffc5..90814b2613c0 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -229,7 +229,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
parent_rate = clk_hw_get_rate(parent);
mix_rate = parent_rate / item->divisor;
gap = abs(mix_rate - req->rate);
- if (parent_best == NULL || gap < gap_best) {
+ if (!parent_best || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
mix_rate_best = mix_rate;
@@ -247,7 +247,7 @@ static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
div = _get_div(mix, j);
mix_rate = parent_rate / div;
gap = abs(mix_rate - req->rate);
- if (parent_best == NULL || gap < gap_best) {
+ if (!parent_best || gap < gap_best) {
parent_best = parent;
parent_rate_best = parent_rate;
mix_rate_best = mix_rate;
@@ -451,11 +451,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
size_t table_bytes;
mix = kzalloc(sizeof(*mix), GFP_KERNEL);
- if (!mix) {
- pr_err("%s:%s: could not allocate mmp mix clk\n",
- __func__, name);
+ if (!mix)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.flags = flags | CLK_GET_RATE_NOCACHE;
@@ -467,12 +464,9 @@ struct clk *mmp_clk_register_mix(struct device *dev,
if (config->table) {
table_bytes = sizeof(*config->table) * config->table_size;
mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
- if (!mix->table) {
- pr_err("%s:%s: could not allocate mmp mix table\n",
- __func__, name);
- kfree(mix);
- return ERR_PTR(-ENOMEM);
- }
+ if (!mix->table)
+ goto free_mix;
+
mix->table_size = config->table_size;
}
@@ -481,11 +475,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
mix->mux_table = kmemdup(config->mux_table, table_bytes,
GFP_KERNEL);
if (!mix->mux_table) {
- pr_err("%s:%s: could not allocate mmp mix mux-table\n",
- __func__, name);
kfree(mix->table);
- kfree(mix);
- return ERR_PTR(-ENOMEM);
+ goto free_mix;
}
}
@@ -509,4 +500,8 @@ struct clk *mmp_clk_register_mix(struct device *dev,
}
return clk;
+
+free_mix:
+ kfree(mix);
+ return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 038023483b98..7460031714da 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -83,19 +83,19 @@ void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index a9ef9209532a..8e2551ab8462 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -75,19 +75,19 @@ void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index a520cf7702a1..7a7965141918 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -74,25 +74,25 @@ void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
void __iomem *apbc_base;
mpmu_base = ioremap(mpmu_phys, SZ_4K);
- if (mpmu_base == NULL) {
+ if (!mpmu_base) {
pr_err("error to ioremap MPMU base\n");
return;
}
apmu_base = ioremap(apmu_phys, SZ_4K);
- if (apmu_base == NULL) {
+ if (!apmu_base) {
pr_err("error to ioremap APMU base\n");
return;
}
apbcp_base = ioremap(apbcp_phys, SZ_4K);
- if (apbcp_base == NULL) {
+ if (!apbcp_base) {
pr_err("error to ioremap APBC extension base\n");
return;
}
apbc_base = ioremap(apbc_phys, SZ_4K);
- if (apbc_base == NULL) {
+ if (!apbc_base) {
pr_err("error to ioremap APBC base\n");
return;
}
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index 089927e4cda2..ad8d483a35cd 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index adf9b711b037..70bb73257647 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MACH_MMP_CLK_H
#define __MACH_MMP_CLK_H
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b4e4d6aa2631..ded7e391c737 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/mmp/reset.h b/drivers/clk/mmp/reset.h
index be8b1a7000f7..3d0470ca3fd9 100644
--- a/drivers/clk/mmp/reset.h
+++ b/drivers/clk/mmp/reset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MACH_MMP_CLK_RESET_H
#define __MACH_MMP_CLK_RESET_H
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index d71c7fd5da16..93ac3685271f 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MVEBU_CLK_COMMON) += common.o
obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o
obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 4091f3cfee19..7e35c891e168 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Dove PMU Core PLL divider driver
*
diff --git a/drivers/clk/mvebu/dove-divider.h b/drivers/clk/mvebu/dove-divider.h
index 4f2f718deb8e..38ea373088ca 100644
--- a/drivers/clk/mvebu/dove-divider.h
+++ b/drivers/clk/mvebu/dove-divider.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DOVE_DIVIDER_H
#define DOVE_DIVIDER_H
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index f75e989c578f..ccebd014fc1e 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -67,7 +67,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static struct clk_ops clk_div_ops = {
+static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
.round_rate = clk_div_round_rate,
.set_rate = clk_div_set_rate,
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index f8dd10f6df3d..27b3372adc37 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -107,7 +107,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
return mxs_clk_wait(frac->reg, frac->busy);
}
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 74f64c3c4290..b80dc9d5855c 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -147,9 +147,7 @@ void pxa2xx_core_turbo_switch(bool on)
" b 3f\n"
"2: b 1b\n"
"3: nop\n"
- : "=&r" (unused)
- : "r" (clkcfg)
- : );
+ : "=&r" (unused) : "r" (clkcfg));
local_irq_restore(flags);
}
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 3f3aff229fb7..26410d31446b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
clk-qcom-y += common.o
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 1b3e8d265bdb..a2495457e564 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -156,7 +156,6 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @hid_width: number of bits in half integer divider
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
- * @current_freq: last cached frequency when using branches with shared RCGs
* @clkr: regmap clock handle
*
*/
@@ -166,7 +165,6 @@ struct clk_rcg2 {
u8 hid_width;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
- unsigned long current_freq;
struct clk_regmap clkr;
};
@@ -174,7 +172,6 @@ struct clk_rcg2 {
extern const struct clk_ops clk_rcg2_ops;
extern const struct clk_ops clk_rcg2_floor_ops;
-extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 1a0985ae20d2..bbeaf9c09dbb 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -358,85 +358,6 @@ const struct clk_ops clk_rcg2_floor_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
-static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- const char *name = clk_hw_get_name(hw);
- int ret, count;
-
- /* force enable RCG */
- ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
- CMD_ROOT_EN, CMD_ROOT_EN);
- if (ret)
- return ret;
-
- /* wait for RCG to turn ON */
- for (count = 500; count > 0; count--) {
- ret = clk_rcg2_is_enabled(hw);
- if (ret)
- break;
- udelay(1);
- }
- if (!count)
- pr_err("%s: RCG did not turn on\n", name);
-
- /* set clock rate */
- ret = __clk_rcg2_set_rate(hw, rate, CEIL);
- if (ret)
- return ret;
-
- /* clear force enable RCG */
- return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
- CMD_ROOT_EN, 0);
-}
-
-static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- /* cache the rate */
- rcg->current_freq = rate;
-
- if (!__clk_is_enabled(hw->clk))
- return 0;
-
- return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static unsigned long
-clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- return rcg->current_freq = clk_rcg2_recalc_rate(hw, parent_rate);
-}
-
-static int clk_rcg2_shared_enable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- return clk_rcg2_shared_force_enable(hw, rcg->current_freq);
-}
-
-static void clk_rcg2_shared_disable(struct clk_hw *hw)
-{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-
- /* switch to XO, which is the lowest entry in the freq table */
- clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
-}
-
-const struct clk_ops clk_rcg2_shared_ops = {
- .enable = clk_rcg2_shared_enable,
- .disable = clk_rcg2_shared_disable,
- .get_parent = clk_rcg2_get_parent,
- .recalc_rate = clk_rcg2_shared_recalc_rate,
- .determine_rate = clk_rcg2_determine_rate,
- .set_rate = clk_rcg2_shared_set_rate,
-};
-EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
-
struct frac_entry {
int num;
int den;
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
index df3e5fe8442a..c60f61b10c7f 100644
--- a/drivers/clk/qcom/clk-rpm.c
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -56,6 +56,18 @@
}, \
}
+#define DEFINE_CLK_RPM_FIXED(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_fixed_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
static struct clk_rpm _platform##_##_active; \
static struct clk_rpm _platform##_##_name = { \
@@ -143,6 +155,13 @@ static int clk_rpm_handoff(struct clk_rpm *r)
int ret;
u32 value = INT_MAX;
+ /*
+ * The vendor tree simply reads the status for this
+ * RPM clock.
+ */
+ if (r->rpm_clk_id == QCOM_RPM_PLL_4)
+ return 0;
+
ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
r->rpm_clk_id, &value, 1);
if (ret)
@@ -269,6 +288,32 @@ out:
mutex_unlock(&rpm_clk_lock);
}
+static int clk_rpm_fixed_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ u32 value = 1;
+ int ret;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (!ret)
+ r->enabled = true;
+
+ return ret;
+}
+
+static void clk_rpm_fixed_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ u32 value = 0;
+ int ret;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (!ret)
+ r->enabled = false;
+}
+
static int clk_rpm_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
@@ -333,6 +378,13 @@ static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
return r->rate;
}
+static const struct clk_ops clk_rpm_fixed_ops = {
+ .prepare = clk_rpm_fixed_prepare,
+ .unprepare = clk_rpm_fixed_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
static const struct clk_ops clk_rpm_ops = {
.prepare = clk_rpm_prepare,
.unprepare = clk_rpm_unprepare,
@@ -348,6 +400,45 @@ static const struct clk_ops clk_rpm_branch_ops = {
.recalc_rate = clk_rpm_recalc_rate,
};
+/* MSM8660/APQ8060 */
+DEFINE_CLK_RPM(msm8660, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(msm8660, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(msm8660, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(msm8660, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(msm8660, smi_clk, smi_a_clk, QCOM_RPM_SMI_CLK);
+DEFINE_CLK_RPM(msm8660, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM_FIXED(msm8660, pll4_clk, pll4_a_clk, QCOM_RPM_PLL_4, 540672000);
+
+static struct clk_rpm *msm8660_clks[] = {
+ [RPM_APPS_FABRIC_CLK] = &msm8660_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &msm8660_afab_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &msm8660_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &msm8660_sfab_a_clk,
+ [RPM_MM_FABRIC_CLK] = &msm8660_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &msm8660_mmfab_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &msm8660_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &msm8660_daytona_a_clk,
+ [RPM_SFPB_CLK] = &msm8660_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &msm8660_sfpb_a_clk,
+ [RPM_CFPB_CLK] = &msm8660_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &msm8660_cfpb_a_clk,
+ [RPM_MMFPB_CLK] = &msm8660_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &msm8660_mmfpb_a_clk,
+ [RPM_SMI_CLK] = &msm8660_smi_clk,
+ [RPM_SMI_A_CLK] = &msm8660_smi_a_clk,
+ [RPM_EBI1_CLK] = &msm8660_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &msm8660_ebi1_a_clk,
+ [RPM_PLL4_CLK] = &msm8660_pll4_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_msm8660 = {
+ .clks = msm8660_clks,
+ .num_clks = ARRAY_SIZE(msm8660_clks),
+};
+
/* apq8064 */
DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
@@ -386,6 +477,8 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = {
};
static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 },
+ { .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 },
{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index cc03d5508627..c26d9007bfc4 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -530,9 +530,91 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = {
.clks = msm8974_clks,
.num_clks = ARRAY_SIZE(msm8974_clks),
};
+
+/* msm8996 */
+DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 2, 1000);
+DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk,
+ QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_a, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_a, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_a, 0xd);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8996_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
+ [RPM_SMD_CNOC_CLK] = &msm8996_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
+ [RPM_SMD_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
+ [RPM_SMD_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8996_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8996_ipa_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8996_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8996_ce1_a_clk,
+ [RPM_SMD_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
+ [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
+ [RPM_SMD_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
+ [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8996_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8996_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8996_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8996_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8996_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8996_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8996_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8996_rf_clk2_a,
+ [RPM_SMD_LN_BB_CLK] = &msm8996_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
+ [RPM_SMD_DIV_CLK1] = &msm8996_div_clk1,
+ [RPM_SMD_DIV_A_CLK1] = &msm8996_div_clk1_a,
+ [RPM_SMD_DIV_CLK2] = &msm8996_div_clk2,
+ [RPM_SMD_DIV_A_CLK2] = &msm8996_div_clk2_a,
+ [RPM_SMD_DIV_CLK3] = &msm8996_div_clk3,
+ [RPM_SMD_DIV_A_CLK3] = &msm8996_div_clk3_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
+ .clks = msm8996_clks,
+ .num_clks = ARRAY_SIZE(msm8996_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
+ { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index d523991c945f..b8064a336d46 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -111,16 +111,6 @@ qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count)
}
EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode);
-static void qcom_cc_del_clk_provider(void *data)
-{
- of_clk_del_provider(data);
-}
-
-static void qcom_cc_reset_unregister(void *data)
-{
- reset_controller_unregister(data);
-}
-
static void qcom_cc_gdsc_unregister(void *data)
{
gdsc_unregister(data);
@@ -143,8 +133,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
int ret;
clocks_node = of_find_node_by_path("/clocks");
- if (clocks_node)
- node = of_find_node_by_name(clocks_node, path);
+ if (clocks_node) {
+ node = of_get_child_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+ }
if (!node) {
fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
@@ -248,13 +240,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
return ret;
}
- ret = of_clk_add_hw_provider(dev->of_node, qcom_cc_clk_hw_get, cc);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
- pdev->dev.of_node);
-
+ ret = devm_of_clk_add_hw_provider(dev, qcom_cc_clk_hw_get, cc);
if (ret)
return ret;
@@ -266,13 +252,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
reset->regmap = regmap;
reset->reset_map = desc->resets;
- ret = reset_controller_register(&reset->rcdev);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
- &reset->rcdev);
-
+ ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
return ret;
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index acbb38151ba1..43b5a89c4b28 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -15,6 +15,7 @@ config CLK_RENESAS
select CLK_R8A7794 if ARCH_R8A7794
select CLK_R8A7795 if ARCH_R8A7795
select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_R8A77970 if ARCH_R8A77970
select CLK_R8A77995 if ARCH_R8A77995
select CLK_SH73A0 if ARCH_SH73A0
@@ -95,6 +96,10 @@ config CLK_R8A7796
bool "R-Car M3-W clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
+config CLK_R8A77970
+ bool "R-Car V3M clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN3_CPG
+
config CLK_R8A77995
bool "R-Car D3 clock support" if COMPILE_TEST
select CLK_RCAR_GEN3_CPG
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 9bda3ec5b199..34c4e0b37afa 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# SoC
obj-$(CONFIG_CLK_EMEV2) += clk-emev2.o
obj-$(CONFIG_CLK_RZA1) += clk-rz.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A77970) += r8a77970-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3e0040c0ac87..151336d2ba59 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -14,8 +14,10 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include "clk-div6.h"
@@ -32,6 +34,7 @@
* @src_shift: Shift to access the register bits to select the parent clock
* @src_width: Number of register bits to select the parent clock (may be 0)
* @parents: Array to map from valid parent clocks indices to hardware indices
+ * @nb: Notifier block to save/restore clock state for system resume
*/
struct div6_clock {
struct clk_hw hw;
@@ -40,6 +43,7 @@ struct div6_clock {
u32 src_shift;
u32 src_width;
u8 *parents;
+ struct notifier_block nb;
};
#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -176,6 +180,29 @@ static const struct clk_ops cpg_div6_clock_ops = {
.set_rate = cpg_div6_clock_set_rate,
};
+static int cpg_div6_clock_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct div6_clock *clock = container_of(nb, struct div6_clock, nb);
+
+ switch (action) {
+ case PM_EVENT_RESUME:
+ /*
+ * TODO: This does not yet support DIV6 clocks with multiple
+ * parents, as the parent selection bits are not restored.
+ * Fortunately so far such DIV6 clocks are found only on
+ * R/SH-Mobile SoCs, while the resume functionality is only
+ * needed on R-Car Gen3.
+ */
+ if (__clk_get_enable_count(clock->hw.clk))
+ cpg_div6_clock_enable(&clock->hw);
+ else
+ cpg_div6_clock_disable(&clock->hw);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
/**
* cpg_div6_register - Register a DIV6 clock
@@ -183,11 +210,13 @@ static const struct clk_ops cpg_div6_clock_ops = {
* @num_parents: Number of parent clocks of the DIV6 clock (1, 4, or 8)
* @parent_names: Array containing the names of the parent clocks
* @reg: Mapped register used to control the DIV6 clock
+ * @notifiers: Optional notifier chain to save/restore state for system resume
*/
struct clk * __init cpg_div6_register(const char *name,
unsigned int num_parents,
const char **parent_names,
- void __iomem *reg)
+ void __iomem *reg,
+ struct raw_notifier_head *notifiers)
{
unsigned int valid_parents;
struct clk_init_data init;
@@ -258,6 +287,11 @@ struct clk * __init cpg_div6_register(const char *name,
if (IS_ERR(clk))
goto free_parents;
+ if (notifiers) {
+ clock->nb.notifier_call = cpg_div6_clock_notifier_call;
+ raw_notifier_chain_register(notifiers, &clock->nb);
+ }
+
return clk;
free_parents:
@@ -301,7 +335,7 @@ static void __init cpg_div6_clock_init(struct device_node *np)
for (i = 0; i < num_parents; i++)
parent_names[i] = of_clk_get_parent_name(np, i);
- clk = cpg_div6_register(clk_name, num_parents, parent_names, reg);
+ clk = cpg_div6_register(clk_name, num_parents, parent_names, reg, NULL);
if (IS_ERR(clk)) {
pr_err("%s: failed to register %s DIV6 clock (%ld)\n",
__func__, np->name, PTR_ERR(clk));
diff --git a/drivers/clk/renesas/clk-div6.h b/drivers/clk/renesas/clk-div6.h
index 567b31d2bfa5..3af640a0b08d 100644
--- a/drivers/clk/renesas/clk-div6.h
+++ b/drivers/clk/renesas/clk-div6.h
@@ -1,7 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RENESAS_CLK_DIV6_H__
#define __RENESAS_CLK_DIV6_H__
struct clk *cpg_div6_register(const char *name, unsigned int num_parents,
- const char **parent_names, void __iomem *reg);
+ const char **parent_names, void __iomem *reg,
+ struct raw_notifier_head *notifiers);
#endif
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 500a9e4e03c4..c944cc421e30 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -156,10 +156,8 @@ static struct clk * __init cpg_mstp_clock_register(const char *name,
struct clk *clk;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock) {
- pr_err("%s: failed to allocate MSTP clock.\n", __func__);
+ if (!clock)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &cpg_mstp_clock_ops;
@@ -196,7 +194,6 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
if (group == NULL || clks == NULL) {
kfree(group);
kfree(clks);
- pr_err("%s: failed to allocate group\n", __func__);
return;
}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 0b2e56d0d94b..d14cbe1ca29a 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -423,7 +423,6 @@ static void __init rcar_gen2_cpg_clocks_init(struct device_node *np)
/* We're leaking memory on purpose, there's no point in cleaning
* up as the system won't boot anyway.
*/
- pr_err("%s: failed to allocate cpg\n", __func__);
return;
}
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 5adb934326d1..127c58135c8f 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -1,5 +1,5 @@
/*
- * rz Core CPG Clocks
+ * RZ/A1 Core CPG Clocks
*
* Copyright (C) 2013 Ideas On Board SPRL
* Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 9e2360a8e14b..2859504cc866 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -129,6 +129,7 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("scif2", 719, R8A7745_CLK_P),
DEF_MOD("scif1", 720, R8A7745_CLK_P),
DEF_MOD("scif0", 721, R8A7745_CLK_P),
+ DEF_MOD("du1", 723, R8A7745_CLK_ZX),
DEF_MOD("du0", 724, R8A7745_CLK_ZX),
DEF_MOD("ipmmu-sgx", 800, R8A7745_CLK_ZX),
DEF_MOD("vin1", 810, R8A7745_CLK_ZG),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 762b2f8824f1..b1d9f48eae9e 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -149,7 +149,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7795_CLK_R),
DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
@@ -348,6 +348,7 @@ static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
{ MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */
{ MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */
{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
+ { MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */
{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
{ MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index e5e7fb212288..b3767472088a 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -143,7 +143,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("usb-dmac1", 331, R8A7796_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
new file mode 100644
index 000000000000..72f98527473a
--- /dev/null
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -0,0 +1,199 @@
+/*
+ * r8a77970 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * Based on r8a7795-cpg-mssr.c
+ *
+ * Copyright (C) 2015 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a77970-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen3-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A77970_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+ CLK_PLL1_DIV4,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("ztr", R8A77970_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("ztrd2", R8A77970_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("zt", R8A77970_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("zx", R8A77970_CLK_ZX, CLK_PLL1_DIV2, 3, 1),
+ DEF_FIXED("s1d1", R8A77970_CLK_S1D1, CLK_PLL1_DIV2, 4, 1),
+ DEF_FIXED("s1d2", R8A77970_CLK_S1D2, CLK_PLL1_DIV2, 8, 1),
+ DEF_FIXED("s1d4", R8A77970_CLK_S1D4, CLK_PLL1_DIV2, 16, 1),
+ DEF_FIXED("s2d1", R8A77970_CLK_S2D1, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED("s2d2", R8A77970_CLK_S2D2, CLK_PLL1_DIV2, 12, 1),
+ DEF_FIXED("s2d4", R8A77970_CLK_S2D4, CLK_PLL1_DIV2, 24, 1),
+
+ DEF_FIXED("cl", R8A77970_CLK_CL, CLK_PLL1_DIV2, 48, 1),
+ DEF_FIXED("cp", R8A77970_CLK_CP, CLK_EXTAL, 2, 1),
+
+ DEF_DIV6P1("canfd", R8A77970_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("mso", R8A77970_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("csi0", R8A77970_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+
+ DEF_FIXED("osc", R8A77970_CLK_OSC, CLK_PLL1_DIV2, 12*1024, 1),
+ DEF_FIXED("r", R8A77970_CLK_R, CLK_EXTALR, 1, 1),
+};
+
+static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
+ DEF_MOD("ivcp1e", 127, R8A77970_CLK_S2D1),
+ DEF_MOD("scif4", 203, R8A77970_CLK_S2D4),
+ DEF_MOD("scif3", 204, R8A77970_CLK_S2D4),
+ DEF_MOD("scif1", 206, R8A77970_CLK_S2D4),
+ DEF_MOD("scif0", 207, R8A77970_CLK_S2D4),
+ DEF_MOD("msiof3", 208, R8A77970_CLK_MSO),
+ DEF_MOD("msiof2", 209, R8A77970_CLK_MSO),
+ DEF_MOD("msiof1", 210, R8A77970_CLK_MSO),
+ DEF_MOD("msiof0", 211, R8A77970_CLK_MSO),
+ DEF_MOD("mfis", 213, R8A77970_CLK_S2D2),
+ DEF_MOD("sys-dmac2", 217, R8A77970_CLK_S2D1),
+ DEF_MOD("sys-dmac1", 218, R8A77970_CLK_S2D1),
+ DEF_MOD("rwdt", 402, R8A77970_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A77970_CLK_CP),
+ DEF_MOD("intc-ap", 408, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif3", 517, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif2", 518, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif1", 519, R8A77970_CLK_S2D1),
+ DEF_MOD("hscif0", 520, R8A77970_CLK_S2D1),
+ DEF_MOD("thermal", 522, R8A77970_CLK_CP),
+ DEF_MOD("pwm", 523, R8A77970_CLK_S2D4),
+ DEF_MOD("fcpvd0", 603, R8A77970_CLK_S2D1),
+ DEF_MOD("vspd0", 623, R8A77970_CLK_S2D1),
+ DEF_MOD("csi40", 716, R8A77970_CLK_CSI0),
+ DEF_MOD("du0", 724, R8A77970_CLK_S2D1),
+ DEF_MOD("vin3", 808, R8A77970_CLK_S2D1),
+ DEF_MOD("vin2", 809, R8A77970_CLK_S2D1),
+ DEF_MOD("vin1", 810, R8A77970_CLK_S2D1),
+ DEF_MOD("vin0", 811, R8A77970_CLK_S2D1),
+ DEF_MOD("etheravb", 812, R8A77970_CLK_S2D2),
+ DEF_MOD("gpio5", 907, R8A77970_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A77970_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A77970_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A77970_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A77970_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A77970_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c4", 927, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c3", 928, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c2", 929, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c1", 930, R8A77970_CLK_S2D2),
+ DEF_MOD("i2c0", 931, R8A77970_CLK_S2D2),
+};
+
+static const unsigned int r8a77970_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-AP (GIC) */
+};
+
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz)
+ *-------------------------------------------------
+ * 0 0 0 16.66 x 1 x192 x192 x96
+ * 0 0 1 16.66 x 1 x192 x192 x80
+ * 0 1 0 20 x 1 x160 x160 x80
+ * 0 1 1 20 x 1 x160 x160 x66
+ * 1 0 0 27 / 2 x236 x236 x118
+ * 1 0 1 27 / 2 x236 x236 x98
+ * 1 1 0 33.33 / 2 x192 x192 x96
+ * 1 1 1 33.33 / 2 x192 x192 x80
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+
+static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ /* EXTAL div PLL1 mult/div PLL3 mult/div */
+ { 1, 192, 1, 96, 1, },
+ { 1, 192, 1, 80, 1, },
+ { 1, 160, 1, 80, 1, },
+ { 1, 160, 1, 66, 1, },
+ { 2, 236, 1, 118, 1, },
+ { 2, 236, 1, 98, 1, },
+ { 2, 192, 1, 96, 1, },
+ { 2, 192, 1, 80, 1, },
+};
+
+static int __init r8a77970_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a77970_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a77970_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a77970_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a77970_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a77970_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a77970_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a77970_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a77970_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen3_cpg_clk_register,
+};
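For reference: the CPG_PLL_CONFIG_INDEX() macro in the new r8a77970 driver above packs mode pins MD14, MD13 and MD19 into a three-bit index into cpg_pll_configs[] (MD14 becomes bit 2, MD13 bit 1, MD19 bit 0). The following stand-alone sketch reproduces that arithmetic; it is illustrative only, not part of the patch, and assumes nothing beyond the kernel's BIT() definition.

/*
 * Stand-alone check of the MD14/MD13/MD19 -> table-index mapping used by
 * CPG_PLL_CONFIG_INDEX(). Plain user-space C, illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(n)	(1UL << (n))
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
					 (((md) & BIT(13)) >> 12) | \
					 (((md) & BIT(19)) >> 19))

int main(void)
{
	for (unsigned int md14 = 0; md14 <= 1; md14++)
		for (unsigned int md13 = 0; md13 <= 1; md13++)
			for (unsigned int md19 = 0; md19 <= 1; md19++) {
				unsigned long md = md14 << 14 | md13 << 13 |
						   md19 << 19;
				unsigned long idx = CPG_PLL_CONFIG_INDEX(md);

				/* MD14 is bit 2, MD13 bit 1, MD19 bit 0 */
				assert(idx == (md14 << 2 | md13 << 1 | md19));
				printf("MD14=%u MD13=%u MD19=%u -> index %lu\n",
				       md14, md13, md19, idx);
			}
	return 0;
}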
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index e594cf8ee63b..ea4cafbe6e85 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -127,7 +127,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("usb-dmac1", 331, R8A77995_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A77995_CLK_R),
DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
- DEF_MOD("intc-ap", 408, R8A77995_CLK_S3D1),
+ DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.c b/drivers/clk/renesas/rcar-gen2-cpg.c
index 123b1e622179..feb14579a71b 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.c
+++ b/drivers/clk/renesas/rcar-gen2-cpg.c
@@ -262,10 +262,9 @@ static unsigned int cpg_pll0_div __initdata;
static u32 cpg_mode __initdata;
struct clk * __init rcar_gen2_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core,
- const struct cpg_mssr_info *info,
- struct clk **clks,
- void __iomem *base)
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
{
const struct clk_div_table *table = NULL;
const struct clk *parent;
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index 9eba07ff8b11..020a3baad015 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -34,9 +34,9 @@ struct rcar_gen2_cpg_pll_config {
};
struct clk *rcar_gen2_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core,
- const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
int rcar_gen2_cpg_init(const struct rcar_gen2_cpg_pll_config *config,
unsigned int pll0_div, u32 mode);
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 951105816547..0904886f5501 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -29,6 +30,36 @@
#define CPG_PLL2CR 0x002c
#define CPG_PLL4CR 0x01f4
+struct cpg_simple_notifier {
+ struct notifier_block nb;
+ void __iomem *reg;
+ u32 saved;
+};
+
+static int cpg_simple_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpg_simple_notifier *csn =
+ container_of(nb, struct cpg_simple_notifier, nb);
+
+ switch (action) {
+ case PM_EVENT_SUSPEND:
+ csn->saved = readl(csn->reg);
+ return NOTIFY_OK;
+
+ case PM_EVENT_RESUME:
+ writel(csn->saved, csn->reg);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
+ struct cpg_simple_notifier *csn)
+{
+ csn->nb.notifier_call = cpg_simple_notifier_call;
+ raw_notifier_chain_register(notifiers, &csn->nb);
+}
/*
* SDn Clock
@@ -55,8 +86,8 @@ struct sd_div_table {
struct sd_clock {
struct clk_hw hw;
- void __iomem *reg;
const struct sd_div_table *div_table;
+ struct cpg_simple_notifier csn;
unsigned int div_num;
unsigned int div_min;
unsigned int div_max;
@@ -97,12 +128,12 @@ static const struct sd_div_table cpg_sd_div_table[] = {
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- u32 val = readl(clock->reg);
+ u32 val = readl(clock->csn.reg);
val &= ~(CPG_SD_STP_MASK);
val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK;
- writel(val, clock->reg);
+ writel(val, clock->csn.reg);
return 0;
}
@@ -111,14 +142,14 @@ static void cpg_sd_clock_disable(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- writel(readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+ writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg);
}
static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
struct sd_clock *clock = to_sd_clock(hw);
- return !(readl(clock->reg) & CPG_SD_STP_MASK);
+ return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}
static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
@@ -170,10 +201,10 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
clock->cur_div_idx = i;
- val = readl(clock->reg);
+ val = readl(clock->csn.reg);
val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
- writel(val, clock->reg);
+ writel(val, clock->csn.reg);
return 0;
}
@@ -188,8 +219,8 @@ static const struct clk_ops cpg_sd_clock_ops = {
};
static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
- void __iomem *base,
- const char *parent_name)
+ void __iomem *base, const char *parent_name,
+ struct raw_notifier_head *notifiers)
{
struct clk_init_data init;
struct sd_clock *clock;
@@ -207,12 +238,12 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
init.parent_names = &parent_name;
init.num_parents = 1;
- clock->reg = base + core->offset;
+ clock->csn.reg = base + core->offset;
clock->hw.init = &init;
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
- sd_fc = readl(clock->reg) & CPG_SD_FC_MASK;
+ sd_fc = readl(clock->csn.reg) & CPG_SD_FC_MASK;
for (i = 0; i < clock->div_num; i++)
if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
break;
@@ -233,8 +264,13 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
clk = clk_register(NULL, &clock->hw);
if (IS_ERR(clk))
- kfree(clock);
+ goto free_clock;
+ cpg_simple_notifier_register(notifiers, &clock->csn);
+ return clk;
+
+free_clock:
+ kfree(clock);
return clk;
}
@@ -265,7 +301,8 @@ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base)
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
{
const struct clk *parent;
unsigned int mult = 1;
@@ -331,22 +368,32 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+ return cpg_sd_clk_register(core, base, __clk_get_name(parent),
+ notifiers);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
+ struct cpg_simple_notifier *csn;
+
+ csn = kzalloc(sizeof(*csn), GFP_KERNEL);
+ if (!csn)
+ return ERR_PTR(-ENOMEM);
+
+ csn->reg = base + CPG_RCKCR;
+
/*
* RINT is default.
* Only if EXTALR is populated, we switch to it.
*/
- value = readl(base + CPG_RCKCR) & 0x3f;
+ value = readl(csn->reg) & 0x3f;
if (clk_get_rate(clks[cpg_clk_extalr])) {
parent = clks[cpg_clk_extalr];
value |= BIT(15);
}
- writel(value, base + CPG_RCKCR);
+ writel(value, csn->reg);
+ cpg_simple_notifier_register(notifiers, csn);
break;
}
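For reference: cpg_simple_notifier above keeps one saved word per register and writes it back verbatim on resume; the CPG/MSSR core (further below) runs the chain with PM_EVENT_SUSPEND/PM_EVENT_RESUME from its noirq hooks. A minimal stand-alone sketch of the same save/restore idea; the names and the fake register are hypothetical and the code is illustrative only, not part of the patch.

/*
 * User-space model of the "simple notifier" pattern: snapshot one
 * register word at suspend, write it back verbatim at resume.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { PM_SUSPEND, PM_RESUME };

struct simple_notifier {
	volatile uint32_t *reg;	/* register this notifier owns */
	uint32_t saved;		/* snapshot taken at suspend */
};

static void simple_notifier_call(struct simple_notifier *sn, int action)
{
	if (action == PM_SUSPEND)
		sn->saved = *sn->reg;		/* save on suspend */
	else if (action == PM_RESUME)
		*sn->reg = sn->saved;		/* restore on resume */
}

int main(void)
{
	uint32_t fake_rckcr = 0x0000801f;	/* stands in for a CPG register */
	struct simple_notifier sn = { .reg = &fake_rckcr };

	simple_notifier_call(&sn, PM_SUSPEND);	/* snapshot */
	fake_rckcr = 0;				/* contents lost across suspend */
	simple_notifier_call(&sn, PM_RESUME);	/* written back */
	assert(fake_rckcr == 0x0000801f);
	printf("register restored to %08x\n", fake_rckcr);
	return 0;
}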
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index d756ef8b78eb..2e4284399f53 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -44,7 +44,8 @@ struct rcar_gen3_cpg_pll_config {
struct clk *rcar_gen3_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
int rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
unsigned int clk_extalr, u32 mode);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index e580a5e6346c..e3d03ffea4bc 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
+#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -106,6 +107,9 @@ static const u16 srcr[] = {
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @notifiers: Notifier chain to save/restore clock state for system resume
+ * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
+ * @smstpcr_saved[].val: Saved values of SMSTPCR[]
*/
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
@@ -119,6 +123,12 @@ struct cpg_mssr_priv {
unsigned int num_core_clks;
unsigned int num_mod_clks;
unsigned int last_dt_core_clk;
+
+ struct raw_notifier_head notifiers;
+ struct {
+ u32 mask;
+ u32 val;
+ } smstpcr_saved[ARRAY_SIZE(smstpcr)];
};
@@ -293,7 +303,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
- priv->base + core->offset);
+ priv->base + core->offset,
+ &priv->notifiers);
} else {
clk = clk_register_fixed_factor(NULL, core->name,
parent_name, 0,
@@ -304,7 +315,8 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
default:
if (info->cpg_clk_register)
clk = info->cpg_clk_register(dev, core, info,
- priv->clks, priv->base);
+ priv->clks, priv->base,
+ &priv->notifiers);
else
dev_err(dev, "%s has unsupported core clock type %u\n",
core->name, core->type);
@@ -382,6 +394,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
priv->clks[id] = clk;
+ priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
return;
fail:
@@ -680,6 +693,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a7796_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A77970
+ {
+ .compatible = "renesas,r8a77970-cpg-mssr",
+ .data = &r8a77970_cpg_mssr_info,
+ },
+#endif
#ifdef CONFIG_CLK_R8A77995
{
.compatible = "renesas,r8a77995-cpg-mssr",
@@ -694,6 +713,85 @@ static void cpg_mssr_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static int cpg_mssr_suspend_noirq(struct device *dev)
+{
+ struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+ unsigned int reg;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ /* Save module registers with bits under our control */
+ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+ if (priv->smstpcr_saved[reg].mask)
+ priv->smstpcr_saved[reg].val =
+ readl(priv->base + SMSTPCR(reg));
+ }
+
+ /* Save core clocks */
+ raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);
+
+ return 0;
+}
+
+static int cpg_mssr_resume_noirq(struct device *dev)
+{
+ struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
+ unsigned int reg, i;
+ u32 mask, oldval, newval;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ /* Restore core clocks */
+ raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);
+
+ /* Restore module clocks */
+ for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
+ mask = priv->smstpcr_saved[reg].mask;
+ if (!mask)
+ continue;
+
+ oldval = readl(priv->base + SMSTPCR(reg));
+ newval = oldval & ~mask;
+ newval |= priv->smstpcr_saved[reg].val & mask;
+ if (newval == oldval)
+ continue;
+
+ writel(newval, priv->base + SMSTPCR(reg));
+
+ /* Wait until enabled clocks are really enabled */
+ mask &= ~priv->smstpcr_saved[reg].val;
+ if (!mask)
+ continue;
+
+ for (i = 1000; i > 0; --i) {
+ oldval = readl(priv->base + MSTPSR(reg));
+ if (!(oldval & mask))
+ break;
+ cpu_relax();
+ }
+
+ if (!i)
+ dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
+ priv->base + SMSTPCR(reg), oldval & mask);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops cpg_mssr_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
+ cpg_mssr_resume_noirq)
+};
+#define DEV_PM_OPS &cpg_mssr_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
static int __init cpg_mssr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -729,10 +827,12 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
if (!clks)
return -ENOMEM;
+ dev_set_drvdata(dev, priv);
priv->clks = clks;
priv->num_core_clks = info->num_total_core_clks;
priv->num_mod_clks = info->num_hw_mod_clks;
priv->last_dt_core_clk = info->last_dt_core_clk;
+ RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
for (i = 0; i < nclks; i++)
clks[i] = ERR_PTR(-ENOENT);
@@ -769,6 +869,7 @@ static struct platform_driver cpg_mssr_driver = {
.driver = {
.name = "renesas-cpg-mssr",
.of_match_table = cpg_mssr_match,
+ .pm = DEV_PM_OPS,
},
};
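For reference: cpg_mssr_resume_noirq() above restores only the SMSTPCR bits the driver registered (smstpcr_saved[].mask), merging them into whatever the register holds after resume, and then polls MSTPSR for the modules it re-enabled. A stand-alone sketch of the masked merge follows; it is illustrative only, not part of the patch, and the helper name is hypothetical.

/*
 * User-space illustration of the masked SMSTPCR restore: bits outside
 * the driver's mask keep their post-resume value, bits inside it take
 * the value saved at suspend time.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t smstpcr_restore(uint32_t current, uint32_t saved, uint32_t mask)
{
	return (current & ~mask) | (saved & mask);
}

int main(void)
{
	uint32_t oldval = 0xffff0f0f;	/* register contents after resume */
	uint32_t saved  = 0x00000005;	/* value saved at suspend time */
	uint32_t mask   = 0x0000000f;	/* bits registered as module clocks */
	uint32_t newval = smstpcr_restore(oldval, saved, mask);

	/* bits outside the mask keep their current value */
	assert((newval & ~mask) == (oldval & ~mask));
	/* bits inside the mask take the saved value */
	assert((newval & mask) == (saved & mask));
	printf("old=%08x saved=%08x mask=%08x -> new=%08x\n",
	       oldval, saved, mask, newval);
	return 0;
}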
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 94b9071d1061..0745b0930308 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -127,7 +127,8 @@ struct cpg_mssr_info {
struct clk *(*cpg_clk_register)(struct device *dev,
const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base);
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
};
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
@@ -138,6 +139,7 @@ extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a77970_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 6f19826cc447..59b8d320960a 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Rockchip Clock specific Makefile
#
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 0e09684d43a5..32c19c0f1e14 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -322,8 +322,6 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
sizeof(*rates) * nrates,
GFP_KERNEL);
if (!cpuclk->rate_table) {
- pr_err("%s: could not allocate memory for cpuclk rates\n",
- __func__);
ret = -ENOMEM;
goto unregister_notifier;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 00ad0e5f8d66..67e73fd71f09 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -290,15 +290,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(0), 6, 2, DFLAGS | CLK_DIVIDER_READ_ONLY,
div_core_peri_t, RK2928_CLKGATE_CON(0), 0, GFLAGS),
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(ACLK_VEPU, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 9, GFLAGS),
- GATE(0, "hclk_vepu", "aclk_vepu", 0,
+ GATE(HCLK_VEPU, "hclk_vepu", "aclk_vepu", 0,
RK2928_CLKGATE_CON(3), 10, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
+ COMPOSITE(ACLK_VDPU, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
RK2928_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
- GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
+ GATE(HCLK_VDPU, "hclk_vdpu", "aclk_vdpu", 0,
RK2928_CLKGATE_CON(3), 12, GFLAGS),
GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
@@ -644,13 +644,13 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
- GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
+ GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(5), 14, GFLAGS),
- GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
+ GATE(ACLK_CIF1, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
GATE(PCLK_TIMER1, "pclk_timer1", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 8, GFLAGS),
GATE(PCLK_TIMER2, "pclk_timer2", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index fc56565379dd..7c4d242f19c1 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -711,7 +711,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS),
GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS),
- GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS),
/*
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 7afc21dc374e..ef8900bc077f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Samsung Clock specific Makefile
#
@@ -5,6 +6,7 @@
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o
obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
+obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 6686e8ba61f9..d2c99d8916b8 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -457,8 +457,6 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
if (!cpuclk->cfg) {
- pr_err("%s: could not allocate memory for cpuclk data\n",
- __func__);
ret = -ENOMEM;
goto unregister_clk_nb;
}
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index b117783ed404..5bfc92ee3129 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -18,6 +18,7 @@
#include <linux/syscore_ops.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/clock/exynos-audss-clk.h>
@@ -36,14 +37,13 @@ static struct clk *epll;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
-#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ ASS_CLK_SRC, 0 },
{ ASS_CLK_DIV, 0 },
{ ASS_CLK_GATE, 0 },
};
-static int exynos_audss_clk_suspend(struct device *dev)
+static int __maybe_unused exynos_audss_clk_suspend(struct device *dev)
{
int i;
@@ -53,7 +53,7 @@ static int exynos_audss_clk_suspend(struct device *dev)
return 0;
}
-static int exynos_audss_clk_resume(struct device *dev)
+static int __maybe_unused exynos_audss_clk_resume(struct device *dev)
{
int i;
@@ -62,7 +62,6 @@ static int exynos_audss_clk_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
struct exynos_audss_clk_drvdata {
unsigned int has_adma_clk:1;
@@ -135,6 +134,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
const struct exynos_audss_clk_drvdata *variant;
struct clk_hw **clk_table;
struct resource *res;
+ struct device *dev = &pdev->dev;
int i, ret = 0;
variant = of_device_get_match_data(&pdev->dev);
@@ -142,15 +142,15 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res);
+ reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_base)) {
- dev_err(&pdev->dev, "failed to map audss registers\n");
+ dev_err(dev, "failed to map audss registers\n");
return PTR_ERR(reg_base);
}
epll = ERR_PTR(-ENODEV);
- clk_data = devm_kzalloc(&pdev->dev,
+ clk_data = devm_kzalloc(dev,
sizeof(*clk_data) +
sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS,
GFP_KERNEL);
@@ -160,8 +160,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
clk_data->num = variant->num_clks;
clk_table = clk_data->hws;
- pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
- pll_in = devm_clk_get(&pdev->dev, "pll_in");
+ pll_ref = devm_clk_get(dev, "pll_ref");
+ pll_in = devm_clk_get(dev, "pll_in");
if (!IS_ERR(pll_ref))
mout_audss_p[0] = __clk_get_name(pll_ref);
if (!IS_ERR(pll_in)) {
@@ -172,88 +172,103 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
ret = clk_prepare_enable(epll);
if (ret) {
- dev_err(&pdev->dev,
+ dev_err(dev,
"failed to prepare the epll clock\n");
return ret;
}
}
}
- clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
+
+ /*
+ * Enable runtime PM here to allow the clock core using runtime PM
+ * for the registered clocks. Additionally, we increase the runtime
+ * PM usage count before registering the clocks, to prevent the
+ * clock core from runtime suspending the device.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(dev, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
- cdclk = devm_clk_get(&pdev->dev, "cdclk");
- sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio");
+ cdclk = devm_clk_get(dev, "cdclk");
+ sclk_audio = devm_clk_get(dev, "sclk_audio");
if (!IS_ERR(cdclk))
mout_i2s_p[1] = __clk_get_name(cdclk);
if (!IS_ERR(sclk_audio))
mout_i2s_p[2] = __clk_get_name(sclk_audio);
- clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(NULL, "mout_i2s",
+ clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(dev, "mout_i2s",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
- clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp",
+ clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(dev, "dout_srp",
"mout_audss", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
- clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
+ clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(dev,
"dout_aud_bus", "dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
- clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s",
+ clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(dev, "dout_i2s",
"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
&lock);
- clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(NULL, "srp_clk",
+ clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(dev, "srp_clk",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 0, 0, &lock);
- clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(NULL, "i2s_bus",
+ clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(dev, "i2s_bus",
"dout_aud_bus", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 2, 0, &lock);
- clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(NULL, "sclk_i2s",
+ clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(dev, "sclk_i2s",
"dout_i2s", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 3, 0, &lock);
- clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(NULL, "pcm_bus",
+ clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(dev, "pcm_bus",
"sclk_pcm", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
- sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
+ sclk_pcm_in = devm_clk_get(dev, "sclk_pcm_in");
if (!IS_ERR(sclk_pcm_in))
sclk_pcm_p = __clk_get_name(sclk_pcm_in);
- clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(NULL, "sclk_pcm",
+ clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(dev, "sclk_pcm",
sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
if (variant->has_adma_clk) {
- clk_table[EXYNOS_ADMA] = clk_hw_register_gate(NULL, "adma",
+ clk_table[EXYNOS_ADMA] = clk_hw_register_gate(dev, "adma",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 9, 0, &lock);
}
for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clk_table[i])) {
- dev_err(&pdev->dev, "failed to register clock %d\n", i);
+ dev_err(dev, "failed to register clock %d\n", i);
ret = PTR_ERR(clk_table[i]);
goto unregister;
}
}
- ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
clk_data);
if (ret) {
- dev_err(&pdev->dev, "failed to add clock provider\n");
+ dev_err(dev, "failed to add clock provider\n");
goto unregister;
}
+ pm_runtime_put_sync(dev);
+
return 0;
unregister:
exynos_audss_clk_teardown();
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
if (!IS_ERR(epll))
clk_disable_unprepare(epll);
@@ -266,6 +281,7 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
exynos_audss_clk_teardown();
+ pm_runtime_disable(&pdev->dev);
if (!IS_ERR(epll))
clk_disable_unprepare(epll);
@@ -274,8 +290,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops exynos_audss_clk_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_audss_clk_suspend,
- exynos_audss_clk_resume)
+ SET_RUNTIME_PM_OPS(exynos_audss_clk_suspend, exynos_audss_clk_resume,
+ NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver exynos_audss_clk_driver = {
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index a21aea062bae..f29fb5824005 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -144,8 +144,6 @@ static void __init exynos4_clkout_init(struct device_node *node)
}
CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4212_clkout, "samsung,exynos4212-pmu",
- exynos4_clkout_init);
CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
exynos4_clkout_init);
CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index d8d3cb67b402..134f25f2a913 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -550,9 +550,8 @@ static const struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __
/* list of mux clocks supported in all exynos4 soc's */
static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
- MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
- CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0,
- "mout_apll"),
+ MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1),
MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
@@ -737,7 +736,7 @@ static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
- DIV(CLK_ARM_CLK, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -837,6 +836,12 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
DIV(0, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(0, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(0, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
+ DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+ DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+ DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
+};
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
DIV_F(CLK_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
CLK_GET_RATE_NOCACHE, 0),
DIV_F(CLK_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
@@ -846,18 +851,10 @@ static const struct samsung_div_clock exynos4x12_div_clks[] __initconst = {
4, 3, CLK_GET_RATE_NOCACHE, 0),
DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
8, 3, CLK_GET_RATE_NOCACHE, 0),
- DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
- DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
- DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
};
/* list of gate clocks supported in all exynos4 soc's */
static const struct samsung_gate_clock exynos4_gate_clks[] __initconst = {
- /*
- * After all Exynos4 based platforms are migrated to use device tree,
- * the device name and clock alias names specified below for some
- * of the clocks can be removed.
- */
GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
GATE(CLK_PPMURIGHT, "ppmuright", "aclk200", GATE_IP_RIGHTBUS, 1, 0, 0),
GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
@@ -1147,6 +1144,13 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
0, 0),
GATE(CLK_I2S0, "i2s0", "aclk100", E4X12_GATE_IP_MAUDIO, 3,
0, 0),
+ GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
+ GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
+ GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
+ 0),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
GATE(CLK_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(CLK_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1,
@@ -1199,24 +1203,6 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = {
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(CLK_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
- GATE(CLK_G2D, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
- GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", GATE_IP_DMC, 24, 0, 0),
- GATE(CLK_TMU_APBIF, "tmu_apbif", "aclk100", E4X12_GATE_IP_PERIR, 17, 0,
- 0),
-};
-
-static const struct samsung_clock_alias exynos4_aliases[] __initconst = {
- ALIAS(CLK_MOUT_CORE, NULL, "moutcore"),
- ALIAS(CLK_ARM_CLK, NULL, "armclk"),
- ALIAS(CLK_SCLK_APLL, NULL, "mout_apll"),
-};
-
-static const struct samsung_clock_alias exynos4210_aliases[] __initconst = {
- ALIAS(CLK_SCLK_MPLL, NULL, "mout_mpll"),
-};
-
-static const struct samsung_clock_alias exynos4x12_aliases[] __initconst = {
- ALIAS(CLK_MOUT_MPLL_USER_C, NULL, "mout_mpll"),
};
/*
@@ -1355,14 +1341,14 @@ static const struct samsung_pll_rate_table exynos4x12_vpll_rates[] __initconst =
};
static struct samsung_pll_clock exynos4210_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
- E4210_MPLL_LOCK, E4210_MPLL_CON0, "fout_mpll", NULL),
- [epll] = PLL_A(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
- EPLL_LOCK, EPLL_CON0, "fout_epll", NULL),
- [vpll] = PLL_A(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
- VPLL_LOCK, VPLL_CON0, "fout_vpll", NULL),
+ [apll] = PLL(pll_4508, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_4508, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+ E4210_MPLL_LOCK, E4210_MPLL_CON0, NULL),
+ [epll] = PLL(pll_4600, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_4650c, CLK_FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON0, NULL),
};
static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
@@ -1416,24 +1402,6 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
{ 0 },
};
-static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
- { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
- { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
- { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
- { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
- { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
- { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
- { 900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
- { 800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
- { 700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
- { 200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
- { 0 },
-};
-
#define E4412_CPU_DIV1(cores, hpm, copy) \
(((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
@@ -1527,8 +1495,6 @@ static void __init exynos4_clk_init(struct device_node *np,
ARRAY_SIZE(exynos4210_div_clks));
samsung_clk_register_gate(ctx, exynos4210_gate_clks,
ARRAY_SIZE(exynos4210_gate_clks));
- samsung_clk_register_alias(ctx, exynos4210_aliases,
- ARRAY_SIZE(exynos4210_aliases));
samsung_clk_register_fixed_factor(ctx,
exynos4210_fixed_factor_clks,
ARRAY_SIZE(exynos4210_fixed_factor_clks));
@@ -1537,32 +1503,31 @@ static void __init exynos4_clk_init(struct device_node *np,
e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
} else {
+ struct resource res;
+
samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
samsung_clk_register_div(ctx, exynos4x12_div_clks,
ARRAY_SIZE(exynos4x12_div_clks));
samsung_clk_register_gate(ctx, exynos4x12_gate_clks,
ARRAY_SIZE(exynos4x12_gate_clks));
- samsung_clk_register_alias(ctx, exynos4x12_aliases,
- ARRAY_SIZE(exynos4x12_aliases));
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
- if (of_machine_is_compatible("samsung,exynos4412")) {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
- e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
- } else {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
- e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+
+ of_address_to_resource(np, 0, &res);
+ if (resource_size(&res) > 0x18000) {
+ samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+ ARRAY_SIZE(exynos4x12_isp_div_clks));
+ samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+ ARRAY_SIZE(exynos4x12_isp_gate_clks));
}
- }
- samsung_clk_register_alias(ctx, exynos4_aliases,
- ARRAY_SIZE(exynos4_aliases));
+ exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+ mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+ e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ }
if (soc == EXYNOS4X12)
exynos4x12_core_down_clock();
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
new file mode 100644
index 000000000000..d5f1ccb36300
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos4412 ISP module.
+*/
+
+#include <dt-bindings/clock/exynos4.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "clk.h"
+
+/* Exynos4x12 specific registers, which belong to ISP power domain */
+#define E4X12_DIV_ISP0 0x0300
+#define E4X12_DIV_ISP1 0x0304
+#define E4X12_GATE_ISP0 0x0800
+#define E4X12_GATE_ISP1 0x0804
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+static struct samsung_clk_reg_dump *exynos4x12_save_isp;
+
+static const unsigned long exynos4x12_clk_isp_save[] __initconst = {
+ E4X12_DIV_ISP0,
+ E4X12_DIV_ISP1,
+ E4X12_GATE_ISP0,
+ E4X12_GATE_ISP1,
+};
+
+PNAME(mout_user_aclk400_mcuisp_p4x12) = { "fin_pll", "div_aclk400_mcuisp", };
+
+static struct samsung_div_clock exynos4x12_isp_div_clks[] = {
+ DIV(CLK_ISP_DIV_ISP0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
+ DIV(CLK_ISP_DIV_ISP1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV(CLK_ISP_DIV_MCUISP0, "div_mcuisp0", "aclk400_mcuisp",
+ E4X12_DIV_ISP1, 4, 3),
+ DIV(CLK_ISP_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0",
+ E4X12_DIV_ISP1, 8, 3),
+ DIV(0, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
+};
+
+static struct samsung_gate_clock exynos4x12_isp_gate_clks[] = {
+ GATE(CLK_ISP_FIMC_ISP, "isp", "aclk200", E4X12_GATE_ISP0, 0, 0, 0),
+ GATE(CLK_ISP_FIMC_DRC, "drc", "aclk200", E4X12_GATE_ISP0, 1, 0, 0),
+ GATE(CLK_ISP_FIMC_FD, "fd", "aclk200", E4X12_GATE_ISP0, 2, 0, 0),
+ GATE(CLK_ISP_FIMC_LITE0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, 0, 0),
+ GATE(CLK_ISP_FIMC_LITE1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, 0, 0),
+ GATE(CLK_ISP_MCUISP, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, 0, 0),
+ GATE(CLK_ISP_GICISP, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, 0, 0),
+ GATE(CLK_ISP_SMMU_ISP, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, 0, 0),
+ GATE(CLK_ISP_SMMU_DRC, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, 0, 0),
+ GATE(CLK_ISP_SMMU_FD, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, 0, 0),
+ GATE(CLK_ISP_SMMU_LITE0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
+ 0, 0),
+ GATE(CLK_ISP_SMMU_LITE1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
+ 0, 0),
+ GATE(CLK_ISP_PPMUISPMX, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
+ 0, 0),
+ GATE(CLK_ISP_PPMUISPX, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
+ 0, 0),
+ GATE(CLK_ISP_MCUCTL_ISP, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
+ 0, 0),
+ GATE(CLK_ISP_MPWM_ISP, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
+ 0, 0),
+ GATE(CLK_ISP_I2C0_ISP, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
+ 0, 0),
+ GATE(CLK_ISP_I2C1_ISP, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
+ 0, 0),
+ GATE(CLK_ISP_MTCADC_ISP, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
+ 0, 0),
+ GATE(CLK_ISP_PWM_ISP, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, 0, 0),
+ GATE(CLK_ISP_WDT_ISP, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, 0, 0),
+ GATE(CLK_ISP_UART_ISP, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
+ 0, 0),
+ GATE(CLK_ISP_ASYNCAXIM, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
+ 0, 0),
+ GATE(CLK_ISP_SMMU_ISPCX, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
+ 0, 0),
+ GATE(CLK_ISP_SPI0_ISP, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
+ 0, 0),
+ GATE(CLK_ISP_SPI1_ISP, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
+ 0, 0),
+};
+
+static int __maybe_unused exynos4x12_isp_clk_suspend(struct device *dev)
+{
+ struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+ samsung_clk_save(ctx->reg_base, exynos4x12_save_isp,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ return 0;
+}
+
+static int __maybe_unused exynos4x12_isp_clk_resume(struct device *dev)
+{
+ struct samsung_clk_provider *ctx = dev_get_drvdata(dev);
+
+ samsung_clk_restore(ctx->reg_base, exynos4x12_save_isp,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ return 0;
+}
+
+static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
+{
+ struct samsung_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *reg_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(reg_base);
+ }
+
+ exynos4x12_save_isp = samsung_clk_alloc_reg_dump(exynos4x12_clk_isp_save,
+ ARRAY_SIZE(exynos4x12_clk_isp_save));
+ if (!exynos4x12_save_isp)
+ return -ENOMEM;
+
+ ctx = samsung_clk_init(np, reg_base, CLK_NR_ISP_CLKS);
+ ctx->dev = dev;
+
+ platform_set_drvdata(pdev, ctx);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
+ ARRAY_SIZE(exynos4x12_isp_div_clks));
+ samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
+ ARRAY_SIZE(exynos4x12_isp_gate_clks));
+
+ samsung_clk_of_add_provider(np, ctx);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos4x12_isp_clk_of_match[] = {
+ { .compatible = "samsung,exynos4412-isp-clock", },
+ { },
+};
+
+static const struct dev_pm_ops exynos4x12_isp_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos4x12_isp_clk_suspend,
+ exynos4x12_isp_clk_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos4x12_isp_clk_driver __refdata = {
+ .driver = {
+ .name = "exynos4x12-isp-clk",
+ .of_match_table = exynos4x12_isp_clk_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &exynos4x12_isp_pm_ops,
+ },
+ .probe = exynos4x12_isp_clk_probe,
+};
+
+static int __init exynos4x12_isp_clk_init(void)
+{
+ return platform_driver_register(&exynos4x12_isp_clk_driver);
+}
+core_initcall(exynos4x12_isp_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 27a227d6620c..9b073c98a891 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -293,14 +293,14 @@ static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
/*
* CMU_CPU
*/
- MUX_FA(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
- CLK_SET_RATE_PARENT, 0, "mout_apll"),
- MUX_A(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
+ MUX_F(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+ CLK_SET_RATE_PARENT, 0),
+ MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
/*
* CMU_CORE
*/
- MUX_A(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
+ MUX(0, "mout_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
/*
* CMU_TOP
@@ -391,7 +391,7 @@ static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
*/
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV_A(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3, "armclk"),
+ DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
/*
* CMU_TOP
@@ -743,10 +743,10 @@ static const struct samsung_pll_rate_table apll_24mhz_tbl[] __initconst = {
};
static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
- [apll] = PLL_A(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
- APLL_LOCK, APLL_CON0, "fout_apll", NULL),
- [mpll] = PLL_A(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
- MPLL_LOCK, MPLL_CON0, "fout_mpll", NULL),
+ [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
+ APLL_CON0, NULL),
+ [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK,
+ MPLL_CON0, NULL),
[bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK,
BPLL_CON0, NULL),
[gpll] = PLL(pll_35xx, CLK_FOUT_GPLL, "fout_gpll", "fin_pll", GPLL_LOCK,
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 25601967d1cd..45d34f601e9e 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -600,8 +600,7 @@ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
TOP_SPARE2, 4, 1),
MUX(0, "mout_aclk400_isp", mout_group1_p, SRC_TOP0, 0, 2),
- MUX_A(0, "mout_aclk400_mscl", mout_group1_p,
- SRC_TOP0, 4, 2, "aclk400_mscl"),
+ MUX(0, "mout_aclk400_mscl", mout_group1_p, SRC_TOP0, 4, 2),
MUX(0, "mout_aclk400_wcore", mout_group1_p, SRC_TOP0, 16, 2),
MUX(0, "mout_aclk100_noc", mout_group1_p, SRC_TOP0, 20, 2),
@@ -998,7 +997,7 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
- GATE_BUS_TOP, 17, 0, 0),
+ GATE_BUS_TOP, 17, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 11343a597093..db270908037a 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -9,9 +9,13 @@
* Common Clock Framework support for Exynos5433 SoC.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/clock/exynos5433.h>
@@ -1991,6 +1995,14 @@ static const unsigned long fsys_clk_regs[] __initconst = {
ENABLE_IP_FSYS1,
};
+static const struct samsung_clk_reg_dump fsys_suspend_regs[] = {
+ { MUX_SEL_FSYS0, 0 },
+ { MUX_SEL_FSYS1, 0 },
+ { MUX_SEL_FSYS2, 0 },
+ { MUX_SEL_FSYS3, 0 },
+ { MUX_SEL_FSYS4, 0 },
+};
+
static const struct samsung_fixed_rate_clock fsys_fixed_clks[] __initconst = {
/* PHY clocks from USBDRD30_PHY */
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
@@ -2296,16 +2308,11 @@ static const struct samsung_cmu_info fsys_cmu_info __initconst = {
.nr_clk_ids = FSYS_NR_CLK,
.clk_regs = fsys_clk_regs,
.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs),
+ .suspend_regs = fsys_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(fsys_suspend_regs),
+ .clk_name = "aclk_fsys_200",
};
-static void __init exynos5433_cmu_fsys_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &fsys_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
- exynos5433_cmu_fsys_init);
-
/*
* Register offset definitions for CMU_G2D
*/
@@ -2335,6 +2342,10 @@ static const unsigned long g2d_clk_regs[] __initconst = {
DIV_ENABLE_IP_G2D_SECURE_SMMU_G2D,
};
+static const struct samsung_clk_reg_dump g2d_suspend_regs[] = {
+ { MUX_SEL_G2D0, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aclk_g2d_266_user_p) = { "oscclk", "aclk_g2d_266", };
PNAME(mout_aclk_g2d_400_user_p) = { "oscclk", "aclk_g2d_400", };
@@ -2420,16 +2431,11 @@ static const struct samsung_cmu_info g2d_cmu_info __initconst = {
.nr_clk_ids = G2D_NR_CLK,
.clk_regs = g2d_clk_regs,
.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs),
+ .suspend_regs = g2d_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(g2d_suspend_regs),
+ .clk_name = "aclk_g2d_400",
};
-static void __init exynos5433_cmu_g2d_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &g2d_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_g2d, "samsung,exynos5433-cmu-g2d",
- exynos5433_cmu_g2d_init);
-
/*
* Register offset definitions for CMU_DISP
*/
@@ -2494,6 +2500,18 @@ static const unsigned long disp_clk_regs[] __initconst = {
CLKOUT_CMU_DISP_DIV_STAT,
};
+static const struct samsung_clk_reg_dump disp_suspend_regs[] = {
+ /* PLL has to be enabled for suspend */
+ { DISP_PLL_CON0, 0x85f40502 },
+ /* ignore status of external PHY muxes during suspend to avoid hangs */
+ { MUX_IGNORE_DISP2, 0x00111111 },
+ { MUX_SEL_DISP0, 0 },
+ { MUX_SEL_DISP1, 0 },
+ { MUX_SEL_DISP2, 0 },
+ { MUX_SEL_DISP3, 0 },
+ { MUX_SEL_DISP4, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_disp_pll_p) = { "oscclk", "fout_disp_pll", };
PNAME(mout_sclk_dsim1_user_p) = { "oscclk", "sclk_dsim1_disp", };
@@ -2841,16 +2859,11 @@ static const struct samsung_cmu_info disp_cmu_info __initconst = {
.nr_clk_ids = DISP_NR_CLK,
.clk_regs = disp_clk_regs,
.nr_clk_regs = ARRAY_SIZE(disp_clk_regs),
+ .suspend_regs = disp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(disp_suspend_regs),
+ .clk_name = "aclk_disp_333",
};
-static void __init exynos5433_cmu_disp_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &disp_cmu_info);
-}
-
-CLK_OF_DECLARE(exynos5433_cmu_disp, "samsung,exynos5433-cmu-disp",
- exynos5433_cmu_disp_init);
-
/*
* Register offset definitions for CMU_AUD
*/
@@ -2885,6 +2898,11 @@ static const unsigned long aud_clk_regs[] __initconst = {
ENABLE_IP_AUD1,
};
+static const struct samsung_clk_reg_dump aud_suspend_regs[] = {
+ { MUX_SEL_AUD0, 0 },
+ { MUX_SEL_AUD1, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
@@ -3011,16 +3029,11 @@ static const struct samsung_cmu_info aud_cmu_info __initconst = {
.nr_clk_ids = AUD_NR_CLK,
.clk_regs = aud_clk_regs,
.nr_clk_regs = ARRAY_SIZE(aud_clk_regs),
+ .suspend_regs = aud_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(aud_suspend_regs),
+ .clk_name = "fout_aud_pll",
};
-static void __init exynos5433_cmu_aud_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &aud_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_aud, "samsung,exynos5433-cmu-aud",
- exynos5433_cmu_aud_init);
-
-
/*
* Register offset definitions for CMU_BUS{0|1|2}
*/
@@ -3222,6 +3235,10 @@ static const unsigned long g3d_clk_regs[] __initconst = {
CLK_STOPCTRL,
};
+static const struct samsung_clk_reg_dump g3d_suspend_regs[] = {
+ { MUX_SEL_G3D, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_aclk_g3d_400_p) = { "mout_g3d_pll", "aclk_g3d_400", };
PNAME(mout_g3d_pll_p) = { "oscclk", "fout_g3d_pll", };
@@ -3295,15 +3312,11 @@ static const struct samsung_cmu_info g3d_cmu_info __initconst = {
.nr_clk_ids = G3D_NR_CLK,
.clk_regs = g3d_clk_regs,
.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs),
+ .suspend_regs = g3d_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(g3d_suspend_regs),
+ .clk_name = "aclk_g3d_400",
};
-static void __init exynos5433_cmu_g3d_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &g3d_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
- exynos5433_cmu_g3d_init);
-
/*
* Register offset definitions for CMU_GSCL
*/
@@ -3342,6 +3355,12 @@ static const unsigned long gscl_clk_regs[] __initconst = {
ENABLE_IP_GSCL_SECURE_SMMU_GSCL2,
};
+static const struct samsung_clk_reg_dump gscl_suspend_regs[] = {
+ { MUX_SEL_GSCL, 0 },
+ { ENABLE_ACLK_GSCL, 0xfff },
+ { ENABLE_PCLK_GSCL, 0xff },
+};
+
/* list of all parent clock list */
PNAME(aclk_gscl_111_user_p) = { "oscclk", "aclk_gscl_111", };
PNAME(aclk_gscl_333_user_p) = { "oscclk", "aclk_gscl_333", };
@@ -3436,15 +3455,11 @@ static const struct samsung_cmu_info gscl_cmu_info __initconst = {
.nr_clk_ids = GSCL_NR_CLK,
.clk_regs = gscl_clk_regs,
.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs),
+ .suspend_regs = gscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(gscl_suspend_regs),
+ .clk_name = "aclk_gscl_111",
};
-static void __init exynos5433_cmu_gscl_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &gscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_gscl, "samsung,exynos5433-cmu-gscl",
- exynos5433_cmu_gscl_init);
-
/*
* Register offset definitions for CMU_APOLLO
*/
@@ -3970,6 +3985,11 @@ static const unsigned long mscl_clk_regs[] __initconst = {
ENABLE_IP_MSCL_SECURE_SMMU_JPEG,
};
+static const struct samsung_clk_reg_dump mscl_suspend_regs[] = {
+ { MUX_SEL_MSCL0, 0 },
+ { MUX_SEL_MSCL1, 0 },
+};
+
/* list of all parent clock list */
PNAME(mout_sclk_jpeg_user_p) = { "oscclk", "sclk_jpeg_mscl", };
PNAME(mout_aclk_mscl_400_user_p) = { "oscclk", "aclk_mscl_400", };
@@ -4082,15 +4102,11 @@ static const struct samsung_cmu_info mscl_cmu_info __initconst = {
.nr_clk_ids = MSCL_NR_CLK,
.clk_regs = mscl_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mscl_clk_regs),
+ .suspend_regs = mscl_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(mscl_suspend_regs),
+ .clk_name = "aclk_mscl_400",
};
-static void __init exynos5433_cmu_mscl_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &mscl_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
- exynos5433_cmu_mscl_init);
-
/*
* Register offset definitions for CMU_MFC
*/
@@ -4120,6 +4136,10 @@ static const unsigned long mfc_clk_regs[] __initconst = {
ENABLE_IP_MFC_SECURE_SMMU_MFC,
};
+static const struct samsung_clk_reg_dump mfc_suspend_regs[] = {
+ { MUX_SEL_MFC, 0 },
+};
+
PNAME(mout_aclk_mfc_400_user_p) = { "oscclk", "aclk_mfc_400", };
static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
@@ -4190,15 +4210,11 @@ static const struct samsung_cmu_info mfc_cmu_info __initconst = {
.nr_clk_ids = MFC_NR_CLK,
.clk_regs = mfc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+ .suspend_regs = mfc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(mfc_suspend_regs),
+ .clk_name = "aclk_mfc_400",
};
-static void __init exynos5433_cmu_mfc_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &mfc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
- exynos5433_cmu_mfc_init);
-
/*
* Register offset definitions for CMU_HEVC
*/
@@ -4228,6 +4244,10 @@ static const unsigned long hevc_clk_regs[] __initconst = {
ENABLE_IP_HEVC_SECURE_SMMU_HEVC,
};
+static const struct samsung_clk_reg_dump hevc_suspend_regs[] = {
+ { MUX_SEL_HEVC, 0 },
+};
+
PNAME(mout_aclk_hevc_400_user_p) = { "oscclk", "aclk_hevc_400", };
static const struct samsung_mux_clock hevc_mux_clks[] __initconst = {
@@ -4300,15 +4320,11 @@ static const struct samsung_cmu_info hevc_cmu_info __initconst = {
.nr_clk_ids = HEVC_NR_CLK,
.clk_regs = hevc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(hevc_clk_regs),
+ .suspend_regs = hevc_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(hevc_suspend_regs),
+ .clk_name = "aclk_hevc_400",
};
-static void __init exynos5433_cmu_hevc_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &hevc_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
- exynos5433_cmu_hevc_init);
-
/*
* Register offset definitions for CMU_ISP
*/
@@ -4342,6 +4358,10 @@ static const unsigned long isp_clk_regs[] __initconst = {
ENABLE_IP_ISP3,
};
+static const struct samsung_clk_reg_dump isp_suspend_regs[] = {
+ { MUX_SEL_ISP, 0 },
+};
+
PNAME(mout_aclk_isp_dis_400_user_p) = { "oscclk", "aclk_isp_dis_400", };
PNAME(mout_aclk_isp_400_user_p) = { "oscclk", "aclk_isp_400", };
@@ -4553,15 +4573,11 @@ static const struct samsung_cmu_info isp_cmu_info __initconst = {
.nr_clk_ids = ISP_NR_CLK,
.clk_regs = isp_clk_regs,
.nr_clk_regs = ARRAY_SIZE(isp_clk_regs),
+ .suspend_regs = isp_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(isp_suspend_regs),
+ .clk_name = "aclk_isp_400",
};
-static void __init exynos5433_cmu_isp_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &isp_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_isp, "samsung,exynos5433-cmu-isp",
- exynos5433_cmu_isp_init);
-
/*
* Register offset definitions for CMU_CAM0
*/
@@ -4625,6 +4641,15 @@ static const unsigned long cam0_clk_regs[] __initconst = {
ENABLE_IP_CAM02,
ENABLE_IP_CAM03,
};
+
+static const struct samsung_clk_reg_dump cam0_suspend_regs[] = {
+ { MUX_SEL_CAM00, 0 },
+ { MUX_SEL_CAM01, 0 },
+ { MUX_SEL_CAM02, 0 },
+ { MUX_SEL_CAM03, 0 },
+ { MUX_SEL_CAM04, 0 },
+};
+
PNAME(mout_aclk_cam0_333_user_p) = { "oscclk", "aclk_cam0_333", };
PNAME(mout_aclk_cam0_400_user_p) = { "oscclk", "aclk_cam0_400", };
PNAME(mout_aclk_cam0_552_user_p) = { "oscclk", "aclk_cam0_552", };
@@ -5030,15 +5055,11 @@ static const struct samsung_cmu_info cam0_cmu_info __initconst = {
.nr_clk_ids = CAM0_NR_CLK,
.clk_regs = cam0_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam0_clk_regs),
+ .suspend_regs = cam0_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(cam0_suspend_regs),
+ .clk_name = "aclk_cam0_400",
};
-static void __init exynos5433_cmu_cam0_init(struct device_node *np)
-{
- samsung_cmu_register_one(np, &cam0_cmu_info);
-}
-CLK_OF_DECLARE(exynos5433_cmu_cam0, "samsung,exynos5433-cmu-cam0",
- exynos5433_cmu_cam0_init);
-
/*
* Register offset definitions for CMU_CAM1
*/
@@ -5085,6 +5106,12 @@ static const unsigned long cam1_clk_regs[] __initconst = {
ENABLE_IP_CAM12,
};
+static const struct samsung_clk_reg_dump cam1_suspend_regs[] = {
+ { MUX_SEL_CAM10, 0 },
+ { MUX_SEL_CAM11, 0 },
+ { MUX_SEL_CAM12, 0 },
+};
+
PNAME(mout_sclk_isp_uart_user_p) = { "oscclk", "sclk_isp_uart_cam1", };
PNAME(mout_sclk_isp_spi1_user_p) = { "oscclk", "sclk_isp_spi1_cam1", };
PNAME(mout_sclk_isp_spi0_user_p) = { "oscclk", "sclk_isp_spi0_cam1", };
@@ -5403,11 +5430,223 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = {
.nr_clk_ids = CAM1_NR_CLK,
.clk_regs = cam1_clk_regs,
.nr_clk_regs = ARRAY_SIZE(cam1_clk_regs),
+ .suspend_regs = cam1_suspend_regs,
+ .nr_suspend_regs = ARRAY_SIZE(cam1_suspend_regs),
+ .clk_name = "aclk_cam1_400",
+};
+
+
+struct exynos5433_cmu_data {
+ struct samsung_clk_reg_dump *clk_save;
+ unsigned int nr_clk_save;
+ const struct samsung_clk_reg_dump *clk_suspend;
+ unsigned int nr_clk_suspend;
+
+ struct clk *clk;
+ struct clk **pclks;
+ int nr_pclks;
+
+ /* must be the last entry */
+ struct samsung_clk_provider ctx;
+};
+
+static int __maybe_unused exynos5433_cmu_suspend(struct device *dev)
+{
+ struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ samsung_clk_save(data->ctx.reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ /* for suspend some registers have to be set to certain values */
+ samsung_clk_restore(data->ctx.reg_base, data->clk_suspend,
+ data->nr_clk_suspend);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int __maybe_unused exynos5433_cmu_resume(struct device *dev)
+{
+ struct exynos5433_cmu_data *data = dev_get_drvdata(dev);
+ int i;
+
+ clk_prepare_enable(data->clk);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_prepare_enable(data->pclks[i]);
+
+ samsung_clk_restore(data->ctx.reg_base, data->clk_save,
+ data->nr_clk_save);
+
+ for (i = 0; i < data->nr_pclks; i++)
+ clk_disable_unprepare(data->pclks[i]);
+
+ return 0;
+}
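The two helpers doing the heavy lifting in the callbacks above are not part of this diff; their assumed behaviour (simplified from drivers/clk/samsung/clk.c) is a plain walk over a samsung_clk_reg_dump array:

/* Simplified sketch of the helpers used by the suspend/resume paths above;
 * the real implementations live in drivers/clk/samsung/clk.c. */
void samsung_clk_save(void __iomem *base, struct samsung_clk_reg_dump *rd,
                      unsigned int num_regs)
{
        for (; num_regs > 0; --num_regs, ++rd)
                rd->value = readl(base + rd->offset);
}

void samsung_clk_restore(void __iomem *base,
                         const struct samsung_clk_reg_dump *rd,
                         unsigned int num_regs)
{
        for (; num_regs > 0; --num_regs, ++rd)
                writel(rd->value, base + rd->offset);
}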
+
+static int __init exynos5433_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct exynos5433_cmu_data *data;
+ struct samsung_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg_base;
+ int i;
+
+ info = of_device_get_match_data(dev);
+
+ data = devm_kzalloc(dev, sizeof(*data) +
+ sizeof(*data->ctx.clk_data.hws) * info->nr_clk_ids,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ ctx = &data->ctx;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(reg_base);
+ }
+
+ for (i = 0; i < info->nr_clk_ids; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ ctx->clk_data.num = info->nr_clk_ids;
+ ctx->reg_base = reg_base;
+ ctx->dev = dev;
+ spin_lock_init(&ctx->lock);
+
+ data->clk_save = samsung_clk_alloc_reg_dump(info->clk_regs,
+ info->nr_clk_regs);
+ data->nr_clk_save = info->nr_clk_regs;
+ data->clk_suspend = info->suspend_regs;
+ data->nr_clk_suspend = info->nr_suspend_regs;
+ data->nr_pclks = of_count_phandle_with_args(dev->of_node, "clocks",
+ "#clock-cells");
+ if (data->nr_pclks > 0) {
+ data->pclks = devm_kcalloc(dev, data->nr_pclks,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!data->pclks)
+ return -ENOMEM;
+
+ for (i = 0; i < data->nr_pclks; i++) {
+ struct clk *clk = of_clk_get(dev->of_node, i);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ data->pclks[i] = clk;
+ }
+ }
+
+ if (info->clk_name)
+ data->clk = clk_get(dev, info->clk_name);
+ clk_prepare_enable(data->clk);
+
+ platform_set_drvdata(pdev, data);
+
+ /*
+ * Enable runtime PM here to allow the clock core to use runtime PM
+ * for the registered clocks. Additionally, we increase the runtime
+ * PM usage count before registering the clocks, to prevent the
+ * clock core from runtime suspending the device.
+ */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ if (info->pll_clks)
+ samsung_clk_register_pll(ctx, info->pll_clks, info->nr_pll_clks,
+ reg_base);
+ if (info->mux_clks)
+ samsung_clk_register_mux(ctx, info->mux_clks,
+ info->nr_mux_clks);
+ if (info->div_clks)
+ samsung_clk_register_div(ctx, info->div_clks,
+ info->nr_div_clks);
+ if (info->gate_clks)
+ samsung_clk_register_gate(ctx, info->gate_clks,
+ info->nr_gate_clks);
+ if (info->fixed_clks)
+ samsung_clk_register_fixed_rate(ctx, info->fixed_clks,
+ info->nr_fixed_clks);
+ if (info->fixed_factor_clks)
+ samsung_clk_register_fixed_factor(ctx, info->fixed_factor_clks,
+ info->nr_fixed_factor_clks);
+
+ samsung_clk_of_add_provider(dev->of_node, ctx);
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
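For readers wondering about the "must be the last entry" comment on ctx: struct samsung_clk_provider itself ends in a struct clk_hw_onecell_data whose hws[] member is a flexible array, so keeping ctx last is what makes the single over-sized devm_kzalloc() in this probe possible. An illustration of the assumed layout:

/*
 * Assumed layout behind the devm_kzalloc() above:
 *
 *      [ struct exynos5433_cmu_data fixed members        ]
 *      [ ctx: struct samsung_clk_provider fixed members  ]
 *      [ ctx.clk_data.hws[0 .. info->nr_clk_ids - 1]     ]  <- flexible array
 *
 * i.e. size = sizeof(*data) + info->nr_clk_ids * sizeof(*data->ctx.clk_data.hws),
 * which only works out if ctx (and with it clk_data) is the final member.
 */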
+
+static const struct of_device_id exynos5433_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos5433-cmu-aud",
+ .data = &aud_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-cam0",
+ .data = &cam0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-cam1",
+ .data = &cam1_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-disp",
+ .data = &disp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-g2d",
+ .data = &g2d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-g3d",
+ .data = &g3d_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-fsys",
+ .data = &fsys_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-gscl",
+ .data = &gscl_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-mfc",
+ .data = &mfc_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-hevc",
+ .data = &hevc_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-isp",
+ .data = &isp_cmu_info,
+ }, {
+ .compatible = "samsung,exynos5433-cmu-mscl",
+ .data = &mscl_cmu_info,
+ }, {
+ },
+};
+
+static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
+ NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver exynos5433_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos5433-cmu",
+ .of_match_table = exynos5433_cmu_of_match,
+ .suppress_bind_attrs = true,
+ .pm = &exynos5433_cmu_pm_ops,
+ },
+ .probe = exynos5433_cmu_probe,
};
-static void __init exynos5433_cmu_cam1_init(struct device_node *np)
+static int __init exynos5433_cmu_init(void)
{
- samsung_cmu_register_one(np, &cam1_cmu_info);
+ return platform_driver_register(&exynos5433_cmu_driver);
}
-CLK_OF_DECLARE(exynos5433_cmu_cam1, "samsung,exynos5433-cmu-cam1",
- exynos5433_cmu_cam1_init);
+core_initcall(exynos5433_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index a80f3ef20801..b08bd54c5e76 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -53,8 +53,7 @@ static const struct samsung_fixed_factor_clock exynos5440_fixed_factor_clks[] __
/* mux clocks */
static const struct samsung_mux_clock exynos5440_mux_clks[] __initconst = {
MUX(0, "mout_spi", mout_spi_p, MISC_DOUT1, 5, 1),
- MUX_A(CLK_ARM_CLK, "arm_clk", mout_armclk_p,
- CPU_CLK_STATUS, 0, 1, "armclk"),
+ MUX(CLK_ARM_CLK, "arm_clk", mout_armclk_p, CPU_CLK_STATUS, 0, 1),
};
/* divider clocks */
@@ -117,6 +116,13 @@ static const struct samsung_pll_clock exynos5440_plls[] __initconst = {
PLL(pll_2550x, CLK_CPLLB, "cpllb", "xtal", 0, 0x50, NULL),
};
+/*
+ * Clock aliases for legacy clkdev look-up.
+ */
+static const struct samsung_clock_alias exynos5440_aliases[] __initconst = {
+ ALIAS(CLK_ARM_CLK, NULL, "armclk"),
+};
+
/* register exynos5440 clocks */
static void __init exynos5440_clk_init(struct device_node *np)
{
@@ -147,6 +153,8 @@ static void __init exynos5440_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5440_div_clks));
samsung_clk_register_gate(ctx, exynos5440_gate_clks,
ARRAY_SIZE(exynos5440_gate_clks));
+ samsung_clk_register_alias(ctx, exynos5440_aliases,
+ ARRAY_SIZE(exynos5440_aliases));
samsung_clk_of_add_provider(np, ctx);
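The ALIAS() table added here takes over from the per-clock alias strings that the rest of this patch removes. A simplified sketch of how ALIAS() and samsung_clk_register_alias() are assumed to fit together (details may differ from the real drivers/clk/samsung/clk.h and clk.c):

/* Simplified; not copied from the tree. */
#define ALIAS(_id, dname, a)    \
        { .id = _id, .dev_name = dname, .alias = a, }

void samsung_clk_register_alias(struct samsung_clk_provider *ctx,
                                const struct samsung_clock_alias *list,
                                unsigned int nr_clk)
{
        unsigned int idx;

        for (idx = 0; idx < nr_clk; idx++, list++) {
                struct clk_hw *hw = ctx->clk_data.hws[list->id];

                if (!IS_ERR_OR_NULL(hw))
                        clk_hw_register_clkdev(hw, list->alias,
                                               list->dev_name);
        }
}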
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 037c61484098..1c4c7a3039f1 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1388,7 +1388,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
pll->lock_reg = base + pll_clk->lock_offset;
pll->con_reg = base + pll_clk->con_offset;
- ret = clk_hw_register(NULL, &pll->hw);
+ ret = clk_hw_register(ctx->dev, &pll->hw);
if (ret) {
pr_err("%s: failed to register pll clock %s : %d\n",
__func__, pll_clk->name, ret);
@@ -1397,15 +1397,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, &pll->hw, pll_clk->id);
-
- if (!pll_clk->alias)
- return;
-
- ret = clk_hw_register_clkdev(&pll->hw, pll_clk->alias,
- pll_clk->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup for %s : %d",
- __func__, pll_clk->name, ret);
}
void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index abb935c42916..d94b85a42356 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -117,8 +117,8 @@ struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
- MUX_A(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1, "msysclk"),
- MUX_A(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1, "armclk"),
+ MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
+ MUX(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1),
MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
};
@@ -189,6 +189,10 @@ struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
};
struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
+ ALIAS(MSYSCLK, NULL, "msysclk"),
+ ALIAS(ARMCLK, NULL, "armclk"),
+ ALIAS(MPLL, NULL, "mpll"),
+ ALIAS(EPLL, NULL, "epll"),
ALIAS(HCLK, NULL, "hclk"),
ALIAS(HCLK_SSMC, NULL, "nand"),
ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
@@ -221,9 +225,9 @@ struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
/* S3C2416 specific clocks */
static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- [mpll] = PLL(pll_6552_s3c2416, 0, "mpll", "mpllref",
+ [mpll] = PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref",
LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_6553, 0, "epll", "epllref",
+ [epll] = PLL(pll_6553, EPLL, "epll", "epllref",
LOCKCON1, EPLLCON, NULL),
};
@@ -275,9 +279,9 @@ struct samsung_clock_alias s3c2416_aliases[] __initdata = {
/* S3C2443 specific clocks */
static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- [mpll] = PLL(pll_3000, 0, "mpll", "mpllref",
+ [mpll] = PLL(pll_3000, MPLL, "mpll", "mpllref",
LOCKCON0, MPLLCON, NULL),
- [epll] = PLL(pll_2126, 0, "epll", "epllref",
+ [epll] = PLL(pll_2126, EPLL, "epll", "epllref",
LOCKCON1, EPLLCON, NULL),
};
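Giving the two PLLs real ids is what makes the new ALIAS(MPLL, ...) and ALIAS(EPLL, ...) entries resolvable: lookups go through the provider's id-indexed table, and a clock registered with id 0 is assumed never to be stored there, roughly:

/* Assumed behaviour of samsung_clk_add_lookup() (simplified). */
void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
                            struct clk_hw *clk_hw, unsigned int id)
{
        if (id)
                ctx->clk_data.hws[id] = clk_hw;
}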
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 7ce0fa86c5ff..8634884aa11c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -134,7 +134,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_fixed_rate(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
list->parent_name, list->flags, list->fixed_rate);
if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
@@ -163,7 +163,7 @@ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_fixed_factor(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
list->parent_name, list->flags, list->mult, list->div);
if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
@@ -181,10 +181,10 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_mux(NULL, list->name,
+ clk_hw = clk_hw_register_mux(ctx->dev, list->name,
list->parent_names, list->num_parents, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->mux_flags, &ctx->lock);
@@ -195,15 +195,6 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
}
}
@@ -213,17 +204,17 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
if (list->table)
- clk_hw = clk_hw_register_divider_table(NULL,
+ clk_hw = clk_hw_register_divider_table(ctx->dev,
list->name, list->parent_name, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->div_flags,
list->table, &ctx->lock);
else
- clk_hw = clk_hw_register_divider(NULL, list->name,
+ clk_hw = clk_hw_register_divider(ctx->dev, list->name,
list->parent_name, list->flags,
ctx->reg_base + list->offset, list->shift,
list->width, list->div_flags, &ctx->lock);
@@ -234,15 +225,6 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
}
samsung_clk_add_lookup(ctx, clk_hw, list->id);
-
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
}
}
@@ -252,10 +234,10 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
unsigned int nr_clk)
{
struct clk_hw *clk_hw;
- unsigned int idx, ret;
+ unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk_hw = clk_hw_register_gate(NULL, list->name, list->parent_name,
+ clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
list->flags, ctx->reg_base + list->offset,
list->bit_idx, list->gate_flags, &ctx->lock);
if (IS_ERR(clk_hw)) {
@@ -264,15 +246,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
continue;
}
- /* register a clock lookup only if a clock alias is specified */
- if (list->alias) {
- ret = clk_hw_register_clkdev(clk_hw, list->alias,
- list->dev_name);
- if (ret)
- pr_err("%s: failed to register lookup %s\n",
- __func__, list->alias);
- }
-
samsung_clk_add_lookup(ctx, clk_hw, list->id);
}
}
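Passing ctx->dev instead of NULL to the clk_hw_register_*() helpers is what ties these providers into runtime PM: the clock core of this kernel generation can runtime-resume the provider device around hardware accesses when a clock has been registered against it. A heavily simplified sketch of that assumed behaviour (not part of this patch, and not the core's exact code):

static int clk_op_with_provider_awake(struct clk_core *core)
{
        int ret;

        if (core->dev && pm_runtime_enabled(core->dev))
                pm_runtime_get_sync(core->dev); /* wake the CMU block */

        ret = core->ops->enable(core->hw);      /* registers are accessible */

        if (core->dev && pm_runtime_enabled(core->dev))
                pm_runtime_put(core->dev);

        return ret;
}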
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index b8ca0dd3a38b..3880d2f9d582 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -24,6 +24,7 @@
*/
struct samsung_clk_provider {
void __iomem *reg_base;
+ struct device *dev;
spinlock_t lock;
/* clk_data must be the last entry due to variable length 'hws' array */
struct clk_hw_onecell_data clk_data;
@@ -106,7 +107,6 @@ struct samsung_fixed_factor_clock {
/**
* struct samsung_mux_clock: information about mux clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this mux clock.
* @parent_names: array of pointer to parent clock names.
* @num_parents: number of parents listed in @parent_names.
@@ -115,11 +115,9 @@ struct samsung_fixed_factor_clock {
* @shift: starting bit location of the mux control bit-field in @reg.
* @width: width of the mux control bit-field in @reg.
* @mux_flags: flags for mux-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_mux_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *const *parent_names;
u8 num_parents;
@@ -128,13 +126,11 @@ struct samsung_mux_clock {
u8 shift;
u8 width;
u8 mux_flags;
- const char *alias;
};
-#define __MUX(_id, dname, cname, pnames, o, s, w, f, mf, a) \
+#define __MUX(_id, cname, pnames, o, s, w, f, mf) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
@@ -143,36 +139,26 @@ struct samsung_mux_clock {
.shift = s, \
.width = w, \
.mux_flags = mf, \
- .alias = a, \
}
#define MUX(_id, cname, pnames, o, s, w) \
- __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, NULL)
-
-#define MUX_A(_id, cname, pnames, o, s, w, a) \
- __MUX(_id, NULL, cname, pnames, o, s, w, 0, 0, a)
+ __MUX(_id, cname, pnames, o, s, w, 0, 0)
#define MUX_F(_id, cname, pnames, o, s, w, f, mf) \
- __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, NULL)
-
-#define MUX_FA(_id, cname, pnames, o, s, w, f, mf, a) \
- __MUX(_id, NULL, cname, pnames, o, s, w, f, mf, a)
+ __MUX(_id, cname, pnames, o, s, w, f, mf)
/**
* struct samsung_div_clock: information about div clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this div clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @offset: offset of the register for configuring the div.
* @shift: starting bit location of the div control bit-field in @reg.
* @div_flags: flags for div-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_div_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
@@ -180,14 +166,12 @@ struct samsung_div_clock {
u8 shift;
u8 width;
u8 div_flags;
- const char *alias;
struct clk_div_table *table;
};
-#define __DIV(_id, dname, cname, pname, o, s, w, f, df, a, t) \
+#define __DIV(_id, cname, pname, o, s, w, f, df, t) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_name = pname, \
.flags = f, \
@@ -195,70 +179,51 @@ struct samsung_div_clock {
.shift = s, \
.width = w, \
.div_flags = df, \
- .alias = a, \
.table = t, \
}
#define DIV(_id, cname, pname, o, s, w) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, NULL)
-
-#define DIV_A(_id, cname, pname, o, s, w, a) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, a, NULL)
+ __DIV(_id, cname, pname, o, s, w, 0, 0, NULL)
#define DIV_F(_id, cname, pname, o, s, w, f, df) \
- __DIV(_id, NULL, cname, pname, o, s, w, f, df, NULL, NULL)
+ __DIV(_id, cname, pname, o, s, w, f, df, NULL)
#define DIV_T(_id, cname, pname, o, s, w, t) \
- __DIV(_id, NULL, cname, pname, o, s, w, 0, 0, NULL, t)
+ __DIV(_id, cname, pname, o, s, w, 0, 0, t)
/**
* struct samsung_gate_clock: information about gate clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this gate clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @offset: offset of the register for configuring the gate.
* @bit_idx: bit index of the gate control bit-field in @reg.
* @gate_flags: flags for gate-type clock.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_gate_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
unsigned long offset;
u8 bit_idx;
u8 gate_flags;
- const char *alias;
};
-#define __GATE(_id, dname, cname, pname, o, b, f, gf, a) \
+#define __GATE(_id, cname, pname, o, b, f, gf) \
{ \
.id = _id, \
- .dev_name = dname, \
.name = cname, \
.parent_name = pname, \
.flags = f, \
.offset = o, \
.bit_idx = b, \
.gate_flags = gf, \
- .alias = a, \
}
#define GATE(_id, cname, pname, o, b, f, gf) \
- __GATE(_id, NULL, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_A(_id, cname, pname, o, b, f, gf, a) \
- __GATE(_id, NULL, cname, pname, o, b, f, gf, a)
-
-#define GATE_D(_id, dname, cname, pname, o, b, f, gf) \
- __GATE(_id, dname, cname, pname, o, b, f, gf, NULL)
-
-#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
- __GATE(_id, dname, cname, pname, o, b, f, gf, a)
+ __GATE(_id, cname, pname, o, b, f, gf)
#define PNAME(x) static const char * const x[] __initconst
@@ -275,18 +240,15 @@ struct samsung_clk_reg_dump {
/**
* struct samsung_pll_clock: information about pll clock
* @id: platform specific id of the clock.
- * @dev_name: name of the device to which this clock belongs.
* @name: name of this pll clock.
* @parent_name: name of the parent clock.
* @flags: optional flags for basic clock.
* @con_offset: offset of the register for configuring the PLL.
* @lock_offset: offset of the register for locking the PLL.
* @type: Type of PLL to be registered.
- * @alias: optional clock alias name to be assigned to this clock.
*/
struct samsung_pll_clock {
unsigned int id;
- const char *dev_name;
const char *name;
const char *parent_name;
unsigned long flags;
@@ -294,31 +256,23 @@ struct samsung_pll_clock {
int lock_offset;
enum samsung_pll_type type;
const struct samsung_pll_rate_table *rate_table;
- const char *alias;
};
-#define __PLL(_typ, _id, _dname, _name, _pname, _flags, _lock, _con, \
- _rtable, _alias) \
+#define __PLL(_typ, _id, _name, _pname, _flags, _lock, _con, _rtable) \
{ \
.id = _id, \
.type = _typ, \
- .dev_name = _dname, \
.name = _name, \
.parent_name = _pname, \
- .flags = CLK_GET_RATE_NOCACHE, \
+ .flags = _flags, \
.con_offset = _con, \
.lock_offset = _lock, \
.rate_table = _rtable, \
- .alias = _alias, \
}
#define PLL(_typ, _id, _name, _pname, _lock, _con, _rtable) \
- __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
- _lock, _con, _rtable, _name)
-
-#define PLL_A(_typ, _id, _name, _pname, _lock, _con, _alias, _rtable) \
- __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
- _lock, _con, _rtable, _alias)
+ __PLL(_typ, _id, _name, _pname, CLK_GET_RATE_NOCACHE, _lock, \
+ _con, _rtable)
struct samsung_clock_reg_cache {
struct list_head node;
@@ -352,6 +306,12 @@ struct samsung_cmu_info {
/* list and number of clocks registers */
const unsigned long *clk_regs;
unsigned int nr_clk_regs;
+
+ /* list and number of clock registers to set before suspend */
+ const struct samsung_clk_reg_dump *suspend_regs;
+ unsigned int nr_suspend_regs;
+ /* name of the parent clock needed for CMU register access */
+ const char *clk_name;
};
extern struct samsung_clk_provider *__init samsung_clk_init(
diff --git a/drivers/clk/sirf/atlas6.h b/drivers/clk/sirf/atlas6.h
index 376217f3bf8f..cb871e30a175 100644
--- a/drivers/clk/sirf/atlas6.h
+++ b/drivers/clk/sirf/atlas6.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define SIRFSOC_CLKC_CLK_EN0 0x0000
#define SIRFSOC_CLKC_CLK_EN1 0x0004
#define SIRFSOC_CLKC_REF_CFG 0x0020
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index 665fa681b2e1..0cd11e6893af 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
},
};
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
.name = "nand",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index d0c6c9a2d06a..be012b4bab46 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -392,7 +392,7 @@ static const char * const pll_clk_parents[] = {
"xin",
};
-static struct clk_init_data clk_cpupll_init = {
+static const struct clk_init_data clk_cpupll_init = {
.name = "cpupll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -406,7 +406,7 @@ static struct clk_pll clk_cpupll = {
},
};
-static struct clk_init_data clk_mempll_init = {
+static const struct clk_init_data clk_mempll_init = {
.name = "mempll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -420,7 +420,7 @@ static struct clk_pll clk_mempll = {
},
};
-static struct clk_init_data clk_sys0pll_init = {
+static const struct clk_init_data clk_sys0pll_init = {
.name = "sys0pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -434,7 +434,7 @@ static struct clk_pll clk_sys0pll = {
},
};
-static struct clk_init_data clk_sys1pll_init = {
+static const struct clk_init_data clk_sys1pll_init = {
.name = "sys1pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -448,7 +448,7 @@ static struct clk_pll clk_sys1pll = {
},
};
-static struct clk_init_data clk_sys2pll_init = {
+static const struct clk_init_data clk_sys2pll_init = {
.name = "sys2pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -462,7 +462,7 @@ static struct clk_pll clk_sys2pll = {
},
};
-static struct clk_init_data clk_sys3pll_init = {
+static const struct clk_init_data clk_sys3pll_init = {
.name = "sys3pll_vco",
.ops = &ab_pll_ops,
.parent_names = pll_clk_parents,
@@ -596,7 +596,7 @@ static const char * const audiodto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_audiodto_init = {
+static const struct clk_init_data clk_audiodto_init = {
.name = "audio_dto",
.ops = &dto_ops,
.parent_names = audiodto_clk_parents,
@@ -617,7 +617,7 @@ static const char * const disp0dto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_disp0dto_init = {
+static const struct clk_init_data clk_disp0dto_init = {
.name = "disp0_dto",
.ops = &dto_ops,
.parent_names = disp0dto_clk_parents,
@@ -638,7 +638,7 @@ static const char * const disp1dto_clk_parents[] = {
"sys3pll_clk1",
};
-static struct clk_init_data clk_disp1dto_init = {
+static const struct clk_init_data clk_disp1dto_init = {
.name = "disp1_dto",
.ops = &dto_ops,
.parent_names = disp1dto_clk_parents,
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 77e1e2491689..d8f9efa5129a 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -184,7 +184,7 @@ static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
return clk_hw_get_rate(parent_clk);
}
-static struct clk_ops std_pll_ops = {
+static const struct clk_ops std_pll_ops = {
.recalc_rate = pll_clk_recalc_rate,
.round_rate = pll_clk_round_rate,
.set_rate = pll_clk_set_rate,
@@ -194,21 +194,21 @@ static const char * const pll_clk_parents[] = {
"osc",
};
-static struct clk_init_data clk_pll1_init = {
+static const struct clk_init_data clk_pll1_init = {
.name = "pll1",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
.num_parents = ARRAY_SIZE(pll_clk_parents),
};
-static struct clk_init_data clk_pll2_init = {
+static const struct clk_init_data clk_pll2_init = {
.name = "pll2",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
.num_parents = ARRAY_SIZE(pll_clk_parents),
};
-static struct clk_init_data clk_pll3_init = {
+static const struct clk_init_data clk_pll3_init = {
.name = "pll3",
.ops = &std_pll_ops,
.parent_names = pll_clk_parents,
@@ -265,13 +265,13 @@ static unsigned long usb_pll_clk_recalc_rate(struct clk_hw *hw, unsigned long pa
return (reg & SIRFSOC_USBPHY_PLL_BYPASS) ? parent_rate : 48*MHZ;
}
-static struct clk_ops usb_pll_ops = {
+static const struct clk_ops usb_pll_ops = {
.enable = usb_pll_clk_enable,
.disable = usb_pll_clk_disable,
.recalc_rate = usb_pll_clk_recalc_rate,
};
-static struct clk_init_data clk_usb_pll_init = {
+static const struct clk_init_data clk_usb_pll_init = {
.name = "usb_pll",
.ops = &usb_pll_ops,
.parent_names = pll_clk_parents,
@@ -437,7 +437,7 @@ static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret2 ? ret2 : ret1;
}
-static struct clk_ops msi_ops = {
+static const struct clk_ops msi_ops = {
.set_rate = dmn_clk_set_rate,
.round_rate = dmn_clk_round_rate,
.recalc_rate = dmn_clk_recalc_rate,
@@ -445,7 +445,7 @@ static struct clk_ops msi_ops = {
.get_parent = dmn_clk_get_parent,
};
-static struct clk_init_data clk_mem_init = {
+static const struct clk_init_data clk_mem_init = {
.name = "mem",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -459,7 +459,7 @@ static struct clk_dmn clk_mem = {
},
};
-static struct clk_init_data clk_sys_init = {
+static const struct clk_init_data clk_sys_init = {
.name = "sys",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -474,7 +474,7 @@ static struct clk_dmn clk_sys = {
},
};
-static struct clk_init_data clk_io_init = {
+static const struct clk_init_data clk_io_init = {
.name = "io",
.ops = &msi_ops,
.parent_names = dmn_clk_parents,
@@ -488,7 +488,7 @@ static struct clk_dmn clk_io = {
},
};
-static struct clk_ops cpu_ops = {
+static const struct clk_ops cpu_ops = {
.set_parent = dmn_clk_set_parent,
.get_parent = dmn_clk_get_parent,
.set_rate = cpu_clk_set_rate,
@@ -496,7 +496,7 @@ static struct clk_ops cpu_ops = {
.recalc_rate = cpu_clk_recalc_rate,
};
-static struct clk_init_data clk_cpu_init = {
+static const struct clk_init_data clk_cpu_init = {
.name = "cpu",
.ops = &cpu_ops,
.parent_names = dmn_clk_parents,
@@ -511,7 +511,7 @@ static struct clk_dmn clk_cpu = {
},
};
-static struct clk_ops dmn_ops = {
+static const struct clk_ops dmn_ops = {
.is_enabled = std_clk_is_enabled,
.enable = std_clk_enable,
.disable = std_clk_disable,
@@ -524,7 +524,7 @@ static struct clk_ops dmn_ops = {
/* dsp, gfx, mm, lcd and vpp domain */
-static struct clk_init_data clk_dsp_init = {
+static const struct clk_init_data clk_dsp_init = {
.name = "dsp",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -539,7 +539,7 @@ static struct clk_dmn clk_dsp = {
},
};
-static struct clk_init_data clk_gfx_init = {
+static const struct clk_init_data clk_gfx_init = {
.name = "gfx",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -554,7 +554,7 @@ static struct clk_dmn clk_gfx = {
},
};
-static struct clk_init_data clk_mm_init = {
+static const struct clk_init_data clk_mm_init = {
.name = "mm",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -574,7 +574,7 @@ static struct clk_dmn clk_mm = {
*/
#define clk_gfx2d clk_mm
-static struct clk_init_data clk_lcd_init = {
+static const struct clk_init_data clk_lcd_init = {
.name = "lcd",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -589,7 +589,7 @@ static struct clk_dmn clk_lcd = {
},
};
-static struct clk_init_data clk_vpp_init = {
+static const struct clk_init_data clk_vpp_init = {
.name = "vpp",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -604,21 +604,21 @@ static struct clk_dmn clk_vpp = {
},
};
-static struct clk_init_data clk_mmc01_init = {
+static const struct clk_init_data clk_mmc01_init = {
.name = "mmc01",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_init_data clk_mmc23_init = {
+static const struct clk_init_data clk_mmc23_init = {
.name = "mmc23",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
.num_parents = ARRAY_SIZE(dmn_clk_parents),
};
-static struct clk_init_data clk_mmc45_init = {
+static const struct clk_init_data clk_mmc45_init = {
.name = "mmc45",
.ops = &dmn_ops,
.parent_names = dmn_clk_parents,
@@ -679,13 +679,13 @@ static const char * const std_clk_io_parents[] = {
"io",
};
-static struct clk_ops ios_ops = {
+static const struct clk_ops ios_ops = {
.is_enabled = std_clk_is_enabled,
.enable = std_clk_enable,
.disable = std_clk_disable,
};
-static struct clk_init_data clk_cphif_init = {
+static const struct clk_init_data clk_cphif_init = {
.name = "cphif",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -699,7 +699,7 @@ static struct clk_std clk_cphif = {
},
};
-static struct clk_init_data clk_dmac0_init = {
+static const struct clk_init_data clk_dmac0_init = {
.name = "dmac0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -713,7 +713,7 @@ static struct clk_std clk_dmac0 = {
},
};
-static struct clk_init_data clk_dmac1_init = {
+static const struct clk_init_data clk_dmac1_init = {
.name = "dmac1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -727,7 +727,7 @@ static struct clk_std clk_dmac1 = {
},
};
-static struct clk_init_data clk_audio_init = {
+static const struct clk_init_data clk_audio_init = {
.name = "audio",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -741,7 +741,7 @@ static struct clk_std clk_audio = {
},
};
-static struct clk_init_data clk_uart0_init = {
+static const struct clk_init_data clk_uart0_init = {
.name = "uart0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -755,7 +755,7 @@ static struct clk_std clk_uart0 = {
},
};
-static struct clk_init_data clk_uart1_init = {
+static const struct clk_init_data clk_uart1_init = {
.name = "uart1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -769,7 +769,7 @@ static struct clk_std clk_uart1 = {
},
};
-static struct clk_init_data clk_uart2_init = {
+static const struct clk_init_data clk_uart2_init = {
.name = "uart2",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -783,7 +783,7 @@ static struct clk_std clk_uart2 = {
},
};
-static struct clk_init_data clk_usp0_init = {
+static const struct clk_init_data clk_usp0_init = {
.name = "usp0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -797,7 +797,7 @@ static struct clk_std clk_usp0 = {
},
};
-static struct clk_init_data clk_usp1_init = {
+static const struct clk_init_data clk_usp1_init = {
.name = "usp1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -811,7 +811,7 @@ static struct clk_std clk_usp1 = {
},
};
-static struct clk_init_data clk_usp2_init = {
+static const struct clk_init_data clk_usp2_init = {
.name = "usp2",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -825,7 +825,7 @@ static struct clk_std clk_usp2 = {
},
};
-static struct clk_init_data clk_vip_init = {
+static const struct clk_init_data clk_vip_init = {
.name = "vip",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -839,7 +839,7 @@ static struct clk_std clk_vip = {
},
};
-static struct clk_init_data clk_spi0_init = {
+static const struct clk_init_data clk_spi0_init = {
.name = "spi0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -853,7 +853,7 @@ static struct clk_std clk_spi0 = {
},
};
-static struct clk_init_data clk_spi1_init = {
+static const struct clk_init_data clk_spi1_init = {
.name = "spi1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -867,7 +867,7 @@ static struct clk_std clk_spi1 = {
},
};
-static struct clk_init_data clk_tsc_init = {
+static const struct clk_init_data clk_tsc_init = {
.name = "tsc",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -881,7 +881,7 @@ static struct clk_std clk_tsc = {
},
};
-static struct clk_init_data clk_i2c0_init = {
+static const struct clk_init_data clk_i2c0_init = {
.name = "i2c0",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -895,7 +895,7 @@ static struct clk_std clk_i2c0 = {
},
};
-static struct clk_init_data clk_i2c1_init = {
+static const struct clk_init_data clk_i2c1_init = {
.name = "i2c1",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -909,7 +909,7 @@ static struct clk_std clk_i2c1 = {
},
};
-static struct clk_init_data clk_pwmc_init = {
+static const struct clk_init_data clk_pwmc_init = {
.name = "pwmc",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -923,7 +923,7 @@ static struct clk_std clk_pwmc = {
},
};
-static struct clk_init_data clk_efuse_init = {
+static const struct clk_init_data clk_efuse_init = {
.name = "efuse",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -937,7 +937,7 @@ static struct clk_std clk_efuse = {
},
};
-static struct clk_init_data clk_pulse_init = {
+static const struct clk_init_data clk_pulse_init = {
.name = "pulse",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -955,7 +955,7 @@ static const char * const std_clk_dsp_parents[] = {
"dsp",
};
-static struct clk_init_data clk_gps_init = {
+static const struct clk_init_data clk_gps_init = {
.name = "gps",
.ops = &ios_ops,
.parent_names = std_clk_dsp_parents,
@@ -969,7 +969,7 @@ static struct clk_std clk_gps = {
},
};
-static struct clk_init_data clk_mf_init = {
+static const struct clk_init_data clk_mf_init = {
.name = "mf",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
@@ -987,7 +987,7 @@ static const char * const std_clk_sys_parents[] = {
"sys",
};
-static struct clk_init_data clk_security_init = {
+static const struct clk_init_data clk_security_init = {
.name = "security",
.ops = &ios_ops,
.parent_names = std_clk_sys_parents,
@@ -1005,7 +1005,7 @@ static const char * const std_clk_usb_parents[] = {
"usb_pll",
};
-static struct clk_init_data clk_usb0_init = {
+static const struct clk_init_data clk_usb0_init = {
.name = "usb0",
.ops = &ios_ops,
.parent_names = std_clk_usb_parents,
@@ -1019,7 +1019,7 @@ static struct clk_std clk_usb0 = {
},
};
-static struct clk_init_data clk_usb1_init = {
+static const struct clk_init_data clk_usb1_init = {
.name = "usb1",
.ops = &ios_ops,
.parent_names = std_clk_usb_parents,
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index aac1c8ec151a..2f824320c318 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -42,7 +42,7 @@ static struct clk_dmn clk_mmc45 = {
},
};
-static struct clk_init_data clk_nand_init = {
+static const struct clk_init_data clk_nand_init = {
.name = "nand",
.ops = &ios_ops,
.parent_names = std_clk_io_parents,
diff --git a/drivers/clk/sirf/prima2.h b/drivers/clk/sirf/prima2.h
index 01bc3854a058..2fb56941795d 100644
--- a/drivers/clk/sirf/prima2.h
+++ b/drivers/clk/sirf/prima2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define SIRFSOC_CLKC_CLK_EN0 0x0000
#define SIRFSOC_CLKC_CLK_EN1 0x0004
#define SIRFSOC_CLKC_REF_CFG 0x0014
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index d8bb239753a4..9146c20fe21f 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += clk.o
obj-y += clk-gate.o
obj-y += clk-pll.o
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
index cdb425d3b8ee..d7e75d2d42df 100644
--- a/drivers/clk/spear/Makefile
+++ b/drivers/clk/spear/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# SPEAr Clock specific Makefile
#
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index f271c350ef94..906410413bc1 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -29,7 +29,7 @@
#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
-static struct aux_clk_masks default_aux_masks = {
+static const struct aux_clk_masks default_aux_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = AUX_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
@@ -128,7 +128,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_aux_ops = {
+static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
.round_rate = clk_aux_round_rate,
.set_rate = clk_aux_set_rate,
@@ -136,7 +136,7 @@ static struct clk_ops clk_aux_ops = {
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
const char *parent_name, unsigned long flags, void __iomem *reg,
- struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
{
struct clk_aux *aux;
@@ -149,10 +149,8 @@ struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
}
aux = kzalloc(sizeof(*aux), GFP_KERNEL);
- if (!aux) {
- pr_err("could not allocate aux clk\n");
+ if (!aux)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_aux assignments */
if (!masks)
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 58d678b5b40a..229c96daece6 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -116,7 +116,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_frac_ops = {
+static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
.round_rate = clk_frac_round_rate,
.set_rate = clk_frac_set_rate,
@@ -136,10 +136,8 @@ struct clk *clk_register_frac(const char *name, const char *parent_name,
}
frac = kzalloc(sizeof(*frac), GFP_KERNEL);
- if (!frac) {
- pr_err("could not allocate frac clk\n");
+ if (!frac)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_frac assignments */
frac->reg = reg;
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1a722e99e76e..28262f422562 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -105,7 +105,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_gpt_ops = {
+static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
.round_rate = clk_gpt_round_rate,
.set_rate = clk_gpt_set_rate,
@@ -125,10 +125,8 @@ struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
}
gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
- if (!gpt) {
- pr_err("could not allocate gpt clk\n");
+ if (!gpt)
return ERR_PTR(-ENOMEM);
- }
/* struct clk_gpt assignments */
gpt->reg = reg;
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dc21ca4601aa..c08dec30bfa6 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -165,7 +165,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_pll_ops = {
+static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
@@ -266,7 +266,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
return 0;
}
-static struct clk_ops clk_vco_ops = {
+static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
.round_rate = clk_vco_round_rate,
.set_rate = clk_vco_set_rate,
@@ -292,16 +292,12 @@ struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
}
vco = kzalloc(sizeof(*vco), GFP_KERNEL);
- if (!vco) {
- pr_err("could not allocate vco clk\n");
+ if (!vco)
return ERR_PTR(-ENOMEM);
- }
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
- if (!pll) {
- pr_err("could not allocate pll clk\n");
+ if (!pll)
goto free_vco;
- }
/* struct clk_vco assignments */
vco->mode_reg = mode_reg;
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 9834944f08b1..af0e25f496c1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -49,7 +49,7 @@ struct aux_rate_tbl {
struct clk_aux {
struct clk_hw hw;
void __iomem *reg;
- struct aux_clk_masks *masks;
+ const struct aux_clk_masks *masks;
struct aux_rate_tbl *rtbl;
u8 rtbl_cnt;
spinlock_t *lock;
@@ -112,7 +112,7 @@ typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
/* clk register routines */
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
const char *parent_name, unsigned long flags, void __iomem *reg,
- struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ const struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
struct clk *clk_register_frac(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg,
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 2f86e3f94efa..591248c9a88e 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -284,7 +284,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
};
/* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index cbb19a90f2d6..e5bc8c828cf0 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -323,7 +323,7 @@ static struct frac_rate_tbl clcd_rtbl[] = {
};
/* i2s prescaler1 masks */
-static struct aux_clk_masks i2s_prs1_masks = {
+static const struct aux_clk_masks i2s_prs1_masks = {
.eq_sel_mask = AUX_EQ_SEL_MASK,
.eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
.eq1_mask = AUX_EQ1_SEL,
diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h
index f7ec2d9139d6..44302fc7ca96 100644
--- a/drivers/clk/st/clkgen.h
+++ b/drivers/clk/st/clkgen.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/************************************************************************
File : Clock H/w specific Information
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 85a0633c1eac..4141c3fe08ae 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Common objects
lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
@@ -10,6 +11,7 @@ lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
# Multi-factor clocks
lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 286b0049b7b6..ffa5dac221e4 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -28,6 +28,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun4i-a10.h"
@@ -51,16 +52,29 @@ static struct ccu_nkmp pll_core_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names.
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN4I_PLL_AUDIO_REG 0x008
+
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
.m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+ 0x00c, BIT(31)),
.common = {
.reg = 0x008,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.hw.init = CLK_HW_INIT("pll-audio-base",
"hosc",
&ccu_nm_ops,
@@ -223,7 +237,7 @@ static struct ccu_mux cpu_clk = {
.hw.init = CLK_HW_INIT_PARENTS("cpu",
cpu_parents,
&ccu_mux_ops,
- CLK_IS_CRITICAL),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
}
};
@@ -1021,9 +1035,9 @@ static struct ccu_common *sun4i_sun7i_ccu_clks[] = {
&out_b_clk.common
};
-/* Post-divider for pll-audio is hardcoded to 4 */
+/* Post-divider for pll-audio is hardcoded to 1 */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1420,10 +1434,10 @@ static void __init sun4i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN4I_PLL_AUDIO_REG);
val &= ~GENMASK(29, 26);
- writel(val | (4 << 26), reg + SUN4I_PLL_AUDIO_REG);
+ writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
/*
* Use the peripheral PLL6 as the AHB parent, instead of CPU /
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
index c5947c7c050e..23c908ad509f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.h
@@ -29,7 +29,7 @@
#define CLK_PLL_AUDIO_4X 6
#define CLK_PLL_AUDIO_8X 7
#define CLK_PLL_VIDEO0 8
-#define CLK_PLL_VIDEO0_2X 9
+/* The PLL_VIDEO0_2X clock is exported */
#define CLK_PLL_VE 10
#define CLK_PLL_DDR_BASE 11
#define CLK_PLL_DDR 12
@@ -38,7 +38,7 @@
#define CLK_PLL_PERIPH 15
#define CLK_PLL_PERIPH_SATA 16
#define CLK_PLL_VIDEO1 17
-#define CLK_PLL_VIDEO1_2X 18
+/* The PLL_VIDEO1_2X clock is exported */
#define CLK_PLL_GPU 19
/* The CPU clock is exported */
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index ab9e850b3707..fa2c2dd77102 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun5i.h"
@@ -49,11 +50,20 @@ static struct ccu_nkmp pll_core_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN5I_PLL_AUDIO_REG 0x008
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
static struct ccu_nm pll_audio_base_clk = {
.enable = BIT(31),
.n = _SUNXI_CCU_MULT_OFFSET(8, 7, 0),
@@ -63,8 +73,11 @@ static struct ccu_nm pll_audio_base_clk = {
* offset
*/
.m = _SUNXI_CCU_DIV_OFFSET(0, 5, 0),
+ .sdm = _SUNXI_CCU_SDM(pll_audio_sdm_table, 0,
+ 0x00c, BIT(31)),
.common = {
.reg = 0x008,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
.hw.init = CLK_HW_INIT("pll-audio-base",
"hosc",
&ccu_nm_ops,
@@ -597,9 +610,9 @@ static struct ccu_common *sun5i_a10s_ccu_clks[] = {
&iep_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -980,10 +993,10 @@ static void __init sun5i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN5I_PLL_AUDIO_REG);
- val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN5I_PLL_AUDIO_REG);
+ val &= ~GENMASK(29, 26);
+ writel(val | (0 << 26), reg + SUN5I_PLL_AUDIO_REG);
/*
* Use the peripheral PLL as the AHB parent, instead of CPU /
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 8af434815fba..72b16ed1012b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -31,6 +31,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun6i-a31.h"
@@ -48,18 +49,29 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_cpu_clk, "pll-cpu",
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN6I_A31_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video0_clk, "pll-video0",
"osc24M", 0x010,
@@ -608,7 +620,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents,
0x150, 0, 4, 24, 2, BIT(31),
CLK_SET_RATE_PARENT);
-static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0);
+static SUNXI_CCU_GATE(hdmi_ddc_clk, "ddc", "osc24M", 0x150, BIT(30), 0);
static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0);
@@ -950,9 +962,9 @@ static struct ccu_common *sun6i_a31_ccu_clks[] = {
&out_c_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1221,10 +1233,10 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN6I_A31_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN6I_A31_PLL_MIPI_REG);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
index 4e434011e9e7..27e6ad4133ab 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.h
@@ -27,7 +27,9 @@
#define CLK_PLL_AUDIO_4X 4
#define CLK_PLL_AUDIO_8X 5
#define CLK_PLL_VIDEO0 6
-#define CLK_PLL_VIDEO0_2X 7
+
+/* The PLL_VIDEO0_2X clock is exported */
+
#define CLK_PLL_VE 8
#define CLK_PLL_DDR 9
@@ -35,7 +37,9 @@
#define CLK_PLL_PERIPH_2X 11
#define CLK_PLL_VIDEO1 12
-#define CLK_PLL_VIDEO1_2X 13
+
+/* The PLL_VIDEO1_2X clock is exported */
+
#define CLK_PLL_GPU 14
#define CLK_PLL_MIPI 15
#define CLK_PLL9 16
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index d93b452f0df9..a4fa2945f230 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun8i-a23-a33.h"
@@ -52,18 +53,29 @@ static struct ccu_nkmp pll_cpux_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN8I_A23_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
"osc24M", 0x010,
@@ -538,9 +550,9 @@ static struct ccu_common *sun8i_a23_ccu_clks[] = {
&ats_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -720,10 +732,10 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_A23_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN8I_A23_PLL_MIPI_REG);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index e43acebdfbcd..5cedcd0d8be8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1",
static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
0x06c, BIT(0), 0);
static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
- 0x06c, BIT(0), 0);
+ 0x06c, BIT(1), 0);
static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
- 0x06c, BIT(0), 0);
+ 0x06c, BIT(2), 0);
static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
0x06c, BIT(16), 0);
static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
@@ -506,7 +506,7 @@ static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
csi_mclk_parents, csi_mclk_table,
0x134,
0, 5, /* M */
- 10, 3, /* mux */
+ 8, 3, /* mux */
BIT(15), /* gate */
0);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 5cdaf52669e4..5cc9d9952121 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -41,11 +41,16 @@ static SUNXI_CCU_GATE(wb_clk, "wb", "wb-div",
static SUNXI_CCU_M(mixer0_div_clk, "mixer0-div", "de", 0x0c, 0, 4,
CLK_SET_RATE_PARENT);
-static SUNXI_CCU_M(mixer1_div_clk, "mixer1-div", "de", 0x0c, 4, 4,
- CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(wb_div_clk, "wb-div", "de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer0_div_a83_clk, "mixer0-div", "pll-de", 0x0c, 0, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
+ CLK_SET_RATE_PARENT);
+
static struct ccu_common *sun8i_a83t_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
@@ -55,9 +60,9 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = {
&bus_mixer1_clk.common,
&bus_wb_clk.common,
- &mixer0_div_clk.common,
- &mixer1_div_clk.common,
- &wb_div_clk.common,
+ &mixer0_div_a83_clk.common,
+ &mixer1_div_a83_clk.common,
+ &wb_div_a83_clk.common,
};
static struct ccu_common *sun8i_v3s_de2_clks[] = {
@@ -81,9 +86,9 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
[CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
[CLK_BUS_WB] = &bus_wb_clk.common.hw,
- [CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
- [CLK_MIXER1_DIV] = &mixer1_div_clk.common.hw,
- [CLK_WB_DIV] = &wb_div_clk.common.hw,
+ [CLK_MIXER0_DIV] = &mixer0_div_a83_clk.common.hw,
+ [CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw,
+ [CLK_WB_DIV] = &wb_div_a83_clk.common.hw,
},
.num = CLK_NUMBER,
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 1729ff6a5aae..29bc0566b776 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -26,6 +26,7 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
#include "ccu_phase.h"
+#include "ccu_sdm.h"
#include "ccu-sun8i-h3.h"
@@ -37,25 +38,36 @@ static SUNXI_CCU_NKMP_WITH_GATE_LOCK(pll_cpux_clk, "pll-cpux",
16, 2, /* P */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
/*
* The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hard code the
+ * variable divider to 1. This means the clock rates will no longer
+ * match the clock names.
*/
#define SUN8I_H3_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- 0);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
"osc24M", 0x0010,
@@ -67,7 +79,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
"osc24M", 0x0018,
@@ -79,7 +91,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
"osc24M", 0x020,
@@ -88,7 +100,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
0, 2, /* M */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
"osc24M", 0x028,
@@ -97,7 +109,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph0_clk, "pll-periph0",
BIT(31), /* gate */
BIT(28), /* lock */
2, /* post-div */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
"osc24M", 0x0038,
@@ -109,7 +121,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
"osc24M", 0x044,
@@ -118,7 +130,7 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
BIT(31), /* gate */
BIT(28), /* lock */
2, /* post-div */
- 0);
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
"osc24M", 0x0048,
@@ -130,7 +142,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_de_clk, "pll-de",
297000000, /* frac rate 1 */
BIT(31), /* gate */
BIT(28), /* lock */
- 0);
+ CLK_SET_RATE_UNGATE);
static const char * const cpux_parents[] = { "osc32k", "osc24M",
"pll-cpux" , "pll-cpux" };
@@ -484,7 +496,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
0x15c, 0, 3, 24, 2, BIT(31), CLK_IS_CRITICAL);
static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
- 0x1a0, 0, 3, BIT(31), 0);
+ 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
static struct ccu_common *sun8i_h3_ccu_clks[] = {
&pll_cpux_clk.common,
@@ -707,9 +719,9 @@ static struct ccu_common *sun50i_h5_ccu_clks[] = {
&gpu_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -1129,10 +1141,10 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
sunxi_ccu_probe(node, reg, desc);
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index cadd1a9f93b6..5d684ce77c54 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -24,6 +24,7 @@
#define CCU_FEATURE_ALL_PREDIV BIT(4)
#define CCU_FEATURE_LOCK_REG BIT(5)
#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6)
+#define CCU_FEATURE_SIGMA_DELTA_MOD BIT(7)
/* MMC timing mode switch bit */
#define CCU_MMC_NEW_TIMING_MODE BIT(30)
diff --git a/drivers/clk/sunxi-ng/ccu_mult.h b/drivers/clk/sunxi-ng/ccu_mult.h
index f9c37b987d72..6b30b0c10807 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.h
+++ b/drivers/clk/sunxi-ng/ccu_mult.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CCU_MULT_H_
#define _CCU_MULT_H_
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index f20c0bd62a47..f165395effb5 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CCU_MUX_H_
#define _CCU_MUX_H_
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index a32158e8f2e3..7620aa973a6e 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -90,6 +90,14 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
if (!m)
m++;
+ if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm)) {
+ unsigned long rate =
+ ccu_sdm_helper_read_rate(&nm->common, &nm->sdm,
+ m, n);
+ if (rate)
+ return rate;
+ }
+
return parent_rate * n / m;
}
@@ -99,6 +107,12 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
struct ccu_nm *nm = hw_to_ccu_nm(hw);
struct _ccu_nm _nm;
+ if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate))
+ return rate;
+
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate))
+ return rate;
+
_nm.min_n = nm->n.min ?: 1;
_nm.max_n = nm->n.max ?: 1 << nm->n.width;
_nm.min_m = 1;
@@ -140,7 +154,16 @@ static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
_nm.min_m = 1;
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
- ccu_nm_find_best(parent_rate, rate, &_nm);
+ if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
+ ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);
+
+ /* Sigma delta modulation requires specific N and M factors */
+ ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
+ &_nm.m, &_nm.n);
+ } else {
+ ccu_sdm_helper_disable(&nm->common, &nm->sdm);
+ ccu_nm_find_best(parent_rate, rate, &_nm);
+ }
spin_lock_irqsave(nm->common.lock, flags);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.h b/drivers/clk/sunxi-ng/ccu_nm.h
index e87fd186da78..c623b0c7a23c 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.h
+++ b/drivers/clk/sunxi-ng/ccu_nm.h
@@ -20,6 +20,7 @@
#include "ccu_div.h"
#include "ccu_frac.h"
#include "ccu_mult.h"
+#include "ccu_sdm.h"
/*
* struct ccu_nm - Definition of an N-M clock
@@ -33,10 +34,34 @@ struct ccu_nm {
struct ccu_mult_internal n;
struct ccu_div_internal m;
struct ccu_frac_internal frac;
+ struct ccu_sdm_internal sdm;
struct ccu_common common;
};
+#define SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(_struct, _name, _parent, _reg, \
+ _nshift, _nwidth, \
+ _mshift, _mwidth, \
+ _sdm_table, _sdm_en, \
+ _sdm_reg, _sdm_reg_en, \
+ _gate, _lock, _flags) \
+ struct ccu_nm _struct = { \
+ .enable = _gate, \
+ .lock = _lock, \
+ .n = _SUNXI_CCU_MULT(_nshift, _nwidth), \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .sdm = _SUNXI_CCU_SDM(_sdm_table, _sdm_en, \
+ _sdm_reg, _sdm_reg_en),\
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD, \
+ .hw.init = CLK_HW_INIT(_name, \
+ _parent, \
+ &ccu_nm_ops, \
+ _flags), \
+ }, \
+ }
+
#define SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(_struct, _name, _parent, _reg, \
_nshift, _nwidth, \
_mshift, _mwidth, \
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 1dc4e98ea802..b67149143554 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -60,8 +60,22 @@ static int ccu_reset_reset(struct reset_controller_dev *rcdev,
return 0;
}
+static int ccu_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct ccu_reset *ccu = rcdev_to_ccu_reset(rcdev);
+ const struct ccu_reset_map *map = &ccu->reset_map[id];
+
+ /*
+ * The reset control API expects 0 if reset is not asserted,
+ * which is the opposite of what our hardware uses.
+ */
+ return !(map->bit & readl(ccu->base + map->reg));
+}
+
const struct reset_control_ops ccu_reset_ops = {
.assert = ccu_reset_assert,
.deassert = ccu_reset_deassert,
.reset = ccu_reset_reset,
+ .status = ccu_reset_status,
};
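With the new .status callback wired into ccu_reset_ops, consumers can query the deassert state through the standard reset framework. A minimal, illustrative sketch of such a consumer follows (not part of the patch; "dev" and the error handling are assumed, and it needs <linux/reset.h>):

	/* Illustrative only: read back the state that ccu_reset_status() now reports. */
	struct reset_control *rst;
	int asserted;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	asserted = reset_control_status(rst);	/* >0: line still asserted, 0: deasserted */
	if (asserted < 0)
		return asserted;		/* status unsupported or read error */

Note the helper inverts the register bit before returning it, because the hardware uses 1 for "out of reset" while the reset API expects 0 for "not asserted".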
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
new file mode 100644
index 000000000000..3b3dc9bdf2b0
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "ccu_sdm.h"
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm)
+{
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return false;
+
+ if (sdm->enable && !(readl(common->base + common->reg) & sdm->enable))
+ return false;
+
+ return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
+}
+
+void ccu_sdm_helper_enable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate)
+{
+ unsigned long flags;
+ unsigned int i;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return;
+
+ /* Set the pattern */
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate)
+ writel(sdm->table[i].pattern,
+ common->base + sdm->tuning_reg);
+
+ /* Make sure SDM is enabled */
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + sdm->tuning_reg);
+ writel(reg | sdm->tuning_enable, common->base + sdm->tuning_reg);
+ spin_unlock_irqrestore(common->lock, flags);
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg | sdm->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+void ccu_sdm_helper_disable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm)
+{
+ unsigned long flags;
+ u32 reg;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return;
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + common->reg);
+ writel(reg & ~sdm->enable, common->base + common->reg);
+ spin_unlock_irqrestore(common->lock, flags);
+
+ spin_lock_irqsave(common->lock, flags);
+ reg = readl(common->base + sdm->tuning_reg);
+ writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
+ spin_unlock_irqrestore(common->lock, flags);
+}
+
+/*
+ * Sigma delta modulation provides a way to do fractional-N frequency
+ * synthesis, in essence allowing the PLL to output any frequency
+ * within its operational range. On earlier SoCs such as the A10/A20,
+ * some PLLs support this. On later SoCs, all PLLs support this.
+ *
+ * The datasheets do not explain what the "wave top" and "wave bottom"
+ * parameters mean or do, nor how to calculate the effective output
+ * frequency. The only examples (and real world usage) are for the audio
+ * PLL to generate 24.576 and 22.5792 MHz clock rates used by the audio
+ * peripherals. The author lacks the underlying domain knowledge to
+ * pursue this.
+ *
+ * The goal and function of the following code is to support the two
+ * clock rates used by the audio subsystem, allowing for proper audio
+ * playback and capture without any pitch or speed changes.
+ */
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate)
+{
+ unsigned int i;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return false;
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate)
+ return true;
+
+ return false;
+}
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ u32 m, u32 n)
+{
+ unsigned int i;
+ u32 reg;
+
+ pr_debug("%s: Read sigma-delta modulation setting\n",
+ clk_hw_get_name(&common->hw));
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return 0;
+
+ pr_debug("%s: clock is sigma-delta modulated\n",
+ clk_hw_get_name(&common->hw));
+
+ reg = readl(common->base + sdm->tuning_reg);
+
+ pr_debug("%s: pattern reg is 0x%x",
+ clk_hw_get_name(&common->hw), reg);
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].pattern == reg &&
+ sdm->table[i].m == m && sdm->table[i].n == n)
+ return sdm->table[i].rate;
+
+ /* We can't calculate the effective clock rate, so just fail. */
+ return 0;
+}
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate,
+ unsigned long *m, unsigned long *n)
+{
+ unsigned int i;
+
+ if (!(common->features & CCU_FEATURE_SIGMA_DELTA_MOD))
+ return -EINVAL;
+
+ for (i = 0; i < sdm->table_size; i++)
+ if (sdm->table[i].rate == rate) {
+ *m = sdm->table[i].m;
+ *n = sdm->table[i].n;
+ return 0;
+ }
+
+ /* nothing found */
+ return -EINVAL;
+}
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.h b/drivers/clk/sunxi-ng/ccu_sdm.h
new file mode 100644
index 000000000000..2a9b4a2584d6
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_sdm.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SDM_H
+#define _CCU_SDM_H
+
+#include <linux/clk-provider.h>
+
+#include "ccu_common.h"
+
+struct ccu_sdm_setting {
+ unsigned long rate;
+
+ /*
+ * XXX We don't know what the step and bottom register fields
+ * mean. Just copy the whole register value from the vendor
+ * kernel for now.
+ */
+ u32 pattern;
+
+ /*
+ * M and N factors here should be the values used in
+ * calculation, not the raw values written to registers
+ */
+ u32 m;
+ u32 n;
+};
+
+struct ccu_sdm_internal {
+ struct ccu_sdm_setting *table;
+ u32 table_size;
+ /* early SoCs don't have the SDM enable bit in the PLL register */
+ u32 enable;
+ /* second enable bit in tuning register */
+ u32 tuning_enable;
+ u16 tuning_reg;
+};
+
+#define _SUNXI_CCU_SDM(_table, _enable, \
+ _reg, _reg_enable) \
+ { \
+ .table = _table, \
+ .table_size = ARRAY_SIZE(_table), \
+ .enable = _enable, \
+ .tuning_enable = _reg_enable, \
+ .tuning_reg = _reg, \
+ }
+
+bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm);
+void ccu_sdm_helper_enable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate);
+void ccu_sdm_helper_disable(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm);
+
+bool ccu_sdm_helper_has_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate);
+
+unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ u32 m, u32 n);
+
+int ccu_sdm_helper_get_factors(struct ccu_common *common,
+ struct ccu_sdm_internal *sdm,
+ unsigned long rate,
+ unsigned long *m, unsigned long *n);
+
+#endif
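With the SDM helpers and the per-SoC pll_audio_sdm_table entries above in place, the two audio sample-rate base clocks can be requested exactly. A hedged usage sketch (illustrative only; the "pll-audio" con-id and "dev" are assumptions, not part of the patch; needs <linux/clk.h>):

	/* Illustrative only: ask for a rate present in pll_audio_sdm_table.
	 * ccu_nm_set_rate() enables sigma-delta modulation and programs the
	 * matching pattern and N/M factors; since the pll-audio post-divider
	 * is now fixed at 1 with CLK_SET_RATE_PARENT, the request propagates
	 * straight to pll-audio-base. */
	struct clk *pll_audio = devm_clk_get(dev, "pll-audio");

	if (!IS_ERR(pll_audio))
		clk_set_rate(pll_audio, 24576000);

Rates not listed in the table fall back to the plain N/M search in ccu_nm_find_best(), with sigma-delta modulation disabled.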
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 39d2044a1f49..be88368b48a1 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for sunxi specific clk
#
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index dfe5e3e32d28..856fef65433b 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -276,13 +276,11 @@ void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
{
struct clk_hw *hw = __clk_get_hw(clk);
struct clk_factors *factors;
- const char *name;
if (!hw)
return;
factors = to_clk_factors(hw);
- name = clk_hw_get_name(hw);
of_clk_del_provider(node);
/* TODO: The composite clock stuff will leak a bit here. */
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 1e63c5b2d5f4..824f746b2567 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MACH_SUNXI_CLK_FACTORS_H
#define __MACH_SUNXI_CLK_FACTORS_H
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 7626d2194b96..4d5e14142e15 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Chen-Yu Tsai
*
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 6041bdba2e97..a1a634253d6f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -124,7 +124,7 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
return PTR_ERR(data->clk);
}
- data->reset = devm_reset_control_get(&pdev->dev, NULL);
+ data->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(data->reset)) {
dev_err(&pdev->dev, "Could not get reset control\n");
return PTR_ERR(data->reset);
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 4be8af28ee61..b71692391bd6 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += clk.o
obj-y += clk-audio-sync.o
obj-y += clk-dfll.o
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index 638ace64033b..a896692b74ec 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -55,6 +55,7 @@ struct tegra_bpmp_clk_message {
struct {
void *data;
size_t size;
+ int ret;
} rx;
};
@@ -64,6 +65,7 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
struct mrq_clk_request request;
struct tegra_bpmp_message msg;
void *req = &request;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd_and_id = (clk->cmd << 24) | clk->id;
@@ -84,7 +86,13 @@ static int tegra_bpmp_clk_transfer(struct tegra_bpmp *bpmp,
msg.rx.data = clk->rx.data;
msg.rx.size = clk->rx.size;
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_clk_prepare(struct clk_hw *hw)
@@ -414,11 +422,8 @@ static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp,
struct tegra_bpmp_clk_info *info = &clocks[count];
err = tegra_bpmp_clk_get_info(bpmp, id, info);
- if (err < 0) {
- dev_err(bpmp->dev, "failed to query clock %u: %d\n",
- id, err);
+ if (err < 0)
continue;
- }
if (info->num_parents >= U8_MAX) {
dev_err(bpmp->dev,
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 2c44aeb0b97c..0a7deee74eea 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1728,10 +1728,10 @@ EXPORT_SYMBOL(tegra_dfll_register);
* @pdev: DFLL platform_device *
*
* Unbind this driver from the DFLL hardware device represented by
- * @pdev. The DFLL must be disabled for this to succeed. Returns 0
- * upon success or -EBUSY if the DFLL is still active.
+ * @pdev. The DFLL must be disabled for this to succeed. Returns a
+ * soc pointer upon success or -EBUSY if the DFLL is still active.
*/
-int tegra_dfll_unregister(struct platform_device *pdev)
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev)
{
struct tegra_dfll *td = platform_get_drvdata(pdev);
@@ -1739,7 +1739,7 @@ int tegra_dfll_unregister(struct platform_device *pdev)
if (td->mode != DFLL_DISABLED) {
dev_err(&pdev->dev,
"must disable DFLL before removing driver\n");
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
}
debugfs_remove_recursive(td->debugfs_dir);
@@ -1753,6 +1753,6 @@ int tegra_dfll_unregister(struct platform_device *pdev)
reset_control_assert(td->dvco_rst);
- return 0;
+ return td->soc;
}
EXPORT_SYMBOL(tegra_dfll_unregister);
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index ed2ad888268f..83352c8078f2 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -43,7 +43,7 @@ struct tegra_dfll_soc_data {
int tegra_dfll_register(struct platform_device *pdev,
struct tegra_dfll_soc_data *soc);
-int tegra_dfll_unregister(struct platform_device *pdev);
+struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev);
int tegra_dfll_runtime_suspend(struct device *dev);
int tegra_dfll_runtime_resume(struct device *dev);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 689f344377a7..b616e33c5255 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header provides IDs for clocks common between several Tegra SoCs
*/
@@ -12,6 +13,7 @@ enum clk_id {
tegra_clk_amx,
tegra_clk_amx1,
tegra_clk_apb2ape,
+ tegra_clk_ahbdma,
tegra_clk_apbdma,
tegra_clk_apbif,
tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index cf80831de79d..9475c00b7cf9 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -203,3 +203,11 @@ struct clk *tegra_clk_register_periph_nodiv(const char *name,
return _tegra_clk_register_periph(name, parent_names, num_parents,
periph, clk_base, offset, CLK_SET_RATE_PARENT);
}
+
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+ struct tegra_periph_init_data *init)
+{
+ return _tegra_clk_register_periph(init->name, init->p.parent_names,
+ init->num_parents, &init->periph,
+ clk_base, init->offset, init->flags);
+}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 848255cc0209..c02711927d79 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -129,7 +129,6 @@
#define CLK_SOURCE_NVDEC 0x698
#define CLK_SOURCE_NVJPG 0x69c
#define CLK_SOURCE_APE 0x6c0
-#define CLK_SOURCE_SOR1 0x410
#define CLK_SOURCE_SDMMC_LEGACY 0x694
#define CLK_SOURCE_QSPI 0x6c4
#define CLK_SOURCE_VI_I2C 0x6c8
@@ -278,7 +277,6 @@ static DEFINE_SPINLOCK(PLLP_OUTA_lock);
static DEFINE_SPINLOCK(PLLP_OUTB_lock);
static DEFINE_SPINLOCK(PLLP_OUTC_lock);
static DEFINE_SPINLOCK(sor0_lock);
-static DEFINE_SPINLOCK(sor1_lock);
#define MUX_I2S_SPDIF(_id) \
static const char *mux_pllaout0_##_id##_2x_pllp_clkm[] = { "pll_a_out0", \
@@ -604,18 +602,6 @@ static u32 mux_pllp_plld_plld2_clkm_idx[] = {
[0] = 0, [1] = 2, [2] = 5, [3] = 6
};
-static const char *mux_sor_safe_sor1_brick_sor1_src[] = {
- /*
- * Bit 0 of the mux selects sor1_brick, irrespective of bit 1, so the
- * sor1_brick parent appears twice in the list below. This is merely
- * to support clk_get_parent() if firmware happened to set these bits
- * to 0b11. While not an invalid setting, code should always set the
- * bits to 0b01 to select sor1_brick.
- */
- "sor_safe", "sor1_brick", "sor1_src", "sor1_brick"
-};
-#define mux_sor_safe_sor1_brick_sor1_src_idx NULL
-
static const char *mux_pllp_pllre_clkm[] = {
"pll_p", "pll_re_out1", "clk_m"
};
@@ -804,8 +790,6 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
- MUX8_NOGATE_LOCK("sor1_src", mux_pllp_plld_plld2_clkm, CLK_SOURCE_SOR1, tegra_clk_sor1_src, &sor1_lock),
- NODIV("sor1", mux_sor_safe_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 14, MASK(2), 183, 0, tegra_clk_sor1, &sor1_lock),
MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
@@ -823,7 +807,8 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("timer", "clk_m", 5, 0, tegra_clk_timer, CLK_IS_CRITICAL),
GATE("isp", "clk_m", 23, 0, tegra_clk_isp, 0),
GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
- GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
+ GATE("ahbdma", "hclk", 33, 0, tegra_clk_ahbdma, 0),
+ GATE("apbdma", "pclk", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
@@ -927,10 +912,7 @@ static void __init periph_clk_init(void __iomem *clk_base,
continue;
data->periph.gate.regs = bank;
- clk = tegra_clk_register_periph(data->name,
- data->p.parent_names, data->num_parents,
- &data->periph, clk_base, data->offset,
- data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
*dt_clk = clk;
}
}
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4f6fd307cb70..10047107c1dc 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -166,7 +166,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
clk_base + SYSTEM_CLK_RATE, 0, 2, 0,
&sysrate_lock);
clk = clk_register_gate(NULL, "pclk", "pclk_div", CLK_SET_RATE_PARENT |
- CLK_IGNORE_UNUSED, clk_base + SYSTEM_CLK_RATE,
+ CLK_IS_CRITICAL, clk_base + SYSTEM_CLK_RATE,
3, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index fd1a99c05c2d..63087d17c3e2 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1092,9 +1092,7 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name,
- data->p.parent_names, data->num_parents,
- &data->periph, clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index ad1c1cc829cb..269d3595758b 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -125,19 +125,17 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, soc);
-
return 0;
}
static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
{
- struct tegra_dfll_soc_data *soc = platform_get_drvdata(pdev);
- int err;
+ struct tegra_dfll_soc_data *soc;
- err = tegra_dfll_unregister(pdev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to unregister DFLL: %d\n", err);
+ soc = tegra_dfll_unregister(pdev);
+ if (IS_ERR(soc))
+ dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
+ PTR_ERR(soc));
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 837e5cbd60e9..cbd5a2e5c569 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -522,6 +522,8 @@ static struct tegra_devclk devclks[] __initdata = {
};
static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_ahbdma] = { .dt_id = TEGRA20_CLK_AHBDMA, .present = true },
+ [tegra_clk_apbdma] = { .dt_id = TEGRA20_CLK_APBDMA, .present = true },
[tegra_clk_spdif_out] = { .dt_id = TEGRA20_CLK_SPDIF_OUT, .present = true },
[tegra_clk_spdif_in] = { .dt_id = TEGRA20_CLK_SPDIF_IN, .present = true },
[tegra_clk_sdmmc1] = { .dt_id = TEGRA20_CLK_SDMMC1, .present = true },
@@ -806,11 +808,6 @@ static void __init tegra20_periph_clk_init(void)
clk_base, 0, 3, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_AC97] = clk;
- /* apbdma */
- clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
- 0, 34, periph_clk_enb_refcnt);
- clks[TEGRA20_CLK_APBDMA] = clk;
-
/* emc */
clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
ARRAY_SIZE(mux_pllmcp_clkm),
@@ -850,9 +847,7 @@ static void __init tegra20_periph_clk_init(void)
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->p.parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
@@ -1025,7 +1020,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
{ TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
- { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 120000000, 1 },
+ { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
{ TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
{ TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
{ TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 6d7a613f2656..9e6260869eb9 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -40,6 +40,7 @@
#define CLK_SOURCE_CSITE 0x1d4
#define CLK_SOURCE_EMC 0x19c
+#define CLK_SOURCE_SOR1 0x410
#define PLLC_BASE 0x80
#define PLLC_OUT 0x84
@@ -264,6 +265,7 @@ static DEFINE_SPINLOCK(pll_d_lock);
static DEFINE_SPINLOCK(pll_e_lock);
static DEFINE_SPINLOCK(pll_re_lock);
static DEFINE_SPINLOCK(pll_u_lock);
+static DEFINE_SPINLOCK(sor1_lock);
static DEFINE_SPINLOCK(emc_lock);
/* possible OSC frequencies in Hz */
@@ -2566,8 +2568,8 @@ static int tegra210_enable_pllu(void)
reg |= PLL_ENABLE;
writel(reg, clk_base + PLLU_BASE);
- readl_relaxed_poll_timeout(clk_base + PLLU_BASE, reg,
- reg & PLL_BASE_LOCK, 2, 1000);
+ readl_relaxed_poll_timeout_atomic(clk_base + PLLU_BASE, reg,
+ reg & PLL_BASE_LOCK, 2, 1000);
if (!(reg & PLL_BASE_LOCK)) {
pr_err("Timed out waiting for PLL_U to lock\n");
return -ETIMEDOUT;
@@ -2628,10 +2630,35 @@ static int tegra210_init_pllu(void)
return 0;
}
+static const char * const sor1_out_parents[] = {
+ /*
+ * Bit 0 of the mux selects sor1_pad_clkout, irrespective of bit 1, so
+ * the sor1_pad_clkout parent appears twice in the list below. This is
+ * merely to support clk_get_parent() if firmware happened to set
+ * these bits to 0b11. While not an invalid setting, code should
+ * always set the bits to 0b01 to select sor1_pad_clkout.
+ */
+ "sor_safe", "sor1_pad_clkout", "sor1", "sor1_pad_clkout",
+};
+
+static const char * const sor1_parents[] = {
+ "pll_p", "pll_d_out0", "pll_d2_out0", "clk_m",
+};
+
+static u32 sor1_parents_idx[] = { 0, 2, 5, 6 };
+
+static struct tegra_periph_init_data tegra210_periph[] = {
+ TEGRA_INIT_DATA_TABLE("sor1", NULL, NULL, sor1_parents,
+ CLK_SOURCE_SOR1, 29, 0x7, 0, 0, 8, 1,
+ TEGRA_DIVIDER_ROUND_UP, 183, 0, tegra_clk_sor1,
+ sor1_parents_idx, 0, &sor1_lock),
+};
+
static __init void tegra210_periph_clk_init(void __iomem *clk_base,
void __iomem *pmc_base)
{
struct clk *clk;
+ unsigned int i;
/* xusb_ss_div2 */
clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -2650,6 +2677,12 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 17, 207);
clks[TEGRA210_CLK_DPAUX1] = clk;
+ clk = clk_register_mux_table(NULL, "sor1_out", sor1_out_parents,
+ ARRAY_SIZE(sor1_out_parents), 0,
+ clk_base + CLK_SOURCE_SOR1, 14, 0x3,
+ 0, NULL, &sor1_lock);
+ clks[TEGRA210_CLK_SOR1_OUT] = clk;
+
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -2694,6 +2727,20 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
0, NULL);
clks[TEGRA210_CLK_ACLK] = clk;
+ for (i = 0; i < ARRAY_SIZE(tegra210_periph); i++) {
+ struct tegra_periph_init_data *init = &tegra210_periph[i];
+ struct clk **clkp;
+
+ clkp = tegra_lookup_dt_id(init->clk_id, tegra210_clks);
+ if (!clkp) {
+ pr_warn("clock %u not found\n", init->clk_id);
+ continue;
+ }
+
+ clk = tegra_clk_register_periph_data(clk_base, init);
+ *clkp = clk;
+ }
+
tegra_periph_clk_init(clk_base, pmc_base, tegra210_clks, &pll_p_params);
}
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index a2d163f759b4..bee84c554932 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -359,7 +359,7 @@ static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
};
/* PLL parameters */
-static struct tegra_clk_pll_params pll_c_params = {
+static struct tegra_clk_pll_params pll_c_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -388,7 +388,7 @@ static struct div_nmp pllm_nmp = {
.override_divp_shift = 15,
};
-static struct tegra_clk_pll_params pll_m_params = {
+static struct tegra_clk_pll_params pll_m_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -409,7 +409,7 @@ static struct tegra_clk_pll_params pll_m_params = {
TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_FIXED,
};
-static struct tegra_clk_pll_params pll_p_params = {
+static struct tegra_clk_pll_params pll_p_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -444,7 +444,7 @@ static struct tegra_clk_pll_params pll_a_params = {
TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_d_params = {
+static struct tegra_clk_pll_params pll_d_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -461,7 +461,7 @@ static struct tegra_clk_pll_params pll_d_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_d2_params = {
+static struct tegra_clk_pll_params pll_d2_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -478,7 +478,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_u_params = {
+static struct tegra_clk_pll_params pll_u_params __ro_after_init = {
.input_min = 2000000,
.input_max = 40000000,
.cf_min = 1000000,
@@ -496,7 +496,7 @@ static struct tegra_clk_pll_params pll_u_params = {
TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_x_params = {
+static struct tegra_clk_pll_params pll_x_params __ro_after_init = {
.input_min = 2000000,
.input_max = 31000000,
.cf_min = 1000000,
@@ -513,7 +513,7 @@ static struct tegra_clk_pll_params pll_x_params = {
TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
};
-static struct tegra_clk_pll_params pll_e_params = {
+static struct tegra_clk_pll_params pll_e_params __ro_after_init = {
.input_min = 12000000,
.input_max = 216000000,
.cf_min = 12000000,
@@ -788,6 +788,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
[tegra_clk_extern3] = { .dt_id = TEGRA30_CLK_EXTERN3, .present = true },
[tegra_clk_disp1] = { .dt_id = TEGRA30_CLK_DISP1, .present = true },
[tegra_clk_disp2] = { .dt_id = TEGRA30_CLK_DISP2, .present = true },
+ [tegra_clk_ahbdma] = { .dt_id = TEGRA30_CLK_AHBDMA, .present = true },
[tegra_clk_apbdma] = { .dt_id = TEGRA30_CLK_APBDMA, .present = true },
[tegra_clk_rtc] = { .dt_id = TEGRA30_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA30_CLK_TIMER, .present = true },
@@ -964,7 +965,7 @@ static void __init tegra30_super_clk_init(void)
* U71 divider of cclk_lp.
*/
clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
- clk_base + SUPER_CCLKG_DIVIDER, 0,
+ clk_base + SUPER_CCLKLP_DIVIDER, 0,
TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
@@ -1079,9 +1080,7 @@ static void __init tegra30_periph_clk_init(void)
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
- clk = tegra_clk_register_periph(data->name, data->p.parent_names,
- data->num_parents, &data->periph,
- clk_base, data->offset, data->flags);
+ clk = tegra_clk_register_periph_data(clk_base, data);
clks[data->clk_id] = clk;
}
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 872f1189ad7f..3b2763df51c2 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -662,6 +662,9 @@ struct tegra_periph_init_data {
_clk_num, _gate_flags, _clk_id,\
NULL, 0, NULL)
+struct clk *tegra_clk_register_periph_data(void __iomem *clk_base,
+ struct tegra_periph_init_data *init);
+
/**
* struct clk_super_mux - super clock
*
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index edb9f471e525..a2293ee09440 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ifeq ($(CONFIG_ARCH_OMAP2PLUS), y)
obj-y += clk.o autoidle.o clockdomain.o
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 13eb04f72389..148815470431 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -274,8 +274,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
/* Get configuration for the ATL instances */
snprintf(prop, sizeof(prop), "atl%u", i);
- of_node_get(node);
- cfg_node = of_find_node_by_name(node, prop);
+ cfg_node = of_get_child_by_name(node, prop);
if (cfg_node) {
ret = of_property_read_u32(cfg_node, "bws",
&cdesc->bws);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 88f04a4cb890..77f93f6d2806 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -292,10 +292,8 @@ static struct clk *_register_divider(struct device *dev, const char *name,
/* allocate the divider */
div = kzalloc(sizeof(*div), GFP_KERNEL);
- if (!div) {
- pr_err("%s: could not allocate divider clk\n", __func__);
+ if (!div)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &ti_clk_divider_ops;
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 18c267b38461..d4705803f3d3 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -108,10 +108,8 @@ static struct clk *_register_mux(struct device *dev, const char *name,
/* allocate the mux */
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- pr_err("%s: could not allocate mux clk\n", __func__);
+ if (!mux)
return ERR_PTR(-ENOMEM);
- }
init.name = name;
init.ops = &ti_clk_mux_ops;
diff --git a/drivers/clk/uniphier/Makefile b/drivers/clk/uniphier/Makefile
index 665d1d65a90e..e5715abef180 100644
--- a/drivers/clk/uniphier/Makefile
+++ b/drivers/clk/uniphier/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += clk-uniphier-core.o
obj-y += clk-uniphier-cpugear.o
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 16e4d303f535..badc478a86c6 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -13,6 +13,8 @@
* GNU General Public License for more details.
*/
+#include <linux/stddef.h>
+
#include "clk-uniphier.h"
#define UNIPHIER_MIO_CLK_SD_FIXED \
@@ -73,15 +75,12 @@
#define UNIPHIER_MIO_CLK_USB2_PHY(idx, ch) \
UNIPHIER_CLK_GATE("usb2" #ch "-phy", (idx), "usb2", 0x20 + 0x200 * (ch), 29)
-#define UNIPHIER_MIO_CLK_DMAC(idx) \
- UNIPHIER_CLK_GATE("miodmac", (idx), "stdmac", 0x20, 25)
-
const struct uniphier_clk_data uniphier_ld4_mio_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
UNIPHIER_MIO_CLK_SD(2, 2),
- UNIPHIER_MIO_CLK_DMAC(7),
+ UNIPHIER_CLK_GATE("miodmac", 7, NULL, 0x20, 25),
UNIPHIER_MIO_CLK_USB2(8, 0),
UNIPHIER_MIO_CLK_USB2(9, 1),
UNIPHIER_MIO_CLK_USB2(10, 2),
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index 0e396f3da526..d244e724e198 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
- UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
+ UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
UNIPHIER_PRO5_SYS_CLK_NAND(2),
@@ -233,9 +233,9 @@ const struct uniphier_clk_data uniphier_pxs3_sys_clk_data[] = {
UNIPHIER_LD20_SYS_CLK_SD,
UNIPHIER_LD11_SYS_CLK_NAND(2),
UNIPHIER_LD11_SYS_CLK_EMMC(4),
- UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x2104, 4), /* =GIO0 */
- UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x2104, 5), /* =GIO1 */
- UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x2104, 6), /* =GIO1-1 */
+ UNIPHIER_CLK_GATE("usb30", 12, NULL, 0x210c, 4), /* =GIO0 */
+ UNIPHIER_CLK_GATE("usb31-0", 13, NULL, 0x210c, 5), /* =GIO1 */
+ UNIPHIER_CLK_GATE("usb31-1", 14, NULL, 0x210c, 6), /* =GIO1-1 */
UNIPHIER_CLK_GATE("usb30-phy0", 16, NULL, 0x210c, 16),
UNIPHIER_CLK_GATE("usb30-phy1", 17, NULL, 0x210c, 18),
UNIPHIER_CLK_GATE("usb30-phy2", 18, NULL, 0x210c, 20),
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index f3baef29859c..fedc083dc8be 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ux500 clocks
#
diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c
index f50592775c9d..7cfb59c9136d 100644
--- a/drivers/clk/ux500/clk-prcc.c
+++ b/drivers/clk/ux500/clk-prcc.c
@@ -107,11 +107,9 @@ static struct clk *clk_reg_prcc(const char *name,
return ERR_PTR(-EINVAL);
}
- clk = kzalloc(sizeof(struct clk_prcc), GFP_KERNEL);
- if (!clk) {
- pr_err("clk_prcc: %s could not allocate clk\n", __func__);
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
clk->base = ioremap(phy_base, SZ_4K);
if (!clk->base)
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 6e3e16b2e5ca..9d1f2d4550ad 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -258,11 +258,9 @@ static struct clk *clk_reg_prcmu(const char *name,
return ERR_PTR(-EINVAL);
}
- clk = kzalloc(sizeof(struct clk_prcmu), GFP_KERNEL);
- if (!clk) {
- pr_err("clk_prcmu: %s could not allocate clk\n", __func__);
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
clk->cg_sel = cg_sel;
clk->is_prepared = 1;
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index 8a4e93ce1e42..7c0403b733ae 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -139,11 +139,9 @@ static struct clk *clk_reg_sysctrl(struct device *dev,
return ERR_PTR(-EINVAL);
}
- clk = devm_kzalloc(dev, sizeof(struct clk_sysctrl), GFP_KERNEL);
- if (!clk) {
- dev_err(dev, "clk_sysctrl: could not allocate clk\n");
+ clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
+ if (!clk)
return ERR_PTR(-ENOMEM);
- }
/* set main clock registers */
clk->reg_sel[0] = reg_sel[0];
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 09fbe66f1f11..dafe7a45875d 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -359,16 +359,13 @@ static struct clk *icst_clk_setup(struct device *dev,
struct clk_init_data init;
struct icst_params *pclone;
- icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
- if (!icst) {
- pr_err("could not allocate ICST clock!\n");
+ icst = kzalloc(sizeof(*icst), GFP_KERNEL);
+ if (!icst)
return ERR_PTR(-ENOMEM);
- }
pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL);
if (!pclone) {
kfree(icst);
- pr_err("could not clone ICST params\n");
return ERR_PTR(-ENOMEM);
}
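The allocation hunks above (ti, ux500 and versatile) converge on the same two conventions: size the allocation with sizeof(*ptr) rather than sizeof(struct type), and drop the out-of-memory message because the slab allocator already warns on failure. A minimal sketch of that style, using an illustrative structure name:

#include <linux/slab.h>
#include <linux/err.h>

struct example_clk {
	void __iomem *base;
};

static struct example_clk *example_clk_alloc(void)
{
	struct example_clk *eclk;

	eclk = kzalloc(sizeof(*eclk), GFP_KERNEL);	/* tracks the type of eclk */
	if (!eclk)
		return ERR_PTR(-ENOMEM);		/* no pr_err() needed here */

	return eclk;
}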
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
index 5add02ebec5d..e36ca1a20e90 100644
--- a/drivers/clk/versatile/clk-icst.h
+++ b/drivers/clk/versatile/clk-icst.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* struct clk_icst_desc - descriptor for the ICST VCO
* @params: ICST parameters
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index cc6062049170..c729a88007d0 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,9 +1,8 @@
menu "Clock Source drivers"
- depends on !ARCH_USES_GETTIMEOFFSET
+ depends on GENERIC_CLOCKEVENTS
config TIMER_OF
bool
- depends on GENERIC_CLOCKEVENTS
select TIMER_PROBE
config TIMER_ACPI
@@ -30,21 +29,18 @@ config CLKSRC_MMIO
config BCM2835_TIMER
bool "BCM2835 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the BCM2835 timer driver.
config BCM_KONA_TIMER
bool "BCM mobile timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the BCM Kona mobile timer driver.
config DIGICOLOR_TIMER
bool "Digicolor timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
depends on HAS_IOMEM
help
@@ -52,7 +48,6 @@ config DIGICOLOR_TIMER
config DW_APB_TIMER
bool "DW APB timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
help
Enables the support for the dw_apb timer.
@@ -63,7 +58,6 @@ config DW_APB_TIMER_OF
config FTTMR010_TIMER
bool "Faraday Technology timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
select TIMER_OF
@@ -90,7 +84,6 @@ config ARMADA_370_XP_TIMER
config MESON6_TIMER
bool "Meson6 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the Meson6 timer driver.
@@ -105,14 +98,12 @@ config ORION_TIMER
config OWL_TIMER
bool "Owl timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the Actions Semi Owl timer driver.
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
select TIMER_OF
@@ -135,7 +126,6 @@ config TEGRA_TIMER
config VT8500_TIMER
bool "VT8500 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
Enables support for the VT8500 driver.
@@ -148,7 +138,6 @@ config CADENCE_TTC_TIMER
config ASM9260_TIMER
bool "ASM9260 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select TIMER_OF
help
@@ -171,28 +160,24 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
Use the always on PRCMU Timer as clocksource
config CLPS711X_TIMER
bool "Cirrus logic timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Cirrus Logic PS711 timer.
config ATLAS7_TIMER
bool "Atlas7 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Atlas7 timer.
config MXS_TIMER
bool "Mxs timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
select STMP_DEVICE
help
@@ -200,14 +185,12 @@ config MXS_TIMER
config PRIMA2_TIMER
bool "Prima2 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Prima2 timer.
config U300_TIMER
bool "U300 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on ARM
select CLKSRC_MMIO
help
@@ -215,14 +198,12 @@ config U300_TIMER
config NSPIRE_TIMER
bool "NSpire timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Nspire timer.
config KEYSTONE_TIMER
bool "Keystone timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on ARM || ARM64
select CLKSRC_MMIO
help
@@ -230,7 +211,6 @@ config KEYSTONE_TIMER
config INTEGRATOR_AP_TIMER
bool "Integrator-ap timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables support for the Integrator-ap timer.
@@ -253,7 +233,7 @@ config CLKSRC_EFM32
config CLKSRC_LPC32XX
bool "Clocksource for LPC32XX" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
depends on ARM
select CLKSRC_MMIO
select TIMER_OF
@@ -262,7 +242,7 @@ config CLKSRC_LPC32XX
config CLKSRC_PISTACHIO
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
@@ -298,7 +278,6 @@ config CLKSRC_MPS2
config ARC_TIMERS
bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select TIMER_OF
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
@@ -307,7 +286,6 @@ config ARC_TIMERS
config ARC_TIMERS_64BIT
bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on ARC_TIMERS
select TIMER_OF
help
@@ -407,7 +385,6 @@ config ATMEL_PIT
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select TIMER_OF
select MFD_SYSCON
help
@@ -426,7 +403,6 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
help
This is a new clocksource driver for the PWM timer found in
@@ -436,7 +412,6 @@ config CLKSRC_SAMSUNG_PWM
config FSL_FTM_TIMER
bool "Freescale FlexTimer Module driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
@@ -450,7 +425,6 @@ config VF_PIT_TIMER
config OXNAS_RPS_TIMER
bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select TIMER_OF
select CLKSRC_MMIO
help
@@ -461,7 +435,7 @@ config SYS_SUPPORTS_SH_CMT
config MTK_TIMER
bool "Mediatek timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
select TIMER_OF
select CLKSRC_MMIO
help
@@ -479,7 +453,6 @@ config SYS_SUPPORTS_EM_STI
config CLKSRC_JCORE_PIT
bool "J-Core PIT timer driver" if COMPILE_TEST
depends on OF
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
@@ -488,7 +461,6 @@ config CLKSRC_JCORE_PIT
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_CMT
help
@@ -498,7 +470,6 @@ config SH_TIMER_CMT
config SH_TIMER_MTU2
bool "Renesas MTU2 timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_MTU2
help
@@ -508,14 +479,12 @@ config SH_TIMER_MTU2
config RENESAS_OSTM
bool "Renesas OSTM timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
select CLKSRC_MMIO
help
Enables the support for the Renesas OSTM.
config SH_TIMER_TMU
bool "Renesas TMU timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_TMU
help
@@ -525,7 +494,7 @@ config SH_TIMER_TMU
config EM_TIMER_STI
bool "Renesas STI timer driver" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
default SYS_SUPPORTS_EM_STI
help
This enables build of a clocksource and clockevent driver for
@@ -566,7 +535,6 @@ config CLKSRC_TANGO_XTAL
config CLKSRC_PXA
bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
select CLKSRC_MMIO
help
@@ -575,20 +543,20 @@ config CLKSRC_PXA
config H8300_TMR8
bool "Clockevent timer for the H8300 platform" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
help
This enables the 8-bit timer for the H8300 platform.
config H8300_TMR16
bool "Clockevent timer for the H83069 platform" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
help
This enables the 16-bit timer for the H8300 platform with the
H83069 cpu.
config H8300_TPU
bool "Clocksource for the H8300 platform" if COMPILE_TEST
- depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on HAS_IOMEM
help
This enables the clocksource for the H8300 platform with the
H8S2678 cpu.
@@ -600,7 +568,7 @@ config CLKSRC_IMX_GPT
config CLKSRC_IMX_TPM
bool "Clocksource using i.MX TPM" if COMPILE_TEST
- depends on ARM && CLKDEV_LOOKUP && GENERIC_CLOCKEVENTS
+ depends on ARM && CLKDEV_LOOKUP
select CLKSRC_MMIO
help
Enable this option to use IMX Timer/PWM Module (TPM) timer as
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dbc1ad14515e..72711f1491e3 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fd4b7f684bd0..57cb2f00fc07 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -77,6 +77,7 @@ static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;
+static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static int __init early_evtstrm_cfg(char *buf)
@@ -158,6 +159,7 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
* if we don't have the cp15 accessors we won't have a problem.
*/
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+EXPORT_SYMBOL_GPL(arch_timer_read_counter);
static u64 arch_counter_read(struct clocksource *cs)
{
@@ -217,6 +219,11 @@ static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
return __fsl_a008585_read_reg(cntv_tval_el0);
}
+static u64 notrace fsl_a008585_read_cntpct_el0(void)
+{
+ return __fsl_a008585_read_reg(cntpct_el0);
+}
+
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
return __fsl_a008585_read_reg(cntvct_el0);
@@ -258,6 +265,11 @@ static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
return __hisi_161010101_read_reg(cntv_tval_el0);
}
+static u64 notrace hisi_161010101_read_cntpct_el0(void)
+{
+ return __hisi_161010101_read_reg(cntpct_el0);
+}
+
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
return __hisi_161010101_read_reg(cntvct_el0);
@@ -288,6 +300,15 @@ static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
+static u64 notrace arm64_858921_read_cntpct_el0(void)
+{
+ u64 old, new;
+
+ old = read_sysreg(cntpct_el0);
+ new = read_sysreg(cntpct_el0);
+ return (((old ^ new) >> 32) & 1) ? old : new;
+}
+
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
u64 old, new;
@@ -299,8 +320,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
-DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
- timer_unstable_counter_workaround);
+DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
@@ -310,16 +330,19 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
struct clock_event_device *clk)
{
unsigned long ctrl;
- u64 cval = evt + arch_counter_get_cntvct();
+ u64 cval;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- if (access == ARCH_TIMER_PHYS_ACCESS)
+ if (access == ARCH_TIMER_PHYS_ACCESS) {
+ cval = evt + arch_counter_get_cntpct();
write_sysreg(cval, cntp_cval_el0);
- else
+ } else {
+ cval = evt + arch_counter_get_cntvct();
write_sysreg(cval, cntv_cval_el0);
+ }
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
@@ -346,6 +369,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "Freescale erratum a005858",
.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
+ .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -358,6 +382,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "HiSilicon erratum 161010101",
.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+ .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -368,6 +393,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.desc = "HiSilicon erratum 161010101",
.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
+ .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
.set_next_event_phys = erratum_set_next_event_tval_phys,
.set_next_event_virt = erratum_set_next_event_tval_virt,
@@ -378,6 +404,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_local_cap_id,
.id = (void *)ARM64_WORKAROUND_858921,
.desc = "ARM erratum 858921",
+ .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
@@ -740,6 +767,7 @@ static void arch_timer_evtstrm_enable(int divider)
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
+ cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
static void arch_timer_configure_evtstream(void)
@@ -864,6 +892,16 @@ u32 arch_timer_get_rate(void)
return arch_timer_rate;
}
+bool arch_timer_evtstrm_available(void)
+{
+ /*
+ * We might get called from a preemptible context. This is fine
+ * because availability of the event stream should be always the same
+ * for a preemptible context and context where we might resume a task.
+ */
+ return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
+}
+
static u64 arch_counter_get_cntvct_mem(void)
{
u32 vct_lo, vct_hi, tmp_hi;
@@ -890,7 +928,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
- if (IS_ENABLED(CONFIG_ARM64) ||
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
@@ -929,6 +967,8 @@ static int arch_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+ cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+
arch_timer_stop(clk);
return 0;
}
@@ -938,10 +978,16 @@ static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
- if (action == CPU_PM_ENTER)
+ if (action == CPU_PM_ENTER) {
__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
- else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+
+ cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
+ } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+
+ if (elf_hwcap & HWCAP_EVTSTRM)
+ cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
+ }
return NOTIFY_OK;
}
@@ -1017,7 +1063,6 @@ static int __init arch_timer_register(void)
if (err)
goto out_unreg_notify;
-
/* Register and immediately configure the timer on the boot CPU */
err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
"clockevents/arm/arch_timer:starting",
@@ -1268,10 +1313,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
iounmap(cntctlbase);
- if (!best_frame)
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
-
return best_frame;
}
@@ -1372,6 +1413,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
frame = arch_timer_mem_find_best_frame(timer_mem);
if (!frame) {
+ pr_err("Unable to find a suitable frame in timer @ %pa\n",
+ &timer_mem->cntctlbase);
ret = -EINVAL;
goto out;
}
@@ -1420,7 +1463,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame;
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
int timer_count, i, ret = 0;
timers = kcalloc(platform_timer_count, sizeof(*timers),
@@ -1432,14 +1475,6 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
if (ret || !timer_count)
goto out;
- for (i = 0; i < timer_count; i++) {
- ret = arch_timer_mem_verify_cntfrq(&timers[i]);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
- }
-
/*
* While unlikely, it's theoretically possible that none of the frames
* in a timer expose the combination of features we want.
@@ -1448,12 +1483,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
- if (frame)
- break;
+ if (!best_frame)
+ best_frame = frame;
+
+ ret = arch_timer_mem_verify_cntfrq(timer);
+ if (ret) {
+ pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
+ goto out;
+ }
+
+ if (!best_frame) /* implies !frame */
+ /*
+ * Only complain about missing suitable frames if we
+ * haven't already found one in a previous iteration.
+ */
+ pr_err("Unable to find a suitable frame in timer @ %pa\n",
+ &timer->cntctlbase);
}
- if (frame)
- ret = arch_timer_mem_frame_register(frame);
+ if (best_frame)
+ ret = arch_timer_mem_frame_register(best_frame);
out:
kfree(timers);
return ret;
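The arm_arch_timer changes above track event stream availability per CPU (evtstrm_available), clear it across CPU hotplug and CPU PM transitions, and export arch_timer_evtstrm_available() so other code can ask whether the current CPU still has the event stream. A hedged sketch of a possible caller follows; the wait helper and tick arithmetic are illustrative only and not part of the patch:

#include <clocksource/arm_arch_timer.h>
#include <linux/delay.h>
#include <asm/barrier.h>

/* Hypothetical short wait that only relies on WFE when the event stream is up. */
static void example_short_wait(unsigned long usecs)
{
	if (arch_timer_evtstrm_available()) {
		u32 ticks_per_us = arch_timer_get_rate() / 1000000;
		u64 end = arch_timer_read_counter() + (u64)usecs * ticks_per_us;

		while (arch_timer_read_counter() < end)
			wfe();		/* periodically woken by the event stream */
	} else {
		udelay(usecs);		/* no event stream on this CPU */
	}
}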
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 39e489a96ad7..60da2537bef9 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
- event_handler = ACCESS_ONCE(timer->evt.event_handler);
+ event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
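ACCESS_ONCE() was retired in favour of READ_ONCE()/WRITE_ONCE(), which is what the hunk above adopts. A tiny sketch of the idiom with an illustrative flag variable:

#include <linux/compiler.h>

static int example_flag;

static int example_poll(void)
{
	/* Force a single, non-torn load; the compiler may not re-read or fuse it. */
	int v = READ_ONCE(example_flag);

	return v;
}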
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index dfbd4f8051cb..86ca91451b2e 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* H8/300 16bit Timer driver
*
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index f6ffb0cef091..1d740a8c42ab 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/h8300/kernel/cpu/timer/timer8.c
*
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 45a8d17dac1e..17d4ab0f6ad1 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* H8S TPU Driver
*
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 64f6490740d7..9c38895542f4 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i8253 PIT clocksource
*/
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index ae3167c28b12..a04808a21d4e 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -39,16 +39,18 @@ static u64 notrace gic_read_count(void)
static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
- unsigned long flags;
+ int cpu = cpumask_first(evt->cpumask);
u64 cnt;
int res;
cnt = gic_read_count();
cnt += (u64)delta;
- local_irq_save(flags);
- write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask)));
- write_gic_vo_compare(cnt);
- local_irq_restore(flags);
+ if (cpu == raw_smp_processor_id()) {
+ write_gic_vl_compare(cnt);
+ } else {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_compare(cnt);
+ }
res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index d19c53c11094..c68630565079 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -125,7 +125,7 @@ static int __init owl_timer_init(struct device_node *node)
owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
if (IS_ERR(owl_timer_base)) {
- pr_err("Can't map timer registers");
+ pr_err("Can't map timer registers\n");
return PTR_ERR(owl_timer_base);
}
@@ -134,7 +134,7 @@ static int __init owl_timer_init(struct device_node *node)
timer1_irq = of_irq_get_byname(node, "timer1");
if (timer1_irq <= 0) {
- pr_err("Can't parse timer1 IRQ");
+ pr_err("Can't parse timer1 IRQ\n");
return -EINVAL;
}
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index c27f4c850d83..33f370dbd0d6 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -274,7 +274,7 @@ static int __init rk_clksrc_init(struct device_node *np)
TIMER_NAME, rk_clksrc->freq, 250, 32,
clocksource_mmio_readl_down);
if (ret) {
- pr_err("Failed to register clocksource");
+ pr_err("Failed to register clocksource\n");
goto out_clocksource;
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index e09e8bf0bb9b..70b3cf8e23d0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -39,16 +40,16 @@ struct sh_cmt_device;
* SoC but also on the particular instance. The following table lists the main
* characteristics of those flavours.
*
- * 16B 32B 32B-F 48B 48B-2
+ * 16B 32B 32B-F 48B R-Car Gen2
* -----------------------------------------------------------------------------
* Channels 2 1/4 1 6 2/8
* Control Width 16 16 16 16 32
* Counter Width 16 32 32 32/48 32/48
* Shared Start/Stop Y Y Y Y N
*
- * The 48-bit gen2 version has a per-channel start/stop register located in the
- * channel registers block. All other versions have a shared start/stop register
- * located in the global space.
+ * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
+ * located in the channel registers block. All other versions have a shared
+ * start/stop register located in the global space.
*
* Channels are indexed from 0 to N-1 in the documentation. The channel index
* infers the start/stop bit position in the control register and the channel
@@ -66,14 +67,16 @@ struct sh_cmt_device;
enum sh_cmt_model {
SH_CMT_16BIT,
SH_CMT_32BIT,
- SH_CMT_32BIT_FAST,
SH_CMT_48BIT,
- SH_CMT_48BIT_GEN2,
+ SH_CMT0_RCAR_GEN2,
+ SH_CMT1_RCAR_GEN2,
};
struct sh_cmt_info {
enum sh_cmt_model model;
+ unsigned int channels_mask;
+
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
unsigned long clear_bits;
@@ -200,18 +203,20 @@ static const struct sh_cmt_info sh_cmt_info[] = {
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
- [SH_CMT_32BIT_FAST] = {
- .model = SH_CMT_32BIT_FAST,
+ [SH_CMT_48BIT] = {
+ .model = SH_CMT_48BIT,
+ .channels_mask = 0x3f,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
- .read_control = sh_cmt_read16,
- .write_control = sh_cmt_write16,
+ .read_control = sh_cmt_read32,
+ .write_control = sh_cmt_write32,
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
- [SH_CMT_48BIT] = {
- .model = SH_CMT_48BIT,
+ [SH_CMT0_RCAR_GEN2] = {
+ .model = SH_CMT0_RCAR_GEN2,
+ .channels_mask = 0x60,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
@@ -220,8 +225,9 @@ static const struct sh_cmt_info sh_cmt_info[] = {
.read_count = sh_cmt_read32,
.write_count = sh_cmt_write32,
},
- [SH_CMT_48BIT_GEN2] = {
- .model = SH_CMT_48BIT_GEN2,
+ [SH_CMT1_RCAR_GEN2] = {
+ .model = SH_CMT1_RCAR_GEN2,
+ .channels_mask = 0xff,
.width = 32,
.overflow_bit = SH_CMT32_CMCSR_CMF,
.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
@@ -859,6 +865,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
ch->cmt = cmt;
ch->index = index;
ch->hwidx = hwidx;
+ ch->timer_bit = hwidx;
/*
* Compute the address of the channel control register block. For the
@@ -873,16 +880,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
case SH_CMT_48BIT:
ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
break;
- case SH_CMT_32BIT_FAST:
- /*
- * The 32-bit "fast" timer has a single channel at hwidx 5 but
- * is located at offset 0x40 instead of 0x60 for some reason.
- */
- ch->ioctrl = cmt->mapbase + 0x40;
- break;
- case SH_CMT_48BIT_GEN2:
+ case SH_CMT0_RCAR_GEN2:
+ case SH_CMT1_RCAR_GEN2:
ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
ch->ioctrl = ch->iostart + 0x10;
+ ch->timer_bit = 0;
break;
}
@@ -894,8 +896,6 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
ch->match_value = ch->max_match_value;
raw_spin_lock_init(&ch->lock);
- ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 0 : ch->hwidx;
-
ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
clockevent, clocksource);
if (ret) {
@@ -935,22 +935,18 @@ static const struct platform_device_id sh_cmt_id_table[] = {
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
- { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] },
- { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] },
{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
- { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] },
+ {
+ /* deprecated, preserved for backward compatibility */
+ .compatible = "renesas,cmt-48-gen2",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] },
+ { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] },
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
-static int sh_cmt_parse_dt(struct sh_cmt_device *cmt)
-{
- struct device_node *np = cmt->pdev->dev.of_node;
-
- return of_property_read_u32(np, "renesas,channels-mask",
- &cmt->hw_channels);
-}
-
static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
unsigned int mask;
@@ -961,14 +957,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
raw_spin_lock_init(&cmt->lock);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- const struct of_device_id *id;
-
- id = of_match_node(sh_cmt_of_table, pdev->dev.of_node);
- cmt->info = id->data;
-
- ret = sh_cmt_parse_dt(cmt);
- if (ret < 0)
- return ret;
+ cmt->info = of_device_get_match_data(&pdev->dev);
+ cmt->hw_channels = cmt->info->channels_mask;
} else if (pdev->dev.platform_data) {
struct sh_timer_config *cfg = pdev->dev.platform_data;
const struct platform_device_id *id = pdev->id_entry;
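The sh_cmt rework above replaces the open-coded of_match_node() lookup and the "renesas,channels-mask" DT parsing with of_device_get_match_data() plus a channels_mask baked into each match entry. A minimal sketch of that pattern, with illustrative type and compatible names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_info {
	unsigned int channels_mask;
};

static const struct example_info example_a = { .channels_mask = 0x3f };

static const struct of_device_id example_of_table[] = {
	{ .compatible = "vendor,example-a", .data = &example_a },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_info *info;

	/* Returns the .data of the matching of_device_id entry, or NULL. */
	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -ENODEV;

	return 0;
}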
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 6a8d9838ce33..3f94e454ef99 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/clocksource.h>
#include <linux/sched_clock.h>
#include <linux/of_address.h>
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 59e8aee0ec16..9de47d4d2d9e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index 66dd909960c6..c020038ebfab 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Faraday Technology FTTMR010 timer driver
* Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
@@ -263,14 +264,14 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
fttmr010->base = of_iomap(np, 0);
if (!fttmr010->base) {
- pr_err("Can't remap registers");
+ pr_err("Can't remap registers\n");
ret = -ENXIO;
goto out_free;
}
/* IRQ for timer 1 */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
- pr_err("Can't parse IRQ");
+ pr_err("Can't parse IRQ\n");
ret = -EINVAL;
goto out_unmap;
}
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index c79122d8e10d..a31990408153 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -176,3 +176,22 @@ out_fail:
timer_base_exit(&to->of_base);
return ret;
}
+
+/**
+ * timer_of_cleanup - release timer_of resources
+ * @to: timer_of structure
+ *
+ * Release the resources that have been used in timer_of_init().
+ * This function should be called in init error cases.
+ */
+void __init timer_of_cleanup(struct timer_of *to)
+{
+ if (to->flags & TIMER_OF_IRQ)
+ timer_irq_exit(&to->of_irq);
+
+ if (to->flags & TIMER_OF_CLOCK)
+ timer_clk_exit(&to->of_clk);
+
+ if (to->flags & TIMER_OF_BASE)
+ timer_base_exit(&to->of_base);
+}
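timer_of_cleanup() gives timer drivers a way to undo timer_of_init() when a later initialisation step fails. A hedged sketch of the intended call pattern; the hardware setup step and clockevent limits are made up, and timer_of_rate() is assumed to be the existing accessor in timer-of.h:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/of.h>
#include "timer-of.h"

static struct timer_of example_to = {
	.flags = TIMER_OF_BASE | TIMER_OF_CLOCK | TIMER_OF_IRQ,
	/* a real driver also fills in .clkevt and .of_irq.handler */
};

/* Hypothetical hardware programming step that can fail. */
static int example_hw_setup(struct timer_of *to)
{
	return 0;
}

static int __init example_timer_init(struct device_node *np)
{
	int ret;

	ret = timer_of_init(np, &example_to);
	if (ret)
		return ret;

	ret = example_hw_setup(&example_to);
	if (ret) {
		timer_of_cleanup(&example_to);	/* release base, clock and irq */
		return ret;
	}

	clockevents_config_and_register(&example_to.clkevt,
					timer_of_rate(&example_to), 1, 0xffffffff);
	return 0;
}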
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index e0d727255f72..3f708f1be43d 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TIMER_OF_H__
#define __TIMER_OF_H__
@@ -66,4 +67,7 @@ static inline unsigned long timer_of_period(struct timer_of *to)
extern int __init timer_of_init(struct device_node *np,
struct timer_of *to);
+
+extern void __init timer_of_cleanup(struct timer_of *to);
+
#endif
diff --git a/drivers/clocksource/timer-sp.h b/drivers/clocksource/timer-sp.h
index 050d88561e9c..b2037eb94a41 100644
--- a/drivers/clocksource/timer-sp.h
+++ b/drivers/clocksource/timer-sp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM timer implementation, found in Integrator, Versatile and Realview
* platforms. Not all platforms support all registers and bits in these
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 1f8bf054d11c..9c54fdf7acea 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -45,7 +45,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
return NULL;
}
- atomic_set(&cbq->refcnt, 1);
+ refcount_set(&cbq->refcnt, 1);
atomic_inc(&dev->refcnt);
cbq->pdev = dev;
@@ -58,7 +58,7 @@ cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
- if (!atomic_dec_and_test(&cbq->refcnt))
+ if (!refcount_dec_and_test(&cbq->refcnt))
return;
atomic_dec(&cbq->pdev->refcnt);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 25693b045371..8615594bd065 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -157,7 +157,7 @@ static int cn_call_callback(struct sk_buff *skb)
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) {
- atomic_inc(&i->refcnt);
+ refcount_inc(&i->refcnt);
cbq = i;
break;
}
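The connector hunks above convert the callback entry's reference count from atomic_t to refcount_t, which saturates and warns on overflow/underflow instead of silently wrapping. A minimal sketch of the same get/put pattern on an illustrative structure:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_entry {
	refcount_t refcnt;
	/* payload ... */
};

static struct example_entry *example_entry_alloc(void)
{
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (e)
		refcount_set(&e->refcnt, 1);	/* one reference for the caller */
	return e;
}

static void example_entry_get(struct example_entry *e)
{
	refcount_inc(&e->refcnt);		/* warns if the count was already 0 */
}

static void example_entry_put(struct example_entry *e)
{
	if (refcount_dec_and_test(&e->refcnt))
		kfree(e);			/* last reference dropped */
}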
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index c7af9b2a255e..812f9e0d01a3 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# CPUfreq core
obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 17504129fd77..65ec5f01aa8d 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -57,7 +57,7 @@ static bool bL_switching_enabled;
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
-static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static const struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -213,6 +213,7 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
{
u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
unsigned int freqs_new;
+ int ret;
cur_cluster = cpu_to_cluster(cpu);
new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
@@ -229,7 +230,14 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
}
}
- return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+ ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
+
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freqs_new,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
@@ -609,7 +617,7 @@ static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif
-int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops)
{
int ret, i;
@@ -653,7 +661,7 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
-void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops)
{
if (arm_bL_ops != ops) {
pr_err("%s: Registered with: %s, can't unregister, exiting\n",
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 184d7c3a112a..88a176e466c8 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -37,7 +37,7 @@ struct cpufreq_arm_bL_ops {
void (*free_opp_table)(const struct cpumask *cpumask);
};
-int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
-void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+int bL_cpufreq_register(const struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(const struct cpufreq_arm_bL_ops *ops);
#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 39b3f51d9a30..b944f290c8a4 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -61,7 +61,7 @@ static int dt_get_transition_latency(struct device *cpu_dev)
return transition_latency;
}
-static struct cpufreq_arm_bL_ops dt_bL_ops = {
+static const struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
.init_opp_table = dev_pm_opp_of_cpumask_add_table,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a753c50e9e41..ecc56e26f8f6 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -48,7 +48,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
- { .compatible = "samsung,exynos4212", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
{ .compatible = "samsung,exynos5800", },
@@ -83,8 +82,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
- { .compatible = "socionext,uniphier-ld6b", },
-
{ .compatible = "st-ericsson,u8500", },
{ .compatible = "st-ericsson,u8540", },
{ .compatible = "st-ericsson,u9500", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index d83ab94d041a..545946ad0752 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -43,9 +43,17 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct private_data *priv = policy->driver_data;
+ unsigned long freq = policy->freq_table[index].frequency;
+ int ret;
+
+ ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
- return dev_pm_opp_set_rate(priv->cpu_dev,
- policy->freq_table[index].frequency * 1000);
+ if (!ret) {
+ arch_set_freq_scale(policy->related_cpus, freq,
+ policy->cpuinfo.max_freq);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ea43b147a7fe..41d148af7748 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -161,6 +161,12 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
+__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
+{
+}
+EXPORT_SYMBOL_GPL(arch_set_freq_scale);
+
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do following:
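The __weak arch_set_freq_scale() stub added above lets cpufreq drivers (see the cpufreq-dt and arm_big_little hunks earlier) report the current/maximum frequency ratio without caring whether the architecture consumes it. A sketch of what an architecture-side override could look like; the per-CPU variable and the 1024 capacity scale are assumptions, not taken from this patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>

#define EXAMPLE_CAPACITY_SCALE	1024UL	/* assumed fixed-point scale */

static DEFINE_PER_CPU(unsigned long, example_freq_scale) = EXAMPLE_CAPACITY_SCALE;

/* A non-weak definition with the same signature overrides the stub above. */
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale = (cur_freq * EXAMPLE_CAPACITY_SCALE) / max_freq;
	int cpu;

	for_each_cpu(cpu, cpus)
		per_cpu(example_freq_scale, cpu) = scale;
}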
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e75880eb037d..1e55b5790853 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -118,8 +118,11 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+
+ if (len >= PAGE_SIZE) {
+ pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
+ return -EFBIG;
+ }
return len;
}
cpufreq_freq_attr_ro(trans_table);
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index 601b88c490cf..455b4fb78cba 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 22b2cdde74d9..4c4b5dd685e3 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 14466a9b01c0..628fe899cb48 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -191,6 +192,57 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.suspend = cpufreq_generic_suspend,
};
+#define OCOTP_CFG3 0x440
+#define OCOTP_CFG3_SPEED_SHIFT 16
+#define OCOTP_CFG3_SPEED_1P2GHZ 0x3
+#define OCOTP_CFG3_SPEED_996MHZ 0x2
+#define OCOTP_CFG3_SPEED_852MHZ 0x1
+
+static void imx6q_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * SPEED_GRADING[1:0] defines the max speed of ARM:
+ * 2b'11: 1200000000Hz;
+ * 2b'10: 996000000Hz;
+ * 2b'01: 852000000Hz; -- i.MX6Q Only, exclusive with 996MHz.
+ * 2b'00: 792000000Hz;
+ * We need to set the max speed of ARM according to fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+
+ if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
+ of_machine_is_compatible("fsl,imx6q"))
+ if (dev_pm_opp_disable(dev, 1200000000))
+ dev_warn(dev, "failed to disable 1.2GHz OPP\n");
+ if (val < OCOTP_CFG3_SPEED_996MHZ)
+ if (dev_pm_opp_disable(dev, 996000000))
+ dev_warn(dev, "failed to disable 996MHz OPP\n");
+ if (of_machine_is_compatible("fsl,imx6q")) {
+ if (val != OCOTP_CFG3_SPEED_852MHZ)
+ if (dev_pm_opp_disable(dev, 852000000))
+ dev_warn(dev, "failed to disable 852MHz OPP\n");
+ }
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -252,28 +304,21 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- /*
- * We expect an OPP table supplied by platform.
- * Just, incase the platform did not supply the OPP
- * table, it will try to get it.
- */
- num = dev_pm_opp_get_opp_count(cpu_dev);
- if (num < 0) {
- ret = dev_pm_opp_of_add_table(cpu_dev);
- if (ret < 0) {
- dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
- goto put_reg;
- }
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret < 0) {
+ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret);
+ goto put_reg;
+ }
- /* Because we have added the OPPs here, we must free them */
- free_opp = true;
+ imx6q_opp_check_speed_grading(cpu_dev);
- num = dev_pm_opp_get_opp_count(cpu_dev);
- if (num < 0) {
- ret = num;
- dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
- goto out_free_opp;
- }
+ /* Because we have added the OPPs here, we must free them */
+ free_opp = true;
+ num = dev_pm_opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto out_free_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 062d71434e47..b01e31db5f83 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1043,7 +1043,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
- pr_err("unable to alloc powernow_k8_data");
+ pr_err("unable to alloc powernow_k8_data\n");
return -ENOMEM;
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 3ff5160451b4..b6d7c4c98d0a 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -90,6 +90,7 @@ struct global_pstate_info {
int last_gpstate_idx;
spinlock_t gpstate_lock;
struct timer_list timer;
+ struct cpufreq_policy *policy;
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
@@ -625,10 +626,10 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
* according quadratic equation. Queues a new timer if it is still not equal
* to local pstate
*/
-void gpstate_timer_handler(unsigned long data)
+void gpstate_timer_handler(struct timer_list *t)
{
- struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
- struct global_pstate_info *gpstates = policy->driver_data;
+ struct global_pstate_info *gpstates = from_timer(gpstates, t, timer);
+ struct cpufreq_policy *policy = gpstates->policy;
int gpstate_idx, lpstate_idx;
unsigned long val;
unsigned int time_diff = jiffies_to_msecs(jiffies)
@@ -800,9 +801,9 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = gpstates;
/* initialize timer */
- init_timer_pinned_deferrable(&gpstates->timer);
- gpstates->timer.data = (unsigned long)policy;
- gpstates->timer.function = gpstate_timer_handler;
+ gpstates->policy = policy;
+ timer_setup(&gpstates->timer, gpstate_timer_handler,
+ TIMER_PINNED | TIMER_DEFERRABLE);
gpstates->timer.expires = jiffies +
msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
spin_lock_init(&gpstates->gpstate_lock);
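The powernv change above is part of the timer API conversion: the handler now takes a struct timer_list * and recovers its container with from_timer(), while timer_setup() replaces init_timer() plus manual .function/.data assignment. A minimal sketch of the pattern with illustrative names:

#include <linux/timer.h>

struct example_state {
	struct timer_list timer;
	int hits;
};

static void example_timer_fn(struct timer_list *t)
{
	struct example_state *st = from_timer(st, t, timer);

	st->hits++;				/* container recovered from the timer */
	mod_timer(&st->timer, jiffies + HZ);	/* re-arm one second later */
}

static void example_start(struct example_state *st)
{
	timer_setup(&st->timer, example_timer_fn, TIMER_DEFERRABLE);
	mod_timer(&st->timer, jiffies + HZ);
}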
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index 3eace725ccd6..9d973519d669 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ppc_cbe_cpufreq.h
*
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index ce345bf34d5d..06b024a3e474 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -58,56 +58,40 @@ module_param(pxa27x_maxfreq, uint, 0);
MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz"
"(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)");
+struct pxa_cpufreq_data {
+ struct clk *clk_core;
+};
+static struct pxa_cpufreq_data pxa_cpufreq_data;
+
struct pxa_freqs {
unsigned int khz;
- unsigned int membus;
- unsigned int cccr;
- unsigned int div2;
- unsigned int cclkcfg;
int vmin;
int vmax;
};
-/* Define the refresh period in mSec for the SDRAM and the number of rows */
-#define SDRAM_TREF 64 /* standard 64ms SDRAM */
-static unsigned int sdram_rows;
-
-#define CCLKCFG_TURBO 0x1
-#define CCLKCFG_FCS 0x2
-#define CCLKCFG_HALFTURBO 0x4
-#define CCLKCFG_FASTBUS 0x8
-#define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2)
-#define MDREFR_DRI_MASK 0xFFF
-
-#define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3)
-#define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3)
-
/*
* PXA255 definitions
*/
-/* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */
-#define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS
-
static const struct pxa_freqs pxa255_run_freqs[] =
{
- /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
- { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
- {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */
- {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */
- {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */
- {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */
- {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */
+ /* CPU run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {132700, -1, -1}, /* 133, 133, 66, 66 */
+ {199100, -1, -1}, /* 199, 199, 99, 99 */
+ {265400, -1, -1}, /* 265, 265, 133, 66 */
+ {331800, -1, -1}, /* 331, 331, 166, 83 */
+ {398100, -1, -1}, /* 398, 398, 196, 99 */
};
/* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */
static const struct pxa_freqs pxa255_turbo_freqs[] =
{
- /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */
- { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */
- {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */
- {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */
- {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */
- {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */
+ /* CPU run turbo PXbus SDRAM */
+ { 99500, -1, -1}, /* 99, 99, 50, 50 */
+ {199100, -1, -1}, /* 99, 199, 50, 99 */
+ {298500, -1, -1}, /* 99, 287, 50, 99 */
+ {298600, -1, -1}, /* 199, 287, 99, 99 */
+ {398100, -1, -1}, /* 199, 398, 99, 99 */
};
#define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs)
@@ -122,47 +106,14 @@ static unsigned int pxa255_turbo_table;
module_param(pxa255_turbo_table, uint, 0);
MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)");
-/*
- * PXA270 definitions
- *
- * For the PXA27x:
- * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG.
- *
- * A = 0 => memory controller clock from table 3-7,
- * A = 1 => memory controller clock = system bus clock
- * Run mode frequency = 13 MHz * L
- * Turbo mode frequency = 13 MHz * L * N
- * System bus frequency = 13 MHz * L / (B + 1)
- *
- * In CCCR:
- * A = 1
- * L = 16 oscillator to run mode ratio
- * 2N = 6 2 * (turbo mode to run mode ratio)
- *
- * In CCLKCFG:
- * B = 1 Fast bus mode
- * HT = 0 Half-Turbo mode
- * T = 1 Turbo mode
- *
- * For now, just support some of the combinations in table 3-7 of
- * PXA27x Processor Family Developer's Manual to simplify frequency
- * change sequences.
- */
-#define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L)
-#define CCLKCFG2(B, HT, T) \
- (CCLKCFG_FCS | \
- ((B) ? CCLKCFG_FASTBUS : 0) | \
- ((HT) ? CCLKCFG_HALFTURBO : 0) | \
- ((T) ? CCLKCFG_TURBO : 0))
-
static struct pxa_freqs pxa27x_freqs[] = {
- {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
- {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
- {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
- {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
- {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },
- {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 },
- {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 }
+ {104000, 900000, 1705000 },
+ {156000, 1000000, 1705000 },
+ {208000, 1180000, 1705000 },
+ {312000, 1250000, 1705000 },
+ {416000, 1350000, 1705000 },
+ {520000, 1450000, 1705000 },
+ {624000, 1550000, 1705000 }
};
#define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs)
@@ -241,51 +192,29 @@ static void pxa27x_guess_max_freq(void)
}
}
-static void init_sdram_rows(void)
-{
- uint32_t mdcnfg = __raw_readl(MDCNFG);
- unsigned int drac2 = 0, drac0 = 0;
-
- if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3))
- drac2 = MDCNFG_DRAC2(mdcnfg);
-
- if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1))
- drac0 = MDCNFG_DRAC0(mdcnfg);
-
- sdram_rows = 1 << (11 + max(drac0, drac2));
-}
-
-static u32 mdrefr_dri(unsigned int freq)
-{
- u32 interval = freq * SDRAM_TREF / sdram_rows;
-
- return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
-}
-
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
- return get_clk_frequency_khz(0);
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+
+ return (unsigned int) clk_get_rate(data->clk_core) / 1000;
}
static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
{
struct cpufreq_frequency_table *pxa_freqs_table;
const struct pxa_freqs *pxa_freq_settings;
- unsigned long flags;
- unsigned int new_freq_cpu, new_freq_mem;
- unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg;
+ struct pxa_cpufreq_data *data = cpufreq_get_driver_data();
+ unsigned int new_freq_cpu;
int ret = 0;
/* Get the current policy */
find_freq_tables(&pxa_freqs_table, &pxa_freq_settings);
new_freq_cpu = pxa_freq_settings[idx].khz;
- new_freq_mem = pxa_freq_settings[idx].membus;
if (freq_debug)
- pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
- new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ?
- (new_freq_mem / 2000) : (new_freq_mem / 1000));
+ pr_debug("Changing CPU frequency from %d Mhz to %d Mhz\n",
+ policy->cur / 1000, new_freq_cpu / 1000);
if (vcc_core && new_freq_cpu > policy->cur) {
ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]);
@@ -293,53 +222,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
return ret;
}
- /* Calculate the next MDREFR. If we're slowing down the SDRAM clock
- * we need to preset the smaller DRI before the change. If we're
- * speeding up we need to set the larger DRI value after the change.
- */
- preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR);
- if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) {
- preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK);
- preset_mdrefr |= mdrefr_dri(new_freq_mem);
- }
- postset_mdrefr =
- (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem);
-
- /* If we're dividing the memory clock by two for the SDRAM clock, this
- * must be set prior to the change. Clearing the divide must be done
- * after the change.
- */
- if (pxa_freq_settings[idx].div2) {
- preset_mdrefr |= MDREFR_DB2_MASK;
- postset_mdrefr |= MDREFR_DB2_MASK;
- } else {
- postset_mdrefr &= ~MDREFR_DB2_MASK;
- }
-
- local_irq_save(flags);
-
- /* Set new the CCCR and prepare CCLKCFG */
- writel(pxa_freq_settings[idx].cccr, CCCR);
- cclkcfg = pxa_freq_settings[idx].cclkcfg;
-
- asm volatile(" \n\
- ldr r4, [%1] /* load MDREFR */ \n\
- b 2f \n\
- .align 5 \n\
-1: \n\
- str %3, [%1] /* preset the MDREFR */ \n\
- mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\
- str %4, [%1] /* postset the MDREFR */ \n\
- \n\
- b 3f \n\
-2: b 1b \n\
-3: nop \n\
- "
- : "=&r" (unused)
- : "r" (MDREFR), "r" (cclkcfg),
- "r" (preset_mdrefr), "r" (postset_mdrefr)
- : "r4", "r5");
- local_irq_restore(flags);
+ clk_set_rate(data->clk_core, new_freq_cpu * 1000);
/*
* Even if voltage setting fails, we don't report it, as the frequency
@@ -369,8 +252,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
pxa_cpufreq_init_voltages();
- init_sdram_rows();
-
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
@@ -429,11 +310,17 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
.init = pxa_cpufreq_init,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
+ .driver_data = &pxa_cpufreq_data,
};
static int __init pxa_cpu_init(void)
{
int ret = -ENODEV;
+
+ pxa_cpufreq_data.clk_core = clk_get_sys(NULL, "core");
+ if (IS_ERR(pxa_cpufreq_data.clk_core))
+ return PTR_ERR(pxa_cpufreq_data.clk_core);
+
if (cpu_is_pxa25x() || cpu_is_pxa27x())
ret = cpufreq_register_driver(&pxa_cpufreq_driver);
return ret;
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 8de2364b5995..05d299052c5c 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -53,7 +53,7 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
-static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
+static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4894924a3ca2..195f27f9c1cb 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -177,7 +177,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
np = of_cpu_device_node_get(0);
if (!np) {
- pr_err("No cpu node found");
+ pr_err("No cpu node found\n");
return -ENODEV;
}
@@ -187,7 +187,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
prop = of_find_property(np, "cpufreq_tbl", NULL);
if (!prop || !prop->value) {
- pr_err("Invalid cpufreq_tbl");
+ pr_err("Invalid cpufreq_tbl\n");
ret = -ENODEV;
goto out_put_node;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index ccab452a4ef5..8085ec9000d1 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -367,7 +367,7 @@ unsigned int speedstep_detect_processor(void)
} else
return SPEEDSTEP_CPU_PIII_C;
}
-
+ /* fall through */
default:
return 0;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 4bf47de6101f..923317f03b4b 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -205,6 +205,7 @@ static int ti_cpufreq_init(void)
np = of_find_node_by_path("/");
match = of_match_node(ti_cpufreq_of_match, np);
+ of_node_put(np);
if (!match)
return -ENODEV;
@@ -217,7 +218,8 @@ static int ti_cpufreq_init(void)
opp_data->cpu_dev = get_cpu_device(0);
if (!opp_data->cpu_dev) {
pr_err("%s: Failed to get device for CPU0\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto free_opp_data;
}
opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
@@ -262,6 +264,8 @@ register_cpufreq_dt:
fail_put_node:
of_node_put(opp_data->opp_node);
+free_opp_data:
+ kfree(opp_data);
return ret;
}
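
Both fixes above follow the usual OF refcount and error-unwind rules: a node obtained with of_find_node_by_path() must be released with of_node_put(), and memory allocated before a failing step must be freed on that error path. A standalone sketch of the pattern, with illustrative names:

	#include <linux/of.h>
	#include <linux/slab.h>

	static int example_probe(void)
	{
		struct device_node *root;
		void *data;
		int ret = 0;

		root = of_find_node_by_path("/");	/* takes a reference */
		if (!root)
			return -ENOENT;

		data = kzalloc(16, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto out_put;
		}

		/* ... further setup that may fail would also free data ... */

		kfree(data);
	out_put:
		of_node_put(root);		/* balance the reference taken above */
		return ret;
	}
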
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 87e5bdc5ec74..53237289e606 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -42,7 +42,7 @@ static int ve_spc_get_transition_latency(struct device *cpu_dev)
return 1000000; /* 1 ms */
}
-static struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
+static const struct cpufreq_arm_bL_ops ve_spc_cpufreq_ops = {
.name = "vexpress-spc",
.get_transition_latency = ve_spc_get_transition_latency,
.init_opp_table = ve_spc_init_opp_table,
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0b67a05a7aae..9d7176cee3d3 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for cpuidle.
#
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 52a75053ee03..ddee1b601b89 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -72,12 +72,94 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
};
/*
- * arm_idle_init
+ * arm_idle_init_cpu
*
* Registers the arm specific cpuidle driver with the cpuidle
* framework. It relies on core code to parse the idle states
* and initialize them using driver data structures accordingly.
*/
+static int __init arm_idle_init_cpu(int cpu)
+{
+ int ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+
+ drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1. This
+ * driver is DT only, if no DT idle states are detected (ret
+ * == 0) let the driver initialization fail accordingly since
+ * there is no reason to initialize the idle driver if only
+ * wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
+ ret = arm_cpuidle_init(cpu);
+
+ /*
+ * Skip the cpuidle device initialization if the reported
+ * failure is a HW misconfiguration/breakage (-ENXIO).
+ */
+ if (ret == -ENXIO)
+ return 0;
+
+ if (ret) {
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ goto out_unregister_drv;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ pr_err("Failed to allocate cpuidle device\n");
+ ret = -ENOMEM;
+ goto out_unregister_drv;
+ }
+ dev->cpu = cpu;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpuidle device for CPU %d\n",
+ cpu);
+ goto out_kfree_dev;
+ }
+
+ return 0;
+
+out_kfree_dev:
+ kfree(dev);
+out_unregister_drv:
+ cpuidle_unregister_driver(drv);
+out_kfree_drv:
+ kfree(drv);
+ return ret;
+}
+
+/*
+ * arm_idle_init - Initializes arm cpuidle driver
+ *
+ * Initializes arm cpuidle driver for all CPUs; if any CPU fails
+ * to register the cpuidle driver, roll back and cancel the
+ * registration of all CPUs already set up.
+ */
static int __init arm_idle_init(void)
{
int cpu, ret;
@@ -85,79 +167,20 @@ static int __init arm_idle_init(void)
struct cpuidle_device *dev;
for_each_possible_cpu(cpu) {
-
- drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
- if (!drv) {
- ret = -ENOMEM;
- goto out_fail;
- }
-
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
-
- /*
- * Initialize idle states data, starting at index 1. This
- * driver is DT only, if no DT idle states are detected (ret
- * == 0) let the driver initialization fail accordingly since
- * there is no reason to initialize the idle driver if only
- * wfi is supported.
- */
- ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
- if (ret <= 0) {
- ret = ret ? : -ENODEV;
- goto init_fail;
- }
-
- ret = cpuidle_register_driver(drv);
- if (ret) {
- pr_err("Failed to register cpuidle driver\n");
- goto init_fail;
- }
-
- /*
- * Call arch CPU operations in order to initialize
- * idle states suspend back-end specific data
- */
- ret = arm_cpuidle_init(cpu);
-
- /*
- * Skip the cpuidle device initialization if the reported
- * failure is a HW misconfiguration/breakage (-ENXIO).
- */
- if (ret == -ENXIO)
- continue;
-
- if (ret) {
- pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_fail;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- pr_err("Failed to allocate cpuidle device\n");
- ret = -ENOMEM;
+ ret = arm_idle_init_cpu(cpu);
+ if (ret)
goto out_fail;
- }
- dev->cpu = cpu;
-
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- kfree(dev);
- goto out_fail;
- }
}
return 0;
-init_fail:
- kfree(drv);
+
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev);
- kfree(dev);
- drv = cpuidle_get_driver();
cpuidle_unregister_driver(drv);
+ kfree(dev);
kfree(drv);
}
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 72b5e47286b4..dac8ff6391fa 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2014 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 42896a67aeae..e06605b21841 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cpuidle-powernv - idle state cpuidle driver.
* Adapted from drivers/cpuidle/cpuidle-pseries
@@ -383,9 +384,9 @@ static int powernv_add_idle_states(void)
* Firmware passes residency and latency values in ns.
* cpuidle expects it in us.
*/
- exit_latency = latency_ns[i] / 1000;
+ exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
if (!rc)
- target_residency = residency_ns[i] / 1000;
+ target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
else
target_residency = 0;
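
The switch to DIV_ROUND_UP matters because plain integer division would turn a short but nonzero firmware latency (for example 500 ns) into 0 us, making the state look free to the governor. A standalone sketch of the macro's effect, using the kernel's definition of DIV_ROUND_UP:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long latency_ns = 500;

		printf("truncating:  %lu us\n", latency_ns / 1000);               /* 0 */
		printf("rounding up: %lu us\n", DIV_ROUND_UP(latency_ns, 1000)); /* 1 */
		return 0;
	}
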
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index e9b3853d93ea..a187a39fb866 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cpuidle-pseries - idle state cpuidle driver.
* Adapted from drivers/idle/intel_idle.c and
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 484cc8909d5c..68a16827f45f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -208,6 +208,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
return -EBUSY;
}
target_state = &drv->states[index];
+ broadcast = false;
}
/* Take note of the planned idle state. */
@@ -387,9 +388,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
+ if (!cpuidle_curr_governor)
+ return -EIO;
+
drv = cpuidle_get_cpu_driver(dev);
- if (!drv || !cpuidle_curr_governor)
+ if (!drv)
return -EIO;
if (!dev->registered)
@@ -399,9 +403,11 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (ret)
return ret;
- if (cpuidle_curr_governor->enable &&
- (ret = cpuidle_curr_governor->enable(drv, dev)))
- goto fail_sysfs;
+ if (cpuidle_curr_governor->enable) {
+ ret = cpuidle_curr_governor->enable(drv, dev);
+ if (ret)
+ goto fail_sysfs;
+ }
smp_wmb();
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index f87f399b0540..2965ab32a583 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* cpuidle.h - The internal header file
*/
diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h
index 4818134bc65b..14ae88cef1de 100644
--- a/drivers/cpuidle/dt_idle_states.h
+++ b/drivers/cpuidle/dt_idle_states.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DT_IDLE_STATES
#define __DT_IDLE_STATES
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ce1a2ffffb2a..1ad8745fd6d6 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -17,6 +17,7 @@
#include <linux/pm_qos.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -67,10 +68,16 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
+ struct device *device = get_cpu_device(dev->cpu);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ int resume_latency = dev_pm_qos_raw_read_value(device);
+
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
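
The ladder change above tightens the latency budget with the per-device resume latency unless the device reports "no constraint". A standalone sketch of that clamp; the sentinel value below is a stand-in, the real one is defined in the PM QoS headers:

	#include <stdio.h>

	/* stand-in for PM_QOS_RESUME_LATENCY_NO_CONSTRAINT */
	#define NO_CONSTRAINT	0x7fffffff

	static int effective_latency(int global_req, int dev_resume_latency)
	{
		/* the tighter (smaller) bound wins, unless the device has no constraint */
		if (dev_resume_latency < global_req && dev_resume_latency != NO_CONSTRAINT)
			return dev_resume_latency;
		return global_req;
	}

	int main(void)
	{
		printf("%d\n", effective_latency(200, 50));            /* device bound wins: 50 */
		printf("%d\n", effective_latency(200, NO_CONSTRAINT)); /* global bound kept: 200 */
		return 0;
	}
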
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fe33c199fc1a..47ec920d5b71 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -199,22 +199,8 @@ config CRYPTO_CRC32_S390
It is available with IBM z13 or later.
-config CRYPTO_DEV_MV_CESA
- tristate "Marvell's Cryptographic Engine"
- depends on PLAT_ORION
- select CRYPTO_AES
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select SRAM
- help
- This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Marvell Orion
- and Kirkwood SoCs, such as QNAP's TS-209.
-
- Currently the driver supports AES in ECB and CBC mode without DMA.
-
config CRYPTO_DEV_MARVELL_CESA
- tristate "New Marvell's Cryptographic Engine driver"
+ tristate "Marvell's Cryptographic Engine driver"
depends on PLAT_ORION || ARCH_MVEBU
select CRYPTO_AES
select CRYPTO_DES
@@ -223,12 +209,10 @@ config CRYPTO_DEV_MARVELL_CESA
select SRAM
help
This driver allows you to utilize the Cryptographic Engines and
- Security Accelerator (CESA) which can be found on the Armada 370.
+ Security Accelerator (CESA) which can be found on MVEBU and ORION
+ platforms.
This driver supports CPU offload through DMA transfers.
- This driver is aimed at replacing the mv_cesa driver. This will only
- happen once it has received proper testing.
-
config CRYPTO_DEV_NIAGARA2
tristate "Niagara2 Stream Processing Unit driver"
select CRYPTO_DES
@@ -315,6 +299,10 @@ config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
select CRYPTO_HASH
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_CCM
+ select CRYPTO_GCM
select CRYPTO_BLKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -439,6 +427,20 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
algorithms execution.
+config CRYPTO_DEV_EXYNOS_HASH
+ bool "Support for Samsung Exynos HASH accelerator"
+ depends on CRYPTO_DEV_S5P
+ depends on !CRYPTO_DEV_EXYNOS_RNG && CRYPTO_DEV_EXYNOS_RNG!=m
+ select CRYPTO_SHA1
+ select CRYPTO_MD5
+ select CRYPTO_SHA256
+ help
+ Select this to offload MD5/SHA1/SHA256 hashing to the Exynos
+ HASH accelerator. This will also select the software SHA1, MD5
+ and SHA256 implementations, as they are needed for small and
+ zero-size messages.
+ The HASH algorithms will be disabled if EXYNOS_RNG
+ is enabled, due to a hardware conflict.
+
config CRYPTO_DEV_NX
bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
depends on PPC64
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 808432b44c6b..2513d13ea2c4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
@@ -14,7 +15,6 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index b95539928fdf..e33c185fc163 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4afca3968773..eeaf27859d80 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -26,11 +26,14 @@
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -62,6 +65,7 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -73,29 +77,29 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_OUTBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 0x1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_INBOUND;
- ctx->hash_final = 0;
- ctx->is_hash = 0;
- ctx->pd_ctl = 1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
}
/**
@@ -120,23 +124,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
}
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- rc = crypto4xx_alloc_state_record(ctx);
- if (rc) {
- crypto4xx_free_sa(ctx);
- return rc;
- }
- }
/* Setup SA */
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- ctx->hash_final = 0;
+ sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
@@ -150,18 +146,13 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
- key, keylen);
- sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+ sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
- ctx->is_hash = 0;
- ctx->direction = DIR_INBOUND;
- memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
- (void *)&ctx->state_record_dma_addr, 4);
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
return 0;
@@ -174,6 +165,396 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ int rc;
+
+ rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+ if (rc)
+ return rc;
+
+ ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+ CTR_RFC3686_NONCE_SIZE]);
+
+ return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->iv_nonce,
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len, 0);
+}
+
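+/*
+ * The IV built by the two functions above is the RFC 3686 CTR counter
+ * block: a 4-byte nonce taken from the key material, the 8-byte
+ * per-request IV, and a 32-bit block counter that starts at 1. A
+ * standalone sketch of that layout follows (the driver itself stores
+ * the words in the byte order its engine expects):
+ *
+ *	#include <stdint.h>
+ *	#include <string.h>
+ *	#include <stdio.h>
+ *
+ *	static void rfc3686_counter_block(uint8_t out[16], const uint8_t nonce[4],
+ *					  const uint8_t iv[8], uint32_t ctr)
+ *	{
+ *		memcpy(out, nonce, 4);		// bytes  0..3:  nonce
+ *		memcpy(out + 4, iv, 8);		// bytes  4..11: IV
+ *		out[12] = ctr >> 24;		// bytes 12..15: big-endian counter
+ *		out[13] = ctr >> 16;
+ *		out[14] = ctr >> 8;
+ *		out[15] = ctr;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		uint8_t nonce[4] = {0}, iv[8] = {0}, blk[16];
+ *
+ *		rfc3686_counter_block(blk, nonce, iv, 1);
+ *		printf("last byte = %u\n", (unsigned)blk[15]);	// counter starts at 1
+ *		return 0;
+ *	}
+ */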
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ bool is_ccm, bool decrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ /* authsize has to be a multiple of 4 */
+ if (aead->authsize & 3)
+ return true;
+
+ /*
+ * hardware does not handle cases where cryptlen
+ * is less than a block
+ */
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return true;
+
+ /* assoc len needs to be a multiple of 4 */
+ if (req->assoclen & 0x3)
+ return true;
+
+ /* CCM supports only counter field length of 2 and 4 bytes */
+ if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ return true;
+
+ /* CCM - fix CBC MAC mismatch in special case */
+ if (is_ccm && decrypt && !req->assoclen)
+ return true;
+
+ return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+ struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)]
+ __aligned(__alignof__(struct aead_request));
+
+ struct aead_request *subreq = (void *) aead_req_data;
+
+ memset(subreq, 0, sizeof(aead_req_data));
+
+ aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return do_decrypt ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher.aead,
+ crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+ crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(cipher,
+ crypto_aead_get_flags(ctx->sw_cipher.aead) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int len = req->cryptlen;
+ __le32 iv[16];
+ u32 tmp_sa[ctx->sa_len * 4];
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+
+ if (crypto4xx_aead_need_fallback(req, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(aead);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+ if (req->iv[0] == 1) {
+ /* CRYPTO_MODE_AES_ICM */
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ }
+
+ iv[3] = cpu_to_le32(0);
+ crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ sa, ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
+/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cipher *aes_tfm = NULL;
+ uint8_t src[16] = { 0 };
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ pr_warn("could not load aes cipher driver: %d\n", rc);
+ return rc;
+ }
+
+ rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ pr_err("setkey() failed: %d\n", rc);
+ goto out;
+ }
+
+ crypto_cipher_encrypt_one(aes_tfm, src, src);
+ crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+ crypto_free_cipher(aes_tfm);
+ return rc;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON, SA_MC_DISABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+ key, keylen);
+ if (rc) {
+ pr_err("GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+ bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int len = req->cryptlen;
+ __le32 iv[4];
+
+ if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+ iv[3] = cpu_to_le32(1);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, true);
+}
+
/**
* HASH SHA1 Functions
*/
@@ -183,53 +564,39 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned char hm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_alg *my_alg;
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct dynamic_sa_hash160 *sa;
int rc;
+ my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+ alg.u.hash);
ctx->dev = my_alg->dev;
- ctx->is_hash = 1;
- ctx->hash_final = 0;
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, sa_len);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- crypto4xx_alloc_state_record(ctx);
- if (!ctx->state_record_dma_addr) {
- crypto4xx_free_sa(ctx);
- return -ENOMEM;
- }
- }
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+ set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
/* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
return 0;
}
@@ -240,29 +607,27 @@ int crypto4xx_hash_init(struct ahash_request *req)
int ds;
struct dynamic_sa_ctl *sa;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
ds = crypto_ahash_digestsize(
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
- ctx->is_hash = 1;
- ctx->direction = DIR_INBOUND;
return 0;
}
int crypto4xx_hash_update(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->is_hash = 1;
- ctx->hash_final = 0;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -272,15 +637,16 @@ int crypto4xx_hash_final(struct ahash_request *req)
int crypto4xx_hash_digest(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->hash_final = 1;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len, 0);
}
/**
@@ -291,5 +657,3 @@ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
-
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 65dc78b91dea..c44954e274bc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -35,8 +35,14 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -127,21 +133,17 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
- ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_in_dma_addr, GFP_ATOMIC);
+ ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
- ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_out_dma_addr, GFP_ATOMIC);
+ ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device, size * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
return -ENOMEM;
}
- memset(ctx->sa_in, 0, size * 4);
- memset(ctx->sa_out, 0, size * 4);
ctx->sa_len = size;
return 0;
@@ -149,40 +151,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
- if (ctx->sa_in != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
- if (ctx->sa_out != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_out, ctx->sa_out_dma_addr);
-
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ kfree(ctx->sa_out);
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
-{
- ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- &ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
- return -ENOMEM;
- memset(ctx->state_record, 0, sizeof(struct sa_state_record));
-
- return 0;
-}
-
-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
-{
- if (ctx->state_record != NULL)
- dma_free_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- ctx->state_record,
- ctx->state_record_dma_addr);
- ctx->state_record_dma_addr = 0;
-}
-
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -191,7 +166,6 @@ void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
- struct pd_uinfo *pd_uinfo;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
@@ -207,9 +181,9 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
- 256 * PPC4XX_NUM_PD,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
GFP_ATOMIC);
if (!dev->shadow_sa_pool)
@@ -221,16 +195,17 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
- pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * i);
+ struct ce_pd *pd = &dev->pdr[i];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+ pd->sa = dev->shadow_sa_pool_pa +
+ sizeof(union shadow_sa_buf) * i;
/* alloc 256 bytes which is enough for any kind of dynamic sa */
- pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
- pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+ pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
/* alloc state record */
- pd_uinfo->sr_va = dev->shadow_sr_pool +
- sizeof(struct sa_state_record) * i;
+ pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
sizeof(struct sa_state_record) * i;
}
@@ -240,13 +215,16 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
- if (dev->pdr != NULL)
+ if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+
if (dev->shadow_sa_pool)
- dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
- dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -273,28 +251,21 @@ static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
- struct pd_uinfo *pd_uinfo;
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ u32 tail;
unsigned long flags;
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * idx);
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ pd_uinfo->state = PD_ENTRY_FREE;
+
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
else
dev->pdr_tail = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = dev->pdr_tail;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- return 0;
-}
-
-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
- dma_addr_t *pd_dma, u32 idx)
-{
- *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
-
- return dev->pdr + sizeof(struct ce_pd) * idx;
+ return tail;
}
/**
@@ -326,10 +297,11 @@ static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
* when this function is called.
* preemption or interrupt must be disabled
*/
-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
+
if (n >= PPC4XX_NUM_GD)
return ERING_WAS_FULL;
@@ -372,7 +344,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
{
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
- return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+ return &dev->gdr[idx];
}
/**
@@ -383,7 +355,6 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- struct ce_sd *sd_array;
/* alloc memory for scatter descriptor ring */
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -392,10 +363,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
if (!dev->sdr)
return -ENOMEM;
- dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
if (!dev->scatter_buffer_va) {
dma_free_coherent(dev->core_dev->device,
@@ -404,11 +374,9 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
return -ENOMEM;
}
- sd_array = dev->sdr;
-
for (i = 0; i < PPC4XX_NUM_SD; i++) {
- sd_array[i].ptr = dev->scatter_buffer_pa +
- dev->scatter_buffer_size * i;
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
+ PPC4XX_SD_BUFFER_SIZE * i;
}
return 0;
@@ -416,14 +384,14 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
- if (dev->sdr != NULL)
+ if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
- if (dev->scatter_buffer_va != NULL)
+ if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
dev->scatter_buffer_pa);
}
@@ -477,63 +445,7 @@ static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
{
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
- return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
-}
-
-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
- dma_addr_t *addr, u32 *length,
- u32 *idx, u32 *offset, u32 *nbytes)
-{
- u32 len;
-
- if (*length > dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- dev->scatter_buffer_size);
- *offset = 0;
- *length -= dev->scatter_buffer_size;
- *nbytes -= dev->scatter_buffer_size;
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *addr = *addr + dev->scatter_buffer_size;
- return 1;
- } else if (*length < dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset, *length);
- if ((*offset + *length) == dev->scatter_buffer_size) {
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *nbytes -= *length;
- *offset = 0;
- } else {
- *nbytes -= *length;
- *offset += *length;
- }
-
- return 0;
- } else {
- len = (*nbytes <= dev->scatter_buffer_size) ?
- (*nbytes) : dev->scatter_buffer_size;
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- len);
- *offset = 0;
- *nbytes -= len;
-
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
-
- return 0;
- }
+ return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -542,66 +454,52 @@ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
u32 nbytes,
struct scatterlist *dst)
{
- dma_addr_t addr;
- u32 this_sd;
- u32 offset;
- u32 len;
- u32 i;
- u32 sg_len;
- struct scatterlist *sg;
+ unsigned int first_sd = pd_uinfo->first_sd;
+ unsigned int last_sd;
+ unsigned int overflow = 0;
+ unsigned int to_copy;
+ unsigned int dst_start = 0;
- this_sd = pd_uinfo->first_sd;
- offset = 0;
- i = 0;
+ /*
+ * Because the scatter buffers are all neatly organized in one
+ * big continuous ringbuffer, scatterwalk_map_and_copy() can
+ * be instructed to copy a range of buffers in one go.
+ */
+
+ last_sd = (first_sd + pd_uinfo->num_sd);
+ if (last_sd > PPC4XX_LAST_SD) {
+ last_sd = PPC4XX_LAST_SD;
+ overflow = last_sd % PPC4XX_NUM_SD;
+ }
while (nbytes) {
- sg = &dst[i];
- sg_len = sg->length;
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
-
- if (offset == 0) {
- len = (nbytes <= sg->length) ? nbytes : sg->length;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- i++;
- } else {
- len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
- nbytes : (dev->scatter_buffer_size - offset);
- len = (sg->length < len) ? sg->length : len;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- sg_len -= len;
- if (sg_len) {
- addr += len;
- while (crypto4xx_fill_one_page(dev, &addr,
- &sg_len, &this_sd, &offset, &nbytes))
- ;
- }
- i++;
+ void *buf = dev->scatter_buffer_va +
+ first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+ to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+ (1 + last_sd - first_sd));
+ scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+ nbytes -= to_copy;
+
+ if (overflow) {
+ first_sd = 0;
+ last_sd = overflow;
+ dst_start += to_copy;
+ overflow = 0;
}
}
}
-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+static void crypto4xx_copy_digest_to_dst(void *dst,
+ struct pd_uinfo *pd_uinfo,
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- struct sa_state_record *state_record =
- (struct sa_state_record *) pd_uinfo->sr_va;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+ memcpy(dst, pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
-
- return 0;
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
@@ -623,7 +521,7 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
@@ -644,13 +542,13 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
dst->offset, dst->length, DMA_FROM_DEVICE);
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- if (ablk_req->base.complete != NULL)
- ablk_req->base.complete(&ablk_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+ ablkcipher_request_complete(ablk_req, 0);
}
-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
struct crypto4xx_ctx *ctx;
@@ -659,62 +557,93 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm);
- crypto4xx_copy_digest_to_dst(pd_uinfo,
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- /* call user provided callback function x */
- if (ahash_req->base.complete != NULL)
- ahash_req->base.complete(&ahash_req->base, 0);
- return 0;
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ahash_request_complete(ahash_req, -EINPROGRESS);
+ ahash_request_complete(ahash_req, 0);
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
{
- struct ce_pd *pd;
- struct pd_uinfo *pd_uinfo;
+ struct aead_request *aead_req;
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *dst = pd_uinfo->dest_va;
+ int err = 0;
- pd = dev->pdr + sizeof(struct ce_pd)*idx;
- pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
- if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
- CRYPTO_ALG_TYPE_ABLKCIPHER)
- return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+ base);
+ ctx = crypto_tfm_ctx(aead_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ dst);
+ } else {
+ __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ /* append icv at the end */
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
+
+ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ cp_len);
+
+ scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ cp_len, 1);
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ if (pd->pd_ctl.bf.status & 0x1) {
+ /* authentication error */
+ err = -EBADMSG;
+ } else {
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
+ }
+ err = -EINVAL;
+ }
+ }
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ aead_request_complete(aead_req, -EINPROGRESS);
+
+ aead_request_complete(aead_req, err);
}
-/**
- * Note: Only use this function to copy items that is word aligned.
- */
-void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf,
- int len)
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
- u8 *tmp;
- for (; len >= 4; buf += 4, len -= 4)
- *dst++ = cpu_to_le32(*(unsigned int *) buf);
-
- tmp = (u8 *)dst;
- switch (len) {
- case 3:
- *tmp++ = 0;
- *tmp++ = *(buf+2);
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
- break;
- case 2:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
+ struct ce_pd *pd = &dev->pdr[idx];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+
+ switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
break;
- case 1:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *buf;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto4xx_aead_done(dev, pd_uinfo, pd);
break;
- default:
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto4xx_ahash_done(dev, pd_uinfo);
break;
}
}
@@ -729,17 +658,6 @@ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
kfree(core_dev);
}
-void crypto4xx_return_pd(struct crypto4xx_device *dev,
- u32 pd_entry, struct ce_pd *pd,
- struct pd_uinfo *pd_uinfo)
-{
- /* irq should be already disabled */
- dev->pdr_head = pd_entry;
- pd->pd_ctl.w = 0;
- pd->pd_ctl_len.w = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
-}
-
static u32 get_next_gd(u32 current)
{
if (current != PPC4XX_LAST_GD)
@@ -756,17 +674,19 @@ static u32 get_next_sd(u32 current)
return 0;
}
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len)
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *req_sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen)
{
+ struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
- dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
- struct scatterlist *sg;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -774,22 +694,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
u32 fst_sd = 0xffffffff;
u32 pd_entry;
unsigned long flags;
- struct pd_uinfo *pd_uinfo = NULL;
- unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
+ struct pd_uinfo *pd_uinfo;
+ unsigned int nbytes = datalen;
+ size_t offset_to_sr_ptr;
u32 gd_idx = 0;
+ int tmp;
+ bool is_busy;
- /* figure how many gd is needed */
- num_gd = sg_nents_for_len(src, datalen);
- if ((int)num_gd < 0) {
+ /* figure how many gd are needed */
+ tmp = sg_nents_for_len(src, assoclen + datalen);
+ if (tmp < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
- return -EINVAL;
+ return tmp;
}
- if (num_gd == 1)
- num_gd = 0;
+ if (tmp == 1)
+ tmp = 0;
+ num_gd = tmp;
- /* figure how many sd is needed */
- if (sg_is_last(dst) || ctx->is_hash) {
+ if (assoclen) {
+ nbytes += assoclen;
+ dst = scatterwalk_ffwd(_dst, dst, assoclen);
+ }
+
+ /* figure how many sd are needed */
+ if (sg_is_last(dst)) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -808,6 +736,31 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
* already got must be return the original place.
*/
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ /*
+ * Let the caller know to slow down, once more than 13/16ths = 81%
+ * of the available data contexts are being used simultaneously.
+ *
+ * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+ * 31 more contexts. Before new requests have to be rejected.
+ */
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 13) / 16);
+ } else {
+ /*
+ * To fix contention issues between ipsec (no backlog) and
+ * dm-crypt (backlog), reserve 32 entries for "no backlog"
+ * data contexts.
+ */
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 15) / 16);
+
+ if (is_busy) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EBUSY;
+ }
+ }
+
if (num_gd) {
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
if (fst_gd == ERING_WAS_FULL) {
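
The busy/reject thresholds added above are plain ring-occupancy arithmetic: with 256 packet descriptors, backlog-capable requests are flagged busy once 13/16 (208) are in flight, while requests that cannot back off are rejected at 15/16 (240), keeping a window reserved for the backlog. A standalone sketch of that check:

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_PD 256u

	static unsigned int in_flight(unsigned int head, unsigned int tail)
	{
		return (head - tail) % NUM_PD;	/* ring occupancy, wrap-safe for unsigned */
	}

	int main(void)
	{
		unsigned int head = 250, tail = 20;	/* 230 descriptors in flight */
		bool busy   = in_flight(head, tail) >= (NUM_PD * 13) / 16;
		bool reject = in_flight(head, tail) >= (NUM_PD * 15) / 16;

		printf("in flight=%u busy=%d reject=%d\n",
		       in_flight(head, tail), busy, reject);	/* 230 1 0 */
		return 0;
	}
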
@@ -835,38 +788,28 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * pd_entry);
- pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ pd = &dev->pdr[pd_entry];
+ pd->sa_len = sa_len;
+
+ pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
- if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
- pd->sa = pd_uinfo->sa_pa;
- sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
- if (ctx->direction == DIR_INBOUND)
- memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
- else
- memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+ if (iv_len)
+ memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
- memcpy((void *) sa + ctx->offset_to_sr_ptr,
- &pd_uinfo->sr_pa, 4);
+ sa = pd_uinfo->sa_va;
+ memcpy(sa, req_sa, sa_len * 4);
+
+ sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+ offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
- if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
- } else {
- if (ctx->direction == DIR_INBOUND) {
- pd->sa = ctx->sa_in_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- } else {
- pd->sa = ctx->sa_out_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
- }
- }
- pd->sa_len = ctx->sa_len;
if (num_gd) {
+ dma_addr_t gd_dma;
+ struct scatterlist *sg;
+
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
@@ -875,27 +818,30 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd->src = gd_dma;
/* enable gather */
sa->sa_command_0.bf.gather = 1;
- idx = 0;
- src = &src[0];
/* walk the sg, and setup gather array */
+
+ sg = src;
while (nbytes) {
- sg = &src[idx];
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
- gd->ptr = addr;
- gd->ctl_len.len = sg->length;
+ size_t len;
+
+ len = min(sg->length, nbytes);
+ gd->ptr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+ gd->ctl_len.len = len;
gd->ctl_len.done = 0;
gd->ctl_len.ready = 1;
- if (sg->length >= nbytes)
+ if (len >= nbytes)
break;
+
nbytes -= sg->length;
gd_idx = get_next_gd(gd_idx);
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
- idx++;
+ sg = sg_next(sg);
}
} else {
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
- src->offset, src->length, DMA_TO_DEVICE);
+ src->offset, min(nbytes, src->length),
+ DMA_TO_DEVICE);
/*
* Disable gather in sa command
*/
@@ -906,25 +852,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
- if (ctx->is_hash || sg_is_last(dst)) {
+ if (sg_is_last(dst)) {
/*
* we know application give us dst a whole piece of memory
* no need to use scatter ring.
- * In case of is_hash, the icv is always at end of src data.
*/
pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
- if (ctx->is_hash)
- pd->dest = virt_to_phys((void *)dst);
- else
- pd->dest = (u32)dma_map_page(dev->core_dev->device,
- sg_page(dst), dst->offset,
- dst->length, DMA_TO_DEVICE);
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ min(datalen, dst->length),
+ DMA_TO_DEVICE);
} else {
+ dma_addr_t sd_dma;
struct ce_sd *sd = NULL;
+
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
@@ -938,7 +883,6 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
sd->ctl.done = 0;
sd->ctl.rdy = 1;
/* sd->ptr should be setup by sd_init routine*/
- idx = 0;
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
nbytes -= PPC4XX_SD_BUFFER_SIZE;
else
@@ -949,67 +893,97 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
- if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
nbytes -= PPC4XX_SD_BUFFER_SIZE;
- else
+ } else {
/*
* SD entry can hold PPC4XX_SD_BUFFER_SIZE,
* which is more than nbytes, so done.
*/
nbytes = 0;
+ }
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
- pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
- pd_uinfo->state = PD_ENTRY_INUSE;
+ pd->pd_ctl.w = PD_CTL_HOST_READY |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ PD_CTL_HASH_FINAL : 0);
+ pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+ pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
wmb();
/* write any value to push engine to read a pd */
+ writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
- return -EINPROGRESS;
+ return is_busy ? -EBUSY : -EINPROGRESS;
}
/**
* Algorithm Registration Functions
*/
-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+ struct crypto4xx_ctx *ctx)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
+}
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- default:
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- break;
- }
+static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
crypto4xx_free_sa(ctx);
- crypto4xx_free_state_record(ctx);
}
-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto4xx_alg_common *crypto_alg,
- int array_size)
+static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+{
+ crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto4xx_alg *amcc_alg;
+
+ ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.aead))
+ return PTR_ERR(ctx->sw_cipher.aead);
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ max(sizeof(struct crypto4xx_ctx), 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto4xx_common_exit(ctx);
+ crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -1024,6 +998,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
alg->dev = sec_dev;
switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ rc = crypto_register_aead(&alg->alg.u.aead);
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
@@ -1033,12 +1011,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
}
- if (rc) {
- list_del(&alg->entry);
+ if (rc)
kfree(alg);
- } else {
+ else
list_add_tail(&alg->entry, &sec_dev->alg_list);
- }
}
return 0;
@@ -1055,6 +1031,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
crypto_unregister_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&alg->alg.u.aead);
+ break;
+
default:
crypto_unregister_alg(&alg->alg.u.cipher);
}
@@ -1068,25 +1048,23 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
struct pd_uinfo *pd_uinfo;
struct ce_pd *pd;
- u32 tail;
-
- while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
- tail = core_dev->dev->pdr_tail;
- pd_uinfo = core_dev->dev->pdr_uinfo +
- sizeof(struct pd_uinfo)*tail;
- pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
- if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
- pd->pd_ctl.bf.pe_done &&
- !pd->pd_ctl.bf.host_ready) {
- pd->pd_ctl.bf.pe_done = 0;
+ u32 tail = core_dev->dev->pdr_tail;
+ u32 head = core_dev->dev->pdr_head;
+
+ do {
+ pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ pd = &core_dev->dev->pdr[tail];
+ if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+ ((READ_ONCE(pd->pd_ctl.w) &
+ (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+ PD_CTL_PE_DONE)) {
crypto4xx_pd_done(core_dev->dev, tail);
- crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
} else {
/* if tail not done, break */
break;
}
- }
+ } while (head != tail);
}
/**
@@ -1110,18 +1088,20 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
/**
* Supported Crypto Algorithms
*/
-struct crypto4xx_alg_common crypto4xx_alg[] = {
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1134,6 +1114,147 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
}
}
}},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+
+ /* AEAD */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
};
/**
@@ -1187,13 +1308,14 @@ static int crypto4xx_probe(struct platform_device *ofdev)
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ ratelimit_default_init(&core_dev->dev->aead_ratelimit);
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
goto err_build_pdr;
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_gdr;
+ goto err_build_pdr;
rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
@@ -1236,12 +1358,11 @@ err_iomap:
err_request_irq:
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
- crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
+ crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
- crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
+ crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
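
The ring-fullness test added to crypto4xx_build_pd() above is easy to misread because pdr_head can wrap below pdr_tail. A minimal userspace sketch of the same arithmetic follows; PPC4XX_NUM_PD and the 15/16 threshold come from the patch, while main() and the sample head/tail values are illustrative only.

/*
 * Sketch of the PD-ring occupancy check from crypto4xx_build_pd().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PPC4XX_NUM_PD 256

static bool pdr_is_busy(uint32_t head, uint32_t tail)
{
	/* in-flight descriptors, with wrap-around handled by the modulo */
	uint32_t used = (head - tail) % PPC4XX_NUM_PD;

	/*
	 * "busy" once the ring is 15/16 full, keeping a few entries
	 * free for requests that cannot be backlogged.
	 */
	return used >= (PPC4XX_NUM_PD * 15) / 16;
}

int main(void)
{
	printf("head=30, tail=20 -> busy=%d\n", pdr_is_busy(30, 20)); /* 0 */
	printf("head=5,  tail=20 -> busy=%d\n", pdr_is_busy(5, 20));  /* 1 */
	return 0;
}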
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index ecfdcfe3698d..8ac3bd37203b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,7 +22,11 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
#define MODULE_NAME "crypto4xx"
@@ -34,20 +38,28 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
-#define PPC4XX_LAST_PD 63
-#define PPC4XX_NUM_PD 64
-#define PPC4XX_LAST_GD 1023
+#define PPC4XX_NUM_PD 256
+#define PPC4XX_LAST_PD (PPC4XX_NUM_PD - 1)
#define PPC4XX_NUM_GD 1024
-#define PPC4XX_LAST_SD 63
-#define PPC4XX_NUM_SD 64
+#define PPC4XX_LAST_GD (PPC4XX_NUM_GD - 1)
+#define PPC4XX_NUM_SD 256
+#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE 2048
-#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_BUSY BIT(1)
+#define PD_ENTRY_INUSE BIT(0)
#define PD_ENTRY_FREE 0
#define ERING_WAS_FULL 0xffffffff
struct crypto4xx_device;
+union shadow_sa_buf {
+ struct dynamic_sa_ctl sa;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ u8 buf[256];
+} __packed;
+
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
@@ -60,9 +72,8 @@ struct pd_uinfo {
used by this packet */
 u32 num_sd; /* number of scatter descriptors
used by this packet */
- void *sa_va; /* shadow sa, when using cp from ctx->sa */
- u32 sa_pa;
- void *sr_va; /* state record for shadow sa */
+ struct dynamic_sa_ctl *sa_va; /* shadow sa */
+ struct sa_state_record *sr_va; /* state record for shadow sa */
u32 sr_pa;
struct scatterlist *dest_va;
struct crypto_async_request *async_req; /* base crypto request
@@ -72,27 +83,21 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
char *name;
- u64 ce_phy_address;
void __iomem *ce_base;
void __iomem *trng_base;
- void *pdr; /* base address of packet
- descriptor ring */
- dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
- void *gdr; /* gather descriptor ring */
- dma_addr_t gdr_pa; /* physical address used to
- program ce gdr_base_register */
- void *sdr; /* scatter descriptor ring */
- dma_addr_t sdr_pa; /* physical address used to
- program ce sdr_base_register */
+ struct ce_pd *pdr; /* base address of packet descriptor ring */
+ dma_addr_t pdr_pa; /* physical address of pdr_base_register */
+ struct ce_gd *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address of gdr_base_register */
+ struct ce_sd *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address of sdr_base_register */
void *scatter_buffer_va;
dma_addr_t scatter_buffer_pa;
- u32 scatter_buffer_size;
- void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ union shadow_sa_buf *shadow_sa_pool;
dma_addr_t shadow_sa_pool_pa;
- void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ struct sa_state_record *shadow_sr_pool;
dma_addr_t shadow_sr_pool_pa;
u32 pdr_tail;
u32 pdr_head;
@@ -100,9 +105,10 @@ struct crypto4xx_device {
u32 gdr_head;
u32 sdr_tail;
u32 sdr_head;
- void *pdr_uinfo;
+ struct pd_uinfo *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
+ struct ratelimit_state aead_ratelimit;
};
struct crypto4xx_core_device {
@@ -118,30 +124,13 @@ struct crypto4xx_core_device {
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
- void *sa_in;
- dma_addr_t sa_in_dma_addr;
- void *sa_out;
- dma_addr_t sa_out_dma_addr;
- void *state_record;
- dma_addr_t state_record_dma_addr;
+ struct dynamic_sa_ctl *sa_in;
+ struct dynamic_sa_ctl *sa_out;
+ __le32 iv_nonce;
u32 sa_len;
- u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
- u32 direction;
- u32 next_hdr;
- u32 save_iv;
- u32 pd_ctl_len;
- u32 pd_ctl;
- u32 bypass;
- u32 is_hash;
- u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
- struct crypto4xx_device *dev; /* Device in which
- operation to send to */
- void *sa;
- u32 sa_dma_addr;
- u16 sa_len;
+ union {
+ struct crypto_aead *aead;
+ } sw_cipher;
};
struct crypto4xx_alg_common {
@@ -149,6 +138,7 @@ struct crypto4xx_alg_common {
union {
struct crypto_alg cipher;
struct ahash_alg hash;
+ struct aead_alg aead;
} u;
};
@@ -158,43 +148,90 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
- struct crypto_alg *x)
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+int crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *sa,
+ const unsigned int sa_len,
+ const unsigned int assoclen);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that are word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+ size_t len)
{
- switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- return container_of(__crypto_ahash_alg(x),
- struct crypto4xx_alg, alg.u.hash);
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = __swab32p((u32 *) buf);
+
+ if (len) {
+ const u8 *tmp = (u8 *)buf;
+
+ switch (len) {
+ case 3:
+ *dst = (tmp[2] << 16) |
+ (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 2:
+ *dst = (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 1:
+ *dst = tmp[0];
+ break;
+ default:
+ break;
+ }
}
+}
- return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32(dst, buf, len);
}
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
- struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
- struct crypto4xx_ctx *ctx,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *ciper,
+ unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+
#endif
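
The crypto4xx_memcpy_swab32() helper added to this header swaps each 32-bit word and packs any 1-3 trailing bytes LSB-first. On the driver's big-endian PPC4xx target, a native load followed by __swab32p() effectively behaves like a little-endian load, which the userspace sketch below does explicitly; the 6-byte input, the function body and main() are illustrative, not the driver code.

/*
 * Userspace illustration of the crypto4xx_memcpy_swab32() behaviour,
 * with an explicit little-endian load standing in for the native load
 * plus __swab32p() the driver performs on its big-endian target.
 */
#include <stdint.h>
#include <stdio.h>

static void memcpy_swab32(uint32_t *dst, const void *buf, size_t len)
{
	const uint8_t *src = buf;

	for (; len >= 4; src += 4, len -= 4)
		*dst++ = (uint32_t)src[3] << 24 | (uint32_t)src[2] << 16 |
			 (uint32_t)src[1] << 8  | src[0];

	if (len) {	/* 1..3 trailing bytes, packed LSB-first */
		uint32_t last = 0;

		switch (len) {
		case 3:
			last |= (uint32_t)src[2] << 16;
			/* fall through */
		case 2:
			last |= (uint32_t)src[1] << 8;
			/* fall through */
		case 1:
			last |= src[0];
		}
		*dst = last;
	}
}

int main(void)
{
	const uint8_t key[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint32_t out[2] = { 0, 0 };

	memcpy_swab32(out, key, sizeof(key));
	printf("%08x %08x\n", out[0], out[1]);	/* 04030201 00000605 */
	return 0;
}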
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 279b8725559f..0a22ec5d1a96 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -261,6 +261,9 @@ union ce_pd_ctl {
} bf;
u32 w;
} __attribute__((packed));
+#define PD_CTL_HASH_FINAL BIT(4)
+#define PD_CTL_PE_DONE BIT(1)
+#define PD_CTL_HOST_READY BIT(0)
union ce_pd_ctl_len {
struct {
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
deleted file mode 100644
index 69182e2cc3ea..000000000000
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * AMCC SoC PPC4xx Crypto Driver
- *
- * Copyright (c) 2008 Applied Micro Circuits Corporation.
- * All rights reserved. James Hsiao <jhsiao@amcc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * @file crypto4xx_sa.c
- *
- * This file implements the security context
- * associate format.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
-#include "crypto4xx_core.h"
-
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3
- + cts.bf.iv0
- + cts.bf.iv1
- + cts.bf.iv2
- + cts.bf.iv3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
- return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
-}
-
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
-
- return sizeof(struct dynamic_sa_ctl);
-}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 1352d58d4e34..a4d403528db5 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -55,6 +55,8 @@ union dynamic_sa_contents {
#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
@@ -112,6 +116,9 @@ union sa_command_0 {
#define CRYPTO_MODE_ECB 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_CTR 4
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
@@ -169,7 +176,7 @@ union sa_command_1 {
} __attribute__((packed));
struct dynamic_sa_ctl {
- u32 sa_contents;
+ union dynamic_sa_contents sa_contents;
union sa_command_0 sa_command_0;
union sa_command_1 sa_command_1;
} __attribute__((packed));
@@ -178,9 +185,12 @@ struct dynamic_sa_ctl {
* State Record for Security Association (SA)
*/
struct sa_state_record {
- u32 save_iv[4];
- u32 save_hash_byte_cnt[2];
- u32 save_digest[16];
+ __le32 save_iv[4];
+ __le32 save_hash_byte_cnt[2];
+ union {
+ u32 save_digest[16]; /* for MD5/SHA */
+ __le32 save_digest_le32[16]; /* GHASH / CBC */
+ };
} __attribute__((packed));
/**
@@ -189,8 +199,8 @@ struct sa_state_record {
*/
struct dynamic_sa_aes128 {
struct dynamic_sa_ctl ctrl;
- u32 key[4];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[4];
+ __le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -203,8 +213,8 @@ struct dynamic_sa_aes128 {
*/
struct dynamic_sa_aes192 {
struct dynamic_sa_ctl ctrl;
- u32 key[6];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[6];
+ __le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -217,8 +227,8 @@ struct dynamic_sa_aes192 {
*/
struct dynamic_sa_aes256 {
struct dynamic_sa_ctl ctrl;
- u32 key[8];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[8];
+ __le32 iv[4]; /* for CBC, OFB, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -228,16 +238,81 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 inner_digest[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e000442
+#define SA_AES_GCM_CONTENTS 0x3e000402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
struct dynamic_sa_ctl ctrl;
- u32 inner_digest[5];
- u32 outer_digest[5];
+ __le32 inner_digest[5];
+ __le32 outer_digest[5];
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+ u32 offset;
+
+ offset = cts->sa_contents.bf.key_size
+ + cts->sa_contents.bf.inner_size
+ + cts->sa_contents.bf.outer_size
+ + cts->sa_contents.bf.spi
+ + cts->sa_contents.bf.seq_num0
+ + cts->sa_contents.bf.seq_num1
+ + cts->sa_contents.bf.seq_num_mask0
+ + cts->sa_contents.bf.seq_num_mask1
+ + cts->sa_contents.bf.seq_num_mask2
+ + cts->sa_contents.bf.seq_num_mask3
+ + cts->sa_contents.bf.iv0
+ + cts->sa_contents.bf.iv1
+ + cts->sa_contents.bf.iv2
+ + cts->sa_contents.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts +
+ sizeof(struct dynamic_sa_ctl) +
+ cts->sa_contents.bf.key_size * 4);
+}
+
#endif
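
get_dynamic_sa_offset_state_ptr_field() above sums the sa_contents bit-fields to find how many optional 32-bit words precede the state-record pointer. For the AES-128 GCM SA declared in this header, the same offset can be read straight off the struct: 3 control words plus key[4] + inner_digest[4] + iv[4] puts state_ptr at byte 12 + 48 = 60. The sketch below is a cut-down mirror of those layouts (plain uint32_t instead of __le32, no __packed; the offsets are unchanged because every member is a 32-bit word) used only to show the arithmetic.

/*
 * Cut-down mirror of the AES-128 GCM SA layout, for illustration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dynamic_sa_ctl {
	uint32_t sa_contents;
	uint32_t sa_command_0;
	uint32_t sa_command_1;
};

struct dynamic_sa_aes128_gcm {
	struct dynamic_sa_ctl ctrl;
	uint32_t key[4];
	uint32_t inner_digest[4];
	uint32_t iv[4];
	uint32_t state_ptr;
	uint32_t reserved;
};

int main(void)
{
	printf("state_ptr offset = %zu bytes\n",
	       offsetof(struct dynamic_sa_aes128_gcm, state_ptr)); /* 60 */
	printf("SA length = %zu words\n",
	       sizeof(struct dynamic_sa_aes128_gcm) / 4);          /* 17 */
	return 0;
}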
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 7694679802b3..62f9d3038757 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ATMEL_AES_REGS_H__
#define __ATMEL_AES_REGS_H__
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 29e20c37f3a6..691c6465b71e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
@@ -76,12 +77,11 @@
AES_FLAGS_ENCRYPT | \
AES_FLAGS_GTAGEN)
-#define AES_FLAGS_INIT BIT(2)
#define AES_FLAGS_BUSY BIT(3)
#define AES_FLAGS_DUMP_REG BIT(4)
#define AES_FLAGS_OWN_SHA BIT(5)
-#define AES_FLAGS_PERSISTENT (AES_FLAGS_INIT | AES_FLAGS_BUSY)
+#define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY
#define ATMEL_AES_QUEUE_LENGTH 50
@@ -110,6 +110,7 @@ struct atmel_aes_base_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u16 block_size;
+ bool is_aead;
};
struct atmel_aes_ctx {
@@ -156,6 +157,7 @@ struct atmel_aes_authenc_ctx {
struct atmel_aes_reqctx {
unsigned long mode;
+ u32 lastc[AES_BLOCK_SIZE / sizeof(u32)];
};
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
@@ -448,11 +450,8 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
if (err)
return err;
- if (!(dd->flags & AES_FLAGS_INIT)) {
- atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
- atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
- dd->flags |= AES_FLAGS_INIT;
- }
+ atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+ atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
return 0;
}
@@ -497,12 +496,34 @@ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
#ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
- atmel_aes_authenc_complete(dd, err);
+ if (dd->ctx->is_aead)
+ atmel_aes_authenc_complete(dd, err);
#endif
clk_disable(dd->iclk);
dd->flags &= ~AES_FLAGS_BUSY;
+ if (!dd->ctx->is_aead) {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dd->areq);
+ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *ablkcipher =
+ crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ if (rctx->mode & AES_FLAGS_ENCRYPT) {
+ scatterwalk_map_and_copy(req->info, req->dst,
+ req->nbytes - ivsize, ivsize, 0);
+ } else {
+ if (req->src == req->dst) {
+ memcpy(req->info, rctx->lastc, ivsize);
+ } else {
+ scatterwalk_map_and_copy(req->info, req->src,
+ req->nbytes - ivsize, ivsize, 0);
+ }
+ }
+ }
+
if (dd->is_async)
dd->areq->complete(dd->areq, err);
@@ -1071,11 +1092,11 @@ static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
- struct atmel_aes_base_ctx *ctx;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct atmel_aes_reqctx *rctx;
struct atmel_aes_dev *dd;
- ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
switch (mode & AES_FLAGS_OPMODE_MASK) {
case AES_FLAGS_CFB8:
ctx->block_size = CFB8_BLOCK_SIZE;
@@ -1097,6 +1118,7 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
ctx->block_size = AES_BLOCK_SIZE;
break;
}
+ ctx->is_aead = false;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1105,6 +1127,13 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
rctx = ablkcipher_request_ctx(req);
rctx->mode = mode;
+ if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ scatterwalk_map_and_copy(rctx->lastc, req->src,
+ (req->nbytes - ivsize), ivsize, 0);
+ }
+
return atmel_aes_handle_queue(dd, &req->base);
}
@@ -1236,10 +1265,6 @@ static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg aes_algs[] = {
{
.cra_name = "ecb(aes)",
@@ -1252,7 +1277,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1272,7 +1296,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1293,7 +1316,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1314,7 +1336,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1335,7 +1356,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1356,7 +1376,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1377,7 +1396,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1398,7 +1416,6 @@ static struct crypto_alg aes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_ctr_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1421,7 +1438,6 @@ static struct crypto_alg aes_cfb64_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1532,7 +1548,7 @@ static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
if (err)
return atmel_aes_complete(dd, err);
- if (likely(ivsize == 12)) {
+ if (likely(ivsize == GCM_AES_IV_SIZE)) {
memcpy(ctx->j0, iv, ivsize);
ctx->j0[3] = cpu_to_be32(1);
return atmel_aes_gcm_process(dd);
@@ -1739,6 +1755,7 @@ static int atmel_aes_gcm_crypt(struct aead_request *req,
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -1808,19 +1825,13 @@ static int atmel_aes_gcm_init(struct crypto_aead *tfm)
return 0;
}
-static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
-{
-
-}
-
static struct aead_alg aes_gcm_alg = {
.setkey = atmel_aes_gcm_setkey,
.setauthsize = atmel_aes_gcm_setauthsize,
.encrypt = atmel_aes_gcm_encrypt,
.decrypt = atmel_aes_gcm_decrypt,
.init = atmel_aes_gcm_init,
- .exit = atmel_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -1955,7 +1966,6 @@ static struct crypto_alg aes_xts_alg = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_aes_xts_cra_init,
- .cra_exit = atmel_aes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
@@ -2223,6 +2233,7 @@ static int atmel_aes_authenc_crypt(struct aead_request *req,
rctx->base.mode = mode;
ctx->block_size = AES_BLOCK_SIZE;
+ ctx->is_aead = true;
dd = atmel_aes_find_dev(ctx);
if (!dd)
@@ -2382,7 +2393,6 @@ static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
struct crypto_platform_data *pdata)
{
struct at_dma_slave *slave;
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -2407,7 +2417,7 @@ err_dma_out:
dma_release_channel(dd->src.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
@@ -2658,8 +2668,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
- aes_dd->irq = -1;
-
/* Get the base address */
aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!aes_res) {
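
The atmel-aes hunks above add rctx->lastc and the copy-back into req->info on completion so that chained block-cipher requests see the correct IV: the next IV is the last ciphertext block, and for an in-place decryption that block has to be saved before the engine overwrites it. The toy sketch below only shows that ordering; memcpy() stands in for scatterwalk_map_and_copy() and all buffer names are illustrative.

/*
 * Toy sketch of the IV-chaining order enforced by the atmel-aes change.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	uint8_t buf[2 * AES_BLOCK_SIZE];	/* src == dst (in place) */
	uint8_t lastc[AES_BLOCK_SIZE];
	uint8_t next_iv[AES_BLOCK_SIZE];

	memset(buf, 0xab, sizeof(buf));		/* pretend ciphertext */

	/* before the in-place decrypt: save the last ciphertext block */
	memcpy(lastc, buf + sizeof(buf) - AES_BLOCK_SIZE, AES_BLOCK_SIZE);

	/* ... hardware decrypts buf in place, plaintext replaces it ... */
	memset(buf, 0x00, sizeof(buf));

	/* at completion: the saved block becomes the chained IV */
	memcpy(next_iv, lastc, AES_BLOCK_SIZE);

	printf("chained IV byte 0 = 0x%02x\n", next_iv[0]);	/* 0xab */
	return 0;
}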
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index 1b0eba4a2706..b2b5e634e80f 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ATMEL_SHA_REGS_H__
#define __ATMEL_SHA_REGS_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3e2f41b3eaf3..8874aa5ca0f7 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2628,7 +2628,6 @@ static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask_in;
/* Try to grab DMA channel */
@@ -2639,7 +2638,7 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
if (!dd->dma_lch_in.chan) {
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
@@ -2778,8 +2777,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
- sha_dd->irq = -1;
-
/* Get the base address */
sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!sha_res) {
diff --git a/drivers/crypto/atmel-tdes-regs.h b/drivers/crypto/atmel-tdes-regs.h
index f86734d0fda4..fbd905720dfa 100644
--- a/drivers/crypto/atmel-tdes-regs.h
+++ b/drivers/crypto/atmel-tdes-regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ATMEL_TDES_REGS_H__
#define __ATMEL_TDES_REGS_H__
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index f4b335dda568..592124f8382b 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -720,7 +720,6 @@ static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
struct crypto_platform_data *pdata)
{
- int err = -ENOMEM;
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -765,7 +764,7 @@ err_dma_out:
dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
dev_warn(dd->dev, "no DMA channel available\n");
- return err;
+ return -ENODEV;
}
static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
@@ -912,10 +911,6 @@ static int atmel_tdes_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static void atmel_tdes_cra_exit(struct crypto_tfm *tfm)
-{
-}
-
static struct crypto_alg tdes_algs[] = {
{
.cra_name = "ecb(des)",
@@ -928,7 +923,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -948,7 +942,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -969,7 +962,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -990,7 +982,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1011,7 +1002,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1032,7 +1022,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1053,7 +1042,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1074,7 +1062,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
@@ -1094,7 +1081,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1115,7 +1101,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1136,7 +1121,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1157,7 +1141,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1178,7 +1161,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 2*DES_KEY_SIZE,
@@ -1199,7 +1181,6 @@ static struct crypto_alg tdes_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
- .cra_exit = atmel_tdes_cra_exit,
.cra_u.ablkcipher = {
.min_keysize = 2*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
@@ -1382,8 +1363,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
- tdes_dd->irq = -1;
-
/* Get the base address */
tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!tdes_res) {
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 0f9754e07719..456278440863 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2072,9 +2072,9 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
del_timer(&ac->timer);
}
-static void artpec6_crypto_timeout(unsigned long data)
+static void artpec6_crypto_timeout(struct timer_list *t)
{
- struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
+ struct artpec6_crypto *ac = from_timer(ac, t, timer);
dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
@@ -3063,7 +3063,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
spin_lock_init(&ac->queue_lock);
INIT_LIST_HEAD(&ac->queue);
INIT_LIST_HEAD(&ac->pending);
- setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);
+ timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
ac->base = base;
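
The artpec6 hunk above moves from setup_timer(), which smuggles the context through an unsigned long, to timer_setup()/from_timer(), where the callback receives the embedded timer and recovers its containing structure. Below is a userspace sketch of that container_of() pattern; the struct and field names are illustrative, and the kernel's timer_list and from_timer() are of course richer than this.

/*
 * Userspace sketch of the from_timer()/container_of() idea.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer {
	void (*fn)(struct fake_timer *t);
};

struct artpec6_crypto_like {
	int pending;
	struct fake_timer timer;	/* embedded, like ac->timer */
};

static void timeout_cb(struct fake_timer *t)
{
	struct artpec6_crypto_like *ac =
		container_of(t, struct artpec6_crypto_like, timer);

	printf("timeout, %d requests pending\n", ac->pending);
}

int main(void)
{
	struct artpec6_crypto_like ac = { .pending = 3 };

	ac.timer.fn = timeout_cb;	/* plays the role of timer_setup() */
	ac.timer.fn(&ac.timer);		/* plays the role of the timer firing */
	return 0;
}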
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8685c7e4debd..ce70b44d0fb6 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
return 0;
}
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+ u8 chan_idx)
+{
+ int err;
+ int retry_cnt = 0;
+ struct device *dev = &(iproc_priv.pdev->dev);
+
+ err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+ while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+ /*
+ * Mailbox queue is full. Since MAY_SLEEP is set, assume
+ * not in atomic context and we can wait and try again.
+ */
+ retry_cnt++;
+ usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+ err = mbox_send_message(iproc_priv.mbox[chan_idx],
+ mssg);
+ atomic_inc(&iproc_priv.mb_no_spc);
+ }
+ }
+ if (err < 0) {
+ atomic_inc(&iproc_priv.mb_send_fail);
+ return err;
+ }
+
+ /* Check error returned by mailbox controller */
+ err = mssg->error;
+ if (unlikely(err < 0)) {
+ dev_err(dev, "message error %d", err);
+ }
+
+ /* Signal txdone for mailbox channel */
+ mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+ return err;
+}
+
/**
* handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
* a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
u32 pad_len; /* total length of all padding */
bool update_key = false;
struct brcm_message *mssg; /* mailbox message */
- int retry_cnt = 0;
/* number of entries in src and dst sg in mailbox message. */
u8 rx_frag_num = 2; /* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (unlikely(err < 0)) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
u32 spu_hdr_len;
unsigned int digestsize;
u16 rem = 0;
- int retry_cnt = 0;
/*
* number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
+
return -EINPROGRESS;
}
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
int assoc_nents = 0;
bool incl_icv = false;
unsigned int digestsize = ctx->digestsize;
- int retry_cnt = 0;
/* number of entries in src and dst sg. Always includes SPU msg header.
*/
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
* expects AAD to include just SPI and seqno. So
* subtract off the IV len.
*/
- aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
+ aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
if (rctx->is_encrypt) {
aead_parms.return_iv = true;
- aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
+ aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
}
} else {
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
if (err)
return err;
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
- if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
- while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
- /*
- * Mailbox queue is full. Since MAY_SLEEP is set, assume
- * not in atomic context and we can wait and try again.
- */
- retry_cnt++;
- usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
- err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
- mssg);
- atomic_inc(&iproc_priv.mb_no_spc);
- }
- }
- if (err < 0) {
- atomic_inc(&iproc_priv.mb_send_fail);
+ err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+ if (unlikely(err < 0))
return err;
- }
return -EINPROGRESS;
}
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = aead_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = {
.cra_flags = CRYPTO_ALG_NEED_FALLBACK
},
.setkey = rfc4543_gcm_esp_setkey,
- .ivsize = GCM_ESP_IV_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.cipher_info = {
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
mcl->dev = dev;
mcl->tx_block = false;
mcl->tx_tout = 0;
- mcl->knows_txdone = false;
+ mcl->knows_txdone = true;
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct spu_hw *spu = &iproc_priv.spu;
struct resource *spu_ctrl_regs;
- const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
struct device_node *dn = pdev->dev.of_node;
int err, i;
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev)
/* Count number of mailbox channels */
spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- if (!match) {
+ matched_spu_type = of_device_get_match_data(dev);
+ if (!matched_spu_type) {
dev_err(&pdev->dev, "Failed to match device\n");
return -ENODEV;
}
- matched_spu_type = match->data;
-
spu->spu_type = matched_spu_type->type;
spu->spu_subtype = matched_spu_type->subtype;
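
handle_ablkcipher_req(), handle_ahash_req() and handle_aead_req() each carried a copy of the same retry loop; the new mailbox_send_message() centralizes it: retry -ENOBUFS a bounded number of times with a short sleep, but only when the caller set CRYPTO_TFM_REQ_MAY_SLEEP. The userspace sketch below shows that policy only; try_send(), MAY_SLEEP, RETRY_MAX and the sleep interval are stand-ins for mbox_send_message(), CRYPTO_TFM_REQ_MAY_SLEEP, SPU_MB_RETRY_MAX and MBOX_SLEEP_MIN/MAX.

/*
 * Sketch of the bounded retry-with-sleep policy factored out above.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define RETRY_MAX	1000
#define MAY_SLEEP	0x1

static int try_send(int attempt)
{
	/* pretend the mailbox queue drains after a few attempts */
	return attempt < 3 ? -ENOBUFS : 0;
}

static int send_with_retry(unsigned int flags)
{
	int attempt = 0;
	int err = try_send(attempt);

	if (flags & MAY_SLEEP) {
		while (err == -ENOBUFS && attempt < RETRY_MAX) {
			attempt++;
			usleep(1000);	/* brief back-off between attempts */
			err = try_send(attempt);
		}
	}
	return err;
}

int main(void)
{
	printf("atomic caller:   %d\n", send_with_retry(0));	     /* -ENOBUFS */
	printf("sleeping caller: %d\n", send_with_retry(MAY_SLEEP)); /* 0 */
	return 0;
}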
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 57a55eb2a255..763c425c41ca 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -23,6 +23,7 @@
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/aead.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/sha3.h>
@@ -39,8 +40,6 @@
#define ARC4_STATE_SIZE 4
#define CCM_AES_IV_SIZE 16
-#define GCM_AES_IV_SIZE 12
-#define GCM_ESP_IV_SIZE 8
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index 430c5570ea87..d543c010ccd9 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -271,7 +271,7 @@ int do_shash(unsigned char *name, unsigned char *result,
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
- pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
+ pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc);
return rc;
}
@@ -279,7 +279,7 @@ int do_shash(unsigned char *name, unsigned char *result,
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
- pr_err("%s: Memory allocation failure", __func__);
+ pr_err("%s: Memory allocation failure\n", __func__);
goto do_shash_err;
}
sdesc->shash.tfm = hash;
@@ -288,31 +288,31 @@ int do_shash(unsigned char *name, unsigned char *result,
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
- pr_err("%s: Could not setkey %s shash", __func__, name);
+ pr_err("%s: Could not setkey %s shash\n", __func__, name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
- pr_err("%s: Could not init %s shash", __func__, name);
+ pr_err("%s: Could not init %s shash\n", __func__, name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
- pr_err("%s: Could not update1", __func__);
+ pr_err("%s: Could not update1\n", __func__);
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
- pr_err("%s: Could not update2", __func__);
+ pr_err("%s: Could not update2\n", __func__);
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
- pr_err("%s: Could not generate %s hash", __func__, name);
+ pr_err("%s: Could not generate %s hash\n", __func__, name);
do_shash_err:
crypto_free_shash(hash);
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 9e2e98856b9b..cb652ee7dfc8 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the CAAM backend and dependent components
#
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 54f3b375a453..baa8dd52472d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -992,7 +992,7 @@ static void init_gcm_job(struct aead_request *req,
struct caam_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
u32 *desc = edesc->hw_desc;
- bool generic_gcm = (ivsize == 12);
+ bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
@@ -1004,7 +1004,7 @@ static void init_gcm_job(struct aead_request *req,
/* Read GCM IV */
append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last);
/* Append Salt */
if (!generic_gcm)
append_data(desc, ctx->key + ctx->cdata.keylen, 4);
@@ -1953,7 +1953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4106_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1971,7 +1971,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = rfc4543_setauthsize,
.encrypt = ipsec_gcm_encrypt,
.decrypt = ipsec_gcm_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
@@ -1990,7 +1990,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setauthsize = gcm_setauthsize,
.encrypt = gcm_encrypt,
.decrypt = gcm_decrypt,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
.caam = {
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 8731e4a7ff05..e412ec8f7005 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Shared descriptors for aead, ablkcipher algorithms
*
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 2eefc4a26bc2..f9f08fce4356 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -7,7 +7,7 @@
*/
#include "compat.h"
-
+#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
@@ -2312,6 +2312,11 @@ static int __init caam_qi_algapi_init(void)
if (!priv || !priv->qi_present)
return -ENODEV;
+ if (caam_dpaa2) {
+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
+ return -ENODEV;
+ }
+
INIT_LIST_HEAD(&alg_list);
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 698580b60b2f..616720a04e7a 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -218,7 +218,7 @@ static inline int buf_map_to_sec4_sg(struct device *jrdev,
}
/* Map state->caam_ctx, and add it to link table */
-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+static inline int ctx_map_to_sec4_sg(struct device *jrdev,
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
@@ -773,7 +773,7 @@ static int ahash_update_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -871,9 +871,8 @@ static int ahash_final_ctx(struct ahash_request *req)
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->src_nents = 0;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -967,7 +966,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
@@ -1123,7 +1122,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
- edesc->src_nents = 0;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1205,7 +1203,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->dst_dma = 0;
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
@@ -1417,7 +1414,6 @@ static int ahash_update_first(struct ahash_request *req)
}
edesc->src_nents = src_nents;
- edesc->dst_dma = 0;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index 87ab75e9df43..fd145c46eae1 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
*
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 7149cd2492e0..1c71e0cd5098 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
@@ -31,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index 7e7bf68c9ef5..be693a2cc25e 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM control-plane driver backend public-level include definitions
*
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 2e6766a1573f..8142de7ba050 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM descriptor composition header
* Definitions to support CAAM descriptor instruction generation
@@ -1439,7 +1440,7 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d8e83ca104e0..ba1ca0806f0a 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* caam descriptor construction helper functions
*
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 3d639f3b45aa..8da88beb1abb 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CAAM Error Reporting
*
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 250e1a21c473..5aa332bac4b0 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM Error Reporting code header
*
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index a52361258d3a..91f1107276e5 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM/SEC 4.x driver backend
* Private/internal definitions between modules
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/caam/jr.h b/drivers/crypto/caam/jr.h
index 97113a6d6c58..eab611530f36 100644
--- a/drivers/crypto/caam/jr.h
+++ b/drivers/crypto/caam/jr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM public-level include definitions for the JobR backend
*
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index c425d4adaf2a..8c79c3a153dc 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CAAM/SEC 4.x functions for handling key-generation jobs
*
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 4628f389eb64..5db055c25bd2 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM/SEC 4.x definitions for handling key-generation jobs
*
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 31e59963f4d2..810f0bef0652 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM Protocol Data Block (PDB) definition header file
*
diff --git a/drivers/crypto/caam/pkc_desc.c b/drivers/crypto/caam/pkc_desc.c
index 9e2ce6fe2e43..2a8d87ea94bf 100644
--- a/drivers/crypto/caam/pkc_desc.c
+++ b/drivers/crypto/caam/pkc_desc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
*
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index e4cf00014233..f9a44f485aac 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CAAM/SEC 4.x QI transport/backend driver
* Queue Interface backend functionality
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index ecb21f207637..357b69f57072 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Public definitions for the CAAM/QI (Queue Interface) backend.
*
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 17cfd23a38fa..fee363865d88 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM hardware register-level view
*
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 936b1b630058..e586ffab8358 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CAAM/SEC 4.x functions for using scatterlists in caam driver
*
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 5af2e4368267..45b7379e8e30 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NITROX_CNN55XX) += n5pf.o
n5pf-objs := nitrox_main.o \
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index ce330278ef8a..2ae6124e5da6 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 4888c7823a5f..312f72801af6 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_COMMON_H
#define __NITROX_COMMON_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 30b04c4c6076..9dcb7fdbe0a7 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_CSR_H
#define __NITROX_CSR_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 57858b04f165..9a476bb6d4c7 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_DEV_H
#define __NITROX_DEV_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index f0655f82fa7d..ab4ccf2f9e77 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include "nitrox_dev.h"
@@ -126,7 +127,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
- nitrox_write_csr(ndev, NPS_PKT_IN_INSTR_BADDRX(i), cmdq->dma);
+ nitrox_write_csr(ndev, offset, cmdq->dma);
/* configure ring size */
offset = NPS_PKT_IN_INSTR_RSIZEX(i);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 71f934871a89..dbead5f45df3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index b4a391adb9b6..4fdc921ba611 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 74f4c20dc87d..d091b6f5f5dd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_REQ_H
#define __NITROX_REQ_H
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4bb4377c5ac0..4addc238a6ef 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>
diff --git a/drivers/crypto/cavium/zip/Makefile b/drivers/crypto/cavium/zip/Makefile
index b2f3baaff757..020d189d793d 100644
--- a/drivers/crypto/cavium/zip/Makefile
+++ b/drivers/crypto/cavium/zip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Cavium's ZIP Driver.
#
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 57f8debfcfb3..c4ce726b931e 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
ccp-objs := sp-dev.o sp-platform.o
ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 52313524a4dd..ff02b713c6f6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -19,13 +19,12 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/delay.h>
#include "ccp-crypto.h"
-#define AES_GCM_IVSIZE 12
-
static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
{
return ret;
@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
*/
/* Prepare the IV: 12 bytes + an integer (counter) */
- memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
for (i = 0; i < 3; i++)
- rctx->iv[i + AES_GCM_IVSIZE] = 0;
+ rctx->iv[i + GCM_AES_IV_SIZE] = 0;
rctx->iv[AES_BLOCK_SIZE - 1] = 1;
/* Set up a scatterlist for the IV */
@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
.encrypt = ccp_aes_gcm_encrypt,
.decrypt = ccp_aes_gcm_decrypt,
.init = ccp_aes_gcm_cra_init,
- .ivsize = AES_GCM_IVSIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 35a9de7fd475..b95d19974aa6 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
/* Check if the cmd can/should be queued */
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
- ret = -EBUSY;
- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
+ ret = -ENOSPC;
goto e_lock;
+ }
}
/* Look for an entry with the same tfm. If there is a cmd
@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
ret = ccp_enqueue_cmd(crypto_cmd->cmd);
if (!ccp_crypto_success(ret))
goto e_lock; /* Error, don't queue it */
- if ((ret == -EBUSY) &&
- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
- goto e_lock; /* Not backlogging, don't queue it */
}
if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 65604fc65e8f..44a4d2779b15 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
struct ccp_cmd_queue *cmd_q;
struct dma_pool *dma_pool;
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
+ unsigned int qmr, i;
u64 status;
u32 status_lo, status_hi;
int ret;
/* Find available queues */
- qim = 0;
qmr = ioread32(ccp->io_regs + Q_MASK_REG);
for (i = 0; i < MAX_HW_QUEUES; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4e029b176641..1b5035d56288 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
i = ccp->cmd_q_count;
if (ccp->cmd_count >= MAX_CMD_QLEN) {
- ret = -EBUSY;
- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
+ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
+ ret = -EBUSY;
list_add_tail(&cmd->entry, &ccp->backlog);
+ } else {
+ ret = -ENOSPC;
+ }
} else {
ret = -EINPROGRESS;
ccp->cmd_count++;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index d608043c0280..8b9da58459df 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
desc->tx_desc.cookie, desc->status);
dma_cookie_complete(tx_desc);
+ dma_descriptor_unmap(tx_desc);
}
desc = __ccp_next_dma_desc(chan, desc);
@@ -230,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
if (tx_desc) {
- if (tx_desc->callback &&
- (tx_desc->flags & DMA_PREP_INTERRUPT))
- tx_desc->callback(tx_desc->callback_param);
+ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
}
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 0e8160701833..4eed7171e2ae 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -53,6 +53,7 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
@@ -70,6 +71,8 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+#define IV AES_BLOCK_SIZE
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -102,7 +105,7 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
+ return (skb->len <= SGE_MAX_WR_LEN);
}
/*
@@ -117,6 +120,92 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
+ unsigned int entlen,
+ unsigned int skip)
+{
+ int nents = 0;
+ unsigned int less;
+ unsigned int skip_len = 0;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+
+ while (sg && reqlen) {
+ less = min(reqlen, sg_dma_len(sg) - skip_len);
+ nents += DIV_ROUND_UP(less, entlen);
+ reqlen -= less;
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ return nents;
+}
+
+static inline void chcr_handle_ahash_resp(struct ahash_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+
+ if (input == NULL)
+ goto out;
+ reqctx = ahash_request_ctx(req);
+ digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
+ if (reqctx->is_sg_map)
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ if (reqctx->dma_addr)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
+ reqctx->dma_len, DMA_TO_DEVICE);
+ reqctx->dma_addr = 0;
+ updated_digestsize = digestsize;
+ if (digestsize == SHA224_DIGEST_SIZE)
+ updated_digestsize = SHA256_DIGEST_SIZE;
+ else if (digestsize == SHA384_DIGEST_SIZE)
+ updated_digestsize = SHA512_DIGEST_SIZE;
+ if (reqctx->result == 1) {
+ reqctx->result = 0;
+ memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+ digestsize);
+ } else {
+ memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+ updated_digestsize);
+ }
+out:
+ req->base.complete(&req->base, err);
+
+ }
+
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+}
+ req->base.complete(&req->base, err);
+
+}
static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
@@ -151,29 +240,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_req_ctx ctx_req;
- unsigned int digestsize, updated_digestsize;
struct adapter *adap = padap(ctx->dev);
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- ctx_req.req.aead_req = aead_request_cast(req);
- ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
- ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
- if (ctx_req.ctx.reqctx->skb) {
- kfree_skb(ctx_req.ctx.reqctx->skb);
- ctx_req.ctx.reqctx->skb = NULL;
- }
- free_new_sg(ctx_req.ctx.reqctx->newdstsg);
- ctx_req.ctx.reqctx->newdstsg = NULL;
- if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(ctx_req.req.aead_req, input,
- &err);
- ctx_req.ctx.reqctx->verify = VERIFY_HW;
- }
- ctx_req.req.aead_req->base.complete(req, err);
+ chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
@@ -182,60 +253,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
break;
case CRYPTO_ALG_TYPE_AHASH:
- ctx_req.req.ahash_req = ahash_request_cast(req);
- ctx_req.ctx.ahash_ctx =
- ahash_request_ctx(ctx_req.req.ahash_req);
- digestsize =
- crypto_ahash_digestsize(crypto_ahash_reqtfm(
- ctx_req.req.ahash_req));
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb) {
- kfree_skb(ctx_req.ctx.ahash_ctx->skb);
- ctx_req.ctx.ahash_ctx->skb = NULL;
- }
- if (ctx_req.ctx.ahash_ctx->result == 1) {
- ctx_req.ctx.ahash_ctx->result = 0;
- memcpy(ctx_req.req.ahash_req->result, input +
- sizeof(struct cpl_fw6_pld),
- digestsize);
- } else {
- memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
- sizeof(struct cpl_fw6_pld),
- updated_digestsize);
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
- ctx_req.req.ahash_req->base.complete(req, err);
- break;
- }
atomic_inc(&adap->chcr_stats.complete);
return err;
}
-/*
- * calc_tx_flits_ofld - calculate # of flits for an offload packet
- * @skb: the packet
- * Returns the number of flits needed for the given offload packet.
- * These packets are already fully constructed and no additional headers
- * will be added.
- */
-static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
-{
- unsigned int flits, cnt;
-
- if (is_ofld_imm(skb))
- return DIV_ROUND_UP(skb->len, 8);
-
- flits = skb_transport_offset(skb) / 8; /* headers */
- cnt = skb_shinfo(skb)->nr_frags;
- if (skb_tail_pointer(skb) != skb_transport_header(skb))
- cnt++;
- return flits + sgl_len(cnt);
-}
-
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
+static void get_aes_decrypt_key(unsigned char *dec_key,
const unsigned char *key,
unsigned int keylength)
{
@@ -382,13 +406,19 @@ static inline int is_hmac(struct crypto_tfm *tfm)
return 0;
}
-static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+static inline void dsgl_walk_init(struct dsgl_walk *walk,
+ struct cpl_rx_phys_dsgl *dsgl)
{
- struct phys_sge_pairs *to;
- unsigned int len = 0, left_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j = 0;
+ walk->dsgl = dsgl;
+ walk->nents = 0;
+ walk->to = (struct phys_sge_pairs *)(dsgl + 1);
+}
+
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+{
+ struct cpl_rx_phys_dsgl *phys_cpl;
+
+ phys_cpl = walk->dsgl;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -398,38 +428,171 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
CPL_RX_PHYS_DSGL_DCAID_V(0) |
- CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
+ CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
- phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
+ phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
- to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
- sizeof(struct cpl_rx_phys_dsgl));
- for (i = 0; nents && left_size; to++) {
- for (j = 0; j < 8 && nents && left_size; j++, nents--) {
- len = min(left_size, sg_dma_len(sg));
- to->len[j] = htons(len);
- to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- left_size -= len;
+}
+
+static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
+{
+ int j;
+
+ if (!size)
+ return;
+ j = walk->nents;
+ walk->to->len[j % 8] = htons(size);
+ walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ walk->nents = j;
+}
+
+static void dsgl_walk_add_sg(struct dsgl_walk *walk,
+ struct scatterlist *sg,
+ unsigned int slen,
+ unsigned int skip)
+{
+ int skip_len = 0;
+ unsigned int left_size = slen, len = 0;
+ unsigned int j = walk->nents;
+ int offset, ent_len;
+
+ if (!slen)
+ return;
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
}
}
+
+ while (left_size && sg) {
+ len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ offset = 0;
+ while (len) {
+ ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
+ walk->to->len[j % 8] = htons(ent_len);
+ walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
+ offset + skip_len);
+ offset += ent_len;
+ len -= ent_len;
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+ }
+ walk->last_sg = sg;
+ walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
+ skip_len) + skip_len;
+ left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
+ skip_len = 0;
+ sg = sg_next(sg);
+ }
+ walk->nents = j;
+}
+
+static inline void ulptx_walk_init(struct ulptx_walk *walk,
+ struct ulptx_sgl *ulp)
+{
+ walk->sgl = ulp;
+ walk->nents = 0;
+ walk->pair_idx = 0;
+ walk->pair = ulp->sge;
+ walk->last_sg = NULL;
+ walk->last_sg_len = 0;
}
-static inline int map_writesg_phys_cpl(struct device *dev,
- struct cpl_rx_phys_dsgl *phys_cpl,
+static inline void ulptx_walk_end(struct ulptx_walk *walk)
+{
+ walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE_V(walk->nents));
+}
+
+
+static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+ size_t size,
+ dma_addr_t *addr)
+{
+ if (!size)
+ return;
+
+ if (walk->nents == 0) {
+ walk->sgl->len0 = cpu_to_be32(size);
+ walk->sgl->addr0 = cpu_to_be64(*addr);
+ } else {
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+ walk->pair_idx = !walk->pair_idx;
+ if (!walk->pair_idx)
+ walk->pair++;
+ }
+ walk->nents++;
+}
+
+static void ulptx_walk_add_sg(struct ulptx_walk *walk,
struct scatterlist *sg,
- struct phys_sge_parm *sg_param)
+ unsigned int len,
+ unsigned int skip)
{
- if (!sg || !sg_param->nents)
- return -EINVAL;
+ int small;
+ int skip_len = 0;
+ unsigned int sgmin;
- sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
- if (sg_param->nents == 0) {
- pr_err("CHCR : DMA mapping failed\n");
- return -EINVAL;
+ if (!len)
+ return;
+
+ while (sg && skip) {
+ if (sg_dma_len(sg) <= skip) {
+ skip -= sg_dma_len(sg);
+ skip_len = 0;
+ sg = sg_next(sg);
+ } else {
+ skip_len = skip;
+ skip = 0;
+ }
+ }
+ if (walk->nents == 0) {
+ small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->sgl->len0 = cpu_to_be32(sgmin);
+ walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->nents++;
+ len -= sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = sgmin + skip_len;
+ skip_len += sgmin;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
+ }
+
+ while (sg && len) {
+ small = min(sg_dma_len(sg) - skip_len, len);
+ sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
+ walk->pair->addr[walk->pair_idx] =
+ cpu_to_be64(sg_dma_address(sg) + skip_len);
+ walk->pair_idx = !walk->pair_idx;
+ walk->nents++;
+ if (!walk->pair_idx)
+ walk->pair++;
+ len -= sgmin;
+ skip_len += sgmin;
+ walk->last_sg = sg;
+ walk->last_sg_len = skip_len;
+ if (sg_dma_len(sg) == skip_len) {
+ sg = sg_next(sg);
+ skip_len = 0;
+ }
}
- write_phys_cpl(phys_cpl, sg, sg_param);
- return 0;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
@@ -449,45 +612,6 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
-static inline void write_buffer_to_skb(struct sk_buff *skb,
- unsigned int *frags,
- char *bfr,
- u8 bfr_len)
-{
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- get_page(virt_to_page(bfr));
- skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
- offset_in_page(bfr), bfr_len);
- (*frags)++;
-}
-
-
-static inline void
-write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
- struct scatterlist *sg, unsigned int count)
-{
- struct page *spage;
- unsigned int page_len;
-
- skb->len += count;
- skb->data_len += count;
- skb->truesize += count;
-
- while (count > 0) {
- if (!sg || (!(sg->length)))
- break;
- spage = sg_page(sg);
- get_page(spage);
- page_len = min(sg->length, count);
- skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
- (*frags)++;
- count -= page_len;
- sg = sg_next(sg);
- }
-}
-
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
struct adapter *adap = netdev2adap(dev);
@@ -524,30 +648,46 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
struct scatterlist *dst,
unsigned int minsg,
unsigned int space,
- short int *sent,
- short int *dent)
+ unsigned int srcskip,
+ unsigned int dstskip)
{
int srclen = 0, dstlen = 0;
- int srcsg = minsg, dstsg = 0;
+ int srcsg = minsg, dstsg = minsg;
+ int offset = 0, less;
+
+ if (sg_dma_len(src) == srcskip) {
+ src = sg_next(src);
+ srcskip = 0;
+ }
- *sent = 0;
- *dent = 0;
- while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+ if (sg_dma_len(dst) == dstskip) {
+ dst = sg_next(dst);
+ dstskip = 0;
+ }
+
+ while (src && dst &&
space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
- srclen += src->length;
+ srclen += (sg_dma_len(src) - srcskip);
srcsg++;
+ offset = 0;
while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
if (srclen <= dstlen)
break;
- dstlen += dst->length;
- dst = sg_next(dst);
+ less = min_t(unsigned int, sg_dma_len(dst) - offset -
+ dstskip, CHCR_DST_SG_SIZE);
+ dstlen += less;
+ offset += less;
+ if (offset == sg_dma_len(dst)) {
+ dst = sg_next(dst);
+ offset = 0;
+ }
dstsg++;
+ dstskip = 0;
}
src = sg_next(src);
+ srcskip = 0;
}
- *sent = srcsg - minsg;
- *dent = dstsg;
return min(srclen, dstlen);
}
@@ -576,47 +716,35 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
}
static inline void create_wreq(struct chcr_context *ctx,
struct chcr_wr *chcr_req,
- void *req, struct sk_buff *skb,
- int kctx_len, int hash_sz,
- int is_iv,
+ struct crypto_async_request *req,
+ unsigned int imm,
+ int hash_sz,
+ unsigned int len16,
unsigned int sc_len,
unsigned int lcb)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
- unsigned int immdatalen = 0, nr_frags = 0;
- if (is_ofld_imm(skb)) {
- immdatalen = skb->data_len;
- iv_loc = IV_IMMEDIATE;
- } else {
- nr_frags = skb_shinfo(skb)->nr_frags;
- }
- chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
chcr_req->wreq.pld_size_hash_size =
- htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
- FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
chcr_req->wreq.len16_pkd =
- htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
- (calc_tx_flits_ofld(skb) * 8), 16)));
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
- is_iv ? iv_loc : IV_NOP, !!lcb,
- ctx->tx_qidx);
+ !!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
qid);
- chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(chcr_req->wreq)) >> 4)));
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
+ ((sizeof(chcr_req->wreq)) >> 4)));
- chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
- sizeof(chcr_req->key_ctx) +
- kctx_len + sc_len + immdatalen);
+ sizeof(chcr_req->key_ctx) + sc_len);
}
/**
@@ -629,47 +757,52 @@ static inline void create_wreq(struct chcr_context *ctx,
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
struct chcr_blkcipher_req_ctx *reqctx =
ablkcipher_request_ctx(wrparam->req);
- struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl;
+ unsigned int temp = 0, transhdr_len, dst_size;
int error;
- unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+ int nents;
+ unsigned int kctx_len;
gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
GFP_KERNEL : GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
+ struct adapter *adap = padap(c_ctx(tfm)->dev);
+ nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
+ reqctx->dst_ofst);
+ dst_size = get_space_for_phys_dsgl(nents + 1);
kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
- transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
+ CHCR_SRC_SG_SIZE, reqctx->src_ofst);
+ temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
+ * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
- chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
- FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1);
+ 0, 0, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
- 0, 1, phys_dsgl);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if ((reqctx->op == CHCR_DECRYPT_OP) &&
@@ -694,26 +827,18 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
}
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = wrparam->bytes;
- sg_param.qid = wrparam->qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto map_fail1;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
+ chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
- skb_set_transport_header(skb, transhdr_len);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
+ +(reqctx->imm ? (IV + wrparam->bytes) : 0);
+ create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
+ transhdr_len, temp,
ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
reqctx->skb = skb;
- skb_get(skb);
return skb;
-map_fail1:
- kfree_skb(skb);
err:
return ERR_PTR(error);
}
@@ -738,8 +863,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -757,8 +881,7 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -790,8 +913,7 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -822,8 +944,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned int ck_size, context_size;
u16 alignment = 0;
int err;
@@ -890,25 +1011,28 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct crypto_cipher *cipher;
int ret, i;
u8 *key;
unsigned int keylen;
+ int round = reqctx->last_req_len / AES_BLOCK_SIZE;
+ int round8 = round / 8;
cipher = ablkctx->aes_generic;
- memcpy(iv, req->info, AES_BLOCK_SIZE);
+ memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
keylen = ablkctx->enckey_len / 2;
key = ablkctx->key + keylen;
ret = crypto_cipher_setkey(cipher, key, keylen);
if (ret)
goto out;
+ /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/
+ for (i = 0; i < round8; i++)
+ gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_encrypt_one(cipher, iv, iv);
- for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
+ for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
crypto_cipher_decrypt_one(cipher, iv, iv);
@@ -982,65 +1106,60 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct sk_buff *skb;
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
int bytes;
- dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
- DMA_FROM_DEVICE);
-
- if (reqctx->skb) {
- kfree_skb(reqctx->skb);
- reqctx->skb = NULL;
- }
if (err)
- goto complete;
-
+ goto unmap;
if (req->nbytes == reqctx->processed) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_final_cipher_iv(req, fw6_pld, req->info);
goto complete;
}
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
err = -EBUSY;
- goto complete;
+ goto unmap;
}
}
- wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
- reqctx->processed);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
- reqctx->processed);
- if (!wrparam.srcsg || !reqctx->dst) {
- pr_err("Input sg list length less that nbytes\n");
- err = -EINVAL;
- goto complete;
- }
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent, &reqctx->dst_nents);
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
+ SPACE_LEFT(ablkctx->enckey_len),
+ reqctx->src_ofst, reqctx->dst_ofst);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
+ } else {
+ /* CTR mode counter overflow */
+ bytes = req->nbytes - reqctx->processed;
+ }
+ dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+ dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
if (err)
- goto complete;
+ goto unmap;
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
- wrparam.srcsg,
- reqctx->dst,
- req->nbytes - reqctx->processed,
- reqctx->iv,
+ req->src,
+ req->dst,
+ req->nbytes,
+ req->info,
reqctx->op);
goto complete;
}
@@ -1048,23 +1167,24 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR)
bytes = adjust_ctr_overflow(reqctx->iv, bytes);
- reqctx->processed += bytes;
- wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+ wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
wrparam.req = req;
wrparam.bytes = bytes;
skb = create_cipher_wr(&wrparam);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
err = PTR_ERR(skb);
- goto complete;
+ goto unmap;
}
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
+ reqctx->last_req_len = bytes;
+ reqctx->processed += bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
req->base.complete(&req->base, err);
return err;
}
@@ -1077,12 +1197,10 @@ static int process_cipher(struct ablkcipher_request *req,
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
struct cipher_wr_param wrparam;
- int bytes, nents, err = -EINVAL;
+ int bytes, err = -EINVAL;
- reqctx->newdstsg = NULL;
reqctx->processed = 0;
if (!req->info)
goto error;
@@ -1093,25 +1211,41 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- wrparam.srcsg = req->src;
- if (is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return PTR_ERR(reqctx->newdstsg);
- reqctx->dstsg = reqctx->newdstsg;
+ chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
+ /* Min dsgl size */
+ 32))) {
+ /* Can be sent as Imm*/
+ unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
+
+ dnents = sg_nents_xlen(req->dst, req->nbytes,
+ CHCR_DST_SG_SIZE, 0);
+ dnents += 1; // IV
+ phys_dsgl = get_space_for_phys_dsgl(dnents);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
+ reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+ SGE_MAX_WR_LEN;
+ bytes = IV + req->nbytes;
+
} else {
- reqctx->dstsg = req->dst;
+ reqctx->imm = 0;
}
- bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
- SPACE_LEFT(ablkctx->enckey_len),
- &wrparam.snent,
- &reqctx->dst_nents);
+
+ if (!reqctx->imm) {
+ bytes = chcr_sg_ent_in_wr(req->src, req->dst,
+ MIN_CIPHER_SG,
+ SPACE_LEFT(ablkctx->enckey_len),
+ 0, 0);
if ((bytes + reqctx->processed) >= req->nbytes)
bytes = req->nbytes - reqctx->processed;
else
bytes = ROUND_16(bytes);
- if (unlikely(bytes > req->nbytes))
+ } else {
bytes = req->nbytes;
+ }
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
@@ -1128,9 +1262,11 @@ static int process_cipher(struct ablkcipher_request *req,
} else {
- memcpy(reqctx->iv, req->info, ivsize);
+ memcpy(reqctx->iv, req->info, IV);
}
if (unlikely(bytes == 0)) {
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
+ req);
err = chcr_cipher_fallback(ablkctx->sw_cipher,
req->base.flags,
req->src,
@@ -1140,45 +1276,48 @@ static int process_cipher(struct ablkcipher_request *req,
op_type);
goto error;
}
- reqctx->processed = bytes;
- reqctx->dst = reqctx->dstsg;
reqctx->op = op_type;
+ reqctx->srcsg = req->src;
+ reqctx->dstsg = req->dst;
+ reqctx->src_ofst = 0;
+ reqctx->dst_ofst = 0;
wrparam.qid = qid;
wrparam.req = req;
wrparam.bytes = bytes;
*skb = create_cipher_wr(&wrparam);
if (IS_ERR(*skb)) {
err = PTR_ERR(*skb);
- goto error;
+ goto unmap;
}
+ reqctx->processed = bytes;
+ reqctx->last_req_len = bytes;
return 0;
+unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct sk_buff *skb = NULL;
int err;
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_ENCRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_ENCRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1186,23 +1325,22 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
struct sk_buff *skb = NULL;
int err;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ c_ctx(tfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
- CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1350,17 +1488,19 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
struct sk_buff *skb = NULL;
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
struct chcr_wr *chcr_req;
- unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = 0;
+ unsigned int kctx_len = 0, temp = 0;
u8 hash_size_in_response = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(h_ctx(tfm)->dev);
+ int error = 0;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1372,15 +1512,22 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
+ SGE_MAX_WR_LEN;
+ nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+ nents += param->bfr_len ? 1 : 0;
+ transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
+ param->sg_len), 16) * 16) :
+ (sgl_len(nents) * 8);
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb)
- return skb;
-
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+ return ERR_PTR(-ENOMEM);
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1409,37 +1556,52 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
((kctx_len +
sizeof(chcr_req->key_ctx)) >> 4));
chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-
- skb_set_transport_header(skb, transhdr_len);
- if (param->bfr_len != 0)
- write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
- param->bfr_len);
- if (param->sg_len != 0)
- write_sg_to_skb(skb, &frags, req->src, param->sg_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+ DUMMY_BYTES);
+ if (param->bfr_len != 0) {
+ req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
+ req_ctx->reqbfr, param->bfr_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
+ req_ctx->dma_addr)) {
+ error = -ENOMEM;
+ goto err;
+ }
+ req_ctx->dma_len = param->bfr_len;
+ } else {
+ req_ctx->dma_addr = 0;
+ }
+ chcr_add_hash_src_ent(req, ulptx, param);
+ /* Request up to max wr size */
+ temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
+ + param->bfr_len) : 0);
atomic_inc(&adap->chcr_stats.digest_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
- hash_size_in_response, 0, DUMMY_BYTES, 0);
+ create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
+ hash_size_in_response, transhdr_len,
+ temp, 0);
req_ctx->skb = skb;
- skb_get(skb);
return skb;
+err:
+ kfree_skb(skb);
+ return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
struct hash_wr_param params;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1453,7 +1615,9 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
-
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.opad_needed = 0;
params.more = 1;
params.last = 0;
@@ -1464,25 +1628,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
if (remainder) {
- u8 *temp;
/* Swap buffers */
- temp = req_ctx->reqbfr;
- req_ctx->reqbfr = req_ctx->skbfr;
- req_ctx->skbfr = temp;
+ swap(req_ctx->reqbfr, req_ctx->skbfr);
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
req_ctx->reqbfr, remainder, req->nbytes -
remainder);
}
req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
@@ -1499,13 +1665,12 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (is_hmac(crypto_ahash_tfm(rtfm)))
params.opad_needed = 1;
else
@@ -1528,11 +1693,11 @@ static int chcr_ahash_final(struct ahash_request *req)
params.more = 0;
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -1541,17 +1706,17 @@ static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1577,34 +1742,41 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
-
- skb = create_hash_wr(req, &params);
- if (!skb)
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
return -ENOMEM;
+ skb = create_hash_wr(req, &params);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
u8 bs;
+ int error;
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx))) {
+ h_ctx(rtfm)->tx_qidx))) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
@@ -1613,6 +1785,9 @@ static int chcr_ahash_digest(struct ahash_request *req)
params.opad_needed = 1;
else
params.opad_needed = 0;
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+ if (error)
+ return -ENOMEM;
params.last = 0;
params.more = 0;
@@ -1630,13 +1805,17 @@ static int chcr_ahash_digest(struct ahash_request *req)
}
skb = create_hash_wr(req, &params);
- if (!skb)
- return -ENOMEM;
-
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto unmap;
+ }
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+ return error;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
@@ -1646,6 +1825,8 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
+ state->is_sg_map = 0;
+ state->result = 0;
memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1661,6 +1842,8 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
req_ctx->data_len = state->data_len;
req_ctx->reqbfr = req_ctx->bfr1;
req_ctx->skbfr = req_ctx->bfr2;
+ req_ctx->is_sg_map = 0;
+ req_ctx->result = 0;
memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
@@ -1670,8 +1853,7 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
@@ -1724,8 +1906,7 @@ out:
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
unsigned int key_len)
{
- struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+ struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
unsigned short context_size = 0;
int err;
@@ -1764,6 +1945,7 @@ static int chcr_sha_init(struct ahash_request *areq)
req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
+ req_ctx->is_sg_map = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
return 0;
}
@@ -1779,8 +1961,7 @@ static int chcr_hmac_init(struct ahash_request *areq)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
- struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
+ struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
unsigned int digestsize = crypto_ahash_digestsize(rtfm);
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
@@ -1826,86 +2007,48 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
}
}
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
-{
- int nents = 0;
- int ret = 0;
-
- while (sgl) {
- if (sgl->length > CHCR_SG_SIZE)
- ret = 1;
- nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
- sgl = sg_next(sgl);
- }
- *newents = nents;
- return ret;
-}
-
-static inline void free_new_sg(struct scatterlist *sgl)
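+/* Common AEAD setup: checks key and length constraints, DMA-maps the
+ * request buffers and counts the source/AAD SG entries for the WR.
+ */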
+static int chcr_aead_common_init(struct aead_request *req,
+ unsigned short op_type)
{
- kfree(sgl);
-}
-
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents)
-{
- struct scatterlist *newsg, *sg;
- int i, len, processed = 0;
- struct page *spage;
- int offset;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int error = -EINVAL;
+ unsigned int dst_size;
+ unsigned int authsize = crypto_aead_authsize(tfm);
- newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
- if (!newsg)
- return ERR_PTR(-ENOMEM);
- sg = newsg;
- sg_init_table(sg, nents);
- offset = sgl->offset;
- spage = sg_page(sgl);
- for (i = 0; i < nents; i++) {
- len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
- sg_set_page(sg, spage, len, offset);
- processed += len;
- offset += len;
- if (offset >= PAGE_SIZE) {
- offset = offset % PAGE_SIZE;
- spage++;
- }
- if (processed == sgl->length) {
- processed = 0;
- sgl = sg_next(sgl);
- if (!sgl)
- break;
- spage = sg_page(sgl);
- offset = sgl->offset;
- }
- sg = sg_next(sg);
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+ if (op_type && req->cryptlen < authsize)
+ goto err;
+ error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+ if (error) {
+ error = -ENOMEM;
+ goto err;
}
- return newsg;
+ reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+ CHCR_SRC_SG_SIZE, 0);
+ reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+ CHCR_SRC_SG_SIZE, req->assoclen);
+ return 0;
+err:
+ return error;
}
-static int chcr_copy_assoc(struct aead_request *req,
- struct chcr_aead_ctx *ctx)
-{
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
-
- skcipher_request_set_tfm(skreq, ctx->null);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
- NULL);
-
- return crypto_skcipher_encrypt(skreq);
-}
-static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
+static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
int aadmax, int wrlen,
unsigned short op_type)
{
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
+ dst_nents > MAX_DSGL_ENT ||
(req->assoclen > aadmax) ||
- (src_nent > MAX_SKB_FRAGS) ||
- (wrlen > MAX_WR_SIZE))
+ (wrlen > SGE_MAX_WR_LEN))
return 1;
return 0;
}
@@ -1913,8 +2056,7 @@ static int chcr_aead_need_fallback(struct aead_request *req, int src_nent,
static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, aeadctx->sw_cipher);
@@ -1933,96 +2075,75 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
- unsigned int kctx_len = 0, nents;
- unsigned short stop_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, temp;
+ unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- reqctx->newdstsg = NULL;
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
- goto err;
-
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+ if (req->cryptlen == 0)
+ return NULL;
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error)
- return ERR_PTR(error);
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
+ reqctx->b0_dma = 0;
if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
null = 1;
assoclen = 0;
}
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("AUTHENC:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
+ authsize);
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; /* For IV */
+ } else {
+ dnents = 0;
}
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
+ * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+ + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* LLD is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- /* Write WR */
chcr_req = __skb_put_zero(skb, transhdr_len);
- stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
/*
* Input order is AAD,IV and Payload. where IV should be included as
@@ -2030,24 +2151,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
- (ivsize ? (assoclen + 1) : 0));
- chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+ assoclen + 1);
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1,
- (stop_offset & 0x1F0) >> 4);
+ assoclen + IV + 1,
+ (temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
- stop_offset & 0xF,
- null ? 0 : assoclen + ivsize + 1,
- stop_offset, stop_offset);
+ temp & 0xF,
+ null ? 0 : assoclen + IV + 1,
+ temp, temp);
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_CBC,
actx->auth_mode, aeadctx->hmac_ctrl,
- ivsize >> 1);
+ IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
if (op_type == CHCR_ENCRYPT_OP)
@@ -2060,41 +2181,312 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
-
+ memcpy(reqctx->iv, req->iv, IV);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
-
- skb_set_transport_header(skb, transhdr_len);
-
- if (assoclen) {
- /* AAD buffer in */
- write_sg_to_skb(skb, &frags, req->src, assoclen);
-
- }
- write_buffer_to_skb(skb, &frags, req->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
+
return ERR_PTR(error);
}
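+/* DMA-map the IV buffer and the source/destination scatterlists of an AEAD
+ * request; when src == dst a single bidirectional mapping is used.
+ */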
+static int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ int error;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return 0;
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+
+static void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int dst_size;
+
+ dst_size = req->assoclen + req->cryptlen + (op_type ?
+ -authsize : authsize);
+ if (!req->cryptlen || !dst_size)
+ return;
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
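+/* Add the AEAD input (B0 if present, AAD, IV, payload) to the WR: copied
+ * inline for immediate requests, otherwise referenced through a ULPTX SGL.
+ */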
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (reqctx->b0_dma) {
+ memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
+ buf += reqctx->b0_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, assoclen, 0);
+ buf += assoclen;
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, req->cryptlen, req->assoclen);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_dma)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+ &reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+ req->assoclen);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
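+/* Build the destination PHYS_DSGL covering B0 (if any), AAD, IV and the
+ * ciphertext/plaintext area adjusted for the authentication tag.
+ */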
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct dsgl_walk dsgl_walk;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ u32 temp;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ if (reqctx->b0_dma)
+ dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ temp = req->cryptlen + (op_type ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ memcpy(buf, reqctx->iv, IV);
+ buf += IV;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, wrparam->bytes, reqctx->processed);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
+ reqctx->src_ofst);
+ reqctx->srcsg = ulp_walk.last_sg;
+ reqctx->src_ofst = ulp_walk.last_sg_len;
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct dsgl_walk dsgl_walk;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+ dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+ dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
+ reqctx->dst_ofst);
+ reqctx->dstsg = dsgl_walk.last_sg;
+ reqctx->dst_ofst = dsgl_walk.last_sg_len;
+
+ dsgl_walk_end(&dsgl_walk, qid);
+}
+
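+/* Add the hash input to the WR: the partial buffer (if any) plus req->src,
+ * copied inline for immediate requests or referenced through a ULPTX SGL.
+ */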
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
+{
+ struct ulptx_walk ulp_walk;
+ struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+
+ if (reqctx->imm) {
+ u8 *buf = (u8 *)ulptx;
+
+ if (param->bfr_len) {
+ memcpy(buf, reqctx->reqbfr, param->bfr_len);
+ buf += param->bfr_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+ buf, param->sg_len, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (param->bfr_len)
+ ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+ &reqctx->dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
+ 0);
+ ulptx_walk_end(&ulp_walk);
+ }
+}
+
+
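+/* DMA-map req->src for a hash request; is_sg_map records whether the
+ * scatterlist is currently mapped.
+ */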
+static inline int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ int error = 0;
+
+ if (!req->nbytes)
+ return 0;
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ return -ENOMEM;
+ req_ctx->is_sg_map = 1;
+ return 0;
+}
+
+static inline void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
+{
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return;
+
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ req_ctx->is_sg_map = 0;
+
+}
+
+
+static int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ int error;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, reqctx->iv_dma))
+ return -ENOMEM;
+
+ if (req->src == req->dst) {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+ error = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ if (!error)
+ goto err;
+ error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ if (!error) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+}
+static void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
+{
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+
+ dma_unmap_single(dev, reqctx->iv_dma, IV,
+ DMA_BIDIRECTIONAL);
+ if (req->src == req->dst) {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, req->src, sg_nents(req->src),
+ DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_FROM_DEVICE);
+ }
+}
+
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
@@ -2179,15 +2571,13 @@ static int ccm_format_packet(struct aead_request *req,
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
unsigned int dst_size,
struct aead_request *req,
- unsigned short op_type,
- struct chcr_context *chcrctx)
+ unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
- unsigned int ivsize = AES_BLOCK_SIZE;
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = chcrctx->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2200,7 +2590,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ (assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2210,14 +2600,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, (ivsize ? (assoclen + 1) : 0) +
- ccm_xtra);
+ 2, assoclen + 1 + ccm_xtra);
sec_cpl->pldlen =
- htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ htonl(assoclen + IV + req->cryptlen + ccm_xtra);
 /* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
1, assoclen + ccm_xtra, assoclen
- + ivsize + 1 + ccm_xtra, 0);
+ + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2226,10 +2615,10 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
cipher_mode, mac_mode,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
- 1, dst_size);
+ 0, dst_size);
}
int aead_ccm_validate_input(unsigned short op_type,
@@ -2249,131 +2638,83 @@ int aead_ccm_validate_input(unsigned short op_type,
return -EINVAL;
}
}
- if (aeadctx->enckey_len == 0) {
- pr_err("CCM: Encryption key not set\n");
- return -EINVAL;
- }
return 0;
}
-unsigned int fill_aead_req_fields(struct sk_buff *skb,
- struct aead_request *req,
- struct scatterlist *src,
- unsigned int ivsize,
- struct chcr_aead_ctx *aeadctx)
-{
- unsigned int frags = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- /* b0 and aad length(if available) */
-
- write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
- (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
- if (req->assoclen) {
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
- write_sg_to_skb(skb, &frags, req->src,
- req->assoclen - 8);
- else
- write_sg_to_skb(skb, &frags, req->src, req->assoclen);
- }
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- if (req->cryptlen)
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
-
- return frags;
-}
-
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
unsigned short qid,
int size,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents;
- unsigned int sub_type;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
+ reqctx->b0_dma = 0;
+ sub_type = get_aead_subtype(tfm);
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen -= 8;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
authsize);
- reqctx->newdstsg = NULL;
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
+ error = chcr_aead_common_init(req, op_type);
+ if (error)
+ return ERR_PTR(error);
- sub_type = get_aead_subtype(tfm);
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
- if (error) {
- pr_err("AAD copy to destination buffer fails\n");
- return ERR_PTR(error);
- }
- }
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, req->assoclen);
- } else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, req->assoclen);
- }
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("CCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
- }
+
+ reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
-
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; /* For IV and B0 */
+ } else {
+ dnents = 0;
+ }
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
- T6_MAX_AAD_SIZE - 18,
- transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->b0_len) <= SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
+ reqctx->b0_len), 16) * 16) :
+ (sgl_len(reqctx->src_nents + reqctx->aad_nents +
+ MIN_CCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+ reqctx->b0_len, transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
-
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
- chcr_req = __skb_put_zero(skb, transhdr_len);
+ chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
- fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
@@ -2381,31 +2722,37 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
16), aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
error = ccm_format_packet(req, aeadctx, sub_type, op_type);
if (error)
goto dstmap_fail;
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
+ reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ &reqctx->scratch_pad, reqctx->b0_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
+ reqctx->b0_dma)) {
+ error = -ENOMEM;
goto dstmap_fail;
+ }
+
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
- skb_set_transport_header(skb, transhdr_len);
- frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ reqctx->b0_len) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+ transhdr_len, temp, 0);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
+
return skb;
dstmap_fail:
kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2415,115 +2762,84 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
unsigned short op_type)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct sk_buff *skb = NULL;
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct phys_sge_parm sg_param;
- struct scatterlist *src;
- unsigned int frags = 0, transhdr_len;
- unsigned int ivsize = AES_BLOCK_SIZE;
- unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
- unsigned char tag_offset = 0;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len, dnents = 0;
+ unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
- int error = -EINVAL, src_nent;
+ int error = -EINVAL;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
- struct adapter *adap = padap(ctx->dev);
-
- reqctx->newdstsg = NULL;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
- /* validate key size */
- if (aeadctx->enckey_len == 0)
- goto err;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
- if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
- goto err;
- src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
- if (src_nent < 0)
- goto err;
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ assoclen = req->assoclen - 8;
- src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
- if (req->src != req->dst) {
- error = chcr_copy_assoc(req, aeadctx);
+ reqctx->b0_dma = 0;
+ dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
+ error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- }
-
- if (dst_size && is_newsg(req->dst, &nents)) {
- reqctx->newdstsg = alloc_new_sg(req->dst, nents);
- if (IS_ERR(reqctx->newdstsg))
- return ERR_CAST(reqctx->newdstsg);
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- reqctx->newdstsg, assoclen);
+ if (dst_size) {
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst,
+ req->cryptlen + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_GCM_SG; /* For IV */
} else {
- if (req->src == req->dst)
- reqctx->dst = src;
- else
- reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
- req->dst, assoclen);
- }
-
- reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
- (op_type ? -authsize : authsize));
- if (reqctx->dst_nents < 0) {
- pr_err("GCM:Invalid Destination sg entries\n");
- error = -EINVAL;
- goto err;
+ dnents = 0;
}
-
-
- dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
- T6_MAX_AAD_SIZE,
- transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
- op_type)) {
+ reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+ temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
+ req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
+ reqctx->aad_nents + MIN_GCM_SG) * 8);
+ transhdr_len += temp;
+ transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+ transhdr_len, op_type)) {
atomic_inc(&adap->chcr_stats.fallback);
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
+ op_type);
return ERR_PTR(chcr_aead_fallback(req, op_type));
}
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ skb = alloc_skb(SGE_MAX_WR_LEN, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- /* NIC driver is going to write the sge hdr. */
- skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-
chcr_req = __skb_put_zero(skb, transhdr_len);
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
- assoclen = req->assoclen - 8;
-
- tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ /* Offset of tag from end */
+ temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- ctx->dev->rx_channel_id, 2, (ivsize ?
- (assoclen + 1) : 0));
+ a_ctx(tfm)->dev->rx_channel_id, 2,
+ (assoclen + 1));
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + ivsize + req->cryptlen);
+ htonl(assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
- assoclen + ivsize + 1, 0);
+ assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
- tag_offset, tag_offset);
+ FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH,
- aeadctx->hmac_ctrl, ivsize >> 1);
+ aeadctx->hmac_ctrl, IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 1, dst_size);
+ 0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
@@ -2534,39 +2850,28 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, 8);
+ memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, 12);
+ memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
}
*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- sg_param.nents = reqctx->dst_nents;
- sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
- sg_param.qid = qid;
- error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
- reqctx->dst, &sg_param);
- if (error)
- goto dstmap_fail;
+ ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- skb_set_transport_header(skb, transhdr_len);
- write_sg_to_skb(skb, &frags, req->src, assoclen);
- write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
- write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
+ chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
atomic_inc(&adap->chcr_stats.aead_rqst);
- create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
- sizeof(struct cpl_rx_phys_dsgl) + dst_size,
- reqctx->verify);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+ kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
- skb_get(skb);
+ reqctx->op = op_type;
return skb;
-dstmap_fail:
- /* ivmap_fail: */
- kfree_skb(skb);
err:
- free_new_sg(reqctx->newdstsg);
- reqctx->newdstsg = NULL;
+ chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
return ERR_PTR(error);
}
@@ -2574,8 +2879,7 @@ err:
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct aead_alg *alg = crypto_aead_alg(tfm);
aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
@@ -2586,25 +2890,20 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
sizeof(struct aead_request) +
crypto_aead_reqsize(aeadctx->sw_cipher)));
- aeadctx->null = crypto_get_default_null_skcipher();
- if (IS_ERR(aeadctx->null))
- return PTR_ERR(aeadctx->null);
- return chcr_device_init(ctx);
+ return chcr_device_init(a_ctx(tfm));
}
static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
- crypto_put_default_null_skcipher();
crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
aeadctx->mayverify = VERIFY_HW;
@@ -2613,7 +2912,7 @@ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
u32 maxauth = crypto_aead_maxauthsize(tfm);
/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
@@ -2651,7 +2950,7 @@ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2691,7 +2990,7 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_8:
@@ -2717,7 +3016,7 @@ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
switch (authsize) {
case ICV_4:
@@ -2760,8 +3059,7 @@ static int chcr_ccm_common_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
unsigned char ck_size, mk_size;
int key_ctx_size = 0;
@@ -2794,8 +3092,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
@@ -2813,8 +3110,7 @@ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
int error;
if (keylen < 3) {
@@ -2840,8 +3136,7 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(aead);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
struct crypto_cipher *cipher;
unsigned int ck_size;
@@ -2913,8 +3208,7 @@ out:
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
@@ -3034,8 +3328,7 @@ out:
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
- struct chcr_context *ctx = crypto_aead_ctx(authenc);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
struct crypto_authenc_keys keys;
int err;
@@ -3107,7 +3400,7 @@ static int chcr_aead_encrypt(struct aead_request *req)
static int chcr_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int size;
@@ -3140,30 +3433,29 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct chcr_context *ctx = crypto_aead_ctx(tfm);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
- if (!ctx->dev) {
+ if (!a_ctx(tfm)->dev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
- u_ctx = ULD_CTX(ctx);
+ u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- ctx->tx_qidx)) {
+ a_ctx(tfm)->tx_qidx)) {
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
}
/* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
op_type);
if (IS_ERR(skb) || !skb)
return PTR_ERR(skb);
skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
}
@@ -3385,7 +3677,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_aead_ctx) +
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_gcm_setauthsize,
@@ -3405,7 +3697,7 @@ static struct chcr_alg_template driver_algs[] = {
sizeof(struct chcr_gcm_ctx),
},
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = GHASH_DIGEST_SIZE,
.setkey = chcr_gcm_setkey,
.setauthsize = chcr_4106_4309_setauthsize,
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 583008de51a3..96c9335ee728 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -176,21 +176,21 @@
KEY_CONTEXT_SALT_PRESENT_V(1) | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
-#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \
+#define FILL_WR_OP_CCTX_SIZE \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \
FW_CRYPTO_LOOKASIDE_WR) | \
FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \
- FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
- FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
+ FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((0)) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(0) | \
+ FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(0))
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, lcb, fid) \
htonl( \
FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
- FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
+ FW_CRYPTO_LOOKASIDE_WR_IV_V((IV_NOP)) | \
FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
#define FILL_ULPTX_CMD_DEST(cid, qid) \
@@ -214,27 +214,22 @@
calc_tx_flits_ofld(skb) * 8), 16)))
#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\
- ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1))
-
+ ULP_TX_SC_MORE_V((immdatalen)))
#define MAX_NK 8
-#define CRYPTO_MAX_IMM_TX_PKT_LEN 256
-#define MAX_WR_SIZE 512
#define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0)
#define MAX_DSGL_ENT 32
-#define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2)
#define MIN_CIPHER_SG 1 /* IV */
-#define MIN_AUTH_SG 2 /*IV + AAD*/
-#define MIN_GCM_SG 2 /* IV + AAD*/
+#define MIN_AUTH_SG 1 /* IV */
+#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 3 /*IV+AAD+B0*/
+#define MIN_CCM_SG 2 /*IV+B0*/
#define SPACE_LEFT(len) \
- ((MAX_WR_SIZE - WR_MIN_LEN - (len)))
+ ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
- 48, 64, 72, 88,
- 96, 112, 120, 136,
- 144, 160, 168, 184,
- 192};
+unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376};
unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
112, 112, 128, 128, 144, 144, 160, 160,
192, 192, 208, 208, 224, 224, 240, 240,
@@ -258,10 +253,8 @@ struct hash_wr_param {
struct cipher_wr_param {
struct ablkcipher_request *req;
- struct scatterlist *srcsg;
char *iv;
int bytes;
- short int snent;
unsigned short qid;
};
enum {
@@ -299,31 +292,11 @@ enum {
ICV_16 = 16
};
-struct hash_op_params {
- unsigned char mk_size;
- unsigned char pad_align;
- unsigned char auth_mode;
- char hash_name[MAX_HASH_NAME];
- unsigned short block_size;
- unsigned short word_size;
- unsigned short ipad_size;
-};
-
struct phys_sge_pairs {
__be16 len[8];
__be64 addr[8];
};
-struct phys_sge_parm {
- unsigned int nents;
- unsigned int obsize;
- unsigned short qid;
-};
-
-struct crypto_result {
- struct completion completion;
- int err;
-};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index b6dd9cbe815f..f5a2624081dc 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -154,15 +154,15 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
struct uld_ctx *u_ctx;
/* Create the device and add it in the device list */
+ if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* Create the device and add it in the device list */
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
- if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
- u_ctx = ERR_PTR(-ENOMEM);
- goto out;
- }
u_ctx->lldi = *lld;
out:
return u_ctx;
@@ -224,7 +224,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
- pr_err("ULD register fail: No chcr crypto support in cxgb4");
+ pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index c9a19b2a1e9f..94e7412f6164 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -89,7 +89,7 @@ struct uld_ctx {
struct chcr_dev *dev;
};
-struct uld_ctx * assign_chcr_device(void);
+struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 30af1ee17b87..94a87e3ad9bc 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -149,9 +149,23 @@
#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
-#define CHCR_SG_SIZE 2048
+#define CHCR_SRC_SG_SIZE (0x10000 - sizeof(int))
+#define CHCR_DST_SG_SIZE 2048
-/* Aligned to 128 bit boundary */
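+/* Accessors returning the driver's chcr_context for each transform type */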
+static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
+{
+ return crypto_aead_ctx(tfm);
+}
+
+static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+{
+ return crypto_ablkcipher_ctx(tfm);
+}
+
+static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
struct ablk_ctx {
struct crypto_skcipher *sw_cipher;
@@ -165,16 +179,39 @@ struct ablk_ctx {
};
struct chcr_aead_reqctx {
struct sk_buff *skb;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
+ dma_addr_t iv_dma;
+ dma_addr_t b0_dma;
+ unsigned int b0_len;
+ unsigned int op;
+ short int aad_nents;
+ short int src_nents;
short int dst_nents;
+ u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
};
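+/* Cursor state used while building a ULPTX SGL from scatterlist entries */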
+struct ulptx_walk {
+ struct ulptx_sgl *sgl;
+ unsigned int nents;
+ unsigned int pair_idx;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct ulptx_sge_pair *pair;
+
+};
+
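+/* Cursor state used while building a destination PHYS_DSGL */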
+struct dsgl_walk {
+ unsigned int nents;
+ unsigned int last_sg_len;
+ struct scatterlist *last_sg;
+ struct cpl_rx_phys_dsgl *dsgl;
+ struct phys_sge_pairs *to;
+};
+
+
+
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -195,7 +232,6 @@ struct __aead_ctx {
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
- struct crypto_skcipher *null;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
@@ -231,8 +267,11 @@ struct chcr_ahash_req_ctx {
u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 *reqbfr;
u8 *skbfr;
+ dma_addr_t dma_addr;
+ u32 dma_len;
u8 reqlen;
- /* DMA the partial hash in it */
+ u8 imm;
+ u8 is_sg_map;
u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
u64 data_len; /* Data len till time */
/* SKB which is being sent to the hardware for processing */
@@ -241,14 +280,15 @@ struct chcr_ahash_req_ctx {
struct chcr_blkcipher_req_ctx {
struct sk_buff *skb;
- struct scatterlist srcffwd[2];
- struct scatterlist dstffwd[2];
struct scatterlist *dstsg;
- struct scatterlist *dst;
- struct scatterlist *newdstsg;
unsigned int processed;
+ unsigned int last_req_len;
+ struct scatterlist *srcsg;
+ unsigned int src_ofst;
+ unsigned int dst_ofst;
unsigned int op;
- short int dst_nents;
+ dma_addr_t iv_dma;
+ u16 imm;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
};
@@ -262,24 +302,6 @@ struct chcr_alg_template {
} alg;
};
-struct chcr_req_ctx {
- union {
- struct ahash_request *ahash_req;
- struct aead_request *aead_req;
- struct ablkcipher_request *ablk_req;
- } req;
- union {
- struct chcr_ahash_req_ctx *ahash_ctx;
- struct chcr_aead_reqctx *reqctx;
- struct chcr_blkcipher_req_ctx *ablk_ctx;
- } ctx;
-};
-
-struct sge_opaque_hdr {
- void *dev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
unsigned short qid,
int size,
@@ -290,10 +312,39 @@ static int chcr_aead_op(struct aead_request *req_base,
int size,
create_wr_t create_wr_fn);
static inline int get_aead_subtype(struct crypto_aead *aead);
-static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
-static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
- unsigned int nents);
-static inline void free_new_sg(struct scatterlist *sgl);
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err);
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
+ *req, unsigned short op_type);
+static inline void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid);
+static inline void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type);
+static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+static int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req);
+static void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req);
+static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
+int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
+static inline void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+static inline int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req);
+static inline void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3980f946874f..74feb6227101 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -308,10 +308,8 @@ unmap_cache:
ctx->base.cache_sz = 0;
}
free_cache:
- if (ctx->base.cache) {
- kfree(ctx->base.cache);
- ctx->base.cache = NULL;
- }
+ kfree(ctx->base.cache);
+ ctx->base.cache = NULL;
unlock:
spin_unlock_bh(&priv->ring[ring].egress_lock);
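The free_cache hunk above relies on kfree() being defined as a no-op for NULL pointers, so the explicit check can be dropped. A minimal sketch of the idiom, using a hypothetical context struct rather than the safexcel one:

#include <linux/slab.h>

struct demo_ctx {
	void *cache;		/* may or may not have been allocated */
	unsigned int cache_sz;
};

static void demo_ctx_free_cache(struct demo_ctx *ctx)
{
	kfree(ctx->cache);	/* kfree(NULL) does nothing, no guard needed */
	ctx->cache = NULL;	/* clear so a later free cannot double-free */
	ctx->cache_sz = 0;
}
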
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index dadc4a808df5..8705b28eb02c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -534,7 +534,6 @@ static void release_ixp_crypto(struct device *dev)
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl),
crypt_virt, crypt_phys);
}
- return;
}
static void reset_sa_dir(struct ix_sa_dir *dir)
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 6e7a5c77a00a..293832488cc9 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -34,10 +34,6 @@
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128
-static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
-module_param_named(allhwsupport, allhwsupport, int, 0444);
-MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");
-
struct mv_cesa_dev *cesa_dev;
struct crypto_async_request *
@@ -76,8 +72,6 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
-
- return;
}
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
@@ -183,8 +177,7 @@ int mv_cesa_queue_req(struct crypto_async_request *req,
spin_lock_bh(&engine->lock);
ret = crypto_enqueue_request(&engine->queue, req);
if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
- (ret == -EINPROGRESS ||
- (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ (ret == -EINPROGRESS || ret == -EBUSY))
mv_cesa_tdma_chain(engine, creq);
spin_unlock_bh(&engine->lock);
@@ -202,7 +195,7 @@ static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
int i, j;
for (i = 0; i < cesa->caps->ncipher_algs; i++) {
- ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
+ ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
if (ret)
goto err_unregister_crypto;
}
@@ -222,7 +215,7 @@ err_unregister_ahash:
err_unregister_crypto:
for (j = 0; j < i; j++)
- crypto_unregister_alg(cesa->caps->cipher_algs[j]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
return ret;
}
@@ -235,10 +228,10 @@ static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
for (i = 0; i < cesa->caps->ncipher_algs; i++)
- crypto_unregister_alg(cesa->caps->cipher_algs[i]);
+ crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}
-static struct crypto_alg *orion_cipher_algs[] = {
+static struct skcipher_alg *orion_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -254,7 +247,7 @@ static struct ahash_alg *orion_ahash_algs[] = {
&mv_ahmac_sha1_alg,
};
-static struct crypto_alg *armada_370_cipher_algs[] = {
+static struct skcipher_alg *armada_370_cipher_algs[] = {
&mv_cesa_ecb_des_alg,
&mv_cesa_cbc_des_alg,
&mv_cesa_ecb_des3_ede_alg,
@@ -459,9 +452,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
caps = match->data;
}
- if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
- return -ENOTSUPP;
-
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
if (!cesa)
return -ENOMEM;
@@ -599,9 +589,16 @@ static int mv_cesa_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id mv_cesa_plat_id_table[] = {
+ { .name = "mv_crypto" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
+
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
.remove = mv_cesa_remove,
+ .id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
.of_match_table = mv_cesa_of_match_table,
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b7872f62f674..d63a6ee905c9 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -1,9 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MARVELL_CESA_H__
#define __MARVELL_CESA_H__
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include <linux/crypto.h>
#include <linux/dmapool.h>
@@ -372,7 +374,7 @@ struct mv_cesa_engine;
struct mv_cesa_caps {
int nengines;
bool has_tdma;
- struct crypto_alg **cipher_algs;
+ struct skcipher_alg **cipher_algs;
int ncipher_algs;
struct ahash_alg **ahash_algs;
int nahash_algs;
@@ -538,12 +540,12 @@ struct mv_cesa_sg_std_iter {
};
/**
- * struct mv_cesa_ablkcipher_std_req - cipher standard request
+ * struct mv_cesa_skcipher_std_req - cipher standard request
* @op: operation context
* @offset: current operation offset
* @size: size of the crypto operation
*/
-struct mv_cesa_ablkcipher_std_req {
+struct mv_cesa_skcipher_std_req {
struct mv_cesa_op_ctx op;
unsigned int offset;
unsigned int size;
@@ -551,14 +553,14 @@ struct mv_cesa_ablkcipher_std_req {
};
/**
- * struct mv_cesa_ablkcipher_req - cipher request
+ * struct mv_cesa_skcipher_req - cipher request
* @req: type specific request information
* @src_nents: number of entries in the src sg list
* @dst_nents: number of entries in the dest sg list
*/
-struct mv_cesa_ablkcipher_req {
+struct mv_cesa_skcipher_req {
struct mv_cesa_req base;
- struct mv_cesa_ablkcipher_std_req std;
+ struct mv_cesa_skcipher_std_req std;
int src_nents;
int dst_nents;
};
@@ -763,7 +765,7 @@ static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
* the backlog and will be processed later. There's no need to
* clean it up.
*/
- if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ if (ret == -EBUSY)
return false;
/* Request wasn't queued, we need to clean it up */
@@ -868,11 +870,11 @@ extern struct ahash_alg mv_ahmac_md5_alg;
extern struct ahash_alg mv_ahmac_sha1_alg;
extern struct ahash_alg mv_ahmac_sha256_alg;
-extern struct crypto_alg mv_cesa_ecb_des_alg;
-extern struct crypto_alg mv_cesa_cbc_des_alg;
-extern struct crypto_alg mv_cesa_ecb_des3_ede_alg;
-extern struct crypto_alg mv_cesa_cbc_des3_ede_alg;
-extern struct crypto_alg mv_cesa_ecb_aes_alg;
-extern struct crypto_alg mv_cesa_cbc_aes_alg;
+extern struct skcipher_alg mv_cesa_ecb_des_alg;
+extern struct skcipher_alg mv_cesa_cbc_des_alg;
+extern struct skcipher_alg mv_cesa_ecb_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_cbc_des3_ede_alg;
+extern struct skcipher_alg mv_cesa_ecb_aes_alg;
+extern struct skcipher_alg mv_cesa_cbc_aes_alg;
#endif /* __MARVELL_CESA_H__ */
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 098871a22a54..0ae84ec9e21c 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -32,23 +32,23 @@ struct mv_cesa_aes_ctx {
struct crypto_aes_ctx aes;
};
-struct mv_cesa_ablkcipher_dma_iter {
+struct mv_cesa_skcipher_dma_iter {
struct mv_cesa_dma_iter base;
struct mv_cesa_sg_dma_iter src;
struct mv_cesa_sg_dma_iter dst;
};
static inline void
-mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
- struct ablkcipher_request *req)
+mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
+ struct skcipher_request *req)
{
- mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
+ mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}
static inline bool
-mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
+mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
iter->src.op_offset = 0;
iter->dst.op_offset = 0;
@@ -57,9 +57,9 @@ mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
}
static inline void
-mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (req->dst != req->src) {
dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
@@ -73,20 +73,20 @@ mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
mv_cesa_dma_cleanup(&creq->base);
}
-static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
+static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_cleanup(req);
+ mv_cesa_skcipher_dma_cleanup(req);
}
-static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
+static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
- size_t len = min_t(size_t, req->nbytes - sreq->offset,
+ size_t len = min_t(size_t, req->cryptlen - sreq->offset,
CESA_SA_SRAM_PAYLOAD_SIZE);
mv_cesa_adjust_op(engine, &sreq->op);
@@ -114,11 +114,11 @@ static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
-static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
- u32 status)
+static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
+ u32 status)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
@@ -127,122 +127,130 @@ static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
sreq->size, sreq->offset);
sreq->offset += len;
- if (sreq->offset < req->nbytes)
+ if (sreq->offset < req->cryptlen)
return -EINPROGRESS;
return 0;
}
-static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
- u32 status)
+static int mv_cesa_skcipher_process(struct crypto_async_request *req,
+ u32 status)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_req *basereq = &creq->base;
if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
- return mv_cesa_ablkcipher_std_process(ablkreq, status);
+ return mv_cesa_skcipher_std_process(skreq, status);
return mv_cesa_dma_process(basereq, status);
}
-static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
+static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base);
else
- mv_cesa_ablkcipher_std_step(ablkreq);
+ mv_cesa_skcipher_std_step(skreq);
}
static inline void
-mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_req *basereq = &creq->base;
mv_cesa_dma_prepare(basereq, basereq->engine);
}
static inline void
-mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
+mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
sreq->size = 0;
sreq->offset = 0;
}
-static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
- struct mv_cesa_engine *engine)
+static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
+ struct mv_cesa_engine *engine)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
creq->base.engine = engine;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- mv_cesa_ablkcipher_dma_prepare(ablkreq);
+ mv_cesa_skcipher_dma_prepare(skreq);
else
- mv_cesa_ablkcipher_std_prepare(ablkreq);
+ mv_cesa_skcipher_std_prepare(skreq);
}
static inline void
-mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
+mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
- mv_cesa_ablkcipher_cleanup(ablkreq);
+ mv_cesa_skcipher_cleanup(skreq);
}
static void
-mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct skcipher_request *skreq = skcipher_request_cast(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
struct mv_cesa_engine *engine = creq->base.engine;
unsigned int ivsize;
- atomic_sub(ablkreq->nbytes, &engine->load);
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+ atomic_sub(skreq->cryptlen, &engine->load);
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
struct mv_cesa_req *basereq;
basereq = &creq->base;
- memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+ memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
ivsize);
} else {
- memcpy_fromio(ablkreq->info,
+ memcpy_fromio(skreq->iv,
engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
ivsize);
}
}
-static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
- .step = mv_cesa_ablkcipher_step,
- .process = mv_cesa_ablkcipher_process,
- .cleanup = mv_cesa_ablkcipher_req_cleanup,
- .complete = mv_cesa_ablkcipher_complete,
+static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
+ .step = mv_cesa_skcipher_step,
+ .process = mv_cesa_skcipher_process,
+ .cleanup = mv_cesa_skcipher_req_cleanup,
+ .complete = mv_cesa_skcipher_complete,
};
-static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
+static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
- struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ void *ctx = crypto_tfm_ctx(tfm);
+
+ memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
+}
+
+static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+ struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
+ ctx->ops = &mv_cesa_skcipher_req_ops;
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);
+ crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
+ sizeof(struct mv_cesa_skcipher_req));
return 0;
}
-static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
int remaining;
int offset;
@@ -251,7 +259,7 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
ret = crypto_aes_expand_key(&ctx->aes, key, len);
if (ret) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return ret;
}
@@ -264,16 +272,16 @@ static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
int ret;
if (len != DES_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -288,14 +296,14 @@ static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
-static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
+static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
if (len != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -304,14 +312,14 @@ static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct mv_cesa_req *basereq = &creq->base;
- struct mv_cesa_ablkcipher_dma_iter iter;
+ struct mv_cesa_skcipher_dma_iter iter;
bool skip_ctx = false;
int ret;
unsigned int ivsize;
@@ -339,7 +347,7 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
}
mv_cesa_tdma_desc_iter_init(&basereq->chain);
- mv_cesa_ablkcipher_req_iter_init(&iter, req);
+ mv_cesa_skcipher_req_iter_init(&iter, req);
do {
struct mv_cesa_op_ctx *op;
@@ -370,10 +378,10 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
if (ret)
goto err_free_tdma;
- } while (mv_cesa_ablkcipher_req_iter_next_op(&iter));
+ } while (mv_cesa_skcipher_req_iter_next_op(&iter));
/* Add output data for IV */
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
+ ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
CESA_SA_DATA_SRAM_OFFSET,
CESA_TDMA_SRC_IN_SRAM, flags);
@@ -399,11 +407,11 @@ err_unmap_src:
}
static inline int
-mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
- const struct mv_cesa_op_ctx *op_templ)
+mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
+ const struct mv_cesa_op_ctx *op_templ)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct mv_cesa_skcipher_std_req *sreq = &creq->std;
struct mv_cesa_req *basereq = &creq->base;
sreq->op = *op_templ;
@@ -414,23 +422,23 @@ mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
return 0;
}
-static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int blksize = crypto_skcipher_blocksize(tfm);
int ret;
- if (!IS_ALIGNED(req->nbytes, blksize))
+ if (!IS_ALIGNED(req->cryptlen, blksize))
return -EINVAL;
- creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
+ creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (creq->src_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of src SG");
return creq->src_nents;
}
- creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (creq->dst_nents < 0) {
dev_err(cesa_dev->dev, "Invalid number of dst SG");
return creq->dst_nents;
@@ -440,36 +448,36 @@ static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_OP_MSK);
if (cesa_dev->caps->has_tdma)
- ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
else
- ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_std_req_init(req, tmpl);
return ret;
}
-static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
- struct mv_cesa_op_ctx *tmpl)
+static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
+ struct mv_cesa_op_ctx *tmpl)
{
int ret;
- struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
+ struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
struct mv_cesa_engine *engine;
- ret = mv_cesa_ablkcipher_req_init(req, tmpl);
+ ret = mv_cesa_skcipher_req_init(req, tmpl);
if (ret)
return ret;
- engine = mv_cesa_select_engine(req->nbytes);
- mv_cesa_ablkcipher_prepare(&req->base, engine);
+ engine = mv_cesa_select_engine(req->cryptlen);
+ mv_cesa_skcipher_prepare(&req->base, engine);
ret = mv_cesa_queue_req(&req->base, &creq->base);
if (mv_cesa_req_needs_cleanup(&req->base, ret))
- mv_cesa_ablkcipher_cleanup(req);
+ mv_cesa_skcipher_cleanup(req);
return ret;
}
-static int mv_cesa_des_op(struct ablkcipher_request *req,
+static int mv_cesa_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -479,10 +487,10 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -493,7 +501,7 @@ static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -504,41 +512,38 @@ static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "mv-ecb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_ecb_des_encrypt,
- .decrypt = mv_cesa_ecb_des_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_ecb_des_encrypt,
+ .decrypt = mv_cesa_ecb_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "mv-ecb-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);
return mv_cesa_des_op(req, tmpl);
}
-static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -547,7 +552,7 @@ static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -556,31 +561,28 @@ static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "mv-cbc-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = mv_cesa_des_setkey,
- .encrypt = mv_cesa_cbc_des_encrypt,
- .decrypt = mv_cesa_cbc_des_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des_alg = {
+ .setkey = mv_cesa_des_setkey,
+ .encrypt = mv_cesa_cbc_des_encrypt,
+ .decrypt = mv_cesa_cbc_des_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "mv-cbc-des",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -590,10 +592,10 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -605,7 +607,7 @@ static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -617,39 +619,36 @@ static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "mv-ecb-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_ecb_des3_ede_encrypt,
- .decrypt = mv_cesa_ecb_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_ecb_des3_ede_encrypt,
+ .decrypt = mv_cesa_ecb_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "mv-ecb-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
- memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);
return mv_cesa_des3_op(req, tmpl);
}
-static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -661,7 +660,7 @@ static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -673,31 +672,28 @@ static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_des3_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "mv-cbc-des3-ede",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = mv_cesa_des3_ede_setkey,
- .encrypt = mv_cesa_cbc_des3_ede_encrypt,
- .decrypt = mv_cesa_cbc_des3_ede_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
+ .setkey = mv_cesa_des3_ede_setkey,
+ .encrypt = mv_cesa_cbc_des3_ede_encrypt,
+ .decrypt = mv_cesa_cbc_des3_ede_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "mv-cbc-des3-ede",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -724,10 +720,10 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
CESA_SA_DESC_CFG_CRYPTM_MSK |
CESA_SA_DESC_CFG_AES_LEN_MSK);
- return mv_cesa_ablkcipher_queue_req(req, tmpl);
+ return mv_cesa_skcipher_queue_req(req, tmpl);
}
-static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -738,7 +734,7 @@ static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -749,40 +745,37 @@ static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_ecb_aes_encrypt,
- .decrypt = mv_cesa_ecb_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_ecb_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_ecb_aes_encrypt,
+ .decrypt = mv_cesa_ecb_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "mv-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
-static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
+static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
struct mv_cesa_op_ctx *tmpl)
{
mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
CESA_SA_DESC_CFG_CRYPTCM_MSK);
- memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);
+ memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);
return mv_cesa_aes_op(req, tmpl);
}
-static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -791,7 +784,7 @@ static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
+static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
struct mv_cesa_op_ctx tmpl;
@@ -800,26 +793,23 @@ static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
return mv_cesa_cbc_aes_op(req, &tmpl);
}
-struct crypto_alg mv_cesa_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cesa_ablkcipher_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = mv_cesa_aes_setkey,
- .encrypt = mv_cesa_cbc_aes_encrypt,
- .decrypt = mv_cesa_cbc_aes_decrypt,
- },
+struct skcipher_alg mv_cesa_cbc_aes_alg = {
+ .setkey = mv_cesa_aes_setkey,
+ .encrypt = mv_cesa_cbc_aes_encrypt,
+ .decrypt = mv_cesa_cbc_aes_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "mv-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = mv_cesa_skcipher_cra_init,
+ .cra_exit = mv_cesa_skcipher_cra_exit,
},
};
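The cipher.c conversion above moves the driver from the legacy ablkcipher interface to the skcipher API: requests now carry their length in req->cryptlen and their IV in req->iv, and the algorithms are declared as struct skcipher_alg. As a rough illustration of what a kernel-side user of one of these algorithms looks like after the conversion (every name below is hypothetical; only the crypto API calls themselves are real), a CBC-AES encryption over a kmalloc'ed buffer could be driven like this:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* buf must be DMA-able (e.g. kmalloc'ed), len a multiple of 16, iv 16 bytes */
static int demo_cbc_aes_encrypt(void *buf, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* "cbc(aes)" may be served by mv-cbc-aes when the CESA engine is up */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	/* len and iv land in req->cryptlen and req->iv, the fields the
	 * converted driver reads instead of the old nbytes/info pair */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
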
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index c76375ff376d..d0ef171c18df 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -304,10 +304,7 @@ int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *tdma;
tdma = mv_cesa_dma_add_desc(chain, flags);
- if (IS_ERR(tdma))
- return PTR_ERR(tdma);
-
- return 0;
+ return PTR_ERR_OR_ZERO(tdma);
}
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
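The tdma.c change above is a pure simplification: PTR_ERR_OR_ZERO() from <linux/err.h> folds the IS_ERR()/PTR_ERR()/return 0 sequence into a single call. A self-contained sketch of the equivalence (function names here are illustrative only):

#include <linux/err.h>

/* The two helpers below behave identically for any pointer value. */
static int demo_old_form(void *p)
{
	if (IS_ERR(p))
		return PTR_ERR(p);

	return 0;
}

static int demo_new_form(void *p)
{
	return PTR_ERR_OR_ZERO(p);
}
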
diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c
index 9e845e866dec..c2058cf59f57 100644
--- a/drivers/crypto/mediatek/mtk-aes.c
+++ b/drivers/crypto/mediatek/mtk-aes.c
@@ -13,6 +13,7 @@
*/
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include "mtk-platform.h"
#define AES_QUEUE_SIZE 512
@@ -137,11 +138,6 @@ struct mtk_aes_gcm_ctx {
struct crypto_skcipher *ctr;
};
-struct mtk_aes_gcm_setkey_result {
- int err;
- struct completion completion;
-};
-
struct mtk_aes_drv {
struct list_head dev_list;
/* Device list lock */
@@ -928,25 +924,19 @@ static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
+ /* Empty messages are not supported yet */
+ if (!gctx->textlen && !req->assoclen)
+ return -EINVAL;
+
rctx->mode = AES_FLAGS_GCM | mode;
return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
&req->base);
}
-static void mtk_gcm_setkey_done(struct crypto_async_request *req, int err)
-{
- struct mtk_aes_gcm_setkey_result *result = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- result->err = err;
- complete(&result->completion);
-}
-
/*
* Because of the hardware limitation, we need to pre-calculate key(H)
* for the GHASH operation. The result of the encryption operation
@@ -962,7 +952,7 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
u32 hash[4];
u8 iv[8];
- struct mtk_aes_gcm_setkey_result result;
+ struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
@@ -1002,22 +992,17 @@ static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
if (!data)
return -ENOMEM;
- init_completion(&data->result.completion);
+ crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- mtk_gcm_setkey_done, &data->result);
+ crypto_req_done, &data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
AES_BLOCK_SIZE, data->iv);
- err = crypto_skcipher_encrypt(&data->req);
- if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
- }
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
+ &data->wait);
if (err)
goto out;
@@ -1098,7 +1083,7 @@ static struct aead_alg aes_gcm_alg = {
.decrypt = mtk_aes_gcm_decrypt,
.init = mtk_aes_gcm_init,
.exit = mtk_aes_gcm_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
deleted file mode 100644
index bf25f415eea6..000000000000
--- a/drivers/crypto/mv_cesa.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-/*
- * Support for Marvell's crypto engine which can be found on some Orion5X
- * boards.
- *
- * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
- * License: GPLv2
- *
- */
-#include <crypto/aes.h>
-#include <crypto/algapi.h>
-#include <linux/crypto.h>
-#include <linux/genalloc.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-
-#include "mv_cesa.h"
-
-#define MV_CESA "MV-CESA:"
-#define MAX_HW_HASH_SIZE 0xFFFF
-#define MV_CESA_EXPIRE 500 /* msec */
-
-#define MV_CESA_DEFAULT_SRAM_SIZE 2048
-
-/*
- * STM:
- * /---------------------------------------\
- * | | request complete
- * \./ |
- * IDLE -> new request -> BUSY -> done -> DEQUEUE
- * /°\ |
- * | | more scatter entries
- * \________________/
- */
-enum engine_status {
- ENGINE_IDLE,
- ENGINE_BUSY,
- ENGINE_W_DEQUEUE,
-};
-
-/**
- * struct req_progress - used for every crypt request
- * @src_sg_it: sg iterator for src
- * @dst_sg_it: sg iterator for dst
- * @sg_src_left: bytes left in src to process (scatter list)
- * @src_start: offset to add to src start position (scatter list)
- * @crypt_len: length of current hw crypt/hash process
- * @hw_nbytes: total bytes to process in hw for this request
- * @copy_back: whether to copy data back (crypt) or not (hash)
- * @sg_dst_left: bytes left dst to process in this scatter list
- * @dst_start: offset to add to dst start position (scatter list)
- * @hw_processed_bytes: number of bytes processed by hw (request).
- *
- * sg helpers are used to iterate over the scatterlist. Since the size of the
- * SRAM may be less than the scatter size, this struct is used to keep
- * track of progress within current scatterlist.
- */
-struct req_progress {
- struct sg_mapping_iter src_sg_it;
- struct sg_mapping_iter dst_sg_it;
- void (*complete) (void);
- void (*process) (int is_first);
-
- /* src mostly */
- int sg_src_left;
- int src_start;
- int crypt_len;
- int hw_nbytes;
- /* dst mostly */
- int copy_back;
- int sg_dst_left;
- int dst_start;
- int hw_processed_bytes;
-};
-
-struct crypto_priv {
- void __iomem *reg;
- void __iomem *sram;
- struct gen_pool *sram_pool;
- dma_addr_t sram_dma;
- int irq;
- struct clk *clk;
- struct task_struct *queue_th;
-
- /* the lock protects queue and eng_st */
- spinlock_t lock;
- struct crypto_queue queue;
- enum engine_status eng_st;
- struct timer_list completion_timer;
- struct crypto_async_request *cur_req;
- struct req_progress p;
- int max_req_size;
- int sram_size;
- int has_sha1;
- int has_hmac_sha1;
-};
-
-static struct crypto_priv *cpg;
-
-struct mv_ctx {
- u8 aes_enc_key[AES_KEY_LEN];
- u32 aes_dec_key[8];
- int key_len;
- u32 need_calc_aes_dkey;
-};
-
-enum crypto_op {
- COP_AES_ECB,
- COP_AES_CBC,
-};
-
-struct mv_req_ctx {
- enum crypto_op op;
- int decrypt;
-};
-
-enum hash_op {
- COP_SHA1,
- COP_HMAC_SHA1
-};
-
-struct mv_tfm_hash_ctx {
- struct crypto_shash *fallback;
- struct crypto_shash *base_hash;
- u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
- int count_add;
- enum hash_op op;
-};
-
-struct mv_req_hash_ctx {
- u64 count;
- u32 state[SHA1_DIGEST_SIZE / 4];
- u8 buffer[SHA1_BLOCK_SIZE];
- int first_hash; /* marks that we don't have previous state */
- int last_chunk; /* marks that this is the 'final' request */
- int extra_bytes; /* unprocessed bytes in buffer */
- enum hash_op op;
- int count_add;
-};
-
-static void mv_completion_timer_callback(unsigned long unused)
-{
- int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
-
- printk(KERN_ERR MV_CESA
- "completion timer expired (CESA %sactive), cleaning up.\n",
- active ? "" : "in");
-
- del_timer(&cpg->completion_timer);
- writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
- while(readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
- printk(KERN_INFO MV_CESA "%s: waiting for engine finishing\n", __func__);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
-}
-
-static void mv_setup_timer(void)
-{
- setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
- mod_timer(&cpg->completion_timer,
- jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
-}
-
-static void compute_aes_dec_key(struct mv_ctx *ctx)
-{
- struct crypto_aes_ctx gen_aes_key;
- int key_pos;
-
- if (!ctx->need_calc_aes_dkey)
- return;
-
- crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
-
- key_pos = ctx->key_len + 24;
- memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
- switch (ctx->key_len) {
- case AES_KEYSIZE_256:
- key_pos -= 2;
- /* fall */
- case AES_KEYSIZE_192:
- key_pos -= 2;
- memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
- 4 * 4);
- break;
- }
- ctx->need_calc_aes_dkey = 0;
-}
-
-static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
- unsigned int len)
-{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
-
- switch (len) {
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_192:
- case AES_KEYSIZE_256:
- break;
- default:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- ctx->key_len = len;
- ctx->need_calc_aes_dkey = 1;
-
- memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
- return 0;
-}
-
-static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
-{
- int ret;
- void *sbuf;
- int copy_len;
-
- while (len) {
- if (!p->sg_src_left) {
- ret = sg_miter_next(&p->src_sg_it);
- BUG_ON(!ret);
- p->sg_src_left = p->src_sg_it.length;
- p->src_start = 0;
- }
-
- sbuf = p->src_sg_it.addr + p->src_start;
-
- copy_len = min(p->sg_src_left, len);
- memcpy(dbuf, sbuf, copy_len);
-
- p->src_start += copy_len;
- p->sg_src_left -= copy_len;
-
- len -= copy_len;
- dbuf += copy_len;
- }
-}
-
-static void setup_data_in(void)
-{
- struct req_progress *p = &cpg->p;
- int data_in_sram =
- min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
- copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
- data_in_sram - p->crypt_len);
- p->crypt_len = data_in_sram;
-}
-
-static void mv_process_current_q(int first_block)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
- struct sec_accel_config op;
-
- switch (req_ctx->op) {
- case COP_AES_ECB:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
- break;
- case COP_AES_CBC:
- default:
- op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
- op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
- ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
- if (first_block)
- memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
- break;
- }
- if (req_ctx->decrypt) {
- op.config |= CFG_DIR_DEC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
- AES_KEY_LEN);
- } else {
- op.config |= CFG_DIR_ENC;
- memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
- AES_KEY_LEN);
- }
-
- switch (ctx->key_len) {
- case AES_KEYSIZE_128:
- op.config |= CFG_AES_LEN_128;
- break;
- case AES_KEYSIZE_192:
- op.config |= CFG_AES_LEN_192;
- break;
- case AES_KEYSIZE_256:
- op.config |= CFG_AES_LEN_256;
- break;
- }
- op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
- ENC_P_DST(SRAM_DATA_OUT_START);
- op.enc_key_p = SRAM_DATA_KEY_P;
-
- setup_data_in();
- op.enc_len = cpg->p.crypt_len;
- memcpy(cpg->sram + SRAM_CONFIG, &op,
- sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static void mv_crypto_algo_completion(void)
-{
- struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- sg_miter_stop(&cpg->p.src_sg_it);
- sg_miter_stop(&cpg->p.dst_sg_it);
-
- if (req_ctx->op != COP_AES_CBC)
- return ;
-
- memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
-}
-
-static void mv_process_hash_current(int first_block)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- struct req_progress *p = &cpg->p;
- struct sec_accel_config op = { 0 };
- int is_last;
-
- switch (req_ctx->op) {
- case COP_SHA1:
- default:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
- break;
- case COP_HMAC_SHA1:
- op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
- memcpy(cpg->sram + SRAM_HMAC_IV_IN,
- tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
- break;
- }
-
- op.mac_src_p =
- MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
- req_ctx->
- count);
-
- setup_data_in();
-
- op.mac_digest =
- MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
- op.mac_iv =
- MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
- MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
-
- is_last = req_ctx->last_chunk
- && (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
- && (req_ctx->count <= MAX_HW_HASH_SIZE);
- if (req_ctx->first_hash) {
- if (is_last)
- op.config |= CFG_NOT_FRAG;
- else
- op.config |= CFG_FIRST_FRAG;
-
- req_ctx->first_hash = 0;
- } else {
- if (is_last)
- op.config |= CFG_LAST_FRAG;
- else
- op.config |= CFG_MID_FRAG;
-
- if (first_block) {
- writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
- writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
- writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
- writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
- writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
- }
- }
-
- memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
-
- /* GO */
- mv_setup_timer();
- writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
-}
-
-static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
- struct shash_desc *desc)
-{
- int i;
- struct sha1_state shash_state;
-
- shash_state.count = ctx->count + ctx->count_add;
- for (i = 0; i < 5; i++)
- shash_state.state[i] = ctx->state[i];
- memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
- return crypto_shash_import(desc, &shash_state);
-}
-
-static int mv_hash_final_fallback(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
- SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
- int rc;
-
- shash->tfm = tfm_ctx->fallback;
- shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- if (unlikely(req_ctx->first_hash)) {
- crypto_shash_init(shash);
- crypto_shash_update(shash, req_ctx->buffer,
- req_ctx->extra_bytes);
- } else {
- /* only SHA1 for now....
- */
- rc = mv_hash_import_sha1_ctx(req_ctx, shash);
- if (rc)
- goto out;
- }
- rc = crypto_shash_final(shash, req->result);
-out:
- return rc;
-}
-
-static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
-{
- ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
- ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
- ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
- ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
- ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
-}
-
-static void mv_hash_algo_completion(void)
-{
- struct ahash_request *req = ahash_request_cast(cpg->cur_req);
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- if (ctx->extra_bytes)
- copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
- sg_miter_stop(&cpg->p.src_sg_it);
-
- if (likely(ctx->last_chunk)) {
- if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
- memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
- crypto_ahash_digestsize(crypto_ahash_reqtfm
- (req)));
- } else {
- mv_save_digest_state(ctx);
- mv_hash_final_fallback(req);
- }
- } else {
- mv_save_digest_state(ctx);
- }
-}
-
-static void dequeue_complete_req(void)
-{
- struct crypto_async_request *req = cpg->cur_req;
- void *buf;
- int ret;
- cpg->p.hw_processed_bytes += cpg->p.crypt_len;
- if (cpg->p.copy_back) {
- int need_copy_len = cpg->p.crypt_len;
- int sram_offset = 0;
- do {
- int dst_copy;
-
- if (!cpg->p.sg_dst_left) {
- ret = sg_miter_next(&cpg->p.dst_sg_it);
- BUG_ON(!ret);
- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
- cpg->p.dst_start = 0;
- }
-
- buf = cpg->p.dst_sg_it.addr;
- buf += cpg->p.dst_start;
-
- dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
-
- memcpy(buf,
- cpg->sram + SRAM_DATA_OUT_START + sram_offset,
- dst_copy);
- sram_offset += dst_copy;
- cpg->p.sg_dst_left -= dst_copy;
- need_copy_len -= dst_copy;
- cpg->p.dst_start += dst_copy;
- } while (need_copy_len > 0);
- }
-
- cpg->p.crypt_len = 0;
-
- BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
- if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
- /* process next scatter list entry */
- cpg->eng_st = ENGINE_BUSY;
- cpg->p.process(0);
- } else {
- cpg->p.complete();
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->complete(req, 0);
- local_bh_enable();
- }
-}
-
-static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
-{
- int i = 0;
- size_t cur_len;
-
- while (sl) {
- cur_len = sl[i].length;
- ++i;
- if (total_bytes > cur_len)
- total_bytes -= cur_len;
- else
- break;
- }
-
- return i;
-}
-
-static void mv_start_new_crypt_req(struct ablkcipher_request *req)
-{
- struct req_progress *p = &cpg->p;
- int num_sgs;
-
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- p->hw_nbytes = req->nbytes;
- p->complete = mv_crypto_algo_completion;
- p->process = mv_process_current_q;
- p->copy_back = 1;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- num_sgs = count_sgs(req->dst, req->nbytes);
- sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
-
- mv_process_current_q(1);
-}
-
-static void mv_start_new_hash_req(struct ahash_request *req)
-{
- struct req_progress *p = &cpg->p;
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
- int num_sgs, hw_bytes, old_extra_bytes, rc;
- cpg->cur_req = &req->base;
- memset(p, 0, sizeof(struct req_progress));
- hw_bytes = req->nbytes + ctx->extra_bytes;
- old_extra_bytes = ctx->extra_bytes;
-
- ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
- if (ctx->extra_bytes != 0
- && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
- hw_bytes -= ctx->extra_bytes;
- else
- ctx->extra_bytes = 0;
-
- num_sgs = count_sgs(req->src, req->nbytes);
- sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
-
- if (hw_bytes) {
- p->hw_nbytes = hw_bytes;
- p->complete = mv_hash_algo_completion;
- p->process = mv_process_hash_current;
-
- if (unlikely(old_extra_bytes)) {
- memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
- old_extra_bytes);
- p->crypt_len = old_extra_bytes;
- }
-
- mv_process_hash_current(1);
- } else {
- copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
- ctx->extra_bytes - old_extra_bytes);
- sg_miter_stop(&p->src_sg_it);
- if (ctx->last_chunk)
- rc = mv_hash_final_fallback(req);
- else
- rc = 0;
- cpg->eng_st = ENGINE_IDLE;
- local_bh_disable();
- req->base.complete(&req->base, rc);
- local_bh_enable();
- }
-}
-
-static int queue_manag(void *data)
-{
- cpg->eng_st = ENGINE_IDLE;
- do {
- struct crypto_async_request *async_req = NULL;
- struct crypto_async_request *backlog = NULL;
-
- __set_current_state(TASK_INTERRUPTIBLE);
-
- if (cpg->eng_st == ENGINE_W_DEQUEUE)
- dequeue_complete_req();
-
- spin_lock_irq(&cpg->lock);
- if (cpg->eng_st == ENGINE_IDLE) {
- backlog = crypto_get_backlog(&cpg->queue);
- async_req = crypto_dequeue_request(&cpg->queue);
- if (async_req) {
- BUG_ON(cpg->eng_st != ENGINE_IDLE);
- cpg->eng_st = ENGINE_BUSY;
- }
- }
- spin_unlock_irq(&cpg->lock);
-
- if (backlog) {
- backlog->complete(backlog, -EINPROGRESS);
- backlog = NULL;
- }
-
- if (async_req) {
- if (crypto_tfm_alg_type(async_req->tfm) !=
- CRYPTO_ALG_TYPE_AHASH) {
- struct ablkcipher_request *req =
- ablkcipher_request_cast(async_req);
- mv_start_new_crypt_req(req);
- } else {
- struct ahash_request *req =
- ahash_request_cast(async_req);
- mv_start_new_hash_req(req);
- }
- async_req = NULL;
- }
-
- schedule();
-
- } while (!kthread_should_stop());
- return 0;
-}
-
-static int mv_handle_req(struct crypto_async_request *req)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cpg->lock, flags);
- ret = crypto_enqueue_request(&cpg->queue, req);
- spin_unlock_irqrestore(&cpg->lock, flags);
- wake_up_process(cpg->queue_th);
- return ret;
-}
-
-static int mv_enc_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_ecb(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_ECB;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_enc_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 0;
-
- return mv_handle_req(&req->base);
-}
-
-static int mv_dec_aes_cbc(struct ablkcipher_request *req)
-{
- struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
-
- req_ctx->op = COP_AES_CBC;
- req_ctx->decrypt = 1;
-
- compute_aes_dec_key(ctx);
- return mv_handle_req(&req->base);
-}
-
-static int mv_cra_init(struct crypto_tfm *tfm)
-{
- tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
- return 0;
-}
-
-static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
- int is_last, unsigned int req_len,
- int count_add)
-{
- memset(ctx, 0, sizeof(*ctx));
- ctx->op = op;
- ctx->count = req_len;
- ctx->first_hash = 1;
- ctx->last_chunk = is_last;
- ctx->count_add = count_add;
-}
-
-static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
- unsigned req_len)
-{
- ctx->last_chunk = is_last;
- ctx->count += req_len;
-}
-
-static int mv_hash_init(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
- tfm_ctx->count_add);
- return 0;
-}
-
-static int mv_hash_update(struct ahash_request *req)
-{
- if (!req->nbytes)
- return 0;
-
- mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_final(struct ahash_request *req)
-{
- struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
-
- ahash_request_set_crypt(req, NULL, req->result, 0);
- mv_update_hash_req_ctx(ctx, 1, 0);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_finup(struct ahash_request *req)
-{
- mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
- return mv_handle_req(&req->base);
-}
-
-static int mv_hash_digest(struct ahash_request *req)
-{
- const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
- mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
- req->nbytes, tfm_ctx->count_add);
- return mv_handle_req(&req->base);
-}
-
-static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
- const void *ostate)
-{
- const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
- int i;
- for (i = 0; i < 5; i++) {
- ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
- ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
- }
-}
-
-static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
- unsigned int keylen)
-{
- int rc;
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- int bs, ds, ss;
-
- if (!ctx->base_hash)
- return 0;
-
- rc = crypto_shash_setkey(ctx->fallback, key, keylen);
- if (rc)
- return rc;
-
- /* Can't see a way to extract the ipad/opad from the fallback tfm
- so I'm basically copying code from the hmac module */
- bs = crypto_shash_blocksize(ctx->base_hash);
- ds = crypto_shash_digestsize(ctx->base_hash);
- ss = crypto_shash_statesize(ctx->base_hash);
-
- {
- SHASH_DESC_ON_STACK(shash, ctx->base_hash);
-
- unsigned int i;
- char ipad[ss];
- char opad[ss];
-
- shash->tfm = ctx->base_hash;
- shash->flags = crypto_shash_get_flags(ctx->base_hash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
-
- if (keylen > bs) {
- int err;
-
- err =
- crypto_shash_digest(shash, key, keylen, ipad);
- if (err)
- return err;
-
- keylen = ds;
- } else
- memcpy(ipad, key, keylen);
-
- memset(ipad + keylen, 0, bs - keylen);
- memcpy(opad, ipad, bs);
-
- for (i = 0; i < bs; i++) {
- ipad[i] ^= HMAC_IPAD_VALUE;
- opad[i] ^= HMAC_OPAD_VALUE;
- }
-
- rc = crypto_shash_init(shash) ? :
- crypto_shash_update(shash, ipad, bs) ? :
- crypto_shash_export(shash, ipad) ? :
- crypto_shash_init(shash) ? :
- crypto_shash_update(shash, opad, bs) ? :
- crypto_shash_export(shash, opad);
-
- if (rc == 0)
- mv_hash_init_ivs(ctx, ipad, opad);
-
- return rc;
- }
-}
-
-static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
- enum hash_op op, int count_add)
-{
- const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *fallback_tfm = NULL;
- struct crypto_shash *base_hash = NULL;
- int err = -ENOMEM;
-
- ctx->op = op;
- ctx->count_add = count_add;
-
- /* Allocate a fallback and abort if it failed. */
- fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback_tfm)) {
- printk(KERN_WARNING MV_CESA
- "Fallback driver '%s' could not be loaded!\n",
- fallback_driver_name);
- err = PTR_ERR(fallback_tfm);
- goto out;
- }
- ctx->fallback = fallback_tfm;
-
- if (base_hash_name) {
- /* Allocate a hash to compute the ipad/opad of hmac. */
- base_hash = crypto_alloc_shash(base_hash_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(base_hash)) {
- printk(KERN_WARNING MV_CESA
- "Base driver '%s' could not be loaded!\n",
- base_hash_name);
- err = PTR_ERR(base_hash);
- goto err_bad_base;
- }
- }
- ctx->base_hash = base_hash;
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mv_req_hash_ctx) +
- crypto_shash_descsize(ctx->fallback));
- return 0;
-err_bad_base:
- crypto_free_shash(fallback_tfm);
-out:
- return err;
-}
-
-static void mv_cra_hash_exit(struct crypto_tfm *tfm)
-{
- struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(ctx->fallback);
- if (ctx->base_hash)
- crypto_free_shash(ctx->base_hash);
-}
-
-static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
-}
-
-static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
-{
- return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
-}
-
-static irqreturn_t crypto_int(int irq, void *priv)
-{
- u32 val;
-
- val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
- if (!(val & SEC_INT_ACCEL0_DONE))
- return IRQ_NONE;
-
- if (!del_timer(&cpg->completion_timer)) {
- printk(KERN_WARNING MV_CESA
- "got an interrupt but no pending timer?\n");
- }
- val &= ~SEC_INT_ACCEL0_DONE;
- writel(val, cpg->reg + FPGA_INT_STATUS);
- writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
- BUG_ON(cpg->eng_st != ENGINE_BUSY);
- cpg->eng_st = ENGINE_W_DEQUEUE;
- wake_up_process(cpg->queue_th);
- return IRQ_HANDLED;
-}
-
-static struct crypto_alg mv_aes_alg_ecb = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "mv-ecb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 16,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_ecb,
- .decrypt = mv_dec_aes_ecb,
- },
- },
-};
-
-static struct crypto_alg mv_aes_alg_cbc = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "mv-cbc-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mv_cra_init,
- .cra_u = {
- .ablkcipher = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = mv_setkey_aes,
- .encrypt = mv_enc_aes_cbc,
- .decrypt = mv_dec_aes_cbc,
- },
- },
-};
-
-static struct ahash_alg mv_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "mv-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static struct ahash_alg mv_hmac_sha1_alg = {
- .init = mv_hash_init,
- .update = mv_hash_update,
- .final = mv_hash_final,
- .finup = mv_hash_finup,
- .digest = mv_hash_digest,
- .setkey = mv_hash_setkey,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "mv-hmac-sha1",
- .cra_priority = 300,
- .cra_flags =
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
- .cra_init = mv_cra_hash_hmac_sha1_init,
- .cra_exit = mv_cra_hash_exit,
- .cra_module = THIS_MODULE,
- }
- }
-};
-
-static int mv_cesa_get_sram(struct platform_device *pdev,
- struct crypto_priv *cp)
-{
- struct resource *res;
- u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
-
- of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
- &sram_size);
-
- cp->sram_size = sram_size;
- cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
- "marvell,crypto-srams", 0);
- if (cp->sram_pool) {
- cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
- &cp->sram_dma);
- if (cp->sram)
- return 0;
-
- return -ENOMEM;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sram");
- if (!res || resource_size(res) < cp->sram_size)
- return -EINVAL;
-
- cp->sram = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->sram))
- return PTR_ERR(cp->sram);
-
- return 0;
-}
-
-static int mv_probe(struct platform_device *pdev)
-{
- struct crypto_priv *cp;
- struct resource *res;
- int irq;
- int ret;
-
- if (cpg) {
- printk(KERN_ERR MV_CESA "Second crypto dev?\n");
- return -EEXIST;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!res)
- return -ENXIO;
-
- cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
-
- spin_lock_init(&cp->lock);
- crypto_init_queue(&cp->queue, 50);
- cp->reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cp->reg)) {
- ret = PTR_ERR(cp->reg);
- goto err;
- }
-
- ret = mv_cesa_get_sram(pdev, cp);
- if (ret)
- goto err;
-
- cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err;
- }
- cp->irq = irq;
-
- platform_set_drvdata(pdev, cp);
- cpg = cp;
-
- cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
- if (IS_ERR(cp->queue_th)) {
- ret = PTR_ERR(cp->queue_th);
- goto err;
- }
-
- ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
- cp);
- if (ret)
- goto err_thread;
-
- /* Not all platforms can gate the clock, so it is not
- an error if the clock does not exists. */
- cp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(cp->clk))
- clk_prepare_enable(cp->clk);
-
- writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
- writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
- writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
- writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
-
- ret = crypto_register_alg(&mv_aes_alg_ecb);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-ecb driver\n");
- goto err_irq;
- }
-
- ret = crypto_register_alg(&mv_aes_alg_cbc);
- if (ret) {
- printk(KERN_WARNING MV_CESA
- "Could not register aes-cbc driver\n");
- goto err_unreg_ecb;
- }
-
- ret = crypto_register_ahash(&mv_sha1_alg);
- if (ret == 0)
- cpg->has_sha1 = 1;
- else
- printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
-
- ret = crypto_register_ahash(&mv_hmac_sha1_alg);
- if (ret == 0) {
- cpg->has_hmac_sha1 = 1;
- } else {
- printk(KERN_WARNING MV_CESA
- "Could not register hmac-sha1 driver\n");
- }
-
- return 0;
-err_unreg_ecb:
- crypto_unregister_alg(&mv_aes_alg_ecb);
-err_irq:
- free_irq(irq, cp);
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-err_thread:
- kthread_stop(cp->queue_th);
-err:
- cpg = NULL;
- return ret;
-}
-
-static int mv_remove(struct platform_device *pdev)
-{
- struct crypto_priv *cp = platform_get_drvdata(pdev);
-
- crypto_unregister_alg(&mv_aes_alg_ecb);
- crypto_unregister_alg(&mv_aes_alg_cbc);
- if (cp->has_sha1)
- crypto_unregister_ahash(&mv_sha1_alg);
- if (cp->has_hmac_sha1)
- crypto_unregister_ahash(&mv_hmac_sha1_alg);
- kthread_stop(cp->queue_th);
- free_irq(cp->irq, cp);
- memset(cp->sram, 0, cp->sram_size);
-
- if (!IS_ERR(cp->clk)) {
- clk_disable_unprepare(cp->clk);
- clk_put(cp->clk);
- }
-
- cpg = NULL;
- return 0;
-}
-
-static const struct of_device_id mv_cesa_of_match_table[] = {
- { .compatible = "marvell,orion-crypto", },
- { .compatible = "marvell,kirkwood-crypto", },
- { .compatible = "marvell,dove-crypto", },
- {}
-};
-MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
-
-static struct platform_driver marvell_crypto = {
- .probe = mv_probe,
- .remove = mv_remove,
- .driver = {
- .name = "mv_crypto",
- .of_match_table = mv_cesa_of_match_table,
- },
-};
-MODULE_ALIAS("platform:mv_crypto");
-
-module_platform_driver(marvell_crypto);
-
-MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
-MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
-MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
deleted file mode 100644
index 9249d3ed184b..000000000000
--- a/drivers/crypto/mv_cesa.h
+++ /dev/null
@@ -1,150 +0,0 @@
-#ifndef __MV_CRYPTO_H__
-#define __MV_CRYPTO_H__
-
-#define DIGEST_INITIAL_VAL_A 0xdd00
-#define DIGEST_INITIAL_VAL_B 0xdd04
-#define DIGEST_INITIAL_VAL_C 0xdd08
-#define DIGEST_INITIAL_VAL_D 0xdd0c
-#define DIGEST_INITIAL_VAL_E 0xdd10
-#define DES_CMD_REG 0xdd58
-
-#define SEC_ACCEL_CMD 0xde00
-#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
-#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
-#define SEC_CMD_DISABLE_SEC (1 << 2)
-
-#define SEC_ACCEL_DESC_P0 0xde04
-#define SEC_DESC_P0_PTR(x) (x)
-
-#define SEC_ACCEL_DESC_P1 0xde14
-#define SEC_DESC_P1_PTR(x) (x)
-
-#define SEC_ACCEL_CFG 0xde08
-#define SEC_CFG_STOP_DIG_ERR (1 << 0)
-#define SEC_CFG_CH0_W_IDMA (1 << 7)
-#define SEC_CFG_CH1_W_IDMA (1 << 8)
-#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
-#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
-
-#define SEC_ACCEL_STATUS 0xde0c
-#define SEC_ST_ACT_0 (1 << 0)
-#define SEC_ST_ACT_1 (1 << 1)
-
-/*
- * FPGA_INT_STATUS looks like a FPGA leftover and is documented only in Errata
- * 4.12. It looks like that it was part of an IRQ-controller in FPGA and
- * someone forgot to remove it while switching to the core and moving to
- * SEC_ACCEL_INT_STATUS.
- */
-#define FPGA_INT_STATUS 0xdd68
-#define SEC_ACCEL_INT_STATUS 0xde20
-#define SEC_INT_AUTH_DONE (1 << 0)
-#define SEC_INT_DES_E_DONE (1 << 1)
-#define SEC_INT_AES_E_DONE (1 << 2)
-#define SEC_INT_AES_D_DONE (1 << 3)
-#define SEC_INT_ENC_DONE (1 << 4)
-#define SEC_INT_ACCEL0_DONE (1 << 5)
-#define SEC_INT_ACCEL1_DONE (1 << 6)
-#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
-#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
-
-#define SEC_ACCEL_INT_MASK 0xde24
-
-#define AES_KEY_LEN (8 * 4)
-
-struct sec_accel_config {
-
- u32 config;
-#define CFG_OP_MAC_ONLY 0
-#define CFG_OP_CRYPT_ONLY 1
-#define CFG_OP_MAC_CRYPT 2
-#define CFG_OP_CRYPT_MAC 3
-#define CFG_MACM_MD5 (4 << 4)
-#define CFG_MACM_SHA1 (5 << 4)
-#define CFG_MACM_HMAC_MD5 (6 << 4)
-#define CFG_MACM_HMAC_SHA1 (7 << 4)
-#define CFG_ENCM_DES (1 << 8)
-#define CFG_ENCM_3DES (2 << 8)
-#define CFG_ENCM_AES (3 << 8)
-#define CFG_DIR_ENC (0 << 12)
-#define CFG_DIR_DEC (1 << 12)
-#define CFG_ENC_MODE_ECB (0 << 16)
-#define CFG_ENC_MODE_CBC (1 << 16)
-#define CFG_3DES_EEE (0 << 20)
-#define CFG_3DES_EDE (1 << 20)
-#define CFG_AES_LEN_128 (0 << 24)
-#define CFG_AES_LEN_192 (1 << 24)
-#define CFG_AES_LEN_256 (2 << 24)
-#define CFG_NOT_FRAG (0 << 30)
-#define CFG_FIRST_FRAG (1 << 30)
-#define CFG_LAST_FRAG (2 << 30)
-#define CFG_MID_FRAG (3 << 30)
-
- u32 enc_p;
-#define ENC_P_SRC(x) (x)
-#define ENC_P_DST(x) ((x) << 16)
-
- u32 enc_len;
-#define ENC_LEN(x) (x)
-
- u32 enc_key_p;
-#define ENC_KEY_P(x) (x)
-
- u32 enc_iv;
-#define ENC_IV_POINT(x) ((x) << 0)
-#define ENC_IV_BUF_POINT(x) ((x) << 16)
-
- u32 mac_src_p;
-#define MAC_SRC_DATA_P(x) (x)
-#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
-
- u32 mac_digest;
-#define MAC_DIGEST_P(x) (x)
-#define MAC_FRAG_LEN(x) ((x) << 16)
- u32 mac_iv;
-#define MAC_INNER_IV_P(x) (x)
-#define MAC_OUTER_IV_P(x) ((x) << 16)
-}__attribute__ ((packed));
- /*
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | CRYPT KEY | 8 * 4
- * |-----------| 0x40
- * | IV IN | 4 * 4
- * |-----------| 0x40 (inplace)
- * | IV BUF | 4 * 4
- * |-----------| 0x80
- * | DATA IN | 16 * x (max ->max_req_size)
- * |-----------| 0x80 (inplace operation)
- * | DATA OUT | 16 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-
- /* Hashing memory map:
- * /-----------\ 0
- * | ACCEL CFG | 4 * 8
- * |-----------| 0x20
- * | Inner IV | 5 * 4
- * |-----------| 0x34
- * | Outer IV | 5 * 4
- * |-----------| 0x48
- * | Output BUF| 5 * 4
- * |-----------| 0x80
- * | DATA IN | 64 * x (max ->max_req_size)
- * \-----------/ SRAM size
- */
-#define SRAM_CONFIG 0x00
-#define SRAM_DATA_KEY_P 0x20
-#define SRAM_DATA_IV 0x40
-#define SRAM_DATA_IV_BUF 0x40
-#define SRAM_DATA_IN_START 0x80
-#define SRAM_DATA_OUT_START 0x80
-
-#define SRAM_HMAC_IV_IN 0x20
-#define SRAM_HMAC_IV_OUT 0x34
-#define SRAM_DIGEST_BUF 0x48
-
-#define SRAM_CFG_SPACE 0x80
-
-#endif
diff --git a/drivers/crypto/n2_asm.S b/drivers/crypto/n2_asm.S
index f7c793745a1e..9a67dbf340f4 100644
--- a/drivers/crypto/n2_asm.S
+++ b/drivers/crypto/n2_asm.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* n2_asm.S: Hypervisor calls for NCS support.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index a9fd8b9e86cd..48de52cf2ecc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1962,10 +1962,8 @@ static struct n2_crypto *alloc_n2cp(void)
static void free_n2cp(struct n2_crypto *np)
{
- if (np->cwq_info.ino_table) {
- kfree(np->cwq_info.ino_table);
- np->cwq_info.ino_table = NULL;
- }
+ kfree(np->cwq_info.ino_table);
+ np->cwq_info.ino_table = NULL;
kfree(np);
}
@@ -2079,10 +2077,8 @@ static struct n2_mau *alloc_ncp(void)
static void free_ncp(struct n2_mau *mp)
{
- if (mp->mau_info.ino_table) {
- kfree(mp->mau_info.ino_table);
- mp->mau_info.ino_table = NULL;
- }
+ kfree(mp->mau_info.ino_table);
+ mp->mau_info.ino_table = NULL;
kfree(mp);
}
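For reference, the two n2_core.c hunks above lean on the fact that kfree() accepts a NULL pointer, so the explicit NULL checks around the ino_table frees are redundant. A minimal sketch of the simplified helper, with a hypothetical structure name:

#include <linux/slab.h>

struct queue_info {
	unsigned long *ino_table;
};

static void free_queue_info(struct queue_info *qi)
{
	/* kfree(NULL) is a no-op, so no NULL check is needed. */
	kfree(qi->ino_table);
	qi->ino_table = NULL;
}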
diff --git a/drivers/crypto/n2_core.h b/drivers/crypto/n2_core.h
index 4bcbbeae98f5..2406763b0306 100644
--- a/drivers/crypto/n2_core.h
+++ b/drivers/crypto/n2_core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _N2_CORE_H
#define _N2_CORE_H
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index b727821c8ed4..015155da59c2 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_NX_ENCRYPT) += nx-crypto.o
nx-crypto-objs := nx.o \
nx_debugfs.o \
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..f2246a5abcf6 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -46,7 +46,6 @@ struct nx842_workmem {
ktime_t start;
- struct vas_window *txwin; /* Used with VAS function */
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
@@ -65,7 +64,7 @@ struct nx842_coproc {
* Send the request to NX engine on the chip for the corresponding CPU
* where the process is executing. Use with VAS function.
*/
-static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst);
+static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);
/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx842_coprocs);
@@ -193,7 +192,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
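The hunk above swaps ACCESS_ONCE() for READ_ONCE(), the current interface for a tear-free load of a location the coprocessor updates behind the CPU's back. A minimal sketch of that polling shape, with hypothetical names:

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/processor.h>	/* cpu_relax() */

/* READ_ONCE() forces a fresh load of *csb_flags on every pass instead of
 * letting the compiler cache the value in a register. */
static int wait_for_valid(const u32 *csb_flags, u32 valid_bit, ktime_t timeout)
{
	while (!(READ_ONCE(*csb_flags) & valid_bit)) {
		cpu_relax();
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
	}
	return 0;
}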
@@ -586,16 +585,11 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
ccw = SET_FIELD(CCW_FC_842, ccw, fc);
crb->ccw = cpu_to_be32(ccw);
- txwin = wmem->txwin;
- /* shoudn't happen, we don't load without a coproc */
- if (!txwin) {
- pr_err_ratelimited("NX-842 coprocessor is not available");
- return -ENODEV;
- }
-
do {
wmem->start = ktime_get();
preempt_disable();
+ txwin = this_cpu_read(cpu_txwin);
+
/*
* VAS copy CRB into L2 cache. Refer <asm/vas.h>.
* @crb and @offset.
@@ -689,25 +683,6 @@ static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
list_add(&coproc->list, &nx842_coprocs);
}
-/*
- * Identify chip ID for each CPU and save coprocesor adddress for the
- * corresponding NX engine in percpu coproc_inst.
- * coproc_inst is used in crypto_init to open send window on the NX instance
- * for the corresponding CPU / chip where the open request is executed.
- */
-static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc)
-{
- unsigned int i, chip_id;
-
- for_each_possible_cpu(i) {
- chip_id = cpu_to_chip_id(i);
-
- if (coproc->chip_id == chip_id)
- per_cpu(coproc_inst, i) = coproc;
- }
-}
-
-
static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
{
struct vas_window *txwin = NULL;
@@ -725,15 +700,58 @@ static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
* Open a VAS send window which is used to send request to NX.
*/
txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
- if (IS_ERR(txwin)) {
+ if (IS_ERR(txwin))
pr_err("ibm,nx-842: Can not open TX window: %ld\n",
PTR_ERR(txwin));
- return NULL;
- }
return txwin;
}
+/*
+ * Identify chip ID for each CPU, open a send window for the corresponding NX
+ * engine and save txwin in percpu cpu_txwin.
+ * cpu_txwin is used in copy/paste operation for each compression /
+ * decompression request.
+ */
+static int nx842_open_percpu_txwins(void)
+{
+ struct nx842_coproc *coproc, *n;
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ struct vas_window *txwin = NULL;
+
+ chip_id = cpu_to_chip_id(i);
+
+ list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ /*
+ * Kernel requests use only high priority FIFOs. So
+ * open send windows for these FIFOs.
+ */
+
+ if (coproc->ct != VAS_COP_TYPE_842_HIPRI)
+ continue;
+
+ if (coproc->chip_id == chip_id) {
+ txwin = nx842_alloc_txwin(coproc);
+ if (IS_ERR(txwin))
+ return PTR_ERR(txwin);
+
+ per_cpu(cpu_txwin, i) = txwin;
+ break;
+ }
+ }
+
+ if (!per_cpu(cpu_txwin, i)) {
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
int vasid)
{
@@ -819,14 +837,6 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
coproc->vas.id = vasid;
nx842_add_coprocs_list(coproc, chip_id);
- /*
- * Kernel requests use only high priority FIFOs. So save coproc
- * info in percpu coproc_inst which will be used to open send
- * windows for crypto open requests later.
- */
- if (coproc->ct == VAS_COP_TYPE_842_HIPRI)
- nx842_set_per_cpu_coproc(coproc);
-
return 0;
err_out:
@@ -847,24 +857,12 @@ static int __init nx842_powernv_probe_vas(struct device_node *pn)
return -EINVAL;
}
- for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") {
- if (of_get_ibm_chip_id(dn) == chip_id)
- break;
- }
-
- if (!dn) {
- pr_err("Missing VAS device node\n");
+ vasid = chip_to_vas_id(chip_id);
+ if (vasid < 0) {
+ pr_err("Unable to map chip_id %d to vasid\n", chip_id);
return -EINVAL;
}
- if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) {
- pr_err("Missing ibm,vas-id device property\n");
- of_node_put(dn);
- return -EINVAL;
- }
-
- of_node_put(dn);
-
for_each_child_of_node(pn, dn) {
if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
ret = vas_cfg_coproc_info(dn, chip_id, vasid);
@@ -928,6 +926,19 @@ static int __init nx842_powernv_probe(struct device_node *dn)
static void nx842_delete_coprocs(void)
{
struct nx842_coproc *coproc, *n;
+ struct vas_window *txwin;
+ int i;
+
+ /*
+ * close percpu txwins that are opened for the corresponding coproc.
+ */
+ for_each_possible_cpu(i) {
+ txwin = per_cpu(cpu_txwin, i);
+ if (txwin)
+ vas_win_close(txwin);
+
+ per_cpu(cpu_txwin, i) = 0;
+ }
list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
if (coproc->vas.rxwin)
@@ -954,46 +965,6 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
-static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
- struct nx842_coproc *coproc;
- int ret;
-
- ret = nx842_crypto_init(tfm, &nx842_powernv_driver);
-
- if (ret)
- return ret;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
- coproc = per_cpu(coproc_inst, smp_processor_id());
-
- ret = -EINVAL;
- if (coproc && coproc->vas.rxwin) {
- wmem->txwin = nx842_alloc_txwin(coproc);
- if (!IS_ERR(wmem->txwin))
- return 0;
-
- ret = PTR_ERR(wmem->txwin);
- }
-
- return ret;
-}
-
-void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm)
-{
- struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
- struct nx842_workmem *wmem;
-
- wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
-
- if (wmem && wmem->txwin)
- vas_win_close(wmem->txwin);
-
- nx842_crypto_exit(tfm);
-}
-
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
return nx842_crypto_init(tfm, &nx842_powernv_driver);
@@ -1044,9 +1015,13 @@ static __init int nx842_powernv_init(void)
nx842_powernv_exec = nx842_exec_icswx;
} else {
+ ret = nx842_open_percpu_txwins();
+ if (ret) {
+ nx842_delete_coprocs();
+ return ret;
+ }
+
nx842_powernv_exec = nx842_exec_vas;
- nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas;
- nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas;
}
ret = crypto_register_alg(&nx842_powernv_alg);
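The nx-842-powernv.c rework above moves the VAS send window out of per-request workmem into a per-CPU variable that is opened once at init and only read on the hot path. A minimal sketch of that per-CPU pattern, assuming hypothetical helpers (open_txwin_for_cpu() is a placeholder):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/err.h>

struct vas_window;
struct vas_window *open_txwin_for_cpu(int cpu);	/* placeholder */

static DEFINE_PER_CPU(struct vas_window *, cpu_txwin);

static int open_percpu_txwins(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vas_window *win = open_txwin_for_cpu(cpu);

		if (IS_ERR(win))
			return PTR_ERR(win);
		per_cpu(cpu_txwin, cpu) = win;	/* written once at init */
	}
	return 0;
}

/* Hot path: called with preemption disabled around the NX copy/paste. */
static struct vas_window *local_txwin(void)
{
	return this_cpu_read(cpu_txwin);
}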
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index cddc6d8b55d9..bf52cd1d7fca 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -1082,7 +1082,7 @@ static int nx842_remove(struct vio_dev *viodev)
return 0;
}
-static struct vio_device_id nx842_vio_driver_ids[] = {
+static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index da3cb8c35ec7..d94e25df503b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
spin_lock_init(&ctx->lock);
ctx->driver = driver;
- ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index bb2f31792683..b66f19ac600f 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NX_842_H__
#define __NX_842_H__
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index abd465f479c4..a810596b97c2 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -22,6 +22,7 @@
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -433,7 +434,7 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
@@ -443,7 +444,7 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
struct nx_gcm_rctx *rctx = aead_request_ctx(req);
char *iv = rctx->iv;
- memcpy(iv, req->iv, 12);
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
@@ -498,7 +499,7 @@ struct aead_alg nx_gcm_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm_aes_nx_set_key,
.encrypt = gcm_aes_nx_encrypt,
@@ -516,7 +517,7 @@ struct aead_alg nx_gcm4106_aes_alg = {
},
.init = nx_crypto_ctx_aes_gcm_init,
.exit = nx_crypto_ctx_aead_exit,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = gcm4106_aes_nx_set_key,
.setauthsize = gcm4106_aes_nx_setauthsize,
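The nx-aes-gcm.c changes (and the omap-aes ones further down) replace the bare 12- and 8-byte IV lengths with the named constants from <crypto/gcm.h>. A minimal sketch under that assumption:

#include <crypto/gcm.h>
#include <linux/string.h>
#include <linux/types.h>

/* GCM_AES_IV_SIZE is 12 and GCM_RFC4106_IV_SIZE is 8; using the named
 * constants keeps .ivsize and the memcpy() lengths in sync. */
static void copy_gcm_iv(u8 *dst, const u8 *src)
{
	memcpy(dst, src, GCM_AES_IV_SIZE);
}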
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 036057abb257..3a5e31be4764 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -833,7 +833,7 @@ static void __exit nx_fini(void)
vio_unregister_driver(&nx_driver.viodriver);
}
-static struct vio_device_id nx_crypto_driver_ids[] = {
+static const struct vio_device_id nx_crypto_driver_ids[] = {
{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
{ "", "" }
};
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 9347878d4f30..c3e54af18645 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NX_H__
#define __NX_H__
diff --git a/drivers/crypto/nx/nx_csbcpb.h b/drivers/crypto/nx/nx_csbcpb.h
index a304f956d6f8..493f8490ff94 100644
--- a/drivers/crypto/nx/nx_csbcpb.h
+++ b/drivers/crypto/nx/nx_csbcpb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NX_CSBCPB_H__
#define __NX_CSBCPB_H__
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 7d4f8a4be6d8..0cc3b65d7162 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -18,6 +18,7 @@
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
@@ -186,7 +187,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
if (!sk_req) {
pr_err("skcipher: Failed to allocate request\n");
- return -1;
+ return -ENOMEM;
}
init_completion(&result.completion);
@@ -214,7 +215,7 @@ static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
}
/* fall through */
default:
- pr_err("Encryption of IV failed for GCM mode");
+ pr_err("Encryption of IV failed for GCM mode\n");
break;
}
@@ -311,7 +312,7 @@ static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
int err, assoclen;
memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
- memcpy(rctx->iv + 12, &counter, 4);
+ memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);
err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
if (err)
@@ -339,7 +340,7 @@ int omap_aes_gcm_encrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}
@@ -347,7 +348,7 @@ int omap_aes_gcm_decrypt(struct aead_request *req)
{
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- memcpy(rctx->iv, req->iv, 12);
+ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index c376a3ee7c2c..fbec0a2e76dd 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>
@@ -767,7 +768,7 @@ static struct aead_alg algs_aead_gcm[] = {
},
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.setkey = omap_aes_gcm_setkey,
.encrypt = omap_aes_gcm_encrypt,
@@ -788,7 +789,7 @@ static struct aead_alg algs_aead_gcm[] = {
.init = omap_aes_gcm_cra_init,
.exit = omap_aes_gcm_cra_exit,
.maxauthsize = AES_BLOCK_SIZE,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.setkey = omap_aes_4106gcm_setkey,
.encrypt = omap_aes_4106gcm_encrypt,
.decrypt = omap_aes_4106gcm_decrypt,
@@ -974,11 +975,10 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_aes_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -991,8 +991,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index d37c9506c36c..ebc5c0f11f03 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -928,16 +928,13 @@ MODULE_DEVICE_TABLE(of, omap_des_of_match);
static int omap_des_get_of(struct omap_des_dev *dd,
struct platform_device *pdev)
{
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(omap_des_of_match), &pdev->dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(&pdev->dev);
+ if (!dd->pdata) {
dev_err(&pdev->dev, "no compatible OF match\n");
return -EINVAL;
}
- dd->pdata = match->data;
-
return 0;
}
#else
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index c40ac30ec002..86b89ace836f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1944,11 +1944,10 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
struct device *dev, struct resource *res)
{
struct device_node *node = dev->of_node;
- const struct of_device_id *match;
int err = 0;
- match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
- if (!match) {
+ dd->pdata = of_device_get_match_data(dev);
+ if (!dd->pdata) {
dev_err(dev, "no compatible OF match\n");
err = -EINVAL;
goto err;
@@ -1968,8 +1967,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->pdata = match->data;
-
err:
return err;
}
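The three OMAP drivers above drop the open-coded of_match_device() lookup in favour of of_device_get_match_data(), which hands back the .data pointer of the matching OF table entry directly. A minimal sketch with hypothetical names:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/errno.h>

struct my_pdata { unsigned int dma_threshold; };

static const struct my_pdata omap2_pdata = { .dma_threshold = 256 };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "ti,omap2-example", .data = &omap2_pdata },
	{ /* sentinel */ }
};

static int my_get_res_of(struct platform_device *pdev,
			 const struct my_pdata **pdata)
{
	/* Returns the .data of the matched of_device_id, or NULL. */
	*pdata = of_device_get_match_data(&pdev->dev);
	if (!*pdata)
		return -EINVAL;
	return 0;
}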
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index b3869748cc6b..4b6642a25df5 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -482,7 +482,7 @@ static struct crypto_alg cbc_aes_alg = {
}
};
-static struct x86_cpu_id padlock_cpu_id[] = {
+static const struct x86_cpu_id padlock_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
{}
};
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bc72d20c32c3..d32c79328876 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -509,7 +509,7 @@ static struct shash_alg sha256_alg_nano = {
}
};
-static struct x86_cpu_id padlock_sha_ids[] = {
+static const struct x86_cpu_id padlock_sha_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PHE),
{}
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index b6f14844702e..5a6dc53b2b9d 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1125,9 +1125,9 @@ static irqreturn_t spacc_spacc_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static void spacc_packet_timeout(unsigned long data)
+static void spacc_packet_timeout(struct timer_list *t)
{
- struct spacc_engine *engine = (struct spacc_engine *)data;
+ struct spacc_engine *engine = from_timer(engine, t, packet_timeout);
spacc_process_done(engine);
}
@@ -1714,8 +1714,7 @@ static int spacc_probe(struct platform_device *pdev)
writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
engine->regs + SPA_IRQ_EN_REG_OFFSET);
- setup_timer(&engine->packet_timeout, spacc_packet_timeout,
- (unsigned long)engine);
+ timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);
INIT_LIST_HEAD(&engine->pending);
INIT_LIST_HEAD(&engine->completed);
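The picoxcell hunks convert the packet timeout to the timer_list-based callback API: timer_setup() replaces setup_timer(), and from_timer() recovers the containing structure instead of casting an unsigned long. A minimal sketch with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_engine {
	struct timer_list packet_timeout;
	/* ... */
};

static void my_packet_timeout(struct timer_list *t)
{
	/* container_of() back to the engine that owns this timer */
	struct my_engine *engine = from_timer(engine, t, packet_timeout);

	/* handle the expired request on this engine */
	(void)engine;
}

static void my_engine_init(struct my_engine *engine)
{
	timer_setup(&engine->packet_timeout, my_packet_timeout, 0);
	mod_timer(&engine->packet_timeout, jiffies + msecs_to_jiffies(100));
}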
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 8265106f1c8e..7dd15e751d02 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 92fb6ffdc062..47a8e3d8b81a 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 8afac52677a6..2d06409bd3c4 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -228,11 +228,8 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
- struct adf_accel_vf_info *vf_info;
struct vf_id_map *map;
- vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
-
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 6f5dd68449c6..13c52d6bf630 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -443,9 +443,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- if (unlikely(!params->p || !params->g))
- return -EINVAL;
-
if (qat_dh_check_params_length(params->p_size << 3))
return -EINVAL;
@@ -462,11 +459,8 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
}
ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
- if (!ctx->g) {
- dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
- ctx->p = NULL;
+ if (!ctx->g)
return -ENOMEM;
- }
memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
params->g_size);
@@ -507,18 +501,22 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
ret = qat_dh_set_params(ctx, &params);
if (ret < 0)
- return ret;
+ goto err_clear_ctx;
ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
GFP_KERNEL);
if (!ctx->xa) {
- qat_dh_clear_ctx(dev, ctx);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_clear_ctx;
}
memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
params.key_size);
return 0;
+
+err_clear_ctx:
+ qat_dh_clear_ctx(dev, ctx);
+ return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
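The qat_dh_set_secret() change above funnels every failure through a single err_clear_ctx label so the DMA buffers are released in one place. A minimal sketch of that single-exit error handling; my_set_params(), my_alloc_buffer() and my_clear_ctx() are placeholders:

#include <linux/errno.h>

struct my_ctx { void *xa; };
struct my_params { int unused; };

static int my_set_secret(struct my_ctx *ctx, const struct my_params *p)
{
	int ret;

	ret = my_set_params(ctx, p);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = my_alloc_buffer(ctx);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}

	return 0;

err_clear_ctx:
	my_clear_ctx(ctx);	/* one place undoes everything set up so far */
	return ret;
}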
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index e2454d90d949..98d22c2096e3 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -567,26 +567,26 @@ qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
code_page->imp_expr_tab_offset);
if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
imp_expr_tab->entry_num) {
- pr_err("QAT: UOF can't contain imported variable to be parsed");
+ pr_err("QAT: UOF can't contain imported variable to be parsed\n");
return -EINVAL;
}
neigh_reg_tab = (struct icp_qat_uof_objtable *)
(encap_uof_obj->beg_uof +
code_page->neigh_reg_tab_offset);
if (neigh_reg_tab->entry_num) {
- pr_err("QAT: UOF can't contain shared control store feature");
+ pr_err("QAT: UOF can't contain shared control store feature\n");
return -EINVAL;
}
if (image->numpages > 1) {
- pr_err("QAT: UOF can't contain multiple pages");
+ pr_err("QAT: UOF can't contain multiple pages\n");
return -EINVAL;
}
if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use shared control store feature");
+ pr_err("QAT: UOF can't use shared control store feature\n");
return -EFAULT;
}
if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
- pr_err("QAT: UOF can't use reloadable feature");
+ pr_err("QAT: UOF can't use reloadable feature\n");
return -EFAULT;
}
return 0;
@@ -702,7 +702,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
}
}
if (!mflag) {
- pr_err("QAT: uimage uses AE not set");
+ pr_err("QAT: uimage uses AE not set\n");
return -EINVAL;
}
return 0;
@@ -791,6 +791,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_GPA_ABS:
case ICP_GPB_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_GPA_REL:
case ICP_GPB_REL:
return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
@@ -800,6 +801,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_RD_ABS:
case ICP_DR_RD_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_REL:
case ICP_DR_REL:
case ICP_SR_RD_REL:
@@ -809,6 +811,7 @@ static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
case ICP_SR_WR_ABS:
case ICP_DR_WR_ABS:
ctx_mask = 0;
+ /* fall through */
case ICP_SR_WR_REL:
case ICP_DR_WR_REL:
return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
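The three "/* fall through */" comments added above mark the case labels where dropping into the next case is intentional, which is what fall-through warnings (-Wimplicit-fallthrough) expect. A minimal fragment of that shape, with placeholder names:

	switch (reg_type) {
	case REG_GPA_ABS:
	case REG_GPB_ABS:
		ctx_mask = 0;	/* absolute registers ignore the context mask */
		/* fall through */
	case REG_GPA_REL:
	case REG_GPB_REL:
		return init_gpr(handle, ae, ctx_mask, reg_type, reg_addr, value);
	default:
		return -EINVAL;
	}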
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
index 348dc3173afa..19a7f899acff 100644
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
qcrypto-objs := core.o \
common.o \
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index b04b42f48366..ea4d96bf47e8 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -248,10 +248,7 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->fallback))
- return PTR_ERR(ctx->fallback);
-
- return 0;
+ return PTR_ERR_OR_ZERO(ctx->fallback);
}
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
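PTR_ERR_OR_ZERO() above collapses the IS_ERR()/PTR_ERR() tail into one expression. A minimal sketch, assuming a fallback skcipher allocation and a hypothetical context:

#include <linux/err.h>
#include <crypto/skcipher.h>

struct my_ctx { struct crypto_skcipher *fallback; };

static int my_init_fallback(struct my_ctx *ctx, const char *name)
{
	ctx->fallback = crypto_alloc_skcipher(name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);

	/* 0 when the allocation succeeded, the PTR_ERR() value otherwise */
	return PTR_ERR_OR_ZERO(ctx->fallback);
}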
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 47e114ac09d0..53227d70d397 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -349,28 +349,12 @@ static int qce_ahash_digest(struct ahash_request *req)
return qce->async_req_enqueue(tmpl->qce, &req->base);
}
-struct qce_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void qce_digest_complete(struct crypto_async_request *req, int error)
-{
- struct qce_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned int digestsize = crypto_ahash_digestsize(tfm);
struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
- struct qce_ahash_result result;
+ struct crypto_wait wait;
struct ahash_request *req;
struct scatterlist sg;
unsigned int blocksize;
@@ -405,9 +389,9 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
goto err_free_ahash;
}
- init_completion(&result.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- qce_digest_complete, &result);
+ crypto_req_done, &wait);
crypto_ahash_clear_flags(ahash_tfm, ~0);
buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
@@ -420,13 +404,7 @@ static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
sg_init_one(&sg, buf, keylen);
ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
- ret = crypto_ahash_digest(req);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- ret = wait_for_completion_interruptible(&result.completion);
- if (!ret)
- ret = result.error;
- }
-
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret)
crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
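The qce_ahash_hmac_setkey() rework replaces the driver-private completion wrapper with the generic crypto_wait_req() helpers, which handle the -EINPROGRESS/-EBUSY cases internally. A minimal sketch of synchronously waiting on an async ahash digest, with hypothetical names:

#include <linux/crypto.h>
#include <crypto/hash.h>

static int digest_key_sync(struct crypto_ahash *tfm, struct scatterlist *sg,
			   u8 *out, unsigned int len)
{
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	/* Sleeps until completion if the driver returns -EINPROGRESS/-EBUSY. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}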
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index ab6a1b4c40f0..d5fb4013fb42 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RK3288_CRYPTO_H__
#define __RK3288_CRYPTO_H__
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 7ac657f46d15..142c6020cec7 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,14 +1,16 @@
/*
* Cryptographic API.
*
- * Support for Samsung S5PV210 HW acceleration.
+ * Support for Samsung S5PV210 and Exynos HW acceleration.
*
* Copyright (C) 2011 NetUP Inc. All rights reserved.
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
+ * Hash part based on omap-sham.c driver.
*/
#include <linux/clk.h>
@@ -30,98 +32,112 @@
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
-#define _SBF(s, v) ((v) << (s))
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define _SBF(s, v) ((v) << (s))
/* Feed control registers */
-#define SSS_REG_FCINTSTAT 0x0000
-#define SSS_FCINTSTAT_BRDMAINT BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT BIT(0)
-
-#define SSS_REG_FCINTENSET 0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
-
-#define SSS_REG_FCINTENCLR 0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
-
-#define SSS_REG_FCINTPEND 0x000C
-#define SSS_FCINTPEND_BRDMAINTP BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP BIT(0)
-
-#define SSS_REG_FCFIFOSTAT 0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
-
-#define SSS_REG_FCFIFOCTRL 0x0014
-#define SSS_FCFIFOCTRL_DESSEL BIT(2)
-#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
-#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
-#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
-
-#define SSS_REG_FCBRDMAS 0x0020
-#define SSS_REG_FCBRDMAL 0x0024
-#define SSS_REG_FCBRDMAC 0x0028
-#define SSS_FCBRDMAC_BYTESWAP BIT(1)
-#define SSS_FCBRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCBTDMAS 0x0030
-#define SSS_REG_FCBTDMAL 0x0034
-#define SSS_REG_FCBTDMAC 0x0038
-#define SSS_FCBTDMAC_BYTESWAP BIT(1)
-#define SSS_FCBTDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCHRDMAS 0x0040
-#define SSS_REG_FCHRDMAL 0x0044
-#define SSS_REG_FCHRDMAC 0x0048
-#define SSS_FCHRDMAC_BYTESWAP BIT(1)
-#define SSS_FCHRDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAS 0x0050
-#define SSS_REG_FCPKDMAL 0x0054
-#define SSS_REG_FCPKDMAC 0x0058
-#define SSS_FCPKDMAC_BYTESWAP BIT(3)
-#define SSS_FCPKDMAC_DESCEND BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT BIT(1)
-#define SSS_FCPKDMAC_FLUSH BIT(0)
-
-#define SSS_REG_FCPKDMAO 0x005C
+#define SSS_REG_FCINTSTAT 0x0000
+#define SSS_FCINTSTAT_HPARTINT BIT(7)
+#define SSS_FCINTSTAT_HDONEINT BIT(5)
+#define SSS_FCINTSTAT_BRDMAINT BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT BIT(0)
+
+#define SSS_REG_FCINTENSET 0x0004
+#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
+#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
+#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
+
+#define SSS_REG_FCINTENCLR 0x0008
+#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
+#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
+#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
+
+#define SSS_REG_FCINTPEND 0x000C
+#define SSS_FCINTPEND_HPARTINTP BIT(7)
+#define SSS_FCINTPEND_HDONEINTP BIT(5)
+#define SSS_FCINTPEND_BRDMAINTP BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP BIT(0)
+
+#define SSS_REG_FCFIFOSTAT 0x0010
+#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
+
+#define SSS_REG_FCFIFOCTRL 0x0014
+#define SSS_FCFIFOCTRL_DESSEL BIT(2)
+#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
+#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
+#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
+#define SSS_HASHIN_MASK _SBF(0, 0x03)
+
+#define SSS_REG_FCBRDMAS 0x0020
+#define SSS_REG_FCBRDMAL 0x0024
+#define SSS_REG_FCBRDMAC 0x0028
+#define SSS_FCBRDMAC_BYTESWAP BIT(1)
+#define SSS_FCBRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCBTDMAS 0x0030
+#define SSS_REG_FCBTDMAL 0x0034
+#define SSS_REG_FCBTDMAC 0x0038
+#define SSS_FCBTDMAC_BYTESWAP BIT(1)
+#define SSS_FCBTDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCHRDMAS 0x0040
+#define SSS_REG_FCHRDMAL 0x0044
+#define SSS_REG_FCHRDMAC 0x0048
+#define SSS_FCHRDMAC_BYTESWAP BIT(1)
+#define SSS_FCHRDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAS 0x0050
+#define SSS_REG_FCPKDMAL 0x0054
+#define SSS_REG_FCPKDMAC 0x0058
+#define SSS_FCPKDMAC_BYTESWAP BIT(3)
+#define SSS_FCPKDMAC_DESCEND BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT BIT(1)
+#define SSS_FCPKDMAC_FLUSH BIT(0)
+
+#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
-#define SSS_AES_BYTESWAP_DI BIT(11)
-#define SSS_AES_BYTESWAP_DO BIT(10)
-#define SSS_AES_BYTESWAP_IV BIT(9)
-#define SSS_AES_BYTESWAP_CNT BIT(8)
-#define SSS_AES_BYTESWAP_KEY BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE BIT(6)
-#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
-#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
-#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE BIT(3)
-#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
-#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
-#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT BIT(0)
+#define SSS_AES_BYTESWAP_DI BIT(11)
+#define SSS_AES_BYTESWAP_DO BIT(10)
+#define SSS_AES_BYTESWAP_IV BIT(9)
+#define SSS_AES_BYTESWAP_CNT BIT(8)
+#define SSS_AES_BYTESWAP_KEY BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE BIT(6)
+#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
+#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
+#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
+#define SSS_AES_FIFO_MODE BIT(3)
+#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
+#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
+#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
+#define SSS_AES_MODE_DECRYPT BIT(0)
#define SSS_REG_AES_STATUS 0x04
-#define SSS_AES_BUSY BIT(2)
-#define SSS_AES_INPUT_READY BIT(1)
-#define SSS_AES_OUTPUT_READY BIT(0)
+#define SSS_AES_BUSY BIT(2)
+#define SSS_AES_INPUT_READY BIT(1)
+#define SSS_AES_OUTPUT_READY BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
@@ -129,26 +145,97 @@
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))
-#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
-#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
-#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
+#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
+#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
+#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
-#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
+#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
SSS_AES_REG(dev, reg))
/* HW engine modes */
-#define FLAGS_AES_DECRYPT BIT(0)
-#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
-#define FLAGS_AES_CBC _SBF(1, 0x01)
-#define FLAGS_AES_CTR _SBF(1, 0x02)
+#define FLAGS_AES_DECRYPT BIT(0)
+#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
+#define FLAGS_AES_CBC _SBF(1, 0x01)
+#define FLAGS_AES_CTR _SBF(1, 0x02)
+
+#define AES_KEY_LEN 16
+#define CRYPTO_QUEUE_LEN 1
+
+/* HASH registers */
+#define SSS_REG_HASH_CTRL 0x00
+
+#define SSS_HASH_USER_IV_EN BIT(5)
+#define SSS_HASH_INIT_BIT BIT(4)
+#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
+#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
+#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
+
+#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
+
+#define SSS_REG_HASH_CTRL_PAUSE 0x04
+
+#define SSS_HASH_PAUSE BIT(0)
+
+#define SSS_REG_HASH_CTRL_FIFO 0x08
+
+#define SSS_HASH_FIFO_MODE_DMA BIT(0)
+#define SSS_HASH_FIFO_MODE_CPU 0
+
+#define SSS_REG_HASH_CTRL_SWAP 0x0C
+
+#define SSS_HASH_BYTESWAP_DI BIT(3)
+#define SSS_HASH_BYTESWAP_DO BIT(2)
+#define SSS_HASH_BYTESWAP_IV BIT(1)
+#define SSS_HASH_BYTESWAP_KEY BIT(0)
+
+#define SSS_REG_HASH_STATUS 0x10
+
+#define SSS_HASH_STATUS_MSG_DONE BIT(6)
+#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
+#define SSS_HASH_STATUS_BUFFER_READY BIT(0)
+
+#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
+#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
+
+#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
+#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
-#define AES_KEY_LEN 16
-#define CRYPTO_QUEUE_LEN 1
+#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
+#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
+
+#define HASH_BLOCK_SIZE 64
+#define HASH_REG_SIZEOF 4
+#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
+#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
+
+/*
+ * HASH bit numbers, used by the driver: set in dev->hash_flags with
+ * set_bit() and clear_bit(), or tested with test_bit() or BIT(), to keep
+ * the HASH state BUSY or FREE, or to signal state from the irq_handler
+ * to the hash_tasklet. The SGS flags track memory allocated for the
+ * scatterlist.
+ */
+#define HASH_FLAGS_BUSY 0
+#define HASH_FLAGS_FINAL 1
+#define HASH_FLAGS_DMA_ACTIVE 2
+#define HASH_FLAGS_OUTPUT_READY 3
+#define HASH_FLAGS_DMA_READY 4
+#define HASH_FLAGS_SGS_COPIED 5
+#define HASH_FLAGS_SGS_ALLOCED 6
+
+/* HASH HW constants */
+#define BUFLEN HASH_BLOCK_SIZE
+
+#define SSS_HASH_DMA_LEN_ALIGN 8
+#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
+
+#define SSS_HASH_QUEUE_LENGTH 10
/**
* struct samsung_aes_variant - platform specific SSS driver data
* @aes_offset: AES register offset from SSS module's base.
+ * @hash_offset: HASH register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
* Note: A structure for driver specific platform data is used for future
@@ -156,6 +243,7 @@
*/
struct samsung_aes_variant {
unsigned int aes_offset;
+ unsigned int hash_offset;
};
struct s5p_aes_reqctx {
@@ -195,6 +283,19 @@ struct s5p_aes_ctx {
* protects against concurrent access to these fields.
* @lock: Lock for protecting both access to device hardware registers
* and fields related to current request (including the busy field).
+ * @res: Resources for hash.
+ * @io_hash_base: Per-variant offset for HASH block IO memory.
+ * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
+ * variable.
+ * @hash_flags: Flags for current HASH op.
+ * @hash_queue: Async hash queue.
+ * @hash_tasklet: New HASH request scheduling job.
+ * @xmit_buf: Buffer for current HASH request transfer into SSS block.
+ * @hash_req: Current request sending to SSS HASH block.
+ * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
+ * @hash_sg_cnt: Counter for hash_sg_iter.
+ *
+ * @use_hash: true if HASH algs enabled
*/
struct s5p_aes_dev {
struct device *dev;
@@ -215,16 +316,83 @@ struct s5p_aes_dev {
struct crypto_queue queue;
bool busy;
spinlock_t lock;
+
+ struct resource *res;
+ void __iomem *io_hash_base;
+
+ spinlock_t hash_lock; /* protect hash_ vars */
+ unsigned long hash_flags;
+ struct crypto_queue hash_queue;
+ struct tasklet_struct hash_tasklet;
+
+ u8 xmit_buf[BUFLEN];
+ struct ahash_request *hash_req;
+ struct scatterlist *hash_sg_iter;
+ unsigned int hash_sg_cnt;
+
+ bool use_hash;
};
-static struct s5p_aes_dev *s5p_dev;
+/**
+ * struct s5p_hash_reqctx - HASH request context
+ * @dd: Associated device
+ * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
+ * @digcnt: Number of bytes processed by HW (without buffer[] ones)
+ * @digest: Digest message or IV for partial result
+ * @nregs: Number of HW registers for digest or IV read/write
+ * @engine: Bits for selecting type of HASH in SSS block
+ * @sg: sg for DMA transfer
+ * @sg_len: Length of sg for DMA transfer
+ * @sgl[]: sg for joining buffer and req->src scatterlist
+ * @skip: Skip offset in req->src for current op
+ * @total: Total number of bytes for current request
+ * @finup: Keep state for finup or final.
+ * @error: Keep track of error.
+ * @bufcnt: Number of bytes held in buffer[]
+ * @buffer[]: For byte(s) from end of req->src in UPDATE op
+ */
+struct s5p_hash_reqctx {
+ struct s5p_aes_dev *dd;
+ bool op_update;
+
+ u64 digcnt;
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ unsigned int nregs; /* digest_size / sizeof(reg) */
+ u32 engine;
+
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ struct scatterlist sgl[2];
+ unsigned int skip;
+ unsigned int total;
+ bool finup;
+ bool error;
+
+ u32 bufcnt;
+ u8 buffer[0];
+};
+
+/**
+ * struct s5p_hash_ctx - HASH transformation context
+ * @dd: Associated device
+ * @flags: Bits for algorithm HASH.
+ * @fallback: Software transformation for zero message or size < BUFLEN.
+ */
+struct s5p_hash_ctx {
+ struct s5p_aes_dev *dd;
+ unsigned long flags;
+ struct crypto_shash *fallback;
+};
static const struct samsung_aes_variant s5p_aes_data = {
.aes_offset = 0x4000,
+ .hash_offset = 0x6000,
};
static const struct samsung_aes_variant exynos_aes_data = {
.aes_offset = 0x200,
+ .hash_offset = 0x400,
};
static const struct of_device_id s5p_sss_dt_match[] = {
@@ -254,6 +422,8 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
platform_get_device_id(pdev)->driver_data;
}
+static struct s5p_aes_dev *s5p_dev;
+
static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
@@ -436,15 +606,65 @@ static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
return ret;
}
+static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
+{
+ return __raw_readl(dd->io_hash_base + offset);
+}
+
+static inline void s5p_hash_write(struct s5p_aes_dev *dd,
+ u32 offset, u32 value)
+{
+ __raw_writel(value, dd->io_hash_base + offset);
+}
+
+/**
+ * s5p_set_dma_hashdata() - start DMA with sg
+ * @dev: device
+ * @sg: scatterlist ready to DMA transmit
+ */
+static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
+ struct scatterlist *sg)
+{
+ dev->hash_sg_cnt--;
+ SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
+ SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
+}
+
+/**
+ * s5p_hash_rx() - get next hash_sg_iter
+ * @dev: device
+ *
+ * Return:
+ * 2 if there is no more data and it is an UPDATE op
+ * 1 if new receiving (input) data is ready and can be written to the device
+ * 0 if there is no more data and it is a FINAL op
+ */
+static int s5p_hash_rx(struct s5p_aes_dev *dev)
+{
+ if (dev->hash_sg_cnt > 0) {
+ dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
+ return 1;
+ }
+
+ set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
+ if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
+ return 0;
+
+ return 2;
+}
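A short sketch (not part of this patch) of how the three return values are consumed by the interrupt handler below; the variable names follow that handler:

	err_dma_hx = s5p_hash_rx(dev);
	/* ... */
	if (err_dma_hx == 1)		/* more data: map next sg entry */
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
	else if (err_dma_hx == 2)	/* UPDATE op drained: pause, wait for HPART irq */
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE, SSS_HASH_PAUSE);
	/* 0: FINAL op drained, the HDONE irq will finish the request */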
+
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
int err_dma_tx = 0;
int err_dma_rx = 0;
+ int err_dma_hx = 0;
bool tx_end = false;
+ bool hx_end = false;
unsigned long flags;
uint32_t status;
+ u32 st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -456,6 +676,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
*
* If there is no more data in tx scatter list, call s5p_aes_complete()
* and schedule new tasklet.
+ *
+ * Handle the hx interrupt. If there is still data, map the next entry.
*/
status = SSS_READ(dev, FCINTSTAT);
if (status & SSS_FCINTSTAT_BRDMAINT)
@@ -467,7 +689,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
err_dma_tx = s5p_aes_tx(dev);
}
- SSS_WRITE(dev, FCINTPEND, status);
+ if (status & SSS_FCINTSTAT_HRDMAINT)
+ err_dma_hx = s5p_hash_rx(dev);
+
+ st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
+ SSS_FCINTSTAT_HRDMAINT);
+ /* clear DMA bits */
+ SSS_WRITE(dev, FCINTPEND, st_bits);
+
+ /* clear HASH irq bits */
+ if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
+ /* cannot have both HPART and HDONE */
+ if (status & SSS_FCINTSTAT_HPARTINT)
+ st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
+
+ if (status & SSS_FCINTSTAT_HDONEINT)
+ st_bits = SSS_HASH_STATUS_MSG_DONE;
+
+ set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
+ s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
+ hx_end = true;
+ /* when DONE or PART, do not handle HASH DMA */
+ err_dma_hx = 0;
+ }
if (err_dma_rx < 0) {
err = err_dma_rx;
@@ -480,6 +724,8 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
if (tx_end) {
s5p_sg_done(dev);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -497,21 +743,1100 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
s5p_set_dma_outdata(dev, dev->sg_dst);
if (err_dma_rx == 1)
s5p_set_dma_indata(dev, dev->sg_src);
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
}
- return IRQ_HANDLED;
+ goto hash_irq_end;
error:
s5p_sg_done(dev);
dev->busy = false;
+ if (err_dma_hx == 1)
+ s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
+
spin_unlock_irqrestore(&dev->lock, flags);
s5p_aes_complete(dev, err);
+hash_irq_end:
+ /*
+ * Note about the else if below:
+ * when hash_sg_iter reaches the end and it is an UPDATE op,
+ * issue SSS_HASH_PAUSE and wait for the HPART irq
+ */
+ if (hx_end)
+ tasklet_schedule(&dev->hash_tasklet);
+ else if (err_dma_hx == 2)
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
+ SSS_HASH_PAUSE);
+
return IRQ_HANDLED;
}
+/**
+ * s5p_hash_read_msg() - read message or IV from HW
+ * @req: AHASH request
+ */
+static void s5p_hash_read_msg(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
+}
+
+/**
+ * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
+ * @dd: device
+ * @ctx: request context
+ */
+static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
+ struct s5p_hash_reqctx *ctx)
+{
+ u32 *hash = (u32 *)ctx->digest;
+ unsigned int i;
+
+ for (i = 0; i < ctx->nregs; i++)
+ s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
+}
+
+/**
+ * s5p_hash_write_iv() - write IV for next partial/finup op.
+ * @req: AHASH request
+ */
+static void s5p_hash_write_iv(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ s5p_hash_write_ctx_iv(ctx->dd, ctx);
+}
+
+/**
+ * s5p_hash_copy_result() - copy digest into req->result
+ * @req: AHASH request
+ */
+static void s5p_hash_copy_result(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return;
+
+ memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
+}
+
+/**
+ * s5p_hash_dma_flush() - flush HASH DMA
+ * @dev: secss device
+ */
+static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
+{
+ SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
+}
+
+/**
+ * s5p_hash_dma_enable() - enable DMA mode for HASH
+ * @dev: secss device
+ *
+ * enable DMA mode for HASH
+ */
+static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
+{
+ s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
+}
+
+/**
+ * s5p_hash_irq_disable() - disable irq HASH signals
+ * @dev: secss device
+ * @flags: bitfield with irq's to be disabled
+ */
+static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
+{
+ SSS_WRITE(dev, FCINTENCLR, flags);
+}
+
+/**
+ * s5p_hash_irq_enable() - enable irq signals
+ * @dev: secss device
+ * @flags: bitfield with irq's to be enabled
+ */
+static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
+{
+ SSS_WRITE(dev, FCINTENSET, flags);
+}
+
+/**
+ * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without crypto AES/DES
+ */
+static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ unsigned long flags;
+ u32 flow;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ flow = SSS_READ(dev, FCFIFOCTRL);
+ flow &= ~SSS_HASHIN_MASK;
+ flow |= hashflow;
+ SSS_WRITE(dev, FCFIFOCTRL, flow);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/**
+ * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
+ * @dev: secss device
+ * @hashflow: HASH stream flow with/without AES/DES
+ *
+ * Flush the HASH DMA and enable it, set the HASH stream flow inside the
+ * SecSS HW, and enable the HASH irqs HRDMA, HDONE and HPART.
+ */
+static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
+{
+ s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
+ SSS_FCINTENCLR_HDONEINTENCLR |
+ SSS_FCINTENCLR_HPARTINTENCLR);
+ s5p_hash_dma_flush(dev);
+
+ s5p_hash_dma_enable(dev);
+ s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
+ s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
+ SSS_FCINTENSET_HDONEINTENSET |
+ SSS_FCINTENSET_HPARTINTENSET);
+}
+
+/**
+ * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Prepare the SSS HASH block for processing bytes in DMA mode. If it is
+ * called after previous updates, fill up the IV words. For a final op,
+ * calculate and set the lengths for HASH so SecSS can finalize the hash.
+ * For a partial op, set the SSS HASH length to 2^63 so it will never be
+ * reached, and set prelow and prehigh to zero.
+ *
+ * This function does not start DMA transfer.
+ */
+static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ u32 prelow, prehigh, low, high;
+ u32 configflags, swapflags;
+ u64 tmplen;
+
+ configflags = ctx->engine | SSS_HASH_INIT_BIT;
+
+ if (likely(ctx->digcnt)) {
+ s5p_hash_write_ctx_iv(dd, ctx);
+ configflags |= SSS_HASH_USER_IV_EN;
+ }
+
+ if (final) {
+ /* number of bytes for last part */
+ low = length;
+ high = 0;
+ /* total number of bits prev hashed */
+ tmplen = ctx->digcnt * 8;
+ prelow = (u32)tmplen;
+ prehigh = (u32)(tmplen >> 32);
+ } else {
+ prelow = 0;
+ prehigh = 0;
+ low = 0;
+ high = BIT(31);
+ }
+
+ swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
+ SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
+
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
+ s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
+ s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
+
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
+ s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
+}
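A hedged worked example (not part of this patch) of the values s5p_hash_write_ctrl() programs, assuming two previous 64-byte updates (digcnt = 128) and a 5-byte last chunk; the numbers are illustrative only:

	/* final op: low/high carry the last-part byte count,
	 * prelow/prehigh the previously hashed length in bits
	 */
	u64 tmplen  = 128 * 8;			/* 1024 bits already hashed */
	u32 prelow  = (u32)tmplen;		/* 0x400 */
	u32 prehigh = (u32)(tmplen >> 32);	/* 0 */
	u32 low     = 5;			/* bytes in the last part */
	u32 high    = 0;

	/* partial op: low = 0, high = BIT(31), prelow = prehigh = 0,
	 * i.e. a total length of 2^63 bits that is never reached
	 */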
+
+/**
+ * s5p_hash_xmit_dma() - start DMA hash processing
+ * @dd: secss device
+ * @length: length for request
+ * @final: true if final op
+ *
+ * Update digcnt here, as it is needed for finup/final op.
+ */
+static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
+ bool final)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+ unsigned int cnt;
+
+ cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ if (!cnt) {
+ dev_err(dd->dev, "dma_map_sg error\n");
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+ dd->hash_sg_iter = ctx->sg;
+ dd->hash_sg_cnt = cnt;
+ s5p_hash_write_ctrl(dd, length, final);
+ ctx->digcnt += length;
+ ctx->total -= length;
+
+ /* catch last interrupt */
+ if (final)
+ set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
+
+ s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
+
+ return -EINPROGRESS;
+}
+
+/**
+ * s5p_hash_copy_sgs() - copy request's bytes into new buffer
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new buffer and copy the data for HASH into it. If xmit_buf was
+ * filled, copy it first, then copy the data from sg into it. Prepare one
+ * sgl[0] entry with the allocated buffer.
+ *
+ * Set a bit in dd->hash_flags so the buffer can be freed after the irq ends
+ * processing.
+ */
+static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int pages, len;
+ void *buf;
+
+ len = new_len + ctx->bufcnt;
+ pages = get_order(len);
+
+ buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
+ if (!buf) {
+ dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
+
+ scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
+ new_len, 0);
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, buf, len);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ ctx->bufcnt = 0;
+ ctx->skip = 0;
+ set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @new_len: number of bytes to process from sg
+ *
+ * Allocate a new scatterlist table and copy the data for HASH into it. If
+ * xmit_buf was filled, prepare it first, then copy page, length and offset
+ * from the source sg into it, adjusting the begin and/or end for the skip
+ * offset and the hash_later value.
+ *
+ * The resulting sg table is assigned to ctx->sg. Set a flag so it can be
+ * freed after the irq ends processing.
+ */
+static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg, unsigned int new_len)
+{
+ unsigned int skip = ctx->skip, n = sg_nents(sg);
+ struct scatterlist *tmp;
+ unsigned int len;
+
+ if (ctx->bufcnt)
+ n++;
+
+ ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
+ if (!ctx->sg) {
+ ctx->error = true;
+ return -ENOMEM;
+ }
+
+ sg_init_table(ctx->sg, n);
+
+ tmp = ctx->sg;
+
+ ctx->sg_len = 0;
+
+ if (ctx->bufcnt) {
+ sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ }
+
+ while (sg && skip >= sg->length) {
+ skip -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ while (sg && new_len) {
+ len = sg->length - skip;
+ if (new_len < len)
+ len = new_len;
+
+ new_len -= len;
+ sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
+ skip = 0;
+ if (new_len <= 0)
+ sg_mark_end(tmp);
+
+ tmp = sg_next(tmp);
+ ctx->sg_len++;
+ sg = sg_next(sg);
+ }
+
+ set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_sgs() - prepare sg for processing
+ * @ctx: request context
+ * @sg: source scatterlist request
+ * @nbytes: number of bytes to process from sg
+ * @final: final flag
+ *
+ * Check two conditions: (1) the buffers in sg hold length-aligned data, and
+ * (2) the sg table has well-aligned elements (list_ok). If one of these
+ * checks fails, then either (1) allocate a new buffer for the data with
+ * s5p_hash_copy_sgs(), copy the data into it and prepare the request in sgl,
+ * or (2) allocate a new sg table and prepare the sg elements.
+ *
+ * For digest or finup all conditions can be good, in which case no fixes are
+ * needed.
+ */
+static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
+ struct scatterlist *sg,
+ unsigned int new_len, bool final)
+{
+ unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
+ bool aligned = true, list_ok = true;
+ struct scatterlist *sg_tmp = sg;
+
+ if (!sg || !sg->length || !new_len)
+ return 0;
+
+ if (skip || !final)
+ list_ok = false;
+
+ while (nbytes > 0 && sg_tmp) {
+ n++;
+ if (skip >= sg_tmp->length) {
+ skip -= sg_tmp->length;
+ if (!sg_tmp->length) {
+ aligned = false;
+ break;
+ }
+ } else {
+ if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
+ aligned = false;
+ break;
+ }
+
+ if (nbytes < sg_tmp->length - skip) {
+ list_ok = false;
+ break;
+ }
+
+ nbytes -= sg_tmp->length - skip;
+ skip = 0;
+ }
+
+ sg_tmp = sg_next(sg_tmp);
+ }
+
+ if (!aligned)
+ return s5p_hash_copy_sgs(ctx, sg, new_len);
+ else if (!list_ok)
+ return s5p_hash_copy_sg_lists(ctx, sg, new_len);
+
+ /*
+ * We have aligned data from the previous operation and/or the current one.
+ * Note: we enter here only if (digest or finup) and the data is aligned.
+ */
+ if (ctx->bufcnt) {
+ ctx->sg_len = n;
+ sg_init_table(ctx->sgl, 2);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
+ sg_chain(ctx->sgl, 2, sg);
+ ctx->sg = ctx->sgl;
+ ctx->sg_len++;
+ } else {
+ ctx->sg = sg;
+ ctx->sg_len = n;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_prepare_request() - prepare request for processing
+ * @req: AHASH request
+ * @update: true if UPDATE op
+ *
+ * Note 1: we can have the update flag _and_ the final flag at the same time.
+ * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE), or when
+ * either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when it
+ * is a final op.
+ */
+static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ bool final = ctx->finup;
+ int xmit_len, hash_later, nbytes;
+ int ret;
+
+ if (!req)
+ return 0;
+
+ if (update)
+ nbytes = req->nbytes;
+ else
+ nbytes = 0;
+
+ ctx->total = nbytes + ctx->bufcnt;
+ if (!ctx->total)
+ return 0;
+
+ if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
+ /* bytes left from previous request, so fill up to BUFLEN */
+ int len = BUFLEN - ctx->bufcnt % BUFLEN;
+
+ if (len > nbytes)
+ len = nbytes;
+
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, len, 0);
+ ctx->bufcnt += len;
+ nbytes -= len;
+ ctx->skip = len;
+ } else {
+ ctx->skip = 0;
+ }
+
+ if (ctx->bufcnt)
+ memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
+
+ xmit_len = ctx->total;
+ if (final) {
+ hash_later = 0;
+ } else {
+ if (IS_ALIGNED(xmit_len, BUFLEN))
+ xmit_len -= BUFLEN;
+ else
+ xmit_len -= xmit_len & (BUFLEN - 1);
+
+ hash_later = ctx->total - xmit_len;
+ /* copy hash_later bytes from end of req->src */
+ /* previous bytes are in xmit_buf, so no overwrite */
+ scatterwalk_map_and_copy(ctx->buffer, req->src,
+ req->nbytes - hash_later,
+ hash_later, 0);
+ }
+
+ if (xmit_len > BUFLEN) {
+ ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
+ final);
+ if (ret)
+ return ret;
+ } else {
+ /* have buffered data only */
+ if (unlikely(!ctx->bufcnt)) {
+ /* first update didn't fill up buffer */
+ scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
+ 0, xmit_len, 0);
+ }
+
+ sg_init_table(ctx->sgl, 1);
+ sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
+
+ ctx->sg = ctx->sgl;
+ ctx->sg_len = 1;
+ }
+
+ ctx->bufcnt = hash_later;
+ if (!final)
+ ctx->total = xmit_len;
+
+ return 0;
+}
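A hedged worked example (not part of this patch) of the xmit_len/hash_later split computed above, assuming BUFLEN = 64, ctx->bufcnt = 10 buffered bytes, a 150-byte update and no final flag:

	unsigned int total    = 10 + 150;		/* 160 */
	unsigned int xmit_len = total;

	if (IS_ALIGNED(xmit_len, BUFLEN))		/* 160 % 64 != 0 */
		xmit_len -= BUFLEN;
	else
		xmit_len -= xmit_len & (BUFLEN - 1);	/* 160 - 32 = 128 */

	/* 128 bytes are sent to the HW now; hash_later = 160 - 128 = 32
	 * bytes from the end of req->src are kept in ctx->buffer for the
	 * next update or final op
	 */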
+
+/**
+ * s5p_hash_update_dma_stop() - unmap DMA
+ * @dd: secss device
+ *
+ * Unmap scatterlist ctx->sg.
+ */
+static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
+
+ dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
+ clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
+}
+
+/**
+ * s5p_hash_finish() - copy calculated digest to crypto layer
+ * @req: AHASH request
+ */
+static void s5p_hash_finish(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+
+ if (ctx->digcnt)
+ s5p_hash_copy_result(req);
+
+ dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
+}
+
+/**
+ * s5p_hash_finish_req() - finish request
+ * @req: AHASH request
+ * @err: error
+ */
+static void s5p_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_aes_dev *dd = ctx->dd;
+ unsigned long flags;
+
+ if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
+ free_pages((unsigned long)sg_virt(ctx->sg),
+ get_order(ctx->sg->length));
+
+ if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
+ kfree(ctx->sg);
+
+ ctx->sg = NULL;
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
+ BIT(HASH_FLAGS_SGS_COPIED));
+
+ if (!err && !ctx->error) {
+ s5p_hash_read_msg(req);
+ if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
+ s5p_hash_finish(req);
+ } else {
+ ctx->error = true;
+ }
+
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
+ BIT(HASH_FLAGS_DMA_READY) |
+ BIT(HASH_FLAGS_OUTPUT_READY));
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (req->base.complete)
+ req->base.complete(&req->base, err);
+}
+
+/**
+ * s5p_hash_handle_queue() - handle hash queue
+ * @dd: device s5p_aes_dev
+ * @req: AHASH request
+ *
+ * If req != NULL, enqueue it on dd->hash_queue. If HASH_FLAGS_BUSY is not
+ * set on the device, then process the first request from dd->hash_queue.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
+ struct ahash_request *req)
+{
+ struct crypto_async_request *async_req, *backlog;
+ struct s5p_hash_reqctx *ctx;
+ unsigned long flags;
+ int err = 0, ret = 0;
+
+retry:
+ spin_lock_irqsave(&dd->hash_lock, flags);
+ if (req)
+ ret = ahash_enqueue_request(&dd->hash_queue, req);
+
+ if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+ return ret;
+ }
+
+ backlog = crypto_get_backlog(&dd->hash_queue);
+ async_req = crypto_dequeue_request(&dd->hash_queue);
+ if (async_req)
+ set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
+
+ spin_unlock_irqrestore(&dd->hash_lock, flags);
+
+ if (!async_req)
+ return ret;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ahash_request_cast(async_req);
+ dd->hash_req = req;
+ ctx = ahash_request_ctx(req);
+
+ err = s5p_hash_prepare_request(req, ctx->op_update);
+ if (err || !ctx->total)
+ goto out;
+
+ dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
+ ctx->op_update, req->nbytes);
+
+ s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
+ if (ctx->digcnt)
+ s5p_hash_write_iv(req); /* restore hash IV */
+
+ if (ctx->op_update) { /* HASH_OP_UPDATE */
+ err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
+ if (err != -EINPROGRESS && ctx->finup && !ctx->error)
+ /* no final() after finup() */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ } else { /* HASH_OP_FINAL */
+ err = s5p_hash_xmit_dma(dd, ctx->total, true);
+ }
+out:
+ if (err != -EINPROGRESS) {
+ /* hash_tasklet_cb will not finish it, so do it here */
+ s5p_hash_finish_req(req, err);
+ req = NULL;
+
+ /*
+ * Execute next request immediately if there is anything
+ * in queue.
+ */
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * s5p_hash_tasklet_cb() - hash tasklet
+ * @data: ptr to s5p_aes_dev
+ */
+static void s5p_hash_tasklet_cb(unsigned long data)
+{
+ struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
+
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
+ s5p_hash_handle_queue(dd, NULL);
+ return;
+ }
+
+ if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
+ if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
+ &dd->hash_flags)) {
+ s5p_hash_update_dma_stop(dd);
+ }
+
+ if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
+ &dd->hash_flags)) {
+ /* hash or semi-hash ready */
+ clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
+ goto finish;
+ }
+ }
+
+ return;
+
+finish:
+ /* finish current request */
+ s5p_hash_finish_req(dd->hash_req, 0);
+
+ /* If we are not busy, process next req */
+ if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
+ s5p_hash_handle_queue(dd, NULL);
+}
+
+/**
+ * s5p_hash_enqueue() - enqueue request
+ * @req: AHASH request
+ * @op: operation UPDATE (true) or FINAL (false)
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_enqueue(struct ahash_request *req, bool op)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->op_update = op;
+
+ return s5p_hash_handle_queue(tctx->dd, req);
+}
+
+/**
+ * s5p_hash_update() - process the hash input data
+ * @req: AHASH request
+ *
+ * If the request fits in the buffer, copy it and return immediately;
+ * otherwise enqueue it with OP_UPDATE.
+ *
+ * Returns: see s5p_hash_final below.
+ */
+static int s5p_hash_update(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ if (!req->nbytes)
+ return 0;
+
+ if (ctx->bufcnt + req->nbytes <= BUFLEN) {
+ scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
+ 0, req->nbytes, 0);
+ ctx->bufcnt += req->nbytes;
+ return 0;
+ }
+
+ return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
+}
+
+/**
+ * s5p_hash_shash_digest() - calculate shash digest
+ * @tfm: crypto transformation
+ * @flags: tfm flags
+ * @data: input data
+ * @len: length of data
+ * @out: output buffer
+ */
+static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
+ const u8 *data, unsigned int len, u8 *out)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+ shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(shash, data, len, out);
+}
+
+/**
+ * s5p_hash_final_shash() - calculate shash digest
+ * @req: AHASH request
+ */
+static int s5p_hash_final_shash(struct ahash_request *req)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ return s5p_hash_shash_digest(tctx->fallback, req->base.flags,
+ ctx->buffer, ctx->bufcnt, req->result);
+}
+
+/**
+ * s5p_hash_final() - close up hash and calculate digest
+ * @req: AHASH request
+ *
+ * Note: in final, req->src does not contain any data and req->nbytes can be
+ * non-zero.
+ *
+ * If no input data has been processed yet and the buffered hash data is
+ * less than BUFLEN (64), then calculate the final hash immediately using
+ * the SW algorithm fallback.
+ *
+ * Otherwise enqueue the current AHASH request with the OP_FINAL operation
+ * and finalize the hash message in HW. Note that if digcnt != 0 then there
+ * was a previous update op, so there are always some buffered bytes in
+ * ctx->buffer, which means that ctx->bufcnt != 0.
+ *
+ * Returns:
+ * 0 if the request has been processed immediately,
+ * -EINPROGRESS if the operation has been queued for later execution or is set
+ * to processing by HW,
+ * -EBUSY if the queue is full and the request should be resubmitted later,
+ * other negative values denote an error.
+ */
+static int s5p_hash_final(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ ctx->finup = true;
+ if (ctx->error)
+ return -EINVAL; /* uncompleted hash is not needed */
+
+ if (!ctx->digcnt && ctx->bufcnt < BUFLEN)
+ return s5p_hash_final_shash(req);
+
+ return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
+}
+
+/**
+ * s5p_hash_finup() - process last req->src and calculate digest
+ * @req: AHASH request containing the last update data
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_finup(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ int err1, err2;
+
+ ctx->finup = true;
+
+ err1 = s5p_hash_update(req);
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+ * final() always has to be called to clean up resources, even if
+ * update() failed (except for -EINPROGRESS), or to calculate the
+ * digest for a small size
+ */
+ err2 = s5p_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+/**
+ * s5p_hash_init() - initialize AHASH request context
+ * @req: AHASH request
+ *
+ * Init async hash request context.
+ */
+static int s5p_hash_init(struct ahash_request *req)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+ ctx->finup = false;
+ ctx->bufcnt = 0;
+ ctx->digcnt = 0;
+ ctx->total = 0;
+ ctx->skip = 0;
+
+ dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
+ crypto_ahash_digestsize(tfm));
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case MD5_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_MD5;
+ ctx->nregs = HASH_MD5_MAX_REG;
+ break;
+ case SHA1_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA1;
+ ctx->nregs = HASH_SHA1_MAX_REG;
+ break;
+ case SHA256_DIGEST_SIZE:
+ ctx->engine = SSS_HASH_ENGINE_SHA256;
+ ctx->nregs = HASH_SHA256_MAX_REG;
+ break;
+ default:
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * s5p_hash_digest - calculate digest from req->src
+ * @req: AHASH request
+ *
+ * Return values: see s5p_hash_final above.
+ */
+static int s5p_hash_digest(struct ahash_request *req)
+{
+ return s5p_hash_init(req) ?: s5p_hash_finup(req);
+}
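A minimal usage sketch (not part of this patch) of how a kernel caller might exercise the registered "sha256" ahash through the generic crypto API; the function name is hypothetical and error handling is reduced to the essentials:

	#include <crypto/hash.h>
	#include <linux/scatterlist.h>

	/* one-shot SHA-256 through the async hash API, waiting synchronously */
	static int example_sha256(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return ret;
	}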
+
+/**
+ * s5p_hash_cra_init_alg - init crypto alg transformation
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ tctx->dd = s5p_dev;
+ /* Allocate a fallback and abort if it failed. */
+ tctx->fallback = crypto_alloc_shash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback)) {
+ pr_err("fallback alloc fails for '%s'\n", alg_name);
+ return PTR_ERR(tctx->fallback);
+ }
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct s5p_hash_reqctx) + BUFLEN);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_cra_init - init crypto tfm
+ * @tfm: crypto transformation
+ */
+static int s5p_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return s5p_hash_cra_init_alg(tfm);
+}
+
+/**
+ * s5p_hash_cra_exit - exit crypto tfm
+ * @tfm: crypto transformation
+ *
+ * free allocated fallback
+ */
+static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_shash(tctx->fallback);
+ tctx->fallback = NULL;
+}
+
+/**
+ * s5p_hash_export - export hash state
+ * @req: AHASH request
+ * @out: buffer for exported state
+ */
+static int s5p_hash_export(struct ahash_request *req, void *out)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
+
+ return 0;
+}
+
+/**
+ * s5p_hash_import - import hash state
+ * @req: AHASH request
+ * @in: buffer with state to be imported from
+ */
+static int s5p_hash_import(struct ahash_request *req, const void *in)
+{
+ struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
+ const struct s5p_hash_reqctx *ctx_in = in;
+
+ memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
+ if (ctx_in->bufcnt > BUFLEN) {
+ ctx->error = true;
+ return -EINVAL;
+ }
+
+ ctx->dd = tctx->dd;
+ ctx->error = false;
+
+ return 0;
+}
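A short sketch (not part of this patch) of how export()/import() pair up from a caller's perspective; the state buffer size matches the halg.statesize declared below, and req/req2 are hypothetical requests on the same tfm:

	u8 state[sizeof(struct s5p_hash_reqctx) + BUFLEN];	/* halg.statesize */
	int err;

	err = crypto_ahash_export(req, state);		/* snapshot partial state */
	if (!err)
		err = crypto_ahash_import(req2, state);	/* resume on another request */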
+
+static struct ahash_alg algs_sha1_md5_sha256[] = {
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "exynos-sha1",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "exynos-md5",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+},
+{
+ .init = s5p_hash_init,
+ .update = s5p_hash_update,
+ .final = s5p_hash_final,
+ .finup = s5p_hash_finup,
+ .digest = s5p_hash_digest,
+ .export = s5p_hash_export,
+ .import = s5p_hash_import,
+ .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "exynos-sha256",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = HASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_hash_ctx),
+ .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_hash_cra_init,
+ .cra_exit = s5p_hash_cra_exit,
+ }
+}
+
+};
+
static void s5p_set_aes(struct s5p_aes_dev *dev,
uint8_t *key, uint8_t *iv, unsigned int keylen)
{
@@ -829,6 +2154,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
+ unsigned int hash_i;
if (s5p_dev)
return -EEXIST;
@@ -837,12 +2163,34 @@ static int s5p_aes_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ variant = find_s5p_sss_version(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pdata->ioaddr))
- return PTR_ERR(pdata->ioaddr);
- variant = find_s5p_sss_version(pdev);
+ /*
+ * Note: HASH and PRNG use the same registers in SecSS, so avoid
+ * overwriting each other. HASH is dropped when CONFIG_EXYNOS_RNG
+ * is enabled in the config. We need a larger size for the HASH
+ * registers in SecSS; the current resource describes only AES/DES.
+ */
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
+ if (variant == &exynos_aes_data) {
+ res->end += 0x300;
+ pdata->use_hash = true;
+ }
+ }
+
+ pdata->res = res;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr)) {
+ if (!pdata->use_hash)
+ return PTR_ERR(pdata->ioaddr);
+ /* try AES without HASH */
+ res->end -= 0x300;
+ pdata->use_hash = false;
+ pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->ioaddr))
+ return PTR_ERR(pdata->ioaddr);
+ }
pdata->clk = devm_clk_get(dev, "secss");
if (IS_ERR(pdata->clk)) {
@@ -857,8 +2205,10 @@ static int s5p_aes_probe(struct platform_device *pdev)
}
spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->hash_lock);
pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
+ pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
pdata->irq_fc = platform_get_irq(pdev, 0);
if (pdata->irq_fc < 0) {
@@ -888,12 +2238,40 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
+ if (pdata->use_hash) {
+ tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
+ (unsigned long)pdata);
+ crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
+
+ for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
+ hash_i++) {
+ struct ahash_alg *alg;
+
+ alg = &algs_sha1_md5_sha256[hash_i];
+ err = crypto_register_ahash(alg);
+ if (err) {
+ dev_err(dev, "can't register '%s': %d\n",
+ alg->halg.base.cra_driver_name, err);
+ goto err_hash;
+ }
+ }
+ }
+
dev_info(dev, "s5p-sss driver registered\n");
return 0;
+err_hash:
+ for (j = hash_i - 1; j >= 0; j--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
+
+ tasklet_kill(&pdata->hash_tasklet);
+ res->end -= 0x300;
+
err_algs:
- dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
+ if (i < ARRAY_SIZE(algs))
+ dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name,
+ err);
for (j = 0; j < i; j++)
crypto_unregister_alg(&algs[j]);
@@ -920,9 +2298,16 @@ static int s5p_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(&algs[i]);
tasklet_kill(&pdata->tasklet);
+ if (pdata->use_hash) {
+ for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
+ crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
- clk_disable_unprepare(pdata->clk);
+ pdata->res->end -= 0x300;
+ tasklet_kill(&pdata->hash_tasklet);
+ pdata->use_hash = false;
+ }
+ clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
return 0;
@@ -942,3 +2327,4 @@ module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
+MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4835dd4a9e50..4ca4a264a833 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -895,7 +895,6 @@ static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
static int stm32_hash_update(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- int ret;
if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
return 0;
@@ -909,12 +908,7 @@ static int stm32_hash_update(struct ahash_request *req)
return 0;
}
- ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
-
- if (rctx->flags & HASH_FLAGS_FINUP)
- return ret;
-
- return 0;
+ return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}
static int stm32_hash_final(struct ahash_request *req)
@@ -1070,7 +1064,6 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
- int err;
if (HASH_FLAGS_CPU & hdev->flags) {
if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
@@ -1087,8 +1080,8 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
return IRQ_HANDLED;
finish:
- /*Finish current request */
- stm32_hash_finish_req(hdev->req, err);
+ /* Finish current request */
+ stm32_hash_finish_req(hdev->req, 0);
return IRQ_HANDLED;
}
@@ -1411,11 +1404,10 @@ MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
struct device *dev)
{
- const struct of_device_id *match;
int err;
- match = of_match_device(stm32_hash_of_match, dev);
- if (!match) {
+ hdev->pdata = of_device_get_match_data(dev);
+ if (!hdev->pdata) {
dev_err(dev, "no compatible OF match\n");
return -EINVAL;
}
@@ -1423,8 +1415,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
err = of_property_read_u32(dev->of_node, "dma-maxburst",
&hdev->dma_maxburst);
- hdev->pdata = match->data;
-
return err;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dff88838dce7..9c80e0cb1664 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -56,29 +56,26 @@
#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
- bool is_sec1)
+ unsigned int len, bool is_sec1)
{
ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
- if (!is_sec1)
+ if (is_sec1) {
+ ptr->len1 = cpu_to_be16(len);
+ } else {
+ ptr->len = cpu_to_be16(len);
ptr->eptr = upper_32_bits(dma_addr);
+ }
}
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
struct talitos_ptr *src_ptr, bool is_sec1)
{
dst_ptr->ptr = src_ptr->ptr;
- if (!is_sec1)
- dst_ptr->eptr = src_ptr->eptr;
-}
-
-static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
- bool is_sec1)
-{
if (is_sec1) {
- ptr->res = 0;
- ptr->len1 = cpu_to_be16(len);
+ dst_ptr->len1 = src_ptr->len1;
} else {
- ptr->len = cpu_to_be16(len);
+ dst_ptr->len = src_ptr->len;
+ dst_ptr->eptr = src_ptr->eptr;
}
}
@@ -116,9 +113,7 @@ static void map_single_talitos_ptr(struct device *dev,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr(ptr, dma_addr, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
+ to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}
/*
@@ -165,6 +160,10 @@ static int reset_channel(struct device *dev, int ch)
/* set 36-bit addressing, done writeback enable and done IRQ enable */
setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
+ /* enable chaining descriptors */
+ if (is_sec1)
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
+ TALITOS_CCCR_LO_NE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -287,7 +286,6 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* map descriptor and save caller data */
if (is_sec1) {
desc->hdr1 = desc->hdr;
- desc->next_desc = 0;
request->dma_desc = dma_map_single(dev, &desc->hdr1,
TALITOS_DESC_SIZE,
DMA_BIDIRECTIONAL);
@@ -339,7 +337,12 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/* descriptors with their done bits set don't get the error */
rmb();
- hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
+ if (!is_sec1)
+ hdr = request->desc->hdr;
+ else if (request->desc->next_desc)
+ hdr = (request->desc + 1)->hdr1;
+ else
+ hdr = request->desc->hdr1;
if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
status = 0;
@@ -393,8 +396,6 @@ static void talitos1_done_##name(unsigned long data) \
\
if (ch_done_mask & 0x10000000) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & 0x40000000) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & 0x00010000) \
@@ -402,7 +403,6 @@ static void talitos1_done_##name(unsigned long data) \
if (ch_done_mask & 0x00040000) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -412,6 +412,7 @@ out: \
}
DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
+DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask) \
static void talitos2_done_##name(unsigned long data) \
@@ -422,8 +423,6 @@ static void talitos2_done_##name(unsigned long data) \
\
if (ch_done_mask & 1) \
flush_channel(dev, 0, 0, 0); \
- if (priv->num_channels == 1) \
- goto out; \
if (ch_done_mask & (1 << 2)) \
flush_channel(dev, 1, 0, 0); \
if (ch_done_mask & (1 << 4)) \
@@ -431,7 +430,6 @@ static void talitos2_done_##name(unsigned long data) \
if (ch_done_mask & (1 << 6)) \
flush_channel(dev, 3, 0, 0); \
\
-out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
spin_lock_irqsave(&priv->reg_lock, flags); \
@@ -441,6 +439,7 @@ out: \
}
DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
+DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
@@ -464,7 +463,8 @@ static u32 current_desc_hdr(struct device *dev, int ch)
tail = priv->chan[ch].tail;
iter = tail;
- while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
+ while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
+ priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
iter = (iter + 1) & (priv->fifo_len - 1);
if (iter == tail) {
dev_err(dev, "couldn't locate current descriptor\n");
@@ -472,6 +472,9 @@ static u32 current_desc_hdr(struct device *dev, int ch)
}
}
+ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+ return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+
return priv->chan[ch].fifo[iter].desc->hdr;
}
@@ -825,9 +828,12 @@ struct talitos_ctx {
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
+ dma_addr_t dma_key;
unsigned int keylen;
unsigned int enckeylen;
unsigned int authkeylen;
+ dma_addr_t dma_buf;
+ dma_addr_t dma_hw_context;
};
#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
@@ -836,8 +842,8 @@ struct talitos_ctx {
struct talitos_ahash_req_ctx {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
unsigned int hw_context_size;
- u8 buf[HASH_MAX_BLOCK_SIZE];
- u8 bufnext[HASH_MAX_BLOCK_SIZE];
+ u8 buf[2][HASH_MAX_BLOCK_SIZE];
+ int buf_idx;
unsigned int swinit;
unsigned int first;
unsigned int last;
@@ -861,6 +867,7 @@ static int aead_setkey(struct crypto_aead *authenc,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
struct crypto_authenc_keys keys;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
@@ -869,12 +876,17 @@ static int aead_setkey(struct crypto_aead *authenc,
if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
goto badkey;
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(ctx->key, keys.authkey, keys.authkeylen);
memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
ctx->keylen = keys.authkeylen + keys.enckeylen;
ctx->enckeylen = keys.enckeylen;
ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
return 0;
@@ -948,13 +960,13 @@ static void ipsec_esp_unmap(struct device *dev,
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct talitos_ctx *ctx = crypto_aead_ctx(aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
+ bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
- if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
DMA_FROM_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
+ unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
areq->assoclen);
@@ -963,7 +975,7 @@ static void ipsec_esp_unmap(struct device *dev,
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
- if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
+ if (!is_ipsec_esp) {
unsigned int dst_nents = edesc->dst_nents ? : 1;
sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
@@ -983,6 +995,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
struct aead_request *areq = context;
struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
struct talitos_edesc *edesc;
struct scatterlist *sg;
void *icvdata;
@@ -1003,6 +1016,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
icvdata, authsize);
}
+ dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+
kfree(edesc);
aead_request_complete(areq, err);
@@ -1097,8 +1112,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
len = cryptlen;
to_talitos_ptr(link_tbl_ptr + count,
- sg_dma_address(sg) + offset, 0);
- to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
+ sg_dma_address(sg) + offset, len, 0);
to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
count++;
cryptlen -= len;
@@ -1116,7 +1130,7 @@ next:
return count;
}
-int talitos_sg_map(struct device *dev, struct scatterlist *src,
+static int talitos_sg_map(struct device *dev, struct scatterlist *src,
unsigned int len, struct talitos_edesc *edesc,
struct talitos_ptr *ptr,
int sg_count, unsigned int offset, int tbl_off)
@@ -1124,15 +1138,12 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
- to_talitos_ptr_len(ptr, len, is_sec1);
- to_talitos_ptr_ext_set(ptr, 0, is_sec1);
-
if (sg_count == 1) {
- to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
}
if (is_sec1) {
- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
return sg_count;
}
sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
@@ -1143,7 +1154,7 @@ int talitos_sg_map(struct device *dev, struct scatterlist *src,
return sg_count;
}
to_talitos_ptr(ptr, edesc->dma_link_tbl +
- tbl_off * sizeof(struct talitos_ptr), is_sec1);
+ tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
return sg_count;
@@ -1170,10 +1181,12 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
bool sync_needed = false;
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
+ struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* hmac key */
- map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1194,25 +1207,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
}
/* cipher iv */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
- to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
- } else {
- to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
- }
+ to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
- map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
- else
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
- (char *)&ctx->key + ctx->authkeylen,
- DMA_TO_DEVICE);
+ to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
+ ctx->enckeylen, is_sec1);
/*
* cipher in
@@ -1220,24 +1219,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
* extent is bytes of HMAC postpended to ciphertext,
* typically 12 for ipsec
*/
- to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
-
sg_link_tbl_len = cryptlen;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
- if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+ if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
}
- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
- &desc->ptr[4], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+ &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
- if (sg_count > 1) {
- tbl_off += sg_count;
+ if (ret > 1) {
+ tbl_off += ret;
sync_needed = true;
}
@@ -1248,47 +1243,59 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}
- sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
- &desc->ptr[5], sg_count, areq->assoclen,
- tbl_off);
+ ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off);
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
- if (sg_count > 1) {
+ /* ICV data */
+ if (ret > 1) {
+ tbl_off += ret;
edesc->icv_ool = true;
sync_needed = true;
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
+ if (is_ipsec_esp) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
int offset = (edesc->src_nents + edesc->dst_nents + 2) *
sizeof(struct talitos_ptr) + authsize;
/* Add an entry to the link table for ICV data */
- tbl_ptr += sg_count - 1;
- to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
- tbl_ptr++;
+ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
is_sec1);
- to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
- is_sec1);
+ authsize, is_sec1);
+ } else {
+ dma_addr_t addr = edesc->dma_link_tbl;
+
+ if (is_sec1)
+ addr += areq->assoclen + cryptlen;
+ else
+ addr += sizeof(struct talitos_ptr) * tbl_off;
+
+ to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
+ }
+ } else if (!is_ipsec_esp) {
+ ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+ &desc->ptr[6], sg_count, areq->assoclen +
+ cryptlen,
+ tbl_off);
+ if (ret > 1) {
+ tbl_off += ret;
+ edesc->icv_ool = true;
+ sync_needed = true;
+ } else {
+ edesc->icv_ool = false;
}
} else {
edesc->icv_ool = false;
}
- /* ICV data */
- if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
- to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
- to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
- areq->assoclen + cryptlen, is_sec1);
- }
-
/* iv out */
- if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
+ if (is_ipsec_esp)
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
DMA_FROM_DEVICE);
@@ -1387,22 +1394,31 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len += icv_stashing ? authsize : 0;
}
+ /* if it's an ahash, add space for a second desc next to the first one */
+ if (is_sec1 && !dst)
+ alloc_len += sizeof(struct talitos_desc);
+
edesc = kmalloc(alloc_len, GFP_DMA | flags);
if (!edesc) {
dev_err(dev, "could not allocate edescriptor\n");
err = ERR_PTR(-ENOMEM);
goto error_sg;
}
+ memset(&edesc->desc, 0, sizeof(edesc->desc));
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
edesc->dma_len = dma_len;
- if (dma_len)
- edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ if (dma_len) {
+ void *addr = &edesc->link_tbl[0];
+
+ if (is_sec1 && !dst)
+ addr += sizeof(struct talitos_desc);
+ edesc->dma_link_tbl = dma_map_single(dev, addr,
edesc->dma_len,
DMA_BIDIRECTIONAL);
-
+ }
return edesc;
error_sg:
if (iv_dma)
@@ -1468,7 +1484,6 @@ static int aead_decrypt(struct aead_request *req)
DESC_HDR_MODE1_MDEU_CICV;
/* reset integrity check result bits */
- edesc->desc.hdr_lo = 0;
return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
}
@@ -1494,15 +1509,29 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct device *dev = ctx->dev;
+ u32 tmp[DES_EXPKEY_WORDS];
if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
+ if (unlikely(crypto_ablkcipher_get_flags(cipher) &
+ CRYPTO_TFM_REQ_WEAK_KEY) &&
+ !des_ekey(tmp, key)) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
return 0;
}
@@ -1513,7 +1542,6 @@ static void common_nonsnoop_unmap(struct device *dev,
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
if (edesc->dma_len)
@@ -1555,16 +1583,12 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
bool is_sec1 = has_ftr_sec1(priv);
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* cipher iv */
- to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
- to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
- to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
+ to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
@@ -1599,7 +1623,6 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
DMA_FROM_DEVICE);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
@@ -1663,26 +1686,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct talitos_private *priv = dev_get_drvdata(dev);
- bool is_sec1 = has_ftr_sec1(priv);
-
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
- /* When using hashctx-in, must unmap it. */
- if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
- DMA_TO_DEVICE);
-
- if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
- unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
- DMA_TO_DEVICE);
-
if (edesc->dma_len)
dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
DMA_BIDIRECTIONAL);
+ if (edesc->desc.next_desc)
+ dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
+ TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
@@ -1696,7 +1709,7 @@ static void ahash_done(struct device *dev,
if (!req_ctx->last && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
- memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+ req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
}
common_nonsnoop_hash_unmap(dev, edesc, areq);
@@ -1710,7 +1723,7 @@ static void ahash_done(struct device *dev,
* SEC1 doesn't like hashing of 0 sized message, so we do the padding
* ourself and submit a padded block
*/
-void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
struct talitos_edesc *edesc,
struct talitos_ptr *ptr)
{
@@ -1729,6 +1742,7 @@ void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
struct ahash_request *areq, unsigned int length,
+ unsigned int offset,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1745,44 +1759,48 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
int sg_count;
/* first DWORD empty */
- desc->ptr[0] = zero_entry;
/* hash context in */
if (!req_ctx->first || req_ctx->swinit) {
- map_single_talitos_ptr(dev, &desc->ptr[1],
- req_ctx->hw_context_size,
- (char *)req_ctx->hw_context,
- DMA_TO_DEVICE);
+ to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
req_ctx->swinit = 0;
- } else {
- desc->ptr[1] = zero_entry;
}
/* Indicate next op is not the first. */
req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
- map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
- (char *)&ctx->key, DMA_TO_DEVICE);
- else
- desc->ptr[2] = zero_entry;
+ to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
+ is_sec1);
+
+ if (is_sec1 && req_ctx->nbuf)
+ length -= req_ctx->nbuf;
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
- else
+ sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+ edesc->buf + sizeof(struct talitos_desc),
+ length, req_ctx->nbuf);
+ else if (length)
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
/*
* data in
*/
- sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
- &desc->ptr[3], sg_count, 0, 0);
- if (sg_count > 1)
- sync_needed = true;
+ if (is_sec1 && req_ctx->nbuf) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
+ } else {
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ }
/* fifth DWORD empty */
- desc->ptr[4] = zero_entry;
/* hash/HMAC out -or- hash context out */
if (req_ctx->last)
@@ -1790,16 +1808,44 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
crypto_ahash_digestsize(tfm),
areq->result, DMA_FROM_DEVICE);
else
- map_single_talitos_ptr(dev, &desc->ptr[5],
- req_ctx->hw_context_size,
- req_ctx->hw_context, DMA_FROM_DEVICE);
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
/* last DWORD empty */
- desc->ptr[6] = zero_entry;
if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+ if (is_sec1 && req_ctx->nbuf && length) {
+ struct talitos_desc *desc2 = desc + 1;
+ dma_addr_t next_desc;
+
+ memset(desc2, 0, sizeof(*desc2));
+ desc2->hdr = desc->hdr;
+ desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
+ desc2->hdr1 = desc2->hdr;
+ desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
+ desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
+ desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
+
+ to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ &desc2->ptr[3], sg_count, offset, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+ if (req_ctx->last)
+ to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+ req_ctx->hw_context_size, is_sec1);
+
+ next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
+ DMA_BIDIRECTIONAL);
+ desc->next_desc = cpu_to_be32(next_desc);
+ }
+
if (sync_needed)
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1818,6 +1864,11 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ if (is_sec1)
+ nbytes -= req_ctx->nbuf;
return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
nbytes, 0, 0, 0, areq->base.flags, false);
@@ -1826,17 +1877,35 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
static int ahash_init(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
/* Initialize the context */
+ req_ctx->buf_idx = 0;
req_ctx->nbuf = 0;
req_ctx->first = 1; /* first indicates h/w must init its context */
req_ctx->swinit = 0; /* assume h/w init of context */
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
return 0;
}
@@ -1847,6 +1916,9 @@ static int ahash_init(struct ahash_request *areq)
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
ahash_init(areq);
req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
@@ -1864,6 +1936,9 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
req_ctx->hw_context[8] = 0;
req_ctx->hw_context[9] = 0;
+ dma_sync_single_for_device(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_TO_DEVICE);
+
return 0;
}
@@ -1879,6 +1954,11 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
unsigned int to_hash_later;
unsigned int nsg;
int nents;
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ int offset = 0;
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
@@ -1888,7 +1968,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_copy_to_buffer(areq->src, nents,
- req_ctx->buf + req_ctx->nbuf, nbytes);
+ ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
}
@@ -1909,13 +1989,27 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
}
/* Chain in any previously buffered data */
- if (req_ctx->nbuf) {
+ if (!is_sec1 && req_ctx->nbuf) {
nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
sg_init_table(req_ctx->bufsl, nsg);
- sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
+ sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
sg_chain(req_ctx->bufsl, 2, areq->src);
req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+ if (nbytes_to_hash > blocksize)
+ offset = blocksize - req_ctx->nbuf;
+ else
+ offset = nbytes_to_hash - req_ctx->nbuf;
+ nents = sg_nents_for_len(areq->src, offset);
+ if (nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return nents;
+ }
+ sg_copy_to_buffer(areq->src, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+ req_ctx->psrc = areq->src;
} else
req_ctx->psrc = areq->src;
@@ -1926,7 +2020,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
return nents;
}
sg_pcopy_to_buffer(areq->src, nents,
- req_ctx->bufnext,
+ req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
}
@@ -1948,6 +2042,13 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
/* request SEC to INIT hash. */
if (req_ctx->first && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+ if (is_sec1) {
+ dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+ HASH_MAX_BLOCK_SIZE;
+
+ dma_sync_single_for_device(dev, dma_buf,
+ req_ctx->nbuf, DMA_TO_DEVICE);
+ }
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
@@ -1955,7 +2056,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (ctx->keylen && (req_ctx->first || req_ctx->last))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
ahash_done);
}
@@ -2001,10 +2102,15 @@ static int ahash_export(struct ahash_request *areq, void *out)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct talitos_export_state *export = out;
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = ctx->dev;
+ dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
+ req_ctx->hw_context_size, DMA_FROM_DEVICE);
memcpy(export->hw_context, req_ctx->hw_context,
req_ctx->hw_context_size);
- memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+ memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
export->first = req_ctx->first;
export->last = req_ctx->last;
@@ -2019,15 +2125,32 @@ static int ahash_import(struct ahash_request *areq, const void *in)
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
const struct talitos_export_state *export = in;
+ unsigned int size;
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
memset(req_ctx, 0, sizeof(*req_ctx));
- req_ctx->hw_context_size =
- (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- memcpy(req_ctx->hw_context, export->hw_context,
- req_ctx->hw_context_size);
- memcpy(req_ctx->buf, export->buf, export->nbuf);
+ req_ctx->hw_context_size = size;
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+
+ memcpy(req_ctx->hw_context, export->hw_context, size);
+ ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
+ memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+ if (is_sec1)
+ ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+ sizeof(req_ctx->buf),
+ DMA_TO_DEVICE);
req_ctx->swinit = export->swinit;
req_ctx->first = export->first;
req_ctx->last = export->last;
@@ -2037,22 +2160,6 @@ static int ahash_import(struct ahash_request *areq, const void *in)
return 0;
}
-struct keyhash_result {
- struct completion completion;
- int err;
-};
-
-static void keyhash_complete(struct crypto_async_request *req, int err)
-{
- struct keyhash_result *res = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
u8 *hash)
{
@@ -2060,10 +2167,10 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
struct scatterlist sg[1];
struct ahash_request *req;
- struct keyhash_result hresult;
+ struct crypto_wait wait;
int ret;
- init_completion(&hresult.completion);
+ crypto_init_wait(&wait);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
@@ -2072,25 +2179,13 @@ static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
/* Keep tfm keylen == 0 during hash of the long key */
ctx->keylen = 0;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- keyhash_complete, &hresult);
+ crypto_req_done, &wait);
sg_init_one(&sg[0], key, keylen);
ahash_request_set_crypt(req, sg, hash, keylen);
- ret = crypto_ahash_digest(req);
- switch (ret) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(
- &hresult.completion);
- if (!ret)
- ret = hresult.err;
- break;
- default:
- break;
- }
+ ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
ahash_request_free(req);
return ret;
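
The keyhash() conversion above replaces the hand-rolled completion with the generic crypto_wait helpers from <linux/crypto.h>. A minimal sketch of that synchronous-digest idiom (example_digest_sync is an illustrative name; error handling trimmed):

#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int example_digest_sync(struct crypto_ahash *tfm, const u8 *data,
			       unsigned int len, u8 *out)
{
	struct crypto_wait wait;
	struct scatterlist sg;
	struct ahash_request *req;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY and returns the final status. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	return ret;
}
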
@@ -2100,6 +2195,7 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct device *dev = ctx->dev;
unsigned int blocksize =
crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int digestsize = crypto_ahash_digestsize(tfm);
@@ -2122,7 +2218,11 @@ static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(ctx->key, hash, digestsize);
}
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
ctx->keylen = keysize;
+ ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
return 0;
}
@@ -2614,7 +2714,7 @@ static struct talitos_alg_template driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
}
},
- .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
@@ -2951,6 +3051,36 @@ static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
return 0;
}
+static void talitos_cra_exit(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+}
+
+static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = ctx->dev;
+ unsigned int size;
+
+ talitos_cra_exit(tfm);
+
+ size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
+ SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+ if (ctx->dma_hw_context)
+ dma_unmap_single(dev, ctx->dma_hw_context, size,
+ DMA_BIDIRECTIONAL);
+ if (ctx->dma_buf)
+ dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
+ DMA_TO_DEVICE);
+}
+
/*
* given the alg's descriptor header template, determine whether descriptor
* type and primary/secondary execution units required match the hw
@@ -2989,17 +3119,11 @@ static int talitos_remove(struct platform_device *ofdev)
break;
}
list_del(&t_alg->entry);
- kfree(t_alg);
}
if (hw_supports(dev, DESC_HDR_SEL0_RNG))
talitos_unregister_rng(dev);
- for (i = 0; priv->chan && i < priv->num_channels; i++)
- kfree(priv->chan[i].fifo);
-
- kfree(priv->chan);
-
for (i = 0; i < 2; i++)
if (priv->irq[i]) {
free_irq(priv->irq[i], dev);
@@ -3010,10 +3134,6 @@ static int talitos_remove(struct platform_device *ofdev)
if (priv->irq[1])
tasklet_kill(&priv->done_task[1]);
- iounmap(priv->reg);
-
- kfree(priv);
-
return 0;
}
@@ -3025,7 +3145,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
struct talitos_crypto_alg *t_alg;
struct crypto_alg *alg;
- t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
+ t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
+ GFP_KERNEL);
if (!t_alg)
return ERR_PTR(-ENOMEM);
@@ -3035,6 +3156,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_ABLKCIPHER:
alg = &t_alg->algt.alg.crypto;
alg->cra_init = talitos_cra_init;
+ alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
@@ -3043,14 +3165,21 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
+ alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
t_alg->algt.alg.aead.setkey = aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
+ if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+ !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
+ devm_kfree(dev, t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ alg->cra_exit = talitos_cra_exit_ahash;
alg->cra_type = &crypto_ahash_type;
t_alg->algt.alg.hash.init = ahash_init;
t_alg->algt.alg.hash.update = ahash_update;
@@ -3064,7 +3193,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
!strncmp(alg->cra_name, "hmac", 4)) {
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-ENOTSUPP);
}
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
@@ -3079,7 +3208,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
return ERR_PTR(-EINVAL);
}
@@ -3156,11 +3285,11 @@ static int talitos_probe(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct talitos_private *priv;
- const unsigned int *prop;
int i, err;
int stride;
+ struct resource *res;
- priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -3172,7 +3301,10 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->reg_lock);
- priv->reg = of_iomap(np, 0);
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+ priv->reg = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
err = -ENOMEM;
@@ -3180,21 +3312,11 @@ static int talitos_probe(struct platform_device *ofdev)
}
/* get SEC version capabilities from device tree */
- prop = of_get_property(np, "fsl,num-channels", NULL);
- if (prop)
- priv->num_channels = *prop;
-
- prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
- if (prop)
- priv->chfifo_len = *prop;
-
- prop = of_get_property(np, "fsl,exec-units-mask", NULL);
- if (prop)
- priv->exec_units = *prop;
-
- prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
- if (prop)
- priv->desc_types = *prop;
+ of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
+ of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
+ of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
+ of_property_read_u32(np, "fsl,descriptor-types-mask",
+ &priv->desc_types);
if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
!priv->exec_units || !priv->desc_types) {
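
The probe hunk above replaces raw of_get_property() pointer dereferences with of_property_read_u32(). A minimal sketch of that idiom with the same property names (example_read_dt_caps is an illustrative helper, not a talitos function):

#include <linux/of.h>
#include <linux/errno.h>

static int example_read_dt_caps(struct device_node *np, u32 *num_channels,
				u32 *chfifo_len)
{
	/* of_property_read_u32() checks presence and length and performs the
	 * be32-to-cpu conversion instead of a raw big-endian pointer read. */
	if (of_property_read_u32(np, "fsl,num-channels", num_channels) ||
	    of_property_read_u32(np, "fsl,channel-fifo-len", chfifo_len))
		return -ENODEV;

	return 0;
}
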
@@ -3244,22 +3366,29 @@ static int talitos_probe(struct platform_device *ofdev)
goto err_out;
if (of_device_is_compatible(np, "fsl,sec1.0")) {
- tasklet_init(&priv->done_task[0], talitos1_done_4ch,
- (unsigned long)dev);
- } else {
- if (!priv->irq[1]) {
- tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ if (priv->num_channels == 1)
+ tasklet_init(&priv->done_task[0], talitos1_done_ch0,
(unsigned long)dev);
- } else {
+ else
+ tasklet_init(&priv->done_task[0], talitos1_done_4ch,
+ (unsigned long)dev);
+ } else {
+ if (priv->irq[1]) {
tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
(unsigned long)dev);
tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
(unsigned long)dev);
+ } else if (priv->num_channels == 1) {
+ tasklet_init(&priv->done_task[0], talitos2_done_ch0,
+ (unsigned long)dev);
+ } else {
+ tasklet_init(&priv->done_task[0], talitos2_done_4ch,
+ (unsigned long)dev);
}
}
- priv->chan = kzalloc(sizeof(struct talitos_channel) *
- priv->num_channels, GFP_KERNEL);
+ priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
+ priv->num_channels, GFP_KERNEL);
if (!priv->chan) {
dev_err(dev, "failed to allocate channel management space\n");
err = -ENOMEM;
@@ -3276,8 +3405,9 @@ static int talitos_probe(struct platform_device *ofdev)
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
- priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
- priv->fifo_len, GFP_KERNEL);
+ priv->chan[i].fifo = devm_kzalloc(dev,
+ sizeof(struct talitos_request) *
+ priv->fifo_len, GFP_KERNEL);
if (!priv->chan[i].fifo) {
dev_err(dev, "failed to allocate request fifo %d\n", i);
err = -ENOMEM;
@@ -3343,7 +3473,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (err) {
dev_err(dev, "%s alg registration failed\n",
alg->cra_driver_name);
- kfree(t_alg);
+ devm_kfree(dev, t_alg);
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
}
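
The talitos.c hunks above drop the explicit kfree()/iounmap() calls in talitos_remove() in favour of device-managed allocations. A minimal sketch of the devm_* pattern in a generic platform driver (example_probe and example_remove are illustrative names):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *regs;
	void *priv;

	priv = devm_kzalloc(dev, 256, GFP_KERNEL);	/* freed on driver detach */
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	regs = devm_ioremap(dev, res->start, resource_size(res));	/* unmapped on detach */
	if (!regs)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

/* remove() no longer needs kfree()/iounmap(); devres undoes both automatically. */
static int example_remove(struct platform_device *pdev)
{
	return 0;
}
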
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 8dd8f40e2771..a65a63e0d6c1 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,8 +52,6 @@ struct talitos_ptr {
__be32 ptr; /* address */
};
-static const struct talitos_ptr zero_entry;
-
/* descriptor */
struct talitos_desc {
__be32 hdr; /* header high bits */
@@ -210,9 +208,13 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_ISR 0x1010 /* interrupt status register */
#define TALITOS1_ISR_4CHERR ISR1_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS1_ISR_4CHDONE ISR1_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS1_ISR_CH_0_ERR (2 << 28) /* ch 0 errors mask */
+#define TALITOS1_ISR_CH_0_DONE (1 << 28) /* ch 0 done mask */
#define TALITOS1_ISR_TEA_ERR 0x00000040
#define TALITOS2_ISR_4CHERR ISR2_FORMAT(0xa) /* 4 ch errors mask */
#define TALITOS2_ISR_4CHDONE ISR2_FORMAT(0x5) /* 4 ch done mask */
+#define TALITOS2_ISR_CH_0_ERR 2 /* ch 0 errors mask */
+#define TALITOS2_ISR_CH_0_DONE 1 /* ch 0 done mask */
#define TALITOS2_ISR_CH_0_2_ERR ISR2_FORMAT(0x2) /* ch 0, 2 err mask */
#define TALITOS2_ISR_CH_0_2_DONE ISR2_FORMAT(0x1) /* ch 0, 2 done mask */
#define TALITOS2_ISR_CH_1_3_ERR ISR2_FORMAT(0x8) /* ch 1, 3 err mask */
@@ -234,6 +236,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
+#define TALITOS_CCCR_LO_NE 0x8 /* fetch next descriptor enab. */
#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
#define TALITOS1_CCCR_LO_RESET 0x1 /* channel reset on SEC1 */
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 790f7cadc1ed..765f53e548ab 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1751,7 +1751,6 @@ static void __exit ux500_cryp_mod_fini(void)
{
pr_debug("[%s] is called!", __func__);
platform_driver_unregister(&cryp_driver);
- return;
}
module_init(ux500_cryp_mod_init);
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
index dd342c947ff9..cbfccccfa135 100644
--- a/drivers/crypto/virtio/Makefile
+++ b/drivers/crypto/virtio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
virtio_crypto-objs := \
virtio_crypto_algs.o \
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 5035b0dc1e40..abe8c15450df 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -319,7 +319,7 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
struct virtio_crypto *vcrypto =
virtcrypto_get_dev_node(node);
if (!vcrypto) {
- pr_err("virtio_crypto: Could not find a virtio device in the system");
+ pr_err("virtio_crypto: Could not find a virtio device in the system\n");
return -ENODEV;
}
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index 55f7c392582f..cab32cfec9c4 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 17d84217dd76..fc60d00a2e84 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -27,21 +27,23 @@
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_blkcipher *fallback;
+ struct crypto_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback =
- crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_skcipher(alg, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -49,11 +51,11 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ crypto_skcipher_driver_name(fallback));
- crypto_blkcipher_set_flags(
+ crypto_skcipher_set_flags(
fallback,
- crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+ crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
return 0;
@@ -64,7 +66,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_blkcipher(ctx->fallback);
+ crypto_free_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +85,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -117,15 +119,14 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- struct blkcipher_desc fallback_desc = {
- .tfm = ctx->fallback,
- .info = desc->info,
- .flags = desc->flags
- };
if (in_interrupt()) {
- ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
- nbytes);
+ SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_tfm(req, ctx->fallback);
+ skcipher_request_set_callback(req, desc->flags, NULL, NULL);
+ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
} else {
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
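
The aes_ctr.c hunk above moves the fallback from the retired blkcipher interface to the skcipher API. A minimal sketch of the synchronous-fallback idiom, assuming a fallback tfm obtained with crypto_alloc_skcipher(..., CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK) (example_fallback_encrypt is an illustrative name):

#include <crypto/skcipher.h>

static int example_fallback_encrypt(struct crypto_skcipher *fallback,
				    struct scatterlist *dst,
				    struct scatterlist *src,
				    unsigned int nbytes, void *iv, u32 flags)
{
	/* On-stack request: only safe because the fallback is synchronous. */
	SKCIPHER_REQUEST_ON_STACK(req, fallback);
	int ret;

	skcipher_request_set_tfm(req, fallback);
	skcipher_request_set_callback(req, flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe request state left on the stack */
	return ret;
}
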
diff --git a/drivers/crypto/vmx/aesp8-ppc.h b/drivers/crypto/vmx/aesp8-ppc.h
index 01972e16a6c0..349646b73754 100644
--- a/drivers/crypto/vmx/aesp8-ppc.h
+++ b/drivers/crypto/vmx/aesp8-ppc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/types.h>
#include <crypto/aes.h>
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b18e67d0e065..36db2ef09e5b 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -1,4 +1,5 @@
#!/usr/bin/env perl
+# SPDX-License-Identifier: GPL-2.0
# PowerPC assembler distiller by <appro>.
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index dc7422530462..574286fac87c 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e9f3b3e4bbf4..6833ada237ab 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -222,7 +222,8 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
{
struct resource *res;
- phys_addr_t phys;
+ /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */
+ phys_addr_t uninitialized_var(phys);
int i;
for (i = 0; i < dev_dax->num_resources; i++) {
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 557b93703532..3ec804672601 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -92,21 +92,21 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
long len;
if (blocksize != PAGE_SIZE) {
- pr_err("VFS (%s): error: unsupported blocksize for dax\n",
+ pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
sb->s_id);
return -EINVAL;
}
err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
if (err) {
- pr_err("VFS (%s): error: unaligned partition for dax\n",
+ pr_debug("VFS (%s): error: unaligned partition for dax\n",
sb->s_id);
return err;
}
dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
if (!dax_dev) {
- pr_err("VFS (%s): error: device does not support dax\n",
+ pr_debug("VFS (%s): error: device does not support dax\n",
sb->s_id);
return -EOPNOTSUPP;
}
@@ -118,7 +118,7 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
put_dax(dax_dev);
if (len < 1) {
- pr_err("VFS (%s): error: dax access failed (%ld)",
+ pr_debug("VFS (%s): error: dax access failed (%ld)\n",
sb->s_id, len);
return len < 0 ? len : -EIO;
}
@@ -273,9 +273,6 @@ EXPORT_SYMBOL_GPL(dax_copy_from_iter);
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
- if (unlikely(!dax_alive(dax_dev)))
- return;
-
if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
return;
@@ -344,6 +341,9 @@ static struct inode *dax_alloc_inode(struct super_block *sb)
struct inode *inode;
dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
+ if (!dax_dev)
+ return NULL;
+
inode = &dax_dev->inode;
inode->i_rdev = 0;
return inode;
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index fbff40a508a4..32b8d4d3f12c 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a1c4ee818614..78fb496ecb4e 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -28,6 +28,9 @@
#include <linux/of.h>
#include "governor.h"
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
static struct class *devfreq_class;
/*
@@ -69,6 +72,34 @@ static struct devfreq *find_device_devfreq(struct device *dev)
return ERR_PTR(-ENODEV);
}
+static unsigned long find_available_min_freq(struct devfreq *devfreq)
+{
+ struct dev_pm_opp *opp;
+ unsigned long min_freq = 0;
+
+ opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
+ if (IS_ERR(opp))
+ min_freq = 0;
+ else
+ dev_pm_opp_put(opp);
+
+ return min_freq;
+}
+
+static unsigned long find_available_max_freq(struct devfreq *devfreq)
+{
+ struct dev_pm_opp *opp;
+ unsigned long max_freq = ULONG_MAX;
+
+ opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
+ if (IS_ERR(opp))
+ max_freq = 0;
+ else
+ dev_pm_opp_put(opp);
+
+ return max_freq;
+}
+
/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
@@ -85,11 +116,7 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
return -EINVAL;
}
-/**
- * devfreq_set_freq_table() - Initialize freq_table for the frequency
- * @devfreq: the devfreq instance
- */
-static void devfreq_set_freq_table(struct devfreq *devfreq)
+static int set_freq_table(struct devfreq *devfreq)
{
struct devfreq_dev_profile *profile = devfreq->profile;
struct dev_pm_opp *opp;
@@ -99,7 +126,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
/* Initialize the freq_table from OPP table */
count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
if (count <= 0)
- return;
+ return -EINVAL;
profile->max_state = count;
profile->freq_table = devm_kcalloc(devfreq->dev.parent,
@@ -108,7 +135,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
GFP_KERNEL);
if (!profile->freq_table) {
profile->max_state = 0;
- return;
+ return -ENOMEM;
}
for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
@@ -116,11 +143,13 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
if (IS_ERR(opp)) {
devm_kfree(devfreq->dev.parent, profile->freq_table);
profile->max_state = 0;
- return;
+ return PTR_ERR(opp);
}
dev_pm_opp_put(opp);
profile->freq_table[i] = freq;
}
+
+ return 0;
}
/**
@@ -227,7 +256,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
int update_devfreq(struct devfreq *devfreq)
{
struct devfreq_freqs freqs;
- unsigned long freq, cur_freq;
+ unsigned long freq, cur_freq, min_freq, max_freq;
int err = 0;
u32 flags = 0;
@@ -245,19 +274,21 @@ int update_devfreq(struct devfreq *devfreq)
return err;
/*
- * Adjust the frequency with user freq and QoS.
+ * Adjust the frequency with user freq, QoS and available freq.
*
* List from the highest priority
* max_freq
* min_freq
*/
+ max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
+ min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
- if (devfreq->min_freq && freq < devfreq->min_freq) {
- freq = devfreq->min_freq;
+ if (min_freq && freq < min_freq) {
+ freq = min_freq;
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
}
- if (devfreq->max_freq && freq > devfreq->max_freq) {
- freq = devfreq->max_freq;
+ if (max_freq && freq > max_freq) {
+ freq = max_freq;
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
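
A hedged numeric illustration of the new priority order in update_devfreq(): the OPP-derived scaling limits are combined with the user's sysfs limits before the governor's choice is clamped (example_clamp and the frequencies in the comments are illustrative only):

#include <linux/kernel.h>

/* Illustrative only: the effective bounds update_devfreq() now applies. */
static unsigned long example_clamp(unsigned long freq,
				   unsigned long scaling_min, unsigned long user_min,
				   unsigned long scaling_max, unsigned long user_max)
{
	unsigned long lo = max(scaling_min, user_min);	/* OPP floor vs sysfs min_freq */
	unsigned long hi = min(scaling_max, user_max);	/* OPP ceiling vs sysfs max_freq */

	if (lo && freq < lo)
		freq = lo;	/* e.g. a 50 MHz request is raised to a 200 MHz floor */
	if (hi && freq > hi)
		freq = hi;	/* e.g. a 900 MHz request is capped to an 800 MHz ceiling */
	return freq;
}
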
@@ -280,10 +311,9 @@ int update_devfreq(struct devfreq *devfreq)
freqs.new = freq;
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
- if (devfreq->profile->freq_table)
- if (devfreq_update_status(devfreq, freq))
- dev_err(&devfreq->dev,
- "Couldn't update frequency transition information.\n");
+ if (devfreq_update_status(devfreq, freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
devfreq->previous_freq = freq;
return err;
@@ -466,6 +496,19 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
int ret;
mutex_lock(&devfreq->lock);
+
+ devfreq->scaling_min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->scaling_min_freq) {
+ mutex_unlock(&devfreq->lock);
+ return -EINVAL;
+ }
+
+ devfreq->scaling_max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->scaling_max_freq) {
+ mutex_unlock(&devfreq->lock);
+ return -EINVAL;
+ }
+
ret = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
@@ -555,10 +598,28 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
mutex_unlock(&devfreq->lock);
- devfreq_set_freq_table(devfreq);
+ err = set_freq_table(devfreq);
+ if (err < 0)
+ goto err_out;
mutex_lock(&devfreq->lock);
}
+ devfreq->min_freq = find_available_min_freq(devfreq);
+ if (!devfreq->min_freq) {
+ mutex_unlock(&devfreq->lock);
+ err = -EINVAL;
+ goto err_dev;
+ }
+ devfreq->scaling_min_freq = devfreq->min_freq;
+
+ devfreq->max_freq = find_available_max_freq(devfreq);
+ if (!devfreq->max_freq) {
+ mutex_unlock(&devfreq->lock);
+ err = -EINVAL;
+ goto err_dev;
+ }
+ devfreq->scaling_max_freq = devfreq->max_freq;
+
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
@@ -1082,6 +1143,14 @@ unlock:
return ret;
}
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+
+ return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
+}
+
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1108,17 +1177,15 @@ unlock:
mutex_unlock(&df->lock);
return ret;
}
+static DEVICE_ATTR_RW(min_freq);
-#define show_one(name) \
-static ssize_t name##_show \
-(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- return sprintf(buf, "%lu\n", to_devfreq(dev)->name); \
-}
-show_one(min_freq);
-show_one(max_freq);
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
-static DEVICE_ATTR_RW(min_freq);
+ return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
+}
static DEVICE_ATTR_RW(max_freq);
static ssize_t available_frequencies_show(struct device *d,
@@ -1126,22 +1193,16 @@ static ssize_t available_frequencies_show(struct device *d,
char *buf)
{
struct devfreq *df = to_devfreq(d);
- struct device *dev = df->dev.parent;
- struct dev_pm_opp *opp;
ssize_t count = 0;
- unsigned long freq = 0;
+ int i;
- do {
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (IS_ERR(opp))
- break;
+ mutex_lock(&df->lock);
- dev_pm_opp_put(opp);
+ for (i = 0; i < df->profile->max_state; i++)
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
- "%lu ", freq);
- freq++;
- } while (1);
+ "%lu ", df->profile->freq_table[i]);
+ mutex_unlock(&df->lock);
/* Truncate the trailing space */
if (count)
count--;
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 49f68929e024..c25658b26598 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -436,7 +436,8 @@ static int exynos_bus_probe(struct platform_device *pdev)
ondemand_data->downdifferential = 5;
/* Add devfreq device to monitor and handle the exynos bus */
- bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+ bus->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
ondemand_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev, "failed to add devfreq device\n");
@@ -488,7 +489,7 @@ passive:
passive_data->parent = parent_devfreq;
/* Add devfreq device for exynos bus with passive governor */
- bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+ bus->devfreq = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_PASSIVE,
passive_data);
if (IS_ERR(bus->devfreq)) {
dev_err(dev,
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 673ad8cc9a1d..3bc29acbd54e 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -183,7 +183,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_passive = {
- .name = "passive",
+ .name = DEVFREQ_GOV_PASSIVE,
.immutable = 1,
.get_target_freq = devfreq_passive_get_target_freq,
.event_handler = devfreq_passive_event_handler,
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c72f942f30a8..4d23ecfbd948 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -42,7 +42,7 @@ static int devfreq_performance_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_performance = {
- .name = "performance",
+ .name = DEVFREQ_GOV_PERFORMANCE,
.get_target_freq = devfreq_performance_func,
.event_handler = devfreq_performance_handler,
};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c6bed567e6d..0c42f23249ef 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -39,7 +39,7 @@ static int devfreq_powersave_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_powersave = {
- .name = "powersave",
+ .name = DEVFREQ_GOV_POWERSAVE,
.get_target_freq = devfreq_powersave_func,
.event_handler = devfreq_powersave_handler,
};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index ae72ba5e78df..28e0f2de7100 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -125,7 +125,7 @@ static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
}
static struct devfreq_governor devfreq_simple_ondemand = {
- .name = "simple_ondemand",
+ .name = DEVFREQ_GOV_SIMPLE_ONDEMAND,
.get_target_freq = devfreq_simple_ondemand_func,
.event_handler = devfreq_simple_ondemand_handler,
};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 77028c27593c..080607c3f34d 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -87,7 +87,7 @@ static struct attribute *dev_entries[] = {
NULL,
};
static const struct attribute_group dev_attr_group = {
- .name = "userspace",
+ .name = DEVFREQ_GOV_USERSPACE,
.attrs = dev_entries,
};
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index 1b89ebbad02c..5dfbfa3cc878 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -431,7 +431,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
data->devfreq = devm_devfreq_add_device(dev,
&rk3399_devfreq_dmc_profile,
- "simple_ondemand",
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
&data->ondemand_data);
if (IS_ERR(data->devfreq))
return PTR_ERR(data->devfreq);
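
The exynos-bus and rk3399 hunks above, like the governor renames that follow, switch from string literals to shared name constants. A minimal sketch of a consumer, assuming the DEVFREQ_GOV_* defines are provided by <linux/devfreq.h> as this series implies (example_register_devfreq is an illustrative name):

#include <linux/devfreq.h>
#include <linux/err.h>

static int example_register_devfreq(struct device *dev,
				    struct devfreq_dev_profile *profile,
				    struct devfreq_simple_ondemand_data *od)
{
	struct devfreq *df;

	/* Same call as before, but the governor is named via a shared constant
	 * instead of the string literal "simple_ondemand". */
	df = devm_devfreq_add_device(dev, profile, DEVFREQ_GOV_SIMPLE_ONDEMAND, od);
	if (IS_ERR(df))
		return PTR_ERR(df);

	return 0;
}
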
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 0d0677f23916..92e78d16b476 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Code to support devices on the DIO and DIO-II bus
* Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
* Copyright (C) 2004 Jochen Friedrich <jochen@scram.de>
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 9a302799040e..5d101c4053e0 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -27,7 +27,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
-EXPORT_TRACEPOINT_SYMBOL(dma_fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
diff --git a/drivers/dma-buf/sync_trace.h b/drivers/dma-buf/sync_trace.h
index d13d59ff1b85..06e468a218ff 100644
--- a/drivers/dma-buf/sync_trace.h
+++ b/drivers/dma-buf/sync_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
#define TRACE_SYSTEM sync_trace
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fadc4d8783bd..27df3e2837fd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -115,7 +115,7 @@ config BCM_SBA_RAID
select DMA_ENGINE_RAID
select ASYNC_TX_DISABLE_XOR_VAL_DMA
select ASYNC_TX_DISABLE_PQ_VAL_DMA
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Enable support for Broadcom SBA RAID Engine. The SBA RAID
engine is available on most of the Broadcom iProc SoCs. It
@@ -483,6 +483,35 @@ config STM32_DMA
If you have a board based on such a MCU and wish to use DMA say Y
here.
+config STM32_DMAMUX
+ bool "STMicroelectronics STM32 dma multiplexer support"
+ depends on STM32_DMA || COMPILE_TEST
+ help
+ Enable support for the on-chip DMA multiplexer on STMicroelectronics
+ STM32 MCUs.
+ If you have a board based on such a MCU and wish to use DMAMUX say Y
+ here.
+
+config STM32_MDMA
+ bool "STMicroelectronics STM32 master dma support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip MDMA controller on STMicroelectronics
+ STM32 platforms.
+ If you have a board based on STM32 SoC and wish to use the master DMA
+ say Y here.
+
+config SPRD_DMA
+ tristate "Spreadtrum DMA support"
+ depends on ARCH_SPRD || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the on-chip DMA controller on Spreadtrum platform.
+
config S3C24XX_DMAC
bool "Samsung S3C24XX DMA support"
depends on ARCH_S3C24XX || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f08f8de1b567..b9dca8a0e142 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#dmaengine debug flags
subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
@@ -59,6 +60,9 @@ obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_STM32_DMA) += stm32-dma.o
+obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
+obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
+obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f58f06157f6..ef3f227ce3e6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,7 +385,7 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
dev_crit(chan2dev(&atchan->chan_common),
- " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+ "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
&lli->saddr, &lli->daddr,
lli->ctrla, lli->ctrlb, &lli->dscr);
}
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 6c2c44724637..3956a018bf5a 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1,9 +1,14 @@
/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
/*
@@ -25,11 +30,8 @@
*
* The Broadcom SBA RAID driver does not require any register programming
* except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by the Broadcom SoC-specific ring manager driver.
* For having more SBA DMA channels, we can create more SBA device nodes
* in Broadcom SoC specific DTS based on number of hardware rings supported
* by Broadcom SoC ring manager.
@@ -85,6 +87,7 @@
#define SBA_CMD_GALOIS 0xe
#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
/* Driver helper macros */
#define to_sba_request(tx) \
@@ -142,9 +145,7 @@ struct sba_device {
u32 max_cmds_pool_size;
/* Maibox client and Mailbox channels */
struct mbox_client client;
- int mchans_count;
- atomic_t mchans_current;
- struct mbox_chan **mchans;
+ struct mbox_chan *mchan;
struct device *mbox_dev;
/* DMA device and DMA channel */
struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
/* ====== General helper routines ===== */
-static void sba_peek_mchans(struct sba_device *sba)
-{
- int mchan_idx;
-
- for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
- mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
* would have completed which will create more
* room for new requests.
*/
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return NULL;
}
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
static int sba_send_mbox_request(struct sba_device *sba,
struct sba_request *req)
{
- int mchans_idx, ret = 0;
-
- /* Select mailbox channel in round-robin fashion */
- mchans_idx = atomic_inc_return(&sba->mchans_current);
- mchans_idx = mchans_idx % sba->mchans_count;
+ int ret = 0;
/* Send message for the request */
req->msg.error = 0;
- ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+ ret = mbox_send_message(sba->mchan, &req->msg);
if (ret < 0) {
dev_err(sba->dev, "send message failed with error %d", ret);
return ret;
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
}
/* Signal txdone for mailbox channel */
- mbox_client_txdone(sba->mchans[mchans_idx], ret);
+ mbox_client_txdone(sba->mchan, ret);
return ret;
}
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
u32 count;
struct sba_request *req;
- /*
- * Process few pending requests
- *
- * For now, we process (<number_of_mailbox_channels> * 8)
- * number of requests at a time.
- */
- count = sba->mchans_count * 8;
+ /* Process a few pending requests */
+ count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
while (!list_empty(&sba->reqs_pending_list) && count) {
/* Get the first pending request */
req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
WARN_ON(tx->cookie < 0);
if (tx->cookie > 0) {
+ spin_lock_irqsave(&sba->reqs_lock, flags);
dma_cookie_complete(tx);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
dmaengine_desc_get_callback_invoke(tx, NULL);
dma_descriptor_unmap(tx);
tx->callback = NULL;
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
if (ret == DMA_COMPLETE)
return ret;
- sba_peek_mchans(sba);
+ mbox_client_peek_data(sba->mchan);
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)
static int sba_probe(struct platform_device *pdev)
{
- int i, ret = 0, mchans_count;
+ int ret = 0;
struct sba_device *sba;
struct platform_device *mbox_pdev;
struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
sba->dev = &pdev->dev;
platform_set_drvdata(pdev, sba);
- /* Number of channels equals number of mailbox channels */
+ /* Number of mailbox channels should be at least 1 */
ret = of_count_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells");
if (ret <= 0)
return -ENODEV;
- mchans_count = ret;
/* Determine SBA version from DT compatible string */
if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+ sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
sba->max_cmd_per_req = sba->max_pq_srcs + 3;
sba->max_xor_srcs = sba->max_cmd_per_req - 1;
sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
sba->client.knows_txdone = true;
sba->client.tx_tout = 0;
- /* Allocate mailbox channel array */
- sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
- sizeof(*sba->mchans), GFP_KERNEL);
- if (!sba->mchans)
- return -ENOMEM;
-
- /* Request mailbox channels */
- sba->mchans_count = 0;
- for (i = 0; i < mchans_count; i++) {
- sba->mchans[i] = mbox_request_channel(&sba->client, i);
- if (IS_ERR(sba->mchans[i])) {
- ret = PTR_ERR(sba->mchans[i]);
- goto fail_free_mchans;
- }
- sba->mchans_count++;
+ /* Request mailbox channel */
+ sba->mchan = mbox_request_channel(&sba->client, 0);
+ if (IS_ERR(sba->mchan)) {
+ ret = PTR_ERR(sba->mchan);
+ goto fail_free_mchan;
}
- atomic_set(&sba->mchans_current, 0);
/* Find-out underlying mailbox device */
ret = of_parse_phandle_with_args(pdev->dev.of_node,
"mboxes", "#mbox-cells", 0, &args);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
mbox_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!mbox_pdev) {
ret = -ENODEV;
- goto fail_free_mchans;
+ goto fail_free_mchan;
}
sba->mbox_dev = &mbox_pdev->dev;
- /* All mailbox channels should be of same ring manager device */
- for (i = 1; i < mchans_count; i++) {
- ret = of_parse_phandle_with_args(pdev->dev.of_node,
- "mboxes", "#mbox-cells", i, &args);
- if (ret)
- goto fail_free_mchans;
- mbox_pdev = of_find_device_by_node(args.np);
- of_node_put(args.np);
- if (sba->mbox_dev != &mbox_pdev->dev) {
- ret = -EINVAL;
- goto fail_free_mchans;
- }
- }
-
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
- goto fail_free_mchans;
+ goto fail_free_mchan;
/* Check availability of debugfs */
if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ skip_debugfs:
goto fail_free_resources;
/* Print device info */
- dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+ dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
dma_chan_name(&sba->dma_chan), sba->ver+1,
- sba->mchans_count);
+ dev_name(sba->mbox_dev));
return 0;
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
-fail_free_mchans:
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+ mbox_free_channel(sba->mchan);
return ret;
}
static int sba_remove(struct platform_device *pdev)
{
- int i;
struct sba_device *sba = platform_get_drvdata(pdev);
dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
- for (i = 0; i < sba->mchans_count; i++)
- mbox_free_channel(sba->mchans[i]);
+ mbox_free_channel(sba->mchan);
return 0;
}
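
The bcm-sba-raid rework above drops the per-channel array and keeps a single mailbox channel. A minimal sketch of the mailbox-client calls it relies on, condensed into one illustrative function (example_use_single_mbox; the real driver splits these across its probe, submit and status paths):

#include <linux/mailbox_client.h>
#include <linux/err.h>

static int example_use_single_mbox(struct device *dev,
				   struct mbox_client *client, void *msg)
{
	struct mbox_chan *chan;
	int ret;

	client->dev = dev;
	client->knows_txdone = true;		/* driver signals completion itself */

	chan = mbox_request_channel(client, 0);	/* first "mboxes" phandle */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);
	if (ret >= 0)
		mbox_client_txdone(chan, 0);	/* tell the framework tx is done */

	mbox_client_peek_data(chan);		/* poll for any received data */
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}
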
diff --git a/drivers/dma/bestcomm/Makefile b/drivers/dma/bestcomm/Makefile
index aed2df2a6580..8d1b33a2f0a1 100644
--- a/drivers/dma/bestcomm/Makefile
+++ b/drivers/dma/bestcomm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for BestComm & co
#
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 74794c9859f6..da74fd74636b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1319,8 +1319,8 @@ static void coh901318_list_print(struct coh901318_chan *cohc,
int i = 0;
while (l) {
- dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad"
- ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n",
+ dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad"
+ ", dst %pad, link %pad virt_link_addr 0x%p\n",
i, l, l->control, &l->src_addr, &l->dst_addr,
&l->link_addr, l->virt_link_addr);
i++;
@@ -2231,7 +2231,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
- "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n",
+ "[%s] channel %d src %pad dest %pad size %zu\n",
__func__, cohc->id, &src, &dest, size);
if (flags & DMA_PREP_INTERRUPT)
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..2419fe524daa 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -72,6 +72,9 @@
#define AXI_DMAC_FLAG_CYCLIC BIT(0)
+/* The maximum ID allocated by the hardware is 31 */
+#define AXI_DMAC_SG_UNUSED 32U
+
struct axi_dmac_sg {
dma_addr_t src_addr;
dma_addr_t dest_addr;
@@ -80,6 +83,7 @@ struct axi_dmac_sg {
unsigned int dest_stride;
unsigned int src_stride;
unsigned int id;
+ bool schedule_when_free;
};
struct axi_dmac_desc {
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
}
sg = &desc->sg[desc->num_submitted];
+ /* Already queued in cyclic mode. Wait for it to finish */
+ if (sg->id != AXI_DMAC_SG_UNUSED) {
+ sg->schedule_when_free = true;
+ return;
+ }
+
desc->num_submitted++;
- if (desc->num_submitted == desc->num_sgs)
- chan->next_desc = NULL;
- else
+ if (desc->num_submitted == desc->num_sgs) {
+ if (desc->cyclic)
+ desc->num_submitted = 0; /* Start again */
+ else
+ chan->next_desc = NULL;
+ } else {
chan->next_desc = desc;
+ }
sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
@@ -220,9 +234,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
/*
* If the hardware supports cyclic transfers and there is no callback to
- * call, enable hw cyclic mode to avoid unnecessary interrupts.
+ * call and only a single segment, enable hw cyclic mode to avoid
+ * unnecessary interrupts.
*/
- if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+ if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
+ desc->num_sgs == 1)
flags |= AXI_DMAC_FLAG_CYCLIC;
axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
@@ -237,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
struct axi_dmac_desc, vdesc.node);
}
-static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
unsigned int completed_transfers)
{
struct axi_dmac_desc *active;
struct axi_dmac_sg *sg;
+ bool start_next = false;
active = axi_dmac_active_desc(chan);
if (!active)
- return;
+ return false;
- if (active->cyclic) {
- vchan_cyclic_callback(&active->vdesc);
- } else {
- do {
- sg = &active->sg[active->num_completed];
- if (!(BIT(sg->id) & completed_transfers))
- break;
- active->num_completed++;
- if (active->num_completed == active->num_sgs) {
+ do {
+ sg = &active->sg[active->num_completed];
+ if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+ break;
+ if (!(BIT(sg->id) & completed_transfers))
+ break;
+ active->num_completed++;
+ sg->id = AXI_DMAC_SG_UNUSED;
+ if (sg->schedule_when_free) {
+ sg->schedule_when_free = false;
+ start_next = true;
+ }
+
+ if (active->cyclic)
+ vchan_cyclic_callback(&active->vdesc);
+
+ if (active->num_completed == active->num_sgs) {
+ if (active->cyclic) {
+ active->num_completed = 0; /* wrap around */
+ } else {
list_del(&active->vdesc.node);
vchan_cookie_complete(&active->vdesc);
active = axi_dmac_active_desc(chan);
}
- } while (active);
- }
+ }
+ } while (active);
+
+ return start_next;
}
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
struct axi_dmac *dmac = devid;
unsigned int pending;
+ bool start_next = false;
pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
if (!pending)
@@ -281,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
unsigned int completed;
completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
- axi_dmac_transfer_done(&dmac->chan, completed);
+ start_next = axi_dmac_transfer_done(&dmac->chan, completed);
}
/* Space has become available in the descriptor queue */
- if (pending & AXI_DMAC_IRQ_SOT)
+ if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
axi_dmac_start_transfer(&dmac->chan);
spin_unlock(&dmac->chan.vchan.lock);
@@ -334,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
struct axi_dmac_desc *desc;
+ unsigned int i;
desc = kzalloc(sizeof(struct axi_dmac_desc) +
sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
if (!desc)
return NULL;
+ for (i = 0; i < num_sgs; i++)
+ desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+
desc->num_sgs = num_sgs;
return desc;
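
The axi-dmac change above tracks per-segment state with a sentinel: each scatter-gather entry starts out as AXI_DMAC_SG_UNUSED and only carries a real hardware transfer ID while it is in flight, which is what lets multi-segment cyclic transfers defer resubmission until the previous pass completes. A minimal sketch of that sentinel-ID idea, assuming illustrative names (example_sg, example_submit_sg) that are not part of the driver:

#include <linux/types.h>

#define EXAMPLE_SG_UNUSED	32U	/* hardware transfer IDs are 0..31 */

struct example_sg {
	unsigned int id;		/* hardware ID, or the sentinel */
	bool schedule_when_free;	/* resubmit once the ID completes */
};

/* Returns true if the segment was handed to the hardware. */
static bool example_submit_sg(struct example_sg *sg, unsigned int hw_id)
{
	if (sg->id != EXAMPLE_SG_UNUSED) {
		/* Still queued (cyclic mode): defer until completion. */
		sg->schedule_when_free = true;
		return false;
	}
	sg->id = hw_id;
	return true;
}

/* Completion path: clear the ID so the segment can be queued again. */
static void example_complete_sg(struct example_sg *sg)
{
	sg->id = EXAMPLE_SG_UNUSED;
}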
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 882ff9448c3b..501c0b063f85 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* The contents of this file are private to DMA engine drivers, and is not
* part of the API to be used by DMA engine users.
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 34ff53290b03..47edc7fbf91f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -702,6 +702,7 @@ static int dmatest_func(void *data)
* free it this time?" dancing. For now, just
* leave it dangling.
*/
+ WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile
index 3eebd1ce2c6b..2b949c2e4504 100644
--- a/drivers/dma/dw/Makefile
+++ b/drivers/dma/dw/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
dw_dmac_core-objs := core.o
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index a7ea20e7b8e9..9364a3ed345a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -891,6 +891,10 @@ static int edma_slave_config(struct dma_chan *chan,
cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (cfg->src_maxburst > chan->device->max_burst ||
+ cfg->dst_maxburst > chan->device->max_burst)
+ return -EINVAL;
+
memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
return 0;
@@ -1868,6 +1872,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
s_ddev->dev = ecc->dev;
INIT_LIST_HEAD(&s_ddev->channels);
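
Advertising max_burst means dmaengine_slave_config() now fails with -EINVAL for burst sizes the EDMA CIDX field cannot encode. A hedged client-side sketch of how such a configuration is requested; the channel name "tx", FIFO address and burst value are assumptions for illustration only:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_setup_tx(struct device *dev, dma_addr_t fifo_addr,
			    struct dma_chan **out)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,	/* must not exceed the device's max_burst */
	};
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		/* e.g. -EINVAL if dst_maxburst exceeds max_burst */
		dma_release_channel(chan);
		return ret;
	}

	*out = chan;
	return 0;
}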
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 54db1411ce73..0391f930aecc 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -730,14 +731,23 @@ static int mdc_slave_config(struct dma_chan *chan,
return 0;
}
+static int mdc_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+ struct device *dev = mdma2dev(mchan->mdma);
+
+ return pm_runtime_get_sync(dev);
+}
+
static void mdc_free_chan_resources(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
struct mdc_dma *mdma = mchan->mdma;
+ struct device *dev = mdma2dev(mdma);
mdc_terminate_all(chan);
-
mdma->soc->disable_chan(mchan);
+ pm_runtime_put(dev);
}
static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
@@ -854,6 +864,22 @@ static const struct of_device_id mdc_dma_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
+static int img_mdc_runtime_suspend(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(mdma->clk);
+
+ return 0;
+}
+
+static int img_mdc_runtime_resume(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(mdma->clk);
+}
+
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
@@ -883,10 +909,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
if (IS_ERR(mdma->clk))
return PTR_ERR(mdma->clk);
- ret = clk_prepare_enable(mdma->clk);
- if (ret)
- return ret;
-
dma_cap_zero(mdma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
@@ -919,12 +941,13 @@ static int mdc_dma_probe(struct platform_device *pdev)
"img,max-burst-multiplier",
&mdma->max_burst_mult);
if (ret)
- goto disable_clk;
+ return ret;
mdma->dma_dev.dev = &pdev->dev;
mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
+ mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
@@ -945,15 +968,14 @@ static int mdc_dma_probe(struct platform_device *pdev)
mchan->mdma = mdma;
mchan->chan_nr = i;
mchan->irq = platform_get_irq(pdev, i);
- if (mchan->irq < 0) {
- ret = mchan->irq;
- goto disable_clk;
- }
+ if (mchan->irq < 0)
+ return mchan->irq;
+
ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
IRQ_TYPE_LEVEL_HIGH,
dev_name(&pdev->dev), mchan);
if (ret < 0)
- goto disable_clk;
+ return ret;
mchan->vc.desc_free = mdc_desc_free;
vchan_init(&mchan->vc, &mdma->dma_dev);
@@ -962,14 +984,19 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
sizeof(struct mdc_hw_list_desc),
4, 0);
- if (!mdma->desc_pool) {
- ret = -ENOMEM;
- goto disable_clk;
+ if (!mdma->desc_pool)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_mdc_runtime_resume(&pdev->dev);
+ if (ret)
+ return ret;
}
ret = dma_async_device_register(&mdma->dma_dev);
if (ret)
- goto disable_clk;
+ goto suspend;
ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
if (ret)
@@ -982,8 +1009,10 @@ static int mdc_dma_probe(struct platform_device *pdev)
unregister:
dma_async_device_unregister(&mdma->dma_dev);
-disable_clk:
- clk_disable_unprepare(mdma->clk);
+suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1004,14 +1033,47 @@ static int mdc_dma_remove(struct platform_device *pdev)
tasklet_kill(&mchan->vc.task);
}
- clk_disable_unprepare(mdma->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_mdc_runtime_suspend(&pdev->dev);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int img_mdc_suspend_late(struct device *dev)
+{
+ struct mdc_dma *mdma = dev_get_drvdata(dev);
+ int i;
+
+ /* Check that all channels are idle */
+ for (i = 0; i < mdma->nr_channels; i++) {
+ struct mdc_chan *mchan = &mdma->channels[i];
+
+ if (unlikely(mchan->desc))
+ return -EBUSY;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int img_mdc_resume_early(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_mdc_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend,
+ img_mdc_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late,
+ img_mdc_resume_early)
+};
+
static struct platform_driver mdc_dma_driver = {
.driver = {
.name = "img-mdc-dma",
+ .pm = &img_mdc_pm_ops,
.of_match_table = of_match_ptr(mdc_dma_of_match),
},
.probe = mdc_dma_probe,
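
The img-mdc conversion moves the clock entirely under runtime PM: channel allocation takes a runtime PM reference, freeing drops it, and system sleep is funnelled through pm_runtime_force_suspend()/pm_runtime_force_resume(). A condensed, driver-agnostic sketch of the same pattern, using illustrative example_* names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_dma {
	struct clk *clk;
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_dma *ed = dev_get_drvdata(dev);

	clk_disable_unprepare(ed->clk);
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_dma *ed = dev_get_drvdata(dev);

	return clk_prepare_enable(ed->clk);
}

/* Channel callbacks only hold a runtime PM reference; the clock itself is
 * handled by the runtime PM callbacks above. */
static int example_alloc_chan(struct device *dev)
{
	return pm_runtime_get_sync(dev);
}

static void example_free_chan(struct device *dev)
{
	pm_runtime_put(dev);
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};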
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f681df8f0ed3..331f863c605e 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -364,9 +364,9 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
local_irq_restore(flags);
}
-static void imxdma_watchdog(unsigned long data)
+static void imxdma_watchdog(struct timer_list *t)
{
- struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
+ struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
struct imxdma_engine *imxdma = imxdmac->imxdma;
int channel = imxdmac->channel;
@@ -1153,9 +1153,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
}
imxdmac->irq = irq + i;
- init_timer(&imxdmac->watchdog);
- imxdmac->watchdog.function = &imxdma_watchdog;
- imxdmac->watchdog.data = (unsigned long)imxdmac;
+ timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
}
imxdmac->imxdma = imxdma;
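
The imx-dma and ioat hunks are part of the kernel-wide timer conversion: the callback now receives a struct timer_list * and recovers its container with from_timer(), and init_timer()/setup_timer() become timer_setup(). A minimal sketch of the converted shape, with illustrative names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct example_chan {
	struct timer_list watchdog;
	int channel;
};

static void example_watchdog(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer. */
	struct example_chan *chan = from_timer(chan, t, watchdog);

	pr_warn("channel %d watchdog fired\n", chan->channel);
}

static void example_init(struct example_chan *chan)
{
	timer_setup(&chan->watchdog, example_watchdog, 0);
	mod_timer(&chan->watchdog, jiffies + HZ);
}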
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a67ec1bdc4e0..2184881afe76 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -178,6 +178,14 @@
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
+#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
+ BIT(DMA_MEM_TO_DEV) | \
+ BIT(DMA_DEV_TO_DEV))
+
/*
* Mode/Count of data node descriptors - IPCv2
*/
@@ -1851,9 +1859,9 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
- sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
+ sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index f70cc74032ea..58d4ccd33672 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -474,7 +474,7 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
if (time_is_before_jiffies(ioat_chan->timer.expires)
&& timer_pending(&ioat_chan->timer)) {
mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- ioat_timer_event((unsigned long)ioat_chan);
+ ioat_timer_event(&ioat_chan->timer);
}
return -ENOMEM;
@@ -862,9 +862,9 @@ static void check_active(struct ioatdma_chan *ioat_chan)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
-void ioat_timer_event(unsigned long data)
+void ioat_timer_event(struct timer_list *t)
{
- struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+ struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
dma_addr_t phys_complete;
u64 status;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 56200eefcf5e..1ab42ec2b7ff 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -406,10 +406,9 @@ enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
-void ioat_timer_event(unsigned long data);
+void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
-void ioat_timer_event(unsigned long data);
/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 93e006c3441d..2f31d3d0caa6 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -760,7 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
dma_cookie_init(&ioat_chan->dma_chan);
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
ioat_dma->idx[idx] = ioat_chan;
- setup_timer(&ioat_chan->timer, ioat_timer_event, data);
+ timer_setup(&ioat_chan->timer, ioat_timer_event, 0);
tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d3f918a9ee76..50559338239b 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1286,7 +1286,6 @@ MODULE_DEVICE_TABLE(of, nbpf_match);
static int nbpf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
@@ -1300,10 +1299,10 @@ static int nbpf_probe(struct platform_device *pdev)
BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
/* DT only */
- if (!np || !of_id || !of_id->data)
+ if (!np)
return -ENODEV;
- cfg = of_id->data;
+ cfg = of_device_get_match_data(dev);
num_channels = cfg->num_channels;
nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 8c1665c8fe33..f6dd849159d8 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1288,6 +1288,10 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config
cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return -EINVAL;
+ if (cfg->src_maxburst > chan->device->max_burst ||
+ cfg->dst_maxburst > chan->device->max_burst)
+ return -EINVAL;
+
memcpy(&c->cfg, cfg, sizeof(c->cfg));
return 0;
@@ -1482,6 +1486,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
spin_lock_init(&od->lock);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index f9028e9d0dfc..afd8f27bda96 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -123,7 +123,7 @@ struct pch_dma_chan {
struct pch_dma {
struct dma_device dma;
void __iomem *membase;
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct pch_dma_regs regs;
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
struct pch_dma_chan channels[MAX_CHAN_NR];
@@ -437,7 +437,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
struct pch_dma *pd = to_pd(chan->device);
dma_addr_t addr;
- desc = pci_pool_zalloc(pd->pool, flags, &addr);
+ desc = dma_pool_zalloc(pd->pool, flags, &addr);
if (desc) {
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->txd, chan);
@@ -549,7 +549,7 @@ static void pd_free_chan_resources(struct dma_chan *chan)
spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
- pci_pool_free(pd->pool, desc, desc->txd.phys);
+ dma_pool_free(pd->pool, desc, desc->txd.phys);
pdc_enable_irq(chan, 0);
}
@@ -880,7 +880,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_iounmap;
}
- pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
+ pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
sizeof(struct pch_dma_desc), 4, 0);
if (!pd->pool) {
dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
@@ -931,7 +931,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
return 0;
err_free_pool:
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
err_free_irq:
free_irq(pdev->irq, pd);
err_iounmap:
@@ -963,7 +963,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
tasklet_kill(&pd_chan->tasklet);
}
- pci_pool_destroy(pd->pool);
+ dma_pool_destroy(pd->pool);
pci_iounmap(pdev, pd->membase);
pci_release_regions(pdev);
pci_disable_device(pdev);
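
The pch_dma hunks are a one-for-one move from the removed pci_pool wrappers to the underlying dma_pool API, keyed off &pdev->dev rather than the pci_dev. A small self-contained sketch of the dma_pool calls used above; the pool name, descriptor layout and sizes are illustrative only:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_desc {
	unsigned int regs[4];
};

static int example_use_pool(struct device *dev)
{
	struct dma_pool *pool;
	struct example_desc *desc;
	dma_addr_t phys;

	/* 4-byte alignment, no boundary restriction. */
	pool = dma_pool_create("example_desc_pool", dev,
			       sizeof(*desc), 4, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'phys' to the hardware, use 'desc' from the CPU ... */

	dma_pool_free(pool, desc, phys);
	dma_pool_destroy(pool);
	return 0;
}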
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f122c2a7b9f0..d7327fd5f445 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2390,7 +2390,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
+static int add_desc(struct list_head *pool, spinlock_t *lock,
+ gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
@@ -2400,27 +2401,28 @@ static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
if (!desc)
return 0;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
for (i = 0; i < count; i++) {
_init_desc(&desc[i]);
- list_add_tail(&desc[i].node, &pl330->desc_pool);
+ list_add_tail(&desc[i].node, pool);
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return count;
}
-static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
+static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
+ spinlock_t *lock)
{
struct dma_pl330_desc *desc = NULL;
unsigned long flags;
- spin_lock_irqsave(&pl330->pool_lock, flags);
+ spin_lock_irqsave(lock, flags);
- if (!list_empty(&pl330->desc_pool)) {
- desc = list_entry(pl330->desc_pool.next,
+ if (!list_empty(pool)) {
+ desc = list_entry(pool->next,
struct dma_pl330_desc, node);
list_del_init(&desc->node);
@@ -2429,7 +2431,7 @@ static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
desc->txd.callback = NULL;
}
- spin_unlock_irqrestore(&pl330->pool_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
return desc;
}
@@ -2441,20 +2443,18 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
- desc = pluck_desc(pl330);
+ desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- if (!add_desc(pl330, GFP_ATOMIC, 1))
- return NULL;
+ DEFINE_SPINLOCK(lock);
+ LIST_HEAD(pool);
- /* Try again */
- desc = pluck_desc(pl330);
- if (!desc) {
- dev_err(pch->dmac->ddma.dev,
- "%s:%d ALERT!\n", __func__, __LINE__);
+ if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
return NULL;
- }
+
+ desc = pluck_desc(&pool, &lock);
+ WARN_ON(!desc || !list_empty(&pool));
}
/* Initialize the descriptor */
@@ -2868,7 +2868,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&pl330->pool_lock);
/* Create a descriptor pool of default size */
- if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
+ if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
+ GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
INIT_LIST_HEAD(&pd->channels);
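
With add_desc() and pluck_desc() now parameterised on a pool and its lock, the GFP_ATOMIC fallback in pl330_get_desc() can build the emergency descriptor through an on-stack list and spinlock instead of touching the DMAC-wide pool. A condensed sketch of that on-stack pattern, with illustrative example_* helpers:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_desc {
	struct list_head node;
};

/* Same shape as the driver's helpers: operate on any pool + lock pair. */
static int example_add_desc(struct list_head *pool, spinlock_t *lock,
			    gfp_t flg)
{
	struct example_desc *desc = kzalloc(sizeof(*desc), flg);
	unsigned long flags;

	if (!desc)
		return 0;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&desc->node, pool);
	spin_unlock_irqrestore(lock, flags);
	return 1;
}

static struct example_desc *example_get_desc_atomic(void)
{
	/* Private, on-stack pool: no contention with the shared pool. */
	DEFINE_SPINLOCK(lock);
	LIST_HEAD(pool);
	struct example_desc *desc;

	if (!example_add_desc(&pool, &lock, GFP_ATOMIC))
		return NULL;

	desc = list_first_entry(&pool, struct example_desc, node);
	list_del_init(&desc->node);
	return desc;
}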
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index 4bfc38b45220..1ae92da88b0c 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 6d89fb6a6a92..d076940e0c69 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -46,6 +46,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
+#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
@@ -78,6 +79,8 @@ struct bam_async_desc {
struct bam_desc_hw *curr_desc;
+ /* list node for the desc in the bam_chan list of descriptors */
+ struct list_head desc_node;
enum dma_transfer_direction dir;
size_t length;
struct bam_desc_hw desc[0];
@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE (SZ_32K - 8)
+#define IS_BUSY(chan) (CIRC_SPACE((chan)->tail, (chan)->head,\
+			MAX_DESCRIPTORS + 1) == 0)
struct bam_chan {
struct virt_dma_chan vc;
@@ -356,8 +361,6 @@ struct bam_chan {
/* configuration from device tree */
u32 id;
- struct bam_async_desc *curr_txd; /* current running dma */
-
/* runtime configuration */
struct dma_slave_config slave;
@@ -372,6 +375,8 @@ struct bam_chan {
unsigned int initialized; /* is the channel hw initialized? */
unsigned int paused; /* is the channel paused? */
unsigned int reconfigure; /* new slave config? */
+ /* list of descriptors currently processed */
+ struct list_head desc_list;
struct list_head node;
};
@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan)
vchan_free_chan_resources(to_virt_chan(chan));
- if (bchan->curr_txd) {
+ if (!list_empty(&bchan->desc_list)) {
dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
goto err;
}
@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
if (flags & DMA_PREP_INTERRUPT)
async_desc->flags |= DESC_FLAG_EOT;
- else
- async_desc->flags |= DESC_FLAG_INT;
async_desc->num_desc = num_alloc;
async_desc->curr_desc = async_desc->desc;
@@ -684,14 +687,16 @@ err_out:
static int bam_dma_terminate_all(struct dma_chan *chan)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc, *tmp;
unsigned long flag;
LIST_HEAD(head);
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
- if (bchan->curr_txd) {
- list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
- bchan->curr_txd = NULL;
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
+ list_del(&async_desc->desc_node);
}
vchan_get_all_descriptors(&bchan->vc, &head);
@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan)
*/
static u32 process_channel_irqs(struct bam_device *bdev)
{
- u32 i, srcs, pipe_stts;
+ u32 i, srcs, pipe_stts, offset, avail;
unsigned long flags;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc, *tmp;
srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev)
writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
spin_lock_irqsave(&bchan->vc.lock, flags);
- async_desc = bchan->curr_txd;
- if (async_desc) {
- async_desc->num_desc -= async_desc->xfer_len;
- async_desc->curr_desc += async_desc->xfer_len;
- bchan->curr_txd = NULL;
+ offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
+ P_SW_OFSTS_MASK;
+ offset /= sizeof(struct bam_desc_hw);
+
+		/* Number of completed descriptors available to process */
+ avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
+
+ list_for_each_entry_safe(async_desc, tmp,
+ &bchan->desc_list, desc_node) {
+ /* Not enough data to read */
+ if (avail < async_desc->xfer_len)
+ break;
/* manage FIFO */
bchan->head += async_desc->xfer_len;
bchan->head %= MAX_DESCRIPTORS;
+ async_desc->num_desc -= async_desc->xfer_len;
+ async_desc->curr_desc += async_desc->xfer_len;
+ avail -= async_desc->xfer_len;
+
/*
- * if complete, process cookie. Otherwise
+ * if complete, process cookie. Otherwise
* push back to front of desc_issued so that
* it gets restarted by the tasklet
*/
- if (!async_desc->num_desc)
+ if (!async_desc->num_desc) {
vchan_cookie_complete(&async_desc->vd);
- else
+ } else {
list_add(&async_desc->vd.node,
- &bchan->vc.desc_issued);
+ &bchan->vc.desc_issued);
+ }
+ list_del(&async_desc->desc_node);
}
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct bam_chan *bchan = to_bam_chan(chan);
+ struct bam_async_desc *async_desc;
struct virt_dma_desc *vd;
int ret;
size_t residue = 0;
@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_lock_irqsave(&bchan->vc.lock, flags);
vd = vchan_find_desc(&bchan->vc, cookie);
- if (vd)
+ if (vd) {
residue = container_of(vd, struct bam_async_desc, vd)->length;
- else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
- for (i = 0; i < bchan->curr_txd->num_desc; i++)
- residue += bchan->curr_txd->curr_desc[i].size;
+ } else {
+ list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
+ if (async_desc->vd.tx.cookie != cookie)
+ continue;
+
+ for (i = 0; i < async_desc->num_desc; i++)
+ residue += async_desc->curr_desc[i].size;
+ }
+ }
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan)
{
struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
struct bam_device *bdev = bchan->bdev;
- struct bam_async_desc *async_desc;
+ struct bam_async_desc *async_desc = NULL;
struct bam_desc_hw *desc;
struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
sizeof(struct bam_desc_hw));
int ret;
+ unsigned int avail;
+ struct dmaengine_desc_callback cb;
lockdep_assert_held(&bchan->vc.lock);
if (!vd)
return;
- list_del(&vd->node);
-
- async_desc = container_of(vd, struct bam_async_desc, vd);
- bchan->curr_txd = async_desc;
-
ret = pm_runtime_get_sync(bdev->dev);
if (ret < 0)
return;
- /* on first use, initialize the channel hardware */
- if (!bchan->initialized)
- bam_chan_init_hw(bchan, async_desc->dir);
+ while (vd && !IS_BUSY(bchan)) {
+ list_del(&vd->node);
- /* apply new slave config changes, if necessary */
- if (bchan->reconfigure)
- bam_apply_new_config(bchan, async_desc->dir);
+ async_desc = container_of(vd, struct bam_async_desc, vd);
- desc = bchan->curr_txd->curr_desc;
+ /* on first use, initialize the channel hardware */
+ if (!bchan->initialized)
+ bam_chan_init_hw(bchan, async_desc->dir);
- if (async_desc->num_desc > MAX_DESCRIPTORS)
- async_desc->xfer_len = MAX_DESCRIPTORS;
- else
- async_desc->xfer_len = async_desc->num_desc;
+ /* apply new slave config changes, if necessary */
+ if (bchan->reconfigure)
+ bam_apply_new_config(bchan, async_desc->dir);
- /* set any special flags on the last descriptor */
- if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(async_desc->flags);
- else
- desc[async_desc->xfer_len - 1].flags |=
- cpu_to_le16(DESC_FLAG_INT);
+ desc = async_desc->curr_desc;
+ avail = CIRC_SPACE(bchan->tail, bchan->head,
+ MAX_DESCRIPTORS + 1);
+
+ if (async_desc->num_desc > avail)
+ async_desc->xfer_len = avail;
+ else
+ async_desc->xfer_len = async_desc->num_desc;
+
+ /* set any special flags on the last descriptor */
+ if (async_desc->num_desc == async_desc->xfer_len)
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(async_desc->flags);
- if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
- u32 partial = MAX_DESCRIPTORS - bchan->tail;
+ vd = vchan_next_desc(&bchan->vc);
- memcpy(&fifo[bchan->tail], desc,
- partial * sizeof(struct bam_desc_hw));
- memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+ dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
+
+ /*
+ * An interrupt is generated at this desc, if
+		 * An interrupt is generated at this desc if:
+		 * - the FIFO is full,
+		 * - there are no more descriptors to add, or
+		 * - a completion callback was requested for this desc.
+		 *   In that case, BAM delivers the completion callback for
+		 *   this desc and continues processing the next one.
+ if (((avail <= async_desc->xfer_len) || !vd ||
+ dmaengine_desc_callback_valid(&cb)) &&
+ !(async_desc->flags & DESC_FLAG_EOT))
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
+
+ if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+ u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+ memcpy(&fifo[bchan->tail], desc,
+ partial * sizeof(struct bam_desc_hw));
+ memcpy(fifo, &desc[partial],
+ (async_desc->xfer_len - partial) *
sizeof(struct bam_desc_hw));
- } else {
- memcpy(&fifo[bchan->tail], desc,
- async_desc->xfer_len * sizeof(struct bam_desc_hw));
- }
+ } else {
+ memcpy(&fifo[bchan->tail], desc,
+ async_desc->xfer_len *
+ sizeof(struct bam_desc_hw));
+ }
- bchan->tail += async_desc->xfer_len;
- bchan->tail %= MAX_DESCRIPTORS;
+ bchan->tail += async_desc->xfer_len;
+ bchan->tail %= MAX_DESCRIPTORS;
+ list_add_tail(&async_desc->desc_node, &bchan->desc_list);
+ }
/* ensure descriptor writes and dma start not reordered */
wmb();
@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data)
bchan = &bdev->channels[i];
spin_lock_irqsave(&bchan->vc.lock, flags);
- if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+ if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
}
@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&bchan->vc.lock, flags);
/* if work pending and idle, start a transaction */
- if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+ if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
bam_start_dma(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
vchan_init(&bchan->vc, &bdev->common);
bchan->vc.desc_free = bam_dma_free_desc;
+ INIT_LIST_HEAD(&bchan->desc_list);
}
static const struct of_device_id bam_of_match[] = {
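
The bam_dma rework replaces the single curr_txd pointer with a per-channel descriptor list and accounts for FIFO occupancy with the circ_buf helpers, where the size passed in (MAX_DESCRIPTORS + 1) must be a power of two. A tiny sketch of how CIRC_SPACE()/CIRC_CNT() describe such a head/tail ring; the structure and size below are illustrative:

#include <linux/circ_buf.h>

#define EXAMPLE_FIFO_SIZE	8	/* circ_buf requires a power of two */

struct example_fifo {
	unsigned int head;	/* producer index (descriptors written) */
	unsigned int tail;	/* consumer index (descriptors retired) */
};

/* Slots the producer may still fill before the ring is full. */
static unsigned int example_space(const struct example_fifo *f)
{
	return CIRC_SPACE(f->head, f->tail, EXAMPLE_FIFO_SIZE);
}

/* Completed entries the consumer can retire. */
static unsigned int example_count(const struct example_fifo *f)
{
	return CIRC_CNT(f->head, f->tail, EXAMPLE_FIFO_SIZE);
}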
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 1adeb3265085..c7a89c22890e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -823,6 +823,13 @@ static const struct sa11x0_dma_channel_desc chan_desc[] = {
CD(Ser4SSPRc, DDAR_RW),
};
+static const struct dma_slave_map sa11x0_dma_map[] = {
+ { "sa11x0-ir", "tx", "Ser2ICPTr" },
+ { "sa11x0-ir", "rx", "Ser2ICPRc" },
+ { "sa11x0-ssp", "tx", "Ser4SSPTr" },
+ { "sa11x0-ssp", "rx", "Ser4SSPRc" },
+};
+
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
struct device *dev)
{
@@ -909,6 +916,10 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
+ d->slave.filter.fn = sa11x0_dma_filter_fn;
+ d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
+ d->slave.filter.map = sa11x0_dma_map;
+
d->base = ioremap(res->start, resource_size(res));
if (!d->base) {
ret = -ENOMEM;
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index f1e2fd64f279..7d7c9491ade1 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# DMA Engine Helpers
#
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
new file mode 100644
index 000000000000..b652071a2096
--- /dev/null
+++ b/drivers/dma/sprd-dma.c
@@ -0,0 +1,988 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define SPRD_DMA_CHN_REG_OFFSET 0x1000
+#define SPRD_DMA_CHN_REG_LENGTH 0x40
+#define SPRD_DMA_MEMCPY_MIN_SIZE 64
+
+/* DMA global registers definition */
+#define SPRD_DMA_GLB_PAUSE 0x0
+#define SPRD_DMA_GLB_FRAG_WAIT 0x4
+#define SPRD_DMA_GLB_REQ_PEND0_EN 0x8
+#define SPRD_DMA_GLB_REQ_PEND1_EN 0xc
+#define SPRD_DMA_GLB_INT_RAW_STS 0x10
+#define SPRD_DMA_GLB_INT_MSK_STS 0x14
+#define SPRD_DMA_GLB_REQ_STS 0x18
+#define SPRD_DMA_GLB_CHN_EN_STS 0x1c
+#define SPRD_DMA_GLB_DEBUG_STS 0x20
+#define SPRD_DMA_GLB_ARB_SEL_STS 0x24
+#define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1))
+#define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000
+
+/* DMA channel registers definition */
+#define SPRD_DMA_CHN_PAUSE 0x0
+#define SPRD_DMA_CHN_REQ 0x4
+#define SPRD_DMA_CHN_CFG 0x8
+#define SPRD_DMA_CHN_INTC 0xc
+#define SPRD_DMA_CHN_SRC_ADDR 0x10
+#define SPRD_DMA_CHN_DES_ADDR 0x14
+#define SPRD_DMA_CHN_FRG_LEN 0x18
+#define SPRD_DMA_CHN_BLK_LEN 0x1c
+#define SPRD_DMA_CHN_TRSC_LEN 0x20
+#define SPRD_DMA_CHN_TRSF_STEP 0x24
+#define SPRD_DMA_CHN_WARP_PTR 0x28
+#define SPRD_DMA_CHN_WARP_TO 0x2c
+#define SPRD_DMA_CHN_LLIST_PTR 0x30
+#define SPRD_DMA_CHN_FRAG_STEP 0x34
+#define SPRD_DMA_CHN_SRC_BLK_STEP 0x38
+#define SPRD_DMA_CHN_DES_BLK_STEP 0x3c
+
+/* SPRD_DMA_CHN_INTC register definition */
+#define SPRD_DMA_INT_MASK GENMASK(4, 0)
+#define SPRD_DMA_INT_CLR_OFFSET 24
+#define SPRD_DMA_FRAG_INT_EN BIT(0)
+#define SPRD_DMA_BLK_INT_EN BIT(1)
+#define SPRD_DMA_TRANS_INT_EN BIT(2)
+#define SPRD_DMA_LIST_INT_EN BIT(3)
+#define SPRD_DMA_CFG_ERR_INT_EN BIT(4)
+
+/* SPRD_DMA_CHN_CFG register definition */
+#define SPRD_DMA_CHN_EN BIT(0)
+#define SPRD_DMA_WAIT_BDONE_OFFSET 24
+#define SPRD_DMA_DONOT_WAIT_BDONE 1
+
+/* SPRD_DMA_CHN_REQ register definition */
+#define SPRD_DMA_REQ_EN BIT(0)
+
+/* SPRD_DMA_CHN_PAUSE register definition */
+#define SPRD_DMA_PAUSE_EN BIT(0)
+#define SPRD_DMA_PAUSE_STS BIT(2)
+#define SPRD_DMA_PAUSE_CNT 0x2000
+
+/* DMA_CHN_WARP_* register definition */
+#define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28)
+#define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0)
+#define SPRD_DMA_HIGH_ADDR_OFFSET 4
+
+/* SPRD_DMA_CHN_INTC register definition */
+#define SPRD_DMA_FRAG_INT_STS BIT(16)
+#define SPRD_DMA_BLK_INT_STS BIT(17)
+#define SPRD_DMA_TRSC_INT_STS BIT(18)
+#define SPRD_DMA_LIST_INT_STS BIT(19)
+#define SPRD_DMA_CFGERR_INT_STS BIT(20)
+#define SPRD_DMA_CHN_INT_STS \
+ (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \
+ SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \
+ SPRD_DMA_CFGERR_INT_STS)
+
+/* SPRD_DMA_CHN_FRG_LEN register definition */
+#define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30
+#define SPRD_DMA_DES_DATAWIDTH_OFFSET 28
+#define SPRD_DMA_SWT_MODE_OFFSET 26
+#define SPRD_DMA_REQ_MODE_OFFSET 24
+#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
+#define SPRD_DMA_FIX_SEL_OFFSET 21
+#define SPRD_DMA_FIX_EN_OFFSET 20
+#define SPRD_DMA_LLIST_END_OFFSET 19
+#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
+
+/* SPRD_DMA_CHN_BLK_LEN register definition */
+#define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0)
+
+/* SPRD_DMA_CHN_TRSC_LEN register definition */
+#define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0)
+
+/* SPRD_DMA_CHN_TRSF_STEP register definition */
+#define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16
+#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
+#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
+
+#define SPRD_DMA_SOFTWARE_UID 0
+
+/*
+ * enum sprd_dma_req_mode: define the DMA request mode
+ * @SPRD_DMA_FRAG_REQ: fragment request mode
+ * @SPRD_DMA_BLK_REQ: block request mode
+ * @SPRD_DMA_TRANS_REQ: transaction request mode
+ * @SPRD_DMA_LIST_REQ: link-list request mode
+ *
+ * There are 4 request modes: fragment mode, block mode, transaction mode
+ * and link-list mode. One transaction can contain several blocks, and one
+ * block can contain several fragments. In link-list mode, several DMA
+ * configurations can be saved in one reserved memory region, and the DMA
+ * controller fetches each configuration automatically to start the transfer.
+ */
+enum sprd_dma_req_mode {
+ SPRD_DMA_FRAG_REQ,
+ SPRD_DMA_BLK_REQ,
+ SPRD_DMA_TRANS_REQ,
+ SPRD_DMA_LIST_REQ,
+};
+
+/*
+ * enum sprd_dma_int_type: define the DMA interrupt type
+ * @SPRD_DMA_NO_INT: do not generate any DMA interrupts.
+ * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request
+ * is done.
+ * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done.
+ * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment
+ * or one block request is done.
+ * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction
+ * request is done.
+ * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one
+ * transaction request or fragment request is done.
+ * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one
+ * transaction request or block request is done.
+ * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request
+ * is done.
+ * @SPRD_DMA_CFGERR_INT: configuration error interrupt when the
+ * configuration is incorrect.
+ */
+enum sprd_dma_int_type {
+ SPRD_DMA_NO_INT,
+ SPRD_DMA_FRAG_INT,
+ SPRD_DMA_BLK_INT,
+ SPRD_DMA_BLK_FRAG_INT,
+ SPRD_DMA_TRANS_INT,
+ SPRD_DMA_TRANS_FRAG_INT,
+ SPRD_DMA_TRANS_BLK_INT,
+ SPRD_DMA_LIST_INT,
+ SPRD_DMA_CFGERR_INT,
+};
+
+/* dma channel hardware configuration */
+struct sprd_dma_chn_hw {
+ u32 pause;
+ u32 req;
+ u32 cfg;
+ u32 intc;
+ u32 src_addr;
+ u32 des_addr;
+ u32 frg_len;
+ u32 blk_len;
+ u32 trsc_len;
+ u32 trsf_step;
+ u32 wrap_ptr;
+ u32 wrap_to;
+ u32 llist_ptr;
+ u32 frg_step;
+ u32 src_blk_step;
+ u32 des_blk_step;
+};
+
+/* dma request description */
+struct sprd_dma_desc {
+ struct virt_dma_desc vd;
+ struct sprd_dma_chn_hw chn_hw;
+};
+
+/* dma channel description */
+struct sprd_dma_chn {
+ struct virt_dma_chan vc;
+ void __iomem *chn_base;
+ u32 chn_num;
+ u32 dev_id;
+ struct sprd_dma_desc *cur_desc;
+};
+
+/* SPRD dma device */
+struct sprd_dma_dev {
+ struct dma_device dma_dev;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct clk *ashb_clk;
+ int irq;
+ u32 total_chns;
+ struct sprd_dma_chn channels[0];
+};
+
+static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
+static struct of_dma_filter_info sprd_dma_info = {
+ .filter_fn = sprd_dma_filter_fn,
+};
+
+static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sprd_dma_chn, vc.chan);
+}
+
+static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(c);
+
+ return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
+}
+
+static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct sprd_dma_desc, vd);
+}
+
+static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
+ u32 mask, u32 val)
+{
+ u32 orig = readl(schan->chn_base + reg);
+ u32 tmp;
+
+ tmp = (orig & ~mask) | val;
+ writel(tmp, schan->chn_base + reg);
+}
+
+static int sprd_dma_enable(struct sprd_dma_dev *sdev)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sdev->clk);
+ if (ret)
+ return ret;
+
+ /*
+	 * The ashb_clk is optional and only used by the AGCP DMA controller,
+	 * so only enable it when it is present.
+ */
+ if (!IS_ERR(sdev->ashb_clk))
+ ret = clk_prepare_enable(sdev->ashb_clk);
+
+ return ret;
+}
+
+static void sprd_dma_disable(struct sprd_dma_dev *sdev)
+{
+ clk_disable_unprepare(sdev->clk);
+
+ /*
+	 * The ashb_clk is optional and only used by the AGCP DMA controller,
+	 * so only disable it when it is present.
+ */
+ if (!IS_ERR(sdev->ashb_clk))
+ clk_disable_unprepare(sdev->ashb_clk);
+}
+
+static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 dev_id = schan->dev_id;
+
+ if (dev_id != SPRD_DMA_SOFTWARE_UID) {
+ u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
+ SPRD_DMA_GLB_REQ_UID(dev_id);
+
+ writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
+ }
+}
+
+static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 dev_id = schan->dev_id;
+
+ if (dev_id != SPRD_DMA_SOFTWARE_UID) {
+ u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
+ SPRD_DMA_GLB_REQ_UID(dev_id);
+
+ writel(0, sdev->glb_base + uid_offset);
+ }
+}
+
+static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
+ SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
+ SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
+}
+
+static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
+ SPRD_DMA_CHN_EN);
+}
+
+static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
+}
+
+static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
+{
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
+ SPRD_DMA_REQ_EN);
+}
+
+static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 pause, timeout = SPRD_DMA_PAUSE_CNT;
+
+ if (enable) {
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
+ SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);
+
+ do {
+ pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
+ if (pause & SPRD_DMA_PAUSE_STS)
+ break;
+
+ cpu_relax();
+ } while (--timeout > 0);
+
+ if (!timeout)
+ dev_warn(sdev->dma_dev.dev,
+ "pause dma controller timeout\n");
+ } else {
+ sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
+ SPRD_DMA_PAUSE_EN, 0);
+ }
+}
+
+static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
+{
+ u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);
+
+ if (!(cfg & SPRD_DMA_CHN_EN))
+ return;
+
+ sprd_dma_pause_resume(schan, true);
+ sprd_dma_disable_chn(schan);
+}
+
+static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
+{
+ unsigned long addr, addr_high;
+
+ addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
+ addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
+ SPRD_DMA_HIGH_ADDR_MASK;
+
+ return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
+}
+
+static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
+ SPRD_DMA_CHN_INT_STS;
+
+ switch (intc_sts) {
+ case SPRD_DMA_CFGERR_INT_STS:
+ return SPRD_DMA_CFGERR_INT;
+
+ case SPRD_DMA_LIST_INT_STS:
+ return SPRD_DMA_LIST_INT;
+
+ case SPRD_DMA_TRSC_INT_STS:
+ return SPRD_DMA_TRANS_INT;
+
+ case SPRD_DMA_BLK_INT_STS:
+ return SPRD_DMA_BLK_INT;
+
+ case SPRD_DMA_FRAG_INT_STS:
+ return SPRD_DMA_FRAG_INT;
+
+ default:
+ dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
+ return SPRD_DMA_NO_INT;
+ }
+}
+
+static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
+{
+ u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
+
+ return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
+}
+
+static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
+ struct sprd_dma_desc *sdesc)
+{
+ struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;
+
+ writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
+ writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
+ writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
+ writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
+ writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
+ writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
+ writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
+ writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
+ writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
+ writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
+ writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
+ writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
+ writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
+ writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
+ writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
+ writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
+}
+
+static void sprd_dma_start(struct sprd_dma_chn *schan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+ schan->cur_desc = to_sprd_dma_desc(vd);
+
+ /*
+ * Copy the DMA configuration from DMA descriptor to this hardware
+ * channel.
+ */
+ sprd_dma_set_chn_config(schan, schan->cur_desc);
+ sprd_dma_set_uid(schan);
+ sprd_dma_enable_chn(schan);
+
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ sprd_dma_soft_request(schan);
+}
+
+static void sprd_dma_stop(struct sprd_dma_chn *schan)
+{
+ sprd_dma_stop_and_disable(schan);
+ sprd_dma_unset_uid(schan);
+ sprd_dma_clear_int(schan);
+}
+
+static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
+ enum sprd_dma_int_type int_type,
+ enum sprd_dma_req_mode req_mode)
+{
+ if (int_type == SPRD_DMA_NO_INT)
+ return false;
+
+ if (int_type >= req_mode + 1)
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t dma_irq_handle(int irq, void *dev_id)
+{
+ struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
+ u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
+ struct sprd_dma_chn *schan;
+ struct sprd_dma_desc *sdesc;
+ enum sprd_dma_req_mode req_type;
+ enum sprd_dma_int_type int_type;
+ bool trans_done = false;
+ u32 i;
+
+ while (irq_status) {
+ i = __ffs(irq_status);
+ irq_status &= (irq_status - 1);
+ schan = &sdev->channels[i];
+
+ spin_lock(&schan->vc.lock);
+ int_type = sprd_dma_get_int_type(schan);
+ req_type = sprd_dma_get_req_type(schan);
+ sprd_dma_clear_int(schan);
+
+ sdesc = schan->cur_desc;
+
+ /* Check if the dma request descriptor is done. */
+ trans_done = sprd_dma_check_trans_done(sdesc, int_type,
+ req_type);
+ if (trans_done == true) {
+ vchan_cookie_complete(&sdesc->vd);
+ schan->cur_desc = NULL;
+ sprd_dma_start(schan);
+ }
+ spin_unlock(&schan->vc.lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ int ret;
+
+ ret = pm_runtime_get_sync(chan->device->dev);
+ if (ret < 0)
+ return ret;
+
+ schan->dev_id = SPRD_DMA_SOFTWARE_UID;
+ return 0;
+}
+
+static void sprd_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_stop(schan);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ vchan_free_chan_resources(&schan->vc);
+ pm_runtime_put(chan->device->dev);
+}
+
+static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ enum dma_status ret;
+ u32 pos;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ vd = vchan_find_desc(&schan->vc, cookie);
+ if (vd) {
+ struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+ struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+
+ if (hw->trsc_len > 0)
+ pos = hw->trsc_len;
+ else if (hw->blk_len > 0)
+ pos = hw->blk_len;
+ else if (hw->frg_len > 0)
+ pos = hw->frg_len;
+ else
+ pos = 0;
+ } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
+ pos = sprd_dma_get_dst_addr(schan);
+ } else {
+ pos = 0;
+ }
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ dma_set_residue(txstate, pos);
+ return ret;
+}
+
+static void sprd_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
+ sprd_dma_start(schan);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+}
+
+static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
+ dma_addr_t dest, dma_addr_t src, size_t len)
+{
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
+ struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
+ u32 datawidth, src_step, des_step, fragment_len;
+	u32 block_len, req_mode, irq_mode, transaction_len;
+ u32 fix_mode = 0, fix_en = 0;
+
+ if (IS_ALIGNED(len, 4)) {
+ datawidth = 2;
+ src_step = 4;
+ des_step = 4;
+ } else if (IS_ALIGNED(len, 2)) {
+ datawidth = 1;
+ src_step = 2;
+ des_step = 2;
+ } else {
+ datawidth = 0;
+ src_step = 1;
+ des_step = 1;
+ }
+
+ fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE;
+ if (len <= SPRD_DMA_BLK_LEN_MASK) {
+ block_len = len;
+		transaction_len = 0;
+ req_mode = SPRD_DMA_BLK_REQ;
+ irq_mode = SPRD_DMA_BLK_INT;
+ } else {
+ block_len = SPRD_DMA_MEMCPY_MIN_SIZE;
+		transaction_len = len;
+ req_mode = SPRD_DMA_TRANS_REQ;
+ irq_mode = SPRD_DMA_TRANS_INT;
+ }
+
+ hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
+ hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK);
+ hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
+ SPRD_DMA_HIGH_ADDR_MASK);
+
+ hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK);
+ hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK);
+
+ if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) {
+ fix_en = 0;
+ } else {
+ fix_en = 1;
+ if (src_step)
+ fix_mode = 1;
+ else
+ fix_mode = 0;
+ }
+
+ hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET |
+ datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET |
+ req_mode << SPRD_DMA_REQ_MODE_OFFSET |
+ fix_mode << SPRD_DMA_FIX_SEL_OFFSET |
+ fix_en << SPRD_DMA_FIX_EN_OFFSET |
+ (fragment_len & SPRD_DMA_FRG_LEN_MASK);
+ hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK;
+
+ hw->intc = SPRD_DMA_CFG_ERR_INT_EN;
+
+ switch (irq_mode) {
+ case SPRD_DMA_NO_INT:
+ break;
+
+ case SPRD_DMA_FRAG_INT:
+ hw->intc |= SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_BLK_INT:
+ hw->intc |= SPRD_DMA_BLK_INT_EN;
+ break;
+
+ case SPRD_DMA_BLK_FRAG_INT:
+ hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_FRAG_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN;
+ break;
+
+ case SPRD_DMA_TRANS_BLK_INT:
+ hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN;
+ break;
+
+ case SPRD_DMA_LIST_INT:
+ hw->intc |= SPRD_DMA_LIST_INT_EN;
+ break;
+
+ case SPRD_DMA_CFGERR_INT:
+ hw->intc |= SPRD_DMA_CFG_ERR_INT_EN;
+ break;
+
+ default:
+ dev_err(sdev->dma_dev.dev, "invalid irq mode\n");
+ return -EINVAL;
+ }
+
+	if (transaction_len == 0)
+ hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK;
+ else
+		hw->trsc_len = transaction_len & SPRD_DMA_TRSC_LEN_MASK;
+
+ hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) <<
+ SPRD_DMA_DEST_TRSF_STEP_OFFSET |
+ (src_step & SPRD_DMA_TRSF_STEP_MASK) <<
+ SPRD_DMA_SRC_TRSF_STEP_OFFSET;
+
+ hw->frg_step = 0;
+ hw->src_blk_step = 0;
+ hw->des_blk_step = 0;
+ return 0;
+}
+
+struct dma_async_tx_descriptor *
+sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_desc *sdesc;
+ int ret;
+
+ sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
+ if (!sdesc)
+ return NULL;
+
+ ret = sprd_dma_config(chan, sdesc, dest, src, len);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
+}
+
+static int sprd_dma_pause(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_pause_resume(schan, true);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ return 0;
+}
+
+static int sprd_dma_resume(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_pause_resume(schan, false);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ return 0;
+}
+
+static int sprd_dma_terminate_all(struct dma_chan *chan)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&schan->vc.lock, flags);
+ sprd_dma_stop(schan);
+
+ vchan_get_all_descriptors(&schan->vc, &head);
+ spin_unlock_irqrestore(&schan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&schan->vc, &head);
+ return 0;
+}
+
+static void sprd_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+
+ kfree(sdesc);
+}
+
+static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+ u32 req = *(u32 *)param;
+
+ if (req < sdev->total_chns)
+ return req == schan->chn_num + 1;
+ else
+ return false;
+}
+
+static int sprd_dma_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sprd_dma_dev *sdev;
+ struct sprd_dma_chn *dma_chn;
+ struct resource *res;
+ u32 chn_count;
+ int ret, i;
+
+ ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
+ if (ret) {
+ dev_err(&pdev->dev, "get dma channels count failed\n");
+ return ret;
+ }
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) +
+ sizeof(*dma_chn) * chn_count,
+ GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(sdev->clk)) {
+ dev_err(&pdev->dev, "get enable clock failed\n");
+ return PTR_ERR(sdev->clk);
+ }
+
+ /* ashb clock is optional for AGCP DMA */
+ sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
+ if (IS_ERR(sdev->ashb_clk))
+ dev_warn(&pdev->dev, "no optional ashb eb clock\n");
+
+ /*
+	 * There are three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
+	 * AGCP DMA controller may be used without requesting its irq, which
+	 * saves system power by not waking the system up on DMA interrupts.
+	 * Thus the DMA interrupts property is optional.
+ */
+ sdev->irq = platform_get_irq(pdev, 0);
+ if (sdev->irq > 0) {
+ ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
+ 0, "sprd_dma", (void *)sdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request dma irq failed\n");
+ return ret;
+ }
+ } else {
+ dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!sdev->glb_base)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
+ sdev->total_chns = chn_count;
+ sdev->dma_dev.chancnt = chn_count;
+ INIT_LIST_HEAD(&sdev->dma_dev.channels);
+ INIT_LIST_HEAD(&sdev->dma_dev.global_node);
+ sdev->dma_dev.dev = &pdev->dev;
+ sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
+ sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
+ sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
+ sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
+ sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
+ sdev->dma_dev.device_pause = sprd_dma_pause;
+ sdev->dma_dev.device_resume = sprd_dma_resume;
+ sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;
+
+ for (i = 0; i < chn_count; i++) {
+ dma_chn = &sdev->channels[i];
+ dma_chn->chn_num = i;
+ dma_chn->cur_desc = NULL;
+ /* get each channel's registers base address. */
+ dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
+ SPRD_DMA_CHN_REG_LENGTH * i;
+
+ dma_chn->vc.desc_free = sprd_dma_free_desc;
+ vchan_init(&dma_chn->vc, &sdev->dma_dev);
+ }
+
+ platform_set_drvdata(pdev, sdev);
+ ret = sprd_dma_enable(sdev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto err_rpm;
+
+ ret = dma_async_device_register(&sdev->dma_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
+ goto err_register;
+ }
+
+ sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
+ ret = of_dma_controller_register(np, of_dma_simple_xlate,
+ &sprd_dma_info);
+ if (ret)
+ goto err_of_register;
+
+ pm_runtime_put(&pdev->dev);
+ return 0;
+
+err_of_register:
+ dma_async_device_unregister(&sdev->dma_dev);
+err_register:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+err_rpm:
+ sprd_dma_disable(sdev);
+ return ret;
+}
+
+static int sprd_dma_remove(struct platform_device *pdev)
+{
+ struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
+ struct sprd_dma_chn *c, *cn;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ /* explicitly free the irq */
+ if (sdev->irq > 0)
+ devm_free_irq(&pdev->dev, sdev->irq, sdev);
+
+ list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
+ vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ }
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&sdev->dma_dev);
+ sprd_dma_disable(sdev);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id sprd_dma_match[] = {
+ { .compatible = "sprd,sc9860-dma", },
+ {},
+};
+
+static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
+{
+ struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
+
+ sprd_dma_disable(sdev);
+ return 0;
+}
+
+static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
+{
+ struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = sprd_dma_enable(sdev);
+ if (ret)
+ dev_err(sdev->dma_dev.dev, "enable dma failed\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops sprd_dma_pm_ops = {
+ SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
+ sprd_dma_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver sprd_dma_driver = {
+ .probe = sprd_dma_probe,
+ .remove = sprd_dma_remove,
+ .driver = {
+ .name = "sprd-dma",
+ .of_match_table = sprd_dma_match,
+ .pm = &sprd_dma_pm_ops,
+ },
+};
+module_platform_driver(sprd_dma_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DMA driver for Spreadtrum");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
+MODULE_ALIAS("platform:sprd-dma");
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
new file mode 100644
index 000000000000..d5db0f6e1ff8
--- /dev/null
+++ b/drivers/dma/stm32-dmamux.c
@@ -0,0 +1,327 @@
+/*
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ * Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * DMA Router driver for STM32 DMA MUX
+ *
+ * Based on TI DMA Crossbar driver
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define STM32_DMAMUX_CCR(x) (0x4 * (x))
+#define STM32_DMAMUX_MAX_DMA_REQUESTS 32
+#define STM32_DMAMUX_MAX_REQUESTS 255
+
+struct stm32_dmamux {
+ u32 master;
+ u32 request;
+ u32 chan_id;
+};
+
+struct stm32_dmamux_data {
+ struct dma_router dmarouter;
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *iomem;
+ u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
+ u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
+ spinlock_t lock; /* Protects register access */
+ unsigned long *dma_inuse; /* Used DMA channel */
+	u32 dma_reqs[]; /* Number of DMA requests per DMA master.
+			 * [0] holds the number of DMA masters.
+			 * To be kept at the very end of this structure.
+			 */
+};
+
+static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
+{
+ return readl_relaxed(iomem + reg);
+}
+
+static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
+{
+ writel_relaxed(val, iomem + reg);
+}
+
+static void stm32_dmamux_free(struct device *dev, void *route_data)
+{
+ struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
+ struct stm32_dmamux *mux = route_data;
+ unsigned long flags;
+
+ /* Clear dma request */
+ spin_lock_irqsave(&dmamux->lock, flags);
+
+ stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
+ clear_bit(mux->chan_id, dmamux->dma_inuse);
+
+ if (!IS_ERR(dmamux->clk))
+ clk_disable(dmamux->clk);
+
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
+ mux->request, mux->master, mux->chan_id);
+
+ kfree(mux);
+}
+
+static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+ struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
+ struct stm32_dmamux *mux;
+ u32 i, min, max;
+ int ret;
+ unsigned long flags;
+
+ if (dma_spec->args_count != 3) {
+ dev_err(&pdev->dev, "invalid number of dma mux args\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (dma_spec->args[0] > dmamux->dmamux_requests) {
+ dev_err(&pdev->dev, "invalid mux request number: %d\n",
+ dma_spec->args[0]);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&dmamux->lock, flags);
+ mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
+ dmamux->dma_requests);
+ set_bit(mux->chan_id, dmamux->dma_inuse);
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ if (mux->chan_id == dmamux->dma_requests) {
+ dev_err(&pdev->dev, "Run out of free DMA requests\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Look for DMA Master */
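+	/*
+	 * dma_reqs[1..n] hold the request count of each master; walk the
+	 * cumulative [min, max) windows until the one containing chan_id is
+	 * found, so (i - 1) identifies the master this channel belongs to.
+	 */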
+ for (i = 1, min = 0, max = dmamux->dma_reqs[i];
+ i <= dmamux->dma_reqs[0];
+ min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
+ if (mux->chan_id < max)
+ break;
+ mux->master = i - 1;
+
+ /* The of_node_put() will be done in of_dma_router_xlate function */
+ dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
+ if (!dma_spec->np) {
+ dev_err(&pdev->dev, "can't get dma master\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Set dma request */
+ spin_lock_irqsave(&dmamux->lock, flags);
+ if (!IS_ERR(dmamux->clk)) {
+ ret = clk_enable(dmamux->clk);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+ dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret);
+ goto error;
+ }
+ }
+ spin_unlock_irqrestore(&dmamux->lock, flags);
+
+ mux->request = dma_spec->args[0];
+
+ /* craft DMA spec */
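+	/*
+	 * Re-pack the spec into the DMA master's 4-cell format: cell 0 becomes
+	 * the local channel number on that master, cell 1 is cleared and the
+	 * original configuration cells are shifted up by one.
+	 */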
+ dma_spec->args[3] = dma_spec->args[2];
+ dma_spec->args[2] = dma_spec->args[1];
+ dma_spec->args[1] = 0;
+ dma_spec->args[0] = mux->chan_id - min;
+ dma_spec->args_count = 4;
+
+ stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
+ mux->request);
+ dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
+ mux->request, mux->master, mux->chan_id);
+
+ return mux;
+
+error:
+ clear_bit(mux->chan_id, dmamux->dma_inuse);
+ kfree(mux);
+ return ERR_PTR(ret);
+}
+
+static const struct of_device_id stm32_stm32dma_master_match[] = {
+ { .compatible = "st,stm32-dma", },
+ {},
+};
+
+static int stm32_dmamux_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct device_node *dma_node;
+ struct stm32_dmamux_data *stm32_dmamux;
+ struct resource *res;
+ void __iomem *iomem;
+ int i, count, ret;
+ u32 dma_req;
+
+ if (!node)
+ return -ENODEV;
+
+ count = device_property_read_u32_array(&pdev->dev, "dma-masters",
+ NULL, 0);
+ if (count < 0) {
+ dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
+ return -ENODEV;
+ }
+
+ stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
+ sizeof(u32) * (count + 1), GFP_KERNEL);
+ if (!stm32_dmamux)
+ return -ENOMEM;
+
+ dma_req = 0;
+ for (i = 1; i <= count; i++) {
+ dma_node = of_parse_phandle(node, "dma-masters", i - 1);
+
+ match = of_match_node(stm32_stm32dma_master_match, dma_node);
+ if (!match) {
+ dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dma_node, "dma-requests",
+ &stm32_dmamux->dma_reqs[i])) {
+ dev_info(&pdev->dev,
+ "Missing MUX output information, using %u.\n",
+ STM32_DMAMUX_MAX_DMA_REQUESTS);
+ stm32_dmamux->dma_reqs[i] =
+ STM32_DMAMUX_MAX_DMA_REQUESTS;
+ }
+ dma_req += stm32_dmamux->dma_reqs[i];
+ of_node_put(dma_node);
+ }
+
+ if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
+ dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
+ return -ENODEV;
+ }
+
+ stm32_dmamux->dma_requests = dma_req;
+ stm32_dmamux->dma_reqs[0] = count;
+ stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(dma_req),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!stm32_dmamux->dma_inuse)
+ return -ENOMEM;
+
+ if (device_property_read_u32(&pdev->dev, "dma-requests",
+ &stm32_dmamux->dmamux_requests)) {
+ stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
+ dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
+ stm32_dmamux->dmamux_requests);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ iomem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ spin_lock_init(&stm32_dmamux->lock);
+
+ stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(stm32_dmamux->clk)) {
+ ret = PTR_ERR(stm32_dmamux->clk);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&pdev->dev, "Missing controller clock\n");
+ return ret;
+ }
+
+ stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(stm32_dmamux->rst)) {
+ reset_control_assert(stm32_dmamux->rst);
+ udelay(2);
+ reset_control_deassert(stm32_dmamux->rst);
+ }
+
+ stm32_dmamux->iomem = iomem;
+ stm32_dmamux->dmarouter.dev = &pdev->dev;
+ stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;
+
+ platform_set_drvdata(pdev, stm32_dmamux);
+
+ if (!IS_ERR(stm32_dmamux->clk)) {
+ ret = clk_prepare_enable(stm32_dmamux->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* Reset the dmamux */
+ for (i = 0; i < stm32_dmamux->dma_requests; i++)
+ stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);
+
+ if (!IS_ERR(stm32_dmamux->clk))
+ clk_disable(stm32_dmamux->clk);
+
+ return of_dma_router_register(node, stm32_dmamux_route_allocate,
+ &stm32_dmamux->dmarouter);
+}
+
+static const struct of_device_id stm32_dmamux_match[] = {
+ { .compatible = "st,stm32h7-dmamux" },
+ {},
+};
+
+static struct platform_driver stm32_dmamux_driver = {
+ .probe = stm32_dmamux_probe,
+ .driver = {
+ .name = "stm32-dmamux",
+ .of_match_table = stm32_dmamux_match,
+ },
+};
+
+static int __init stm32_dmamux_init(void)
+{
+ return platform_driver_register(&stm32_dmamux_driver);
+}
+arch_initcall(stm32_dmamux_init);
+
+MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
new file mode 100644
index 000000000000..daa1602eb9f5
--- /dev/null
+++ b/drivers/dma/stm32-mdma.c
@@ -0,0 +1,1682 @@
+/*
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ * Pierre-Yves Mordret <pierre-yves.mordret@st.com>
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * Driver for STM32 MDMA controller
+ *
+ * Inspired by stm32-dma.c and dma-jz4780.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+/* MDMA Generic getter/setter */
+#define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
+#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
+ (mask))
+#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
+ STM32_MDMA_SHIFT(mask))
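+/*
+ * Example: STM32_MDMA_SET(2, GENMASK(7, 6)) shifts the value 2 into
+ * bits [7:6] of a register field; STM32_MDMA_GET() extracts it back.
+ */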
+
+#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
+#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
+
+/* MDMA Channel x interrupt/status register */
+#define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */
+#define STM32_MDMA_CISR_CRQA BIT(16)
+#define STM32_MDMA_CISR_TCIF BIT(4)
+#define STM32_MDMA_CISR_BTIF BIT(3)
+#define STM32_MDMA_CISR_BRTIF BIT(2)
+#define STM32_MDMA_CISR_CTCIF BIT(1)
+#define STM32_MDMA_CISR_TEIF BIT(0)
+
+/* MDMA Channel x interrupt flag clear register */
+#define STM32_MDMA_CIFCR(x) (0x44 + 0x40 * (x))
+#define STM32_MDMA_CIFCR_CLTCIF BIT(4)
+#define STM32_MDMA_CIFCR_CBTIF BIT(3)
+#define STM32_MDMA_CIFCR_CBRTIF BIT(2)
+#define STM32_MDMA_CIFCR_CCTCIF BIT(1)
+#define STM32_MDMA_CIFCR_CTEIF BIT(0)
+#define STM32_MDMA_CIFCR_CLEAR_ALL (STM32_MDMA_CIFCR_CLTCIF \
+ | STM32_MDMA_CIFCR_CBTIF \
+ | STM32_MDMA_CIFCR_CBRTIF \
+ | STM32_MDMA_CIFCR_CCTCIF \
+ | STM32_MDMA_CIFCR_CTEIF)
+
+/* MDMA Channel x error status register */
+#define STM32_MDMA_CESR(x) (0x48 + 0x40 * (x))
+#define STM32_MDMA_CESR_BSE BIT(11)
+#define STM32_MDMA_CESR_ASR BIT(10)
+#define STM32_MDMA_CESR_TEMD BIT(9)
+#define STM32_MDMA_CESR_TELD BIT(8)
+#define STM32_MDMA_CESR_TED BIT(7)
+#define STM32_MDMA_CESR_TEA_MASK GENMASK(6, 0)
+
+/* MDMA Channel x control register */
+#define STM32_MDMA_CCR(x) (0x4C + 0x40 * (x))
+#define STM32_MDMA_CCR_SWRQ BIT(16)
+#define STM32_MDMA_CCR_WEX BIT(14)
+#define STM32_MDMA_CCR_HEX BIT(13)
+#define STM32_MDMA_CCR_BEX BIT(12)
+#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
+#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CCR_PL_MASK)
+#define STM32_MDMA_CCR_TCIE BIT(5)
+#define STM32_MDMA_CCR_BTIE BIT(4)
+#define STM32_MDMA_CCR_BRTIE BIT(3)
+#define STM32_MDMA_CCR_CTCIE BIT(2)
+#define STM32_MDMA_CCR_TEIE BIT(1)
+#define STM32_MDMA_CCR_EN BIT(0)
+#define STM32_MDMA_CCR_IRQ_MASK (STM32_MDMA_CCR_TCIE \
+ | STM32_MDMA_CCR_BTIE \
+ | STM32_MDMA_CCR_BRTIE \
+ | STM32_MDMA_CCR_CTCIE \
+ | STM32_MDMA_CCR_TEIE)
+
+/* MDMA Channel x transfer configuration register */
+#define STM32_MDMA_CTCR(x) (0x50 + 0x40 * (x))
+#define STM32_MDMA_CTCR_BWM BIT(31)
+#define STM32_MDMA_CTCR_SWRM BIT(30)
+#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
+#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
+#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_PAM_MASK)
+#define STM32_MDMA_CTCR_PKE BIT(25)
+#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
+#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
+#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
+#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_DBURST_MASK)
+#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
+#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_SBURST_MASK)
+#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
+#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_DINCOS_MASK)
+#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
+#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_SINCOS_MASK)
+#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
+#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_DSIZE_MASK)
+#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
+#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTCR_SSIZE_MASK)
+#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
+#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_DINC_MASK)
+#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
+#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
+ STM32_MDMA_CTCR_SINC_MASK)
+#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
+ | STM32_MDMA_CTCR_DINC_MASK \
+ | STM32_MDMA_CTCR_SINCOS_MASK \
+ | STM32_MDMA_CTCR_DINCOS_MASK \
+ | STM32_MDMA_CTCR_LEN2_MSK \
+ | STM32_MDMA_CTCR_TRGM_MSK)
+
+/* MDMA Channel x block number of data register */
+#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
+#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
+#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBNDTR_BRC_MK)
+#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
+ STM32_MDMA_CBNDTR_BRC_MK)
+
+#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
+#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
+#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
+#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBNDTR_BNDT_MASK)
+
+/* MDMA Channel x source address register */
+#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
+
+/* MDMA Channel x destination address register */
+#define STM32_MDMA_CDAR(x) (0x5C + 0x40 * (x))
+
+/* MDMA Channel x block repeat address update register */
+#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
+#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
+#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBRUR_DUV_MASK)
+#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
+#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CBRUR_SUV_MASK)
+
+/* MDMA Channel x link address register */
+#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
+
+/* MDMA Channel x trigger and bus selection register */
+#define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x))
+#define STM32_MDMA_CTBR_DBUS BIT(17)
+#define STM32_MDMA_CTBR_SBUS BIT(16)
+#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0)
+#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
+ STM32_MDMA_CTBR_TSEL_MASK)
+
+/* MDMA Channel x mask address register */
+#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
+
+/* MDMA Channel x mask data register */
+#define STM32_MDMA_CMDR(x) (0x74 + 0x40 * (x))
+
+#define STM32_MDMA_MAX_BUF_LEN 128
+#define STM32_MDMA_MAX_BLOCK_LEN 65536
+#define STM32_MDMA_MAX_CHANNELS 63
+#define STM32_MDMA_MAX_REQUESTS 256
+#define STM32_MDMA_MAX_BURST 128
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+
+enum stm32_mdma_trigger_mode {
+ STM32_MDMA_BUFFER,
+ STM32_MDMA_BLOCK,
+ STM32_MDMA_BLOCK_REP,
+ STM32_MDMA_LINKED_LIST,
+};
+
+enum stm32_mdma_width {
+ STM32_MDMA_BYTE,
+ STM32_MDMA_HALF_WORD,
+ STM32_MDMA_WORD,
+ STM32_MDMA_DOUBLE_WORD,
+};
+
+enum stm32_mdma_inc_mode {
+ STM32_MDMA_FIXED = 0,
+ STM32_MDMA_INC = 2,
+ STM32_MDMA_DEC = 3,
+};
+
+struct stm32_mdma_chan_config {
+ u32 request;
+ u32 priority_level;
+ u32 transfer_config;
+ u32 mask_addr;
+ u32 mask_data;
+};
+
+struct stm32_mdma_hwdesc {
+ u32 ctcr;
+ u32 cbndtr;
+ u32 csar;
+ u32 cdar;
+ u32 cbrur;
+ u32 clar;
+ u32 ctbr;
+ u32 dummy;
+ u32 cmar;
+ u32 cmdr;
+} __aligned(64);
+
+struct stm32_mdma_desc {
+ struct virt_dma_desc vdesc;
+ u32 ccr;
+ struct stm32_mdma_hwdesc *hwdesc;
+ dma_addr_t hwdesc_phys;
+ bool cyclic;
+ u32 count;
+};
+
+struct stm32_mdma_chan {
+ struct virt_dma_chan vchan;
+ struct dma_pool *desc_pool;
+ u32 id;
+ struct stm32_mdma_desc *desc;
+ u32 curr_hwdesc;
+ struct dma_slave_config dma_config;
+ struct stm32_mdma_chan_config chan_config;
+ bool busy;
+ u32 mem_burst;
+ u32 mem_width;
+};
+
+struct stm32_mdma_device {
+ struct dma_device ddev;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ struct reset_control *rst;
+ u32 nr_channels;
+ u32 nr_requests;
+ u32 nr_ahb_addr_masks;
+ struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
+ u32 ahb_addr_masks[];
+};
+
+static struct stm32_mdma_device *stm32_mdma_get_dev(
+ struct stm32_mdma_chan *chan)
+{
+ return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
+ ddev);
+}
+
+static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct stm32_mdma_chan, vchan.chan);
+}
+
+static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct stm32_mdma_desc, vdesc);
+}
+
+static struct device *chan2dev(struct stm32_mdma_chan *chan)
+{
+ return &chan->vchan.chan.dev->device;
+}
+
+static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
+{
+ return mdma_dev->ddev.dev;
+}
+
+static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
+{
+ return readl_relaxed(dmadev->base + reg);
+}
+
+static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
+{
+ writel_relaxed(val, dmadev->base + reg);
+}
+
+static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
+ u32 mask)
+{
+ void __iomem *addr = dmadev->base + reg;
+
+ writel_relaxed(readl_relaxed(addr) | mask, addr);
+}
+
+static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
+ u32 mask)
+{
+ void __iomem *addr = dmadev->base + reg;
+
+ writel_relaxed(readl_relaxed(addr) & ~mask, addr);
+}
+
+static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
+ struct stm32_mdma_chan *chan, u32 count)
+{
+ struct stm32_mdma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
+ &desc->hwdesc_phys);
+ if (!desc->hwdesc) {
+ dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
+ kfree(desc);
+ return NULL;
+ }
+
+ desc->count = count;
+
+ return desc;
+}
+
+static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
+
+ dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+}
+
+static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
+ enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
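+		/* The register encoding is log2 of the byte width: 1/2/4/8 -> 0/1/2/3 */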
+ return ffs(width) - 1;
+ default:
+ dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
+ width);
+ return -EINVAL;
+ }
+}
+
+static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
+ u32 buf_len, u32 tlen)
+{
+ enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+
+ for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
+ max_width >>= 1) {
+ /*
+ * Address and buffer length both have to be aligned on
+ * bus width
+ */
+ if ((((buf_len | addr) & (max_width - 1)) == 0) &&
+ tlen >= max_width)
+ break;
+ }
+
+ return max_width;
+}
+
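+/*
+ * Halve the burst until burst size * bus width evenly divides the TLEN
+ * buffer length, falling back to a single transfer (burst of 1).
+ */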
+static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
+ enum dma_slave_buswidth width)
+{
+ u32 best_burst = max_burst;
+ u32 burst_len = best_burst * width;
+
+ while ((burst_len > 0) && (tlen % burst_len)) {
+ best_burst = best_burst >> 1;
+ burst_len = best_burst * width;
+ }
+
+ return (best_burst > 0) ? best_burst : 1;
+}
+
+static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 ccr, cisr, id, reg;
+ int ret;
+
+ id = chan->id;
+ reg = STM32_MDMA_CCR(id);
+
+ /* Disable interrupts */
+ stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);
+
+ ccr = stm32_mdma_read(dmadev, reg);
+ if (ccr & STM32_MDMA_CCR_EN) {
+ stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+ /* Ensure that any ongoing transfer has been completed */
+ ret = readl_relaxed_poll_timeout_atomic(
+ dmadev->base + STM32_MDMA_CISR(id), cisr,
+ (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
+ if (ret) {
+ dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 status;
+ int ret;
+
+ /* Disable DMA */
+ ret = stm32_mdma_disable_chan(chan);
+ if (ret < 0)
+ return;
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ if (status) {
+ dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
+ __func__, status);
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+ }
+
+ chan->busy = false;
+}
+
+static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
+ u32 ctbr_mask, u32 src_addr)
+{
+ u32 mask;
+ int i;
+
+ /* Check if memory device is on AHB or AXI */
+ *ctbr &= ~ctbr_mask;
+ mask = src_addr & 0xF0000000;
+ for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
+ if (mask == dmadev->ahb_addr_masks[i]) {
+ *ctbr |= ctbr_mask;
+ break;
+ }
+ }
+}
+
+static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
+ enum dma_transfer_direction direction,
+ u32 *mdma_ccr, u32 *mdma_ctcr,
+ u32 *mdma_ctbr, dma_addr_t addr,
+ u32 buf_len)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
+ phys_addr_t src_addr, dst_addr;
+ int src_bus_width, dst_bus_width;
+ u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
+ u32 ccr, ctcr, ctbr, tlen;
+
+ src_addr_width = chan->dma_config.src_addr_width;
+ dst_addr_width = chan->dma_config.dst_addr_width;
+ src_maxburst = chan->dma_config.src_maxburst;
+ dst_maxburst = chan->dma_config.dst_maxburst;
+
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+
+ /* Enable HW request mode */
+ ctcr &= ~STM32_MDMA_CTCR_SWRM;
+
+	/* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */
+ ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
+ ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;
+
+ /*
+ * For buffer transfer length (TLEN) we have to set
+ * the number of bytes - 1 in CTCR register
+ */
+ tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
+ ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
+ ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+ /* Disable Pack Enable */
+ ctcr &= ~STM32_MDMA_CTCR_PKE;
+
+ /* Check burst size constraints */
+ if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
+ dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
+ dev_err(chan2dev(chan),
+ "burst size * bus width higher than %d bytes\n",
+ STM32_MDMA_MAX_BURST);
+ return -EINVAL;
+ }
+
+ if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
+ (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
+ dev_err(chan2dev(chan), "burst size must be a power of 2\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Configure channel control:
+ * - Clear SW request as in this case this is a HW one
+ * - Clear WEX, HEX and BEX bits
+ * - Set priority level
+ */
+ ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+ STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
+ ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);
+
+ /* Configure Trigger selection */
+ ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+ ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ dst_addr = chan->dma_config.dst_addr;
+
+ /* Set device data size */
+ dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+ ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
+ ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);
+
+ /* Set device burst value */
+ dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ dst_maxburst,
+ dst_addr_width);
+ chan->mem_burst = dst_best_burst;
+ ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+ /* Set memory data size */
+ src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+ chan->mem_width = src_addr_width;
+ src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+		ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
+			  STM32_MDMA_CTCR_SINCOS_MASK);
+ ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set memory burst value */
+ src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
+ src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ src_maxburst,
+ src_addr_width);
+ chan->mem_burst = src_best_burst;
+ ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Set destination address */
+ stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
+ break;
+
+ case DMA_DEV_TO_MEM:
+ src_addr = chan->dma_config.src_addr;
+
+ /* Set device data size */
+ src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
+ if (src_bus_width < 0)
+ return src_bus_width;
+ ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
+ ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);
+
+ /* Set device burst value */
+ src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ src_maxburst,
+ src_addr_width);
+ ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));
+
+ /* Set memory data size */
+ dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
+ chan->mem_width = dst_addr_width;
+ dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
+ if (dst_bus_width < 0)
+ return dst_bus_width;
+ ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
+ STM32_MDMA_CTCR_DINCOS_MASK);
+ ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ /* Set memory burst value */
+ dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
+ dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
+ dst_maxburst,
+ dst_addr_width);
+ ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
+ ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Set source address */
+ stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
+ break;
+
+ default:
+ dev_err(chan2dev(chan), "Dma direction is not supported\n");
+ return -EINVAL;
+ }
+
+ *mdma_ccr = ccr;
+ *mdma_ctcr = ctcr;
+ *mdma_ctbr = ctbr;
+
+ return 0;
+}
+
+static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_hwdesc *hwdesc)
+{
+ dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc);
+ dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr);
+ dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr);
+ dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar);
+ dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar);
+ dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur);
+ dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar);
+ dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr);
+ dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar);
+ dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr);
+}
+
+static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ enum dma_transfer_direction dir, u32 count,
+ dma_addr_t src_addr, dma_addr_t dst_addr,
+ u32 len, u32 ctcr, u32 ctbr, bool is_last,
+ bool is_first, bool is_cyclic)
+{
+ struct stm32_mdma_chan_config *config = &chan->chan_config;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 next = count + 1;
+
+ hwdesc = &desc->hwdesc[count];
+ hwdesc->ctcr = ctcr;
+ hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
+ STM32_MDMA_CBNDTR_BRDUM |
+ STM32_MDMA_CBNDTR_BRSUM |
+ STM32_MDMA_CBNDTR_BNDT_MASK);
+ hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+ hwdesc->csar = src_addr;
+ hwdesc->cdar = dst_addr;
+ hwdesc->cbrur = 0;
+ hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc);
+ hwdesc->ctbr = ctbr;
+ hwdesc->cmar = config->mask_addr;
+ hwdesc->cmdr = config->mask_data;
+
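+	/*
+	 * On the last descriptor, link back to the first one for cyclic
+	 * transfers, otherwise terminate the list.
+	 */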
+ if (is_last) {
+ if (is_cyclic)
+ hwdesc->clar = desc->hwdesc_phys;
+ else
+ hwdesc->clar = 0;
+ }
+
+ stm32_mdma_dump_hwdesc(chan, hwdesc);
+}
+
+static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ struct scatterlist *sgl, u32 sg_len,
+ enum dma_transfer_direction direction)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct dma_slave_config *dma_config = &chan->dma_config;
+ struct scatterlist *sg;
+ dma_addr_t src_addr, dst_addr;
+ u32 ccr, ctcr, ctbr;
+ int i, ret = 0;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
+ dev_err(chan2dev(chan), "Invalid block len\n");
+ return -EINVAL;
+ }
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = dma_config->dst_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+ &ctcr, &ctbr, src_addr,
+ sg_dma_len(sg));
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+ } else {
+ src_addr = dma_config->src_addr;
+ dst_addr = sg_dma_address(sg);
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
+ &ctcr, &ctbr, dst_addr,
+ sg_dma_len(sg));
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+ dst_addr, sg_dma_len(sg), ctcr, ctbr,
+ i == sg_len - 1, i == 0, false);
+ }
+
+ /* Enable interrupts */
+ ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
+ if (sg_len > 1)
+ ccr |= STM32_MDMA_CCR_BTIE;
+ desc->ccr = ccr;
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
+ u32 sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_desc *desc;
+ int ret;
+
+	/*
+	 * Once the channel is set up in cyclic mode, it cannot be assigned
+	 * to another request. The DMA channel needs to be aborted or
+	 * terminated before allowing another request.
+	 */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ desc = stm32_mdma_alloc_desc(chan, sg_len);
+ if (!desc)
+ return NULL;
+
+ ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
+ if (ret < 0)
+ goto xfer_setup_err;
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+	dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct dma_slave_config *dma_config = &chan->dma_config;
+ struct stm32_mdma_desc *desc;
+ dma_addr_t src_addr, dst_addr;
+ u32 ccr, ctcr, ctbr, count;
+ int i, ret;
+
+	/*
+	 * Once the channel is set up in cyclic mode, it cannot be assigned
+	 * to another request. The DMA channel needs to be aborted or
+	 * terminated before allowing another request.
+	 */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
+ dev_err(chan2dev(chan), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (buf_len % period_len) {
+ dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
+ return NULL;
+ }
+
+ count = buf_len / period_len;
+
+ desc = stm32_mdma_alloc_desc(chan, count);
+ if (!desc)
+ return NULL;
+
+ /* Select bus */
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+ &ctbr, src_addr, period_len);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
+ src_addr);
+ } else {
+ dst_addr = buf_addr;
+ ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
+ &ctbr, dst_addr, period_len);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
+ dst_addr);
+ }
+
+ if (ret < 0)
+ goto xfer_setup_err;
+
+ /* Enable interrupts */
+ ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
+ ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
+ desc->ccr = ccr;
+
+ /* Configure hwdesc list */
+ for (i = 0; i < count; i++) {
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = buf_addr + i * period_len;
+ dst_addr = dma_config->dst_addr;
+ } else {
+ src_addr = dma_config->src_addr;
+ dst_addr = buf_addr + i * period_len;
+ }
+
+ stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
+ dst_addr, period_len, ctcr, ctbr,
+ i == count - 1, i == 0, true);
+ }
+
+ desc->cyclic = true;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+xfer_setup_err:
+	dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys);
+ kfree(desc);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ enum dma_slave_buswidth max_width;
+ struct stm32_mdma_desc *desc;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
+ u32 best_burst, tlen;
+ size_t xfer_count, offset;
+ int src_bus_width, dst_bus_width;
+ int i;
+
+	/*
+	 * Once the channel is set up in cyclic mode, it cannot be assigned
+	 * to another request. The DMA channel needs to be aborted or
+	 * terminated to allow another request.
+	 */
+ if (chan->desc && chan->desc->cyclic) {
+ dev_err(chan2dev(chan),
+ "Request not allowed when dma in cyclic mode\n");
+ return NULL;
+ }
+
+ count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
+ desc = stm32_mdma_alloc_desc(chan, count);
+ if (!desc)
+ return NULL;
+
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
+ ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+
+ /* Enable sw req, some interrupts and clear other bits */
+ ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
+ STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
+ STM32_MDMA_CCR_IRQ_MASK);
+ ccr |= STM32_MDMA_CCR_TEIE;
+
+ /* Enable SW request mode, dest/src inc and clear other bits */
+ ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
+ STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
+ STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
+ STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
+ STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
+ STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
+ STM32_MDMA_CTCR_SINC_MASK);
+ ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
+ STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
+
+ /* Reset HW request */
+ ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
+
+ /* Select bus */
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
+ stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
+
+ /* Clear CBNDTR registers */
+ cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
+ STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
+
+ if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
+ cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
+ if (len <= STM32_MDMA_MAX_BUF_LEN) {
+ /* Setup a buffer transfer */
+ ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
+ } else {
+ /* Setup a block transfer */
+ ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
+ }
+
+ tlen = STM32_MDMA_MAX_BUF_LEN;
+ ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
+
+ /* Set source best burst size */
+ max_width = stm32_mdma_get_max_width(src, len, tlen);
+ src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+ STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set destination best burst size */
+ max_width = stm32_mdma_get_max_width(dest, len, tlen);
+ dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+ STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Prepare hardware descriptor */
+ hwdesc = desc->hwdesc;
+ hwdesc->ctcr = ctcr;
+ hwdesc->cbndtr = cbndtr;
+ hwdesc->csar = src;
+ hwdesc->cdar = dest;
+ hwdesc->cbrur = 0;
+ hwdesc->clar = 0;
+ hwdesc->ctbr = ctbr;
+ hwdesc->cmar = 0;
+ hwdesc->cmdr = 0;
+
+ stm32_mdma_dump_hwdesc(chan, hwdesc);
+ } else {
+ /* Setup a LLI transfer */
+ ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
+ STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
+ ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
+ tlen = STM32_MDMA_MAX_BUF_LEN;
+
+ for (i = 0, offset = 0; offset < len;
+ i++, offset += xfer_count) {
+ xfer_count = min_t(size_t, len - offset,
+ STM32_MDMA_MAX_BLOCK_LEN);
+
+ /* Set source best burst size */
+ max_width = stm32_mdma_get_max_width(src, len, tlen);
+ src_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen,
+ max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
+ STM32_MDMA_CTCR_SSIZE(src_bus_width) |
+ STM32_MDMA_CTCR_SINCOS(src_bus_width);
+
+ /* Set destination best burst size */
+ max_width = stm32_mdma_get_max_width(dest, len, tlen);
+ dst_bus_width = stm32_mdma_get_width(chan, max_width);
+
+ max_burst = tlen / max_width;
+ best_burst = stm32_mdma_get_best_burst(len, tlen,
+ max_burst,
+ max_width);
+ mdma_burst = ilog2(best_burst);
+
+ ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
+ STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
+ STM32_MDMA_CTCR_DINCOS(dst_bus_width);
+
+ if (dst_bus_width != src_bus_width)
+ ctcr |= STM32_MDMA_CTCR_PKE;
+
+ /* Prepare hardware descriptor */
+ stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
+ src + offset, dest + offset,
+ xfer_count, ctcr, ctbr,
+ i == count - 1, i == 0, false);
+ }
+ }
+
+ desc->ccr = ccr;
+
+ desc->cyclic = false;
+
+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+
+ dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
+ dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
+ dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
+ dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
+ dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
+ dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
+ dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
+ stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
+}
+
+static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct virt_dma_desc *vdesc;
+ struct stm32_mdma_hwdesc *hwdesc;
+ u32 id = chan->id;
+ u32 status, reg;
+
+ vdesc = vchan_next_desc(&chan->vchan);
+ if (!vdesc) {
+ chan->desc = NULL;
+ return;
+ }
+
+ chan->desc = to_stm32_mdma_desc(vdesc);
+ hwdesc = chan->desc->hwdesc;
+ chan->curr_hwdesc = 0;
+
+ stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
+ stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
+ stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
+ stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+ if (status)
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
+
+ stm32_mdma_dump_reg(chan);
+
+ /* Start DMA */
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
+
+ /* Set SW request in case of MEM2MEM transfer */
+ if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
+ reg = STM32_MDMA_CCR(id);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+ }
+
+ chan->busy = true;
+
+ dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
+}
+
+static void stm32_mdma_issue_pending(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ if (!vchan_issue_pending(&chan->vchan))
+ goto end;
+
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+
+ if (!chan->desc && !chan->busy)
+ stm32_mdma_start_transfer(chan);
+
+end:
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static int stm32_mdma_pause(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ ret = stm32_mdma_disable_chan(chan);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ if (!ret)
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan);
+
+ return ret;
+}
+
+static int stm32_mdma_resume(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ struct stm32_mdma_hwdesc *hwdesc;
+ unsigned long flags;
+ u32 status, reg;
+
+ hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc];
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ /* Re-configure control register */
+ stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
+
+ /* Clear interrupt status if it is there */
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ if (status)
+ stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
+
+ stm32_mdma_dump_reg(chan);
+
+ /* Re-start DMA */
+ reg = STM32_MDMA_CCR(chan->id);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
+
+ /* Set SW request in case of MEM2MEM transfer */
+ if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan);
+
+ return 0;
+}
+
+static int stm32_mdma_terminate_all(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ if (chan->busy) {
+ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ }
+ vchan_get_all_descriptors(&chan->vchan, &head);
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&chan->vchan, &head);
+
+ return 0;
+}
+
+static void stm32_mdma_synchronize(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+ vchan_synchronize(&chan->vchan);
+}
+
+static int stm32_mdma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+
+ memcpy(&chan->dma_config, config, sizeof(*config));
+
+ return 0;
+}
+
+static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
+ struct stm32_mdma_desc *desc,
+ u32 curr_hwdesc)
+{
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ u32 cbndtr, residue, modulo, burst_size;
+ int i;
+
+ residue = 0;
+ for (i = curr_hwdesc + 1; i < desc->count; i++) {
+ struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i];
+
+ residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
+ }
+ cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
+ residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
+ if (!chan->mem_burst)
+ return residue;
+
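+	/* Round the residue up to the next memory burst boundary */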
+ burst_size = chan->mem_burst * chan->mem_width;
+ modulo = residue % burst_size;
+ if (modulo)
+ residue = residue - modulo + burst_size;
+
+ return residue;
+}
+
+static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+ u32 residue = 0;
+
+ status = dma_cookie_status(c, cookie, state);
+ if ((status == DMA_COMPLETE) || (!state))
+ return status;
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+ vdesc = vchan_find_desc(&chan->vchan, cookie);
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
+ residue = stm32_mdma_desc_residue(chan, chan->desc,
+ chan->curr_hwdesc);
+ else if (vdesc)
+ residue = stm32_mdma_desc_residue(chan,
+ to_stm32_mdma_desc(vdesc), 0);
+ dma_set_residue(state, residue);
+
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+ return status;
+}
+
+static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+{
+ list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ chan->busy = false;
+
+	/* Start the next transfer if another descriptor is pending */
+ stm32_mdma_start_transfer(chan);
+}
+
+static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
+{
+ struct stm32_mdma_device *dmadev = devid;
+ struct stm32_mdma_chan *chan = devid;
+ u32 reg, id, ien, status, flag;
+
+ /* Find out which channel generates the interrupt */
+ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
+ if (status) {
+ id = __ffs(status);
+ } else {
+ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
+ if (!status) {
+ dev_dbg(mdma2dev(dmadev), "spurious it\n");
+ return IRQ_NONE;
+ }
+ id = __ffs(status);
+		/*
+		 * GISR0 provides status for channel ids 0 to 31, while
+		 * GISR1 provides status for channel ids 32 to 62.
+		 */
+ id += 32;
+ }
+
+ chan = &dmadev->chan[id];
+ if (!chan) {
+ dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+ goto exit;
+ }
+
+ /* Handle interrupt for the channel */
+ spin_lock(&chan->vchan.lock);
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+ ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
+ ien &= STM32_MDMA_CCR_IRQ_MASK;
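+	/* CCR enable bits [5:1] line up with the CISR flag bits [4:0] once shifted */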
+ ien >>= 1;
+
+ if (!(status & ien)) {
+ spin_unlock(&chan->vchan.lock);
+ dev_dbg(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n",
+ status, ien);
+ return IRQ_NONE;
+ }
+
+ flag = __ffs(status & ien);
+ reg = STM32_MDMA_CIFCR(chan->id);
+
+ switch (1 << flag) {
+ case STM32_MDMA_CISR_TEIF:
+ id = chan->id;
+ status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
+ dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
+ break;
+
+ case STM32_MDMA_CISR_CTCIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+ stm32_mdma_xfer_end(chan);
+ break;
+
+ case STM32_MDMA_CISR_BRTIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
+ break;
+
+ case STM32_MDMA_CISR_BTIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+ chan->curr_hwdesc++;
+ if (chan->desc && chan->desc->cyclic) {
+ if (chan->curr_hwdesc == chan->desc->count)
+ chan->curr_hwdesc = 0;
+ vchan_cyclic_callback(&chan->desc->vdesc);
+ }
+ break;
+
+ case STM32_MDMA_CISR_TCIF:
+ stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
+ break;
+
+ default:
+ dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
+ 1 << flag, status);
+ }
+
+ spin_unlock(&chan->vchan.lock);
+
+exit:
+ return IRQ_HANDLED;
+}
+
+static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ int ret;
+
+ chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
+ c->device->dev,
+ sizeof(struct stm32_mdma_hwdesc),
+ __alignof__(struct stm32_mdma_hwdesc),
+ 0);
+ if (!chan->desc_pool) {
+ dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(dmadev->clk);
+ if (ret < 0) {
+ dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_mdma_disable_chan(chan);
+ if (ret < 0)
+ clk_disable_unprepare(dmadev->clk);
+
+ return ret;
+}
+
+static void stm32_mdma_free_chan_resources(struct dma_chan *c)
+{
+ struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
+ struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
+ unsigned long flags;
+
+ dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
+
+ if (chan->busy) {
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ }
+
+ clk_disable_unprepare(dmadev->clk);
+ vchan_free_chan_resources(to_virt_chan(c));
+ dmam_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
+ struct stm32_mdma_chan *chan;
+ struct dma_chan *c;
+ struct stm32_mdma_chan_config config;
+
+ if (dma_spec->args_count < 5) {
+ dev_err(mdma2dev(dmadev), "Bad number of args\n");
+ return NULL;
+ }
+
+ config.request = dma_spec->args[0];
+ config.priority_level = dma_spec->args[1];
+ config.transfer_config = dma_spec->args[2];
+ config.mask_addr = dma_spec->args[3];
+ config.mask_data = dma_spec->args[4];
+
+ if (config.request >= dmadev->nr_requests) {
+ dev_err(mdma2dev(dmadev), "Bad request line\n");
+ return NULL;
+ }
+
+ if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
+ dev_err(mdma2dev(dmadev), "Priority level not supported\n");
+ return NULL;
+ }
+
+ c = dma_get_any_slave_channel(&dmadev->ddev);
+ if (!c) {
+ dev_err(mdma2dev(dmadev), "No more channel avalaible\n");
+ return NULL;
+ }
+
+ chan = to_stm32_mdma_chan(c);
+ chan->chan_config = config;
+
+ return c;
+}
+
+static const struct of_device_id stm32_mdma_of_match[] = {
+ { .compatible = "st,stm32h7-mdma", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
+
+static int stm32_mdma_probe(struct platform_device *pdev)
+{
+ struct stm32_mdma_chan *chan;
+ struct stm32_mdma_device *dmadev;
+ struct dma_device *dd;
+ struct device_node *of_node;
+ struct resource *res;
+ u32 nr_channels, nr_requests;
+ int i, count, ret;
+
+ of_node = pdev->dev.of_node;
+ if (!of_node)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "dma-channels",
+ &nr_channels);
+ if (ret) {
+ nr_channels = STM32_MDMA_MAX_CHANNELS;
+ dev_warn(&pdev->dev, "MDMA defaulting on %i channels\n",
+ nr_channels);
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "dma-requests",
+ &nr_requests);
+ if (ret) {
+ nr_requests = STM32_MDMA_MAX_REQUESTS;
+ dev_warn(&pdev->dev, "MDMA defaulting on %i request lines\n",
+ nr_requests);
+ }
+
+ count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ NULL, 0);
+ if (count < 0)
+ count = 0;
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
+ GFP_KERNEL);
+ if (!dmadev)
+ return -ENOMEM;
+
+ dmadev->nr_channels = nr_channels;
+ dmadev->nr_requests = nr_requests;
+ device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+ dmadev->ahb_addr_masks,
+ count);
+ dmadev->nr_ahb_addr_masks = count;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmadev->base))
+ return PTR_ERR(dmadev->base);
+
+ dmadev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dmadev->clk)) {
+ ret = PTR_ERR(dmadev->clk);
+ if (ret == -EPROBE_DEFER)
+ dev_info(&pdev->dev, "Missing controller clock\n");
+ return ret;
+ }
+
+ dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(dmadev->rst)) {
+ reset_control_assert(dmadev->rst);
+ udelay(2);
+ reset_control_deassert(dmadev->rst);
+ }
+
+ dd = &dmadev->ddev;
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+ dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
+ dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
+ dd->device_tx_status = stm32_mdma_tx_status;
+ dd->device_issue_pending = stm32_mdma_issue_pending;
+ dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
+ dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
+ dd->device_config = stm32_mdma_slave_config;
+ dd->device_pause = stm32_mdma_pause;
+ dd->device_resume = stm32_mdma_resume;
+ dd->device_terminate_all = stm32_mdma_terminate_all;
+ dd->device_synchronize = stm32_mdma_synchronize;
+ dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_MEM_TO_MEM);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ dd->max_burst = STM32_MDMA_MAX_BURST;
+ dd->dev = &pdev->dev;
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < dmadev->nr_channels; i++) {
+ chan = &dmadev->chan[i];
+ chan->id = i;
+ chan->vchan.desc_free = stm32_mdma_desc_free;
+ vchan_init(&chan->vchan, dd);
+ }
+
+ dmadev->irq = platform_get_irq(pdev, 0);
+ if (dmadev->irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ return dmadev->irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
+ 0, dev_name(&pdev->dev), dmadev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret)
+ return ret;
+
+ ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "STM32 MDMA DMA OF registration failed %d\n", ret);
+ goto err_unregister;
+ }
+
+ platform_set_drvdata(pdev, dmadev);
+
+ dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
+
+ return 0;
+
+err_unregister:
+ dma_async_device_unregister(dd);
+
+ return ret;
+}
+
+static struct platform_driver stm32_mdma_driver = {
+ .probe = stm32_mdma_probe,
+ .driver = {
+ .name = "stm32-mdma",
+ .of_match_table = stm32_mdma_of_match,
+ },
+};
+
+static int __init stm32_mdma_init(void)
+{
+ return platform_driver_register(&stm32_mdma_driver);
+}
+
+subsys_initcall(stm32_mdma_init);
+
+MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index bcd496edc70f..0cd13f17fc11 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -42,12 +42,18 @@
#define DMA_STAT 0x30
+/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */
+#define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4)
+
/*
* sun8i specific registers
*/
#define SUN8I_DMA_GATE 0x20
#define SUN8I_DMA_GATE_ENABLE 0x4
+#define SUNXI_H3_SECURE_REG 0x20
+#define SUNXI_H3_DMA_GATE 0x28
+#define SUNXI_H3_DMA_GATE_ENABLE 0x4
/*
* Channels specific registers
*/
@@ -62,16 +68,19 @@
#define DMA_CHAN_LLI_ADDR 0x08
#define DMA_CHAN_CUR_CFG 0x0c
-#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f)
+#define DMA_CHAN_MAX_DRQ 0x1f
+#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ)
#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5)
-#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6)
#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9)
#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
-#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16)
+#define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
+#define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
#define DMA_CHAN_CUR_SRC 0x10
@@ -90,6 +99,9 @@
#define NORMAL_WAIT 8
#define DRQ_SDRAM 1
+/* forward declaration */
+struct sun6i_dma_dev;
+
/*
* Hardware channels / ports representation
*
@@ -111,7 +123,12 @@ struct sun6i_dma_config {
* however these SoCs really have and need this bit, as seen in the
* BSP kernel source code.
*/
- bool gate_needed;
+ void (*clock_autogate_enable)(struct sun6i_dma_dev *);
+ void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
+ u32 src_burst_lengths;
+ u32 dst_burst_lengths;
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
};
/*
@@ -175,6 +192,9 @@ struct sun6i_dma_dev {
struct sun6i_pchan *pchans;
struct sun6i_vchan *vchans;
const struct sun6i_dma_config *cfg;
+ u32 num_pchans;
+ u32 num_vchans;
+ u32 max_request;
};
static struct device *chan2dev(struct dma_chan *chan)
@@ -251,8 +271,12 @@ static inline s8 convert_burst(u32 maxburst)
switch (maxburst) {
case 1:
return 0;
+ case 4:
+ return 1;
case 8:
return 2;
+ case 16:
+ return 3;
default:
return -EINVAL;
}
@@ -260,11 +284,29 @@ static inline s8 convert_burst(u32 maxburst)
static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
- if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
- (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
- return -EINVAL;
+ return ilog2(addr_width);
+}
+
+static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev)
+{
+ writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE);
+}
+
+static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev)
+{
+ writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE);
+}
- return addr_width >> 1;
+static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst)
+{
+ *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) |
+ DMA_CHAN_CFG_DST_BURST_A31(dst_burst);
+}
+
+static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
+{
+ *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) |
+ DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
}
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
@@ -399,7 +441,6 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
static void sun6i_dma_tasklet(unsigned long data)
{
struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
- const struct sun6i_dma_config *cfg = sdev->cfg;
struct sun6i_vchan *vchan;
struct sun6i_pchan *pchan;
unsigned int pchan_alloc = 0;
@@ -427,7 +468,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_lock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
pchan = &sdev->pchans[pchan_idx];
if (pchan->vchan || list_empty(&sdev->pending))
@@ -448,7 +489,7 @@ static void sun6i_dma_tasklet(unsigned long data)
}
spin_unlock_irq(&sdev->lock);
- for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
+ for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
if (!(pchan_alloc & BIT(pchan_idx)))
continue;
@@ -470,7 +511,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
int i, j, ret = IRQ_NONE;
u32 status;
- for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
+ for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) {
status = readl(sdev->base + DMA_IRQ_STAT(i));
if (!status)
continue;
@@ -510,47 +551,49 @@ static int set_config(struct sun6i_dma_dev *sdev,
enum dma_transfer_direction direction,
u32 *p_cfg)
{
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
+ u32 src_maxburst, dst_maxburst;
s8 src_width, dst_width, src_burst, dst_burst;
+ src_addr_width = sconfig->src_addr_width;
+ dst_addr_width = sconfig->dst_addr_width;
+ src_maxburst = sconfig->src_maxburst;
+ dst_maxburst = sconfig->dst_maxburst;
+
switch (direction) {
case DMA_MEM_TO_DEV:
- src_burst = convert_burst(sconfig->src_maxburst ?
- sconfig->src_maxburst : 8);
- src_width = convert_buswidth(sconfig->src_addr_width !=
- DMA_SLAVE_BUSWIDTH_UNDEFINED ?
- sconfig->src_addr_width :
- DMA_SLAVE_BUSWIDTH_4_BYTES);
- dst_burst = convert_burst(sconfig->dst_maxburst);
- dst_width = convert_buswidth(sconfig->dst_addr_width);
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ src_maxburst = src_maxburst ? src_maxburst : 8;
break;
case DMA_DEV_TO_MEM:
- src_burst = convert_burst(sconfig->src_maxburst);
- src_width = convert_buswidth(sconfig->src_addr_width);
- dst_burst = convert_burst(sconfig->dst_maxburst ?
- sconfig->dst_maxburst : 8);
- dst_width = convert_buswidth(sconfig->dst_addr_width !=
- DMA_SLAVE_BUSWIDTH_UNDEFINED ?
- sconfig->dst_addr_width :
- DMA_SLAVE_BUSWIDTH_4_BYTES);
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dst_maxburst = dst_maxburst ? dst_maxburst : 8;
break;
default:
return -EINVAL;
}
- if (src_burst < 0)
- return src_burst;
- if (src_width < 0)
- return src_width;
- if (dst_burst < 0)
- return dst_burst;
- if (dst_width < 0)
- return dst_width;
-
- *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
- DMA_CHAN_CFG_SRC_WIDTH(src_width) |
- DMA_CHAN_CFG_DST_BURST(dst_burst) |
+ if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths))
+ return -EINVAL;
+ if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths))
+ return -EINVAL;
+ if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths))
+ return -EINVAL;
+ if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths))
+ return -EINVAL;
+
+ src_width = convert_buswidth(src_addr_width);
+ dst_width = convert_buswidth(dst_addr_width);
+ dst_burst = convert_burst(dst_maxburst);
+ src_burst = convert_burst(src_maxburst);
+
+ *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) |
DMA_CHAN_CFG_DST_WIDTH(dst_width);
+ sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst);
+
return 0;
}
@@ -593,11 +636,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
- DMA_CHAN_CFG_SRC_BURST(burst) |
DMA_CHAN_CFG_SRC_WIDTH(width) |
- DMA_CHAN_CFG_DST_BURST(burst) |
DMA_CHAN_CFG_DST_WIDTH(width);
+ sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);
+
sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
sun6i_dma_dump_lli(vchan, v_lli);
@@ -948,7 +991,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
u8 port = dma_spec->args[0];
- if (port > sdev->cfg->nr_max_requests)
+ if (port > sdev->max_request)
return NULL;
chan = dma_get_any_slave_channel(&sdev->slave);
@@ -981,7 +1024,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
int i;
- for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
+ for (i = 0; i < sdev->num_vchans; i++) {
struct sun6i_vchan *vchan = &sdev->vchans[i];
list_del(&vchan->vc.chan.device_node);
@@ -1009,6 +1052,15 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = {
.nr_max_channels = 16,
.nr_max_requests = 30,
.nr_max_vchans = 53,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
@@ -1020,24 +1072,76 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 24,
.nr_max_vchans = 37,
- .gate_needed = true,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 28,
.nr_max_vchans = 39,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
/*
* The H3 has 12 physical channels, a maximum DRQ port id of 27,
* and a total of 34 usable source and destination endpoints.
+ * It also supports additional burst lengths and bus widths,
+ * and the burst length fields have different offsets.
*/
static struct sun6i_dma_config sun8i_h3_dma_cfg = {
.nr_max_channels = 12,
.nr_max_requests = 27,
.nr_max_vchans = 34,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+};
+
+/*
+ * The A64 binding uses the number of dma channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_a64_dma_cfg = {
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
/*
@@ -1049,7 +1153,16 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
.nr_max_channels = 8,
.nr_max_requests = 23,
.nr_max_vchans = 24,
- .gate_needed = true,
+ .clock_autogate_enable = sun6i_enable_clock_autogate_a23,
+ .set_burst_length = sun6i_set_burst_length_a31,
+ .src_burst_lengths = BIT(1) | BIT(8),
+ .dst_burst_lengths = BIT(1) | BIT(8),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
static const struct of_device_id sun6i_dma_match[] = {
@@ -1058,13 +1171,14 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
+ { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);
static int sun6i_dma_probe(struct platform_device *pdev)
{
- const struct of_device_id *device;
+ struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
struct resource *res;
int ret, i;
@@ -1073,10 +1187,9 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc)
return -ENOMEM;
- device = of_match_device(sun6i_dma_match, &pdev->dev);
- if (!device)
+ sdc->cfg = of_device_get_match_data(&pdev->dev);
+ if (!sdc->cfg)
return -ENODEV;
- sdc->cfg = device->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sdc->base = devm_ioremap_resource(&pdev->dev, res);
@@ -1129,37 +1242,57 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_pause = sun6i_dma_pause;
sdc->slave.device_resume = sun6i_dma_resume;
sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
- sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths;
+ sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths;
sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
BIT(DMA_MEM_TO_DEV);
sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
sdc->slave.dev = &pdev->dev;
- sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
+ sdc->num_pchans = sdc->cfg->nr_max_channels;
+ sdc->num_vchans = sdc->cfg->nr_max_vchans;
+ sdc->max_request = sdc->cfg->nr_max_requests;
+
+ ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans);
+ if (ret && !sdc->num_pchans) {
+ dev_err(&pdev->dev, "Can't get dma-channels.\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
+ if (ret && !sdc->max_request) {
+ dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
+ DMA_CHAN_MAX_DRQ);
+ sdc->max_request = DMA_CHAN_MAX_DRQ;
+ }
+
+ /*
+ * If the number of vchans is not specified, derive it from the
+ * highest port number, at most one channel per port and direction.
+ */
+ if (!sdc->num_vchans)
+ sdc->num_vchans = 2 * (sdc->max_request + 1);
+
+ sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans,
sizeof(struct sun6i_pchan), GFP_KERNEL);
if (!sdc->pchans)
return -ENOMEM;
- sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
+ sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans,
sizeof(struct sun6i_vchan), GFP_KERNEL);
if (!sdc->vchans)
return -ENOMEM;
tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
- for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
+ for (i = 0; i < sdc->num_pchans; i++) {
struct sun6i_pchan *pchan = &sdc->pchans[i];
pchan->idx = i;
pchan->base = sdc->base + 0x100 + i * 0x40;
}
- for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
+ for (i = 0; i < sdc->num_vchans; i++) {
struct sun6i_vchan *vchan = &sdc->vchans[i];
INIT_LIST_HEAD(&vchan->node);
@@ -1199,8 +1332,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
goto err_dma_unregister;
}
- if (sdc->cfg->gate_needed)
- writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
+ if (sdc->cfg->clock_autogate_enable)
+ sdc->cfg->clock_autogate_enable(sdc);
return 0;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index f1d04b70ee67..7df910e7c348 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -49,12 +49,12 @@ struct ti_am335x_xbar_data {
struct ti_am335x_xbar_map {
u16 dma_line;
- u16 mux_val;
+ u8 mux_val;
};
-static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
+static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val & 0x1f, iomem + event);
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
@@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
}
map->dma_line = (u16)dma_spec->args[0];
- map->mux_val = (u16)dma_spec->args[2];
+ map->mux_val = (u8)dma_spec->args[2];
dma_spec->args[2] = 0;
dma_spec->args_count = 2;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 8722bcba489d..5eef13380ca8 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -366,6 +366,20 @@ struct xilinx_dma_chan {
u16 tdest;
};
+/**
+ * enum xdma_ip_type - DMA IP type.
+ *
+ * @XDMA_TYPE_AXIDMA: Axi DMA IP.
+ * @XDMA_TYPE_CDMA: Axi CDMA IP.
+ * @XDMA_TYPE_VDMA: Axi VDMA IP.
+ *
+ */
+enum xdma_ip_type {
+ XDMA_TYPE_AXIDMA = 0,
+ XDMA_TYPE_CDMA,
+ XDMA_TYPE_VDMA,
+};
+
struct xilinx_dma_config {
enum xdma_ip_type dmatype;
int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 346c4987b284..11d6419788c2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
/*
* To trigger the error, we need to read the data back
* (the data was written with errors above).
- * The ACCESS_ONCE macros and printk are used to prevent the
+ * The READ_ONCE macros and printk are used to prevent the
* the compiler optimizing these reads out.
*/
- reg = ACCESS_ONCE(ptemp[0]);
- read_reg = ACCESS_ONCE(ptemp[1]);
+ reg = READ_ONCE(ptemp[0]);
+ read_reg = READ_ONCE(ptemp[1]);
/* Force Read */
rmb();
@@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
/* Read data so we're in the correct state */
rmb();
- if (ACCESS_ONCE(ptemp[i]))
+ if (READ_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
writel(error_mask, (drvdata->base + priv->set_err_ofst));
@@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
/* Read out written data. ECC error caused here */
for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
- if (ACCESS_ONCE(ptemp[i]) != i)
+ if (READ_ONCE(ptemp[i]) != i)
edac_printk(KERN_ERR, EDAC_DEVICE,
"Read doesn't match written data\n");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ac2f30295efe..8b16ec595fa7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3434,9 +3434,14 @@ MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
+ const char *owner;
int err = -ENODEV;
int i;
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
index 4709c6079848..393be3351493 100644
--- a/drivers/edac/amd64_edac_dbg.c
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "amd64_edac.h"
#define EDAC_DCT_ATTR_SHOW(reg) \
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index e14977ff95db..d96d6116f0fb 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "amd64_edac.h"
static ssize_t amd64_inject_section_show(struct device *dev,
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 480072139b7a..48193f5f3b56 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -53,7 +53,7 @@ static LIST_HEAD(mc_devices);
* Used to lock EDAC MC to just one module, avoiding two drivers e. g.
* apei/ghes and i7core_edac to be used at the same time.
*/
-static void const *edac_mc_owner;
+static const char *edac_mc_owner;
static struct bus_type mc_bus[EDAC_MAX_MCS];
@@ -701,6 +701,11 @@ unlock:
}
EXPORT_SYMBOL(edac_mc_find);
+const char *edac_get_owner(void)
+{
+ return edac_mc_owner;
+}
+EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 5357800e418d..4165e15995ad 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -128,6 +128,14 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned sz_pvt);
/**
+ * edac_get_owner - Return the mod_name of the module that owns the EDAC MC
+ *
+ * Returns:
+ * Pointer to mod_name string when EDAC MC is owned. NULL otherwise.
+ */
+extern const char *edac_get_owner(void);
+
+/*
* edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
* global list and create sysfs entries associated with @mci structure.
*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e4fcfa84fbd3..c70ea82c815c 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -50,7 +50,7 @@ int edac_mc_get_poll_msec(void)
return edac_mc_poll_msec;
}
-static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
+static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
{
unsigned long l;
int ret;
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 172598a27d7d..32a931d0cb71 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -19,7 +19,8 @@
#ifdef CONFIG_EDAC_DEBUG
-static int edac_set_debug_level(const char *buf, struct kernel_param *kp)
+static int edac_set_debug_level(const char *buf,
+ const struct kernel_param *kp)
{
unsigned long val;
int ret;
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 014871e169cc..dec88dcea036 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* edac_module.h
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6f80eb65c26c..68b6ee18bea6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -28,10 +28,19 @@ struct ghes_edac_pvt {
char msg[80];
};
-static LIST_HEAD(ghes_reglist);
-static DEFINE_MUTEX(ghes_edac_lock);
-static int ghes_edac_mc_num;
+static atomic_t ghes_init = ATOMIC_INIT(0);
+static struct ghes_edac_pvt *ghes_pvt;
+/*
+ * Sync with other, potentially concurrent callers of
+ * ghes_edac_report_mem_error(). We don't know what the
+ * "inventive" firmware would do.
+ */
+static DEFINE_SPINLOCK(ghes_lock);
+
+/* "ghes_edac.force_load=1" skips the platform check */
+static bool __read_mostly force_load;
+module_param(force_load, bool, 0);
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
@@ -169,18 +178,26 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = NULL;
+ struct ghes_edac_pvt *pvt = ghes_pvt;
+ unsigned long flags;
char *p;
u8 grain_bits;
- list_for_each_entry(pvt, &ghes_reglist, list) {
- if (ghes == pvt->ghes)
- break;
- }
if (!pvt) {
pr_err("Internal error: Can't find EDAC structure\n");
return;
}
+
+ /*
+ * We can do the locking below because GHES defers error processing
+ * from NMI to IRQ context. Whenever that changes, we'd at least
+ * know.
+ */
+ if (WARN_ON_ONCE(in_nmi()))
+ return;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+
mci = pvt->mci;
e = &mci->error_desc;
@@ -398,10 +415,17 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, pvt->detail_location);
- /* Report the error via EDAC API */
edac_raw_mc_handle_error(type, mci, e);
+ spin_unlock_irqrestore(&ghes_lock, flags);
}
-EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+/*
+ * Known systems that are safe to enable this module.
+ */
+static struct acpi_platform_list plat_list[] = {
+ {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
+ { } /* End */
+};
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
@@ -409,8 +433,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
int rc, num_dimm = 0;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
- struct ghes_edac_pvt *pvt;
struct ghes_edac_dimm_fill dimm_fill;
+ int idx;
+
+ /* Check if safe to enable on this system */
+ idx = acpi_match_platform_list(plat_list);
+ if (!force_load && idx < 0)
+ return 0;
+
+ /*
+ * We have only one logical memory controller to which all DIMMs belong.
+ */
+ if (atomic_inc_return(&ghes_init) > 1)
+ return 0;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -425,26 +460,17 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
layers[0].size = num_dimm;
layers[0].is_virt_csrow = true;
- /*
- * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
- * to avoid duplicated memory controller numbers
- */
- mutex_lock(&ghes_edac_lock);
- mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
- sizeof(*pvt));
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- mutex_unlock(&ghes_edac_lock);
return -ENOMEM;
}
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
- list_add_tail(&pvt->list, &ghes_reglist);
- pvt->ghes = ghes;
- pvt->mci = mci;
- mci->pdev = dev;
+ ghes_pvt = mci->pvt_info;
+ ghes_pvt->ghes = ghes;
+ ghes_pvt->mci = mci;
+ mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
@@ -452,36 +478,23 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
- if (!ghes_edac_mc_num) {
- if (!fake) {
- pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
- pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
- pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
- pr_info("If you find incorrect reports, please contact your hardware vendor\n");
- pr_info("to correct its BIOS.\n");
- pr_info("This system has %d DIMM sockets.\n",
- num_dimm);
- } else {
- pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
- pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
- pr_info("work on such system. Use this driver with caution\n");
- }
+ if (fake) {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ } else if (idx < 0) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n", num_dimm);
}
if (!fake) {
- /*
- * Fill DIMM info from DMI for the memory controller #0
- *
- * Keep it in blank for the other memory controllers, as
- * there's no reliable way to properly credit each DIMM to
- * the memory controller, as different BIOSes fill the
- * DMI bank location fields on different ways
- */
- if (!ghes_edac_mc_num) {
- dimm_fill.count = 0;
- dimm_fill.mci = mci;
- dmi_walk(ghes_edac_dmidecode, &dimm_fill);
- }
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
mci->n_layers, 0, 0, 0);
@@ -497,28 +510,16 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- mutex_unlock(&ghes_edac_lock);
return -ENODEV;
}
-
- ghes_edac_mc_num++;
- mutex_unlock(&ghes_edac_lock);
return 0;
}
-EXPORT_SYMBOL_GPL(ghes_edac_register);
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt, *tmp;
-
- list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
- if (ghes == pvt->ghes) {
- mci = pvt->mci;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
- list_del(&pvt->list);
- }
- }
+
+ mci = ghes_pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
}
-EXPORT_SYMBOL_GPL(ghes_edac_unregister);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index c16c3b931b3d..8c5540160a23 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2159,8 +2159,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
- i7core_dev->socket);
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
@@ -2214,6 +2219,8 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
fail0:
kfree(mci->ctl_name);
+
+fail1:
edac_mc_free(mci);
i7core_dev->mci = NULL;
return rc;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 0b6a68673e0e..4e9c5e596c6c 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _EDAC_MCE_AMD_H
#define _EDAC_MCE_AMD_H
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 4395c84cdcbf..df28b65358d2 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -45,6 +45,8 @@
#include "edac_module.h"
#include "pnd2_edac.h"
+#define EDAC_MOD_STR "pnd2_edac"
+
#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
@@ -1355,7 +1357,7 @@ static int pnd2_register_mci(struct mem_ctl_info **ppmci)
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
- mci->mod_name = "pnd2_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = ops->name;
mci->ctl_name = "Pondicherry2";
@@ -1547,10 +1549,15 @@ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index dc0591654011..f34430f99fd8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -36,7 +36,7 @@ static LIST_HEAD(sbridge_edac_list);
* Alter this version for the module when modifications are made
*/
#define SBRIDGE_REVISION " Ver: 1.1.2 "
-#define EDAC_MOD_STR "sbridge_edac"
+#define EDAC_MOD_STR "sb_edac"
/*
* Debug macros
@@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
@@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
/* Optional, mode 2HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
@@ -1318,9 +1318,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
int cur_reg_start;
int mc;
int channel;
- int way;
int participants[KNL_MAX_CHANNELS];
- int participant_count = 0;
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
@@ -1495,21 +1493,14 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
* this channel mapped to the given target?
*/
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
- for (way = 0; way < intrlv_ways; way++) {
- int target;
- int cha;
-
- if (KNL_MOD3(dram_rule))
- target = way;
- else
- target = 0x7 & sad_pkg(
- pvt->info.interleave_pkg, interleave_reg, way);
+ int target;
+ int cha;
+ for (target = 0; target < KNL_MAX_CHANNELS; target++) {
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
- participant_count++;
participants[channel] = 1;
break;
}
@@ -1517,10 +1508,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
}
}
- if (participant_count != intrlv_ways)
- edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
- participant_count, intrlv_ways);
-
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel);
if (participants[channel]) {
@@ -2291,6 +2278,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
next_imc:
sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
if (!sbridge_dev) {
+ /* If HA1 wasn't found, don't create a second EDAC memory controller */
+ if (dev_descr->dom == IMC1 && devno != 1) {
+ edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return 0;
+ }
if (dev_descr->dom == SOCK)
goto out_imc;
@@ -2491,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
+ break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
@@ -3155,7 +3150,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sb_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
@@ -3287,6 +3282,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
break;
}
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
/* Get dimm basic config and the memory layout */
rc = get_dimm_config(mci);
if (rc < 0) {
@@ -3402,10 +3402,15 @@ static void sbridge_remove(void)
static int __init sbridge_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 16dea97568a1..912c4930c9ef 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,6 +31,8 @@
#include "edac_module.h"
+#define EDAC_MOD_STR "skx_edac"
+
/*
* Debug macros
*/
@@ -65,6 +67,7 @@ static u64 skx_tolm, skx_tohm;
struct skx_dev {
struct list_head list;
u8 bus[4];
+ int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
u32 mcroute;
@@ -110,12 +113,12 @@ struct decoded_addr {
int bank_group;
};
-static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
list_for_each_entry(d, &skx_edac_list, list) {
- if (d->bus[idx] == bus)
+ if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
return d;
}
@@ -172,6 +175,7 @@ static int get_all_bus_mappings(void)
pci_dev_put(pdev);
return -ENOMEM;
}
+ d->seg = pci_domain_nr(pdev->bus);
pci_read_config_dword(pdev, 0xCC, &reg);
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
@@ -207,7 +211,7 @@ static int get_all_munits(const struct munit *m)
if (i == NUM_IMC)
goto fail;
}
- d = get_skx_dev(pdev->bus->number, m->busidx);
+ d = get_skx_dev(pdev->bus, m->busidx);
if (!d)
goto fail;
@@ -299,7 +303,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
-#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
@@ -360,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
- banks, ranks, rows, cols);
+ banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
@@ -464,12 +468,16 @@ static int skx_register_mci(struct skx_imc *imc)
pvt = mci->pvt_info;
pvt->imc = imc;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
- imc->node_id, imc->lmc);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", imc->node_id, imc->lmc);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
mci->mtype_cap = MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "skx_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(imc->chan[0].cdev);
mci->ctl_page_to_phys = NULL;
@@ -491,6 +499,7 @@ static int skx_register_mci(struct skx_imc *imc)
fail:
kfree(mci->ctl_name);
+fail0:
edac_mc_free(mci);
imc->mci = NULL;
return rc;
@@ -1039,12 +1048,17 @@ static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
+ const char *owner;
int rc = 0, i;
u8 mc = 0, src_id, node_id;
struct skx_dev *d;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index f35d87519a3e..4803c6468bab 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -639,27 +639,6 @@ err_free:
return ret;
}
-#ifdef CONFIG_PM
-static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int thunderx_lmc_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- return 0;
-}
-#endif
-
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
{ 0, },
@@ -834,10 +813,6 @@ static struct pci_driver thunderx_lmc_driver = {
.name = "thunderx_lmc_edac",
.probe = thunderx_lmc_probe,
.remove = thunderx_lmc_remove,
-#ifdef CONFIG_PM
- .suspend = thunderx_lmc_suspend,
- .resume = thunderx_lmc_resume,
-#endif
.id_table = thunderx_lmc_pci_tbl,
};
diff --git a/drivers/eisa/Makefile b/drivers/eisa/Makefile
index 5369ce957c6d..a1dd0eaec2d4 100644
--- a/drivers/eisa/Makefile
+++ b/drivers/eisa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux device tree
obj-$(CONFIG_EISA) += eisa-bus.o
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index a73624e76193..0888fdeded72 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for external connector class (extcon) devices
#
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 6f6537ab0a79..3877d86c746a 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -26,7 +26,7 @@
#include <linux/workqueue.h>
#include <linux/iio/consumer.h>
#include <linux/extcon/extcon-adc-jack.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
/**
* struct adc_jack_data - internal data for adc_jack device driver
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index f84da4a17724..da0e9bc4262f 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,7 +27,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <sound/soc.h>
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index f4fd03e58e37..981fba56bc18 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -22,7 +22,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/notifier.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index ebed22f22d75..ab770adcca7e 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -17,7 +17,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/extcon/extcon-gpio.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 91a0023074af..7c4bc8c44c3f 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -15,7 +15,7 @@
* more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/intel_soc_pmic.h>
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index 1a45e745717d..c8691b5a9cb0 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -19,7 +19,7 @@
*/
#include <linux/acpi.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index f6414b7fa5bc..b871836da8a4 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -23,7 +23,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/max14577.h>
#include <linux/mfd/max14577-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#define DELAY_MS_DEFAULT 17000 /* unit: millisecond */
@@ -204,8 +204,8 @@ static int max14577_muic_set_debounce_time(struct max14577_muic_info *info,
static int max14577_muic_set_path(struct max14577_muic_info *info,
u8 val, bool attached)
{
- int ret = 0;
u8 ctrl1, ctrl2 = 0;
+ int ret;
/* Set open state to path before changing hw path */
ret = max14577_update_reg(info->max14577->regmap,
diff --git a/drivers/extcon/extcon-max3355.c b/drivers/extcon/extcon-max3355.c
index 533e16a952b8..0aa410836f4e 100644
--- a/drivers/extcon/extcon-max3355.c
+++ b/drivers/extcon/extcon-max3355.c
@@ -9,7 +9,7 @@
* may be copied, distributed, and modified under those terms.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 7a5856809047..643411066ad9 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -26,7 +26,7 @@
#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77693-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regmap.h>
#include <linux/irqdomain.h>
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 6e722d552cf1..c9fcd6cd41cb 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -11,7 +11,7 @@
* (at your option) any later version.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -80,7 +80,7 @@ enum max77843_muic_accessory_type {
MAX77843_MUIC_ADC_REMOTE_S12_BUTTON,
MAX77843_MUIC_ADC_RESERVED_ACC_1,
MAX77843_MUIC_ADC_RESERVED_ACC_2,
- MAX77843_MUIC_ADC_RESERVED_ACC_3,
+ MAX77843_MUIC_ADC_RESERVED_ACC_3, /* SmartDock */
MAX77843_MUIC_ADC_RESERVED_ACC_4,
MAX77843_MUIC_ADC_RESERVED_ACC_5,
MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2,
@@ -119,6 +119,7 @@ enum max77843_muic_charger_type {
MAX77843_MUIC_CHG_SPECIAL_BIAS,
MAX77843_MUIC_CHG_RESERVED,
MAX77843_MUIC_CHG_GND,
+ MAX77843_MUIC_CHG_DOCK,
};
static const unsigned int max77843_extcon_cable[] = {
@@ -130,6 +131,7 @@ static const unsigned int max77843_extcon_cable[] = {
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
EXTCON_DISP_MHL,
+ EXTCON_DOCK,
EXTCON_JIG,
EXTCON_NONE,
};
@@ -200,7 +202,7 @@ static const struct regmap_irq_chip max77843_muic_irq_chip = {
};
static int max77843_muic_set_path(struct max77843_muic_info *info,
- u8 val, bool attached)
+ u8 val, bool attached, bool nobccomp)
{
struct max77693_dev *max77843 = info->max77843;
int ret = 0;
@@ -210,10 +212,16 @@ static int max77843_muic_set_path(struct max77843_muic_info *info,
ctrl1 = val;
else
ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
+ if (nobccomp) {
+ /* Disable BC1.2 protocol and force manual switch control */
+ ctrl1 |= MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK;
+ }
ret = regmap_update_bits(max77843->regmap_muic,
MAX77843_MUIC_REG_CONTROL1,
- MAX77843_MUIC_CONTROL1_COM_SW, ctrl1);
+ MAX77843_MUIC_CONTROL1_COM_SW |
+ MAX77843_MUIC_CONTROL1_NOBCCOMP_MASK,
+ ctrl1);
if (ret < 0) {
dev_err(info->dev, "Cannot switch MUIC port\n");
return ret;
@@ -240,6 +248,21 @@ static int max77843_muic_set_path(struct max77843_muic_info *info,
return 0;
}
+static void max77843_charger_set_otg_vbus(struct max77843_muic_info *info,
+ bool on)
+{
+ struct max77693_dev *max77843 = info->max77843;
+ unsigned int cnfg00;
+
+ if (on)
+ cnfg00 = MAX77843_CHG_OTG_MASK | MAX77843_CHG_BOOST_MASK;
+ else
+ cnfg00 = MAX77843_CHG_ENABLE | MAX77843_CHG_BUCK_MASK;
+
+ regmap_update_bits(max77843->regmap_chg, MAX77843_CHG_REG_CHG_CNFG_00,
+ MAX77843_CHG_MODE_MASK, cnfg00);
+}
+
static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
enum max77843_muic_cable_group group, bool *attached)
{
@@ -288,6 +311,19 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
break;
}
+ if (adc == MAX77843_MUIC_ADC_RESERVED_ACC_3) { /* SmartDock */
+ if (chg_type == MAX77843_MUIC_CHG_NONE) {
+ *attached = false;
+ cable_type = info->prev_chg_type;
+ info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
+ } else {
+ *attached = true;
+ cable_type = MAX77843_MUIC_CHG_DOCK;
+ info->prev_chg_type = MAX77843_MUIC_CHG_DOCK;
+ }
+ break;
+ }
+
if (chg_type == MAX77843_MUIC_CHG_NONE) {
*attached = false;
cable_type = info->prev_chg_type;
@@ -350,17 +386,18 @@ static int max77843_muic_adc_gnd_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_GND_USB_HOST_VB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
- attached);
+ attached, false);
if (ret < 0)
return ret;
extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
+ max77843_charger_set_otg_vbus(info, attached);
break;
case MAX77843_MUIC_GND_MHL_VB:
case MAX77843_MUIC_GND_MHL:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -396,7 +433,7 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
return -EINVAL;
}
- ret = max77843_muic_set_path(info, path, attached);
+ ret = max77843_muic_set_path(info, path, attached, false);
if (ret < 0)
return ret;
@@ -405,6 +442,26 @@ static int max77843_muic_jig_handler(struct max77843_muic_info *info,
return 0;
}
+static int max77843_muic_dock_handler(struct max77843_muic_info *info,
+ bool attached)
+{
+ int ret;
+
+ dev_dbg(info->dev, "external connector is %s (adc: 0x10)\n",
+ attached ? "attached" : "detached");
+
+ ret = max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_USB,
+ attached, attached);
+ if (ret < 0)
+ return ret;
+
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL, attached);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, attached);
+ extcon_set_state_sync(info->edev, EXTCON_DOCK, attached);
+
+ return 0;
+}
+
static int max77843_muic_adc_handler(struct max77843_muic_info *info)
{
int ret, cable_type;
@@ -419,6 +476,11 @@ static int max77843_muic_adc_handler(struct max77843_muic_info *info)
info->prev_cable_type);
switch (cable_type) {
+ case MAX77843_MUIC_ADC_RESERVED_ACC_3: /* SmartDock */
+ ret = max77843_muic_dock_handler(info, attached);
+ if (ret < 0)
+ return ret;
+ break;
case MAX77843_MUIC_ADC_GROUND:
ret = max77843_muic_adc_gnd_handler(info);
if (ret < 0)
@@ -446,7 +508,6 @@ static int max77843_muic_adc_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_ADC_REMOTE_S12_BUTTON:
case MAX77843_MUIC_ADC_RESERVED_ACC_1:
case MAX77843_MUIC_ADC_RESERVED_ACC_2:
- case MAX77843_MUIC_ADC_RESERVED_ACC_3:
case MAX77843_MUIC_ADC_RESERVED_ACC_4:
case MAX77843_MUIC_ADC_RESERVED_ACC_5:
case MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE2:
@@ -490,7 +551,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_USB:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_USB,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -501,7 +562,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -511,7 +572,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_DEDICATED:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -521,7 +582,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_SPECIAL_500MA:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -531,7 +592,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
case MAX77843_MUIC_CHG_SPECIAL_1A:
ret = max77843_muic_set_path(info,
MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
if (ret < 0)
return ret;
@@ -550,6 +611,9 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
false);
break;
+ case MAX77843_MUIC_CHG_DOCK:
+ extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP, attached);
+ break;
case MAX77843_MUIC_CHG_NONE:
break;
default:
@@ -558,7 +622,7 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
attached ? "attached" : "detached", chg_type);
max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
- attached);
+ attached, false);
return -EINVAL;
}
@@ -798,7 +862,8 @@ static int max77843_muic_probe(struct platform_device *pdev)
max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
/* Set initial path for UART */
- max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true);
+ max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true,
+ false);
/* Check revision number of MUIC device */
ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 4a0612fb9c07..8152790d72e1 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -25,7 +25,7 @@
#include <linux/kobject.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index b8cde096a808..660bbf163bf5 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -15,7 +15,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index eaa355e7d9e4..e059bd5f2041 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -20,7 +20,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include "extcon-rt8973a.h"
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 106ef0297b53..0cfb5a3efdf6 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include "extcon-sm5502.h"
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 9c925b05b7aa..53762864a9f7 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -14,7 +14,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 598956f1dcae..6187f731b29d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -14,7 +14,7 @@
* GNU General Public License for more details.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/mfd/cros_ec.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 35e9fb885486..cb38c2747684 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -36,7 +36,7 @@
#define SUPPORTED_CABLE_MAX 32
-struct __extcon_info {
+static const struct __extcon_info {
unsigned int type;
unsigned int id;
const char *name;
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
index dddddcfa0587..93b5e0306966 100644
--- a/drivers/extcon/extcon.h
+++ b/drivers/extcon/extcon.h
@@ -1,7 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_EXTCON_INTERNAL_H__
#define __LINUX_EXTCON_INTERNAL_H__
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
/**
* struct extcon_dev - An extcon device represents one external connector.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index e3870d5c43dd..e58c8c794778 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux IEEE 1394 implementation
#
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index d6a09b9cd8cc..4372f9e4b0da 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -137,9 +137,9 @@ int fw_cancel_transaction(struct fw_card *card,
}
EXPORT_SYMBOL(fw_cancel_transaction);
-static void split_transaction_timeout_callback(unsigned long data)
+static void split_transaction_timeout_callback(struct timer_list *timer)
{
- struct fw_transaction *t = (struct fw_transaction *)data;
+ struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
unsigned long flags;
@@ -373,8 +373,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
t->tlabel = tlabel;
t->card = card;
t->is_split_transaction = false;
- setup_timer(&t->split_timeout_timer,
- split_transaction_timeout_callback, (unsigned long)t);
+ timer_setup(&t->split_timeout_timer,
+ split_transaction_timeout_callback, 0);
t->callback = callback;
t->callback_data = callback_data;
@@ -423,7 +423,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
struct transaction_callback_data d;
struct fw_transaction t;
- init_timer_on_stack(&t.split_timeout_timer);
+ timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
init_completion(&d.done);
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
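The change above is the standard conversion to the timer_setup()/from_timer() API: the callback now receives the struct timer_list pointer directly and recovers its enclosing object with from_timer() instead of casting an unsigned long cookie. A minimal sketch of the pattern, using generic names that are not taken from this driver:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list timer;
	/* ... driver state ... */
};

static void foo_timeout(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer. */
	struct foo *foo = from_timer(foo, t, timer);

	/* ... handle expiry using foo ... */
}

static void foo_arm(struct foo *foo)
{
	timer_setup(&foo->timer, foo_timeout, 0);
	mod_timer(&foo->timer, jiffies + HZ);
}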
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c07962ead5e4..0f0bed3a4bbb 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
diff --git a/drivers/firewire/nosy-user.h b/drivers/firewire/nosy-user.h
index e48aa6200c72..3446c5b772e5 100644
--- a/drivers/firewire/nosy-user.h
+++ b/drivers/firewire/nosy-user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __nosy_user_h
#define __nosy_user_h
diff --git a/drivers/firewire/nosy.h b/drivers/firewire/nosy.h
index 078ff27f4756..4078d69e93f8 100644
--- a/drivers/firewire/nosy.h
+++ b/drivers/firewire/nosy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Chip register definitions for PCILynx chipset. Based on pcilynx.h
* from the Linux 1394 drivers, but modified a bit so the names here
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf89267dc25..ccf52368a073 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
- res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+ res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
- res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+ res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ef5e7336da68..c4d005a9901a 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIREWIRE_OHCI_H
#define _FIREWIRE_OHCI_H
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6e4ed5a9c6fd..fa87a055905e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -215,6 +215,17 @@ config QCOM_SCM_64
def_bool y
depends on QCOM_SCM && ARM64
+config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
+ bool "Qualcomm download mode enabled by default"
+ depends on QCOM_SCM
+ help
+	  A device with "download mode" enabled will, upon an unexpected
+	  warm restart, enter a special debug mode that allows the user to
+ "download" memory content over USB for offline postmortem analysis.
+ The feature can be enabled/disabled on the kernel command line.
+
+ Say Y here to enable "download mode" by default.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
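Note on the QCOM_SCM_DOWNLOAD_MODE_DEFAULT option added above: the qcom_scm driver change further down in this patch registers a matching "download_mode" module parameter, so this Kconfig symbol only sets the default. Assuming the usual qcom_scm module naming, the mode can still be overridden at boot with qcom_scm.download_mode=1 (or =0) on the kernel command line.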
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index a37f12e8d137..feaa890197f3 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux kernel.
#
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 7da9f1b83ebe..dfb373c8ba2a 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -72,21 +73,13 @@
#define MAX_DVFS_DOMAINS 8
#define MAX_DVFS_OPPS 16
-#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
-#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
-
-#define PROTOCOL_REV_MINOR_BITS 16
-#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
-#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
-#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
-
-#define FW_REV_MAJOR_BITS 24
-#define FW_REV_MINOR_BITS 16
-#define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1)
-#define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1)
-#define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS)
-#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
-#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
+
+#define PROTO_REV_MAJOR_MASK GENMASK(31, 16)
+#define PROTO_REV_MINOR_MASK GENMASK(15, 0)
+
+#define FW_REV_MAJOR_MASK GENMASK(31, 24)
+#define FW_REV_MINOR_MASK GENMASK(23, 16)
+#define FW_REV_PATCH_MASK GENMASK(15, 0)
#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
@@ -311,10 +304,6 @@ struct clk_get_info {
u8 name[20];
} __packed;
-struct clk_get_value {
- __le32 rate;
-} __packed;
-
struct clk_set_value {
__le16 id;
__le16 reserved;
@@ -328,7 +317,9 @@ struct legacy_clk_set_value {
} __packed;
struct dvfs_info {
- __le32 header;
+ u8 domain;
+ u8 opp_count;
+ __le16 latency;
struct {
__le32 freq;
__le32 m_volt;
@@ -351,11 +342,6 @@ struct _scpi_sensor_info {
char name[20];
};
-struct sensor_value {
- __le32 lo_val;
- __le32 hi_val;
-} __packed;
-
struct dev_pstate_set {
__le16 dev_id;
u8 pstate;
@@ -419,19 +405,20 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
unsigned int len;
if (scpi_info->is_legacy) {
- struct legacy_scpi_shared_mem *mem = ch->rx_payload;
+ struct legacy_scpi_shared_mem __iomem *mem =
+ ch->rx_payload;
/* RX Length is not replied by the legacy Firmware */
len = match->rx_len;
- match->status = le32_to_cpu(mem->status);
+ match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
} else {
- struct scpi_shared_mem *mem = ch->rx_payload;
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
len = min(match->rx_len, CMD_SIZE(cmd));
- match->status = le32_to_cpu(mem->status);
+ match->status = ioread32(&mem->status);
memcpy_fromio(match->rx_buf, mem->payload, len);
}
@@ -445,11 +432,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
{
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
- struct scpi_shared_mem *mem = ch->rx_payload;
+ struct scpi_shared_mem __iomem *mem = ch->rx_payload;
u32 cmd = 0;
if (!scpi_info->is_legacy)
- cmd = le32_to_cpu(mem->command);
+ cmd = ioread32(&mem->command);
scpi_process_cmd(ch, cmd);
}
@@ -459,7 +446,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
unsigned long flags;
struct scpi_xfer *t = msg;
struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
- struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
+ struct scpi_shared_mem __iomem *mem = ch->tx_payload;
if (t->tx_buf) {
if (scpi_info->is_legacy)
@@ -478,7 +465,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
}
if (!scpi_info->is_legacy)
- mem->command = cpu_to_le32(t->cmd);
+ iowrite32(t->cmd, &mem->command);
}
static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -583,13 +570,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
static unsigned long scpi_clk_get_val(u16 clk_id)
{
int ret;
- struct clk_get_value clk;
+ __le32 rate;
__le16 le_clk_id = cpu_to_le16(clk_id);
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
- sizeof(le_clk_id), &clk, sizeof(clk));
+ sizeof(le_clk_id), &rate, sizeof(rate));
- return ret ? ret : le32_to_cpu(clk.rate);
+ return ret ? ret : le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -645,34 +632,34 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
{
+ if (domain >= MAX_DVFS_DOMAINS)
+ return ERR_PTR(-EINVAL);
+
+ return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL);
+}
+
+static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
+{
struct scpi_dvfs_info *info;
struct scpi_opp *opp;
struct dvfs_info buf;
int ret, i;
- if (domain >= MAX_DVFS_DOMAINS)
- return ERR_PTR(-EINVAL);
-
- if (scpi_info->dvfs[domain]) /* data already populated */
- return scpi_info->dvfs[domain];
-
ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
&buf, sizeof(buf));
if (ret)
- return ERR_PTR(ret);
+ return ret;
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- info->count = DVFS_OPP_COUNT(buf.header);
- info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
+ info->count = buf.opp_count;
+ info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */
- info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
- if (!info->opps) {
- kfree(info);
- return ERR_PTR(-ENOMEM);
- }
+ info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL);
+ if (!info->opps)
+ return -ENOMEM;
for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
opp->freq = le32_to_cpu(buf.opps[i].freq);
@@ -682,7 +669,15 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
scpi_info->dvfs[domain] = info;
- return info;
+ return 0;
+}
+
+static void scpi_dvfs_populate(struct device *dev)
+{
+ int domain;
+
+ for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++)
+ scpi_dvfs_populate_info(dev, domain);
}
static int scpi_dev_domain_id(struct device *dev)
@@ -713,9 +708,6 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
if (IS_ERR(info))
return PTR_ERR(info);
- if (!info->latency)
- return 0;
-
return info->latency;
}
@@ -776,20 +768,19 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
static int scpi_sensor_get_value(u16 sensor, u64 *val)
{
__le16 id = cpu_to_le16(sensor);
- struct sensor_value buf;
+ __le64 value;
int ret;
ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
- &buf, sizeof(buf));
+ &value, sizeof(value));
if (ret)
return ret;
if (scpi_info->is_legacy)
- /* only 32-bits supported, hi_val can be junk */
- *val = le32_to_cpu(buf.lo_val);
+ /* only 32-bits supported, upper 32 bits can be junk */
+ *val = le32_to_cpup((__le32 *)&value);
else
- *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
- le32_to_cpu(buf.lo_val);
+ *val = le64_to_cpu(value);
return 0;
}
@@ -862,23 +853,19 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
static ssize_t protocol_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d.%d\n",
- PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
- PROTOCOL_REV_MINOR(scpi_info->protocol_version));
+ return sprintf(buf, "%lu.%lu\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version));
}
static DEVICE_ATTR_RO(protocol_version);
static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d.%d.%d\n",
- FW_REV_MAJOR(scpi_info->firmware_version),
- FW_REV_MINOR(scpi_info->firmware_version),
- FW_REV_PATCH(scpi_info->firmware_version));
+ return sprintf(buf, "%lu.%lu.%lu\n",
+ FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
}
static DEVICE_ATTR_RO(firmware_version);
@@ -889,39 +876,13 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);
-static void
-scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
+static void scpi_free_channels(void *data)
{
+ struct scpi_drvinfo *info = data;
int i;
- for (i = 0; i < count && pchan->chan; i++, pchan++) {
- mbox_free_channel(pchan->chan);
- devm_kfree(dev, pchan->xfers);
- devm_iounmap(dev, pchan->rx_payload);
- }
-}
-
-static int scpi_remove(struct platform_device *pdev)
-{
- int i;
- struct device *dev = &pdev->dev;
- struct scpi_drvinfo *info = platform_get_drvdata(pdev);
-
- scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
-
- of_platform_depopulate(dev);
- sysfs_remove_groups(&dev->kobj, versions_groups);
- scpi_free_channels(dev, info->channels, info->num_chans);
- platform_set_drvdata(pdev, NULL);
-
- for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
- kfree(info->dvfs[i]->opps);
- kfree(info->dvfs[i]);
- }
- devm_kfree(dev, info->channels);
- devm_kfree(dev, info);
-
- return 0;
+ for (i = 0; i < info->num_chans; i++)
+ mbox_free_channel(info->channels[i].chan);
}
#define MAX_SCPI_XFERS 10
@@ -952,7 +913,6 @@ static int scpi_probe(struct platform_device *pdev)
{
int count, idx, ret;
struct resource res;
- struct scpi_chan *scpi_chan;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -969,13 +929,19 @@ static int scpi_probe(struct platform_device *pdev)
return -ENODEV;
}
- scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
- if (!scpi_chan)
+ scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan),
+ GFP_KERNEL);
+ if (!scpi_info->channels)
return -ENOMEM;
- for (idx = 0; idx < count; idx++) {
+ ret = devm_add_action(dev, scpi_free_channels, scpi_info);
+ if (ret)
+ return ret;
+
+ for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
resource_size_t size;
- struct scpi_chan *pchan = scpi_chan + idx;
+ int idx = scpi_info->num_chans;
+ struct scpi_chan *pchan = scpi_info->channels + idx;
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
@@ -983,15 +949,14 @@ static int scpi_probe(struct platform_device *pdev)
of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
- goto err;
+ return ret;
}
size = resource_size(&res);
pchan->rx_payload = devm_ioremap(dev, res.start, size);
if (!pchan->rx_payload) {
dev_err(dev, "failed to ioremap SCPI payload\n");
- ret = -EADDRNOTAVAIL;
- goto err;
+ return -EADDRNOTAVAIL;
}
pchan->tx_payload = pchan->rx_payload + (size >> 1);
@@ -1017,17 +982,11 @@ static int scpi_probe(struct platform_device *pdev)
dev_err(dev, "failed to get channel%d err %d\n",
idx, ret);
}
-err:
- scpi_free_channels(dev, scpi_chan, idx);
- scpi_info = NULL;
return ret;
}
- scpi_info->channels = scpi_chan;
- scpi_info->num_chans = count;
scpi_info->commands = scpi_std_commands;
-
- platform_set_drvdata(pdev, scpi_info);
+ scpi_info->scpi_ops = &scpi_ops;
if (scpi_info->is_legacy) {
/* Replace with legacy variants */
@@ -1043,23 +1002,23 @@ err:
ret = scpi_init_versions(scpi_info);
if (ret) {
dev_err(dev, "incorrect or no SCP firmware found\n");
- scpi_remove(pdev);
return ret;
}
- _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
- PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
- PROTOCOL_REV_MINOR(scpi_info->protocol_version),
- FW_REV_MAJOR(scpi_info->firmware_version),
- FW_REV_MINOR(scpi_info->firmware_version),
- FW_REV_PATCH(scpi_info->firmware_version));
- scpi_info->scpi_ops = &scpi_ops;
+ scpi_dvfs_populate(dev);
+
+ _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n",
+ FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version),
+ FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version),
+ FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version),
+ FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
- ret = sysfs_create_groups(&dev->kobj, versions_groups);
+ ret = devm_device_add_groups(dev, versions_groups);
if (ret)
dev_err(dev, "unable to create sysfs version group\n");
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id scpi_of_match[] = {
@@ -1076,7 +1035,6 @@ static struct platform_driver scpi_driver = {
.of_match_table = scpi_of_match,
},
.probe = scpi_probe,
- .remove = scpi_remove,
};
module_platform_driver(scpi_driver);
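The GENMASK()/FIELD_GET() pairs introduced above replace the open-coded shift-and-mask macros. A small illustration of how the three firmware-revision fields defined in this file would be pulled out of one 32-bit word (the value is made up):

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitfield.h>

static void scpi_fw_rev_example(void)
{
	u32 fw_rev = 0x01020003;	/* illustrative value only */

	/* Prints "1.2.3": major from bits 31:24, minor from 23:16, patch from 15:0. */
	pr_info("%lu.%lu.%lu\n",
		FIELD_GET(FW_REV_MAJOR_MASK, fw_rev),
		FIELD_GET(FW_REV_MINOR_MASK, fw_rev),
		FIELD_GET(FW_REV_PATCH_MASK, fw_rev));
}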
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 0329d319d89a..269501dfba53 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for linux kernel
#
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index dedf9bde44db..adaa4a964f0c 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# The stub may be linked into the kernel proper or into a separate boot binary,
# but in either case, it executes before the kernel does (with MMU disabled) so
@@ -33,13 +34,14 @@ lib-y := efi-stub-helper.o gop.o secureboot.o
lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
-arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
+arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
+arm-deps-$(CONFIG_ARM64) += sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
- $(patsubst %.c,lib-%.o,$(arm-deps))
+ $(patsubst %.c,lib-%.o,$(arm-deps-y))
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
@@ -90,5 +92,4 @@ quiet_cmd_stubcopy = STUBCPY $@
# explicitly by the decompressor linker script.
#
STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
-STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index a94601d5939e..01a9d78ee415 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -350,7 +350,9 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
* The easiest way to find adjacent regions is to sort the memory map
* before traversing it.
*/
- sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+ if (IS_ENABLED(CONFIG_ARM64))
+ sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc,
+ NULL);
for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size;
@@ -367,7 +369,8 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- if (!regions_are_adjacent(prev, in) ||
+ if ((IS_ENABLED(CONFIG_ARM64) &&
+ !regions_are_adjacent(prev, in)) ||
!regions_have_compatible_memory_type_attrs(prev, in)) {
paddr = round_down(in->phys_addr, SZ_64K);
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 83f268c05007..f59564b72ddc 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DRIVERS_FIRMWARE_EFI_EFISTUB_H
#define _DRIVERS_FIRMWARE_EFI_EFISTUB_H
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
index 09d5a0894343..ed10e3f602c5 100644
--- a/drivers/firmware/efi/libstub/string.c
+++ b/drivers/firmware/efi/libstub/string.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Taken from:
* linux/lib/string.c
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 78686443cb37..5fc70520e04c 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common EFI memory map functions.
*/
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 22874544d301..7effff969eb9 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Intel Corporation; author Matt Fleming
* Copyright (c) 2014 Red Hat, Inc., Mark Salter <msalter@redhat.com>
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index a33a6c633852..9812c6a02b40 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* EFI Test driver Header
*
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index bc4de02202ad..dcd3675efcfc 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 6523ce962865..f3f4f810e5df 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -220,7 +220,7 @@ out_free_cpus:
return err;
}
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
static int suspend_cpu(int index, bool broadcast)
{
@@ -287,7 +287,7 @@ static int suspend_test_thread(void *arg)
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
- setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
@@ -340,6 +340,7 @@ static int suspend_test_thread(void *arg)
* later.
*/
del_timer(&wakeup_timer);
+ destroy_timer_on_stack(&wakeup_timer);
if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
complete(&suspend_threads_done);
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 93e3b96b6dfa..dfbd894d5bb7 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -561,6 +561,12 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
return ret ? : le32_to_cpu(out);
}
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+ enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
+}
+
int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
struct {
@@ -579,6 +585,13 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : le32_to_cpu(scm_ret);
}
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ return -ENODEV;
+}
+
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare)
{
@@ -596,3 +609,21 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
{
return -ENODEV;
}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
+ if (ret >= 0)
+ *val = ret;
+
+ return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+ return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+ addr, val);
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 6e6d561708e2..688525dd4aee 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -382,6 +382,33 @@ int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
return ret ? : res.a1;
}
+int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+ size_t mem_sz, phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = mem_region;
+ desc.args[1] = mem_sz;
+ desc.args[2] = src;
+ desc.args[3] = src_sz;
+ desc.args[4] = dest;
+ desc.args[5] = dest_sz;
+ desc.args[6] = 0;
+
+ desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+ QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+ QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
+ QCOM_MEM_PROT_ASSIGN_ID,
+ &desc, &res);
+
+ return ret ? : res.a1;
+}
+
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
{
struct qcom_scm_desc desc = {0};
@@ -439,3 +466,47 @@ int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
return ret;
}
+
+int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
+ desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
+ &desc, &res);
+}
+
+int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
+ unsigned int *val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+ int ret;
+
+ desc.args[0] = addr;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
+ &desc, &res);
+ if (ret >= 0)
+ *val = res.a1;
+
+ return ret < 0 ? ret : 0;
+}
+
+int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = addr;
+ desc.args[1] = val;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
+ &desc, &res);
+}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index bb16510d75ba..af4c75217ea6 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -19,15 +19,20 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include "qcom_scm.h"
+static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
+module_param(download_mode, bool, 0);
+
#define SCM_HAS_CORE_CLK BIT(0)
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)
@@ -38,6 +43,21 @@ struct qcom_scm {
struct clk *iface_clk;
struct clk *bus_clk;
struct reset_controller_dev reset;
+
+ u64 dload_mode_addr;
+};
+
+struct qcom_scm_current_perm_info {
+ __le32 vmid;
+ __le32 perm;
+ __le64 ctx;
+ __le32 ctx_size;
+ __le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+ __le64 mem_addr;
+ __le64 mem_size;
};
static struct qcom_scm *__scm;
@@ -333,6 +353,66 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+{
+ return __qcom_scm_io_readl(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_readl);
+
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+{
+ return __qcom_scm_io_writel(__scm->dev, addr, val);
+}
+EXPORT_SYMBOL(qcom_scm_io_writel);
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+ bool avail;
+ int ret = 0;
+
+ avail = __qcom_scm_is_call_available(__scm->dev,
+ QCOM_SCM_SVC_BOOT,
+ QCOM_SCM_SET_DLOAD_MODE);
+ if (avail) {
+ ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+ } else if (__scm->dload_mode_addr) {
+ ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
+ enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
+ } else {
+ dev_err(__scm->dev,
+ "No available mechanism for setting download mode\n");
+ }
+
+ if (ret)
+ dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
+
+static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
+{
+ struct device_node *tcsr;
+ struct device_node *np = dev->of_node;
+ struct resource res;
+ u32 offset;
+ int ret;
+
+ tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
+ if (!tcsr)
+ return 0;
+
+ ret = of_address_to_resource(tcsr, 0, &res);
+ of_node_put(tcsr);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
+ if (ret < 0)
+ return ret;
+
+ *addr = res.start + offset;
+
+ return 0;
+}
+
/**
* qcom_scm_is_available() - Checks if SCM is available
*/
@@ -348,6 +428,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership needs to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid for current set of owners, each set bit in
+ * flag indicates a unique owner
+ * @newvm: array of new owners and corresponding permission
+ * flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *srcvm,
+ struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+ struct qcom_scm_current_perm_info *destvm;
+ struct qcom_scm_mem_map_info *mem_to_map;
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+ size_t ptr_sz;
+ int next_vm;
+ __le32 *src;
+ void *ptr;
+ int ret;
+ int len;
+ int i;
+
+ src_sz = hweight_long(*srcvm) * sizeof(*src);
+ mem_to_map_sz = sizeof(*mem_to_map);
+ dest_sz = dest_cnt * sizeof(*destvm);
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ /* Fill source vmid detail */
+ src = ptr;
+ len = hweight_long(*srcvm);
+ for (i = 0; i < len; i++) {
+ src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+ *srcvm ^= 1 << (ffs(*srcvm) - 1);
+ }
+
+ /* Fill details of mem buff to map */
+ mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+ mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+ mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+ mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+ next_vm = 0;
+ /* Fill in the destination vmids and their permissions */
+ destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+ for (i = 0; i < dest_cnt; i++) {
+ destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+ destvm[i].perm = cpu_to_le32(newvm[i].perm);
+ destvm[i].ctx = 0;
+ destvm[i].ctx_size = 0;
+ next_vm |= BIT(newvm[i].vmid);
+ }
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+ dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+ return -EINVAL;
+ }
+
+ *srcvm = next_vm;
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
@@ -358,6 +520,10 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (!scm)
return -ENOMEM;
+ ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
+ if (ret < 0)
+ return ret;
+
clks = (unsigned long)of_device_get_match_data(&pdev->dev);
if (clks & SCM_HAS_CORE_CLK) {
scm->core_clk = devm_clk_get(&pdev->dev, "core");
@@ -406,9 +572,24 @@ static int qcom_scm_probe(struct platform_device *pdev)
__qcom_scm_init();
+ /*
+ * If requested, enable "download mode"; from this point on a warm boot
+ * will cause the boot stages to enter download mode, unless
+ * disabled below by a clean shutdown/reboot.
+ */
+ if (download_mode)
+ qcom_scm_set_download_mode(true);
+
return 0;
}
+static void qcom_scm_shutdown(struct platform_device *pdev)
+{
+ /* Clean shutdown, disable download mode to allow normal restart */
+ if (download_mode)
+ qcom_scm_set_download_mode(false);
+}
+
static const struct of_device_id qcom_scm_dt_match[] = {
{ .compatible = "qcom,scm-apq8064",
/* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
@@ -436,6 +617,7 @@ static struct platform_driver qcom_scm_driver = {
.of_match_table = qcom_scm_dt_match,
},
.probe = qcom_scm_probe,
+ .shutdown = qcom_scm_shutdown,
};
static int __init qcom_scm_init(void)
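For context, a rough sketch of how a caller might use the new qcom_scm_assign_mem() to hand a physically contiguous region from the current owner to another VM. The VMID and permission values below are placeholders for illustration, not constants defined by this patch, and the sketch assumes the prototype and struct qcom_scm_vmperm are exposed via linux/qcom_scm.h, as the driver's own include suggests:

#include <linux/bitops.h>
#include <linux/qcom_scm.h>

#define EXAMPLE_VMID_CURRENT	3	/* placeholder: current owner */
#define EXAMPLE_VMID_REMOTE	21	/* placeholder: new owner */
#define EXAMPLE_PERM_RW		0x6	/* placeholder: read/write permission bits */

static int example_share_region(phys_addr_t paddr, size_t size)
{
	struct qcom_scm_vmperm newvm = {
		.vmid = EXAMPLE_VMID_REMOTE,
		.perm = EXAMPLE_PERM_RW,
	};
	unsigned int srcvm = BIT(EXAMPLE_VMID_CURRENT);	/* one bit per current owner */
	int ret;

	ret = qcom_scm_assign_mem(paddr, size, &srcvm, &newvm, 1);
	if (ret)
		return ret;

	/* On success srcvm now holds the bitmap of the new owners. */
	return 0;
}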
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 9bea691f30fb..dcd7f7917fc7 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -14,9 +14,11 @@
#define QCOM_SCM_SVC_BOOT 0x1
#define QCOM_SCM_BOOT_ADDR 0x1
+#define QCOM_SCM_SET_DLOAD_MODE 0x10
#define QCOM_SCM_BOOT_ADDR_MC 0x11
#define QCOM_SCM_SET_REMOTE_STATE 0xa
extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id);
+extern int __qcom_scm_set_dload_mode(struct device *dev, bool enable);
#define QCOM_SCM_FLAG_HLOS 0x01
#define QCOM_SCM_FLAG_COLDBOOT_MC 0x02
@@ -30,6 +32,12 @@ extern int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
#define QCOM_SCM_CMD_CORE_HOTPLUGGED 0x10
extern void __qcom_scm_cpu_power_down(u32 flags);
+#define QCOM_SCM_SVC_IO 0x5
+#define QCOM_SCM_IO_READ 0x1
+#define QCOM_SCM_IO_WRITE 0x2
+extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
+extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
+
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
extern int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
@@ -95,5 +103,10 @@ extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+#define QCOM_MEM_PROT_ASSIGN_ID 0x16
+extern int __qcom_scm_assign_mem(struct device *dev,
+ phys_addr_t mem_region, size_t mem_sz,
+ phys_addr_t src, size_t src_sz,
+ phys_addr_t dest, size_t dest_sz);
#endif
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 0e2011636fbb..5cfe39f7a45f 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -10,9 +10,9 @@
* and select subsets of aarch64), a Device Tree node (on arm), or using
* a kernel module (or command line) parameter with the following syntax:
*
- * [fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
+ * [qemu_fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
* or
- * [fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
+ * [qemu_fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
*
* where:
* <size> := size of ioport or mmio range
@@ -21,9 +21,9 @@
* <data_off> := (optional) offset of data register
*
* e.g.:
- * fw_cfg.ioport=2@0x510:0:1 (the default on x86)
+ * qemu_fw_cfg.ioport=2@0x510:0:1 (the default on x86)
* or
- * fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
+ * qemu_fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
*/
#include <linux/module.h>
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index e34a2f79e1ad..1b826dcca719 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,2 +1,4 @@
-obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
+obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
new file mode 100644
index 000000000000..f7f6a0a5cb07
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct seqbuf {
+ char *buf;
+ size_t pos;
+ size_t size;
+};
+
+static void seqbuf_init(struct seqbuf *seqbuf, void *buf, size_t size)
+{
+ seqbuf->buf = buf;
+ seqbuf->size = size;
+ seqbuf->pos = 0;
+}
+
+static size_t seqbuf_avail(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos < seqbuf->size ? seqbuf->size - seqbuf->pos : 0;
+}
+
+static int seqbuf_status(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos <= seqbuf->size ? 0 : -EOVERFLOW;
+}
+
+static int seqbuf_eof(struct seqbuf *seqbuf)
+{
+ return seqbuf->pos >= seqbuf->size;
+}
+
+static int seqbuf_read(struct seqbuf *seqbuf, void *buf, size_t nbyte)
+{
+ nbyte = min(nbyte, seqbuf_avail(seqbuf));
+ memcpy(buf, seqbuf->buf + seqbuf->pos, nbyte);
+ seqbuf->pos += nbyte;
+ return seqbuf_status(seqbuf);
+}
+
+static int seqbuf_read_u32(struct seqbuf *seqbuf, uint32_t *v)
+{
+ int err;
+
+ err = seqbuf_read(seqbuf, v, 4);
+ *v = le32_to_cpu(*v);
+ return err;
+}
+
+static int seqbuf_read_str(struct seqbuf *seqbuf, const char **str)
+{
+ *str = seqbuf->buf + seqbuf->pos;
+ seqbuf->pos += strnlen(*str, seqbuf_avail(seqbuf));
+ seqbuf->pos++;
+ return seqbuf_status(seqbuf);
+}
+
+static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
+{
+ seqbuf->pos += offset;
+}
+
+/* map filename in Linux debugfs to corresponding entry in BPMP */
+static const char *get_filename(struct tegra_bpmp *bpmp,
+ const struct file *file, char *buf, int size)
+{
+ char root_path_buf[512];
+ const char *root_path;
+ const char *filename;
+ size_t root_len;
+
+ root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
+ sizeof(root_path_buf));
+ if (IS_ERR(root_path))
+ return NULL;
+
+ root_len = strlen(root_path);
+
+ filename = dentry_path(file->f_path.dentry, buf, size);
+ if (IS_ERR(filename))
+ return NULL;
+
+ if (strlen(filename) < root_len ||
+ strncmp(filename, root_path, root_len))
+ return NULL;
+
+ filename += root_len;
+
+ return filename;
+}
+
+static int mrq_debugfs_read(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data,
+ size_t *nbytes)
+{
+ struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_READ),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+
+ *nbytes = (size_t)resp.fop.nbytes;
+
+ return 0;
+}
+
+static int mrq_debugfs_write(struct tegra_bpmp *bpmp,
+ dma_addr_t name, size_t sz_name,
+ dma_addr_t data, size_t sz_data)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_WRITE),
+ .fop = {
+ .fnameaddr = cpu_to_le32((uint32_t)name),
+ .fnamelen = cpu_to_le32((uint32_t)sz_name),
+ .dataaddr = cpu_to_le32((uint32_t)data),
+ .datalen = cpu_to_le32((uint32_t)sz_data),
+ },
+ };
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ };
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int mrq_debugfs_dumpdir(struct tegra_bpmp *bpmp, dma_addr_t addr,
+ size_t size, size_t *nbytes)
+{
+ const struct mrq_debugfs_request req = {
+ .cmd = cpu_to_le32(CMD_DEBUGFS_DUMPDIR),
+ .dumpdir = {
+ .dataaddr = cpu_to_le32((uint32_t)addr),
+ .datalen = cpu_to_le32((uint32_t)size),
+ },
+ };
+ struct mrq_debugfs_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_DEBUGFS,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int err;
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+
+ *nbytes = (size_t)resp.dumpdir.nbytes;
+
+ return 0;
+}
+
+static int debugfs_show(struct seq_file *m, void *p)
+{
+ struct file *file = m->private;
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = m->size;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char buf[256];
+ const char *filename;
+ size_t len, nbytes;
+ int ret;
+
+ filename = get_filename(bpmp, file, buf, sizeof(buf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ ret = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ ret = mrq_debugfs_read(bpmp, namephys, len, dataphys, datasize,
+ &nbytes);
+
+ if (!ret)
+ seq_write(m, datavirt, nbytes);
+
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return ret;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open_size(file, debugfs_show, file, SZ_128K);
+}
+
+static ssize_t debugfs_store(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct inode *inode = file_inode(file);
+ struct tegra_bpmp *bpmp = inode->i_private;
+ const size_t datasize = count;
+ const size_t namesize = SZ_256;
+ void *datavirt, *namevirt;
+ dma_addr_t dataphys, namephys;
+ char fnamebuf[256];
+ const char *filename;
+ size_t len;
+ int ret;
+
+ filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf));
+ if (!filename)
+ return -ENOENT;
+
+ namevirt = dma_alloc_coherent(bpmp->dev, namesize, &namephys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!namevirt)
+ return -ENOMEM;
+
+ datavirt = dma_alloc_coherent(bpmp->dev, datasize, &dataphys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!datavirt) {
+ ret = -ENOMEM;
+ goto free_namebuf;
+ }
+
+ len = strlen(filename);
+ strncpy(namevirt, filename, namesize);
+
+ if (copy_from_user(datavirt, buf, count)) {
+ ret = -EFAULT;
+ goto free_databuf;
+ }
+
+ ret = mrq_debugfs_write(bpmp, namephys, len, dataphys,
+ count);
+
+free_databuf:
+ dma_free_coherent(bpmp->dev, datasize, datavirt, dataphys);
+free_namebuf:
+ dma_free_coherent(bpmp->dev, namesize, namevirt, namephys);
+
+ return ret ?: count;
+}
+
+static const struct file_operations debugfs_fops = {
+ .open = debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = debugfs_store,
+ .release = single_release,
+};
+
+static int bpmp_populate_dir(struct tegra_bpmp *bpmp, struct seqbuf *seqbuf,
+ struct dentry *parent, uint32_t depth)
+{
+ int err;
+ uint32_t d, t;
+ const char *name;
+ struct dentry *dentry;
+
+ while (!seqbuf_eof(seqbuf)) {
+ err = seqbuf_read_u32(seqbuf, &d);
+ if (err < 0)
+ return err;
+
+ if (d < depth) {
+ seqbuf_seek(seqbuf, -4);
+ /* go up a level */
+ return 0;
+ } else if (d != depth) {
+ /* malformed data received from BPMP */
+ return -EIO;
+ }
+
+ err = seqbuf_read_u32(seqbuf, &t);
+ if (err < 0)
+ return err;
+ err = seqbuf_read_str(seqbuf, &name);
+ if (err < 0)
+ return err;
+
+ if (t & DEBUGFS_S_ISDIR) {
+ dentry = debugfs_create_dir(name, parent);
+ if (!dentry)
+ return -ENOMEM;
+ err = bpmp_populate_dir(bpmp, seqbuf, dentry, depth+1);
+ if (err < 0)
+ return err;
+ } else {
+ umode_t mode;
+
+ mode = t & DEBUGFS_S_IRUSR ? S_IRUSR : 0;
+ mode |= t & DEBUGFS_S_IWUSR ? S_IWUSR : 0;
+ dentry = debugfs_create_file(name, mode,
+ parent, bpmp,
+ &debugfs_fops);
+ if (!dentry)
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int create_debugfs_mirror(struct tegra_bpmp *bpmp, void *buf,
+ size_t bufsize, struct dentry *root)
+{
+ struct seqbuf seqbuf;
+ int err;
+
+ bpmp->debugfs_mirror = debugfs_create_dir("debug", root);
+ if (!bpmp->debugfs_mirror)
+ return -ENOMEM;
+
+ seqbuf_init(&seqbuf, buf, bufsize);
+ err = bpmp_populate_dir(bpmp, &seqbuf, bpmp->debugfs_mirror, 0);
+ if (err < 0) {
+ debugfs_remove_recursive(bpmp->debugfs_mirror);
+ bpmp->debugfs_mirror = NULL;
+ }
+
+ return err;
+}
+
+static int mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
+{
+ struct mrq_query_abi_request req = { .mrq = cpu_to_le32(mrq) };
+ struct mrq_query_abi_response resp;
+ struct tegra_bpmp_message msg = {
+ .mrq = MRQ_QUERY_ABI,
+ .tx = {
+ .data = &req,
+ .size = sizeof(req),
+ },
+ .rx = {
+ .data = &resp,
+ .size = sizeof(resp),
+ },
+ };
+ int ret;
+
+ ret = tegra_bpmp_transfer(bpmp, &msg);
+ if (ret < 0) {
+ /* something went wrong; assume not supported */
+ dev_warn(bpmp->dev, "tegra_bpmp_transfer failed (%d)\n", ret);
+ return 0;
+ }
+
+ return resp.status ? 0 : 1;
+}
+
+int tegra_bpmp_init_debugfs(struct tegra_bpmp *bpmp)
+{
+ dma_addr_t phys;
+ void *virt;
+ const size_t sz = SZ_256K;
+ size_t nbytes;
+ int ret;
+ struct dentry *root;
+
+ if (!mrq_is_supported(bpmp, MRQ_DEBUGFS))
+ return 0;
+
+ root = debugfs_create_dir("bpmp", NULL);
+ if (!root)
+ return -ENOMEM;
+
+ virt = dma_alloc_coherent(bpmp->dev, sz, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mrq_debugfs_dumpdir(bpmp, phys, sz, &nbytes);
+ if (ret < 0)
+ goto free;
+
+ ret = create_debugfs_mirror(bpmp, virt, nbytes, root);
+free:
+ dma_free_coherent(bpmp->dev, sz, virt, phys);
+out:
+ if (ret < 0)
+ debugfs_remove(root);
+
+ return ret;
+}
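With the mirror populated, every file the BPMP firmware reports shows up under the host's debugfs at <debugfs>/bpmp/debug/ (typically /sys/kernel/debug/bpmp/debug/; the "bpmp" and "debug" directory names come from the two debugfs_create_dir() calls above). Reading such a file sends CMD_DEBUGFS_READ over MRQ_DEBUGFS with the BPMP-side path name and a DMA-coherent data buffer, and writing sends CMD_DEBUGFS_WRITE the same way.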
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 73ca55b7b7ec..a7f461f2e650 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -194,16 +194,24 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
}
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
- void *data, size_t size)
+ void *data, size_t size, int *ret)
{
+ int err;
+
if (data && size > 0)
memcpy(data, channel->ib->data, size);
- return tegra_ivc_read_advance(channel->ivc);
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (err < 0)
+ return err;
+
+ *ret = channel->ib->code;
+
+ return 0;
}
static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
- void *data, size_t size)
+ void *data, size_t size, int *ret)
{
struct tegra_bpmp *bpmp = channel->bpmp;
unsigned long flags;
@@ -217,7 +225,7 @@ static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
}
spin_lock_irqsave(&bpmp->lock, flags);
- err = __tegra_bpmp_channel_read(channel, data, size);
+ err = __tegra_bpmp_channel_read(channel, data, size, ret);
clear_bit(index, bpmp->threaded.allocated);
spin_unlock_irqrestore(&bpmp->lock, flags);
@@ -337,7 +345,8 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
if (err < 0)
return err;
- return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
@@ -371,7 +380,8 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
if (err == 0)
return -ETIMEDOUT;
- return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
+ &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
@@ -387,8 +397,8 @@ static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
return NULL;
}
-static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
- int code, const void *data, size_t size)
+void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
+ const void *data, size_t size)
{
unsigned long flags = channel->ib->flags;
struct tegra_bpmp *bpmp = channel->bpmp;
@@ -426,6 +436,7 @@ static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
mbox_client_txdone(bpmp->mbox.channel, 0);
}
}
+EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
unsigned int mrq,
@@ -824,6 +835,10 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
+ err = tegra_bpmp_init_debugfs(bpmp);
+ if (err < 0)
+ dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);
+
return 0;
free_mrq:
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index a01461d63f68..00de793e6423 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
{
/*
* This function performs multiple checks on the same values with
- * security implications, so create snapshots with ACCESS_ONCE() to
+ * security implications, so create snapshots with READ_ONCE() to
* ensure that these checks use the same values.
*/
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Perform an over-full check to prevent denial of service attacks
@@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Invalid cases where the counters indicate that the queue is over
@@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* This function isn't expected to be used in scenarios where an
@@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->tx.channel->tx.count) =
- ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+ WRITE_ONCE(ivc->tx.channel->tx.count,
+ READ_ONCE(ivc->tx.channel->tx.count) + 1);
if (ivc->tx.position == ivc->num_frames - 1)
ivc->tx.position = 0;
@@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->rx.channel->rx.count) =
- ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+ WRITE_ONCE(ivc->rx.channel->rx.count,
+ READ_ONCE(ivc->rx.channel->rx.count) + 1);
if (ivc->rx.position == ivc->num_frames - 1)
ivc->rx.position = 0;
@@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc)
/* Copy the receiver's state out of shared memory. */
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
- state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+ state = READ_ONCE(ivc->rx.channel->tx.state);
if (state == TEGRA_IVC_STATE_SYNC) {
offset = offsetof(struct tegra_ivc_header, tx.count);
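
The ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE() conversions above all serve the same snapshot pattern: each shared counter is read exactly once, and every subsequent check is made against those local copies, so a concurrently updating producer cannot make the checks disagree with each other. A minimal userspace sketch of that pattern follows; the READ_ONCE/WRITE_ONCE stand-ins and the queue_header layout are illustrative only, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() (illustrative only). */
#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

struct queue_header {
	uint32_t tx_count;	/* frames produced */
	uint32_t rx_count;	/* frames consumed */
};

/* Snapshot both counters once, then derive every check from the snapshots. */
static int queue_empty(struct queue_header *hdr, uint32_t num_frames)
{
	uint32_t tx = READ_ONCE(hdr->tx_count);
	uint32_t rx = READ_ONCE(hdr->rx_count);

	/* Over-full (corrupted or hostile) counters are also reported empty. */
	if (tx - rx > num_frames)
		return 1;
	return tx == rx;
}

static void advance_tx(struct queue_header *hdr)
{
	WRITE_ONCE(hdr->tx_count, READ_ONCE(hdr->tx_count) + 1);
}

int main(void)
{
	struct queue_header hdr = { 0, 0 };

	printf("empty=%d\n", queue_empty(&hdr, 16));	/* 1 */
	advance_tx(&hdr);
	printf("empty=%d\n", queue_empty(&hdr, 16));	/* 0 */
	return 0;
}
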
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 00cfed3c3e1a..23b12d99ddfe 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -439,7 +439,7 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout)) {
- dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+ dev_err(dev, "Mbox timedout in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
}
diff --git a/drivers/fmc/Makefile b/drivers/fmc/Makefile
index e809322e1bac..e3da6192cf39 100644
--- a/drivers/fmc/Makefile
+++ b/drivers/fmc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FMC) += fmc.o
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index e09895f0525b..f98dcf1d89e1 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the fpga framework and fpga manager drivers.
#
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index e359930bebc8..0d7743089414 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -79,7 +79,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
return !status;
}
-static struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
+static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_set = xlnx_pr_decoupler_enable_set,
.enable_show = xlnx_pr_decoupler_enable_show,
};
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 4ea63d9bd131..e318bf8c623c 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -185,7 +185,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
return 0;
}
-int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
+static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
uint32_t irq, stat;
@@ -215,8 +215,8 @@ int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
-int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
- size_t size)
+static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
+ uint32_t addr, size_t size)
{
struct fsi_master *master = slave->master;
int rc, link;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3f80f167ed56..d6a8e851ad13 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,7 +139,7 @@ config GPIO_BRCMSTB
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
depends on OF_GPIO && (ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST)
select GPIO_GENERIC
- select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN
help
Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
@@ -286,8 +286,7 @@ config GPIO_LYNXPOINT
Requires ACPI device enumeration code to set up a platform device.
config GPIO_MB86S7X
- bool "GPIO support for Fujitsu MB86S7x Platforms"
- depends on ARCH_MB86S7X || COMPILE_TEST
+ tristate "GPIO support for Fujitsu MB86S7x Platforms"
help
Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
@@ -442,6 +441,15 @@ config GPIO_TEGRA
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
+config GPIO_TEGRA186
+ tristate "NVIDIA Tegra186 GPIO support"
+ default ARCH_TEGRA_186_SOC
+ depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
+ depends on OF_GPIO
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra186 SoCs.
+
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
depends on OF_GPIO
@@ -475,6 +483,14 @@ config GPIO_TZ1090_PDC
help
Say yes here to support Toumaz Xenif TZ1090 PDC GPIOs.
+config GPIO_UNIPHIER
+ tristate "UniPhier GPIO support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF_GPIO
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support UniPhier GPIOs.
+
config GPIO_VF610
def_bool y
depends on ARCH_MXC && SOC_VF610
@@ -818,15 +834,6 @@ config GPIO_PCF857X
This driver provides an in-kernel interface to those GPIOs using
platform-neutral GPIO calls.
-config GPIO_SX150X
- bool "Semtech SX150x I2C GPIO expander (deprecated)"
- depends on PINCTRL && I2C=y
- select PINCTRL_SX150X
- default n
- help
- Say yes here to provide support for Semtech SX150x-series I2C
- GPIO expanders. The GPIO driver was replaced by a Pinctrl version.
-
config GPIO_TPIC2810
tristate "TPIC2810 8-Bit I2C GPO expander"
help
@@ -1256,6 +1263,16 @@ config GPIO_74X164
shift registers. This driver can be used to provide access
to more gpio outputs.
+config GPIO_MAX3191X
+ tristate "Maxim MAX3191x industrial serializer"
+ select CRC8
+ help
+ GPIO driver for Maxim MAX31910, MAX31911, MAX31912, MAX31913,
+ MAX31953 and MAX31963 industrial serializer, a daisy-chainable
+ chip to make 8 digital 24V inputs available via SPI. Supports
+ CRC checksums to guard against electromagnetic interference,
+ as well as undervoltage and overtemperature detection.
+
config GPIO_MAX7301
tristate "Maxim MAX7301 GPIO expander"
select GPIO_MAX730X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index aeb70e9de6f2..4bc24febb889 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# generic gpio support: platform drivers, dedicated expander chips, etc
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
@@ -69,6 +70,7 @@ obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
+obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
@@ -113,6 +115,7 @@ obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_TEGRA186) += gpio-tegra186.o
obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
@@ -131,6 +134,7 @@ obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_TZ1090) += gpio-tz1090.o
obj-$(CONFIG_GPIO_TZ1090_PDC) += gpio-tz1090-pdc.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 598e209efa2d..bab3b94c5cbc 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -326,7 +326,7 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
unsigned long gpio;
for_each_set_bit(gpio, &irq_mask, 2)
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
19 + gpio*24));
raw_spin_lock(&dio48egpio->lock);
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 51f046e29ff7..add859d59766 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -209,7 +209,7 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
for_each_set_bit(bit_num, &irq_mask, 8) {
gpio = bit_num + boundary * 8;
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
gpio));
}
}
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ec2ce34ff473..2f16638a0589 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -199,7 +199,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
int gpio;
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
raw_spin_lock(&idio16gpio->lock);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 89863ea25de1..44c09904daa6 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -192,28 +192,20 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
mutex_lock(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- return;
- }
+ if (err < 0)
+ goto unlock;
mutex_unlock(&adnp->i2c_lock);
@@ -240,6 +232,11 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
direction, level, interrupt, pending);
}
}
+
+ return;
+
+unlock:
+ mutex_unlock(&adnp->i2c_lock);
}
static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
@@ -323,7 +320,7 @@ static irqreturn_t adnp_irq(int irq, void *data)
for_each_set_bit(bit, &pending, 8) {
unsigned int child_irq;
- child_irq = irq_find_mapping(adnp->gpio.irqdomain,
+ child_irq = irq_find_mapping(adnp->gpio.irq.domain,
base + bit);
handle_nested_irq(child_irq);
}
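
The adnp_gpio_dbg_show() change above replaces four copies of unlock-and-return with a single goto-based exit path, so the mutex is dropped in exactly one place on the error paths. A minimal sketch of that single-exit shape, using stand-in names (read_reg() and a pthread mutex instead of the driver's register accessors and i2c_lock):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend hardware access; always succeeds in this sketch. */
static int read_reg(int reg, int *val)
{
	*val = reg * 2;
	return 0;
}

static void dump_regs(void)
{
	int ddr, plr, ier;

	pthread_mutex_lock(&lock);

	if (read_reg(0, &ddr) < 0)
		goto unlock;
	if (read_reg(1, &plr) < 0)
		goto unlock;
	if (read_reg(2, &ier) < 0)
		goto unlock;

	pthread_mutex_unlock(&lock);

	printf("ddr=%d plr=%d ier=%d\n", ddr, plr, ier);
	return;

unlock:
	/* single error exit: the lock is released in exactly one place */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	dump_regs();
	return 0;
}
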
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index ccc02ed65b3c..8e76d390e653 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -211,7 +211,7 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irqdomain;
+ irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
@@ -239,7 +239,7 @@ static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
altera_gc = gpiochip_get_data(irq_desc_get_handler_data(desc));
chip = irq_desc_get_chip(desc);
mm_gc = &altera_gc->mmchip;
- irqdomain = altera_gc->mmchip.gc.irqdomain;
+ irqdomain = altera_gc->mmchip.gc.irq.domain;
chained_irq_enter(chip, desc);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index bfc53995064a..6b3ca6601af2 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -411,13 +411,16 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_BOTH:
type2 |= bit;
+ /* fall through */
case IRQ_TYPE_EDGE_RISING:
type0 |= bit;
+ /* fall through */
case IRQ_TYPE_EDGE_FALLING:
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
type0 |= bit;
+ /* fall through */
case IRQ_TYPE_LEVEL_LOW:
type1 |= bit;
handler = handle_level_irq;
@@ -466,7 +469,7 @@ static void aspeed_gpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_irq_reg(data, bank, GPIO_IRQ_STATUS));
for_each_set_bit(p, &reg, 32) {
- girq = irq_find_mapping(gc->irqdomain, i * 32 + p);
+ girq = irq_find_mapping(gc->irq.domain, i * 32 + p);
generic_handle_irq(girq);
}
@@ -498,7 +501,7 @@ static void set_irq_valid_mask(struct aspeed_gpio *gpio)
if (i >= gpio->config->nr_gpios)
break;
- clear_bit(i, gpio->chip.irq_valid_mask);
+ clear_bit(i, gpio->chip.irq.valid_mask);
}
props++;
@@ -536,12 +539,12 @@ static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
if (!have_gpio(gpiochip_get_data(chip), offset))
return -ENODEV;
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static inline void __iomem *bank_debounce_reg(struct aspeed_gpio *gpio,
@@ -853,7 +856,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
- gpio->chip.irq_need_valid_mask = true;
+ gpio->chip.irq.need_valid_mask = true;
rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (rc < 0)
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index f33d4a5fe671..5fad89dfab7e 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -132,6 +132,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
case IRQ_TYPE_LEVEL_HIGH:
polarity |= mask;
+ /* fall through */
case IRQ_TYPE_LEVEL_LOW:
type |= mask;
break;
@@ -208,7 +209,7 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
if (pending) {
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_irq(
- irq_linear_revmap(gc->irqdomain, irq));
+ irq_linear_revmap(gc->irq.domain, irq));
}
chained_irq_exit(irqchip, desc);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index dd0308cc8bb0..545d43a587b7 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2015-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -19,17 +19,30 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/reboot.h>
-
-#define GIO_BANK_SIZE 0x20
-#define GIO_ODEN(bank) (((bank) * GIO_BANK_SIZE) + 0x00)
-#define GIO_DATA(bank) (((bank) * GIO_BANK_SIZE) + 0x04)
-#define GIO_IODIR(bank) (((bank) * GIO_BANK_SIZE) + 0x08)
-#define GIO_EC(bank) (((bank) * GIO_BANK_SIZE) + 0x0c)
-#define GIO_EI(bank) (((bank) * GIO_BANK_SIZE) + 0x10)
-#define GIO_MASK(bank) (((bank) * GIO_BANK_SIZE) + 0x14)
-#define GIO_LEVEL(bank) (((bank) * GIO_BANK_SIZE) + 0x18)
-#define GIO_STAT(bank) (((bank) * GIO_BANK_SIZE) + 0x1c)
+#include <linux/bitops.h>
+
+enum gio_reg_index {
+ GIO_REG_ODEN = 0,
+ GIO_REG_DATA,
+ GIO_REG_IODIR,
+ GIO_REG_EC,
+ GIO_REG_EI,
+ GIO_REG_MASK,
+ GIO_REG_LEVEL,
+ GIO_REG_STAT,
+ NUMBER_OF_GIO_REGISTERS
+};
+
+#define GIO_BANK_SIZE (NUMBER_OF_GIO_REGISTERS * sizeof(u32))
+#define GIO_BANK_OFF(bank, off) (((bank) * GIO_BANK_SIZE) + (off * sizeof(u32)))
+#define GIO_ODEN(bank) GIO_BANK_OFF(bank, GIO_REG_ODEN)
+#define GIO_DATA(bank) GIO_BANK_OFF(bank, GIO_REG_DATA)
+#define GIO_IODIR(bank) GIO_BANK_OFF(bank, GIO_REG_IODIR)
+#define GIO_EC(bank) GIO_BANK_OFF(bank, GIO_REG_EC)
+#define GIO_EI(bank) GIO_BANK_OFF(bank, GIO_REG_EI)
+#define GIO_MASK(bank) GIO_BANK_OFF(bank, GIO_REG_MASK)
+#define GIO_LEVEL(bank) GIO_BANK_OFF(bank, GIO_REG_LEVEL)
+#define GIO_STAT(bank) GIO_BANK_OFF(bank, GIO_REG_STAT)
struct brcmstb_gpio_bank {
struct list_head node;
@@ -37,21 +50,23 @@ struct brcmstb_gpio_bank {
struct gpio_chip gc;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
- struct irq_chip irq_chip;
+ u32 wake_active;
+ u32 saved_regs[GIO_REG_STAT]; /* Don't save and restore GIO_REG_STAT */
};
struct brcmstb_gpio_priv {
struct list_head bank_list;
void __iomem *reg_base;
struct platform_device *pdev;
+ struct irq_domain *irq_domain;
+ struct irq_chip irq_chip;
int parent_irq;
int gpio_base;
- bool can_wake;
+ int num_gpios;
int parent_wake_irq;
- struct notifier_block reboot_notifier;
};
-#define MAX_GPIO_PER_BANK 32
+#define MAX_GPIO_PER_BANK 32
#define GPIO_BANK(gpio) ((gpio) >> 5)
/* assumes MAX_GPIO_PER_BANK is a multiple of 2 */
#define GPIO_BIT(gpio) ((gpio) & (MAX_GPIO_PER_BANK - 1))
@@ -63,12 +78,40 @@ brcmstb_gpio_gc_to_priv(struct gpio_chip *gc)
return bank->parent_priv;
}
+static unsigned long
+__brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
+{
+ void __iomem *reg_base = bank->parent_priv->reg_base;
+
+ return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
+ bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+}
+
+static unsigned long
+brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
+{
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ status = __brcmstb_gpio_get_active_irqs(bank);
+ spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
+
+ return status;
+}
+
+static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
+ struct brcmstb_gpio_bank *bank)
+{
+ return hwirq - (bank->gc.base - bank->parent_priv->gpio_base);
+}
+
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
- unsigned int offset, bool enable)
+ unsigned int hwirq, bool enable)
{
struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- u32 mask = gc->pin2mask(gc, offset);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
unsigned long flags;
@@ -82,6 +125,17 @@ static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
+static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ /* gc_offset is relative to this gpio_chip; want real offset */
+ int hwirq = offset + (gc->base - priv->gpio_base);
+
+ if (hwirq >= priv->num_gpios)
+ return -ENXIO;
+ return irq_create_mapping(priv->irq_domain, hwirq);
+}
+
/* -------------------- IRQ chip functions -------------------- */
static void brcmstb_gpio_irq_mask(struct irq_data *d)
@@ -100,12 +154,22 @@ static void brcmstb_gpio_irq_unmask(struct irq_data *d)
brcmstb_gpio_set_imask(bank, d->hwirq, true);
}
+static void brcmstb_gpio_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
+
+ gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+}
+
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- u32 mask = BIT(d->hwirq);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
@@ -113,13 +177,13 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
- level = 0;
+ level = mask;
edge_config = 0;
edge_insensitive = 0;
break;
case IRQ_TYPE_LEVEL_HIGH:
level = mask;
- edge_config = 0;
+ edge_config = mask;
edge_insensitive = 0;
break;
case IRQ_TYPE_EDGE_FALLING:
@@ -166,11 +230,6 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
{
int ret = 0;
- /*
- * Only enable wake IRQ once for however many hwirqs can wake
- * since they all use the same wake IRQ. Mask will be set
- * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
- */
if (enable)
ret = enable_irq_wake(priv->parent_wake_irq);
else
@@ -184,7 +243,18 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct brcmstb_gpio_bank *bank = gpiochip_get_data(gc);
+ struct brcmstb_gpio_priv *priv = bank->parent_priv;
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
+
+ /*
+ * Do not do anything specific for now, suspend/resume callbacks will
+ * configure the interrupt mask appropriately
+ */
+ if (enable)
+ bank->wake_active |= mask;
+ else
+ bank->wake_active &= ~mask;
return brcmstb_gpio_priv_set_wake(priv, enable);
}
@@ -195,43 +265,36 @@ static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
if (!priv || irq != priv->parent_wake_irq)
return IRQ_NONE;
- pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* Nothing to do */
return IRQ_HANDLED;
}
static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- struct irq_domain *irq_domain = bank->gc.irqdomain;
- void __iomem *reg_base = priv->reg_base;
+ struct irq_domain *domain = priv->irq_domain;
+ int hwbase = bank->gc.base - priv->gpio_base;
unsigned long status;
- unsigned long flags;
- spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
- while ((status = bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id)))) {
- int bit;
-
- for_each_set_bit(bit, &status, 32) {
- u32 stat = bank->gc.read_reg(reg_base +
- GIO_STAT(bank->id));
- if (bit >= bank->width)
+ while ((status = brcmstb_gpio_get_active_irqs(bank))) {
+ unsigned int irq, offset;
+
+ for_each_set_bit(offset, &status, 32) {
+ if (offset >= bank->width)
dev_warn(&priv->pdev->dev,
"IRQ for invalid GPIO (bank=%d, offset=%d)\n",
- bank->id, bit);
- bank->gc.write_reg(reg_base + GIO_STAT(bank->id),
- stat | BIT(bit));
- generic_handle_irq(irq_find_mapping(irq_domain, bit));
+ bank->id, offset);
+ irq = irq_linear_revmap(domain, hwbase + offset);
+ generic_handle_irq(irq);
}
}
- spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
}
/* Each UPG GIO block has one IRQ for all banks */
static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct brcmstb_gpio_priv *priv = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct brcmstb_gpio_bank *bank;
@@ -244,19 +307,63 @@ static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int brcmstb_gpio_reboot(struct notifier_block *nb,
- unsigned long action, void *data)
+static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
+ struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq)
{
- struct brcmstb_gpio_priv *priv =
- container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+ struct brcmstb_gpio_bank *bank;
+ int i = 0;
- /* Enable GPIO for S5 cold boot */
- if (action == SYS_POWER_OFF)
- brcmstb_gpio_priv_set_wake(priv, 1);
+ /* banks are in descending order */
+ list_for_each_entry_reverse(bank, &priv->bank_list, node) {
+ i += bank->gc.ngpio;
+ if (hwirq < i)
+ return bank;
+ }
+ return NULL;
+}
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key brcmstb_gpio_irq_lock_class;
- return NOTIFY_DONE;
+
+static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct brcmstb_gpio_priv *priv = d->host_data;
+ struct brcmstb_gpio_bank *bank =
+ brcmstb_gpio_hwirq_to_bank(priv, hwirq);
+ struct platform_device *pdev = priv->pdev;
+ int ret;
+
+ if (!bank)
+ return -EINVAL;
+
+ dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
+ irq, (int)hwirq, bank->id);
+ ret = irq_set_chip_data(irq, &bank->gc);
+ if (ret < 0)
+ return ret;
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+ return 0;
}
+static void brcmstb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops brcmstb_gpio_irq_domain_ops = {
+ .map = brcmstb_gpio_irq_map,
+ .unmap = brcmstb_gpio_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
/* Make sure that the number of banks matches up between properties */
static int brcmstb_gpio_sanity_check_banks(struct device *dev,
struct device_node *np, struct resource *res)
@@ -278,13 +385,25 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
struct brcmstb_gpio_bank *bank;
- int ret = 0;
+ int offset, ret = 0, virq;
if (!priv) {
dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
return -EFAULT;
}
+ if (priv->parent_irq > 0)
+ irq_set_chained_handler_and_data(priv->parent_irq, NULL, NULL);
+
+ /* Remove all IRQ mappings and delete the domain */
+ if (priv->irq_domain) {
+ for (offset = 0; offset < priv->num_gpios; offset++) {
+ virq = irq_find_mapping(priv->irq_domain, offset);
+ irq_dispose_mapping(virq);
+ }
+ irq_domain_remove(priv->irq_domain);
+ }
+
/*
* You can lose return values below, but we report all errors, and it's
* more important to actually perform all of the steps.
@@ -292,12 +411,6 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- if (priv->reboot_notifier.notifier_call) {
- ret = unregister_reboot_notifier(&priv->reboot_notifier);
- if (ret)
- dev_err(&pdev->dev,
- "failed to unregister reboot notifier\n");
- }
return ret;
}
@@ -332,66 +445,163 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
return offset;
}
-/* Before calling, must have bank->parent_irq set and gpiochip registered */
+/* priv->parent_irq and priv->num_gpios must be set before calling */
static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
- struct brcmstb_gpio_bank *bank)
+ struct brcmstb_gpio_priv *priv)
{
- struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int err;
- bank->irq_chip.name = dev_name(dev);
- bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
- bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
- bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
-
- /* Ensures that all non-wakeup IRQs are disabled at suspend */
- bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+ priv->irq_domain =
+ irq_domain_add_linear(np, priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "Couldn't allocate IRQ domain\n");
+ return -ENXIO;
+ }
- if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
- of_property_read_bool(np, "wakeup-source")) {
+ if (of_property_read_bool(np, "wakeup-source")) {
priv->parent_wake_irq = platform_get_irq(pdev, 1);
if (priv->parent_wake_irq < 0) {
+ priv->parent_wake_irq = 0;
dev_warn(dev,
"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
} else {
/*
- * Set wakeup capability before requesting wakeup
- * interrupt, so we can process boot-time "wakeups"
- * (e.g., from S5 cold boot)
+ * Set wakeup capability so we can process boot-time
+ * "wakeups" (e.g., from S5 cold boot)
*/
device_set_wakeup_capable(dev, true);
device_wakeup_enable(dev);
err = devm_request_irq(dev, priv->parent_wake_irq,
- brcmstb_gpio_wake_irq_handler, 0,
- "brcmstb-gpio-wake", priv);
+ brcmstb_gpio_wake_irq_handler,
+ IRQF_SHARED,
+ "brcmstb-gpio-wake", priv);
if (err < 0) {
dev_err(dev, "Couldn't request wake IRQ");
- return err;
+ goto out_free_domain;
}
-
- priv->reboot_notifier.notifier_call =
- brcmstb_gpio_reboot;
- register_reboot_notifier(&priv->reboot_notifier);
- priv->can_wake = true;
}
}
- if (priv->can_wake)
- bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+ priv->irq_chip.name = dev_name(dev);
+ priv->irq_chip.irq_disable = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+ priv->irq_chip.irq_ack = brcmstb_gpio_irq_ack;
+ priv->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+ if (priv->parent_wake_irq)
+ priv->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+ irq_set_chained_handler_and_data(priv->parent_irq,
+ brcmstb_gpio_irq_handler, priv);
+ irq_set_status_flags(priv->parent_irq, IRQ_DISABLE_UNLAZY);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(priv->irq_domain);
+
+ return err;
+}
+
+static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct gpio_chip *gc = &bank->gc;
+ unsigned int i;
+
+ for (i = 0; i < GIO_REG_STAT; i++)
+ bank->saved_regs[i] = gc->read_reg(priv->reg_base +
+ GIO_BANK_OFF(bank->id, i));
+}
+
+static void brcmstb_gpio_quiesce(struct device *dev, bool save)
+{
+ struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
+ struct brcmstb_gpio_bank *bank;
+ struct gpio_chip *gc;
+ u32 imask;
+
+ /* disable non-wake interrupt */
+ if (priv->parent_irq >= 0)
+ disable_irq(priv->parent_irq);
+
+ list_for_each_entry(bank, &priv->bank_list, node) {
+ gc = &bank->gc;
+
+ if (save)
+ brcmstb_gpio_bank_save(priv, bank);
+
+ /* Unmask GPIOs which have been flagged as wake-up sources */
+ if (priv->parent_wake_irq)
+ imask = bank->wake_active;
+ else
+ imask = 0;
+ gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
+ imask);
+ }
+}
+
+static void brcmstb_gpio_shutdown(struct platform_device *pdev)
+{
+ /* Enable GPIO for S5 cold boot */
+ brcmstb_gpio_quiesce(&pdev->dev, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
+ struct brcmstb_gpio_bank *bank)
+{
+ struct gpio_chip *gc = &bank->gc;
+ unsigned int i;
+
+ for (i = 0; i < GIO_REG_STAT; i++)
+ gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
+}
+
+static int brcmstb_gpio_suspend(struct device *dev)
+{
+ brcmstb_gpio_quiesce(dev, true);
+ return 0;
+}
+
+static int brcmstb_gpio_resume(struct device *dev)
+{
+ struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
+ struct brcmstb_gpio_bank *bank;
+ bool need_wakeup_event = false;
+
+ list_for_each_entry(bank, &priv->bank_list, node) {
+ need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
+ brcmstb_gpio_bank_restore(priv, bank);
+ }
+
+ if (priv->parent_wake_irq && need_wakeup_event)
+ pm_wakeup_event(dev, 0);
- err = gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- if (err)
- return err;
- gpiochip_set_chained_irqchip(&bank->gc, &bank->irq_chip,
- priv->parent_irq, brcmstb_gpio_irq_handler);
+ /* enable non-wake interrupt */
+ if (priv->parent_irq >= 0)
+ enable_irq(priv->parent_irq);
return 0;
}
+#else
+#define brcmstb_gpio_suspend NULL
+#define brcmstb_gpio_resume NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
+ .suspend_noirq = brcmstb_gpio_suspend,
+ .resume_noirq = brcmstb_gpio_resume,
+};
+
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -406,6 +616,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
int err;
static int gpio_base;
unsigned long flags = 0;
+ bool need_wakeup_event = false;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -485,16 +696,23 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
+ if (!gc->label) {
+ err = -ENOMEM;
+ goto fail;
+ }
gc->base = gpio_base;
gc->of_gpio_n_cells = 2;
gc->of_xlate = brcmstb_gpio_of_xlate;
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
+ if (priv->parent_irq > 0)
+ gc->to_irq = brcmstb_gpio_to_irq;
/*
* Mask all interrupts by default, since wakeup interrupts may
* be retained from S5 cold boot
*/
+ need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
@@ -505,12 +723,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
}
gpio_base += gc->ngpio;
- if (priv->parent_irq > 0) {
- err = brcmstb_gpio_irq_setup(pdev, bank);
- if (err)
- goto fail;
- }
-
dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
gc->base, gc->ngpio, bank->width);
@@ -520,9 +732,19 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
num_banks++;
}
+ priv->num_gpios = gpio_base - priv->gpio_base;
+ if (priv->parent_irq > 0) {
+ err = brcmstb_gpio_irq_setup(pdev, priv);
+ if (err)
+ goto fail;
+ }
+
dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
num_banks, priv->gpio_base, gpio_base - 1);
+ if (priv->parent_wake_irq && need_wakeup_event)
+ pm_wakeup_event(dev, 0);
+
return 0;
fail:
@@ -541,9 +763,11 @@ static struct platform_driver brcmstb_gpio_driver = {
.driver = {
.name = "brcmstb-gpio",
.of_match_table = brcmstb_gpio_of_match,
+ .pm = &brcmstb_gpio_pm_ops,
},
.probe = brcmstb_gpio_probe,
.remove = brcmstb_gpio_remove,
+ .shutdown = brcmstb_gpio_shutdown,
};
module_platform_driver(brcmstb_gpio_driver);
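
The brcmstb rework above moves from one irqchip per bank to a single linear IRQ domain covering the whole controller, so hwirq numbers become global and have to be translated back to the owning bank (brcmstb_gpio_hwirq_to_bank) and to a bit offset within it (brcmstb_gpio_hwirq_to_offset). A simplified stand-alone sketch of that translation; the bank sizes, struct layout and helper names here are invented for illustration, not the driver's:

#include <stdio.h>

struct bank {
	int base;	/* first GPIO number of this bank within the device */
	int ngpio;	/* number of lines in this bank (<= 32) */
};

/* hwirq is global across the device; convert it to a bit within a bank. */
static int hwirq_to_offset(int hwirq, const struct bank *bank)
{
	return hwirq - bank->base;
}

/* Walk the banks to find the one owning a given global hwirq. */
static const struct bank *hwirq_to_bank(const struct bank *banks, int nbanks,
					int hwirq)
{
	int i, end = 0;

	for (i = 0; i < nbanks; i++) {
		end += banks[i].ngpio;
		if (hwirq < end)
			return &banks[i];
	}
	return NULL;
}

int main(void)
{
	const struct bank banks[] = { { 0, 32 }, { 32, 32 }, { 64, 8 } };
	int hwirq = 37;
	const struct bank *b = hwirq_to_bank(banks, 3, hwirq);

	if (b)
		printf("hwirq %d -> bank base %d, bit %d\n",
		       hwirq, b->base, hwirq_to_offset(hwirq, b));
	return 0;
}
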
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index e60156ec0c18..b6f0f729656c 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -295,7 +295,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
- virq = irq_find_mapping(cg->chip.irqdomain, gpio);
+ virq = irq_find_mapping(cg->chip.irq.domain, gpio);
handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index aecb847166f5..1dada68b9a27 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -420,7 +420,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
+ irq = irq_find_mapping(dln2->gpio.irq.domain, pin);
if (!irq) {
dev_err(dln2->gpio.parent, "pin %d not mapped to IRQ\n", pin);
return;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index c07ada9c7af6..6730c6642ce3 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/platform_data/gpio-dwapb.h>
#include <linux/slab.h>
@@ -77,6 +78,7 @@ struct dwapb_context {
u32 int_type;
u32 int_pol;
u32 int_deb;
+ u32 wake_en;
};
#endif
@@ -97,6 +99,7 @@ struct dwapb_gpio {
unsigned int nr_ports;
struct irq_domain *domain;
unsigned int flags;
+ struct reset_control *rst;
};
static inline u32 gpio_reg_v2_convert(unsigned int offset)
@@ -295,13 +298,29 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+ struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d);
+ struct dwapb_gpio *gpio = igc->private;
+ struct dwapb_context *ctx = gpio->ports[0].ctx;
+
+ if (enable)
+ ctx->wake_en |= BIT(d->hwirq);
+ else
+ ctx->wake_en &= ~BIT(d->hwirq);
+
+ return 0;
+}
+#endif
+
static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
struct dwapb_gpio *gpio = port->gpio;
unsigned long flags, val_deb;
- unsigned long mask = gc->pin2mask(gc, offset);
+ unsigned long mask = BIT(offset);
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -385,6 +404,9 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
ct->chip.irq_disable = dwapb_irq_disable;
ct->chip.irq_request_resources = dwapb_irq_reqres;
ct->chip.irq_release_resources = dwapb_irq_relres;
+#ifdef CONFIG_PM_SLEEP
+ ct->chip.irq_set_wake = dwapb_irq_set_wake;
+#endif
ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI);
ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK);
ct->type = IRQ_TYPE_LEVEL_MASK;
@@ -460,7 +482,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
(pp->idx * GPIO_SWPORT_DDR_SIZE);
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, false);
+ NULL, 0);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
@@ -609,6 +631,12 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
gpio->dev = &pdev->dev;
gpio->nr_ports = pdata->nports;
+ gpio->rst = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(gpio->rst))
+ return PTR_ERR(gpio->rst);
+
+ reset_control_deassert(gpio->rst);
+
gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports,
sizeof(*gpio->ports), GFP_KERNEL);
if (!gpio->ports)
@@ -660,6 +688,7 @@ static int dwapb_gpio_remove(struct platform_device *pdev)
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
+ reset_control_assert(gpio->rst);
return 0;
}
@@ -699,7 +728,8 @@ static int dwapb_gpio_suspend(struct device *dev)
ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
/* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, 0xffffffff);
+ dwapb_write(gpio, GPIO_INTMASK,
+ 0xffffffff & ~ctx->wake_en);
}
}
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
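
The dwapb change above records wake-enabled lines in a per-controller wake_en bitmask and, on suspend, masks every interrupt except those lines (0xffffffff & ~wake_en). A small stand-alone sketch of that bookkeeping, with hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

static uint32_t wake_en;

/* Record which lines may wake the system, as irq_set_wake() does above. */
static void set_wake(unsigned int hwirq, int enable)
{
	if (enable)
		wake_en |= 1u << hwirq;
	else
		wake_en &= ~(1u << hwirq);
}

/* Suspend-time mask: 1 = masked; wake-enabled lines stay unmasked. */
static uint32_t suspend_intmask(void)
{
	return 0xffffffffu & ~wake_en;
}

int main(void)
{
	set_wake(3, 1);
	set_wake(17, 1);
	printf("INTMASK on suspend: 0x%08x\n", suspend_intmask());
	return 0;
}
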
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 8d32ccc980d9..b86e09e1b13b 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -239,12 +239,12 @@ static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
static int em_gio_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void em_gio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/* Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 14c6aac26780..94db1bf4bfdb 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index e9386f8b67f5..7b3394fdc624 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Faraday Technolog FTGPIO010 gpiochip and interrupt routines
* Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
@@ -149,7 +150,7 @@ static void ftgpio_gpio_irq_handler(struct irq_desc *desc)
stat = readl(g->base + GPIO_INT_STAT);
if (stat)
for_each_set_bit(offset, &stat, gc->ngpio)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
chained_irq_exit(irqchip, desc);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 6544a16ab02e..e2fc561f4315 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/bitops.h>
#define GRGPIO_MAX_NGPIO 32
@@ -96,12 +97,11 @@ static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
struct gpio_chip *gc = &priv->gc;
- unsigned long mask = gc->pin2mask(gc, offset);
if (val)
- priv->imask |= mask;
+ priv->imask |= BIT(offset);
else
- priv->imask &= ~mask;
+ priv->imask &= ~BIT(offset);
gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
}
diff --git a/drivers/gpio/gpio-ingenic.c b/drivers/gpio/gpio-ingenic.c
index 254780730b95..15fb2bc796a8 100644
--- a/drivers/gpio/gpio-ingenic.c
+++ b/drivers/gpio/gpio-ingenic.c
@@ -242,7 +242,7 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
for_each_set_bit(i, &flag, 32)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, i));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, i));
chained_irq_exit(irq_chip, desc);
}
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index b76ecee82c3f..629575ea46a0 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -295,7 +295,7 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
mask = BIT(gpio);
/* Clear before handling so we can't lose an edge */
writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
base + gpio));
}
}
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 72b64039241a..fca84ccac35c 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
+#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -22,11 +23,10 @@ static void __iomem *gpio_reg_base;
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
- unsigned long pinmask = gc->pin2mask(gc, offset);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | pinmask,
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | BIT(offset),
gpio_reg_base + GPIO_CFG);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -35,11 +35,10 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
- unsigned long pinmask = gc->pin2mask(gc, offset);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
- __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~pinmask,
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~BIT(offset),
gpio_reg_base + GPIO_CFG);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index fbd393b46ce0..1e557b10d73e 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -255,7 +255,7 @@ static void lp_gpio_irq_handler(struct irq_desc *desc)
mask = BIT(pin);
/* Clear before handling so we don't lose an edge */
outl(mask, reg);
- irq = irq_find_mapping(lg->chip.irqdomain, base + pin);
+ irq = irq_find_mapping(lg->chip.irq.domain, base + pin);
generic_handle_irq(irq);
}
}
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
new file mode 100644
index 000000000000..f74b1072e84b
--- /dev/null
+++ b/drivers/gpio/gpio-max3191x.c
@@ -0,0 +1,492 @@
+/*
+ * gpio-max3191x.c - GPIO driver for Maxim MAX3191x industrial serializer
+ *
+ * Copyright (C) 2017 KUNBUS GmbH
+ *
+ * The MAX3191x makes 8 digital 24V inputs available via SPI.
+ * Multiple chips can be daisy-chained, the spec does not impose
+ * a limit on the number of chips and neither does this driver.
+ *
+ * Either of two modes is selectable: In 8-bit mode, only the state
+ * of the inputs is clocked out to achieve high readout speeds;
+ * In 16-bit mode, an additional status byte is clocked out with
+ * a CRC and indicator bits for undervoltage and overtemperature.
+ * The driver returns an error instead of potentially bogus data
+ * if any of these fault conditions occur. However it does allow
+ * readout of non-faulting chips in the same daisy-chain.
+ *
+ * MAX3191x supports four debounce settings and the driver is
+ * capable of configuring these differently for each chip in the
+ * daisy-chain.
+ *
+ * If the chips are hardwired to 8-bit mode ("modesel" pulled high),
+ * gpio-pisosr.c can be used alternatively to this driver.
+ *
+ * https://datasheets.maximintegrated.com/en/ds/MAX31910.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31911.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31912.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31913.pdf
+ * https://datasheets.maximintegrated.com/en/ds/MAX31953-MAX31963.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/crc8.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+enum max3191x_mode {
+ STATUS_BYTE_ENABLED,
+ STATUS_BYTE_DISABLED,
+};
+
+/**
+ * struct max3191x_chip - max3191x daisy-chain
+ * @gpio: GPIO controller struct
+ * @lock: protects read sequences
+ * @nchips: number of chips in the daisy-chain
+ * @mode: current mode, 0 for 16-bit, 1 for 8-bit;
+ * for simplicity, all chips in the daisy-chain are assumed
+ * to use the same mode
+ * @modesel_pins: GPIO pins to configure modesel of each chip
+ * @fault_pins: GPIO pins to detect fault of each chip
+ * @db0_pins: GPIO pins to configure debounce of each chip
+ * @db1_pins: GPIO pins to configure debounce of each chip
+ * @mesg: SPI message to perform a readout
+ * @xfer: SPI transfer used by @mesg
+ * @crc_error: bitmap signaling CRC error for each chip
+ * @overtemp: bitmap signaling overtemperature alarm for each chip
+ * @undervolt1: bitmap signaling undervoltage alarm for each chip
+ * @undervolt2: bitmap signaling undervoltage warning for each chip
+ * @fault: bitmap signaling assertion of @fault_pins for each chip
+ * @ignore_uv: whether to ignore undervoltage alarms;
+ * set by a device property if the chips are powered through
+ * 5VOUT instead of VCC24V, in which case they will constantly
+ * signal undervoltage;
+ * for simplicity, all chips in the daisy-chain are assumed
+ * to be powered the same way
+ */
+struct max3191x_chip {
+ struct gpio_chip gpio;
+ struct mutex lock;
+ u32 nchips;
+ enum max3191x_mode mode;
+ struct gpio_descs *modesel_pins;
+ struct gpio_descs *fault_pins;
+ struct gpio_descs *db0_pins;
+ struct gpio_descs *db1_pins;
+ struct spi_message mesg;
+ struct spi_transfer xfer;
+ unsigned long *crc_error;
+ unsigned long *overtemp;
+ unsigned long *undervolt1;
+ unsigned long *undervolt2;
+ unsigned long *fault;
+ bool ignore_uv;
+};
+
+#define MAX3191X_NGPIO 8
+#define MAX3191X_CRC8_POLYNOMIAL 0xa8 /* (x^5) + x^4 + x^2 + x^0 */
+
+DECLARE_CRC8_TABLE(max3191x_crc8);
+
+static int max3191x_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ return 1; /* always in */
+}
+
+static int max3191x_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ return 0;
+}
+
+static int max3191x_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int value)
+{
+ return -EINVAL;
+}
+
+static void max3191x_set(struct gpio_chip *gpio, unsigned int offset, int value)
+{ }
+
+static void max3191x_set_multiple(struct gpio_chip *gpio, unsigned long *mask,
+ unsigned long *bits)
+{ }
+
+static unsigned int max3191x_wordlen(struct max3191x_chip *max3191x)
+{
+ return max3191x->mode == STATUS_BYTE_ENABLED ? 2 : 1;
+}
+
+static int max3191x_readout_locked(struct max3191x_chip *max3191x)
+{
+ struct device *dev = max3191x->gpio.parent;
+ struct spi_device *spi = to_spi_device(dev);
+ int val, i, ot = 0, uv1 = 0;
+
+ val = spi_sync(spi, &max3191x->mesg);
+ if (val) {
+ dev_err_ratelimited(dev, "SPI receive error %d\n", val);
+ return val;
+ }
+
+ for (i = 0; i < max3191x->nchips; i++) {
+ if (max3191x->mode == STATUS_BYTE_ENABLED) {
+ u8 in = ((u8 *)max3191x->xfer.rx_buf)[i * 2];
+ u8 status = ((u8 *)max3191x->xfer.rx_buf)[i * 2 + 1];
+
+ val = (status & 0xf8) != crc8(max3191x_crc8, &in, 1, 0);
+ __assign_bit(i, max3191x->crc_error, val);
+ if (val)
+ dev_err_ratelimited(dev,
+ "chip %d: CRC error\n", i);
+
+ ot = (status >> 1) & 1;
+ __assign_bit(i, max3191x->overtemp, ot);
+ if (ot)
+ dev_err_ratelimited(dev,
+ "chip %d: overtemperature\n", i);
+
+ if (!max3191x->ignore_uv) {
+ uv1 = !((status >> 2) & 1);
+ __assign_bit(i, max3191x->undervolt1, uv1);
+ if (uv1)
+ dev_err_ratelimited(dev,
+ "chip %d: undervoltage\n", i);
+
+ val = !(status & 1);
+ __assign_bit(i, max3191x->undervolt2, val);
+ if (val && !uv1)
+ dev_warn_ratelimited(dev,
+ "chip %d: voltage warn\n", i);
+ }
+ }
+
+ if (max3191x->fault_pins && !max3191x->ignore_uv) {
+ /* fault pin shared by all chips or per chip */
+ struct gpio_desc *fault_pin =
+ (max3191x->fault_pins->ndescs == 1)
+ ? max3191x->fault_pins->desc[0]
+ : max3191x->fault_pins->desc[i];
+
+ val = gpiod_get_value_cansleep(fault_pin);
+ if (val < 0) {
+ dev_err_ratelimited(dev,
+ "GPIO read error %d\n", val);
+ return val;
+ }
+ __assign_bit(i, max3191x->fault, val);
+ if (val && !uv1 && !ot)
+ dev_err_ratelimited(dev,
+ "chip %d: fault\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static bool max3191x_chip_is_faulting(struct max3191x_chip *max3191x,
+ unsigned int chipnum)
+{
+ /* without status byte the only diagnostic is the fault pin */
+ if (!max3191x->ignore_uv && test_bit(chipnum, max3191x->fault))
+ return true;
+
+ if (max3191x->mode == STATUS_BYTE_DISABLED)
+ return false;
+
+ return test_bit(chipnum, max3191x->crc_error) ||
+ test_bit(chipnum, max3191x->overtemp) ||
+ (!max3191x->ignore_uv &&
+ test_bit(chipnum, max3191x->undervolt1));
+}
+
+static int max3191x_get(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ int ret, chipnum, wordlen = max3191x_wordlen(max3191x);
+ u8 in;
+
+ mutex_lock(&max3191x->lock);
+ ret = max3191x_readout_locked(max3191x);
+ if (ret)
+ goto out_unlock;
+
+ chipnum = offset / MAX3191X_NGPIO;
+ if (max3191x_chip_is_faulting(max3191x, chipnum)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
+ ret = (in >> (offset % MAX3191X_NGPIO)) & 1;
+
+out_unlock:
+ mutex_unlock(&max3191x->lock);
+ return ret;
+}
+
+static int max3191x_get_multiple(struct gpio_chip *gpio, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ int ret, bit = 0, wordlen = max3191x_wordlen(max3191x);
+
+ mutex_lock(&max3191x->lock);
+ ret = max3191x_readout_locked(max3191x);
+ if (ret)
+ goto out_unlock;
+
+ while ((bit = find_next_bit(mask, gpio->ngpio, bit)) != gpio->ngpio) {
+ unsigned int chipnum = bit / MAX3191X_NGPIO;
+ unsigned long in, shift, index;
+
+ if (max3191x_chip_is_faulting(max3191x, chipnum)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ in = ((u8 *)max3191x->xfer.rx_buf)[chipnum * wordlen];
+ shift = round_down(bit % BITS_PER_LONG, MAX3191X_NGPIO);
+ index = bit / BITS_PER_LONG;
+ bits[index] &= ~(mask[index] & (0xff << shift));
+ bits[index] |= mask[index] & (in << shift); /* copy bits */
+
+ bit = (chipnum + 1) * MAX3191X_NGPIO; /* go to next chip */
+ }
+
+out_unlock:
+ mutex_unlock(&max3191x->lock);
+ return ret;
+}
+
+static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct max3191x_chip *max3191x = gpiochip_get_data(gpio);
+ u32 debounce, chipnum, db0_val, db1_val;
+
+ if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ return -ENOTSUPP;
+
+ if (!max3191x->db0_pins || !max3191x->db1_pins)
+ return -EINVAL;
+
+ debounce = pinconf_to_config_argument(config);
+ switch (debounce) {
+ case 0:
+ db0_val = 0;
+ db1_val = 0;
+ break;
+ case 1 ... 25:
+ db0_val = 0;
+ db1_val = 1;
+ break;
+ case 26 ... 750:
+ db0_val = 1;
+ db1_val = 0;
+ break;
+ case 751 ... 3000:
+ db0_val = 1;
+ db1_val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (max3191x->db0_pins->ndescs == 1)
+ chipnum = 0; /* all chips use the same pair of debounce pins */
+ else
+ chipnum = offset / MAX3191X_NGPIO; /* per chip debounce pins */
+
+ mutex_lock(&max3191x->lock);
+ gpiod_set_value_cansleep(max3191x->db0_pins->desc[chipnum], db0_val);
+ gpiod_set_value_cansleep(max3191x->db1_pins->desc[chipnum], db1_val);
+ mutex_unlock(&max3191x->lock);
+ return 0;
+}
+
+static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
+ struct gpio_desc **desc,
+ int value)
+{
+ int i, values[ndescs];
+
+ for (i = 0; i < ndescs; i++)
+ values[i] = value;
+
+ gpiod_set_array_value_cansleep(ndescs, desc, values);
+}
+
+static struct gpio_descs *devm_gpiod_get_array_optional_count(
+ struct device *dev, const char *con_id,
+ enum gpiod_flags flags, unsigned int expected)
+{
+ struct gpio_descs *descs;
+ int found = gpiod_count(dev, con_id);
+
+ if (found == -ENOENT)
+ return NULL;
+
+ if (found != expected && found != 1) {
+ dev_err(dev, "ignoring %s-gpios: found %d, expected %u or 1\n",
+ con_id, found, expected);
+ return NULL;
+ }
+
+ descs = devm_gpiod_get_array_optional(dev, con_id, flags);
+
+ if (IS_ERR(descs)) {
+ dev_err(dev, "failed to get %s-gpios: %ld\n",
+ con_id, PTR_ERR(descs));
+ return NULL;
+ }
+
+ return descs;
+}
+
+static int max3191x_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct max3191x_chip *max3191x;
+ int n, ret;
+
+ max3191x = devm_kzalloc(dev, sizeof(*max3191x), GFP_KERNEL);
+ if (!max3191x)
+ return -ENOMEM;
+ spi_set_drvdata(spi, max3191x);
+
+ max3191x->nchips = 1;
+ device_property_read_u32(dev, "#daisy-chained-devices",
+ &max3191x->nchips);
+
+ n = BITS_TO_LONGS(max3191x->nchips);
+ max3191x->crc_error = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->undervolt1 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->undervolt2 = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->overtemp = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->fault = devm_kcalloc(dev, n, sizeof(long), GFP_KERNEL);
+ max3191x->xfer.rx_buf = devm_kcalloc(dev, max3191x->nchips,
+ 2, GFP_KERNEL);
+ if (!max3191x->crc_error || !max3191x->undervolt1 ||
+ !max3191x->overtemp || !max3191x->undervolt2 ||
+ !max3191x->fault || !max3191x->xfer.rx_buf)
+ return -ENOMEM;
+
+ max3191x->modesel_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,modesel", GPIOD_ASIS, max3191x->nchips);
+ max3191x->fault_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,fault", GPIOD_IN, max3191x->nchips);
+ max3191x->db0_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,db0", GPIOD_OUT_LOW, max3191x->nchips);
+ max3191x->db1_pins = devm_gpiod_get_array_optional_count(dev,
+ "maxim,db1", GPIOD_OUT_LOW, max3191x->nchips);
+
+ max3191x->mode = device_property_read_bool(dev, "maxim,modesel-8bit")
+ ? STATUS_BYTE_DISABLED : STATUS_BYTE_ENABLED;
+ if (max3191x->modesel_pins)
+ gpiod_set_array_single_value_cansleep(
+ max3191x->modesel_pins->ndescs,
+ max3191x->modesel_pins->desc, max3191x->mode);
+
+ max3191x->ignore_uv = device_property_read_bool(dev,
+ "maxim,ignore-undervoltage");
+
+ if (max3191x->db0_pins && max3191x->db1_pins &&
+ max3191x->db0_pins->ndescs != max3191x->db1_pins->ndescs) {
+ dev_err(dev, "ignoring maxim,db*-gpios: array len mismatch\n");
+ devm_gpiod_put_array(dev, max3191x->db0_pins);
+ devm_gpiod_put_array(dev, max3191x->db1_pins);
+ max3191x->db0_pins = NULL;
+ max3191x->db1_pins = NULL;
+ }
+
+ max3191x->xfer.len = max3191x->nchips * max3191x_wordlen(max3191x);
+ spi_message_init_with_transfers(&max3191x->mesg, &max3191x->xfer, 1);
+
+ max3191x->gpio.label = spi->modalias;
+ max3191x->gpio.owner = THIS_MODULE;
+ max3191x->gpio.parent = dev;
+ max3191x->gpio.base = -1;
+ max3191x->gpio.ngpio = max3191x->nchips * MAX3191X_NGPIO;
+ max3191x->gpio.can_sleep = true;
+
+ max3191x->gpio.get_direction = max3191x_get_direction;
+ max3191x->gpio.direction_input = max3191x_direction_input;
+ max3191x->gpio.direction_output = max3191x_direction_output;
+ max3191x->gpio.set = max3191x_set;
+ max3191x->gpio.set_multiple = max3191x_set_multiple;
+ max3191x->gpio.get = max3191x_get;
+ max3191x->gpio.get_multiple = max3191x_get_multiple;
+ max3191x->gpio.set_config = max3191x_set_config;
+
+ mutex_init(&max3191x->lock);
+
+ ret = gpiochip_add_data(&max3191x->gpio, max3191x);
+ if (ret) {
+ mutex_destroy(&max3191x->lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max3191x_remove(struct spi_device *spi)
+{
+ struct max3191x_chip *max3191x = spi_get_drvdata(spi);
+
+ gpiochip_remove(&max3191x->gpio);
+ mutex_destroy(&max3191x->lock);
+
+ return 0;
+}
+
+static int __init max3191x_register_driver(struct spi_driver *sdrv)
+{
+ crc8_populate_msb(max3191x_crc8, MAX3191X_CRC8_POLYNOMIAL);
+ return spi_register_driver(sdrv);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id max3191x_of_id[] = {
+ { .compatible = "maxim,max31910" },
+ { .compatible = "maxim,max31911" },
+ { .compatible = "maxim,max31912" },
+ { .compatible = "maxim,max31913" },
+ { .compatible = "maxim,max31953" },
+ { .compatible = "maxim,max31963" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max3191x_of_id);
+#endif
+
+static const struct spi_device_id max3191x_spi_id[] = {
+ { "max31910" },
+ { "max31911" },
+ { "max31912" },
+ { "max31913" },
+ { "max31953" },
+ { "max31963" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max3191x_spi_id);
+
+static struct spi_driver max3191x_driver = {
+ .driver = {
+ .name = "max3191x",
+ .of_match_table = of_match_ptr(max3191x_of_id),
+ },
+ .probe = max3191x_probe,
+ .remove = max3191x_remove,
+ .id_table = max3191x_spi_id,
+};
+module_driver(max3191x_driver, max3191x_register_driver, spi_unregister_driver);
+
+MODULE_AUTHOR("Lukas Wunner <lukas@wunner.de>");
+MODULE_DESCRIPTION("GPIO driver for Maxim MAX3191x industrial serializer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7f4d26ce5f23..c04fae1ba32a 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -486,7 +486,7 @@ static irqreturn_t max732x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
level));
pending &= ~(1 << level);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 94d772677ed6..3134c0d2bfe4 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -17,6 +17,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -52,11 +53,6 @@ static int mb86s70_gpio_request(struct gpio_chip *gc, unsigned gpio)
spin_lock_irqsave(&gchip->lock, flags);
val = readl(gchip->base + PFR(gpio));
- if (!(val & OFFSET(gpio))) {
- spin_unlock_irqrestore(&gchip->lock, flags);
- return -EINVAL;
- }
-
val &= ~OFFSET(gpio);
writel(val, gchip->base + PFR(gpio));
@@ -209,6 +205,7 @@ static const struct of_device_id mb86s70_gpio_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-gpio" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
static struct platform_driver mb86s70_gpio_driver = {
.driver = {
@@ -218,5 +215,8 @@ static struct platform_driver mb86s70_gpio_driver = {
.probe = mb86s70_gpio_probe,
.remove = mb86s70_gpio_remove,
};
+module_platform_driver(mb86s70_gpio_driver);
-builtin_platform_driver(mb86s70_gpio_driver);
+MODULE_DESCRIPTION("MB86S7x GPIO Driver");
+MODULE_ALIAS("platform:mb86s70-gpio");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index ec8560298805..dd67a31ac337 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -357,7 +357,7 @@ static void mrfld_irq_handler(struct irq_desc *desc)
for_each_set_bit(gpio, &pending, 32) {
unsigned int irq;
- irq = irq_find_mapping(gc->irqdomain, base + gpio);
+ irq = irq_find_mapping(gc->irq.domain, base + gpio);
generic_handle_irq(irq);
}
}
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index f7da40e46c55..f9042bcc27a4 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -126,20 +126,16 @@ static unsigned long bgpio_read32be(void __iomem *reg)
return ioread32be(reg);
}
-static unsigned long bgpio_pin2mask(struct gpio_chip *gc, unsigned int pin)
+static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- return BIT(pin);
-}
-
-static unsigned long bgpio_pin2mask_be(struct gpio_chip *gc,
- unsigned int pin)
-{
- return BIT(gc->bgpio_bits - 1 - pin);
+ if (gc->be_bits)
+ return BIT(gc->bgpio_bits - 1 - line);
+ return BIT(line);
}
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long pinmask = gc->pin2mask(gc, gpio);
+ unsigned long pinmask = bgpio_line2mask(gc, gpio);
if (gc->bgpio_dir & pinmask)
return !!(gc->read_reg(gc->reg_set) & pinmask);
@@ -147,9 +143,76 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
return !!(gc->read_reg(gc->reg_dat) & pinmask);
}
+/*
+ * This assumes that the bits in the GPIO register are in native endianness.
+ * We only assign this function pointer when that is the case.
+ */
+static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long get_mask = 0;
+ unsigned long set_mask = 0;
+ int bit = 0;
+
+ while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio) {
+ if (gc->bgpio_dir & BIT(bit))
+ set_mask |= BIT(bit);
+ else
+ get_mask |= BIT(bit);
+ }
+
+ if (set_mask)
+ *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ if (get_mask)
+ *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+
+ return 0;
+}
+
static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & gc->pin2mask(gc, gpio));
+ return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+}
+
+/*
+ * This only works if the bits in the GPIO register are in native endianness.
+ * It is dirt simple and fast in this case. (Also the most common case.)
+ */
+static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+
+ *bits = gc->read_reg(gc->reg_dat) & *mask;
+ return 0;
+}
+
+/*
+ * With big endian mirrored bit order it becomes more tedious.
+ */
+static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ unsigned long readmask = 0;
+ unsigned long val;
+ int bit;
+
+ /* Create a mirrored mask */
+ bit = 0;
+ while ((bit = find_next_bit(mask, gc->ngpio, bit)) != gc->ngpio)
+ readmask |= bgpio_line2mask(gc, bit);
+
+ /* Read the register */
+ val = gc->read_reg(gc->reg_dat) & readmask;
+
+ /*
+ * Mirror the result into the "bits" result; this gives line 0
+ * in bit 0 ... line 31 in bit 31 for a 32-bit register.
+ */
+ bit = 0;
+ while ((bit = find_next_bit(&val, gc->ngpio, bit)) != gc->ngpio)
+ *bits |= bgpio_line2mask(gc, bit);
+
+ return 0;
}
static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -158,7 +221,7 @@ static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -176,7 +239,7 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
if (val)
gc->write_reg(gc->reg_set, mask);
@@ -186,7 +249,7 @@ static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = gc->pin2mask(gc, gpio);
+ unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
spin_lock_irqsave(&gc->bgpio_lock, flags);
@@ -216,9 +279,9 @@ static void bgpio_multiple_get_masks(struct gpio_chip *gc,
break;
if (__test_and_clear_bit(i, mask)) {
if (test_bit(i, bits))
- *set_mask |= gc->pin2mask(gc, i);
+ *set_mask |= bgpio_line2mask(gc, i);
else
- *clear_mask |= gc->pin2mask(gc, i);
+ *clear_mask |= bgpio_line2mask(gc, i);
}
}
}
@@ -294,7 +357,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -305,7 +368,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+ return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -316,7 +379,7 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -330,7 +393,7 @@ static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir |= gc->pin2mask(gc, gpio);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -346,7 +409,7 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- gc->bgpio_dir &= ~gc->pin2mask(gc, gpio);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
gc->write_reg(gc->reg_dir, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -357,12 +420,11 @@ static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
{
/* Return 0 if output, 1 if input */
- return !!(gc->read_reg(gc->reg_dir) & gc->pin2mask(gc, gpio));
+ return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
}
static int bgpio_setup_accessors(struct device *dev,
struct gpio_chip *gc,
- bool bit_be,
bool byte_be)
{
@@ -406,8 +468,6 @@ static int bgpio_setup_accessors(struct device *dev,
return -EINVAL;
}
- gc->pin2mask = bit_be ? bgpio_pin2mask_be : bgpio_pin2mask;
-
return 0;
}
@@ -462,10 +522,24 @@ static int bgpio_setup_io(struct gpio_chip *gc,
}
if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET))
+ (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
gc->get = bgpio_get_set;
- else
+ if (!gc->be_bits)
+ gc->get_multiple = bgpio_get_set_multiple;
+ /*
+ * We deliberately avoid assigning the ->get_multiple() call
+ * for big endian mirrored registers which are ALSO reflecting
+ * their value in the set register when used as output. It is
+ * simply too much complexity; let the GPIO core fall back to
+ * reading each line individually in that fringe case.
+ */
+ } else {
gc->get = bgpio_get;
+ if (gc->be_bits)
+ gc->get_multiple = bgpio_get_multiple_be;
+ else
+ gc->get_multiple = bgpio_get_multiple;
+ }
return 0;
}
@@ -526,13 +600,13 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
gc->base = -1;
gc->ngpio = gc->bgpio_bits;
gc->request = bgpio_request;
+ gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
ret = bgpio_setup_io(gc, dat, set, clr, flags);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN,
- flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8c93dec498fa..c8673a5d9412 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#define MPC8XXX_GPIO_PINS 32
@@ -44,6 +45,16 @@ struct mpc8xxx_gpio_chip {
unsigned int irqn;
};
+/*
+ * This hardware has a big endian bit assignment such that GPIO line 0 is
+ * connected to bit 31, line 1 to bit 30 ... line 31 to bit 0.
+ * This inline helper gives the right bitmask for a certain line.
+ */
+static inline u32 mpc_pin2mask(unsigned int offset)
+{
+ return BIT(31 - offset);
+}
+
/* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs
* defined as output cannot be determined by reading GPDAT register,
* so we use shadow data register instead. The status of input pins
@@ -59,7 +70,7 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
out_shadow = gc->bgpio_data & out_mask;
- return !!((val | out_shadow) & gc->pin2mask(gc, gpio));
+ return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
static int mpc5121_gpio_dir_out(struct gpio_chip *gc,
@@ -120,7 +131,7 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
- | gc->pin2mask(gc, irqd_to_hwirq(d)));
+ | mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -135,7 +146,7 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
- & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
+ & ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -146,7 +157,7 @@ static void mpc8xxx_irq_ack(struct irq_data *d)
struct gpio_chip *gc = &mpc8xxx_gc->gc;
gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
- gc->pin2mask(gc, irqd_to_hwirq(d)));
+ mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
@@ -160,7 +171,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
- | gc->pin2mask(gc, irqd_to_hwirq(d)));
+ | mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -168,7 +179,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
- & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
+ & ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 3233b72b6828..e136d666f1e5 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -737,7 +737,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
- generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
+ generic_handle_irq(irq_find_mapping(bank->chip.irq.domain,
bit));
raw_spin_unlock_irqrestore(&bank->wa_lock,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 1b9dbf691ae7..babb7bd2ba59 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -608,7 +608,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
for (i = 0; i < NBANK(chip); i++) {
while (pending[i]) {
level = __ffs(pending[i]);
- handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain,
+ handle_nested_irq(irq_find_mapping(chip->gpio_chip.irq.domain,
level + (BANK_SZ * i)));
pending[i] &= ~(1 << level);
nhandled++;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a4fd78b9c0e4..38fbb420c6cd 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -196,7 +196,7 @@ static irqreturn_t pcf857x_irq(int irq, void *data)
mutex_unlock(&gpio->lock);
for_each_set_bit(i, &change, gpio->chip.ngpio)
- handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
+ handle_nested_irq(irq_find_mapping(gpio->chip.irq.domain, i));
return IRQ_HANDLED;
}
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 7de4f6a2cb49..57d1b7fbf07b 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -240,7 +240,7 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_irq(irq_find_mapping(chip->irqdomain, gpio));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, gpio));
raw_spin_lock(&idio16gpio->lock);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 6aaaab79c205..b70974cb9ef1 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -221,7 +221,7 @@ static void pl061_irq_handler(struct irq_desc *desc)
pending = readb(pl061->base + GPIOMIS);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
}
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 6029899789f3..f480fb896963 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -330,16 +330,6 @@ static int pxa_gpio_of_xlate(struct gpio_chip *gc,
}
#endif
-static int pxa_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
- return pinctrl_request_gpio(chip->base + offset);
-}
-
-static void pxa_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
- pinctrl_free_gpio(chip->base + offset);
-}
-
static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
struct device_node *np, void __iomem *regbase)
{
@@ -358,8 +348,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
- pchip->chip.request = pxa_gpio_request;
- pchip->chip.free = pxa_gpio_free;
+ pchip->chip.request = gpiochip_generic_request;
+ pchip->chip.free = gpiochip_generic_free;
#ifdef CONFIG_OF_GPIO
pchip->chip.of_node = np;
pchip->chip.of_xlate = pxa_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1f0871553fd2..e76de57dd617 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -206,7 +207,7 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
- generic_handle_irq(irq_find_mapping(p->gpio_chip.irqdomain,
+ generic_handle_irq(irq_find_mapping(p->gpio_chip.irq.domain,
offset));
irqs_handled++;
}
@@ -249,7 +250,7 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
if (error < 0)
return error;
- error = pinctrl_request_gpio(chip->base + offset);
+ error = pinctrl_gpio_request(chip->base + offset);
if (error)
pm_runtime_put(&p->pdev->dev);
@@ -260,7 +261,7 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
/*
* Set the GPIO as an input to ensure that the next GPIO request won't
@@ -393,16 +394,11 @@ MODULE_DEVICE_TABLE(of, gpio_rcar_of_table);
static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
{
struct device_node *np = p->pdev->dev.of_node;
- const struct of_device_id *match;
const struct gpio_rcar_info *info;
struct of_phandle_args args;
int ret;
- match = of_match_node(gpio_rcar_of_table, np);
- if (!match)
- return -EINVAL;
-
- info = match->data;
+ info = of_device_get_match_data(&p->pdev->dev);
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
@@ -456,19 +452,17 @@ static int gpio_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-
- if (!io || !irq) {
- dev_err(dev, "missing IRQ or IOMEM\n");
+ if (!irq) {
+ dev_err(dev, "missing IRQ\n");
ret = -EINVAL;
goto err0;
}
- p->base = devm_ioremap_nocache(dev, io->start, resource_size(io));
- if (!p->base) {
- dev_err(dev, "failed to remap I/O memory\n");
- ret = -ENXIO;
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->base = devm_ioremap_resource(dev, io);
+ if (IS_ERR(p->base)) {
+ ret = PTR_ERR(p->base);
goto err0;
}
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index e85903eddc68..23e771dba4c1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
struct gpio_reg *r = to_gpio_reg(gc);
int irq = r->irqs[offset];
- if (irq >= 0 && r->irqdomain)
- irq = irq_find_mapping(r->irqdomain, irq);
+ if (irq >= 0 && r->irq.domain)
+ irq = irq_find_mapping(r->irq.domain, irq);
return irq;
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 16cbc5702865..e6e5cca624a7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -299,7 +299,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
edge_det = !!(ret & mask);
-
+ /* fall through */
case STMPE1801:
rise_reg = stmpe->regs[STMPE_IDX_GPRER_LSB + bank];
fall_reg = stmpe->regs[STMPE_IDX_GPFER_LSB + bank];
@@ -312,7 +312,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (ret < 0)
return;
fall = !!(ret & mask);
-
+ /* fall through */
case STMPE801:
case STMPE1600:
irqen_reg = stmpe->regs[STMPE_IDX_IEGPIOR_LSB + bank];
@@ -397,7 +397,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = bank * 8 + bit;
- int child_irq = irq_find_mapping(stmpe_gpio->chip.irqdomain,
+ int child_irq = irq_find_mapping(stmpe_gpio->chip.irq.domain,
line);
handle_nested_irq(child_irq);
@@ -451,7 +451,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
of_property_read_u32(np, "st,norequest-mask",
&stmpe_gpio->norequest_mask);
if (stmpe_gpio->norequest_mask)
- stmpe_gpio->chip.irq_need_valid_mask = true;
+ stmpe_gpio->chip.irq.need_valid_mask = true;
if (irq < 0)
dev_info(&pdev->dev,
@@ -482,7 +482,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
/* Forbid unused lines to be mapped as IRQs */
for (i = 0; i < sizeof(u32); i++)
if (stmpe_gpio->norequest_mask & BIT(i))
- clear_bit(i, stmpe_gpio->chip.irq_valid_mask);
+ clear_bit(i, stmpe_gpio->chip.irq.valid_mask);
}
ret = gpiochip_irqchip_add_nested(&stmpe_gpio->chip,
&stmpe_gpio_irq_chip,
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 091ffaaec635..ac6f2a9841e5 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -193,6 +193,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
tb10x_gpio->gc.label =
devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
+ if (!tb10x_gpio->gc.label)
+ return -ENOMEM;
+
tb10x_gpio->gc.parent = &pdev->dev;
tb10x_gpio->gc.owner = THIS_MODULE;
tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 433b45ef332e..91a8ef8e7f3f 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -268,7 +268,7 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = i * 8 + bit;
- int irq = irq_find_mapping(tc3589x_gpio->chip.irqdomain,
+ int irq = irq_find_mapping(tc3589x_gpio->chip.irq.domain,
line);
handle_nested_irq(irq);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index fbaf974277df..8db47f671708 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -141,14 +141,14 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- pinctrl_free_gpio(offset);
+ pinctrl_gpio_free(offset);
tegra_gpio_disable(tgi, offset);
}
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
new file mode 100644
index 000000000000..7f1aa4c21e0d
--- /dev/null
+++ b/drivers/gpio/gpio-tegra186.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2016-2017 NVIDIA Corporation
+ *
+ * Author: Thierry Reding <treding@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/gpio/tegra186-gpio.h>
+
+#define TEGRA186_GPIO_ENABLE_CONFIG 0x00
+#define TEGRA186_GPIO_ENABLE_CONFIG_ENABLE BIT(0)
+#define TEGRA186_GPIO_ENABLE_CONFIG_OUT BIT(1)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_NONE (0x0 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL (0x1 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE (0x2 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE (0x3 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK (0x3 << 2)
+#define TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL BIT(4)
+#define TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT BIT(6)
+
+#define TEGRA186_GPIO_DEBOUNCE_CONTROL 0x04
+#define TEGRA186_GPIO_DEBOUNCE_CONTROL_THRESHOLD(x) ((x) & 0xff)
+
+#define TEGRA186_GPIO_INPUT 0x08
+#define TEGRA186_GPIO_INPUT_HIGH BIT(0)
+
+#define TEGRA186_GPIO_OUTPUT_CONTROL 0x0c
+#define TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED BIT(0)
+
+#define TEGRA186_GPIO_OUTPUT_VALUE 0x10
+#define TEGRA186_GPIO_OUTPUT_VALUE_HIGH BIT(0)
+
+#define TEGRA186_GPIO_INTERRUPT_CLEAR 0x14
+
+#define TEGRA186_GPIO_INTERRUPT_STATUS(x) (0x100 + (x) * 4)
+
+struct tegra_gpio_port {
+ const char *name;
+ unsigned int offset;
+ unsigned int pins;
+ unsigned int irq;
+};
+
+struct tegra_gpio_soc {
+ const struct tegra_gpio_port *ports;
+ unsigned int num_ports;
+ const char *name;
+};
+
+struct tegra_gpio {
+ struct gpio_chip gpio;
+ struct irq_chip intc;
+ unsigned int num_irq;
+ unsigned int *irq;
+
+ const struct tegra_gpio_soc *soc;
+
+ void __iomem *base;
+};
+
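+/*
+ * Look up the port that a chip-relative pin number belongs to and rewrite
+ * *pin to the pin's index within that port. Returns NULL if out of range.
+ */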
+static const struct tegra_gpio_port *
+tegra186_gpio_get_port(struct tegra_gpio *gpio, unsigned int *pin)
+{
+ unsigned int start = 0, i;
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+
+ if (*pin >= start && *pin < start + port->pins) {
+ *pin -= start;
+ return port;
+ }
+
+ start += port->pins;
+ }
+
+ return NULL;
+}
+
+static void __iomem *tegra186_gpio_get_base(struct tegra_gpio *gpio,
+ unsigned int pin)
+{
+ const struct tegra_gpio_port *port;
+
+ port = tegra186_gpio_get_port(gpio, &pin);
+ if (!port)
+ return NULL;
+
+ return gpio->base + port->offset + pin * 0x20;
+}
+
+static int tegra186_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
+ return 0;
+
+ return 1;
+}
+
+static int tegra186_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL);
+ value |= TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED;
+ writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_OUT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ /* configure output level first */
+ chip->set(chip, offset, level);
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -EINVAL;
+
+ /* set the direction */
+ value = readl(base + TEGRA186_GPIO_OUTPUT_CONTROL);
+ value &= ~TEGRA186_GPIO_OUTPUT_CONTROL_FLOATED;
+ writel(value, base + TEGRA186_GPIO_OUTPUT_CONTROL);
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_ENABLE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_OUT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ return 0;
+}
+
+static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ if (value & TEGRA186_GPIO_ENABLE_CONFIG_OUT)
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ else
+ value = readl(base + TEGRA186_GPIO_INPUT);
+
+ return value & BIT(0);
+}
+
+static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ if (level == 0)
+ value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+ else
+ value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+
+ writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
+}
+
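+/*
+ * The first OF cell encodes a line as (port * 8 + pin); translate it into
+ * the chip-relative line number by adding up the sizes of preceding ports.
+ */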
+static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *spec,
+ u32 *flags)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ unsigned int port, pin, i, offset = 0;
+
+ if (WARN_ON(chip->of_gpio_n_cells < 2))
+ return -EINVAL;
+
+ if (WARN_ON(spec->args_count < chip->of_gpio_n_cells))
+ return -EINVAL;
+
+ port = spec->args[0] / 8;
+ pin = spec->args[0] % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_err(chip->parent, "invalid port number: %u\n", port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < port; i++)
+ offset += gpio->soc->ports[i].pins;
+
+ if (flags)
+ *flags = spec->args[1];
+
+ return offset + pin;
+}
+
+static void tegra186_irq_ack(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ writel(1, base + TEGRA186_GPIO_INTERRUPT_CLEAR);
+}
+
+static void tegra186_irq_mask(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+}
+
+static void tegra186_irq_unmask(struct irq_data *data)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_INTERRUPT;
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+}
+
+static int tegra186_irq_set_type(struct irq_data *data, unsigned int flow)
+{
+ struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, data->hwirq);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_ENABLE_CONFIG);
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_MASK;
+ value &= ~TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+
+ switch (flow & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_SINGLE_EDGE;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_DOUBLE_EDGE;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL;
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_LEVEL;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ value |= TEGRA186_GPIO_ENABLE_CONFIG_TRIGGER_TYPE_LEVEL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(value, base + TEGRA186_GPIO_ENABLE_CONFIG);
+
+ if ((flow & IRQ_TYPE_EDGE_BOTH) == 0)
+ irq_set_handler_locked(data, handle_level_irq);
+ else
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
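+/*
+ * Chained handler: walk every port routed to the parent interrupt that
+ * fired and dispatch the mapped IRQ of each pin with a pending status bit.
+ */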
+static void tegra186_gpio_irq(struct irq_desc *desc)
+{
+ struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
+ struct irq_domain *domain = gpio->gpio.irq.domain;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int parent = irq_desc_get_irq(desc);
+ unsigned int i, offset = 0;
+
+ chained_irq_enter(chip, desc);
+
+ for (i = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ void __iomem *base = gpio->base + port->offset;
+ unsigned int pin, irq;
+ unsigned long value;
+
+ /* skip ports that are not associated with this controller */
+ if (parent != gpio->irq[port->irq])
+ goto skip;
+
+ value = readl(base + TEGRA186_GPIO_INTERRUPT_STATUS(1));
+
+ for_each_set_bit(pin, &value, port->pins) {
+ irq = irq_find_mapping(domain, offset + pin);
+ if (WARN_ON(irq == 0))
+ continue;
+
+ generic_handle_irq(irq);
+ }
+
+skip:
+ offset += port->pins;
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int tegra186_gpio_irq_domain_xlate(struct irq_domain *domain,
+ struct device_node *np,
+ const u32 *spec, unsigned int size,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(domain->host_data);
+ unsigned int port, pin, i, offset = 0;
+
+ if (size < 2)
+ return -EINVAL;
+
+ port = spec[0] / 8;
+ pin = spec[0] % 8;
+
+ if (port >= gpio->soc->num_ports) {
+ dev_err(gpio->gpio.parent, "invalid port number: %u\n", port);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < port; i++)
+ offset += gpio->soc->ports[i].pins;
+
+ *type = spec[1] & IRQ_TYPE_SENSE_MASK;
+ *hwirq = offset + pin;
+
+ return 0;
+}
+
+static const struct irq_domain_ops tegra186_gpio_irq_domain_ops = {
+ .map = gpiochip_irq_map,
+ .unmap = gpiochip_irq_unmap,
+ .xlate = tegra186_gpio_irq_domain_xlate,
+};
+
+static int tegra186_gpio_probe(struct platform_device *pdev)
+{
+ unsigned int i, j, offset;
+ struct gpio_irq_chip *irq;
+ struct tegra_gpio *gpio;
+ struct resource *res;
+ char **names;
+ int err;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->soc = of_device_get_match_data(&pdev->dev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio");
+ gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ err = platform_irq_count(pdev);
+ if (err < 0)
+ return err;
+
+ gpio->num_irq = err;
+
+ gpio->irq = devm_kcalloc(&pdev->dev, gpio->num_irq, sizeof(*gpio->irq),
+ GFP_KERNEL);
+ if (!gpio->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < gpio->num_irq; i++) {
+ err = platform_get_irq(pdev, i);
+ if (err < 0)
+ return err;
+
+ gpio->irq[i] = err;
+ }
+
+ gpio->gpio.label = gpio->soc->name;
+ gpio->gpio.parent = &pdev->dev;
+
+ gpio->gpio.get_direction = tegra186_gpio_get_direction;
+ gpio->gpio.direction_input = tegra186_gpio_direction_input;
+ gpio->gpio.direction_output = tegra186_gpio_direction_output;
+ gpio->gpio.get = tegra186_gpio_get;
+ gpio->gpio.set = tegra186_gpio_set;
+
+ gpio->gpio.base = -1;
+
+ for (i = 0; i < gpio->soc->num_ports; i++)
+ gpio->gpio.ngpio += gpio->soc->ports[i].pins;
+
+ names = devm_kcalloc(gpio->gpio.parent, gpio->gpio.ngpio,
+ sizeof(*names), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+ char *name;
+
+ for (j = 0; j < port->pins; j++) {
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL,
+ "P%s.%02x", port->name, j);
+ if (!name)
+ return -ENOMEM;
+
+ names[offset + j] = name;
+ }
+
+ offset += port->pins;
+ }
+
+ gpio->gpio.names = (const char * const *)names;
+
+ gpio->gpio.of_node = pdev->dev.of_node;
+ gpio->gpio.of_gpio_n_cells = 2;
+ gpio->gpio.of_xlate = tegra186_gpio_of_xlate;
+
+ gpio->intc.name = pdev->dev.of_node->name;
+ gpio->intc.irq_ack = tegra186_irq_ack;
+ gpio->intc.irq_mask = tegra186_irq_mask;
+ gpio->intc.irq_unmask = tegra186_irq_unmask;
+ gpio->intc.irq_set_type = tegra186_irq_set_type;
+
+ irq = &gpio->gpio.irq;
+ irq->chip = &gpio->intc;
+ irq->domain_ops = &tegra186_gpio_irq_domain_ops;
+ irq->handler = handle_simple_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->parent_handler = tegra186_gpio_irq;
+ irq->parent_handler_data = gpio;
+ irq->num_parents = gpio->num_irq;
+ irq->parents = gpio->irq;
+
+ irq->map = devm_kcalloc(&pdev->dev, gpio->gpio.ngpio,
+ sizeof(*irq->map), GFP_KERNEL);
+ if (!irq->map)
+ return -ENOMEM;
+
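+ /* associate each GPIO line with the parent interrupt of its port */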
+ for (i = 0, offset = 0; i < gpio->soc->num_ports; i++) {
+ const struct tegra_gpio_port *port = &gpio->soc->ports[i];
+
+ for (j = 0; j < port->pins; j++)
+ irq->map[offset + j] = irq->parents[port->irq];
+
+ offset += port->pins;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ err = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra186_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#define TEGRA_MAIN_GPIO_PORT(port, base, count, controller) \
+ [TEGRA_MAIN_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra186_main_ports[] = {
+ TEGRA_MAIN_GPIO_PORT( A, 0x2000, 7, 2),
+ TEGRA_MAIN_GPIO_PORT( B, 0x3000, 7, 3),
+ TEGRA_MAIN_GPIO_PORT( C, 0x3200, 7, 3),
+ TEGRA_MAIN_GPIO_PORT( D, 0x3400, 6, 3),
+ TEGRA_MAIN_GPIO_PORT( E, 0x2200, 8, 2),
+ TEGRA_MAIN_GPIO_PORT( F, 0x2400, 6, 2),
+ TEGRA_MAIN_GPIO_PORT( G, 0x4200, 6, 4),
+ TEGRA_MAIN_GPIO_PORT( H, 0x1000, 7, 1),
+ TEGRA_MAIN_GPIO_PORT( I, 0x0800, 8, 0),
+ TEGRA_MAIN_GPIO_PORT( J, 0x5000, 8, 5),
+ TEGRA_MAIN_GPIO_PORT( K, 0x5200, 1, 5),
+ TEGRA_MAIN_GPIO_PORT( L, 0x1200, 8, 1),
+ TEGRA_MAIN_GPIO_PORT( M, 0x5600, 6, 5),
+ TEGRA_MAIN_GPIO_PORT( N, 0x0000, 7, 0),
+ TEGRA_MAIN_GPIO_PORT( O, 0x0200, 4, 0),
+ TEGRA_MAIN_GPIO_PORT( P, 0x4000, 7, 4),
+ TEGRA_MAIN_GPIO_PORT( Q, 0x0400, 6, 0),
+ TEGRA_MAIN_GPIO_PORT( R, 0x0a00, 6, 0),
+ TEGRA_MAIN_GPIO_PORT( T, 0x0600, 4, 0),
+ TEGRA_MAIN_GPIO_PORT( X, 0x1400, 8, 1),
+ TEGRA_MAIN_GPIO_PORT( Y, 0x1600, 7, 1),
+ TEGRA_MAIN_GPIO_PORT(BB, 0x2600, 2, 2),
+ TEGRA_MAIN_GPIO_PORT(CC, 0x5400, 4, 5),
+};
+
+static const struct tegra_gpio_soc tegra186_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra186_main_ports),
+ .ports = tegra186_main_ports,
+ .name = "tegra186-gpio",
+};
+
+#define TEGRA_AON_GPIO_PORT(port, base, count, controller) \
+ [TEGRA_AON_GPIO_PORT_##port] = { \
+ .name = #port, \
+ .offset = base, \
+ .pins = count, \
+ .irq = controller, \
+ }
+
+static const struct tegra_gpio_port tegra186_aon_ports[] = {
+ TEGRA_AON_GPIO_PORT( S, 0x0200, 5, 0),
+ TEGRA_AON_GPIO_PORT( U, 0x0400, 6, 0),
+ TEGRA_AON_GPIO_PORT( V, 0x0800, 8, 0),
+ TEGRA_AON_GPIO_PORT( W, 0x0a00, 8, 0),
+ TEGRA_AON_GPIO_PORT( Z, 0x0e00, 4, 0),
+ TEGRA_AON_GPIO_PORT(AA, 0x0c00, 8, 0),
+ TEGRA_AON_GPIO_PORT(EE, 0x0600, 3, 0),
+ TEGRA_AON_GPIO_PORT(FF, 0x0000, 5, 0),
+};
+
+static const struct tegra_gpio_soc tegra186_aon_soc = {
+ .num_ports = ARRAY_SIZE(tegra186_aon_ports),
+ .ports = tegra186_aon_ports,
+ .name = "tegra186-gpio-aon",
+};
+
+static const struct of_device_id tegra186_gpio_of_match[] = {
+ {
+ .compatible = "nvidia,tegra186-gpio",
+ .data = &tegra186_main_soc
+ }, {
+ .compatible = "nvidia,tegra186-gpio-aon",
+ .data = &tegra186_aon_soc
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver tegra186_gpio_driver = {
+ .driver = {
+ .name = "tegra186-gpio",
+ .of_match_table = tegra186_gpio_of_match,
+ },
+ .probe = tegra186_gpio_probe,
+ .remove = tegra186_gpio_remove,
+};
+module_platform_driver(tegra186_gpio_driver);
+
+MODULE_DESCRIPTION("NVIDIA Tegra186 GPIO controller driver");
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 57efb251f9c4..b5adb79a631a 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -417,18 +417,6 @@ static struct irq_chip thunderx_gpio_irq_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED
};
-static int thunderx_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct thunderx_gpio *txgpio = d->host_data;
-
- if (hwirq >= txgpio->chip.ngpio)
- return -EINVAL;
- if (!thunderx_gpio_is_gpio_nowarn(txgpio, hwirq))
- return -EPERM;
- return 0;
-}
-
static int thunderx_gpio_irq_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
irq_hw_number_t *hwirq,
@@ -455,7 +443,6 @@ static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
}
static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
- .map = thunderx_gpio_irq_map,
.alloc = thunderx_gpio_irq_alloc,
.translate = thunderx_gpio_irq_translate
};
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 22c5be65051f..0bb9bb583889 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -232,7 +232,7 @@ static int tz1090_gpio_request(struct gpio_chip *chip, unsigned int offset)
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
int ret;
- ret = pinctrl_request_gpio(chip->base + offset);
+ ret = pinctrl_gpio_request(chip->base + offset);
if (ret)
return ret;
@@ -246,7 +246,7 @@ static void tz1090_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tz1090_gpio_bank *bank = gpiochip_get_data(chip);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
tz1090_gpio_clear_bit(bank, REG_GPIO_BIT_EN, offset);
}
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
new file mode 100644
index 000000000000..016d7427ebfa
--- /dev/null
+++ b/drivers/gpio/gpio-uniphier.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <dt-bindings/gpio/uniphier-gpio.h>
+
+#define UNIPHIER_GPIO_BANK_MASK \
+ GENMASK((UNIPHIER_GPIO_LINES_PER_BANK) - 1, 0)
+
+#define UNIPHIER_GPIO_IRQ_MAX_NUM 24
+
+#define UNIPHIER_GPIO_PORT_DATA 0x0 /* data */
+#define UNIPHIER_GPIO_PORT_DIR 0x4 /* direction (1:in, 0:out) */
+#define UNIPHIER_GPIO_IRQ_EN 0x90 /* irq enable */
+#define UNIPHIER_GPIO_IRQ_MODE 0x94 /* irq mode (1: both edge) */
+#define UNIPHIER_GPIO_IRQ_FLT_EN 0x98 /* noise filter enable */
+#define UNIPHIER_GPIO_IRQ_FLT_CYC 0x9c /* noise filter clock cycle */
+
+struct uniphier_gpio_priv {
+ struct gpio_chip chip;
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+ void __iomem *regs;
+ spinlock_t lock;
+ u32 saved_vals[0];
+};
+
+static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
+{
+ unsigned int reg;
+
+ reg = (bank + 1) * 8;
+
+ /*
+ * Unfortunately, the GPIO port registers are not contiguous because
+ * offset 0x90-0x9f is used for IRQ. Add 0x10 when crossing the region.
+ */
+ if (reg >= UNIPHIER_GPIO_IRQ_EN)
+ reg += 0x10;
+
+ return reg;
+}
+
+static void uniphier_gpio_get_bank_and_mask(unsigned int offset,
+ unsigned int *bank, u32 *mask)
+{
+ *bank = offset / UNIPHIER_GPIO_LINES_PER_BANK;
+ *mask = BIT(offset % UNIPHIER_GPIO_LINES_PER_BANK);
+}
+
+static void uniphier_gpio_reg_update(struct uniphier_gpio_priv *priv,
+ unsigned int reg, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tmp = readl(priv->regs + reg);
+ tmp &= ~mask;
+ tmp |= mask & val;
+ writel(tmp, priv->regs + reg);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void uniphier_gpio_bank_write(struct gpio_chip *chip, unsigned int bank,
+ unsigned int reg, u32 mask, u32 val)
+{
+ struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
+
+ if (!mask)
+ return;
+
+ uniphier_gpio_reg_update(priv, uniphier_gpio_bank_to_reg(bank) + reg,
+ mask, val);
+}
+
+static void uniphier_gpio_offset_write(struct gpio_chip *chip,
+ unsigned int offset, unsigned int reg,
+ int val)
+{
+ unsigned int bank;
+ u32 mask;
+
+ uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
+
+ uniphier_gpio_bank_write(chip, bank, reg, mask, val ? mask : 0);
+}
+
+static int uniphier_gpio_offset_read(struct gpio_chip *chip,
+ unsigned int offset, unsigned int reg)
+{
+ struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
+ unsigned int bank, reg_offset;
+ u32 mask;
+
+ uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
+ reg_offset = uniphier_gpio_bank_to_reg(bank) + reg;
+
+ return !!(readl(priv->regs + reg_offset) & mask);
+}
+
+static int uniphier_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR);
+}
+
+static int uniphier_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 1);
+
+ return 0;
+}
+
+static int uniphier_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 0);
+
+ return 0;
+}
+
+static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
+}
+
+static void uniphier_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+}
+
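+/*
+ * Update several lines at once: split the bitmaps into per-bank chunks and
+ * issue one read-modify-write per bank that has bits set in the mask.
+ */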
+static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bank, shift, bank_mask, bank_bits;
+ int i;
+
+ for (i = 0; i < chip->ngpio; i += UNIPHIER_GPIO_LINES_PER_BANK) {
+ bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
+ shift = i % BITS_PER_LONG;
+ bank_mask = (mask[BIT_WORD(i)] >> shift) &
+ UNIPHIER_GPIO_BANK_MASK;
+ bank_bits = bits[BIT_WORD(i)] >> shift;
+
+ uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
+ bank_mask, bank_bits);
+ }
+}
+
+static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct irq_fwspec fwspec;
+
+ if (offset < UNIPHIER_GPIO_IRQ_OFFSET)
+ return -ENXIO;
+
+ fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static void uniphier_gpio_irq_mask(struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
+
+ return irq_chip_mask_parent(data);
+}
+
+static void uniphier_gpio_irq_unmask(struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
+
+ return irq_chip_unmask_parent(data);
+}
+
+static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct uniphier_gpio_priv *priv = data->chip_data;
+ u32 mask = BIT(data->hwirq);
+ u32 val = 0;
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ val = mask;
+ type = IRQ_TYPE_EDGE_FALLING;
+ }
+
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_MODE, mask, val);
+ /* To enable both-edge detection, the noise filter must be enabled. */
+ uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_FLT_EN, mask, val);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
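+/*
+ * Translate a GPIO interrupt number into the hwirq of the parent (AIDET)
+ * domain by walking the <base parent-base size> triplets in the
+ * "socionext,interrupt-ranges" property.
+ */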
+static int uniphier_gpio_irq_get_parent_hwirq(struct uniphier_gpio_priv *priv,
+ unsigned int hwirq)
+{
+ struct device_node *np = priv->chip.parent->of_node;
+ const __be32 *range;
+ u32 base, parent_base, size;
+ int len;
+
+ range = of_get_property(np, "socionext,interrupt-ranges", &len);
+ if (!range)
+ return -EINVAL;
+
+ len /= sizeof(*range);
+
+ for (; len >= 3; len -= 3) {
+ base = be32_to_cpu(*range++);
+ parent_base = be32_to_cpu(*range++);
+ size = be32_to_cpu(*range++);
+
+ if (base <= hwirq && hwirq < base + size)
+ return hwirq - base + parent_base;
+ }
+
+ return -ENOENT;
+}
+
+static int uniphier_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+
+ *out_hwirq = fwspec->param[0];
+ *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int uniphier_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = uniphier_gpio_irq_domain_translate(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = uniphier_gpio_irq_get_parent_hwirq(priv, hwirq);
+ if (ret < 0)
+ return ret;
+
+ /* parent is UniPhier AIDET */
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = ret;
+ parent_fwspec.param[1] = (type == IRQ_TYPE_EDGE_BOTH) ?
+ IRQ_TYPE_EDGE_FALLING : type;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &priv->irq_chip, priv);
+ if (ret)
+ return ret;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *data, bool early)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct gpio_chip *chip = &priv->chip;
+
+ gpiochip_lock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
+ return 0;
+}
+
+static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *data)
+{
+ struct uniphier_gpio_priv *priv = domain->host_data;
+ struct gpio_chip *chip = &priv->chip;
+
+ gpiochip_unlock_as_irq(chip, data->hwirq + UNIPHIER_GPIO_IRQ_OFFSET);
+}
+
+static const struct irq_domain_ops uniphier_gpio_irq_domain_ops = {
+ .alloc = uniphier_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .activate = uniphier_gpio_irq_domain_activate,
+ .deactivate = uniphier_gpio_irq_domain_deactivate,
+ .translate = uniphier_gpio_irq_domain_translate,
+};
+
+static void uniphier_gpio_hw_init(struct uniphier_gpio_priv *priv)
+{
+ /*
+ * Due to the hardware design, the noise filter must be enabled to
+ * detect both-edge interrupts. This filter is intended to remove the
+ * noise from the irq lines. It does not work for GPIO input, so GPIO
+ * debounce is not supported. Unfortunately, the filter period is
+ * shared among all irq lines. Just choose a sensible period here.
+ */
+ writel(0xff, priv->regs + UNIPHIER_GPIO_IRQ_FLT_CYC);
+}
+
+static unsigned int uniphier_gpio_get_nbanks(unsigned int ngpio)
+{
+ return DIV_ROUND_UP(ngpio, UNIPHIER_GPIO_LINES_PER_BANK);
+}
+
+static int uniphier_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent_np;
+ struct irq_domain *parent_domain;
+ struct uniphier_gpio_priv *priv;
+ struct gpio_chip *chip;
+ struct irq_chip *irq_chip;
+ struct resource *regs;
+ unsigned int nregs;
+ u32 ngpios;
+ int ret;
+
+ parent_np = of_irq_find_parent(dev->of_node);
+ if (!parent_np)
+ return -ENXIO;
+
+ parent_domain = irq_find_host(parent_np);
+ of_node_put(parent_np);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ ret = of_property_read_u32(dev->of_node, "ngpios", &ngpios);
+ if (ret)
+ return ret;
+
+ nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3;
+ priv = devm_kzalloc(dev,
+ sizeof(*priv) + sizeof(priv->saved_vals[0]) * nregs,
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ spin_lock_init(&priv->lock);
+
+ chip = &priv->chip;
+ chip->label = dev_name(dev);
+ chip->parent = dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = uniphier_gpio_get_direction;
+ chip->direction_input = uniphier_gpio_direction_input;
+ chip->direction_output = uniphier_gpio_direction_output;
+ chip->get = uniphier_gpio_get;
+ chip->set = uniphier_gpio_set;
+ chip->set_multiple = uniphier_gpio_set_multiple;
+ chip->to_irq = uniphier_gpio_to_irq;
+ chip->base = -1;
+ chip->ngpio = ngpios;
+
+ irq_chip = &priv->irq_chip;
+ irq_chip->name = dev_name(dev);
+ irq_chip->irq_mask = uniphier_gpio_irq_mask;
+ irq_chip->irq_unmask = uniphier_gpio_irq_unmask;
+ irq_chip->irq_eoi = irq_chip_eoi_parent;
+ irq_chip->irq_set_affinity = irq_chip_set_affinity_parent;
+ irq_chip->irq_set_type = uniphier_gpio_irq_set_type;
+
+ uniphier_gpio_hw_init(priv);
+
+ ret = devm_gpiochip_add_data(dev, chip, priv);
+ if (ret)
+ return ret;
+
+ priv->domain = irq_domain_create_hierarchy(
+ parent_domain, 0,
+ UNIPHIER_GPIO_IRQ_MAX_NUM,
+ of_node_to_fwnode(dev->of_node),
+ &uniphier_gpio_irq_domain_ops, priv);
+ if (!priv->domain)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int uniphier_gpio_remove(struct platform_device *pdev)
+{
+ struct uniphier_gpio_priv *priv = platform_get_drvdata(pdev);
+
+ irq_domain_remove(priv->domain);
+
+ return 0;
+}
+
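+/*
+ * saved_vals[] holds DATA and DIR for every bank, followed by IRQ_EN,
+ * IRQ_MODE and IRQ_FLT_EN (nbanks * 2 + 3 words, as allocated in probe).
+ */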
+static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
+{
+ struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
+ unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
+ u32 *val = priv->saved_vals;
+ unsigned int reg;
+ int i;
+
+ for (i = 0; i < nbanks; i++) {
+ reg = uniphier_gpio_bank_to_reg(i);
+
+ *val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
+ *val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
+ }
+
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_EN);
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_MODE);
+ *val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_gpio_resume(struct device *dev)
+{
+ struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
+ unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
+ const u32 *val = priv->saved_vals;
+ unsigned int reg;
+ int i;
+
+ for (i = 0; i < nbanks; i++) {
+ reg = uniphier_gpio_bank_to_reg(i);
+
+ writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
+ writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
+ }
+
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_EN);
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_MODE);
+ writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);
+
+ uniphier_gpio_hw_init(priv);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_gpio_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
+ uniphier_gpio_resume)
+};
+
+static const struct of_device_id uniphier_gpio_match[] = {
+ { .compatible = "socionext,uniphier-gpio" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_gpio_match);
+
+static struct platform_driver uniphier_gpio_driver = {
+ .probe = uniphier_gpio_probe,
+ .remove = uniphier_gpio_remove,
+ .driver = {
+ .name = "uniphier-gpio",
+ .of_match_table = uniphier_gpio_match,
+ .pm = &uniphier_gpio_pm_ops,
+ },
+};
+module_platform_driver(uniphier_gpio_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index cbe9e06861de..4610cc2938ad 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -160,7 +160,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_irq(irq_find_mapping(port->gc.irqdomain, pin));
+ generic_handle_irq(irq_find_mapping(port->gc.irq.domain, pin));
}
chained_irq_exit(chip, desc);
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 85341eab795d..dde7c6aecbb5 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -350,7 +350,7 @@ static irqreturn_t wcove_gpio_irq_handler(int irq, void *data)
offset = (gpio > GROUP0_NR_IRQS) ? 1 : 0;
mask = (offset == 1) ? BIT(gpio - GROUP0_NR_IRQS) :
BIT(gpio);
- virq = irq_find_mapping(wg->chip.irqdomain, gpio);
+ virq = irq_find_mapping(wg->chip.irq.domain, gpio);
handle_nested_irq(virq);
regmap_update_bits(wg->regmap, IRQ_STATUS_BASE + offset,
mask, mask);
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 5037974ac063..746648244bf3 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -332,7 +332,7 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
int_id = inb(ws16c48gpio->base + 8 + port);
for_each_set_bit(gpio, &int_id, 8)
generic_handle_irq(irq_find_mapping(
- chip->irqdomain, gpio + 8*port));
+ chip->irq.domain, gpio + 8*port));
}
int_pending = inb(ws16c48gpio->base + 6) & 0x7;
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 033258634b8c..2313af82fad3 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -130,18 +130,16 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
(gpio > HWIRQ_TO_GPIO(priv, priv->nirq)))
return -ENXIO;
- if (gc->parent->of_node)
- fwspec.fwnode = of_node_to_fwnode(gc->parent->of_node);
- else
- fwspec.fwnode = gc->parent->fwnode;
+ fwspec.fwnode = gc->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
fwspec.param[1] = IRQ_TYPE_NONE;
return irq_create_fwspec_mapping(&fwspec);
}
-static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data)
+static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data,
+ bool early)
{
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
@@ -150,11 +148,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
dev_err(priv->gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
- return;
+ return -ENOSPC;
}
xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
+ return 0;
}
static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
@@ -231,7 +230,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *regs;
struct irq_domain *parent_domain = NULL;
- struct fwnode_handle *fwnode;
u32 val32;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -285,18 +283,13 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- if (pdev->dev.of_node)
- fwnode = of_node_to_fwnode(pdev->dev.of_node);
- else
- fwnode = pdev->dev.fwnode;
-
priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
- 0, priv->nirq, fwnode,
+ 0, priv->nirq, pdev->dev.fwnode,
&xgene_gpio_sb_domain_ops, priv);
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irqdomain = priv->irq_domain;
+ priv->gc.irq.domain = priv->irq_domain;
ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
if (ret) {
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index d857e1d8e731..e74bd43a6974 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -225,7 +225,7 @@ static void xlp_gpio_generic_handler(struct irq_desc *desc)
if (gpio_stat & BIT(gpio % XLP_GPIO_REGSZ))
generic_handle_irq(irq_find_mapping(
- priv->chip.irqdomain, gpio));
+ priv->chip.irq.domain, gpio));
}
chained_irq_exit(irqchip, desc);
}
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index be3a87da8438..5eacad9b2692 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -170,7 +170,7 @@ static void zx_irq_handler(struct irq_desc *desc)
writew_relaxed(pending, chip->base + ZX_GPIO_IC);
if (pending) {
for_each_set_bit(offset, &pending, ZX_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
+ generic_handle_irq(irq_find_mapping(gc->irq.domain,
offset));
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index b3cc948a2d8b..75ee877e5cd5 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -562,7 +562,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
unsigned long pending)
{
unsigned int bank_offset = gpio->p_data->bank_min[bank_num];
- struct irq_domain *irqdomain = gpio->chip.irqdomain;
+ struct irq_domain *irqdomain = gpio->chip.irq.domain;
int offset;
if (!pending)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index bfcd20699ec8..e0d59e61b52f 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -153,8 +153,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOOSE_VALUE;
+ if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
+ *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
return desc;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index eb80dac4e26a..aad84a6306c4 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -72,6 +72,8 @@ static LIST_HEAD(gpio_lookup_list);
LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
+static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *key);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -365,28 +367,28 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
struct linehandle_state *lh = filep->private_data;
void __user *ip = (void __user *)arg;
struct gpiohandle_data ghd;
+ int vals[GPIOHANDLES_MAX];
int i;
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
- int val;
+ /* TODO: check if descriptors are really input */
+ int ret = gpiod_get_array_value_complex(false,
+ true,
+ lh->numdescs,
+ lh->descs,
+ vals);
+ if (ret)
+ return ret;
memset(&ghd, 0, sizeof(ghd));
-
- /* TODO: check if descriptors are really input */
- for (i = 0; i < lh->numdescs; i++) {
- val = gpiod_get_value_cansleep(lh->descs[i]);
- if (val < 0)
- return val;
- ghd.values[i] = val;
- }
+ for (i = 0; i < lh->numdescs; i++)
+ ghd.values[i] = vals[i];
if (copy_to_user(ip, &ghd, sizeof(ghd)))
return -EFAULT;
return 0;
} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
- int vals[GPIOHANDLES_MAX];
-
/* TODO: check if descriptors are really output */
if (copy_from_user(&ghd, ip, sizeof(ghd)))
return -EFAULT;
@@ -444,12 +446,25 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct linehandle_state *lh;
struct file *file;
int fd, i, ret;
+ u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
return -EFAULT;
if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
return -EINVAL;
+ lflags = handlereq.flags;
+
+ /* Return an error if an unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+ return -EINVAL;
+
+ /* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
+ if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
+ ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
+ return -EINVAL;
+
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
@@ -470,7 +485,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
/* Request each GPIO */
for (i = 0; i < handlereq.lines; i++) {
u32 offset = handlereq.lineoffsets[i];
- u32 lflags = handlereq.flags;
struct gpio_desc *desc;
if (offset >= gdev->ngpio) {
@@ -478,12 +492,6 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_descs;
}
- /* Return an error if a unknown flag is set */
- if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
- ret = -EINVAL;
- goto out_free_descs;
- }
-
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -1091,30 +1099,8 @@ static void gpiochip_setup_devs(void)
}
}
-/**
- * gpiochip_add_data() - register a gpio_chip
- * @chip: the chip to register, with chip->base initialized
- * @data: driver-private data associated with this chip
- *
- * Context: potentially before irqs will work
- *
- * When gpiochip_add_data() is called very early during boot, so that GPIOs
- * can be freely used, the chip->parent device must be registered before
- * the gpio framework's arch_initcall(). Otherwise sysfs initialization
- * for GPIOs will fail rudely.
- *
- * gpiochip_add_data() must only be called after gpiolib initialization,
- * ie after core_initcall().
- *
- * If chip->base is negative, this requests dynamic assignment of
- * a range of valid GPIOs.
- *
- * Returns:
- * A negative errno if the chip can't be registered, such as because the
- * chip->base is invalid or already associated with a different chip.
- * Otherwise it returns zero as a success code.
- */
-int gpiochip_add_data(struct gpio_chip *chip, void *data)
+int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ struct lock_class_key *key)
{
unsigned long flags;
int status = 0;
@@ -1260,6 +1246,10 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
if (status)
goto err_remove_from_list;
+ status = gpiochip_add_irqchip(chip, key);
+ if (status)
+ goto err_remove_chip;
+
status = of_gpiochip_add(chip);
if (status)
goto err_remove_chip;
@@ -1303,7 +1293,7 @@ err_free_gdev:
kfree(gdev);
return status;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_data);
+EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
@@ -1498,33 +1488,33 @@ static struct gpio_chip *find_chip_by_name(const char *name)
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
- if (!gpiochip->irq_need_valid_mask)
+ if (!gpiochip->irq.need_valid_mask)
return 0;
- gpiochip->irq_valid_mask = kcalloc(BITS_TO_LONGS(gpiochip->ngpio),
+ gpiochip->irq.valid_mask = kcalloc(BITS_TO_LONGS(gpiochip->ngpio),
sizeof(long), GFP_KERNEL);
- if (!gpiochip->irq_valid_mask)
+ if (!gpiochip->irq.valid_mask)
return -ENOMEM;
/* Assume by default all GPIOs are valid */
- bitmap_fill(gpiochip->irq_valid_mask, gpiochip->ngpio);
+ bitmap_fill(gpiochip->irq.valid_mask, gpiochip->ngpio);
return 0;
}
static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
{
- kfree(gpiochip->irq_valid_mask);
- gpiochip->irq_valid_mask = NULL;
+ kfree(gpiochip->irq.valid_mask);
+ gpiochip->irq.valid_mask = NULL;
}
static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gpiochip->irq_valid_mask))
+ if (likely(!gpiochip->irq.valid_mask))
return true;
- return test_bit(offset, gpiochip->irq_valid_mask);
+ return test_bit(offset, gpiochip->irq.valid_mask);
}
/**
@@ -1544,7 +1534,7 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
{
unsigned int offset;
- if (!gpiochip->irqdomain) {
+ if (!gpiochip->irq.domain) {
chip_err(gpiochip, "called %s before setting up irqchip\n",
__func__);
return;
@@ -1564,14 +1554,15 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq_chained_parent = parent_irq;
+ gpiochip->irq.parents = &parent_irq;
+ gpiochip->irq.num_parents = 1;
}
/* Set the parent IRQ for all affected IRQs */
for (offset = 0; offset < gpiochip->ngpio; offset++) {
if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
continue;
- irq_set_parent(irq_find_mapping(gpiochip->irqdomain, offset),
+ irq_set_parent(irq_find_mapping(gpiochip->irq.domain, offset),
parent_irq);
}
}
@@ -1591,6 +1582,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
unsigned int parent_irq,
irq_flow_handler_t parent_handler)
{
+ if (gpiochip->irq.threaded) {
+ chip_err(gpiochip, "tried to chain a threaded gpiochip\n");
+ return;
+ }
+
gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
parent_handler);
}
@@ -1607,10 +1603,6 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int parent_irq)
{
- if (!gpiochip->irq_nested) {
- chip_err(gpiochip, "tried to nest a chained gpiochip\n");
- return;
- }
gpiochip_set_cascaded_irqchip(gpiochip, irqchip, parent_irq,
NULL);
}
@@ -1626,10 +1618,11 @@ EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
* gpiochip by assigning the gpiochip as chip data, and using the irqchip
* stored inside the gpiochip.
*/
-static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
+int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
{
struct gpio_chip *chip = d->host_data;
+ int err = 0;
if (!gpiochip_irqchip_irq_valid(chip, hwirq))
return -ENXIO;
@@ -1639,32 +1632,42 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
*/
- irq_set_lockdep_class(irq, chip->lock_key);
- irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
+ irq_set_lockdep_class(irq, chip->irq.lock_key);
+ irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
/* Chips that use nested thread handlers have them marked */
- if (chip->irq_nested)
+ if (chip->irq.threaded)
irq_set_nested_thread(irq, 1);
irq_set_noprobe(irq);
+ if (chip->irq.num_parents == 1)
+ err = irq_set_parent(irq, chip->irq.parents[0]);
+ else if (chip->irq.map)
+ err = irq_set_parent(irq, chip->irq.map[hwirq]);
+
+ if (err < 0)
+ return err;
+
/*
* No set-up of the hardware will happen if IRQ_TYPE_NONE
* is passed as default type.
*/
- if (chip->irq_default_type != IRQ_TYPE_NONE)
- irq_set_irq_type(irq, chip->irq_default_type);
+ if (chip->irq.default_type != IRQ_TYPE_NONE)
+ irq_set_irq_type(irq, chip->irq.default_type);
return 0;
}
+EXPORT_SYMBOL_GPL(gpiochip_irq_map);
-static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
+void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq)
{
struct gpio_chip *chip = d->host_data;
- if (chip->irq_nested)
+ if (chip->irq.threaded)
irq_set_nested_thread(irq, 0);
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
+EXPORT_SYMBOL_GPL(gpiochip_irq_unmap);
static const struct irq_domain_ops gpiochip_domain_ops = {
.map = gpiochip_irq_map,
@@ -1702,7 +1705,94 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
{
if (!gpiochip_irqchip_irq_valid(chip, offset))
return -ENXIO;
- return irq_create_mapping(chip->irqdomain, offset);
+
+ return irq_create_mapping(chip->irq.domain, offset);
+}
+
+/**
+ * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
+ * @gpiochip: the GPIO chip to add the IRQ chip to
+ * @lock_key: lockdep class
+ */
+static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *lock_key)
+{
+ struct irq_chip *irqchip = gpiochip->irq.chip;
+ const struct irq_domain_ops *ops;
+ struct device_node *np;
+ unsigned int type;
+ unsigned int i;
+
+ if (!irqchip)
+ return 0;
+
+ if (gpiochip->irq.parent_handler && gpiochip->can_sleep) {
+ chip_err(gpiochip, "you cannot have chained interrupts on a "
+ "chip that may sleep\n");
+ return -EINVAL;
+ }
+
+ np = gpiochip->gpiodev->dev.of_node;
+ type = gpiochip->irq.default_type;
+
+ /*
+ * Specifying a default trigger is a terrible idea if DT or ACPI is
+ * used to configure the interrupts, as you may end up with
+ * conflicting triggers. Tell the user, and reset to NONE.
+ */
+ if (WARN(np && type != IRQ_TYPE_NONE,
+ "%s: Ignoring %u default trigger\n", np->full_name, type))
+ type = IRQ_TYPE_NONE;
+
+ if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
+ acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
+ "Ignoring %u default trigger\n", type);
+ type = IRQ_TYPE_NONE;
+ }
+
+ gpiochip->to_irq = gpiochip_to_irq;
+ gpiochip->irq.default_type = type;
+ gpiochip->irq.lock_key = lock_key;
+
+ if (gpiochip->irq.domain_ops)
+ ops = gpiochip->irq.domain_ops;
+ else
+ ops = &gpiochip_domain_ops;
+
+ gpiochip->irq.domain = irq_domain_add_simple(np, gpiochip->ngpio,
+ gpiochip->irq.first,
+ ops, gpiochip);
+ if (!gpiochip->irq.domain)
+ return -EINVAL;
+
+ /*
+ * It is possible for a driver to override this, but only if the
+ * alternative functions are both implemented.
+ */
+ if (!irqchip->irq_request_resources &&
+ !irqchip->irq_release_resources) {
+ irqchip->irq_request_resources = gpiochip_irq_reqres;
+ irqchip->irq_release_resources = gpiochip_irq_relres;
+ }
+
+ if (gpiochip->irq.parent_handler) {
+ void *data = gpiochip->irq.parent_handler_data ?: gpiochip;
+
+ for (i = 0; i < gpiochip->irq.num_parents; i++) {
+ /*
+ * The parent IRQ chip is already using the chip_data
+ * for this IRQ chip, so our callbacks simply use the
+ * handler_data.
+ */
+ irq_set_chained_handler_and_data(gpiochip->irq.parents[i],
+ gpiochip->irq.parent_handler,
+ data);
+ }
+ }
+
+ acpi_gpiochip_request_interrupts(gpiochip);
+
+ return 0;
}
/**
@@ -1717,26 +1807,34 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
acpi_gpiochip_free_interrupts(gpiochip);
- if (gpiochip->irq_chained_parent) {
- irq_set_chained_handler(gpiochip->irq_chained_parent, NULL);
- irq_set_handler_data(gpiochip->irq_chained_parent, NULL);
+ if (gpiochip->irq.chip && gpiochip->irq.parent_handler) {
+ struct gpio_irq_chip *irq = &gpiochip->irq;
+ unsigned int i;
+
+ for (i = 0; i < irq->num_parents; i++)
+ irq_set_chained_handler_and_data(irq->parents[i],
+ NULL, NULL);
}
/* Remove all IRQ mappings and delete the domain */
- if (gpiochip->irqdomain) {
+ if (gpiochip->irq.domain) {
+ unsigned int irq;
+
for (offset = 0; offset < gpiochip->ngpio; offset++) {
if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
continue;
- irq_dispose_mapping(
- irq_find_mapping(gpiochip->irqdomain, offset));
+
+ irq = irq_find_mapping(gpiochip->irq.domain, offset);
+ irq_dispose_mapping(irq);
}
- irq_domain_remove(gpiochip->irqdomain);
+
+ irq_domain_remove(gpiochip->irq.domain);
}
- if (gpiochip->irqchip) {
- gpiochip->irqchip->irq_request_resources = NULL;
- gpiochip->irqchip->irq_release_resources = NULL;
- gpiochip->irqchip = NULL;
+ if (gpiochip->irq.chip) {
+ gpiochip->irq.chip->irq_request_resources = NULL;
+ gpiochip->irq.chip->irq_release_resources = NULL;
+ gpiochip->irq.chip = NULL;
}
gpiochip_irqchip_free_valid_mask(gpiochip);
@@ -1751,8 +1849,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
- * @nested: whether this is a nested irqchip calling handle_nested_irq()
- * in its IRQ handler
+ * @threaded: whether this irqchip uses a nested thread handler
* @lock_key: lockdep class
*
* This function closely associates a certain irqchip with a certain
@@ -1774,7 +1871,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
- bool nested,
+ bool threaded,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
@@ -1786,7 +1883,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
pr_err("missing gpiochip .dev parent pointer\n");
return -EINVAL;
}
- gpiochip->irq_nested = nested;
+ gpiochip->irq.threaded = threaded;
of_node = gpiochip->parent->of_node;
#ifdef CONFIG_OF_GPIO
/*
@@ -1811,16 +1908,16 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
type = IRQ_TYPE_NONE;
}
- gpiochip->irqchip = irqchip;
- gpiochip->irq_handler = handler;
- gpiochip->irq_default_type = type;
+ gpiochip->irq.chip = irqchip;
+ gpiochip->irq.handler = handler;
+ gpiochip->irq.default_type = type;
gpiochip->to_irq = gpiochip_to_irq;
- gpiochip->lock_key = lock_key;
- gpiochip->irqdomain = irq_domain_add_simple(of_node,
+ gpiochip->irq.lock_key = lock_key;
+ gpiochip->irq.domain = irq_domain_add_simple(of_node,
gpiochip->ngpio, first_irq,
&gpiochip_domain_ops, gpiochip);
- if (!gpiochip->irqdomain) {
- gpiochip->irqchip = NULL;
+ if (!gpiochip->irq.domain) {
+ gpiochip->irq.chip = NULL;
return -EINVAL;
}
@@ -1842,6 +1939,12 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
+static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
+ struct lock_class_key *key)
+{
+ return 0;
+}
+
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
@@ -1859,7 +1962,7 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
*/
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->gpiodev->base + offset);
+ return pinctrl_gpio_request(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -1870,7 +1973,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->gpiodev->base + offset);
+ pinctrl_gpio_free(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -2013,7 +2116,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
-static int __gpiod_request(struct gpio_desc *desc, const char *label)
+static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
{
struct gpio_chip *chip = desc->gdev->chip;
int status;
@@ -2106,7 +2209,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
gdev = desc->gdev;
if (try_module_get(gdev->owner)) {
- status = __gpiod_request(desc, label);
+ status = gpiod_request_commit(desc, label);
if (status < 0)
module_put(gdev->owner);
else
@@ -2119,7 +2222,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
return status;
}
-static bool __gpiod_free(struct gpio_desc *desc)
+static bool gpiod_free_commit(struct gpio_desc *desc)
{
bool ret = false;
unsigned long flags;
@@ -2154,7 +2257,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
void gpiod_free(struct gpio_desc *desc)
{
- if (desc && desc->gdev && __gpiod_free(desc)) {
+ if (desc && desc->gdev && gpiod_free_commit(desc)) {
module_put(desc->gdev->owner);
put_device(&desc->gdev->dev);
} else {
@@ -2217,7 +2320,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
return desc;
}
- err = __gpiod_request(desc, label);
+ err = gpiod_request_commit(desc, label);
if (err < 0)
return ERR_PTR(err);
@@ -2235,7 +2338,7 @@ EXPORT_SYMBOL_GPL(gpiochip_request_own_desc);
void gpiochip_free_own_desc(struct gpio_desc *desc)
{
if (desc)
- __gpiod_free(desc);
+ gpiod_free_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
@@ -2291,44 +2394,12 @@ static int gpio_set_drive_single_ended(struct gpio_chip *gc, unsigned offset,
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
-static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
struct gpio_chip *gc = desc->gdev->chip;
int val = !!value;
int ret;
- /* GPIOs used for IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
- gpiod_err(desc,
- "%s: tried to set a GPIO tied to an IRQ as output\n",
- __func__);
- return -EIO;
- }
-
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- /* First see if we can enable open drain in hardware */
- ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_DRAIN);
- if (!ret)
- goto set_output_value;
- /* Emulate open drain by not actively driving the line high */
- if (val)
- return gpiod_direction_input(desc);
- }
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_OPEN_SOURCE);
- if (!ret)
- goto set_output_value;
- /* Emulate open source by not actively driving the line low */
- if (!val)
- return gpiod_direction_input(desc);
- } else {
- gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
- PIN_CONFIG_DRIVE_PUSH_PULL);
- }
-
-set_output_value:
if (!gc->set || !gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() or direction_output() operations\n",
@@ -2358,7 +2429,7 @@ set_output_value:
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
VALIDATE_DESC(desc);
- return _gpiod_direction_output_raw(desc, value);
+ return gpiod_direction_output_raw_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -2376,12 +2447,48 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
+ struct gpio_chip *gc = desc->gdev->chip;
+ int ret;
+
VALIDATE_DESC(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
else
value = !!value;
- return _gpiod_direction_output_raw(desc, value);
+
+ /* GPIOs used for IRQs shall not be set as output */
+ if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
+ gpiod_err(desc,
+ "%s: tried to set a GPIO tied to an IRQ as output\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ /* First see if we can enable open drain in hardware */
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_DRAIN);
+ if (!ret)
+ goto set_output_value;
+ /* Emulate open drain by not actively driving the line high */
+ if (value)
+ return gpiod_direction_input(desc);
+ }
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+ ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_OPEN_SOURCE);
+ if (!ret)
+ goto set_output_value;
+ /* Emulate open source by not actively driving the line low */
+ if (!value)
+ return gpiod_direction_input(desc);
+ } else {
+ gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
+ PIN_CONFIG_DRIVE_PUSH_PULL);
+ }
+
+set_output_value:
+ return gpiod_direction_output_raw_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output);
@@ -2448,7 +2555,7 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
* that the GPIO was actually requested.
*/
-static int _gpiod_get_raw_value(const struct gpio_desc *desc)
+static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
int offset;
@@ -2462,6 +2569,71 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
return value;
}
+static int gpio_chip_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
+{
+ if (chip->get_multiple) {
+ return chip->get_multiple(chip, mask, bits);
+ } else if (chip->get) {
+ int i, value;
+
+ for_each_set_bit(i, mask, chip->ngpio) {
+ value = chip->get(chip, i);
+ if (value < 0)
+ return value;
+ __assign_bit(i, bits, value);
+ }
+ return 0;
+ }
+ return -EIO;
+}
+
+int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ int i = 0;
+
+ while (i < array_size) {
+ struct gpio_chip *chip = desc_array[i]->gdev->chip;
+ unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
+ unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
+ int first, j, ret;
+
+ if (!can_sleep)
+ WARN_ON(chip->can_sleep);
+
+ /* collect all inputs belonging to the same chip */
+ first = i;
+ memset(mask, 0, sizeof(mask));
+ do {
+ const struct gpio_desc *desc = desc_array[i];
+ int hwgpio = gpio_chip_hwgpio(desc);
+
+ __set_bit(hwgpio, mask);
+ i++;
+ } while ((i < array_size) &&
+ (desc_array[i]->gdev->chip == chip));
+
+ ret = gpio_chip_get_multiple(chip, mask, bits);
+ if (ret)
+ return ret;
+
+ for (j = first; j < i; j++) {
+ const struct gpio_desc *desc = desc_array[j];
+ int hwgpio = gpio_chip_hwgpio(desc);
+ int value = test_bit(hwgpio, bits);
+
+ if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ value = !value;
+ value_array[j] = value;
+ trace_gpio_value(desc_to_gpio(desc), 1, value);
+ }
+ }
+ return 0;
+}
+
/**
* gpiod_get_raw_value() - return a gpio's raw value
* @desc: gpio whose value will be returned
@@ -2477,7 +2649,7 @@ int gpiod_get_raw_value(const struct gpio_desc *desc)
VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- return _gpiod_get_raw_value(desc);
+ return gpiod_get_raw_value_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
@@ -2499,7 +2671,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
/* Should be using gpio_get_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- value = _gpiod_get_raw_value(desc);
+ value = gpiod_get_raw_value_commit(desc);
if (value < 0)
return value;
@@ -2510,12 +2682,57 @@ int gpiod_get_value(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_get_value);
+/**
+ * gpiod_get_raw_array_value() - read raw values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status. Return 0 in case of success,
+ * else an error code.
+ *
+ * This function should be called from contexts where we cannot sleep,
+ * and it will complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(true, false, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value);
+
+/**
+ * gpiod_get_array_value() - read values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account. Return 0 in case of success, else an error code.
+ *
+ * This function should be called from contexts where we cannot sleep,
+ * and it will complain if the GPIO chip functions potentially sleep.
+ */
+int gpiod_get_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array)
+{
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(false, false, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array_value);
+
/*
- * _gpio_set_open_drain_value() - Set the open drain gpio's value.
+ * gpio_set_open_drain_value_commit() - Set the open drain gpio's value.
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
+static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->gdev->chip;
@@ -2542,7 +2759,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
+static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int err = 0;
struct gpio_chip *chip = desc->gdev->chip;
@@ -2564,18 +2781,13 @@ static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
__func__, err);
}
-static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
+static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
struct gpio_chip *chip;
chip = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- _gpio_set_open_drain_value(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- _gpio_set_open_source_value(desc, value);
- else
- chip->set(chip, gpio_chip_hwgpio(desc), value);
+ chip->set(chip, gpio_chip_hwgpio(desc), value);
}
/*
@@ -2630,10 +2842,10 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- _gpio_set_open_drain_value(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
- _gpio_set_open_source_value(desc, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ gpio_set_open_drain_value_commit(desc, value);
+ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
if (value)
@@ -2667,7 +2879,7 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
VALIDATE_DESC_VOID(desc);
/* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->chip->can_sleep);
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -2676,8 +2888,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
* @desc: gpio whose value will be assigned
* @value: value to assign
*
- * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW status into
- * account
+ * Set the logical value of the GPIO, i.e. taking its ACTIVE_LOW,
+ * OPEN_DRAIN and OPEN_SOURCE flags into account.
*
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
@@ -2689,7 +2901,12 @@ void gpiod_set_value(struct gpio_desc *desc, int value)
WARN_ON(desc->gdev->chip->can_sleep);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- _gpiod_set_raw_value(desc, value);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ gpio_set_open_drain_value_commit(desc, value);
+ else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ gpio_set_open_source_value_commit(desc, value);
+ else
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
@@ -2890,7 +3107,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOOSE_VALUE,
+ return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
&chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -2908,7 +3125,7 @@ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
might_sleep_if(extra_checks);
VALIDATE_DESC(desc);
- return _gpiod_get_raw_value(desc);
+ return gpiod_get_raw_value_commit(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
@@ -2927,7 +3144,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
might_sleep_if(extra_checks);
VALIDATE_DESC(desc);
- value = _gpiod_get_raw_value(desc);
+ value = gpiod_get_raw_value_commit(desc);
if (value < 0)
return value;
@@ -2939,6 +3156,53 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
/**
+ * gpiod_get_raw_array_value_cansleep() - read raw values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the raw values of the GPIOs, i.e. the values of the physical lines
+ * without regard for their ACTIVE_LOW status. Return 0 in case of success,
+ * else an error code.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(true, true, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_raw_array_value_cansleep);
+
+/**
+ * gpiod_get_array_value_cansleep() - read values from an array of GPIOs
+ * @array_size: number of elements in the descriptor / value arrays
+ * @desc_array: array of GPIO descriptors whose values will be read
+ * @value_array: array to store the read values
+ *
+ * Read the logical values of the GPIOs, i.e. taking their ACTIVE_LOW status
+ * into account. Return 0 in case of success, else an error code.
+ *
+ * This function is to be called from contexts that can sleep.
+ */
+int gpiod_get_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ might_sleep_if(extra_checks);
+ if (!desc_array)
+ return -EINVAL;
+ return gpiod_get_array_value_complex(false, true, array_size,
+ desc_array, value_array);
+}
+EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
+
+/**
* gpiod_set_raw_value_cansleep() - assign a gpio's raw value
* @desc: gpio whose value will be assigned
* @value: value to assign
@@ -2952,7 +3216,7 @@ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
VALIDATE_DESC_VOID(desc);
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -2972,7 +3236,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
VALIDATE_DESC_VOID(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- _gpiod_set_raw_value(desc, value);
+ gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -3264,12 +3528,25 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
if (lflags & GPIO_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
+ /*
+ * This enforces open drain mode from the consumer side.
+ * This is necessary for some busses like I2C, but the lookup
+ * should *REALLY* have specified them as open drain in the
+ * first place, so print a little warning here.
+ */
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ gpiod_warn(desc,
+ "enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
+ }
+
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOOSE_VALUE, &desc->flags);
+ if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
+ set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
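With gpiochip_add_irqchip() wired into gpiochip_add_data_with_key() above, a driver can describe its interrupt chip declaratively in the gpio_chip's irq substructure before registering, rather than calling gpiochip_irqchip_add() afterwards. A hedged sketch using only the fields touched in these hunks; the foo_* identifiers are hypothetical:

	struct gpio_irq_chip *girq = &foo->gc.irq;

	girq->chip = &foo_irq_chip;             /* struct irq_chip callbacks */
	girq->handler = handle_level_irq;       /* flow handler for mapped IRQs */
	girq->default_type = IRQ_TYPE_NONE;     /* let DT/ACPI set the trigger */
	girq->parent_handler = foo_irq_handler; /* chained handler on the parent */
	girq->num_parents = 1;
	girq->parents = &foo->parent_irq;

	/* the irq domain is now created inside gpiochip_add_data() */
	ret = devm_gpiochip_add_data(dev, &foo->gc, foo);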
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index d003ccb12781..af48322839c3 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -180,6 +180,10 @@ static inline bool acpi_can_fallback_to_crs(struct acpi_device *adev,
#endif
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
void gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned int array_size,
struct gpio_desc **desc_array,
@@ -201,7 +205,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOOSE_VALUE 12 /* GPIO may loose value in sleep */
+#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
/* Connection label */
const char *label;
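On the consumer side, the new gpiod_get_array_value() and gpiod_get_raw_array_value() helpers (plus their _cansleep variants) read a whole descriptor array in one call and let gpiolib use a chip's get_multiple() callback where available. A small hypothetical usage sketch:

	/* read four lines that together form a nibble */
	int values[4];
	int i, ret, nibble = 0;

	ret = gpiod_get_array_value(4, foo->bit_descs, values);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		nibble |= values[i] << i;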
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 81ff79336623..e9500844333e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 26682454a446..e8af1f5e8a79 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index ef9a3b6d7b62..78d609123420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,15 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -132,4 +139,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cbcb6a153aba..5afaf6016b4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -66,6 +66,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
+extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
@@ -1535,6 +1538,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -1590,6 +1594,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -1653,6 +1660,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
/*
* Registers read & write functions.
*/
@@ -1911,5 +1921,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev);
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
+
#include "amdgpu_object.h"
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a7afe553e0a1..f2b72c7c6857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
struct cgs_mode_info *mode_info;
if (info == NULL)
@@ -928,30 +924,43 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- mode_info = NULL;
+ if (!amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct drm_device *ddev = adev->ddev;
+ struct drm_crtc *crtc;
+ uint32_t line_time_us, vblank_lines;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled) {
+ info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ info->display_count++;
+ }
+ if (mode_info != NULL &&
+ crtc->enabled && amdgpu_crtc->enabled &&
+ amdgpu_crtc->hw_mode.clock) {
+ line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+ amdgpu_crtc->hw_mode.clock;
+ vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2);
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
+ }
}
}
+ } else {
+ info->display_count = adev->pm.pm_display_cfg.num_display;
+ if (mode_info != NULL) {
+ mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
+ mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
}
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index bdef497a6a26..a57cec737c18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -562,8 +562,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- bo->tbo.ttm->num_pages,
- false);
+ bo->tbo.ttm->num_pages);
kvfree(e->user_pages);
e->user_pages = NULL;
}
@@ -694,8 +693,7 @@ error_free_pages:
continue;
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ e->robj->tbo.ttm->num_pages);
kvfree(e->user_pages);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2c85e0a98608..2c574374d9b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -2049,6 +2050,52 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+ switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+ return amdgpu_dc != 0;
+#endif
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ return amdgpu_dc > 0;
+ case CHIP_VEGA10:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+#endif
+ return amdgpu_dc != 0;
+#endif
+ default:
+ return false;
+ }
+}
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2103,7 +2150,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
-
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -2245,7 +2291,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_atombios_i2c_init(adev);
}
/* Fence driver */
@@ -2381,7 +2428,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -2432,12 +2480,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
amdgpu_amdkfd_suspend(adev);
@@ -2580,13 +2630,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ drm_modeset_unlock_all(dev);
+ } else {
+ /*
+ * There is no equivalent atomic helper to turn on
+ * display, so we defined our own function for this;
+ * once suspend/resume is supported by the atomic
+ * framework this will be reworked.
+ */
+ amdgpu_dm_display_resume(adev);
}
- drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
@@ -2603,7 +2665,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
- drm_helper_hpd_irq_event(dev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
@@ -2903,6 +2968,7 @@ give_up_reset:
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
+ struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
@@ -2916,6 +2982,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3032,7 +3101,11 @@ out:
}
}
- drm_helper_resume_force_mode(adev->ddev);
+ if (amdgpu_device_has_dc_support(adev)) {
+ r = drm_atomic_helper_resume(adev->ddev, state);
+ amdgpu_dm_display_resume(adev);
+ } else
+ drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6ad243293a78..138beb550a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-static void amdgpu_output_poll_changed(struct drm_device *dev)
+void amdgpu_output_poll_changed(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
amdgpu_fb_output_poll_changed(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
new file mode 100644
index 000000000000..3cc0ef0c055e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+void amdgpu_output_poll_changed(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 7279fb5c3abc..56caaeee6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -433,7 +433,7 @@ struct amdgpu_pm {
uint32_t fw_version;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+ struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index dd2f060d62a8..ec96bb1f9eaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -106,6 +106,8 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
+int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
@@ -211,6 +213,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
@@ -518,15 +526,15 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
- {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
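
The new amdgpu.dc and amdgpu.dc_log parameters added in this file are plain integer module parameters, with amdgpu.dc acting as a tri-state override (-1/0/1). As a rough illustration only, here is a minimal sketch of how such a tri-state override is commonly folded into a per-ASIC default; the helper name and the asic_defaults_to_dc flag are hypothetical and this is not the driver's actual amdgpu_device_has_dc_support() logic:

/* Hypothetical sketch of consuming a tri-state parameter like amdgpu_dc. */
static bool sketch_want_dc(int dc_param, bool asic_defaults_to_dc)
{
	if (dc_param == 0)		/* amdgpu.dc=0: force the legacy DCE path */
		return false;
	if (dc_param == 1)		/* amdgpu.dc=1: force the new DC path */
		return true;
	return asic_defaults_to_dc;	/* amdgpu.dc=-1: keep the ASIC default */
}

Because both parameters are registered with 0444 permissions, they are read-only under /sys/module/amdgpu/parameters/ and are set at load time, e.g. amdgpu.dc=1 on the kernel command line or dc=1 as a modprobe option.
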
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 562930b17a6d..90fa8e8bc6fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -42,11 +42,6 @@
this contains a helper + an amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass.
*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
static int
amdgpufb_open(struct fb_info *info, int user)
@@ -353,7 +348,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(adev->ddev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_disable_unused_functions(adev->ddev);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index fb9f88ef6059..2fa95aef74d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
@@ -286,7 +287,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -350,7 +351,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
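
The fence fallback timer change above follows the kernel-wide migration from setup_timer() to timer_setup()/from_timer(). A self-contained sketch of the same pattern, using a hypothetical my_ring structure rather than amdgpu_ring:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct my_ring {
	struct timer_list fallback_timer;
	int id;
};

/* The callback now receives the timer_list pointer instead of an unsigned long. */
static void my_fallback(struct timer_list *t)
{
	/* from_timer() recovers the containing object via container_of(). */
	struct my_ring *ring = from_timer(ring, t, fallback_timer);

	pr_debug("fallback fired for ring %d\n", ring->id);
}

static void my_ring_init(struct my_ring *ring)
{
	timer_setup(&ring->fallback_timer, my_fallback, 0);
	mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
}
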
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 14aff2f15a94..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -351,7 +351,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -787,11 +787,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 538e5f27d120..47c5ce9807db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
- adev->ddev->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
-
/* enable msi */
adev->irq.msi_enabled = false;
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev->ddev->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* pre DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6f0b26dae3b0..720139e182a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1030,7 +1030,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2af2678ddaf6..ffde1e9666e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -53,9 +57,13 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
+
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
+ /* used to enter or exit freesync mode */
+ int (*notify_freesync)(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ /* used to allow enabling freesync mode */
+ int (*set_freesync_property)(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
+
+};
+
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+
+ /* caching for later use */
+ uint64_t address;
+};
+
+struct amdgpu_fbdev {
+ struct drm_fb_helper helper;
+ struct amdgpu_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ const enum drm_plane_type *plane_type;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -400,6 +434,14 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
+
+ int otg_inst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct amdgpu_plane {
+ struct drm_plane base;
+ enum drm_plane_type plane_type;
};
struct amdgpu_encoder_atom_dig {
@@ -489,6 +531,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+ struct i2c_adapter base;
+
+ struct ddc_service *ddc_service;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -500,6 +555,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+ const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +573,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+ struct semaphore mst_sem;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /* freesync caps */
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start using this struct and remove the same fields from the base one */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ce00f629dcce..6c570d4e4516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1470,7 +1470,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
+ if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 213988f336ed..f337c316ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 9ec96b9e85d1..b160b958e5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1f036af85ba6..ad5bf86ee8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -746,7 +746,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
return 0;
release_pages:
- release_pages(pages, pinned, 0);
+ release_pages(pages, pinned);
up_read(&current->mm->mmap_sem);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 567c4a5cf90c..793b1470284d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
@@ -1900,6 +1901,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1919,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
@@ -1928,6 +1937,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
@@ -1943,6 +1956,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 3ca9d114f630..4e67fe1e7955 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -532,6 +532,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -545,6 +551,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2581543b35a7..920910ac8663 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -371,6 +371,10 @@ static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
+
uvd_v6_0_set_ring_funcs(adev);
if (uvd_v6_0_enc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 90332f55cfba..cf81065e3c5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
- (adev->asic_type == CHIP_STONEY) ||
- (adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12))
+ (adev->asic_type == CHIP_STONEY))
return AMDGPU_VCE_HARVEST_VCE1;
- /* Tonga and CZ are dual or single pipe */
if (adev->flags & AMD_IS_APU)
tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
VCE_HARVEST_FUSE_MACRO__MASK) >>
@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
case 3:
return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
default:
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
+ return AMDGPU_VCE_HARVEST_VCE1;
+
return 0;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f3cfef48aa99..3a4c2fa7e36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1502,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1518,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1536,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1550,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1567,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index b400d5664252..7bb0bc0ca3d6 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Heterogeneous System Architecture support for AMD GPU devices
#
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
new file mode 100644
index 000000000000..ec3285f65517
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -0,0 +1,45 @@
+menu "Display Engine Configuration"
+ depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+ bool "AMD DC - Enable new display engine"
+ default y
+ help
+ Choose this option if you want to use the new display engine
+ support for AMDGPU. This adds required support for Vega and
+ Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+ bool "DC support for Polaris and older ASICs"
+ default n
+ help
+ Choose this option to enable the new DC support for older ASICs
+ by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+ and Hawaii.
+
+config DRM_AMD_DC_FBC
+ bool "AMD FBC - Enable Frame Buffer Compression"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to use frame buffer compression
+ support.
+ This is a power optimisation feature; check its availability
+ on your hardware before enabling this option.
+
+
+config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
+ help
+ Choose this option if you want DCN 1.0 (Raven family)
+ support in the display engine.
+
+config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to hit
+ kgdb_break in assert.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
new file mode 100644
index 000000000000..8ba37dd9cf7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the DAL (Display Abstraction Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
new file mode 100644
index 000000000000..46464678f2b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -0,0 +1,107 @@
+===============================================================================
+TODOs
+===============================================================================
+
+1. Base this on drm-next - WIP
+
+
+2. Cleanup commit history
+
+
+3. WIP - Drop page flip helper and use DRM's version
+
+
+4. DONE - Flatten all DC objects
+ * dc_stream/core_stream/stream should just be dc_stream
+ * Same for other DC objects
+
+ "Is there any major reason to keep all those abstractions?
+
+ Could you collapse everything into struct dc_stream?
+
+ I haven't looked recently but I didn't get the impression there was a
+ lot of design around what was public/protected, more whatever needed
+ to be used by someone else was in public."
+ ~ Dave Airlie
+
+
+5. DONE - Rename DC objects to align more with DRM
+ * dc_surface -> dc_plane_state
+ * dc_stream -> dc_stream_state
+
+
+6. DONE - Per-plane and per-stream validation
+
+
+7. WIP - Per-plane and per-stream commit
+
+
+8. WIP - Split pipe_ctx into plane and stream resource structs
+
+
+9. Attach plane and stream resources to state object instead of validate_context
+
+
+10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
+ * Use drm_display_info instead (see the sketch at the end of this list)
+ * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
+
+ "Making sure you use the sink-specific helper libraries and kernel
+ subsystems, since there's really no good reason to have 2nd
+ implementation of those in the kernel. Looks likes that's done for mst
+ and edid parsing. There's still a bit a midlayer feeling to the edid
+ parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
+ think it'd be much better if you convert that over to reading stuff
+ from drm_display_info and if needed, push stuff into the core). Also,
+ I can't come up with a good reason why DC needs all this (except to
+ reimplement half of our edid quirk table, which really isn't a good
+ idea). Might be good if you put this onto the list of things to fix
+ long-term, but imo not a blocker. Definitely make sure new stuff
+ doesn't slip in (i.e. if you start adding edid quirks to DC instead of
+ the drm core, refactoring to use the core edid stuff was pointless)."
+ ~ Daniel Vetter
+
+
+11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
+overly complicated HW programming function for sending and receiving i2c/aux
+commands. We can greatly simplify that and move it into dc/dceXYZ like other
+HW blocks.
+
+12. drm_modeset_lock in MST should no longer be needed in recent kernels
+ * Adopt appropriate locking scheme
+
+13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
+a few indirections, and consider removing entirely and using the
+drm_atomic_helper_best_encoder default behaviour.
+
+14. core/dc_debug.c, consider switching to the atomic state debug helpers and
+moving all your driver state printing into the various atomic_print_state
+callbacks. There's also plans to expose this stuff in a standard way across all
+drivers, to make debugging userspace compositors easier across different hw.
+
+15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
+dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
+
+16. Move to core SCDC helpers (I think those are new since initial DC review).
+
+17. There's still a pretty massive layer cake around dp aux and DPCD handling,
+with like 3 levels of abstraction and using your own structures instead of the
+stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
+incompatible styles, just means more reasons not to add a third (or well third
+one gets to do the cleanup refactor).
+
+18. There's a pile of sink handling code, both for DP and HDMI where I didn't
+immediately recognize the standard. I think long term it'd be best for the drm
+subsystem if we try to move as much of that into helpers/core as possible, and
+share it with drivers. But that's a very long term goal, and by far not just an
+issue with DC - other drivers, especially around DP sink handling, are equally
+guilty.
+
+19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+stuff just isn't up to the challenges either. We need to figure out something
+that integrates better with DRM and linux debug printing, while not being
+useless with filtering output. dynamic debug printing might be an option.
+
+20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+retimer that we need to program to pass PHY compliance. Currently this
+bypasses the i2c device and goes directly to HW. This should be changed.
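
As a rough illustration of what item 10 above is asking for, a hedged sketch of reading sink capabilities from DRM's drm_display_info after EDID parsing, instead of keeping a DC-private copy; the connector and edid arguments are assumed to come from the usual detect/get_modes paths, and the function name is hypothetical:

#include <linux/printk.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static void sketch_use_display_info(struct drm_connector *connector,
				    struct edid *edid)
{
	struct drm_display_info *info = &connector->display_info;
	int num_modes;

	/* drm_add_edid_modes() parses the EDID, adds the probed modes and
	 * fills connector->display_info as a side effect. */
	num_modes = drm_add_edid_modes(connector, edid);

	pr_debug("%d modes, max TMDS clock %d kHz\n",
		 num_modes, info->max_tmds_clock);
}
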
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
new file mode 100644
index 000000000000..4699e47aa76b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..889ed24084e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,4925 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "dc/inc/core_types.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_pm.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "dm_services_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+#include "modules/inc/mod_freesync.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "soc15_common.h"
+#endif
+
+#include "modules/inc/mod_freesync.h"
+
+#include "i2caux_interface.h"
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+/* initializes drm_device display-related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs);
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t link_index);
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ uint32_t link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+
+
+
+static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+};
+
+static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int crtc - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc_state->stream);
+ }
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc_state->stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+ }
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ /*
+ * The following check is inherited from both functions where this one
+ * is now used. It needs to be investigated why this can happen.
+ */
+ if (otg_inst == -1) {
+ WARN_ON(1);
+ return adev->mode_info.crtcs[0];
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ unsigned long flags;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* The IRQ could occur during the initial stage */
+ /* TODO: work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return;
+ }
+
+
+ /* wake up userspace */
+ if (amdgpu_crtc->event) {
+ /* Update to correct count/ts if racing with vblank irq */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->event = NULL;
+
+ } else
+ WARN_ON(1);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+ __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ uint8_t crtc_index = 0;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (acrtc)
+ crtc_index = acrtc->crtc_id;
+
+ drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+ struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+ struct drm_device *dev = dm->ddev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dal_asic_id.h"
+/* Allocate memory for FBC compressed data */
+/* TODO: Dynamic allocation */
+#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
+
+static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+{
+ int r;
+ struct dm_comressor_info *compressor = &adev->dm.compressor;
+
+ if (!compressor->bo_ptr) {
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize fbc\n");
+ }
+
+}
+#endif
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ adev->dm.ddev = adev->ddev;
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+
+ /* initialize DAL's lock (for SYNC context use) */
+ spin_lock_init(&adev->dm.dal_lock);
+
+ /* initialize DAL's mutex */
+ mutex_init(&adev->dm.dal_mutex);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+ init_data.asic_id.vram_width = adev->mc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ adev->dm.dal = NULL;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ if (amdgpu_dc_log)
+ init_data.log_mask = DC_DEFAULT_LOG_MASK;
+ else
+ init_data.log_mask = DC_MIN_LOG_MASK;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (adev->family == FAMILY_CZ)
+ amdgpu_dm_initialize_fbc(adev);
+ init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
+#endif
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core initialized!\n");
+ } else {
+ DRM_INFO("Display Core failed to initialize!\n");
+ goto error;
+ }
+
+ INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+ } else
+ DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ adev->dm.freesync_module);
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* Update the actual number of crtcs used */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -1;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+ /*
+ * TODO: pageflip, vblank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ return;
+}
+
+static int dm_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+ return ret;
+ }
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
+
+ return detect_mst_link_for_all_connectors(dev);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ !aconnector->mst_port) {
+
+ if (suspend)
+ drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+ else
+ drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ s3_handle_mst(adev->ddev, true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ return ret;
+}
+
+static struct amdgpu_dm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ uint32_t i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return to_amdgpu_dm_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ return 0;
+}
+
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+
+ int ret = 0;
+ int i;
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+ /*
+ * Enable the HPD Rx IRQ early; this should be done before the mode set,
+ * as short pulse interrupts are used for MST
+ */
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* Do detection*/
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * This is the case when traversing through already-created
+ * MST connectors; they should be skipped
+ */
+ if (aconnector->mst_port)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+
+ drm_atomic_state_put(adev->dm.cached_state);
+ adev->dm.cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ return ret;
+}
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+
+static struct drm_atomic_state *
+dm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ if (drm_atomic_state_init(dev, &state->base) < 0)
+ goto fail;
+
+ return &state->base;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+
+static void
+dm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state->context) {
+ dc_release_state(dm_state->context);
+ dm_state->context = NULL;
+ }
+
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+dm_atomic_state_alloc_free(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ drm_atomic_state_default_release(state);
+ kfree(dm_state);
+}
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_user_framebuffer_create,
+ .output_poll_changed = amdgpu_output_poll_changed,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_state_alloc = dm_atomic_state_alloc,
+ .atomic_state_clear = dm_atomic_state_clear,
+ .atomic_state_free = dm_atomic_state_alloc_free
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+};
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+
+ sink = aconnector->dc_link->local_sink;
+
+ /* An EDID-managed connector gets its first update only in the mode_valid
+ * hook; after that the connector sink is set to either the fake or the
+ * physical sink, depending on the link status. Don't do it here during boot.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+ /* For headless S3 resume, use the emulated sink (dc_em_sink) to fake a
+ * stream, because on resume connector->sink is set to NULL
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+ /* The retain and release below are used to bump up the refcount for the
+ * sink, because the link doesn't point to it anymore after disconnect,
+ * so on the next crtc-to-connector reshuffle by the UMD we would
+ * otherwise get an unwanted dc_sink release
+ */
+ if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ if (!aconnector->dc_sink)
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ else if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_retain(aconnector->dc_sink);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard to look for proper fix
+ * if this sink is MST sink, we should not do anything
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return;
+
+ if (aconnector->dc_sink == sink) {
+ /* We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing!! */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do */
+ if (sink) {
+ /* TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here. */
+ if (aconnector->dc_sink)
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+
+ aconnector->dc_sink = sink;
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ } else {
+ aconnector->edid =
+ (struct edid *) sink->dc_edid.raw_edid;
+
+
+ drm_mode_connector_update_edid_property(connector,
+ aconnector->edid);
+ }
+ amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+
+ /* In case of failure or MST no need to update connector status or notify the OS
+ * since (for the MST case) MST does this in its own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ int dpcd_bytes_to_read;
+
+ const int max_process_count = 30;
+ int process_count = 0;
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ while (dret == dpcd_bytes_to_read &&
+ process_count < max_process_count) {
+ uint8_t retry;
+ dret = 0;
+
+ process_count++;
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ /* handle HPD short pulse irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq(
+ &aconnector->mst_mgr,
+ esi,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify downstream */
+ const int ack_dpcd_bytes_to_write =
+ dpcd_bytes_to_read - 1;
+
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t wret;
+
+ wret = drm_dp_dpcd_write(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ &esi[1],
+ ack_dpcd_bytes_to_write);
+ if (wret == ack_dpcd_bytes_to_write)
+ break;
+ }
+
+ /* check if there is a new irq to be handled */
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+ /* TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
+ * conflicts; once the i2c helper is implemented, this mutex should be
+ * retired.
+ */
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+ }
+ if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (dc_link->type == dc_connection_mst_branch))
+ dm_handle_hpd_rx_irq(aconnector);
+
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
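+/*
+ * Walk the connector list and register low-IRQ-context handlers for the
+ * HPD and HPD_RX interrupt sources of every link that provides them.
+ */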
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN)
+ client_id = AMDGPU_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling. */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
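+/* Set up DRM mode_config limits, helpers and the driver-private properties. */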
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ /* indicate support of immediate flip */
+ adev->ddev->mode_config.async_page_flip = true;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
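+/*
+ * Backlight support: forward brightness changes from the backlight class
+ * device to the DC link that drives the panel.
+ */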
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ if (dc_link_set_backlight_level(dm->backlight_link,
+ bd->props.brightness, 0, 0))
+ return 0;
+ else
+ return 1;
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+ char bl_name[16];
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ dm->adev->ddev->primary->index);
+
+ dm->backlight_dev = backlight_device_register(bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
+
+ if (IS_ERR(dm->backlight_dev))
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+ unsigned long possible_crtcs;
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -1;
+ }
+
+ for (i = 0; i < dm->dc->caps.max_planes; i++) {
+ struct amdgpu_plane *plane;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[i] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ goto fail;
+ }
+ plane->base.type = mode_info->plane_type[i];
+
+ /*
+ * HACK: IGT tests expect that each plane can only have
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+ */
+ possible_crtcs = 1 << i;
+ if (i >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
+ DETECT_REASON_BOOT))
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGA10:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ /*
+ * Temporarily disable stutter until the pplib/smu interaction is implemented
+ */
+ dm->dc->debug.disable_stutter = true;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+ for (i = 0; i < dm->dc->caps.max_planes; i++)
+ kfree(mode_info->planes[i]);
+ return -1;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_mode_config_cleanup(dm->ddev);
+ return;
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+ u8 level)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+}
+
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ return 0;
+}
+
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct mod_freesync_params freesync_params;
+ uint8_t num_streams;
+ uint8_t i;
+
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0;
+
+ /* Get freesync enable flag from DRM */
+
+ num_streams = dc_get_current_stream_count(adev->dm.dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream;
+ stream = dc_get_stream_at_index(adev->dm.dc, i);
+
+ mod_freesync_update_state(adev->dm.freesync_module,
+ &stream, 1, &freesync_params);
+ }
+
+ return r;
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .vblank_wait = NULL,
+ .backlight_set_level =
+ dm_set_backlight_level,/* called unconditionally */
+ .backlight_get_level =
+ dm_get_backlight_level,/* called unconditionally */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+ .notify_freesync = amdgpu_notify_freesync,
+
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ amdgpu_dm_display_resume(adev);
+ drm_kms_helper_hotplug_event(adev->ddev);
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
+ amdgpu_dm_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_carizzo;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_stoney;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_VEGA10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /* Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev->ddev->dev,
+ &dev_attr_s3_debug);
+#endif
+
+ return 0;
+}
+
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
+
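+/* A full modeset is only required when the atomic state asks for one and the
+ * CRTC ends up enabled and active. */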
+static bool modeset_required(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_stream_state *old_stream)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ if (!crtc_state->enable)
+ return false;
+
+ return crtc_state->active;
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ return !crtc_state->enable || !crtc_state->active;
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
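+/*
+ * Translate the DRM plane state rectangles (16.16 fixed-point source) into
+ * DC src/dst/clip rectangles and rotation; returns false for degenerate
+ * (zero-sized) rectangles.
+ */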
+static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+ struct dc_plane_state *plane_state)
+{
+ plane_state->src_rect.x = state->src_x >> 16;
+ plane_state->src_rect.y = state->src_y >> 16;
+ /* we ignore the mantissa for now and do not deal with fractional pixels :( */
+ plane_state->src_rect.width = state->src_w >> 16;
+
+ if (plane_state->src_rect.width == 0)
+ return false;
+
+ plane_state->src_rect.height = state->src_h >> 16;
+ if (plane_state->src_rect.height == 0)
+ return false;
+
+ plane_state->dst_rect.x = state->crtc_x;
+ plane_state->dst_rect.y = state->crtc_y;
+
+ if (state->crtc_w == 0)
+ return false;
+
+ plane_state->dst_rect.width = state->crtc_w;
+
+ if (state->crtc_h == 0)
+ return false;
+
+ plane_state->dst_rect.height = state->crtc_h;
+
+ plane_state->clip_rect = plane_state->dst_rect;
+
+ switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_state->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_state->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_state->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ return true;
+}
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags,
+ uint64_t *fb_location)
+{
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ // Don't show error msg. when return -ERESTARTSYS
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ if (fb_location)
+ *fb_location = amdgpu_bo_gpu_offset(rbo);
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
+static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+ struct dc_plane_state *plane_state,
+ const struct amdgpu_framebuffer *amdgpu_fb,
+ bool addReq)
+{
+ uint64_t tiling_flags;
+ uint64_t fb_location = 0;
+ uint64_t chroma_addr = 0;
+ unsigned int awidth;
+ const struct drm_framebuffer *fb = &amdgpu_fb->base;
+ int ret = 0;
+ struct drm_format_name_buf format_name;
+
+ ret = get_fb_info(
+ amdgpu_fb,
+ &tiling_flags,
+ addReq == true ? &fb_location:NULL);
+
+ if (ret)
+ return ret;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
+ return -EINVAL;
+ }
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
+ plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
+ plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
+ plane_state->plane_size.grph.surface_size.x = 0;
+ plane_state->plane_size.grph.surface_size.y = 0;
+ plane_state->plane_size.grph.surface_size.width = fb->width;
+ plane_state->plane_size.grph.surface_size.height = fb->height;
+ plane_state->plane_size.grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_SRGB;
+
+ } else {
+ awidth = ALIGN(fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(fb_location);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(fb_location);
+ chroma_addr = fb_location + (u64)(awidth * fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ plane_state->plane_size.video.luma_size.x = 0;
+ plane_state->plane_size.video.luma_size.y = 0;
+ plane_state->plane_size.video.luma_size.width = awidth;
+ plane_state->plane_size.video.luma_size.height = fb->height;
+ /* TODO: unhardcode */
+ plane_state->plane_size.video.luma_pitch = awidth;
+
+ plane_state->plane_size.video.chroma_size.x = 0;
+ plane_state->plane_size.video.chroma_size.y = 0;
+ plane_state->plane_size.video.chroma_size.width = awidth;
+ plane_state->plane_size.video.chroma_size.height = fb->height;
+ plane_state->plane_size.video.chroma_pitch = awidth / 2;
+
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_YCBCR709;
+ }
+
+ memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
+
+ /* Fill GFX8 params */
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ /* XXX fix me for VI */
+ plane_state->tiling_info.gfx8.num_banks = num_banks;
+ plane_state->tiling_info.gfx8.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ plane_state->tiling_info.gfx8.tile_split = tile_split;
+ plane_state->tiling_info.gfx8.bank_width = bankw;
+ plane_state->tiling_info.gfx8.bank_height = bankh;
+ plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
+ plane_state->tiling_info.gfx8.tile_mode =
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+ == DC_ARRAY_1D_TILED_THIN1) {
+ plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ }
+
+ plane_state->tiling_info.gfx8.pipe_config =
+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN) {
+ /* Fill GFX9 params */
+ plane_state->tiling_info.gfx9.num_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+ plane_state->tiling_info.gfx9.num_banks =
+ adev->gfx.config.gb_addr_config_fields.num_banks;
+ plane_state->tiling_info.gfx9.pipe_interleave =
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+ plane_state->tiling_info.gfx9.num_shader_engines =
+ adev->gfx.config.gb_addr_config_fields.num_se;
+ plane_state->tiling_info.gfx9.max_compressed_frags =
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+ plane_state->tiling_info.gfx9.num_rb_per_se =
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+ plane_state->tiling_info.gfx9.swizzle =
+ AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+ plane_state->tiling_info.gfx9.shaderEnable = 1;
+ }
+
+ plane_state->visible = true;
+ plane_state->scaling_quality.h_taps_c = 0;
+ plane_state->scaling_quality.v_taps_c = 0;
+
+ /* is this needed? is plane_state zeroed at allocation? */
+ plane_state->scaling_quality.h_taps = 0;
+ plane_state->scaling_quality.v_taps = 0;
+ plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ return ret;
+
+}
+
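+/* Convert the 256-entry DRM gamma LUT from the CRTC state into a DC gamma
+ * object and attach it to the plane state. */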
+static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *plane_state)
+{
+ int i;
+ struct dc_gamma *gamma;
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) crtc_state->gamma_lut->data;
+
+ gamma = dc_create_gamma();
+
+ if (gamma == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+ gamma->type = GAMMA_RGB_256;
+ gamma->num_entries = GAMMA_RGB_256_ENTRIES;
+ for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
+ gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
+ gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
+ gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
+ }
+
+ plane_state->gamma_correction = gamma;
+}
+
+static int fill_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state,
+ bool addrReq)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ const struct drm_crtc *crtc = plane_state->crtc;
+ struct dc_transfer_func *input_tf;
+ int ret = 0;
+
+ if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ return -EINVAL;
+
+ ret = fill_plane_attributes_from_fb(
+ crtc->dev->dev_private,
+ dc_plane_state,
+ amdgpu_fb,
+ addrReq);
+
+ if (ret)
+ return ret;
+
+ input_tf = dc_create_transfer_func();
+
+ if (input_tf == NULL)
+ return -ENOMEM;
+
+ input_tf->type = TF_TYPE_PREDEFINED;
+ input_tf->tf = TRANSFER_FUNCTION_SRGB;
+
+ dc_plane_state->in_transfer_func = input_tf;
+
+ /* In case of gamma set, update gamma value */
+ if (crtc_state->gamma_lut)
+ fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
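+/*
+ * Compute the stream source and destination rectangles from the requested
+ * mode, the connector scaling property (RMX_*) and the underscan borders.
+ */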
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+ struct rect src = { 0 }; /* viewport in composition space*/
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector)
+{
+ uint32_t bpc = connector->display_info.bpc;
+
+ /* Limit color depth to 8 bit for now.
+ * TODO: Still need to handle deep color.
+ */
+ if (bpc > 8)
+ bpc = 8;
+
+ switch (bpc) {
+ case 0:
+ /* Temporary workaround: DRM doesn't parse color depth for
+ * EDID revisions before 1.4.
+ * TODO: Fix EDID parsing.
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
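+/* Classify the mode as 16:9 or 4:3 by cross-multiplying width * 9 against
+ * height * 16 with a small tolerance. */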
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ int32_t width = mode_in->crtc_hdisplay * 9;
+ int32_t height = mode_in->crtc_vdisplay * 16;
+
+ if ((width - height) < 10 && (width - height) > -10)
+ return ASPECT_RATIO_16_9;
+ else
+ return ASPECT_RATIO_4_3;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (dc_crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR420:
+ {
+ /*
+ * 27030 kHz is the separation point between HDTV and SDTV
+ * according to the HDMI spec, so use YCbCr709 above it and
+ * YCbCr601 below it
+ */
+ if (dc_crtc_timing->pix_clk_khz > 27030) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+
+ }
+ break;
+ case PIXEL_ENCODING_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return color_space;
+}
+
+/*****************************************************************************/
+
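+/* Fill a dc_crtc_timing from a drm_display_mode and the connector's display
+ * info: pixel encoding, color depth, sync polarities, CEA VIC, etc. */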
+static void
+fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+
+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+
+ if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+ timing_out->vic = drm_match_cea_mode(mode_in);
+
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width =
+ mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch =
+ mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch =
+ mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width =
+ mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_khz = mode_in->crtc_clock;
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ stream->output_color_space = get_output_color_space(timing_out);
+
+ {
+ struct dc_transfer_func *tf = dc_create_transfer_func();
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func = tf;
+ }
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strncpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+ /* TODO: We only check for the progressive mode, check for interlace mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+ /* no scaling and not an amdgpu-inserted mode: no need to patch */
+ }
+}
+
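+/*
+ * Create a virtual sink for a connector with no physical sink so that a
+ * stream can still be built for it (e.g. headless or forced-on connectors).
+ */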
+static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *sink = NULL;
+ struct dc_sink_init_data sink_init_data = { 0 };
+
+ sink_init_data.link = aconnector->dc_link;
+ sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return -ENOMEM;
+ }
+
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+ aconnector->fake_enable = true;
+
+ aconnector->dc_sink = sink;
+ aconnector->dc_link->local_sink = sink;
+
+ return 0;
+}
+
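+/*
+ * Build a dc_stream_state for the given connector and mode: pick the
+ * preferred/native timing when scaling is requested, then fill the stream
+ * timing, scaling and audio info from the DRM state and the sink's EDID caps.
+ */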
+static struct dc_stream_state *
+create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (aconnector == NULL) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ if (dm_state == NULL) {
+ DRM_ERROR("dm_state is NULL!\n");
+ goto dm_state_null;
+ }
+
+ drm_connector = &aconnector->base;
+
+ if (!aconnector->dc_sink) {
+ /*
+ * Exclude MST from creating fake_sink
+ * TODO: need to enable MST into fake_sink feature
+ */
+ if (aconnector->mst_port)
+ goto stream_create_fail;
+
+ if (create_fake_sink(aconnector))
+ goto stream_create_fail;
+ }
+
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+
+ if (preferred_mode == NULL) {
+ /* This may not be an error; the use case is when there are no
+ * usermode calls to reset and set the mode upon hotplug. In this
+ * case, we call set mode ourselves to restore the previous mode,
+ * and the mode list may not have been filled in yet.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+ }
+
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base);
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+stream_create_fail:
+dm_state_null:
+drm_connector_null:
+ return stream;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+ /* TODO: Destroy dc_stream objects once the stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+
+ kfree(state);
+}
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+
+}
+
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ cur = to_dm_crtc_state(crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+ /* TODO: Duplicate dc_stream once the stream object is flattened */
+
+ return &state->base;
+}
+
+/* Implement only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /* Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in the context of a user-mode ioctl, which
+ * makes it a bad place for *any* MST-related activity. */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+ return ret;
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
+ }
+
+ }
+#endif
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+ connector->state = &state->base;
+ connector->state->connector = connector;
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (new_state) {
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &new_state->base);
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_DRIVER("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+
+ if (!aconnector->base.edid_blob_ptr ||
+ !aconnector->base.edid_blob_ptr->data) {
+ DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ aconnector->base.override_edid = false;
+ return;
+ }
+
+ /* Only dereference the EDID blob after the NULL check above. */
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+ /* In case of a headless boot with force on for a DP managed connector,
+ * these settings have to be != 0 to get an initial modeset.
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+
+ aconnector->base.override_edid = true;
+ create_eml_sink(aconnector);
+}
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ /* Only run this the first time mode_valid is called, to initialize
+ * EDID management.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ stream = dc_create_stream_for_sink(dc_sink);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+ fill_stream_properties_from_drm_display_mode(stream, mode, connector);
+
+ stream->src.width = mode->hdisplay;
+ stream->src.height = mode->vdisplay;
+ stream->dst = stream->src;
+
+ if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+ result = MODE_OK;
+
+ dc_stream_release(stream);
+
+fail:
+ /* TODO: error handling*/
+ return result;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+ * If a second, bigger display is hotplugged in FB console mode, the bigger
+ * resolution modes will be filtered out by drm_mode_validate_size(), and
+ * those modes are missing after the user starts lightdm. So we need to renew
+ * the modes list in the get_modes callback, not just return the modes count.
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = best_encoder
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
+
+ if (unlikely(!dm_crtc_state->stream &&
+ modeset_required(state, NULL, dm_crtc_state->stream))) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
+
+ return ret;
+}
+
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+ .disable = dm_crtc_helper_disable,
+ .atomic_check = dm_crtc_helper_atomic_check,
+ .mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+ struct dm_plane_state *amdgpu_state = NULL;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+ WARN_ON(amdgpu_state == NULL);
+
+ if (amdgpu_state) {
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ }
+}
+
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+ if (old_dm_plane_state->dc_state) {
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+ dc_plane_state_retain(dm_plane_state->dc_state);
+ }
+
+ return &dm_plane_state->base;
+}
+
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (dm_plane_state->dc_state)
+ dc_plane_state_release(dm_plane_state->dc_state);
+
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static const struct drm_plane_funcs dm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = dm_drm_plane_reset,
+ .atomic_duplicate_state = dm_drm_plane_duplicate_state,
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
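+/*
+ * prepare_fb: pin the framebuffer BO into VRAM and program the resulting
+ * address (luma/chroma for video formats) into the new DC plane state.
+ */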
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t chroma_addr = 0;
+ int r;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+
+ if (!new_state->fb) {
+ DRM_DEBUG_DRIVER("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+
+ obj = afb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ return r;
+ }
+
+ amdgpu_bo_ref(rbo);
+
+ if (dm_plane_state_new->dc_state &&
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+ plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(afb->address);
+ chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ }
+ }
+
+ /* This is a hack for S3: the 4.9 kernel filters out cursor buffer
+ * prepare and cleanup in drm_atomic_helper_prepare_planes and
+ * drm_atomic_helper_cleanup_planes because the fb is not present in S3.
+ * In a 4.10 kernel this code should be removed and the amdgpu_device_suspend
+ * code touching frame buffers should be avoided for DC.
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+ acrtc->cursor_bo = obj;
+ }
+ return 0;
+}
+
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct amdgpu_bo *rbo;
+ struct amdgpu_framebuffer *afb;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+ afb = to_amdgpu_framebuffer(old_state->fb);
+ rbo = gem_to_amdgpu_bo(afb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static int dm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (!dm_plane_state->dc_state)
+ return 0;
+
+ if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ .prepare_fb = dm_plane_helper_prepare_fb,
+ .cleanup_fb = dm_plane_helper_cleanup_fb,
+ .atomic_check = dm_plane_atomic_check,
+};
+
+/*
+ * TODO: these are currently initialized to RGB formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so the internal
+ * drm check will succeed, and let DC implement the proper check
+ */
+static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+static const uint32_t yuv_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
+};
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs)
+{
+ int res = -EPERM;
+
+ switch (aplane->base.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ aplane->base.format_default = true;
+
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+ ARRAY_SIZE(rgb_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+ ARRAY_SIZE(yuv_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ }
+
+ drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (aplane->base.funcs->reset)
+ aplane->base.funcs->reset(&aplane->base);
+
+
+ return res;
+}
+
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t crtc_index)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_plane *cursor_plane;
+
+ int res = -ENOMEM;
+
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
+
+ cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail;
+
+ res = drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+ &cursor_plane->base,
+ &amdgpu_dm_crtc_funcs, NULL);
+
+ if (res)
+ goto fail;
+
+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (acrtc->base.funcs->reset)
+ acrtc->base.funcs->reset(&acrtc->base);
+
+ acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+ acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
+
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+ return 0;
+
+fail:
+ kfree(acrtc);
+ kfree(cursor_plane);
+ return res;
+}
+
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = helper->best_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
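+		/* Skip modes larger than the native mode, and the native mode itself */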
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ drm_edid_to_eld(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ return amdgpu_dm_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+ mutex_init(&aconnector->hpd_lock);
+
+	/* Configure HPD (hot plug detect) support; connector->polled defaults
+	 * to 0, which means HPD hot plug is not supported
+	 */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+}
+
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
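+	/* Translate each struct i2c_msg into a DC i2c payload */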
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dal_i2caux_submit_i2c_command(
+ ddc_service->ctx->i2caux,
+ ddc_service->ddc_pin,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.class = I2C_CLASS_DDC;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ link->priv = aconnector;
+
+ DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens after. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+	/* Even if registration fails, we should continue with
+	 * DM initialization, because having no backlight control
+	 * is better than a black screen.
+	 */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
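+	/* Return a bitmask with one bit set per available CRTC */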
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+	/*
+	 * This is not the correct translation, but it works as long as the
+	 * VBLANK constant is the same as the PFLIP one
+	 */
+ int irq_type =
+ amdgpu_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
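+/* Returns true when the scaling mode or underscan settings differ between the two connector states */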
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+ if (adev->dm.freesync_module)
+ mod_freesync_remove_stream(adev->dm.freesync_module, stream);
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+ if (!crtc || !plane->state->fb) {
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+ return 0;
+ }
+
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+ __func__,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ return -EINVAL;
+ }
+
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+ /* avivo cursor are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
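+	/* Clamp negative cursor coordinates to zero and carry the overshoot in the hotspot */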
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+ position->enable = true;
+ position->x = x;
+ position->y = y;
+ position->x_hotspot = xorigin;
+ position->y_hotspot = yorigin;
+
+ return 0;
+}
+
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position;
+ struct dc_cursor_attributes attributes;
+ int ret;
+
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+ DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+
+ ret = get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream)
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ return;
+ }
+
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
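+	/* Program the cursor surface attributes from the plane state and FB address */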
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
+
+ attributes.pitch = attributes.width;
+
+ if (crtc_state->stream) {
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
+
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+ }
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+/*
+ * Executes flip
+ *
+ * Waits on all BO's fences and for proper vblank count
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ uint32_t target,
+ struct dc_state *state)
+{
+ unsigned long flags;
+ uint32_t target_vblank;
+ int r, vpos, hpos;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+ target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+	/* TODO: this might fail and hence is better not used; wait
+	 * explicitly on fences instead, and in general this should only
+	 * be called for a blocking commit, as per the framework helpers
+	 */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve buffer before flip\n");
+ WARN_ON(1);
+ }
+
+ /* Wait for all fences on this FB */
+ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
+
+ amdgpu_bo_unreserve(abo);
+
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+ /* Flip */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
+
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
+
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
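+	/* DRM_MODE_PAGE_FLIP_ASYNC requests an immediate flip instead of waiting for vblank */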
+ addr.flip_immediate = async_flip;
+
+
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+ acrtc_state->stream,
+ NULL,
+ &surface_updates->surface,
+ state);
+
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool *wait_for_vblank)
+{
+ uint32_t i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dc_stream_state *dc_stream_attach;
+ struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ int planes_count = 0;
+ unsigned long flags;
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool pflip_needed;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
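+		/* Commits that do not allow a modeset are handled as page flips below */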
+ pflip_needed = !state->allow_modeset;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("%s: acrtc %d, already busy\n",
+ __func__,
+ acrtc_attach->crtc_id);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (!pflip_needed) {
+ WARN_ON(!dm_new_plane_state->dc_state);
+
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
+
+ } else if (new_crtc_state->planes_changed) {
+			/* Assume that even ONE crtc with an immediate flip means
+			 * the entire commit can't wait for VBLANK.
+			 * TODO: check whether this is correct.
+			 */
+			*wait_for_vblank =
+				!(new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC);
+
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
+
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+ drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dm_state->context);
+ }
+
+ }
+
+ if (planes_count) {
+ unsigned long flags;
+
+ if (new_pcrtc_state->event) {
+
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+		if (!dc_commit_planes_to_stream(dm->dc,
+ plane_states_constructed,
+ planes_count,
+ dc_stream_attach,
+ dm_state->context))
+ dm_error("%s: Failed to attach plane!\n", __func__);
+ } else {
+		/* TODO BUG: disabling planes on the CRTC should go here. */
+ }
+}
+
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+	/*
+	 * We evade vblank and pflip interrupts on CRTCs that are about to
+	 * change. We do it here to flush and disable the interrupts before
+	 * drm_swap_state is called from drm_atomic_helper_commit, since it
+	 * will update the crtc->dm_crtc_state->stream pointer which is used
+	 * in the ISRs.
+	 */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
+ }
+	/* TODO: add a check here for SoCs that support a hardware cursor
+	 * plane, to unset legacy_cursor_update */
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+	/* TODO: handle EINTR, re-enable IRQ */
+}
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ uint32_t i, j;
+ uint32_t new_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+ struct dc_stream_state *new_stream = NULL;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ dm_state = to_dm_atomic_state(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+			/*
+			 * This could happen because of issues with
+			 * userspace notification delivery.
+			 * In this case userspace tries to set a mode on
+			 * a display which is in fact disconnected.
+			 * dc_sink is NULL on the aconnector in this case.
+			 * We expect a mode reset to come soon.
+			 *
+			 * This can also happen when an unplug is done
+			 * during the resume sequence.
+			 *
+			 * In this case, we want to pretend we still
+			 * have a sink to keep the pipe running so that
+			 * hw state is consistent with the sw state
+			 */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+
+			/*
+			 * Save the CRTCs whose mode is being set; we need them
+			 * to enable vblank interrupts once all resources have
+			 * been acquired in DC, after dc_commit_state
+			 */
+
+			/* TODO: move all of this into dm_crtc_state, get rid of
+			 * the new_crtcs array and use the old and new atomic
+			 * states instead
+			 */
+ new_crtcs[new_crtcs_count] = acrtc;
+ new_crtcs_count++;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
+ } /* for_each_crtc_in_state() */
+
+ /*
+ * Add streams after required streams from new and replaced streams
+ * are removed from freesync module
+ */
+ if (adev->dm.freesync_module) {
+ for (i = 0; i < new_crtcs_count; i++) {
+ struct amdgpu_dm_connector *aconnector = NULL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ &new_crtcs[i]->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ new_stream = dm_new_crtc_state->stream;
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+ state,
+ &new_crtcs[i]->base);
+ if (!aconnector) {
+ DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
+ "skipping freesync init\n",
+ new_crtcs[i]->crtc_id);
+ continue;
+ }
+
+ mod_freesync_add_stream(adev->dm.freesync_module,
+ new_stream, &aconnector->caps);
+ }
+ }
+
+ if (dm_state->context)
+ WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+
+ /* Handle scaling and underscan changes*/
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_stream_status *status = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ /* Skip any thing not scale or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->plane_count);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+		/* TODO: how does this work with MPO? */
+ if (!dc_commit_planes_to_stream(
+ dm->dc,
+ status->plane_states,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ dm_state->context))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
+
+ for (i = 0; i < new_crtcs_count; i++) {
+ /*
+ * loop to enable interrupts on newly arrived crtc
+ */
+ struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (adev->dm.freesync_module)
+ mod_freesync_notify_mode_change(
+ adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ }
+
+
+ /*
+ * send vblank event on all events not handled in flip and
+ * mark consumed event for drm_atomic_helper_commit_hw_done
+ */
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto err;
+
+ /* Attach crtc to drm_atomic_state*/
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto err;
+
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return 0;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a mode set does not come upon hotplug.
+ * This includes when the same display is unplugged and then plugged back into
+ * the same port, and when we are running without usermode desktop manager support
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+
+ if (!disconnected_acrtc || !acrtc_state->stream)
+ return;
+
+	/*
+	 * If the previous sink is not released and is different from the
+	 * current one, we deduce we are in a state where we cannot rely on a
+	 * usermode call to turn on the display, so we do it here
+	 */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+	/* Adding all modeset locks to acquire_ctx will
+	 * ensure that when the framework releases it, the
+	 * extra locks we are locking here will get released too
+	 */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /* Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static int dm_update_crtcs_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+	/* TODO: move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *new_con_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+			/* Make sure fake sink is created in plug-in scenario */
+ new_con_state = drm_atomic_get_connector_state(state,
+ &aconnector->base);
+
+ if (IS_ERR(new_con_state)) {
+ ret = PTR_ERR_OR_ZERO(new_con_state);
+ break;
+ }
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_conn_state);
+
+			/*
+			 * We can have no stream on ACTION_SET if a display
+			 * was disconnected during S3; in this case it is not an
+			 * error, the OS will be updated after detection and will
+			 * do the right thing on the next atomic commit
+			 */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+ }
+
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+
+ new_crtc_state->mode_changed = false;
+
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_remove_stream_from_ctx(
+ dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ *lock_and_validation_needed = true;
+
+		} else { /* Add stream for any updated/enabled CRTC */
+			/*
+			 * Quick fix to prevent a NULL pointer dereference on new_stream
+			 * when newly added MST connectors are not found in the existing
+			 * crtc_state in chained mode.
+			 * TODO: need to dig out the root cause of that
+			 */
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ goto next_crtc;
+
+ if (modereset_required(new_crtc_state))
+ goto next_crtc;
+
+ if (modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ dm_new_crtc_state->stream = new_stream;
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_add_stream_to_ctx(
+ dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+next_crtc:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+ }
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static int dm_update_planes_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+	int i;
+ /* TODO return page_flip_needed() function */
+ bool pflip_needed = !state->allow_modeset;
+ int ret = 0;
+
+ if (pflip_needed)
+ return ret;
+
+ /* Add new planes */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ /*TODO Implement atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+
+ if (!old_plane_crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ continue;
+
+ DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ if (!dc_remove_plane_from_context(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+				ret = -EINVAL;
+ return ret;
+ }
+
+
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ continue;
+
+ if (!new_plane_crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ if (!dm_new_plane_state->dc_state) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = fill_plane_attributes(
+ new_plane_crtc->dev->dev_private,
+ dm_new_plane_state->dc_state,
+ new_plane_state,
+ new_crtc_state,
+ false);
+ if (ret)
+ return ret;
+
+
+ if (!dc_add_plane_to_context(
+ dc,
+ dm_new_crtc_state->stream,
+ dm_new_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+ int ret;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+
+	/*
+	 * This bool will be set to true for any modeset/reset
+	 * or plane update which implies a non-fast surface update.
+	 */
+ bool lock_and_validation_needed = false;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ goto fail;
+
+	/*
+	 * legacy_cursor_update should be made false for SoCs that have
+	 * a dedicated hardware cursor plane in amdgpu_dm_atomic_commit();
+	 * otherwise, for a software cursor plane,
+	 * we should not add it to the list of affected planes.
+	 */
+ if (state->legacy_cursor_update) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->color_mgmt_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ } else {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+
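+	/* Start from a copy of the current DC state and build the new state on top of it */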
+ dm_state->context = dc_create_state();
+ ASSERT(dm_state->context);
+ dc_resource_state_copy_construct_current(dc, dm_state->context);
+
+	/* Remove existing planes if they are modified */
+ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
+	if (ret)
+		goto fail;
+
+ /* Disable all crtcs which require disable */
+ ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
+	if (ret)
+		goto fail;
+
+ /* Enable all crtcs which require enable */
+ ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
+	if (ret)
+		goto fail;
+
+ /* Add new/modified planes */
+ ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
+	if (ret)
+		goto fail;
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ goto fail;
+
+	/* Check scaling and underscan changes */
+	/* TODO: scaling changes validation was removed due to the inability to
+	 * commit a new stream into the context w/o causing a full reset. Need
+	 * to decide how to handle this.
+	 */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+ /* Skip any thing not scale or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+	/*
+	 * For the full update case, when removing/adding/updating streams on
+	 * one CRTC while flipping on another CRTC, acquiring the global lock
+	 * will guarantee that any such full update commit will wait for the
+	 * completion of any outstanding flip using DRM's synchronization events.
+	 */
+
+ if (lock_and_validation_needed) {
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+ goto fail;
+
+ if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ uint8_t dpcd_data;
+ bool capable = false;
+
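+	/* Used by the freesync code: the sink must be able to ignore MSA timing parameters */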
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
+ }
+
+ return capable;
+}
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i;
+ uint64_t val_capable;
+ bool edid_check_required;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ edid_check_required = false;
+ if (!amdgpu_dm_connector->dc_sink) {
+ DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+ return;
+ }
+ if (!adev->dm.freesync_module)
+ return;
+	/*
+	 * If the EDID is non-NULL, restrict freesync to DP and eDP only
+	 */
+ if (edid) {
+ if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ edid_check_required = is_dp_capable_without_timing_msa(
+ adev->dm.dc,
+ amdgpu_dm_connector);
+ }
+ }
+ val_capable = 0;
+	if (edid_check_required && (edid->version > 1 ||
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+			/*
+			 * Check for flag range limits only. If flag == 1 then
+			 * no additional timing information is provided.
+			 * Default GTF, GTF secondary curve and CVT are not
+			 * supported
+			 */
+ if (range->flags != 1)
+ continue;
+
+ amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+ amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+ amdgpu_dm_connector->caps.supported = true;
+ amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+ amdgpu_dm_connector->min_vfreq * 1000000;
+ amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+ amdgpu_dm_connector->max_vfreq * 1000000;
+ val_capable = 1;
+ }
+ }
+
+ /*
+ * TODO figure out how to notify user-mode or DRM of freesync caps
+ * once we figure out how to deal with freesync in an upstreamable
+ * fashion
+ */
+
+}
+
+void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
+{
+ /*
+ * TODO fill in once we figure out how to deal with freesync in
+ * an upstreamable fashion
+ */
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..117521c6a6ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include "dc.h"
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality
+ * and this is the only component that calls DAL API.
+ * The API contained here is intended for amdgpu driver use.
+ * The API that is called directly from the KMS framework is located
+ * in the amdgpu_dm_kms.h file.
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+struct amdgpu_dm_prev_state {
+ struct drm_framebuffer *fb;
+ int32_t x;
+ int32_t y;
+ struct drm_display_mode mode;
+};
+
+struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dc_irq_source irq_src;
+};
+
+struct irq_list_head {
+ struct list_head head;
+	/* In case this interrupt needs post-processing, 'work' will be queued */
+ struct work_struct work;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+struct dm_comressor_info {
+ void *cpu_addr;
+ struct amdgpu_bo *bo_ptr;
+ uint64_t gpu_addr;
+};
+#endif
+
+
+struct amdgpu_display_manager {
+ struct dal *dal;
+ struct dc *dc;
+ struct cgs_device *cgs_device;
+ /* lock to be used when DAL is called from SYNC IRQ context */
+ spinlock_t dal_lock;
+
+ struct amdgpu_device *adev; /*AMD base driver*/
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+ struct amdgpu_dm_prev_state prev_state;
+
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+ *
+ * Each IRQ source may need to be handled at different contexts.
+ * By 'context' we mean, for example:
+ * - The ISR context, which is the direct interrupt handler.
+ * - The 'deferred' context - this is the post-processing of the
+ * interrupt, but at a lower priority.
+ *
+ * Note that handlers are called in the same order as they were
+ * registered (FIFO).
+ */
+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ struct common_irq_params
+ pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+ struct common_irq_params
+ vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+
+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
+ spinlock_t irq_handler_list_table_lock;
+
+ /* Timer-related data. */
+ struct list_head timer_handler_list;
+ struct workqueue_struct *timer_workqueue;
+
+	/* Use dal_mutex for any activity which is NOT synchronized by
+ * DRM mode setting locks.
+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+ * DRM mode setting locks being acquired. This is where dal_mutex
+ * is acquired before calling into DAL. */
+ struct mutex dal_mutex;
+
+ struct backlight_device *backlight_dev;
+
+ const struct dc_link *backlight_link;
+
+ struct work_struct mst_hotplug_work;
+
+ struct mod_freesync *freesync_module;
+
+ /**
+ * Caches device atomic state for suspend/resume
+ */
+ struct drm_atomic_state *cached_state;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct dm_comressor_info compressor;
+#endif
+};
+
+struct amdgpu_dm_connector {
+
+ struct drm_connector base;
+ uint32_t connector_id;
+
+ /* we need to mind the EDID between detect
+ and get modes due to analog/digital/tvencoder */
+ struct edid *edid;
+
+ /* shared with amdgpu */
+ struct amdgpu_hpd hpd;
+
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+
+ /* DM only */
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_dm_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /*freesync caps*/
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
+ bool fake_enable;
+};
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_plane_state;
+
+struct dm_plane_state {
+ struct drm_plane_state base;
+ struct dc_plane_state *dc_state;
+};
+
+struct dm_crtc_state {
+ struct drm_crtc_state base;
+ struct dc_stream_state *stream;
+};
+
+#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+
+struct dm_atomic_state {
+ struct drm_atomic_state base;
+
+ struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index);
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector);
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid);
+
+void
+amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
new file mode 100644
index 000000000000..9bd142f65f9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+#include "dm_helpers.h"
+
+/* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+ *
+ * @edid: [in] pointer to edid
+ * @edid_caps: [in] pointer to edid caps
+ * @return
+ * enum dc_edid_status
+ */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps)
+{
+ struct edid *edid_buf = (struct edid *) edid->raw_edid;
+ struct cea_sad *sads;
+ int sad_count = -1;
+ int sadb_count = -1;
+ int i = 0;
+ int j = 0;
+ uint8_t *sadb = NULL;
+
+ enum dc_edid_status result = EDID_OK;
+
+ if (!edid_caps || !edid)
+ return EDID_BAD_INPUT;
+
+ if (!drm_edid_is_valid(edid_buf))
+ result = EDID_BAD_CHECKSUM;
+
+ edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+ ((uint16_t) edid_buf->mfg_id[1])<<8;
+ edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+ ((uint16_t) edid_buf->prod_code[1])<<8;
+ edid_caps->serial_number = edid_buf->serial;
+ edid_caps->manufacture_week = edid_buf->mfg_week;
+ edid_caps->manufacture_year = edid_buf->mfg_year;
+
+ /* One of the four detailed_timings stores the monitor name. It's
+ * stored in an array of length 13. */
+ for (i = 0; i < 4; i++) {
+ if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
+ while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
+ if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
+ break;
+
+ edid_caps->display_name[j] =
+ edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
+ j++;
+ }
+ }
+ }
+
+ edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
+ (struct edid *) edid->raw_edid);
+
+ sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+ if (sad_count <= 0) {
+ DRM_INFO("SADs count is: %d, don't need to read it\n",
+ sad_count);
+ return result;
+ }
+
+ edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+ for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+ struct cea_sad *sad = &sads[i];
+
+ edid_caps->audio_modes[i].format_code = sad->format;
+ edid_caps->audio_modes[i].channel_count = sad->channels;
+ edid_caps->audio_modes[i].sample_rate = sad->freq;
+ edid_caps->audio_modes[i].sample_size = sad->byte2;
+ }
+
+ sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+ if (sadb_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+ sadb_count = 0;
+ }
+
+ if (sadb_count)
+ edid_caps->speaker_flags = sadb[0];
+ else
+ edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+ kfree(sads);
+ kfree(sadb);
+
+ return result;
+}
+
+static void get_payload_table(
+ struct amdgpu_dm_connector *aconnector,
+ struct dp_mst_stream_allocation_table *proposed_table)
+{
+ int i;
+ struct drm_dp_mst_topology_mgr *mst_mgr =
+ &aconnector->mst_port->mst_mgr;
+
+ mutex_lock(&mst_mgr->payload_lock);
+
+ proposed_table->stream_count = 0;
+
+ /* number of active streams */
+ for (i = 0; i < mst_mgr->max_payloads; i++) {
+ if (mst_mgr->payloads[i].num_slots == 0)
+ break; /* end of vcp_id table */
+
+ ASSERT(mst_mgr->payloads[i].payload_state !=
+ DP_PAYLOAD_DELETE_LOCAL);
+
+ if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
+ mst_mgr->payloads[i].payload_state ==
+ DP_PAYLOAD_REMOTE) {
+
+ struct dp_mst_stream_allocation *sa =
+ &proposed_table->stream_allocations[
+ proposed_table->stream_count];
+
+ sa->slot_count = mst_mgr->payloads[i].num_slots;
+ sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
+ proposed_table->stream_count++;
+ }
+ }
+
+ mutex_unlock(&mst_mgr->payload_lock);
+}
+
+/*
+ * Writes the payload allocation table to the immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int slots = 0;
+ bool ret;
+ int clock;
+ int bpp = 0;
+ int pbn = 0;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ mst_port = aconnector->port;
+
+ if (enable) {
+ clock = stream->timing.pix_clk_khz;
+
+ switch (stream->timing.display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bpp = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bpp = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpp = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpp = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bpp = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bpp = 16;
+ break;
+ default:
+ ASSERT(bpp != 0);
+ break;
+ }
+
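+ /* The switch above gives bits per color component; a pixel carries
+ * three components, so convert to bits per pixel. */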
+ bpp = bpp * 3;
+
+ /* TODO need to know link rate */
+
+ pbn = drm_dp_calc_pbn_mode(clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+
+ if (!ret)
+ return false;
+
+ } else {
+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+ }
+
+ ret = drm_dp_update_payload_part1(mst_mgr);
+
+ /* mst_mgr->payloads holds the VC payloads that are notified to the MST
+ * branch device via DPCD or sideband AUX messages. Slots 1-63 are
+ * allocated sequentially for each stream, and the AMD ASIC stream slot
+ * allocation must follow the same sequence. Copy the DRM MST allocation
+ * to dc. */
+
+ get_payload_table(aconnector, proposed_table);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+/*
+ * Polls for ACT (allocation change trigger) handled by the immediate
+ * downstream device.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_check_act_status(mst_mgr);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_port = aconnector->port;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_update_payload_part2(mst_mgr);
+
+ if (ret)
+ return false;
+
+ if (!enable)
+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
+
+ return true;
+}
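+
+/*
+ * Note (illustrative, not enforced here): dc is expected to drive the three
+ * MST helpers above in the usual DP MST programming order:
+ *
+ *	dm_helpers_dp_mst_write_payload_allocation_table()     - part 1,
+ *		program the VC payload table (drm_dp_update_payload_part1)
+ *	dm_helpers_dp_mst_poll_for_allocation_change_trigger() - wait for the
+ *		branch device to signal ACT handled
+ *	dm_helpers_dp_mst_send_payload_allocation()            - part 2, send
+ *		the ALLOCATE_PAYLOAD message (drm_dp_update_payload_part2)
+ *
+ * The actual call sites live in the dc core.
+ */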
+
+bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
+{
+ return true;
+}
+
+void dm_dtn_log_begin(struct dc_context *ctx)
+{}
+
+void dm_dtn_log_append_v(struct dc_context *ctx,
+ const char *pMsg, ...)
+{}
+
+void dm_dtn_log_end(struct dc_context *ctx)
+{}
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ if (boot) {
+ DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+ return true;
+ }
+
+ DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return;
+ }
+
+ DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ if (aconnector->mst_mgr.mst_state == true)
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+ data, size) > 0;
+}
+
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+ address, (uint8_t *)data, size) > 0;
+}
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_msg *msgs;
+ int i = 0;
+ int num = cmd->number_of_payloads;
+ bool result;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
+
+ if (!msgs)
+ return false;
+
+ for (i = 0; i < num; i++) {
+ msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
+ msgs[i].addr = cmd->payloads[i].address;
+ msgs[i].len = cmd->payloads[i].length;
+ msgs[i].buf = cmd->payloads[i].data;
+ }
+
+ result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
+
+ kfree(msgs);
+
+ return result;
+}
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_adapter *ddc;
+ int retry = 3;
+ enum dc_edid_status edid_status;
+ struct edid *edid;
+
+ if (link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /* Some dongles read the EDID incorrectly the first time;
+ * verify the checksum and retry to make sure we read a correct EDID.
+ */
+ do {
+
+ edid = drm_get_edid(&aconnector->base, ddc);
+
+ if (!edid)
+ return EDID_NO_RESPONSE;
+
+ sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
+ memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
+
+ /* We don't need the original edid anymore */
+ kfree(edid);
+
+ edid_status = dm_helpers_parse_edid_caps(
+ ctx,
+ &sink->dc_edid,
+ &sink->edid_caps);
+
+ } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+
+ if (edid_status != EDID_OK)
+ DRM_ERROR("EDID err: %d, on connector: %s",
+ edid_status,
+ aconnector->base.name);
+
+ return edid_status;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..ca5d0d1581dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+struct handler_common_data {
+ struct list_head list;
+ interrupt_handler handler;
+ void *handler_arg;
+
+ /* DM which this handler belongs to */
+ struct amdgpu_display_manager *dm;
+};
+
+struct amdgpu_dm_irq_handler_data {
+ struct handler_common_data hcd;
+ /* DAL irq source which registered for this interrupt. */
+ enum dc_irq_source irq_source;
+};
+
+struct amdgpu_dm_timer_handler_data {
+ struct handler_common_data hcd;
+ struct delayed_work d_work;
+};
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+static void init_handler_common_data(struct handler_common_data *hcd,
+ void (*ih)(void *),
+ void *args,
+ struct amdgpu_display_manager *dm)
+{
+ hcd->handler = ih;
+ hcd->handler_arg = args;
+ hcd->dm = dm;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+ struct list_head *entry;
+ struct irq_list_head *irq_list_head =
+ container_of(work, struct irq_list_head, work);
+ struct list_head *handler_list = &irq_list_head->head;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+
+ list_for_each(entry, handler_list) {
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ /* Call a DAL subcomponent which registered for interrupt notification
+ * at INTERRUPT_LOW_IRQ_CONTEXT.
+ * (The most common use is HPD interrupt) */
+}
+
+/**
+ * Remove a handler and return a pointer to the handler list from which the
+ * handler was removed.
+ */
+static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ void *ih,
+ const struct dc_interrupt_params *int_params)
+{
+ struct list_head *hnd_list;
+ struct list_head *entry, *tmp;
+ struct amdgpu_dm_irq_handler_data *handler;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+ enum dc_irq_source irq_source;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ irq_source = int_params->irq_source;
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_for_each_safe(entry, tmp, hnd_list) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ if (ih == handler) {
+ /* Found our handler. Remove it from the list. */
+ list_del(&handler->hcd.list);
+ handler_removed = true;
+ break;
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_removed == false) {
+ /* Not necessarily an error - caller may not
+ * know the context. */
+ return NULL;
+ }
+
+ kfree(handler);
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+ ih, int_params->irq_source, int_params->int_context);
+
+ return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(struct amdgpu_device *adev,
+ struct amdgpu_dm_timer_handler_data *handler_in)
+{
+ struct amdgpu_dm_timer_handler_data *handler_temp;
+ struct list_head *handler_list;
+ struct list_head *entry, *tmp;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ handler_list = &adev->dm.timer_handler_list;
+
+ list_for_each_safe(entry, tmp, handler_list) {
+ /* Note that list_for_each_safe() guarantees that
+ * handler_temp is NOT null. */
+ handler_temp = list_entry(entry,
+ struct amdgpu_dm_timer_handler_data, hcd.list);
+
+ if (handler_in == NULL || handler_in == handler_temp) {
+ list_del(&handler_temp->hcd.list);
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+ handler_temp);
+
+ if (handler_in == NULL) {
+ /* Since it is still in the queue, it must
+ * be cancelled. */
+ cancel_delayed_work_sync(&handler_temp->d_work);
+ }
+
+ kfree(handler_temp);
+ handler_removed = true;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ /* Remove ALL handlers. */
+ if (handler_in == NULL)
+ continue;
+
+ /* Remove a SPECIFIC handler.
+ * Found our handler - we can stop here. */
+ if (handler_in == handler_temp)
+ break;
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_in != NULL && handler_removed == false)
+ DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+ handler_in);
+}
+
+static bool
+validate_irq_registration_params(struct dc_interrupt_params *int_params,
+ void (*ih)(void *))
+{
+ if (NULL == int_params || NULL == ih) {
+ DRM_ERROR("DM_IRQ: invalid input!\n");
+ return false;
+ }
+
+ if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+ int_params->int_context);
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+ int_params->irq_source);
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
+ irq_handler_idx handler_idx)
+{
+ if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+ return false;
+ }
+
+ return true;
+}
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args)
+{
+ struct list_head *hnd_list;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ unsigned long irq_table_flags;
+ enum dc_irq_source irq_source;
+
+ if (false == validate_irq_registration_params(int_params, ih))
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+
+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ if (!handler_data) {
+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ init_handler_common_data(&handler_data->hcd, ih, handler_args,
+ &adev->dm);
+
+ irq_source = int_params->irq_source;
+
+ handler_data->irq_source = irq_source;
+
+ /* Lock the list, add the handler. */
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_add_tail(&handler_data->hcd.list, hnd_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ /* This pointer will be stored by code which requested interrupt
+ * registration.
+ * The same pointer will be needed in order to unregister the
+ * interrupt. */
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+ handler_data,
+ irq_source,
+ int_params->int_context);
+
+ return handler_data;
+}
+
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih)
+{
+ struct list_head *handler_list;
+ struct dc_interrupt_params int_params;
+ int i;
+
+ if (false == validate_irq_unregistration_params(irq_source, ih))
+ return;
+
+ memset(&int_params, 0, sizeof(int_params));
+
+ int_params.irq_source = irq_source;
+
+ for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
+
+ int_params.int_context = i;
+
+ handler_list = remove_irq_handler(adev, ih, &int_params);
+
+ if (handler_list != NULL)
+ break;
+ }
+
+ if (handler_list == NULL) {
+ /* If we got here, it means we searched all irq contexts
+ * for this irq source, but the handler was not found. */
+ DRM_ERROR(
+ "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+ ih, irq_source);
+ }
+}
+
+int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+
+ DRM_DEBUG_KMS("DM_IRQ\n");
+
+ spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ /* low context handler list init */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ INIT_LIST_HEAD(&lh->head);
+ INIT_WORK(&lh->work, dm_irq_work_func);
+
+ /* high context handler init */
+ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ }
+
+ INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+ /* allocate and initialize the workqueue for DM timer */
+ adev->dm.timer_workqueue = create_singlethread_workqueue(
+ "dm_timer_queue");
+ if (adev->dm.timer_workqueue == NULL) {
+ DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* DM IRQ and timer resource release */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+ DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+ /* The handler was removed from the table,
+ * it means it is safe to flush all the 'work'
+ * (because no code can schedule a new one). */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ flush_work(&lh->work);
+ }
+
+ /* Cancel ALL timers and release handlers (if any). */
+ remove_timer_handler(adev, NULL);
+ /* Release the queue itself. */
+ destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h;
+ struct list_head *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+ /*
+ * Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
+ * interrupts will be disabled by manage_dm_interrupts() when a CRTC is
+ * disabled.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, false);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+ /* re-enable short pulse interrupts HW interrupt */
+ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+ /*
+ * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
+ * interrupts will be enabled by manage_dm_interrupts() when a CRTC is
+ * enabled.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ */
+static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ unsigned long irq_table_flags;
+ struct work_struct *work = NULL;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+ work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (work) {
+ if (!schedule_work(work))
+ DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+ irq_source);
+ }
+
+}
+
+/*
+ * amdgpu_dm_irq_immediate_work - call all handlers registered for the
+ * high IRQ context immediately, without deferring to a work queue.
+ */
+static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ struct list_head *entry;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ list_for_each(
+ entry,
+ &adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ /* Call a subcomponent which registered for immediate
+ * interrupt notification */
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+
+ enum dc_irq_source src =
+ dc_interrupt_to_irq_source(
+ adev->dm.dc,
+ entry->src_id,
+ entry->src_data[0]);
+
+ dc_interrupt_ack(adev->dm.dc, src);
+
+ /* Call high irq work immediately */
+ amdgpu_dm_irq_immediate_work(adev, src);
+ /*Schedule low_irq work */
+ amdgpu_dm_irq_schedule_work(adev, src);
+
+ return 0;
+}
+
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+ switch (type) {
+ case AMDGPU_HPD_1:
+ return DC_IRQ_SOURCE_HPD1;
+ case AMDGPU_HPD_2:
+ return DC_IRQ_SOURCE_HPD2;
+ case AMDGPU_HPD_3:
+ return DC_IRQ_SOURCE_HPD3;
+ case AMDGPU_HPD_4:
+ return DC_IRQ_SOURCE_HPD4;
+ case AMDGPU_HPD_5:
+ return DC_IRQ_SOURCE_HPD5;
+ case AMDGPU_HPD_6:
+ return DC_IRQ_SOURCE_HPD6;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
+ bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, src, st);
+ return 0;
+}
+
+static inline int dm_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state,
+ const enum irq_type dal_irq_type,
+ const char *func)
+{
+ bool st;
+ enum dc_irq_source irq_source;
+
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
+
+ if (!acrtc) {
+ DRM_ERROR(
+ "%s: crtc is NULL at id :%d\n",
+ func,
+ crtc_id);
+ return 0;
+ }
+
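+ /* Per-CRTC DC interrupt sources are laid out consecutively, so the base
+ * irq type plus the OTG (timing generator) instance selects the source
+ * belonging to this CRTC. */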
+ irq_source = dal_irq_type + acrtc->otg_inst;
+
+ st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, irq_source, st);
+ return 0;
+}
+
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_PFLIP,
+ __func__);
+}
+
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VBLANK,
+ __func__);
+}
+
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+ .set = amdgpu_dm_set_crtc_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+ .set = amdgpu_dm_set_pflip_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+ .set = amdgpu_dm_set_hpd_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ true);
+ }
+ }
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ false);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 000000000000..82f8e761beca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ * 0 - success
+ * non-zero - error
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * IRQ Handler Index on success.
+ * DAL_INVALID_IRQ_HANDLER_IDX on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args);
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ * by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by
+ * amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih_index);
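+
+/*
+ * Illustrative usage sketch (not part of the API): a DM sub-module would
+ * typically pair the two calls above roughly as follows; 'my_vblank_handler'
+ * and 'my_arg' are hypothetical names used only for illustration.
+ *
+ *	struct dc_interrupt_params int_params = { 0 };
+ *	void *handler;
+ *
+ *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ *	int_params.irq_source = DC_IRQ_SOURCE_VBLANK1;
+ *	handler = amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ *						    my_vblank_handler, my_arg);
+ *	...
+ *	amdgpu_dm_irq_unregister_interrupt(adev, int_params.irq_source, handler);
+ */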
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
+ * amdgpu_dm_irq_resume_late - enable ASIC interrupts during resume.
+ *
+ */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
new file mode 100644
index 000000000000..f8efb98b1fa7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/version.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+#include "dc_link_ddc.h"
+
+/* #define TRACE_DPCD */
+
+#ifdef TRACE_DPCD
+#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
+
+static inline char *side_band_msg_type_to_str(uint32_t address)
+{
+ static char str[10] = {0};
+
+ if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
+ strcpy(str, "DOWN_REQ");
+ else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
+ strcpy(str, "UP_REP");
+ else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
+ strcpy(str, "DOWN_REP");
+ else
+ strcpy(str, "UP_REQ");
+
+ return str;
+}
+
+static void log_dpcd(uint8_t type,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size,
+ bool res)
+{
+ DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
+ (type == DP_AUX_NATIVE_READ) ||
+ (type == DP_AUX_I2C_READ) ?
+ "Read" : "Write",
+ address,
+ SIDE_BAND_MSG(address) ?
+ side_band_msg_type_to_str(address) : "Nop",
+ res ? "OK" : "Fail");
+
+ if (res) {
+ print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
+ }
+}
+#endif
+
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ default:
+ return 0;
+ }
+
+#ifdef TRACE_DPCD
+ log_dpcd(msg->request,
+ msg->address,
+ msg->buffer,
+ msg->size,
+ res == DDC_RESULT_SUCESSFULL);
+#endif
+
+ return msg->size;
+}
+
+static enum drm_connector_status
+dm_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
+
+ enum drm_connector_status status =
+ drm_dp_mst_detect_port(
+ connector,
+ &master->mst_mgr,
+ aconnector->port);
+
+ return status;
+}
+
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+
+ drm_encoder_cleanup(&amdgpu_encoder->base);
+ kfree(amdgpu_encoder);
+ drm_connector_cleanup(connector);
+ kfree(amdgpu_dm_connector);
+}
+
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+ .detect = dm_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dm_dp_mst_connector_destroy,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static int dm_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int ret;
+
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+
+ return ret;
+}
+
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ int ret = 0;
+
+ if (!aconnector)
+ return dm_connector_update_modes(connector, NULL);
+
+ if (!aconnector->edid) {
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return ret;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (!dc_sink)
+ return 0;
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ if (aconnector->dc_sink)
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, edid);
+ }
+
+ ret = dm_connector_update_modes(connector, aconnector->edid);
+
+ return ret;
+}
+
+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ return &amdgpu_dm_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+ .get_modes = dm_dp_mst_get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = dm_mst_best_encoder,
+};
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static struct amdgpu_encoder *
+dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_encoder *encoder;
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->base.helper_private;
+ struct drm_encoder *enc_master =
+ connector_funcs->best_encoder(&connector->base);
+
+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+ amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return NULL;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ drm_encoder_init(
+ dev,
+ &amdgpu_encoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_DPMST,
+ NULL);
+
+ drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+
+ return amdgpu_encoder;
+}
+
+static struct drm_connector *
+dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->mst_port == master
+ && !aconnector->port) {
+ DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = port;
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ drm_connector_list_iter_end(&conn_iter);
+ return &aconnector->base;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ return NULL;
+
+ connector = &aconnector->base;
+ aconnector->port = port;
+ aconnector->mst_port = master;
+
+ if (drm_connector_init(
+ dev,
+ connector,
+ &dm_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort)) {
+ kfree(aconnector);
+ return NULL;
+ }
+ drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ &adev->dm,
+ aconnector,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ master->dc_link,
+ master->connector_id);
+
+ aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+
+ /*
+ * TODO: understand why this one is needed
+ */
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.path_property,
+ 0);
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.tile_property,
+ 0);
+
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ /*
+ * Initialize connector state before adding the connector to drm and
+ * framebuffer lists
+ */
+ amdgpu_dm_connector_funcs_reset(connector);
+
+ DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ DRM_DEBUG_KMS(":%d\n", connector->base.id);
+
+ return connector;
+}
+
+static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = NULL;
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ }
+ if (aconnector->edid) {
+ kfree(aconnector->edid);
+ aconnector->edid = NULL;
+ }
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+}
+
+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static void dm_dp_mst_register_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ else
+ DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
+
+ drm_connector_register(connector);
+
+}
+
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+ .add_connector = dm_dp_add_mst_connector,
+ .destroy_connector = dm_dp_destroy_mst_connector,
+ .hotplug = dm_dp_mst_hotplug,
+ .register_connector = dm_dp_mst_register_connector
+};
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->dm_dp_aux.aux.name = "dmdc";
+ aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+ aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+ aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
+
+ drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ aconnector->mst_mgr.cbs = &dm_mst_cbs;
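+ /* 16 = max DPCD transaction bytes per sideband message,
+ * 4 = max MST payloads (streams) managed for this port;
+ * connector_id is used as the connector base id. */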
+ drm_dp_mst_topology_mgr_init(
+ &aconnector->mst_mgr,
+ dm->adev->ddev,
+ &aconnector->dm_dp_aux.aux,
+ 16,
+ 4,
+ aconnector->connector_id);
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
new file mode 100644
index 000000000000..2da851b40042
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
+#define __DAL_AMDGPU_DM_MST_TYPES_H__
+
+struct amdgpu_display_manager;
+struct amdgpu_dm_connector;
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
new file mode 100644
index 000000000000..5df8fd5b537c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+ /* TODO: return actual timestamp */
+ return 0;
+}
+
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+/**** power component interfaces ****/
+
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state)
+{
+ /*TODO*/
+ return false;
+}
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ /* TODO: complete implementation of
+ * amd_powerplay_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ amd_powerplay_display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
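
The repeated divisions by 10 above are unit conversions: DC tracks clocks in kHz, while the powerplay display configuration expects 10 kHz units. A minimal sketch of that conversion follows; the helper name khz_to_pp_10khz is illustrative only, not part of the driver.

/* Illustrative only: powerplay's pm_display_cfg fields are in 10 kHz units. */
static inline uint32_t khz_to_pp_10khz(uint32_t clock_khz)
{
	return clock_khz / 10;	/* e.g. 600000 kHz -> 60000 (x10 kHz) */
}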
+
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ /* Default values, in case PPLib is not compiled-in. */
+ sys_clks->mclk.max_khz = 800000;
+ sys_clks->mclk.min_khz = 800000;
+
+ sys_clks->sclk.max_khz = 600000;
+ sys_clks->sclk.min_khz = 300000;
+
+ if (adev->pm.dpm_enabled) {
+ sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
+ sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
+
+ sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
+ sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
+ }
+
+ return true;
+}
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ /* translate 10kHz to kHz */
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+ }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (amd_powerplay_get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+ &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher than the validation clock.
+ * That means the previous one is the highest
+ * non-boosted one. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
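
The trimming loops above keep only the levels at or below the validation clock; anything above it is treated as a boost level that display code must not rely on. The same idea as a standalone sketch on a plain array (names are illustrative, not driver API):

/* Keep only clock levels not exceeding max_khz; always keep at least one. */
static unsigned int trim_boost_levels(const unsigned int *levels_khz,
				      unsigned int num_levels,
				      unsigned int max_khz)
{
	unsigned int i;

	for (i = 0; i < num_levels; i++)
		if (levels_khz[i] > max_khz)
			return i > 0 ? i : 1;

	return num_levels;
}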
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{}
+
+/**** end of power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
new file mode 100644
index 000000000000..4f83e3011743
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+DC_LIBS += dcn10 dml
+endif
+
+DC_LIBS += dce120
+
+DC_LIBS += dce112
+DC_LIBS += dce110
+DC_LIBS += dce100
+DC_LIBS += dce80
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
+AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
new file mode 100644
index 000000000000..43c5ccdeeb72
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 'basics' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+ logger.o log_helpers.o vector.o
+
+AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
new file mode 100644
index 000000000000..23c9a0ec0181
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#define DIVIDER 10000
+
+/* S2D13 clamp range used by convert_float_matrix: [-3.0000 ... 3.0000] */
+#define S2D13_MIN (-3 * DIVIDER)
+#define S2D13_MAX (3 * DIVIDER)
+
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ int32_t numerator;
+ int32_t divisor = 1 << fractional_bits;
+
+ uint16_t result;
+
+ uint16_t d = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_abs(
+ arg));
+
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+ numerator = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_mul_int(
+ arg,
+ divisor));
+ else {
+ numerator = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(
+ dal_fixed31_32_from_int(
+ 1LL << integer_bits),
+ dal_fixed31_32_recip(
+ dal_fixed31_32_from_int(
+ divisor))));
+ }
+
+ if (numerator >= 0)
+ result = (uint16_t)numerator;
+ else
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+ if ((result != 0) && dal_fixed31_32_lt(
+ arg, dal_fixed31_32_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
+}
+/*
+ * convert_float_matrix
+ * Convert an array of fixed31_32 values into the HW register
+ * S2D13 format (a signed value with 2 integer bits and
+ * 13 fractional bits).
+ */
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size)
+{
+ const struct fixed31_32 min_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+ const struct fixed31_32 max_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+ uint32_t i;
+
+ for (i = 0; i < buffer_size; ++i) {
+ uint32_t reg_value =
+ fixed_point_to_int_frac(
+ dal_fixed31_32_clamp(
+ flt[i],
+ min_2_13,
+ max_2_13),
+ 2,
+ 13);
+
+ matrix[i] = (uint16_t)reg_value;
+ }
+}
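
Two worked values for the S2D13 encoding produced above, as a sketch assuming the dal_fixed31_32_* constructors from fixed31_32.h: with 2 integer and 13 fractional bits, +1.0 encodes as 1 << 13 = 0x2000 and +1.5 as 0x3000.

/* Illustrative usage: +1.5 in S2D13 (2 integer bits, 13 fractional bits). */
static uint16_t s2d13_example(void)
{
	/* returns 0x3000; similarly +1.0 encodes as 0x2000 (1 << 13) */
	return fixed_point_to_int_frac(dal_fixed31_32_from_fraction(3, 2), 2, 13);
}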
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
new file mode 100644
index 000000000000..ade785c4fdc7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_CONVERSION_H__
+#define __DAL_CONVERSION_H__
+
+#include "include/fixed31_32.h"
+
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits);
+
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size);
+
+static inline unsigned int log_2(unsigned int num)
+{
+ return ilog2(num);
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
new file mode 100644
index 000000000000..26936892c6f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed31_32.h"
+
+static inline uint64_t abs_i64(
+ int64_t arg)
+{
+ if (arg > 0)
+ return (uint64_t)arg;
+ else
+ return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline uint64_t complete_integer_division_u64(
+ uint64_t dividend,
+ uint64_t divisor,
+ uint64_t *remainder)
+{
+ uint64_t result;
+
+ ASSERT(divisor);
+
+ result = div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+ uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+ uint64_t remainder;
+
+ /* determine integer part */
+
+ uint64_t res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
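
A worked example of the construction above: dal_fixed31_32_from_fraction(1, 3) long-divides the remainder one bit at a time, producing the repeating 01 pattern in the 32 fractional bits; the final rounding step does not round up in this case.

/* Illustrative usage: one third in 31.32 fixed point. */
static struct fixed31_32 one_third_example(void)
{
	/* .value == 0x55555555: integer part 0, fraction ~0.3333333333 */
	return dal_fixed31_32_from_fraction(1, 3);
}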
+
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+ int64_t arg)
+{
+ struct fixed31_32 res;
+
+ ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+ res.value = arg.value << shift;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
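
A compact restatement of the decomposition used above, since it is easy to lose among the ASSERTs: the product is split into four partial products so everything stays within 64-bit arithmetic, and only the fractional-by-fractional term is shifted back down.

/* With a = a_i + a_f / 2^32 and b = b_i + b_f / 2^32 (magnitudes, sign applied last):
 *
 *   a * b = (a_i * b_i) << 32              integer x integer
 *         + a_i * b_f + a_f * b_i          cross terms, already in 31.32 units
 *         + (a_f * b_f) >> 32              with a round-up adjustment
 *
 * The sign of the result is (a < 0) XOR (b < 0).
 */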
+
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ uint64_t arg_value = abs_i64(arg.value);
+
+ uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+ uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ uint64_t tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg)
+{
+ /*
+ * @note
+ * Good idea to use Newton's method
+ */
+
+ ASSERT(arg.value);
+
+ return dal_fixed31_32_from_fraction(
+ dal_fixed31_32_one.value,
+ arg.value);
+}
+
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 square;
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 27;
+
+ struct fixed31_32 arg_norm = arg;
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_two_pi,
+ dal_fixed31_32_abs(arg))) {
+ arg_norm = dal_fixed31_32_sub(
+ arg_norm,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_two_pi,
+ (int32_t)div64_s64(
+ arg_norm.value,
+ dal_fixed31_32_two_pi.value)));
+ }
+
+ square = dal_fixed31_32_sqr(arg_norm);
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = dal_fixed31_32_div(
+ dal_fixed31_32_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg)
+{
+ return dal_fixed31_32_mul(
+ arg,
+ dal_fixed31_32_sinc(arg));
+}
+
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 26;
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ struct fixed31_32 arg)
+{
+ uint32_t n = 9;
+
+ struct fixed31_32 res = dal_fixed31_32_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+ do
+ res = dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_mul(
+ arg,
+ res));
+}
+
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_ln2_div_2,
+ dal_fixed31_32_abs(arg))) {
+ int32_t m = dal_fixed31_32_round(
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_ln2));
+
+ struct fixed31_32 r = dal_fixed31_32_sub(
+ arg,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(dal_fixed31_32_lt(
+ dal_fixed31_32_abs(r),
+ dal_fixed31_32_one));
+
+ if (m > 0)
+ return dal_fixed31_32_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (uint8_t)m);
+ else
+ return dal_fixed31_32_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return dal_fixed31_32_one;
+}
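
A quick numeric check of the range reduction above: for x = 3, m = round(3 / ln 2) = 4 and r = 3 - 4 ln 2 ~ 0.227, so the result is computed as (1 << 4) * exp(0.227) ~ 20.09, matching e^3. Illustrative usage:

/* Illustrative check: e^3 via the range reduction above. */
static struct fixed31_32 exp_three_example(void)
{
	/* floor of the result is 20 (e^3 ~ 20.0855) */
	return dal_fixed31_32_exp(dal_fixed31_32_from_int(3));
}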
+
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+ /* TODO improve 1st estimation */
+
+ struct fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct fixed31_32 res1 = dal_fixed31_32_add(
+ dal_fixed31_32_sub(
+ res,
+ dal_fixed31_32_one),
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_exp(res)));
+
+ error = dal_fixed31_32_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_exp(
+ dal_fixed31_32_mul(
+ dal_fixed31_32_log(arg1),
+ arg2));
+}
+
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_half.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_one.value -
+ dal_fixed31_32_epsilon.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* Generic helper to translate a fixed point value into an integer format
+ * consisting of integer_bits of integer part and fractional_bits of
+ * fractional part. For example, dal_fixed31_32_u2d19 uses it to produce a
+ * 2-bit integer part and a 19-bit fractional part packed into 32 bits.
+ * It is used in HW programming (scaler).
+ */
+
+static inline uint32_t ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits)
+{
+ /* 1. create mask of integer part */
+ uint32_t result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+ /* 3. shrink fixed point integer part to be of integer_bits width*/
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+ /* 5. shrink fixed point fractional part to of fractional_bits width*/
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
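
A worked value for the packing helper above: dal_fixed31_32_u2d19 keeps 2 integer bits and the top 19 fractional bits, so 1.5 packs to 0xC0000 (integer bit 0x80000 plus the leading fractional bit 0x40000). Illustrative usage:

/* Illustrative usage: pack a 1.5x scaling ratio into U2.19 for the scaler. */
static uint32_t ratio_u2d19_example(void)
{
	/* returns 0xC0000 */
	return dal_fixed31_32_u2d19(dal_fixed31_32_from_fraction(3, 2));
}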
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
new file mode 100644
index 000000000000..4d3aaa82a07b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed32_32.h"
+
+static uint64_t u64_div(uint64_t n, uint64_t d)
+{
+ uint32_t i = 0;
+ uint64_t r;
+ uint64_t q = div64_u64_rem(n, d, &r);
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t sbit = q & (1ULL<<63);
+
+ r <<= 1;
+ r |= sbit ? 1 : 0;
+ q <<= 1;
+ if (r >= d) {
+ r -= d;
+ q |= 1;
+ }
+ }
+
+ if (2*r >= d)
+ q += 1;
+ return q;
+}
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+ return fx;
+}
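
The construction above amounts to computing (n << 32) / d with round-to-nearest on the last fractional bit. For example, dal_fixed32_32_from_fraction(2, 3) produces 0xAAAAAAAB: the 32 truncated bits are 0xAAAAAAAA and the next bit is 1, so u64_div rounds up. Illustrative usage:

/* Illustrative usage: 2/3 in 32.32 fixed point. */
static struct fixed32_32 two_thirds_example(void)
{
	/* .value == 0x00000000AAAAAAAB (integer part 0) */
	return dal_fixed32_32_from_fraction(2, 3);
}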
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx = {lhs.value + rhs.value};
+
+ ASSERT(fx.value >= rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+ ASSERT(fx.value >= (uint64_t)rhs << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= rhs.value);
+ fx.value = lhs.value - rhs.value;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+ fx.value = lhs.value - ((uint64_t)rhs<<32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhs_int = lhs.value>>32;
+ uint64_t lhs_frac = (uint32_t)lhs.value;
+ uint64_t rhs_int = rhs.value>>32;
+ uint64_t rhs_frac = (uint32_t)rhs.value;
+ uint64_t ahbh = lhs_int * rhs_int;
+ uint64_t ahbl = lhs_int * rhs_frac;
+ uint64_t albh = lhs_frac * rhs_int;
+ uint64_t albl = lhs_frac * rhs_frac;
+
+ ASSERT((ahbh>>32) == 0);
+
+ fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+ return fx;
+
+}
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+ uint64_t lhsf;
+
+ ASSERT((lhsi>>32) == 0);
+ lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+ ASSERT((lhsi<<32) + lhsf >= lhsf);
+ fx.value = (lhsi<<32) + lhsf;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+ return fx;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+ ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+ return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+ ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+ return (v.value + (1ULL<<31))>>32;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
new file mode 100644
index 000000000000..147822545252
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/grph_object_id.h"
+
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+ if (false == dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
new file mode 100644
index 000000000000..785b943b60ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "logger.h"
+#include "include/logger_interface.h"
+#include "dm_helpers.h"
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+struct dc_signal_type_info {
+ enum signal_type type;
+ char name[MAX_NAME_LEN];
+};
+
+static const struct dc_signal_type_info signal_type_info_tbl[] = {
+ {SIGNAL_TYPE_NONE, "NC"},
+ {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
+ {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
+ {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
+ {SIGNAL_TYPE_LVDS, "LVDS"},
+ {SIGNAL_TYPE_RGB, "VGA"},
+ {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
+ {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+ {SIGNAL_TYPE_EDP, "eDP"},
+ {SIGNAL_TYPE_VIRTUAL, "Virtual"}
+};
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...)
+{
+ int i;
+ va_list args;
+ struct log_entry entry = { 0 };
+ enum signal_type signal;
+
+ if (link->local_sink)
+ signal = link->local_sink->sink_signal;
+ else
+ signal = link->connector_signal;
+
+ if (link->type == dc_connection_mst_branch)
+ signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+ dm_logger_open(ctx->logger, &entry, event);
+
+ for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+ if (signal == signal_type_info_tbl[i].type)
+ break;
+
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ i = 0; /* signal type not in the table: fall back to "NC" */
+
+ dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+ signal_type_info_tbl[i].name,
+ link->link_index);
+
+ va_start(args, msg);
+ entry.buf_offset += dm_log_to_buffer(
+ &entry.buf[entry.buf_offset],
+ LOG_MAX_LINE_SIZE - entry.buf_offset,
+ msg, args);
+
+ if (entry.buf[strlen(entry.buf) - 1] == '\n') {
+ entry.buf[strlen(entry.buf) - 1] = '\0';
+ entry.buf_offset--;
+ }
+
+ if (hex_data)
+ for (i = 0; i < hex_data_count; i++)
+ dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+ dm_logger_append(&entry, "^\n");
+ dm_helpers_dc_conn_log(ctx, &entry, event);
+ dm_logger_close(&entry);
+
+ va_end(args);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
new file mode 100644
index 000000000000..e04e8ecd4874
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_ERROR, "Error"},
+ {LOG_WARNING, "Warning"},
+ {LOG_DEBUG, "Debug"},
+ {LOG_DC, "DC_Interface"},
+ {LOG_SURFACE, "Surface"},
+ {LOG_HW_HOTPLUG, "HW_Hotplug"},
+ {LOG_HW_LINK_TRAINING, "HW_LKTN"},
+ {LOG_HW_SET_MODE, "HW_Mode"},
+ {LOG_HW_RESUME_S3, "HW_Resume"},
+ {LOG_HW_AUDIO, "HW_Audio"},
+ {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
+ {LOG_MST, "MST"},
+ {LOG_SCALER, "Scaler"},
+ {LOG_BIOS, "BIOS"},
+ {LOG_BANDWIDTH_CALCS, "BWCalcs"},
+ {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
+ {LOG_I2C_AUX, "I2C_AUX"},
+ {LOG_SYNC, "Sync"},
+ {LOG_BACKLIGHT, "Backlight"},
+ {LOG_FEATURE_OVERRIDE, "Override"},
+ {LOG_DETECTION_EDID_PARSER, "Edid"},
+ {LOG_DETECTION_DP_CAPS, "DP_Caps"},
+ {LOG_RESOURCE, "Resource"},
+ {LOG_DML, "DML"},
+ {LOG_EVENT_MODE_SET, "Mode"},
+ {LOG_EVENT_DETECTION, "Detect"},
+ {LOG_EVENT_LINK_TRAINING, "LKTN"},
+ {LOG_EVENT_LINK_LOSS, "LinkLoss"},
+ {LOG_EVENT_UNDERFLOW, "Underflow"},
+ {LOG_IF_TRACE, "InterfaceTrace"},
+ {LOG_DTN, "DTN"}
+};
+
+
+/* ----------- Object init and destruction ----------- */
+static bool construct(struct dc_context *ctx, struct dal_logger *logger,
+ uint32_t log_mask)
+{
+ /* malloc buffer and init offsets */
+ logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+ logger->log_buffer = kzalloc(logger->log_buffer_size,
+ GFP_KERNEL);
+
+ if (!logger->log_buffer)
+ return false;
+
+ /* Initialize both offsets to start of buffer (empty) */
+ logger->buffer_read_offset = 0;
+ logger->buffer_write_offset = 0;
+
+ logger->open_count = 0;
+
+ logger->flags.bits.ENABLE_CONSOLE = 1;
+ logger->flags.bits.ENABLE_BUFFER = 0;
+
+ logger->ctx = ctx;
+
+ logger->mask = log_mask;
+
+ return true;
+}
+
+static void destruct(struct dal_logger *logger)
+{
+ if (logger->log_buffer) {
+ kfree(logger->log_buffer);
+ logger->log_buffer = NULL;
+ }
+}
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
+{
+ /* malloc struct */
+ struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
+ GFP_KERNEL);
+
+ if (!logger)
+ return NULL;
+ if (!construct(ctx, logger, log_mask)) {
+ kfree(logger);
+ return NULL;
+ }
+
+ return logger;
+}
+
+uint32_t dal_logger_destroy(struct dal_logger **logger)
+{
+ if (logger == NULL || *logger == NULL)
+ return 1;
+ destruct(*logger);
+ kfree(*logger);
+ *logger = NULL;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static bool dal_logger_should_log(
+ struct dal_logger *logger,
+ enum dc_log_type log_type)
+{
+ if (logger->mask & (1 << log_type))
+ return true;
+
+ return false;
+}
+
+static void log_to_debug_console(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_CONSOLE == 0)
+ return;
+
+ if (entry->buf_offset) {
+ switch (entry->type) {
+ case LOG_ERROR:
+ dm_error("%s", entry->buf);
+ break;
+ default:
+ dm_output_to_console("%s", entry->buf);
+ break;
+ }
+ }
+}
+
+/* Print everything unread existing in log_buffer to debug console*/
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+{
+ char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+
+ if (should_warn)
+ dm_output_to_console(
+ "---------------- FLUSHING LOG BUFFER ----------------\n");
+ while (logger->buffer_read_offset < logger->buffer_write_offset) {
+
+ if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+ dm_output_to_console("%s", string_start);
+ string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+ }
+ logger->buffer_read_offset++;
+ }
+ if (should_warn)
+ dm_output_to_console(
+ "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+}
+
+static void log_to_internal_buffer(struct log_entry *entry)
+{
+
+ uint32_t size = entry->buf_offset;
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_BUFFER == 0)
+ return;
+
+ if (logger->log_buffer == NULL)
+ return;
+
+ if (size > 0 && size < logger->log_buffer_size) {
+
+ int buffer_space = logger->log_buffer_size -
+ logger->buffer_write_offset;
+
+ if (logger->buffer_write_offset == logger->buffer_read_offset) {
+ /* Buffer is empty, start writing at beginning */
+ buffer_space = logger->log_buffer_size;
+ logger->buffer_write_offset = 0;
+ logger->buffer_read_offset = 0;
+ }
+
+ if (buffer_space > size) {
+ /* No wrap around, copy 'size' bytes
+ * from 'entry->buf' to 'log_buffer'
+ */
+ memmove(logger->log_buffer +
+ logger->buffer_write_offset,
+ entry->buf, size);
+ logger->buffer_write_offset += size;
+
+ } else {
+ /* Not enough room remaining, we should flush
+ * existing logs */
+
+ /* Flush existing unread logs to console */
+ dm_logger_flush_buffer(logger, true);
+
+ /* Start writing to beginning of buffer */
+ memmove(logger->log_buffer, entry->buf, size);
+ logger->buffer_write_offset = size;
+ logger->buffer_read_offset = 0;
+ }
+
+ }
+}
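
The buffering policy implemented above is: reset both offsets once everything has been read, append when the new entry fits, otherwise flush the unread portion to the console and restart at the beginning of the buffer. A compact sketch of the same policy on a plain byte buffer (the names and the flush callback are illustrative only):

/* Illustrative only: append one entry, flushing via flush() when it will not fit. */
static void buffered_append(char *buf, size_t buf_size,
			    size_t *read_off, size_t *write_off,
			    const char *entry, size_t len,
			    void (*flush)(const char *buf, size_t from, size_t to))
{
	if (len == 0 || len >= buf_size)
		return;				/* entry can never fit */

	if (*write_off == *read_off)
		*write_off = *read_off = 0;	/* everything already consumed */

	if (buf_size - *write_off > len) {
		memcpy(buf + *write_off, entry, len);
		*write_off += len;
	} else {
		flush(buf, *read_off, *write_off);	/* emit unread data first */
		memcpy(buf, entry, len);
		*write_off = len;
		*read_off = 0;
	}
}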
+
+static void log_heading(struct log_entry *entry)
+{
+ int j;
+
+ for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+ const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+ if (info->type == entry->type)
+ dm_logger_append(entry, "[%s]\t", info->name);
+ }
+}
+
+static void append_entry(
+ struct log_entry *entry,
+ char *buffer,
+ uint32_t buf_size)
+{
+ if (!entry->buf ||
+ entry->buf_offset + buf_size > entry->max_buf_bytes
+ ) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* Todo: check if off by 1 byte due to \0 anywhere */
+ memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+ entry->buf_offset += buf_size;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Warning: Be careful that 'msg' is null terminated and that the total size
+ * is less than LOG_MAX_LINE_SIZE, including the '\0'.
+ */
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...)
+{
+ if (logger && dal_logger_should_log(logger, log_type)) {
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+ struct log_entry entry;
+
+ va_start(args, msg);
+
+ entry.logger = logger;
+
+ entry.buf = buffer;
+
+ entry.buf_offset = 0;
+ entry.max_buf_bytes = LOG_MAX_LINE_SIZE; /* size of the on-stack 'buffer' */
+
+ entry.type = log_type;
+
+ log_heading(&entry);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+
+ buffer[entry.buf_offset + size] = '\0';
+ entry.buf_offset += size + 1;
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(&entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(&entry);
+
+ va_end(args);
+ }
+}
+
+/* Same as dm_logger_write, except without open() and close(), which must
+ * be done separately.
+ */
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...)
+{
+ struct dal_logger *logger;
+
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ logger = entry->logger;
+
+ if (logger && logger->open_count > 0 &&
+ dal_logger_should_log(logger, entry->type)) {
+
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+
+ va_start(args, msg);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+ if (size < LOG_MAX_LINE_SIZE - 1) {
+ append_entry(entry, buffer, size);
+ } else {
+ append_entry(entry, "LOG_ERROR, line too long\n", 27);
+ }
+
+ va_end(args);
+ }
+}
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry, /* out */
+ enum dc_log_type log_type)
+{
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ entry->type = log_type;
+ entry->logger = logger;
+
+ entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+ GFP_KERNEL);
+
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+ logger->open_count++;
+
+ log_heading(entry);
+}
+
+void dm_logger_close(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger && logger->open_count > 0) {
+ logger->open_count--;
+ } else {
+ BREAK_TO_DEBUGGER();
+ goto cleanup;
+ }
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(entry);
+
+ /* TODO: Write end heading */
+
+cleanup:
+ if (entry->buf) {
+ kfree(entry->buf);
+ entry->buf = NULL;
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = 0;
+ }
+}
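
A minimal usage sketch of the entry API above; 'logger' is assumed to come from dal_logger_create(), and connector_index/plugged stand in for caller-provided values:

/* Illustrative usage of the open/append/close entry API. */
static void log_hotplug_example(struct dal_logger *logger,
				int connector_index, bool plugged)
{
	struct log_entry entry;

	dm_logger_open(logger, &entry, LOG_HW_HOTPLUG);
	dm_logger_append(&entry, "connector %d: hpd %s\n",
			 connector_index, plugged ? "high" : "low");
	dm_logger_close(&entry);
}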
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.h b/drivers/gpu/drm/amd/display/dc/basics/logger.h
new file mode 100644
index 000000000000..09722f0f8aa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_H__
+#define __DAL_LOGGER_H__
+
+
+#endif /* __DAL_LOGGER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
new file mode 100644
index 000000000000..217b8f1f7bf6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/vector.h"
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ vector->container = NULL;
+
+ if (!struct_size || !capacity) {
+ /* Container must be non-zero size*/
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+ if (vector->container == NULL)
+ return false;
+ vector->capacity = capacity;
+ vector->struct_size = struct_size;
+ vector->count = 0;
+ vector->ctx = ctx;
+ return true;
+}
+
+bool dal_vector_presized_costruct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ uint32_t i;
+
+ vector->container = NULL;
+
+ if (!struct_size || !count) {
+ /* Container must be non-zero size*/
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+
+ if (vector->container == NULL)
+ return false;
+
+ /* If the caller didn't supply an initial value then the default
+ * of all zeros is expected, which is exactly what kzalloc()
+ * initialises the memory to. */
+ if (NULL != initial_value) {
+ for (i = 0; i < count; ++i)
+ memmove(
+ vector->container + i * struct_size,
+ initial_value,
+ struct_size);
+ }
+
+ vector->capacity = count;
+ vector->struct_size = struct_size;
+ vector->count = count;
+ return true;
+}
+
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_presized_costruct(
+ vector, ctx, size, initial_value, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_construct(vector, ctx, capacity, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+void dal_vector_destruct(
+ struct vector *vector)
+{
+ kfree(vector->container);
+ vector->count = 0;
+ vector->capacity = 0;
+}
+
+void dal_vector_destroy(
+ struct vector **vector)
+{
+ if (vector == NULL || *vector == NULL)
+ return;
+ dal_vector_destruct(*vector);
+ kfree(*vector);
+ *vector = NULL;
+}
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector)
+{
+ return vector->count;
+}
+
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index)
+{
+ if (vector->container == NULL || index >= vector->count)
+ return NULL;
+ return vector->container + (index * vector->struct_size);
+}
+
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index)
+{
+ if (index >= vector->count)
+ return false;
+
+ if (index != vector->count - 1)
+ memmove(
+ vector->container + (index * vector->struct_size),
+ vector->container + ((index + 1) * vector->struct_size),
+ (vector->count - index - 1) * vector->struct_size);
+ vector->count -= 1;
+
+ return true;
+}
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index)
+{
+ void *where = dal_vector_at_index(vector, index);
+
+ if (!where) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ memmove(
+ where,
+ what,
+ vector->struct_size);
+}
+
+static inline uint32_t calc_increased_capacity(
+ uint32_t old_capacity)
+{
+ return old_capacity * 2;
+}
+
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position)
+{
+ uint8_t *insert_address;
+
+ if (vector->count == vector->capacity) {
+ if (!dal_vector_reserve(
+ vector,
+ calc_increased_capacity(vector->capacity)))
+ return false;
+ }
+
+ insert_address = vector->container + (vector->struct_size * position);
+
+ if (vector->count && position < vector->count)
+ memmove(
+ insert_address + vector->struct_size,
+ insert_address,
+ vector->struct_size * (vector->count - position));
+
+ memmove(
+ insert_address,
+ what,
+ vector->struct_size);
+
+ vector->count++;
+
+ return true;
+}
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item)
+{
+ return dal_vector_insert_at(vector, item, vector->count);
+}
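
A short usage sketch of the vector API above ('ctx' is assumed to be a valid struct dc_context pointer):

/* Illustrative usage: a growable vector of uint32_t values. */
static bool vector_usage_example(struct dc_context *ctx)
{
	struct vector *v = dal_vector_create(ctx, 4, sizeof(uint32_t));
	uint32_t item = 42;
	bool ok = false;

	if (v && dal_vector_append(v, &item)) {
		uint32_t *stored = dal_vector_at_index(v, 0);

		/* append doubles the capacity via dal_vector_reserve when full */
		ok = (stored && *stored == 42);
	}
	dal_vector_destroy(&v);
	return ok;
}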
+
+struct vector *dal_vector_clone(
+ const struct vector *vector)
+{
+ struct vector *vec_cloned;
+ uint32_t count;
+
+ /* create new vector */
+ count = dal_vector_get_count(vector);
+
+ if (count == 0)
+ /* when count is 0 we still want to create a clone of the vector
+ */
+ vec_cloned = dal_vector_create(
+ vector->ctx,
+ vector->capacity,
+ vector->struct_size);
+ else
+ /* Call "presized create" version, independently of how the
+ * original vector was created.
+ * The owner of original vector must know how to treat the new
+ * vector - as "presized" or as "regular".
+ * But from vector point of view it doesn't matter. */
+ vec_cloned = dal_vector_presized_create(vector->ctx, count,
+ NULL,/* no initial value */
+ vector->struct_size);
+
+ if (NULL == vec_cloned) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ /* copy vector's data */
+ memmove(vec_cloned->container, vector->container,
+ vec_cloned->struct_size * vec_cloned->capacity);
+
+ return vec_cloned;
+}
+
+uint32_t dal_vector_capacity(const struct vector *vector)
+{
+ return vector->capacity;
+}
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
+{
+ void *new_container;
+
+ if (capacity <= vector->capacity)
+ return true;
+
+ new_container = krealloc(vector->container,
+ capacity * vector->struct_size, GFP_KERNEL);
+
+ if (new_container) {
+ vector->container = new_container;
+ vector->capacity = capacity;
+ return true;
+ }
+
+ return false;
+}
+
+void dal_vector_clear(struct vector *vector)
+{
+ vector->count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
new file mode 100644
index 000000000000..6ec815dce9cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the 'bios' sub-component of DAL.
+# It provides the parsing and execution controls for the ATOM BIOS image.
+
+BIOS = bios_parser.o bios_parser_interface.o bios_parser_helper.o command_table.o command_table_helper.o bios_parser_common.o
+
+BIOS += command_table2.o command_table_helper2.o bios_parser2.o
+
+AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
+# DCE8.x is compiled.
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
+
+###############################################################################
+# DCE 11x
+###############################################################################
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper2_dce112.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
new file mode 100644
index 000000000000..aaaebd06d7ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "dc_bios_types.h"
+#include "include/gpio_service_interface.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "bios_parser.h"
+#include "bios_parser_types_internal.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+/* TODO remove - only needed for default i2c speed */
+#include "dc.h"
+
+#define THREE_PERCENT_OF_10000 300
+
+#define LAST_RECORD_TYPE 0xff
+
+/* GUID to validate external display connection info table (aka OPM module) */
+static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+ 0x91, 0x6E, 0x57, 0x09,
+ 0x3F, 0x6D, 0xD2, 0x11,
+ 0x39, 0x8E, 0x00, 0xA0,
+ 0xC9, 0x69, 0x72, 0x3B};
+
+#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision);
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list);
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list);
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id);
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info);
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static struct device_id device_type_from_device_id(uint16_t device_id);
+static uint32_t signal_to_ss_id(enum as_signal_type signal);
+static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+/*****************************************************************************/
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+static uint8_t bios_parser_get_connectors_number(
+ struct dc_bios *dcb);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info);
+
+/*****************************************************************************/
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
+static void bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
+{
+ ATOM_OBJECT_TABLE *table;
+
+ uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+
+ if (!table)
+ return 0;
+ else
+ return table->ucNumberOfObjects;
+}
+
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ return get_number_of_objects(bp,
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t connector_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object = get_bios_object(bp, id);
+
+ return get_dst_number_from_object(bp, object);
+}
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ uint32_t number;
+ uint16_t *id;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!src_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ number = get_src_obj_list(bp, object, &id);
+
+ if (number <= index)
+ return BP_RESULT_BADINPUT;
+
+ *src_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ uint32_t number;
+ uint16_t *id;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ number = get_dest_obj_list(bp, object, &id);
+
+ if (number <= index)
+ return BP_RESULT_BADINPUT;
+
+ *dest_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_COMMON_RECORD_HEADER *header;
+ ATOM_I2C_RECORD *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
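+ /* walk the object's record list; it ends at LAST_RECORD_TYPE or a zero-size record */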
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
+ /* get the I2C info */
+ record = (ATOM_I2C_RECORD *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->ucRecordSize;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO *info =
+ (ATOM_VOLTAGE_OBJECT_INFO *) address;
+
+ uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_VOLTAGE_OBJECT *object =
+ (ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+
+ if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+ (object->ucVoltageType &
+ VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+
+ *i2c_line = object->asControl.ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ voltage_current_object += object->ucSize;
+ }
+ return result;
+}
+
+static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+ uint32_t index,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->asVoltageObj[0]));
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+ (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+
+ if (object->sHeader.ucVoltageMode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->sHeader.ucVoltageType == index) {
+ *i2c_line = object->ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_I2C_ID_CONFIG_ACCESS *config;
+ ATOM_I2C_RECORD record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+
+ record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+ record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+ record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(VoltageObjectInfo))
+ return result;
+
+ voltage_info_address = bios_get_image(&bp->base, DATA_TABLES(VoltageObjectInfo), sizeof(ATOM_COMMON_TABLE_HEADER));
+
+ header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 1:
+ case 2:
+ result = get_voltage_ddc_info_v1(&i2c_line, header,
+ voltage_info_address);
+ break;
+ case 3:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+/* TODO: temporarily commented out to suppress a 'defined but not used' warning */
+#if 0
+static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+ struct bios_parser *bp,
+ uint8_t i2c_line, struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *table;
+ uint32_t i;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ offset += bp->object_info_tbl_offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+
+ if (!table)
+ return BP_RESULT_BADBIOSTABLE;
+
+ for (i = 0; i < table->ucNumberOfObjects; i++) {
+ object = &table->asObjects[i];
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ ATOM_COMMON_RECORD_HEADER *header =
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <=
+ header->ucRecordSize) {
+ ATOM_I2C_RECORD *record =
+ (ATOM_I2C_RECORD *) header;
+
+ if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+ continue;
+
+ /* get the I2C info */
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+ }
+ }
+
+ return BP_RESULT_NORECORD;
+}
+#endif
+
+static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_HPD_INT_RECORD *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
+ info->hpd_active = record->ucPlugged_PinState;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
+ header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
+ continue;
+
+ *record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* getBiosObject will return MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (bios_parser_get_device_tag_record(bp, object, &record)
+ != BP_RESULT_OK)
+ return BP_RESULT_NORECORD;
+
+ if (device_tag_index >= record->ucNumberOfDevice)
+ return BP_RESULT_NORECORD;
+
+ device_tag = &record->asDeviceTag[device_tag_index];
+
+ info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
+ info->dev_id =
+ device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(FirmwareInfo)) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(FirmwareInfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 1:
+ switch (revision.minor) {
+ case 4:
+ result = get_firmware_info_v1_4(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v2_1(bp, info);
+ break;
+ case 2:
+ result = get_firmware_info_v2_2(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information on the SS, report conservative
+ * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information on the SS, report conservative
+ * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
+ struct spread_spectrum_info internalSS;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmwareInfo)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
+ info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is center mode, the effective SS percentage
+ * is half of the percentage reported in the SS table,
+ * rounded up */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is center mode, the effective SS percentage
+ * is half of the percentage reported in the SS table,
+ * rounded up */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ struct spread_spectrum_info internal_ss;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is center mode, the effective SS percentage
+ * is half of the percentage reported in the SS table,
+ * rounded up */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is center mode, the effective SS percentage
+ * is half of the percentage reported in the SS table,
+ * rounded up */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* Remote Display */
+ info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
+
+ /* Is allowed minimum BL level */
+ info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
+ /* Used starting from CI */
+ info->smu_gpu_pll_output_freq =
+ (uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t table_index = 0;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ table_size =
+ (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &ss_table_header_include->asSpreadSpectrum[0];
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ for (i = 0; i < table_size; i++) {
+ if (tbl[i].ucClockIndication != (uint8_t) id)
+ continue;
+
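+ /* skip matching entries until the requested occurrence (index) is reached */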
+ if (table_index != index) {
+ table_index++;
+ continue;
+ }
+ /* VBIOS Version 3 introduced new defines with the same values as
+ * before, so use the new ones for Version 3.
+ * Field VBIOS images are unaffected since the define values are
+ * unchanged.
+ * #define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
+ * #define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
+
+ * Old VBIOS defines:
+ * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+ * #define ATOM_EXTERNAL_SS_MASK 0x00000002
+ */
+
+ if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.EXTERNAL = true;
+
+ if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.CENTER_MODE = true;
+
+ /* Older VBIOS (in the field) always provides the SS percentage in
+ * 0.01% units, so set the divider to 100 */
+ ss_info->spread_percentage_divider = 100;
+
+ /* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
+ if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
+ & tbl[i].ucSpreadSpectrumMode)
+ ss_info->spread_percentage_divider = 1000;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ ss_info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ ss_info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+
+ return BP_RESULT_OK;
+ }
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_adjust_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.adjust_display_pll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
+ struct dc_bios *dcb,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
+ bp, bp_params, enable);
+
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_program_display_engine_pll(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.program_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.program_clock(bp, bp_params);
+
+}
+
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+}
+
+static enum bp_result bios_parser_crt_control(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint8_t standard;
+
+ if (!bp->cmd_tbl.dac1_encoder_control &&
+ engine_id == ENGINE_ID_DACA)
+ return BP_RESULT_FAILURE;
+ if (!bp->cmd_tbl.dac2_encoder_control &&
+ engine_id == ENGINE_ID_DACB)
+ return BP_RESULT_FAILURE;
+ /* validate params */
+ switch (engine_id) {
+ case ENGINE_ID_DACA:
+ case ENGINE_ID_DACB:
+ break;
+ default:
+ /* unsupported engine */
+ return BP_RESULT_FAILURE;
+ }
+
+ standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+
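+ /* on enable, program the encoder before the output; on disable, reverse the order */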
+ if (enable) {
+ if (engine_id == ENGINE_ID_DACA) {
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ } else {
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ }
+ } else {
+ if (engine_id == ENGINE_ID_DACA) {
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ } else {
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
+ return (ATOM_HPD_INT_RECORD *) header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/**
+ * Get I2C information of input object id
+ *
+ * Search all records to find the ATOM_I2C_RECORD_TYPE record.
+ */
+static ATOM_I2C_RECORD *get_i2c_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!record_header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+ 0 == record_header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+ sizeof(ATOM_I2C_RECORD) <=
+ record_header->ucRecordSize) {
+ return (ATOM_I2C_RECORD *)record_header;
+ }
+
+ offset += record_header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. Except for ASIC_InternalSS_Info
+ * ver 3.1, there is only one entry for each signal/SS id. However, there is
+ * no plan to support multiple spread spectrum entries for EverGreen.
+ * @param [in] this
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ uint32_t clk_id_ss = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+ /* signal translation */
+ clk_id_ss = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ if (!index)
+ return get_ss_info_from_ss_info_table(bp, clk_id_ss,
+ ss_info);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ /* there cannot be more than one entry for Internal
+ * SS Info table version 2.1 */
+ if (!index)
+ return get_ss_info_from_tbl(bp, clk_id_ss,
+ ss_info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 3:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /* there cannot be more than one entry for the SS Info table */
+ return result;
+}
+
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info);
+
+/**
+ * get_ss_info_from_tbl
+ * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS.
+ * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param this
+ * @param id, spread spectrum info index
+ * @param pSSinfo, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ if (!ss_info) /* check for bad input: ss_info must not be NULL */
+ return BP_RESULT_BADINPUT;
+ /* for SS_Info table only support DP and LVDS */
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_info_from_ss_info_table(bp, id, ss_info);
+ else
+ return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
+ ss_info);
+}
+
+/**
+ * get_ss_info_from_internal_ss_info_tbl_V2_1
+ * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
+ * from the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @param pSSinfo, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t tbl_size, i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return result;
+
+ header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ memset(info, 0, sizeof(struct spread_spectrum_info));
+
+ tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &(header->asSpreadSpectrum[0]);
+ for (i = 0; i < tbl_size; i++) {
+ result = BP_RESULT_NORECORD;
+
+ if (tbl[i].ucClockIndication != (uint8_t)id)
+ continue;
+
+ if (ATOM_EXTERNAL_SS_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.EXTERNAL = true;
+ }
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.CENTER_MODE = true;
+ }
+ info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+
+}
+
+/**
+ * get_ss_info_from_ss_info_table
+ * Get spread spectrum information from the SS_Info table from the VBIOS.
+ * If the pointer to info is NULL, the caller only wants to know the number
+ * of entries that match the id.
+ * For the SS_Info table, there should not be more than one matching entry.
+ *
+ * @param [in] id, spread spectrum id
+ * @param [out] pSSinfo, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* check that the SS_Info table exists */
+ /* check for bad input: pSSinfo must not be NULL */
+ if (!DATA_TABLES(SS_Info) || !ss_info)
+ return result;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return result;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return result;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++) {
+ if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
+ continue;
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ if (ATOM_EXTERNAL_SS_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.EXTERNAL = true;
+
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.CENTER_MODE = true;
+
+ ss_info->type.STEP_AND_DELAY_INFO = true;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
+ ss_info->step_and_delay_info.delay =
+ tbl->asSS_Info[i].ucSS_Delay;
+ ss_info->step_and_delay_info.recommended_ref_div =
+ tbl->asSS_Info[i].ucRecommendedRef_Div;
+ ss_info->spread_spectrum_range =
+ (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
+
+ /* there will be only one entry for each display type in SS_info
+ * table */
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+}
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_COMMON_TABLE_HEADER *hdr;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_FAILURE;
+
+ hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
+
+ if (!hdr)
+ return BP_RESULT_BADBIOSTABLE;
+
+ switch (hdr->ucTableFormatRevision) {
+ case 1:
+ switch (hdr->ucTableContentRevision) {
+ case 0:
+ case 1:
+ case 2:
+ return get_embedded_panel_info_v1_2(bp, info);
+ case 3:
+ return get_embedded_panel_info_v1_3(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LVDS_INFO_V12 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LVDS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds =
+ GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != lvds->sHeader.ucTableFormatRevision
+ || 2 > lvds->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units*/
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ {
+ uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
+ /* Get minimum supported refresh rate*/
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+
+ /*Drr panel support can be reported by VBIOS*/
+ if (LCDPANEL_CAP_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
+ lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
+
+ if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.SPATIAL = true;
+
+ if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.TEMPORAL = true;
+
+ if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.API_ENABLED = true;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LCD_INFO_V13 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!((1 == lvds->sHeader.ucTableFormatRevision)
+ && (3 <= lvds->sHeader.ucTableContentRevision)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally*/
+ /* doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders*/
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ /* Drr panel support can be reported by VBIOS*/
+ if (LCDPANEL_CAP_V13_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ /* Get supported refresh rate*/
+ if (info->drr_enabled == 1) {
+ uint8_t min_rr =
+ lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
+ uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
+
+ if (min_rr != 0) {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ } else {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+ }
+
+ if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
+ lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
+
+ return BP_RESULT_OK;
+}
+
+/**
+ * bios_parser_get_encoder_cap_info
+ *
+ * @brief
+ * Get encoder capability information of input object id
+ *
+ * @param object_id, Object id
+ * @param info, encoder cap information structure
+ *
+ * @return Bios parser result code
+ *
+ */
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_ENCODER_CAP_RECORD_V2 *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_EN = record->usHBR2En;
+ info->DP_HBR3_EN = record->usHBR3En;
+ info->HDMI_6GB_EN = record->usHDMI6GEn;
+ return BP_RESULT_OK;
+}
+
+/**
+ * get_encoder_cap_record
+ *
+ * @brief
+ * Get encoder cap record for the object
+ *
+ * @param object, ATOM object
+ *
+ * @return atom encoder cap record
+ *
+ * @note
+ * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ */
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_ENCODER_CAP_RECORD_V2) <= header->ucRecordSize)
+ return (ATOM_ENCODER_CAP_RECORD_V2 *)header;
+ }
+
+ return NULL;
+}
+
+static uint32_t get_ss_entry_number(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id);
+
+/**
+ * bios_parser_get_ss_entry_number
+ * Get number of SpreadSpectrum entries from the ASIC_InternalSS_Info table in
+ * the VBIOS that match the SSid (to be converted from signal)
+ *
+ * @param[in] signal, ASSignalType to be converted to SSid
+ * @return number of SS entries that match the signal
+ */
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint32_t ss_id = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ ss_id = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ return get_ss_entry_number(bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ return
+ get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * get_ss_entry_number_from_ss_info_tbl
+ * Get number of spread spectrum entries from the SS_Info table in the VBIOS.
+ *
+ * @note There can only be one entry for each id in the SS_Info table
+ *
+ * @param [in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t number = 0;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* Check that the SS_Info table exists */
+ if (!DATA_TABLES(SS_Info))
+ return number;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+ DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return number;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS: {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return number;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++)
+ if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
+ number = 1;
+ break;
+ }
+
+ return number;
+}
+
+/**
+ * get_ss_entry_number
+ * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS.
+ * There can be no more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
+{
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_entry_number_from_ss_info_tbl(bp, id);
+
+ return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
+ * Get number of spread spectrum entries from the ASIC_InternalSS_Info table
+ * Ver 2.1 from the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return 0;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &header_include->asSpreadSpectrum[0];
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
+ * Get number of SpreadSpectrum entries from the ASIC_InternalSS_Info table of
+ * the VBIOS that match the given id
+ *
+ * @param[in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ uint32_t number = 0;
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return number;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &header_include->asSpreadSpectrum[0];
+
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ number++;
+
+ return number;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO PIN INFO, we need:
+ * 1. get the GPIO_ID from other object table, see GetHPDInfo()
+ * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
+ * offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_GPIO_PIN_LUT *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(GPIO_Pin_LUT))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ for (i = 0; i < count; ++i) {
+ if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
+ continue;
+
+ info->offset =
+ (uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->asGPIO_Pin[i].ucGpioPinBitShift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info)
+{
+ ATOM_GPIO_I2C_INFO *header;
+ uint32_t count = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(GPIO_I2C_Info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ if (count < record->sucI2cId.bfI2C_LineMux)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* get the GPIO_I2C_INFO */
+ info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
+ info->i2c_line = record->sucI2cId.bfI2C_LineMux;
+ info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
+ info->i2c_slave_address = record->ucI2CAddr;
+
+ info->gpio_info.clk_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
+ info->gpio_info.clk_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
+ info->gpio_info.clk_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
+ info->gpio_info.data_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
+ info->gpio_info.data_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
+ info->gpio_info.data_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
+ info->gpio_info.data_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
+
+ info->gpio_info.clk_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
+ info->gpio_info.clk_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkEnShift;
+ info->gpio_info.clk_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
+ info->gpio_info.clk_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
+ info->gpio_info.data_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
+ info->gpio_info.data_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataEnShift;
+ info->gpio_info.data_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
+ info->gpio_info.data_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
+
+ return BP_RESULT_OK;
+}
+
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ uint32_t offset;
+ ATOM_OBJECT_TABLE *tbl;
+ uint32_t i;
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_CONNECTOR:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_ROUTER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_GENERIC:
+ if (bp->object_info_tbl.revision.minor < 3)
+ return NULL;
+ offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ offset += bp->object_info_tbl_offset;
+
+ tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+ if (!tbl)
+ return NULL;
+
+ for (i = 0; i < tbl->ucNumberOfObjects; i++)
+ if (dal_graphics_object_id_is_equal(id,
+ object_id_from_bios_object_id(
+ le16_to_cpu(tbl->asObjects[i].usObjectID))))
+ return &tbl->asObjects[i];
+
+ return NULL;
+}
+
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+ if ((!number) || (!*number))
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+
+ if (!number)
+ return 0;
+
+ return *number;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+ struct device_id result_device_id;
+
+ switch (device_id) {
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_LCD2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_CRT2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DEVICE_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0 which is invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
+ tbl_revision->major =
+ (uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
+ tbl_revision->minor =
+ (uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
+}
+
+static uint32_t signal_to_ss_id(enum as_signal_type signal)
+{
+ uint32_t clk_id_ss = 0;
+
+ switch (signal) {
+ case AS_SIGNAL_TYPE_DVI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
+ break;
+ case AS_SIGNAL_TYPE_LVDS:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
+ break;
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
+ break;
+ default:
+ break;
+ }
+ return clk_id_ss;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CRT:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CV_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_TV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_TV1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ };
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
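+/*
+ * Illustrative sketch (not tied to a specific caller): for the device IDs that
+ * both helpers handle, device_type_from_device_id() and
+ * get_support_mask_for_device_id() are inverses of each other, e.g.
+ *
+ *	struct device_id id = device_type_from_device_id(ATOM_DEVICE_DFP3_SUPPORT);
+ *	uint32_t mask = get_support_mask_for_device_id(id);
+ *
+ * yields id.device_type == DEVICE_TYPE_DFP, id.enum_id == 3 and
+ * mask == ATOM_DEVICE_DFP3_SUPPORT again.
+ */
+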
+/**
+ * HwContext interface for writing MM registers
+ */
+
+static bool i2c_read(
+ struct bios_parser *bp,
+ struct graphics_object_i2c_info *i2c_info,
+ uint8_t *buffer,
+ uint32_t length)
+{
+ struct ddc *ddc;
+ uint8_t offset[2] = { 0, 0 };
+ bool result = false;
+ struct i2c_command cmd;
+ struct gpio_ddc_hw_info hw_info = {
+ i2c_info->i2c_hw_assist,
+ i2c_info->i2c_line };
+
+ ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+ i2c_info->gpio_info.clk_a_register_index,
+ (1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+
+ if (!ddc)
+ return result;
+
+ /*Using SW engine */
+ cmd.engine = I2C_COMMAND_ENGINE_SW;
+ cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ {
+ struct i2c_payload payloads[] = {
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = offset,
+ .length = sizeof(offset),
+ .write = true
+ },
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = buffer,
+ .length = length,
+ .write = false
+ }
+ };
+
+ cmd.payloads = payloads;
+ cmd.number_of_payloads = ARRAY_SIZE(payloads);
+
+ /* TODO route this through drm i2c_adapter */
+ result = dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc,
+ &cmd);
+ }
+
+ dal_gpio_destroy_ddc(&ddc);
+
+ return result;
+}
+
+/**
+ * Read external display connection info table through i2c.
+ * Validate the GUID and checksum.
+ *
+ * @return enum bp_result whether all data was successfully read
+ */
+static enum bp_result get_ext_display_connection_info(
+ struct bios_parser *bp,
+ ATOM_OBJECT *opm_object,
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+{
+ bool config_tbl_present = false;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ uint32_t i = 0;
+
+ if (opm_object == NULL)
+ return BP_RESULT_BADINPUT;
+
+ i2c_record = get_i2c_record(bp, opm_object);
+
+ if (i2c_record != NULL) {
+ ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+ struct graphics_object_i2c_info i2c_info;
+
+ gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+ bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+
+ if (NULL == gpio_i2c_header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+ BP_RESULT_OK)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (i2c_read(bp,
+ &i2c_info,
+ (uint8_t *)ext_display_connection_info_tbl,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+ config_tbl_present = true;
+ }
+ }
+
+ /* Validate GUID */
+ if (config_tbl_present)
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+ if (ext_display_connection_info_tbl->ucGuid[i]
+ != ext_display_connection_guid[i]) {
+ config_tbl_present = false;
+ break;
+ }
+ }
+
+ /* Validate checksum */
+ if (config_tbl_present) {
+ uint8_t check_sum = 0;
+ uint8_t *buf =
+ (uint8_t *)ext_display_connection_info_tbl;
+
+ for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+ i++) {
+ check_sum += buf[i];
+ }
+
+ if (check_sum != 0)
+ config_tbl_present = false;
+ }
+
+ if (config_tbl_present)
+ return BP_RESULT_OK;
+ else
+ return BP_RESULT_FAILURE;
+}
+
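+/*
+ * Illustrative sketch of the checksum scheme validated above: the table is
+ * considered valid when all of its bytes, including the checksum byte itself,
+ * sum to zero modulo 256. A conforming table would therefore set
+ *
+ *	ucChecksum = (uint8_t)(0x100 - (sum_of_all_other_bytes & 0xff));
+ *
+ * so that the accumulation loop above ends at zero.
+ */
+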
+/*
+ * Gets the first device ID in the same group as the given ID for enumerating.
+ * For instance, if any DFP device ID is passed, returns the device ID for DFP1.
+ *
+ * Returns the first device ID in the same group as the passed device ID, or 0
+ * if no matching device group is found.
+ */
+static uint32_t enum_first_device_id(uint32_t dev_id)
+{
+ /* Return the first in the group that this ID belongs to. */
+ if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+ return ATOM_DEVICE_TV1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+ return ATOM_DEVICE_CV_SUPPORT;
+
+ /* No group found for this device ID. */
+
+ dm_error("%s: incorrect input %d\n", __func__, dev_id);
+ /* No matching support flag for given device ID */
+ return 0;
+}
+
+/*
+ * Gets the next device ID in the group for a given device ID.
+ *
+ * dev_id - The current device ID being enumerated.
+ *
+ * Returns the next device ID in the group, or 0 if no further device exists.
+ */
+static uint32_t enum_next_dev_id(uint32_t dev_id)
+{
+ /* Get next device ID in the group. */
+ switch (dev_id) {
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ }
+
+ /* Done enumerating through devices. */
+ return 0;
+}
+
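+/*
+ * Illustrative sketch of how the two enumeration helpers above are meant to
+ * be combined (this is the pattern used by get_patched_device_tag() below):
+ *
+ *	uint32_t dev_id = enum_first_device_id(ATOM_DEVICE_DFP3_SUPPORT);
+ *
+ *	while (dev_id != 0) {
+ *		(visits ATOM_DEVICE_DFP1_SUPPORT through ATOM_DEVICE_DFP6_SUPPORT)
+ *		dev_id = enum_next_dev_id(dev_id);
+ *	}
+ */
+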
+/*
+ * Returns the new device tag record for patched BIOS object.
+ *
+ * [IN] ext_display_path - External display path to copy device tag from.
+ * [IN] device_support - Bit vector of device ID support flags.
+ * [OUT] device_tag - Device tag structure to fill with patched data.
+ *
+ * Returns true if a compatible device ID was found, false otherwise.
+ */
+static bool get_patched_device_tag(
+ struct bios_parser *bp,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t device_support,
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+{
+ uint32_t dev_id;
+ /* Use fallback behaviour if not supported. */
+ if (!bp->remap_device_tags) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+ return true;
+ }
+
+ /* Find the first unused in the same group. */
+ dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+ while (dev_id != 0) {
+ /* Assign this device ID if supported. */
+ if ((device_support & dev_id) != 0) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+ return true;
+ }
+
+ dev_id = enum_next_dev_id(dev_id);
+ }
+
+ /* No compatible device ID found. */
+ return false;
+}
+
+/*
+ * Adds a device tag to a BIOS object's device tag record if a matching
+ * device ID is supported.
+ *
+ * object - Pointer to the BIOS object to add the device tag to.
+ * ext_display_path - Display path to retrieve the base device ID from.
+ * device_support - Pointer to bit vector of supported device IDs.
+ */
+static void add_device_tag_from_ext_display_path(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t *device_support)
+{
+ /* Get device tag record for object. */
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+ enum bp_result result =
+ bios_parser_get_device_tag_record(
+ bp, object, &device_tag_record);
+
+ if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+ && (result == BP_RESULT_OK)) {
+ uint8_t index;
+
+ if ((device_tag_record->ucNumberOfDevice == 1) &&
+ (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+ /*Workaround bug in current VBIOS releases where
+ * ucNumberOfDevice = 1 but there is no actual device
+ * tag data. This w/a is temporary until the updated
+ * VBIOS is distributed. */
+ device_tag_record->ucNumberOfDevice =
+ device_tag_record->ucNumberOfDevice - 1;
+ }
+
+ /* Attempt to find a matching device ID. */
+ index = device_tag_record->ucNumberOfDevice;
+ device_tag = &device_tag_record->asDeviceTag[index];
+ if (get_patched_device_tag(
+ bp,
+ ext_display_path,
+ *device_support,
+ device_tag)) {
+ /* Update cached device support to remove assigned ID.
+ */
+ *device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+ device_tag_record->ucNumberOfDevice++;
+ }
+ }
+}
+
+/*
+ * Read out a single EXT_DISPLAY_PATH from the external display connection info
+ * table. The specific entry in the table is determined by the enum_id passed
+ * in.
+ *
+ * Returns the EXT_DISPLAY_PATH describing a single configuration table entry,
+ * or NULL if the index is out of range.
+ */
+
+#define INVALID_CONNECTOR 0xffff
+
+static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+ uint32_t bios_object_id)
+{
+ EXT_DISPLAY_PATH *ext_display_path;
+ uint32_t ext_display_path_index =
+ ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+
+ if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+ return NULL;
+
+ ext_display_path = &config_table->sPath[ext_display_path_index];
+
+ if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+ ext_display_path->usDeviceConnector = cpu_to_le16(0);
+
+ return ext_display_path;
+}
+
+/*
+ * Get AUX/DDC information of input object id
+ *
+ * Search all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get HPD pin information of input object id
+ *
+ * Search all records to find the ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Patch the VBIOS connector info table with data from an external
+ * display connection info table. This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module). With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ *
+ */
+static enum bp_result patch_bios_image_from_ext_display_connection_info(
+ struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+ EXT_DISPLAY_PATH *ext_display_path;
+ ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+ ATOM_HPD_INT_RECORD *hpd_record = NULL;
+ ATOM_OBJECT_TABLE *encoder_table;
+ uint32_t encoder_table_offset;
+ ATOM_OBJECT *opm_object = NULL;
+ uint32_t i = 0;
+ struct graphics_object_id opm_object_id =
+ dal_graphics_object_id_init(
+ GENERIC_ID_MXM_OPM,
+ ENUM_ID_1,
+ OBJECT_TYPE_GENERIC);
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+ uint32_t cached_device_support =
+ le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+
+ uint32_t dst_number;
+ uint16_t *dst_object_id_list;
+
+ opm_object = get_bios_object(bp, opm_object_id);
+ if (!opm_object)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(&ext_display_connection_info_tbl, 0,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+
+ connector_tbl_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Read Connector info table from EEPROM through i2c */
+ if (get_ext_display_connection_info(bp,
+ opm_object,
+ &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+
+ dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
+ "%s: Failed to read Connection Info Table", __func__);
+ return BP_RESULT_UNSUPPORTED;
+ }
+
+ /* Get pointer to AUX/DDC and HPD LUTs */
+ aux_ddc_lut_record =
+ get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+ hpd_pin_lut_record =
+ get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+
+ if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+ return BP_RESULT_UNSUPPORTED;
+
+ /* Cache support bits for currently unmapped device types. */
+ if (bp->remap_device_tags) {
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+ uint32_t j;
+ /* Remove support for all non-MXM connectors. */
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM == object_id.id))
+ continue;
+
+ /* Remove support for all device tags. */
+ if (bios_parser_get_device_tag_record(
+ bp, object, &dev_tag_record) != BP_RESULT_OK)
+ continue;
+
+ for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+ &dev_tag_record->asDeviceTag[j];
+ cached_device_support &=
+ ~le16_to_cpu(device_tag->usDeviceID);
+ }
+ }
+ }
+
+ /* Find all MXM connector objects and patch them with connector info
+ * from the external display connection info table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on the enum
+ * id. */
+ ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(object->usObjectID));
+ if (!ext_display_path)
+ return BP_RESULT_FAILURE;
+
+ /* Patch device connector ID */
+ object->usObjectID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+
+ /* Patch device tag, ulACPIDeviceEnum. */
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+
+ /* Patch HPD info */
+ if (ext_display_path->ucExtHPDPINLutIndex <
+ MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+ hpd_record = get_hpd_record(bp, object);
+ if (hpd_record) {
+ uint8_t index =
+ ext_display_path->ucExtHPDPINLutIndex;
+ hpd_record->ucHPDIntGPIOID =
+ hpd_pin_lut_record->ucHPDPINMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid hpd record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Patch I2C/AUX info */
+ if (ext_display_path->ucExtAUXDDCLutIndex <
+ MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+ i2c_record = get_i2c_record(bp, object);
+ if (i2c_record) {
+ uint8_t index =
+ ext_display_path->ucExtAUXDDCLutIndex;
+ i2c_record->sucI2cId =
+ aux_ddc_lut_record->ucAUXDDCMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid I2C record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Merge with other MXM connectors that map to the same physical
+ * connector. */
+ for (j = i + 1;
+ j < connector_tbl->ucNumberOfObjects; j++) {
+ ATOM_OBJECT *next_object;
+ struct graphics_object_id next_object_id;
+ EXT_DISPLAY_PATH *next_ext_display_path;
+
+ next_object = &connector_tbl->asObjects[j];
+ next_object_id = object_id_from_bios_object_id(
+ le16_to_cpu(next_object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) &&
+ (CONNECTOR_ID_MXM == next_object_id.id))
+ continue;
+
+ next_ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(next_object->usObjectID));
+
+ if (next_ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ /* Merge if using same connector. */
+ if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+ le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+ (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+ /* Clear duplicate connector from table. */
+ next_object->usObjectID = cpu_to_le16(0);
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+ }
+ }
+ }
+
+ /* Find all encoders which have an MXM object as their destination.
+ * Replace the MXM object with the real connector Id from the external
+ * display connection info table */
+
+ encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &encoder_table->asObjects[i];
+
+ dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+
+ for (j = 0; j < dst_number; j++) {
+ object_id = object_id_from_bios_object_id(
+ dst_object_id_list[j]);
+
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on
+ * the enum id. */
+ ext_display_path =
+ get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ dst_object_id_list[j]);
+
+ if (ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ dst_object_id_list[j] =
+ le16_to_cpu(ext_display_path->usDeviceConnector);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table. This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module). With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ *
+ */
+
+static void process_ext_display_connection_info(struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ bool mxm_connector_found = false;
+ bool null_entry_found = false;
+ uint32_t i = 0;
+
+ connector_tbl_offset = bp->object_info_tbl_offset +
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Look for MXM connectors to determine whether we need to patch the VBIOS
+ * connector info table. Look for null entries to determine whether we
+ * need to compact the connector table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+ (CONNECTOR_ID_MXM == object_id.id)) {
+ /* Once we found MXM connector - we can break */
+ mxm_connector_found = true;
+ break;
+ } else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+ /* We need to continue looping - to check if an MXM
+ * connector is present */
+ null_entry_found = true;
+ }
+ }
+
+ /* Patch BIOS image */
+ if (mxm_connector_found || null_entry_found) {
+ uint32_t connectors_num = 0;
+ uint8_t *original_bios;
+ /* Step 1: Replace bios image with the new copy which will be
+ * patched */
+ bp->base.bios_local_image = kzalloc(bp->base.bios_size,
+ GFP_KERNEL);
+ if (bp->base.bios_local_image == NULL) {
+ BREAK_TO_DEBUGGER();
+ /* Failed to alloc bp->base.bios_local_image */
+ return;
+ }
+
+ memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+ original_bios = bp->base.bios;
+ bp->base.bios = bp->base.bios_local_image;
+ connector_tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Step 2: (only if MXM connector found) Patch BIOS image with
+ * info from external module */
+ if (mxm_connector_found &&
+ patch_bios_image_from_ext_display_connection_info(bp) !=
+ BP_RESULT_OK) {
+ /* Patching the bios image has failed. We will copy
+ * the original image provided again and afterwards
+ * only remove null entries */
+ memmove(
+ bp->base.bios_local_image,
+ original_bios,
+ bp->base.bios_size);
+ }
+
+ /* Step 3: Compact connector table (remove null entries, valid
+ * entries moved to beginning) */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+
+ if (OBJECT_TYPE_CONNECTOR != object_id.type)
+ continue;
+
+ if (i != connectors_num) {
+ memmove(
+ &connector_tbl->
+ asObjects[connectors_num],
+ object,
+ sizeof(ATOM_OBJECT));
+ }
+ ++connectors_num;
+ }
+ connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+ }
+}
+
+static void bios_parser_post_init(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ process_ext_display_connection_info(bp);
+}
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * update critical state bit in VBIOS scratch register
+ *
+ * @param
+ * state - true to set, false to reset the critical state bit
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v8(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
+ uint32_t i;
+
+ info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (info_v8 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+ info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
+ info->gpu_cap_info =
+ le32_to_cpu(info_v8->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v8->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v8->usExtDispConnInfoOffset);
+ info->memory_type = info_v8->ucMemoryType;
+ info->ma_channel_number = info_v8->ucUMAChannelNumber;
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v8->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk =
+ le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v8->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v8->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v8->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v8->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v8->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v8->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v8->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
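+/*
+ * Illustrative sketch (local variable names here are only for illustration):
+ * decoding the system_config bits documented in get_integrated_info_v8() and
+ * get_integrated_info_v9() above:
+ *
+ *	bool pcie_power_gating  = (info->system_config & 0x1) != 0;
+ *	bool ddr_pll_shutdown   = (info->system_config & 0x2) != 0;
+ *	bool ddr_pll_power_down = (info->system_config & 0x4) != 0;
+ */
+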
+/*
+ * get_integrated_info_v9
+ *
+ * @brief
+ * Get V9 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v9(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
+ uint32_t i;
+
+ info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (!info_v9)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
+ info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
+ info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
+ info->memory_type = info_v9->ucMemoryType;
+ info->ma_channel_number = info_v9->ucUMAChannelNumber;
+ info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
+ info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
+ info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v9->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v9->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v9->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v9->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 8:
+ result = get_integrated_info_v8(bp, info);
+ break;
+ case 9:
+ result = get_integrated_info_v9(bp, info);
+ break;
+ default:
+ return result;
+
+ }
+ }
+
+ /* Sort voltage table from low to high*/
+ if (result == BP_RESULT_OK) {
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (
+ info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk) {
+ /* swap j and j - 1*/
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+/******************************************************************************/
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ /* bios scratch register communication */
+ .is_accelerated_mode = bios_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+ /* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .crt_control = bios_parser_crt_control, /* not used in DAL3. keep for now in case we need to support VGA on Bonaire */
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */
+
+ .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */
+
+ .program_display_engine_pll = bios_parser_program_display_engine_pll,
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ /* SW init and patch */
+ .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
+
+ .bios_parser_destroy = bios_parser_destroy,
+};
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ ATOM_ROM_HEADER *rom_header = NULL;
+ ATOM_OBJECT_HEADER *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
+ bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+ bp->base.bios_local_image = NULL;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
+ get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
+ if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(ATOM_MASTER_DATA_TABLE,
+ rom_header->usMasterDataTableOffset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->sHeader,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 3) {
+ ATOM_OBJECT_HEADER_V3 *tbl_v3;
+
+ tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
+ bp->object_info_tbl_offset);
+ if (!tbl_v3)
+ return false;
+
+ bp->object_info_tbl.v1_3 = tbl_v3;
+ } else if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 1)
+ bp->object_info_tbl.v1_1 = object_info_tbl;
+ else
+ return false;
+
+ dal_bios_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+/******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
new file mode 100644
index 000000000000..d6f16275048f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_H__
+#define __DAL_BIOS_PARSER_H__
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
new file mode 100644
index 000000000000..1ee1717f2e6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "dc_bios_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table2.h"
+
+#include "bios_parser_helper.h"
+#include "command_table_helper2.h"
+#include "bios_parser2.h"
+#include "bios_parser_types_internal2.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+#define LAST_RECORD_TYPE 0xff
+
+
+struct i2c_id_config_access {
+ uint8_t bfI2C_LineMux:4;
+ uint8_t bfHW_EngineID:3;
+ uint8_t bfHW_Capable:1;
+ uint8_t ucAccess;
+};
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
+
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
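+/* Destroy a parser made by firmware_parser_create() and clear *dcb. */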
+static void firmware_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static void get_atom_data_table_revision(
+ struct atom_common_table_header *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0 which is invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
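+	/* only the low 6 bits of each revision field are used */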
+ tbl_revision->major =
+ (uint32_t) atom_data_tbl->format_revision & 0x3f;
+ tbl_revision->minor =
+ (uint32_t) atom_data_tbl->content_revision & 0x3f;
+}
+
+/* The BIOS object table display path is per connector.
+ * There are extra paths that are not for connectors; the BIOS fills their
+ * encoder IDs with 0.
+ */
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int count = 0;
+ unsigned int i;
+
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ if (bp->object_info_tbl.v1_4->display_path[i].encoderobjid != 0)
+ count++;
+ }
+ return count;
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ if (bp->object_info_tbl.v1_4->number_of_path > i)
+ object_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ struct object_info_table *tbl = &bp->object_info_tbl;
+ struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
+
+ if (v1_4->number_of_path > i) {
+ /* If display_objid is generic object id, the encoderObj
+ * /extencoderobjId should be 0
+ */
+ if (v1_4->display_path[i].encoderobjid != 0 &&
+ v1_4->display_path[i].display_objid != 0)
+ object_id = object_id_from_bios_object_id(
+ v1_4->display_path[i].display_objid);
+ }
+
+ return object_id;
+}
+
+
+/* TODO: GetNumberOfSrc*/
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ /* connector has 1 Dest, encoder has 0 Dest */
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ return 0;
+ case OBJECT_TYPE_CONNECTOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* removed getSrcObjList, getDestObjList*/
+
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!src_object_id)
+ return bp_result;
+
+ switch (object_id.type) {
+ /* Encoder's Source is GPU. BIOS does not provide GPU, since all
+ * displaypaths point to same GPU (0x1100). Hardcode GPU object type
+ */
+ case OBJECT_TYPE_ENCODER:
+		/* TODO: since the number of sources must be less than 2,
+		 * we should break out of the loop once a match is found.
+		 * The DAL2 implementation may need the same change.
+		 */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(0x1100);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ case OBJECT_TYPE_CONNECTOR:
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id == obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ switch (object_id.type) {
+ case OBJECT_TYPE_ENCODER:
+		/* TODO: since the number of sources must be less than 2,
+		 * we should break out of the loop once a match is found.
+		 * The DAL2 implementation may need the same change.
+		 */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *dest_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+
+/* from graphics_object_id, find display path which includes the object_id */
+static struct atom_display_object_path_v2 *get_bios_object(
+ struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ unsigned int i;
+ struct graphics_object_id obj_id = {0};
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
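+		/* no encoder match: fall through and search the connector/generic paths */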
+ case OBJECT_TYPE_CONNECTOR:
+ case OBJECT_TYPE_GENERIC:
+		/* Both Generic and Connector Object IDs
+		 * are stored in display_objid
+		 */
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].display_objid
+ );
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ default:
+ return NULL;
+ }
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ struct atom_display_object_path_v2 *object;
+ struct atom_common_record_header *header;
+ struct atom_i2c_record *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = object->disp_recordoffset + bp->object_info_tbl_offset;
+
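+	/* walk the records attached to this display path until the terminator */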
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_I2C_RECORD_TYPE
+ && sizeof(struct atom_i2c_record) <=
+ header->record_size) {
+ /* get the I2C info */
+ record = (struct atom_i2c_record *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->record_size;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(
+ struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info)
+{
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ unsigned int table_index = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_assignment) >
+ le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+	/* TODO: handle other content revisions? */
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+
+ table_index = record->i2c_id & I2C_HW_LANE_MUX;
+
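+	/* If the index from i2c_id is out of range, fall back to scanning the
+	 * LUT for an entry whose gpio_id matches the same HW/engine/lane bits.
+	 */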
+ if (count < table_index) {
+ bool find_valid = false;
+
+ for (table_index = 0; table_index < count; table_index++) {
+ if (((record->i2c_id & I2C_HW_CAP) == (
+ header->gpio_pin[table_index].gpio_id &
+ I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
+ }
+ }
+		/* If we don't find the entry we are looking for,
+		 * return BP_RESULT_BADBIOSTABLE.
+		 */
+ if (find_valid == false)
+ return BP_RESULT_BADBIOSTABLE;
+ }
+
+ /* get the GPIO_I2C_INFO */
+ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
+ info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
+ info->i2c_engine_id = (record->i2c_id & I2C_HW_ENGINE_ID_MASK) >> 4;
+ info->i2c_slave_address = record->i2c_slave_addr;
+
+ /* TODO: check how to get register offset for en, Y, etc. */
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(
+ header->gpio_pin[table_index].data_a_reg_index);
+ info->gpio_info.clk_a_shift =
+ header->gpio_pin[table_index].gpio_bitshift;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_voltage_ddc_info_v4(
+ uint8_t *i2c_line,
+ uint32_t index,
+ struct atom_common_table_header *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ struct atom_voltage_objects_info_v4_1 *info =
+ (struct atom_voltage_objects_info_v4_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->voltage_object[0]));
+
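+	/* walk the variable-sized voltage objects within the table bounds */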
+ while ((address + le16_to_cpu(header->structuresize)) >
+ voltage_current_object) {
+ struct atom_i2c_voltage_object_v4 *object =
+ (struct atom_i2c_voltage_object_v4 *)
+ voltage_current_object;
+
+ if (object->header.voltage_mode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->header.voltage_type == index) {
+ *i2c_line = object->i2c_id ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object +=
+ le16_to_cpu(object->header.object_size);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct i2c_id_config_access *config;
+ struct atom_i2c_record record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
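+	/* repack the channel id bitfields into an i2c record for the LUT lookup */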
+ config = (struct i2c_id_config_access *) &i2c_channel_id;
+
+ record.i2c_id = config->bfHW_Capable;
+ record.i2c_id |= config->bfI2C_LineMux;
+ record.i2c_id |= config->bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(voltageobject_info))
+ return result;
+
+ voltage_info_address = bios_get_image(&bp->base,
+ DATA_TABLES(voltageobject_info),
+ sizeof(struct atom_common_table_header));
+
+ header = (struct atom_common_table_header *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v4(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_hpd_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_hpd_int_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->pin_id;
+ info->hpd_active = record->plugin_pin_state;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct atom_hpd_int_record *get_hpd_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->disp_recordoffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_HPD_INT_RECORD_TYPE
+ && sizeof(struct atom_hpd_int_record) <=
+ header->record_size)
+ return (struct atom_hpd_int_record *) header;
+
+ offset += header->record_size;
+ }
+
+ return NULL;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GPIO pin information for the given GPIO ID
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO pin info, we need to:
+ * 1. get the GPIO_ID from another object table, see GetHPDInfo()
+ * 2. search all records in DATA_TABLE.GPIO_Pin_LUT
+ * to get the registerA offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_lut_v2_1)
+ > le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+	/* Temporarily hard-code the GPIO pin info */
+#if defined(FOR_SIMNOW_BOOT)
+ {
+ struct atom_gpio_pin_assignment gpio_pin[8] = {
+ {0x5db5, 0, 0, 1, 0},
+ {0x5db5, 8, 8, 2, 0},
+ {0x5db5, 0x10, 0x10, 3, 0},
+ {0x5db5, 0x18, 0x14, 4, 0},
+ {0x5db5, 0x1A, 0x18, 5, 0},
+ {0x5db5, 0x1C, 0x1C, 6, 0},
+ };
+
+ count = 6;
+ memmove(header->gpio_pin, gpio_pin, sizeof(gpio_pin));
+ }
+#else
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+#endif
+ for (i = 0; i < count; ++i) {
+ if (header->gpio_pin[i].gpio_id != gpio_id)
+ continue;
+
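+		/* derive the Y/EN/MASK offsets and masks from the data_a register entry */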
+ info->offset =
+ (uint32_t) le16_to_cpu(
+ header->gpio_pin[i].data_a_reg_index);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->gpio_pin[i].gpio_bitshift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+ struct device_id result_device_id;
+
+ result_device_id.raw_device_tag = device_id;
+
+ switch (device_id) {
+ case ATOM_DISPLAY_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DISPLAY_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DISPLAY_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DISPLAY_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DISPLAY_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* getBiosObject will return MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ info->acpi_device = 0; /* BIOS no longer provides this */
+ info->dev_id = device_type_from_device_id(object->device_tag);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v4_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+	/* TODO: LVDS no longer supported? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dp_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dp_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+		/* atom_firmware: DAL only gets data from the dce_info table.
+		 * If data within smu_info is needed by DAL, the VBIOS should
+		 * copy it into dce_info.
+		 */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static enum bp_result get_ss_info_v4_2(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_1 *smu_info = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!DATA_TABLES(smu_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_1, DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+	/* TODO: LVDS no longer supported? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ smu_info->gpuclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+		/* atom_firmware: DAL only gets data from the dce_info table.
+		 * If data within smu_info is needed by DAL, the VBIOS should
+		 * copy it into dce_info.
+		 */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or the SS_Info table in the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. For ASIC_InternalSS_Info
+ * ver 3.1, there is only one entry for each signal/ss id. However, there is
+ * no plan to support multiple spread spectrum entries for Evergreen
+ * @param [in] this
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, spread spectrum information structure,
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v4_1(bp, signal, index, ss_info);
+ case 2:
+ return get_ss_info_v4_2(bp, signal, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+	/* there cannot be more than one entry in the SS Info table */
+ return result;
+}
+
+static enum bp_result get_embedded_panel_info_v2_1(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ struct lcd_info_v2_1 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(struct lcd_info_v2_1, DATA_TABLES(lcd_info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+	/* TODO: previously v1_3, should now be v2_1 */
+ if (!((lvds->table_header.format_revision == 2)
+ && (lvds->table_header.content_revision >= 1)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->lcd_timing.h_active);
+ /* usHBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.h_blanking_time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->lcd_timing.v_active);
+ /* usVBlanking_Time includes borders, so we should really be
+	 * subtracting borders during this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.v_blanking_time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.h_sync_offset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->lcd_timing.h_sync_width);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.v_sync_offset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->lcd_timing.v_syncwidth);
+ info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
+ info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY);
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2);
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2);
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ !!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC);
+ info->lcd_timing.misc_info.INTERLACE =
+ !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
+
+ /* not provided by VBIOS*/
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
+ /* not provided by VBIOS*/
+ info->ss_id = 0;
+
+ info->realtek_eDPToLVDS =
+ !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_FAILURE;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(lcd_info));
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ get_atom_data_table_revision(header, &tbl_revision);
+
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_embedded_panel_info_v2_1(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_LCD1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DISPLAY_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DISPLAY_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DISPLAY_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DISPLAY_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DISPLAY_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ };
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) &
+ mask) != 0;
+}
+
+static void bios_parser_post_init(
+ struct dc_bios *dcb)
+{
+ /* TODO for OPM module. Need implement later */
+}
+
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ /* TODO: DAL2 atomfirmware implementation does not need this.
+ * why DAL3 need this?
+ */
+ return 1;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static unsigned int bios_parser_get_smu_clock_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.get_smu_clock_info)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.get_smu_clock_info(bp);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
+static bool bios_parser_is_accelerated_mode(
+ struct dc_bios *dcb)
+{
+ return bios_is_accelerated_mode(dcb);
+}
+
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * update critical state bit in VBIOS scratch register
+ *
+ * @param
+ * state - true to set, false to reset the critical state bit
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ struct atom_common_table_header *header;
+
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(firmwareinfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(firmwareinfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v3_1(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_1 *firmware_info;
+ struct atom_display_controller_info_v4_1 *dce_info = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
+ DATA_TABLES(firmwareinfo));
+
+ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!firmware_info || !dce_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. */
+ /* We need to convert from 10KHz units into KHz units */
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+ info->default_engine_clk = firmware_info->bootup_sclk_in10khz * 10;
+
+ /* 27MHz for Vega10: */
+ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+
+ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+ if (info->pll_info.crystal_frequency == 0)
+ info->pll_info.crystal_frequency = 27000;
+	/* dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it */
+ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+ /* Get GPU PLL VCO Clock */
+
+ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+ /* VBIOS gives in 10KHz */
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+ }
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_encoder_caps_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_CAP = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
+ info->DP_HBR2_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2_EN) ? 1 : 0;
+ info->DP_HBR3_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
+ info->HDMI_6GB_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+
+ return BP_RESULT_OK;
+}
+
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = object->encoder_recordoffset + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->record_size;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type != ATOM_ENCODER_CAP_RECORD_TYPE)
+ continue;
+
+ if (sizeof(struct atom_encoder_caps_record) <=
+ header->record_size)
+ return (struct atom_encoder_caps_record *)header;
+ }
+
+ return NULL;
+}
+
+/*
+ * get_integrated_info_v11
+ *
+ * @brief
+ * Get V11 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v11(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ struct atom_integrated_system_info_v1_11 *info_v11;
+ uint32_t i;
+
+ info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+ DATA_TABLES(integratedsysteminfo));
+
+ if (info_v11 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->gpu_cap_info =
+ le32_to_cpu(info_v11->gpucapinfo);
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v11->system_config);
+ info->cpu_cap_info = le32_to_cpu(info_v11->cpucapinfo);
+ info->memory_type = info_v11->memorytype;
+ info->ma_channel_number = info_v11->umachannelnumber;
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v11->lvds_ss_percentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->lvds_ss_rate_10hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v11->hdmi_ss_percentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->hdmi_ss_rate_10hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v11->dvi_ss_percentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v11->dvi_ss_rate_10hz);
+ info->lvds_misc = info_v11->lvds_misc;
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v11->extdispconninfo.guid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v11->extdispconninfo.path[i].connectorobjid));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].ext_encoder_objid));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_tag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_acpi_enum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v11->extdispconninfo.path[i].auxddclut_index;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v11->extdispconninfo.path[i].hpdlut_index;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v11->extdispconninfo.path[i].channelmapping;
+ info->ext_disp_conn_info.path[i].caps =
+ le16_to_cpu(info_v11->extdispconninfo.path[i].caps);
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v11->extdispconninfo.checksum;
+
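+	/* copy the external HDMI retimer I2C register settings for each DP port */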
+ info->dp0_ext_hdmi_slv_addr = info_v11->dp0_retimer_set.HdmiSlvAddr;
+ info->dp0_ext_hdmi_reg_num = info_v11->dp0_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_reg_num; i++) {
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp0_ext_hdmi_6g_reg_num = info_v11->dp0_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp1_ext_hdmi_slv_addr = info_v11->dp1_retimer_set.HdmiSlvAddr;
+ info->dp1_ext_hdmi_reg_num = info_v11->dp1_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp1_ext_hdmi_6g_reg_num = info_v11->dp1_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp2_ext_hdmi_slv_addr = info_v11->dp2_retimer_set.HdmiSlvAddr;
+ info->dp2_ext_hdmi_reg_num = info_v11->dp2_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp2_ext_hdmi_6g_reg_num = info_v11->dp2_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp3_ext_hdmi_slv_addr = info_v11->dp3_retimer_set.HdmiSlvAddr;
+ info->dp3_ext_hdmi_reg_num = info_v11->dp3_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp3_ext_hdmi_6g_reg_num = info_v11->dp3_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+
+ /** TODO - review **/
+ #if 0
+ info->boot_up_engine_clock = le32_to_cpu(info_v11->ulBootUpEngineClock)
+ * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v11->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v11->ulBootUpReqDisplayVector);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v11->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v11->usExtDispConnInfoOffset);
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v11->ulGMCRestoreResetTime);
+ info->minimum_n_clk =
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk <
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(
+ info_v11->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v11->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v11->usPCIEClkSSType);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v11->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v11->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v11->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v11->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v11->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v11->sAvail_SCLK[i].ulSupportedSCLK)
+ * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageID);
+ }
+ #endif /* TODO*/
+
+ return BP_RESULT_OK;
+}
+
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in]BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ if (info && DATA_TABLES(integratedsysteminfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(integratedsysteminfo));
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 11:
+ result = get_integrated_info_v11(bp, info);
+ break;
+ default:
+ return result;
+ }
+ }
+
+ if (result != BP_RESULT_OK)
+ return result;
+
+	/* Sort voltage table from low to high */
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk
+ ) {
+ /* swap j and j - 1*/
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+
+
+ .is_accelerated_mode = bios_parser_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+
+/* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing,
+
+ /* .blank_crtc = bios_parser_blank_crtc, */
+
+ .crtc_source_select = bios_parser_crtc_source_select,
+
+ /* .external_encoder_control = bios_parser_external_encoder_control, */
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ .post_init = bios_parser_post_init,
+
+ .bios_parser_destroy = firmware_parser_destroy,
+
+ .get_smu_clock_info = bios_parser_get_smu_clock_info,
+};
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ struct atom_rom_header_v2_2 *rom_header = NULL;
+ struct display_object_info_table_v1_4 *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
+ bp->base.bios_size = bp->base.bios[OFFSET_TO_ATOM_ROM_IMAGE_SIZE] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+
+ bp->base.bios_local_image = NULL;
+
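+	/* locate the ATOM ROM header via the pointer stored in the image */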
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
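+	/* only atomfirmware images (ROM header v2.2 or later) are supported here */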
+ get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(struct atom_master_data_table_v2_1,
+ rom_header->masterdatatable_offset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(displayobjectinfo);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->table_header,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 4) {
+ struct display_object_info_table_v1_4 *tbl_v1_4;
+
+ tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+ if (!tbl_v1_4)
+ return false;
+
+ bp->object_info_tbl.v1_4 = tbl_v1_4;
+ } else
+ return false;
+
+ dal_firmware_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ return NULL;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
new file mode 100644
index 000000000000..cb40546cdafe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER2_H__
+#define __DAL_BIOS_PARSER2_H__
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
new file mode 100644
index 000000000000..a8cb039d2572
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "bios_parser_common.h"
+#include "include/grph_object_ctrl_defs.h"
+
+static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
+ >> OBJECT_TYPE_SHIFT;
+ enum object_type object_type;
+
+ switch (bios_object_type) {
+ case GRAPH_OBJECT_TYPE_GPU:
+ object_type = OBJECT_TYPE_GPU;
+ break;
+ case GRAPH_OBJECT_TYPE_ENCODER:
+ object_type = OBJECT_TYPE_ENCODER;
+ break;
+ case GRAPH_OBJECT_TYPE_CONNECTOR:
+ object_type = OBJECT_TYPE_CONNECTOR;
+ break;
+ case GRAPH_OBJECT_TYPE_ROUTER:
+ object_type = OBJECT_TYPE_ROUTER;
+ break;
+ case GRAPH_OBJECT_TYPE_GENERIC:
+ object_type = OBJECT_TYPE_GENERIC;
+ break;
+ default:
+ object_type = OBJECT_TYPE_UNKNOWN;
+ break;
+ }
+
+ return object_type;
+}
+
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_enum_id =
+ (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ enum object_enum_id id;
+
+ switch (bios_enum_id) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ id = ENUM_ID_1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ id = ENUM_ID_2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ id = ENUM_ID_3;
+ break;
+ case GRAPH_OBJECT_ENUM_ID4:
+ id = ENUM_ID_4;
+ break;
+ case GRAPH_OBJECT_ENUM_ID5:
+ id = ENUM_ID_5;
+ break;
+ case GRAPH_OBJECT_ENUM_ID6:
+ id = ENUM_ID_6;
+ break;
+ case GRAPH_OBJECT_ENUM_ID7:
+ id = ENUM_ID_7;
+ break;
+ default:
+ id = ENUM_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+}
+
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
+ enum encoder_id id;
+
+ switch (bios_encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ id = ENCODER_ID_INTERNAL_LVDS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ id = ENCODER_ID_INTERNAL_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
+ id = ENCODER_ID_INTERNAL_TMDS2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ id = ENCODER_ID_INTERNAL_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ id = ENCODER_ID_INTERNAL_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ id = ENCODER_ID_INTERNAL_LVTM1;
+ break;
+ case ENCODER_OBJECT_ID_HDMI_INTERNAL:
+ id = ENCODER_ID_INTERNAL_HDMI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_MVPU_FPGA:
+ id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ id = ENCODER_ID_INTERNAL_DDI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ id = ENCODER_ID_INTERNAL_UNIPHY;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ id = ENCODER_ID_INTERNAL_UNIPHY1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ id = ENCODER_ID_INTERNAL_UNIPHY2;
+ break;
+ case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
+ id = ENCODER_ID_EXTERNAL_NUTMEG;
+ break;
+ case ENCODER_OBJECT_ID_TRAVIS:
+ id = ENCODER_ID_EXTERNAL_TRAVIS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ id = ENCODER_ID_INTERNAL_UNIPHY3;
+ break;
+ default:
+ id = ENCODER_ID_UNKNOWN;
+ ASSERT(0);
+ break;
+ }
+
+ return id;
+}
+
+static enum connector_id connector_id_from_bios_object_id(
+ uint32_t bios_object_id)
+{
+ uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum connector_id id;
+
+ switch (bios_connector_id) {
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ id = CONNECTOR_ID_SINGLE_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ id = CONNECTOR_ID_DUAL_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ id = CONNECTOR_ID_SINGLE_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ id = CONNECTOR_ID_DUAL_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_VGA:
+ id = CONNECTOR_ID_VGA;
+ break;
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ id = CONNECTOR_ID_HDMI_TYPE_A;
+ break;
+ case CONNECTOR_OBJECT_ID_LVDS:
+ id = CONNECTOR_ID_LVDS;
+ break;
+ case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
+ id = CONNECTOR_ID_PCIE;
+ break;
+ case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
+ id = CONNECTOR_ID_HARDCODE_DVI;
+ break;
+ case CONNECTOR_OBJECT_ID_DISPLAYPORT:
+ id = CONNECTOR_ID_DISPLAY_PORT;
+ break;
+ case CONNECTOR_OBJECT_ID_eDP:
+ id = CONNECTOR_ID_EDP;
+ break;
+ case CONNECTOR_OBJECT_ID_MXM:
+ id = CONNECTOR_ID_MXM;
+ break;
+ default:
+ id = CONNECTOR_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum generic_id id;
+
+ switch (bios_generic_id) {
+ case GENERIC_OBJECT_ID_MXM_OPM:
+ id = GENERIC_ID_MXM_OPM;
+ break;
+ case GENERIC_OBJECT_ID_GLSYNC:
+ id = GENERIC_ID_GLSYNC;
+ break;
+ case GENERIC_OBJECT_ID_STEREO_PIN:
+ id = GENERIC_ID_STEREO;
+ break;
+ default:
+ id = GENERIC_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t id_from_bios_object_id(enum object_type type,
+ uint32_t bios_object_id)
+{
+ switch (type) {
+ case OBJECT_TYPE_GPU:
+ return gpu_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_ENCODER:
+ return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_CONNECTOR:
+ return (uint32_t)connector_id_from_bios_object_id(
+ bios_object_id);
+ case OBJECT_TYPE_GENERIC:
+ return generic_id_from_bios_object_id(bios_object_id);
+ default:
+ return 0;
+ }
+}
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ enum object_type type;
+ enum object_enum_id enum_id;
+ struct graphics_object_id go_id = { 0 };
+
+ type = object_type_from_bios_object_id(bios_object_id);
+
+ if (OBJECT_TYPE_UNKNOWN == type)
+ return go_id;
+
+ enum_id = enum_id_from_bios_object_id(bios_object_id);
+
+ if (ENUM_ID_UNKNOWN == enum_id)
+ return go_id;
+
+ go_id = dal_graphics_object_id_init(
+ id_from_bios_object_id(type, bios_object_id), enum_id, type);
+
+ return go_id;
+}
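+
+/* Worked decode example for the helpers above (illustrative; assumes the usual
+ * ObjectID.h layout with the object type in bits 14:12, the enum id in bits
+ * 10:8 and the object id in bits 7:0):
+ *
+ *	0x2121 -> type GRAPH_OBJECT_TYPE_ENCODER, GRAPH_OBJECT_ENUM_ID1,
+ *	          object id ENCODER_OBJECT_ID_INTERNAL_UNIPHY
+ *
+ *	struct graphics_object_id enc = object_id_from_bios_object_id(0x2121);
+ *	// enc.type == OBJECT_TYPE_ENCODER, enc.enum_id == ENUM_ID_1,
+ *	// enc.id == ENCODER_ID_INTERNAL_UNIPHY
+ */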
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
new file mode 100644
index 000000000000..a076c61dfae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __BIOS_PARSER_COMMON_H__
+#define __BIOS_PARSER_COMMON_H__
+
+#include "dm_services.h"
+#include "ObjectID.h"
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
new file mode 100644
index 000000000000..5c9e5108c32c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "command_table.h"
+#include "bios_parser_types_internal.h"
+
+uint8_t *bios_get_image(struct dc_bios *bp,
+ uint32_t offset,
+ uint32_t size)
+{
+ if (bp->bios && offset + size < bp->bios_size)
+ return bp->bios + offset;
+ else
+ return NULL;
+}
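+
+/* Illustrative use of the bounds check above through the GET_IMAGE() macro from
+ * bios_parser_helper.h (the offset value is made up):
+ *
+ *	// GET_IMAGE(type, offset) expands to
+ *	//   (type *) bios_get_image(&bp->base, offset, sizeof(type))
+ *	// so a typed read returns NULL when offset + sizeof(type) would run
+ *	// past bios_size:
+ *	struct atom_rom_header_v2_2 *hdr =
+ *		GET_IMAGE(struct atom_rom_header_v2_2, 0x100);
+ */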
+
+#include "reg_helper.h"
+
+#define CTX \
+ bios->ctx
+#define REG(reg)\
+ (bios->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
+
+bool bios_is_accelerated_mode(
+ struct dc_bios *bios)
+{
+ uint32_t acc_mode;
+ REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
+ return (acc_mode == 1);
+}
+
+
+void bios_set_scratch_acc_mode_change(
+ struct dc_bios *bios)
+{
+ REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
+}
+
+
+void bios_set_scratch_critical_state(
+ struct dc_bios *bios,
+ bool state)
+{
+	uint32_t critical_state = state ? 1 : 0;
+	REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state);
+}
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
new file mode 100644
index 000000000000..c0047efeb006
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_HELPER_H__
+#define __DAL_BIOS_PARSER_HELPER_H__
+
+struct bios_parser;
+
+uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
+ uint32_t size);
+
+bool bios_is_accelerated_mode(struct dc_bios *bios);
+void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
+void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+
+#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
new file mode 100644
index 000000000000..0079a1e26efd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+
+#include "bios_parser_interface.h"
+#include "bios_parser.h"
+
+#include "bios_parser2.h"
+
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct dc_bios *bios = NULL;
+
+ bios = firmware_parser_create(init, dce_version);
+
+ /* Fall back to old bios parser for older asics */
+ if (bios == NULL)
+ bios = bios_parser_create(init, dce_version);
+
+ return bios;
+}
+
+void dal_bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct dc_bios *bios = *dcb;
+
+ bios->funcs->bios_parser_destroy(dcb);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
new file mode 100644
index 000000000000..5918923bfb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ ATOM_OBJECT_HEADER *v1_1;
+ ATOM_OBJECT_HEADER_V3 *v1_3;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ ATOM_MASTER_DATA_TABLE *master_data_tbl;
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
new file mode 100644
index 000000000000..bf1f5c86e65c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+/* use atomfirmware_bringup.h only. Not atombios.h anymore */
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ struct display_object_info_table_v1_4 *v1_4;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ struct atom_master_data_table_v2_1 *master_data_tbl;
+
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
new file mode 100644
index 000000000000..3f7b2dabc2b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -0,0 +1,2424 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table.h"
+#include "command_table_helper.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal.h"
+
+#define EXEC_BIOS_CMD_TABLE(command, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(command)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
+static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_output_control(struct bios_parser *bp);
+static void init_set_crtc_timing(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+static void init_enable_crtc_mem_req(struct bios_parser *bp);
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_program_clock(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+ init_enable_spread_spectrum_on_ppll(bp);
+ init_adjust_display_pll(bp);
+ init_dac_encoder_control(bp);
+ init_dac_output_control(bp);
+ init_set_crtc_timing(bp);
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+ init_enable_crtc_mem_req(bp);
+ init_program_clock(bp);
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+}
+
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
+
+ switch (version) {
+ case 2:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
+ break;
+
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
+ break;
+
+ default:
+ init_encoder_control_dig_v1(bp);
+ break;
+ }
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp)
+{
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
+ cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
+ else
+ cmd_tbl->encoder_control_dig1 = NULL;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
+ cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
+ else
+ cmd_tbl->encoder_control_dig2 = NULL;
+
+ cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (cntl != NULL)
+ switch (cntl->engine_id) {
+ case ENGINE_ID_DIGA:
+ if (cmd_tbl->encoder_control_dig1 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig1(bp, cntl);
+ break;
+ case ENGINE_ID_DIGB:
+ if (cmd_tbl->encoder_control_dig2 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig2(bp, cntl);
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio);
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
+
+ params.ucDigId = (uint8_t)(cntl->engine_id);
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+ params.ulPixelClock = cntl->pixel_clock / 10;
+ params.ucDigMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulPixelClock =
+ (params.ulPixelClock * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulPixelClock =
+ (params.ulPixelClock * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulPixelClock =
+ (params.ulPixelClock * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
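+
+/* Worked example of the deep-color scaling above (illustrative numbers): for an
+ * HDMI stream at 148,500 kHz with COLOR_DEPTH_121212, ulPixelClock is first
+ * 148500 / 10 = 14850 (10 kHz units) and is then scaled to
+ * (14850 * 36) / 24 = 22275, i.e. the 222.75 MHz 12-bpc TMDS clock.
+ */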
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
+ frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 2:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v4;
+ break;
+ case 5:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
+ break;
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
+ /* on INIT this bit should be set according to the
+			 * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+ /* Bit4: DP connector flag
+		 * =0 connector is non-DP connector
+ * =1 connector is DP connector
+ */
+ params.acConfig.fDPConnector = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
+ cntl->transmitter);
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
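+
+/* Concrete reading of the acConfig bits above (illustrative combination):
+ * enabling dual-link DVI on TRANSMITTER_UNIPHY_D from ENGINE_ID_DIGB sets
+ * fDualLinkConnector = 1 (Bit0), ucLinkSel = 1 (Bit2, the B/D/F link),
+ * ucEncoderSel = 1 (Bit3, DIGB as data source) and ucTransmitterSel = 1
+ * (Bits 7:6, the UNIPHYC/D pair), while usPixelClock carries the half-rate
+ * clock in 20 kHz units.
+ */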
+
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
+ uint32_t pll_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
+ || (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
+ return BP_RESULT_BADINPUT;
+
+ /* fill information based on the action */
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if (dual_link_conn) {
+ /* on INIT this bit should be set according to the
+			 * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+ }
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ if (dual_link_conn && cntl->multi_path)
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+ params.acConfig.ucRefClkSource = (uint8_t)pll_id;
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
+ uint32_t ref_clk_src_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
+ return BP_RESULT_BADINPUT;
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ {
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on INIT this bit should be set according to the
+			 * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ }
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+		/* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
+ params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
+ break;
+ default:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
+ params.ucAction = (uint8_t)(cntl->action);
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+
+ params.ucDigMode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+ params.asConfig.ucPhyClkSrcId =
+ cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
+ /* 00 - coherent mode */
+ params.asConfig.ucCoherentMode = cntl->coherent;
+ params.asConfig.ucHPDSel =
+ cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel =
+ cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
+ params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
+ /*
+	 * In the SI/TN case, the caller has to set usPixelClock as follows:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
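+
+/* Worked example of the unit convention noted above (it describes what the
+ * caller is expected to pass; this call does not verify it): in DP mode on a
+ * 2.7 GHz link the caller passes pixel_clock = 270000 (kHz), giving
+ * usSymClock = 270000 / 10 = 27000, while single-link DVI at 148,500 kHz
+ * gives 148500 / 10 = 14850.
+ */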
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
+ else
+ params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+	params.ulSymClock = cpu_to_le32(cntl->pixel_clock / 10);
+
+ /*
+	 * In the SI/TN case, the caller has to set usPixelClock as follows:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
+ switch (cntl->signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulSymClock =
+				cpu_to_le32((le32_to_cpu(params.ulSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 3:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
+ break;
+ case 5:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
+ break;
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V3 *params;
+ SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
+
+ memset(&allocation, 0, sizeof(allocation));
+
+ if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
+ else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
+ else
+ return BP_RESULT_BADINPUT;
+
+ allocation.sPCLKInput.usRefDiv =
+ cpu_to_le16((uint16_t)bp_params->reference_divider);
+ allocation.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)bp_params->feedback_divider);
+ allocation.sPCLKInput.ucFracFbDiv =
+ (uint8_t)bp_params->fractional_feedback_divider;
+ allocation.sPCLKInput.ucPostDiv =
+ (uint8_t)bp_params->pixel_clock_post_divider;
+
+ /* We need to convert from KHz units into 10KHz units */
+ allocation.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
+ params->ucTransmitterId =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params->ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
+
+ if (CONTROLLER_ID_D1 != bp_params->controller_id)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
+ PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
+#endif
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
+ PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
+#endif
+
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+ clk.sPCLKInput.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t)(bp_params->reference_divider);
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)(bp_params->feedback_divider));
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t)(bp_params->pixel_clock_post_divider);
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ /* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
+ * =1:30bpp, =2:32bpp
+		 * The driver chooses to program it itself, i.e. here we
+		 * program it to 888 by default.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+		/* Note: VBIOS still wants to use ucCRTC name which is now
+		 * 1 byte in ULONG
+		 * typedef struct _CRTC_PIXEL_CLOCK_FREQ
+		 * {
+		 *	ULONG ulPixelClock:24;	target the pixel clock to drive
+		 *				the CRTC timing. 0 means disable
+		 *				PPLL/DCPLL. Expanded to 24 bits
+		 *				comparing to previous version.
+		 *	ULONG ucCRTC:8;		ATOM_CRTC1~6, indicate the CRTC
+		 *				controller to drive the pixel
+		 *				clock. not used for DCPLL case.
+		 * } CRTC_PIXEL_CLOCK_FREQ;
+		 * union
+		 * {
+		 *	CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;	pixel clock and CRTC id frequency
+		 *	ULONG ulDispEngClkFreq;			dispclk frequency
+		 * };
+		 */
+ clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t) bp_params->reference_divider;
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t) bp_params->feedback_divider);
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t) bp_params->pixel_clock_post_divider;
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
+ }
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+ }
+
+ /* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
+ * 24bpp =1:30bpp, =2:32bpp
+		 * The driver chooses to program it itself, i.e. here we pass
+		 * the required target rate that includes deep color.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use ucCRTC name which is now
+ * 1 byte in ULONG
+ *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+ *{
+ * target the pixel clock to drive the CRTC timing.
+ * ULONG ulPixelClock:24;
+ * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+ * previous version.
+ * ATOM_CRTC1~6, indicate the CRTC controller to
+ * ULONG ucCRTC:8;
+ * drive the pixel clock. not used for DCPLL case.
+ *}CRTC_PIXEL_CLOCK_FREQ;
+ *union
+ *{
+ * pixel clock and CRTC id frequency
+ * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+ * ULONG ulDispEngClkFreq; dispclk frequency
+ *};
+ */
+ clk.ucCRTC = controller_id;
+ clk.ucPpll = (uint8_t) pll_id;
+ clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
+ clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 100Hz units */
+ clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
+
+ clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE PIXEL CLOCK SS
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) {
+ case 1:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v3;
+ break;
+ default:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
+
+ memset(&params, 0, sizeof(params));
+
+	if (enable && bp_params->percentage > 0)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)bp_params->percentage);
+ params.ucSpreadSpectrumStep =
+ (uint8_t)bp_params->ver1.step;
+ params.ucSpreadSpectrumDelay =
+ (uint8_t)bp_params->ver1.delay;
+ /* convert back to unit of 10KHz */
+ params.ucSpreadSpectrumRange =
+ (uint8_t)(bp_params->ver1.range / 10000);
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucPpll = ATOM_PPLL1;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucPpll = ATOM_PPLL2;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+	if (enable && bp_params->percentage > 0) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)(bp_params->percentage));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
+
+		/* Both amounts need to be left-shifted before being masked;
+		 * otherwise the masked result will always be zero here.
+		 */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
+	} else {
+		params.ucEnable = ATOM_DISABLE;
+	}
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (bp_params->pll_id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ /* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
+ * not for SI display clock.
+ */
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_PLL2:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_DCPLL:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ /* Unexpected PLL value!! */
+ return result;
+ }
+
+	if (enable) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumAmountFrac =
+ cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
+
+		/* Both amounts need to be left-shifted before being masked;
+		 * otherwise the masked result will always be zero here.
+		 */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
+	} else {
+		params.ucEnable = ATOM_DISABLE;
+	}
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ADJUST DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+
+static void init_adjust_display_pll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) {
+ case 2:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
+ break;
+ default:
+ bp->cmd_tbl.adjust_display_pll = NULL;
+ break;
+ }
+}
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
+
+ params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
+ params.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+ return result;
+}
+
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
+ uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
+
+ memset(&params, 0, sizeof(params));
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
+ params.sInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.sInput.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+	if (bp_params->ss_enable)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
+
+ if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+ /* Convert output pixel clock back 10KHz-->KHz: multiply
+ * original pixel clock in KHz by ratio
+ * [output pxlClk/input pxlClk] */
+ uint64_t pixel_clk_10_khz_out =
+ (uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
+ uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+ if (pixel_clk_10_kHz_in != 0) {
+ bp_params->adjusted_pixel_clock =
+ div_u64(pixel_clk * pixel_clk_10_khz_out,
+ pixel_clk_10_kHz_in);
+ } else {
+ bp_params->adjusted_pixel_clock = 0;
+ BREAK_TO_DEBUGGER();
+ }
+
+ bp_params->reference_divider = params.sOutput.ucRefDiv;
+ bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
+
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+
+static void init_dac_encoder_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_encoder_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_encoder_control = NULL;
+ break;
+ }
+}
+
+static void dac_encoder_control_prepare_params(
+ DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ params->ucDacStandard = dac_standard;
+ if (enable)
+ params->ucAction = ATOM_ENABLE;
+ else
+ params->ucAction = ATOM_DISABLE;
+
+ /* We need to convert from KHz units into 10KHz units
+	 * it looks as if TvControl does not care about the pixel clock
+ */
+ params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
+}
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC OUTPUT CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+
+static void init_dac_output_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_output_control = dac1_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_output_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_output_control = dac2_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_output_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
+ if (dtd_version > 2)
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+ else
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) {
+ case 1:
+ bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
+ params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
+ params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
+ params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
+ params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start));
+ params.usV_SyncWidth =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_width));
+
+	/* VBIOS does not expect any value except zero in this call; for
+	 * underscan, use the separate ProgramOverscan call. Note: mode
+	 * 1776x1000 with 72x44 overscan, i.e. 1920x1080, is OK at 30 Hz in
+	 * DAL2, but the same mode at 60 Hz shows corruption.
+	 * DAL1 does not allow the mode 1776x1000@60.
+ */
+ params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
+ params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
+ params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
+ params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+		/* original DAL code has this condition to apply this for
+		 * non-TV/CV only due to complex MV testing for possible
+		 * impact
+		 * if (pACParameters->signal != SignalType_YPbPr &&
+		 * pACParameters->signal != SignalType_Composite &&
+		 * pACParameters->signal != SignalType_SVideo)
+		 */
+		/* HW will deduct 0.5 line from 2nd field.
+		 * i.e. for 1080i, it is 2 lines for 1st field, 2.5
+		 * lines for the 2nd field. we need input as 5 instead
+		 * of 4, but it is 4 either from EDID data
+ * (spec CEA 861) or CEA timing table.
+ */
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.usH_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.usV_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable */
+ params.usH_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usHSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable */
+ params.usV_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
+ params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+		/* HW will deduct 0.5 line from 2nd field.
+		 * i.e. for 1080i, it is 2 lines for 1st field,
+		 * 2.5 lines for the 2nd field. we need input as 5
+		 * instead of 4.
+		 * but it is 4 either from EDID data (spec CEA 861)
+		 * or CEA timing table.
+		 */
+		params.usV_SyncOffset =
+			cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ /* set controller id */
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return BP_RESULT_FAILURE;
+
+ /* set encoder id */
+ if (bp->cmd_helper->engine_bp_to_atom(
+ bp_params->engine_id, &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return BP_RESULT_FAILURE;
+
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+	enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.ucCRTC = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC MEM REQ
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc_mem_req(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc_mem_req = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+	enum bp_result result = BP_RESULT_BADINPUT;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
+ params.ucCRTC = id;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
+ result = BP_RESULT_OK;
+ else
+ result = BP_RESULT_FAILURE;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_program_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 5:
+ bp->cmd_tbl.program_clock = program_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.program_clock = program_clock_v6;
+ break;
+ default:
+ bp->cmd_tbl.program_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+		BREAK_TO_DEBUGGER(); /* Invalid Input!! */
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
+ params.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
+ params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+ BREAK_TO_DEBUGGER(); /*Invalid Input!!*/
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
+ params.sPCLKInput.ulDispEngClkFreq =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+ /* True display clock is returned by VBIOS if DFS bypass
+ * is enabled. */
+ bp_params->dfs_bypass_display_clock =
+ (uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+	/* we need to use the _PS_Alloc struct */
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
+ struct graphics_object_id encoder;
+ bool is_input_signal_dp = false;
+
+ memset(&params, 0, sizeof(params));
+
+ cntl_params = &params.sExtEncoder;
+
+ encoder = cntl->encoder_id;
+
+ /* check if encoder supports external encoder control table */
+ switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ is_input_signal_dp = true;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* Fill information based on the action
+ *
+ * Bit[6:4]: indicate external encoder, applied to all functions.
+ * =0: external encoder1, mapped to external encoder enum id1
+ * =1: external encoder2, mapped to external encoder enum id2
+ *
+ * enum ObjectEnumId
+ * {
+ * EnumId_Unknown = 0,
+ * EnumId_1,
+ * EnumId_2,
+ * };
+ */
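+	/* e.g. encoder enum_id EnumId_1 (1) maps to 0 in bits [6:4],
+	 * EnumId_2 (2) maps to 1
+	 */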
+ cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
+
+ switch (cntl->action) {
+ case EXTERNAL_ENCODER_CONTROL_INIT:
+ /* output display connector type. Only valid in encoder
+ * initialization */
+ cntl_params->usConnectorId =
+ cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_SETUP:
+ /* EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 pixel clock unit in
+ * 10KHz
+ * output display device pixel clock frequency in unit of 10KHz.
+ * Only valid in setup and enableoutput
+ */
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+		/* Indicates the display output signal type driven by the
+		 * external encoder; only valid in setup and enableoutput */
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+
+ if (is_input_signal_dp) {
+			/* Bit[0]: indicates link rate, =1: 2.7GHz, =0: 1.62GHz,
+			 * only valid in encoder setup with DP mode. */
+ if (LINK_RATE_HIGH == cntl->link_rate)
+ cntl_params->ucConfig |= 1;
+			/* output color depth: indicates the encoder data bpc
+			 * format in DP mode, only valid in encoder setup in
+			 * DP mode.
+			 */
+ cntl_params->ucBitPerColor =
+ (uint8_t)(cntl->color_depth);
+ }
+		/* Indicates how many lanes are used by the external encoder;
+		 * only valid in encoder setup and enableoutput. */
+ cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_ENABLE:
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+ cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
+ break;
+ default:
+ break;
+ }
+
+ cntl_params->ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ params.ucDispPipeId = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ params.ucEnable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.asParam.ucDCEClkSrc = atom_pll_id;
+ params.asParam.ucDCEClkType = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+	} else {
+		/* only program clock frequency if display clock is used;
+		 * VBIOS will program DPREFCLK
+		 * We need to convert from KHz units into 10KHz units
+		 */
+		params.asParam.ulDCEClkFreq =
+			cpu_to_le32(bp_params->target_clock_frequency / 10);
+	}
+
+ if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
new file mode 100644
index 000000000000..94f3d43a7471
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_H__
+#define __DAL_COMMAND_TABLE_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
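+		/* adjusted KHz = original KHz * (output 10KHz / input 10KHz),
+		 * computed as a 64-bit product before the division to avoid
+		 * overflow
+		 */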
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+};
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
new file mode 100644
index 000000000000..ba68693758a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table2.h"
+#include "command_table_helper2.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal2.h"
+
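+/* offsetof-style helper: the byte offset of FieldName inside the master
+ * list of command/data functions, divided by sizeof(uint16_t), yields the
+ * table index passed to the cgs_atom_* calls below
+ */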
+#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
+ (((char *)(&((\
+ struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
+ ->FieldName)-(char *)0)/sizeof(uint16_t))
+
+#define EXEC_BIOS_CMD_TABLE(fname, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname))
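+/* BIOS_CMD_TABLE_PARA_REVISION resolves to the parameter (content) revision
+ * of the named command table, or 0 if the revisions cannot be read
+ */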
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+
+static void init_set_crtc_timing(struct bios_parser *bp);
+
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+static void init_get_smu_clock_info(struct bios_parser *bp);
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+}
+
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(digxencodercontrol);
+
+ switch (version) {
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
+ break;
+ default:
+ bp->cmd_tbl.dig_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct dig_encoder_stream_setup_parameters_v1_5 params = {0};
+
+ params.digid = (uint8_t)(cntl->engine_id);
+ params.action = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+ params.pclk_10khz = cntl->pixel_clock / 10;
+ params.digmode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.lanenum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.bitpercolor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.bitpercolor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.bitpercolor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.bitpercolor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
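+	/* for HDMI deep color the TMDS clock is the pixel clock scaled by
+	 * bpp/24, so scale pclk_10khz accordingly (30/24, 36/24, 48/24)
+	 */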
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.pclk_10khz =
+ (params.pclk_10khz * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.pclk_10khz =
+ (params.pclk_10khz * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.pclk_10khz =
+ (params.pclk_10khz * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*****************************************************************************
+ ******************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ struct dig_transmitter_control_ps_allocation_v1_6 ps = { { 0 } };
+
+ ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
+ ps.param.action = (uint8_t)cntl->action;
+
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
+ else
+ ps.param.mode_laneset.digmode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ ps.param.lanenum = (uint8_t)cntl->lanes_number;
+ ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+ ps.param.symclk_10khz = cntl->pixel_clock/10;
+
+
+ if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
+ cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
+ cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
+		dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+				"%s:ps.param.symclk_10khz = %d\n",
+				__func__, ps.param.symclk_10khz);
+ }
+
+
+	/* color_depth is not used any more; the driver accounts for the
+	 * deep color factor in the PHY clock
+	 */
+ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_pixel_clock_parameter_v1_7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->
+ controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use ucCRTC name which is now
+ * 1 byte in ULONG
+ *typedef struct _CRTC_PIXEL_CLOCK_FREQ
+ *{
+ * target the pixel clock to drive the CRTC timing.
+ * ULONG ulPixelClock:24;
+ * 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to
+ * previous version.
+ * ATOM_CRTC1~6, indicate the CRTC controller to
+ * ULONG ucCRTC:8;
+ * drive the pixel clock. not used for DCPLL case.
+ *}CRTC_PIXEL_CLOCK_FREQ;
+ *union
+ *{
+ * pixel clock and CRTC id frequency
+ * CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;
+ * ULONG ulDispEngClkFreq; dispclk frequency
+ *};
+ */
+ clk.crtc_id = controller_id;
+ clk.pll_id = (uint8_t) pll_id;
+ clk.encoderobjid =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+
+ clk.encoder_mode = (uint8_t) bp->
+ cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+		/* We need to convert from KHz units into 100Hz units */
+ clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock *
+ 10);
+
+ clk.deep_color_ratio =
+ (uint8_t) bp->cmd_helper->
+ transmitter_color_depth_to_atom(
+ bp_params->color_depth);
+		dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+				"%s: program display clock = %d, colorDepth = %d\n",
+				__func__, bp_params->target_pixel_clock,
+				bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(setcrtc_usingdtdtiming);
+
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_crtc_using_dtd_timing_parameters params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.h_size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.h_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->h_total -
+ bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.v_size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.v_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->v_total -
+ bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable
+ */
+ params.h_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start -
+ bp_params->h_addressable));
+ params.h_syncwidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usHSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable
+ */
+ params.v_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start -
+ bp_params->v_addressable));
+ params.v_syncwidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
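+ /* Illustrative example, assuming the standard CEA-861 1080p60 timing
+ * (h_total 2200, h_addressable 1920, h_sync_start 2008, h_sync_width 44):
+ * h_blanking_time = 2200 - 1920 = 280 and h_syncoffset = 2008 - 1920 = 88,
+ * which is the offset-from-end-of-addressable form described above.
+ */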
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (bp_params->flags.HSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_HSYNC_POLARITY);
+
+ if (bp_params->flags.VSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ {
+ /* HW will deduct 0.5 line from 2nd field.
+ * i.e. for 1080i, it is 2 lines for 1st field,
+ * 2.5 lines for the 2nd field. we need input as 5
+ * instead of 4.
+ * but it is 4 either from Edid data (spec CEA 861)
+ * or CEA timing table.
+ */
+ params.v_syncoffset =
+ cpu_to_le16(le16_to_cpu(params.v_syncoffset) +
+ 1);
+
+ }
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ 0x100); /* ATOM_DOUBLE_CLOCK_MODE */
+
+ if (EXEC_BIOS_CMD_TABLE(setcrtc_usingdtdtiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)) {
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct select_crtc_source_parameters_v2_3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.encoder_id = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (s == SIGNAL_TYPE_EDP ||
+ (s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal ==
+ SIGNAL_TYPE_LVDS))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.encode_mode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct enable_crtc_parameters params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.crtc_id = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.enable = ATOM_ENABLE;
+ else
+ params.enable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(enablecrtc, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(externalencodercontrol)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ /* TODO */
+ return BP_RESULT_OK;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ struct enable_disp_power_gating_ps_allocation ps = { { 0 } };
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ ps.param.disp_pipe_id = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ ps.param.enable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setdceclock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ struct set_dce_clock_ps_allocation_v2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type,
+ &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.param.dceclksrc = atom_pll_id;
+ params.param.dceclktype = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+ } else {
+ /* only program clock frequency if display clock is used;
+ * VBIOS will program DPREFCLK.
+ * We need to convert from KHz units into 10KHz units.
+ */
+ params.param.dceclk_10khz = cpu_to_le32(
+ bp_params->target_clock_frequency / 10);
+ }
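+ /* Illustrative example of the conversion above: a 600000 kHz display
+ * clock request is programmed as 600000 / 10 = 60000 in 10 kHz units.
+ */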
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+ "%s: target_clock_frequency = %d, clock_type = %d\n",
+ __func__, bp_params->target_clock_frequency,
+ bp_params->clock_type);
+
+ if (EXEC_BIOS_CMD_TABLE(setdceclock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(
+ params.param.dceclk_10khz) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** GET SMU CLOCK INFO
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+
+static void init_get_smu_clock_info(struct bios_parser *bp)
+{
+ /* TODO: add switch for table version */
+ bp->cmd_tbl.get_smu_clock_info = get_smu_clock_info_v3_1;
+}
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
+
+ smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+
+ /* Get Specific Clock */
+ if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
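+ /* The VBIOS writes its reply back into the buffer that carried the
+ * request, so the populated input structure is copied into the output
+ * layout before the VCO frequency is read (assumed from the shared
+ * parameter/output buffer convention of this command table).
+ */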
+ memmove(&smu_output, &smu_input, sizeof(
+ struct atom_get_smu_clock_info_parameters_v3_1));
+ return smu_output.atom_smu_outputclkfreq.syspllvcofreq_10khz;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
new file mode 100644
index 000000000000..59061b806df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE2_H__
+#define __DAL_COMMAND_TABLE2_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct bios_parser *bp);
+
+};
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
new file mode 100644
index 000000000000..2979358c6a55
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+ * dal_cmd_table_helper_transmitter_bp_to_atom
+ *
+ * @brief
+ * Translate the Transmitter to the corresponding ATOM BIOS value
+ *
+ * @param
+ * input: transmitter
+ * output: digitalTransmitter
+ *   =00: Digital Transmitter1 (UNIPHY link AB)
+ *   =01: Digital Transmitter2 (UNIPHY link CD)
+ *   =02: Digital Transmitter3 (UNIPHY link EF)
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
+{
+ /* There are three transmitter blocks, each with two 4-lane links:
+ * A+B, C+D and E+F. Uniphy A, C and E are enumerated as link 0 in
+ * each transmitter block; B, D and F as link 1. The third block has
+ * non-splittable links (Uniphy E and Uniphy F cannot be configured
+ * separately to drive two different streams).
+ */
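+ /* Example following from the mapping above: TRANSMITTER_UNIPHY_D is
+ * link 1 of transmitter block 1 (link CD), so ucLinkSel is set to 1
+ * below and transmitter_bp_to_atom() reports block 1.
+ */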
+ if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_D) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_F)) {
+ /* Bit2: Link Select
+ * =0: PHY linkA/C/E
+ * =1: PHY linkB/D/F
+ */
+ ctrl_param->acConfig.ucLinkSel = 1;
+ }
+
+ /* Bit[4:3]: Transmitter Selection
+ * =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * =02: Digital Transmitter3 ( UNIPHY linkEF )
+ * =03: Reserved
+ */
+ ctrl_param->acConfig.ucTransmitterSel =
+ (uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
+
+ ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
+ /* We need to convert from KHz units into 10KHz units */
+ ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
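+ /* e.g. a hypothetical 270000 kHz pixel clock is passed to the VBIOS
+ * as 270000 / 10 = 27000 in 10 kHz units.
+ */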
+ ctrl_param->ucEncoderMode =
+ (uint8_t)(h->encoder_mode_bp_to_atom(
+ control->signal, control->enable_dp_audio));
+ ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
+}
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
new file mode 100644
index 000000000000..1fab634b66be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_H__
+#define __DAL_COMMAND_TABLE_HELPER_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ void (*assign_control_parameter)(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
new file mode 100644
index 000000000000..9a4d30dd4969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper2.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper2(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#endif
+
+ case DCE_VERSION_12_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ /* TODO :case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ */
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+ * dal_cmd_table_helper_transmitter_bp_to_atom2
+ *
+ * @brief
+ * Translate the Transmitter to the corresponding ATOM BIOS value
+ *
+ * @param
+ * input: transmitter
+ * output: digitalTransmitter
+ *   =00: Digital Transmitter1 (UNIPHY link AB)
+ *   =01: Digital Transmitter2 (UNIPHY link CD)
+ *   =02: Digital Transmitter3 (UNIPHY link EF)
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ /*TODO:case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ */
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_INTERNAL_VIRTUAL:
+ return ENCODER_OBJECT_ID_NONE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
new file mode 100644
index 000000000000..9f587c91d843
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_H__
+#define __DAL_COMMAND_TABLE_HELPER2_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper2_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(
+ enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
new file mode 100644
index 000000000000..ca24154468c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
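+ /* The ATOM_TRANSMITTER_CONFIG_V5_* reference-clock defines appear to
+ * carry the selection in bits [3:2], so shifting right by two yields
+ * the raw field value handed back to the caller (an assumption based
+ * on this shift, not verified against atombios.h here).
+ */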
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated to DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Initialize command table helper functions
+ *
+ * @param
+ * const struct command_table_helper **h - [out] struct of functions
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
new file mode 100644
index 000000000000..eb60c2ead992
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
new file mode 100644
index 000000000000..0237ae575068
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper2.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated to DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled clock type in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled color depth in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table2
+ *
+ * @brief
+ * Get the command table helper function table for DCE 11.2
+ *
+ * @return
+ * const struct command_table_helper * - pointer to the helper function table
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
new file mode 100644
index 000000000000..abf28a06f5bc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+
+struct command_table_helper;
+
+/* Get the command table helper function table for DCE 11.2 */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER2_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
new file mode 100644
index 000000000000..452034f83e4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
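+ /* the ATOM_TRANSMITTER_CONFIG_V5_*PLL selectors appear to sit in
+ * bits [3:2] of the config byte, so shift right by two to return
+ * the bare two-bit PHY clock source index
+ */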
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated with DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return 0;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled clock type in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled color depth in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table
+ *
+ * @brief
+ * Get the command table helper function table for DCE 11.2
+ *
+ * @return
+ * const struct command_table_helper * - pointer to the helper function table
+ *
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
new file mode 100644
index 000000000000..dc3660951355
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+
+struct command_table_helper;
+
+/* Get the command table helper function table for DCE 11.2 */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
new file mode 100644
index 000000000000..8b30b558cf1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming the DP DTO, the PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ BREAK_TO_DEBUGGER(); /* check when this will happen! */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
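+ /* the V5 HPD_SEL values appear to be stored four bits up in the
+ * transmitter config byte, so shift them down to a plain 1..6 index
+ */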
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter =
+ dal_cmd_table_helper_assign_control_parameter,
+ .clock_source_id_to_ref_clk_src =
+ dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
+ .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
new file mode 100644
index 000000000000..e675c359e306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
new file mode 100644
index 000000000000..41ef35995b02
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the 'calcs' sub-component of DAL.
+# It calculates Bandwidth and Watermarks values for HW programming
+#
+
+CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare
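+#
+# Note: the per-object hard-float/SSE flags above exist because these DCN
+# bandwidth formulas are evaluated in floating point, which kernel code does
+# not normally permit; limiting the flags to these objects keeps the rest of
+# the driver under the usual no-FP rules.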
+
+BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
+endif
+
+AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
new file mode 100644
index 000000000000..6ca288fb5fb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "bw_fixed.h"
+
+
+#define MIN_I64 \
+ (int64_t)(-(1LL << 63))
+
+#define MAX_I64 \
+ (int64_t)((1ULL << 63) - 1)
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+static uint64_t abs_i64(int64_t arg)
+{
+ if (arg >= 0)
+ return (uint64_t)(arg);
+ else
+ return (uint64_t)(-arg);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
+{
+ struct bw_fixed res;
+ ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
+{
+ struct bw_fixed res;
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+ uint64_t arg1_value;
+ uint64_t arg2_value;
+ uint64_t remainder;
+
+ /* determine integer part */
+ uint64_t res_value;
+
+ ASSERT(denominator != 0);
+
+ arg1_value = abs_i64(numerator);
+ arg2_value = abs_i64(denominator);
+ res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= BW_FIXED_MAX_I32);
+
+ /* determine fractional part */
+ {
+ uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= MAX_I64 - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)(res_value);
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
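+
+/*
+ * Example (assuming BW_FIXED_BITS_PER_FRACTIONAL_PART is 24, as defined in
+ * bw_fixed.h): bw_frc_to_fixed(1, 3) gets integer part 0 with remainder 1,
+ * then the loop shifts 24 quotient bits out of the remainder, giving
+ * 0x555555; twice the final remainder (2) is below the denominator (3), so
+ * no rounding bit is added and the result is 0x555555 / 2^24, slightly
+ * under one third.
+ */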
+
+struct bw_fixed bw_floor2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
+ return result;
+}
+
+struct bw_fixed bw_ceil2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ if (abs_i64(result.value) < abs_i64(arg.value)) {
+ if (arg.value < 0)
+ result.value -= abs_i64(significance.value);
+ else
+ result.value += abs_i64(significance.value);
+ }
+ return result;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = abs_i64(arg1.value);
+ uint64_t arg2_value = abs_i64(arg2.value);
+
+ uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= BW_FIXED_MAX_I32);
+
+ res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
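+
+/*
+ * bw_mul() expands (a_int + a_frac) * (b_int + b_frac) term by term: the
+ * int*int product is shifted up into fixed-point units, the two int*frac
+ * cross terms are already in fixed-point units and are added directly, and
+ * the frac*frac product is scaled back down by the fractional width (with a
+ * carry when it amounts to at least half an LSB) before being accumulated,
+ * asserting against overflow at each step.
+ */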
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
new file mode 100644
index 000000000000..7243c37f569e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "custom_float.h"
+
+
+static bool build_custom_float(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct fixed31_32 mantissa_constant_plus_max_fraction =
+ dal_fixed31_32_from_fraction(
+ (1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct fixed31_32 mantiss;
+
+ if (dal_fixed31_32_eq(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = format->sign;
+ value = dal_fixed31_32_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shl(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (dal_fixed31_32_le(
+ mantissa_constant_plus_max_fraction,
+ value)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shr(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ mantissa_constant_plus_max_fraction,
+ value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = dal_fixed31_32_sub(
+ value,
+ dal_fixed31_32_one);
+
+ if (dal_fixed31_32_lt(
+ mantiss,
+ dal_fixed31_32_zero) ||
+ dal_fixed31_32_lt(
+ dal_fixed31_32_one,
+ mantiss))
+ mantiss = dal_fixed31_32_zero;
+ else
+ mantiss = dal_fixed31_32_shl(
+ mantiss,
+ format->mantissa_bits);
+
+ *mantissa = dal_fixed31_32_floor(mantiss);
+
+ return true;
+}
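+
+/*
+ * Example (assuming a format with a sign bit, 12 mantissa bits and 6
+ * exponent bits): 1.5 already lies in [1, 2), so the exponent stays at the
+ * bias (1 << (6 - 1)) - 1 = 31, and the fraction 0.5 shifted left by 12
+ * bits yields a mantissa of 2048.
+ */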
+
+static bool setup_custom_float(
+ const struct custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ uint32_t value = 0;
+
+ /* verification code:
+ * once the calculation is known to be correct, these range
+ * checks can be removed
+ */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return build_custom_float(
+ value, format, &negative, &mantissa, &exponenta) &&
+ setup_custom_float(
+ format, negative, mantissa, exponenta, result);
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
new file mode 100644
index 000000000000..6347712db834
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -0,0 +1,3257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
+{
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CZ:
+ if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_STONEY;
+ return BW_CALCS_VERSION_CARRIZO;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+ return BW_CALCS_VERSION_VEGA10;
+
+ default:
+ return BW_CALCS_VERSION_INVALID;
+ }
+}
+
+static void calculate_bandwidth(
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ struct bw_calcs_data *data)
+
+{
+ const int32_t pixels_per_chunk = 512;
+ const int32_t high = 2;
+ const int32_t mid = 1;
+ const int32_t low = 0;
+ const uint32_t s_low = 0;
+ const uint32_t s_mid1 = 1;
+ const uint32_t s_mid2 = 2;
+ const uint32_t s_mid3 = 3;
+ const uint32_t s_mid4 = 4;
+ const uint32_t s_mid5 = 5;
+ const uint32_t s_mid6 = 6;
+ const uint32_t s_high = 7;
+ const uint32_t bus_efficiency = 1;
+ const uint32_t dmif_chunk_buff_margin = 1;
+
+ uint32_t max_chunks_fbc_mode;
+ int32_t num_cursor_lines;
+
+ int32_t i, j, k;
+ struct bw_fixed yclk[3];
+ struct bw_fixed sclk[8];
+ bool d0_underlay_enable;
+ bool d1_underlay_enable;
+ bool fbc_enabled;
+ bool lpt_enabled;
+ enum bw_defines sclk_message;
+ enum bw_defines yclk_message;
+ enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
+ enum bw_defines tiling_mode[maximum_number_of_surfaces];
+ enum bw_defines surface_type[maximum_number_of_surfaces];
+ enum bw_defines voltage;
+ enum bw_defines pipe_check;
+ enum bw_defines hsr_check;
+ enum bw_defines vsr_check;
+ enum bw_defines lb_size_check;
+ enum bw_defines fbc_check;
+ enum bw_defines rotation_check;
+ enum bw_defines mode_check;
+ enum bw_defines nbp_state_change_enable_blank;
+ /*initialize variables*/
+ int32_t number_of_displays_enabled = 0;
+ int32_t number_of_displays_enabled_with_margin = 0;
+ int32_t number_of_aligned_displays_with_no_margin = 0;
+
+ yclk[low] = vbios->low_yclk;
+ yclk[mid] = vbios->mid_yclk;
+ yclk[high] = vbios->high_yclk;
+ sclk[s_low] = vbios->low_sclk;
+ sclk[s_mid1] = vbios->mid1_sclk;
+ sclk[s_mid2] = vbios->mid2_sclk;
+ sclk[s_mid3] = vbios->mid3_sclk;
+ sclk[s_mid4] = vbios->mid4_sclk;
+ sclk[s_mid5] = vbios->mid5_sclk;
+ sclk[s_mid6] = vbios->mid6_sclk;
+ sclk[s_high] = vbios->high_sclk;
+ /*''''''''''''''''''*/
+ /* surface assignment:*/
+ /* 0: d0 underlay or underlay luma*/
+ /* 1: d0 underlay chroma*/
+ /* 2: d1 underlay or underlay luma*/
+ /* 3: d1 underlay chroma*/
+ /* 4: d0 graphics*/
+ /* 5: d1 graphics*/
+ /* 6: d2 graphics*/
+ /* 7: d3 graphics, same mode as d2*/
+ /* 8: d4 graphics, same mode as d2*/
+ /* 9: d5 graphics, same mode as d2*/
+ /* ...*/
+ /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
+ /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
+ /* underlay luma and chroma surface parameters from spreadsheet*/
+
+
+
+
+ if (data->d0_underlay_mode == bw_def_none)
+ d0_underlay_enable = 0;
+ else
+ d0_underlay_enable = 1;
+ if (data->d1_underlay_mode == bw_def_none)
+ d1_underlay_enable = 0;
+ else
+ d1_underlay_enable = 1;
+ data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ surface_type[0] = bw_def_underlay420_luma;
+ surface_type[2] = bw_def_underlay420_luma;
+ data->bytes_per_pixel[0] = 1;
+ data->bytes_per_pixel[2] = 1;
+ surface_type[1] = bw_def_underlay420_chroma;
+ surface_type[3] = bw_def_underlay420_chroma;
+ data->bytes_per_pixel[1] = 2;
+ data->bytes_per_pixel[3] = 2;
+ data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
+ break;
+ case bw_def_422:
+ surface_type[0] = bw_def_underlay422;
+ surface_type[2] = bw_def_underlay422;
+ data->bytes_per_pixel[0] = 2;
+ data->bytes_per_pixel[2] = 2;
+ data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
+ break;
+ default:
+ surface_type[0] = bw_def_underlay444;
+ surface_type[2] = bw_def_underlay444;
+ data->bytes_per_pixel[0] = 4;
+ data->bytes_per_pixel[2] = 4;
+ data->lb_size_per_component[0] = dceip->lb_size_per_component444;
+ data->lb_size_per_component[2] = dceip->lb_size_per_component444;
+ break;
+ }
+ if (d0_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[0] = 1;
+ data->enable[1] = 1;
+ break;
+ default:
+ data->enable[0] = 1;
+ data->enable[1] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[0] = 0;
+ data->enable[1] = 0;
+ }
+ if (d1_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[2] = 1;
+ data->enable[3] = 1;
+ break;
+ default:
+ data->enable[2] = 1;
+ data->enable[3] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[2] = 0;
+ data->enable[3] = 0;
+ }
+ data->use_alpha[0] = 0;
+ data->use_alpha[1] = 0;
+ data->use_alpha[2] = 0;
+ data->use_alpha[3] = 0;
+ data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
+ /*underlay0 same and graphics display pipe0*/
+ data->interlace_mode[0] = data->interlace_mode[4];
+ data->interlace_mode[1] = data->interlace_mode[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->interlace_mode[2] = data->interlace_mode[5];
+ data->interlace_mode[3] = data->interlace_mode[5];
+ /*underlay0 same and graphics display pipe0*/
+ data->h_total[0] = data->h_total[4];
+ data->v_total[0] = data->v_total[4];
+ data->h_total[1] = data->h_total[4];
+ data->v_total[1] = data->v_total[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->h_total[2] = data->h_total[5];
+ data->v_total[2] = data->v_total[5];
+ data->h_total[3] = data->h_total[5];
+ data->v_total[3] = data->v_total[5];
+ /*underlay0 same and graphics display pipe0*/
+ data->pixel_rate[0] = data->pixel_rate[4];
+ data->pixel_rate[1] = data->pixel_rate[4];
+ /*underlay1 same and graphics display pipe1*/
+ data->pixel_rate[2] = data->pixel_rate[5];
+ data->pixel_rate[3] = data->pixel_rate[5];
+ if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
+ tiling_mode[0] = bw_def_linear;
+ tiling_mode[1] = bw_def_linear;
+ tiling_mode[2] = bw_def_linear;
+ tiling_mode[3] = bw_def_linear;
+ }
+ else {
+ tiling_mode[0] = bw_def_landscape;
+ tiling_mode[1] = bw_def_landscape;
+ tiling_mode[2] = bw_def_landscape;
+ tiling_mode[3] = bw_def_landscape;
+ }
+ data->lb_bpc[0] = data->underlay_lb_bpc;
+ data->lb_bpc[1] = data->underlay_lb_bpc;
+ data->lb_bpc[2] = data->underlay_lb_bpc;
+ data->lb_bpc[3] = data->underlay_lb_bpc;
+ data->compression_rate[0] = bw_int_to_fixed(1);
+ data->compression_rate[1] = bw_int_to_fixed(1);
+ data->compression_rate[2] = bw_int_to_fixed(1);
+ data->compression_rate[3] = bw_int_to_fixed(1);
+ data->access_one_channel_only[0] = 0;
+ data->access_one_channel_only[1] = 0;
+ data->access_one_channel_only[2] = 0;
+ data->access_one_channel_only[3] = 0;
+ data->cursor_width_pixels[0] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[1] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[3] = bw_int_to_fixed(0);
+ /* graphics surface parameters from spreadsheet*/
+ fbc_enabled = 0;
+ lpt_enabled = 0;
+ for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
+ if (i < data->number_of_displays + 4) {
+ if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else if (i == 4) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ }
+ else {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
+ surface_type[i] = bw_def_graphics;
+ data->lb_size_per_component[i] = dceip->lb_size_per_component444;
+ if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
+ tiling_mode[i] = bw_def_linear;
+ }
+ else {
+ tiling_mode[i] = bw_def_tiled;
+ }
+ data->lb_bpc[i] = data->graphics_lb_bpc;
+ if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
+ data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
+ data->access_one_channel_only[i] = data->lpt_en[i];
+ }
+ else {
+ data->compression_rate[i] = bw_int_to_fixed(1);
+ data->access_one_channel_only[i] = 0;
+ }
+ if (data->fbc_en[i] == 1) {
+ fbc_enabled = 1;
+ if (data->lpt_en[i] == 1) {
+ lpt_enabled = 1;
+ }
+ }
+ data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+ }
+ /* display_write_back420*/
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->enable[maximum_number_of_surfaces - 2] = 1;
+ data->enable[maximum_number_of_surfaces - 1] = 1;
+ }
+ else {
+ data->enable[maximum_number_of_surfaces - 2] = 0;
+ data->enable[maximum_number_of_surfaces - 1] = 0;
+ }
+ surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
+ surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
+ data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
+ data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
+ data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
+ data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
+ tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
+ data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
+ data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
+ data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
+ data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
+ /*assume display pipe1 has dwb enabled*/
+ data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
+ data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
+ data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
+ data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
+ data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
+ data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
+ data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
+ data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
+ data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
+ data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ data->use_alpha[maximum_number_of_surfaces - 2] = 0;
+ data->use_alpha[maximum_number_of_surfaces - 1] = 0;
+ /*mode check calculations:*/
+ /* mode within dce ip capabilities*/
+ /* fbc*/
+ /* hsr*/
+ /* vsr*/
+ /* lb size*/
+ /*effective scaling source and ratios:*/
+ /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
+ /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/
+ /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
+ /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
+ /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
+ /*in interlace mode there is 2:1 vertical downscaling for each field*/
+ /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
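+ /*for example (illustration only): the chroma plane of a 1920 pixel wide 420 underlay with hsr 1 and vsr 2 is first halved to 960 pixels wide with hsr 0.5 and vsr 1*/
+ /*rotating it by 90 degrees then swaps these into hsr 1 and vsr 0.5, and interlaced timing would double the vertical ratio again*/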
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
+ data->h_taps[i] = bw_int_to_fixed(1);
+ data->v_taps[i] = bw_int_to_fixed(1);
+ }
+ if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
+ data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
+ data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
+ data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
+ data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
+ data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
+ }
+ else {
+ data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
+ data->src_width_after_surface_type = data->src_width[i];
+ data->src_height_after_surface_type = data->src_height[i];
+ data->hsr_after_surface_type = data->h_scale_ratio[i];
+ data->vsr_after_surface_type = data->v_scale_ratio[i];
+ }
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->src_width_after_rotation = data->src_height_after_surface_type;
+ data->src_height_after_rotation = data->src_width_after_surface_type;
+ data->hsr_after_rotation = data->vsr_after_surface_type;
+ data->vsr_after_rotation = data->hsr_after_surface_type;
+ }
+ else {
+ data->src_width_after_rotation = data->src_width_after_surface_type;
+ data->src_height_after_rotation = data->src_height_after_surface_type;
+ data->hsr_after_rotation = data->hsr_after_surface_type;
+ data->vsr_after_rotation = data->vsr_after_surface_type;
+ }
+ switch (data->stereo_mode[i]) {
+ case bw_def_top_bottom:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
+ break;
+ case bw_def_side_by_side:
+ data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ default:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ }
+ data->hsr[i] = data->hsr_after_stereo;
+ if (data->interlace_mode[i]) {
+ data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
+ }
+ else {
+ data->vsr[i] = data->vsr_after_stereo;
+ }
+ if (data->panning_and_bezel_adjustment != bw_def_none) {
+ data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
+ }
+ else {
+ data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
+ }
+ data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
+ }
+ }
+ /*mode support checks:*/
+ /*the number of graphics and underlay pipes is limited by the ip support*/
+ /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
+ /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
+ /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
+ /*the number of lines in the line buffer has to exceed the number of vertical taps*/
+ /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 24.011719 / 3, rounded up to a multiple of 48*/
+ /*frame buffer compression is not supported with stereo mode, rotation, or non-888 formats*/
+ /*rotation is not supported with linear or stereo modes*/
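+ /*for example, with 8 horizontal taps and the pre-downscaler enabled, any hsr in (1, ceil(8 / 4) = 2] fails the hsr check,*/
+ /*and any hsr or vsr above 4 or above the corresponding tap count always fails*/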
+ if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
+ pipe_check = bw_def_ok;
+ }
+ else {
+ pipe_check = bw_def_notok;
+ }
+ hsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
+ hsr_check = bw_def_hsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->hsr[i], data->h_taps[i])) {
+ hsr_check = bw_def_hsr_mtn_h_taps;
+ }
+ else {
+ if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
+ hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
+ }
+ }
+ }
+ }
+ }
+ }
+ vsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
+ vsr_check = bw_def_vsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->vsr[i], data->v_taps[i])) {
+ vsr_check = bw_def_vsr_mtn_v_taps;
+ }
+ }
+ }
+ }
+ }
+ lb_size_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
+ data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
+ }
+ else {
+ data->source_width_in_lb = data->source_width_pixels[i];
+ }
+ switch (data->lb_bpc[i]) {
+ case 8:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ case 10:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ default:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
+ break;
+ }
+ data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
+ /*clamp the partitions to the maximum number supported by the lb*/
+ if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->lb_partitions_max[i] = bw_int_to_fixed(10);
+ }
+ else {
+ data->lb_partitions_max[i] = bw_int_to_fixed(7);
+ }
+ data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
+ if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
+ lb_size_check = bw_def_notok;
+ }
+ }
+ }
+ fbc_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
+ fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
+ }
+ }
+ rotation_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
+ rotation_check = bw_def_invalid_linear_or_stereo_mode;
+ }
+ }
+ }
+ if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
+ mode_check = bw_def_ok;
+ }
+ else {
+ mode_check = bw_def_notok;
+ }
+ /*number of memory channels for write-back client*/
+ data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
+ data->number_of_dram_channels = vbios->number_of_dram_channels;
+ /*modify number of memory channels if lpt mode is enabled*/
+ /* low power tiling mode register*/
+ /* 0 = use channel 0*/
+ /* 1 = use channel 0 and 1*/
+ /* 2 = use channel 0,1,2,3*/
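+ /*for example, with fbc and lpt both enabled and low power tiling mode 1, only 2 dram channels are used and the dram efficiency is taken as 1*/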
+ if ((fbc_enabled == 1 && lpt_enabled == 1)) {
+ data->dram_efficiency = bw_int_to_fixed(1);
+ if (dceip->low_power_tiling_mode == 0) {
+ data->number_of_dram_channels = 1;
+ }
+ else if (dceip->low_power_tiling_mode == 1) {
+ data->number_of_dram_channels = 2;
+ }
+ else if (dceip->low_power_tiling_mode == 2) {
+ data->number_of_dram_channels = 4;
+ }
+ else {
+ data->number_of_dram_channels = 1;
+ }
+ }
+ else {
+ data->dram_efficiency = bw_frc_to_fixed(8, 10);
+ }
+ /*memory request size and latency hiding:*/
+ /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
+ /*the display write-back requests are single line*/
+ /*for tiled graphics surfaces, or underlay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half of the bytes in the request size are useful in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
+ /*for underlay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
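+ /*for example, a tiled 32 bpp graphics surface rotated orthogonally to the tiling grain uses 32 byte requests of which only 16 bytes are useful*/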
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
+ if ((i < 4)) {
+ /*underlay portrait tiling mode is not supported*/
+ data->orthogonal_rotation[i] = 1;
+ }
+ else {
+ /*graphics portrait tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ else {
+ if ((i < 4)) {
+ /*only landscape tiling mode is supported for underlay*/
+ if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ else {
+ /*graphics landscape tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
+ }
+ else {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(1);
+ }
+ else if (tiling_mode[i] == bw_def_linear) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ }
+ else {
+ if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
+ switch (data->bytes_per_pixel[i]) {
+ case 8:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ if (data->orthogonal_rotation[i]) {
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 4:
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ break;
+ }
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ }
+ else {
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ /*requested peak bandwidth:*/
+ /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
+ /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
+ /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
+ /**/
+ /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
+ /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
+ /**/
+ /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
+ /*rounded up to even and divided by the line times for initialization, which is normally three.*/
+ /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
+ /*rounded up to line pairs if not doing line buffer prefetching.*/
+ /**/
+ /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
+ /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
+ /**/
+ /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
+ /*vertical scale ratio and the number of vertical taps increased by one. add one more for possible odd line*/
+ /*panning/bezel adjustment mode.*/
+ /**/
+ /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
+ /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
+ /*furthermore, there is only one line time for initialization.*/
+ /**/
+ /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
+ /*the ceiling of the vertical scale ratio.*/
+ /**/
+ /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.*/
+ /**/
+ /*the horizontal blank and chunk granularity factor is indirectly used to indicate the interval of time required to transfer the source pixels.*/
+ /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
+ /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/
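+ /*for example, without line buffer prefetching a vsr of 1.25 rounds up to 4/3 lines in per line out in the middle of the frame, and a vsr of 1.6 rounds up to 2*/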
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
+ if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ v_filter_init_mode[i] = bw_def_manual;
+ data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
+ }
+ else {
+ v_filter_init_mode[i] = bw_def_auto;
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ data->num_lines_at_frame_start = bw_int_to_fixed(1);
+ }
+ else {
+ data->num_lines_at_frame_start = bw_int_to_fixed(3);
+ }
+ if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
+ data->line_buffer_prefetch[i] = 1;
+ }
+ else {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
+ if (data->line_buffer_prefetch[i] == 1) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(4, 3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(6, 4))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
+ }
+ else {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
+ }
+ if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
+ }
+ else {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
+ }
+ data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
+ data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
+ }
+ }
+ /*outstanding chunk request limit*/
+ /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
+ /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
+ /**/
+ /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
+ /**/
+ /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
+ /*and underlay.*/
+ /**/
+ /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
+ /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
+ /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
+ /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
+ /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
+ /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
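+ /*for example, a 32 bpp graphics surface with 2-line memory interleaving has a 256 * 2 * 4 = 2048 byte memory chunk*/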
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
+ data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
+ }
+ else {
+ data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
+ }
+ }
+ if (data->fbc_en[i] == 1) {
+ max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ switch (surface_type[i]) {
+ case bw_def_display_write_back420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
+ break;
+ case bw_def_display_write_back420_chroma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
+ break;
+ case bw_def_underlay420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ break;
+ case bw_def_underlay420_chroma:
+ data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
+ break;
+ case bw_def_underlay422:case bw_def_underlay444:
+ if (data->orthogonal_rotation[i] == 0) {
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ }
+ else {
+ data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
+ }
+ break;
+ default:
+ if (data->fbc_en[i] == 1) {
+ /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ else {
+ /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ break;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ }
+ else {
+ data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_dmif_size_in_time = bw_int_to_fixed(9999);
+ data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
+ data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ else {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
+ data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ }
+ }
+ data->total_requests_for_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
+ data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
+ }
+ else {
+ data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+ /*set maximum chunk limit if only one graphic pipe is enabled*/
+ data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
+ }
+ else {
+ data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
+ /*clamp maximum chunk limit in the graphic display pipe*/
+ if ((i >= 4)) {
+ data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
+ }
+ }
+ }
+ }
+ /*outstanding pte request limit*/
+ /*in tiling mode with no rotation the sg pte requests are 8 useful ptes, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
+ /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
+ /*in linear mode the pte requests are 8 useful ptes, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
+ /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
+ /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
+ /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
+ /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
+ /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful ptes per pte request*/
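+ /*for example, a tiled, non-rotated 32 bpp surface uses a 32x32 scatter-gather page with 8 useful ptes per request, giving one pte request per 256 pixel chunk*/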
+ if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ }
+ else {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ if (tiling_mode[i] == bw_def_linear) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
+ data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+ }
+ else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ else {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(1);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
+ data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
+ data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
+ if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
+ data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
+ }
+ else {
+ data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ /*pitch padding recommended for efficiency in linear mode*/
+ /*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
+ /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
+ data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
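+ /*for example, assuming 8 dram banks and 4 dram channels, pitches that are a multiple of 256 * 8 * 4 = 8192 bytes are inefficient*/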
+
+ /*pixel transfer time*/
+ /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
+ /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
+ /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
+ /*the page close-open time is determined by trc and the number of page close-opens*/
+ /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
+ /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
+ /*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/
+ /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
+ /*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
+ /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
+ /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
+ /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
+ /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
+ /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the first pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
+ /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
+ /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
+ /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
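+ /*for example, with a vertical filter initialization value of 3 the source pixels for the first output pixel is 512, and with a value of 5 it is 4 times the rounded-up source width*/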
+ data->cursor_total_data = bw_int_to_fixed(0);
+ data->cursor_total_request_groups = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
+ if (dceip->large_cursor == 1) {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
+ }
+ else {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
+ }
+ if (data->scatter_gather_enable_for_pipe[i]) {
+ data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
+ data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
+ }
+ }
+ }
+ data->tile_width_in_pixels = bw_int_to_fixed(8);
+ data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
+ data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
+ }
+ else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
+ data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
+ }
+ else {
+ data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
+ }
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ else {
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ }
+ }
+ data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
+ data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ }
+ }
+ data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ }
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
+ data->total_display_reads_required_data = bw_int_to_fixed(0);
+ data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
+ /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/
+ /*pseudo-channel may be read independently of one another.*/
+ /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
+ /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
+ /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
+ /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
+ /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
+ /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
+ /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
+ /*the memory efficiency will be 50% for the 32 byte sized data.*/
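+ /*for example, on a 64 bit gddr5 channel a 32 byte request still occupies a full 8 * 64 / 8 = 64 byte burst, so its dram access data is doubled*/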
+ if (vbios->memory_type == bw_def_hbm) {
+ data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
+ }
+ else {
+ data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
+ }
+ data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
+ data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
+ }
+ else {
+ data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
+ data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
+ }
+ else {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
+ }
+ else {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
+ }
+ }
+ data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
+ data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ for (j = 0; j <= 2; j++) {
+ for (k = 0; k <= 7; k++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/
+ /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/
+ data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = bw_int_to_fixed(3);
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
+ }
+ else {
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
+ }
+ }
+ }
+ }
+ }
+ /*cpu c-state and p-state change enable*/
+ /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
+ /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
+ /*condition for the blackout duration:*/
+ /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
+ /*condition for the blackout recovery:*/
+ /* recovery time > dmif burst time + 2 * urgent latency*/
+ /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
+ /* / (dispclk - display bw)*/
+ /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
+ /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
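+ /*for example, if the minimum latency hiding with cursor is 30, the blackout duration 10, the dmif burst time 5 and the line source transfer time 8 (all in the same time units), the blackout duration margin is 30 - 10 - 5 - 8 = 7*/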
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
+ if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ else {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
+ data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
+ if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ else {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpup_state_change_enable = bw_def_yes;
+ if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpuc_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ }
+ else {
+ data->cpup_state_change_enable = bw_def_no;
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ /*nb p-state change enable*/
+ /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
+ /*below the maximum.*/
+ /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
+ /*minus the dmif burst time, minus the source line transfer time*/
+ /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
+ /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
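+ /*informal sketch of the margin computed below (comments only):*/
+ /* maximum_latency_hiding[i] = minimum_latency_hiding[i] + 0.8 * total_dmifmc_urgent_latency (both branches below use 0.8)*/
+ /* dram_speed_change_margin = maximum_latency_hiding_with_cursor[k] - nbp_state_change_latency - dmif_burst_time[i][j]*/
+ /*  - dram_speed_change_line_source_transfer_time[k][i][j] (minus mcifwr_burst_time[i][j] as well for write-back surfaces)*/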
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ else {
+ /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ /*initialize variables*/
+ number_of_displays_enabled = 0;
+ number_of_displays_enabled_with_margin = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ number_of_displays_enabled = number_of_displays_enabled + 1;
+ }
+ data->display_pstate_change_enable[k] = 0;
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
+ data->dram_speed_change_margin = bw_int_to_fixed(9999);
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
+ data->num_displays_with_margin[i][j] = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each set of clock frequencies*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ else {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each display pipe*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ /*determine the number of displays with margin to switch in the v_active region*/
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+ number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
+ }
+ }
+ /*determine the number of displays that don't have any dram clock change margin, but*/
+ /*have the same resolution. these displays can switch in a common vblank region if*/
+ /*their frames are aligned.*/
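+ /*informal sketch (comments only): the v_blank margin computed below is roughly*/
+ /* (v_total - (src_height / v_scale_ratio - 4)) * h_total / pixel_rate - nbp_state_change_latency*/
+ /*  - dmif_burst_time[low][s_low] - dram_speed_change_line_source_transfer_time[k][low][s_low] (minus mcifwr_burst_time[low][s_low] for write-back)*/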
+ data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ else {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ data->displays_with_same_mode[i] = bw_int_to_fixed(0);
+ if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
+ for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
+ if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+ data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
+ }
+ }
+ }
+ }
+ /*compute the maximum number of aligned displays with no margin*/
+ number_of_aligned_displays_with_no_margin = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
+ }
+ /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
+ /*aligned displays with the same timing.*/
+ /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
+ /*displays are in v_blank or v_active.*/
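+ /*informal restatement of the test below (comments only):*/
+ /* nbp_state_change_enable = yes when number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled*/
+ /*  and 0 < min_dram_speed_change_margin[high][s_high] < 9999 and dispclk_required_for_dram_speed_change[high][s_high] < high_voltage_max_dispclk*/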
+ if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+ data->nbp_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->nbp_state_change_enable = bw_def_no;
+ }
+ /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
+ if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+ nbp_state_change_enable_blank = bw_def_yes;
+ }
+ else {
+ nbp_state_change_enable_blank = bw_def_no;
+ }
+ /*required yclk(pclk)*/
+ /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
+ /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
+ /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
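+ /*informal sketch (comments only): the dram bandwidth available at a given yclk level is*/
+ /* dram_efficiency * yclk[level] * dram_channel_width_in_bits / 8 * number_of_dram_channels*/
+ /* the lowest of low/mid/high yclk whose bandwidth covers max(dmif, mcifwr) required bandwidth, while keeping the enabled*/
+ /* blackout and nb p-state margins valid, is selected; otherwise the allowed maximum bandwidth is exceeded*/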
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
+ /* number of cursor lines stored in the cursor data return buffer*/
+ num_cursor_lines = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
+ /*compute number of cursor lines stored in data return buffer*/
+ if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
+ num_cursor_lines = 4;
+ }
+ else {
+ num_cursor_lines = 2;
+ }
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
+ }
+ }
+ }
+ /*compute minimum time to read one chunk from the dmif buffer*/
+ if ((number_of_displays_enabled > 2)) {
+ data->chunk_request_delay = 0;
+ }
+ else {
+ data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
+ }
+ data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
+ data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
+ data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
+ data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
+ data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
+ data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
+ data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
+ if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->low_yclk);
+ data->y_clk_level = low;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->mid_yclk);
+ data->y_clk_level = mid;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+ yclk_message = bw_fixed_to_int(vbios->high_yclk);
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ yclk_message = bw_def_exceeded_allowed_maximum_bw;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ }
+ /*required sclk*/
+ /*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
+ /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
+ /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer sizes through the sclk bus in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
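+ /*informal restatement of the formulas below (comments only):*/
+ /* dmif_required_sclk = total_display_reads_required_data / display_reads_time_for_data_transfer / (data_return_bus_width * bus_efficiency)*/
+ /* mcifwr_required_sclk = total_display_writes_required_data / display_writes_time_for_data_transfer / (data_return_bus_width * bus_efficiency)*/
+ /* the lowest sclk level covering max(dmif, mcifwr) that keeps the enabled blackout and nb p-state margins valid is chosen*/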
+ data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->sclk_level = s_high;
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->sclk_level = s_high;
+ }
+ else {
+ data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
+ if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_low;
+ data->sclk_level = s_low;
+ data->required_sclk = vbios->low_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid1;
+ data->required_sclk = vbios->mid1_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid2;
+ data->required_sclk = vbios->mid2_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid3;
+ data->required_sclk = vbios->mid3_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid4;
+ data->required_sclk = vbios->mid4_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid5;
+ data->required_sclk = vbios->mid5_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid6;
+ data->required_sclk = vbios->mid6_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+ sclk_message = bw_def_high;
+ data->sclk_level = s_high;
+ data->required_sclk = vbios->high_sclk;
+ }
+ else {
+ sclk_message = bw_def_exceeded_allowed_maximum_sclk;
+ data->sclk_level = s_high;
+ /*required_sclk = high_sclk*/
+ }
+ }
+ /*dispclk*/
+ /*if dispclk is set to the maximum, ramping is not required. dispclk required without ramping is less than the dispclk required with ramping.*/
+ /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+ /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+ /*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+ /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
+ /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+ /*the horizontal blank and chunk granularity factor is the ratio of the line time to the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out are not 2 or 4.*/
+ /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+ /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+ /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+ /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+ /*the scaling limits factor itself is also clamped to at least 1*/
+ /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
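+ /*informal sketch of the per-pipe dispclk computed below (comments only):*/
+ /* pipe_pixel_throughput = max(lb_lines_in_per_line_out_in_beginning_of_frame, lb_lines_in_per_line_out_in_middle_of_frame * granularity_factor)*/
+ /*  * source_width_rounded_up_to_chunks / (h_total / pixel_rate) / lb_write_pixels_per_dispclk*/
+ /* dispclk_required_without_ramping = downspread_factor * max(pixel_rate * scaler_limits_factor, display_pipe_throughput_factor * pipe_pixel_throughput)*/
+ /* dispclk_required_with_ramping = dispclk_ramping_factor * max(pixel_rate * scaler_limits_factor, pipe_pixel_throughput)*/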
+ data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
+ data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
+ data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
+ }
+ }
+ data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
+ data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
+ data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
+ }
+ if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
+ data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
+ }
+ }
+ }
+ data->total_read_request_bandwidth = bw_int_to_fixed(0);
+ data->total_write_request_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
+ }
+ else {
+ data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
+ }
+ }
+ }
+ data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ if (data->cpuc_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->cpup_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->nbp_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ }
+ if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
+ }
+ else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = vbios->high_voltage_max_dispclk;
+ }
+ else {
+ data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
+ }
+ /* required core voltage*/
+ /* the core voltage required is low if sclk, yclk(pclk) and dispclk are within the low limits*/
+ /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
+ /* otherwise, the core voltage required is high if the three clocks are within the high limits*/
+ /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
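+ /*informal mapping of the checks below (comments only):*/
+ /* 0.72v: low yclk, low sclk and dispclk below low_voltage_max_dispclk*/
+ /* 0.8v: low or mid yclk, low or mid sclk and dispclk below mid_voltage_max_dispclk*/
+ /* 0.9v: any supported yclk/sclk and dispclk at or below high_voltage_max_dispclk (reported as high_no_nbp_state_change when no nb p-state change is possible)*/
+ /* otherwise notok, or na when the pipe check failed*/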
+ if (pipe_check == bw_def_notok) {
+ voltage = bw_def_na;
+ }
+ else if (mode_check == bw_def_notok) {
+ voltage = bw_def_notok;
+ }
+ else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
+ voltage = bw_def_0_72;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
+ voltage = bw_def_0_8;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
+ if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
+ voltage = bw_def_high_no_nbp_state_change;
+ }
+ else {
+ voltage = bw_def_0_9;
+ }
+ }
+ else {
+ voltage = bw_def_notok;
+ }
+ if (voltage == bw_def_0_72) {
+ data->max_phyclk = vbios->low_voltage_max_phyclk;
+ }
+ else if (voltage == bw_def_0_8) {
+ data->max_phyclk = vbios->mid_voltage_max_phyclk;
+ }
+ else {
+ data->max_phyclk = vbios->high_voltage_max_phyclk;
+ }
+ /*required blackout recovery time*/
+ data->blackout_recovery_time = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ else {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ }
+ }
+ /*sclk deep sleep*/
+ /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
+ /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
+ /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+ /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+ /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
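+ /*informal restatement (comments only): sclk_deep_sleep = max(dispclk * 1.15 / min_pixels_per_data_fifo_entry, total_read_request_bandwidth)*/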
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else if (surface_type[i] == bw_def_graphics) {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ else if (data->orthogonal_rotation[i] == 0) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+ data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+ }
+ }
+ }
+ data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+ /*urgent, stutter and nb-p_state watermark*/
+ /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+ /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. it does not apply to the writeback.*/
+ /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
+ /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
+ /*blackout_duration is added to the urgent watermark*/
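+ /*informal sketch of the dmif-path watermarks computed below (comments only):*/
+ /* urgent = total_dmifmc_urgent_latency + dmif_burst_time + max(pixel transfer time, line source transfer time) + blackout_duration + chunk_request_time + cursor_request_time*/
+ /* stutter exit = urgent + stutter_self_refresh_exit_latency - total_dmifmc_urgent_latency*/
+ /* nb p-state change = nbp_state_change_latency + dmif_burst_time + max(pixel transfer time, dram speed change line source transfer time), or 131000 to force the switch into v_blank*/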
+ data->chunk_request_time = bw_int_to_fixed(0);
+ data->cursor_request_time = bw_int_to_fixed(0);
+ /*compute total time to request one chunk from each active display pipe*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
+ }
+ }
+ /*compute total time to request cursor data*/
+ data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ /*unconditionally remove blackout time from the nb p-state watermark*/
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the v_blank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ else {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+ data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the v_blank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ }
+ }
+ /*stutter mode enable*/
+ /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+ /*display pipe.*/
+ data->stutter_mode_enable = data->cpuc_state_change_enable;
+ if (data->number_of_displays > 1) {
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+ data->stutter_mode_enable = bw_def_no;
+ }
+ }
+ }
+ }
+ /*performance metrics*/
+ /* display read access efficiency (%)*/
+ /* display write back access efficiency (%)*/
+ /* stutter efficiency (%)*/
+ /* extra underlay pitch recommended for efficiency (pixels)*/
+ /* immediate flip time (us)*/
+ /* latency for other clients due to urgent display read (us)*/
+ /* latency for other clients due to urgent display write (us)*/
+ /* average bandwidth consumed by display (no compression) (gb/s)*/
+ /* required dram bandwidth (gb/s)*/
+ /* required sclk (m_hz)*/
+ /* required rd urgent latency (us)*/
+ /* nb p-state change margin (us)*/
+ /*dmif and mcifwr dram access efficiency*/
+ /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
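+ /*informal restatement (comments only): dmifdram_access_efficiency = min(1, (total_display_reads_required_dram_access_data / dram_bandwidth) / dmif_total_page_close_open_time)*/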
+ data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+ if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+ data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+ }
+ else {
+ data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+ }
+ /*average bandwidth*/
+ /*the average bandwidth with no compression is, over the vertical active time, the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
+ /*the average bandwidth with compression is the same, divided by the compression ratio*/
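+ /*informal restatement of the loop below (comments only):*/
+ /* average_bandwidth_no_compression = source_width * bytes_per_pixel / (h_total / pixel_rate) * vsr * bytes_per_request / useful_bytes_per_request*/
+ /* average_bandwidth = average_bandwidth_no_compression / compression_rate*/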
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+ }
+ }
+ data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+ data->total_average_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+ data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+ }
+ }
+ /*stutter efficiency*/
+ /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
+ /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
+ /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
+ /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
+ /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
+ /*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/
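+ /*illustrative example (made-up numbers): 12288 bytes of buffered data at 4 bytes per pixel is 3072 source*/
+ /*pixels; divided by the source width that is the number of buffered lines, and multiplying by the line time*/
+ /*and the compression rate gives the refresh (drain) duration, before the stutter exit watermark correction*/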
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
+ data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
+ }
+ }
+ data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
+ data->total_stutter_dmif_buffer_size = 0;
+ data->total_bytes_requested = 0;
+ data->min_stutter_dmif_buffer_size = 9999;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
+ data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
+ data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
+ data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
+ }
+ data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
+ }
+ }
+ data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+ data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
+ data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
+ data->time_in_self_refresh = data->min_stutter_refresh_duration;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else {
+ /*compute stutter efficiency assuming 60 hz refresh rate*/
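+ /*166666667/10000 = 16666.6667us, i.e. one frame period at 60hz*/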
+ data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
+ }
+ /*immediate flip time*/
+ /*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+ /*otherwise, it may take just one urgent memory trip*/
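+ /*illustrative example (made-up numbers): 128 pte requests in a row with a request limit of 32 means 4*/
+ /*urgent round trips, so the immediate flip takes 4 times the total dmifmc urgent latency*/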
+ data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+ if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+ data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+ }
+ }
+ }
+ data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+ /*worst latency for other clients*/
+ /*it is the urgent latency plus the urgent burst time*/
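+ /*illustrative example (made-up numbers): a 3us urgent latency plus a 5us dmif burst time leaves other*/
+ /*memory clients waiting up to 8us behind an urgent display read*/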
+ data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+ }
+ else {
+ data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+ }
+ /*dmif mc urgent latency supported in high sclk and yclk*/
+ data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+ /*dram speed/p-state change margin*/
+ /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
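+ /*illustrative example (made-up numbers): 60us of latency hiding with a 45us watermark and a 39us nbp state*/
+ /*change latency gives a supported latency of 54us, a 15us margin over the vbios latency*/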
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ }
+ }
+ /*sclk required vs urgent latency*/
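+ /*sweep candidate urgent latencies of 1 to 5 (us, like the other latency figures): the time left for data*/
+ /*transfer is the minimum read buffer size in time minus the urgent trips times the candidate latency, and*/
+ /*the required sclk follows from the read data divided by that time and the return bus width*/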
+ for (i = 1; i <= 5; i++) {
+ data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+ if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ }
+ else {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+ }
+ }
+ /*output link bit per pixel supported*/
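+ /*for example: a 4-lane hbr link carries 270mhz symbol clock x 4 lanes x 8 payload bits per symbol, so the*/
+ /*supported bits per pixel is that rate divided by the pixel rate; hbr2 and hbr3 scale the same way at 540*/
+ /*and 810mhz, and the hdmi case caps the tmds clock at 600mhz and assumes 24 bits per pixel*/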
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ data->output_bpphdmi[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr2[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+ if (data->enable[k]) {
+ data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+ data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
+ data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
+ data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ }
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id)
+{
+ struct bw_calcs_dceip dceip = { 0 };
+ struct bw_calcs_vbios vbios = { 0 };
+
+ enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
+
+ dceip.version = version;
+
+ switch (version) {
+ case BW_CALCS_VERSION_CARRIZO:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1600);
+ vbios.mid_yclk = bw_int_to_fixed(1600);
+ vbios.low_yclk = bw_frc_to_fixed(66666, 100);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(300);
+ vbios.mid2_sclk = bw_int_to_fixed(300);
+ vbios.mid3_sclk = bw_int_to_fixed(300);
+ vbios.mid4_sclk = bw_int_to_fixed(300);
+ vbios.mid5_sclk = bw_int_to_fixed(300);
+ vbios.mid6_sclk = bw_int_to_fixed(300);
+ vbios.high_sclk = bw_frc_to_fixed(62609, 100);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 3;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = false;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ break;
+ case BW_CALCS_VERSION_POLARIS10:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_POLARIS11:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ if (vbios.number_of_dram_channels == 2) // 64-bit
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ else
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 5;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_STONEY:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1866);
+ vbios.mid_yclk = bw_int_to_fixed(1866);
+ vbios.low_yclk = bw_int_to_fixed(1333);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(600);
+ vbios.mid2_sclk = bw_int_to_fixed(600);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(600);
+ vbios.mid6_sclk = bw_int_to_fixed(600);
+ vbios.high_sclk = bw_int_to_fixed(800);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 2;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_VEGA10:
+ vbios.memory_type = bw_def_hbm;
+ vbios.dram_channel_width_in_bits = 128;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 16;
+ vbios.high_yclk = bw_int_to_fixed(2400);
+ vbios.mid_yclk = bw_int_to_fixed(1700);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(350);
+ vbios.mid2_sclk = bw_int_to_fixed(400);
+ vbios.mid3_sclk = bw_int_to_fixed(500);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(700);
+ vbios.mid6_sclk = bw_int_to_fixed(760);
+ vbios.high_sclk = bw_int_to_fixed(776);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(39);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = false;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = true;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = true;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 24576;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = false;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ default:
+ break;
+ }
+ *bw_dceip = dceip;
+ *bw_vbios = vbios;
+
+}
+
+/**
+ * Compare calculated (required) clocks against the clocks available at
+ * maximum voltage (max Performance Level).
+ */
+static bool is_display_configuration_supported(
+ const struct bw_calcs_vbios *vbios,
+ const struct dce_bw_output *calcs_output)
+{
+ uint32_t int_max_clk;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
+ int_max_clk *= 1000; /* MHz to kHz */
+ if (calcs_output->dispclk_khz > int_max_clk)
+ return false;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_sclk);
+ int_max_clk *= 1000; /* MHz to kHz */
+ if (calcs_output->sclk_khz > int_max_clk)
+ return false;
+
+ return true;
+}
+
+static void populate_initial_data(
+ const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
+{
+ int i, j;
+ int num_displays = 0;
+
+ data->underlay_surface_type = bw_def_420;
+ data->panning_and_bezel_adjustment = bw_def_none;
+ data->graphics_lb_bpc = 10;
+ data->underlay_lb_bpc = 8;
+ data->underlay_tiling_mode = bw_def_tiled;
+ data->graphics_tiling_mode = bw_def_tiled;
+ data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
+ data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+
+ /* Pipes with underlay first */
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || !pipe[i].bottom_pipe)
+ continue;
+
+ ASSERT(pipe[i].plane_state);
+
+ if (num_displays == 0) {
+ if (!pipe[i].plane_state->visible)
+ data->d0_underlay_mode = bw_def_underlay_only;
+ else
+ data->d0_underlay_mode = bw_def_blend;
+ } else {
+ if (!pipe[i].plane_state->visible)
+ data->d1_underlay_mode = bw_def_underlay_only;
+ else
+ data->d1_underlay_mode = bw_def_blend;
+ }
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+
+
+ for (j = 0; j < 2; j++) {
+ data->fbc_en[num_displays * 2 + j] = false;
+ data->lpt_en[num_displays * 2 + j] = false;
+
+ data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
+ data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
+ pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch);
+ data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].bottom_pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
+ }
+
+ num_displays++;
+ }
+
+ /* Pipes without underlay after */
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || pipe[i].bottom_pipe)
+ continue;
+
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ if (pipe[i].plane_state) {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ } else {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ }
+
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+ num_displays++;
+ }
+
+ data->number_of_displays = num_displays;
+}
+
+/**
+ * Return:
+ * true - Display(s) configuration supported.
+ * In this case 'calcs_output' contains data for HW programming
+ * false - Display(s) configuration not supported (not enough bandwidth).
+ */
+
+bool bw_calcs(struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx pipe[],
+ int pipe_count,
+ struct dce_bw_output *calcs_output)
+{
+ struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
+ GFP_KERNEL);
+ if (!data)
+ return false;
+
+ populate_initial_data(pipe, pipe_count, data);
+
+ /*TODO: this should be taken out of calcs output and assigned during timing sync for pplib use*/
+ calcs_output->all_displays_in_sync = false;
+
+ if (data->number_of_displays != 0) {
+ uint8_t yclk_lvl, sclk_lvl;
+ struct bw_fixed high_sclk = vbios->high_sclk;
+ struct bw_fixed mid1_sclk = vbios->mid1_sclk;
+ struct bw_fixed mid2_sclk = vbios->mid2_sclk;
+ struct bw_fixed mid3_sclk = vbios->mid3_sclk;
+ struct bw_fixed mid4_sclk = vbios->mid4_sclk;
+ struct bw_fixed mid5_sclk = vbios->mid5_sclk;
+ struct bw_fixed mid6_sclk = vbios->mid6_sclk;
+ struct bw_fixed low_sclk = vbios->low_sclk;
+ struct bw_fixed high_yclk = vbios->high_yclk;
+ struct bw_fixed mid_yclk = vbios->mid_yclk;
+ struct bw_fixed low_yclk = vbios->low_yclk;
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ yclk_lvl = data->y_clk_level;
+ sclk_lvl = data->sclk_level;
+
+ calcs_output->nbp_state_change_enable =
+ data->nbp_state_change_enable;
+ calcs_output->cpuc_state_change_enable =
+ data->cpuc_state_change_enable;
+ calcs_output->cpup_state_change_enable =
+ data->cpup_state_change_enable;
+ calcs_output->stutter_mode_enable =
+ data->stutter_mode_enable;
+ calcs_output->dispclk_khz =
+ bw_fixed_to_int(bw_mul(data->dispclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->blackout_recovery_time_us =
+ bw_fixed_to_int(data->blackout_recovery_time);
+ calcs_output->sclk_khz =
+ bw_fixed_to_int(bw_mul(data->required_sclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->sclk_deep_sleep_khz =
+ bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
+ bw_int_to_fixed(1000)));
+ if (yclk_lvl == 0)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(low_yclk, bw_int_to_fixed(1000)));
+ else if (yclk_lvl == 1)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(mid_yclk, bw_int_to_fixed(1000)));
+ else
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(high_yclk, bw_int_to_fixed(1000)));
+
+ /* units: nanosecond, 16bit storage. */
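+ /* the calculation above works in microseconds; the multiply by 1000 below converts to nanoseconds. */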
+
+ calcs_output->nbp_state_change_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->stutter_exit_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->urgent_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->stutter_exit_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+
+ calcs_output->urgent_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->stutter_exit_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->urgent_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+ }
+
+ if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
+ } else {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ }
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->stutter_exit_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->urgent_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
+ ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
+ } else {
+ calcs_output->nbp_state_change_enable = true;
+ calcs_output->cpuc_state_change_enable = true;
+ calcs_output->cpup_state_change_enable = true;
+ calcs_output->stutter_mode_enable = true;
+ calcs_output->dispclk_khz = 0;
+ calcs_output->sclk_khz = 0;
+ }
+
+ kfree(data);
+
+ return is_display_configuration_supported(vbios, calcs_output);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
new file mode 100644
index 000000000000..626f9cf8aad2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -0,0 +1,1899 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calc_auto.h"
+#include "dcn_calc_math.h"
+
+/*REVISION#250*/
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
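+	/* Scale ratios: viewport size over scaler recout size, with the axes
+	 * swapped for rotated (vertical scan) sources; interlaced output doubles
+	 * the vertical ratio and underscan applies an extra factor. */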
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_different_hratio_vratio == dcn_bw_yes) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k];
+ }
+ else {
+ v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k];
+ }
+ }
+ else {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]);
+ }
+ else {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]);
+ }
+ v->v_ratio[k] = v->h_ratio[k];
+ }
+ if (v->interlace_output[k] == 1.0) {
+ v->v_ratio[k] = 2.0 * v->v_ratio[k];
+ }
+ if ((v->underscan_output[k] == 1.0)) {
+ v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
+ v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
+ }
+ }
+ /*scaler taps calculation*/
+
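+	/* Tap counts: downscaling uses 2 * ceil(ratio) capped at the hardware
+	 * maximum, upscaling uses 4 taps and 1:1 uses 1 tap; chroma taps apply
+	 * only to non-RGB surfaces, and all taps can be overridden per plane. */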
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0));
+ }
+ else if (v->h_ratio[k] < 1.0) {
+ v->acceptable_quality_hta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_hta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->htaps[k] = v->override_hta_ps[k];
+ }
+ else {
+ v->htaps[k] = v->acceptable_quality_hta_ps;
+ }
+ if (v->v_ratio[k] > 1.0) {
+ v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0));
+ }
+ else if (v->v_ratio[k] < 1.0) {
+ v->acceptable_quality_vta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_vta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vtaps[k] = v->override_vta_ps[k];
+ }
+ else {
+ v->vtaps[k] = v->acceptable_quality_vta_ps;
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->vta_pschroma[k] = 0.0;
+ v->hta_pschroma[k] = 0.0;
+ }
+ else {
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vta_pschroma[k] = v->override_vta_pschroma[k];
+ v->hta_pschroma[k] = v->override_hta_pschroma[k];
+ }
+ else {
+ v->vta_pschroma[k] = v->acceptable_quality_vta_ps;
+ v->hta_pschroma[k] = v->acceptable_quality_hta_ps;
+ }
+ }
+ }
+}
+
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
+{
+ int i;
+ int j;
+ int k;
+ /*mode support, voltage state and soc configuration*/
+
+ /*scale ratio support check*/
+
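+	/* A plane fails this check if it exceeds the maximum scaler ratios,
+	 * needs a larger ratio than its tap count supports, or (for YCbCr) its
+	 * half-rate chroma ratio exceeds the chroma tap count. */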
+ v->scale_ratio_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) {
+ v->scale_ratio_support = dcn_bw_no;
+ }
+ }
+ /*source format, pixel format and scan support check*/
+
+ v->source_format_pixel_and_scan_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) {
+ v->source_format_pixel_and_scan_support = dcn_bw_no;
+ }
+ }
+ /*bandwidth support check*/
+
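+	/* Per-plane read bandwidth: swath width times bytes per pixel times the
+	 * vertical scale ratio (luma plus half-rate chroma) over the line time;
+	 * DCC and PTE fetches account for a small additional request overhead. */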
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_ysingle_dpp[k] = v->viewport_width[k];
+ }
+ else {
+ v->swath_width_ysingle_dpp[k] = v->viewport_height[k];
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_in_dety[k] = 8.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_in_dety[k] = 4.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_in_dety[k] = 2.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_in_dety[k] = 1.0;
+ v->byte_per_pixel_in_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]);
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 256);
+ }
+ if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 64);
+ }
+ else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 256);
+ }
+ else if (v->pte_enable == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 512);
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0;
+ }
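+	/* Writeback consumes 4 bytes per output pixel for 4:4:4 and 1.5 bytes
+	 * per pixel for 4:2:0, at the stream's output pixel rate. */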
+ v->total_write_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ else {
+ v->write_bandwidth[k] = 0.0;
+ }
+ v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0;
+ }
+ v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second;
+ v->dcc_enabled_in_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_in_any_plane = dcn_bw_yes;
+ }
+ }
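+	/* Return bandwidth per state: the smaller of the return bus throughput
+	 * (bus width * DCFCLK) and the derated fabric/DRAM bandwidth, further
+	 * derated when DCC is enabled based on ROB size and urgent latency. */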
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->return_bw_per_state[i] = v->return_bw_todcn_per_state;
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) {
+ v->bandwidth_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->bandwidth_support[i] = dcn_bw_no;
+ }
+ }
+ /*writeback latency support check*/
+
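+	/* Writeback latency fails if the writeback data rate exceeds what the
+	 * luma/chroma writeback buffers can absorb within the writeback latency. */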
+ v->writeback_latency_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ }
+ /*re-ordering buffer support check*/
+
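+	/* The re-ordering buffer must be large enough to hide the urgent round
+	 * trip plus out-of-order return latency at the state's return bandwidth. */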
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i];
+ if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) {
+ v->rob_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->rob_support[i] = dcn_bw_no;
+ }
+ }
+ /*display io support check*/
+
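+	/* Required PHY clock per stream: from the pixel rate and output format
+	 * (DSC-capable DP needs only the pixel rate, 4:2:0 halves the rate),
+	 * scaled up for HDMI deep color, then divided by 3 for HDMI or 4 for DP;
+	 * a state fails DIO support if any stream needs more PHY clock than it
+	 * provides, with HDMI additionally capped at 600 MHz. */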
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) {
+ if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k];
+ }
+ }
+ else if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k] * 3.0;
+ }
+ if (v->output[k] == dcn_bw_hdmi) {
+ v->required_phyclk[k] = v->required_output_bw;
+ switch (v->output_deep_color[k]) {
+ case dcn_bw_encoder_10bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4;
+ break;
+ case dcn_bw_encoder_12bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2;
+ break;
+ default:
+ break;
+ }
+ v->required_phyclk[k] = v->required_phyclk[k] / 3.0;
+ }
+ else if (v->output[k] == dcn_bw_dp) {
+ v->required_phyclk[k] = v->required_output_bw / 4.0;
+ }
+ else {
+ v->required_phyclk[k] = 0.0;
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->dio_support[i] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) {
+ v->dio_support[i] = dcn_bw_no;
+ }
+ }
+ }
+ /*total available writeback support check*/
+
+ v->total_number_of_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0;
+ }
+ }
+ if (v->total_number_of_active_writeback <= v->max_num_writeback) {
+ v->total_available_writeback_support = dcn_bw_yes;
+ }
+ else {
+ v->total_available_writeback_support = dcn_bw_no;
+ }
+ /*maximum dispclk/dppclk support check*/
+
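+	/* Minimum DPPCLK per plane follows from the scaler throughput limits:
+	 * vertical taps, scale ratios and the PSCL throughput factor (separately
+	 * for luma and half-rate chroma). */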
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->pscl_factor_chroma[k] = 0.0;
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0);
+ }
+ else {
+ if (v->h_ratio[k] / 2.0 > 1.0) {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0);
+ }
+ }
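+	/* Derive the 256-byte request block dimensions and the minimum/maximum
+	 * swath heights from the surface format and swizzle mode, then work out
+	 * how many DPPs the DET and line buffer sizing would require per plane. */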
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_block_height_y[k] = 4.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_height_c[k] = 0.0;
+ v->read256_block_width_c[k] = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ v->read256_block_height_c[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_block_height_y[k] = 16.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k];
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->max_swath_height_y[k] = v->read256_block_height_y[k];
+ v->max_swath_height_c[k] = v->read256_block_height_c[k];
+ }
+ else {
+ v->max_swath_height_y[k] = v->read256_block_width_y[k];
+ v->max_swath_height_c[k] = v->read256_block_width_c[k];
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ }
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->maximum_swath_width = 8192.0;
+ }
+ else {
+ v->maximum_swath_width = 5120.0;
+ }
+ v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0);
+ }
+ else {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0));
+ }
+ v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size);
+ }
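+	/* For each voltage state i and DISPCLK:DPPCLK ratio (j = 0 is 1:1,
+	 * j = 1 is 2:1), assign one or two DPPs per plane so the required
+	 * DISPCLK, including downspread and (for lower states) ramping margin,
+	 * stays within the state's DISPCLK and DPPCLK limits. */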
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (v->odm_capability == dcn_bw_yes) {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ else {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ }
+ else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
+ /*viewport size check*/
+
+ v->viewport_size_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) {
+ v->viewport_size_support = dcn_bw_no;
+ }
+ }
+ /*total available pipes support check*/
+
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ if (v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) {
+ v->total_available_pipes_support[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->total_available_pipes_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ /*urgent latency support check*/
+
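+	/* Urgent latency is supported when the data held in the DET and line
+	 * buffer lasts longer, at the display consumption rate, than the time to
+	 * refill it at the state's return bandwidth plus the urgent latency. */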
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k];
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k];
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->max_swath_height_c[k] > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k];
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = 0.0;
+ }
+ else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ else {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
+ v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->urgent_latency_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) {
+ v->urgent_latency_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*prefetch check*/
+
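+	/* Prefetch check: estimate the deep-sleep DCFCLK, DCC meta and PTE row
+	 * sizes, and the number of lines to prefetch, then verify the prefetch
+	 * bandwidth (with and without immediate flip) fits within the state's
+	 * return bandwidth and the prefetch VRATIOs stay within scaler limits. */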
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_dcc_active_dpp[i][j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->projected_dcfclk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ else {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_y = 8.0 * v->read256_block_height_y[k];
+ v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y;
+ v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y;
+ v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ v->meta_row_bytes_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_y = 256.0;
+ v->macro_tile_block_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 4096.0;
+ v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 64.0 * 1024;
+ v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_y = 256.0 * 1024;
+ v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k];
+ }
+ if (v->macro_tile_block_size_bytes_y <= 65536.0) {
+ v->data_pte_req_height_y = v->macro_tile_block_height_y;
+ }
+ else {
+ v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_c = 8.0 * v->read256_block_height_c[k];
+ v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c;
+ v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c;
+ v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_c = 256.0;
+ v->macro_tile_block_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 4096.0;
+ v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k];
+ }
+ v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c;
+ if (v->macro_tile_block_size_bytes_c <= 65536.0) {
+ v->data_pte_req_height_c = v->macro_tile_block_height_c;
+ }
+ else {
+ v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c;
+ v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c;
+ v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c;
+ v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0;
+ v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0);
+ v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_y[k] > 1.0) {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y);
+ v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0;
+ v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0);
+ v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_c[k] > 1.0) {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c);
+ v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c;
+ }
+ else {
+ v->prefetch_lines_c[k] = 0.0;
+ }
+ v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j];
+ if (v->no_of_dpp[i][j][k] > 1.0) {
+ v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dst_y_after_scaler = 1.0;
+ }
+ else {
+ v->dst_y_after_scaler = 0.0;
+ }
+ v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
+ v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
+ v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
+ if (v->pte_enable == dcn_bw_yes) {
+ v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
+ }
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
+ v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
+ v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
+ v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]);
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_immediate_flip_bytes[k] = 0.0;
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k];
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency);
+ }
+ else {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
+ }
+ v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
+ v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0;
+ }
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ else {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ }
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) {
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*mode support, voltage state and soc configuration*/
+
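+ /*
+  * For each voltage state i and dispclk/dppclk ratio j, the mode is
+  * supported only when every per-feature check above and the prefetch
+  * bandwidth / prefetch v-ratio checks all pass. The states are then
+  * scanned downward so the lowest state at or above voltage_override_level
+  * that supports the mode is chosen (state number_of_states_plus_one is
+  * always accepted as a fallback). If only that fallback supports
+  * immediate flip, immediate flip is reported unsupported and the no-flip
+  * level is used; the chosen level's dcfclk, fabric/dram bandwidth,
+  * per-ratio dispclk, dpp counts and max phyclk are then latched.
+  */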
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ for (j = 0; j <= 1; j++) {
+ if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) {
+ if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_with_immediate_flip = i;
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_without_immediate_flip = i;
+ }
+ }
+ if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) {
+ v->immediate_flip_supported = dcn_bw_no;
+ v->voltage_level = v->voltage_level_without_immediate_flip;
+ }
+ else {
+ v->immediate_flip_supported = dcn_bw_yes;
+ v->voltage_level = v->voltage_level_with_immediate_flip;
+ }
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ for (j = 0; j <= 1; j++) {
+ v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k];
+ }
+ v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j];
+ }
+ v->max_phyclk = v->phyclk_per_state[v->voltage_level];
+}
+void display_pipe_configuration(struct dcn_bw_internal_vars *v)
+{
+ int j;
+ int k;
+ /*display pipe configuration*/
+
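+ /*
+  * Choose the dispclk/dppclk ratio: ratio 1 is used when it is the only
+  * supported ratio, or when both ratios are equally supported and ratio 1
+  * needs fewer total pipes (or the same number with at most half the
+  * required dispclk of ratio 2); otherwise ratio 2 is used. The per-plane
+  * dpp counts of the chosen ratio are copied, and for each plane the
+  * bytes per pixel, 256-byte request block dimensions and min/max swath
+  * heights are derived. The maximum swath heights are kept only if one
+  * luma plus one chroma swath fits in half of the DET; the DET is then
+  * split between luma and chroma (all luma, half/half, or 2/3 vs 1/3)
+  * depending on the swath heights.
+  */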
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp_per_ratio[j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k];
+ }
+ }
+ if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) {
+ v->dispclk_dppclk_ratio = 1;
+ v->final_error_message = v->error_message[0];
+ }
+ else {
+ v->dispclk_dppclk_ratio = 2;
+ v->final_error_message = v->error_message[1];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pix_dety = 8.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pix_dety = 4.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pix_dety = 2.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pix_dety = 1.0;
+ v->byte_per_pix_detc = 2.0;
+ }
+ else {
+ v->byte_per_pix_dety = 4.0f / 3.0f;
+ v->byte_per_pix_detc = 8.0f / 3.0f;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_bytes_block_height_y = 4.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_height_c = 0.0;
+ v->read256_bytes_block_width_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ v->read256_bytes_block_height_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_bytes_block_height_y = 16.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->maximum_swath_height_y = v->read256_bytes_block_height_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_height_c;
+ }
+ else {
+ v->maximum_swath_height_y = v->read256_bytes_block_width_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_width_c;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y;
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->maximum_swath_height_c > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_y[k] = v->maximum_swath_height_y;
+ v->swath_height_c[k] = v->maximum_swath_height_c;
+ }
+ else {
+ v->swath_height_y[k] = v->minimum_swath_height_y;
+ v->swath_height_c[k] = v->minimum_swath_height_c;
+ }
+ if (v->swath_height_c[k] == 0.0) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0;
+ v->det_buffer_size_c[k] = 0.0;
+ }
+ else if (v->swath_height_y[k] <= v->swath_height_c[k]) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ }
+ else {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0;
+ }
+ }
+}
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
+ /*dispclk and dppclk calculation*/
+
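+ /*
+  * For each plane, compute the scaler (PSCL) throughput and the dppclk a
+  * single DPP would need for luma and, for 4:2:0 formats, chroma, then
+  * derive the dispclk needed with and without ramping margin (both
+  * include downspread; with ODM the pixel-clock requirement is divided
+  * across the plane's pipes). If even the no-ramping value exceeds the
+  * highest-state max dispclk it is used as-is, otherwise the ramping
+  * value is used, capped at that max; dppclk then follows from the
+  * dispclk/dppclk ratio.
+  */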
+ v->dispclk_with_ramping = 0.0;
+ v->dispclk_without_ramping = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0);
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->pscl_throughput_chroma[k] = 0.0;
+ v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma;
+ }
+ else {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0);
+ v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma);
+ }
+ if (v->odm_capable == dcn_bw_yes) {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ else {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ }
+ if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->dispclk_without_ramping;
+ }
+ else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->max_dispclk[number_of_states];
+ }
+ else {
+ v->dispclk = v->dispclk_with_ramping;
+ }
+ v->dppclk = v->dispclk / v->dispclk_dppclk_ratio;
+ /*urgent watermark*/
+
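+ /*
+  * Return bandwidth to DCN is the lesser of the return bus (bus width x
+  * dcfclk) and the fabric/dram bandwidth; it is evaluated both with and
+  * without the post-urgent-latency derating, and when DCC is enabled on
+  * any plane additional compression-dependent clamps are applied to each.
+  * From the per-plane read bandwidths, chunk sizes and round-trip latency
+  * the code then derives the last-pixel-of-line extra watermark (fabric
+  * line delivery slower than display pipe line delivery), the urgent
+  * extra latency, the urgent watermark and the pte/meta urgent watermark.
+  */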
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->dcc_enabled_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_any_plane = dcn_bw_yes;
+ }
+ }
+ v->return_bw = v->return_bandwidth_to_dcn;
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_dety[k] = 8.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_dety[k] = 4.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_dety[k] = 2.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_dety[k] = 1.0;
+ v->byte_per_pixel_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_data_read_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
+ v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
+ v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
+ }
+ v->total_active_dpp = 0.0;
+ v->total_dcc_active_dpp = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k];
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k];
+ }
+ }
+ v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw;
+ v->last_pixel_of_line_extra_watermark = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]);
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ }
+ v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw;
+ }
+ v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency;
+ /*northbridge p-state/dram clock change watermark*/
+
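+ /*
+  * The dram clock change watermark is the change latency plus the urgent
+  * watermark; the writeback variant adds the writeback latency and, when
+  * more than one writeback is active, the time to drain a writeback chunk
+  * at 32 bytes per socclk cycle.
+  */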
+ v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark;
+ v->total_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_active_writeback = v->total_active_writeback + 1.0;
+ }
+ }
+ if (v->total_active_writeback <= 1.0) {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency;
+ }
+ else {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk;
+ }
+ /*stutter efficiency*/
+
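+ /*
+  * Stutter efficiency: for each plane compute how long a full DET can
+  * feed the pipe (lines in the DET rounded down to a swath multiple,
+  * converted to time via the line time and v-ratio), take the smallest
+  * such time and its frame time, and estimate the average read bandwidth
+  * including DCC compression and the meta (1/256) and pte (1/512)
+  * overheads. From the portion of a burst that fits in the ROB the
+  * stutter burst time is derived; efficiency excluding vblank is the
+  * fraction of the buffering time not spent in sr-exit plus that burst
+  * (zero if any writeback is active), and the final figure folds in the
+  * smallest vblank.
+  */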
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k];
+ v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]);
+ v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k];
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0);
+ v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]);
+ v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0);
+ }
+ else {
+ v->lines_in_detc[k] = 0.0;
+ v->lines_in_detc_rounded_down_to_swath[k] = 0.0;
+ v->full_det_buffering_time_c[k] = 999999.0;
+ }
+ }
+ v->min_full_det_buffering_time = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_y[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_c[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ }
+ v->average_read_bandwidth_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0;
+ }
+ else {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0;
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0;
+ }
+ }
+ v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0));
+ v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0);
+ if (v->total_active_writeback == 0.0) {
+ v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0;
+ }
+ else {
+ v->stutter_efficiency_not_including_vblank = 0.0;
+ }
+ v->smallest_vblank = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k];
+ }
+ else {
+ v->v_blank_time = 0.0;
+ }
+ v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time);
+ }
+ v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0;
+ /*dcfclk deep sleep*/
+
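+ /*
+  * Minimum dcfclk in deep sleep: each plane must be able to stream one
+  * swath line within its line delivery time with ~10% margin, apparently
+  * assuming 32 bytes per cycle for luma and chroma (64 bytes per cycle
+  * for luma-only planes), and never less than pixel_clock/16; the overall
+  * deep-sleep clock is the largest per-plane requirement, floored at 8.
+  */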
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ else {
+ v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k];
+ }
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0);
+ }
+ v->dcf_clk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]);
+ }
+ /*stutter watermark*/
+
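+ /*
+  * The stutter exit watermark adds the sr-exit time, the last-pixel-of-
+  * line extra watermark, the urgent extra latency and ten cycles of the
+  * deep-sleep dcfclk; the enter-plus-exit watermark uses the combined
+  * sr enter/exit time without the ten-cycle term.
+  */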
+ v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / v->dcf_clk_deep_sleep;
+ v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ /*urgent latency supported*/
+
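+ /*
+  * For each plane, the effective DET plus line-buffer lines (floored to a
+  * swath multiple) give the time the plane can hide an urgent request:
+  * the time those lines take to display minus the time needed to refill
+  * them at the plane's share of the return bandwidth. The chroma figure
+  * is computed the same way and the smaller of the two is kept, with the
+  * minimum over all planes taken just below.
+  */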
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]);
+ v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]);
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]);
+ v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]);
+ v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma);
+ }
+ else {
+ v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma;
+ }
+ }
+ v->min_urgent_latency_support_us = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]);
+ }
+ /*non-urgent latency tolerance*/
+
+ v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark;
+ /*prefetch*/
+
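+ /*
+  * Prefetch scheduling: first rebuild, for the chosen configuration, the
+  * per-plane DCC meta and page-table request geometry (meta pte bytes per
+  * frame, meta row bytes, pixel pte bytes per row) and the number of
+  * source lines that must be prefetched for the initial fill. Two nested
+  * loops then search for a workable schedule: roughly, the outer loop
+  * steps the prefetch mode from 0 (cover dram clock change) through 1
+  * (cover self refresh only) to 2 (urgent latency only), and the inner
+  * loop grows vstartup from 13 lines, computing the vm/row fetch times,
+  * the prefetch v-ratios (which must stay at or below 4) and the required
+  * prefetch bandwidth, until the schedule fits in the return bandwidth or
+  * raising vstartup further cannot help.
+  */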
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->block_height256_bytes_y = 4.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ }
+ v->block_height256_bytes_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ v->block_height256_bytes_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->block_height256_bytes_y = 16.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y);
+ v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y;
+ v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ v->meta_row_byte_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_byte_y = 256.0;
+ v->macro_tile_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_byte_y = 4096.0;
+ v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_byte_y = 64.0 * 1024;
+ v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ else {
+ v->macro_tile_size_byte_y = 256.0 * 1024;
+ v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y;
+ }
+ if (v->macro_tile_size_byte_y <= 65536.0) {
+ v->pixel_pte_req_height_y = v->macro_tile_height_y;
+ }
+ else {
+ v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c);
+ v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c;
+ v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_bytes_c = 256.0;
+ v->macro_tile_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_bytes_c = 4096.0;
+ v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ else {
+ v->macro_tile_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c;
+ }
+ if (v->macro_tile_size_bytes_c <= 65536.0) {
+ v->pixel_pte_req_height_c = v->macro_tile_height_c;
+ }
+ else {
+ v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c;
+ v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c;
+ v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c;
+ v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0);
+ v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1;
+ if (v->v_init_pre_fill_y[k] > 1.0) {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ else {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y);
+ v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0);
+ v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1;
+ if (v->v_init_pre_fill_c[k] > 1.0) {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ else {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c);
+ }
+ else {
+ v->max_num_swath_c[k] = 0.0;
+ v->max_partial_swath_c = 0.0;
+ }
+ v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c;
+ }
+ v->t_calc = 24.0 / v->dcf_clk_deep_sleep;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ }
+ v->next_prefetch_mode = 0.0;
+ do {
+ v->v_startup_lines = 13.0;
+ do {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes;
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no;
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->v_ratio_prefetch_more_than4 = dcn_bw_no;
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->prefetch_mode = v->next_prefetch_mode;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk;
+ if (v->dpp_per_plane[k] > 1.0) {
+ v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dsty_after_scaler = 1.0;
+ }
+ else {
+ v->dsty_after_scaler = 0.0;
+ }
+ v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
+ v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+ v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+ v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
+ v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
+ if (v->prefetch_mode == 0.0) {
+ v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else {
+ v->t_wait = v->urgent_latency;
+ }
+ v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4;
+ if (v->destination_lines_for_prefetch[k] > 0.0) {
+ v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->prefetch_bandwidth[k] = 999999.0;
+ }
+ }
+ v->bandwidth_available_for_immediate_flip = v->return_bw;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]);
+ }
+ v->tot_immediate_flip_bytes = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k];
+ }
+ }
+ v->max_rd_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ }
+ else {
+ v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k];
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_y[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_c[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->required_prefetch_pix_data_bw = 999999.0;
+ }
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw);
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->v_ratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ if (v->max_vstartup_lines[k] > v->v_startup_lines) {
+ if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no;
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ }
+ }
+ if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) {
+ v->prefetch_mode_supported = dcn_bw_yes;
+ }
+ else {
+ v->prefetch_mode_supported = dcn_bw_no;
+ }
+ v->v_startup_lines = v->v_startup_lines + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || (v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no)));
+ v->next_prefetch_mode = v->next_prefetch_mode + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0));
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio_prefetch_y[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio_prefetch_c[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ }
+ }
+ /*min ttuv_blank*/
+
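+ /*
+  * Per plane, the chosen prefetch mode decides what may happen during
+  * vblank: mode 0 allows both dram clock change and dram self refresh and
+  * budgets for the largest of the three watermarks, mode 1 allows only
+  * self refresh, and mode 2 allows neither and budgets only for the
+  * urgent watermark; t_calc is added in every case.
+  */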
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->prefetch_mode == 0.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no;
+ v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark;
+ }
+ }
+ /*northbridge p-state/dram clock change support*/
+
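+ /*
+  * For every plane, the active-time margin for a dram clock change is the
+  * latency hiding available from the DPP/OPP output buffers, the line
+  * buffer and the DET (reduced when more than one DPP is active in total)
+  * minus the dram clock change watermark; writeback planes are further
+  * limited by their buffer drain margin. Planes allowed to switch during
+  * vblank also get a vblank margin. The smallest and second-smallest
+  * active margins then classify support as in v-active, in v-blank, or
+  * unsupported.
+  */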
+ v->active_dp_ps = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]);
+ if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k];
+ }
+ else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_y = 1.0;
+ }
+ if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0);
+ }
+ else if (v->swath_width_y[k] / 2.0 > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_c = 1.0;
+ }
+ v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c);
+ }
+ else {
+ v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y;
+ }
+ if (v->output_format[k] == dcn_bw_444) {
+ v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark;
+ }
+ else {
+ v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark;
+ }
+ if (v->output[k] == dcn_bw_writeback) {
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin);
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) {
+ v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark);
+ }
+ else {
+ v->v_blank_dram_clock_change_latency_margin[k] = 0.0;
+ }
+ }
+ v->min_active_dram_clock_change_margin = 999999.0;
+ v->v_blank_of_min_active_dram_clock_change_margin = 999999.0;
+ v->second_min_active_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ v->min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ }
+ }
+ v->min_vblank_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) {
+ v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ }
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin);
+ }
+ else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) {
+ v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin);
+ }
+ else {
+ v->dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ }
+ if (v->min_active_dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_active;
+ }
+ else if (v->dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_blank;
+ }
+ else {
+ v->dram_clock_change_support = dcn_bw_not_supported;
+ }
+ /*maximum bandwidth used*/
+
+ v->wr_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ }
+ v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth;
+}
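The tail of this function reduces the per-plane margins to a single dram_clock_change_support verdict. A minimal standalone sketch of that decision, using plain floats instead of the dcn_bw_internal_vars fields (the helper name, signature and return type are illustrative, not part of the patch; only the enum values mirror it):

	/*
	 * If every plane still has positive margin while active, DRAM clock
	 * switching needs no blanking; otherwise fall back to vblank if the
	 * combined margin allows it; otherwise report no support.
	 */
	static enum dcn_bw_defs classify_dram_clock_change(float min_active_margin_us,
			float overall_margin_us)
	{
		if (min_active_margin_us > 0.0f)
			return dcn_bw_supported_in_v_active;
		if (overall_margin_us > 0.0f)
			return dcn_bw_supported_in_v_blank;
		return dcn_bw_not_supported;
	}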
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
new file mode 100644
index 000000000000..03f06f682ead
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_AUTO_H_
+#define _DCN_CALC_AUTO_H_
+
+#include "dcn_calcs.h"
+
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v);
+void display_pipe_configuration(struct dcn_bw_internal_vars *v);
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(
+ struct dcn_bw_internal_vars *v);
+
+#endif /* _DCN_CALC_AUTO_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
new file mode 100644
index 000000000000..b6abe0f3bb15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn_calc_math.h"
+
+float dcn_bw_mod(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 - arg2 * ((int) (arg1 / arg2)); /* arg1 mod arg2 */
+}
+
+float dcn_bw_min2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 < arg2 ? arg1 : arg2;
+}
+
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+float dcn_bw_max2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+float dcn_bw_floor2(const float arg, const float significance)
+{
+ if (significance == 0)
+ return 0;
+ return ((int) (arg / significance)) * significance;
+}
+
+float dcn_bw_ceil2(const float arg, const float significance)
+{
+ float flr = dcn_bw_floor2(arg, significance);
+ if (significance == 0)
+ return 0;
+ return flr + 0.00001 >= arg ? arg : flr + significance;
+}
+
+float dcn_bw_max3(float v1, float v2, float v3)
+{
+ return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2);
+}
+
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
+{
+ return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5);
+}
+
+float dcn_bw_pow(float a, float exp)
+{
+ float temp;
+ /*ASSERT(exp == (int)exp);*/
+ if ((int)exp == 0)
+ return 1;
+ temp = dcn_bw_pow(a, (int)(exp / 2));
+ if (((int)exp % 2) == 0) {
+ return temp * temp;
+ } else {
+ if ((int)exp > 0)
+ return a * temp * temp;
+ else
+ return (temp * temp) / a;
+ }
+}
+
+float dcn_bw_log(float a, float b)
+{
+ int * const exp_ptr = (int *)(&a);
+ int x = *exp_ptr;
+ const int log_2 = ((x >> 23) & 255) - 128;
+ x &= ~(255 << 23);
+ x += 127 << 23;
+ *exp_ptr = x;
+
+ a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
+
+ if (b > 2.00001 || b < 1.99999)
+ return (a + log_2) / dcn_bw_log(b, 2);
+ else
+ return (a + log_2);
+}
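As a usage note for the helpers above: dcn_bw_floor2()/dcn_bw_ceil2() round to a multiple of "significance" rather than to an integer, and dcn_bw_log() approximates a logarithm of arbitrary base by pulling the exponent out of the IEEE-754 representation. A few illustrative calls (the expected results follow from the code above; they are not part of the patch):

	dcn_bw_floor2(7.3f, 2.0f);     /* 6.0 - largest multiple of 2 not above 7.3 */
	dcn_bw_ceil2(7.3f, 2.0f);      /* 8.0 - smallest multiple of 2 not below 7.3 */
	dcn_bw_max3(1.0f, 5.0f, 3.0f); /* 5.0 */
	dcn_bw_log(8.0f, 2.0f);        /* ~3.0 - approximate log2 via the float exponent */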
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
new file mode 100644
index 000000000000..f46ab0e24ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_MATH_H_
+#define _DCN_CALC_MATH_H_
+
+float dcn_bw_mod(const float arg1, const float arg2);
+float dcn_bw_min2(const float arg1, const float arg2);
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
+float dcn_bw_max2(const float arg1, const float arg2);
+float dcn_bw_floor2(const float arg, const float significance);
+float dcn_bw_ceil2(const float arg, const float significance);
+float dcn_bw_max3(float v1, float v2, float v3);
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
+float dcn_bw_pow(float a, float exp);
+float dcn_bw_log(float a, float b);
+
+#endif /* _DCN_CALC_MATH_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
new file mode 100644
index 000000000000..3dce35e66b09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -0,0 +1,1626 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calcs.h"
+#include "dcn_calc_auto.h"
+#include "dc.h"
+#include "dal_asic_id.h"
+
+#include "resource.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn_calc_math.h"
+
+/* Defaults from spreadsheet rev#247 */
+const struct dcn_soc_bounding_box dcn10_soc_defaults = {
+ /* latencies */
+ .sr_exit_time = 17, /*us*/
+ .sr_enter_plus_exit_time = 19, /*us*/
+ .urgent_latency = 4, /*us*/
+ .dram_clock_change_latency = 17, /*us*/
+ .write_back_latency = 12, /*us*/
+ .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/
+
+ /* below default clocks derived from STA target based on
+ * slow-slow corner + 10% margin with voltages aligned to FCLK.
+ *
+ * Use these values if the fused values don't make sense, as earlier
+ * parts don't have the correct values fused. */
+ /* default DCF CLK DPM on RV*/
+ .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */
+ .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */
+ .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */
+ .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */
+
+ /* default DISP CLK voltage state on RV */
+ .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */
+ .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */
+ .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */
+ .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */
+
+ /* default DPP CLK voltage state on RV */
+ .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */
+ .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */
+ .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */
+ .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */
+
+ /* default PHY CLK voltage state on RV */
+ .phyclkv_max0p9 = 900, /*MHz*/
+ .phyclkv_nom0p8 = 847, /*MHz*/
+ .phyclkv_mid0p72 = 800, /*MHz*/
+ .phyclkv_min0p65 = 600, /*MHz*/
+
+ /* BW depend on FCLK, MCLK, # of channels */
+ /* dual channel BW */
+ .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/
+ /* single channel BW
+ .fabric_and_dram_bandwidth_vmax0p9 = 19.2f,
+ .fabric_and_dram_bandwidth_vnom0p8 = 17.066f,
+ .fabric_and_dram_bandwidth_vmid0p72 = 14.933f,
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f,
+ */
+
+ .number_of_channels = 2,
+
+ .socclk = 208, /*MHz*/
+ .downspreading = 0.5f, /*%*/
+ .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/
+ .urgent_out_of_order_return_per_channel = 256, /*bytes*/
+ .vmm_page_size = 4096, /*bytes*/
+ .return_bus_width = 64, /*bytes*/
+ .max_request_size = 256, /*bytes*/
+
+ /* Depends on user class (client vs embedded, workstation, etc) */
+ .percent_disp_bw_limit = 0.3f /*%*/
+};
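+/* The clock defaults above follow the divider noted in each comment against a
+ * 3600 MHz reference: e.g. 3600 / 5.5 = ~654.5 MHz, recorded as 655 MHz for
+ * dcfclkv_max0p9, and 3600 / 3.25 = ~1107.7 MHz, recorded as 1108 MHz for
+ * max_dispclk_vmax0p9 (values rounded to whole MHz).
+ */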
+
+const struct dcn_ip_params dcn10_ip_defaults = {
+ .rob_buffer_size_in_kbyte = 64,
+ .det_buffer_size_in_kbyte = 164,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_in_kbyte = 8,
+ .pte_enable = dcn_bw_yes,
+ .pte_chunk_size = 2, /*kbytes*/
+ .meta_chunk_size = 2, /*kbytes*/
+ .writeback_chunk_size = 2, /*kbytes*/
+ .odm_capability = dcn_bw_no,
+ .dsc_capability = dcn_bw_no,
+ .line_buffer_size = 589824, /*bit*/
+ .max_line_buffer_lines = 12,
+ .is_line_buffer_bpp_fixed = dcn_bw_no,
+ .line_buffer_fixed_bpp = dcn_bw_na,
+ .writeback_luma_buffer_size = 12, /*kbytes*/
+ .writeback_chroma_buffer_size = 8, /*kbytes*/
+ .max_num_dpp = 4,
+ .max_num_writeback = 2,
+ .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/
+ .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/
+ .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/
+ .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .pte_buffer_size_in_requests = 42,
+ .dispclk_ramping_margin = 1, /*%*/
+ .under_scan_factor = 1.11f,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no,
+ .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no,
+ .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/
+};
+
+static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ case DC_SW_LINEAR:
+ return dcn_bw_sw_linear;
+ case DC_SW_4KB_S:
+ return dcn_bw_sw_4_kb_s;
+ case DC_SW_4KB_D:
+ return dcn_bw_sw_4_kb_d;
+ case DC_SW_64KB_S:
+ return dcn_bw_sw_64_kb_s;
+ case DC_SW_64KB_D:
+ return dcn_bw_sw_64_kb_d;
+ case DC_SW_VAR_S:
+ return dcn_bw_sw_var_s;
+ case DC_SW_VAR_D:
+ return dcn_bw_sw_var_d;
+ case DC_SW_64KB_S_T:
+ return dcn_bw_sw_64_kb_s_t;
+ case DC_SW_64KB_D_T:
+ return dcn_bw_sw_64_kb_d_t;
+ case DC_SW_4KB_S_X:
+ return dcn_bw_sw_4_kb_s_x;
+ case DC_SW_4KB_D_X:
+ return dcn_bw_sw_4_kb_d_x;
+ case DC_SW_64KB_S_X:
+ return dcn_bw_sw_64_kb_s_x;
+ case DC_SW_64KB_D_X:
+ return dcn_bw_sw_64_kb_d_x;
+ case DC_SW_VAR_S_X:
+ return dcn_bw_sw_var_s_x;
+ case DC_SW_VAR_D_X:
+ return dcn_bw_sw_var_d_x;
+ case DC_SW_256B_S:
+ case DC_SW_256_D:
+ case DC_SW_256_R:
+ case DC_SW_4KB_R:
+ case DC_SW_64KB_R:
+ case DC_SW_VAR_R:
+ case DC_SW_4KB_R_X:
+ case DC_SW_64KB_R_X:
+ case DC_SW_VAR_R_X:
+ default:
+ BREAK_TO_DEBUGGER(); /*not in formula*/
+ return dcn_bw_sw_4_kb_s;
+ }
+}
+
+static int tl_lb_bpp_to_int(enum lb_pixel_depth depth)
+{
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ return 18;
+ case LB_PIXEL_DEPTH_24BPP:
+ return 24;
+ case LB_PIXEL_DEPTH_30BPP:
+ return 30;
+ case LB_PIXEL_DEPTH_36BPP:
+ return 36;
+ default:
+ return 30;
+ }
+}
+
+static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ return dcn_bw_rgb_sub_16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ return dcn_bw_rgb_sub_32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return dcn_bw_rgb_sub_64;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return dcn_bw_yuv420_sub_8;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return dcn_bw_yuv420_sub_10;
+ default:
+ return dcn_bw_rgb_sub_32;
+ }
+}
+
+static void pipe_ctx_to_e2e_pipe_params (
+ const struct pipe_ctx *pipe,
+ struct _vcs_dpi_display_pipe_params_st *input)
+{
+ input->src.is_hsplit = false;
+ if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+ else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+
+ input->src.dcc = pipe->plane_state->dcc.enable;
+ input->src.dcc_rate = 1;
+ input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
+ input->src.source_scan = dm_horz;
+ input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle;
+
+ input->src.viewport_width = pipe->plane_res.scl_data.viewport.width;
+ input->src.viewport_height = pipe->plane_res.scl_data.viewport.height;
+ input->src.data_pitch = pipe->plane_res.scl_data.viewport.width;
+ input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width;
+ input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not currently stored */
+ input->src.cur0_bpp = 32;
+
+ switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ switch (pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ case ROTATION_ANGLE_180:
+ input->src.source_scan = dm_horz;
+ break;
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ input->src.source_scan = dm_vert;
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ /* TODO: Fix pixel format mappings */
+ switch (pipe->plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ input->src.source_format = dm_420_8;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ input->src.source_format = dm_420_10;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ input->src.source_format = dm_444_64;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ default:
+ input->src.source_format = dm_444_32;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ }
+
+ input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps;
+ input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0;
+ input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit < 1.0)
+ input->scale_ratio_depth.vinit = 1;
+ input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps;
+ input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c;
+ input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c;
+ input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0;
+ input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit_c < 1.0)
+ input->scale_ratio_depth.vinit_c = 1;
+ switch (pipe->plane_res.scl_data.lb_params.depth) {
+ case LB_PIXEL_DEPTH_30BPP:
+ input->scale_ratio_depth.lb_depth = 30; break;
+ case LB_PIXEL_DEPTH_36BPP:
+ input->scale_ratio_depth.lb_depth = 36; break;
+ default:
+ input->scale_ratio_depth.lb_depth = 24; break;
+ }
+
+
+ input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top
+ + pipe->stream->timing.v_border_bottom;
+
+ input->dest.recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.htotal = pipe->stream->timing.h_total;
+ input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch;
+ input->dest.hblank_end = input->dest.hblank_start
+ - pipe->stream->timing.h_addressable
+ - pipe->stream->timing.h_border_left
+ - pipe->stream->timing.h_border_right;
+
+ input->dest.vtotal = pipe->stream->timing.v_total;
+ input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch;
+ input->dest.vblank_end = input->dest.vblank_start
+ - pipe->stream->timing.v_addressable
+ - pipe->stream->timing.v_border_bottom
+ - pipe->stream->timing.v_border_top;
+ input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0;
+ input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
+ input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+ input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width;
+
+}
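+/* The scale_ratio_depth conversions above divide the raw fixed-point values
+ * by 4294967296.0 (2^32), i.e. the ratios carry 32 fractional bits: a raw
+ * value of 0x180000000, for example, corresponds to a 1.5x scaling ratio.
+ */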
+
+static void dcn_bw_calc_rq_dlg_ttu(
+ const struct dc *dc,
+ const struct dcn_bw_internal_vars *v,
+ struct pipe_ctx *pipe,
+ int in_idx)
+{
+ struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml);
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
+ struct _vcs_dpi_display_rq_params_st rq_param = {0};
+ struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
+ struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+ float total_active_bw = 0;
+ float total_prefetch_bw = 0;
+ int total_flip_bytes = 0;
+ int i;
+
+ for (i = 0; i < number_of_planes; i++) {
+ total_active_bw += v->read_bandwidth[i];
+ total_prefetch_bw += v->prefetch_bandwidth[i];
+ total_flip_bytes += v->total_immediate_flip_bytes[i];
+ }
+ dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+ if (dlg_sys_param.total_flip_bw < 0.0)
+ dlg_sys_param.total_flip_bw = 0;
+
+ dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
+ dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+ dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
+ dlg_sys_param.t_extra_us = v->urgent_extra_latency;
+ dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+ dlg_sys_param.total_flip_bytes = total_flip_bytes;
+
+ pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
+ input.clks_cfg.dcfclk_mhz = v->dcfclk;
+ input.clks_cfg.dispclk_mhz = v->dispclk;
+ input.clks_cfg.dppclk_mhz = v->dppclk;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+ input.clks_cfg.socclk_mhz = v->socclk;
+ input.clks_cfg.voltage = v->voltage_level;
+// dc->dml.logger = pool->base.logger;
+ input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+ input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+ //input[in_idx].dout.output_standard;
+ switch (v->output_deep_color[in_idx]) {
+ case dcn_bw_encoder_12bpc:
+ input.dout.output_bpc = dm_out_12;
+ break;
+ case dcn_bw_encoder_10bpc:
+ input.dout.output_bpc = dm_out_10;
+ break;
+ case dcn_bw_encoder_8bpc:
+ default:
+ input.dout.output_bpc = dm_out_8;
+ break;
+ }
+
+ /*todo: soc->sr_enter_plus_exit_time??*/
+ dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+
+ dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+ dml1_extract_rq_regs(dml, rq_regs, rq_param);
+ dml1_rq_dlg_get_dlg_params(
+ dml,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ input,
+ true,
+ true,
+ v->pte_enable == dcn_bw_yes,
+ pipe->plane_state->flip_immediate);
+}
+
+static void split_stream_across_pipes(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe)
+{
+ int pipe_idx = secondary_pipe->pipe_idx;
+
+ if (!primary_pipe->plane_state)
+ return;
+
+ *secondary_pipe = *primary_pipe;
+
+ secondary_pipe->pipe_idx = pipe_idx;
+ secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+ if (primary_pipe->bottom_pipe) {
+ ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
+ secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
+ secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
+ }
+ primary_pipe->bottom_pipe = secondary_pipe;
+ secondary_pipe->top_pipe = primary_pipe;
+
+ resource_build_scaling_params(primary_pipe);
+ resource_build_scaling_params(secondary_pipe);
+}
+
+static void calc_wm_sets_and_perf_params(
+ struct dc_state *context,
+ struct dcn_bw_internal_vars *v)
+{
+ /* Calculate set A last to keep internal var state consistent for required config */
+ if (v->voltage_level < 2) {
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+
+ v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
+ v->dcfclk = v->dcfclkv_nom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ if (v->voltage_level < 3) {
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[1] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[0] = v->dcfclkv_max0p9;
+ v->dcfclk = v->dcfclkv_max0p9;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ if (v->voltage_level >= 2) {
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ }
+ if (v->voltage_level >= 3)
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+}
+
+static bool dcn_bw_apply_registry_override(struct dc *dc)
+{
+ bool updated = false;
+
+ kernel_fpu_begin();
+ if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
+ && dc->debug.sr_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000)
+ != dc->debug.sr_enter_plus_exit_time_ns
+ && dc->debug.sr_enter_plus_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns
+ && dc->debug.urgent_latency_ns) {
+ updated = true;
+ dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000)
+ != dc->debug.percent_of_ideal_drambw
+ && dc->debug.percent_of_ideal_drambw) {
+ updated = true;
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->debug.percent_of_ideal_drambw;
+ }
+
+ if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ updated = true;
+ dc->dcn_soc->dram_clock_change_latency =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+
+ return updated;
+}
+
+void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
+{
+ /*
+ * disable optional pipe split by lowering the dispclk bounding box
+ * at DPM0
+ */
+ v->max_dispclk[0] = v->max_dppclk_vmin0p65;
+}
+
+void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+ unsigned int pixel_rate_khz)
+{
+ float pixel_rate_mhz = pixel_rate_khz / 1000;
+
+ /*
+ * force enabling pipe split by lowering the dpp clock for DPM0 to just
+ * below the specified pixel_rate, so the bw calc will split the pipe.
+ */
+ if (pixel_rate_mhz < v->max_dppclk[0])
+ v->max_dppclk[0] = pixel_rate_mhz;
+}
+
+void hack_bounding_box(struct dcn_bw_internal_vars *v,
+ struct dc_debug *dbg,
+ struct dc_state *context)
+{
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
+ context->stream_count >= 2) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (context->stream_count == 1 &&
+ dbg->force_single_disp_pipe_split) {
+ struct dc_stream_state *stream0 = context->streams[0];
+
+ hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
+ }
+}
+
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
+ int i, input_idx;
+ int vesa_sync_start, asic_blank_end, asic_blank_start;
+ bool bw_limit_pass;
+ float bw_limit;
+
+ PERFORMANCE_TRACE_START();
+ if (dcn_bw_apply_registry_override(dc))
+ dcn_bw_sync_calcs_and_dml(dc);
+
+ memset(v, 0, sizeof(*v));
+ kernel_fpu_begin();
+ v->sr_exit_time = dc->dcn_soc->sr_exit_time;
+ v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
+ v->urgent_latency = dc->dcn_soc->urgent_latency;
+ v->write_back_latency = dc->dcn_soc->write_back_latency;
+ v->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+
+ v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65;
+ v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72;
+ v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8;
+ v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9;
+
+ v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65;
+ v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72;
+ v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8;
+ v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65;
+ v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72;
+ v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8;
+ v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ v->socclk = dc->dcn_soc->socclk;
+
+ v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65;
+ v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72;
+ v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8;
+ v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9;
+
+ v->downspreading = dc->dcn_soc->downspreading;
+ v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles;
+ v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ v->number_of_channels = dc->dcn_soc->number_of_channels;
+ v->vmm_page_size = dc->dcn_soc->vmm_page_size;
+ v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency;
+ v->return_bus_width = dc->dcn_soc->return_bus_width;
+
+ v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte;
+ v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ v->pte_enable = dc->dcn_ip->pte_enable;
+ v->pte_chunk_size = dc->dcn_ip->pte_chunk_size;
+ v->meta_chunk_size = dc->dcn_ip->meta_chunk_size;
+ v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size;
+ v->odm_capability = dc->dcn_ip->odm_capability;
+ v->dsc_capability = dc->dcn_ip->dsc_capability;
+ v->line_buffer_size = dc->dcn_ip->line_buffer_size;
+ v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed;
+ v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size;
+ v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size;
+ v->max_num_dpp = dc->dcn_ip->max_num_dpp;
+ v->max_num_writeback = dc->dcn_ip->max_num_writeback;
+ v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput;
+ v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput;
+ v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput;
+ v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput;
+ v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ v->max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ v->max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ v->under_scan_factor = dc->dcn_ip->under_scan_factor;
+ v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests;
+ v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin;
+ v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ v->bug_forcing_luma_and_chroma_request_to_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+
+ v->voltage[5] = dcn_bw_no_support;
+ v->voltage[4] = dcn_bw_v_max0p9;
+ v->voltage[3] = dcn_bw_v_max0p9;
+ v->voltage[2] = dcn_bw_v_nom0p8;
+ v->voltage[1] = dcn_bw_v_mid0p72;
+ v->voltage[0] = dcn_bw_v_min0p65;
+ v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->dcfclk_per_state[5] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[4] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[3] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->max_dispclk[5] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[4] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[3] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[2] = v->max_dispclk_vnom0p8;
+ v->max_dispclk[1] = v->max_dispclk_vmid0p72;
+ v->max_dispclk[0] = v->max_dispclk_vmin0p65;
+ v->max_dppclk[5] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[4] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[3] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[2] = v->max_dppclk_vnom0p8;
+ v->max_dppclk[1] = v->max_dppclk_vmid0p72;
+ v->max_dppclk[0] = v->max_dppclk_vmin0p65;
+ v->phyclk_per_state[5] = v->phyclkv_max0p9;
+ v->phyclk_per_state[4] = v->phyclkv_max0p9;
+ v->phyclk_per_state[3] = v->phyclkv_max0p9;
+ v->phyclk_per_state[2] = v->phyclkv_nom0p8;
+ v->phyclk_per_state[1] = v->phyclkv_mid0p72;
+ v->phyclk_per_state[0] = v->phyclkv_min0p65;
+
+ hack_bounding_box(v, &dc->debug, context);
+
+ if (v->voltage_override == dcn_bw_v_max0p9) {
+ v->voltage_override_level = number_of_states - 1;
+ } else if (v->voltage_override == dcn_bw_v_nom0p8) {
+ v->voltage_override_level = number_of_states - 2;
+ } else if (v->voltage_override == dcn_bw_v_mid0p72) {
+ v->voltage_override_level = number_of_states - 3;
+ } else {
+ v->voltage_override_level = 0;
+ }
+ v->synchronized_vblank = dcn_bw_no;
+ v->ta_pscalculation = dcn_bw_override;
+ v->allow_different_hratio_vratio = dcn_bw_yes;
+
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ v->underscan_output[input_idx] = false; /* taken care of in recout already*/
+ v->interlace_output[input_idx] = false;
+
+ v->htotal[input_idx] = pipe->stream->timing.h_total;
+ v->vtotal[input_idx] = pipe->stream->timing.v_total;
+ v->vactive[input_idx] = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom;
+ v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
+ - v->vactive[input_idx]
+ - pipe->stream->timing.v_front_porch;
+ v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f;
+
+ if (!pipe->plane_state) {
+ v->dcc_enable[input_idx] = dcn_bw_yes;
+ v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
+ v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s;
+ v->lb_bit_per_pixel[input_idx] = 30;
+ v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->override_hta_ps[input_idx] = 1;
+ v->override_vta_ps[input_idx] = 1;
+ v->override_hta_pschroma[input_idx] = 1;
+ v->override_vta_pschroma[input_idx] = 1;
+ v->source_scan[input_idx] = dcn_bw_hor;
+
+ } else {
+ v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height;
+ v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width;
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width;
+ v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height;
+ if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) {
+ if (pipe->plane_state->rotation % 2 == 0) {
+ int viewport_end = pipe->plane_res.scl_data.viewport.width
+ + pipe->plane_res.scl_data.viewport.x;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_width[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+ else
+ v->viewport_width[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.x;
+ } else {
+ int viewport_end = pipe->plane_res.scl_data.viewport.height
+ + pipe->plane_res.scl_data.viewport.y;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_height[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+ else
+ v->viewport_height[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.y;
+ }
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width
+ + pipe->bottom_pipe->plane_res.scl_data.recout.width;
+ }
+
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
+ pipe->plane_state->format);
+ v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
+ pipe->plane_state->tiling_info.gfx9.swizzle);
+ v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
+ v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps;
+ v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
+ v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
+ v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
+ }
+ if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
+ v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp;
+ v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
+ v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
+ v->output[input_idx] = pipe->stream->sink->sink_signal ==
+ SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
+ v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
+ if (v->output[input_idx] == dcn_bw_hdmi) {
+ switch (pipe->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_101010:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc;
+ break;
+ case COLOR_DEPTH_121212:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc;
+ break;
+ case COLOR_DEPTH_161616:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc;
+ break;
+ default:
+ break;
+ }
+ }
+
+ input_idx++;
+ }
+ v->number_of_active_planes = input_idx;
+
+ scaler_settings_calculation(v);
+ mode_support_and_system_configuration(v);
+
+ if (v->voltage_level == 0 &&
+ (dc->debug.sr_exit_time_dpm0_ns
+ || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
+
+ if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
+ v->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
+ if (dc->debug.sr_exit_time_dpm0_ns)
+ v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
+ dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ mode_support_and_system_configuration(v);
+ }
+
+ if (v->voltage_level != 5) {
+ float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+ if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8)
+ bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8;
+ else
+ bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9;
+
+ if (bw_consumed < v->fabric_and_dram_bandwidth)
+ if (dc->debug.voltage_align_fclk)
+ bw_consumed = v->fabric_and_dram_bandwidth;
+
+ display_pipe_configuration(v);
+ calc_wm_sets_and_perf_params(context, v);
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ (ddr4_dram_factor_single_Channel * v->number_of_channels));
+ if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ }
+
+ context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ if (dc->debug.max_disp_clk == true)
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+
+ if (context->bw.dcn.calc_clk.dispclk_khz <
+ dc->debug.min_disp_clk_khz) {
+ context->bw.dcn.calc_clk.dispclk_khz =
+ dc->debug.min_disp_clk_khz;
+ }
+
+ context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /* skip inactive pipe */
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ vesa_sync_start = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom +
+ pipe->stream->timing.v_front_porch;
+
+ asic_blank_end = (pipe->stream->timing.v_total -
+ vesa_sync_start -
+ pipe->stream->timing.v_border_top)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ asic_blank_start = asic_blank_end +
+ (pipe->stream->timing.v_border_top +
+ pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ pipe->pipe_dlg_param.vblank_start = asic_blank_start;
+ pipe->pipe_dlg_param.vblank_end = asic_blank_end;
+
+ if (pipe->plane_state) {
+ struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+ if (v->dpp_per_plane[input_idx] == 2 ||
+ ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
+ if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* update previously split pipe */
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start;
+ hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
+ } else {
+ /* pipe not split previously needs split */
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ ASSERT(hsplit_pipe);
+ split_stream_across_pipes(
+ &context->res_ctx, pool,
+ pipe, hsplit_pipe);
+ }
+
+ dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx);
+ } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* merge previously split pipe */
+ pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
+ if (hsplit_pipe->bottom_pipe)
+ hsplit_pipe->bottom_pipe->top_pipe = pipe;
+ hsplit_pipe->plane_state = NULL;
+ hsplit_pipe->stream = NULL;
+ hsplit_pipe->top_pipe = NULL;
+ hsplit_pipe->bottom_pipe = NULL;
+ resource_build_scaling_params(pipe);
+ }
+ /* for now important to do this after pipe split for building e2e params */
+ dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx);
+ }
+
+ input_idx++;
+ }
+ }
+
+ if (v->voltage_level == 0) {
+
+ dc->dml.soc.sr_enter_plus_exit_time_us =
+ dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ }
+
+ /*
+ * BW limit is set to prevent display from impacting other system functions
+ */
+
+ bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
+ bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
+
+ kernel_fpu_end();
+
+ PERFORMANCE_TRACE_END();
+
+ if (bw_limit_pass && v->voltage_level != 5)
+ return true;
+ else
+ return false;
+}
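+/* With the dcn10_soc_defaults above, the limit check at the end of
+ * dcn_validate_bandwidth() works out to roughly 0.3 * 38.4 GB/s = ~11.5 GB/s;
+ * total_data_read_bandwidth is divided by 1000.0 first, which suggests it is
+ * tracked in MB/s. A mode set needing more display read bandwidth than this
+ * fails validation even if a voltage state could otherwise carry it.
+ */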
+
+static unsigned int dcn_find_normalized_clock_vdd_Level(
+ const struct dc *dc,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz)
+{
+ int vdd_level = dcn_bw_v_min0p65;
+
+ if (clocks_in_khz == 0)/* TODO: some clocks are not yet part of the calculations */
+ return vdd_level;
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ }
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ default:
+ break;
+ }
+ return vdd_level;
+}
+
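+/* Find the highest voltage level required by any of the requested clocks,
+ * then return the DCFCLK (in kHz) associated with that level.
+ */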
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks)
+{
+ unsigned vdd_level, vdd_level_temp;
+ unsigned dcf_clk;
+
+ /*find a common supported voltage level*/
+ vdd_level = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+
+ /* find the dcfclk corresponding to that level */
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ if (vdd_level == dcn_bw_v_max0p91) {
+ BREAK_TO_DEBUGGER();
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ } else if (vdd_level == dcn_bw_v_max0p9)
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ else if (vdd_level == dcn_bw_v_nom0p8)
+ dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
+ else if (vdd_level == dcn_bw_v_mid0p72)
+ dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
+ else
+ dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\tdcf_clk for voltage = %d\n", dcf_clk);
+ return dcf_clk;
+}
+
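+/* Refresh the SoC bounding box with the FCLK and DCFCLK DPM levels reported
+ * by PPLib, mapping them onto the vmin0p65/vmid0p72/vnom0p8/vmax0p9 entries.
+ */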
+void dcn_bw_update_from_pplib(struct dc *dc)
+{
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_clock_levels_with_voltage clks = {0};
+
+ kernel_fpu_begin();
+
+ /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
+ clks.num_levels != 0) {
+ ASSERT(clks.num_levels >= 3);
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+ if (clks.num_levels > 2) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ }
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
+ clks.num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+
+ kernel_fpu_end();
+}
+
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+{
+ struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_wm_range_sets ranges = {0};
+ int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
+ int max_dcfclk_khz, min_dcfclk_khz;
+ int socclk_khz;
+ const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (!pp->set_wm_ranges)
+ return;
+
+ kernel_fpu_begin();
+ max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
+ nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
+ mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
+ min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
+ min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ socclk_khz = dc->dcn_soc->socclk * 1000;
+ kernel_fpu_end();
+
+ /* Now notify PPLib/SMU which Watermark sets they should select depending
+ * on the DPM state they are in, and update the BW MGR GFX Engine and
+ * Memory clock member variables used in the Watermarks calculations for
+ * each Watermark Set.
+ */
+ /* SOCCLK does not affect anything but writeback for DCN, so for now we
+ * don't care what the value is, hence min to overdrive level
+ */
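+ /* Reader sets A-D each pin their fill clock to one FCLK DPM point
+ * (min, mid, nom, max); writer sets use a SOCCLK-to-overdrive fill range
+ * and drain at the same FCLK point.
+ */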
+ ranges.num_reader_wm_sets = WM_COUNT;
+ ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+
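+ /* Debug override: report fixed watermark ranges instead of the values
+ * derived from the SoC bounding box.
+ */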
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ }
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ pp->set_wm_ranges(&pp->pp_smu, &ranges);
+}
+
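+/* Log the DCN SoC and IP parameters and copy them into the DML soc/ip
+ * structures so that bandwidth calcs and DML operate on the same values.
+ */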
+void dcn_bw_sync_calcs_and_dml(struct dc *dc)
+{
+ kernel_fpu_begin();
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "sr_exit_time: %d ns\n"
+ "sr_enter_plus_exit_time: %d ns\n"
+ "urgent_latency: %d ns\n"
+ "write_back_latency: %d ns\n"
+ "percent_of_ideal_drambw_received_after_urg_latency: %d %%\n"
+ "max_request_size: %d bytes\n"
+ "dcfclkv_max0p9: %d kHz\n"
+ "dcfclkv_nom0p8: %d kHz\n"
+ "dcfclkv_mid0p72: %d kHz\n"
+ "dcfclkv_min0p65: %d kHz\n"
+ "max_dispclk_vmax0p9: %d kHz\n"
+ "max_dispclk_vnom0p8: %d kHz\n"
+ "max_dispclk_vmid0p72: %d kHz\n"
+ "max_dispclk_vmin0p65: %d kHz\n"
+ "max_dppclk_vmax0p9: %d kHz\n"
+ "max_dppclk_vnom0p8: %d kHz\n"
+ "max_dppclk_vmid0p72: %d kHz\n"
+ "max_dppclk_vmin0p65: %d kHz\n"
+ "socclk: %d kHz\n"
+ "fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
+ "phyclkv_max0p9: %d kHz\n"
+ "phyclkv_nom0p8: %d kHz\n"
+ "phyclkv_mid0p72: %d kHz\n"
+ "phyclkv_min0p65: %d kHz\n"
+ "downspreading: %d %%\n"
+ "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
+ "urgent_out_of_order_return_per_channel: %d Bytes\n"
+ "number_of_channels: %d\n"
+ "vmm_page_size: %d Bytes\n"
+ "dram_clock_change_latency: %d ns\n"
+ "return_bus_width: %d Bytes\n",
+ dc->dcn_soc->sr_exit_time * 1000,
+ dc->dcn_soc->sr_enter_plus_exit_time * 1000,
+ dc->dcn_soc->urgent_latency * 1000,
+ dc->dcn_soc->write_back_latency * 1000,
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency,
+ dc->dcn_soc->max_request_size,
+ dc->dcn_soc->dcfclkv_max0p9 * 1000,
+ dc->dcn_soc->dcfclkv_nom0p8 * 1000,
+ dc->dcn_soc->dcfclkv_mid0p72 * 1000,
+ dc->dcn_soc->dcfclkv_min0p65 * 1000,
+ dc->dcn_soc->max_dispclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dispclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dispclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dispclk_vmin0p65 * 1000,
+ dc->dcn_soc->max_dppclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dppclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dppclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dppclk_vmin0p65 * 1000,
+ dc->dcn_soc->socclk * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000,
+ dc->dcn_soc->phyclkv_max0p9 * 1000,
+ dc->dcn_soc->phyclkv_nom0p8 * 1000,
+ dc->dcn_soc->phyclkv_mid0p72 * 1000,
+ dc->dcn_soc->phyclkv_min0p65 * 1000,
+ dc->dcn_soc->downspreading * 100,
+ dc->dcn_soc->round_trip_ping_latency_cycles,
+ dc->dcn_soc->urgent_out_of_order_return_per_channel,
+ dc->dcn_soc->number_of_channels,
+ dc->dcn_soc->vmm_page_size,
+ dc->dcn_soc->dram_clock_change_latency * 1000,
+ dc->dcn_soc->return_bus_width);
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "rob_buffer_size_in_kbyte: %d\n"
+ "det_buffer_size_in_kbyte: %d\n"
+ "dpp_output_buffer_pixels: %d\n"
+ "opp_output_buffer_lines: %d\n"
+ "pixel_chunk_size_in_kbyte: %d\n"
+ "pte_enable: %d\n"
+ "pte_chunk_size: %d kbytes\n"
+ "meta_chunk_size: %d kbytes\n"
+ "writeback_chunk_size: %d kbytes\n"
+ "odm_capability: %d\n"
+ "dsc_capability: %d\n"
+ "line_buffer_size: %d bits\n"
+ "max_line_buffer_lines: %d\n"
+ "is_line_buffer_bpp_fixed: %d\n"
+ "line_buffer_fixed_bpp: %d\n"
+ "writeback_luma_buffer_size: %d kbytes\n"
+ "writeback_chroma_buffer_size: %d kbytes\n"
+ "max_num_dpp: %d\n"
+ "max_num_writeback: %d\n"
+ "max_dchub_topscl_throughput: %d pixels/dppclk\n"
+ "max_pscl_tolb_throughput: %d pixels/dppclk\n"
+ "max_lb_tovscl_throughput: %d pixels/dppclk\n"
+ "max_vscl_tohscl_throughput: %d pixels/dppclk\n"
+ "max_hscl_ratio: %d\n"
+ "max_vscl_ratio: %d\n"
+ "max_hscl_taps: %d\n"
+ "max_vscl_taps: %d\n"
+ "pte_buffer_size_in_requests: %d\n"
+ "dispclk_ramping_margin: %d %%\n"
+ "under_scan_factor: %d %%\n"
+ "max_inter_dcn_tile_repeaters: %d\n"
+ "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
+ "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
+ "dcfclk_cstate_latency: %d\n",
+ dc->dcn_ip->rob_buffer_size_in_kbyte,
+ dc->dcn_ip->det_buffer_size_in_kbyte,
+ dc->dcn_ip->dpp_output_buffer_pixels,
+ dc->dcn_ip->opp_output_buffer_lines,
+ dc->dcn_ip->pixel_chunk_size_in_kbyte,
+ dc->dcn_ip->pte_enable,
+ dc->dcn_ip->pte_chunk_size,
+ dc->dcn_ip->meta_chunk_size,
+ dc->dcn_ip->writeback_chunk_size,
+ dc->dcn_ip->odm_capability,
+ dc->dcn_ip->dsc_capability,
+ dc->dcn_ip->line_buffer_size,
+ dc->dcn_ip->max_line_buffer_lines,
+ dc->dcn_ip->is_line_buffer_bpp_fixed,
+ dc->dcn_ip->line_buffer_fixed_bpp,
+ dc->dcn_ip->writeback_luma_buffer_size,
+ dc->dcn_ip->writeback_chroma_buffer_size,
+ dc->dcn_ip->max_num_dpp,
+ dc->dcn_ip->max_num_writeback,
+ dc->dcn_ip->max_dchub_topscl_throughput,
+ dc->dcn_ip->max_pscl_tolb_throughput,
+ dc->dcn_ip->max_lb_tovscl_throughput,
+ dc->dcn_ip->max_vscl_tohscl_throughput,
+ dc->dcn_ip->max_hscl_ratio,
+ dc->dcn_ip->max_vscl_ratio,
+ dc->dcn_ip->max_hscl_taps,
+ dc->dcn_ip->max_vscl_taps,
+ dc->dcn_ip->pte_buffer_size_in_requests,
+ dc->dcn_ip->dispclk_ramping_margin,
+ dc->dcn_ip->under_scan_factor * 100,
+ dc->dcn_ip->max_inter_dcn_tile_repeaters,
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
+ dc->dcn_ip->dcfclk_cstate_latency);
+ dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
+
+ dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
+ dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
+ dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
+ dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
+
+ dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
+ dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
+ dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
+ dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
+ dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
+ dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
+ dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
+ dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
+ dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
+ dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
+
+ dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency;
+ dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency;
+ dc->dml.soc.ideal_dram_bw_after_urgent_percent =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+ dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size;
+ dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading;
+ dc->dml.soc.round_trip_ping_latency_dcfclk_cycles =
+ dc->dcn_soc->round_trip_ping_latency_cycles;
+ dc->dml.soc.urgent_out_of_order_return_per_channel_bytes =
+ dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels;
+ dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size;
+ dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency;
+ dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width;
+
+ dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte;
+ dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes;
+ dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size;
+ dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size;
+ dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size;
+ dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size;
+ dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes;
+ dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size;
+ dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size;
+ dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp;
+ dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback;
+ dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput;
+ dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput;
+ dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput;
+ dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput;
+ dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ /*pte_buffer_size_in_requests missing in dml*/
+ dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin;
+ dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor;
+ dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes;
+ dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
+ dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
+ kernel_fpu_end();
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
new file mode 100644
index 000000000000..fe63f5894d43
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -0,0 +1,1677 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+#include "hubp.h"
+
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destroy_links(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i])
+ link_destroy(&dc->links[i]);
+ }
+}
+
+static bool create_links(
+ struct dc *dc,
+ uint32_t num_virtual_links)
+{
+ int i;
+ int connectors_num;
+ struct dc_bios *bios = dc->ctx->dc_bios;
+
+ dc->link_count = 0;
+
+ connectors_num = bios->funcs->get_connectors_number(bios);
+
+ if (connectors_num > ENUM_ID_COUNT) {
+ dm_error(
+ "DC: Number of connectors %d exceeds maximum of %d!\n",
+ connectors_num,
+ ENUM_ID_COUNT);
+ return false;
+ }
+
+ if (connectors_num == 0 && num_virtual_links == 0) {
+ dm_error("DC: Number of connectors is zero!\n");
+ }
+
+ dm_output_to_console(
+ "DC: %s: connectors_num: physical:%d, virtual:%d\n",
+ __func__,
+ connectors_num,
+ num_virtual_links);
+
+ for (i = 0; i < connectors_num; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ /* next BIOS object table connector */
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link = link_create(&link_init_params);
+
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
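+ /* Virtual links have no BIOS connector; each is backed by a software-only
+ * virtual link encoder.
+ */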
+ for (i = 0; i < num_virtual_links; i++) {
+ struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+ struct encoder_init_data enc_init = {0};
+
+ if (link == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->ctx = dc->ctx;
+ link->dc = dc;
+ link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ enc_init.ctx = dc->ctx;
+ enc_init.channel = CHANNEL_ID_UNKNOWN;
+ enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc_init.transmitter = TRANSMITTER_UNKNOWN;
+ enc_init.connector = link->link_id;
+ enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+ enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+ enc_init.encoder.enum_id = ENUM_ID_1;
+ virtual_link_encoder_construct(link->link_enc, &enc_init);
+
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+ }
+
+ return true;
+
+failed_alloc:
+ return false;
+}
+
+static bool stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ int vmin, int vmax)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.set_drr(&pipe, 1, vmin, vmax);
+
+ /* build and update the info frame */
+ resource_build_info_frame(pipe);
+ dc->hwss.update_info_frame(pipe);
+
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+ struct crtc_position position;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_csc_matrix(pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static void set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_events *events)
+{
+ int i = 0;
+ int j = 0;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+}
+
+static void set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+static void perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+static void set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ link->preferred_link_setting = *link_setting;
+ dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+static void enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+static void disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+static void set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
+static void set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option)
+{
+ struct bit_depth_reduction_params params;
+ struct dc_link *link = stream->status.link;
+ struct pipe_ctx *pipes = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+ stream) {
+ pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ memset(&params, 0, sizeof(params));
+ if (!pipes)
+ return;
+ if (option > DITHER_OPTION_MAX)
+ return;
+
+ stream->dither_option = option;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &params);
+ stream->bit_depth_params = params;
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+void set_dpms(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off)
+{
+ struct pipe_ctx *pipe_ctx = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipe_ctx) {
+ ASSERT(0);
+ return;
+ }
+
+ if (stream->dpms_off != dpms_off) {
+ stream->dpms_off = dpms_off;
+ if (dpms_off)
+ core_link_disable_stream(pipe_ctx,
+ KEEP_ACQUIRED_RESOURCE);
+ else
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+}
+
+static void allocate_dc_stream_funcs(struct dc *dc)
+{
+ if (dc->hwss.set_drr != NULL) {
+ dc->stream_funcs.adjust_vmin_vmax =
+ stream_adjust_vmin_vmax;
+ }
+
+ dc->stream_funcs.set_static_screen_events =
+ set_static_screen_events;
+
+ dc->stream_funcs.get_crtc_position =
+ stream_get_crtc_position;
+
+ dc->stream_funcs.set_gamut_remap =
+ set_gamut_remap;
+
+ dc->stream_funcs.program_csc_matrix =
+ program_csc_matrix;
+
+ dc->stream_funcs.set_dither_option =
+ set_dither_option;
+
+ dc->stream_funcs.set_dpms =
+ set_dpms;
+
+ dc->link_funcs.set_drive_settings =
+ set_drive_settings;
+
+ dc->link_funcs.perform_link_training =
+ perform_link_training;
+
+ dc->link_funcs.set_preferred_link_settings =
+ set_preferred_link_settings;
+
+ dc->link_funcs.enable_hpd =
+ enable_hpd;
+
+ dc->link_funcs.disable_hpd =
+ disable_hpd;
+
+ dc->link_funcs.set_test_pattern =
+ set_test_pattern;
+}
+
+static void destruct(struct dc *dc)
+{
+ dc_release_state(dc->current_state);
+ dc->current_state = NULL;
+
+ destroy_links(dc);
+
+ dc_destroy_resource_pool(dc);
+
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+ if (dc->ctx->i2caux)
+ dal_i2caux_destroy(&dc->ctx->i2caux);
+
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+ if (dc->ctx->logger)
+ dal_logger_destroy(&dc->ctx->logger);
+
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+
+ kfree(dc->bw_vbios);
+ dc->bw_vbios = NULL;
+
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+ kfree(dc->dcn_ip);
+ dc->dcn_ip = NULL;
+
+#endif
+}
+
+static bool construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dal_logger *logger;
+ struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
+ GFP_KERNEL);
+ struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
+ GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
+ GFP_KERNEL);
+ struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+#endif
+
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ if (!dc_dceip) {
+ dm_error("%s: failed to create dceip\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_dceip = dc_dceip;
+
+ if (!dc_vbios) {
+ dm_error("%s: failed to create vbios\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_vbios = dc_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_soc = dcn_soc;
+
+ if (!dcn_ip) {
+ dm_error("%s: failed to create dcn_ip\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_ip = dcn_ip;
+#endif
+
+ if (!dc_ctx) {
+ dm_error("%s: failed to create ctx\n", __func__);
+ goto fail;
+ }
+
+ dc->current_state = dc_create_state();
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+
+ /* Create logger */
+ logger = dal_logger_create(dc_ctx, init_params->log_mask);
+
+ if (!logger) {
+ /* can *not* call logger. call base driver 'print error' */
+ dm_error("%s: failed to create Logger!\n", __func__);
+ goto fail;
+ }
+ dc_ctx->logger = logger;
+ dc->ctx = dc_ctx;
+ dc->ctx->dce_environment = init_params->dce_environment;
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc->ctx->dce_version = dc_version;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
+#endif
+ /* Resource should construct all asic specific resources.
+ * This should be the only place where we need to parse the asic id
+ */
+ if (init_params->vbios_override)
+ dc_ctx->dc_bios = init_params->vbios_override;
+ else {
+ /* Create BIOS parser */
+ struct bp_init_data bp_init_data;
+
+ bp_init_data.ctx = dc_ctx;
+ bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+ dc_ctx->dc_bios = dal_bios_parser_create(
+ &bp_init_data, dc_version);
+
+ if (!dc_ctx->dc_bios) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc_ctx->created_bios = true;
+ }
+
+ /* Create I2C AUX */
+ dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
+
+ if (!dc_ctx->i2caux) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ /* Create GPIO service */
+ dc_ctx->gpio_service = dal_gpio_service_create(
+ dc_version,
+ dc_ctx->dce_environment,
+ dc_ctx);
+
+ if (!dc_ctx->gpio_service) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc->res_pool = dc_create_resource_pool(
+ dc,
+ init_params->num_virtual_links,
+ dc_version,
+ init_params->asic_id);
+ if (!dc->res_pool)
+ goto fail;
+
+ dc_resource_state_construct(dc, dc->current_state);
+
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ allocate_dc_stream_funcs(dc);
+
+ return true;
+
+fail:
+
+ destruct(dc);
+ return false;
+}
+
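+/* Remove and disable planes belonging to streams that are present in the
+ * current state but absent from the new context.
+ */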
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+ int i, j;
+ struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *current_ctx;
+
+ if (dangling_context == NULL)
+ return;
+
+ dc_resource_state_copy_construct(dc->current_state, dangling_context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *old_stream =
+ dc->current_state->res_ctx.pipe_ctx[i].stream;
+ bool should_disable = true;
+
+ for (j = 0; j < context->stream_count; j++) {
+ if (old_stream == context->streams[j]) {
+ should_disable = false;
+ break;
+ }
+ }
+ if (should_disable && old_stream) {
+ dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ }
+ }
+
+ current_ctx = dc->current_state;
+ dc->current_state = dangling_context;
+ dc_release_state(current_ctx);
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+ {
+ struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ unsigned int full_pipe_count;
+
+ if (!dc)
+ goto alloc_fail;
+
+ if (!construct(dc, init_params))
+ goto construct_fail;
+
+ /*TODO: separate HW and SW initialization*/
+ dc->hwss.init_hw(dc);
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
+
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+
+ dc->config = init_params->flags;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Display Core initialized\n");
+
+
+ /* TODO: missing feature to be enabled */
+ dc->debug.disable_dfs_bypass = true;
+
+ return dc;
+
+construct_fail:
+ kfree(dc);
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_destroy(struct dc **dc)
+{
+ destruct(*dc);
+ kfree(*dc);
+ *dc = NULL;
+}
+
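+/* Group pipes whose streams have synchronizable timings, pick an unblanked
+ * pipe as the group master, and enable timing synchronization for every
+ * group of two or more pipes.
+ */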
+static void program_timing_sync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, j;
+ int group_index = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
+ continue;
+
+ unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+ }
+
+ for (i = 0; i < pipe_count; i++) {
+ int group_size = 1;
+ struct pipe_ctx *pipe_set[MAX_PIPES];
+
+ if (!unsynced_pipes[i])
+ continue;
+
+ pipe_set[0] = unsynced_pipes[i];
+ unsynced_pipes[i] = NULL;
+
+ /* Add the tg to the set, then search the remaining tgs for ones with
+ * the same timing and add all tgs with the same timing to the group
+ */
+ for (j = i + 1; j < pipe_count; j++) {
+ if (!unsynced_pipes[j])
+ continue;
+
+ if (resource_are_streams_timing_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ }
+ }
+
+ /* set first unblanked pipe as master */
+ for (j = 0; j < group_size; j++) {
+ struct pipe_ctx *temp;
+
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (j == 0)
+ break;
+
+ temp = pipe_set[0];
+ pipe_set[0] = pipe_set[j];
+ pipe_set[j] = temp;
+ break;
+ }
+ }
+
+ /* remove any other unblanked pipes as they have already been synced */
+ for (j = j + 1; j < group_size; j++) {
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+
+ if (group_size > 1) {
+ dc->hwss.enable_timing_synchronization(
+ dc, group_index, group_size, pipe_set);
+ group_index++;
+ }
+ }
+}
+
+static bool context_changed(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i;
+
+ if (context->stream_count != dc->current_state->stream_count)
+ return true;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i] != context->streams[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ bool ret = true;
+ int i, j;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ for (j = 0 ; pipe && j < stream_count; j++) {
+ if (streams[j] && streams[j] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ return ret;
+}
+
+
+/*
+ * Applies the given context to HW and copies it into the current context.
+ * It's up to the user to release the src context afterwards.
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, j, k, l;
+ struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+
+ disable_dangling_plane(dc, context);
+
+ for (i = 0; i < context->stream_count; i++)
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+ dc->hwss.enable_accelerated_mode(dc);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_sink *sink = context->streams[i]->sink;
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+
+ /*
+ * enable stereo
+ * TODO rework dc_enable_stereo call to work with validation sets?
+ */
+ for (k = 0; k < MAX_PIPES; k++) {
+ pipe = &context->res_ctx.pipe_ctx[k];
+
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+ context->streams[i]->timing.h_addressable,
+ context->streams[i]->timing.v_addressable,
+ context->streams[i]->timing.h_total,
+ context->streams[i]->timing.v_total,
+ context->streams[i]->timing.pix_clk_khz);
+ }
+
+ dc->hwss.ready_shared_resources(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ program_timing_sync(dc, context);
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < MAX_PIPES; j++) {
+ pipe = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe->top_pipe && pipe->stream == context->streams[i])
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+ }
+
+ dc_release_state(dc->current_state);
+
+ dc->current_state = context;
+
+ dc_retain_state(dc->current_state);
+
+ dc->hwss.optimize_shared_resources(dc);
+
+ return result;
+}
+
+bool dc_commit_state(struct dc *dc, struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i;
+
+ if (!context_changed(dc, context))
+ return true;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+ __func__, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ dc_stream_log(stream,
+ dc->ctx->logger,
+ LOG_DC);
+ }
+
+ result = dc_commit_state_no_check(dc, context);
+
+ return (result == DC_OK);
+}
+
+
+bool dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+ int i;
+ struct dc_state *context = dc->current_state;
+
+ post_surface_trace(dc);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL
+ || context->res_ctx.pipe_ctx[i].plane_state == NULL)
+ dc->hwss.power_down_front_end(dc, i);
+
+ /* 3rd param should be true; temporary workaround for RV */
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+#else
+ dc->hwss.set_bandwidth(dc, context, true);
+#endif
+ return true;
+}
+
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
+
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
+ }
+
+ memset(updates, 0, sizeof(updates));
+
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
+ }
+
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, plane_states, state);
+
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
+}
+
+struct dc_state *dc_create_state(void)
+{
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+
+ kref_init(&context->refcount);
+ return context;
+}
+
+void dc_retain_state(struct dc_state *context)
+{
+ kref_get(&context->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *context = container_of(kref, struct dc_state, refcount);
+ dc_resource_state_destruct(context);
+ kfree(context);
+}
+
+void dc_release_state(struct dc_state *context)
+{
+ kref_put(&context->refcount, dc_state_free);
+}
+
+static bool is_surface_in_context(
+ const struct dc_state *context,
+ const struct dc_plane_state *plane_state)
+{
+ int j;
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (plane_state == pipe_ctx->plane_state) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return 12;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return 16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ return 32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return 64;
+ default:
+ ASSERT_CRITICAL(false);
+ return -1;
+ }
+}
+
+static enum surface_update_type get_plane_info_update_type(
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ struct dc_plane_info temp_plane_info;
+ memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+
+ if (!u->plane_info)
+ return UPDATE_TYPE_FAST;
+
+ temp_plane_info = *u->plane_info;
+
+ /* Copy all parameters that will cause a full update from the current
+ * surface, and the rest of the parameters from the provided plane
+ * configuration. Then do a memory compare, with special validation for
+ * those parameters that can cause fast/medium updates
+ */
+
+ /* Full update parameters */
+ temp_plane_info.color_space = u->surface->color_space;
+ temp_plane_info.dcc = u->surface->dcc;
+ temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
+ temp_plane_info.plane_size = u->surface->plane_size;
+ temp_plane_info.rotation = u->surface->rotation;
+ temp_plane_info.stereo_format = u->surface->stereo_format;
+
+ if (surface_index == 0)
+ temp_plane_info.visible = u->plane_info->visible;
+ else
+ temp_plane_info.visible = u->surface->visible;
+
+ if (memcmp(u->plane_info, &temp_plane_info,
+ sizeof(struct dc_plane_info)) != 0)
+ return UPDATE_TYPE_FULL;
+
+ if (pixel_format_to_bpp(u->plane_info->format) !=
+ pixel_format_to_bpp(u->surface->format)) {
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+ return UPDATE_TYPE_FULL;
+ }
+
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+ /* TODO: the checks below are HW dependent; we should add a hook to the
+ * DCE/N resource and validate there.
+ */
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ /* swizzled mode requires RQ to be setup properly,
+ * thus need to run DML to calculate RQ settings
+ */
+ return UPDATE_TYPE_FULL;
+ }
+ }
+
+ return UPDATE_TYPE_MED;
+}
+
+static enum surface_update_type get_scaling_info_update_type(
+ const struct dc_surface_update *u)
+{
+ if (!u->scaling_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height
+ || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
+ || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
+ return UPDATE_TYPE_FULL;
+
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+ || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+ || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ return UPDATE_TYPE_MED;
+
+ return UPDATE_TYPE_FAST;
+}
+
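+/* Classify a surface update as FAST, MED or FULL: a surface not present in
+ * the current context forces FULL; otherwise the plane-info, scaling and
+ * transfer-function/HDR checks are combined and the strongest type wins.
+ */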
+static enum surface_update_type det_surface_update(
+ const struct dc *dc,
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ const struct dc_state *context = dc->current_state;
+ enum surface_update_type type = UPDATE_TYPE_FAST;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (!is_surface_in_context(context, u->surface))
+ return UPDATE_TYPE_FULL;
+
+ type = get_plane_info_update_type(u, surface_index);
+ if (overall_type < type)
+ overall_type = type;
+
+ type = get_scaling_info_update_type(u);
+ if (overall_type < type)
+ overall_type = type;
+
+ if (u->in_transfer_func ||
+ u->hdr_static_metadata) {
+ if (overall_type < UPDATE_TYPE_MED)
+ overall_type = UPDATE_TYPE_MED;
+ }
+
+ return overall_type;
+}
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return UPDATE_TYPE_FULL;
+
+ if (stream_update)
+ return UPDATE_TYPE_FULL;
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+ det_surface_update(dc, &updates[i], i);
+
+ if (type == UPDATE_TYPE_FULL)
+ return type;
+
+ if (overall_type < type)
+ overall_type = type;
+ }
+
+ return overall_type;
+}
+
+static struct dc_stream_status *stream_get_status(
+ struct dc_state *ctx,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < ctx->stream_count; i++) {
+ if (stream == ctx->streams[i]) {
+ return &ctx->stream_status[i];
+ }
+ }
+
+ return NULL;
+}
+
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+
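+/* Program the hardware for the given surface updates: reprogram bandwidth on
+ * FULL updates, lock the affected pipes, reprogram front ends as needed, then
+ * apply flip addresses, transfer functions and info frames before unlocking.
+ */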
+static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ dc->hwss.set_bandwidth(dc, context, false);
+ context_clock_trace(dc, context);
+ }
+
+ if (update_type > UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+ }
+ }
+
+ if (surface_count == 0) {
+ /*
+ * When turning off the screen there is no need to program the front end
+ * a second time; just return after programming the front end.
+ */
+ dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
+ return;
+ }
+
+ /* Lock pipes for provided surfaces, or all active if full update*/
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ true);
+ }
+ if (update_type == UPDATE_TYPE_FULL)
+ break;
+ }
+
+ /* Full front-end update */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
+ continue;
+
+ if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
+ struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+ }
+
+ if (update_type > UPDATE_TYPE_FAST)
+ context_timing_trace(dc, &context->res_ctx);
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ if (update_type == UPDATE_TYPE_MED)
+ dc->hwss.apply_ctx_for_surface(
+ dc, stream, surface_count, context);
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ if (srf_updates[i].flip_addr)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ /* workaround to program degamma regs for the split pipe after set mode */
+ if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+
+ if (stream_update != NULL &&
+ stream_update->out_transfer_func != NULL) {
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ if (srf_updates[i].hdr_static_metadata) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
+ }
+ }
+
+ /* Unlock pipes */
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ for (j = 0; j < surface_count; j++) {
+ if (update_type != UPDATE_TYPE_FULL &&
+ srf_updates[j].surface != pipe_ctx->plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ false);
+
+ break;
+ }
+ }
+}
+
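+/* Top-level entry for surface/stream updates: determine the update type,
+ * build a copy of the state for FULL updates, apply flip addresses and
+ * rebuild scaling parameters, commit the planes, then swap in the new state
+ * if one was created.
+ */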
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state)
+{
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
+ if (update_type >= update_surface_trace_level)
+ update_surface_trace(dc, srf_updates, surface_count);
+
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+
+ /* initialize scratch memory for building context */
+ context = dc_create_state();
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return;
+ }
+
+ dc_resource_state_copy_construct(state, context);
+ }
+
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ /* TODO: On flip we don't build the state, so it still has the
+ * old address, which is why we update the address here.
+ */
+ if (srf_updates[i].flip_addr) {
+ surface->address = srf_updates[i].flip_addr->address;
+ surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
+
+ }
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+
+ if (update_type >= UPDATE_TYPE_FULL)
+ dc_post_update_surfaces_to_stream(dc);
+
+ if (dc->current_state != context) {
+
+ struct dc_state *old = dc->current_state;
+
+ dc->current_state = context;
+ dc_release_state(old);
+
+ }
+
+ return;
+
+}
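For illustration, a minimal page flip through this entry point only needs a flip_addr update on a single surface, which the classification above treats as a fast update. A sketch, assuming the dc_flip_addrs and dc_plane_address types from dc.h and taking the dc/stream/plane pointers from the caller (the helper name is hypothetical):

static void example_flip_plane(struct dc *dc,
			       struct dc_stream_state *stream,
			       struct dc_plane_state *plane_state,
			       struct dc_plane_address new_addr)
{
	/* Sketch only: one surface, address-only (fast) update. */
	struct dc_flip_addrs flip = { 0 };
	struct dc_surface_update update = { 0 };

	flip.address = new_addr;	/* new scanout address */
	flip.flip_immediate = false;	/* flip on vsync, not immediately */

	update.surface = plane_state;
	update.flip_addr = &flip;

	dc_commit_updates_for_stream(dc, &update, 1, stream,
				     NULL,	/* no stream-level update */
				     &plane_state,
				     dc->current_state);
}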
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+ return dc->current_state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+ if (i < dc->current_state->stream_count)
+ return dc->current_state->streams[i];
+ return NULL;
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+
+ if (dc == NULL)
+ return;
+
+ dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+ dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state)
+{
+ struct kref refcount;
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+ dc_resource_state_construct(dc, dc->current_state);
+
+ dc->hwss.init_hw(dc);
+ break;
+ default:
+
+ dc->hwss.power_down(dc);
+
+ /* Zero out the current context so that on resume we start with
+ * clean state, and dc hw programming optimizations will not
+ * cause any trouble.
+ */
+
+ /* Preserve refcount */
+ refcount = dc->current_state->refcount;
+ dc_resource_state_destruct(dc->current_state);
+ memset(dc->current_state, 0,
+ sizeof(*dc->current_state));
+
+ dc->current_state->refcount = refcount;
+
+ break;
+ }
+
+}
+
+void dc_resume(struct dc *dc)
+{
+
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++)
+ core_link_resume(dc->links[i]);
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ cmd);
+}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+ if (!link_add_remote_sink_helper(
+ link,
+ dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link->ctx,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ if (edid_status != EDID_OK)
+ goto fail;
+
+ return dc_sink;
+fail:
+ dc_link_remove_remote_sink(link, dc_sink);
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
new file mode 100644
index 000000000000..6acee5426e4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -0,0 +1,359 @@
+/*
+ * dc_debug.c
+ *
+ * Created on: Nov 3, 2016
+ * Author: yonsun
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#define SURFACE_TRACE(...) do {\
+ if (dc->debug.surface_trace) \
+ dm_logger_write(logger, \
+ LOG_IF_TRACE, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define TIMING_TRACE(...) do {\
+ if (dc->debug.timing_trace) \
+ dm_logger_write(logger, \
+ LOG_SYNC, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define CLOCK_TRACE(...) do {\
+ if (dc->debug.clock_trace) \
+ dm_logger_write(logger, \
+ LOG_BANDWIDTH_CALCS, \
+ ##__VA_ARGS__); \
+} while (0)
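Note that the three macros above expand to references to local variables named dc and logger, so any function that uses them must have both in scope; each helper in this file derives logger from dc->ctx, as in the following minimal sketch (the function name is hypothetical):

void example_trace_user(struct dc *dc)
{
	/* Required by SURFACE_TRACE()/TIMING_TRACE()/CLOCK_TRACE(). */
	struct dal_logger *logger = dc->ctx->logger;

	SURFACE_TRACE("surface tracing enabled for dc %p\n", dc);
}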
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_plane_state *plane_state = plane_states[i];
+
+ SURFACE_TRACE("Planes %d:\n", i);
+
+ SURFACE_TRACE(
+ "plane_state->visible = %d;\n"
+ "plane_state->flip_immediate = %d;\n"
+ "plane_state->address.type = %d;\n"
+ "plane_state->address.grph.addr.quad_part = 0x%X;\n"
+ "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "plane_state->scaling_quality.h_taps = %d;\n"
+ "plane_state->scaling_quality.v_taps = %d;\n"
+ "plane_state->scaling_quality.h_taps_c = %d;\n"
+ "plane_state->scaling_quality.v_taps_c = %d;\n",
+ plane_state->visible,
+ plane_state->flip_immediate,
+ plane_state->address.type,
+ plane_state->address.grph.addr.quad_part,
+ plane_state->address.grph.meta_addr.quad_part,
+ plane_state->scaling_quality.h_taps,
+ plane_state->scaling_quality.v_taps,
+ plane_state->scaling_quality.h_taps_c,
+ plane_state->scaling_quality.v_taps_c);
+
+ SURFACE_TRACE(
+ "plane_state->src_rect.x = %d;\n"
+ "plane_state->src_rect.y = %d;\n"
+ "plane_state->src_rect.width = %d;\n"
+ "plane_state->src_rect.height = %d;\n"
+ "plane_state->dst_rect.x = %d;\n"
+ "plane_state->dst_rect.y = %d;\n"
+ "plane_state->dst_rect.width = %d;\n"
+ "plane_state->dst_rect.height = %d;\n"
+ "plane_state->clip_rect.x = %d;\n"
+ "plane_state->clip_rect.y = %d;\n"
+ "plane_state->clip_rect.width = %d;\n"
+ "plane_state->clip_rect.height = %d;\n",
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height,
+ plane_state->clip_rect.x,
+ plane_state->clip_rect.y,
+ plane_state->clip_rect.width,
+ plane_state->clip_rect.height);
+
+ SURFACE_TRACE(
+ "plane_state->plane_size.grph.surface_size.x = %d;\n"
+ "plane_state->plane_size.grph.surface_size.y = %d;\n"
+ "plane_state->plane_size.grph.surface_size.width = %d;\n"
+ "plane_state->plane_size.grph.surface_size.height = %d;\n"
+ "plane_state->plane_size.grph.surface_pitch = %d;\n",
+ plane_state->plane_size.grph.surface_size.x,
+ plane_state->plane_size.grph.surface_size.y,
+ plane_state->plane_size.grph.surface_size.width,
+ plane_state->plane_size.grph.surface_size.height,
+ plane_state->plane_size.grph.surface_pitch);
+
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
+ plane_state->tiling_info.gfx8.num_banks,
+ plane_state->tiling_info.gfx8.bank_width,
+ plane_state->tiling_info.gfx8.bank_width_c,
+ plane_state->tiling_info.gfx8.bank_height,
+ plane_state->tiling_info.gfx8.bank_height_c,
+ plane_state->tiling_info.gfx8.tile_aspect,
+ plane_state->tiling_info.gfx8.tile_aspect_c,
+ plane_state->tiling_info.gfx8.tile_split,
+ plane_state->tiling_info.gfx8.tile_split_c,
+ plane_state->tiling_info.gfx8.tile_mode,
+ plane_state->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_state->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_state->color_space = %d;\n"
+ "plane_state->dcc.enable = %d;\n"
+ "plane_state->format = %d;\n"
+ "plane_state->rotation = %d;\n"
+ "plane_state->stereo_format = %d;\n",
+ plane_state->tiling_info.gfx8.pipe_config,
+ plane_state->tiling_info.gfx8.array_mode,
+ plane_state->color_space,
+ plane_state->dcc.enable,
+ plane_state->format,
+ plane_state->rotation,
+ plane_state->stereo_format);
+
+ SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
+ plane_state->tiling_info.gfx9.swizzle);
+
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_surface_update *update = &updates[i];
+
+ SURFACE_TRACE("Update %d\n", i);
+ if (update->flip_addr) {
+ SURFACE_TRACE("flip_addr->address.type = %d;\n"
+ "flip_addr->address.grph.addr.quad_part = 0x%X;\n"
+ "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "flip_addr->flip_immediate = %d;\n",
+ update->flip_addr->address.type,
+ update->flip_addr->address.grph.addr.quad_part,
+ update->flip_addr->address.grph.meta_addr.quad_part,
+ update->flip_addr->flip_immediate);
+ }
+
+ if (update->plane_info) {
+ SURFACE_TRACE(
+ "plane_info->color_space = %d;\n"
+ "plane_info->format = %d;\n"
+ "plane_info->plane_size.grph.surface_pitch = %d;\n"
+ "plane_info->plane_size.grph.surface_size.height = %d;\n"
+ "plane_info->plane_size.grph.surface_size.width = %d;\n"
+ "plane_info->plane_size.grph.surface_size.x = %d;\n"
+ "plane_info->plane_size.grph.surface_size.y = %d;\n"
+ "plane_info->rotation = %d;\n",
+ update->plane_info->color_space,
+ update->plane_info->format,
+ update->plane_info->plane_size.grph.surface_pitch,
+ update->plane_info->plane_size.grph.surface_size.height,
+ update->plane_info->plane_size.grph.surface_size.width,
+ update->plane_info->plane_size.grph.surface_size.x,
+ update->plane_info->plane_size.grph.surface_size.y,
+ update->plane_info->rotation,
+ update->plane_info->stereo_format);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
+ update->plane_info->tiling_info.gfx8.num_banks,
+ update->plane_info->tiling_info.gfx8.bank_width,
+ update->plane_info->tiling_info.gfx8.bank_width_c,
+ update->plane_info->tiling_info.gfx8.bank_height,
+ update->plane_info->tiling_info.gfx8.bank_height_c,
+ update->plane_info->tiling_info.gfx8.tile_aspect,
+ update->plane_info->tiling_info.gfx8.tile_aspect_c,
+ update->plane_info->tiling_info.gfx8.tile_split,
+ update->plane_info->tiling_info.gfx8.tile_split_c,
+ update->plane_info->tiling_info.gfx8.tile_mode,
+ update->plane_info->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_info->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_info->visible = %d;\n"
+ "plane_info->per_pixel_alpha = %d;\n",
+ update->plane_info->tiling_info.gfx8.pipe_config,
+ update->plane_info->tiling_info.gfx8.array_mode,
+ update->plane_info->visible,
+ update->plane_info->per_pixel_alpha);
+
+ SURFACE_TRACE("surface->tiling_info.gfx9.swizzle = %d;\n",
+ update->plane_info->tiling_info.gfx9.swizzle);
+ }
+
+ if (update->scaling_info) {
+ SURFACE_TRACE(
+ "scaling_info->src_rect.x = %d;\n"
+ "scaling_info->src_rect.y = %d;\n"
+ "scaling_info->src_rect.width = %d;\n"
+ "scaling_info->src_rect.height = %d;\n"
+ "scaling_info->dst_rect.x = %d;\n"
+ "scaling_info->dst_rect.y = %d;\n"
+ "scaling_info->dst_rect.width = %d;\n"
+ "scaling_info->dst_rect.height = %d;\n"
+ "scaling_info->clip_rect.x = %d;\n"
+ "scaling_info->clip_rect.y = %d;\n"
+ "scaling_info->clip_rect.width = %d;\n"
+ "scaling_info->clip_rect.height = %d;\n"
+ "scaling_info->scaling_quality.h_taps = %d;\n"
+ "scaling_info->scaling_quality.v_taps = %d;\n"
+ "scaling_info->scaling_quality.h_taps_c = %d;\n"
+ "scaling_info->scaling_quality.v_taps_c = %d;\n",
+ update->scaling_info->src_rect.x,
+ update->scaling_info->src_rect.y,
+ update->scaling_info->src_rect.width,
+ update->scaling_info->src_rect.height,
+ update->scaling_info->dst_rect.x,
+ update->scaling_info->dst_rect.y,
+ update->scaling_info->dst_rect.width,
+ update->scaling_info->dst_rect.height,
+ update->scaling_info->clip_rect.x,
+ update->scaling_info->clip_rect.y,
+ update->scaling_info->clip_rect.width,
+ update->scaling_info->clip_rect.height,
+ update->scaling_info->scaling_quality.h_taps,
+ update->scaling_info->scaling_quality.v_taps,
+ update->scaling_info->scaling_quality.h_taps_c,
+ update->scaling_info->scaling_quality.v_taps_c);
+ }
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void post_surface_trace(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ SURFACE_TRACE("post surface process.\n");
+
+}
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+ int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+ struct crtc_position position;
+ unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ /* get_position() returns CRTC vertical/horizontal counter
+ * hence not applicable for underlay pipe
+ */
+ if (pipe_ctx->stream == NULL
+ || pipe_ctx->pipe_idx == underlay_idx)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
+ h_pos[i] = position.horizontal_count;
+ v_pos[i] = position.vertical_count;
+ }
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
+ pipe_ctx->stream_res.tg->inst,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ h_pos[i], v_pos[i]);
+ }
+}
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+ CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
new file mode 100644
index 000000000000..71993d5983bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "timing_generator.h"
+#include "hw_sequencer.h"
+
+/* used as index in array of black_color_format */
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+ BLACK_COLOR_FORMAT_DEBUG,
+};
+
+static const struct tg_color black_color_format[] = {
+ /* BlackColorFormat_RGB_FullRange */
+ {0, 0, 0},
+ /* BlackColorFormat_RGB_Limited */
+ {0x40, 0x40, 0x40},
+ /* BlackColorFormat_YUV_TV */
+ {0x200, 0x40, 0x200},
+ /* BlackColorFormat_YUV_CV */
+ {0x1f4, 0x40, 0x1f4},
+ /* BlackColorFormat_YUV_SuperAA */
+ {0x1a2, 0x20, 0x1a2},
+ /* visual confirm debug */
+ {0xff, 0xff, 0},
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color)
+{
+ switch (colorspace) {
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+ break;
+
+ case COLOR_SPACE_SRGB_LIMITED:
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+ break;
+
+ default:
+ /* default is sRGB black (full range). */
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+ break;
+ }
+}
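A small usage sketch (helper name hypothetical): a caller blanking a pipe resolves the stream's output color space to the matching black level from the table above.

static void example_blank_color(const struct dc *dc, struct tg_color *black)
{
	/* YCbCr 709 resolves to the YUV "CV" entry, i.e. {0x1f4, 0x40, 0x1f4}. */
	color_space_to_black_color(dc, COLOR_SPACE_YCBCR709, black);
}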
+
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg)
+{
+ int counter;
+
+ for (counter = 0; counter < 100; counter++) {
+ if (tg->funcs->is_blanked(tg))
+ break;
+
+ msleep(1);
+ }
+
+ if (counter == 100) {
+ dm_error("DC: failed to blank crtc!\n");
+ return false;
+ }
+
+ return true;
+}
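For reference, the loop above gives a wait budget of roughly 100 iterations * msleep(1), i.e. about 100 ms before declaring failure; since msleep(1) may sleep longer than one millisecond, the real upper bound can be somewhat larger.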
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
new file mode 100644
index 000000000000..0602610489d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -0,0 +1,2367 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "atom.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "abm.h"
+#include "fixed31_32.h"
+#include "dpcd_defs.h"
+#include "dmcu.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_enum.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define LINK_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+ __VA_ARGS__)
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+ LINK_RATE_REF_FREQ_IN_MHZ = 27,
+ PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destruct(struct dc_link *link)
+{
+ int i;
+
+ if (link->ddc)
+ dal_ddc_service_destroy(&link->ddc);
+
+ if (link->link_enc)
+ link->link_enc->funcs->destroy(&link->link_enc);
+
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ for (i = 0; i < link->sink_count; ++i)
+ dc_sink_release(link->remote_sinks[i]);
+}
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
+{
+ enum bp_result bp_result;
+ struct graphics_object_hpd_info hpd_info;
+ struct gpio_pin_info pin_info;
+
+ if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+ return NULL;
+
+ bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+ hpd_info.hpd_int_gpio_uid, &pin_info);
+
+ if (bp_result != BP_RESULT_OK) {
+ ASSERT(bp_result == BP_RESULT_NORECORD);
+ return NULL;
+ }
+
+ return dal_gpio_service_create_irq(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+}
+
+/*
+ * Function: program_hpd_filter
+ *
+ * @brief
+ * Programs the HPD filter on the link's associated HPD line.
+ *
+ * @param [in] link: link whose connector signal selects the connect and
+ * disconnect filter timeouts
+ *
+ * @return
+ * true on success, false otherwise
+ */
+static bool program_hpd_filter(
+ const struct dc_link *link)
+{
+ bool result = false;
+
+ struct gpio *hpd;
+
+ int delay_on_connect_in_ms = 0;
+ int delay_on_disconnect_in_ms = 0;
+
+ /* Verify feature is supported */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* Program hpd filter */
+ delay_on_connect_in_ms = 500;
+ delay_on_disconnect_in_ms = 100;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* Program hpd filter to allow DP signal to settle */
+ /* 500: not able to detect MST <-> SST switch as HPD is low for
+ * only 100ms on DELL U2413
+ * 0: some passive dongles still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with passive dongles;
+ * also see intermittent i2c read issues.
+ */
+ delay_on_connect_in_ms = 80;
+ delay_on_disconnect_in_ms = 0;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ default:
+ /* Don't program hpd filter */
+ return false;
+ }
+
+ /* Obtain HPD handle */
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return result;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = delay_on_connect_in_ms;
+ config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+
+ result = true;
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+
+ return result;
+}
+
+static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+{
+ uint32_t is_hpd_high = 0;
+ struct gpio *hpd_pin;
+
+ /* todo: may need to lock gpio access */
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ if (hpd_pin == NULL)
+ goto hpd_gpio_failure;
+
+ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+ dal_gpio_get_value(hpd_pin, &is_hpd_high);
+ dal_gpio_close(hpd_pin);
+ dal_gpio_destroy_irq(&hpd_pin);
+
+ if (is_hpd_high) {
+ *type = dc_connection_single;
+ /* TODO: need to do the actual detection */
+ } else {
+ *type = dc_connection_none;
+ }
+
+ return true;
+
+hpd_gpio_failure:
+ return false;
+}
+
+static enum ddc_transaction_type get_ddc_transaction_type(
+ enum signal_type sink_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2COverAux, but there is the
+ * SPECIAL use case for "immediate dwnstrm device
+ * access" (EPR#370830). */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+
+ return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(
+ struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
+{
+ if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+ switch (downstream.id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ {
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ }
+ }
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ case CONNECTOR_ID_VGA:
+ return SIGNAL_TYPE_RGB;
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case CONNECTOR_ID_LVDS:
+ return SIGNAL_TYPE_LVDS;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case CONNECTOR_ID_EDP:
+ return SIGNAL_TYPE_EDP;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ } else if (downstream.type == OBJECT_TYPE_ENCODER) {
+ switch (downstream.id) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ }
+
+ return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+static bool is_dp_sink_present(struct dc_link *link)
+{
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(link->link_id);
+
+ bool present =
+ ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP));
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return present;
+ }
+
+ /* Open GPIO and set it to I2C mode */
+ /* Note: this GpioMode_Input will be converted
+ * to GpioConfigType_I2cAuxDualMode in GPIO component,
+ * which indicates we need additional delay */
+
+ if (GPIO_RESULT_OK != dal_ddc_open(
+ ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ dal_gpio_destroy_ddc(&ddc);
+
+ return present;
+ }
+
+ /* Read GPIO: DP sink is present if both clock and data pins are zero */
+ /* [anaumov] in DAL2, there was no check for GPIO failure */
+
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+ dal_ddc_close(ddc);
+
+ return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink(
+ struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ enum signal_type result = get_basic_signal_type(
+ link->link_enc->id, link->link_id);
+
+ /* Internal digital encoder will detect only dongles
+ * that require digital signal */
+
+ /* Detection mechanism is different
+ * for different native connectors.
+ * LVDS connector supports only LVDS signal;
+ * PCIE is a bus slot, the actual connector needs to be detected first;
+ * eDP connector supports only eDP signal;
+ * HDMI should check straps for audio */
+
+ /* PCIE detects the actual connector on add-on board */
+
+ if (link->link_id.id == CONNECTOR_ID_PCIE) {
+ /* ZAZTODO implement PCIE add-on card detection */
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A: {
+ /* check audio support:
+ * if native HDMI is not supported, switch to DVI */
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+
+ if (!aud_support->hdmi_audio_native)
+ if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT: {
+ /* DP HPD short pulse. Passive DP dongle will not
+ * have short pulse
+ */
+ if (reason != DETECT_REASON_HPDRX) {
+ /* Check whether DP signal detected: if not -
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+ if (!is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+ enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ switch (dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (audio_support->hdmi_audio_on_dongle)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ if (audio_support->hdmi_audio_native)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ default:
+ signal = SIGNAL_TYPE_NONE;
+ break;
+ }
+
+ return signal;
+}
+
+static enum signal_type dp_passive_dongle_detection(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
+{
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ ddc, sink_cap);
+ return decide_signal_from_strap_and_dongle_type(
+ sink_cap->dongle_type,
+ audio_support);
+}
+
+static void link_disconnect_sink(struct dc_link *link)
+{
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+}
+
+static void detect_dp(
+ struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
+{
+ bool boot = false;
+ sink_caps->signal = link_detect_sink(link, reason);
+ sink_caps->transaction_type =
+ get_ddc_transaction_type(sink_caps->signal);
+
+ if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ detect_dp_sink_caps(link);
+
+ /* DP active dongles */
+ if (is_dp_active_dongle(link)) {
+ link->type = dc_connection_active_dongle;
+ if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+ /*
+ * active dongle unplug processing for short irq
+ */
+ link_disconnect_sink(link);
+ return;
+ }
+
+ if (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ *converter_disable_audio = true;
+ }
+ }
+ if (is_mst_supported(link)) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ link->type = dc_connection_mst_branch;
+
+ /*
+ * This call will initiate MST topology discovery, which will
+ * detect MST ports, add new DRM connectors to the DRM
+ * framework and then read EDIDs via remote i2c over aux. In
+ * the end it will notify DRM of the detect result and save
+ * the EDIDs into the DRM framework.
+ *
+ * .detect is called by .fill_modes.
+ * .fill_modes is called by user mode ioctl
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * .get_modes is called by .fill_modes.
+ *
+ * When .get_modes is called, the AMDGPU DM implementation
+ * will create a new dc_sink and add it to dc_link. For long
+ * HPD plug in/out, MST has its own handling.
+ *
+ * Therefore, just after dc_create, link->sink is not created
+ * for MST until the user mode app calls
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * Need to check ->sink usages in case ->sink == NULL
+ * TODO: s3 resume check
+ */
+ if (reason == DETECT_REASON_BOOT)
+ boot = true;
+
+ if (!dm_helpers_dp_mst_start_top_mgr(
+ link->ctx,
+ link, boot)) {
+ /* MST not supported */
+ link->type = dc_connection_single;
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ }
+ }
+ } else {
+ /* DP passive dongles */
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
+ }
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ uint8_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (false == detect_sink(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP &&
+ link->local_sink)
+ return true;
+
+ link_disconnect_sink(link);
+
+ if (new_connection_type != dc_connection_none) {
+ link->type = new_connection_type;
+
+ /* From Disconnected-to-Connected. */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ if (aud_support->hdmi_audio_native)
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ detect_dp(
+ link,
+ &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason);
+
+ /* Active dongle downstream unplug */
+ if (link->type == dc_connection_active_dongle
+ && link->dpcd_caps.sink_count.
+ bits.SINK_COUNT == 0)
+ return true;
+
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+ /* Need to set up the mst link_cap struct here,
+ * otherwise dc_link_detect() leaves mst link_cap
+ * empty, which gives allocate_mst_payload() a "0"
+ * pbn_per_slot value and leads to an exception in
+ * dal_fixed31_32_div().
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ return false;
+ }
+
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return false;
+ } /* switch() */
+
+ if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+ link->dpcd_sink_count = link->dpcd_caps.sink_count.
+ bits.SINK_COUNT;
+ else
+ link->dpcd_sink_count = 1;
+
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps.transaction_type);
+
+ link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
+ link->ddc);
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return false;
+ }
+
+ sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+ sink->converter_disable_audio = converter_disable_audio;
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ switch (edid_status) {
+ case EDID_BAD_CHECKSUM:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "EDID checksum invalid.\n");
+ break;
+ case EDID_NO_RESPONSE:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "No EDID read.\n");
+ return false;
+
+ default:
+ break;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ }
+
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+ &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
+ EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: "
+ "manufacturer_id = %X, "
+ "product_id = %X, "
+ "serial_number = %X, "
+ "manufacture_week = %d, "
+ "manufacture_year = %d, "
+ "display_name = %s, "
+ "speaker_flag = %d, "
+ "audio_mode_count = %d\n",
+ __func__,
+ sink->edid_caps.manufacturer_id,
+ sink->edid_caps.product_id,
+ sink->edid_caps.serial_number,
+ sink->edid_caps.manufacture_week,
+ sink->edid_caps.manufacture_year,
+ sink->edid_caps.display_name,
+ sink->edid_caps.speaker_flags,
+ sink->edid_caps.audio_mode_count);
+
+ for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: mode number = %d, "
+ "format_code = %d, "
+ "channel_count = %d, "
+ "sample_rate = %d, "
+ "sample_size = %d\n",
+ __func__,
+ i,
+ sink->edid_caps.audio_modes[i].format_code,
+ sink->edid_caps.audio_modes[i].channel_count,
+ sink->edid_caps.audio_modes[i].sample_rate,
+ sink->edid_caps.audio_modes[i].sample_size);
+ }
+
+ } else {
+ /* From Connected-to-Disconnected. */
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Disconnected\n",
+ link->link_index);
+
+ dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+
+ link->mst_stream_alloc_table.stream_count = 0;
+ memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ }
+
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+ link->link_index, sink,
+ (sink_caps.signal == SIGNAL_TYPE_NONE ?
+ "Disconnected":"Connected"));
+
+ return true;
+}
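As a usage sketch, a boot-time detection pass walks every link using the dc->links array and the DETECT_REASON_BOOT reason referenced above (the helper name is hypothetical):

static void example_detect_all_links(struct dc *dc)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		struct dc_link *link = dc->links[i];

		if (!dc_link_detect(link, DETECT_REASON_BOOT))
			continue;

		/* On success, link->local_sink (or the MST topology manager
		 * for MST branches) reflects what was detected. */
	}
}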
+
+static enum hpd_source_id get_hpd_line(
+ struct dc_link *link)
+{
+ struct gpio *hpd;
+ enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd) {
+ switch (dal_irq_get_source(hpd)) {
+ case DC_IRQ_SOURCE_HPD1:
+ hpd_id = HPD_SOURCEID1;
+ break;
+ case DC_IRQ_SOURCE_HPD2:
+ hpd_id = HPD_SOURCEID2;
+ break;
+ case DC_IRQ_SOURCE_HPD3:
+ hpd_id = HPD_SOURCEID3;
+ break;
+ case DC_IRQ_SOURCE_HPD4:
+ hpd_id = HPD_SOURCEID4;
+ break;
+ case DC_IRQ_SOURCE_HPD5:
+ hpd_id = HPD_SOURCEID5;
+ break;
+ case DC_IRQ_SOURCE_HPD6:
+ hpd_id = HPD_SOURCEID6;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ dal_gpio_destroy_irq(&hpd);
+ }
+
+ return hpd_id;
+}
+
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+ struct ddc *ddc;
+ enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (ddc) {
+ switch (dal_ddc_get_line(ddc)) {
+ case GPIO_DDC_LINE_DDC1:
+ channel = CHANNEL_ID_DDC1;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ channel = CHANNEL_ID_DDC2;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ channel = CHANNEL_ID_DDC3;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ channel = CHANNEL_ID_DDC4;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ channel = CHANNEL_ID_DDC5;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ channel = CHANNEL_ID_DDC6;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ channel = CHANNEL_ID_DDC_VGA;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ channel = CHANNEL_ID_I2C_PAD;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
+{
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_A;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_B;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_C;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_D;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_E;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_F;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_G;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_NUTMEG_CRT;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_TRAVIS_CRT;
+ case ENUM_ID_2:
+ return TRANSMITTER_TRAVIS_LCD;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+}
+
+static bool construct(
+ struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ uint8_t i;
+ struct gpio *hpd_gpio = NULL;
+ struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+ struct dc_context *dc_ctx = init_params->ctx;
+ struct encoder_init_data enc_init_data = { 0 };
+ struct integrated_info info = {{{ 0 }}};
+ struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+ const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+ __func__, init_params->connector_index);
+ goto create_fail;
+ }
+
+ hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd = dal_irq_get_source(hpd_gpio);
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+
+ break;
+ case CONNECTOR_ID_EDP:
+ link->connector_signal = SIGNAL_TYPE_EDP;
+
+ if (hpd_gpio != NULL) {
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+ }
+ break;
+ default:
+ dm_logger_write(dc_ctx->logger, LOG_WARNING,
+ "Unsupported Connector type:%d!\n", link->link_id.id);
+ goto create_fail;
+ }
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ hpd_gpio = NULL;
+ }
+
+ /* TODO: #DAL3 Implement id to str function. */
+ LINK_INFO("Connector[%d] description: "
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+ if (link->ddc == NULL) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ link->ddc_hw_inst =
+ dal_ddc_get_line(
+ dal_ddc_service_get_ddc_pin(link->ddc));
+
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.connector = link->link_id;
+ enc_init_data.channel = get_ddc_line(link);
+ enc_init_data.hpd_source = get_hpd_line(link);
+
+ link->hpd_src = enc_init_data.hpd_source;
+
+ enc_init_data.transmitter =
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc = link->dc->res_pool->funcs->link_enc_create(
+ &enc_init_data);
+
+ if (link->link_enc == NULL) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
+
+ for (i = 0; i < 4; i++) {
+ if (BP_RESULT_OK !=
+ bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ DC_ERROR("Failed to find device tag!\n");
+ goto device_tag_fail;
+ }
+
+ /* Look for device tag that matches connector signal,
+ * CRT for RGB, LCD for other supported signal types
+ */
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+ && link->connector_signal != SIGNAL_TYPE_RGB)
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+ && link->connector_signal == SIGNAL_TYPE_RGB)
+ continue;
+ break;
+ }
+
+ if (bios->integrated_info)
+ info = *bios->integrated_info;
+
+ /* Look for channel mapping corresponding to connector and device tag */
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+ struct external_display_path *path =
+ &info.ext_disp_conn_info.path[i];
+ if (path->device_connector_id.enum_id == link->link_id.enum_id
+ && path->device_connector_id.id == link->link_id.id
+ && path->device_connector_id.type == link->link_id.type) {
+
+ if (link->device_tag.acpi_device != 0
+ && path->device_acpi_enum == link->device_tag.acpi_device) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ } else if (path->device_tag ==
+ link->device_tag.dev_id.raw_device_tag) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ }
+ break;
+ }
+ }
+
+ /*
+ * TODO check if GPIO programmed correctly
+ *
+ * If GPIO isn't programmed correctly HPD might not rise or drain
+ * fast enough, leading to bounces.
+ */
+ program_hpd_filter(link);
+
+ return true;
+device_tag_fail:
+ link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+ dal_ddc_service_destroy(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ }
+
+ return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+ struct dc_link *link =
+ kzalloc(sizeof(*link), GFP_KERNEL);
+
+ if (NULL == link)
+ goto alloc_fail;
+
+ if (false == construct(link, init_params))
+ goto construct_fail;
+
+ return link;
+
+construct_fail:
+ kfree(link);
+
+alloc_fail:
+ return NULL;
+}
+
+void link_destroy(struct dc_link **link)
+{
+ destruct(*link);
+ kfree(*link);
+ *link = NULL;
+}
+
+static void dpcd_configure_panel_mode(
+ struct dc_link *link,
+ enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Set eDP panel mode in receiver */
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DDC_RESULT_SUCESSFULL);
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
+ "Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ union down_spread_ctrl downspread;
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+}
+
+static enum dc_status enable_link_dp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ bool skip_video_pattern;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link_settings link_settings = {0};
+ enum dp_panel_mode panel_mode;
+ enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+ /* get link settings for video mode timing */
+ decide_link_settings(stream, &link_settings);
+
+ /* Raise clock state for HBR3 if required. Confirmed with HW that the
+ * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
+ */
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+ if (state->dis_clk->funcs->set_min_clocks_state) {
+ if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+ state->dis_clk->funcs->set_min_clocks_state(
+ state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+ } else {
+ uint32_t dp_phyclk_in_khz;
+ const struct clocks_value clocks_value =
+ state->dis_clk->cur_clocks_value;
+
+ /* 27 MHz = 27000000 Hz = 27000 kHz */
+ dp_phyclk_in_khz = link_settings.link_rate * 27000;
+
+ if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
+ (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
+ (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
+ state->dis_clk->funcs->apply_clock_voltage_request(
+ state->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ dp_phyclk_in_khz,
+ false,
+ true);
+ }
+ }
+ }
+
+ dp_enable_link_phy(
+ link,
+ pipe_ctx->stream->signal,
+ pipe_ctx->clock_source->id,
+ &link_settings);
+
+ panel_mode = dp_get_panel_mode(link);
+ dpcd_configure_panel_mode(link, panel_mode);
+
+ skip_video_pattern = true;
+
+ if (link_settings.link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(
+ link,
+ &link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS)) {
+ link->cur_link_settings = link_settings;
+ status = DC_OK;
+ }
+ else
+ status = DC_FAIL_DP_LINK_TRAINING;
+
+ enable_stream_features(pipe_ctx);
+
+ return status;
+}
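For reference, the PHY clock computed above treats the link-rate code as a multiple of LINK_RATE_REF_FREQ_IN_MHZ (27 MHz): assuming LINK_RATE_HIGH2 carries the DPCD code 0x14 (decimal 20), dp_phyclk_in_khz = 20 * 27000 kHz = 540000 kHz, which corresponds to the 5.4 Gbps HBR2 lane rate.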
+
+static enum dc_status enable_link_dp_mst(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* Sink signal type after an MST branch is MST. Multiple MST sinks
+ * share one link, so the link's DP PHY is enabled and trained only
+ * once.
+ */
+ if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+ return DC_OK;
+
+ /* set the sink to MST mode before enabling the link */
+ dp_enable_mst_on_sink(link, true);
+
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+ enum engine_id eng_id,
+ struct ext_hdmi_settings *settings)
+{
+ bool result = false;
+ int i = 0;
+ struct integrated_info *integrated_info =
+ pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+ if (integrated_info == NULL)
+ return false;
+
+ /*
+ * Get retimer settings from sbios for passing SI eye test for DCE11
+ * The setting values are varied based on board revision and port id
+ * Therefore the setting values of each ports is passed by sbios.
+ */
+
+ // Check if current bios contains ext Hdmi settings
+ if (integrated_info->gpu_cap_info & 0x20) {
+ switch (eng_id) {
+ case ENGINE_ID_DIGA:
+ settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp0_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp0_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp1_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp1_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp2_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp2_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp3_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp3_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ if (result == true) {
+ // Validate settings from bios integrated info table
+ if (settings->slv_addr == 0)
+ return false;
+ if (settings->reg_num > 9)
+ return false;
+ if (settings->reg_num_6g > 3)
+ return false;
+
+ for (i = 0; i < settings->reg_num; i++) {
+ if (settings->reg_settings[i].i2c_reg_index > 0x20)
+ return false;
+ }
+
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+ return false;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool i2c_write(struct pipe_ctx *pipe_ctx,
+ uint8_t address, uint8_t *buffer, uint32_t length)
+{
+ struct i2c_command cmd = {0};
+ struct i2c_payload payload = {0};
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.number_of_payloads = 1;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+ payload.address = address;
+ payload.data = buffer;
+ payload.length = length;
+ payload.write = true;
+ cmd.payloads = &payload;
+
+ if (dc_submit_i2c(pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream->sink->link->link_index, &cmd))
+ return true;
+
+ return false;
+}
+
+static void write_i2c_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz,
+ struct ext_hdmi_settings *settings)
+{
+ uint8_t slave_address = (settings->slv_addr >> 1);
+ uint8_t buffer[2];
+ const uint8_t apply_rx_tx_change = 0x4;
+ uint8_t offset = 0xA;
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Start Ext-Hdmi programming*/
+
+ for (i = 0; i < settings->reg_num; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+ settings->reg_settings[i].i2c_reg_index == 0xB ||
+ settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+
+ /* Apply 6G settings */
+ if (is_over_340mhz) {
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ /* Apply 6G settings */
+ if (settings->reg_settings_6g[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings_6g[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+ }
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ }
+}
+
+static void write_i2c_default_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xBA >> 1);
+ uint8_t buffer[2];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Program slave address for tuning signal integrity */
+ /* Write offset 0x0A to 0x13 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x13;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+}
+
+static void write_i2c_redriver_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ // Program slave address for tuning signal integrity
+ buffer[3] = 0x4E;
+ buffer[4] = 0x4E;
+ buffer[5] = 0x4E;
+ buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ enum dc_color_depth display_color_depth;
+ enum engine_id eng_id;
+ struct ext_hdmi_settings settings = {0};
+ bool is_over_340mhz = false;
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+ write_i2c_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz, &settings);
+ } else {
+ write_i2c_default_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz);
+ }
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+ }
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->sink->link->ddc,
+ stream->phy_pix_clk,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+ memset(&stream->sink->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+
+ display_color_depth = stream->timing.display_color_depth;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ display_color_depth = COLOR_DEPTH_888;
+
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ pipe_ctx->clock_source->id,
+ display_color_depth,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+ stream->phy_pix_clk);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+static enum dc_status enable_link(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ switch (pipe_ctx->stream->signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_dp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ status = enable_link_dp_mst(state, pipe_ctx);
+ msleep(200);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_ctx->stream_res.audio && status == DC_OK) {
+ /* notify audio driver for audio modes of monitor */
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+
+ return status;
+}
+
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+ /*
+ * TODO: implement call for dp_set_hw_test_pattern
+ * it is needed for compliance testing
+ */
+
+ /* Here we need to specify that encoder output settings
+ * are calculated as for the set mode; this leads to querying
+ * dynamic link capabilities, which should be done before
+ * enabling the output. */
+
+ if (dc_is_dp_signal(signal)) {
+ /* SST DP, eDP */
+ if (dc_is_dp_sst_signal(signal))
+ dp_disable_link_phy(link, signal);
+ else
+ dp_disable_link_phy_mst(link, signal);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+}
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+
+ /* A hack to avoid failing any modes for the EDID override feature on
+ * a topology change, such as a lower quality DP cable or a different dongle
+ */
+ if (link->remote_sinks[0])
+ return DC_OK;
+
+ if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
+ return DC_EXCEED_DONGLE_MAX_CLK;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (!dp_validate_mode_timing(
+ link,
+ timing))
+ return DC_NO_DP_LINK_BANDWIDTH;
+ break;
+
+ default:
+ break;
+ }
+
+ return DC_OK;
+}
+
+
+bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ unsigned int controller_id = 0;
+ int i;
+
+ if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ return false;
+
+ dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n", level, level);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ if (stream != NULL) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream
+ == stream)
+ /* DMCU subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ controller_id =
+ core_dc->current_state->
+ res_ctx.pipe_ctx[i].stream_res.tg->inst +
+ 1;
+ }
+ }
+ abm->funcs->set_backlight_level(
+ abm,
+ level,
+ frame_ramp,
+ controller_id);
+ }
+
+ return true;
+}
+
+bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ return true;
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int i;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (link != NULL &&
+ dmcu != NULL) {
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
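+ /* DPCD address 368 decimal is 0x170, the panel self refresh
+ * (PSR) configuration register.
+ */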
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* DMCU subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId = link->link_enc->transmitter;
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ timing.pix_clk_khz * 1000),
+ stream->timing.v_total),
+ stream->timing.h_total);
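+ /* Illustrative values only: a 1920x1080@60 timing with
+ * pix_clk_khz = 148500, v_total = 1125 and h_total = 2200
+ * yields 148500000 / 1125 / 2200 = 60 Hz.
+ */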
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->res_cap->num_timing_generator;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /*skip power down the single pipe since it blocks the cstate*/
+ if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_enabled = true;
+ dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ return true;
+ } else
+ return false;
+
+}
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+ return &link->link_status;
+}
+
+void core_link_resume(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+ program_hpd_filter(link);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+ struct dc_link_settings *link_settings =
+ &stream->sink->link->cur_link_settings;
+ uint32_t link_rate_in_mbps =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+ struct fixed31_32 mbps = dal_fixed31_32_from_int(
+ link_rate_in_mbps * link_settings->lane_count);
+
+ return dal_fixed31_32_div_int(mbps, 54);
+}
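+/* Illustrative arithmetic for the helper above (not used by the driver):
+ * assuming LINK_RATE_REF_FREQ_IN_MHZ is 270 and LINK_RATE_HIGH2 (HBR2)
+ * enumerates to 0x14 (20 decimal), a 4-lane HBR2 link yields
+ * 20 * 270 * 4 / 54 = 400 PBN per time slot.
+ */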
+
+static int get_color_depth(enum dc_color_depth color_depth)
+{
+ switch (color_depth) {
+ case COLOR_DEPTH_666: return 6;
+ case COLOR_DEPTH_888: return 8;
+ case COLOR_DEPTH_101010: return 10;
+ case COLOR_DEPTH_121212: return 12;
+ case COLOR_DEPTH_141414: return 14;
+ case COLOR_DEPTH_161616: return 16;
+ default: return 0;
+ }
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t bpc;
+ uint64_t kbps;
+ struct fixed31_32 peak_kbps;
+ uint32_t numerator;
+ uint32_t denominator;
+
+ bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
+ kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 convert to bytes
+ */
+
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+ peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
+ return peak_kbps;
+}
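+/* Illustrative only, assuming PEAK_FACTOR_X1000 is 1006 (the 1.006 margin
+ * noted above): for a 148500 kHz pixel clock at 8 bpc this helper computes
+ * kbps = 148500 * 8 * 3 = 3564000, then
+ * PBN = 3564000 * 64 * 1006 / (54 * 8 * 1000 * 1000) ~= 531.
+ */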
+
+static void update_mst_stream_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *stream_enc,
+ const struct dp_mst_stream_allocation_table *proposed_table)
+{
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+ { 0 } };
+ struct link_mst_stream_allocation *dc_alloc;
+
+ int i;
+ int j;
+
+ /* if DRM proposed_table has more than one new payload */
+ ASSERT(proposed_table->stream_count -
+ link->mst_stream_alloc_table.stream_count < 2);
+
+ /* copy proposed_table to link, add stream encoder */
+ for (i = 0; i < proposed_table->stream_count; i++) {
+
+ for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+ dc_alloc =
+ &link->mst_stream_alloc_table.stream_allocations[j];
+
+ if (dc_alloc->vcp_id ==
+ proposed_table->stream_allocations[i].vcp_id) {
+
+ work_table[i] = *dc_alloc;
+ break; /* exit j loop */
+ }
+ }
+
+ /* new vcp_id */
+ if (j == link->mst_stream_alloc_table.stream_count) {
+ work_table[i].vcp_id =
+ proposed_table->stream_allocations[i].vcp_id;
+ work_table[i].slot_count =
+ proposed_table->stream_allocations[i].slot_count;
+ work_table[i].stream_enc = stream_enc;
+ }
+ }
+
+ /* update link->mst_stream_alloc_table with work_table */
+ link->mst_stream_alloc_table.stream_count =
+ proposed_table->stream_count;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+ link->mst_stream_alloc_table.stream_allocations[i] =
+ work_table[i];
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ uint8_t i;
+
+ /* enable_link_dp_mst has already checked link->enabled_stream_count
+ * and the stream is in link->stream[]. This is called during set mode,
+ * so stream_enc is available.
+ */
+
+ /* calculate the VC payload for the stream: stream_alloc */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+ } else
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "Failed to update "
+ "MST allocation table for "
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* program DP source TX for payload */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* send down message */
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_timing(pipe_ctx);
+ avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+ uint8_t i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+
+ /* deallocate_mst_payload is called before the link is disabled. On a
+ * mode change or monitor disable/enable, a new stream is created that
+ * is not in link->stream[] yet; its payload has not been allocated, so
+ * no de-allocation should be done for it. For a new mode set,
+ * map_resources will get an engine for the new stream, so
+ * stream_enc->id is valid by this point.
+ */
+
+ /* slot X.Y */
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* TODO: which component is responsible for remove payload table? */
+ if (mst_mode) {
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false)) {
+
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+ } else {
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "Failed to update "
+ "MST allocation table for "
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ return DC_OK;
+}
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ enum dc_status status = enable_link(state, pipe_ctx);
+
+ if (status != DC_OK) {
+ dm_logger_write(pipe_ctx->stream->ctx->logger,
+ LOG_WARNING, "enabling link %u failed: %d\n",
+ pipe_ctx->stream->sink->link->link_index,
+ status);
+
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
+ */
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
+
+ /* turn off otg test pattern if enable */
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
+ core_dc->hwss.enable_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ core_dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->sink->link->cur_link_settings);
+}
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+
+ core_dc->hwss.disable_stream(pipe_ctx, option);
+
+ disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
+}
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ core_dc->hwss.set_avmute(pipe_ctx, enable);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
new file mode 100644
index 000000000000..d5294798b0a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "gpio_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+
+/* CV smart dongle slave address for retrieving supported HDTV modes*/
+#define CV_SMART_DONGLE_ADDRESS 0x20
+/* DVI-HDMI dongle slave address for retrieving dongle signature*/
+#define DVI_HDMI_DONGLE_ADDRESS 0x68
+static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
+struct dvi_hdmi_dongle_signature_data {
+ int8_t vendor[3];/* "AMD" */
+ uint8_t version[2];
+ uint8_t size;
+ int8_t id[11];/* "6140063500G"*/
+};
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+struct dp_hdmi_dongle_signature_data {
+ int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+ uint8_t eot;/* end of transmission '\x4' */
+};
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS 0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+union hdmi_scdc_update_read_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t STATUS_UPDATE:1;
+ uint8_t CED_UPDATE:1;
+ uint8_t RR_TEST:1;
+ uint8_t RESERVED:5;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_status_flags_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t CLOCK_DETECTED:1;
+ uint8_t CH0_LOCKED:1;
+ uint8_t CH1_LOCKED:1;
+ uint8_t CH2_LOCKED:1;
+ uint8_t RESERVED:4;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_ced_data {
+ uint8_t byte[7];
+ struct {
+ uint8_t CH0_8LOW:8;
+ uint8_t CH0_7HIGH:7;
+ uint8_t CH0_VALID:1;
+ uint8_t CH1_8LOW:8;
+ uint8_t CH1_7HIGH:7;
+ uint8_t CH1_VALID:1;
+ uint8_t CH2_8LOW:8;
+ uint8_t CH2_7HIGH:7;
+ uint8_t CH2_VALID:1;
+ uint8_t CHECKSUM:8;
+ } fields;
+};
+
+union hdmi_scdc_test_config_Data {
+ uint8_t byte;
+ struct {
+ uint8_t TEST_READ_REQUEST_DELAY:7;
+ uint8_t TEST_READ_REQUEST: 1;
+ } fields;
+};
+
+struct i2c_payloads {
+ struct vector payloads;
+};
+
+struct aux_payloads {
+ struct vector payloads;
+};
+
+static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct i2c_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+
+}
+
+static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+ return (struct i2c_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+ if (!p || !*p)
+ return;
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+
+}
+
+static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct aux_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+}
+
+static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+ return (struct aux_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+ if (!p || !*p)
+ return;
+
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = EDID_SEGMENT_SIZE;
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct i2c_payload payload = {
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+
+}
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct aux_payload payload = {
+ .i2c_over_aux = true,
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+}
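+/* Both *_payloads_add helpers split a transfer into chunks no larger than
+ * the transport's maximum payload size, each chunk pointing into the
+ * caller's buffer at the matching offset.
+ */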
+
+static void construct(
+ struct ddc_service *ddc_service,
+ struct ddc_service_init_data *init_data)
+{
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(init_data->id);
+
+ struct gpio_service *gpio_service = init_data->ctx->gpio_service;
+ struct graphics_object_i2c_info i2c_info;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_bios *dcb = init_data->ctx->dc_bios;
+
+ ddc_service->link = init_data->link;
+ ddc_service->ctx = init_data->ctx;
+
+ if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+ ddc_service->ddc_pin = NULL;
+ } else {
+ hw_info.ddc_channel = i2c_info.i2c_line;
+ hw_info.hw_supported = i2c_info.i2c_hw_assist;
+
+ ddc_service->ddc_pin = dal_gpio_create_ddc(
+ gpio_service,
+ i2c_info.gpio_info.clk_a_register_index,
+ 1 << i2c_info.gpio_info.clk_a_shift,
+ &hw_info);
+ }
+
+ ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
+ ddc_service->flags.FORCE_READ_REPEATED_START = false;
+ ddc_service->flags.EDID_STRESS_READ = false;
+
+ ddc_service->flags.IS_INTERNAL_DISPLAY =
+ connector_id == CONNECTOR_ID_EDP ||
+ connector_id == CONNECTOR_ID_LVDS;
+
+ ddc_service->wa.raw = 0;
+}
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *init_data)
+{
+ struct ddc_service *ddc_service;
+
+ ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL);
+
+ if (!ddc_service)
+ return NULL;
+
+ construct(ddc_service, init_data);
+ return ddc_service;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+ if (ddc->ddc_pin)
+ dal_gpio_destroy_ddc(&ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ destruct(*ddc);
+ kfree(*ddc);
+ *ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+ return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type)
+{
+ ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type)
+{
+ ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+ struct ddc_service *ddc,
+ uint32_t defer_delay)
+{
+ struct dc_link *link = ddc->link;
+
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+ !memcmp(link->dpcd_caps.branch_dev_name,
+ DP_DVI_CONVERTER_ID_4,
+ sizeof(link->dpcd_caps.branch_dev_name)))
+ return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+ defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+ return defer_delay;
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+ uint32_t defer_delay = 0;
+
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+ ddc->dongle_type)) {
+
+ defer_delay = DP_TRANSLATOR_DELAY;
+
+ defer_delay =
+ defer_delay_converter_wa(ddc, defer_delay);
+
+ } else /*sink has a delay different from an Active Converter*/
+ defer_delay = 0;
+ break;
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ defer_delay = DP_TRANSLATOR_DELAY;
+ break;
+ default:
+ break;
+ }
+ return defer_delay;
+}
+
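+/* Standard I2C register read: the first payload writes the register offset
+ * (0 here), the second payload reads back 'len' bytes from the same address.
+ */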
+static bool i2c_read(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *buffer,
+ uint32_t len)
+{
+ uint8_t offs_data = 0;
+ struct i2c_payload payloads[2] = {
+ {
+ .write = true,
+ .address = address,
+ .length = 1,
+ .data = &offs_data },
+ {
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = buffer } };
+
+ struct i2c_command command = {
+ .payloads = payloads,
+ .number_of_payloads = 2,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ return dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+}
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap)
+{
+ uint8_t i;
+ bool is_valid_hdmi_signature;
+ enum display_dongle_type *dongle = &sink_cap->dongle_type;
+ uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+ bool is_type2_dongle = false;
+ struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+ /* Assume we have no valid DP passive dongle connected */
+ *dongle = DISPLAY_DONGLE_NONE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+ /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
+ if (!i2c_read(
+ ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf))) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
+
+ /* Check if Type 2 dongle.*/
+ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+ is_type2_dongle = true;
+
+ dongle_signature =
+ (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+ is_valid_hdmi_signature = true;
+
+ /* Check EOT */
+ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+ is_valid_hdmi_signature = false;
+ }
+
+ /* Check signature */
+ for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+ /* If the byte does not match the expected signature, fail,
+ * but ignore a mismatch in the subversion byte. */
+ if (dongle_signature->id[i] !=
+ dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+ if (is_type2_dongle) {
+ is_valid_hdmi_signature = false;
+ break;
+ }
+
+ }
+ }
+
+ if (is_type2_dongle) {
+ uint32_t max_tmds_clk =
+ type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
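+ /* The DP dual-mode max TMDS clock register is in 2.5 MHz
+ * units; *2 + /2 converts the value to MHz in integer math.
+ */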
+ max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+ if (0 == max_tmds_clk ||
+ max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+ max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle %dMhz: ",
+ max_tmds_clk);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+ max_tmds_clk);
+
+ }
+
+ /* Multiply by 1000 to convert to kHz. */
+ sink_cap->max_hdmi_pixel_clock =
+ max_tmds_clk * 1000;
+ }
+
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ }
+ }
+
+ return;
+}
+
+enum {
+ DP_SINK_CAP_SIZE =
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
+};
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size)
+{
+ bool ret;
+ uint32_t payload_size =
+ dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+ DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+ uint32_t write_payloads =
+ (write_size + payload_size - 1) / payload_size;
+
+ uint32_t read_payloads =
+ (read_size + payload_size - 1) / payload_size;
+
+ uint32_t payloads_num = write_payloads + read_payloads;
+
+ if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ return false;
+
+ /*TODO: len of payload data for i2c and aux is uint8!!!!,
+ * but we want to read 256 over i2c!!!!*/
+ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+ struct aux_payloads *payloads =
+ dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+ struct aux_command command = {
+ .payloads = dal_ddc_aux_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .defer_delay = get_defer_delay(ddc),
+ .max_defer_write_retry = 0 };
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_aux_payloads_get_count(payloads);
+
+ ret = dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command);
+
+ dal_ddc_aux_payloads_destroy(&payloads);
+
+ } else {
+ struct i2c_payloads *payloads =
+ dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+ struct i2c_command command = {
+ .payloads = dal_ddc_i2c_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_i2c_payloads_get_count(payloads);
+
+ ret = dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+
+ dal_ddc_i2c_payloads_destroy(&payloads);
+ }
+
+ return ret;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload read_payload = {
+ .i2c_over_aux = i2c,
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = data,
+ };
+ struct aux_command command = {
+ .payloads = &read_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload write_payload = {
+ .i2c_over_aux = i2c,
+ .write = true,
+ .address = address,
+ .length = len,
+ .data = (uint8_t *)data,
+ };
+ struct aux_command command = {
+ .payloads = &write_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc)
+{
+ ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+ return ddc_service->ddc_pin;
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble)
+{
+ bool over_340_mhz = pix_clk > 340000 ? 1 : 0;
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_SINK_VERSION;
+ uint8_t sink_version = 0;
+ uint8_t write_buffer[2] = {0};
+ /*Lower than 340 Scramble bit from SCDC caps*/
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &sink_version, sizeof(sink_version));
+ if (sink_version == 1) {
+ /*Source Version = 1*/
+ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+ write_buffer[1] = 1;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ write_buffer, sizeof(write_buffer), NULL, 0);
+ /*Read Request from SCDC caps*/
+ }
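+ /* SCDC TMDS_Config: bit0 = scrambling enable, bit1 = TMDS bit clock
+ * ratio 1/40; hence 3 for > 340 MHz, 1 for LTE_340 scrambling,
+ * 0 otherwise.
+ */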
+ write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
+
+ if (over_340_mhz) {
+ write_buffer[1] = 3;
+ } else if (lte_340_scramble) {
+ write_buffer[1] = 1;
+ } else {
+ write_buffer[1] = 0;
+ }
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+ sizeof(write_buffer), NULL, 0);
+}
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+ uint8_t tmds_config = 0;
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &tmds_config, sizeof(tmds_config));
+ if (tmds_config & 0x1) {
+ union hdmi_scdc_status_flags_data status_data = { {0} };
+ uint8_t scramble_status = 0;
+
+ offset = HDMI_SCDC_SCRAMBLER_STATUS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), &scramble_status,
+ sizeof(scramble_status));
+ offset = HDMI_SCDC_STATUS_FLAGS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), status_data.byte,
+ sizeof(status_data.byte));
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
new file mode 100644
index 000000000000..ced42484dcfc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -0,0 +1,2587 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+#include "resource.h"
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_DISABLED };
+
+enum {
+ POST_LT_ADJ_REQ_LIMIT = 6,
+ POST_LT_ADJ_REQ_TIMEOUT = 200
+};
+
+enum {
+ LINK_TRAINING_MAX_RETRY_COUNT = 5,
+ /* to avoid an infinite loop wherein the receiver
+ * switches between different VS levels
+ */
+ LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result);
+static struct dc_link_settings get_common_supported_link_settings (
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b);
+
+static void wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t default_wait_in_micro_secs)
+{
+ union training_aux_rd_interval training_rd_interval;
+
+ /* overwrite the delay if rev > 1.1*/
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* DP 1.2 or later - retrieve delay through
+ * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ default_wait_in_micro_secs =
+ training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
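+ /* Per DPCD, a non-zero TRAINING_AUX_RD_INTERVAL is in units of 4 ms,
+ * hence the conversion to microseconds with * 4000 above.
+ */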
+
+ udelay(default_wait_in_micro_secs);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n wait = %d\n",
+ __func__,
+ default_wait_in_micro_secs);
+}
+
+static void dpcd_set_training_pattern(
+ struct dc_link *link,
+ union dpcd_training_pattern dpcd_pattern)
+{
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ 1);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ uint8_t rate = (uint8_t)
+ (lt_settings->link_settings.link_rate);
+
+ union down_spread_ctrl downspread = {{0}};
+ union lane_count_set lane_count_set = {{0}};
+ uint8_t link_set_buffer[2];
+
+ downspread.raw = (uint8_t)
+ (lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+
+ link_set_buffer[0] = rate;
+ link_set_buffer[1] = lane_count_set.raw;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET,
+ link_set_buffer, 2);
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+}
+
+static enum dpcd_training_patterns
+ hw_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dpcd_training_patterns dpcd_tr_pattern =
+ DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+ break;
+ default:
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+
+ return dpcd_tr_pattern;
+
+}
+
+static void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum hw_dp_training_pattern pattern)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET;
+ uint8_t dpcd_lt_buffer[5] = {0};
+ union dpcd_training_pattern dpcd_pattern = {{0}};
+ uint32_t lane;
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+
+ /*****************************************************************
+ * DpcdAddress_TrainingPatternSet
+ *****************************************************************/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+ = dpcd_pattern.raw;
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+ /*****************************************************************
+ * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
+ *****************************************************************/
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+ /* concatenate everything into one buffer */
+
+ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+
+ // 0x00103 - 0x00102
+ memmove(
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+ dpcd_lane,
+ size_in_bytes);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n %x VS set = %x PE set = %x \
+ max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ if (edp_workaround) {
+ /* for eDP write in 2 parts because the 5-byte burst is
+ * causing issues on some eDP panels (EPR#366724)
+ */
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw) );
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ size_in_bytes);
+
+ } else
+ /* write it all in (1 + number-of-lanes)-byte burst*/
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ size_in_bytes + sizeof(dpcd_pattern.raw) );
+
+ link->cur_lane_setting = lt_settings->lane_settings[0];
+}
+
+static bool is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ /*LANEx_CR_DONE bits All 1's?*/
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+ done = false;
+ }
+ return done;
+
+}
+
+static bool is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status,
+ union lane_align_status_updated *lane_status_updated)
+{
+ bool done = true;
+ uint32_t lane;
+ if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+ done = false;
+ else {
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+ !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+ done = false;
+ }
+ }
+ return done;
+
+}
+
+static void update_drive_settings(
+ struct link_training_settings *dest,
+ struct link_training_settings src)
+{
+ uint32_t lane;
+ for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+ dest->lane_settings[lane].VOLTAGE_SWING =
+ src.lane_settings[lane].VOLTAGE_SWING;
+ dest->lane_settings[lane].PRE_EMPHASIS =
+ src.lane_settings[lane].PRE_EMPHASIS;
+ dest->lane_settings[lane].POST_CURSOR2 =
+ src.lane_settings[lane].POST_CURSOR2;
+ }
+}
+
+static uint8_t get_nibble_at_index(const uint8_t *buf,
+ uint32_t index)
+{
+ uint8_t nibble;
+ nibble = buf[index / 2];
+
+ if (index % 2)
+ nibble >>= 4;
+ else
+ nibble &= 0x0F;
+
+ return nibble;
+}
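+/* For example, with buf = { 0x21, 0x43 } the indices 0..3 return the
+ * nibbles 0x1, 0x2, 0x3 and 0x4 respectively (low nibble first).
+ */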
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+ enum dc_voltage_swing voltage)
+{
+ enum dc_pre_emphasis pre_emphasis;
+ pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+ if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+ pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+ return pre_emphasis;
+
+}
+
+static void find_max_drive_settings(
+ const struct link_training_settings *link_training_setting,
+ struct link_training_settings *max_lt_setting)
+{
+ uint32_t lane;
+ struct dc_lane_settings max_requested;
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[0].PRE_EMPHASIS;
+ /*max_requested.postCursor2 =
+ * link_training_setting->laneSettings[0].postCursor2;*/
+
+ /* Determine what the maximum of the requested settings are*/
+ for (lane = 1; lane < link_training_setting->link_settings.lane_count;
+ lane++) {
+ if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
+ max_requested.VOLTAGE_SWING)
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING;
+
+ if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
+ max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS;
+
+ /*
+ if (link_training_setting->laneSettings[lane].postCursor2 >
+ max_requested.postCursor2)
+ {
+ max_requested.postCursor2 =
+ link_training_setting->laneSettings[lane].postCursor2;
+ }
+ */
+ }
+
+ /* make sure the requested settings are
+ * not higher than maximum settings*/
+ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+ max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+ if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+ max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+ /*
+ if (max_requested.postCursor2 > PostCursor2_MaxLevel)
+ max_requested.postCursor2 = PostCursor2_MaxLevel;
+ */
+
+ /* make sure the pre-emphasis matches the voltage swing*/
+ if (max_requested.PRE_EMPHASIS >
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING))
+ max_requested.PRE_EMPHASIS =
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING);
+
+ /*
+ * Post Cursor2 levels are completely independent from
+ * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
+ * can only be applied to each allowable combination of voltage
+ * swing and pre-emphasis levels */
+ /* if ( max_requested.postCursor2 >
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
+ * max_requested.postCursor2 =
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
+ */
+
+ max_lt_setting->link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ max_lt_setting->link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ max_lt_setting->link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ link_training_setting->link_settings.lane_count;
+ lane++) {
+ max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
+ max_requested.VOLTAGE_SWING;
+ max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
+ max_requested.PRE_EMPHASIS;
+ /*max_lt_setting->laneSettings[lane].postCursor2 =
+ * max_requested.postCursor2;
+ */
+ }
+
+}
+
+static void get_lane_status_and_drive_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status *ln_status,
+ union lane_align_status_updated *ln_status_updated,
+ struct link_training_settings *req_settings)
+{
+ uint8_t dpcd_buf[6] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+ struct link_training_settings request_settings = {{0}};
+ uint32_t lane;
+
+ memset(req_settings, '\0', sizeof(struct link_training_settings));
+
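+ /* 6-byte burst starting at DP_LANE0_1_STATUS (0x202):
+ * [0] 0x202 lane0_1 status, [1] 0x203 lane2_3 status,
+ * [2] 0x204 lane align status updated, [3] 0x205 sink status,
+ * [4] 0x206 adjust request lane0_1, [5] 0x207 adjust request lane2_3
+ */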
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ ln_status[lane].raw =
+ get_nibble_at_index(&dpcd_buf[0], lane);
+ dpcd_lane_adjust[lane].raw =
+ get_nibble_at_index(&dpcd_buf[4], lane);
+ }
+
+ ln_status_updated->raw = dpcd_buf[2];
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+ __func__,
+ DP_LANE0_1_STATUS, dpcd_buf[0],
+ DP_LANE2_3_STATUS, dpcd_buf[1]);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+ __func__,
+ DP_ADJUST_REQUEST_LANE0_1,
+ dpcd_buf[4],
+ DP_ADJUST_REQUEST_LANE2_3,
+ dpcd_buf[5]);
+
+ /*copy to req_settings*/
+ request_settings.link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ request_settings.link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ request_settings.link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ request_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ request_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+
+ /* Note: for post cursor 2, the adjusted settings would be read from
+ * DpcdAddress_AdjustRequestPostCursor2 = 0x020C (not implemented yet)
+ */
+
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ find_max_drive_settings(&request_settings, req_settings);
+
+ /* if post cursor 2 is needed in the future,
+ * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+ */
+
+}
+
+static void dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ uint32_t lane;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->
+ link_settings.lane_count);
+ lane++) {
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+ core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ link_training_setting->link_settings.lane_count);
+
+ /*
+ if (LTSettings.link.rate == LinkRate_High2)
+ {
+ DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+ for ( uint32_t lane = 0;
+ lane < lane_count_DPMax; lane++)
+ {
+ dpcd_lane2[lane].bits.post_cursor2_set =
+ static_cast<unsigned char>(
+ LTSettings.laneSettings[lane].postCursor2);
+ dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+ }
+ m_pDpcdAccessSrv->WriteDpcdData(
+ DpcdAddress_Lane0Set2,
+ reinterpret_cast<unsigned char*>(dpcd_lane2),
+ LTSettings.link.lanes);
+ }
+ */
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x VS set = %x PE set = %x \
+ max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ link->cur_lane_setting = link_training_setting->lane_settings[0];
+
+}
+
+static bool is_max_vs_reached(
+ const struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count);
+ lane++) {
+ if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ == VOLTAGE_SWING_MAX_LEVEL)
+ return true;
+ }
+ return false;
+
+}
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings);
+}
+
+static bool perform_post_lt_adj_req_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+
+ uint32_t adj_req_count;
+ uint32_t adj_req_timer;
+ bool req_drv_setting_changed;
+ uint32_t lane;
+
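+ /* run up to POST_LT_ADJ_REQ_LIMIT adjustment rounds; within each round,
+ * poll roughly once per millisecond for up to POST_LT_ADJ_REQ_TIMEOUT
+ * iterations waiting for the sink to request new drive settings
+ */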
+ req_drv_setting_changed = false;
+ for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+ adj_req_count++) {
+
+ req_drv_setting_changed = false;
+
+ for (adj_req_timer = 0;
+ adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+ adj_req_timer++) {
+
+ struct link_training_settings req_settings;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated
+ dpcd_lane_status_updated;
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ if (dpcd_lane_status_updated.bits.
+ POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+ return true;
+
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return false;
+
+ if (!is_ch_eq_done(
+ lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return false;
+
+ for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+ if (lt_settings->
+ lane_settings[lane].VOLTAGE_SWING !=
+ req_settings.lane_settings[lane].
+ VOLTAGE_SWING ||
+ lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+ req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+ req_drv_setting_changed = true;
+ break;
+ }
+ }
+
+ if (req_drv_setting_changed) {
+ update_drive_settings(
+ lt_settings, req_settings);
+
+ dc_link_dp_set_drive_settings(link,
+ lt_settings);
+ break;
+ }
+
+ msleep(1);
+ }
+
+ if (!req_drv_setting_changed) {
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request Timed out\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request limit reached\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+
+}
+
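+/* Pick the highest training pattern advertised by both the link encoder
+ * (TPS3/TPS4 capability flags) and the sink's DPCD caps; otherwise fall
+ * back to TPS2.
+ */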
+static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+{
+ enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+ struct encoder_feature_support *features = &link->link_enc->features;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ if (features->flags.bits.IS_TPS3_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+ if (features->flags.bits.IS_TPS4_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_4)
+ return HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_3)
+ return HW_DP_TRAINING_PATTERN_3;
+
+ return HW_DP_TRAINING_PATTERN_2;
+}
+
+static enum link_training_result perform_channel_equalization_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ struct link_training_settings req_settings;
+ enum hw_dp_training_pattern hw_tr_pattern;
+ uint32_t retries_ch_eq;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+
+ hw_tr_pattern = get_supported_tp(link);
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
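+ /* 1. call HWSS to set lane settings*/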
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(link, lt_settings);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_for_training_aux_rd_interval(link, 400);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink*/
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return LINK_TRAINING_EQ_FAIL_CR;
+
+ /* 6. check CHEQ done*/
+ if (is_ch_eq_done(lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+ }
+
+ return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+static bool perform_clock_recovery_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t lane;
+ struct link_training_settings req_settings;
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+ enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+
+ retries_cr = 0;
+ retry_count = 0;
+ /* initial drive setting (VS/PE/PC2)*/
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->lane_settings[lane].VOLTAGE_SWING =
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->lane_settings[lane].PRE_EMPHASIS =
+ PRE_EMPHASIS_DISABLED;
+ lt_settings->lane_settings[lane].POST_CURSOR2 =
+ POST_CURSOR2_DISABLED;
+ }
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+ /* najeeb - The Synaptics MST hub can put the LT into an
+ * infinite loop by switching the VS between level 0 and
+ * level 1 continuously; here we try for CR lock for
+ * LINK_TRAINING_MAX_CR_RETRY attempts
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ /* 1. call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(
+ link,
+ lt_settings);
+
+ /* 2. update DPCD of the receiver*/
+ if (!retries_cr)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the first iteration. */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings);
+
+ /* 3. wait receiver to lock-on*/
+ wait_for_training_aux_rd_interval(
+ link,
+ 100);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (is_cr_done(lane_count, dpcd_lane_status))
+ return true;
+
+ /* 6. max VS reached*/
+ if (is_max_vs_reached(lt_settings))
+ return false;
+
+ /* 7. same voltage*/
+ /* Note: VS same for all lanes,
+ * so comparing first lane is sufficient*/
+ if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ req_settings.lane_settings[0].VOLTAGE_SWING)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "%s: Link Training Error, could not \
+ get CR after %d tries. \
+ Possibly voltage swing issue", __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ return false;
+}
+
+static inline bool perform_link_training_int(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ bool status)
+{
+ union lane_count_set lane_count_set = { {0} };
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+
+ /* 3. set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
+
+ /* 4. mainlink output idle pattern*/
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ /*
+ * 5. post training adjust if required
+ * If the upstream DPTX and downstream DPRX both support TPS4,
+ * TPS4 must be used instead of POST_LT_ADJ_REQ.
+ */
+ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+ get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+ return status;
+
+ if (status &&
+ perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+ status = false;
+
+ lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ core_link_write_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+
+ return status;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ char *link_rate = "Unknown";
+ struct link_training_settings lt_settings;
+
+ memset(&lt_settings, '\0', sizeof(lt_settings));
+
+ lt_settings.link_settings.link_rate = link_setting->link_rate;
+ lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+ /*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+ /* TODO hard coded to SS for now
+ * lt_settings.link_settings.link_spread =
+ * dal_display_path_is_ss_supported(
+ * path_mode->display_path) ?
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ /* 1. set link rate, lane count and spread*/
+ dpcd_set_link_settings(link, &lt_settings);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)*/
+ if (!perform_clock_recovery_sequence(link, &lt_settings)) {
+ status = LINK_TRAINING_CR_FAIL;
+ } else {
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings);
+ }
+
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+ if (!perform_link_training_int(link,
+ &lt_settings,
+ status == LINK_TRAINING_SUCCESS)) {
+ /* the next link training setting in this case
+ * would be the same as CR failure case.
+ */
+ status = LINK_TRAINING_CR_FAIL;
+ }
+ }
+
+ /* 6. print status message*/
+ switch (lt_settings.link_settings.link_rate) {
+
+ case LINK_RATE_LOW:
+ link_rate = "RBR";
+ break;
+ case LINK_RATE_HIGH:
+ link_rate = "HBR";
+ break;
+ case LINK_RATE_HIGH2:
+ link_rate = "HBR2";
+ break;
+ case LINK_RATE_RBR2:
+ link_rate = "RBR2";
+ break;
+ case LINK_RATE_HIGH3:
+ link_rate = "HBR3";
+ break;
+ default:
+ break;
+ }
+
+ /* Connectivity log: link training */
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
+ link_rate,
+ lt_settings.link_settings.lane_count,
+ (status == LINK_TRAINING_SUCCESS) ? "pass" :
+ ((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
+ "EQ failed"),
+ lt_settings.lane_settings[0].VOLTAGE_SWING,
+ lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+ return status;
+}
+
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts)
+{
+ uint8_t j;
+ uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+
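+ /* back off linearly: each failed attempt adds another
+ * LINK_TRAINING_RETRY_DELAY to the wait before retrying
+ */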
+ for (j = 0; j < attempts; ++j) {
+
+ if (dc_link_dp_perform_link_training(
+ link,
+ link_setting,
+ skip_video_pattern) == LINK_TRAINING_SUCCESS)
+ return true;
+
+ msleep(delay_between_attempts);
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ }
+
+ return false;
+}
+
+static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+
+ /* Higher link settings based on feature supported */
+ if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /* Lower link settings based on sink's link cap */
+ if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count =
+ link->reported_link_cap.lane_count;
+ if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate =
+ link->reported_link_cap.link_rate;
+ if (link->reported_link_cap.link_spread <
+ max_link_cap.link_spread)
+ max_link_cap.link_spread =
+ link->reported_link_cap.link_spread;
+ return max_link_cap;
+}
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting)
+{
+ struct dc_link_settings max_link_cap = {0};
+ struct dc_link_settings cur_link_setting = {0};
+ struct dc_link_settings *cur = &cur_link_setting;
+ struct dc_link_settings initial_link_settings = {0};
+ bool success;
+ bool skip_link_training;
+ bool skip_video_pattern;
+ struct clock_source *dp_cs;
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+ enum link_training_result status;
+
+ success = false;
+ skip_link_training = false;
+
+ max_link_cap = get_max_link_cap(link);
+
+ /* TODO implement override and monitor patch later */
+
+ /* try to train the link from high to low to
+ * find the physical link capability
+ */
+ /* disable the PHY (possibly already done by the BIOS); the driver does it itself here */
+ dp_disable_link_phy(link, link->connector_signal);
+
+ dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs)
+ dp_cs_id = dp_cs->id;
+ else {
+ /*
+ * dp clock source is not initialized for some reason.
+ * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+ */
+ ASSERT(dp_cs);
+ }
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+ do {
+ skip_video_pattern = true;
+
+ if (cur->link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ dp_enable_link_phy(
+ link,
+ link->connector_signal,
+ dp_cs_id,
+ cur);
+
+ if (skip_link_training)
+ success = true;
+ else {
+ status = dc_link_dp_perform_link_training(
+ link,
+ cur,
+ skip_video_pattern);
+ if (status == LINK_TRAINING_SUCCESS)
+ success = true;
+ }
+
+ if (success)
+ link->verified_link_cap = *cur;
+
+ /* always disable the link before trying another setting or
+ * before returning; we'll enable it later based on the
+ * actual mode we're driving
+ */
+ dp_disable_link_phy(link, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(
+ initial_link_settings, cur, status));
+
+ /* Link Training failed for all Link Settings
+ * (Lane Count is still unknown)
+ */
+ if (!success) {
+ /* If all LT fails for all settings,
+ * set verified = failed safe (1 lane low)
+ */
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+
+ link->verified_link_cap.link_spread =
+ LINK_SPREAD_DISABLED;
+ }
+
+
+ return success;
+}
+
+static struct dc_link_settings get_common_supported_link_settings (
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b)
+{
+ struct dc_link_settings link_settings = {0};
+
+ link_settings.lane_count =
+ (link_setting_a.lane_count <=
+ link_setting_b.lane_count) ?
+ link_setting_a.lane_count :
+ link_setting_b.lane_count;
+ link_settings.link_rate =
+ (link_setting_a.link_rate <=
+ link_setting_b.link_rate) ?
+ link_setting_a.link_rate :
+ link_setting_b.link_rate;
+ link_settings.link_spread = LINK_SPREAD_DISABLED;
+
+ /* in DP compliance test, DPR-120 may have
+ * a random value in its MAX_LINK_BW dpcd field.
+ * We map it to the maximum supported link rate that
+ * is smaller than MAX_LINK_BW in this case.
+ */
+ if (link_settings.link_rate > LINK_RATE_HIGH3) {
+ link_settings.link_rate = LINK_RATE_HIGH3;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH3
+ && link_settings.link_rate > LINK_RATE_HIGH2) {
+ link_settings.link_rate = LINK_RATE_HIGH2;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH2
+ && link_settings.link_rate > LINK_RATE_HIGH) {
+ link_settings.link_rate = LINK_RATE_HIGH;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH
+ && link_settings.link_rate > LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_LOW;
+ } else if (link_settings.link_rate < LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ return link_settings;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+ return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+ return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_FOUR:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_ONE;
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_UNKNOWN;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_LOW;
+ case LINK_RATE_LOW:
+ return LINK_RATE_UNKNOWN;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_FOUR;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH3;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+/*
+ * function: set link rate and lane count fallback based
+ * on current link setting and last link training result
+ * return value:
+ * true - link setting could be set
+ * false - has reached minimum setting
+ * and no further fallback could be done
+ */
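+/* Example: starting from HBR2 x4, repeated CR failures walk the link rate
+ * down HBR2 -> HBR -> RBR, then restart at the initial rate with 2 lanes,
+ * walk the rate down again, then with 1 lane, before giving up.
+ */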
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result)
+{
+ if (!current_link_setting)
+ return false;
+
+ switch (training_result) {
+ case LINK_TRAINING_CR_FAIL:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->link_rate =
+ initial_link_settings.link_rate;
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ {
+ if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_CR:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
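+/* Required bandwidth is pixel clock * bits per pixel; e.g. a 148500 kHz
+ * (1080p60) timing at 8 bpc RGB needs 148500 * 8 * 3 = 3564000 kbps.
+ */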
+static uint32_t bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+ switch (timing->display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_khz;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1)
+ /* only Y-only formats reduce the bandwidth to 1/3 of RGB */
+ kbps *= 3;
+
+ return kbps;
+
+}
+
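+/* For reference: HBR2 carries 5.4 Gbps per lane on the wire, roughly
+ * 4.32 Gbps of 8b/10b payload, so a 4-lane HBR2 link carries about
+ * 17.28 Gbps (17280000 kbps) of payload.
+ */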
+static uint32_t bandwidth_in_kbps_from_link_settings(
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_rate_in_kbps = link_setting->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ uint32_t lane_count = link_setting->lane_count;
+ uint32_t kbps = link_rate_in_kbps;
+ kbps *= lane_count;
+ kbps *= 8; /* 8 bits per byte*/
+
+ return kbps;
+
+}
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t req_bw;
+ uint32_t max_bw;
+
+ const struct dc_link_settings *link_setting;
+
+ /*always DP fail safe mode*/
+ if (timing->pix_clk_khz == (uint32_t)25175 &&
+ timing->h_addressable == (uint32_t)640 &&
+ timing->v_addressable == (uint32_t)480)
+ return true;
+
+ /* We always use verified link settings */
+ link_setting = &link->verified_link_cap;
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /*if (flags.DYNAMIC_VALIDATION == 1 &&
+ link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+ link_setting = &link->verified_link_cap;
+ */
+
+ req_bw = bandwidth_in_kbps_from_timing(timing);
+ max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+ if (req_bw <= max_bw) {
+ /* remember the biggest mode here: during initial link
+ * training (to get verified_link_cap), LS sends an event
+ * to the upper layer that it cannot train at the reported
+ * cap, and the upper layer will re-enumerate modes.
+ * This is not necessary if the lower verified_link_cap
+ * is enough to drive all the modes */
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /* if (flags.DYNAMIC_VALIDATION == 1)
+ dpsst->max_req_bw_for_verified_linkcap = dal_max(
+ dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+ return true;
+ } else
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+
+ struct dc_link_settings initial_link_setting = {
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ struct dc_link_settings current_link_setting =
+ initial_link_setting;
+ struct dc_link *link;
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+ req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->sink->link;
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (is_mst_supported(link)) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
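+ /* prefer adding lanes at the current rate; only raise the link rate
+ * (and reset the lane count) once the verified lane count is reached
+ */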
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = bandwidth_in_kbps_from_link_settings(
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+
+ BREAK_TO_DEBUGGER();
+ ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
+
+ *link_setting = link->verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+ /*1. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ irq_reg_rx_power_state = DP_SET_POWER_D0;
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ }
+
+ if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
+
+ /*2. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0;
+ lane < link->cur_link_settings.lane_count;
+ lane++) {
+
+ /* check per-lane status, starting from
+ * DpcdAddress_Lane01Status (0x202) */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if channel equalization, clock recovery or
+ * symbol lock is dropped on any lane, consider
+ * the link as dropped: the DP sink status
+ * has changed */
+ sink_status_changed = true;
+ break;
+ }
+
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
+ INTERLANE_ALIGN_DONE) {
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Link Status changed.\n",
+ __func__);
+
+ return_code = true;
+ }
+ }
+
+ return return_code;
+}
+
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes, which is
+ * the requirement from the above-mentioned test cases.
+ */
+ return core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+}
+
+static bool allow_hpd_rx_irq(const struct dc_link *link)
+{
+ /*
+ * Don't handle RX IRQ unless one of following is met:
+ * 1) The link is established (cur_link_settings != unknown)
+ * 2) We kicked off MST detection
+ * 3) We know we're dealing with an active dongle
+ */
+
+ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (link->type == dc_connection_mst_branch) ||
+ is_dp_active_dongle(link))
+ return true;
+
+ return false;
+}
+
+static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+{
+ union dpcd_psr_configuration psr_configuration;
+
+ if (!link->psr_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 368,/*DpcdAddress_PSR_Enable_Cfg*/
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+
+ if (psr_configuration.bits.ENABLE) {
+ unsigned char dpcdbuf[3] = {0};
+ union psr_error_status psr_error_status;
+ union psr_sink_psr_status psr_sink_psr_status;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 0x2006, /*DpcdAddress_PSR_Error_Status*/
+ (unsigned char *) dpcdbuf,
+ sizeof(dpcdbuf));
+
+ /*DPCD 2006h ERROR STATUS*/
+ psr_error_status.raw = dpcdbuf[0];
+ /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+ psr_sink_psr_status.raw = dpcdbuf[2];
+
+ if (psr_error_status.bits.LINK_CRC_ERROR ||
+ psr_error_status.bits.RFB_STORAGE_ERROR) {
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 8198,/*DpcdAddress_PSR_Error_Status*/
+ &psr_error_status.raw,
+ sizeof(psr_error_status.raw));
+
+ /* PSR error, disable and re-enable PSR */
+ dc_link_set_psr_enable(link, false, true);
+ dc_link_set_psr_enable(link, true, true);
+
+ return true;
+ } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+ /* No error is detected, PSR is active.
+ * We should return with IRQ_HPD handled without
+ * checking for loss of sync since PSR would have
+ * powered down main link.
+ */
+ return true;
+ }
+ }
+ return false;
+}
+
+static void dp_test_send_link_training(struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LANE_COUNT,
+ (unsigned char *)(&link_settings.lane_count),
+ 1);
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LINK_RATE,
+ (unsigned char *)(&link_settings.link_rate),
+ 1);
+
+ /* Set preferred link settings */
+ link->verified_link_cap.lane_count = link_settings.lane_count;
+ link->verified_link_cap.link_rate = link_settings.link_rate;
+
+ dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+/* TODO hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This causes intermittent PHY automation failure.
+ * Need to look into the root cause */
+static uint8_t force_tps4_for_cp2520 = 1;
+
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+ union phy_test_pattern dpcd_test_pattern;
+ union lane_adjust dpcd_lane_adjustment[2];
+ unsigned char dpcd_post_cursor_2_adjustment = 0;
+ unsigned char test_80_bit_pattern[
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ enum dp_test_pattern test_pattern;
+ struct dc_link_training_settings link_settings;
+ union lane_adjust dpcd_lane_adjust;
+ unsigned int lane;
+ struct link_training_settings link_training_settings;
+ int i = 0;
+
+ dpcd_test_pattern.raw = 0;
+ memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+ memset(&link_settings, 0, sizeof(link_settings));
+
+ /* get phy test pattern and pattern parameters from DP receiver */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PHY_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_LANE0_1,
+ &dpcd_lane_adjustment[0].raw,
+ sizeof(dpcd_lane_adjustment));
+
+ /*get post cursor 2 parameters
+ * For DP 1.1a or earlier, this DPCD register's value is 0
+ * For DP 1.2 or later:
+ * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+ * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+ */
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_POST_CURSOR2,
+ &dpcd_post_cursor_2_adjustment,
+ sizeof(dpcd_post_cursor_2_adjustment));
+
+ /* translate request */
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case PHY_TEST_PATTERN_D10_2:
+ test_pattern = DP_TEST_PATTERN_D102;
+ break;
+ case PHY_TEST_PATTERN_SYMBOL_ERROR:
+ test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case PHY_TEST_PATTERN_PRBS7:
+ test_pattern = DP_TEST_PATTERN_PRBS7;
+ break;
+ case PHY_TEST_PATTERN_80BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+ core_link_read_dpcd(
+ link,
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+
+ /* prepare link training settings */
+ link_settings.link = link->cur_link_settings;
+
+ for (lane = 0; lane <
+ (unsigned int)(link->cur_link_settings.lane_count);
+ lane++) {
+ dpcd_lane_adjust.raw =
+ get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+ link_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_settings.lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+
+ for (i = 0; i < 4; i++)
+ link_training_settings.lane_settings[i] =
+ link_settings.lane_settings[i];
+ link_training_settings.link_settings = link_settings.link;
+ link_training_settings.allow_invalid_msa_timing_param = false;
+ /* Usage: DP SI test equipment measures the DP physical lane
+ * signal automatically. The PHY test pattern request is
+ * generated by the equipment via an HPD interrupt, so HPD
+ * must stay active all the time; do not touch it.
+ * Forward the request to DS.
+ */
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ test_80_bit_pattern,
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+}
+
+static void dp_test_send_link_test_pattern(struct dc_link *link)
+{
+ union link_test_pattern dpcd_test_pattern;
+ union test_misc dpcd_test_params;
+ enum dp_test_pattern test_pattern;
+
+ memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+ memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+ /* get link test pattern and pattern parameters */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_TEST_MISC0,
+ &dpcd_test_params.raw,
+ sizeof(dpcd_test_params));
+
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case LINK_TEST_PATTERN_COLOR_RAMP:
+ test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+ break;
+ case LINK_TEST_PATTERN_VERTICAL_BARS:
+ test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+ break; /* black and white */
+ case LINK_TEST_PATTERN_COLOR_SQUARES:
+ test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+ TEST_DYN_RANGE_VESA ?
+ DP_TEST_PATTERN_COLOR_SQUARES :
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ NULL,
+ NULL,
+ 0);
+}
+
+static void handle_automated_test(struct dc_link *link)
+{
+ union test_request test_request;
+ union test_response test_response;
+
+ memset(&test_request, 0, sizeof(test_request));
+ memset(&test_response, 0, sizeof(test_response));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+ if (test_request.bits.LINK_TRAINING) {
+ /* ACK first to let DP RX test box monitor LT sequence */
+ test_response.bits.ACK = 1;
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ dp_test_send_link_training(link);
+ /* no acknowledge request is needed again */
+ test_response.bits.ACK = 0;
+ }
+ if (test_request.bits.LINK_TEST_PATTRN) {
+ dp_test_send_link_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (test_request.bits.PHY_TEST_PATTERN) {
+ dp_test_send_phy_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (!test_request.raw)
+ /* no requests, revert all test signals
+ * TODO: revert all test signals
+ */
+ test_response.bits.ACK = 1;
+ /* send request acknowledgment */
+ if (test_response.bits.ACK)
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union device_service_irq device_service_clear = { { 0 } };
+ enum dc_status result = DDC_RESULT_UNKNOWN;
+ bool status = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Got short pulse HPD on link %d\n",
+ __func__, link->link_index);
+
+
+ /* All the "handle_hpd_irq_xxx()" methods
+ * should be called only after
+ * dal_dpsst_ls_read_hpd_irq_data
+ * Order of calls is important too
+ */
+ result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ if (out_hpd_irq_dpcd_data)
+ *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+ if (result != DC_OK) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain irq data\n",
+ __func__);
+ return false;
+ }
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ device_service_clear.bits.AUTOMATED_TEST = 1;
+ core_link_write_dpcd(
+ link,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &device_service_clear.raw,
+ sizeof(device_service_clear.raw));
+ device_service_clear.raw = 0;
+ handle_automated_test(link);
+ return false;
+ }
+
+ if (!allow_hpd_rx_irq(link)) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: skipping HPD handling on %d\n",
+ __func__, link->link_index);
+ return false;
+ }
+
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ /* If PSR-related error handled, Main link may be off,
+ * so do not handle as a normal sink status change interrupt.
+ */
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ return true;
+
+ /* check if we have MST msg and return since we poll for it */
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ return false;
+
+ /* For now we only handle 'Downstream port status' case.
+ * If we got sink count changed it means
+ * Downstream port status changed,
+ * then DM should call DC to do the detection. */
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
+ /* Connectivity log: link loss */
+ CONN_DATA_LINK_LOSS(link,
+ hpd_irq_dpcd_data.raw,
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
+ perform_link_training_with_retries(link,
+ &link->cur_link_settings,
+ true, LINK_TRAINING_ATTEMPTS);
+
+ status = false;
+ }
+
+ if (link->type == dc_connection_active_dongle &&
+ hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+ != link->dpcd_sink_count)
+ status = true;
+
+ /* reasons for HPD RX:
+ * 1. Link Loss - ie Re-train the Link
+ * 2. MST sideband message
+ * 3. Automated Test - ie. Internal Commit
+ * 4. CP (copy protection) - (not interesting for DM???)
+ * 5. DRR
+ * 6. Downstream Port status changed
+ * -ie. Detect - this the only one
+ * which is interesting for DM because
+ * it must call dc_link_detect.
+ */
+ return status;
+}
+
+/*query dpcd for version and mst cap addresses*/
+bool is_mst_supported(struct dc_link *link)
+{
+ bool mst = false;
+ enum dc_status st = DC_OK;
+ union dpcd_rev rev;
+ union mstm_cap cap;
+
+ rev.raw = 0;
+ cap.raw = 0;
+
+ st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+ sizeof(rev));
+
+ if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+ st = core_link_read_dpcd(link, DP_MSTM_CAP,
+ &cap.raw, sizeof(cap));
+ if (st == DC_OK && cap.bits.MST_CAP == 1)
+ mst = true;
+ }
+ return mst;
+
+}
+
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+ enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
+
+ return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+{
+ union dp_downstream_port_present ds_port = { .byte = data };
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ ddc_service_set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
+ return;
+ }
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWNSTREAM_DVI_HDMI:
+ /* At this point we don't know whether it is DVI or HDMI;
+ * assume DVI. */
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ default:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ }
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+ uint8_t det_caps[4];
+ union dwnstream_port_caps_byte0 *port_caps =
+ (union dwnstream_port_caps_byte0 *)det_caps;
+ core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+ det_caps, sizeof(det_caps));
+
+ switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ case DOWN_STREAM_DETAILED_VGA:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_DVI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_HDMI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+ link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+ if (ds_port.fields.DETAILED_CAPS) {
+
+ union dwnstream_port_caps_byte3_hdmi
+ hdmi_caps = {.raw = det_caps[3] };
+ union dwnstream_port_caps_byte1
+ hdmi_color_caps = {.raw = det_caps[2] };
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
+ det_caps[1] * 25000;
+
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+ hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+ }
+ }
+
+ ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+ {
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+ }
+
+ {
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+ }
+}
+
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ int length)
+{
+ int retry = 0;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ if (!link->dpcd_caps.dpcd_rev.raw) {
+ do {
+ dp_receiver_power_ctrl(link, true);
+ core_link_read_dpcd(link, DP_DPCD_REV,
+ dpcd_data, length);
+ link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+ DP_DPCD_REV -
+ DP_DPCD_REV];
+ } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+ }
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ switch (link->dpcd_caps.branch_dev_id) {
+ /* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+ * all internal circuits including AUX communication preventing
+ * reading DPCD table and EDID (spec violation).
+ * Encoder will skip DP RX power down on disable_output to
+ * keep receiver powered all the time.*/
+ case DP_BRANCH_DEVICE_ID_1:
+ case DP_BRANCH_DEVICE_ID_4:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+ /* TODO: May need work around for other dongles. */
+ default:
+ link->wa_flags.dp_keep_receiver_powered = false;
+ break;
+ }
+ } else
+ link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+static void retrieve_link_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+
+ {
+ union training_aux_rd_interval aux_rd_interval;
+
+ aux_rd_interval.raw =
+ dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+ if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ core_link_read_dpcd(
+ link,
+ DP_DP13_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ }
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+ /* DOWN_STREAM_PORT_COUNT (DPCD 0x007) carries IGNORE_MSA_TIMING_PARAM */
+ down_strm_port_count.raw = dpcd_data[
+ DP_DOWN_STREAM_PORT_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ link->test_pattern_enabled = false;
+ link->compliance_test_state.raw = 0;
+
+ /* read sink count */
+ core_link_read_dpcd(link,
+ DP_SINK_COUNT,
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+}
+
+void detect_dp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+
+ /* dc init_hw has powered the encoder using the default
+ * signal for the connector. For native DP there is no
+ * need to power up the encoder again. If not native DP,
+ * hw_init may need to check the signal or power up the
+ * encoder here.
+ */
+ /* TODO save sink caps in link->sink */
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+ link->verified_link_cap = link->reported_link_cap;
+}
+
+void dc_link_dp_enable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->enable_hpd(encoder);
+}
+
+void dc_link_dp_disable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->disable_hpd(encoder);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+ if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+ return true;
+ else
+ return false;
+}
+
+static void set_crtc_test_pattern(struct dc_link *link,
+ struct pipe_ctx *pipe_ctx,
+ enum dp_test_pattern test_pattern)
+{
+ enum controller_dp_test_pattern controller_test_pattern;
+ enum dc_color_depth color_depth = pipe_ctx->
+ stream->timing.display_color_depth;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ break;
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+ break;
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+ break;
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+ break;
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+ break;
+ default:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ break;
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ {
+ /* disable bit depth reduction */
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ }
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ {
+ /* restore bitdepth reduction */
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &params);
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
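+ /* Program a DP test pattern on the pipe driving this link: either a
+ * CRTC (timing generator) pattern or a DPCD/PHY-level pattern,
+ * optionally applying the supplied lane and link settings first.
+ */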
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int lane;
+ unsigned int i;
+ unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+ union dpcd_training_pattern training_pattern;
+ enum dpcd_phy_test_patterns pattern;
+
+ memset(&training_pattern, 0, sizeof(training_pattern));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link == link) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ /* Reset CRTC Test Pattern if it is currently running and the request
+ * is VideoMode. Reset DP PHY Test Pattern if it is currently running
+ * and the request is VideoMode.
+ */
+ if (link->test_pattern_enabled && test_pattern ==
+ DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set CRTC Test Pattern */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ /* Unblank Stream */
+ link->dc->hwss.unblank_stream(
+ pipe_ctx,
+ &link->verified_link_cap);
+ /* TODO:m_pHwss->MuteAudioEndpoint
+ * (pPathMode->pDisplayPath, false);
+ */
+
+ /* Reset Test Pattern state */
+ link->test_pattern_enabled = false;
+
+ return true;
+ }
+
+ /* Check for PHY Test Patterns */
+ if (is_dp_phy_pattern(test_pattern)) {
+ /* Set DPCD Lane Settings before running test pattern */
+ if (p_link_settings != NULL) {
+ dp_set_hw_lane_settings(link, p_link_settings);
+ dpcd_set_lane_settings(link, p_link_settings);
+ }
+
+ /* Blank stream if running test pattern */
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /*TODO:
+ * m_pHwss->
+ * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+ */
+ /* Blank stream */
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ if (p_link_settings != NULL)
+ dpcd_set_link_settings(link,
+ p_link_settings);
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ pattern = PHY_TEST_PATTERN_NONE;
+ break;
+ case DP_TEST_PATTERN_D102:
+ pattern = PHY_TEST_PATTERN_D10_2;
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ pattern = PHY_TEST_PATTERN_PRBS7;
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ pattern = PHY_TEST_PATTERN_CP2520_1;
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ pattern = PHY_TEST_PATTERN_CP2520_2;
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ pattern = PHY_TEST_PATTERN_CP2520_3;
+ break;
+ default:
+ return false;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+ /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+ return false;
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.2 or later - DP receiver's link quality
+ * pattern is set using DPCD LINK_QUAL_LANEx_SET
+ * registers (0x10B~0x10E)
+ */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+ link_qual_pattern[lane] =
+ (unsigned char)(pattern);
+
+ core_link_write_dpcd(link,
+ DP_LINK_QUAL_LANE0_SET,
+ link_qual_pattern,
+ sizeof(link_qual_pattern));
+ } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+ link->dpcd_caps.dpcd_rev.raw == 0) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.1a or earlier - DP receiver's link
+ * quality pattern is set using
+ * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+ * register (0x102). We will use v_1.3 when we are
+ * setting test pattern for DP 1.1.
+ */
+ core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ }
+ } else {
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ }
+
+ return true;
+}
+
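+ /* Read-modify-write of the sink's DP_MSTM_CTRL register to toggle the
+ * MST_EN bit over the AUX channel.
+ */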
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+ unsigned char mstmCntl;
+
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+ if (enable)
+ mstmCntl |= DP_MST_EN;
+ else
+ mstmCntl &= (~DP_MST_EN);
+
+ core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
new file mode 100644
index 000000000000..9a33b471270a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -0,0 +1,331 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_types.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dpcd_defs.h"
+
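+ /* Thin wrappers around the DM helpers for DPCD access; the boolean
+ * helper result is translated into a dc_status code.
+ */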
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_read_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_write_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
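+ /* Put the DP receiver into D0 (on) or D3 (power down) through the
+ * DPCD SET_POWER register.
+ */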
+void dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ uint8_t state;
+
+ state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+ core_link_write_dpcd(link, DP_SET_POWER, &state,
+ sizeof(state));
+}
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link->link_enc;
+
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ unsigned int i;
+ /* If the current pixel clock source is not DTO (happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk =
+ pipes[i].stream->timing.pix_clk_khz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dc_is_dp_sst_signal(signal)) {
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_power_control(link->link_enc, true);
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ link->dc->hwss.edp_backlight_control(link, true);
+ } else
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ } else {
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+
+ dp_receiver_power_ctrl(link, true);
+}
+
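+ /* Wait for the panel after backlight-off (eDP T9): panels reporting
+ * eDP DPCD revision 1.2 or later are polled via DPCD SINK_STATUS for up
+ * to 50 * 100us; older panels are assumed ready immediately.
+ */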
+static bool edp_receiver_ready_T9(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+ if (edpRev < DP_EDP_12)
+ return true;
+ /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); /* max T9 */
+ } while (++tries < 50);
+ return result == DC_OK;
+}
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+{
+ if (!link->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(link, false);
+
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, false);
+ edp_receiver_ready_T9(link);
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+ link->dc->hwss.edp_power_control(link->link_enc, false);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+
+ /* Clear current link setting.*/
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+}
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+{
+ /* MST disable link only when no stream use the link */
+ if (link->mst_stream_alloc_table.stream_count > 0)
+ return;
+
+ dp_disable_link_phy(link, signal);
+
+ /* set the sink to SST mode after disabling the link */
+ dp_enable_mst_on_sink(link, false);
+}
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ break;
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+
+ return true;
+}
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ /* call Encoder to set lane settings */
+ encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that the connector is not DP: some
+ * Travis_VGA converters get reported by the video BIOS as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_2:
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_3:
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size)
+{
+ struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+ struct link_encoder *encoder = link->link_enc;
+
+ pattern_param.dp_phy_pattern = test_pattern;
+ pattern_param.custom_pattern = custom_pattern;
+ pattern_param.custom_pattern_size = custom_pattern_size;
+ pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+ encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+}
+
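+ /* Retrain the link for DP test handling. For each pipe driven by this
+ * link: blank the stream, clear any active PHY test pattern, power down
+ * the receiver and PHY, re-enable the PHY at the requested link
+ * settings, retrain, then re-enable and unblank the stream (restoring
+ * audio where present).
+ */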
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ struct pipe_ctx *pipes =
+ &link->dc->current_state->res_ctx.pipe_ctx[0];
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link != NULL &&
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->sink->link == link) {
+ udelay(100);
+
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(
+ pipes[i].stream_res.stream_enc);
+
+ /* disable any test pattern that might be active */
+ dp_set_hw_test_pattern(link,
+ DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_receiver_power_ctrl(link, false);
+
+ link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
+
+ link->link_enc->funcs->disable_output(
+ link->link_enc,
+ SIGNAL_TYPE_DISPLAY_PORT,
+ link);
+
+ /* Clear current link setting. */
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+
+ link->link_enc->funcs->enable_dp_output(
+ link->link_enc,
+ link_setting,
+ pipes[i].clock_source->id);
+
+ dp_receiver_power_ctrl(link, true);
+
+ perform_link_training_with_retries(
+ link,
+ link_setting,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS);
+
+ link->cur_link_settings = *link_setting;
+
+ link->dc->hwss.enable_stream(&pipes[i]);
+
+ link->dc->hwss.unblank_stream(&pipes[i],
+ link_setting);
+
+ if (pipes[i].stream_res.audio) {
+ /* notify audio driver for
+ * audio modes of monitor */
+ pipes[i].stream_res.audio->funcs->az_enable(
+ pipes[i].stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than
+ * per link */
+ pipes[i].stream_res.stream_enc->funcs->
+ audio_mute_control(
+ pipes[i].stream_res.stream_enc, false);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
new file mode 100644
index 000000000000..d1cdf9f8853d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -0,0 +1,2795 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "timing_generator.h"
+#include "transform.h"
+#include "dpp.h"
+#include "core_types.h"
+#include "set_mode_types.h"
+#include "virtual/virtual_stream_encoder.h"
+
+#include "dce80/dce80_resource.h"
+#include "dce100/dce100_resource.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_resource.h"
+#endif
+#include "dce120/dce120_resource.h"
+
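+ /* Map the ASIC family and internal revision reported by the base
+ * driver to the DCE/DCN display engine version handled by this
+ * resource layer.
+ */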
+enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+{
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CI:
+ dc_version = DCE_VERSION_8_0;
+ break;
+ case FAMILY_KV:
+ if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_8_3;
+ else
+ dc_version = DCE_VERSION_8_1;
+ break;
+ case FAMILY_CZ:
+ dc_version = DCE_VERSION_11_0;
+ break;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_10_0;
+ break;
+ }
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_11_2;
+ }
+ break;
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case FAMILY_RV:
+ dc_version = DCN_VERSION_1_0;
+ break;
+#endif
+ default:
+ dc_version = DCE_VERSION_UNKNOWN;
+ break;
+ }
+ return dc_version;
+}
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id)
+{
+ struct resource_pool *res_pool = NULL;
+
+ switch (dc_version) {
+ case DCE_VERSION_8_0:
+ res_pool = dce80_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_1:
+ res_pool = dce81_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_3:
+ res_pool = dce83_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_10_0:
+ res_pool = dce100_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_11_0:
+ res_pool = dce110_create_resource_pool(
+ num_virtual_links, dc, asic_id);
+ break;
+ case DCE_VERSION_11_2:
+ res_pool = dce112_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_12_0:
+ res_pool = dce120_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ res_pool = dcn10_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+#endif
+
+
+ default:
+ break;
+ }
+ if (res_pool != NULL) {
+ struct dc_firmware_info fw_info = { { 0 } };
+
+ if (dc->ctx->dc_bios->funcs->get_firmware_info(
+ dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
+ res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ } else
+ ASSERT_CRITICAL(false);
+ }
+
+ return res_pool;
+}
+
+void dc_destroy_resource_pool(struct dc *dc)
+{
+ if (dc) {
+ if (dc->res_pool)
+ dc->res_pool->funcs->destroy(&dc->res_pool);
+
+ kfree(dc->hwseq);
+ }
+}
+
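+ /* Derive audio support from the DCE pin straps: DP audio is always
+ * reported as supported, HDMI audio (native and on-dongle) requires
+ * hdmi_disable == 0 plus the pinstrap audio bit, and an
+ * audio_stream_number strap of 1 limits the pool to a single audio
+ * stream.
+ */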
+static void update_num_audio(
+ const struct resource_straps *straps,
+ unsigned int *num_audio,
+ struct audio_support *aud_support)
+{
+ aud_support->dp_audio = true;
+ aud_support->hdmi_audio_native = false;
+ aud_support->hdmi_audio_on_dongle = false;
+
+ if (straps->hdmi_disable == 0) {
+ if (straps->dc_pinstraps_audio & 0x2) {
+ aud_support->hdmi_audio_on_dongle = true;
+ aud_support->hdmi_audio_native = true;
+ }
+ }
+
+ switch (straps->audio_stream_number) {
+ case 0: /* multi streams supported */
+ break;
+ case 1: /* multi streams not supported */
+ *num_audio = 1;
+ break;
+ default:
+ DC_ERR("DC: unexpected audio fuse!\n");
+ }
+}
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs)
+{
+ struct dc_context *ctx = dc->ctx;
+ const struct resource_caps *caps = pool->res_cap;
+ int i;
+ unsigned int num_audio = caps->num_audio;
+ struct resource_straps straps = {0};
+
+ if (create_funcs->read_dce_straps)
+ create_funcs->read_dce_straps(dc->ctx, &straps);
+
+ pool->audio_count = 0;
+ if (create_funcs->create_audio) {
+ /* find the total number of streams available via the
+ * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+ * registers (one for each pin) starting from pin 1
+ * up to the max number of audio pins.
+ * We stop on the first pin where
+ * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+ */
+ update_num_audio(&straps, &num_audio, &pool->audio_support);
+ for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ struct audio *aud = create_funcs->create_audio(ctx, i);
+
+ if (aud == NULL) {
+ DC_ERR("DC: failed to create audio!\n");
+ return false;
+ }
+
+ if (!aud->funcs->endpoint_valid(aud)) {
+ aud->funcs->destroy(&aud);
+ break;
+ }
+
+ pool->audios[i] = aud;
+ pool->audio_count++;
+ }
+ }
+
+ pool->stream_enc_count = 0;
+ if (create_funcs->create_stream_encoder) {
+ for (i = 0; i < caps->num_stream_encoder; i++) {
+ pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
+ if (pool->stream_enc[i] == NULL)
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ pool->stream_enc_count++;
+ }
+ }
+ dc->caps.dynamic_audio = false;
+ if (pool->audio_count < pool->stream_enc_count) {
+ dc->caps.dynamic_audio = true;
+ }
+ for (i = 0; i < num_virtual_links; i++) {
+ pool->stream_enc[pool->stream_enc_count] =
+ virtual_stream_encoder_create(
+ ctx, ctx->dc_bios);
+ if (pool->stream_enc[pool->stream_enc_count] == NULL) {
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ return false;
+ }
+ pool->stream_enc_count++;
+ }
+
+ dc->hwseq = create_funcs->create_hwseq(ctx);
+
+ return true;
+}
+
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]--;
+
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count--;
+}
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]++;
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count++;
+}
+
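+ /* Two streams can be timing-synchronized only when their h/v totals,
+ * addressable sizes and pixel clocks match; differing phy pixel clocks
+ * are tolerated only when both signals are DP.
+ */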
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->timing.h_total != stream2->timing.h_total)
+ return false;
+
+ if (stream1->timing.v_total != stream2->timing.v_total)
+ return false;
+
+ if (stream1->timing.h_addressable
+ != stream2->timing.h_addressable)
+ return false;
+
+ if (stream1->timing.v_addressable
+ != stream2->timing.v_addressable)
+ return false;
+
+ if (stream1->timing.pix_clk_khz
+ != stream2->timing.pix_clk_khz)
+ return false;
+
+ if (stream1->phy_pix_clk != stream2->phy_pix_clk
+ && (!dc_is_dp_signal(stream1->signal)
+ || !dc_is_dp_signal(stream2->signal)))
+ return false;
+
+ return true;
+}
+
+static bool is_sharable_clk_src(
+ const struct pipe_ctx *pipe_with_clk_src,
+ const struct pipe_ctx *pipe)
+{
+ if (pipe_with_clk_src->clock_source == NULL)
+ return false;
+
+ if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+ && dc_is_dvi_signal(pipe->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe->stream->signal)
+ && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (!resource_are_streams_timing_synchronizable(
+ pipe_with_clk_src->stream, pipe->stream))
+ return false;
+
+ return true;
+}
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
+ return res_ctx->pipe_ctx[i].clock_source;
+ }
+
+ return NULL;
+}
+
+static enum pixel_format convert_pixel_format_to_dalsurface(
+ enum surface_pixel_format surface_pixel_format)
+{
+ enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+
+ switch (surface_pixel_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dal_pixel_format = PIXEL_FORMAT_INDEX8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ dal_pixel_format = PIXEL_FORMAT_FP16;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ default:
+ dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+ break;
+ }
+ return dal_pixel_format;
+}
+
+static void rect_swap_helper(struct rect *rect)
+{
+ uint32_t temp = 0;
+
+ temp = rect->height;
+ rect->height = rect->width;
+ rect->width = temp;
+
+ temp = rect->x;
+ rect->x = rect->y;
+ rect->y = temp;
+}
+
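+ /* Compute the surface viewport (and the chroma viewport for 4:2:0
+ * formats) by intersecting the stream source with the surface clip,
+ * mapping the result back into surface space and splitting it between
+ * pipes when hsplit is in use.
+ */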
+static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect clip = { 0 };
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pri_split = false;
+ sec_split = false;
+ }
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ /* The actual clip is an intersection between stream
+ * source and surface clip
+ */
+ clip.x = stream->src.x > plane_state->clip_rect.x ?
+ stream->src.x : plane_state->clip_rect.x;
+
+ clip.width = stream->src.x + stream->src.width <
+ plane_state->clip_rect.x + plane_state->clip_rect.width ?
+ stream->src.x + stream->src.width - clip.x :
+ plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
+
+ clip.y = stream->src.y > plane_state->clip_rect.y ?
+ stream->src.y : plane_state->clip_rect.y;
+
+ clip.height = stream->src.y + stream->src.height <
+ plane_state->clip_rect.y + plane_state->clip_rect.height ?
+ stream->src.y + stream->src.height - clip.y :
+ plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+
+ /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+ * num_pixels = clip.num_pix * scl_ratio
+ */
+ data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+ surf_src.width / plane_state->dst_rect.width;
+ data->viewport.width = clip.width *
+ surf_src.width / plane_state->dst_rect.width;
+
+ data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+ surf_src.height / plane_state->dst_rect.height;
+ data->viewport.height = clip.height *
+ surf_src.height / plane_state->dst_rect.height;
+
+ /* Round down, compensate in init */
+ data->viewport_c.x = data->viewport.x / vpc_div;
+ data->viewport_c.y = data->viewport.y / vpc_div;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
+ /* Round up, assume original video size always even dimensions */
+ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+ data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ /* Handle hsplit */
+ if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
+ data->viewport.x += data->viewport.width;
+ data->viewport_c.x += data->viewport_c.width;
+ /* Ceil offset pipe */
+ data->viewport.width += data->viewport.width % 2;
+ data->viewport_c.width += data->viewport_c.width % 2;
+ } else {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
+ }
+ }
+
+ if (plane_state->rotation == ROTATION_ANGLE_90 ||
+ plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
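+ /* Compute the scaler output rectangle (recout) in stream destination
+ * space by mapping the surface clip through the stream src-to-dst
+ * scaling and clipping against the stream destination, handle h/v
+ * split, and report via recout_skip how far the clipped recout starts
+ * past the unclipped (full) recout.
+ */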
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect surf_clip = plane_state->clip_rect;
+ int recout_full_x, recout_full_y;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ if (stream->src.x < surf_clip.x)
+ pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
+ - stream->src.x) * stream->dst.width
+ / stream->src.width;
+
+ pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
+ stream->dst.width / stream->src.width;
+ if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
+ stream->dst.x + stream->dst.width)
+ pipe_ctx->plane_res.scl_data.recout.width =
+ stream->dst.x + stream->dst.width
+ - pipe_ctx->plane_res.scl_data.recout.x;
+
+ pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ if (stream->src.y < surf_clip.y)
+ pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
+ - stream->src.y) * stream->dst.height
+ / stream->src.height;
+
+ pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
+ stream->dst.height / stream->src.height;
+ if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
+ stream->dst.y + stream->dst.height)
+ pipe_ctx->plane_res.scl_data.recout.height =
+ stream->dst.y + stream->dst.height
+ - pipe_ctx->plane_res.scl_data.recout.y;
+
+ /* Handle h & vsplit */
+ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
+ pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+ } else {
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
+ pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+ }
+ } else if (pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
+
+ /* Unclipped recout offset = stream dst offset +
+ *   (surf dst offset - stream src offset) * 1 / stream scaling ratio -
+ *   surf src offset * 1 / full scaling ratio
+ */
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ surf_src.x * plane_state->dst_rect.width / surf_src.width
+ * stream->dst.width / stream->src.width;
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ surf_src.y * plane_state->dst_rect.height / surf_src.height
+ * stream->dst.height / stream->src.height;
+
+ recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
+ recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+}
+
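+ /* Scaling ratios are source over destination: the surface-to-dst_rect
+ * ratio combined with the stream src-to-dst ratio. Stereo formats
+ * double one axis and 4:2:0 formats halve the chroma ratios.
+ */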
+static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ const int in_w = stream->src.width;
+ const int in_h = stream->src.height;
+ const int out_w = stream->dst.width;
+ const int out_h = stream->dst.height;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+ surf_src.width,
+ plane_state->dst_rect.width);
+ pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+ surf_src.height,
+ plane_state->dst_rect.height);
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+ pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2;
+ else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2;
+
+ pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h);
+ pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert;
+
+ if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8
+ || pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) {
+ pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
+ }
+}
+
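+ /* Compute the scaler filter init phases from the ratios and tap
+ * counts, then adjust the viewport so the filter has enough pixels to
+ * read at non-zero viewport offsets and at the clipped surface edges.
+ */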
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&src);
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+ data->inits.h = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+
+ data->inits.v = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
+ data->ratios.horz, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
+ data->ratios.horz_c, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
+ }
+
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
+ data->ratios.vert, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
+ data->ratios.vert_c, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
+ }
+
+ /* Interlaced inits based on final vert inits */
+ data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
+ data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct view recout_skip = { 0 };
+ bool res = false;
+
+ /* Important: scaling ratio calculation requires pixel format,
+ * lb depth calculation requires recout and taps require scaling ratios.
+ * Inits require viewport, taps, ratios and recout of split pipe
+ */
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
+
+ calculate_scaling_ratios(pipe_ctx);
+
+ calculate_viewport(pipe_ctx);
+
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ return false;
+
+ calculate_recout(pipe_ctx, &recout_skip);
+
+ /**
+ * Setting line buffer pixel depth to 24bpp yields banding
+ * on certain displays, such as the Sharp 4k
+ */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+ pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
+ pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+ /* Taps calculations */
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (!res) {
+ /* Try 24 bpp linebuffer */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
+
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ }
+
+ if (res)
+ /* May need to re-check lb size after this in some obscure scenario */
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+
+ dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+ "%s: Viewport:\nheight:%d width:%d x:%d "
+ "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+ "y:%d\n",
+ __func__,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ plane_state->dst_rect.height,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y);
+
+ return res;
+}
+
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state != NULL &&
+ context->res_ctx.pipe_ctx[i].stream != NULL)
+ if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i]))
+ return DC_FAIL_SCALING;
+ }
+
+ return DC_OK;
+}
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+
+ return secondary_pipe;
+}
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].top_pipe) {
+ return &res_ctx->pipe_ctx[i];
+ }
+ }
+ return NULL;
+}
+
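+ /* Walk the bottom_pipe chain from the head pipe and return the last
+ * pipe in the chain (the head itself when the stream is not split).
+ */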
+static struct pipe_ctx *resource_get_tail_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *head_pipe, *tail_pipe;
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe)
+ return NULL;
+
+ tail_pipe = head_pipe->bottom_pipe;
+
+ while (tail_pipe) {
+ head_pipe = tail_pipe;
+ tail_pipe = tail_pipe->bottom_pipe;
+ }
+
+ return head_pipe;
+}
+
+/*
+ * A free_pipe for a stream is defined here as a pipe
+ * that has no surface attached yet
+ */
+static struct pipe_ctx *acquire_free_pipe_for_stream(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ struct pipe_ctx *head_pipe = NULL;
+
+ /* Find head pipe, which has the back end set up*/
+
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ if (!head_pipe->plane_state)
+ return head_pipe;
+
+ /* Re-use pipe already acquired for this stream if available*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].plane_state) {
+ return &res_ctx->pipe_ctx[i];
+ }
+ }
+
+ /*
+ * At this point we have no reusable pipe for this stream and we need
+ * to acquire an idle one to satisfy the request
+ */
+
+ if (!pool->funcs->acquire_idle_pipe_for_layer)
+ return NULL;
+
+ return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
+
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
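+ /* DCN only: steal a pipe currently acting as the bottom half of a
+ * split (its top_pipe drives the same plane), unlink it from the split
+ * chain, reset it with its own per-index resources and attach it to the
+ * new stream. Returns the pipe index or -1 if none was found.
+ */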
+static int acquire_first_split_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+ if (pipe_ctx->bottom_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+#endif
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
+ struct dc_stream_status *stream_status = NULL;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ return false;
+ }
+
+
+ if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ return false;
+ }
+
+ head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!head_pipe) {
+ dm_error("Head pipe not found for stream_state %p !\n", stream);
+ return false;
+ }
+
+ free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (!free_pipe) {
+ int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ if (pipe_idx >= 0)
+ free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ }
+#endif
+ if (!free_pipe)
+ return false;
+
+ /* retain new surfaces */
+ dc_plane_state_retain(plane_state);
+ free_pipe->plane_state = plane_state;
+
+ if (head_pipe != free_pipe) {
+
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
+ free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
+ free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
+ free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
+ free_pipe->clock_source = tail_pipe->clock_source;
+ free_pipe->top_pipe = tail_pipe;
+ tail_pipe->bottom_pipe = free_pipe;
+ }
+
+ /* assign new surfaces*/
+ stream_status->plane_states[stream_status->plane_count] = plane_state;
+
+ stream_status->plane_count++;
+
+ return true;
+}
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ /* release pipe for plane*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx;
+
+ if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe)
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* The second condition avoids setting the remaining tail pipe's
+ * top_pipe to NULL, which would make it look like a head pipe in
+ * subsequent deletes.
+ */
+ if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ /*
+ * For a head pipe just detach the plane; for a tail pipe zero out
+ * the whole pipe context.
+ */
+ if (!pipe_ctx->top_pipe) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ }
+ }
+ }
+
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ return true;
+}
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
+ return false;
+
+ return true;
+}
+
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
+ return false;
+
+ return true;
+}
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context)
+{
+ struct dc_validation_set set;
+ int i;
+
+ set.stream = stream;
+ set.plane_count = plane_count;
+
+ for (i = 0; i < plane_count; i++)
+ set.plane_states[i] = plane_states[i];
+
+ return add_all_planes_for_stream(dc, stream, &set, 1, context);
+}
+
+
+
+static bool is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+{
+ if (cur_stream == NULL)
+ return true;
+
+ /* If sink pointer changed, it means this is a hotplug, we should do
+ * full hw setting.
+ */
+ if (cur_stream->sink != new_stream->sink)
+ return true;
+
+ /* If output color space is changed, need to reprogram info frames */
+ if (cur_stream->output_color_space != new_stream->output_color_space)
+ return true;
+
+ return memcmp(
+ &cur_stream->timing,
+ &new_stream->timing,
+ sizeof(struct dc_crtc_timing)) != 0;
+}
+
+static bool are_stream_backends_same(
+ struct dc_stream_state *stream_a, struct dc_stream_state *stream_b)
+{
+ if (stream_a == stream_b)
+ return true;
+
+ if (stream_a == NULL || stream_b == NULL)
+ return false;
+
+ if (is_timing_changed(stream_a, stream_b))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+
+ if (!are_stream_backends_same(old_stream, stream))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+ if (old_stream == stream)
+ return true;
+
+ if (old_stream == NULL || stream == NULL)
+ return false;
+
+ if (memcmp(&old_stream->src,
+ &stream->src,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ if (memcmp(&old_stream->dst,
+ &stream->dst,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ return true;
+}
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+static void update_stream_engine_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct stream_encoder *stream_enc,
+ bool acquired)
+{
+ int i;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i] == stream_enc)
+ res_ctx->is_stream_enc_acquired[i] = acquired;
+ }
+}
+
+/* TODO: release audio object */
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if (pool->audios[i] == audio)
+ res_ctx->is_audio_acquired[i] = acquired;
+ }
+}
+
+static int acquire_first_free_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.mi = pool->mis[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.xfm = pool->transforms[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->sink->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store first available for MST second display
+ * in daisy chain use case */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+ /*
+ * The below can happen when the stream encoder is already acquired:
+ * 1) for the second MST display in a chain, so the preferred engine is
+ * already acquired;
+ * 2) for another link whose preferred engine was already acquired by
+ * some MST configuration.
+ *
+ * If the signal is DP and the preferred engine was not found, return the
+ * last available encoder.
+ *
+ * TODO - This is just a patch-up; a generic solution is required for
+ * non-DP connectors.
+ */
+
+ if (j >= 0 && dc_is_dp_signal(stream->signal))
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
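+/* Prefer the free audio endpoint whose index matches an acquired stream
+ * encoder; otherwise fall back to any free audio endpoint.
+ */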
+static struct audio *find_first_free_audio(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ return pool->audios[i];
+ }
+ }
+ /* No matching one found; fall back to first come, first served */
+ for (i = 0; i < pool->audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+ return pool->audios[i];
+ }
+ }
+ return 0;
+}
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < old_context->stream_count; i++) {
+ struct dc_stream_state *old_stream = old_context->streams[i];
+
+ if (are_stream_backends_same(old_stream, stream))
+ return true;
+ }
+
+ return false;
+}
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ enum dc_status res;
+
+ if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
+ DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = stream;
+ dc_stream_retain(stream);
+ new_ctx->stream_count++;
+
+ res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
+ if (res != DC_OK)
+ DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *del_pipe = NULL;
+
+ /* Release primary pipe */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
+ !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ ASSERT(del_pipe->stream_res.stream_enc);
+ update_stream_engine_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.stream_enc,
+ false);
+
+ if (del_pipe->stream_res.audio)
+ update_audio_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.audio,
+ false);
+
+ resource_unreference_clock_source(&new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->clock_source);
+
+ memset(del_pipe, 0, sizeof(*del_pipe));
+
+ break;
+ }
+ }
+
+ if (!del_pipe) {
+ DC_ERROR("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++)
+ if (new_ctx->streams[i] == stream)
+ break;
+
+ if (new_ctx->streams[i] != stream) {
+ DC_ERROR("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(new_ctx->streams[i]);
+ new_ctx->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < new_ctx->stream_count; i++) {
+ new_ctx->streams[i] = new_ctx->streams[i + 1];
+ new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = NULL;
+ memset(
+ &new_ctx->stream_status[new_ctx->stream_count],
+ 0,
+ sizeof(new_ctx->stream_status[0]));
+
+ return DC_OK;
+}
+
+static void copy_pipe_ctx(
+ const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
+{
+ struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
+ struct dc_stream_state *stream = to_pipe_ctx->stream;
+
+ *to_pipe_ctx = *from_pipe_ctx;
+ to_pipe_ctx->stream = stream;
+ if (plane_state != NULL)
+ to_pipe_ctx->plane_state = plane_state;
+}
+
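+/* Find an existing non-DP, non-virtual stream whose timing can be synchronized
+ * with stream_needs_pll, so the two streams can share a PLL.
+ */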
+static struct dc_stream_state *find_pll_sharable_stream(
+ struct dc_stream_state *stream_needs_pll,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream_has_pll = context->streams[i];
+
+ /* We are looking for non dp, non virtual stream */
+ if (resource_are_streams_timing_synchronizable(
+ stream_needs_pll, stream_has_pll)
+ && !dc_is_dp_signal(stream_has_pll->signal)
+ && stream_has_pll->sink->link->connector_signal
+ != SIGNAL_TYPE_VIRTUAL)
+ return stream_has_pll;
+
+ }
+
+ return NULL;
+}
+
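+/* Normalize the pixel clock: halve it for YCbCr 4:2:0 and, except for 4:2:2,
+ * scale it by the ratio of the display color depth to 24bpp.
+ */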
+static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+{
+ uint32_t pix_clk = timing->pix_clk_khz;
+ uint32_t normalized_pix_clk = pix_clk;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pix_clk /= 2;
+ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ normalized_pix_clk = pix_clk;
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_pix_clk = (pix_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_pix_clk = (pix_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_pix_clk = (pix_clk * 48) / 24;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ return normalized_pix_clk;
+}
+
+static void calculate_phy_pix_clks(struct dc_stream_state *stream)
+{
+ /* update the actual pixel clock for the given stream */
+ if (dc_is_hdmi_signal(stream->signal))
+ stream->phy_pix_clk = get_norm_pix_clk(
+ &stream->timing);
+ else
+ stream->phy_pix_clk =
+ stream->timing.pix_clk_khz;
+}
+
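+/* Acquire a free pipe, a stream encoder and (if applicable) an audio endpoint
+ * for the stream, and record the assignments in the new context.
+ */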
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ int pipe_idx = -1;
+
+ /* TODO Check if this is needed */
+ /*if (!resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ stream->bit_depth_params =
+ old_context->streams[i]->bit_depth_params;
+ stream->clamping = old_context->streams[i]->clamping;
+ continue;
+ }
+ }
+ */
+
+ /* acquire new resources */
+ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (pipe_idx < 0)
+ pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+#endif
+
+ if (pipe_idx < 0)
+ return DC_NO_CONTROLLER_RESOURCE;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ pipe_ctx->stream_res.stream_enc =
+ find_first_free_match_stream_enc_for_link(
+ &context->res_ctx, pool, stream);
+
+ if (!pipe_ctx->stream_res.stream_enc)
+ return DC_NO_STREAM_ENG_RESOURCE;
+
+ update_stream_engine_usage(
+ &context->res_ctx, pool,
+ pipe_ctx->stream_res.stream_enc,
+ true);
+
+ /* TODO: Add check for ASIC support and EDID audio */
+ if (!stream->sink->converter_disable_audio &&
+ dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+ stream->audio_info.mode_count) {
+ pipe_ctx->stream_res.audio = find_first_free_audio(
+ &context->res_ctx, pool);
+
+ /*
+ * Audio is assigned on a first come, first served basis.
+ * There are ASICs that have fewer audio resources
+ * than pipes.
+ */
+ if (pipe_ctx->stream_res.audio)
+ update_audio_usage(&context->res_ctx, pool,
+ pipe_ctx->stream_res.audio, true);
+ }
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ return DC_OK;
+ }
+
+ DC_ERROR("Stream %p not found in new ctx!\n", stream);
+ return DC_ERROR_UNEXPECTED;
+}
+
+/* first stream in the context is used to populate the rest */
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams)
+{
+ int i;
+
+ for (i = 1; i < max_streams; i++) {
+ context->streams[i] = context->streams[0];
+
+ copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
+ &context->res_ctx.pipe_ctx[i]);
+ context->res_ctx.pipe_ctx[i].stream =
+ context->res_ctx.pipe_ctx[0].stream;
+
+ dc_stream_retain(context->streams[i]);
+ context->stream_count++;
+ }
+}
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dc_resource_state_copy_construct(dc->current_state, dst_ctx);
+}
+
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dst_ctx->dis_clk = dc->res_pool->display_clock;
+}
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i, j;
+
+ if (dc->res_pool->funcs->validate_global) {
+ result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+ if (result != DC_OK)
+ return result;
+ }
+
+ for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+ struct dc_stream_state *stream = new_ctx->streams[i];
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Switch to dp clock source only if there is
+ * no non dp stream that shares the same timing
+ * with the dp stream.
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ !find_pll_sharable_stream(stream, new_ctx)) {
+
+ resource_unreference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ pipe_ctx->clock_source = dc->res_pool->dp_clock_source;
+ resource_reference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+ }
+ }
+ }
+
+ result = resource_build_scaling_params_for_context(dc, new_ctx);
+
+ if (result == DC_OK)
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void patch_gamut_packet_checksum(
+ struct encoder_info_packet *gamut_packet)
+{
+ /* For gamut we recalc checksum */
+ if (gamut_packet->valid) {
+ uint8_t chk_sum = 0;
+ uint8_t *ptr;
+ uint8_t i;
+
+ /* Start of the Gamut data */
+ ptr = &gamut_packet->sb[3];
+
+ for (i = 0; i <= gamut_packet->sb[1]; i++)
+ chk_sum += ptr[i];
+
+ gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum);
+ }
+}
+
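+/* Build the HDMI AVI InfoFrame: pixel encoding, colorimetry, aspect ratio,
+ * quantization range, VIC and bar info for the given pipe's stream.
+ */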
+static void set_avi_info_frame(
+ struct encoder_info_packet *info_packet,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+ struct info_frame info_frame = { {0} };
+ uint32_t pixel_encoding = 0;
+ enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+ enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+ bool itc = false;
+ uint8_t itc_value = 0;
+ uint8_t cn0_cn1 = 0;
+ unsigned int cn0_cn1_value = 0;
+ uint8_t *check_sum = NULL;
+ uint8_t byte_index = 0;
+ union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+ union display_content_support support = {0};
+ unsigned int vic = pipe_ctx->stream->timing.vic;
+ enum dc_timing_3d_format format;
+
+ color_space = pipe_ctx->stream->output_color_space;
+ if (color_space == COLOR_SPACE_UNKNOWN)
+ color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
+ COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709;
+
+ /* Initialize header */
+ hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+ /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+ * not be used in HDMI 2.0 (Section 10.1) */
+ hdmi_info->bits.header.version = 2;
+ hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+
+ /*
+ * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+ * according to HDMI 2.0 spec (Section 10.1)
+ */
+
+ switch (stream->timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = 1;
+ break;
+
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = 2;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = 3;
+ break;
+
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = 0;
+ }
+
+ /* Y0_Y1_Y2 : The pixel encoding */
+ /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+ hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+
+ /* A0 = 1 Active Format Information valid */
+ hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+
+ /* B0, B1 = 3; Bar info data is valid */
+ hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+
+ hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+
+ /* S0, S1 : Underscan / Overscan */
+ /* TODO: un-hardcode scan type */
+ scan_type = SCANNING_TYPE_UNDERSCAN;
+ hdmi_info->bits.S0_S1 = scan_type;
+
+ /* C0, C1 : Colorimetry */
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+ else if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+ else {
+ hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+ }
+ if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
+ color_space == COLOR_SPACE_2020_YCBCR) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ } else if (color_space == COLOR_SPACE_ADOBERGB) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ }
+
+ /* TODO: un-hardcode aspect ratio */
+ aspect = stream->timing.aspect_ratio;
+
+ switch (aspect) {
+ case ASPECT_RATIO_4_3:
+ case ASPECT_RATIO_16_9:
+ hdmi_info->bits.M0_M1 = aspect;
+ break;
+
+ case ASPECT_RATIO_NO_DATA:
+ case ASPECT_RATIO_64_27:
+ case ASPECT_RATIO_256_135:
+ default:
+ hdmi_info->bits.M0_M1 = 0;
+ }
+
+ /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+ hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+ /* TODO: un-hardcode cn0_cn1 and itc */
+
+ cn0_cn1 = 0;
+ cn0_cn1_value = 0;
+
+ itc = true;
+ itc_value = 1;
+
+ support = stream->sink->edid_caps.content_support;
+
+ if (itc) {
+ if (!support.bits.valid_content_type) {
+ cn0_cn1_value = 0;
+ } else {
+ if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GRAPHICS) {
+ if (support.bits.graphics_content == 1) {
+ cn0_cn1_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_PHOTO) {
+ if (support.bits.photo_content == 1) {
+ cn0_cn1_value = 1;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_CINEMA) {
+ if (support.bits.cinema_content == 1) {
+ cn0_cn1_value = 2;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GAME) {
+ if (support.bits.game_content == 1) {
+ cn0_cn1_value = 3;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ }
+ }
+ hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
+ hdmi_info->bits.ITC = itc_value;
+ }
+
+ /* TODO: We should handle YCC quantization, but we do not have the matrix calculation */
+ if (stream->sink->edid_caps.qs_bit == 1 &&
+ stream->sink->edid_caps.qy_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+
+ /* VIC */
+ format = stream->timing.timing_3d_format;
+ /* TODO: add 3D stereo support */
+ if (format != TIMING_3D_FORMAT_NONE) {
+ /* Per the HDMI spec, the HDMI VIC needs to be converted to a CEA VIC when 3D is enabled */
+ switch (pipe_ctx->stream->timing.hdmi_vic) {
+ case 1:
+ vic = 95;
+ break;
+ case 2:
+ vic = 94;
+ break;
+ case 3:
+ vic = 93;
+ break;
+ case 4:
+ vic = 98;
+ break;
+ default:
+ break;
+ }
+ }
+ hdmi_info->bits.VIC0_VIC7 = vic;
+
+ /* Pixel repetition
+ * PR0 - PR3 start from 0, whereas pHwPathMode->mode.timing.flags.pixel
+ * repetition starts from 1 */
+ hdmi_info->bits.PR0_PR3 = 0;
+
+ /* Bar Info
+ * barTop: Line Number of End of Top Bar.
+ * barBottom: Line Number of Start of Bottom Bar.
+ * barLeft: Pixel Number of End of Left Bar.
+ * barRight: Pixel Number of Start of Right Bar. */
+ hdmi_info->bits.bar_top = stream->timing.v_border_top;
+ hdmi_info->bits.bar_bottom = (stream->timing.v_total
+ - stream->timing.v_border_bottom + 1);
+ hdmi_info->bits.bar_left = stream->timing.h_border_left;
+ hdmi_info->bits.bar_right = (stream->timing.h_total
+ - stream->timing.h_border_right + 1);
+
+ /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+ check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+
+ *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
+
+ for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+ *check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+
+ /* one byte complement */
+ *check_sum = (uint8_t) (0x100 - *check_sum);
+
+ /* Store in hw_path_mode */
+ info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
+ info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
+ info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+
+ for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb); byte_index++)
+ info_packet->sb[byte_index] = info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+ info_packet->valid = true;
+}
+
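+/* Build the HDMI Vendor-Specific InfoFrame (VSIF) carrying 3D stereo or
+ * HDMI VIC (extended resolution) signaling.
+ */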
+static void set_vendor_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ uint32_t length = 0;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ // Can be different depending on packet content /*todo*/
+ // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
+
+ info_packet->valid = false;
+
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ /* Can be different depending on packet content */
+ length = 5;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160)
+ hdmi_vic_mode = true;
+
+ /* According to HDMI 1.4a CTS, VSIF should be sent
+ * for both 3D stereo and HDMI VIC modes.
+ * For all other modes, there is no VSIF sent. */
+
+ if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ return;
+
+ /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ /*PB4: 5 lower bits = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+ * The value for HDMI_Video_Format are:
+ * 0x0 (0b000) - No additional HDMI video format is presented in this
+ * packet
+ * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+ * parameter follows
+ * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+ * potentially 3D_Ext_Data follows
+ * 0x3..0x7 (0b011..0b111) - reserved for future use */
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+ * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
+ * The value for 3D_Structure are:
+ * 0x0 - Frame Packing
+ * 0x1 - Field Alternative
+ * 0x2 - Line Alternative
+ * 0x3 - Side-by-Side (full)
+ * 0x4 - L + depth
+ * 0x5 - L + depth + graphics + graphics-depth
+ * 0x6 - Top-and-Bottom
+ * 0x7 - Reserved for future use
+ * 0x8 - Side-by-Side (Half)
+ * 0x9..0xE - Reserved for future use
+ * 0xF - Not used */
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ /*PB5: If PB4 is set to 0x1 (extended resolution format)
+ * fill PB5 with the correct HDMI VIC code */
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ /* Header */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
+ info_packet->hb1 = 0x01; /* Version */
+
+ /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+ info_packet->hb2 = (uint8_t) (length);
+
+ /* Calculate checksum */
+ checksum = 0;
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
+static void set_spd_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for FreeSync */
+
+ unsigned char checksum = 0;
+ unsigned int idx, payload_size = 0;
+
+ /* Check if Freesync is supported. Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+ if (stream->freesync_ctx.supported == false)
+ return;
+
+ if (dc_is_hdmi_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB1 = Version = 0x01 */
+ info_packet->hb1 = 0x01;
+
+ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+ info_packet->hb2 = 0x08;
+
+ payload_size = 0x08;
+
+ } else if (dc_is_dp_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
+ * when used to associate audio related info packets
+ */
+ info_packet->hb0 = 0x00;
+
+ /* HB1 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb1 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB2 = [Bits 7:0 = Least significant eight bits -
+ * For INFOFRAME, the value must be 1Bh]
+ */
+ info_packet->hb2 = 0x1B;
+
+ /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
+ * [Bits 1:0 = Most significant two bits = 0x00]
+ */
+ info_packet->hb3 = 0x04;
+
+ payload_size = 0x1B;
+ }
+
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ info_packet->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ info_packet->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ info_packet->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+ info_packet->sb[4] = 0x00;
+
+ /* PB5 = Reserved */
+ info_packet->sb[5] = 0x00;
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+ info_packet->sb[6] = 0x00;
+
+ if (stream->freesync_ctx.supported == true)
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ info_packet->sb[6] |= 0x01;
+
+ if (stream->freesync_ctx.enabled == true)
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ info_packet->sb[6] |= 0x02;
+
+ if (stream->freesync_ctx.active == true)
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ info_packet->sb[6] |= 0x04;
+
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ info_packet->sb[7] = (unsigned char) (stream->freesync_ctx.
+ min_refresh_in_micro_hz / 1000000);
+
+ /* PB8 = FreeSync Maximum refresh rate (Hz)
+ *
+ * Note: We do not use the maximum capable refresh rate
+ * of the panel, because we should never go above the field
+ * rate of the mode timing set.
+ */
+ info_packet->sb[8] = (unsigned char) (stream->freesync_ctx.
+ nominal_refresh_in_micro_hz / 1000000);
+
+ /* PB9 - PB27 = Reserved */
+ for (idx = 9; idx <= 27; idx++)
+ info_packet->sb[idx] = 0x00;
+
+ /* Calculate checksum */
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+ checksum += info_packet->hb3;
+
+ for (idx = 1; idx <= payload_size; idx++)
+ checksum += info_packet->sb[idx];
+
+ /* PB0 = Checksum (one byte complement) */
+ info_packet->sb[0] = (unsigned char) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
+
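+/* Build the HDR static metadata packet (HDMI infoframe type 0x87 or DP SDP)
+ * from the plane's HDR static metadata.
+ */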
+static void set_hdr_static_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_plane_state *plane_state,
+ struct dc_stream_state *stream)
+{
+ uint16_t i = 0;
+ enum signal_type signal = stream->signal;
+ struct dc_hdr_static_metadata hdr_metadata;
+ uint32_t data;
+
+ if (!plane_state)
+ return;
+
+ hdr_metadata = plane_state->hdr_static_ctx;
+
+ if (!hdr_metadata.hdr_supported)
+ return;
+
+ if (dc_is_hdmi_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x87;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = 0x1A;
+ i = 1;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x87;
+ info_packet->hb2 = 0x1D;
+ info_packet->hb3 = (0x13 << 2);
+ i = 2;
+ }
+
+ data = hdr_metadata.is_hdr;
+ info_packet->sb[i++] = data ? 0x02 : 0x00;
+ info_packet->sb[i++] = 0x00;
+
+ data = hdr_metadata.chromaticity_green_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_green_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.max_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.min_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_content_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_frame_average_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t checksum = 0;
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= info_packet->hb2; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = 0x100 - checksum;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->sb[0] = 0x01;
+ info_packet->sb[1] = 0x1A;
+ }
+}
+
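+/* Build the DP VSC SDP; currently only revision 2 (3D stereo + PSR for
+ * eDP 1.3 or higher) is generated, and only when PSR is enabled on the link.
+ */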
+static void set_vsc_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ unsigned int vscPacketRevision = 0;
+ unsigned int i;
+
+ if (stream->sink->link->psr_enabled) {
+ vscPacketRevision = 2;
+ }
+
+ /* VSC packet not needed based on the features
+ * supported by this DP display
+ */
+ if (vscPacketRevision == 0)
+ return;
+
+ if (vscPacketRevision == 0x2) {
+ /* Secondary-data Packet ID = 0*/
+ info_packet->hb0 = 0x00;
+ /* 07h - Packet Type Value indicating Video
+ * Stream Configuration packet
+ */
+ info_packet->hb1 = 0x07;
+ /* 02h = VSC SDP supporting 3D stereo and PSR
+ * (applies to eDP v1.3 or higher).
+ */
+ info_packet->hb2 = 0x02;
+ /* 08h = VSC packet supporting 3D stereo + PSR
+ * (HB2 = 02h).
+ */
+ info_packet->hb3 = 0x08;
+
+ for (i = 0; i < 28; i++)
+ info_packet->sb[i] = 0;
+
+ info_packet->valid = true;
+ }
+
+ /*TODO: stereo 3D support and extend pixel encoding colorimetry*/
+}
+
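+/* Release all plane and stream references held by the context. */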
+void dc_resource_state_destruct(struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ context->stream_status[i].plane_states[j]);
+
+ context->stream_status[i].plane_count = 0;
+ dc_stream_release(context->streams[i]);
+ context->streams[i] = NULL;
+ }
+}
+
+/*
+ * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
+ * by the src_ctx
+ */
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx)
+{
+ int i, j;
+ struct kref refcount = dst_ctx->refcount;
+
+ *dst_ctx = *src_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < dst_ctx->stream_count; i++) {
+ dc_stream_retain(dst_ctx->streams[i]);
+ for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_ctx->stream_status[i].plane_states[j]);
+ }
+
+ /* context refcount should not be overridden */
+ dst_ctx->refcount = refcount;
+
+}
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; ++i) {
+ if (res_ctx->clock_source_ref_count[i] == 0)
+ return pool->clock_sources[i];
+ }
+
+ return NULL;
+}
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* default all packets to invalid */
+ info->avi.valid = false;
+ info->gamut.valid = false;
+ info->vendor.valid = false;
+ info->spd.valid = false;
+ info->hdrsmd.valid = false;
+ info->vsc.valid = false;
+
+ signal = pipe_ctx->stream->signal;
+
+ /* HDMI and DP have different info packets */
+ if (dc_is_hdmi_signal(signal)) {
+ set_avi_info_frame(&info->avi, pipe_ctx);
+
+ set_vendor_info_packet(&info->vendor, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+
+ } else if (dc_is_dp_signal(signal)) {
+ set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+ }
+
+ patch_gamut_packet_checksum(&info->gamut);
+}
+
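+/* Assign a clock source to the stream's head pipe: the dedicated DP clock
+ * source for DP and virtual signals, otherwise a shared PLL (if allowed) or
+ * the first free one.
+ */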
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ /* acquire new resources */
+ const struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source = pool->dp_clock_source;
+ else {
+ pipe_ctx->clock_source = NULL;
+
+ if (!dc->config.disable_disp_pll_sharing)
+ pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
+ &context->res_ctx,
+ pipe_ctx);
+
+ if (pipe_ctx->clock_source == NULL)
+ pipe_ctx->clock_source =
+ dc_resource_find_first_free_pll(
+ &context->res_ctx,
+ pool);
+ }
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx, pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
+/*
+ * Note: We need to disable the output if the clock sources change, since
+ * the BIOS does an optimization and does not apply the new settings when
+ * the PHY is changed while not already disabled.
+ */
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx_old->stream)
+ return false;
+
+ if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
+ return true;
+
+ if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
+ return true;
+
+ if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio)
+ return true;
+
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
+ && pipe_ctx_old->stream != pipe_ctx->stream)
+ return true;
+
+ if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
+ return true;
+
+ if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
+
+ return false;
+}
+
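+/* Translate the stream's dither_option into FMT truncation, spatial dither
+ * and temporal (frame modulation) settings.
+ */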
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* Special case - the formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used).
+ * Note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+ /* temporal dither */
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+{
+ struct dc *core_dc = dc;
+ struct dc_link *link = stream->sink->link;
+ struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ enum dc_status res = DC_OK;
+
+ calculate_phy_pix_clks(stream);
+
+ if (!tg->funcs->validate_timing(tg, &stream->timing))
+ res = DC_FAIL_CONTROLLER_VALIDATE;
+
+ if (res == DC_OK)
+ if (!link->link_enc->funcs->validate_output_with_stream(
+ link->link_enc, stream))
+ res = DC_FAIL_ENC_VALIDATE;
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ if (res == DC_OK)
+ res = dc_link_validate_mode_timing(stream,
+ link,
+ &stream->timing);
+
+ return res;
+}
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state)
+{
+ enum dc_status res = DC_OK;
+
+ /* TODO For now validates pixel format only */
+ if (dc->res_pool->funcs->validate_plane)
+ return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
+
+ return res;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
new file mode 100644
index 000000000000..25fae38409ab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static void destruct(struct dc_sink *sink)
+{
+ if (sink->dc_container_id) {
+ kfree(sink->dc_container_id);
+ sink->dc_container_id = NULL;
+ }
+}
+
+static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+{
+
+ struct dc_link *link = init_params->link;
+
+ if (!link)
+ return false;
+
+ sink->sink_signal = init_params->sink_signal;
+ sink->link = link;
+ sink->ctx = link->ctx;
+ sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+ sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->dc_container_id = NULL;
+
+ return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+void dc_sink_retain(struct dc_sink *sink)
+{
+ kref_get(&sink->refcount);
+}
+
+static void dc_sink_free(struct kref *kref)
+{
+ struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
+ destruct(sink);
+ kfree(sink);
+}
+
+void dc_sink_release(struct dc_sink *sink)
+{
+ kref_put(&sink->refcount, dc_sink_free);
+}
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+ struct dc_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL);
+
+ if (NULL == sink)
+ goto alloc_fail;
+
+ if (false == construct(sink, init_params))
+ goto construct_fail;
+
+ kref_init(&sink->refcount);
+
+ return sink;
+
+construct_fail:
+ kfree(sink);
+
+alloc_fail:
+ return NULL;
+}
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
new file mode 100644
index 000000000000..b00a6040a697
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
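+/* Derive the stream signal from the sink/link and, for DVI, pick single vs
+ * dual link based on the pixel clock.
+ */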
+static void update_stream_signal(struct dc_stream_state *stream)
+{
+ if (stream->output_signal == SIGNAL_TYPE_NONE) {
+ struct dc_sink *dc_sink = stream->sink;
+
+ if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
+ stream->signal = stream->sink->link->connector_signal;
+ else
+ stream->signal = dc_sink->sink_signal;
+ } else {
+ stream->signal = stream->output_signal;
+ }
+
+ if (dc_is_dvi_signal(stream->signal)) {
+ if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
+ stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ else
+ stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+}
+
+static void construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data)
+{
+ uint32_t i = 0;
+
+ stream->sink = dc_sink_data;
+ stream->ctx = stream->sink->ctx;
+
+ dc_sink_retain(dc_sink_data);
+
+ /* Copy audio modes */
+ /* TODO - Remove this translation */
+ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++)
+ {
+ stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
+ stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
+ stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+ stream->audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size;
+ }
+ stream->audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+ stream->audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+ stream->audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+ memmove(
+ stream->audio_info.display_name,
+ dc_sink_data->edid_caps.display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+ stream->audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+ stream->audio_info.product_id = dc_sink_data->edid_caps.product_id;
+ stream->audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+ if (dc_sink_data->dc_container_id != NULL) {
+ struct dc_container_id *dc_container_id = dc_sink_data->dc_container_id;
+
+ stream->audio_info.port_id[0] = dc_container_id->portId[0];
+ stream->audio_info.port_id[1] = dc_container_id->portId[1];
+ } else {
+ /* TODO - WindowDM has implemented this;
+ * other DMs need to un-hardcode port_id */
+ stream->audio_info.port_id[0] = 0x5558859e;
+ stream->audio_info.port_id[1] = 0xd989449;
+ }
+
+ /* EDID CAP translation for HDMI 2.0 */
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+
+ stream->status.link = stream->sink->link;
+
+ update_stream_signal(stream);
+}
+
+static void destruct(struct dc_stream_state *stream)
+{
+ dc_sink_release(stream->sink);
+ if (stream->out_transfer_func != NULL) {
+ dc_transfer_func_release(
+ stream->out_transfer_func);
+ stream->out_transfer_func = NULL;
+ }
+}
+
+void dc_stream_retain(struct dc_stream_state *stream)
+{
+ kref_get(&stream->refcount);
+}
+
+static void dc_stream_free(struct kref *kref)
+{
+ struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
+
+ destruct(stream);
+ kfree(stream);
+}
+
+void dc_stream_release(struct dc_stream_state *stream)
+{
+ if (stream != NULL) {
+ kref_put(&stream->refcount, dc_stream_free);
+ }
+}
+
+struct dc_stream_state *dc_create_stream_for_sink(
+ struct dc_sink *sink)
+{
+ struct dc_stream_state *stream;
+
+ if (sink == NULL)
+ return NULL;
+
+ stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (stream == NULL)
+ return NULL;
+
+ construct(stream, sink);
+
+ kref_init(&stream->refcount);
+
+ return stream;
+}
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (stream == dc->current_state->streams[i])
+ return &dc->current_state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+ if (NULL == attributes) {
+ dm_error("DC: attributes is NULL!\n");
+ return false;
+ }
+
+ if (attributes->address.quad_part == 0) {
+ dm_output_to_console("DC: Cursor address is 0!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ continue;
+
+
+ if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+ pipe_ctx->plane_res.ipp, attributes);
+
+ if (pipe_ctx->plane_res.hubp != NULL &&
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+
+ if (pipe_ctx->plane_res.mi != NULL &&
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.mi, attributes);
+
+
+ if (pipe_ctx->plane_res.xfm != NULL &&
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.xfm, attributes);
+
+ if (pipe_ctx->plane_res.dpp != NULL &&
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes);
+ }
+
+ stream->cursor_attributes = *attributes;
+
+ return true;
+}
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (NULL == stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (NULL == position) {
+ dm_error("DC: cursor position is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct transform *xfm = pipe_ctx->plane_res.xfm;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_cursor_position pos_cpy = *position;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = stream->timing.pix_clk_khz,
+ .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->stream != stream ||
+ (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
+ !pipe_ctx->plane_state ||
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+
+ if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
+ ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+
+ if (mi != NULL && mi->funcs->set_cursor_position != NULL)
+ mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+
+ if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+
+ if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
+ xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width);
+
+ if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
+ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+
+ }
+
+ return true;
+}
+
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+ }
+
+ return 0;
+}
+
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ uint8_t i;
+ bool ret = false;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+ v_blank_start,
+ v_blank_end,
+ h_position,
+ v_position);
+
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dm_logger,
+ enum dc_log_type log_type)
+{
+
+ dm_logger_write(dm_logger,
+ log_type,
+ "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ stream,
+ stream->src.x,
+ stream->src.y,
+ stream->src.width,
+ stream->src.height,
+ stream->dst.x,
+ stream->dst.y,
+ stream->dst.width,
+ stream->dst.height,
+ stream->output_color_space);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ stream->timing.pix_clk_khz,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pixel_encoding,
+ stream->timing.display_color_depth);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tsink name: %s, serial: %d\n",
+ stream->sink->edid_caps.display_name,
+ stream->sink->edid_caps.serial_number);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tlink: %d\n",
+ stream->sink->link->link_index);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
new file mode 100644
index 000000000000..ade5b8ee9c3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_types.h"
+#include "transform.h"
+#include "dpp.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+{
+ plane_state->ctx = ctx;
+}
+
+static void destruct(struct dc_plane_state *plane_state)
+{
+ if (plane_state->gamma_correction != NULL) {
+ dc_gamma_release(&plane_state->gamma_correction);
+ }
+ if (plane_state->in_transfer_func != NULL) {
+ dc_transfer_func_release(
+ plane_state->in_transfer_func);
+ plane_state->in_transfer_func = NULL;
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id)
+{
+ plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+ /*register_flip_interrupt(surface);*/
+}
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+
+ struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
+ GFP_KERNEL);
+
+ if (NULL == plane_state)
+ return NULL;
+
+ kref_init(&plane_state->refcount);
+ construct(core_dc->ctx, plane_state);
+
+ return plane_state;
+}
+
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state)
+{
+ const struct dc_plane_status *plane_status;
+ struct dc *core_dc;
+ int i;
+
+ if (!plane_state ||
+ !plane_state->ctx ||
+ !plane_state->ctx->dc) {
+ ASSERT(0);
+ return NULL; /* remove this if the assert above is never hit */
+ }
+
+ plane_status = &plane_state->status;
+ core_dc = plane_state->ctx->dc;
+
+ if (core_dc->current_state == NULL)
+ return NULL;
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ core_dc->hwss.update_pending_status(pipe_ctx);
+ }
+
+ return plane_status;
+}
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state)
+{
+ kref_get(&plane_state->refcount);
+}
+
+static void dc_plane_state_free(struct kref *kref)
+{
+ struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
+ destruct(plane_state);
+ kfree(plane_state);
+}
+
+void dc_plane_state_release(struct dc_plane_state *plane_state)
+{
+ kref_put(&plane_state->refcount, dc_plane_state_free);
+}
+
+void dc_gamma_retain(struct dc_gamma *gamma)
+{
+ kref_get(&gamma->refcount);
+}
+
+static void dc_gamma_free(struct kref *kref)
+{
+ struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
+ kfree(gamma);
+}
+
+void dc_gamma_release(struct dc_gamma **gamma)
+{
+ kref_put(&(*gamma)->refcount, dc_gamma_free);
+ *gamma = NULL;
+}
+
+struct dc_gamma *dc_create_gamma(void)
+{
+ struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+
+ if (gamma == NULL)
+ goto alloc_fail;
+
+ kref_init(&gamma->refcount);
+ return gamma;
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_transfer_func_retain(struct dc_transfer_func *tf)
+{
+ kref_get(&tf->refcount);
+}
+
+static void dc_transfer_func_free(struct kref *kref)
+{
+ struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
+ kfree(tf);
+}
+
+void dc_transfer_func_release(struct dc_transfer_func *tf)
+{
+ kref_put(&tf->refcount, dc_transfer_func_free);
+}
+
+struct dc_transfer_func *dc_create_transfer_func(void)
+{
+ struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+
+ if (tf == NULL)
+ goto alloc_fail;
+
+ kref_init(&tf->refcount);
+
+ return tf;
+
+alloc_fail:
+ return NULL;
+}
+
+
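+/*
+ * Illustrative usage sketch added by the editor, not part of the original
+ * patch: every object created above (plane state, gamma, transfer function)
+ * is kref-counted, so a DM-side caller pairs each create/retain with a
+ * release. The helper name is hypothetical.
+ */
+static void example_plane_state_lifetime(struct dc *dc)
+{
+        struct dc_plane_state *plane_state = dc_create_plane_state(dc);
+
+        if (plane_state == NULL)
+                return;
+
+        /* destruct() drops this reference when the plane state is freed */
+        plane_state->in_transfer_func = dc_create_transfer_func();
+
+        dc_plane_state_retain(plane_state);  /* e.g. while a flip is pending */
+        dc_plane_state_release(plane_state); /* drop the extra reference */
+        dc_plane_state_release(plane_state); /* last reference frees the object */
+}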
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
new file mode 100644
index 000000000000..9d8f4a55c74e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -0,0 +1,1103 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INTERFACE_H_
+#define DC_INTERFACE_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+#include "logger_types.h"
+#include "gpio_types.h"
+#include "link_service_types.h"
+#include "grph_object_ctrl_defs.h"
+#include <inc/hw/opp.h>
+
+#include "inc/hw_sequencer.h"
+#include "inc/compressor.h"
+#include "dml/display_mode_lib.h"
+
+#define DC_VER "3.1.07"
+
+#define MAX_SURFACES 3
+#define MAX_STREAMS 6
+#define MAX_SINKS_PER_LINK 4
+
+
+/*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+struct dc_caps {
+ uint32_t max_streams;
+ uint32_t max_links;
+ uint32_t max_audios;
+ uint32_t max_slave_planes;
+ uint32_t max_planes;
+ uint32_t max_downscale_ratio;
+ uint32_t i2c_speed_in_khz;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
+ bool dcc_const_color;
+ bool dynamic_audio;
+};
+
+struct dc_dcc_surface_param {
+ struct dc_size surface_size;
+ enum surface_pixel_format format;
+ enum swizzle_mode_values swizzle_mode;
+ enum dc_scan_direction scan;
+};
+
+struct dc_dcc_setting {
+ unsigned int max_compressed_blk_size;
+ unsigned int max_uncompressed_blk_size;
+ bool independent_64b_blks;
+};
+
+struct dc_surface_dcc_cap {
+ union {
+ struct {
+ struct dc_dcc_setting rgb;
+ } grph;
+
+ struct {
+ struct dc_dcc_setting luma;
+ struct dc_dcc_setting chroma;
+ } video;
+ };
+
+ bool capable;
+ bool const_color_support;
+};
+
+struct dc_static_screen_events {
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+};
+
+/* Forward declaration*/
+struct dc;
+struct dc_plane_state;
+struct dc_state;
+
+struct dc_cap_funcs {
+ bool (*get_dcc_compression_cap)(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+};
+
+struct dc_stream_state_funcs {
+ bool (*adjust_vmin_vmax)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ int vmin,
+ int vmax);
+ bool (*get_crtc_position)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ unsigned int *v_pos,
+ unsigned int *nom_v_pos);
+
+ bool (*set_gamut_remap)(struct dc *dc,
+ const struct dc_stream_state *stream);
+
+ bool (*program_csc_matrix)(struct dc *dc,
+ struct dc_stream_state *stream);
+
+ void (*set_static_screen_events)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ const struct dc_static_screen_events *events);
+
+ void (*set_dither_option)(struct dc_stream_state *stream,
+ enum dc_dither_option option);
+
+ void (*set_dpms)(struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off);
+};
+
+struct link_training_settings;
+
+struct dc_link_funcs {
+ void (*set_drive_settings)(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+ void (*perform_link_training)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+ void (*set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+};
+
+/* Structure to hold configuration flags set by dm at dc creation. */
+struct dc_config {
+ bool gpu_vm_support;
+ bool disable_disp_pll_sharing;
+};
+
+enum dcc_option {
+ DCC_ENABLE = 0,
+ DCC_DISABLE = 1,
+ DCC_HALF_REQ_DISALBE = 2,
+};
+
+enum pipe_split_policy {
+ MPC_SPLIT_DYNAMIC = 0,
+ MPC_SPLIT_AVOID = 1,
+ MPC_SPLIT_AVOID_MULT_DISP = 2,
+};
+
+enum wm_report_mode {
+ WM_REPORT_DEFAULT = 0,
+ WM_REPORT_OVERRIDE = 1,
+};
+
+struct dc_debug {
+ bool surface_visual_confirm;
+ bool sanity_checks;
+ bool max_disp_clk;
+ bool surface_trace;
+ bool timing_trace;
+ bool clock_trace;
+ bool validation_trace;
+
+ /* stutter efficiency related */
+ bool disable_stutter;
+ bool use_max_lb;
+ enum dcc_option disable_dcc;
+ enum pipe_split_policy pipe_split_policy;
+ bool force_single_disp_pipe_split;
+ bool voltage_align_fclk;
+
+ bool disable_dfs_bypass;
+ bool disable_dpp_power_gate;
+ bool disable_hubp_power_gate;
+ bool disable_pplib_wm_range;
+ enum wm_report_mode pplib_wm_report_mode;
+ unsigned int min_disp_clk_khz;
+ int sr_exit_time_dpm0_ns;
+ int sr_enter_plus_exit_time_dpm0_ns;
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+ int always_scale;
+ bool disable_pplib_clock_request;
+ bool disable_clock_gate;
+ bool disable_dmcu;
+ bool disable_psr;
+ bool force_abm_enable;
+ bool disable_hbup_pg;
+ bool disable_dpp_pg;
+ bool disable_stereo_support;
+ bool vsr_support;
+ bool performance_trace;
+};
+struct dc_state;
+struct resource_pool;
+struct dce_hwseq;
+struct dc {
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_stream_state_funcs stream_funcs;
+ struct dc_link_funcs link_funcs;
+ struct dc_config config;
+ struct dc_debug debug;
+
+ struct dc_context *ctx;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_PIPES * 2];
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+#endif
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* temp store of dm_pp_display_configuration
+ * to compare to see if display config changed
+ */
+ struct dm_pp_display_configuration prev_display_config;
+
+ /* FBC compressor */
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct compressor *fbc_compressor;
+#endif
+};
+
+enum frame_buffer_mode {
+ FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
+ FRAME_BUFFER_MODE_ZFB_ONLY,
+ FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
+};
+
+struct dchub_init_data {
+ int64_t zfb_phys_addr_base;
+ int64_t zfb_mc_base_addr;
+ uint64_t zfb_size_in_byte;
+ enum frame_buffer_mode fb_mode;
+ bool dchub_initialzied;
+ bool dchub_info_valid;
+};
+
+struct dc_init_data {
+ struct hw_asic_id asic_id;
+ void *driver; /* ctx */
+ struct cgs_device *cgs_device;
+
+ int num_virtual_links;
+ /*
+ * If 'vbios_override' is not NULL, it will be used instead
+ * of the real VBIOS. Intended use is Diagnostics on FPGA.
+ */
+ struct dc_bios *vbios_override;
+ enum dce_environment dce_environment;
+
+ struct dc_config flags;
+ uint32_t log_mask;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+struct dc *dc_create(const struct dc_init_data *init_params);
+
+void dc_destroy(struct dc **dc);
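+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * minimal DM-side bring-up of a DC instance. Fields left untouched keep
+ * their zeroed defaults (no vbios_override, no virtual links). The
+ * DCE_ENV_PRODUCTION_DRV value is assumed to come from dce_environment.h.
+ */
+static inline struct dc *example_create_dc(struct cgs_device *cgs_device,
+                                           struct hw_asic_id asic_id,
+                                           void *dm_private)
+{
+        struct dc_init_data init_data = { 0 };
+
+        init_data.asic_id = asic_id;
+        init_data.driver = dm_private; /* DM private context, see the 'ctx' note above */
+        init_data.cgs_device = cgs_device;
+        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+        return dc_create(&init_data);
+}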
+
+/*******************************************************************************
+ * Surface Interfaces
+ ******************************************************************************/
+
+enum {
+ TRANSFER_FUNC_POINTS = 1025
+};
+
+// Moved here from color module for linux
+enum color_transfer_func {
+ transfer_func_unknown,
+ transfer_func_srgb,
+ transfer_func_bt709,
+ transfer_func_pq2084,
+ transfer_func_pq2084_interim,
+ transfer_func_linear_0_1,
+ transfer_func_linear_0_125,
+ transfer_func_dolbyvision,
+ transfer_func_gamma_22,
+ transfer_func_gamma_26
+};
+
+enum color_color_space {
+ color_space_unsupported,
+ color_space_srgb,
+ color_space_bt601,
+ color_space_bt709,
+ color_space_xv_ycc_bt601,
+ color_space_xv_ycc_bt709,
+ color_space_xr_rgb,
+ color_space_bt2020,
+ color_space_adobe,
+ color_space_dci_p3,
+ color_space_sc_rgb_ms_ref,
+ color_space_display_native,
+ color_space_app_ctrl,
+ color_space_dolby_vision,
+ color_space_custom_coordinates
+};
+
+struct dc_hdr_static_metadata {
+ /* display chromaticities and white point in units of 0.00001 */
+ unsigned int chromaticity_green_x;
+ unsigned int chromaticity_green_y;
+ unsigned int chromaticity_blue_x;
+ unsigned int chromaticity_blue_y;
+ unsigned int chromaticity_red_x;
+ unsigned int chromaticity_red_y;
+ unsigned int chromaticity_white_point_x;
+ unsigned int chromaticity_white_point_y;
+
+ uint32_t min_luminance;
+ uint32_t max_luminance;
+ uint32_t maximum_content_light_level;
+ uint32_t maximum_frame_average_light_level;
+
+ bool hdr_supported;
+ bool is_hdr;
+};
+
+enum dc_transfer_func_type {
+ TF_TYPE_PREDEFINED,
+ TF_TYPE_DISTRIBUTED_POINTS,
+ TF_TYPE_BYPASS
+};
+
+struct dc_transfer_func_distributed_points {
+ struct fixed31_32 red[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 green[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 blue[TRANSFER_FUNC_POINTS];
+
+ uint16_t end_exponent;
+ uint16_t x_point_at_y1_red;
+ uint16_t x_point_at_y1_green;
+ uint16_t x_point_at_y1_blue;
+};
+
+enum dc_transfer_func_predefined {
+ TRANSFER_FUNCTION_SRGB,
+ TRANSFER_FUNCTION_BT709,
+ TRANSFER_FUNCTION_PQ,
+ TRANSFER_FUNCTION_LINEAR,
+};
+
+struct dc_transfer_func {
+ struct kref refcount;
+ struct dc_transfer_func_distributed_points tf_pts;
+ enum dc_transfer_func_type type;
+ enum dc_transfer_func_predefined tf;
+ struct dc_context *ctx;
+};
+
+/*
+ * This structure is filled in by dc_plane_get_status and contains
+ * the last requested address and the currently active address so the caller
+ * can determine if there are any outstanding flips.
+ */
+struct dc_plane_status {
+ struct dc_plane_address requested_address;
+ struct dc_plane_address current_address;
+ bool is_flip_pending;
+ bool is_right_eye;
+};
+
+struct dc_plane_state {
+ struct dc_plane_address address;
+ struct scaling_taps scaling_quality;
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+
+ struct dc_plane_dcc_param dcc;
+ struct dc_hdr_static_metadata hdr_static_ctx;
+
+ struct dc_gamma *gamma_correction;
+ struct dc_transfer_func *in_transfer_func;
+
+ // sourceContentAttribute cache
+ bool is_source_input_valid;
+ struct dc_hdr_static_metadata source_input_mastering_info;
+ enum color_color_space source_input_color_space;
+ enum color_transfer_func source_input_tf;
+
+ enum dc_color_space color_space;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+
+ bool per_pixel_alpha;
+ bool visible;
+ bool flip_immediate;
+ bool horizontal_mirror;
+
+ /* private to DC core */
+ struct dc_plane_status status;
+ struct dc_context *ctx;
+
+ /* private to dc_surface.c */
+ enum dc_irq_source irq_source;
+ struct kref refcount;
+};
+
+struct dc_plane_info {
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+ struct dc_plane_dcc_param dcc;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+ enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+ bool horizontal_mirror;
+ bool visible;
+ bool per_pixel_alpha;
+};
+
+struct dc_scaling_info {
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+ struct scaling_taps scaling_quality;
+};
+
+struct dc_surface_update {
+ struct dc_plane_state *surface;
+
+ /* isr safe update parameters. null means no updates */
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ /* following updates require alloc/sleep/spin that is not isr safe,
+ * null means no updates
+ */
+ /* gamma TO BE REMOVED */
+ struct dc_gamma *gamma;
+ struct dc_transfer_func *in_transfer_func;
+ struct dc_hdr_static_metadata *hdr_static_metadata;
+};
+
+/*
+ * Create a new surface with default parameters;
+ */
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+void dc_gamma_retain(struct dc_gamma *dc_gamma);
+void dc_gamma_release(struct dc_gamma **dc_gamma);
+struct dc_gamma *dc_create_gamma(void);
+
+void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
+void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
+struct dc_transfer_func *dc_create_transfer_func(void);
+
+/*
+ * This structure holds a surface address. There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+struct dc_flip_addrs {
+ struct dc_plane_address address;
+ bool flip_immediate;
+ /* TODO: add flip duration for FreeSync */
+};
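+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * a flip-only update fills in the surface and flip address and leaves every
+ * other pointer NULL ("no update"), keeping it on the fast path described
+ * by the update types below. flip->address is filled in by the caller.
+ */
+static inline void example_build_flip_only_update(struct dc_surface_update *update,
+                                                  struct dc_plane_state *plane_state,
+                                                  struct dc_flip_addrs *flip)
+{
+        *update = (struct dc_surface_update) { 0 };
+
+        flip->flip_immediate = false; /* flip on vblank, not immediately */
+        update->surface = plane_state;
+        update->flip_addr = flip;
+}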
+
+bool dc_post_update_surfaces_to_stream(
+ struct dc *dc);
+
+/* Surface update type is used by dc_update_surfaces_and_stream
+ * The update type is determined at the very beginning of the function based
+ * on parameters passed in and decides how much programming (or updating) is
+ * going to be done during the call.
+ *
+ * UPDATE_TYPE_FAST is used for really fast updates that do not require much
+ * logical calculations or hardware register programming. This update MUST be
+ * ISR safe on windows. Currently fast update will only be used to flip surface
+ * address.
+ *
+ * UPDATE_TYPE_MED is used for slower updates which require significant hw
+ * re-programming however do not affect bandwidth consumption or clock
+ * requirements. At present, this is the level at which front end updates
+ * that do not require us to run bw_calcs happen. These are in/out transfer func
+ * updates, viewport offset changes, recout size changes and pixel depth changes.
+ * This update can be done at ISR, but we want to minimize how often this happens.
+ *
+ * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
+ * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
+ * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
+ * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
+ * a full update. This cannot be done at ISR level and should be a rare event.
+ * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
+ * underscan we don't expect to see this call at all.
+ */
+
+enum surface_update_type {
+ UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
+ UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
+ UPDATE_TYPE_FULL, /* may need to shuffle resources */
+};
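+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * a DM can branch on the type returned by
+ * dc_check_update_surfaces_for_stream() (declared further down) to decide
+ * whether an update may be applied from ISR context.
+ */
+static inline bool example_update_type_is_isr_safe(enum surface_update_type type)
+{
+        switch (type) {
+        case UPDATE_TYPE_FAST:
+        case UPDATE_TYPE_MED:
+                return true;   /* no bandwidth/clock recalculation needed */
+        case UPDATE_TYPE_FULL:
+        default:
+                return false;  /* needs bw_calcs, possibly a pipe shuffle */
+        }
+}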
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+
+struct dc_stream_status {
+ int primary_otg_inst;
+ int stream_enc_inst;
+ int plane_count;
+ struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+
+ /*
+ * link this stream passes through
+ */
+ struct dc_link *link;
+};
+
+struct dc_stream_state {
+ struct dc_sink *sink;
+ struct dc_crtc_timing timing;
+
+ struct rect src; /* composition area */
+ struct rect dst; /* stream addressable area */
+
+ struct audio_info audio_info;
+
+ struct freesync_context freesync_ctx;
+
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+ struct csc_transform csc_color_matrix;
+
+ enum signal_type output_signal;
+
+ enum dc_color_space output_color_space;
+ enum dc_dither_option dither_option;
+
+ enum view_3d_format view_format;
+
+ bool ignore_msa_timing_param;
+ /* TODO: custom INFO packets */
+ /* TODO: ABM info (DMCU) */
+ /* TODO: PSR info */
+ /* TODO: CEA VIC */
+
+ /* from core_stream struct */
+ struct dc_context *ctx;
+
+ /* used by DCP and FMT */
+ struct bit_depth_reduction_params bit_depth_params;
+ struct clamping_and_pixel_encoding_params clamping;
+
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
+
+ struct dc_stream_status status;
+
+ struct dc_cursor_attributes cursor_attributes;
+
+ /* from stream struct */
+ struct kref refcount;
+};
+
+struct dc_stream_update {
+ struct rect src;
+ struct rect dst;
+ struct dc_transfer_func *out_transfer_func;
+};
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+/*
+ * Set up surface attributes and associate to a stream
+ * The surfaces parameter is an absolute set of all surfaces active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory read.
+ * Any flip related attribute changes must be done through this interface.
+ *
+ * After this call:
+ * Surfaces attributes are programmed and configured to be composed into stream.
+ * This does not trigger a flip. No surface address is programmed.
+ */
+
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state);
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state);
+/*
+ * Log the current stream state.
+ */
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dc_logger,
+ enum dc_log_type log_type);
+
+uint8_t dc_get_current_stream_count(struct dc *dc);
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+
+/*
+ * Return the current frame counter.
+ */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context);
+
+/*
+ * Structure to store surface/stream associations for validation
+ */
+struct dc_validation_set {
+ struct dc_stream_state *stream;
+ struct dc_plane_state *plane_states[MAX_SURFACES];
+ uint8_t plane_count;
+};
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx);
+
+/*
+ * This function takes a stream and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC similar streams are supported.
+ *
+ * After this call:
+ * No hardware is programmed by this call. Only validation is done.
+ */
+
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_destruct(struct dc_state *context);
+
+/*
+ * TODO update to make it about validation sets
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+bool dc_commit_state(struct dc *dc, struct dc_state *context);
+
+/*
+ * Enable stereo when commit_streams is not required,
+ * for example, frame alternate.
+ */
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+
+void dc_stream_retain(struct dc_stream_state *dc_stream);
+void dc_stream_release(struct dc_stream_state *dc_stream);
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *dc_stream);
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status);
+
+
+struct dc_state *dc_create_state(void);
+void dc_retain_state(struct dc_state *context);
+void dc_release_state(struct dc_state *context);
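+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * the usual commit flow copies the current state into a candidate context,
+ * edits it, validates it globally and only then commits. DC_OK is assumed
+ * to be the success value of enum dc_status from dc_types.h.
+ */
+static inline struct dc_state *example_enable_stream(struct dc *dc,
+                                                     struct dc_stream_state *stream)
+{
+        struct dc_state *context = dc_create_state();
+
+        if (!context)
+                return NULL;
+
+        dc_resource_state_copy_construct_current(dc, context);
+
+        if (dc_add_stream_to_ctx(dc, context, stream) == DC_OK &&
+            dc_validate_global_state(dc, context) == DC_OK &&
+            dc_commit_state(dc, context))
+                return context; /* caller keeps this reference until the next commit */
+
+        dc_release_state(context);
+        return NULL;
+}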
+
+/*******************************************************************************
+ * Link Interfaces
+ ******************************************************************************/
+
+struct dpcd_caps {
+ union dpcd_rev dpcd_rev;
+ union max_lane_count max_ln_count;
+ union max_down_spread max_down_spread;
+
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ /* Dongle's downstream count. */
+ union sink_count sink_count;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+ bool dpcd_display_control_capable;
+};
+
+struct dc_link_status {
+ struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ /* caps is the same as reported_link_cap. link training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting;
+ struct dc_link_settings preferred_link_setting;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ enum edp_revision edp_revision;
+ bool psr_enabled;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link. dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
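+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * because links are enumerated once at boot, a caller can simply walk
+ * dc->link_count entries to find one with a detected sink.
+ */
+static inline struct dc_link *example_first_connected_link(struct dc *dc)
+{
+        uint32_t i;
+
+        for (i = 0; i < dc->link_count; i++) {
+                struct dc_link *link = dc_get_link_at_index(dc, i);
+
+                if (link->sink_count > 0)
+                        return link; /* at least one sink detected on this link */
+        }
+
+        return NULL;
+}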
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* Request DC to detect if there is a Panel connected.
+ * boot - If this call is during initial boot.
+ * Return false for any type of detection failure or MST detection;
+ * return true otherwise, meaning further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
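+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * a DM short-pulse handler only re-runs detection when the RX IRQ handler
+ * reports a downstream port status change, as documented above.
+ */
+static inline void example_handle_short_pulse(struct dc_link *dc_link)
+{
+        union hpd_irq_data irq_data;
+
+        if (dc_link_handle_hpd_rx_irq(dc_link, &irq_data))
+                dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+        /* false: no downstream change, nothing more for the DM to do */
+}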
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
+/*******************************************************************************
+ * Sink Interfaces - A sink corresponds to a display output device
+ ******************************************************************************/
+
+struct dc_container_id {
+ // 128bit GUID in binary form
+ unsigned char guid[16];
+ // 8 byte port ID -> ELD.PortID
+ unsigned int portId[2];
+ // 2 byte manufacturer name -> ELD.ManufacturerName
+ unsigned short manufacturerName;
+ // 2 byte product code -> ELD.ProductCode
+ unsigned short productCode;
+};
+
+
+
+/*
+ * The sink structure contains EDID and other display device properties
+ */
+struct dc_sink {
+ enum signal_type sink_signal;
+ struct dc_edid dc_edid; /* raw edid */
+ struct dc_edid_caps edid_caps; /* parsed display caps */
+ struct dc_container_id *dc_container_id;
+ uint32_t dongle_max_pix_clk;
+ void *priv;
+ struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
+ bool converter_disable_audio;
+
+ /* private to DC core */
+ struct dc_link *link;
+ struct dc_context *ctx;
+
+ /* private to dc_sink.c */
+ struct kref refcount;
+};
+
+void dc_sink_retain(struct dc_sink *sink);
+void dc_sink_release(struct dc_sink *sink);
+
+struct dc_sink_init_data {
+ enum signal_type sink_signal;
+ struct dc_link *link;
+ uint32_t dongle_max_pix_clk;
+ bool converter_disable_audio;
+};
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
+
+/*******************************************************************************
+ * Cursor interfaces - To manage the cursor within a stream
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes);
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position);
+
+/* Newer interfaces */
+struct dc_cursor {
+ struct dc_plane_address address;
+ struct dc_cursor_attributes attributes;
+};
+
+/*******************************************************************************
+ * Interrupt interfaces
+ ******************************************************************************/
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id);
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+ struct dc *dc, uint32_t link_index);
+
+/*******************************************************************************
+ * Power Interfaces
+ ******************************************************************************/
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state);
+void dc_resume(struct dc *dc);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
+
+
+#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
new file mode 100644
index 000000000000..273d80a4ebce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_BIOS_TYPES_H
+#define DC_BIOS_TYPES_H
+
+/******************************************************************************
+ * Interface file for VBIOS implementations.
+ *
+ * The default implementation is inside DC.
+ * Display Manager (which instantiates DC) has the option to supply its own
+ * (external to DC) implementation of VBIOS, which will be called by DC, using
+ * this interface.
+ * (The intended use is Diagnostics, but other uses may appear.)
+ *****************************************************************************/
+
+#include "include/bios_parser_types.h"
+
+struct dc_vbios_funcs {
+ uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+ struct graphics_object_id (*get_encoder_id)(
+ struct dc_bios *bios,
+ uint32_t i);
+ struct graphics_object_id (*get_connector_id)(
+ struct dc_bios *bios,
+ uint8_t connector_index);
+ uint32_t (*get_dst_number)(
+ struct dc_bios *bios,
+ struct graphics_object_id id);
+
+ enum bp_result (*get_src_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id);
+ enum bp_result (*get_dst_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id);
+
+ enum bp_result (*get_i2c_info)(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info);
+
+ enum bp_result (*get_voltage_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t index,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_thermal_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_hpd_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info);
+ enum bp_result (*get_device_tag)(
+ struct dc_bios *bios,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info);
+ enum bp_result (*get_firmware_info)(
+ struct dc_bios *bios,
+ struct dc_firmware_info *info);
+ enum bp_result (*get_spread_spectrum_info)(
+ struct dc_bios *bios,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+ uint32_t (*get_ss_entry_number)(
+ struct dc_bios *bios,
+ enum as_signal_type signal);
+ enum bp_result (*get_embedded_panel_info)(
+ struct dc_bios *bios,
+ struct embedded_panel_info *info);
+ enum bp_result (*get_gpio_pin_info)(
+ struct dc_bios *bios,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info);
+ enum bp_result (*get_encoder_cap_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+ bool (*is_lid_status_changed)(
+ struct dc_bios *bios);
+ bool (*is_display_config_changed)(
+ struct dc_bios *bios);
+ bool (*is_accelerated_mode)(
+ struct dc_bios *bios);
+ void (*get_bios_event_info)(
+ struct dc_bios *bios,
+ struct bios_event_info *info);
+ void (*update_requested_backlight_level)(
+ struct dc_bios *bios,
+ uint32_t backlight_8bit);
+ uint32_t (*get_requested_backlight_level)(
+ struct dc_bios *bios);
+ void (*take_backlight_control)(
+ struct dc_bios *bios,
+ bool cntl);
+
+ bool (*is_active_display)(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
+ enum controller_id (*get_embedded_display_controller_id)(
+ struct dc_bios *bios);
+ uint32_t (*get_embedded_display_refresh_rate)(
+ struct dc_bios *bios);
+
+ void (*set_scratch_critical_state)(
+ struct dc_bios *bios,
+ bool state);
+ bool (*is_device_id_supported)(
+ struct dc_bios *bios,
+ struct device_id id);
+
+ /* COMMANDS */
+
+ enum bp_result (*encoder_control)(
+ struct dc_bios *bios,
+ struct bp_encoder_control *cntl);
+ enum bp_result (*transmitter_control)(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl);
+ enum bp_result (*crt_control)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock);
+ enum bp_result (*enable_crtc)(
+ struct dc_bios *bios,
+ enum controller_id id,
+ bool enable);
+ enum bp_result (*adjust_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_dce_clock)(
+ struct dc_bios *bios,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct dc_bios *bios);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct dc_bios *bios,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*program_crtc_timing)(
+ struct dc_bios *bios,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+ enum bp_result (*crtc_source_select)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*program_display_engine_pll)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+
+ enum signal_type (*dac_load_detect)(
+ struct dc_bios *bios,
+ struct graphics_object_id encoder,
+ struct graphics_object_id connector,
+ enum signal_type display_signal);
+
+ enum bp_result (*enable_disp_power_gating)(
+ struct dc_bios *bios,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action);
+
+ void (*post_init)(struct dc_bios *bios);
+
+ void (*bios_parser_destroy)(struct dc_bios **dcb);
+};
+
+struct bios_registers {
+ uint32_t BIOS_SCRATCH_6;
+};
+
+struct dc_bios {
+ const struct dc_vbios_funcs *funcs;
+
+ uint8_t *bios;
+ uint32_t bios_size;
+
+ uint8_t *bios_local_image;
+
+ struct dc_context *ctx;
+ const struct bios_registers *regs;
+ struct integrated_info *integrated_info;
+};
+
+#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
new file mode 100644
index 000000000000..e1affeb5cc51
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_DDC_TYPES_H_
+#define DC_DDC_TYPES_H_
+
+struct i2c_payload {
+ bool write;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2c_command_engine {
+ I2C_COMMAND_ENGINE_DEFAULT,
+ I2C_COMMAND_ENGINE_SW,
+ I2C_COMMAND_ENGINE_HW
+};
+
+struct i2c_command {
+ struct i2c_payload *payloads;
+ uint8_t number_of_payloads;
+
+ enum i2c_command_engine engine;
+
+ /* expressed in KHz
+ * zero means "use default value" */
+ uint32_t speed;
+};
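+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * a classic EDID read expressed as two payloads of one command - a one byte
+ * offset write followed by a block read - submitted later through
+ * dc_submit_i2c() from dc.h. 0x50 is the standard 7-bit DDC/EDID address.
+ */
+static inline void example_fill_edid_command(struct i2c_command *cmd,
+                                             struct i2c_payload payloads[2],
+                                             uint8_t *offset,
+                                             uint8_t *buf, uint32_t len)
+{
+        payloads[0].write = true;
+        payloads[0].address = 0x50;
+        payloads[0].length = 1;
+        payloads[0].data = offset;
+
+        payloads[1].write = false;
+        payloads[1].address = 0x50;
+        payloads[1].length = len;
+        payloads[1].data = buf;
+
+        cmd->payloads = payloads;
+        cmd->number_of_payloads = 2;
+        cmd->engine = I2C_COMMAND_ENGINE_DEFAULT;
+        cmd->speed = 0; /* zero means "use default value", per the comment above */
+}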
+
+struct gpio_ddc_hw_info {
+ bool hw_supported;
+ uint32_t ddc_channel;
+};
+
+struct ddc {
+ struct gpio *pin_data;
+ struct gpio *pin_clock;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_context *ctx;
+};
+
+union ddc_wa {
+ struct {
+ uint32_t DP_SKIP_POWER_OFF:1;
+ uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct ddc_flags {
+ uint8_t EDID_QUERY_DONE_ONCE:1;
+ uint8_t IS_INTERNAL_DISPLAY:1;
+ uint8_t FORCE_READ_REPEATED_START:1;
+ uint8_t EDID_STRESS_READ:1;
+
+};
+
+enum ddc_transaction_type {
+ DDC_TRANSACTION_TYPE_NONE = 0,
+ DDC_TRANSACTION_TYPE_I2C,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
+};
+
+enum display_dongle_type {
+ DISPLAY_DONGLE_NONE = 0,
+ /* Active converter types*/
+ DISPLAY_DONGLE_DP_VGA_CONVERTER,
+ DISPLAY_DONGLE_DP_DVI_CONVERTER,
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ /* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
+ DISPLAY_DONGLE_DP_DVI_DONGLE,
+ DISPLAY_DONGLE_DP_HDMI_DONGLE,
+ /* Other types of dongle*/
+ DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
+};
+
+struct ddc_service {
+ struct ddc *ddc_pin;
+ struct ddc_flags flags;
+ union ddc_wa wa;
+ enum ddc_transaction_type transaction_type;
+ enum display_dongle_type dongle_type;
+ struct dc_context *ctx;
+ struct dc_link *link;
+
+ uint32_t address;
+ uint32_t edid_buf_len;
+ uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+#endif /* DC_DDC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
new file mode 100644
index 000000000000..77e2de69cca3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_DP_TYPES_H
+#define DC_DP_TYPES_H
+
+enum dc_lane_count {
+ LANE_COUNT_UNKNOWN = 0,
+ LANE_COUNT_ONE = 1,
+ LANE_COUNT_TWO = 2,
+ LANE_COUNT_FOUR = 4,
+ LANE_COUNT_EIGHT = 8,
+ LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
+};
+
+/* This is actually a reference clock (27MHz) multiplier:
+ * 162MHz symbol clock for the 1.62Gbps link rate,
+ * 270MHz for 2.70Gbps,
+ * 324MHz for 3.24Gbps,
+ * 540MHz for 5.40Gbps,
+ * 810MHz for 8.10Gbps
+ */
+enum dc_link_rate {
+ LINK_RATE_UNKNOWN = 0,
+ LINK_RATE_LOW = 0x06,
+ LINK_RATE_HIGH = 0x0A,
+ LINK_RATE_RBR2 = 0x0C,
+ LINK_RATE_HIGH2 = 0x14,
+ LINK_RATE_HIGH3 = 0x1E
+};
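+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * worked example of the multiplier above - LINK_RATE_HIGH2 is 0x14 = 20,
+ * and 20 * 27MHz = 540MHz symbol clock, i.e. the 5.40Gbps (HBR2) link rate.
+ */
+static inline uint32_t example_link_rate_to_symbol_clock_khz(enum dc_link_rate rate)
+{
+        return (uint32_t)rate * 27000; /* 27MHz reference clock, in kHz */
+}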
+
+enum dc_link_spread {
+ LINK_SPREAD_DISABLED = 0x00,
+ /* 0.5 % downspread 30 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
+ /* 0.5 % downspread 33 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
+};
+
+enum dc_voltage_swing {
+ VOLTAGE_SWING_LEVEL0 = 0, /* direct HW translation! */
+ VOLTAGE_SWING_LEVEL1,
+ VOLTAGE_SWING_LEVEL2,
+ VOLTAGE_SWING_LEVEL3,
+ VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
+};
+
+enum dc_pre_emphasis {
+ PRE_EMPHASIS_DISABLED = 0, /* direct HW translation! */
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
+};
+/* Post Cursor 2 is optional for transmitter
+ * and it applies only to the main link operating at HBR2
+ */
+enum dc_post_cursor2 {
+ POST_CURSOR2_DISABLED = 0, /* direct HW translation! */
+ POST_CURSOR2_LEVEL1,
+ POST_CURSOR2_LEVEL2,
+ POST_CURSOR2_LEVEL3,
+ POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
+};
+
+struct dc_link_settings {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+ enum dc_link_spread link_spread;
+};
+
+struct dc_lane_settings {
+ enum dc_voltage_swing VOLTAGE_SWING;
+ enum dc_pre_emphasis PRE_EMPHASIS;
+ enum dc_post_cursor2 POST_CURSOR2;
+};
+
+struct dc_link_training_settings {
+ struct dc_link_settings link;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+};
+
+
+union dpcd_rev {
+ struct {
+ uint8_t MINOR:4;
+ uint8_t MAJOR:4;
+ } bits;
+ uint8_t raw;
+};
+
+union max_lane_count {
+ struct {
+ uint8_t MAX_LANE_COUNT:5;
+ uint8_t POST_LT_ADJ_REQ_SUPPORTED:1;
+ uint8_t TPS3_SUPPORTED:1;
+ uint8_t ENHANCED_FRAME_CAP:1;
+ } bits;
+ uint8_t raw;
+};
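+
+/*
+ * Illustrative sketch added by the editor, not part of the original patch:
+ * every DPCD byte below is mirrored through a raw/bits union, so a receiver
+ * capability read decodes with plain field accesses.
+ */
+static inline void example_decode_rx_caps(const uint8_t dpcd[2],
+                                          uint8_t *dpcd_major_rev,
+                                          uint8_t *max_lanes)
+{
+        union dpcd_rev rev = { .raw = dpcd[0] };
+        union max_lane_count lanes = { .raw = dpcd[1] };
+
+        *dpcd_major_rev = rev.bits.MAJOR;
+        *max_lanes = lanes.bits.MAX_LANE_COUNT;
+}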
+
+union max_down_spread {
+ struct {
+ uint8_t MAX_DOWN_SPREAD:1;
+ uint8_t RESERVED:5;
+ uint8_t NO_AUX_HANDSHAKE_LINK_TRAINING:1;
+ uint8_t TPS4_SUPPORTED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union mstm_cap {
+ struct {
+ uint8_t MST_CAP:1;
+ uint8_t RESERVED:7;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_count_set {
+ struct {
+ uint8_t LANE_COUNT_SET:5;
+ uint8_t POST_LT_ADJ_REQ_GRANTED:1;
+ uint8_t RESERVED:1;
+ uint8_t ENHANCED_FRAMING:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_status {
+ struct {
+ uint8_t CR_DONE_0:1;
+ uint8_t CHANNEL_EQ_DONE_0:1;
+ uint8_t SYMBOL_LOCKED_0:1;
+ uint8_t RESERVED0:1;
+ uint8_t CR_DONE_1:1;
+ uint8_t CHANNEL_EQ_DONE_1:1;
+ uint8_t SYMBOL_LOCKED_1:1;
+ uint8_t RESERVED_1:1;
+ } bits;
+ uint8_t raw;
+};
+
+union device_service_irq {
+ struct {
+ uint8_t REMOTE_CONTROL_CMD_PENDING:1;
+ uint8_t AUTOMATED_TEST:1;
+ uint8_t CP_IRQ:1;
+ uint8_t MCCS_IRQ:1;
+ uint8_t DOWN_REP_MSG_RDY:1;
+ uint8_t UP_REQ_MSG_RDY:1;
+ uint8_t SINK_SPECIFIC:1;
+ uint8_t reserved:1;
+ } bits;
+ uint8_t raw;
+};
+
+union sink_count {
+ struct {
+ uint8_t SINK_COUNT:6;
+ uint8_t CPREADY:1;
+ uint8_t RESERVED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_align_status_updated {
+ struct {
+ uint8_t INTERLANE_ALIGN_DONE:1;
+ uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+ uint8_t RESERVED:4;
+ uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
+ uint8_t LINK_STATUS_UPDATED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_adjust {
+ struct {
+ uint8_t VOLTAGE_SWING_LANE:2;
+ uint8_t PRE_EMPHASIS_LANE:2;
+ uint8_t RESERVED:4;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_training_pattern {
+ struct {
+ uint8_t TRAINING_PATTERN_SET:4;
+ uint8_t RECOVERED_CLOCK_OUT_EN:1;
+ uint8_t SCRAMBLING_DISABLE:1;
+ uint8_t SYMBOL_ERROR_COUNT_SEL:2;
+ } v1_4;
+ struct {
+ uint8_t TRAINING_PATTERN_SET:2;
+ uint8_t LINK_QUAL_PATTERN_SET:2;
+ uint8_t RESERVED:4;
+ } v1_3;
+ uint8_t raw;
+};
+
+/* Training Lane is used to configure downstream DP device's voltage swing
+and pre-emphasis levels*/
+/* The DPCD addresses are from 0x103 to 0x106*/
+union dpcd_training_lane {
+ struct {
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t RESERVED:2;
+ } bits;
+ uint8_t raw;
+};
+
+/* TMDS-converter related */
+union dwnstream_port_caps_byte0 {
+ struct {
+ uint8_t DWN_STRM_PORTX_TYPE:3;
+ uint8_t DWN_STRM_PORTX_HPD:1;
+ uint8_t RESERVERD:4;
+ } bits;
+ uint8_t raw;
+};
+
+/* these are the detailed types stored at DWN_STRM_PORTX_CAP (00080h)*/
+enum dpcd_downstream_port_detailed_type {
+ DOWN_STREAM_DETAILED_DP = 0,
+ DOWN_STREAM_DETAILED_VGA,
+ DOWN_STREAM_DETAILED_DVI,
+ DOWN_STREAM_DETAILED_HDMI,
+ DOWN_STREAM_DETAILED_NONDDC,/* has no EDID (TV,CV)*/
+ DOWN_STREAM_DETAILED_DP_PLUS_PLUS
+};
+
+union dwnstream_port_caps_byte1 {
+ struct {
+ uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_downstream_port_present {
+ uint8_t byte;
+ struct {
+ uint8_t PORT_PRESENT:1;
+ uint8_t PORT_TYPE:2;
+ uint8_t FMT_CONVERSION:1;
+ uint8_t DETAILED_CAPS:1;
+ uint8_t RESERVED:3;
+ } fields;
+};
+
+union dwnstream_port_caps_byte3_dvi {
+ struct {
+ uint8_t RESERVED1:1;
+ uint8_t DUAL_LINK:1;
+ uint8_t HIGH_COLOR_DEPTH:1;
+ uint8_t RESERVED2:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dwnstream_port_caps_byte3_hdmi {
+ struct {
+ uint8_t FRAME_SEQ_TO_FRAME_PACK:1;
+ uint8_t YCrCr422_PASS_THROUGH:1;
+ uint8_t YCrCr420_PASS_THROUGH:1;
+ uint8_t YCrCr422_CONVERSION:1;
+ uint8_t YCrCr420_CONVERSION:1;
+ uint8_t RESERVED:3;
+ } bits;
+ uint8_t raw;
+};
+
+/*4-byte structure for detailed capabilities of a down-stream port
+(DP-to-TMDS converter).*/
+
+union sink_status {
+ struct {
+ uint8_t RX_PORT0_STATUS:1;
+ uint8_t RX_PORT1_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+/*6-byte structure corresponding to 6 registers (200h-205h)
+read during handling of HPD-IRQ*/
+union hpd_irq_data {
+ struct {
+ union sink_count sink_cnt;/* 200h */
+ union device_service_irq device_service_irq;/* 201h */
+ union lane_status lane01_status;/* 202h */
+ union lane_status lane23_status;/* 203h */
+ union lane_align_status_updated lane_status_updated;/* 204h */
+ union sink_status sink_status;
+ } bytes;
+ uint8_t raw[6];
+};
+
+union down_stream_port_count {
+ struct {
+ uint8_t DOWN_STR_PORT_COUNT:4;
+ uint8_t RESERVED:2; /*Bits 5:4 = RESERVED. Read all 0s.*/
+ /*Bit 6 = MSA_TIMING_PAR_IGNORED
+ 0 = Sink device requires the MSA timing parameters
+ 1 = Sink device is capable of rendering incoming video
+ stream without MSA timing parameters*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ /*Bit 7 = OUI Support
+ 0 = OUI not supported
+ 1 = OUI supported
+ (OUI and Device Identification mandatory for DP 1.2)*/
+ uint8_t OUI_SUPPORT:1;
+ } bits;
+ uint8_t raw;
+};
+
+union down_spread_ctrl {
+ struct {
+ uint8_t RESERVED1:4;/* Bit 3:0 = RESERVED. Read all 0s*/
+ /* Bits 4 = SPREAD_AMP. Spreading amplitude
+ 0 = Main link signal is not downspread
+ 1 = Main link signal is downspread <= 0.5%
+ with frequency in the range of 30kHz ~ 33kHz*/
+ uint8_t SPREAD_AMP:1;
+ uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+ /*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
+ 0 = Source device will send valid data for the MSA Timing Params
+ 1 = Source device may send invalid data for these MSA Timing Params*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_edp_config {
+ struct {
+ uint8_t PANEL_MODE_EDP:1;
+ uint8_t FRAMING_CHANGE_ENABLE:1;
+ uint8_t RESERVED:5;
+ uint8_t PANEL_SELF_TEST_ENABLE:1;
+ } bits;
+ uint8_t raw;
+};
+
+struct dp_device_vendor_id {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+};
+
+struct dp_sink_hw_fw_revision {
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+};
+
+/*DPCD register of DP receiver capability field bits-*/
+union edp_configuration_cap {
+ struct {
+ uint8_t ALT_SCRAMBLER_RESET:1;
+ uint8_t FRAMING_CHANGE:1;
+ uint8_t RESERVED:1;
+ uint8_t DPCD_DISPLAY_CONTROL_CAPABLE:1;
+ uint8_t RESERVED2:4;
+ } bits;
+ uint8_t raw;
+};
+
+union training_aux_rd_interval {
+ struct {
+ uint8_t TRAINIG_AUX_RD_INTERVAL:7;
+ uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ } bits;
+ uint8_t raw;
+};
+
+/* Automated test structures */
+union test_request {
+ struct {
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+ uint8_t EDID_REAT :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+ uint8_t TEST_STEREO_3D :1;
+ } bits;
+ uint8_t raw;
+};
+
+union test_response {
+ struct {
+ uint8_t ACK :1;
+ uint8_t NO_ACK :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
+union phy_test_pattern {
+ struct {
+ /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
+ * and 3 bits for DP1.2.
+ */
+ uint8_t PATTERN :3;
+ /* By spec, bits 7:2 are 0 for DP1.1. */
+ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+};
+
+/* States of Compliance Test Specification (CTS DP1.2). */
+union compliance_test_state {
+ struct {
+ unsigned char STEREO_3D_RUNNING : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union link_test_pattern {
+ struct {
+ /* dpcd_link_test_patterns */
+ unsigned char PATTERN :2;
+ unsigned char RESERVED:6;
+ } bits;
+ unsigned char raw;
+};
+
+union test_misc {
+ struct dpcd_test_misc_bits {
+ unsigned char SYNC_CLOCK :1;
+ /* dpcd_test_color_format */
+ unsigned char CLR_FORMAT :2;
+ /* dpcd_test_dyn_range */
+ unsigned char DYN_RANGE :1;
+ unsigned char YCBCR :1;
+ /* dpcd_test_bit_depth */
+ unsigned char BPC :3;
+ } bits;
+ unsigned char raw;
+};
+
+#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
new file mode 100644
index 000000000000..0d84b2a1ccfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -0,0 +1,171 @@
+/*
+ * dc_helper.c
+ *
+ * Created on: Aug 30, 2016
+ * Author: agrodzov
+ */
+#include "dm_services.h"
+#include <stdarg.h>
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ dm_write_reg(ctx, addr, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
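/*
 * Illustrative sketch (not part of this patch): generic_reg_update_ex()
 * consumes (shift, mask, field_value) triplets, with n giving the total
 * number of fields.  The address and masks below are made-up values; in
 * the driver they come from the per-block register/shift/mask tables via
 * the REG_* macros.
 */
static void example_update_two_fields(const struct dc_context *ctx)
{
	uint32_t addr = 0x1234;                    /* hypothetical register */
	uint32_t cur = dm_read_reg(ctx, addr);

	generic_reg_update_ex(ctx, addr, cur, 2,
			      0, 0x0000000f, 5,    /* field A: bits 3:0  */
			      8, 0x00000f00, 1);   /* field B: bits 11:8 */
}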
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ return reg_val;
+}
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ return reg_val;
+}
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ return reg_val;
+}
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ return reg_val;
+}
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ return reg_val;
+}
+
+/* note: a va_args version of this is a bad idea: the output parameters are
+ * passed by pointer, so the compiler cannot check argument sizes and this is
+ * prone to stack-corruption type bugs
+
+uint32_t generic_reg_get(const struct dc_context *ctx,
+ uint32_t addr, int n, ...)
+{
+ uint32_t shift, mask;
+ uint32_t *field_value;
+ uint32_t reg_val;
+ int i = 0;
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ va_list ap;
+ va_start(ap, n);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return reg_val;
+}
+*/
+
+uint32_t generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line)
+{
+ uint32_t field_value;
+ uint32_t reg_val;
+ int i;
+
+ /* something is terribly wrong if the timeout is > 200ms (5Hz) */
+ ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ /* 35 seconds */
+ delay_between_poll_us = 35000;
+ time_out_num_tries = 1000;
+ }
+
+ for (i = 0; i <= time_out_num_tries; i++) {
+ if (i) {
+ if (delay_between_poll_us >= 1000)
+ msleep(delay_between_poll_us/1000);
+ else if (delay_between_poll_us > 0)
+ udelay(delay_between_poll_us);
+ }
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+ if (field_value == condition_value)
+ return reg_val;
+ }
+
+ dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ delay_between_poll_us, time_out_num_tries,
+ func_name, line);
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ BREAK_TO_DEBUGGER();
+
+ return reg_val;
+}
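/*
 * Illustrative sketch (not part of this patch): generic_reg_wait() polls a
 * single field until it equals the expected value or the retry budget runs
 * out.  The address, shift and mask below are placeholders; in the driver
 * the REG_WAIT macro is expected to supply them from the block's shift/mask
 * tables together with __func__ and __LINE__.
 */
static void example_wait_for_idle(const struct dc_context *ctx)
{
	/* poll bit 0 of a hypothetical status register every 10 us,
	 * for up to 100 tries (~1 ms total)
	 */
	generic_reg_wait(ctx, 0x5678, 0, 0x1, 0,
			 10, 100, __func__, __LINE__);
}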
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
new file mode 100644
index 000000000000..1a9f57fb0838
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HW_TYPES_H
+#define DC_HW_TYPES_H
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "signal_types.h"
+
+/******************************************************************************
+ * Data types for Virtual HW Layer of DAL3.
+ * (see DAL3 design documents for HW Layer definition)
+ *
+ * The intended uses are:
+ * 1. Generation of pseudocode sequences for HW programming.
+ * 2. Implementation of real HW programming by HW Sequencer of DAL3.
+ *
+ * Note: do *not* add any types which are *not* used for HW programming - this
+ * will ensure separation of Logic layer from HW layer.
+ ******************************************************************************/
+
+union large_integer {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ };
+
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } u;
+
+ int64_t quad_part;
+};
+
+#define PHYSICAL_ADDRESS_LOC union large_integer
+
+enum dc_plane_addr_type {
+ PLN_ADDR_TYPE_GRAPHICS = 0,
+ PLN_ADDR_TYPE_GRPH_STEREO,
+ PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+};
+
+struct dc_plane_address {
+ enum dc_plane_addr_type type;
+ bool tmz_surface;
+ union {
+ struct{
+ PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC meta_addr;
+ union large_integer dcc_const_color;
+ } grph;
+
+ /*stereo*/
+ struct {
+ PHYSICAL_ADDRESS_LOC left_addr;
+ PHYSICAL_ADDRESS_LOC left_meta_addr;
+ union large_integer left_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC right_addr;
+ PHYSICAL_ADDRESS_LOC right_meta_addr;
+ union large_integer right_dcc_const_color;
+
+ } grph_stereo;
+
+ /*video progressive*/
+ struct {
+ PHYSICAL_ADDRESS_LOC luma_addr;
+ PHYSICAL_ADDRESS_LOC luma_meta_addr;
+ union large_integer luma_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC chroma_addr;
+ PHYSICAL_ADDRESS_LOC chroma_meta_addr;
+ union large_integer chroma_dcc_const_color;
+ } video_progressive;
+ };
+};
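/*
 * Illustrative sketch (not part of this patch): dc_plane_address is a
 * tagged union - 'type' selects which member of the anonymous union is
 * valid, so consumers are expected to switch on it before touching the
 * per-type fields.  This helper is a made-up example of that pattern.
 */
static PHYSICAL_ADDRESS_LOC example_primary_address(
	const struct dc_plane_address *address)
{
	switch (address->type) {
	case PLN_ADDR_TYPE_GRAPHICS:
		return address->grph.addr;
	case PLN_ADDR_TYPE_GRPH_STEREO:
		return address->grph_stereo.left_addr;  /* left eye first */
	case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
	default:
		return address->video_progressive.luma_addr;
	}
}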
+
+struct dc_size {
+ int width;
+ int height;
+};
+
+struct rect {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
+union plane_size {
+ /* Grph or Video will be selected
+ * based on format above:
+ * Use Video structure if
+ * format >= DalPixelFormat_VideoBegin
+ * else use Grph structure
+ */
+ struct {
+ struct rect surface_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch
+ * is 32 pixel aligned.
+ */
+ int surface_pitch;
+ } grph;
+
+ struct {
+ struct rect luma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int luma_pitch;
+
+ struct rect chroma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int chroma_pitch;
+ } video;
+};
+
+struct dc_plane_dcc_param {
+ bool enable;
+
+ union {
+ struct {
+ int meta_pitch;
+ bool independent_64b_blks;
+ } grph;
+
+ struct {
+ int meta_pitch_l;
+ bool independent_64b_blks_l;
+
+ int meta_pitch_c;
+ bool independent_64b_blks_c;
+ } video;
+ };
+};
+
+/*Displayable pixel format in fb*/
+enum surface_pixel_format {
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
+ /*TO BE REMOVED: 256-color palette*/
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_RGB565,
+ /*32 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
+ /*32 bpp swapped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR8888,
+
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
+ /*swapped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
+ /*TO BE REMOVED: swapped. XR_BIAS has no difference
+ * in pixel layout from the previous format and we can
+ * delete this after discussion*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
+ /*64 bpp */
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+ /*float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
+ /*swapped & float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+ /*grow graphics here if necessary */
+
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_INVALID
+
+ /*grow 444 video here if necessary */
+};
+
+
+
+/* Pixel format */
+enum pixel_format {
+ /*graph*/
+ PIXEL_FORMAT_UNINITIALIZED,
+ PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_RGB565,
+ PIXEL_FORMAT_ARGB8888,
+ PIXEL_FORMAT_ARGB2101010,
+ PIXEL_FORMAT_ARGB2101010_XRBIAS,
+ PIXEL_FORMAT_FP16,
+ /*video*/
+ PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_420BPP10,
+ /*end of pixel format definition*/
+ PIXEL_FORMAT_INVALID,
+
+ PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
+ PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP10,
+ PIXEL_FORMAT_UNKNOWN
+};
+
+enum tile_split_values {
+ DC_DISPLAY_MICRO_TILING = 0x0,
+ DC_THIN_MICRO_TILING = 0x1,
+ DC_DEPTH_MICRO_TILING = 0x2,
+ DC_ROTATED_MICRO_TILING = 0x3,
+};
+
+/* TODO: These values come from hardware spec. We need to readdress this
+ * if they ever change.
+ */
+enum array_mode_values {
+ DC_ARRAY_LINEAR_GENERAL = 0,
+ DC_ARRAY_LINEAR_ALLIGNED,
+ DC_ARRAY_1D_TILED_THIN1,
+ DC_ARRAY_1D_TILED_THICK,
+ DC_ARRAY_2D_TILED_THIN1,
+ DC_ARRAY_PRT_TILED_THIN1,
+ DC_ARRAY_PRT_2D_TILED_THIN1,
+ DC_ARRAY_2D_TILED_THICK,
+ DC_ARRAY_2D_TILED_X_THICK,
+ DC_ARRAY_PRT_TILED_THICK,
+ DC_ARRAY_PRT_2D_TILED_THICK,
+ DC_ARRAY_PRT_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THICK,
+ DC_ARRAY_3D_TILED_X_THICK,
+ DC_ARRAY_PRT_3D_TILED_THICK,
+};
+
+enum tile_mode_values {
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+};
+
+enum swizzle_mode_values {
+ DC_SW_LINEAR = 0,
+ DC_SW_256B_S = 1,
+ DC_SW_256_D = 2,
+ DC_SW_256_R = 3,
+ DC_SW_4KB_S = 5,
+ DC_SW_4KB_D = 6,
+ DC_SW_4KB_R = 7,
+ DC_SW_64KB_S = 9,
+ DC_SW_64KB_D = 10,
+ DC_SW_64KB_R = 11,
+ DC_SW_VAR_S = 13,
+ DC_SW_VAR_D = 14,
+ DC_SW_VAR_R = 15,
+ DC_SW_64KB_S_T = 17,
+ DC_SW_64KB_D_T = 18,
+ DC_SW_4KB_S_X = 21,
+ DC_SW_4KB_D_X = 22,
+ DC_SW_4KB_R_X = 23,
+ DC_SW_64KB_S_X = 25,
+ DC_SW_64KB_D_X = 26,
+ DC_SW_64KB_R_X = 27,
+ DC_SW_VAR_S_X = 29,
+ DC_SW_VAR_D_X = 30,
+ DC_SW_VAR_R_X = 31,
+ DC_SW_MAX
+};
+
+union dc_tiling_info {
+
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
+
+ struct {
+ unsigned int num_pipes;
+ unsigned int num_banks;
+ unsigned int pipe_interleave;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ unsigned int max_compressed_frags;
+ bool shaderEnable;
+
+ enum swizzle_mode_values swizzle;
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ } gfx9;
+};
+
+/* Rotation angle */
+enum dc_rotation_angle {
+ ROTATION_ANGLE_0 = 0,
+ ROTATION_ANGLE_90,
+ ROTATION_ANGLE_180,
+ ROTATION_ANGLE_270,
+ ROTATION_ANGLE_COUNT
+};
+
+enum dc_scan_direction {
+ SCAN_DIRECTION_UNKNOWN = 0,
+ SCAN_DIRECTION_HORIZONTAL = 1, /* 0, 180 rotation */
+ SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
+};
+
+struct dc_cursor_position {
+ uint32_t x;
+ uint32_t y;
+
+ uint32_t x_hotspot;
+ uint32_t y_hotspot;
+
+ /*
+ * This parameter indicates whether HW cursor should be enabled
+ */
+ bool enable;
+
+};
+
+struct dc_cursor_mi_param {
+ unsigned int pixel_clk_khz;
+ unsigned int ref_clk_khz;
+ unsigned int viewport_x_start;
+ unsigned int viewport_width;
+ struct fixed31_32 h_scale_ratio;
+};
+
+/* IPP related types */
+
+enum {
+ GAMMA_RGB_256_ENTRIES = 256,
+ GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
+ GAMMA_MAX_ENTRIES = 1024
+};
+
+enum dc_gamma_type {
+ GAMMA_RGB_256 = 1,
+ GAMMA_RGB_FLOAT_1024 = 2
+};
+
+struct dc_gamma {
+ struct kref refcount;
+ enum dc_gamma_type type;
+ unsigned int num_entries;
+
+ struct dc_gamma_entries {
+ struct fixed31_32 red[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 green[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 blue[GAMMA_MAX_ENTRIES];
+ } entries;
+
+ /* private to DC core */
+ struct dc_context *ctx;
+};
+
+/* Used by both ipp and opp functions */
+/* TODO: to be consolidated with enum color_space */
+
+/*
+ * This enum is for programming CURSOR_MODE register field. What this register
+ * should be programmed to depends on OS requested cursor shape flags and what
+ * we stored in the cursor surface.
+ */
+enum dc_cursor_color_format {
+ CURSOR_MODE_MONO,
+ CURSOR_MODE_COLOR_1BIT_AND,
+ CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+ CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
+};
+
+/*
+ * This is all the parameters required by DAL in order to update the cursor
+ * attributes, including the new cursor image surface address, size, hotspot
+ * location, color format, etc.
+ */
+
+union dc_cursor_attribute_flags {
+ struct {
+ uint32_t ENABLE_MAGNIFICATION:1;
+ uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
+ uint32_t HORIZONTAL_MIRROR:1;
+ uint32_t VERTICAL_MIRROR:1;
+ uint32_t INVERT_PIXEL_DATA:1;
+ uint32_t ZERO_EXPANSION:1;
+ uint32_t MIN_MAX_INVERT:1;
+ uint32_t RESERVED:25;
+ } bits;
+ uint32_t value;
+};
+
+struct dc_cursor_attributes {
+ PHYSICAL_ADDRESS_LOC address;
+ uint32_t pitch;
+
+ /* Width and height should correspond to cursor surface width x height */
+ uint32_t width;
+ uint32_t height;
+
+ enum dc_cursor_color_format color_format;
+
+ /* In case we support HW Cursor rotation in the future */
+ enum dc_rotation_angle rotation_angle;
+
+ union dc_cursor_attribute_flags attribute_flags;
+};
+
+/* OPP */
+
+enum dc_color_space {
+ COLOR_SPACE_UNKNOWN,
+ COLOR_SPACE_SRGB,
+ COLOR_SPACE_SRGB_LIMITED,
+ COLOR_SPACE_YCBCR601,
+ COLOR_SPACE_YCBCR709,
+ COLOR_SPACE_YCBCR601_LIMITED,
+ COLOR_SPACE_YCBCR709_LIMITED,
+ COLOR_SPACE_2020_RGB_FULLRANGE,
+ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_ADOBERGB,
+};
+
+enum dc_dither_option {
+ DITHER_OPTION_DEFAULT,
+ DITHER_OPTION_DISABLE,
+ DITHER_OPTION_FM6,
+ DITHER_OPTION_FM8,
+ DITHER_OPTION_FM10,
+ DITHER_OPTION_SPATIAL6_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL8_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL10_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL6,
+ DITHER_OPTION_SPATIAL8,
+ DITHER_OPTION_SPATIAL10,
+ DITHER_OPTION_TRUN6,
+ DITHER_OPTION_TRUN8,
+ DITHER_OPTION_TRUN10,
+ DITHER_OPTION_TRUN10_SPATIAL8,
+ DITHER_OPTION_TRUN10_SPATIAL6,
+ DITHER_OPTION_TRUN10_FM8,
+ DITHER_OPTION_TRUN10_FM6,
+ DITHER_OPTION_TRUN10_SPATIAL8_FM6,
+ DITHER_OPTION_SPATIAL10_FM8,
+ DITHER_OPTION_SPATIAL10_FM6,
+ DITHER_OPTION_TRUN8_SPATIAL6,
+ DITHER_OPTION_TRUN8_FM6,
+ DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_MAX = DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_INVALID
+};
+
+enum dc_quantization_range {
+ QUANTIZATION_RANGE_UNKNOWN,
+ QUANTIZATION_RANGE_FULL,
+ QUANTIZATION_RANGE_LIMITED
+};
+
+/* XFM */
+
+/* used in struct dc_plane_state */
+struct scaling_taps {
+ uint32_t v_taps;
+ uint32_t h_taps;
+ uint32_t v_taps_c;
+ uint32_t h_taps_c;
+};
+
+enum dc_timing_standard {
+ TIMING_STANDARD_UNDEFINED,
+ TIMING_STANDARD_DMT,
+ TIMING_STANDARD_GTF,
+ TIMING_STANDARD_CVT,
+ TIMING_STANDARD_CVT_RB,
+ TIMING_STANDARD_CEA770,
+ TIMING_STANDARD_CEA861,
+ TIMING_STANDARD_HDMI,
+ TIMING_STANDARD_TV_NTSC,
+ TIMING_STANDARD_TV_NTSC_J,
+ TIMING_STANDARD_TV_PAL,
+ TIMING_STANDARD_TV_PAL_M,
+ TIMING_STANDARD_TV_PAL_CN,
+ TIMING_STANDARD_TV_SECAM,
+ TIMING_STANDARD_EXPLICIT,
+ /*!< For explicit timings from EDID, VBIOS, etc.*/
+ TIMING_STANDARD_USER_OVERRIDE,
+ /*!< For mode timing override by user*/
+ TIMING_STANDARD_MAX
+};
+
+
+
+enum dc_color_depth {
+ COLOR_DEPTH_UNDEFINED,
+ COLOR_DEPTH_666,
+ COLOR_DEPTH_888,
+ COLOR_DEPTH_101010,
+ COLOR_DEPTH_121212,
+ COLOR_DEPTH_141414,
+ COLOR_DEPTH_161616,
+ COLOR_DEPTH_COUNT
+};
+
+enum dc_pixel_encoding {
+ PIXEL_ENCODING_UNDEFINED,
+ PIXEL_ENCODING_RGB,
+ PIXEL_ENCODING_YCBCR422,
+ PIXEL_ENCODING_YCBCR444,
+ PIXEL_ENCODING_YCBCR420,
+ PIXEL_ENCODING_COUNT
+};
+
+enum dc_aspect_ratio {
+ ASPECT_RATIO_NO_DATA,
+ ASPECT_RATIO_4_3,
+ ASPECT_RATIO_16_9,
+ ASPECT_RATIO_64_27,
+ ASPECT_RATIO_256_135,
+ ASPECT_RATIO_FUTURE
+};
+
+enum scanning_type {
+ SCANNING_TYPE_NODATA = 0,
+ SCANNING_TYPE_OVERSCAN,
+ SCANNING_TYPE_UNDERSCAN,
+ SCANNING_TYPE_FUTURE,
+ SCANNING_TYPE_UNDEFINED
+};
+
+struct dc_crtc_timing_flags {
+ uint32_t INTERLACE :1;
+ uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+ polarity is positive (reversed relative to the dal1/video BIOS definition)*/
+ uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1,
+ polarity is positive (reversed relative to the dal1/video BIOS definition)*/
+
+ uint32_t HORZ_COUNT_BY_TWO:1;
+
+ uint32_t EXCLUSIVE_3D :1; /* if this bit set,
+ timing can be driven in 3D format only
+ and there is no corresponding 2D timing*/
+ uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
+ (right eye = '1', left eye = '0') */
+ uint32_t SUB_SAMPLE_3D :1; /* 1 - means left/right images subsampled
+ when mixed into 3D image. 0 - means summation (3D timing is doubled)*/
+ uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D View,
+ because the corresponding 2D timing is also present in the list*/
+ uint32_t STEREO_3D_PREFERENCE :1; /* Means this is 2D timing
+ and we want to match priority of corresponding 3D timing*/
+ uint32_t Y_ONLY :1;
+
+ uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
+ uint32_t DTD_COUNTER :5; /* values 1 to 16 */
+
+ uint32_t FORCE_HDR :1;
+
+ /* HDMI 2.0 - Support scrambling for TMDS character
+ * rates less than or equal to 340Mcsc */
+ uint32_t LTE_340MCSC_SCRAMBLE:1;
+
+};
+
+enum dc_timing_3d_format {
+ TIMING_3D_FORMAT_NONE,
+ TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
+ TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
+ TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
+ /* for active DP-HDMI dongle*/
+ TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
+ TIMING_3D_FORMAT_HW_FRAME_PACKING,
+ TIMING_3D_FORMAT_SW_FRAME_PACKING,
+ TIMING_3D_FORMAT_ROW_INTERLEAVE,
+ TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
+ TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
+ TIMING_3D_FORMAT_SIDE_BY_SIDE,
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM,
+ TIMING_3D_FORMAT_SBS_SW_PACKED,
+ /* Side-by-side, packed by application/driver into 2D frame*/
+ TIMING_3D_FORMAT_TB_SW_PACKED,
+ /* Top-and-bottom, packed by application/driver into 2D frame*/
+
+ TIMING_3D_FORMAT_MAX,
+};
+
+
+struct dc_crtc_timing {
+
+ uint32_t h_total;
+ uint32_t h_border_left;
+ uint32_t h_addressable;
+ uint32_t h_border_right;
+ uint32_t h_front_porch;
+ uint32_t h_sync_width;
+
+ uint32_t v_total;
+ uint32_t v_border_top;
+ uint32_t v_addressable;
+ uint32_t v_border_bottom;
+ uint32_t v_front_porch;
+ uint32_t v_sync_width;
+
+ uint32_t pix_clk_khz;
+
+ uint32_t vic;
+ uint32_t hdmi_vic;
+ enum dc_timing_3d_format timing_3d_format;
+ enum dc_color_depth display_color_depth;
+ enum dc_pixel_encoding pixel_encoding;
+ enum dc_aspect_ratio aspect_ratio;
+ enum scanning_type scan_type;
+
+ struct dc_crtc_timing_flags flags;
+};
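/*
 * Illustrative sketch (not part of this patch): the nominal refresh rate
 * follows directly from the timing fields above, e.g. a generic 1080p mode
 * with pix_clk_khz = 148500, h_total = 2200 and v_total = 1125 gives
 * 148500 * 1000 / (2200 * 1125) ~= 60 Hz.  div_u64 is assumed to be
 * available from the kernel (it is already used elsewhere in this patch).
 */
static unsigned int example_refresh_rate_hz(const struct dc_crtc_timing *timing)
{
	if (timing->h_total == 0 || timing->v_total == 0)
		return 0;

	return (unsigned int)div_u64((uint64_t)timing->pix_clk_khz * 1000,
				     timing->h_total * timing->v_total);
}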
+
+#define MAX_TG_COLOR_VALUE 0x3FF
+struct tg_color {
+ /* Maximum 10 bits color value */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+};
+
+#endif /* DC_HW_TYPES_H */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
new file mode 100644
index 000000000000..a8698e399111
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -0,0 +1,652 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_TYPES_H_
+#define DC_TYPES_H_
+
+#include "fixed32_32.h"
+#include "fixed31_32.h"
+#include "irq_types.h"
+#include "dc_dp_types.h"
+#include "dc_hw_types.h"
+#include "dal_types.h"
+#include "grph_object_defs.h"
+
+/* forward declarations */
+struct dc_plane_state;
+struct dc_stream_state;
+struct dc_link;
+struct dc_sink;
+struct dal;
+
+/********************************
+ * Environment definitions
+ ********************************/
+enum dce_environment {
+ DCE_ENV_PRODUCTION_DRV = 0,
+ /* Emulation on FPGA, in the "Maximus" system.
+ * This environment enforces that *only* DC registers are accessed.
+ * (access to non-DC registers will hang the FPGA) */
+ DCE_ENV_FPGA_MAXIMUS,
+ /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
+ * requirements of Diagnostics team. */
+ DCE_ENV_DIAG
+};
+
+/* Note: use these macro definitions instead of direct comparison! */
+#define IS_FPGA_MAXIMUS_DC(dce_environment) \
+ (dce_environment == DCE_ENV_FPGA_MAXIMUS)
+
+#define IS_DIAG_DC(dce_environment) \
+ (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
+
+struct hw_asic_id {
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t pci_revision_id;
+ uint32_t hw_internal_rev;
+ uint32_t vram_type;
+ uint32_t vram_width;
+ uint32_t feature_flags;
+ uint32_t fake_paths_num;
+ void *atombios_base_address;
+};
+
+struct dc_context {
+ struct dc *dc;
+
+ void *driver_context; /* e.g. amdgpu_device */
+
+ struct dal_logger *logger;
+ void *cgs_device;
+
+ enum dce_environment dce_environment;
+ struct hw_asic_id asic_id;
+
+ /* todo: below should probably move to dc. to facilitate removal
+ * of AS we will store these here
+ */
+ enum dce_version dce_version;
+ struct dc_bios *dc_bios;
+ bool created_bios;
+ struct gpio_service *gpio_service;
+ struct i2caux *i2caux;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+
+#define MAX_EDID_BUFFER_SIZE 512
+#define EDID_BLOCK_SIZE 128
+#define MAX_SURFACE_NUM 4
+#define NUM_PIXEL_FORMATS 10
+
+#include "dc_ddc_types.h"
+
+enum tiling_mode {
+ TILING_MODE_INVALID,
+ TILING_MODE_LINEAR,
+ TILING_MODE_TILED,
+ TILING_MODE_COUNT
+};
+
+enum view_3d_format {
+ VIEW_3D_FORMAT_NONE = 0,
+ VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
+ VIEW_3D_FORMAT_SIDE_BY_SIDE,
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM,
+ VIEW_3D_FORMAT_COUNT,
+ VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
+};
+
+enum plane_stereo_format {
+ PLANE_STEREO_FORMAT_NONE = 0,
+ PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
+ PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
+ PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
+ PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
+ PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
+ PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
+};
+
+/* TODO: Find a way to calculate the number of bits.
+ * Please increase this if the pixel_format enum grows
+ * (counting from PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32)
+ */
+
+enum dc_edid_connector_type {
+ EDID_CONNECTOR_UNKNOWN = 0,
+ EDID_CONNECTOR_ANALOG = 1,
+ EDID_CONNECTOR_DIGITAL = 10,
+ EDID_CONNECTOR_DVI = 11,
+ EDID_CONNECTOR_HDMIA = 12,
+ EDID_CONNECTOR_MDDI = 14,
+ EDID_CONNECTOR_DISPLAYPORT = 15
+};
+
+enum dc_edid_status {
+ EDID_OK,
+ EDID_BAD_INPUT,
+ EDID_NO_RESPONSE,
+ EDID_BAD_CHECKSUM,
+ EDID_THE_SAME,
+};
+
+/* audio capability from EDID*/
+struct dc_cea_audio_mode {
+ uint8_t format_code; /* ucData[0] [6:3]*/
+ uint8_t channel_count; /* ucData[0] [2:0]*/
+ uint8_t sample_rate; /* ucData[1]*/
+ union {
+ uint8_t sample_size; /* for LPCM*/
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
+ uint8_t max_bit_rate;
+ uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
+ };
+};
+
+struct dc_edid {
+ uint32_t length;
+ uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+};
+
+/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
+ * is used. In this case we assume the speaker locations are: front left,
+ * front right and front center. */
+#define DEFAULT_SPEAKER_LOCATION 5
+
+#define DC_MAX_AUDIO_DESC_COUNT 16
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+
+union display_content_support {
+ unsigned int raw;
+ struct {
+ unsigned int valid_content_type :1;
+ unsigned int game_content :1;
+ unsigned int cinema_content :1;
+ unsigned int photo_content :1;
+ unsigned int graphics_content :1;
+ unsigned int reserved :27;
+ } bits;
+};
+
+struct dc_edid_caps {
+ /* sink identification */
+ uint16_t manufacturer_id;
+ uint16_t product_id;
+ uint32_t serial_number;
+ uint8_t manufacture_week;
+ uint8_t manufacture_year;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+
+ /* audio caps */
+ uint8_t speaker_flags;
+ uint32_t audio_mode_count;
+ struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
+ uint32_t audio_latency;
+ uint32_t video_latency;
+
+ union display_content_support content_support;
+
+ uint8_t qs_bit;
+ uint8_t qy_bit;
+
+ /*HDMI 2.0 caps*/
+ bool lte_340mcsc_scramble;
+
+ bool edid_hdmi;
+};
+
+struct view {
+ uint32_t width;
+ uint32_t height;
+};
+
+struct dc_mode_flags {
+ /* note: part of refresh rate flag*/
+ uint32_t INTERLACE :1;
+ /* native display timing*/
+ uint32_t NATIVE :1;
+ /* preferred is the recommended mode, one per display */
+ uint32_t PREFERRED :1;
+ /* true if this mode should use reduced blanking timings
+ *_not_ related to the Reduced Blanking adjustment*/
+ uint32_t REDUCED_BLANKING :1;
+ /* note: part of refreshrate flag*/
+ uint32_t VIDEO_OPTIMIZED_RATE :1;
+ /* should be reported to upper layers as mode_flags*/
+ uint32_t PACKED_PIXEL_FORMAT :1;
+ /*< preferred view*/
+ uint32_t PREFERRED_VIEW :1;
+ /* this timing should be used only in tiled mode*/
+ uint32_t TILED_MODE :1;
+ uint32_t DSE_MODE :1;
+ /* Refresh rate divider when a Miracast sink is using a
+ different rate than the output display device.
+ Must be zero for wired displays and non-zero for
+ Miracast displays*/
+ uint32_t MIRACAST_REFRESH_DIVIDER;
+};
+
+
+enum dc_timing_source {
+ TIMING_SOURCE_UNDEFINED,
+
+ /* explicitly specified by user, most important*/
+ TIMING_SOURCE_USER_FORCED,
+ TIMING_SOURCE_USER_OVERRIDE,
+ TIMING_SOURCE_CUSTOM,
+ TIMING_SOURCE_EXPLICIT,
+
+ /* explicitly specified by the display device, more important*/
+ TIMING_SOURCE_EDID_CEA_SVD_3D,
+ TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
+ TIMING_SOURCE_EDID_CEA_SVD_420,
+ TIMING_SOURCE_EDID_DETAILED,
+ TIMING_SOURCE_EDID_ESTABLISHED,
+ TIMING_SOURCE_EDID_STANDARD,
+ TIMING_SOURCE_EDID_CEA_SVD,
+ TIMING_SOURCE_EDID_CVT_3BYTE,
+ TIMING_SOURCE_EDID_4BYTE,
+ TIMING_SOURCE_VBIOS,
+ TIMING_SOURCE_CV,
+ TIMING_SOURCE_TV,
+ TIMING_SOURCE_HDMI_VIC,
+
+ /* implicitly specified by display device, still safe but less important*/
+ TIMING_SOURCE_DEFAULT,
+
+ /* only used for custom base modes */
+ TIMING_SOURCE_CUSTOM_BASE,
+
+ /* these timings might not work, least important*/
+ TIMING_SOURCE_RANGELIMIT,
+ TIMING_SOURCE_OS_FORCED,
+ TIMING_SOURCE_IMPLICIT,
+
+ /* only used by default mode list*/
+ TIMING_SOURCE_BASICMODE,
+
+ TIMING_SOURCE_COUNT
+};
+
+
+struct stereo_3d_features {
+ bool supported;
+ bool allTimings;
+ bool cloneMode;
+ bool scaling;
+ bool singleFrameSWPacked;
+};
+
+enum dc_timing_support_method {
+ TIMING_SUPPORT_METHOD_UNDEFINED,
+ TIMING_SUPPORT_METHOD_EXPLICIT,
+ TIMING_SUPPORT_METHOD_IMPLICIT,
+ TIMING_SUPPORT_METHOD_NATIVE
+};
+
+struct dc_mode_info {
+ uint32_t pixel_width;
+ uint32_t pixel_height;
+ uint32_t field_rate;
+ /* Vertical refresh rate for progressive modes.
+ * Field rate for interlaced modes.*/
+
+ enum dc_timing_standard timing_standard;
+ enum dc_timing_source timing_source;
+ struct dc_mode_flags flags;
+};
+
+enum dc_power_state {
+ DC_POWER_STATE_ON = 1,
+ DC_POWER_STATE_STANDBY,
+ DC_POWER_STATE_SUSPEND,
+ DC_POWER_STATE_OFF
+};
+
+/* DC PowerStates */
+enum dc_video_power_state {
+ DC_VIDEO_POWER_UNSPECIFIED = 0,
+ DC_VIDEO_POWER_ON = 1,
+ DC_VIDEO_POWER_STANDBY,
+ DC_VIDEO_POWER_SUSPEND,
+ DC_VIDEO_POWER_OFF,
+ DC_VIDEO_POWER_HIBERNATE,
+ DC_VIDEO_POWER_SHUTDOWN,
+ DC_VIDEO_POWER_ULPS, /* BACO or Ultra-Light-Power-State */
+ DC_VIDEO_POWER_AFTER_RESET,
+ DC_VIDEO_POWER_MAXIMUM
+};
+
+enum dc_acpi_cm_power_state {
+ DC_ACPI_CM_POWER_STATE_D0 = 1,
+ DC_ACPI_CM_POWER_STATE_D1 = 2,
+ DC_ACPI_CM_POWER_STATE_D2 = 4,
+ DC_ACPI_CM_POWER_STATE_D3 = 8
+};
+
+enum dc_connection_type {
+ dc_connection_none,
+ dc_connection_single,
+ dc_connection_mst_branch,
+ dc_connection_active_dongle
+};
+
+struct dc_csc_adjustments {
+ struct fixed31_32 contrast;
+ struct fixed31_32 saturation;
+ struct fixed31_32 brightness;
+ struct fixed31_32 hue;
+};
+
+enum {
+ MAX_LANES = 2,
+ MAX_COFUNC_PATH = 6,
+ LAYER_INDEX_PRIMARY = -1,
+};
+
+enum dpcd_downstream_port_max_bpc {
+ DOWN_STREAM_MAX_8BPC = 0,
+ DOWN_STREAM_MAX_10BPC,
+ DOWN_STREAM_MAX_12BPC,
+ DOWN_STREAM_MAX_16BPC
+};
+struct dc_dongle_caps {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool extendedCapValid;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+ bool is_dp_hdmi_s3d_converter;
+ bool is_dp_hdmi_ycbcr422_pass_through;
+ bool is_dp_hdmi_ycbcr420_pass_through;
+ bool is_dp_hdmi_ycbcr422_converter;
+ bool is_dp_hdmi_ycbcr420_converter;
+ uint32_t dp_hdmi_max_bpc;
+ uint32_t dp_hdmi_max_pixel_clk;
+};
+/* Scaling format */
+enum scaling_transformation {
+ SCALING_TRANSFORMATION_UNINITIALIZED,
+ SCALING_TRANSFORMATION_IDENTITY = 0x0001,
+ SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
+ SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
+ SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
+ SCALING_TRANSFORMATION_INVALID = 0x80000000,
+
+ /* Flag the first and last */
+ SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
+ SCALING_TRANSFORMATION_END =
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
+};
+
+enum display_content_type {
+ DISPLAY_CONTENT_TYPE_NO_DATA = 0,
+ DISPLAY_CONTENT_TYPE_GRAPHICS = 1,
+ DISPLAY_CONTENT_TYPE_PHOTO = 2,
+ DISPLAY_CONTENT_TYPE_CINEMA = 4,
+ DISPLAY_CONTENT_TYPE_GAME = 8
+};
+
+/* audio*/
+
+union audio_sample_rates {
+ struct sample_rates {
+ uint8_t RATE_32:1;
+ uint8_t RATE_44_1:1;
+ uint8_t RATE_48:1;
+ uint8_t RATE_88_2:1;
+ uint8_t RATE_96:1;
+ uint8_t RATE_176_4:1;
+ uint8_t RATE_192:1;
+ } rate;
+
+ uint8_t all;
+};
+
+struct audio_speaker_flags {
+ uint32_t FL_FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RR:1;
+ uint32_t RC:1;
+ uint32_t FLC_FRC:1;
+ uint32_t RLC_RRC:1;
+ uint32_t SUPPORT_AI:1;
+};
+
+struct audio_speaker_info {
+ uint32_t ALLSPEAKERS:7;
+ uint32_t SUPPORT_AI:1;
+};
+
+
+struct audio_info_flags {
+
+ union {
+
+ struct audio_speaker_flags speaker_flags;
+ struct audio_speaker_info info;
+
+ uint8_t all;
+ };
+};
+
+enum audio_format_code {
+ AUDIO_FORMAT_CODE_FIRST = 1,
+ AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
+
+ AUDIO_FORMAT_CODE_AC3,
+ /*Layers 1 & 2 */
+ AUDIO_FORMAT_CODE_MPEG1,
+ /*MPEG1 Layer 3 */
+ AUDIO_FORMAT_CODE_MP3,
+ /*multichannel */
+ AUDIO_FORMAT_CODE_MPEG2,
+ AUDIO_FORMAT_CODE_AAC,
+ AUDIO_FORMAT_CODE_DTS,
+ AUDIO_FORMAT_CODE_ATRAC,
+ AUDIO_FORMAT_CODE_1BITAUDIO,
+ AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
+ AUDIO_FORMAT_CODE_DTS_HD,
+ AUDIO_FORMAT_CODE_MAT_MLP,
+ AUDIO_FORMAT_CODE_DST,
+ AUDIO_FORMAT_CODE_WMAPRO,
+ AUDIO_FORMAT_CODE_LAST,
+ AUDIO_FORMAT_CODE_COUNT =
+ AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
+};
+
+struct audio_mode {
+ /* ucData[0] [6:3] */
+ enum audio_format_code format_code;
+ /* ucData[0] [2:0] */
+ uint8_t channel_count;
+ /* ucData[1] */
+ union audio_sample_rates sample_rates;
+ union {
+ /* for LPCM */
+ uint8_t sample_size;
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
+ uint8_t max_bit_rate;
+ /* for Audio Formats 9-15 */
+ uint8_t vendor_specific;
+ };
+};
+
+struct audio_info {
+ struct audio_info_flags flags;
+ uint32_t video_latency;
+ uint32_t audio_latency;
+ uint32_t display_index;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+ uint32_t manufacture_id;
+ uint32_t product_id;
+ /* PortID used for ContainerID when defined */
+ uint32_t port_id[2];
+ uint32_t mode_count;
+ /* this field must be last in this struct */
+ struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
+};
+
+struct freesync_context {
+ bool supported;
+ bool enabled;
+ bool active;
+
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int nominal_refresh_in_micro_hz;
+};
+
+struct psr_config {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
+union dmcu_psr_level {
+ struct {
+ unsigned int SKIP_CRC:1;
+ unsigned int SKIP_DP_VID_STREAM_DISABLE:1;
+ unsigned int SKIP_PHY_POWER_DOWN:1;
+ unsigned int SKIP_AUX_ACK_CHECK:1;
+ unsigned int SKIP_CRTC_DISABLE:1;
+ unsigned int SKIP_AUX_RFB_CAPTURE_CHECK:1;
+ unsigned int SKIP_SMU_NOTIFICATION:1;
+ unsigned int SKIP_AUTO_STATE_ADVANCE:1;
+ unsigned int DISABLE_PSR_ENTRY_ABORT:1;
+ unsigned int SKIP_SINGLE_OTG_DISABLE:1;
+ unsigned int RESERVED:22;
+ } bits;
+ unsigned int u32all;
+};
+
+enum physical_phy_id {
+ PHYLD_0,
+ PHYLD_1,
+ PHYLD_2,
+ PHYLD_3,
+ PHYLD_4,
+ PHYLD_5,
+ PHYLD_6,
+ PHYLD_7,
+ PHYLD_8,
+ PHYLD_9,
+ PHYLD_COUNT,
+ PHYLD_UNKNOWN = (-1L)
+};
+
+enum phy_type {
+ PHY_TYPE_UNKNOWN = 1,
+ PHY_TYPE_PCIE_PHY = 2,
+ PHY_TYPE_UNIPHY = 3,
+};
+
+struct psr_context {
+ /* ddc line */
+ enum channel_id channel;
+ /* Transmitter id */
+ enum transmitter transmitterId;
+ /* Engine Id is used for Dig Be source select */
+ enum engine_id engineId;
+ /* Controller Id used for Dig Fe source select */
+ enum controller_id controllerId;
+ /* Pcie or Uniphy */
+ enum phy_type phyType;
+ /* Physical PHY Id used by SMU interpretation */
+ enum physical_phy_id smuPhyId;
+ /* Vertical total pixels from crtc timing.
+ * This is used for static screen detection.
+ * i.e. if we want to detect half a frame,
+ * we use this to determine the hyst lines.
+ */
+ unsigned int crtcTimingVerticalTotal;
+ /* PSR supported from panel capabilities and
+ * current display configuration
+ */
+ bool psrSupportedDisplayConfig;
+ /* Whether fast link training is supported by the panel */
+ bool psrExitLinkTrainingRequired;
+ /* If RFB setup time is greater than the total VBLANK time,
+ * it is not possible for the sink to capture the video frame
+ * in the same frame the SDP is sent. In this case,
+ * the frame capture indication bit should be set and an extra
+ * static frame should be transmitted to the sink.
+ */
+ bool psrFrameCaptureIndicationReq;
+ /* Set the last possible line SDP may be transmitted without violating
+ * the RFB setup time or entering the active video frame.
+ */
+ unsigned int sdpTransmitLineNumDeadline;
+ /* The VSync rate in Hz used to calculate the
+ * step size for smooth brightness feature
+ */
+ unsigned int vsyncRateHz;
+ unsigned int skipPsrWaitForPllLock;
+ unsigned int numberOfControllers;
+ /* Unused, for future use. To indicate that first changed frame from
+ * state3 shouldn't result in psr_inactive, but rather to perform
+ * an automatic single frame rfb_update.
+ */
+ bool rfb_update_auto_en;
+ /* Number of frame before entering static screen */
+ unsigned int timehyst_frames;
+ /* Partial frames before entering static screen */
+ unsigned int hyst_lines;
+ /* # of repeated AUX transaction attempts to make before
+ * indicating failure to the driver
+ */
+ unsigned int aux_repeats;
+ /* Controls hw blocks to power down during PSR active state */
+ union dmcu_psr_level psr_level;
+ /* Controls additional delay after remote frame capture before
+ * continuing power down
+ */
+ unsigned int frame_delay;
+};
+
+struct colorspace_transform {
+ struct fixed31_32 matrix[12];
+ bool enable_remap;
+};
+
+struct csc_transform {
+ uint16_t matrix[12];
+ bool enable_adjustment;
+};
+
+enum i2c_mot_mode {
+ I2C_MOT_UNDEF,
+ I2C_MOT_TRUE,
+ I2C_MOT_FALSE
+};
+
+#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
new file mode 100644
index 000000000000..8abec0bed379
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for common 'dce' logic
+# HW object files under this folder follow a similar pattern for HW programming:
+# - register offset and/or shift + mask stored in the dce_hw struct
+# - register programming through common macros that look up the register
+#   offset/shift/mask stored in the dce_hw struct
+
+DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+
+
+AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
new file mode 100644
index 000000000000..0e0336c5af4e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_abm.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#include "atom.h"
+
+
+#define TO_DCE_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (abm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
+
+#define CTX \
+ abm_dce->base.ctx
+
+#define MCP_ABM_LEVEL_SET 0x65
+#define MCP_ABM_PIPE_SET 0x66
+#define MCP_BL_SET 0x67
+
+#define MCP_DISABLE_ABM_IMMEDIATELY 255
+
+struct abm_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+/* register settings need to be saved and restored; used at InitBacklight */
+static struct abm_backlight_registers stored_backlight_registers = {0};
+
+
+static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
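/*
 * Illustrative worked example (not part of this patch): with a 16-bit
 * period field (bl_int_count == 16), bl_period == 0xFFFF and a 50% duty
 * cycle (bl_pwm == 0x8000), the scale-up, divide and scale-down steps
 * above round back to 0x8000, i.e. the current active duty cycle
 * re-expressed as a 16-bit backlight value.
 */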
+
+static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+{
+ uint32_t backlight_24bit;
+ uint32_t backlight_17bit;
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t rounding_bit;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+
+ /*
+ * 1. Convert 8-bit value to 17 bit U1.16 format
+ * (1 integer, 16 fractional bits)
+ */
+
+ /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
+ * effectively multiplying value by 256/255
+ * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
+ */
+ backlight_24bit = level * 0x10101;
+
+ /* 1.2 The upper 16 bits of the 24 bit value form the result; the lower 8
+ * bits are used for rounding. Take the most significant of those lower 8
+ * bits as the rounding bit, e.g. for 0xEFEFEF the rounding bit is 1
+ */
+ rounding_bit = (backlight_24bit >> 7) & 1;
+
+ /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
+ * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
+ */
+ backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
+
+ /*
+ * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 2.2 Calculate the required integer active duty cycle: the upper 16 bits
+ * contain the integer component, the lower 16 bits contain the fractional
+ * component of the active duty cycle, e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_17bit * masked_pwm_period;
+
+ /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components shift by bitCount then mask 16 bits and add rounding bit
+ * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 3. Program register with updated value
+ */
+
+ /* 3.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ /* 3.2 Write new active duty cycle */
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 3.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 3.4 Wait for the pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
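/*
 * Illustrative summary of the worked example embedded in the comments
 * above (not part of this patch), for level == 0xEF and a 6-bit PWM
 * period of 0x24:
 *
 *   0xEF * 0x10101           = 0xEFEFEF   (24-bit intermediate)
 *   (0xEFEFEF >> 8) + 1      = 0xEFF0     (U1.16, rounded)
 *   0xEFF0 * 0x24            = 0x21BDC0
 *   (0x21BDC0 >> 6) & 0xFFFF = 0x86F7     (programmed duty cycle)
 *
 * i.e. an 8-bit level of 0xEF (~94%) maps to ~94% of the PWM period.
 */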
+
+static void dmcu_set_backlight_level(
+ struct dce_abm *abm_dce,
+ uint32_t level,
+ uint32_t frame_ramp,
+ uint32_t controller_id)
+{
+ unsigned int backlight_16_bit = (level * 0x10101) >> 8;
+ unsigned int backlight_17_bit = backlight_16_bit +
+ (((backlight_16_bit & 0x80) >> 7) & 1);
+ uint32_t rampingBoundary = 0xFFFF;
+ uint32_t s2;
+
+ /* set ramping boundary */
+ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+ /* setDMCUParam_Pipe */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+ MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
+
+ /* setDMCUParam_BL */
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+
+ /* write ramp */
+ if (controller_id == 0)
+ frame_ramp = 0;
+ REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* UpdateRequestedBacklightLevel */
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dce_abm_init(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+}
+
+static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ return (backlight >> 8);
+}
+
+static bool dce_abm_set_level(struct abm *abm, uint32_t level)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, level);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_immediate_disable(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_init_backlight(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ uint32_t value;
+
+ /* It must not be 0, so we have to restore the registers.
+ * BIOS bug workaround - the period resets to zero,
+ * so restore from the cached values, which are always correct
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+ if (value == 0 || value == 1) {
+ if (stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ /* Have driver take backlight control
+ * TakeBacklightControl(true)
+ */
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ /* Enable the backlight output */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ /* Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ return true;
+}
+
+static bool is_dmcu_initialized(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int dmcu_uc_reset;
+
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ return !dmcu_uc_reset;
+}
+
+static bool dce_abm_set_backlight_level(
+ struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n",
+ backlight_level, backlight_level);
+
+ /* If DMCU is in reset state, DMCU is uninitialized */
+ if (is_dmcu_initialized(abm))
+ dmcu_set_backlight_level(abm_dce,
+ backlight_level,
+ frame_ramp,
+ controller_id);
+ else
+ driver_set_backlight_level(abm_dce, backlight_level);
+
+ return true;
+}
+
+static const struct abm_funcs dce_funcs = {
+ .abm_init = dce_abm_init,
+ .set_abm_level = dce_abm_set_level,
+ .init_backlight = dce_abm_init_backlight,
+ .set_backlight_level = dce_abm_set_backlight_level,
+ .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
+ .is_dmcu_initialized = is_dmcu_initialized
+};
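/*
 * Illustrative sketch (not part of this patch): ABM consumers are expected
 * to go through the abm_funcs vtable rather than calling the DCE
 * implementation directly, which lets other ASIC generations supply their
 * own backend.  The caller below is a made-up example of that indirection.
 */
static void example_request_abm_level(struct abm *abm, unsigned int level)
{
	if (abm && abm->funcs && abm->funcs->set_abm_level)
		abm->funcs->set_abm_level(abm, level);
}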
+
+static void dce_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ abm_dce->base.funcs = &dce_funcs;
+
+ return &abm_dce->base;
+}
+
+void dce_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
new file mode 100644
index 000000000000..59e909ec88f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_ABM_H_
+#define _DCE_ABM_H_
+
+#include "abm.h"
+
+#define ABM_COMMON_REG_LIST_DCE_BASE() \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(LVTMA_PWRSEQ_REF_DIV), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(DMCU_STATUS)
+
+#define ABM_DCE110_COMMON_REG_LIST() \
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DC_ABM1_HG_SAMPLE_RATE), \
+ SR(DC_ABM1_LS_SAMPLE_RATE), \
+ SR(BL1_PWM_BL_UPDATE_SAMPLE_RATE), \
+ SR(DC_ABM1_HG_MISC_CTRL), \
+ SR(DC_ABM1_IPCSC_COEFF_SEL), \
+ SR(BL1_PWM_CURRENT_ABM_LEVEL), \
+ SR(BL1_PWM_TARGET_ABM_LEVEL), \
+ SR(BL1_PWM_USER_LEVEL), \
+ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
+ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+ SR(BIOS_SCRATCH_2)
+
+#define ABM_DCN10_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+
+#define ABM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
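Note: the ABM_SF() entry above is a token-pasting helper; each use expands into a designated
initializer whose right-hand side is an identifier generated by the register headers. A minimal
sketch of how the mask_sh lists below are typically instantiated (the __SHIFT/_MASK suffix
convention is assumed from the auto-generated sh_mask headers, which are not part of this file):

        /* ABM_SF(BL_PWM_CNTL, BL_PWM_EN, __SHIFT) expands to:
         *     .BL_PWM_EN = BL_PWM_CNTL__BL_PWM_EN__SHIFT
         * so one list can fill both the shift and the mask structs:
         */
        static const struct dce_abm_shift abm_shift = {
                ABM_MASK_SH_LIST_DCE110(__SHIFT)
        };
        static const struct dce_abm_mask abm_mask = {
                ABM_MASK_SH_LIST_DCE110(_MASK)
        };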
+#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
+ ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
+ ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCN10(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_REG_FIELD_LIST(type) \
+ type ABM1_HG_NUM_OF_BINS_SEL; \
+ type ABM1_HG_VMAX_SEL; \
+ type ABM1_HG_BIN_BITWIDTH_SIZE_SEL; \
+ type ABM1_IPCSC_COEFF_SEL_R; \
+ type ABM1_IPCSC_COEFF_SEL_G; \
+ type ABM1_IPCSC_COEFF_SEL_B; \
+ type BL1_PWM_CURRENT_ABM_LEVEL; \
+ type BL1_PWM_TARGET_ABM_LEVEL; \
+ type BL1_PWM_USER_LEVEL; \
+ type ABM1_LS_MIN_PIXEL_VALUE_THRES; \
+ type ABM1_LS_MAX_PIXEL_VALUE_THRES; \
+ type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type MASTER_COMM_INTERRUPT; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_CMD_REG_BYTE1; \
+ type MASTER_COMM_CMD_REG_BYTE2; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type UC_IN_RESET; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_abm_shift {
+ ABM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_abm_mask {
+ ABM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_abm_registers {
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t LVTMA_PWRSEQ_REF_DIV;
+ uint32_t DC_ABM1_HG_SAMPLE_RATE;
+ uint32_t DC_ABM1_LS_SAMPLE_RATE;
+ uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
+ uint32_t DC_ABM1_HG_MISC_CTRL;
+ uint32_t DC_ABM1_IPCSC_COEFF_SEL;
+ uint32_t BL1_PWM_CURRENT_ABM_LEVEL;
+ uint32_t BL1_PWM_TARGET_ABM_LEVEL;
+ uint32_t BL1_PWM_USER_LEVEL;
+ uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
+ uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t BIOS_SCRATCH_2;
+ uint32_t DMCU_STATUS;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+};
+
+struct dce_abm {
+ struct abm base;
+ const struct dce_abm_registers *regs;
+ const struct dce_abm_shift *abm_shift;
+ const struct dce_abm_mask *abm_mask;
+};
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+
+void dce_abm_destroy(struct abm **abm);
+
+#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
new file mode 100644
index 000000000000..81c40f8864db
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dce_audio.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define DCE_AUD(audio)\
+ container_of(audio, struct dce_audio, base)
+
+#define CTX \
+ aud->base.ctx
+#define REG(reg)\
+ (aud->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ aud->shifts->field_name, aud->masks->field_name
+
+#define IX_REG(reg)\
+ ix ## reg
+
+#define AZ_REG_READ(reg_name) \
+ read_indirect_azalia_reg(audio, IX_REG(reg_name))
+
+#define AZ_REG_WRITE(reg_name, value) \
+ write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
+
+static void write_indirect_azalia_reg(struct audio *audio,
+ uint32_t reg_index,
+ uint32_t reg_data)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
+ AZALIA_ENDPOINT_REG_DATA, reg_data);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, reg_data);
+}
+
+static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t value = 0;
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, value);
+
+ return value;
+}
+
+static bool is_audio_format_supported(
+ const struct audio_info *audio_info,
+ enum audio_format_code audio_format_code,
+ uint32_t *format_index)
+{
+ uint32_t index;
+ uint32_t max_channe_index = 0;
+ bool found = false;
+
+ if (audio_info == NULL)
+ return found;
+
+ /* pass through whole array */
+ for (index = 0; index < audio_info->mode_count; index++) {
+ if (audio_info->modes[index].format_code == audio_format_code) {
+ if (found) {
+ /* format has multiple entries, choose the one with
+ * the highest number of channels */
+ if (audio_info->modes[index].channel_count >
+ audio_info->modes[max_channe_index].channel_count) {
+ max_channe_index = index;
+ }
+ } else {
+ /* format found, save its index */
+ found = true;
+ max_channe_index = index;
+ }
+ }
+ }
+
+ /* return index */
+ if (found && format_index != NULL)
+ *format_index = max_channe_index;
+
+ return found;
+}
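Note: the loop above prefers, among multiple EDID-derived entries with the same format code,
the one reporting the most channels. A stripped-down restatement of that selection rule, using
a hypothetical stub type (the real struct audio_mode comes from the audio headers):

        struct mode_stub { int format_code; unsigned int channel_count; };

        /* Return the index of the matching entry with the highest channel
         * count, or n when the format code is not present at all. */
        static unsigned int pick_mode(const struct mode_stub *m, unsigned int n,
                                      int code)
        {
                unsigned int i, best = 0;
                bool found = false;

                for (i = 0; i < n; i++) {
                        if (m[i].format_code != code)
                                continue;
                        if (!found || m[i].channel_count > m[best].channel_count)
                                best = i;
                        found = true;
                }
                return found ? best : n;
        }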
+
+/*For HDMI, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_hdmi(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ uint32_t samples;
+ uint32_t h_blank;
+ bool limit_freq_to_48_khz = false;
+ bool limit_freq_to_88_2_khz = false;
+ bool limit_freq_to_96_khz = false;
+ bool limit_freq_to_174_4_khz = false;
+
+ /* For two-channel audio return whatever the sink supports, unmodified */
+ if (channel_count > 2) {
+
+ /* Based on HDMI spec 1.3 Table 7.5 */
+ if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced) &&
+ !(crtc_info->pixel_repetition == 2 ||
+ crtc_info->pixel_repetition == 4)) {
+ limit_freq_to_48_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ (crtc_info->interlaced) &&
+ (crtc_info->pixel_repetition == 2)) {
+ limit_freq_to_88_2_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 54000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced)) {
+ limit_freq_to_174_4_khz = true;
+ }
+ }
+
+ /* Also do some calculation for the available Audio Bandwidth for the
+ * 8 ch (i.e. for the Layout 1 => ch > 2)
+ */
+ h_blank = crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ h_blank *= crtc_info->pixel_repetition;
+
+ /*based on HDMI spec 1.3 Table 7.5 */
+ h_blank -= 58;
+ /*for Control Period */
+ h_blank -= 16;
+
+ samples = h_blank * 10;
+ /* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
+ * of Audio samples per line multiplied by 10 - Layout 1)
+ */
+ samples /= 32;
+ samples *= crtc_info->v_active;
+ /*Number of samples multiplied by 10, per second */
+ samples *= crtc_info->refresh_rate;
+ /*Number of Audio samples per second */
+ samples /= 10;
+
+ /* @todo do it after deep color is implemented
+ * 8xx - deep color bandwidth scaling
+ * Extra bandwidth is available in deep color because the link runs faster than
+ * pixel rate. This has the effect of allowing more tmds characters to
+ * be transmitted during blank
+ */
+
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_888:
+ samples *= 4;
+ break;
+ case COLOR_DEPTH_101010:
+ samples *= 5;
+ break;
+ case COLOR_DEPTH_121212:
+ samples *= 6;
+ break;
+ default:
+ samples *= 4;
+ break;
+ }
+
+ samples /= 4;
+
+ /*check limitation*/
+ if (samples < 88200)
+ limit_freq_to_48_khz = true;
+ else if (samples < 96000)
+ limit_freq_to_88_2_khz = true;
+ else if (samples < 176400)
+ limit_freq_to_96_khz = true;
+ else if (samples < 192000)
+ limit_freq_to_174_4_khz = true;
+
+ if (sample_rates != NULL) {
+ /* limit frequencies */
+ if (limit_freq_to_174_4_khz)
+ sample_rates->rate.RATE_192 = 0;
+
+ if (limit_freq_to_96_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ }
+ if (limit_freq_to_88_2_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ }
+ if (limit_freq_to_48_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ sample_rates->rate.RATE_88_2 = 0;
+ }
+ }
+}
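As a sanity check on the arithmetic above, here is a standalone re-computation for a standard
CEA-861 1080p60 timing (h_total 2200, h_active 1920, v_active 1080, 60 Hz, 8 bpc, no pixel
repetition). The numbers are illustrative; the sketch only mirrors the integer math of
check_audio_bandwidth_hdmi() for the layout-1 (more than two channel) case:

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t hdmi_max_sample_rate(uint32_t h_total, uint32_t h_active,
                                             uint32_t v_active, uint32_t refresh_rate)
        {
                uint32_t h_blank = h_total - h_active;
                uint32_t samples;

                h_blank -= 58;          /* HDMI 1.3, Table 7-5 overhead */
                h_blank -= 16;          /* control period               */

                samples = h_blank * 10; /* audio packets (x10) per line */
                samples /= 32;
                samples *= v_active;
                samples *= refresh_rate;
                samples /= 10;

                samples *= 4;           /* COLOR_DEPTH_888 scaling      */
                samples /= 4;

                return samples;
        }

        int main(void)
        {
                /* 1080p60: prints 414720 samples/s, i.e. well above 192 kHz,
                 * so none of the limit_freq_to_* flags would be set. */
                printf("%u\n", hdmi_max_sample_rate(2200, 1920, 1080, 60));
                return 0;
        }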
+
+/*For DP SST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpsst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+/*For DP MST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpmst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+static void check_audio_bandwidth(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ enum signal_type signal,
+ union audio_sample_rates *sample_rates)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ check_audio_bandwidth_hdmi(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ check_audio_bandwidth_dpsst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ check_audio_bandwidth_dpmst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ default:
+ break;
+ }
+}
+
+/* expose/not expose HBR capability to Audio driver */
+static void set_high_bit_rate_capable(
+ struct audio *audio,
+ bool capable)
+{
+ uint32_t value = 0;
+
+ /* set high bit rate audio capable*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
+
+ set_reg_field_value(value, capable,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
+ HBR_CAPABLE);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
+}
+
+/* set video latency, in units of ms/2+1 */
+static void set_video_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if ((latency_in_ms < 0) || (latency_in_ms > 255))
+ return;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ VIDEO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+/* set audio latency, in units of ms/2+1 */
+static void set_audio_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if (latency_in_ms < 0)
+ latency_in_ms = 0;
+
+ if (latency_in_ms > 255)
+ latency_in_ms = 255;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ AUDIO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+void dce_aud_az_enable(struct audio *audio)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+ uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_disable(struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_configure(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
+ uint32_t value;
+ uint32_t field = 0;
+ enum audio_format_code audio_format_code;
+ uint32_t format_index;
+ uint32_t index;
+ bool is_ac3_supported = false;
+ union audio_sample_rates sample_rate;
+ uint32_t strlen = 0;
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ /* Speaker Allocation */
+ /*
+ uint32_t value;
+ uint32_t field = 0;*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+
+ set_reg_field_value(value,
+ speakers,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ SPEAKER_ALLOCATION);
+
+ /* LFE_PLAYBACK_LEVEL = LFEPBL
+ * LFEPBL = 0 : Unknown or refer to other information
+ * LFEPBL = 1 : 0dB playback
+ * LFEPBL = 2 : +10dB playback
+ * LFEPBL = 3 : Reserved
+ */
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ LFE_PLAYBACK_LEVEL);
+ /* todo: according to reg spec LFE_PLAYBACK_LEVEL is read only.
+ * why are we writing to it? DCE8 does not write this */
+
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+
+ field = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ field &= ~0x1;
+
+ set_reg_field_value(value,
+ field,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ /* set audio for output signal */
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ break;
+
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
+
+ /* Audio Descriptors */
+ /* pass through all formats */
+ for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
+ format_index++) {
+ audio_format_code =
+ (AUDIO_FORMAT_CODE_FIRST + format_index);
+
+ /* those are unsupported, skip programming */
+ if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
+ audio_format_code == AUDIO_FORMAT_CODE_DST)
+ continue;
+
+ value = 0;
+
+ /* check if supported */
+ if (is_audio_format_supported(
+ audio_info, audio_format_code, &index)) {
+ const struct audio_mode *audio_mode =
+ &audio_info->modes[index];
+ union audio_sample_rates sample_rates =
+ audio_mode->sample_rates;
+ uint8_t byte2 = audio_mode->max_bit_rate;
+
+ /* adjust specific properties */
+ switch (audio_format_code) {
+ case AUDIO_FORMAT_CODE_LINEARPCM: {
+ check_audio_bandwidth(
+ crtc_info,
+ audio_mode->channel_count,
+ signal,
+ &sample_rates);
+
+ byte2 = audio_mode->sample_size;
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES_STEREO);
+ }
+ break;
+ case AUDIO_FORMAT_CODE_AC3:
+ is_ac3_supported = true;
+ break;
+ case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
+ case AUDIO_FORMAT_CODE_DTS_HD:
+ case AUDIO_FORMAT_CODE_MAT_MLP:
+ case AUDIO_FORMAT_CODE_DST:
+ case AUDIO_FORMAT_CODE_WMAPRO:
+ byte2 = audio_mode->vendor_specific;
+ break;
+ default:
+ break;
+ }
+
+ /* fill audio format data */
+ set_reg_field_value(value,
+ audio_mode->channel_count - 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ MAX_CHANNELS);
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES);
+
+ set_reg_field_value(value,
+ byte2,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ DESCRIPTOR_BYTE_2);
+ } /* if */
+
+ AZ_REG_WRITE(
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
+ value);
+ } /* for */
+
+ if (is_ac3_supported)
+ /* todo: this register is global; why program a global register? */
+ REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
+ 0x05);
+
+ /* check for 192khz/8-Ch support for HBR requirements */
+ sample_rate.all = 0;
+ sample_rate.rate.RATE_192 = 1;
+
+ check_audio_bandwidth(
+ crtc_info,
+ 8,
+ signal,
+ &sample_rate);
+
+ set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
+
+ /* Audio and Video Lipsync */
+ set_video_latency(audio, audio_info->video_latency);
+ set_audio_latency(audio, audio_info->audio_latency);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->manufacture_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ MANUFACTURER_ID);
+
+ set_reg_field_value(value, audio_info->product_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ PRODUCT_ID);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ value);
+
+ value = 0;
+
+ /*get display name string length */
+ while (audio_info->display_name[strlen++] != '\0') {
+ if (strlen >=
+ MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
+ break;
+ }
+ set_reg_field_value(value, strlen,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ SINK_DESCRIPTION_LEN);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ value);
+
+ /*
+ *write the port ID:
+ *PORT_ID0 = display index
+ *PORT_ID1 = 16bit BDF
+ *(format MSB->LSB: 8bit Bus, 5bit Device, 3bit Function)
+ */
+
+ value = 0;
+
+ set_reg_field_value(value, audio_info->port_id[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
+ PORT_ID0);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->port_id[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
+ PORT_ID1);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
+
+ /*write the 18 char monitor string */
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION0);
+
+ set_reg_field_value(value, audio_info->display_name[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION1);
+
+ set_reg_field_value(value, audio_info->display_name[2],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION2);
+
+ set_reg_field_value(value, audio_info->display_name[3],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION3);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[4],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION4);
+
+ set_reg_field_value(value, audio_info->display_name[5],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION5);
+
+ set_reg_field_value(value, audio_info->display_name[6],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION6);
+
+ set_reg_field_value(value, audio_info->display_name[7],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION7);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[8],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION8);
+
+ set_reg_field_value(value, audio_info->display_name[9],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION9);
+
+ set_reg_field_value(value, audio_info->display_name[10],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION10);
+
+ set_reg_field_value(value, audio_info->display_name[11],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION11);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[12],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION12);
+
+ set_reg_field_value(value, audio_info->display_name[13],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION13);
+
+ set_reg_field_value(value, audio_info->display_name[14],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION14);
+
+ set_reg_field_value(value, audio_info->display_name[15],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION15);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[16],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION16);
+
+ set_reg_field_value(value, audio_info->display_name[17],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION17);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+}
+
+/*
+* todo: wall clock related functionality probably belongs in clock_src.
+*/
+
+/* search pixel clock value for Azalia HDMI Audio */
+static void get_azalia_clock_info_hdmi(
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* audio_dto_phase= 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase =
+ 24 * 10000;
+
+ /* audio_dto_module = PCLKFrequency * 10,000;
+ * [khz] -> [100Hz] */
+ azalia_clock_info->audio_dto_module =
+ actual_pixel_clock_in_khz * 10;
+}
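Both values are programmed in 100 Hz units. For a 1080p60 HDMI stream the numbers work out as
below; the assumption that the DTO output equals source_clock * phase / module matches the
read-back formula used later in this patch by dce110_get_d_to_pixel_rate_in_hz():

        /* 148,500 kHz pixel clock (1080p60):
         *   phase  = 24 * 10000   = 240000   (24 MHz in 100 Hz units)
         *   module = 148500 * 10  = 1485000  (pixel clock in 100 Hz units)
         *   output = source * 240000 / 1485000 ~= source * 0.1616
         * i.e. a source running at the pixel rate is scaled back down to the
         * 24 MHz audio reference.
         */
        uint32_t phase  = 24 * 10000;
        uint32_t module = 148500 * 10;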
+
+static void get_azalia_clock_info_dp(
+ uint32_t requested_pixel_clock_in_khz,
+ const struct audio_pll_info *pll_info,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* Reported dpDtoSourceClockInkhz value for
+ * DCE8 already adjusted for SS, do not need any
+ * adjustment here anymore
+ */
+
+ /*audio_dto_phase = 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase = 24 * 10000;
+
+ /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
+ * [khz] ->[100Hz] */
+ azalia_clock_info->audio_dto_module =
+ pll_info->dp_dto_source_clock_in_khz * 10;
+}
+
+void dce_aud_wall_dto_setup(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ struct azalia_clock_info clock_info = { 0 };
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t src_sel;
+
+ /*DTO0 Programming goal:
+ -generate 24MHz, 128*Fs from 24MHz
+ -use DTO0 when an active HDMI port is connected
+ (optionally a DP is connected) */
+
+ /* calculate DTO settings */
+ get_azalia_clock_info_hdmi(
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &clock_info);
+
+ dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,\
+ "\n%s:Input::requested_pixel_clock = %d"\
+ "calculated_pixel_clock =%d\n"\
+ "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\
+ crtc_info->requested_pixel_clock,\
+ crtc_info->calculated_pixel_clock,\
+ clock_info.audio_dto_module,\
+ clock_info.audio_dto_phase);
+
+ /* On TN/SI, Program DTO source select and DTO select before
+ programming DTO modulo and DTO phase. These bits must be
+ programmed first, otherwise there will be no HDMI audio at boot
+ up. This is a HW sequence change (different from old ASICs).
+ Caution when changing this programming sequence.
+
+ HDMI enabled, using DTO0
+ program master CRTC for DTO0 */
+ src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+ REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+ DCCG_AUDIO_DTO_SEL, 0);
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+ DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+ DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+ } else {
+ /*DTO1 Programming goal:
+ -generate 24MHz, 512*Fs, 128*Fs from 24MHz
+ -default is to use DTO1, and switch to DTO0 when an audio
+ master HDMI port is connected
+ -use as default for DP
+
+ calculate DTO settings */
+ get_azalia_clock_info_dp(
+ crtc_info->requested_pixel_clock,
+ pll_info,
+ &clock_info);
+
+ /* Program DTO select before programming DTO modulo and DTO
+ phase. default to use DTO1 */
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+ /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
+ * Select 512fs for DP TODO: web register definition
+ * does not match register header file
+ * DCE11 version it's commented out while DCE8 it's set to 1
+ */
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+ DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+ DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
+
+ }
+}
+
+static bool dce_aud_endpoint_valid(struct audio *audio)
+{
+ uint32_t value;
+ uint32_t port_connectivity;
+
+ value = AZ_REG_READ(
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+
+ port_connectivity = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+ PORT_CONNECTIVITY);
+
+ return !(port_connectivity == 1);
+}
+
+/* initialize HW state */
+void dce_aud_hw_init(
+ struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* we only need to program the following registers once, so we only do
+ it for instance 0 */
+ if (audio->inst != 0)
+ return;
+
+ /* Support R5 - 32khz
+ * Support R6 - 44.1khz
+ * Support R7 - 48khz
+ */
+ /*disable clock gating before write to endpoint register*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
+ AUDIO_RATE_CAPABILITIES, 0x70);
+
+ /*Keep alive bit to verify HW block in BU. */
+ REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
+ CLKSTOP, 1,
+ EPSS, 1);
+}
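The 0x70 written to AUDIO_RATE_CAPABILITIES is consistent with the HD Audio "Supported PCM
Size, Rates" bit layout (bit 4 = 32 kHz, bit 5 = 44.1 kHz, bit 6 = 48 kHz), which is presumably
what the R5/R6/R7 comment above refers to. The bit positions below are an assumption from that
spec, not something defined in this patch:

        #define RATE_32_KHZ   (1u << 4)   /* assumed HDA rate-capability bits */
        #define RATE_44_1_KHZ (1u << 5)
        #define RATE_48_KHZ   (1u << 6)

        /* RATE_32_KHZ | RATE_44_1_KHZ | RATE_48_KHZ == 0x70 */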
+
+static const struct audio_funcs funcs = {
+ .endpoint_valid = dce_aud_endpoint_valid,
+ .hw_init = dce_aud_hw_init,
+ .wall_dto_setup = dce_aud_wall_dto_setup,
+ .az_enable = dce_aud_az_enable,
+ .az_disable = dce_aud_az_disable,
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+};
+
+void dce_aud_destroy(struct audio **audio)
+{
+ struct dce_audio *aud = DCE_AUD(*audio);
+
+ kfree(aud);
+ *audio = NULL;
+}
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks
+ )
+{
+ struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+
+ if (audio == NULL) {
+ ASSERT_CRITICAL(audio);
+ return NULL;
+ }
+
+ audio->base.ctx = ctx;
+ audio->base.inst = inst;
+ audio->base.funcs = &funcs;
+
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+
+ return &audio->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
new file mode 100644
index 000000000000..0dc5ff137c7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_AUDIO_DCE_110_H__
+#define __DAL_AUDIO_DCE_110_H__
+
+#include "audio.h"
+
+#define AUD_COMMON_REG_LIST(id)\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id),\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DCCG_AUDIO_DTO0_MODULE),\
+ SR(DCCG_AUDIO_DTO0_PHASE),\
+ SR(DCCG_AUDIO_DTO1_MODULE),\
+ SR(DCCG_AUDIO_DTO1_PHASE)
+
+
+ /* set field name */
+#define SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh)
+
+#define AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+
+
+struct dce_audio_registers {
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_DATA;
+
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES;
+
+ uint32_t DCCG_AUDIO_DTO_SOURCE;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+};
+
+struct dce_audio_shift {
+ uint8_t AZALIA_ENDPOINT_REG_INDEX;
+ uint8_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint8_t AUDIO_RATE_CAPABILITIES;
+ uint8_t CLKSTOP;
+ uint8_t EPSS;
+
+ uint8_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint8_t DCCG_AUDIO_DTO_SEL;
+ uint8_t DCCG_AUDIO_DTO0_MODULE;
+ uint8_t DCCG_AUDIO_DTO0_PHASE;
+ uint8_t DCCG_AUDIO_DTO1_MODULE;
+ uint8_t DCCG_AUDIO_DTO1_PHASE;
+ uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_aduio_mask {
+ uint32_t AZALIA_ENDPOINT_REG_INDEX;
+ uint32_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+ uint32_t CLKSTOP;
+ uint32_t EPSS;
+
+ uint32_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint32_t DCCG_AUDIO_DTO_SEL;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+ uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_audio {
+ struct audio base;
+ const struct dce_audio_registers *regs;
+ const struct dce_audio_shift *shifts;
+ const struct dce_aduio_mask *masks;
+};
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks);
+
+void dce_aud_destroy(struct audio **audio);
+
+void dce_aud_hw_init(struct audio *audio);
+
+void dce_aud_az_enable(struct audio *audio);
+void dce_aud_az_disable(struct audio *audio);
+
+void dce_aud_az_configure(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+void dce_aud_wall_dto_setup(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+#endif /*__DAL_AUDIO_DCE_110_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
new file mode 100644
index 000000000000..31280d252753
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -0,0 +1,1383 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "dc_types.h"
+#include "core_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+
+#include "dce_clock_source.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (clk_src->regs->reg)
+
+#define CTX \
+ clk_src->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
+
+#define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6
+#define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
+#define MAX_PLL_CALC_ERROR 0xFFFFFFFF
+
+static const struct spread_spectrum_data *get_ss_data_entry(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal,
+ uint32_t pix_clk_khz)
+{
+
+ uint32_t entrys_num;
+ uint32_t i;
+ struct spread_spectrum_data *ss_parm = NULL;
+ struct spread_spectrum_data *ret = NULL;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ ss_parm = clk_src->dvi_ss_params;
+ entrys_num = clk_src->dvi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ss_parm = clk_src->hdmi_ss_params;
+ entrys_num = clk_src->hdmi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ ss_parm = clk_src->dp_ss_params;
+ entrys_num = clk_src->dp_ss_params_cnt;
+ break;
+
+ default:
+ ss_parm = NULL;
+ entrys_num = 0;
+ break;
+ }
+
+ if (ss_parm == NULL)
+ return ret;
+
+ for (i = 0; i < entrys_num; ++i, ++ss_parm) {
+ if (ss_parm->freq_range_khz >= pix_clk_khz) {
+ ret = ss_parm;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+* Function: calculate_fb_and_fractional_fb_divider
+*
+* DESCRIPTION: Calculates the feedback and fractional feedback divider values
+*
+* PARAMETERS:
+* target_pix_clk_khz            Desired frequency in kHz
+* ref_divider                   Reference divider (already known)
+* post_divider                  Post divider (already known)
+* feedback_divider_param        Pointer where to store the
+*                               calculated feedback divider value
+* fract_feedback_divider_param  Pointer where to store the
+*                               calculated fractional feedback divider value
+*
+* RETURNS:
+* It fills the locations pointed to by feedback_divider_param
+* and fract_feedback_divider_param.
+* It returns - true if the feedback divider is not 0
+*            - false otherwise (should never happen)
+*/
+static bool calculate_fb_and_fractional_fb_divider(
+ struct calc_pll_clock_source *calc_pll_cs,
+ uint32_t target_pix_clk_khz,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t *feedback_divider_param,
+ uint32_t *fract_feedback_divider_param)
+{
+ uint64_t feedback_divider;
+
+ feedback_divider =
+ (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ feedback_divider *= 10;
+ /* additional factor, since we divide by 10 afterwards */
+ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
+ feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz);
+
+/* Round to the required precision.
+ * The following code replaces the old code (ullfeedbackDivider + 5)/10:
+ * for example, if the difference between the number of fractional
+ * feedback decimal points and the fractional FB divider precision is 2,
+ * the equation becomes (ullfeedbackDivider + 5*100) / (10*100) */
+
+ feedback_divider += (uint64_t)
+ (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider =
+ div_u64(feedback_divider,
+ calc_pll_cs->fract_fb_divider_precision_factor * 10);
+ feedback_divider *= (uint64_t)
+ (calc_pll_cs->fract_fb_divider_precision_factor);
+
+ *feedback_divider_param =
+ div_u64_rem(
+ feedback_divider,
+ calc_pll_cs->fract_fb_divider_factor,
+ fract_feedback_divider_param);
+
+ if (*feedback_divider_param != 0)
+ return true;
+ return false;
+}
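The arithmetic above follows from VCO = ref_freq / ref_divider * (FB + fractFB / factor) and
pixel clock = VCO / post_divider, so the total (scaled) feedback divider is
target * ref_divider * post_divider * factor / ref_freq. A worked sketch with hypothetical
values (27,000 kHz reference, dividers of 2, 148,500 kHz target); a fract_fb_divider_factor of
1000 is assumed, and the extra rounding step tied to fract_fb_divider_precision_factor is
omitted:

        uint64_t target_khz = 148500, ref_div = 2, post_div = 2;
        uint64_t ref_freq_khz = 27000, factor = 1000;

        /* 148500 * 2 * 2 * 1000 / 27000 = 22000 */
        uint64_t fb_scaled = target_khz * ref_div * post_div * factor / ref_freq_khz;
        uint64_t fb        = fb_scaled / factor;   /* 22 */
        uint64_t fract_fb  = fb_scaled % factor;   /*  0 */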
+
+/**
+*calc_fb_divider_checking_tolerance
+*
+*DESCRIPTION: Calculates feedback and fractional feedback divider values
+* for the passed reference and post divider, checking for tolerance.
+*PARAMETERS:
+* pll_settings  Pointer to structure
+* ref_divider   Reference divider (already known)
+* post_divider  Post divider (already known)
+* tolerance     Tolerance for the calculated pixel clock to be within
+*
+*RETURNS:
+* It fills the pll_settings structure with the PLL divider values
+* if the calculated values are within the required tolerance.
+* It returns - true if the error is within tolerance
+*            - false if the error is not within tolerance
+*/
+static bool calc_fb_divider_checking_tolerance(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t tolerance)
+{
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t actual_calculated_clock_khz;
+ uint32_t abs_err;
+ uint64_t actual_calc_clk_khz;
+
+ calculate_fb_and_fractional_fb_divider(
+ calc_pll_cs,
+ pll_settings->adjusted_pix_clk,
+ ref_divider,
+ post_divider,
+ &feedback_divider,
+ &fract_feedback_divider);
+
+ /*Actual calculated value*/
+ actual_calc_clk_khz = (uint64_t)(feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor) +
+ fract_feedback_divider;
+ actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
+ actual_calc_clk_khz =
+ div_u64(actual_calc_clk_khz,
+ ref_divider * post_divider *
+ calc_pll_cs->fract_fb_divider_factor);
+
+ actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz);
+
+ abs_err = (actual_calculated_clock_khz >
+ pll_settings->adjusted_pix_clk)
+ ? actual_calculated_clock_khz -
+ pll_settings->adjusted_pix_clk
+ : pll_settings->adjusted_pix_clk -
+ actual_calculated_clock_khz;
+
+ if (abs_err <= tolerance) {
+ /*found good values*/
+ pll_settings->reference_freq = calc_pll_cs->ref_freq_khz;
+ pll_settings->reference_divider = ref_divider;
+ pll_settings->feedback_divider = feedback_divider;
+ pll_settings->fract_feedback_divider = fract_feedback_divider;
+ pll_settings->pix_clk_post_divider = post_divider;
+ pll_settings->calculated_pix_clk =
+ actual_calculated_clock_khz;
+ pll_settings->vco_freq =
+ actual_calculated_clock_khz * post_divider;
+ return true;
+ }
+ return false;
+}
+
+static bool calc_pll_dividers_in_range(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t min_ref_divider,
+ uint32_t max_ref_divider,
+ uint32_t min_post_divider,
+ uint32_t max_post_divider,
+ uint32_t err_tolerance)
+{
+ uint32_t ref_divider;
+ uint32_t post_divider;
+ uint32_t tolerance;
+
+/* The tolerance is err_tolerance / 10000 of the adjusted pixel clock:
+ * e.g. err_tolerance = 25 -> 0.0025 (0.25%), err_tolerance = 1 -> 0.0001 (0.01%) */
+ tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) /
+ 10000;
+ if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
+ tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
+
+ for (
+ post_divider = max_post_divider;
+ post_divider >= min_post_divider;
+ --post_divider) {
+ for (
+ ref_divider = min_ref_divider;
+ ref_divider <= max_ref_divider;
+ ++ref_divider) {
+ if (calc_fb_divider_checking_tolerance(
+ calc_pll_cs,
+ pll_settings,
+ ref_divider,
+ post_divider,
+ tolerance)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static uint32_t calculate_pixel_clock_pll_dividers(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings)
+{
+ uint32_t err_tolerance;
+ uint32_t min_post_divider;
+ uint32_t max_post_divider;
+ uint32_t min_ref_divider;
+ uint32_t max_ref_divider;
+
+ if (pll_settings->adjusted_pix_clk == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Bad requested pixel clock", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 1) Find Post divider ranges */
+ if (pll_settings->pix_clk_post_divider) {
+ min_post_divider = pll_settings->pix_clk_post_divider;
+ max_post_divider = pll_settings->pix_clk_post_divider;
+ } else {
+ min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
+ if (min_post_divider * pll_settings->adjusted_pix_clk <
+ calc_pll_cs->min_vco_khz) {
+ min_post_divider = calc_pll_cs->min_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ if ((min_post_divider *
+ pll_settings->adjusted_pix_clk) <
+ calc_pll_cs->min_vco_khz)
+ min_post_divider++;
+ }
+
+ max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
+ if (max_post_divider * pll_settings->adjusted_pix_clk
+ > calc_pll_cs->max_vco_khz)
+ max_post_divider = calc_pll_cs->max_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ }
+
+/* 2) Find Reference divider ranges
+ * When SS is enabled, or for Display Port even without SS,
+ * pll_settings->reference_divider is not zero.
+ * So calculate the PPLL FB and fractional FB dividers
+ * using the passed reference divider */
+
+ if (pll_settings->reference_divider) {
+ min_ref_divider = pll_settings->reference_divider;
+ max_ref_divider = pll_settings->reference_divider;
+ } else {
+ min_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz)
+ > calc_pll_cs->min_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz
+ : calc_pll_cs->min_pll_ref_divider;
+
+ max_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->min_pll_input_freq_khz)
+ < calc_pll_cs->max_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz /
+ calc_pll_cs->min_pll_input_freq_khz
+ : calc_pll_cs->max_pll_ref_divider;
+ }
+
+/* If some parameters are invalid we could have a scenario where "min" > "max",
+ * which would produce an endless loop later.
+ * We should investigate why we get the wrong parameters.
+ * But following the same logic as when "adjustedPixelClock" is set to 0,
+ * it is better to return here than to cause a system hang/watchdog timeout later.
+ * ## SVS Wed 15 Jul 2009 */
+
+ if (min_post_divider > max_post_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Post divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+ if (min_ref_divider > max_ref_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Reference divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 3) Try to find PLL dividers given ranges
+ * starting with minimal error tolerance.
+ * Increase error tolerance until PLL dividers found*/
+ err_tolerance = MAX_PLL_CALC_ERROR;
+
+ while (!calc_pll_dividers_in_range(
+ calc_pll_cs,
+ pll_settings,
+ min_ref_divider,
+ max_ref_divider,
+ min_post_divider,
+ max_post_divider,
+ err_tolerance))
+ err_tolerance += (err_tolerance > 10)
+ ? (err_tolerance / 10)
+ : 1;
+
+ return err_tolerance;
+}
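The post-divider range above brackets the VCO between min_vco_khz and max_vco_khz, and the
reference-divider range keeps the PLL input between min_pll_input_freq_khz and
max_pll_input_freq_khz; the loop then grows err_tolerance (units of 0.01%) by roughly 10% per
pass until a divider pair fits. A rough sketch of the bracketing with hypothetical limits (the
real ones come from the calc_pll_clock_source setup):

        uint32_t pix_khz = 148500;
        uint32_t min_vco = 600000,  max_vco = 1200000;            /* kHz */
        uint32_t ref_khz = 27000,   min_in = 1000, max_in = 13500; /* kHz */

        uint32_t min_post = (min_vco + pix_khz - 1) / pix_khz;    /*  5 */
        uint32_t max_post = max_vco / pix_khz;                    /*  8 */
        uint32_t min_ref  = (ref_khz + max_in - 1) / max_in;      /*  2 */
        uint32_t max_ref  = ref_khz / min_in;                     /* 27 */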
+
+static bool pll_adjust_pix_clk(
+ struct dce110_clk_src *clk_src,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t actual_pix_clk_khz = 0;
+ uint32_t requested_clk_khz = 0;
+ struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
+ 0 };
+ enum bp_result bp_result;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ requested_clk_khz = (requested_clk_khz * 5) >> 2;
+ break; /* x1.25*/
+ case COLOR_DEPTH_121212:
+ requested_clk_khz = (requested_clk_khz * 6) >> 2;
+ break; /* x1.5*/
+ case COLOR_DEPTH_161616:
+ requested_clk_khz = requested_clk_khz * 2;
+ break; /* x2.0*/
+ default:
+ break;
+ }
+ }
+ actual_pix_clk_khz = requested_clk_khz;
+ }
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ requested_clk_khz = pix_clk_params->requested_sym_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+
+ default:
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+ }
+
+ bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz;
+ bp_adjust_pixel_clock_params.
+ encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
+ bp_adjust_pixel_clock_params.
+ ss_enable = pix_clk_params->flags.ENABLE_SS;
+ bp_result = clk_src->bios->funcs->adjust_pixel_clock(
+ clk_src->bios, &bp_adjust_pixel_clock_params);
+ if (bp_result == BP_RESULT_OK) {
+ pll_settings->actual_pix_clk = actual_pix_clk_khz;
+ pll_settings->adjusted_pix_clk =
+ bp_adjust_pixel_clock_params.adjusted_pixel_clock;
+ pll_settings->reference_divider =
+ bp_adjust_pixel_clock_params.reference_divider;
+ pll_settings->pix_clk_post_divider =
+ bp_adjust_pixel_clock_params.pixel_clock_post_divider;
+
+ return true;
+ }
+
+ return false;
+}
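For HDMI deep color the TMDS clock is the pixel clock scaled by the bit depth, which is exactly
what the shift arithmetic above (and the dce112 helper below) implements. A quick numeric check
using a 74,250 kHz (720p60) pixel clock:

        uint32_t pix = 74250;
        uint32_t clk_10bpc = (pix * 5) >> 2;   /*  92812 kHz, x1.25 (truncated) */
        uint32_t clk_12bpc = (pix * 6) >> 2;   /* 111375 kHz, x1.5              */
        uint32_t clk_16bpc =  pix * 2;         /* 148500 kHz, x2.0              */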
+
+/**
+ * Calculate PLL Dividers for given Clock Value.
+ * It first calls the VBIOS Adjust Exec table to check whether the requested
+ * pixel clock will be adjusted based on usage.
+ * Then it calculates the PLL dividers for this adjusted clock using the
+ * preferred method (maximum VCO frequency).
+ *
+ * \return
+ * Calculation error in units of 0.01%
+ */
+
+static uint32_t dce110_get_pix_clk_dividers_helper (
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t field = 0;
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ /* Check if reference clock is external (not pcie/xtalin)
+ * HW Dce80 spec:
+ * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
+ * 04 - HSYNCA, 05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */
+ REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field);
+ pll_settings->use_external_clk = (field > 1);
+
+ /* VBIOS by default enables DP SS (spread on IDCLK) for DCE 8.0 always
+ * (since SI we no longer care about older DP sinks that do not
+ * report SS support; no known issues) */
+ if ((pix_clk_params->flags.ENABLE_SS) ||
+ (dc_is_dp_signal(pix_clk_params->signal_type))) {
+
+ const struct spread_spectrum_data *ss_data = get_ss_data_entry(
+ clk_src,
+ pix_clk_params->signal_type,
+ pll_settings->adjusted_pix_clk);
+
+ if (NULL != ss_data)
+ pll_settings->ss_percentage = ss_data->percentage;
+ }
+
+ /* Check VBIOS AdjustPixelClock Exec table */
+ if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
+ /* Should never happen; ASSERT and fill in values to be able
+ * to continue. */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Failed to adjust pixel clock!!", __func__);
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ pll_settings->adjusted_pix_clk =
+ pix_clk_params->requested_pix_clk;
+
+ if (dc_is_dp_signal(pix_clk_params->signal_type))
+ pll_settings->adjusted_pix_clk = 100000;
+ }
+
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ /*Calculate Dividers by HDMI object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll_hdmi,
+ pll_settings);
+ else
+ /*Calculate Dividers by default object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll,
+ pll_settings);
+
+ return pll_calc_error;
+}
+
+static void dce112_get_pix_clk_dividers_helper (
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t actualPixelClockInKHz;
+
+ actualPixelClockInKHz = pix_clk_params->requested_pix_clk;
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2;
+ break;
+ case COLOR_DEPTH_121212:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2;
+ break;
+ case COLOR_DEPTH_161616:
+ actualPixelClockInKHz = actualPixelClockInKHz * 2;
+ break;
+ default:
+ break;
+ }
+ }
+ pll_settings->actual_pix_clk = actualPixelClockInKHz;
+ pll_settings->adjusted_pix_clk = actualPixelClockInKHz;
+ pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk;
+}
+
+static uint32_t dce110_get_pix_clk_dividers(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ if (pix_clk_params == NULL || pll_settings == NULL
+ || pix_clk_params->requested_pix_clk == 0) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Invalid parameters!!\n", __func__);
+ return pll_calc_error;
+ }
+
+ memset(pll_settings, 0, sizeof(*pll_settings));
+
+ if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
+ cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
+ pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ return 0;
+ }
+
+ switch (cs->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ pll_calc_error =
+ dce110_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ dce112_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ default:
+ break;
+ }
+
+ return pll_calc_error;
+}
+
+static uint32_t dce110_get_pll_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+	/* This function needs to be split per DCE version; until that is done, just use the stream's pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+
+}
+
+static uint32_t dce110_get_dp_pixel_rate_from_combo_phy_pll(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+	/* This function needs to be split per DCE version; until that is done, just use the stream's pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+}
+
+static uint32_t dce110_get_d_to_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ int dto_enabled = 0;
+ struct fixed31_32 pix_rate;
+
+ REG_GET(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, &dto_enabled);
+
+ if (dto_enabled) {
+ uint32_t phase = 0;
+ uint32_t modulo = 0;
+ REG_GET(PHASE[inst], DP_DTO0_PHASE, &phase);
+ REG_GET(MODULO[inst], DP_DTO0_MODULO, &modulo);
+
+ if (modulo == 0) {
+ return 0;
+ }
+
+ pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
+ pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
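+		/* i.e. pixel rate = ref_clk_hz * phase / modulo; e.g. with an
+		 * illustrative 600000 kHz reference, phase 148500 and modulo
+		 * 600000 this yields 148500000 Hz */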
+
+ return dal_fixed31_32_round(pix_rate);
+ } else {
+ return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
+ }
+}
+
+static uint32_t dce110_get_pix_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t pix_rate = 0;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ pix_rate = dce110_get_d_to_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ default:
+ pix_rate = dce110_get_pll_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ }
+
+ return pix_rate;
+}
+
+static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
+{
+ enum bp_result result;
+ struct bp_spread_spectrum_parameters bp_ss_params = {0};
+
+ bp_ss_params.pll_id = clk_src->base.id;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_ss_params,
+ false);
+
+ return result == BP_RESULT_OK;
+}
+
+static bool calculate_ss(
+ const struct pll_settings *pll_settings,
+ const struct spread_spectrum_data *ss_data,
+ struct delta_sigma_data *ds_data)
+{
+ struct fixed32_32 fb_div;
+ struct fixed32_32 ss_amount;
+ struct fixed32_32 ss_nslip_amount;
+ struct fixed32_32 ss_ds_frac_amount;
+ struct fixed32_32 ss_step_size;
+ struct fixed32_32 modulation_time;
+
+ if (ds_data == NULL)
+ return false;
+ if (ss_data == NULL)
+ return false;
+ if (ss_data->percentage == 0)
+ return false;
+ if (pll_settings == NULL)
+ return false;
+
+ memset(ds_data, 0, sizeof(struct delta_sigma_data));
+
+ /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
+ /* 6 decimal point support in fractional feedback divider */
+ fb_div = dal_fixed32_32_from_fraction(
+ pll_settings->fract_feedback_divider, 1000000);
+ fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
+
+ ds_data->ds_frac_amount = 0;
+	/* spreadSpectrumPercentage is in units of 0.01%,
+	 * so it has to be divided by 100 * 100 */
+ ss_amount = dal_fixed32_32_mul(
+ fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
+ 100 * ss_data->percentage_divider));
+ ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
+
+ ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
+ dal_fixed32_32_from_int(ds_data->feedback_amount));
+ ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
+ ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
+
+ ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
+ dal_fixed32_32_from_int(ds_data->nfrac_amount));
+ ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
+ ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
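+	/* e.g. an illustrative fb_div of 112.5 with a 0.5% spread gives
+	 * ss_amount = 0.5625: feedback_amount = 0, nfrac_amount = 5 (tenths)
+	 * and ds_frac_amount = floor(0.625 * 65536) = 40960 */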
+
+ /* compute SS_STEP_SIZE_DSFRAC */
+ modulation_time = dal_fixed32_32_from_fraction(
+ pll_settings->reference_freq * 1000,
+ pll_settings->reference_divider * ss_data->modulation_freq_hz);
+
+ if (ss_data->flags.CENTER_SPREAD)
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
+ else
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
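+	/* modulation_time is now the number of (divided) reference clock
+	 * cycles over which the full spread amount is ramped, so ss_step_size
+	 * below is the per-cycle increment */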
+
+ ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
+ /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
+ ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
+ ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
+
+ return true;
+}
+
+static bool enable_spread_spectrum(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal, struct pll_settings *pll_settings)
+{
+ struct bp_spread_spectrum_parameters bp_params = {0};
+ struct delta_sigma_data d_s_data;
+ const struct spread_spectrum_data *ss_data = NULL;
+
+ ss_data = get_ss_data_entry(
+ clk_src,
+ signal,
+ pll_settings->calculated_pix_clk);
+
+/* Pixel clock PLL has been programmed to generate desired pixel clock,
+ * now enable SS on pixel clock */
+/* TODO: is it OK to return true without doing anything? */
+ if (ss_data != NULL && pll_settings->ss_percentage != 0) {
+ if (calculate_ss(pll_settings, ss_data, &d_s_data)) {
+ bp_params.ds.feedback_amount =
+ d_s_data.feedback_amount;
+ bp_params.ds.nfrac_amount =
+ d_s_data.nfrac_amount;
+ bp_params.ds.ds_frac_size = d_s_data.ds_frac_size;
+ bp_params.ds_frac_amount =
+ d_s_data.ds_frac_amount;
+ bp_params.flags.DS_TYPE = 1;
+ bp_params.pll_id = clk_src->base.id;
+ bp_params.percentage = ss_data->percentage;
+ if (ss_data->flags.CENTER_SPREAD)
+ bp_params.flags.CENTER_SPREAD = 1;
+ if (ss_data->flags.EXTERNAL_SS)
+ bp_params.flags.EXTERNAL_SS = 1;
+
+ if (BP_RESULT_OK !=
+ clk_src->bios->funcs->
+ enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_params,
+ true))
+ return false;
+ } else
+ return false;
+ }
+ return true;
+}
+
+static void dce110_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth)
+{
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 1);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 2);
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce112_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth,
+ bool enable_ycbcr420)
+{
+ uint32_t deep_color_cntl = 0;
+ uint32_t double_rate_enable = 0;
+
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ double_rate_enable = enable_ycbcr420 ? 1 : 0;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ deep_color_cntl = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ deep_color_cntl = 1;
+ break;
+ case COLOR_DEPTH_121212:
+ deep_color_cntl = 2;
+ break;
+ case COLOR_DEPTH_161616:
+ deep_color_cntl = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (clk_src->cs_mask->PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE)
+ REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl,
+ PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, double_rate_enable);
+ else
+ REG_UPDATE(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl);
+
+}
+
+static bool dce110_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+		unsigned int dp_dto_ref_kHz = 600000;
+		/* DPREF clock from FPGA TODO: Does FPGA have this value? */
+		unsigned int clock_kHz = pll_settings->actual_pix_clk;
+
+		/* For faster simulation, if the mode pixel clock is less than
+		 * 290 MHz it can be hard coded to 290 MHz
+		 * (clock_kHz = 290000). For 4K modes the pixel clock is
+		 * greater than 500 MHz, so the real pixel clock is needed.
+		 */
+ /* TODO: un-hardcode when we can set display clock properly*/
+ /*clock_kHz = pix_clk_params->requested_pix_clk;*/
+ clock_kHz = 290000;
+
+ /* Set DTO values: phase = target clock, modulo = reference clock */
+ REG_WRITE(PHASE[inst], clock_kHz);
+ REG_WRITE(MODULO[inst], dp_dto_ref_kHz);
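+		/* DTO output = DPREFCLK * phase / modulo,
+		 * i.e. 600000 * 290000 / 600000 = 290000 kHz here */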
+
+ /* Enable DTO */
+ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ return true;
+ }
+#endif
+ /* First disable SS
+ * ATOMBIOS will enable by default SS on PLL for DP,
+ * do not disable it here
+ */
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL &&
+ !dc_is_dp_signal(pix_clk_params->signal_type) &&
+ clock_source->ctx->dce_version <= DCE_VERSION_11_0)
+ disable_spread_spectrum(clk_src);
+
+	/* ATOMBIOS expects pixel rate adjusted by deep color ratio */
+ bp_pc_params.controller_id = pix_clk_params->controller_id;
+ bp_pc_params.pll_id = clock_source->id;
+ bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
+ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+ switch (clock_source->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ bp_pc_params.reference_divider = pll_settings->reference_divider;
+ bp_pc_params.feedback_divider = pll_settings->feedback_divider;
+ bp_pc_params.fractional_feedback_divider =
+ pll_settings->fract_feedback_divider;
+ bp_pc_params.pixel_clock_post_divider =
+ pll_settings->pix_clk_post_divider;
+ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Enable SS
+ * ATOMBIOS will enable by default SS for DP on PLL ( DP ID clock),
+ * based on HW display PLL team, SS control settings should be programmed
+ * during PLL Reset, but they do not have effect
+ * until SS_EN is asserted.*/
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL
+ && !dc_is_dp_signal(pix_clk_params->signal_type)) {
+
+ if (pix_clk_params->flags.ENABLE_SS)
+ if (!enable_spread_spectrum(clk_src,
+ pix_clk_params->signal_type,
+ pll_settings))
+ return false;
+
+ /* Resync deep color DTO */
+ dce110_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth);
+ }
+
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+ bp_pc_params.flags.SET_XTALIN_REF_SRC =
+ !pll_settings->use_external_clk;
+ if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+ bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+ }
+ }
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Resync deep color DTO */
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
+ dce112_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth,
+ pix_clk_params->flags.SUPPORT_YCBCR420);
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool dce110_clock_source_power_down(
+ struct clock_source *clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
+ enum bp_result bp_result;
+ struct bp_pixel_clock_parameters bp_pixel_clock_params = {0};
+
+ if (clk_src->dp_clk_src)
+ return true;
+
+ /* If Pixel Clock is 0 it means Power Down Pll*/
+ bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED;
+ bp_pixel_clock_params.pll_id = clk_src->id;
+ bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ bp_result = dce110_clk_src->bios->funcs->set_pixel_clock(
+ dce110_clk_src->bios,
+ &bp_pixel_clock_params);
+
+ return bp_result == BP_RESULT_OK;
+}
+
+/*****************************************/
+/* Constructor */
+/*****************************************/
+static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dce110_program_pix_clk,
+ .get_pix_clk_dividers = dce110_get_pix_clk_dividers,
+ .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz
+};
+
+static void get_ss_info_from_atombios(
+ struct dce110_clk_src *clk_src,
+ enum as_signal_type as_signal,
+ struct spread_spectrum_data *spread_spectrum_data[],
+ uint32_t *ss_entries_num)
+{
+ enum bp_result bp_result = BP_RESULT_FAILURE;
+ struct spread_spectrum_info *ss_info;
+ struct spread_spectrum_data *ss_data;
+ struct spread_spectrum_info *ss_info_cur;
+ struct spread_spectrum_data *ss_data_cur;
+ uint32_t i;
+
+ if (ss_entries_num == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid entry !!!\n");
+ return;
+ }
+ if (spread_spectrum_data == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid array pointer!!!\n");
+ return;
+ }
+
+ spread_spectrum_data[0] = NULL;
+ *ss_entries_num = 0;
+
+ *ss_entries_num = clk_src->bios->funcs->get_ss_entry_number(
+ clk_src->bios,
+ as_signal);
+
+ if (*ss_entries_num == 0)
+ return;
+
+ ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num),
+ GFP_KERNEL);
+ ss_info_cur = ss_info;
+ if (ss_info == NULL)
+ return;
+
+ ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num),
+ GFP_KERNEL);
+ if (ss_data == NULL)
+ goto out_free_info;
+
+ for (i = 0, ss_info_cur = ss_info;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur) {
+
+ bp_result = clk_src->bios->funcs->get_spread_spectrum_info(
+ clk_src->bios,
+ as_signal,
+ i,
+ ss_info_cur);
+
+ if (bp_result != BP_RESULT_OK)
+ goto out_free_data;
+ }
+
+ for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur, ++ss_data_cur) {
+
+		if (ss_info_cur->type.STEP_AND_DELAY_INFO) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid ATOMBIOS SS Table!!!\n");
+ goto out_free_data;
+ }
+
+ /* for HDMI check SS percentage,
+ * if it is > 6 (0.06%), the ATOMBIOS table info is invalid*/
+ if (as_signal == AS_SIGNAL_TYPE_HDMI
+			&& ss_info_cur->spread_spectrum_percentage > 6) {
+ /* invalid input, do nothing */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid SS percentage ");
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "for HDMI in ATOMBIOS info Table!!!\n");
+ continue;
+ }
+ if (ss_info_cur->spread_percentage_divider == 1000) {
+			/* Keep the previous precision from ATOMBIOS here, in
+			 * case ATOMBIOS reports a finer one (otherwise all
+			 * code in the DCE specific classes for all previous
+			 * ASICs would need to be updated for SS calculations,
+			 * Audio SS compensation and DP DTO SS compensation,
+			 * which assume a fixed SS percentage divider of 100) */
+ ss_info_cur->spread_spectrum_percentage /= 10;
+ ss_info_cur->spread_percentage_divider = 100;
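+			/* e.g. 500 with divider 1000 (0.5%) becomes 50 with
+			 * divider 100, preserving the same percentage */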
+ }
+
+ ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range;
+ ss_data_cur->percentage =
+ ss_info_cur->spread_spectrum_percentage;
+ ss_data_cur->percentage_divider =
+ ss_info_cur->spread_percentage_divider;
+ ss_data_cur->modulation_freq_hz =
+ ss_info_cur->spread_spectrum_range;
+
+ if (ss_info_cur->type.CENTER_MODE)
+ ss_data_cur->flags.CENTER_SPREAD = 1;
+
+ if (ss_info_cur->type.EXTERNAL)
+ ss_data_cur->flags.EXTERNAL_SS = 1;
+
+ }
+
+ *spread_spectrum_data = ss_data;
+ kfree(ss_info);
+ return;
+
+out_free_data:
+ kfree(ss_data);
+ *ss_entries_num = 0;
+out_free_info:
+ kfree(ss_info);
+}
+
+static void ss_info_from_atombios_create(
+ struct dce110_clk_src *clk_src)
+{
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ &clk_src->dp_ss_params,
+ &clk_src->dp_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_HDMI,
+ &clk_src->hdmi_ss_params,
+ &clk_src->hdmi_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DVI,
+ &clk_src->dvi_ss_params,
+ &clk_src->dvi_ss_params_cnt);
+}
+
+static bool calc_pll_max_vco_construct(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct calc_pll_clock_source_init_data *init_data)
+{
+ uint32_t i;
+ struct dc_firmware_info fw_info = { { 0 } };
+ if (calc_pll_cs == NULL ||
+ init_data == NULL ||
+ init_data->bp == NULL)
+ return false;
+
+ if (init_data->bp->funcs->get_firmware_info(
+ init_data->bp,
+ &fw_info) != BP_RESULT_OK)
+ return false;
+
+ calc_pll_cs->ctx = init_data->ctx;
+ calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+ calc_pll_cs->min_vco_khz =
+ fw_info.pll_info.min_output_pxl_clk_pll_frequency;
+ calc_pll_cs->max_vco_khz =
+ fw_info.pll_info.max_output_pxl_clk_pll_frequency;
+
+ if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->max_pll_input_freq_khz =
+ init_data->max_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->max_pll_input_freq_khz =
+ fw_info.pll_info.max_input_pxl_clk_pll_frequency;
+
+ if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->min_pll_input_freq_khz =
+ init_data->min_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->min_pll_input_freq_khz =
+ fw_info.pll_info.min_input_pxl_clk_pll_frequency;
+
+ calc_pll_cs->min_pix_clock_pll_post_divider =
+ init_data->min_pix_clk_pll_post_divider;
+ calc_pll_cs->max_pix_clock_pll_post_divider =
+ init_data->max_pix_clk_pll_post_divider;
+ calc_pll_cs->min_pll_ref_divider =
+ init_data->min_pll_ref_divider;
+ calc_pll_cs->max_pll_ref_divider =
+ init_data->max_pll_ref_divider;
+
+ if (init_data->num_fract_fb_divider_decimal_point == 0 ||
+ init_data->num_fract_fb_divider_decimal_point_precision >
+ init_data->num_fract_fb_divider_decimal_point) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "The dec point num or precision is incorrect!");
+ return false;
+ }
+ if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "Incorrect fract feedback divider precision num!");
+ return false;
+ }
+
+ calc_pll_cs->fract_fb_divider_decimal_points_num =
+ init_data->num_fract_fb_divider_decimal_point;
+ calc_pll_cs->fract_fb_divider_precision =
+ init_data->num_fract_fb_divider_decimal_point_precision;
+ calc_pll_cs->fract_fb_divider_factor = 1;
+ for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i)
+ calc_pll_cs->fract_fb_divider_factor *= 10;
+
+ calc_pll_cs->fract_fb_divider_precision_factor = 1;
+ for (
+ i = 0;
+ i < (calc_pll_cs->fract_fb_divider_decimal_points_num -
+ calc_pll_cs->fract_fb_divider_precision);
+ ++i)
+ calc_pll_cs->fract_fb_divider_precision_factor *= 10;
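+	/* e.g. with 6 decimal points and 6 digits of precision (as
+	 * dce110_clk_src_construct sets up below) the divider factor is
+	 * 10^6 = 1000000 and the precision factor is 10^0 = 1 */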
+
+ return true;
+}
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
+
+ clk_src->base.ctx = ctx;
+ clk_src->bios = bios;
+ clk_src->base.id = id;
+ clk_src->base.funcs = &dce110_clk_src_funcs;
+
+ clk_src->regs = regs;
+ clk_src->cs_shift = cs_shift;
+ clk_src->cs_mask = cs_mask;
+
+ if (clk_src->bios->funcs->get_firmware_info(
+ clk_src->bios, &fw_info) != BP_RESULT_OK) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+ clk_src->ext_clk_khz =
+ fw_info.external_clock_source_frequency_for_dp;
+
+ switch (clk_src->base.ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+
+ /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
+ calc_pll_cs_init_data.bp = bios;
+ calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ /*number of decimal point to round off for fractional feedback divider value*/
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data.ctx = ctx;
+
+ /*structure for HDMI, no SS or SS% <= 0.06% for 27 MHz Ref clock */
+ calc_pll_cs_init_data_hdmi.bp = bios;
+ calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ /*number of decimal point to round off for fractional feedback divider value*/
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data_hdmi.ctx = ctx;
+
+ clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+
+ if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
+ return true;
+
+ /* PLL only from here on */
+ ss_info_from_atombios_create(clk_src);
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll,
+ &calc_pll_cs_init_data)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+
+ calc_pll_cs_init_data_hdmi.
+ min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2;
+ calc_pll_cs_init_data_hdmi.
+ max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz;
+
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+unexpected_failure:
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
new file mode 100644
index 000000000000..c45e2f76189e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -0,0 +1,145 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_DCE_H__
+#define __DC_CLOCK_SOURCE_DCE_H__
+
+#include "../inc/clock_source.h"
+
+#define TO_DCE110_CLK_SRC(clk_src)\
+ container_of(clk_src, struct dce110_clk_src, base)
+
+#define CS_COMMON_REG_LIST_DCE_100_110(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, BPHYC_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_80(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, DCCG_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_112(id) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, id)
+
+
+#define CS_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ CS_SF(PLL_CNTL, PLL_REF_DIV_SRC, mask_sh),\
+ CS_SF(PIXCLK1_RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, mask_sh),\
+ CS_SF(PLL_POST_DIV, PLL_POST_DIV_PIXCLK, mask_sh),\
+ CS_SF(PLL_REF_DIV, PLL_REF_DIV, mask_sh)
+
+#define CS_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+
+#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ SRII(PIXEL_RATE_CNTL, OTG, 2), \
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+
+#define CS_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
+ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#endif
+
+#define CS_REG_FIELD_LIST(type) \
+ type PLL_REF_DIV_SRC; \
+ type DCCG_DEEP_COLOR_CNTL1; \
+ type PHYPLLA_DCCG_DEEP_COLOR_CNTL; \
+ type PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE; \
+ type PLL_POST_DIV_PIXCLK; \
+ type PLL_REF_DIV; \
+ type DP_DTO0_PHASE; \
+ type DP_DTO0_MODULO; \
+ type DP_DTO0_ENABLE;
+
+struct dce110_clk_src_shift {
+ CS_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce110_clk_src_mask {
+ CS_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce110_clk_src_regs {
+ uint32_t RESYNC_CNTL;
+ uint32_t PIXCLK_RESYNC_CNTL;
+ uint32_t PLL_CNTL;
+
+ /* below are for DTO.
+ * todo: should probably use different struct to not waste space
+ */
+ uint32_t PHASE[MAX_PIPES];
+ uint32_t MODULO[MAX_PIPES];
+ uint32_t PIXEL_RATE_CNTL[MAX_PIPES];
+};
+
+struct dce110_clk_src {
+ struct clock_source base;
+ const struct dce110_clk_src_regs *regs;
+ const struct dce110_clk_src_mask *cs_mask;
+ const struct dce110_clk_src_shift *cs_shift;
+ struct dc_bios *bios;
+
+ struct spread_spectrum_data *dp_ss_params;
+ uint32_t dp_ss_params_cnt;
+ struct spread_spectrum_data *hdmi_ss_params;
+ uint32_t hdmi_ss_params_cnt;
+ struct spread_spectrum_data *dvi_ss_params;
+ uint32_t dvi_ss_params_cnt;
+
+ uint32_t ext_clk_khz;
+ uint32_t ref_freq_khz;
+
+ struct calc_pll_clock_source calc_pll;
+ struct calc_pll_clock_source calc_pll_hdmi;
+};
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+	enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
new file mode 100644
index 000000000000..9031d22285ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clocks.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "bios_parser_interface.h"
+#include "dc.h"
+#include "dce_abm.h"
+#include "dmcu.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn_calcs.h"
+#endif
+#include "core_types.h"
+
+
+#define TO_DCE_CLOCKS(clocks)\
+ container_of(clocks, struct dce_disp_clk, base)
+
+#define REG(reg) \
+ (clk_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
+
+#define CTX \
+ clk_dce->base.ctx
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+/* Starting point for each divider range.*/
+enum dce_divider_range_start {
+ DIVIDER_RANGE_01_START = 200, /* 2.00*/
+ DIVIDER_RANGE_02_START = 1600, /* 16.00*/
+ DIVIDER_RANGE_03_START = 3200, /* 32.00*/
+ DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+};
+
+/* Ranges for divider identifiers (Divider ID or DID)
+ * mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER */
+enum dce_divider_id_register_setting {
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
+ DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+};
+
+/* Step size between each divider within a range.
+ * Incrementing the DENTIST_DISPCLK_WDIVIDER by one
+ * will increment the divider by this much. */
+enum dce_divider_range_step_size {
+ DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
+ DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
+ DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
+};
+
+static bool dce_divider_range_construct(
+ struct dce_divider_range *div_range,
+ int range_start,
+ int range_step,
+ int did_min,
+ int did_max)
+{
+ div_range->div_range_start = range_start;
+ div_range->div_range_step = range_step;
+ div_range->did_min = did_min;
+ div_range->did_max = did_max;
+
+ if (div_range->div_range_step == 0) {
+ div_range->div_range_step = 1;
+ /*div_range_step cannot be zero*/
+ BREAK_TO_DEBUGGER();
+ }
+ /* Calculate this based on the other inputs.*/
+ /* See DividerRange.h for explanation of */
+ /* the relationship between divider id (DID) and a divider.*/
+ /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
+ /* Maximum divider identified in this range =
+ * (Number of Divider IDs)*Step size between dividers
+ * + The start of this range.*/
+ div_range->div_range_end = (did_max - did_min) * range_step
+ + range_start;
+ return true;
+}
+
+static int dce_divider_range_calc_divider(
+ struct dce_divider_range *div_range,
+ int did)
+{
+ /* Is this DID within our range?*/
+ if ((did < div_range->did_min) || (did >= div_range->did_max))
+ return INVALID_DIVIDER;
+
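+	/* e.g. DID 0x41 in range 2 (start 1600, step 50, did_min 0x40):
+	 * (0x41 - 0x40) * 50 + 1600 = 1650, i.e. a divider of 16.50 */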
+ return ((did - div_range->did_min) * div_range->div_range_step)
+ + div_range->div_range_start;
+
+}
+
+static int dce_divider_range_get_divider(
+ struct dce_divider_range *div_range,
+ int ranges_num,
+ int did)
+{
+ int div = INVALID_DIVIDER;
+ int i;
+
+ for (i = 0; i < ranges_num; i++) {
+ /* Calculate divider with given divider ID*/
+ div = dce_divider_range_calc_divider(&div_range[i], did);
+ /* Found a valid return divider*/
+ if (div != INVALID_DIVIDER)
+ break;
+ }
+ return div;
+}
+
+static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div = INVALID_DIVIDER;
+
+ /* ASSERT DP Reference Clock source is from DFS*/
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+	/* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+ target_div = dce_divider_range_get_divider(
+ clk_dce->divider_ranges,
+ DIVIDER_RANGE_MAX,
+ dprefclk_wdivider);
+
+ if (target_div != INVALID_DIVIDER) {
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
+ }
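+	/* e.g. a 3600000 kHz DENTIST VCO with the WDIVIDER decoding to
+	 * 600 (6.00) gives 100 * 3600000 / 600 = 600000 kHz */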
+
+	/* SW will adjust the DP REF Clock average value for all purposes
+	 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
+	 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+	 *   with SW calculations for DS_INCR/DS_MODULO (planned to be the default)
+	 * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled
+	 *   with HW calculations (not planned to be used, but the average clock
+	 *   should still be valid)
+	 * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled
+	 *   (should not be the case with CIK) then SW should program all rates
+	 *   generated according to the average value (as with previous ASICs)
+	 */
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
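+	/* e.g. a 0.5% down-spread (percentage 50, divider 100) adjusts the
+	 * average by half the spread: 600000 * (1 - 0.5 / 200) = 598500 kHz */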
+
+ return dp_ref_clk_khz;
+}
+
+/* TODO: This is the DCN DPREFCLK: it could be programmed by DENTIST via
+ * VBIOS or as CLK0_CLK11 by SMU. For DCE120 it is always 600 MHz. Will
+ * revisit the clock implementation.
+ */
+static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dp_ref_clk_khz = 600000;
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct display_clock *clk,
+ struct state_dependent_clocks *req_clocks)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (req_clocks->display_clk_khz >
+ clk_dce->max_clks_by_state[i].display_clk_khz
+ || req_clocks->pixel_clk_khz >
+ clk_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
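+	/* the loop breaks at the highest state that cannot satisfy the request
+	 * (or runs past ULTRA_LOW if every state can), so i + 1 is the lowest
+	 * state that satisfies it */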
+ low_req_clk = i + 1;
+ if (low_req_clk > clk->max_clks_state) {
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "%s: clocks unsupported", __func__);
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ }
+
+ return low_req_clk;
+}
+
+static bool dce_clock_set_min_clocks_state(
+ struct display_clock *clk,
+ enum dm_pp_clocks_state clocks_state)
+{
+ struct dm_pp_power_level_change_request level_change_req = {
+ clocks_state };
+
+ if (clocks_state > clk->max_clks_state) {
+ /*Requested state exceeds max supported state.*/
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "Requested state exceeds max supported state");
+ return false;
+ } else if (clocks_state == clk->cur_min_clks_state) {
+ /*if we're trying to set the same state, we can just return
+ * since nothing needs to be done*/
+ return true;
+ }
+
+ /* get max clock state from PPLIB */
+ if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
+ clk->cur_min_clks_state = clocks_state;
+
+ return true;
+}
+
+static int dce_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 64);
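+	/* e.g. with the default 3600000 kHz DENTIST VCO the floor is
+	 * 3600000 / 64 = 56250 kHz */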
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_dce->dfs_bypass_enabled) {
+
+ /* Cache the fixed display clock*/
+ clk_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+	/* When powering down (HW reset) mark the clock state as
+	 * ClocksStateNominal so that on resume we will call the pplib
+	 * voltage regulator. */
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ return actual_clock;
+}
+
+static int dce_psr_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dc_context *ctx = clk_dce->base.ctx;
+ struct dc *core_dc = ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clk_khz = requested_clk_khz;
+
+ actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
+
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
+ return actual_clk_khz;
+}
+
+static int dce112_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ struct dc *core_dc = clk->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+	/* When powering down (HW reset) mark the clock state as
+	 * ClocksStateNominal so that on resume we will call the pplib
+	 * voltage regulator. */
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ clk_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /*update the maximum display clock for each power state*/
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+		/* Do not allow bad VBIOS/SBIOS to override with invalid values;
+		 * check for > 100 MHz */
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ clk_dce->use_max_disp_clk = debug->max_disp_clk;
+}
+
+static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+		/* VBIOS keeps a GPU PLL SS entry even when SS is not enabled;
+		 * a non-zero SSInfo.spreadSpectrumPercentage is therefore the
+		 * sign that SS is actually enabled
+		 */
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+		/* VBIOS keeps a DPREFCLK SS entry even when SS is not enabled;
+		 * a non-zero SSInfo.spreadSpectrumPercentage is therefore the
+		 * sign that SS is actually enabled
+		 */
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+static bool dce_apply_clock_voltage_request(
+ struct display_clock *clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk)
+{
+ bool send_request = false;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ clock_voltage_req.clk_type = clocks_type;
+ clock_voltage_req.clocks_in_khz = clocks_in_khz;
+
+ /* to pplib */
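+	/* before mode set only clock increases are sent to pplib (raise
+	 * voltage early); decreases are deferred to the post-mode-set pass,
+	 * which only sends a request if the pre-pass did not already do so */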
+ if (pre_mode_set) {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
+ clk->cur_clocks_value.dispclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.dispclk_notify_pplib_done = false;
+			/* whether the clock increases or decreases, update the current clock value */
+ clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
+			/* whether the clock increases or decreases, update the current clock value */
+ clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
+			/* whether the clock increases or decreases, update the current clock value */
+ clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ } else {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
+ send_request = true;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ if (send_request) {
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
+ struct dc *core_dc = clk->ctx->dc;
+ /*use dcfclk request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz =
+ dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
+ }
+#endif
+ dm_pp_apply_clock_for_voltage_request(
+ clk->ctx, &clock_voltage_req);
+ }
+ if (update_dp_phyclk && (clocks_in_khz >
+ clk->cur_clocks_value.max_dp_phyclk_in_khz))
+ clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
+
+ return true;
+}
+
+
+static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+ .apply_clock_voltage_request = dce_apply_clock_voltage_request,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_psr_set_clock
+};
+
+static const struct display_clock_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_set_clock
+};
+
+static void dce_disp_clk_construct(
+ struct dce_disp_clk *clk_dce,
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct display_clock *base = &clk_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_dce->regs = regs;
+ clk_dce->clk_shift = clk_shift;
+ clk_dce->clk_mask = clk_mask;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_dce);
+ dce_clock_read_ss_info(clk_dce);
+
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_01],
+ DIVIDER_RANGE_01_START,
+ DIVIDER_RANGE_01_STEP_SIZE,
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_02],
+ DIVIDER_RANGE_02_START,
+ DIVIDER_RANGE_02_STEP_SIZE,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_03],
+ DIVIDER_RANGE_03_START,
+ DIVIDER_RANGE_03_STEP_SIZE,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_MAX_DIVIDER_ID);
+}
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce110_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce112_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+ clk_dce->base.funcs = &dce120_funcs;
+
+ /* new in dce120 */
+ if (!ctx->dc->debug.disable_pplib_clock_request &&
+ dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
+ && clk_level_info.num_levels)
+ clk_dce->max_displ_clk_in_khz =
+ clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
+ else
+ clk_dce->max_displ_clk_in_khz = 1133000;
+
+ return &clk_dce->base;
+}
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+
+ kfree(clk_dce);
+ *disp_clk = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
new file mode 100644
index 000000000000..0e717e0dc8f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLOCKS_H_
+#define _DCE_CLOCKS_H_
+
+#include "display_clock.h"
+
+#define CLK_COMMON_REG_LIST_DCE_BASE() \
+ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
+#define CLK_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+#define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+ type DENTIST_DPREFCLK_WDIVIDER;
+
+struct dce_disp_clk_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_disp_clk_mask {
+ CLK_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_disp_clk_registers {
+ uint32_t DPREFCLK_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+};
+
+/* Array identifiers and count for the divider ranges.*/
+enum dce_divider_range_count {
+ DIVIDER_RANGE_01 = 0,
+ DIVIDER_RANGE_02,
+ DIVIDER_RANGE_03,
+ DIVIDER_RANGE_MAX /* == 3*/
+};
+
+enum dce_divider_error_types {
+ INVALID_DID = 0,
+ INVALID_DIVIDER = 1
+};
+
+struct dce_divider_range {
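+	/* The start of this range of dividers.*/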
+ int div_range_start;
+ /* The end of this range of dividers.*/
+ int div_range_end;
+ /* The distance between each divider in this range.*/
+ int div_range_step;
+ /* The divider id for the lowest divider.*/
+ int did_min;
+ /* The divider id for the highest divider.*/
+ int did_max;
+};
+
+struct dce_disp_clk {
+ struct display_clock base;
+ const struct dce_disp_clk_registers *regs;
+ const struct dce_disp_clk_shift *clk_shift;
+ const struct dce_disp_clk_mask *clk_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+ bool use_max_disp_clk;
+ int dentist_vco_freq_khz;
+
+ /* Cache the status of DFS-bypass feature*/
+ bool dfs_bypass_enabled;
+ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+ int dfs_bypass_disp_clk;
+
+ /* Flag for Enabled SS on DPREFCLK */
+ bool ss_on_dprefclk;
+ /* DPREFCLK SS percentage (if down-spread enabled) */
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+
+ /* max disp_clk from PPLIB for max validation display clock*/
+ int max_displ_clk_in_khz;
+};
+
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk);
+
+#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
new file mode 100644
index 000000000000..fd77df573b61
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_dmcu.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#define TO_DCE_DMCU(dmcu)\
+ container_of(dmcu, struct dce_dmcu, base)
+
+#define REG(reg) \
+ (dmcu_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dmcu_dce->dmcu_shift->field_name, dmcu_dce->dmcu_mask->field_name
+
+#define CTX \
+ dmcu_dce->base.ctx
+
+/* PSR related commands */
+#define PSR_ENABLE 0x20
+#define PSR_EXIT 0x21
+#define PSR_SET 0x23
+#define PSR_SET_WAITLOOP 0x31
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
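+
+/* Last wait-loop value programmed into the DMCU; used to skip redundant
+ * PSR_SET_WAITLOOP commands.
+ */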
+static unsigned int cached_wait_loop_number;
+
+bool dce_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
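+	/* Wait for the IRAM memory to power up before programming it */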
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
+
+static void dce_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+	/* Disable write access to IRAM once we have finished using it,
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
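+
+	/* Optionally poll the PSR state reported in IRAM until it reflects
+	 * the requested enable/disable.
+	 */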
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 100; retryCount++) {
+ dce_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(10);
+ }
+ }
+}
+
+static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+ /* CZ/NL only has 4 CRTC!!
+	 * These are here because they are defined in HW regspec,
+	 * but not really valid. There is no interrupt enable mask
+	 * for these instances.
+ */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+ if (cached_wait_loop_number == wait_loop_number)
+ return;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
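+	/* Enable the DMCU microcontroller before accessing its IRAM */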
+ REG_UPDATE(DMCU_CTRL, DMCU_ENABLE, 1);
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
+
+static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+	/* Disable write access to IRAM once we have finished using it,
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit PSR may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
+ }
+
+ /* assert if max retry hit */
+ ASSERT(retryCount <= 1000);
+ }
+}
+
+static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+ /* CZ/NL only has 4 CRTC!!
+	 * These are here because they are defined in HW regspec,
+	 * but not really valid. There is no interrupt enable mask
+	 * for these instances.
+ */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dcn10_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+ if (wait_loop_number != 0) {
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ }
+}
+
+static void dcn10_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+}
+
+#endif
+
+static const struct dmcu_funcs dce_funcs = {
+ .load_iram = dce_dmcu_load_iram,
+ .set_psr_enable = dce_dmcu_set_psr_enable,
+ .setup_psr = dce_dmcu_setup_psr,
+ .get_psr_state = dce_get_dmcu_psr_state,
+ .set_psr_wait_loop = dce_psr_wait_loop,
+ .get_psr_wait_loop = dce_get_psr_wait_loop
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+static const struct dmcu_funcs dcn10_funcs = {
+ .load_iram = dcn10_dmcu_load_iram,
+ .set_psr_enable = dcn10_dmcu_set_psr_enable,
+ .setup_psr = dcn10_dmcu_setup_psr,
+ .get_psr_state = dcn10_get_dmcu_psr_state,
+ .set_psr_wait_loop = dcn10_psr_wait_loop,
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop
+};
+#endif
+
+static void dce_dmcu_construct(
+ struct dce_dmcu *dmcu_dce,
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dmcu *base = &dmcu_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ dmcu_dce->regs = regs;
+ dmcu_dce->dmcu_shift = dmcu_shift;
+ dmcu_dce->dmcu_mask = dmcu_mask;
+}
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dce_funcs;
+
+ return &dmcu_dce->base;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dcn10_funcs;
+
+ return &dmcu_dce->base;
+}
+#endif
+
+void dce_dmcu_destroy(struct dmcu **dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
+
+ kfree(dmcu_dce);
+ *dmcu = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
new file mode 100644
index 000000000000..b85f53c2f6f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_DMCU_H_
+#define _DCE_DMCU_H_
+
+#include "dmcu.h"
+
+#define DMCU_COMMON_REG_LIST_DCE_BASE() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(SMU_INTERRUPT_CONTROL)
+
+#define DMCU_DCE110_COMMON_REG_LIST() \
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define DMCU_DCN10_REG_LIST()\
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DMU_MEM_PWR_CNTL)
+
+#define DMCU_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN2_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN3_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DCI_MEM_PWR_STATUS, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCN10(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DMU_MEM_PWR_CNTL, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_REG_FIELD_LIST(type) \
+ type DMCU_IRAM_MEM_PWR_STATE; \
+ type IRAM_HOST_ACCESS_EN; \
+ type IRAM_WR_ADDR_AUTO_INC; \
+ type DMCU_ENABLE; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_INTERRUPT; \
+ type DPHY_RX_FAST_TRAINING_CAPABLE; \
+ type DPHY_LOAD_BS_COUNT; \
+ type STATIC_SCREEN1_INT_TO_UC_EN; \
+ type STATIC_SCREEN2_INT_TO_UC_EN; \
+ type STATIC_SCREEN3_INT_TO_UC_EN; \
+ type STATIC_SCREEN4_INT_TO_UC_EN; \
+ type DP_SEC_GSP0_LINE_NUM; \
+ type DP_SEC_GSP0_PRIORITY; \
+ type DC_SMU_INT_ENABLE
+
+struct dce_dmcu_shift {
+ DMCU_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_dmcu_mask {
+ DMCU_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_dmcu_registers {
+ uint32_t DMCU_CTRL;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_WR_CTRL;
+ uint32_t DMCU_IRAM_WR_DATA;
+
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+ uint32_t SMU_INTERRUPT_CONTROL;
+};
+
+struct dce_dmcu {
+ struct dmcu base;
+ const struct dce_dmcu_registers *regs;
+ const struct dce_dmcu_shift *dmcu_shift;
+ const struct dce_dmcu_mask *dmcu_mask;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG1 Bit position Data
+ * 7:0 hyst_frames[7:0]
+ * 14:8 hyst_lines[6:0]
+ * 15 RFB_UPDATE_AUTO_EN
+ * 18:16 phy_num[2:0]
+ * 21:19 dcp_sel[2:0]
+ * 22 phy_type
+ * 23 frame_cap_ind
+ * 26:24 aux_chan[2:0]
+ * 30:27 aux_repeat[3:0]
+ * 31:31 reserved[31:31]
+ ******************************************************************/
+union dce_dmcu_psr_config_data_reg1 {
+ struct {
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int reserved:1; /*[31:31]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG2
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg2 {
+ struct {
+ unsigned int dig_fe:3; /*[2:0]*/
+ unsigned int dig_be:3; /*[5:3]*/
+ unsigned int skip_wait_for_pll_lock:1; /*[6:6]*/
+ unsigned int reserved:9; /*[15:7]*/
+ unsigned int frame_delay:8; /*[23:16]*/
+ unsigned int smu_phy_id:4; /*[27:24]*/
+ unsigned int num_of_controllers:4; /*[31:28]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG3
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg3 {
+ struct {
+ unsigned int psr_level:16; /*[15:0]*/
+ unsigned int link_rate:4; /*[19:16]*/
+ unsigned int reserved:12; /*[31:20]*/
+ } bits;
+ unsigned int u32All;
+};
+
+union dce_dmcu_psr_config_data_wait_loop_reg1 {
+ struct {
+ unsigned int wait_loop:16; /* [15:0] */
+ unsigned int reserved:16; /* [31:16] */
+ } bits;
+ unsigned int u32;
+};
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+void dce_dmcu_destroy(struct dmcu **dmcu);
+
+#endif /* _DCE_DMCU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
new file mode 100644
index 000000000000..d2e66b1bc0ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_hwseq.h"
+#include "reg_helper.h"
+#include "hw_sequencer.h"
+#include "core_types.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+void dce_enable_fe_clock(struct dce_hwseq *hws,
+ unsigned int fe_inst, bool enable)
+{
+ REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst],
+ DCFE_CLOCK_ENABLE, enable);
+}
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ uint32_t lock_val = lock ? 1 : 0;
+ uint32_t dcp_grph, scl, blnd, update_lock_mode, val;
+ struct dce_hwseq *hws = dc->hwseq;
+
+	/* Do not lock the pipe when it is blanked */
+ if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+ return;
+
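+	/* Read the current lock bits, then program them all to the requested
+	 * lock state below.
+	 */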
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
+ BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, &scl,
+ BLND_BLND_V_UPDATE_LOCK, &blnd,
+ BLND_V_UPDATE_LOCK_MODE, &update_lock_mode);
+
+ dcp_grph = lock_val;
+ scl = lock_val;
+ blnd = lock_val;
+ update_lock_mode = lock_val;
+
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, scl);
+
+ if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_BLND_V_UPDATE_LOCK, blnd,
+ BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
+
+ if (hws->wa.blnd_crtc_trigger) {
+ if (!lock) {
+ uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
+ REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
+ }
+ }
+}
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst,
+ enum blnd_mode mode)
+{
+ uint32_t feedthrough = 1;
+ uint32_t blnd_mode = 0;
+ uint32_t multiplied_mode = 0;
+ uint32_t alpha_mode = 2;
+
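+	/* Defaults select feed-through of the current pipe; the switch below
+	 * adjusts them per blend mode.
+	 */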
+ switch (mode) {
+ case BLND_MODE_OTHER_PIPE:
+ feedthrough = 0;
+ blnd_mode = 1;
+ alpha_mode = 0;
+ break;
+ case BLND_MODE_BLENDING:
+ feedthrough = 0;
+ blnd_mode = 2;
+ alpha_mode = 0;
+ multiplied_mode = 1;
+ break;
+ case BLND_MODE_CURRENT_PIPE:
+ default:
+ if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) ||
+ blnd_inst == 0)
+ feedthrough = 0;
+ break;
+ }
+
+ REG_UPDATE(BLND_CONTROL[blnd_inst],
+ BLND_MODE, blnd_mode);
+
+ if (hws->masks->BLND_ALPHA_MODE != 0) {
+ REG_UPDATE_3(BLND_CONTROL[blnd_inst],
+ BLND_FEEDTHROUGH_EN, feedthrough,
+ BLND_ALPHA_MODE, alpha_mode,
+ BLND_MULTIPLIED_MODE, multiplied_mode);
+ }
+}
+
+
+static void dce_disable_sram_shut_down(struct dce_hwseq *hws)
+{
+ if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL))
+ REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL,
+ DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
+}
+
+static void dce_underlay_clock_enable(struct dce_hwseq *hws)
+{
+ /* todo: why do we need this at boot? is dce_enable_fe_clock enough? */
+ if (REG(DCFEV_CLOCK_CONTROL))
+ REG_UPDATE(DCFEV_CLOCK_CONTROL,
+ DCFEV_CLOCK_ENABLE, 1);
+}
+
+static void enable_hw_base_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+static void disable_sw_manual_control_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable)
+{
+ if (enable) {
+ enable_hw_base_light_sleep();
+ disable_sw_manual_control_light_sleep();
+ } else {
+ dce_disable_sram_shut_down(hws);
+ dce_underlay_clock_enable(hws);
+ }
+}
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst)
+{
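+	/* Route the pixel rate for this timing generator to the requested
+	 * source: the DP DTO, a combo PHY PLL, or one of the legacy PLLs.
+	 */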
+ if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO || clk_src->dp_clk_src) {
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 1);
+
+ } else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0;
+
+ REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PHYPLL_PIXEL_RATE_SOURCE, rate_source,
+ PIXEL_RATE_PLL_SOURCE, 0);
+
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 0);
+
+ } else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0;
+
+ REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_SOURCE, rate_source,
+ DP_DTO0_ENABLE, 0);
+
+ if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst]))
+ REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_PLL_SOURCE, 1);
+ } else {
+ DC_ERR("Unknown clock source. clk_src id: %d, TG_inst: %d",
+ clk_src->id, tg_inst);
+ }
+}
+
+/* Only use LUT for 8 bit formats */
+bool dce_use_lut(const struct dc_plane_state *plane_state)
+{
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
new file mode 100644
index 000000000000..52506155e361
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_HWSEQ_H__
+#define __DCE_HWSEQ_H__
+
+#include "hw_sequencer.h"
+
+#define BL_REG_LIST()\
+ SR(LVTMA_PWRSEQ_CNTL), \
+ SR(LVTMA_PWRSEQ_STATE)
+
+#define HWSEQ_DCEF_REG_LIST_DCE8() \
+ .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[2] = mmCRTC2_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[3] = mmCRTC3_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[4] = mmCRTC4_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[5] = mmCRTC5_CRTC_DCFE_CLOCK_CONTROL
+
+#define HWSEQ_DCEF_REG_LIST() \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 3), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 4), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 5), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+
+#define HWSEQ_BLND_REG_LIST() \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 3), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 4), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 5), \
+ SRII(BLND_CONTROL, BLND, 0), \
+ SRII(BLND_CONTROL, BLND, 1), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 3), \
+ SRII(BLND_CONTROL, BLND, 4), \
+ SRII(BLND_CONTROL, BLND, 5)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_DCE11_REG_LIST_BASE() \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SR(DCFEV_CLOCK_CONTROL), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 0),\
+ SRII(CRTC_H_BLANK_START_END, CRTC, 1),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1),\
+ SRII(BLND_CONTROL, BLND, 0),\
+ SRII(BLND_CONTROL, BLND, 1),\
+ SR(BLNDV_CONTROL),\
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE8_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST_DCE8(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE10_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_ST_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ .DCFE_CLOCK_CONTROL[2] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[2] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[2] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[2] = mmBLNDV_CONTROL
+
+#define HWSEQ_CZ_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ .DCFE_CLOCK_CONTROL[3] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[3] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[3] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[3] = mmBLNDV_CONTROL
+
+#define HWSEQ_DCE120_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCE112_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCN_REG_LIST()\
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 0), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 1), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 2), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 3), \
+ SRII(DCHUBP_CNTL, HUBP, 0), \
+ SRII(DCHUBP_CNTL, HUBP, 1), \
+ SRII(DCHUBP_CNTL, HUBP, 2), \
+ SRII(DCHUBP_CNTL, HUBP, 3), \
+ SRII(HUBP_CLK_CNTL, HUBP, 0), \
+ SRII(HUBP_CLK_CNTL, HUBP, 1), \
+ SRII(HUBP_CLK_CNTL, HUBP, 2), \
+ SRII(HUBP_CLK_CNTL, HUBP, 3), \
+ SRII(DPP_CONTROL, DPP_TOP, 0), \
+ SRII(DPP_CONTROL, DPP_TOP, 1), \
+ SRII(DPP_CONTROL, DPP_TOP, 2), \
+ SRII(DPP_CONTROL, DPP_TOP, 3), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
+ SR(REFCLK_CNTL), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
+ SR(DCHUBBUB_ARB_SAT_LEVEL),\
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCFCLK_CNTL),\
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
+
+#define HWSEQ_SR_WATERMARK_REG_LIST()\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
+
+#define HWSEQ_DCN1_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_SR_WATERMARK_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
+ HWSEQ_PHYPLL_REG_LIST(OTG), \
+ SR(DCHUBBUB_SDPIF_FB_TOP),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SR(DCHUBBUB_SDPIF_AGP_BASE),\
+ SR(DCHUBBUB_SDPIF_AGP_BOT),\
+ SR(DCHUBBUB_SDPIF_AGP_TOP),\
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN4_PG_CONFIG), \
+ SR(DOMAIN5_PG_CONFIG), \
+ SR(DOMAIN6_PG_CONFIG), \
+ SR(DOMAIN7_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN4_PG_STATUS), \
+ SR(DOMAIN5_PG_STATUS), \
+ SR(DOMAIN6_PG_STATUS), \
+ SR(DOMAIN7_PG_STATUS), \
+ SR(D1VGA_CONTROL), \
+ SR(D2VGA_CONTROL), \
+ SR(D3VGA_CONTROL), \
+ SR(D4VGA_CONTROL), \
+ SR(DC_IP_REQUEST_CNTL), \
+ BL_REG_LIST()
+
+struct dce_hwseq_registers {
+
+ /* Backlight registers */
+ uint32_t LVTMA_PWRSEQ_CNTL;
+ uint32_t LVTMA_PWRSEQ_STATE;
+
+ uint32_t DCFE_CLOCK_CONTROL[6];
+ uint32_t DCFEV_CLOCK_CONTROL;
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
+ uint32_t BLND_V_UPDATE_LOCK[6];
+ uint32_t BLND_CONTROL[6];
+ uint32_t BLNDV_CONTROL;
+ uint32_t CRTC_H_BLANK_START_END[6];
+ uint32_t PIXEL_RATE_CNTL[6];
+ uint32_t PHYPLL_PIXEL_RATE_CNTL[6];
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+
+ uint32_t OTG_GLOBAL_SYNC_STATUS[4];
+ uint32_t DCHUBP_CNTL[4];
+ uint32_t HUBP_CLK_CNTL[4];
+ uint32_t DPP_CONTROL[4];
+ uint32_t OPP_PIPE_CONTROL[4];
+ uint32_t REFCLK_CNTL;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
+ uint32_t DCHUBBUB_ARB_SAT_LEVEL;
+ uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+ uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
+ uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
+ uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
+ uint32_t DCHUBBUB_TEST_DEBUG_DATA;
+ uint32_t DCHUBBUB_SDPIF_FB_TOP;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCHUBBUB_SDPIF_AGP_BASE;
+ uint32_t DCHUBBUB_SDPIF_AGP_BOT;
+ uint32_t DCHUBBUB_SDPIF_AGP_TOP;
+ uint32_t DC_IP_REQUEST_CNTL;
+ uint32_t DOMAIN0_PG_CONFIG;
+ uint32_t DOMAIN1_PG_CONFIG;
+ uint32_t DOMAIN2_PG_CONFIG;
+ uint32_t DOMAIN3_PG_CONFIG;
+ uint32_t DOMAIN4_PG_CONFIG;
+ uint32_t DOMAIN5_PG_CONFIG;
+ uint32_t DOMAIN6_PG_CONFIG;
+ uint32_t DOMAIN7_PG_CONFIG;
+ uint32_t DOMAIN0_PG_STATUS;
+ uint32_t DOMAIN1_PG_STATUS;
+ uint32_t DOMAIN2_PG_STATUS;
+ uint32_t DOMAIN3_PG_STATUS;
+ uint32_t DOMAIN4_PG_STATUS;
+ uint32_t DOMAIN5_PG_STATUS;
+ uint32_t DOMAIN6_PG_STATUS;
+ uint32_t DOMAIN7_PG_STATUS;
+ uint32_t DIO_MEM_PWR_CTRL;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+ uint32_t DCFCLK_CNTL;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+ uint32_t MILLISECOND_TIME_BASE_DIV;
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t RBBMIF_TIMEOUT_DIS;
+ uint32_t RBBMIF_TIMEOUT_DIS_2;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DCHUBBUB_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_B_A;
+ uint32_t MPC_CRC_CTRL;
+ uint32_t MPC_CRC_RESULT_GB;
+ uint32_t MPC_CRC_RESULT_C;
+ uint32_t MPC_CRC_RESULT_AR;
+ uint32_t D1VGA_CONTROL;
+ uint32_t D2VGA_CONTROL;
+ uint32_t D3VGA_CONTROL;
+ uint32_t D4VGA_CONTROL;
+ /* MMHUB registers. read only. temporary hack */
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+};
+ /* set field name */
+#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define HWS_SF1(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## blk_name ## field_name ## post_fix
+
+
+#define HWSEQ_DCEF_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, CLOCK_CONTROL, DCFE_CLOCK_ENABLE, mask_sh),\
+ SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+
+#define HWSEQ_BLND_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_BLND_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_V_UPDATE_LOCK_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_FEEDTHROUGH_EN, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_ALPHA_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MULTIPLIED_MODE, mask_sh)
+
+#define HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF(blk, PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#define HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+
+#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
+ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE12_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE0_DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_OCCURRED, mask_sh), \
+ HWS_SF(HUBP0_, DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh), \
+ HWS_SF(HUBP0_, HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+
+#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, LOGICAL_PAGE_NUMBER_HI4, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, LOGICAL_PAGE_NUMBER_LO32, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, PHYSICAL_PAGE_ADDR_HI4, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, PHYSICAL_PAGE_ADDR_LO32, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, PHYSICAL_PAGE_NUMBER_MSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, PHYSICAL_PAGE_NUMBER_LSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_LOW_ADDR, LOGICAL_ADDR, mask_sh),\
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN1_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN2_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN3_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_STATUS, DOMAIN4_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_STATUS, DOMAIN5_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_REG_FIELD_LIST(type) \
+ type DCFE_CLOCK_ENABLE; \
+ type DCFEV_CLOCK_ENABLE; \
+ type DC_MEM_GLOBAL_PWR_REQ_DIS; \
+ type BLND_DCP_GRPH_V_UPDATE_LOCK; \
+ type BLND_SCL_V_UPDATE_LOCK; \
+ type BLND_DCP_GRPH_SURF_V_UPDATE_LOCK; \
+ type BLND_BLND_V_UPDATE_LOCK; \
+ type BLND_V_UPDATE_LOCK_MODE; \
+ type BLND_FEEDTHROUGH_EN; \
+ type BLND_ALPHA_MODE; \
+ type BLND_MODE; \
+ type BLND_MULTIPLIED_MODE; \
+ type DP_DTO0_ENABLE; \
+ type PIXEL_RATE_SOURCE; \
+ type PHYPLL_PIXEL_RATE_SOURCE; \
+ type PIXEL_RATE_PLL_SOURCE; \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR; \
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type LVTMA_BLON;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R;
+
+#define HWSEQ_DCN_REG_FIELD_LIST(type) \
+ type VUPDATE_NO_LOCK_EVENT_CLEAR; \
+ type VUPDATE_NO_LOCK_EVENT_OCCURRED; \
+ type HUBP_VTG_SEL; \
+ type HUBP_CLOCK_ENABLE; \
+ type DPP_CLOCK_ENABLE; \
+ type DPPCLK_RATE_CONTROL; \
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_SAT_LEVEL;\
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
+ type OPP_PIPE_CLOCK_EN;\
+ type IP_REQUEST_EN; \
+ type DOMAIN0_POWER_FORCEON; \
+ type DOMAIN0_POWER_GATE; \
+ type DOMAIN1_POWER_FORCEON; \
+ type DOMAIN1_POWER_GATE; \
+ type DOMAIN2_POWER_FORCEON; \
+ type DOMAIN2_POWER_GATE; \
+ type DOMAIN3_POWER_FORCEON; \
+ type DOMAIN3_POWER_GATE; \
+ type DOMAIN4_POWER_FORCEON; \
+ type DOMAIN4_POWER_GATE; \
+ type DOMAIN5_POWER_FORCEON; \
+ type DOMAIN5_POWER_GATE; \
+ type DOMAIN6_POWER_FORCEON; \
+ type DOMAIN6_POWER_GATE; \
+ type DOMAIN7_POWER_FORCEON; \
+ type DOMAIN7_POWER_GATE; \
+ type DOMAIN0_PGFSM_PWR_STATUS; \
+ type DOMAIN1_PGFSM_PWR_STATUS; \
+ type DOMAIN2_PGFSM_PWR_STATUS; \
+ type DOMAIN3_PGFSM_PWR_STATUS; \
+ type DOMAIN4_PGFSM_PWR_STATUS; \
+ type DOMAIN5_PGFSM_PWR_STATUS; \
+ type DOMAIN6_PGFSM_PWR_STATUS; \
+ type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DCFCLK_GATE_DIS; \
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
+ type DENTIST_DPPCLK_WDIVIDER;
+
+struct dce_hwseq_shift {
+ HWSEQ_REG_FIELD_LIST(uint8_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_hwseq_mask {
+ HWSEQ_REG_FIELD_LIST(uint32_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint32_t)
+};
+
+
+enum blnd_mode {
+ BLND_MODE_CURRENT_PIPE = 0,/* Data from current pipe only */
+ BLND_MODE_OTHER_PIPE, /* Data from other pipe only */
+ BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
+};
+
+void dce_enable_fe_clock(struct dce_hwseq *hwss,
+ unsigned int inst, bool enable);
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst, enum blnd_mode mode);
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable);
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst);
+
+bool dce_use_lut(const struct dc_plane_state *plane_state);
+#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
new file mode 100644
index 000000000000..d618fdd0cc82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_ipp.h"
+#include "reg_helper.h"
+#include "dm_services.h"
+
+#define REG(reg) \
+ (ipp_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ipp_dce->ipp_shift->field_name, ipp_dce->ipp_mask->field_name
+
+#define CTX \
+ ipp_dce->base.ctx
+
+
+static void dce_ipp_cursor_set_position(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Flag passed in structure differentiates cursor enable/disable. */
+ /* Update if it differs from cached state. */
+ REG_UPDATE(CUR_CONTROL, CURSOR_EN, position->enable);
+
+ REG_SET_2(CUR_POSITION, 0,
+ CURSOR_X_POSITION, position->x,
+ CURSOR_Y_POSITION, position->y);
+
+ REG_SET_2(CUR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, position->x_hotspot,
+ CURSOR_HOT_SPOT_Y, position->y_hotspot);
+
+ /* unlock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+static void dce_ipp_cursor_set_attributes(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ int mode;
+
+ /* Lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Program cursor control */
+ switch (attributes->color_format) {
+ case CURSOR_MODE_MONO:
+ mode = 0;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ mode = 1;
+ break;
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ mode = 2;
+ break;
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ mode = 3;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* unsupported */
+ mode = 0;
+ }
+
+ REG_UPDATE_3(CUR_CONTROL,
+ CURSOR_MODE, mode,
+ CURSOR_2X_MAGNIFY, attributes->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CUR_INV_TRANS_CLAMP, attributes->attribute_flags.bits.INVERSE_TRANSPARENT_CLAMPING);
+
+ if (attributes->color_format == CURSOR_MODE_MONO) {
+ REG_SET_3(CUR_COLOR1, 0,
+ CUR_COLOR1_BLUE, 0,
+ CUR_COLOR1_GREEN, 0,
+ CUR_COLOR1_RED, 0);
+
+ REG_SET_3(CUR_COLOR2, 0,
+ CUR_COLOR2_BLUE, 0xff,
+ CUR_COLOR2_GREEN, 0xff,
+ CUR_COLOR2_RED, 0xff);
+ }
+
+ /*
+ * Program cursor size -- NOTE: HW spec specifies that HW register
+ * stores size as (height - 1, width - 1)
+ */
+ REG_SET_2(CUR_SIZE, 0,
+ CURSOR_WIDTH, attributes->width-1,
+ CURSOR_HEIGHT, attributes->height-1);
+
+ /* Program cursor surface address */
+ /* SURFACE_ADDRESS_HIGH: Higher order bits (39:32) of hardware cursor
+	 * surface base address in bytes. It is 4K byte aligned.
+ * The correct way to program cursor surface address is to first write
+ * to CUR_SURFACE_ADDRESS_HIGH, and then write to CUR_SURFACE_ADDRESS
+ */
+ REG_SET(CUR_SURFACE_ADDRESS_HIGH, 0,
+ CURSOR_SURFACE_ADDRESS_HIGH, attributes->address.high_part);
+
+ REG_SET(CUR_SURFACE_ADDRESS, 0,
+ CURSOR_SURFACE_ADDRESS, attributes->address.low_part);
+
+ /* Unlock Cursor registers. */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+
+static void dce_ipp_program_prescale(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+	/* switch to bypass mode before changing the prescale values */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS,
+ 1);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_R, 0,
+ GRPH_PRESCALE_SCALE_R, params->scale,
+ GRPH_PRESCALE_BIAS_R, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_G, 0,
+ GRPH_PRESCALE_SCALE_G, params->scale,
+ GRPH_PRESCALE_BIAS_G, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_B, 0,
+ GRPH_PRESCALE_SCALE_B, params->scale,
+ GRPH_PRESCALE_BIAS_B, params->bias);
+
+ if (params->mode != IPP_PRESCALE_MODE_BYPASS) {
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS, 0);
+
+ /* If prescale is in use, then legacy lut should be bypassed */
+ REG_UPDATE(INPUT_GAMMA_CONTROL,
+ GRPH_INPUT_GAMMA_MODE, 1);
+ }
+}
+
+static void dce_ipp_program_input_lut(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 1);
+
+ /* enable all */
+ REG_SET(DC_LUT_WRITE_EN_MASK, 0, DC_LUT_WRITE_EN_MASK, 0x7);
+
+ /* 256 entry mode */
+ REG_UPDATE(DC_LUT_RW_MODE, DC_LUT_RW_MODE, 0);
+
+ /* LUT-256, unsigned, integer, new u0.12 format */
+ REG_SET_3(DC_LUT_CONTROL, 0,
+ DC_LUT_DATA_R_FORMAT, 3,
+ DC_LUT_DATA_G_FORMAT, 3,
+ DC_LUT_DATA_B_FORMAT, 3);
+
+ /* start from index 0 */
+ REG_SET(DC_LUT_RW_INDEX, 0,
+ DC_LUT_RW_INDEX, 0);
+
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+
+ /* power off LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 0);
+
+ /* bypass prescale, enable legacy LUT */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
+ REG_UPDATE(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
+}
+
+static void dce_ipp_set_degamma(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
+
+ ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS ||
+ mode == IPP_DEGAMMA_MODE_HW_sRGB);
+
+ REG_SET_3(DEGAMMA_CONTROL, 0,
+ GRPH_DEGAMMA_MODE, degamma_type,
+ CURSOR_DEGAMMA_MODE, degamma_type,
+ CURSOR2_DEGAMMA_MODE, degamma_type);
+}
+
+static const struct ipp_funcs dce_ipp_funcs = {
+ .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes,
+ .ipp_cursor_set_position = dce_ipp_cursor_set_position,
+ .ipp_program_prescale = dce_ipp_program_prescale,
+ .ipp_program_input_lut = dce_ipp_program_input_lut,
+ .ipp_set_degamma = dce_ipp_set_degamma
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_ipp_construct(
+ struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask)
+{
+ ipp_dce->base.ctx = ctx;
+ ipp_dce->base.inst = inst;
+ ipp_dce->base.funcs = &dce_ipp_funcs;
+
+ ipp_dce->regs = regs;
+ ipp_dce->ipp_shift = ipp_shift;
+ ipp_dce->ipp_mask = ipp_mask;
+}
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCE_IPP(*ipp));
+ *ipp = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
new file mode 100644
index 000000000000..ca04e97d44c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_IPP_H_
+#define _DCE_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCE_IPP(ipp)\
+ container_of(ipp, struct dce_ipp, base)
+
+#define IPP_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(CUR_UPDATE, DCP, id), \
+ SRI(CUR_CONTROL, DCP, id), \
+ SRI(CUR_POSITION, DCP, id), \
+ SRI(CUR_HOT_SPOT, DCP, id), \
+ SRI(CUR_COLOR1, DCP, id), \
+ SRI(CUR_COLOR2, DCP, id), \
+ SRI(CUR_SIZE, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS_HIGH, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS, DCP, id), \
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_R, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_G, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_B, DCP, id), \
+ SRI(INPUT_GAMMA_CONTROL, DCP, id), \
+ SRI(DC_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(DC_LUT_RW_MODE, DCP, id), \
+ SRI(DC_LUT_CONTROL, DCP, id), \
+ SRI(DC_LUT_RW_INDEX, DCP, id), \
+ SRI(DC_LUT_SEQ_COLOR, DCP, id), \
+ SRI(DEGAMMA_CONTROL, DCP, id)
+
+#define IPP_DCE100_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id)
+
+#define IPP_DCE110_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id)
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
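[Editorial note, not part of this patch: the token-pasting in IPP_SF is what ties these field lists to the generated register headers. As a quick illustration, one entry expands as follows, assuming the usual __SHIFT/_MASK suffixed symbols exist in those headers.]

    /* IPP_SF(CUR_CONTROL, CURSOR_EN, __SHIFT)
     *     expands to   .CURSOR_EN = CUR_CONTROL__CURSOR_EN__SHIFT
     * IPP_SF(CUR_CONTROL, CURSOR_EN, _MASK)
     *     expands to   .CURSOR_EN = CUR_CONTROL__CURSOR_EN_MASK
     */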
+#define IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_SF(CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ IPP_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh)
+
+#define IPP_DCE120_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ IPP_SF(DCP0_CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(DCP0_INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_REG_FIELD_LIST(type) \
+ type CURSOR_UPDATE_LOCK; \
+ type CURSOR_EN; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CUR_INV_TRANS_CLAMP; \
+ type CUR_COLOR1_BLUE; \
+ type CUR_COLOR1_GREEN; \
+ type CUR_COLOR1_RED; \
+ type CUR_COLOR2_BLUE; \
+ type CUR_COLOR2_GREEN; \
+ type CUR_COLOR2_RED; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type GRPH_PRESCALE_BYPASS; \
+ type GRPH_PRESCALE_SCALE_R; \
+ type GRPH_PRESCALE_BIAS_R; \
+ type GRPH_PRESCALE_SCALE_G; \
+ type GRPH_PRESCALE_BIAS_G; \
+ type GRPH_PRESCALE_SCALE_B; \
+ type GRPH_PRESCALE_BIAS_B; \
+ type GRPH_INPUT_GAMMA_MODE; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type DC_LUT_WRITE_EN_MASK; \
+ type DC_LUT_RW_MODE; \
+ type DC_LUT_DATA_R_FORMAT; \
+ type DC_LUT_DATA_G_FORMAT; \
+ type DC_LUT_DATA_B_FORMAT; \
+ type DC_LUT_RW_INDEX; \
+ type DC_LUT_SEQ_COLOR; \
+ type GRPH_DEGAMMA_MODE; \
+ type CURSOR_DEGAMMA_MODE; \
+ type CURSOR2_DEGAMMA_MODE
+
+struct dce_ipp_shift {
+ IPP_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_ipp_mask {
+ IPP_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_ipp_registers {
+ uint32_t CUR_UPDATE;
+ uint32_t CUR_CONTROL;
+ uint32_t CUR_POSITION;
+ uint32_t CUR_HOT_SPOT;
+ uint32_t CUR_COLOR1;
+ uint32_t CUR_COLOR2;
+ uint32_t CUR_SIZE;
+ uint32_t CUR_SURFACE_ADDRESS_HIGH;
+ uint32_t CUR_SURFACE_ADDRESS;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t PRESCALE_VALUES_GRPH_R;
+ uint32_t PRESCALE_VALUES_GRPH_G;
+ uint32_t PRESCALE_VALUES_GRPH_B;
+ uint32_t INPUT_GAMMA_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DC_LUT_WRITE_EN_MASK;
+ uint32_t DC_LUT_RW_MODE;
+ uint32_t DC_LUT_CONTROL;
+ uint32_t DC_LUT_RW_INDEX;
+ uint32_t DC_LUT_SEQ_COLOR;
+ uint32_t DEGAMMA_CONTROL;
+};
+
+struct dce_ipp {
+ struct input_pixel_processor base;
+ const struct dce_ipp_registers *regs;
+ const struct dce_ipp_shift *ipp_shift;
+ const struct dce_ipp_mask *ipp_mask;
+};
+
+void dce_ipp_construct(struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask);
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp);
+
+#endif /* _DCE_IPP_H_ */
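[Editorial note, not part of this patch: a minimal, hypothetical sketch of how a DCE110-style resource file would consume this header, building the register/shift/mask tables from the macros above and handing them to dce_ipp_construct(). It assumes the usual SRI() register-address macro, the generated dce register/sh_mask headers, linux/slab.h, and a ctx/inst pair from the surrounding resource-construction code; the pipe count of 3 is illustrative only.]

    #define ipp_regs(id) [id] = { IPP_DCE110_REG_LIST_DCE_BASE(id) }

    /* per-pipe register addresses (pipe count illustrative) */
    static const struct dce_ipp_registers ipp_regs[] = {
            ipp_regs(0), ipp_regs(1), ipp_regs(2)
    };

    /* shared shift/mask tables built from the same field list */
    static const struct dce_ipp_shift ipp_shift = {
            IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
    };

    static const struct dce_ipp_mask ipp_mask = {
            IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
    };

    /* later, while building pipe resources (ctx and inst assumed): */
    struct dce_ipp *ipp = kzalloc(sizeof(*ipp), GFP_KERNEL);

    if (ipp)
            dce_ipp_construct(ipp, ctx, inst,
                              &ipp_regs[inst], &ipp_shift, &ipp_mask);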
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
new file mode 100644
index 000000000000..fe88852b4774
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -0,0 +1,1379 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xa
+#endif
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK 0x00000400L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#endif
+
+#define CTX \
+ enc110->base.ctx
+
+#define REG(reg)\
+ (enc110->link_regs->reg)
+
+#define AUX_REG(reg)\
+ (enc110->aux_regs->reg)
+
+#define HPD_REG(reg)\
+ (enc110->hpd_regs->reg)
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+/*
+ * @brief
+ * Trigger Source Select
+ * ASIC-dependent, actual values for register programming
+ */
+#define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
+
+/* Minimum pixel clock, in KHz. For a TMDS signal this is 25.00 MHz */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum pixel clock, in KHz. For a TMDS signal this is 165.00 MHz */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+/* Maximum encoder clock for current ASICs: 600 MHz */
+#define MAX_ENCODER_CLOCK 600000
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DIG_REG(reg)\
+ (reg + enc110->offsets.dig)
+
+#define DP_REG(reg)\
+ (reg + enc110->offsets.dp)
+
+static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce110_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dce110_link_encoder_enable_hpd,
+ .disable_hpd = dce110_link_encoder_disable_hpd,
+ .destroy = dce110_link_encoder_destroy
+};
+
+static enum bp_result link_transmitter_control(
+ struct dce110_link_encoder *enc110,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc110->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+static void enable_phy_bypass_mode(
+ struct dce110_link_encoder *enc110,
+ bool enable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
+
+}
+
+static void disable_prbs_symbols(
+ struct dce110_link_encoder *enc110,
+ bool disable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE_4(DP_DPHY_CNTL,
+ DPHY_ATEST_SEL_LANE0, disable,
+ DPHY_ATEST_SEL_LANE1, disable,
+ DPHY_ATEST_SEL_LANE2, disable,
+ DPHY_ATEST_SEL_LANE3, disable);
+}
+
+static void disable_prbs_mode(
+ struct dce110_link_encoder *enc110)
+{
+ REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
+}
+
+static void program_pattern_symbols(
+ struct dce110_link_encoder *enc110,
+ uint16_t pattern_symbols[8])
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM0, 0,
+ DPHY_SYM1, pattern_symbols[0],
+ DPHY_SYM2, pattern_symbols[1],
+ DPHY_SYM3, pattern_symbols[2]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM1, 0,
+ DPHY_SYM4, pattern_symbols[3],
+ DPHY_SYM5, pattern_symbols[4],
+ DPHY_SYM6, pattern_symbols[5]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_2(DP_DPHY_SYM2, 0,
+ DPHY_SYM7, pattern_symbols[6],
+ DPHY_SYM8, pattern_symbols[7]);
+}
+
+static void set_dp_phy_pattern_d102(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* For 10-bit PRBS or debug symbols
+ * please use the following sequence: */
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+
+ /* Program debug symbols to be output */
+ {
+ uint16_t pattern_symbols[8] = {
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA,
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA
+ };
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_link_training_complete(
+ struct dce110_link_encoder *enc110,
+ bool complete)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
+
+}
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ /* Write Training Pattern */
+
+ REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
+
+ /* Set HW Register Training Complete to false */
+
+ set_link_training_complete(enc110, false);
+
+ /* Disable PHY Bypass mode to output Training Pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+static void setup_panel_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ uint32_t value;
+
+ ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+ value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ value = 0x1;
+ break;
+ case DP_PANEL_MODE_SPECIAL:
+ value = 0x11;
+ break;
+ default:
+ value = 0x0;
+ break;
+ }
+
+ REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+}
+
+static void set_dp_phy_pattern_symbol_error(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* program correct panel mode*/
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* A PRBS23 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 1,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_prbs7(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* A PRBS7 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 0,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_80bit_custom(
+ struct dce110_link_encoder *enc110,
+ const uint8_t *pattern)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Enable PHY bypass mode to enable the test pattern */
+ /* TODO is it really needed ? */
+
+ enable_phy_bypass_mode(enc110, true);
+
+ /* Program 80 bit custom pattern */
+ {
+ uint16_t pattern_symbols[8];
+
+ pattern_symbols[0] =
+ ((pattern[1] & 0x03) << 8) | pattern[0];
+ pattern_symbols[1] =
+ ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
+ pattern_symbols[2] =
+ ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
+ pattern_symbols[3] =
+ (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
+ pattern_symbols[4] =
+ ((pattern[6] & 0x03) << 8) | pattern[5];
+ pattern_symbols[5] =
+ ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
+ pattern_symbols[6] =
+ ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
+ pattern_symbols[7] =
+ (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
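[Editorial note, not part of this patch: a worked example of the byte-to-symbol packing above. The ten pattern bytes arrive least significant byte first, and each 10-bit DP symbol borrows its top bits from the next byte.]

    /* With pattern[0] = 0xAA and pattern[1] = 0x55:
     *   pattern_symbols[0] = ((0x55 & 0x03) << 8) | 0xAA
     *                      = (0x01 << 8) | 0xAA
     *                      = 0x1AA
     * bits 9:8 come from the low two bits of pattern[1],
     * bits 7:0 come from pattern[0].
     */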
+
+static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
+ struct dce110_link_encoder *enc110,
+ unsigned int cp2520_pattern)
+{
+
+	/* Previously there was a register, DP_HBR2_EYE_PATTERN,
+	 * that was enabled to generate this pattern.
+	 * It does not work with the latest spec change,
+	 * so we program the following registers manually.
+	 *
+	 * The following settings have been confirmed
+	 * by Nick Chorney and Sandra Liu */
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Setup DIG encoder in DP SST mode */
+ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+ /* ensure normal panel mode. */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+	/* no vbid after BS (SR)
+	 * DP_LINK_FRAMING_CNTL change history (Sandra Liu):
+	 * 11000260 / 11000104 / 110000FC */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0xFC,
+ DP_VBID_DISABLE, 1,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ /* swap every BS with SR */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
+
+ /* select cp2520 patterns */
+ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
+ REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
+ DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
+ else
+ /* pre-DCE11 can only generate CP2520 pattern 2 */
+ ASSERT(cp2520_pattern == 2);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* disable video stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+}
+
+static void set_dp_phy_pattern_passthrough_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ /* program correct panel mode */
+ setup_panel_mode(enc110, panel_mode);
+
+ /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
+ * in case we were doing HBR2 compliance pattern before
+ */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0x2000,
+ DP_VBID_DISABLE, 0,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+/* return value is bit-vector */
+static uint8_t get_frontend_source(
+ enum engine_id engine)
+{
+ switch (engine) {
+ case ENGINE_ID_DIGA:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGA;
+ case ENGINE_ID_DIGB:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGB;
+ case ENGINE_ID_DIGC:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGC;
+ case ENGINE_ID_DIGD:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGD;
+ case ENGINE_ID_DIGE:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGE;
+ case ENGINE_ID_DIGF:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGF;
+ case ENGINE_ID_DIGG:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGG;
+ default:
+ ASSERT_CRITICAL(false);
+ return DCE110_DIG_FE_SOURCE_SELECT_INVALID;
+ }
+}
+
+static void configure_encoder(
+ struct dce110_link_encoder *enc110,
+ const struct dc_link_settings *link_settings)
+{
+ /* set number of lanes */
+
+ REG_SET(DP_CONFIG, 0,
+ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
+
+ /* setup scrambler */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
+}
+
+static void aux_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+ uint32_t addr = AUX_REG(AUX_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL);
+ set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = AUX_REG(AUX_DPHY_RX_CONTROL0);
+ value = dm_read_reg(ctx, addr);
+
+ /* 1/4 window (the maximum allowed) */
+ set_reg_field_value(value, 1,
+ AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW);
+ dm_write_reg(ctx, addr, value);
+
+}
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ if (exit_link_training_required)
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 1);
+ else {
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 0);
+		/* In DCE 11, we can pre-program a Force SR register
+		 * to trigger an SR symbol after 5 idle patterns have been
+		 * transmitted. Upon PSR exit, the DMCU can set
+		 * DPHY_LOAD_BS_COUNT_START = 1. Once 1 is written to
+		 * DPHY_LOAD_BS_COUNT_START and the internal counter
+		 * reaches DPHY_LOAD_BS_COUNT, the next BS symbol is
+		 * replaced by an SR symbol once.
+		 */
+
+ REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
+ }
+}
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ REG_UPDATE_2(DP_SEC_CNTL1,
+ DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
+ DP_SEC_GSP0_PRIORITY, 1);
+}
+
+static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+{
+ uint32_t value;
+
+ REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+ return value;
+}
+
+static void link_encoder_disable(struct dce110_link_encoder *enc110)
+{
+ /* reset training pattern */
+ REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
+ DPHY_TRAINING_PATTERN_SEL, 0);
+
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+
+ /* reset panel mode */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+}
+
+static void hpd_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ /* Associate HPD with DIG_BE */
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
+}
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
+
+ if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ max_pixel_clock *= 2;
+
+	/* This handles the case of HDMI downgraded to DVI: we don't want to
+	 * cap the pixel clock at the DVI limit if the connector is not DVI.
+	 */
+ if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
+ connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ max_pixel_clock = enc110->base.features.max_hdmi_pixel_clock;
+
+	/* DVI only supports RGB pixel encoding */
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+	/* connect DVI via the adapter's HDMI connector */
+ if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
+ signal != SIGNAL_TYPE_HDMI_TYPE_A &&
+ crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
+ return false;
+ if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if (crtc_timing->pix_clk_khz > max_pixel_clock)
+ return false;
+
+ /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ case COLOR_DEPTH_161616:
+ if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool dce110_link_encoder_validate_hdmi_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing,
+ int adjusted_pix_clk_khz)
+{
+ enum dc_color_depth max_deep_color =
+ enc110->base.features.max_hdmi_deep_color;
+
+ if (max_deep_color < crtc_timing->display_color_depth)
+ return false;
+
+ if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
+ return false;
+ if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if ((adjusted_pix_clk_khz == 0) ||
+ (adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock))
+ return false;
+
+ /* DCE11 HW does not support 420 */
+ if (!enc110->base.features.ycbcr420_supported &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+ if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
+ adjusted_pix_clk_khz >= 300000)
+ return false;
+ return true;
+}
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* default RGB only */
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+
+ if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
+ return true;
+
+	/* for the DCE 8.x or later DP Y-only feature,
+	 * we need the ASIC cap + FeatureSupportDPYonly; 666 is not supported */
+ if (crtc_timing->flags.Y_ONLY &&
+ enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+ crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ return true;
+
+ return false;
+}
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+
+ enc110->base.funcs = &dce110_lnk_enc_funcs;
+ enc110->base.ctx = init_data->ctx;
+ enc110->base.id = init_data->encoder;
+
+ enc110->base.hpd_source = init_data->hpd_source;
+ enc110->base.connector = init_data->connector;
+
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc110->base.features = *enc_features;
+
+ enc110->base.transmitter = init_data->transmitter;
+
+	/* set the flag to indicate whether the driver polls the I2C data pin
+	 * while doing the DP sink detect
+	 */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc110->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc110->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+	/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+	 * SW always assigns a DIG_FE 1:1 mapped to the DIG_BE for non-MST UNIPHY.
+	 * SW assigns DIG_FE to non-MST UNIPHY first and MST last. So the
+	 * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS.
+	 * The preferred DIG assignment is decided by board design.
+	 * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design
+	 * and VBIOS will filter out a 7th UNIPHY for DCE 8.0.
+	 * Given this, adding DIGG should not hurt DCE 8.0.
+	 * This lets DCE 8.1 share DCE 8.0 code as much as possible.
+	 */
+
+ enc110->link_regs = link_regs;
+ enc110->aux_regs = aux_regs;
+ enc110->hpd_regs = hpd_regs;
+
+ switch (enc110->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc110->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc110->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc110->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc110->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc110->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc110->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc110->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* Override features with DCE-specific values */
+ if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
+ enc110->base.ctx->dc_bios, enc110->base.id,
+ &bp_cap_info)) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ }
+}
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ bool is_valid;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ is_valid = dce110_link_encoder_validate_dvi_output(
+ enc110,
+ stream->sink->link->connector_signal,
+ stream->signal,
+ &stream->timing);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ is_valid = dce110_link_encoder_validate_hdmi_output(
+ enc110,
+ &stream->timing,
+ stream->phy_pix_clk);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ is_valid = dce110_link_encoder_validate_dp_output(
+ enc110, &stream->timing);
+ break;
+ case SIGNAL_TYPE_EDP:
+		is_valid =
+			(stream->timing.pixel_encoding == PIXEL_ENCODING_RGB);
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ is_valid = true;
+ break;
+ default:
+ is_valid = false;
+ break;
+ }
+
+ return is_valid;
+}
+
+void dce110_link_encoder_hw_init(
+ struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ cntl.action = TRANSMITTER_CONTROL_INIT;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.coherent = false;
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enc110->base.connector.id == CONNECTOR_ID_LVDS) {
+ cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ ASSERT(result == BP_RESULT_OK);
+
+ } else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ ctx->dc->hwss.edp_power_control(enc, true);
+ }
+ aux_initialize(enc110);
+
+	/* reinitialize HPD.
+	 * hpd_initialize() will pass the DIG_FE id to the HW context.
+	 * All other routines within the HW context will use fe_engine_offset
+	 * as the DIG_FE id even if the caller passes a DIG_FE id.
+	 * So this routine must be called first. */
+ hpd_initialize(enc110);
+}
+
+void dce110_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(TO_DCE110_LINK_ENC(*enc));
+ *enc = NULL;
+}
+
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ /* DP SST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
+ break;
+ case SIGNAL_TYPE_LVDS:
+ /* LVDS */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ /* TMDS-DVI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* TMDS-HDMI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* DP MST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ /* invalid mode ! */
+ break;
+ }
+
+}
+
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ if (hdmi) {
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.lanes_number = 4;
+ } else if (dual_link) {
+ cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ cntl.lanes_number = 8;
+ } else {
+ cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.lanes_number = 4;
+ }
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ cntl.pixel_clock = pixel_clock;
+ cntl.color_depth = color_depth;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+/*
+ * @brief
+ * Disable transmitter and its encoder
+ */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal,
+ struct dc_link *link)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ if (!is_dig_enabled(enc110)) {
+ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ return;
+ }
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP)
+ ctx->dc->hwss.edp_backlight_control(link, false);
+	/* Powering down RX and disabling the GPU PHY should be paired.
+	 * Disabling the PHY without powering down RX may cause
+	 * loss of symbol lock, upon which we will get a DP sink interrupt. */
+
+	/* There is a case with DP active dongles
+	 * where we want to disable the PHY but keep RX powered;
+	 * for those we need to ignore the DP sink interrupt
+	 * by checking the lane count that was set
+	 * on the last do_enable_output(). */
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.signal = signal;
+ cntl.connector_obj_id = enc110->base.connector;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ if (dc_is_dp_signal(signal))
+ link_encoder_disable(enc110);
+
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ /* power down eDP panel */
+ /* TODO: Power control cause regression, we should implement
+ * it properly, for now just comment it.
+ *
+ * link_encoder_edp_wait_for_hpd_ready(
+ link_enc,
+ link_enc->connector,
+ false);
+
+ * link_encoder_edp_power_control(
+ link_enc, false); */
+ }
+}
+
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ union dpcd_training_lane_set training_lane_set = { { 0 } };
+ int32_t lane = 0;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (!link_settings) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = link_settings->link_settings.lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_settings.link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+ /* translate lane settings */
+
+ training_lane_set.bits.VOLTAGE_SWING_SET =
+ link_settings->lane_settings[lane].VOLTAGE_SWING;
+ training_lane_set.bits.PRE_EMPHASIS_SET =
+ link_settings->lane_settings[lane].PRE_EMPHASIS;
+
+ /* post cursor 2 setting only applies to HBR2 link rate */
+ if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+ /* this is passed to VBIOS
+ * to program post cursor 2 level */
+
+ training_lane_set.bits.POST_CURSOR2_SET =
+ link_settings->lane_settings[lane].POST_CURSOR2;
+ }
+
+ cntl.lane_select = lane;
+ cntl.lane_settings = training_lane_set.raw;
+
+ /* call VBIOS table to set voltage swing and pre-emphasis */
+ link_transmitter_control(enc110, &cntl);
+ }
+}
+
+/* set DP PHY test and training patterns */
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (param->dp_phy_pattern) {
+ case DP_TEST_PATTERN_TRAINING_PATTERN1:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN2:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN3:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
+ break;
+ case DP_TEST_PATTERN_D102:
+ set_dp_phy_pattern_d102(enc110);
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ set_dp_phy_pattern_symbol_error(enc110);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ set_dp_phy_pattern_prbs7(enc110);
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ set_dp_phy_pattern_80bit_custom(
+ enc110, param->custom_pattern);
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1);
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2);
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3);
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE: {
+ set_dp_phy_pattern_passthrough_mode(
+ enc110, param->dp_panel_mode);
+ break;
+ }
+
+ default:
+ /* invalid phy pattern */
+ ASSERT_CRITICAL(false);
+ break;
+ }
+}
+
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
+
+ if (stream_enc) {
+ *src = stream_enc->id;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t value0 = 0;
+ uint32_t value1 = 0;
+ uint32_t value2 = 0;
+ uint32_t slots = 0;
+ uint32_t src = 0;
+ uint32_t retries = 0;
+
+ /* For CZ there are only 3 pipes, so at most 3 virtual channels can be used. */
+
+ /* --- Set MSE Stream Attribute:
+ * set up the VC payload table on the Tx side,
+ * then issue the allocation change trigger
+ * to commit the payload on both the Tx and Rx side */
+
+ /* the table should be cleaned up on each call */
+
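+ /* SAT0 holds rows 0 and 1, SAT1 holds rows 2 and 3; any row beyond
+ * stream_count is programmed with src 0 and slot count 0 below */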
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC0, src,
+ DP_MSE_SAT_SLOT_COUNT0, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC1, src,
+ DP_MSE_SAT_SLOT_COUNT1, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC2, src,
+ DP_MSE_SAT_SLOT_COUNT2, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC3, src,
+ DP_MSE_SAT_SLOT_COUNT3, slots);
+
+ /* --- wait for transaction finish */
+
+ /* send the allocation change trigger (ACT):
+ * this step first sends the ACT,
+ * then double buffers the SAT into the hardware,
+ * making the new allocation active on the DP MST link */
+
+ /* DP_MSE_SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger */
+
+ REG_UPDATE(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, 1);
+
+ /* wait for the update to complete
+ * (i.e. the DP_MSE_SAT_UPDATE field is reset to 0),
+ * then wait for the transmission of at least 16 MTP headers
+ * on the immediate local link,
+ * i.e. until the read-only DP_MSE_16_MTP_KEEPOUT field is reset to 0.
+ * A value of 1 indicates that the DP MST link
+ * is in the 16 MTP keepout region after a VC has been added;
+ * MST stream bandwidth (VC rate) can only be configured
+ * after this bit is cleared */
+
+ do {
+ udelay(10);
+
+ value0 = REG_READ(DP_MSE_SAT_UPDATE);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, &value1);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_16_MTP_KEEPOUT, &value2);
+
+ /* DP_MSE_SAT_UPDATE was set to 1 above; exit once HW clears it and the keepout region ends */
+ if (!value1 && !value2)
+ break;
+ ++retries;
+ } while (retries < DP_MST_UPDATE_MAX_RETRY);
+}
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t field;
+
+ if (engine != ENGINE_ID_UNKNOWN) {
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
+
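+ /* DIG_FE_SOURCE_SELECT is treated here as a bitmask of frontend
+ * engines; set or clear the bit for this engine */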
+ if (connect)
+ field |= get_frontend_source(engine);
+ else
+ field &= ~get_frontend_source(engine);
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
+ }
+}
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t hpd_enable = 0;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ get_reg_field_value(hpd_enable, DC_HPD_CONTROL, DC_HPD_EN);
+
+ if (hpd_enable == 0)
+ set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
+}
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
new file mode 100644
index 000000000000..494067dedd03
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCE110_H__
+#define __DC_LINK_ENCODER__DCE110_H__
+
+#include "link_encoder.h"
+
+#define TO_DCE110_LINK_ENC(link_encoder)\
+ container_of(link_encoder, struct dce110_link_encoder, base)
+
+/* Registers not found in the DCE120 spec:
+ * BIOS_SCRATCH_2
+ * DP_DPHY_INTERNAL_CTRL
+ */
+
+#define AUX_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+
+#define HPD_REG_LIST(id)\
+ SRI(DC_HPD_CONTROL, HPD, id)
+
+#define LE_COMMON_REG_LIST_BASE(id) \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id)
+
+#define LE_COMMON_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE80_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ LE_COMMON_REG_LIST_BASE(id)
+
+#define LE_DCE100_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE110_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE120_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCN10_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
+
+struct dce110_link_enc_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL0;
+};
+
+struct dce110_link_enc_hpd_registers {
+ uint32_t DC_HPD_CONTROL;
+};
+
+struct dce110_link_enc_registers {
+ /* DMCU registers */
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+
+ /* Common DP registers */
+ uint32_t DIG_BE_CNTL;
+ uint32_t DIG_BE_EN_CNTL;
+ uint32_t DP_CONFIG;
+ uint32_t DP_DPHY_CNTL;
+ uint32_t DP_DPHY_INTERNAL_CTRL;
+ uint32_t DP_DPHY_PRBS_CNTL;
+ uint32_t DP_DPHY_SCRAM_CNTL;
+ uint32_t DP_DPHY_SYM0;
+ uint32_t DP_DPHY_SYM1;
+ uint32_t DP_DPHY_SYM2;
+ uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
+ uint32_t DP_LINK_CNTL;
+ uint32_t DP_LINK_FRAMING_CNTL;
+ uint32_t DP_MSE_SAT0;
+ uint32_t DP_MSE_SAT1;
+ uint32_t DP_MSE_SAT2;
+ uint32_t DP_MSE_SAT_UPDATE;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_DPHY_FAST_TRAINING;
+ uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+ uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
+ uint32_t DP_SEC_CNTL1;
+};
+
+struct dce110_link_encoder {
+ struct link_encoder base;
+ const struct dce110_link_enc_registers *link_regs;
+ const struct dce110_link_enc_aux_registers *aux_regs;
+ const struct dce110_link_enc_hpd_registers *hpd_regs;
+};
+
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs);
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_wireless_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream);
+
+/****************** HW programming ************************/
+
+/* initialize HW */ /* why do we initialize aux in here? */
+void dce110_link_encoder_hw_init(struct link_encoder *enc);
+
+void dce110_link_encoder_destroy(struct link_encoder **enc);
+
+/* program DIG_MODE in DIG_BE */
+/* TODO can this be combined with enable_output? */
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/* enables TMDS PHY output */
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* disable PHY output */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link);
+
+/* set DP lane settings */
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param);
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index);
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc);
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc);
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required);
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+
+#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
new file mode 100644
index 000000000000..0790f25c7b3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_mem_input.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define CTX \
+ dce_mi->base.ctx
+#define REG(reg)\
+ dce_mi->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_mi->shifts->field_name, dce_mi->masks->field_name
+
+struct pte_setting {
+ unsigned int bpp;
+ unsigned int page_width;
+ unsigned int page_height;
+ unsigned char min_pte_before_flip_horiz_scan;
+ unsigned char min_pte_before_flip_vert_scan;
+ unsigned char pte_req_per_chunk;
+ unsigned char param_6;
+ unsigned char param_7;
+ unsigned char param_8;
+};
+
+enum mi_bits_per_pixel {
+ mi_bpp_8 = 0,
+ mi_bpp_16,
+ mi_bpp_32,
+ mi_bpp_64,
+ mi_bpp_count,
+};
+
+enum mi_tiling_format {
+ mi_tiling_linear = 0,
+ mi_tiling_1D,
+ mi_tiling_2D,
+ mi_tiling_count,
+};
+
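+/* rows are indexed by bpp within each tiling mode; columns follow
+ * struct pte_setting: {bpp, page_width, page_height,
+ * min_pte_before_flip_horiz_scan, min_pte_before_flip_vert_scan,
+ * pte_req_per_chunk, param_6, param_7, param_8}
+ */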
+static const struct pte_setting pte_settings[mi_tiling_count][mi_bpp_count] = {
+ [mi_tiling_linear] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+ },
+ [mi_tiling_1D] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+ },
+ [mi_tiling_2D] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+ },
+};
+
+static enum mi_bits_per_pixel get_mi_bpp(
+ enum surface_pixel_format format)
+{
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ return mi_bpp_64;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888)
+ return mi_bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB1555)
+ return mi_bpp_16;
+ else
+ return mi_bpp_8;
+}
+
+static enum mi_tiling_format get_mi_tiling(
+ union dc_tiling_info *tiling_info)
+{
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return mi_tiling_1D;
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return mi_tiling_2D;
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return mi_tiling_linear;
+ default:
+ return mi_tiling_2D;
+ }
+}
+
+static bool is_vert_scan(enum dc_rotation_angle rotation)
+{
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void dce_mi_program_pte_vm(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ enum mi_bits_per_pixel mi_bpp = get_mi_bpp(format);
+ enum mi_tiling_format mi_tiling = get_mi_tiling(tiling_info);
+ const struct pte_setting *pte = &pte_settings[mi_tiling][mi_bpp];
+
+ unsigned int page_width = log_2(pte->page_width);
+ unsigned int page_height = log_2(pte->page_height);
+ unsigned int min_pte_before_flip = is_vert_scan(rotation) ?
+ pte->min_pte_before_flip_vert_scan :
+ pte->min_pte_before_flip_horiz_scan;
+
+ REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
+ GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
+
+ REG_UPDATE_3(DVMM_PTE_CONTROL,
+ DVMM_PAGE_WIDTH, page_width,
+ DVMM_PAGE_HEIGHT, page_height,
+ DVMM_MIN_PTE_BEFORE_FLIP, min_pte_before_flip);
+
+ REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
+ DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
+ DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
+}
+
+static void program_urgency_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t urgency_low_wm,
+ uint32_t urgency_high_wm)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK, wm_select);
+
+ REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+ URGENCY_LOW_WATERMARK, urgency_low_wm,
+ URGENCY_HIGH_WATERMARK, urgency_high_wm);
+}
+
+static void program_nbp_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t nbp_wm)
+{
+ if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE, 1,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+
+ if (REG(DPG_PIPE_LOW_POWER_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_ENABLE, 1,
+ PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+}
+
+static void program_stutter_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t stutter_mark)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+ if (REG(DPG_PIPE_STUTTER_CONTROL2))
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+ else
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+
+static void dce_mi_program_display_marks(
+ struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 2, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */
+}
+
+static void dce120_mi_program_display_marks(struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 0, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set b */
+ urgent.b_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 2, /* set c */
+ urgent.c_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 3, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
+ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
+ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */
+ program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */
+ program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */
+}
+
+static void program_tiling(
+ struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+{
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE_6(GRPH_CONTROL,
+ GRPH_SW_MODE, info->gfx9.swizzle,
+ GRPH_NUM_BANKS, log_2(info->gfx9.num_banks),
+ GRPH_NUM_SHADER_ENGINES, log_2(info->gfx9.num_shader_engines),
+ GRPH_NUM_PIPES, log_2(info->gfx9.num_pipes),
+ GRPH_COLOR_EXPANSION_MODE, 1,
+ GRPH_SE_ENABLE, info->gfx9.shaderEnable);
+ /* TODO: where does the info for DCP0_GRPH_CONTROL__GRPH_SE_ENABLE come from?
+ * GRPH_SE_ENABLE, 1,
+ * GRPH_Z, 0);
+ */
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
+ REG_UPDATE_9(GRPH_CONTROL,
+ GRPH_NUM_BANKS, info->gfx8.num_banks,
+ GRPH_BANK_WIDTH, info->gfx8.bank_width,
+ GRPH_BANK_HEIGHT, info->gfx8.bank_height,
+ GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
+ GRPH_TILE_SPLIT, info->gfx8.tile_split,
+ GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode,
+ GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
+ GRPH_ARRAY_MODE, info->gfx8.array_mode,
+ GRPH_COLOR_EXPANSION_MODE, 1);
+ /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
+ /*
+ GRPH_Z, 0);
+ */
+ }
+}
+
+
+static void program_size_and_rotation(
+ struct dce_mem_input *dce_mi,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ const struct rect *in_rect = &plane_size->grph.surface_size;
+ struct rect hw_rect = plane_size->grph.surface_size;
+ const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
+ [ROTATION_ANGLE_0] = 0,
+ [ROTATION_ANGLE_90] = 1,
+ [ROTATION_ANGLE_180] = 2,
+ [ROTATION_ANGLE_270] = 3,
+ };
+
+ if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) {
+ hw_rect.x = in_rect->y;
+ hw_rect.y = in_rect->x;
+
+ hw_rect.height = in_rect->width;
+ hw_rect.width = in_rect->height;
+ }
+
+ REG_SET(GRPH_X_START, 0,
+ GRPH_X_START, hw_rect.x);
+
+ REG_SET(GRPH_Y_START, 0,
+ GRPH_Y_START, hw_rect.y);
+
+ REG_SET(GRPH_X_END, 0,
+ GRPH_X_END, hw_rect.width);
+
+ REG_SET(GRPH_Y_END, 0,
+ GRPH_Y_END, hw_rect.height);
+
+ REG_SET(GRPH_PITCH, 0,
+ GRPH_PITCH, plane_size->grph.surface_pitch);
+
+ REG_SET(HW_ROTATION, 0,
+ GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
+}
+
+static void program_grph_pixel_format(
+ struct dce_mem_input *dce_mi,
+ enum surface_pixel_format format)
+{
+ uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */
+ uint32_t grph_depth = 0, grph_format = 0;
+ uint32_t sign = 0, floating = 0;
+
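+ /* a crossbar value of 2 exchanges the red and blue channels
+ * (0 means no swap), so ABGR content can reuse the ARGB path */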
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 ||
+ /* TODO: it doesn't look like we handle BGRA here;
+ * we should probably swap the endianness */
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ /* ABGR formats */
+ red_xbar = 2;
+ blue_xbar = 2;
+ }
+
+ REG_SET_2(GRPH_SWAP_CNTL, 0,
+ GRPH_RED_CROSSBAR, red_xbar,
+ GRPH_BLUE_CROSSBAR, blue_xbar);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ grph_depth = 1;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ sign = 1;
+ floating = 1;
+ /* no break */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ DC_ERR("unsupported grph pixel format");
+ break;
+ }
+
+ REG_UPDATE_2(GRPH_CONTROL,
+ GRPH_DEPTH, grph_depth,
+ GRPH_FORMAT, grph_format);
+
+ REG_UPDATE_4(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_SELECT, floating,
+ GRPH_PRESCALE_R_SIGN, sign,
+ GRPH_PRESCALE_G_SIGN, sign,
+ GRPH_PRESCALE_B_SIGN, sign);
+}
+
+static void dce_mi_program_surface_config(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+
+ REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
+
+ program_tiling(dce_mi, tiling_info);
+ program_size_and_rotation(dce_mi, rotation, plane_size);
+
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
+ format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ program_grph_pixel_format(dce_mi, format);
+}
+
+static uint32_t get_dmif_switch_time_us(
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz)
+{
+ uint32_t frame_time;
+ uint32_t pixels_per_second;
+ uint32_t pixels_per_frame;
+ uint32_t refresh_rate;
+ const uint32_t us_in_sec = 1000000;
+ const uint32_t min_single_frame_time_us = 30000;
+ /* return double the frame time */
+ const uint32_t single_frame_time_multiplier = 2;
+
+ if (!h_total || !v_total || !pix_clk_khz)
+ return single_frame_time_multiplier * min_single_frame_time_us;
+
+ /*TODO: should we use pixel format normalized pixel clock here?*/
+ pixels_per_second = pix_clk_khz * 1000;
+ pixels_per_frame = h_total * v_total;
+
+ if (!pixels_per_second || !pixels_per_frame) {
+ /* avoid division by zero */
+ ASSERT(pixels_per_frame);
+ ASSERT(pixels_per_second);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ refresh_rate = pixels_per_second / pixels_per_frame;
+
+ if (!refresh_rate) {
+ /* avoid division by zero*/
+ ASSERT(refresh_rate);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ frame_time = us_in_sec / refresh_rate;
+
+ if (frame_time < min_single_frame_time_us)
+ frame_time = min_single_frame_time_us;
+
+ frame_time *= single_frame_time_multiplier;
+
+ return frame_time;
+}
+
+static void dce_mi_allocate_dmif(
+ struct mem_input *mi,
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ const uint32_t retry_delay = 10;
+ uint32_t retry_count = get_dmif_switch_time_us(
+ h_total,
+ v_total,
+ pix_clk_khz) / retry_delay;
+
+ uint32_t pix_dur;
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 2)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 2);
+
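+ /* poll for roughly two frame times (30 ms per-frame floor),
+ * sampling every retry_delay microseconds */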
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ retry_delay, retry_count);
+
+ if (pix_clk_khz != 0) {
+ pix_dur = 1000000000ULL / pix_clk_khz;
+
+ REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION, pix_dur);
+ }
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t enable = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, enable);
+ }
+}
+
+static void dce_mi_free_dmif(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 0)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 0);
+
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ 10, 3500);
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t enable = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, enable);
+ }
+}
+
+
+static void program_sec_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
+ REG_SET_2(GRPH_SECONDARY_SURFACE_ADDRESS, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS, address.low_part >> 8,
+ GRPH_SECONDARY_DFQ_ENABLE, 0);
+}
+
+static void program_pri_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
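+ /* the register takes the address in 256-byte units (surfaces are
+ * 256-byte aligned), hence the low 8 bits are dropped */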
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS,
+ address.low_part >> 8);
+}
+
+
+static bool dce_mi_is_flip_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t update_pending;
+
+ REG_GET(GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, &update_pending);
+ if (update_pending)
+ return true;
+
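+ /* no flip pending: the last requested address has been latched
+ * by the hardware, so it becomes the current address */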
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
+static bool dce_mi_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+
+ REG_UPDATE(
+ GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, flip_immediate ? 1 : 0);
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0 ||
+ address->grph_stereo.right_addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph_stereo.left_addr);
+ program_sec_addr(dce_mi, address->grph_stereo.right_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ mem_input->request_address = *address;
+
+ if (flip_immediate)
+ mem_input->current_address = *address;
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+
+ return true;
+}
+
+static struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_program_display_marks = dce_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mi->base.ctx = ctx;
+
+ dce_mi->base.inst = inst;
+ dce_mi->base.funcs = &dce_mi_funcs;
+
+ dce_mi->regs = regs;
+ dce_mi->shifts = mi_shift;
+ dce_mi->masks = mi_mask;
+}
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+ dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
new file mode 100644
index 000000000000..05d39c0cbe87
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_MEM_INPUT_H__
+#define __DCE_MEM_INPUT_H__
+
+#include "dc_hw_types.h"
+#include "mem_input.h"
+
+#define TO_DCE_MEM_INPUT(mem_input)\
+ container_of(mem_input, struct dce_mem_input, base)
+
+#define MI_DCE_BASE_REG_LIST(id)\
+ SRI(GRPH_ENABLE, DCP, id),\
+ SRI(GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_X_START, DCP, id),\
+ SRI(GRPH_Y_START, DCP, id),\
+ SRI(GRPH_X_END, DCP, id),\
+ SRI(GRPH_Y_END, DCP, id),\
+ SRI(GRPH_PITCH, DCP, id),\
+ SRI(HW_ROTATION, DCP, id),\
+ SRI(GRPH_SWAP_CNTL, DCP, id),\
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_UPDATE, DCP, id),\
+ SRI(GRPH_FLIP_CONTROL, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
+ SRI(DPG_WATERMARK_MASK_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
+ SRI(DMIF_BUFFER_CONTROL, PIPE, id)
+
+#define MI_DCE_PTE_REG_LIST(id)\
+ SRI(DVMM_PTE_CONTROL, DCP, id),\
+ SRI(DVMM_PTE_ARB_CONTROL, DCP, id)
+
+#define MI_DCE8_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
+
+#define MI_DCE11_2_REG_LIST(id)\
+ MI_DCE8_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id)
+
+#define MI_DCE11_REG_LIST(id)\
+ MI_DCE11_2_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id)
+
+#define MI_DCE12_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL2, DMIF_PG, id),\
+ SRI(DPG_PIPE_LOW_POWER_CONTROL, DMIF_PG, id),\
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP)
+
+struct dce_mem_input_registers {
+ /* DCP */
+ uint32_t GRPH_ENABLE;
+ uint32_t GRPH_CONTROL;
+ uint32_t GRPH_X_START;
+ uint32_t GRPH_Y_START;
+ uint32_t GRPH_X_END;
+ uint32_t GRPH_Y_END;
+ uint32_t GRPH_PITCH;
+ uint32_t HW_ROTATION;
+ uint32_t GRPH_SWAP_CNTL;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT;
+ uint32_t DVMM_PTE_CONTROL;
+ uint32_t DVMM_PTE_ARB_CONTROL;
+ uint32_t GRPH_UPDATE;
+ uint32_t GRPH_FLIP_CONTROL;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS_HIGH;
+ /* DMIF_PG */
+ uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
+ uint32_t DPG_WATERMARK_MASK_CONTROL;
+ uint32_t DPG_PIPE_URGENCY_CONTROL;
+ uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
+ uint32_t DPG_PIPE_LOW_POWER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL2;
+ /* DCI */
+ uint32_t DMIF_BUFFER_CONTROL;
+ /* MC_HUB */
+ uint32_t MC_HUB_RDREQ_DMIF_LIMIT;
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+};
+
+/* Set_Field_for_Block */
+#define SFB(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCP_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
+ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
+ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
+ SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
+ SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
+ SFB(blk, HW_ROTATION, GRPH_ROTATION_ANGLE, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS, GRPH_PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\
+ SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh)
+
+#define MI_DCP_DCE11_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, mask_sh)
+
+#define MI_DCP_PTE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_DCE(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_DCE8_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_DCE(mask_sh, ),\
+ MI_GFX8_TILE_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_2_MASK_SH_LIST(mask_sh)\
+ MI_DCE8_MASK_SH_LIST(mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_MASK_SH_LIST(mask_sh)\
+ MI_DCE11_2_MASK_SH_LIST(mask_sh),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, )
+
+#define MI_GFX9_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_SW_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_SE_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_SHADER_ENGINES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_PIPES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh)
+
+#define MI_DCE12_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, DCP0_),\
+ SF(DCP0_GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_DFQ_ENABLE, mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_GFX9_TILE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
+
+#define MI_REG_FIELD_LIST(type) \
+ type GRPH_ENABLE; \
+ type GRPH_X_START; \
+ type GRPH_Y_START; \
+ type GRPH_X_END; \
+ type GRPH_Y_END; \
+ type GRPH_PITCH; \
+ type GRPH_ROTATION_ANGLE; \
+ type GRPH_RED_CROSSBAR; \
+ type GRPH_BLUE_CROSSBAR; \
+ type GRPH_PRESCALE_SELECT; \
+ type GRPH_PRESCALE_R_SIGN; \
+ type GRPH_PRESCALE_G_SIGN; \
+ type GRPH_PRESCALE_B_SIGN; \
+ type GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT; \
+ type DVMM_PAGE_WIDTH; \
+ type DVMM_PAGE_HEIGHT; \
+ type DVMM_MIN_PTE_BEFORE_FLIP; \
+ type DVMM_PTE_REQ_PER_CHUNK; \
+ type DVMM_MAX_PTE_REQ_OUTSTANDING; \
+ type GRPH_DEPTH; \
+ type GRPH_FORMAT; \
+ type GRPH_NUM_BANKS; \
+ type GRPH_BANK_WIDTH;\
+ type GRPH_BANK_HEIGHT;\
+ type GRPH_MACRO_TILE_ASPECT;\
+ type GRPH_TILE_SPLIT;\
+ type GRPH_MICRO_TILE_MODE;\
+ type GRPH_PIPE_CONFIG;\
+ type GRPH_ARRAY_MODE;\
+ type GRPH_COLOR_EXPANSION_MODE;\
+ type GRPH_SW_MODE; \
+ type GRPH_SE_ENABLE; \
+ type GRPH_NUM_SHADER_ENGINES; \
+ type GRPH_NUM_PIPES; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS; \
+ type GRPH_SECONDARY_DFQ_ENABLE; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS; \
+ type GRPH_SURFACE_UPDATE_PENDING; \
+ type GRPH_SURFACE_UPDATE_H_RETRACE_EN; \
+ type GRPH_UPDATE_LOCK; \
+ type PIXEL_DURATION; \
+ type URGENCY_WATERMARK_MASK; \
+ type PSTATE_CHANGE_WATERMARK_MASK; \
+ type NB_PSTATE_CHANGE_WATERMARK_MASK; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
+ type URGENCY_LOW_WATERMARK; \
+ type URGENCY_HIGH_WATERMARK; \
+ type NB_PSTATE_CHANGE_ENABLE; \
+ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_WATERMARK; \
+ type PSTATE_CHANGE_ENABLE; \
+ type PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type PSTATE_CHANGE_WATERMARK; \
+ type STUTTER_ENABLE; \
+ type STUTTER_IGNORE_FBC; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+ type DMIF_BUFFERS_ALLOCATED; \
+ type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
+ type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
+ type FB_BASE; \
+ type FB_TOP; \
+ type AGP_BASE; \
+ type AGP_TOP; \
+ type AGP_BOT; \
+
+struct dce_mem_input_shift {
+ MI_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_mem_input_mask {
+ MI_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_mem_input_wa {
+ uint8_t single_head_rdreq_dmif_limit;
+};
+
+struct dce_mem_input {
+ struct mem_input base;
+
+ const struct dce_mem_input_registers *regs;
+ const struct dce_mem_input_shift *shifts;
+ const struct dce_mem_input_mask *masks;
+
+ struct dce_mem_input_wa wa;
+};
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
new file mode 100644
index 000000000000..3931412ab6d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "basics/conversion.h"
+
+#include "dce_opp.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (opp110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ opp110->opp_shift->field_name, opp110->opp_mask->field_name
+
+#define CTX \
+ opp110->base.ctx
+
+enum {
+ MAX_PWL_ENTRY = 128,
+ MAX_REGIONS_NUMBER = 16
+};
+
+enum {
+ MAX_LUT_ENTRY = 256,
+ MAX_NUMBER_OF_ENTRIES = 256
+};
+
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) HW removed 12-bit FMT support on DCE11 for power-saving reasons.
+ */
+static void set_truncation(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 0,
+ FMT_TRUNCATE_DEPTH, 0,
+ FMT_TRUNCATE_MODE, 0);
+
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ /* 8bpc trunc on YCbCr422*/
+ if (params->flags.TRUNCATE_DEPTH == 1)
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 1,
+ FMT_TRUNCATE_MODE, 0);
+ else if (params->flags.TRUNCATE_DEPTH == 2)
+ /* 10bpc trunc on YCbCr422*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 2,
+ FMT_TRUNCATE_MODE, 0);
+ return;
+ }
+ /* on other format-to do */
+ if (params->flags.TRUNCATE_ENABLED == 0 ||
+ params->flags.TRUNCATE_DEPTH == 2)
+ return;
+ /*Set truncation depth and Enable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH,
+ params->flags.TRUNCATE_DEPTH,
+ FMT_TRUNCATE_MODE,
+ params->flags.TRUNCATE_MODE);
+}
+
+
+/**
+ * set_spatial_dither
+ * 1) set spatial dithering mode: pattern of seed
+ * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
+ * 3) set random seed
+ * 4) set random mode
+ * lfsr is reset every frame or not reset
+ * RGB dithering method
+ * 0: RGB data are all dithered with x^28+x^3+1
+ * 1: R data is dithered with x^28+x^3+1
+ * G data is dithered with x^28+X^9+1
+ * B data is dithered with x^28+x^13+1
+ * enable high pass filter or not
+ * 5) enable spatial dithering
+ */
+static void set_spatial_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_SPATIAL_DITHER_MODE, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0);
+
+ /* no 10bpc on DCE11*/
+ if (params->flags.SPATIAL_DITHER_ENABLED == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 2)
+ return;
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+
+ if (opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX &&
+ opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP) {
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else
+ return;
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+ }
+ /* Set seed for random values for
+ * spatial dithering for R,G,B channels
+ */
+ REG_UPDATE(FMT_DITHER_RAND_R_SEED,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_G_SEED,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_B_SEED,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
+
+ /* Disable High pass filter
+ * Reset only at startup
+ * Set RGB data dithered with x^28+x^3+1
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+
+ /* Set spatial dithering bit depth
+ * Set spatial dithering mode
+ * (default is seed pattern AAAA...)
+ * Enable spatial dithering
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ FMT_SPATIAL_DITHER_EN, 1);
+}
+
+/**
+ * SetTemporalDither (Frame Modulation)
+ * 1) set temporal dither depth
+ * 2) select pattern: from hard-coded pattern or programmable pattern
+ * 3) select optimized strips for BGR or RGB LCD sub-pixel
+ * 4) set s matrix
+ * 5) set t matrix
+ * 6) set grey level for 0.25, 0.5, 0.75
+ * 7) enable temporal dithering
+ */
+
+static void set_temporal_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable temporal (frame modulation) dithering first*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_LEVEL, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, 0,
+ FMT_50FRC_SEL, 0,
+ FMT_75FRC_SEL, 0);
+
+ /* no 10bpc dither on DCE11*/
+ if (params->flags.FRAME_MODULATION_ENABLED == 0 ||
+ params->flags.FRAME_MODULATION_DEPTH == 2)
+ return;
+
+ /* Set temporal dithering depth*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, params->flags.FRAME_MODULATION_DEPTH,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ /*Select legacy pattern based on FRC and Temporal level*/
+ if (REG(FMT_TEMPORAL_DITHER_PATTERN_CONTROL)) {
+ REG_WRITE(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, 0);
+ /*Set s matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, 0);
+ /*Set t matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, 0);
+ }
+
+ /*Select patterns for 0.25, 0.5 and 0.75 grey level*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_LEVEL, params->flags.TEMPORAL_LEVEL);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, params->flags.FRC25,
+ FMT_50FRC_SEL, params->flags.FRC50,
+ FMT_75FRC_SEL, params->flags.FRC75);
+
+ /*Enable bit reduction by temporal (frame modulation) dithering*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 1);
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ * 7 for programmable
+ * 2) Enable clamp if Limited range requested
+ */
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /*Set clamp control*/
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 7);
+
+ /*set the defaults*/
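+ /* 0x10..0xFEF excludes the bottom and top 16 code values that the
+ * video interface reserves for timing/control */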
+ REG_SET_2(FMT_CLAMP_COMPONENT_R, 0,
+ FMT_CLAMP_LOWER_R, 0x10,
+ FMT_CLAMP_UPPER_R, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_G, 0,
+ FMT_CLAMP_LOWER_G, 0x10,
+ FMT_CLAMP_UPPER_G, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_B, 0,
+ FMT_CLAMP_LOWER_B, 0x10,
+ FMT_CLAMP_UPPER_B, 0xFEF);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ * 2: YCbCr 4:2:0
+ */
+static void set_pixel_encoding(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
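+ /* Default to 4:4:4 sampling; the 4:2:2/4:2:0 cases below override it */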
+ if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS)
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
+ else
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0);
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_ORDER, 0);
+ }
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
+ }
+
+}
+
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ set_truncation(opp110, params);
+ set_spatial_dither(opp110, params);
+ set_temporal_dither(opp110, params);
+}
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ dce110_opp_set_clamping(opp110, params);
+ set_pixel_encoding(opp110, params);
+}
+
+static void program_formatter_420_memory(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+ uint32_t fmt_mem_cntl_value;
+
+ /* Program source select*/
+ /* Use HW default source select for FMT_MEMORYx_CONTROL */
+ /* Use that value for FMT_SRC_SELECT as well*/
+ REG_GET(CONTROL,
+ FMT420_MEM0_SOURCE_SEL, &fmt_mem_cntl_value);
+
+ REG_UPDATE(FMT_CONTROL,
+ FMT_SRC_SELECT, fmt_mem_cntl_value);
+
+ /* Turn on the memory */
+ REG_UPDATE(CONTROL,
+ FMT420_MEM0_PWR_FORCE, 0);
+}
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(
+ FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void program_formatter_reset_dig_resync_fifo(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ /* clear previous phase lock status*/
+ REG_UPDATE(FMT_CONTROL,
+ FMT_420_PIXEL_PHASE_LOCKED_CLEAR, 1);
+
+ /* poll until FMT_420_PIXEL_PHASE_LOCKED becomes 1 */
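+ /* (assumed REG_WAIT arguments: expected value, delay between polls in us, number of retries) */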
+ REG_WAIT(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, 1, 10, 10);
+
+}
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_420_memory(opp);
+
+ dce110_opp_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ dce110_opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_reset_dig_resync_fifo(opp);
+
+ return;
+}
+
+
+
+
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+
+ opp110->base.inst = inst;
+
+ opp110->regs = regs;
+ opp110->opp_shift = opp_shift;
+ opp110->opp_mask = opp_mask;
+}
+
+void dce110_opp_destroy(struct output_pixel_processor **opp)
+{
+ if (*opp)
+ kfree(FROM_DCE11_OPP(*opp));
+ *opp = NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
new file mode 100644
index 000000000000..2ab0147cbd9d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -0,0 +1,310 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE_H__
+#define __DC_OPP_DCE_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+#define FROM_DCE11_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+enum dce110_opp_reg_type {
+ DCE110_OPP_REG_DCP = 0,
+ DCE110_OPP_REG_DCFE,
+ DCE110_OPP_REG_FMT,
+
+ DCE110_OPP_REG_MAX
+};
+
+#define OPP_COMMON_REG_LIST_BASE(id) \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_R, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_G, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_B, FMT, id)
+
+#define OPP_DCE_80_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_100_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_110_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_112_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
+#define OPP_DCE_120_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
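+/* Build one shift/mask initializer from the register headers'
+ * <reg_name>__<field_name><post_fix> definitions (post_fix is expected
+ * to be __SHIFT or _MASK when these lists are instantiated).
+ */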
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_110(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_100(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_80(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_120(mask_sh)\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh)
+
+#define OPP_REG_FIELD_LIST(type) \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_TEMPORAL_DITHER_RESET; \
+ type FMT_TEMPORAL_DITHER_OFFSET; \
+ type FMT_TEMPORAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_LEVEL; \
+ type FMT_25FRC_SEL; \
+ type FMT_50FRC_SEL; \
+ type FMT_75FRC_SEL; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT420_MEM0_SOURCE_SEL; \
+ type FMT420_MEM0_PWR_FORCE; \
+ type FMT_SRC_SELECT; \
+ type FMT_420_PIXEL_PHASE_LOCKED_CLEAR; \
+ type FMT_420_PIXEL_PHASE_LOCKED; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_CLAMP_LOWER_R; \
+ type FMT_CLAMP_UPPER_R; \
+ type FMT_CLAMP_LOWER_G; \
+ type FMT_CLAMP_UPPER_G; \
+ type FMT_CLAMP_LOWER_B; \
+ type FMT_CLAMP_UPPER_B; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_ORDER; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS;\
+
+struct dce_opp_shift {
+ OPP_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_opp_mask {
+ OPP_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_opp_registers {
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_TEMPORAL_DITHER_PATTERN_CONTROL;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX;
+ uint32_t CONTROL;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_CLAMP_COMPONENT_R;
+ uint32_t FMT_CLAMP_COMPONENT_G;
+ uint32_t FMT_CLAMP_COMPONENT_B;
+};
+
+/* OPP RELATED */
+#define TO_DCE110_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+struct dce110_opp {
+ struct output_pixel_processor base;
+ const struct dce_opp_registers *regs;
+ const struct dce_opp_shift *opp_shift;
+ const struct dce_opp_mask *opp_mask;
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask);
+
+void dce110_opp_destroy(struct output_pixel_processor **opp);
+
+
+
+/* FORMATTER RELATED */
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params);
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
new file mode 100644
index 000000000000..6243450b41b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -0,0 +1,1119 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "transform.h"
+
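+/* Note on coefficient encoding (assumption, not stated in the original source):
+ * the taps appear to be 2.12-style fixed point, where 4096 represents 1.0;
+ * values near 16384 look like negative coefficients stored modulo 2^14
+ * (e.g. 16368 == -16), so the taps of each phase sum to roughly 4096.
+ */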
+static const uint16_t filter_2tap_16p[18] = {
+ 4096, 0,
+ 3840, 256,
+ 3584, 512,
+ 3328, 768,
+ 3072, 1024,
+ 2816, 1280,
+ 2560, 1536,
+ 2304, 1792,
+ 2048, 2048
+};
+
+static const uint16_t filter_3tap_16p_upscale[27] = {
+ 2048, 2048, 0,
+ 1708, 2424, 16348,
+ 1372, 2796, 16308,
+ 1056, 3148, 16272,
+ 768, 3464, 16244,
+ 512, 3728, 16236,
+ 296, 3928, 16252,
+ 124, 4052, 16296,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_16p_117[27] = {
+ 2048, 2048, 0,
+ 1824, 2276, 16376,
+ 1600, 2496, 16380,
+ 1376, 2700, 16,
+ 1156, 2880, 52,
+ 948, 3032, 108,
+ 756, 3144, 192,
+ 580, 3212, 296,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_16p_150[27] = {
+ 2048, 2048, 0,
+ 1872, 2184, 36,
+ 1692, 2308, 88,
+ 1516, 2420, 156,
+ 1340, 2516, 236,
+ 1168, 2592, 328,
+ 1004, 2648, 440,
+ 844, 2684, 560,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_16p_183[27] = {
+ 2048, 2048, 0,
+ 1892, 2104, 92,
+ 1744, 2152, 196,
+ 1592, 2196, 300,
+ 1448, 2232, 412,
+ 1304, 2256, 528,
+ 1168, 2276, 648,
+ 1032, 2288, 772,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_16p_upscale[36] = {
+ 0, 4096, 0, 0,
+ 16240, 4056, 180, 16380,
+ 16136, 3952, 404, 16364,
+ 16072, 3780, 664, 16344,
+ 16040, 3556, 952, 16312,
+ 16036, 3284, 1268, 16272,
+ 16052, 2980, 1604, 16224,
+ 16084, 2648, 1952, 16176,
+ 16128, 2304, 2304, 16128
+};
+
+static const uint16_t filter_4tap_16p_117[36] = {
+ 428, 3236, 428, 0,
+ 276, 3232, 604, 16364,
+ 148, 3184, 800, 16340,
+ 44, 3104, 1016, 16312,
+ 16344, 2984, 1244, 16284,
+ 16284, 2832, 1488, 16256,
+ 16244, 2648, 1732, 16236,
+ 16220, 2440, 1976, 16220,
+ 16212, 2216, 2216, 16212
+};
+
+static const uint16_t filter_4tap_16p_150[36] = {
+ 696, 2700, 696, 0,
+ 560, 2700, 848, 16364,
+ 436, 2676, 1008, 16348,
+ 328, 2628, 1180, 16336,
+ 232, 2556, 1356, 16328,
+ 152, 2460, 1536, 16328,
+ 84, 2344, 1716, 16332,
+ 28, 2208, 1888, 16348,
+ 16376, 2052, 2052, 16376
+};
+
+static const uint16_t filter_4tap_16p_183[36] = {
+ 940, 2208, 940, 0,
+ 832, 2200, 1052, 4,
+ 728, 2180, 1164, 16,
+ 628, 2148, 1280, 36,
+ 536, 2100, 1392, 60,
+ 448, 2044, 1504, 92,
+ 368, 1976, 1612, 132,
+ 296, 1900, 1716, 176,
+ 232, 1812, 1812, 232
+};
+
+static const uint16_t filter_2tap_64p[66] = {
+ 4096, 0,
+ 4032, 64,
+ 3968, 128,
+ 3904, 192,
+ 3840, 256,
+ 3776, 320,
+ 3712, 384,
+ 3648, 448,
+ 3584, 512,
+ 3520, 576,
+ 3456, 640,
+ 3392, 704,
+ 3328, 768,
+ 3264, 832,
+ 3200, 896,
+ 3136, 960,
+ 3072, 1024,
+ 3008, 1088,
+ 2944, 1152,
+ 2880, 1216,
+ 2816, 1280,
+ 2752, 1344,
+ 2688, 1408,
+ 2624, 1472,
+ 2560, 1536,
+ 2496, 1600,
+ 2432, 1664,
+ 2368, 1728,
+ 2304, 1792,
+ 2240, 1856,
+ 2176, 1920,
+ 2112, 1984,
+ 2048, 2048 };
+
+static const uint16_t filter_3tap_64p_upscale[99] = {
+ 2048, 2048, 0,
+ 1960, 2140, 16376,
+ 1876, 2236, 16364,
+ 1792, 2328, 16356,
+ 1708, 2424, 16348,
+ 1620, 2516, 16336,
+ 1540, 2612, 16328,
+ 1456, 2704, 16316,
+ 1372, 2796, 16308,
+ 1292, 2884, 16296,
+ 1212, 2976, 16288,
+ 1136, 3060, 16280,
+ 1056, 3148, 16272,
+ 984, 3228, 16264,
+ 908, 3312, 16256,
+ 836, 3388, 16248,
+ 768, 3464, 16244,
+ 700, 3536, 16240,
+ 636, 3604, 16236,
+ 572, 3668, 16236,
+ 512, 3728, 16236,
+ 456, 3784, 16236,
+ 400, 3836, 16240,
+ 348, 3884, 16244,
+ 296, 3928, 16252,
+ 252, 3964, 16260,
+ 204, 4000, 16268,
+ 164, 4028, 16284,
+ 124, 4052, 16296,
+ 88, 4072, 16316,
+ 56, 4084, 16336,
+ 24, 4092, 16356,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_64p_117[99] = {
+ 2048, 2048, 0,
+ 1992, 2104, 16380,
+ 1936, 2160, 16380,
+ 1880, 2220, 16376,
+ 1824, 2276, 16376,
+ 1768, 2332, 16376,
+ 1712, 2388, 16376,
+ 1656, 2444, 16376,
+ 1600, 2496, 16380,
+ 1544, 2548, 0,
+ 1488, 2600, 4,
+ 1432, 2652, 8,
+ 1376, 2700, 16,
+ 1320, 2748, 20,
+ 1264, 2796, 32,
+ 1212, 2840, 40,
+ 1156, 2880, 52,
+ 1104, 2920, 64,
+ 1052, 2960, 80,
+ 1000, 2996, 92,
+ 948, 3032, 108,
+ 900, 3060, 128,
+ 852, 3092, 148,
+ 804, 3120, 168,
+ 756, 3144, 192,
+ 712, 3164, 216,
+ 668, 3184, 240,
+ 624, 3200, 268,
+ 580, 3212, 296,
+ 540, 3220, 328,
+ 500, 3228, 360,
+ 464, 3232, 392,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_64p_150[99] = {
+ 2048, 2048, 0,
+ 2004, 2080, 8,
+ 1960, 2116, 16,
+ 1916, 2148, 28,
+ 1872, 2184, 36,
+ 1824, 2216, 48,
+ 1780, 2248, 60,
+ 1736, 2280, 76,
+ 1692, 2308, 88,
+ 1648, 2336, 104,
+ 1604, 2368, 120,
+ 1560, 2392, 136,
+ 1516, 2420, 156,
+ 1472, 2444, 172,
+ 1428, 2472, 192,
+ 1384, 2492, 212,
+ 1340, 2516, 236,
+ 1296, 2536, 256,
+ 1252, 2556, 280,
+ 1212, 2576, 304,
+ 1168, 2592, 328,
+ 1124, 2608, 356,
+ 1084, 2624, 384,
+ 1044, 2636, 412,
+ 1004, 2648, 440,
+ 964, 2660, 468,
+ 924, 2668, 500,
+ 884, 2676, 528,
+ 844, 2684, 560,
+ 808, 2688, 596,
+ 768, 2692, 628,
+ 732, 2696, 664,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_64p_183[99] = {
+ 2048, 2048, 0,
+ 2008, 2060, 20,
+ 1968, 2076, 44,
+ 1932, 2088, 68,
+ 1892, 2104, 92,
+ 1856, 2116, 120,
+ 1816, 2128, 144,
+ 1780, 2140, 168,
+ 1744, 2152, 196,
+ 1704, 2164, 220,
+ 1668, 2176, 248,
+ 1632, 2188, 272,
+ 1592, 2196, 300,
+ 1556, 2204, 328,
+ 1520, 2216, 356,
+ 1484, 2224, 384,
+ 1448, 2232, 412,
+ 1412, 2240, 440,
+ 1376, 2244, 468,
+ 1340, 2252, 496,
+ 1304, 2256, 528,
+ 1272, 2264, 556,
+ 1236, 2268, 584,
+ 1200, 2272, 616,
+ 1168, 2276, 648,
+ 1132, 2280, 676,
+ 1100, 2284, 708,
+ 1064, 2288, 740,
+ 1032, 2288, 772,
+ 996, 2292, 800,
+ 964, 2292, 832,
+ 932, 2292, 868,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_64p_upscale[132] = {
+ 0, 4096, 0, 0,
+ 16344, 4092, 40, 0,
+ 16308, 4084, 84, 16380,
+ 16272, 4072, 132, 16380,
+ 16240, 4056, 180, 16380,
+ 16212, 4036, 232, 16376,
+ 16184, 4012, 288, 16372,
+ 16160, 3984, 344, 16368,
+ 16136, 3952, 404, 16364,
+ 16116, 3916, 464, 16360,
+ 16100, 3872, 528, 16356,
+ 16084, 3828, 596, 16348,
+ 16072, 3780, 664, 16344,
+ 16060, 3728, 732, 16336,
+ 16052, 3676, 804, 16328,
+ 16044, 3616, 876, 16320,
+ 16040, 3556, 952, 16312,
+ 16036, 3492, 1028, 16300,
+ 16032, 3424, 1108, 16292,
+ 16032, 3356, 1188, 16280,
+ 16036, 3284, 1268, 16272,
+ 16036, 3212, 1352, 16260,
+ 16040, 3136, 1436, 16248,
+ 16044, 3056, 1520, 16236,
+ 16052, 2980, 1604, 16224,
+ 16060, 2896, 1688, 16212,
+ 16064, 2816, 1776, 16200,
+ 16076, 2732, 1864, 16188,
+ 16084, 2648, 1952, 16176,
+ 16092, 2564, 2040, 16164,
+ 16104, 2476, 2128, 16152,
+ 16116, 2388, 2216, 16140,
+ 16128, 2304, 2304, 16128 };
+
+static const uint16_t filter_4tap_64p_117[132] = {
+ 420, 3248, 420, 0,
+ 380, 3248, 464, 16380,
+ 344, 3248, 508, 16372,
+ 308, 3248, 552, 16368,
+ 272, 3240, 596, 16364,
+ 236, 3236, 644, 16356,
+ 204, 3224, 692, 16352,
+ 172, 3212, 744, 16344,
+ 144, 3196, 796, 16340,
+ 116, 3180, 848, 16332,
+ 88, 3160, 900, 16324,
+ 60, 3136, 956, 16320,
+ 36, 3112, 1012, 16312,
+ 16, 3084, 1068, 16304,
+ 16380, 3056, 1124, 16296,
+ 16360, 3024, 1184, 16292,
+ 16340, 2992, 1244, 16284,
+ 16324, 2956, 1304, 16276,
+ 16308, 2920, 1364, 16268,
+ 16292, 2880, 1424, 16264,
+ 16280, 2836, 1484, 16256,
+ 16268, 2792, 1548, 16252,
+ 16256, 2748, 1608, 16244,
+ 16248, 2700, 1668, 16240,
+ 16240, 2652, 1732, 16232,
+ 16232, 2604, 1792, 16228,
+ 16228, 2552, 1856, 16224,
+ 16220, 2500, 1916, 16220,
+ 16216, 2444, 1980, 16216,
+ 16216, 2388, 2040, 16216,
+ 16212, 2332, 2100, 16212,
+ 16212, 2276, 2160, 16212,
+ 16212, 2220, 2220, 16212 };
+
+static const uint16_t filter_4tap_64p_150[132] = {
+ 696, 2700, 696, 0,
+ 660, 2704, 732, 16380,
+ 628, 2704, 768, 16376,
+ 596, 2704, 804, 16372,
+ 564, 2700, 844, 16364,
+ 532, 2696, 884, 16360,
+ 500, 2692, 924, 16356,
+ 472, 2684, 964, 16352,
+ 440, 2676, 1004, 16352,
+ 412, 2668, 1044, 16348,
+ 384, 2656, 1088, 16344,
+ 360, 2644, 1128, 16340,
+ 332, 2632, 1172, 16336,
+ 308, 2616, 1216, 16336,
+ 284, 2600, 1260, 16332,
+ 260, 2580, 1304, 16332,
+ 236, 2560, 1348, 16328,
+ 216, 2540, 1392, 16328,
+ 196, 2516, 1436, 16328,
+ 176, 2492, 1480, 16324,
+ 156, 2468, 1524, 16324,
+ 136, 2440, 1568, 16328,
+ 120, 2412, 1612, 16328,
+ 104, 2384, 1656, 16328,
+ 88, 2352, 1700, 16332,
+ 72, 2324, 1744, 16332,
+ 60, 2288, 1788, 16336,
+ 48, 2256, 1828, 16340,
+ 36, 2220, 1872, 16344,
+ 24, 2184, 1912, 16352,
+ 12, 2148, 1952, 16356,
+ 4, 2112, 1996, 16364,
+ 16380, 2072, 2036, 16372 };
+
+static const uint16_t filter_4tap_64p_183[132] = {
+ 944, 2204, 944, 0,
+ 916, 2204, 972, 0,
+ 888, 2200, 996, 0,
+ 860, 2200, 1024, 4,
+ 832, 2196, 1052, 4,
+ 808, 2192, 1080, 8,
+ 780, 2188, 1108, 12,
+ 756, 2180, 1140, 12,
+ 728, 2176, 1168, 16,
+ 704, 2168, 1196, 20,
+ 680, 2160, 1224, 24,
+ 656, 2152, 1252, 28,
+ 632, 2144, 1280, 36,
+ 608, 2132, 1308, 40,
+ 584, 2120, 1336, 48,
+ 560, 2112, 1364, 52,
+ 536, 2096, 1392, 60,
+ 516, 2084, 1420, 68,
+ 492, 2072, 1448, 76,
+ 472, 2056, 1476, 84,
+ 452, 2040, 1504, 92,
+ 428, 2024, 1532, 100,
+ 408, 2008, 1560, 112,
+ 392, 1992, 1584, 120,
+ 372, 1972, 1612, 132,
+ 352, 1956, 1636, 144,
+ 336, 1936, 1664, 156,
+ 316, 1916, 1688, 168,
+ 300, 1896, 1712, 180,
+ 284, 1876, 1736, 192,
+ 268, 1852, 1760, 208,
+ 252, 1832, 1784, 220,
+ 236, 1808, 1808, 236 };
+
+static const uint16_t filter_5tap_64p_upscale[165] = {
+ 15936, 2496, 2496, 15936, 0,
+ 15948, 2404, 2580, 15924, 0,
+ 15960, 2312, 2664, 15912, 4,
+ 15976, 2220, 2748, 15904, 8,
+ 15992, 2128, 2832, 15896, 12,
+ 16004, 2036, 2912, 15888, 16,
+ 16020, 1944, 2992, 15880, 20,
+ 16036, 1852, 3068, 15876, 20,
+ 16056, 1760, 3140, 15876, 24,
+ 16072, 1668, 3216, 15872, 28,
+ 16088, 1580, 3284, 15872, 32,
+ 16104, 1492, 3352, 15876, 32,
+ 16120, 1404, 3420, 15876, 36,
+ 16140, 1316, 3480, 15884, 40,
+ 16156, 1228, 3540, 15892, 40,
+ 16172, 1144, 3600, 15900, 40,
+ 16188, 1060, 3652, 15908, 44,
+ 16204, 980, 3704, 15924, 44,
+ 16220, 900, 3756, 15936, 44,
+ 16236, 824, 3800, 15956, 44,
+ 16248, 744, 3844, 15972, 44,
+ 16264, 672, 3884, 15996, 44,
+ 16276, 600, 3920, 16020, 44,
+ 16292, 528, 3952, 16044, 40,
+ 16304, 460, 3980, 16072, 40,
+ 16316, 396, 4008, 16104, 36,
+ 16328, 332, 4032, 16136, 32,
+ 16336, 272, 4048, 16172, 28,
+ 16348, 212, 4064, 16208, 24,
+ 16356, 156, 4080, 16248, 16,
+ 16368, 100, 4088, 16292, 12,
+ 16376, 48, 4092, 16336, 4,
+ 0, 0, 4096, 0, 0 };
+
+static const uint16_t filter_5tap_64p_117[165] = {
+ 16056, 2372, 2372, 16056, 0,
+ 16052, 2312, 2432, 16060, 0,
+ 16052, 2252, 2488, 16064, 0,
+ 16052, 2188, 2548, 16072, 0,
+ 16052, 2124, 2600, 16076, 0,
+ 16052, 2064, 2656, 16088, 0,
+ 16052, 2000, 2708, 16096, 0,
+ 16056, 1932, 2760, 16108, 0,
+ 16060, 1868, 2808, 16120, 0,
+ 16064, 1804, 2856, 16132, 0,
+ 16068, 1740, 2904, 16148, 16380,
+ 16076, 1676, 2948, 16164, 16380,
+ 16080, 1612, 2992, 16180, 16376,
+ 16088, 1544, 3032, 16200, 16372,
+ 16096, 1480, 3072, 16220, 16372,
+ 16104, 1420, 3108, 16244, 16368,
+ 16112, 1356, 3144, 16268, 16364,
+ 16120, 1292, 3180, 16292, 16360,
+ 16128, 1232, 3212, 16320, 16356,
+ 16136, 1168, 3240, 16344, 16352,
+ 16144, 1108, 3268, 16376, 16344,
+ 16156, 1048, 3292, 20, 16340,
+ 16164, 988, 3316, 52, 16332,
+ 16172, 932, 3336, 88, 16328,
+ 16184, 872, 3356, 124, 16320,
+ 16192, 816, 3372, 160, 16316,
+ 16204, 760, 3388, 196, 16308,
+ 16212, 708, 3400, 236, 16300,
+ 16220, 656, 3412, 276, 16292,
+ 16232, 604, 3420, 320, 16284,
+ 16240, 552, 3424, 364, 16276,
+ 16248, 504, 3428, 408, 16268,
+ 16256, 456, 3428, 456, 16256 };
+
+static const uint16_t filter_5tap_64p_150[165] = {
+ 16368, 2064, 2064, 16368, 0,
+ 16352, 2028, 2100, 16380, 16380,
+ 16340, 1996, 2132, 12, 16376,
+ 16328, 1960, 2168, 24, 16376,
+ 16316, 1924, 2204, 44, 16372,
+ 16308, 1888, 2236, 60, 16368,
+ 16296, 1848, 2268, 76, 16364,
+ 16288, 1812, 2300, 96, 16360,
+ 16280, 1772, 2328, 116, 16356,
+ 16272, 1736, 2360, 136, 16352,
+ 16268, 1696, 2388, 160, 16348,
+ 16260, 1656, 2416, 180, 16344,
+ 16256, 1616, 2440, 204, 16340,
+ 16248, 1576, 2464, 228, 16336,
+ 16244, 1536, 2492, 252, 16332,
+ 16240, 1496, 2512, 276, 16324,
+ 16240, 1456, 2536, 304, 16320,
+ 16236, 1416, 2556, 332, 16316,
+ 16232, 1376, 2576, 360, 16312,
+ 16232, 1336, 2592, 388, 16308,
+ 16232, 1296, 2612, 416, 16300,
+ 16232, 1256, 2628, 448, 16296,
+ 16232, 1216, 2640, 480, 16292,
+ 16232, 1172, 2652, 512, 16288,
+ 16232, 1132, 2664, 544, 16284,
+ 16232, 1092, 2676, 576, 16280,
+ 16236, 1056, 2684, 608, 16272,
+ 16236, 1016, 2692, 644, 16268,
+ 16240, 976, 2700, 680, 16264,
+ 16240, 936, 2704, 712, 16260,
+ 16244, 900, 2708, 748, 16256,
+ 16248, 860, 2708, 788, 16252,
+ 16248, 824, 2708, 824, 16248 };
+
+static const uint16_t filter_5tap_64p_183[165] = {
+ 228, 1816, 1816, 228, 0,
+ 216, 1792, 1836, 248, 16380,
+ 200, 1772, 1860, 264, 16376,
+ 184, 1748, 1884, 280, 16376,
+ 168, 1728, 1904, 300, 16372,
+ 156, 1704, 1928, 316, 16368,
+ 144, 1680, 1948, 336, 16364,
+ 128, 1656, 1968, 356, 16364,
+ 116, 1632, 1988, 376, 16360,
+ 104, 1604, 2008, 396, 16356,
+ 96, 1580, 2024, 416, 16356,
+ 84, 1556, 2044, 440, 16352,
+ 72, 1528, 2060, 460, 16348,
+ 64, 1504, 2076, 484, 16348,
+ 52, 1476, 2092, 504, 16344,
+ 44, 1448, 2104, 528, 16344,
+ 36, 1424, 2120, 552, 16340,
+ 28, 1396, 2132, 576, 16340,
+ 20, 1368, 2144, 600, 16340,
+ 12, 1340, 2156, 624, 16336,
+ 4, 1312, 2168, 652, 16336,
+ 0, 1284, 2180, 676, 16336,
+ 16376, 1256, 2188, 700, 16332,
+ 16372, 1228, 2196, 728, 16332,
+ 16368, 1200, 2204, 752, 16332,
+ 16364, 1172, 2212, 780, 16332,
+ 16356, 1144, 2216, 808, 16332,
+ 16352, 1116, 2220, 836, 16332,
+ 16352, 1084, 2224, 860, 16332,
+ 16348, 1056, 2228, 888, 16336,
+ 16344, 1028, 2232, 916, 16336,
+ 16340, 1000, 2232, 944, 16336,
+ 16340, 972, 2232, 972, 16340 };
+
+static const uint16_t filter_6tap_64p_upscale[198] = {
+ 0, 0, 4092, 0, 0, 0,
+ 12, 16332, 4092, 52, 16368, 0,
+ 24, 16280, 4088, 108, 16356, 0,
+ 36, 16236, 4080, 168, 16340, 0,
+ 44, 16188, 4064, 228, 16324, 0,
+ 56, 16148, 4052, 292, 16308, 0,
+ 64, 16108, 4032, 356, 16292, 4,
+ 72, 16072, 4008, 424, 16276, 4,
+ 80, 16036, 3980, 492, 16256, 4,
+ 88, 16004, 3952, 564, 16240, 8,
+ 96, 15972, 3920, 636, 16220, 8,
+ 100, 15944, 3884, 712, 16204, 12,
+ 108, 15916, 3844, 788, 16184, 16,
+ 112, 15896, 3800, 864, 16164, 20,
+ 116, 15872, 3756, 944, 16144, 20,
+ 120, 15852, 3708, 1024, 16124, 24,
+ 120, 15836, 3656, 1108, 16104, 28,
+ 124, 15824, 3600, 1192, 16084, 32,
+ 124, 15808, 3544, 1276, 16064, 36,
+ 124, 15800, 3484, 1360, 16044, 40,
+ 128, 15792, 3420, 1448, 16024, 44,
+ 128, 15784, 3352, 1536, 16004, 48,
+ 124, 15780, 3288, 1624, 15988, 52,
+ 124, 15776, 3216, 1712, 15968, 56,
+ 124, 15776, 3144, 1800, 15948, 64,
+ 120, 15776, 3068, 1888, 15932, 68,
+ 120, 15780, 2992, 1976, 15912, 72,
+ 116, 15784, 2916, 2064, 15896, 76,
+ 112, 15792, 2836, 2152, 15880, 80,
+ 108, 15796, 2752, 2244, 15868, 84,
+ 104, 15804, 2672, 2328, 15852, 88,
+ 104, 15816, 2588, 2416, 15840, 92,
+ 100, 15828, 2504, 2504, 15828, 100 };
+
+static const uint16_t filter_6tap_64p_117[198] = {
+ 16168, 476, 3568, 476, 16168, 0,
+ 16180, 428, 3564, 528, 16156, 0,
+ 16192, 376, 3556, 584, 16144, 4,
+ 16204, 328, 3548, 636, 16128, 4,
+ 16216, 280, 3540, 692, 16116, 8,
+ 16228, 232, 3524, 748, 16104, 12,
+ 16240, 188, 3512, 808, 16092, 12,
+ 16252, 148, 3492, 864, 16080, 16,
+ 16264, 104, 3472, 924, 16068, 16,
+ 16276, 64, 3452, 984, 16056, 20,
+ 16284, 28, 3428, 1044, 16048, 24,
+ 16296, 16376, 3400, 1108, 16036, 24,
+ 16304, 16340, 3372, 1168, 16024, 28,
+ 16316, 16304, 3340, 1232, 16016, 32,
+ 16324, 16272, 3308, 1296, 16004, 32,
+ 16332, 16244, 3272, 1360, 15996, 36,
+ 16344, 16212, 3236, 1424, 15988, 36,
+ 16352, 16188, 3200, 1488, 15980, 40,
+ 16360, 16160, 3160, 1552, 15972, 40,
+ 16368, 16136, 3116, 1616, 15964, 40,
+ 16372, 16112, 3072, 1680, 15956, 44,
+ 16380, 16092, 3028, 1744, 15952, 44,
+ 0, 16072, 2980, 1808, 15948, 44,
+ 8, 16052, 2932, 1872, 15944, 48,
+ 12, 16036, 2880, 1936, 15940, 48,
+ 16, 16020, 2828, 2000, 15936, 48,
+ 20, 16008, 2776, 2064, 15936, 48,
+ 24, 15996, 2724, 2128, 15936, 48,
+ 28, 15984, 2668, 2192, 15936, 48,
+ 32, 15972, 2612, 2252, 15940, 44,
+ 36, 15964, 2552, 2316, 15940, 44,
+ 40, 15956, 2496, 2376, 15944, 44,
+ 40, 15952, 2436, 2436, 15952, 40 };
+
+static const uint16_t filter_6tap_64p_150[198] = {
+ 16148, 920, 2724, 920, 16148, 0,
+ 16152, 880, 2724, 956, 16148, 0,
+ 16152, 844, 2720, 996, 16144, 0,
+ 16156, 804, 2716, 1032, 16144, 0,
+ 16156, 768, 2712, 1072, 16144, 0,
+ 16160, 732, 2708, 1112, 16144, 16380,
+ 16164, 696, 2700, 1152, 16144, 16380,
+ 16168, 660, 2692, 1192, 16148, 16380,
+ 16172, 628, 2684, 1232, 16148, 16380,
+ 16176, 592, 2672, 1272, 16152, 16376,
+ 16180, 560, 2660, 1312, 16152, 16376,
+ 16184, 524, 2648, 1348, 16156, 16376,
+ 16192, 492, 2632, 1388, 16160, 16372,
+ 16196, 460, 2616, 1428, 16164, 16372,
+ 16200, 432, 2600, 1468, 16168, 16368,
+ 16204, 400, 2584, 1508, 16176, 16364,
+ 16212, 368, 2564, 1548, 16180, 16364,
+ 16216, 340, 2544, 1588, 16188, 16360,
+ 16220, 312, 2524, 1628, 16196, 16356,
+ 16228, 284, 2504, 1668, 16204, 16356,
+ 16232, 256, 2480, 1704, 16212, 16352,
+ 16240, 232, 2456, 1744, 16224, 16348,
+ 16244, 204, 2432, 1780, 16232, 16344,
+ 16248, 180, 2408, 1820, 16244, 16340,
+ 16256, 156, 2380, 1856, 16256, 16336,
+ 16260, 132, 2352, 1896, 16268, 16332,
+ 16268, 108, 2324, 1932, 16280, 16328,
+ 16272, 88, 2296, 1968, 16292, 16324,
+ 16276, 64, 2268, 2004, 16308, 16320,
+ 16284, 44, 2236, 2036, 16324, 16312,
+ 16288, 24, 2204, 2072, 16340, 16308,
+ 16292, 8, 2172, 2108, 16356, 16304,
+ 16300, 16372, 2140, 2140, 16372, 16300 };
+
+static const uint16_t filter_6tap_64p_183[198] = {
+ 16296, 1032, 2196, 1032, 16296, 0,
+ 16292, 1004, 2200, 1060, 16304, 16380,
+ 16288, 976, 2200, 1088, 16308, 16380,
+ 16284, 952, 2196, 1116, 16312, 16376,
+ 16284, 924, 2196, 1144, 16320, 16376,
+ 16280, 900, 2192, 1172, 16324, 16372,
+ 16276, 872, 2192, 1200, 16332, 16368,
+ 16276, 848, 2188, 1228, 16340, 16368,
+ 16272, 820, 2180, 1256, 16348, 16364,
+ 16272, 796, 2176, 1280, 16356, 16360,
+ 16268, 768, 2168, 1308, 16364, 16360,
+ 16268, 744, 2164, 1336, 16372, 16356,
+ 16268, 716, 2156, 1364, 16380, 16352,
+ 16264, 692, 2148, 1392, 4, 16352,
+ 16264, 668, 2136, 1420, 16, 16348,
+ 16264, 644, 2128, 1448, 28, 16344,
+ 16264, 620, 2116, 1472, 36, 16340,
+ 16264, 596, 2108, 1500, 48, 16340,
+ 16268, 572, 2096, 1524, 60, 16336,
+ 16268, 548, 2080, 1552, 72, 16332,
+ 16268, 524, 2068, 1576, 88, 16328,
+ 16268, 504, 2056, 1604, 100, 16324,
+ 16272, 480, 2040, 1628, 112, 16324,
+ 16272, 456, 2024, 1652, 128, 16320,
+ 16272, 436, 2008, 1680, 144, 16316,
+ 16276, 416, 1992, 1704, 156, 16312,
+ 16276, 392, 1976, 1724, 172, 16308,
+ 16280, 372, 1956, 1748, 188, 16308,
+ 16280, 352, 1940, 1772, 204, 16304,
+ 16284, 332, 1920, 1796, 224, 16300,
+ 16288, 312, 1900, 1816, 240, 16296,
+ 16288, 296, 1880, 1840, 256, 16296,
+ 16292, 276, 1860, 1860, 276, 16292 };
+
+static const uint16_t filter_7tap_64p_upscale[231] = {
+ 176, 15760, 2488, 2488, 15760, 176, 0,
+ 172, 15772, 2404, 2572, 15752, 180, 16380,
+ 168, 15784, 2324, 2656, 15740, 184, 16380,
+ 164, 15800, 2240, 2736, 15732, 188, 16376,
+ 160, 15812, 2152, 2816, 15728, 192, 16376,
+ 152, 15828, 2068, 2896, 15724, 192, 16376,
+ 148, 15848, 1984, 2972, 15720, 196, 16372,
+ 140, 15864, 1896, 3048, 15720, 196, 16372,
+ 136, 15884, 1812, 3124, 15720, 196, 16368,
+ 128, 15900, 1724, 3196, 15720, 196, 16368,
+ 120, 15920, 1640, 3268, 15724, 196, 16368,
+ 116, 15940, 1552, 3336, 15732, 196, 16364,
+ 108, 15964, 1468, 3400, 15740, 196, 16364,
+ 104, 15984, 1384, 3464, 15748, 192, 16364,
+ 96, 16004, 1300, 3524, 15760, 188, 16364,
+ 88, 16028, 1216, 3584, 15776, 184, 16364,
+ 84, 16048, 1132, 3640, 15792, 180, 16360,
+ 76, 16072, 1048, 3692, 15812, 176, 16360,
+ 68, 16092, 968, 3744, 15832, 168, 16360,
+ 64, 16116, 888, 3788, 15856, 160, 16360,
+ 56, 16140, 812, 3832, 15884, 152, 16360,
+ 52, 16160, 732, 3876, 15912, 144, 16360,
+ 44, 16184, 656, 3912, 15944, 136, 16364,
+ 40, 16204, 584, 3944, 15976, 124, 16364,
+ 32, 16228, 512, 3976, 16012, 116, 16364,
+ 28, 16248, 440, 4004, 16048, 104, 16364,
+ 24, 16268, 372, 4028, 16092, 88, 16368,
+ 20, 16288, 304, 4048, 16132, 76, 16368,
+ 12, 16308, 240, 4064, 16180, 60, 16372,
+ 8, 16328, 176, 4076, 16228, 48, 16372,
+ 4, 16348, 112, 4088, 16276, 32, 16376,
+ 0, 16364, 56, 4092, 16328, 16, 16380,
+ 0, 0, 0, 4096, 0, 0, 0 };
+
+static const uint16_t filter_7tap_64p_117[231] = {
+ 92, 15868, 2464, 2464, 15868, 92, 0,
+ 96, 15864, 2404, 2528, 15876, 88, 0,
+ 100, 15860, 2344, 2584, 15884, 84, 0,
+ 104, 15856, 2280, 2644, 15892, 76, 0,
+ 108, 15852, 2216, 2700, 15904, 72, 0,
+ 108, 15852, 2152, 2756, 15916, 64, 0,
+ 112, 15852, 2088, 2812, 15932, 60, 0,
+ 112, 15852, 2024, 2864, 15948, 52, 0,
+ 112, 15856, 1960, 2916, 15964, 44, 0,
+ 116, 15860, 1892, 2964, 15984, 36, 0,
+ 116, 15864, 1828, 3016, 16004, 24, 4,
+ 116, 15868, 1760, 3060, 16024, 16, 4,
+ 116, 15876, 1696, 3108, 16048, 8, 8,
+ 116, 15884, 1628, 3152, 16072, 16380, 8,
+ 112, 15892, 1564, 3192, 16100, 16372, 8,
+ 112, 15900, 1496, 3232, 16124, 16360, 12,
+ 112, 15908, 1428, 3268, 16156, 16348, 12,
+ 108, 15920, 1364, 3304, 16188, 16336, 16,
+ 108, 15928, 1300, 3340, 16220, 16324, 20,
+ 104, 15940, 1232, 3372, 16252, 16312, 20,
+ 104, 15952, 1168, 3400, 16288, 16300, 24,
+ 100, 15964, 1104, 3428, 16328, 16284, 28,
+ 96, 15980, 1040, 3452, 16364, 16272, 28,
+ 96, 15992, 976, 3476, 20, 16256, 32,
+ 92, 16004, 916, 3496, 64, 16244, 36,
+ 88, 16020, 856, 3516, 108, 16228, 40,
+ 84, 16032, 792, 3532, 152, 16216, 44,
+ 80, 16048, 732, 3544, 200, 16200, 48,
+ 80, 16064, 676, 3556, 248, 16184, 48,
+ 76, 16080, 616, 3564, 296, 16168, 52,
+ 72, 16092, 560, 3568, 344, 16156, 56,
+ 68, 16108, 504, 3572, 396, 16140, 60,
+ 64, 16124, 452, 3576, 452, 16124, 64 };
+
+static const uint16_t filter_7tap_64p_150[231] = {
+ 16224, 16380, 2208, 2208, 16380, 16224, 0,
+ 16232, 16360, 2172, 2236, 16, 16216, 0,
+ 16236, 16340, 2140, 2268, 40, 16212, 0,
+ 16244, 16324, 2104, 2296, 60, 16204, 4,
+ 16252, 16304, 2072, 2324, 84, 16196, 4,
+ 16256, 16288, 2036, 2352, 108, 16192, 4,
+ 16264, 16268, 2000, 2380, 132, 16184, 8,
+ 16272, 16252, 1960, 2408, 160, 16176, 8,
+ 16276, 16240, 1924, 2432, 184, 16172, 8,
+ 16284, 16224, 1888, 2456, 212, 16164, 8,
+ 16288, 16212, 1848, 2480, 240, 16160, 12,
+ 16296, 16196, 1812, 2500, 268, 16152, 12,
+ 16300, 16184, 1772, 2524, 296, 16144, 12,
+ 16308, 16172, 1736, 2544, 324, 16140, 12,
+ 16312, 16164, 1696, 2564, 356, 16136, 12,
+ 16320, 16152, 1656, 2584, 388, 16128, 12,
+ 16324, 16144, 1616, 2600, 416, 16124, 12,
+ 16328, 16136, 1576, 2616, 448, 16116, 12,
+ 16332, 16128, 1536, 2632, 480, 16112, 12,
+ 16340, 16120, 1496, 2648, 516, 16108, 12,
+ 16344, 16112, 1456, 2660, 548, 16104, 12,
+ 16348, 16104, 1416, 2672, 580, 16100, 12,
+ 16352, 16100, 1376, 2684, 616, 16096, 12,
+ 16356, 16096, 1336, 2696, 652, 16092, 12,
+ 16360, 16092, 1296, 2704, 688, 16088, 12,
+ 16364, 16088, 1256, 2712, 720, 16084, 12,
+ 16368, 16084, 1220, 2720, 760, 16084, 8,
+ 16368, 16080, 1180, 2724, 796, 16080, 8,
+ 16372, 16080, 1140, 2732, 832, 16080, 8,
+ 16376, 16076, 1100, 2732, 868, 16076, 4,
+ 16380, 16076, 1060, 2736, 908, 16076, 4,
+ 16380, 16076, 1020, 2740, 944, 16076, 0,
+ 0, 16076, 984, 2740, 984, 16076, 0 };
+
+static const uint16_t filter_7tap_64p_183[231] = {
+ 16216, 324, 1884, 1884, 324, 16216, 0,
+ 16220, 304, 1864, 1904, 344, 16216, 0,
+ 16224, 284, 1844, 1924, 364, 16216, 0,
+ 16224, 264, 1824, 1944, 384, 16212, 16380,
+ 16228, 248, 1804, 1960, 408, 16212, 16380,
+ 16228, 228, 1784, 1976, 428, 16208, 16380,
+ 16232, 212, 1760, 1996, 452, 16208, 16380,
+ 16236, 192, 1740, 2012, 472, 16208, 16376,
+ 16240, 176, 1716, 2028, 496, 16208, 16376,
+ 16240, 160, 1696, 2040, 516, 16208, 16376,
+ 16244, 144, 1672, 2056, 540, 16208, 16376,
+ 16248, 128, 1648, 2068, 564, 16208, 16372,
+ 16252, 112, 1624, 2084, 588, 16208, 16372,
+ 16256, 96, 1600, 2096, 612, 16208, 16368,
+ 16256, 84, 1576, 2108, 636, 16208, 16368,
+ 16260, 68, 1552, 2120, 660, 16208, 16368,
+ 16264, 56, 1524, 2132, 684, 16212, 16364,
+ 16268, 40, 1500, 2140, 712, 16212, 16364,
+ 16272, 28, 1476, 2152, 736, 16216, 16360,
+ 16276, 16, 1448, 2160, 760, 16216, 16356,
+ 16280, 4, 1424, 2168, 788, 16220, 16356,
+ 16284, 16376, 1396, 2176, 812, 16224, 16352,
+ 16288, 16368, 1372, 2184, 840, 16224, 16352,
+ 16292, 16356, 1344, 2188, 864, 16228, 16348,
+ 16292, 16344, 1320, 2196, 892, 16232, 16344,
+ 16296, 16336, 1292, 2200, 916, 16236, 16344,
+ 16300, 16324, 1264, 2204, 944, 16240, 16340,
+ 16304, 16316, 1240, 2208, 972, 16248, 16336,
+ 16308, 16308, 1212, 2212, 996, 16252, 16332,
+ 16312, 16300, 1184, 2216, 1024, 16256, 16332,
+ 16316, 16292, 1160, 2216, 1052, 16264, 16328,
+ 16316, 16284, 1132, 2216, 1076, 16268, 16324,
+ 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+
+static const uint16_t filter_8tap_64p_upscale[264] = {
+ 0, 0, 0, 4096, 0, 0, 0, 0,
+ 16376, 20, 16328, 4092, 56, 16364, 4, 0,
+ 16372, 36, 16272, 4088, 116, 16340, 12, 0,
+ 16364, 56, 16220, 4080, 180, 16320, 20, 0,
+ 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
+ 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
+ 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
+ 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
+ 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
+ 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
+ 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
+ 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
+ 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
+ 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
+ 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
+ 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
+ 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
+ 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
+ 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
+ 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
+ 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
+ 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
+ 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
+ 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
+ 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
+ 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
+ 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
+ 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
+ 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
+ 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
+ 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
+ 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
+ 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+
+static const uint16_t filter_8tap_64p_117[264] = {
+ 116, 16100, 428, 3564, 428, 16100, 116, 0,
+ 112, 16116, 376, 3564, 484, 16084, 120, 16380,
+ 104, 16136, 324, 3560, 540, 16064, 124, 16380,
+ 100, 16152, 272, 3556, 600, 16048, 128, 16380,
+ 96, 16168, 220, 3548, 656, 16032, 136, 16376,
+ 88, 16188, 172, 3540, 716, 16016, 140, 16376,
+ 84, 16204, 124, 3528, 780, 16000, 144, 16376,
+ 80, 16220, 76, 3512, 840, 15984, 148, 16372,
+ 76, 16236, 32, 3496, 904, 15968, 152, 16372,
+ 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
+ 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
+ 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
+ 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
+ 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
+ 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
+ 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
+ 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
+ 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
+ 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
+ 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
+ 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
+ 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
+ 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
+ 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
+ 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
+ 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
+ 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
+ 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
+ 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
+ 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
+ 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
+ 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
+ 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+
+static const uint16_t filter_8tap_64p_150[264] = {
+ 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
+ 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
+ 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
+ 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
+ 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
+ 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
+ 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
+ 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
+ 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
+ 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
+ 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
+ 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
+ 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
+ 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
+ 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
+ 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
+ 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
+ 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
+ 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
+ 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
+ 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
+ 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
+ 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
+ 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
+ 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
+ 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
+ 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
+ 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
+ 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
+ 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
+ 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
+ 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
+ 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+
+static const uint16_t filter_8tap_64p_183[264] = {
+ 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
+ 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
+ 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
+ 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
+ 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
+ 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
+ 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
+ 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
+ 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
+ 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
+ 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
+ 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
+ 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
+ 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
+ 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
+ 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
+ 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
+ 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
+ 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
+ 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
+ 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
+ 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
+ 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
+ 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
+ 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
+ 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
+ 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
+ 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
+ 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
+ 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
+ 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
+ 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
+ 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+
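+/* Filter selection below is keyed off the scaling ratio: ratios under 1.0
+ * use the upscale set, while the _117/_150/_183 tables appear to be tuned
+ * for downscale ratios around 1.17, 1.50 and 1.83, selected here with the
+ * 4/3 and 5/3 cut-over points.
+ */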
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_16p_150;
+ else
+ return filter_3tap_16p_183;
+}
+
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_64p_150;
+ else
+ return filter_3tap_64p_183;
+}
+
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_16p_150;
+ else
+ return filter_4tap_16p_183;
+}
+
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_64p_150;
+ else
+ return filter_4tap_64p_183;
+}
+
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_5tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_5tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_5tap_64p_150;
+ else
+ return filter_5tap_64p_183;
+}
+
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_6tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_6tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_6tap_64p_150;
+ else
+ return filter_6tap_64p_183;
+}
+
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_7tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_7tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_7tap_64p_150;
+ else
+ return filter_7tap_64p_183;
+}
+
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_8tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_8tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_8tap_64p_150;
+ else
+ return filter_8tap_64p_183;
+}
+
+const uint16_t *get_filter_2tap_16p(void)
+{
+ return filter_2tap_16p;
+}
+
+const uint16_t *get_filter_2tap_64p(void)
+{
+ return filter_2tap_64p;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
new file mode 100644
index 000000000000..4fd49a16c3b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -0,0 +1,1617 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dce_stream_encoder.h"
+#include "reg_helper.h"
+
+enum DP_PIXEL_ENCODING {
+DP_PIXEL_ENCODING_RGB444 = 0x00000000,
+DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
+DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
+DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
+DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
+DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
+DP_PIXEL_ENCODING_RESERVED = 0x00000006,
+};
+
+
+enum DP_COMPONENT_DEPTH {
+DP_COMPONENT_DEPTH_6BPC = 0x00000000,
+DP_COMPONENT_DEPTH_8BPC = 0x00000001,
+DP_COMPONENT_DEPTH_10BPC = 0x00000002,
+DP_COMPONENT_DEPTH_12BPC = 0x00000003,
+DP_COMPONENT_DEPTH_16BPC = 0x00000004,
+DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
+};
+
+
+#define REG(reg)\
+ (enc110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc110->se_shift->field_name, enc110->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DCE110_SE(audio)\
+ container_of(audio, struct dce110_stream_encoder, base)
+
+#define CTX \
+ enc110->base.ctx
+
+static void dce110_update_generic_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ uint32_t regval;
+ /* TODO (FPGA): figure out a proper number of max_retries for the lock
+ * polling; use 50 for now.
+ */
+ uint32_t max_retries = 50;
+
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ if (packet_index >= 8)
+ ASSERT(0);
+
+ /* Poll until dig_update_lock is not locked (ASIC-internal signal);
+ * assume the OTG master lock will unlock it.
+ */
+/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+ 0, 10, max_retries);*/
+
+ /* check if HW reading GSP memory */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* If HW is still reading GSP memory after the wait above, something
+ * is wrong; clear the GSP memory access conflict (and notify?),
+ * since SW is about to write to GSP memory.
+ */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+ }
+ /* choose which generic packet to use */
+ {
+ regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC_INDEX, packet_index);
+ }
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only) */
+ {
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, info_packet->hb0,
+ AFMT_GENERIC_HB1, info_packet->hb1,
+ AFMT_GENERIC_HB2, info_packet->hb2,
+ AFMT_GENERIC_HB3, info_packet->hb3);
+ }
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &info_packet->sb[0];
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content);
+ }
+
+ if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
+ /* force double-buffered packet update */
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC0_UPDATE, (packet_index == 0),
+ AFMT_GENERIC2_UPDATE, (packet_index == 2));
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+}
+
+static void dce110_update_hdmi_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ dce110_update_generic_info_packet(
+ enc110,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* choose which generic packet control to use */
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case 4:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 5:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 6:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 7:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#endif
+ default:
+ /* invalid HW packet index */
+ dm_logger_write(
+ ctx->logger, LOG_WARNING,
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+/* setup stream encoder in dp mode */
+static void dce110_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t misc0 = 0;
+ uint32_t misc1 = 0;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint8_t synchronous_clock = 0; /* asynchronous mode */
+ uint8_t colorimetry_bpc;
+#endif
+
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_DB_CNTL))
+ REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+#endif
+
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR422);
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR444);
+
+ if (crtc_timing->flags.Y_ONLY)
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits */
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_Y_ONLY);
+ /* Note: DP_MSA_MISC1 bit 7 is the indicator
+ * of Y-only mode.
+ * This bit is set in HW if register
+ * DP_PIXEL_ENCODING is programmed to 0x4 */
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR420);
+ if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_VID_N_MUL)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+#endif
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_RGB444);
+ break;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_MISC))
+ misc1 = REG_READ(DP_MSA_MISC);
+#endif
+
+ /* set color depth */
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ 0);
+ break;
+ case COLOR_DEPTH_888:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_8BPC);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_10BPC);
+
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_12BPC);
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_6BPC);
+ break;
+ }
+
+ /* set dynamic range and YCbCr range */
+ if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
+ REG_UPDATE_2(
+ DP_PIXEL_FORMAT,
+ DP_DYN_RANGE, 0,
+ DP_YCBCR_RANGE, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ colorimetry_bpc = 0;
+ break;
+ case COLOR_DEPTH_888:
+ colorimetry_bpc = 1;
+ break;
+ case COLOR_DEPTH_101010:
+ colorimetry_bpc = 2;
+ break;
+ case COLOR_DEPTH_121212:
+ colorimetry_bpc = 3;
+ break;
+ default:
+ colorimetry_bpc = 0;
+ break;
+ }
+
+ misc0 = misc0 | synchronous_clock;
+ misc0 = colorimetry_bpc << 5;
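+ /* Editor's note on the MSA MISC0 layout used here (per DP MSA MISC0
+ * conventions): bit 0 = synchronous clock, bits 2:1 = color format,
+ * bits 4:3 = colorimetry, bits 7:5 = component depth. Illustrative
+ * example: 8 bpc sRGB full range -> misc0 = (1 << 5) = 0x20.
+ */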
+
+ if (REG(DP_MSA_TIMING_PARAM1)) {
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc0 = misc0 | 0x0;
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* do nothing */
+ break;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_COLORIMETRY))
+ REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+
+ if (REG(DP_MSA_MISC))
+ REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
+
+ /* New DCN registers.
+ * dc_crtc_timing is a VESA DMT-style struct with data from the EDID.
+ */
+ if (REG(DP_MSA_TIMING_PARAM1))
+ REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
+ DP_MSA_HTOTAL, crtc_timing->h_total,
+ DP_MSA_VTOTAL, crtc_timing->v_total);
+#endif
+
+ /* Calculate from the VESA timing parameters;
+ * h_active_start is relative to the leading edge of sync.
+ */
+
+ h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
+ crtc_timing->h_addressable - crtc_timing->h_border_right;
+
+ h_back_porch = h_blank - crtc_timing->h_front_porch -
+ crtc_timing->h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = crtc_timing->h_sync_width + h_back_porch;
+
+
+ v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
+ crtc_timing->v_addressable - crtc_timing->v_border_bottom -
+ crtc_timing->v_front_porch;
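+ /* Illustrative worked example (editor's note), using a CEA 1080p60
+ * timing with no borders: h_total 2200, h_addressable 1920,
+ * h_front_porch 88, h_sync_width 44 gives h_blank = 280,
+ * h_back_porch = 148, h_active_start = 44 + 148 = 192;
+ * v_total 1125, v_addressable 1080, v_front_porch 4 gives
+ * v_active_start = 1125 - 1080 - 4 = 41.
+ */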
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* start at beginning of left border */
+ if (REG(DP_MSA_TIMING_PARAM2))
+ REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+ DP_MSA_HSTART, h_active_start,
+ DP_MSA_VSTART, v_active_start);
+
+ if (REG(DP_MSA_TIMING_PARAM3))
+ REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
+ DP_MSA_HSYNCWIDTH,
+ crtc_timing->h_sync_width,
+ DP_MSA_HSYNCPOLARITY,
+ !crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+ DP_MSA_VSYNCWIDTH,
+ crtc_timing->v_sync_width,
+ DP_MSA_VSYNCPOLARITY,
+ !crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+
+ /* HWIDTH includes border or overscan */
+ if (REG(DP_MSA_TIMING_PARAM4))
+ REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
+ DP_MSA_HWIDTH, crtc_timing->h_border_left +
+ crtc_timing->h_addressable + crtc_timing->h_border_right,
+ DP_MSA_VHEIGHT, crtc_timing->v_border_top +
+ crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+#endif
+ }
+#endif
+}
+
+static void dce110_stream_encoder_set_stream_attribute_helper(
+ struct dce110_stream_encoder *enc110,
+ struct dc_crtc_timing *crtc_timing)
+{
+ if (enc110->regs->TMDS_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+ }
+
+}
+
+/* setup stream encoder in hdmi mode */
+static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+
+ /* setup HDMI engine */
+ if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ REG_UPDATE_3(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+ }
+
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+ /* following belongs to audio */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+
+}
+
+/* setup stream encoder in dvi mode */
+static void dce110_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_khz;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+}
+
+static void dce110_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t x = dal_fixed31_32_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dal_fixed31_32_ceil(
+ dal_fixed31_32_shl(
+ dal_fixed31_32_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
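+ /* Editor's note (illustrative, assuming fixed31_32 carries 32
+ * fractional bits): avg_time_slots_per_mtp = 3.5 gives x = 3 and
+ * y = ceil(0.5 * 2^26) = 33554432, i.e. the fractional part
+ * expressed in units of 1/2^26.
+ */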
+
+ {
+ REG_SET_2(DP_MSE_RATE_CNTL, 0,
+ DP_MSE_RATE_X, x,
+ DP_MSE_RATE_Y, y);
+ }
+
+ /* wait for update to be completed on the link */
+ /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
+ /* is reset to 0 (not pending) */
+ REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+ 0,
+ 10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void dce110_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+
+ if (info_frame->avi.valid) {
+ const uint32_t *content =
+ (const uint32_t *) &info_frame->avi.sb[0];
+
+ REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+ REG_WRITE(AFMT_AVI_INFO1, content[1]);
+
+ REG_WRITE(AFMT_AVI_INFO2, content[2]);
+
+ REG_WRITE(AFMT_AVI_INFO3, content[3]);
+
+ REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION,
+ info_frame->avi.hb1);
+
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 1,
+ HDMI_AVI_INFO_CONT, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ } else {
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 0,
+ HDMI_AVI_INFO_CONT, 0);
+ }
+ }
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->HDMI_DB_DISABLE) {
+ /* for bring-up, disable double buffering (TODO) */
+ if (REG(HDMI_DB_CONTROL))
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->avi);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 4, &info_frame->hdrsmd);
+ }
+#endif
+}
+
+static void dce110_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0 & 1 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0);
+
+ /* stop generic packets 2 & 3 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* stop generic packets 2 & 3 on HDMI */
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+#endif
+}
+
+static void dce110_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (info_frame->vsc.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+
+ if (info_frame->spd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 2, /* packetIndex */
+ &info_frame->spd);
+
+ if (info_frame->hdrsmd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+ /* This bit is the master enable bit.
+ * When enabling the secondary stream engine,
+ * this master bit must also be set.
+ * This register is shared with the audio info frame.
+ * Therefore we need to enable the master bit
+ * if at least one of the fields is not 0.
+ */
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
+ REG_SET_7(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_AVI_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
+ REG_SET_10(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_GSP4_ENABLE, 0,
+ DP_SEC_GSP5_ENABLE, 0,
+ DP_SEC_GSP6_ENABLE, 0,
+ DP_SEC_GSP7_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+#endif
+ /* This register is shared with the audio info frame.
+ * Therefore we need to keep the master enabled
+ * if at least one of the fields is not 0 */
+
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+static void dce110_stream_encoder_dp_blank(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t retries = 0;
+ uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
+
+ /* Note: For CZ, we are changing driver default to disable
+ * stream deferred to next VBLANK. If results are positive, we
+ * will make the same change to all DCE versions. There are a
+ * handful of panels that cannot handle disable stream at
+ * HBLANK and will result in a white line flash across the
+ * screen on stream disable. */
+
+ /* Specify the video stream disable point
+ * (2 = start of the next vertical blank) */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+ /* Larger delay to wait until VBLANK - use max retry of
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ max_retries = DP_BLANK_MAX_RETRY * 150;
+
+ /* disable DP stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* the encoder stops sending the video stream
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
+
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+ 0,
+ 10, max_retries);
+
+ ASSERT(retries <= max_retries);
+
+ /* Tell the DP encoder to ignore timing from CRTC, must be done after
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+static void dce110_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+ uint32_t n_vid = 0x8000;
+ uint32_t m_vid;
+
+ /* M / N = Fstream / Flink
+ * m_vid / n_vid = pixel rate / link rate
+ */
+
+ uint64_t m_vid_l = n_vid;
+
+ m_vid_l *= param->pixel_clk_khz;
+ m_vid_l = div_u64(m_vid_l,
+ param->link_settings.link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ);
+
+ m_vid = (uint32_t) m_vid_l;
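+ /* Illustrative example (editor's note): a 148500 kHz pixel clock on
+ * an HBR2 link whose symbol clock, link_rate * LINK_RATE_REF_FREQ_IN_KHZ,
+ * works out to 540000 kHz (editor's assumption) gives
+ * m_vid = 0x8000 * 148500 / 540000 = 9011 with n_vid = 0x8000.
+ */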
+
+ /* enable auto measurement */
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+ /* auto measurement needs 1 full 0x8000 symbol cycle to kick in,
+ * therefore program initial values for Mvid and Nvid
+ */
+
+ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+ REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ }
+
+ /* set DIG_START to 0x1 to resync FIFO */
+
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+ /* switch DP encoder to CRTC data */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+ /* wait 100us for DIG/DP logic to prime
+ * (i.e. a few video lines)
+ */
+ udelay(100);
+
+ /* the hardware would start sending video at the start of the next DP
+ * frame (i.e. rising edge of the vblank).
+ * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+ * register has no effect on enable transition! HW always guarantees
+ * VID_STREAM enable at start of next frame, and this is not
+ * programmable
+ */
+
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+static void dce110_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ unsigned int value = enable ? 1 : 0;
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
+}
+
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
+
+#include "include/audio_types.h"
+
+/**
+* speakersToChannels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+/* translate speakers to channels */
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
+
+struct audio_clock_info {
+ /* pixel clock frequency*/
+ uint32_t pixel_clock_in_10khz;
+ /* N - 32KHz audio */
+ uint32_t n_32khz;
+ /* CTS - 32KHz audio*/
+ uint32_t cts_32khz;
+ uint32_t n_44khz;
+ uint32_t cts_44khz;
+ uint32_t n_48khz;
+ uint32_t cts_48khz;
+};
+
+/* 25.2MHz/1.001*/
+/* 25.2MHz/1.001*/
+/* 25.2MHz*/
+/* 27MHz */
+/* 27MHz*1.001*/
+/* 27MHz*1.001*/
+/* 54MHz*/
+/* 54MHz*1.001*/
+/* 74.25MHz/1.001*/
+/* 74.25MHz*/
+/* 148.5MHz/1.001*/
+/* 148.5MHz*/
+
+static const struct audio_clock_info audio_clock_info_table[16] = {
+ {2517, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2518, 4576, 28125, 7007, 31250, 6864, 28125},
+ {2520, 4096, 25200, 6272, 28000, 6144, 25200},
+ {2700, 4096, 27000, 6272, 30000, 6144, 27000},
+ {2702, 4096, 27027, 6272, 30030, 6144, 27027},
+ {2703, 4096, 27027, 6272, 30030, 6144, 27027},
+ {5400, 4096, 54000, 6272, 60000, 6144, 54000},
+ {5405, 4096, 54054, 6272, 60060, 6144, 54054},
+ {7417, 11648, 210937, 17836, 234375, 11648, 140625},
+ {7425, 4096, 74250, 6272, 82500, 6144, 74250},
+ {14835, 11648, 421875, 8918, 234375, 5824, 140625},
+ {14850, 4096, 148500, 6272, 165000, 6144, 148500},
+ {29670, 5824, 421875, 4459, 234375, 5824, 281250},
+ {29700, 3072, 222750, 4704, 247500, 5120, 247500},
+ {59340, 5824, 843750, 8918, 937500, 5824, 562500},
+ {59400, 3072, 445500, 9408, 990000, 6144, 594000}
+};
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
+ {2517, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2518, 9152, 84375, 7007, 48875, 9152, 56250},
+ {2520, 4096, 37800, 6272, 42000, 6144, 37800},
+ {2700, 4096, 40500, 6272, 45000, 6144, 40500},
+ {2702, 8192, 81081, 6272, 45045, 8192, 54054},
+ {2703, 8192, 81081, 6272, 45045, 8192, 54054},
+ {5400, 4096, 81000, 6272, 90000, 6144, 81000},
+ {5405, 4096, 81081, 6272, 90090, 6144, 81081},
+ {7417, 11648, 316406, 17836, 351562, 11648, 210937},
+ {7425, 4096, 111375, 6272, 123750, 6144, 111375},
+ {14835, 11648, 632812, 17836, 703125, 11648, 421875},
+ {14850, 4096, 222750, 6272, 247500, 6144, 222750},
+ {29670, 5824, 632812, 8918, 703125, 5824, 421875},
+ {29700, 4096, 445500, 4704, 371250, 5120, 371250}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
+ {2517, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2518, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2520, 4096, 50400, 6272, 56000, 6144, 50400},
+ {2700, 4096, 54000, 6272, 60000, 6144, 54000},
+ {2702, 4096, 54054, 6267, 60060, 8192, 54054},
+ {2703, 4096, 54054, 6272, 60060, 8192, 54054},
+ {5400, 4096, 108000, 6272, 120000, 6144, 108000},
+ {5405, 4096, 108108, 6272, 120120, 6144, 108108},
+ {7417, 11648, 421875, 17836, 468750, 11648, 281250},
+ {7425, 4096, 148500, 6272, 165000, 6144, 148500},
+ {14835, 11648, 843750, 8918, 468750, 11648, 281250},
+ {14850, 4096, 297000, 6272, 330000, 6144, 297000},
+ {29670, 5824, 843750, 4459, 468750, 5824, 562500},
+ {29700, 3072, 445500, 4704, 495000, 5120, 495000}
+
+
+};
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
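+
+ /* Illustrative example (editor's note): a 5.1 layout with FL_FR, LFE,
+ * FC and RL_RR set (RC, FLC_FRC, RLC_RRC clear) yields
+ * cea_channels.all = 0b00111111, matching the table above.
+ */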
+
+ return cea_channels;
+}
+
+static uint32_t calc_max_audio_packets_per_line(
+ const struct audio_crtc_info *crtc_info)
+{
+ uint32_t max_packets_per_line;
+
+ max_packets_per_line =
+ crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ max_packets_per_line *= crtc_info->pixel_repetition;
+
+ /* for other hdmi features */
+ max_packets_per_line -= 58;
+ /* for Control Period */
+ max_packets_per_line -= 16;
+ /* Number of Audio Packets per Line */
+ max_packets_per_line /= 32;
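+ /* Illustrative example (editor's note): h_total 2200, h_active 1920,
+ * no pixel repetition: (2200 - 1920 - 58 - 16) / 32 = 6 packets/line.
+ */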
+
+ return max_packets_per_line;
+}
+
+static void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct audio_clock_info *audio_clock_info)
+{
+ const struct audio_clock_info *clock_info;
+ uint32_t index;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t audio_array_size;
+
+ switch (color_depth) {
+ case COLOR_DEPTH_161616:
+ clock_info = audio_clock_info_table_48bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_48bpc);
+ break;
+ case COLOR_DEPTH_121212:
+ clock_info = audio_clock_info_table_36bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_36bpc);
+ break;
+ default:
+ clock_info = audio_clock_info_table;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table);
+ break;
+ }
+
+ if (clock_info != NULL) {
+ /* search for exact pixel clock in table */
+ for (index = 0; index < audio_array_size; index++) {
+ if (clock_info[index].pixel_clock_in_10khz >
+ crtc_pixel_clock_in_10khz)
+ break; /* no match */
+ else if (clock_info[index].pixel_clock_in_10khz ==
+ crtc_pixel_clock_in_10khz) {
+ /* match found */
+ *audio_clock_info = clock_info[index];
+ return;
+ }
+ }
+ }
+
+ /* not found */
+ if (actual_pixel_clock_in_khz == 0)
+ actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+ /* See the HDMI spec, the table entry under
+ * pixel clock "Other". */
+ audio_clock_info->pixel_clock_in_10khz =
+ actual_pixel_clock_in_khz / 10;
+ audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+ audio_clock_info->n_32khz = 4096;
+ audio_clock_info->n_44khz = 6272;
+ audio_clock_info->n_48khz = 6144;
+}
+
+static void dce110_se_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+ if (audio_info == NULL)
+ /* This should not happen; if it does, bail out so we don't crash */
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void dce110_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+ uint32_t max_packets_per_line;
+
+ /* For now still do the calculation, although this field is ignored
+ * when HDMI_PACKET_GEN_VERSION above is set to 1 */
+ max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &audio_clock_info);
+ dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
+ "\n%s:Input::requested_pixel_clock = %d" \
+ "calculated_pixel_clock = %d \n", __func__, \
+ crtc_info->requested_pixel_clock, \
+ crtc_info->calculated_pixel_clock);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+ /* The video driver cannot know in advance which sample rate will
+ * be used by the HD Audio driver; the
+ * HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+ * programmed in the interrupt callback */
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+ /* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+ /* AFMT_60958_2: keep these settings until the
+ * programming guide comes out */
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void dce110_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ /* --- The following are the registers
+ * copied from the SetupHDMI --- */
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void dce110_se_enable_audio_clock(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (REG(AFMT_CNTL) == 0)
+ return; /* DCE8/10 does not have this register */
+
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+ /* wait for AFMT clock to turn on,
+ * expectation: this should complete in 1-2 reads
+ *
+ * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+ *
+ * TODO: wait for clock_on does not work well. May need HW
+ * program sequence. But audio seems work normally even without wait
+ * for clock_on status change
+ */
+}
+
+static void dce110_se_enable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SEC_CNTL,
+ DP_SEC_ATP_ENABLE, 1,
+ DP_SEC_AIP_ENABLE, 1);
+
+ /* Program STREAM_ENABLE after all the other enables. */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_se_disable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+ DP_SEC_ASP_ENABLE, 0,
+ DP_SEC_ATP_ENABLE, 0,
+ DP_SEC_AIP_ENABLE, 0,
+ DP_SEC_ACM_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+
+ /* This register is shared with the encoder info frame. Therefore we
+ * need to keep the master enabled if at least one of the fields is not 0 */
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_dp_audio(enc);
+ dce110_se_enable_dp_audio(enc);
+}
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_disable_dp_audio(enc);
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_hdmi_audio(enc, audio_crtc_info);
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+
+static void setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst, bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
+}
+
+
+static const struct stream_encoder_funcs dce110_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ dce110_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ dce110_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ dce110_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ dce110_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ dce110_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ dce110_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ dce110_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ dce110_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ dce110_stream_encoder_dp_blank,
+ .dp_unblank =
+ dce110_stream_encoder_dp_unblank,
+ .audio_mute_control = dce110_se_audio_mute_control,
+
+ .dp_audio_setup = dce110_se_dp_audio_setup,
+ .dp_audio_enable = dce110_se_dp_audio_enable,
+ .dp_audio_disable = dce110_se_dp_audio_disable,
+
+ .hdmi_audio_setup = dce110_se_hdmi_audio_setup,
+ .hdmi_audio_disable = dce110_se_hdmi_audio_disable,
+ .setup_stereo_sync = setup_stereo_sync,
+ .set_avmute = dce110_stream_encoder_set_avmute,
+
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask)
+{
+ enc110->base.funcs = &dce110_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+ enc110->regs = regs;
+ enc110->se_shift = se_shift;
+ enc110->se_mask = se_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
new file mode 100644
index 000000000000..6c28229c76eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCE110_H__
+#define __DC_STREAM_ENCODER_DCE110_H__
+
+#include "stream_encoder.h"
+
+#define DCE110STRENC_FROM_STRENC(stream_encoder)\
+ container_of(stream_encoder, struct dce110_stream_encoder, base)
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+
+#define SE_COMMON_REG_LIST_DCE_BASE(id) \
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_AVI_INFO0, DIG, id), \
+ SRI(AFMT_AVI_INFO1, DIG, id), \
+ SRI(AFMT_AVI_INFO2, DIG, id), \
+ SRI(AFMT_AVI_INFO3, DIG, id)
+
+#define SE_COMMON_REG_LIST_BASE(id) \
+ SRI(AFMT_GENERIC_0, DIG, id), \
+ SRI(AFMT_GENERIC_1, DIG, id), \
+ SRI(AFMT_GENERIC_2, DIG, id), \
+ SRI(AFMT_GENERIC_3, DIG, id), \
+ SRI(AFMT_GENERIC_4, DIG, id), \
+ SRI(AFMT_GENERIC_5, DIG, id), \
+ SRI(AFMT_GENERIC_6, DIG, id), \
+ SRI(AFMT_GENERIC_7, DIG, id), \
+ SRI(AFMT_GENERIC_HDR, DIG, id), \
+ SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+ SRI(AFMT_60958_0, DIG, id), \
+ SRI(AFMT_60958_1, DIG, id), \
+ SRI(AFMT_60958_2, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(TMDS_CNTL, DIG, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_COMMON_REG_LIST(id)\
+ SE_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(AFMT_CNTL, DIG, id)
+
+#define SE_DCN_REG_LIST(id)\
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_CNTL, DIG, id),\
+ SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id)
+
+#define SE_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE112(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE120(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
+
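+/* Per-field bit shifts for the stream encoder registers; one entry for each
+ * field referenced by the SE_COMMON_MASK_SH_LIST_* macros above. */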
+struct dce_stream_encoder_shift {
+ uint8_t AFMT_GENERIC_INDEX;
+ uint8_t AFMT_GENERIC0_UPDATE;
+ uint8_t AFMT_GENERIC2_UPDATE;
+ uint8_t AFMT_GENERIC_HB0;
+ uint8_t AFMT_GENERIC_HB1;
+ uint8_t AFMT_GENERIC_HB2;
+ uint8_t AFMT_GENERIC_HB3;
+ uint8_t AFMT_GENERIC_LOCK_STATUS;
+ uint8_t AFMT_GENERIC_CONFLICT;
+ uint8_t AFMT_GENERIC_CONFLICT_CLR;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint8_t HDMI_GENERIC0_CONT;
+ uint8_t HDMI_GENERIC0_SEND;
+ uint8_t HDMI_GENERIC0_LINE;
+ uint8_t HDMI_GENERIC1_CONT;
+ uint8_t HDMI_GENERIC1_SEND;
+ uint8_t HDMI_GENERIC1_LINE;
+ uint8_t DP_PIXEL_ENCODING;
+ uint8_t DP_COMPONENT_DEPTH;
+ uint8_t DP_DYN_RANGE;
+ uint8_t DP_YCBCR_RANGE;
+ uint8_t HDMI_PACKET_GEN_VERSION;
+ uint8_t HDMI_KEEPOUT_MODE;
+ uint8_t HDMI_DEEP_COLOR_ENABLE;
+ uint8_t HDMI_CLOCK_CHANNEL_RATE;
+ uint8_t HDMI_DEEP_COLOR_DEPTH;
+ uint8_t HDMI_GC_CONT;
+ uint8_t HDMI_GC_SEND;
+ uint8_t HDMI_NULL_SEND;
+ uint8_t HDMI_DATA_SCRAMBLE_EN;
+ uint8_t HDMI_AUDIO_INFO_SEND;
+ uint8_t AFMT_AUDIO_INFO_UPDATE;
+ uint8_t HDMI_AUDIO_INFO_LINE;
+ uint8_t HDMI_GC_AVMUTE;
+ uint8_t DP_MSE_RATE_X;
+ uint8_t DP_MSE_RATE_Y;
+ uint8_t DP_MSE_RATE_UPDATE_PENDING;
+ uint8_t AFMT_AVI_INFO_VERSION;
+ uint8_t HDMI_AVI_INFO_SEND;
+ uint8_t HDMI_AVI_INFO_CONT;
+ uint8_t HDMI_AVI_INFO_LINE;
+ uint8_t DP_SEC_GSP0_ENABLE;
+ uint8_t DP_SEC_STREAM_ENABLE;
+ uint8_t DP_SEC_GSP1_ENABLE;
+ uint8_t DP_SEC_GSP2_ENABLE;
+ uint8_t DP_SEC_GSP3_ENABLE;
+ uint8_t DP_SEC_GSP4_ENABLE;
+ uint8_t DP_SEC_GSP5_ENABLE;
+ uint8_t DP_SEC_GSP6_ENABLE;
+ uint8_t DP_SEC_GSP7_ENABLE;
+ uint8_t DP_SEC_AVI_ENABLE;
+ uint8_t DP_SEC_MPG_ENABLE;
+ uint8_t DP_VID_STREAM_DIS_DEFER;
+ uint8_t DP_VID_STREAM_ENABLE;
+ uint8_t DP_VID_STREAM_STATUS;
+ uint8_t DP_STEER_FIFO_RESET;
+ uint8_t DP_VID_M_N_GEN_EN;
+ uint8_t DP_VID_N;
+ uint8_t DP_VID_M;
+ uint8_t DIG_START;
+ uint8_t AFMT_AUDIO_SRC_SELECT;
+ uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint8_t HDMI_AUDIO_DELAY_EN;
+ uint8_t AFMT_60958_CS_UPDATE;
+ uint8_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint8_t AFMT_60958_OSF_OVRD;
+ uint8_t HDMI_ACR_AUTO_SEND;
+ uint8_t HDMI_ACR_SOURCE;
+ uint8_t HDMI_ACR_AUDIO_PRIORITY;
+ uint8_t HDMI_ACR_CTS_32;
+ uint8_t HDMI_ACR_N_32;
+ uint8_t HDMI_ACR_CTS_44;
+ uint8_t HDMI_ACR_N_44;
+ uint8_t HDMI_ACR_CTS_48;
+ uint8_t HDMI_ACR_N_48;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint8_t DP_SEC_AUD_N;
+ uint8_t DP_SEC_TIMESTAMP_MODE;
+ uint8_t DP_SEC_ASP_ENABLE;
+ uint8_t DP_SEC_ATP_ENABLE;
+ uint8_t DP_SEC_AIP_ENABLE;
+ uint8_t DP_SEC_ACM_ENABLE;
+ uint8_t AFMT_AUDIO_SAMPLE_SEND;
+ uint8_t AFMT_AUDIO_CLOCK_EN;
+ uint8_t TMDS_PIXEL_ENCODING;
+ uint8_t TMDS_COLOR_FORMAT;
+ uint8_t DIG_STEREOSYNC_SELECT;
+ uint8_t DIG_STEREOSYNC_GATE_EN;
+ uint8_t DP_DB_DISABLE;
+ uint8_t DP_MSA_MISC0;
+ uint8_t DP_MSA_HTOTAL;
+ uint8_t DP_MSA_VTOTAL;
+ uint8_t DP_MSA_HSTART;
+ uint8_t DP_MSA_VSTART;
+ uint8_t DP_MSA_HSYNCWIDTH;
+ uint8_t DP_MSA_HSYNCPOLARITY;
+ uint8_t DP_MSA_VSYNCWIDTH;
+ uint8_t DP_MSA_VSYNCPOLARITY;
+ uint8_t DP_MSA_HWIDTH;
+ uint8_t DP_MSA_VHEIGHT;
+ uint8_t HDMI_DB_DISABLE;
+ uint8_t DP_VID_N_MUL;
+ uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce_stream_encoder_mask {
+ uint32_t AFMT_GENERIC_INDEX;
+ uint32_t AFMT_GENERIC0_UPDATE;
+ uint32_t AFMT_GENERIC2_UPDATE;
+ uint32_t AFMT_GENERIC_HB0;
+ uint32_t AFMT_GENERIC_HB1;
+ uint32_t AFMT_GENERIC_HB2;
+ uint32_t AFMT_GENERIC_HB3;
+ uint32_t AFMT_GENERIC_LOCK_STATUS;
+ uint32_t AFMT_GENERIC_CONFLICT;
+ uint32_t AFMT_GENERIC_CONFLICT_CLR;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint32_t HDMI_GENERIC0_CONT;
+ uint32_t HDMI_GENERIC0_SEND;
+ uint32_t HDMI_GENERIC0_LINE;
+ uint32_t HDMI_GENERIC1_CONT;
+ uint32_t HDMI_GENERIC1_SEND;
+ uint32_t HDMI_GENERIC1_LINE;
+ uint32_t DP_PIXEL_ENCODING;
+ uint32_t DP_COMPONENT_DEPTH;
+ uint32_t DP_DYN_RANGE;
+ uint32_t DP_YCBCR_RANGE;
+ uint32_t HDMI_PACKET_GEN_VERSION;
+ uint32_t HDMI_KEEPOUT_MODE;
+ uint32_t HDMI_DEEP_COLOR_ENABLE;
+ uint32_t HDMI_CLOCK_CHANNEL_RATE;
+ uint32_t HDMI_DEEP_COLOR_DEPTH;
+ uint32_t HDMI_GC_CONT;
+ uint32_t HDMI_GC_SEND;
+ uint32_t HDMI_NULL_SEND;
+ uint32_t HDMI_DATA_SCRAMBLE_EN;
+ uint32_t HDMI_AUDIO_INFO_SEND;
+ uint32_t AFMT_AUDIO_INFO_UPDATE;
+ uint32_t HDMI_AUDIO_INFO_LINE;
+ uint32_t HDMI_GC_AVMUTE;
+ uint32_t DP_MSE_RATE_X;
+ uint32_t DP_MSE_RATE_Y;
+ uint32_t DP_MSE_RATE_UPDATE_PENDING;
+ uint32_t AFMT_AVI_INFO_VERSION;
+ uint32_t HDMI_AVI_INFO_SEND;
+ uint32_t HDMI_AVI_INFO_CONT;
+ uint32_t HDMI_AVI_INFO_LINE;
+ uint32_t DP_SEC_GSP0_ENABLE;
+ uint32_t DP_SEC_STREAM_ENABLE;
+ uint32_t DP_SEC_GSP1_ENABLE;
+ uint32_t DP_SEC_GSP2_ENABLE;
+ uint32_t DP_SEC_GSP3_ENABLE;
+ uint32_t DP_SEC_GSP4_ENABLE;
+ uint32_t DP_SEC_GSP5_ENABLE;
+ uint32_t DP_SEC_GSP6_ENABLE;
+ uint32_t DP_SEC_GSP7_ENABLE;
+ uint32_t DP_SEC_AVI_ENABLE;
+ uint32_t DP_SEC_MPG_ENABLE;
+ uint32_t DP_VID_STREAM_DIS_DEFER;
+ uint32_t DP_VID_STREAM_ENABLE;
+ uint32_t DP_VID_STREAM_STATUS;
+ uint32_t DP_STEER_FIFO_RESET;
+ uint32_t DP_VID_M_N_GEN_EN;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_M;
+ uint32_t DIG_START;
+ uint32_t AFMT_AUDIO_SRC_SELECT;
+ uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint32_t HDMI_AUDIO_DELAY_EN;
+ uint32_t AFMT_60958_CS_UPDATE;
+ uint32_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint32_t AFMT_60958_OSF_OVRD;
+ uint32_t HDMI_ACR_AUTO_SEND;
+ uint32_t HDMI_ACR_SOURCE;
+ uint32_t HDMI_ACR_AUDIO_PRIORITY;
+ uint32_t HDMI_ACR_CTS_32;
+ uint32_t HDMI_ACR_N_32;
+ uint32_t HDMI_ACR_CTS_44;
+ uint32_t HDMI_ACR_N_44;
+ uint32_t HDMI_ACR_CTS_48;
+ uint32_t HDMI_ACR_N_48;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP_MODE;
+ uint32_t DP_SEC_ASP_ENABLE;
+ uint32_t DP_SEC_ATP_ENABLE;
+ uint32_t DP_SEC_AIP_ENABLE;
+ uint32_t DP_SEC_ACM_ENABLE;
+ uint32_t AFMT_AUDIO_SAMPLE_SEND;
+ uint32_t AFMT_AUDIO_CLOCK_EN;
+ uint32_t TMDS_PIXEL_ENCODING;
+ uint32_t DIG_STEREOSYNC_SELECT;
+ uint32_t DIG_STEREOSYNC_GATE_EN;
+ uint32_t TMDS_COLOR_FORMAT;
+ uint32_t DP_DB_DISABLE;
+ uint32_t DP_MSA_MISC0;
+ uint32_t DP_MSA_HTOTAL;
+ uint32_t DP_MSA_VTOTAL;
+ uint32_t DP_MSA_HSTART;
+ uint32_t DP_MSA_VSTART;
+ uint32_t DP_MSA_HSYNCWIDTH;
+ uint32_t DP_MSA_HSYNCPOLARITY;
+ uint32_t DP_MSA_VSYNCWIDTH;
+ uint32_t DP_MSA_VSYNCPOLARITY;
+ uint32_t DP_MSA_HWIDTH;
+ uint32_t DP_MSA_VHEIGHT;
+ uint32_t HDMI_DB_DISABLE;
+ uint32_t DP_VID_N_MUL;
+ uint32_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce110_stream_enc_registers {
+ uint32_t AFMT_CNTL;
+ uint32_t AFMT_AVI_INFO0;
+ uint32_t AFMT_AVI_INFO1;
+ uint32_t AFMT_AVI_INFO2;
+ uint32_t AFMT_AVI_INFO3;
+ uint32_t AFMT_GENERIC_0;
+ uint32_t AFMT_GENERIC_1;
+ uint32_t AFMT_GENERIC_2;
+ uint32_t AFMT_GENERIC_3;
+ uint32_t AFMT_GENERIC_4;
+ uint32_t AFMT_GENERIC_5;
+ uint32_t AFMT_GENERIC_6;
+ uint32_t AFMT_GENERIC_7;
+ uint32_t AFMT_GENERIC_HDR;
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_VBI_PACKET_CONTROL1;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t DIG_FE_CNTL;
+ uint32_t DP_MSE_RATE_CNTL;
+ uint32_t DP_MSE_RATE_UPDATE;
+ uint32_t DP_PIXEL_FORMAT;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_STEER_FIFO;
+ uint32_t DP_VID_M;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_VID_TIMING;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP;
+ uint32_t HDMI_CONTROL;
+ uint32_t HDMI_GC;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+ uint32_t HDMI_INFOFRAME_CONTROL0;
+ uint32_t HDMI_INFOFRAME_CONTROL1;
+ uint32_t HDMI_VBI_PACKET_CONTROL;
+ uint32_t HDMI_AUDIO_PACKET_CONTROL;
+ uint32_t HDMI_ACR_PACKET_CONTROL;
+ uint32_t HDMI_ACR_32_0;
+ uint32_t HDMI_ACR_32_1;
+ uint32_t HDMI_ACR_44_0;
+ uint32_t HDMI_ACR_44_1;
+ uint32_t HDMI_ACR_48_0;
+ uint32_t HDMI_ACR_48_1;
+ uint32_t TMDS_CNTL;
+ uint32_t DP_DB_CNTL;
+ uint32_t DP_MSA_MISC;
+ uint32_t DP_MSA_COLORIMETRY;
+ uint32_t DP_MSA_TIMING_PARAM1;
+ uint32_t DP_MSA_TIMING_PARAM2;
+ uint32_t DP_MSA_TIMING_PARAM3;
+ uint32_t DP_MSA_TIMING_PARAM4;
+ uint32_t HDMI_DB_CONTROL;
+};
+
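+/* DCE110 stream encoder instance: the generic stream_encoder plus pointers to
+ * its per-instance register addresses and the shared shift/mask tables. */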
+struct dce110_stream_encoder {
+ struct stream_encoder base;
+ const struct dce110_stream_enc_registers *regs;
+ const struct dce_stream_encoder_shift *se_shift;
+ const struct dce_stream_encoder_mask *se_mask;
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask);
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc, bool mute);
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc);
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc);
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
new file mode 100644
index 000000000000..ae32af31eff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_transform.h"
+#include "reg_helper.h"
+#include "opp.h"
+#include "basics/conversion.h"
+#include "dc.h"
+
+#define REG(reg) \
+ (xfm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name
+
+#define CTX \
+ xfm_dce->base.ctx
+
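+/* A ratio is an identity (1:1) scale when its u2.19 fixed-point
+ * representation equals exactly 1.0 (1 << 19). */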
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define GAMUT_MATRIX_SIZE 12
+#define SCL_PHASES 16
+
+enum dcp_out_trunc_round_mode {
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND
+};
+
+enum dcp_out_trunc_round_depth {
+ DCP_OUT_TRUNC_ROUND_DEPTH_14BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_13BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_11BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_9BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_8BIT
+};
+
+/* defines the various methods of bit reduction available for use */
+enum dcp_bit_depth_reduction_mode {
+ DCP_BIT_DEPTH_REDUCTION_MODE_DITHER,
+ DCP_BIT_DEPTH_REDUCTION_MODE_ROUND,
+ DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE,
+ DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED,
+ DCP_BIT_DEPTH_REDUCTION_MODE_INVALID
+};
+
+enum dcp_spatial_dither_mode {
+ DCP_SPATIAL_DITHER_MODE_AAAA,
+ DCP_SPATIAL_DITHER_MODE_A_AA_A,
+ DCP_SPATIAL_DITHER_MODE_AABBAABB,
+ DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC,
+ DCP_SPATIAL_DITHER_MODE_INVALID
+};
+
+enum dcp_spatial_dither_depth {
+ DCP_SPATIAL_DITHER_DEPTH_30BPP,
+ DCP_SPATIAL_DITHER_DEPTH_24BPP
+};
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
+
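+/* Program the scaler tap counts and mode. Returns false when the scaler is
+ * left in bypass (combined h + v taps of two or less), true when scaling is
+ * enabled. */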
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+
+ if (data->taps.h_taps + data->taps.v_taps <= 2) {
+ /* Set bypass */
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 0);
+ return false;
+ }
+
+ REG_SET_2(SCL_TAP_CONTROL, 0,
+ SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+
+ if (data->format <= PIXEL_FORMAT_GRPH_END)
+ REG_UPDATE(SCL_MODE, SCL_MODE, 1);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 2);
+
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE(SCL_MODE, SCL_PSCL_EN, 1);
+
+ /* 1 - Replace out of bound pixels with edge */
+ REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1);
+
+ return true;
+}
+
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ int overscan_right = data->h_active
+ - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active
+ - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, data->recout.x,
+ EXT_OVERSCAN_RIGHT, overscan_right);
+ REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_TOP, data->recout.y,
+ EXT_OVERSCAN_BOTTOM, overscan_bottom);
+}
+
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ int phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCL_PHASES / 2 + 1;
+
+ uint32_t power_ctl = 0;
+
+ if (!coeffs)
+ return;
+
+ /*We need to disable power gating on coeff memory to do programming*/
+ if (REG(DCFE_MEM_PWR_CTRL)) {
+ power_ctl = REG_READ(DCFE_MEM_PWR_CTRL);
+ REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1);
+
+ REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10);
+ }
+ for (phase = 0; phase < phases_to_program; phase++) {
+ /* We always program N/2 + 1 phases out of N total; the remaining
+ * N/2 - 1 phases are mirrors. Phase 0 is unique, and phase N/2 is
+ * unique when N is even. */
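+ /* Coefficients are written in even/odd pairs; for an odd tap count
+ * the last pair carries only an even coefficient. */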
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint16_t odd_coeff = 0;
+ uint16_t even_coeff = coeffs[array_idx];
+
+ REG_SET_3(SCL_COEF_RAM_SELECT, 0,
+ SCL_C_RAM_FILTER_TYPE, filter_type,
+ SCL_C_RAM_PHASE, phase,
+ SCL_C_RAM_TAP_PAIR_IDX, pair);
+
+ if (taps % 2 && pair == taps_pairs - 1)
+ array_idx++;
+ else {
+ odd_coeff = coeffs[array_idx + 1];
+ array_idx += 2;
+ }
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ SCL_C_RAM_EVEN_TAP_COEF_EN, 1,
+ SCL_C_RAM_EVEN_TAP_COEF, even_coeff,
+ SCL_C_RAM_ODD_TAP_COEF_EN, 1,
+ SCL_C_RAM_ODD_TAP_COEF, odd_coeff);
+ }
+ }
+
+ /*We need to restore power gating on coeff memory to initial state*/
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl);
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ const struct rect *view_port)
+{
+ REG_SET_2(VIEWPORT_START, 0,
+ VIEWPORT_X_START, view_port->x,
+ VIEWPORT_Y_START, view_port->y);
+
+ REG_SET_2(VIEWPORT_SIZE, 0,
+ VIEWPORT_HEIGHT, view_port->height,
+ VIEWPORT_WIDTH, view_port->width);
+
+ /* TODO: add stereo support */
+}
+
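+/*
+ * Compute the integer scale ratios and the filter initialization values.
+ * The ratios are converted to u2.19 fixed point and shifted left by 5 bits
+ * before being written to the SCL ratio registers; the initial phase is
+ * (ratio + taps + 1) / 2, split into an integer part and a 24-bit fraction.
+ */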
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct scl_ratios_inits *inits)
+{
+ struct fixed31_32 h_init;
+ struct fixed31_32 v_init;
+
+ inits->h_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+
+ h_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.horz,
+ dal_fixed31_32_from_int(data->taps.h_taps + 1)),
+ 2);
+ inits->h_init.integer = dal_fixed31_32_floor(h_init);
+ inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
+
+ v_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.vert,
+ dal_fixed31_32_from_int(data->taps.v_taps + 1)),
+ 2);
+ inits->v_init.integer = dal_fixed31_32_floor(v_init);
+ inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct scl_ratios_inits *inits)
+{
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
+
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_INT, inits->h_init.integer,
+ SCL_H_INIT_FRAC, inits->h_init.fraction);
+
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_INT, inits->v_init.integer,
+ SCL_V_INIT_FRAC, inits->v_init.fraction);
+
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+}
+
+static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_16p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_16p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_16p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static void dce_transform_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h;
+
+ /*Use all three pieces of memory always*/
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ LB_MEMORY_CONFIG, 0,
+ LB_MEMORY_SIZE, xfm_dce->lb_memory_size);
+
+ /* Clear SCL_F_SHARP_CONTROL value to 0 */
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+
+ /* 1. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 2. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 3. Calculate and program ratio, filter initialization */
+ struct scl_ratios_inits inits = { 0 };
+
+ calculate_inits(xfm_dce, data, &inits);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
+
+ if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ /* 4. Program vertical filters */
+ if (xfm_dce->filter_v == NULL)
+ REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+ SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_ALPHA_VERTICAL);
+
+ /* 5. Program horizontal filters */
+ if (xfm_dce->filter_h == NULL)
+ REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+ SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_ALPHA_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_h = coeffs_h;
+ filter_updated = true;
+ }
+ }
+
+ /* 6. Program the viewport */
+ program_viewport(xfm_dce, &data->viewport);
+
+ /* 7. Set bit to flip to new coefficient memory */
+ if (filter_updated)
+ REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1);
+
+ REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
+}
+
+/*****************************************************************************
+ * set_clamp
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ * @brief
+ * Programs clamp according to panel bit depth.
+ *
+ *******************************************************************************/
+static void set_clamp(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int clamp_max = 0;
+
+ /* At the clamp block the data will be MSB aligned, so we set the max
+ * clamp accordingly.
+ * For example, the max value for 6 bits MSB aligned (14 bit bus) would
+ * be "11 1111 0000 0000" in binary, so 0x3F00.
+ */
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */
+ clamp_max = 0x3F00;
+ break;
+ case COLOR_DEPTH_888:
+ /* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */
+ clamp_max = 0x3FC0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+ clamp_max = 0x3FFF;
+ break;
+ default:
+ clamp_max = 0x3FC0;
+ BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */
+ }
+ REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0,
+ OUT_CLAMP_MIN_B_CB, 0,
+ OUT_CLAMP_MAX_B_CB, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0,
+ OUT_CLAMP_MIN_G_Y, 0,
+ OUT_CLAMP_MAX_G_Y, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0,
+ OUT_CLAMP_MIN_R_CR, 0,
+ OUT_CLAMP_MAX_R_CR, clamp_max);
+}
+
+/*******************************************************************************
+ * set_round
+ *
+ * @brief
+ * Programs Round/Truncate
+ *
+ * @param [in] mode :round or truncate
+ * @param [in] depth :bit depth to round/truncate to
+ OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode
+ POSSIBLE VALUES:
+ 00 - truncate to u0.12
+ 01 - truncate to u0.11
+ 02 - truncate to u0.10
+ 03 - truncate to u0.9
+ 04 - truncate to u0.8
+ 05 - reserved
+ 06 - truncate to u0.14
+ 07 - truncate to u0.13
+ 08 - round to u0.12
+ 09 - round to u0.11
+ 10 - round to u0.10
+ 11 - round to u0.9
+ 12 - round to u0.8
+ 13 - reserved
+ 14 - round to u0.14
+ 15 - round to u0.13
+
+ ******************************************************************************/
+static void set_round(
+ struct dce_transform *xfm_dce,
+ enum dcp_out_trunc_round_mode mode,
+ enum dcp_out_trunc_round_depth depth)
+{
+ int depth_bits = 0;
+ int mode_bit = 0;
+
+ /* set up bit depth */
+ switch (depth) {
+ case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT:
+ depth_bits = 6;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT:
+ depth_bits = 7;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT:
+ depth_bits = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_11BIT:
+ depth_bits = 1;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT:
+ depth_bits = 2;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT:
+ depth_bits = 3;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT:
+ depth_bits = 4;
+ break;
+ default:
+ depth_bits = 4;
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */
+ }
+
+ /* set up round or truncate */
+ switch (mode) {
+ case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE:
+ mode_bit = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_MODE_ROUND:
+ mode_bit = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */
+ }
+
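+ /* OUT_ROUND_TRUNC_MODE: bits 2:0 select the target depth, bit 3
+ * selects round (1) versus truncate (0). */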
+ depth_bits |= mode_bit << 3;
+
+ REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits);
+}
+
+/*****************************************************************************
+ * set_dither
+ *
+ * @brief
+ * Programs Dither
+ *
+ * @param [in] dither_enable : enable dither
+ * @param [in] dither_mode : dither mode to set
+ * @param [in] dither_depth : bit depth to dither to
+ * @param [in] frame_random_enable : enable frame random
+ * @param [in] rgb_random_enable : enable rgb random
+ * @param [in] highpass_random_enable : enable highpass random
+ *
+ ******************************************************************************/
+
+static void set_dither(
+ struct dce_transform *xfm_dce,
+ bool dither_enable,
+ enum dcp_spatial_dither_mode dither_mode,
+ enum dcp_spatial_dither_depth dither_depth,
+ bool frame_random_enable,
+ bool rgb_random_enable,
+ bool highpass_random_enable)
+{
+ int dither_depth_bits = 0;
+ int dither_mode_bits = 0;
+
+ switch (dither_mode) {
+ case DCP_SPATIAL_DITHER_MODE_AAAA:
+ dither_mode_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_A_AA_A:
+ dither_mode_bits = 1;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBAABB:
+ dither_mode_bits = 2;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC:
+ dither_mode_bits = 3;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_mode */
+ BREAK_TO_DEBUGGER();
+ }
+
+ switch (dither_depth) {
+ case DCP_SPATIAL_DITHER_DEPTH_30BPP:
+ dither_depth_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_DEPTH_24BPP:
+ dither_depth_bits = 1;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_depth */
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* write the register */
+ REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0,
+ DCP_SPATIAL_DITHER_EN, dither_enable,
+ DCP_SPATIAL_DITHER_MODE, dither_mode_bits,
+ DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits,
+ DCP_FRAME_RANDOM_ENABLE, frame_random_enable,
+ DCP_RGB_RANDOM_ENABLE, rgb_random_enable,
+ DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable);
+}
+
+/*****************************************************************************
+ * dce_transform_bit_depth_reduction_program
+ *
+ * @brief
+ * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
+ * Dither) for dce
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ ******************************************************************************/
+static void program_bit_depth_reduction(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ enum dcp_bit_depth_reduction_mode depth_reduction_mode;
+ enum dcp_spatial_dither_mode spatial_dither_mode;
+ bool frame_random_enable;
+ bool rgb_random_enable;
+ bool highpass_random_enable;
+
+ ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+
+ if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
+ frame_random_enable = true;
+ rgb_random_enable = true;
+ highpass_random_enable = true;
+
+ } else {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
+ frame_random_enable = false;
+ rgb_random_enable = false;
+ highpass_random_enable = false;
+ }
+
+ spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
+
+ set_clamp(xfm_dce, depth);
+
+ switch (depth_reduction_mode) {
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
+ /* Spatial Dither: Set round/truncate to bypass (12bit),
+ * enable Dither (30bpp) */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, true, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
+ /* Round: Enable round (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
+ /* Truncate: Enable truncate (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
+ /* Disabled: set round/truncate to bypass (12bit),
+ * disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ default:
+ /* Invalid DCP Depth reduction mode */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static int dce_transform_get_max_num_of_supported_lines(
+ struct dce_transform *xfm_dce,
+ enum lb_pixel_depth depth,
+ int pixel_width)
+{
+ int pixels_per_entries = 0;
+ int max_pixels_supports = 0;
+
+ ASSERT(pixel_width);
+
+ /* Find number of pixels that can fit into a single LB entry and
+ * take floor of the value since we cannot store a single pixel
+ * across multiple entries. */
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 18;
+ break;
+
+ case LB_PIXEL_DEPTH_24BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 24;
+ break;
+
+ case LB_PIXEL_DEPTH_30BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 30;
+ break;
+
+ case LB_PIXEL_DEPTH_36BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 36;
+ break;
+
+ default:
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LB pixel depth",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ ASSERT(pixels_per_entries);
+
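+ /* Total pixels the line buffer can hold, divided by the line width,
+ * gives the number of whole lines that fit. */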
+ max_pixels_supports =
+ pixels_per_entries *
+ xfm_dce->lb_memory_size;
+
+ return (max_pixels_supports / pixel_width);
+}
+
+static void set_denormalization(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int denorm_mode = 0;
+
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 63/64 for 6 bit output color depth */
+ denorm_mode = 1;
+ break;
+ case COLOR_DEPTH_888:
+ /* Unity for 8 bit output color depth
+ * because prescale is disabled by default */
+ denorm_mode = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ denorm_mode = 3;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ denorm_mode = 5;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* not a valid use case! */
+ break;
+ }
+
+ REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode);
+}
+
+static void dce_transform_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth, expan_mode;
+ enum dc_color_depth color_depth;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ color_depth = COLOR_DEPTH_666;
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ color_depth = COLOR_DEPTH_888;
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ color_depth = COLOR_DEPTH_121212;
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_denormalization(xfm_dce, color_depth);
+ program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
+
+ REG_UPDATE_2(LB_DATA_FORMAT,
+ PIXEL_DEPTH, pixel_depth,
+ PIXEL_EXPAN_MODE, expan_mode);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /* we should not use unsupported capabilities
+ * unless it is required by a workaround */
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
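+/* Program the 3x4 gamut remap coefficient matrix (row-major) and select the
+ * programmed coefficients (mode 1), or fall back to bypass (mode 0) when no
+ * matrix is provided. */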
+static void program_gamut_remap(
+ struct dce_transform *xfm_dce,
+ const uint16_t *reg_val)
+{
+ if (reg_val) {
+ REG_SET_2(GAMUT_REMAP_C11_C12, 0,
+ GAMUT_REMAP_C11, reg_val[0],
+ GAMUT_REMAP_C12, reg_val[1]);
+ REG_SET_2(GAMUT_REMAP_C13_C14, 0,
+ GAMUT_REMAP_C13, reg_val[2],
+ GAMUT_REMAP_C14, reg_val[3]);
+ REG_SET_2(GAMUT_REMAP_C21_C22, 0,
+ GAMUT_REMAP_C21, reg_val[4],
+ GAMUT_REMAP_C22, reg_val[5]);
+ REG_SET_2(GAMUT_REMAP_C23_C24, 0,
+ GAMUT_REMAP_C23, reg_val[6],
+ GAMUT_REMAP_C24, reg_val[7]);
+ REG_SET_2(GAMUT_REMAP_C31_C32, 0,
+ GAMUT_REMAP_C31, reg_val[8],
+ GAMUT_REMAP_C32, reg_val[9]);
+ REG_SET_2(GAMUT_REMAP_C33_C34, 0,
+ GAMUT_REMAP_C33, reg_val[10],
+ GAMUT_REMAP_C34, reg_val[11]);
+
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1);
+ } else
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0);
+
+}
+
+/**
+ *****************************************************************************
+ * Function: dce_transform_set_gamut_remap
+ *
+ * @param [in] const struct xfm_grph_csc_adjustment *adjust
+ *
+ * @return
+ * void
+ *
+ * @note calculates and applies the color temperature adjustment in the RGB color space
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+static void dce_transform_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(xfm_dce, NULL);
+ else {
+ struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
+ uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
+
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
+
+ program_gamut_remap(xfm_dce, arr_reg_val);
+ }
+}
+
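+/* Pick a tap count: 1 for identity scaling, the caller-supplied value when
+ * non-zero, otherwise a default of 4; chroma uses half the luma taps with a
+ * minimum of 2. */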
+static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma)
+{
+ uint32_t taps;
+
+ if (IDENTITY_RATIO(ratio)) {
+ return 1;
+ } else if (in_taps != 0) {
+ taps = in_taps;
+ } else {
+ taps = 4;
+ }
+
+ if (chroma) {
+ taps /= 2;
+ if (taps < 2)
+ taps = 2;
+ }
+
+ return taps;
+}
+
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_width = scl_data->viewport.width;
+ int max_num_of_lines;
+
+ if (xfm_dce->prescaler_on &&
+ (scl_data->viewport.width > scl_data->recout.width))
+ pixel_width = scl_data->recout.width;
+
+ max_num_of_lines = dce_transform_get_max_num_of_supported_lines(
+ xfm_dce,
+ scl_data->lb_params.depth,
+ pixel_width);
+
+ /* Fail if in_taps are impossible */
+ if (in_taps->v_taps >= max_num_of_lines)
+ return false;
+
+ /*
+ * Set taps according to this policy (in this order)
+ * - Use 1 for no scaling
+ * - Use input taps
+ * - Use 4 and reduce as required by line buffer size
+ * - Decide chroma taps if chroma is scaled
+ *
+ * Ignore input chroma taps. Decide based on non-chroma
+ */
+ scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false);
+ scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false);
+ scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true);
+ scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true);
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert)) {
+ /* reduce v_taps if needed but ensure we have at least two */
+ if (in_taps->v_taps == 0
+ && max_num_of_lines <= scl_data->taps.v_taps
+ && scl_data->taps.v_taps > 1) {
+ scl_data->taps.v_taps = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps <= 1)
+ return false;
+ }
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) {
+ /* reduce chroma v_taps if needed but ensure we have at least two */
+ if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) {
+ scl_data->taps.v_taps_c = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps_c <= 1)
+ return false;
+ }
+
+ /* we've got valid taps */
+ return true;
+}
+
+static void dce_transform_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+}
+
+static void program_color_matrix(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ {
+ REG_SET_2(OUTPUT_CSC_C11_C12, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[0],
+ OUTPUT_CSC_C12, tbl_entry->regval[1]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C13_C14, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[2],
+ OUTPUT_CSC_C12, tbl_entry->regval[3]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C21_C22, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[4],
+ OUTPUT_CSC_C12, tbl_entry->regval[5]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C23_C24, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[6],
+ OUTPUT_CSC_C12, tbl_entry->regval[7]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C31_C32, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[8],
+ OUTPUT_CSC_C12, tbl_entry->regval[9]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C33_C34, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[10],
+ OUTPUT_CSC_C12, tbl_entry->regval[11]);
+ }
+}
+
+static bool configure_graphics_mode(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) {
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 4);
+ } else {
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+ }
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+
+ } else
+ /* by pass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ return true;
+}
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix(
+ xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
+
+ /* We did everything, now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+}
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (default_adjust->force_hw_default == false) {
+ const struct out_csc_color_matrix *elm;
+ /* currently parameter not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * force_hw_default == false: program the locally defined matrix.
+ * force_hw_default == true: use the predefined hw matrix, no
+ * programming is needed. An OEM can request the HW default via a
+ * runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file */
+ program_color_matrix(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ /* Configure what we programmed:
+ * 1. default values from this file, or
+ * 2. the hardware default from ROM_A, in which case the matrix
+ * does not need to be programmed */
+
+ configure_graphics_mode(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+}
+
+static void program_pwl(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value;
+ int retval;
+
+ {
+ uint8_t max_tries = 10;
+ uint8_t counter = 0;
+
+ /* Power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 1);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 1);
+
+ while (counter < max_tries) {
+ if (REG(DCFE_MEM_PWR_STATUS)) {
+ value = REG_READ(DCFE_MEM_PWR_STATUS);
+ REG_GET(DCFE_MEM_PWR_STATUS,
+ DCP_REGAMMA_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ } else {
+ value = REG_READ(DCFE_MEM_LIGHT_SLEEP_CNTL);
+ REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ }
+ }
+
+ if (counter == max_tries) {
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: regamma lut was not powered on "
+ "in a timely manner,"
+ " programming still proceeds\n",
+ __func__);
+ }
+ }
+
+ REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK,
+ REGAMMA_LUT_WRITE_EN_MASK, 7);
+
+ REG_WRITE(REGAMMA_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
+ {
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb = params->rgb_resulted;
+
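+ /* Each hardware point takes six sequential writes: the base
+ * red/green/blue values followed by their deltas. */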
+ while (i != params->hw_points_num) {
+
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+
+ /* we are done with DCP LUT memory; re-enable low power mode */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 0);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 0);
+}
+
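+/* Program the regamma curve layout: the start/end control points first, then
+ * the 16 exponential regions, two regions per register write. */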
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+
+ {
+ REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x,
+ REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0);
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope);
+
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_END_CNTL1, 0,
+ REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x);
+ }
+ {
+ REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0,
+ REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y,
+ REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[2].custom_float_slope);
+ }
+
+ curve = params->arr_curve_points;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+}
+
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
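+ /* The *_DIS fields keep the LUT memories out of power gating /
+ * light sleep while they are being accessed. */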
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE_2(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, power_on,
+ DCP_LUT_MEM_PWR_DIS, power_on);
+ else
+ REG_UPDATE_2(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on,
+ DCP_LUT_LIGHT_SLEEP_DIS, power_on);
+
+}
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ REG_SET(REGAMMA_CONTROL, 0,
+ GRPH_REGAMMA_MODE, mode);
+}
+
+static const struct transform_funcs dce_transform_funcs = {
+ .transform_reset = dce_transform_reset,
+ .transform_set_scaler =
+ dce_transform_set_scaler,
+ .transform_set_gamut_remap =
+ dce_transform_set_gamut_remap,
+ .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
+ .opp_set_csc_default = dce110_opp_set_csc_default,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
+ .transform_set_pixel_storage_depth =
+ dce_transform_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_transform_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.inst = inst;
+ xfm_dce->base.funcs = &dce_transform_funcs;
+
+ xfm_dce->regs = regs;
+ xfm_dce->xfm_shift = xfm_shift;
+ xfm_dce->xfm_mask = xfm_mask;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
new file mode 100644
index 000000000000..bfc94b4927b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_DCE_TRANSFORM_H_
+#define _DCE_DCE_TRANSFORM_H_
+
+
+#include "transform.h"
+
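+/* Map the embedded 'struct transform base' member back to its wrapping
+ * dce_transform instance.
+ */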
+#define TO_DCE_TRANSFORM(transform)\
+ container_of(transform, struct dce_transform, base)
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+#define XFM_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(LB_DATA_FORMAT, LB, id), \
+ SRI(GAMUT_REMAP_CONTROL, DCP, id), \
+ SRI(GAMUT_REMAP_C11_C12, DCP, id), \
+ SRI(GAMUT_REMAP_C13_C14, DCP, id), \
+ SRI(GAMUT_REMAP_C21_C22, DCP, id), \
+ SRI(GAMUT_REMAP_C23_C24, DCP, id), \
+ SRI(GAMUT_REMAP_C31_C32, DCP, id), \
+ SRI(GAMUT_REMAP_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_C11_C12, DCP, id), \
+ SRI(OUTPUT_CSC_C13_C14, DCP, id), \
+ SRI(OUTPUT_CSC_C21_C22, DCP, id), \
+ SRI(OUTPUT_CSC_C23_C24, DCP, id), \
+ SRI(OUTPUT_CSC_C31_C32, DCP, id), \
+ SRI(OUTPUT_CSC_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_CONTROL, DCP, id), \
+ SRI(REGAMMA_CNTLA_START_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_SLOPE_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL1, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL2, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_0_1, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_2_3, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_4_5, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_6_7, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_8_9, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_10_11, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_12_13, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_14_15, DCP, id), \
+ SRI(REGAMMA_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(REGAMMA_LUT_INDEX, DCP, id), \
+ SRI(REGAMMA_LUT_DATA, DCP, id), \
+ SRI(REGAMMA_CONTROL, DCP, id), \
+ SRI(DENORM_CONTROL, DCP, id), \
+ SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
+ SRI(OUT_ROUND_CONTROL, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_R_CR, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_G_Y, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_B_CB, DCP, id), \
+ SRI(SCL_MODE, SCL, id), \
+ SRI(SCL_TAP_CONTROL, SCL, id), \
+ SRI(SCL_CONTROL, SCL, id), \
+ SRI(SCL_BYPASS_CONTROL, SCL, id), \
+ SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
+ SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
+ SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_COEF_RAM_SELECT, SCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ SRI(VIEWPORT_START, SCL, id), \
+ SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, SCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+ SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
+ SRI(LB_MEMORY_CTRL, LB, id), \
+ SRI(SCL_UPDATE, SCL, id), \
+ SRI(SCL_F_SHARP_CONTROL, SCL, id)
+
+#define XFM_COMMON_REG_LIST_DCE80(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE100(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id), \
+ SRI(DCFE_MEM_PWR_STATUS, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE110(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
+ SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
+
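+/* Illustrative expansion: XFM_SF(SCL_MODE, SCL_MODE, __SHIFT) becomes
+ * .SCL_MODE = SCL_MODE__SCL_MODE__SHIFT, pairing each field with the shift
+ * or mask constant selected by the post_fix argument (__SHIFT or _MASK).
+ */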
+#define XFM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
+ XFM_SF(REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, ALPHA_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE80(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, DCP_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE110(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, ALPHA_EN, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(SCL0_SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_REG_FIELD_LIST(type) \
+ type OUT_CLAMP_MIN_B_CB; \
+ type OUT_CLAMP_MAX_B_CB; \
+ type OUT_CLAMP_MIN_G_Y; \
+ type OUT_CLAMP_MAX_G_Y; \
+ type OUT_CLAMP_MIN_R_CR; \
+ type OUT_CLAMP_MAX_R_CR; \
+ type OUT_ROUND_TRUNC_MODE; \
+ type DCP_SPATIAL_DITHER_EN; \
+ type DCP_SPATIAL_DITHER_MODE; \
+ type DCP_SPATIAL_DITHER_DEPTH; \
+ type DCP_FRAME_RANDOM_ENABLE; \
+ type DCP_RGB_RANDOM_ENABLE; \
+ type DCP_HIGHPASS_RANDOM_ENABLE; \
+ type DENORM_MODE; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type GAMUT_REMAP_C11; \
+ type GAMUT_REMAP_C12; \
+ type GAMUT_REMAP_C13; \
+ type GAMUT_REMAP_C14; \
+ type GAMUT_REMAP_C21; \
+ type GAMUT_REMAP_C22; \
+ type GAMUT_REMAP_C23; \
+ type GAMUT_REMAP_C24; \
+ type GAMUT_REMAP_C31; \
+ type GAMUT_REMAP_C32; \
+ type GAMUT_REMAP_C33; \
+ type GAMUT_REMAP_C34; \
+ type GRPH_GAMUT_REMAP_MODE; \
+ type OUTPUT_CSC_C11; \
+ type OUTPUT_CSC_C12; \
+ type OUTPUT_CSC_GRPH_MODE; \
+ type DCP_REGAMMA_MEM_PWR_DIS; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type REGAMMA_LUT_LIGHT_SLEEP_DIS; \
+ type DCP_LUT_LIGHT_SLEEP_DIS; \
+ type REGAMMA_CNTLA_EXP_REGION_START; \
+ type REGAMMA_CNTLA_EXP_REGION_START_SEGMENT; \
+ type REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION_END; \
+ type REGAMMA_CNTLA_EXP_REGION_END_BASE; \
+ type REGAMMA_CNTLA_EXP_REGION_END_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS; \
+ type REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS; \
+ type DCP_REGAMMA_MEM_PWR_STATE; \
+ type REGAMMA_LUT_MEM_PWR_STATE; \
+ type REGAMMA_LUT_WRITE_EN_MASK; \
+ type GRPH_REGAMMA_MODE; \
+ type SCL_MODE; \
+ type SCL_BYPASS_MODE; \
+ type SCL_PSCL_EN; \
+ type SCL_H_NUM_OF_TAPS; \
+ type SCL_V_NUM_OF_TAPS; \
+ type SCL_BOUNDARY_MODE; \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_TOP; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type SCL_COEFF_MEM_PWR_DIS; \
+ type SCL_COEFF_MEM_PWR_STATE; \
+ type SCL_C_RAM_FILTER_TYPE; \
+ type SCL_C_RAM_PHASE; \
+ type SCL_C_RAM_TAP_PAIR_IDX; \
+ type SCL_C_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_C_RAM_EVEN_TAP_COEF; \
+ type SCL_C_RAM_ODD_TAP_COEF_EN; \
+ type SCL_C_RAM_ODD_TAP_COEF; \
+ type VIEWPORT_X_START; \
+ type VIEWPORT_Y_START; \
+ type VIEWPORT_HEIGHT; \
+ type VIEWPORT_WIDTH; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC; \
+ type LB_MEMORY_CONFIG; \
+ type LB_MEMORY_SIZE; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_COEF_UPDATE_COMPLETE; \
+ type ALPHA_EN
+
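+/* The field list is instantiated twice below: with uint8_t for the per-field
+ * bit shifts and with uint32_t for the corresponding masks.
+ */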
+struct dce_transform_shift {
+ XFM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_transform_mask {
+ XFM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_transform_registers {
+ uint32_t LB_DATA_FORMAT;
+ uint32_t GAMUT_REMAP_CONTROL;
+ uint32_t GAMUT_REMAP_C11_C12;
+ uint32_t GAMUT_REMAP_C13_C14;
+ uint32_t GAMUT_REMAP_C21_C22;
+ uint32_t GAMUT_REMAP_C23_C24;
+ uint32_t GAMUT_REMAP_C31_C32;
+ uint32_t GAMUT_REMAP_C33_C34;
+ uint32_t OUTPUT_CSC_C11_C12;
+ uint32_t OUTPUT_CSC_C13_C14;
+ uint32_t OUTPUT_CSC_C21_C22;
+ uint32_t OUTPUT_CSC_C23_C24;
+ uint32_t OUTPUT_CSC_C31_C32;
+ uint32_t OUTPUT_CSC_C33_C34;
+ uint32_t OUTPUT_CSC_CONTROL;
+ uint32_t DCFE_MEM_LIGHT_SLEEP_CNTL;
+ uint32_t REGAMMA_CNTLA_START_CNTL;
+ uint32_t REGAMMA_CNTLA_SLOPE_CNTL;
+ uint32_t REGAMMA_CNTLA_END_CNTL1;
+ uint32_t REGAMMA_CNTLA_END_CNTL2;
+ uint32_t REGAMMA_CNTLA_REGION_0_1;
+ uint32_t REGAMMA_CNTLA_REGION_2_3;
+ uint32_t REGAMMA_CNTLA_REGION_4_5;
+ uint32_t REGAMMA_CNTLA_REGION_6_7;
+ uint32_t REGAMMA_CNTLA_REGION_8_9;
+ uint32_t REGAMMA_CNTLA_REGION_10_11;
+ uint32_t REGAMMA_CNTLA_REGION_12_13;
+ uint32_t REGAMMA_CNTLA_REGION_14_15;
+ uint32_t REGAMMA_LUT_WRITE_EN_MASK;
+ uint32_t REGAMMA_LUT_INDEX;
+ uint32_t REGAMMA_LUT_DATA;
+ uint32_t REGAMMA_CONTROL;
+ uint32_t DENORM_CONTROL;
+ uint32_t DCP_SPATIAL_DITHER_CNTL;
+ uint32_t OUT_ROUND_CONTROL;
+ uint32_t OUT_CLAMP_CONTROL_R_CR;
+ uint32_t OUT_CLAMP_CONTROL_G_Y;
+ uint32_t OUT_CLAMP_CONTROL_B_CB;
+ uint32_t SCL_MODE;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_CONTROL;
+ uint32_t SCL_BYPASS_CONTROL;
+ uint32_t EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t SCL_VERT_FILTER_CONTROL;
+ uint32_t SCL_HORZ_FILTER_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DCFE_MEM_PWR_STATUS;
+ uint32_t SCL_COEF_RAM_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t VIEWPORT_START;
+ uint32_t VIEWPORT_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_AUTOMATIC_MODE_CONTROL;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t SCL_UPDATE;
+ uint32_t SCL_F_SHARP_CONTROL;
+};
+
+struct init_int_and_frac {
+ uint32_t integer;
+ uint32_t fraction;
+};
+
+struct scl_ratios_inits {
+ uint32_t h_int_scale_ratio;
+ uint32_t v_int_scale_ratio;
+ struct init_int_and_frac h_init;
+ struct init_int_and_frac v_init;
+};
+
+enum ram_filter_type {
+ FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */
+ FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */
+	FILTER_TYPE_RGB_Y_HORIZONTAL = 2, /* 2 - RGB/Y Horizontal filter */
+ FILTER_TYPE_CBCR_HORIZONTAL = 3, /* 3 - CbCr Horizontal filter */
+ FILTER_TYPE_ALPHA_VERTICAL = 4, /* 4 - Alpha Vertical filter. */
+ FILTER_TYPE_ALPHA_HORIZONTAL = 5, /* 5 - Alpha Horizontal filter. */
+};
+
+struct dce_transform {
+ struct transform base;
+ const struct dce_transform_registers *regs;
+ const struct dce_transform_shift *xfm_shift;
+ const struct dce_transform_mask *xfm_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool prescaler_on;
+};
+
+void dce_transform_construct(struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask);
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+/* REGAMMA RELATED */
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif /* _DCE_DCE_TRANSFORM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
new file mode 100644
index 000000000000..ea40870624b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for the 'dce100' sub-component of DAL.
+# It provides the DCE 10.x resource definitions and hardware sequencer.
+
+DCE100 = dce100_resource.o dce100_hw_sequencer.o
+
+AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
+
+
+###############################################################################
+# DCE 10x
+###############################################################################
+ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
+TG_DCE100 = dce100_resource.o
+
+AMD_DAL_TG_DCE100 = $(addprefix \
+ $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
+endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
new file mode 100644
index 000000000000..e7a694835e3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce100_hw_sequencer.h"
+#include "resource.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE10 register header files */
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+struct dce100_hw_seq_reg_offsets {
+ uint32_t blnd;
+ uint32_t crtc;
+};
+
+static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
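+/* Translate a non-instanced CRTC register address to the given controller
+ * by adding the per-instance offset from reg_offsets[].
+ */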
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce100_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
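+	/* Only controller 0 issues the power-gating command at INIT time;
+	 * explicit enable/disable requests are forwarded for every controller.
+	 */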
+	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+		 * by default when the command table is called
+		 */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+	return bp_result == BP_RESULT_OK;
+}
+
+static void dce100_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;*/
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
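+		/* Request the new display clock with a 15% margin
+		 * (dispclk_khz * 115 / 100).
+		 */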
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce100_pplib_apply_display_requirements(dc, context);
+}
+
+
+/**************************************************************************/
+
+void dce100_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
new file mode 100644
index 000000000000..cb5384ef46c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE100_H__
+#define __DC_HWSS_DCE100_H__
+
+#include "core_types.h"
+
+struct dc;
+struct dc_state;
+
+void dce100_hw_sequencer_construct(struct dc *dc);
+
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+#endif /* __DC_HWSS_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
new file mode 100644
index 000000000000..90911258bdb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
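+/* Illustrative expansion: SRI(SCL_MODE, SCL, 0) becomes
+ * .SCL_MODE = mmSCL0_SCL_MODE.
+ */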
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE100_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE100(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE100_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_100_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
+};
+
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+
+
+#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3
+};
+
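+/* CTX and REG() feed the REG_GET/REG_SET helper macros pulled in from
+ * reg_helper.h.
+ */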
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce100_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce100_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id], &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE10_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE10_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce100_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce100_stream_encoder_create,
+ .create_hwseq = dce100_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce100_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce100_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce100_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce100_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 300000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce100_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct output_pixel_processor *dce100_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce100_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce100_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce100_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce100_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce100_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL)
+ dal_irq_service_destroy(&pool->base.irqs);
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+	/* TODO: implement when needed; for now hardcode the max values */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
+static bool dce100_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce100_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce100_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce100_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void dce100_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return DC_OK;
+
+ return DC_FAIL_SURFACE_VALIDATE;
+}
+
+static const struct resource_funcs dce100_res_pool_funcs = {
+ .destroy = dce100_destroy_resource_pool,
+ .link_enc_create = dce100_link_encoder_create,
+ .validate_guaranteed = dce100_validate_guaranteed,
+ .validate_bandwidth = dce100_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce100_validate_global
+};
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce100_res_pool_funcs;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ /*************************************************
+	 * Resource + asic cap hardcoding            *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce100_timing_generator_create(
+ ctx,
+ i,
+ &dce100_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce100_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce100_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce100_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce100_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce100_hw_sequencer_construct(dc);
+ return true;
+
+res_create_fail:
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
new file mode 100644
index 000000000000..de8fdf438f9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -0,0 +1,26 @@
+/*
+ * dce100_resource.h
+ *
+ * Created on: 2016-01-20
+ * Author: qyang
+ */
+
+#ifndef DCE100_RESOURCE_H_
+#define DCE100_RESOURCE_H_
+
+struct dc;
+struct resource_pool;
+struct dc_validation_set;
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+#endif /* DCE100_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
new file mode 100644
index 000000000000..98d956e2f218
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+DCE110 = dce110_timing_generator.o \
+dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
+dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
+dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
+
+AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE110)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
new file mode 100644
index 000000000000..6923662413cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#include "gmc/gmc_8_2_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce110_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce110_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+	/* Bit 29 - CG static screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+
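+/* Round the line width up to a whole number of 256-pixel chunks,
+ * e.g. 1920 pixels -> 8 chunks -> 2048.
+ */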
+static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
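+/* Poll FBC_STATUS until FBC_ENABLE_STATUS matches the requested state,
+ * giving up after 10 polls of 10 ms (~100 ms).
+ */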
+static void wait_for_fbc_state_changed(
+ struct dce110_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ msleep(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_SYNC,
+ "FBC status changed to %d", enabled);
+ }
+
+
+}
+
+void dce110_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce110_compressor_enable_fbc(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (!dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL))) {
+
+ uint32_t addr;
+ uint32_t value, misc_value;
+
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+		/* Toggle it as there is a bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* FBC usage with scatter & gather for dce110 */
+ misc_value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_INVALIDATE_ON_ERROR);
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_DECOMPRESS_ERROR_CLEAR);
+ set_reg_field_value(misc_value, 0x14,
+ FBC_MISC, FBC_SLOW_REQ_INTERVAL);
+
+ dm_write_reg(compressor->ctx, mmFBC_MISC, misc_value);
+
+ /* Enable FBC */
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce110_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
+bool dce110_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce110_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+	/* Disable the region hit event: FBC_MEMORY_REGION_MASK = 0 (bits 16-19).
+	 * For DCE 11 regions cannot be used - they do not work with S/G.
+	 */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+	 * buffer. Set events according to the passed-in value; these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+	 * In addition, DCE11.1 also needs to set new DCE11.1-specific events
+	 * that trigger invalidation on certain register changes; for example,
+	 * enabling Alpha Compression may trigger FBC invalidation once the bit
+	 * is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx)
+{
+ struct dce110_compressor *cp110 =
+ kzalloc(sizeof(struct dce110_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce110_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce110_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE110_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
+
+bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
+ struct fbc_requested_compressed_size size)
+{
+ bool result = false;
+
+ unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
+
+ get_max_support_fbc_buffersize(&max_x, &max_y);
+
+ if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
+		/*
+		 * For DCE11, use the max HW-supported size here: HW supports up to
+		 * 3840x2400 resolution, or 18000 chunks.
+		 */
+ size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
+ size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
+ size.bits.preferred_must_be_framebuffer_pool = 1;
+ size.bits.min_must_be_framebuffer_pool = 1;
+
+ result = true;
+ }
+	/*
+	 * A registry key with an optional size could be added here to override
+	 * the above for debugging purposes.
+	 */
+
+ return result;
+}
+
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
+{
+ *max_x = FBC_MAX_X;
+ *max_y = FBC_MAX_Y;
+
+ /* if (m_smallLocalFrameBufferMemory == 1)
+ * {
+ * *max_x = FBC_MAX_X_SG;
+ * *max_y = FBC_MAX_Y_SG;
+ * }
+ */
+}
+
+
+unsigned int controller_id_to_index(enum controller_id controller_id)
+{
+ unsigned int index = 0;
+
+ switch (controller_id) {
+ case CONTROLLER_ID_D0:
+ index = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ index = 1;
+ break;
+ case CONTROLLER_ID_D2:
+ index = 2;
+ break;
+ case CONTROLLER_ID_D3:
+ index = 3;
+ break;
+ default:
+ break;
+ }
+ return index;
+}
+
+
+static const struct compressor_funcs dce110_compressor_funcs = {
+ .power_up_fbc = dce110_compressor_power_up_fbc,
+ .enable_fbc = dce110_compressor_enable_fbc,
+ .disable_fbc = dce110_compressor_disable_fbc,
+ .set_fbc_invalidation_triggers = dce110_compressor_set_fbc_invalidation_triggers,
+ .surface_address_and_pitch = dce110_compressor_program_compressed_surface_address_and_pitch,
+ .is_fbc_enabled_in_hw = dce110_compressor_is_fbc_enabled_in_hw
+};
+
+
+void dce110_compressor_construct(struct dce110_compressor *compressor,
+ struct dc_context *ctx)
+{
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+
+ /* for dce 11 always use one dram channel for lpt */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /*
+ * check if this system has more than 1 dram channel; if only 1 then lpt
+ * should not be supported
+ */
+
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ compressor->base.funcs = &dce110_compressor_funcs;
+
+#endif
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
new file mode 100644
index 000000000000..26c7335a1cbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -0,0 +1,81 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE110_H__
+#define __DC_COMPRESSOR_DCE110_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE110_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce110_compressor, base)
+
+struct dce110_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce110_compressor {
+ struct compressor base;
+ struct dce110_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx);
+
+void dce110_compressor_construct(struct dce110_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce110_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce110_compressor_power_up_fbc(struct compressor *cp);
+
+void dce110_compressor_enable_fbc(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+void dce110_compressor_disable_fbc(struct compressor *cp);
+
+void dce110_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce110_compressor_enable_lpt(struct compressor *cp);
+
+void dce110_compressor_disable_lpt(struct compressor *cp);
+
+void dce110_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
new file mode 100644
index 000000000000..1229a3315018
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -0,0 +1,2987 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_bios_types.h"
+#include "core_types.h"
+#include "core_status.h"
+#include "resource.h"
+#include "dm_helpers.h"
+#include "dce110_hw_sequencer.h"
+#include "dce110_timing_generator.h"
+#include "dce/dce_hwseq.h"
+#include "gpio_service_interface.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110_compressor.h"
+#endif
+
+#include "bios/bios_parser_helper.h"
+#include "timing_generator.h"
+#include "mem_input.h"
+#include "opp.h"
+#include "ipp.h"
+#include "transform.h"
+#include "stream_encoder.h"
+#include "link_encoder.h"
+#include "link_hwss.h"
+#include "clock_source.h"
+#include "abm.h"
+#include "audio.h"
+#include "reg_helper.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "custom_float.h"
+
+/*
+ * All values are in milliseconds;
+ * For eDP, after power-up/power-down,
+ * 300/500 msec max. delay from LCDVCC to black video generation
+ */
+#define PANEL_POWER_UP_TIMEOUT 300
+#define PANEL_POWER_DOWN_TIMEOUT 500
+#define HPD_CHECK_INTERVAL 10
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce110_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce110_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTCV_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_BLND(reg, id)\
+ (reg + reg_offsets[id].blnd)
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define MAX_WATERMARK 0xFFFF
+#define SAFE_NBP_MARK 0x7FFF
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+static void dce110_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
+ addr = mmUNP_DVMM_PTE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+/**************************************************************************/
+
+static void enable_display_pipe_clock_gating(
+ struct dc_context *ctx,
+ bool clock_gating)
+{
+ /*TODO*/
+}
+
+static bool dce110_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (controller_id == underlay_idx)
+ controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
+
+	if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because bios sets it to 2
+ * by default when command table is called
+ *
+ * Bios parser accepts controller_id = 6 as indicative of
+ * underlay pipe in dce110. But we do not support more
+ * than 3.
+ */
+ if (controller_id < CONTROLLER_ID_MAX - 1)
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce110_init_pte(ctx);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+ const struct dc_plane_state *plane_state)
+{
+ prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
+
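+	/* Fixed-point prescale factor selected by surface bit depth
+	 * (8 bpc, 10 bpc and 16 bpc formats use different scales).
+	 */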
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ prescale_params->scale = 0x2020;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ prescale_params->scale = 0x2008;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ prescale_params->scale = 0x2000;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+}
+
+static bool dce110_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ const struct dc_transfer_func *tf = NULL;
+ struct ipp_prescale_params prescale_params = { 0 };
+ bool result = true;
+
+ if (ipp == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ build_prescale_params(&prescale_params, plane_state);
+ ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+
+ if (tf == NULL) {
+ /* Default case if no input transfer function specified */
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ } else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS - Not supported in DCE 11*/
+ result = false;
+ }
+
+ return result;
+}
+
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[2].slope,
+ &fmt,
+ &arr_points[2].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ uint32_t i, j, k, seg_distr[16], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 16 segments
+ * segments are from 2^-11 to 2^5
+ */
+ segment_start = -11;
+ segment_end = 5;
+
+ seg_distr[0] = 2;
+ seg_distr[1] = 2;
+ seg_distr[2] = 2;
+ seg_distr[3] = 2;
+ seg_distr[4] = 2;
+ seg_distr[5] = 2;
+ seg_distr[6] = 3;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+ seg_distr[10] = 4;
+ seg_distr[11] = 5;
+ seg_distr[12] = 5;
+ seg_distr[13] = 5;
+ seg_distr[14] = 5;
+ seg_distr[15] = 5;
+
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ */
+ segment_start = -10;
+ segment_end = 0;
+
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+ seg_distr[10] = -1;
+ seg_distr[11] = -1;
+ seg_distr[12] = -1;
+ seg_distr[13] = -1;
+ seg_distr[14] = -1;
+ seg_distr[15] = -1;
+ }
+
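+	/* Each valid segment contributes 2^seg_distr[k] points; the PQ
+	 * distribution above sums to 24 + 8 + 64 + 160 = 256 HW points.
+	 */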
+ for (k = 0; k < 16; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = 32 / (1 << seg_distr[k]);
+ start_index = (segment_start + k + 25) * 32;
+ for (i = start_index; i < start_index + 32; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + 25) * 32;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+	 * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < 16 && i < 16; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
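+	/* Force the curve to be non-decreasing and compute the per-point deltas
+	 * that are programmed into the regamma PWL.
+	 */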
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dce110_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct transform *xfm = pipe_ctx->plane_res.xfm;
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, true);
+ xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
+ } else if (dce110_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &xfm->regamma_params)) {
+ xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
+ } else {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_BYPASS);
+ }
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, false);
+
+ return true;
+}
+
+static enum dc_status bios_parser_crtc_source_select(
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_bios *dcb;
+ /* call VBIOS table to set CRTC source for the HW
+ * encoder block
+	 * note: video bios clears all FMT settings here. */
+ struct bp_crtc_source_select crtc_source_select = {0};
+ const struct dc_sink *sink = pipe_ctx->stream->sink;
+
+ crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
+ crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1;
+ /*TODO: Need to un-hardcode color depth, dp_audio and account for
+	 * the case where the signal and sink signal are different (translator
+ * encoder)*/
+ crtc_source_select.signal = pipe_ctx->stream->signal;
+ crtc_source_select.enable_dp_audio = false;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ crtc_source_select.display_output_bit_depth = PANEL_6BIT_COLOR;
+ break;
+ case COLOR_DEPTH_888:
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ crtc_source_select.display_output_bit_depth = PANEL_10BIT_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ crtc_source_select.display_output_bit_depth = PANEL_12BIT_COLOR;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ }
+
+ dcb = sink->ctx->dc_bios;
+
+ if (BP_RESULT_OK != dcb->funcs->crtc_source_select(
+ dcb,
+ &crtc_source_select)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+}
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
+{
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->sink->link->cur_link_settings.lane_count;
+
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* 1. update AVI info frame (HDMI, DP)
+ * we always need to update info frame
+ */
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ /* TODOFPGA may change to hwss.update_info_frame */
+ dce110_update_info_frame(pipe_ctx);
+ /* enable early control to avoid corruption on DP monitor*/
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
+
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
+
+ if (early_control == 0)
+ early_control = lane_count;
+
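+	/* e.g. 1920 + 0 + 0 active pixels on a 4-lane link: 1920 % 4 == 0,
+	 * so early_control falls back to the lane count (4).
+	 */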
+ tg->funcs->set_early_control(tg, early_control);
+
+ /* enable audio only within mode set */
+ if (pipe_ctx->stream_res.audio != NULL) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
+ }
+
+	/* For MST, multiple streams go to one link.
+	 * Connect DIG back_end to front_end during enable_stream and
+	 * disconnect them during disable_stream.
+	 * This keeps the separation of stream and link logically clean. */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+}
+
+/*todo: cloned in stream enc, fix*/
+static bool is_panel_backlight_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+static bool is_panel_powered_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
+ return value == 1;
+}
+
+static enum bp_result link_transmitter_control(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+
+ result = bios->funcs->transmitter_control(bios, cntl);
+
+ return result;
+}
+
+/*
+ * @brief
+ * eDP only.
+ */
+void hwss_edp_wait_for_hpd_ready(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct graphics_object_id connector = enc->connector;
+ struct gpio *hpd;
+ bool edp_hpd_high = false;
+ uint32_t time_elapsed = 0;
+ uint32_t timeout = power_up ?
+ PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+
+ if (dal_graphics_object_id_get_connector_id(connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!power_up)
+ /*
+		 * From KV, we do not wait for HPD to go low after turning off VCC -
+ * instead, we will check the SW timer in power_up().
+ */
+ return;
+
+ /*
+ * When we power on/off the eDP panel,
+ * we need to wait until SENSE bit is high/low.
+ */
+
+ /* obtain HPD */
+ /* TODO what to do with this? */
+ hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+
+ if (!hpd) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
+
+ /* wait until timeout or panel detected */
+
+ do {
+ uint32_t detected = 0;
+
+ dal_gpio_get_value(hpd, &detected);
+
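+		/* Stop waiting once the HPD level matches the requested state
+		 * (high when powering up).
+		 */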
+ if (!(detected ^ power_up)) {
+ edp_hpd_high = true;
+ break;
+ }
+
+ msleep(HPD_CHECK_INTERVAL);
+
+ time_elapsed += HPD_CHECK_INTERVAL;
+ } while (time_elapsed < timeout);
+
+ dal_gpio_close(hpd);
+
+ dal_gpio_destroy_irq(&hpd);
+
+ if (false == edp_hpd_high) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: wait timed out!\n", __func__);
+ }
+}
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct dce_hwseq *hwseq = ctx->dc->hwseq;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result bp_result;
+
+
+ if (dal_graphics_object_id_get_connector_id(enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (power_up != is_panel_powered_on(hwseq)) {
+ /* Send VBIOS command to prompt eDP panel power */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+
+ cntl.action = power_up ?
+ TRANSMITTER_CONTROL_POWER_ON :
+ TRANSMITTER_CONTROL_POWER_OFF;
+ cntl.transmitter = enc->transmitter;
+ cntl.connector_obj_id = enc->connector;
+ cntl.coherent = false;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = enc->hpd_source;
+
+ bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (bp_result != BP_RESULT_OK)
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Panel Power bp_result: %d\n",
+ __func__, bp_result);
+ } else {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Skipping Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+ }
+
+ hwss_edp_wait_for_hpd_ready(enc, true);
+}
+
+/*todo: cloned in stream enc, fix*/
+/*
+ * @brief
+ * eDP only. Control the backlight of the eDP panel
+ */
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable)
+{
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct dc_context *ctx = link->dc->ctx;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (dal_graphics_object_id_get_connector_id(link->link_id)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enable && is_panel_backlight_on(hws)) {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: panel already powered up. Do nothing.\n",
+ __func__);
+ return;
+ }
+
+ /* Send VBIOS command to control eDP panel backlight */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: backlight action: %s\n",
+ __func__, (enable ? "On":"Off"));
+
+ cntl.action = enable ?
+ TRANSMITTER_CONTROL_BACKLIGHT_ON :
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF;
+
+ /*cntl.engine_id = ctx->engine;*/
+ cntl.transmitter = link->link_enc->transmitter;
+ cntl.connector_obj_id = link->link_enc->connector;
+ /*todo: unhardcode*/
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = link->link_enc->hpd_source;
+
+ /* For eDP, the following delays might need to be considered
+ * after link training completed:
+ * idle period - min. accounts for required BS-Idle pattern,
+	 * max. allows for source frame synchronization;
+	 * 50 msec max. delay from valid video data from source
+	 * to video on display or backlight enable.
+ *
+ * Disable the delay for now.
+ * Enable it in the future if necessary.
+ */
+ /* dc_service_sleep_in_milliseconds(50); */
+ link_transmitter_control(link->dc->ctx->dc_bios, &cntl);
+}
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream_res.audio) {
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+		/* don't free audio if this is from a retrain or an internal disable stream */
+ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ pipe_ctx->stream_res.audio = NULL;
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+ }
+
+ /* TODO: notify audio driver for if audio modes list changed
+ * add audio mode list change flag */
+ /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
+ * stream->stream_engine_id);
+ */
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+
+
+ /* blank at encoder level */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, false);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+ link->link_enc->funcs->connect_dig_be_to_fe(
+ link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
+}
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* only 3 items below are used by unblank */
+ params.pixel_clk_khz =
+ pipe_ctx->stream->timing.pix_clk_khz;
+ params.link_settings.link_rate = link_settings->link_rate;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, true);
+}
+
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ if (pipe_ctx != NULL && pipe_ctx->stream_res.stream_enc != NULL)
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
+}
+
+static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+{
+ switch (crtc_id) {
+ case CONTROLLER_ID_D0:
+ return DTO_SOURCE_ID0;
+ case CONTROLLER_ID_D1:
+ return DTO_SOURCE_ID1;
+ case CONTROLLER_ID_D2:
+ return DTO_SOURCE_ID2;
+ case CONTROLLER_ID_D3:
+ return DTO_SOURCE_ID3;
+ case CONTROLLER_ID_D4:
+ return DTO_SOURCE_ID4;
+ case CONTROLLER_ID_D5:
+ return DTO_SOURCE_ID5;
+ default:
+ return DTO_SOURCE_UNKNOWN;
+ }
+}
+
+static void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ audio_output->engine_id = pipe_ctx->stream_res.stream_enc->id;
+
+ audio_output->signal = pipe_ctx->stream->signal;
+
+ /* audio_crtc_info */
+
+ audio_output->crtc_info.h_total =
+ stream->timing.h_total;
+
+ /*
+	 * Audio packets are sent during the actual CRTC blank physical signal, so
+	 * we need to specify the actual active signal portion
+ */
+ audio_output->crtc_info.h_active =
+ stream->timing.h_addressable
+ + stream->timing.h_border_left
+ + stream->timing.h_border_right;
+
+ audio_output->crtc_info.v_active =
+ stream->timing.v_addressable
+ + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+
+ audio_output->crtc_info.pixel_repetition = 1;
+
+ audio_output->crtc_info.interlaced =
+ stream->timing.flags.INTERLACE;
+
+ audio_output->crtc_info.refresh_rate =
+ (stream->timing.pix_clk_khz*1000)/
+ (stream->timing.h_total*stream->timing.v_total);
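+	/* e.g. a 148500 kHz pixel clock with a 2200 x 1125 total frame gives
+	 * 148500000 / 2475000 = 60 Hz.
+	 */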
+
+ audio_output->crtc_info.color_depth =
+ stream->timing.display_color_depth;
+
+ audio_output->crtc_info.requested_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+/*for HDMI, audio ACR is with deep color ratio factor*/
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
+ audio_output->crtc_info.requested_pixel_clock ==
+ stream->timing.pix_clk_khz) {
+ if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ audio_output->crtc_info.requested_pixel_clock =
+ audio_output->crtc_info.requested_pixel_clock/2;
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk/2;
+
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ audio_output->pll_info.dp_dto_source_clock_in_khz =
+ state->dis_clk->funcs->get_dp_ref_clk_frequency(
+ state->dis_clk);
+ }
+
+ audio_output->pll_info.feed_back_divider =
+ pipe_ctx->pll_settings.feedback_divider;
+
+ audio_output->pll_info.dto_source =
+ translate_to_dto_source(
+ pipe_ctx->pipe_idx + 1);
+
+ /* TODO hard code to enable for now. Need get from stream */
+ audio_output->pll_info.ss_enabled = true;
+
+ audio_output->pll_info.ss_percentage =
+ pipe_ctx->pll_settings.ss_percentage;
+}
+
+static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
+
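+	/* The confirm color is keyed to the surface pixel format; its intensity
+	 * drops with pipe index so each pipe is visually distinguishable.
+	 */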
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+		/* set border color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+		/* set border color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+		/* set border color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+		/* set border color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+		/* set border color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
+static void program_scaler(const struct dc *dc,
+ const struct pipe_ctx *pipe_ctx)
+{
+ struct tg_color color = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* TOFPGA */
+ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
+ return;
+#endif
+
+ if (dc->debug.surface_visual_confirm)
+ get_surface_visual_confirm_color(pipe_ctx, &color);
+ else
+ color_space_to_black_color(dc,
+ pipe_ctx->stream->output_color_space,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipe_ctx->plane_res.xfm,
+ pipe_ctx->plane_res.scl_data.lb_params.depth,
+ &pipe_ctx->stream->bit_depth_params);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+ pipe_ctx->stream_res.tg,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data);
+}
+
+static enum dc_status dce110_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+ struct tg_color black_color = {0};
+
+ if (!pipe_ctx_old->stream) {
+
+ /* program blank color */
+ color_space_to_black_color(dc,
+ stream->output_color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ /*
+ * Must blank CRTC after disabling power gating and before any
+		 * programming, otherwise the CRTC will hang in a bad state
+ */
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
+ pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx->stream_res.tg,
+ 0x182);
+ }
+
+ if (!pipe_ctx_old->stream) {
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(
+ pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ }
+
+
+
+ return DC_OK;
+}
+
+static enum dc_status apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+
+ /* */
+ dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
+
+ /* FPGA does not program backend */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ return DC_OK;
+ }
+ /* TODO: move to stream encoder */
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ stream->sink->link->link_enc->funcs->setup(
+ stream->sink->link->link_enc,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.tg->inst,
+ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->output_color_space);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->phy_pix_clk,
+ pipe_ctx->stream_res.audio != NULL);
+
+ if (dc_is_dvi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+ true : false);
+
+ resource_build_info_frame(pipe_ctx);
+ dce110_update_info_frame(pipe_ctx);
+ if (!pipe_ctx_old->stream) {
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_enable_stream(context, pipe_ctx);
+ }
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ pipe_ctx->stream->sink->link->psr_enabled = false;
+
+ return DC_OK;
+}
+
+/******************************************************************************/
+
+static void power_down_encoders(struct dc *dc)
+{
+ int i;
+ enum connector_id connector_id;
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ /* we do not know the BIOS back-end to front-end mapping, so simply blank
+ * all stream encoders; this does not hurt for non-DP signals
+ */
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ dc->res_pool->stream_enc[i]->funcs->dp_blank(
+ dc->res_pool->stream_enc[i]);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
+ if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP)) {
+
+ if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ if (connector_id == CONNECTOR_ID_EDP)
+ signal = SIGNAL_TYPE_EDP;
+ }
+
+ dc->links[i]->link_enc->funcs->disable_output(
+ dc->links[i]->link_enc, signal, dc->links[i]);
+ }
+}
+
+static void power_down_controllers(struct dc *dc)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ dc->res_pool->timing_generators[i]->funcs->disable_crtc(
+ dc->res_pool->timing_generators[i]);
+ }
+}
+
+static void power_down_clock_sources(struct dc *dc)
+{
+ int i;
+
+ if (dc->res_pool->dp_clock_source->funcs->cs_power_down(
+ dc->res_pool->dp_clock_source) == false)
+ dm_error("Failed to power down pll! (dp clk src)\n");
+
+ for (i = 0; i < dc->res_pool->clk_src_count; i++) {
+ if (dc->res_pool->clock_sources[i]->funcs->cs_power_down(
+ dc->res_pool->clock_sources[i]) == false)
+ dm_error("Failed to power down pll! (clk src index=%d)\n", i);
+ }
+}
+
+static void power_down_all_hw_blocks(struct dc *dc)
+{
+ power_down_encoders(dc);
+
+ power_down_controllers(dc);
+
+ power_down_clock_sources(dc);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+}
+
+static void disable_vga_and_power_gate_all_controllers(
+ struct dc *dc)
+{
+ int i;
+ struct timing_generator *tg;
+ struct dc_context *ctx = dc->ctx;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_vga)
+ tg->funcs->disable_vga(tg);
+
+ /* Enable CLOCK gating for each pipe BEFORE controller
+ * power gating. */
+ enable_display_pipe_clock_gating(ctx,
+ true);
+
+ dc->hwss.power_down_front_end(dc, i);
+ }
+}
+
+/**
+ * When the ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need to:
+ * 1. Power down all DC HW blocks
+ * 2. Disable the VGA engine on all controllers
+ * 3. Enable power gating for each controller
+ * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
+ */
+void dce110_enable_accelerated_mode(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+
+ disable_vga_and_power_gate_all_controllers(dc);
+ bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
+}
+
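+/*
+ * blackout_duration is a fixed-point value; the ">> 24" drops what appear to
+ * be 24 fractional bits, and the "* 1000" suggests the integer part is in
+ * microseconds, converted here to nanoseconds.
+ *
+ * Illustrative example (values assumed, not from any particular ASIC):
+ * h_total = 2200 and pix_clk_khz = 148500 give a destination line time of
+ * 1000000 * 2200 / 148500 ~= 14814 ns, to which the p-state blackout
+ * duration is added.
+ */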
+static uint32_t compute_pstate_blackout_duration(
+ struct bw_fixed blackout_duration,
+ const struct dc_stream_state *stream)
+{
+ uint32_t total_dest_line_time_ns;
+ uint32_t pstate_blackout_duration_ns;
+
+ pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24;
+
+ total_dest_line_time_ns = 1000000UL *
+ stream->timing.h_total /
+ stream->timing.pix_clk_khz +
+ pstate_blackout_duration_ns;
+
+ return total_dest_line_time_ns;
+}
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i, num_pipes;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ for (i = 0, num_pipes = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t total_dest_line_time_ns;
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ total_dest_line_time_ns = compute_pstate_blackout_duration(
+ dc->bw_vbios->blackout_duration, pipe_ctx->stream);
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
+ if (i == underlay_idx) {
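+ /* the underlay consumes a second set of watermark entries
+ * for its chroma plane, hence the extra increment here */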
+ num_pipes++;
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
+ }
+ num_pipes++;
+ }
+}
+
+static void set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
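+ /* Program conservative "safe" watermark values while a new context is
+ * being applied; the real display marks are computed and programmed
+ * later via dce110_set_displaymarks(). */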
+ int i;
+ int underlay_idx = pool->underlay_pipe_index;
+ struct dce_watermarks max_marks = {
+ MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
+ struct dce_watermarks nbp_marks = {
+ SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
+ continue;
+
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ if (i == underlay_idx)
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, additional
+ * GSL (global swap lock) programming is needed
+ */
+
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
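+ /* Build a bitmask of static-screen trigger events; the specific bit
+ * encodings below are DCE register specific. */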
+ if (events->overlay_update)
+ value |= 0x100;
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ value |= 0x84;
+#endif
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+/* Unit: kHz. Before mode set, get the pixel clock from the context; the ASIC
+ * registers may not be programmed yet.
+ * TODO: after mode set (pre_mode_set = false),
+ * the PLL registers could be read to get the pixel clock
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context,
+ bool pre_mode_set)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ if (!pre_mode_set) {
+ /* TODO: read ASIC register to get pixel clock */
+ ASSERT(0);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+
+ if (max_pix_clk == 0)
+ ASSERT(0);
+
+ return max_pix_clk;
+}
+
+/*
+ * Find the clock state based on the requested clock. If the clock value is 0,
+ * simply set the clock state as requested without looking it up by clock value
+ */
+
+static void apply_min_clocks(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dm_pp_clocks_state *clocks_state,
+ bool pre_mode_set)
+{
+ struct state_dependent_clocks req_clocks = {0};
+
+ if (!pre_mode_set) {
+ /* set clock_state without verification */
+ if (context->dis_clk->funcs->set_min_clocks_state) {
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ return;
+ }
+
+ /* TODO: This is incorrect. Figure out how to fix. */
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ context->dis_clk->cur_clocks_value.dispclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
+ pre_mode_set,
+ false);
+ return;
+ }
+
+ /* get the required state based on state dependent clocks:
+ * display clock and pixel clock
+ */
+ req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
+
+ req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
+ dc, context, true);
+
+ if (context->dis_clk->funcs->get_required_clocks_state) {
+ *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
+ context->dis_clk, &req_clocks);
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ } else {
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ req_clocks.display_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+
+/*
+ * Check if FBC can be enabled
+ */
+static enum dc_status validate_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ ASSERT(dc->fbc_compressor);
+
+ /* FBC memory should be allocated */
+ if (!dc->ctx->fbc_gpu_addr)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports single display */
+ if (context->stream_count != 1)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports eDP */
+ if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
+ return DC_ERROR_UNEXPECTED;
+
+ /* PSR should not be enabled */
+ if (pipe_ctx->stream->sink->link->psr_enabled)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only for non-linear tiling */
+ if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+/*
+ * Enable FBC
+ */
+static enum dc_status enable_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = validate_fbc(dc, context);
+
+ if (status == DC_OK) {
+ /* Program GRPH COMPRESSED ADDRESS and PITCH */
+ struct compr_addr_and_pitch_params params = {0, 0, 0};
+ struct compressor *compr = dc->fbc_compressor;
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ params.source_view_width =
+ pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height =
+ pipe_ctx->stream->timing.v_addressable;
+
+ compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
+
+ compr->funcs->surface_address_and_pitch(compr, &params);
+ compr->funcs->set_fbc_invalidation_triggers(compr, 1);
+
+ compr->funcs->enable_fbc(compr, &params);
+ }
+ return status;
+}
+#endif
+
+static enum dc_status apply_ctx_to_hw_fpga(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (status != DC_OK)
+ return status;
+ }
+
+ return DC_OK;
+}
+
+static void dce110_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Note: We need to disable the output if the clock sources change,
+ * since the BIOS does an optimization and does not apply the change
+ * if the PHY is reprogrammed while not already disabled.
+ */
+
+ /* Skip the underlay pipe since it will be handled in the surface commit */
+ if (!pipe_ctx_old->stream || pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ /* already disabled, no need to disable again */
+ if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
+
+ pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
+ if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) {
+ dm_error("DC: failed to blank crtc!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+
+ dc->hwss.power_down_front_end(dc, pipe_ctx_old->pipe_idx);
+
+ pipe_ctx_old->stream = NULL;
+ }
+ }
+}
+
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+ enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ apply_ctx_to_hw_fpga(dc, context);
+ return DC_OK;
+ }
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+ /*TODO: when pplib works*/
+ apply_min_clocks(dc, context, &clocks_state, true);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ if (context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ context->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ } else
+#endif
+ if (context->bw.dce.dispclk_khz
+ > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ }
+ /* Program the audio wall clock. Use HDMI as the clock source if HDMI
+ * audio is active; otherwise, use DP as the clock source.
+ * First loop to find any HDMI audio; if none, loop to find DP audio.
+ */
+ /* Setup audio rate clock source */
+ /* Issue:
+ * Audio lag was observed on a DP monitor when unplugging an HDMI monitor
+ *
+ * Cause:
+ * When DP and HDMI are connected, or HDMI only, DCCG_AUDIO_DTO_SEL
+ * is set to either dto0 or dto1 and audio works fine.
+ * When only DP is connected, DCCG_AUDIO_DTO_SEL should be dto1;
+ * setting it to dto0 causes audio lag.
+ *
+ * Solution:
+ * A non-optimized audio wall DTO setup. At mode set, iterate over pipe_ctx,
+ * find the first available pipe with audio, and set up the audio wall DTO
+ * per topology instead of per pipe.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+
+ /* no HDMI audio is found, try DP audio */
+ if (i == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (!dc_is_dp_signal(pipe_ctx->stream->signal))
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx_old->stream
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
+
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (dc->hwss.power_on_front_end)
+ dc->hwss.power_on_front_end(dc, pipe_ctx, context);
+
+ if (DC_OK != status)
+ return status;
+ }
+
+ /* pplib is notified if disp_num changed */
+ dc->hwss.set_bandwidth(dc, context, true);
+
+ /* to save power */
+ apply_min_clocks(dc, context, &clocks_state, false);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ enable_fbc(dc, context);
+
+#endif
+
+ return DC_OK;
+}
+
+/*******************************************************************************
+ * Front End programming
+ ******************************************************************************/
+static void set_default_colors(struct pipe_ctx *pipe_ctx)
+{
+ struct default_adjustment default_adjust = { 0 };
+
+ default_adjust.force_hw_default = false;
+ if (pipe_ctx->plane_state == NULL)
+ default_adjust.in_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.in_color_space =
+ pipe_ctx->plane_state->color_space;
+ if (pipe_ctx->stream == NULL)
+ default_adjust.out_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.out_color_space =
+ pipe_ctx->stream->output_color_space;
+ default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
+
+ /* display color depth */
+ default_adjust.color_depth =
+ pipe_ctx->stream->timing.display_color_depth;
+
+ /* Lb color depth */
+ default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
+ pipe_ctx->plane_res.xfm, &default_adjust);
+}
+
+
+/*******************************************************************************
+ * To turn a specific surface on or off we program the
+ * Blender + CRTC
+ *
+ * When two surfaces have different visibility we cannot simply turn off
+ * the CRTC, since that would turn off the entire display
+ *
+ * |----------------------------------------------------|
+ * | bottom pipe | curr pipe  | Blender       | CRTC    |
+ * | Surface     | Surface    | Configuration |         |
+ * | visibility  | visibility |               |         |
+ * |----------------------------------------------------|
+ * | off         | off        | CURRENT_PIPE  | blank   |
+ * | off         | on         | CURRENT_PIPE  | unblank |
+ * | on          | off        | OTHER_PIPE    | unblank |
+ * | on          | on         | BLENDING      | unblank |
+ * |----------------------------------------------------|
+ *
+ ******************************************************************************/
+static void program_surface_visibility(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum blnd_mode blender_mode = BLND_MODE_CURRENT_PIPE;
+ bool blank_target = false;
+
+ if (pipe_ctx->bottom_pipe) {
+
+ /* For now we are supporting only two pipes */
+ ASSERT(pipe_ctx->bottom_pipe->bottom_pipe == NULL);
+
+ if (pipe_ctx->bottom_pipe->plane_state->visible) {
+ if (pipe_ctx->plane_state->visible)
+ blender_mode = BLND_MODE_BLENDING;
+ else
+ blender_mode = BLND_MODE_OTHER_PIPE;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
+
+}
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct xfm_grph_csc_adjustment adjust;
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+}
+
+/**
+ * TODO REMOVE, USE UPDATE INSTEAD
+ */
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ unsigned int i;
+
+ memset(&adjust, 0, sizeof(adjust));
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+ program_scaler(dc, pipe_ctx);
+
+ program_surface_visibility(dc, pipe_ctx);
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+}
+
+static void update_plane_addr(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.mi,
+ &plane_state->address,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+}
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.mi->funcs->mem_input_is_flip_pending(
+ pipe_ctx->plane_res.mi);
+
+ if (plane_state->status.is_flip_pending && !plane_state->visible)
+ pipe_ctx->plane_res.mi->current_address = pipe_ctx->plane_res.mi->request_address;
+
+ plane_state->status.current_address = pipe_ctx->plane_res.mi->current_address;
+ if (pipe_ctx->plane_res.mi->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =
+ !pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+void dce110_power_down(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+ disable_vga_and_power_gate_all_controllers(dc);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+ /* To avoid an endless loop we wait at most
+ * frames_to_wait_on_triggered_reset frames for the reset to occur. */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ uint32_t i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (false == rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
+
+/* Enable timing synchronization for a group of Timing Generators. */
+static void dce110_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcp_gsl_params gsl_params = { 0 };
+ int i;
+
+ DC_SYNC_INFO("GSL: Setting-up...\n");
+
+ /* Designate a single TG in the group as a master.
+ * Since HW doesn't care which one, we always assign
+ * the 1st one in the group. */
+ gsl_params.gsl_group = 0;
+ gsl_params.gsl_master = grouped_pipes[0]->stream_res.tg->inst;
+
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
+ grouped_pipes[i]->stream_res.tg, &gsl_params);
+
+ /* Reset slave controllers on master VSync */
+ DC_SYNC_INFO("GSL: enabling trigger-reset\n");
+
+ for (i = 1 /* skip the master */; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, gsl_params.gsl_group);
+
+
+
+ for (i = 1 /* skip the master */; i < group_size; i++) {
+ DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
+ /* Regardless of whether the wait above succeeded, remove the reset
+ * trigger or the driver will start timing out on display requests. */
+ DC_SYNC_INFO("GSL: disabling trigger-reset.\n");
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(grouped_pipes[i]->stream_res.tg);
+ }
+
+
+ /* GSL VBlank synchronization is a one-time sync mechanism; the assumption
+ * is that the synchronized displays will not drift out of sync over time */
+ DC_SYNC_INFO("GSL: Restoring register states.\n");
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("GSL: Set-up complete.\n");
+}
+
+static void init_hw(struct dc *dc)
+{
+ int i;
+ struct dc_bios *bp;
+ struct transform *xfm;
+ struct abm *abm;
+
+ bp = dc->ctx->dc_bios;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ xfm = dc->res_pool->transforms[i];
+ xfm->funcs->transform_reset(xfm);
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_INIT);
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_DISABLE);
+ dc->hwss.enable_display_pipe_clock_gating(
+ dc->ctx,
+ true);
+ }
+
+ dce_clock_gating_power_up(dc->hwseq, false);
+ /***************************************/
+
+ for (i = 0; i < dc->link_count; i++) {
+ /****************************************/
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on the connector). */
+ struct dc_link *link = dc->links[i];
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->disable_vga(tg);
+
+ /* Blank controller using driver code instead of
+ * command table. */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+ audio->funcs->hw_init(audio);
+ }
+
+ abm = dc->res_pool->abm;
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
+#endif
+
+}
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->pipe_idx;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->sink->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->sink->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->sink->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->sink->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->sink->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+ /* Round v_refresh */
+ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
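+ /* Illustrative example (assumed 1080p timing): pix_clk_khz = 148500,
+ * h_total = 2200, v_total = 1125 gives 148500000 / 2200 = 67500, then
+ * (67500 + 562) / 1125 = 60, i.e. a 60 Hz refresh rate. */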
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 1000 / stream->timing.pix_clk_khz;
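+ /* Illustrative example (assumed 1080p timing): 2200 * (1125 - 1080)
+ * = 99000 blanking pixels; 99000 * 1000 / 148500 ~= 666 us. */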
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some ASICs do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+ /*
+ * Even the maximum level could not satisfy the requirement; this
+ * is unexpected at this stage and should have been caught at
+ * validation time
+ */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw.dce.sclk_khz);
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable?*/
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 1000
+ / timing->pix_clk_khz;
+ }
+
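+ /* Only notify pplib when the display configuration actually changed,
+ * to avoid issuing redundant requests. */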
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+static void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ dce110_set_displaymarks(dc, context);
+
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
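+ /* request the display clock with what appears to be a ~15% margin
+ * (x 115 / 100) over the calculated requirement */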
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+
+ pplib_apply_display_requirements(dc, context);
+}
+
+static void dce110_program_front_end_for_pipe(
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct pipe_ctx *old_pipe = NULL;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ unsigned int i;
+
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+
+ if (dc->current_state)
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ program_scaler(dc, pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor && old_pipe->stream) {
+ if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+ else
+ enable_fbc(dc, dc->current_state);
+ }
+#endif
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+
+ /* Moved programming gamma from dc to hwss */
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;"
+ "clip: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_state,
+ pipe_ctx->plane_state->address.grph.addr.high_part,
+ pipe_ctx->plane_state->address.grph.addr.low_part,
+ pipe_ctx->plane_state->src_rect.x,
+ pipe_ctx->plane_state->src_rect.y,
+ pipe_ctx->plane_state->src_rect.width,
+ pipe_ctx->plane_state->src_rect.height,
+ pipe_ctx->plane_state->dst_rect.x,
+ pipe_ctx->plane_state->dst_rect.y,
+ pipe_ctx->plane_state->dst_rect.width,
+ pipe_ctx->plane_state->dst_rect.height,
+ pipe_ctx->plane_state->clip_rect.x,
+ pipe_ctx->plane_state->clip_rect.y,
+ pipe_ctx->plane_state->clip_rect.width,
+ pipe_ctx->plane_state->clip_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe %d: width, height, x, y\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+}
+
+static void dce110_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (num_planes == 0)
+ return;
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Need to allocate memory before programming the front end for Fiji */
+ if (pipe_ctx->plane_res.mi != NULL)
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ dce110_program_front_end_for_pipe(dc, pipe_ctx);
+ program_surface_visibility(dc, pipe_ctx);
+
+ }
+}
+
+static void dce110_power_down_fe(struct dc *dc, int fe_idx)
+{
+ /* Do not power down the front end when a stream is still active on DCE */
+ if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
+ return;
+
+ dc->hwss.enable_display_power_gating(
+ dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
+
+ dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
+ dc->res_pool->transforms[fe_idx]);
+}
+
+static void dce110_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ /* do nothing*/
+}
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
+
+static void optimize_shared_resources(struct dc *dc) {}
+
+static const struct hw_sequencer_funcs dce110_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = update_plane_addr,
+ .update_pending_status = dce110_update_pending_status,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+ .set_output_transfer_func = dce110_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dce110_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .power_down_front_end = dce110_power_down_fe,
+ .pipe_control_lock = dce_pipe_control_lock,
+ .set_bandwidth = dce110_set_bandwidth,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
+ .setup_stereo = NULL,
+ .set_avmute = dce110_set_avmute,
+ .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control,
+};
+
+void dce110_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dce110_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
new file mode 100644
index 000000000000..4d72bb99be93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -0,0 +1,81 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE110_H__
+#define __DC_HWSS_DCE110_H__
+
+#include "core_types.h"
+
+#define GAMMA_HW_POINTS_NUM 256
+struct dc;
+struct dc_state;
+struct dm_pp_display_configuration;
+
+void dce110_hw_sequencer_construct(struct dc *dc);
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context);
+
+void dce110_set_display_clock(struct dc_state *context);
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dce110_enable_accelerated_mode(struct dc *dc);
+
+void dce110_power_down(struct dc *dc);
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up);
+
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable);
+
+#endif /* __DC_HWSS_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
new file mode 100644
index 000000000000..a06c6024deb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+/* TODO: this needs to be looked at, used by Stella's workaround*/
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+
+#include "include/logger_interface.h"
+#include "inc/dce_calcs.h"
+
+#include "dce/dce_mem_input.h"
+
+static void set_flip_control(
+ struct dce_mem_input *mem_input110,
+ bool immediate)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL);
+
+ set_reg_field_value(value, 1,
+ UNP_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_PENDING_MODE);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL,
+ value);
+}
+
+/* chroma part */
+static void program_pri_addr_c(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ value);
+}
+
+/* luma part */
+static void program_pri_addr_l(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ value);
+}
+
+static void program_addr(
+ struct dce_mem_input *mem_input110,
+ const struct dc_plane_address *addr)
+{
+ switch (addr->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ program_pri_addr_l(
+ mem_input110,
+ addr->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ program_pri_addr_c(
+ mem_input110,
+ addr->video_progressive.chroma_addr);
+ program_pri_addr_l(
+ mem_input110,
+ addr->video_progressive.luma_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+static void enable(struct dce_mem_input *mem_input110)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_ENABLE);
+ set_reg_field_value(value, 1, UNP_GRPH_ENABLE, GRPH_ENABLE);
+ dm_write_reg(mem_input110->base.ctx,
+ mmUNP_GRPH_ENABLE,
+ value);
+}
+
+static void program_tiling(
+ struct dce_mem_input *mem_input110,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(value, info->gfx8.num_banks,
+ UNP_GRPH_CONTROL, GRPH_NUM_BANKS);
+
+ set_reg_field_value(value, info->gfx8.bank_width,
+ UNP_GRPH_CONTROL, GRPH_BANK_WIDTH_L);
+
+ set_reg_field_value(value, info->gfx8.bank_height,
+ UNP_GRPH_CONTROL, GRPH_BANK_HEIGHT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect,
+ UNP_GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_split,
+ UNP_GRPH_CONTROL, GRPH_TILE_SPLIT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_mode,
+ UNP_GRPH_CONTROL, GRPH_MICRO_TILE_MODE_L);
+
+ set_reg_field_value(value, info->gfx8.pipe_config,
+ UNP_GRPH_CONTROL, GRPH_PIPE_CONFIG);
+
+ set_reg_field_value(value, info->gfx8.array_mode,
+ UNP_GRPH_CONTROL, GRPH_ARRAY_MODE);
+
+ set_reg_field_value(value, 1,
+ UNP_GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE);
+
+ set_reg_field_value(value, 0,
+ UNP_GRPH_CONTROL, GRPH_Z);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = 0;
+
+ set_reg_field_value(value, info->gfx8.bank_width_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_WIDTH_C);
+
+ set_reg_field_value(value, info->gfx8.bank_height_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_HEIGHT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect_c,
+ UNP_GRPH_CONTROL_C, GRPH_MACRO_TILE_ASPECT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_split_c,
+ UNP_GRPH_CONTROL_C, GRPH_TILE_SPLIT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_mode_c,
+ UNP_GRPH_CONTROL_C, GRPH_MICRO_TILE_MODE_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_C,
+ value);
+}
+
+static void program_size_and_rotation(
+ struct dce_mem_input *mem_input110,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ uint32_t value = 0;
+ union plane_size local_size = *plane_size;
+
+ if (rotation == ROTATION_ANGLE_90 ||
+ rotation == ROTATION_ANGLE_270) {
+
+ uint32_t swap;
+ swap = local_size.video.luma_size.x;
+ local_size.video.luma_size.x =
+ local_size.video.luma_size.y;
+ local_size.video.luma_size.y = swap;
+
+ swap = local_size.video.luma_size.width;
+ local_size.video.luma_size.width =
+ local_size.video.luma_size.height;
+ local_size.video.luma_size.height = swap;
+
+ swap = local_size.video.chroma_size.x;
+ local_size.video.chroma_size.x =
+ local_size.video.chroma_size.y;
+ local_size.video.chroma_size.y = swap;
+
+ swap = local_size.video.chroma_size.width;
+ local_size.video.chroma_size.width =
+ local_size.video.chroma_size.height;
+ local_size.video.chroma_size.height = swap;
+ }
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_pitch,
+ UNP_GRPH_PITCH_L, GRPH_PITCH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_pitch,
+ UNP_GRPH_PITCH_C, GRPH_PITCH_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_L, GRPH_X_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_C, GRPH_X_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_L, GRPH_Y_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_C, GRPH_Y_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.x +
+ local_size.video.luma_size.width,
+ UNP_GRPH_X_END_L, GRPH_X_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.x +
+ local_size.video.chroma_size.width,
+ UNP_GRPH_X_END_C, GRPH_X_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.y +
+ local_size.video.luma_size.height,
+ UNP_GRPH_Y_END_L, GRPH_Y_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.y +
+ local_size.video.chroma_size.height,
+ UNP_GRPH_Y_END_C, GRPH_Y_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_C,
+ value);
+
+ value = 0;
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ set_reg_field_value(value, 3,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_180:
+ set_reg_field_value(value, 2,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_270:
+ set_reg_field_value(value, 1,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ default:
+ set_reg_field_value(value, 0,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ }
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_HW_ROTATION,
+ value);
+}
+
+static void program_pixel_format(
+ struct dce_mem_input *mem_input110,
+ enum surface_pixel_format format)
+{
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ uint32_t value;
+ uint8_t grph_depth;
+ uint8_t grph_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ grph_depth,
+ UNP_GRPH_CONTROL,
+ GRPH_DEPTH);
+ set_reg_field_value(
+ value,
+ grph_format,
+ UNP_GRPH_CONTROL,
+ GRPH_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ /* graphics surface: VIDEO_FORMAT = 0 */
+ set_reg_field_value(
+ value,
+ 0,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+
+ } else {
+ /* Video 422 and 420 need UNP_GRPH_CONTROL_EXP programmed */
+ uint32_t value;
+ uint8_t video_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ video_format = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ video_format = 3;
+ break;
+ default:
+ video_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ video_format,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+ }
+}
+
+bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t value;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_UPDATE);
+
+ if (get_reg_field_value(value, UNP_GRPH_UPDATE,
+ GRPH_SURFACE_UPDATE_PENDING))
+ return true;
+
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
+bool dce_mem_input_v_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ set_flip_control(mem_input110, flip_immediate);
+ program_addr(mem_input110,
+ address);
+
+ mem_input->request_address = *address;
+
+ return true;
+}
+
+/* Scatter Gather param tables */
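+/*
+ * Rows are indexed by bits-per-pixel class (8/16/32/64, see
+ * get_dvmm_hw_setting() below). dce_mem_input_v_program_pte_vm() consumes
+ * columns [1] page width, [2] page height, [3] min PTE before flip
+ * (no rotation), [4] min PTE before flip (90/270 rotation) and
+ * [5] PTE requests per chunk.
+ */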
+static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+};
+
+/* Helper to get table entry from surface info */
+static const unsigned int *get_dvmm_hw_setting(
+ union dc_tiling_info *tiling_info,
+ enum surface_pixel_format format,
+ bool chroma)
+{
+ enum bits_per_pixel {
+ bpp_8 = 0,
+ bpp_16,
+ bpp_32,
+ bpp_64
+ } bpp;
+
+ if (format >= SURFACE_PIXEL_FORMAT_INVALID)
+ bpp = bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ bpp = chroma ? bpp_16 : bpp_8;
+ else
+ bpp = bpp_8;
+
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return dvmm_Hw_Setting_1DTiling[bpp];
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return dvmm_Hw_Setting_Linear[bpp];
+ default:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ }
+}
+
+void dce_mem_input_v_program_pte_vm(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format, false);
+ const unsigned int *pte_chroma = get_dvmm_hw_setting(tiling_info, format, true);
+
+ unsigned int page_width = 0;
+ unsigned int page_height = 0;
+ unsigned int page_width_chroma = 0;
+ unsigned int page_height_chroma = 0;
+ unsigned int temp_page_width = pte[1];
+ unsigned int temp_page_height = pte[2];
+ unsigned int min_pte_before_flip = 0;
+ unsigned int min_pte_before_flip_chroma = 0;
+ uint32_t value = 0;
+
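+ /* The DVMM page width/height fields take log2 of the table values,
+ * derived below by counting right shifts.
+ */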
+ while ((temp_page_width >>= 1) != 0)
+ page_width++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height++;
+
+ temp_page_width = pte_chroma[1];
+ temp_page_height = pte_chroma[2];
+ while ((temp_page_width >>= 1) != 0)
+ page_width_chroma++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height_chroma++;
+
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ min_pte_before_flip = pte[4];
+ min_pte_before_flip_chroma = pte_chroma[4];
+ break;
+ default:
+ min_pte_before_flip = pte[3];
+ min_pte_before_flip_chroma = pte_chroma[3];
+ break;
+ }
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT);
+ /* TODO: un-hardcode request limit */
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L);
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL);
+ set_reg_field_value(value, page_width, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH);
+ set_reg_field_value(value, page_height, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT);
+ set_reg_field_value(value, min_pte_before_flip, UNP_DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL);
+ set_reg_field_value(value, pte[5], UNP_DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C);
+ set_reg_field_value(value, page_width_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_WIDTH_C);
+ set_reg_field_value(value, page_height_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_HEIGHT_C);
+ set_reg_field_value(value, min_pte_before_flip_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_MIN_PTE_BEFORE_FLIP_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C);
+ set_reg_field_value(value, pte_chroma[5], UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_PTE_REQ_PER_CHUNK_C);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_MAX_PTE_REQ_OUTSTANDING_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
+}
+
+void dce_mem_input_v_program_surface_config(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ enable(mem_input110);
+ program_tiling(mem_input110, tiling_info, format);
+ program_size_and_rotation(mem_input110, rotation, plane_size);
+ program_pixel_format(mem_input110, format);
+}
+
+static void program_urgency_watermark(
+ const struct dc_context *ctx,
+ const uint32_t urgency_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ /* register value */
+ uint32_t urgency_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(
+ urgency_cntl,
+ marks_low.a_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(
+ urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(urgency_cntl,
+ marks_low.b_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+}
+
+static void program_urgency_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV0_PIPE_URGENCY_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_urgency_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV1_PIPE_URGENCY_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_stutter_watermark(
+ const struct dc_context *ctx,
+ const uint32_t stutter_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks)
+{
+ /* register value */
+ uint32_t stutter_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+
+ if (ctx->dc->debug.disable_stutter) {
+ set_reg_field_value(stutter_cntl,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ } else {
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ }
+
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_IGNORE_FBC);
+
+ /*Write watermark set A*/
+ set_reg_field_value(stutter_cntl,
+ marks.a_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+ /*Write watermark set B*/
+ set_reg_field_value(stutter_cntl,
+ marks.b_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+}
+
+static void program_stutter_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV0_PIPE_STUTTER_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_stutter_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV1_PIPE_STUTTER_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark(
+ const struct dc_context *ctx,
+ const uint32_t wm_mask_ctrl_addr,
+ const uint32_t nbp_pstate_ctrl_addr,
+ struct dce_watermarks marks)
+{
+ uint32_t value;
+
+ /* Write mask to enable reading/writing of watermark set A */
+
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set A */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.a_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write mask to enable reading/writing of watermark set B */
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set B */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.b_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+}
+
+static void program_nbp_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+void dce_mem_input_v_program_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_l(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_l(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_l(
+ mem_input->ctx,
+ stutter);
+}
+
+void dce_mem_input_program_chroma_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_c(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_c(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_c(
+ mem_input->ctx,
+ stutter);
+}
+
+void dce110_allocate_mem_input_v(
+ struct mem_input *mi,
+ uint32_t h_total,/* for current stream */
+ uint32_t v_total,/* for current stream */
+ uint32_t pix_clk_khz,/* for current stream */
+ uint32_t total_stream_num)
+{
+ uint32_t addr;
+ uint32_t value;
+ uint32_t pix_dur;
+ if (pix_clk_khz != 0) {
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
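+ /* 1000000000 / pix_clk_khz is the pixel period in picoseconds */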
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV0_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV1_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+ }
+}
+
+void dce110_free_mem_input_v(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+}
+
+static struct mem_input_funcs dce110_mem_input_v_funcs = {
+ .mem_input_program_display_marks =
+ dce_mem_input_v_program_display_marks,
+ .mem_input_program_chroma_display_marks =
+ dce_mem_input_program_chroma_display_marks,
+ .allocate_mem_input = dce110_allocate_mem_input_v,
+ .free_mem_input = dce110_free_mem_input_v,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mem_input_v_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm =
+ dce_mem_input_v_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mem_input_v_program_surface_config,
+ .mem_input_is_flip_pending =
+ dce_mem_input_v_is_surface_pending
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx)
+{
+ dce_mi->base.funcs = &dce110_mem_input_v_funcs;
+ dce_mi->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
new file mode 100644
index 000000000000..f01d4a607fea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
@@ -0,0 +1,35 @@
+/* Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_V_DCE110_H__
+#define __DC_MEM_INPUT_V_DCE110_H__
+
+#include "mem_input.h"
+#include "dce/dce_mem_input.h"
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
new file mode 100644
index 000000000000..feb397b5c1a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce110_transform_v.h"
+#include "basics/conversion.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+/* contrast: 0 - 2.0, default 1.0 */
+#define UNDERLAY_CONTRAST_DEFAULT 100
+#define UNDERLAY_CONTRAST_MAX 200
+#define UNDERLAY_CONTRAST_MIN 0
+#define UNDERLAY_CONTRAST_STEP 1
+#define UNDERLAY_CONTRAST_DIVIDER 100
+
+/* Saturation: 0 - 2.0; default 1.0 */
+#define UNDERLAY_SATURATION_DEFAULT 100 /*1.00*/
+#define UNDERLAY_SATURATION_MIN 0
+#define UNDERLAY_SATURATION_MAX 200 /* 2.00 */
+#define UNDERLAY_SATURATION_STEP 1 /* 0.01 */
+/* actual max underlay saturation
+ * value = UNDERLAY_SATURATION_MAX / UNDERLAY_SATURATION_DIVIDER
+ */
+
+/* Hue */
+#define UNDERLAY_HUE_DEFAULT 0
+#define UNDERLAY_HUE_MIN -300
+#define UNDERLAY_HUE_MAX 300
+#define UNDERLAY_HUE_STEP 5
+#define UNDERLAY_HUE_DIVIDER 10 /* HW range: -30 ~ +30 */
+#define UNDERLAY_SATURATION_DIVIDER 100
+
+/* Brightness: in DAL usually -0.25 ~ 0.25.
+ * In MMD it is -100 to +100 in the 16-235 range, which scaled to full range
+ * is roughly -116 to +116, or about 0.4566 when normalized.
+ * With a divider of 100 this becomes 46, but another divider may give better
+ * precision. The ideal one is 100/219 ((100/255)*(255/219)),
+ * i.e. min/max = +-100, divider = 219.
+ * Default 0.0.
+ */
+#define UNDERLAY_BRIGHTNESS_DEFAULT 0
+#define UNDERLAY_BRIGHTNESS_MIN -46 /* ~116/255 */
+#define UNDERLAY_BRIGHTNESS_MAX 46
+#define UNDERLAY_BRIGHTNESS_STEP 1 /* .01 */
+#define UNDERLAY_BRIGHTNESS_DIVIDER 100
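+/* Example: with min/max = +-100 and divider = 219, a UI value of +100 maps
+ * to 100 / 219 ~= 0.457 of full range, matching the ~116/255 figure above.
+ */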
+
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
+static void program_color_matrix_v(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
+ bool use_set_a = (get_reg_field_value(cntl_value,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE) != 4);
+
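+ /* OUTPUT_CSC_MODE 4 selects coefficient set A and 5 selects set B;
+ * program the set that is currently unused, then switch to it.
+ */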
+ set_reg_field_value(
+ cntl_value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (use_set_a) {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C11_A);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C12_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C13_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C14_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C21_A);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C22_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C23_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C24_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C31_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C32_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C33_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C34_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 4,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ } else {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C11_B);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C12_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C13_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C14_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C21_B);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C22_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C23_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C24_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C31_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C32_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C33_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C34_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 5,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ }
+
+ dm_write_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL, cntl_value);
+}
+
+static bool configure_graphics_mode_v(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC)
+ return true;
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else {
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ }
+
+ addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ dm_write_reg(ctx, addr, value);
+
+ return true;
+}
+
+/* TODO: color depth is not correct when this is called */
+static void set_Denormalization(struct transform *xfm,
+ enum dc_color_depth color_depth)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL);
+
+ switch (color_depth) {
+ case COLOR_DEPTH_888:
+ /* 255/256 for 8 bit output color depth */
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ set_reg_field_value(
+ value,
+ 2,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ set_reg_field_value(
+ value,
+ 3,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ default:
+ /* not valid case */
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_10BIT_OUT);
+
+ dm_write_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL, value);
+}
+
+struct input_csc_matrix {
+ enum dc_color_space color_space;
+ uint32_t regval[12];
+};
+
+static const struct input_csc_matrix input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+/*1_1 1_2 1_3 1_4 2_1 2_2 2_3 2_4 3_1 3_2 3_3 3_4 */
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0x0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0x0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0x0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0x0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
+static void program_input_csc(
+ struct transform *xfm, enum dc_color_space color_space)
+{
+ int arr_size = ARRAY_SIZE(input_csc_matrix);
+ struct dc_context *ctx = xfm->ctx;
+ const uint32_t *regval = NULL;
+ bool use_set_a;
+ uint32_t value;
+ int i;
+
+ for (i = 0; i < arr_size; i++)
+ if (input_csc_matrix[i].color_space == color_space) {
+ regval = input_csc_matrix[i].regval;
+ break;
+ }
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /*
+ * 1 == set A, the logic is 'if currently we're not using set A,
+ * then use set A, otherwise use set B'
+ */
+ value = dm_read_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL);
+ use_set_a = get_reg_field_value(
+ value, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE) != 1;
+
+ if (use_set_a) {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_A, INPUT_CSC_C11_A);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_A, INPUT_CSC_C12_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_A, INPUT_CSC_C13_A);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_A, INPUT_CSC_C14_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_A, INPUT_CSC_C21_A);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_A, INPUT_CSC_C22_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_A, INPUT_CSC_C23_A);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_A, INPUT_CSC_C24_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_A, INPUT_CSC_C31_A);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_A, INPUT_CSC_C32_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_A, INPUT_CSC_C33_A);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_A, INPUT_CSC_C34_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_A, value);
+ } else {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_B, INPUT_CSC_C11_B);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_B, INPUT_CSC_C12_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_B, INPUT_CSC_C13_B);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_B, INPUT_CSC_C14_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_B, INPUT_CSC_C21_B);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_B, INPUT_CSC_C22_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_B, INPUT_CSC_C23_B);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_B, INPUT_CSC_C24_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_B, INPUT_CSC_C31_B);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_B, INPUT_CSC_C32_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_B, INPUT_CSC_C33_B);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_B, INPUT_CSC_C34_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_B, value);
+ }
+
+ /* KK: leave INPUT_CSC_CONVERSION_MODE at default */
+ value = 0;
+ /*
+ * Select the 8.4 input type instead of the default 12.0. Per the HW
+ * team this depends on the UNP surface format: for 8-bit surfaces
+ * select 8.4 (4 bits truncated), for 10-bit it should be 10.2.
+ * Carrizo only supports 8-bit surfaces on the underlay pipe, so we
+ * can always keep this at 8.4 (input_type=2). If later ASICs support
+ * 10+ bits we will have a problem: surface programming, including
+ * UNP_GRPH*, is done in DalISR after this, so either the surface
+ * format must be passed here, or this logic moved to the ISR.
+ */
+
+ set_reg_field_value(
+ value, 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_INPUT_TYPE);
+ set_reg_field_value(
+ value,
+ use_set_a ? 1 : 2,
+ COL_MAN_INPUT_CSC_CONTROL,
+ INPUT_CSC_MODE);
+
+ dm_write_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL, value);
+}
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (!default_adjust->force_hw_default) {
+ const struct out_csc_color_matrix *elm;
+ /* currently parameter not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * If HW default is false we program a locally defined matrix;
+ * if HW default is true we use the predefined HW matrix and do
+ * not need to program one.
+ * The OEM requests the HW default via a runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file
+ */
+ program_color_matrix_v(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ program_input_csc(xfm, default_adjust->in_color_space);
+
+ /* Configure what we programmed:
+ * 1. default values from this file, or
+ * 2. the hardware default from ROM_A, in which case we do not need
+ * to program the matrix
+ */
+
+ configure_graphics_mode_v(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+
+ set_Denormalization(xfm, default_adjust->color_depth);
+}
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix_v(
+ xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
+
+ /* We did everything, now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+
+ /*TODO: Check if denormalization is needed*/
+ /*set_Denormalization(opp, adjust->color_depth);*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
new file mode 100644
index 000000000000..e98ed3058ea2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce110_transform_v.h"
+
+static void power_on_lut(struct transform *xfm,
+ bool power_on, bool inputgamma, bool regamma)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ int i;
+
+ if (power_on) {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ } else {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ }
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+
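+ /* Poll briefly (up to 3 x 2us) for the memory power state to settle
+ * before the LUTs are accessed.
+ */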
+ for (i = 0; i < 3; i++) {
+ value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ if (get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS) &&
+ get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS))
+ break;
+
+ udelay(2);
+ }
+}
+
+static void set_bypass_input_gamma(struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_INPUT_GAMMA_CONTROL1,
+ INPUT_GAMMA_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1, value);
+}
+
+static void configure_regamma_mode(struct dce_transform *xfm_dce, uint32_t mode)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ mode,
+ GAMMA_CORR_CONTROL,
+ GAMMA_CORR_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CONTROL, value);
+}
+
+/*
+ *****************************************************************************
+ * Function: regamma_config_regions_and_segments
+ *
+ * Build the regamma curve using predefined HW points.
+ * Uses interface parameters, like EDID coefficients.
+ *
+ * @param params: interface parameters
+ * @return void
+ *
+ * @note
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce, const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ uint32_t value = 0;
+
+ {
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_x,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START);
+
+ set_reg_field_value(
+ value,
+ 0,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CNTLA_START_CNTL,
+ value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_slope,
+ GAMMA_CORR_CNTLA_SLOPE_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_SLOPE_CNTL, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_x,
+ GAMMA_CORR_CNTLA_END_CNTL1,
+ GAMMA_CORR_CNTLA_EXP_REGION_END);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL1, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[2].custom_float_slope,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);
+
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_y,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL2, value);
+ }
+
+ curve = params->arr_curve_points;
+
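+ /* Program the 16 exponential regions two at a time
+ * (REGION_0_1 through REGION_14_15), advancing the curve pointer
+ * by two entries per register.
+ */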
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS);
+
+ dm_write_reg(
+ xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_0_1,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_2_3,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_4_5,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_6_7,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_8_9,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_10_11,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_12_13,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_14_15,
+ value);
+ }
+}
+
+static void program_pwl(struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ 7,
+ GAMMA_CORR_LUT_WRITE_EN_MASK,
+ GAMMA_CORR_LUT_WRITE_EN_MASK);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_WRITE_EN_MASK, value);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
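+ /* Each HW point is six consecutive LUT_DATA writes:
+ * R, G and B followed by their deltas.
+ */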
+ {
+ const uint32_t addr = mmGAMMA_CORR_LUT_DATA;
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb =
+ params->rgb_resulted;
+
+ while (i != params->hw_points_num) {
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->blue_reg);
+
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+}
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ set_bypass_input_gamma(xfm_dce);
+
+ /* Power on gamma LUT memory */
+ power_on_lut(xfm, true, false, true);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+
+ /* program regamma config */
+ configure_regamma_mode(xfm_dce, 1);
+
+ /* Return the regamma LUT memory power control to auto */
+ power_on_lut(xfm, false, false, true);
+}
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+}
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode)
+{
+ /* TODO: need to implement the function */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
new file mode 100644
index 000000000000..3545e43a4b77
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce/dce_opp.h"
+#include "dce110_opp_v.h"
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction =
+ dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
new file mode 100644
index 000000000000..152af4c418cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
@@ -0,0 +1,39 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE110_V_H__
+#define __DC_OPP_DCE110_V_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx);
+
+/* underlay callbacks */
+
+
+
+#endif /* __DC_OPP_DCE110_V_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
new file mode 100644
index 000000000000..db96d2b47ff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -0,0 +1,1327 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "dce110/dce110_resource.h"
+
+#include "include/irq_service_interface.h"
+#include "dce/dce_audio.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce110/dce110_timing_generator_v.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce110/dce110_mem_input_v.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce110/dce110_transform_v.h"
+#include "dce/dce_opp.h"
+#include "dce110/dce110_opp_v.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110/dce110_compressor.h"
+#endif
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+#ifndef DPHY_RX_FAST_TRAINING_CAPABLE
+ #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1
+#endif
+
+static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_110_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+/* AG TBD: needs to be reduced back to 3 pipes once the dce10 hw sequencer is implemented. */
+
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps carrizo_resource_cap = {
+ .num_timing_generator = 3,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
+static const struct resource_caps stoney_resource_cap = {
+ .num_timing_generator = 2,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
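+/* CTX and REG() are consumed by the REG_GET* helpers from reg_helper.h */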
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce110_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce110_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_stoney_reg = {
+ HWSEQ_ST_REG_LIST()
+};
+
+static const struct dce_hwseq_registers hwseq_cz_reg = {
+ HWSEQ_CZ_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE11_MASK_SH_LIST(__SHIFT),
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE11_MASK_SH_LIST(_MASK),
+};
+
+static struct dce_hwseq *dce110_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ?
+ &hwseq_stoney_reg : &hwseq_cz_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ hws->wa.blnd_crtc_trigger = true;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce110_stream_encoder_create,
+ .create_hwseq = dce110_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE11_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+
+static struct mem_input *dce110_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 3;
+ return &dce_mi->base;
+}
+
+static void dce110_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce110_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce110_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 594000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce110_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct output_pixel_processor *dce110_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce110_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce110_clock_source_destroy(struct clock_source **clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src;
+
+ if (!clk_src)
+ return;
+
+ dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src);
+
+ kfree(dce110_clk_src->dp_ss_params);
+ kfree(dce110_clk_src->hdmi_ss_params);
+ kfree(dce110_clk_src->dvi_ss_params);
+
+ kfree(dce110_clk_src);
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce110_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce110_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce110_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /* TODO: is this halved for YCbCr 4:2:0? In that case we might want to
+ * move the pixel clock normalization for HDMI up to here instead of
+ * doing it in pll_adjust_pix_clk.
+ */
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+ }
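+ /* YCbCr 4:2:0 streams request only half the nominal pixel clock. */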
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2;
+ }
+}
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+}
+
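+/* Only video (YUV) surface formats are accepted on the underlay pipe. */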
+static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx)
+{
+ if (pipe_ctx->pipe_idx != underlay_idx)
+ return true;
+ if (!pipe_ctx->plane_state)
+ return false;
+ if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ return true;
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (!is_surface_pixel_format_supported(pipe_ctx,
+ dc->res_pool->underlay_pipe_index))
+ return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+static bool dce110_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: %dx%d@%d Bandwidth validation failed!\n",
+ __func__,
+ context->streams[0]->timing.h_addressable,
+ context->streams[0]->timing.v_addressable,
+ context->streams[0]->timing.pix_clk_khz);
+
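+ /* Dump the calculated watermarks only when they differ from the current state. */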
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
+static bool dce110_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 2)
+ return false;
+
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ struct dc_plane_state *plane =
+ context->stream_status[i].plane_states[j];
+
+ /* underlay validation */
+ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+
+ if ((plane->src_rect.width > 1920 ||
+ plane->src_rect.height > 1080))
+ return false;
+
+ /* irrespective of plane format,
+ * stream should be RGB encoded
+ */
+ if (context->streams[i]->timing.pixel_encoding
+ != PIXEL_ENCODING_RGB)
+ return false;
+
+ }
+
+ }
+ }
+
+ return true;
+}
+
+enum dc_status dce110_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce110_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static enum dc_status dce110_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+static enum dc_status dce110_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce110_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dce110_acquire_underlay(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+ struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = pool->underlay_pipe_index;
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
+
+ if (res_ctx->pipe_ctx[underlay_idx].stream)
+ return NULL;
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx];
+ pipe_ctx->plane_res.mi = pool->mis[underlay_idx];
+ /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/
+ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx];
+ pipe_ctx->stream_res.opp = pool->opps[underlay_idx];
+ pipe_ctx->pipe_idx = underlay_idx;
+
+ pipe_ctx->stream = stream;
+
+ if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) {
+ struct tg_color black_color = {0};
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ dc->hwss.enable_display_power_gating(
+ dc,
+ pipe_ctx->pipe_idx,
+ dcb, PIPE_GATING_CONTROL_DISABLE);
+
+ /*
+ * This is for powering on underlay, so crtc does not
+ * need to be enabled
+ */
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg,
+ &stream->timing,
+ false);
+
+ pipe_ctx->stream_res.tg->funcs->enable_advanced_request(
+ pipe_ctx->stream_res.tg,
+ true,
+ &stream->timing);
+
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ color_space_to_black_color(dc,
+ COLOR_SPACE_YCBCR601, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+ }
+
+ return pipe_ctx;
+}
+
+static void dce110_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+
+static const struct resource_funcs dce110_res_pool_funcs = {
+ .destroy = dce110_destroy_resource_pool,
+ .link_enc_create = dce110_link_encoder_create,
+ .validate_guaranteed = dce110_validate_guaranteed,
+ .validate_bandwidth = dce110_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
+ .add_stream_to_ctx = dce110_add_stream_to_ctx,
+ .validate_global = dce110_validate_global
+};
+
+static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv),
+ GFP_KERNEL);
+ struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv),
+ GFP_KERNEL);
+ struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv),
+ GFP_KERNEL);
+ struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
+ GFP_KERNEL);
+
+ if ((dce110_tgv == NULL) ||
+ (dce110_xfmv == NULL) ||
+ (dce110_miv == NULL) ||
+ (dce110_oppv == NULL))
+ return false;
+
+ dce110_opp_v_construct(dce110_oppv, ctx);
+
+ dce110_timing_generator_v_construct(dce110_tgv, ctx);
+ dce110_mem_input_v_construct(dce110_miv, ctx);
+ dce110_transform_v_construct(dce110_xfmv, ctx);
+
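+ /* The underlay is appended as one extra pipe after the regular pipes. */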
+ pool->opps[pool->pipe_count] = &dce110_oppv->base;
+ pool->timing_generators[pool->pipe_count] = &dce110_tgv->base;
+ pool->mis[pool->pipe_count] = &dce110_miv->base;
+ pool->transforms[pool->pipe_count] = &dce110_xfmv->base;
+ pool->pipe_count++;
+
+ /* update the public caps to indicate an underlay is available */
+ ctx->dc->caps.max_slave_planes = 1;
+
+ return true;
+}
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels clks = {0};
+
+ /*do system clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+ /* convert all the clocks from kHz to fixed-point MHz */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
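+ /* mid1..mid6 sample the PPLIB level table at 1/8 through 6/8 of the
+ * available levels.
+ */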
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+ dc->sclk_lvls = clks;
+
+ /*do display clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ &clks);
+ dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1], 1000);
+ dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+}
+
+const struct resource_caps *dce110_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
+ return &stoney_resource_cap;
+ else
+ return &carrizo_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool,
+ struct hw_asic_id asic_id)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce110_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce110_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.underlay_pipe_index = pool->base.pipe_count;
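+ /* underlay_create() below appends the underlay at this extra pipe index */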
+
+ dc->caps.max_downscale_ratio = 150;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1,
+ &clk_src_regs[1], false);
+
+ pool->base.clk_src_count = 2;
+
+ /* TODO: find out if CZ supports 3 PLLs */
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce110_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce110_timing_generator_create(
+ ctx, i, &dce110_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce110_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce110_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce110_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce110_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+#endif
+ if (!underlay_create(ctx, &pool->base))
+ goto res_create_fail;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce110_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool, asic_id))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
new file mode 100644
index 000000000000..e5f168c1f8c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE110_H__
+#define __DC_RESOURCE_DCE110_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+#define TO_DCE110_RES_POOL(pool)\
+ container_of(pool, struct dce110_resource_pool, base)
+
+struct dce110_resource_pool {
+ struct resource_pool base;
+};
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx);
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id);
+
+#endif /* __DC_RESOURCE_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
new file mode 100644
index 000000000000..67ac737eaa7e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -0,0 +1,1966 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+
+#include "timing_generator.h"
+
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+
+/* The following register offsets are the same in
+ * dce/dce_11_0_d.h
+ * dce/vi_polaris10_p/vi_polaris10_d.h
+ *
+ * so the DCE110 timing generator can be reused for both.
+ */
+
+
+/*
+ * apply_front_porch_workaround
+ *
+ * This is a workaround for a hardware bug that has existed since R5xx and has
+ * never been fixed: keep the front porch at a minimum of 2 for interlaced
+ * mode, or 1 for progressive.
+ */
+static void dce110_timing_generator_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ * Check the current status of the CRTC to determine whether we are in the
+ * vertical blank region
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce110_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_STATUS);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = CRTC_REG(mmCRTC_CONTROL);
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+/**
+ * Enable CRTC - call the ASIC Control Object to enable the timing generator.
+ */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ /*
+ * 3 is used to make sure V_UPDATE occurs at the beginning of the first
+ * line of vertical front porch
+ */
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
+
+ /* TODO: may want this on to catch underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK), value);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+ *****************************************************************************
+ * Function: disable_stereo
+ *
+ * @brief
+ * Disables active stereo on the controller.
+ * Frame packing needs to be disabled in vblank or when the CRTC is not running.
+ *****************************************************************************
+ */
+#if 0
+@TODOSTEREO
+static void disable_stereo(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_3D_STRUCTURE_CONTROL);
+ uint32_t value = 0;
+ uint32_t test = 0;
+ uint32_t field = 0;
+ uint32_t struc_en = 0;
+ uint32_t struc_stereo_sel_ovr = 0;
+
+ value = dm_read_reg(tg->ctx, addr);
+ struc_en = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_EN);
+
+ struc_stereo_sel_ovr = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_STEREO_SEL_OVR);
+
+ /*
+ * When disabling Frame Packing in 2 step mode, we need to program both
+ * registers at the same frame
+ * Programming it in the beginning of VActive makes sure we are ok
+ */
+
+ if (struc_en != 0 && struc_stereo_sel_ovr == 0) {
+ tg->funcs->wait_for_vblank(tg);
+ tg->funcs->wait_for_vactive(tg);
+ }
+
+ value = 0;
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = tg->regs[IDX_CRTC_STEREO_CONTROL];
+ dm_write_reg(tg->ctx, addr, value);
+}
+#endif
+
+/**
+ * disable_crtc - call ASIC Control Object to disable Timing generator.
+ */
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, false);
+
+ /* Need to make sure stereo is disabled according to the DCE5.0 spec */
+
+ /*
+ * @TODOSTEREO call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+ * program_horz_count_by_2
+ * Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise.
+ */
+static void program_horz_count_by_2(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ regval = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL));
+
+ set_reg_field_value(regval, 0, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ if (timing->flags.HORZ_COUNT_BY_TWO)
+ set_reg_field_value(regval, 1, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ dm_write_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
+}
+
+/**
+ * program_timing_generator
+ * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
+ * Call ASIC Control Object to program Timings.
+ */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum bp_result result;
+ struct bp_hw_crtc_timing_parameters bp_params;
+ struct dc_crtc_timing patched_crtc_timing;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
+ dc_crtc_timing->v_front_porch;
+ uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = dc_crtc_timing->h_border_right +
+ dc_crtc_timing->h_front_porch;
+ uint32_t h_sync_start = dc_crtc_timing->h_addressable + hsync_offset;
+
+ memset(&bp_params, 0, sizeof(struct bp_hw_crtc_timing_parameters));
+
+ /* Due to an asic bug we need to apply the Front Porch workaround prior
+ * to programming the timing.
+ */
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ dce110_timing_generator_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ bp_params.controller_id = tg110->controller_id;
+
+ bp_params.h_total = patched_crtc_timing.h_total;
+ bp_params.h_addressable =
+ patched_crtc_timing.h_addressable;
+ bp_params.v_total = patched_crtc_timing.v_total;
+ bp_params.v_addressable = patched_crtc_timing.v_addressable;
+
+ bp_params.h_sync_start = h_sync_start;
+ bp_params.h_sync_width = patched_crtc_timing.h_sync_width;
+ bp_params.v_sync_start = v_sync_start;
+ bp_params.v_sync_width = patched_crtc_timing.v_sync_width;
+
+ /* Set overscan */
+ bp_params.h_overscan_left =
+ patched_crtc_timing.h_border_left;
+ bp_params.h_overscan_right =
+ patched_crtc_timing.h_border_right;
+ bp_params.v_overscan_top = patched_crtc_timing.v_border_top;
+ bp_params.v_overscan_bottom =
+ patched_crtc_timing.v_border_bottom;
+
+ /* Set flags */
+ if (patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.HSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ bp_params.flags.INTERLACE = 1;
+
+ if (patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1)
+ bp_params.flags.HORZ_COUNT_BY_TWO = 1;
+
+ result = tg->bp->funcs->program_crtc_timing(tg->bp, &bp_params);
+
+ program_horz_count_by_2(tg, &patched_crtc_timing);
+
+ tg110->base.funcs->enable_advanced_request(tg, true, &patched_crtc_timing);
+
+ /* Enable stereo - only when we need to pack 3D frame. Other types
+ * of stereo handled in explicit call */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_DxCRTC_V_TOTAL_*.
+ *
+ * @param [in] params: pointer to the drr_params structure describing the
+ * requested vertical total min/max
+ *****************************************************************************
+ */
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ /* register values */
+ uint32_t v_total_min = 0;
+ uint32_t v_total_max = 0;
+ uint32_t v_total_cntl = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ v_total_min = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ v_total_max = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ v_total_cntl = dm_read_reg(tg->ctx, addr);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
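+ /* Enable DRR: clamp V_TOTAL between the requested min and max
+ * (the register fields are 0-based, hence the -1).
+ */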
+ set_reg_field_value(v_total_max,
+ params->vertical_total_max - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+
+ set_reg_field_value(v_total_min,
+ params->vertical_total_min - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK_EN);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ } else {
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ set_reg_field_value(v_total_min,
+ 0,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ set_reg_field_value(v_total_max,
+ 0,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+ }
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ dm_write_reg(tg->ctx, addr, v_total_min);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ dm_write_reg(tg->ctx, addr, v_total_max);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ dm_write_reg(tg->ctx, addr, v_total_cntl);
+}
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t static_screen_cntl = 0;
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
+ static_screen_cntl = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(static_screen_cntl,
+ value,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK);
+
+ set_reg_field_value(static_screen_cntl,
+ 2,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_FRAME_COUNT);
+
+ dm_write_reg(tg->ctx, addr, static_screen_cntl);
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ * Get the counter of vertical blanks. Uses the CRTC_STATUS_FRAME_COUNT
+ * register, which holds the frame count.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Frame count, which should be equal to the number of vblanks.
+ */
+uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_STATUS_FRAME_COUNT);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_STATUS_POSITION));
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_NOM_VERT_POSITION));
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+/**
+ *****************************************************************************
+ * Function: get_crtc_scanoutpos
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] vpos, hpos
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_BLANK_START_END));
+
+ *v_blank_start = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce110_timing_generator_get_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+/* TODO: is it safe to assume that mask/shift of Primary and Underlay
+ * are the same?
+ * For example: today CRTC_H_TOTAL == CRTCV_H_TOTAL but is it always
+ * guaranteed? */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
+ addr = CRTC_REG(mmCRTC_H_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTC_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ /* In case V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_H_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
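+ /* H blank end = h_total - (h_sync_start + left border);
+ * H blank start = blank end + addressable width + both borders */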
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ /* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ dyn_range,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left-aligned to the MSB:
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
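+ /* The bar patterns do not appear to use the ramp parameters, so clear the register */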
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+ dm_write_reg(ctx, addr, value);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for next write.
+ * The first iteration writes the mask only; in each following
+ * iteration the color prepared in the previous iteration is
+ * written together with the new mask. The last component is
+ * written separately: the mask does not change between the 6th
+ * and 7th writes, and its color is prepared by the last iteration.
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ for (index = 0; index < 6; index++) {
+ /* prepare the color mask; for the first write
+ * PATTERN_DATA is still all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg(ctx, addr, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it has already been prepared in the loop
+ */
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment of the first ramp for one color gradation:
+ * one gradation of 6-bit color corresponds to 2^10
+ * gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ inc_base + 2,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 5,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 384 << 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ default:
+ break;
+ }
+ dm_write_reg(ctx, addr, value);
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ /* add color depth translation here */
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_COLOR), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS),
+ value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+* dce110_timing_generator_validate_timing
+* The timing generators support a maximum display size of 8192 x 8192 pixels,
+* including both active display and blanking periods. Check H Total and V Total.
+*/
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ if (!timing)
+ return false;
+
+ /* Currently we don't support 3D, so block all 3D timings */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
+ return false;
+
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check maximum number of pixels supported by Timing Generator
+ * (This currently never fails; failing would require a display with
+ * more than 8192 total horizontal and
+ * more than 8192 total vertical pixels)
+ */
+ if (timing->h_total > tg110->max_h_total ||
+ timing->v_total > tg110->max_v_total)
+ return false;
+
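+ /* The blank and porch checks below use the minimums filled in by
+ * dce110_timing_generator_construct() */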
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (h_blank < tg110->min_h_blank)
+ return false;
+
+ if (timing->h_front_porch < tg110->min_h_front_porch)
+ return false;
+
+ h_back_porch = h_blank - (h_sync_start -
+ timing->h_addressable -
+ timing->h_border_right -
+ timing->h_sync_width);
+
+ if (h_back_porch < tg110->min_h_back_porch)
+ return false;
+
+ return true;
+}
+
+/**
+* Wait till we are at the beginning of VBlank.
+*/
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch the beginning of VBlank here, so if we are already
+ * in VBlank on the first check we might be very close to VActive;
+ * in that case wait for the next frame
+ */
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_setup_global_swap_lock
+ *
+ * @brief
+ * Sets up the Global Swap Lock group for the current pipe.
+ * The pipe can join or leave a GSL group and become a TimingServer or
+ * TimingClient
+ *
+ * @param [in] gsl_params: setup data
+ *****************************************************************************
+ */
+
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+ uint32_t check_point = FLIP_READY_BACK_LOOKUP;
+
+ value = dm_read_reg(tg->ctx, address);
+
+ /* This pipe will belong to GSL Group zero. */
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ gsl_params->gsl_master == tg->inst,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ HFLIP_READY_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ /* Keep signal low (pending high) during 6 lines.
+ * Also defines minimum interval before re-checking signal. */
+ set_reg_field_value(value,
+ HFLIP_CHECK_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+
+ /* Checkpoint relative to end of frame */
+ check_point = get_reg_field_value(value_crtc_vtotal,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
+ }
+
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
+
+ set_reg_field_value(value,
+ VFLIP_READY_DELAY,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ /* Clear all the register writes done by
+ * dce110_timing_generator_setup_global_swap_lock
+ */
+
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+
+ value = 0;
+
+ /* Restore the HW default values from the register spec */
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ 0x2,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ set_reg_field_value(value,
+ 0x6,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+ /* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+ }
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ 0,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
+
+ set_reg_field_value(value,
+ 0x2,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+/**
+ *****************************************************************************
+ * Function: is_counter_moving
+ *
+ * @brief
+ * check whether the timing generator is currently running
+ *
+ * @return
+ * true if running, false if currently paused or stopped.
+ *
+ *****************************************************************************
+ */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
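+ /* Sample the CRTC position twice; if neither counter advanced between
+ * the reads, the timing generator is not running */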
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
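+ /* With 3 or fewer lines of vsync plus front porch there is presumably
+ * too little vertical blank time for prefetch: use start line 3 with
+ * prefetch disabled, otherwise start line 4 with prefetch enabled */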
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/* TODO: Figure out if we need this function. */
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock)
+{
+ struct dc_context *ctx = tg->ctx;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ lock ? 1 : 0,
+ CRTC_MASTER_UPDATE_LOCK,
+ MASTER_UPDATE_LOCK);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ uint32_t value;
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Setup trigger edge */
+ {
+ uint32_t pol_value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_SYNC_A_CNTL));
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+ }
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
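+ /* The trigger source is always GSL group 0; the source_tg_inst
+ * argument is not used by this implementation */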
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ set_reg_field_value(value,
+ trig_src_select,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ rising_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ falling_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ 0, /* send every signal */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FREQUENCY_SELECT);
+
+ set_reg_field_value(value,
+ 0, /* no delay */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_DELAY);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+
+ /**************************************************************/
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 2, /* force H count to H_TOTAL and V count to V_TOTAL */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* TriggerB - we never use TriggerA */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+}
+
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 0, /* force counter now mode is disabled */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+
+ /********************************************************************/
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ set_reg_field_value(value,
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+}
+
+/**
+ *****************************************************************************
+ * @brief
+ * Checks whether CRTC triggered reset occurred
+ *
+ * @return
+ * true if triggered reset occurred, false otherwise
+ *****************************************************************************
+ */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ return get_reg_field_value(value,
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+/**
+ * dce110_timing_generator_disable_vga
+ * Turn OFF VGA Mode and Timing - DxVGA_CONTROL
+ * VGA Mode and VGA Timing are used by the VBIOS on CRT monitors;
+ */
+void dce110_timing_generator_disable_vga(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ addr = mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D1:
+ addr = mmD2VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ addr = mmD3VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ addr = mmD4VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ addr = mmD5VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ addr = mmD6VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+ value = dm_read_reg(tg->ctx, addr);
+
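+ /* D1VGA_CONTROL field masks are reused here on the assumption that the
+ * VGA_CONTROL field layout is identical for controllers D1 through D6 */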
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+* set_overscan_color_black
+*
+* @param color: black color for the given color space
+* This routine sets the overscan black color according to the color space.
+* @return none
+*/
+
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+ addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ dm_write_reg(ctx, addr, value);
+ /* It is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+ /* TODO: we have to program the EXT registers and we need to know the
+ * LB DATA format because it is used when there are more than 10,
+ * e.g. 12, bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+
+}
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce110_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));
+
+ if (get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+ return false;
+}
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
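+ /* Enable double buffering of CRTC_BLANK_DATA_EN before changing the blank state */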
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_DOUBLE_BUFFER_CONTROL), value);
+ value = 0;
+
+ if (enable_blanking) {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), value);
+
+ } else
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), 0);
+}
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce110_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_tg_set_overscan_color(tg, overscan_color);
+}
+
+/* Gets the first line of the blank region of the display timing for the CRTC
+ * and programs it as a trigger to fire the vertical interrupt
+ */
+bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t val = 0;
+ uint32_t h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
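+ /* Fire vertical interrupt 0 starting at the first line of vertical blank */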
+ set_reg_field_value(
+ val,
+ v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START);
+
+ /* Set the width of the interval, in scanlines, in which the interrupt fires */
+ set_reg_field_value(
+ val,
+ v_blank_start + width,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERTICAL_INTERRUPT0_POSITION), val);
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce110_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_tg_program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_enable_advanced_request,
+ .set_drr =
+ dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+};
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce110_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
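+ /* Maximum H/V total follows from the register field width (mask + 1) */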
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
new file mode 100644
index 000000000000..82737dea6984
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE110_H__
+#define __DC_TIMING_GENERATOR_DCE110_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* GSL Sync related values */
+
+/* In VSync mode, after 4 units of time, master pipe will generate
+ * flip_ready signal */
+#define VFLIP_READY_DELAY 4
+/* In HSync mode, after 2 units of time, master pipe will generate
+ * flip_ready signal */
+#define HFLIP_READY_DELAY 2
+/* 6 lines delay between forcing flip and checking all pipes ready */
+#define HFLIP_CHECK_DELAY 6
+/* 3 lines before end of frame */
+#define FLIP_READY_BACK_LOOKUP 3
+
+/* Trigger Source Select - ASIC-dependent, actual values for the
+ * register programming */
+enum trigger_source_select {
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCA = 1,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCA = 2,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCB = 3,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCB = 4,
+ TRIGGER_SOURCE_SELECT_GENERICF = 5,
+ TRIGGER_SOURCE_SELECT_GENERICE = 6,
+ TRIGGER_SOURCE_SELECT_VSYNCA = 7,
+ TRIGGER_SOURCE_SELECT_HSYNCA = 8,
+ TRIGGER_SOURCE_SELECT_VSYNCB = 9,
+ TRIGGER_SOURCE_SELECT_HSYNCB = 10,
+ TRIGGER_SOURCE_SELECT_HPD1 = 11,
+ TRIGGER_SOURCE_SELECT_HPD2 = 12,
+ TRIGGER_SOURCE_SELECT_GENERICD = 13,
+ TRIGGER_SOURCE_SELECT_GENERICC = 14,
+ TRIGGER_SOURCE_SELECT_VIDEO_CAPTURE = 15,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP0 = 16,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP1 = 17,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP2 = 18,
+ TRIGGER_SOURCE_SELECT_BLONY = 19,
+ TRIGGER_SOURCE_SELECT_GENERICA = 20,
+ TRIGGER_SOURCE_SELECT_GENERICB = 21,
+ TRIGGER_SOURCE_SELECT_GSL_ALLOW_FLIP = 22,
+ TRIGGER_SOURCE_SELECT_MANUAL_TRIGGER = 23
+};
+
+/* Trigger Polarity Select - ASIC-dependent, actual values for the
+ * register programming */
+enum trigger_polarity_select {
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_POLARITY_SELECT_CRTC = 1,
+ TRIGGER_POLARITY_SELECT_GENERICA = 2,
+ TRIGGER_POLARITY_SELECT_GENERICB = 3,
+ TRIGGER_POLARITY_SELECT_HSYNCA = 4,
+ TRIGGER_POLARITY_SELECT_HSYNCB = 5,
+ TRIGGER_POLARITY_SELECT_VIDEO_CAPTURE = 6,
+ TRIGGER_POLARITY_SELECT_GENERICC = 7
+};
+
+
+struct dce110_timing_generator_offsets {
+ int32_t crtc;
+ int32_t dcp;
+
+ /* DCE80 use only */
+ int32_t dmif;
+};
+
+struct dce110_timing_generator {
+ struct timing_generator base;
+ struct dce110_timing_generator_offsets offsets;
+ struct dce110_timing_generator_offsets derived_offsets;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+ uint32_t min_h_front_porch;
+ uint32_t min_h_back_porch;
+
+ /* DCE 12 */
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+
+};
+
+#define DCE110TG_FROM_TG(tg)\
+ container_of(tg, struct dce110_timing_generator, base)
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/* determine if given timing can be supported by TG */
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal);
+
+/******** HW programming ************/
+
+/* Program timing generator with given timing */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing);
+
+/* Disable/Enable Timing Generator */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg);
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg);
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl);
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce110_timing_generator_get_vblank_counter(
+ struct timing_generator *tg);
+
+void dce110_timing_generator_get_position(
+ struct timing_generator *tg,
+ struct crtc_position *position);
+
+/* return true if TG counter is moving, false if TG is stopped */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg);
+
+/* wait until TG is in beginning of vertical blank region */
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg);
+
+/* wait until TG is in beginning of active region */
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg);
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Sets up Global Swap Lock group, TimingServer or TimingClient */
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg);
+
+/* Reset slave controllers on master VSync */
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source);
+
+/* disabling trigger-reset */
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg);
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg);
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce110_timing_generator_disable_vga(struct timing_generator *tg);
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color);
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color);
+void dce110_timing_generator_color_space_to_black_color(
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+/*************** End-of-move ********************/
+
+/* Not called yet */
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params);
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value);
+
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock);
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color);
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color);
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+
+bool dce110_tg_is_blanked(struct timing_generator *tg);
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking);
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state);
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+bool dce110_arm_vert_intr(
+ struct timing_generator *tg, uint8_t width);
+
+#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
new file mode 100644
index 000000000000..07d9303d5477
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -0,0 +1,688 @@
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+#include "dce110_timing_generator_v.h"
+
+#include "timing_generator.h"
+
+/** ********************************************************************************
+ *
+ * DCE11 Timing Generator Implementation
+ *
+ **********************************************************************************/
+
+/**
+* Enable CRTCV
+*/
+
+static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
+{
+/*
+* Set MASTER_UPDATE_MODE to 0
+* This is needed for DRR, and is also suggested as the default value by Syed.
+*/
+
+ uint32_t value;
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ CRTCV_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ /* TODO: may want this on for looking for underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ value = 0;
+ set_reg_field_value(value, 1,
+ CRTCV_MASTER_EN, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_EN, value);
+
+ return true;
+}
+
+static bool dce110_timing_generator_v_disable_crtc(struct timing_generator *tg)
+{
+ uint32_t value;
+
+ value = dm_read_reg(tg->ctx,
+ mmCRTCV_CONTROL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_DISABLE_POINT_CNTL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_CONTROL, value);
+ /*
+ * TODO: call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+ return true;
+}
+
+static void dce110_timing_generator_v_blank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_unblank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static bool dce110_timing_generator_v_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+
+ addr = mmCRTCV_STATUS;
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTCV_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+static bool dce110_timing_generator_v_is_counter_moving(struct timing_generator *tg)
+{
+ uint32_t value;
+ uint32_t h1 = 0;
+ uint32_t h2 = 0;
+ uint32_t v1 = 0;
+ uint32_t v2 = 0;
+
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ if (h1 == h2 && v1 == v2)
+ return false;
+ else
+ return true;
+}
+
+static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch the beginning of VBlank here, so if we are already
+ * in VBlank on the first check we might be very close to VActive;
+ * in that case wait for the next frame
+ */
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+static void dce110_timing_generator_v_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_v_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_v_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dce110_timing_generator_v_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
+ addr = mmCRTCV_H_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTCV_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTCV_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->h_sync_width,
+ CRTCV_H_SYNC_A,
+ CRTC_H_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.HSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->v_sync_width,
+ CRTCV_V_SYNC_A,
+ CRTC_V_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.VSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_INTERLACE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->flags.INTERLACE,
+ CRTCV_INTERLACE_CONTROL,
+ CRTC_INTERLACE_ENABLE);
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t addr = mmCRTCV_START_LINE_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ }
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 2,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ dce110_timing_generator_v_blank_crtc(tg);
+ else
+ dce110_timing_generator_v_unblank_crtc(tg);
+}
+
+static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_v_program_blanking(tg, timing);
+}
+
+static void dce110_timing_generator_v_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+ addr = mmCRTCV_BLACK_COLOR;
+ dm_write_reg(ctx, addr, value);
+ /* It is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(ctx, addr, value);
+
+ /* TODO: we have to program the EXT registers and we need to know the
+ * LB DATA format because it is used when there are more than 10,
+ * e.g. 12, bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+static void dce110_tg_v_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_v_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_timing_generator_v_set_overscan_color(tg, overscan_color);
+}
+
+static void dce110_timing_generator_v_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ uint32_t address = mmCRTC_CONTROL;
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTCV_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_STATUS_FRAME_COUNT;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTCV_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+static bool dce110_timing_generator_v_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return false;
+}
+
+static void dce110_timing_generator_v_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_vga(
+ struct timing_generator *tg)
+{
+ return;
+}
+
+static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
+{
+ /* Signal comes from the primary pipe, underlay is never blanked. */
+ return false;
+}
+
+/** ********************************************************************************************
+ *
+ * DCE11 Timing Generator Constructor / Destructor
+ *
+ *********************************************************************************************/
+static const struct timing_generator_funcs dce110_tg_v_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_timing_generator_v_program_timing,
+ .enable_crtc = dce110_timing_generator_v_enable_crtc,
+ .disable_crtc = dce110_timing_generator_v_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_v_is_counter_moving,
+ .get_position = NULL, /* Not to be implemented for underlay*/
+ .get_frame_count = dce110_timing_generator_v_get_vblank_counter,
+ .set_early_control = dce110_timing_generator_v_set_early_control,
+ .wait_for_state = dce110_timing_generator_v_wait_for_state,
+ .set_blank = dce110_timing_generator_v_set_blank,
+ .is_blanked = dce110_tg_v_is_blanked,
+ .set_colors = dce110_timing_generator_v_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_v_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_v_program_blank_color,
+ .disable_vga = dce110_timing_generator_v_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_v_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_v_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_v_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_v_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_v_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_v_enable_advanced_request
+};
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx)
+{
+ tg110->controller_id = CONTROLLER_ID_UNDERLAY0;
+
+ tg110->base.funcs = &dce110_tg_v_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
new file mode 100644
index 000000000000..d2623a5994e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_V_DCE110_H__
+#define __DC_TIMING_GENERATOR_V_DCE110_H__
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx);
+
+#endif /* __DC_TIMING_GENERATOR_V_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
new file mode 100644
index 000000000000..47390dc58306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110_transform_v.h"
+#include "dm_services.h"
+#include "dc.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define SCLV_PHASES 64
+
+struct sclv_ratios_inits {
+ uint32_t h_int_scale_ratio_luma;
+ uint32_t h_int_scale_ratio_chroma;
+ uint32_t v_int_scale_ratio_luma;
+ uint32_t v_int_scale_ratio_chroma;
+ struct init_int_and_frac h_init_luma;
+ struct init_int_and_frac h_init_chroma;
+ struct init_int_and_frac v_init_luma;
+ struct init_int_and_frac v_init_chroma;
+};
+
+static void calculate_viewport(
+ const struct scaler_data *scl_data,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
+ /* Do not set the chroma viewport for the RGB444 pixel format */
+ luma_viewport->x = scl_data->viewport.x - scl_data->viewport.x % 2;
+ luma_viewport->y = scl_data->viewport.y - scl_data->viewport.y % 2;
+ luma_viewport->width =
+ scl_data->viewport.width - scl_data->viewport.width % 2;
+ luma_viewport->height =
+ scl_data->viewport.height - scl_data->viewport.height % 2;
+ chroma_viewport->x = luma_viewport->x;
+ chroma_viewport->y = luma_viewport->y;
+ chroma_viewport->height = luma_viewport->height;
+ chroma_viewport->width = luma_viewport->width;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8) {
+ luma_viewport->height += luma_viewport->height % 2;
+ luma_viewport->width += luma_viewport->width % 2;
+ /* For 4:2:0 video, chroma is 1/4 the area of luma, subsampled
+ * vertically and horizontally.
+ */
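+ /* For example (illustrative), a 1280x720 luma viewport would give a
+ * 640x360 chroma viewport below.
+ */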
+ chroma_viewport->x = luma_viewport->x / 2;
+ chroma_viewport->y = luma_viewport->y / 2;
+ chroma_viewport->height = luma_viewport->height / 2;
+ chroma_viewport->width = luma_viewport->width / 2;
+ }
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ struct rect *luma_view_port,
+ struct rect *chroma_view_port)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+
+ if (luma_view_port->width != 0 && luma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->x,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_X_START);
+ set_reg_field_value(
+ value,
+ luma_view_port->y,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_Y_START);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->height,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_HEIGHT);
+ set_reg_field_value(
+ value,
+ luma_view_port->width,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_WIDTH);
+ dm_write_reg(ctx, addr, value);
+ }
+
+ if (chroma_view_port->width != 0 && chroma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->x,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_X_START_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->y,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_Y_START_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->height,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_HEIGHT_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->width,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_WIDTH_C);
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+/*
+ * Function:
+ * void setup_scaling_configuration
+ *
+ * Purpose: set up the scaling mode: bypass, RGB, YCbCr, and the number of taps
+ * Input: data
+ *
+ * Output:
+ * void
+ */
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ bool is_scaling_needed = false;
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+
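+ /* Note: the tap-control fields are programmed as (taps - 1); for
+ * example (illustrative), 4 vertical taps are written as 3.
+ */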
+ set_reg_field_value(value, data->taps.h_taps - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.v_taps - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.h_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS_C);
+ set_reg_field_value(value, data->taps.v_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS_C);
+ dm_write_reg(ctx, mmSCLV_TAP_CONTROL, value);
+
+ value = 0;
+ if (data->taps.h_taps + data->taps.v_taps > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN);
+ is_scaling_needed = true;
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN);
+ }
+
+ if (data->taps.h_taps_c + data->taps.v_taps_c > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN_C);
+ is_scaling_needed = true;
+ } else if (data->format != PIXEL_FORMAT_420BPP8) {
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_MODE),
+ SCLV_MODE,
+ SCL_MODE_C);
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_PSCL_EN),
+ SCLV_MODE,
+ SCL_PSCL_EN_C);
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN_C);
+ }
+ dm_write_reg(ctx, mmSCLV_MODE, value);
+
+ value = 0;
+ /*
+ * 0 - Replace out-of-bound pixels with the black pixel
+ * (or any other required color)
+ * 1 - Replace out-of-bound pixels with the edge pixel
+ */
+ set_reg_field_value(value, 1, SCLV_CONTROL, SCL_BOUNDARY_MODE);
+ dm_write_reg(ctx, mmSCLV_CONTROL, value);
+
+ return is_scaling_needed;
+}
+
+/*
+ * Function:
+ * void program_overscan
+ *
+ * Purpose: Programs overscan border
+ * Input: overscan
+ *
+ * Output:
+ * void
+ */
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ uint32_t overscan_left_right = 0;
+ uint32_t overscan_top_bottom = 0;
+
+ int overscan_right = data->h_active - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ set_reg_field_value(overscan_left_right, data->recout.x,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT);
+
+ set_reg_field_value(overscan_left_right, overscan_right,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT);
+
+ set_reg_field_value(overscan_top_bottom, data->recout.y,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP);
+
+ set_reg_field_value(overscan_top_bottom, overscan_bottom,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_LEFT_RIGHT,
+ overscan_left_right);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_TOP_BOTTOM,
+ overscan_top_bottom);
+}
+
+static void set_coeff_update_complete(
+ struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmSCLV_UPDATE);
+ set_reg_field_value(value, 1, SCLV_UPDATE, SCL_COEF_UPDATE_COMPLETE);
+ dm_write_reg(xfm_dce->base.ctx, mmSCLV_UPDATE, value);
+}
+
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ int i, phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCLV_PHASES / 2 + 1;
+
+ uint32_t select = 0;
+ uint32_t power_ctl, power_ctl_off;
+
+ if (!coeffs)
+ return;
+
+ /*We need to disable power gating on coeff memory to do programming*/
+ power_ctl = dm_read_reg(ctx, mmDCFEV_MEM_PWR_CTRL);
+ power_ctl_off = power_ctl;
+ set_reg_field_value(power_ctl_off, 1, DCFEV_MEM_PWR_CTRL, SCLV_COEFF_MEM_PWR_DIS);
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl_off);
+
+ /* Wait for power gating to be disabled: */
+ for (i = 0; i < 10; i++) {
+ if (get_reg_field_value(
+ dm_read_reg(ctx, mmDCFEV_MEM_PWR_STATUS),
+ DCFEV_MEM_PWR_STATUS,
+ SCLV_COEFF_MEM_PWR_STATE) == 0)
+ break;
+
+ udelay(1);
+ }
+
+ set_reg_field_value(select, filter_type, SCLV_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE);
+
+ for (phase = 0; phase < phases_to_program; phase++) {
+ /* We always program N/2 + 1 phases out of a total of N phases; the
+ * remaining N/2 - 1 phases are just mirrors. Phase 0 is unique, and
+ * phase N/2 is unique when N is even. */
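+ /* For example (illustrative), with SCLV_PHASES == 64 this loop programs
+ * phases 0..32, i.e. 33 entries, and the remaining phases are mirrors
+ * of those.
+ */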
+ set_reg_field_value(select, phase, SCLV_COEF_RAM_SELECT, SCL_C_RAM_PHASE);
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint32_t data = 0;
+
+ set_reg_field_value(select, pair,
+ SCLV_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX);
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_SELECT, select);
+
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF);
+
+ if (taps % 2 && pair == taps_pairs - 1) {
+ set_reg_field_value(
+ data, 0,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ array_idx++;
+ } else {
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx + 1],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF);
+
+ array_idx += 2;
+ }
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_TAP_DATA, data);
+ }
+ }
+
+ /*We need to restore power gating on coeff memory to initial state*/
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl);
+}
+
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct sclv_ratios_inits *inits,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
+ inits->h_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+ inits->h_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
+ inits->v_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
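+ /* The ratios are converted to the register fixed-point format by
+ * dal_fixed31_32_u2d19() plus a left shift by 5. For example
+ * (illustrative, assuming u2d19 yields an unsigned 2.19 value), a 1.0
+ * ratio becomes 0x80000 in u2.19 and 0x1000000 after the shift.
+ */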
+
+ inits->h_init_luma.integer = 1;
+ inits->v_init_luma.integer = 1;
+ inits->h_init_chroma.integer = 1;
+ inits->v_init_chroma.integer = 1;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct sclv_ratios_inits *inits)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmSCLV_HORZ_FILTER_SCALE_RATIO;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_luma,
+ SCLV_HORZ_FILTER_SCALE_RATIO,
+ SCL_H_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_luma,
+ SCLV_VERT_FILTER_SCALE_RATIO,
+ SCL_V_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_chroma,
+ SCLV_HORZ_FILTER_SCALE_RATIO_C,
+ SCL_H_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_chroma,
+ SCLV_VERT_FILTER_SCALE_RATIO_C,
+ SCL_V_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.fraction,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.integer,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.fraction,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.integer,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.fraction,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.integer,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.fraction,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.integer,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+}
+
+static const uint16_t *get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static bool dce110_xfmv_power_up_line_buffer(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL);
+
+ /*Use all three pieces of memory always*/
+ set_reg_field_value(value, 0, LBV_MEMORY_CTRL, LB_MEMORY_CONFIG);
+ /* Hard-coded for DCE11: 1712 (0x6B0) entries; partitions: 720/960/1712 */
+ set_reg_field_value(value, xfm_dce->lb_memory_size, LBV_MEMORY_CTRL,
+ LB_MEMORY_SIZE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL, value);
+
+ return true;
+}
+
+static void dce110_xfmv_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required = false;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h, *coeffs_h_c, *coeffs_v_c;
+ struct rect luma_viewport = {0};
+ struct rect chroma_viewport = {0};
+
+ dce110_xfmv_power_up_line_buffer(xfm);
+ /* 1. Calculate the viewport. Viewport programming should happen after
+ * the init calculations, as they may require an adjustment of the
+ * viewport.
+ */
+
+ calculate_viewport(data, &luma_viewport, &chroma_viewport);
+
+ /* 2. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 3. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 4. Calculate and program ratio, filter initialization */
+
+ struct sclv_ratios_inits inits = { 0 };
+
+ calculate_inits(
+ xfm_dce,
+ data,
+ &inits,
+ &luma_viewport,
+ &chroma_viewport);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_64p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_64p(data->taps.h_taps, data->ratios.horz);
+ coeffs_v_c = get_filter_coeffs_64p(data->taps.v_taps_c, data->ratios.vert_c);
+ coeffs_h_c = get_filter_coeffs_64p(data->taps.h_taps_c, data->ratios.horz_c);
+
+ if (coeffs_v != xfm_dce->filter_v
+ || coeffs_v_c != xfm_dce->filter_v_c
+ || coeffs_h != xfm_dce->filter_h
+ || coeffs_h_c != xfm_dce->filter_h_c) {
+ /* 5. Program vertical filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps_c,
+ coeffs_v_c,
+ FILTER_TYPE_CBCR_VERTICAL);
+
+ /* 6. Program horizontal filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps_c,
+ coeffs_h_c,
+ FILTER_TYPE_CBCR_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_v_c = coeffs_v_c;
+ xfm_dce->filter_h = coeffs_h;
+ xfm_dce->filter_h_c = coeffs_h_c;
+ filter_updated = true;
+ }
+ }
+
+ /* 7. Program the viewport */
+ program_viewport(xfm_dce, &luma_viewport, &chroma_viewport);
+
+ /* 8. Set bit to flip to new coefficient memory */
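+ /* Presumably the coefficient RAM is double buffered: setting
+ * SCL_COEF_UPDATE_COMPLETE lets the hardware switch to the newly
+ * programmed coefficient set.
+ */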
+ if (filter_updated)
+ set_coeff_update_complete(xfm_dce);
+}
+
+static void dce110_xfmv_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+ xfm_dce->filter_h_c = NULL;
+ xfm_dce->filter_v_c = NULL;
+}
+
+static void dce110_xfmv_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ /* DO NOTHING*/
+}
+
+static void dce110_xfmv_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth = 0;
+ int expan_mode = 0;
+ uint32_t reg_data = 0;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_reg_field_value(
+ reg_data,
+ expan_mode,
+ LBV_DATA_FORMAT,
+ PIXEL_EXPAN_MODE);
+
+ set_reg_field_value(
+ reg_data,
+ pixel_depth,
+ LBV_DATA_FORMAT,
+ PIXEL_DEPTH);
+
+ dm_write_reg(xfm->ctx, mmLBV_DATA_FORMAT, reg_data);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /* We should not use unsupported capabilities
+ * unless required by a workaround. */
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
+static const struct transform_funcs dce110_xfmv_funcs = {
+ .transform_reset = dce110_xfmv_reset,
+ .transform_set_scaler = dce110_xfmv_set_scaler,
+ .transform_set_gamut_remap =
+ dce110_xfmv_set_gamut_remap,
+ .opp_set_csc_default = dce110_opp_v_set_csc_default,
+ .opp_set_csc_adjustment = dce110_opp_v_set_csc_adjustment,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut_v,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl_v,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode_v,
+ .transform_set_pixel_storage_depth =
+ dce110_xfmv_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.funcs = &dce110_xfmv_funcs;
+
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
new file mode 100644
index 000000000000..b70780210aad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
@@ -0,0 +1,58 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_V_DCE110_H__
+#define __DAL_TRANSFORM_V_DCE110_H__
+
+#include "../dce/dce_transform.h"
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm110,
+ struct dc_context *ctx);
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
new file mode 100644
index 000000000000..265ac4310d85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the 'dce112' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
+dce112_resource.o
+
+AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE112)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
new file mode 100644
index 000000000000..69649928768c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -0,0 +1,854 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce112_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce112_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
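+
+/* DCP_REG()/DMIF_REG() above resolve a register to the per-pipe instance
+ * once cp110->offsets has been set from reg_offsets[inst] (see
+ * dce112_compressor_enable_fbc()).
+ */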
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG static screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce112_compressor *cp110)
+{
+ /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
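+ /* For example (illustrative), a 1024-byte row size, 4 banks and
+ * 2 DRAM channels give an alignment of 8192 bytes.
+ */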
+ return cp110->base.raw_size * cp110->base.banks_num *
+ cp110->base.dram_channels_num;
+}
+
+static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp110->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp110->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp110->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp110->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
+static bool is_source_bigger_than_epanel_size(
+ struct dce112_compressor *cp110,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp110->base.embedded_panel_h_size != 0 &&
+ cp110->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp110->base.embedded_panel_h_size *
+ cp110->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
+static uint32_t align_to_chunks_number_per_line(
+ struct dce112_compressor *cp110,
+ uint32_t pixels)
+{
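+ /* Round up to the nearest multiple of 256 pixels; for example
+ * (illustrative), 1920 becomes 2048.
+ */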
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce112_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce112_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /* FBC_MIN_COMPRESSION:
+ * 0 ==> 2:1
+ * 1 ==> 4:1
+ * 2 ==> 8:1
+ * 0xF ==> 1:1
+ */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce112_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp110,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+ /* Before enabling FBC, LPT must first be enabled if applicable.
+ * The LPT state should always be changed (enabled/disabled) while
+ * FBC is disabled. */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce11_one_lpt_channel_max_resolution)) {
+ dce112_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+ /* Toggle it, as there is a bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce112_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ /* Whenever disabling FBC, make sure LPT is also disabled if LPT
+ * is supported */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce112_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
+bool dce112_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp110);
+
+ if (lpt_alignment != 0) {
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+
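+ /* For the 1:1 ratio below the chunk-aligned pitch is further divided
+ * by 8; for example (illustrative), a 1920-pixel-wide source gives
+ * 2048 / 8 = 256.
+ */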
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce112_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+ /* Disable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce112_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+ /* Enable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Selection of the channel(s) containing the compressed surface:
+ * 0xFFFFFFFF will disable LPT.
+ * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ channels + 1, /* not mentioned in the programming guide,
+ but follows DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
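+ /* For example (illustrative), with LOW_POWER_TILING_MODE == 0 (use
+ * channel 0) the value written to STCTRL_LPT_TARGET above is 1.
+ */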
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+ /* case 2:
+ * Use Channel 0 & 1 / Not used for DCE 11 */
+ case 1:
+ /*Use Channel 0 for LPT for DCE 11 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp110, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
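+ /* For example (illustrative), a 1920x1080 source has a chunk-aligned
+ * width of 2048; at 4 bytes per pixel with an 8192-byte LPT alignment
+ * this gives roundup(2048 * 1080 * 4 / 8192) = 1080 rows per channel.
+ */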
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp110);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 11 Frame Buffer Compression Implementation
+ */
+
+void dce112_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable the region hit event: FBC_MEMORY_REGION_MASK = 0 (bits 16-19).
+ * For DCE 11, regions cannot be used - this does not work with S/G.
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Set up the events that clear all CSM entries (effectively marking
+ * the current compressed data invalid).
+ * For DCE 11, the CSM metadata value 11111 means "Not Compressed".
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE11.1 also needs to set new DCE11.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_construct(struct dce112_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+ compressor->base.options.bits.LPT_SUPPORT = true;
+ /* For DCE 11 always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
+ * should not be supported. Check the ASIC vram width directly, since
+ * compressor->base.memory_bus_width is only assigned further below. */
+ if (ctx->asic_id.vram_width == 64)
+ compressor->base.options.bits.LPT_SUPPORT = false;
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx)
+{
+ struct dce112_compressor *cp110 =
+ kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce112_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce112_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE112_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
new file mode 100644
index 000000000000..f1227133f6df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE112_H__
+#define __DC_COMPRESSOR_DCE112_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE112_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce112_compressor, base)
+
+struct dce112_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce112_compressor {
+ struct compressor base;
+ struct dce112_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx);
+
+void dce112_compressor_construct(struct dce112_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce112_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce112_compressor_power_up_fbc(struct compressor *cp);
+
+void dce112_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce112_compressor_disable_fbc(struct compressor *cp);
+
+void dce112_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce112_compressor_enable_lpt(struct compressor *cp);
+
+void dce112_compressor_disable_lpt(struct compressor *cp);
+
+void dce112_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
new file mode 100644
index 000000000000..1e4a7c13f0ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce112_hw_sequencer.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE11.2 register header files */
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+struct dce112_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+
+static const struct dce112_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
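+/* Translate a CRTC register address to the instance used by controller 'id'. */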
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
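+/* Ensure DVMM PTE requests for HFLIP are issued four per chunk (integer and
+ * multiplier fields) with up to 255 outstanding requests; the register is
+ * only rewritten when the current values differ.
+ */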
+static void dce112_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+static bool dce112_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+	if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+		 * by default when the command table is called
+		 */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce112_init_pte(ctx);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+void dce112_hw_sequencer_construct(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
new file mode 100644
index 000000000000..e646f4a37fa2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE112_H__
+#define __DC_HWSS_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce112_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
new file mode 100644
index 000000000000..663e0a047a4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -0,0 +1,1283 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+
+#include "irq/dce110/irq_service_dce110.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce112/dce112_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+enum dce112_clk_src_array_id {
+ DCE112_CLK_SRC_PLL0,
+ DCE112_CLK_SRC_PLL1,
+ DCE112_CLK_SRC_PLL2,
+ DCE112_CLK_SRC_PLL3,
+ DCE112_CLK_SRC_PLL4,
+ DCE112_CLK_SRC_PLL5,
+
+ DCE112_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE112(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_112_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps polaris_10_resource_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+static const struct resource_caps polaris_11_resource_cap = {
+ .num_timing_generator = 5,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
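+/* Read the HDMI-disable, audio stream number and audio pin straps from the
+ * hardware into the resource_straps structure.
+ */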
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+
+static struct timing_generator *dce112_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce112_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE112_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE112_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE112_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce112_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce112_stream_encoder_create,
+ .create_hwseq = dce112_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_2_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce112_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static void dce112_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce112_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce112_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce112_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+struct output_pixel_processor *dce112_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce112_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce112_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
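+/* Free everything created by construct(); every entry is NULL-checked, so this
+ * is safe to call on a partially constructed pool (see res_create_fail).
+ */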
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce112_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce112_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce112_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
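+/* Each UNIPHY transmitter has its own combo PHY PLL on DCE11.2; return the
+ * clock source that matches the stream's transmitter, or NULL if none does.
+ */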
+static struct clock_source *find_matching_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_stream_state *const stream)
+{
+ switch (stream->sink->link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL0];
+ case TRANSMITTER_UNIPHY_B:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL1];
+ case TRANSMITTER_UNIPHY_C:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL2];
+ case TRANSMITTER_UNIPHY_D:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL3];
+ case TRANSMITTER_UNIPHY_E:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL4];
+ case TRANSMITTER_UNIPHY_F:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL5];
+ default:
+ return NULL;
+	}
+
+	return NULL;
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: Bandwidth validation failed!",
+ __func__);
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
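+/* DP and virtual streams share the DP DTO clock source; all other signals get
+ * the combo PHY PLL matching their transmitter.
+ */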
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+
+ /* acquire new resources */
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source =
+ dc->res_pool->dp_clock_source;
+ else
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
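+/* Accept at most one plane per stream and reject video (underlay) surface
+ * formats, since this pool exposes no underlay pipe.
+ */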
+static bool dce112_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce112_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+enum dc_status dce112_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce112_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static void dce112_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce112_res_pool_funcs = {
+ .destroy = dce112_destroy_resource_pool,
+ .link_enc_create = dce112_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx,
+ .validate_global = dce112_validate_global
+};
+
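+/* Pull engine and memory clock levels from PPLIB (falling back to the legacy
+ * query when latency data is not available) and convert them from kHz to the
+ * fixed-point MHz values the bandwidth calculations expect; when latency data
+ * is available, also report the per-watermark-set clock ranges back to
+ * PPLib/SMU.
+ */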
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ struct dm_pp_clock_levels clks = {0};
+
+	/* Do system clock.  TODO PPLIB: remove the old path once PPLIB
+	 * support is implemented.
+	 */
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks)) {
+
+		/* This is only temporary */
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+		/* convert all the clocks from kHz to fixed-point MHz */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ return;
+ }
+
+	/* convert all the clocks from kHz to fixed-point MHz.  TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks);
+
+	/* We don't need to call PPLIB for the validation clock since it
+	 * also gives us the highest sclk and highest mclk (UMA clock).
+	 * Also always convert the UMA clock (from PPLIB) to YCLK (HW formula):
+	 * YCLK = UMACLK * m_memoryTypeMultiplier
+	 */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+	/* Now notify PPLib/SMU which watermark sets to select depending on
+	 * the DPM state they are in, and update the BW MGR GFX engine and
+	 * memory clock member variables used for watermark calculations for
+	 * each watermark set.
+	 */
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
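+/* Polaris11/Polaris12 expose five timing generators, audios and stream
+ * encoders; Polaris10 exposes six.
+ */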
+const struct resource_caps *dce112_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev))
+ return &polaris_11_resource_cap;
+ else
+ return &polaris_10_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce112_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce112_res_pool_funcs;
+
+ /*************************************************
+	 * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL0] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL1] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL2] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL3] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL4] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL5] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source = dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);
+
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce112_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce112_timing_generator_create(
+ ctx,
+ i,
+ &dce112_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce112_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce112_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce112_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce112_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ /* Create hardware sequencer */
+ dce112_hw_sequencer_construct(dc);
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
new file mode 100644
index 000000000000..d5c19d34eb0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE112_H__
+#define __DC_RESOURCE_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce112_validate_with_context(
+ struct dc *dc,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context,
+ struct dc_state *old_context);
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context);
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+
+#endif /* __DC_RESOURCE_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
new file mode 100644
index 000000000000..1779b963525c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+
+DCE120 = dce120_resource.o dce120_timing_generator.o \
+dce120_hw_sequencer.o
+
+AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
new file mode 100644
index 000000000000..1a0b54d6034e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce120_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce120_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define CNTL_ID(controller_id)\
+ controller_id
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+#if 0
+static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+/*
+ addr = mmDCP0_DVMM_PTE_CONTROL + controller_id *
+ (mmDCP1_DVMM_PTE_CONTROL- mmDCP0_DVMM_PTE_CONTROL);
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value, 0, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);*/
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+#endif
+
+static bool dce120_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ /* disable for bringup */
+#if 0
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+		 * by default when the command table is called
+		 */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC0_CRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce120_init_pte(ctx, controller_id);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+#endif
+ return false;
+}
+
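+/* Program the DCHUB frame buffer and AGP apertures according to the frame
+ * buffer mode (local only, ZFB only, or mixed); addresses are written in
+ * 4 MB units (shifted right by 22 bits).
+ */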
+static void dce120_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+		/* For ZFB, put DCHUB FB BASE and TOP upside down to indicate ZFB mode */
+ REG_UPDATE_2(DCHUB_FB_LOCATION,
+ FB_TOP, 0,
+ FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, 0);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, 0x03FFFF);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+
+
+void dce120_hw_sequencer_construct(struct dc *dc)
+{
+	/* Reuse the dce11 hw sequencer construct; only the dce12-specific
+	 * hooks are overridden below
+	 */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwss.update_dchub = dce120_update_dchub;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
new file mode 100644
index 000000000000..77a6b86d7606
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE120_H__
+#define __DC_HWSS_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce120_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE120_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
new file mode 100644
index 000000000000..5c48c22d9d98
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -0,0 +1,1004 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "stream_encoder.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce120_resource.h"
+#include "dce112/dce112_resource.h"
+
+#include "dce110/dce110_resource.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce120_timing_generator.h"
+#include "irq/dce120/irq_service_dce120.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_mem_input.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce120/dce120_hw_sequencer.h"
+#include "dce/dce_transform.h"
+
+#include "dce/dce_audio.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_hwseq.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "vega10/NBIO/nbio_6_1_offset.h"
+#include "reg_helper.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+enum dce120_clk_src_array_id {
+ DCE120_CLK_SRC_PLL0,
+ DCE120_CLK_SRC_PLL1,
+ DCE120_CLK_SRC_PLL2,
+ DCE120_CLK_SRC_PLL3,
+ DCE120_CLK_SRC_PLL4,
+ DCE120_CLK_SRC_PLL5,
+
+ DCE120_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ }
+};
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
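+/* Illustrative note (not part of the register lists): with the helpers above,
+ * a line such as SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) expands at compile time to
+ * roughly
+ *	.DP_DPHY_INTERNAL_CTRL = DCE_BASE__INST0_SEG2 + mmDP0_DP_DPHY_INTERNAL_CTRL
+ * i.e. the per-block segment base (selected via the _BASE_IDX token pasting)
+ * plus the register offset. The SEG2 value is only an example taken from the
+ * fallback defines earlier in this file; the real value comes from the SoC15
+ * register headers.
+ */
+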
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE120_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_120_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+struct output_pixel_processor *dce120_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 7,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
+};
+
+static const struct dc_debug debug_defaults = {
+ .disable_clock_gate = true,
+};
+
+struct clock_source *dce120_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(*clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce120_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+
+bool dce120_hw_sequencer_create(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce120_hw_sequencer_construct(dc);
+
+ /*TODO Move to separate file and Override what is needed */
+
+ return true;
+}
+
+static struct timing_generator *dce120_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce120_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static void dce120_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce120_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce120_clock_source_destroy(
+ &pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce120_clock_source_destroy(&pool->base.dp_clock_source);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0);
+
+ straps->audio_stream_number = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ AUDIO_STREAM_NUMBER);
+ straps->hdmi_disable = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ HDMI_DISABLE);
+
+ reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0);
+ straps->dc_pinstraps_audio = get_reg_field_value(reg_val,
+ DC_PINSTRAPS,
+ DC_PINSTRAPS_AUDIO);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce120_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce120_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static struct stream_encoder *dce120_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE120_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce120_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce120_stream_encoder_create,
+ .create_hwseq = dce120_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce120_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static struct transform *dce120_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static void dce120_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce120_res_pool_funcs = {
+ .destroy = dce120_destroy_resource_pool,
+ .link_enc_create = dce120_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ int i;
+ unsigned int clk;
+ unsigned int latency;
+
+ /*do system clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks) || eng_clks.num_levels == 0) {
+
+ eng_clks.num_levels = 8;
+ clk = 300000;
+
+ for (i = 0; i < eng_clks.num_levels; i++) {
+ eng_clks.data[i].clocks_in_khz = clk;
+ clk += 100000;
+ }
+ }
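+	/* Illustrative example only: with the fallback above, the eight engine
+	 * clock levels come out as 300000, 400000, ..., 1000000 kHz, which the
+	 * conversions below turn into fixed-point MHz values for the bandwidth
+	 * calculations.
+	 */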
+
+ /* convert all the clocks from kHz to fixed-point MHz. TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks) || mem_clks.num_levels == 0) {
+
+ mem_clks.num_levels = 3;
+ clk = 250000;
+ latency = 45;
+
+ for (i = 0; i < mem_clks.num_levels; i++) {
+ mem_clks.data[i].clocks_in_khz = clk;
+ mem_clks.data[i].latency_in_us = latency;
+ clk += 500000;
+ latency -= 5;
+ }
+
+ }
+
+ /* we don't need to call PPLIB for validation clock since they
+ * also give us the highest sclk and highest mclk (UMA clock).
+ * ALSO always convert UMA clock (from PPLIB) to YCLK (HW formula):
+ * YCLK = UMACLK*m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
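+
+	/* Worked example (illustrative; the multiplier value is an assumption,
+	 * MEMORY_TYPE_MULTIPLIER is defined outside this file): for a lowest
+	 * UMA clock of 250000 kHz and a multiplier of 4, low_yclk becomes
+	 * bw_frc_to_fixed(1000000, 1000), i.e. 1000 MHz in fixed point.
+	 */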
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data irq_init_data;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce120_res_pool_funcs;
+
+ /* TODO: Fill more data from GreenlandAsicCapability.cpp */
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+ dc->debug = debug_defaults;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clk_src_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ irq_init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce120_timing_generator_create(
+ ctx,
+ i,
+ &dce120_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.mis[i] = dce120_mem_input_create(ctx, i);
+
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.ipps[i] = dce120_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.transforms[i] = dce120_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce120_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ if (!dce120_hw_sequencer_create(dc))
+ goto controller_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+irqs_create_fail:
+controller_create_fail:
+disp_clk_create_fail:
+clk_src_create_fail:
+res_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
new file mode 100644
index 000000000000..3d1f3cf012f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE120_H__
+#define __DC_RESOURCE_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE120_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
new file mode 100644
index 000000000000..2502182d5e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -0,0 +1,1174 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce120_timing_generator.h"
+
+#include "timing_generator.h"
+
+#define CRTC_REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_SET_N(reg_name, n, ...) \
+ generic_reg_set_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_UPDATE(reg, field, val) \
+ CRTC_REG_UPDATE_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_UPDATE_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_UPDATE_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_UPDATE_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
+#define CRTC_REG_UPDATE_4(reg, field1, val1, field2, val2, field3, val3, field4, val4) \
+ CRTC_REG_UPDATE_N(reg, 4, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4)
+
+#define CRTC_REG_UPDATE_5(reg, field1, val1, field2, val2, field3, val3, field4, val4, field5, val5) \
+ CRTC_REG_UPDATE_N(reg, 5, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4, FD(reg##__##field5), val5)
+
+#define CRTC_REG_SET(reg, field, val) \
+ CRTC_REG_SET_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_SET_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_SET_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
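+/* Illustrative note: a call such as
+ *	CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0)
+ * expands to a single-field
+ *	generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc,
+ *		CRTC0_CRTC_MASTER_UPDATE_MODE, 1,
+ *		FD(CRTC0_CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_MODE), 0);
+ * so the per-CRTC offset is applied to every register access. FD() is assumed
+ * here to map a REG__FIELD token to the field descriptor used by the soc15
+ * register helpers; it is defined outside this file.
+ */
+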
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ *	check the current status of the CRTC to determine whether we are in
+ *	the Vertical Blank region
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce120_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS,
+ tg110->offsets.crtc);
+
+ field = get_reg_field_value(value, CRTC0_CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+
+/* determine if given timing can be supported by TG */
+bool dce120_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ uint32_t v_blank =
+ (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (!dce110_timing_generator_validate_timing(
+ tg,
+ timing,
+ signal))
+ return false;
+
+
+ if (v_blank < tg110->min_v_blank ||
+ timing->h_sync_width < tg110->min_h_sync_width ||
+ timing->v_sync_width < tg110->min_v_sync_width)
+ return false;
+
+ return true;
+}
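+
+/* Illustrative example (hypothetical timing): for a progressive 1920x1080
+ * timing with v_total 1125, no borders and interlace off, v_blank evaluates
+ * to (1125 - 1080) * 1 = 45 lines, which is then checked against the minimum
+ * blank/sync widths inherited from the DCE110 timing generator.
+ */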
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+/******** HW programming ************/
+/* Disable/Enable Timing Generator */
+bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Set MASTER_UPDATE_MODE to 0
+ * This is needed for DRR, and is also suggested to be the default value by Syed. */
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE, 0);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_LOCK,
+ UNDERFLOW_UPDATE_LOCK, 0);
+
+ /* TODO API for AtomFirmware didn't change*/
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce120_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_CONTROL,
+ CRTC_HBLANK_EARLY_CONTROL, early_cntl);
+}
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce120_timing_generator_get_vblank_counter(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_FRAME_COUNT,
+ tg110->offsets.crtc);
+ uint32_t field = get_reg_field_value(
+ value, CRTC0_CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/* Get current H and V position */
+void dce120_timing_generator_get_crtc_position(
+ struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(value,
+ CRTC0_CRTC_NOM_VERT_POSITION, CRTC_VERT_COUNT_NOM);
+}
+
+/* wait until TG is in beginning of vertical blank region */
+void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch the beginning of VBlank here, so if the first check
+ * is already in VBlank, we might be very close to Active; in this case,
+ * wait for another frame
+ */
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/* wait until TG is in beginning of active region */
+void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Sets up Global Swap Lock group, TimingServer or TimingClient */
+void dce120_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value_crtc_vtotal =
+ dm_read_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_V_TOTAL,
+ tg110->offsets.crtc);
+ /* Checkpoint relative to end of frame */
+ uint32_t check_point =
+ get_reg_field_value(value_crtc_vtotal,
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_GSL_WINDOW, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_N(DCP0_DCP_GSL_CONTROL, 6,
+ /* This pipe will belong to GSL Group zero. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 1,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), gsl_params->gsl_master == tg->inst,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ /* Keep signal low (pending high) for 6 lines.
+ * Also defines the minimum interval before re-checking the signal. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 1);
+
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_FORCE_DELAY, VFLIP_READY_DELAY);
+}
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce120_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Setting HW default values from reg specs */
+ CRTC_REG_SET_N(DCP0_DCP_GSL_CONTROL, 6,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 0);
+
+ CRTC_REG_SET_2(CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, 0,
+ CRTC_GSL_FORCE_DELAY, 0x2); /*TODO Why this value here ?*/
+}
+
+/* Reset slave controllers on master VSync */
+void dce120_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source)
+{
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ /* Setup trigger edge */
+ uint32_t pol_value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_SYNC_A_CNTL,
+ tg110->offsets.crtc);
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC0_CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+
+ /* TODO What about other sources ?*/
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ CRTC_REG_UPDATE_N(CRTC0_CRTC_TRIGB_CNTL, 7,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT), trig_src_select,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT), TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL), rising_edge,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL), falling_edge,
+ /* send every signal */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT), 0,
+ /* no delay */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY), 0,
+ /* clear trigger status */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR), 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 2,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL, 1,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+/* disabling trigger-reset */
+void dce120_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 0,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT, TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_POLARITY_SELECT, TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ /* clear trigger status */
+ CRTC_TRIGB_CLEAR, 1);
+
+}
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce120_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ tg110->offsets.crtc);
+
+ return get_reg_field_value(value,
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+{
+ uint32_t offset = 0;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ offset = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ offset = mmD2VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ offset = mmD3VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ offset = mmD4VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ offset = mmD5VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ offset = mmD6VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ value = dm_read_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset);
+
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset, value);
+}
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce120_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t tmp1 = 0;
+ uint32_t tmp2 = 0;
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_H_TOTAL,
+ CRTC_H_TOTAL,
+ timing->h_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL,
+ timing->v_total - 1);
+
+ /* In case V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX,
+ timing->v_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN,
+ timing->v_total - 1);
+
+ tmp1 = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+ tmp2 = tmp1 + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END, tmp1,
+ CRTC_H_BLANK_START, tmp2);
+
+ tmp1 = timing->v_total - (v_sync_start + timing->v_border_top);
+ tmp2 = tmp1 + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END, tmp1,
+ CRTC_V_BLANK_START, tmp2);
+}
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_OVERSCAN_COLOR,
+ tg110->offsets.crtc);
+
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+ /* It is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level the DAC
+ * outputs when the NBLANK signal is asserted low, such as for output
+ * to an analog TV. */
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+ /* TODO: we have to program the EXT registers, and we need to know the LB
+ * DATA format because it is used with more than 10, i.e. 12, bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+void dce120_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, params->vertical_total_min - 1);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, params->vertical_total_max - 1);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 6,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0x180);
+
+ } else {
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, 0);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0);
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce120_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce120_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC0_CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+
+void dce120_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t v_blank_start_end = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_BLANK_START_END,
+ tg110->offsets.crtc);
+
+ *v_blank_start = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce120_timing_generator_get_crtc_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+void dce120_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_sync_width_and_b_porch =
+ timing->v_total - timing->v_addressable -
+ timing->v_border_bottom - timing->v_front_porch;
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc);
+
+ set_reg_field_value(
+ value,
+ enable ? 0 : 1,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+
+ /* Program the advanced line position according to the best case from a
+ * data-fetching perspective, to hide MC latency and to prefill the Line
+ * Buffer in V Blank (clamped to 10 lines, as the LB can store at most 10 lines)
+ */
+ if (v_sync_width_and_b_porch > 10)
+ v_sync_width_and_b_porch = 10;
+
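+	/* Illustrative example (hypothetical timing): for a 1080p CEA timing
+	 * with v_total 1125, v_addressable 1080, no bottom border and a 4-line
+	 * front porch, the value computes to 1125 - 1080 - 0 - 4 = 41 lines
+	 * and is clamped to 10 by the check above.
+	 */
+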
+ set_reg_field_value(
+ value,
+ v_sync_width_and_b_porch,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+
+ dm_write_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc);
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, overscan_color->color_r_cr);
+}
+
+void dce120_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce120_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce120_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc);
+
+ if (get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+
+ return false;
+}
+
+void dce120_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET(
+ CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+
+ if (enable_blanking)
+ CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+ else
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc, 0);
+}
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce120_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce120_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce120_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce120_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce120_tg_program_blank_color(tg, blank_color);
+
+ if (overscan_color != NULL)
+ dce120_tg_set_overscan_color(tg, overscan_color);
+}
+
+static void dce120_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK, value,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+void dce120_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ /* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_HRES, 6);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits;
+ * the lowest 6 are hardwired to ZERO.
+ * Color bits should be left aligned to the MSB:
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
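+			/* Illustrative example: for 8-bit output the white
+			 * component 0xFFFF becomes 0xFFFF >> (16 - 8) = 0xFF,
+			 * then 0xFF << (16 - 8) = 0xFF00, i.e. the value the
+			 * mask/data write sequence below programs for R0/G0/B0.
+			 */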
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * We make a loop of 6 iterations in which we prepare the mask,
+ * then write, then prepare the color for the next write.
+ * The first iteration writes the mask only, but in each following
+ * iteration the color prepared in the previous iteration is written
+ * within the new mask; the last component is written separately,
+ * since the mask does not change between the 6th and 7th write
+ * and its color was prepared by the last iteration.
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+
+ /* enable test pattern */
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp for one color gradation;
+ * 1 gradation of 6-bit color is 2^10 gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 6,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, inc_base + 2,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 5,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, 0);
+
+ /* enable test pattern */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool dce120_arm_vert_intr(
+ struct timing_generator *tg,
+ uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
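+ /* program vertical interrupt 0 to fire over a window of 'width'
+ * lines starting at v_blank_start
+ */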
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START, v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END, v_blank_start + width);
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce120_tg_funcs = {
+ .validate_timing = dce120_tg_validate_timing,
+ .program_timing = dce120_tg_program_timing,
+ .enable_crtc = dce120_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ /* never called */
+ .get_position = dce120_timing_generator_get_crtc_position,
+ .get_frame_count = dce120_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce120_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce120_timing_generator_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = dce120_tg_wait_for_state,
+ .set_blank = dce120_tg_set_blank,
+ .is_blanked = dce120_tg_is_blanked,
+ /* never called */
+ .set_colors = dce120_tg_set_colors,
+ .set_overscan_blank_color = dce120_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce120_timing_generator_program_blank_color,
+ .disable_vga = dce120_timing_generator_disable_vga,
+ .did_triggered_reset_occur = dce120_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock = dce120_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce120_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce120_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock = dce120_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request = dce120_timing_generator_enable_advanced_request,
+ .set_drr = dce120_timing_generator_set_drr,
+ .set_static_screen_control = dce120_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce120_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce120_arm_vert_intr,
+};
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce120_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC0_CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC0_CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ /* CRTC requires a minimum HBLANK = 32 pixels and a
+ * minimum HSYNC = 8 pixels */
+ tg110->min_h_blank = 32;
+ /*DCE12_CRTC_Block_ARch.doc*/
+ tg110->min_h_front_porch = 0;
+ tg110->min_h_back_porch = 0;
+
+ tg110->min_h_sync_width = 8;
+ tg110->min_v_sync_width = 1;
+ tg110->min_v_blank = 3;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
new file mode 100644
index 000000000000..549d70b23e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE120_H__
+#define __DC_TIMING_GENERATOR_DCE120_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+#include "dce110/dce110_timing_generator.h"
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+#endif /* __DC_TIMING_GENERATOR_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
new file mode 100644
index 000000000000..c1105895e5fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE80 = dce80_timing_generator.o dce80_compressor.o dce80_hw_sequencer.o \
+ dce80_resource.o
+
+AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE80)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
new file mode 100644
index 000000000000..951f2caba9b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+
+#include "include/logger_interface.h"
+#include "dce80_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp80->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp80->offsets.dmif_offset)
+
+static const struct dce80_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
+{
+ /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
+ return cp80->base.raw_size * cp80->base.banks_num *
+ cp80->base.dram_channels_num;
+}
+
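+/* Translate the compressor memory configuration (DRAM channels, banks,
+ * pipe interleave size and row size) into the matching
+ * LOW_POWER_TILING_CONTROL field values.
+ */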
+static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp80->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp80->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp80->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp80->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
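+/* Return true when the source view area exceeds the embedded panel area
+ * (only meaningful when the panel dimensions are known).
+ */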
+static bool is_source_bigger_than_epanel_size(
+ struct dce80_compressor *cp80,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp80->base.embedded_panel_h_size != 0 &&
+ cp80->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp80->base.embedded_panel_h_size *
+ cp80->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
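+/* Round the line width up to a whole number of 256-pixel chunks,
+ * e.g. 1920 -> 2048.
+ */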
+static uint32_t align_to_chunks_number_per_line(
+ struct dce80_compressor *cp80,
+ uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce80_compressor *cp80,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
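+ /* poll FBC_ENABLE_STATUS up to 10 times, 10 us apart */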
+ while (counter < 10) {
+ value = dm_read_reg(cp80->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce80_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce80_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp80,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+ /* Before enabling FBC, LPT must be enabled first if applicable.
+ * LPT state should always be changed (enable/disable) while FBC
+ * is disabled */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce8_one_lpt_channel_max_resolution)) {
+ dce80_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp80->offsets = reg_offsets[params->inst];
+
+ /* Toggle it as there is a bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp80, true);
+ }
+}
+
+void dce80_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ /* Whenever disabling FBC make sure LPT is disabled if LPT is
+ * supported */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce80_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp80, false);
+ }
+}
+
+bool dce80_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ return false;
+}
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp80);
+
+ if (lpt_alignment != 0) {
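+ /* round the low address part up to the next
+ * lpt_alignment boundary
+ */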
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE8 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce80_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce80_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+ /* Selection of Channel(s) containing Compressed Surface: 0xffffffff
+ * will disable LPT.
+ * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ channels + 1, /* not mentioned in programming guide,
+ but follow DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+ /* case 2:
+ * Use Channel 0 & 1 / Not used for DCE 11 */
+ case 1:
+ /*Use Channel 0 for LPT for DCE 11 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp80, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp80);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
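+ /* round the source height up to an even number of lines */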
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
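+ /* rows_per_channel = DIV_ROUND_UP(width * height * 4,
+ * lpt_alignment), open-coded below
+ */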
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 11 Frame Buffer Compression Implementation
+ */
+
+void dce80_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19)
+ * for DCE 11 regions cannot be used - does not work with S/G
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE8:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE8.1 also needs to set new DCE8.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_construct(struct dce80_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+ compressor->base.options.bits.LPT_SUPPORT = true;
+ /* For DCE 11 always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
+ * should not be supported */
+ if (compressor->base.memory_bus_width == 64)
+ compressor->base.options.bits.LPT_SUPPORT = false;
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx)
+{
+ struct dce80_compressor *cp80 =
+ kzalloc(sizeof(struct dce80_compressor), GFP_KERNEL);
+
+ if (!cp80)
+ return NULL;
+
+ dce80_compressor_construct(cp80, ctx);
+ return &cp80->base;
+}
+
+void dce80_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE80_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
new file mode 100644
index 000000000000..cca58b044402
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE80_H__
+#define __DC_COMPRESSOR_DCE80_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE80_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce80_compressor, base)
+
+struct dce80_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce80_compressor {
+ struct compressor base;
+ struct dce80_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx);
+
+void dce80_compressor_construct(struct dce80_compressor *cp80,
+ struct dc_context *ctx);
+
+void dce80_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce80_compressor_power_up_fbc(struct compressor *cp);
+
+void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce80_compressor_disable_fbc(struct compressor *cp);
+
+void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce80_compressor_enable_lpt(struct compressor *cp);
+
+void dce80_compressor_disable_lpt(struct compressor *cp);
+
+void dce80_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
new file mode 100644
index 000000000000..ccfcf1c0eeb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce80_hw_sequencer.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+struct dce80_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce80_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+ * by default when the command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+void dce80_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating;
+ dc->hwss.pipe_control_lock = dce_pipe_control_lock;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
new file mode 100644
index 000000000000..7a1b31def66f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE80_H__
+#define __DC_HWSS_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce80_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
new file mode 100644
index 000000000000..9c18efd3446f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -0,0 +1,1257 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "irq/dce80/irq_service_dce80.h"
+#include "dce110/dce110_timing_generator.h"
+#include "dce110/dce110_resource.h"
+#include "dce80/dce80_timing_generator.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce80/dce80_hw_sequencer.h"
+#include "dce100/dce100_resource.h"
+
+#include "reg_helper.h"
+
+/* TODO remove this include */
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE
+#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE
+#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE
+#endif
+
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE
+#endif
+
+
+#ifndef mmHPD_DC_HPD_CONTROL
+ #define mmHPD_DC_HPD_CONTROL 0x189A
+ #define mmHPD0_DC_HPD_CONTROL 0x189A
+ #define mmHPD1_DC_HPD_CONTROL 0x18A2
+ #define mmHPD2_DC_HPD_CONTROL 0x18AA
+ #define mmHPD3_DC_HPD_CONTROL 0x18B2
+ #define mmHPD4_DC_HPD_CONTROL 0x18BA
+ #define mmHPD5_DC_HPD_CONTROL 0x18C2
+#endif
+
+#define DCE11_DIG_FE_CNTL 0x4a00
+#define DCE11_DIG_BE_CNTL 0x4a47
+#define DCE11_DP_SEC 0x4ac3
+
+static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_COMMON_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE80(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE80_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_80_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_80(id),\
+}
+
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_81 = {
+ .num_timing_generator = 4,
+ .num_audio = 7,
+ .num_stream_encoder = 7,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_83 = {
+ .num_timing_generator = 2,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 2,
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce80_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce80_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct output_pixel_processor *dce80_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct stream_encoder *dce80_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE8_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE8_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE8_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce80_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce80_stream_encoder_create,
+ .create_hwseq = dce80_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce80_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce80_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce80_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->prescaler_on = false;
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 297000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce80_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct clock_source *dce80_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce80_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct input_pixel_processor *dce80_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce80_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce80_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce80_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce80_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+	/* TODO: implement when needed; for now hardcode the max values */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
+static bool dce80_validate_surface_sets(
+ struct dc_state *context)
+{
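+	/* DCE8 supports a single plane per stream and no video (YUV) surface
+	 * formats, so reject any other combination.
+	 */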
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce80_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce80_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce80_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
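+	/* Validate a single stream in isolation: copy it into the context, map
+	 * pool and clock resources, build HW parameters and scaling, then check
+	 * bandwidth.
+	 */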
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ result = dce80_validate_bandwidth(dc, context);
+
+ return result;
+}
+
+static void dce80_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce80_res_pool_funcs = {
+ .destroy = dce80_destroy_resource_pool,
+ .link_enc_create = dce80_link_encoder_create,
+ .validate_guaranteed = dce80_validate_guaranteed,
+ .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce80_validate_global
+};
+
+static bool dce80_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *          Resource + asic cap hardcoding            *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
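+	/* If the BIOS reports an external clock source for DP, use it and keep
+	 * all three PLLs available for other displays; otherwise PLL0 is
+	 * reserved as the DP clock source.
+	 */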
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce80_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce81_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_81;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *          Resource + asic cap hardcoding            *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_81.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce81_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce83_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_83;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *          Resource + asic cap hardcoding            *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_83.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
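+	/* DCE8.3 only uses PLL1 and PLL2 here, so one fewer clock source is
+	 * registered than on DCE8.0/8.1.
+	 */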
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 2;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 1;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce83_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
new file mode 100644
index 000000000000..eff31ab83a39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE80_H__
+#define __DC_RESOURCE_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
new file mode 100644
index 000000000000..265894851493
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dc_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "../dce110/dce110_timing_generator.h"
+#include "dce80_timing_generator.h"
+
+#include "timing_generator.h"
+
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+
+ BLACK_COLOR_FORMAT_COUNT
+};
+
+static const struct dce110_timing_generator_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+}
+};
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
+
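+/* Program the per-pipe pixel duration used by DMIF arbitration, derived
+ * from the pixel clock.
+ */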
+void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
+{
+ uint64_t pix_dur;
+ uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
+ + DCE110TG_FROM_TG(tg)->offsets.dmif;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (pix_clk_khz == 0)
+ return;
+
+ pix_dur = 1000000000 / pix_clk_khz;
+
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (!use_vbios)
+ program_pix_dur(tg, timing->pix_clk_khz);
+
+ dce110_tg_program_timing(tg, timing, use_vbios);
+}
+
+static const struct timing_generator_funcs dce80_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .set_drr = dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+
+ /* DCE8.0 overrides */
+ .enable_advanced_request =
+ dce80_timing_generator_enable_advanced_request,
+};
+
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+ tg110->offsets = *offsets;
+ tg110->derived_offsets = reg_offsets[instance];
+
+ tg110->base.funcs = &dce80_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
+
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
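+	/* Advanced request disables the legacy requestor. With a very short
+	 * vertical sync + front porch (3 lines or fewer), prefetch is disabled
+	 * and a lower advanced start line position is used.
+	 */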
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
new file mode 100644
index 000000000000..9cebb24c94c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE80_H__
+#define __DC_TIMING_GENERATOR_DCE80_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* DCE8.0 implementation inherits from DCE11.0 */
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/******** HW programming ************/
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
new file mode 100644
index 000000000000..ebeb88283a14
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for DCN.
+
+DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_hubp.o dcn10_mpc.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o
+
+AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
new file mode 100644
index 000000000000..7f579cb19f4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+
+#include "dcn10_cm_common.h"
+
+#define REG(reg) reg
+
+#define CTX \
+ ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ reg->shifts.field_name, reg->masks.field_name
+
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg;
+ unsigned int i = 0;
+
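+	/* Each CSC register holds a pair of coefficients; walk the register
+	 * range and write consecutive pairs from regval.
+	 */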
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34;
+ cur_csc_reg++) {
+
+ const uint16_t *regval0 = &(regval[2 * i]);
+ const uint16_t *regval1 = &(regval[(2 * i) + 1]);
+
+ REG_SET_2(cur_csc_reg, 0,
+ csc_c11, *regval0,
+ csc_c12, *regval1);
+
+ i++;
+ }
+
+}
+
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg)
+{
+ uint32_t reg_region_cur;
+ unsigned int i = 0;
+
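+	/* Program the piecewise-linear transfer function: start point and
+	 * linear slope, end point with end slope/base for each channel, then
+	 * the LUT offset and segment count for every curve region.
+	 */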
+ REG_SET_2(reg->start_cntl_b, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_g, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_r, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+
+ REG_SET(reg->start_slope_cntl_b, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_g, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_r, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+
+ REG_SET(reg->start_end_cntl1_b, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_b, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_g, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_g, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_r, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_r, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ for (reg_region_cur = reg->region_start;
+ reg_region_cur <= reg->region_end;
+ reg_region_cur++) {
+
+ const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
+ const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
+
+ REG_SET_4(reg_region_cur, 0,
+ exp_region0_lut_offset, curve0->offset,
+ exp_region0_num_segments, curve0->segments_num,
+ exp_region1_lut_offset, curve1->offset,
+ exp_region1_num_segments, curve1->segments_num);
+
+ i++;
+ }
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
new file mode 100644
index 000000000000..64836dcf21f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN10_CM_COMMON_H__
+#define __DAL_DCN10_CM_COMMON_H__
+
+#define TF_HELPER_REG_FIELD_LIST(type) \
+ type exp_region0_lut_offset; \
+ type exp_region0_num_segments; \
+ type exp_region1_lut_offset; \
+ type exp_region1_num_segments;\
+ type field_region_end;\
+ type field_region_end_slope;\
+ type field_region_end_base;\
+ type exp_region_start;\
+ type exp_resion_start_segment;\
+ type field_region_linear_slope
+
+#define TF_CM_REG_FIELD_LIST(type) \
+ type csc_c11; \
+ type csc_c12
+
+struct xfer_func_shift {
+ TF_HELPER_REG_FIELD_LIST(uint8_t);
+};
+
+struct xfer_func_mask {
+ TF_HELPER_REG_FIELD_LIST(uint32_t);
+};
+
+struct xfer_func_reg {
+ struct xfer_func_shift shifts;
+ struct xfer_func_mask masks;
+
+ uint32_t start_cntl_b;
+ uint32_t start_cntl_g;
+ uint32_t start_cntl_r;
+ uint32_t start_slope_cntl_b;
+ uint32_t start_slope_cntl_g;
+ uint32_t start_slope_cntl_r;
+ uint32_t start_end_cntl1_b;
+ uint32_t start_end_cntl2_b;
+ uint32_t start_end_cntl1_g;
+ uint32_t start_end_cntl2_g;
+ uint32_t start_end_cntl1_r;
+ uint32_t start_end_cntl2_r;
+ uint32_t region_start;
+ uint32_t region_end;
+};
+
+struct cm_color_matrix_shift {
+ TF_CM_REG_FIELD_LIST(uint8_t);
+};
+
+struct cm_color_matrix_mask {
+ TF_CM_REG_FIELD_LIST(uint32_t);
+};
+
+struct color_matrices_reg{
+ struct cm_color_matrix_shift shifts;
+ struct cm_color_matrix_mask masks;
+
+ uint32_t csc_c11_c12;
+ uint32_t csc_c33_c34;
+};
+
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg);
+
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
new file mode 100644
index 000000000000..74e7c82bdc76
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum pixel_format_description {
+ PIXEL_FORMAT_FIXED = 0,
+ PIXEL_FORMAT_FIXED16,
+ PIXEL_FORMAT_FLOAT
+
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculates the scaling ratio and initial phase;
+	 * DSCL_MODE_SEL must be set to 1
+	 */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal performs auto centering without replication;
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal performs auto centering and auto replication;
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
+/* Program gamut remap in bypass mode */
+void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+{
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ /* Gamut remap in bypass */
+}
+
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ uint32_t pixel_width;
+
+ if (scl_data->viewport.width > scl_data->recout.width)
+ pixel_width = scl_data->recout.width;
+ else
+ pixel_width = scl_data->viewport.width;
+
+ /* TODO: add lb check */
+
+	/* No support for programming a ratio of exactly 4; drop to 3.99999... */
+ if (scl_data->ratios.horz.value == (4ll << 32))
+ scl_data->ratios.horz.value--;
+ if (scl_data->ratios.vert.value == (4ll << 32))
+ scl_data->ratios.vert.value--;
+ if (scl_data->ratios.horz_c.value == (4ll << 32))
+ scl_data->ratios.horz_c.value--;
+ if (scl_data->ratios.vert_c.value == (4ll << 32))
+ scl_data->ratios.vert_c.value--;
+
+ /* Set default taps if none are provided */
+ if (in_taps->h_taps == 0)
+ scl_data->taps.h_taps = 4;
+ else
+ scl_data->taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0)
+ scl_data->taps.v_taps = 4;
+ else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0)
+ scl_data->taps.v_taps_c = 2;
+ else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0)
+ scl_data->taps.h_taps_c = 2;
+ /* Only 1 and even h_taps_c are supported by hw */
+ else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+		/*
+		 * The bandwidth spreadsheet doesn't handle taps_c == 1 properly;
+		 * chroma must always be scaled for bandwidth validation to pass.
+		 */
+ }
+
+ return true;
+}
+
+void dpp_reset(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp->filter_h_c = NULL;
+ dpp->filter_v_c = NULL;
+ dpp->filter_h = NULL;
+ dpp->filter_v = NULL;
+
+ /* set boundary mode to 0 */
+ REG_SET(DSCL_CONTROL, 0, SCL_BOUNDARY_MODE, 0);
+}
+
+
+
+static void dpp1_cm_set_regamma_pwl(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp1_cm_power_on_regamma_lut(dpp_base, true);
+ dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
+
+ if (dpp->is_write_to_ram_a_safe)
+ dpp1_cm_program_regamma_luta_settings(dpp_base, params);
+ else
+ dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
+
+ dpp1_cm_program_regamma_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+}
+
+static void dpp1_cm_set_regamma_mode(
+ struct dpp *dpp_base,
+ enum opp_regamma mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t re_mode = 0;
+	uint32_t obuf_bypass = 0; /* needed for pipe split */
+ uint32_t obuf_hupscale = 0;
+
+ switch (mode) {
+ case OPP_REGAMMA_BYPASS:
+ re_mode = 0;
+ break;
+ case OPP_REGAMMA_SRGB:
+ re_mode = 1;
+ break;
+ case OPP_REGAMMA_3_6:
+ re_mode = 2;
+ break;
+ case OPP_REGAMMA_USER:
+ re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4;
+ dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe;
+ break;
+ default:
+ break;
+ }
+
+ REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode);
+ REG_UPDATE_2(OBUF_CONTROL,
+ OBUF_BYPASS, obuf_bypass,
+ OBUF_H_2X_UPSCALE_EN, obuf_hupscale);
+}
+
+static void dpp1_setup_format_flags(enum surface_pixel_format input_format,
+ enum pixel_format_description *fmt)
+{
+
+ if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
+ input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
+ *fmt = PIXEL_FORMAT_FLOAT;
+ else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ *fmt = PIXEL_FORMAT_FIXED16;
+ else
+ *fmt = PIXEL_FORMAT_FIXED;
+}
+
+static void dpp1_set_degamma_format_float(
+ struct dpp *dpp_base,
+ bool is_float)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (is_float) {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1);
+ } else {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0);
+ }
+}
+
+void dpp1_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode)
+{
+ uint32_t pixel_format;
+ uint32_t alpha_en;
+	enum pixel_format_description fmt;
+ enum dc_color_space color_space;
+ enum dcn10_input_csc_select select;
+ bool is_float;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool force_disable_cursor = false;
+
+ dpp1_setup_format_flags(input_format, &fmt);
+ alpha_en = 1;
+ pixel_format = 0;
+ color_space = COLOR_SPACE_SRGB;
+ select = INPUT_CSC_SELECT_BYPASS;
+ is_float = false;
+
+ switch (fmt) {
+ case PIXEL_FORMAT_FIXED:
+ case PIXEL_FORMAT_FIXED16:
+ /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 0);
+ break;
+ case PIXEL_FORMAT_FLOAT:
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 1);
+ is_float = true;
+ break;
+ default:
+
+ break;
+ }
+
+ dpp1_set_degamma_format_float(dpp_base, is_float);
+
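+	/* Map the surface format to the CNVC hardware pixel-format encoding;
+	 * the numeric values below are the encodings expected by the
+	 * CNVC_SURFACE_PIXEL_FORMAT register.
+	 */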
+ switch (input_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ default:
+ break;
+ }
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ dpp1_program_input_csc(dpp_base, color_space, select);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+}
+
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dc_cursor_color_format color_format = attr->color_format;
+
+ REG_UPDATE_2(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+
+ /* TODO: Fixed vs float */
+
+ REG_UPDATE_3(FORMAT_CONTROL,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 1,
+ FORMAT_EXPANSION_MODE, 0);
+}
+
+
+void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ if (src_x_offset >= (int)param->viewport_width)
+		cur_en = 0;  /* not visible beyond right edge */
+
+ if (src_x_offset + (int)width < 0)
+		cur_en = 0;  /* not visible beyond left edge */
+
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, cur_en);
+
+}
+
+static const struct dpp_funcs dcn10_dpp_funcs = {
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .opp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
+ .opp_set_csc_default = dpp1_cm_set_output_csc_default,
+ .opp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
+ .opp_program_regamma_lut = dpp1_cm_program_regamma_lut,
+ .opp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
+ .opp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
+ .opp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
+ .opp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
+ .opp_set_regamma_mode = dpp1_cm_set_regamma_mode,
+ .ipp_set_degamma = dpp1_set_degamma,
+ .ipp_program_input_lut = dpp1_program_input_lut,
+ .ipp_program_degamma_pwl = dpp1_set_degamma_pwl,
+ .ipp_setup = dpp1_cnv_setup,
+ .ipp_full_bypass = dpp1_full_bypass,
+ .set_cursor_attributes = dpp1_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+};
+
+static struct dpp_caps dcn10_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT,
+ .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dpp1_construct(
+ struct dcn10_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn10_dpp_funcs;
+ dpp->base.caps = &dcn10_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
new file mode 100644
index 000000000000..a9782b1aba47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -0,0 +1,1386 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPP_DCN10_H__
+#define __DAL_DPP_DCN10_H__
+
+#include "dpp.h"
+
+#define TO_DCN10_DPP(dpp)\
+ container_of(dpp, struct dcn10_dpp, base)
+
+/* TODO: Use correct number of taps. Using polaris values for now */
+#define LB_TOTAL_NUMBER_OF_ENTRIES 5124
+#define LB_BITS_PER_ENTRY 144
+
+#define TF_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+//Used to resolve corner case
+#define TF2_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## _ ## field_name ## post_fix
+
+#define TF_REG_LIST_DCN(id) \
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI(OTG_H_BLANK, DSCL, id), \
+ SRI(OTG_V_BLANK, DSCL, id), \
+ SRI(SCL_MODE, DSCL, id), \
+ SRI(LB_DATA_FORMAT, DSCL, id), \
+ SRI(LB_MEMORY_CTRL, DSCL, id), \
+ SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(SCL_BLACK_OFFSET, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
+ SRI(SCL_TAP_CONTROL, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI(DSCL_2TAP_CONTROL, DSCL, id), \
+ SRI(MPC_SIZE, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \
+ SRI(RECOUT_START, DSCL, id), \
+ SRI(RECOUT_SIZE, DSCL, id), \
+ SRI(OBUF_CONTROL, DSCL, id), \
+ SRI(CM_ICSC_CONTROL, CM, id), \
+ SRI(CM_ICSC_C11_C12, CM, id), \
+ SRI(CM_ICSC_C33_C34, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_DGAM_LUT_INDEX, CM, id), \
+ SRI(CM_DGAM_LUT_DATA, CM, id), \
+ SRI(CM_CONTROL, CM, id), \
+ SRI(CM_DGAM_CONTROL, CM, id), \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+
+
+#define TF_REG_LIST_DCN10(id) \
+ TF_REG_LIST_DCN(id), \
+ SRI(CM_COMA_C11_C12, CM, id),\
+ SRI(CM_COMA_C33_C34, CM, id),\
+ SRI(CM_COMB_C11_C12, CM, id),\
+ SRI(CM_COMB_C33_C34, CM, id),\
+ SRI(CM_OCSC_CONTROL, CM, id), \
+ SRI(CM_OCSC_C11_C12, CM, id), \
+ SRI(CM_OCSC_C33_C34, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_RGAM_LUT_DATA, CM, id), \
+ SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\
+ SRI(CM_RGAM_LUT_INDEX, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \
+ SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CM_CMOUT_CONTROL, CM, id)
+
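+/* Shift/mask initializer lists: one TF_SF entry per register field, used
+ * to populate both dcn_dpp_shift and dcn_dpp_mask below.
+ */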
+#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
+ TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_BYPASS, mask_sh), \
+ TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh)
+
+#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\
+ TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C11, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_H_2X_UPSCALE_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \
+ TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh)
+
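+/* Field list shared by the shift and mask structures: instantiated with
+ * uint8_t for per-field shifts and with uint32_t for per-field masks so
+ * the two stay in sync.
+ */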
+#define TF_REG_FIELD_LIST(type) \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type EXT_OVERSCAN_TOP; \
+ type OTG_H_BLANK_START; \
+ type OTG_H_BLANK_END; \
+ type OTG_V_BLANK_START; \
+ type OTG_V_BLANK_END; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type PIXEL_REDUCE_MODE; \
+ type DYNAMIC_PIXEL_DEPTH; \
+ type DITHER_EN; \
+ type INTERLEAVE_EN; \
+ type LB_DATA_FORMAT__ALPHA_EN; \
+ type MEMORY_CONFIG; \
+ type LB_MAX_PARTITIONS; \
+ type AUTOCAL_MODE; \
+ type AUTOCAL_NUM_PIPE; \
+ type AUTOCAL_PIPE_ID; \
+ type SCL_BLACK_OFFSET_RGB_Y; \
+ type SCL_BLACK_OFFSET_CBCR; \
+ type SCL_BOUNDARY_MODE; \
+ type SCL_V_NUM_TAPS; \
+ type SCL_H_NUM_TAPS; \
+ type SCL_V_NUM_TAPS_C; \
+ type SCL_H_NUM_TAPS_C; \
+ type SCL_COEF_RAM_TAP_PAIR_IDX; \
+ type SCL_COEF_RAM_PHASE; \
+ type SCL_COEF_RAM_FILTER_TYPE; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_COEF_RAM_ODD_TAP_COEF; \
+ type SCL_COEF_RAM_ODD_TAP_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_SHARP_EN; \
+ type SCL_H_2TAP_SHARP_FACTOR; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_V_2TAP_SHARP_EN; \
+ type SCL_V_2TAP_SHARP_FACTOR; \
+ type SCL_COEF_RAM_SELECT; \
+ type DSCL_MODE; \
+ type RECOUT_START_X; \
+ type RECOUT_START_Y; \
+ type RECOUT_WIDTH; \
+ type RECOUT_HEIGHT; \
+ type MPC_WIDTH; \
+ type MPC_HEIGHT; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_SCALE_RATIO_C; \
+ type SCL_V_SCALE_RATIO_C; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC_C; \
+ type SCL_H_INIT_INT_C; \
+ type SCL_V_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC_BOT; \
+ type SCL_V_INIT_INT_BOT; \
+ type SCL_V_INIT_FRAC_C; \
+ type SCL_V_INIT_INT_C; \
+ type SCL_V_INIT_FRAC_BOT_C; \
+ type SCL_V_INIT_INT_BOT_C; \
+ type SCL_CHROMA_COEF_MODE; \
+ type SCL_COEF_RAM_SELECT_CURRENT; \
+ type CM_GAMUT_REMAP_MODE; \
+ type CM_GAMUT_REMAP_C11; \
+ type CM_GAMUT_REMAP_C12; \
+ type CM_GAMUT_REMAP_C33; \
+ type CM_GAMUT_REMAP_C34; \
+ type CM_COMA_C11; \
+ type CM_COMA_C12; \
+ type CM_COMA_C33; \
+ type CM_COMA_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type CM_OCSC_MODE; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type RGAM_MEM_PWR_FORCE; \
+ type CM_RGAM_LUT_DATA; \
+ type CM_RGAM_LUT_WRITE_EN_MASK; \
+ type CM_RGAM_LUT_WRITE_SEL; \
+ type CM_RGAM_LUT_INDEX; \
+ type CM_RGAM_RAMB_EXP_REGION_START_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_R; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION_START_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_R; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_LUT_MODE; \
+ type CM_CMOUT_ROUND_TRUNC_MODE; \
+ type OBUF_BYPASS; \
+ type OBUF_H_2X_UPSCALE_EN; \
+ type CM_BLNDGAM_LUT_MODE; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_LUT_INDEX; \
+ type CM_BLNDGAM_LUT_DATA; \
+ type CM_3DLUT_MODE; \
+ type CM_3DLUT_SIZE; \
+ type CM_3DLUT_INDEX; \
+ type CM_3DLUT_DATA0; \
+ type CM_3DLUT_DATA1; \
+ type CM_3DLUT_DATA_30BIT; \
+ type CM_3DLUT_WRITE_EN_MASK; \
+ type CM_3DLUT_RAM_SEL; \
+ type CM_3DLUT_30BIT_EN; \
+ type CM_3DLUT_CONFIG_STATUS; \
+ type CM_3DLUT_READ_SEL; \
+ type CM_SHAPER_LUT_MODE; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_LUT_WRITE_SEL; \
+ type CM_SHAPER_LUT_INDEX; \
+ type CM_SHAPER_LUT_DATA; \
+ type CM_DGAM_CONFIG_STATUS; \
+ type CM_ICSC_MODE; \
+ type CM_ICSC_C11; \
+ type CM_ICSC_C12; \
+ type CM_ICSC_C33; \
+ type CM_ICSC_C34; \
+ type CM_DGAM_RAMB_EXP_REGION_START_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_R; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION_START_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_R; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type SHARED_MEM_PWR_DIS; \
+ type CM_IGAM_LUT_FORMAT_R; \
+ type CM_IGAM_LUT_FORMAT_G; \
+ type CM_IGAM_LUT_FORMAT_B; \
+ type CM_IGAM_LUT_HOST_EN; \
+ type CM_IGAM_LUT_RW_MODE; \
+ type CM_IGAM_LUT_WRITE_EN_MASK; \
+ type CM_IGAM_LUT_SEL; \
+ type CM_IGAM_LUT_SEQ_COLOR; \
+ type CM_IGAM_DGAM_CONFIG_STATUS; \
+ type CM_DGAM_LUT_WRITE_EN_MASK; \
+ type CM_DGAM_LUT_WRITE_SEL; \
+ type CM_DGAM_LUT_INDEX; \
+ type CM_DGAM_LUT_DATA; \
+ type CM_DGAM_LUT_MODE; \
+ type CM_IGAM_LUT_MODE; \
+ type CM_IGAM_INPUT_FORMAT; \
+ type CM_IGAM_LUT_RW_INDEX; \
+ type CM_BYPASS_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CNVC_BYPASS; \
+ type OUTPUT_FP; \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CURSOR_MODE; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_MODE; \
+ type CUR0_EXPANSION_MODE; \
+ type CUR0_ENABLE; \
+ type CM_BYPASS; \
+ type FORMAT_CONTROL__ALPHA_EN; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1
+
+struct dcn_dpp_shift {
+ TF_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_dpp_mask {
+ TF_REG_FIELD_LIST(uint32_t);
+};
+
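+/* Register offsets for one DPP instance (see dcn10_dpp::tf_regs). */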
+struct dcn_dpp_registers {
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t OTG_H_BLANK;
+ uint32_t OTG_V_BLANK;
+ uint32_t SCL_MODE;
+ uint32_t LB_DATA_FORMAT;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t DSCL_AUTOCAL;
+ uint32_t SCL_BLACK_OFFSET;
+ uint32_t DSCL_CONTROL;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_COEF_RAM_TAP_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t DSCL_2TAP_CONTROL;
+ uint32_t MPC_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_HORZ_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT_BOT;
+ uint32_t SCL_VERT_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C;
+ uint32_t RECOUT_START;
+ uint32_t RECOUT_SIZE;
+ uint32_t CM_GAMUT_REMAP_CONTROL;
+ uint32_t CM_GAMUT_REMAP_C11_C12;
+ uint32_t CM_GAMUT_REMAP_C33_C34;
+ uint32_t CM_COMA_C11_C12;
+ uint32_t CM_COMA_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t CM_OCSC_CONTROL;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_MEM_PWR_CTRL;
+ uint32_t CM_RGAM_LUT_DATA;
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_RGAM_LUT_INDEX;
+ uint32_t CM_RGAM_RAMB_START_CNTL_B;
+ uint32_t CM_RGAM_RAMB_START_CNTL_G;
+ uint32_t CM_RGAM_RAMB_START_CNTL_R;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMB_REGION_0_1;
+ uint32_t CM_RGAM_RAMB_REGION_32_33;
+ uint32_t CM_RGAM_RAMA_START_CNTL_B;
+ uint32_t CM_RGAM_RAMA_START_CNTL_G;
+ uint32_t CM_RGAM_RAMA_START_CNTL_R;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMA_REGION_0_1;
+ uint32_t CM_RGAM_RAMA_REGION_32_33;
+ uint32_t CM_RGAM_CONTROL;
+ uint32_t CM_CMOUT_CONTROL;
+ uint32_t OBUF_CONTROL;
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_BLNDGAM_CONTROL;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
+ uint32_t CM_BLNDGAM_LUT_INDEX;
+ uint32_t CM_BLNDGAM_LUT_DATA;
+ uint32_t CM_3DLUT_MODE;
+ uint32_t CM_3DLUT_INDEX;
+ uint32_t CM_3DLUT_DATA;
+ uint32_t CM_3DLUT_DATA_30BIT;
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL;
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
+ uint32_t CM_SHAPER_CONTROL;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_REGION_0_1;
+ uint32_t CM_SHAPER_RAMB_REGION_2_3;
+ uint32_t CM_SHAPER_RAMB_REGION_4_5;
+ uint32_t CM_SHAPER_RAMB_REGION_6_7;
+ uint32_t CM_SHAPER_RAMB_REGION_8_9;
+ uint32_t CM_SHAPER_RAMB_REGION_10_11;
+ uint32_t CM_SHAPER_RAMB_REGION_12_13;
+ uint32_t CM_SHAPER_RAMB_REGION_14_15;
+ uint32_t CM_SHAPER_RAMB_REGION_16_17;
+ uint32_t CM_SHAPER_RAMB_REGION_18_19;
+ uint32_t CM_SHAPER_RAMB_REGION_20_21;
+ uint32_t CM_SHAPER_RAMB_REGION_22_23;
+ uint32_t CM_SHAPER_RAMB_REGION_24_25;
+ uint32_t CM_SHAPER_RAMB_REGION_26_27;
+ uint32_t CM_SHAPER_RAMB_REGION_28_29;
+ uint32_t CM_SHAPER_RAMB_REGION_30_31;
+ uint32_t CM_SHAPER_RAMB_REGION_32_33;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_REGION_0_1;
+ uint32_t CM_SHAPER_RAMA_REGION_2_3;
+ uint32_t CM_SHAPER_RAMA_REGION_4_5;
+ uint32_t CM_SHAPER_RAMA_REGION_6_7;
+ uint32_t CM_SHAPER_RAMA_REGION_8_9;
+ uint32_t CM_SHAPER_RAMA_REGION_10_11;
+ uint32_t CM_SHAPER_RAMA_REGION_12_13;
+ uint32_t CM_SHAPER_RAMA_REGION_14_15;
+ uint32_t CM_SHAPER_RAMA_REGION_16_17;
+ uint32_t CM_SHAPER_RAMA_REGION_18_19;
+ uint32_t CM_SHAPER_RAMA_REGION_20_21;
+ uint32_t CM_SHAPER_RAMA_REGION_22_23;
+ uint32_t CM_SHAPER_RAMA_REGION_24_25;
+ uint32_t CM_SHAPER_RAMA_REGION_26_27;
+ uint32_t CM_SHAPER_RAMA_REGION_28_29;
+ uint32_t CM_SHAPER_RAMA_REGION_30_31;
+ uint32_t CM_SHAPER_RAMA_REGION_32_33;
+ uint32_t CM_SHAPER_LUT_INDEX;
+ uint32_t CM_SHAPER_LUT_DATA;
+ uint32_t CM_ICSC_CONTROL;
+ uint32_t CM_ICSC_C11_C12;
+ uint32_t CM_ICSC_C33_C34;
+ uint32_t CM_DGAM_RAMB_START_CNTL_B;
+ uint32_t CM_DGAM_RAMB_START_CNTL_G;
+ uint32_t CM_DGAM_RAMB_START_CNTL_R;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMB_REGION_0_1;
+ uint32_t CM_DGAM_RAMB_REGION_14_15;
+ uint32_t CM_DGAM_RAMA_START_CNTL_B;
+ uint32_t CM_DGAM_RAMA_START_CNTL_G;
+ uint32_t CM_DGAM_RAMA_START_CNTL_R;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMA_REGION_0_1;
+ uint32_t CM_DGAM_RAMA_REGION_14_15;
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_DGAM_LUT_INDEX;
+ uint32_t CM_DGAM_LUT_DATA;
+ uint32_t CM_CONTROL;
+ uint32_t CM_DGAM_CONTROL;
+ uint32_t CM_IGAM_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_INDEX;
+ uint32_t CM_IGAM_LUT_SEQ_COLOR;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+};
+
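+/* DCN1.0 DPP: generic dpp base plus per-instance register, shift and mask
+ * tables and the scaler filter coefficients currently in use.
+ */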
+struct dcn10_dpp {
+ struct dpp base;
+
+ const struct dcn_dpp_registers *tf_regs;
+ const struct dcn_dpp_shift *tf_shift;
+ const struct dcn_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+};
+
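+/* Coefficient source selected by dpp1_program_input_csc(). */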
+enum dcn10_input_csc_select {
+ INPUT_CSC_SELECT_BYPASS = 0,
+ INPUT_CSC_SELECT_ICSC,
+ INPUT_CSC_SELECT_COMA
+};
+
+bool dpp1_dscl_is_lb_conf_valid(
+ int ceil_vratio,
+ int num_partitions,
+ int vtaps);
+
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a);
+
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a);
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select);
+
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+void dpp1_full_bypass(struct dpp *dpp_base);
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dpp_reset(struct dpp *dpp_base);
+
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a);
+
+/* program regamma RAM A */
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+/* program regamma RAM B */
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust);
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data);
+
+void dpp1_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+void dpp1_construct(struct dcn10_dpp *dpp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
new file mode 100644
index 000000000000..40627c244bf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+#include "dcn10_cm_common.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+struct dcn10_input_csc_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculates the scaling ratio and initial phase, and
+	 * DSCL_MODE_SEL must be set to 1
+	 */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal performs auto centering without replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal performs auto centering and auto replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
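+/* Per-color-space input CSC coefficient tables; 0x2000 encodes unity gain */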
+static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
+
+
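+/* Program one set of gamut remap coefficients (full, COMA or COMB) and select
+ * it; a NULL regval or GAMUT_REMAP_BYPASS bypasses the remap block.
+ */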
+static void program_gamut_remap(
+ struct dcn10_dpp *dpp,
+ const uint16_t *regval,
+ enum gamut_remap_select select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ case GAMUT_REMAP_COMB_COEFF:
+ selection = 3;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, selection);
+
+}
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+ else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
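+		/* Pack the 3x3 temperature matrix into a 3x4 layout; the 4th column (offset) is zero */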
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
+ }
+}
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust)
+{
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t ocsc_mode = 0;
+
+ if (default_adjust != NULL) {
+ switch (default_adjust->out_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ ocsc_mode = 0;
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ ocsc_mode = 1;
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ ocsc_mode = 2;
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ ocsc_mode = 3;
+ break;
+ case COLOR_SPACE_UNKNOWN:
+ default:
+ break;
+ }
+ }
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
+}
+
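+/* Fill the regamma shift/mask fields shared by the RAM A and RAM B register sets */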
+static void dpp1_cm_get_reg_field(
+ struct dcn10_dpp *dpp,
+ struct xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+}
+
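+/* Program output CSC coefficients into the OCSC or COMB register set, based on
+ * the currently selected CM_OCSC_MODE.
+ */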
+static void dpp1_cm_program_color_matrix(
+ struct dcn10_dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ uint32_t mode;
+ struct color_matrices_reg gam_regs;
+
+ REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
+
+ if (tbl_entry == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
+
+ if (mode == 4) {
+
+ gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ uint32_t ocsc_mode = 4;
+
+ /**
+ *if (tbl_entry != NULL) {
+ * switch (tbl_entry->color_space) {
+ * case COLOR_SPACE_SRGB:
+ * case COLOR_SPACE_2020_RGB_FULLRANGE:
+ * ocsc_mode = 0;
+ * break;
+ * case COLOR_SPACE_SRGB_LIMITED:
+ * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ * ocsc_mode = 1;
+ * break;
+ * case COLOR_SPACE_YCBCR601:
+ * case COLOR_SPACE_YCBCR601_LIMITED:
+ * ocsc_mode = 2;
+ * break;
+ * case COLOR_SPACE_YCBCR709:
+ * case COLOR_SPACE_YCBCR709_LIMITED:
+ * case COLOR_SPACE_2020_YCBCR:
+ * ocsc_mode = 3;
+ * break;
+ * case COLOR_SPACE_UNKNOWN:
+ * default:
+ * break;
+ * }
+ *}
+ */
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+ dpp1_cm_program_color_matrix(dpp, tbl_entry);
+}
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+		RGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
+
+}
+
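+/* Stream the regamma PWL points (base and delta values for R, G and B) into the LUT data register */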
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+	for (i = 0; i < num; i++) {
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+ CM_RGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+		CM_RGAM_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
+ REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
+}
+
+/* Program regamma RAM A */
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+
+}
+
+/* Program regamma RAM B */
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
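+/* Program the input CSC for the given color space into the ICSC or COMA
+ * register set, or put it in bypass.
+ */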
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int i;
+ int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t selection = 1;
+ struct color_matrices_reg gam_regs;
+
+ if (select == INPUT_CSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ for (i = 0; i < arr_size; i++)
+ if (dcn10_input_csc_matrix[i].color_space == color_space) {
+ regval = dcn10_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (select == INPUT_CSC_SELECT_COMA)
+ selection = 2;
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, selection);
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+
+ if (select == INPUT_CSC_SELECT_ICSC) {
+
+ gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+/* Program degamma RAM B */
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);
+
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/* Program degamma RAM A */
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+		SHARED_MEM_PWR_DIS, power_on ? 0 : 1);
+
+}
+
+static void dpp1_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
+ REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
+}
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ dpp1_enable_cm_block(dpp_base);
+
+ switch (mode) {
+ case IPP_DEGAMMA_MODE_BYPASS:
+		/* Setting degamma bypass for now */
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
+ break;
+ case IPP_DEGAMMA_MODE_HW_sRGB:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
+ break;
+ case IPP_DEGAMMA_MODE_HW_xvYCC:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (use_ram_a)
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ else
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);
+
+}
+
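+/* Report which degamma RAM is currently active from the DGAM config status;
+ * returns false if neither RAM is in use.
+ */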
+static bool dpp1_degamma_ram_inuse(
+ struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool ret = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ if (status_reg == 9) {
+ *ram_a_inuse = true;
+ ret = true;
+ } else if (status_reg == 10) {
+ *ram_a_inuse = false;
+ ret = true;
+ }
+ return ret;
+}
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
+ CM_DGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
+		is_ram_a ? 0 : 1);
+
+ REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+	for (i = 0; i < num; i++) {
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+ }
+}
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ bool is_ram_a = true;
+
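+	/* Program the degamma RAM that is not currently in use, then switch to it */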
+ dpp1_power_on_degamma_lut(dpp_base, true);
+ dpp1_enable_cm_block(dpp_base);
+ dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
+	if (is_ram_a)
+ dpp1_program_degamma_lutb_settings(dpp_base, params);
+ else
+ dpp1_program_degamma_luta_settings(dpp_base, params);
+
+ dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
+ params->hw_points_num, !is_ram_a);
+ dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp1_full_bypass(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Input pixel format: ARGB8888 */
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, 0x8);
+
+ /* Zero expansion */
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 0,
+ FORMAT_EXPANSION_MODE, 0);
+
+	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0; this should be the default */
+ if (dpp->tf_mask->CM_BYPASS_EN)
+ REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
+
+ /* Setting degamma bypass for now */
+ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
+}
+
+static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool in_use = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
+ if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
+ *ram_a_inuse = true;
+ in_use = true;
+ // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
+ } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
+ *ram_a_inuse = false;
+ in_use = true;
+ }
+ return in_use;
+}
+
+/*
+ * Input gamma LUT currently supports 256 values only. This means input color
+ * can have a maximum of 8 bits per channel (= 256 possible values) in order to
+ * have a one-to-one mapping with the LUT. Truncation will occur with color
+ * values greater than 8 bits.
+ *
+ * In the future, this function should support additional input gamma methods,
+ * such as piecewise linear mapping, and input gamma bypass.
+ */
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool rama_occupied = false;
+ uint32_t ram_num;
+ // Power on LUT memory.
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
+ dpp1_enable_cm_block(dpp_base);
+ // Determine whether to use RAM A or RAM B
+ dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
+ if (!rama_occupied)
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
+ else
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
+ // RW mode is 256-entry LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
+ // IGAM Input format should be 8 bits per channel.
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
+ // Do not mask any R,G,B values
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
+ // LUT-256, unsigned, integer, new u0.12 format
+ REG_UPDATE_3(
+ CM_IGAM_CONTROL,
+ CM_IGAM_LUT_FORMAT_R, 3,
+ CM_IGAM_LUT_FORMAT_G, 3,
+ CM_IGAM_LUT_FORMAT_B, 3);
+ // Start at index 0 of IGAM LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+ // Power off LUT memory
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
+ // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
+ REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
new file mode 100644
index 000000000000..cbad36410b32
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculates the scaling ratio and initial phase, and
+	 * DSCL_MODE_SEL must be set to 1
+	 */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal performs auto centering without replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal performs auto centering and auto replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
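+/* Program external overscan from the recout position within the active region;
+ * negative right/bottom values are clamped to zero.
+ */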
+static void dpp1_dscl_set_overscan(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *data)
+{
+ uint32_t left = data->recout.x;
+ uint32_t top = data->recout.y;
+
+ int right = data->h_active - data->recout.x - data->recout.width;
+ int bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (right < 0) {
+ BREAK_TO_DEBUGGER();
+ right = 0;
+ }
+ if (bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ bottom = 0;
+ }
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, left,
+ EXT_OVERSCAN_RIGHT, right);
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_BOTTOM, bottom,
+ EXT_OVERSCAN_TOP, top);
+}
+
+static void dpp1_dscl_set_otg_blank(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t h_blank_start = data->h_active;
+ uint32_t h_blank_end = 0;
+ uint32_t v_blank_start = data->v_active;
+ uint32_t v_blank_end = 0;
+
+ REG_SET_2(OTG_H_BLANK, 0,
+ OTG_H_BLANK_START, h_blank_start,
+ OTG_H_BLANK_END, h_blank_end);
+
+ REG_SET_2(OTG_V_BLANK, 0,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+}
+
+static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 0; /* 10 bpc */
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 1; /* 8 bpc */
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 2; /* 6 bpc */
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 3; /* 12 bpc */
+ else {
+ ASSERT(0);
+ return -1; /* Unsupported */
+ }
+}
+
+static bool dpp1_dscl_is_video_format(enum pixel_format format)
+{
+ if (format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool dpp1_dscl_is_420_format(enum pixel_format format)
+{
+ if (format == PIXEL_FORMAT_420BPP8 ||
+ format == PIXEL_FORMAT_420BPP10)
+ return true;
+ else
+ return false;
+}
+
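+/* Choose the DSCL mode: bypass when no scaling is needed (or FP16 on
+ * fixed-format hardware), otherwise a 4:4:4 or 4:2:0 scaling mode.
+ */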
+static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
+ struct dpp *dpp_base,
+ const struct scaler_data *data,
+ bool dbg_always_scale)
+{
+ const long long one = dal_fixed31_32_one.value;
+
+ if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL is processing data in fixed format */
+ if (data->format == PIXEL_FORMAT_FP16)
+ return DSCL_MODE_DSCL_BYPASS;
+ }
+
+ if (data->ratios.horz.value == one
+ && data->ratios.vert.value == one
+ && data->ratios.horz_c.value == one
+ && data->ratios.vert_c.value == one
+ && !dbg_always_scale)
+ return DSCL_MODE_SCALING_444_BYPASS;
+
+ if (!dpp1_dscl_is_420_format(data->format)) {
+ if (dpp1_dscl_is_video_format(data->format))
+ return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ else
+ return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ }
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+}
+
+static void dpp1_dscl_set_lb(
+ struct dcn10_dpp *dpp,
+ const struct line_buffer_params *lb_params,
+ enum lb_memory_config mem_size_config)
+{
+ /* LB */
+ if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL caps: pixel data processed in fixed format */
+ uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth);
+ uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth;
+
+ REG_SET_7(LB_DATA_FORMAT, 0,
+ PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */
+ PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */
+ PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */
+ DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */
+ DITHER_EN, 0, /* Dithering enable: Disabled */
+ INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
+ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
+ }
+
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ MEMORY_CONFIG, mem_size_config,
+ LB_MAX_PARTITIONS, 63);
+}
+
+static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 8)
+ return get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
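+/* Write one coefficient set into the SCL coefficient RAM, one tap pair per write, for every phase */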
+static void dpp1_dscl_set_scaler_filter(
+ struct dcn10_dpp *dpp,
+ uint32_t taps,
+ enum dcn10_coef_filter_type_sel filter_type,
+ const uint16_t *filter)
+{
+ const int tap_pairs = (taps + 1) / 2;
+ int phase;
+ int pair;
+ uint16_t odd_coef, even_coef;
+
+ REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0,
+ SCL_COEF_RAM_TAP_PAIR_IDX, 0,
+ SCL_COEF_RAM_PHASE, 0,
+ SCL_COEF_RAM_FILTER_TYPE, filter_type);
+
+ for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ for (pair = 0; pair < tap_pairs; pair++) {
+ even_coef = filter[phase * taps + 2 * pair];
+ if ((pair * 2 + 1) < taps)
+ odd_coef = filter[phase * taps + 2 * pair + 1];
+ else
+ odd_coef = 0;
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ /* Even tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
+ /* Write/read control for even coefficient */
+ SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
+ /* Odd tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
+ /* Write/read control for odd coefficient */
+ SCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
+ }
+ }
+
+}
+
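+/* Select between hardcoded 2-tap coefficients and RAM-based filters;
+ * reprogram the coefficient RAM only when the filters change.
+ */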
+static void dpp1_dscl_set_scl_filter(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data,
+ bool chroma_coef_mode)
+{
+ bool h_2tap_hardcode_coef_en = false;
+ bool v_2tap_hardcode_coef_en = false;
+ bool h_2tap_sharp_en = false;
+ bool v_2tap_sharp_en = false;
+ uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz;
+ uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert;
+ bool coef_ram_current;
+ const uint16_t *filter_h = NULL;
+ const uint16_t *filter_v = NULL;
+ const uint16_t *filter_h_c = NULL;
+ const uint16_t *filter_v_c = NULL;
+
+ h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3
+ && scl_data->taps.h_taps_c < 3
+ && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1);
+ v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3
+ && scl_data->taps.v_taps_c < 3
+ && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1);
+
+ h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0;
+ v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0;
+
+ REG_UPDATE_6(DSCL_2TAP_CONTROL,
+ SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en,
+ SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en,
+ SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor,
+ SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en,
+ SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en,
+ SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor);
+
+ if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) {
+ bool filter_updated = false;
+
+ filter_h = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps, scl_data->ratios.horz);
+ filter_v = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps, scl_data->ratios.vert);
+
+ filter_updated = (filter_h && (filter_h != dpp->filter_h))
+ || (filter_v && (filter_v != dpp->filter_v));
+
+ if (chroma_coef_mode) {
+ filter_h_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps_c, scl_data->ratios.horz_c);
+ filter_v_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps_c, scl_data->ratios.vert_c);
+ filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c))
+ || (filter_v_c && (filter_v_c != dpp->filter_v_c));
+ }
+
+ if (filter_updated) {
+ uint32_t scl_mode = REG_READ(SCL_MODE);
+
+ if (!h_2tap_hardcode_coef_en && filter_h) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps,
+ SCL_COEF_LUMA_HORZ_FILTER, filter_h);
+ }
+ dpp->filter_h = filter_h;
+ if (!v_2tap_hardcode_coef_en && filter_v) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps,
+ SCL_COEF_LUMA_VERT_FILTER, filter_v);
+ }
+ dpp->filter_v = filter_v;
+ if (chroma_coef_mode) {
+ if (!h_2tap_hardcode_coef_en && filter_h_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps_c,
+ SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
+ }
+ if (!v_2tap_hardcode_coef_en && filter_v_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps_c,
+ SCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
+ }
+ }
+ dpp->filter_h_c = filter_h_c;
+ dpp->filter_v_c = filter_v_c;
+
+ coef_ram_current = get_reg_field_value_ex(
+ scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT,
+ dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT);
+
+ /* Swap coefficient RAM and set chroma coefficient mode */
+ REG_SET_2(SCL_MODE, scl_mode,
+ SCL_COEF_RAM_SELECT, !coef_ram_current,
+ SCL_CHROMA_COEF_MODE, chroma_coef_mode);
+ }
+ }
+}
+
+static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 10;
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 8;
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 6;
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 12;
+ else {
+ BREAK_TO_DEBUGGER();
+ return -1; /* Unsupported */
+ }
+}
+
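+/* Compute how many line-buffer partitions (lines) fit for luma and chroma in the given LB memory configuration */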
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+ int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 816;
+ lb_memory_size_c = 816;
+ lb_memory_size_a = 984;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1088;
+ lb_memory_size_c = 1088;
+ lb_memory_size_a = 1312;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ /* 420 mode: using 3rd mem from Y, Cr and Cb */
+ lb_memory_size = 816 + 1088 + 848 + 848 + 848;
+ lb_memory_size_c = 816 + 1088;
+ lb_memory_size_a = 984 + 1312 + 456;
+ } else {
+ lb_memory_size = 816 + 1088 + 848;
+ lb_memory_size_c = 816 + 1088 + 848;
+ lb_memory_size_a = 984 + 1312 + 456;
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+
+ if (scl_data->lb_params.alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 64)
+ *num_part_y = 64;
+ if (*num_part_c > 64)
+ *num_part_c = 64;
+
+}
+
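+/* An LB configuration is valid if it holds enough lines for the requested
+ * vertical taps; vertical ratios above 2 need extra lines.
+ */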
+bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps)
+{
+ if (ceil_vratio > 2)
+ return vtaps <= (num_partitions - ceil_vratio + 2);
+ else
+ return vtaps <= num_partitions;
+}
+
+/* Find the first matching configuration that meets the minimum required LB size */
+static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ int num_part_y, num_part_c;
+ int vtaps = scl_data->taps.v_taps;
+ int vtaps_c = scl_data->taps.v_taps_c;
+ int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
+ int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
+ enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
+
+ if (dpp->base.ctx->dc->debug.use_max_lb)
+ return mem_cfg;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_1;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_2;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8
+ || scl_data->format == PIXEL_FORMAT_420BPP10) {
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_3;
+ }
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
+
+	/* Ensure we can support the requested number of vtaps */
+ ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c));
+
+ return LB_MEMORY_CONFIG_0;
+}
+
+void dpp1_dscl_set_scaler_auto_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ dpp1_dscl_set_overscan(dpp, scl_data);
+
+ dpp1_dscl_set_otg_blank(dpp, scl_data);
+
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* TODO: v_min */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+	else
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
+
+
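+/* Program scale ratios and initial phases (integer and fractional parts) for luma and chroma */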
+static void dpp1_dscl_set_manual_ratio_init(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t init_frac = 0;
+ uint32_t init_int = 0;
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
+ SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
+ SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
+
+	/*
+	 * 0.24 fraction format: a 0.19 value shifted left by 5, so the lowest five bits are zero
+	 */
+ init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h);
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_FRAC, init_frac,
+ SCL_H_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h_c);
+ REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
+ SCL_H_INIT_FRAC_C, init_frac,
+ SCL_H_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v);
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_FRAC, init_frac,
+ SCL_V_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+ SCL_V_INIT_FRAC_BOT, init_frac,
+ SCL_V_INIT_INT_BOT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c);
+ REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
+ SCL_V_INIT_FRAC_C, init_frac,
+ SCL_V_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+ SCL_V_INIT_FRAC_BOT_C, init_frac,
+ SCL_V_INIT_INT_BOT_C, init_int);
+}
+
+
+
+static void dpp1_dscl_set_recout(
+ struct dcn10_dpp *dpp, const struct rect *recout)
+{
+ REG_SET_2(RECOUT_START, 0,
+ /* First pixel of RECOUT */
+ RECOUT_START_X, recout->x,
+ /* First line of RECOUT */
+ RECOUT_START_Y, recout->y);
+
+ REG_SET_2(RECOUT_SIZE, 0,
+ /* Number of RECOUT horizontal pixels */
+ RECOUT_WIDTH, recout->width,
+ /* Number of RECOUT vertical lines */
+ RECOUT_HEIGHT, recout->height
+ - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
+ (dpp->base.inst + 1));
+}
+
+/* Main function to program scaler and line buffer in manual scaling mode */
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ /* Recout */
+ dpp1_dscl_set_recout(dpp, &scl_data->recout);
+
+ /* MPC Size */
+ REG_SET_2(MPC_SIZE, 0,
+ /* Number of horizontal pixels of MPC */
+ MPC_WIDTH, scl_data->h_active,
+ /* Number of vertical lines of MPC */
+ MPC_HEIGHT, scl_data->v_active);
+
+ /* SCL mode */
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ /* LB */
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+	else
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ /* Manually calculate scale ratio and init values */
+ dpp1_dscl_set_manual_ratio_init(dpp, scl_data);
+
+ /* HTaps/VTaps */
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
new file mode 100644
index 000000000000..b13dee64e0ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -0,0 +1,960 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn10_hubp.h"
+
+#define REG(reg)\
+ hubp1->mi_regs->reg
+
+#define CTX \
+ hubp1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp1->mi_shift->field_name, hubp1->mi_mask->field_name
+
+void hubp1_set_blank(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, blank_en);
+
+ if (blank) {
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = 0xf;
+ }
+}
+
+static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en);
+}
+
+static void hubp1_vready_workaround(struct hubp *hubp,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ uint32_t value = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	/* set HUBPREQ_DEBUG_DB[12] = 1 */
+ value = REG_READ(HUBPREQ_DEBUG_DB);
+
+ /* hack mode disable */
+ value |= 0x100;
+ value &= ~0x1000;
+
+ if ((pipe_dest->vstartup_start - 2*(pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+		/* if (eco_fix_needed(otg_global_sync_timing))
+		 * set HUBPREQ_DEBUG_DB[12] = 1 */
+ value |= 0x1000;
+ }
+
+ REG_WRITE(HUBPREQ_DEBUG_DB, value);
+}
+
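+/* Program the GFX9 tiling parameters (pipe/bank/SE/RB counts as log2 values) and swizzle configuration */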
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp1,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_6(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ NUM_BANKS, log_2(info->gfx9.num_banks),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ NUM_SE, log_2(info->gfx9.num_shader_engines),
+ NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+
+ REG_UPDATE_4(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, info->gfx9.meta_linear,
+ RB_ALIGNED, info->gfx9.rb_aligned,
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+}
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp1,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ pitch = plane_size->video.luma_pitch - 1;
+ meta_pitch = dcc->video.meta_pitch_l - 1;
+ pitch_c = plane_size->video.chroma_pitch - 1;
+ meta_pitch_c = dcc->video.meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->grph.surface_pitch - 1;
+ meta_pitch = dcc->grph.meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+
+	/* Program rotation angle and horizontal mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp1,
+ enum surface_pixel_format format)
+{
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+	/* Mapping is the same as IPP programming (CNVC) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 22);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+	/* No need to program the crossbar (xbar) in DCN 1.0 */
+}
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* program flip type */
+ REG_SET(DCSURF_FLIP_CONTROL, 0,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+	/* HW automatically latches the rest of the address registers on write to
+	 * DCSURF_PRIMARY_SURFACE_ADDRESS when SURFACE_UPDATE_LOCK is not used.
+	 *
+	 * Program the high address first and then the low address; order matters!
+	 */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+		 * based on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ if (flip_immediate)
+ hubp->current_address = *address;
+
+ return true;
+}
+
+void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ bool independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_size_and_rotation(
+ hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp1, format);
+}
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
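+	/* request sizing is programmed per plane: DCHUBP_REQ_SIZE_CONFIG covers
+	 * the luma plane (rq_regs_l), the _C variant covers chroma (rq_regs_c)
+	 */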
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
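+	/* only one of the two prefetch register spellings exists in a given set
+	 * of register headers; program whichever one is defined
+	 */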
+ if (REG(PREFETCH_SETTINS))
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ else
+ REG_SET_2(PREFETCH_SETTINGS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ else
+ REG_SET(PREFETCH_SETTINGS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assumed surf0 is luma and 1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+}
+
+static void hubp1_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+	/* OTG is locked when this function is called; registers are double
+	 * buffered, so disabling the requestors is not needed.
+	 */
+ hubp1_program_requestor(hubp, rq_regs);
+ hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp1_vready_workaround(hubp, pipe_dest);
+}
+
+bool hubp1_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
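+	/* a flip is still outstanding if HW reports it pending or if the
+	 * surface currently in use is not yet the last one requested
+	 */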
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ hubp->current_address = hubp->request_address;
+ return false;
+}
+
+uint32_t aperture_default_system = 1;
+uint32_t context0_default_system; /* = 0;*/
+
+static void hubp1_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
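+	/* convert byte addresses to 4KB page units (drop the low 12 bits)
+	 * before programming the aperture registers
+	 */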
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 12;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 12;
+
+ REG_SET_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, aperture_default_system, /* 1 = system physical memory */
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mc_vm_apt_low.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mc_vm_apt_low.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mc_vm_apt_high.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mc_vm_apt_high.low_part);
+}
+
+static void hubp1_set_vm_context0_settings(struct hubp *hubp,
+ const struct vm_context0_param *vm0)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ /* pte base */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, vm0->pte_base.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, vm0->pte_base.low_part);
+
+ /* pte start */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, vm0->pte_start.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, vm0->pte_start.low_part);
+
+ /* pte end */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, vm0->pte_end.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, vm0->pte_end.low_part);
+
+ /* fault handling */
+ REG_SET_2(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, vm0->fault_default.high_part,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, context0_default_system);
+ REG_SET(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, vm0->fault_default.low_part);
+
+ /* control: enable VM PTE*/
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 3);
+}
+
+void min_set_viewport(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+ PRI_VIEWPORT_WIDTH, viewport->width,
+ PRI_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+ PRI_VIEWPORT_X_START, viewport->x,
+ PRI_VIEWPORT_Y_START, viewport->y);
+
+ /*for stereo*/
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+ SEC_VIEWPORT_WIDTH, viewport->width,
+ SEC_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+ SEC_VIEWPORT_X_START, viewport->x,
+ SEC_VIEWPORT_Y_START, viewport->y);
+
+ /* DC supports NV12 only at the moment */
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+ PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+ PRI_VIEWPORT_X_START_C, viewport_c->x,
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s)
+{
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+}
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
+static bool ippn10_cursor_program_control(
+ struct dcn10_hubp *hubp1,
+ bool pixel_data_invert,
+ enum dc_cursor_color_format color_format)
+{
+ if (REG(CURSOR_SETTINS))
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ else
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ return true;
+}
+
+static enum cursor_pitch ippn10_get_cursor_pitch(
+ unsigned int pitch)
+{
+ enum cursor_pitch hw_pitch;
+
+ switch (pitch) {
+ case 64:
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ case 128:
+ hw_pitch = CURSOR_PITCH_128_PIXELS;
+ break;
+ case 256:
+ hw_pitch = CURSOR_PITCH_256_PIXELS;
+ break;
+ default:
+ DC_ERR("Invalid cursor pitch of %d. "
+ "Only 64/128/256 is supported on DCN.\n", pitch);
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ }
+ return hw_pitch;
+}
+
+static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+ unsigned int cur_width,
+ enum dc_cursor_color_format format)
+{
+ enum cursor_lines_per_chunk line_per_chunk;
+
+ if (format == CURSOR_MODE_MONO)
+ /* impl B. expansion in CUR Buffer reader */
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 32)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 64)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
+ else if (cur_width <= 128)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
+ else
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
+
+ return line_per_chunk;
+}
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ attr->width, attr->color_format);
+
+ hubp->curs_attr = *attr;
+
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+ REG_UPDATE_3(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+ ippn10_cursor_program_control(hubp1,
+ attr->attribute_flags.bits.INVERT_PIXEL_DATA,
+ attr->color_format);
+}
+
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+ uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+
+ /*
+	 * Guard against cursor_set_position() being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
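+	/* scale the offset from the pixel clock domain to the reference clock domain */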
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
+ dal_fixed31_32_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+	if (src_x_offset >= (int)param->viewport_width)
+		cur_en = 0;  /* not visible beyond right edge */
+
+	if (src_x_offset + (int)hubp->curs_attr.width < 0)
+		cur_en = 0;  /* not visible beyond left edge */
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+}
+
+static struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_program_surface_flip_and_addr =
+ hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config =
+ hubp1_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp1_setup,
+ .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
+ .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_hubp_blank_en = hubp1_set_hubp_blank_en,
+ .set_cursor_attributes = hubp1_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask)
+{
+ hubp1->base.funcs = &dcn10_hubp_funcs;
+ hubp1->base.ctx = ctx;
+ hubp1->mi_regs = mi_regs;
+ hubp1->mi_shift = mi_shift;
+ hubp1->mi_mask = mi_mask;
+ hubp1->base.inst = inst;
+ hubp1->base.opp_id = 0xf;
+ hubp1->base.mpcc_id = 0xf;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
new file mode 100644
index 000000000000..66db453c801b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -0,0 +1,683 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN10_H__
+#define __DC_MEM_INPUT_DCN10_H__
+
+#include "hubp.h"
+
+#define TO_DCN10_HUBP(hubp)\
+ container_of(hubp, struct dcn10_hubp, base)
+
+#define MI_REG_LIST_DCN(id)\
+ SRI(DCHUBP_CNTL, HUBP, id),\
+ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
+ SRI(DCSURF_TILING_CONFIG, HUBP, id),\
+ SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_PITCH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONFIG, HUBP, id),\
+ SRI(DCSURF_FLIP_CONTROL, HUBPREQ, id),\
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+ SRI(HUBPRET_CONTROL, HUBPRET, id),\
+ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id),\
+ SRI(BLANK_OFFSET_0, HUBPREQ, id),\
+ SRI(BLANK_OFFSET_1, HUBPREQ, id),\
+ SRI(DST_DIMENSIONS, HUBPREQ, id),\
+ SRI(DST_AFTER_SCALER, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_0, HUBPREQ, id),\
+ SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
+ SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
+ SRI(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+
+#define MI_REG_LIST_DCN10(id)\
+ MI_REG_LIST_DCN(id),\
+ SRI(PREFETCH_SETTINS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, HUBPREQ, id),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
+
+
+struct dcn_mi_registers {
+ uint32_t DCHUBP_CNTL;
+ uint32_t HUBPREQ_DEBUG_DB;
+ uint32_t DCSURF_ADDR_CONFIG;
+ uint32_t DCSURF_TILING_CONFIG;
+ uint32_t DCSURF_SURFACE_PITCH;
+ uint32_t DCSURF_SURFACE_PITCH_C;
+ uint32_t DCSURF_SURFACE_CONFIG;
+ uint32_t DCSURF_FLIP_CONTROL;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_PRI_VIEWPORT_START;
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_SEC_VIEWPORT_START;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
+ uint32_t DCSURF_PRI_VIEWPORT_START_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SURFACE_INUSE;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_INUSE_C;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t HUBPRET_CONTROL;
+ uint32_t DCN_EXPANSION_MODE;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
+ uint32_t BLANK_OFFSET_0;
+ uint32_t BLANK_OFFSET_1;
+ uint32_t DST_DIMENSIONS;
+ uint32_t DST_AFTER_SCALER;
+ uint32_t PREFETCH_SETTINS;
+ uint32_t PREFETCH_SETTINGS;
+ uint32_t VBLANK_PARAMETERS_0;
+ uint32_t REF_FREQ_TO_PIX_FREQ;
+ uint32_t VBLANK_PARAMETERS_1;
+ uint32_t VBLANK_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_0;
+ uint32_t NOM_PARAMETERS_1;
+ uint32_t NOM_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_5;
+ uint32_t PER_LINE_DELIVERY_PRE;
+ uint32_t PER_LINE_DELIVERY;
+ uint32_t PREFETCH_SETTINS_C;
+ uint32_t PREFETCH_SETTINGS_C;
+ uint32_t VBLANK_PARAMETERS_2;
+ uint32_t VBLANK_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_2;
+ uint32_t NOM_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_6;
+ uint32_t NOM_PARAMETERS_7;
+ uint32_t DCN_TTU_QOS_WM;
+ uint32_t DCN_GLOBAL_TTU_CNTL;
+ uint32_t DCN_SURF0_TTU_CNTL0;
+ uint32_t DCN_SURF0_TTU_CNTL1;
+ uint32_t DCN_SURF1_TTU_CNTL0;
+ uint32_t DCN_SURF1_TTU_CNTL1;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_MX_L1_TLB_CNTL;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCN_VM_FB_LOCATION_TOP;
+ uint32_t DCN_VM_FB_LOCATION_BASE;
+ uint32_t DCN_VM_FB_OFFSET;
+ uint32_t DCN_VM_AGP_BASE;
+ uint32_t DCN_VM_AGP_BOT;
+ uint32_t DCN_VM_AGP_TOP;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
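+/* token-paste the register and field names into the generated
+ * <reg>__<field><post_fix> define (post_fix is typically __SHIFT or _MASK)
+ */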
+#define MI_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define MI_MASK_SH_LIST_DCN(mask_sh)\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_RB_PER_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ MI_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
+
+#define MI_MASK_SH_LIST_DCN10(mask_sh)\
+ MI_MASK_SH_LIST_DCN(mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
+#define DCN_MI_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
+ type HUBP_TTU_DISABLE;\
+ type HUBP_NO_OUTSTANDING_REQ;\
+ type HUBP_UNDERFLOW_STATUS;\
+ type NUM_PIPES;\
+ type NUM_BANKS;\
+ type PIPE_INTERLEAVE;\
+ type NUM_SE;\
+ type NUM_RB_PER_SE;\
+ type MAX_COMPRESSED_FRAGS;\
+ type SW_MODE;\
+ type META_LINEAR;\
+ type RB_ALIGNED;\
+ type PIPE_ALIGNED;\
+ type PITCH;\
+ type META_PITCH;\
+ type PITCH_C;\
+ type META_PITCH_C;\
+ type ROTATION_ANGLE;\
+ type H_MIRROR_EN;\
+ type SURFACE_PIXEL_FORMAT;\
+ type SURFACE_FLIP_TYPE;\
+ type SURFACE_UPDATE_LOCK;\
+ type SURFACE_FLIP_PENDING;\
+ type PRI_VIEWPORT_WIDTH; \
+ type PRI_VIEWPORT_HEIGHT; \
+ type PRI_VIEWPORT_X_START; \
+ type PRI_VIEWPORT_Y_START; \
+ type SEC_VIEWPORT_WIDTH; \
+ type SEC_VIEWPORT_HEIGHT; \
+ type SEC_VIEWPORT_X_START; \
+ type SEC_VIEWPORT_Y_START; \
+ type PRI_VIEWPORT_WIDTH_C; \
+ type PRI_VIEWPORT_HEIGHT_C; \
+ type PRI_VIEWPORT_X_START_C; \
+ type PRI_VIEWPORT_Y_START_C; \
+ type PRIMARY_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_SURFACE_ADDRESS;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_SURFACE_ADDRESS;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_META_SURFACE_ADDRESS;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_META_SURFACE_ADDRESS;\
+ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_ADDRESS_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS;\
+ type SURFACE_INUSE_ADDRESS_HIGH;\
+ type SURFACE_INUSE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS_HIGH_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_TMZ;\
+ type PRIMARY_SURFACE_DCC_EN;\
+ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
+ type DET_BUF_PLANE1_BASE_ADDRESS;\
+ type CROSSBAR_SRC_CB_B;\
+ type CROSSBAR_SRC_CR_R;\
+ type DRQ_EXPANSION_MODE;\
+ type PRQ_EXPANSION_MODE;\
+ type MRQ_EXPANSION_MODE;\
+ type CRQ_EXPANSION_MODE;\
+ type CHUNK_SIZE;\
+ type MIN_CHUNK_SIZE;\
+ type META_CHUNK_SIZE;\
+ type MIN_META_CHUNK_SIZE;\
+ type DPTE_GROUP_SIZE;\
+ type MPTE_GROUP_SIZE;\
+ type SWATH_HEIGHT;\
+ type PTE_ROW_HEIGHT_LINEAR;\
+ type CHUNK_SIZE_C;\
+ type MIN_CHUNK_SIZE_C;\
+ type META_CHUNK_SIZE_C;\
+ type MIN_META_CHUNK_SIZE_C;\
+ type DPTE_GROUP_SIZE_C;\
+ type MPTE_GROUP_SIZE_C;\
+ type SWATH_HEIGHT_C;\
+ type PTE_ROW_HEIGHT_LINEAR_C;\
+ type REFCYC_H_BLANK_END;\
+ type DLG_V_BLANK_END;\
+ type MIN_DST_Y_NEXT_START;\
+ type REFCYC_PER_HTOTAL;\
+ type REFCYC_X_AFTER_SCALER;\
+ type DST_Y_AFTER_SCALER;\
+ type DST_Y_PREFETCH;\
+ type VRATIO_PREFETCH;\
+ type DST_Y_PER_VM_VBLANK;\
+ type DST_Y_PER_ROW_VBLANK;\
+ type REF_FREQ_TO_PIX_FREQ;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_L;\
+ type REFCYC_PER_META_CHUNK_VBLANK_L;\
+ type DST_Y_PER_PTE_ROW_NOM_L;\
+ type REFCYC_PER_PTE_GROUP_NOM_L;\
+ type DST_Y_PER_META_ROW_NOM_L;\
+ type REFCYC_PER_META_CHUNK_NOM_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_C;\
+ type REFCYC_PER_LINE_DELIVERY_L;\
+ type REFCYC_PER_LINE_DELIVERY_C;\
+ type VRATIO_PREFETCH_C;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_C;\
+ type REFCYC_PER_META_CHUNK_VBLANK_C;\
+ type DST_Y_PER_PTE_ROW_NOM_C;\
+ type REFCYC_PER_PTE_GROUP_NOM_C;\
+ type DST_Y_PER_META_ROW_NOM_C;\
+ type REFCYC_PER_META_CHUNK_NOM_C;\
+ type QoS_LEVEL_LOW_WM;\
+ type QoS_LEVEL_HIGH_WM;\
+ type MIN_TTU_VBLANK;\
+ type QoS_LEVEL_FLIP;\
+ type REFCYC_PER_REQ_DELIVERY;\
+ type QoS_LEVEL_FIXED;\
+ type QoS_RAMP_DISABLE;\
+ type REFCYC_PER_REQ_DELIVERY_PRE;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR;\
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR;\
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
+struct dcn_mi_shift {
+ DCN_MI_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_mi_mask {
+ DCN_MI_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_hubp {
+ struct hubp base;
+ const struct dcn_mi_registers *mi_regs;
+ const struct dcn_mi_shift *mi_shift;
+ const struct dcn_mi_mask *mi_mask;
+};
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs);
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp,
+ enum surface_pixel_format format);
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format);
+
+void hubp1_dcc_control(struct hubp *hubp,
+ bool enable,
+ bool independent_64b_blks);
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+bool hubp1_is_flip_pending(struct hubp *hubp);
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp1_set_blank(struct hubp *hubp, bool blank);
+
+void min_set_viewport(struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask);
+
+
+struct dcn_hubp_state {
+ uint32_t pixel_format;
+ uint32_t inuse_addr_hi;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+ uint32_t rotation_angle;
+ uint32_t h_mirror_en;
+ uint32_t sw_mode;
+ uint32_t dcc_en;
+ uint32_t blank_en;
+ uint32_t underflow_status;
+ uint32_t ttu_disable;
+ uint32_t min_ttu_vblank;
+ uint32_t qos_level_low_wm;
+ uint32_t qos_level_high_wm;
+};
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
new file mode 100644
index 000000000000..961ad5c3b454
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -0,0 +1,2958 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "resource.h"
+#include "custom_float.h"
+#include "dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+#include "abm.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "reg_helper.h"
+#include "custom_float.h"
+#include "dcn10_hubp.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+static void log_mpc_crc(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(MPC_CRC_RESULT_GB))
+ DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
+ REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
+ if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
+ DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
+ REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
+}
+
+void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
+{
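+	/* convert refclk cycles to microseconds with one decimal digit,
+	 * assuming a 48 MHz debug reference clock
+	 */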
+ static const uint32_t ref_clk_mhz = 48;
+ static const unsigned int frac = 10;
+ uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
+
+ DTN_INFO("%d.%d \t ",
+ us_x10 / frac,
+ us_x10 % frac);
+}
+
+#define DTN_INFO_MICRO_SEC(ref_cycle) \
+ print_microsec(dc_ctx, ref_cycle)
+
+struct dcn_hubbub_wm_set {
+ uint32_t wm_set;
+ uint32_t data_urgent;
+ uint32_t pte_meta_urgent;
+ uint32_t sr_enter;
+ uint32_t sr_exit;
+ uint32_t dram_clk_chanage;
+};
+
+struct dcn_hubbub_wm {
+ struct dcn_hubbub_wm_set sets[4];
+};
+
+static void dcn10_hubbub_wm_read_state(struct dce_hwseq *hws,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn_hubbub_wm_set *s;
+
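+	/* the four hardware watermark sets A-D are reported as wm_set 0-3 */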
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+}
+
+static void dcn10_log_hubbub_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcn_hubbub_wm wm;
+ int i;
+
+ dcn10_hubbub_wm_read_state(dc->hwseq, &wm);
+
+ DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
+ "sr_enter \t sr_exit \t dram_clk_change \n");
+
+ for (i = 0; i < 4; i++) {
+ struct dcn_hubbub_wm_set *s;
+
+ s = &wm.sets[i];
+ DTN_INFO("WM_Set[%d]:\t ", s->wm_set);
+ DTN_INFO_MICRO_SEC(s->data_urgent);
+ DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
+ DTN_INFO_MICRO_SEC(s->sr_enter);
+ DTN_INFO_MICRO_SEC(s->sr_exit);
+ DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+ DTN_INFO("\n");
+ }
+
+ DTN_INFO("\n");
+}
+
+static void dcn10_log_hw_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc);
+
+ DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t "
+ "rotation \t mirror \t sw_mode \t "
+ "dcc_en \t blank_en \t ttu_dis \t underflow \t "
+ "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state s;
+
+ hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+
+ DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
+ "%xh \t %xh \t %xh \t "
+ "%d \t %d \t %d \t %xh \t",
+ i,
+ s.pixel_format,
+ s.inuse_addr_hi,
+ s.viewport_width,
+ s.viewport_height,
+ s.rotation_angle,
+ s.h_mirror_en,
+ s.sw_mode,
+ s.dcc_en,
+ s.blank_en,
+ s.ttu_disable,
+ s.underflow_status);
+ DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
+ DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
+ DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
+ "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
+
+ for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ //only print if OTG master is enabled
+ if ((s.otg_enabled & 1) == 0)
+ continue;
+
+ DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t ",
+ i,
+ s.v_blank_start,
+ s.v_blank_end,
+ s.v_sync_a_start,
+ s.v_sync_a_end,
+ s.v_sync_a_pol,
+ s.v_total_max,
+ s.v_total_min,
+ s.h_blank_start,
+ s.h_blank_end,
+ s.h_sync_a_start,
+ s.h_sync_a_end,
+ s.h_sync_a_pol,
+ s.h_total,
+ s.v_total,
+ s.underflow_occurred_status);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ log_mpc_crc(dc);
+
+ DTN_INFO_END();
+}
+
+static void verify_allow_pstate_change_high(
+ struct dce_hwseq *hws)
+{
+	/* pstate latency is ~20us, so if we wait over 40us and pstate allow
+	 * is still not asserted, we are probably stuck and going to hang.
+	 *
+	 * TODO: figure out why pstate takes around ~100us on Linux;
+	 * it is currently unknown why it takes that long there.
+	 */
+ static unsigned int pstate_wait_timeout_us = 200;
+ static unsigned int pstate_wait_expected_timeout_us = 40;
+ static unsigned int max_sampled_pstate_wait_us; /* data collection */
+ static bool forced_pstate_allow; /* help with revert wa */
+ static bool should_log_hw_state; /* prevent hw state log by default */
+
+ unsigned int debug_index = 0x7;
+ unsigned int debug_data;
+ unsigned int i;
+
+ if (forced_pstate_allow) {
+ /* we hacked to force pstate allow to prevent hang last time
+ * we verify_allow_pstate_change_high. so disable force
+ * here so we can check status
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
+ forced_pstate_allow = false;
+ }
+
+ /* description "3-0: Pipe0 cursor0 QOS
+ * 7-4: Pipe1 cursor0 QOS
+ * 11-8: Pipe2 cursor0 QOS
+ * 15-12: Pipe3 cursor0 QOS
+ * 16: Pipe0 Plane0 Allow Pstate Change
+ * 17: Pipe1 Plane0 Allow Pstate Change
+ * 18: Pipe2 Plane0 Allow Pstate Change
+ * 19: Pipe3 Plane0 Allow Pstate Change
+ * 20: Pipe0 Plane1 Allow Pstate Change
+ * 21: Pipe1 Plane1 Allow Pstate Change
+ * 22: Pipe2 Plane1 Allow Pstate Change
+ * 23: Pipe3 Plane1 Allow Pstate Change
+ * 24: Pipe0 cursor0 Allow Pstate Change
+ * 25: Pipe1 cursor0 Allow Pstate Change
+ * 26: Pipe2 cursor0 Allow Pstate Change
+ * 27: Pipe3 cursor0 Allow Pstate Change
+ * 28: WB0 Allow Pstate Change
+ * 29: WB1 Allow Pstate Change
+ * 30: Arbiter's allow_pstate_change
+ * 31: SOC pstate change request
+ */
+
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+
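+ /* bit 30 is the arbiter's allow_pstate_change (see the field map above) */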
+ if (debug_data & (1 << 30)) {
+
+ if (i > pstate_wait_expected_timeout_us)
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate took longer than expected ~%dus\n",
+ i);
+
+ return;
+ }
+ if (max_sampled_pstate_wait_us < i)
+ max_sampled_pstate_wait_us = i;
+
+ udelay(1);
+ }
+
+ /* force pstate allow to prevent system hang
+ * and break to debugger to investigate
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
+ forced_pstate_allow = true;
+
+ if (should_log_hw_state) {
+ dcn10_log_hw_state(hws->ctx->dc);
+ }
+
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate TEST_DEBUG_DATA: 0x%X\n",
+ debug_data);
+ BREAK_TO_DEBUGGER();
+}
+
+static void enable_dppclk(
+ struct dce_hwseq *hws,
+ uint8_t plane_id,
+ uint32_t requested_pix_clk,
+ bool dppclk_div)
+{
+ dm_logger_write(hws->ctx->logger, LOG_SURFACE,
+ "dppclk_rate_control for pipe %d programed to %d\n",
+ plane_id,
+ dppclk_div);
+
+ if (hws->shifts->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL[plane_id],
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL[plane_id],
+ DPP_CLOCK_ENABLE, 1);
+}
+
+static void enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable)
+{
+ bool force_on = true; /* disable power gating */
+
+ if (enable)
+ force_on = false;
+
+ /* DCHUBP0/1/2/3 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
+
+ /* DPP0/1/2/3 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
+}
+
+static void disable_vga(
+ struct dce_hwseq *hws)
+{
+ REG_WRITE(D1VGA_CONTROL, 0);
+ REG_WRITE(D2VGA_CONTROL, 0);
+ REG_WRITE(D3VGA_CONTROL, 0);
+ REG_WRITE(D4VGA_CONTROL, 0);
+}
+
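+/* Power gate or ungate one DPP domain. power_gate is 1 to gate, 0 to ungate;
+ * the PGFSM power status is then polled until it reads 0 (powered on) or
+ * 2 (power gated).
+ */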
+static void dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
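+/* Convert a watermark value given in nanoseconds to refclk cycles
+ * (wm_ns * refclk_mhz / 1000) and clamp it to the register width.
+ */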
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val;
+
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
+
+static void program_watermarks(
+ struct dce_hwseq *hws,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz)
+{
+ uint32_t force_en = hws->ctx->dc->debug.disable_stutter ? 1 : 0;
+ /*
+ * Need to clamp to max of the register values (i.e. no wrap)
+ * for dcn1, all wm registers are 21-bit wide
+ */
+ uint32_t prog_wm_value;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
+
+ /* Repeat for water mark set A, B, C and D. */
+ /* clock state A */
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+
+ /* clock state B */
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state C */
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state D */
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+
+ REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
+
+#if 0
+ REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+#endif
+}
+
+
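+/* Program the SDPIF FB/AGP apertures. The register fields take addresses in
+ * 4 MiB units, hence the byte addresses below are shifted right by 22.
+ */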
+static void dcn10_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /* For the ZFB case, DCHUB FB BASE and TOP must be programmed upside down (base above top) to indicate ZFB mode */
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
+ SDPIF_FB_TOP, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
+ SDPIF_FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, 0x03FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+static void hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ switch (hubp_inst) {
+ case 0: /* DCHUBP0 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG,
+ DOMAIN0_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN0_PG_STATUS,
+ DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DCHUBP1 */
+ REG_UPDATE(DOMAIN2_PG_CONFIG,
+ DOMAIN2_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN2_PG_STATUS,
+ DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DCHUBP2 */
+ REG_UPDATE(DOMAIN4_PG_CONFIG,
+ DOMAIN4_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN4_PG_STATUS,
+ DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DCHUBP3 */
+ REG_UPDATE(DOMAIN6_PG_CONFIG,
+ DOMAIN6_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN6_PG_STATUS,
+ DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+static void power_on_plane(
+ struct dce_hwseq *hws,
+ int plane_id)
+{
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, plane_id, true);
+ hubp_pg_control(hws, plane_id, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(hws->ctx->logger, LOG_DEBUG,
+ "Un-gated front end for pipe %d\n", plane_id);
+ }
+}
+
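+/* DEGVIDCN10_253 workaround helpers: apply_* ungates HUBP0 and clears its
+ * blank enable (skipped when stutter is disabled), undo_* blanks HUBP0 and
+ * power gates it again unless it is already gated.
+ */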
+static void undo_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+ int pwr_status = 0;
+
+ REG_GET(DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, &pwr_status);
+ /* Don't need to blank if hubp is power gated*/
+ if (pwr_status == 2)
+ return;
+
+ hubp->funcs->set_blank(hubp, true);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, false);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+}
+
+static void apply_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+
+ if (dc->debug.disable_stutter)
+ return;
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ hubp->funcs->set_hubp_blank_en(hubp, false);
+}
+
+static void bios_golden_init(struct dc *dc)
+{
+ struct dc_bios *bp = dc->ctx->dc_bios;
+ int i;
+
+ /* initialize dcn global */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0, ASIC_PIPE_INIT);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* initialize dcn per pipe */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
+ }
+}
+
+static void dcn10_init_hw(struct dc *dc)
+{
+ int i;
+ struct abm *abm = dc->res_pool->abm;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+ return;
+ }
+ /* end of FPGA. Below if real ASIC */
+
+ bios_golden_init(dc);
+
+ disable_vga(dc->hwseq);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc, &(dc->res_pool->opps[i]->mpc_tree),
+ dc->res_pool->opps[i]->inst, i);
+
+ /* Blank controller using driver code instead of
+ * command table.
+ */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+
+ /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+}
+
+static enum dc_status dcn10_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ bool enableStereo = stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE;
+ bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+
+
+ /* In the upper caller's loop pipe0 is the parent pipe and is called first.
+ * The back end is set up for pipe0; the other child pipes share pipe0's
+ * back end, so no programming is needed for them.
+ */
+ if (pipe_ctx->top_pipe != NULL)
+ return DC_OK;
+
+ /* TODO check if timing_changed, disable stream if timing changed */
+
+ /* The HW programming guide assumes the display has already been disabled
+ * by the unplug sequence and that the OTG is stopped.
+ */
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
+
+ if (!pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+ enableStereo,
+ rightEyePolarity);
+
+#if 0 /* move to after enable_crtc */
+ /* TODO: OPP FMT, ABM. etc. should be done here. */
+ /* or FPGA now. instance 0 only. TODO: move to opp.c */
+
+ inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+#endif
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+
+ /* VTG is within DCHUB command block. DCFCLK is always on */
+ if (!pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ /* TODO program crtc source select for non-virtual signal*/
+ /* TODO program FMT */
+ /* TODO setup link_enc */
+ /* TODO set stream attributes */
+ /* TODO program audio */
+ /* TODO enable stream if timing changed */
+ /* TODO unblank stream if DP */
+
+ return DC_OK;
+}
+
+static void reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ int i;
+
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ /* DPMS may already disable */
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ }
+
+ /* In the upper caller's loop the parent pipe (pipe0) is reset last. The
+ * back end is shared by all pipes and is disabled only when the parent
+ * pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
+ break;
+
+ if (i == dc->res_pool->pipe_count)
+ return;
+
+ pipe_ctx->stream = NULL;
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+/* trigger HW to start disconnecting the plane from the stream on the next vsync */
+static void plane_atomic_disconnect(struct dc *dc,
+ int fe_idx)
+{
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id, z_idx;
+ int mpcc_id = -1;
+
+ /* look at tree rather than mi here to know if we already reset */
+ for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
+ struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
+
+ for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
+ if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
+ mpcc_id = opp->mpc_tree.mpcc[z_idx];
+ break;
+ }
+ }
+ if (mpcc_id != -1)
+ break;
+ }
+ /*Already reset*/
+ if (opp_id == dc->res_pool->pipe_count)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ hubp->funcs->dcc_control(hubp, false, false);
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
+ dc->res_pool->opps[opp_id]->inst, fe_idx);
+}
+
+/* disable HW used by the plane.
+ * note: cannot be disabled until the disconnect is complete
+ */
+static void plane_atomic_disable(struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id = hubp->opp_id;
+
+ if (opp_id == 0xf)
+ return;
+
+ mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
+ dc->res_pool->opps[hubp->opp_id]->mpcc_disconnect_pending[hubp->mpcc_id] = false;
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: atomic disable finished on mpcc %d]\n",
+ fe_idx);*/
+
+ hubp->funcs->set_blank(hubp, true);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
+ HUBP_CLOCK_ENABLE, 0);
+ REG_UPDATE(DPP_CONTROL[fe_idx],
+ DPP_CLOCK_ENABLE, 0);
+
+ if (dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
+ REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
+ OPP_PIPE_CLOCK_EN, 0);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+/*
+ * kill power to the plane HW
+ * note: cannot power down until the plane is disabled
+ */
+static void plane_atomic_power_down(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+
+static void reset_front_end(
+ struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct timing_generator *tg;
+ int opp_id = dc->res_pool->hubps[fe_idx]->opp_id;
+
+ /*Already reset*/
+ if (opp_id == 0xf)
+ return;
+
+ tg = dc->res_pool->timing_generators[opp_id];
+ tg->funcs->lock(tg);
+
+ plane_atomic_disconnect(dc, fe_idx);
+
+ REG_UPDATE(OTG_GLOBAL_SYNC_STATUS[tg->inst], VUPDATE_NO_LOCK_EVENT_CLEAR, 1);
+ tg->funcs->unlock(tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(hws);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_GLOBAL_SYNC_STATUS[tg->inst],
+ VUPDATE_NO_LOCK_EVENT_OCCURRED, 1,
+ 1, 100000);
+
+ plane_atomic_disable(dc, fe_idx);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset front end %d\n",
+ fe_idx);
+}
+
+static void dcn10_power_down_fe(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ reset_front_end(dc, fe_idx);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
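+/* Reset pipes whose configuration changed between the current and the new
+ * context: lock the TGs, disconnect the planes, unlock, then disable and
+ * power down the front ends, and finally reset the back ends.
+ */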
+static void reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset Front End*/
+ /* Lock*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->lock(tg);
+ }
+ /* Disconnect*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ plane_atomic_disconnect(dc, i);
+ }
+ }
+ /* Unlock*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->unlock(tg);
+ }
+
+ /* Disable and Powerdown*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /*if (!pipe_ctx_old->stream)
+ continue;*/
+
+ if (pipe_ctx->stream && pipe_ctx->plane_state
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ plane_atomic_disable(dc, i);
+
+ if (!pipe_ctx->stream || !pipe_ctx->plane_state)
+ plane_atomic_power_down(dc, i);
+ }
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+
+}
+
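+/* For side-by-side / top-and-bottom stereo the secondary split pipe scans the
+ * right eye: temporarily swap in the right address and return true so the
+ * caller can restore the left address after programming the flip.
+ */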
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ } else {
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ }
+ }
+ return false;
+}
+
+static void toggle_watermark_change_req(struct dce_hwseq *hws)
+{
+ uint32_t watermark_change_req;
+
+ REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
+
+ watermark_change_req = !watermark_change_req;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+}
+
+static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &plane_state->address,
+ plane_state->flip_immediate);
+ plane_state->status.requested_address = plane_state->address;
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+static bool dcn10_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ const struct dc_transfer_func *tf = NULL;
+ bool result = true;
+
+ if (dpp_base == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ dpp_base->funcs->ipp_program_input_lut(dpp_base,
+ plane_state->gamma_correction);
+
+ if (tf == NULL)
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS*/
+ result = false;
+ }
+
+ return result;
+}
+/*modify the method to handle rgb for arr_points*/
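+/* Pack the regamma end points and curve points into the HW custom float
+ * formats: arr_points[0] uses an unsigned 6-bit exponent/12-bit mantissa,
+ * arr_points[1] an unsigned 6-bit exponent/10-bit mantissa, and the per-point
+ * colors and deltas a signed 6-bit exponent/12-bit mantissa format.
+ */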
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].slope,
+ &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_SEGMENTS 32
+
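+/* Build the PWL regamma curve for HW. seg_distr[k] holds log2 of the number
+ * of points in region k (-1 marks an unused region), so each used region
+ * contributes (1 << seg_distr[k]) points to hw_points.
+ */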
+static bool dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < 32 ; i++)
+ seg_distr[i] = 3;
+
+ segment_start = -25;
+ segment_end = 7;
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ * There are fewer than 256 points, for optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+
+ segment_start = -10;
+ segment_end = 0;
+ }
+
+ for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above: m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dcn10_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ if (dpp == NULL)
+ return false;
+
+ dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_SRGB);
+ } else if (dcn10_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &dpp->regamma_params)) {
+ dpp->funcs->opp_program_regamma_pwl(dpp, &dpp->regamma_params);
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_USER);
+ } else {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_BYPASS);
+ }
+
+ return true;
+}
+
+static void dcn10_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct hubp *hubp = dc->res_pool->hubps[pipe->pipe_idx];
+
+ /* use TG master update lock to lock everything on the TG
+ * therefore only top pipe need to lock
+ */
+ if (pipe->top_pipe)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+ /* To avoid an endless loop we wait at most
+ * frames_to_wait_on_triggered_reset frames for the reset to occur.
+ */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ int i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (!rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
+
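+/* Synchronize the OTGs in a timing group: arm the reset trigger on every
+ * non-master TG against the group master, wait for the triggered reset to
+ * occur, then disarm the triggers again.
+ */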
+static void dcn10_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ int i;
+
+ DC_SYNC_INFO("Setting up OTG reset trigger\n");
+
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, grouped_pipes[0]->stream_res.tg->inst);
+
+
+ DC_SYNC_INFO("Waiting for trigger\n");
+
+ /* We only need to check one pipe for the reset since all the others are
+ * synchronized. Look at the last pipe programmed to reset.
+ */
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("Sync complete\n");
+}
+
+static void print_rq_dlg_ttu(
+ struct dc *core_dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML TTU Output parameters [%d] ==============\n"
+ "qos_level_low_wm: %d, \n"
+ "qos_level_high_wm: %d, \n"
+ "min_ttu_vblank: %d, \n"
+ "qos_level_flip: %d, \n"
+ "refcyc_per_req_delivery_l: %d, \n"
+ "qos_level_fixed_l: %d, \n"
+ "qos_ramp_disable_l: %d, \n"
+ "refcyc_per_req_delivery_pre_l: %d, \n"
+ "refcyc_per_req_delivery_c: %d, \n"
+ "qos_level_fixed_c: %d, \n"
+ "qos_ramp_disable_c: %d, \n"
+ "refcyc_per_req_delivery_pre_c: %d\n"
+ "=============================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->ttu_regs.qos_level_low_wm,
+ pipe_ctx->ttu_regs.qos_level_high_wm,
+ pipe_ctx->ttu_regs.min_ttu_vblank,
+ pipe_ctx->ttu_regs.qos_level_flip,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
+ pipe_ctx->ttu_regs.qos_level_fixed_l,
+ pipe_ctx->ttu_regs.qos_ramp_disable_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
+ pipe_ctx->ttu_regs.qos_level_fixed_c,
+ pipe_ctx->ttu_regs.qos_ramp_disable_c,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML DLG Output parameters [%d] ==============\n"
+ "refcyc_h_blank_end: %d, \n"
+ "dlg_vblank_end: %d, \n"
+ "min_dst_y_next_start: %d, \n"
+ "refcyc_per_htotal: %d, \n"
+ "refcyc_x_after_scaler: %d, \n"
+ "dst_y_after_scaler: %d, \n"
+ "dst_y_prefetch: %d, \n"
+ "dst_y_per_vm_vblank: %d, \n"
+ "dst_y_per_row_vblank: %d, \n"
+ "ref_freq_to_pix_freq: %d, \n"
+ "vratio_prefetch: %d, \n"
+ "refcyc_per_pte_group_vblank_l: %d, \n"
+ "refcyc_per_meta_chunk_vblank_l: %d, \n"
+ "dst_y_per_pte_row_nom_l: %d, \n"
+ "refcyc_per_pte_group_nom_l: %d, \n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->dlg_regs.refcyc_h_blank_end,
+ pipe_ctx->dlg_regs.dlg_vblank_end,
+ pipe_ctx->dlg_regs.min_dst_y_next_start,
+ pipe_ctx->dlg_regs.refcyc_per_htotal,
+ pipe_ctx->dlg_regs.refcyc_x_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_prefetch,
+ pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
+ pipe_ctx->dlg_regs.dst_y_per_row_vblank,
+ pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
+ pipe_ctx->dlg_regs.vratio_prefetch,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\ndst_y_per_meta_row_nom_l: %d, \n"
+ "refcyc_per_meta_chunk_nom_l: %d, \n"
+ "refcyc_per_line_delivery_pre_l: %d, \n"
+ "refcyc_per_line_delivery_l: %d, \n"
+ "vratio_prefetch_c: %d, \n"
+ "refcyc_per_pte_group_vblank_c: %d, \n"
+ "refcyc_per_meta_chunk_vblank_c: %d, \n"
+ "dst_y_per_pte_row_nom_c: %d, \n"
+ "refcyc_per_pte_group_nom_c: %d, \n"
+ "dst_y_per_meta_row_nom_c: %d, \n"
+ "refcyc_per_meta_chunk_nom_c: %d, \n"
+ "refcyc_per_line_delivery_pre_c: %d, \n"
+ "refcyc_per_line_delivery_c: %d \n"
+ "========================================================\n",
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
+ pipe_ctx->dlg_regs.vratio_prefetch_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML RQ Output parameters [%d] ==============\n"
+ "chunk_size: %d \n"
+ "min_chunk_size: %d \n"
+ "meta_chunk_size: %d \n"
+ "min_meta_chunk_size: %d \n"
+ "dpte_group_size: %d \n"
+ "mpte_group_size: %d \n"
+ "swath_height: %d \n"
+ "pte_row_height_linear: %d \n"
+ "========================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->rq_regs.rq_regs_l.chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.swath_height,
+ pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
+ );
+}
+
+static void dcn10_power_on_fe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ power_on_plane(dc->hwseq,
+ pipe_ctx->pipe_idx);
+
+ /* enable DCFCLK for the current DCHUB pipe */
+ REG_UPDATE(HUBP_CLK_CNTL[pipe_ctx->pipe_idx],
+ HUBP_CLOCK_ENABLE, 1);
+
+ /* make sure OPP_PIPE_CLOCK_EN = 1 */
+ REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
+ OPP_PIPE_CLOCK_EN, 1);
+ /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
+
+ if (plane_state) {
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;\n",
+ pipe_ctx->pipe_idx,
+ plane_state,
+ plane_state->address.grph.addr.high_part,
+ plane_state->address.grph.addr.low_part,
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe %d: width, height, x, y format:%d\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ plane_state->format,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+ print_rq_dlg_ttu(dc, pipe_ctx);
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct dpp_grph_csc_adjustment adjust;
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
+}
+
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ }
+}
+static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ return false;
+}
+
+static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ return true;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ return false;
+ default:
+ /* Add a case to switch */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set border color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set border color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set border color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set border color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set border color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
+static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
+ struct vm_system_aperture_param *apt,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC physical_page_number;
+ uint32_t logical_addr_low;
+ uint32_t logical_addr_high;
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ LOGICAL_ADDR, &logical_addr_low);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ LOGICAL_ADDR, &logical_addr_high);
+
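+ /* PHYSICAL_PAGE_NUMBER is a 4 KiB page number (hence << 12) while
+ * LOGICAL_ADDR is stored in 256 KiB units (hence << 18).
+ */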
+ apt->sys_default.quad_part = physical_page_number.quad_part << 12;
+ apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
+ apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
+}
+
+/* Temporarily read the settings from registers; in the future the values will come from the KMD directly */
+static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
+ struct vm_context0_param *vm0,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ uint32_t fb_base_value;
+ uint32_t fb_offset_value;
+
+ REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
+ REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
+
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
+
+ /*
+	 * The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
+ * Therefore we need to do
+ * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
+ */
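+	/* DCHUBBUB_SDPIF_FB_BASE/OFFSET appear to be expressed in 16 MB units,
+	 * hence the shift by 24 to convert them to byte addresses (assumption).
+	 */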
+ fb_base.quad_part = (uint64_t)fb_base_value << 24;
+ fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
+ vm0->pte_base.quad_part += fb_base.quad_part;
+ vm0->pte_base.quad_part -= fb_offset.quad_part;
+}
+
+static void dcn10_program_pte_vm(struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation,
+ struct dce_hwseq *hws)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct vm_system_aperture_param apt = { {{ 0 } } };
+ struct vm_context0_param vm0 = { { { 0 } } };
+
+
+ mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
+ mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
+
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+ hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
+}
+
+static void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ union plane_size size = plane_state->plane_size;
+ struct mpcc_cfg mpcc_cfg = {0};
+ struct pipe_ctx *top_pipe;
+ bool per_pixel_alpha = plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+
+ /* TODO: proper fix once fpga works */
+ /* depends on DML calculation, DPP clock value may change dynamically */
+ enable_dppclk(
+ dc->hwseq,
+ pipe_ctx->pipe_idx,
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk,
+ context->bw.dcn.calc_clk.dppclk_div);
+ dc->current_state->bw.dcn.cur_clk.dppclk_div =
+ context->bw.dcn.calc_clk.dppclk_div;
+ context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
+
+	/* TODO: Need an input parameter to tell which OTG the current DCHUB
+	 * pipe is tied to. VTG is within DCHUBBUB, which is a common block
+	 * shared by each pipe HUBP. VTG has a 1:1 mapping with OTG; each pipe
+	 * HUBP selects which VTG to use.
+	 */
+ REG_UPDATE(DCHUBP_CNTL[pipe_ctx->pipe_idx], HUBP_VTG_SEL, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+
+ size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+
+ if (dc->config.gpu_vm_support)
+ dcn10_program_pte_vm(
+ pipe_ctx->plane_res.hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation,
+ hws
+ );
+
+ dpp->funcs->ipp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO);
+
+ mpcc_cfg.dpp_id = hubp->inst;
+ mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
+ mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
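+	/* z_index counts how many pipes sit above this one in the blend tree */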
+ for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
+ mpcc_cfg.z_index++;
+ if (dc->debug.surface_visual_confirm)
+ dcn10_get_surface_visual_confirm_color(
+ pipe_ctx, &mpcc_cfg.black_color);
+ else
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &mpcc_cfg.black_color);
+ mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && per_pixel_alpha;
+ hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
+ hubp->opp_id = mpcc_cfg.opp_id;
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+
+ hubp->funcs->mem_program_viewport(hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /*gamut remap*/
+ program_gamut_remap(pipe_ctx);
+
+ program_csc_matrix(pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix);
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (is_pipe_tree_visible(pipe_ctx))
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
+static void program_all_pipe_in_tree(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+
+ if (pipe_ctx->top_pipe == NULL) {
+
+		/* Lock otg_master_update to process all pipes associated with
+		 * this OTG. This is done only once.
+		 */
+ /* watermark is for all pipes */
+ program_watermarks(dc->hwseq, &context->bw.dcn.watermarks, ref_clk_mhz);
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+ struct dc_cursor_position position = { 0 };
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ dcn10_power_on_fe(dc, pipe_ctx, context);
+
+		/* temporary dcn1 workaround:
+		 * the watermark update requires a toggle after the a/b/c/d sets
+		 * are programmed. If the hubp is power gated, the wm value does
+		 * not get propagated to the hubp, so toggle after ungating to
+		 * ensure the wm reaches the hubp.
+		 *
+		 * final solution: we need SMU to do the toggle, since
+		 * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST is owned by SMU and we
+		 * should not have both driver and firmware accessing the same
+		 * register.
+		 */
+ toggle_watermark_change_req(dc->hwseq);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ /* TODO: this is a hack w/a for switching from mpo to pipe split */
+ dc_stream_set_cursor_position(pipe_ctx->stream, &position);
+
+ dc_stream_set_cursor_attributes(pipe_ctx->stream,
+ &pipe_ctx->stream->cursor_attributes);
+
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after each pipe is programmed */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
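+	/* recurse into the next lower (bottom) pipe of the same tree, if any */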
+ if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
+ program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+}
+
+static void dcn10_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync = false;/*todo*/
+ pp_display_cfg->nb_pstate_switch_disable = false;
+ pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->avail_mclk_switch_time_us =
+ context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
+ pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+static void optimize_shared_resources(struct dc *dc)
+{
+ if (dc->current_state->stream_count == 0) {
+ apply_DEGVIDCN10_253_wa(dc);
+ /* S0i2 message */
+ dcn10_pplib_apply_display_requirements(dc, dc->current_state);
+ }
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context)
+{
+ if (dc->current_state->stream_count == 0 &&
+ !dc->debug.disable_stutter)
+ undo_DEGVIDCN10_253_wa(dc);
+
+ /* S0i2 message */
+ if (dc->current_state->stream_count == 0 &&
+ context->stream_count != 0)
+ dcn10_pplib_apply_display_requirements(dc, context);
+}
+
+static void dcn10_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ ASSERT(be_idx != -1);
+
+ if (num_planes == 0) {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (old_pipe_ctx->stream_res.tg && old_pipe_ctx->stream_res.tg->inst == be_idx) {
+ old_pipe_ctx->stream_res.tg->funcs->set_blank(old_pipe_ctx->stream_res.tg, true);
+ dcn10_power_down_fe(dc, old_pipe_ctx->pipe_idx);
+ }
+ }
+ return;
+ }
+
+ /* reset unused mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
+ continue;
+
+ /*
+		 * Power gate reused pipes that are not yet power gated.
+		 * Fairly hacky right now; opp_id is used as the indicator.
+ */
+
+ if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
+ if (pipe_ctx->plane_res.hubp->opp_id != 0xf && pipe_ctx->stream_res.tg->inst == be_idx) {
+ dcn10_power_down_fe(dc, pipe_ctx->pipe_idx);
+ /*
+				 * power down fe will unlock when calling reset; we need
+				 * to lock it back here. Messy, needs rework.
+ */
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ }
+ }
+
+
+ if ((!pipe_ctx->plane_state && old_pipe_ctx->plane_state)
+ || (!pipe_ctx->stream && old_pipe_ctx->stream)) {
+ if (old_pipe_ctx->stream_res.tg->inst != be_idx)
+ continue;
+
+ if (!old_pipe_ctx->top_pipe) {
+ ASSERT(0);
+ continue;
+ }
+
+ /* reset mpc */
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc,
+ &(old_pipe_ctx->stream_res.opp->mpc_tree),
+ old_pipe_ctx->stream_res.opp->inst,
+ old_pipe_ctx->pipe_idx);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[old_pipe_ctx->plane_res.hubp->mpcc_id] = true;
+
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: apply_ctx disconnect pending on mpcc %d]\n",
+ old_pipe_ctx->mpcc->inst);*/
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ old_pipe_ctx->top_pipe = NULL;
+ old_pipe_ctx->bottom_pipe = NULL;
+ old_pipe_ctx->plane_state = NULL;
+ old_pipe_ctx->stream = NULL;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset mpcc for pipe %d\n",
+ old_pipe_ctx->pipe_idx);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* looking for top pipe to program */
+ if (!pipe_ctx->top_pipe)
+ program_all_pipe_in_tree(dc, pipe_ctx, context);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== Watermark parameters ==============\n"
+ "a.urgent_ns: %d \n"
+ "a.cstate_enter_plus_exit: %d \n"
+ "a.cstate_exit: %d \n"
+ "a.pstate_change: %d \n"
+ "a.pte_meta_urgent: %d \n"
+ "b.urgent_ns: %d \n"
+ "b.cstate_enter_plus_exit: %d \n"
+ "b.cstate_exit: %d \n"
+ "b.pstate_change: %d \n"
+ "b.pte_meta_urgent: %d \n",
+ context->bw.dcn.watermarks.a.urgent_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.b.urgent_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns
+ );
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\nc.urgent_ns: %d \n"
+ "c.cstate_enter_plus_exit: %d \n"
+ "c.cstate_exit: %d \n"
+ "c.pstate_change: %d \n"
+ "c.pte_meta_urgent: %d \n"
+ "d.urgent_ns: %d \n"
+ "d.cstate_enter_plus_exit: %d \n"
+ "d.cstate_exit: %d \n"
+ "d.pstate_change: %d \n"
+ "d.pte_meta_urgent: %d \n"
+ "========================================================\n",
+ context->bw.dcn.watermarks.c.urgent_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.d.urgent_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns
+ );
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
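+	/* accumulate the new SMU clock requirements locally in smu_req and
+	 * commit them in one set_display_requirement call below
+	 */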
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ smu_req.hard_min_dcefclk_khz =
+ context->bw.dcn.calc_clk.dcfclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
+ dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ }
+
+ smu_req.display_count = context->stream_count;
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ *smu_req_cur = smu_req;
+
+	/* A decrease in frequency is an increase in period, hence the opposite comparison for dram_ccm */
+ if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ context->bw.dcn.cur_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ }
+ dcn10_pplib_apply_display_requirements(dc, context);
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ /* need to fix this function. not doing the right thing here */
+}
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, you need
+ * some GSL stuff
+ */
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
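+	/* build the event mask consumed by the timing generator; bit 7 is
+	 * assumed to mean "surface update" and bit 1 "cursor update"
+	 */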
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ /* TODO */
+ program_gamut_remap(pipe_ctx);
+}
+
+static void dcn10_config_stereo_parameters(
+ struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
+{
+ enum view_3d_format view_format = stream->view_format;
+ enum dc_timing_3d_format timing_3d_format =\
+ stream->timing.timing_3d_format;
+ bool non_stereo_timing = false;
+
+ if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
+ non_stereo_timing = true;
+
+ if (non_stereo_timing == false &&
+ view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
+
+ flags->PROGRAM_STEREO = 1;
+ flags->PROGRAM_POLARITY = 1;
+ if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
+ enum display_dongle_type dongle = \
+ stream->sink->link->ddc->dongle_type;
+ if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ flags->DISABLE_STEREO_DP_SYNC = 1;
+ }
+ flags->RIGHT_EYE_POLARITY =\
+ stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+ if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ flags->FRAME_PACKED = 1;
+ }
+
+ return;
+}
+
+static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+ struct crtc_stereo_flags flags = { 0 };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ dcn10_config_stereo_parameters(stream, &flags);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+ flags.PROGRAM_STEREO == 1 ? true:false,
+ stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+
+ pipe_ctx->stream_res.tg->funcs->program_stereo(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ &flags);
+
+ return;
+}
+
+static void dcn10_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i]) {
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, i);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i] = false;
+ res_pool->hubps[i]->funcs->set_blank(res_pool->hubps[i], true);
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: wait_for_mpcc finished waiting on mpcc %d]\n",
+ i);*/
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+}
+
+static bool dcn10_dummy_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ return true;
+}
+
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ pipe_ctx->plane_res.hubp);
+
+ plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
+ if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =
+ !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .power_down_front_end = dcn10_power_down_fe,
+ .power_on_front_end = dcn10_power_on_fe,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .set_bandwidth = dcn10_set_bandwidth,
+ .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control
+};
+
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn10_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
new file mode 100644
index 000000000000..ca53dc1cc19b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_H__
+#define __DC_HWSS_DCN10_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_hw_sequencer_construct(struct dc *dc);
+extern void fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
new file mode 100644
index 000000000000..08db1e6b5166
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_ipp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (ippn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ippn10->ipp_shift->field_name, ippn10->ipp_mask->field_name
+
+#define CTX \
+ ippn10->base.ctx
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCN10_IPP(*ipp));
+ *ipp = NULL;
+}
+
+static const struct ipp_funcs dcn10_ipp_funcs = {
+ .ipp_destroy = dcn10_ipp_destroy
+};
+
+void dcn10_ipp_construct(
+ struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask)
+{
+ ippn10->base.ctx = ctx;
+ ippn10->base.inst = inst;
+ ippn10->base.funcs = &dcn10_ipp_funcs;
+
+ ippn10->regs = regs;
+ ippn10->ipp_shift = ipp_shift;
+ ippn10->ipp_mask = ipp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
new file mode 100644
index 000000000000..d7b5bd20352a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN10_IPP_H_
+#define _DCN10_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCN10_IPP(ipp)\
+ container_of(ipp, struct dcn10_ipp, base)
+
+#define IPP_REG_LIST_DCN(id) \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+#define IPP_REG_LIST_DCN10(id) \
+ IPP_REG_LIST_DCN(id), \
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
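+/* CURSOR_2X_MAGNIFY shift/mask values defined locally; presumably missing
+ * from the generated register headers.
+ */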
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define IPP_MASK_SH_LIST_DCN(mask_sh) \
+ IPP_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, ALPHA_EN, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh)
+
+#define IPP_MASK_SH_LIST_DCN10(mask_sh) \
+ IPP_MASK_SH_LIST_DCN(mask_sh),\
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
+
+#define IPP_DCN10_REG_FIELD_LIST(type) \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CNVC_BYPASS; \
+ type ALPHA_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CUR0_MODE; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1; \
+ type CUR0_EXPANSION_MODE; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
+struct dcn10_ipp_shift {
+ IPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_ipp_mask {
+ IPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_ipp_registers {
+ uint32_t DPP_CONTROL;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
+struct dcn10_ipp {
+ struct input_pixel_processor base;
+
+ const struct dcn10_ipp_registers *regs;
+ const struct dcn10_ipp_shift *ipp_shift;
+ const struct dcn10_ipp_mask *ipp_mask;
+
+ struct dc_cursor_attributes curs_attr;
+};
+
+void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask);
+
+#endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
new file mode 100644
index 000000000000..76573e1f5b01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_mpc.h"
+#include "dc.h"
+#include "mem_input.h"
+
+#define REG(reg)\
+ mpc10->mpc_regs->reg
+
+#define CTX \
+ mpc10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
+
+#define MODE_TOP_ONLY 1
+#define MODE_BLEND 3
+#define BLND_PP_ALPHA 0
+#define BLND_GLOBAL_ALPHA 2
+
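+/* A TOP/BOT/OPP select value of 0xf disconnects the input (presumably the
+ * hardware's "no source" encoding).
+ */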
+
+static void mpc10_set_bg_color(
+ struct dcn10_mpc *mpc10,
+ struct tg_color *bg_color,
+ int id)
+{
+ /* mpc color is 12 bit. tg_color is 10 bit */
+ /* todo: might want to use 16 bit to represent color and have each
+ * hw block translate to correct color depth.
+ */
+ uint32_t bg_r_cr = bg_color->color_r_cr << 2;
+ uint32_t bg_g_y = bg_color->color_g_y << 2;
+ uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+
+ REG_SET(MPCC_BG_R_CR[id], 0,
+ MPCC_BG_R_CR, bg_r_cr);
+ REG_SET(MPCC_BG_G_Y[id], 0,
+ MPCC_BG_G_Y, bg_g_y);
+ REG_SET(MPCC_BG_B_CB[id], 0,
+ MPCC_BG_B_CB, bg_b_cb);
+}
+
+void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id));
+ REG_WAIT(MPCC_STATUS[id],
+ MPCC_IDLE, 1,
+ 1, 100000);
+}
+
+static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+{
+ int i;
+ int last_free_mpcc_id = -1;
+
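+	/* prefer an unused MPCC that is already idle; fall back to the last
+	 * unused one and wait for it to go idle
+	 */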
+ for (i = 0; i < mpc10->num_mpcc; i++) {
+ uint32_t is_idle = 0;
+
+ if (mpc10->mpcc_in_use_mask & 1 << i)
+ continue;
+
+ last_free_mpcc_id = i;
+ REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
+ if (is_idle)
+ return i;
+ }
+
+	/* This assert should never trigger; we have an mpcc leak if it does */
+ ASSERT(last_free_mpcc_id != -1);
+
+ mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
+ return last_free_mpcc_id;
+}
+
+static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+{
+ unsigned int top_sel, mpc_busy, mpc_idle;
+
+ REG_GET(MPCC_TOP_SEL[id],
+ MPCC_TOP_SEL, &top_sel);
+
+ if (top_sel == 0xf) {
+ REG_GET_2(MPCC_STATUS[id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle);
+
+ ASSERT(mpc_busy == 0);
+ ASSERT(mpc_idle == 1);
+ }
+}
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
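+	/* tree_cfg->dpp[] and tree_cfg->mpcc[] are ordered from the top of the
+	 * blend tree (z_idx 0) down to the bottom.
+	 */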
+ /* find z_idx for the dpp to be removed */
+ for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
+ if (tree_cfg->dpp[z_idx] == dpp_id)
+ break;
+
+ if (z_idx == tree_cfg->num_pipes) {
+		/* In case of resume from S3/S4, remove the mpcc left over by the BIOS */
+ REG_SET(MPCC_OPP_ID[dpp_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[dpp_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[dpp_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ return;
+ }
+
+ mpcc_id = tree_cfg->mpcc[z_idx];
+
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ if (z_idx > 0) {
+ int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
+
+ if (z_idx + 1 < tree_cfg->num_pipes)
+ /* mpcc to be removed is in the middle of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
+ else {
+ /* mpcc to be removed is at the bottom of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
+ MPCC_MODE, MODE_TOP_ONLY);
+ }
+ } else if (tree_cfg->num_pipes > 1)
+ /* mpcc to be removed is at the top of the tree */
+ REG_SET(MUX[opp_id], 0,
+ MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
+ else
+ /* mpcc to be removed is the only one in the tree */
+ REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
+
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ tree_cfg->num_pipes--;
+ for (; z_idx < tree_cfg->num_pipes; z_idx++) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
+ }
+ tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
+ tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
+}
+
+static void mpc10_add_to_tree_cfg(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_mode = MODE_TOP_ONLY;
+ int position = cfg->z_index;
+ struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int z_idx;
+
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, cfg->opp_id);
+
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, cfg->dpp_id);
+
+ if (position == 0) {
+ /* idle dpp/mpcc is added to the top layer of tree */
+
+ if (tree_cfg->num_pipes > 0) {
+ /* get instance of previous top mpcc */
+ int prev_top_mpcc_id = tree_cfg->mpcc[0];
+
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, prev_top_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+		/* opp will get its new output from the newly added mpcc */
+ REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
+
+ } else if (position == tree_cfg->num_pipes) {
+ /* idle dpp/mpcc is added to the bottom layer of tree */
+
+ /* get instance of previous bottom mpcc, set to middle layer */
+ int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
+
+ REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+		/* mpcc_id becomes the new bottom mpcc */
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ } else {
+ /* idle dpp/mpcc is added to middle of tree */
+ int above_mpcc_id = tree_cfg->mpcc[position - 1];
+ int below_mpcc_id = tree_cfg->mpcc[position];
+
+		/* the mpcc above the new mpcc_id gets a new bottom mux */
+ REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+		/* mpcc_id's bottom mux comes from the mpcc below */
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, below_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+ REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
+ MPCC_MODE, mpcc_mode,
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+
+ /* update mpc_tree_cfg with new mpcc */
+ for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
+ }
+ tree_cfg->dpp[position] = cfg->dpp_id;
+ tree_cfg->mpcc[position] = mpcc_id;
+ tree_cfg->num_pipes++;
+}
+
+int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
+ ASSERT(cfg->z_index < mpc10->num_mpcc);
+
+	/* check if dpp already exists in the mpc tree */
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+ if (z_idx == cfg->tree_cfg->num_pipes) {
+ ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
+ mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+		 * after remove. This causes us to pick mpcc1 here,
+		 * which causes a pstate hang for a yet unknown reason.
+ */
+ mpcc_id = cfg->dpp_id;
+ /* end hack*/
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
+
+ if (mpc->ctx->dc->debug.sanity_checks)
+ mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ } else {
+ ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+ }
+
+ /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
+ mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
+
+ /* set background color */
+ mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
+
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ return mpcc_id;
+}
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+
+ /* find z_idx for the dpp that requires blending mode update*/
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+
+ ASSERT(z_idx < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+
+ REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+}
+
+const struct mpc_funcs dcn10_mpc_funcs = {
+ .add = mpc10_mpcc_add,
+ .remove = mpc10_mpcc_remove,
+ .wait_for_idle = mpc10_assert_idle_mpcc,
+ .update_blend_mode = mpc10_update_blend_mode,
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ mpc10->base.ctx = ctx;
+
+ mpc10->base.funcs = &dcn10_mpc_funcs;
+
+ mpc10->mpc_regs = mpc_regs;
+ mpc10->mpc_shift = mpc_shift;
+ mpc10->mpc_mask = mpc_mask;
+
+ mpc10->mpcc_in_use_mask = 0;
+ mpc10->num_mpcc = num_mpcc;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
new file mode 100644
index 000000000000..683ce4aaa76e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -0,0 +1,138 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN10_H__
+#define __DC_MPCC_DCN10_H__
+
+#include "mpc.h"
+
+#define TO_DCN10_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn10_mpc, base)
+
+#define MAX_MPCC 6
+#define MAX_OPP 6
+
+#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MPCC_TOP_SEL, MPCC, inst),\
+ SRII(MPCC_BOT_SEL, MPCC, inst),\
+ SRII(MPCC_CONTROL, MPCC, inst),\
+ SRII(MPCC_STATUS, MPCC, inst),\
+ SRII(MPCC_OPP_ID, MPCC, inst),\
+ SRII(MPCC_BG_G_Y, MPCC, inst),\
+ SRII(MPCC_BG_R_CR, MPCC, inst),\
+ SRII(MPCC_BG_B_CB, MPCC, inst),\
+ SRII(MPCC_BG_B_CB, MPCC, inst)
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MUX, MPC_OUT, inst)
+
+#define MPC_COMMON_REG_VARIABLE_LIST \
+ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
+ uint32_t MPCC_BOT_SEL[MAX_MPCC]; \
+ uint32_t MPCC_CONTROL[MAX_MPCC]; \
+ uint32_t MPCC_STATUS[MAX_MPCC]; \
+ uint32_t MPCC_OPP_ID[MAX_MPCC]; \
+ uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
+ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
+ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MUX[MAX_OPP];
+
+#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
+ SF(MPCC0_MPCC_BOT_SEL, MPCC_BOT_SEL, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
+ SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
+ SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
+ SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
+ SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+
+#define MPC_REG_FIELD_LIST(type) \
+ type MPCC_TOP_SEL;\
+ type MPCC_BOT_SEL;\
+ type MPCC_MODE;\
+ type MPCC_ALPHA_BLND_MODE;\
+ type MPCC_ALPHA_MULTIPLIED_MODE;\
+ type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_IDLE;\
+ type MPCC_BUSY;\
+ type MPCC_OPP_ID;\
+ type MPCC_BG_G_Y;\
+ type MPCC_BG_R_CR;\
+ type MPCC_BG_B_CB;\
+ type MPC_OUT_MUX;
+
+struct dcn_mpc_registers {
+ MPC_COMMON_REG_VARIABLE_LIST
+};
+
+struct dcn_mpc_shift {
+ MPC_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_mpc_mask {
+ MPC_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_mpc {
+ struct mpc base;
+
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn_mpc_registers *mpc_regs;
+ const struct dcn_mpc_shift *mpc_shift;
+ const struct dcn_mpc_mask *mpc_mask;
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+int mpc10_mpcc_add(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id);
+
+void mpc10_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
new file mode 100644
index 000000000000..a136f70b7a3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn10->opp_shift->field_name, oppn10->opp_mask->field_name
+
+#define CTX \
+ oppn10->base.ctx
+
+
+
+/************* FORMATTER ************/
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) HW removed 12-bit FMT support on DCE11 for power-saving reasons.
+ */
+static void set_truncation(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED,
+ FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH,
+ FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE);
+}
+
+static void set_spatial_dither(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_MODE, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else {
+ return;
+ }
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+
+ /*Set seed for random values for
+ * spatial dithering for R,G,B channels*/
+
+ REG_SET(FMT_DITHER_RAND_R_SEED, 0,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_G_SEED, 0,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_B_SEED, 0,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
+
+ REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL,
+ /*Enable spatial dithering*/
+ FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED,
+ /* Set spatial dithering mode
+		 * (default is Seed pattern AAAA...)
+ */
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ /*Set spatial dithering bit depth*/
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ /*Disable High pass filter*/
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ /*Reset only at startup*/
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ /*Set RGB data dithered with x^28+x^3+1*/
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+}
+
+static void oppn10_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ set_truncation(oppn10, params);
+ set_spatial_dither(oppn10, params);
+ /* TODO
+ * set_temporal_dither(oppn10, params);
+ */
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ */
+static void set_pixel_encoding(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ switch (params->pixel_encoding) {
+
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ *			7 for programmable
+ * 2) Enable clamp if Limited range requested
+ */
+static void opp_set_clamping(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /* TODO */
+ default:
+ break;
+ }
+
+}
+
+static void oppn10_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ signal == SIGNAL_TYPE_VIRTUAL) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_set_clamping(oppn10, params);
+ set_pixel_encoding(oppn10, params);
+}
+
+static void oppn10_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0);
+
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+ oppn10_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+}
+
+
+
+static void oppn10_set_stereo_polarity(
+ struct output_pixel_processor *opp,
+ bool enable, bool rightEyePolarity)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+}
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_opp_destroy(struct output_pixel_processor **opp)
+{
+ kfree(TO_DCN10_OPP(*opp));
+ *opp = NULL;
+}
+
+static struct opp_funcs dcn10_opp_funcs = {
+ .opp_set_dyn_expansion = oppn10_set_dyn_expansion,
+ .opp_program_fmt = oppn10_program_fmt,
+ .opp_program_bit_depth_reduction = oppn10_program_bit_depth_reduction,
+ .opp_set_stereo_polarity = oppn10_set_stereo_polarity,
+ .opp_destroy = dcn10_opp_destroy
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask)
+{
+ int i;
+ oppn10->base.ctx = ctx;
+ oppn10->base.inst = inst;
+ oppn10->base.funcs = &dcn10_opp_funcs;
+
+ oppn10->base.mpc_tree.dpp[0] = inst;
+ oppn10->base.mpc_tree.mpcc[0] = inst;
+ oppn10->base.mpc_tree.num_pipes = 1;
+ for (i = 0; i < MAX_PIPES; i++)
+ oppn10->base.mpcc_disconnect_pending[i] = false;
+
+ oppn10->regs = regs;
+ oppn10->opp_shift = opp_shift;
+ oppn10->opp_mask = opp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
new file mode 100644
index 000000000000..790ce6014832
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -0,0 +1,186 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN10_H__
+#define __DC_OPP_DCN10_H__
+
+#include "opp.h"
+
+#define TO_DCN10_OPP(opp)\
+ container_of(opp, struct dcn10_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
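+
+/* Illustrative expansion (comment only): with post_fix = __SHIFT,
+ * OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, __SHIFT) becomes
+ * .FMT_PIXEL_ENCODING = FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT
+ */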
+
+#define OPP_REG_LIST_DCN(id) \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
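+
+/* Note (informational): SRI() is not defined in this header; the file that
+ * instantiates this list (dcn10_resource.c in this patch) provides it to
+ * resolve each entry to a register address.
+ */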
+
+#define OPP_REG_LIST_DCN10(id) \
+ OPP_REG_LIST_DCN(id)
+
+#define OPP_MASK_SH_LIST_DCN(mask_sh) \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+
+#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
+ OPP_MASK_SH_LIST_DCN(mask_sh)
+
+#define OPP_DCN10_REG_FIELD_LIST(type) \
+ type DPG_EN; \
+ type DPG_MODE; \
+ type DPG_VRES; \
+ type DPG_HRES; \
+ type DPG_COLOUR0_R_CR; \
+ type DPG_COLOUR1_R_CR; \
+ type DPG_COLOUR0_B_CB; \
+ type DPG_COLOUR1_B_CB; \
+ type DPG_COLOUR0_G_Y; \
+ type DPG_COLOUR1_G_Y; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C13; \
+ type CM_OCSC_C14; \
+ type CM_OCSC_C21; \
+ type CM_OCSC_C22; \
+ type CM_OCSC_C23; \
+ type CM_OCSC_C24; \
+ type CM_OCSC_C31; \
+ type CM_OCSC_C32; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C13; \
+ type CM_COMB_C14; \
+ type CM_COMB_C21; \
+ type CM_COMB_C22; \
+ type CM_COMB_C23; \
+ type CM_COMB_C24; \
+ type CM_COMB_C31; \
+ type CM_COMB_C32; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_MAP420MEM_PWR_FORCE; \
+ type FMT_STEREOSYNC_OVERRIDE
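+
+/* Informational: the field list above is instantiated twice below - with
+ * uint8_t holding the per-field shift values and uint32_t holding the masks.
+ */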
+
+struct dcn10_opp_shift {
+ OPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_opp_mask {
+ OPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_opp_registers {
+ uint32_t DPG_CONTROL;
+ uint32_t DPG_COLOUR_B_CB;
+ uint32_t DPG_COLOUR_G_Y;
+ uint32_t DPG_COLOUR_R_CR;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C13_C14;
+ uint32_t CM_OCSC_C21_C22;
+ uint32_t CM_OCSC_C23_C24;
+ uint32_t CM_OCSC_C31_C32;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C13_C14;
+ uint32_t CM_COMB_C21_C22;
+ uint32_t CM_COMB_C23_C24;
+ uint32_t CM_COMB_C31_C32;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_MAP420_MEMORY_CONTROL;
+};
+
+struct dcn10_opp {
+ struct output_pixel_processor base;
+
+ const struct dcn10_opp_registers *regs;
+ const struct dcn10_opp_shift *opp_shift;
+ const struct dcn10_opp_mask *opp_mask;
+
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
new file mode 100644
index 000000000000..4c4bd72d4e40
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -0,0 +1,1466 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn10/dcn10_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "irq/dcn10/irq_service_dcn10.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_opp.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#include "dcn10_hubp.h"
+
+#include "vega10/soc15ip.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+
+#include "raven1/NBIO/nbio_7_0_offset.h"
+
+#include "raven1/MMHUB/mmhub_9_1_offset.h"
+#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+
+enum dcn10_clk_src_array_id {
+ DCN10_CLK_SRC_PLL0,
+ DCN10_CLK_SRC_PLL1,
+ DCN10_CLK_SRC_PLL2,
+ DCN10_CLK_SRC_PLL3,
+ DCN10_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in HW object header file */
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
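+
+/* Illustrative expansion of the helpers above (comment only), assuming the
+ * dcn_1_0_offset.h headers define the mmFMT0_* constants:
+ * SRI(FMT_CONTROL, FMT, 0) becomes
+ * .FMT_CONTROL = BASE(mmFMT0_FMT_CONTROL_BASE_IDX) + mmFMT0_FMT_CONTROL
+ */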
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* macros to expand the register list macros defined in HW object header file
+ * end *********************/
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN10_REG_LIST(0)
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+ .AFMT_AVI_INFO0 = 0,\
+ .AFMT_AVI_INFO1 = 0,\
+ .AFMT_AVI_INFO2 = 0,\
+ .AFMT_AVI_INFO3 = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
+ .AFMT_GENERIC0_UPDATE = 0,
+ .AFMT_GENERIC2_UPDATE = 0,
+ .DP_DYN_RANGE = 0,
+ .DP_YCBCR_RANGE = 0,
+ .HDMI_AVI_INFO_SEND = 0,
+ .HDMI_AVI_INFO_CONT = 0,
+ .HDMI_AVI_INFO_LINE = 0,
+ .DP_SEC_AVI_ENABLE = 0,
+ .AFMT_AVI_INFO_VERSION = 0
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+};
+
+static const struct dcn10_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
+};
+
+static const struct dcn_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN10(_MASK),
+};
+
+static const struct dcn_mpc_registers mpc_regs = {
+ MPC_COMMON_REG_LIST_DCN1_0(0),
+ MPC_COMMON_REG_LIST_DCN1_0(1),
+ MPC_COMMON_REG_LIST_DCN1_0(2),
+ MPC_COMMON_REG_LIST_DCN1_0(3),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
+};
+
+static const struct dcn_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+};
+
+#define tg_regs(id)\
+[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
+
+static const struct dcn_tg_registers tg_regs[] = {
+ tg_regs(0),
+ tg_regs(1),
+ tg_regs(2),
+ tg_regs(3),
+};
+
+static const struct dcn_tg_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_tg_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define mi_regs(id)\
+[id] = {\
+ MI_REG_LIST_DCN10(id)\
+}
+
+
+static const struct dcn_mi_registers mi_regs[] = {
+ mi_regs(0),
+ mi_regs(1),
+ mi_regs(2),
+ mi_regs(3),
+};
+
+static const struct dcn_mi_shift mi_shift = {
+ MI_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn_mi_mask mi_mask = {
+ MI_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 4,
+ .num_video_plane = 4,
+ .num_audio = 4,
+ .num_stream_encoder = 4,
+ .num_pll = 4,
+};
+
+static const struct dc_debug debug_defaults_drv = {
+ .sanity_checks = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+
+ .min_disp_clk_khz = 300000,
+
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = false,
+ .pplib_wm_report_mode = WM_REPORT_DEFAULT,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = true,
+ .disable_dcc = DCC_ENABLE,
+ .voltage_align_fclk = true,
+ .disable_stereo_support = true,
+ .vsr_support = true,
+ .performance_trace = false,
+};
+
+static const struct dc_debug debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+ .clock_trace = true,
+ .disable_stutter = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true
+};
+
+static void dcn10_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN10_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn10_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_dpp *dpp =
+ kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ dpp1_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask);
+ return &dpp->base;
+}
+
+static struct input_pixel_processor *dcn10_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+
+static struct output_pixel_processor *dcn10_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_opp *opp =
+ kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+{
+ struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
+ GFP_KERNEL);
+
+ if (!mpc10)
+ return NULL;
+
+ dcn10_mpc_construct(mpc10, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ 4);
+
+ return &mpc10->base;
+}
+
+static struct timing_generator *dcn10_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct dcn10_timing_generator *tgn10 =
+ kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn10_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dcn10_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+struct clock_source *dcn10_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct stream_encoder *dcn10_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN1_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN1_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn10_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dcn10_stream_encoder_create,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+void dcn10_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+{
+ struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+
+ if (!pp_smu)
+ return pp_smu;
+
+ dm_pp_get_funcs_rv(ctx, pp_smu);
+ return pp_smu;
+}
+
+static void destruct(struct dcn10_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ /* TODO: free dcn version of stream encoder once implemented
+ * rather than using virtual stream encoder
+ */
+ kfree(pool->base.stream_enc[i]);
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN10_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.dpps[i] != NULL)
+ dcn10_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++)
+ kfree(pool->base.stream_enc[i]);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn10_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ kfree(pool->base.pp_smu);
+}
+
+static struct hubp *dcn10_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_hubp *hubp1 =
+ kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);
+
+ if (!hubp1)
+ return NULL;
+
+ dcn10_hubp_construct(hubp1, ctx, inst,
+ &mi_regs[inst], &mi_shift, &mi_mask);
+ return &hubp1->base;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode*/
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pixel_clk_params->requested_pix_clk /= 2;
+
+}
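+
+/* Note (cross-reference within this patch): for YCbCr 4:2:0 output the OTG
+ * runs at half rate (OTG_H_TIMING_DIV_BY2 in tgn10_program_timing), which is
+ * why the requested pixel clock is halved above.
+ */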
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ /*TODO Seems unneeded anymore */
+ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ todo: shouldn't have to copy missing parameter here
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ stream->clamping.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ build_clamping_params(stream);
+
+ continue;
+ }
+ }
+ */
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ build_pipe_hw_param(pipe_ctx);
+ return DC_OK;
+}
+
+enum dc_status dcn10_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dcn10_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+ if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ if (!idle_pipe)
+ return NULL;
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+
+ return idle_pipe;
+}
+
+enum dcc_control {
+ dcc_control__256_256_xxx,
+ dcc_control__128_128_xxx,
+ dcc_control__256_64_64,
+};
+
+enum segment_order {
+ segment_order__na,
+ segment_order__contiguous,
+ segment_order__non_contiguous,
+};
+
+static bool dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element)
+{
+ /* DML: get_bytes_per_element */
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ *bytes_per_element = 2;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ *bytes_per_element = 4;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ *bytes_per_element = 8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (bytes_per_element == 1 && standard_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && standard_swizzle) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && display_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ /* copied from DML; might want to refactor to leverage DML directly instead of duplicating it */
+ /* DML : get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
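+
+/* Informal cross-check: a 256B block holds 256 / bpe pixels, which the table
+ * above arranges as 16x16 (1 bpe), 16x8 (2 bpe), 8x8 (4 bpe) and 8x4 (8 bpe).
+ */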
+
+static void det_request_size(
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = height * blk256_height * bpe;
+ swath_bytes_vert_wc = width * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128B request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128B request */
+}
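+
+/* Worked example (illustrative only): for a 1080-line surface at 4 bytes per
+ * element, blk256_height is 8, so swath_bytes_horz_wc = 1080 * 8 * 4 = 34560
+ * bytes; twice that still fits the 164KB detile buffer, so full 256B
+ * horizontal requests are used (req128_horz_wc = false).
+ */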
+
+static bool get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ det_request_size(input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+ /* req128 is true for either horz or vert
+ * but the segment order is contiguous
+ */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ break;
+ }
+
+ output->capable = true;
+ output->const_color_support = false;
+
+ return true;
+}
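+
+/* Illustrative outcome (assuming DCC is not disabled in dc->debug and the
+ * swizzle mode is supported): a horizontally scanned ARGB8888 surface that
+ * still fits full 256B requests lands in dcc_control__256_256_xxx, i.e.
+ * 256B max uncompressed/compressed block sizes with
+ * independent_64b_blks = false.
+ */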
+
+
+static void dcn10_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
+
+ destruct(dcn10_pool);
+ kfree(dcn10_pool);
+ *pool = NULL;
+}
+
+static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && caps->max_video_width != 0
+ && plane_state->src_rect.width > caps->max_video_width)
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = get_dcc_compression_cap
+};
+
+static struct resource_funcs dcn10_res_pool_funcs = {
+ .destroy = dcn10_destroy_resource_pool,
+ .link_enc_create = dcn10_link_encoder_create,
+ .validate_guaranteed = dcn10_validate_guaranteed,
+ .validate_bandwidth = dcn_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .validate_plane = dcn10_validate_plane,
+ .add_stream_to_ctx = dcn10_add_stream_to_ctx
+};
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+ /* RV1 supports a max of 4 pipes */
+ value = value & 0xf;
+ return value;
+}
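+
+/* Illustrative example: a fuse value of 0x8 marks HW pipe 3 as fused off, so
+ * construct() below skips register instance 3 and reports pipe_count = 3.
+ */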
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn10_resource_pool *pool)
+{
+ int i;
+ int j;
+ struct dc_context *ctx = dc->ctx;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+ /*
+ * TODO fill in from actual raven resource when we create
+ * more than virtual encoder
+ */
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ /* max pipe num for ASIC before checking pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
+ dc->caps.max_video_width = 3840;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 256;
+
+ dc->caps.max_slave_planes = 1;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+ else
+ dc->debug = debug_defaults_diags;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+
+ pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ /* todo: do not reuse the phy_pll registers */
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clock_source_create_fail;
+ }
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dcn10_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
+ memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
+
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->urgent_latency = 3;
+ dc->debug.disable_dmcu = true;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
+ }
+
+
+ dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
+ ASSERT(dc->dcn_soc->number_of_channels < 3);
+ if (dc->dcn_soc->number_of_channels == 0)/*old sbios bug*/
+ dc->dcn_soc->number_of_channels = 2;
+
+ if (dc->dcn_soc->number_of_channels == 1) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
+ }
+ }
+
+ pool->base.pp_smu = dcn10_pp_smu_create(ctx);
+
+ if (!dc->debug.disable_pplib_clock_request)
+ dcn_bw_update_from_pplib(dc);
+ dcn_bw_sync_calcs_and_dml(dc);
+ if (!dc->debug.disable_pplib_wm_range) {
+ dc->res_pool = &pool->base;
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+ }
+
+ {
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+ #endif
+ }
+
+ /* index to valid pipe resource */
+ j = 0;
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ /* if pipe is disabled, skip instance of HW pipe,
+ * i.e., skip the ASIC register instance
+ */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
+ pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto mi_create_fail;
+ }
+
+ pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto ipp_create_fail;
+ }
+
+ pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpp!\n");
+ goto dpp_create_fail;
+ }
+
+ pool->base.opps[j] = dcn10_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto opp_create_fail;
+ }
+
+ pool->base.timing_generators[j] = dcn10_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto otg_create_fail;
+ }
+ /* check next valid pipe */
+ j++;
+ }
+
+ /* valid pipe num */
+ pool->base.pipe_count = j;
+
+ /* within the dml lib this is hard-coded to 4. If an ASIC pipe is
+ * fused off, the value may change
+ */
+ dc->dml.ip.max_num_dpp = pool->base.pipe_count;
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ pool->base.mpc = dcn10_mpc_create(ctx);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto mpc_create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto res_create_fail;
+
+ dcn10_hw_sequencer_construct(dc);
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+disp_clk_create_fail:
+mpc_create_fail:
+otg_create_fail:
+opp_create_fail:
+dpp_create_fail:
+ipp_create_fail:
+mi_create_fail:
+irqs_create_fail:
+res_create_fail:
+clock_source_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dcn10_resource_pool *pool =
+ kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
new file mode 100644
index 000000000000..8f71225bc61b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN10_H__
+#define __DC_RESOURCE_DCN10_H__
+
+#include "core_types.h"
+
+#define TO_DCN10_RES_POOL(pool)\
+ container_of(pool, struct dcn10_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn10_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+
+#endif /* __DC_RESOURCE_DCN10_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
new file mode 100644
index 000000000000..c7333cdf1802
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
@@ -0,0 +1,1203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_timing_generator.h"
+#include "dc.h"
+
+#define REG(reg)\
+ tgn10->tg_regs->reg
+
+#define CTX \
+ tgn10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+
+#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
+
+/**
+ * apply_front_porch_workaround TODO FPGA still need?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for
+ * progressive.
+ */
+static void tgn10_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+static void tgn10_program_global_sync(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (tg->dlg_otg_param.vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+}
+
+static void tgn10_disable_stereo(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_STEREO_CONTROL, 0,
+ OTG_STEREO_EN, 0);
+
+ REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+ OTG_3D_STRUCTURE_EN, 0,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, 0);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, 0);
+}
+
+/**
+ * tgn10_program_timing - used by mode timing set
+ * Program CRTC timing registers - OTG_H_*, OTG_V_*, pixel repetition -
+ * including SYNC. Call the BIOS command table to program the timings.
+ */
+static void tgn10_program_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t vesa_sync_start;
+ uint32_t asic_blank_end;
+ uint32_t asic_blank_start;
+ uint32_t v_total;
+ uint32_t v_sync_end;
+ uint32_t v_init, v_fp2;
+ uint32_t h_sync_polarity, v_sync_polarity;
+ uint32_t interlace_factor;
+ uint32_t start_point = 0;
+ uint32_t field_num = 0;
+ uint32_t h_div_2;
+ int32_t vertical_line_start;
+
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ patched_crtc_timing = *dc_crtc_timing;
+ tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ /* Load horizontal timing */
+
+ /* CRTC_H_TOTAL = vesa.h_total - 1 */
+ REG_SET(OTG_H_TOTAL, 0,
+ OTG_H_TOTAL, patched_crtc_timing.h_total - 1);
+
+ /* h_sync_start = 0, h_sync_end = vesa.h_sync_width */
+ REG_UPDATE_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, 0,
+ OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width);
+
+ /* asic_h_blank_end = HsyncWidth + HbackPorch =
+ * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
+ * vesa.h_left_border
+ */
+ vesa_sync_start = patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right +
+ patched_crtc_timing.h_front_porch;
+
+ asic_blank_end = patched_crtc_timing.h_total -
+ vesa_sync_start -
+ patched_crtc_timing.h_border_left;
+
+ /* h_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ patched_crtc_timing.h_border_left +
+ patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right;
+
+ REG_UPDATE_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, asic_blank_start,
+ OTG_H_BLANK_END, asic_blank_end);
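+
+ /* Illustrative numbers (not programmed here): for a CEA 1080p timing with
+ * h_total 2200, h_addressable 1920, h_front_porch 88 and no borders,
+ * vesa_sync_start is 2008, asic_blank_end is 192 (sync plus back porch)
+ * and asic_blank_start is 2112.
+ */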
+
+ /* h_sync polarity */
+ h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, h_sync_polarity);
+
+ /* Load vertical timing */
+
+ /* CRTC_V_TOTAL = v_total - 1 */
+ if (patched_crtc_timing.flags.INTERLACE) {
+ interlace_factor = 2;
+ v_total = 2 * patched_crtc_timing.v_total;
+ } else {
+ interlace_factor = 1;
+ v_total = patched_crtc_timing.v_total - 1;
+ }
+ REG_SET(OTG_V_TOTAL, 0,
+ OTG_V_TOTAL, v_total);
+
+ /* In case V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
+ * OTG_V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, v_total);
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, v_total);
+
+ /* v_sync_start = 0, v_sync_end = v_sync_width */
+ v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, 0,
+ OTG_V_SYNC_A_END, v_sync_end);
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ /* v_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ (patched_crtc_timing.v_border_top +
+ patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom)
+ * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, asic_blank_start,
+ OTG_V_BLANK_END, asic_blank_end);
+
+ /* Use OTG_VERTICAL_INTERRUPT2 to replace the VUPDATE interrupt;
+ * program the reg for the interrupt position.
+ */
+ vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ if (vertical_line_start < 0) {
+ ASSERT(0);
+ vertical_line_start = 0;
+ }
+ REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+
+ /* v_sync polarity */
+ v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, v_sync_polarity);
+
+ v_init = asic_blank_start;
+ if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ start_point = 1;
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ field_num = 1;
+ }
+ /* v_fp2: number of lines by which vstartup extends past blank end */
+ v_fp2 = 0;
+ if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
+ v_fp2 = tg->dlg_otg_param.vstartup_start - asic_blank_end;
+
+ /* Interlace */
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 1);
+ v_init = v_init / 2;
+ if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ v_fp2 = v_fp2 / 2;
+ } else {
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 0);
+ }
+
+
+ /* VTG enable must be set to 0 before programming FP2 and VInit */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+
+ /* The original code used the VTG offset to address the OTG register, which seems wrong */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_START_POINT_CNTL, start_point,
+ OTG_FIELD_NUMBER_CNTL, field_num);
+
+ tgn10_program_global_sync(tg);
+
+ /* TODO
+ * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
+ * program_horz_count_by_2
+ * for DVI 30bpp mode, 0 otherwise
+ * program_horz_count_by_2(tg, &patched_crtc_timing);
+ */
+
+ /* Enable stereo - only when we need to pack a 3D frame. Other types
+ * of stereo are handled by an explicit call.
+ */
+ h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
+ 1 : 0;
+
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_BY2, h_div_2);
+
+}
+
+/**
+ * unblank_crtc
+ * Call ASIC Control Object to UnBlank CRTC.
+ */
+static void tgn10_unblank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t vertical_interrupt_enable = 0;
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+ OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
+
+ /* Temporary workaround for the vertical interrupt; once the vertical
+ * interrupt is fully enabled, this check will be removed.
+ */
+ if (vertical_interrupt_enable)
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 0,
+ OTG_BLANK_DE_MODE, 0);
+}
+
+/**
+ * blank_crtc
+ * Call ASIC Control Object to Blank CRTC.
+ */
+
+static void tgn10_blank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ OTG_BLANK_DE_MODE, 0);
+
+ /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
+ * for status?
+ */
+ REG_WAIT(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ 1, 100000);
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+}
+
+static void tgn10_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ tgn10_blank_crtc(tg);
+ else
+ tgn10_unblank_crtc(tg);
+}
+
+static bool tgn10_is_blanked(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t blank_en;
+ uint32_t blank_state;
+
+ REG_GET_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, &blank_en,
+ OTG_CURRENT_BLANK_STATE, &blank_state);
+
+ return blank_en && blank_state;
+}
+
+static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (enable) {
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_EN, 1,
+ OPTC_INPUT_CLK_GATE_DIS, 1);
+
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 1,
+ 1, 1000);
+
+ /* Enable clock */
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_EN, 1,
+ OTG_CLOCK_GATE_DIS, 1);
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 1,
+ 1, 1000);
+ } else {
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_GATE_DIS, 0,
+ OTG_CLOCK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 0,
+ 1, 1000);
+
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_GATE_DIS, 0,
+ OPTC_INPUT_CLK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 0,
+ 1, 1000);
+ }
+}
+
+/**
+ * Enable CRTC
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+static bool tgn10_enable_crtc(struct timing_generator *tg)
+{
+ /* TODO FPGA wait for answer
+ * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
+ * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
+ */
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* OPP instance for OTG. For DCN1.0, ODM is removed;
+ * OPP and OPTC have a 1:1 mapping.
+ */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SRC_SEL, tg->inst);
+
+ /* VTG enable first is for HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 1);
+
+ return true;
+}
+
+/* disable_crtc - call ASIC Control Object to disable Timing generator. */
+static bool tgn10_disable_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ /* CRTC disabled; wait for the OTG to go idle before its clock is disabled. */
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_BUSY, 0,
+ 1, 100000);
+
+ return true;
+}
+
+
+static void tgn10_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET_3(OTG_BLACK_COLOR, 0,
+ OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ OTG_BLACK_COLOR_G_Y, black_color->color_g_y,
+ OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+
+static bool tgn10_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t interlace_factor;
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ v_blank = (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
+ return false;
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ tg->ctx->dc->debug.disable_stereo_support)
+ return false;
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check the maximum number of pixels supported by the Timing Generator
+ * (currently this can never fail; failing would require a display with
+ * more than 8192 horizontal and more than 8192 vertical total pixels).
+ */
+ if (timing->h_total > tgn10->max_h_total ||
+ timing->v_total > tgn10->max_v_total)
+ return false;
+
+
+ if (h_blank < tgn10->min_h_blank)
+ return false;
+
+ if (timing->h_sync_width < tgn10->min_h_sync_width ||
+ timing->v_sync_width < tgn10->min_v_sync_width)
+ return false;
+
+ min_v_blank = timing->flags.INTERLACE ?
+ tgn10->min_v_blank_interlace : tgn10->min_v_blank;
+
+ if (v_blank < min_v_blank)
+ return false;
+
+ return true;
+
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ * Get counter for vertical blanks. Uses register OTG_STATUS_FRAME_COUNT,
+ * which holds the frame count.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Frame count, which should equal the number of vblanks.
+ */
+static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t frame_count;
+
+ REG_GET(OTG_STATUS_FRAME_COUNT,
+ OTG_FRAME_COUNT, &frame_count);
+
+ return frame_count;
+}
+
+static void tgn10_lock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 100);
+}
+
+static void tgn10_unlock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ /* why are we waiting here? */
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_UPDATE_PENDING, 0,
+ 1, 100000);
+}
+
+static void tgn10_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET_2(OTG_STATUS_POSITION,
+ OTG_HORZ_COUNT, &position->horizontal_count,
+ OTG_VERT_COUNT, &position->vertical_count);
+
+ REG_GET(OTG_NOM_VERT_POSITION,
+ OTG_VERT_COUNT_NOM, &position->nominal_vcount);
+}
+
+static bool tgn10_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+static bool tgn10_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t occurred;
+
+ REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
+ OTG_FORCE_COUNT_NOW_OCCURRED, &occurred);
+
+ return occurred != 0;
+}
+
+static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t falling_edge;
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &falling_edge);
+
+ if (falling_edge)
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect falling edge */
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1);
+ else
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect rising edge */
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ /* force H count to H_TOTAL and V count to V_TOTAL in
+ * progressive mode and V_TOTAL-1 in interlaced mode
+ */
+ OTG_FORCE_COUNT_NOW_MODE, 2);
+}
+
+static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_WRITE(OTG_TRIGA_CNTL, 0);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ OTG_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+static void tgn10_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_BLANK, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_ACTIVE_DISP, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void tgn10_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ /* ASIC design change: this control is no longer needed.
+ * Kept empty so the shared caller logic still works.
+ */
+}
+
+
+static void tgn10_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* Bit 8 is no longer applicable in RV for the PSR case;
+ * clear bit 8 if it is set.
+ */
+ if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ != 0)
+ value = value &
+ ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
+
+ REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
+ OTG_STATIC_SCREEN_EVENT_MASK, value,
+ OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ *****************************************************************************
+ */
+static void tgn10_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
+
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ } else {
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, 0);
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, 0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+ }
+}
+
+static void tgn10_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' with 'test_pattern_mode',
+ * since this is not DP-specific (DP-specific handling probably belongs
+ * in the DP encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ uint32_t pattern_mask;
+ uint32_t pattern_data;
+ /* color ramp generator mixes 16-bits color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_HRES, 6);
+
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO.
+ * Color bits should be left-aligned to the MSB:
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit.
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for next write.
+ * first iteration will write mask only,
+ * but each next iteration color prepared in
+ * previous iteration will be written within new mask,
+ * the last component will be written separately,
+ * mask is not changing between 6th and 7th write
+ * and color will be prepared by last iteration
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ pattern_data = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ pattern_mask = (1 << index);
+
+ /* write color component */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ pattern_data = dst_color[index];
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* enable test pattern */
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* Increment for the first ramp, per color gradation:
+ * one gradation of 6-bit color corresponds to
+ * 2^10 gradations in 16-bit color.
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 6,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, inc_base + 2,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 5,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+
+ /* enable test pattern */
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+
+ REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+ }
+ break;
+ default:
+ break;
+
+ }
+}
+
+static void tgn10_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+
+ tgn10_get_position(tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+
+
+static void tgn10_enable_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ uint32_t active_width = timing->h_addressable;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+
+ if (flags) {
+ uint32_t stereo_en;
+ stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0;
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_STEREO_CONTROL,
+ OTG_STEREO_EN, stereo_en,
+ OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+
+ if (flags->PROGRAM_POLARITY)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_STEREO_EYE_FLAG_POLARITY,
+ flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
+
+ if (flags->DISABLE_STEREO_DP_SYNC)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+ OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
+
+ }
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, active_width);
+
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+}
+
+static void tgn10_program_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ if (flags->PROGRAM_STEREO)
+ tgn10_enable_stereo(tg, timing, flags);
+ else
+ tgn10_disable_stereo(tg);
+}
+
+
+static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+{
+ bool ret = false;
+ uint32_t left_eye = 0;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET(OTG_STEREO_STATUS,
+ OTG_STEREO_CURRENT_EYE, &left_eye);
+ if (left_eye == 1)
+ ret = true;
+ else
+ ret = false;
+
+ return ret;
+}
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s)
+{
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &s->otg_enabled);
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &s->v_blank_start,
+ OTG_V_BLANK_END, &s->v_blank_end);
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
+
+ REG_GET(OTG_V_TOTAL,
+ OTG_V_TOTAL, &s->v_total);
+
+ REG_GET(OTG_V_TOTAL_MAX,
+ OTG_V_TOTAL_MAX, &s->v_total_max);
+
+ REG_GET(OTG_V_TOTAL_MIN,
+ OTG_V_TOTAL_MIN, &s->v_total_min);
+
+ REG_GET_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, &s->v_sync_a_start,
+ OTG_V_SYNC_A_END, &s->v_sync_a_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &s->h_blank_start,
+ OTG_H_BLANK_END, &s->h_blank_end);
+
+ REG_GET_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, &s->h_sync_a_start,
+ OTG_H_SYNC_A_END, &s->h_sync_a_end);
+
+ REG_GET(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
+
+ REG_GET(OTG_H_TOTAL,
+ OTG_H_TOTAL, &s->h_total);
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+}
+
+
+static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .validate_timing = tgn10_validate_timing,
+ .program_timing = tgn10_program_timing,
+ .program_global_sync = tgn10_program_global_sync,
+ .enable_crtc = tgn10_enable_crtc,
+ .disable_crtc = tgn10_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = tgn10_is_counter_moving,
+ .get_position = tgn10_get_position,
+ .get_frame_count = tgn10_get_vblank_counter,
+ .get_scanoutpos = tgn10_get_crtc_scanoutpos,
+ .set_early_control = tgn10_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = tgn10_wait_for_state,
+ .set_blank = tgn10_set_blank,
+ .is_blanked = tgn10_is_blanked,
+ .set_blank_color = tgn10_program_blank_color,
+ .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
+ .enable_reset_trigger = tgn10_enable_reset_trigger,
+ .disable_reset_trigger = tgn10_disable_reset_trigger,
+ .lock = tgn10_lock,
+ .unlock = tgn10_unlock,
+ .enable_optc_clock = tgn10_enable_optc_clock,
+ .set_drr = tgn10_set_drr,
+ .set_static_screen_control = tgn10_set_static_screen_control,
+ .set_test_pattern = tgn10_set_test_pattern,
+ .program_stereo = tgn10_program_stereo,
+ .is_stereo_left_eye = tgn10_is_stereo_left_eye
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+{
+ tgn10->base.funcs = &dcn10_tg_funcs;
+
+ tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
+ tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+
+ tgn10->min_h_blank = 32;
+ tgn10->min_v_blank = 3;
+ tgn10->min_v_blank_interlace = 5;
+ tgn10->min_h_sync_width = 8;
+ tgn10->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
new file mode 100644
index 000000000000..7d4818d7aa31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCN10_H__
+#define __DC_TIMING_GENERATOR_DCN10_H__
+
+#include "timing_generator.h"
+
+#define DCN10TG_FROM_TG(tg)\
+ container_of(tg, struct dcn10_timing_generator, base)
+
+#define TG_COMMON_REG_LIST_DCN(inst) \
+ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
+ SRI(OTG_VUPDATE_PARAM, OTG, inst),\
+ SRI(OTG_VREADY_PARAM, OTG, inst),\
+ SRI(OTG_BLANK_CONTROL, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
+ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
+ SRI(OTG_H_TOTAL, OTG, inst),\
+ SRI(OTG_H_BLANK_START_END, OTG, inst),\
+ SRI(OTG_H_SYNC_A, OTG, inst),\
+ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_H_TIMING_CNTL, OTG, inst),\
+ SRI(OTG_V_TOTAL, OTG, inst),\
+ SRI(OTG_V_BLANK_START_END, OTG, inst),\
+ SRI(OTG_V_SYNC_A, OTG, inst),\
+ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_INTERLACE_CONTROL, OTG, inst),\
+ SRI(OTG_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_CONTROL, OTG, inst),\
+ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_STATUS, OTG, inst),\
+ SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MIN, OTG, inst),\
+ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI(OTG_TRIGA_CNTL, OTG, inst),\
+ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
+ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
+ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
+ SRI(OTG_STATUS, OTG, inst),\
+ SRI(OTG_STATUS_POSITION, OTG, inst),\
+ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
+ SRI(OTG_BLACK_COLOR, OTG, inst),\
+ SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
+ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
+ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
+ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
+ SRI(OPPBUF_CONTROL, OPPBUF, inst),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
+ SRI(CONTROL, VTG, inst)
+
+#define TG_COMMON_REG_LIST_DCN1_0(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
+
+
+struct dcn_tg_registers {
+ uint32_t OTG_VSTARTUP_PARAM;
+ uint32_t OTG_VUPDATE_PARAM;
+ uint32_t OTG_VREADY_PARAM;
+ uint32_t OTG_BLANK_CONTROL;
+ uint32_t OTG_MASTER_UPDATE_LOCK;
+ uint32_t OTG_GLOBAL_CONTROL0;
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL;
+ uint32_t OTG_H_TOTAL;
+ uint32_t OTG_H_BLANK_START_END;
+ uint32_t OTG_H_SYNC_A;
+ uint32_t OTG_H_SYNC_A_CNTL;
+ uint32_t OTG_H_TIMING_CNTL;
+ uint32_t OTG_V_TOTAL;
+ uint32_t OTG_V_BLANK_START_END;
+ uint32_t OTG_V_SYNC_A;
+ uint32_t OTG_V_SYNC_A_CNTL;
+ uint32_t OTG_INTERLACE_CONTROL;
+ uint32_t OTG_CONTROL;
+ uint32_t OTG_STEREO_CONTROL;
+ uint32_t OTG_3D_STRUCTURE_CONTROL;
+ uint32_t OTG_STEREO_STATUS;
+ uint32_t OTG_V_TOTAL_MAX;
+ uint32_t OTG_V_TOTAL_MIN;
+ uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_TRIGA_CNTL;
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL;
+ uint32_t OTG_STATIC_SCREEN_CONTROL;
+ uint32_t OTG_STATUS_FRAME_COUNT;
+ uint32_t OTG_STATUS;
+ uint32_t OTG_STATUS_POSITION;
+ uint32_t OTG_NOM_VERT_POSITION;
+ uint32_t OTG_BLACK_COLOR;
+ uint32_t OTG_TEST_PATTERN_PARAMETERS;
+ uint32_t OTG_TEST_PATTERN_CONTROL;
+ uint32_t OTG_TEST_PATTERN_COLOR;
+ uint32_t OTG_CLOCK_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
+ uint32_t OPTC_INPUT_CLOCK_CONTROL;
+ uint32_t OPTC_DATA_SOURCE_SELECT;
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL;
+ uint32_t OPPBUF_CONTROL;
+ uint32_t OPPBUF_3D_PARAMETERS_0;
+ uint32_t CONTROL;
+};
+
+#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
+ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
+ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DE_MODE, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_CURRENT_BLANK_STATE, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_INTERLACE_CONTROL, OTG_INTERLACE_ENABLE, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
+ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_B_CB, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_G_Y, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_R_CR, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh)
+
+#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC1, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_VRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_HRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_RAMP0_OFFSET, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_EN, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_MODE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_DYNAMIC_RANGE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
+
+#define TG_REG_FIELD_LIST(type) \
+ type VSTARTUP_START;\
+ type VUPDATE_OFFSET;\
+ type VUPDATE_WIDTH;\
+ type VREADY_OFFSET;\
+ type OTG_BLANK_DATA_EN;\
+ type OTG_BLANK_DE_MODE;\
+ type OTG_CURRENT_BLANK_STATE;\
+ type OTG_MASTER_UPDATE_LOCK;\
+ type UPDATE_LOCK_STATUS;\
+ type OTG_UPDATE_PENDING;\
+ type OTG_MASTER_UPDATE_LOCK_SEL;\
+ type OTG_BLANK_DATA_DOUBLE_BUFFER_EN;\
+ type OTG_H_TOTAL;\
+ type OTG_H_BLANK_START;\
+ type OTG_H_BLANK_END;\
+ type OTG_H_SYNC_A_START;\
+ type OTG_H_SYNC_A_END;\
+ type OTG_H_SYNC_A_POL;\
+ type OTG_H_TIMING_DIV_BY2;\
+ type OTG_V_TOTAL;\
+ type OTG_V_BLANK_START;\
+ type OTG_V_BLANK_END;\
+ type OTG_V_SYNC_A_START;\
+ type OTG_V_SYNC_A_END;\
+ type OTG_V_SYNC_A_POL;\
+ type OTG_INTERLACE_ENABLE;\
+ type OTG_MASTER_EN;\
+ type OTG_START_POINT_CNTL;\
+ type OTG_DISABLE_POINT_CNTL;\
+ type OTG_FIELD_NUMBER_CNTL;\
+ type OTG_STEREO_EN;\
+ type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\
+ type OTG_STEREO_SYNC_OUTPUT_POLARITY;\
+ type OTG_STEREO_EYE_FLAG_POLARITY;\
+ type OTG_STEREO_CURRENT_EYE;\
+ type OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP;\
+ type OTG_3D_STRUCTURE_EN;\
+ type OTG_3D_STRUCTURE_V_UPDATE_MODE;\
+ type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\
+ type OTG_V_TOTAL_MAX;\
+ type OTG_V_TOTAL_MIN;\
+ type OTG_V_TOTAL_MIN_SEL;\
+ type OTG_V_TOTAL_MAX_SEL;\
+ type OTG_FORCE_LOCK_ON_EVENT;\
+ type OTG_SET_V_TOTAL_MIN_MASK_EN;\
+ type OTG_SET_V_TOTAL_MIN_MASK;\
+ type OTG_FORCE_COUNT_NOW_CLEAR;\
+ type OTG_FORCE_COUNT_NOW_MODE;\
+ type OTG_FORCE_COUNT_NOW_OCCURRED;\
+ type OTG_TRIGA_SOURCE_SELECT;\
+ type OTG_TRIGA_SOURCE_PIPE_SELECT;\
+ type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\
+ type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\
+ type OTG_STATIC_SCREEN_EVENT_MASK;\
+ type OTG_STATIC_SCREEN_FRAME_COUNT;\
+ type OTG_FRAME_COUNT;\
+ type OTG_V_BLANK;\
+ type OTG_V_ACTIVE_DISP;\
+ type OTG_HORZ_COUNT;\
+ type OTG_VERT_COUNT;\
+ type OTG_VERT_COUNT_NOM;\
+ type OTG_BLACK_COLOR_B_CB;\
+ type OTG_BLACK_COLOR_G_Y;\
+ type OTG_BLACK_COLOR_R_CR;\
+ type OTG_TEST_PATTERN_INC0;\
+ type OTG_TEST_PATTERN_INC1;\
+ type OTG_TEST_PATTERN_VRES;\
+ type OTG_TEST_PATTERN_HRES;\
+ type OTG_TEST_PATTERN_RAMP0_OFFSET;\
+ type OTG_TEST_PATTERN_EN;\
+ type OTG_TEST_PATTERN_MODE;\
+ type OTG_TEST_PATTERN_DYNAMIC_RANGE;\
+ type OTG_TEST_PATTERN_COLOR_FORMAT;\
+ type OTG_TEST_PATTERN_MASK;\
+ type OTG_TEST_PATTERN_DATA;\
+ type OTG_BUSY;\
+ type OTG_CLOCK_EN;\
+ type OTG_CLOCK_ON;\
+ type OTG_CLOCK_GATE_DIS;\
+ type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
+ type OTG_VERTICAL_INTERRUPT2_LINE_START;\
+ type OPTC_INPUT_CLK_EN;\
+ type OPTC_INPUT_CLK_ON;\
+ type OPTC_INPUT_CLK_GATE_DIS;\
+ type OPTC_SRC_SEL;\
+ type OPTC_SEG0_SRC_SEL;\
+ type OPTC_UNDERFLOW_OCCURRED_STATUS;\
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_3D_VACT_SPACE1_SIZE;\
+ type VTG0_ENABLE;\
+ type VTG0_FP2;\
+ type VTG0_VCOUNT_INIT;
+
+struct dcn_tg_shift {
+ TG_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_tg_mask {
+ TG_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_timing_generator {
+ struct timing_generator base;
+
+ const struct dcn_tg_registers *tg_regs;
+ const struct dcn_tg_shift *tg_shift;
+ const struct dcn_tg_mask *tg_mask;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+ uint32_t min_v_blank_interlace;
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+
+struct dcn_otg_state {
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t v_sync_a_pol;
+ uint32_t v_total;
+ uint32_t v_total_max;
+ uint32_t v_total_min;
+ uint32_t v_sync_a_start;
+ uint32_t v_sync_a_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ uint32_t h_sync_a_start;
+ uint32_t h_sync_a_end;
+ uint32_t h_sync_a_pol;
+ uint32_t h_total;
+ uint32_t underflow_occurred_status;
+ uint32_t otg_enabled;
+};
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s);
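+
+/* Illustrative debugging sketch (not part of the interface): dump a couple of
+ * fields after reading the OTG state; the logging call is a placeholder:
+ *
+ *	struct dcn_otg_state s = { 0 };
+ *
+ *	tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ *	pr_debug("OTG enabled=%u v_total=%u\n", s.otg_enabled, s.v_total);
+ */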
+
+#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
new file mode 100644
index 000000000000..ab88f07772a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines helper functions provided by the Display Manager to
+ * Display Core.
+ */
+#ifndef __DM_HELPERS__
+#define __DM_HELPERS__
+
+#include "dc_types.h"
+#include "dc.h"
+
+struct dp_mst_stream_allocation_table;
+
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps);
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable);
+
+/*
+ * Polls for ACT (allocation change trigger) to be handled.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+/*
+ * Sends ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable);
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot);
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+/**
+ * OS specific aux read callback.
+ */
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
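+
+/* Illustrative use (sketch only): read the DPCD sink count register, using
+ * the standard DP_SINK_COUNT address (0x200) from drm_dp_helper.h:
+ *
+ *	uint8_t sink_count = 0;
+ *
+ *	if (dm_helpers_dp_read_dpcd(ctx, link, DP_SINK_COUNT, &sink_count, 1))
+ *		...
+ */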
+
+/**
+ * OS specific aux write callback.
+ */
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd);
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+
+#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
new file mode 100644
index 000000000000..bbfa83252fc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_PP_SMU_IF__H
+#define DM_PP_SMU_IF__H
+
+/*
+ * Interface to PPLIB/SMU to set up clocks and p-state requirements on SoC.
+ */
+
+
+struct pp_smu {
+ struct dc_context *ctx;
+};
+
+enum wm_set_id {
+ WM_A,
+ WM_B,
+ WM_C,
+ WM_D,
+ WM_COUNT,
+};
+
+struct pp_smu_wm_set_range {
+ enum wm_set_id wm_inst;
+ uint32_t min_fill_clk_khz;
+ uint32_t max_fill_clk_khz;
+ uint32_t min_drain_clk_khz;
+ uint32_t max_drain_clk_khz;
+};
+
+struct pp_smu_wm_range_sets {
+ uint32_t num_reader_wm_sets;
+ struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+
+ uint32_t num_writer_wm_sets;
+ struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+};
+
+struct pp_smu_display_requirement_rv {
+ /* PPSMC_MSG_SetDisplayCount: count
+ * 0 triggers S0i2 optimization
+ */
+ unsigned int display_count;
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ unsigned int hard_min_fclk_khz;
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ unsigned int hard_min_dcefclk_khz;
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ unsigned int min_deep_sleep_dcefclk_mhz;
+};
+
+struct pp_smu_funcs_rv {
+ struct pp_smu pp_smu;
+
+ void (*set_display_requirement)(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req);
+
+ /* which SMU message? are reader and writer WM separate SMU msg? */
+ void (*set_wm_ranges)(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges);
+
+};
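+
+/* Illustrative sketch (not a required call sequence): a caller holding a
+ * struct pp_smu_funcs_rv *funcs might fill the requirement struct and pass it
+ * down like this; the clock values below are arbitrary examples:
+ *
+ *	struct pp_smu_display_requirement_rv req = { 0 };
+ *
+ *	req.display_count = 1;
+ *	req.hard_min_fclk_khz = 400000;
+ *	req.hard_min_dcefclk_khz = 300000;
+ *	req.min_deep_sleep_dcefclk_mhz = 158;
+ *	funcs->set_display_requirement(&funcs->pp_smu, &req);
+ */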
+
+#if 0
+struct pp_smu_funcs_rv {
+
+ /* PPSMC_MSG_SetDisplayCount
+ * 0 triggers S0i2 optimization
+ */
+ void (*set_display_count)(struct pp_smu *pp, int count);
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ void (*set_hard_min_dcefclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ void (*set_min_deep_sleep_dcefclk)(struct pp_smu *pp, int mhz);
+
+ /* todo: aesthetic
+ * watermark range table
+ */
+
+ /* todo: functional/feature
+ * PPSMC_MSG_SetHardMinSocclkByFreq: required to support DWB
+ */
+};
+#endif
+
+#endif /* DM_PP_SMU_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
new file mode 100644
index 000000000000..d4917037ac42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines external dependencies of Display Core.
+ */
+
+#ifndef __DM_SERVICES_H__
+
+#define __DM_SERVICES_H__
+
+/* TODO: remove when DC is complete. */
+#include "dm_services_types.h"
+#include "logger_interface.h"
+#include "link_service_types.h"
+
+#undef DEPRECATED
+
+irq_handler_idx dm_register_interrupt(
+ struct dc_context *ctx,
+ struct dc_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *handler_args);
+
+
+/*
+ *
+ * GPU register access
+ *
+ */
+
+/* enable for debugging new code, this adds 50k to the driver size. */
+/* #define DM_CHECK_ADDR_0 */
+
+#define dm_read_reg(ctx, address) \
+ dm_read_reg_func(ctx, address, __func__)
+
+static inline uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ return value;
+}
+
+#define dm_write_reg(ctx, address, value) \
+ dm_write_reg_func(ctx, address, value, __func__)
+
+static inline void dm_write_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ uint32_t value,
+ const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register write. address = 0");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+}
+
+static inline uint32_t dm_read_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index)
+{
+ return cgs_read_ind_register(ctx->cgs_device, addr_space, index);
+}
+
+static inline void dm_write_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index,
+ uint32_t value)
+{
+ cgs_write_ind_register(ctx->cgs_device, addr_space, index, value);
+}
+
+static inline uint32_t get_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ return (mask & reg_value) >> shift;
+}
+
+#define get_reg_field_value(reg_value, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
+
+static inline uint32_t set_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ ASSERT(mask != 0);
+ return (reg_value & ~mask) | (mask & (value << shift));
+}
+
+#define set_reg_field_value(reg_value, value, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
+
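+/* Illustrative usage sketch (not part of this interface): a typical
+ * read-modify-write with the helpers above. HYPOTHETICAL_REG and its FIELD
+ * are placeholder names, not real registers:
+ *
+ *	uint32_t val = dm_read_reg(ctx, mmHYPOTHETICAL_REG);
+ *
+ *	set_reg_field_value(val, 1, HYPOTHETICAL_REG, FIELD);
+ *	dm_write_reg(ctx, mmHYPOTHETICAL_REG, val);
+ */
+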
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+#define FD(reg_field) reg_field ## __SHIFT, \
+ reg_field ## _MASK
+
+/*
+ * Returns the number of polls before the condition is met.
+ * Returns 0 if the condition is not met after the specified number of
+ * timeout tries.
+ */
+unsigned int generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line);
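+
+/* Illustrative call (sketch only; register and field names are placeholders):
+ * poll a hypothetical field once per microsecond for up to 10 tries until it
+ * reads 1:
+ *
+ *	generic_reg_wait(ctx, mmHYPOTHETICAL_REG,
+ *		HYPOTHETICAL_REG__FIELD_MASK, HYPOTHETICAL_REG__FIELD__SHIFT,
+ *		1, 1, 10, __func__, __LINE__);
+ */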
+
+
+/* These macros need to be used with soc15 registers in order to retrieve
+ * the actual offset.
+ */
+#define dm_write_reg_soc15(ctx, reg, inst_offset, value) \
+ dm_write_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, value, __func__)
+
+#define dm_read_reg_soc15(ctx, reg, inst_offset) \
+ dm_read_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, __func__)
+
+#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
+ dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
+ n, __VA_ARGS__)
+
+#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ n, __VA_ARGS__)
+
+#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+#define set_reg_field_value_soc15(reg_value, value, block, reg_num, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+/**************************************
+ * Power Play (PP) interfaces
+ **************************************/
+
+/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
+ * This is done *before* it changes DCE clock.
+ *
+ * If required clock is higher than current, then PP will increase the voltage.
+ *
+ * If required clock is lower than current, then PP will defer reduction of
+ * voltage until the call to dc_service_pp_post_dce_clock_change().
+ *
+ * \input - Contains clocks needed for Mode Set.
+ *
+ * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
+ * Valid only if function returns zero.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state);
+
+/* The returned clock ranges are 'static' system clocks, which will be used for
+ * mode validation purposes.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks);
+
+/* Gets valid clock levels from pplib
+ *
+ * input: clk_type - display clk / sclk / mem clk
+ *
+ * output: array of valid clock levels for given type in ascending order,
+ * with invalid levels filtered out
+ *
+ */
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clk_level_info);
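+
+/* Illustrative query (sketch only, assuming the dm_pp_clock_levels layout
+ * from dm_services_types.h with num_levels/clocks_in_khz): pick the highest
+ * valid display clock level, which is last since levels are ascending:
+ *
+ *	struct dm_pp_clock_levels levels = { 0 };
+ *	uint32_t max_khz = 0;
+ *
+ *	if (dm_pp_get_clock_levels_by_type(ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ *			&levels) && levels.num_levels > 0)
+ *		max_khz = levels.clocks_in_khz[levels.num_levels - 1];
+ */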
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info);
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info);
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
+
+void dm_pp_get_funcs_rv(struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs);
+
+/* DAL calls this function to notify PP about completion of Mode Set.
+ * For PP it means that current DCE clocks are those which were returned
+ * by dm_pp_pre_dce_clock_change(), in the 'output' parameter.
+ *
+ * If the clocks are higher than before, then PP does nothing.
+ *
+ * If the clocks are lower than before, then PP reduces the voltage.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg);
+
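Taken together, dm_pp_pre_dce_clock_change() and dm_pp_apply_display_requirements() bracket a mode set. Below is a hedged sketch of the call order a caller might follow; the wrapper function is hypothetical and only declarations from this header are used.

/* Hypothetical caller-side sequence; the actual mode-set programming is elided. */
static bool example_mode_set_with_pp_handshake(
		struct dc_context *ctx,
		struct dm_pp_gpu_clock_range *requested,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct dm_pp_gpu_clock_range actual = { 0 };

	/* 1. Ask PP for the clocks before touching DCE clocks; PP raises
	 *    the voltage now if the requested clocks are higher. */
	if (!dm_pp_pre_dce_clock_change(ctx, requested, &actual))
		return false;

	/* 2. ...program DCE clocks / perform the mode set using 'actual'... */

	/* 3. Notify PP that the mode set completed; PP may now lower the
	 *    voltage if the new clocks are lower than before. */
	return dm_pp_apply_display_requirements(ctx, pp_display_cfg);
}
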
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req);
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req);
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info);
+
+/****** end of PP interfaces ******/
+
+struct persistent_data_flag {
+ bool save_per_link;
+ bool save_per_edid;
+};
+
+/* Call to write data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * params - value to write in defined folder/key
+ * size - size of the input params
+ * flag - determine whether to save by link or edid
+ *
+ * \returns true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - create key with param value
+ * under base folder
+ * NULL - NULL - create module folder under base folder
+ * - NULL NULL - failure
+ * NULL - - - create key under module folder
+ * with no edid/link identification
+ * - NULL - - create key with param value
+ * under base folder
+ * - - NULL - create module folder under base folder
+ * - - - - create key under module folder
+ * with edid/link identification
+ */
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
+
+/* Call to read data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * size - size of the output params
+ *          flag - determine whether it was saved by link or edid
+ *
+ * \returns params - value read from defined folder/key
+ * true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - read key under base folder
+ * NULL - NULL - failure
+ * - NULL NULL - failure
+ * NULL - - - read key under module folder
+ * with no edid/link identification
+ * - NULL - - read key under base folder
+ * - - NULL - failure
+ * - - - - read key under module folder
+ * with edid/link identification
+ */
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
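A short usage sketch for the read/write pair above, exercising the 'sink + module + key' row of the tables; the module and key names are invented purely for illustration.

/* Illustrative only - "example_module"/"example_key" are made-up names. */
static void example_persistent_data_round_trip(struct dc_context *ctx,
					       const struct dc_sink *sink)
{
	struct persistent_data_flag flag = { .save_per_edid = true };
	unsigned int value = 0x55;
	unsigned int readback = 0;

	/* sink + module + key: creates the key under the module folder with
	 * edid identification (last row of the write table above). */
	if (!dm_write_persistent_data(ctx, sink, "example_module",
				      "example_key", &value, sizeof(value),
				      &flag))
		return;

	/* The same triple on the read side returns the stored value. */
	dm_read_persistent_data(ctx, sink, "example_module", "example_key",
				&readback, sizeof(readback), &flag);
}
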
+bool dm_query_extended_brightness_caps
+ (struct dc_context *ctx, enum dm_acpi_display_type display,
+ struct dm_acpi_atif_backlight_caps *pCaps);
+
+bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
+
+/*
+ *
+ * print-out services
+ *
+ */
+#define dm_log_to_buffer(buffer, size, fmt, args)\
+ vsnprintf(buffer, size, fmt, args)
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx);
+
+/*
+ * Debug and verification hooks
+ */
+bool dm_helpers_dc_conn_log(
+ struct dc_context *ctx,
+ struct log_entry *entry,
+ enum dc_log_type event);
+
+void dm_dtn_log_begin(struct dc_context *ctx);
+void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
+void dm_dtn_log_end(struct dc_context *ctx);
+
+#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
new file mode 100644
index 000000000000..fa26cf488b3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DM_SERVICES_TYPES_H__
+#define __DM_SERVICES_TYPES_H__
+
+#include "os_types.h"
+#include "dc_types.h"
+
+#include "dm_pp_smu.h"
+
+struct dm_pp_clock_range {
+ int min_khz;
+ int max_khz;
+};
+
+enum dm_pp_clocks_state {
+ DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_STATE_ULTRA_LOW,
+ DM_PP_CLOCKS_STATE_LOW,
+ DM_PP_CLOCKS_STATE_NOMINAL,
+ DM_PP_CLOCKS_STATE_PERFORMANCE,
+
+	/* Starting from DCE11, a maximum of 8 DPM state levels is supported. */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_INVALID = DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_0,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_1,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_2,
+ /* to be backward compatible */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_3,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_4,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_5,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_6,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_7,
+
+ DM_PP_CLOCKS_MAX_STATES
+};
+
+struct dm_pp_gpu_clock_range {
+ enum dm_pp_clocks_state clock_state;
+ struct dm_pp_clock_range sclk;
+ struct dm_pp_clock_range mclk;
+ struct dm_pp_clock_range eclk;
+ struct dm_pp_clock_range dclk;
+};
+
+enum dm_pp_clock_type {
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK = 1,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK, /* System clock */
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ DM_PP_CLOCK_TYPE_DCFCLK,
+ DM_PP_CLOCK_TYPE_DCEFCLK,
+ DM_PP_CLOCK_TYPE_SOCCLK,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ DM_PP_CLOCK_TYPE_DPPCLK,
+ DM_PP_CLOCK_TYPE_FCLK,
+};
+
+#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \
+ (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid"
+
+#define DM_PP_MAX_CLOCK_LEVELS 8
+
+struct dm_pp_clock_levels {
+ uint32_t num_levels;
+ uint32_t clocks_in_khz[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_latency {
+ uint32_t clocks_in_khz;
+ uint32_t latency_in_us;
+};
+
+struct dm_pp_clock_levels_with_latency {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_latency data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_voltage {
+ uint32_t clocks_in_khz;
+ uint32_t voltage_in_mv;
+};
+
+struct dm_pp_clock_levels_with_voltage {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_voltage data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
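As a usage sketch for the *_with_latency query declared in dm_services.h, the returned levels (reported in ascending order) can be scanned for the highest clock that still meets a latency budget; the budget value and helper name below are placeholders.

/* Sketch: highest DCFCLK level whose latency fits the caller's budget.
 * Returns 0 when no level qualifies or the query fails. */
static uint32_t example_pick_dcfclk_khz(const struct dc_context *ctx,
					uint32_t max_latency_us)
{
	struct dm_pp_clock_levels_with_latency levels = { 0 };
	uint32_t best_khz = 0;
	uint32_t i;

	if (!dm_pp_get_clock_levels_by_type_with_latency(ctx,
			DM_PP_CLOCK_TYPE_DCFCLK, &levels))
		return 0;

	for (i = 0; i < levels.num_levels && i < DM_PP_MAX_CLOCK_LEVELS; i++) {
		if (levels.data[i].latency_in_us <= max_latency_us)
			best_khz = levels.data[i].clocks_in_khz;
	}

	return best_khz;
}
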
+struct dm_pp_single_disp_config {
+ enum signal_type signal;
+ uint8_t transmitter;
+ uint8_t ddi_channel_mapping;
+ uint8_t pipe_idx;
+ uint32_t src_height;
+ uint32_t src_width;
+ uint32_t v_refresh;
+ uint32_t sym_clock; /* HDMI only */
+ struct dc_link_settings link_settings; /* DP only */
+};
+
+#define MAX_WM_SETS 4
+
+enum dm_pp_wm_set_id {
+ WM_SET_A = 0,
+ WM_SET_B,
+ WM_SET_C,
+ WM_SET_D,
+ WM_SET_INVALID = 0xffff,
+};
+
+struct dm_pp_clock_range_for_wm_set {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_eng_clk_in_khz;
+ uint32_t wm_max_eng_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges {
+ uint32_t num_wm_sets;
+ struct dm_pp_clock_range_for_wm_set wm_clk_ranges[MAX_WM_SETS];
+};
+
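A hedged sketch of filling two watermark sets and handing them to dm_pp_notify_wm_clock_changes() (declared in dm_services.h); all clock numbers below are placeholders.

/* Placeholder ranges - real values come from bandwidth calculations. */
static bool example_notify_two_wm_sets(const struct dc_context *ctx)
{
	struct dm_pp_wm_sets_with_clock_ranges ranges = { 0 };

	ranges.num_wm_sets = 2;

	ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
	ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz = 300000;
	ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz = 600000;
	ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz = 800000;
	ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz = 1600000;

	ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
	ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz = 600000;
	ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 1200000;
	ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz = 800000;
	ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz = 1600000;

	return dm_pp_notify_wm_clock_changes(ctx, &ranges);
}
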
+struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_dcfclk_clk_in_khz;
+ uint32_t wm_max_dcfclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_socclk_clk_in_khz;
+ uint32_t wm_max_socclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges_soc15 {
+ uint32_t num_wm_dmif_sets;
+ uint32_t num_wm_mcif_sets;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15
+ wm_dmif_clocks_ranges[MAX_WM_SETS];
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15
+ wm_mcif_clocks_ranges[MAX_WM_SETS];
+};
+
+#define MAX_DISPLAY_CONFIGS 6
+
+struct dm_pp_display_configuration {
+	bool nb_pstate_switch_disable; /* controls NB PState switch */
+	bool cpu_cc6_disable; /* controls CPU CState switch (on or off) */
+ bool cpu_pstate_disable;
+ uint32_t cpu_pstate_separation_time;
+
+ uint32_t min_memory_clock_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_engine_clock_deep_sleep_khz;
+
+ uint32_t avail_mclk_switch_time_us;
+ uint32_t avail_mclk_switch_time_in_disp_active_us;
+ uint32_t min_dcfclock_khz;
+ uint32_t min_dcfc_deep_sleep_clock_khz;
+
+ uint32_t disp_clk_khz;
+
+ bool all_displays_in_sync;
+
+ uint8_t display_count;
+ struct dm_pp_single_disp_config disp_configs[MAX_DISPLAY_CONFIGS];
+
+	/* Controller index of primary display - used in MCLK SMC switching hang
+	 * SW workaround */
+	uint8_t crtc_index;
+	/* htotal*1000/pixelclk - used in MCLK SMC switching hang SW workaround */
+	uint32_t line_time_in_us;
+};
+
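A hedged example of populating this structure for a single DP display before passing it to dm_pp_apply_display_requirements(); the clock values are placeholders, and SIGNAL_TYPE_DISPLAY_PORT is assumed to be the corresponding enumerator from dc_types.h.

/* Placeholder values - real numbers come from mode validation. */
static void example_fill_display_cfg(struct dm_pp_display_configuration *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->display_count = 1;
	cfg->all_displays_in_sync = true;

	cfg->disp_configs[0].signal = SIGNAL_TYPE_DISPLAY_PORT; /* assumed enumerator */
	cfg->disp_configs[0].src_width = 1920;
	cfg->disp_configs[0].src_height = 1080;
	cfg->disp_configs[0].v_refresh = 60;

	cfg->min_engine_clock_khz = 300000;
	cfg->min_memory_clock_khz = 800000;
	cfg->disp_clk_khz = 600000;
}
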
+struct dm_bl_data_point {
+ /* Brightness level in percentage */
+ uint8_t luminance;
+ /* Brightness level as effective value in range 0-255,
+ * corresponding to above percentage
+ */
+ uint8_t signalLevel;
+};
+
+/* Total size of the structure should not exceed 256 bytes */
+struct dm_acpi_atif_backlight_caps {
+
+
+ uint16_t size; /* Bytes 0-1 (2 bytes) */
+	uint16_t flags; /* Bytes 2-3 (2 bytes) */
+ uint8_t errorCode; /* Byte 4 */
+ uint8_t acLevelPercentage; /* Byte 5 */
+ uint8_t dcLevelPercentage; /* Byte 6 */
+ uint8_t minInputSignal; /* Byte 7 */
+ uint8_t maxInputSignal; /* Byte 8 */
+ uint8_t numOfDataPoints; /* Byte 9 */
+ struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+};
+
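Each data point maps a luminance percentage to an effective 0-255 signal level; a minimal lookup sketch over the table (nearest point at or below the request, no interpolation, assuming the points are listed in ascending luminance order) could look like the following.

/* Sketch: nearest data point at or below the requested percentage.
 * Falls back to minInputSignal when the table is empty. */
static uint8_t example_luminance_to_signal(
		const struct dm_acpi_atif_backlight_caps *caps,
		uint8_t luminance_percent)
{
	uint8_t signal = caps->minInputSignal;
	uint8_t i;

	for (i = 0; i < caps->numOfDataPoints && i < 99; i++) {
		if (caps->dataPoints[i].luminance > luminance_percent)
			break;
		signal = caps->dataPoints[i].signalLevel;
	}

	return signal;
}
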
+enum dm_acpi_display_type {
+ AcpiDisplayType_LCD1 = 0,
+ AcpiDisplayType_CRT1 = 1,
+ AcpiDisplayType_DFP1 = 3,
+ AcpiDisplayType_CRT2 = 4,
+ AcpiDisplayType_LCD2 = 5,
+ AcpiDisplayType_DFP2 = 7,
+ AcpiDisplayType_DFP3 = 9,
+ AcpiDisplayType_DFP4 = 10,
+ AcpiDisplayType_DFP5 = 11,
+ AcpiDisplayType_DFP6 = 12
+};
+
+enum dm_pp_power_level {
+ DM_PP_POWER_LEVEL_INVALID,
+ DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_PERFORMANCE,
+
+ DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
+ DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
+ DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
+ DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
+ DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
+};
+
+struct dm_pp_power_level_change_request {
+ enum dm_pp_power_level power_level;
+};
+
+struct dm_pp_clock_for_voltage_req {
+ enum dm_pp_clock_type clk_type;
+ uint32_t clocks_in_khz;
+};
+
+struct dm_pp_static_clock_info {
+ uint32_t max_sclk_khz;
+ uint32_t max_mclk_khz;
+
+ /* max possible display block clocks state */
+ enum dm_pp_clocks_state max_clocks_state;
+};
+
+struct dtn_min_clk_info {
+ uint32_t disp_clk_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_memory_clock_khz;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
new file mode 100644
index 000000000000..87bab8e8139f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the 'dml' sub-component of DAL.
+# It provides the Display Mode Library (DML) calculations used by
+# other DAL subcomponents.
+
+CFLAGS_display_mode_vba.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_mode_lib.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_pipe_clocks.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml1_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_helpers.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_soc_bounding_box.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml_common_defs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+
+
+DML = display_mode_lib.o display_rq_dlg_calc.o \
+ display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+ soc_bounding_box.o dml_common_defs.o display_mode_vba.o
+
+AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
new file mode 100644
index 000000000000..ea4cde952f4f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_FEATURES_H__
+#define __DC_FEATURES_H__
+
+// local features
+#define DC__PRESENT 1
+#define DC__PRESENT__1 1
+#define DC__NUM_DPP 4
+#define DC__VOLTAGE_STATES 7
+#define DC__NUM_DPP__4 1
+#define DC__NUM_DPP__0_PRESENT 1
+#define DC__NUM_DPP__1_PRESENT 1
+#define DC__NUM_DPP__2_PRESENT 1
+#define DC__NUM_DPP__3_PRESENT 1
+#define DC__NUM_DPP__MAX 8
+#define DC__NUM_DPP__MAX__8 1
+#define DC__PIPE_10BIT 0
+#define DC__PIPE_10BIT__0 1
+#define DC__PIPE_10BIT__MAX 1
+#define DC__PIPE_10BIT__MAX__1 1
+#define DC__NUM_OPP 4
+#define DC__NUM_OPP__4 1
+#define DC__NUM_OPP__0_PRESENT 1
+#define DC__NUM_OPP__1_PRESENT 1
+#define DC__NUM_OPP__2_PRESENT 1
+#define DC__NUM_OPP__3_PRESENT 1
+#define DC__NUM_OPP__MAX 6
+#define DC__NUM_OPP__MAX__6 1
+#define DC__NUM_DSC 0
+#define DC__NUM_DSC__0 1
+#define DC__NUM_DSC__MAX 6
+#define DC__NUM_DSC__MAX__6 1
+#define DC__NUM_ABM 1
+#define DC__NUM_ABM__1 1
+#define DC__NUM_ABM__0_PRESENT 1
+#define DC__NUM_ABM__MAX 2
+#define DC__NUM_ABM__MAX__2 1
+#define DC__ODM_PRESENT 0
+#define DC__ODM_PRESENT__0 1
+#define DC__NUM_OTG 4
+#define DC__NUM_OTG__4 1
+#define DC__NUM_OTG__0_PRESENT 1
+#define DC__NUM_OTG__1_PRESENT 1
+#define DC__NUM_OTG__2_PRESENT 1
+#define DC__NUM_OTG__3_PRESENT 1
+#define DC__NUM_OTG__MAX 6
+#define DC__NUM_OTG__MAX__6 1
+#define DC__NUM_DWB 2
+#define DC__NUM_DWB__2 1
+#define DC__NUM_DWB__0_PRESENT 1
+#define DC__NUM_DWB__1_PRESENT 1
+#define DC__NUM_DWB__MAX 2
+#define DC__NUM_DWB__MAX__2 1
+#define DC__NUM_DIG 4
+#define DC__NUM_DIG__4 1
+#define DC__NUM_DIG__0_PRESENT 1
+#define DC__NUM_DIG__1_PRESENT 1
+#define DC__NUM_DIG__2_PRESENT 1
+#define DC__NUM_DIG__3_PRESENT 1
+#define DC__NUM_DIG__MAX 6
+#define DC__NUM_DIG__MAX__6 1
+#define DC__NUM_AUX 4
+#define DC__NUM_AUX__4 1
+#define DC__NUM_AUX__0_PRESENT 1
+#define DC__NUM_AUX__1_PRESENT 1
+#define DC__NUM_AUX__2_PRESENT 1
+#define DC__NUM_AUX__3_PRESENT 1
+#define DC__NUM_AUX__MAX 6
+#define DC__NUM_AUX__MAX__6 1
+#define DC__NUM_AUDIO_STREAMS 4
+#define DC__NUM_AUDIO_STREAMS__4 1
+#define DC__NUM_AUDIO_STREAMS__0_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__1_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__2_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__3_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__MAX 8
+#define DC__NUM_AUDIO_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_ENDPOINTS 6
+#define DC__NUM_AUDIO_ENDPOINTS__6 1
+#define DC__NUM_AUDIO_ENDPOINTS__0_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__1_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__2_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__3_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__4_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__5_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_ENDPOINTS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_STREAMS 0
+#define DC__NUM_AUDIO_INPUT_STREAMS__0 1
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX 8
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS 0
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__0 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX__8 1
+#define DC__NUM_CURSOR 1
+#define DC__NUM_CURSOR__1 1
+#define DC__NUM_CURSOR__0_PRESENT 1
+#define DC__NUM_CURSOR__MAX 2
+#define DC__NUM_CURSOR__MAX__2 1
+#define DC__DIGITAL_BYPASS_PRESENT 0
+#define DC__DIGITAL_BYPASS_PRESENT__0 1
+#define DC__HCID_HWMAJVER 1
+#define DC__HCID_HWMAJVER__1 1
+#define DC__HCID_HWMINVER 0
+#define DC__HCID_HWMINVER__0 1
+#define DC__HCID_HWREV 0
+#define DC__HCID_HWREV__0 1
+#define DC__ROMSTRAP_PRESENT 0
+#define DC__ROMSTRAP_PRESENT__0 1
+#define DC__NUM_RBBMIF_DECODES 30
+#define DC__NUM_RBBMIF_DECODES__30 1
+#define DC__NUM_DBG_REGS 36
+#define DC__NUM_DBG_REGS__36 1
+#define DC__NUM_PIPES_UNDERLAY 0
+#define DC__NUM_PIPES_UNDERLAY__0 1
+#define DC__NUM_PIPES_UNDERLAY__MAX 2
+#define DC__NUM_PIPES_UNDERLAY__MAX__2 1
+#define DC__NUM_VCE_ENGINE 1
+#define DC__NUM_VCE_ENGINE__1 1
+#define DC__NUM_VCE_ENGINE__0_PRESENT 1
+#define DC__NUM_VCE_ENGINE__MAX 2
+#define DC__NUM_VCE_ENGINE__MAX__2 1
+#define DC__OTG_EXTERNAL_SYNC_PRESENT 0
+#define DC__OTG_EXTERNAL_SYNC_PRESENT__0 1
+#define DC__OTG_CRC_PRESENT 1
+#define DC__OTG_CRC_PRESENT__1 1
+#define DC__VIP_PRESENT 0
+#define DC__VIP_PRESENT__0 1
+#define DC__DTMTEST_PRESENT 0
+#define DC__DTMTEST_PRESENT__0 1
+#define DC__POWER_GATE_PRESENT 1
+#define DC__POWER_GATE_PRESENT__1 1
+#define DC__MEM_PG 1
+#define DC__MEM_PG__1 1
+#define DC__FMT_SRC_SEL_PRESENT 0
+#define DC__FMT_SRC_SEL_PRESENT__0 1
+#define DC__DIG_FEATURES__HDMI_PRESENT 1
+#define DC__DIG_FEATURES__HDMI_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_PRESENT 1
+#define DC__DIG_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT 0
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT__0 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT 0
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT__0 1
+#define DC__DIG_RESYNC_FIFO_SIZE 14
+#define DC__DIG_RESYNC_FIFO_SIZE__14 1
+#define DC__DIG_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__12_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__13_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DAC_RESYNC_FIFO_SIZE 12
+#define DC__DAC_RESYNC_FIFO_SIZE__12 1
+#define DC__DAC_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DVO_RESYNC_FIFO_SIZE 12
+#define DC__DVO_RESYNC_FIFO_SIZE__12 1
+#define DC__DVO_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__MEM_CDC_PRESENT 1
+#define DC__MEM_CDC_PRESENT__1 1
+#define DC__NUM_HPD 4
+#define DC__NUM_HPD__4 1
+#define DC__NUM_HPD__0_PRESENT 1
+#define DC__NUM_HPD__1_PRESENT 1
+#define DC__NUM_HPD__2_PRESENT 1
+#define DC__NUM_HPD__3_PRESENT 1
+#define DC__NUM_HPD__MAX 6
+#define DC__NUM_HPD__MAX__6 1
+#define DC__NUM_DDC_PAIRS 4
+#define DC__NUM_DDC_PAIRS__4 1
+#define DC__NUM_DDC_PAIRS__0_PRESENT 1
+#define DC__NUM_DDC_PAIRS__1_PRESENT 1
+#define DC__NUM_DDC_PAIRS__2_PRESENT 1
+#define DC__NUM_DDC_PAIRS__3_PRESENT 1
+#define DC__NUM_DDC_PAIRS__MAX 6
+#define DC__NUM_DDC_PAIRS__MAX__6 1
+#define DC__NUM_AUDIO_PLL 0
+#define DC__NUM_AUDIO_PLL__0 1
+#define DC__NUM_AUDIO_PLL__MAX 2
+#define DC__NUM_AUDIO_PLL__MAX__2 1
+#define DC__NUM_PIXEL_PLL 1
+#define DC__NUM_PIXEL_PLL__1 1
+#define DC__NUM_PIXEL_PLL__0_PRESENT 1
+#define DC__NUM_PIXEL_PLL__MAX 4
+#define DC__NUM_PIXEL_PLL__MAX__4 1
+#define DC__NUM_CASCADED_PLL 0
+#define DC__NUM_CASCADED_PLL__0 1
+#define DC__NUM_CASCADED_PLL__MAX 3
+#define DC__NUM_CASCADED_PLL__MAX__3 1
+#define DC__PIXCLK_FROM_PHYPLL 1
+#define DC__PIXCLK_FROM_PHYPLL__1 1
+#define DC__NB_STUTTER_MODE_PRESENT 0
+#define DC__NB_STUTTER_MODE_PRESENT__0 1
+#define DC__I2S0_AND_SPDIF0_PRESENT 0
+#define DC__I2S0_AND_SPDIF0_PRESENT__0 1
+#define DC__I2S1_PRESENT 0
+#define DC__I2S1_PRESENT__0 1
+#define DC__SPDIF1_PRESENT 0
+#define DC__SPDIF1_PRESENT__0 1
+#define DC__DSI_PRESENT 0
+#define DC__DSI_PRESENT__0 1
+#define DC__DACA_PRESENT 0
+#define DC__DACA_PRESENT__0 1
+#define DC__DACB_PRESENT 0
+#define DC__DACB_PRESENT__0 1
+#define DC__NUM_PIPES 4
+#define DC__NUM_PIPES__4 1
+#define DC__NUM_PIPES__0_PRESENT 1
+#define DC__NUM_PIPES__1_PRESENT 1
+#define DC__NUM_PIPES__2_PRESENT 1
+#define DC__NUM_PIPES__3_PRESENT 1
+#define DC__NUM_PIPES__MAX 6
+#define DC__NUM_PIPES__MAX__6 1
+#define DC__NUM_DIG_LP 0
+#define DC__NUM_DIG_LP__0 1
+#define DC__NUM_DIG_LP__MAX 2
+#define DC__NUM_DIG_LP__MAX__2 1
+#define DC__DPDEBUG_PRESENT 0
+#define DC__DPDEBUG_PRESENT__0 1
+#define DC__DISPLAY_WB_PRESENT 1
+#define DC__DISPLAY_WB_PRESENT__1 1
+#define DC__NUM_CWB 0
+#define DC__NUM_CWB__0 1
+#define DC__NUM_CWB__MAX 2
+#define DC__NUM_CWB__MAX__2 1
+#define DC__MVP_PRESENT 0
+#define DC__MVP_PRESENT__0 1
+#define DC__DVO_PRESENT 0
+#define DC__DVO_PRESENT__0 1
+#define DC__ABM_PRESENT 0
+#define DC__ABM_PRESENT__0 1
+#define DC__BPHYC_PLL_PRESENT 0
+#define DC__BPHYC_PLL_PRESENT__0 1
+#define DC__BPHYC_UNIPHY_PRESENT 0
+#define DC__BPHYC_UNIPHY_PRESENT__0 1
+#define DC__PHY_BROADCAST_PRESENT 0
+#define DC__PHY_BROADCAST_PRESENT__0 1
+#define DC__NUM_OF_DCRX_SD 0
+#define DC__NUM_OF_DCRX_SD__0 1
+#define DC__DVO_17BIT_MAPPING 0
+#define DC__DVO_17BIT_MAPPING__0 1
+#define DC__AVSYNC_PRESENT 0
+#define DC__AVSYNC_PRESENT__0 1
+#define DC__NUM_OF_DCRX_PORTS 0
+#define DC__NUM_OF_DCRX_PORTS__0 1
+#define DC__NUM_OF_DCRX_PORTS__MAX 1
+#define DC__NUM_OF_DCRX_PORTS__MAX__1 1
+#define DC__NUM_PHY 4
+#define DC__NUM_PHY__4 1
+#define DC__NUM_PHY__0_PRESENT 1
+#define DC__NUM_PHY__1_PRESENT 1
+#define DC__NUM_PHY__2_PRESENT 1
+#define DC__NUM_PHY__3_PRESENT 1
+#define DC__NUM_PHY__MAX 7
+#define DC__NUM_PHY__MAX__7 1
+#define DC__NUM_PHY_LP 0
+#define DC__NUM_PHY_LP__0 1
+#define DC__NUM_PHY_LP__MAX 2
+#define DC__NUM_PHY_LP__MAX__2 1
+#define DC__SYNC_CELL vid_sync_gf14lpp
+#define DC__SYNC_CELL__VID_SYNC_GF14LPP 1
+#define DC__USE_NEW_VSS 1
+#define DC__USE_NEW_VSS__1 1
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES__6 1
+#define UNIPHYA_PRESENT 1
+#define UNIPHYA_PRESENT__1 1
+#define DC__UNIPHYA_PRESENT 1
+#define DC__UNIPHYA_PRESENT__1 1
+#define UNIPHYB_PRESENT 1
+#define UNIPHYB_PRESENT__1 1
+#define DC__UNIPHYB_PRESENT 1
+#define DC__UNIPHYB_PRESENT__1 1
+#define UNIPHYC_PRESENT 1
+#define UNIPHYC_PRESENT__1 1
+#define DC__UNIPHYC_PRESENT 1
+#define DC__UNIPHYC_PRESENT__1 1
+#define UNIPHYD_PRESENT 1
+#define UNIPHYD_PRESENT__1 1
+#define DC__UNIPHYD_PRESENT 1
+#define DC__UNIPHYD_PRESENT__1 1
+#define UNIPHYE_PRESENT 0
+#define UNIPHYE_PRESENT__0 1
+#define DC__UNIPHYE_PRESENT 0
+#define DC__UNIPHYE_PRESENT__0 1
+#define UNIPHYF_PRESENT 0
+#define UNIPHYF_PRESENT__0 1
+#define DC__UNIPHYF_PRESENT 0
+#define DC__UNIPHYF_PRESENT__0 1
+#define UNIPHYG_PRESENT 0
+#define UNIPHYG_PRESENT__0 1
+#define DC__UNIPHYG_PRESENT 0
+#define DC__UNIPHYG_PRESENT__0 1
+#define DC__TMDS_LINK tmds_link_dual
+#define DC__TMDS_LINK__TMDS_LINK_DUAL 1
+#define DC__WBSCL_PIXBW 8
+#define DC__WBSCL_PIXBW__8 1
+#define DC__DWB_CSC_PRESENT 0
+#define DC__DWB_CSC_PRESENT__0 1
+#define DC__DWB_LUMA_SCL_PRESENT 0
+#define DC__DWB_LUMA_SCL_PRESENT__0 1
+#define DC__DENTIST_INTERFACE_PRESENT 1
+#define DC__DENTIST_INTERFACE_PRESENT__1 1
+#define DC__GENERICA_PRESENT 1
+#define DC__GENERICA_PRESENT__1 1
+#define DC__GENERICB_PRESENT 1
+#define DC__GENERICB_PRESENT__1 1
+#define DC__GENERICC_PRESENT 0
+#define DC__GENERICC_PRESENT__0 1
+#define DC__GENERICD_PRESENT 0
+#define DC__GENERICD_PRESENT__0 1
+#define DC__GENERICE_PRESENT 0
+#define DC__GENERICE_PRESENT__0 1
+#define DC__GENERICF_PRESENT 0
+#define DC__GENERICF_PRESENT__0 1
+#define DC__GENERICG_PRESENT 0
+#define DC__GENERICG_PRESENT__0 1
+#define DC__UNIPHY_VOLTAGE_MODE 1
+#define DC__UNIPHY_VOLTAGE_MODE__1 1
+#define DC__BLON_TYPE dedicated
+#define DC__BLON_TYPE__DEDICATED 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT__1 1
+#define DC__XDMA_PRESENT 0
+#define DC__XDMA_PRESENT__0 1
+#define XDMA__PRESENT 0
+#define XDMA__PRESENT__0 1
+#define DC__DP_MEM_PG 0
+#define DC__DP_MEM_PG__0 1
+#define DP__MEM_PG 0
+#define DP__MEM_PG__0 1
+#define DC__AFMT_MEM_PG 0
+#define DC__AFMT_MEM_PG__0 1
+#define AFMT__MEM_PG 0
+#define AFMT__MEM_PG__0 1
+#define DC__HDMI_MEM_PG 0
+#define DC__HDMI_MEM_PG__0 1
+#define HDMI__MEM_PG 0
+#define HDMI__MEM_PG__0 1
+#define DC__I2C_MEM_PG 0
+#define DC__I2C_MEM_PG__0 1
+#define I2C__MEM_PG 0
+#define I2C__MEM_PG__0 1
+#define DC__DSCL_MEM_PG 0
+#define DC__DSCL_MEM_PG__0 1
+#define DSCL__MEM_PG 0
+#define DSCL__MEM_PG__0 1
+#define DC__CM_MEM_PG 0
+#define DC__CM_MEM_PG__0 1
+#define CM__MEM_PG 0
+#define CM__MEM_PG__0 1
+#define DC__OBUF_MEM_PG 0
+#define DC__OBUF_MEM_PG__0 1
+#define OBUF__MEM_PG 0
+#define OBUF__MEM_PG__0 1
+#define DC__WBIF_MEM_PG 1
+#define DC__WBIF_MEM_PG__1 1
+#define WBIF__MEM_PG 1
+#define WBIF__MEM_PG__1 1
+#define DC__VGA_MEM_PG 0
+#define DC__VGA_MEM_PG__0 1
+#define VGA__MEM_PG 0
+#define VGA__MEM_PG__0 1
+#define DC__FMT_MEM_PG 0
+#define DC__FMT_MEM_PG__0 1
+#define FMT__MEM_PG 0
+#define FMT__MEM_PG__0 1
+#define DC__ODM_MEM_PG 0
+#define DC__ODM_MEM_PG__0 1
+#define ODM__MEM_PG 0
+#define ODM__MEM_PG__0 1
+#define DC__DSI_MEM_PG 0
+#define DC__DSI_MEM_PG__0 1
+#define DSI__MEM_PG 0
+#define DSI__MEM_PG__0 1
+#define DC__AZ_MEM_PG 1
+#define DC__AZ_MEM_PG__1 1
+#define AZ__MEM_PG 1
+#define AZ__MEM_PG__1 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG__1 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P528X64QS__MEM_PG 1
+#define WBSCL_MEM1P528X64QS__MEM_PG__1 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG__1 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG__1 1
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_INT__MEM_PG 0
+#define HUBBUB_SDP_TAG_INT__MEM_PG__0 1
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_EXT__MEM_PG 0
+#define HUBBUB_SDP_TAG_EXT__MEM_PG__0 1
+#define DC__HUBBUB_RET_ZERO_MEM_PG 0
+#define DC__HUBBUB_RET_ZERO_MEM_PG__0 1
+#define HUBBUB_RET_ZERO__MEM_PG 0
+#define HUBBUB_RET_ZERO__MEM_PG__0 1
+#define DC__HUBBUB_RET_ROB_MEM_PG 0
+#define DC__HUBBUB_RET_ROB_MEM_PG__0 1
+#define HUBBUB_RET_ROB__MEM_PG 0
+#define HUBBUB_RET_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_ROB_MEM_PG 0
+#define DC__HUBPRET_CUR_ROB_MEM_PG__0 1
+#define HUBPRET_CUR_ROB__MEM_PG 0
+#define HUBPRET_CUR_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_CDC_MEM_PG 0
+#define DC__HUBPRET_CUR_CDC_MEM_PG__0 1
+#define HUBPRET_CUR_CDC__MEM_PG 0
+#define HUBPRET_CUR_CDC__MEM_PG__0 1
+#define DC__HUBPREQ_MPTE_MEM_PG 0
+#define DC__HUBPREQ_MPTE_MEM_PG__0 1
+#define HUBPREQ_MPTE__MEM_PG 0
+#define HUBPREQ_MPTE__MEM_PG__0 1
+#define DC__HUBPREQ_META_MEM_PG 0
+#define DC__HUBPREQ_META_MEM_PG__0 1
+#define HUBPREQ_META__MEM_PG 0
+#define HUBPREQ_META__MEM_PG__0 1
+#define DC__HUBPREQ_DPTE_MEM_PG 0
+#define DC__HUBPREQ_DPTE_MEM_PG__0 1
+#define HUBPREQ_DPTE__MEM_PG 0
+#define HUBPREQ_DPTE__MEM_PG__0 1
+#define DC__HUBPRET_DET_MEM_PG 0
+#define DC__HUBPRET_DET_MEM_PG__0 1
+#define HUBPRET_DET__MEM_PG 0
+#define HUBPRET_DET__MEM_PG__0 1
+#define DC__HUBPRET_PIX_CDC_MEM_PG 0
+#define DC__HUBPRET_PIX_CDC_MEM_PG__0 1
+#define HUBPRET_PIX_CDC__MEM_PG 0
+#define HUBPRET_PIX_CDC__MEM_PG__0 1
+#define DC__TOP_BLKS__DCCG 1
+#define DC__TOP_BLKS__DCHUBBUB 1
+#define DC__TOP_BLKS__DCHUBP 1
+#define DC__TOP_BLKS__HDA 1
+#define DC__TOP_BLKS__DIO 1
+#define DC__TOP_BLKS__DCIO 1
+#define DC__TOP_BLKS__DMU 1
+#define DC__TOP_BLKS__DPP 1
+#define DC__TOP_BLKS__MPC 1
+#define DC__TOP_BLKS__OPP 1
+#define DC__TOP_BLKS__OPTC 1
+#define DC__TOP_BLKS__MMHUBBUB 1
+#define DC__TOP_BLKS__WB 1
+#define DC__TOP_BLKS__MAX 13
+#define DC__TOP_BLKS__MAX__13 1
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS 9
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS__9 1
+#define DC__DPP_MPC_SF_PIXEL_CREDITS 9
+#define DC__DPP_MPC_SF_PIXEL_CREDITS__9 1
+#define DC__MPC_OPP_SF_PIXEL_CREDITS 8
+#define DC__MPC_OPP_SF_PIXEL_CREDITS__8 1
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS 8
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS__8 1
+#define DC__SFR_SFT_ROUND_TRIP_DELAY 5
+#define DC__SFR_SFT_ROUND_TRIP_DELAY__5 1
+#define DC__REPEATER_PROJECT_MAX 8
+#define DC__REPEATER_PROJECT_MAX__8 1
+#define DC__SURFACE_422_CAPABLE 0
+#define DC__SURFACE_422_CAPABLE__0 1
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
new file mode 100644
index 000000000000..b1ad3553f900
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_ENUMS_H__
+#define __DISPLAY_MODE_ENUMS_H__
+
+enum output_encoder_class {
+ dm_dp = 0, dm_hdmi = 1, dm_wb = 2, dm_edp
+};
+enum output_format_class {
+ dm_444 = 0, dm_420 = 1, dm_n422, dm_s422
+};
+enum source_format_class {
+ dm_444_16 = 0,
+ dm_444_32 = 1,
+ dm_444_64 = 2,
+ dm_420_8 = 3,
+ dm_420_10 = 4,
+ dm_422_8 = 5,
+ dm_422_10 = 6,
+ dm_444_8 = 7,
+ dm_mono_8,
+ dm_mono_16
+};
+enum output_bpc_class {
+ dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4
+};
+enum scan_direction_class {
+ dm_horz = 0, dm_vert = 1
+};
+enum dm_swizzle_mode {
+ dm_sw_linear = 0,
+ dm_sw_256b_s = 1,
+ dm_sw_256b_d = 2,
+ dm_sw_SPARE_0 = 3,
+ dm_sw_SPARE_1 = 4,
+ dm_sw_4kb_s = 5,
+ dm_sw_4kb_d = 6,
+ dm_sw_SPARE_2 = 7,
+ dm_sw_SPARE_3 = 8,
+ dm_sw_64kb_s = 9,
+ dm_sw_64kb_d = 10,
+ dm_sw_SPARE_4 = 11,
+ dm_sw_SPARE_5 = 12,
+ dm_sw_var_s = 13,
+ dm_sw_var_d = 14,
+ dm_sw_SPARE_6 = 15,
+ dm_sw_SPARE_7 = 16,
+ dm_sw_64kb_s_t = 17,
+ dm_sw_64kb_d_t = 18,
+ dm_sw_SPARE_10 = 19,
+ dm_sw_SPARE_11 = 20,
+ dm_sw_4kb_s_x = 21,
+ dm_sw_4kb_d_x = 22,
+ dm_sw_SPARE_12 = 23,
+ dm_sw_SPARE_13 = 24,
+ dm_sw_64kb_s_x = 25,
+ dm_sw_64kb_d_x = 26,
+ dm_sw_SPARE_14 = 27,
+ dm_sw_SPARE_15 = 28,
+ dm_sw_var_s_x = 29,
+ dm_sw_var_d_x = 30,
+ dm_sw_64kb_r_x,
+ dm_sw_gfx7_2d_thin_lvp,
+ dm_sw_gfx7_2d_thin_gl
+};
+enum lb_depth {
+ dm_lb_10 = 0, dm_lb_8 = 1, dm_lb_6 = 2, dm_lb_12 = 3, dm_lb_16
+};
+enum voltage_state {
+ dm_vmin = 0, dm_vmid = 1, dm_vnom = 2, dm_vmax = 3
+};
+enum source_macro_tile_size {
+ dm_4k_tile = 0, dm_64k_tile = 1, dm_256k_tile = 2
+};
+enum cursor_bpp {
+ dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
+};
+enum clock_change_support {
+ dm_dram_clock_change_uninitialized = 0,
+ dm_dram_clock_change_vactive,
+ dm_dram_clock_change_vblank,
+ dm_dram_clock_change_unsupported
+};
+
+enum output_standard {
+ dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
new file mode 100644
index 000000000000..4c31fa54af39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ soc->sr_exit_time_us = 9.0;
+ soc->sr_enter_plus_exit_time_us = 11.0;
+ soc->urgent_latency_us = 4.0;
+ soc->writeback_latency_us = 12.0;
+ soc->ideal_dram_bw_after_urgent_percent = 80.0;
+ soc->max_request_size_bytes = 256;
+
+ soc->vmin.dcfclk_mhz = 300.0;
+ soc->vmin.dispclk_mhz = 608.0;
+ soc->vmin.dppclk_mhz = 435.0;
+ soc->vmin.dram_bw_per_chan_gbps = 12.8;
+ soc->vmin.phyclk_mhz = 540.0;
+ soc->vmin.socclk_mhz = 208.0;
+
+ soc->vmid.dcfclk_mhz = 600.0;
+ soc->vmid.dispclk_mhz = 661.0;
+ soc->vmid.dppclk_mhz = 661.0;
+ soc->vmid.dram_bw_per_chan_gbps = 12.8;
+ soc->vmid.phyclk_mhz = 540.0;
+ soc->vmid.socclk_mhz = 208.0;
+
+ soc->vnom.dcfclk_mhz = 600.0;
+ soc->vnom.dispclk_mhz = 661.0;
+ soc->vnom.dppclk_mhz = 661.0;
+ soc->vnom.dram_bw_per_chan_gbps = 38.4;
+ soc->vnom.phyclk_mhz = 810;
+ soc->vnom.socclk_mhz = 208.0;
+
+ soc->vmax.dcfclk_mhz = 600.0;
+ soc->vmax.dispclk_mhz = 1086.0;
+ soc->vmax.dppclk_mhz = 661.0;
+ soc->vmax.dram_bw_per_chan_gbps = 38.4;
+ soc->vmax.phyclk_mhz = 810.0;
+ soc->vmax.socclk_mhz = 208.0;
+
+ soc->downspread_percent = 0.5;
+ soc->dram_page_open_time_ns = 50.0;
+ soc->dram_rw_turnaround_time_ns = 17.5;
+ soc->dram_return_buffer_per_channel_bytes = 8192;
+ soc->round_trip_ping_latency_dcfclk_cycles = 128;
+ soc->urgent_out_of_order_return_per_channel_bytes = 256;
+ soc->channel_interleave_bytes = 256;
+ soc->num_banks = 8;
+ soc->num_chans = 2;
+ soc->vmm_page_size_bytes = 4096;
+ soc->dram_clock_change_latency_us = 17.0;
+ soc->writeback_dram_clock_change_latency_us = 23.0;
+ soc->return_bus_width_bytes = 64;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
+static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ ip->rob_buffer_size_kbytes = 64;
+ ip->det_buffer_size_kbytes = 164;
+ ip->dpte_buffer_size_in_pte_reqs = 42;
+ ip->dpp_output_buffer_pixels = 2560;
+ ip->opp_output_buffer_lines = 1;
+ ip->pixel_chunk_size_kbytes = 8;
+ ip->pte_enable = 1;
+ ip->pte_chunk_size_kbytes = 2;
+ ip->meta_chunk_size_kbytes = 2;
+ ip->writeback_chunk_size_kbytes = 2;
+ ip->line_buffer_size_bits = 589824;
+ ip->max_line_buffer_lines = 12;
+ ip->IsLineBufferBppFixed = 0;
+ ip->LineBufferFixedBpp = -1;
+ ip->writeback_luma_buffer_size_kbytes = 12;
+ ip->writeback_chroma_buffer_size_kbytes = 8;
+ ip->max_num_dpp = 4;
+ ip->max_num_wb = 2;
+ ip->max_dchub_pscl_bw_pix_per_clk = 4;
+ ip->max_pscl_lb_bw_pix_per_clk = 2;
+ ip->max_lb_vscl_bw_pix_per_clk = 4;
+ ip->max_vscl_hscl_bw_pix_per_clk = 4;
+ ip->max_hscl_ratio = 4;
+ ip->max_vscl_ratio = 4;
+ ip->hscl_mults = 4;
+ ip->vscl_mults = 4;
+ ip->max_hscl_taps = 8;
+ ip->max_vscl_taps = 8;
+ ip->dispclk_ramp_margin_percent = 1;
+ ip->underscan_factor = 1.10;
+ ip->min_vblank_lines = 14;
+ ip->dppclk_delay_subtotal = 90;
+ ip->dispclk_delay_subtotal = 42;
+ ip->dcfclk_cstate_latency = 10;
+ ip->max_inter_dcn_tile_repeaters = 8;
+ ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
+ ip->bug_forcing_LC_req_same_size_fixed = 0;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
+{
+ if (lib->project != project) {
+ set_soc_bounding_box(&lib->soc, project);
+ set_ip_params(&lib->ip, project);
+ lib->project = project;
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
new file mode 100644
index 000000000000..26f4f2a3d90d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_LIB_H__
+#define __DISPLAY_MODE_LIB_H__
+
+
+#include "dml_common_defs.h"
+#include "soc_bounding_box.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+#include "dml1_display_rq_dlg_calc.h"
+
+enum dml_project {
+ DML_PROJECT_UNDEFINED,
+ DML_PROJECT_RAVEN1
+};
+
+struct display_mode_lib {
+ struct _vcs_dpi_ip_params_st ip;
+ struct _vcs_dpi_soc_bounding_box_st soc;
+ enum dml_project project;
+ struct vba_vars_st vba;
+ struct dal_logger *logger;
+};
+
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+
+#endif
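
A minimal usage sketch for the interface above: initialize the library once for the target project and read a bounding-box limit back out. The helper name is hypothetical.

/* Sketch: one-time DML setup for a Raven1-class ASIC. */
static double example_max_dispclk_mhz(struct display_mode_lib *dml)
{
	/* Fills dml->soc and dml->ip only when the project changes. */
	dml_init_instance(dml, DML_PROJECT_RAVEN1);

	return dml->soc.vmax.dispclk_mhz; /* 1086.0 per set_soc_bounding_box() */
}
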
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
new file mode 100644
index 000000000000..baf182177736
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_STRUCTS_H__
+#define __DISPLAY_MODE_STRUCTS_H__
+
+typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
+typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
+typedef struct _vcs_dpi_ip_params_st ip_params_st;
+typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
+typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
+typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
+typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
+typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
+typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
+typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
+typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
+typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
+typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
+typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
+typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
+typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
+typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
+typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
+typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
+typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
+typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
+typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
+typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
+typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
+typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
+typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
+typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
+typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
+
+struct _vcs_dpi_voltage_scaling_st {
+ int state;
+ double dscclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dram_speed_mhz;
+ double fabricclk_mhz;
+ double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
+ double phyclk_mhz;
+ double dppclk_mhz;
+};
+
+struct _vcs_dpi_soc_bounding_box_st {
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
+ double urgent_latency_us;
+ double writeback_latency_us;
+ double ideal_dram_bw_after_urgent_percent;
+ unsigned int max_request_size_bytes;
+ struct _vcs_dpi_voltage_scaling_st vmin;
+ struct _vcs_dpi_voltage_scaling_st vmid;
+ struct _vcs_dpi_voltage_scaling_st vnom;
+ struct _vcs_dpi_voltage_scaling_st vmax;
+ double downspread_percent;
+ double dram_page_open_time_ns;
+ double dram_rw_turnaround_time_ns;
+ double dram_return_buffer_per_channel_bytes;
+ double dram_channel_width_bytes;
+ double fabric_datapath_to_dcn_data_return_bytes;
+ double dcn_downspread_percent;
+ double dispclk_dppclk_vco_speed_mhz;
+ double dfs_vco_period_ps;
+ unsigned int round_trip_ping_latency_dcfclk_cycles;
+ unsigned int urgent_out_of_order_return_per_channel_bytes;
+ unsigned int channel_interleave_bytes;
+ unsigned int num_banks;
+ unsigned int num_chans;
+ unsigned int vmm_page_size_bytes;
+ double dram_clock_change_latency_us;
+ double writeback_dram_clock_change_latency_us;
+ unsigned int return_bus_width_bytes;
+ unsigned int voltage_override;
+ double xfc_bus_transport_time_us;
+ double xfc_xbuf_latency_tolerance_us;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+};
+
+struct _vcs_dpi_ip_params_st {
+ unsigned int max_inter_dcn_tile_repeaters;
+ unsigned int num_dsc;
+ unsigned int odm_capable;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int det_buffer_size_kbytes;
+ unsigned int dpte_buffer_size_in_pte_reqs;
+ unsigned int pde_proc_buffer_size_64k_reqs;
+ unsigned int dpp_output_buffer_pixels;
+ unsigned int opp_output_buffer_lines;
+ unsigned int pixel_chunk_size_kbytes;
+ unsigned char pte_enable;
+ unsigned int pte_chunk_size_kbytes;
+ unsigned int meta_chunk_size_kbytes;
+ unsigned int writeback_chunk_size_kbytes;
+ unsigned int line_buffer_size_bits;
+ unsigned int max_line_buffer_lines;
+ unsigned int writeback_luma_buffer_size_kbytes;
+ unsigned int writeback_chroma_buffer_size_kbytes;
+ unsigned int writeback_chroma_line_buffer_width_pixels;
+ unsigned int max_page_table_levels;
+ unsigned int max_num_dpp;
+ unsigned int max_num_otg;
+ unsigned int cursor_chunk_size;
+ unsigned int cursor_buffer_size;
+ unsigned int max_num_wb;
+ unsigned int max_dchub_pscl_bw_pix_per_clk;
+ unsigned int max_pscl_lb_bw_pix_per_clk;
+ unsigned int max_lb_vscl_bw_pix_per_clk;
+ unsigned int max_vscl_hscl_bw_pix_per_clk;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
+ unsigned int hscl_mults;
+ unsigned int vscl_mults;
+ unsigned int max_hscl_taps;
+ unsigned int max_vscl_taps;
+ unsigned int xfc_supported;
+ unsigned int ptoi_supported;
+ unsigned int xfc_fill_constant_bytes;
+ double dispclk_ramp_margin_percent;
+ double xfc_fill_bw_overhead_percent;
+ double underscan_factor;
+ unsigned int min_vblank_lines;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int dcfclk_cstate_latency;
+ unsigned int dppclk_delay_scl;
+ unsigned int dppclk_delay_scl_lb_only;
+ unsigned int dppclk_delay_cnvc_formatter;
+ unsigned int dppclk_delay_cnvc_cursor;
+ unsigned int is_line_buffer_bpp_fixed;
+ unsigned int line_buffer_fixed_bpp;
+ unsigned int dcc_supported;
+
+ unsigned int IsLineBufferBppFixed;
+ unsigned int LineBufferFixedBpp;
+ unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ unsigned int bug_forcing_LC_req_same_size_fixed;
+};
+
+struct _vcs_dpi_display_xfc_params_st {
+ double xfc_tslv_vready_offset_us;
+ double xfc_tslv_vupdate_width_us;
+ double xfc_tslv_vupdate_offset_us;
+ int xfc_slv_chunk_size_bytes;
+};
+
+struct _vcs_dpi_display_pipe_source_params_st {
+ int source_format;
+ unsigned char dcc;
+ unsigned int dcc_override;
+ unsigned int dcc_rate;
+ unsigned char dcc_use_global;
+ unsigned char vm;
+ unsigned char vm_levels_force_en;
+ unsigned int vm_levels_force;
+ int source_scan;
+ int sw_mode;
+ int macro_tile_size;
+ unsigned char is_display_sw;
+ unsigned int viewport_width;
+ unsigned int viewport_height;
+ unsigned int viewport_y_y;
+ unsigned int viewport_y_c;
+ unsigned int viewport_width_c;
+ unsigned int viewport_height_c;
+ unsigned int data_pitch;
+ unsigned int data_pitch_c;
+ unsigned int meta_pitch;
+ unsigned int meta_pitch_c;
+ unsigned int cur0_src_width;
+ int cur0_bpp;
+ unsigned int cur1_src_width;
+ int cur1_bpp;
+ int num_cursors;
+ unsigned char is_hsplit;
+ unsigned char dynamic_metadata_enable;
+ unsigned int dynamic_metadata_lines_before_active;
+ unsigned int dynamic_metadata_xmit_bytes;
+ unsigned int hsplit_grp;
+ unsigned char xfc_enable;
+ unsigned char xfc_slave;
+ struct _vcs_dpi_display_xfc_params_st xfc_params;
+};
+struct writeback_st {
+ int wb_src_height;
+ int wb_dst_width;
+ int wb_dst_height;
+ int wb_pixel_format;
+ int wb_htaps_luma;
+ int wb_vtaps_luma;
+ int wb_htaps_chroma;
+ int wb_vtaps_chroma;
+ int wb_hratio;
+ int wb_vratio;
+};
+
+struct _vcs_dpi_display_output_params_st {
+ int dp_lanes;
+ int output_bpp;
+ int dsc_enable;
+ int wb_enable;
+ int output_bpc;
+ int output_type;
+ int output_format;
+ int output_standard;
+ int dsc_slices;
+ struct writeback_st wb;
+};
+
+struct _vcs_dpi_display_bandwidth_st {
+ double total_bw_consumed_gbps;
+ double guaranteed_urgent_return_bw_gbps;
+};
+
+struct _vcs_dpi_scaler_ratio_depth_st {
+ double hscl_ratio;
+ double vscl_ratio;
+ double hscl_ratio_c;
+ double vscl_ratio_c;
+ double vinit;
+ double vinit_c;
+ double vinit_bot;
+ double vinit_bot_c;
+ int lb_depth;
+ int scl_enable;
+};
+
+struct _vcs_dpi_scaler_taps_st {
+ unsigned int htaps;
+ unsigned int vtaps;
+ unsigned int htaps_c;
+ unsigned int vtaps_c;
+};
+
+struct _vcs_dpi_display_pipe_dest_params_st {
+ unsigned int recout_width;
+ unsigned int recout_height;
+ unsigned int full_recout_width;
+ unsigned int full_recout_height;
+ unsigned int hblank_start;
+ unsigned int hblank_end;
+ unsigned int vblank_start;
+ unsigned int vblank_end;
+ unsigned int htotal;
+ unsigned int vtotal;
+ unsigned int vactive;
+ unsigned int hactive;
+ unsigned int vstartup_start;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned char interlaced;
+ unsigned char underscan;
+ double pixel_rate_mhz;
+ unsigned char synchronized_vblank_all_planes;
+ unsigned char otg_inst;
+ unsigned char odm_split_cnt;
+ unsigned char odm_combine;
+};
+
+struct _vcs_dpi_display_pipe_params_st {
+ display_pipe_source_params_st src;
+ display_pipe_dest_params_st dest;
+ scaler_ratio_depth_st scale_ratio_depth;
+ scaler_taps_st scale_taps;
+};
+
+struct _vcs_dpi_display_clocks_and_cfg_st {
+ int voltage;
+ double dppclk_mhz;
+ double refclk_mhz;
+ double dispclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+};
+
+struct _vcs_dpi_display_e2e_pipe_params_st {
+ display_pipe_params_st pipe;
+ display_output_params_st dout;
+ display_clocks_and_cfg_st clks_cfg;
+};
+
+struct _vcs_dpi_dchub_buffer_sizing_st {
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int detail_buffer_size_y;
+};
+
+struct _vcs_dpi_watermarks_perf_st {
+ double stutter_eff_in_active_region_percent;
+ double urgent_latency_supported_us;
+ double non_urgent_latency_supported_us;
+ double dram_clock_change_margin_us;
+ double dram_access_eff_percent;
+};
+
+struct _vcs_dpi_cstate_pstate_watermarks_st {
+ double cstate_exit_us;
+ double cstate_enter_plus_exit_us;
+ double pstate_change_us;
+};
+
+struct _vcs_dpi_wm_calc_pipe_params_st {
+ unsigned int num_dpp;
+ int voltage;
+ int output_type;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dppclk_mhz;
+ double pixclk_mhz;
+ unsigned char interlace_en;
+ unsigned char pte_enable;
+ unsigned char dcc_enable;
+ double dcc_rate;
+ double bytes_per_pixel_c;
+ double bytes_per_pixel_y;
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int det_buffer_size_y;
+ double h_ratio;
+ double v_ratio;
+ unsigned int h_taps;
+ unsigned int h_total;
+ unsigned int v_total;
+ unsigned int v_active;
+ unsigned int e2e_index;
+ double display_pipe_line_delivery_time;
+ double read_bw;
+ unsigned int lines_in_det_y;
+ unsigned int lines_in_det_y_rounded_down_to_swath;
+ double full_det_buffering_time;
+ double dcfclk_deepsleep_mhz_per_plane;
+};
+
+struct _vcs_dpi_vratio_pre_st {
+ double vratio_pre_l;
+ double vratio_pre_c;
+};
+
+struct _vcs_dpi_display_data_rq_misc_params_st {
+ unsigned int full_swath_bytes;
+ unsigned int stored_swath_bytes;
+ unsigned int blk256_height;
+ unsigned int blk256_width;
+ unsigned int req_height;
+ unsigned int req_width;
+};
+
+struct _vcs_dpi_display_data_rq_sizing_params_st {
+ unsigned int chunk_bytes;
+ unsigned int min_chunk_bytes;
+ unsigned int meta_chunk_bytes;
+ unsigned int min_meta_chunk_bytes;
+ unsigned int mpte_group_bytes;
+ unsigned int dpte_group_bytes;
+};
+
+struct _vcs_dpi_display_data_rq_dlg_params_st {
+ unsigned int swath_width_ub;
+ unsigned int swath_height;
+ unsigned int req_per_swath_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int dpte_req_per_row_ub;
+ unsigned int dpte_groups_per_row_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_bytes_per_row_ub;
+ unsigned int meta_chunks_per_row_ub;
+ unsigned int meta_req_per_row_ub;
+ unsigned int meta_row_height;
+ unsigned int meta_bytes_per_row_ub;
+};
+
+struct _vcs_dpi_display_cur_rq_dlg_params_st {
+ unsigned char enable;
+ unsigned int swath_height;
+ unsigned int req_per_line;
+};
+
+struct _vcs_dpi_display_rq_dlg_params_st {
+ display_data_rq_dlg_params_st rq_l;
+ display_data_rq_dlg_params_st rq_c;
+ display_cur_rq_dlg_params_st rq_cur0;
+};
+
+struct _vcs_dpi_display_rq_sizing_params_st {
+ display_data_rq_sizing_params_st rq_l;
+ display_data_rq_sizing_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_misc_params_st {
+ display_data_rq_misc_params_st rq_l;
+ display_data_rq_misc_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_params_st {
+ unsigned char yuv420;
+ unsigned char yuv420_10bpc;
+ display_rq_misc_params_st misc;
+ display_rq_sizing_params_st sizing;
+ display_rq_dlg_params_st dlg;
+};
+
+struct _vcs_dpi_display_dlg_regs_st {
+ unsigned int refcyc_h_blank_end;
+ unsigned int dlg_vblank_end;
+ unsigned int min_dst_y_next_start;
+ unsigned int refcyc_per_htotal;
+ unsigned int refcyc_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ unsigned int dst_y_prefetch;
+ unsigned int dst_y_per_vm_vblank;
+ unsigned int dst_y_per_row_vblank;
+ unsigned int dst_y_per_vm_flip;
+ unsigned int dst_y_per_row_flip;
+ unsigned int ref_freq_to_pix_freq;
+ unsigned int vratio_prefetch;
+ unsigned int vratio_prefetch_c;
+ unsigned int refcyc_per_pte_group_vblank_l;
+ unsigned int refcyc_per_pte_group_vblank_c;
+ unsigned int refcyc_per_meta_chunk_vblank_l;
+ unsigned int refcyc_per_meta_chunk_vblank_c;
+ unsigned int refcyc_per_pte_group_flip_l;
+ unsigned int refcyc_per_pte_group_flip_c;
+ unsigned int refcyc_per_meta_chunk_flip_l;
+ unsigned int refcyc_per_meta_chunk_flip_c;
+ unsigned int dst_y_per_pte_row_nom_l;
+ unsigned int dst_y_per_pte_row_nom_c;
+ unsigned int refcyc_per_pte_group_nom_l;
+ unsigned int refcyc_per_pte_group_nom_c;
+ unsigned int dst_y_per_meta_row_nom_l;
+ unsigned int dst_y_per_meta_row_nom_c;
+ unsigned int refcyc_per_meta_chunk_nom_l;
+ unsigned int refcyc_per_meta_chunk_nom_c;
+ unsigned int refcyc_per_line_delivery_pre_l;
+ unsigned int refcyc_per_line_delivery_pre_c;
+ unsigned int refcyc_per_line_delivery_l;
+ unsigned int refcyc_per_line_delivery_c;
+ unsigned int chunk_hdl_adjust_cur0;
+ unsigned int chunk_hdl_adjust_cur1;
+ unsigned int vready_after_vcount0;
+ unsigned int dst_y_offset_cur0;
+ unsigned int dst_y_offset_cur1;
+ unsigned int xfc_reg_transfer_delay;
+ unsigned int xfc_reg_precharge_delay;
+ unsigned int xfc_reg_remote_surface_flip_latency;
+ unsigned int xfc_reg_prefetch_margin;
+ unsigned int dst_y_delta_drq_limit;
+};
+
+struct _vcs_dpi_display_ttu_regs_st {
+ unsigned int qos_level_low_wm;
+ unsigned int qos_level_high_wm;
+ unsigned int min_ttu_vblank;
+ unsigned int qos_level_flip;
+ unsigned int refcyc_per_req_delivery_l;
+ unsigned int refcyc_per_req_delivery_c;
+ unsigned int refcyc_per_req_delivery_cur0;
+ unsigned int refcyc_per_req_delivery_cur1;
+ unsigned int refcyc_per_req_delivery_pre_l;
+ unsigned int refcyc_per_req_delivery_pre_c;
+ unsigned int refcyc_per_req_delivery_pre_cur0;
+ unsigned int refcyc_per_req_delivery_pre_cur1;
+ unsigned int qos_level_fixed_l;
+ unsigned int qos_level_fixed_c;
+ unsigned int qos_level_fixed_cur0;
+ unsigned int qos_level_fixed_cur1;
+ unsigned int qos_ramp_disable_l;
+ unsigned int qos_ramp_disable_c;
+ unsigned int qos_ramp_disable_cur0;
+ unsigned int qos_ramp_disable_cur1;
+};
+
+struct _vcs_dpi_display_data_rq_regs_st {
+ unsigned int chunk_size;
+ unsigned int min_chunk_size;
+ unsigned int meta_chunk_size;
+ unsigned int min_meta_chunk_size;
+ unsigned int dpte_group_size;
+ unsigned int mpte_group_size;
+ unsigned int swath_height;
+ unsigned int pte_row_height_linear;
+};
+
+struct _vcs_dpi_display_rq_regs_st {
+ display_data_rq_regs_st rq_regs_l;
+ display_data_rq_regs_st rq_regs_c;
+ unsigned int drq_expansion_mode;
+ unsigned int prq_expansion_mode;
+ unsigned int mrq_expansion_mode;
+ unsigned int crq_expansion_mode;
+ unsigned int plane1_base_address;
+};
+
+struct _vcs_dpi_display_dlg_sys_params_st {
+ double t_mclk_wm_us;
+ double t_urg_wm_us;
+ double t_sr_wm_us;
+ double t_extra_us;
+ double mem_trip_us;
+ double t_srx_delay_us;
+ double deepsleep_dcfclk_mhz;
+ double total_flip_bw;
+ unsigned int total_flip_bytes;
+};
+
+struct _vcs_dpi_display_dlg_prefetch_param_st {
+ double prefetch_bw;
+ unsigned int flip_bytes;
+};
+
+struct _vcs_dpi_display_pipe_clock_st {
+ double dcfclk_mhz;
+ double dispclk_mhz;
+ double socclk_mhz;
+ double dscclk_mhz[6];
+ double dppclk_mhz[6];
+};
+
+struct _vcs_dpi_display_arb_params_st {
+ int max_req_outstanding;
+ int min_req_outstanding;
+ int sat_level_us;
+};
+
+#endif /*__DISPLAY_MODE_STRUCTS_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
new file mode 100644
index 000000000000..ea661ee44674
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -0,0 +1,6124 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+
+#include "dml_inline_defs.h"
+
+static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib);
+static void fetch_ip_params(struct display_mode_lib *mode_lib);
+static void fetch_pipe_params(struct display_mode_lib *mode_lib);
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+static void recalculate(struct display_mode_lib *mode_lib);
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN);
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat);
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
+// Super monster function with some 45 arguments
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidthY,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height);
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime);
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk);
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth);
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth);
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib);
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+
+void set_prefetch_mode(
+ struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support)
+{
+ unsigned int prefetch_mode;
+
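+ // Encode the allowed power states as a prefetch mode: 0 = both cstate_en
+ // and pstate_en set, 1 = cstate_en only, 2 = neither.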
+ if (cstate_en && pstate_en)
+ prefetch_mode = 0;
+ else if (cstate_en)
+ prefetch_mode = 1;
+ else
+ prefetch_mode = 2;
+ if (prefetch_mode != mode_lib->vba.PrefetchMode
+ || ignore_viewport_pos != mode_lib->vba.IgnoreViewportPositioning
+ || immediate_flip_support != mode_lib->vba.ImmediateFlipSupport) {
+ DTRACE(
+ " Prefetch mode has changed from %i to %i. Recalculating.",
+ mode_lib->vba.PrefetchMode,
+ prefetch_mode);
+ mode_lib->vba.PrefetchMode = prefetch_mode;
+ mode_lib->vba.IgnoreViewportPositioning = ignore_viewport_pos;
+ mode_lib->vba.ImmediateFlipSupport = immediate_flip_support;
+ recalculate(mode_lib);
+ }
+}
+
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(pipes, mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
+
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+
+ if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
+ recalculate(mode_lib);
+ else {
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+ }
+ ModeSupportAndSystemConfigurationFull(mode_lib);
+
+ return mode_lib->vba.VoltageLevel;
+}
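+
+// A minimal usage sketch (caller-side names here are hypothetical): a display
+// state build hands its packed pipe array to the library and reads back the
+// voltage level required to support it, e.g.
+//	level = dml_get_voltage_level(&dml, pipes, pipe_cnt);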
+
+#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
+{ \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ return var; \
+}
+
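+// For illustration only: dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark)
+// below expands to
+//	double get_wm_urgent(struct display_mode_lib *mode_lib,
+//			const display_e2e_pipe_params_st *pipes,
+//			unsigned int num_pipes)
+//	{
+//		recalculate_params(mode_lib, pipes, num_pipes);
+//		return mode_lib->vba.UrgentWatermark;
+//	}
+// i.e. every getter revalidates the cached pipe state before reading a result.
+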
+dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFClkDeepSleep);
+dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
+dml_get_attr_func(wm_memory_trip, mode_lib->vba.MemoryTripWatermark);
+dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
+dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
+dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
+dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
+dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
+dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
+dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
+dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
+dml_get_attr_func(urgent_latency, mode_lib->vba.MinUrgentLatencySupportUs);
+dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
+dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
+dml_get_attr_func(
+ dram_clock_change_latency,
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
+dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
+dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
+dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
+
+#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
+{\
+ unsigned int which_plane; \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
+ return var[which_plane]; \
+}
+
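+// Same pattern as dml_get_attr_func, except the pipe index is first translated
+// to its plane via pipe_plane[], so hsplit pipes that share a plane report the
+// same per-plane value.
+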
+dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
+dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
+dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
+dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
+dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
+dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
+dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
+dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
+dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
+dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
+dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
+dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
+dml_get_pipe_attr_func(
+ dst_y_per_row_flip,
+ mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
+
+dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
+dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
+dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
+dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe)
+{
+ unsigned int which_plane;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ which_plane = mode_lib->vba.pipe_plane[which_pipe];
+ return mode_lib->vba.VStartup[which_plane];
+}
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.TotImmediateFlipBytes;
+}
+
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.ImmediateFlipBW;
+}
+
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ unsigned int k;
+ double total_prefetch_bw = 0.0;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
+ return total_prefetch_bw;
+}
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i;
+
+ // SOC Bounding Box Parameters
+ mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
+ mode_lib->vba.NumberOfChannels = soc->num_chans;
+ mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency =
+ soc->ideal_dram_bw_after_urgent_percent; // there's always that one bastard variable that's so long it throws everything out of alignment!
+ mode_lib->vba.UrgentLatency = soc->urgent_latency_us;
+ mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel =
+ soc->urgent_out_of_order_return_per_channel_bytes;
+ mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
+ mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
+ mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
+ mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+ mode_lib->vba.Downspreading = soc->downspread_percent;
+ mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
+ mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
+ mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
+ // Set the voltage scaling clocks as the defaults. Most of these will
+ // be set to different values by the test
+ for (i = 0; i < DC__VOLTAGE_STATES; i++)
+ if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
+ break;
+
+ mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
+
+ mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
+ mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
+
+ mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
+ mode_lib->vba.MaxHSCLRatio = 4;
+ mode_lib->vba.MaxVSCLRatio = 4;
+ mode_lib->vba.MaxNumWriteback = 0; /*TODO*/
+ mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
+ mode_lib->vba.Cursor64BppSupport = true;
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
+ mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
+ mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
+ mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
+ mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ }
+}
+
+static void fetch_ip_params(struct display_mode_lib *mode_lib)
+{
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ // IP Parameters
+ mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
+ mode_lib->vba.MaxNumOTG = ip->max_num_otg;
+ mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
+ mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
+
+ mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
+ mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
+ mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
+ mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
+ mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
+ mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
+ mode_lib->vba.PTEChunkSize = ip->pte_chunk_size_kbytes;
+ mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
+ mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
+ mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
+ mode_lib->vba.PTEBufferSizeInRequests = ip->dpte_buffer_size_in_pte_reqs;
+ mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
+ mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
+ mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes;
+ mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes;
+ mode_lib->vba.WritebackChromaLineBufferWidth =
+ ip->writeback_chroma_line_buffer_width_pixels;
+ mode_lib->vba.MaxPageTableLevels = ip->max_page_table_levels;
+ mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
+ mode_lib->vba.NumberOfDSC = ip->num_dsc;
+ mode_lib->vba.ODMCapability = ip->odm_capable;
+ mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
+
+ mode_lib->vba.XFCSupported = ip->xfc_supported;
+ mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
+ mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
+ mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
+ mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
+ mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
+ mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
+ mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
+ mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
+
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
+
+ mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
+}
+
+static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+{
+ display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
+ unsigned int j, k;
+ bool PlaneVisited[DC__NUM_DPP__MAX];
+ bool visited[DC__NUM_DPP__MAX];
+
+ // Convert Pipes to Planes
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
+ visited[k] = false;
+
+ mode_lib->vba.NumberOfActivePlanes = 0;
+ for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
+ display_pipe_source_params_st *src = &pipes[j].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
+ scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
+ scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
+ display_output_params_st *dout = &pipes[j].dout;
+ display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
+
+ if (visited[j])
+ continue;
+ visited[j] = true;
+
+ mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
+ mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
+ (enum scan_direction_class) (src->source_scan);
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_width;
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_height;
+ mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_y;
+ mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_c;
+ mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
+ mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
+ mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
+ mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
+ mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
+ mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
+ if (mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes])
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
+ mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
+ mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
+ mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
+ mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
+ mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
+ mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
+ mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dcc_use_global ?
+ ip->dcc_supported : src->dcc && ip->dcc_supported;
+ mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (src->source_format);
+ mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
+ mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
+ mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
+ (enum dm_swizzle_mode) (src->sw_mode);
+ mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode?
+ mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
+ mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_format_class) (dout->output_format);
+ mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_encoder_class) (dout->output_type);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
+ mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dp_lanes;
+ mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dsc_slices;
+ mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpc == 0 ? 12 : dout->output_bpc;
+ mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
+ mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_src_height;
+ mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_width;
+ mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_height;
+ mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (dout->wb.wb_pixel_format);
+ mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_luma;
+ mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_luma;
+ mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_chroma;
+ mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_chroma;
+ mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_hratio;
+ mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vratio;
+
+ mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_enable;
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_lines_before_active;
+ mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_xmit_bytes;
+
+ mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
+ && ip->xfc_supported;
+ mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
+ mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
+ mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
+ mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
+ mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
+ mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
+ if (ip->is_line_buffer_bpp_fixed)
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
+ ip->line_buffer_fixed_bpp;
+ else {
+ unsigned int lb_depth;
+
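+ // Translate the lb_depth enum into line buffer bits per pixel
+ // (3 components x bits per component, e.g. dm_lb_10 -> 30).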
+ switch (scl->lb_depth) {
+ case dm_lb_6:
+ lb_depth = 18;
+ break;
+ case dm_lb_8:
+ lb_depth = 24;
+ break;
+ case dm_lb_10:
+ lb_depth = 30;
+ break;
+ case dm_lb_12:
+ lb_depth = 36;
+ break;
+ case dm_lb_16:
+ lb_depth = 48;
+ break;
+ default:
+ lb_depth = 36;
+ }
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
+ }
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
+ // The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
+ // calculate things a little more accurately
+ for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
+ switch (k) {
+ case 0:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur0_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
+ src->cur0_src_width;
+ if (src->cur0_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ case 1:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur1_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
+ src->cur1_src_width;
+ if (src->cur1_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ default:
+ dml_print(
+ "ERROR: Number of cursors specified exceeds supported maximum\n");
+ }
+ }
+
+ OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
+
+ if (dst->odm_combine && !src->is_hsplit)
+ dml_print(
+ "ERROR: ODM Combine is specified but is_hsplit has not be specified for pipe %i\n",
+ j);
+
+ if (src->is_hsplit) {
+ for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
+ display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
+ display_output_params_st *dout_k = &pipes[k].dout;
+
+ if (src_k->is_hsplit && !visited[k]
+ && src->hsplit_grp == src_k->hsplit_grp) {
+ mode_lib->vba.pipe_plane[k] =
+ mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
+ if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
+ == dm_horz)
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
+ else
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
+
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
+ visited[k] = true;
+ }
+ }
+ }
+
+ mode_lib->vba.NumberOfActivePlanes++;
+ }
+
+ // handle overlays through mode_lib->vba.BlendingAndTiming
+ // mode_lib->vba.BlendingAndTiming tells you which instance to look at to get timing, the so-called 'master'
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ PlaneVisited[j] = false;
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
+ // which plane acts as master doesn't matter, so choose the smaller index
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ mode_lib->vba.BlendingAndTiming[k] = j;
+ PlaneVisited[k] = true;
+ }
+ }
+
+ if (!PlaneVisited[j]) {
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ }
+ }
+
+ // TODO: mode_lib->vba.ODMCombineEnabled => 2 * mode_lib->vba.DPPPerPlane...actually maybe not since all pipes are specified
+ // Do we want the dscclk to automatically be halved? Guess not since the value is specified
+
+ mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
+ for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
+ ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
+
+ mode_lib->vba.VirtualMemoryEnable = false;
+ mode_lib->vba.OverridePageTableLevels = 0;
+
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable
+ || !!pipes[k].pipe.src.vm;
+ mode_lib->vba.OverridePageTableLevels =
+ (pipes[k].pipe.src.vm_levels_force_en
+ && mode_lib->vba.OverridePageTableLevels
+ < pipes[k].pipe.src.vm_levels_force) ?
+ pipes[k].pipe.src.vm_levels_force :
+ mode_lib->vba.OverridePageTableLevels;
+ }
+
+ if (mode_lib->vba.OverridePageTableLevels)
+ mode_lib->vba.MaxPageTableLevels = mode_lib->vba.OverridePageTableLevels;
+
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable && !!ip->pte_enable;
+
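+ // Effective return bandwidth in GB/s: the lesser of the DRAM path
+ // (speed in MHz x channels x bytes per channel) and the fabric path
+ // (fabric clock in MHz x bytes returned per clock), divided by 1000.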
+ mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
+ mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000.0;
+
+ // TODO: Must be consistent across all pipes
+ // DCCProgrammingAssumesScanDirectionUnknown = src.dcc_scan_dir_unknown;
+}
+
+static void recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ DisplayPipeConfiguration(mode_lib);
+ DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
+// rather than working them out as in recalculate_ms
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ // Using memcmp here is only safe because there are no non-POD types in struct display_mode_lib
+ if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(
+ pipes,
+ mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+ recalculate(mode_lib);
+ }
+}
+
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i, k;
+ unsigned int total_pipes = 0;
+
+ mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
+ for (i = 1; i < mode_lib->vba.cache_num_pipes; ++i)
+ ASSERT(mode_lib->vba.VoltageLevel == -1 || mode_lib->vba.VoltageLevel == mode_lib->vba.cache_pipes[i].clks_cfg.voltage);
+
+ mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
+ mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
+
+ if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
+ mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
+ else
+ mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
+
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+
+ // Total Available Pipes Support Check
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_pipes += mode_lib->vba.DPPPerPlane[k];
+ ASSERT(total_pipes <= DC__NUM_DPP__MAX);
+}
+
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN)
+{
+ double CriticalCompression;
+
+ if (DCCEnabledAnyPlane
+ && ReturnBandwidthToDCN
+ > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ ReturnBandwidthToDCN * 4
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ / ReturnBandwidthToDCN
+ - mode_lib->vba.DCFCLK
+ * mode_lib->vba.ReturnBusWidth
+ / 4)
+ + mode_lib->vba.UrgentLatency));
+
+ CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024);
+
+ if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ 4.0 * ReturnBandwidthToDCN
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / dml_pow(
+ (ReturnBandwidthToDCN
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024),
+ 2));
+
+ return ReturnBW;
+}
+
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+ //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+ //valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
+ //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
+ Delay, pixels;
+
+ if (pixelFormat == dm_n422 || pixelFormat == dm_420)
+ pixelsPerClock = 2;
+ // all other modes operate at 1 pixel per clock
+ else
+ pixelsPerClock = 1;
+
+ //initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
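+ // e.g. (hypothetical numbers) at 8 bpp and 1 pixel per clock this is
+ // round(8192 / 2 / 8 / 1) = 512 cycles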
+
+ //compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ //divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ //422 mode has an additional cycle of delay
+ if (pixelFormat == dm_s422)
+ s = 1;
+ else
+ s = 0;
+
+ //main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ p = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + p * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ l = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && p != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
+
+ //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
+{
+ unsigned int Delay = 0;
+
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
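+ // The branches above sum to a fixed pipeline latency of 48 cycles for 4:2:0,
+ // 49 for native 4:2:2 and 30 for all other output formats.
+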
+ return Delay;
+}
+
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix)
+{
+ bool MyError = false;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
+ double Tdm, LineTime, Tsetup;
+ double dst_y_prefetch_equ;
+ double Tsw_oto;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tpre_oto;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+
+ if (ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (DPPCLK == 0.0 || DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ + DSCDelay;
+
+ if (DPPPerPlane > 1)
+ *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
+
+ if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
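+ // Fold the pixel offset into whole lines: Y keeps the integer line count and
+ // X the remainder within a line.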
+ DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
+
+ *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
+ *VUpdateWidthPix = (14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
+ * PixelClock;
+
+ *VReadyOffsetPix = dml_max(
+ 150.0 / DPPCLK,
+ TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK)
+ * PixelClock;
+
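+ // Pixel counts divided by the pixel clock (MHz) give the setup window in us.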
+ Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
+
+ LineTime = (double) HTotal / PixelClock;
+
+ if (DynamicMetadataEnable) {
+ double Tdmbf, Tdmec, Tdmsks;
+
+ Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
+ Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
+ Tdmec = LineTime;
+ if (DynamicMetadataLinesBeforeActiveRequired == 0)
+ Tdmsks = VBlank * LineTime / 2.0;
+ else
+ Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
+ if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
+ Tdmsks = Tdmsks / 2;
+ if (VStartup * LineTime
+ < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
+ MyError = true;
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
+ } else
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
+ } else
+ Tdm = 0;
+
+ if (VirtualMemoryEnable) {
+ if (PageTableLevels == 4)
+ *Tno_bw = UrgentExtraLatency + UrgentLatency;
+ else if (PageTableLevels == 3)
+ *Tno_bw = UrgentExtraLatency;
+ else
+ *Tno_bw = 0;
+ } else if (DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
+ dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
+ - (Tsetup + Tdm) / LineTime
+ - (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
+
+ Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+
+ prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
+ / Tsw_oto;
+
+ if (VirtualMemoryEnable == true) {
+ Tvm_oto =
+ dml_max(
+ *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ Tr0_oto = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
+ dml_max(UrgentLatency, dml_max(LineTime - Tvm_oto, LineTime / 4)));
+ } else
+ Tr0_oto = LineTime - Tvm_oto;
+
+ Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
+
+ dst_y_prefetch_oto = Tpre_oto / LineTime;
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ)
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ else
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+
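+ // Round the prefetch line count to the nearest quarter line:
+ // floor(4 * x + 0.5) / 4.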
+ *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
+ / 4;
+
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: TCalc: %f\n", TCalc);
+ dml_print("DML: TWait: %f\n", TWait);
+ dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: Tsetup: %f\n", Tsetup);
+ dml_print("DML: Tdm: %f\n", Tdm);
+ dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
+ dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
+ dml_print("DML: HTotal: %d\n", HTotal);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ if (*DestinationLinesForPrefetch > 1) {
+ *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2
+ * dml_ceil(BytePerPixelDETC, 2))
+ / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
+ if (VirtualMemoryEnable) {
+ TimeForFetchingMetaPTE =
+ dml_max(
+ *Tno_bw
+ + (double) PDEAndMetaPTEBytesFrame
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingMetaPTE = LineTime / 4;
+ else
+ TimeForFetchingMetaPTE = 0.0;
+ }
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlank =
+ dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentLatency,
+ dml_max(
+ LineTime
+ - TimeForFetchingMetaPTE,
+ LineTime
+ / 4.0)));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
+ else
+ TimeForFetchingRowInVBlank = 0.0;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
+ 1) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
+ 1) / 4.0;
+
+ LinesToRequestPrefetchPixelData =
+ *DestinationLinesForPrefetch
+ - ((NumberOfCursors > 0 || VirtualMemoryEnable
+ || DCCEnable) ?
+ (*DestinationLinesToRequestVMInVBlank
+ + *DestinationLinesToRequestRowInVBlank) :
+ 0.0);
+
+ if (LinesToRequestPrefetchPixelData > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY =
+ dml_max(
+ (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY
+ * SwathHeightY
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillY
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC =
+ dml_max(
+ *VRatioPrefetchC,
+ (double) MaxNumSwathC
+ * SwathHeightC
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillC
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBW =
+ DPPPerPlane
+ * ((double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETY,
+ 1)
+ + (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETC,
+ 2)
+ / 2)
+ * SwathWidthY / LineTime;
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ } else {
+ MyError = true;
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ return MyError;
+}
+
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
+}
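+
+// Worked example with hypothetical numbers: for VCOSpeed = 3600 MHz and
+// Clock = 1000 MHz, 4 * VCOSpeed / Clock = 14.4, so the Up variant returns
+// 14400 / 14 ~= 1028.6 MHz and the Down variant 14400 / 15 = 960 MHz, i.e.
+// the nearest DFS divider steps on either side of the requested clock.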
+
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath;
+
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
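+/*
+ * Sizes the VM and DCC metadata traffic for one plane.  Outputs the DCC meta
+ * bytes per row, the pixel PTE bytes per row (and whether they still fit in
+ * the PTE request buffer), the dpte/meta row heights, and returns the PDE and
+ * meta PTE bytes that must be fetched once per frame.
+ */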
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidth,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height)
+{
+ unsigned int MetaRequestHeight;
+ unsigned int MetaRequestWidth;
+ unsigned int MetaSurfWidth;
+ unsigned int MetaSurfHeight;
+ unsigned int MPDEBytesFrame;
+ unsigned int MetaPTEBytesFrame;
+ unsigned int DCCMetaSurfaceBytes;
+
+ unsigned int MacroTileSizeBytes;
+ unsigned int MacroTileHeight;
+ unsigned int DPDE0BytesFrame;
+ unsigned int ExtraDPDEBytesFrame;
+ unsigned int PDEAndMetaPTEBytesFrame;
+
+ if (DCCEnable == true) {
+ MetaRequestHeight = 8 * BlockHeight256Bytes;
+ MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection == dm_horz) {
+ *meta_row_height = MetaRequestHeight;
+ MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ + MetaRequestWidth;
+ *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = MetaRequestWidth;
+ MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ + MetaRequestHeight;
+ *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ if (ScanDirection == dm_horz) {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ } else {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(
+ (double) ViewportHeight - 1,
+ 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ }
+ if (VirtualMemoryEnable == true) {
+ MetaPTEBytesFrame = (dml_ceil(
+ (double) (DCCMetaSurfaceBytes - VMMPageSize)
+ / (8 * VMMPageSize),
+ 1) + 1) * 64;
+ MPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 1);
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = 1;
+ } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
+ || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
+ MacroTileSizeBytes = 4096;
+ MacroTileHeight = 4 * BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
+ || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
+ || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
+ || SurfaceTiling == dm_sw_64kb_r_x) {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 262144;
+ MacroTileHeight = 32 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (VirtualMemoryEnable == true && mode_lib->vba.MaxPageTableLevels > 1) {
+ if (ScanDirection == dm_horz) {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ ViewportHeight
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ } else {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ (double) SwathWidth
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 2);
+ } else {
+ DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (VirtualMemoryEnable == true) {
+ unsigned int PTERequestSize;
+ unsigned int PixelPTEReqHeight;
+ unsigned int PixelPTEReqWidth;
+ double FractionOfPTEReturnDrop;
+ unsigned int EffectivePDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeight = 1;
+ PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ if (ScanDirection == dm_horz)
+ FractionOfPTEReturnDrop = 0;
+ else
+ FractionOfPTEReturnDrop = 7.0 / 8.0;
+ } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
+ else
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height =
+ dml_min(
+ 128,
+ 1
+ << (unsigned int) dml_floor(
+ dml_log2(
+ dml_min(
+ (double) PTEBufferSizeInRequests
+ * PixelPTEReqWidth,
+ EffectivePDEProcessingBufIn64KBReqs
+ * 65536.0
+ / BytePerPixel)
+ / Pitch),
+ 1));
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ (double) (Pitch * *dpte_row_height - 1)
+ / PixelPTEReqWidth,
+ 1) + 1);
+ } else if (ScanDirection == dm_horz) {
+ *dpte_row_height = PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ + 1);
+ } else {
+ *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ ((double) SwathWidth - 1)
+ / PixelPTEReqHeight,
+ 1) + 1);
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequests) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+ } else {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
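+/*
+ * Top-level calculation for the active configuration: derive DISPCLK and the
+ * per-plane DPPCLK/DSCCLK, the urgent, p-state-change and stutter watermarks,
+ * DCFCLK deep sleep, and then iterate the prefetch schedule (raising VStartup
+ * one line at a time) until the required bandwidth fits in the available
+ * return bandwidth or the maximum VStartup is reached.
+ */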
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ unsigned int j, k;
+
+ mode_lib->vba.WritebackDISPCLK = 0.0;
+ mode_lib->vba.DISPCLKWithRamping = 0;
+ mode_lib->vba.DISPCLKWithoutRamping = 0;
+ mode_lib->vba.GlobalDPPCLK = 0.0;
+
+ // mode_lib->vba.DISPCLK and mode_lib->vba.DPPCLK Calculation
+ //
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k]) {
+ mode_lib->vba.WritebackDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPLuma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPChroma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
+ * mode_lib->vba.PixelClock[k];
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma,
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k)
+ continue;
+ if (mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ }
+ }
+
+ mode_lib->vba.DISPCLKWithRamping = dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.WritebackDISPCLK);
+ mode_lib->vba.DISPCLKWithoutRamping = dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.WritebackDISPCLK);
+
+ ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.soc.clock_limits[NumberOfStates - 1].dispclk_mhz,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
+ } else {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.DPPPerPlane[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ mode_lib->vba.GlobalDPPCLK = dml_max(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DPPCLK_calculated[k]);
+ }
+ mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
+ * dml_ceil(
+ mode_lib->vba.DPPCLK_calculated[k] * 255
+ / mode_lib->vba.GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
+ }
+
+ // Urgent Watermark
+ mode_lib->vba.DCCEnabledAnyPlane = false;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.DCCEnabledAnyPlane = true;
+
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency / 100;
+
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ // Recompute the return bandwidth to DCN without the post-urgent-latency
+ // derating, then run it through adjust_ReturnBW again.
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000);
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
+ DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
+ DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz)
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
+ else
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ MainPlaneDoesODMCombine = true;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true)
+ MainPlaneDoesODMCombine = true;
+
+ if (MainPlaneDoesODMCombine == true)
+ mode_lib->vba.SwathWidthY[k] = dml_min(
+ (double) mode_lib->vba.SwathWidthSingleDPPY[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]));
+ else
+ mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelDETY[k] = 8;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelDETY[k] = 4;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ mode_lib->vba.BytePerPixelDETY[k] = 2;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 2;
+ } else { // dm_420_10
+ mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
+ mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k] / 2;
+ DTRACE(
+ " read_bw[%i] = %fBps",
+ k,
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
+ mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k];
+ }
+
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ mode_lib->vba.TotalActiveDPP = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ }
+
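+ // Round-trip part of the urgent latency: the ping latency plus 32 DCFCLK
+ // cycles, plus the time to return the worst-case out-of-order data across
+ // all channels at the adjusted return bandwidth.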
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
+
+ if (mode_lib->vba.VRatio[k] <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * mode_lib->vba.SwathHeightY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeLuma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
+ else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ * mode_lib->vba.DPPPerPlane[k]
+ / (mode_lib->vba.HRatio[k] / 2.0)
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
+ * mode_lib->vba.SwathHeightC[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.ReturnBW
+ * mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark =
+ dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeChroma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ }
+
+ mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalDCCActiveDPP
+ * mode_lib->vba.MetaChunkSize) * 1024.0
+ / mode_lib->vba.ReturnBW;
+
+ if (mode_lib->vba.VirtualMemoryEnable)
+ mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
+ * mode_lib->vba.PTEChunkSize * 1024.0 / mode_lib->vba.ReturnBW;
+
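+ // Urgent watermark: urgent latency plus the extra time to drain one pixel
+ // and meta chunk per DPP (and, with virtual memory, one PTE chunk) at the
+ // return bandwidth, plus the last-pixel-of-line margin computed above.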
+ mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatency
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
+ DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
+
+ mode_lib->vba.MemoryTripWatermark = mode_lib->vba.UrgentLatency;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k])
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
+
+ // NB P-State/DRAM Clock Change Watermark
+ mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.UrgentWatermark;
+
+ DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
+
+ DTRACE(" calculating wb pstate watermark");
+ DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
+ DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
+
+ // Stutter Efficiency
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
+ / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETY[k],
+ mode_lib->vba.SwathHeightY[k]);
+ mode_lib->vba.FullDETBufferingTimeY[k] =
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k];
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
+ / mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETC[k],
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.FullDETBufferingTimeC[k] =
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2);
+ } else {
+ mode_lib->vba.LinesInDETC[k] = 0;
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
+ mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
+ }
+ }
+
+ mode_lib->vba.MinFullDETBufferingTime = 999999.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.FullDETBufferingTimeY[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeY[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ if (mode_lib->vba.FullDETBufferingTimeC[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeC[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ }
+
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000;
+ } else {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000;
+ }
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 256
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 256;
+ }
+ if (mode_lib->vba.VirtualMemoryEnable) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 512
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 512;
+ }
+ }
+
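+ // Stutter efficiency (excluding vblank): one minus the fraction of the
+ // minimum full-DET buffering time spent on self-refresh exit and on
+ // re-fetching the display burst from memory.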
+ mode_lib->vba.PartOfBurstThatFitsInROB =
+ dml_min(
+ mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.ROBBufferSizeInKByte * 1024
+ * mode_lib->vba.TotalDataReadBandwidth
+ / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ * 1000));
+ mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
+ * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
+ / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ + (mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth
+ - mode_lib->vba.PartOfBurstThatFitsInROB)
+ / (mode_lib->vba.DCFCLK * 64);
+ if (mode_lib->vba.TotalActiveWriteback == 0) {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
+ - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
+ / mode_lib->vba.MinFullDETBufferingTime) * 100;
+ } else {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ mode_lib->vba.SmallestVBlank = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.VBlankTime = 0;
+ }
+ mode_lib->vba.SmallestVBlank = dml_min(
+ mode_lib->vba.SmallestVBlank,
+ mode_lib->vba.VBlankTime);
+ }
+
+ mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
+ * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
+ - mode_lib->vba.SmallestVBlank)
+ + mode_lib->vba.SmallestVBlank)
+ / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
+
+ // mode_lib->vba.DCFCLK Deep Sleep
+ mode_lib->vba.DCFClkDeepSleep = 8.0;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane =
+ dml_max(
+ 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
+ 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ } else
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = dml_max(
+ mode_lib->vba.DCFCLKDeepSleepPerPlane,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ mode_lib->vba.DCFClkDeepSleep = dml_max(
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+
+ DTRACE(
+ " dcfclk_deepsleep_per_plane[%i] = %fMHz",
+ k,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+ }
+
+ DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFClkDeepSleep);
+
+ // Stutter Watermark
+ mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFClkDeepSleep;
+ mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
+ DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
+
+ // Urgent Latency Supported
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.EffectiveDETPlusLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETY[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETY[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightY[k]);
+
+ mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETC[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETC[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETC[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.UrgentLatencySupportUsChroma =
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2)
+ - mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.SwathWidthY[k]
+ / 2)
+ * mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+ mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
+ mode_lib->vba.UrgentLatencySupportUsLuma,
+ mode_lib->vba.UrgentLatencySupportUsChroma);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUs[k] =
+ mode_lib->vba.UrgentLatencySupportUsLuma;
+ }
+ }
+
+ mode_lib->vba.MinUrgentLatencySupportUs = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
+ mode_lib->vba.MinUrgentLatencySupportUs,
+ mode_lib->vba.UrgentLatencySupportUs[k]);
+ }
+
+ // Non-Urgent Latency Tolerance
+ mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
+ - mode_lib->vba.UrgentWatermark;
+
+ // DSCCLK
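+ // DSCCLK runs at one third of the back-end pixel rate (one sixth with ODM
+ // combine), halved again for 4:2:0 / native 4:2:2 output and scaled up to
+ // cover DSCCLK down-spreading.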
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
+ mode_lib->vba.DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k] == dm_n422)
+ mode_lib->vba.DSCFormatFactor = 2;
+ else
+ mode_lib->vba.DSCFormatFactor = 1;
+ if (mode_lib->vba.ODMCombineEnabled[k])
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 6
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ else
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 3
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ }
+ }
+
+ // DSC Delay
+ // TODO
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double bpp = mode_lib->vba.OutputBpp[k];
+ unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
+
+ if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
+ if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DSCDelay[k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelay[k] =
+ 2
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices / 2.0,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelay[k] = 0;
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.DSCEnabled[j])
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
+
+ // Prefetch
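+ // Per-plane prefetch inputs: 256B block sizes, VM/DCC row bytes and prefetch
+ // source lines for luma (and chroma for 4:2:0 formats), then the active row
+ // bandwidth needed to keep fetching PTE and meta rows during active.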
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PixelPTEBytesPerRowY;
+ unsigned int MetaRowByteY;
+ unsigned int MetaRowByteC;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int PixelPTEBytesPerRowC;
+
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
+ &mode_lib->vba.BlockHeight256BytesY[k],
+ &mode_lib->vba.BlockHeight256BytesC[k],
+ &mode_lib->vba.BlockWidth256BytesY[k],
+ &mode_lib->vba.BlockWidth256BytesC[k]);
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesY[k],
+ mode_lib->vba.BlockWidth256BytesY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.VInitPreFillY[k],
+ &mode_lib->vba.MaxNumSwathY[k]);
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
+ PDEAndMetaPTEBytesFrameC =
+ CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesC[k],
+ mode_lib->vba.BlockWidth256BytesC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2,
+ mode_lib->vba.ViewportHeight[k] / 2,
+ mode_lib->vba.SwathWidthY[k] / 2,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightC[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.VInitPreFillC[k],
+ &mode_lib->vba.MaxNumSwathC[k]);
+ } else {
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ mode_lib->vba.MaxNumSwathC[k] = 0;
+ mode_lib->vba.PrefetchSourceLinesC[k] = 0;
+ }
+
+ mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_bw[k],
+ &mode_lib->vba.dpte_row_bw[k],
+ &mode_lib->vba.qual_row_bw[k]);
+ }
+
+ mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFClkDeepSleep;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.DISPCLK;
+ } else
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j)
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
+
+ mode_lib->vba.VStartupLines = 13;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MaxVStartupLines[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1));
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ mode_lib->vba.MaximumMaxVStartupLines = dml_max(
+ mode_lib->vba.MaximumMaxVStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.cursor_bw[k] = 0.0;
+ for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
+ mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
+ * mode_lib->vba.CursorBPP[k][j] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+
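+ // Iterate the prefetch schedule: starting from the VStartupLines seeded
+ // above, add one line per pass until the schedule is supported (and, when
+ // immediate flip is requested, immediate flip is supported too) or every
+ // plane's MaxVStartupLines has been exceeded.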
+ do {
+ double MaxTotalRDBandwidth = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ bool prefetch_vm_bw_valid = true;
+ bool prefetch_row_bw_valid = true;
+ double TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
+ }
+ mode_lib->vba.ErrorResult[k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.DPPCLK[k],
+ mode_lib->vba.DISPCLK,
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DSCDelay[k],
+ mode_lib->vba.DPPPerPlane[k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ (unsigned int) (mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.HRatio[k]),
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]),
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.TCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchSourceLinesY[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.BytePerPixelDETY[k],
+ mode_lib->vba.VInitPreFillY[k],
+ mode_lib->vba.MaxNumSwathY[k],
+ mode_lib->vba.PrefetchSourceLinesC[k],
+ mode_lib->vba.BytePerPixelDETC[k],
+ mode_lib->vba.VInitPreFillC[k],
+ mode_lib->vba.MaxNumSwathC[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &mode_lib->vba.DSTXAfterScaler[k],
+ &mode_lib->vba.DSTYAfterScaler[k],
+ &mode_lib->vba.DestinationLinesForPrefetch[k],
+ &mode_lib->vba.PrefetchBandwidth[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
+ &mode_lib->vba.VRatioPrefetchY[k],
+ &mode_lib->vba.VRatioPrefetchC[k],
+ &mode_lib->vba.RequiredPrefetchPixDataBW[k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.VStartup[k] = dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+ if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
+ != 0) {
+ mode_lib->vba.VStartup[k] =
+ mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+ }
+ } else {
+ mode_lib->vba.VStartup[k] =
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+
+ if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
+ == 0)
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_row_bw[k] =
+ (double) (mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k])
+ / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ prefetch_row_bw_valid = false;
+ }
+
+ MaxTotalRDBandwidth =
+ MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])
+ + mode_lib->vba.meta_row_bw[k]
+ + mode_lib->vba.dpte_row_bw[k]));
+
+ if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (mode_lib->vba.VRatioPrefetchY[k] > 4
+ || mode_lib->vba.VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ }
+
+ if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
+ && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ mode_lib->vba.PrefetchModeSupported = true;
+ else {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ }
+
+ if (mode_lib->vba.PrefetchModeSupported == true) {
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double total_dcn_read_bw_with_flip = 0;
+
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ + mode_lib->vba.qual_row_bw[k],
+ mode_lib->vba.PrefetchBandwidth[k]);
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ ImmediateFlipBytes[k] = 0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ + mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ total_dcn_read_bw_with_flip =
+ total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])));
+ }
+ mode_lib->vba.ImmediateFlipSupported = true;
+ if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ErrorResult[k]) {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
+ } while (!((mode_lib->vba.PrefetchModeSupported
+ && (!mode_lib->vba.ImmediateFlipSupport
+ || mode_lib->vba.ImmediateFlipSupported))
+ || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
+
+ //Display Pipeline Delivery Time in Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ }
+ }
+
+ // Min TTUVBlank
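+ // PrefetchMode selects what may be hidden during vblank: 0 allows both DRAM
+ // clock change and self-refresh, 1 only self-refresh, otherwise neither; the
+ // matching watermark becomes the floor for TTU vblank, plus TCalc when no
+ // dynamic metadata is sent.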
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode == 0) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.DRAMClockChangeWatermark,
+ dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark));
+ } else if (mode_lib->vba.PrefetchMode == 1) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark);
+ } else {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
+ }
+ if (!mode_lib->vba.DynamicMetadataEnable[k])
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ + mode_lib->vba.MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ mode_lib->vba.ActiveDPPs = 0;
+ // NB P-State/DRAM Clock Change Support
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
+ }
+
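+ // Per-plane active DRAM clock change margin: line buffer, DPP/OPP output
+ // buffering and remaining DET buffering time measured against the p-state
+ // change watermark, reduced when several DPPs share the return path, and
+ // further limited by the writeback buffer when writeback is enabled.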
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double DPPOutputBufferLinesY;
+ double DPPOutputBufferLinesC;
+ double DPPOPPBufferingY;
+ double MaxDETBufferingTimeY;
+ double ActiveDRAMClockChangeLatencyMarginY;
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1)) - (mode_lib->vba.vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2,
+ 1.0)),
+ 1))
+ - (mode_lib->vba.VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
+ / mode_lib->vba.VRatio[k]
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
+ / (mode_lib->vba.VRatio[k] / 2)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
+ / mode_lib->vba.SwathWidthY[k];
+ } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = 0.5;
+ } else {
+ DPPOutputBufferLinesY = 1;
+ }
+
+ if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = 0.5;
+ } else {
+ DPPOutputBufferLinesC = 1;
+ }
+
+ DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
+ MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ + (mode_lib->vba.LinesInDETY[k]
+ - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+
+ ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginY =
+ ActiveDRAMClockChangeLatencyMarginY
+ - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
+ * mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesC
+ + mode_lib->vba.OPPOutputBufferLines);
+ double MaxDETBufferingTimeC =
+ mode_lib->vba.FullDETBufferingTimeC[k]
+ + (mode_lib->vba.LinesInDETC[k]
+ - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
+ - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginC =
+ ActiveDRAMClockChangeLatencyMarginC
+ - (1
+ - 1
+ / (mode_lib->vba.ActiveDPPs
+ - 1))
+ * mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ ActiveDRAMClockChangeLatencyMarginY,
+ ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
+ ActiveDRAMClockChangeLatencyMarginY;
+ }
+
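+		/*
+		 * Writeback, when enabled, further caps the margin by how long
+		 * its interface buffers can absorb output at the writeback rate.
+		 */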
+ if (mode_lib->vba.WritebackEnable[k]) {
+ double WritebackDRAMClockChangeLatencyMargin;
+
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ WritebackDRAMClockChangeLatencyMargin =
+ (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * 4)
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
+ * 8.0 / 10,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize
+ * 8 / 10)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
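+	/*
+	 * The worst-case (minimum) margin across all planes decides whether the
+	 * DRAM clock change fits in active display; otherwise it must be hidden
+	 * in vblank, which requires synchronized vblanks (or a single plane) and
+	 * every plane allowing the change during vblank.
+	 */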
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
+ mode_lib->vba.MinActiveDRAMClockChangeMargin
+ + mode_lib->vba.DRAMClockChangeLatency;
+
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+ } else {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
+ mode_lib->vba.DRAMClockChangeSupport =
+ dm_dram_clock_change_unsupported;
+ }
+ }
+ } else {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+ }
+
+ //XFC Parameters:
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ double TWait;
+
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
+ TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
+ dml_floor(
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCTransferDelay[k] =
+ dml_ceil(
+ mode_lib->vba.XFCBusTransportTime
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCPrechargeDelay[k] =
+ dml_ceil(
+ (mode_lib->vba.XFCBusTransportTime
+ + mode_lib->vba.TInitXFill
+ + mode_lib->vba.TslvChk)
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
+ * mode_lib->vba.SrcActiveDrainRate;
+ mode_lib->vba.FinalFillMargin =
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.XFCFillConstant;
+ mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.FinalFillMargin;
+ mode_lib->vba.RemainingFillLevel = dml_max(
+ 0.0,
+ mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
+ mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
+ / (mode_lib->vba.SrcActiveDrainRate
+ * mode_lib->vba.XFCFillBWOverhead / 100);
+ mode_lib->vba.XFCPrefetchMargin[k] =
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ + mode_lib->vba.TFinalxFill
+ + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
+ mode_lib->vba.XFCPrechargeDelay[k] = 0;
+ mode_lib->vba.XFCTransferDelay[k] = 0;
+ mode_lib->vba.XFCPrefetchMargin[k] = 0;
+ }
+ }
+}
+
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ double BytePerPixDETY;
+ double BytePerPixDETC;
+ double Read256BytesBlockHeightY;
+ double Read256BytesBlockHeightC;
+ double Read256BytesBlockWidthY;
+ double Read256BytesBlockWidthC;
+ double MaximumSwathHeightY;
+ double MaximumSwathHeightC;
+ double MinimumSwathHeightY;
+ double MinimumSwathHeightC;
+ double SwathWidth;
+ double SwathWidthGranularityY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesY;
+ double RoundedUpMaxSwathSizeBytesC;
+ unsigned int j, k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
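+		/*
+		 * Bytes per pixel stored in the DET for luma and chroma; 4:2:0
+		 * formats carry chroma in a separate plane.
+		 */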
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ BytePerPixDETY = 8;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ BytePerPixDETY = 4;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ BytePerPixDETY = 2;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 2;
+ } else {
+ BytePerPixDETY = 4.0 / 3.0;
+ BytePerPixDETC = 8.0 / 3.0;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ Read256BytesBlockHeightY = 4;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ Read256BytesBlockHeightY = 8;
+ } else {
+ Read256BytesBlockHeightY = 16;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockHeightC = 0;
+ Read256BytesBlockWidthC = 0;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ Read256BytesBlockHeightC = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ Read256BytesBlockHeightY = 16;
+ Read256BytesBlockHeightC = 8;
+ } else {
+ Read256BytesBlockHeightY = 8;
+ Read256BytesBlockHeightC = 8;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
+ / Read256BytesBlockHeightC;
+ }
+
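+		/*
+		 * The maximum swath is one 256-byte block tall: block height for
+		 * horizontal scan, block width when the surface is scanned
+		 * vertically.
+		 */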
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ MaximumSwathHeightY = Read256BytesBlockHeightY;
+ MaximumSwathHeightC = Read256BytesBlockHeightC;
+ } else {
+ MaximumSwathHeightY = Read256BytesBlockWidthY;
+ MaximumSwathHeightC = Read256BytesBlockWidthC;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
+ && mode_lib->vba.SourceScan[k] != dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ }
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ SwathWidth = mode_lib->vba.ViewportWidth[k];
+ } else {
+ SwathWidth = mode_lib->vba.ViewportHeight[k];
+ }
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ }
+
+ if (MainPlaneDoesODMCombine == true) {
+ SwathWidth = dml_min(
+ SwathWidth,
+ mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
+ } else {
+ SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
+ }
+
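+		/*
+		 * Round the worst-case swath size up to the request granularity;
+		 * if luma plus chroma fit in half the DET, the maximum swath
+		 * height can be used, otherwise fall back to the minimum.
+		 */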
+ SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
+ RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ (double) (SwathWidth - 1),
+ SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
+ * MaximumSwathHeightY;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ + 256;
+ }
+ if (MaximumSwathHeightC > 0) {
+ SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
+ / MaximumSwathHeightC;
+ RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ (double) (SwathWidth / 2.0 - 1),
+ SwathWidthGranularityC) + SwathWidthGranularityC)
+ * BytePerPixDETC * MaximumSwathHeightC;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ RoundedUpMaxSwathSizeBytesC,
+ 256) + 256;
+ }
+		} else {
+			RoundedUpMaxSwathSizeBytesC = 0.0;
+		}
+
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
+ } else {
+ mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
+ }
+
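+		/*
+		 * Split the DET: all of it for luma-only surfaces, 50/50 when the
+		 * chroma swath is at least as tall as luma, otherwise 2/3 luma
+		 * and 1/3 chroma.
+		 */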
+ if (mode_lib->vba.SwathHeightC[k] == 0) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
+ mode_lib->vba.DETBufferSizeC[k] = 0;
+ } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ } else {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2 / 3;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3;
+ }
+ }
+}
+
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC)
+{
+ if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
+ || SourcePixelFormat == dm_444_16
+ || SourcePixelFormat == dm_444_8)) {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ } else if (SourcePixelFormat == dm_444_64) {
+ *BlockHeight256BytesY = 4;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BlockHeight256BytesY = 16;
+ } else {
+ *BlockHeight256BytesY = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockHeight256BytesC = 0;
+ *BlockWidth256BytesC = 0;
+ } else {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ *BlockHeight256BytesC = 1;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BlockHeight256BytesY = 16;
+ *BlockHeight256BytesC = 8;
+ } else {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
+ }
+ return true;
+}
+
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime)
+{
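+	/*
+	 * Prefetch mode 0 must also wait out a DRAM clock change, mode 1 covers
+	 * self-refresh enter/exit, anything else only the urgent latency.
+	 */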
+ if (PrefetchMode == 0) {
+ return dml_max(
+ DRAMClockChangeLatency + UrgentLatency,
+ dml_max(SREnterPlusExitTime, UrgentLatency));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatency);
+ } else {
+ return UrgentLatency;
+ }
+}
+
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk)
+{
+ double TSlvSetup, AvgfillRate, result;
+
+ *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+ TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
+ *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
+ AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
+ *TslvChk = XFCSlvChunkSize / AvgfillRate;
+ dml_print(
+ "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
+ *SrcActiveDrainRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
+ result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
+ return result;
+}
+
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth)
+{
+ double CalculateWriteBackDISPCLK =
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1)
+ / WritebackHRatio,
+ dml_max(
+ (WritebackLumaVTaps
+ * dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1))
+ / (double) HTotal
+ + dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1)
+ + 4.0)
+ / (double) HTotal,
+ dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * WritebackDestinationWidth
+ / (double) HTotal));
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDISPCLK =
+ dml_max(
+ CalculateWriteBackDISPCLK,
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ dml_max(
+ (WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / WritebackChromaLineBufferWidth,
+ 1))
+ / HTotal
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)
+ / HTotal,
+ dml_ceil(
+ 1.0
+ / (2
+ * WritebackVRatio),
+ 1)
+ * WritebackDestinationWidth
+ / 2.0
+ / HTotal)));
+ }
+ return CalculateWriteBackDISPCLK;
+}
+
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth)
+{
+ double CalculateWriteBackDelay =
+ dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
+ WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(1.0 / WritebackVRatio, 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1) + 4));
+
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDelay =
+ dml_max(
+ CalculateWriteBackDelay,
+ dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)));
+ }
+ return CalculateWriteBackDelay;
+}
+
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw)
+{
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatio / 2 * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (VirtualMemoryEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatio / 2 * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+
+ if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
+ *qual_row_bw = *meta_row_bw + *dpte_row_bw;
+ } else {
+ *qual_row_bw = 0;
+ }
+}
+
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *DestinationLinesToRequestVMInImmediateFlip = 0.0;
+ *DestinationLinesToRequestRowInImmediateFlip = 0.0;
+ *final_flip_bw = qual_row_bw;
+ *ImmediateFlipSupportedForPipe = true;
+ } else {
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+
+ if (VirtualMemoryEnable == true) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingMetaPTEImmediateFlip =
+ dml_max(
+ Tno_bw
+ + PDEAndMetaPTEBytesFrame
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (MaxPageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(UrgentLatency, LineTime / 4.0));
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if (VirtualMemoryEnable == true) {
+ *final_flip_bw =
+ dml_max(
+ PDEAndMetaPTEBytesFrame
+ / (*DestinationLinesToRequestVMInImmediateFlip
+ * LineTime),
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip
+ * LineTime));
+ } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
+ *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
+ } else {
+ *final_flip_bw = 0;
+ }
+
+ if (VirtualMemoryEnable && !DCCEnable)
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ else if (!VirtualMemoryEnable && DCCEnable)
+ min_row_time = meta_row_height * LineTime / VRatio;
+ else
+ min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
+ / VRatio;
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 8
+ || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip
+ + 2 * TimeForFetchingRowInVBlankImmediateFlip
+ > min_row_time)
+ *ImmediateFlipSupportedForPipe = false;
+ else
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
+{
+ unsigned int k;
+
+	//Progressive To Interlace Unit Effect
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
+ if (mode_lib->vba.Interlace[k] == 1
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
+ mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
+ }
+ }
+}
+
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
+{
+ switch (ebpp) {
+ case dm_cur_2bit:
+ return 2;
+ case dm_cur_32bit:
+ return 32;
+ case dm_cur_64bit:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int TruncToValidBPP(
+ double DecimalBPP,
+ bool DSCEnabled,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent)
+{
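+	/*
+	 * HDMI only allows the fixed per-format bpp steps below; other outputs
+	 * with DSC enabled can use 1/16 bpp granularity up to the format's
+	 * maximum, and without DSC fall back to fixed steps per format.
+	 */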
+ if (Output == dm_hdmi) {
+ if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_444) {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP / 1.5 >= 24)
+ return 24;
+ else if (DecimalBPP / 1.5 >= 20)
+ return 20;
+ else if (DecimalBPP / 1.5 >= 16)
+ return 16;
+ else
+ return 0;
+ }
+ } else {
+ if (DSCEnabled) {
+ if (Format == dm_420) {
+ if (DecimalBPP < 6)
+ return 0;
+				else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16)
+					return 1.5 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else if (Format == dm_n422) {
+ if (DecimalBPP < 7)
+ return 0;
+				else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16)
+					return 2 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else {
+ if (DecimalBPP < 8)
+ return 0;
+				else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16)
+					return 3 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ }
+ } else if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_s422 || Format == dm_n422) {
+ if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 20)
+ return 20;
+ else if (DecimalBPP >= 16)
+ return 16;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ }
+ }
+}
+
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ int i;
+ unsigned int j, k;
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ mode_lib->vba.ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ScalerEnabled[k] == false
+ && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
+ || mode_lib->vba.HRatio[k] != 1.0
+ || mode_lib->vba.htaps[k] != 1.0
+ || mode_lib->vba.VRatio[k] != 1.0
+ || mode_lib->vba.vtaps[k] != 1.0)) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
+ || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
+ || (mode_lib->vba.htaps[k] > 1.0
+ && (mode_lib->vba.htaps[k] % 2) == 1)
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
+ || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
+ && (mode_lib->vba.HRatio[k] / 2.0
+ > mode_lib->vba.HTAPsChroma[k]
+ || mode_lib->vba.VRatio[k] / 2.0
+ > mode_lib->vba.VTAPsChroma[k]))) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ mode_lib->vba.SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ && mode_lib->vba.SourceScan[k] != dm_horz)
+ || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
+ || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10))
+ || (((mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_gl
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_lvp)
+ && !((mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_32)
+ && mode_lib->vba.SourceScan[k]
+ == dm_horz
+ && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
+ == true
+ && mode_lib->vba.DCCEnable[k]
+ == false))
+ || (mode_lib->vba.DCCEnable[k] == true
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_linear
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10)))) {
+ mode_lib->vba.SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
+ } else {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 8.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 2.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 2.0;
+ } else {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0 / 3;
+ mode_lib->vba.BytePerPixelInDETC[k] = 8.0 / 3;
+ }
+ }
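+	/*
+	 * Per-plane read bandwidth: bytes per line time for luma and chroma,
+	 * with small overheads added for DCC metadata and page table fetches.
+	 */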
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.SwathWidthYSingleDPP[k]
+ * (dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ * mode_lib->vba.VRatio[k]
+ + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / 2.0 * mode_lib->vba.VRatio[k] / 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+					* (1.0 + 1.0 / 256.0);
+ }
+ if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] != dm_horz
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+					* (1.0 + 1.0 / 64.0);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] == dm_horz
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32)
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+					* (1.0 + 1.0 / 256.0);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+					* (1.0 + 1.0 / 512.0);
+ }
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.ReadBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 4.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 3.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 1.5;
+ } else {
+ mode_lib->vba.WriteBandwidth[k] = 0.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.WriteBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond;
+ mode_lib->vba.DCCEnabledInAnyPlane = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.DCCEnabledInAnyPlane = true;
+ }
+ }
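+	/*
+	 * Per-state return bandwidth: the smaller of DRAM/fabric bandwidth and
+	 * the DCFCLK-limited return bus, with extra limits applied when DCC is
+	 * enabled, evaluated both with and without the urgent-latency derating.
+	 */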
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] = dml_min(
+ mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClockPerState[i]
+ * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000;
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100;
+ mode_lib->vba.ReturnBWPerState[i] = mode_lib->vba.ReturnBWToDCNPerState;
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if ((mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.ReturnBWPerState[i])
+ && (mode_lib->vba.TotalBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.FabricAndDRAMBandwidthPerState[i]
+ * 1000.0
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100.0)) {
+ mode_lib->vba.BandwidthSupport[i] = true;
+ } else {
+ mode_lib->vba.BandwidthSupport[i] = false;
+ }
+ }
+ /*Writeback Latency support check*/
+
+ mode_lib->vba.WritebackLatencySupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ } else {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > 1.5
+ * dml_min(
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+ /*Re-ordering Buffer Support Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32.0)
+ / mode_lib->vba.DCFCLKPerState[i]
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBWPerState[i];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0 / mode_lib->vba.ReturnBWPerState[i]
+ > mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
+ mode_lib->vba.ROBSupport[i] = true;
+ } else {
+ mode_lib->vba.ROBSupport[i] = false;
+ }
+ }
+ /*Writeback Mode Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfActiveWriteback =
+ mode_lib->vba.TotalNumberOfActiveWriteback + 1;
+ }
+ }
+ mode_lib->vba.WritebackModeSupport = true;
+ if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.Writeback10bpc420Supported != true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ }
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
+ && (mode_lib->vba.WritebackHRatio[k] != 1.0
+ || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackMaxVSCLRatio
+ || mode_lib->vba.WritebackHRatio[k]
+ < mode_lib->vba.WritebackMinHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ < mode_lib->vba.WritebackMinVSCLRatio
+ || mode_lib->vba.WritebackLumaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackLumaHTaps[k]
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackLumaVTaps[k]
+ || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
+ == 1))
+ || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
+ && (mode_lib->vba.WritebackChromaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || 2.0
+ * mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackChromaHTaps[k]
+ || 2.0
+ * mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackChromaVTaps[k]
+ || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
+ mode_lib->vba.WritebackLumaVExtra =
+ dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
+ } else {
+ mode_lib->vba.WritebackLumaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ + mode_lib->vba.WritebackLineBufferChromaBufferSize)
+ / 3.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
+ mode_lib->vba.WritebackChromaVExtra = 0.0;
+ } else {
+ mode_lib->vba.WritebackChromaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
+ mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackRequiredDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackRequiredDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
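+	/*
+	 * Minimum DPPCLK for a single DPP per plane, from scaler (PSCL/line
+	 * buffer) throughput; more than 6 taps forces at least twice the pixel
+	 * clock.
+	 */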
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.HRatio[k] > 1.0) {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = 0.0;
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max3(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ } else {
+ if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max5(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2.0),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4.0
+ / mode_lib->vba.PSCL_FACTOR_CHROMA[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
+ || mode_lib->vba.HTAPsChroma[k] > 6.0
+ || mode_lib->vba.VTAPsChroma[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ &mode_lib->vba.Read256BlockHeightY[k],
+ &mode_lib->vba.Read256BlockHeightC[k],
+ &mode_lib->vba.Read256BlockWidthY[k],
+ &mode_lib->vba.Read256BlockWidthC[k]);
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockHeightY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockHeightC[k];
+ } else {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockWidthY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockWidthC[k];
+ }
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ }
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ }
+ }
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
+ } else {
+ mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
+ }
+ mode_lib->vba.MaximumSwathWidthInDETBuffer =
+ dml_min(
+ mode_lib->vba.MaximumSwathWidthSupport,
+ mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
+ / (mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MinSwathHeightY[k]
+ + mode_lib->vba.BytePerPixelInDETC[k]
+ / 2.0
+ * mode_lib->vba.MinSwathHeightC[k]));
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ mode_lib->vba.LineBufferSize
+ * dml_max(mode_lib->vba.HRatio[k], 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0));
+ } else {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ dml_min(
+ mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0)),
+ 2.0 * mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.VTAPsChroma[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k]
+ / 2.0,
+ 1.0)
+ - 2,
+ 0.0)));
+ }
+ mode_lib->vba.MaximumSwathWidth[k] = dml_min(
+ mode_lib->vba.MaximumSwathWidthInDETBuffer,
+ mode_lib->vba.MaximumSwathWidthInLineBuffer);
+ }
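+	/*
+	 * For each voltage state, derive the required DISPCLK/DPPCLK and the
+	 * number of DPPs per plane: ODM combine halves the required DISPCLK,
+	 * and a second DPP is used when one DPP cannot sustain the clock or
+	 * swath width.
+	 */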
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDppclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k] / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ if ((mode_lib->vba.MaxDispclk[i] == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])
+ && (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ > mode_lib->vba.MaxNumDPP
+ || mode_lib->vba.DISPCLK_DPPCLK_Support[i] == false)) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] > mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ if (mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ if (!(mode_lib->vba.MaxDispclk[i]
+ == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])) {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ } else {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.WritebackRequiredDISPCLK);
+ if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
+ < mode_lib->vba.WritebackRequiredDISPCLK) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ /*Viewport Size Check*/
+
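+	/* A plane fits when its per-DPP swath width (the ODM half-screen width, capped
+	 * at the single-DPP swath width, or half the single-DPP swath width otherwise)
+	 * does not exceed MaximumSwathWidth.
+	 */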
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ViewportSizeSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ if (dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]))
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ } else {
+ if (mode_lib->vba.SwathWidthYSingleDPP[k] / 2.0
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ }
+ }
+ }
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] <= mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = true;
+ } else {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = false;
+ }
+ }
+ /*Total Available OTG Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ + 1.0;
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
+ mode_lib->vba.NumberOfOTGSupport = true;
+ } else {
+ mode_lib->vba.NumberOfOTGSupport = false;
+ }
+ /*Display IO and DSC Support Check*/
+
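+	/* HDMI output bpp is limited by a 600MHz TMDS clock. For DP/eDP, try the 270,
+	 * 540 and 810MHz link rates in turn and keep the first non-zero truncated bpp;
+	 * DSC (plus FEC on DP) is required when it is explicitly enabled, or when even
+	 * the highest rate cannot carry the stream uncompressed.
+	 */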
+ mode_lib->vba.NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
+ mode_lib->vba.NonsupportedDSCInputBPC = true;
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.Output[k] == dm_hdmi) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ mode_lib->vba.OutputBppPerState[i][k] =
+ TruncToValidBPP(
+ dml_min(
+ 600.0,
+ mode_lib->vba.PHYCLKPerState[i])
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 24,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ } else if (mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp) {
+ if (mode_lib->vba.Output[k] == dm_edp) {
+ mode_lib->vba.EffectiveFECOverhead = 0.0;
+ } else {
+ mode_lib->vba.EffectiveFECOverhead =
+ mode_lib->vba.FECOverhead;
+ }
+ if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0
+ && mode_lib->vba.PHYCLKPerState[i]
+ >= 810.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true
+ || mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ }
+ } else {
+ mode_lib->vba.OutputBppPerState[i][k] = 0;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DIOSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.OutputBppPerState[i][k] == 0
+ || (mode_lib->vba.OutputFormat[k] == dm_420
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP
+ == true)) {
+ mode_lib->vba.DIOSupport[i] = false;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if ((mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp)) {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k]
+ == dm_n422) {
+ mode_lib->vba.DSCFormatFactor = 2;
+ } else {
+ mode_lib->vba.DSCFormatFactor = 1;
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == true) {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 6.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ } else {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 3.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = false;
+ mode_lib->vba.TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 2.0;
+ } else {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
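+	/* The DSC slice count scales with the back-end pixel clock: 1, 2, 4 or 8 slices
+	 * up to 340, 680, 1360 or 3200MHz respectively, beyond that ceil(clock / 400)
+	 * rounded up to a multiple of 4. With ODM combine the delay is computed per
+	 * engine (half the slices) and then doubled.
+	 */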
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k) {
+ mode_lib->vba.slices = 0;
+			} else if (mode_lib->vba.RequiresDSC[i][k] == false) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
+ mode_lib->vba.slices = dml_ceil(
+ mode_lib->vba.PixelClockBackEnd[k] / 400.0,
+ 4.0);
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
+ mode_lib->vba.slices = 8.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
+ mode_lib->vba.slices = 4.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
+ mode_lib->vba.slices = 2.0;
+ } else {
+ mode_lib->vba.slices = 1.0;
+ }
+			if (mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ mode_lib->vba.bpp = 0.0;
+ } else {
+ mode_lib->vba.bpp = mode_lib->vba.OutputBppPerState[i][k];
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ 2.0
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices
+ / 2,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.RequiresDSC[i][j] == true) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][j];
+ }
+ }
+ }
+ }
+ /*Urgent Latency Support Check*/
+
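+	/* Use the maximum swath height if luma plus chroma swaths fit in half of the
+	 * DET, otherwise fall back to the minimum. The supportable urgent latency is
+	 * roughly the time the buffered DET/LB lines last at the display rate minus
+	 * the time to refetch them at the per-pipe return bandwidth.
+	 */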
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k]
+ / 2.0
+ * mode_lib->vba.HRatio[k]));
+ } else {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ mode_lib->vba.SwathWidthYSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k];
+ }
+ mode_lib->vba.SwathWidthGranularityY = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ / mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] - 1.0,
+ mode_lib->vba.SwathWidthGranularityY)
+ + mode_lib->vba.SwathWidthGranularityY)
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MaxSwathHeightY[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
+ 256.0) + 256;
+ }
+ if (mode_lib->vba.MaxSwathHeightC[k] > 0.0) {
+ mode_lib->vba.SwathWidthGranularityC = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / mode_lib->vba.MaxSwathHeightC[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0 - 1.0,
+ mode_lib->vba.SwathWidthGranularityC)
+ + mode_lib->vba.SwathWidthGranularityC)
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.MaxSwathHeightC[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC,
+ 256.0) + 256;
+ }
+ } else {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
+ }
+ if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY
+ + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MinSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MinSwathHeightC[k];
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = 0.0;
+ } else if (mode_lib->vba.SwathHeightYPerState[i][k]
+ <= mode_lib->vba.SwathHeightCPerState[i][k]) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETC[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ } else {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2.0 / 3.0
+ / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ }
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.vtaps[k] - 1.0);
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.VTAPsChroma[k] - 1.0);
+ mode_lib->vba.EffectiveDETLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETLuma
+ + dml_min(
+ mode_lib->vba.LinesInDETLuma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.PSCL_FACTOR[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightYPerState[i][k]);
+ mode_lib->vba.EffectiveDETLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETChroma
+ + dml_min(
+ mode_lib->vba.LinesInDETChroma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightCPerState[i][k]);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ dml_min(
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]),
+ mode_lib->vba.EffectiveDETLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k]
+ / 2.0)
+ - mode_lib->vba.EffectiveDETLBLinesChroma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]));
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentLatencySupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.UrgentLatencySupportUsPerState[i][k]
+ < mode_lib->vba.UrgentLatency / 1.0) {
+ mode_lib->vba.UrgentLatencySupport[i] = false;
+ }
+ }
+ }
+ /*Prefetch Check*/
+
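+	/* Per state: count DCC-active pipes (their metadata chunks add to the extra
+	 * return latency), project the deep-sleep DCFCLK floor, schedule each plane's
+	 * prefetch, then check that the total read bandwidth with prefetch, and with
+	 * immediate flips, stays within the state's return bandwidth.
+	 */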
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = 8.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ } else {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MetaRowBytesY,
+ &mode_lib->vba.DPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceededY[i][k],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.PrefillY[k],
+ &mode_lib->vba.MaxNumSwY[k]);
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0,
+ mode_lib->vba.ViewportHeight[k] / 2.0,
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0.0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &mode_lib->vba.MetaRowBytesC,
+ &mode_lib->vba.DPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceededC[i][k],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2.0,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.PrefillC[k],
+ &mode_lib->vba.MaxNumSwC[k]);
+ } else {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
+ mode_lib->vba.MetaRowBytesC = 0.0;
+ mode_lib->vba.DPTEBytesPerRowC = 0.0;
+ mode_lib->vba.PrefetchLinesC[k] = 0.0;
+ mode_lib->vba.PTEBufferSizeNotExceededC[i][k] = true;
+ }
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY
+ + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
+ mode_lib->vba.MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY
+ + mode_lib->vba.MetaRowBytesC;
+ mode_lib->vba.DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY
+ + mode_lib->vba.DPTEBytesPerRowC;
+ }
+ mode_lib->vba.ExtraLatency =
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ + (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ * mode_lib->vba.MetaChunkSize)
+ * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ + mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PTEChunkSize * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ }
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.RequiredDISPCLK[i];
+ } else {
+ mode_lib->vba.WritebackDelay[i][k] = 0.0;
+ }
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j]
+ == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[i][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.RequiredDISPCLK[i]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackDelay[i][j];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.MaximumVStartup[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[i][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1.0));
+ }
+ mode_lib->vba.TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+ mode_lib->vba.IsErrorResult[i][k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.RequiredDPPCLK[i][k],
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.DSCDelayPerState[i][k],
+ mode_lib->vba.NoOfDPP[i][k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ mode_lib->vba.SwathWidthYPerState[i][k]
+ / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.BytePerPixelInDETY[k],
+ mode_lib->vba.PrefillY[k],
+ mode_lib->vba.MaxNumSwY[k],
+ mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.BytePerPixelInDETC[k],
+ mode_lib->vba.PrefillC[k],
+ mode_lib->vba.MaxNumSwC[k],
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.DSTXAfterScaler,
+ mode_lib->vba.DSTYAfterScaler,
+ &mode_lib->vba.LineTimesForPrefetch[k],
+ &mode_lib->vba.PrefetchBW[k],
+ &mode_lib->vba.LinesForMetaPTE[k],
+ &mode_lib->vba.LinesForMetaAndDPTERow[k],
+ &mode_lib->vba.VRatioPreY[i][k],
+ &mode_lib->vba.VRatioPreC[i][k],
+ &mode_lib->vba.RequiredPrefetchPixelDataBW[i][k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k]
+ * mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = true;
+ mode_lib->vba.prefetch_row_bw_valid = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] == 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaPTE[k] > 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ / (mode_lib->vba.LinesForMetaPTE[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowBytes[k] + mode_lib->vba.DPTEBytesPerRow[k]
+ == 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaAndDPTERow[k] > 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = (mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k])
+ / (mode_lib->vba.LinesForMetaAndDPTERow[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ mode_lib->vba.prefetch_row_bw_valid = false;
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch =
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max4(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]);
+ }
+ mode_lib->vba.PrefetchSupported[i] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ > mode_lib->vba.ReturnBWPerState[i]
+ || mode_lib->vba.prefetch_vm_bw_valid == false
+ || mode_lib->vba.prefetch_row_bw_valid == false) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.LineTimesForPrefetch[k] < 2.0
+ || mode_lib->vba.LinesForMetaPTE[k] >= 8.0
+ || mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ }
+ mode_lib->vba.VRatioInPrefetchSupported[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.VRatioPreY[i][k] > 4.0
+ || mode_lib->vba.VRatioPreC[i][k] > 4.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.VRatioInPrefetchSupported[i] = false;
+ }
+ }
+ if (mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.ReturnBWPerState[i];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.PrefetchBW[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ + mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + mode_lib->vba.ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &mode_lib->vba.final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.total_dcn_read_bw_with_flip =
+ mode_lib->vba.total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max3(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]));
+ }
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip
+ > mode_lib->vba.ReturnBWPerState[i]) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PTEBufferSizeNotExceededY[i][k] == false
+ || mode_lib->vba.PTEBufferSizeNotExceededC[i][k] == false) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = false;
+ }
+ }
+ }
+ /*Cursor Support Check*/
+
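+	/* The cursor buffer (less one chunk) must be able to hide the urgent latency
+	 * at the plane's VRatio, and 64bpp cursors are only allowed when supported.
+	 */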
+ mode_lib->vba.CursorSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
+ if (dml_floor(
+ dml_floor(
+ mode_lib->vba.CursorBufferSize
+ - mode_lib->vba.CursorChunkSize,
+ mode_lib->vba.CursorChunkSize) * 1024.0
+ / (mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0]
+ / 8.0),
+ 1.0)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatency
+ || (mode_lib->vba.CursorBPP[k][0] == 64.0
+ && mode_lib->vba.Cursor64BppSupport == false)) {
+ mode_lib->vba.CursorSupport = false;
+ }
+ }
+ }
+ /*Valid Pitch Check*/
+
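+	/* Surface and DCC meta pitches must already be aligned to the macro-tile (or
+	 * 64 x 256B-block) width; needing a larger aligned pitch than supplied fails.
+	 */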
+ mode_lib->vba.PitchSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.AlignedYPitch[k] = dml_ceil(
+ dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
+ mode_lib->vba.MacroTileWidthY[k]);
+ if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.DCCMetaPitchY[k],
+ mode_lib->vba.ViewportWidth[k]),
+ 64.0 * mode_lib->vba.Read256BlockWidthY[k]);
+ } else {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
+ }
+ if (mode_lib->vba.AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
+ mode_lib->vba.AlignedCPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0),
+ mode_lib->vba.MacroTileWidthC[k]);
+ } else {
+ mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
+ }
+ if (mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ }
+ /*Mode Support, Voltage State and SOC Configuration*/
+
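+	/* A state supports the mode only if every check above passed. The lowest
+	 * supporting voltage state is selected (falling back to the maximum state when
+	 * none qualifies) and its clocks, DPP split, ODM and DSC decisions are latched.
+	 */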
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (mode_lib->vba.ScaleRatioAndTapsSupport == true
+ && mode_lib->vba.SourceFormatPixelAndScanSupport == true
+ && mode_lib->vba.ViewportSizeSupport[i] == true
+ && mode_lib->vba.BandwidthSupport[i] == true
+ && mode_lib->vba.DIOSupport[i] == true
+ && mode_lib->vba.NotEnoughDSCUnits[i] == false
+ && mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
+ && mode_lib->vba.UrgentLatencySupport[i] == true
+ && mode_lib->vba.ROBSupport[i] == true
+ && mode_lib->vba.DISPCLK_DPPCLK_Support[i] == true
+ && mode_lib->vba.TotalAvailablePipesSupport[i] == true
+ && mode_lib->vba.NumberOfOTGSupport == true
+ && mode_lib->vba.WritebackModeSupport == true
+ && mode_lib->vba.WritebackLatencySupport == true
+ && mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
+ && mode_lib->vba.CursorSupport == true
+ && mode_lib->vba.PitchSupport == true
+ && mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true
+ && mode_lib->vba.PTEBufferSizeNotExceeded[i] == true
+ && mode_lib->vba.NonsupportedDSCInputBPC == false) {
+ mode_lib->vba.ModeSupport[i] = true;
+ } else {
+ mode_lib->vba.ModeSupport[i] = false;
+ }
+ }
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (i == DC__VOLTAGE_STATES || mode_lib->vba.ModeSupport[i] == true) {
+ mode_lib->vba.VoltageLevel = i;
+ }
+ }
+ mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricAndDRAMBandwidth =
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ImmediateFlipSupport =
+ mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][k];
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.ODMCombineEnabled[k] =
+ mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ } else {
+ mode_lib->vba.ODMCombineEnabled[k] = 0;
+ }
+ mode_lib->vba.DSCEnabled[k] =
+ mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
+ mode_lib->vba.OutputBpp[k] =
+ mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
new file mode 100644
index 000000000000..4112409cd974
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_MODE_VBA_H__
+#define __DML2_DISPLAY_MODE_VBA_H__
+
+#include "dml_common_defs.h"
+
+struct display_mode_lib;
+
+void set_prefetch_mode(struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support);
+
+#define dml_get_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
+
+dml_get_attr_decl(clk_dcf_deepsleep);
+dml_get_attr_decl(wm_urgent);
+dml_get_attr_decl(wm_memory_trip);
+dml_get_attr_decl(wm_writeback_urgent);
+dml_get_attr_decl(wm_stutter_exit);
+dml_get_attr_decl(wm_stutter_enter_exit);
+dml_get_attr_decl(wm_dram_clock_change);
+dml_get_attr_decl(wm_writeback_dram_clock_change);
+dml_get_attr_decl(wm_xfc_underflow);
+dml_get_attr_decl(stutter_efficiency_no_vblank);
+dml_get_attr_decl(stutter_efficiency);
+dml_get_attr_decl(urgent_latency);
+dml_get_attr_decl(urgent_extra_latency);
+dml_get_attr_decl(nonurgent_latency);
+dml_get_attr_decl(dram_clock_change_latency);
+dml_get_attr_decl(dispclk_calculated);
+dml_get_attr_decl(total_data_read_bw);
+dml_get_attr_decl(return_bw);
+dml_get_attr_decl(tcalc);
+
+#define dml_get_pipe_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe)
+
+dml_get_pipe_attr_decl(dsc_delay);
+dml_get_pipe_attr_decl(dppclk_calculated);
+dml_get_pipe_attr_decl(dscclk_calculated);
+dml_get_pipe_attr_decl(min_ttu_vblank);
+dml_get_pipe_attr_decl(vratio_prefetch_l);
+dml_get_pipe_attr_decl(vratio_prefetch_c);
+dml_get_pipe_attr_decl(dst_x_after_scaler);
+dml_get_pipe_attr_decl(dst_y_after_scaler);
+dml_get_pipe_attr_decl(dst_y_per_vm_vblank);
+dml_get_pipe_attr_decl(dst_y_per_row_vblank);
+dml_get_pipe_attr_decl(dst_y_prefetch);
+dml_get_pipe_attr_decl(dst_y_per_vm_flip);
+dml_get_pipe_attr_decl(dst_y_per_row_flip);
+dml_get_pipe_attr_decl(xfc_transfer_delay);
+dml_get_pipe_attr_decl(xfc_precharge_delay);
+dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
+dml_get_pipe_attr_decl(xfc_prefetch_margin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe);
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC);
+
+
+struct vba_vars_st {
+ ip_params_st ip;
+ soc_bounding_box_st soc;
+
+ unsigned int MaximumMaxVStartupLines;
+ double cursor_bw[DC__NUM_DPP__MAX];
+ double meta_row_bw[DC__NUM_DPP__MAX];
+ double dpte_row_bw[DC__NUM_DPP__MAX];
+ double qual_row_bw[DC__NUM_DPP__MAX];
+ double WritebackDISPCLK;
+ double PSCL_THROUGHPUT_LUMA[DC__NUM_DPP__MAX];
+ double PSCL_THROUGHPUT_CHROMA[DC__NUM_DPP__MAX];
+ double DPPCLKUsingSingleDPPLuma;
+ double DPPCLKUsingSingleDPPChroma;
+ double DPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double DISPCLKWithRamping;
+ double DISPCLKWithoutRamping;
+ double GlobalDPPCLK;
+ double DISPCLKWithRampingRoundedToDFSGranularity;
+ double DISPCLKWithoutRampingRoundedToDFSGranularity;
+ double MaxDispclkRoundedToDFSGranularity;
+ bool DCCEnabledAnyPlane;
+ double ReturnBandwidthToDCN;
+ unsigned int SwathWidthY[DC__NUM_DPP__MAX];
+ unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETC[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneChroma[DC__NUM_DPP__MAX];
+ unsigned int TotalActiveDPP;
+ unsigned int TotalDCCActiveDPP;
+ double UrgentRoundTripAndOutOfOrderLatency;
+ double DisplayPipeLineDeliveryTimeLuma[DC__NUM_DPP__MAX]; // WM
+ double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETY[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETC[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeC[DC__NUM_DPP__MAX]; // WM
+ double MinFullDETBufferingTime;
+ double FrameTimeForMinFullDETBufferingTime;
+ double AverageReadBandwidthGBytePerSecond;
+ double PartOfBurstThatFitsInROB;
+ double StutterBurstTime;
+ //unsigned int NextPrefetchMode;
+ double VBlankTime;
+ double SmallestVBlank;
+ double DCFCLKDeepSleepPerPlane;
+ double EffectiveDETPlusLBLinesLuma;
+ double EffectiveDETPlusLBLinesChroma;
+ double UrgentLatencySupportUsLuma;
+ double UrgentLatencySupportUsChroma;
+ double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
+ unsigned int DSCFormatFactor;
+ unsigned int BlockHeight256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockHeight256BytesC[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesC[DC__NUM_DPP__MAX];
+ double VInitPreFillY[DC__NUM_DPP__MAX];
+ double VInitPreFillC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathC[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesY[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesC[DC__NUM_DPP__MAX];
+ double PixelPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double MetaRowByte[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height_chroma[DC__NUM_DPP__MAX];
+
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MaxVStartupLines[DC__NUM_DPP__MAX];
+ double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchModeSupported;
+ bool AllowDRAMClockChangeDuringVBlank[DC__NUM_DPP__MAX];
+ bool AllowDRAMSelfRefreshDuringVBlank[DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixDataBW[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipDelay;
+ double TInitXFill;
+ double TslvChk;
+ double SrcActiveDrainRate;
+ double Tno_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupported;
+
+ double prefetch_vm_bw[DC__NUM_DPP__MAX];
+ double prefetch_row_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupportedForPipe[DC__NUM_DPP__MAX];
+ unsigned int VStartupLines;
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[DC__NUM_DPP__MAX];
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[DC__NUM_DPP__MAX];
+ unsigned int ActiveDPPs;
+ unsigned int LBLatencyHidingSourceLinesY;
+ unsigned int LBLatencyHidingSourceLinesC;
+ double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double MinActiveDRAMClockChangeMargin;
+ double XFCSlaveVUpdateOffset[DC__NUM_DPP__MAX];
+ double XFCSlaveVupdateWidth[DC__NUM_DPP__MAX];
+ double XFCSlaveVReadyOffset[DC__NUM_DPP__MAX];
+ double InitFillLevel;
+ double FinalFillMargin;
+ double FinalFillLevel;
+ double RemainingFillLevel;
+ double TFinalxFill;
+
+
+ //
+ // SOC Bounding Box Parameters
+ //
+ double SRExitTime;
+ double SREnterPlusExitTime;
+ double UrgentLatency;
+ double WritebackLatency;
+ double PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency;
+ double NumberOfChannels;
+ double DRAMChannelWidth;
+ double FabricDatapathToDCNDataReturn;
+ double ReturnBusWidth;
+ double Downspreading;
+ double DISPCLKDPPCLKDSCCLKDownSpreading;
+ double DISPCLKDPPCLKVCOSpeed;
+ double RoundTripPingLatencyCycles;
+ double UrgentOutOfOrderReturnPerChannel;
+ unsigned int VMMPageSize;
+ double DRAMClockChangeLatency;
+ double XFCBusTransportTime;
+ double XFCXBUFLatencyTolerance;
+
+ //
+ // IP Parameters
+ //
+ unsigned int ROBBufferSizeInKByte;
+ double DETBufferSizeInKByte;
+ unsigned int DPPOutputBufferPixels;
+ unsigned int OPPOutputBufferLines;
+ unsigned int PixelChunkSizeInKByte;
+ double ReturnBW;
+ bool VirtualMemoryEnable;
+ unsigned int MaxPageTableLevels;
+ unsigned int OverridePageTableLevels;
+ unsigned int PTEChunkSize;
+ unsigned int MetaChunkSize;
+ unsigned int WritebackChunkSize;
+ bool ODMCapability;
+ unsigned int NumberOfDSC;
+ unsigned int LineBufferSize;
+ unsigned int MaxLineBufferLines;
+ unsigned int WritebackInterfaceLumaBufferSize;
+ unsigned int WritebackInterfaceChromaBufferSize;
+ unsigned int WritebackChromaLineBufferWidth;
+ double MaxDCHUBToPSCLThroughput;
+ double MaxPSCLToLBThroughput;
+ unsigned int PTEBufferSizeInRequests;
+ double DISPCLKRampingMargin;
+ unsigned int MaxInterDCNTileRepeaters;
+ bool XFCSupported;
+ double XFCSlvChunkSize;
+ double XFCFillBWOverhead;
+ double XFCFillConstant;
+ double XFCTSlvVupdateOffset;
+ double XFCTSlvVupdateWidth;
+ double XFCTSlvVreadyOffset;
+ double DPPCLKDelaySubtotal;
+ double DPPCLKDelaySCL;
+ double DPPCLKDelaySCLLBOnly;
+ double DPPCLKDelayCNVCFormater;
+ double DPPCLKDelayCNVCCursor;
+ double DISPCLKDelaySubtotal;
+ bool ProgressiveToInterlaceUnitInOPP;
+ unsigned int PDEProcessingBufIn64KBReqs;
+
+ // Pipe/Plane Parameters
+ int VoltageLevel;
+ double FabricAndDRAMBandwidth;
+ double FabricClock;
+ double DRAMSpeed;
+ double DISPCLK;
+ double SOCCLK;
+ double DCFCLK;
+
+ unsigned int NumberOfActivePlanes;
+ unsigned int ViewportWidth[DC__NUM_DPP__MAX];
+ unsigned int ViewportHeight[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartY[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartC[DC__NUM_DPP__MAX];
+ unsigned int PitchY[DC__NUM_DPP__MAX];
+ unsigned int PitchC[DC__NUM_DPP__MAX];
+ double HRatio[DC__NUM_DPP__MAX];
+ double VRatio[DC__NUM_DPP__MAX];
+ unsigned int htaps[DC__NUM_DPP__MAX];
+ unsigned int vtaps[DC__NUM_DPP__MAX];
+ unsigned int HTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int VTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int HTotal[DC__NUM_DPP__MAX];
+ unsigned int VTotal[DC__NUM_DPP__MAX];
+ unsigned int DPPPerPlane[DC__NUM_DPP__MAX];
+ double PixelClock[DC__NUM_DPP__MAX];
+ double PixelClockBackEnd[DC__NUM_DPP__MAX];
+ double DPPCLK[DC__NUM_DPP__MAX];
+ bool DCCEnable[DC__NUM_DPP__MAX];
+ unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
+ enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
+ enum source_format_class SourcePixelFormat[DC__NUM_DPP__MAX];
+ bool WritebackEnable[DC__NUM_DPP__MAX];
+ double WritebackDestinationWidth[DC__NUM_DPP__MAX];
+ double WritebackDestinationHeight[DC__NUM_DPP__MAX];
+ double WritebackSourceHeight[DC__NUM_DPP__MAX];
+ enum source_format_class WritebackPixelFormat[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaVTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaVTaps[DC__NUM_DPP__MAX];
+ double WritebackHRatio[DC__NUM_DPP__MAX];
+ double WritebackVRatio[DC__NUM_DPP__MAX];
+ unsigned int HActive[DC__NUM_DPP__MAX];
+ unsigned int VActive[DC__NUM_DPP__MAX];
+ bool Interlace[DC__NUM_DPP__MAX];
+ enum dm_swizzle_mode SurfaceTiling[DC__NUM_DPP__MAX];
+ unsigned int ScalerRecoutWidth[DC__NUM_DPP__MAX];
+ bool DynamicMetadataEnable[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataLinesBeforeActiveRequired[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
+ double DCCRate[DC__NUM_DPP__MAX];
+ bool ODMCombineEnabled[DC__NUM_DPP__MAX];
+ double OutputBpp[DC__NUM_DPP__MAX];
+ unsigned int NumberOfDSCSlices[DC__NUM_DPP__MAX];
+ bool DSCEnabled[DC__NUM_DPP__MAX];
+ unsigned int DSCDelay[DC__NUM_DPP__MAX];
+ unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
+ enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
+ enum output_encoder_class Output[DC__NUM_DPP__MAX];
+ unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
+ bool SynchronizedVBlank;
+ unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
+ unsigned int CursorWidth[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ unsigned int CursorBPP[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ bool XFCEnabled[DC__NUM_DPP__MAX];
+ bool ScalerEnabled[DC__NUM_DPP__MAX];
+
+ // Intermediates/Informational
+ bool ImmediateFlipSupport;
+ unsigned int SwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int SwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeY[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeC[DC__NUM_DPP__MAX];
+ unsigned int LBBitPerPixel[DC__NUM_DPP__MAX];
+ double LastPixelOfLineExtraWatermark;
+ double TotalDataReadBandwidth;
+ unsigned int TotalActiveWriteback;
+ unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
+ unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
+ double BandwidthAvailableForImmediateFlip;
+ unsigned int PrefetchMode;
+ bool IgnoreViewportPositioning;
+ double PrefetchBandwidth[DC__NUM_DPP__MAX];
+ bool ErrorResult[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesFrame[DC__NUM_DPP__MAX];
+
+ //
+ // Calculated dml_ml->vba.Outputs
+ //
+ double DCFClkDeepSleep;
+ double UrgentWatermark;
+ double UrgentExtraLatency;
+ double MemoryTripWatermark;
+ double WritebackUrgentWatermark;
+ double StutterExitWatermark;
+ double StutterEnterPlusExitWatermark;
+ double DRAMClockChangeWatermark;
+ double WritebackDRAMClockChangeWatermark;
+ double StutterEfficiency;
+ double StutterEfficiencyNotIncludingVBlank;
+ double MinUrgentLatencySupportUs;
+ double NonUrgentLatencyTolerance;
+ double MinActiveDRAMClockChangeLatencySupported;
+ enum clock_change_support DRAMClockChangeSupport;
+
+	// These are the clocks calculated by the library, but they are not actually
+	// used explicitly. They are fetched by tests and then possibly used. The
+	// ultimate values to use are the ones specified by the parameters to DML.
+ double DISPCLK_calculated;
+ double DSCCLK_calculated[DC__NUM_DPP__MAX];
+ double DPPCLK_calculated[DC__NUM_DPP__MAX];
+
+ unsigned int VStartup[DC__NUM_DPP__MAX];
+ unsigned int VUpdateOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VUpdateWidthPix[DC__NUM_DPP__MAX];
+ unsigned int VReadyOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+
+ double ImmediateFlipBW;
+ unsigned int TotImmediateFlipBytes;
+ double TCalc;
+ double MinTTUVBlank[DC__NUM_DPP__MAX];
+ double VRatioPrefetchY[DC__NUM_DPP__MAX];
+ double VRatioPrefetchC[DC__NUM_DPP__MAX];
+ double DSTXAfterScaler[DC__NUM_DPP__MAX];
+ double DSTYAfterScaler[DC__NUM_DPP__MAX];
+
+ double DestinationLinesToRequestVMInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesForPrefetch[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
+
+ double XFCTransferDelay[DC__NUM_DPP__MAX];
+ double XFCPrechargeDelay[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipLatency[DC__NUM_DPP__MAX];
+ double XFCPrefetchMargin[DC__NUM_DPP__MAX];
+
+ display_e2e_pipe_params_st cache_pipes[DC__NUM_DPP__MAX];
+ unsigned int cache_num_pipes;
+ unsigned int pipe_plane[DC__NUM_DPP__MAX];
+
+ /* vba mode support */
+ /*inputs*/
+ bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
+ double MaxHSCLRatio;
+ double MaxVSCLRatio;
+ unsigned int MaxNumWriteback;
+ bool WritebackLumaAndChromaScalingSupported;
+ bool Cursor64BppSupport;
+ double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double FabricClockPerState[DC__VOLTAGE_STATES + 1];
+ double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
+ double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDppclk[DC__VOLTAGE_STATES + 1];
+ double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
+ double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDispclk[DC__VOLTAGE_STATES + 1];
+
+ /*outputs*/
+ bool ScaleRatioAndTapsSupport;
+ bool SourceFormatPixelAndScanSupport;
+ unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETC[DC__NUM_DPP__MAX];
+ double TotalReadBandwidthConsumedGBytePerSecond;
+ double ReadBandwidth[DC__NUM_DPP__MAX];
+ double TotalWriteBandwidthConsumedGBytePerSecond;
+ double WriteBandwidth[DC__NUM_DPP__MAX];
+ double TotalBandwidthConsumedGBytePerSecond;
+ bool DCCEnabledInAnyPlane;
+ bool WritebackLatencySupport;
+ bool WritebackModeSupport;
+ bool Writeback10bpc420Supported;
+ bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
+ unsigned int TotalNumberOfActiveWriteback;
+ double CriticalPoint;
+ double ReturnBWToDCNPerState;
+ double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
+ bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1];
+ bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1];
+ bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1];
+ bool ModeSupport[DC__VOLTAGE_STATES + 1];
+ bool DIOSupport[DC__VOLTAGE_STATES + 1];
+ bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
+ bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1];
+ bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool IsErrorResult[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool prefetch_vm_bw_valid;
+ bool prefetch_row_bw_valid;
+ bool NumberOfOTGSupport;
+ bool NonsupportedDSCInputBPC;
+ bool WritebackScaleRatioAndTapsSupport;
+ bool CursorSupport;
+ bool PitchSupport;
+
+ double WritebackLineBufferLumaBufferSize;
+ double WritebackLineBufferChromaBufferSize;
+ double WritebackMinHSCLRatio;
+ double WritebackMinVSCLRatio;
+ double WritebackMaxHSCLRatio;
+ double WritebackMaxVSCLRatio;
+ double WritebackMaxHSCLTaps;
+ double WritebackMaxVSCLTaps;
+ unsigned int MaxNumDPP;
+ unsigned int MaxNumOTG;
+ double CursorBufferSize;
+ double CursorChunkSize;
+ unsigned int Mode;
+ unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double OutputLinkDPLanes[DC__NUM_DPP__MAX];
+ double SwathWidthYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightCPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixelDataBW[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDISPCLK[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1];
+ double PrefetchBW[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
+ double PrefillY[DC__NUM_DPP__MAX];
+ double PrefillC[DC__NUM_DPP__MAX];
+ double LineTimesForPrefetch[DC__NUM_DPP__MAX];
+ double LinesForMetaPTE[DC__NUM_DPP__MAX];
+ double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
+ double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthC[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double MaxSwathHeightY[DC__NUM_DPP__MAX];
+ double MaxSwathHeightC[DC__NUM_DPP__MAX];
+ double MinSwathHeightY[DC__NUM_DPP__MAX];
+ double MinSwathHeightC[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__NUM_DPP__MAX];
+ double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
+ double AlignedYPitch[DC__NUM_DPP__MAX];
+ double AlignedCPitch[DC__NUM_DPP__MAX];
+ double MaximumSwathWidth[DC__NUM_DPP__MAX];
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ double ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1];
+
+ double WritebackLumaVExtra;
+ double WritebackChromaVExtra;
+ double WritebackRequiredDISPCLK;
+ double MaximumSwathWidthSupport;
+ double MaximumSwathWidthInDETBuffer;
+ double MaximumSwathWidthInLineBuffer;
+ double MaxDispclkRoundedDownToDFSGranularity;
+ double MaxDppclkRoundedDownToDFSGranularity;
+ double PlaneRequiredDISPCLKWithoutODMCombine;
+ double PlaneRequiredDISPCLK;
+ double TotalNumberOfActiveOTG;
+ double FECOverhead;
+ double EffectiveFECOverhead;
+ unsigned int Outbpp;
+ unsigned int OutbppDSC;
+ double TotalDSCUnitsRequired;
+ double bpp;
+ unsigned int slices;
+ double SwathWidthGranularityY;
+ double RoundedUpMaxSwathSizeBytesY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesC;
+ double LinesInDETLuma;
+ double LinesInDETChroma;
+ double EffectiveDETLBLinesLuma;
+ double EffectiveDETLBLinesChroma;
+ double ProjectedDCFCLKDeepSleep;
+ double PDEAndMetaPTEBytesPerFrameY;
+ double PDEAndMetaPTEBytesPerFrameC;
+ unsigned int MetaRowBytesY;
+ unsigned int MetaRowBytesC;
+ unsigned int DPTEBytesPerRowC;
+ unsigned int DPTEBytesPerRowY;
+ double ExtraLatency;
+ double TimeCalc;
+ double TWait;
+ double MaximumReadBandwidthWithPrefetch;
+ double total_dcn_read_bw_with_flip;
+};
+
+#endif /* _DML2_DISPLAY_MODE_VBA_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
new file mode 100644
index 000000000000..8ba962df42e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
@@ -0,0 +1,1763 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp);
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ } else if (source_format == dm_444_8) {
+ ret_val = 1;
+ }
+ return ret_val;
+}
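+// Editorial note: for the 4:2:0 formats the chroma "element" is a CbCr pair,
+// so it is twice the size of the corresponding luma element (2 bytes at 8bpc,
+// 4 bytes at 10bpc as stored here).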
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ bool odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double) refclk_freq_in_mhz
+ * dml_min((double) recout_width, (double) hactive / 2.0)
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
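+	// Illustrative example (not from the original source): with refclk at
+	// 600 MHz, pclk at 300 MHz, no ODM combine, recout_width = 1920 and
+	// req_per_swath_ub = 120, the vratio <= 1.0 branch gives
+	// 600 * 1920 / 300 / 120 = 32 refclk cycles per request.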
+
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
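+	// Worked example (editorial, illustrative only): get_surf_rq_param() below
+	// uses chunk_bytes = 8192, so chunk_size = log2(8192) - 10 = 3; the field
+	// appears to hold log2 of the chunk size expressed in units of 1KB.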
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // FIXME: take the max between luma, chroma chunk size?
+	// okay for now, as we are setting chunk_bytes to 8KB anyway
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
+static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
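+	// Editorial note: the 2/3 factor above is read here as the packed footprint
+	// of 10-bit samples in the DET relative to their 2-byte-per-element swath
+	// size, padded up to the next 256B multiple; this is an interpretation, not
+	// a statement from the original authors.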
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { //128b request (for luma only for yuv420 8bpc)
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+		// Note: it is assumed that the config passed in will fit into
+		// the detile buffer.
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element;
+ unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
+ false);
+ unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
+ true);
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+
+ //full sized meta chunk width in unit of data elements
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+ const unsigned int pde_proc_buffer_size_64k_reqs =
+ mode_lib->ip.pde_proc_buffer_size_64k_reqs;
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ unsigned int pde_buf_entries;
+ bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
+
+ Calculate256BBlockSizes((enum source_format_class)(source_format),
+ (enum dm_swizzle_mode)(tiling),
+ bytes_per_element_y,
+ bytes_per_element_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember log rule
+ // "+" in log is multiply
+ // "-" in log is divide
+ // "/2" is like square root
+	// blk is vertically biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
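+	// Purely illustrative example: if the 256B block is 8 data elements high
+	// (log2_blk256_height = 3) and the macro tile is 64KB (log2_blk_bytes = 16),
+	// then log2_blk_height = 3 + ceil((16 - 8) / 2) = 7, i.e. the block is 128
+	// rows tall, and log2_blk_width follows from the remaining byte budget.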
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
+
+	// each 64b meta request for dcn is 8x8 meta elements and
+	// a meta element covers one 256b block of the data surface.
+ log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
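+	// Sketch of the geometry (editorial): a 64B meta request carries 8x8
+	// one-byte meta elements, and each meta element maps to one 256B block of
+	// surface data, so the request footprint on the surface is
+	// (8 * blk256_width) x (8 * blk256_height) data elements.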
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ //full sized meta chunk width in unit of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
+ * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+ } else { //64KB page size and must 64KB tile block
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
+
+	// The dpte request dimensions in data elements are dpte_req_width x dpte_req_height
+	// log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte req represents
+	// That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
+ pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
+ if (surf_linear) {
+ unsigned int dpte_row_height;
+
+ log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
+ / bytes_per_element,
+ dpte_buf_in_pte_reqs
+ * dpte_req_width)
+ / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, row height is the same as req height and row store up to vp size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // the dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tile surface that has dpte request of 8x1 ptes.
+	if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) // reduced; in this case a page fault would occur within a full-sized group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ //full size
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+	// since the pte request size is 64 bytes, the number of data pte requests per full-sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+	// FIXME: check if ppe applies to both luma and chroma in the 422 case
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
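+	// Editorial note: the sizes above are fixed policy values in this library
+	// (8KB data chunks, 2KB meta chunks, 256B minimum meta chunks, 2KB mpte
+	// groups); only min_chunk_bytes varies, collapsing to 0 when a full 64KB
+	// chunk is used.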
+
+ get_meta_and_pte_attr(mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+}
+
+void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+void dml_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ display_rq_params_st rq_param = {0};
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src_param);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+// Note: currently taken in as-is.
+// It would be nice to decouple this code from the hw register implementation and to extract the code that is repeated for luma and chroma.
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+
+ // Scaling
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratio_l;
+ double hratio_c;
+ double vratio_l;
+ double vratio_c;
+ bool scl_enable;
+
+ double line_time_in_us;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int meta_chunks_per_row_ub_c;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int meta_row_height_c;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+
+ unsigned int full_recout_width;
+ double xfc_transfer_delay;
+ double xfc_precharge_delay;
+ double xfc_remote_surface_flip_latency;
+ double xfc_dst_y_delta_drq_limit;
+ double xfc_prefetch_margin;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ double refcyc_per_req_delivery_pre_cur1;
+ double refcyc_per_req_delivery_cur1;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+ dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
+ dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
+ dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
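+	// Editorial note on fixed-point formats: ref_freq_to_pix_freq is programmed
+	// with 19 fractional bits and refcyc_per_htotal with 8 fractional bits, per
+	// the dml_pow(2, 19) / dml_pow(2, 8) scaling above. For example, a 600 MHz
+	// refclk with a 300 MHz pixel clock is a ratio of 2.0, i.e. a raw register
+	// value of 2 * 2^19 = 1048576.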
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+// dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
+ mode_422 = 0; // FIXME
+	access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical access
+// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
+// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+// vinit_l = scl.vinit;
+// vinit_c = scl.vinit_c;
+// vinit_bot_l = scl.vinit_bot;
+// vinit_bot_c = scl.vinit_bot_c;
+
+// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
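+	// Editorial note: the DPP and DISPCLK pipeline delays are specified in
+	// their own clock domains, so they are rescaled by pclk/dppclk and
+	// pclk/dispclk respectively to express the total as a delay in pixel
+	// clocks.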
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // Lwait
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
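+	// Illustrative example (hypothetical numbers, not from the SoC tables):
+	// with urgent_latency_us = 4, dram_clock_change_latency_us = 17 and a line
+	// time of 7.4 us, enabling p-state support makes
+	// line_wait = (17 + 4) / 7.4 = ~2.84 lines.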
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
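+	// Example of the min_hratio_fact rule above (illustrative values): with
+	// 4 horizontal taps and hratio = 1.5, the 2..6 tap branch gives
+	// min(1.5 * 2.0, 4.0) = 3.0, so hscale_pixel_rate = 3.0 * dppclk; this is
+	// the rate get_refcyc_per_delivery() uses in its vratio > 1.0 branch.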
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In ODM
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n",
+ __func__,
+ full_recout_width);
+ dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // XFC
+ xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
+ xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+
+ // TTU - Cursor
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp)(src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp)(src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
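+
+	// Note on the dml_pow(2, N) scaling above: these register fields appear to be
+	// fixed-point encodings. For example (hypothetical values, not from this change),
+	// dst_y_prefetch uses a 2-bit fraction, so 5.25 lines is programmed as
+	// 5.25 * 4 = 21, and vratio_prefetch uses a 19-bit fraction, so a prefetch
+	// vratio of 1.5 is programmed as 1.5 * 2^19 = 786432.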
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int) dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
+ dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int) dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int) ((double) meta_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
+ disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
+ disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
+ disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
+ 1);
+
+	// the slave pipe also has to have this value set to off
+ if (src->xfc_enable && !src->xfc_slave)
+ disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
+ else
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = {0};
+ display_dlg_sys_params_st dlg_sys_param = {0};
+
+	// Get watermarks and Tex (t_extra, the urgent extra latency).
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml_rq_dlg_get_dlg_params(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en,
+ vm_en,
+ ignore_viewport_pos,
+ immediate_flip_support);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
+{
+ memset(arb_param, 0, sizeof(*arb_param));
+ arb_param->max_req_outstanding = 256;
+ arb_param->min_req_outstanding = 68;
+ arb_param->sat_level_us = 60;
+}
+
+void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
+ cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
+ * (double) cur_req_width;
+ cur_req_per_width = cur_width_ub / (double) cur_req_width;
+ hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ dml_print("DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
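+
+// Illustrative walk-through of the cursor sizing math in calculate_ttu_cursor()
+// above, using hypothetical values (not taken from this change): a 64-wide 32bpp
+// cursor selects cur_req_size = 256 bytes, so cur_req_width = 256 / (32 / 8) = 64
+// pixels, cur_width_ub = ceil(64 / 64) * 64 = 64 and cur_req_per_width = 1, i.e.
+// a single request covers the whole cursor line.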
+
+unsigned int dml_rq_dlg_get_calculated_vstartup(struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx)
+{
+ unsigned int vstartup_pipe[DC__NUM_PIPES__MAX];
+ bool visited[DC__NUM_PIPES__MAX];
+ unsigned int pipe_inst = 0;
+ unsigned int i, j, k;
+
+ for (k = 0; k < num_pipes; ++k)
+ visited[k] = false;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
+ unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
+
+ for (j = i; j < num_pipes; j++) {
+ if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
+ && e2e_pipe_param[j].pipe.src.is_hsplit
+ && !visited[j]) {
+ vstartup_pipe[j] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[j] = true;
+ }
+ }
+
+ pipe_inst++;
+ }
+
+ if (!visited[i]) {
+ vstartup_pipe[i] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[i] = true;
+ pipe_inst++;
+ }
+ }
+
+ return vstartup_pipe[pipe_idx];
+}
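+
+// Illustrative example of the hsplit grouping in dml_rq_dlg_get_calculated_vstartup()
+// above (hypothetical configuration, not from this change): with num_pipes = 3 where
+// pipes 0 and 1 share an hsplit_grp and pipe 2 is standalone, pipes 0 and 1 both get
+// the vstartup computed for pipe_inst 0 and pipe 2 gets the one for pipe_inst 1.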
+
+void dml_rq_dlg_get_row_heights(struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ display_data_rq_dlg_params_st rq_dlg_param;
+ display_data_rq_misc_params_st rq_misc_param;
+ display_data_rq_sizing_params_st rq_sizing_param;
+
+ get_meta_and_pte_attr(mode_lib,
+ &rq_dlg_param,
+ &rq_misc_param,
+ &rq_sizing_param,
+ vp_width,
+ 0, // dummy
+ data_pitch,
+ 0, // dummy
+ source_format,
+ tiling,
+ macro_tile_size,
+ source_scan,
+ is_chroma);
+
+ *o_dpte_row_height = rq_dlg_param.dpte_row_height;
+ *o_meta_row_height = rq_dlg_param.meta_row_height;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
new file mode 100644
index 000000000000..efdd4c73d8f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_RQ_DLG_CALC_H__
+#define __DML2_DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+// Function: dml_rq_dlg_get_rq_params
+// Calculate requestor related parameters that are register definition agnostic
+// (i.e. this layer tries to separate real values from the register definition)
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_param - values that can be used to set up RQ (e.g. swath_height, plane1_addr, etc.)
+//
+void dml_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml_rq_dlg_get_rq_reg
+// Main entry point for test to get the register values out of this DML class.
+// This function calls <get_rq_param> and <extract_rq_regs> functions to calculate
+// and then populate the rq_regs struct
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ register field values.
+// See also: <display_rq_regs_st>
+void dml_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml_rq_dlg_get_dlg_params
+// Calculate deadline related parameters
+//
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+// Function: dml_rq_dlg_get_dlg_param_prefetch
+// For the flip_bw programming guide change, DML now needs to calculate the flip_bytes and prefetch_bw
+// for ALL pipes and use this info to calculate the prefetch programming.
+// Output: prefetch_param.prefetch_bw and flip_bytes
+void dml_rq_dlg_get_dlg_params_prefetch(
+ struct display_mode_lib *mode_lib,
+ display_dlg_prefetch_param_st *prefetch_param,
+ display_rq_dlg_params_st rq_dlg_param,
+ display_dlg_sys_params_st dlg_sys_param,
+ display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en);
+
+// Function: dml_rq_dlg_get_dlg_reg
+// Calculate and return DLG and TTU register struct given the system setting
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
+// cstate - 0: when calculating min_ttu_vblank it is assumed cstate is not required (added
+// for legacy or unrealistic timing tests). 1: normal mode, cstate is considered.
+void dml_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
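+
+// Illustrative call sequence (a sketch only; the local variable names below are
+// hypothetical and the e2e pipe array is assumed to be populated by the caller):
+//
+//	display_rq_regs_st rq_regs = {0};
+//	display_dlg_regs_st dlg_regs = {0};
+//	display_ttu_regs_st ttu_regs = {0};
+//
+//	dml_rq_dlg_get_rq_reg(&dml, &rq_regs, e2e[pipe_idx].pipe.src);
+//	dml_rq_dlg_get_dlg_reg(&dml, &dlg_regs, &ttu_regs, e2e, num_pipes, pipe_idx,
+//		cstate_en, pstate_en, vm_en, false, false);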
+
+// Function: dml_rq_dlg_get_calculated_vstartup
+// Calculate and return vstartup
+// Output:
+// unsigned int vstartup
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param corresponding to this dlg
+// NOTE: this MUST be called after setting the prefetch mode!
+unsigned int dml_rq_dlg_get_calculated_vstartup(
+ struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx);
+
+// Function: dml_rq_dlg_get_row_heights
+// Calculate dpte and meta row heights
+void dml_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma);
+
+// Function: dml_rq_dlg_get_arb_params
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
new file mode 100644
index 000000000000..189052e911fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_rq_dlg_helpers.h"
+
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
+{
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c);
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+}
+
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
+ rq_sizing.min_meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
+ rq_dlg_param.swath_width_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_height = %0d\n",
+ rq_dlg_param.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
+ rq_dlg_param.req_per_swath_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
+ rq_dlg_param.meta_pte_bytes_per_frame_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_groups_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
+ rq_dlg_param.dpte_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_bytes_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
+ rq_dlg_param.meta_chunks_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
+ rq_dlg_param.meta_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_row_height = %0d\n",
+ rq_dlg_param.meta_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.meta_bytes_per_row_ub);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
+ rq_misc_param.full_swath_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
+ rq_misc_param.stored_swath_bytes);
+ dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width);
+ dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height);
+ dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width);
+ dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+	dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_SYS_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
+ dlg_sys_param.t_srx_delay_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
+ dlg_sys_param.deepsleep_dcfclk_mhz);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
+ dlg_sys_param.total_flip_bw);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
+ dlg_sys_param.total_flip_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
+ rq_regs.min_meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
+ rq_regs.pte_row_height_linear);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c);
+ dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
+ dlg_regs.refcyc_h_blank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
+ dlg_regs.dlg_vblank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
+ dlg_regs.min_dst_y_next_start);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
+ dlg_regs.refcyc_per_htotal);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
+ dlg_regs.refcyc_x_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
+ dlg_regs.dst_y_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
+ dlg_regs.dst_y_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_row_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_row_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
+ dlg_regs.ref_freq_to_pix_freq);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
+ dlg_regs.vratio_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
+ dlg_regs.vratio_prefetch_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
+ dlg_regs.dst_y_offset_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
+ dlg_regs.vready_after_vcount0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
+ dlg_regs.dst_y_delta_drq_limit);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_transfer_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_precharge_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
+ dlg_regs.xfc_reg_remote_surface_flip_latency);
+
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
+ ttu_regs.qos_level_low_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
+ ttu_regs.qos_level_high_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
+ ttu_regs.min_ttu_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
+ ttu_regs.qos_level_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
+ ttu_regs.qos_level_fixed_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
+ ttu_regs.qos_level_fixed_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur1);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
new file mode 100644
index 000000000000..1f24db830737
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
+#define __DISPLAY_RQ_DLG_HELPERS_H__
+
+#include "dml_common_defs.h"
+#include "display_mode_lib.h"
+
+/* Function: Printer functions
+ * Print the various parameter and register structs
+ */
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param);
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing);
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param);
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param);
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param);
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param);
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs);
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs);
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs);
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
new file mode 100644
index 000000000000..1e4b1e383401
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -0,0 +1,1905 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml1_display_rq_dlg_calc.h"
+#include "display_mode_lib.h"
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static void get_blk256_size(
+ unsigned int *blk256_width,
+ unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
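+
+/*
+ * For reference, the mapping in get_blk256_size() keeps each block at 256 bytes:
+ * e.g. a 1 byte/element surface uses 16x16 blocks (16 * 16 * 1 = 256) while a
+ * 4 bytes/element surface uses 8x8 blocks (8 * 8 * 4 = 256).
+ */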
+
+static double get_refcyc_per_delivery(
+ struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ unsigned int recout_width,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: recout_width = %d", __func__, recout_width);
+ DTRACE("DLG: %s: vratio = %3.2f", __func__, vratio);
+ DTRACE("DLG: %s: req_per_swath_ub = %d", __func__, req_per_swath_ub);
+ DTRACE("DLG: %s: refcyc_per_delivery= %3.2f", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+}
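+
+/*
+ * Worked example of the get_refcyc_per_delivery() formula above, with hypothetical
+ * numbers (not from this change): refclk = 400MHz, pclk = 150MHz,
+ * recout_width = 1920 and req_per_swath_ub = 60 with vratio <= 1.0 gives
+ * 400 * 1920 / 150 / 60 = 85.33 refclk cycles per request delivery.
+ */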
+
+static double get_vratio_pre(
+ struct display_mode_lib *mode_lib,
+ unsigned int max_num_sw,
+ unsigned int max_partial_sw,
+ unsigned int swath_height,
+ double vinit,
+ double l_sw)
+{
+ double prefill = dml_floor(vinit, 1);
+ double vratio_pre = 1.0;
+
+ vratio_pre = (max_num_sw * swath_height + max_partial_sw) / l_sw;
+
+ if (swath_height > 4) {
+ double tmp0 = (max_num_sw * swath_height) / (l_sw - (prefill - 3.0) / 2.0);
+
+ if (tmp0 > vratio_pre)
+ vratio_pre = tmp0;
+ }
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, max_partial_sw);
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+ DTRACE("DLG: %s: vratio_pre = %3.2f", __func__, vratio_pre);
+
+ if (vratio_pre < 1.0) {
+ DTRACE("WARNING_DLG: %s: vratio_pre=%3.2f < 1.0, set to 1.0", __func__, vratio_pre);
+ vratio_pre = 1.0;
+ }
+
+ if (vratio_pre > 4.0) {
+ DTRACE(
+ "WARNING_DLG: %s: vratio_pre=%3.2f > 4.0 (max scaling ratio). set to 4.0",
+ __func__,
+ vratio_pre);
+ vratio_pre = 4.0;
+ }
+
+ return vratio_pre;
+}
+
+static void get_swath_need(
+ struct display_mode_lib *mode_lib,
+ unsigned int *max_num_sw,
+ unsigned int *max_partial_sw,
+ unsigned int swath_height,
+ double vinit)
+{
+ double prefill = dml_floor(vinit, 1);
+ unsigned int max_partial_sw_int;
+
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+
+ ASSERT(prefill > 0.0 && prefill <= 8.0);
+
+ *max_num_sw = (unsigned int) (dml_ceil((prefill - 1.0) / (double) swath_height, 1) + 1.0); /* prefill has to be >= 1 */
+ max_partial_sw_int =
+ (prefill == 1) ?
+ (swath_height - 1) :
+ ((unsigned int) (prefill - 2.0) % swath_height);
+ *max_partial_sw = (max_partial_sw_int < 1) ? 1 : max_partial_sw_int; /* ensure minimum of 1 is used */
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, *max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, *max_partial_sw);
+}
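+
+/*
+ * Worked example for get_swath_need() and get_vratio_pre() above, using hypothetical
+ * values (not from this change): vinit = 2.5 gives prefill = 2; with swath_height = 4
+ * this yields max_num_sw = ceil((2 - 1) / 4) + 1 = 2 and max_partial_sw = (2 - 2) % 4
+ * = 0, clamped to 1. Feeding these into get_vratio_pre() with l_sw = 6 gives
+ * vratio_pre = (2 * 4 + 1) / 6 = 1.5, which is within the [1.0, 4.0] clamp.
+ */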
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing)
+{
+ DTRACE("DLG: %s: rq_sizing param", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
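+
+/*
+ * Example of the register encoding in extract_rq_sizing_regs() above, with
+ * hypothetical sizing values (not taken from this change): chunk_bytes = 8192 gives
+ * chunk_size = log2(8192) - 10 = 3, min_chunk_bytes = 1024 gives
+ * min_chunk_size = log2(1024) - 8 + 1 = 3 and dpte_group_bytes = 2048 gives
+ * dpte_group_size = log2(2048) - 6 = 5.
+ */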
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ if (rq_param.yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ /* FIXME: take the max between luma, chroma chunk size?
+	 * okay for now, as we are setting chunk_bytes to 8kb anyway
+ */
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple(
+ (unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; /* 2/3 to chroma */
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
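+
+/*
+ * Illustration of the plane1 address split in dml1_extract_rq_regs() above, assuming
+ * a hypothetical det_buffer_size_kbytes of 64 (65536 bytes): when the luma/chroma
+ * stored swath ratio is <= 1.5, plane1 starts at 65536 / 2 / 64 = 512 in 64-byte
+ * units, i.e. at the midpoint of the detile buffer; otherwise the split point moves
+ * to roughly the 2/3 mark, rounded to a 256-byte multiple before the divide by 64.
+ */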
+
+static void handle_det_buf_split(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(
+ rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(
+ rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { /*full 256b request */
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { /*128b request (for luma only for yuv420 8bpc) */
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+
+ /* Bug workaround, luma and chroma req size needs to be the same. (see: DEGVIDCN10-137)
+ * TODO: Remove after rtl fix
+ */
+ if (req128_l == 1) {
+ req128_c = 1;
+ DTRACE("DLG: %s: bug workaround DEGVIDCN10-137", __func__);
+ }
+
+		/* Note: assumption, the config that is passed in will fit into
+		 * the detile buffer.
+ */
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ DTRACE("DLG: %s: req128_l = %0d", __func__, req128_l);
+ DTRACE("DLG: %s: req128_c = %0d", __func__, req128_c);
+ DTRACE("DLG: %s: full_swath_bytes_packed_l = %0d", __func__, full_swath_bytes_packed_l);
+ DTRACE("DLG: %s: full_swath_bytes_packed_c = %0d", __func__, full_swath_bytes_packed_c);
+}
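+
+/*
+ * Example of the split decision in handle_det_buf_split() above for a non-yuv420
+ * surface (hypothetical numbers): with full_swath_bytes_packed_l = 40960 and a 64KB
+ * detile buffer, total_swath_bytes = 81920 > 65536, so req128_l = 1 and the stored
+ * swath height is halved (log2_swath_height_l drops by one relative to the 256B
+ * block height).
+ */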
+
+/* Needs refactoring. */
+static void dml1_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) source_format,
+ is_chroma);
+ unsigned int log2_bytes_per_element = dml_log2(bytes_per_element);
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int log2_meta_row_height;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int dpte_req_width;
+
+ if (surf_linear) {
+ blk256_width = 256;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+	 * blk is vertically biased
+ */
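+	/*
+	 * For example (hypothetical 64KB tile with 4 bytes/element): log2_blk_bytes = 16
+	 * and blk256 is 8x8 so log2_blk256_height = 3, giving
+	 * log2_blk_height = 3 + ceil((16 - 8) / 2) = 7 (128 rows) and
+	 * log2_blk_width = 16 - 2 - 7 = 7 (128 elements), i.e. 128 * 128 * 4 = 64KB.
+	 */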
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+	log2_meta_req_bytes = 6; /* meta request is 64 bytes, i.e. an 8x8 block of 1-byte meta elements */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+	 * a meta element covers one 256b block of the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ log2_meta_row_height = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert)
+ log2_meta_row_height = log2_meta_req_height;
+ else
+ log2_meta_row_height = log2_meta_req_width;
+
+ *o_meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
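+ /* a 64b pte request returns 8 ptes, so the request shape in ptes always
+ * satisfies width_ptes * height_ptes = 8 (hence only 8x1, 4x2 or 2x4).
+ */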
+
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* the dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
+ * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents.
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate how many lines of data_pitch the dpte row buffer can hold and
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
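+ /* net effect for linear surfaces: the dpte row height is a power of two
+ * clamped to the 8..128 line range, sized so the pte request buffer covers
+ * at least that many lines of data_pitch.
+ */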
+ } else {
+ /* the upper bound of the dpte_row_width without dependency on viewport position follows. */
+ if (!surf_vert)
+ log2_dpte_row_height = log2_dpte_req_height;
+ else
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ }
+
+ /* From programming guide:
+ * There is a special case of saving only half of the ptes returned due to buffer space limits.
+ * This case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes = 4kb and tile blk_bytes >= 64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16)
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+
+ *o_dpte_row_height = 1 << log2_dpte_row_height;
+}
+
+static void get_surf_rq_param(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
+ struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
+ struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+ bool surf_linear;
+ bool surf_vert;
+ unsigned int bytes_per_element;
+ unsigned int log2_bytes_per_element;
+ unsigned int blk256_width;
+ unsigned int blk256_height;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int log2_dpte_group_width;
+ unsigned int dpte_row_width_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_req_height;
+ unsigned int dpte_req_width;
+ unsigned int dpte_group_width;
+ unsigned int log2_dpte_group_bytes;
+ unsigned int log2_dpte_group_length;
+ unsigned int func_meta_row_height, func_dpte_row_height;
+
+ /* FIXME: check if ppe applies to both luma and chroma in the 422 case */
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
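+ /* note: chunk_bytes is hard-coded to 8k just above, so min_chunk_bytes is
+ * always 1024 here; the 64k check only matters if chunk_bytes is ever changed.
+ */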
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ surf_vert = (pipe_src_param.source_scan == dm_vert);
+
+ bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) pipe_src_param.source_format,
+ is_chroma);
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+ blk256_width = 0;
+ blk256_height = 0;
+
+ if (surf_linear) {
+ blk256_width = 256 / bytes_per_element;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ DTRACE("DLG: %s: surf_linear = %d", __func__, surf_linear);
+ DTRACE("DLG: %s: surf_vert = %d", __func__, surf_vert);
+ DTRACE("DLG: %s: blk256_width = %d", __func__, blk256_width);
+ DTRACE("DLG: %s: blk256_height = %d", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes =
+ surf_linear ? 256 : get_blk_size_bytes(
+ (enum source_macro_tile_size) pipe_src_param.macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+ * blk is vertical biased
+ */
+ if (pipe_src_param.sw_mode != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(
+ vp_height - 1,
+ blk256_height,
+ 1) + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+ log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+ * a meta element covers one 256b block of the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 bytes; each byte represents 1 blk256 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ /*full sized meta chunk width in unit of data elements */
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ + meta_blk_height) * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(
+ meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
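+ /* each 64b mpte request maps 8 vm pages of the meta surface, hence the
+ * round-up to a multiple of 8 * vmpg_bytes before dividing.
+ */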
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; /*64B mpte request */
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ DTRACE("DLG: %s: meta_blk_height = %d", __func__, meta_blk_height);
+ DTRACE("DLG: %s: meta_blk_width = %d", __func__, meta_blk_width);
+ DTRACE("DLG: %s: meta_surface_bytes = %d", __func__, meta_surface_bytes);
+ DTRACE("DLG: %s: meta_pte_req_per_frame_ub = %d", __func__, meta_pte_req_per_frame_ub);
+ DTRACE("DLG: %s: meta_pte_bytes_per_frame_ub = %d", __func__, meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ log2_dpte_group_width = 0;
+ dpte_row_width_ub = 0;
+ dpte_row_height = 0;
+ dpte_req_height = 0; /* 64b dpte req height in data element */
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+ dpte_group_width = 0;
+ log2_dpte_group_bytes = 0;
+ log2_dpte_group_length = 0;
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
+
+ /* Ensure we only have the 3 shapes */
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
+ * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request
+ * represents, which depends on the pte shape (i.e. 8x1, 4x2, 2x4).
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate how many lines of data_pitch the dpte row buffer can hold and
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ dpte_row_height = 1 << log2_dpte_row_height;
+ rq_dlg_param->dpte_row_height = dpte_row_height;
+
+ /* For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ * The dpte_row_width_ub is the upper bound of data_pitch * dpte_row_height in elements with this unique buffering.
+ */
+ dpte_row_width_ub = dml_round_to_multiple(
+ data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ /* for tiled mode, row height is the same as req height and row store up to vp size upper bound */
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64;
+
+ /* From programming guide:
+ * There is a special case of saving only half of the ptes returned due to buffer space limits.
+ * This case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes = 4kb and tile blk_bytes >= 64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16) {
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+
+ /* the dpte_group_bytes is reduced for the specific case of vertical
+ * access of a tile surface that has dpte request of 8x1 ptes.
+ */
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) /* reduced; in this case there will be a page fault within a group */
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ /*full size */
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+ /* since the pte request size is 64 bytes, the number of data pte requests per full-sized group is as follows. */
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; /*length in 64b requests */
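+ /* e.g. a full-sized 2048 byte group holds 2048 / 64 = 32 pte requests
+ * (log2_dpte_group_length = 5); the reduced 512 byte group holds 8.
+ */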
+
+ /* full sized data pte group width in elements */
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ /* since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ * the upper bound for the dpte groups per row is as follows.
+ */
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
+ (double) dpte_row_width_ub / dpte_group_width,
+ 1);
+
+ dml1_rq_dlg_get_row_heights(
+ mode_lib,
+ &func_dpte_row_height,
+ &func_meta_row_height,
+ vp_width,
+ data_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+
+ /* Just a check to make sure this function and the new one give the same
+ * result. The standalone get_row_heights() function is based on the code
+ * in this function, so the same changes need to be made to both.
+ */
+ if (rq_dlg_param->meta_row_height != func_meta_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->meta_row_height = %d",
+ rq_dlg_param->meta_row_height);
+ DTRACE("MISMATCH: func_meta_row_height = %d", func_meta_row_height);
+ ASSERT(0);
+ }
+
+ if (rq_dlg_param->dpte_row_height != func_dpte_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->dpte_row_height = %d",
+ rq_dlg_param->dpte_row_height);
+ DTRACE("MISMATCH: func_dpte_row_height = %d", func_dpte_row_height);
+ ASSERT(0);
+ }
+}
+
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ /* get param for luma surface */
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) {
+ /* get param for chroma surface */
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ /* calculate how to split the det buffer space between luma and chroma */
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+/* Note: currently taken in as-is.
+ * It would be nice to decouple this code from the hw register implementation and to
+ * factor out the code that is repeated for luma and chroma.
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en)
+{
+ /* Timing */
+ unsigned int htotal = e2e_pipe_param.pipe.dest.htotal;
+ unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end;
+ unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start;
+ unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end;
+ bool interlaced = e2e_pipe_param.pipe.dest.interlaced;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz;
+ double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz;
+ double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz;
+ double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz;
+
+ double ref_freq_to_pix_freq;
+ double prefetch_xy_calc_in_dcfclk;
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dcc_en;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int bytes_per_element_l;
+ unsigned int bytes_per_element_c;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratios_l;
+ double hratios_c;
+ double vratio_l;
+ double vratio_c;
+ double line_time_in_us;
+ double vinit_l;
+ double vinit_c;
+ double vinit_bot_l;
+ double vinit_bot_c;
+ unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ unsigned int meta_pte_bytes_per_frame_ub_l;
+ unsigned int meta_bytes_per_row_ub_l;
+ unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double line_o;
+ double line_setup;
+ double line_calc;
+ double dst_y_prefetch;
+ double t_pre_us;
+ unsigned int vm_bytes;
+ unsigned int meta_row_bytes;
+ unsigned int max_num_sw_l;
+ unsigned int max_num_sw_c;
+ unsigned int max_partial_sw_l;
+ unsigned int max_partial_sw_c;
+ double max_vinit_l;
+ double max_vinit_c;
+ unsigned int lsw_l;
+ unsigned int lsw_c;
+ unsigned int sw_bytes_ub_l;
+ unsigned int sw_bytes_ub_c;
+ unsigned int sw_bytes;
+ unsigned int dpte_row_bytes;
+ double prefetch_bw;
+ double flip_bw;
+ double t_vm_us;
+ double t_r0_us;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ unsigned int full_recout_width;
+ double hratios_cur0;
+ unsigned int cur0_src_width;
+ enum cursor_bpp cur0_bpp;
+ unsigned int cur0_req_size;
+ unsigned int cur0_req_width;
+ double cur0_width_ub;
+ double cur0_req_per_width;
+ double hactive_cur0;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ DTRACE("DLG: %s: cstate_en = %d", __func__, cstate_en);
+ DTRACE("DLG: %s: pstate_en = %d", __func__, pstate_en);
+ DTRACE("DLG: %s: vm_en = %d", __func__, vm_en);
+ DTRACE("DLG: %s: iflip_en = %d", __func__, iflip_en);
+
+ /* ------------------------- */
+ /* Section 1.5.2.1: OTG dependent Params */
+ /* ------------------------- */
+ DTRACE("DLG: %s: dppclk_freq_in_mhz = %3.2f", __func__, dppclk_freq_in_mhz);
+ DTRACE("DLG: %s: dispclk_freq_in_mhz = %3.2f", __func__, dispclk_freq_in_mhz);
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: interlaced = %d", __func__, interlaced);
+
+ ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+ ASSERT(ref_freq_to_pix_freq < 4.0);
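+ /* the dml_pow(2, n) factors below convert to the registers' fixed-point formats
+ * (19 fractional bits for ref_freq_to_pix_freq, 8 for refcyc_per_htotal); the
+ * ASSERT above bounds the integer part of the ratio.
+ */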
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
+
+ prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
+ min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
+ if (cstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank);
+ if (pstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank);
+ min_ttu_vblank = min_ttu_vblank + t_calc_us;
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ DTRACE("DLG: %s: min_dcfclk_mhz = %3.2f", __func__, min_dcfclk_mhz);
+ DTRACE("DLG: %s: min_ttu_vblank = %3.2f", __func__, min_ttu_vblank);
+ DTRACE(
+ "DLG: %s: min_dst_y_ttu_vblank = %3.2f",
+ __func__,
+ min_dst_y_ttu_vblank);
+ DTRACE("DLG: %s: t_calc_us = %3.2f", __func__, t_calc_us);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ DTRACE(
+ "DLG: %s: ref_freq_to_pix_freq = %3.2f",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ /* ------------------------- */
+ /* Section 1.5.2.2: Prefetch, Active and TTU */
+ /* ------------------------- */
+ /* Prefetch Calc */
+ /* Source */
+ dcc_en = e2e_pipe_param.pipe.src.dcc;
+ dual_plane = is_dual_plane(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format);
+ mode_422 = 0; /* FIXME */
+ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
+ bytes_per_element_l = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 0);
+ bytes_per_element_c = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 1);
+ vp_height_l = e2e_pipe_param.pipe.src.viewport_height;
+ vp_width_l = e2e_pipe_param.pipe.src.viewport_width;
+ vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c;
+ vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c;
+
+ /* Scaling */
+ htaps_l = e2e_pipe_param.pipe.scale_taps.htaps;
+ htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c;
+ hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c;
+ vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio;
+ vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+ vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit;
+ vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c;
+ vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot;
+ vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c;
+
+ swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+ dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+ meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+ meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+ swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset;
+ vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width;
+ vready_offset = e2e_pipe_param.pipe.dest.vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
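+ /* the dpp/disp clock pipeline delays are specified in their own clock domains;
+ * scaling by pclk/dppclk and pclk/dispclk expresses the total in pixel-clock cycles.
+ */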
+
+ vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start;
+
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ if (vstartup_start >= min_vblank) {
+ DTRACE(
+ "WARNING_DLG: %s: vblank_start=%d vblank_end=%d",
+ __func__,
+ vblank_start,
+ vblank_end);
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = 0;
+ dst_y_after_scaler = 0;
+
+ if (e2e_pipe_param.pipe.src.is_hsplit)
+ dst_x_after_scaler = pixel_rate_delay_subtotal
+ + e2e_pipe_param.pipe.dest.recout_width;
+ else
+ dst_x_after_scaler = pixel_rate_delay_subtotal;
+
+ if (e2e_pipe_param.dout.output_format == dm_420)
+ dst_y_after_scaler = 1;
+ else
+ dst_y_after_scaler = 0;
+
+ if (dst_x_after_scaler >= htotal) {
+ dst_x_after_scaler = dst_x_after_scaler - htotal;
+ dst_y_after_scaler = dst_y_after_scaler + 1;
+ }
+
+ DTRACE("DLG: %s: htotal = %d", __func__, htotal);
+ DTRACE(
+ "DLG: %s: pixel_rate_delay_subtotal = %d",
+ __func__,
+ pixel_rate_delay_subtotal);
+ DTRACE("DLG: %s: dst_x_after_scaler = %d", __func__, dst_x_after_scaler);
+ DTRACE("DLG: %s: dst_y_after_scaler = %d", __func__, dst_y_after_scaler);
+
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(
+ mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
+
+ line_o = (double) dst_y_after_scaler + dst_x_after_scaler / (double) htotal;
+ line_setup = (double) (vupdate_offset + vupdate_width + vready_offset) / (double) htotal;
+ line_calc = t_calc_us / line_time_in_us;
+
+ DTRACE(
+ "DLG: %s: soc.sr_enter_plus_exit_time_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.sr_enter_plus_exit_time_us);
+ DTRACE(
+ "DLG: %s: soc.dram_clock_change_latency_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.dram_clock_change_latency_us);
+ DTRACE(
+ "DLG: %s: soc.urgent_latency_us = %3.2f",
+ __func__,
+ mode_lib->soc.urgent_latency_us);
+
+ DTRACE("DLG: %s: swath_height_l = %d", __func__, swath_height_l);
+ if (dual_plane)
+ DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
+
+ DTRACE(
+ "DLG: %s: t_srx_delay_us = %3.2f",
+ __func__,
+ (double) dlg_sys_param.t_srx_delay_us);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
+ DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
+ DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
+ DTRACE("DLG: %s: vready_offset = %d", __func__, vready_offset);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, line_time_in_us);
+ DTRACE("DLG: %s: line_wait = %3.2f", __func__, line_wait);
+ DTRACE("DLG: %s: line_o = %3.2f", __func__, line_o);
+ DTRACE("DLG: %s: line_setup = %3.2f", __func__, line_setup);
+ DTRACE("DLG: %s: line_calc = %3.2f", __func__, line_calc);
+
+ dst_y_prefetch = ((double) min_vblank - 1.0)
+ - (line_setup + line_calc + line_wait + line_o);
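+ /* prefetch budget in display lines: the whole vblank minus one line, less the
+ * lines already spent on setup, calc, the worst-case wait and the scaler output delay.
+ */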
+ DTRACE("DLG: %s: dst_y_prefetch (before rnd) = %3.2f", __func__, dst_y_prefetch);
+ ASSERT(dst_y_prefetch >= 2.0);
+
+ dst_y_prefetch = dml_floor(4.0 * (dst_y_prefetch + 0.125), 1) / 4;
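+ /* dml_floor(4.0 * (x + 0.125), 1) / 4 rounds x to the nearest quarter line,
+ * matching the 2 fractional bits used when the register is programmed below.
+ */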
+ DTRACE("DLG: %s: dst_y_prefetch (after rnd) = %3.2f", __func__, dst_y_prefetch);
+
+ t_pre_us = dst_y_prefetch * line_time_in_us;
+ vm_bytes = 0;
+ meta_row_bytes = 0;
+
+ if (dcc_en && vm_en)
+ vm_bytes = meta_pte_bytes_per_frame_ub_l;
+ if (dcc_en)
+ meta_row_bytes = meta_bytes_per_row_ub_l;
+
+ max_num_sw_l = 0;
+ max_num_sw_c = 0;
+ max_partial_sw_l = 0;
+ max_partial_sw_c = 0;
+
+ max_vinit_l = interlaced ? dml_max(vinit_l, vinit_bot_l) : vinit_l;
+ max_vinit_c = interlaced ? dml_max(vinit_c, vinit_bot_c) : vinit_c;
+
+ get_swath_need(mode_lib, &max_num_sw_l, &max_partial_sw_l, swath_height_l, max_vinit_l);
+ if (dual_plane)
+ get_swath_need(
+ mode_lib,
+ &max_num_sw_c,
+ &max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c);
+
+ lsw_l = max_num_sw_l * swath_height_l + max_partial_sw_l;
+ lsw_c = max_num_sw_c * swath_height_c + max_partial_sw_c;
+ sw_bytes_ub_l = lsw_l * swath_width_ub_l * bytes_per_element_l;
+ sw_bytes_ub_c = lsw_c * swath_width_ub_c * bytes_per_element_c;
+ sw_bytes = 0;
+ dpte_row_bytes = 0;
+
+ if (vm_en) {
+ if (dual_plane)
+ dpte_row_bytes = dpte_bytes_per_row_ub_l + dpte_bytes_per_row_ub_c;
+ else
+ dpte_row_bytes = dpte_bytes_per_row_ub_l;
+ } else {
+ dpte_row_bytes = 0;
+ }
+
+ if (dual_plane)
+ sw_bytes = sw_bytes_ub_l + sw_bytes_ub_c;
+ else
+ sw_bytes = sw_bytes_ub_l;
+
+ DTRACE("DLG: %s: sw_bytes_ub_l = %d", __func__, sw_bytes_ub_l);
+ DTRACE("DLG: %s: sw_bytes_ub_c = %d", __func__, sw_bytes_ub_c);
+ DTRACE("DLG: %s: sw_bytes = %d", __func__, sw_bytes);
+ DTRACE("DLG: %s: vm_bytes = %d", __func__, vm_bytes);
+ DTRACE("DLG: %s: meta_row_bytes = %d", __func__, meta_row_bytes);
+ DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
+
+ prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
+ flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw)
+ / (double) dlg_sys_param.total_flip_bytes;
+ t_vm_us = line_time_in_us / 4.0;
+ if (vm_en && dcc_en) {
+ t_vm_us = dml_max(
+ dlg_sys_param.t_extra_us,
+ dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
+
+ if (iflip_en && !dual_plane) {
+ t_vm_us = dml_max(mode_lib->soc.urgent_latency_us, t_vm_us);
+ if (flip_bw > 0.)
+ t_vm_us = dml_max(vm_bytes / flip_bw, t_vm_us);
+ }
+ }
+
+ t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+
+ if (vm_en || dcc_en) {
+ t_r0_us = dml_max(
+ (double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
+ dlg_sys_param.t_extra_us);
+ t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
+
+ if (iflip_en && !dual_plane) {
+ t_r0_us = dml_max(mode_lib->soc.urgent_latency_us * 2.0, t_r0_us);
+ if (flip_bw > 0.)
+ t_r0_us = dml_max(
+ (dpte_row_bytes + meta_row_bytes) / flip_bw,
+ t_r0_us);
+ }
+ }
+
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; /* in terms of line */
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; /* in terms of refclk */
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->dst_y_after_scaler);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->refcyc_x_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->refcyc_x_after_scaler);
+
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_prefetch = %d",
+ __func__,
+ disp_dlg_regs->dst_y_prefetch);
+
+ dst_y_per_vm_vblank = 0.0;
+ dst_y_per_row_vblank = 0.0;
+
+ dst_y_per_vm_vblank = t_vm_us / line_time_in_us;
+ dst_y_per_vm_vblank = dml_floor(4.0 * (dst_y_per_vm_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+
+ dst_y_per_row_vblank = t_r0_us / line_time_in_us;
+ dst_y_per_row_vblank = dml_floor(4.0 * (dst_y_per_row_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+
+ DTRACE("DLG: %s: lsw_l = %d", __func__, lsw_l);
+ DTRACE("DLG: %s: lsw_c = %d", __func__, lsw_c);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_l = %d", __func__, dpte_bytes_per_row_ub_l);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_c = %d", __func__, dpte_bytes_per_row_ub_c);
+
+ DTRACE("DLG: %s: prefetch_bw = %3.2f", __func__, prefetch_bw);
+ DTRACE("DLG: %s: flip_bw = %3.2f", __func__, flip_bw);
+ DTRACE("DLG: %s: t_pre_us = %3.2f", __func__, t_pre_us);
+ DTRACE("DLG: %s: t_vm_us = %3.2f", __func__, t_vm_us);
+ DTRACE("DLG: %s: t_r0_us = %3.2f", __func__, t_r0_us);
+ DTRACE("DLG: %s: dst_y_per_vm_vblank = %3.2f", __func__, dst_y_per_vm_vblank);
+ DTRACE("DLG: %s: dst_y_per_row_vblank = %3.2f", __func__, dst_y_per_row_vblank);
+ DTRACE("DLG: %s: dst_y_prefetch = %3.2f", __func__, dst_y_prefetch);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ DTRACE("DLG: %s: lsw = %3.2f", __func__, lsw);
+
+ vratio_pre_l = get_vratio_pre(
+ mode_lib,
+ max_num_sw_l,
+ max_partial_sw_l,
+ swath_height_l,
+ max_vinit_l,
+ lsw);
+ vratio_pre_c = 1.0;
+ if (dual_plane)
+ vratio_pre_c = get_vratio_pre(
+ mode_lib,
+ max_num_sw_c,
+ max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c,
+ lsw);
+
+ DTRACE("DLG: %s: vratio_pre_l=%3.2f", __func__, vratio_pre_l);
+ DTRACE("DLG: %s: vratio_pre_c=%3.2f", __func__, vratio_pre_c);
+
+ ASSERT(vratio_pre_l <= 4.0);
+ if (vratio_pre_l >= 4.0)
+ disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+
+ ASSERT(vratio_pre_c <= 4.0);
+ if (vratio_pre_c >= 4.0)
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ /* Active */
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_c < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; /* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; /* *2 for 2 pixel per element */
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratios_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l * 2.0;
+ } else {
+ if (hratios_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratios_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c * 2.0;
+ } else {
+ if (hratios_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+ refcyc_per_req_delivery_pre_cur0 = 0.;
+ refcyc_per_req_delivery_cur0 = 0.;
+
+ full_recout_width = 0;
+ if (e2e_pipe_param.pipe.src.is_hsplit) {
+ if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+ DTRACE("DLG: %s: Warning: full_recout_width not set in hsplit mode", __func__);
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ DTRACE("DLG: %s: full_recout_width = %d", __func__, full_recout_width);
+ DTRACE("DLG: %s: hscale_pixel_rate_l = %3.2f", __func__, hscale_pixel_rate_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_c);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+ }
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+
+ /* TTU - Luma / Chroma */
+ if (access_dir) { /* vertical access */
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c =
+ (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ /* TTU - Cursor */
+ hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
+ cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+ cur0_req_size = 0;
+ cur0_req_width = 0;
+ cur0_width_ub = 0.0;
+ cur0_req_per_width = 0.0;
+ hactive_cur0 = 0.0;
+
+ ASSERT(cur0_src_width <= 256);
+
+ if (cur0_src_width > 0) {
+ unsigned int cur0_bit_per_pixel = 0;
+
+ if (cur0_bpp == dm_cur_2bit) {
+ cur0_req_size = 64; /* byte */
+ cur0_bit_per_pixel = 2;
+ } else { /* 32bit */
+ cur0_bit_per_pixel = 32;
+ if (cur0_src_width >= 1 && cur0_src_width <= 16)
+ cur0_req_size = 64;
+ else if (cur0_src_width >= 17 && cur0_src_width <= 31)
+ cur0_req_size = 128;
+ else
+ cur0_req_size = 256;
+ }
+
+ cur0_req_width = (double) cur0_req_size / ((double) cur0_bit_per_pixel / 8.0);
+ cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
+ * (double) cur0_req_width;
+ cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
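+ /* e.g. a 64 pixel wide 32bpp cursor: cur0_req_size = 256 bytes,
+ * cur0_req_width = 256 / 4 = 64 pixels, cur0_width_ub = 64 and
+ * cur0_req_per_width = 1.
+ */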
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+
+ if (vratio_pre_l <= 1.0) {
+ refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_pre_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_pre_cur0 < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ refcyc_per_req_delivery_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ DTRACE("DLG: %s: cur0_req_width = %d", __func__, cur0_req_width);
+ DTRACE(
+ "DLG: %s: cur0_width_ub = %3.2f",
+ __func__,
+ cur0_width_ub);
+ DTRACE(
+ "DLG: %s: cur0_req_per_width = %3.2f",
+ __func__,
+ cur0_req_per_width);
+ DTRACE(
+ "DLG: %s: hactive_cur0 = %3.2f",
+ __func__,
+ hactive_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_cur0);
+
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_cur0 < dml_pow(2, 13));
+ } else {
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = 0;
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = 0;
+ }
+
+ /* TTU - Misc */
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
new file mode 100644
index 000000000000..987d7671cd0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_CALC_H__
+#define __DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param);
+/* Function: dml1_rq_dlg_get_rq_params
+ * Calculate requestor-related parameters that are register-definition agnostic
+ * (i.e. this layer tries to keep real values separate from the register definitions)
+ * Input:
+ * pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+ * Output:
+ * rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+ */
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param);
+
+
+/* Function: dml1_rq_dlg_get_dlg_params
+ * Calculate deadline related parameters
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
new file mode 100644
index 000000000000..b953b02a1512
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+#include "dml_inline_defs.h"
+
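+/* Round to the nearest integer; ties (fractional part exactly 0.5) round up,
+ * e.g. dml_round(2.5) == 3.0 and dml_round(2.49) == 2.0.
+ */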
+double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
new file mode 100644
index 000000000000..b2847bc469fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMMON_DEFS_H__
+#define __DC_COMMON_DEFS_H__
+
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
+
+#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+
+double dml_round(double a);
+
+#endif /* __DC_COMMON_DEFS_H__ */
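One point worth noting about the two logging macros above: they expand in place and reference a local mode_lib pointer, so they can only be used inside functions that already have one. A hedged usage sketch follows; the function name is hypothetical and not part of the patch:

/* Illustration only: any caller of dml_print()/DTRACE() must have a
 * "mode_lib" pointer in scope, because the macro body expands to
 * dm_logger_write(mode_lib->logger, LOG_DML, ...).
 */
static void dml_trace_example(struct display_mode_lib *mode_lib)
{
	DTRACE("dml: example trace, value = %d", 12);
}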
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
new file mode 100644
index 000000000000..e68086b8a22f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML_INLINE_DEFS_H__
+#define __DML_INLINE_DEFS_H__
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+static inline double dml_min(double a, double b)
+{
+ return (double) dcn_bw_min2(a, b);
+}
+
+static inline double dml_max(double a, double b)
+{
+ return (double) dcn_bw_max2(a, b);
+}
+
+static inline double dml_max3(double a, double b, double c)
+{
+ return dml_max(dml_max(a, b), c);
+}
+
+static inline double dml_max4(double a, double b, double c, double d)
+{
+ return dml_max(dml_max(a, b), dml_max(c, d));
+}
+
+static inline double dml_max5(double a, double b, double c, double d, double e)
+{
+ return dml_max(dml_max4(a, b, c, d), e);
+}
+
+static inline double dml_ceil(double a, double granularity)
+{
+ return (double) dcn_bw_ceil2(a, granularity);
+}
+
+static inline double dml_floor(double a, double granularity)
+{
+ return (double) dcn_bw_floor2(a, granularity);
+}
+
+static inline int dml_log2(double x)
+{
+ return dml_round((double)dcn_bw_log(x, 2));
+}
+
+static inline double dml_pow(double a, int exp)
+{
+ return (double) dcn_bw_pow(a, exp);
+}
+
+static inline double dml_fmod(double f, int val)
+{
+ return (double) dcn_bw_mod(f, val);
+}
+
+static inline double dml_ceil_2(double f)
+{
+ return (double) dcn_bw_ceil2(f, 2);
+}
+
+static inline double dml_ceil_ex(double x, double granularity)
+{
+ return (double) dcn_bw_ceil2(x, granularity);
+}
+
+static inline double dml_floor_ex(double x, double granularity)
+{
+ return (double) dcn_bw_floor2(x, granularity);
+}
+
+static inline double dml_log(double x, double base)
+{
+ return (double) dcn_bw_log(x, base);
+}
+
+static inline unsigned int dml_round_to_multiple(unsigned int num,
+ unsigned int multiple,
+ bool up)
+{
+ unsigned int remainder;
+
+ if (multiple == 0)
+ return num;
+
+ remainder = num % multiple;
+
+ if (remainder == 0)
+ return num;
+
+ if (up)
+ return (num + multiple - remainder);
+ else
+ return (num - remainder);
+}
+#endif
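A short worked example of dml_round_to_multiple() above, with arbitrary illustrative values:

/* remainder = num % multiple (5 for 37 % 8), then snap up or down:
 *
 *   dml_round_to_multiple(37, 8, true)  -> 37 + 8 - 5 = 40   (round up)
 *   dml_round_to_multiple(37, 8, false) -> 37 - 5      = 32   (round down)
 *   dml_round_to_multiple(40, 8, true)  -> 40                 (already a multiple)
 *   dml_round_to_multiple(37, 0, true)  -> 37                 (multiple == 0 is a no-op)
 */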
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
new file mode 100644
index 000000000000..bc7d8c707221
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "soc_bounding_box.h"
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+#include "dml_inline_defs.h"
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
+{
+ to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
+ to_box->sr_exit_time_us = from_box->sr_exit_time_us;
+ to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
+ to_box->urgent_latency_us = from_box->urgent_latency_us;
+ to_box->writeback_latency_us = from_box->writeback_latency_us;
+}
+
+voltage_scaling_st dml_socbb_voltage_scaling(
+ const soc_bounding_box_st *soc,
+ enum voltage_state voltage)
+{
+ const voltage_scaling_st *voltage_state;
+ const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
+
+ for (voltage_state = soc->clock_limits;
+ voltage_state < voltage_end && voltage_state->state != voltage;
+ voltage_state++) {
+ }
+
+ if (voltage_state < voltage_end)
+ return *voltage_state;
+ return soc->clock_limits[DC__VOLTAGE_STATES - 1];
+}
+
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
+{
+ double return_bw;
+
+ voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
+ state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
+ * box->ideal_dram_bw_after_urgent_percent / 100.0);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
+
+ return return_bw;
+}
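To make the min() chain in dml_socbb_return_bw_mhz() concrete, here is a worked example with made-up bounding-box numbers, purely illustrative and not taken from any real SoC:

/* return_bw is the most restrictive of three paths (all in MB/s here):
 *
 *   DCFCLK path : 64 bytes * 600 MHz                      = 38400
 *   DRAM path   : 4.0 GB/s/chan * 1000 * 2 chans * 80/100 =  6400
 *   fabric path : 64 bytes * 800 MHz                      = 51200
 *
 *   return_bw = min(38400, 6400, 51200) = 6400 -> DRAM-limited
 */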
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
new file mode 100644
index 000000000000..7a65206a6d21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __SOC_BOUNDING_BOX_H__
+#define __SOC_BOUNDING_BOX_H__
+
+#include "dml_common_defs.h"
+
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
+voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
new file mode 100644
index 000000000000..70d01a9e9676
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -0,0 +1,58 @@
+#
+# Makefile for the 'gpio' sub-component of DAL.
+# It provides the control and status of HW GPIO pins.
+
+GPIO = gpio_base.o gpio_service.o hw_factory.o \
+ hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o
+
+AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# all DCE8.x are derived from DCE8.0
+GPIO_DCE80 = hw_translate_dce80.o hw_factory_dce80.o
+
+AMD_DAL_GPIO_DCE80 = $(addprefix $(AMDDALPATH)/dc/gpio/dce80/,$(GPIO_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+GPIO_DCE110 = hw_translate_dce110.o hw_factory_dce110.o
+
+AMD_DAL_GPIO_DCE110 = $(addprefix $(AMDDALPATH)/dc/gpio/dce110/,$(GPIO_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE110)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+GPIO_DCE120 = hw_translate_dce120.o hw_factory_dce120.o
+
+AMD_DAL_GPIO_DCE120 = $(addprefix $(AMDDALPATH)/dc/gpio/dce120/,$(GPIO_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
+
+AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
+endif
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
+
+AMD_DAL_GPIO_DIAG_FPGA = $(addprefix $(AMDDALPATH)/dc/gpio/diagnostics/,$(GPIO_DIAG_FPGA))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DIAG_FPGA)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
new file mode 100644
index 000000000000..20d81bca119c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ mm ## block ## id ## _ ## reg_name
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+/*
+ * dal_hw_factory_dce110_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce110_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
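For context, a hedged sketch of how a per-ASIC init such as dal_hw_factory_dce110_init() is typically selected; the dispatcher below is illustrative only and not part of this patch:

/* Illustration only: a caller picks one *_init() per display family and
 * lets it fill in the function table and per-GPIO pin counts.
 */
static void hw_factory_init_example(struct hw_factory *factory,
	enum dce_version version)
{
	switch (version) {
	case DCE_VERSION_11_0:
		dal_hw_factory_dce110_init(factory);
		break;
	default:
		/* other families provide their own dal_hw_factory_*_init() */
		break;
	}
}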
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
new file mode 100644
index 000000000000..ecf06ed0d587
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE110_H__
+#define __DAL_HW_FACTORY_DCE110_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce110_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
new file mode 100644
index 000000000000..ac4cddbba815
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC;
+ * DdcHandle will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce110_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce110_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
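A brief note on the fixed offsets applied at the end of id_to_offset() above; the numeric address below is made up for illustration:

/* The per-pin tables only store the "A" register of each GPIO; the
 * other registers of the bank are assumed to sit at fixed distances:
 *
 *   offset_mask = offset - 1
 *   offset_en   = offset + 1
 *   offset_y    = offset + 2
 *
 * so if a pin's A register were at 0x1234 (hypothetical), MASK, EN and Y
 * would be derived as 0x1233, 0x1235 and 0x1236, and the same bit mask
 * (mask_y/mask_en/mask_mask) is reused for all of them.
 */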
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
new file mode 100644
index 000000000000..4d16e09853c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE110_H__
+#define __DAL_HW_TRANSLATE_DCE110_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce110_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
new file mode 100644
index 000000000000..4ced9a7d63dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dce120.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* expand the base address at compile time */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dce120_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce120_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
new file mode 100644
index 000000000000..db260c351f73
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE120_H__
+#define __DAL_HW_FACTORY_DCE120_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce120_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
new file mode 100644
index 000000000000..af3843a69652
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+
+#include "hw_translate_dce120.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* expand the base address at compile time */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand the register list macros defined in the HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* REG(DC_GPIO_GENLK_MASK */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* we don't care about the GPIO_ID for DDC;
+ * DdcHandle will use GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce120_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce120_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
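A quick sketch of what the REG()/BASE() macros used above expand to; the segment index is hypothetical:

/* DCE12 register headers only provide per-block relative offsets, so
 * REG() folds in the instance base segment at compile time:
 *
 *   REG(DC_GPIO_HPD_A)
 *     -> BASE(mmDC_GPIO_HPD_A_BASE_IDX) + mmDC_GPIO_HPD_A
 *     -> DCE_BASE__INST0_SEG<n>         + mmDC_GPIO_HPD_A
 *
 * where <n> is whatever mmDC_GPIO_HPD_A_BASE_IDX evaluates to (e.g. 2),
 * so the switch cases in offset_to_id()/id_to_offset() compare against
 * fully resolved register addresses.
 */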
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
new file mode 100644
index 000000000000..c21766894af3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE120_H__
+#define __DAL_HW_TRANSLATE_DCE120_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce120_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
new file mode 100644
index 000000000000..48b67866377e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce80.h"
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define HPD_REG_LIST_DCE8(id) \
+ HPD_GPIO_REG_LIST(id), \
+ .int_status = mmDC_HPD ## id ## _INT_STATUS,\
+ .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
+
+#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
+ .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
+ .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
+ .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
+ .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST_DCE8(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5),
+ hpd_regs(6)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST_DCE8(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST_DCE8(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
new file mode 100644
index 000000000000..e78a8b36f35a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE80_H__
+#define __DAL_HW_FACTORY_DCE80_H__
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
new file mode 100644
index 000000000000..fabb9da504be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce80.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "smu/smu_7_0_1_d.h"
+
+/*
+ * @brief
+ * Returns index of first bit (starting with LSB) which is set
+ */
+static uint32_t index_from_vector(
+ uint32_t vector)
+{
+ uint32_t result = 0;
+ uint32_t mask = 1;
+
+ do {
+ if (vector == mask)
+ return result;
+
+ ++result;
+ mask <<= 1;
+ } while (mask);
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_ENUM_UNKNOWN;
+}
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* GPIOPAD */
+ case mmGPIOPAD_A:
+ *id = GPIO_ID_GPIO_PAD;
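+		/* the pad enum is the bit position of the single set bit in 'mask' */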
+ *en = index_from_vector(mask);
+ return (*en <= GPIO_GPIO_PAD_MAX);
+ /* DDC */
+	/* the GPIO_ID is not set for DDC lines here;
+	 * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+	 * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GPIO_PAD:
+ info->offset = mmGPIOPAD_A;
+ info->mask = (1 << en);
+ result = (info->mask <= GPIO_GPIO_PAD_MAX);
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+
+ if (result) {
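+		/* 'offset' holds the _A register; the code assumes the companion
+		 * _MASK, _EN and _Y registers sit at fixed offsets around it.
+		 */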
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *translate)
+{
+ translate->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
new file mode 100644
index 000000000000..374f2f3282a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE80_H__
+#define __DAL_HW_TRANSLATE_DCE80_H__
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *tr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
new file mode 100644
index 000000000000..409763c70ce5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dcn10.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
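+/* Absolute register address: segment base (selected by the register's
+ * _BASE_IDX token) plus the register's own offset within that segment.
+ */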
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dcn10_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
new file mode 100644
index 000000000000..2cc7a585b1f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCN10_H__
+#define __DAL_HW_FACTORY_DCN10_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
new file mode 100644
index 000000000000..64a6915b846b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "hw_translate_dcn10.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+	/* REG(DC_GPIO_GENLK_MASK) */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+	/* the GPIO_ID is not set for DDC lines here;
+	 * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+	 * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dcn10_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
new file mode 100644
index 000000000000..9edef53c80a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCN10_H__
+#define __DAL_HW_TRANSLATE_DCN10_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
new file mode 100644
index 000000000000..9c4a56c738c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+
+#include "gpio_regs.h"
+
+/****************************** new register headers */
+/*** following in header */
+
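+/* Each entry expands to the <type>_reg/_mask/_shift triplet of struct
+ * gpio_registers for DDC line 'id'; 'cd' selects the DATA or CLK pin and
+ * 'type' is one of MASK/A/EN/Y.
+ */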
+#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
+ .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_REG_LIST(cd,id) \
+ {\
+ DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define DDC_REG_LIST(cd,id) \
+ DDC_GPIO_REG_LIST(cd,id),\
+ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
+
+#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
+ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
+ .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_VGA_REG_LIST(cd) \
+ {\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_VGA_REG_LIST(cd) \
+ DDC_GPIO_VGA_REG_LIST(cd),\
+ .ddc_setup = mmDC_I2C_DDCVGA_SETUP
+
+#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
+ .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
+ .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_I2C_REG_LIST(cd) \
+ {\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_I2C_REG_LIST(cd) \
+ DDC_GPIO_I2C_REG_LIST(cd),\
+ .ddc_setup = 0
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
+
+struct ddc_registers {
+ struct gpio_registers gpio;
+ uint32_t ddc_setup;
+};
+
+struct ddc_sh_mask {
+	/* i2c_ddc_setup */
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_MODE;
+ /* ddc1_mask */
+ uint32_t DC_GPIO_DDC1DATA_PD_EN;
+ uint32_t DC_GPIO_DDC1CLK_PD_EN;
+ uint32_t AUX_PAD1_MODE;
+ /* i2cpad_mask */
+ uint32_t DC_GPIO_SDA_PD_DIS;
+ uint32_t DC_GPIO_SCL_PD_DIS;
+};
+
+/*** following in dc_resource */
+
+#define ddc_data_regs(id) \
+{\
+ DDC_REG_LIST(DATA,id)\
+}
+
+#define ddc_clk_regs(id) \
+{\
+ DDC_REG_LIST(CLK,id)\
+}
+
+#define ddc_vga_data_regs \
+{\
+ DDC_VGA_REG_LIST(DATA)\
+}
+
+#define ddc_vga_clk_regs \
+{\
+ DDC_VGA_REG_LIST(CLK)\
+}
+
+#define ddc_i2c_data_regs \
+{\
+ DDC_I2C_REG_LIST(SDA)\
+}
+
+#define ddc_i2c_clk_regs \
+{\
+ DDC_I2C_REG_LIST(SCL)\
+}
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
new file mode 100644
index 000000000000..26695b963c58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = NULL,
+ .create_ddc_clock = NULL,
+ .create_generic = NULL,
+ .create_hpd = NULL,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+};
+
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
new file mode 100644
index 000000000000..8a74f6adb8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
+#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
new file mode 100644
index 000000000000..bf9068846927
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+
+#include "../hw_translate.h"
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = NULL,
+ .id_to_offset = NULL,
+};
+
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
new file mode 100644
index 000000000000..4f053241fe96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+#define __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
new file mode 100644
index 000000000000..1d1efd72b291
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_gpio.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Public API
+ */
+
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ return dal_gpio_open_ex(gpio, mode);
+}
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (gpio->pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_ALREADY_OPENED;
+ }
+
+ gpio->mode = mode;
+
+ return dal_gpio_service_open(
+ gpio->service, gpio->id, gpio->en, mode, &gpio->pin);
+}
+
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->get_value(gpio->pin, value);
+}
+
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_value(gpio->pin, value);
+}
+
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio)
+{
+ return gpio->mode;
+}
+
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->change_mode(gpio->pin, mode);
+}
+
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio)
+{
+ return gpio->id;
+}
+
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio)
+{
+ return gpio->en;
+}
+
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_config(gpio->pin, config_data);
+}
+
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info)
+{
+ return gpio->service->translate.funcs->id_to_offset(
+ gpio->id, gpio->en, pin_info) ?
+ GPIO_RESULT_OK : GPIO_RESULT_INVALID_DATA;
+}
+
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio)
+{
+ switch (gpio->id) {
+ case GPIO_ID_GENERIC:
+ switch (gpio->en) {
+ case GPIO_GENERIC_A:
+ return SYNC_SOURCE_IO_GENERIC_A;
+ case GPIO_GENERIC_B:
+ return SYNC_SOURCE_IO_GENERIC_B;
+ case GPIO_GENERIC_C:
+ return SYNC_SOURCE_IO_GENERIC_C;
+ case GPIO_GENERIC_D:
+ return SYNC_SOURCE_IO_GENERIC_D;
+ case GPIO_GENERIC_E:
+ return SYNC_SOURCE_IO_GENERIC_E;
+ case GPIO_GENERIC_F:
+ return SYNC_SOURCE_IO_GENERIC_F;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (gpio->en) {
+ case GPIO_SYNC_HSYNC_A:
+ return SYNC_SOURCE_IO_HSYNC_A;
+ case GPIO_SYNC_VSYNC_A:
+ return SYNC_SOURCE_IO_VSYNC_A;
+ case GPIO_SYNC_HSYNC_B:
+ return SYNC_SOURCE_IO_HSYNC_B;
+ case GPIO_SYNC_VSYNC_B:
+ return SYNC_SOURCE_IO_VSYNC_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_HPD:
+ switch (gpio->en) {
+ case GPIO_HPD_1:
+ return SYNC_SOURCE_IO_HPD1;
+ case GPIO_HPD_2:
+ return SYNC_SOURCE_IO_HPD2;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (gpio->en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC;
+ case GPIO_GSL_SWAPLOCK_A:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_A;
+ case GPIO_GSL_SWAPLOCK_B:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+}
+
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio)
+{
+ return gpio->output_state;
+}
+
+void dal_gpio_close(
+ struct gpio *gpio)
+{
+ if (!gpio)
+ return;
+
+ dal_gpio_service_close(gpio->service, &gpio->pin);
+
+ gpio->mode = GPIO_MODE_UNKNOWN;
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state)
+{
+ struct gpio *gpio = kzalloc(sizeof(struct gpio), GFP_KERNEL);
+
+ if (!gpio) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ gpio->service = service;
+ gpio->pin = NULL;
+ gpio->id = id;
+ gpio->en = en;
+ gpio->mode = GPIO_MODE_UNKNOWN;
+ gpio->output_state = output_state;
+
+ return gpio;
+}
+
+void dal_gpio_destroy(
+ struct gpio **gpio)
+{
+ if (!gpio || !*gpio) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*gpio);
+
+ kfree(*gpio);
+
+ *gpio = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
new file mode 100644
index 000000000000..5c5925299f8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+
+struct gpio_registers {
+ uint32_t MASK_reg;
+ uint32_t MASK_mask;
+ uint32_t MASK_shift;
+ uint32_t A_reg;
+ uint32_t A_mask;
+ uint32_t A_shift;
+ uint32_t EN_reg;
+ uint32_t EN_mask;
+ uint32_t EN_shift;
+ uint32_t Y_reg;
+ uint32_t Y_mask;
+ uint32_t Y_shift;
+};
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
new file mode 100644
index 000000000000..80038e0e610f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "hw_gpio.h"
+
+/*
+ * @brief
+ * Public API.
+ */
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx)
+{
+ struct gpio_service *service;
+
+ uint32_t index_of_id;
+
+ service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+
+ if (!service) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!dal_hw_translate_init(&service->translate, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ if (!dal_hw_factory_init(&service->factory, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+	/* allocate and initialize busyness storage */
+ {
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ index_of_id = 0;
+ service->ctx = ctx;
+
+ do {
+ uint32_t number_of_bits =
+ service->factory.number_of_pins[index_of_id];
+
+ uint32_t number_of_uints =
+ (number_of_bits + bits_per_uint - 1) /
+ bits_per_uint;
+
+ uint32_t *slot;
+
+ if (number_of_bits) {
+ uint32_t index_of_uint = 0;
+
+ slot = kzalloc(number_of_uints * sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!slot) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ do {
+ slot[index_of_uint] = 0;
+
+ ++index_of_uint;
+ } while (index_of_uint < number_of_uints);
+ } else
+ slot = NULL;
+
+ service->busyness[index_of_id] = slot;
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ return service;
+
+failure_2:
+ while (index_of_id) {
+ uint32_t *slot;
+
+ --index_of_id;
+
+ slot = service->busyness[index_of_id];
+
+ kfree(slot);
+ }
+
+failure_1:
+ kfree(service);
+
+ return NULL;
+}
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask)
+{
+ enum gpio_id id;
+ uint32_t en;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ return dal_gpio_create_irq(service, id, en);
+}
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr)
+{
+ if (!ptr || !*ptr) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+	/* free busyness storage */
+ {
+ uint32_t index_of_id = 0;
+
+ do {
+ uint32_t *slot = (*ptr)->busyness[index_of_id];
+
+ kfree(slot);
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ kfree(*ptr);
+
+ *ptr = NULL;
+}
+
+/*
+ * @brief
+ * Private API.
+ */
+
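+/* Pin busyness is tracked per GPIO id as a bitmap packed into uint32_t
+ * slots: bit (en % 32) of slot (en / 32) is set while the pin is open.
+ */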
+static bool is_pin_busy(
+ const struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
+
+ return 0 != (*slot & (1 << (en % bits_per_uint)));
+}
+
+static void set_pin_busy(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] |=
+ (1 << (en % bits_per_uint));
+}
+
+static void set_pin_free(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] &=
+ ~(1 << (en % bits_per_uint));
+}
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!service->busyness[id]) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ if (is_pin_busy(service, id, en)) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_DEVICE_BUSY;
+ }
+
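+ /* create the pin through the factory; DDC and HPD pins also get
+ * their register sets defined by the factory */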
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ pin = service->factory.funcs->create_ddc_data(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ pin = service->factory.funcs->create_ddc_clock(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_GENERIC:
+ pin = service->factory.funcs->create_generic(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_HPD:
+ pin = service->factory.funcs->create_hpd(
+ service->ctx, id, en);
+ service->factory.funcs->define_hpd_registers(pin, en);
+ break;
+ case GPIO_ID_SYNC:
+ pin = service->factory.funcs->create_sync(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_GSL:
+ pin = service->factory.funcs->create_gsl(
+ service->ctx, id, en);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin->funcs->open(pin, mode)) {
+ ASSERT_CRITICAL(false);
+ dal_gpio_service_close(service, &pin);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ set_pin_busy(service, id, en);
+ *ptr = pin;
+ return GPIO_RESULT_OK;
+}
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!ptr) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ pin = *ptr;
+
+ if (pin) {
+ set_pin_free(service, pin->id, pin->en);
+
+ pin->funcs->close(pin);
+
+ pin->funcs->destroy(ptr);
+ }
+}
+
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1 +
+ dal_gpio_get_enum(irq));
+ case GPIO_ID_GPIO_PAD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_GPIOPAD0 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1RX +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config)
+{
+ struct gpio_config_data config_data;
+
+ if (!config)
+ return GPIO_RESULT_INVALID_DATA;
+
+ config_data.type = GPIO_CONFIG_TYPE_HPD;
+ config_data.config.hpd = *config;
+
+ return dal_gpio_set_config(irq, &config_data);
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct gpio *irq;
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ case GPIO_ID_GPIO_PAD:
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ irq = dal_gpio_create(
+ service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (irq)
+ return irq;
+
+ ASSERT_CRITICAL(false);
+ return NULL;
+}
+
+void dal_gpio_destroy_irq(
+ struct gpio **irq)
+{
+ if (!irq || !*irq) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*irq);
+ dal_gpio_destroy(irq);
+ kfree(*irq);
+
+ *irq = NULL;
+}
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info)
+{
+ enum gpio_id id;
+ uint32_t en;
+ struct ddc *ddc;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en))
+ return NULL;
+
+ ddc = kzalloc(sizeof(struct ddc), GFP_KERNEL);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
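+ /* data and clock pins of a DDC line share the same 'en' index,
+ * each within its own gpio_id */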
+ ddc->pin_data = dal_gpio_create(
+ service, GPIO_ID_DDC_DATA, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_data) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ ddc->pin_clock = dal_gpio_create(
+ service, GPIO_ID_DDC_CLOCK, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_clock) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ ddc->hw_info = *info;
+
+ ddc->ctx = service->ctx;
+
+ return ddc;
+
+failure_2:
+ dal_gpio_destroy(&ddc->pin_data);
+
+failure_1:
+ kfree(ddc);
+
+ return NULL;
+}
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_ddc_close(*ddc);
+ dal_gpio_destroy(&(*ddc)->pin_data);
+ dal_gpio_destroy(&(*ddc)->pin_clock);
+ kfree(*ddc);
+
+ *ddc = NULL;
+}
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type)
+{
+ enum gpio_result result;
+
+ struct gpio_config_data config_data;
+ struct hw_gpio *hw_data;
+ struct hw_gpio *hw_clock;
+
+ result = dal_gpio_open_ex(ddc->pin_data, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ return result;
+ }
+
+ result = dal_gpio_open_ex(ddc->pin_clock, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ goto failure;
+ }
+
+ /* DDC clock and data pins should belong to the same DDC block;
+ * we use the data pin to set the pad mode. */
+
+ if (mode == GPIO_MODE_INPUT)
+ /* this path is used by detect_sink_type,
+ * which needs an extra delay */
+ config_data.type = GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE;
+ else
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+
+ hw_data = FROM_HW_GPIO_PIN(ddc->pin_data->pin);
+ hw_clock = FROM_HW_GPIO_PIN(ddc->pin_clock->pin);
+
+ config_data.config.ddc.data_en_bit_present = hw_data->store.en != 0;
+ config_data.config.ddc.clock_en_bit_present = hw_clock->store.en != 0;
+
+ result = dal_gpio_set_config(ddc->pin_data, &config_data);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ BREAK_TO_DEBUGGER();
+
+ dal_gpio_close(ddc->pin_clock);
+
+failure:
+ dal_gpio_close(ddc->pin_data);
+
+ return result;
+}
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode)
+{
+ enum gpio_result result;
+
+ enum gpio_mode original_mode =
+ dal_gpio_get_mode(ddc->pin_data);
+
+ result = dal_gpio_change_mode(ddc->pin_data, mode);
+
+ /* [anaumov] DAL2 code returned GPIO_RESULT_NON_SPECIFIC_ERROR
+ * in case of failure;
+ * with set_mode() semantics, on failure
+ * we must explicitly restore the original mode */
+
+ if (result != GPIO_RESULT_OK)
+ goto failure;
+
+ result = dal_gpio_change_mode(ddc->pin_clock, mode);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ dal_gpio_change_mode(ddc->pin_clock, original_mode);
+
+failure:
+ dal_gpio_change_mode(ddc->pin_data, original_mode);
+
+ return result;
+}
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc)
+{
+ return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data);
+}
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type)
+{
+ struct gpio_config_data config_data;
+
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+ config_data.config.ddc.data_en_bit_present = false;
+ config_data.config.ddc.clock_en_bit_present = false;
+
+ return dal_gpio_set_config(ddc->pin_data, &config_data);
+}
+
+void dal_ddc_close(
+ struct ddc *ddc)
+{
+ dal_gpio_close(ddc->pin_clock);
+ dal_gpio_close(ddc->pin_data);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
new file mode 100644
index 000000000000..c7f3081f59cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_H__
+#define __DAL_GPIO_SERVICE_H__
+
+struct hw_translate;
+struct hw_factory;
+
+struct gpio_service {
+ struct dc_context *ctx;
+ struct hw_translate translate;
+ struct hw_factory factory;
+ /*
+ * @brief
+ * Busyness storage.
+ * For each member of 'enum gpio_id',
+ * store an array of bits (packed into uint32_t slots);
+ * an individual bit is indexed by the pin's 'en' value */
+ uint32_t *busyness[GPIO_ID_COUNT];
+};
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr);
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
new file mode 100644
index 000000000000..dcfdd71b2304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+
+#include "gpio_regs.h"
+
+#define ONE_MORE_0 1
+#define ONE_MORE_1 2
+#define ONE_MORE_2 3
+#define ONE_MORE_3 4
+#define ONE_MORE_4 5
+#define ONE_MORE_5 6
+
+
+#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_HPD_## type),\
+ .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
+
+#define HPD_GPIO_REG_LIST(id) \
+ {\
+ HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define HPD_REG_LIST(id) \
+ HPD_GPIO_REG_LIST(ONE_MORE_ ## id), \
+ .int_status = REGI(DC_HPD_INT_STATUS, HPD, id),\
+ .toggle_filt_cntl = REGI(DC_HPD_TOGGLE_FILT_CNTL, HPD, id)
+
+#define HPD_MASK_SH_LIST(mask_sh) \
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED, mask_sh),\
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_CONNECT_INT_DELAY, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_DISCONNECT_INT_DELAY, mask_sh)
+
+struct hpd_registers {
+ struct gpio_registers gpio;
+ uint32_t int_status;
+ uint32_t toggle_filt_cntl;
+};
+
+struct hpd_sh_mask {
+ /* int_status */
+ uint32_t DC_HPD_SENSE_DELAYED;
+ uint32_t DC_HPD_SENSE;
+ /* toggle_filt_cntl */
+ uint32_t DC_HPD_CONNECT_INT_DELAY;
+ uint32_t DC_HPD_DISCONNECT_INT_DELAY;
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
new file mode 100644
index 000000000000..310f48965b27
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_ddc.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ddc->shifts->field_name, ddc->masks->field_name
+
+#define CTX \
+ ddc->base.base.ctx
+#define REG(reg)\
+ (ddc->regs->reg)
+
+static void destruct(
+ struct hw_ddc *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
+
+ destruct(pin);
+
+ kfree(pin);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(ptr);
+ struct hw_gpio *hw_gpio = NULL;
+ uint32_t regval;
+ uint32_t ddc_data_pd_en = 0;
+ uint32_t ddc_clk_pd_en = 0;
+ uint32_t aux_pad_mode = 0;
+
+ hw_gpio = &ddc->base;
+
+ if (hw_gpio == NULL) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ regval = REG_GET_3(gpio.MASK_reg,
+ DC_GPIO_DDC1DATA_PD_EN, &ddc_data_pd_en,
+ DC_GPIO_DDC1CLK_PD_EN, &ddc_clk_pd_en,
+ AUX_PAD1_MODE, &aux_pad_mode);
+
+ switch (config_data->config.ddc.type) {
+ case GPIO_DDC_CONFIG_TYPE_MODE_I2C:
+ /* On plug-in, there is a transient level on the pad
+ * which must be discharged through the internal pull-down.
+ * Enable the internal pull-down; a 2.5 msec discharge time
+ * is required for detection of AUX mode */
+ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
+ if (!ddc_data_pd_en || !ddc_clk_pd_en) {
+
+ REG_SET_2(gpio.MASK_reg, regval,
+ DC_GPIO_DDC1DATA_PD_EN, 1,
+ DC_GPIO_DDC1CLK_PD_EN, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ } else {
+ uint32_t reg2;
+ uint32_t sda_pd_dis = 0;
+ uint32_t scl_pd_dis = 0;
+
+ reg2 = REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+
+ if (sda_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SDA_PD_DIS, 0);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+
+ if (!scl_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SCL_PD_DIS, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ }
+
+ if (aux_pad_mode) {
+ /* allow the pins to de-assert
+ * before setting the pad to I2C mode */
+ if (config_data->config.ddc.data_en_bit_present ||
+ config_data->config.ddc.clock_en_bit_present)
+ /* [anaumov] in DAL2, there was
+ * dc_service_delay_in_microseconds(2000); */
+ msleep(2);
+
+ /* set the I2C pad mode */
+ /* read the register again,
+ * some bits may have been changed */
+ REG_UPDATE(gpio.MASK_reg,
+ AUX_PAD1_MODE, 0);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
+ /* set the AUX pad mode */
+ if (!aux_pad_mode) {
+ REG_SET(gpio.MASK_reg, regval,
+ AUX_PAD1_MODE, 1);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 1);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_2(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 0,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ }
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = dal_hw_gpio_get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_ddc *ddc,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&ddc->base, id, en, ctx);
+ ddc->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_ddc *pin;
+
+ if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ pin = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(pin, id, en, ctx);
+ return &pin->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
new file mode 100644
index 000000000000..9690e2a885d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_DDC_H__
+#define __DAL_HW_DDC_H__
+
+#include "ddc_regs.h"
+
+struct hw_ddc {
+ struct hw_gpio base;
+ const struct ddc_registers *regs;
+ const struct ddc_sh_mask *shifts;
+ const struct ddc_sh_mask *masks;
+};
+
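+/* recover the hw_ddc from a hw_gpio_pin: first to the embedded hw_gpio,
+ * then to the containing hw_ddc */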
+#define HW_DDC_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_ddc, base)
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
new file mode 100644
index 000000000000..87b580fa4bc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_factory.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_factory_dce80.h"
+#include "dce110/hw_factory_dce110.h"
+#include "dce120/hw_factory_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_factory_dcn10.h"
+#endif
+
+#include "diagnostics/hw_factory_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_factory_diag_fpga_init(factory);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_factory_dce80_init(factory);
+ return true;
+
+ case DCE_VERSION_10_0:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_factory_dce120_init(factory);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_factory_dcn10_init(factory);
+ return true;
+#endif
+
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+void dal_hw_factory_destroy(
+ struct dc_context *ctx,
+ struct hw_factory **factory)
+{
+ if (!factory || !*factory) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*factory);
+
+ *factory = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
new file mode 100644
index 000000000000..6e4dd3521935
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_H__
+#define __DAL_HW_FACTORY_H__
+
+struct hw_gpio_pin;
+struct hw_hpd;
+
+struct hw_factory {
+ uint32_t number_of_pins[GPIO_ID_COUNT];
+
+ const struct hw_factory_funcs {
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ void (*define_hpd_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ void (*define_ddc_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ } *funcs;
+};
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
new file mode 100644
index 000000000000..660510842ecf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
+
+#define CTX \
+ gpio->base.ctx
+#define REG(reg)\
+ (gpio->regs->reg)
+
+static void store_registers(
+ struct hw_gpio *gpio)
+{
+ REG_GET(MASK_reg, MASK, &gpio->store.mask);
+ REG_GET(A_reg, A, &gpio->store.a);
+ REG_GET(EN_reg, EN, &gpio->store.en);
+ /* TODO store GPIO_MUX_CONTROL if we ever use it */
+}
+
+static void restore_registers(
+ struct hw_gpio *gpio)
+{
+ REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
+ REG_UPDATE(A_reg, A, gpio->store.a);
+ REG_UPDATE(EN_reg, EN, gpio->store.en);
+ /* TODO restore GPIO_MUX_CONTROL if we ever use it */
+}
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ store_registers(pin);
+
+ ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
+
+ return ptr->opened;
+}
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ enum gpio_result result = GPIO_RESULT_OK;
+
+ switch (ptr->mode) {
+ case GPIO_MODE_INPUT:
+ case GPIO_MODE_OUTPUT:
+ case GPIO_MODE_HARDWARE:
+ case GPIO_MODE_FAST_OUTPUT:
+ REG_GET(Y_reg, Y, value);
+ break;
+ default:
+ result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ return result;
+}
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value)
+{
+ struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ /* This is the public interface:
+ * the value comes from the client and is not shifted yet
+ * (the client does not know the register shifts). */
+
+ switch (ptr->mode) {
+ case GPIO_MODE_OUTPUT:
+ REG_UPDATE(A_reg, A, value);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* We use (EN) for faster switching (used for DDC GPIO).
+ * (A) is grounded; the output is driven via (EN = 0)
+ * to pull the line down (output == 0), while (EN = 1)
+ * leaves the output tri-stated */
+ REG_UPDATE(EN_reg, EN, ~value);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ return dal_hw_gpio_config_mode(pin, mode);
+}
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ restore_registers(pin);
+
+ ptr->mode = GPIO_MODE_UNKNOWN;
+ ptr->opened = false;
+}
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *gpio,
+ enum gpio_mode mode)
+{
+ gpio->base.mode = mode;
+
+ switch (mode) {
+ case GPIO_MODE_INPUT:
+ /* turn off output enable, act as input pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(EN_reg, EN, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_OUTPUT:
+ /* turn on output enable, act as output pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* grounding the A register and then using the EN register bit
+ * gives a faster rise time */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_HARDWARE:
+ /* program the pin as tri-state, pin is driven by HW */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_INTERRUPT:
+ /* Interrupt mode supported only by HPD (IrqGpio) pins. */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ pin->base.ctx = ctx;
+ pin->base.id = id;
+ pin->base.en = en;
+ pin->base.mode = GPIO_MODE_UNKNOWN;
+ pin->base.opened = false;
+
+ pin->store.mask = 0;
+ pin->store.a = 0;
+ pin->store.en = 0;
+ pin->store.mux = 0;
+
+ pin->mux_supported = false;
+}
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin)
+{
+ ASSERT(!pin->base.opened);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
new file mode 100644
index 000000000000..bca0cef18ff9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_GPIO_H__
+#define __DAL_HW_GPIO_H__
+
+#include "gpio_regs.h"
+
+#define FROM_HW_GPIO_PIN(ptr) \
+ container_of((ptr), struct hw_gpio, base)
+
+struct addr_mask {
+ uint32_t addr;
+ uint32_t mask;
+};
+
+struct hw_gpio_pin {
+ const struct hw_gpio_pin_funcs *funcs;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ bool opened;
+ struct dc_context *ctx;
+};
+
+struct hw_gpio_pin_funcs {
+ void (*destroy)(
+ struct hw_gpio_pin **ptr);
+ bool (*open)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ enum gpio_result (*get_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+ enum gpio_result (*set_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t value);
+ enum gpio_result (*set_config)(
+ struct hw_gpio_pin *pin,
+ const struct gpio_config_data *config_data);
+ enum gpio_result (*change_mode)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ void (*close)(
+ struct hw_gpio_pin *pin);
+};
+
+
+struct hw_gpio;
+
+/* Register indices are represented by member variables
+ * and are to be filled in by constructors of derived classes.
+ * These members permit the use of common code
+ * for programming registers, where the sequence is the same
+ * but register sets are different.
+ * Some GPIOs have a HW mux, which allows choosing
+ * the source of the signal in HW mode */
+
+struct hw_gpio_pin_reg {
+ struct addr_mask DC_GPIO_DATA_MASK;
+ struct addr_mask DC_GPIO_DATA_A;
+ struct addr_mask DC_GPIO_DATA_EN;
+ struct addr_mask DC_GPIO_DATA_Y;
+};
+
+struct hw_gpio_mux_reg {
+ struct addr_mask GPIO_MUX_CONTROL;
+ struct addr_mask GPIO_MUX_STEREO_SEL;
+};
+
+struct hw_gpio {
+ struct hw_gpio_pin base;
+
+ /* variables to save register value */
+ struct {
+ uint32_t mask;
+ uint32_t a;
+ uint32_t en;
+ uint32_t mux;
+ } store;
+
+ /* GPIO MUX support */
+ bool mux_supported;
+ const struct gpio_registers *regs;
+};
+
+#define HW_GPIO_FROM_BASE(hw_gpio_pin) \
+ container_of((hw_gpio_pin), struct hw_gpio, base)
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx);
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *pin,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin);
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value);
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
new file mode 100644
index 000000000000..784feccc5853
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_hpd.h"
+
+#include "reg_helper.h"
+#include "hpd_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hpd->shifts->field_name, hpd->masks->field_name
+
+#define CTX \
+ hpd->base.base.ctx
+#define REG(reg)\
+ (hpd->regs->reg)
+
+static void dal_hw_hpd_construct(
+ struct hw_hpd *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+}
+
+static void dal_hw_hpd_destruct(
+ struct hw_hpd *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+
+static void destruct(
+ struct hw_hpd *hpd)
+{
+ dal_hw_hpd_destruct(hpd);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
+
+ destruct(hpd);
+
+ kfree(hpd);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+ uint32_t hpd_delayed = 0;
+
+ /* in Interrupt mode we ask for SENSE bit */
+
+ if (ptr->mode == GPIO_MODE_INTERRUPT) {
+
+ REG_GET(int_status,
+ DC_HPD_SENSE_DELAYED, &hpd_delayed);
+
+ *value = hpd_delayed;
+ return GPIO_RESULT_OK;
+ }
+
+ /* in any other modes, operate as normal GPIO */
+
+ return dal_hw_gpio_get_value(ptr, value);
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+
+ if (!config_data)
+ return GPIO_RESULT_INVALID_DATA;
+
+ REG_UPDATE_2(toggle_filt_cntl,
+ DC_HPD_CONNECT_INT_DELAY, config_data->config.hpd.delay_on_connect / 10,
+ DC_HPD_DISCONNECT_INT_DELAY, config_data->config.hpd.delay_on_disconnect / 10);
+
+ return GPIO_RESULT_OK;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_hpd *hpd,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_hpd_construct(hpd, id, en, ctx);
+ hpd->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_hpd *hpd;
+
+ if (id != GPIO_ID_HPD) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
+ if (!hpd) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(hpd, id, en, ctx);
+ return &hpd->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
new file mode 100644
index 000000000000..4ab7a208f781
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_HPD_H__
+#define __DAL_HW_HPD_H__
+
+#include "hpd_regs.h"
+
+struct hw_hpd {
+ struct hw_gpio base;
+ const struct hpd_registers *regs;
+ const struct hpd_sh_mask *shifts;
+ const struct hpd_sh_mask *masks;
+};
+
+#define HW_HPD_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_hpd, base)
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
new file mode 100644
index 000000000000..0ae8ace25739
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_translate.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_translate_dce80.h"
+#include "dce110/hw_translate_dce110.h"
+#include "dce120/hw_translate_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_translate_dcn10.h"
+#endif
+
+#include "diagnostics/hw_translate_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_translate_diag_fpga_init(translate);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_translate_dce80_init(translate);
+ return true;
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_translate_dce110_init(translate);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_translate_dce120_init(translate);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_translate_dcn10_init(translate);
+ return true;
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
new file mode 100644
index 000000000000..3a7d89ca1605
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_H__
+#define __DAL_HW_TRANSLATE_H__
+
+struct hw_translate_funcs {
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+
+struct hw_translate {
+ const struct hw_translate_funcs *funcs;
+};
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
new file mode 100644
index 000000000000..55603400acd9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -0,0 +1,78 @@
+#
+# Makefile for the 'i2caux' sub-component of DAL.
+# It provides control and status of the HW I2C and AUX engines of the adapter.
+
+I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \
+ i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o
+
+AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX)
+
+###############################################################################
+# DCE 8x family
+###############################################################################
+I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \
+ i2c_sw_engine_dce80.o
+
+AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80)
+
+###############################################################################
+# DCE 100 family
+###############################################################################
+I2CAUX_DCE100 = i2caux_dce100.o
+
+AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100)
+
+###############################################################################
+# DCE 110 family
+###############################################################################
+I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \
+ aux_engine_dce110.o
+
+AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110)
+
+###############################################################################
+# DCE 112 family
+###############################################################################
+I2CAUX_DCE112 = i2caux_dce112.o
+
+AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
+
+###############################################################################
+# DCN 1.0 family
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+I2CAUX_DCN1 = i2caux_dcn10.o
+
+AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCN1)
+endif
+
+###############################################################################
+# DCE 120 family
+###############################################################################
+I2CAUX_DCE120 = i2caux_dce120.o
+
+AMD_DAL_I2CAUX_DCE120 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce120/,$(I2CAUX_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE120)
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+I2CAUX_DIAG = i2caux_diag.o
+
+AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG)
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
new file mode 100644
index 000000000000..fc7a7d4ebca5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "include/link_service_types.h"
+
+/*
+ * This unit
+ */
+
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+bool dal_aux_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ enum gpio_result result;
+
+ if (aux_engine->funcs->is_engine_available) {
+ /* check whether SW can use the engine */
+ if (!aux_engine->funcs->is_engine_available(aux_engine))
+ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
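+ /* the payload carries an 8-bit I2C address; shift out the R/W bit
+ * to get the 7-bit address used by the AUX request */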
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
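+ /* keep issuing requests until the transaction completes or a terminal
+ * error is recorded; a partial ACK advances ctx.offset so the next
+ * iteration reads only the remaining bytes */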
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
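+ /* a non-zero byte count on a write ACK means the sink accepted only
+ * part of the data; switch to a status request and poll until the
+ * write drains or the retry budget runs out */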
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
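+ /* use the larger of the per-engine override and the default so the
+ * DEFER retry budget can only be extended, never shortened */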
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-AUX, not native AUX */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+
+bool dal_aux_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(aux_engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(aux_engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send a stop for the last transaction to free up the AUX;
+ * if the above command fails, this is treated as the last transaction */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(aux_engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->delay = 0;
+ engine->max_defer_write_retry = 0;
+}
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
new file mode 100644
index 000000000000..8e71324ccb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
+};
+
+struct aux_engine;
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available) (
+ struct aux_engine *engine);
+};
+
+struct aux_engine {
+ struct engine base;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+
+ bool acquire_reset;
+};
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx);
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine);
+bool dal_aux_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+bool dal_aux_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc);
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
new file mode 100644
index 000000000000..e8d3781deaed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce100_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce100_aux_regs,
+ dce100_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
new file mode 100644
index 000000000000..2b508d3e0ef4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE100_H__
+#define __DAL_I2C_AUX_DCE100_H__
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
new file mode 100644
index 000000000000..81f9f3e34c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../aux_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct aux_engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine);
+
+static void destroy(
+ struct aux_engine **aux_engine)
+{
+ struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *aux_engine = NULL;
+}
+
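+/* AUX_REG_RW_CNTL_STATUS values: which client currently owns the AUX
+ * register interface (software or the DMCU) */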
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before requesting SW access to AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* reset the DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
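+/* The AUX command/address header is written into AUX_SW_DATA one byte at a
+ * time: byte 0 combines the command with address bits 19:16, byte 1 holds
+ * address bits 15:8 and byte 2 holds address bits 7:0. */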
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
+
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is 3 bytes).
+ * If the requested length is non-zero,
+ * an additional byte specifying the length is required. */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the data to be sent.
+ * This is relevant for a write operation.
+ * For a read, the received data will be
+ * processed in process_channel_reply(). */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ /* Need to do a read to get the number of bytes to process.
+ * Alternatively, this information could be passed in,
+ * but that causes coupling, which isn't good either. */
+
+ uint32_t bytes_replied;
+ uint32_t value;
+
+ value = REG_GET(AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+
+ if (bytes_replied) {
+ uint32_t reply_result;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &reply_result);
+
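+ /* the AUX reply command is carried in the upper nibble of the
+ * first returned byte */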
+ reply_result = reply_result >> 4;
+
+ switch (reply_result) {
+ case 0: /* ACK */ {
+ uint32_t i = 0;
+
+ /* first byte was already used
+ * to get the command status */
+ --bytes_replied;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &aux_sw_data_val);
+
+ reply->data[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ }
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ } else {
+ /* Need to handle an error case...
+ * Hopefully, the upper-layer function won't call this function
+ * if the number of bytes in the reply was 0,
+ * because an error was surely asserted
+ * that should have been handled already.
+ * For the hot plug case, this can happen. */
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+ }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /* caller passed a NULL pointer */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored. */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
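+ /* exclude the reply command byte from the reported payload size */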
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /*time_elapsed >= aux_engine->timeout_period */
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .destroy = destroy,
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .submit_request = dal_aux_engine_submit_request,
+ .get_engine_type = dal_aux_engine_get_engine_type,
+ .acquire = dal_aux_engine_acquire,
+};
+
+static void construct(
+ struct aux_engine_dce110 *engine,
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ dal_aux_engine_construct(&engine->base, aux_init_data->ctx);
+ engine->base.base.funcs = &engine_funcs;
+ engine->base.funcs = &aux_engine_funcs;
+
+ engine->timeout_period = aux_init_data->timeout_period;
+ engine->regs = aux_init_data->regs;
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine)
+{
+ struct aux_engine_dce110 *aux110 = engine;
+/* temporary workaround, TODO */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_DMCU_DONE_USING_AUX_REG, 1);
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ dal_aux_engine_destruct(&engine->base);
+}
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ struct aux_engine_dce110 *engine;
+
+ if (!aux_init_data) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+
+ if (!engine) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine, aux_init_data);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
new file mode 100644
index 000000000000..85ee82162590
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+
+#include "../aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
new file mode 100644
index 000000000000..56e25b3d65fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 538
+};
+
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_hw_engine *'
+ * to pointer 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce110, base)
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast pointer to 'struct engine *'
+ * to 'pointer to struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+#define CTX \
+ hw_engine->base.base.base.ctx
+
+#define REG(reg_name)\
+ (hw_engine->regs->reg_name)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name
+
+#include "reg_helper.h"
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+
+ if (safe_to_reset)
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+ else
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
+
+ /* HW I2C engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ REG_UPDATE_6(
+ DC_I2C_CONTROL,
+ DC_I2C_GO, 0,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_TRANSACTION_COUNT, 0,
+ DC_I2C_DDC_SELECT, hw_engine->engine_id);
+
+ /* Program time limit */
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+
+ /* Program HW priority
+ * set to High - interrupt software I2C at any time
+ * Enable restart of SW I2C that was interrupted by HW
+ * disable queuing of software while I2C is in use by HW */
+ REG_UPDATE_2(
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO, 0,
+ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t pre_scale = 0;
+
+ REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
+
+ /* [anaumov] it seems the following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+ return pre_scale ?
+ hw_engine->reference_frequency / pre_scale :
+ hw_engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
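+ /* the controller divides the reference clock by the prescale value,
+ * so prescale = reference_frequency / speed */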
+ if (speed) {
+ if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(
+ SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(
+ SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+ }
+}
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+ uint32_t i2c_sw_status = 0;
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
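+ /* not idle: attempt a SW status reset and re-check before
+ * reporting the engine as busy */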
+ reset_hw_engine(engine);
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+
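+/* A queued transaction is treated as the last one (and gets a STOP) once the
+ * four HW transaction slots are filled, or the request is a plain (non-MOT)
+ * write or a read. */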
+#define STOP_TRANS_PREDICAT \
+ ((hw_engine->transaction_count == 3) || \
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ))
+
+#define SET_I2C_TRANSACTION(id) \
+ do { \
+ REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
+ if (STOP_TRANS_PREDICAT) \
+ last_transaction = true; \
+ } while (false)
+
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+ uint32_t value = 0;
+
+ bool last_transaction = false;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = hw_engine->base.base.base.ctx;
+
+
+
+ switch (hw_engine->transaction_count) {
+ case 0:
+ SET_I2C_TRANSACTION(0);
+ break;
+ case 1:
+ SET_I2C_TRANSACTION(1);
+ break;
+ case 2:
+ SET_I2C_TRANSACTION(2);
+ break;
+ case 3:
+ SET_I2C_TRANSACTION(3);
+ break;
+ default:
+ /* TODO Warning ? */
+ break;
+ }
+
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address for a CRT monitor
+ * for reading DDC/EDID information is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for an I2C receive operation, the LSB must be programmed to 1. */
+ if (hw_engine->transaction_count == 0) {
+ value = REG_SET_4(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address,
+ DC_I2C_INDEX, 0,
+ DC_I2C_INDEX_WRITE, 1);
+ hw_engine->buffer_used_write = 0;
+ } else
+ value = REG_SET_2(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address);
+
+ hw_engine->buffer_used_write++;
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+ while (length) {
+ REG_SET_2(DC_I2C_DATA, value,
+ DC_I2C_INDEX_WRITE, 0,
+ DC_I2C_DATA, *buffer++);
+ hw_engine->buffer_used_write++;
+ --length;
+ }
+ }
+
+ ++hw_engine->transaction_count;
+ hw_engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 5,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
+
+
+ REG_UPDATE_5(DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_GO, 0,
+ DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1);
+
+ /* start I2C transfer */
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
+
+ /* all transactions were executed and HW buffer became empty
+ * (even though it actually happens when status becomes DONE) */
+ hw_engine->transaction_count = 0;
+ hw_engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ struct i2c_hw_engine_dce110 *hw_engine =
+ FROM_I2C_ENGINE(engine);
+
+
+ REG_SET_3(DC_I2C_DATA, 0,
+ DC_I2C_INDEX, hw_engine->buffer_used_write,
+ DC_I2C_DATA_RW, 1,
+ DC_I2C_INDEX_WRITE, 1);
+
+ while (length) {
+ /* after reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+ * should read data bytes from the I2C circular data buffer */
+
+ uint32_t i2c_data;
+
+ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
+ *buffer++ = i2c_data;
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *i2c_engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t value =
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * this is the case when HW is used for communication;
+ * I2C_SW_STATUS could be zero
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
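+ /* worst-case clock count: start + 8 bits per byte + stop for this
+ * transaction, plus 8 bits for every byte already queued in the HW
+ * buffer and 2 bits per pending transaction */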
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *engine_dce110 =
+ FROM_I2C_ENGINE(*i2c_engine);
+
+ dal_i2c_hw_engine_destruct(&engine_dce110->base);
+
+ kfree(engine_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size = get_hw_buffer_available_size,
+ .get_transaction_timeout = get_transaction_timeout,
+ .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ uint32_t xtal_ref_div = 0;
+
+ dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx);
+
+ hw_engine->base.base.base.funcs = &engine_funcs;
+ hw_engine->base.base.funcs = &i2c_engine_funcs;
+ hw_engine->base.funcs = &i2c_hw_engine_funcs;
+ hw_engine->base.default_speed = arg->default_speed;
+
+ hw_engine->regs = arg->regs;
+ hw_engine->i2c_shift = arg->i2c_shift;
+ hw_engine->i2c_mask = arg->i2c_mask;
+
+ hw_engine->engine_id = arg->engine_id;
+
+ hw_engine->buffer_used_bytes = 0;
+ hw_engine->transaction_count = 0;
+ hw_engine->engine_keep_power_up_count = 1;
+
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+ if (xtal_ref_div == 0) {
+ dm_logger_write(
+ hw_engine->base.base.base.ctx->logger, LOG_WARNING,
+ "Invalid base timer divider\n",
+ __func__);
+ xtal_ref_div = 2;
+ }
+
+ /* Calculate the reference clock by dividing the original frequency by
+ * XTAL_REF_DIV.
+ * At the upper level, uint32_t reference_frequency =
+ * dal_i2caux_get_reference_clock(as) >> 1,
+ * which is already divided by 2, so we need to multiply by 2 to get the
+ * original reference clock from ppll_info.
+ */
+ hw_engine->reference_frequency =
+ (arg->reference_frequency * 2) / xtal_ref_div;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ struct i2c_hw_engine_dce110 *engine_dce10;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+ if (!arg->reference_frequency) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce10 = kzalloc(sizeof(struct i2c_hw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce10) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce10, arg);
+ return &engine_dce10->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
new file mode 100644
index 000000000000..5bb04085f670
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__
+#define __DAL_I2C_HW_ENGINE_DCE110_H__
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
+ SRI(SETUP, DC_I2C_DDC, id),\
+ SRI(SPEED, DC_I2C_DDC, id),\
+ SR(DC_I2C_ARBITRATION),\
+ SR(DC_I2C_CONTROL),\
+ SR(DC_I2C_SW_STATUS),\
+ SR(DC_I2C_TRANSACTION0),\
+ SR(DC_I2C_TRANSACTION1),\
+ SR(DC_I2C_TRANSACTION2),\
+ SR(DC_I2C_TRANSACTION3),\
+ SR(DC_I2C_DATA),\
+ SR(MICROSECOND_TIME_BASE_DIV)
+
+#define I2C_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
+
+struct dce110_i2c_hw_engine_shift {
+ uint8_t DC_I2C_DDC1_ENABLE;
+ uint8_t DC_I2C_DDC1_TIME_LIMIT;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint8_t DC_I2C_NO_QUEUED_SW_GO;
+ uint8_t DC_I2C_SW_PRIORITY;
+ uint8_t DC_I2C_SOFT_RESET;
+ uint8_t DC_I2C_SW_STATUS_RESET;
+ uint8_t DC_I2C_GO;
+ uint8_t DC_I2C_SEND_RESET;
+ uint8_t DC_I2C_TRANSACTION_COUNT;
+ uint8_t DC_I2C_DDC_SELECT;
+ uint8_t DC_I2C_DDC1_PRESCALE;
+ uint8_t DC_I2C_DDC1_THRESHOLD;
+ uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint8_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint8_t DC_I2C_SW_TIMEOUT;
+ uint8_t DC_I2C_SW_ABORTED;
+ uint8_t DC_I2C_SW_DONE;
+ uint8_t DC_I2C_SW_STATUS;
+ uint8_t DC_I2C_STOP_ON_NACK0;
+ uint8_t DC_I2C_START0;
+ uint8_t DC_I2C_RW0;
+ uint8_t DC_I2C_STOP0;
+ uint8_t DC_I2C_COUNT0;
+ uint8_t DC_I2C_DATA_RW;
+ uint8_t DC_I2C_DATA;
+ uint8_t DC_I2C_INDEX;
+ uint8_t DC_I2C_INDEX_WRITE;
+ uint8_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_mask {
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_TIME_LIMIT;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint32_t DC_I2C_NO_QUEUED_SW_GO;
+ uint32_t DC_I2C_SW_PRIORITY;
+ uint32_t DC_I2C_SOFT_RESET;
+ uint32_t DC_I2C_SW_STATUS_RESET;
+ uint32_t DC_I2C_GO;
+ uint32_t DC_I2C_SEND_RESET;
+ uint32_t DC_I2C_TRANSACTION_COUNT;
+ uint32_t DC_I2C_DDC_SELECT;
+ uint32_t DC_I2C_DDC1_PRESCALE;
+ uint32_t DC_I2C_DDC1_THRESHOLD;
+ uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint32_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint32_t DC_I2C_SW_TIMEOUT;
+ uint32_t DC_I2C_SW_ABORTED;
+ uint32_t DC_I2C_SW_DONE;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_STOP_ON_NACK0;
+ uint32_t DC_I2C_START0;
+ uint32_t DC_I2C_RW0;
+ uint32_t DC_I2C_STOP0;
+ uint32_t DC_I2C_COUNT0;
+ uint32_t DC_I2C_DATA_RW;
+ uint32_t DC_I2C_DATA;
+ uint32_t DC_I2C_INDEX;
+ uint32_t DC_I2C_INDEX_WRITE;
+ uint32_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_registers {
+ uint32_t SETUP;
+ uint32_t SPEED;
+ uint32_t DC_I2C_ARBITRATION;
+ uint32_t DC_I2C_CONTROL;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_TRANSACTION0;
+ uint32_t DC_I2C_TRANSACTION1;
+ uint32_t DC_I2C_TRANSACTION2;
+ uint32_t DC_I2C_TRANSACTION3;
+ uint32_t DC_I2C_DATA;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+};
+
+struct i2c_hw_engine_dce110 {
+ struct i2c_hw_engine base;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of bytes used for write transaction in HW buffer
+ * - this will be used as the index to read from*/
+ uint32_t buffer_used_write;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
new file mode 100644
index 000000000000..3aa7f791e523
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce110, base)
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce110 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce110 *engine_dce110,
+ const struct i2c_sw_engine_dce110_create_arg *arg_dce110)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg_dce110->ctx;
+ arg_base.default_speed = arg_dce110->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine_dce110->base, &arg_base);
+
+ /* 'struct engine' function table */
+ engine_dce110->base.base.base.funcs = &engine_funcs;
+ /* 'struct i2c_engine' function table */
+ engine_dce110->base.base.funcs = &i2c_engine_funcs;
+ engine_dce110->base.default_speed = arg_dce110->default_speed;
+ engine_dce110->engine_id = arg_dce110->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg)
+{
+ struct i2c_sw_engine_dce110 *engine_dce110;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce110 = kzalloc(sizeof(struct i2c_sw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce110, arg);
+ return &engine_dce110->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
new file mode 100644
index 000000000000..c48c61f540a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__
+#define __DAL_I2C_SW_ENGINE_DCE110_H__
+
+struct i2c_sw_engine_dce110 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
new file mode 100644
index 000000000000..2a047f8ca0e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_dce110.h"
+
+#include "i2c_sw_engine_dce110.h"
+#include "i2c_hw_engine_dce110.h"
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+/*cast pointer to struct i2caux TO pointer to struct i2caux_dce110*/
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce110, base)
+
+static void destruct(
+ struct i2caux_dce110 *i2caux_dce110)
+{
+ dal_i2caux_destruct(&i2caux_dce110->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce110);
+
+ kfree(i2caux_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ /* The generic HW engine is not used for EDID reads.
+ * It may be needed for external I2C devices (e.g. a thermal chip);
+ * TODO: implement when needed.
+ * See the dce80 'non_generic' flag for how the generic HW engine is chosen.
+ */
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT)
+ engine = i2caux->i2c_hw_engines[line];
+ }
+
+ if (!engine)
+ return NULL;
+
+ if (!i2caux_dce110->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce110->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
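+
+/*
+ * Usage sketch (illustrative only; names taken from the function table
+ * below): a caller holding a DDC handle acquires the per-line HW engine
+ * through the i2caux function table and releases it when done.
+ *
+ * struct i2c_engine *engine =
+ *         i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
+ * if (engine) {
+ *         ...submit I2C transactions...
+ *         i2caux->funcs->release_engine(i2caux, &engine->base);
+ * }
+ *
+ * Acquiring marks the shared HW buffer as in use; releasing an
+ * I2CAUX_ENGINE_TYPE_I2C_DDC_HW engine frees it for the next user.
+ */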
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
+
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
+
+/* function table */
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce110_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers aux_regs[],
+ const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask)
+{
+ uint32_t i = 0;
+ uint32_t reference_frequency = 0;
+ bool use_i2c_sw_engine = false;
+ struct i2caux *base = NULL;
+ /* TODO: For CZ bring-up, if dal_i2caux_get_reference_clock
+ * does not return 48 kHz, hard-code 48 kHz; an incorrect BIOS
+ * setting can cause this. For production, the value is always
+ * taken from the BIOS. */
+ reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ base = &i2caux_dce110->base;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce110->base.funcs = &i2caux_funcs;
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+ /* Create I2C engines (one per connector DDC line) to cover the
+ * different I2C/AUX usage cases: DDC, generic GPIO and AUX.
+ */
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce110_create_arg hw_arg_dce110;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce110_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce110_create(&sw_arg);
+ }
+
+ hw_arg_dce110.engine_id = i;
+ hw_arg_dce110.reference_frequency = reference_frequency;
+ hw_arg_dce110.default_speed = base->default_i2c_hw_speed;
+ hw_arg_dce110.ctx = ctx;
+ hw_arg_dce110.regs = &i2c_hw_engine_regs[i];
+ hw_arg_dce110.i2c_shift = i2c_shift;
+ hw_arg_dce110.i2c_mask = i2c_mask;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which have assisted HW AUX;
+ * the loop counter 'i' is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data aux_init_data;
+
+ aux_init_data.engine_id = i;
+ aux_init_data.timeout_period = base->aux_timeout_period;
+ aux_init_data.ctx = ctx;
+ aux_init_data.regs = &aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&aux_init_data);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /*TODO Generic I2C SW and HW*/
+}
+
+/*
+ * dal_i2caux_dce110_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11 I2CAUX
+ *
+ * @param
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce110_aux_regs,
+ i2c_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
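+
+/*
+ * Construction sketch (illustrative): the DCE11.x variants reuse this
+ * path with their own register tables. A minimal caller, assuming a
+ * valid dc_context, would look like:
+ *
+ * struct i2caux *i2caux = dal_i2caux_dce110_create(ctx);
+ *
+ * if (i2caux)
+ *         i2caux->funcs->destroy(&i2caux);
+ */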
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
new file mode 100644
index 000000000000..1b1f71c60ac9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE110_H__
+#define __DAL_I2C_AUX_DCE110_H__
+
+#include "../i2caux.h"
+
+struct i2caux_dce110 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct dce110_aux_registers;
+struct dce110_i2c_hw_engine_registers;
+struct dce110_i2c_hw_engine_shift;
+struct dce110_i2c_hw_engine_mask;
+
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx);
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers *aux_regs,
+ const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask);
+
+#endif /* __DAL_I2C_AUX_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
new file mode 100644
index 000000000000..dafc1a727f7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2caux_dce110.h"
+#include "i2caux_dce112.h"
+
+#include "../dce110/aux_engine_dce110.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce112_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static void construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx)
+{
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce112_aux_regs,
+ dce112_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+}
+
+/*
+ * dal_i2caux_dce112_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11.2 I2CAUX
+ *
+ * @param
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11.2 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux_dce110, ctx);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
new file mode 100644
index 000000000000..8d35453c25b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE112_H__
+#define __DAL_I2C_AUX_DCE112_H__
+
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
new file mode 100644
index 000000000000..668981a4c285
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expand the register list macros defined in the HW object header file
+ * end *********************/
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+static const struct dce110_aux_registers dce120_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_i2c_hw_engine_registers dce120_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce120_aux_regs,
+ dce120_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
new file mode 100644
index 000000000000..b6ac47617c70
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE120_H__
+#define __DAL_I2C_AUX_DCE120_H__
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
new file mode 100644
index 000000000000..fd0832dd2c75
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 144
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+ uint32_t value = 0;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_DONE_USING_I2C_REG);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value);
+ }
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ if (safe_to_reset)
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+ }
+
+ /* HW I2c engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static void destruct(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *i2c_engine = NULL;
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ uint32_t value = 0;
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ set_reg_field_value(
+ value,
+ engine->engine_id,
+ DC_I2C_CONTROL,
+ DC_I2C_DDC_SELECT);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program time limit */
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ I2C_SETUP_TIME_LIMIT,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_TIME_LIMIT);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program HW priority:
+ * set to high - interrupt software I2C at any time,
+ * enable restart of SW I2C that was interrupted by HW,
+ * disable queuing of software while I2C is in use by HW */
+ {
+ value = dm_read_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO);
+
+ set_reg_field_value(
+ value,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_PRIORITY);
+
+ dm_write_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION, value);
+ }
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t pre_scale = 0;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ pre_scale = get_reg_field_value(
+ value,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
+
+ /* [anaumov] it seems the following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+
+ return pre_scale ?
+ engine->reference_frequency / pre_scale :
+ engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ if (speed) {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ engine->reference_frequency / speed,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
+
+ set_reg_field_value(
+ value,
+ 2,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_THRESHOLD);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+}
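+
+/*
+ * Speed/prescale relationship (illustrative numbers, assuming both values
+ * are expressed in kHz as the reference frequency is): with a reference
+ * frequency of 48000 kHz, requesting a 100 kHz bus speed programs a
+ * prescale of 48000 / 100 = 480, and get_speed() then recovers
+ * 48000 / 480 = 100 kHz.
+ */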
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ uint32_t i2c_sw_status = 0;
+
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
+ reset_hw_engine(engine);
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+/*
+ * @brief
+ * DC_GPIO_DDC MM register offsets
+ */
+static const uint32_t transaction_addr[] = {
+ mmDC_I2C_TRANSACTION0,
+ mmDC_I2C_TRANSACTION1,
+ mmDC_I2C_TRANSACTION2,
+ mmDC_I2C_TRANSACTION3
+};
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce80 *engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+
+ bool last_transaction = false;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr =
+ transaction_addr[engine->transaction_count];
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP_ON_NACK0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_START0);
+
+ if ((engine->transaction_count == 3) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ last_transaction = true;
+ } else
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ set_reg_field_value(
+ value,
+ (0 != (request->action &
+ I2CAUX_TRANSACTION_ACTION_I2C_READ)),
+ DC_I2C_TRANSACTION0,
+ DC_I2C_RW0);
+
+ set_reg_field_value(
+ value,
+ length,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_COUNT0);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address of a CRT monitor
+ * for reading DDC/EDID information is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for an I2C receive operation, the LSB must be programmed to 1. */
+
+ {
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ false,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ request->address,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ if (engine->transaction_count == 0) {
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ /*enable index write*/
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+ }
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ while (length) {
+
+ set_reg_field_value(
+ value,
+ *buffer++,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+ --length;
+ }
+ }
+ }
+
+ ++engine->transaction_count;
+ engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
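+
+/*
+ * Buffer accounting sketch (illustrative): every queued transaction
+ * accounts for one address byte plus 'length' data bytes of the
+ * I2C_HW_BUFFER_SIZE (144) byte circular buffer, so e.g. a single
+ * 16 byte write consumes 17 bytes and leaves 127 available via
+ * get_hw_buffer_available_size().
+ */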
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ uint32_t value = 0;
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_CLK_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_SEL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_TRANSACTION_DELAY);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_BYTE_DELAY);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ engine->transaction_count - 1,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* start I2C transfer */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* All transactions have been submitted and the HW buffer is treated
+ * as empty (it actually empties once the status becomes DONE) */
+ engine->transaction_count = 0;
+ engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ uint32_t value = 0;
+
+ /*set index*/
+ set_reg_field_value(
+ value,
+ length - 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value);
+
+ while (length) {
+ /* after reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller
+ * should read data bytes from I2C circular data buffer */
+
+ value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA);
+
+ *buffer++ = get_reg_field_value(
+ value,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * This is the case when HW is used for communication; I2C_SW_STATUS
+ * can be zero here
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
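+
+/*
+ * Worked example of the estimate above (illustrative): a 16 byte
+ * transfer with an empty buffer and no queued transactions gives
+ * 1 + (16 << 3) + 1 = 130 clock stretches, so the timeout returned
+ * is 130 * period_timeout.
+ */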
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SETUP MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_setup_offset[] = {
+
+ mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */
+};
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SPEED MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_speed_offset[] = {
+ mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */
+};
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size =
+ get_hw_buffer_available_size,
+ .get_transaction_timeout =
+ get_transaction_timeout,
+ .wait_on_operation_result =
+ dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce80 *engine,
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ dal_i2c_hw_engine_construct(&engine->base, arg->ctx);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.funcs = &i2c_hw_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->addr.DC_I2C_DDCX_SETUP =
+ mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id];
+ engine->addr.DC_I2C_DDCX_SPEED =
+ mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id];
+
+ engine->engine_id = arg->engine_id;
+ engine->reference_frequency = arg->reference_frequency;
+ engine->buffer_used_bytes = 0;
+ engine->transaction_count = 0;
+ engine->engine_keep_power_up_count = 1;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ struct i2c_hw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if ((arg->engine_id >= sizeof(ddc_setup_offset) / sizeof(int32_t)) ||
+ (arg->engine_id >= sizeof(ddc_speed_offset) / sizeof(int32_t)) ||
+ !arg->reference_frequency) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_hw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
new file mode 100644
index 000000000000..5c6116fb5479
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__
+#define __DAL_I2C_HW_ENGINE_DCE80_H__
+
+struct i2c_hw_engine_dce80 {
+ struct i2c_hw_engine base;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
new file mode 100644
index 000000000000..4853ee26096a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+/*
+ * This unit
+ */
+
+static const uint32_t ddc_hw_status_addr[] = {
+ mmDC_I2C_DDC1_HW_STATUS,
+ mmDC_I2C_DDC2_HW_STATUS,
+ mmDC_I2C_DDC3_HW_STATUS,
+ mmDC_I2C_DDC4_HW_STATUS,
+ mmDC_I2C_DDC5_HW_STATUS,
+ mmDC_I2C_DDC6_HW_STATUS,
+ mmDC_I2C_DDCVGA_HW_STATUS
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce80 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce80 *engine,
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg->ctx;
+ arg_base.default_speed = arg->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine->base, &arg_base);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->engine_id = arg->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
new file mode 100644
index 000000000000..26355c088746
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__
+#define __DAL_I2C_SW_ENGINE_DCE80_H__
+
+struct i2c_sw_engine_dce80 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
new file mode 100644
index 000000000000..ed48596dd2a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "i2c_sw_engine_dce80.h"
+#include "../i2c_hw_engine.h"
+#include "i2c_hw_engine_dce80.h"
+#include "../i2c_generic_hw_engine.h"
+#include "../aux_engine.h"
+
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers dce80_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+/*
+ * This unit
+ */
+
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce80, base)
+
+static void destruct(
+ struct i2caux_dce80 *i2caux_dce80)
+{
+ dal_i2caux_destruct(&i2caux_dce80->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce80);
+
+ kfree(i2caux_dce80);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ bool non_generic;
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT) {
+ non_generic = true;
+ engine = i2caux->i2c_hw_engines[line];
+ }
+ }
+
+ if (!engine) {
+ non_generic = false;
+ engine = i2caux->i2c_generic_hw_engine;
+ }
+
+ if (!engine)
+ return NULL;
+
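+ /* Line-specific HW engines on DCE8 share a single circular buffer
+ * (tracked by i2c_hw_buffer_in_use), so only one of them may be
+ * acquired at a time; the generic HW engine is not subject to this. */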
+ if (non_generic) {
+ if (!i2caux_dce80->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce80->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+ } else {
+ if (engine->base.funcs->acquire(&engine->base, ddc))
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA
+};
+
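+/* note: unlike DDC, no AUX engine is created for the VGA DDC line */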
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6
+};
+
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+static void construct(
+ struct i2caux_dce80 *i2caux_dce80,
+ struct dc_context *ctx)
+{
+ /* The entire family has the I2C engine reference clock frequency
+ * changed from XTALIN (27 MHz) to XTALIN/2 (13.5 MHz) */
+
+ struct i2caux *base = &i2caux_dce80->base;
+
+ uint32_t reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as,
+ FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/
+
+ /* Use SW I2C for DCE8 for now, since we have a bug with HW I2C */
+ bool use_i2c_sw_engine = true;
+
+ uint32_t i;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce80->base.funcs = &i2caux_funcs;
+ i2caux_dce80->i2c_hw_buffer_in_use = false;
+
+ /* Create the I2C engines (HW, plus SW pairs when enabled)
+ * for all lines which have assisted HW DDC;
+ * 'i' (the loop counter) is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce80_create_arg hw_arg;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce80_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce80_create(&sw_arg);
+ }
+
+ hw_arg.engine_id = i;
+ hw_arg.reference_frequency = reference_frequency;
+ hw_arg.default_speed = base->default_i2c_hw_speed;
+ hw_arg.ctx = ctx;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce80_create(&hw_arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which have assisted HW AUX;
+ * 'i' (the loop counter) is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data arg;
+
+ arg.engine_id = i;
+ arg.timeout_period = base->aux_timeout_period;
+ arg.ctx = ctx;
+ arg.regs = &dce80_aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /* TODO Generic I2C SW and HW */
+}
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce80 *i2caux_dce80 =
+ kzalloc(sizeof(struct i2caux_dce80), GFP_KERNEL);
+
+ if (!i2caux_dce80) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(i2caux_dce80, ctx);
+ return &i2caux_dce80->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
new file mode 100644
index 000000000000..21908629e973
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE80_H__
+#define __DAL_I2C_AUX_DCE80_H__
+
+struct i2caux_dce80 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
new file mode 100644
index 000000000000..13b807d8aff8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile-time expansion of the base address */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expand the register list macros defined in the HW object header file
+ * end *********************/
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dcn10_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dcn10_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dcn10_aux_regs,
+ dcn10_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
new file mode 100644
index 000000000000..aeb4a86463d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCN10_H__
+#define __DAL_I2C_AUX_DCN10_H__
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
new file mode 100644
index 000000000000..e6408f644086
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_diag.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+static void destruct(
+ struct i2caux *i2caux)
+{
+ dal_i2caux_destruct(i2caux);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ destruct(*i2c_engine);
+
+ kfree(*i2c_engine);
+
+ *i2c_engine = NULL;
+}
+
+/* function table */
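+/* only destroy() is implemented: the diagnostics (FPGA) i2caux
+ * does not expose any acquirable engines */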
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = NULL,
+ .release_engine = NULL,
+ .acquire_i2c_sw_engine = NULL,
+ .acquire_aux_engine = NULL,
+};
+
+static void construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct(i2caux, ctx);
+ i2caux->funcs = &i2caux_funcs;
+}
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx)
+{
+ struct i2caux *i2caux = kzalloc(sizeof(struct i2caux),
+ GFP_KERNEL);
+
+ if (!i2caux) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux, ctx);
+ return i2caux;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
new file mode 100644
index 000000000000..a83eeb748283
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__
+#define __DAL_I2C_AUX_DIAG_FPGA_H__
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
new file mode 100644
index 000000000000..33de8a8834dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ENGINE_H__
+#define __DAL_ENGINE_H__
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
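+/* these values appear to follow the DisplayPort AUX command encoding,
+ * i.e. the AUX command nibble shifted left by 4: the MOT variants add
+ * 0x40 and the native DP transactions add 0x80 */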
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+struct engine;
+
+struct engine_funcs {
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct engine *engine);
+ bool (*acquire)(
+ struct engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct engine *engine);
+};
+
+struct engine {
+ const struct engine_funcs *funcs;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+};
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
new file mode 100644
index 000000000000..5d155d36d353
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "engine.h"
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx)
+{
+ engine->ddc = NULL;
+ engine->ctx = ctx;
+}
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine)
+{
+ /* nothing to do */
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
new file mode 100644
index 000000000000..70e20bd47ce4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct i2c_engine, base)
+
+bool dal_i2c_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc_handle)
+{
+ struct i2c_engine *i2c_engine = FROM_ENGINE(engine);
+
+ uint32_t counter = 0;
+ bool result;
+
+ do {
+ result = i2c_engine->funcs->acquire_engine(
+ i2c_engine, ddc_handle);
+
+ if (result)
+ break;
+
+ /* the i2c_engine is busy (held by the VBIOS), let's wait and retry */
+
+ udelay(10);
+
+ ++counter;
+ } while (counter < 2);
+
+ if (result) {
+ if (!i2c_engine->funcs->setup_engine(i2c_engine)) {
+ engine->funcs->release_engine(engine);
+ result = false;
+ }
+ }
+
+ return result;
+}
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine)
+{
+ /* Derivative classes do not have to override this */
+
+ return true;
+}
+
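+/* default no-op implementations of the channel request/reply hooks;
+ * engine implementations that do not need them can use these directly */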
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+
+}
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+
+}
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->timeout_delay = 0;
+}
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
new file mode 100644
index 000000000000..58fc0f25eceb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_ENGINE_H__
+#define __DAL_I2C_ENGINE_H__
+
+enum i2c_channel_operation_result {
+ I2C_CHANNEL_OPERATION_SUCCEEDED,
+ I2C_CHANNEL_OPERATION_FAILED,
+ I2C_CHANNEL_OPERATION_NOT_GRANTED,
+ I2C_CHANNEL_OPERATION_IS_BUSY,
+ I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
+ I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
+ I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY,
+ I2C_CHANNEL_OPERATION_TIMEOUT,
+ I2C_CHANNEL_OPERATION_NO_RESPONSE,
+ I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
+ I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
+ I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
+ I2C_CHANNEL_OPERATION_NOT_STARTED
+};
+
+struct i2c_request_transaction_data {
+ enum i2caux_transaction_action action;
+ enum i2c_channel_operation_result status;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_reply_transaction_data {
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_engine;
+
+struct i2c_engine_funcs {
+ void (*destroy)(
+ struct i2c_engine **ptr);
+ uint32_t (*get_speed)(
+ const struct i2c_engine *engine);
+ void (*set_speed)(
+ struct i2c_engine *engine,
+ uint32_t speed);
+ bool (*acquire_engine)(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+ bool (*setup_engine)(
+ struct i2c_engine *engine);
+ void (*submit_channel_request)(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+ enum i2c_channel_operation_result (*get_channel_status)(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+};
+
+struct i2c_engine {
+ struct engine base;
+ const struct i2c_engine_funcs *funcs;
+ uint32_t timeout_delay;
+};
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine);
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine);
+
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+
+bool dal_i2c_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc_handle);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
new file mode 100644
index 000000000000..5a4295e0fae5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+#include "i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_generic_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_generic_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW;
+}
+
+/*
+ * @brief
+ * Single transaction handling.
+ * Since a transaction may be bigger than the HW buffer size,
+ * it divides the transaction into sub-transactions
+ * and uses the batch transaction feature of the engine.
+ */
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_hw_engine *base = &hw_engine->base;
+
+ uint32_t max_payload_size =
+ base->funcs->get_hw_buffer_available_size(base);
+
+ bool initial_stop_bit = !middle_of_transaction;
+
+ struct i2c_generic_transaction_attributes attributes;
+
+ enum i2c_channel_operation_result operation_result =
+ I2C_CHANNEL_OPERATION_FAILED;
+
+ bool result = false;
+
+ /* setup transaction initial properties */
+
+ uint8_t address = i2caux_request->payload.address;
+ uint8_t *current_payload = i2caux_request->payload.data;
+ uint32_t remaining_payload_size = i2caux_request->payload.length;
+
+ bool first_iteration = true;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ return false;
+ }
+
+ /* Do batch transaction.
+ * Divide read/write data into payloads which fit HW buffer size.
+ * 1. Single transaction:
+ * start_bit = 1, stop_bit depends on session state, ack_on_read = 0;
+ * 2. Start of batch transaction:
+ * start_bit = 1, stop_bit = 0, ack_on_read = 1;
+ * 3. Middle of batch transaction:
+ * start_bit = 0, stop_bit = 0, ack_on_read = 1;
+ * 4. End of batch transaction:
+ * start_bit = 0, stop_bit depends on session state, ack_on_read = 0.
+ * Session stop bit is set if 'middle_of_transaction' = 0. */
+
+ while (remaining_payload_size) {
+ uint32_t current_transaction_size;
+ uint32_t current_payload_size;
+
+ bool last_iteration;
+ bool stop_bit;
+
+ /* Calculate current transaction size and payload size.
+ * Transaction size = total number of bytes in transaction,
+ * including slave's address;
+ * Payload size = number of data bytes in transaction. */
+
+ if (first_iteration) {
+ /* In the first sub-transaction we send the slave's address,
+ * thus we need to reserve one byte for it */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size - 1) ?
+ max_payload_size :
+ remaining_payload_size + 1;
+
+ current_payload_size = current_transaction_size - 1;
+ } else {
+ /* Second and further sub-transactions will have
+ * the entire buffer reserved for data */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size) ?
+ max_payload_size :
+ remaining_payload_size;
+
+ current_payload_size = current_transaction_size;
+ }
+
+ last_iteration =
+ (remaining_payload_size == current_payload_size);
+
+ stop_bit = last_iteration ? initial_stop_bit : false;
+
+ /* write slave device address */
+
+ if (first_iteration)
+ hw_engine->funcs->write_address(hw_engine, address);
+
+ /* write current portion of data, if requested */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ hw_engine->funcs->write_data(
+ hw_engine,
+ current_payload,
+ current_payload_size);
+
+ /* execute transaction */
+
+ attributes.start_bit = first_iteration;
+ attributes.stop_bit = stop_bit;
+ attributes.last_read = last_iteration;
+ attributes.transaction_size = current_transaction_size;
+
+ hw_engine->funcs->execute_transaction(hw_engine, &attributes);
+
+ /* wait until transaction is processed; if it fails - quit */
+
+ operation_result = base->funcs->wait_on_operation_result(
+ base,
+ base->funcs->get_transaction_timeout(
+ base, current_transaction_size),
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED)
+ break;
+
+ /* read current portion of data, if requested */
+
+ /* the read offset should be 1 for the first sub-transaction,
+ * and 0 for any subsequent one */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ hw_engine->funcs->read_data(hw_engine, current_payload,
+ current_payload_size, first_iteration ? 1 : 0);
+
+ /* update loop variables */
+
+ first_iteration = false;
+ current_payload += current_payload_size;
+ remaining_payload_size -= current_payload_size;
+ }
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ return result;
+}
+
+/*
+ * @brief
+ * Returns the number of microseconds to wait before the transaction
+ * is considered to have timed out
+ */
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ const struct i2c_engine *base = &engine->base;
+
+ uint32_t speed = base->funcs->get_speed(base);
+
+ if (!speed)
+ return 0;
+
+ /* total timeout = period_timeout * (start + data bits count + stop) */
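+ /* speed is in kHz (see i2c_hw_engine), so 1000 / speed is one clock
+ * period in microseconds; the budget is TRANSACTION_TIMEOUT_IN_I2C_CLOCKS
+ * periods per bit, with 8 data bits per byte plus start and stop bits */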
+
+ return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) *
+ (1 + (length << 3) + 1);
+}
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_hw_engine_construct(&engine->base, ctx);
+}
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
new file mode 100644
index 000000000000..1da0397b04a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__
+#define __DAL_I2C_GENERIC_HW_ENGINE_H__
+
+struct i2c_generic_transaction_attributes {
+ enum i2caux_transaction_action action;
+ uint32_t transaction_size;
+ bool start_bit;
+ bool stop_bit;
+ bool last_read;
+};
+
+struct i2c_generic_hw_engine;
+
+struct i2c_generic_hw_engine_funcs {
+ void (*write_address)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t address);
+ void (*write_data)(
+ struct i2c_generic_hw_engine *engine,
+ const uint8_t *buffer,
+ uint32_t length);
+ void (*read_data)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t *buffer,
+ uint32_t length,
+ uint32_t offset);
+ void (*execute_transaction)(
+ struct i2c_generic_hw_engine *engine,
+ struct i2c_generic_transaction_attributes *attributes);
+};
+
+struct i2c_generic_hw_engine {
+ struct i2c_hw_engine base;
+ const struct i2c_generic_hw_engine_funcs *funcs;
+};
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine);
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
new file mode 100644
index 000000000000..4b54fcfb28ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_DDC_HW;
+}
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_request_transaction_data request;
+
+ uint32_t transaction_timeout;
+
+ enum i2c_channel_operation_result operation_result;
+
+ bool result = false;
+
+ /* We require the following:
+ * the transaction length must not exceed
+ * the number of free bytes in the HW buffer (minus one for the address) */
+
+ if (i2caux_request->payload.length >=
+ hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
+ return false;
+ }
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* [anaumov] in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ /* obtain timeout value before submitting request */
+
+ transaction_timeout = hw_engine->funcs->get_transaction_timeout(
+ hw_engine, i2caux_request->payload.length + 1);
+
+ hw_engine->base.funcs->submit_channel_request(
+ &hw_engine->base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
+ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ return false;
+ }
+
+ /* wait until the transaction is processed */
+
+ operation_result = hw_engine->funcs->wait_on_operation_result(
+ hw_engine,
+ transaction_timeout,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) {
+ struct i2c_reply_transaction_data reply;
+
+ reply.data = i2caux_request->payload.data;
+ reply.length = i2caux_request->payload.length;
+
+ hw_engine->base.funcs->
+ process_channel_reply(&hw_engine->base, &reply);
+ }
+
+ return result;
+}
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+ uint32_t current_speed;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
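+ /* cache the speed currently reported by the engine
+ * as its original speed */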
+ current_speed = engine->funcs->get_speed(engine);
+
+ if (current_speed)
+ FROM_I2C_ENGINE(engine)->original_speed = current_speed;
+
+ return true;
+}
+/*
+ * @brief
+ * Polls the current engine status in a loop
+ * until the retrieved status no longer matches 'expected_result',
+ * or the timeout expires.
+ * The timeout is given in microseconds
+ * and the status is queried once per microsecond.
+ */
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
+{
+ enum i2c_channel_operation_result result;
+ uint32_t i = 0;
+
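+ /* a zero timeout skips polling and reports success immediately */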
+ if (!timeout)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ do {
+ result = engine->base.funcs->get_channel_status(
+ &engine->base, NULL);
+
+ if (result != expected_result)
+ break;
+
+ udelay(1);
+
+ ++i;
+ } while (i < timeout);
+
+ return result;
+}
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_engine_construct(&engine->base, ctx);
+ engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+ engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+}
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
new file mode 100644
index 000000000000..8936a994804a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_H__
+#define __DAL_I2C_HW_ENGINE_H__
+
+enum {
+ TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32
+};
+
+struct i2c_hw_engine;
+
+struct i2c_hw_engine_funcs {
+ uint32_t (*get_hw_buffer_available_size)(
+ const struct i2c_hw_engine *engine);
+ enum i2c_channel_operation_result (*wait_on_operation_result)(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+ uint32_t (*get_transaction_timeout)(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+};
+
+struct i2c_hw_engine {
+ struct i2c_engine base;
+ const struct i2c_hw_engine_funcs *funcs;
+
+ /* Values below are in kilohertz */
+ uint32_t original_speed;
+ uint32_t default_speed;
+};
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine);
+
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
new file mode 100644
index 000000000000..8e19bb629394
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define SCL false
+#define SDA true
+
+static inline bool read_bit_from_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock)
+{
+ uint32_t value = 0;
+
+ if (data_nor_clock)
+ dal_gpio_get_value(ddc->pin_data, &value);
+ else
+ dal_gpio_get_value(ddc->pin_clock, &value);
+
+ return (value != 0);
+}
+
+static inline void write_bit_to_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock,
+ bool bit)
+{
+ uint32_t value = bit ? 1 : 0;
+
+ if (data_nor_clock)
+ dal_gpio_set_value(ddc->pin_data, value);
+ else
+ dal_gpio_set_value(ddc->pin_clock, value);
+}
+
+static bool wait_for_scl_high(
+ struct dc_context *ctx,
+ struct ddc *ddc,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t scl_retry = 0;
+ uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
+
+ udelay(clock_delay_div_4);
+
+ /* allow up to ~3 milliseconds for SCL to go high,
+ * to wake up some displays from a "low power" state.
+ */
+
+ do {
+ if (read_bit_from_ddc(ddc, SCL))
+ return true;
+
+ udelay(clock_delay_div_4);
+
+ ++scl_retry;
+ } while (scl_retry <= scl_retry_max);
+
+ return false;
+}
+
+static bool start_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C start condition is:
+ * SDA going from high to low while SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ udelay(clock_delay_div_4);
+
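+ /* the bus must be idle (SDA reads back high) before we can drive
+ * the start condition; otherwise retry */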
+ do {
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ if (!read_bit_from_ddc(ddc_handle, SDA)) {
+ ++retry;
+ continue;
+ }
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ break;
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+ } while (retry <= I2C_SW_RETRIES);
+
+ return false;
+}
+
+static bool stop_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C stop condition is:
+ * SDA going from low to high while SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
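+ /* verify SDA actually went high; if a device keeps driving it low,
+ * the stop condition could not be generated */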
+ do {
+ udelay(clock_delay_div_4);
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ return true;
+
+ ++retry;
+ } while (retry <= 2);
+
+ return false;
+}
+
+static bool write_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t byte)
+{
+ int32_t shift = 7;
+ bool ack;
+
+ /* bits are transmitted serially, starting from MSB */
+
+ do {
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ --shift;
+ } while (shift >= 0);
+
+ /* The display sends ACK by preventing the SDA from going high
+ * after the SCL pulse we use to send our last data bit.
+ * If the SDA goes high after that bit, it's a NACK */
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ /* read ACK bit */
+
+ ack = !read_bit_from_ddc(ddc_handle, SDA);
+
+ udelay(clock_delay_div_4 << 1);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ return ack;
+}
+
+static bool read_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t *byte,
+ bool more)
+{
+ int32_t shift = 7;
+
+ uint8_t data = 0;
+
+	/* The data bits are read from MSB to LSB;
+	 * each bit is sampled while SCL is high. */
+
+ do {
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ data |= (1 << shift);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ --shift;
+ } while (shift >= 0);
+
+	/* the whole byte has been read */
+
+ *byte = data;
+
+ udelay(clock_delay_div_4);
+
+ /* send the acknowledge bit:
+ * SDA low means ACK, SDA high means NACK */
+
+ write_bit_to_ddc(ddc_handle, SDA, !more);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+}
+
+static bool i2c_write(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ const uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i]))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool i2c_read(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i,
+ i < length - 1))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_SW;
+}
+
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = &sw_engine->base;
+
+ struct i2c_request_transaction_data request;
+ bool operation_succeeded = false;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ base->funcs->submit_channel_request(base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
+ (request.status == I2C_CHANNEL_OPERATION_FAILED))
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ else {
+ enum i2c_channel_operation_result operation_result;
+
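+		/* Poll until the engine is no longer busy, then translate the
+		 * channel result into an i2caux transaction status. */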
+ do {
+ operation_result =
+ base->funcs->get_channel_status(base, NULL);
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ operation_succeeded = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ break;
+ }
+ } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+ }
+
+ return operation_succeeded;
+}
+
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine)
+{
+ return FROM_I2C_ENGINE(engine)->speed;
+}
+
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *engine,
+ uint32_t speed)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ ASSERT(speed);
+
+ sw_engine->speed = speed ? speed : I2CAUX_DEFAULT_I2C_SW_SPEED;
+
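+	/* speed is in kHz, so 1000 / speed gives the SCL period in
+	 * microseconds; it is later divided by 4 to produce the quarter-period
+	 * delay used for bit-banging, and is clamped to a 12 us minimum. */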
+ sw_engine->clock_delay = 1000 / sw_engine->speed;
+
+ if (sw_engine->clock_delay < 12)
+ sw_engine->clock_delay = 12;
+}
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
+ return true;
+}
+
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *req)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ struct ddc *ddc = engine->base.ddc;
+ uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2;
+
+ /* send sync (start / repeated start) */
+
+ bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4);
+
+ /* process payload */
+
+ if (result) {
+ switch (req->action) {
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE:
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT:
+ result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ:
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT:
+ result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ }
+
+ /* send stop if not 'mot' or operation failed */
+
+ if (!result ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ))
+ if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4))
+ result = false;
+
+ req->status = result ?
+ I2C_CHANNEL_OPERATION_SUCCEEDED :
+ I2C_CHANNEL_OPERATION_FAILED;
+}
+
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ /* No arbitration with VBIOS is performed since DCE 6.0 */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **ptr)
+{
+ dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr));
+
+ kfree(*ptr);
+ *ptr = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static void release_engine(
+ struct engine *engine)
+{
+
+}
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ dal_i2c_engine_construct(&engine->base, arg->ctx);
+ dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed);
+ engine->base.funcs = &i2c_engine_funcs;
+ engine->base.base.funcs = &engine_funcs;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ struct i2c_sw_engine *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dal_i2c_sw_engine_construct(engine, arg);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
new file mode 100644
index 000000000000..546f15b0d3f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_H__
+#define __DAL_I2C_SW_ENGINE_H__
+
+enum {
+ I2C_SW_RETRIES = 10,
+ I2C_SW_SCL_READ_RETRIES = 128,
+ /* following value is in microseconds */
+ I2C_SW_TIMEOUT_DELAY = 3000
+};
+
+struct i2c_sw_engine;
+
+struct i2c_sw_engine {
+ struct i2c_engine base;
+ uint32_t clock_delay;
+ /* Values below are in KHz */
+ uint32_t speed;
+ uint32_t default_speed;
+};
+
+struct i2c_sw_engine_create_arg {
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg);
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle);
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine);
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg);
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine);
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *ptr,
+ uint32_t speed);
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *ptr,
+ struct i2c_request_transaction_data *req);
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
new file mode 100644
index 000000000000..e1593ffe5a2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "dc_bios_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "engine.h"
+#include "i2c_engine.h"
+#include "aux_engine.h"
+
+/*
+ * This unit
+ */
+
+#include "dce80/i2caux_dce80.h"
+
+#include "dce100/i2caux_dce100.h"
+
+#include "dce110/i2caux_dce110.h"
+
+#include "dce112/i2caux_dce112.h"
+
+#include "dce120/i2caux_dce120.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/i2caux_dcn10.h"
+#endif
+
+#include "diagnostics/i2caux_diag.h"
+
+/*
+ * @brief
+ * Plain API, available publicly
+ */
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx)
+{
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ return dal_i2caux_diag_fpga_create(ctx);
+ }
+
+ switch (ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ return dal_i2caux_dce80_create(ctx);
+ case DCE_VERSION_11_2:
+ return dal_i2caux_dce112_create(ctx);
+ case DCE_VERSION_11_0:
+ return dal_i2caux_dce110_create(ctx);
+ case DCE_VERSION_10_0:
+ return dal_i2caux_dce100_create(ctx);
+ case DCE_VERSION_12_0:
+ return dal_i2caux_dce120_create(ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ return dal_i2caux_dcn10_create(ctx);
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd)
+{
+ struct i2c_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+	/*
+	 * The default engine is SW; however, a feature flag in the adapter
+	 * service determines whether the SW i2c_engine is available.  If SW
+	 * i2c is not available we fall back to HW.  Currently this flag is
+	 * set so that the SW i2c engine is created only for DCE 8.0.
+	 */
+ switch (cmd->engine) {
+ case I2C_COMMAND_ENGINE_DEFAULT:
+ case I2C_COMMAND_ENGINE_SW:
+ /* try to acquire SW engine first,
+ * acquire HW engine if SW engine not available */
+ engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_hw_engine(
+ i2caux, ddc);
+ break;
+ case I2C_COMMAND_ENGINE_HW:
+ default:
+ /* try to acquire HW engine first,
+ * acquire SW engine if HW engine not available */
+ engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_sw_engine(
+ i2caux, ddc);
+ }
+
+ if (!engine)
+ return false;
+
+ engine->funcs->set_speed(engine, cmd->speed);
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
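+		/* Keep MOT (middle-of-transaction) set for every payload except
+		 * the last one, so the engine issues a repeated start rather
+		 * than a stop between payloads. */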
+ bool mot = (index_of_payload != cmd->number_of_payloads - 1);
+
+ struct i2c_payload *payload = cmd->payloads + index_of_payload;
+
+ struct i2caux_transaction_request request = { 0 };
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
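+		/* Convert the 7-bit I2C address to its 8-bit wire format:
+		 * shift left by one and set the R/W bit (1 = read). */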
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd)
+{
+ struct aux_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+ bool mot;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return false;
+
+ engine->delay = cmd->defer_delay;
+ engine->max_defer_write_retry = cmd->max_defer_write_retry;
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
+ struct aux_payload *payload = cmd->payloads + index_of_payload;
+ struct i2caux_transaction_request request = { 0 };
+
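+		/* Honor an explicit MOT setting from the command; otherwise
+		 * keep MOT set for every payload except the last. */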
+ if (cmd->mot == I2C_MOT_UNDEF)
+ mot = (index_of_payload != cmd->number_of_payloads - 1);
+ else
+ mot = (cmd->mot == I2C_MOT_TRUE);
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ if (payload->i2c_over_aux) {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
+
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ } else {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD;
+
+ request.payload.address = payload->address;
+ }
+
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+static bool get_hw_supported_ddc_line(
+ struct ddc *ddc,
+ enum gpio_ddc_line *line)
+{
+ enum gpio_ddc_line line_found;
+
+ *line = GPIO_DDC_LINE_UNKNOWN;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!ddc->hw_info.hw_supported)
+ return false;
+
+ line_found = dal_ddc_get_line(ddc);
+
+ if (line_found >= GPIO_DDC_LINE_COUNT)
+ return false;
+
+ *line = line_found;
+
+ return true;
+}
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg)
+{
+ struct aux_engine *engine =
+ i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return;
+
+ engine->funcs->configure(engine, cfg);
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+}
+
+void dal_i2caux_destroy(
+ struct i2caux **i2caux)
+{
+ if (!i2caux || !*i2caux) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ (*i2caux)->funcs->destroy(i2caux);
+
+ *i2caux = NULL;
+}
+
+/*
+ * @brief
+ * A utility function used by 'struct i2caux' and its descendants
+ */
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios)
+{
+ struct dc_firmware_info info = { { 0 } };
+
+ if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
+ return 0;
+
+ return info.pll_info.crystal_frequency;
+}
+
+/*
+ * @brief
+ * i2caux
+ */
+
+enum {
+ /* following are expressed in KHz */
+ DEFAULT_I2C_SW_SPEED = 50,
+ DEFAULT_I2C_HW_SPEED = 50,
+
+ DEFAULT_I2C_SW_SPEED_100KHZ = 100,
+ DEFAULT_I2C_HW_SPEED_100KHZ = 100,
+
+ /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description". */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there is
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS. */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct i2c_engine *engine = NULL;
+
+ if (get_hw_supported_ddc_line(ddc, &line))
+ engine = i2caux->i2c_sw_engines[line];
+
+ if (!engine)
+ engine = i2caux->i2c_generic_sw_engine;
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct aux_engine *engine;
+
+ if (!get_hw_supported_ddc_line(ddc, &line))
+ return NULL;
+
+ engine = i2caux->aux_engines[line];
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ engine->funcs->release_engine(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+}
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ uint32_t i = 0;
+
+ i2caux->ctx = ctx;
+ do {
+ i2caux->i2c_sw_engines[i] = NULL;
+ i2caux->i2c_hw_engines[i] = NULL;
+ i2caux->aux_engines[i] = NULL;
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+
+ i2caux->i2c_generic_sw_engine = NULL;
+ i2caux->i2c_generic_hw_engine = NULL;
+
+ i2caux->aux_timeout_period =
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD;
+
+ if (ctx->dce_version >= DCE_VERSION_11_2) {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED_100KHZ;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED_100KHZ;
+ } else {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED;
+ }
+}
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux)
+{
+ uint32_t i = 0;
+
+ if (i2caux->i2c_generic_hw_engine)
+ i2caux->i2c_generic_hw_engine->funcs->destroy(
+ &i2caux->i2c_generic_hw_engine);
+
+ if (i2caux->i2c_generic_sw_engine)
+ i2caux->i2c_generic_sw_engine->funcs->destroy(
+ &i2caux->i2c_generic_sw_engine);
+
+ do {
+ if (i2caux->aux_engines[i])
+ i2caux->aux_engines[i]->funcs->destroy(
+ &i2caux->aux_engines[i]);
+
+ if (i2caux->i2c_hw_engines[i])
+ i2caux->i2c_hw_engines[i]->funcs->destroy(
+ &i2caux->i2c_hw_engines[i]);
+
+ if (i2caux->i2c_sw_engines[i])
+ i2caux->i2c_sw_engines[i]->funcs->destroy(
+ &i2caux->i2c_sw_engines[i]);
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
new file mode 100644
index 000000000000..64f51bb06915
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_H__
+#define __DAL_I2C_AUX_H__
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios);
+
+struct i2caux;
+
+struct engine;
+
+struct i2caux_funcs {
+ void (*destroy)(struct i2caux **ptr);
+ struct i2c_engine * (*acquire_i2c_sw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct i2c_engine * (*acquire_i2c_hw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct aux_engine * (*acquire_aux_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ void (*release_engine)(
+ struct i2caux *i2caux,
+ struct engine *engine);
+};
+
+struct i2c_engine;
+struct aux_engine;
+
+struct i2caux {
+ struct dc_context *ctx;
+ const struct i2caux_funcs *funcs;
+	/* On the ASIC we have a certain number of lines with a HW DDC engine
+	 * (4, 6, or maybe more in the future).
+	 * For every such line, we create a separate HW DDC engine
+	 * (since these engines exist in HW) and a separate SW DDC engine
+	 * (to allow concurrent use of several lines).
+	 * AUX engines are handled in a similar way. */
+
+ /* I2C SW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* I2C HW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* AUX engines, per DDC line.
+ * Only lines with HW AUX support will be initialized */
+ struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT];
+
+	/* For all other lines, we can use
+	 * a single instance of the generic I2C HW engine
+	 * (since there is a single instance of it in HW)
+	 * or a single instance of the generic I2C SW engine.
+	 * AUX is not supported for other lines. */
+
+ /* General-purpose I2C SW engine.
+ * Can be assigned dynamically to any line per transaction */
+ struct i2c_engine *i2c_generic_sw_engine;
+
+ /* General-purpose I2C generic HW engine.
+ * Can be assigned dynamically to almost any line per transaction */
+ struct i2c_engine *i2c_generic_hw_engine;
+
+ /* [anaumov] in DAL2, there is a Mutex */
+
+ uint32_t aux_timeout_period;
+
+ /* expressed in KHz */
+ uint32_t default_i2c_sw_speed;
+ uint32_t default_i2c_hw_speed;
+};
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx);
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine);
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
new file mode 100644
index 000000000000..39ee8eba3c31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef BW_FIXED_H_
+#define BW_FIXED_H_
+
+#define BW_FIXED_BITS_PER_FRACTIONAL_PART 24
+
+#define BW_FIXED_GET_INTEGER_PART(x) ((x) >> BW_FIXED_BITS_PER_FRACTIONAL_PART)
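+
+/* bw_fixed holds a signed fixed-point number in an int64_t: the low 24 bits
+ * are the fractional part, the remaining high bits are the integer part. */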
+struct bw_fixed {
+ int64_t value;
+};
+
+#define BW_FIXED_MIN_I32 \
+ (int64_t)(-(1LL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)))
+
+#define BW_FIXED_MAX_I32 \
+ (int64_t)((1ULL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)) - 1)
+
+static inline struct bw_fixed bw_min2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg1.value <= arg2.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_max2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg2.value <= arg1.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_min3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_min2(bw_min2(v1, v2), v3);
+}
+
+static inline struct bw_fixed bw_max3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_max2(bw_max2(v1, v2), v3);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value);
+static inline struct bw_fixed bw_int_to_fixed(int64_t value)
+{
+ if (__builtin_constant_p(value)) {
+ struct bw_fixed res;
+ BUILD_BUG_ON(value > BW_FIXED_MAX_I32 || value < BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return bw_int_to_fixed_nonconst(value);
+}
+
+static inline int32_t bw_fixed_to_int(struct bw_fixed value)
+{
+ return BW_FIXED_GET_INTEGER_PART(value.value);
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t num, int64_t denum);
+
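+/* Convert a raw fixed-point value with 32 fractional bits (fixed31_32 layout)
+ * to bw_fixed by dropping the extra 32 - 24 = 8 fractional bits; negative
+ * values are negated first so the shift truncates toward zero. */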
+static inline struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw)
+{
+ struct bw_fixed result = { 0 };
+
+ if (raw < 0) {
+ raw = -raw;
+ result.value = -(raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART));
+ } else {
+ result.value = raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART);
+ }
+
+ return result;
+}
+
+static inline struct bw_fixed bw_add(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+static inline struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2);
+static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return bw_frc_to_fixed(arg1.value, arg2.value);
+}
+
+static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+ div64_u64_rem(arg1.value, arg2.value, &res.value);
+ return res;
+}
+
+struct bw_fixed bw_floor2(const struct bw_fixed arg, const struct bw_fixed significance);
+struct bw_fixed bw_ceil2(const struct bw_fixed arg, const struct bw_fixed significance);
+
+static inline bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+static inline bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value != arg2.value;
+}
+
+static inline bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+static inline bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value >= arg2.value;
+}
+
+static inline bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+static inline bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value > arg2.value;
+}
+
+#endif //BW_FIXED_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
new file mode 100644
index 000000000000..ebcf67b5fc57
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_H__
+#define __DC_CLOCK_SOURCE_H__
+
+#include "dc_types.h"
+#include "include/grph_object_id.h"
+#include "include/bios_parser_types.h"
+
+struct clock_source;
+
+struct spread_spectrum_data {
+	uint32_t percentage;		/*> In units of 0.01% or 0.001% */
+ uint32_t percentage_divider; /*> 100 or 1000 */
+ uint32_t freq_range_khz;
+ uint32_t modulation_freq_hz;
+
+ struct spread_spectrum_flags flags;
+};
+
+struct delta_sigma_data {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ uint32_t ds_frac_amount;
+};
+
+/**
+ * Pixel Clock Parameters structure
+ * These parameters are required as input
+ * when calculating Pixel Clock Dividers for requested Pixel Clock
+ */
+struct pixel_clk_flags {
+ uint32_t ENABLE_SS:1;
+ uint32_t DISPLAY_BLANKED:1;
+ uint32_t PROGRAM_PIXEL_CLOCK:1;
+ uint32_t PROGRAM_ID_CLOCK:1;
+ uint32_t SUPPORT_YCBCR420:1;
+};
+
+/**
+ * DisplayPort HW de-spread of reference clock related parameters structure.
+ * Stored once at boot for later use.
+ */
+struct csdp_ref_clk_ds_params {
+ bool hw_dso_n_dp_ref_clk;
+/* Flag: HW de-spread enabled (i.e. SS applied to the DP reference clock) */
+ uint32_t avg_dp_ref_clk_khz;
+/* Average DP Reference clock (in KHz)*/
+ uint32_t ss_percentage_on_dp_ref_clk;
+/* DP Reference clock SS percentage
+ * (not to be mixed with DP IDCLK SS from PLL Settings)*/
+ uint32_t ss_percentage_divider;
+/* DP Reference clock SS percentage divider */
+};
+
+struct pixel_clk_params {
+ uint32_t requested_pix_clk; /* in KHz */
+/*> Requested Pixel Clock
+ * (based on Video Timing standard used for requested mode)*/
+ uint32_t requested_sym_clk; /* in KHz */
+/*> Requested Sym Clock (relevant only for display port)*/
+ uint32_t dp_ref_clk; /* in KHz */
+/*> DP reference clock - calculated only for DP signal for specific cases*/
+ struct graphics_object_id encoder_object_id;
+/*> Encoder object Id - needed by VBIOS Exec table*/
+ enum signal_type signal_type;
+/*> signalType -> Encoder Mode - needed by VBIOS Exec table*/
+ enum controller_id controller_id;
+/*> ControllerId - which controller using this PLL*/
+ enum dc_color_depth color_depth;
+ struct csdp_ref_clk_ds_params de_spread_params;
+/*> de-spread info, relevant only for on-the-fly tune-up pixel rate*/
+ enum dc_pixel_encoding pixel_encoding;
+ struct pixel_clk_flags flags;
+};
+
+/**
+ * Pixel clock dividers structure with the desired pixel clock
+ * (adjusted after the VBIOS exec table),
+ * the actually calculated clock, and the reference crystal frequency
+ */
+struct pll_settings {
+ uint32_t actual_pix_clk;
+ uint32_t adjusted_pix_clk;
+ uint32_t calculated_pix_clk;
+ uint32_t vco_freq;
+ uint32_t reference_freq;
+ uint32_t reference_divider;
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t pix_clk_post_divider;
+ uint32_t ss_percentage;
+ bool use_external_clk;
+};
+
+struct calc_pll_clock_source_init_data {
+ struct dc_bios *bp;
+ uint32_t min_pix_clk_pll_post_divider;
+ uint32_t max_pix_clk_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+ uint32_t min_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t max_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t num_fract_fb_divider_decimal_point;
+/* number of decimal point for fractional feedback divider value */
+
+ uint32_t num_fract_fb_divider_decimal_point_precision;
+/* number of decimal point to round off for fractional feedback divider value*/
+ struct dc_context *ctx;
+
+};
+
+struct calc_pll_clock_source {
+ uint32_t ref_freq_khz;
+ uint32_t min_pix_clock_pll_post_divider;
+ uint32_t max_pix_clock_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+
+ uint32_t max_vco_khz;
+ uint32_t min_vco_khz;
+ uint32_t min_pll_input_freq_khz;
+ uint32_t max_pll_input_freq_khz;
+
+ uint32_t fract_fb_divider_decimal_points_num;
+ uint32_t fract_fb_divider_factor;
+ uint32_t fract_fb_divider_precision;
+ uint32_t fract_fb_divider_precision_factor;
+ struct dc_context *ctx;
+};
+
+struct clock_source_funcs {
+ bool (*cs_power_down)(
+ struct clock_source *);
+ bool (*program_pix_clk)(struct clock_source *,
+ struct pixel_clk_params *, struct pll_settings *);
+ uint32_t (*get_pix_clk_dividers)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+ uint32_t (*get_pix_rate_in_hz)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+};
+
+struct clock_source {
+ const struct clock_source_funcs *funcs;
+ struct dc_context *ctx;
+ enum clock_source_id id;
+ bool dp_clk_src;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
new file mode 100644
index 000000000000..bcb18f5e1e60
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMPRESSOR_H__
+#define __DAL_COMPRESSOR_H__
+
+#include "include/grph_object_id.h"
+#include "bios_parser_interface.h"
+
+enum fbc_compress_ratio {
+ FBC_COMPRESS_RATIO_INVALID = 0,
+ FBC_COMPRESS_RATIO_1TO1 = 1,
+ FBC_COMPRESS_RATIO_2TO1 = 2,
+ FBC_COMPRESS_RATIO_4TO1 = 4,
+ FBC_COMPRESS_RATIO_8TO1 = 8,
+};
+
+union fbc_physical_address {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } addr;
+ uint64_t quad_part;
+};
+
+struct compr_addr_and_pitch_params {
+ /* enum controller_id controller_id; */
+ uint32_t inst;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+};
+
+enum fbc_hw_max_resolution_supported {
+ FBC_MAX_X = 3840,
+ FBC_MAX_Y = 2400,
+ FBC_MAX_X_SG = 1920,
+ FBC_MAX_Y_SG = 1080,
+};
+
+struct compressor;
+
+struct compressor_funcs {
+
+ void (*power_up_fbc)(struct compressor *cp);
+ void (*enable_fbc)(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ void (*disable_fbc)(struct compressor *cp);
+ void (*set_fbc_invalidation_triggers)(struct compressor *cp,
+ uint32_t fbc_trigger);
+ void (*surface_address_and_pitch)(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ bool (*is_fbc_enabled_in_hw)(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+};
+struct compressor {
+ struct dc_context *ctx;
+ uint32_t attached_inst;
+ bool is_enabled;
+ const struct compressor_funcs *funcs;
+ union {
+ uint32_t raw;
+ struct {
+ uint32_t FBC_SUPPORT:1;
+ uint32_t FB_POOL:1;
+ uint32_t DYNAMIC_ALLOC:1;
+ uint32_t LPT_SUPPORT:1;
+ uint32_t LPT_MC_CONFIG:1;
+ uint32_t DUMMY_BACKEND:1;
+ uint32_t CLK_GATING_DISABLED:1;
+
+ } bits;
+ } options;
+
+ union fbc_physical_address compr_surface_address;
+
+ uint32_t embedded_panel_h_size;
+ uint32_t embedded_panel_v_size;
+ uint32_t memory_bus_width;
+ uint32_t banks_num;
+ uint32_t raw_size;
+ uint32_t channel_interleave_size;
+ uint32_t dram_channels_num;
+
+ uint32_t allocated_size;
+ uint32_t preferred_requested_size;
+ uint32_t lpt_channels_num;
+ enum fbc_compress_ratio min_compress_ratio;
+};
+
+struct fbc_input_info {
+ bool dynamic_fbc_buffer_alloc;
+ unsigned int source_view_width;
+ unsigned int source_view_height;
+ unsigned int num_of_active_targets;
+};
+
+
+struct fbc_requested_compressed_size {
+ unsigned int preferred_size;
+ unsigned int preferred_size_alignment;
+ unsigned int min_size;
+ unsigned int min_size_alignment;
+ union {
+ struct {
+			/* preferred_size above must be allocated in the FB pool */
+ unsigned int preferred_must_be_framebuffer_pool : 1;
+			/* min_size above must be allocated in the FB pool */
+ unsigned int min_must_be_framebuffer_pool : 1;
+ } bits;
+ unsigned int flags;
+ };
+};
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
new file mode 100644
index 000000000000..01df85641684
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CORE_STATUS_H_
+#define _CORE_STATUS_H_
+
+enum dc_status {
+ DC_OK = 1,
+
+ DC_NO_CONTROLLER_RESOURCE = 2,
+ DC_NO_STREAM_ENG_RESOURCE = 3,
+ DC_NO_CLOCK_SOURCE_RESOURCE = 4,
+ DC_FAIL_CONTROLLER_VALIDATE = 5,
+ DC_FAIL_ENC_VALIDATE = 6,
+ DC_FAIL_ATTACH_SURFACES = 7,
+ DC_FAIL_DETACH_SURFACES = 8,
+ DC_FAIL_SURFACE_VALIDATE = 9,
+ DC_NO_DP_LINK_BANDWIDTH = 10,
+ DC_EXCEED_DONGLE_MAX_CLK = 11,
+ DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
+ DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
+ DC_FAIL_SCALING = 14,
+ DC_FAIL_DP_LINK_TRAINING = 15,
+
+ DC_ERROR_UNEXPECTED = -1
+};
+
+#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
new file mode 100644
index 000000000000..b69f321e2ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CORE_TYPES_H_
+#define _CORE_TYPES_H_
+
+#include "dc.h"
+#include "dce_calcs.h"
+#include "dcn_calcs.h"
+#include "ddc_service_types.h"
+#include "dc_bios_types.h"
+#include "mem_input.h"
+#include "hubp.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "mpc.h"
+#endif
+
+#define MAX_CLOCK_SOURCES 7
+
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id);
+
+#include "grph_object_id.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "clock_source.h"
+#include "audio.h"
+#include "dm_pp_smu.h"
+
+
+/************ link *****************/
+struct link_init_data {
+ const struct dc *dc;
+ struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
+ uint32_t connector_index; /* this will be mapped to the HPD pins */
+ uint32_t link_index; /* this is mapped to DAL display_index
+ TODO: remove it when DC is complete. */
+};
+
+enum {
+ FREE_ACQUIRED_RESOURCE = 0,
+ KEEP_ACQUIRED_RESOURCE = 1,
+};
+
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void core_link_resume(struct dc_link *link);
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+/********** DAL Core*********************/
+#include "display_clock.h"
+#include "transform.h"
+#include "dpp.h"
+
+struct resource_pool;
+struct dc_state;
+struct resource_context;
+
+struct resource_funcs {
+ void (*destroy)(struct resource_pool **pool);
+ struct link_encoder *(*link_enc_create)(
+ const struct encoder_init_data *init);
+
+ enum dc_status (*validate_guaranteed)(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+ bool (*validate_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ enum dc_status (*validate_global)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
+ enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+ enum dc_status (*add_stream_to_ctx)(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+};
+
+struct audio_support{
+ bool dp_audio;
+ bool hdmi_audio_on_dongle;
+ bool hdmi_audio_native;
+};
+
+#define NO_UNDERLAY_PIPE -1
+
+struct resource_pool {
+ struct mem_input *mis[MAX_PIPES];
+ struct hubp *hubps[MAX_PIPES];
+ struct input_pixel_processor *ipps[MAX_PIPES];
+ struct transform *transforms[MAX_PIPES];
+ struct dpp *dpps[MAX_PIPES];
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_display_requirement_rv pp_smu_req;
+
+ unsigned int pipe_count;
+ unsigned int underlay_pipe_index;
+ unsigned int stream_enc_count;
+ unsigned int ref_clock_inKhz;
+
+ /*
+ * reserved clock source for DP
+ */
+ struct clock_source *dp_clock_source;
+
+ struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
+ unsigned int clk_src_count;
+
+ struct audio *audios[MAX_PIPES];
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+ struct display_clock *display_clock;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+ struct dmcu *dmcu;
+
+ const struct resource_funcs *funcs;
+ const struct resource_caps *res_cap;
+};
+
+struct stream_resource {
+ struct output_pixel_processor *opp;
+ struct timing_generator *tg;
+ struct stream_encoder *stream_enc;
+ struct audio *audio;
+
+ struct pixel_clk_params pix_clk_params;
+ struct encoder_info_frame encoder_info_frame;
+};
+
+struct plane_resource {
+ struct scaler_data scl_data;
+ struct hubp *hubp;
+ struct mem_input *mi;
+ struct input_pixel_processor *ipp;
+ struct transform *xfm;
+ struct dpp *dpp;
+};
+
+struct pipe_ctx {
+ struct dc_plane_state *plane_state;
+ struct dc_stream_state *stream;
+
+ struct plane_resource plane_res;
+ struct stream_resource stream_res;
+
+ struct clock_source *clock_source;
+
+ struct pll_settings pll_settings;
+
+ uint8_t pipe_idx;
+
+ struct pipe_ctx *top_pipe;
+ struct pipe_ctx *bottom_pipe;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct _vcs_dpi_display_dlg_regs_st dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
+#endif
+ struct dwbc *dwbc;
+};
+
+struct resource_context {
+ struct pipe_ctx pipe_ctx[MAX_PIPES];
+ bool is_stream_enc_acquired[MAX_PIPES * 2];
+ bool is_audio_acquired[MAX_PIPES];
+ uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
+ uint8_t dp_clock_source_ref_count;
+};
+
+struct dce_bw_output {
+ bool cpuc_state_change_enable;
+ bool cpup_state_change_enable;
+ bool stutter_mode_enable;
+ bool nbp_state_change_enable;
+ bool all_displays_in_sync;
+ struct dce_watermarks urgent_wm_ns[MAX_PIPES];
+ struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
+ struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
+ int sclk_khz;
+ int sclk_deep_sleep_khz;
+ int yclk_khz;
+ int dispclk_khz;
+ int blackout_recovery_time_us;
+};
+
+struct dcn_bw_clocks {
+ int dispclk_khz;
+ bool dppclk_div;
+ int dcfclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+ int dram_ccm_us;
+ int min_active_dram_ccm_us;
+};
+
+struct dcn_bw_output {
+ struct dcn_bw_clocks cur_clk;
+ struct dcn_bw_clocks calc_clk;
+ struct dcn_watermark_set watermarks;
+};
+
+union bw_context {
+ struct dcn_bw_output dcn;
+ struct dce_bw_output dce;
+};
+
+struct dc_state {
+ struct dc_stream_state *streams[MAX_PIPES];
+ struct dc_stream_status stream_status[MAX_PIPES];
+ uint8_t stream_count;
+
+ struct resource_context res_ctx;
+
+ /* The output from BW and WM calculations. */
+ union bw_context bw;
+
+ /* Note: these are big structures, do *not* put on stack! */
+ struct dm_pp_display_configuration pp_display_cfg;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_bw_internal_vars dcn_bw_vars;
+#endif
+
+ struct display_clock *dis_clk;
+
+ struct kref refcount;
+};
+
+#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/custom_float.h b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
new file mode 100644
index 000000000000..f57239672216
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef CUSTOM_FLOAT_H_
+#define CUSTOM_FLOAT_H_
+
+#include "bw_fixed.h"
+#include "hw_shared.h"
+#include "opp.h"
+
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result);
+
+
+#endif //CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
new file mode 100644
index 000000000000..0bf73b742f1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DDC_SERVICE_H__
+#define __DAL_DDC_SERVICE_H__
+
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+
+#define EDID_SEGMENT_SIZE 256
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHz*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHz*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
+struct ddc_service;
+struct graphics_object_id;
+enum ddc_result;
+struct av_sync_data;
+struct dp_receiver_id_info;
+
+struct i2c_payloads;
+struct aux_payloads;
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+struct ddc_service_init_data {
+ struct graphics_object_id id;
+ struct dc_context *ctx;
+ struct dc_link *link;
+};
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *ddc_init_data);
+
+void dal_ddc_service_destroy(struct ddc_service **ddc);
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type);
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap);
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len);
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len);
+
+void dal_ddc_service_write_scdc_data(
+ struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble);
+
+void dal_ddc_service_read_scdc_data(
+ struct ddc_service *ddc_service);
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type);
+
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc);
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
+
+uint32_t get_defer_delay(struct ddc_service *ddc);
+
+#endif /* __DAL_DDC_SERVICE_H__ */
+
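
For orientation, the dual-mode ("DP++") adaptor defines above are consumed by dal_ddc_service_i2c_query_dp_dual_mode_adaptor() when it classifies a dongle. The standalone sketch below only shows the general idea: read the adaptor ID register over the DDC channel and fall back to the DVI-safe TMDS limit when the type-2 signature is absent. The dpdm_reg_read_t callback and fake_read() backend are hypothetical; the real query goes through the ddc_service I2C path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_ADAPTOR_TYPE2_REG_ID        0x10
#define DP_ADAPTOR_TYPE2_ID            0xA0
#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK  600     /* MHz */
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK    165000  /* kHz */

/* Hypothetical I2C register read over the adaptor's DDC channel. */
typedef bool (*dpdm_reg_read_t)(uint8_t offset, uint8_t *value);

/* Classify the dongle and report a TMDS clock ceiling in kHz. */
static uint32_t max_tmds_clk_khz(dpdm_reg_read_t read_reg)
{
	uint8_t id = 0;

	if (read_reg && read_reg(DP_ADAPTOR_TYPE2_REG_ID, &id) &&
	    id == DP_ADAPTOR_TYPE2_ID)
		return DP_ADAPTOR_TYPE2_MAX_TMDS_CLK * 1000;

	/* Not a type-2 adaptor: stay within the DVI-safe limit. */
	return DP_ADAPTOR_DVI_MAX_TMDS_CLK;
}

static bool fake_read(uint8_t offset, uint8_t *value)
{
	*value = (offset == DP_ADAPTOR_TYPE2_REG_ID) ? DP_ADAPTOR_TYPE2_ID : 0;
	return true;
}

int main(void)
{
	printf("ceiling: %u kHz\n", max_tmds_clk_khz(fake_read));
	return 0;
}
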
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
new file mode 100644
index 000000000000..616c73e2b0bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_H__
+#define __DC_LINK_DP_H__
+
+#define LINK_TRAINING_ATTEMPTS 4
+#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+
+struct dc_link;
+struct dc_stream_state;
+struct dc_link_settings;
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting);
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts);
+
+bool is_mst_supported(struct dc_link *link);
+
+void detect_dp_sink_caps(struct dc_link *link);
+
+void detect_edp_sink_caps(struct dc_link *link);
+
+bool is_dp_active_dongle(const struct dc_link *link);
+
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
+
+#endif /* __DC_LINK_DP_H__ */
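
perform_link_training_with_retries() above pairs the LINK_TRAINING_ATTEMPTS and LINK_TRAINING_RETRY_DELAY defines into a simple retry policy. A minimal, self-contained sketch of that policy follows; try_train_once() is a hypothetical stand-in for the real per-attempt training call, and the delay uses usleep() instead of the kernel's msleep().

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define LINK_TRAINING_ATTEMPTS    4
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */

/* Hypothetical single training attempt; the real driver programs the PHY
 * and reads back DPCD status here. */
static bool try_train_once(int attempt)
{
	return attempt == 2; /* pretend the second attempt succeeds */
}

static bool train_with_retries(int attempts)
{
	for (int i = 0; i < attempts; i++) {
		if (try_train_once(i + 1))
			return true;
		/* Back off before retrying, as the header's delay suggests. */
		usleep(LINK_TRAINING_RETRY_DELAY * 1000);
	}
	return false;
}

int main(void)
{
	printf("link training %s\n",
	       train_with_retries(LINK_TRAINING_ATTEMPTS) ? "succeeded" : "failed");
	return 0;
}
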
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
new file mode 100644
index 000000000000..ae2399f16d1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCE_CALCS_H__
+#define __DCE_CALCS_H__
+
+#include "bw_fixed.h"
+
+struct pipe_ctx;
+struct dc;
+struct dc_state;
+struct dce_bw_output;
+
+enum bw_calcs_version {
+ BW_CALCS_VERSION_INVALID,
+ BW_CALCS_VERSION_CARRIZO,
+ BW_CALCS_VERSION_POLARIS10,
+ BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_STONEY,
+ BW_CALCS_VERSION_VEGA10
+};
+
+/*******************************************************************************
+ * There are three types of input into Calculations:
+ * 1. per-DCE static values - these are "hardcoded" properties of the DCEIP
+ * 2. board-level values - these generally come from the VBIOS parser
+ * 3. mode/configuration values - these depend on the mode, scaling, number of displays, etc.
+ ******************************************************************************/
+
+enum bw_defines {
+ //Common
+ bw_def_no = 0,
+ bw_def_none = 0,
+ bw_def_yes = 1,
+ bw_def_ok = 1,
+ bw_def_high = 2,
+ bw_def_mid = 1,
+ bw_def_low = 0,
+
+ //Internal
+ bw_defs_start = 255,
+ bw_def_underlay422,
+ bw_def_underlay420_luma,
+ bw_def_underlay420_chroma,
+ bw_def_underlay444,
+ bw_def_graphics,
+ bw_def_display_write_back420_luma,
+ bw_def_display_write_back420_chroma,
+ bw_def_portrait,
+ bw_def_hsr_mtn_4,
+ bw_def_hsr_mtn_h_taps,
+ bw_def_ceiling__h_taps_div_4___meq_hsr,
+ bw_def_invalid_linear_or_stereo_mode,
+ bw_def_invalid_rotation_or_bpp_or_stereo,
+ bw_def_vsr_mtn_v_taps,
+ bw_def_vsr_mtn_4,
+ bw_def_auto,
+ bw_def_manual,
+ bw_def_exceeded_allowed_maximum_sclk,
+ bw_def_exceeded_allowed_page_close_open,
+ bw_def_exceeded_allowed_outstanding_pte_req_queue_size,
+ bw_def_exceeded_allowed_maximum_bw,
+ bw_def_landscape,
+
+ //Panning and bezel
+ bw_def_any_lines,
+
+ //Underlay mode
+ bw_def_underlay_only,
+ bw_def_blended,
+ bw_def_blend,
+
+ //Stereo mode
+ bw_def_mono,
+ bw_def_side_by_side,
+ bw_def_top_bottom,
+
+ //Underlay surface type
+ bw_def_420,
+ bw_def_422,
+ bw_def_444,
+
+ //Tiling mode
+ bw_def_linear,
+ bw_def_tiled,
+ bw_def_array_linear_general,
+ bw_def_array_linear_aligned,
+ bw_def_rotated_micro_tiling,
+ bw_def_display_micro_tiling,
+
+ //Memory type
+ bw_def_gddr5,
+ bw_def_hbm,
+
+ //Voltage
+ bw_def_high_no_nbp_state_change,
+ bw_def_0_72,
+ bw_def_0_8,
+ bw_def_0_9,
+
+ bw_def_notok = -1,
+ bw_def_na = -1
+};
+
+struct bw_calcs_dceip {
+ enum bw_calcs_version version;
+ bool large_cursor;
+ uint32_t cursor_max_outstanding_group_num;
+ bool dmif_pipe_en_fbc_chunk_tracker;
+ struct bw_fixed dmif_request_buffer_size;
+ uint32_t lines_interleaved_into_lb;
+ uint32_t low_power_tiling_mode;
+ uint32_t chunk_width;
+ uint32_t number_of_graphics_pipes;
+ uint32_t number_of_underlay_pipes;
+ bool display_write_back_supported;
+ bool argb_compression_support;
+ struct bw_fixed underlay_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed alpha_vscaler_efficiency;
+ uint32_t max_dmif_buffer_allocated;
+ uint32_t graphics_dmif_size;
+ uint32_t underlay_luma_dmif_size;
+ uint32_t underlay_chroma_dmif_size;
+ bool pre_downscaler_enabled;
+ bool underlay_downscale_prefetch_enabled;
+ struct bw_fixed lb_write_pixels_per_dispclk;
+ struct bw_fixed lb_size_per_component444;
+ bool graphics_lb_nodownscaling_multi_line_prefetching;
+ struct bw_fixed stutter_and_dram_clock_state_change_gated_before_cursor;
+ struct bw_fixed underlay420_luma_lb_size_per_component;
+ struct bw_fixed underlay420_chroma_lb_size_per_component;
+ struct bw_fixed underlay422_lb_size_per_component;
+ struct bw_fixed cursor_chunk_width;
+ struct bw_fixed cursor_dcp_buffer_lines;
+ struct bw_fixed underlay_maximum_width_efficient_for_tiling;
+ struct bw_fixed underlay_maximum_height_efficient_for_tiling;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ struct bw_fixed minimum_outstanding_pte_request_limit;
+ struct bw_fixed maximum_total_outstanding_pte_requests_allowed_by_saw;
+ bool limit_excessive_outstanding_dmif_requests;
+ struct bw_fixed linear_mode_line_request_alternation_slice;
+ uint32_t scatter_gather_lines_of_pte_prefetching_in_linear_mode;
+ uint32_t display_write_back420_luma_mcifwr_buffer_size;
+ uint32_t display_write_back420_chroma_mcifwr_buffer_size;
+ struct bw_fixed request_efficiency;
+ struct bw_fixed dispclk_per_request;
+ struct bw_fixed dispclk_ramping_factor;
+ struct bw_fixed display_pipe_throughput_factor;
+ uint32_t scatter_gather_pte_request_rows_in_tiling_mode;
+ struct bw_fixed mcifwr_all_surfaces_burst_time;
+};
+
+struct bw_calcs_vbios {
+ enum bw_defines memory_type;
+ uint32_t dram_channel_width_in_bits;
+ uint32_t number_of_dram_channels;
+ uint32_t number_of_dram_banks;
+ struct bw_fixed low_yclk; /*m_hz*/
+ struct bw_fixed mid_yclk; /*m_hz*/
+ struct bw_fixed high_yclk; /*m_hz*/
+ struct bw_fixed low_sclk; /*m_hz*/
+ struct bw_fixed mid1_sclk; /*m_hz*/
+ struct bw_fixed mid2_sclk; /*m_hz*/
+ struct bw_fixed mid3_sclk; /*m_hz*/
+ struct bw_fixed mid4_sclk; /*m_hz*/
+ struct bw_fixed mid5_sclk; /*m_hz*/
+ struct bw_fixed mid6_sclk; /*m_hz*/
+ struct bw_fixed high_sclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed mid_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed high_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_phyclk;
+ struct bw_fixed mid_voltage_max_phyclk;
+ struct bw_fixed high_voltage_max_phyclk;
+ struct bw_fixed data_return_bus_width;
+ struct bw_fixed trc;
+ struct bw_fixed dmifmc_urgent_latency;
+ struct bw_fixed stutter_self_refresh_exit_latency;
+ struct bw_fixed stutter_self_refresh_entry_latency;
+ struct bw_fixed nbp_state_change_latency;
+ struct bw_fixed mcifwrmc_urgent_latency;
+ bool scatter_gather_enable;
+ struct bw_fixed down_spread_percentage;
+ uint32_t cursor_width;
+ uint32_t average_compression_rate;
+ uint32_t number_of_request_slots_gmc_reserves_for_dmif_per_channel;
+ struct bw_fixed blackout_duration;
+ struct bw_fixed maximum_blackout_recovery_time;
+};
+
+/*******************************************************************************
+ * Temporary data structure(s).
+ ******************************************************************************/
+#define maximum_number_of_surfaces 12
+/*Units : MHz, us */
+
+struct bw_calcs_data {
+ /* data for all displays */
+ uint32_t number_of_displays;
+ enum bw_defines underlay_surface_type;
+ enum bw_defines panning_and_bezel_adjustment;
+ enum bw_defines graphics_tiling_mode;
+ uint32_t graphics_lb_bpc;
+ uint32_t underlay_lb_bpc;
+ enum bw_defines underlay_tiling_mode;
+ enum bw_defines d0_underlay_mode;
+ bool d1_display_write_back_dwb_enable;
+ enum bw_defines d1_underlay_mode;
+
+ bool cpup_state_change_enable;
+ bool cpuc_state_change_enable;
+ bool nbp_state_change_enable;
+ bool stutter_mode_enable;
+ uint32_t y_clk_level;
+ uint32_t sclk_level;
+ uint32_t number_of_underlay_surfaces;
+ uint32_t number_of_dram_wrchannels;
+ uint32_t chunk_request_delay;
+ uint32_t number_of_dram_channels;
+ enum bw_defines underlay_micro_tile_mode;
+ enum bw_defines graphics_micro_tile_mode;
+ struct bw_fixed max_phyclk;
+ struct bw_fixed dram_efficiency;
+ struct bw_fixed src_width_after_surface_type;
+ struct bw_fixed src_height_after_surface_type;
+ struct bw_fixed hsr_after_surface_type;
+ struct bw_fixed vsr_after_surface_type;
+ struct bw_fixed src_width_after_rotation;
+ struct bw_fixed src_height_after_rotation;
+ struct bw_fixed hsr_after_rotation;
+ struct bw_fixed vsr_after_rotation;
+ struct bw_fixed source_height_pixels;
+ struct bw_fixed hsr_after_stereo;
+ struct bw_fixed vsr_after_stereo;
+ struct bw_fixed source_width_in_lb;
+ struct bw_fixed lb_line_pitch;
+ struct bw_fixed underlay_maximum_source_efficient_for_tiling;
+ struct bw_fixed num_lines_at_frame_start;
+ struct bw_fixed min_dmif_size_in_time;
+ struct bw_fixed min_mcifwr_size_in_time;
+ struct bw_fixed total_requests_for_dmif_size;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting;
+ struct bw_fixed useful_pte_per_pte_request;
+ struct bw_fixed scatter_gather_pte_request_rows;
+ struct bw_fixed scatter_gather_row_height;
+ struct bw_fixed scatter_gather_pte_requests_in_vblank;
+ struct bw_fixed inefficient_linear_pitch_in_bytes;
+ struct bw_fixed cursor_total_data;
+ struct bw_fixed cursor_total_request_groups;
+ struct bw_fixed scatter_gather_total_pte_requests;
+ struct bw_fixed scatter_gather_total_pte_request_groups;
+ struct bw_fixed tile_width_in_pixels;
+ struct bw_fixed dmif_total_number_of_data_request_page_close_open;
+ struct bw_fixed mcifwr_total_number_of_data_request_page_close_open;
+ struct bw_fixed bytes_per_page_close_open;
+ struct bw_fixed mcifwr_total_page_close_open_time;
+ struct bw_fixed total_requests_for_adjusted_dmif_size;
+ struct bw_fixed total_dmifmc_urgent_trips;
+ struct bw_fixed total_dmifmc_urgent_latency;
+ struct bw_fixed total_display_reads_required_data;
+ struct bw_fixed total_display_reads_required_dram_access_data;
+ struct bw_fixed total_display_writes_required_data;
+ struct bw_fixed total_display_writes_required_dram_access_data;
+ struct bw_fixed display_reads_required_data;
+ struct bw_fixed display_reads_required_dram_access_data;
+ struct bw_fixed dmif_total_page_close_open_time;
+ struct bw_fixed min_cursor_memory_interface_buffer_size_in_time;
+ struct bw_fixed min_read_buffer_size_in_time;
+ struct bw_fixed display_reads_time_for_data_transfer;
+ struct bw_fixed display_writes_time_for_data_transfer;
+ struct bw_fixed dmif_required_dram_bandwidth;
+ struct bw_fixed mcifwr_required_dram_bandwidth;
+ struct bw_fixed required_dmifmc_urgent_latency_for_page_close_open;
+ struct bw_fixed required_mcifmcwr_urgent_latency;
+ struct bw_fixed required_dram_bandwidth_gbyte_per_second;
+ struct bw_fixed dram_bandwidth;
+ struct bw_fixed dmif_required_sclk;
+ struct bw_fixed mcifwr_required_sclk;
+ struct bw_fixed required_sclk;
+ struct bw_fixed downspread_factor;
+ struct bw_fixed v_scaler_efficiency;
+ struct bw_fixed scaler_limits_factor;
+ struct bw_fixed display_pipe_pixel_throughput;
+ struct bw_fixed total_dispclk_required_with_ramping;
+ struct bw_fixed total_dispclk_required_without_ramping;
+ struct bw_fixed total_read_request_bandwidth;
+ struct bw_fixed total_write_request_bandwidth;
+ struct bw_fixed dispclk_required_for_total_read_request_bandwidth;
+ struct bw_fixed total_dispclk_required_with_ramping_with_request_bandwidth;
+ struct bw_fixed total_dispclk_required_without_ramping_with_request_bandwidth;
+ struct bw_fixed dispclk;
+ struct bw_fixed blackout_recovery_time;
+ struct bw_fixed min_pixels_per_data_fifo_entry;
+ struct bw_fixed sclk_deep_sleep;
+ struct bw_fixed chunk_request_time;
+ struct bw_fixed cursor_request_time;
+ struct bw_fixed line_source_pixels_transfer_time;
+ struct bw_fixed dmifdram_access_efficiency;
+ struct bw_fixed mcifwrdram_access_efficiency;
+ struct bw_fixed total_average_bandwidth_no_compression;
+ struct bw_fixed total_average_bandwidth;
+ struct bw_fixed total_stutter_cycle_duration;
+ struct bw_fixed stutter_burst_time;
+ struct bw_fixed time_in_self_refresh;
+ struct bw_fixed stutter_efficiency;
+ struct bw_fixed worst_number_of_trips_to_memory;
+ struct bw_fixed immediate_flip_time;
+ struct bw_fixed latency_for_non_dmif_clients;
+ struct bw_fixed latency_for_non_mcifwr_clients;
+ struct bw_fixed dmifmc_urgent_latency_supported_in_high_sclk_and_yclk;
+ struct bw_fixed nbp_state_dram_speed_change_margin;
+ struct bw_fixed display_reads_time_for_data_transfer_and_urgent_latency;
+ struct bw_fixed dram_speed_change_margin;
+ struct bw_fixed min_vblank_dram_speed_change_margin;
+ struct bw_fixed min_stutter_refresh_duration;
+ uint32_t total_stutter_dmif_buffer_size;
+ uint32_t total_bytes_requested;
+ uint32_t min_stutter_dmif_buffer_size;
+ uint32_t num_stutter_bursts;
+ struct bw_fixed v_blank_nbp_state_dram_speed_change_latency_supported;
+ struct bw_fixed nbp_state_dram_speed_change_latency_supported;
+ bool fbc_en[maximum_number_of_surfaces];
+ bool lpt_en[maximum_number_of_surfaces];
+ bool displays_match_flag[maximum_number_of_surfaces];
+ bool use_alpha[maximum_number_of_surfaces];
+ bool orthogonal_rotation[maximum_number_of_surfaces];
+ bool enable[maximum_number_of_surfaces];
+ bool access_one_channel_only[maximum_number_of_surfaces];
+ bool scatter_gather_enable_for_pipe[maximum_number_of_surfaces];
+ bool interlace_mode[maximum_number_of_surfaces];
+ bool display_pstate_change_enable[maximum_number_of_surfaces];
+ bool line_buffer_prefetch[maximum_number_of_surfaces];
+ uint32_t bytes_per_pixel[maximum_number_of_surfaces];
+ uint32_t max_chunks_non_fbc_mode[maximum_number_of_surfaces];
+ uint32_t lb_bpc[maximum_number_of_surfaces];
+ uint32_t output_bpphdmi[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr2[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr3[maximum_number_of_surfaces];
+ enum bw_defines stereo_mode[maximum_number_of_surfaces];
+ struct bw_fixed dmif_buffer_transfer_time[maximum_number_of_surfaces];
+ struct bw_fixed displays_with_same_mode[maximum_number_of_surfaces];
+ struct bw_fixed stutter_dmif_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed stutter_refresh_duration[maximum_number_of_surfaces];
+ struct bw_fixed stutter_exit_watermark[maximum_number_of_surfaces];
+ struct bw_fixed stutter_entry_watermark[maximum_number_of_surfaces];
+ struct bw_fixed h_total[maximum_number_of_surfaces];
+ struct bw_fixed v_total[maximum_number_of_surfaces];
+ struct bw_fixed pixel_rate[maximum_number_of_surfaces];
+ struct bw_fixed src_width[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels_after_surface_type[maximum_number_of_surfaces];
+ struct bw_fixed src_height[maximum_number_of_surfaces];
+ struct bw_fixed scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed h_taps[maximum_number_of_surfaces];
+ struct bw_fixed v_taps[maximum_number_of_surfaces];
+ struct bw_fixed h_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed v_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed rotation_angle[maximum_number_of_surfaces];
+ struct bw_fixed compression_rate[maximum_number_of_surfaces];
+ struct bw_fixed hsr[maximum_number_of_surfaces];
+ struct bw_fixed vsr[maximum_number_of_surfaces];
+ struct bw_fixed source_width_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed source_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed source_height_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed display_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed request_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed useful_bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed lines_interleaved_in_mem_access[maximum_number_of_surfaces];
+ struct bw_fixed latency_hiding_lines[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions_max[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_with_ramping[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_without_ramping[maximum_number_of_surfaces];
+ struct bw_fixed data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed outstanding_chunk_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed urgent_watermark[maximum_number_of_surfaces];
+ struct bw_fixed nbp_state_change_watermark[maximum_number_of_surfaces];
+ struct bw_fixed v_filter_init[maximum_number_of_surfaces];
+ struct bw_fixed stutter_cycle_duration[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth_no_compression[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed lb_size_per_component[maximum_number_of_surfaces];
+ struct bw_fixed memory_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed pipe_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed number_of_trips_to_memory_for_getting_apte_row[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size_in_memory[maximum_number_of_surfaces];
+ struct bw_fixed pixels_per_data_fifo_entry[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_requests_in_row[maximum_number_of_surfaces];
+ struct bw_fixed pte_request_per_chunk[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_width[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_height[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_beginning_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_middle_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed cursor_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed active_time[maximum_number_of_surfaces];
+ struct bw_fixed horizontal_blank_and_chunk_granularity_factor[maximum_number_of_surfaces];
+ struct bw_fixed cursor_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed v_blank_dram_speed_change_margin[maximum_number_of_surfaces];
+ uint32_t num_displays_with_margin[3][8];
+ struct bw_fixed dmif_burst_time[3][8];
+ struct bw_fixed mcifwr_burst_time[3][8];
+ struct bw_fixed line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed min_dram_speed_change_margin[3][8];
+ struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
+ struct bw_fixed blackout_duration_margin[3][8];
+ struct bw_fixed dispclk_required_for_blackout_duration[3][8];
+ struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
+ struct bw_fixed dmif_required_sclk_for_urgent_latency[6];
+};
+
+/**
+ * Initialize structures with data which will NOT change at runtime.
+ */
+void bw_calcs_init(
+ struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id);
+
+/**
+ * Return:
+ * true - Display(s) configuration supported.
+ * In this case 'calcs_output' contains data for HW programming
+ * false - Display(s) configuration not supported (not enough bandwidth).
+ */
+bool bw_calcs(
+ struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx *pipe,
+ int pipe_count,
+ struct dce_bw_output *calcs_output);
+
+#endif /* __DCE_CALCS_H__ */
+
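
The dceip/vbios/mode split described in the comment block near the top of dce_calcs.h maps onto bw_calcs_init() (static, run-once inputs) and bw_calcs() (per-configuration check). As a purely illustrative, self-contained sketch, and not the spreadsheet-derived math the driver actually runs, the snippet below folds toy versions of those three inputs into a single feasibility test; all struct and function names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the three input classes named in dce_calcs.h. */
struct toy_dceip { unsigned int num_pipes; };              /* per-DCE static */
struct toy_vbios { unsigned int dram_bw_mbytes_per_sec; }; /* board-level    */
struct toy_mode  { unsigned int h, v, refresh_hz, bpp; };  /* per-display    */

/* Sum the raw scanout bandwidth of each display and compare it against the
 * board's DRAM bandwidth.  This only illustrates the feasibility question;
 * the real bw_calcs() also models watermarks, stutter, urgency, etc. */
static bool toy_bw_calcs(const struct toy_dceip *ip,
			 const struct toy_vbios *vbios,
			 const struct toy_mode *modes, unsigned int count)
{
	unsigned long long need = 0;

	if (count > ip->num_pipes)
		return false;

	for (unsigned int i = 0; i < count; i++)
		need += (unsigned long long)modes[i].h * modes[i].v *
			modes[i].refresh_hz * (modes[i].bpp / 8);

	return need / 1000000 <= vbios->dram_bw_mbytes_per_sec;
}

int main(void)
{
	struct toy_dceip ip = { .num_pipes = 6 };
	struct toy_vbios vbios = { .dram_bw_mbytes_per_sec = 25600 };
	struct toy_mode modes[] = {
		{ 3840, 2160, 60, 32 },
		{ 1920, 1080, 144, 32 },
	};

	printf("config %s\n",
	       toy_bw_calcs(&ip, &vbios, modes, 2) ? "fits" : "does not fit");
	return 0;
}
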
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
new file mode 100644
index 000000000000..1e231f6de732
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCN_CALCS_H__
+#define __DCN_CALCS_H__
+
+#include "bw_fixed.h"
+#include "display_clock.h"
+#include "../dml/display_mode_lib.h"
+
+struct dc;
+struct dc_state;
+
+/*******************************************************************************
+ * DCN data structures.
+ ******************************************************************************/
+
+#define number_of_planes 6
+#define number_of_planes_minus_one 5
+#define number_of_states 4
+#define number_of_states_plus_one 5
+
+#define ddr4_dram_width 64
+#define ddr4_dram_factor_single_Channel 16
+enum dcn_bw_defs {
+ dcn_bw_v_min0p65,
+ dcn_bw_v_mid0p72,
+ dcn_bw_v_nom0p8,
+ dcn_bw_v_max0p9,
+ dcn_bw_v_max0p91,
+ dcn_bw_no_support = 5,
+ dcn_bw_yes,
+ dcn_bw_hor,
+ dcn_bw_vert,
+ dcn_bw_override,
+ dcn_bw_rgb_sub_64,
+ dcn_bw_rgb_sub_32,
+ dcn_bw_rgb_sub_16,
+ dcn_bw_no,
+ dcn_bw_sw_linear,
+ dcn_bw_sw_4_kb_d,
+ dcn_bw_sw_4_kb_d_x,
+ dcn_bw_sw_64_kb_d,
+ dcn_bw_sw_64_kb_d_t,
+ dcn_bw_sw_64_kb_d_x,
+ dcn_bw_sw_var_d,
+ dcn_bw_sw_var_d_x,
+ dcn_bw_yuv420_sub_8,
+ dcn_bw_sw_4_kb_s,
+ dcn_bw_sw_4_kb_s_x,
+ dcn_bw_sw_64_kb_s,
+ dcn_bw_sw_64_kb_s_t,
+ dcn_bw_sw_64_kb_s_x,
+ dcn_bw_writeback,
+ dcn_bw_444,
+ dcn_bw_dp,
+ dcn_bw_420,
+ dcn_bw_hdmi,
+ dcn_bw_sw_var_s,
+ dcn_bw_sw_var_s_x,
+ dcn_bw_yuv420_sub_10,
+ dcn_bw_supported_in_v_active,
+ dcn_bw_supported_in_v_blank,
+ dcn_bw_not_supported,
+ dcn_bw_na,
+ dcn_bw_encoder_8bpc,
+ dcn_bw_encoder_10bpc,
+ dcn_bw_encoder_12bpc,
+ dcn_bw_encoder_16bpc,
+};
+
+/*bounding box parameters*/
+/*mode parameters*/
+/*system configuration*/
+/* display configuration*/
+struct dcn_bw_internal_vars {
+ float voltage[number_of_states_plus_one + 1];
+ float max_dispclk[number_of_states_plus_one + 1];
+ float max_dppclk[number_of_states_plus_one + 1];
+ float dcfclk_per_state[number_of_states_plus_one + 1];
+ float phyclk_per_state[number_of_states_plus_one + 1];
+ float fabric_and_dram_bandwidth_per_state[number_of_states_plus_one + 1];
+ float sr_exit_time;
+ float sr_enter_plus_exit_time;
+ float dram_clock_change_latency;
+ float urgent_latency;
+ float write_back_latency;
+ float percent_of_ideal_drambw_received_after_urg_latency;
+ float dcfclkv_max0p9;
+ float dcfclkv_nom0p8;
+ float dcfclkv_mid0p72;
+ float dcfclkv_min0p65;
+ float max_dispclk_vmax0p9;
+ float max_dppclk_vmax0p9;
+ float max_dispclk_vnom0p8;
+ float max_dppclk_vnom0p8;
+ float max_dispclk_vmid0p72;
+ float max_dppclk_vmid0p72;
+ float max_dispclk_vmin0p65;
+ float max_dppclk_vmin0p65;
+ float socclk;
+ float fabric_and_dram_bandwidth_vmax0p9;
+ float fabric_and_dram_bandwidth_vnom0p8;
+ float fabric_and_dram_bandwidth_vmid0p72;
+ float fabric_and_dram_bandwidth_vmin0p65;
+ float round_trip_ping_latency_cycles;
+ float urgent_out_of_order_return_per_channel;
+ float number_of_channels;
+ float vmm_page_size;
+ float return_bus_width;
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ float pte_chunk_size;
+ float meta_chunk_size;
+ float writeback_chunk_size;
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ float line_buffer_size;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ float line_buffer_fixed_bpp;
+ float max_line_buffer_lines;
+ float writeback_luma_buffer_size;
+ float writeback_chroma_buffer_size;
+ float max_num_dpp;
+ float max_num_writeback;
+ float max_dchub_topscl_throughput;
+ float max_pscl_tolb_throughput;
+ float max_lb_tovscl_throughput;
+ float max_vscl_tohscl_throughput;
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ float max_hscl_taps;
+ float max_vscl_taps;
+ float under_scan_factor;
+ float phyclkv_max0p9;
+ float phyclkv_nom0p8;
+ float phyclkv_mid0p72;
+ float phyclkv_min0p65;
+ float pte_buffer_size_in_requests;
+ float dispclk_ramping_margin;
+ float downspreading;
+ float max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int mode;
+ float viewport_width[number_of_planes_minus_one + 1];
+ float htotal[number_of_planes_minus_one + 1];
+ float vtotal[number_of_planes_minus_one + 1];
+ float v_sync_plus_back_porch[number_of_planes_minus_one + 1];
+ float vactive[number_of_planes_minus_one + 1];
+ float pixel_clock[number_of_planes_minus_one + 1]; /*MHz*/
+ float viewport_height[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dcc_enable[number_of_planes_minus_one + 1];
+ float dcc_rate[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_scan[number_of_planes_minus_one + 1];
+ float lb_bit_per_pixel[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_pixel_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_surface_mode[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_deep_color[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output[number_of_planes_minus_one + 1];
+ float scaler_rec_out_width[number_of_planes_minus_one + 1];
+ float scaler_recout_height[number_of_planes_minus_one + 1];
+ float underscan_output[number_of_planes_minus_one + 1];
+ float interlace_output[number_of_planes_minus_one + 1];
+ float override_hta_ps[number_of_planes_minus_one + 1];
+ float override_vta_ps[number_of_planes_minus_one + 1];
+ float override_hta_pschroma[number_of_planes_minus_one + 1];
+ float override_vta_pschroma[number_of_planes_minus_one + 1];
+ float urgent_latency_support_us[number_of_planes_minus_one + 1];
+ float h_ratio[number_of_planes_minus_one + 1];
+ float v_ratio[number_of_planes_minus_one + 1];
+ float htaps[number_of_planes_minus_one + 1];
+ float vtaps[number_of_planes_minus_one + 1];
+ float hta_pschroma[number_of_planes_minus_one + 1];
+ float vta_pschroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs pte_enable;
+ enum dcn_bw_defs synchronized_vblank;
+ enum dcn_bw_defs ta_pscalculation;
+ int voltage_override_level;
+ int number_of_active_planes;
+ int voltage_level;
+ enum dcn_bw_defs immediate_flip_supported;
+ float dcfclk;
+ float max_phyclk;
+ float fabric_and_dram_bandwidth;
+ float dpp_per_plane_per_ratio[1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dispclk_dppclk_support_per_ratio[1 + 1];
+ float required_dispclk_per_ratio[1 + 1];
+ enum dcn_bw_defs error_message[1 + 1];
+ int dispclk_dppclk_ratio;
+ float dpp_per_plane[number_of_planes_minus_one + 1];
+ float det_buffer_size_y[number_of_planes_minus_one + 1];
+ float det_buffer_size_c[number_of_planes_minus_one + 1];
+ float swath_height_y[number_of_planes_minus_one + 1];
+ float swath_height_c[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs final_error_message;
+ float frequency;
+ float header_line;
+ float header;
+ enum dcn_bw_defs voltage_override;
+ enum dcn_bw_defs allow_different_hratio_vratio;
+ float acceptable_quality_hta_ps;
+ float acceptable_quality_vta_ps;
+ float no_of_dpp[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_width_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_cper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float urgent_latency_support_us_per_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_with_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_without_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float required_dispclk[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs dispclk_dppclk_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs total_available_pipes_support[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_dcc_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs urgent_latency_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float return_bw_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs dio_support[number_of_states_plus_one + 1];
+ float urgent_round_trip_and_out_of_order_latency_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs rob_support[number_of_states_plus_one + 1];
+ enum dcn_bw_defs bandwidth_support[number_of_states_plus_one + 1];
+ float prefetch_bw[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_per_frame[number_of_planes_minus_one + 1];
+ float meta_row_bytes[number_of_planes_minus_one + 1];
+ float dpte_bytes_per_row[number_of_planes_minus_one + 1];
+ float prefetch_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_lines_c[number_of_planes_minus_one + 1];
+ float max_num_sw_y[number_of_planes_minus_one + 1];
+ float max_num_sw_c[number_of_planes_minus_one + 1];
+ float line_times_for_prefetch[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_without_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_without_immediate_flip[number_of_planes_minus_one + 1];
+ float min_dppclk_using_single_dpp[number_of_planes_minus_one + 1];
+ float swath_width_ysingle_dpp[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_detc[number_of_planes_minus_one + 1];
+ float number_of_dpp_required_for_det_and_lb_size[number_of_planes_minus_one + 1];
+ float required_phyclk[number_of_planes_minus_one + 1];
+ float read256_block_height_y[number_of_planes_minus_one + 1];
+ float read256_block_width_y[number_of_planes_minus_one + 1];
+ float read256_block_height_c[number_of_planes_minus_one + 1];
+ float read256_block_width_c[number_of_planes_minus_one + 1];
+ float max_swath_height_y[number_of_planes_minus_one + 1];
+ float max_swath_height_c[number_of_planes_minus_one + 1];
+ float min_swath_height_y[number_of_planes_minus_one + 1];
+ float min_swath_height_c[number_of_planes_minus_one + 1];
+ float read_bandwidth[number_of_planes_minus_one + 1];
+ float write_bandwidth[number_of_planes_minus_one + 1];
+ float pscl_factor[number_of_planes_minus_one + 1];
+ float pscl_factor_chroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs scale_ratio_support;
+ enum dcn_bw_defs source_format_pixel_and_scan_support;
+ float total_read_bandwidth_consumed_gbyte_per_second;
+ float total_write_bandwidth_consumed_gbyte_per_second;
+ float total_bandwidth_consumed_gbyte_per_second;
+ enum dcn_bw_defs dcc_enabled_in_any_plane;
+ float return_bw_todcn_per_state;
+ float critical_point;
+ enum dcn_bw_defs writeback_latency_support;
+ float required_output_bw;
+ float total_number_of_active_writeback;
+ enum dcn_bw_defs total_available_writeback_support;
+ float maximum_swath_width;
+ float number_of_dpp_required_for_det_size;
+ float number_of_dpp_required_for_lb_size;
+ float min_dispclk_using_single_dpp;
+ float min_dispclk_using_dual_dpp;
+ enum dcn_bw_defs viewport_size_support;
+ float swath_width_granularity_y;
+ float rounded_up_max_swath_size_bytes_y;
+ float swath_width_granularity_c;
+ float rounded_up_max_swath_size_bytes_c;
+ float lines_in_det_luma;
+ float lines_in_det_chroma;
+ float effective_lb_latency_hiding_source_lines_luma;
+ float effective_lb_latency_hiding_source_lines_chroma;
+ float effective_detlb_lines_luma;
+ float effective_detlb_lines_chroma;
+ float projected_dcfclk_deep_sleep;
+ float meta_req_height_y;
+ float meta_req_width_y;
+ float meta_surface_width_y;
+ float meta_surface_height_y;
+ float meta_pte_bytes_per_frame_y;
+ float meta_row_bytes_y;
+ float macro_tile_block_size_bytes_y;
+ float macro_tile_block_height_y;
+ float data_pte_req_height_y;
+ float data_pte_req_width_y;
+ float dpte_bytes_per_row_y;
+ float meta_req_height_c;
+ float meta_req_width_c;
+ float meta_surface_width_c;
+ float meta_surface_height_c;
+ float meta_pte_bytes_per_frame_c;
+ float meta_row_bytes_c;
+ float macro_tile_block_size_bytes_c;
+ float macro_tile_block_height_c;
+ float macro_tile_block_width_c;
+ float data_pte_req_height_c;
+ float data_pte_req_width_c;
+ float dpte_bytes_per_row_c;
+ float v_init_y;
+ float max_partial_sw_y;
+ float v_init_c;
+ float max_partial_sw_c;
+ float dst_x_after_scaler;
+ float dst_y_after_scaler;
+ float time_calc;
+ float v_update_offset[number_of_planes_minus_one + 1];
+ float total_repeater_delay;
+ float v_update_width[number_of_planes_minus_one + 1];
+ float v_ready_offset[number_of_planes_minus_one + 1];
+ float time_setup;
+ float extra_latency;
+ float maximum_vstartup;
+ float bw_available_for_immediate_flip;
+ float total_immediate_flip_bytes[number_of_planes_minus_one + 1];
+ float time_for_meta_pte_with_immediate_flip;
+ float time_for_meta_pte_without_immediate_flip;
+ float time_for_meta_and_dpte_row_with_immediate_flip;
+ float time_for_meta_and_dpte_row_without_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_with_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_without_immediate_flip;
+ float voltage_level_with_immediate_flip;
+ float voltage_level_without_immediate_flip;
+ float total_number_of_active_dpp_per_ratio[1 + 1];
+ float byte_per_pix_dety;
+ float byte_per_pix_detc;
+ float read256_bytes_block_height_y;
+ float read256_bytes_block_width_y;
+ float read256_bytes_block_height_c;
+ float read256_bytes_block_width_c;
+ float maximum_swath_height_y;
+ float maximum_swath_height_c;
+ float minimum_swath_height_y;
+ float minimum_swath_height_c;
+ float swath_width;
+ float prefetch_bandwidth[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_y[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_c[number_of_planes_minus_one + 1];
+ float max_num_swath_y[number_of_planes_minus_one + 1];
+ float max_num_swath_c[number_of_planes_minus_one + 1];
+ float prefill_y[number_of_planes_minus_one + 1];
+ float prefill_c[number_of_planes_minus_one + 1];
+ float v_startup[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs allow_dram_clock_change_during_vblank[number_of_planes_minus_one + 1];
+ float allow_dram_self_refresh_during_vblank[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_y[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_c[number_of_planes_minus_one + 1];
+ float destination_lines_for_prefetch[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_vm_inv_blank[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_row_in_vblank[number_of_planes_minus_one + 1];
+ float min_ttuv_blank[number_of_planes_minus_one + 1];
+ float byte_per_pixel_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_detc[number_of_planes_minus_one + 1];
+ float swath_width_y[number_of_planes_minus_one + 1];
+ float lines_in_dety[number_of_planes_minus_one + 1];
+ float lines_in_dety_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float lines_in_detc[number_of_planes_minus_one + 1];
+ float lines_in_detc_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_y[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_c[number_of_planes_minus_one + 1];
+ float active_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float v_blank_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float dcfclk_deep_sleep_per_plane[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_luma[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma_prefetch[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma_prefetch[number_of_planes_minus_one + 1];
+ float pixel_pte_bytes_per_row[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_frame[number_of_planes_minus_one + 1];
+ float meta_row_byte[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_c[number_of_planes_minus_one + 1];
+ float pscl_throughput[number_of_planes_minus_one + 1];
+ float pscl_throughput_chroma[number_of_planes_minus_one + 1];
+ float output_bpphdmi[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr2[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr3[number_of_planes_minus_one + 1];
+ float max_vstartup_lines[number_of_planes_minus_one + 1];
+ float dispclk_with_ramping;
+ float dispclk_without_ramping;
+ float dppclk_using_single_dpp_luma;
+ float dppclk_using_single_dpp;
+ float dppclk_using_single_dpp_chroma;
+ enum dcn_bw_defs odm_capable;
+ float dispclk;
+ float dppclk;
+ float return_bandwidth_to_dcn;
+ enum dcn_bw_defs dcc_enabled_any_plane;
+ float return_bw;
+ float critical_compression;
+ float total_data_read_bandwidth;
+ float total_active_dpp;
+ float total_dcc_active_dpp;
+ float urgent_round_trip_and_out_of_order_latency;
+ float last_pixel_of_line_extra_watermark;
+ float data_fabric_line_delivery_time_luma;
+ float data_fabric_line_delivery_time_chroma;
+ float urgent_extra_latency;
+ float urgent_watermark;
+ float ptemeta_urgent_watermark;
+ float dram_clock_change_watermark;
+ float total_active_writeback;
+ float writeback_dram_clock_change_watermark;
+ float min_full_det_buffering_time;
+ float frame_time_for_min_full_det_buffering_time;
+ float average_read_bandwidth_gbyte_per_second;
+ float part_of_burst_that_fits_in_rob;
+ float stutter_burst_time;
+ float stutter_efficiency_not_including_vblank;
+ float smallest_vblank;
+ float v_blank_time;
+ float stutter_efficiency;
+ float dcf_clk_deep_sleep;
+ float stutter_exit_watermark;
+ float stutter_enter_plus_exit_watermark;
+ float effective_det_plus_lb_lines_luma;
+ float urgent_latency_support_us_luma;
+ float effective_det_plus_lb_lines_chroma;
+ float urgent_latency_support_us_chroma;
+ float min_urgent_latency_support_us;
+ float non_urgent_latency_tolerance;
+ float block_height256_bytes_y;
+ float block_height256_bytes_c;
+ float meta_request_width_y;
+ float meta_surf_width_y;
+ float meta_surf_height_y;
+ float meta_pte_bytes_frame_y;
+ float meta_row_byte_y;
+ float macro_tile_size_byte_y;
+ float macro_tile_height_y;
+ float pixel_pte_req_height_y;
+ float pixel_pte_req_width_y;
+ float pixel_pte_bytes_per_row_y;
+ float meta_request_width_c;
+ float meta_surf_width_c;
+ float meta_surf_height_c;
+ float meta_pte_bytes_frame_c;
+ float meta_row_byte_c;
+ float macro_tile_size_bytes_c;
+ float macro_tile_height_c;
+ float pixel_pte_req_height_c;
+ float pixel_pte_req_width_c;
+ float pixel_pte_bytes_per_row_c;
+ float max_partial_swath_y;
+ float max_partial_swath_c;
+ float t_calc;
+ float next_prefetch_mode;
+ float v_startup_lines;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2;
+ enum dcn_bw_defs v_ratio_prefetch_more_than4;
+ enum dcn_bw_defs destination_line_times_for_prefetch_less_than2;
+ float prefetch_mode;
+ float dstx_after_scaler;
+ float dsty_after_scaler;
+ float v_update_offset_pix;
+ float total_repeater_delay_time;
+ float v_update_width_pix;
+ float v_ready_offset_pix;
+ float t_setup;
+ float t_wait;
+ float bandwidth_available_for_immediate_flip;
+ float tot_immediate_flip_bytes;
+ float max_rd_bandwidth;
+ float time_for_fetching_meta_pte;
+ float time_for_fetching_row_in_vblank;
+ float lines_to_request_prefetch_pixel_data;
+ float required_prefetch_pix_data_bw;
+ enum dcn_bw_defs prefetch_mode_supported;
+ float active_dp_ps;
+ float lb_latency_hiding_source_lines_y;
+ float lb_latency_hiding_source_lines_c;
+ float effective_lb_latency_hiding_y;
+ float effective_lb_latency_hiding_c;
+ float dpp_output_buffer_lines_y;
+ float dpp_output_buffer_lines_c;
+ float dppopp_buffering_y;
+ float max_det_buffering_time_y;
+ float active_dram_clock_change_latency_margin_y;
+ float dppopp_buffering_c;
+ float max_det_buffering_time_c;
+ float active_dram_clock_change_latency_margin_c;
+ float writeback_dram_clock_change_latency_margin;
+ float min_active_dram_clock_change_margin;
+ float v_blank_of_min_active_dram_clock_change_margin;
+ float second_min_active_dram_clock_change_margin;
+ float min_vblank_dram_clock_change_margin;
+ float dram_clock_change_margin;
+ float dram_clock_change_support;
+ float wr_bandwidth;
+ float max_used_bw;
+};
+
+struct dcn_soc_bounding_box {
+ float sr_exit_time; /*us*/
+ float sr_enter_plus_exit_time; /*us*/
+ float urgent_latency; /*us*/
+ float write_back_latency; /*us*/
+ float percent_of_ideal_drambw_received_after_urg_latency; /*%*/
+ int max_request_size; /*bytes*/
+ float dcfclkv_max0p9; /*MHz*/
+ float dcfclkv_nom0p8; /*MHz*/
+ float dcfclkv_mid0p72; /*MHz*/
+ float dcfclkv_min0p65; /*MHz*/
+ float max_dispclk_vmax0p9; /*MHz*/
+ float max_dispclk_vmid0p72; /*MHz*/
+ float max_dispclk_vnom0p8; /*MHz*/
+ float max_dispclk_vmin0p65; /*MHz*/
+ float max_dppclk_vmax0p9; /*MHz*/
+ float max_dppclk_vnom0p8; /*MHz*/
+ float max_dppclk_vmid0p72; /*MHz*/
+ float max_dppclk_vmin0p65; /*MHz*/
+ float socclk; /*MHz*/
+ float fabric_and_dram_bandwidth_vmax0p9; /*GB/s*/
+ float fabric_and_dram_bandwidth_vnom0p8; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmid0p72; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmin0p65; /*GB/s*/
+ float phyclkv_max0p9; /*MHz*/
+ float phyclkv_nom0p8; /*MHz*/
+ float phyclkv_mid0p72; /*MHz*/
+ float phyclkv_min0p65; /*MHz*/
+ float downspreading; /*%*/
+ int round_trip_ping_latency_cycles; /*DCFCLK Cycles*/
+ int urgent_out_of_order_return_per_channel; /*bytes*/
+ int number_of_channels;
+ int vmm_page_size; /*bytes*/
+ float dram_clock_change_latency; /*us*/
+ int return_bus_width; /*bytes*/
+ float percent_disp_bw_limit; /*%*/
+};
+extern const struct dcn_soc_bounding_box dcn10_soc_defaults;
+
+struct dcn_ip_params {
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ enum dcn_bw_defs pte_enable;
+ int pte_chunk_size; /*kbytes*/
+ int meta_chunk_size; /*kbytes*/
+ int writeback_chunk_size; /*kbytes*/
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ int line_buffer_size; /*bit*/
+ int max_line_buffer_lines;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ int line_buffer_fixed_bpp;
+ int writeback_luma_buffer_size; /*kbytes*/
+ int writeback_chroma_buffer_size; /*kbytes*/
+ int max_num_dpp;
+ int max_num_writeback;
+ int max_dchub_topscl_throughput; /*pixels/dppclk*/
+ int max_pscl_tolb_throughput; /*pixels/dppclk*/
+ int max_lb_tovscl_throughput; /*pixels/dppclk*/
+ int max_vscl_tohscl_throughput; /*pixels/dppclk*/
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ int max_hscl_taps;
+ int max_vscl_taps;
+ int pte_buffer_size_in_requests;
+ float dispclk_ramping_margin; /*%*/
+ float under_scan_factor;
+ int max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int dcfclk_cstate_latency;
+};
+extern const struct dcn_ip_params dcn10_ip_defaults;
+
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks);
+
+void dcn_bw_update_from_pplib(struct dc *dc);
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+
+#endif /* __DCN_CALCS_H__ */
+
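
dcn_validate_bandwidth() and the per-state arrays above (dcfclk_per_state, max_dispclk, and friends) follow a common selection pattern: walk the voltage states from lowest to highest and settle on the first one whose clock limits cover the request. The sketch below shows only that selection idea with hypothetical per-state ceilings and invented values; it is not the DML-derived math the driver runs.

#include <stdio.h>

enum toy_state { V_MIN0P65, V_MID0P72, V_NOM0P8, V_MAX0P9, V_COUNT };

/* Hypothetical per-state DISPCLK ceilings in kHz (illustrative values only). */
static const int max_dispclk_khz[V_COUNT] = {
	[V_MIN0P65] = 400000,
	[V_MID0P72] = 500000,
	[V_NOM0P8]  = 600000,
	[V_MAX0P9]  = 720000,
};

/* Return the lowest voltage state whose ceiling covers the request,
 * or -1 when even the maximum state is not enough. */
static int pick_voltage_level(int required_dispclk_khz)
{
	for (int s = 0; s < V_COUNT; s++)
		if (required_dispclk_khz <= max_dispclk_khz[s])
			return s;
	return -1;
}

int main(void)
{
	printf("state for 540 MHz: %d\n", pick_voltage_level(540000));
	return 0;
}
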
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
new file mode 100644
index 000000000000..c93b9b9a817c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -0,0 +1,48 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_ABM_H__
+#define __DC_ABM_H__
+
+#include "dm_services_types.h"
+
+struct abm {
+ struct dc_context *ctx;
+ const struct abm_funcs *funcs;
+};
+
+struct abm_funcs {
+ void (*abm_init)(struct abm *abm);
+ bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
+ bool (*set_abm_immediate_disable)(struct abm *abm);
+ bool (*init_backlight)(struct abm *abm);
+ bool (*set_backlight_level)(struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id);
+ unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+ bool (*is_dmcu_initialized)(struct abm *abm);
+};
+
+#endif
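
struct abm follows DC's usual hook-table layout: a hardware-specific backend fills struct abm_funcs once, and everything else calls through abm->funcs without knowing which backend is present. The standalone sketch below shows that wiring with a trivial fake backend; the toy_* types and the dce_abm_set_level() name are hypothetical simplifications (the dc_context pointer is omitted).

#include <stdbool.h>
#include <stdio.h>

struct toy_abm;

struct toy_abm_funcs {
	bool (*set_abm_level)(struct toy_abm *abm, unsigned int level);
};

struct toy_abm {
	const struct toy_abm_funcs *funcs;
	unsigned int current_level;
};

/* Fake "DCE" backend: a real one would program the DMCU firmware. */
static bool dce_abm_set_level(struct toy_abm *abm, unsigned int level)
{
	abm->current_level = level;
	return true;
}

static const struct toy_abm_funcs dce_abm_funcs = {
	.set_abm_level = dce_abm_set_level,
};

int main(void)
{
	struct toy_abm abm = { .funcs = &dce_abm_funcs };

	/* Callers never care which backend sits behind the hook table. */
	if (abm.funcs->set_abm_level(&abm, 3))
		printf("ABM level is now %u\n", abm.current_level);
	return 0;
}
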
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
new file mode 100644
index 000000000000..925204f49717
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUDIO_H__
+#define __DAL_AUDIO_H__
+
+#include "audio_types.h"
+
+struct audio;
+
+struct audio_funcs {
+
+ bool (*endpoint_valid)(struct audio *audio);
+
+ void (*hw_init)(struct audio *audio);
+
+ void (*az_enable)(struct audio *audio);
+
+ void (*az_disable)(struct audio *audio);
+
+ void (*az_configure)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+ void (*wall_dto_setup)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+ void (*destroy)(struct audio **audio);
+};
+
+struct audio {
+ const struct audio_funcs *funcs;
+ struct dc_context *ctx;
+ unsigned int inst;
+};
+
+#endif /* __DAL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
new file mode 100644
index 000000000000..f5f69cd81f6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_CLOCK_H__
+#define __DISPLAY_CLOCK_H__
+
+#include "dm_services_types.h"
+
+
+struct clocks_value {
+ int dispclk_in_khz;
+ int max_pixelclk_in_khz;
+ int max_non_dp_phyclk_in_khz;
+ int max_dp_phyclk_in_khz;
+ bool dispclk_notify_pplib_done;
+ bool pixelclk_notify_pplib_done;
+ bool phyclk_notigy_pplib_done;
+ int dcfclock_in_khz;
+ int dppclk_in_khz;
+ int mclk_in_khz;
+ int phyclk_in_khz;
+ int common_vdd_level;
+};
+
+
+/* Structure containing all state-dependent clocks
+ * (dependent on "enum clocks_state") */
+struct state_dependent_clocks {
+ int display_clk_khz;
+ int pixel_clk_khz;
+};
+
+struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct clocks_value cur_clocks_value;
+};
+
+struct display_clock_funcs {
+ int (*set_clock)(struct display_clock *disp_clk,
+ int requested_clock_khz);
+
+ enum dm_pp_clocks_state (*get_required_clocks_state)(
+ struct display_clock *disp_clk,
+ struct state_dependent_clocks *req_clocks);
+
+ bool (*set_min_clocks_state)(struct display_clock *disp_clk,
+ enum dm_pp_clocks_state dm_pp_clocks_state);
+
+ int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
+
+ bool (*apply_clock_voltage_request)(
+ struct display_clock *disp_clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk);
+};
+
+#endif /* __DISPLAY_CLOCK_H__ */
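
The get_required_clocks_state hook maps a requested display/pixel clock pair
onto one of the coarse dm_pp_clocks_state levels using per-state limits of the
struct state_dependent_clocks kind. A small sketch of that lookup follows; the
state names and the limit values are invented for illustration and are not
taken from this patch.

    #include <stdio.h>

    /* Trimmed stand-in; real limits come from the per-ASIC resource code. */
    struct state_dependent_clocks {
            int display_clk_khz;
            int pixel_clk_khz;
    };

    enum clocks_state {
            CLOCKS_STATE_ULTRA_LOW,
            CLOCKS_STATE_LOW,
            CLOCKS_STATE_NOMINAL,
            CLOCKS_STATE_PERFORMANCE,
            CLOCKS_STATE_COUNT
    };

    /* Hypothetical per-state limits, purely illustrative. */
    static const struct state_dependent_clocks limits[CLOCKS_STATE_COUNT] = {
            [CLOCKS_STATE_ULTRA_LOW]   = { 352000, 330000 },
            [CLOCKS_STATE_LOW]         = { 467000, 400000 },
            [CLOCKS_STATE_NOMINAL]     = { 643000, 600000 },
            [CLOCKS_STATE_PERFORMANCE] = { 937000, 900000 },
    };

    /* Pick the lowest state whose limits satisfy the request. */
    static enum clocks_state required_state(const struct state_dependent_clocks *req)
    {
            enum clocks_state s;

            for (s = CLOCKS_STATE_ULTRA_LOW; s < CLOCKS_STATE_PERFORMANCE; s++)
                    if (req->display_clk_khz <= limits[s].display_clk_khz &&
                        req->pixel_clk_khz <= limits[s].pixel_clk_khz)
                            break;
            return s;
    }

    int main(void)
    {
            struct state_dependent_clocks req = { 600000, 594000 };

            printf("required state index: %d\n", required_state(&req));
            return 0;
    }
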
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
new file mode 100644
index 000000000000..0574c29cc4a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -0,0 +1,50 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DMCU_H__
+#define __DC_DMCU_H__
+
+#include "dm_services_types.h"
+
+struct dmcu {
+ struct dc_context *ctx;
+ const struct dmcu_funcs *funcs;
+};
+
+struct dmcu_funcs {
+ bool (*load_iram)(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes);
+ void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
+ void (*setup_psr)(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context);
+ void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
+ void (*set_psr_wait_loop)(struct dmcu *dmcu,
+ unsigned int wait_loop_number);
+ void (*get_psr_wait_loop)(unsigned int *psr_wait_loop_number);
+};
+
+#endif
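
The load_iram hook takes a start offset, a source buffer and a byte count, so
a caller can stream an ABM parameter table into DMCU IRAM in pieces. The
sketch below only shows that chunked-upload shape with a stubbed load_iram;
the table size and the chunk size are arbitrary illustrative values.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the load_iram hook; a real backend writes DMCU IRAM. */
    static bool load_iram(unsigned int start_offset, const char *src,
                          unsigned int bytes)
    {
            (void)src;
            printf("iram write: offset %u, %u bytes\n", start_offset, bytes);
            return true;
    }

    int main(void)
    {
            char table[600];                  /* hypothetical parameter table */
            const unsigned int chunk = 256;   /* illustrative chunk size */
            unsigned int off = 0;

            memset(table, 0, sizeof(table));
            while (off < sizeof(table)) {
                    unsigned int n = (unsigned int)(sizeof(table) - off);

                    if (n > chunk)
                            n = chunk;
                    if (!load_iram(off, table + off, n))
                            return 1;
                    off += n;
            }
            return 0;
    }
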
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
new file mode 100644
index 000000000000..83a68460edcd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_DPP_H__
+#define __DAL_DPP_H__
+
+#include "transform.h"
+
+struct dpp {
+ const struct dpp_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+struct dpp_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct dpp_funcs {
+ void (*dpp_reset)(struct dpp *dpp);
+
+ void (*dpp_set_scaler)(struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+ void (*dpp_set_pixel_storage_depth)(
+ struct dpp *dpp,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*dpp_get_optimal_number_of_taps)(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*dpp_set_gamut_remap)(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct dpp *dpp,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct dpp *dpp,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct dpp *dpp,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct dpp *dpp,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct dpp *dpp, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct dpp *dpp_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct dpp *dpp_base);
+
+ void (*set_cursor_attributes)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width
+ );
+
+};
+
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
new file mode 100644
index 000000000000..90d0148430fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_H__
+#define __DAL_GPIO_H__
+
+#include "gpio_types.h"
+
+struct gpio {
+ struct gpio_service *service;
+ struct hw_gpio_pin *pin;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ /* when the GPIO comes from VBIOS, it has a defined output state */
+ enum gpio_pin_output_state output_state;
+};
+
+#if 0
+struct gpio_funcs {
+
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gpio_pad)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+ /* HW translation */
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+#endif
+
+#endif /* __DAL_GPIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
new file mode 100644
index 000000000000..0d186be24cf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HUBP_H__
+#define __DAL_HUBP_H__
+
+#include "mem_input.h"
+
+struct hubp {
+ struct hubp_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ int opp_id;
+ int mpcc_id;
+ struct dc_cursor_attributes curs_attr;
+};
+
+
+struct hubp_funcs {
+ void (*hubp_setup)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct hubp *hubp, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ bool (*hubp_program_surface_flip_and_addr)(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*hubp_program_pte_vm)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*hubp_set_vm_system_aperture_settings)(
+ struct hubp *hubp,
+ struct vm_system_aperture_param *apt);
+
+ void (*hubp_set_vm_context0_settings)(
+ struct hubp *hubp,
+ const struct vm_context0_param *vm0);
+
+ void (*hubp_program_surface_config)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*hubp_is_flip_pending)(struct hubp *hubp);
+
+ void (*hubp_update_dchub)(struct hubp *hubp,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct hubp *hubp, bool blank);
+ void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
new file mode 100644
index 000000000000..9602f261b614
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SHARED_H__
+#define __DAL_HW_SHARED_H__
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "dc_hw_types.h"
+
+/******************************************************************************
+ * Data types shared between different Virtual HW blocks
+ ******************************************************************************/
+
+#define MAX_PIPES 6
+
+struct gamma_curve {
+ uint32_t offset;
+ uint32_t segments_num;
+};
+
+struct curve_points {
+ struct fixed31_32 x;
+ struct fixed31_32 y;
+ struct fixed31_32 offset;
+ struct fixed31_32 slope;
+
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_offset;
+ uint32_t custom_float_slope;
+};
+
+struct pwl_result_data {
+ struct fixed31_32 red;
+ struct fixed31_32 green;
+ struct fixed31_32 blue;
+
+ struct fixed31_32 delta_red;
+ struct fixed31_32 delta_green;
+ struct fixed31_32 delta_blue;
+
+ uint32_t red_reg;
+ uint32_t green_reg;
+ uint32_t blue_reg;
+
+ uint32_t delta_red_reg;
+ uint32_t delta_green_reg;
+ uint32_t delta_blue_reg;
+};
+
+struct pwl_params {
+ struct gamma_curve arr_curve_points[34];
+ struct curve_points arr_points[3];
+ struct pwl_result_data rgb_resulted[256 + 3];
+ uint32_t hw_points_num;
+};
+
+/* move to dpp
+ * While we are moving functionality out of opp into dpp to align
+ * HW programming with HW IP, we define these structs in hw_shared
+ * so we can still compile while refactoring.
+ */
+
+enum lb_pixel_depth {
+ /* do not change the values; they are used as a bit vector */
+ LB_PIXEL_DEPTH_18BPP = 1,
+ LB_PIXEL_DEPTH_24BPP = 2,
+ LB_PIXEL_DEPTH_30BPP = 4,
+ LB_PIXEL_DEPTH_36BPP = 8
+};
+
+enum graphics_csc_adjust_type {
+ GRAPHICS_CSC_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_CSC_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_CSC_ADJUST_TYPE_SW /* use adjustments */
+};
+
+enum ipp_degamma_mode {
+ IPP_DEGAMMA_MODE_BYPASS,
+ IPP_DEGAMMA_MODE_HW_sRGB,
+ IPP_DEGAMMA_MODE_HW_xvYCC,
+ IPP_DEGAMMA_MODE_USER_PWL
+};
+
+enum ipp_output_format {
+ IPP_OUTPUT_FORMAT_12_BIT_FIX,
+ IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
+ IPP_OUTPUT_FORMAT_FLOAT
+};
+
+enum expansion_mode {
+ EXPANSION_MODE_DYNAMIC,
+ EXPANSION_MODE_ZERO
+};
+
+struct default_adjustment {
+ enum lb_pixel_depth lb_color_depth;
+ enum dc_color_space out_color_space;
+ enum dc_color_space in_color_space;
+ enum dc_color_depth color_depth;
+ enum pixel_format surface_pixel_format;
+ enum graphics_csc_adjust_type csc_adjust_type;
+ bool force_hw_default;
+};
+
+struct out_csc_color_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum opp_regamma {
+ OPP_REGAMMA_BYPASS = 0,
+ OPP_REGAMMA_SRGB,
+ OPP_REGAMMA_3_6,
+ OPP_REGAMMA_USER
+};
+
+#endif /* __DAL_HW_SHARED_H__ */
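
As the comment on enum lb_pixel_depth notes, the enumerators are powers of two
so they can be combined into a capability mask as well as used individually.
A tiny sketch of that bit-vector usage follows; the particular capability mask
is hypothetical.

    #include <stdio.h>

    /* Copied from the header above: the values double as bit positions. */
    enum lb_pixel_depth {
            LB_PIXEL_DEPTH_18BPP = 1,
            LB_PIXEL_DEPTH_24BPP = 2,
            LB_PIXEL_DEPTH_30BPP = 4,
            LB_PIXEL_DEPTH_36BPP = 8
    };

    int main(void)
    {
            /* Hypothetical line buffer that handles 24 and 30 bpp. */
            unsigned int supported = LB_PIXEL_DEPTH_24BPP | LB_PIXEL_DEPTH_30BPP;

            printf("24bpp supported: %s\n",
                   (supported & LB_PIXEL_DEPTH_24BPP) ? "yes" : "no");
            printf("36bpp supported: %s\n",
                   (supported & LB_PIXEL_DEPTH_36BPP) ? "yes" : "no");
            return 0;
    }
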
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
new file mode 100644
index 000000000000..f11aa484f46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IPP_H__
+#define __DAL_IPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+
+#define MAXTRIX_COEFFICIENTS_NUMBER 12
+#define MAXTRIX_COEFFICIENTS_WRAP_NUMBER (MAXTRIX_COEFFICIENTS_NUMBER + 4)
+#define MAX_OVL_MATRIX_COUNT 12
+
+/* IPP RELATED */
+struct input_pixel_processor {
+ struct dc_context *ctx;
+ unsigned int inst;
+ const struct ipp_funcs *funcs;
+};
+
+enum ipp_prescale_mode {
+ IPP_PRESCALE_MODE_BYPASS,
+ IPP_PRESCALE_MODE_FIXED_SIGNED,
+ IPP_PRESCALE_MODE_FLOAT_SIGNED,
+ IPP_PRESCALE_MODE_FIXED_UNSIGNED,
+ IPP_PRESCALE_MODE_FLOAT_UNSIGNED
+};
+
+struct ipp_prescale_params {
+ enum ipp_prescale_mode mode;
+ uint16_t bias;
+ uint16_t scale;
+};
+
+
+
+enum ovl_color_space {
+ OVL_COLOR_SPACE_UNKNOWN = 0,
+ OVL_COLOR_SPACE_RGB,
+ OVL_COLOR_SPACE_YUV601,
+ OVL_COLOR_SPACE_YUV709
+};
+
+
+struct ipp_funcs {
+
+ /*** cursor ***/
+ void (*ipp_cursor_set_position)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param);
+
+ void (*ipp_cursor_set_attributes)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes);
+
+ /*** setup input pixel processing ***/
+
+ /* put the entire pixel processor to bypass */
+ void (*ipp_full_bypass)(
+ struct input_pixel_processor *ipp);
+
+ /* setup ipp to expand/convert input to pixel processor internal format */
+ void (*ipp_setup)(
+ struct input_pixel_processor *ipp,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ /* DCE function to setup IPP. TODO: see if we can consolidate to setup */
+ void (*ipp_program_prescale)(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params);
+
+ void (*ipp_program_input_lut)(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma);
+
+ /*** DEGAMMA RELATED ***/
+ void (*ipp_set_degamma)(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_degamma_pwl)(
+ struct input_pixel_processor *ipp,
+ const struct pwl_params *params);
+
+ void (*ipp_destroy)(struct input_pixel_processor **ipp);
+};
+
+#endif /* __DAL_IPP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
new file mode 100644
index 000000000000..3d33bcda7059
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -0,0 +1,134 @@
+/*
+ * link_encoder.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef LINK_ENCODER_H_
+#define LINK_ENCODER_H_
+
+#include "grph_object_defs.h"
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dc_context;
+struct encoder_set_dp_phy_pattern_param;
+struct link_mst_stream_allocation_table;
+struct dc_link_settings;
+struct link_training_settings;
+struct pipe_ctx;
+
+struct encoder_init_data {
+ enum channel_id channel;
+ struct graphics_object_id connector;
+ enum hpd_source_id hpd_source;
+ /* TODO: in DAL2 this was a pointer to EventManagerInterface */
+ struct graphics_object_id encoder;
+ struct dc_context *ctx;
+ enum transmitter transmitter;
+};
+
+struct encoder_feature_support {
+ union {
+ struct {
+ uint32_t IS_HBR2_CAPABLE:1;
+ uint32_t IS_HBR3_CAPABLE:1;
+ uint32_t IS_TPS3_CAPABLE:1;
+ uint32_t IS_TPS4_CAPABLE:1;
+ uint32_t IS_YCBCR_CAPABLE:1;
+ uint32_t HDMI_6GB_EN:1;
+ } bits;
+ uint32_t raw;
+ } flags;
+
+ enum dc_color_depth max_hdmi_deep_color;
+ unsigned int max_hdmi_pixel_clock;
+ bool ycbcr420_supported;
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+ /* For eDP 1.4, PSR v2*/
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char RESERVED : 2;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char RESERVED :6;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct link_encoder {
+ const struct link_encoder_funcs *funcs;
+ int32_t aux_channel_offset;
+ struct dc_context *ctx;
+ struct graphics_object_id id;
+ struct graphics_object_id connector;
+ uint32_t output_signals;
+ enum engine_id preferred_engine;
+ struct encoder_feature_support features;
+ enum transmitter transmitter;
+ enum hpd_source_id hpd_source;
+};
+
+struct link_encoder_funcs {
+ bool (*validate_output_with_stream)(
+ struct link_encoder *enc, const struct dc_stream_state *stream);
+ void (*hw_init)(struct link_encoder *enc);
+ void (*setup)(struct link_encoder *enc,
+ enum signal_type signal);
+ void (*enable_tmds_output)(struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+ void (*enable_dp_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*enable_dp_mst_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*disable_output)(struct link_encoder *link_enc,
+ enum signal_type signal, struct dc_link *link);
+ void (*dp_set_lane_settings)(struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+ void (*dp_set_phy_pattern)(struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *para);
+ void (*update_mst_stream_allocation_table)(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+ void (*psr_program_dp_dphy_fast_training)(struct link_encoder *enc,
+ bool exit_link_training_required);
+ void (*psr_program_secondary_packet)(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+ void (*connect_dig_be_to_fe)(struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+ void (*enable_hpd)(struct link_encoder *enc);
+ void (*disable_hpd)(struct link_encoder *enc);
+ void (*destroy)(struct link_encoder **enc);
+};
+
+#endif /* LINK_ENCODER_H_ */
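
union dpcd_psr_configuration overlays named bit-fields on the single byte that
is written to the sink's DPCD PSR configuration register: a caller sets the
.bits members and transmits .raw. The sketch below shows that packing; C
bit-field layout is compiler-defined, and the LSB-first ordering assumed here
matches the compilers the kernel is built with.

    #include <stdio.h>

    /* Copied from the header above. */
    union dpcd_psr_configuration {
            struct {
                    unsigned char ENABLE : 1;
                    unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
                    unsigned char CRC_VERIFICATION : 1;
                    unsigned char FRAME_CAPTURE_INDICATION : 1;
                    unsigned char LINE_CAPTURE_INDICATION : 1;
                    unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
                    unsigned char RESERVED : 2;
            } bits;
            unsigned char raw;
    };

    int main(void)
    {
            union dpcd_psr_configuration cfg = { .raw = 0 };

            /* Enable PSR with CRC verification; .raw is the byte a driver
             * would send to the sink over the AUX channel. */
            cfg.bits.ENABLE = 1;
            cfg.bits.CRC_VERIFICATION = 1;
            printf("DPCD PSR configuration byte: 0x%02x\n", (unsigned)cfg.raw);
            return 0;
    }
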
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
new file mode 100644
index 000000000000..3e1e7e6a8792
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_MEM_INPUT_H__
+#define __DAL_MEM_INPUT_H__
+
+#include "dc.h"
+#include "include/grph_object_id.h"
+
+#include "dml/display_mode_structs.h"
+
+struct dchub_init_data;
+struct cstate_pstate_watermarks_st {
+ uint32_t cstate_exit_ns;
+ uint32_t cstate_enter_plus_exit_ns;
+ uint32_t pstate_change_ns;
+};
+
+struct dcn_watermarks {
+ uint32_t pte_meta_urgent_ns;
+ uint32_t urgent_ns;
+ struct cstate_pstate_watermarks_st cstate_pstate;
+};
+
+struct dcn_watermark_set {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+};
+
+struct dce_watermarks {
+ int a_mark;
+ int b_mark;
+ int c_mark;
+ int d_mark;
+};
+
+struct stutter_modes {
+ bool enhanced;
+ bool quad_dmif_buffer;
+ bool watermark_nb_pstate;
+};
+
+struct mem_input {
+ struct mem_input_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ struct stutter_modes stutter_mode;
+};
+
+struct vm_system_aperture_param {
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct vm_context0_param {
+ PHYSICAL_ADDRESS_LOC pte_base;
+ PHYSICAL_ADDRESS_LOC pte_start;
+ PHYSICAL_ADDRESS_LOC pte_end;
+ PHYSICAL_ADDRESS_LOC fault_default;
+};
+
+struct mem_input_funcs {
+ void (*mem_input_setup)(
+ struct mem_input *mem_input,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct mem_input *mem_input, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct mem_input *mem_input,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ void (*mem_input_program_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*mem_input_program_chroma_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*allocate_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t h_total,/* for current target */
+ uint32_t v_total,/* for current target */
+ uint32_t pix_clk_khz,/* for current target */
+ uint32_t total_streams_num);
+
+ void (*free_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t paths_num);
+
+ bool (*mem_input_program_surface_flip_and_addr)(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*mem_input_program_pte_vm)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*mem_input_set_vm_system_aperture_settings)(
+ struct mem_input *mem_input,
+ struct vm_system_aperture_param *apt);
+
+ void (*mem_input_set_vm_context0_settings)(
+ struct mem_input *mem_input,
+ const struct vm_context0_param *vm0);
+
+ void (*mem_input_program_surface_config)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*mem_input_is_flip_pending)(struct mem_input *mem_input);
+
+ void (*mem_input_update_dchub)(struct mem_input *mem_input,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct mem_input *mi, bool blank);
+ void (*set_hubp_blank_en)(struct mem_input *mi, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
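
struct dcn_watermark_set carries four dcn_watermarks instances (a..d), one per
clock/voltage context, each combining urgent and cstate/pstate thresholds in
nanoseconds. The sketch below only shows how one set is populated; the numbers
are made up for illustration and would normally come from the DCN bandwidth
calculations.

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed copies of the watermark structs above. */
    struct cstate_pstate_watermarks_st {
            uint32_t cstate_exit_ns;
            uint32_t cstate_enter_plus_exit_ns;
            uint32_t pstate_change_ns;
    };

    struct dcn_watermarks {
            uint32_t pte_meta_urgent_ns;
            uint32_t urgent_ns;
            struct cstate_pstate_watermarks_st cstate_pstate;
    };

    int main(void)
    {
            /* Hypothetical values for the "a" set. */
            struct dcn_watermarks a = {
                    .pte_meta_urgent_ns = 4000,
                    .urgent_ns = 4000,
                    .cstate_pstate = {
                            .cstate_exit_ns = 10000,
                            .cstate_enter_plus_exit_ns = 12000,
                            .pstate_change_ns = 18000,
                    },
            };

            printf("urgent watermark: %u ns\n", (unsigned)a.urgent_ns);
            return 0;
    }
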
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
new file mode 100644
index 000000000000..d4188b2c0626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -0,0 +1,61 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_H__
+#define __DC_MPCC_H__
+
+#include "dc_hw_types.h"
+#include "opp.h"
+
+struct mpcc_cfg {
+ int dpp_id;
+ int opp_id;
+ struct mpc_tree_cfg *tree_cfg;
+ unsigned int z_index;
+
+ struct tg_color black_color;
+ bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
+};
+
+struct mpc {
+ const struct mpc_funcs *funcs;
+ struct dc_context *ctx;
+};
+
+struct mpc_funcs {
+ int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+ void (*remove)(struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int mpcc_inst);
+
+ void (*wait_for_idle)(struct mpc *mpc, int id);
+
+ void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
new file mode 100644
index 000000000000..75adb8fec551
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_OPP_H__
+#define __DAL_OPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "transform.h"
+
+struct fixed31_32;
+
+/* TODO: Need cleanup */
+enum clamping_range {
+ CLAMPING_FULL_RANGE = 0, /* No Clamping */
+ CLAMPING_LIMITED_RANGE_8BPC, /* 8 bpc: Clamping 1 to FE */
+ CLAMPING_LIMITED_RANGE_10BPC, /* 10 bpc: Clamping 4 to 3FB */
+ CLAMPING_LIMITED_RANGE_12BPC, /* 12 bpc: Clamping 10 to FEF */
+ /* Use programmable clamping value on FMT_CLAMP_COMPONENT_R/G/B. */
+ CLAMPING_LIMITED_RANGE_PROGRAMMABLE
+};
+
+struct clamping_and_pixel_encoding_params {
+ enum dc_pixel_encoding pixel_encoding; /* Pixel Encoding */
+ enum clamping_range clamping_level; /* Clamping identifier */
+ enum dc_color_depth c_depth; /* Deep color use. */
+};
+
+struct bit_depth_reduction_params {
+ struct {
+ /* truncate/round */
+ /* trunc/round enabled*/
+ uint32_t TRUNCATE_ENABLED:1;
+ /* 2 bits: 0=6 bpc, 1=8 bpc, 2 = 10bpc*/
+ uint32_t TRUNCATE_DEPTH:2;
+ /* truncate or round*/
+ uint32_t TRUNCATE_MODE:1;
+
+ /* spatial dither */
+ /* Spatial Bit Depth Reduction enabled*/
+ uint32_t SPATIAL_DITHER_ENABLED:1;
+ /* 2 bits: 0=6 bpc, 1 = 8 bpc, 2 = 10bpc*/
+ uint32_t SPATIAL_DITHER_DEPTH:2;
+ /* 0-3 to select patterns*/
+ uint32_t SPATIAL_DITHER_MODE:2;
+ /* Enable RGB random dithering*/
+ uint32_t RGB_RANDOM:1;
+ /* Enable Frame random dithering*/
+ uint32_t FRAME_RANDOM:1;
+ /* Enable HighPass random dithering*/
+ uint32_t HIGHPASS_RANDOM:1;
+
+ /* temporal dither*/
+ /* frame modulation enabled*/
+ uint32_t FRAME_MODULATION_ENABLED:1;
+ /* same as for trunc/spatial*/
+ uint32_t FRAME_MODULATION_DEPTH:2;
+ /* 2/4 gray levels*/
+ uint32_t TEMPORAL_LEVEL:1;
+ uint32_t FRC25:2;
+ uint32_t FRC50:2;
+ uint32_t FRC75:2;
+ } flags;
+
+ uint32_t r_seed_value;
+ uint32_t b_seed_value;
+ uint32_t g_seed_value;
+ enum dc_pixel_encoding pixel_encoding;
+};
+
+enum wide_gamut_regamma_mode {
+ /* 0x0 - BITS2:0 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_B,
+ /* 0x0 - BITS6:4 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_B
+};
+
+struct gamma_pixel {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+enum channel_name {
+ CHANNEL_NAME_RED,
+ CHANNEL_NAME_GREEN,
+ CHANNEL_NAME_BLUE
+};
+
+struct custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+struct hw_x_point {
+ uint32_t custom_float_x;
+ struct fixed31_32 x;
+ struct fixed31_32 regamma_y_red;
+ struct fixed31_32 regamma_y_green;
+ struct fixed31_32 regamma_y_blue;
+
+};
+
+struct pwl_float_data_ex {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+ struct fixed31_32 delta_r;
+ struct fixed31_32 delta_g;
+ struct fixed31_32 delta_b;
+};
+
+enum hw_point_position {
+ /* hw point sits between left and right sw points */
+ HW_POINT_POSITION_MIDDLE,
+ /* hw point lies to the left of the left (smaller) sw point */
+ HW_POINT_POSITION_LEFT,
+ /* hw point lies to the right of the right (bigger) sw point */
+ HW_POINT_POSITION_RIGHT
+};
+
+struct gamma_point {
+ int32_t left_index;
+ int32_t right_index;
+ enum hw_point_position pos;
+ struct fixed31_32 coeff;
+};
+
+struct pixel_gamma_point {
+ struct gamma_point r;
+ struct gamma_point g;
+ struct gamma_point b;
+};
+
+struct gamma_coefficients {
+ struct fixed31_32 a0[3];
+ struct fixed31_32 a1[3];
+ struct fixed31_32 a2[3];
+ struct fixed31_32 a3[3];
+ struct fixed31_32 user_gamma[3];
+ struct fixed31_32 user_contrast;
+ struct fixed31_32 user_brightness;
+};
+
+struct pwl_float_data {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+struct mpc_tree_cfg {
+ int num_pipes;
+ int dpp[MAX_PIPES];
+ int mpcc[MAX_PIPES];
+};
+
+struct output_pixel_processor {
+ struct dc_context *ctx;
+ uint32_t inst;
+ struct pwl_params regamma_params;
+ struct mpc_tree_cfg mpc_tree;
+ bool mpcc_disconnect_pending[MAX_PIPES];
+ const struct opp_funcs *funcs;
+};
+
+enum fmt_stereo_action {
+ FMT_STEREO_ACTION_ENABLE = 0,
+ FMT_STEREO_ACTION_DISABLE,
+ FMT_STEREO_ACTION_UPDATE_POLARITY
+};
+
+struct opp_grph_csc_adjustment {
+ //enum grph_color_adjust_option color_adjust_option;
+ enum dc_color_space c_space;
+ enum dc_color_depth color_depth; /* clean up to uint32_t */
+ enum graphics_csc_adjust_type csc_adjust_type;
+ int32_t adjust_divider;
+ int32_t grph_cont;
+ int32_t grph_sat;
+ int32_t grph_bright;
+ int32_t grph_hue;
+};
+
+/* Underlay related types */
+
+struct hw_adjustment_range {
+ int32_t hw_default;
+ int32_t min;
+ int32_t max;
+ int32_t step;
+ uint32_t divider; /* (actually HW range is min/divider; divider !=0) */
+};
+
+enum ovl_csc_adjust_item {
+ OVERLAY_BRIGHTNESS = 0,
+ OVERLAY_GAMMA,
+ OVERLAY_CONTRAST,
+ OVERLAY_SATURATION,
+ OVERLAY_HUE,
+ OVERLAY_ALPHA,
+ OVERLAY_ALPHA_PER_PIX,
+ OVERLAY_COLOR_TEMPERATURE
+};
+
+struct opp_funcs {
+
+
+ /* FORMATTER RELATED */
+
+ void (*opp_program_fmt)(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+ void (*opp_set_dyn_expansion)(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+ void (*opp_program_bit_depth_reduction)(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+ /* underlay related */
+ void (*opp_get_underlay_adjustment_range)(
+ struct output_pixel_processor *opp,
+ enum ovl_csc_adjust_item overlay_adjust_item,
+ struct hw_adjustment_range *range);
+
+ void (*opp_destroy)(struct output_pixel_processor **opp);
+
+ void (*opp_set_stereo_polarity)(
+ struct output_pixel_processor *opp,
+ bool enable,
+ bool rightEyePolarity);
+
+ void (*opp_set_test_pattern)(
+ struct output_pixel_processor *opp,
+ bool enable);
+};
+
+#endif
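
The flags block in struct bit_depth_reduction_params packs the FMT dithering
and truncation controls into individual bit-fields, for example enabling
spatial dithering and selecting its depth code. A trimmed sketch follows; the
field subset and the chosen values are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed copy of the flags block above: spatial-dither bits only. */
    struct bit_depth_reduction_flags {
            uint32_t SPATIAL_DITHER_ENABLED:1;
            uint32_t SPATIAL_DITHER_DEPTH:2;   /* 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc */
            uint32_t SPATIAL_DITHER_MODE:2;
            uint32_t RGB_RANDOM:1;
            uint32_t FRAME_RANDOM:1;
    };

    int main(void)
    {
            /* Hypothetical programming for 8 bpc spatial dithering. */
            struct bit_depth_reduction_flags f = {
                    .SPATIAL_DITHER_ENABLED = 1,
                    .SPATIAL_DITHER_DEPTH = 1,   /* 8 bpc */
                    .SPATIAL_DITHER_MODE = 0,
                    .RGB_RANDOM = 1,
                    .FRAME_RANDOM = 0,
            };

            printf("dither enabled: %u, depth code: %u\n",
                   (unsigned)f.SPATIAL_DITHER_ENABLED,
                   (unsigned)f.SPATIAL_DITHER_DEPTH);
            return 0;
    }
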
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
new file mode 100644
index 000000000000..3050afe8e8a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -0,0 +1,130 @@
+/*
+ * stream_encoder.h
+ *
+ */
+
+#ifndef STREAM_ENCODER_H_
+#define STREAM_ENCODER_H_
+
+#include "audio_types.h"
+
+struct dc_bios;
+struct dc_context;
+struct dc_crtc_timing;
+
+struct encoder_info_packet {
+ bool valid;
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t hb3;
+ uint8_t sb[32];
+};
+
+struct encoder_info_frame {
+ /* auxiliary video information */
+ struct encoder_info_packet avi;
+ struct encoder_info_packet gamut;
+ struct encoder_info_packet vendor;
+ /* source product description */
+ struct encoder_info_packet spd;
+ /* video stream configuration */
+ struct encoder_info_packet vsc;
+ /* HDR Static MetaData */
+ struct encoder_info_packet hdrsmd;
+};
+
+struct encoder_unblank_param {
+ struct dc_link_settings link_settings;
+ unsigned int pixel_clk_khz;
+};
+
+struct encoder_set_dp_phy_pattern_param {
+ enum dp_test_pattern dp_phy_pattern;
+ const uint8_t *custom_pattern;
+ uint32_t custom_pattern_size;
+ enum dp_panel_mode dp_panel_mode;
+};
+
+struct stream_encoder {
+ const struct stream_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_bios *bp;
+ enum engine_id id;
+};
+
+struct stream_encoder_funcs {
+ void (*dp_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space);
+
+ void (*hdmi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
+
+ void (*dvi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+
+ void (*set_mst_bandwidth)(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+ void (*update_hdmi_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_hdmi_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*update_dp_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_dp_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*dp_blank)(
+ struct stream_encoder *enc);
+
+ void (*dp_unblank)(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
+ void (*audio_mute_control)(
+ struct stream_encoder *enc, bool mute);
+
+ void (*dp_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+ void (*dp_audio_enable) (
+ struct stream_encoder *enc);
+
+ void (*dp_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*hdmi_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+ void (*hdmi_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*setup_stereo_sync) (
+ struct stream_encoder *enc,
+ int tg_inst,
+ bool enable);
+
+ void (*set_avmute)(
+ struct stream_encoder *enc, bool enable);
+};
+
+#endif /* STREAM_ENCODER_H_ */
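
struct encoder_info_packet stores an infoframe as four header bytes plus up to
32 payload bytes, with valid telling the encoder whether to transmit it. The
helper below is a hypothetical illustration of filling one packet; the AVI
header bytes (type 0x82, version 2, length 13) follow CTA-861 rather than
anything defined by this patch, and the payload is left zeroed for brevity.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copied from the header above. */
    struct encoder_info_packet {
            bool valid;
            uint8_t hb0;
            uint8_t hb1;
            uint8_t hb2;
            uint8_t hb3;
            uint8_t sb[32];
    };

    /* Hypothetical helper: fill a packet and mark it ready to send. */
    static void fill_info_packet(struct encoder_info_packet *pkt,
                                 uint8_t type, uint8_t version, uint8_t len,
                                 const uint8_t *payload)
    {
            memset(pkt, 0, sizeof(*pkt));
            if (len > sizeof(pkt->sb))
                    len = sizeof(pkt->sb);
            pkt->hb0 = type;
            pkt->hb1 = version;
            pkt->hb2 = len;
            memcpy(pkt->sb, payload, len);
            pkt->valid = true;
    }

    int main(void)
    {
            uint8_t avi_body[13] = { 0 };   /* payload omitted for brevity */
            struct encoder_info_packet avi;

            fill_info_packet(&avi, 0x82, 0x02, 13, avi_body);
            printf("avi valid: %d, hb0: 0x%02x\n", avi.valid, (unsigned)avi.hb0);
            return 0;
    }
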
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
new file mode 100644
index 000000000000..c6ab38c5b2be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
+#define __DAL_TIMING_GENERATOR_TYPES_H__
+
+struct dc_bios;
+
+/* Contains CRTC vertical/horizontal pixel counters */
+struct crtc_position {
+ int32_t vertical_count;
+ int32_t horizontal_count;
+ int32_t nominal_vcount;
+};
+
+struct dcp_gsl_params {
+ int gsl_group;
+ int gsl_master;
+};
+
+/* Parameters for dynamic refresh rate (DRR) mode */
+struct drr_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ bool immediate_flip;
+};
+
+#define LEFT_EYE_3D_PRIMARY_SURFACE 1
+#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
+
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
+enum crtc_state {
+ CRTC_STATE_VBLANK = 0,
+ CRTC_STATE_VACTIVE
+};
+
+struct _dlg_otg_param {
+ int vstartup_start;
+ int vupdate_offset;
+ int vupdate_width;
+ int vready_offset;
+ enum signal_type signal;
+};
+
+struct crtc_stereo_flags {
+ uint8_t PROGRAM_STEREO : 1;
+ uint8_t PROGRAM_POLARITY : 1;
+ uint8_t RIGHT_EYE_POLARITY : 1;
+ uint8_t FRAME_PACKED : 1;
+ uint8_t DISABLE_STEREO_DP_SYNC : 1;
+};
+
+struct timing_generator {
+ const struct timing_generator_funcs *funcs;
+ struct dc_bios *bp;
+ struct dc_context *ctx;
+ struct _dlg_otg_param dlg_otg_param;
+ int inst;
+};
+
+struct dc_crtc_timing;
+
+struct drr_params;
+
+struct timing_generator_funcs {
+ bool (*validate_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+ void (*program_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+ bool (*enable_crtc)(struct timing_generator *tg);
+ bool (*disable_crtc)(struct timing_generator *tg);
+ bool (*is_counter_moving)(struct timing_generator *tg);
+ void (*get_position)(struct timing_generator *tg,
+ struct crtc_position *position);
+
+ uint32_t (*get_frame_count)(struct timing_generator *tg);
+ void (*get_scanoutpos)(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+ void (*set_early_control)(struct timing_generator *tg,
+ uint32_t early_cntl);
+ void (*wait_for_state)(struct timing_generator *tg,
+ enum crtc_state state);
+ void (*set_blank)(struct timing_generator *tg,
+ bool enable_blanking);
+ bool (*is_blanked)(struct timing_generator *tg);
+ void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
+ void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
+ void (*set_colors)(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+ void (*disable_vga)(struct timing_generator *tg);
+ bool (*did_triggered_reset_occur)(struct timing_generator *tg);
+ void (*setup_global_swap_lock)(struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+ void (*unlock)(struct timing_generator *tg);
+ void (*lock)(struct timing_generator *tg);
+ void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst);
+ void (*disable_reset_trigger)(struct timing_generator *tg);
+ void (*tear_down_global_swap_lock)(struct timing_generator *tg);
+ void (*enable_advanced_request)(struct timing_generator *tg,
+ bool enable, const struct dc_crtc_timing *timing);
+ void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
+ void (*set_static_screen_control)(struct timing_generator *tg,
+ uint32_t value);
+ void (*set_test_pattern)(
+ struct timing_generator *tg,
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+ bool (*arm_vert_intr)(struct timing_generator *tg, uint8_t width);
+
+ void (*program_global_sync)(struct timing_generator *tg);
+ void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
+ void (*program_stereo)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+ bool (*is_stereo_left_eye)(struct timing_generator *tg);
+};
+
+#endif
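
struct drr_params expresses a variable-refresh range as a vertical-total
range: for a fixed horizontal total, refresh rate = pixel clock /
(h_total * v_total), so the highest refresh rate maps to vertical_total_min
and the lowest to vertical_total_max. The sketch below derives that range for
an assumed timing; the 148.5 MHz pixel clock, 2200 h_total and 40-60 Hz window
are illustrative values, not taken from this patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the header above. */
    struct drr_params {
            uint32_t vertical_total_min;
            uint32_t vertical_total_max;
            bool immediate_flip;
    };

    static uint32_t v_total_for_refresh(uint32_t pix_clk_khz, uint32_t h_total,
                                        uint32_t refresh_hz)
    {
            return (pix_clk_khz * 1000u) / (h_total * refresh_hz);
    }

    int main(void)
    {
            struct drr_params params = {
                    /* higher refresh -> smaller vertical total */
                    .vertical_total_min = v_total_for_refresh(148500, 2200, 60),
                    .vertical_total_max = v_total_for_refresh(148500, 2200, 40),
                    .immediate_flip = false,
            };

            printf("vertical total range: %u..%u\n",
                   (unsigned)params.vertical_total_min,
                   (unsigned)params.vertical_total_max);
            return 0;
    }
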
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
new file mode 100644
index 000000000000..7c08bc62c1f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_H__
+#define __DAL_TRANSFORM_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "fixed31_32.h"
+
+#define CSC_TEMPERATURE_MATRIX_SIZE 9
+
+struct bit_depth_reduction_params;
+
+struct transform {
+ const struct transform_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+/* Colorimetry */
+enum colorimetry {
+ COLORIMETRY_NO_DATA = 0,
+ COLORIMETRY_ITU601 = 1,
+ COLORIMETRY_ITU709 = 2,
+ COLORIMETRY_EXTENDED = 3
+};
+
+enum colorimetry_ext {
+ COLORIMETRYEX_XVYCC601 = 0,
+ COLORIMETRYEX_XVYCC709 = 1,
+ COLORIMETRYEX_SYCC601 = 2,
+ COLORIMETRYEX_ADOBEYCC601 = 3,
+ COLORIMETRYEX_ADOBERGB = 4,
+ COLORIMETRYEX_BT2020YCC = 5,
+ COLORIMETRYEX_BT2020RGBYCBCR = 6,
+ COLORIMETRYEX_RESERVED = 7
+};
+
+enum active_format_info {
+ ACTIVE_FORMAT_NO_DATA = 0,
+ ACTIVE_FORMAT_VALID = 1
+};
+
+/* Active format aspect ratio */
+enum active_format_aspect_ratio {
+ ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE = 8,
+ ACTIVE_FORMAT_ASPECT_RATIO_4_3 = 9,
+ ACTIVE_FORMAT_ASPECT_RATIO_16_9 = 0XA,
+ ACTIVE_FORMAT_ASPECT_RATIO_14_9 = 0XB
+};
+
+enum bar_info {
+ BAR_INFO_NOT_VALID = 0,
+ BAR_INFO_VERTICAL_VALID = 1,
+ BAR_INFO_HORIZONTAL_VALID = 2,
+ BAR_INFO_BOTH_VALID = 3
+};
+
+enum picture_scaling {
+ PICTURE_SCALING_UNIFORM = 0,
+ PICTURE_SCALING_HORIZONTAL = 1,
+ PICTURE_SCALING_VERTICAL = 2,
+ PICTURE_SCALING_BOTH = 3
+};
+
+/* RGB quantization range */
+enum rgb_quantization_range {
+ RGB_QUANTIZATION_DEFAULT_RANGE = 0,
+ RGB_QUANTIZATION_LIMITED_RANGE = 1,
+ RGB_QUANTIZATION_FULL_RANGE = 2,
+ RGB_QUANTIZATION_RESERVED = 3
+};
+
+/* YYC quantization range */
+enum yyc_quantization_range {
+ YYC_QUANTIZATION_LIMITED_RANGE = 0,
+ YYC_QUANTIZATION_FULL_RANGE = 1,
+ YYC_QUANTIZATION_RESERVED2 = 2,
+ YYC_QUANTIZATION_RESERVED3 = 3
+};
+
+enum graphics_gamut_adjust_type {
+ GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_GAMUT_ADJUST_TYPE_SW /* use adjustments */
+};
+
+enum lb_memory_config {
+ /* Enable all 3 pieces of memory */
+ LB_MEMORY_CONFIG_0 = 0,
+
+ /* Enable only the first piece of memory */
+ LB_MEMORY_CONFIG_1 = 1,
+
+ /* Enable only the second piece of memory */
+ LB_MEMORY_CONFIG_2 = 2,
+
+ /* Only applicable in 4:2:0 mode, enable all 3 pieces of memory and the
+ * last piece of chroma memory used for the luma storage
+ */
+ LB_MEMORY_CONFIG_3 = 3
+};
+
+struct xfm_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct overscan_info {
+ int left;
+ int right;
+ int top;
+ int bottom;
+};
+
+struct scaling_ratios {
+ struct fixed31_32 horz;
+ struct fixed31_32 vert;
+ struct fixed31_32 horz_c;
+ struct fixed31_32 vert_c;
+};
+
+struct sharpness_adj {
+ int horz;
+ int vert;
+};
+
+struct line_buffer_params {
+ bool alpha_en;
+ bool pixel_expan_mode;
+ bool interleave_en;
+ int dynamic_pixel_depth;
+ enum lb_pixel_depth depth;
+};
+
+struct scl_inits {
+ struct fixed31_32 h;
+ struct fixed31_32 h_c;
+ struct fixed31_32 v;
+ struct fixed31_32 v_bot;
+ struct fixed31_32 v_c;
+ struct fixed31_32 v_c_bot;
+};
+
+struct scaler_data {
+ int h_active;
+ int v_active;
+ struct scaling_taps taps;
+ struct rect viewport;
+ struct rect viewport_c;
+ struct rect recout;
+ struct scaling_ratios ratios;
+ struct scl_inits inits;
+ struct sharpness_adj sharpness;
+ enum pixel_format format;
+ struct line_buffer_params lb_params;
+};
+
+struct transform_funcs {
+ void (*transform_reset)(struct transform *xfm);
+
+ void (*transform_set_scaler)(struct transform *xfm,
+ const struct scaler_data *scl_data);
+
+ void (*transform_set_pixel_storage_depth)(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*transform_get_optimal_number_of_taps)(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*transform_set_gamut_remap)(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct transform *xfm,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct transform *xfm,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct transform *xfm,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct transform *xfm, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct transform *xfm_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct transform *xfm_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct transform *xfm_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct transform *xfm_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct transform *xfm_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct transform *xfm_base);
+
+ void (*set_cursor_attributes)(
+ struct transform *xfm_base,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct transform *xfm_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width
+ );
+
+};
+
+const uint16_t *get_filter_2tap_16p(void);
+const uint16_t *get_filter_2tap_64p(void);
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio);
+
+/* Defines the pixel processing capability of the DSCL */
+enum dscl_data_processing_format {
+ DSCL_DATA_PRCESSING_FIXED_FORMAT, /* The DSCL processes pixel data in fixed format */
+ DSCL_DATA_PRCESSING_FLOAT_FORMAT, /* The DSCL processes pixel data in float format */
+};
+
+/*
+ * The DPP capabilities structure contains enumerations that specify the
+ * HW processing features and associated function pointers that provide
+ * the function interface, which can be overloaded by implementations
+ * with different capabilities.
+ */
+struct dpp_caps {
+ /* DSCL processing pixel data in fixed or float format */
+ enum dscl_data_processing_format dscl_data_proc_format;
+
+ /* Calculates the number of partitions in the line buffer.
+ * The implementation of this function is overloaded for
+ * different versions of DSCL LB.
+ */
+ void (*dscl_calc_lb_num_partitions)(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+};
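+
+/* Illustrative call-site sketch (variable names are hypothetical): scaler
+ * programming picks the line buffer partitioning through the capability hook
+ * instead of hard-coding it, e.g.
+ *
+ *   int num_part_y, num_part_c;
+ *
+ *   xfm->caps->dscl_calc_lb_num_partitions(
+ *       scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
+ */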
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
new file mode 100644
index 000000000000..8734689a9245
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_H__
+#define __DC_HW_SEQUENCER_H__
+#include "dc_types.h"
+#include "clock_source.h"
+#include "inc/hw/timing_generator.h"
+#include "inc/hw/link_encoder.h"
+#include "core_status.h"
+
+enum pipe_gating_control {
+ PIPE_GATING_CONTROL_DISABLE = 0,
+ PIPE_GATING_CONTROL_ENABLE,
+ PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+};
+
+struct dce_hwseq {
+ struct dc_context *ctx;
+ const struct dce_hwseq_registers *regs;
+ const struct dce_hwseq_shift *shifts;
+ const struct dce_hwseq_mask *masks;
+ struct dce_hwseq_wa wa;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dchub_init_data;
+struct dc_static_screen_events;
+struct resource_pool;
+struct resource_context;
+
+struct hw_sequencer_funcs {
+
+ void (*init_hw)(struct dc *dc);
+
+ enum dc_status (*apply_ctx_to_hw)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*reset_hw_ctx_wrap)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*apply_ctx_for_surface)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context);
+
+ void (*set_plane_config)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx);
+
+ void (*program_gamut_remap)(
+ struct pipe_ctx *pipe_ctx);
+
+ void (*program_csc_matrix)(
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix);
+
+ void (*update_plane_addr)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*update_dchub)(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+
+ void (*update_pending_status)(
+ struct pipe_ctx *pipe_ctx);
+
+ bool (*set_input_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+
+ bool (*set_output_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+
+ void (*power_down)(struct dc *dc);
+
+ void (*enable_accelerated_mode)(struct dc *dc);
+
+ void (*enable_timing_synchronization)(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+
+ void (*enable_display_pipe_clock_gating)(
+ struct dc_context *ctx,
+ bool clock_gating);
+
+ bool (*enable_display_power_gating)(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+
+ void (*power_down_front_end)(struct dc *dc, int fe_idx);
+
+ void (*power_on_front_end)(struct dc *dc,
+ struct pipe_ctx *pipe,
+ struct dc_state *context);
+
+ void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_stream)(struct pipe_ctx *pipe_ctx,
+ int option);
+
+ void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+ void (*pipe_control_lock)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+ void (*set_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ int vmin, int vmax);
+
+ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ struct crtc_position *position);
+
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events);
+
+ enum dc_status (*prog_pixclk_crtc_otg)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+
+ void (*setup_stereo)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc *dc);
+
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+
+ void (*log_hw_state)(struct dc *dc);
+
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
+ void (*optimize_shared_resources)(struct dc *dc);
+ void (*edp_power_control)(
+ struct link_encoder *enc,
+ bool enable);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg);
+
+#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
new file mode 100644
index 000000000000..f2b8c9a376d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_HWSS_H__
+#define __DC_LINK_HWSS_H__
+
+#include "inc/core_status.h"
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern);
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings);
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size);
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
new file mode 100644
index 000000000000..77eb72874e90
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+
+#include "dm_services.h"
+
+/* macros for register read/write
+ * users of these macros need to define:
+ *
+ * CTX ==> macro expanding to a pointer to the dc_context
+ * eg. aud110->base.ctx
+ *
+ * REG ==> macro expanding to the location (offset) of the register
+ * eg. aud110->regs->reg
+ */
+#define REG_READ(reg_name) \
+ dm_read_reg(CTX, REG(reg_name))
+
+#define REG_WRITE(reg_name, value) \
+ dm_write_reg(CTX, REG(reg_name), value)
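+
+/* Illustrative usage sketch only (the component and register names below are
+ * hypothetical): a HW block that uses these helpers defines CTX and REG in
+ * its own .c file, e.g.
+ *
+ *   #define CTX \
+ *       dpp110->base.ctx
+ *   #define REG(reg_name) \
+ *       dpp110->regs->reg_name
+ *
+ *   value = REG_READ(SOME_REG);
+ *   REG_WRITE(SOME_REG, value | 0x1);
+ */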
+
+#ifdef REG_SET
+#undef REG_SET
+#endif
+
+#ifdef REG_GET
+#undef REG_GET
+#endif
+
+/* macro to set register fields. */
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+ REG_SET_N(reg_name, 1, initial_val, \
+ FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+ REG_SET_N(reg, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+ REG_SET_N(reg, 3, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_SET_N(reg, 4, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4)
+
+#define REG_SET_5(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5) \
+ REG_SET_N(reg, 5, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5)
+
+#define REG_SET_6(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6) \
+ REG_SET_N(reg, 6, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6)
+
+#define REG_SET_7(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7) \
+ REG_SET_N(reg, 7, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7)
+
+#define REG_SET_8(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_SET_N(reg, 8, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7,\
+ FN(reg, f8), v8)
+
+#define REG_SET_9(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_SET_N(reg, 9, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_SET_10(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10) \
+ REG_SET_N(reg, 10, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
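+
+/* Illustrative REG_SET usage sketch (hypothetical register/field names):
+ *
+ *   REG_SET(OTG_H_TOTAL, 0, OTG_H_TOTAL, h_total - 1);
+ *
+ * builds the register value starting from the given initial value (0 here),
+ * places "h_total - 1" into the OTG_H_TOTAL field and writes the result out.
+ */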
+
+/* macro to get register fields
+ * read the given register and fill in the field value in the output parameter */
+#define REG_GET(reg_name, field, val) \
+ generic_reg_get(CTX, REG(reg_name), \
+ FN(reg_name, field), val)
+
+#define REG_GET_2(reg_name, f1, v1, f2, v2) \
+ generic_reg_get2(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2)
+
+#define REG_GET_3(reg_name, f1, v1, f2, v2, f3, v3) \
+ generic_reg_get3(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3)
+
+#define REG_GET_4(reg_name, f1, v1, f2, v2, f3, v3, f4, v4) \
+ generic_reg_get4(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4)
+
+#define REG_GET_5(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ generic_reg_get5(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5)
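+
+/* Illustrative REG_GET usage sketch (hypothetical names): the register is
+ * read once and the requested field is returned through the pointer.
+ *
+ *   uint32_t enabled;
+ *
+ *   REG_GET(HUBP_CNTL, HUBP_ENABLE, &enabled);
+ */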
+
+/* macro to poll and wait for a register field to read back a given value */
+
+#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
+ generic_reg_wait(CTX, \
+ REG(reg_name), FN(reg_name, field), val,\
+ delay_between_poll_us, max_try, __func__, __LINE__)
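+
+/* Illustrative REG_WAIT usage sketch (hypothetical names): poll the
+ * LOCK_STATUS field every 1 us, up to 10 tries, until it reads back 1.
+ *
+ *   REG_WAIT(PIPE_LOCK_CONTROL, LOCK_STATUS, 1, 1, 10);
+ */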
+
+/* macro to update (read, modify, write) register fields
+ */
+#define REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ REG_READ(reg_name), \
+ n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val) \
+ REG_UPDATE_N(reg_name, 1, \
+ FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
+ REG_UPDATE_N(reg, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+ REG_UPDATE_N(reg, 3, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_UPDATE_N(reg, 4, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+#define REG_UPDATE_5(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ REG_UPDATE_N(reg, 5, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5)
+
+#define REG_UPDATE_6(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
+ REG_UPDATE_N(reg, 6, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6)
+
+#define REG_UPDATE_7(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
+ REG_UPDATE_N(reg, 7, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7)
+
+#define REG_UPDATE_8(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_UPDATE_N(reg, 8, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8)
+
+#define REG_UPDATE_9(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_UPDATE_N(reg, 9, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_UPDATE_10(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10)\
+ REG_UPDATE_N(reg, 10, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
+
+#define REG_UPDATE_14(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14)\
+ REG_UPDATE_N(reg, 14, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14)
+
+#define REG_UPDATE_19(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19)\
+ REG_UPDATE_N(reg, 19, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19)
+
+#define REG_UPDATE_20(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19, f20, v20)\
+ REG_UPDATE_N(reg, 20, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19, \
+ FN(reg, f20), v20)
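+
+/* Illustrative REG_UPDATE usage sketch (hypothetical names): the register is
+ * read, only the listed fields are modified, and the result is written back,
+ * leaving all other bits untouched.
+ *
+ *   REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1);
+ *
+ *   REG_UPDATE_2(DPP_CONTROL,
+ *       DPP_CLOCK_ENABLE, 1,
+ *       DPPCLK_RATE_CONTROL, 0);
+ */
+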
+/* macro to update a register field to the specified values in the given
+ * sequence; useful when toggling a bit
+ */
+#define REG_UPDATE_SEQ(reg, field, value1, value2) \
+{ uint32_t val = REG_UPDATE(reg, field, value1); \
+ REG_SET(reg, val, field, value2); }
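+
+/* Illustrative REG_UPDATE_SEQ usage sketch (hypothetical names): writes the
+ * field to 1 and then back to 0 with two register writes, e.g. to pulse an
+ * update-pending/trigger bit.
+ *
+ *   REG_UPDATE_SEQ(CURSOR_UPDATE, CURSOR_UPDATE_PENDING, 1, 0);
+ */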
+
+/* macro to update fields in a register one field at a time, in the given order */
+#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ REG_SET(reg, val, f2, v2); }
+
+#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ val = REG_SET(reg, val, f2, v2); \
+ REG_SET(reg, val, f3, v3); }
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value);
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2);
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3);
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4);
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
new file mode 100644
index 000000000000..5467332faf7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+
+#include "core_types.h"
+#include "core_status.h"
+#include "dal_asic_id.h"
+#include "dm_pp_smu.h"
+
+/* TODO unhardcode, 4 for CZ */
+#define MEMORY_TYPE_MULTIPLIER 4
+
+enum dce_version resource_parse_asic_id(
+ struct hw_asic_id asic_id);
+
+struct resource_caps {
+ int num_timing_generator;
+ int num_video_plane;
+ int num_audio;
+ int num_stream_encoder;
+ int num_pll;
+ int num_dwb;
+};
+
+struct resource_straps {
+ uint32_t hdmi_disable;
+ uint32_t dc_pinstraps_audio;
+ uint32_t audio_stream_number;
+};
+
+struct resource_create_funcs {
+ void (*read_dce_straps)(
+ struct dc_context *ctx, struct resource_straps *straps);
+
+ struct audio *(*create_audio)(
+ struct dc_context *ctx, unsigned int inst);
+
+ struct stream_encoder *(*create_stream_encoder)(
+ enum engine_id eng_id, struct dc_context *ctx);
+
+ struct dce_hwseq *(*create_hwseq)(
+ struct dc_context *ctx);
+};
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs);
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id);
+
+void dc_destroy_resource_pool(struct dc *dc);
+
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx);
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx);
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2);
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx);
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream);
+
+bool resource_attach_surfaces_to_context(
+ struct dc_plane_state *const *plane_state,
+ int surface_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream);
+
+bool resource_validate_attach_surfaces(
+ const struct dc_validation_set set[],
+ int set_count,
+ const struct dc_state *old_context,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams);
+
+void resource_validate_ctx_update_pointer_after_copy(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx);
+
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth);
+
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired);
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
new file mode 100644
index 000000000000..c7e93f7223bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -0,0 +1,48 @@
+#
+# Makefile for the 'irq' sub-component of DAL.
+# It provides the control and status of the HW interrupt (IRQ) sources
+# that are global for the ASIC and shared between pipes.
+
+IRQ = irq_service.o
+
+AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+IRQ_DCE80 = irq_service_dce80.o
+
+AMD_DAL_IRQ_DCE80 = $(addprefix $(AMDDALPATH)/dc/irq/dce80/,$(IRQ_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+IRQ_DCE11 = irq_service_dce110.o
+
+AMD_DAL_IRQ_DCE11 = $(addprefix $(AMDDALPATH)/dc/irq/dce110/,$(IRQ_DCE11))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE11)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+IRQ_DCE12 = irq_service_dce120.o
+
+AMD_DAL_IRQ_DCE12 = $(addprefix $(AMDDALPATH)/dc/irq/dce120/,$(IRQ_DCE12))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+IRQ_DCN1 = irq_service_dcn10.o
+
+AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
new file mode 100644
index 000000000000..f7e40b292dfb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc.h"
+#include "core_types.h"
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
+ },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ struct dc_context *dc_ctx = irq_service->ctx;
+ struct dc *core_dc = irq_service->ctx->dc;
+ enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(
+ irq_service->ctx->dc,
+ info->src_id,
+ info->ext_id);
+ uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+
+ struct timing_generator *tg =
+ core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+
+ if (enable) {
+ if (!tg->funcs->arm_vert_intr(tg, 2)) {
+ DC_ERROR("Failed to get VBLANK!\n");
+ return false;
+ }
+ }
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+ return true;
+}
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce110[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case VISLANDS30_IV_SRCID_D2_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case VISLANDS30_IV_SRCID_D3_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case VISLANDS30_IV_SRCID_D4_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case VISLANDS30_IV_SRCID_D5_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
+ return DC_IRQ_SOURCE_HPD1;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
+ return DC_IRQ_SOURCE_HPD2;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
+ return DC_IRQ_SOURCE_HPD3;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
+ return DC_IRQ_SOURCE_HPD4;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
+ return DC_IRQ_SOURCE_HPD5;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
+ return DC_IRQ_SOURCE_HPD6;
+ case VISLANDS30_IV_EXTID_HPD_RX_A:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_B:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_C:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_D:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_E:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_F:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static const struct irq_service_funcs irq_service_funcs_dce110 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce110;
+ irq_service->funcs = &irq_service_funcs_dce110;
+}
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
new file mode 100644
index 000000000000..9237646c0959
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE110_H__
+#define __DAL_IRQ_SERVICE_DCE110_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data);
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
new file mode 100644
index 000000000000..2ad56b1a4099
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce120.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
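+
+/* The BASE()/BASE_INNER() indirection forces the per-register _BASE_IDX
+ * constant to be expanded to its numeric value before it is pasted onto
+ * DCE_BASE__INST0_SEG, e.g. (segment index <n> depends on the register):
+ *
+ *   SRI(DC_HPD_INT_CONTROL, HPD, 0)
+ *       -> DCE_BASE__INST0_SEG<n> + mmHPD0_DC_HPD_INT_CONTROL
+ */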
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(DCP, reg_num, \
+ GRPH_INTERRUPT_CONTROL, GRPH_PFLIP_INT_MASK, \
+ GRPH_INTERRUPT_STATUS, GRPH_PFLIP_INT_CLEAR),\
+ .status_reg = SRI(GRPH_INTERRUPT_STATUS, DCP, reg_num),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
+ CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce120[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce120 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce120;
+ irq_service->funcs = &irq_service_funcs_dce120;
+}
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
new file mode 100644
index 000000000000..420c96e8fefc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE120_H__
+#define __DAL_IRQ_SERVICE_DCE120_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
new file mode 100644
index 000000000000..8a2066c313fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce80.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc_types.h"
+
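+/* After acking an HPD interrupt, flip the interrupt polarity so the
+ * next sense transition (connect or disconnect) is also caught. */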
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_INVALID + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
+ },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD6 + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce80[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_int_entry(6),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ hpd_rx_int_entry(6),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce80 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce80;
+ irq_service->funcs = &irq_service_funcs_dce80;
+}
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
new file mode 100644
index 000000000000..3dd1013576ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE80_H__
+#define __DAL_IRQ_SERVICE_DCE80_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
new file mode 100644
index 000000000000..74ad24714f6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "irq_service_dcn10.h"
+
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
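+/* Translate the base driver's src_id/ext_id pair into a DAL irq source.
+ * On DCN1.0 all HPD and HPD-RX interrupts share one src_id and are
+ * distinguished by the context (ext_id) value. */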
+enum dc_irq_source to_dal_irq_source_dcn10(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
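+/* Expand the enable/ack register addresses and masks for one interrupt
+ * source from the block name, instance number and register/field names. */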
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn10 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn10
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn10;
+ irq_service->funcs = &irq_service_funcs_dcn10;
+}
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
new file mode 100644
index 000000000000..fd2ca4d0c316
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN10_H__
+#define __DAL_IRQ_SERVICE_DCN10_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
new file mode 100644
index 000000000000..b106513fc2dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/irq_service_interface.h"
+#include "include/logger_interface.h"
+
+#include "dce110/irq_service_dce110.h"
+
+
+#include "dce80/irq_service_dce80.h"
+
+#include "dce120/irq_service_dce120.h"
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/irq_service_dcn10.h"
+#endif
+
+#include "reg_helper.h"
+#include "irq_service.h"
+
+
+
+#define CTX \
+ irq_service->ctx
+
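+/* Base constructor shared by the ASIC-specific irq services. */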
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ if (!init_data || !init_data->ctx) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ irq_service->ctx = init_data->ctx;
+}
+
+void dal_irq_service_destroy(struct irq_service **irq_service)
+{
+ if (!irq_service || !*irq_service) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*irq_service);
+
+ *irq_service = NULL;
+}
+
+const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ return NULL;
+
+ return &irq_service->info[source];
+}
+
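+/* Program the enable register: apply enable_value[0] to enable or
+ * enable_value[1] to disable, under enable_mask. */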
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ uint32_t addr = info->enable_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
+ value = (value & ~info->enable_mask) |
+ (info->enable_value[enable ? 0 : 1] & info->enable_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
+ dal_irq_service_ack(irq_service, source);
+
+ if (info->funcs->set)
+ return info->funcs->set(irq_service, info, enable);
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+
+ return true;
+}
+
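+/* Acknowledge an interrupt by writing ack_value under ack_mask. */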
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->ack_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
+ value = (value & ~info->ack_mask) |
+ (info->ack_value & info->ack_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
+ if (info->funcs->ack)
+ return info->funcs->ack(irq_service, info);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ return true;
+}
+
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return irq_service->funcs->to_dal_irq_source(
+ irq_service,
+ src_id,
+ ext_id);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
new file mode 100644
index 000000000000..dbfcb096eedd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_H__
+#define __DAL_IRQ_SERVICE_H__
+
+#include "include/irq_service_interface.h"
+
+#include "irq_types.h"
+
+struct irq_service;
+struct irq_source_info;
+
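+/* Optional per-source overrides; a NULL hook falls back to the
+ * generic set/ack helpers. */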
+struct irq_source_info_funcs {
+ bool (*set)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+ bool (*ack)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+};
+
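+/* Register programming description for a single interrupt source.
+ * enable_value[0] holds the enable pattern, enable_value[1] the
+ * disable pattern. */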
+struct irq_source_info {
+ uint32_t src_id;
+ uint32_t ext_id;
+ uint32_t enable_reg;
+ uint32_t enable_mask;
+ uint32_t enable_value[2];
+ uint32_t ack_reg;
+ uint32_t ack_mask;
+ uint32_t ack_value;
+ uint32_t status_reg;
+ const struct irq_source_info_funcs *funcs;
+};
+
+struct irq_service_funcs {
+ enum dc_irq_source (*to_dal_irq_source)(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+};
+
+struct irq_service {
+ struct dc_context *ctx;
+ const struct irq_source_info *info;
+ const struct irq_service_funcs *funcs;
+};
+
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data);
+
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
new file mode 100644
index 000000000000..a506c2e939f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_TYPES_H__
+#define __DAL_IRQ_TYPES_H__
+
+struct dc_context;
+
+typedef void (*interrupt_handler)(void *);
+
+typedef void *irq_handler_idx;
+#define DAL_INVALID_IRQ_HANDLER_IDX NULL
+
+/* The order of the IRQ sources is important and MUST match that of
+ * the base driver. */
+enum dc_irq_source {
+ /* Use as mask to specify invalid irq source */
+ DC_IRQ_SOURCE_INVALID = 0,
+
+ DC_IRQ_SOURCE_HPD1,
+ DC_IRQ_SOURCE_HPD2,
+ DC_IRQ_SOURCE_HPD3,
+ DC_IRQ_SOURCE_HPD4,
+ DC_IRQ_SOURCE_HPD5,
+ DC_IRQ_SOURCE_HPD6,
+
+ DC_IRQ_SOURCE_HPD1RX,
+ DC_IRQ_SOURCE_HPD2RX,
+ DC_IRQ_SOURCE_HPD3RX,
+ DC_IRQ_SOURCE_HPD4RX,
+ DC_IRQ_SOURCE_HPD5RX,
+ DC_IRQ_SOURCE_HPD6RX,
+
+ DC_IRQ_SOURCE_I2C_DDC1,
+ DC_IRQ_SOURCE_I2C_DDC2,
+ DC_IRQ_SOURCE_I2C_DDC3,
+ DC_IRQ_SOURCE_I2C_DDC4,
+ DC_IRQ_SOURCE_I2C_DDC5,
+ DC_IRQ_SOURCE_I2C_DDC6,
+
+ DC_IRQ_SOURCE_DPSINK1,
+ DC_IRQ_SOURCE_DPSINK2,
+ DC_IRQ_SOURCE_DPSINK3,
+ DC_IRQ_SOURCE_DPSINK4,
+ DC_IRQ_SOURCE_DPSINK5,
+ DC_IRQ_SOURCE_DPSINK6,
+
+ DC_IRQ_SOURCE_TIMER,
+
+ DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP1 = DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP2,
+ DC_IRQ_SOURCE_PFLIP3,
+ DC_IRQ_SOURCE_PFLIP4,
+ DC_IRQ_SOURCE_PFLIP5,
+ DC_IRQ_SOURCE_PFLIP6,
+ DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+ DC_IRQ_SOURCE_PFLIP_LAST = DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+
+ DC_IRQ_SOURCE_GPIOPAD0,
+ DC_IRQ_SOURCE_GPIOPAD1,
+ DC_IRQ_SOURCE_GPIOPAD2,
+ DC_IRQ_SOURCE_GPIOPAD3,
+ DC_IRQ_SOURCE_GPIOPAD4,
+ DC_IRQ_SOURCE_GPIOPAD5,
+ DC_IRQ_SOURCE_GPIOPAD6,
+ DC_IRQ_SOURCE_GPIOPAD7,
+ DC_IRQ_SOURCE_GPIOPAD8,
+ DC_IRQ_SOURCE_GPIOPAD9,
+ DC_IRQ_SOURCE_GPIOPAD10,
+ DC_IRQ_SOURCE_GPIOPAD11,
+ DC_IRQ_SOURCE_GPIOPAD12,
+ DC_IRQ_SOURCE_GPIOPAD13,
+ DC_IRQ_SOURCE_GPIOPAD14,
+ DC_IRQ_SOURCE_GPIOPAD15,
+ DC_IRQ_SOURCE_GPIOPAD16,
+ DC_IRQ_SOURCE_GPIOPAD17,
+ DC_IRQ_SOURCE_GPIOPAD18,
+ DC_IRQ_SOURCE_GPIOPAD19,
+ DC_IRQ_SOURCE_GPIOPAD20,
+ DC_IRQ_SOURCE_GPIOPAD21,
+ DC_IRQ_SOURCE_GPIOPAD22,
+ DC_IRQ_SOURCE_GPIOPAD23,
+ DC_IRQ_SOURCE_GPIOPAD24,
+ DC_IRQ_SOURCE_GPIOPAD25,
+ DC_IRQ_SOURCE_GPIOPAD26,
+ DC_IRQ_SOURCE_GPIOPAD27,
+ DC_IRQ_SOURCE_GPIOPAD28,
+ DC_IRQ_SOURCE_GPIOPAD29,
+ DC_IRQ_SOURCE_GPIOPAD30,
+
+ DC_IRQ_SOURCE_DC1UNDERFLOW,
+ DC_IRQ_SOURCE_DC2UNDERFLOW,
+ DC_IRQ_SOURCE_DC3UNDERFLOW,
+ DC_IRQ_SOURCE_DC4UNDERFLOW,
+ DC_IRQ_SOURCE_DC5UNDERFLOW,
+ DC_IRQ_SOURCE_DC6UNDERFLOW,
+
+ DC_IRQ_SOURCE_DMCU_SCP,
+ DC_IRQ_SOURCE_VBIOS_SW,
+
+ DC_IRQ_SOURCE_VUPDATE1,
+ DC_IRQ_SOURCE_VUPDATE2,
+ DC_IRQ_SOURCE_VUPDATE3,
+ DC_IRQ_SOURCE_VUPDATE4,
+ DC_IRQ_SOURCE_VUPDATE5,
+ DC_IRQ_SOURCE_VUPDATE6,
+
+ DC_IRQ_SOURCE_VBLANK1,
+ DC_IRQ_SOURCE_VBLANK2,
+ DC_IRQ_SOURCE_VBLANK3,
+ DC_IRQ_SOURCE_VBLANK4,
+ DC_IRQ_SOURCE_VBLANK5,
+ DC_IRQ_SOURCE_VBLANK6,
+
+ DAL_IRQ_SOURCES_NUMBER
+};
+
+enum irq_type
+{
+ IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
+ IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
+ IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+};
+
+#define DAL_VALID_IRQ_SRC_NUM(src) \
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+
+/* Number of Page Flip IRQ Sources. */
+#define DAL_PFLIP_IRQ_SRC_NUM \
+ (DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1)
+
+/* the number of contexts may be expanded in the future based on needs */
+enum dc_interrupt_context {
+ INTERRUPT_LOW_IRQ_CONTEXT = 0,
+ INTERRUPT_HIGH_IRQ_CONTEXT,
+ INTERRUPT_CONTEXT_NUMBER
+};
+
+enum dc_interrupt_porlarity {
+ INTERRUPT_POLARITY_DEFAULT = 0,
+ INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT,
+ INTERRUPT_POLARITY_HIGH,
+ INTERRUPT_POLARITY_BOTH
+};
+
+#define DC_DECODE_INTERRUPT_POLARITY(int_polarity) \
+ (int_polarity == INTERRUPT_POLARITY_LOW) ? "Low" : \
+ (int_polarity == INTERRUPT_POLARITY_HIGH) ? "High" : \
+ (int_polarity == INTERRUPT_POLARITY_BOTH) ? "Both" : "Invalid"
+
+struct dc_timer_interrupt_params {
+ uint32_t micro_sec_interval;
+ enum dc_interrupt_context int_context;
+};
+
+struct dc_interrupt_params {
+ /* The polarity *change* which will trigger an interrupt.
+ * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then
+ * 'current_polarity' must be initialised. */
+ enum dc_interrupt_porlarity requested_polarity;
+ /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH',
+ * 'current_polarity' should contain the current state, which means
+ * the interrupt will be triggered when the state changes from the
+ * one stored in 'current_polarity'. */
+ enum dc_interrupt_porlarity current_polarity;
+ enum dc_irq_source irq_source;
+ enum dc_interrupt_context int_context;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
new file mode 100644
index 000000000000..a87c0329541f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _OS_TYPES_H_
+#define _OS_TYPES_H_
+
+#if defined __KERNEL__
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#include <linux/kref.h>
+
+#include "cgs_linux.h"
+
+#if defined(__BIG_ENDIAN) && !defined(BIGENDIAN_CPU)
+#define BIGENDIAN_CPU
+#elif defined(__LITTLE_ENDIAN) && !defined(LITTLEENDIAN_CPU)
+#define LITTLEENDIAN_CPU
+#endif
+
+#undef READ
+#undef WRITE
+#undef FRAME_SIZE
+
+#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+
+#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
+
+#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
+
+#define dm_vlog(fmt, args) vprintk(fmt, args)
+
+#endif
+
+/*
+ *
+ * general debug capabilities
+ *
+ */
+#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ kgdb_breakpoint(); \
+ } \
+} while (0)
+#else
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ ; \
+ } \
+} while (0)
+#endif
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+#define ASSERT(expr) ASSERT_CRITICAL(expr)
+
+#else
+#define ASSERT(expr) WARN_ON(!(expr))
+#endif
+
+#define BREAK_TO_DEBUGGER() ASSERT(0)
+
+#define DC_ERR(...) do { \
+ dm_error(__VA_ARGS__); \
+ BREAK_TO_DEBUGGER(); \
+} while (0)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
+#endif
+
+#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
new file mode 100644
index 000000000000..fc0b7318d9cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the virtual sub-component of DAL.
+# It provides the virtual link and stream encoder stubs.
+
+VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
+
+AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
new file mode 100644
index 000000000000..88c2bde3f039
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_services_types.h"
+
+#include "virtual_link_encoder.h"
+
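+/* No-op link encoder used for SIGNAL_TYPE_VIRTUAL streams; every
+ * hardware programming hook below is a stub. */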
+static bool virtual_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream) { return true; }
+
+static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}
+
+static void virtual_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal) {}
+
+static void virtual_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock) {}
+
+static void virtual_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link) {}
+
+static void virtual_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings) {}
+
+static void virtual_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param) {}
+
+static void virtual_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table) {}
+
+static void virtual_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect) {}
+
+static void virtual_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(*enc);
+ *enc = NULL;
+}
+
+
+static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ virtual_link_encoder_validate_output_with_stream,
+ .hw_init = virtual_link_encoder_hw_init,
+ .setup = virtual_link_encoder_setup,
+ .enable_tmds_output = virtual_link_encoder_enable_tmds_output,
+ .enable_dp_output = virtual_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
+ .disable_output = virtual_link_encoder_disable_output,
+ .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ virtual_link_encoder_update_mst_stream_allocation_table,
+ .connect_dig_be_to_fe = virtual_link_encoder_connect_dig_be_to_fe,
+ .destroy = virtual_link_encoder_destroy
+};
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data)
+{
+ enc->funcs = &virtual_lnk_enc_funcs;
+ enc->ctx = init_data->ctx;
+ enc->id = init_data->encoder;
+
+ enc->hpd_source = init_data->hpd_source;
+ enc->connector = init_data->connector;
+
+ enc->transmitter = init_data->transmitter;
+
+ enc->output_signals = SIGNAL_TYPE_VIRTUAL;
+
+ enc->preferred_engine = ENGINE_ID_VIRTUAL;
+
+ return true;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
new file mode 100644
index 000000000000..eb1a94fb8a9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_LINK_ENCODER_H__
+#define __DC_VIRTUAL_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data);
+
+#endif /* __DC_VIRTUAL_LINK_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
new file mode 100644
index 000000000000..3dc1733eea20
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "virtual_stream_encoder.h"
+
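+/* No-op stream encoder backing ENGINE_ID_VIRTUAL; all programming
+ * hooks are stubs. */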
+static void virtual_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space) {}
+
+static void virtual_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio) {}
+
+static void virtual_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link) {}
+
+static void virtual_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp) {}
+
+static void virtual_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable) {}
+static void virtual_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_blank(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param) {}
+
+static void virtual_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute) {}
+
+static const struct stream_encoder_funcs virtual_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ virtual_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ virtual_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ virtual_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ virtual_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ virtual_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ virtual_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ virtual_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ virtual_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ virtual_stream_encoder_dp_blank,
+ .dp_unblank =
+ virtual_stream_encoder_dp_unblank,
+
+ .audio_mute_control = virtual_audio_mute_control,
+ .set_avmute = virtual_stream_encoder_set_avmute,
+};
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp)
+{
+ if (!enc)
+ return false;
+ if (!bp)
+ return false;
+
+ enc->funcs = &virtual_str_enc_funcs;
+ enc->ctx = ctx;
+ enc->id = ENGINE_ID_VIRTUAL;
+ enc->bp = bp;
+
+ return true;
+}
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp)
+{
+ struct stream_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+
+ if (!enc)
+ return NULL;
+
+ if (virtual_stream_encoder_construct(enc, ctx, bp))
+ return enc;
+
+ BREAK_TO_DEBUGGER();
+ kfree(enc);
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
new file mode 100644
index 000000000000..bf3422c66976
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_STREAM_ENCODER_H__
+#define __DC_VIRTUAL_STREAM_ENCODER_H__
+
+#include "stream_encoder.h"
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp);
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp);
+
+#endif /* __DC_VIRTUAL_STREAM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
new file mode 100644
index 000000000000..6364fbc24cfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AUDIO_TYPES_H__
+#define __AUDIO_TYPES_H__
+
+#include "signal_types.h"
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
+#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+
+
+struct audio_crtc_info {
+ uint32_t h_total;
+ uint32_t h_active;
+ uint32_t v_active;
+ uint32_t pixel_repetition;
+ uint32_t requested_pixel_clock; /* in KHz */
+ uint32_t calculated_pixel_clock; /* in KHz */
+ uint32_t refresh_rate;
+ enum dc_color_depth color_depth;
+ bool interlaced;
+};
+struct azalia_clock_info {
+ uint32_t pixel_clock_in_10khz;
+ uint32_t audio_dto_phase;
+ uint32_t audio_dto_module;
+ uint32_t audio_dto_wall_clock_ratio;
+};
+
+enum audio_dto_source {
+ DTO_SOURCE_UNKNOWN = 0,
+ DTO_SOURCE_ID0,
+ DTO_SOURCE_ID1,
+ DTO_SOURCE_ID2,
+ DTO_SOURCE_ID3,
+ DTO_SOURCE_ID4,
+ DTO_SOURCE_ID5
+};
+
+/* PLL information required for AZALIA DTO calculation */
+
+struct audio_pll_info {
+ uint32_t dp_dto_source_clock_in_khz;
+ uint32_t feed_back_divider;
+ enum audio_dto_source dto_source;
+ bool ss_enabled;
+ uint32_t ss_percentage;
+ uint32_t ss_percentage_divider;
+};
+
+struct audio_channel_associate_info {
+ union {
+ struct {
+ uint32_t ALL_CHANNEL_FL:4;
+ uint32_t ALL_CHANNEL_FR:4;
+ uint32_t ALL_CHANNEL_FC:4;
+ uint32_t ALL_CHANNEL_Sub:4;
+ uint32_t ALL_CHANNEL_SL:4;
+ uint32_t ALL_CHANNEL_SR:4;
+ uint32_t ALL_CHANNEL_BL:4;
+ uint32_t ALL_CHANNEL_BR:4;
+ } bits;
+ uint32_t u32all;
+ };
+};
+
+struct audio_output {
+ /* Front DIG id. */
+ enum engine_id engine_id;
+ /* encoder output signal */
+ enum signal_type signal;
+ /* video timing */
+ struct audio_crtc_info crtc_info;
+ /* PLL for audio */
+ struct audio_pll_info pll_info;
+};
+
+enum audio_payload {
+ CHANNEL_SPLIT_MAPPINGCHANG = 0x9,
+};
+
+#endif /* __AUDIO_TYPES_H__ */
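For illustration, the channel association union above packs one 4-bit speaker-to-audio-channel assignment per field, with u32all giving the raw register view. A minimal sketch of how a caller might fill it; the channel numbering is only an example, and treating MULTI_CHANNEL_SPLIT_NO_ASSO_INFO as "all speakers unassigned" is an assumption:

	struct audio_channel_associate_info assoc;

	/* Assumption: 0xFFFFFFFF (MULTI_CHANNEL_SPLIT_NO_ASSO_INFO) marks every
	 * speaker field as unassigned before individual fields are programmed. */
	assoc.u32all = MULTI_CHANNEL_SPLIT_NO_ASSO_INFO;

	/* Route a plain stereo stream: front-left on audio channel 0,
	 * front-right on audio channel 1 (illustrative numbering). */
	assoc.bits.ALL_CHANNEL_FL = 0;
	assoc.bits.ALL_CHANNEL_FR = 1;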
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_interface.h b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
new file mode 100644
index 000000000000..d51101c5c6b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_INTERFACE_H__
+#define __DAL_BIOS_PARSER_INTERFACE_H__
+
+#include "dc_bios_types.h"
+
+struct bios_parser;
+
+struct bp_init_data {
+ struct dc_context *ctx;
+ uint8_t *bios;
+};
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+void dal_bios_parser_destroy(struct dc_bios **dcb);
+
+#endif /* __DAL_BIOS_PARSER_INTERFACE_H__ */
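A minimal create/use/destroy sketch for the interface above. The dc_ctx and vbios_image variables are assumed to come from the caller, and the DCE version passed here is only an example (the enumerators are declared in dal_types.h later in this series):

	struct bp_init_data init = {
		.ctx = dc_ctx,        /* assumed: driver context already set up */
		.bios = vbios_image,  /* assumed: pointer to the raw VBIOS image */
	};
	struct dc_bios *dcb = dal_bios_parser_create(&init, DCE_VERSION_11_2);

	if (dcb) {
		/* ... query connector/encoder tables through the dc_bios object ... */
		dal_bios_parser_destroy(&dcb);  /* double pointer lets it clear the caller's handle */
	}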
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
new file mode 100644
index 000000000000..0840f69cde99
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_H__
+
+#define __DAL_BIOS_PARSER_TYPES_H__
+
+#include "dm_services.h"
+#include "include/signal_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/gpio_types.h"
+#include "include/link_service_types.h"
+
+/* TODO: include signal_types.h and remove this enum */
+enum as_signal_type {
+ AS_SIGNAL_TYPE_NONE = 0L, /* no signal */
+ AS_SIGNAL_TYPE_DVI,
+ AS_SIGNAL_TYPE_HDMI,
+ AS_SIGNAL_TYPE_LVDS,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_UNKNOWN
+};
+
+enum bp_result {
+ BP_RESULT_OK = 0, /* There was no error */
+ BP_RESULT_BADINPUT, /*Bad input parameter */
+ BP_RESULT_BADBIOSTABLE, /* Bad BIOS table */
+ BP_RESULT_UNSUPPORTED, /* BIOS Table is not supported */
+ BP_RESULT_NORECORD, /* Record can't be found */
+ BP_RESULT_FAILURE
+};
+
+enum bp_encoder_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ ENCODER_CONTROL_DISABLE = 0,
+ ENCODER_CONTROL_ENABLE,
+ ENCODER_CONTROL_SETUP,
+ ENCODER_CONTROL_INIT
+};
+
+enum bp_transmitter_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ TRANSMITTER_CONTROL_DISABLE = 0,
+ TRANSMITTER_CONTROL_ENABLE,
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF,
+ TRANSMITTER_CONTROL_BACKLIGHT_ON,
+ TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS,
+ TRANSMITTER_CONTROL_LCD_SETF_TEST_START,
+ TRANSMITTER_CONTROL_LCD_SELF_TEST_STOP,
+ TRANSMITTER_CONTROL_INIT,
+ TRANSMITTER_CONTROL_DEACTIVATE,
+ TRANSMITTER_CONTROL_ACTIAVATE,
+ TRANSMITTER_CONTROL_SETUP,
+ TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS,
+ /* ATOM_TRANSMITTER_ACTION_POWER_ON. This action is for eDP only
+ * (power up the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_ON,
+ /* ATOM_TRANSMITTER_ACTION_POWER_OFF. This action is for eDP only
+ * (power down the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_OFF
+};
+
+enum bp_external_encoder_control_action {
+ EXTERNAL_ENCODER_CONTROL_DISABLE = 0,
+ EXTERNAL_ENCODER_CONTROL_ENABLE = 1,
+ EXTERNAL_ENCODER_CONTROL_INIT = 0x7,
+ EXTERNAL_ENCODER_CONTROL_SETUP = 0xf,
+ EXTERNAL_ENCODER_CONTROL_UNBLANK = 0x10,
+ EXTERNAL_ENCODER_CONTROL_BLANK = 0x11,
+};
+
+enum bp_pipe_control_action {
+ ASIC_PIPE_DISABLE = 0,
+ ASIC_PIPE_ENABLE,
+ ASIC_PIPE_INIT
+};
+
+struct bp_encoder_control {
+ enum bp_encoder_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter;
+ enum signal_type signal;
+ enum dc_lane_count lanes_number;
+ enum dc_color_depth color_depth;
+ bool enable_dp_audio;
+ uint32_t pixel_clock; /* khz */
+};
+
+struct bp_external_encoder_control {
+ enum bp_external_encoder_control_action action;
+ enum engine_id engine_id;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lanes_number;
+ enum signal_type signal;
+ enum dc_color_depth color_depth;
+ bool coherent;
+ struct graphics_object_id encoder_id;
+ struct graphics_object_id connector_obj_id;
+ uint32_t pixel_clock; /* in KHz */
+};
+
+struct bp_crtc_source_select {
+ enum engine_id engine_id;
+ enum controller_id controller_id;
+ /* from GPU Tx aka asic_signal */
+ enum signal_type signal;
+	/* sink_signal may differ from asic_signal if a translator encoder is used */
+ enum signal_type sink_signal;
+ enum display_output_bit_depth display_output_bit_depth;
+ bool enable_dp_audio;
+};
+
+struct bp_transmitter_control {
+ enum bp_transmitter_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter; /* PhyId */
+ enum dc_lane_count lanes_number;
+ enum clock_source_id pll_id; /* needed for DCE 4.0 */
+ enum signal_type signal;
+ enum dc_color_depth color_depth; /* not used for DCE6.0 */
+	enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCE6.0 */
+ struct graphics_object_id connector_obj_id;
+ /* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
+ * be pixel clock * deep_color_ratio (in KHz)
+ */
+ uint32_t pixel_clock;
+ uint32_t lane_select;
+ uint32_t lane_settings;
+ bool coherent;
+ bool multi_path;
+ bool single_pll_mode;
+};
+
+struct bp_hw_crtc_timing_parameters {
+ enum controller_id controller_id;
+ /* horizontal part */
+ uint32_t h_total;
+ uint32_t h_addressable;
+ uint32_t h_overscan_left;
+ uint32_t h_overscan_right;
+ uint32_t h_sync_start;
+ uint32_t h_sync_width;
+
+ /* vertical part */
+ uint32_t v_total;
+ uint32_t v_addressable;
+ uint32_t v_overscan_top;
+ uint32_t v_overscan_bottom;
+ uint32_t v_sync_start;
+ uint32_t v_sync_width;
+
+ struct timing_flags {
+ uint32_t INTERLACE:1;
+ uint32_t PIXEL_REPETITION:4;
+ uint32_t HSYNC_POSITIVE_POLARITY:1;
+ uint32_t VSYNC_POSITIVE_POLARITY:1;
+ uint32_t HORZ_COUNT_BY_TWO:1;
+ } flags;
+};
+
+struct bp_adjust_pixel_clock_parameters {
+ /* Input: Signal Type - to be converted to Encoder mode */
+ enum signal_type signal_type;
+ /* Input: Encoder object id */
+ struct graphics_object_id encoder_object_id;
+ /* Input: Pixel Clock (requested Pixel clock based on Video timing
+ * standard used) in KHz
+ */
+ uint32_t pixel_clock;
+ /* Output: Adjusted Pixel Clock (after VBIOS exec table) in KHz */
+ uint32_t adjusted_pixel_clock;
+ /* Output: If non-zero, this refDiv value should be used to calculate
+ * other ppll params */
+ uint32_t reference_divider;
+ /* Output: If non-zero, this postDiv value should be used to calculate
+ * other ppll params */
+ uint32_t pixel_clock_post_divider;
+ /* Input: Enable spread spectrum */
+ bool ss_enable;
+};
+
+struct bp_pixel_clock_parameters {
+ enum controller_id controller_id; /* (Which CRTC uses this PLL) */
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* signal_type -> Encoder Mode - needed by VBIOS Exec table */
+ enum signal_type signal_type;
+ /* Adjusted Pixel Clock (after VBIOS exec table)
+ * that becomes Target Pixel Clock (KHz) */
+ uint32_t target_pixel_clock;
+ /* Calculated Reference divider of Display PLL */
+ uint32_t reference_divider;
+ /* Calculated Feedback divider of Display PLL */
+ uint32_t feedback_divider;
+ /* Calculated Fractional Feedback divider of Display PLL */
+ uint32_t fractional_feedback_divider;
+ /* Calculated Pixel Clock Post divider of Display PLL */
+ uint32_t pixel_clock_post_divider;
+ struct graphics_object_id encoder_object_id; /* Encoder object id */
+ /* VBIOS returns a fixed display clock when DFS-bypass feature
+ * is enabled (KHz) */
+ uint32_t dfs_bypass_display_clock;
+ /* color depth to support HDMI deep color */
+ enum transmitter_color_depth color_depth;
+
+ struct program_pixel_clock_flags {
+ uint32_t FORCE_PROGRAMMING_OF_PLL:1;
+ /* Use Engine Clock as source for Display Clock when
+ * programming PLL */
+ uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1;
+ /* Use external reference clock (refDivSrc for PLL) */
+ uint32_t SET_EXTERNAL_REF_DIV_SRC:1;
+ /* Force program PHY PLL only */
+ uint32_t PROGRAM_PHY_PLL_ONLY:1;
+ /* Support for YUV420 */
+ uint32_t SUPPORT_YUV_420:1;
+ /* Use XTALIN reference clock source */
+ uint32_t SET_XTALIN_REF_SRC:1;
+ /* Use GENLK reference clock source */
+ uint32_t SET_GENLOCK_REF_DIV_SRC:1;
+ } flags;
+};
+
+enum bp_dce_clock_type {
+ DCECLOCK_TYPE_DISPLAY_CLOCK = 0,
+ DCECLOCK_TYPE_DPREFCLK = 1
+};
+
+/* DCE Clock Parameters structure for SetDceClock Exec command table */
+struct bp_set_dce_clock_parameters {
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* Display clock or DPREFCLK value */
+ uint32_t target_clock_frequency;
+ /* Clock to set: =0: DISPCLK =1: DPREFCLK =2: PIXCLK */
+ enum bp_dce_clock_type clock_type;
+
+ struct set_dce_clock_flags {
+ uint32_t USE_GENERICA_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use XTALIN reference clock source */
+ uint32_t USE_XTALIN_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use PCIE reference clock source */
+ uint32_t USE_PCIE_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use GENLK reference clock source */
+ uint32_t USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK:1;
+ } flags;
+};
+
+struct spread_spectrum_flags {
+ /* 1 = Center Spread; 0 = down spread */
+ uint32_t CENTER_SPREAD:1;
+ /* 1 = external; 0 = internal */
+ uint32_t EXTERNAL_SS:1;
+ /* 1 = delta-sigma type parameter; 0 = ver1 */
+ uint32_t DS_TYPE:1;
+};
+
+struct bp_spread_spectrum_parameters {
+ enum clock_source_id pll_id;
+ uint32_t percentage;
+ uint32_t ds_frac_amount;
+
+ union {
+ struct {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t range; /* In Hz unit */
+ } ver1;
+ struct {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ } ds;
+ };
+
+ struct spread_spectrum_flags flags;
+};
+
+struct bp_encoder_cap_info {
+ uint32_t DP_HBR2_CAP:1;
+ uint32_t DP_HBR2_EN:1;
+ uint32_t DP_HBR3_EN:1;
+ uint32_t HDMI_6GB_EN:1;
+ uint32_t RESERVED:30;
+};
+
+#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
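For illustration, a hypothetical call site filling bp_encoder_control for a DisplayPort enable. The engine, transmitter, signal, lane-count and color-depth enumerators are assumed to exist in the headers included above (grph_object_defs.h, signal_types.h and friends), and the pixel clock is an arbitrary example:

	struct bp_encoder_control ctrl = {
		.action = ENCODER_CONTROL_ENABLE,
		.engine_id = ENGINE_ID_DIGA,          /* assumed enumerator */
		.transmitter = TRANSMITTER_UNIPHY_A,  /* assumed enumerator */
		.signal = SIGNAL_TYPE_DISPLAY_PORT,   /* assumed enumerator */
		.lanes_number = LANE_COUNT_FOUR,      /* assumed enumerator */
		.color_depth = COLOR_DEPTH_888,       /* assumed enumerator */
		.enable_dp_audio = true,
		.pixel_clock = 148500,                /* kHz, illustrative 1080p60 */
	};

	/* The filled structure would then be handed to the encoder control
	 * entry point exposed by the bios parser (not shown in this header). */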
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
new file mode 100644
index 000000000000..7abe663ecc6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ASIC_ID_H__
+#define __DAL_ASIC_ID_H__
+
+/*
+ * ASIC internal revision ID
+ */
+
+/* DCE80 (based on ci_id.h in Perforce) */
+#define CI_BONAIRE_M_A0 0x14
+#define CI_BONAIRE_M_A1 0x15
+#define CI_HAWAII_P_A0 0x28
+
+#define CI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_BONAIRE_M(rev) \
+ ((rev >= CI_BONAIRE_M_A0) && (rev < CI_HAWAII_P_A0))
+
+#define ASIC_REV_IS_HAWAII_P(rev) \
+ (rev >= CI_HAWAII_P_A0)
+
+/* KV1 with Spectre GFX core, 8-8-1-2 (CU-Pix-Primitive-RB) */
+#define KV_SPECTRE_A0 0x01
+
+/* KV2 with Spooky GFX core, including downgraded from Spectre core,
+ * 3-4-1-1 (CU-Pix-Primitive-RB) */
+#define KV_SPOOKY_A0 0x41
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A0 0x81
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A1 0x82
+
+/* BV with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define BV_KALINDI_A2 0x85
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A0 0xA1
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A1 0xA2
+
+#define KV_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_KALINDI(rev) \
+ ((rev >= KB_KALINDI_A0) && (rev < KV_UNKNOWN))
+
+#define ASIC_REV_IS_BHAVANI(rev) \
+ ((rev >= BV_KALINDI_A2) && (rev < ML_GODAVARI_A0))
+
+#define ASIC_REV_IS_GODAVARI(rev) \
+ ((rev >= ML_GODAVARI_A0) && (rev < KV_UNKNOWN))
+
+/* VI Family */
+/* DCE10 */
+#define VI_TONGA_P_A0 20
+#define VI_TONGA_P_A1 21
+#define VI_FIJI_P_A0 60
+
+/* DCE112 */
+#define VI_POLARIS10_P_A0 80
+#define VI_POLARIS11_M_A0 90
+#define VI_POLARIS12_V_A0 100
+
+#define VI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_TONGA_P(eChipRev) ((eChipRev >= VI_TONGA_P_A0) && \
+ (eChipRev < 40))
+#define ASIC_REV_IS_FIJI_P(eChipRev) ((eChipRev >= VI_FIJI_P_A0) && \
+ (eChipRev < 80))
+
+#define ASIC_REV_IS_POLARIS10_P(eChipRev) ((eChipRev >= VI_POLARIS10_P_A0) && \
+ (eChipRev < VI_POLARIS11_M_A0))
+#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
+ (eChipRev < VI_POLARIS12_V_A0))
+#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+
+/* DCE11 */
+#define CZ_CARRIZO_A0 0x01
+
+#define STONEY_A0 0x61
+#define CZ_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_STONEY(rev) \
+ ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
+
+/* DCN1_0 */
+#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+#define RAVEN_A0 0x01
+#define RAVEN_B0 0x21
+#define RAVEN_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && (eChipRev < RAVEN_UNKNOWN))
+#define RAVEN1_F0 0xF0
+#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
+
+
+#define FAMILY_RV 142 /* DCN 1*/
+
+/*
+ * ASIC chip ID
+ */
+/* DCE80 */
+#define DEVICE_ID_KALINDI_9834 0x9834
+#define DEVICE_ID_TEMASH_9839 0x9839
+#define DEVICE_ID_TEMASH_983D 0x983D
+
+/* Asic Family IDs for different asic family. */
+#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
+#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
+#define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */
+#define FAMILY_CZ 135 /* Carrizo */
+
+#define FAMILY_AI 141
+
+#define FAMILY_UNKNOWN 0xFF
+
+#endif /* __DAL_ASIC_ID_H__ */
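Several of the revision windows above overlap: Bhavani and Godavari both fall inside the broad Kalindi range, so a classifier has to test the narrower windows first. A small illustrative helper (not part of the driver) built only from the macros above:

	static const char *kv_family_rev_name(uint32_t rev)
	{
		/* Check the narrower Godavari/Bhavani windows before the broad
		 * Kalindi window, since ASIC_REV_IS_KALINDI() also matches them. */
		if (ASIC_REV_IS_GODAVARI(rev))
			return "Godavari";
		if (ASIC_REV_IS_BHAVANI(rev))
			return "Bhavani";
		if (ASIC_REV_IS_KALINDI(rev))
			return "Kalindi";
		if (rev == KV_SPECTRE_A0)
			return "Spectre";
		if (rev == KV_SPOOKY_A0)
			return "Spooky";
		return "unknown KV/KB revision";
	}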
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
new file mode 100644
index 000000000000..fa543965feb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TYPES_H__
+#define __DAL_TYPES_H__
+
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dal_logger;
+struct dc_bios;
+
+enum dce_version {
+ DCE_VERSION_UNKNOWN = (-1),
+ DCE_VERSION_8_0,
+ DCE_VERSION_8_1,
+ DCE_VERSION_8_3,
+ DCE_VERSION_10_0,
+ DCE_VERSION_11_0,
+ DCE_VERSION_11_2,
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
+ DCN_VERSION_MAX
+};
+
+#endif /* __DAL_TYPES_H__ */
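A sketch of how the family/revision constants from dal_asic_id.h might map onto enum dce_version. The driver's real mapping lives elsewhere, so treat every branch below as an illustrative assumption rather than the authoritative table:

	static enum dce_version guess_dce_version(uint32_t family, uint32_t rev)
	{
		switch (family) {
		case FAMILY_CI:
			return DCE_VERSION_8_0;          /* assumption: Sea Islands -> DCE 8.x */
		case FAMILY_CZ:
			return DCE_VERSION_11_0;         /* assumption: Carrizo/Stoney -> DCE 11.0 */
		case FAMILY_VI:
			if (ASIC_REV_IS_POLARIS10_P(rev) ||
			    ASIC_REV_IS_POLARIS11_M(rev) ||
			    ASIC_REV_IS_POLARIS12_V(rev))
				return DCE_VERSION_11_2; /* assumption: Polaris -> DCE 11.2 */
			return DCE_VERSION_10_0;         /* assumption: Tonga/Fiji -> DCE 10 */
		case FAMILY_AI:
			return DCE_VERSION_12_0;         /* assumption: Vega -> DCE 12 */
		case FAMILY_RV:
			return DCN_VERSION_1_0;          /* assumption: Raven -> DCN 1.0 */
		default:
			return DCE_VERSION_UNKNOWN;
		}
	}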
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
new file mode 100644
index 000000000000..0ff2a899b8f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_DDC_SERVICE_TYPES_H__
+#define __DAL_DDC_SERVICE_TYPES_H__
+
+#define DP_BRANCH_DEVICE_ID_1 0x0010FA
+#define DP_BRANCH_DEVICE_ID_2 0x0022B9
+#define DP_SINK_DEVICE_ID_1 0x4CE000
+#define DP_BRANCH_DEVICE_ID_3 0x00001A
+#define DP_BRANCH_DEVICE_ID_4 0x0080e1
+#define DP_BRANCH_DEVICE_ID_5 0x006037
+#define DP_SINK_DEVICE_ID_2 0x001CF8
+
+
+enum ddc_result {
+ DDC_RESULT_UNKNOWN = 0,
+ DDC_RESULT_SUCESSFULL,
+ DDC_RESULT_FAILED_CHANNEL_BUSY,
+ DDC_RESULT_FAILED_TIMEOUT,
+ DDC_RESULT_FAILED_PROTOCOL_ERROR,
+ DDC_RESULT_FAILED_NACK,
+ DDC_RESULT_FAILED_INCOMPLETE,
+ DDC_RESULT_FAILED_OPERATION,
+ DDC_RESULT_FAILED_INVALID_OPERATION,
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW
+};
+
+enum ddc_service_type {
+ DDC_SERVICE_TYPE_CONNECTOR,
+ DDC_SERVICE_TYPE_DISPLAY_PORT_MST,
+};
+
+/**
+ * display sink capability
+ */
+struct display_sink_capability {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+
+ /**********************************************************
+ capabilities going INTO SINK DEVICE (stream capabilities)
+ **********************************************************/
+ /* Dongle's downstream count. */
+ uint32_t downstrm_sink_count;
+ /* Is dongle's downstream count info field (downstrm_sink_count)
+ * valid. */
+ bool downstrm_sink_count_valid;
+
+ /* Maximum additional audio delay in microsecond (us) */
+ uint32_t additional_audio_delay;
+ /* Audio latency value in microsecond (us) */
+ uint32_t audio_latency;
+ /* Interlace video latency value in microsecond (us) */
+ uint32_t video_latency_interlace;
+ /* Progressive video latency value in microsecond (us) */
+ uint32_t video_latency_progressive;
+ /* Dongle caps: Maximum pixel clock supported over dongle for HDMI */
+ uint32_t max_hdmi_pixel_clock;
+ /* Dongle caps: Maximum deep color supported over dongle for HDMI */
+ enum dc_color_depth max_hdmi_deep_color;
+
+ /************************************************************
+ capabilities going OUT OF SOURCE DEVICE (link capabilities)
+ ************************************************************/
+ /* support for Spread Spectrum(SS) */
+ bool ss_supported;
+ /* DP link settings (laneCount, linkRate, Spread) */
+ uint32_t dp_link_lane_count;
+ uint32_t dp_link_rate;
+ uint32_t dp_link_spead;
+
+	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+	 * indicates 'Frame Sequential-to-Frame Pack' conversion capability.
+	 */
+ bool is_dp_hdmi_s3d_converter;
+ /* to check if we have queried the display capability
+ * for eDP panel already. */
+ bool is_edp_sink_cap_valid;
+
+ enum ddc_transaction_type transaction_type;
+ enum signal_type signal;
+};
+
+struct av_sync_data {
+ uint8_t av_granularity;/* DPCD 00023h */
+ uint8_t aud_dec_lat1;/* DPCD 00024h */
+ uint8_t aud_dec_lat2;/* DPCD 00025h */
+ uint8_t aud_pp_lat1;/* DPCD 00026h */
+ uint8_t aud_pp_lat2;/* DPCD 00027h */
+ uint8_t vid_inter_lat;/* DPCD 00028h */
+ uint8_t vid_prog_lat;/* DPCD 00029h */
+ uint8_t aud_del_ins1;/* DPCD 0002Bh */
+ uint8_t aud_del_ins2;/* DPCD 0002Ch */
+ uint8_t aud_del_ins3;/* DPCD 0002Dh */
+};
+
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_1[] = "mVGAa";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_1[] = "m2DVIa";
+/*Travis*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
+/*Nutmeg*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_4[] = "DpVga";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
+/*DP to Dual link DVI converter 2*/
+static const uint8_t DP_DVI_CONVERTER_ID_42[] = "v2DVIa";
+
+static const uint8_t DP_SINK_DEV_STRING_ID2_REV0[] = "\0\0\0\0\0\0";
+
+/* Identifies second generation PSR TCON from Parade: Device ID string:
+ * yy-xx-**-**-**-**
+ */
+/* xx - Hw ID high byte */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_HIGH_BYTE =
+ 0x06;
+
+/* yy - HW ID low byte, the same silicon has several package/feature flavors */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE1 =
+ 0x61;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE2 =
+ 0x62;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE3 =
+ 0x63;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE4 =
+ 0x72;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE5 =
+ 0x73;
+
+#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
new file mode 100644
index 000000000000..d8e52e3b8e3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPCD_DEFS_H__
+#define __DAL_DPCD_DEFS_H__
+
+#include <drm/drm_dp_helper.h>
+
+enum dpcd_revision {
+ DPCD_REV_10 = 0x10,
+ DPCD_REV_11 = 0x11,
+ DPCD_REV_12 = 0x12,
+ DPCD_REV_13 = 0x13,
+ DPCD_REV_14 = 0x14
+};
+
+/* these are the types stored at DOWNSTREAMPORT_PRESENT */
+enum dpcd_downstream_port_type {
+ DOWNSTREAM_DP = 0,
+ DOWNSTREAM_VGA,
+ DOWNSTREAM_DVI_HDMI,
+ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
+};
+
+enum dpcd_link_test_patterns {
+ LINK_TEST_PATTERN_NONE = 0,
+ LINK_TEST_PATTERN_COLOR_RAMP,
+ LINK_TEST_PATTERN_VERTICAL_BARS,
+ LINK_TEST_PATTERN_COLOR_SQUARES
+};
+
+enum dpcd_test_color_format {
+ TEST_COLOR_FORMAT_RGB = 0,
+ TEST_COLOR_FORMAT_YCBCR422,
+ TEST_COLOR_FORMAT_YCBCR444
+};
+
+enum dpcd_test_bit_depth {
+ TEST_BIT_DEPTH_6 = 0,
+ TEST_BIT_DEPTH_8,
+ TEST_BIT_DEPTH_10,
+ TEST_BIT_DEPTH_12,
+ TEST_BIT_DEPTH_16
+};
+
+/* PHY (encoder) test patterns.
+ * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248).
+ */
+enum dpcd_phy_test_patterns {
+ PHY_TEST_PATTERN_NONE = 0,
+ PHY_TEST_PATTERN_D10_2,
+ PHY_TEST_PATTERN_SYMBOL_ERROR,
+ PHY_TEST_PATTERN_PRBS7,
+ PHY_TEST_PATTERN_80BIT_CUSTOM,/* For DP1.2 only */
+ PHY_TEST_PATTERN_CP2520_1,
+ PHY_TEST_PATTERN_CP2520_2,
+ PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */
+};
+
+enum dpcd_test_dyn_range {
+ TEST_DYN_RANGE_VESA = 0,
+ TEST_DYN_RANGE_CEA
+};
+
+enum dpcd_audio_test_pattern {
+ AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0,/* direct HW translation */
+ AUDIO_TEST_PATTERN_SAWTOOTH
+};
+
+enum dpcd_audio_sampling_rate {
+ AUDIO_SAMPLING_RATE_32KHZ = 0,/* direct HW translation */
+ AUDIO_SAMPLING_RATE_44_1KHZ,
+ AUDIO_SAMPLING_RATE_48KHZ,
+ AUDIO_SAMPLING_RATE_88_2KHZ,
+ AUDIO_SAMPLING_RATE_96KHZ,
+ AUDIO_SAMPLING_RATE_176_4KHZ,
+ AUDIO_SAMPLING_RATE_192KHZ
+};
+
+enum dpcd_audio_channels {
+ AUDIO_CHANNELS_1 = 0,/* direct HW translation */
+ AUDIO_CHANNELS_2,
+ AUDIO_CHANNELS_3,
+ AUDIO_CHANNELS_4,
+ AUDIO_CHANNELS_5,
+ AUDIO_CHANNELS_6,
+ AUDIO_CHANNELS_7,
+ AUDIO_CHANNELS_8,
+
+ AUDIO_CHANNELS_COUNT
+};
+
+enum dpcd_audio_test_pattern_periods {
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_NOTUSED = 0,/* direct HW translation */
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_3,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_6,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_12,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_24,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_48,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_96,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_192,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_384,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_768,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_1536
+};
+
+/* This enum is for programming DPCD TRAINING_PATTERN_SET */
+enum dpcd_training_patterns {
+ DPCD_TRAINING_PATTERN_VIDEOIDLE = 0,/* direct HW translation! */
+ DPCD_TRAINING_PATTERN_1,
+ DPCD_TRAINING_PATTERN_2,
+ DPCD_TRAINING_PATTERN_3,
+ DPCD_TRAINING_PATTERN_4 = 7
+};
+
+/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus.
+ * It defines the possible PSR states.
+ */
+enum dpcd_psr_sink_states {
+ PSR_SINK_STATE_INACTIVE = 0,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SOURCE_TIMING = 1,
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB = 2,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SINK_TIMING = 3,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_TIMING_RESYNC = 4,
+ PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
+};
+
+#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
new file mode 100644
index 000000000000..3248f699daf2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_FIXED31_32_H__
+#define __DAL_FIXED31_32_H__
+
+#include "os_types.h"
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for sign,
+ * 31 bit for integer part,
+ * 32 bits for fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct fixed31_32 {
+ int64_t value;
+};
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
+static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
+static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
+static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
+
+static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
+static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
+static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
+static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
+static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg);
+static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg)
+{
+ if (__builtin_constant_p(arg)) {
+ struct fixed31_32 res;
+ BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX));
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return dal_fixed31_32_from_int_nonconst(arg);
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return dal_fixed31_32_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct fixed31_32 dal_fixed31_32_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value)
+{
+ if (dal_fixed31_32_le(arg, min_value))
+ return min_value;
+ else if (dal_fixed31_32_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift);
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct fixed31_32 dal_fixed31_32_shr(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+ res.value = arg.value >> shift;
+ return res;
+}
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_add(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_sub(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_mul(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
+ int64_t arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ dal_fixed31_32_from_int(arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, the function is verified for abs(arg) <= 1.
+ */
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1.
+ * No normalization is done.
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg);
+
+/* The following two functions are used in scaler HW programming to convert a
+ * fixed point value to the u2d19 format: 2 bits of integer part and 19 bits
+ * of fractional part. The same applies for u0d19: 0 bits of integer part and
+ * 19 bits of fractional part.
+ */
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg);
+
+#endif
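Every fixed31_32 value is simply an int64_t scaled by 2^32 (hence dal_fixed31_32_one holding 0x100000000), so the routines above compose as plain arithmetic on the scaled payload. A short illustrative use of the declarations in this header:

	/* 1.5 * 4 = 6, computed entirely in Q31.32 (illustrative values) */
	struct fixed31_32 ratio = dal_fixed31_32_from_fraction(3, 2);
	struct fixed31_32 scaled = dal_fixed31_32_mul_int(ratio, 4);
	int32_t pixels = dal_fixed31_32_round(scaled);   /* 6 */

	/* clamp keeps an intermediate result inside [0, 1] when required */
	struct fixed31_32 unit = dal_fixed31_32_clamp(ratio,
			dal_fixed31_32_zero, dal_fixed31_32_one);  /* -> 1.0 */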
diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
new file mode 100644
index 000000000000..9c70341fe026
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed32_32.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_FIXED32_32_H__
+#define __DAL_FIXED32_32_H__
+
+#include "os_types.h"
+
+struct fixed32_32 {
+ uint64_t value;
+};
+
+static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
+static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
+static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
+static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+{
+ struct fixed32_32 fx;
+
+ fx.value = (uint64_t)value<<32;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_add_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_sub_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_mul_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_div_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+
+static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value < rhs.value) ? lhs : rhs;
+}
+
+static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value > rhs.value) ? lhs : rhs;
+}
+
+static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value > rhs.value;
+}
+
+static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value > ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value < ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value <= rhs.value;
+}
+
+static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value <= ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value == rhs.value;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
+static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value)
+{
+ return value.value>>32;
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 value);
+
+#endif
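The unsigned counterpart behaves the same way, with a uint64_t payload scaled by 2^32. A brief illustrative round trip through the declarations above:

	struct fixed32_32 bw = dal_fixed32_32_from_fraction(3, 2);  /* 1.5 */

	uint32_t lo = dal_fixed32_32_floor(bw);   /* 1 */
	uint32_t hi = dal_fixed32_32_ceil(bw);    /* 2 */

	/* comparisons operate directly on the scaled value */
	bool over = dal_fixed32_32_gt_int(bw, 1); /* true: 1.5 > 1 */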
diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h
new file mode 100644
index 000000000000..e4fd31024b92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_INTERFACE_H__
+#define __DAL_GPIO_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "grph_object_defs.h"
+
+struct gpio;
+
+/* Open the handle for future use */
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get high or low from the pin */
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value);
+
+/* Set pin high or low */
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value);
+
+/* Get current mode */
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio);
+
+/* Change mode of the handle */
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get the GPIO id */
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio);
+
+/* Get the GPIO enum */
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio);
+
+/* Set the GPIO pin configuration */
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data);
+
+/* Obtain GPIO pin info */
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info);
+
+/* Obtain GPIO sync source */
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio);
+
+/* Obtain GPIO pin output state (active low or active high) */
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio);
+
+/* Close the handle */
+void dal_gpio_close(
+ struct gpio *gpio);
+
+#endif
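A minimal open/read/close sequence against this interface. The hpd_gpio handle is assumed to have been created through the GPIO service (see the following header), and GPIO_MODE_INPUT / GPIO_RESULT_OK come from gpio_types.h further below:

	uint32_t level = 0;
	bool connected = false;

	if (dal_gpio_open(hpd_gpio, GPIO_MODE_INPUT) == GPIO_RESULT_OK) {
		if (dal_gpio_get_value(hpd_gpio, &level) == GPIO_RESULT_OK)
			connected = (level != 0);   /* HPD line sampled high */
		dal_gpio_close(hpd_gpio);
	}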
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
new file mode 100644
index 000000000000..f40259bade40
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_INTERFACE_H__
+#define __DAL_GPIO_SERVICE_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "gpio_interface.h"
+#include "hw/gpio.h"
+
+struct gpio_service;
+
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state);
+
+void dal_gpio_destroy(
+ struct gpio **ptr);
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx);
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask);
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info);
+
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc);
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr);
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq);
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq);
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config);
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en);
+
+void dal_gpio_destroy_irq(
+ struct gpio **ptr);
+
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type);
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode);
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc);
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type);
+
+void dal_ddc_close(
+ struct ddc *ddc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/gpio_types.h b/drivers/gpu/drm/amd/display/include/gpio_types.h
new file mode 100644
index 000000000000..8dd46ed799e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_types.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_TYPES_H__
+#define __DAL_GPIO_TYPES_H__
+
+#define BUNDLE_A_MASK 0x00FFF000L
+#define BUNDLE_B_MASK 0x00000FFFL
+
+/*
+ * gpio_result
+ *
+ * @brief
+ * The possible return codes that the GPIO object can return.
+ * These return codes can be generated
+ * directly by the GPIO object or from the GPIOPin object.
+ */
+enum gpio_result {
+ GPIO_RESULT_OK,
+ GPIO_RESULT_NULL_HANDLE,
+ GPIO_RESULT_INVALID_DATA,
+ GPIO_RESULT_DEVICE_BUSY,
+ GPIO_RESULT_OPEN_FAILED,
+ GPIO_RESULT_ALREADY_OPENED,
+ GPIO_RESULT_NON_SPECIFIC_ERROR
+};
+
+/*
+ * @brief
+ * Used to identify the specific GPIO device
+ *
+ * @notes
+ * These constants are used as indices in a vector.
+ * Thus they should start from zero and be contiguous.
+ */
+enum gpio_id {
+ GPIO_ID_UNKNOWN = (-1),
+ GPIO_ID_DDC_DATA,
+ GPIO_ID_DDC_CLOCK,
+ GPIO_ID_GENERIC,
+ GPIO_ID_HPD,
+ GPIO_ID_GPIO_PAD,
+ GPIO_ID_VIP_PAD,
+ GPIO_ID_SYNC,
+ GPIO_ID_GSL, /* global swap lock */
+ GPIO_ID_COUNT,
+ GPIO_ID_MIN = GPIO_ID_DDC_DATA,
+ GPIO_ID_MAX = GPIO_ID_GSL
+};
+
+#define GPIO_ENUM_UNKNOWN \
+ 32
+
+struct gpio_pin_info {
+ uint32_t offset;
+ uint32_t offset_y;
+ uint32_t offset_en;
+ uint32_t offset_mask;
+
+ uint32_t mask;
+ uint32_t mask_y;
+ uint32_t mask_en;
+ uint32_t mask_mask;
+};
+
+enum gpio_pin_output_state {
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW,
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH,
+ GPIO_PIN_OUTPUT_STATE_DEFAULT = GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW
+};
+
+enum gpio_generic {
+ GPIO_GENERIC_UNKNOWN = (-1),
+ GPIO_GENERIC_A,
+ GPIO_GENERIC_B,
+ GPIO_GENERIC_C,
+ GPIO_GENERIC_D,
+ GPIO_GENERIC_E,
+ GPIO_GENERIC_F,
+ GPIO_GENERIC_G,
+ GPIO_GENERIC_COUNT,
+ GPIO_GENERIC_MIN = GPIO_GENERIC_A,
+ GPIO_GENERIC_MAX = GPIO_GENERIC_B
+};
+
+enum gpio_hpd {
+ GPIO_HPD_UNKNOWN = (-1),
+ GPIO_HPD_1,
+ GPIO_HPD_2,
+ GPIO_HPD_3,
+ GPIO_HPD_4,
+ GPIO_HPD_5,
+ GPIO_HPD_6,
+ GPIO_HPD_COUNT,
+ GPIO_HPD_MIN = GPIO_HPD_1,
+ GPIO_HPD_MAX = GPIO_HPD_6
+};
+
+enum gpio_gpio_pad {
+ GPIO_GPIO_PAD_UNKNOWN = (-1),
+ GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_1,
+ GPIO_GPIO_PAD_2,
+ GPIO_GPIO_PAD_3,
+ GPIO_GPIO_PAD_4,
+ GPIO_GPIO_PAD_5,
+ GPIO_GPIO_PAD_6,
+ GPIO_GPIO_PAD_7,
+ GPIO_GPIO_PAD_8,
+ GPIO_GPIO_PAD_9,
+ GPIO_GPIO_PAD_10,
+ GPIO_GPIO_PAD_11,
+ GPIO_GPIO_PAD_12,
+ GPIO_GPIO_PAD_13,
+ GPIO_GPIO_PAD_14,
+ GPIO_GPIO_PAD_15,
+ GPIO_GPIO_PAD_16,
+ GPIO_GPIO_PAD_17,
+ GPIO_GPIO_PAD_18,
+ GPIO_GPIO_PAD_19,
+ GPIO_GPIO_PAD_20,
+ GPIO_GPIO_PAD_21,
+ GPIO_GPIO_PAD_22,
+ GPIO_GPIO_PAD_23,
+ GPIO_GPIO_PAD_24,
+ GPIO_GPIO_PAD_25,
+ GPIO_GPIO_PAD_26,
+ GPIO_GPIO_PAD_27,
+ GPIO_GPIO_PAD_28,
+ GPIO_GPIO_PAD_29,
+ GPIO_GPIO_PAD_30,
+ GPIO_GPIO_PAD_COUNT,
+ GPIO_GPIO_PAD_MIN = GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_MAX = GPIO_GPIO_PAD_30
+};
+
+enum gpio_vip_pad {
+ GPIO_VIP_PAD_UNKNOWN = (-1),
+ /* following never used -
+ * GPIO_ID_DDC_CLOCK::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SCL,
+ /* following never used -
+ * GPIO_ID_DDC_DATA::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SDA,
+ GPIO_VIP_PAD_VHAD,
+ GPIO_VIP_PAD_VPHCTL,
+ GPIO_VIP_PAD_VIPCLK,
+ GPIO_VIP_PAD_VID,
+ GPIO_VIP_PAD_VPCLK0,
+ GPIO_VIP_PAD_DVALID,
+ GPIO_VIP_PAD_PSYNC,
+ GPIO_VIP_PAD_COUNT,
+ GPIO_VIP_PAD_MIN = GPIO_VIP_PAD_SCL,
+ GPIO_VIP_PAD_MAX = GPIO_VIP_PAD_PSYNC
+};
+
+enum gpio_sync {
+ GPIO_SYNC_UNKNOWN = (-1),
+ GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_VSYNC_A,
+ GPIO_SYNC_HSYNC_B,
+ GPIO_SYNC_VSYNC_B,
+ GPIO_SYNC_COUNT,
+ GPIO_SYNC_MIN = GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_MAX = GPIO_SYNC_VSYNC_B
+};
+
+enum gpio_gsl {
+ GPIO_GSL_UNKNOWN = (-1),
+ GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_GENLOCK_VSYNC,
+ GPIO_GSL_SWAPLOCK_A,
+ GPIO_GSL_SWAPLOCK_B,
+ GPIO_GSL_COUNT,
+ GPIO_GSL_MIN = GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_MAX = GPIO_GSL_SWAPLOCK_B
+};
+
+/*
+ * @brief
+ * Unique Id for DDC handle.
+ * Values are meaningful (used as indexes to array)
+ */
+enum gpio_ddc_line {
+ GPIO_DDC_LINE_UNKNOWN = (-1),
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA,
+ GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_I2C_PAD = GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_COUNT,
+ GPIO_DDC_LINE_MIN = GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_MAX = GPIO_DDC_LINE_I2C_PAD
+};
+
+/*
+ * @brief
+ * Identifies the mode of operation to open a GPIO device.
+ * A GPIO device (pin) can be programmed in only one of these modes at a time.
+ */
+enum gpio_mode {
+ GPIO_MODE_UNKNOWN = (-1),
+ GPIO_MODE_INPUT,
+ GPIO_MODE_OUTPUT,
+ GPIO_MODE_FAST_OUTPUT,
+ GPIO_MODE_HARDWARE,
+ GPIO_MODE_INTERRUPT
+};
+
+/*
+ * @brief
+ * Identifies the source of the signal when GPIO is in HW mode.
+ * get_signal_source() will return GPIO_SIGNAL_SOURCE_UNKNOWN
+ * when one of the following holds:
+ * 1. the GPIO is an input GPIO
+ * 2. the GPIO is not opened in HW mode
+ * 3. the GPIO does not have a fixed signal source
+ * (like DC_GenericA, which has a mux instead of a fixed source)
+ */
+enum gpio_signal_source {
+ GPIO_SIGNAL_SOURCE_UNKNOWN = (-1),
+ GPIO_SIGNAL_SOURCE_DACA_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACB_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACA_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACA_VSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_VSYNC,
+};
+
+enum gpio_stereo_source {
+ GPIO_STEREO_SOURCE_UNKNOWN = (-1),
+ GPIO_STEREO_SOURCE_D1,
+ GPIO_STEREO_SOURCE_D2,
+ GPIO_STEREO_SOURCE_D3,
+ GPIO_STEREO_SOURCE_D4,
+ GPIO_STEREO_SOURCE_D5,
+ GPIO_STEREO_SOURCE_D6
+};
+
+/*
+ * GPIO config
+ */
+
+enum gpio_config_type {
+ GPIO_CONFIG_TYPE_NONE,
+ GPIO_CONFIG_TYPE_DDC,
+ GPIO_CONFIG_TYPE_HPD,
+ GPIO_CONFIG_TYPE_GENERIC_MUX,
+ GPIO_CONFIG_TYPE_GSL_MUX,
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE
+};
+
+/* DDC configuration */
+
+enum gpio_ddc_config_type {
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT,
+ GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING
+};
+
+struct gpio_ddc_config {
+ enum gpio_ddc_config_type type;
+ bool data_en_bit_present;
+ bool clock_en_bit_present;
+};
+
+/* HPD configuration */
+
+struct gpio_hpd_config {
+ uint32_t delay_on_connect; /* milliseconds */
+ uint32_t delay_on_disconnect; /* milliseconds */
+};
+
+struct gpio_generic_mux_config {
+ bool enable_output_from_mux;
+ enum gpio_signal_source mux_select;
+ enum gpio_stereo_source stereo_select;
+};
+
+enum gpio_gsl_mux_config_type {
+ GPIO_GSL_MUX_CONFIG_TYPE_DISABLE,
+ GPIO_GSL_MUX_CONFIG_TYPE_TIMING_SYNC,
+ GPIO_GSL_MUX_CONFIG_TYPE_FLIP_SYNC
+};
+
+struct gpio_gsl_mux_config {
+ enum gpio_gsl_mux_config_type type;
+ /* Actually of type enum sync_source; kept as uint32_t here
+ * to avoid inter-component includes */
+ uint32_t gsl_group;
+};
+
+struct gpio_config_data {
+ enum gpio_config_type type;
+ union {
+ struct gpio_ddc_config ddc;
+ struct gpio_hpd_config hpd;
+ struct gpio_generic_mux_config generic_mux;
+ struct gpio_gsl_mux_config gsl_mux;
+ } config;
+};
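+
+/*
+ * Usage sketch (field values are hypothetical): fill the union member
+ * that matches 'type', e.g. for an HPD pin:
+ *
+ *	struct gpio_config_data cfg = {
+ *		.type = GPIO_CONFIG_TYPE_HPD,
+ *		.config.hpd = {
+ *			.delay_on_connect = 500,
+ *			.delay_on_disconnect = 100,
+ *		},
+ *	};
+ */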
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
new file mode 100644
index 000000000000..7a9b43f84a31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+#define __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+
+#include "grph_object_defs.h"
+
+/*
+ * #####################################################
+ * #####################################################
+ *
+ * These defines are shared between asic_control/bios_parser and other
+ * DAL components
+ *
+ * #####################################################
+ * #####################################################
+ */
+
+enum display_output_bit_depth {
+ PANEL_UNDEFINE = 0,
+ PANEL_6BIT_COLOR = 1,
+ PANEL_8BIT_COLOR = 2,
+ PANEL_10BIT_COLOR = 3,
+ PANEL_12BIT_COLOR = 4,
+ PANEL_16BIT_COLOR = 5,
+};
+
+
+/* Device type as abstracted by ATOM BIOS */
+enum dal_device_type {
+ DEVICE_TYPE_UNKNOWN = 0,
+ DEVICE_TYPE_LCD,
+ DEVICE_TYPE_CRT,
+ DEVICE_TYPE_DFP,
+ DEVICE_TYPE_CV,
+ DEVICE_TYPE_TV,
+ DEVICE_TYPE_CF,
+ DEVICE_TYPE_WIRELESS
+};
+
+/* Device ID as abstracted by ATOM BIOS */
+struct device_id {
+ enum dal_device_type device_type:16;
+ uint32_t enum_id:16; /* 1 based enum */
+ uint16_t raw_device_tag;
+};
+
+struct graphics_object_i2c_info {
+ struct gpio_info {
+ uint32_t clk_mask_register_index;
+ uint32_t clk_en_register_index;
+ uint32_t clk_y_register_index;
+ uint32_t clk_a_register_index;
+ uint32_t data_mask_register_index;
+ uint32_t data_en_register_index;
+ uint32_t data_y_register_index;
+ uint32_t data_a_register_index;
+
+ uint32_t clk_mask_shift;
+ uint32_t clk_en_shift;
+ uint32_t clk_y_shift;
+ uint32_t clk_a_shift;
+ uint32_t data_mask_shift;
+ uint32_t data_en_shift;
+ uint32_t data_y_shift;
+ uint32_t data_a_shift;
+ } gpio_info;
+
+ bool i2c_hw_assist;
+ uint32_t i2c_line;
+ uint32_t i2c_engine_id;
+ uint32_t i2c_slave_address;
+};
+
+struct graphics_object_hpd_info {
+ uint8_t hpd_int_gpio_uid;
+ uint8_t hpd_active;
+};
+
+struct connector_device_tag_info {
+ uint32_t acpi_device;
+ struct device_id dev_id;
+};
+
+struct device_timing {
+ struct misc_info {
+ uint32_t HORIZONTAL_CUT_OFF:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t H_SYNC_POLARITY:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t V_SYNC_POLARITY:1;
+ uint32_t VERTICAL_CUT_OFF:1;
+ uint32_t H_REPLICATION_BY2:1;
+ uint32_t V_REPLICATION_BY2:1;
+ uint32_t COMPOSITE_SYNC:1;
+ uint32_t INTERLACE:1;
+ uint32_t DOUBLE_CLOCK:1;
+ uint32_t RGB888:1;
+ uint32_t GREY_LEVEL:2;
+ uint32_t SPATIAL:1;
+ uint32_t TEMPORAL:1;
+ uint32_t API_ENABLED:1;
+ } misc_info;
+
+ uint32_t pixel_clk; /* in KHz */
+ uint32_t horizontal_addressable;
+ uint32_t horizontal_blanking_time;
+ uint32_t vertical_addressable;
+ uint32_t vertical_blanking_time;
+ uint32_t horizontal_sync_offset;
+ uint32_t horizontal_sync_width;
+ uint32_t vertical_sync_offset;
+ uint32_t vertical_sync_width;
+ uint32_t horizontal_border;
+ uint32_t vertical_border;
+};
+
+struct supported_refresh_rate {
+ uint32_t REFRESH_RATE_30HZ:1;
+ uint32_t REFRESH_RATE_40HZ:1;
+ uint32_t REFRESH_RATE_48HZ:1;
+ uint32_t REFRESH_RATE_50HZ:1;
+ uint32_t REFRESH_RATE_60HZ:1;
+};
+
+struct embedded_panel_info {
+ struct device_timing lcd_timing;
+ uint32_t ss_id;
+ struct supported_refresh_rate supported_rr;
+ uint32_t drr_enabled;
+ uint32_t min_drr_refresh_rate;
+ bool realtek_eDPToLVDS;
+};
+
+struct dc_firmware_info {
+ struct pll_info {
+ uint32_t crystal_frequency; /* in KHz */
+ uint32_t min_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t min_output_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_output_pxl_clk_pll_frequency; /* in KHz */
+ } pll_info;
+
+ struct firmware_feature {
+ uint32_t memory_clk_ss_percentage;
+ uint32_t engine_clk_ss_percentage;
+ } feature;
+
+ uint32_t default_display_engine_pll_frequency; /* in KHz */
+ uint32_t external_clock_source_frequency_for_dp; /* in KHz */
+ uint32_t smu_gpu_pll_output_freq; /* in KHz */
+ uint8_t min_allowed_bl_level;
+ uint8_t remote_display_config;
+ uint32_t default_memory_clk; /* in KHz */
+ uint32_t default_engine_clk; /* in KHz */
+ uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
+ uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
+
+
+};
+
+struct step_and_delay_info {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t recommended_ref_div;
+};
+
+struct spread_spectrum_info {
+ struct spread_spectrum_type {
+ bool CENTER_MODE:1;
+ bool EXTERNAL:1;
+ bool STEP_AND_DELAY_INFO:1;
+ } type;
+
+ /* in units of 0.01% when spread_percentage_divider = 100,
+ otherwise in units of 0.001% (spread_percentage_divider = 1000) */
+ uint32_t spread_spectrum_percentage;
+ uint32_t spread_percentage_divider; /* 100 or 1000 */
+ uint32_t spread_spectrum_range; /* modulation freq (HZ)*/
+
+ union {
+ struct step_and_delay_info step_and_delay_info;
+ /* For mem/engine/uvd, the clock output (VCO) frequency, in kHz.
+ For TMDS/HDMI/LVDS it is the pixel clock,
+ for DP it is the link clock (270000 or 162000) */
+ uint32_t target_clock_range; /* in KHz */
+ };
+
+};
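+
+/*
+ * Worked example (values are hypothetical): with
+ * spread_percentage_divider = 1000, spread_spectrum_percentage = 50
+ * encodes 50 / 1000 = 0.05% of spread; with spread_percentage_divider = 100
+ * the same field value would mean 0.5%.
+ */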
+
+struct graphics_object_encoder_cap_info {
+ uint32_t dp_hbr2_cap:1;
+ uint32_t dp_hbr2_validated:1;
+ /*
+ * TODO: add MST and HDMI 6G capable flags
+ */
+ uint32_t reserved:15;
+};
+
+struct din_connector_info {
+ uint32_t gpio_id;
+ bool gpio_tv_active_state;
+};
+
+/* Invalid channel mapping */
+enum { INVALID_DDI_CHANNEL_MAPPING = 0x0 };
+
+/**
+ * DDI PHY channel mapping reflecting XBAR setting
+ */
+union ddi_channel_mapping {
+ struct mapping {
+ uint8_t lane0:2; /* Mapping for lane 0 */
+ uint8_t lane1:2; /* Mapping for lane 1 */
+ uint8_t lane2:2; /* Mapping for lane 2 */
+ uint8_t lane3:2; /* Mapping for lane 3 */
+ } mapping;
+ uint8_t raw;
+};
+
+/**
+* Transmitter output configuration description
+*/
+struct transmitter_configuration_info {
+ /* DDI PHY ID for the transmitter */
+ enum transmitter transmitter_phy_id;
+ /* DDI PHY channel mapping reflecting crossbar setting */
+ union ddi_channel_mapping output_channel_mapping;
+};
+
+struct transmitter_configuration {
+ /* Configuration for the primary transmitter */
+ struct transmitter_configuration_info primary_transmitter_config;
+ /* Secondary transmitter configuration for Dual-link DVI */
+ struct transmitter_configuration_info secondary_transmitter_config;
+};
+
+/* These sizes should be sufficient to store the info coming from the BIOS */
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+#define NUMBER_OF_CSR_M3_ARB 10
+#define NUMBER_OF_DISP_CLK_VOLTAGE 4
+#define NUMBER_OF_AVAILABLE_SCLK 5
+
+struct i2c_reg_info {
+ unsigned char i2c_reg_index;
+ unsigned char i2c_reg_val;
+};
+
+struct ext_hdmi_settings {
+ unsigned char slv_addr;
+ unsigned char reg_num;
+ struct i2c_reg_info reg_settings[9];
+ unsigned char reg_num_6g;
+ struct i2c_reg_info reg_settings_6g[3];
+};
+
+
+/* V6 */
+struct integrated_info {
+ struct clock_voltage_caps {
+ /* The Voltage Index indicated by FUSE, same voltage index
+ shared with SCLK DPM fuse table */
+ uint32_t voltage_index;
+ /* Maximum clock supported with specified voltage index */
+ uint32_t max_supported_clk; /* in KHz */
+ } disp_clk_voltage[NUMBER_OF_DISP_CLK_VOLTAGE];
+
+ struct display_connection_info {
+ struct external_display_path {
+ /* A bit vector to show what devices are supported */
+ uint32_t device_tag;
+ /* 16bit device ACPI id. */
+ uint32_t device_acpi_enum;
+ /* A physical connector for displays to plug in,
+ using object connector definitions */
+ struct graphics_object_id device_connector_id;
+ /* An index into external AUX/DDC channel LUT */
+ uint8_t ext_aux_ddc_lut_index;
+ /* An index into external HPD pin LUT */
+ uint8_t ext_hpd_pin_lut_index;
+ /* external encoder object id */
+ struct graphics_object_id ext_encoder_obj_id;
+ /* XBAR mapping of the PHY channels */
+ union ddi_channel_mapping channel_mapping;
+
+ unsigned short caps;
+ } path[MAX_NUMBER_OF_EXT_DISPLAY_PATH];
+
+ uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
+ uint8_t checksum;
+ } ext_disp_conn_info; /* existing for a long time */
+
+ struct available_s_clk_list {
+ /* Maximum clock supported with specified voltage index */
+ uint32_t supported_s_clk; /* in KHz */
+ /* The Voltage Index indicated by FUSE for specified SCLK */
+ uint32_t voltage_index;
+ /* The Voltage ID indicated by FUSE for specified SCLK */
+ uint32_t voltage_id;
+ } avail_s_clk[NUMBER_OF_AVAILABLE_SCLK];
+
+ uint8_t memory_type;
+ uint8_t ma_channel_number;
+ uint32_t boot_up_engine_clock; /* in KHz */
+ uint32_t dentist_vco_freq; /* in KHz */
+ uint32_t boot_up_uma_clock; /* in KHz */
+ uint32_t boot_up_req_display_vector;
+ uint32_t other_display_misc;
+ uint32_t gpu_cap_info;
+ uint32_t sb_mmio_base_addr;
+ uint32_t system_config;
+ uint32_t cpu_cap_info;
+ uint32_t max_nb_voltage;
+ uint32_t min_nb_voltage;
+ uint32_t boot_up_nb_voltage;
+ uint32_t ext_disp_conn_info_offset;
+ uint32_t csr_m3_arb_cntl_default[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_uvd[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_fs3d[NUMBER_OF_CSR_M3_ARB];
+ uint32_t gmc_restore_reset_time;
+ uint32_t minimum_n_clk;
+ uint32_t idle_n_clk;
+ uint32_t ddr_dll_power_up_time;
+ uint32_t ddr_pll_power_up_time;
+ /* start for V6 */
+ uint32_t pcie_clk_ss_type;
+ uint32_t lvds_ss_percentage;
+ uint32_t lvds_sspread_rate_in_10hz;
+ uint32_t hdmi_ss_percentage;
+ uint32_t hdmi_sspread_rate_in_10hz;
+ uint32_t dvi_ss_percentage;
+ uint32_t dvi_sspread_rate_in_10_hz;
+ uint32_t sclk_dpm_boost_margin;
+ uint32_t sclk_dpm_throttle_margin;
+ uint32_t sclk_dpm_tdp_limit_pg;
+ uint32_t sclk_dpm_tdp_limit_boost;
+ uint32_t boost_engine_clock;
+ uint32_t boost_vid_2bit;
+ uint32_t enable_boost;
+ uint32_t gnb_tdp_limit;
+ /* Start from V7 */
+ uint32_t max_lvds_pclk_freq_in_single_link;
+ uint32_t lvds_misc;
+ uint32_t lvds_pwr_on_seq_dig_on_to_de_in_4ms;
+ uint32_t lvds_pwr_on_seq_de_to_vary_bl_in_4ms;
+ uint32_t lvds_pwr_off_seq_vary_bl_to_de_in4ms;
+ uint32_t lvds_pwr_off_seq_de_to_dig_on_in4ms;
+ uint32_t lvds_off_to_on_delay_in_4ms;
+ uint32_t lvds_pwr_on_seq_vary_bl_to_blon_in_4ms;
+ uint32_t lvds_pwr_off_seq_blon_to_vary_bl_in_4ms;
+ uint32_t lvds_reserved1;
+ uint32_t lvds_bit_depth_control_val;
+ //Start from V9
+ unsigned char dp0_ext_hdmi_slv_addr;
+ unsigned char dp0_ext_hdmi_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_reg_settings[9];
+ unsigned char dp0_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp1_ext_hdmi_slv_addr;
+ unsigned char dp1_ext_hdmi_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_reg_settings[9];
+ unsigned char dp1_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp2_ext_hdmi_slv_addr;
+ unsigned char dp2_ext_hdmi_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_reg_settings[9];
+ unsigned char dp2_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp3_ext_hdmi_slv_addr;
+ unsigned char dp3_ext_hdmi_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
+ unsigned char dp3_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+};
+
+/**
+* Power source ids.
+*/
+enum power_source {
+ POWER_SOURCE_AC = 0,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_LIMITED_POWER,
+ POWER_SOURCE_LIMITED_POWER_2,
+ POWER_SOURCE_MAX
+};
+
+struct bios_event_info {
+ uint32_t thermal_state;
+ uint32_t backlight_level;
+ enum power_source powerSource;
+ bool has_thermal_state_changed;
+ bool has_power_source_changed;
+ bool has_forced_mode_changed;
+ bool forced_mode;
+ bool backlight_changed;
+};
+
+enum {
+ HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
+ TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
+};
+
+/*
+ * DFS-bypass flag
+ */
+/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+enum {
+ DFS_BYPASS_ENABLE = 0x10
+};
+
+enum {
+ INVALID_BACKLIGHT = -1
+};
+
+struct panel_backlight_boundaries {
+ uint32_t min_signal_level;
+ uint32_t max_signal_level;
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
new file mode 100644
index 000000000000..2941b882b0b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_DEFS_H__
+#define __DAL_GRPH_OBJECT_DEFS_H__
+
+#include "grph_object_id.h"
+
+/* ********************************************************************
+ * ********************************************************************
+ *
+ * These defines are shared between all Graphics Objects
+ *
+ * ********************************************************************
+ * ********************************************************************
+ */
+
+/* HPD unit id - HW direct translation */
+enum hpd_source_id {
+ HPD_SOURCEID1 = 0,
+ HPD_SOURCEID2,
+ HPD_SOURCEID3,
+ HPD_SOURCEID4,
+ HPD_SOURCEID5,
+ HPD_SOURCEID6,
+
+ HPD_SOURCEID_COUNT,
+ HPD_SOURCEID_UNKNOWN
+};
+
+/* DDC unit id - HW direct translation */
+enum channel_id {
+ CHANNEL_ID_UNKNOWN = 0,
+ CHANNEL_ID_DDC1,
+ CHANNEL_ID_DDC2,
+ CHANNEL_ID_DDC3,
+ CHANNEL_ID_DDC4,
+ CHANNEL_ID_DDC5,
+ CHANNEL_ID_DDC6,
+ CHANNEL_ID_DDC_VGA,
+ CHANNEL_ID_I2C_PAD,
+ CHANNEL_ID_COUNT
+};
+
+#define DECODE_CHANNEL_ID(ch_id) \
+ (ch_id) == CHANNEL_ID_DDC1 ? "CHANNEL_ID_DDC1" : \
+ (ch_id) == CHANNEL_ID_DDC2 ? "CHANNEL_ID_DDC2" : \
+ (ch_id) == CHANNEL_ID_DDC3 ? "CHANNEL_ID_DDC3" : \
+ (ch_id) == CHANNEL_ID_DDC4 ? "CHANNEL_ID_DDC4" : \
+ (ch_id) == CHANNEL_ID_DDC5 ? "CHANNEL_ID_DDC5" : \
+ (ch_id) == CHANNEL_ID_DDC6 ? "CHANNEL_ID_DDC6" : \
+ (ch_id) == CHANNEL_ID_DDC_VGA ? "CHANNEL_ID_DDC_VGA" : \
+ (ch_id) == CHANNEL_ID_I2C_PAD ? "CHANNEL_ID_I2C_PAD" : "Invalid"
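+
+/*
+ * Example: DECODE_CHANNEL_ID(CHANNEL_ID_DDC3) evaluates to the string
+ * "CHANNEL_ID_DDC3", which is handy for log messages; any value outside
+ * the known DDC/I2C channels decodes to "Invalid".
+ */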
+
+enum transmitter {
+ TRANSMITTER_UNKNOWN = (-1L),
+ TRANSMITTER_UNIPHY_A,
+ TRANSMITTER_UNIPHY_B,
+ TRANSMITTER_UNIPHY_C,
+ TRANSMITTER_UNIPHY_D,
+ TRANSMITTER_UNIPHY_E,
+ TRANSMITTER_UNIPHY_F,
+ TRANSMITTER_NUTMEG_CRT,
+ TRANSMITTER_TRAVIS_CRT,
+ TRANSMITTER_TRAVIS_LCD,
+ TRANSMITTER_UNIPHY_G,
+ TRANSMITTER_COUNT
+};
+
+/* Generic source of the synchronisation input/output signal */
+/* Can be used for flow control, stereo sync, timing sync, frame sync, etc */
+enum sync_source {
+ SYNC_SOURCE_NONE = 0,
+
+ /* Source based on controllers */
+ SYNC_SOURCE_CONTROLLER0,
+ SYNC_SOURCE_CONTROLLER1,
+ SYNC_SOURCE_CONTROLLER2,
+ SYNC_SOURCE_CONTROLLER3,
+ SYNC_SOURCE_CONTROLLER4,
+ SYNC_SOURCE_CONTROLLER5,
+
+ /* Source based on GSL group */
+ SYNC_SOURCE_GSL_GROUP0,
+ SYNC_SOURCE_GSL_GROUP1,
+ SYNC_SOURCE_GSL_GROUP2,
+
+ /* Source based on GSL IOs */
+ /* These IOs are normally used as GSL input/output */
+ SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK = SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_A,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+ SYNC_SOURCE_GSL_IO_LAST = SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+
+ /* Source based on regular IOs */
+ SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_A = SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_B,
+ SYNC_SOURCE_IO_GENERIC_C,
+ SYNC_SOURCE_IO_GENERIC_D,
+ SYNC_SOURCE_IO_GENERIC_E,
+ SYNC_SOURCE_IO_GENERIC_F,
+ SYNC_SOURCE_IO_HPD1,
+ SYNC_SOURCE_IO_HPD2,
+ SYNC_SOURCE_IO_HSYNC_A,
+ SYNC_SOURCE_IO_VSYNC_A,
+ SYNC_SOURCE_IO_HSYNC_B,
+ SYNC_SOURCE_IO_VSYNC_B,
+ SYNC_SOURCE_IO_LAST = SYNC_SOURCE_IO_VSYNC_B,
+
+ /* Misc. flow control sources */
+ SYNC_SOURCE_DUAL_GPU_PIN
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
new file mode 100644
index 000000000000..5eb2b4dc7b9c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_ID_H__
+#define __DAL_GRPH_OBJECT_ID_H__
+
+/* Types of graphics objects */
+enum object_type {
+ OBJECT_TYPE_UNKNOWN = 0,
+
+ /* Direct ATOM BIOS translation */
+ OBJECT_TYPE_GPU,
+ OBJECT_TYPE_ENCODER,
+ OBJECT_TYPE_CONNECTOR,
+ OBJECT_TYPE_ROUTER,
+ OBJECT_TYPE_GENERIC,
+
+ /* Driver specific */
+ OBJECT_TYPE_AUDIO,
+ OBJECT_TYPE_CONTROLLER,
+ OBJECT_TYPE_CLOCK_SOURCE,
+ OBJECT_TYPE_ENGINE,
+
+ OBJECT_TYPE_COUNT
+};
+
+/* Enumeration inside one type of graphics objects */
+enum object_enum_id {
+ ENUM_ID_UNKNOWN = 0,
+ ENUM_ID_1,
+ ENUM_ID_2,
+ ENUM_ID_3,
+ ENUM_ID_4,
+ ENUM_ID_5,
+ ENUM_ID_6,
+ ENUM_ID_7,
+
+ ENUM_ID_COUNT
+};
+
+/* Generic object ids */
+enum generic_id {
+ GENERIC_ID_UNKNOWN = 0,
+ GENERIC_ID_MXM_OPM,
+ GENERIC_ID_GLSYNC,
+ GENERIC_ID_STEREO,
+
+ GENERIC_ID_COUNT
+};
+
+/* Controller object ids */
+enum controller_id {
+ CONTROLLER_ID_UNDEFINED = 0,
+ CONTROLLER_ID_D0,
+ CONTROLLER_ID_D1,
+ CONTROLLER_ID_D2,
+ CONTROLLER_ID_D3,
+ CONTROLLER_ID_D4,
+ CONTROLLER_ID_D5,
+ CONTROLLER_ID_UNDERLAY0,
+ CONTROLLER_ID_MAX = CONTROLLER_ID_UNDERLAY0
+};
+
+#define IS_UNDERLAY_CONTROLLER(ctrlr_id) ((ctrlr_id) >= CONTROLLER_ID_UNDERLAY0)
+
+/*
+ * ClockSource object ids.
+ * We maintain the order matching (more or less) ATOM BIOS
+ * to improve optimized acquire
+ */
+enum clock_source_id {
+ CLOCK_SOURCE_ID_UNDEFINED = 0,
+ CLOCK_SOURCE_ID_PLL0,
+ CLOCK_SOURCE_ID_PLL1,
+ CLOCK_SOURCE_ID_PLL2,
+ CLOCK_SOURCE_ID_EXTERNAL, /* ID (Phy) ref. clk. for DP */
+ CLOCK_SOURCE_ID_DCPLL,
+ CLOCK_SOURCE_ID_DFS, /* DENTIST */
+ CLOCK_SOURCE_ID_VCE, /* VCE does not need a real PLL */
+ /* Used to distinguish between programming pixel clock and ID (Phy) clock */
+ CLOCK_SOURCE_ID_DP_DTO,
+
+ CLOCK_SOURCE_COMBO_PHY_PLL0, /*combo PHY PLL defines (DC 11.2 and up)*/
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0
+};
+
+/* Encoder object ids */
+enum encoder_id {
+ ENCODER_ID_UNKNOWN = 0,
+
+ /* Radeon Class Display Hardware */
+ ENCODER_ID_INTERNAL_LVDS,
+ ENCODER_ID_INTERNAL_TMDS1,
+ ENCODER_ID_INTERNAL_TMDS2,
+ ENCODER_ID_INTERNAL_DAC1,
+ ENCODER_ID_INTERNAL_DAC2, /* TV/CV DAC */
+
+ /* External Third Party Encoders */
+ ENCODER_ID_INTERNAL_LVTM1, /* not used for Radeon */
+ ENCODER_ID_INTERNAL_HDMI,
+
+ /* Kaleidoscope (KLDSCP) Class Display Hardware */
+ ENCODER_ID_INTERNAL_KLDSCP_TMDS1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC2, /* Shared with CV/TV and CRT */
+ /* External TMDS (dual link) */
+ ENCODER_ID_EXTERNAL_MVPU_FPGA, /* MVPU FPGA chip */
+ ENCODER_ID_INTERNAL_DDI,
+ ENCODER_ID_INTERNAL_UNIPHY,
+ ENCODER_ID_INTERNAL_KLDSCP_LVTMA,
+ ENCODER_ID_INTERNAL_UNIPHY1,
+ ENCODER_ID_INTERNAL_UNIPHY2,
+ ENCODER_ID_EXTERNAL_NUTMEG,
+ ENCODER_ID_EXTERNAL_TRAVIS,
+
+ ENCODER_ID_INTERNAL_WIRELESS, /* Internal wireless display encoder */
+ ENCODER_ID_INTERNAL_UNIPHY3,
+ ENCODER_ID_INTERNAL_VIRTUAL,
+};
+
+/* Connector object ids */
+enum connector_id {
+ CONNECTOR_ID_UNKNOWN = 0,
+ CONNECTOR_ID_SINGLE_LINK_DVII = 1,
+ CONNECTOR_ID_DUAL_LINK_DVII = 2,
+ CONNECTOR_ID_SINGLE_LINK_DVID = 3,
+ CONNECTOR_ID_DUAL_LINK_DVID = 4,
+ CONNECTOR_ID_VGA = 5,
+ CONNECTOR_ID_HDMI_TYPE_A = 12,
+ CONNECTOR_ID_LVDS = 14,
+ CONNECTOR_ID_PCIE = 16,
+ CONNECTOR_ID_HARDCODE_DVI = 18,
+ CONNECTOR_ID_DISPLAY_PORT = 19,
+ CONNECTOR_ID_EDP = 20,
+ CONNECTOR_ID_MXM = 21,
+ CONNECTOR_ID_WIRELESS = 22,
+ CONNECTOR_ID_MIRACAST = 23,
+
+ CONNECTOR_ID_VIRTUAL = 100
+};
+
+/* Audio object ids */
+enum audio_id {
+ AUDIO_ID_UNKNOWN = 0,
+ AUDIO_ID_INTERNAL_AZALIA
+};
+
+/* Engine object ids */
+enum engine_id {
+ ENGINE_ID_DIGA,
+ ENGINE_ID_DIGB,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGE,
+ ENGINE_ID_DIGF,
+ ENGINE_ID_DIGG,
+ ENGINE_ID_DACA,
+ ENGINE_ID_DACB,
+ ENGINE_ID_VCE, /* wireless display pseudo-encoder */
+ ENGINE_ID_VIRTUAL,
+
+ ENGINE_ID_COUNT,
+ ENGINE_ID_UNKNOWN = (-1L)
+};
+
+enum transmitter_color_depth {
+ TRANSMITTER_COLOR_DEPTH_24 = 0, /* 8 bits */
+ TRANSMITTER_COLOR_DEPTH_30, /* 10 bits */
+ TRANSMITTER_COLOR_DEPTH_36, /* 12 bits */
+ TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
+};
+
+/*
+ *****************************************************************************
+ * graphics_object_id struct
+ *
+ * graphics_object_id is a very simple struct wrapping a 32-bit Graphics
+ * Object identification
+ *
+ * This struct should stay very simple
+ * No dependencies at all (no includes)
+ * No debug messages or asserts
+ * No #ifndef or other preprocessor directives
+ * No growth in size (no more data members)
+ *****************************************************************************
+ */
+
+struct graphics_object_id {
+ uint32_t id:8;
+ uint32_t enum_id:4;
+ uint32_t type:4;
+ uint32_t reserved:16; /* for padding. total size should be u32 */
+};
+
+/* some simple functions for convenient graphics_object_id handling */
+
+static inline struct graphics_object_id dal_graphics_object_id_init(
+ uint32_t id,
+ enum object_enum_id enum_id,
+ enum object_type type)
+{
+ struct graphics_object_id result = {
+ id, enum_id, type, 0
+ };
+
+ return result;
+}
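+
+/*
+ * Example (values are illustrative): build the id of an internal UNIPHY
+ * encoder instance:
+ *
+ *	struct graphics_object_id enc_id = dal_graphics_object_id_init(
+ *		ENCODER_ID_INTERNAL_UNIPHY, ENUM_ID_1, OBJECT_TYPE_ENCODER);
+ */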
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2);
+
+/* Based on internal data members memory layout */
+static inline uint32_t dal_graphics_object_id_to_uint(
+ struct graphics_object_id id)
+{
+ return id.id + (id.enum_id << 0x8) + (id.type << 0xc);
+}
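+
+/*
+ * The packed value mirrors the bit-field layout above: bits 0-7 hold 'id',
+ * bits 8-11 hold 'enum_id' and bits 12-15 hold 'type'. For example
+ * (hypothetical values) id = 0x05, enum_id = ENUM_ID_1 (1) and
+ * type = OBJECT_TYPE_CONNECTOR (3) pack to 0x05 + (1 << 8) + (3 << 12)
+ * = 0x3105.
+ */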
+
+static inline enum controller_id dal_graphics_object_id_get_controller_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONTROLLER)
+ return id.id;
+ return CONTROLLER_ID_UNDEFINED;
+}
+
+static inline enum clock_source_id dal_graphics_object_id_get_clock_source_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
+ return id.id;
+ return CLOCK_SOURCE_ID_UNDEFINED;
+}
+
+static inline enum encoder_id dal_graphics_object_id_get_encoder_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENCODER)
+ return id.id;
+ return ENCODER_ID_UNKNOWN;
+}
+
+static inline enum connector_id dal_graphics_object_id_get_connector_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONNECTOR)
+ return id.id;
+ return CONNECTOR_ID_UNKNOWN;
+}
+
+static inline enum audio_id dal_graphics_object_id_get_audio_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_AUDIO)
+ return id.id;
+ return AUDIO_ID_UNKNOWN;
+}
+
+static inline enum engine_id dal_graphics_object_id_get_engine_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENGINE)
+ return id.id;
+ return ENGINE_ID_UNKNOWN;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
new file mode 100644
index 000000000000..13a3c82d118f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2CAUX_INTERFACE_H__
+#define __DAL_I2CAUX_INTERFACE_H__
+
+#include "dc_types.h"
+#include "gpio_service_interface.h"
+
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+
+struct aux_payload {
+ /* set following flag to read/write I2C data,
+ * reset it to read/write DPCD data */
+ bool i2c_over_aux;
+ /* set following flag to write data,
+ * reset it to read data */
+ bool write;
+ uint32_t address;
+ uint8_t length;
+ uint8_t *data;
+};
+
+struct aux_command {
+ struct aux_payload *payloads;
+ uint8_t number_of_payloads;
+
+ /* expressed in milliseconds
+ * zero means "use default value" */
+ uint32_t defer_delay;
+
+ /* zero means "use default value" */
+ uint32_t max_defer_write_retry;
+
+ enum i2c_mot_mode mot;
+};
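+
+/*
+ * Usage sketch (addresses and sizes are hypothetical): a native-AUX read
+ * of 4 bytes of DPCD starting at offset 0x200; 'mot' is left at its
+ * zero/default value and zero defer settings mean "use default value":
+ *
+ *	uint8_t buf[4];
+ *	struct aux_payload payload = {
+ *		.i2c_over_aux = false,
+ *		.write = false,
+ *		.address = 0x200,
+ *		.length = sizeof(buf),
+ *		.data = buf,
+ *	};
+ *	struct aux_command cmd = {
+ *		.payloads = &payload,
+ *		.number_of_payloads = 1,
+ *	};
+ */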
+
+union aux_config {
+ struct {
+ uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct i2caux;
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx);
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd);
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd);
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/irq_service_interface.h b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
new file mode 100644
index 000000000000..d6ebed524daf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_INTERFACE_H__
+#define __DAL_IRQ_SERVICE_INTERFACE_H__
+
+struct irq_service_init_data {
+ struct dc_context *ctx;
+};
+
+struct irq_service;
+
+void dal_irq_service_destroy(struct irq_service **irq_service);
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable);
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source);
+
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
new file mode 100644
index 000000000000..adea1a59f620
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LINK_SERVICE_TYPES_H__
+#define __DAL_LINK_SERVICE_TYPES_H__
+
+#include "grph_object_id.h"
+#include "dal_types.h"
+#include "irq_types.h"
+
+/*struct mst_mgr_callback_object;*/
+struct ddc;
+struct irq_manager;
+
+enum {
+ MAX_CONTROLLER_NUM = 6
+};
+
+enum dp_power_state {
+ DP_POWER_STATE_D0 = 1,
+ DP_POWER_STATE_D3
+};
+
+enum edp_revision {
+ /* eDP version 1.1 or lower */
+ EDP_REVISION_11 = 0x00,
+ /* eDP version 1.2 */
+ EDP_REVISION_12 = 0x01,
+ /* eDP version 1.3 */
+ EDP_REVISION_13 = 0x02
+};
+
+enum {
+ LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+};
+
+struct link_training_settings {
+ struct dc_link_settings link_settings;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+ bool allow_invalid_msa_timing_param;
+};
+
+enum hw_dp_training_pattern {
+ HW_DP_TRAINING_PATTERN_1 = 0,
+ HW_DP_TRAINING_PATTERN_2,
+ HW_DP_TRAINING_PATTERN_3,
+ HW_DP_TRAINING_PATTERN_4
+};
+
+/*TODO: Move this enum test harness*/
+/* Test patterns*/
+enum dp_test_pattern {
+ /* Input data is passed through the scrambler
+ * and 8b10b encoder straight to the output */
+ DP_TEST_PATTERN_VIDEO_MODE = 0,
+
+ /* phy test patterns*/
+ DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_D102 = DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_SYMBOL_ERROR,
+ DP_TEST_PATTERN_PRBS7,
+ DP_TEST_PATTERN_80BIT_CUSTOM,
+ DP_TEST_PATTERN_CP2520_1,
+ DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_CP2520_3,
+
+ /* Link Training Patterns */
+ DP_TEST_PATTERN_TRAINING_PATTERN1,
+ DP_TEST_PATTERN_TRAINING_PATTERN2,
+ DP_TEST_PATTERN_TRAINING_PATTERN3,
+ DP_TEST_PATTERN_TRAINING_PATTERN4,
+ DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
+
+ /* link test patterns*/
+ DP_TEST_PATTERN_COLOR_SQUARES,
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA,
+ DP_TEST_PATTERN_VERTICAL_BARS,
+ DP_TEST_PATTERN_HORIZONTAL_BARS,
+ DP_TEST_PATTERN_COLOR_RAMP,
+
+ /* audio test patterns*/
+ DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED,
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH,
+
+ DP_TEST_PATTERN_UNSUPPORTED
+};
+
+enum dp_panel_mode {
+ /* not required */
+ DP_PANEL_MODE_DEFAULT,
+ /* standard mode for eDP */
+ DP_PANEL_MODE_EDP,
+ /* external chips specific settings */
+ DP_PANEL_MODE_SPECIAL
+};
+
+/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
+union dpcd_training_lane_set {
+ struct {
+#if defined(LITTLEENDIAN_CPU)
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ /* following is reserved in DP 1.1 */
+ uint8_t POST_CURSOR2_SET:2;
+#elif defined(BIGENDIAN_CPU)
+ uint8_t POST_CURSOR2_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t VOLTAGE_SWING_SET:2;
+#else
+ #error ARCH not defined!
+#endif
+ } bits;
+
+ uint8_t raw;
+};
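+
+/*
+ * Example (assuming the usual LSB-first bit-field allocation on a
+ * little-endian build): raw = 0x11 decodes to VOLTAGE_SWING_SET = 1 and
+ * PRE_EMPHASIS_SET = 2, with the MAX_* and POST_CURSOR2 bits cleared.
+ */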
+
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct dp_mst_stream_allocation {
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in
+ * transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct dp_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
new file mode 100644
index 000000000000..8e1fe70097be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_INTERFACE_H__
+#define __DAL_LOGGER_INTERFACE_H__
+
+#include "logger_types.h"
+
+struct dc_context;
+struct dc_link;
+struct dc_surface_update;
+struct resource_context;
+struct dc_state;
+
+/*
+ *
+ * DAL logger functionality
+ *
+ */
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
+
+uint32_t dal_logger_destroy(struct dal_logger **logger);
+
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
+
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...);
+
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...);
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry,
+ enum dc_log_type log_type);
+
+void dm_logger_close(struct log_entry *entry);
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...);
+
+void logger_write(struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ void *paralist);
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count);
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count);
+
+void post_surface_trace(struct dc *dc);
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx);
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context);
+
+/* Any function which is empty or has an incomplete implementation should be
+ * marked with this macro.
+ * Note that the message will be printed exactly once for every function
+ * it is used in, to avoid repeating the same message. */
+#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
+{ \
+ static bool print_not_impl = true; \
+\
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ dm_logger_write(ctx->logger, LOG_WARNING, \
+ "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+}
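+
+/*
+ * Example (the function name is a placeholder; a 'ctx' with a valid logger
+ * must be in scope because the macro references ctx->logger directly):
+ *
+ *	void some_unimplemented_hw_func(struct dc_context *ctx)
+ *	{
+ *		DAL_LOGGER_NOT_IMPL("%s\n", __func__);
+ *	}
+ */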
+
+/******************************************************************************
+ * Convenience macros to save on typing.
+ *****************************************************************************/
+
+#define DC_ERROR(...) \
+ dm_logger_write(dc_ctx->logger, LOG_ERROR, \
+ __VA_ARGS__)
+
+#define DC_SYNC_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_SYNC, \
+ __VA_ARGS__)
+
+/* Connectivity log format:
+ * [time stamp] [drm] [Major_minor] [connector name] message.....
+ * eg:
+ * [ 26.590965] [drm] [Conn_LKTN] [DP-1] HBRx4 pass VS=0, PE=0^
+ * [ 26.881060] [drm] [Conn_Mode] [DP-1] {2560x1080, 2784x1111@185580Khz}^
+ */
+
+#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_DETECTION, ##__VA_ARGS__)
+
+#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+
+#define CONN_MSG_LT(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+
+#define CONN_MSG_MODE(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+
+/*
+ * Display Test Next logging
+ */
+#define DTN_INFO_BEGIN() \
+ dm_dtn_log_begin(dc_ctx)
+
+#define DTN_INFO(msg, ...) \
+ dm_dtn_log_append_v(dc_ctx, msg, ##__VA_ARGS__)
+
+#define DTN_INFO_END() \
+ dm_dtn_log_end(dc_ctx)
+
+#define PERFORMANCE_TRACE_START() \
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
+ unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
+ unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
+ if (dc->debug.performance_trace) {\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
+ dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
+ dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
+ }
+
+#define PERFORMANCE_TRACE_END() do {\
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
+ if (dc->debug.performance_trace) {\
+ dm_logger_write(dc->ctx->logger, \
+ LOG_PERF_TRACE, \
+ "%s duration: %llu ticks\n", __func__,\
+ perf_trc_end_stmp - perf_trc_start_stmp); \
+ if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
+ dc->ctx->logger->mask = perf_trc_start_log_msk;\
+ dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ } \
+ } \
+} while (0)
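+
+/*
+ * PERFORMANCE_TRACE_START() declares local variables that
+ * PERFORMANCE_TRACE_END() reads, so both macros must be used in the same
+ * scope. Sketch (the callee name is a placeholder; a local 'dc' pointer
+ * is assumed):
+ *
+ *	PERFORMANCE_TRACE_START();
+ *	program_expensive_hw_sequence(dc);
+ *	PERFORMANCE_TRACE_END();
+ */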
+
+#endif /* __DAL_LOGGER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
new file mode 100644
index 000000000000..e2ff8cd423d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_TYPES_H__
+#define __DAL_LOGGER_TYPES_H__
+
+#include "os_types.h"
+
+#define MAX_NAME_LEN 32
+
+struct dal_logger;
+
+enum dc_log_type {
+ LOG_ERROR = 0,
+ LOG_WARNING,
+ LOG_DEBUG,
+ LOG_DC,
+ LOG_DTN,
+ LOG_SURFACE,
+ LOG_HW_HOTPLUG,
+ LOG_HW_LINK_TRAINING,
+ LOG_HW_SET_MODE,
+ LOG_HW_RESUME_S3,
+ LOG_HW_AUDIO,
+ LOG_HW_HPD_IRQ,
+ LOG_MST,
+ LOG_SCALER,
+ LOG_BIOS,
+ LOG_BANDWIDTH_CALCS,
+ LOG_BANDWIDTH_VALIDATION,
+ LOG_I2C_AUX,
+ LOG_SYNC,
+ LOG_BACKLIGHT,
+ LOG_FEATURE_OVERRIDE,
+ LOG_DETECTION_EDID_PARSER,
+ LOG_DETECTION_DP_CAPS,
+ LOG_RESOURCE,
+ LOG_DML,
+ LOG_EVENT_MODE_SET,
+ LOG_EVENT_DETECTION,
+ LOG_EVENT_LINK_TRAINING,
+ LOG_EVENT_LINK_LOSS,
+ LOG_EVENT_UNDERFLOW,
+ LOG_IF_TRACE,
+ LOG_PERF_TRACE,
+
+ LOG_SECTION_TOTAL_COUNT
+};
+
+#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_DETECTION_EDID_PARSER))
+
+#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_WARNING) | \
+ (1 << LOG_EVENT_MODE_SET) | \
+ (1 << LOG_EVENT_DETECTION) | \
+ (1 << LOG_EVENT_LINK_TRAINING) | \
+ (1 << LOG_EVENT_LINK_LOSS) | \
+ (1 << LOG_EVENT_UNDERFLOW) | \
+ (1 << LOG_RESOURCE) | \
+ (1 << LOG_FEATURE_OVERRIDE) | \
+ (1 << LOG_DETECTION_EDID_PARSER) | \
+ (1 << LOG_DC) | \
+ (1 << LOG_HW_HOTPLUG) | \
+ (1 << LOG_HW_SET_MODE) | \
+ (1 << LOG_HW_RESUME_S3) | \
+ (1 << LOG_HW_HPD_IRQ) | \
+ (1 << LOG_SYNC) | \
+ (1 << LOG_BANDWIDTH_VALIDATION) | \
+ (1 << LOG_MST) | \
+ (1 << LOG_DETECTION_DP_CAPS) | \
+ (1 << LOG_BACKLIGHT) | \
+ (1 << LOG_I2C_AUX) | \
+ (1 << LOG_IF_TRACE) | \
+ (1 << LOG_DTN)) /* | \
+ (1 << LOG_DEBUG) | \
+ (1 << LOG_BIOS) | \
+ (1 << LOG_SURFACE) | \
+ (1 << LOG_SCALER) | \
+ (1 << LOG_DML) | \
+ (1 << LOG_HW_LINK_TRAINING) | \
+ (1 << LOG_HW_AUDIO)| \
+ (1 << LOG_BANDWIDTH_CALCS)*/
+
+union logger_flags {
+ struct {
+ uint32_t ENABLE_CONSOLE:1; /* Print to console */
+ uint32_t ENABLE_BUFFER:1; /* Print to buffer */
+ uint32_t RESERVED:30;
+ } bits;
+ uint32_t value;
+};
+
+struct log_entry {
+ struct dal_logger *logger;
+ enum dc_log_type type;
+
+ char *buf;
+ uint32_t buf_offset;
+ uint32_t max_buf_bytes;
+};
+
+/**
+* Structure for enumerating log types
+*/
+struct dc_log_type_info {
+ enum dc_log_type type;
+ char name[MAX_NAME_LEN];
+};
+
+/* Structure for keeping track of offsets, buffer, etc */
+
+#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
+
+/* The connectivity log needs to output the EDID, which needs at least
+ * 256x3 bytes, so the log line size is set to 896 to meet this requirement.
+ */
+#define LOG_MAX_LINE_SIZE 896
+
+struct dal_logger {
+
+ /* How far into the circular buffer dsat has read.
+ * The read offset should never cross the write offset. (Write '\0's
+ * over read data just to be sure?)
+ */
+ uint32_t buffer_read_offset;
+
+ /* How far into the circular buffer we have written
+ * Write offset should never cross read offset
+ */
+ uint32_t buffer_write_offset;
+
+ uint32_t open_count;
+
+ char *log_buffer; /* Pointer to malloc'ed buffer */
+ uint32_t log_buffer_size; /* Size of circular buffer */
+
+ uint32_t mask; /*array of masks for major elements*/
+
+ union logger_flags flags;
+ struct dc_context *ctx;
+};
+
+#endif /* __DAL_LOGGER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h
new file mode 100644
index 000000000000..fee2b6ffcfc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_SET_MODE_TYPES_H__
+#define __DAL_SET_MODE_TYPES_H__
+
+#include "dc_types.h"
+#include <linux/hdmi.h>
+
+/* Info frame packet status */
+enum info_frame_flag {
+ INFO_PACKET_PACKET_INVALID = 0,
+ INFO_PACKET_PACKET_VALID = 1,
+ INFO_PACKET_PACKET_RESET = 2,
+ INFO_PACKET_PACKET_UPDATE_SCAN_TYPE = 8
+};
+
+struct hdmi_info_frame_header {
+ uint8_t info_frame_type;
+ uint8_t version;
+ uint8_t length;
+};
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct info_packet_raw_data {
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t sb[28]; /* sb0~sb27 */
+};
+
+union hdmi_info_packet {
+ struct avi_info_frame {
+ struct hdmi_info_frame_header header;
+
+ uint8_t CHECK_SUM:8;
+
+ uint8_t S0_S1:2;
+ uint8_t B0_B1:2;
+ uint8_t A0:1;
+ uint8_t Y0_Y1_Y2:3;
+
+ uint8_t R0_R3:4;
+ uint8_t M0_M1:2;
+ uint8_t C0_C1:2;
+
+ uint8_t SC0_SC1:2;
+ uint8_t Q0_Q1:2;
+ uint8_t EC0_EC2:3;
+ uint8_t ITC:1;
+
+ uint8_t VIC0_VIC7:8;
+
+ uint8_t PR0_PR3:4;
+ uint8_t CN0_CN1:2;
+ uint8_t YQ0_YQ1:2;
+
+ uint16_t bar_top;
+ uint16_t bar_bottom;
+ uint16_t bar_left;
+ uint16_t bar_right;
+
+ uint8_t reserved[14];
+ } bits;
+
+ struct info_packet_raw_data packet_raw_data;
+};
+
+struct info_packet {
+ enum info_frame_flag flags;
+ union hdmi_info_packet info_packet_hdmi;
+};
+
+struct info_frame {
+ struct info_packet avi_info_packet;
+ struct info_packet gamut_packet;
+ struct info_packet vendor_info_packet;
+ struct info_packet spd_info_packet;
+};
+
+#pragma pack(pop)
+
+#endif /* __DAL_SET_MODE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
new file mode 100644
index 000000000000..b5ebde642207
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_SIGNAL_TYPES_H__
+#define __DC_SIGNAL_TYPES_H__
+
+enum signal_type {
+ SIGNAL_TYPE_NONE = 0L, /* no signal */
+ SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
+ SIGNAL_TYPE_DVI_DUAL_LINK = (1 << 1),
+ SIGNAL_TYPE_HDMI_TYPE_A = (1 << 2),
+ SIGNAL_TYPE_LVDS = (1 << 3),
+ SIGNAL_TYPE_RGB = (1 << 4),
+ SIGNAL_TYPE_DISPLAY_PORT = (1 << 5),
+ SIGNAL_TYPE_DISPLAY_PORT_MST = (1 << 6),
+ SIGNAL_TYPE_EDP = (1 << 7),
+ SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */
+};
+
+/* help functions for signal types manipulation */
+static inline bool dc_is_hdmi_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
+static inline bool dc_is_dp_sst_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP);
+}
+
+static inline bool dc_is_dp_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+static inline bool dc_is_embedded_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
+}
+
+static inline bool dc_is_dvi_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
+}
+
+static inline bool dc_is_dual_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
+}
+
+static inline bool dc_is_audio_capable_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ dc_is_hdmi_signal(signal));
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/include/vector.h b/drivers/gpu/drm/amd/display/include/vector.h
new file mode 100644
index 000000000000..8233b7c22a07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/vector.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_VECTOR_H__
+#define __DAL_VECTOR_H__
+
+struct vector {
+ uint8_t *container;
+ uint32_t struct_size;
+ uint32_t count;
+ uint32_t capacity;
+ struct dc_context *ctx;
+};
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+/* 'initial_value' is optional. If initial_value is not supplied,
+ * each "structure" in the vector will contain zeros by default. */
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size);
+
+void dal_vector_destruct(
+ struct vector *vector);
+
+void dal_vector_destroy(
+ struct vector **vector);
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector);
+
+/* dal_vector_insert_at
+ * Reallocates the container if necessary,
+ * then shifts the items to the right and inserts the new item.
+ * Returns whether the container was modified.
+ * Does not check that the index belongs to the container,
+ * since the function is private and the index is calculated
+ * either by the function itself or as get_count+1 */
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position);
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item);
+
+/* operator[] */
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index);
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index);
+
+/* create a clone (copy) of a vector */
+struct vector *dal_vector_clone(
+ const struct vector *vector_other);
+
+/* dal_vector_remove_at_index
+ * Shifts the elements to the right of the removal position one slot to
+ * the left, removing the element at the given index by overwriting it. */
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index);
+
+uint32_t dal_vector_capacity(const struct vector *vector);
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity);
+
+void dal_vector_clear(struct vector *vector);
+
+/***************************************************************************
+ * Macro definitions of TYPE-SAFE versions of vector set/get functions.
+ ***************************************************************************/
+
+#define DAL_VECTOR_INSERT_AT(vector_type, type_t) \
+ static bool vector_type##_vector_insert_at( \
+ struct vector *vector, \
+ type_t what, \
+ uint32_t position) \
+{ \
+ return dal_vector_insert_at(vector, what, position); \
+}
+
+#define DAL_VECTOR_APPEND(vector_type, type_t) \
+ static bool vector_type##_vector_append( \
+ struct vector *vector, \
+ type_t item) \
+{ \
+ return dal_vector_append(vector, item); \
+}
+
+/* Note: "type_t" is the ONLY token accepted by "checkpatch.pl" and by
+ * "checkcommit" as *return type*.
+ * For uniformity reasons "type_t" is used for all type-safe macro
+ * definitions here. */
+#define DAL_VECTOR_AT_INDEX(vector_type, type_t) \
+ static type_t vector_type##_vector_at_index( \
+ const struct vector *vector, \
+ uint32_t index) \
+{ \
+ return dal_vector_at_index(vector, index); \
+}
+
+#define DAL_VECTOR_SET_AT_INDEX(vector_type, type_t) \
+ static void vector_type##_vector_set_at_index( \
+ const struct vector *vector, \
+ type_t what, \
+ uint32_t index) \
+{ \
+ dal_vector_set_at_index(vector, what, index); \
+}
+
+#endif /* __DAL_VECTOR_H__ */
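A minimal usage sketch of the type-safe wrappers above (annotation, not part of the patch); 'struct my_item' is hypothetical. Each macro expands to a static wrapper named after the vector_type token:

    /* At file scope: generates static my_item_vector_append() and
     * my_item_vector_at_index() around the generic dal_vector_* calls. */
    DAL_VECTOR_APPEND(my_item, struct my_item *)
    DAL_VECTOR_AT_INDEX(my_item, struct my_item *)

    /* In a function: elements are stored by value, struct_size bytes each. */
    struct vector *v = dal_vector_create(ctx, 8, sizeof(struct my_item));
    struct my_item item = { 0 };
    struct my_item *first;

    my_item_vector_append(v, &item);       /* copies the element into the container */
    first = my_item_vector_at_index(v, 0); /* returns a pointer into the container */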
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
new file mode 100644
index 000000000000..db8e0ff6d7a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the 'freesync' sub-module of DAL.
+#
+
+FREESYNC = freesync.o
+
+AMD_DAL_FREESYNC = $(addprefix $(AMDDALPATH)/modules/freesync/,$(FREESYNC))
+#$(info ************ DAL-FREE SYNC_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_FREESYNC)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
new file mode 100644
index 000000000000..4d7db4aa28e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -0,0 +1,1483 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "mod_freesync.h"
+#include "core_types.h"
+
+#define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32
+
+/* Refresh rate ramp at a fixed rate of 65 Hz/second */
+#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
+/* Number of elements in the render times cache array */
+#define RENDER_TIMES_MAX_COUNT 20
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
+/* Number of consecutive frames to check before entering/exiting fixed refresh */
+#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
+#define FIXED_REFRESH_EXIT_FRAME_COUNT 5
+
+#define FREESYNC_REGISTRY_NAME "freesync_v1"
+
+#define FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY "DalFreeSyncNoStaticForExternalDp"
+
+#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
+
+struct gradual_static_ramp {
+ bool ramp_is_active;
+ bool ramp_direction_is_up;
+ unsigned int ramp_current_frame_duration_in_ns;
+};
+
+struct time_cache {
+ /* video (48Hz feature) related */
+ unsigned int update_duration_in_ns;
+
+ /* BTR/fixed refresh related */
+ unsigned int prev_time_stamp_in_us;
+
+ unsigned int min_render_time_in_us;
+ unsigned int max_render_time_in_us;
+
+ unsigned int render_times_index;
+ unsigned int render_times[RENDER_TIMES_MAX_COUNT];
+};
+
+struct below_the_range {
+ bool btr_active;
+ bool program_btr;
+
+ unsigned int mid_point_in_us;
+
+ unsigned int inserted_frame_duration_in_us;
+ unsigned int frames_to_insert;
+ unsigned int frame_counter;
+};
+
+struct fixed_refresh {
+ bool fixed_active;
+ bool program_fixed;
+ unsigned int frame_counter;
+};
+
+struct freesync_range {
+ unsigned int min_refresh;
+ unsigned int max_frame_duration;
+ unsigned int vmax;
+
+ unsigned int max_refresh;
+ unsigned int min_frame_duration;
+ unsigned int vmin;
+};
+
+struct freesync_state {
+ bool fullscreen;
+ bool static_screen;
+ bool video;
+
+ unsigned int nominal_refresh_rate_in_micro_hz;
+ bool windowed_fullscreen;
+
+ struct time_cache time;
+
+ struct gradual_static_ramp static_ramp;
+ struct below_the_range btr;
+ struct fixed_refresh fixed_refresh;
+ struct freesync_range freesync_range;
+};
+
+struct freesync_entity {
+ struct dc_stream_state *stream;
+ struct mod_freesync_caps *caps;
+ struct freesync_state state;
+ struct mod_freesync_user_enable user_enable;
+};
+
+struct freesync_registry_options {
+ bool drr_external_supported;
+ bool drr_internal_supported;
+};
+
+struct core_freesync {
+ struct mod_freesync public;
+ struct dc *dc;
+ struct freesync_entity *map;
+ int num_entities;
+ struct freesync_registry_options opts;
+};
+
+#define MOD_FREESYNC_TO_CORE(mod_freesync)\
+ container_of(mod_freesync, struct core_freesync, public)
+
+static bool check_dc_support(const struct dc *dc)
+{
+ if (dc->stream_funcs.adjust_vmin_vmax == NULL)
+ return false;
+
+ return true;
+}
+
+struct mod_freesync *mod_freesync_create(struct dc *dc)
+{
+ struct core_freesync *core_freesync =
+ kzalloc(sizeof(struct core_freesync), GFP_KERNEL);
+
+
+ struct persistent_data_flag flag;
+
+ int i, data = 0;
+
+ if (core_freesync == NULL)
+ goto fail_alloc_context;
+
+ core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+ GFP_KERNEL);
+
+ if (core_freesync->map == NULL)
+ goto fail_alloc_map;
+
+ for (i = 0; i < MOD_FREESYNC_MAX_CONCURRENT_STREAMS; i++)
+ core_freesync->map[i].stream = NULL;
+
+ core_freesync->num_entities = 0;
+
+ if (dc == NULL)
+ goto fail_construct;
+
+ core_freesync->dc = dc;
+
+ if (!check_dc_support(dc))
+ goto fail_construct;
+
+ /* Create initial module folder in registry for freesync enable data */
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+ dm_write_persistent_data(dc->ctx, NULL, FREESYNC_REGISTRY_NAME,
+ NULL, NULL, 0, &flag);
+ flag.save_per_edid = false;
+ flag.save_per_link = false;
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_internal_supported =
+ (data & 1) ? false : true;
+ }
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_external_supported =
+ (data & 1) ? false : true;
+ }
+
+ return &core_freesync->public;
+
+fail_construct:
+ kfree(core_freesync->map);
+
+fail_alloc_map:
+ kfree(core_freesync);
+
+fail_alloc_context:
+ return NULL;
+}
+
+void mod_freesync_destroy(struct mod_freesync *mod_freesync)
+{
+ if (mod_freesync != NULL) {
+ int i;
+ struct core_freesync *core_freesync =
+ MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (i = 0; i < core_freesync->num_entities; i++)
+ if (core_freesync->map[i].stream)
+ dc_stream_release(core_freesync->map[i].stream);
+
+ kfree(core_freesync->map);
+
+ kfree(core_freesync);
+ }
+}
+
+/* Given a specific dc_stream*, this function finds its equivalent entry
+ * in core_freesync->map and returns the corresponding index.
+ */
+static unsigned int map_index_from_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = 0;
+
+ for (index = 0; index < core_freesync->num_entities; index++) {
+ if (core_freesync->map[index].stream == stream) {
+ return index;
+ }
+ }
+ /* Could not find stream requested */
+ ASSERT(false);
+ return index;
+}
+
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps)
+{
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+ int persistent_freesync_enable = 0;
+ struct persistent_data_flag flag;
+ unsigned int nom_refresh_rate_uhz;
+ unsigned long long temp;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ if (core_freesync->num_entities < MOD_FREESYNC_MAX_CONCURRENT_STREAMS) {
+
+ dc_stream_retain(stream);
+
+ temp = stream->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp, stream->timing.h_total);
+ temp = div_u64(temp, stream->timing.v_total);
+
+ nom_refresh_rate_uhz = (unsigned int) temp;
+
+ core_freesync->map[core_freesync->num_entities].stream = stream;
+ core_freesync->map[core_freesync->num_entities].caps = caps;
+
+ core_freesync->map[core_freesync->num_entities].state.
+ fullscreen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_screen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ video = false;
+ core_freesync->map[core_freesync->num_entities].state.time.
+ update_duration_in_ns = 0;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_ramp.ramp_is_active = false;
+
+ /* get persistent data from registry */
+ if (dm_read_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &persistent_freesync_enable,
+ sizeof(int), &flag)) {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming =
+ (persistent_freesync_enable & 1) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static =
+ (persistent_freesync_enable & 2) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video =
+ (persistent_freesync_enable & 4) ? true : false;
+ } else {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video = false;
+ }
+
+ if (caps->supported &&
+ nom_refresh_rate_uhz >= caps->min_refresh_in_micro_hz &&
+ nom_refresh_rate_uhz <= caps->max_refresh_in_micro_hz)
+ stream->ignore_msa_timing_param = 1;
+
+ core_freesync->num_entities++;
+ return true;
+ }
+ return false;
+}
+
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream)
+{
+ int i = 0;
+ struct core_freesync *core_freesync = NULL;
+ unsigned int index = 0;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ dc_stream_release(core_freesync->map[index].stream);
+ core_freesync->map[index].stream = NULL;
+ /* To remove this entity, shift everything after down */
+ for (i = index; i < core_freesync->num_entities - 1; i++)
+ core_freesync->map[i] = core_freesync->map[i + 1];
+ core_freesync->num_entities--;
+ return true;
+}
+
+static void update_stream_freesync_context(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index;
+ struct freesync_context *ctx;
+
+ ctx = &stream->freesync_ctx;
+
+ index = map_index_from_stream(core_freesync, stream);
+
+ ctx->supported = core_freesync->map[index].caps->supported;
+ ctx->enabled = (core_freesync->map[index].user_enable.enable_for_gaming ||
+ core_freesync->map[index].user_enable.enable_for_video ||
+ core_freesync->map[index].user_enable.enable_for_static);
+ ctx->active = (core_freesync->map[index].state.fullscreen ||
+ core_freesync->map[index].state.video ||
+ core_freesync->map[index].state.static_ramp.ramp_is_active);
+ ctx->min_refresh_in_micro_hz =
+ core_freesync->map[index].caps->min_refresh_in_micro_hz;
+ ctx->nominal_refresh_in_micro_hz = core_freesync->
+ map[index].state.nominal_refresh_rate_in_micro_hz;
+
+}
+
+static void update_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ if (core_freesync->map[index].caps->supported) {
+ stream->ignore_msa_timing_param = 1;
+ update_stream_freesync_context(core_freesync, stream);
+ }
+}
+
+static void calc_freesync_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ struct freesync_state *state,
+ unsigned int min_refresh_in_uhz,
+ unsigned int max_refresh_in_uhz)
+{
+ unsigned int min_frame_duration_in_ns = 0, max_frame_duration_in_ns = 0;
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ uint32_t vtotal = stream->timing.v_total;
+
+ if ((min_refresh_in_uhz == 0) || (max_refresh_in_uhz == 0)) {
+ state->freesync_range.min_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+ state->freesync_range.max_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+
+ state->freesync_range.max_frame_duration = 0;
+ state->freesync_range.min_frame_duration = 0;
+
+ state->freesync_range.vmax = vtotal;
+ state->freesync_range.vmin = vtotal;
+
+ return;
+ }
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ max_refresh_in_uhz)));
+ max_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_in_uhz)));
+
+ state->freesync_range.min_refresh = min_refresh_in_uhz;
+ state->freesync_range.max_refresh = max_refresh_in_uhz;
+
+ state->freesync_range.max_frame_duration = max_frame_duration_in_ns;
+ state->freesync_range.min_frame_duration = min_frame_duration_in_ns;
+
+ state->freesync_range.vmax = div64_u64(div64_u64(((unsigned long long)(
+ max_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+ state->freesync_range.vmin = div64_u64(div64_u64(((unsigned long long)(
+ min_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+
+ /* vmin/vmax cannot be less than vtotal */
+ if (state->freesync_range.vmin < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmin + 1) >= vtotal);
+ state->freesync_range.vmin = vtotal;
+ }
+
+ if (state->freesync_range.vmax < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmax + 1) >= vtotal);
+ state->freesync_range.vmax = vtotal;
+ }
+
+ /* Determine whether BTR can be supported */
+ if (max_frame_duration_in_ns >=
+ 2 * min_frame_duration_in_ns)
+ core_freesync->map[index].caps->btr_supported = true;
+ else
+ core_freesync->map[index].caps->btr_supported = false;
+
+ /* Cache the time variables */
+ state->time.max_render_time_in_us =
+ max_frame_duration_in_ns / 1000;
+ state->time.min_render_time_in_us =
+ min_frame_duration_in_ns / 1000;
+ state->btr.mid_point_in_us =
+ (max_frame_duration_in_ns +
+ min_frame_duration_in_ns) / 2000;
+}
+
+static void calc_v_total_from_duration(struct dc_stream_state *stream,
+ unsigned int duration_in_ns, int *v_total_nominal)
+{
+ *v_total_nominal = div64_u64(div64_u64(((unsigned long long)(
+ duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+}
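Worked example (annotation, not part of the patch): for a standard 1080p60 CEA timing (pix_clk_khz = 148500, h_total = 2200), a 60 Hz frame duration of 16,666,667 ns gives v_total = 16,666,667 * 148,500 / 2,200 / 1,000,000 ≈ 1125, the expected vertical total for that mode.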
+
+static void calc_v_total_for_static_ramp(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ unsigned int index, int *v_total)
+{
+ unsigned int frame_duration = 0;
+
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* Calculate ratio between new and current frame duration, to 3-digit precision */
+ unsigned int frame_duration_ratio = div64_u64(1000000,
+ (1000 + div64_u64(((unsigned long long)(
+ STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
+ static_ramp_variables->ramp_current_frame_duration_in_ns),
+ 1000000000)));
+
+ /* Calculate delta between new and current frame duration in ns */
+ unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
+ static_ramp_variables->ramp_current_frame_duration_in_ns) *
+ (1000 - frame_duration_ratio)), 1000);
+
+ /* Adjust frame duration delta based on ratio between current and
+ * standard frame duration (frame duration at 60 Hz refresh rate).
+ */
+ unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
+ frame_duration_delta) * static_ramp_variables->
+ ramp_current_frame_duration_in_ns), 16666666);
+
+ /* Going to a higher refresh rate (lower frame duration) */
+ if (static_ramp_variables->ramp_direction_is_up) {
+ /* reduce frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns -=
+ ramp_rate_interpolated;
+
+ /* min frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz)));
+
+ /* adjust for frame duration below min */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns <=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ /* Going to a lower refresh rate (larger frame duration) */
+ } else {
+ /* increase frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns +=
+ ramp_rate_interpolated;
+
+ /* max frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].caps->min_refresh_in_micro_hz)));
+
+ /* adjust for frame duration above max */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns >=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ }
+
+ calc_v_total_from_duration(stream, static_ramp_variables->
+ ramp_current_frame_duration_in_ns, v_total);
+}
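Worked example (annotation, not part of the patch): the ramp delta above is (1000 / 60) * 65 = 1040 in integer arithmetic. With a current frame duration of 16,666,667 ns (60 Hz), frame_duration_ratio = 1,000,000 / 1017 = 983, so the per-frame change works out to roughly 16,666,667 * 17 / 1000 ≈ 283,000 ns, about 1 Hz of refresh-rate change per frame, which at a 60 Hz flip rate is on the order of the advertised 65 Hz per second.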
+
+static void reset_freesync_state_variables(struct freesync_state *state)
+{
+ state->static_ramp.ramp_is_active = false;
+ if (state->nominal_refresh_rate_in_micro_hz)
+ state->static_ramp.ramp_current_frame_duration_in_ns =
+ ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+ state->btr.frames_to_insert = 0;
+ state->btr.inserted_frame_duration_in_us = 0;
+ state->btr.program_btr = false;
+
+ state->fixed_refresh.fixed_active = false;
+ state->fixed_refresh.program_fixed = false;
+}
+/*
+ * Sets freesync mode on a stream depending on current freesync state.
+ */
+static bool set_freesync_on_streams(struct core_freesync *core_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ int v_total_nominal = 0, v_total_min = 0, v_total_max = 0;
+ unsigned int stream_idx, map_index = 0;
+ struct freesync_state *state;
+
+ if (num_streams == 0 || streams == NULL || num_streams > 1)
+ return false;
+
+ for (stream_idx = 0; stream_idx < num_streams; stream_idx++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_idx]);
+
+ state = &core_freesync->map[map_index].state;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Fullscreen has the topmost priority. If the
+ * fullscreen bit is set, we are in a fullscreen
+ * application where it should not matter if it is
+ * static screen. We should not check the static_screen
+ * or video bit.
+ *
+ * Special cases of fullscreen include btr and fixed
+ * refresh. BTR is programmed on every flip and involves
+ * programming the full range right before the last inserted
+ * frame. However, we do not want to program the full freesync
+ * range when fixed refresh is active, because that logic is
+ * programmed only once and this would override it.
+ */
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming == true &&
+ state->fullscreen == true &&
+ state->fixed_refresh.fixed_active == false) {
+ /* Enable freesync */
+
+ v_total_min = state->freesync_range.vmin;
+ v_total_max = state->freesync_range.vmax;
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(core_freesync->dc, streams,
+ num_streams, v_total_min,
+ v_total_max);
+
+ return true;
+
+ } else if (core_freesync->map[map_index].user_enable.
+ enable_for_video && state->video == true) {
+ /* Enable 48Hz feature */
+
+ calc_v_total_from_duration(streams[stream_idx],
+ state->time.update_duration_in_ns,
+ &v_total_nominal);
+
+ /* Program only if v_total_nominal is in range */
+ if (v_total_nominal >=
+ streams[stream_idx]->timing.v_total) {
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ }
+ return true;
+
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+
+ return true;
+ }
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+ /*
+ * We always have to reset DRR, even if the sink does not
+ * support freesync, because a former stream may have been
+ * programmed.
+ */
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+ }
+
+ }
+
+ return false;
+}
+
+static void set_static_ramp_variables(struct core_freesync *core_freesync,
+ unsigned int index, bool enable_static_screen)
+{
+ unsigned int frame_duration = 0;
+ unsigned int nominal_refresh_rate = core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz;
+ unsigned int min_refresh_rate = core_freesync->map[index].caps->
+ min_refresh_in_micro_hz;
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* If we are ENABLING static screen, refresh rate should go DOWN.
+ * If we are DISABLING static screen, refresh rate should go UP.
+ */
+ if (enable_static_screen)
+ static_ramp_variables->ramp_direction_is_up = false;
+ else
+ static_ramp_variables->ramp_direction_is_up = true;
+
+ /* If ramp is not active, set initial frame duration depending on
+ * whether we are enabling/disabling static screen mode. If the ramp is
+ * already active, ramp should continue in the opposite direction
+ * starting with the current frame duration
+ */
+ if (!static_ramp_variables->ramp_is_active) {
+ if (enable_static_screen == true) {
+ /* Going to lower refresh rate, so start from max
+ * refresh rate (min frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ nominal_refresh_rate)));
+ } else {
+ /* Going to higher refresh rate, so start from min
+ * refresh rate (max frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_rate)));
+ }
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns = frame_duration;
+
+ static_ramp_variables->ramp_is_active = true;
+ }
+}
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int index, v_total, inserted_frame_v_total = 0;
+ unsigned int min_frame_duration_in_ns, vmax, vmin = 0;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ index = map_index_from_stream(core_freesync,
+ streams[0]);
+
+ if (core_freesync->map[index].caps->supported == false)
+ return;
+
+ state = &core_freesync->map[index].state;
+
+ /* Below the Range Logic */
+
+ /* Only execute if in fullscreen mode */
+ if (state->fullscreen == true &&
+ core_freesync->map[index].user_enable.enable_for_gaming &&
+ core_freesync->map[index].caps->btr_supported &&
+ state->btr.btr_active) {
+
+ /* TODO: pass in flag for Pre-DCE12 ASIC
+ * in order for the variable frame duration to take effect,
+ * it needs to be done one VSYNC early, which is at
+ * frameCounter == 1.
+ * For DCE12 and newer, updates to V_TOTAL_MIN/MAX
+ * will take effect on the current frame
+ */
+ if (state->btr.frames_to_insert == state->btr.frame_counter) {
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ vmin = state->freesync_range.vmin;
+
+ inserted_frame_v_total = vmin;
+
+ if (min_frame_duration_in_ns / 1000)
+ inserted_frame_v_total =
+ state->btr.inserted_frame_duration_in_us *
+ vmin / (min_frame_duration_in_ns / 1000);
+
+ /* Set length of inserted frames as v_total_max */
+ vmax = inserted_frame_v_total;
+ vmin = inserted_frame_v_total;
+
+ /* Program V_TOTAL */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, vmin, vmax);
+ }
+
+ if (state->btr.frame_counter > 0)
+ state->btr.frame_counter--;
+
+ /* Restore FreeSync */
+ if (state->btr.frame_counter == 0)
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+ }
+
+ /* If in fullscreen freesync mode or in video, do not program
+ * static screen ramp values
+ */
+ if (state->fullscreen == true || state->video == true) {
+
+ state->static_ramp.ramp_is_active = false;
+
+ return;
+ }
+
+ /* Gradual Static Screen Ramping Logic */
+
+ /* Execute if ramp is active and user enabled freesync static screen */
+ if (state->static_ramp.ramp_is_active &&
+ core_freesync->map[index].user_enable.enable_for_static) {
+
+ calc_v_total_for_static_ramp(core_freesync, streams[0],
+ index, &v_total);
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync, streams[0]);
+
+ /* Program static screen ramp values */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total,
+ v_total);
+
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+}
+
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params)
+{
+ bool freesync_program_required = false;
+ unsigned int stream_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ unsigned int map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ bool is_embedded = dc_is_embedded_signal(
+ streams[stream_index]->sink->sink_signal);
+
+ struct freesync_registry_options *opts = &core_freesync->opts;
+
+ state = &core_freesync->map[map_index].state;
+
+ switch (freesync_params->state) {
+ case FREESYNC_STATE_FULLSCREEN:
+ state->fullscreen = freesync_params->enable;
+ freesync_program_required = true;
+ state->windowed_fullscreen =
+ freesync_params->windowed_fullscreen;
+ break;
+ case FREESYNC_STATE_STATIC_SCREEN:
+ /* Static screen ramp is disabled by default, but can
+ * be enabled through regkey.
+ */
+ if ((is_embedded && opts->drr_internal_supported) ||
+ (!is_embedded && opts->drr_external_supported))
+
+ if (state->static_screen !=
+ freesync_params->enable) {
+
+ /* Change the state flag */
+ state->static_screen =
+ freesync_params->enable;
+
+ /* Update static screen ramp */
+ set_static_ramp_variables(core_freesync,
+ map_index,
+ freesync_params->enable);
+ }
+ /* We program the ramp starting next VUpdate */
+ break;
+ case FREESYNC_STATE_VIDEO:
+ /* Change core variables only if there is a change*/
+ if (freesync_params->update_duration_in_ns !=
+ state->time.update_duration_in_ns) {
+
+ state->video = freesync_params->enable;
+ state->time.update_duration_in_ns =
+ freesync_params->update_duration_in_ns;
+
+ freesync_program_required = true;
+ }
+ break;
+ case FREESYNC_STATE_NONE:
+ /* handle here to avoid warning */
+ break;
+ }
+ }
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+
+ if (freesync_program_required)
+ /* Program freesync according to current state */
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
+
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->map[index].state.fullscreen) {
+ freesync_params->state = FREESYNC_STATE_FULLSCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.static_screen) {
+ freesync_params->state = FREESYNC_STATE_STATIC_SCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.video) {
+ freesync_params->state = FREESYNC_STATE_VIDEO;
+ freesync_params->enable = true;
+ } else {
+ freesync_params->state = FREESYNC_STATE_NONE;
+ freesync_params->enable = false;
+ }
+
+ freesync_params->update_duration_in_ns =
+ core_freesync->map[index].state.time.update_duration_in_ns;
+
+ freesync_params->windowed_fullscreen =
+ core_freesync->map[index].state.windowed_fullscreen;
+
+ return true;
+}
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int stream_index, map_index;
+ int persistent_data = 0;
+ struct persistent_data_flag flag;
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ for (stream_index = 0; stream_index < num_streams;
+ stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ core_freesync->map[map_index].user_enable = *user_enable;
+
+ /* Write persistent data in registry */
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming)
+ persistent_data = persistent_data | 1;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_static)
+ persistent_data = persistent_data | 2;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_video)
+ persistent_data = persistent_data | 4;
+
+ dm_write_persistent_data(dc->ctx,
+ streams[stream_index]->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable",
+ &persistent_data,
+ sizeof(int),
+ &flag);
+ }
+
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+
+ return true;
+}
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *user_enable = core_freesync->map[index].user_enable;
+
+ return true;
+}
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *is_ramp_active =
+ core_freesync->map[index].state.static_ramp.ramp_is_active;
+
+ return true;
+}
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync;
+ struct freesync_state *state;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, streams);
+ state = &core_freesync->map[index].state;
+
+ if (max_refresh == 0)
+ max_refresh = state->nominal_refresh_rate_in_micro_hz;
+
+ if (min_refresh == 0) {
+ /* Restore defaults */
+ calc_freesync_range(core_freesync, streams, state,
+ core_freesync->map[index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+ } else {
+ calc_freesync_range(core_freesync, streams,
+ state,
+ min_refresh,
+ max_refresh);
+
+ /* Program vtotal min/max */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &streams, 1,
+ state->freesync_range.vmin,
+ state->freesync_range.vmax);
+ }
+
+ if (min_refresh != 0 &&
+ dc_is_embedded_signal(streams->sink->sink_signal) &&
+ (max_refresh - min_refresh >= 10000000)) {
+ caps->supported = true;
+ caps->min_refresh_in_micro_hz = min_refresh;
+ caps->max_refresh_in_micro_hz = max_refresh;
+ }
+
+ /* Update the stream */
+ update_stream(core_freesync, streams);
+
+ return true;
+}
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *min_refresh =
+ core_freesync->map[index].state.freesync_range.min_refresh;
+ *max_refresh =
+ core_freesync->map[index].state.freesync_range.max_refresh;
+
+ return true;
+}
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *vmin =
+ core_freesync->map[index].state.freesync_range.vmin;
+ *vmax =
+ core_freesync->map[index].state.freesync_range.vmax;
+
+ return true;
+}
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+ struct crtc_position position;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->dc->stream_funcs.get_crtc_position(
+ core_freesync->dc, &stream, 1,
+ &position.vertical_count, &position.nominal_vcount)) {
+
+ *nom_v_pos = position.nominal_vcount;
+ *v_pos = position.vertical_count;
+
+ return true;
+ }
+
+ return false;
+}
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int stream_index, map_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+ unsigned long long temp = 0;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ state = &core_freesync->map[map_index].state;
+
+ /* Update the field rate for new timing */
+ temp = streams[stream_index]->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp,
+ streams[stream_index]->timing.h_total);
+ temp = div_u64(temp,
+ streams[stream_index]->timing.v_total);
+ state->nominal_refresh_rate_in_micro_hz =
+ (unsigned int) temp;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Update the stream */
+ update_stream(core_freesync, streams[stream_index]);
+
+ /* Calculate vmin/vmax and refresh rate for
+ * current mode
+ */
+ calc_freesync_range(core_freesync, *streams, state,
+ core_freesync->map[map_index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+ }
+
+ /* Program freesync according to current state */
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
+
+/* Add the timestamps to the cache and determine whether BTR programming
+ * is required, depending on the times calculated
+ */
+static void update_timestamps(struct core_freesync *core_freesync,
+ const struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ state->time.render_times[state->time.render_times_index] =
+ last_render_time_in_us;
+ state->time.render_times_index++;
+
+ if (state->time.render_times_index >= RENDER_TIMES_MAX_COUNT)
+ state->time.render_times_index = 0;
+
+ if (last_render_time_in_us + BTR_EXIT_MARGIN <
+ state->time.max_render_time_in_us) {
+
+ /* Exit Below the Range */
+ if (state->btr.btr_active) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+
+ /* Exit Fixed Refresh mode */
+ } else if (state->fixed_refresh.fixed_active) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_EXIT_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = false;
+ }
+ }
+
+ } else if (last_render_time_in_us > state->time.max_render_time_in_us) {
+
+ /* Enter Below the Range */
+ if (!state->btr.btr_active &&
+ core_freesync->map[map_index].caps->btr_supported) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = true;
+
+ /* Enter Fixed Refresh mode */
+ } else if (!state->fixed_refresh.fixed_active &&
+ !core_freesync->map[map_index].caps->btr_supported) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_ENTER_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = true;
+ }
+ }
+ }
+
+ /* When Below the Range is active, must react on every frame */
+ if (state->btr.btr_active)
+ state->btr.program_btr = true;
+}
+
+static void apply_below_the_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ unsigned int inserted_frame_duration_in_us = 0;
+ unsigned int mid_point_frames_ceil = 0;
+ unsigned int mid_point_frames_floor = 0;
+ unsigned int frame_time_in_us = 0;
+ unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
+ unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
+ unsigned int frames_to_insert = 0;
+ unsigned int min_frame_duration_in_ns = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->btr.program_btr)
+ return;
+
+ state->btr.program_btr = false;
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ /* Program BTR */
+
+ /* BTR set to "not active" so disengage */
+ if (!state->btr.btr_active) {
+ /* Restore FreeSync */
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* BTR set to "active" so engage */
+ } else {
+
+ /* Calculate the number of mid-point frames that could fit within
+ * the render time interval; take the ceiling of this value
+ */
+ mid_point_frames_ceil = (last_render_time_in_us +
+ state->btr.mid_point_in_us - 1) /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_ceil > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_ceil;
+ delta_from_mid_point_in_us_1 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Calculate the number of mid-point frames that could fit within
+ * the render time interval; take the floor of this value
+ */
+ mid_point_frames_floor = last_render_time_in_us /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_floor > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_floor;
+ delta_from_mid_point_in_us_2 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Choose number of frames to insert based on how close it
+ * can get to the mid point of the variable range.
+ */
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ frames_to_insert = mid_point_frames_ceil;
+ else
+ frames_to_insert = mid_point_frames_floor;
+
+ /* Either we've calculated the number of frames to insert,
+ * or we need to insert min duration frames
+ */
+ if (frames_to_insert > 0)
+ inserted_frame_duration_in_us = last_render_time_in_us /
+ frames_to_insert;
+
+ if (inserted_frame_duration_in_us <
+ state->time.min_render_time_in_us)
+
+ inserted_frame_duration_in_us =
+ state->time.min_render_time_in_us;
+
+ /* Cache the calculated variables */
+ state->btr.inserted_frame_duration_in_us =
+ inserted_frame_duration_in_us;
+ state->btr.frames_to_insert = frames_to_insert;
+ state->btr.frame_counter = frames_to_insert;
+
+ }
+}
+
+static void apply_fixed_refresh(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index)
+{
+ unsigned int vmin = 0, vmax = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->fixed_refresh.program_fixed)
+ return;
+
+ state->fixed_refresh.program_fixed = false;
+
+ /* Program Fixed Refresh */
+
+ /* Fixed Refresh set to "not active" so disengage */
+ if (!state->fixed_refresh.fixed_active) {
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* Fixed Refresh set to "active" so engage (fix to max) */
+ } else {
+
+ vmin = state->freesync_range.vmin;
+
+ vmax = vmin;
+
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &stream,
+ 1, vmin,
+ vmax);
+ }
+}
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp_in_us)
+{
+ unsigned int stream_index, map_index, last_render_time_in_us = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ last_render_time_in_us = curr_time_stamp_in_us -
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us;
+
+ /* Add the timestamps to the cache and determine
+ * whether BTR program is required
+ */
+ update_timestamps(core_freesync, streams[stream_index],
+ map_index, last_render_time_in_us);
+
+ if (core_freesync->map[map_index].state.fullscreen &&
+ core_freesync->map[map_index].user_enable.
+ enable_for_gaming) {
+
+ if (core_freesync->map[map_index].caps->btr_supported) {
+
+ apply_below_the_range(core_freesync,
+ streams[stream_index], map_index,
+ last_render_time_in_us);
+ } else {
+ apply_fixed_refresh(core_freesync,
+ streams[stream_index], map_index);
+ }
+ }
+
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us = curr_time_stamp_in_us;
+ }
+
+ }
+}
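Worked example of the below-the-range path above (annotation, not part of the patch), assuming a hypothetical 40-144 Hz panel: min_render_time_in_us ≈ 6944, max_render_time_in_us = 25000, so mid_point_in_us ≈ 15972. A frame rendered in 35000 us (≈ 28.6 fps) falls below the range; mid_point_frames_ceil = 3 (4306 us from the mid-point) and mid_point_frames_floor = 2 (1528 us from the mid-point), so two frames are inserted with inserted_frame_duration_in_us = 17500, an effective flip rate of about 57 Hz inside the panel's variable range.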
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
new file mode 100644
index 000000000000..84b53425f2c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_FREESYNC_H_
+#define MOD_FREESYNC_H_
+
+#include "dm_services.h"
+
+struct mod_freesync *mod_freesync_create(struct dc *dc);
+void mod_freesync_destroy(struct mod_freesync *mod_freesync);
+
+struct mod_freesync {
+ int dummy;
+};
+
+enum mod_freesync_state {
+ FREESYNC_STATE_NONE,
+ FREESYNC_STATE_FULLSCREEN,
+ FREESYNC_STATE_STATIC_SCREEN,
+ FREESYNC_STATE_VIDEO
+};
+
+enum mod_freesync_user_enable_mask {
+ FREESYNC_USER_ENABLE_STATIC = 0x1,
+ FREESYNC_USER_ENABLE_VIDEO = 0x2,
+ FREESYNC_USER_ENABLE_GAMING = 0x4
+};
+
+struct mod_freesync_user_enable {
+ bool enable_for_static;
+ bool enable_for_video;
+ bool enable_for_gaming;
+};
+
+struct mod_freesync_caps {
+ bool supported;
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int max_refresh_in_micro_hz;
+
+ bool btr_supported;
+};
+
+struct mod_freesync_params {
+ enum mod_freesync_state state;
+ bool enable;
+ unsigned int update_duration_in_ns;
+ bool windowed_fullscreen;
+};
+
+/*
+ * Add stream to be tracked by module
+ */
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps);
+
+/*
+ * Remove a stream from being tracked by the module
+ */
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream);
+
+/*
+ * Update the freesync state flags for each display and program
+ * freesync accordingly
+ */
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active);
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps);
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh);
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax);
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos);
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp_in_us);
+
+#endif
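A minimal sketch (annotation, not part of the patch) of the call sequence a display manager is expected to follow with this interface; variable names are hypothetical and error handling is omitted:

    /* Hypothetical DM-side usage of the mod_freesync interface. */
    struct mod_freesync *fs = mod_freesync_create(dc);

    mod_freesync_add_stream(fs, stream, &caps);
    mod_freesync_notify_mode_change(fs, &stream, 1);

    /* State change, e.g. entering fullscreen. */
    mod_freesync_update_state(fs, &stream, 1, &params);

    /* Per-flip and per-vsync hooks. */
    mod_freesync_pre_update_plane_addresses(fs, &stream, 1, timestamp_in_us);
    mod_freesync_handle_v_update(fs, &stream, 1);

    mod_freesync_remove_stream(fs, stream);
    mod_freesync_destroy(fs);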
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
index b39fb6821faa..4ccf9681c45d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
@@ -2283,6 +2283,10 @@
#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
#define mmDCHUBBUB_SPARE 0x0534
#define mmDCHUBBUB_SPARE_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053a
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053b
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
@@ -10361,6 +10365,8 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x287e
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2882
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2883
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
index 1e98ce86ed19..b28d4b64c05d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
@@ -9361,12 +9361,14 @@
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
@@ -39956,6 +39958,9 @@
#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0x00000F00L
#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0x0000F000L
#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0x000F0000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
//DC_DVODATA_CONFIG
#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x13
#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x14
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
index 75b660d57bdf..f730d0629020 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
@@ -1841,6 +1841,10 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x2094
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2096
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmCC_DC_MISC_STRAPS 0x2097
+#define mmCC_DC_MISC_STRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2098
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2099
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
index d8ad862b3a74..6d3162c42957 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
@@ -2447,6 +2447,14 @@
//DCCG_CBUS_WRCMD_DELAY
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY__SHIFT 0x0
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY_MASK 0x0000000FL
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+//CC_DC_MISC_STRAPS
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE_MASK 0x00000040L
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x00000700L
//DCCG_DS_DTO_INCR
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 68b417ac94dd..8c55c6e254d9 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/powerplay/inc/ \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index dc4bbcfe1243..824fb6fe54ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index 8ba75d43fba6..67fae834bc67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "pp_overdriver.h"
#include <linux/errno.h>
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
index b73d6b59ac32..08cd70c75d8b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SMU72_H
#define SMU72_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
index 98f76e925e65..b2edbc0c3c4d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SMU72_DISCRETE_H
#define SMU72_DISCRETE_H
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index b24b0f203a51..30d3089d7dba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 8bd38102b58e..283a0dc25e84 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index e4d3b4ec4e92..92ec663fdada 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -188,7 +188,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 72b22b805412..5a5427bbd70e 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
+ if (ret)
return ERR_PTR(ret);
- }
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 764d0c83710c..0afb53b1f4e9 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@ err_unload:
err_free:
drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
}
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
return 0;
drm_kms_helper_poll_disable(drm);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
hdlcd->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(hdlcd->state)) {
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
return PTR_ERR(hdlcd->state);
}
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
return 0;
drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
- pm_runtime_set_active(dev);
return 0;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index e3950a071152..56f34dfff640 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM HDLCD Controller register definition
*/
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index a18f156c8b66..ecf25cf9f9f5 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
index 068b336ba75f..c64cce325cdf 100644
--- a/drivers/gpu/drm/armada/armada_trace.c
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "armada_trace.h"
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index be245a24610f..8dbfea7a00fe 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define ARMADA_TRACE_H
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 749646ae365f..4c7375b45281 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/firmware.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
index 1d9c4e75d303..1e9ac9d6d26c 100644
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef AST_DRAM_TABLES_H
#define AST_DRAM_TABLES_H
diff --git a/drivers/gpu/drm/atmel-hlcdc/Makefile b/drivers/gpu/drm/atmel-hlcdc/Makefile
index bb5f8507a8ce..49dc89f36b73 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Makefile
+++ b/drivers/gpu/drm/atmel-hlcdc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
atmel-hlcdc-dc-y := atmel_hlcdc_crtc.o \
atmel_hlcdc_dc.o \
atmel_hlcdc_output.o \
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 76c490c3cdbc..375bf92cd04f 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/io.h>
#include <linux/console.h>
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index e3d5eb031f18..373eb28f31ed 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index b4efcbabf7f7..d034b2cb5eee 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -372,9 +372,18 @@ struct adv7511 {
};
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return 0;
+}
#endif
#ifdef CONFIG_DRM_I2C_ADV7533
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index b33d730e4d73..a20a45c0b353 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
- return ret;
+ goto err_cec_parse_dt;
adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap))
- return PTR_ERR(adv7511->cec_adap);
+ if (IS_ERR(adv7511->cec_adap)) {
+ ret = PTR_ERR(adv7511->cec_adap);
+ goto err_cec_alloc;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
/* cec soft reset */
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret) {
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
- }
- return ret;
+ if (ret)
+ goto err_cec_register;
+ return 0;
+
+err_cec_register:
+ cec_delete_adapter(adv7511->cec_adap);
+ adv7511->cec_adap = NULL;
+err_cec_alloc:
+ dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+ ret);
+err_cec_parse_dt:
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return ret == -EPROBE_DEFER ? ret : 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 0e14f1572d05..efa29db5fc2b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
struct device *dev = &i2c->dev;
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
- unsigned int offset;
unsigned int val;
int ret;
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (adv7511->type == ADV7511)
adv7511_set_link_config(adv7511, &link_config);
+ ret = adv7511_cec_init(dev, adv7511);
+ if (ret)
+ goto err_unregister_cec;
+
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
-
- offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
- ret = adv7511_cec_init(dev, adv7511, offset);
- if (ret)
- goto err_unregister_cec;
-#else
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
- ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
return 0;
err_unregister_cec:
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 0903ba574f61..75b0d3f6e4de 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -13,13 +13,37 @@
#include <linux/of_graph.h>
+struct lvds_encoder {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+ struct lvds_encoder *lvds_encoder = container_of(bridge,
+ struct lvds_encoder,
+ bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+ bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_encoder_attach,
+};
+
static int lvds_encoder_probe(struct platform_device *pdev)
{
struct device_node *port;
struct device_node *endpoint;
struct device_node *panel_node;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct lvds_encoder *lvds_encoder;
+
+ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+ GFP_KERNEL);
+ if (!lvds_encoder)
+ return -ENOMEM;
/* Locate the panel DT node. */
port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
+ lvds_encoder->panel_bridge =
+ devm_drm_panel_bridge_add(&pdev->dev,
+ panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds_encoder->panel_bridge))
+ return PTR_ERR(lvds_encoder->panel_bridge);
+
+ /* The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_encoder->bridge.of_node = pdev->dev.of_node;
+ lvds_encoder->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_encoder->bridge);
- platform_set_drvdata(pdev, bridge);
+ platform_set_drvdata(pdev, lvds_encoder);
return 0;
}
static int lvds_encoder_remove(struct platform_device *pdev)
{
- struct drm_bridge *bridge = platform_get_drvdata(pdev);
+ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&lvds_encoder->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index fd1f745c6073..63b5756f463b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DW_HDMI_AUDIO_H
#define DW_HDMI_AUDIO_H
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index bf14214fa464..b72259bf6e2f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,6 +138,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
goto err_isfr;
}
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+ if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+ hdmi->cec_clk = NULL;
+ } else if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+ ret);
+
+ hdmi->cec_clk = NULL;
+ goto err_iahb;
+ } else {
+ ret = clk_prepare_enable(hdmi->cec_clk);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+ ret);
+ goto err_iahb;
+ }
+ }
+
/* Product and revision IDs */
hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
| (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@ err_iahb:
cec_notifier_put(hdmi->cec_notifier);
clk_disable_unprepare(hdmi->iahb_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8571cfd877c5..8636e7eeb731 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -97,7 +97,7 @@
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
#define DP0_MISC 0x0658
-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
@@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
tmp = (tmp << 8) | buf[i];
i++;
if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
tmp = 0;
}
}
@@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
if (ret < 0)
goto err_dpcd_read;
- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
- goto err_dpcd_inval;
+ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
+ tc->link.base.rate = 270000;
+ }
+
+ if (tc->link.base.num_lanes > 2) {
dev_dbg(tc->dev, "Falling back to 2 lanes\n");
+ tc->link.base.num_lanes = 2;
+ }
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
if (ret < 0)
@@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
err_dpcd_read:
dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
return ret;
-err_dpcd_inval:
- dev_err(tc->dev, "invalid DPCD\n");
- return -EINVAL;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
- /* LCD Ctl Frame Size */
- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ /*
+ * LCD Ctl Frame Size
+	 * The datasheet is not clear about VSDELAY in the DPI case;
+	 * assume we do not need any delay when DPI is the source of
+	 * the sync signals.
+ */
+ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
- (hsync_len << 0)); /* Hsync */
- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
- (mode->hdisplay << 0)); /* width */
+ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
+ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
+ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
(vsync_len << 0)); /* Vsync */
tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
@@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
tc_write(DP0_VIDSYNCDELAY,
- (0x003e << 16) | /* thresh_dly */
+ (max_tu_symbol << 16) | /* thresh_dly */
(vid_sync_dly << 0));
tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
- /*
- * Recommended maximum number of symbols transferred in a transfer unit:
- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
- * (output active video bandwidth in bytes))
- * Must be less than tu_size.
- */
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ BPC_8);
return 0;
err:
@@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
unsigned int rate;
u32 dp_phy_ctrl;
int timeout;
- bool aligned;
- bool ready;
u32 value;
int ret;
u8 tmp[8];
@@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
if (ret < 0)
goto err_dpcd_read;
- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
- DP_CHANNEL_EQ_BITS)); /* Lane0 */
- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
- } while ((--timeout) && !(ready && aligned));
+ } while ((--timeout) &&
+ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
if (timeout == 0) {
/* Read DPCD 0x200-0x201 */
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
if (ret < 0)
goto err_dpcd_read;
+ dev_err(dev, "channel(s) EQ not ok\n");
dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
tmp[1]);
@@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
tmp[6]);
- if (!ready)
- dev_err(dev, "Lane0/1 not ready\n");
- if (!aligned)
- dev_err(dev, "Lane0/1 not aligned\n");
return -EAGAIN;
}
@@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static int tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- /* Accept any mode */
+	/* DPI interface clock limitation: up to 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 71d712f1b56a..b16f1d69a0bb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1225,7 +1225,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
return;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+ if (!new_crtc_state->active)
continue;
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index d34e5096887a..053044201e31 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -263,12 +263,6 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
return aux_dev;
}
-static int auxdev_wait_atomic_t(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
@@ -283,7 +277,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
- wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+ wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
minor = aux_dev->index;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 07374008f146..e56166334455 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1809,6 +1809,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
DRM_INFO("Cannot find any crtc or sizes\n");
+
+	/* First time: disable all crtcs. */
+ if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+ restore_fbdev_mode(fb_helper);
return -EAGAIN;
}
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 16c64d067e67..baccc63db106 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _DRM_TRACE_H_
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 09c1c4ff93ca..3717b3df34a4 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -367,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
@@ -436,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
+ timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
}
@@ -1019,7 +1018,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1650,7 +1649,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
return true;
}
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 15c3bfa89a79..1281c8d4fae5 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
etnaviv-y := \
etnaviv_buffer.o \
etnaviv_cmd_parser.o \
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5884ab623e0a..daee3f1196df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -760,7 +760,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
up_read(&mm->mmap_sem);
if (ret < 0) {
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return ERR_PTR(ret);
}
@@ -833,7 +833,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
}
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -867,7 +867,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
if (etnaviv_obj->pages) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- release_pages(etnaviv_obj->pages, npages, 0);
+ release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 8197e1d6ed11..e19cbe05da2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -968,9 +968,9 @@ static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
+ struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
u32 fence = gpu->completed_fence;
bool progress = false;
@@ -1765,8 +1765,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
- setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE);
priv->gpu[priv->num_gpus++] = gpu;
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
index 368218304566..c27c1484cfa9 100644
--- a/drivers/gpu/drm/etnaviv/state.xml.h
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_XML
#define STATE_XML
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
index d7146fd13943..73a97d35c51b 100644
--- a/drivers/gpu/drm/etnaviv/state_3d.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_3D_XML
#define STATE_3D_XML
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 43c73e2ed34f..60808daf7e8d 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_HI_XML
#define STATE_HI_XML
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index f663490e949d..bdf4212dde7b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 53e03f8af3d5..e6b0940b1ac2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.atomic_flush = exynos_crtc_handle_event,
};
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = (void *)arg;
+ struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
ctx->pdev = pdev;
- setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+ timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index aca34f656bea..b55c4482d0f9 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
fsl_dcu_drm_kms.o \
fsl_dcu_drm_rgb.o \
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index c1c8dc18aa53..c8f2c89be99d 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# KMS driver for the GMA500
#
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index 1d2ebb5e530f..be6dda58fcae 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -23,9 +23,9 @@
#include "psb_intel_reg.h"
#include <linux/spinlock.h>
-static void psb_lid_timer_func(unsigned long data)
+static void psb_lid_timer_func(struct timer_list *t)
{
- struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+ struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
struct drm_device *dev = (struct drm_device *)dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
@@ -77,10 +77,8 @@ void psb_lid_timer_init(struct drm_psb_private *dev_priv)
spin_lock_init(&dev_priv->lid_lock);
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
- init_timer(lid_timer);
+ timer_setup(lid_timer, psb_lid_timer_func, 0);
- lid_timer->data = (unsigned long)dev_priv;
- lid_timer->function = psb_lid_timer_func;
lid_timer->expires = jiffies + PSB_LID_DELAY;
add_timer(lid_timer);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index a77acfc1852e..b20100c18ffb 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 4d1f45acf2cd..127815253a84 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = (struct tda998x_priv *)data;
+ struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1492,8 +1492,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
- setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
- (unsigned long)priv);
+ timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
/* wake up the device: */
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6c3b0481ef82..2acf3b3c5f9d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index f5486cb94818..2641ba510a61 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c318439a659..355120865efd 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be18e4a9..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2801d70579d8..8e331142badb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e71b92..44cd5ff5e97d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f6ded475bb2c..3ac1dc97a7a0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = populate_shadow_context(workload);
if (ret)
goto err_unpin;
+ workload->shadowed = true;
+ return 0;
+
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+ return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
- workload->shadowed = true;
return 0;
err_unpin:
engine->context_unpin(engine, shadow_ctx);
-err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6c0907..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7d9b07df32fa..2cf10d17acfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1325,7 +1325,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
* because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94b23fcbc989..3a140eedfc83 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2267,8 +2267,10 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct radix_tree_iter iter;
void __rcu **slot;
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ rcu_read_unlock();
}
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e304dcbc6042..f782cf2069c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -104,12 +104,14 @@ static void lut_close(struct i915_gem_context *ctx)
kmem_cache_free(ctx->i915->luts, lut);
}
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
__i915_gem_object_release_unless_active(vma->obj);
}
+ rcu_read_unlock();
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3d7190764f10..435ed95df144 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -343,6 +343,10 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
(vma->node.start + vma->node.size - 1) >> 32)
return true;
+ if (flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
return false;
}
@@ -2100,6 +2104,11 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+ err = -EINVAL;
+ goto err;
+ }
+
syncobj = drm_syncobj_find(file, fence.handle);
if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n");
@@ -2107,6 +2116,9 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+ ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5eaa6893daaa..2af65ecf2df8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -958,10 +958,14 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
}
}
-struct sgt_dma {
+static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-};
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+ return (struct sgt_dma) { sg, addr, addr + sg->length };
+}
struct gen8_insert_pte {
u16 pml4e;
@@ -1042,11 +1046,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
@@ -1158,11 +1158,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
@@ -1869,13 +1865,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter;
+ struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = vma->pages->sgl;
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -2107,7 +2100,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages, false);
+ pagevec_init(&vm->free_pages);
}
static void i915_address_space_fini(struct i915_address_space *vm)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index ccde12d9e5f5..382a77a1097e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -549,7 +549,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -662,7 +662,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
index e2993857df37..888b7d3f04c3 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (has_transparent_hugepage()) {
struct super_block *sb = gemfs->mnt_sb;
- char options[] = "huge=within_size";
+ /* FIXME: Disabled until we get W/A for read BW issue. */
+ char options[] = "huge=never";
int flags = 0;
int err;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9cab91ddeb79..4e76768ffa95 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index f1df2bd4ecf4..463a7177997c 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2009 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 42fb436f6cdc..d1abf4bb7c81 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel ACPI functions
*
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f4a9a182868f..878acc432a4b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15196,6 +15196,23 @@ void intel_connector_unregister(struct drm_connector *connector)
intel_panel_destroy_backlight(connector);
}
+static void intel_hpd_poll_fini(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ /* First disable polling... */
+ drm_kms_helper_poll_fini(dev);
+
+ /* Then kill the work that may have been queued by hpd. */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->modeset_retry_work.func)
+ cancel_work_sync(&connector->modeset_retry_work);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15216,7 +15233,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- drm_kms_helper_poll_fini(dev);
+ intel_hpd_poll_fini(dev);
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index aa75f55eeb61..158438bb0389 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3735,9 +3735,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
}
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 47d022d48718..6c7f8bca574e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,7 +499,6 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
@@ -1737,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b8af35187d22..ea96682568e8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp)) {
+ ifbdev->preferred_bpp))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
- intel_fbdev_fini(to_i915(ifbdev->helper.dev));
- }
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev)
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+ if (ifbdev->vma)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index eb5827110d8f..49fdf09f9919 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -438,7 +438,9 @@ static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
(msgs[i + 1].flags & I2C_M_RD));
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa12a44e9a76..f4a4e9496893 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2721,9 +2721,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
- struct intel_plane_state *pristate,
- struct intel_plane_state *sprstate,
- struct intel_plane_state *curstate,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -3043,28 +3043,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
- struct intel_plane_state *pristate = NULL;
- struct intel_plane_state *sprstate = NULL;
- struct intel_plane_state *curstate = NULL;
+ struct drm_plane *plane;
+ const struct drm_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct intel_plane_state *ps;
-
- ps = intel_atomic_get_existing_plane_state(state,
- intel_plane);
- if (!ps)
- continue;
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
pristate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
sprstate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
curstate = ps;
}
@@ -3086,11 +3082,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (pipe_wm->sprites_scaled)
usable_level = 0;
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -3100,8 +3094,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, wm);
@@ -3111,13 +3105,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* register maximums since such watermarks are
* always invalid.
*/
- if (level > usable_level)
- continue;
-
- if (ilk_validate_wm_level(level, &max, wm))
- pipe_wm->wm[level] = *wm;
- else
- usable_level = level;
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6a42ed618a28..2863d5a65187 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 54a73534b37e..d7dd98a6acad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 9961b44f76ed..19c6fce837df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 828904b7d468..54fc571b1102 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,13 +271,7 @@ struct igt_wakeup {
u32 seqno;
};
-static int wait_atomic(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
-static int wait_atomic_timeout(atomic_t *p)
+static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
{
return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}
@@ -348,7 +342,7 @@ static void igt_wake_all_sync(atomic_t *ready,
atomic_set(ready, 0);
wake_up_all(wq);
- wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+ wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
atomic_set(ready, count);
atomic_set(done, count);
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 3790fdf44a1a..b26f07b55d86 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -49,9 +49,9 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
i915_sw_fence_fini(fence);
}
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = (struct timed_fence *)data;
+ struct timed_fence *tf = from_timer(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
@@ -60,7 +60,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
{
onstack_fence_init(&tf->fence);
- setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
if (time_after(expires, jiffies))
mod_timer(&tf->timer, expires);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
index 4cca4d57f52c..b5dc4e394555 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MOCK_GEM_DEVICE_H__
#define __MOCK_GEM_DEVICE_H__
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
index 9fbf67321662..20acdbee7bd0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 16ecef33e008..ab6c83caceb7 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 93c7e3f9b4a8..17d2f3a1c562 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
plane_disabling = true;
}
- if (plane_disabling) {
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ /*
+ * The flip done wait is only strictly required by imx-drm if a deferred
+ * plane disable is in-flight. As the core requires blocking commits
+ * to wait for the flip it is done here unconditionally. This keeps the
+ * workitem around a bit longer than required for the majority of
+ * non-blocking commits, but we accept that for the sake of simplicity.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+ if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index f6dd64be9cd5..f0b7556c0857 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IMX_DRM_H_
#define _IMX_DRM_H_
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 596b24ddbf65..e563ea17a827 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPUV3_PLANE_H__
#define __IPUV3_PLANE_H__
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index a78c4b483e8d..eeb155826d27 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
index a78644bea7f9..4a3e94dfa0c0 100644
--- a/drivers/gpu/drm/lib/drm_random.h
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRM_RANDOM_H__
#define __DRM_RANDOM_H__
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index e37b55a23a65..ce83c396a742 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
mediatek-drm-y := mtk_disp_color.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 3ae442a64bd6..c096a9d6bcbc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MGA Millennium (MGA2064W) functions
* MGA Mystique (MGA1064SG) functions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index d0b26dd80076..92b3844202d2 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 40f4840ef98e..970c7963ae29 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -82,9 +82,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
return NULL;
}
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -300,6 +300,5 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
- setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
- (unsigned long) a5xx_gpu);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8d4477818ec2..232201403439 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -353,9 +353,9 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
@@ -703,8 +703,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_WORK(&gpu->recover_work, recover_worker);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
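
The timer conversions above (timed_fence_wake in lib_sw_fence.c, a5xx_preempt_timer in a5xx_preempt.c and hangcheck_handler in msm_gpu.c) all follow the same mechanical pattern. A minimal before/after sketch, assuming a hypothetical struct foo that embeds its timer, with the setup calls shown as comments:

	#include <linux/timer.h>

	struct foo {
		struct timer_list timer;
	};

	/* old API: the callback gets an opaque unsigned long cookie */
	static void foo_timeout_old(unsigned long data)
	{
		struct foo *f = (struct foo *)data;
		/* ... use f ... */
	}
	/* setup_timer(&f->timer, foo_timeout_old, (unsigned long)f); */

	/* new API: the callback gets the timer itself and recovers the
	 * containing object with from_timer() (a container_of() wrapper) */
	static void foo_timeout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);
		/* ... use f ... */
	}
	/* timer_setup(&f->timer, foo_timeout, 0); */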
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index f26e44ea7389..ebf860bd59af 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 74a8795c2c2b..f74f1f2b186e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_DISPLAY_H__
#define __NV04_DISPLAY_H__
#include <subdev/bios.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
index 6d72ed38da32..1a8b45b4631f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0002_H__
#define __NVIF_CL0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
index a6a71f4ad91e..c0d5eba4f8fc 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0046_H__
#define __NVIF_CL0046_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
index 309ab8a3d9e8..d0e8f35d9e92 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL006B_H__
#define __NVIF_CL006B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 287a7d6fa480..2740278d226b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index f50866011002..989690fe3cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL506E_H__
#define __NVIF_CL506E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 0e5bbb553158..5137b6879abd 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL506F_H__
#define __NVIF_CL506F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 542d95145a67..7cdf53615d7b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL5070_H__
#define __NVIF_CL5070_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
index 12e0643b78bd..36e537218596 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507A_H__
#define __NVIF_CL507A_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
index 99e9d8c47f60..3e643b752bfc 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507B_H__
#define __NVIF_CL507B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
index 6af70dbdfd9f..fd9e336d0a24 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507C_H__
#define __NVIF_CL507C_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
index 5ab0c9e4c6a3..e994c6894e3e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507D_H__
#define __NVIF_CL507D_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
index c06209f3cac4..8082d2fde248 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507E_H__
#define __NVIF_CL507E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 7f6a8ce5a418..1a875090b251 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL826E_H__
#define __NVIF_CL826E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index c4d35522331a..e4e50cfe88f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL826F_H__
#define __NVIF_CL826F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 169161c1587f..ab0fa8adb756 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL906F_H__
#define __NVIF_CL906F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
index 4057676d2981..e4c8de6d00b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL9097_H__
#define __NVIF_CL9097_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 3e57089526e3..56f5bd81e480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLA06F_H__
#define __NVIF_CLA06F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 56aade45067d..a7c5bf572788 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLASS_H__
#define __NVIF_CLASS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index b52a8eadce01..f5df8b30c599 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLIENT_H__
#define __NVIF_CLIENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index b579633b80c0..6edb6266857e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_DEVICE_H__
#define __NVIF_DEVICE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 0c6f48d8140a..93bccd45a042 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_DRIVER_H__
#define __NVIF_DRIVER_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/event.h b/drivers/gpu/drm/nouveau/include/nvif/event.h
index 21764499b4be..ec5c924f576a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_EVENT_H__
#define __NVIF_EVENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index c2c0fc41e017..30ecd31db5df 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0000_H__
#define __NVIF_IF0000_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0001.h b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
index bd5b64125eed..ca9215262215 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0001.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0001_H__
#define __NVIF_IF0001_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
index c04c91d0b818..d9235c011196 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0002_H__
#define __NVIF_IF0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
index 0cd03efb80a1..ae30b8261b88 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0003_H__
#define __NVIF_IF0003_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0004.h b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
index bd5cd428cfd7..b35547c8ea36 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0004.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0004_H__
#define __NVIF_IF0004_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0005.h b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
index abfd373bb68b..8ed0ae101715 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0005.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0005_H__
#define __NVIF_IF0005_H__
#define NV10_NVSW_NTFY_UEVENT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 1886366457f1..b93d586a2304 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 51e2eb580809..4ed169230657 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_NOTIFY_H__
#define __NVIF_NOTIFY_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 0b54261bdefe..a2d5244ff2b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_OBJECT_H__
#define __NVIF_OBJECT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 5efdf80d5abc..fd09b2842972 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_OS_H__
#define __NOUVEAU_OS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
index 751bcf4930a7..7f0d9f6cc1e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/unpack.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_UNPACK_H__
#define __NVIF_UNPACK_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 79624f6d0a2b..757fac823a10 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
#define nvkm_client(p) container_of((p), struct nvkm_client, object)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index c59fd4e2ad5e..966d1822dd80 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEBUG_H__
#define __NVKM_DEBUG_H__
#define NV_DBG_FATAL 0
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5046e1db99ac..560265b15ec2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 7730499bfd95..ebf8473a39fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ENGINE_H__
#define __NVKM_ENGINE_H__
#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index 40429a82f792..38acbde2de4f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ENUM_H__
#define __NVKM_ENUM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index b98fe2de546a..d3c45e90a1c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index a626ce378f04..ff0fa38aee72 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 51691667b813..10eeaeebc242 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
#include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index 88971eb37afa..e2d39192fa26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IOCTL_H__
#define __NVKM_IOCTL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 13ebf4da2b96..05f505de0075 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MEMORY_H__
#define __NVKM_MEMORY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 5c1261351138..b0726c39429e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MM_H__
#define __NVKM_MM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
index 753d08c1767b..4eb82bc563f3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NOTIFY_H__
#define __NVKM_NOTIFY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 916a4b76d430..270f893cc154 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
index bd52236cc2f4..d950d5ee188b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OPROXY_H__
#define __NVKM_OPROXY_H__
#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 80fdc146e816..a34a79bacbd0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OPTION_H__
#define __NVKM_OPTION_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 1f0108fdd24a..445602d1e8d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index 78d41be20b8c..4c7f647d2dc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_PCI_H__
#define __NVKM_DEVICE_PCI_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index 8a48ca67f60d..d5d789663aca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_RAMHT_H__
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index a6c21be7537f..63df2290177f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SUBDEV_H__
#define __NVKM_SUBDEV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 7c7d91cad09a..5c102d0206a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_TEGRA_H__
#define __NVKM_DEVICE_TEGRA_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index 904820558fc0..40613983fccb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index b93f4c1a95e5..553245994450 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CE_H__
#define __NVKM_CE_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 03fa57a7c30a..72b9da2de7c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 05f9c13ab8c3..e83193d3ccab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_H__
#define __NVKM_DISP_H__
#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index b672a3b07f55..0f9c1c702ed6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index f0024fb5a5af..6427747b6f77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FALCON_H__
#define __NVKM_FALCON_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index e42d686fbd8b..c17b3a9bf8fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index c7944b19bed8..fb18f105fc43 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 257738eff9f6..4ef3d4c5e358 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
index 748ea9b7e559..985fc9490643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSENC_H__
#define __NVKM_MSENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 08516ca82e04..e03f33472486 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 85fd306021ac..760bf17ea63d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 99757ed96f76..281866d2501d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 00b2b227ff41..fe716859d4a9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVDEC_H__
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 8a819328059b..cdd68a8bab8b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 240855ad8c8d..6cce8502f9df 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_H__
#define __NVKM_PM_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 7317ef4c0207..b206b918c43e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index d3db1b1e75c4..f7d89822b905 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 096e7dbd1e65..83a17c4e11e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_H__
#define __NVKM_SW_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
index 2b0dc4c695c2..9b7d4877cf41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VIC_H__
#define __NVKM_VIC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 616ea91e03f8..53bf8aed48fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index b1fcc416732f..13c00ce6d556 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_XTENSA_H__
#define __NVKM_XTENSA_H__
#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index ffa963939e15..f6bd94c7e0f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BAR_H__
#define __NVKM_BAR_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index a72f3290528a..979e9a144e7b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BIOS_H__
#define __NVKM_BIOS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index cf202c793a1d..703a5b524b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0203_H__
#define __NVBIOS_M0203_H__
struct nvbios_M0203T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
index d34608ff241e..b4e14e45a0e8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0205_H__
#define __NVBIOS_M0205_H__
struct nvbios_M0205T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
index c7ff8d9526e7..c09376894d12 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0209_H__
#define __NVBIOS_M0209_H__
u32 nvbios_M0209Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
index 1c1c52eac97d..901d94ef11b8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_P0260_H__
#define __NVBIOS_P0260_H__
u32 nvbios_P0260Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
index 6711732b7cb1..d068586f3263 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BIT_H__
#define __NVBIOS_BIT_H__
struct bit_entry {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 3f0c7c414026..9a3f9483ee75 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BMP_H__
#define __NVBIOS_BMP_H__
static inline u16
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index 2ff64a20c0ec..a1c48c6b223b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index deb477282dde..ed9e0a6a0011 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_CONN_H__
#define __NVBIOS_CONN_H__
enum dcb_connector_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 76fe7d50a1ce..49343d276e11 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
u32 nvbios_cstepTe(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 903d117603d8..63ddc6ed897a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DCB_H__
#define __NVBIOS_DCB_H__
enum dcb_output_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index c5a6ebd5a478..423d92de0aae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DISP_H__
#define __NVBIOS_DISP_H__
u16 nvbios_disp_table(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
index b4d39df70d4e..df34b41838d6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
struct nvbios_dpout {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index bb49bd5f879e..f93e4f951f2f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_EXTDEV_H__
#define __NVBIOS_EXTDEV_H__
enum nvbios_extdev_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index a7513e8406a3..09c1d3b9d009 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_FAN_H__
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index b7a54e605469..b71a3555c64e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
enum dcb_gpio_func_name {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
index 85c529ecf9b1..ae1f7483dd28 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_I2C_H__
#define __NVBIOS_I2C_H__
enum dcb_i2c_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index e933d3eede70..e220a1ac1387 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
struct pwr_rail_resistor_t {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
index e15d63b9a5eb..893288b060de 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_IMAGE_H__
#define __NVBIOS_IMAGE_H__
struct nvbios_image {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 06ab48052128..744b1868e789 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_INIT_H__
#define __NVBIOS_INIT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
index 4e31b64c5edf..327bf9c4b703 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_MXM_H__
#define __NVBIOS_MXM_H__
u16 mxm_table(struct nvkm_bios *, u8 *ver, u8 *hdr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
index 64a59549b7ea..ee5419b7b45b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_NPDE_H__
#define __NVBIOS_NPDE_H__
struct nvbios_npdeT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
index e85931541f4f..1dffe8d6cc81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PCIR_H__
#define __NVBIOS_PCIR_H__
struct nvbios_pcirT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index 478b1c0d2089..0ee84ea6d737 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
index 5a69978d1e3b..ab964e085f02 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PLL_H__
#define __NVBIOS_PLL_H__
/*XXX: kill me */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
index 3a643df6de04..fb41ecab8f8c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PMU_H__
#define __NVBIOS_PMU_H__
struct nvbios_pmuT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
index f5f4a14c4030..ff12d810dce3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_POWER_BUDGET_H__
#define __NVBIOS_POWER_BUDGET_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index dca6c060a24f..2b87a38adb7a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_RAMCFG_H__
#define __NVBIOS_RAMCFG_H__
struct nvbios_ramcfg {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 8d8ee13721ec..471eef434b51 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_RAMMAP_H__
#define __NVBIOS_RAMMAP_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
index dd3ba960e75d..46a3b15e10ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_THERM_H__
#define __NVBIOS_THERM_H__
struct nvbios_therm_threshold {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 38188d4c9ab5..40ceabf37827 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_TIMING_H__
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index bea31cdd1dd1..67419bad584c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index f0baa2c7de09..6b36d5ecb8f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VOLT_H__
#define __NVBIOS_VOLT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
index 87f804fc3a88..36f3028d58ef 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VPSTATE_H__
#define __NVBIOS_VPSTATE_H__
struct nvbios_vpstate_header {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
index 0c0fe234ff12..d1bb5d044585 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_XPIO_H__
#define __NVBIOS_XPIO_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index 33a057c334f2..7695f7f77a06 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_H__
#define __NVKM_BUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index e5275f742977..15db75ef0189 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_H__
#define __NVKM_CLK_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 709d786f1808..40558064d589 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVINIT_H__
#define __NVKM_DEVINIT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index a00fd2e59215..adb78f7d083a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index ae201e388487..092193b7f98e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FUSE_H__
#define __NVKM_FUSE_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index 9b9c6d2f90b6..ee54899076e3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPIO_H__
#define __NVKM_GPIO_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index ce23cc6c672e..eef54e9b5d77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_H__
#define __NVKM_I2C_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 6e2b70bd2f41..919653c1d101 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IBUS_H__
#define __NVKM_IBUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index b7a9b041e130..be9475cd94fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ICCSENSE_H__
#define __NVKM_ICCSENSE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 8111c0c3c5ec..36ed520ed2d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_INSTMEM_H__
#define __NVKM_INSTMEM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 4a224fd22e48..95b611554d53 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_LTC_H__
#define __NVKM_LTC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 58f10890c3b6..61c93c86e2e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MC_H__
#define __NVKM_MC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 975c42f620a0..0760b93e9d1f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index ed0250139dae..0fd6d6f8eada 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MXM_H__
#define __NVKM_MXM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index ac2a695963c1..23803cc859fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PCI_H__
#define __NVKM_PCI_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index e7f04732a425..4bc9384046c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 9841f076da2e..b1ac47eb786e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_THERM_H__
#define __NVKM_THERM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index ff0709652f80..e9b0746826ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TIMER_H__
#define __NVKM_TIMER_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index d23209b62c25..f7d3eb647e2e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TOP_H__
#define __NVKM_TOP_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index ce5636fe2a66..312933ad7c2b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 08ef9983c643..8a0f85f5fc1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VOLT_H__
#define __NVKM_VOLT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 327747680324..36fde1ff3ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_ABI16_H__
#define __NOUVEAU_ABI16_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7459ef9943ec..5ffcb6683776 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 2f03653aff86..b86294fc99e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_ACPI_H__
#define __NOUVEAU_ACPI_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 23002bdd94a8..7b5cc5c73d20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index f29d3a72c48c..14607c16a2bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index b799f8dfb2b2..1d01a82d4b6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DEBUGFS_H__
#define __NOUVEAU_DEBUGFS_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1411bf05b89d..270ba56f2756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e86b8220a4bb..3331e82ae9e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DRV_H__
#define __NOUVEAU_DRV_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c36031aa013e..5bd8d30d1657 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index d39f845dda87..fe39998f65cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_GEM_H__
#define __NOUVEAU_GEM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index 3b9f2e5463a7..380ede26806c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_IOCTL_H__
#define __NOUVEAU_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 7226f1f60901..b5b5fe40779d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define NV04_PFB_BOOT_0 0x00100000
# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 941bf33bd249..11f6ca89769b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pagemap.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 25b0de413352..96082b696420 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_TTM_H__
#define __NOUVEAU_TTM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
index c037e3ae8c70..c68f1c65af3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_USIF_H__
#define __NOUVEAU_USIF_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 48393a4f6331..52e52a360fb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
index ea3ad6974c65..6a3000c88142 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index b7a508585304..7616c66803f8 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV10_FENCE_H_
#define __NV10_FENCE_H_
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 92d46222c79d..584466ef688f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4099,7 +4099,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
bool active = false;
@@ -4129,8 +4129,8 @@ nv50_disp_atomic_commit(struct drm_device *dev,
if (ret)
goto err_cleanup;
- for_each_old_plane_in_state(state, plane, old_plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(old_plane_state);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
if (asyw->set.image) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9ca9636a3e3..da130f5058e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f0a1cf31c7ca..0b92eb32598d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 2dce405976ad..0e3d08f11b0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CE_PRIV_H__
#define __NVKM_CE_PRIV_H__
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 1bbe76e0740a..6a62021e9861 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_ACPI_H__
#define __NVKM_DEVICE_ACPI_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index 2c3c3ee3c494..ebcc5c52fbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_CTRL_H__
#define __NVKM_DEVICE_CTRL_H__
#define nvkm_control(p) container_of((p), struct nvkm_control, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 6c16f3835f44..08d0bf605722 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_PRIV_H__
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 9bb4ad5b0e57..40681db91a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_CHAN_H__
#define __NV50_DISP_CHAN_H__
#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index de962b7b026d..090e869ae612 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_CONN_H__
#define __NVKM_DISP_CONN_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index ea4a0d062e31..f9b98211da6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_DMAC_H__
#define __NV50_DISP_DMAC_H__
#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 59173c290525..495f665a0ee6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_DP_H__
#define __NVKM_DISP_DP_H__
#define nvkm_dp(p) container_of((p), struct nvkm_dp, outp)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index e82c68f18444..d131cca999dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "hdmi.h"
void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
index 528f5621a496..45094c6e1425 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_HDMI_H__
#define __NVKM_DISP_HDMI_H__
#include "ior.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index b04c49d2eeeb..57030b3a4a75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_HEAD_H__
#define __NVKM_DISP_HEAD_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index c9e0a8f7b5d5..4548c031b937 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_IOR_H__
#define __NVKM_DISP_IOR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 6ea19466f436..eb0b8acb1c5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_H__
#define __NV50_DISP_H__
#define nv50_disp(p) container_of((p), struct nv50_disp, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 146d101d4891..ea84d7d5741a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_OUTP_H__
#define __NVKM_DISP_OUTP_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 5772f0094129..6c9bfff6d043 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_PRIV_H__
#define __NVKM_DISP_PRIV_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index b147cf5b3518..4818fa69ae6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_ROOT_H__
#define __NV50_DISP_ROOT_H__
#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index deb37ee55c0b..4307cbecd5c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_PRIV_H__
#define __NVKM_DMA_PRIV_H__
#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
index 69a7f1034024..4bbac8a21c71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_USER_H__
#define __NVKM_DMA_USER_H__
#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index d8019bdacd61..3ffef236189e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_CHAN_H__
#define __NVKM_FIFO_CHAN_H__
#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index fc1142af02cf..b653664e081b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__
#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 5beb5c628473..1208e3d9dbe2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__
#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 3361a1fd0343..15b06bdf5067 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__
#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index d853056e040b..2e3c4005b874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__
#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index b81a2ad48aa4..68f97ba03df6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_FIFO_H__
#define __GF100_FIFO_H__
#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 466f1051f91a..1579785cf941 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GK104_FIFO_H__
#define __GK104_FIFO_H__
#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index 03f60004bf7c..1d70542553cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 8ab53948cbb4..a3994e8db462 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__
#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index f889b13b5e41..ae76b1aaccd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_PRIV_H__
#define __NVKM_FIFO_PRIV_H__
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
index 92d56221197b..49892a5e7201 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_REGS_H__
#define __NV04_FIFO_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 4731e56fbb11..5199e5aa0cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRCTX_NVC0_H__
#define __NVKM_GRCTX_NVC0_H__
#include "gf100.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 50e808e9f926..4d67d90261b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRCTX_H__
#define __NVKM_GRCTX_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 12a703fe355d..0323acb739c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index ffbfc51200f1..1bb265917915 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 357f662de571..cf8343a693ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 4ffc8212a85c..f4bfa109ed27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 09196206c9bc..59a3e1b2927f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 6d7d004363d9..8daa0516704a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 7538404b8b13..cbf2351f8da8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index ce000a47ec6d..70830036ffee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 1f26cb6a233c..7f2fd84d0c3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 70436d93efe3..560063789de8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index e0933a07426a..71e85784b615 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 9b432823bcbe..d85eac6d1c61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
index 1718ae4e8224..f87693809c9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRAPH_OS_H__
#define __NVKM_GRAPH_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index d7c3d86cc99d..d5a376c4dd0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV10_GR_H__
#define __NV10_GR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d6840dc81a29..111c8bb4497b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index d0cb2b8846ec..979dc5f7b32e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
#define nv20_gr(p) container_of((p), struct nv20_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 6c4a00819b4b..e59a28a26d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 3cad26dbc2b1..e113b2d4c811 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index b4e3c50badc7..4aac2c224874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index e7ed04b935cd..301556503e93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 5e8abacbacc6..5d6926611a5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index bee8ef2d5697..731400937edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
#define nv40_gr(p) container_of((p), struct nv40_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 1ab6ea436b70..5b9d99bee207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
#define nv50_gr(p) container_of((p), struct nv50_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 2a52d9f026ec..66359c23cbce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_PRIV_H__
#define __NVKM_GR_PRIV_H__
#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
index 90a9873ce522..dc4f936675ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_REGS_H__
#define __NVKM_GR_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index f0d35beb58df..b31fad8bdaad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV31_MPEG_H__
#define __NV31_MPEG_H__
#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
index d5753103ff63..26f9d14151e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MPEG_PRIV_H__
#define __NVKM_MPEG_PRIV_H__
#include <engine/mpeg.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index d518af4bc9de..db305072a82f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPDEC_PRIV_H__
#define __NVKM_MSPDEC_PRIV_H__
#include <engine/mspdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index 37a91f9d9181..7708e52c9043 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPPP_PRIV_H__
#define __NVKM_MSPPP_PRIV_H__
#include <engine/msppp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 9dc1da67d929..66c36049abca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSVLD_PRIV_H__
#define __NVKM_MSVLD_PRIV_H__
#include <engine/msvld.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 353b94f51205..6c300739f621 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVDEC_PRIV_H__
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 56d0344853ea..c74fd4557d41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_NVC0_H__
#define __NVKM_PM_NVC0_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index da481abe8f7a..3f37b713936c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_NV40_H__
#define __NVKM_PM_NV40_H__
#define nv40_pm(p) container_of((p), struct nv40_pm, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 4ff0475e776c..9fad3611a843 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_PRIV_H__
#define __NVKM_PM_PRIV_H__
#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 4b57f8814560..6278a0c5fe83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 7ecc9d4724dc..2f97c806a79d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC2_PRIV_H__
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index b5be49f0ac56..d42862fc43fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_CHAN_H__
#define __NVKM_SW_CHAN_H__
#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index 25cdfdef2d46..459afd30a484 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_NV50_H__
#define __NVKM_SW_NV50_H__
#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index bcfff62131fe..d7034950ba87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVSW_H__
#define __NVKM_NVSW_H__
#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 0ef1318dc2fd..4aca1791abc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_PRIV_H__
#define __NVKM_SW_PRIV_H__
#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 97b56f759d0b..d515ad994199 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index e4da39139e95..4f2b66e8d795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_BAR_H__
#define __GF100_BAR_H__
#define gf100_bar(p) container_of((p), struct gf100_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index 140b76f588b6..2fe833f6d9f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_BAR_H__
#define __NV50_BAR_H__
#define nv50_bar(p) container_of((p), struct nv50_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 14398e2dbdf9..01ba5b26666e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BAR_PRIV_H__
#define __NVKM_BAR_PRIV_H__
#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 7d1d3c6b4b72..33435ca16311 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BIOS_PRIV_H__
#define __NVKM_BIOS_PRIV_H__
#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 54ec3b131dfd..17ac1812a928 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_HWSQ_H__
#define __NVKM_BUS_HWSQ_H__
#include <subdev/bus.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index a130f2c642d5..ef01e569352d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_PRIV_H__
#define __NVKM_BUS_PRIV_H__
#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index 8865b59fe575..1ea886a4301f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_NVA3_H__
#define __NVKM_CLK_NVA3_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index d3c7fb6efa16..f134d979d884 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_CLK_H__
#define __NV50_CLK_H__
#define nv50_clk(p) container_of((p), struct nv50_clk, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
index 44020a30dee8..9a39f1fd2976 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PLL_H__
#define __NVKM_PLL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index 51eafc00c8b1..b656177923fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_PRIV_H__
#define __NVKM_CLK_PRIV_H__
#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
index d717e8b8f679..d0715fe84328 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_SEQ_H__
#define __NVKM_CLK_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 4a87c8c2bce8..b18e49847eee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_DEVINIT_H__
#define __NV04_DEVINIT_H__
#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 25d2ae3af1c6..315ebaff1165 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DEVINIT_H__
#define __NV50_DEVINIT_H__
#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index e1f6ae58f1d3..5b3097a586dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVINIT_PRIV_H__
#define __NVKM_DEVINIT_PRIV_H__
#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index e3cf0515bb70..ab261310753a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_RAM_NVC0_H__
#define __NVKM_RAM_NVC0_H__
#define gf100_fb(p) container_of((p), struct gf100_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index 13231d4b00d9..dacc696387b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_NV50_H__
#define __NVKM_FB_NV50_H__
#define nv50_fb(p) container_of((p), struct nv50_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index e05d95240e85..9351188d5d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_PRIV_H__
#define __NVKM_FB_PRIV_H__
#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index 70fd59dcd06d..330132e95b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_RAM_PRIV_H__
#define __NVKM_FB_RAM_PRIV_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index 9ef9d6aa3721..a65fa5586af8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FBRAM_FUC_H__
#define __NVKM_FBRAM_FUC_H__
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index ec5dcbfcaea8..11f6bb2936b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV40_FB_RAM_H__
#define __NV40_FB_RAM_H__
#define nv40_ram(p) container_of((p), struct nv40_ram, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index 8df7306d5729..d8f5053e8e2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FBRAM_SEQ_H__
#define __NVKM_FBRAM_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
index 1f865f61504e..ad26fcbe9e06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_REGS_04_H__
#define __NVKM_FB_REGS_04_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index b0390b540ef5..3a5595a9e457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FUSE_PRIV_H__
#define __NVKM_FUSE_PRIV_H__
#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 371bcdbbe0d6..9759f13447bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPIO_PRIV_H__
#define __NVKM_GPIO_PRIV_H__
#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 9587ab456d9e..7d56c4ba693c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_AUX_H__
#define __NVKM_I2C_AUX_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index e1be14c23e54..bea0dd33961e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_BUS_H__
#define __NVKM_I2C_BUS_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 316c4536f29a..33f0c809e583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
#include <subdev/i2c.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bf655a66ef40..f476a69b6cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_PRIV_H__
#define __NVKM_I2C_PRIV_H__
#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
index 01caf798cf31..504a6d37ec50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IBUS_PRIV_H__
#define __NVKM_IBUS_PRIV_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index e90e0f6ed008..bd599b8252ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ICCSENSE_PRIV_H__
#define __NVKM_ICCSENSE_PRIV_H__
#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 44651ca42d52..b9e4751b9921 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_INSTMEM_PRIV_H__
#define __NVKM_INSTMEM_PRIV_H__
#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 8b95f96e3ffa..e71cc25cc775 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_LTC_PRIV_H__
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 3be4126441e4..8869d79c2b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MC_PRIV_H__
#define __NVKM_MC_PRIV_H__
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index d024d8055fcb..948a48c21be4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MMU_PRIV_H__
#define __NVKM_MMU_PRIV_H__
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 333e0c01545a..011a67fe4a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVMXM_MXMS_H__
#define __NVMXM_MXMS_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index 7d970157aed1..6767c2279e7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MXM_PRIV_H__
#define __NVKM_MXM_PRIV_H__
#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
index df2dd08363ad..edb7f00f0de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "priv.h"
#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
#ifndef __NVKM_PCI_AGP_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 86921ec962d6..c17f6063c9ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PCI_PRIV_H__
#define __NVKM_PCI_PRIV_H__
#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 0bcf0b307a61..53d01fb00a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index fe8905666c67..e1e981966c2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 9cf4e6fc724e..c4edbc79e41a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 5d692425b190..6a2572e8945a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
index c8b06cb77e72..30d9480b9be5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PWR_OS_H__
#define __NVKM_PWR_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index e6f74168238c..11b28b086a06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index a4c48a10cd47..e9c6f9725afe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PMU_PRIV_H__
#define __NVKM_PMU_PRIV_H__
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index f820ca2aeda4..3b8878486faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TIMER_PRIV_H__
#define __NVKM_TIMER_PRIV_H__
#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
index 10bef85b485e..23d07f5f44d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define NV04_PTIMER_INTR_0 0x009100
#define NV04_PTIMER_INTR_EN_0 0x009140
#define NV04_PTIMER_NUMERATOR 0x009200
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index adb3ed03d937..4f49b0acaa0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TOP_PRIV_H__
#define __NVKM_TOP_PRIV_H__
#define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 354bafe4b4e2..1a8ad560321b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VOLT_PRIV_H__
#define __NVKM_VOLT_PRIV_H__
#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index b391be7ecb6c..f115253115c5 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI)
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c226da145fb3..a349cb61961e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
+ depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DPI panels.
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 46baafb1a83e..d99659e1381b 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 3c5644c3fc38..904101c5e79d 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index daf286fc8a40..ca1e3b489540 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
}
static const struct soc_device_attribute dpi_soc_devices[] = {
- { .family = "OMAP3[456]*" },
- { .family = "[AD]M37*" },
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b56a05730314..c2cf6d98e577 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index d86873f2abe6..e626eddf24d5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -352,7 +352,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
{
const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
- unsigned int ret;
+ int ret;
core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
"omap4", caps, CEC_MAX_LOG_ADDRS);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 62e451162d96..b06f9956e733 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -886,25 +886,36 @@ struct hdmi4_features {
bool audio_use_mclk;
};
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
.cts_swmode = false,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
.cts_swmode = true,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
.cts_swmode = true,
.audio_use_mclk = true,
};
static const struct soc_device_attribute hdmi4_soc_devices[] = {
- { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
- { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
- { .family = "OMAP4", .data = &hdmi4_es3_features },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES1.?",
+ .data = &hdmi4430_es1_features,
+ },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES2.?",
+ .data = &hdmi4430_es2_features,
+ },
+ {
+ .family = "OMAP4",
+ .data = &hdmi4_features,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 4dfb67fe5f6d..3ecde23ac604 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define DSS_SUBSYS_NAME "HDMI"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1dd3dafc59af..c60a85e82c6d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 77ede3467324..2c4e1a93e05f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index fce1453a93e1..9c5e8dba8ac6 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
pl111_drm-y += pl111_display.o \
pl111_versatile.o \
pl111_drv.o
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 09143b840482..2de40d276116 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -147,6 +147,10 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
extern void r128_freelist_reset(struct drm_device *dev);
extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 663f38c63ba6..6589f9e0310e 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -63,39 +63,36 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_init32_t init32;
- drm_r128_init_t __user *init;
+ drm_r128_init_t init;
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cce_mode, &init->cce_mode)
- || __put_user(init32.cce_secure, &init->cce_secure)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.span_offset, &init->span_offset)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.agp_textures_offset,
- &init->agp_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ init.is_pci = init32.is_pci;
+ init.cce_mode = init32.cce_mode;
+ init.cce_secure = init32.cce_secure;
+ init.ring_size = init32.ring_size;
+ init.usec_timeout = init32.usec_timeout;
+ init.fb_bpp = init32.fb_bpp;
+ init.front_offset = init32.front_offset;
+ init.front_pitch = init32.front_pitch;
+ init.back_offset = init32.back_offset;
+ init.back_pitch = init32.back_pitch;
+ init.depth_bpp = init32.depth_bpp;
+ init.depth_offset = init32.depth_offset;
+ init.depth_pitch = init32.depth_pitch;
+ init.span_offset = init32.span_offset;
+ init.fb_offset = init32.fb_offset;
+ init.mmio_offset = init32.mmio_offset;
+ init.ring_offset = init32.ring_offset;
+ init.ring_rptr_offset = init32.ring_rptr_offset;
+ init.buffers_offset = init32.buffers_offset;
+ init.agp_textures_offset = init32.agp_textures_offset;
+
+ return drm_ioctl_kernel(file, r128_cce_init, &init,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_r128_depth32 {
@@ -111,25 +108,19 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_depth32_t depth32;
- drm_r128_depth_t __user *depth;
+ drm_r128_depth_t depth;
if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
return -EFAULT;
- depth = compat_alloc_user_space(sizeof(*depth));
- if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
- || __put_user(depth32.func, &depth->func)
- || __put_user(depth32.n, &depth->n)
- || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
- || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
- || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
- &depth->buffer)
- || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
- &depth->mask))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ depth.func = depth32.func;
+ depth.n = depth32.n;
+ depth.x = compat_ptr(depth32.x);
+ depth.y = compat_ptr(depth32.y);
+ depth.buffer = compat_ptr(depth32.buffer);
+ depth.mask = compat_ptr(depth32.mask);
+ return drm_ioctl_kernel(file, r128_cce_depth, &depth, DRM_AUTH);
}
typedef struct drm_r128_stipple32 {
@@ -140,18 +131,14 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_stipple32_t stipple32;
- drm_r128_stipple_t __user *stipple;
+ drm_r128_stipple_t stipple;
if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
return -EFAULT;
- stipple = compat_alloc_user_space(sizeof(*stipple));
- if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
- || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
- &stipple->mask))
- return -EFAULT;
+ stipple.mask = compat_ptr(stipple32.mask);
- return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl_kernel(file, r128_cce_stipple, &stipple, DRM_AUTH);
}
typedef struct drm_r128_getparam32 {
@@ -163,19 +150,15 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_getparam32_t getparam32;
- drm_r128_getparam_t __user *getparam;
+ drm_r128_getparam_t getparam;
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
- getparam = compat_alloc_user_space(sizeof(*getparam));
- if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
- || __put_user(getparam32.param, &getparam->param)
- || __put_user((void __user *)(unsigned long)getparam32.value,
- &getparam->value))
- return -EFAULT;
+ getparam.param = getparam32.param;
+ getparam.value = compat_ptr(getparam32.value);
- return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl_kernel(file, r128_getparam, &getparam, DRM_AUTH);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fd2d9f58f77..8fdc56c1c953 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1460,7 +1460,7 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi
return ret;
}
-static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t *depth = data;
@@ -1492,7 +1492,7 @@ static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *f
return ret;
}
-static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_stipple_t *stipple = data;
@@ -1582,7 +1582,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
return 0;
}
-static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_getparam_t *param = data;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf3e5985e3e7..92ccd7aed0d4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index b928c17bdeed..c21d8fa591ef 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* utility to create the register check tables
* this includes inlined list.h safe for userspace.
*
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index eb40888bdfcc..ad16a925f8d5 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define R100_TRACK_MAX_TEXTURE 3
#define R200_TRACK_MAX_TEXTURE 6
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ebdf1b859cb6..2917ea1b667e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_dp_mst_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 49750d07ab7d..611cf934b211 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 815eaa8c394b..bc26efd1793e 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RADEON_TRACE_H_
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
index e51d3575976b..66b3d5084662 100644
--- a/drivers/gpu/drm/radeon/radeon_trace_points.c
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
* Author : Dave Airlie <airlied@redhat.com>
*/
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8032da57e409..6ada64db00e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -597,7 +597,7 @@ release_sg:
kfree(ttm->sg);
release_pages:
- release_pages(ttm->pages, pinned, 0);
+ release_pages(ttm->pages, pinned);
return r;
}
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 2131e722de3b..0cf5c11030e8 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_drv.o \
rcar_du_encoder.o \
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index a881d2cc4f25..a314e2109e76 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index b15755b6129c..b1fe0639227e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
goto err_pllref;
}
- pm_runtime_enable(dev);
-
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
dsi->dsi_host.dev = dev;
ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
}
dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
return 0;
err_mipi_dsi_host:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index a553e182ff53..3acfd576b7df 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
spin_unlock_irqrestore(&psr->lock, flags);
}
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
{
- struct psr_drv *psr = (struct psr_drv *)data;
+ struct psr_drv *psr = from_timer(psr, t, flush_timer);
unsigned long flags;
/* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
if (!psr)
return -ENOMEM;
- setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ timer_setup(&psr->flush_timer, psr_flush_handler, 0);
spin_lock_init(&psr->lock);
psr->active = true;
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 37bbdac52896..54acc117550c 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as igt__name to create
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
index 4c3eeb355630..861edafed856 100644
--- a/drivers/gpu/drm/shmobile/Makefile
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
shmob-drm-y := shmob_drm_backlight.o \
shmob_drm_crtc.o \
shmob_drm_drv.o \
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index c35db12435c3..f203ac5514ae 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sti-drm-y := \
sti_mixer.o \
sti_gdp.o \
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 301b5b1452db..0c2f8c7facae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sun4i-backend-y += sun4i_backend.o sun4i_layer.o
sun4i-drm-y += sun4i_drv.o
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 8927784396e8..46d65d39214d 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
tegra-drm-y := \
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 943bdf88c4a2..52552b9b89ef 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order,
- carveout_end >> order);
+ carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index efc2c4f00daa..87f9480e43b0 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 4d0c938ff4b2..a60e560804e0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 719a771f3d5c..f5500df51686 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Please keep these build lists sorted!
# core driver code
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 98a6cb9f44fc..4ae45d7dac42 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -674,10 +674,9 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo)
mutex_unlock(&bo->madv_lock);
}
-static void vc4_bo_cache_time_timer(unsigned long data)
+static void vc4_bo_cache_time_timer(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
@@ -1039,9 +1038,7 @@ int vc4_bo_cache_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
- setup_timer(&vc4->bo_cache.time_timer,
- vc4_bo_cache_time_timer,
- (unsigned long)dev);
+ timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index e00ac2f3a264..6c32c89a83a9 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -312,10 +312,10 @@ vc4_reset_work(struct work_struct *work)
}
static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct drm_device *dev = vc4->dev;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
@@ -1154,9 +1154,7 @@ vc4_gem_init(struct drm_device *dev)
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
- setup_timer(&vc4->hangcheck.timer,
- vc4_hangcheck_elapsed,
- (unsigned long)dev);
+ timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 8fd52f211e9d..b28876c222b4 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
.timeline_value_str = vgem_fence_timeline_value_str,
};
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = (struct vgem_fence *)data;
+ struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+ timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 98aae9809249..d6e84a589ef1 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->pages,
- (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0);
+ ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
return ret;
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- setup_timer(&blitq->poll_timer, via_dmablit_timer,
- (unsigned long)blitq);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
}
}
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 7684f613bdc3..f29deec83d1f 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index a365330bbb82..ad80211e1098 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
index 120eab830eaf..3a195e8106b3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e84fee3ec4f3..184340d486c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3bbad22b3748..d6b1c509ae01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -224,7 +224,7 @@ out:
return ret;
}
-static struct dma_fence_ops vmw_fence_ops = {
+static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
index 9df7766a7f9d..b6d966d849dd 100644
--- a/drivers/gpu/drm/zte/Makefile
+++ b/drivers/gpu/drm/zte/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
zxdrm-y := \
zx_drm_drv.o \
zx_hdmi.o \
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 4fb61bd57aee..b92016ce09b7 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
host1x-y = \
bus.o \
syncpt.o \
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 66ea5acee820..2e57c9cea696 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.pm = &host1x_device_pm_ops,
+ .force_dma = true,
};
static void __host1x_device_del(struct host1x_device *device)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 773d6337aa30..bf67c3aeb634 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -239,8 +239,7 @@ static int host1x_probe(struct platform_device *pdev)
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order,
- geometry->aperture_start >> order,
- geometry->aperture_end >> order);
+ geometry->aperture_start >> order);
host->iova_end = geometry->aperture_end;
}
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 8cdf9e4ae772..7cc8b47e488b 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 374301fcbc86..779c5ae47f36 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -230,7 +230,7 @@ config HID_CMEDIA
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
- depends on USB_HID && I2C && GPIOLIB
+ depends on USB_HID && HIDRAW && I2C && GPIOLIB
select GPIOLIB_IRQCHIP
---help---
Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
@@ -750,11 +750,10 @@ config HID_PRIMAX
HID standard.
config HID_RETRODE
- tristate "Retrode"
+ tristate "Retrode 2 USB adapter for vintage video games"
depends on USB_HID
---help---
Support for
-
* Retrode 2 cartridge and controller adapter
config HID_ROCCAT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 8659d7e633a5..235bd2a7b333 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the HID driver
#
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index ed9c0ea5b026..b1eeb4839bfc 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -52,8 +52,30 @@
#define ADDRESS_U1_PAD_BTN 0x00800052
#define ADDRESS_U1_SP_BTN 0x0080009F
+#define T4_INPUT_REPORT_LEN sizeof(struct t4_input_report)
+#define T4_FEATURE_REPORT_LEN T4_INPUT_REPORT_LEN
+#define T4_FEATURE_REPORT_ID 7
+#define T4_CMD_REGISTER_READ 0x08
+#define T4_CMD_REGISTER_WRITE 0x07
+
+#define T4_ADDRESS_BASE 0xC2C0
+#define PRM_SYS_CONFIG_1 (T4_ADDRESS_BASE + 0x0002)
+#define T4_PRM_FEED_CONFIG_1 (T4_ADDRESS_BASE + 0x0004)
+#define T4_PRM_FEED_CONFIG_4 (T4_ADDRESS_BASE + 0x001A)
+#define T4_PRM_ID_CONFIG_3 (T4_ADDRESS_BASE + 0x00B0)
+
+
+#define T4_FEEDCFG4_ADVANCED_ABS_ENABLE 0x01
+#define T4_I2C_ABS 0x78
+
+#define T4_COUNT_PER_ELECTRODE 256
#define MAX_TOUCHES 5
+enum dev_num {
+ U1,
+ T4,
+ UNKNOWN,
+};
/**
* struct u1_data
*
@@ -61,43 +83,173 @@
* @input2: pointer to the kernel input2 device
* @hdev: pointer to the struct hid_device
*
- * @dev_ctrl: device control parameter
* @dev_type: device type
- * @sen_line_num_x: number of sensor line of X
- * @sen_line_num_y: number of sensor line of Y
- * @pitch_x: sensor pitch of X
- * @pitch_y: sensor pitch of Y
- * @resolution: resolution
- * @btn_info: button information
+ * @max_fingers: total number of fingers
+ * @has_sp: boolean of sp existence
+ * @sp_btn_info: button information
* @x_active_len_mm: active area length of X (mm)
* @y_active_len_mm: active area length of Y (mm)
* @x_max: maximum x coordinate value
* @y_max: maximum y coordinate value
+ * @x_min: minimum x coordinate value
+ * @y_min: minimum y coordinate value
* @btn_cnt: number of buttons
* @sp_btn_cnt: number of stick buttons
*/
-struct u1_dev {
+struct alps_dev {
struct input_dev *input;
struct input_dev *input2;
struct hid_device *hdev;
- u8 dev_ctrl;
- u8 dev_type;
- u8 sen_line_num_x;
- u8 sen_line_num_y;
- u8 pitch_x;
- u8 pitch_y;
- u8 resolution;
- u8 btn_info;
+ enum dev_num dev_type;
+ u8 max_fingers;
+ u8 has_sp;
u8 sp_btn_info;
u32 x_active_len_mm;
u32 y_active_len_mm;
u32 x_max;
u32 y_max;
+ u32 x_min;
+ u32 y_min;
u32 btn_cnt;
u32 sp_btn_cnt;
};
+struct t4_contact_data {
+ u8 palm;
+ u8 x_lo;
+ u8 x_hi;
+ u8 y_lo;
+ u8 y_hi;
+};
+
+struct t4_input_report {
+ u8 reportID;
+ u8 numContacts;
+ struct t4_contact_data contact[5];
+ u8 button;
+ u8 track[5];
+ u8 zx[5], zy[5];
+ u8 palmTime[5];
+ u8 kilroy;
+ u16 timeStamp;
+};
+
+static u16 t4_calc_check_sum(u8 *buffer,
+ unsigned long offset, unsigned long length)
+{
+ u16 sum1 = 0xFF, sum2 = 0xFF;
+ unsigned long i = 0;
+
+ if (offset + length >= 50)
+ return 0;
+
+ while (length > 0) {
+ u32 tlen = length > 20 ? 20 : length;
+
+ length -= tlen;
+
+ do {
+ sum1 += buffer[offset + i];
+ sum2 += sum1;
+ i++;
+ } while (--tlen > 0);
+
+ sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+ sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+ }
+
+ sum1 = (sum1 & 0xFF) + (sum1 >> 8);
+ sum2 = (sum2 & 0xFF) + (sum2 >> 8);
+
+ return(sum2 << 8 | sum1);
+}
+
+static int t4_read_write_register(struct hid_device *hdev, u32 address,
+ u8 *read_val, u8 write_val, bool read_flag)
+{
+ int ret;
+ u16 check_sum;
+ u8 *input;
+ u8 *readbuf;
+
+ input = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ input[0] = T4_FEATURE_REPORT_ID;
+ if (read_flag) {
+ input[1] = T4_CMD_REGISTER_READ;
+ input[8] = 0x00;
+ } else {
+ input[1] = T4_CMD_REGISTER_WRITE;
+ input[8] = write_val;
+ }
+ put_unaligned_le32(address, input + 2);
+ input[6] = 1;
+ input[7] = 0;
+
+ /* Calculate the checksum */
+ check_sum = t4_calc_check_sum(input, 1, 8);
+ input[9] = (u8)check_sum;
+ input[10] = (u8)(check_sum >> 8);
+ input[11] = 0;
+
+ ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, input,
+ T4_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to read command (%d)\n", ret);
+ goto exit;
+ }
+
+ readbuf = kzalloc(T4_FEATURE_REPORT_LEN, GFP_KERNEL);
+ if (read_flag) {
+ if (!readbuf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = hid_hw_raw_request(hdev, T4_FEATURE_REPORT_ID, readbuf,
+ T4_FEATURE_REPORT_LEN,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed read register (%d)\n", ret);
+ goto exit_readbuf;
+ }
+
+ if (*(u32 *)&readbuf[6] != address) {
+ dev_err(&hdev->dev, "read register address error (%x,%x)\n",
+ *(u32 *)&readbuf[6], address);
+ goto exit_readbuf;
+ }
+
+ if (*(u16 *)&readbuf[10] != 1) {
+ dev_err(&hdev->dev, "read register size error (%x)\n",
+ *(u16 *)&readbuf[10]);
+ goto exit_readbuf;
+ }
+
+ check_sum = t4_calc_check_sum(readbuf, 6, 7);
+ if (*(u16 *)&readbuf[13] != check_sum) {
+ dev_err(&hdev->dev, "read register checksum error (%x,%x)\n",
+ *(u16 *)&readbuf[13], check_sum);
+ goto exit_readbuf;
+ }
+
+ *read_val = readbuf[12];
+ }
+
+ ret = 0;
+
+exit_readbuf:
+ kfree(readbuf);
+exit:
+ kfree(input);
+ return ret;
+}
+
static int u1_read_write_register(struct hid_device *hdev, u32 address,
u8 *read_val, u8 write_val, bool read_flag)
{
@@ -165,21 +317,60 @@ exit:
return ret;
}
-static int alps_raw_event(struct hid_device *hdev,
- struct hid_report *report, u8 *data, int size)
+static int t4_raw_event(struct alps_dev *hdata, u8 *data, int size)
+{
+ unsigned int x, y, z;
+ int i;
+ struct t4_input_report *p_report = (struct t4_input_report *)data;
+
+ if (!data)
+ return 0;
+ for (i = 0; i < hdata->max_fingers; i++) {
+ x = p_report->contact[i].x_hi << 8 | p_report->contact[i].x_lo;
+ y = p_report->contact[i].y_hi << 8 | p_report->contact[i].y_lo;
+ y = hdata->y_max - y + hdata->y_min;
+ z = (p_report->contact[i].palm < 0x80 &&
+ p_report->contact[i].palm > 0) * 62;
+ if (x == 0xffff) {
+ x = 0;
+ y = 0;
+ z = 0;
+ }
+ input_mt_slot(hdata->input, i);
+
+ input_mt_report_slot_state(hdata->input,
+ MT_TOOL_FINGER, z != 0);
+
+ if (!z)
+ continue;
+
+ input_report_abs(hdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(hdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(hdata->input, ABS_MT_PRESSURE, z);
+ }
+ input_mt_sync_frame(hdata->input);
+
+ input_report_key(hdata->input, BTN_LEFT, p_report->button);
+
+ input_sync(hdata->input);
+ return 1;
+}
+
+static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
{
unsigned int x, y, z;
int i;
short sp_x, sp_y;
- struct u1_dev *hdata = hid_get_drvdata(hdev);
+ if (!data)
+ return 0;
switch (data[0]) {
case U1_MOUSE_REPORT_ID:
break;
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
- for (i = 0; i < MAX_TOUCHES; i++) {
+ for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];
x = get_unaligned_le16(contact + 3);
@@ -241,122 +432,253 @@ static int alps_raw_event(struct hid_device *hdev,
return 0;
}
-#ifdef CONFIG_PM
-static int alps_post_reset(struct hid_device *hdev)
+static int alps_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
{
- return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ int ret = 0;
+ struct alps_dev *hdata = hid_get_drvdata(hdev);
+
+ switch (hdev->product) {
+ case HID_PRODUCT_ID_T4_BTNLESS:
+ ret = t4_raw_event(hdata, data, size);
+ break;
+ default:
+ ret = u1_raw_event(hdata, data, size);
+ break;
+ }
+ return ret;
}
-static int alps_post_resume(struct hid_device *hdev)
+static int __maybe_unused alps_post_reset(struct hid_device *hdev)
{
- return u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ int ret = -1;
+ struct alps_dev *data = hid_get_drvdata(hdev);
+
+ switch (data->dev_type) {
+ case T4:
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+ NULL, T4_I2C_ABS, false);
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4,
+ NULL, T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+ break;
+ case U1:
+ ret = u1_read_write_register(hdev,
+ ADDRESS_U1_DEV_CTRL_1, NULL,
+ U1_TP_ABS_MODE | U1_SP_ABS_MODE, false);
+ break;
+ default:
+ break;
+ }
+ return ret;
}
-#endif /* CONFIG_PM */
-static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+static int __maybe_unused alps_post_resume(struct hid_device *hdev)
{
- struct u1_dev *data = hid_get_drvdata(hdev);
- struct input_dev *input = hi->input, *input2;
- struct u1_dev devInfo;
- int ret;
- int res_x, res_y, i;
-
- data->input = input;
-
- hid_dbg(hdev, "Opening low level driver\n");
- ret = hid_hw_open(hdev);
- if (ret)
- return ret;
+ return alps_post_reset(hdev);
+}
- /* Allow incoming hid reports */
- hid_device_io_start(hdev);
+static int u1_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+ int ret;
+ u8 tmp, dev_ctrl, sen_line_num_x, sen_line_num_y;
+ u8 pitch_x, pitch_y, resolution;
/* Device initialization */
ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- &devInfo.dev_ctrl, 0, true);
+ &dev_ctrl, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_DEV_CTRL_1 (%d)\n", ret);
goto exit;
}
- devInfo.dev_ctrl &= ~U1_DISABLE_DEV;
- devInfo.dev_ctrl |= U1_TP_ABS_MODE;
+ dev_ctrl &= ~U1_DISABLE_DEV;
+ dev_ctrl |= U1_TP_ABS_MODE;
ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, devInfo.dev_ctrl, false);
+ NULL, dev_ctrl, false);
if (ret < 0) {
dev_err(&hdev->dev, "failed to change TP mode (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_X,
- &devInfo.sen_line_num_x, 0, true);
+ &sen_line_num_x, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_NUM_SENS_X (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_NUM_SENS_Y,
- &devInfo.sen_line_num_y, 0, true);
+ &sen_line_num_y, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_NUM_SENS_Y (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_X,
- &devInfo.pitch_x, 0, true);
+ &pitch_x, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PITCH_SENS_X (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_PITCH_SENS_Y,
- &devInfo.pitch_y, 0, true);
+ &pitch_y, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PITCH_SENS_Y (%d)\n", ret);
goto exit;
}
ret = u1_read_write_register(hdev, ADDRESS_U1_RESO_DWN_ABS,
- &devInfo.resolution, 0, true);
+ &resolution, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_RESO_DWN_ABS (%d)\n", ret);
goto exit;
}
+ pri_data->x_active_len_mm =
+ (pitch_x * (sen_line_num_x - 1)) / 10;
+ pri_data->y_active_len_mm =
+ (pitch_y * (sen_line_num_y - 1)) / 10;
+
+ pri_data->x_max =
+ (resolution << 2) * (sen_line_num_x - 1);
+ pri_data->x_min = 1;
+ pri_data->y_max =
+ (resolution << 2) * (sen_line_num_y - 1);
+ pri_data->y_min = 1;
ret = u1_read_write_register(hdev, ADDRESS_U1_PAD_BTN,
- &devInfo.btn_info, 0, true);
+ &tmp, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_PAD_BTN (%d)\n", ret);
goto exit;
}
+ if ((tmp & 0x0F) == (tmp & 0xF0) >> 4) {
+ pri_data->btn_cnt = (tmp & 0x0F);
+ } else {
+ /* Button pad */
+ pri_data->btn_cnt = 1;
+ }
+ pri_data->has_sp = 0;
/* Check StickPointer device */
ret = u1_read_write_register(hdev, ADDRESS_U1_DEVICE_TYP,
- &devInfo.dev_type, 0, true);
+ &tmp, 0, true);
if (ret < 0) {
dev_err(&hdev->dev, "failed U1_DEVICE_TYP (%d)\n", ret);
goto exit;
}
+ if (tmp & U1_DEVTYPE_SP_SUPPORT) {
+ dev_ctrl |= U1_SP_ABS_MODE;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
+ NULL, dev_ctrl, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
+ goto exit;
+ }
- devInfo.x_active_len_mm =
- (devInfo.pitch_x * (devInfo.sen_line_num_x - 1)) / 10;
- devInfo.y_active_len_mm =
- (devInfo.pitch_y * (devInfo.sen_line_num_y - 1)) / 10;
+ ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
+ &pri_data->sp_btn_info, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
+ goto exit;
+ }
+ pri_data->has_sp = 1;
+ }
+ pri_data->max_fingers = 5;
+exit:
+ return ret;
+}
- devInfo.x_max =
- (devInfo.resolution << 2) * (devInfo.sen_line_num_x - 1);
- devInfo.y_max =
- (devInfo.resolution << 2) * (devInfo.sen_line_num_y - 1);
+static int T4_init(struct hid_device *hdev, struct alps_dev *pri_data)
+{
+ int ret;
+ u8 tmp, sen_line_num_x, sen_line_num_y;
+
+ ret = t4_read_write_register(hdev, T4_PRM_ID_CONFIG_3, &tmp, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_ID_CONFIG_3 (%d)\n", ret);
+ goto exit;
+ }
+ sen_line_num_x = 16 + ((tmp & 0x0F) | (tmp & 0x08 ? 0xF0 : 0));
+ sen_line_num_y = 12 + (((tmp & 0xF0) >> 4) | (tmp & 0x80 ? 0xF0 : 0));
+
+ pri_data->x_max = sen_line_num_x * T4_COUNT_PER_ELECTRODE;
+ pri_data->x_min = T4_COUNT_PER_ELECTRODE;
+ pri_data->y_max = sen_line_num_y * T4_COUNT_PER_ELECTRODE;
+ pri_data->y_min = T4_COUNT_PER_ELECTRODE;
+ pri_data->x_active_len_mm = pri_data->y_active_len_mm = 0;
+ pri_data->btn_cnt = 1;
+
+ ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, &tmp, 0, true);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+ tmp |= 0x02;
+ ret = t4_read_write_register(hdev, PRM_SYS_CONFIG_1, NULL, tmp, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed PRM_SYS_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_1,
+ NULL, T4_I2C_ABS, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_1 (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = t4_read_write_register(hdev, T4_PRM_FEED_CONFIG_4, NULL,
+ T4_FEEDCFG4_ADVANCED_ABS_ENABLE, false);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed T4_PRM_FEED_CONFIG_4 (%d)\n", ret);
+ goto exit;
+ }
+ pri_data->max_fingers = 5;
+ pri_data->has_sp = 0;
+exit:
+ return ret;
+}
+
+static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+ struct alps_dev *data = hid_get_drvdata(hdev);
+ struct input_dev *input = hi->input, *input2;
+ int ret;
+ int res_x, res_y, i;
+
+ data->input = input;
+
+ hid_dbg(hdev, "Opening low level driver\n");
+ ret = hid_hw_open(hdev);
+ if (ret)
+ return ret;
+
+ /* Allow incoming hid reports */
+ hid_device_io_start(hdev);
+ switch (data->dev_type) {
+ case T4:
+ ret = T4_init(hdev, data);
+ break;
+ case U1:
+ ret = u1_init(hdev, data);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ goto exit;
__set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 1, devInfo.x_max, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 1, devInfo.y_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ data->x_min, data->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ data->y_min, data->y_max, 0, 0);
- if (devInfo.x_active_len_mm && devInfo.y_active_len_mm) {
- res_x = (devInfo.x_max - 1) / devInfo.x_active_len_mm;
- res_y = (devInfo.y_max - 1) / devInfo.y_active_len_mm;
+ if (data->x_active_len_mm && data->y_active_len_mm) {
+ res_x = (data->x_max - 1) / data->x_active_len_mm;
+ res_y = (data->y_max - 1) / data->y_active_len_mm;
input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
@@ -364,49 +686,25 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 64, 0, 0);
- input_mt_init_slots(input, MAX_TOUCHES, INPUT_MT_POINTER);
+ input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER);
__set_bit(EV_KEY, input->evbit);
- if ((devInfo.btn_info & 0x0F) == (devInfo.btn_info & 0xF0) >> 4) {
- devInfo.btn_cnt = (devInfo.btn_info & 0x0F);
- } else {
- /* Button pad */
- devInfo.btn_cnt = 1;
+
+ if (data->btn_cnt == 1)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- }
- for (i = 0; i < devInfo.btn_cnt; i++)
+ for (i = 0; i < data->btn_cnt; i++)
__set_bit(BTN_LEFT + i, input->keybit);
-
/* Stick device initialization */
- if (devInfo.dev_type & U1_DEVTYPE_SP_SUPPORT) {
-
+ if (data->has_sp) {
input2 = input_allocate_device();
if (!input2) {
- ret = -ENOMEM;
- goto exit;
- }
-
- data->input2 = input2;
-
- devInfo.dev_ctrl |= U1_SP_ABS_MODE;
- ret = u1_read_write_register(hdev, ADDRESS_U1_DEV_CTRL_1,
- NULL, devInfo.dev_ctrl, false);
- if (ret < 0) {
- dev_err(&hdev->dev, "failed SP mode (%d)\n", ret);
- input_free_device(input2);
- goto exit;
- }
-
- ret = u1_read_write_register(hdev, ADDRESS_U1_SP_BTN,
- &devInfo.sp_btn_info, 0, true);
- if (ret < 0) {
- dev_err(&hdev->dev, "failed U1_SP_BTN (%d)\n", ret);
input_free_device(input2);
goto exit;
}
+ data->input2 = input2;
input2->phys = input->phys;
input2->name = "DualPoint Stick";
input2->id.bustype = BUS_I2C;
@@ -416,8 +714,8 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
input2->dev.parent = input->dev.parent;
__set_bit(EV_KEY, input2->evbit);
- devInfo.sp_btn_cnt = (devInfo.sp_btn_info & 0x0F);
- for (i = 0; i < devInfo.sp_btn_cnt; i++)
+ data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
+ for (i = 0; i < data->sp_btn_cnt; i++)
__set_bit(BTN_LEFT + i, input2->keybit);
__set_bit(EV_REL, input2->evbit);
@@ -426,8 +724,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
__set_bit(INPUT_PROP_POINTER, input2->propbit);
__set_bit(INPUT_PROP_POINTING_STICK, input2->propbit);
- ret = input_register_device(data->input2);
- if (ret) {
+ if (input_register_device(data->input2)) {
input_free_device(input2);
goto exit;
}
@@ -448,10 +745,9 @@ static int alps_input_mapping(struct hid_device *hdev,
static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct u1_dev *data = NULL;
+ struct alps_dev *data = NULL;
int ret;
-
- data = devm_kzalloc(&hdev->dev, sizeof(struct u1_dev), GFP_KERNEL);
+ data = devm_kzalloc(&hdev->dev, sizeof(struct alps_dev), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -466,6 +762,18 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
+ switch (hdev->product) {
+ case HID_DEVICE_ID_ALPS_T4_BTNLESS:
+ data->dev_type = T4;
+ break;
+ case HID_DEVICE_ID_ALPS_U1_DUAL:
+ case HID_DEVICE_ID_ALPS_U1:
+ data->dev_type = U1;
+ break;
+ default:
+ data->dev_type = UNKNOWN;
+ }
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
@@ -483,6 +791,10 @@ static void alps_remove(struct hid_device *hdev)
static const struct hid_device_id alps_id[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY,
+ USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
{ }
};
MODULE_DEVICE_TABLE(hid, alps_id);
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 07cbc70f00e7..eae7d52cf1a8 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -173,9 +173,9 @@ static void battery_flat(struct appleir *appleir)
dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
}
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
{
- struct appleir *appleir = (struct appleir *)data;
+ struct appleir *appleir = from_timer(appleir, t, key_up_timer);
struct hid_device *hid = appleir->hid;
unsigned long flags;
@@ -303,8 +303,7 @@ static int appleir_probe(struct hid_device *hid, const struct hid_device_id *id)
hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
spin_lock_init(&appleir->lock);
- setup_timer(&appleir->key_up_timer,
- key_up_tick, (unsigned long) appleir);
+ timer_setup(&appleir->key_up_timer, key_up_tick, 0);
hid_set_drvdata(hid, appleir);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 50c294be8324..1bb7b63b3150 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -67,6 +67,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_USE_KBD_BACKLIGHT BIT(5)
#define QUIRK_T100_KEYBOARD BIT(6)
#define QUIRK_T100CHI BIT(7)
+#define QUIRK_G752_KEYBOARD BIT(8)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -670,6 +671,11 @@ static void asus_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static const __u8 asus_g752_fixed_rdesc[] = {
+ 0x19, 0x00, /* Usage Minimum (0x00) */
+ 0x2A, 0xFF, 0x00, /* Usage Maximum (0xFF) */
+};
+
static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -708,6 +714,27 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[391] = 0xff;
rdesc[402] = 0x00;
}
+ if (drvdata->quirks & QUIRK_G752_KEYBOARD &&
+ *rsize == 75 && rdesc[61] == 0x15 && rdesc[62] == 0x00) {
+ /* report is missing usage minimum and maximum */
+ __u8 *new_rdesc;
+ size_t new_size = *rsize + sizeof(asus_g752_fixed_rdesc);
+
+ new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
+ if (new_rdesc == NULL)
+ return rdesc;
+
+ hid_info(hdev, "Fixing up Asus G752 keyb report descriptor\n");
+ /* copy the valid part */
+ memcpy(new_rdesc, rdesc, 61);
+ /* insert missing part */
+ memcpy(new_rdesc + 61, asus_g752_fixed_rdesc, sizeof(asus_g752_fixed_rdesc));
+ /* copy remaining data */
+ memcpy(new_rdesc + 61 + sizeof(asus_g752_fixed_rdesc), rdesc + 61, *rsize - 61);
+
+ *rsize = new_size;
+ rdesc = new_rdesc;
+ }
return rdesc;
}
@@ -718,10 +745,12 @@ static const struct hid_device_id asus_devices[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD), I2C_TOUCHPAD_QUIRKS },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1), QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 330ca983828b..f3fcb836a1f9 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1662,7 +1662,7 @@ static struct bin_attribute dev_bin_attr_report_desc = {
.size = HID_MAX_DESCRIPTOR_SIZE,
};
-static struct device_attribute dev_attr_country = {
+static const struct device_attribute dev_attr_country = {
.attr = { .name = "country", .mode = 0444 },
.show = show_country,
};
@@ -1889,6 +1889,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_ALPS)
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLE)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
@@ -1979,6 +1982,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
@@ -2329,6 +2333,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
@@ -3121,4 +3126,3 @@ MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 078026f63b6f..68cdc962265b 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -21,7 +21,7 @@
* Data Sheet:
* http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf
* Programming Interface Specification:
- * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
+ * https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
*/
#include <linux/gpio.h>
@@ -196,6 +196,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
HID_REQ_GET_REPORT);
if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
+ if (ret >= 0)
+ ret = -EIO;
goto exit;
}
@@ -205,8 +207,10 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0) {
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
+ if (ret >= 0)
+ ret = -EIO;
goto exit;
}
@@ -214,7 +218,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
exit:
mutex_unlock(&dev->lock);
- return ret < 0 ? ret : -EIO;
+ return ret;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 6039f071fab1..3aa2bb9f0f81 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -313,7 +313,7 @@ static void mousevsc_on_receive(struct hv_device *device,
break;
default:
- pr_err("unsupported hid msg type - type %d len %d",
+ pr_err("unsupported hid msg type - type %d len %d\n",
hid_msg->header.type, hid_msg->header.size);
break;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index be2e005c3c51..5da3d6256d25 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -77,6 +77,9 @@
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
#define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
#define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220
+#define HID_DEVICE_ID_ALPS_U1 0x1215
+#define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C
+
#define USB_VENDOR_ID_AMI 0x046b
#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
@@ -182,6 +185,7 @@
#define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
+#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -509,6 +513,9 @@
#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
+#define I2C_VENDOR_ID_HANTICK 0x0911
+#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
+
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@@ -729,6 +736,9 @@
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
+#define USB_VENDOR_ID_MCS 0x16d0
+#define USB_DEVICE_ID_MCS_GAMEPADBLOCK 0x0bcc
+
#define USB_VENDOR_ID_MGE 0x0463
#define USB_DEVICE_ID_MGE_UPS 0xffff
#define USB_DEVICE_ID_MGE_UPS1 0x0001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 199f6a01fc62..04d01b57d94c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -797,6 +797,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_key_clear(BTN_STYLUS);
break;
+ case 0x45: /* ERASER */
+ /*
+ * This event is reported when the eraser tip touches the surface.
+ * The actual eraser (BTN_TOOL_RUBBER) is set by the Invert usage when
+ * the tool comes into proximity.
+ */
+ map_key_clear(BTN_TOUCH);
+ break;
+
case 0x46: /* TabletPick */
case 0x5a: /* SecondaryBarrelSwitch */
map_key_clear(BTN_STYLUS2);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 52026dc94d5c..596227ddb6e0 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,7 +756,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ static const unsigned char cbuf[] = {
+ 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
if (!buf) {
diff --git a/drivers/hid/hid-lg.h b/drivers/hid/hid-lg.h
index 10dd8f024135..3d8902ba1c6c 100644
--- a/drivers/hid/hid-lg.h
+++ b/drivers/hid/hid-lg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HID_LG_H
#define __HID_LG_H
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 1fc12e357035..512d67e1aae3 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -474,9 +474,7 @@ static int lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effec
static void lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
- struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
- s32 *value = report->field[0]->value;
+ s32 *value;
u32 expand_a, expand_b;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
diff --git a/drivers/hid/hid-lg4ff.h b/drivers/hid/hid-lg4ff.h
index de1f350e0bd3..e5c55d515ac2 100644
--- a/drivers/hid/hid-lg4ff.h
+++ b/drivers/hid/hid-lg4ff.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HID_LG4FF_H
#define __HID_LG4FF_H
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 614054af904a..19cc980eebce 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1957,7 +1957,8 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* initialize with zero autocenter to get wheel in usable state */
hidpp_ff_set_autocenter(dev, 0);
- hid_info(hid, "Force feeback support loaded (firmware release %d).\n", version);
+ hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
+ version);
return 0;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 20b40ad26325..42ed887ba0be 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -34,7 +34,8 @@ module_param(emulate_scroll_wheel, bool, 0644);
MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
static unsigned int scroll_speed = 32;
-static int param_set_scroll_speed(const char *val, struct kernel_param *kp) {
+static int param_set_scroll_speed(const char *val,
+ const struct kernel_param *kp) {
unsigned long speed;
if (!val || kstrtoul(val, 0, &speed) || speed > 63)
return -EINVAL;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 9e8c4d2ba11d..65ea23be9677 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/input/mt.h>
+#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -112,6 +113,7 @@ struct mt_device {
struct mt_slot curdata; /* placeholder of incoming data */
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
+ struct hid_device *hdev; /* hid_device we're attached to */
struct mt_fields *fields; /* temporary placeholder for storing the
multitouch fields */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
@@ -136,6 +138,9 @@ struct mt_device {
bool serial_maybe; /* need to check for serial protocol */
bool curvalid; /* is the current contact valid? */
unsigned mt_flags; /* flags to pass to input-mt */
+ __s32 dev_time; /* the scan time provided by the device */
+ unsigned long jiffies; /* the frame's jiffies */
+ int timestamp; /* the timestamp to be sent */
};
static void mt_post_parse_default_settings(struct mt_device *td);
@@ -177,6 +182,12 @@ static void mt_post_parse(struct mt_device *td);
#define MT_DEFAULT_MAXCONTACT 10
#define MT_MAX_MAXCONTACT 250
+/*
+ * Resync device and local timestamps after that many microseconds without
+ * receiving data.
+ */
+#define MAX_TIMESTAMP_INTERVAL 1000000
+
#define MT_USB_DEVICE(v, p) HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH, v, p)
#define MT_BT_DEVICE(v, p) HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH, v, p)
@@ -583,6 +594,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
cls->sn_pressure);
mt_store_field(usage, td, hi);
return 1;
+ case HID_DG_SCANTIME:
+ hid_map_usage(hi, usage, bit, max,
+ EV_MSC, MSC_TIMESTAMP);
+ input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
if (field->index >= field->report->maxfield ||
@@ -718,6 +735,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
input_mt_sync_frame(input);
+ input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
@@ -727,6 +745,28 @@ static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
}
+static int mt_compute_timestamp(struct mt_device *td, struct hid_field *field,
+ __s32 value)
+{
+ long delta = value - td->dev_time;
+ unsigned long jdelta = jiffies_to_usecs(jiffies - td->jiffies);
+
+ td->jiffies = jiffies;
+ td->dev_time = value;
+
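+ /* the scan time counter wrapped past its logical maximum, unwrap the delta */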
+ if (delta < 0)
+ delta += field->logical_maximum;
+
+ /* HID_DG_SCANTIME is expressed in 100us, we want it in us. */
+ delta *= 100;
+
+ if (jdelta > MAX_TIMESTAMP_INTERVAL)
+ /* No data received for a while, resync the timestamp. */
+ return 0;
+ else
+ return td->timestamp + delta;
+}
+
static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -787,6 +827,9 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
case HID_DG_HEIGHT:
td->curdata.h = value;
break;
+ case HID_DG_SCANTIME:
+ td->timestamp = mt_compute_timestamp(td, field, value);
+ break;
case HID_DG_CONTACTCOUNT:
break;
case HID_DG_TOUCH:
@@ -1246,10 +1289,10 @@ static void mt_release_contacts(struct hid_device *hid)
td->num_received = 0;
}
-static void mt_expired_timeout(unsigned long arg)
+static void mt_expired_timeout(struct timer_list *t)
{
- struct hid_device *hdev = (void *)arg;
- struct mt_device *td = hid_get_drvdata(hdev);
+ struct mt_device *td = from_timer(td, t, release_timer);
+ struct hid_device *hdev = td->hdev;
/*
* An input report came in just before we release the sticky fingers,
@@ -1280,6 +1323,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
+ td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode = -1;
td->maxcontact_report_id = -1;
@@ -1331,7 +1375,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
- setup_timer(&td->release_timer, mt_expired_timeout, (long)hdev);
+ timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
if (ret != 0)
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 49c4bd34b3c5..87eda34ea2f8 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -239,9 +239,9 @@ drop_note:
return;
}
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
{
- struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+ struct pcmidi_sustain *pms = from_timer(pms, t, timer);
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
@@ -256,8 +256,7 @@ static void init_sustain_timers(struct pcmidi_snd *pm)
pms = &pm->sustained_notes[i];
pms->in_use = 0;
pms->pm = pm;
- setup_timer(&pms->timer, pcmidi_sustained_note_release,
- (unsigned long)pms);
+ timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
}
}
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index ef241d66562e..0f43c4292685 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -368,6 +368,11 @@ static int rmi_check_sanity(struct hid_device *hdev, u8 *data, int size)
static int rmi_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data, int size)
{
+ struct rmi_data *hdata = hid_get_drvdata(hdev);
+
+ if (!(hdata->device_flags & RMI_DEVICE))
+ return 0;
+
size = rmi_check_sanity(hdev, data, size);
if (size < 2)
return 0;
@@ -713,9 +718,11 @@ static void rmi_remove(struct hid_device *hdev)
{
struct rmi_data *hdata = hid_get_drvdata(hdev);
- clear_bit(RMI_STARTED, &hdata->flags);
- cancel_work_sync(&hdata->reset_work);
- rmi_unregister_transport_device(&hdata->xport);
+ if (hdata->device_flags & RMI_DEVICE) {
+ clear_bit(RMI_STARTED, &hdata->flags);
+ cancel_work_sync(&hdata->reset_work);
+ rmi_unregister_transport_device(&hdata->xport);
+ }
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d03203a82e8f..b9dc3ac4d4aa 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1439,10 +1439,16 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
goto out;
}
- ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0) {
- hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
- ret = 0;
+ /*
+ * But the USB interrupt would cause SHANWAN controllers to
+ * start rumbling non-stop.
+ */
+ if (strcmp(hdev->name, "SHANWAN PS3 GamePad")) {
+ ret = hid_hw_output_report(hdev, buf, 1);
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
}
out:
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index b83376077d72..bea8def64f43 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -242,6 +242,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605), /* NASCAR PRO FF2 Wheel */
+ .driver_data = (unsigned long)ff_joystick },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653), /* RGT Force Feedback CLUTCH Raging Wheel */
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index d00391418d1a..579884ebd94d 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -1226,9 +1226,9 @@ static void wiimote_schedule(struct wiimote_data *wdata)
spin_unlock_irqrestore(&wdata->state.lock, flags);
}
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
{
- struct wiimote_data *wdata = (void*)arg;
+ struct wiimote_data *wdata = from_timer(wdata, t, timer);
wiimote_schedule(wdata);
}
@@ -1740,7 +1740,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
wdata->state.cmd_battery = 0xff;
INIT_WORK(&wdata->init_worker, wiimote_init_worker);
- setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+ timer_setup(&wdata->timer, wiimote_init_timeout, 0);
return wdata;
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 9145c2129a96..e054ee43c1e2 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -46,6 +46,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
+#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
/* flags */
#define I2C_HID_STARTED 0
@@ -168,6 +169,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ 0, 0 }
};
@@ -252,7 +255,9 @@ static int __i2c_hid_command(struct i2c_client *client,
ret = 0;
- if (wait) {
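+ /* this device never raises an interrupt after reset, just give it time */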
+ if (wait && (ihid->quirks & I2C_HID_QUIRK_NO_IRQ_AFTER_RESET)) {
+ msleep(100);
+ } else if (wait) {
i2c_hid_dbg(ihid, "%s: waiting...\n", __func__);
if (!wait_event_timeout(ihid->wait,
!test_bit(I2C_HID_RESET_PENDING, &ihid->flags),
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index 8c08b0b358b1..825b70af672f 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel ISH HID drivers
# Copyright (c) 2014-2016, Intel Corporation.
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 890f2914a8ff..0ff227d0c033 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the USB input drivers
#
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 045b5da9b992..640dfb937c69 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -101,10 +101,10 @@ static int hid_start_in(struct hid_device *hid)
}
/* I/O retry timer routine */
-static void hid_retry_timeout(unsigned long _hid)
+static void hid_retry_timeout(struct timer_list *t)
{
- struct hid_device *hid = (struct hid_device *) _hid;
- struct usbhid_device *usbhid = hid->driver_data;
+ struct usbhid_device *usbhid = from_timer(usbhid, t, io_retry);
+ struct hid_device *hid = usbhid->hid;
dev_dbg(&usbhid->intf->dev, "retrying intr urb\n");
if (hid_start_in(hid))
@@ -1373,7 +1373,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
init_waitqueue_head(&usbhid->wait);
INIT_WORK(&usbhid->reset_work, hid_reset);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f489a5cfcb48..331f7f34ec14 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -170,6 +170,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
{ 0, 0 }
};
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 906e654fb0ba..ee71ad9b6cc1 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -196,6 +196,13 @@ static void wacom_feature_mapping(struct hid_device *hdev,
kfree(data);
break;
}
+
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x4200 /* Dell Canvas 27 */ &&
+ field->application == HID_UP_MSVENDOR) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 2;
+ }
}
/*
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index aa692e28b2cd..16af6886e828 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2140,6 +2140,12 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
case HID_DG_TIPSWITCH:
wacom_wac->hid_data.tipswitch |= value;
return;
+ case HID_DG_BARRELSWITCH:
+ wacom_wac->hid_data.barrelswitch = value;
+ return;
+ case HID_DG_BARRELSWITCH2:
+ wacom_wac->hid_data.barrelswitch2 = value;
+ return;
case HID_DG_TOOLSERIALNUMBER:
if (value) {
wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
@@ -2217,11 +2223,11 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
if (!usage->type || delay_pen_events(wacom_wac))
return;
- /* send pen events only when the pen is in/entering/leaving proximity */
- if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
- return;
-
- input_event(input, usage->type, usage->code, value);
+ /* send pen events only when the pen is in range */
+ if (wacom_wac->hid_data.inrange_state)
+ input_event(input, usage->type, usage->code, value);
+ else if (wacom_wac->shared->stylus_in_proximity && !wacom_wac->hid_data.sense_state)
+ input_event(input, usage->type, usage->code, 0);
}
static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -2236,11 +2242,11 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct input_dev *input = wacom_wac->pen_input;
- bool prox = wacom_wac->hid_data.inrange_state;
- bool range = wacom_wac->hid_data.sense_state;
+ bool range = wacom_wac->hid_data.inrange_state;
+ bool sense = wacom_wac->hid_data.sense_state;
- if (!wacom_wac->tool[0] && prox) { /* first in prox */
- /* Going into proximity select tool */
+ if (!wacom_wac->tool[0] && range) { /* first in range */
+ /* Going into range select tool */
if (wacom_wac->hid_data.invert_state)
wacom_wac->tool[0] = BTN_TOOL_RUBBER;
else if (wacom_wac->id[0])
@@ -2250,10 +2256,16 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
}
/* keep pen state for touch events */
- wacom_wac->shared->stylus_in_proximity = range;
+ wacom_wac->shared->stylus_in_proximity = sense;
if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
int id = wacom_wac->id[0];
+ int sw_state = wacom_wac->hid_data.barrelswitch |
+ (wacom_wac->hid_data.barrelswitch2 << 1);
+
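+ /* both barrel switches pressed together are reported as BTN_STYLUS3 */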
+ input_report_key(input, BTN_STYLUS, sw_state == 1);
+ input_report_key(input, BTN_STYLUS2, sw_state == 2);
+ input_report_key(input, BTN_STYLUS3, sw_state == 3);
/*
* Non-USI EMR tools should have their IDs mangled to
@@ -2269,10 +2281,10 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
*/
input_report_key(input, BTN_TOUCH,
wacom_wac->hid_data.tipswitch);
- input_report_key(input, wacom_wac->tool[0], prox);
+ input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
- input_report_abs(input, ABS_MISC, prox ? id : 0);
+ input_report_abs(input, ABS_MISC, sense ? id : 0);
}
wacom_wac->hid_data.tipswitch = false;
@@ -2280,7 +2292,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
input_sync(input);
}
- if (!prox) {
+ if (!sense) {
wacom_wac->tool[0] = 0;
wacom_wac->id[0] = 0;
wacom_wac->serial[0] = 0;
@@ -3300,9 +3312,11 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
else
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
- if (features->type == HID_GENERIC)
- /* setup has already been done */
+ if (features->type == HID_GENERIC) {
+ /* setup has already been done; apply otherwise-undetectable quirks */
+ input_set_capability(input_dev, EV_KEY, BTN_STYLUS3);
return 0;
+ }
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(ABS_MISC, input_dev->absbit);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8a03654048bf..64d8f014602e 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -166,6 +166,7 @@
((f)->physical == HID_DG_PEN) || \
((f)->application == HID_DG_PEN) || \
((f)->application == HID_DG_DIGITIZER) || \
+ ((f)->application == WACOM_HID_WD_PEN) || \
((f)->application == WACOM_HID_WD_DIGITIZER) || \
((f)->application == WACOM_HID_G9_PEN) || \
((f)->application == WACOM_HID_G11_PEN))
@@ -291,6 +292,8 @@ struct hid_data {
bool inrange_state;
bool invert_state;
bool tipswitch;
+ bool barrelswitch;
+ bool barrelswitch2;
int x;
int y;
int pressure;
diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile
index 96944783d584..53c3bcdbf1b0 100644
--- a/drivers/hsi/Makefile
+++ b/drivers/hsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for HSI
#
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 57f70c28fa38..cf9c2a332ad8 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -773,13 +773,13 @@ static int __init hsc_init(void)
if ((max_data_size < 4) || (max_data_size > 0x10000) ||
(max_data_size & (max_data_size - 1))) {
- pr_err("Invalid max read/write data size");
+ pr_err("Invalid max read/write data size\n");
return -EINVAL;
}
ret = hsi_register_client_driver(&hsc_driver);
if (ret) {
- pr_err("Error while registering HSI/SSI driver %d", ret);
+ pr_err("Error while registering HSI/SSI driver %d\n", ret);
return ret;
}
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 93d28c0ec8bf..9b167bc6eee4 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -464,10 +464,10 @@ static void ssip_error(struct hsi_client *cl)
hsi_async_read(cl, msg);
}
-static void ssip_keep_alive(unsigned long data)
+static void ssip_keep_alive(struct timer_list *t)
{
- struct hsi_client *cl = (struct hsi_client *)data;
- struct ssi_protocol *ssi = hsi_client_drvdata(cl);
+ struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive);
+ struct hsi_client *cl = ssi->cl;
dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
ssi->main_state, ssi->recv_state, ssi->send_state);
@@ -490,9 +490,19 @@ static void ssip_keep_alive(unsigned long data)
spin_unlock(&ssi->lock);
}
-static void ssip_wd(unsigned long data)
+static void ssip_rx_wd(struct timer_list *t)
+{
+ struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd);
+ struct hsi_client *cl = ssi->cl;
+
+ dev_err(&cl->device, "Watchdog trigerred\n");
+ ssip_error(cl);
+}
+
+static void ssip_tx_wd(struct timer_list *t)
{
- struct hsi_client *cl = (struct hsi_client *)data;
+ struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd);
+ struct hsi_client *cl = ssi->cl;
dev_err(&cl->device, "Watchdog trigerred\n");
ssip_error(cl);
@@ -1084,15 +1094,9 @@ static int ssi_protocol_probe(struct device *dev)
}
spin_lock_init(&ssi->lock);
- init_timer_deferrable(&ssi->rx_wd);
- init_timer_deferrable(&ssi->tx_wd);
- init_timer(&ssi->keep_alive);
- ssi->rx_wd.data = (unsigned long)cl;
- ssi->rx_wd.function = ssip_wd;
- ssi->tx_wd.data = (unsigned long)cl;
- ssi->tx_wd.function = ssip_wd;
- ssi->keep_alive.data = (unsigned long)cl;
- ssi->keep_alive.function = ssip_keep_alive;
+ timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE);
+ timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE);
+ timer_setup(&ssi->keep_alive, ssip_keep_alive, 0);
INIT_LIST_HEAD(&ssi->txqueue);
INIT_LIST_HEAD(&ssi->cmdqueue);
atomic_set(&ssi->tx_usecnt, 0);
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 88e48b346916..41a09f506803 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -334,7 +334,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
case POST_RATE_CHANGE:
dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n",
clk_data->old_rate, clk_data->new_rate);
- omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* KHz */
+ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* kHz */
for (i = 0; i < ssi->num_ports; i++) {
omap_port = omap_ssi->port[i];
@@ -467,9 +467,9 @@ static int ssi_hw_init(struct hsi_controller *ssi)
}
/* Resetting GDD */
writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG);
- /* Get FCK rate in KHz */
+ /* Get FCK rate in kHz */
omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000);
- dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate);
+ dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate);
writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG);
omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON;
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 39c9b2c08d33..14c22786b519 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -1,8 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+CFLAGS_hv_trace.o = -I$(src)
+
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
- channel_mgmt.o ring_buffer.o
+ channel_mgmt.o ring_buffer.o hv_trace.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 894b67ac2cae..19f0cf37e0ed 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -43,6 +43,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
+ trace_vmbus_setevent(channel);
+
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
@@ -185,6 +187,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
+ trace_vmbus_open(open_msg, ret);
+
if (ret != 0) {
err = ret;
goto error_clean_msglist;
@@ -234,13 +238,18 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
+ int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
- return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+ ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
+
+ trace_vmbus_send_tl_connect_request(&conn_msg, ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
@@ -433,6 +442,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
+
+ trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
+
if (ret != 0)
goto cleanup;
@@ -448,6 +460,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
+
+ trace_vmbus_establish_gpadl_body(gpadl_body, ret);
+
if (ret != 0)
goto cleanup;
@@ -511,6 +526,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
+ trace_vmbus_teardown_gpadl(msg, ret);
+
if (ret)
goto post_msg_err;
@@ -589,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
+ trace_vmbus_close_internal(msg, ret);
+
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
@@ -745,6 +764,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
+ desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
@@ -788,6 +808,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
+ desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 379b0df123be..ec5454f3f4a6 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -350,7 +350,7 @@ static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
- kfree_rcu(channel, rcu);
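+ /* the channel is freed by vmbus_chan_release() when the last kobject reference drops */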
+ kobject_put(&channel->kobj);
}
static void percpu_channel_enq(void *arg)
@@ -373,12 +373,15 @@ static void percpu_channel_deq(void *arg)
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
+ int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
- vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
- true);
+ ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
+ true);
+
+ trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(u32 relid)
@@ -520,6 +523,14 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->state = CHANNEL_OPEN_STATE;
if (!fnew) {
+ struct hv_device *dev
+ = newchannel->primary_channel->device_obj;
+
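+ /* expose the new sub-channel in sysfs before invoking the creation callback */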
+ if (vmbus_add_channel_kobj(dev, newchannel)) {
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ goto err_free_chan;
+ }
+
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
@@ -805,6 +816,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
+ trace_vmbus_onoffer(offer);
+
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
@@ -846,6 +859,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+ trace_vmbus_onoffer_rescind(rescind);
+
/*
* The offer msg and the corresponding rescind msg
 * from the host are guaranteed to be ordered -
@@ -974,6 +989,8 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
result = (struct vmbus_channel_open_result *)hdr;
+ trace_vmbus_onopen_result(result);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1018,6 +1035,8 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
+ trace_vmbus_ongpadl_created(gpadlcreated);
+
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
@@ -1066,6 +1085,8 @@ static void vmbus_ongpadl_torndown(
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
+ trace_vmbus_ongpadl_torndown(gpadl_torndown);
+
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
@@ -1109,6 +1130,9 @@ static void vmbus_onversion_response(
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
+
+ trace_vmbus_onversion_response(version_response);
+
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
@@ -1168,6 +1192,8 @@ void vmbus_onmessage(void *context)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
+ trace_vmbus_on_message(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
@@ -1201,9 +1227,11 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
-
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
+
+ trace_vmbus_request_offers(ret);
+
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f41901f80b64..447371f4de56 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -117,6 +117,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
+
+ trace_vmbus_negotiate_version(msg, ret);
+
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
@@ -319,6 +322,8 @@ void vmbus_on_event(unsigned long data)
struct vmbus_channel *channel = (void *) data;
unsigned long time_limit = jiffies + 2;
+ trace_vmbus_on_event(channel);
+
do {
void (*callback_fn)(void *);
@@ -409,6 +414,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
+ ++channel->sig_events;
+
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_trace.c b/drivers/hv/hv_trace.c
new file mode 100644
index 000000000000..df47acd01a81
--- /dev/null
+++ b/drivers/hv/hv_trace.c
@@ -0,0 +1,4 @@
+#include "hyperv_vmbus.h"
+
+#define CREATE_TRACE_POINTS
+#include "hv_trace.h"
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
new file mode 100644
index 000000000000..d635ee95b20d
--- /dev/null
+++ b/drivers/hv/hv_trace.h
@@ -0,0 +1,327 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_HV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _HV_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(vmbus_hdr_msg,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr),
+ TP_STRUCT__entry(__field(unsigned int, msgtype)),
+ TP_fast_assign(__entry->msgtype = hdr->msgtype;),
+ TP_printk("msgtype=%u", __entry->msgtype)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_msg_dpc,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+DEFINE_EVENT(vmbus_hdr_msg, vmbus_on_message,
+ TP_PROTO(const struct vmbus_channel_message_header *hdr),
+ TP_ARGS(hdr)
+);
+
+TRACE_EVENT(vmbus_onoffer,
+ TP_PROTO(const struct vmbus_channel_offer_channel *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u8, monitorid)
+ __field(u16, is_ddc_int)
+ __field(u32, connection_id)
+ __array(char, if_type, 16)
+ __array(char, if_instance, 16)
+ __field(u16, chn_flags)
+ __field(u16, mmio_mb)
+ __field(u16, sub_idx)
+ ),
+ TP_fast_assign(__entry->child_relid = offer->child_relid;
+ __entry->monitorid = offer->monitorid;
+ __entry->is_ddc_int = offer->is_dedicated_interrupt;
+ __entry->connection_id = offer->connection_id;
+ memcpy(__entry->if_type,
+ &offer->offer.if_type.b, 16);
+ memcpy(__entry->if_instance,
+ &offer->offer.if_instance.b, 16);
+ __entry->chn_flags = offer->offer.chn_flags;
+ __entry->mmio_mb = offer->offer.mmio_megabytes;
+ __entry->sub_idx = offer->offer.sub_channel_index;
+ ),
+ TP_printk("child_relid 0x%x, monitorid 0x%x, is_dedicated %d, "
+ "connection_id 0x%x, if_type %pUl, if_instance %pUl, "
+ "chn_flags 0x%x, mmio_megabytes %d, sub_channel_index %d",
+ __entry->child_relid, __entry->monitorid,
+ __entry->is_ddc_int, __entry->connection_id,
+ __entry->if_type, __entry->if_instance,
+ __entry->chn_flags, __entry->mmio_mb,
+ __entry->sub_idx
+ )
+ );
+
+TRACE_EVENT(vmbus_onoffer_rescind,
+ TP_PROTO(const struct vmbus_channel_rescind_offer *offer),
+ TP_ARGS(offer),
+ TP_STRUCT__entry(__field(u32, child_relid)),
+ TP_fast_assign(__entry->child_relid = offer->child_relid),
+ TP_printk("child_relid 0x%x", __entry->child_relid)
+ );
+
+TRACE_EVENT(vmbus_onopen_result,
+ TP_PROTO(const struct vmbus_channel_open_result *result),
+ TP_ARGS(result),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = result->child_relid;
+ __entry->openid = result->openid;
+ __entry->status = result->status;
+ ),
+ TP_printk("child_relid 0x%x, openid %d, status %d",
+ __entry->child_relid, __entry->openid, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_created,
+ TP_PROTO(const struct vmbus_channel_gpadl_created *gpadlcreated),
+ TP_ARGS(gpadlcreated),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u32, status)
+ ),
+ TP_fast_assign(__entry->child_relid = gpadlcreated->child_relid;
+ __entry->gpadl = gpadlcreated->gpadl;
+ __entry->status = gpadlcreated->creation_status;
+ ),
+ TP_printk("child_relid 0x%x, gpadl 0x%x, creation_status %d",
+ __entry->child_relid, __entry->gpadl, __entry->status
+ )
+ );
+
+TRACE_EVENT(vmbus_ongpadl_torndown,
+ TP_PROTO(const struct vmbus_channel_gpadl_torndown *gpadltorndown),
+ TP_ARGS(gpadltorndown),
+ TP_STRUCT__entry(__field(u32, gpadl)),
+ TP_fast_assign(__entry->gpadl = gpadltorndown->gpadl),
+ TP_printk("gpadl 0x%x", __entry->gpadl)
+ );
+
+TRACE_EVENT(vmbus_onversion_response,
+ TP_PROTO(const struct vmbus_channel_version_response *response),
+ TP_ARGS(response),
+ TP_STRUCT__entry(
+ __field(u8, ver)
+ ),
+ TP_fast_assign(__entry->ver = response->version_supported;
+ ),
+ TP_printk("version_supported %d", __entry->ver)
+ );
+
+TRACE_EVENT(vmbus_request_offers,
+ TP_PROTO(int ret),
+ TP_ARGS(ret),
+ TP_STRUCT__entry(__field(int, ret)),
+ TP_fast_assign(__entry->ret = ret),
+ TP_printk("sending ret %d", __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_open,
+ TP_PROTO(const struct vmbus_channel_open_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, openid)
+ __field(u32, gpadlhandle)
+ __field(u32, target_vp)
+ __field(u32, offset)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->openid = msg->openid;
+ __entry->gpadlhandle = msg->ringbuffer_gpadlhandle;
+ __entry->target_vp = msg->target_vp;
+ __entry->offset = msg->downstream_ringbuffer_pageoffset;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, openid %d, "
+ "gpadlhandle 0x%x, target_vp 0x%x, offset 0x%x, ret %d",
+ __entry->child_relid, __entry->openid,
+ __entry->gpadlhandle, __entry->target_vp,
+ __entry->offset, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_close_internal,
+ TP_PROTO(const struct vmbus_channel_close_channel *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d", __entry->child_relid,
+ __entry->ret)
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_header,
+ TP_PROTO(const struct vmbus_channel_gpadl_header *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(u16, range_buflen)
+ __field(u16, rangecount)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->range_buflen = msg->range_buflen;
+ __entry->rangecount = msg->rangecount;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, range_buflen %d "
+ "rangecount %d, ret %d",
+ __entry->child_relid, __entry->gpadl,
+ __entry->range_buflen, __entry->rangecount, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_establish_gpadl_body,
+ TP_PROTO(const struct vmbus_channel_gpadl_body *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, msgnumber)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->msgnumber = msg->msgnumber;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending msgnumber %d, gpadl 0x%x, ret %d",
+ __entry->msgnumber, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_teardown_gpadl,
+ TP_PROTO(const struct vmbus_channel_gpadl_teardown *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(u32, gpadl)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->gpadl = msg->gpadl;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, gpadl 0x%x, ret %d",
+ __entry->child_relid, __entry->gpadl, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_negotiate_version,
+ TP_PROTO(const struct vmbus_channel_initiate_contact *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, ver)
+ __field(u32, target_vcpu)
+ __field(int, ret)
+ __field(u64, int_page)
+ __field(u64, mon_page1)
+ __field(u64, mon_page2)
+ ),
+ TP_fast_assign(
+ __entry->ver = msg->vmbus_version_requested;
+ __entry->target_vcpu = msg->target_vcpu;
+ __entry->int_page = msg->interrupt_page;
+ __entry->mon_page1 = msg->monitor_page1;
+ __entry->mon_page2 = msg->monitor_page2;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending vmbus_version_requested %d, target_vcpu 0x%x, "
+ "pages %llx:%llx:%llx, ret %d",
+ __entry->ver, __entry->target_vcpu, __entry->int_page,
+ __entry->mon_page1, __entry->mon_page2, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_release_relid,
+ TP_PROTO(const struct vmbus_channel_relid_released *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __field(u32, child_relid)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->child_relid = msg->child_relid;
+ __entry->ret = ret;
+ ),
+ TP_printk("sending child_relid 0x%x, ret %d",
+ __entry->child_relid, __entry->ret
+ )
+ );
+
+TRACE_EVENT(vmbus_send_tl_connect_request,
+ TP_PROTO(const struct vmbus_channel_tl_connect_request *msg,
+ int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __array(char, guest_id, 16)
+ __array(char, host_id, 16)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
+ memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+ __entry->ret = ret;
+ ),
+ TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+ "ret %d",
+ __entry->guest_id, __entry->host_id, __entry->ret
+ )
+ );
+
+DECLARE_EVENT_CLASS(vmbus_channel,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel),
+ TP_STRUCT__entry(__field(u32, relid)),
+ TP_fast_assign(__entry->relid = channel->offermsg.child_relid),
+ TP_printk("relid 0x%x", __entry->relid)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_chan_sched,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_setevent,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+DEFINE_EVENT(vmbus_channel, vmbus_on_event,
+ TP_PROTO(const struct vmbus_channel *channel),
+ TP_ARGS(channel)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hv_trace
+#endif /* _HV_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 49569f8fe038..22300ec7b556 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -31,6 +31,8 @@
#include <linux/hyperv.h>
#include <linux/interrupt.h>
+#include "hv_trace.h"
+
/*
* Timeout for services such as KVP and fcopy.
*/
@@ -373,6 +375,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
+int vmbus_add_channel_kobj(struct hv_device *device_obj,
+ struct vmbus_channel *channel);
struct vmbus_channel *relid2channel(u32 relid);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 937801ac2fe0..76ed9a216f10 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -65,7 +65,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
regs = current_pt_regs();
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -75,7 +75,7 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
- hyperv_report_panic(regs);
+ hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
@@ -107,28 +107,30 @@ static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
-static u8 channel_monitor_group(struct vmbus_channel *channel)
+static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
-static u8 channel_monitor_offset(struct vmbus_channel *channel)
+static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
-static u32 channel_pending(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_pending(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
+
return monitor_page->trigger_group[monitor_group].pending;
}
-static u32 channel_latency(struct vmbus_channel *channel,
- struct hv_monitor_page *monitor_page)
+static u32 channel_latency(const struct vmbus_channel *channel,
+ const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
+
return monitor_page->latency[monitor_group][monitor_offset];
}
@@ -833,6 +835,8 @@ void vmbus_on_msg_dpc(unsigned long data)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ trace_vmbus_on_msg_dpc(hdr);
+
if (hdr->msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
goto msg_handled;
@@ -942,6 +946,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->rescind)
continue;
+ trace_vmbus_chan_sched(channel);
+
+ ++channel->interrupts;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
@@ -1133,6 +1141,159 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
+
+/*
+ * Called when the last reference to the channel is gone.
+ */
+static void vmbus_chan_release(struct kobject *kobj)
+{
+ struct vmbus_channel *channel
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ kfree_rcu(channel, rcu);
+}
+
+struct vmbus_chan_attribute {
+ struct attribute attr;
+ ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*store)(struct vmbus_channel *chan,
+ const char *buf, size_t count);
+};
+#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
+ struct vmbus_chan_attribute chan_attr_##_name \
+ = __ATTR(_name, _mode, _show, _store)
+#define VMBUS_CHAN_ATTR_RW(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
+#define VMBUS_CHAN_ATTR_RO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
+#define VMBUS_CHAN_ATTR_WO(_name) \
+ struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
+
+static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ const struct vmbus_chan_attribute *attribute
+ = container_of(attr, struct vmbus_chan_attribute, attr);
+ const struct vmbus_channel *chan
+ = container_of(kobj, struct vmbus_channel, kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(chan, buf);
+}
+
+static const struct sysfs_ops vmbus_chan_sysfs_ops = {
+ .show = vmbus_chan_attr_show,
+};
+
+static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(out_mask);
+
+static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+}
+VMBUS_CHAN_ATTR_RO(in_mask);
+
+static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+}
+VMBUS_CHAN_ATTR_RO(read_avail);
+
+static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+{
+ const struct hv_ring_buffer_info *rbi = &channel->outbound;
+
+ return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+}
+VMBUS_CHAN_ATTR_RO(write_avail);
+
+static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%u\n", channel->target_cpu);
+}
+VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
+
+static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_pending(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
+
+static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+ char *buf)
+{
+ return sprintf(buf, "%d\n",
+ channel_latency(channel,
+ vmbus_connection.monitor_pages[1]));
+}
+VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
+
+static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->interrupts);
+}
+VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
+
+static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+{
+ return sprintf(buf, "%llu\n", channel->sig_events);
+}
+VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
+
+static struct attribute *vmbus_chan_attrs[] = {
+ &chan_attr_out_mask.attr,
+ &chan_attr_in_mask.attr,
+ &chan_attr_read_avail.attr,
+ &chan_attr_write_avail.attr,
+ &chan_attr_cpu.attr,
+ &chan_attr_pending.attr,
+ &chan_attr_latency.attr,
+ &chan_attr_interrupts.attr,
+ &chan_attr_events.attr,
+ NULL
+};
+
+static struct kobj_type vmbus_chan_ktype = {
+ .sysfs_ops = &vmbus_chan_sysfs_ops,
+ .release = vmbus_chan_release,
+ .default_attrs = vmbus_chan_attrs,
+};
+
+/*
+ * vmbus_add_channel_kobj - set up a sub-directory under device/channels
+ */
+int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+{
+ struct kobject *kobj = &channel->kobj;
+ u32 relid = channel->offermsg.child_relid;
+ int ret;
+
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+ if (ret)
+ return ret;
+
+ kobject_uevent(kobj, KOBJ_ADD);
+
+ return 0;
+}
+
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
@@ -1164,7 +1325,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
- int ret = 0;
+ struct kobject *kobj = &child_device_obj->device.kobj;
+ int ret;
dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
@@ -1178,13 +1340,32 @@ int vmbus_device_register(struct hv_device *child_device_obj)
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
-
- if (ret)
+ if (ret) {
pr_err("Unable to register child device\n");
- else
- pr_debug("child device %s registered\n",
- dev_name(&child_device_obj->device));
+ return ret;
+ }
+
+ child_device_obj->channels_kset = kset_create_and_add("channels",
+ NULL, kobj);
+ if (!child_device_obj->channels_kset) {
+ ret = -ENOMEM;
+ goto err_dev_unregister;
+ }
+
+ ret = vmbus_add_channel_kobj(child_device_obj,
+ child_device_obj->channel);
+ if (ret) {
+ pr_err("Unable to register primary channeln");
+ goto err_kset_unregister;
+ }
+
+ return 0;
+
+err_kset_unregister:
+ kset_unregister(child_device_obj->channels_kset);
+err_dev_unregister:
+ device_unregister(&child_device_obj->device);
return ret;
}
@@ -1534,7 +1715,7 @@ static int __init hv_acpi_init(void)
{
int ret, t;
- if (x86_hyper != &x86_hyper_ms_hyperv)
+ if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return -ENODEV;
init_completion(&probe_event);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d65431417b17..7ad017690e3a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -552,6 +552,7 @@ config SENSORS_G762
config SENSORS_GPIO_FAN
tristate "GPIO fan"
+ depends on OF_GPIO
depends on GPIOLIB || COMPILE_TEST
depends on THERMAL || THERMAL=n
help
@@ -862,6 +863,20 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX6621
+ tristate "Maxim MAX6621 sensor chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for MAX6621 sensor chip.
+ MAX6621 is a PECI-to-I2C translator that provides an efficient,
+ low-cost solution for PECI-to-SMBus/I2C protocol conversion.
+ It allows the PECI-compliant host to read the temperature
+ directly from up to four PECI-enabled CPUs.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6621.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index c84d9784be98..0fe489fab663 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for sensor chip drivers.
#
@@ -117,6 +118,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index d491c698529e..21ad15ce3163 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HWMON_ADT7X10_H__
#define __HWMON_ADT7X10_H__
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 4875e99b59c9..6d34c05a4f83 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -579,7 +579,6 @@ static ssize_t show_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
val = config | (altbit << 3);
- newval = 0;
if (val == 3 || val >= 10)
newval = 255;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 69b97d45e3cb..63a95e23ca81 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,19 +7,19 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sysfs.h>
#include <linux/regmap.h>
+#include <linux/sysfs.h>
#include <linux/thermal.h>
/* ASPEED PWM & FAN Tach Register Definition */
@@ -161,7 +161,7 @@
* 11: reserved.
*/
#define M_TACH_MODE 0x02 /* 10b */
-#define M_TACH_UNIT 0x00c0
+#define M_TACH_UNIT 0x0210
#define INIT_FAN_CTRL 0xFF
/* How long we sleep in us while waiting for an RPM result. */
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9c355b9d31c5..5c9a52599cf6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -29,21 +29,24 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
-#include <linux/gpio.h>
-#include <linux/gpio-fan.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/thermal.h>
+struct gpio_fan_speed {
+ int rpm;
+ int ctrl_val;
+};
+
struct gpio_fan_data {
- struct platform_device *pdev;
+ struct device *dev;
struct device *hwmon_dev;
/* Cooling device if any */
struct thermal_cooling_device *cdev;
struct mutex lock; /* lock GPIOs operations. */
- int num_ctrl;
- unsigned *ctrl;
+ int num_gpios;
+ struct gpio_desc **gpios;
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
@@ -51,7 +54,7 @@ struct gpio_fan_data {
int resume_speed;
#endif
bool pwm_enable;
- struct gpio_fan_alarm *alarm;
+ struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
};
@@ -64,8 +67,8 @@ static void fan_alarm_notify(struct work_struct *ws)
struct gpio_fan_data *fan_data =
container_of(ws, struct gpio_fan_data, alarm_work);
- sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm");
- kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE);
+ sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE);
}
static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
@@ -81,47 +84,30 @@ static ssize_t fan1_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- struct gpio_fan_alarm *alarm = fan_data->alarm;
- int value = gpio_get_value_cansleep(alarm->gpio);
- if (alarm->active_low)
- value = !value;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%d\n",
+ gpiod_get_value_cansleep(fan_data->alarm_gpio));
}
static DEVICE_ATTR_RO(fan1_alarm);
-static int fan_alarm_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_alarm *alarm)
+static int fan_alarm_init(struct gpio_fan_data *fan_data)
{
- int err;
int alarm_irq;
- struct platform_device *pdev = fan_data->pdev;
-
- fan_data->alarm = alarm;
-
- err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
- if (err)
- return err;
-
- err = gpio_direction_input(alarm->gpio);
- if (err)
- return err;
+ struct device *dev = fan_data->dev;
/*
* If the alarm GPIO doesn't support interrupts, just leave
* without initializing the fail notification support.
*/
- alarm_irq = gpio_to_irq(alarm->gpio);
- if (alarm_irq < 0)
+ alarm_irq = gpiod_to_irq(fan_data->alarm_gpio);
+ if (alarm_irq <= 0)
return 0;
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
- IRQF_SHARED, "GPIO fan alarm", fan_data);
- return err;
+ return devm_request_irq(dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
}
/*
@@ -133,8 +119,9 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
{
int i;
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1);
+ for (i = 0; i < fan_data->num_gpios; i++)
+ gpiod_set_value_cansleep(fan_data->gpios[i],
+ (ctrl_val >> i) & 1);
}
static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
@@ -142,10 +129,10 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
int i;
int ctrl_val = 0;
- for (i = 0; i < fan_data->num_ctrl; i++) {
+ for (i = 0; i < fan_data->num_gpios; i++) {
int value;
- value = gpio_get_value_cansleep(fan_data->ctrl[i]);
+ value = gpiod_get_value_cansleep(fan_data->gpios[i]);
ctrl_val |= (value << i);
}
return ctrl_val;
@@ -170,7 +157,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
if (fan_data->speed[i].ctrl_val == ctrl_val)
return i;
- dev_warn(&fan_data->pdev->dev,
+ dev_warn(fan_data->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
return -ENODEV;
@@ -328,9 +315,9 @@ static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 0 && !data->alarm)
+ if (index == 0 && !data->alarm_gpio)
return 0;
- if (index > 0 && !data->ctrl)
+ if (index > 0 && !data->gpios)
return 0;
return attr->mode;
@@ -358,30 +345,25 @@ static const struct attribute_group *gpio_fan_groups[] = {
NULL
};
-static int fan_ctrl_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_platform_data *pdata)
+static int fan_ctrl_init(struct gpio_fan_data *fan_data)
{
- struct platform_device *pdev = fan_data->pdev;
- int num_ctrl = pdata->num_ctrl;
- unsigned *ctrl = pdata->ctrl;
+ int num_gpios = fan_data->num_gpios;
+ struct gpio_desc **gpios = fan_data->gpios;
int i, err;
- for (i = 0; i < num_ctrl; i++) {
- err = devm_gpio_request(&pdev->dev, ctrl[i],
- "GPIO fan control");
- if (err)
- return err;
-
- err = gpio_direction_output(ctrl[i],
- gpio_get_value_cansleep(ctrl[i]));
+ for (i = 0; i < num_gpios; i++) {
+ /*
+ * The GPIO descriptors were retrieved with GPIOD_ASIS so here
+ * we set the GPIO to output mode, carefully preserving the
+ * current value by driving it to whatever level it already
+ * reads (no surprise changes in default fan speed).
+ */
+ err = gpiod_direction_output(gpios[i],
+ gpiod_get_value_cansleep(gpios[i]));
if (err)
return err;
}
- fan_data->num_ctrl = num_ctrl;
- fan_data->ctrl = ctrl;
- fan_data->num_speed = pdata->num_speed;
- fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
@@ -432,67 +414,47 @@ static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
.set_cur_state = gpio_fan_set_cur_state,
};
-#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
*/
-static int gpio_fan_get_of_pdata(struct device *dev,
- struct gpio_fan_platform_data *pdata)
+static int gpio_fan_get_of_data(struct gpio_fan_data *fan_data)
{
- struct device_node *node;
struct gpio_fan_speed *speed;
- unsigned *ctrl;
+ struct device *dev = fan_data->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_desc **gpios;
unsigned i;
u32 u;
struct property *prop;
const __be32 *p;
- node = dev->of_node;
-
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios") > 0) {
- struct gpio_fan_alarm *alarm;
- int val;
- enum of_gpio_flags flags;
-
- alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm),
- GFP_KERNEL);
- if (!alarm)
- return -ENOMEM;
-
- val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags);
- if (val < 0)
- return val;
- alarm->gpio = val;
- alarm->active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- pdata->alarm = alarm;
- }
+ fan_data->alarm_gpio = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN);
+ if (IS_ERR(fan_data->alarm_gpio))
+ return PTR_ERR(fan_data->alarm_gpio);
/* Fill GPIO pin array */
- pdata->num_ctrl = of_gpio_count(node);
- if (pdata->num_ctrl <= 0) {
- if (pdata->alarm)
+ fan_data->num_gpios = gpiod_count(dev, NULL);
+ if (fan_data->num_gpios <= 0) {
+ if (fan_data->alarm_gpio)
return 0;
dev_err(dev, "DT properties empty / missing");
return -ENODEV;
}
- ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned),
- GFP_KERNEL);
- if (!ctrl)
+ gpios = devm_kzalloc(dev,
+ fan_data->num_gpios * sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!gpios)
return -ENOMEM;
- for (i = 0; i < pdata->num_ctrl; i++) {
- int val;
-
- val = of_get_gpio(node, i);
- if (val < 0)
- return val;
- ctrl[i] = val;
+ for (i = 0; i < fan_data->num_gpios; i++) {
+ gpios[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(gpios[i]))
+ return PTR_ERR(gpios[i]);
}
- pdata->ctrl = ctrl;
+ fan_data->gpios = gpios;
/* Get number of RPM/ctrl_val pairs in speed map */
- prop = of_find_property(node, "gpio-fan,speed-map", &i);
+ prop = of_find_property(np, "gpio-fan,speed-map", &i);
if (!prop) {
dev_err(dev, "gpio-fan,speed-map DT property missing");
return -ENODEV;
@@ -502,7 +464,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries");
return -ENODEV;
}
- pdata->num_speed = i / 2;
+ fan_data->num_speed = i / 2;
/*
* Populate speed map
@@ -510,12 +472,12 @@ static int gpio_fan_get_of_pdata(struct device *dev,
* this needs splitting into pairs to create gpio_fan_speed structs
*/
speed = devm_kzalloc(dev,
- pdata->num_speed * sizeof(struct gpio_fan_speed),
+ fan_data->num_speed * sizeof(struct gpio_fan_speed),
GFP_KERNEL);
if (!speed)
return -ENOMEM;
p = NULL;
- for (i = 0; i < pdata->num_speed; i++) {
+ for (i = 0; i < fan_data->num_speed; i++) {
p = of_prop_next_u32(prop, p, &u);
if (!p)
return -ENODEV;
@@ -525,7 +487,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
return -ENODEV;
speed[i].ctrl_val = u;
}
- pdata->speed = speed;
+ fan_data->speed = speed;
return 0;
}
@@ -535,76 +497,58 @@ static const struct of_device_id of_gpio_fan_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
-#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
- struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ fan_data = devm_kzalloc(dev, sizeof(struct gpio_fan_data),
GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
-#ifdef CONFIG_OF_GPIO
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct gpio_fan_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- err = gpio_fan_get_of_pdata(&pdev->dev, pdata);
- if (err)
- return err;
- }
-#else /* CONFIG_OF_GPIO */
- if (!pdata)
- return -EINVAL;
-#endif /* CONFIG_OF_GPIO */
+ fan_data->dev = dev;
+ err = gpio_fan_get_of_data(fan_data);
+ if (err)
+ return err;
- fan_data->pdev = pdev;
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
/* Configure alarm GPIO if available. */
- if (pdata->alarm) {
- err = fan_alarm_init(fan_data, pdata->alarm);
+ if (fan_data->alarm_gpio) {
+ err = fan_alarm_init(fan_data);
if (err)
return err;
}
/* Configure control GPIOs if available. */
- if (pdata->ctrl && pdata->num_ctrl > 0) {
- if (!pdata->speed || pdata->num_speed <= 1)
+ if (fan_data->gpios && fan_data->num_gpios > 0) {
+ if (!fan_data->speed || fan_data->num_speed <= 1)
return -EINVAL;
- err = fan_ctrl_init(fan_data, pdata);
+ err = fan_ctrl_init(fan_data);
if (err)
return err;
}
/* Make this driver part of hwmon class. */
fan_data->hwmon_dev =
- devm_hwmon_device_register_with_groups(&pdev->dev,
+ devm_hwmon_device_register_with_groups(dev,
"gpio_fan", fan_data,
gpio_fan_groups);
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
-#ifdef CONFIG_OF_GPIO
+
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
+ fan_data->cdev = thermal_of_cooling_device_register(np,
"gpio-fan",
fan_data,
&gpio_fan_cool_ops);
-#else /* CONFIG_OF_GPIO */
- /* Optional cooling device register for non Device tree platforms */
- fan_data->cdev = thermal_cooling_device_register("gpio-fan", fan_data,
- &gpio_fan_cool_ops);
-#endif /* CONFIG_OF_GPIO */
- dev_info(&pdev->dev, "GPIO fan initialized\n");
+ dev_info(dev, "GPIO fan initialized\n");
return 0;
}
@@ -616,7 +560,7 @@ static int gpio_fan_remove(struct platform_device *pdev)
if (!IS_ERR(fan_data->cdev))
thermal_cooling_device_unregister(fan_data->cdev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, 0);
return 0;
@@ -632,7 +576,7 @@ static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl) {
+ if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
set_fan_speed(fan_data, 0);
}
@@ -644,7 +588,7 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
@@ -663,9 +607,7 @@ static struct platform_driver gpio_fan_driver = {
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
-#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
-#endif
},
};
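
The speed-map handling above still treats each ctrl_val as a bit pattern driven onto the control GPIOs, one bit per descriptor, exactly as __set_fan_ctrl() does. A stand-alone sketch of that decomposition follows; the three-line fan and the value 0x5 are hypothetical examples, not taken from a real board.

#include <stdio.h>

/* Mirror of the __set_fan_ctrl() bit decomposition, for illustration only. */
static void show_ctrl_bits(unsigned int ctrl_val, int num_gpios)
{
	int i;

	for (i = 0; i < num_gpios; i++)
		printf("gpio%d -> %u\n", i, (ctrl_val >> i) & 1);
}

int main(void)
{
	show_ctrl_bits(0x5, 3);	/* hypothetical 3-GPIO fan: gpio0=1, gpio1=0, gpio2=1 */
	return 0;
}
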
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index ce3b91f22e30..0721e175664a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);
+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#endif
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
- int offset, u32 *val)
+/* F17h M01h Access through SMN */
+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+
+struct k10temp_data {
+ struct pci_dev *pdev;
+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ int temp_offset;
+};
+
+struct tctl_offset {
+ u8 model;
+ char const *id;
+ int offset;
+};
+
+static const struct tctl_offset tctl_offset_table[] = {
+ { 0x17, "AMD Ryzen 5 1600X", 20000 },
+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+};
+
+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
+}
+
+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
+ unsigned int base, int offset, u32 *val)
{
mutex_lock(&nb_smu_ind_mutex);
pci_bus_write_config_dword(pdev->bus, devfn,
- 0xb8, offset);
+ base, offset);
pci_bus_read_config_dword(pdev->bus, devfn,
- 0xbc, val);
+ base + 4, val);
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
static ssize_t temp1_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct k10temp_data *data = dev_get_drvdata(dev);
u32 regval;
- struct pci_dev *pdev = dev_get_drvdata(dev);
-
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
- &regval);
- } else {
- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
- }
- return sprintf(buf, "%u\n", (regval >> 21) * 125);
+ unsigned int temp;
+
+ data->read_tempreg(data->pdev, &regval);
+ temp = (regval >> 21) * 125;
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%u\n", temp);
}
static ssize_t temp1_max_show(struct device *dev,
@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
int show_hyst = attr->index;
u32 regval;
int value;
- pci_read_config_dword(dev_get_drvdata(dev),
+ pci_read_config_dword(data->pdev,
REG_HARDWARE_THERMAL_CONTROL, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = dev_get_drvdata(dev);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = data->pdev;
if (index >= 2) {
u32 reg_caps, reg_htc;
@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
+ struct k10temp_data *data;
struct device *hwmon_dev;
+ int i;
if (unreliable) {
if (!force) {
@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
"unreliable CPU thermal sensor; check erratum 319\n");
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+ boot_cpu_data.x86_model == 0x70))
+ data->read_tempreg = read_tempreg_nb_f15;
+ else if (boot_cpu_data.x86 == 0x17)
+ data->read_tempreg = read_tempreg_nb_f17;
+ else
+ data->read_tempreg = read_tempreg_pci;
+
+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
+ const struct tctl_offset *entry = &tctl_offset_table[i];
+
+ if (boot_cpu_data.x86 == entry->model &&
+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
+ data->temp_offset = entry->offset;
+ break;
+ }
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
k10temp_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
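
In temp1_input_show() the reported-temperature register keeps Tctl in its upper bits, so (regval >> 21) * 125 converts the 0.125 degree steps into millidegrees before the per-model offset from tctl_offset_table is subtracted. A stand-alone sketch of that arithmetic follows; the register value below is hypothetical.

#include <stdio.h>

int main(void)
{
	unsigned int regval = 0x2a800000;		/* hypothetical raw register read */
	unsigned int temp = (regval >> 21) * 125;	/* 0.125 degC steps -> millidegrees */
	int temp_offset = 20000;			/* e.g. Ryzen 7 1800X Tctl offset */

	printf("raw %u mC, reported %d mC\n", temp, (int)(temp - temp_offset));
	return 0;
}
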
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index a18278938494..76d966932941 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -303,10 +303,20 @@ static const struct i2c_device_id max1619_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
+#ifdef CONFIG_OF
+static const struct of_device_id max1619_of_match[] = {
+ { .compatible = "maxim,max1619", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, max1619_of_match);
+#endif
+
static struct i2c_driver max1619_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max1619",
+ .of_match_table = of_match_ptr(max1619_of_match),
},
.probe = max1619_probe,
.id_table = max1619_id,
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
new file mode 100644
index 000000000000..35555f0eefb9
--- /dev/null
+++ b/drivers/hwmon/max6621.c
@@ -0,0 +1,593 @@
+/*
+ * Hardware monitoring driver for Maxim MAX6621
+ *
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define MAX6621_DRV_NAME "max6621"
+#define MAX6621_TEMP_INPUT_REG_NUM 9
+#define MAX6621_TEMP_INPUT_MIN -127000
+#define MAX6621_TEMP_INPUT_MAX 128000
+#define MAX6621_TEMP_ALERT_CHAN_SHIFT 1
+
+#define MAX6621_TEMP_S0D0_REG 0x00
+#define MAX6621_TEMP_S0D1_REG 0x01
+#define MAX6621_TEMP_S1D0_REG 0x02
+#define MAX6621_TEMP_S1D1_REG 0x03
+#define MAX6621_TEMP_S2D0_REG 0x04
+#define MAX6621_TEMP_S2D1_REG 0x05
+#define MAX6621_TEMP_S3D0_REG 0x06
+#define MAX6621_TEMP_S3D1_REG 0x07
+#define MAX6621_TEMP_MAX_REG 0x08
+#define MAX6621_TEMP_MAX_ADDR_REG 0x0a
+#define MAX6621_TEMP_ALERT_CAUSE_REG 0x0b
+#define MAX6621_CONFIG0_REG 0x0c
+#define MAX6621_CONFIG1_REG 0x0d
+#define MAX6621_CONFIG2_REG 0x0e
+#define MAX6621_CONFIG3_REG 0x0f
+#define MAX6621_TEMP_S0_ALERT_REG 0x10
+#define MAX6621_TEMP_S1_ALERT_REG 0x11
+#define MAX6621_TEMP_S2_ALERT_REG 0x12
+#define MAX6621_TEMP_S3_ALERT_REG 0x13
+#define MAX6621_CLEAR_ALERT_REG 0x15
+#define MAX6621_REG_MAX (MAX6621_CLEAR_ALERT_REG + 1)
+#define MAX6621_REG_TEMP_SHIFT 0x06
+
+#define MAX6621_ENABLE_TEMP_ALERTS_BIT 4
+#define MAX6621_ENABLE_I2C_CRC_BIT 5
+#define MAX6621_ENABLE_ALTERNATE_DATA 6
+#define MAX6621_ENABLE_LOCKUP_TO 7
+#define MAX6621_ENABLE_S0D0_BIT 8
+#define MAX6621_ENABLE_S3D1_BIT 15
+#define MAX6621_ENABLE_TEMP_ALL GENMASK(MAX6621_ENABLE_S3D1_BIT, \
+ MAX6621_ENABLE_S0D0_BIT)
+#define MAX6621_POLL_DELAY_MASK 0x5
+#define MAX6621_CONFIG0_INIT (MAX6621_ENABLE_TEMP_ALL | \
+ BIT(MAX6621_ENABLE_LOCKUP_TO) | \
+ BIT(MAX6621_ENABLE_I2C_CRC_BIT) | \
+ MAX6621_POLL_DELAY_MASK)
+#define MAX6621_PECI_BIT_TIME 0x2
+#define MAX6621_PECI_RETRY_NUM 0x3
+#define MAX6621_CONFIG1_INIT ((MAX6621_PECI_BIT_TIME << 8) | \
+ MAX6621_PECI_RETRY_NUM)
+
+/* Error codes */
+#define MAX6621_TRAN_FAILED 0x8100 /*
+ * PECI transaction failed for more
+ * than the configured number of
+ * consecutive retries.
+ */
+#define MAX6621_POOL_DIS 0x8101 /*
+ * Polling disabled for requested
+ * socket/domain.
+ */
+#define MAX6621_POOL_UNCOMPLETE 0x8102 /*
+ * First poll not yet completed for
+ * requested socket/domain (on
+ * startup).
+ */
+#define MAX6621_SD_DIS 0x8103 /*
+ * Read maximum temperature requested,
+ * but no sockets/domains enabled or
+ * all enabled sockets/domains have
+ * errors; or read maximum temperature
+ * address requested, but read maximum
+ * temperature was not called.
+ */
+#define MAX6621_ALERT_DIS 0x8104 /*
+ * Get alert socket/domain requested,
+ * but no alert active.
+ */
+#define MAX6621_PECI_ERR_MIN 0x8000 /* Intel spec PECI error min value. */
+#define MAX6621_PECI_ERR_MAX 0x80ff /* Intel spec PECI error max value. */
+
+static const u32 max6621_temp_regs[] = {
+ MAX6621_TEMP_MAX_REG, MAX6621_TEMP_S0D0_REG, MAX6621_TEMP_S1D0_REG,
+ MAX6621_TEMP_S2D0_REG, MAX6621_TEMP_S3D0_REG, MAX6621_TEMP_S0D1_REG,
+ MAX6621_TEMP_S1D1_REG, MAX6621_TEMP_S2D1_REG, MAX6621_TEMP_S3D1_REG,
+};
+
+static const char *const max6621_temp_labels[] = {
+ "maximum",
+ "socket0_0",
+ "socket1_0",
+ "socket2_0",
+ "socket3_0",
+ "socket0_1",
+ "socket1_1",
+ "socket2_1",
+ "socket3_1",
+};
+
+static const int max6621_temp_alert_chan2reg[] = {
+ MAX6621_TEMP_S0_ALERT_REG,
+ MAX6621_TEMP_S1_ALERT_REG,
+ MAX6621_TEMP_S2_ALERT_REG,
+ MAX6621_TEMP_S3_ALERT_REG,
+};
+
+/**
+ * struct max6621_data - private data:
+ *
+ * @client: I2C client;
+ * @regmap: register map handle;
+ * @input_chan2reg: mapping from channel to register;
+ */
+struct max6621_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int input_chan2reg[MAX6621_TEMP_INPUT_REG_NUM + 1];
+};
+
+static long max6621_temp_mc2reg(long val)
+{
+ return (val / 1000L) << MAX6621_REG_TEMP_SHIFT;
+}
+
+static umode_t
+max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ /* Skip channels which are not physically connected. */
+ if (((struct max6621_data *)data)->input_chan2reg[channel] < 0)
+ return 0;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_offset:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int max6621_verify_reg_data(struct device *dev, int regval)
+{
+ if (regval >= MAX6621_PECI_ERR_MIN &&
+ regval <= MAX6621_PECI_ERR_MAX) {
+ dev_dbg(dev, "PECI error code - err 0x%04x.\n",
+ regval);
+
+ return -EIO;
+ }
+
+ switch (regval) {
+ case MAX6621_TRAN_FAILED:
+ dev_dbg(dev, "PECI transaction failed - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_POOL_DIS:
+ dev_dbg(dev, "Polling disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_POOL_UNCOMPLETE:
+ dev_dbg(dev, "First poll not completed on startup - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_SD_DIS:
+ dev_dbg(dev, "Resource is disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_ALERT_DIS:
+ dev_dbg(dev, "No alert active - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+}
+
+static int
+max6621_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int reg;
+ s8 temp;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = data->input_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ /*
+ * Bit MAX6621_REG_TEMP_SHIFT represents a 1 degree step.
+ * The temperature is given in two's complement and 8
+ * bits are used for the register conversion.
+ */
+ temp = (regval >> MAX6621_REG_TEMP_SHIFT);
+ *val = temp * 1000L;
+
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_read(data->regmap, MAX6621_CONFIG2_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = (regval >> MAX6621_REG_TEMP_SHIFT) *
+ 1000L;
+
+ break;
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = regval * 1000L;
+
+ break;
+ case hwmon_temp_crit_alarm:
+ /*
+ * Set val to zero to cover the case where reading
+ * MAX6621_TEMP_ALERT_CAUSE_REG returns, for example,
+ * MAX6621_ALERT_DIS. The read then fails with an error,
+ * but in that case the alarm should be reported as 0.
+ */
+ *val = 0;
+ ret = regmap_read(data->regmap,
+ MAX6621_TEMP_ALERT_CAUSE_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret) {
+ /* Do not report error if alert is disabled. */
+ if (regval == MAX6621_ALERT_DIS)
+ return 0;
+ else
+ return ret;
+ }
+
+ /*
+ * Clear the alert automatically, using the SMBus
+ * send-byte protocol to clear the alert.
+ */
+ if (regval) {
+ ret = i2c_smbus_write_byte(data->client,
+ MAX6621_CLEAR_ALERT_REG);
+ if (ret)
+ return ret;
+ }
+
+ *val = !!regval;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6621_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 reg;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = max6621_temp_mc2reg(val);
+
+ return regmap_write(data->regmap,
+ MAX6621_CONFIG2_REG, val);
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = val / 1000L;
+
+ return regmap_write(data->regmap, reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+max6621_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = max6621_temp_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool max6621_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_MAX_ADDR_REG:
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default max6621_regmap_default[] = {
+ { MAX6621_CONFIG0_REG, MAX6621_CONFIG0_INIT },
+ { MAX6621_CONFIG1_REG, MAX6621_CONFIG1_INIT },
+};
+
+static const struct regmap_config max6621_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX6621_REG_MAX,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = max6621_writeable_reg,
+ .readable_reg = max6621_readable_reg,
+ .volatile_reg = max6621_volatile_reg,
+ .reg_defaults = max6621_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
+};
+
+static u32 max6621_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_chip = {
+ .type = hwmon_chip,
+ .config = max6621_chip_config,
+};
+
+static const u32 max6621_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_temp = {
+ .type = hwmon_temp,
+ .config = max6621_temp_config,
+};
+
+static const struct hwmon_channel_info *max6621_info[] = {
+ &max6621_chip,
+ &max6621_temp,
+ NULL
+};
+
+static const struct hwmon_ops max6621_hwmon_ops = {
+ .read = max6621_read,
+ .write = max6621_write,
+ .read_string = max6621_read_string,
+ .is_visible = max6621_is_visible,
+};
+
+static const struct hwmon_chip_info max6621_chip_info = {
+ .ops = &max6621_hwmon_ops,
+ .info = max6621_info,
+};
+
+static int max6621_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct max6621_data *data;
+ struct device *hwmon_dev;
+ int i;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &max6621_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ /* Set CONFIG0 register, masking temperature alerts and enabling PEC. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG0_REG,
+ MAX6621_CONFIG0_INIT);
+ if (ret)
+ return ret;
+
+ /* Set CONFIG1 register for PECI bit time and access retry number. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG1_REG,
+ MAX6621_CONFIG1_INIT);
+ if (ret)
+ return ret;
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return ret;
+
+ /* Verify which temperature input registers are enabled. */
+ for (i = 0; i < MAX6621_TEMP_INPUT_REG_NUM; i++) {
+ ret = i2c_smbus_read_word_data(client, max6621_temp_regs[i]);
+ if (ret < 0)
+ return ret;
+ ret = max6621_verify_reg_data(dev, ret);
+ if (ret) {
+ data->input_chan2reg[i] = -1;
+ continue;
+ }
+
+ data->input_chan2reg[i] = max6621_temp_regs[i];
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6621_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6621_id[] = {
+ { MAX6621_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6621_id);
+
+static const struct of_device_id max6621_of_match[] = {
+ { .compatible = "maxim,max6621" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max6621_of_match);
+
+static struct i2c_driver max6621_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = MAX6621_DRV_NAME,
+ .of_match_table = of_match_ptr(max6621_of_match),
+ },
+ .probe = max6621_probe,
+ .id_table = max6621_id,
+};
+
+module_i2c_driver(max6621_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Driver for Maxim MAX6621");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 40019325b517..08479006c7f9 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -114,6 +114,16 @@ config SENSORS_MAX20751
This driver can also be built as a module. If so, the module will
be called max20751.
+config SENSORS_MAX31785
+ tristate "Maxim MAX31785 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX31785.
+
+ This driver can also be built as a module. If so, the module will
+ be called max31785.
+
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 459a6be3390e..ea0e39518c21 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PMBus chip drivers.
#
@@ -12,6 +13,7 @@ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
+obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
new file mode 100644
index 000000000000..9313849d5160
--- /dev/null
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum max31785_regs {
+ MFR_REVISION = 0x9b,
+};
+
+#define MAX31785_NR_PAGES 23
+
+#define MAX31785_FAN_FUNCS \
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+
+#define MAX31785_TEMP_FUNCS \
+ (PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+#define MAX31785_VOUT_FUNCS \
+ (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+
+static const struct pmbus_driver_info max31785_info = {
+ .pages = MAX31785_NR_PAGES,
+
+ /* RPM */
+ .format[PSC_FAN] = direct,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = MAX31785_FAN_FUNCS,
+ .func[1] = MAX31785_FAN_FUNCS,
+ .func[2] = MAX31785_FAN_FUNCS,
+ .func[3] = MAX31785_FAN_FUNCS,
+ .func[4] = MAX31785_FAN_FUNCS,
+ .func[5] = MAX31785_FAN_FUNCS,
+
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[6] = MAX31785_TEMP_FUNCS,
+ .func[7] = MAX31785_TEMP_FUNCS,
+ .func[8] = MAX31785_TEMP_FUNCS,
+ .func[9] = MAX31785_TEMP_FUNCS,
+ .func[10] = MAX31785_TEMP_FUNCS,
+ .func[11] = MAX31785_TEMP_FUNCS,
+ .func[12] = MAX31785_TEMP_FUNCS,
+ .func[13] = MAX31785_TEMP_FUNCS,
+ .func[14] = MAX31785_TEMP_FUNCS,
+ .func[15] = MAX31785_TEMP_FUNCS,
+ .func[16] = MAX31785_TEMP_FUNCS,
+
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[17] = MAX31785_VOUT_FUNCS,
+ .func[18] = MAX31785_VOUT_FUNCS,
+ .func[19] = MAX31785_VOUT_FUNCS,
+ .func[20] = MAX31785_VOUT_FUNCS,
+ .func[21] = MAX31785_VOUT_FUNCS,
+ .func[22] = MAX31785_VOUT_FUNCS,
+};
+
+static int max31785_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ s64 ret;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ *info = max31785_info;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id max31785_id[] = {
+ { "max31785", 0 },
+ { "max31785a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max31785_id);
+
+static struct i2c_driver max31785_driver = {
+ .driver = {
+ .name = "max31785",
+ },
+ .probe = max31785_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max31785_id,
+};
+
+module_i2c_driver(max31785_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 4efa2bd4f6d8..fa613bd209e3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
-int pmbus_set_page(struct i2c_client *client, u8 page);
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_set_page(struct i2c_client *client, int page);
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 302f0aef59de..52a58b8b6e1b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
-int pmbus_set_page(struct i2c_client *client, u8 page)
+int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
int rv = 0;
int newpage;
- if (page != data->currpage) {
+ if (page >= 0 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page)
@@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_write_byte(client, value);
}
@@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return pmbus_write_byte(client, page, value);
}
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
+ u16 word)
{
int rv;
@@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return pmbus_write_word_data(client, page, reg, word);
}
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_read_byte_data(client, reg);
}
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index e4d642b673c6..25d28343ba93 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -18,13 +18,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
-#include <linux/platform_data/sht15.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -34,7 +32,8 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/bitrev.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -122,7 +121,8 @@ static const u8 sht15_crc8_table[] = {
/**
* struct sht15_data - device instance specific data
- * @pdata: platform data (gpio's etc).
+ * @sck: clock GPIO line
+ * @data: data GPIO line
* @read_work: bh of interrupt handler.
* @wait_queue: wait queue for getting values from device.
* @val_temp: last temperature value read from device.
@@ -150,7 +150,8 @@ static const u8 sht15_crc8_table[] = {
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
- struct sht15_platform_data *pdata;
+ struct gpio_desc *sck;
+ struct gpio_desc *data;
struct work_struct read_work;
wait_queue_head_t wait_queue;
uint16_t val_temp;
@@ -205,16 +206,16 @@ static int sht15_connection_reset(struct sht15_data *data)
{
int i, err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
for (i = 0; i < 9; ++i) {
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return 0;
@@ -227,11 +228,11 @@ static int sht15_connection_reset(struct sht15_data *data)
*/
static inline void sht15_send_bit(struct sht15_data *data, int val)
{
- gpio_set_value(data->pdata->gpio_data, val);
+ gpiod_set_value(data->data, val);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL); /* clock low time */
}
@@ -248,23 +249,23 @@ static int sht15_transmission_start(struct sht15_data *data)
int err;
/* ensure data is high and output */
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 0);
+ gpiod_set_value(data->data, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -292,20 +293,20 @@ static int sht15_wait_for_response(struct sht15_data *data)
{
int err;
- err = gpio_direction_input(data->pdata->gpio_data);
+ err = gpiod_direction_input(data->data);
if (err)
return err;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- if (gpio_get_value(data->pdata->gpio_data)) {
- gpio_set_value(data->pdata->gpio_sck, 0);
+ if (gpiod_get_value(data->data)) {
+ gpiod_set_value(data->sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
err = sht15_connection_reset(data);
if (err)
return err;
return -EIO;
}
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -360,17 +361,17 @@ static int sht15_ack(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 0);
+ err = gpiod_direction_output(data->data, 0);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
- return gpio_direction_input(data->pdata->gpio_data);
+ return gpiod_direction_input(data->data);
}
/**
@@ -383,13 +384,13 @@ static int sht15_end_transmission(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -405,10 +406,10 @@ static u8 sht15_read_byte(struct sht15_data *data)
for (i = 0; i < 8; ++i) {
byte <<= 1;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- byte |= !!gpio_get_value(data->pdata->gpio_data);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ byte |= !!gpiod_get_value(data->data);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return byte;
@@ -428,7 +429,7 @@ static int sht15_send_status(struct sht15_data *data, u8 status)
err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
if (err)
return err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
@@ -528,14 +529,14 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- ret = gpio_direction_input(data->pdata->gpio_data);
+ ret = gpiod_direction_input(data->data);
if (ret)
return ret;
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- if (gpio_get_value(data->pdata->gpio_data) == 0) {
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
+ if (gpiod_get_value(data->data) == 0) {
+ disable_irq_nosync(gpiod_to_irq(data->data));
/* Only relevant if the interrupt hasn't occurred. */
if (!atomic_read(&data->interrupt_handled))
schedule_work(&data->read_work);
@@ -547,7 +548,7 @@ static int sht15_measurement(struct sht15_data *data,
data->state = SHT15_READING_NOTHING;
return -EIO;
} else if (ret == 0) { /* timeout occurred */
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
return ret;
@@ -826,15 +827,15 @@ static void sht15_bh_read_data(struct work_struct *work_s)
read_work);
/* Firstly, verify the line is low */
- if (gpio_get_value(data->pdata->gpio_data)) {
+ if (gpiod_get_value(data->data)) {
/*
* If not, then start the interrupt again - care here as could
* have gone low in meantime so verify it hasn't!
*/
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
/* If still not occurred or another handler was scheduled */
- if (gpio_get_value(data->pdata->gpio_data)
+ if (gpiod_get_value(data->data)
|| atomic_read(&data->interrupt_handled))
return;
}
@@ -918,53 +919,12 @@ static const struct of_device_id sht15_dt_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, sht15_dt_match);
-
-/*
- * This function returns NULL if pdev isn't a device instatiated by dt,
- * a pointer to pdata if it could successfully get all information
- * from dt or a negative ERR_PTR() on error.
- */
-static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct sht15_platform_data *pdata;
-
- /* no device tree device */
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
- if (pdata->gpio_data < 0) {
- if (pdata->gpio_data != -EPROBE_DEFER)
- dev_err(dev, "data-gpios not found\n");
- return ERR_PTR(pdata->gpio_data);
- }
-
- pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
- if (pdata->gpio_sck < 0) {
- if (pdata->gpio_sck != -EPROBE_DEFER)
- dev_err(dev, "clk-gpios not found\n");
- return ERR_PTR(pdata->gpio_sck);
- }
-
- return pdata;
-}
-#else
-static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- return NULL;
-}
#endif
static int sht15_probe(struct platform_device *pdev)
{
int ret;
struct sht15_data *data;
- u8 status = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -977,25 +937,6 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- data->pdata = sht15_probe_dt(&pdev->dev);
- if (IS_ERR(data->pdata))
- return PTR_ERR(data->pdata);
- if (data->pdata == NULL) {
- data->pdata = dev_get_platdata(&pdev->dev);
- if (data->pdata == NULL) {
- dev_err(&pdev->dev, "no platform data supplied\n");
- return -EINVAL;
- }
- }
-
- data->supply_uv = data->pdata->supply_mv * 1000;
- if (data->pdata->checksum)
- data->checksumming = true;
- if (data->pdata->no_otp_reload)
- status |= SHT15_STATUS_NO_OTP_RELOAD;
- if (data->pdata->low_resolution)
- status |= SHT15_STATUS_LOW_RESOLUTION;
-
/*
* If a regulator is available,
* query what the supply voltage actually is!
@@ -1030,21 +971,20 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
- GPIOF_OUT_INIT_LOW, "SHT15 sck");
- if (ret) {
+ data->sck = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ if (IS_ERR(data->sck)) {
+ ret = PTR_ERR(data->sck);
dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
-
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
- "SHT15 data");
- if (ret) {
+ data->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data->data)) {
+ ret = PTR_ERR(data->data);
dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
- ret = devm_request_irq(&pdev->dev, gpio_to_irq(data->pdata->gpio_data),
+ ret = devm_request_irq(&pdev->dev, gpiod_to_irq(data->data),
sht15_interrupt_fired,
IRQF_TRIGGER_FALLING,
"sht15 data",
@@ -1053,7 +993,7 @@ static int sht15_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get irq for data line\n");
goto err_release_reg;
}
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
goto err_release_reg;
@@ -1061,13 +1001,6 @@ static int sht15_probe(struct platform_device *pdev)
if (ret)
goto err_release_reg;
- /* write status with platform data options */
- if (status) {
- ret = sht15_send_status(data, status);
- if (ret)
- goto err_release_reg;
- }
-
ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
if (ret) {
dev_err(&pdev->dev, "sysfs create failed\n");
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 3f940fb67dc6..7fe152d92350 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -396,7 +396,7 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert);
}
static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
@@ -413,7 +413,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert);
}
static ssize_t show_input(struct device *dev, struct device_attribute *attr,
@@ -428,7 +428,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp);
}
static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
@@ -436,7 +436,7 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm);
}
static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
@@ -478,7 +478,7 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst);
}
static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
@@ -518,7 +518,7 @@ static ssize_t show_therm_trip(struct device *dev,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip);
}
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
@@ -526,7 +526,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max);
}
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min);
}
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
@@ -594,7 +594,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ return snprintf(buf, PAGE_SIZE, "%d\n",
stts751_intervals[priv->interval]);
}
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 246fb2365126..2b0f182daa87 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1246,10 +1246,8 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove_files:
w83781d_remove_files(dev);
- if (data->lm75[0])
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1])
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1262,10 +1260,8 @@ w83781d_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
w83781d_remove_files(dev);
- if (data->lm75[0])
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1])
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8af6081b4ab4..28fa3bd2c096 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1316,8 +1316,7 @@ static int w83791d_detect_subclients(struct i2c_client *client)
/* Undo inits in case of errors */
error_sc_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
error_sc_0:
return err;
}
@@ -1434,10 +1433,8 @@ error5:
error4:
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
error3:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1448,10 +1445,8 @@ static int w83791d_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &w83791d_group);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index d764602d70db..76aa39e537e0 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -981,8 +981,7 @@ w83792d_detect_subclients(struct i2c_client *new_client)
/* Undo inits in case of errors */
ERROR_SC_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
@@ -1456,10 +1455,8 @@ exit_remove_files:
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
exit_i2c_unregister:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return err;
}
@@ -1475,10 +1472,8 @@ w83792d_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj,
&w83792d_group_fan[i]);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
return 0;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index dab5c515d5a3..0af0f6283b35 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1564,10 +1564,8 @@ static int w83793_remove(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
@@ -1625,8 +1623,7 @@ w83793_detect_subclients(struct i2c_client *client)
/* Undo inits in case of errors */
ERROR_SC_1:
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
@@ -1676,7 +1673,9 @@ static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ static const int watchdog_minors[] = {
+ WATCHDOG_MINOR, 212, 213, 214, 215
+ };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
@@ -1960,10 +1959,8 @@ exit_remove:
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
- if (data->lm75[0] != NULL)
- i2c_unregister_device(data->lm75[0]);
- if (data->lm75[1] != NULL)
- i2c_unregister_device(data->lm75[1]);
+ i2c_unregister_device(data->lm75[0]);
+ i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index e1be61095532..a3cd91f23267 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -91,6 +91,11 @@
#define to_xgene_hwmon_dev(cl) \
container_of(cl, struct xgene_hwmon_dev, mbox_client)
+enum xgene_hwmon_version {
+ XGENE_HWMON_V1 = 0,
+ XGENE_HWMON_V2 = 1,
+};
+
struct slimpro_resp_msg {
u32 msg;
u32 param1;
@@ -609,6 +614,15 @@ static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
+ {"APMC0D29", XGENE_HWMON_V1},
+ {"APMC0D8A", XGENE_HWMON_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
+#endif
+
static int xgene_hwmon_probe(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx;
@@ -651,6 +665,15 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
@@ -693,7 +716,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
+ if (version == XGENE_HWMON_V2)
+ ctx->pcc_comm_addr = (void __force *)ioremap(
+ ctx->comm_base_addr,
+ cppc_ss->length);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
cppc_ss->length,
MEMREMAP_WB);
} else {
@@ -761,14 +790,6 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
- {"APMC0D29", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
-#endif
-
static const struct of_device_id xgene_hwmon_of_match[] = {
{.compatible = "apm,xgene-slimpro-hwmon"},
{}
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 6e0a5539a9ea..f0f467983960 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig HWSPINLOCK
- tristate "Hardware Spinlock drivers"
+ bool "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 14928aa7cc5a..b87c01a506a4 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Generic Hardware Spinlock framework
#
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 5bae90ce794d..61db9dd0d571 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for CoreSight drivers.
#
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index accc2056f7c6..8f4357e2626c 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -199,8 +199,8 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
static const struct amba_id replicator_ids[] = {
{
- .id = 0x0003b909,
- .mask = 0x0003ffff,
+ .id = 0x000bb909,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 56ecd7aff5eb..e03e58933141 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -748,8 +748,8 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
static const struct amba_id etb_ids[] = {
{
- .id = 0x0003b907,
- .mask = 0x0003ffff,
+ .id = 0x000bb907,
+ .mask = 0x000fffff,
},
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index e5b1ec57dbde..39f42fdd503d 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -901,33 +901,33 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
- .id = 0x0003b921,
- .mask = 0x0003ffff,
+ .id = 0x000bb921,
+ .mask = 0x000fffff,
.data = "ETM 3.3",
},
{ /* ETM 3.5 - Cortex-A5 */
- .id = 0x0003b955,
- .mask = 0x0003ffff,
+ .id = 0x000bb955,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* ETM 3.5 */
- .id = 0x0003b956,
- .mask = 0x0003ffff,
+ .id = 0x000bb956,
+ .mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* PTM 1.0 */
- .id = 0x0003b950,
- .mask = 0x0003ffff,
+ .id = 0x000bb950,
+ .mask = 0x000fffff,
.data = "PTM 1.0",
},
{ /* PTM 1.1 */
- .id = 0x0003b95f,
- .mask = 0x0003ffff,
+ .id = 0x000bb95f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ /* PTM 1.1 Qualcomm */
- .id = 0x0003006f,
- .mask = 0x0003ffff,
+ .id = 0x000b006f,
+ .mask = 0x000fffff,
.data = "PTM 1.1",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 77642e0e955b..fd3c396717f6 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -248,8 +248,8 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
static const struct amba_id funnel_ids[] = {
{
- .id = 0x0003b908,
- .mask = 0x0003ffff,
+ .id = 0x000bb908,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 92a780a6df1d..15e7ef3891f5 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -917,13 +917,13 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
{
- .id = 0x0003b962,
- .mask = 0x0003ffff,
+ .id = 0x000bb962,
+ .mask = 0x000fffff,
.data = "STM32",
},
{
- .id = 0x0003b963,
- .mask = 0x0003ffff,
+ .id = 0x000bb963,
+ .mask = 0x000fffff,
.data = "STM500",
},
{ 0, 0},
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2ff4a66a3caa..0ea04f588de0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -439,8 +439,8 @@ out:
static const struct amba_id tmc_ids[] = {
{
- .id = 0x0003b961,
- .mask = 0x0003ffff,
+ .id = 0x000bb961,
+ .mask = 0x000fffff,
},
{
/* Coresight SoC 600 TMC-ETR/ETS */
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index d7a3e453016d..bef49a3a5ca7 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -194,8 +194,8 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
static const struct amba_id tpiu_ids[] = {
{
- .id = 0x0003b912,
- .mask = 0x0003ffff,
+ .id = 0x000bb912,
+ .mask = 0x000fffff,
},
{
.id = 0x0004b912,
diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile
index 81d42fe918f7..880c9b5e8566 100644
--- a/drivers/hwtracing/intel_th/Makefile
+++ b/drivers/hwtracing/intel_th/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INTEL_TH) += intel_th.o
intel_th-y := core.o
intel_th-$(CONFIG_INTEL_TH_DEBUG) += debug.o
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index 3abd84ce13d4..effc19e5190f 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_STM) += stm_core.o
stm_core-y := core.o policy.o
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 6c0ae2996326..33e9a1b6ea7c 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -187,8 +187,8 @@ static struct configfs_attribute *stp_policy_node_attrs[] = {
NULL,
};
-static struct config_item_type stp_policy_type;
-static struct config_item_type stp_policy_node_type;
+static const struct config_item_type stp_policy_type;
+static const struct config_item_type stp_policy_node_type;
static struct config_group *
stp_policy_node_make(struct config_group *group, const char *name)
@@ -236,7 +236,7 @@ static struct configfs_group_operations stp_policy_node_group_ops = {
.drop_item = stp_policy_node_drop,
};
-static struct config_item_type stp_policy_node_type = {
+static const struct config_item_type stp_policy_node_type = {
.ct_item_ops = &stp_policy_node_item_ops,
.ct_group_ops = &stp_policy_node_group_ops,
.ct_attrs = stp_policy_node_attrs,
@@ -311,7 +311,7 @@ static struct configfs_group_operations stp_policy_group_ops = {
.make_group = stp_policy_node_make,
};
-static struct config_item_type stp_policy_type = {
+static const struct config_item_type stp_policy_type = {
.ct_item_ops = &stp_policy_item_ops,
.ct_group_ops = &stp_policy_group_ops,
.ct_attrs = stp_policy_attrs,
@@ -380,7 +380,7 @@ static struct configfs_group_operations stp_policies_group_ops = {
.make_group = stp_policies_make,
};
-static struct config_item_type stp_policies_type = {
+static const struct config_item_type stp_policies_type = {
.ct_group_ops = &stp_policies_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 7bb65a4369e1..72c94c60fdd1 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the i2c core.
#
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 45a3f3ca29b3..009345d8f49d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -198,6 +198,11 @@ config I2C_CHT_WC
SMBus controller found in the Intel Cherry Trail Whiskey Cove PMIC
found on some Intel Cherry Trail systems.
+ Note this controller is hooked up to a TI bq24292i charger-IC,
+ combined with a FUSB302 Type-C port-controller; as such it is advised
+ to also select CONFIG_CHARGER_BQ24190=m and CONFIG_TYPEC_FUSB302=m
+ (the fusb302 driver currently is in drivers/staging).
+
config I2C_NFORCE2
tristate "Nvidia nForce2, nForce3 and nForce4"
depends on PCI
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 47f3ac9a695a..2ce8576540a2 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the i2c bus drivers.
#
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 284f8670dbeb..7d4aeb4465b3 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -27,6 +27,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
/* I2C Register */
@@ -132,6 +133,7 @@ struct aspeed_i2c_bus {
struct i2c_adapter adap;
struct device *dev;
void __iomem *base;
+ struct reset_control *rst;
/* Synchronizes I/O mem access to base. */
spinlock_t lock;
struct completion cmd_complete;
@@ -847,6 +849,14 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
/* We just need the clock rate, we don't actually use the clk object. */
devm_clk_put(&pdev->dev, parent_clk);
+ bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(bus->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(bus->rst);
+ }
+ reset_control_deassert(bus->rst);
+
ret = of_property_read_u32(pdev->dev.of_node,
"bus-frequency", &bus->bus_frequency);
if (ret < 0) {
@@ -917,6 +927,8 @@ static int aspeed_i2c_remove_bus(struct platform_device *pdev)
spin_unlock_irqrestore(&bus->lock, flags);
+ reset_control_assert(bus->rst);
+
i2c_del_adapter(&bus->adap);
return 0;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 190bbbc7bfee..0d05dadb2dc5 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -16,6 +16,7 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -25,6 +26,7 @@
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/power/bq24190_charger.h>
#include <linux/slab.h>
#define CHT_WC_I2C_CTRL 0x5e24
@@ -232,13 +234,35 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
.name = "cht_wc_ext_chrg_irq_chip",
};
+static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+
static const struct property_entry bq24190_props[] = {
- PROPERTY_ENTRY_STRING("extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
PROPERTY_ENTRY_BOOL("omit-battery-class"),
PROPERTY_ENTRY_BOOL("disable-reset"),
{ }
};
+static struct regulator_consumer_supply fusb302_consumer = {
+ .supply = "vbus",
+ /* Must match fusb302 dev_name in intel_cht_int33fe.c */
+ .dev_name = "i2c-fusb302",
+};
+
+static const struct regulator_init_data bq24190_vbus_init_data = {
+ .constraints = {
+ /* The name is used in intel_cht_int33fe.c, do not change. */
+ .name = "cht_wc_usb_typec_vbus",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &fusb302_consumer,
+ .num_consumer_supplies = 1,
+};
+
+static struct bq24190_platform_data bq24190_pdata = {
+ .regulator_init_data = &bq24190_vbus_init_data,
+};
+
static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
@@ -246,7 +270,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
struct i2c_board_info board_info = {
.type = "bq24190",
.addr = 0x6b,
+ .dev_name = "bq24190",
.properties = bq24190_props,
+ .platform_data = &bq24190_pdata,
};
int ret, reg, irq;
@@ -314,11 +340,21 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
if (ret)
goto remove_irq_domain;
- board_info.irq = adap->client_irq;
- adap->client = i2c_new_device(&adap->adapter, &board_info);
- if (!adap->client) {
- ret = -ENOMEM;
- goto del_adapter;
+ /*
+ * Normally the Whiskey Cove PMIC is paired with a TI bq24292i charger,
+ * connected to this i2c bus, and a max17047 fuel-gauge and a fusb302
+ * USB Type-C controller connected to another i2c bus. In this setup
+ * the max17047 and fusb302 devices are enumerated through an INT33FE
+ * ACPI device. If this device is present, register an i2c-client for
+ * the TI bq24292i charger.
+ */
+ if (acpi_dev_present("INT33FE", NULL, -1)) {
+ board_info.irq = adap->client_irq;
+ adap->client = i2c_new_device(&adap->adapter, &board_info);
+ if (!adap->client) {
+ ret = -ENOMEM;
+ goto del_adapter;
+ }
}
platform_set_drvdata(pdev, adap);
@@ -335,7 +371,8 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
{
struct cht_wc_i2c_adap *adap = platform_get_drvdata(pdev);
- i2c_unregister_device(adap->client);
+ if (adap->client)
+ i2c_unregister_device(adap->client);
i2c_del_adapter(&adap->adapter);
irq_domain_remove(adap->irq_domain);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index b8c43535f16c..2ead9b9eebb7 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -36,6 +36,7 @@
#include <linux/gpio.h>
#include <linux/of_device.h>
#include <linux/platform_data/i2c-davinci.h>
+#include <linux/pm_runtime.h>
/* ----- global defines ----------------------------------------------- */
@@ -122,6 +123,9 @@
/* set the SDA GPIO low */
#define DAVINCI_I2C_DCLR_PDCLR1 BIT(1)
+/* timeout for pm runtime autosuspend */
+#define DAVINCI_I2C_PM_TIMEOUT 1000 /* ms */
+
struct davinci_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -500,7 +504,7 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
/* This should be 0 if all bytes were transferred
* or dev->cmd_err denotes an error.
*/
- dev_err(dev->dev, "abnormal termination buf_len=%i\n",
+ dev_err(dev->dev, "abnormal termination buf_len=%zu\n",
dev->buf_len);
dev->terminate = 1;
wmb();
@@ -541,10 +545,17 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret);
+ pm_runtime_put_noidle(dev->dev);
+ return ret;
+ }
+
ret = i2c_davinci_wait_bus_not_busy(dev);
if (ret < 0) {
dev_warn(dev->dev, "timeout waiting for bus ready\n");
- return ret;
+ goto out;
}
for (i = 0; i < num; i++) {
@@ -552,14 +563,19 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s [%d/%d] ret: %d\n", __func__, i + 1, num,
ret);
if (ret < 0)
- return ret;
+ goto out;
}
+ ret = num;
#ifdef CONFIG_CPU_FREQ
complete(&dev->xfr_complete);
#endif
- return num;
+out:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return ret;
}
static u32 i2c_davinci_func(struct i2c_adapter *adap)
@@ -599,6 +615,9 @@ static irqreturn_t i2c_davinci_isr(int this_irq, void *dev_id)
int count = 0;
u16 w;
+ if (pm_runtime_suspended(dev->dev))
+ return IRQ_NONE;
+
while ((stat = davinci_i2c_read_reg(dev, DAVINCI_I2C_IVR_REG))) {
dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);
if (count++ == 100) {
@@ -802,13 +821,24 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
- clk_prepare_enable(dev->clk);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(dev->base)) {
- r = PTR_ERR(dev->base);
- goto err_unuse_clocks;
+ return PTR_ERR(dev->base);
+ }
+
+ pm_runtime_set_autosuspend_delay(dev->dev,
+ DAVINCI_I2C_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(dev->dev);
+
+ pm_runtime_enable(dev->dev);
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ dev_err(dev->dev, "failed to runtime_get device: %d\n", r);
+ pm_runtime_put_noidle(dev->dev);
+ return r;
}
i2c_davinci_init(dev);
@@ -849,27 +879,40 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (r)
goto err_unuse_clocks;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
return 0;
err_unuse_clocks:
- clk_disable_unprepare(dev->clk);
- dev->clk = NULL;
+ pm_runtime_dont_use_autosuspend(dev->dev);
+ pm_runtime_put_sync(dev->dev);
+ pm_runtime_disable(dev->dev);
+
return r;
}
static int davinci_i2c_remove(struct platform_device *pdev)
{
struct davinci_i2c_dev *dev = platform_get_drvdata(pdev);
+ int ret;
i2c_davinci_cpufreq_deregister(dev);
i2c_del_adapter(&dev->adapter);
- clk_disable_unprepare(dev->clk);
- dev->clk = NULL;
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
+ pm_runtime_dont_use_autosuspend(dev->dev);
+ pm_runtime_put_sync(dev->dev);
+ pm_runtime_disable(dev->dev);
+
return 0;
}
@@ -880,7 +923,6 @@ static int davinci_i2c_suspend(struct device *dev)
/* put I2C into reset */
davinci_i2c_reset_ctrl(i2c_dev, 0);
- clk_disable_unprepare(i2c_dev->clk);
return 0;
}
@@ -889,7 +931,6 @@ static int davinci_i2c_resume(struct device *dev)
{
struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev);
- clk_prepare_enable(i2c_dev->clk);
/* take I2C out of reset */
davinci_i2c_reset_ctrl(i2c_dev, 1);
@@ -899,6 +940,8 @@ static int davinci_i2c_resume(struct device *dev)
static const struct dev_pm_ops davinci_i2c_pm = {
.suspend = davinci_i2c_suspend,
.resume = davinci_i2c_resume,
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
#define davinci_i2c_pm_ops (&davinci_i2c_pm)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9fee4c054d3d..21bf619a86c5 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,6 +280,8 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
+ bool suspended;
+ bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0e65b97842b4..58add69a441c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -249,6 +249,14 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
}
}
+static void dw_i2c_plat_pm_cleanup(struct dw_i2c_dev *dev)
+{
+ pm_runtime_disable(dev->dev);
+
+ if (dev->pm_disabled)
+ pm_runtime_put_noidle(dev->dev);
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -257,7 +265,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
u32 acpi_speed, ht = 0;
struct resource *mem;
int i, irq, ret;
- const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
+ static const int supported_speeds[] = {
+ 0, 100000, 400000, 1000000, 3400000
+ };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -362,14 +372,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
- if (dev->pm_disabled) {
- pm_runtime_forbid(&pdev->dev);
- } else {
- pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- }
+ /* The code below assumes runtime PM to be disabled. */
+ WARN_ON(pm_runtime_enabled(&pdev->dev));
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+
+ if (dev->pm_disabled)
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pm_runtime_enable(&pdev->dev);
if (dev->mode == DW_IC_SLAVE)
ret = i2c_dw_probe_slave(dev);
@@ -382,8 +395,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
return ret;
exit_probe:
- if (!dev->pm_disabled)
- pm_runtime_disable(&pdev->dev);
+ dw_i2c_plat_pm_cleanup(dev);
exit_reset:
if (!IS_ERR_OR_NULL(dev->rst))
reset_control_assert(dev->rst);
@@ -402,8 +414,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
- if (!dev->pm_disabled)
- pm_runtime_disable(&pdev->dev);
+ dw_i2c_plat_pm_cleanup(dev);
+
if (!IS_ERR_OR_NULL(dev->rst))
reset_control_assert(dev->rst);
@@ -437,13 +449,20 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
-static int dw_i2c_plat_runtime_suspend(struct device *dev)
+static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (i_dev->suspended) {
+ i_dev->skip_resume = true;
+ return 0;
+ }
+
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
+ i_dev->suspended = true;
+
return 0;
}
@@ -451,27 +470,27 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
+ if (!i_dev->suspended)
+ return 0;
+
+ if (i_dev->skip_resume) {
+ i_dev->skip_resume = false;
+ return 0;
+ }
+
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- return 0;
-}
+ i_dev->suspended = false;
-#ifdef CONFIG_PM_SLEEP
-static int dw_i2c_plat_suspend(struct device *dev)
-{
- pm_runtime_resume(dev);
- return dw_i2c_plat_runtime_suspend(dev);
+ return 0;
}
-#endif
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
- SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
- dw_i2c_plat_resume,
- NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 0ef8fcc6ac3a..d80ea6ce91bb 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -14,27 +14,17 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
struct i2c_gpio_private_data {
+ struct gpio_desc *sda;
+ struct gpio_desc *scl;
struct i2c_adapter adap;
struct i2c_algo_bit_data bit_data;
struct i2c_gpio_platform_data pdata;
};
-/* Toggle SDA by changing the direction of the pin */
-static void i2c_gpio_setsda_dir(void *data, int state)
-{
- struct i2c_gpio_platform_data *pdata = data;
-
- if (state)
- gpio_direction_input(pdata->sda_pin);
- else
- gpio_direction_output(pdata->sda_pin, 0);
-}
-
/*
* Toggle SDA by changing the output value of the pin. This is only
* valid for pins configured as open drain (i.e. setting the value
@@ -42,20 +32,9 @@ static void i2c_gpio_setsda_dir(void *data, int state)
*/
static void i2c_gpio_setsda_val(void *data, int state)
{
- struct i2c_gpio_platform_data *pdata = data;
-
- gpio_set_value(pdata->sda_pin, state);
-}
-
-/* Toggle SCL by changing the direction of the pin. */
-static void i2c_gpio_setscl_dir(void *data, int state)
-{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- if (state)
- gpio_direction_input(pdata->scl_pin);
- else
- gpio_direction_output(pdata->scl_pin, 0);
+ gpiod_set_value(priv->sda, state);
}
/*
@@ -66,44 +45,23 @@ static void i2c_gpio_setscl_dir(void *data, int state)
*/
static void i2c_gpio_setscl_val(void *data, int state)
{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- gpio_set_value(pdata->scl_pin, state);
+ gpiod_set_value(priv->scl, state);
}
static int i2c_gpio_getsda(void *data)
{
- struct i2c_gpio_platform_data *pdata = data;
+ struct i2c_gpio_private_data *priv = data;
- return gpio_get_value(pdata->sda_pin);
+ return gpiod_get_value(priv->sda);
}
static int i2c_gpio_getscl(void *data)
{
- struct i2c_gpio_platform_data *pdata = data;
-
- return gpio_get_value(pdata->scl_pin);
-}
-
-static int of_i2c_gpio_get_pins(struct device_node *np,
- unsigned int *sda_pin, unsigned int *scl_pin)
-{
- if (of_gpio_count(np) < 2)
- return -ENODEV;
-
- *sda_pin = of_get_gpio(np, 0);
- *scl_pin = of_get_gpio(np, 1);
-
- if (*sda_pin == -EPROBE_DEFER || *scl_pin == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!gpio_is_valid(*sda_pin) || !gpio_is_valid(*scl_pin)) {
- pr_err("%pOF: invalid GPIO pins, sda=%d/scl=%d\n",
- np, *sda_pin, *scl_pin);
- return -ENODEV;
- }
+ struct i2c_gpio_private_data *priv = data;
- return 0;
+ return gpiod_get_value(priv->scl);
}
static void of_i2c_gpio_get_props(struct device_node *np,
@@ -124,72 +82,105 @@ static void of_i2c_gpio_get_props(struct device_node *np,
of_property_read_bool(np, "i2c-gpio,scl-output-only");
}
+static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
+ const char *con_id,
+ unsigned int index,
+ enum gpiod_flags gflags)
+{
+ struct gpio_desc *retdesc;
+ int ret;
+
+ retdesc = devm_gpiod_get(dev, con_id, gflags);
+ if (!IS_ERR(retdesc)) {
+ dev_dbg(dev, "got GPIO from name %s\n", con_id);
+ return retdesc;
+ }
+
+ retdesc = devm_gpiod_get_index(dev, NULL, index, gflags);
+ if (!IS_ERR(retdesc)) {
+ dev_dbg(dev, "got GPIO from index %u\n", index);
+ return retdesc;
+ }
+
+ ret = PTR_ERR(retdesc);
+
+ /* FIXME: hack in the old code, is this really necessary? */
+ if (ret == -EINVAL)
+ retdesc = ERR_PTR(-EPROBE_DEFER);
+
+ /* This happens if the GPIO driver is not yet probed, let's defer */
+ if (ret == -ENOENT)
+ retdesc = ERR_PTR(-EPROBE_DEFER);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "error trying to get descriptor: %d\n", ret);
+
+ return retdesc;
+}
+
static int i2c_gpio_probe(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
struct i2c_gpio_platform_data *pdata;
struct i2c_algo_bit_data *bit_data;
struct i2c_adapter *adap;
- unsigned int sda_pin, scl_pin;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ enum gpiod_flags gflags;
int ret;
- /* First get the GPIO pins; if it fails, we'll defer the probe. */
- if (pdev->dev.of_node) {
- ret = of_i2c_gpio_get_pins(pdev->dev.of_node,
- &sda_pin, &scl_pin);
- if (ret)
- return ret;
- } else {
- if (!dev_get_platdata(&pdev->dev))
- return -ENXIO;
- pdata = dev_get_platdata(&pdev->dev);
- sda_pin = pdata->sda_pin;
- scl_pin = pdata->scl_pin;
- }
-
- ret = devm_gpio_request(&pdev->dev, sda_pin, "sda");
- if (ret) {
- if (ret == -EINVAL)
- ret = -EPROBE_DEFER; /* Try again later */
- return ret;
- }
- ret = devm_gpio_request(&pdev->dev, scl_pin, "scl");
- if (ret) {
- if (ret == -EINVAL)
- ret = -EPROBE_DEFER; /* Try again later */
- return ret;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
adap = &priv->adap;
bit_data = &priv->bit_data;
pdata = &priv->pdata;
- if (pdev->dev.of_node) {
- pdata->sda_pin = sda_pin;
- pdata->scl_pin = scl_pin;
- of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
+ if (np) {
+ of_i2c_gpio_get_props(np, pdata);
} else {
- memcpy(pdata, dev_get_platdata(&pdev->dev), sizeof(*pdata));
+ /*
+ * If all platform data settings are zero it is OK
+ * to not provide any platform data from the board.
+ */
+ if (dev_get_platdata(dev))
+ memcpy(pdata, dev_get_platdata(dev), sizeof(*pdata));
}
- if (pdata->sda_is_open_drain) {
- gpio_direction_output(pdata->sda_pin, 1);
- bit_data->setsda = i2c_gpio_setsda_val;
- } else {
- gpio_direction_input(pdata->sda_pin);
- bit_data->setsda = i2c_gpio_setsda_dir;
- }
+ /*
+ * First get the GPIO pins; if it fails, we'll defer the probe.
+ * If the SDA line is marked from platform data or device tree as
+ * "open drain" it means something outside of our control is making
+ * this line being handled as open drain, and we should just handle
+ * it as any other output. Else we enforce open drain as this is
+ * required for an I2C bus.
+ */
+ if (pdata->sda_is_open_drain)
+ gflags = GPIOD_OUT_HIGH;
+ else
+ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
+ priv->sda = i2c_gpio_get_desc(dev, "sda", 0, gflags);
+ if (IS_ERR(priv->sda))
+ return PTR_ERR(priv->sda);
+
+ /*
+ * If the SCL line is marked from platform data or device tree as
+ * "open drain" it means something outside of our control is making
+ * this line being handled as open drain, and we should just handle
+ * it as any other output. Else we enforce open drain as this is
+ * required for an I2C bus.
+ */
+ if (pdata->scl_is_open_drain)
+ gflags = GPIOD_OUT_LOW;
+ else
+ gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
+ if (IS_ERR(priv->scl))
+ return PTR_ERR(priv->scl);
- if (pdata->scl_is_open_drain || pdata->scl_is_output_only) {
- gpio_direction_output(pdata->scl_pin, 1);
- bit_data->setscl = i2c_gpio_setscl_val;
- } else {
- gpio_direction_input(pdata->scl_pin);
- bit_data->setscl = i2c_gpio_setscl_dir;
- }
+ bit_data->setsda = i2c_gpio_setsda_val;
+ bit_data->setscl = i2c_gpio_setscl_val;
if (!pdata->scl_is_output_only)
bit_data->getscl = i2c_gpio_getscl;
@@ -207,18 +198,18 @@ static int i2c_gpio_probe(struct platform_device *pdev)
else
bit_data->timeout = HZ / 10; /* 100 ms */
- bit_data->data = pdata;
+ bit_data->data = priv;
adap->owner = THIS_MODULE;
- if (pdev->dev.of_node)
- strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
+ if (np)
+ strlcpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- adap->dev.parent = &pdev->dev;
- adap->dev.of_node = pdev->dev.of_node;
+ adap->dev.parent = dev;
+ adap->dev.of_node = np;
adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
@@ -227,8 +218,13 @@ static int i2c_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "using pins %u (SDA) and %u (SCL%s)\n",
- pdata->sda_pin, pdata->scl_pin,
+ /*
+ * FIXME: using global GPIO numbers is not helpful. If/when we
+ * get accessors to get the actual name of the GPIO line,
+ * from the descriptor, then provide that instead.
+ */
+ dev_info(dev, "using lines %u (SDA) and %u (SCL%s)\n",
+ desc_to_gpio(priv->sda), desc_to_gpio(priv->scl),
pdata->scl_is_output_only
? ", no clock stretching" : "");
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index eb1d91b986fd..f038858b6c54 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -82,6 +82,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -280,6 +281,8 @@
#define ISR_COMPLETE(err) (ISR_COMPLETE_M | (ISR_STATUS_M & (err)))
#define ISR_FATAL(err) (ISR_COMPLETE(err) | ISR_FATAL_M)
+#define IMG_I2C_PM_TIMEOUT 1000 /* ms */
+
enum img_i2c_mode {
MODE_INACTIVE,
MODE_RAW,
@@ -408,6 +411,9 @@ struct img_i2c {
unsigned int raw_timeout;
};
+static int img_i2c_runtime_suspend(struct device *dev);
+static int img_i2c_runtime_resume(struct device *dev);
+
static void img_i2c_writel(struct img_i2c *i2c, u32 offset, u32 value)
{
writel(value, i2c->base + offset);
@@ -826,9 +832,9 @@ next_atomic_cmd:
* Timer function to check if something has gone wrong in automatic mode (so we
* don't have to handle so many interrupts just to catch an exception).
*/
-static void img_i2c_check_timer(unsigned long arg)
+static void img_i2c_check_timer(struct timer_list *t)
{
- struct img_i2c *i2c = (struct img_i2c *)arg;
+ struct img_i2c *i2c = from_timer(i2c, t, check_timer);
unsigned long flags;
unsigned int line_status;
@@ -1054,8 +1060,8 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
atomic = true;
}
- ret = clk_prepare_enable(i2c->scb_clk);
- if (ret)
+ ret = pm_runtime_get_sync(adap->dev.parent);
+ if (ret < 0)
return ret;
for (i = 0; i < num; i++) {
@@ -1131,7 +1137,8 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
break;
}
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(adap->dev.parent);
+ pm_runtime_put_autosuspend(adap->dev.parent);
return i2c->msg_status ? i2c->msg_status : num;
}
@@ -1149,12 +1156,13 @@ static const struct i2c_algorithm img_i2c_algo = {
static int img_i2c_init(struct img_i2c *i2c)
{
unsigned int clk_khz, bitrate_khz, clk_period, tckh, tckl, tsdh;
- unsigned int i, ret, data, prescale, inc, int_bitrate, filt;
+ unsigned int i, data, prescale, inc, int_bitrate, filt;
struct img_i2c_timings timing;
u32 rev;
+ int ret;
- ret = clk_prepare_enable(i2c->scb_clk);
- if (ret)
+ ret = pm_runtime_get_sync(i2c->adap.dev.parent);
+ if (ret < 0)
return ret;
rev = img_i2c_readl(i2c, SCB_CORE_REV_REG);
@@ -1163,7 +1171,8 @@ static int img_i2c_init(struct img_i2c *i2c)
"Unknown hardware revision (%d.%d.%d.%d)\n",
(rev >> 24) & 0xff, (rev >> 16) & 0xff,
(rev >> 8) & 0xff, rev & 0xff);
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(i2c->adap.dev.parent);
+ pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return -EINVAL;
}
@@ -1314,7 +1323,8 @@ static int img_i2c_init(struct img_i2c *i2c)
/* Perform a synchronous sequence to reset the bus */
ret = img_i2c_reset_bus(i2c);
- clk_disable_unprepare(i2c->scb_clk);
+ pm_runtime_mark_last_busy(i2c->adap.dev.parent);
+ pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return ret;
}
@@ -1362,8 +1372,7 @@ static int img_i2c_probe(struct platform_device *pdev)
}
/* Set up the exception check timer */
- setup_timer(&i2c->check_timer, img_i2c_check_timer,
- (unsigned long)i2c);
+ timer_setup(&i2c->check_timer, img_i2c_check_timer, 0);
i2c->bitrate = timings[0].max_bitrate;
if (!of_property_read_u32(node, "clock-frequency", &val))
@@ -1384,22 +1393,30 @@ static int img_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
- ret = clk_prepare_enable(i2c->sys_clk);
- if (ret)
- return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_I2C_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_i2c_runtime_resume(&pdev->dev);
+ if (ret)
+ return ret;
+ }
ret = img_i2c_init(i2c);
if (ret)
- goto disable_clk;
+ goto rpm_disable;
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0)
- goto disable_clk;
+ goto rpm_disable;
return 0;
-disable_clk:
- clk_disable_unprepare(i2c->sys_clk);
+rpm_disable:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_i2c_runtime_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -1408,19 +1425,55 @@ static int img_i2c_remove(struct platform_device *dev)
struct img_i2c *i2c = platform_get_drvdata(dev);
i2c_del_adapter(&i2c->adap);
+ pm_runtime_disable(&dev->dev);
+ if (!pm_runtime_status_suspended(&dev->dev))
+ img_i2c_runtime_suspend(&dev->dev);
+
+ return 0;
+}
+
+static int img_i2c_runtime_suspend(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(i2c->scb_clk);
clk_disable_unprepare(i2c->sys_clk);
return 0;
}
+static int img_i2c_runtime_resume(struct device *dev)
+{
+ struct img_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(i2c->sys_clk);
+ if (ret) {
+ dev_err(dev, "Unable to enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(i2c->scb_clk);
+ if (ret) {
+ dev_err(dev, "Unable to enable scb clock\n");
+ clk_disable_unprepare(i2c->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int img_i2c_suspend(struct device *dev)
{
struct img_i2c *i2c = dev_get_drvdata(dev);
+ int ret;
- img_i2c_switch_mode(i2c, MODE_SUSPEND);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
- clk_disable_unprepare(i2c->sys_clk);
+ img_i2c_switch_mode(i2c, MODE_SUSPEND);
return 0;
}
@@ -1430,7 +1483,7 @@ static int img_i2c_resume(struct device *dev)
struct img_i2c *i2c = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(i2c->sys_clk);
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
@@ -1440,7 +1493,12 @@ static int img_i2c_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(img_i2c_pm, img_i2c_suspend, img_i2c_resume);
+static const struct dev_pm_ops img_i2c_pm = {
+ SET_RUNTIME_PM_OPS(img_i2c_runtime_suspend,
+ img_i2c_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
+};
static const struct of_device_id img_scb_i2c_match[] = {
{ .compatible = "img,scb-i2c" },
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 96caf378b1dc..950a9d74f54d 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -322,7 +322,7 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_8xxx[] = {
static u32 mpc_i2c_get_sec_cfg_8xxx(void)
{
- struct device_node *node = NULL;
+ struct device_node *node;
u32 __iomem *reg;
u32 val = 0;
@@ -700,7 +700,7 @@ static int fsl_i2c_probe(struct platform_device *op)
}
}
- if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
+ if (of_property_read_bool(op->dev.of_node, "fsl,preserve-clocking")) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
prop = of_get_property(op->dev.of_node, "clock-frequency",
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index aa3c8f4771c1..a7ef19855bb8 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 23c2ea2baedc..b9172f08fd05 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -487,6 +487,22 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
}
/*
+ * Try bus recovery, but only if SDA is actually low.
+ */
+static int omap_i2c_recover_bus(struct omap_i2c_dev *omap)
+{
+ u16 systest;
+
+ systest = omap_i2c_read_reg(omap, OMAP_I2C_SYSTEST_REG);
+ if ((systest & OMAP_I2C_SYSTEST_SCL_I_FUNC) &&
+ (systest & OMAP_I2C_SYSTEST_SDA_I_FUNC))
+ return 0; /* bus seems to already be fine */
+ if (!(systest & OMAP_I2C_SYSTEST_SCL_I_FUNC))
+ return -EBUSY; /* recovery would not fix SCL */
+ return i2c_recover_bus(&omap->adapter);
+}
+
+/*
* Waiting on Bus Busy
*/
static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
@@ -496,7 +512,7 @@ static int omap_i2c_wait_for_bb(struct omap_i2c_dev *omap)
timeout = jiffies + OMAP_I2C_TIMEOUT;
while (omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG) & OMAP_I2C_STAT_BB) {
if (time_after(jiffies, timeout))
- return i2c_recover_bus(&omap->adapter);
+ return omap_i2c_recover_bus(omap);
msleep(1);
}
@@ -577,8 +593,13 @@ static int omap_i2c_wait_for_bb_valid(struct omap_i2c_dev *omap)
}
if (time_after(jiffies, timeout)) {
+ /*
+ * SDA or SCL were low for the entire timeout without
+ * any activity detected. Most likely, a slave is
+ * locking up the bus with no master driving the clock.
+ */
dev_warn(omap->dev, "timeout waiting for bus ready\n");
- return -ETIMEDOUT;
+ return omap_i2c_recover_bus(omap);
}
msleep(1);
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index faa8fb8f2b8f..fa41ff799533 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -123,7 +123,6 @@ static struct i2c_adapter parport_adapter = {
/* SMBus alert support */
static struct i2c_smbus_alert_setup alert_data = {
- .alert_edge_triggered = 1,
};
static struct i2c_client *ara;
static struct lineop parport_ctrl_irq = {
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a8e54df4aed6..319209a07353 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -237,7 +237,6 @@ static void i2c_parport_attach(struct parport *port)
/* Setup SMBus alert if supported */
if (adapter_parm[type].smbus_alert) {
- adapter->alert_data.alert_edge_triggered = 1;
adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
&adapter->alert_data);
if (adapter->ara)
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 42d6b3a226f8..a542041df0cd 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -112,7 +112,6 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
jiffies, expires);
timer->expires = jiffies + expires;
- timer->data = (unsigned long)alg_data;
add_timer(timer);
}
@@ -435,9 +434,9 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void i2c_pnx_timeout(unsigned long data)
+static void i2c_pnx_timeout(struct timer_list *t)
{
- struct i2c_pnx_algo_data *alg_data = (struct i2c_pnx_algo_data *)data;
+ struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
u32 ctl;
dev_err(&alg_data->adapter.dev,
@@ -659,8 +658,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
if (IS_ERR(alg_data->clk))
return PTR_ERR(alg_data->clk);
- setup_timer(&alg_data->mif.timer, i2c_pnx_timeout,
- (unsigned long)alg_data);
+ timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
"%s", pdev->name);
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index c811af4c8d81..95c2f1ce3cad 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -84,12 +84,7 @@
#define ICSR2_NACKF 0x10
-/* ICBRx (@ PCLK 33MHz) */
#define ICBR_RESERVED 0xe0 /* Should be 1 on writes */
-#define ICBRL_SP100K (19 | ICBR_RESERVED)
-#define ICBRH_SP100K (16 | ICBR_RESERVED)
-#define ICBRL_SP400K (21 | ICBR_RESERVED)
-#define ICBRH_SP400K (9 | ICBR_RESERVED)
#define RIIC_INIT_MSG -1
@@ -288,48 +283,99 @@ static const struct i2c_algorithm riic_algo = {
.functionality = riic_func,
};
-static int riic_init_hw(struct riic_dev *riic, u32 spd)
+static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
{
int ret;
unsigned long rate;
+ int total_ticks, cks, brl, brh;
ret = clk_prepare_enable(riic->clk);
if (ret)
return ret;
+ if (t->bus_freq_hz > 400000) {
+ dev_err(&riic->adapter.dev,
+ "unsupported bus speed (%dHz). 400000 max\n",
+ t->bus_freq_hz);
+ clk_disable_unprepare(riic->clk);
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(riic->clk);
+
/*
- * TODO: Implement formula to calculate the timing values depending on
- * variable parent clock rate and arbitrary bus speed
+ * Assume the default register settings:
+ * FER.SCLE = 1 (SCL sync circuit enabled, adds 2 or 3 cycles)
+ * FER.NFE = 1 (noise circuit enabled)
+ * MR3.NF = 0 (1 cycle of noise filtered out)
+ *
+ * Freq (CKS=000)  = I2CCLK / ((BRH + 3 + 1) + (BRL + 3 + 1) + tr + tf)
+ * Freq (CKS!=000) = I2CCLK / ((BRH + 2 + 1) + (BRL + 2 + 1) + tr + tf)
*/
- rate = clk_get_rate(riic->clk);
- if (rate != 33325000) {
- dev_err(&riic->adapter.dev,
- "invalid parent clk (%lu). Must be 33325000Hz\n", rate);
+
+ /*
+ * Determine reference clock rate. We must be able to get the desired
+ * frequency with only 62 clock ticks max (31 high, 31 low).
+ * Aim for a duty of 60% LOW, 40% HIGH.
+ */
+ total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
+
+ for (cks = 0; cks < 7; cks++) {
+ /*
+ * 60% low time must be less than BRL + 2 + 1
+ * BRL max register value is 0x1F.
+ */
+ brl = ((total_ticks * 6) / 10);
+ if (brl <= (0x1F + 3))
+ break;
+
+ total_ticks /= 2;
+ rate /= 2;
+ }
+
+ if (brl > (0x1F + 3)) {
+ dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
+ (unsigned long)t->bus_freq_hz);
clk_disable_unprepare(riic->clk);
return -EINVAL;
}
+ brh = total_ticks - brl;
+
+ /* Remove automatic clock ticks for sync circuit and NF */
+ if (cks == 0) {
+ brl -= 4;
+ brh -= 4;
+ } else {
+ brl -= 3;
+ brh -= 3;
+ }
+
+ /*
+ * Remove clock ticks for rise and fall times. Convert ns to clock
+ * ticks.
+ */
+ brl -= t->scl_fall_ns / (1000000000 / rate);
+ brh -= t->scl_rise_ns / (1000000000 / rate);
+
+ /* Adjust for min register values for when SCLE=1 and NFE=1 */
+ if (brl < 1)
+ brl = 1;
+ if (brh < 1)
+ brh = 1;
+
+ pr_debug("i2c-riic: freq=%lu, duty=%d, fall=%lu, rise=%lu, cks=%d, brl=%d, brh=%d\n",
+ rate / total_ticks, ((brl + 3) * 100) / (brl + brh + 6),
+ t->scl_fall_ns / (1000000000 / rate),
+ t->scl_rise_ns / (1000000000 / rate), cks, brl, brh);
+
/* Changing the order of accessing IICRST and ICE may break things! */
writeb(ICCR1_IICRST | ICCR1_SOWP, riic->base + RIIC_ICCR1);
riic_clear_set_bit(riic, 0, ICCR1_ICE, RIIC_ICCR1);
- switch (spd) {
- case 100000:
- writeb(ICMR1_CKS(3), riic->base + RIIC_ICMR1);
- writeb(ICBRH_SP100K, riic->base + RIIC_ICBRH);
- writeb(ICBRL_SP100K, riic->base + RIIC_ICBRL);
- break;
- case 400000:
- writeb(ICMR1_CKS(1), riic->base + RIIC_ICMR1);
- writeb(ICBRH_SP400K, riic->base + RIIC_ICBRH);
- writeb(ICBRL_SP400K, riic->base + RIIC_ICBRL);
- break;
- default:
- dev_err(&riic->adapter.dev,
- "unsupported bus speed (%dHz). Use 100000 or 400000\n", spd);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
- }
+ writeb(ICMR1_CKS(cks), riic->base + RIIC_ICMR1);
+ writeb(brh | ICBR_RESERVED, riic->base + RIIC_ICBRH);
+ writeb(brl | ICBR_RESERVED, riic->base + RIIC_ICBRL);
writeb(0, riic->base + RIIC_ICSER);
writeb(ICMR3_ACKWP | ICMR3_RDRFS, riic->base + RIIC_ICMR3);
@@ -351,11 +397,10 @@ static struct riic_irq_desc riic_irqs[] = {
static int riic_i2c_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct riic_dev *riic;
struct i2c_adapter *adap;
struct resource *res;
- u32 bus_rate = 0;
+ struct i2c_timings i2c_t;
int i, ret;
riic = devm_kzalloc(&pdev->dev, sizeof(*riic), GFP_KERNEL);
@@ -396,8 +441,9 @@ static int riic_i2c_probe(struct platform_device *pdev)
init_completion(&riic->msg_done);
- of_property_read_u32(np, "clock-frequency", &bus_rate);
- ret = riic_init_hw(riic, bus_rate);
+ i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+
+ ret = riic_init_hw(riic, &i2c_t);
if (ret)
return ret;
@@ -408,7 +454,8 @@ static int riic_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, riic);
- dev_info(&pdev->dev, "registered with %dHz bus speed\n", bus_rate);
+ dev_info(&pdev->dev, "registered with %dHz bus speed\n",
+ i2c_t.bus_freq_hz);
return 0;
}
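To see the new timing calculation at work, assume a 33.325 MHz parent clock, a 400 kHz bus, and the 300 ns rise/fall defaults supplied by i2c_parse_fw_timings(): total_ticks = DIV_ROUND_UP(33325000, 400000) = 84. With CKS = 0 the 60% low period would need 50 ticks, more than the 0x1F + 3 = 34 limit, so the reference clock is halved once; with CKS = 1 and total_ticks = 42, brl = 25 and brh = 17. Subtracting the 3 sync/noise-filter ticks and the 5 ticks that 300 ns represents at 16.66 MHz yields BRL = 17 and BRH = 9, i.e. roughly a 60/40 duty cycle at just under 400 kHz.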
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6f2aaeb7c4fa..c03acdf71397 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -881,7 +881,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
struct sh_mobile_i2c_data *pd;
struct i2c_adapter *adap;
struct resource *res;
- const struct of_device_id *match;
+ const struct sh_mobile_dt_config *config;
int ret;
u32 bus_speed;
@@ -913,10 +913,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->bus_speed = ret ? STANDARD_MODE : bus_speed;
pd->clks_per_count = 1;
- match = of_match_device(sh_mobile_i2c_dt_ids, &dev->dev);
- if (match) {
- const struct sh_mobile_dt_config *config = match->data;
-
+ config = of_device_get_match_data(&dev->dev);
+ if (config) {
pd->clks_per_count = config->clks_per_count;
if (config->setup)
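of_device_get_match_data(), used above, fetches the matched of_device_id and returns its .data pointer in one call, so the explicit of_match_device() lookup goes away. A hedged sketch with made-up names:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct foo_config { int clks_per_count; };

	static const struct foo_config foo_a_cfg = { .clks_per_count = 2 };

	static const struct of_device_id foo_dt_ids[] = {
		{ .compatible = "vendor,foo-a", .data = &foo_a_cfg },
		{ /* sentinel */ }
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* NULL when the device was not created from a matching OF node */
		const struct foo_config *cfg = of_device_get_match_data(&pdev->dev);

		return cfg ? cfg->clks_per_count : 1;
	}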
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index addd90a8cb59..7c7fc01116a1 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -282,8 +282,7 @@ static void taos_disconnect(struct serio *serio)
{
struct taos_data *taos = serio_get_drvdata(serio);
- if (taos->client)
- i2c_unregister_device(taos->client);
+ i2c_unregister_device(taos->client);
i2c_del_adapter(&taos->adapter);
serio_close(serio);
kfree(taos);
diff --git a/drivers/i2c/busses/i2c-thunderx-pcidrv.c b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
index df0976f4432a..19f8eec38717 100644
--- a/drivers/i2c/busses/i2c-thunderx-pcidrv.c
+++ b/drivers/i2c/busses/i2c-thunderx-pcidrv.c
@@ -118,8 +118,6 @@ static void thunder_i2c_clock_disable(struct device *dev, struct clk *clk)
static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
struct device_node *node)
{
- u32 type;
-
if (!node)
return -EINVAL;
@@ -127,10 +125,6 @@ static int thunder_i2c_smbus_setup_of(struct octeon_i2c *i2c,
if (!i2c->alert_data.irq)
return -EINVAL;
- type = irqd_get_trigger_type(irq_get_irq_data(i2c->alert_data.irq));
- i2c->alert_data.alert_edge_triggered =
- (type & IRQ_TYPE_LEVEL_MASK) ? 1 : 0;
-
i2c->ara = i2c_setup_smbus_alert(&i2c->adap, &i2c->alert_data);
if (!i2c->ara)
return -ENODEV;
@@ -149,8 +143,7 @@ static int thunder_i2c_smbus_setup(struct octeon_i2c *i2c,
static void thunder_i2c_smbus_remove(struct octeon_i2c *i2c)
{
- if (i2c->ara)
- i2c_unregister_device(i2c->ara);
+ i2c_unregister_device(i2c->ara);
}
static int thunder_i2c_probe_pci(struct pci_dev *pdev,
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 7e89ba6fcf6f..a7ac746018ad 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -129,6 +129,11 @@ struct slimpro_i2c_dev {
#define to_slimpro_i2c_dev(cl) \
container_of(cl, struct slimpro_i2c_dev, mbox_client)
+enum slimpro_i2c_version {
+ XGENE_SLIMPRO_I2C_V1 = 0,
+ XGENE_SLIMPRO_I2C_V2 = 1,
+};
+
/*
* This function tests and clears a bitmask then returns its old value
*/
@@ -476,6 +481,15 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version = XGENE_SLIMPRO_I2C_V1;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx))
@@ -514,9 +528,16 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
- cppc_ss->length,
- MEMREMAP_WB);
+ if (version == XGENE_SLIMPRO_I2C_V2)
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
+ cppc_ss->length,
+ MEMREMAP_WT);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
+ cppc_ss->length,
+ MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENOENT;
@@ -581,7 +602,8 @@ MODULE_DEVICE_TABLE(of, xgene_slimpro_i2c_dt_ids);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_slimpro_i2c_acpi_ids[] = {
- {"APMC0D40", 0},
+ {"APMC0D40", XGENE_SLIMPRO_I2C_V1},
+ {"APMC0D8B", XGENE_SLIMPRO_I2C_V2},
{}
};
MODULE_DEVICE_TABLE(acpi, xgene_slimpro_i2c_acpi_ids);
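The X-Gene change keys the PCC mapping type off the ACPI _HID by carrying a version code in the match table's driver_data, which acpi_match_device() hands back at probe time. A minimal sketch with hypothetical IDs:

	enum foo_version { FOO_V1, FOO_V2 };

	static const struct acpi_device_id foo_acpi_ids[] = {
		{ "ABCD0001", FOO_V1 },
		{ "ABCD0002", FOO_V2 },
		{ }
	};

	static int foo_get_version(struct platform_device *pdev)
	{
		const struct acpi_device_id *id;

		id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
		if (!id)
			return -EINVAL;

		/* e.g. V2 parts want a write-through (MEMREMAP_WT) PCC mapping */
		return (int)id->driver_data;
	}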
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 6b106e94bc09..b970bf8f38e5 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -81,9 +82,12 @@ struct xlp9xx_i2c_dev {
struct completion msg_complete;
int irq;
bool msg_read;
+ bool len_recv;
+ bool client_pec;
u32 __iomem *base;
u32 msg_buf_remaining;
u32 msg_len;
+ u32 ip_clk_hz;
u32 clk_hz;
u32 msg_err;
u8 *msg_buf;
@@ -141,10 +145,25 @@ static void xlp9xx_i2c_fill_tx_fifo(struct xlp9xx_i2c_dev *priv)
static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
{
u32 len, i;
- u8 *buf = priv->msg_buf;
+ u8 rlen, *buf = priv->msg_buf;
len = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_FIFOWCNT) &
XLP9XX_I2C_FIFO_WCNT_MASK;
+ if (!len)
+ return;
+ if (priv->len_recv) {
+ /* read length byte */
+ rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+ *buf++ = rlen;
+ len--;
+ if (priv->client_pec)
+ ++rlen;
+ /* update remaining bytes and message length */
+ priv->msg_buf_remaining = rlen;
+ priv->msg_len = rlen + 1;
+ priv->len_recv = false;
+ }
+
len = min(priv->msg_buf_remaining, len);
for (i = 0; i < len; i++, buf++)
*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
@@ -213,7 +232,7 @@ static int xlp9xx_i2c_init(struct xlp9xx_i2c_dev *priv)
* The controller uses 5 * SCL clock internally.
* So prescale value should be divided by 5.
*/
- prescale = DIV_ROUND_UP(XLP9XX_I2C_IP_CLK_FREQ, priv->clk_hz);
+ prescale = DIV_ROUND_UP(priv->ip_clk_hz, priv->clk_hz);
prescale = ((prescale - 8) / 5) - 1;
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_RST);
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, XLP9XX_I2C_CTRL_EN |
@@ -228,7 +247,7 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
int last_msg)
{
unsigned long timeleft;
- u32 intr_mask, cmd, val;
+ u32 intr_mask, cmd, val, len;
priv->msg_buf = msg->buf;
priv->msg_buf_remaining = priv->msg_len = msg->len;
@@ -261,9 +280,13 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
else
val &= ~XLP9XX_I2C_CTRL_ADDMODE;
+ priv->len_recv = msg->flags & I2C_M_RECV_LEN;
+ len = priv->len_recv ? XLP9XX_I2C_FIFO_SIZE : msg->len;
+ priv->client_pec = msg->flags & I2C_CLIENT_PEC;
+
/* set data length to be transferred */
val = (val & ~XLP9XX_I2C_CTRL_MCTLEN_MASK) |
- (msg->len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
+ (len << XLP9XX_I2C_CTRL_MCTLEN_SHIFT);
xlp9xx_write_i2c_reg(priv, XLP9XX_I2C_CTRL, val);
/* fill fifo during tx */
@@ -310,6 +333,9 @@ static int xlp9xx_i2c_xfer_msg(struct xlp9xx_i2c_dev *priv, struct i2c_msg *msg,
return -ETIMEDOUT;
}
+ /* update msg->len with actual received length */
+ if (msg->flags & I2C_M_RECV_LEN)
+ msg->len = priv->msg_len;
return 0;
}
@@ -342,9 +368,19 @@ static const struct i2c_algorithm xlp9xx_i2c_algo = {
static int xlp9xx_i2c_get_frequency(struct platform_device *pdev,
struct xlp9xx_i2c_dev *priv)
{
+ struct clk *clk;
u32 freq;
int err;
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ priv->ip_clk_hz = XLP9XX_I2C_IP_CLK_FREQ;
+ dev_dbg(&pdev->dev, "using default input frequency %u\n",
+ priv->ip_clk_hz);
+ } else {
+ priv->ip_clk_hz = clk_get_rate(clk);
+ }
+
err = device_property_read_u32(&pdev->dev, "clock-frequency", &freq);
if (err) {
freq = XLP9XX_I2C_DEFAULT_FREQ;
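With the input clock now taken from an optional clk instead of the hard-coded XLP9XX_I2C_IP_CLK_FREQ, the prescaler above tracks the real rate. For illustration, assuming a 133 MHz input and a 100 kHz bus: DIV_ROUND_UP(133000000, 100000) = 1330, and ((1330 - 8) / 5) - 1 = 263, the division by 5 reflecting the controller's internal 5x SCL clock.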
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 56e46581b84b..706164b4c5be 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -29,6 +29,7 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/irqflags.h>
@@ -205,9 +206,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
*/
while (i++ < RECOVERY_CLK_CNT * 2) {
if (val) {
- /* Break if SDA is high */
- if (bri->get_sda && bri->get_sda(adap))
- break;
/* SCL shouldn't be low here */
if (!bri->get_scl(adap)) {
dev_err(&adap->dev,
@@ -215,6 +213,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
ret = -EBUSY;
break;
}
+ /* Break if SDA is high */
+ if (bri->get_sda && bri->get_sda(adap))
+ break;
}
val = !val;
@@ -222,6 +223,10 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
ndelay(RECOVERY_NDELAY);
}
+ /* check if recovery actually succeeded */
+ if (bri->get_sda && !bri->get_sda(adap))
+ ret = -EBUSY;
+
if (bri->unprepare_recovery)
bri->unprepare_recovery(adap);
@@ -666,10 +671,16 @@ static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter,
}
static void i2c_dev_set_name(struct i2c_adapter *adap,
- struct i2c_client *client)
+ struct i2c_client *client,
+ struct i2c_board_info const *info)
{
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+ if (info && info->dev_name) {
+ dev_set_name(&client->dev, "i2c-%s", info->dev_name);
+ return;
+ }
+
if (adev) {
dev_set_name(&client->dev, "i2c-%s", acpi_dev_name(adev));
return;
@@ -766,7 +777,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.of_node = info->of_node;
client->dev.fwnode = info->fwnode;
- i2c_dev_set_name(adap, client);
+ i2c_dev_set_name(adap, client, info);
if (info->properties) {
status = device_add_properties(&client->dev, info->properties);
@@ -808,6 +819,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
*/
void i2c_unregister_device(struct i2c_client *client)
{
+ if (!client)
+ return;
if (client->dev.of_node)
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
if (ACPI_COMPANION(&client->dev))
@@ -1259,6 +1272,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
goto out_list;
}
+ res = of_i2c_setup_smbus_alert(adap);
+ if (res)
+ goto out_reg;
+
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
pm_runtime_no_callbacks(&adap->dev);
@@ -1290,6 +1307,10 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
return 0;
+out_reg:
+ init_completion(&adap->dev_released);
+ device_unregister(&adap->dev);
+ wait_for_completion(&adap->dev_released);
out_list:
mutex_lock(&core_lock);
idr_remove(&i2c_adapter_idr, adap->nr);
@@ -1417,8 +1438,7 @@ static int __unregister_client(struct device *dev, void *dummy)
static int __unregister_dummy(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
- if (client)
- i2c_unregister_device(client);
+ i2c_unregister_device(client);
return 0;
}
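Letting i2c_unregister_device() accept NULL gives it kfree()-like semantics, which is what allows the taos-evm, thunderx and dummy-client callers in this series to drop their guards. A caller sketch (priv->ara is a hypothetical field):

	/* safe even if probe never created the ARA client */
	i2c_unregister_device(priv->ara);
	priv->ara = NULL;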
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 10f00a82ec9d..4bb9927afd01 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
#define CREATE_TRACE_POINTS
#include <trace/events/smbus.h>
@@ -592,3 +593,57 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
return i;
}
EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
+
+/**
+ * i2c_setup_smbus_alert - Setup SMBus alert support
+ * @adapter: the target adapter
+ * @setup: setup data for the SMBus alert handler
+ * Context: can sleep
+ *
+ * Setup handling of the SMBus alert protocol on a given I2C bus segment.
+ *
+ * Handling can be done either through our IRQ handler, or by the
+ * adapter (from its handler, periodic polling, or whatever).
+ *
+ * NOTE that if we manage the IRQ, we *MUST* know if it's level or
+ * edge triggered in order to hand it to the workqueue correctly.
+ * If triggering the alert seems to wedge the system, you probably
+ * should have said it's level triggered.
+ *
+ * This returns the ara client, which should be saved for later use with
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
+ * to indicate an error.
+ */
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
+{
+ struct i2c_board_info ara_board_info = {
+ I2C_BOARD_INFO("smbus_alert", 0x0c),
+ .platform_data = setup,
+ };
+
+ return i2c_new_device(adapter, &ara_board_info);
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+
+#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
+int of_i2c_setup_smbus_alert(struct i2c_adapter *adapter)
+{
+ struct i2c_client *client;
+ int irq;
+
+ irq = of_property_match_string(adapter->dev.of_node, "interrupt-names",
+ "smbus_alert");
+ if (irq == -EINVAL || irq == -ENODATA)
+ return 0;
+ else if (irq < 0)
+ return irq;
+
+ client = i2c_setup_smbus_alert(adapter, NULL);
+ if (!client)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_i2c_setup_smbus_alert);
+#endif
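An adapter driver that owns the SMBALERT# interrupt would use the relocated helper roughly as below; the setup data must outlive the ARA client, and the returned client is later passed to i2c_unregister_device(). A sketch with hypothetical driver fields:

	#include <linux/i2c-smbus.h>

	static int foo_enable_alert(struct foo_i2c *priv, int irq)
	{
		/* keep alert_data in the driver struct, not on the stack */
		priv->alert_data.irq = irq;

		priv->ara = i2c_setup_smbus_alert(&priv->adap, &priv->alert_data);
		if (!priv->ara)
			return -ENODEV;
		return 0;
	}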
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 6f638bbc922d..2cab27a68479 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -35,6 +35,7 @@
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
@@ -238,46 +239,29 @@ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
}
static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
- unsigned long arg)
+ unsigned nmsgs, struct i2c_msg *msgs)
{
- struct i2c_rdwr_ioctl_data rdwr_arg;
- struct i2c_msg *rdwr_pa;
u8 __user **data_ptrs;
int i, res;
- if (copy_from_user(&rdwr_arg,
- (struct i2c_rdwr_ioctl_data __user *)arg,
- sizeof(rdwr_arg)))
- return -EFAULT;
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
- return -EINVAL;
-
- rdwr_pa = memdup_user(rdwr_arg.msgs,
- rdwr_arg.nmsgs * sizeof(struct i2c_msg));
- if (IS_ERR(rdwr_pa))
- return PTR_ERR(rdwr_pa);
-
- data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
+ data_ptrs = kmalloc(nmsgs * sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
- kfree(rdwr_pa);
+ kfree(msgs);
return -ENOMEM;
}
res = 0;
- for (i = 0; i < rdwr_arg.nmsgs; i++) {
+ for (i = 0; i < nmsgs; i++) {
/* Limit the size of the message to a sane amount */
- if (rdwr_pa[i].len > 8192) {
+ if (msgs[i].len > 8192) {
res = -EINVAL;
break;
}
- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
- rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
- if (IS_ERR(rdwr_pa[i].buf)) {
- res = PTR_ERR(rdwr_pa[i].buf);
+ data_ptrs[i] = (u8 __user *)msgs[i].buf;
+ msgs[i].buf = memdup_user(data_ptrs[i], msgs[i].len);
+ if (IS_ERR(msgs[i].buf)) {
+ res = PTR_ERR(msgs[i].buf);
break;
}
@@ -292,121 +276,117 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
* greater (for example to account for a checksum byte at
* the end of the message.)
*/
- if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
- if (!(rdwr_pa[i].flags & I2C_M_RD) ||
- rdwr_pa[i].buf[0] < 1 ||
- rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ if (!(msgs[i].flags & I2C_M_RD) ||
+ msgs[i].buf[0] < 1 ||
+ msgs[i].len < msgs[i].buf[0] +
I2C_SMBUS_BLOCK_MAX) {
res = -EINVAL;
break;
}
- rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ msgs[i].len = msgs[i].buf[0];
}
}
if (res < 0) {
int j;
for (j = 0; j < i; ++j)
- kfree(rdwr_pa[j].buf);
+ kfree(msgs[j].buf);
kfree(data_ptrs);
- kfree(rdwr_pa);
+ kfree(msgs);
return res;
}
- res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
+ res = i2c_transfer(client->adapter, msgs, nmsgs);
while (i-- > 0) {
- if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
- if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
- rdwr_pa[i].len))
+ if (res >= 0 && (msgs[i].flags & I2C_M_RD)) {
+ if (copy_to_user(data_ptrs[i], msgs[i].buf,
+ msgs[i].len))
res = -EFAULT;
}
- kfree(rdwr_pa[i].buf);
+ kfree(msgs[i].buf);
}
kfree(data_ptrs);
- kfree(rdwr_pa);
+ kfree(msgs);
return res;
}
static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
- unsigned long arg)
+ u8 read_write, u8 command, u32 size,
+ union i2c_smbus_data __user *data)
{
- struct i2c_smbus_ioctl_data data_arg;
union i2c_smbus_data temp = {};
int datasize, res;
- if (copy_from_user(&data_arg,
- (struct i2c_smbus_ioctl_data __user *) arg,
- sizeof(struct i2c_smbus_ioctl_data)))
- return -EFAULT;
- if ((data_arg.size != I2C_SMBUS_BYTE) &&
- (data_arg.size != I2C_SMBUS_QUICK) &&
- (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
- (data_arg.size != I2C_SMBUS_WORD_DATA) &&
- (data_arg.size != I2C_SMBUS_PROC_CALL) &&
- (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
- (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
- (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
- (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
+ if ((size != I2C_SMBUS_BYTE) &&
+ (size != I2C_SMBUS_QUICK) &&
+ (size != I2C_SMBUS_BYTE_DATA) &&
+ (size != I2C_SMBUS_WORD_DATA) &&
+ (size != I2C_SMBUS_PROC_CALL) &&
+ (size != I2C_SMBUS_BLOCK_DATA) &&
+ (size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
+ (size != I2C_SMBUS_I2C_BLOCK_DATA) &&
+ (size != I2C_SMBUS_BLOCK_PROC_CALL)) {
dev_dbg(&client->adapter->dev,
"size out of range (%x) in ioctl I2C_SMBUS.\n",
- data_arg.size);
+ size);
return -EINVAL;
}
/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
so the check is valid if size==I2C_SMBUS_QUICK too. */
- if ((data_arg.read_write != I2C_SMBUS_READ) &&
- (data_arg.read_write != I2C_SMBUS_WRITE)) {
+ if ((read_write != I2C_SMBUS_READ) &&
+ (read_write != I2C_SMBUS_WRITE)) {
dev_dbg(&client->adapter->dev,
"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
- data_arg.read_write);
+ read_write);
return -EINVAL;
}
/* Note that command values are always valid! */
- if ((data_arg.size == I2C_SMBUS_QUICK) ||
- ((data_arg.size == I2C_SMBUS_BYTE) &&
- (data_arg.read_write == I2C_SMBUS_WRITE)))
+ if ((size == I2C_SMBUS_QUICK) ||
+ ((size == I2C_SMBUS_BYTE) &&
+ (read_write == I2C_SMBUS_WRITE)))
/* These are special: we do not use data */
return i2c_smbus_xfer(client->adapter, client->addr,
- client->flags, data_arg.read_write,
- data_arg.command, data_arg.size, NULL);
+ client->flags, read_write,
+ command, size, NULL);
- if (data_arg.data == NULL) {
+ if (data == NULL) {
dev_dbg(&client->adapter->dev,
"data is NULL pointer in ioctl I2C_SMBUS.\n");
return -EINVAL;
}
- if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
- (data_arg.size == I2C_SMBUS_BYTE))
- datasize = sizeof(data_arg.data->byte);
- else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
- (data_arg.size == I2C_SMBUS_PROC_CALL))
- datasize = sizeof(data_arg.data->word);
+ if ((size == I2C_SMBUS_BYTE_DATA) ||
+ (size == I2C_SMBUS_BYTE))
+ datasize = sizeof(data->byte);
+ else if ((size == I2C_SMBUS_WORD_DATA) ||
+ (size == I2C_SMBUS_PROC_CALL))
+ datasize = sizeof(data->word);
else /* size == smbus block, i2c block, or block proc. call */
- datasize = sizeof(data_arg.data->block);
+ datasize = sizeof(data->block);
- if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
- (data_arg.read_write == I2C_SMBUS_WRITE)) {
- if (copy_from_user(&temp, data_arg.data, datasize))
+ if ((size == I2C_SMBUS_PROC_CALL) ||
+ (size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (size == I2C_SMBUS_I2C_BLOCK_DATA) ||
+ (read_write == I2C_SMBUS_WRITE)) {
+ if (copy_from_user(&temp, data, datasize))
return -EFAULT;
}
- if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
+ if (size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
/* Convert old I2C block commands to the new
convention. This preserves binary compatibility. */
- data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
- if (data_arg.read_write == I2C_SMBUS_READ)
+ size = I2C_SMBUS_I2C_BLOCK_DATA;
+ if (read_write == I2C_SMBUS_READ)
temp.block[0] = I2C_SMBUS_BLOCK_MAX;
}
res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
- data_arg.read_write, data_arg.command, data_arg.size, &temp);
- if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
- (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
- (data_arg.read_write == I2C_SMBUS_READ))) {
- if (copy_to_user(data_arg.data, &temp, datasize))
+ read_write, command, size, &temp);
+ if (!res && ((size == I2C_SMBUS_PROC_CALL) ||
+ (size == I2C_SMBUS_BLOCK_PROC_CALL) ||
+ (read_write == I2C_SMBUS_READ))) {
+ if (copy_to_user(data, &temp, datasize))
return -EFAULT;
}
return res;
@@ -454,12 +434,39 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
funcs = i2c_get_functionality(client->adapter);
return put_user(funcs, (unsigned long __user *)arg);
- case I2C_RDWR:
- return i2cdev_ioctl_rdwr(client, arg);
+ case I2C_RDWR: {
+ struct i2c_rdwr_ioctl_data rdwr_arg;
+ struct i2c_msg *rdwr_pa;
+
+ if (copy_from_user(&rdwr_arg,
+ (struct i2c_rdwr_ioctl_data __user *)arg,
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
+ /* Put an arbitrary limit on the number of messages that can
+ * be sent at once */
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
- case I2C_SMBUS:
- return i2cdev_ioctl_smbus(client, arg);
+ rdwr_pa = memdup_user(rdwr_arg.msgs,
+ rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
+
+ return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
+ }
+ case I2C_SMBUS: {
+ struct i2c_smbus_ioctl_data data_arg;
+ if (copy_from_user(&data_arg,
+ (struct i2c_smbus_ioctl_data __user *) arg,
+ sizeof(struct i2c_smbus_ioctl_data)))
+ return -EFAULT;
+ return i2cdev_ioctl_smbus(client, data_arg.read_write,
+ data_arg.command,
+ data_arg.size,
+ data_arg.data);
+ }
case I2C_RETRIES:
client->adapter->retries = arg;
break;
@@ -480,6 +487,90 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+#ifdef CONFIG_COMPAT
+
+struct i2c_smbus_ioctl_data32 {
+ u8 read_write;
+ u8 command;
+ u32 size;
+ compat_caddr_t data; /* union i2c_smbus_data *data */
+};
+
+struct i2c_msg32 {
+ u16 addr;
+ u16 flags;
+ u16 len;
+ compat_caddr_t buf;
+};
+
+struct i2c_rdwr_ioctl_data32 {
+ compat_caddr_t msgs; /* struct i2c_msg __user *msgs */
+ u32 nmsgs;
+};
+
+static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct i2c_client *client = file->private_data;
+ unsigned long funcs;
+ switch (cmd) {
+ case I2C_FUNCS:
+ funcs = i2c_get_functionality(client->adapter);
+ return put_user(funcs, (compat_ulong_t __user *)arg);
+ case I2C_RDWR: {
+ struct i2c_rdwr_ioctl_data32 rdwr_arg;
+ struct i2c_msg32 *p;
+ struct i2c_msg *rdwr_pa;
+ int i;
+
+ if (copy_from_user(&rdwr_arg,
+ (struct i2c_rdwr_ioctl_data32 __user *)arg,
+ sizeof(rdwr_arg)))
+ return -EFAULT;
+
+ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
+ return -EINVAL;
+
+ rdwr_pa = kmalloc_array(rdwr_arg.nmsgs, sizeof(struct i2c_msg),
+ GFP_KERNEL);
+ if (!rdwr_pa)
+ return -ENOMEM;
+
+ p = compat_ptr(rdwr_arg.msgs);
+ for (i = 0; i < rdwr_arg.nmsgs; i++) {
+ struct i2c_msg32 umsg;
+ if (copy_from_user(&umsg, p + i, sizeof(umsg))) {
+ kfree(rdwr_pa);
+ return -EFAULT;
+ }
+ rdwr_pa[i] = (struct i2c_msg) {
+ .addr = umsg.addr,
+ .flags = umsg.flags,
+ .len = umsg.len,
+ .buf = compat_ptr(umsg.buf)
+ };
+ }
+
+ return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa);
+ }
+ case I2C_SMBUS: {
+ struct i2c_smbus_ioctl_data32 data32;
+ if (copy_from_user(&data32,
+ (void __user *) arg,
+ sizeof(data32)))
+ return -EFAULT;
+ return i2cdev_ioctl_smbus(client, data32.read_write,
+ data32.command,
+ data32.size,
+ compat_ptr(data32.data));
+ }
+ default:
+ return i2cdev_ioctl(file, cmd, arg);
+ }
+}
+#else
+#define compat_i2cdev_ioctl NULL
+#endif
+
static int i2cdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
@@ -527,6 +618,7 @@ static const struct file_operations i2cdev_fops = {
.read = i2cdev_read,
.write = i2cdev_write,
.unlocked_ioctl = i2cdev_ioctl,
+ .compat_ioctl = compat_i2cdev_ioctl,
.open = i2cdev_open,
.release = i2cdev_release,
};
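With the compat handler wired into i2cdev_fops, 32-bit userspace on a 64-bit kernel can issue I2C_RDWR directly. A hedged userspace sketch (addresses and register are examples):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/i2c.h>
	#include <linux/i2c-dev.h>

	static int read_reg(int fd, uint16_t addr, uint8_t reg, uint8_t *val)
	{
		struct i2c_msg msgs[2] = {
			{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val },
		};
		struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

		return ioctl(fd, I2C_RDWR, &xfer) < 0 ? -1 : 0;
	}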
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index f9271c713d20..5a1dd7f13bac 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -21,12 +21,11 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
struct i2c_smbus_alert {
- unsigned int alert_edge_triggered:1;
- int irq;
struct work_struct alert;
struct i2c_client *ara; /* Alert response address */
};
@@ -72,13 +71,12 @@ static int smbus_do_alert(struct device *dev, void *addrp)
* The alert IRQ handler needs to hand work off to a task which can issue
* SMBus calls, because those sleeping calls can't be made in IRQ context.
*/
-static void smbus_alert(struct work_struct *work)
+static irqreturn_t smbus_alert(int irq, void *d)
{
- struct i2c_smbus_alert *alert;
+ struct i2c_smbus_alert *alert = d;
struct i2c_client *ara;
unsigned short prev_addr = 0; /* Not a valid address */
- alert = container_of(work, struct i2c_smbus_alert, alert);
ara = alert->ara;
for (;;) {
@@ -115,21 +113,17 @@ static void smbus_alert(struct work_struct *work)
prev_addr = data.addr;
}
- /* We handled all alerts; re-enable level-triggered IRQs */
- if (!alert->alert_edge_triggered)
- enable_irq(alert->irq);
+ return IRQ_HANDLED;
}
-static irqreturn_t smbalert_irq(int irq, void *d)
+static void smbalert_work(struct work_struct *work)
{
- struct i2c_smbus_alert *alert = d;
+ struct i2c_smbus_alert *alert;
- /* Disable level-triggered IRQs until we handle them */
- if (!alert->alert_edge_triggered)
- disable_irq_nosync(irq);
+ alert = container_of(work, struct i2c_smbus_alert, alert);
+
+ smbus_alert(0, alert);
- schedule_work(&alert->alert);
- return IRQ_HANDLED;
}
/* Setup SMBALERT# infrastructure */
@@ -139,28 +133,35 @@ static int smbalert_probe(struct i2c_client *ara,
struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev);
struct i2c_smbus_alert *alert;
struct i2c_adapter *adapter = ara->adapter;
- int res;
+ int res, irq;
alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert),
GFP_KERNEL);
if (!alert)
return -ENOMEM;
- alert->alert_edge_triggered = setup->alert_edge_triggered;
- alert->irq = setup->irq;
- INIT_WORK(&alert->alert, smbus_alert);
+ if (setup) {
+ irq = setup->irq;
+ } else {
+ irq = of_irq_get_byname(adapter->dev.of_node, "smbus_alert");
+ if (irq <= 0)
+ return irq;
+ }
+
+ INIT_WORK(&alert->alert, smbalert_work);
alert->ara = ara;
- if (setup->irq > 0) {
- res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
- 0, "smbus_alert", alert);
+ if (irq > 0) {
+ res = devm_request_threaded_irq(&ara->dev, irq,
+ NULL, smbus_alert,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "smbus_alert", alert);
if (res)
return res;
}
i2c_set_clientdata(ara, alert);
- dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
- setup->alert_edge_triggered ? "edge" : "level");
+ dev_info(&adapter->dev, "supports SMBALERT#\n");
return 0;
}
@@ -190,38 +191,6 @@ static struct i2c_driver smbalert_driver = {
};
/**
- * i2c_setup_smbus_alert - Setup SMBus alert support
- * @adapter: the target adapter
- * @setup: setup data for the SMBus alert handler
- * Context: can sleep
- *
- * Setup handling of the SMBus alert protocol on a given I2C bus segment.
- *
- * Handling can be done either through our IRQ handler, or by the
- * adapter (from its handler, periodic polling, or whatever).
- *
- * NOTE that if we manage the IRQ, we *MUST* know if it's level or
- * edge triggered in order to hand it to the workqueue correctly.
- * If triggering the alert seems to wedge the system, you probably
- * should have said it's level triggered.
- *
- * This returns the ara client, which should be saved for later use with
- * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
- * to indicate an error.
- */
-struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
- struct i2c_smbus_alert_setup *setup)
-{
- struct i2c_board_info ara_board_info = {
- I2C_BOARD_INFO("smbus_alert", 0x0c),
- .platform_data = setup,
- };
-
- return i2c_new_device(adapter, &ara_board_info);
-}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
-
-/**
* i2c_handle_smbus_alert - Handle an SMBus alert
* @ara: the ARA client on the relevant adapter
* Context: can't sleep
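The move from a workqueue plus edge/level bookkeeping to devm_request_threaded_irq() works because the handler runs in a sleepable kernel thread, and IRQF_ONESHOT keeps a level-triggered line masked until that thread returns. A minimal sketch of the pattern (names hypothetical):

	static irqreturn_t foo_alert_thread(int irq, void *d)
	{
		struct foo_alert *alert = d;

		/* sleeping SMBus transfers are allowed in the IRQ thread */
		foo_scan_alert_sources(alert);
		return IRQ_HANDLED;
	}

	/* in probe: no hard handler, so IRQF_ONESHOT is mandatory */
	res = devm_request_threaded_irq(dev, irq, NULL, foo_alert_thread,
					IRQF_SHARED | IRQF_ONESHOT,
					"foo_alert", alert);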
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4a67d3199877..6d9d865e8518 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for multiplexer I2C chip drivers.
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 7b992db38021..2ca068d8b92d 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -246,36 +246,6 @@ static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static void pca954x_irq_mask(struct irq_data *idata)
-{
- struct pca954x *data = irq_data_get_irq_chip_data(idata);
- unsigned int pos = idata->hwirq;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&data->lock, flags);
-
- data->irq_mask &= ~BIT(pos);
- if (!data->irq_mask)
- disable_irq(data->client->irq);
-
- raw_spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static void pca954x_irq_unmask(struct irq_data *idata)
-{
- struct pca954x *data = irq_data_get_irq_chip_data(idata);
- unsigned int pos = idata->hwirq;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&data->lock, flags);
-
- if (!data->irq_mask)
- enable_irq(data->client->irq);
- data->irq_mask |= BIT(pos);
-
- raw_spin_unlock_irqrestore(&data->lock, flags);
-}
-
static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type)
{
if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_LOW)
@@ -285,8 +255,6 @@ static int pca954x_irq_set_type(struct irq_data *idata, unsigned int type)
static struct irq_chip pca954x_irq_chip = {
.name = "i2c-mux-pca954x",
- .irq_mask = pca954x_irq_mask,
- .irq_unmask = pca954x_irq_unmask,
.irq_set_type = pca954x_irq_set_type,
};
@@ -294,7 +262,7 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
- int c, err, irq;
+ int c, irq;
if (!data->chip->has_irq || client->irq <= 0)
return 0;
@@ -309,29 +277,31 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_create_mapping(data->irq, c);
+ if (!irq) {
+ dev_err(&client->dev, "failed irq create map\n");
+ return -EINVAL;
+ }
irq_set_chip_data(irq, data);
irq_set_chip_and_handler(irq, &pca954x_irq_chip,
handle_simple_irq);
}
- err = devm_request_threaded_irq(&client->dev, data->client->irq, NULL,
- pca954x_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED,
- "pca954x", data);
- if (err)
- goto err_req_irq;
+ return 0;
+}
- disable_irq(data->client->irq);
+static void pca954x_cleanup(struct i2c_mux_core *muxc)
+{
+ struct pca954x *data = i2c_mux_priv(muxc);
+ int c, irq;
- return 0;
-err_req_irq:
- for (c = 0; c < data->chip->nchans; c++) {
- irq = irq_find_mapping(data->irq, c);
- irq_dispose_mapping(irq);
+ if (data->irq) {
+ for (c = 0; c < data->chip->nchans; c++) {
+ irq = irq_find_mapping(data->irq, c);
+ irq_dispose_mapping(irq);
+ }
+ irq_domain_remove(data->irq);
}
- irq_domain_remove(data->irq);
-
- return err;
+ i2c_mux_del_adapters(muxc);
}
/*
@@ -391,7 +361,7 @@ static int pca954x_probe(struct i2c_client *client,
ret = pca954x_irq_setup(muxc);
if (ret)
- goto fail_del_adapters;
+ goto fail_cleanup;
/* Now create an adapter for each channel */
for (num = 0; num < data->chip->nchans; num++) {
@@ -414,7 +384,16 @@ static int pca954x_probe(struct i2c_client *client,
ret = i2c_mux_add_adapter(muxc, force, num, class);
if (ret)
- goto fail_del_adapters;
+ goto fail_cleanup;
+ }
+
+ if (data->irq) {
+ ret = devm_request_threaded_irq(&client->dev, data->client->irq,
+ NULL, pca954x_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "pca954x", data);
+ if (ret)
+ goto fail_cleanup;
}
dev_info(&client->dev,
@@ -424,26 +403,16 @@ static int pca954x_probe(struct i2c_client *client,
return 0;
-fail_del_adapters:
- i2c_mux_del_adapters(muxc);
+fail_cleanup:
+ pca954x_cleanup(muxc);
return ret;
}
static int pca954x_remove(struct i2c_client *client)
{
struct i2c_mux_core *muxc = i2c_get_clientdata(client);
- struct pca954x *data = i2c_mux_priv(muxc);
- int c, irq;
- if (data->irq) {
- for (c = 0; c < data->chip->nchans; c++) {
- irq = irq_find_mapping(data->irq, c);
- irq_dispose_mapping(irq);
- }
- irq_domain_remove(data->irq);
- }
-
- i2c_mux_del_adapters(muxc);
+ pca954x_cleanup(muxc);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index d97031804de8..f6c9c3dc6cad 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -107,9 +107,9 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
put_device(&adapter->dev);
mux->data.n_values = of_get_child_count(np);
- if (of_find_property(np, "little-endian", NULL)) {
+ if (of_property_read_bool(np, "little-endian")) {
mux->data.little_endian = true;
- } else if (of_find_property(np, "big-endian", NULL)) {
+ } else if (of_property_read_bool(np, "big-endian")) {
mux->data.little_endian = false;
} else {
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : \
@@ -122,10 +122,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
#error Endianness not defined?
#endif
}
- if (of_find_property(np, "write-only", NULL))
- mux->data.write_only = true;
- else
- mux->data.write_only = false;
+ mux->data.write_only = of_property_read_bool(np, "write-only");
values = devm_kzalloc(&pdev->dev,
sizeof(*mux->data.values) * mux->data.n_values,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index c99a25c075bc..cf1fb3fb5d26 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -117,7 +117,9 @@ config BLK_DEV_DELKIN
config BLK_DEV_IDECD
tristate "Include IDE/ATAPI CDROM support"
+ depends on BLK_DEV
select IDE_ATAPI
+ select CDROM
---help---
If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is
a newer protocol used by IDE CD-ROM and TAPE drives, similar to the
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 2a8c417d4081..9f617a77970f 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# link order is important here
#
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 968038482d2f..7d4e5c08f133 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Provides ACPI support for IDE drives.
*
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 14d1e7d9a1d6..0e6bc631a1ca 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive)
struct request *rq = drive->hwif->rq;
unsigned long wait = 0;
- debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
+ debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);
/*
* Some commands are *slow* and normally take a long time to complete.
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}
- debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
+ debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);
/* Retry operation */
ide_retry_pc(drive);
@@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);
debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
- rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
+ scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);
/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 81e18f9628d0..7c3ed7c9af77 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -867,11 +867,16 @@ static void msf_from_bcd(struct atapi_msf *msf)
int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
{
struct cdrom_info *info = drive->driver_data;
- struct cdrom_device_info *cdi = &info->devinfo;
+ struct cdrom_device_info *cdi;
unsigned char cmd[BLK_MAX_CDB];
ide_debug_log(IDE_DBG_FUNC, "enter");
+ if (!info)
+ return -EIO;
+
+ cdi = &info->devinfo;
+
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_TEST_UNIT_READY;
@@ -1328,7 +1333,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
struct scsi_request *req = scsi_req(rq);
- memset(req->cmd, 0, BLK_MAX_CDB);
+ q->initialize_rq_fn(rq);
if (rq_data_dir(rq) == READ)
req->cmd[0] = GPCMD_READ_10;
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index eea60c986c4f..264e822eba58 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1996-98 Erik Andersen
* Copyright (C) 1998-2000 Jens Axboe
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 9d26c9737e21..2acca12b9c94 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cdrom.c IOCTLs handling for ide-cd driver.
*
diff --git a/drivers/ide/ide-cd_verbose.c b/drivers/ide/ide-cd_verbose.c
index 58a6feb74c02..5ecd5b2f03a3 100644
--- a/drivers/ide/ide-cd_verbose.c
+++ b/drivers/ide/ide-cd_verbose.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Verbose error logging for ATAPI CD/DVD devices.
*
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index ef7c8c43a380..4e20747af32e 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 241983da5fc4..188d1b03715d 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
* Copyright (C) 1998-2002 Linux ATA Development
diff --git a/drivers/ide/ide-disk.h b/drivers/ide/ide-disk.h
index d511dab7c4aa..0e8cc18bfda6 100644
--- a/drivers/ide/ide-disk.h
+++ b/drivers/ide/ide-disk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IDE_DISK_H
#define __IDE_DISK_H
diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c
index da36f729ff32..2c45616cff4f 100644
--- a/drivers/ide/ide-disk_ioctl.c
+++ b/drivers/ide/ide-disk_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c
index 0d1fae6cba6d..82a36ced4e96 100644
--- a/drivers/ide/ide-disk_proc.c
+++ b/drivers/ide/ide-disk_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/slab.h>
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 3ddd88219906..5bd2aafc3753 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IDE ATAPI floppy driver.
*
diff --git a/drivers/ide/ide-floppy.h b/drivers/ide/ide-floppy.h
index 6dd2beb48434..13c9b4b6d75e 100644
--- a/drivers/ide/ide-floppy.h
+++ b/drivers/ide/ide-floppy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IDE_FLOPPY_H
#define __IDE_FLOPPY_H
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index a22ca8467010..40a2ebe34e1d 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ide-floppy IOCTLs handling.
*/
diff --git a/drivers/ide/ide-floppy_proc.c b/drivers/ide/ide-floppy_proc.c
index e7a25ea757df..471457ebea67 100644
--- a/drivers/ide/ide-floppy_proc.c
+++ b/drivers/ide/ide-floppy_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
diff --git a/drivers/ide/ide-gd.h b/drivers/ide/ide-gd.h
index 55970772bd04..af3fe1880e9e 100644
--- a/drivers/ide/ide-gd.h
+++ b/drivers/ide/ide-gd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IDE_GD_H
#define __IDE_GD_H
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 3a234701d92c..6f25da56a169 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -611,9 +611,9 @@ static int drive_is_ready(ide_drive_t *drive)
* logic that wants cleaning up.
*/
-void ide_timer_expiry (unsigned long data)
+void ide_timer_expiry (struct timer_list *t)
{
- ide_hwif_t *hwif = (ide_hwif_t *)data;
+ ide_hwif_t *hwif = from_timer(hwif, t, timer);
ide_drive_t *uninitialized_var(drive);
ide_handler_t *handler;
unsigned long flags;
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 1f264d5d3f3f..6465bcc7cea6 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
diff --git a/drivers/ide/ide-pio-blacklist.c b/drivers/ide/ide-pio-blacklist.c
index 40e683a84ff9..1fd24798e5c9 100644
--- a/drivers/ide/ide-pio-blacklist.c
+++ b/drivers/ide/ide-pio-blacklist.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PIO blacklist. Some drives incorrectly report their maximal PIO mode,
* at least in respect to CMD640. Here we keep info on some known drives.
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 544f02d673ca..ad8a125defdd 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/ide.h>
@@ -89,9 +90,9 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+ rq = blk_get_request_flags(drive->queue, REQ_OP_DRV_IN,
+ BLK_MQ_REQ_PREEMPT);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
- rq->rq_flags |= RQF_PREEMPT;
rq->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c
index f5f2b62471da..859ddab9448f 100644
--- a/drivers/ide/ide-pnp.c
+++ b/drivers/ide/ide-pnp.c
@@ -22,7 +22,7 @@
#define DRV_NAME "ide-pnp"
/* Add your devices here :)) */
-static struct pnp_device_id idepnp_devices[] = {
+static const struct pnp_device_id idepnp_devices[] = {
/* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index eaf39e5db08b..17fd55af4d92 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1184,7 +1184,7 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
spin_lock_init(&hwif->lock);
- setup_timer(&hwif->timer, &ide_timer_expiry, (unsigned long)hwif);
+ timer_setup(&hwif->timer, ide_timer_expiry, 0);
init_completion(&hwif->gendev_rel_comp);
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c
index 84a6a9e08d64..b9dfeb2e8bd6 100644
--- a/drivers/ide/ide-sysfs.c
+++ b/drivers/ide/ide-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/ide.h>
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index d127ace6aa57..6ee866fcc5dd 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -244,7 +244,7 @@ struct chs_geom {
static unsigned int ide_disks;
static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
-static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+static int ide_set_disk_chs(const char *str, const struct kernel_param *kp)
{
unsigned int a, b, c = 0, h = 0, s = 0, i, j = 1;
@@ -328,7 +328,7 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
static unsigned int ide_ignore_cable;
-static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+static int ide_set_ignore_cable(const char *s, const struct kernel_param *kp)
{
int i, j = 1;
diff --git a/drivers/ide/qd65xx.h b/drivers/ide/qd65xx.h
index 1fba2a5f281c..01a43ab45e0e 100644
--- a/drivers/ide/qd65xx.h
+++ b/drivers/ide/qd65xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000 Linus Torvalds & authors
*/
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5dc7ea4b6bc4..b2ccce5fb071 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -913,22 +913,29 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
-
- cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
+ bool uninitialized_var(tick);
+ int cpu = smp_processor_id();
/*
- * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
- * will probably flush the TLB. It's not guaranteed to flush
- * the TLB, though, so it's not clear that we can do anything
- * useful with this knowledge.
+ * leave_mm() to avoid costly and often unnecessary wakeups
+ * for flushing the user TLBs associated with the active mm.
*/
-
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
- tick_broadcast_enter();
+ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(cpu);
+
+ if (!static_cpu_has(X86_FEATURE_ARAT)) {
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
+ MWAIT_CSTATE_MASK) + 1;
+ tick = false;
+ if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
+ tick = true;
+ tick_broadcast_enter();
+ }
+ }
mwait_idle_with_hints(eax, ecx);
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
tick_broadcast_exit();
return index;
@@ -1060,7 +1067,7 @@ static const struct idle_cpu idle_cpu_dnv = {
};
#define ICPU(model, cpu) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
@@ -1124,6 +1131,11 @@ static int __init intel_idle_probe(void)
return -ENODEV;
}
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 93c769cd99bf..b16b2e9ddc40 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the industrial I/O core.
#
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 15de262015df..c6d9517d7611 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -148,6 +148,17 @@ config HID_SENSOR_ACCEL_3D
To compile this driver as a module, choose M here: the
module will be called hid-sensor-accel-3d.
+config IIO_CROS_EC_ACCEL_LEGACY
+ tristate "ChromeOS EC Legacy Accelerometer Sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select CROS_EC_LPC_REGISTER_DEVICE
+ help
+ Say yes here to get support for accelerometers on Chromebook using
+ legacy EC firmware.
+ Sensor data is retrieved through IO memory.
+ Newer devices should use IIO_CROS_EC_SENSORS.
+
config IIO_ST_ACCEL_3AXIS
tristate "STMicroelectronics accelerometers 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
@@ -219,8 +230,8 @@ config KXCJK1013
select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build a driver for the Kionix KXCJK-1013
- triaxial acceleration sensor. This driver also supports KXCJ9-1008
- and KXTJ2-1009.
+ triaxial acceleration sensor. This driver also supports KXCJ9-1008,
+ KXTJ2-1009 and KXTF9.
To compile this driver as a module, choose M here: the module will
be called kxcjk-1013.
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 31fba1974e95..368aedb6377a 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O accelerometer drivers
#
@@ -43,6 +44,8 @@ obj-$(CONFIG_SCA3000) += sca3000.o
obj-$(CONFIG_STK8312) += stk8312.o
obj-$(CONFIG_STK8BA50) += stk8ba50.o
+obj-$(CONFIG_IIO_CROS_EC_ACCEL_LEGACY) += cros_ec_accel_legacy.o
+
obj-$(CONFIG_IIO_SSP_SENSORS_COMMONS) += ssp_accel_sensor.o
obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index 9ccb5828db98..7251d0e63d74 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -95,7 +95,6 @@ static int adxl345_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info adxl345_info = {
- .driver_module = THIS_MODULE,
.read_raw = adxl345_read_raw,
};
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3dec972ca672..cb9765a3de60 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -536,7 +536,6 @@ static const struct iio_info bma180_info = {
.attrs = &bma180_attrs_group,
.read_raw = bma180_read_raw,
.write_raw = bma180_write_raw,
- .driver_module = THIS_MODULE,
};
static const char * const bma180_power_modes[] = { "low_noise", "low_power" };
@@ -700,7 +699,6 @@ static int bma180_trig_try_reen(struct iio_trigger *trig)
static const struct iio_trigger_ops bma180_trigger_ops = {
.set_trigger_state = bma180_data_rdy_trigger_set_state,
.try_reenable = bma180_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bma180_probe(struct i2c_client *client,
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 5099f295dd37..e25d91c017ed 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -186,7 +186,6 @@ static int bma220_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info bma220_info = {
- .driver_module = THIS_MODULE,
.read_raw = bma220_read_raw,
.write_raw = bma220_write_raw,
.attrs = &bma220_attribute_group,
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 807299dd45eb..870f92ef61c2 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1094,7 +1094,6 @@ static const struct iio_info bmc150_accel_info = {
.write_event_value = bmc150_accel_write_event,
.write_event_config = bmc150_accel_write_event_config,
.read_event_config = bmc150_accel_read_event_config,
- .driver_module = THIS_MODULE,
};
static const struct iio_info bmc150_accel_info_fifo = {
@@ -1108,7 +1107,6 @@ static const struct iio_info bmc150_accel_info_fifo = {
.validate_trigger = bmc150_accel_validate_trigger,
.hwfifo_set_watermark = bmc150_accel_set_watermark,
.hwfifo_flush_to_buffer = bmc150_accel_fifo_flush,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmc150_accel_scan_masks[] = {
@@ -1200,7 +1198,6 @@ static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
.set_trigger_state = bmc150_accel_trigger_set_state,
.try_reenable = bmc150_accel_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index 38a8b11f8c19..ae6118ae11b1 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BMC150_ACCEL_H_
#define _BMC150_ACCEL_H_
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
new file mode 100644
index 000000000000..063e89eff791
--- /dev/null
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -0,0 +1,423 @@
+/*
+ * Driver for older Chrome OS EC accelerometer
+ *
+ * Copyright 2017 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the memory-mapped cros-ec interface to communicate
+ * with the Chrome OS EC about accelerometer data.
+ * Accelerometer access is presented through iio sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "cros-ec-accel-legacy"
+
+/*
+ * Sensor scale hard coded at 10 bits per g, computed as:
+ * g / (2^10 - 1) = 0.009586168; with g = 9.80665 m.s^-2
+ */
+#define ACCEL_LEGACY_NSCALE 9586168
+
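As a quick check of the arithmetic behind ACCEL_LEGACY_NSCALE, a stand-alone user-space sketch (not kernel code; it only assumes a hosted C environment):

	#include <stdio.h>

	int main(void)
	{
		/* 1 g spread over the 10-bit range, expressed in nano m/s^2 per LSB */
		double nscale = 9.80665 / ((1 << 10) - 1) * 1e9;

		printf("%.0f\n", nscale);	/* prints 9586168 */
		return 0;
	}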
+/* Indices for EC sensor values. */
+enum {
+ X,
+ Y,
+ Z,
+ MAX_AXIS,
+};
+
+/* State data for cros_ec_accel_legacy iio driver. */
+struct cros_ec_accel_legacy_state {
+ struct cros_ec_device *ec;
+
+ /*
+ * Array holding data from a single capture: 2 bytes per channel
+ * for the 3 channels (6 bytes, padded to 8), followed by the
+ * 8-byte-aligned timestamp, which is always last (16 bytes total).
+ */
+ s16 capture_data[8];
+ s8 sign[MAX_AXIS];
+ u8 sensor_num;
+};
+
+static int ec_cmd_read_u8(struct cros_ec_device *ec, unsigned int offset,
+ u8 *dest)
+{
+ return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int ec_cmd_read_u16(struct cros_ec_device *ec, unsigned int offset,
+ u16 *dest)
+{
+ __le16 tmp;
+ int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+ *dest = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+/**
+ * read_ec_until_not_busy() - Read from EC status byte until it reads not busy.
+ * @st: Pointer to state information for device.
+ *
+ * This function reads EC status until its busy bit gets cleared. It does not
+ * wait indefinitely and returns -EIO if the EC status is still busy after a
+ * few hundred milliseconds.
+ *
+ * Return: 8-bit status if ok, -EIO on error
+ */
+static int read_ec_until_not_busy(struct cros_ec_accel_legacy_state *st)
+{
+ struct cros_ec_device *ec = st->ec;
+ u8 status;
+ int attempts = 0;
+
+ ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+ /* Give up after enough attempts, return error. */
+ if (attempts++ >= 50)
+ return -EIO;
+
+ /* Small delay every so often. */
+ if (attempts % 5 == 0)
+ msleep(25);
+
+ ec_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+ }
+
+ return status;
+}
+
+/**
+ * read_ec_accel_data_unsafe() - Read acceleration data from EC shared memory.
+ * @st: Pointer to state information for device.
+ * @scan_mask: Bitmap of the sensor indices to scan.
+ * @data: Location to store data.
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ */
+static void read_ec_accel_data_unsafe(struct cros_ec_accel_legacy_state *st,
+ unsigned long scan_mask, s16 *data)
+{
+ int i = 0;
+ int num_enabled = bitmap_weight(&scan_mask, MAX_AXIS);
+
+ /* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+ while (num_enabled--) {
+ i = find_next_bit(&scan_mask, MAX_AXIS, i);
+ ec_cmd_read_u16(st->ec,
+ EC_MEMMAP_ACC_DATA +
+ sizeof(s16) *
+ (1 + i + st->sensor_num * MAX_AXIS),
+ data);
+ *data *= st->sign[i];
+ i++;
+ data++;
+ }
+}
+
+/**
+ * read_ec_accel_data() - Read acceleration data from EC shared memory.
+ * @st: Pointer to state information for device.
+ * @scan_mask: Bitmap of the sensor indices to scan.
+ * @data: Location to store data.
+ *
+ * This is the safe function for reading the EC data. It guarantees that
+ * the data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 if ok, negative error code on error
+ */
+static int read_ec_accel_data(struct cros_ec_accel_legacy_state *st,
+ unsigned long scan_mask, s16 *data)
+{
+ u8 samp_id = 0xff;
+ u8 status = 0;
+ int ret;
+ int attempts = 0;
+
+ /*
+ * Continually read all data from EC until the status byte after
+ * all reads reflects that the EC is not busy and the sample id
+ * matches the sample id from before all reads. This guarantees
+ * that data read in was not modified by the EC while reading.
+ */
+ while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+ EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+ /* If we have tried to read too many times, return error. */
+ if (attempts++ >= 5)
+ return -EIO;
+
+ /* Read status byte until EC is not busy. */
+ ret = read_ec_until_not_busy(st);
+ if (ret < 0)
+ return ret;
+ status = ret;
+
+ /*
+ * Store the current sample id so that we can compare to the
+ * sample id after reading the data.
+ */
+ samp_id = status & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+ /* Read all EC data, format it, and store it into data. */
+ read_ec_accel_data_unsafe(st, scan_mask, data);
+
+ /* Read status byte. */
+ ec_cmd_read_u8(st->ec, EC_MEMMAP_ACC_STATUS, &status);
+ }
+
+ return 0;
+}
+
+static int cros_ec_accel_legacy_read(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+ s16 data = 0;
+ int ret = IIO_VAL_INT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = read_ec_accel_data(st, (1 << chan->scan_index), &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = ACCEL_LEGACY_NSCALE;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ /* Calibration not supported. */
+ *val = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cros_ec_accel_legacy_write(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ /*
+ * Do nothing, but do not return an error code, so that the
+ * calibration script keeps working.
+ */
+ if (mask == IIO_CHAN_INFO_CALIBBIAS)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct iio_info cros_ec_accel_legacy_info = {
+ .read_raw = &cros_ec_accel_legacy_read,
+ .write_raw = &cros_ec_accel_legacy_write,
+};
+
+/**
+ * cros_ec_accel_legacy_capture() - The trigger handler function
+ * @irq: The interrupt number.
+ * @p: Private data - always a pointer to the poll func.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cros_ec_accel_legacy_capture(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ /* Clear capture data. */
+ memset(st->capture_data, 0, sizeof(st->capture_data));
+
+ /*
+ * Read data based on which channels are enabled in scan mask. Note
+ * that on a capture we are always reading the calibrated data.
+ */
+ read_ec_accel_data(st, *indio_dev->active_scan_mask, st->capture_data);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, (void *)st->capture_data,
+ iio_get_time_ns(indio_dev));
+
+ /*
+ * Tell the core we are done with this trigger and ready for the
+ * next one.
+ */
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static char *cros_ec_accel_legacy_loc_strings[] = {
+ [MOTIONSENSE_LOC_BASE] = "base",
+ [MOTIONSENSE_LOC_LID] = "lid",
+ [MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+static ssize_t cros_ec_accel_legacy_loc(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%s\n",
+ cros_ec_accel_legacy_loc_strings[st->sensor_num +
+ MOTIONSENSE_LOC_BASE]);
+}
+
+static ssize_t cros_ec_accel_legacy_id(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct cros_ec_accel_legacy_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->sensor_num);
+}
+
+static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
+ {
+ .name = "id",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_accel_legacy_id,
+ },
+ {
+ .name = "location",
+ .shared = IIO_SHARED_BY_ALL,
+ .read = cros_ec_accel_legacy_loc,
+ },
+ { }
+};
+
+#define CROS_EC_ACCEL_LEGACY_CHAN(_axis) \
+ { \
+ .type = IIO_ACCEL, \
+ .channel2 = IIO_MOD_X + (_axis), \
+ .modified = 1, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = cros_ec_accel_legacy_ext_info, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ }, \
+ }
+
+static struct iio_chan_spec ec_accel_channels[] = {
+ CROS_EC_ACCEL_LEGACY_CHAN(X),
+ CROS_EC_ACCEL_LEGACY_CHAN(Y),
+ CROS_EC_ACCEL_LEGACY_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(MAX_AXIS)
+};
+
+static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
+ struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+ struct iio_dev *indio_dev;
+ struct cros_ec_accel_legacy_state *state;
+ int ret, i;
+
+ if (!ec || !ec->ec_dev) {
+ dev_warn(&pdev->dev, "No EC device found.\n");
+ return -EINVAL;
+ }
+
+ if (!ec->ec_dev->cmd_readmem) {
+ dev_warn(&pdev->dev, "EC does not support direct reads.\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ state = iio_priv(indio_dev);
+ state->ec = ec->ec_dev;
+ state->sensor_num = sensor_platform->sensor_num;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = pdev->name;
+ indio_dev->channels = ec_accel_channels;
+ /*
+ * Present the channels using the HTML5 standard: X and Y must be
+ * swapped and some lid axes inverted.
+ */
+ for (i = X; i < MAX_AXIS; i++) {
+ switch (i) {
+ case X:
+ ec_accel_channels[X].scan_index = Y;
+ break;
+ case Y:
+ ec_accel_channels[Y].scan_index = X;
+ break;
+ case Z:
+ ec_accel_channels[Z].scan_index = Z;
+ break;
+ }
+ if (state->sensor_num == MOTIONSENSE_LOC_LID && i != Y)
+ state->sign[i] = -1;
+ else
+ state->sign[i] = 1;
+ }
+ indio_dev->num_channels = ARRAY_SIZE(ec_accel_channels);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &cros_ec_accel_legacy_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ cros_ec_accel_legacy_capture,
+ NULL);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct platform_driver cros_ec_accel_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_accel_legacy_probe,
+};
+module_platform_driver(cros_ec_accel_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC legacy accelerometer driver");
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
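Once the driver is bound, the values returned by cros_ec_accel_legacy_read() surface as standard IIO sysfs attributes. A minimal user-space sketch reading one axis (not part of the patch; the iio:device0 index is an assumption, pick the device whose name file reads cros-ec-accel-legacy):

	#include <stdio.h>

	int main(void)
	{
		/* Assumed device index; the attribute name follows the IIO ABI. */
		FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_accel_x_raw", "r");
		long raw;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &raw) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/*
		 * Per the IIO ABI, value = raw * scale; the scale reported by
		 * this driver is ACCEL_LEGACY_NSCALE * 1e-9, i.e. 0.009586168
		 * m/s^2 per LSB.
		 */
		printf("x = %ld raw, about %.6f m/s^2\n", raw, raw * 9586168e-9);
		return 0;
	}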
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index ed8343aeac9c..6c214783241c 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -88,7 +88,6 @@ static int da280_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info da280_info = {
- .driver_module = THIS_MODULE,
.read_raw = da280_read_raw,
};
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index c0c1620d2a2f..aa64bca00955 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -212,7 +212,6 @@ static int da311_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info da311_info = {
- .driver_module = THIS_MODULE,
.read_raw = da311_read_raw,
};
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index 656ca8e1927f..d87e2c751475 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -124,7 +124,6 @@ static int dmard06_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard06_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard06_read_raw,
};
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index d3a28f96565c..16a7e74f5e9a 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -93,7 +93,6 @@ static int dmard09_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard09_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard09_read_raw,
};
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index b8736cc75656..9518ea00167e 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -170,7 +170,6 @@ static int dmard10_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dmard10_info = {
- .driver_module = THIS_MODULE,
.read_raw = dmard10_read_raw,
};
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 2238a26aba63..c066a3bdbff7 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -225,7 +225,6 @@ static int accel_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info accel_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &accel_3d_read_raw,
.write_raw = &accel_3d_write_raw,
};
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 3f968c46e667..af53a1084ee5 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -34,6 +34,13 @@
#define KXCJK1013_DRV_NAME "kxcjk1013"
#define KXCJK1013_IRQ_NAME "kxcjk1013_event"
+#define KXTF9_REG_HP_XOUT_L 0x00
+#define KXTF9_REG_HP_XOUT_H 0x01
+#define KXTF9_REG_HP_YOUT_L 0x02
+#define KXTF9_REG_HP_YOUT_H 0x03
+#define KXTF9_REG_HP_ZOUT_L 0x04
+#define KXTF9_REG_HP_ZOUT_H 0x05
+
#define KXCJK1013_REG_XOUT_L 0x06
/*
* From low byte X axis register, all the other addresses of Y and Z can be
@@ -48,17 +55,33 @@
#define KXCJK1013_REG_DCST_RESP 0x0C
#define KXCJK1013_REG_WHO_AM_I 0x0F
-#define KXCJK1013_REG_INT_SRC1 0x16
+#define KXTF9_REG_TILT_POS_CUR 0x10
+#define KXTF9_REG_TILT_POS_PREV 0x11
+#define KXTF9_REG_INT_SRC1 0x15
+#define KXCJK1013_REG_INT_SRC1 0x16 /* compatible, but called INT_SRC2 in the KXTF9 datasheet */
#define KXCJK1013_REG_INT_SRC2 0x17
#define KXCJK1013_REG_STATUS_REG 0x18
#define KXCJK1013_REG_INT_REL 0x1A
#define KXCJK1013_REG_CTRL1 0x1B
-#define KXCJK1013_REG_CTRL2 0x1D
+#define KXTF9_REG_CTRL2 0x1C
+#define KXCJK1013_REG_CTRL2 0x1D /* mostly compatible, CTRL_REG3 in the KXTF9 datasheet */
#define KXCJK1013_REG_INT_CTRL1 0x1E
#define KXCJK1013_REG_INT_CTRL2 0x1F
+#define KXTF9_REG_INT_CTRL3 0x20
#define KXCJK1013_REG_DATA_CTRL 0x21
+#define KXTF9_REG_TILT_TIMER 0x28
#define KXCJK1013_REG_WAKE_TIMER 0x29
+#define KXTF9_REG_TDT_TIMER 0x2B
+#define KXTF9_REG_TDT_THRESH_H 0x2C
+#define KXTF9_REG_TDT_THRESH_L 0x2D
+#define KXTF9_REG_TDT_TAP_TIMER 0x2E
+#define KXTF9_REG_TDT_TOTAL_TIMER 0x2F
+#define KXTF9_REG_TDT_LATENCY_TIMER 0x30
+#define KXTF9_REG_TDT_WINDOW_TIMER 0x31
#define KXCJK1013_REG_SELF_TEST 0x3A
+#define KXTF9_REG_WAKE_THRESH 0x5A
+#define KXTF9_REG_TILT_ANGLE 0x5C
+#define KXTF9_REG_HYST_SET 0x5F
#define KXCJK1013_REG_WAKE_THRES 0x6A
#define KXCJK1013_REG_CTRL1_BIT_PC1 BIT(7)
@@ -67,14 +90,33 @@
#define KXCJK1013_REG_CTRL1_BIT_GSEL1 BIT(4)
#define KXCJK1013_REG_CTRL1_BIT_GSEL0 BIT(3)
#define KXCJK1013_REG_CTRL1_BIT_WUFE BIT(1)
-#define KXCJK1013_REG_INT_REG1_BIT_IEA BIT(4)
-#define KXCJK1013_REG_INT_REG1_BIT_IEN BIT(5)
+
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEU BIT(2) /* KXTF9 */
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEL BIT(3)
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEA BIT(4)
+#define KXCJK1013_REG_INT_CTRL1_BIT_IEN BIT(5)
+
+#define KXTF9_REG_TILT_BIT_LEFT_EDGE BIT(5)
+#define KXTF9_REG_TILT_BIT_RIGHT_EDGE BIT(4)
+#define KXTF9_REG_TILT_BIT_LOWER_EDGE BIT(3)
+#define KXTF9_REG_TILT_BIT_UPPER_EDGE BIT(2)
+#define KXTF9_REG_TILT_BIT_FACE_DOWN BIT(1)
+#define KXTF9_REG_TILT_BIT_FACE_UP BIT(0)
#define KXCJK1013_DATA_MASK_12_BIT 0x0FFF
#define KXCJK1013_MAX_STARTUP_TIME_US 100000
#define KXCJK1013_SLEEP_DELAY_MS 2000
+#define KXCJK1013_REG_INT_SRC1_BIT_TPS BIT(0) /* KXTF9 */
+#define KXCJK1013_REG_INT_SRC1_BIT_WUFS BIT(1)
+#define KXCJK1013_REG_INT_SRC1_MASK_TDTS (BIT(2) | BIT(3)) /* KXTF9 */
+#define KXCJK1013_REG_INT_SRC1_TAP_NONE 0
+#define KXCJK1013_REG_INT_SRC1_TAP_SINGLE BIT(2)
+#define KXCJK1013_REG_INT_SRC1_TAP_DOUBLE BIT(3)
+#define KXCJK1013_REG_INT_SRC1_BIT_DRDY BIT(4)
+
+/* KXCJK: INT_SOURCE2: motion detect, KXTF9: INT_SRC_REG1: tap detect */
#define KXCJK1013_REG_INT_SRC2_BIT_ZP BIT(0)
#define KXCJK1013_REG_INT_SRC2_BIT_ZN BIT(1)
#define KXCJK1013_REG_INT_SRC2_BIT_YP BIT(2)
@@ -88,6 +130,7 @@ enum kx_chipset {
KXCJK1013,
KXCJ91008,
KXTJ21009,
+ KXTF9,
KX_MAX_CHIPS /* this must be last */
};
@@ -128,15 +171,42 @@ enum kxcjk1013_range {
KXCJK1013_RANGE_8G,
};
-static const struct {
+struct kx_odr_map {
int val;
int val2;
int odr_bits;
-} samp_freq_table[] = { {0, 781000, 0x08}, {1, 563000, 0x09},
- {3, 125000, 0x0A}, {6, 250000, 0x0B}, {12, 500000, 0},
- {25, 0, 0x01}, {50, 0, 0x02}, {100, 0, 0x03},
- {200, 0, 0x04}, {400, 0, 0x05}, {800, 0, 0x06},
- {1600, 0, 0x07} };
+ int wuf_bits;
+};
+
+static const struct kx_odr_map samp_freq_table[] = {
+ { 0, 781000, 0x08, 0x00 },
+ { 1, 563000, 0x09, 0x01 },
+ { 3, 125000, 0x0A, 0x02 },
+ { 6, 250000, 0x0B, 0x03 },
+ { 12, 500000, 0x00, 0x04 },
+ { 25, 0, 0x01, 0x05 },
+ { 50, 0, 0x02, 0x06 },
+ { 100, 0, 0x03, 0x06 },
+ { 200, 0, 0x04, 0x06 },
+ { 400, 0, 0x05, 0x06 },
+ { 800, 0, 0x06, 0x06 },
+ { 1600, 0, 0x07, 0x06 },
+};
+
+static const char *const kxcjk1013_samp_freq_avail =
+ "0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600";
+
+static const struct kx_odr_map kxtf9_samp_freq_table[] = {
+ { 25, 0, 0x01, 0x00 },
+ { 50, 0, 0x02, 0x01 },
+ { 100, 0, 0x03, 0x01 },
+ { 200, 0, 0x04, 0x01 },
+ { 400, 0, 0x05, 0x01 },
+ { 800, 0, 0x06, 0x01 },
+};
+
+static const char *const kxtf9_samp_freq_avail =
+ "25 50 100 200 400 800";
/* Refer to section 4 of the specification */
static const struct {
@@ -188,6 +258,15 @@ static const struct {
{0x06, 3000},
{0x07, 2000},
},
+ /* KXTF9 */
+ {
+ {0x01, 81000},
+ {0x02, 41000},
+ {0x03, 21000},
+ {0x04, 11000},
+ {0x05, 5100},
+ {0x06, 2700},
+ },
};
static const struct {
@@ -198,23 +277,6 @@ static const struct {
{19163, 1, 0},
{38326, 0, 1} };
-static const struct {
- int val;
- int val2;
- int odr_bits;
-} wake_odr_data_rate_table[] = { {0, 781000, 0x00},
- {1, 563000, 0x01},
- {3, 125000, 0x02},
- {6, 250000, 0x03},
- {12, 500000, 0x04},
- {25, 0, 0x05},
- {50, 0, 0x06},
- {100, 0, 0x06},
- {200, 0, 0x06},
- {400, 0, 0x06},
- {800, 0, 0x06},
- {1600, 0, 0x06} };
-
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
enum kxcjk1013_mode mode)
{
@@ -341,9 +403,9 @@ static int kxcjk1013_chip_init(struct kxcjk1013_data *data)
}
if (data->active_high_intr)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEA;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEA;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEA;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEA;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -401,7 +463,7 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
- int ret;
+ int waketh_reg, ret;
ret = i2c_smbus_write_byte_data(data->client,
KXCJK1013_REG_WAKE_TIMER,
@@ -412,8 +474,9 @@ static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
return ret;
}
- ret = i2c_smbus_write_byte_data(data->client,
- KXCJK1013_REG_WAKE_THRES,
+ waketh_reg = data->chipset == KXTF9 ?
+ KXTF9_REG_WAKE_THRESH : KXCJK1013_REG_WAKE_THRES;
+ ret = i2c_smbus_write_byte_data(data->client, waketh_reg,
data->wake_thres);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_wake_thres\n");
@@ -449,9 +512,9 @@ static int kxcjk1013_setup_any_motion_interrupt(struct kxcjk1013_data *data,
}
if (status)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -509,9 +572,9 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
}
if (status)
- ret |= KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret |= KXCJK1013_REG_INT_CTRL1_BIT_IEN;
else
- ret &= ~KXCJK1013_REG_INT_REG1_BIT_IEN;
+ ret &= ~KXCJK1013_REG_INT_CTRL1_BIT_IEN;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_INT_CTRL1,
ret);
@@ -547,28 +610,30 @@ static int kxcjk1013_setup_new_data_interrupt(struct kxcjk1013_data *data,
return 0;
}
-static int kxcjk1013_convert_freq_to_bit(int val, int val2)
+static const struct kx_odr_map *kxcjk1013_find_odr_value(
+ const struct kx_odr_map *map, size_t map_size, int val, int val2)
{
int i;
- for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
- if (samp_freq_table[i].val == val &&
- samp_freq_table[i].val2 == val2) {
- return samp_freq_table[i].odr_bits;
- }
+ for (i = 0; i < map_size; ++i) {
+ if (map[i].val == val && map[i].val2 == val2)
+ return &map[i];
}
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-static int kxcjk1013_convert_wake_odr_to_bit(int val, int val2)
+static int kxcjk1013_convert_odr_value(const struct kx_odr_map *map,
+ size_t map_size, int odr_bits,
+ int *val, int *val2)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wake_odr_data_rate_table); ++i) {
- if (wake_odr_data_rate_table[i].val == val &&
- wake_odr_data_rate_table[i].val2 == val2) {
- return wake_odr_data_rate_table[i].odr_bits;
+ for (i = 0; i < map_size; ++i) {
+ if (map[i].odr_bits == odr_bits) {
+ *val = map[i].val;
+ *val2 = map[i].val2;
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -578,16 +643,24 @@ static int kxcjk1013_convert_wake_odr_to_bit(int val, int val2)
static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
{
int ret;
- int odr_bits;
enum kxcjk1013_mode store_mode;
+ const struct kx_odr_map *odr_setting;
ret = kxcjk1013_get_mode(data, &store_mode);
if (ret < 0)
return ret;
- odr_bits = kxcjk1013_convert_freq_to_bit(val, val2);
- if (odr_bits < 0)
- return odr_bits;
+ if (data->chipset == KXTF9)
+ odr_setting = kxcjk1013_find_odr_value(kxtf9_samp_freq_table,
+ ARRAY_SIZE(kxtf9_samp_freq_table),
+ val, val2);
+ else
+ odr_setting = kxcjk1013_find_odr_value(samp_freq_table,
+ ARRAY_SIZE(samp_freq_table),
+ val, val2);
+
+ if (IS_ERR(odr_setting))
+ return PTR_ERR(odr_setting);
/* To change ODR, the chip must be set to STANDBY as per spec */
ret = kxcjk1013_set_mode(data, STANDBY);
@@ -595,20 +668,16 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
return ret;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_DATA_CTRL,
- odr_bits);
+ odr_setting->odr_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing data_ctrl\n");
return ret;
}
- data->odr_bits = odr_bits;
-
- odr_bits = kxcjk1013_convert_wake_odr_to_bit(val, val2);
- if (odr_bits < 0)
- return odr_bits;
+ data->odr_bits = odr_setting->odr_bits;
ret = i2c_smbus_write_byte_data(data->client, KXCJK1013_REG_CTRL2,
- odr_bits);
+ odr_setting->wuf_bits);
if (ret < 0) {
dev_err(&data->client->dev, "Error writing reg_ctrl2\n");
return ret;
@@ -625,17 +694,14 @@ static int kxcjk1013_set_odr(struct kxcjk1013_data *data, int val, int val2)
static int kxcjk1013_get_odr(struct kxcjk1013_data *data, int *val, int *val2)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(samp_freq_table); ++i) {
- if (samp_freq_table[i].odr_bits == data->odr_bits) {
- *val = samp_freq_table[i].val;
- *val2 = samp_freq_table[i].val2;
- return IIO_VAL_INT_PLUS_MICRO;
- }
- }
-
- return -EINVAL;
+ if (data->chipset == KXTF9)
+ return kxcjk1013_convert_odr_value(kxtf9_samp_freq_table,
+ ARRAY_SIZE(kxtf9_samp_freq_table),
+ data->odr_bits, val, val2);
+ else
+ return kxcjk1013_convert_odr_value(samp_freq_table,
+ ARRAY_SIZE(samp_freq_table),
+ data->odr_bits, val, val2);
}
static int kxcjk1013_get_acc_reg(struct kxcjk1013_data *data, int axis)
@@ -886,13 +952,29 @@ static int kxcjk1013_buffer_postdisable(struct iio_dev *indio_dev)
return kxcjk1013_set_power_state(data, false);
}
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
- "0.781000 1.563000 3.125000 6.250000 12.500000 25 50 100 200 400 800 1600");
+static ssize_t kxcjk1013_get_samp_freq_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+ const char *str;
+
+ if (data->chipset == KXTF9)
+ str = kxtf9_samp_freq_avail;
+ else
+ str = kxcjk1013_samp_freq_avail;
+
+ return sprintf(buf, "%s\n", str);
+}
+
+static IIO_DEVICE_ATTR(in_accel_sampling_frequency_available, S_IRUGO,
+ kxcjk1013_get_samp_freq_avail, NULL, 0);
static IIO_CONST_ATTR(in_accel_scale_available, "0.009582 0.019163 0.038326");
static struct attribute *kxcjk1013_attributes[] = {
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_accel_sampling_frequency_available.dev_attr.attr,
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
NULL,
};
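Because the advertised rates now differ per chip, user space should consult in_accel_sampling_frequency_available before programming a rate. A small sketch (not part of the patch; the device index and the in_accel_sampling_frequency file name are assumptions following the usual IIO ABI):

	#include <stdio.h>

	int main(void)
	{
		const char *dir = "/sys/bus/iio/devices/iio:device0";	/* assumed index */
		char path[128], avail[128];
		FILE *f;

		snprintf(path, sizeof(path), "%s/in_accel_sampling_frequency_available", dir);
		f = fopen(path, "r");
		if (!f || !fgets(avail, sizeof(avail), f))
			return 1;
		fclose(f);
		printf("supported rates: %s", avail);	/* e.g. "25 50 100 200 400 800" on a KXTF9 */

		/* 100 Hz appears in both the KXCJK-1013 and KXTF9 lists, so it is a safe pick. */
		snprintf(path, sizeof(path), "%s/in_accel_sampling_frequency", dir);
		f = fopen(path, "w");
		if (!f)
			return 1;
		fprintf(f, "100\n");
		fclose(f);
		return 0;
	}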
@@ -950,7 +1032,6 @@ static const struct iio_info kxcjk1013_info = {
.write_event_value = kxcjk1013_write_event,
.write_event_config = kxcjk1013_write_event_config,
.read_event_config = kxcjk1013_read_event_config,
- .driver_module = THIS_MODULE,
};
static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};
@@ -1036,9 +1117,74 @@ static int kxcjk1013_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops kxcjk1013_trigger_ops = {
.set_trigger_state = kxcjk1013_data_rdy_trigger_set_state,
.try_reenable = kxcjk1013_trig_try_reen,
- .owner = THIS_MODULE,
};
+static void kxcjk1013_report_motion_event(struct iio_dev *indio_dev)
+{
+ struct kxcjk1013_data *data = iio_priv(indio_dev);
+
+ int ret = i2c_smbus_read_byte_data(data->client,
+ KXCJK1013_REG_INT_SRC2);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "Error reading reg_int_src2\n");
+ return;
+ }
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_X,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Y,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ data->timestamp);
+
+ if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL,
+ 0,
+ IIO_MOD_Z,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ data->timestamp);
+}
+
static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
@@ -1051,66 +1197,17 @@ static irqreturn_t kxcjk1013_event_handler(int irq, void *private)
goto ack_intr;
}
- if (ret & 0x02) {
- ret = i2c_smbus_read_byte_data(data->client,
- KXCJK1013_REG_INT_SRC2);
- if (ret < 0) {
- dev_err(&data->client->dev,
- "Error reading reg_int_src2\n");
- goto ack_intr;
- }
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_XN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_XP)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_X,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- data->timestamp);
-
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_YN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_YP)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Y,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- data->timestamp);
-
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZN)
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL,
- 0,
- IIO_MOD_Z,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- data->timestamp);
- if (ret & KXCJK1013_REG_INT_SRC2_BIT_ZP)
+ if (ret & KXCJK1013_REG_INT_SRC1_BIT_WUFS) {
+ if (data->chipset == KXTF9)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL,
0,
- IIO_MOD_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
data->timestamp);
+ else
+ kxcjk1013_report_motion_event(indio_dev);
}
ack_intr:
@@ -1403,6 +1500,7 @@ static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcjk1013", KXCJK1013},
{"kxcj91008", KXCJ91008},
{"kxtj21009", KXTJ21009},
+ {"kxtf9", KXTF9},
{"SMO8500", KXCJ91008},
{}
};
diff --git a/drivers/iio/accel/kxsd9-i2c.c b/drivers/iio/accel/kxsd9-i2c.c
index 95e20855d2ef..98fbb628d5bd 100644
--- a/drivers/iio/accel/kxsd9-i2c.c
+++ b/drivers/iio/accel/kxsd9-i2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 9af60ac70738..0c0df4fce420 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -390,7 +390,6 @@ static const struct iio_info kxsd9_info = {
.read_raw = &kxsd9_read_raw,
.write_raw = &kxsd9_write_raw,
.attrs = &kxsd9_attribute_group,
- .driver_module = THIS_MODULE,
};
/* Four channels apart from timestamp, scan mask = 0x0f */
diff --git a/drivers/iio/accel/kxsd9.h b/drivers/iio/accel/kxsd9.h
index 7e8a28168310..5e3ca212f5be 100644
--- a/drivers/iio/accel/kxsd9.h
+++ b/drivers/iio/accel/kxsd9.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index 4ea2ff623a6d..8b11604eed63 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -107,7 +107,6 @@ static int mc3230_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mc3230_info = {
- .driver_module = THIS_MODULE,
.read_raw = mc3230_read_raw,
};
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 6551085bedd7..da0ceaac46b5 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -199,7 +199,6 @@ static const struct iio_info mma7455_info = {
.attrs = &mma7455_group,
.read_raw = mma7455_read_raw,
.write_raw = mma7455_write_raw,
- .driver_module = THIS_MODULE,
};
#define MMA7455_CHANNEL(axis, idx) { \
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 42fa57e41bdd..f1a13724efb3 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -168,7 +168,6 @@ static int mma7660_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mma7660_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma7660_read_raw,
.attrs = &mma7660_attribute_group,
};
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index eb6e3dc789b2..bfd4bc806fc2 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -59,7 +59,9 @@
#define MMA8452_FF_MT_THS 0x17
#define MMA8452_FF_MT_THS_MASK 0x7f
#define MMA8452_FF_MT_COUNT 0x18
+#define MMA8452_FF_MT_CHAN_SHIFT 3
#define MMA8452_TRANSIENT_CFG 0x1d
+#define MMA8452_TRANSIENT_CFG_CHAN(chan) BIT((chan) + 1)
#define MMA8452_TRANSIENT_CFG_HPF_BYP BIT(0)
#define MMA8452_TRANSIENT_CFG_ELE BIT(4)
#define MMA8452_TRANSIENT_SRC 0x1e
@@ -69,6 +71,7 @@
#define MMA8452_TRANSIENT_THS 0x1f
#define MMA8452_TRANSIENT_THS_MASK GENMASK(6, 0)
#define MMA8452_TRANSIENT_COUNT 0x20
+#define MMA8452_TRANSIENT_CHAN_SHIFT 1
#define MMA8452_CTRL_REG1 0x2a
#define MMA8452_CTRL_ACTIVE BIT(0)
#define MMA8452_CTRL_DR_MASK GENMASK(5, 3)
@@ -107,6 +110,51 @@ struct mma8452_data {
const struct mma_chip_info *chip_info;
};
+ /**
+ * struct mma8452_event_regs - chip specific data related to events
+ * @ev_cfg: event config register address
+ * @ev_cfg_ele: latch bit in event config register
+ * @ev_cfg_chan_shift: number of the bit to enable events in X
+ * direction; in event config register
+ * @ev_src: event source register address
+ * @ev_ths: event threshold register address
+ * @ev_ths_mask: mask for the threshold value
+ * @ev_count: event count (period) register address
+ *
+ * Since not all chips supported by the driver support comparing high pass
+ * filtered data for events (interrupts), different interrupt sources are
+ * used for different chips and the relevant registers are included here.
+ */
+struct mma8452_event_regs {
+ u8 ev_cfg;
+ u8 ev_cfg_ele;
+ u8 ev_cfg_chan_shift;
+ u8 ev_src;
+ u8 ev_ths;
+ u8 ev_ths_mask;
+ u8 ev_count;
+};
+
+static const struct mma8452_event_regs ev_regs_accel_falling = {
+ .ev_cfg = MMA8452_FF_MT_CFG,
+ .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
+ .ev_cfg_chan_shift = MMA8452_FF_MT_CHAN_SHIFT,
+ .ev_src = MMA8452_FF_MT_SRC,
+ .ev_ths = MMA8452_FF_MT_THS,
+ .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
+ .ev_count = MMA8452_FF_MT_COUNT
+};
+
+static const struct mma8452_event_regs ev_regs_accel_rising = {
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = MMA8452_TRANSIENT_CHAN_SHIFT,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+};
+
/**
* struct mma_chip_info - chip specific data
* @chip_id: WHO_AM_I register's value
@@ -116,40 +164,16 @@ struct mma8452_data {
* @mma_scales: scale factors for converting register values
* to m/s^2; 3 modes: 2g, 4g, 8g; 2 integers
* per mode: m/s^2 and micro m/s^2
- * @ev_cfg: event config register address
- * @ev_cfg_ele: latch bit in event config register
- * @ev_cfg_chan_shift: number of the bit to enable events in X
- * direction; in event config register
- * @ev_src: event source register address
- * @ev_src_xe: bit in event source register that indicates
- * an event in X direction
- * @ev_src_ye: bit in event source register that indicates
- * an event in Y direction
- * @ev_src_ze: bit in event source register that indicates
- * an event in Z direction
- * @ev_ths: event threshold register address
- * @ev_ths_mask: mask for the threshold value
- * @ev_count: event count (period) register address
- *
- * Since not all chips supported by the driver support comparing high pass
- * filtered data for events (interrupts), different interrupt sources are
- * used for different chips and the relevant registers are included here.
+ * @all_events: all events supported by this chip
+ * @enabled_events: event flags enabled and handled by this driver
*/
struct mma_chip_info {
u8 chip_id;
const struct iio_chan_spec *channels;
int num_channels;
const int mma_scales[3][2];
- u8 ev_cfg;
- u8 ev_cfg_ele;
- u8 ev_cfg_chan_shift;
- u8 ev_src;
- u8 ev_src_xe;
- u8 ev_src_ye;
- u8 ev_src_ze;
- u8 ev_ths;
- u8 ev_ths_mask;
- u8 ev_count;
+ int all_events;
+ int enabled_events;
};
enum {
@@ -394,11 +418,11 @@ static ssize_t mma8452_show_os_ratio_avail(struct device *dev,
}
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
-static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
+static IIO_DEVICE_ATTR(in_accel_scale_available, 0444,
mma8452_show_scale_avail, NULL, 0);
static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
- S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
-static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO,
+ 0444, mma8452_show_hp_cutoff_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, 0444,
mma8452_show_os_ratio_avail, NULL, 0);
static int mma8452_get_samp_freq_index(struct mma8452_data *data,
@@ -602,9 +626,8 @@ static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode)
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
int val;
- const struct mma_chip_info *chip = data->chip_info;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, MMA8452_FF_MT_CFG);
if (val < 0)
return val;
@@ -614,29 +637,28 @@ static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
{
int val;
- const struct mma_chip_info *chip = data->chip_info;
if ((state && mma8452_freefall_mode_enabled(data)) ||
(!state && !(mma8452_freefall_mode_enabled(data))))
return 0;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, MMA8452_FF_MT_CFG);
if (val < 0)
return val;
if (state) {
- val |= BIT(idx_x + chip->ev_cfg_chan_shift);
- val |= BIT(idx_y + chip->ev_cfg_chan_shift);
- val |= BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_x + MMA8452_FF_MT_CHAN_SHIFT);
+ val |= BIT(idx_y + MMA8452_FF_MT_CHAN_SHIFT);
+ val |= BIT(idx_z + MMA8452_FF_MT_CHAN_SHIFT);
val &= ~MMA8452_FF_MT_CFG_OAE;
} else {
- val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_x + MMA8452_FF_MT_CHAN_SHIFT);
+ val &= ~BIT(idx_y + MMA8452_FF_MT_CHAN_SHIFT);
+ val &= ~BIT(idx_z + MMA8452_FF_MT_CHAN_SHIFT);
val |= MMA8452_FF_MT_CFG_OAE;
}
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, MMA8452_FF_MT_CFG, val);
}
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
@@ -740,7 +762,37 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int mma8452_read_thresh(struct iio_dev *indio_dev,
+static int mma8452_get_event_regs(struct mma8452_data *data,
+ const struct iio_chan_spec *chan, enum iio_event_direction dir,
+ const struct mma8452_event_regs **ev_reg)
+{
+ if (!chan)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ if ((data->chip_info->all_events
+ & MMA8452_INT_TRANS) &&
+ (data->chip_info->enabled_events
+ & MMA8452_INT_TRANS))
+ *ev_reg = &ev_regs_accel_rising;
+ else
+ *ev_reg = &ev_regs_accel_falling;
+ return 0;
+ case IIO_EV_DIR_FALLING:
+ *ev_reg = &ev_regs_accel_falling;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mma8452_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
@@ -749,21 +801,24 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
int ret, us, power_mode;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (info) {
case IIO_EV_INFO_VALUE:
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_ths);
+ ret = i2c_smbus_read_byte_data(data->client, ev_regs->ev_ths);
if (ret < 0)
return ret;
- *val = ret & data->chip_info->ev_ths_mask;
+ *val = ret & ev_regs->ev_ths_mask;
return IIO_VAL_INT;
case IIO_EV_INFO_PERIOD:
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_count);
+ ret = i2c_smbus_read_byte_data(data->client, ev_regs->ev_count);
if (ret < 0)
return ret;
@@ -800,7 +855,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
}
}
-static int mma8452_write_thresh(struct iio_dev *indio_dev,
+static int mma8452_write_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
@@ -809,14 +864,18 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
int ret, reg, steps;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (info) {
case IIO_EV_INFO_VALUE:
- if (val < 0 || val > MMA8452_TRANSIENT_THS_MASK)
+ if (val < 0 || val > ev_regs->ev_ths_mask)
return -EINVAL;
- return mma8452_change_config(data, data->chip_info->ev_ths,
- val);
+ return mma8452_change_config(data, ev_regs->ev_ths, val);
case IIO_EV_INFO_PERIOD:
ret = mma8452_get_power_mode(data);
@@ -830,8 +889,7 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
if (steps < 0 || steps > 0xff)
return -EINVAL;
- return mma8452_change_config(data, data->chip_info->ev_count,
- steps);
+ return mma8452_change_config(data, ev_regs->ev_count, steps);
case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
reg = i2c_smbus_read_byte_data(data->client,
@@ -861,23 +919,24 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir)
{
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int ret;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
switch (dir) {
case IIO_EV_DIR_FALLING:
return mma8452_freefall_mode_enabled(data);
case IIO_EV_DIR_RISING:
- if (mma8452_freefall_mode_enabled(data))
- return 0;
-
ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_cfg);
+ ev_regs->ev_cfg);
if (ret < 0)
return ret;
return !!(ret & BIT(chan->scan_index +
- chip->ev_cfg_chan_shift));
+ ev_regs->ev_cfg_chan_shift));
default:
return -EINVAL;
}
@@ -890,8 +949,12 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
int state)
{
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int val, ret;
+ const struct mma8452_event_regs *ev_regs;
+
+ ret = mma8452_get_event_regs(data, chan, dir, &ev_regs);
+ if (ret)
+ return ret;
ret = mma8452_set_runtime_pm_state(data->client, state);
if (ret)
@@ -901,28 +964,30 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
return mma8452_set_freefall_mode(data, state);
case IIO_EV_DIR_RISING:
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ val = i2c_smbus_read_byte_data(data->client, ev_regs->ev_cfg);
if (val < 0)
return val;
if (state) {
if (mma8452_freefall_mode_enabled(data)) {
- val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
- val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_x + ev_regs->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + ev_regs->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + ev_regs->ev_cfg_chan_shift);
val |= MMA8452_FF_MT_CFG_OAE;
}
- val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val |= BIT(chan->scan_index +
+ ev_regs->ev_cfg_chan_shift);
} else {
if (mma8452_freefall_mode_enabled(data))
return 0;
- val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val &= ~BIT(chan->scan_index +
+ ev_regs->ev_cfg_chan_shift);
}
- val |= chip->ev_cfg_ele;
+ val |= ev_regs->ev_cfg_ele;
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, ev_regs->ev_cfg, val);
default:
return -EINVAL;
}
@@ -934,35 +999,25 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
s64 ts = iio_get_time_ns(indio_dev);
int src;
- src = i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src);
+ src = i2c_smbus_read_byte_data(data->client, MMA8452_TRANSIENT_SRC);
if (src < 0)
return;
- if (mma8452_freefall_mode_enabled(data)) {
- iio_push_event(indio_dev,
- IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_EV_TYPE_MAG,
- IIO_EV_DIR_FALLING),
- ts);
- return;
- }
-
- if (src & data->chip_info->ev_src_xe)
+ if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
- if (src & data->chip_info->ev_src_ye)
+ if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
IIO_EV_TYPE_MAG,
IIO_EV_DIR_RISING),
ts);
- if (src & data->chip_info->ev_src_ze)
+ if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
IIO_EV_TYPE_MAG,
@@ -974,7 +1029,6 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
{
struct iio_dev *indio_dev = p;
struct mma8452_data *data = iio_priv(indio_dev);
- const struct mma_chip_info *chip = data->chip_info;
int ret = IRQ_NONE;
int src;
@@ -982,15 +1036,29 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
if (src < 0)
return IRQ_NONE;
+ if (!(src & data->chip_info->enabled_events))
+ return IRQ_NONE;
+
if (src & MMA8452_INT_DRDY) {
iio_trigger_poll_chained(indio_dev->trig);
ret = IRQ_HANDLED;
}
- if ((src & MMA8452_INT_TRANS &&
- chip->ev_src == MMA8452_TRANSIENT_SRC) ||
- (src & MMA8452_INT_FF_MT &&
- chip->ev_src == MMA8452_FF_MT_SRC)) {
+ if (src & MMA8452_INT_FF_MT) {
+ if (mma8452_freefall_mode_enabled(data)) {
+ s64 ts = iio_get_time_ns(indio_dev);
+
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ }
+ ret = IRQ_HANDLED;
+ }
+
+ if (src & MMA8452_INT_TRANS) {
mma8452_transient_interrupt(indio_dev);
ret = IRQ_HANDLED;
}
@@ -1020,8 +1088,8 @@ done:
}
static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval)
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
{
int ret;
struct mma8452_data *data = iio_priv(indio_dev);
@@ -1222,96 +1290,87 @@ static const struct mma_chip_info mma_chip_info_table[] = {
* g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
*/
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config()
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8452] = {
.chip_id = MMA8452_DEVICE_ID,
.channels = mma8452_channels,
.num_channels = ARRAY_SIZE(mma8452_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config()
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8453] = {
.chip_id = MMA8453_DEVICE_ID,
.channels = mma8453_channels,
.num_channels = ARRAY_SIZE(mma8453_channels),
.mma_scales = { {0, 38307}, {0, 76614}, {0, 153228} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config()
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
[mma8652] = {
.chip_id = MMA8652_DEVICE_ID,
.channels = mma8652_channels,
.num_channels = ARRAY_SIZE(mma8652_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
- .ev_cfg = MMA8452_FF_MT_CFG,
- .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
- .ev_cfg_chan_shift = 3,
- .ev_src = MMA8452_FF_MT_SRC,
- .ev_src_xe = MMA8452_FF_MT_SRC_XHE,
- .ev_src_ye = MMA8452_FF_MT_SRC_YHE,
- .ev_src_ze = MMA8452_FF_MT_SRC_ZHE,
- .ev_ths = MMA8452_FF_MT_THS,
- .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
- .ev_count = MMA8452_FF_MT_COUNT,
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_FF_MT,
},
[mma8653] = {
.chip_id = MMA8653_DEVICE_ID,
.channels = mma8653_channels,
.num_channels = ARRAY_SIZE(mma8653_channels),
.mma_scales = { {0, 38307}, {0, 76614}, {0, 153228} },
- .ev_cfg = MMA8452_FF_MT_CFG,
- .ev_cfg_ele = MMA8452_FF_MT_CFG_ELE,
- .ev_cfg_chan_shift = 3,
- .ev_src = MMA8452_FF_MT_SRC,
- .ev_src_xe = MMA8452_FF_MT_SRC_XHE,
- .ev_src_ye = MMA8452_FF_MT_SRC_YHE,
- .ev_src_ze = MMA8452_FF_MT_SRC_ZHE,
- .ev_ths = MMA8452_FF_MT_THS,
- .ev_ths_mask = MMA8452_FF_MT_THS_MASK,
- .ev_count = MMA8452_FF_MT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config()
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_FF_MT,
},
[fxls8471] = {
.chip_id = FXLS8471_DEVICE_ID,
.channels = mma8451_channels,
.num_channels = ARRAY_SIZE(mma8451_channels),
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
- .ev_cfg = MMA8452_TRANSIENT_CFG,
- .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
- .ev_cfg_chan_shift = 1,
- .ev_src = MMA8452_TRANSIENT_SRC,
- .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
- .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
- .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
- .ev_ths = MMA8452_TRANSIENT_THS,
- .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
- .ev_count = MMA8452_TRANSIENT_COUNT,
+ /*
+ * Although we enable the interrupt sources once and for
+ * all here, the event detection itself is not enabled until
+ * userspace asks for it via mma8452_write_event_config()
+ */
+ .all_events = MMA8452_INT_DRDY |
+ MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
+ .enabled_events = MMA8452_INT_TRANS |
+ MMA8452_INT_FF_MT,
},
};
@@ -1332,12 +1391,11 @@ static const struct iio_info mma8452_info = {
.read_raw = &mma8452_read_raw,
.write_raw = &mma8452_write_raw,
.event_attrs = &mma8452_event_attribute_group,
- .read_event_value = &mma8452_read_thresh,
- .write_event_value = &mma8452_write_thresh,
+ .read_event_value = &mma8452_read_event_value,
+ .write_event_value = &mma8452_write_event_value,
.read_event_config = &mma8452_read_event_config,
.write_event_config = &mma8452_write_event_config,
.debugfs_reg_access = &mma8452_reg_access_dbg,
- .driver_module = THIS_MODULE,
};
static const unsigned long mma8452_scan_masks[] = {0x7, 0};
@@ -1368,7 +1426,6 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops mma8452_trigger_ops = {
.set_trigger_state = mma8452_data_rdy_trigger_set_state,
.validate_device = iio_trigger_validate_own_device,
- .owner = THIS_MODULE,
};
static int mma8452_trigger_setup(struct iio_dev *indio_dev)
@@ -1509,16 +1566,6 @@ static int mma8452_probe(struct i2c_client *client,
return ret;
if (client->irq) {
- /*
- * Although we enable the interrupt sources once and for
- * all here the event detection itself is not enabled until
- * userspace asks for it by mma8452_write_event_config()
- */
- int supported_interrupts = MMA8452_INT_DRDY |
- MMA8452_INT_TRANS |
- MMA8452_INT_FF_MT;
- int enabled_interrupts = MMA8452_INT_TRANS |
- MMA8452_INT_FF_MT;
int irq2;
irq2 = of_irq_get_byname(client->dev.of_node, "INT2");
@@ -1527,8 +1574,8 @@ static int mma8452_probe(struct i2c_client *client,
dev_dbg(&client->dev, "using interrupt line INT2\n");
} else {
ret = i2c_smbus_write_byte_data(client,
- MMA8452_CTRL_REG5,
- supported_interrupts);
+ MMA8452_CTRL_REG5,
+ data->chip_info->all_events);
if (ret < 0)
return ret;
@@ -1536,8 +1583,8 @@ static int mma8452_probe(struct i2c_client *client,
}
ret = i2c_smbus_write_byte_data(client,
- MMA8452_CTRL_REG4,
- enabled_interrupts);
+ MMA8452_CTRL_REG4,
+ data->chip_info->enabled_events);
if (ret < 0)
return ret;
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 1f53f08476f5..da7c21504f38 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -332,7 +332,6 @@ static const struct iio_chan_spec mma9551_channels[] = {
};
static const struct iio_info mma9551_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma9551_read_raw,
.read_event_config = mma9551_read_event_config,
.write_event_config = mma9551_write_event_config,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 36bf19733be0..b52a3f182190 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -987,7 +987,6 @@ static const struct iio_chan_spec mma9553_channels[] = {
};
static const struct iio_info mma9553_info = {
- .driver_module = THIS_MODULE,
.read_raw = mma9553_read_raw,
.write_raw = mma9553_write_raw,
.read_event_config = mma9553_read_event_config,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index c23f47af7256..58099e40d717 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -264,7 +264,6 @@ static int mxc4005_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mxc4005_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxc4005_read_raw,
.write_raw = mxc4005_write_raw,
.attrs = &mxc4005_attrs_group,
@@ -376,7 +375,6 @@ static int mxc4005_trigger_try_reen(struct iio_trigger *trig)
static const struct iio_trigger_ops mxc4005_trigger_ops = {
.set_trigger_state = mxc4005_set_trigger_state,
.try_reenable = mxc4005_trigger_try_reen,
- .owner = THIS_MODULE,
};
static int mxc4005_chip_init(struct mxc4005_data *data)
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index 0abad6948201..ddd50d1781c5 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -78,7 +78,6 @@ static int mxc6255_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mxc6255_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxc6255_read_raw,
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index 39ab210c44f6..f33dadf7b262 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -1454,7 +1454,6 @@ static const struct iio_info sca3000_info = {
.write_event_value = &sca3000_write_event_value,
.read_event_config = &sca3000_read_event_config,
.write_event_config = &sca3000_write_event_config,
- .driver_module = THIS_MODULE,
};
static int sca3000_probe(struct spi_device *spi)
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 0fe521609a3a..2f931e4837e5 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -32,6 +32,8 @@ enum st_accel_type {
H3LIS331DL,
LIS331DL,
LIS3LV02DL,
+ LIS2DW12,
+ LIS3DHH,
ST_ACCEL_MAX,
};
@@ -52,6 +54,8 @@ enum st_accel_type {
#define LIS2DH12_ACCEL_DEV_NAME "lis2dh12_accel"
#define LIS3L02DQ_ACCEL_DEV_NAME "lis3l02dq"
#define LNG2DM_ACCEL_DEV_NAME "lng2dm"
+#define LIS2DW12_ACCEL_DEV_NAME "lis2dw12"
+#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 752856b3a849..460aa58e0159 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -159,12 +159,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x10,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x25,
.mask_ihl = 0x02,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -229,14 +233,24 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x02,
- .mask_int2 = 0x10,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x02,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -313,12 +327,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x08,
},
.drdy_irq = {
- .addr = 0x23,
- .mask_int1 = 0x80,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x80,
+ },
.addr_ihl = 0x23,
.mask_ihl = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
.ig1 = {
.en_addr = 0x23,
.en_mask = 0x08,
@@ -387,9 +405,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x01,
},
.drdy_irq = {
- .addr = 0x21,
- .mask_int1 = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x04,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -444,14 +467,24 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x04,
- .mask_int2 = 0x20,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x04,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x20,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -513,9 +546,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x02,
- .mask_int2 = 0x10,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x02,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
@@ -567,9 +605,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.bdu = {
},
.drdy_irq = {
- .addr = 0x21,
- .mask_int1 = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x04,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x21,
@@ -635,12 +678,16 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
},
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x10,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
.addr_ihl = 0x25,
.mask_ihl = 0x02,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
},
.sim = {
.addr = 0x23,
@@ -649,6 +696,139 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = true,
.bootime = 2,
},
+ {
+ .wai = 0x44,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2DW12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 12, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .addr = 0x25,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(976),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(1952),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(3904),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(7808),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x21,
+ .mask = 0x08,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x20,
+ },
+ .int2 = {
+ .addr = 0x24,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x20,
+ },
+ .addr_ihl = 0x22,
+ .mask_ihl = 0x08,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x01,
+ },
+ },
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(0),
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
+ {
+ .wai = 0x11,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS3DHH_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_16bit_channels,
+ .odr = {
+ /* just ODR = 1100Hz available */
+ .odr_avl = {
+ { .hz = 1100, .value = 0x00, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0x80,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .fs = {
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .gain = IIO_G_TO_M_S_2(76),
+ },
+ },
+ },
+ .bdu = {
+ .addr = 0x20,
+ .mask = 0x01,
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x21,
+ .mask = 0x80,
+ .addr_od = 0x23,
+ .mask_od = 0x04,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x80,
+ .addr_od = 0x23,
+ .mask_od = 0x08,
+ },
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .multi_read_bit = false,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -721,7 +901,6 @@ static const struct attribute_group st_accel_attribute_group = {
};
static const struct iio_info accel_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_accel_attribute_group,
.read_raw = &st_accel_read_raw,
.write_raw = &st_accel_write_raw,
@@ -730,7 +909,6 @@ static const struct iio_info accel_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_accel_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
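The st_accel_core hunks above convert the flat mask_int1/mask_int2/addr_od fields into per-pin sub-structures plus an explicit status-register descriptor, which is what lets the new LIS2DW12 and LIS3DHH entries give INT1 and INT2 different open-drain registers. A sketch of the layout those table entries imply (field names mirror the hunks; the real definitions live in the st_sensors headers and may differ in detail):

/* Sketch: per-pin data-ready IRQ description. */
struct st_sensor_int_drdy_sketch {
	u8 addr;	/* routing register for this pin */
	u8 mask;	/* DRDY enable bit */
	u8 addr_od;	/* open-drain configuration register */
	u8 mask_od;	/* open-drain enable bit */
};

struct st_sensor_data_ready_irq_sketch {
	struct st_sensor_int_drdy_sketch int1;
	struct st_sensor_int_drdy_sketch int2;
	u8 addr_ihl;			/* active-low configuration */
	u8 mask_ihl;
	struct {
		u8 addr;		/* status register */
		u8 mask;		/* data-ready bits, e.g. 0x07 = X/Y/Z */
	} stat_drdy;
};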
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 18cafb9f2468..363429b5686c 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -94,6 +94,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lng2dm-accel",
.data = LNG2DM_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2dw12",
+ .data = LIS2DW12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -129,6 +133,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL },
{ LIS331DL_ACCEL_DEV_NAME, LIS331DL },
{ LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL },
+ { LIS2DW12_ACCEL_DEV_NAME, LIS2DW12 },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 915fa49085f7..dcc9bd243a52 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -82,6 +82,14 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis331dl-accel",
.data = LIS331DL_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2dw12",
+ .data = LIS2DW12_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3dhh",
+ .data = LIS3DHH_ACCEL_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -133,6 +141,8 @@ static const struct spi_device_id st_accel_id_table[] = {
{ H3LIS331DL_ACCEL_DEV_NAME },
{ LIS331DL_ACCEL_DEV_NAME },
{ LIS3LV02DL_ACCEL_DEV_NAME },
+ { LIS2DW12_ACCEL_DEV_NAME },
+ { LIS3DHH_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index e31023dc5f1b..cacc0da2f874 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -237,7 +237,6 @@ static int stk8312_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops stk8312_trigger_ops = {
.set_trigger_state = stk8312_data_rdy_trigger_set_state,
- .owner = THIS_MODULE,
};
static int stk8312_set_sample_rate(struct stk8312_data *data, u8 rate)
@@ -421,7 +420,6 @@ static int stk8312_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk8312_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk8312_read_raw,
.write_raw = stk8312_write_raw,
.attrs = &stk8312_attribute_group,
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 300d955bad00..576b6b140f08 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -179,7 +179,6 @@ static int stk8ba50_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops stk8ba50_trigger_ops = {
.set_trigger_state = stk8ba50_data_rdy_trigger_set_state,
- .owner = THIS_MODULE,
};
static int stk8ba50_set_power(struct stk8ba50_data *data, bool mode)
@@ -307,7 +306,6 @@ static int stk8ba50_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk8ba50_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk8ba50_read_raw,
.write_raw = stk8ba50_write_raw,
.attrs = &stk8ba50_attribute_group,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 1d13bf03c758..ef86296b8b0d 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -477,12 +477,13 @@ config MAX9611
called max9611.
config MCP320X
- tristate "Microchip Technology MCP3x01/02/04/08"
+ tristate "Microchip Technology MCP3x01/02/04/08 and MCP3550/1/3"
depends on SPI
help
Say yes here to build support for Microchip Technology's
MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204,
- MCP3208 or MCP3301 analog to digital converter.
+ MCP3208, MCP3301, MCP3550, MCP3551 and MCP3553 analog to digital
+ converters.
This driver can also be built as a module. If so, the module will be
called mcp320x.
@@ -595,7 +596,7 @@ config QCOM_SPMI_VADC
config RCAR_GYRO_ADC
tristate "Renesas R-Car GyroADC driver"
- depends on ARCH_RCAR_GEN2 || (ARM && COMPILE_TEST)
+ depends on ARCH_RCAR_GEN2 || COMPILE_TEST
help
Say yes here to build support for the GyroADC found in Renesas
R-Car Gen2 SoCs. This block is a simple SPI offload engine for
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9874e05f52d7..9572c1090f35 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for IIO ADC drivers
#
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index b8d5cfd57ec4..605eb5e7e829 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -280,7 +280,6 @@ static AD7266_DECLARE_DIFF_CHANNELS_FIXED(u, 'u');
static const struct iio_info ad7266_info = {
.read_raw = &ad7266_read_raw,
.update_scan_mode = &ad7266_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const unsigned long ad7266_available_scan_masks[] = {
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 1d90b02732bb..a862b5d8fb4b 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -461,7 +461,6 @@ static const struct iio_info ad7291_info = {
.write_event_config = &ad7291_write_event_config,
.read_event_value = &ad7291_read_event_value,
.write_event_value = &ad7291_write_event_value,
- .driver_module = THIS_MODULE,
};
static int ad7291_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index e399bf04c73a..2b20c6c8ec7f 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -280,7 +280,6 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7298_info = {
.read_raw = &ad7298_read_raw,
.update_scan_mode = ad7298_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ad7298_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index b7ecf9aab90f..b7706bf10ffe 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -195,7 +195,6 @@ static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
};
static const struct iio_info ad7476_info = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7476_read_raw,
};
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index ce45037295d8..3ae14fc8c649 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -185,7 +185,6 @@ static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
};
static const struct iio_info ad7766_info = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7766_read_raw,
};
@@ -208,7 +207,6 @@ static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
}
static const struct iio_trigger_ops ad7766_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ad7766_set_trigger_state,
.validate_device = iio_trigger_validate_own_device,
};
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 34e353c43ac8..70fbf92f9827 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -308,13 +308,11 @@ static const struct iio_info ad7791_info = {
.read_raw = &ad7791_read_raw,
.attrs = &ad7791_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7791_no_filter_info = {
.read_raw = &ad7791_read_raw,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static int ad7791_setup(struct ad7791_state *st,
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 47c3d7f32900..801afb61310b 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -563,7 +563,6 @@ static const struct iio_info ad7793_info = {
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7797_info = {
@@ -572,7 +571,6 @@ static const struct iio_info ad7797_info = {
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
.attrs = &ad7793_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
#define DECLARE_AD7793_CHANNELS(_name, _b, _sb, _s) \
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 7a483bfbd70c..205c0f1761aa 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -229,7 +229,6 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
static const struct iio_info ad7887_info = {
.read_raw = &ad7887_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad7887_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 77a675e11ebb..ffb7e089969c 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -262,7 +262,6 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7923_info = {
.read_raw = &ad7923_read_raw,
.update_scan_mode = ad7923_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ad7923_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 22426ae4af97..e1da67d5ee22 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -526,13 +526,11 @@ static const struct attribute_group ad799x_event_attrs_group = {
static const struct iio_info ad7991_info = {
.read_raw = &ad799x_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
static const struct iio_info ad7993_4_7_8_noirq_info = {
.read_raw = &ad799x_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
@@ -543,7 +541,6 @@ static const struct iio_info ad7993_4_7_8_irq_info = {
.write_event_config = &ad799x_write_event_config,
.read_event_value = &ad799x_read_event_value,
.write_event_value = &ad799x_write_event_value,
- .driver_module = THIS_MODULE,
.update_scan_mode = ad799x_update_scan_mode,
};
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 22c4c17cd996..cf1b048b0665 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -463,7 +463,6 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig)
EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
static const struct iio_trigger_ops ad_sd_trigger_ops = {
- .owner = THIS_MODULE,
};
static int ad_sd_probe_trigger(struct iio_dev *indio_dev)
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index c02b23d675cb..8a958d5f1905 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -165,7 +165,6 @@ static int aspeed_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info aspeed_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = aspeed_adc_read_raw,
.write_raw = aspeed_adc_write_raw,
.debugfs_reg_access = aspeed_adc_reg_access,
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index a70ef7fec95f..755a493c2a2c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -348,7 +348,6 @@ static int at91_adc_reenable_trigger(struct iio_trigger *trig)
}
static const struct iio_trigger_ops at91_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &at91_adc_configure_trigger,
.try_reenable = &at91_adc_reenable_trigger,
};
@@ -584,7 +583,6 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
static const struct iio_info at91_adc_info = {
.read_raw = &at91_adc_read_raw,
.write_raw = &at91_adc_write_raw,
- .driver_module = THIS_MODULE,
};
static void at91_adc_hw_init(struct at91_adc_state *st)
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 15109728cae7..3836d4222a3e 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -594,7 +594,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops at91_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &at91_adc_configure_trigger,
};
@@ -976,7 +975,6 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
}
static const struct iio_info at91_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &at91_adc_read_raw,
};
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 11e177180ea0..a30a97245e91 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -464,12 +464,10 @@ static int axp20x_write_raw(struct iio_dev *indio_dev,
static const struct iio_info axp20x_adc_iio_info = {
.read_raw = axp20x_read_raw,
.write_raw = axp20x_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_info axp22x_adc_iio_info = {
.read_raw = axp22x_read_raw,
- .driver_module = THIS_MODULE,
};
static int axp20x_adc_rate(int rate)
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 462a99c13e7a..60c9e853dd81 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -183,7 +183,6 @@ static int axp288_adc_set_state(struct regmap *regmap)
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
- .driver_module = THIS_MODULE,
};
static int axp288_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 7f4f9c4150e3..7af59a4bbd8d 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -492,7 +492,6 @@ static int iproc_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info iproc_adc_iio_info = {
.read_raw = &iproc_adc_read_raw,
- .driver_module = THIS_MODULE,
};
#define IPROC_ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 71c806ecc722..72d8fa94ab31 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -277,7 +277,6 @@ static irqreturn_t berlin2_adc_tsen_irq(int irq, void *private)
}
static const struct iio_info berlin2_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = berlin2_adc_read_raw,
};
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 91636c0ba5b5..707d8b24b072 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -262,7 +262,6 @@ static int cc10001_update_scan_mode(struct iio_dev *indio_dev,
}
static const struct iio_info cc10001_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cc10001_adc_read_raw,
.update_scan_mode = &cc10001_update_scan_mode,
};
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index 6e419d5a7c14..3576ec73ec23 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -932,7 +932,6 @@ err_unlock:
static const struct iio_info cpcap_adc_info = {
.read_raw = &cpcap_adc_read,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index 3445107e10b7..0a5d9ce79164 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -249,7 +249,6 @@ static int da9150_gpadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info da9150_gpadc_info = {
.read_raw = &da9150_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
#define DA9150_GPADC_CHANNEL(_id, _hw_id, _type, chan_info, \
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index ab8d6aed5085..c64c6675cae6 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -479,7 +479,6 @@ static const struct iio_info dln2_adc_info = {
.read_raw = dln2_adc_read_raw,
.write_raw = dln2_adc_write_raw,
.update_scan_mode = dln2_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
@@ -604,10 +603,6 @@ static void dln2_adc_event(struct platform_device *pdev, u16 echo,
iio_trigger_poll(dln2->trig);
}
-static const struct iio_trigger_ops dln2_adc_trigger_ops = {
- .owner = THIS_MODULE,
-};
-
static int dln2_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -665,7 +660,6 @@ static int dln2_adc_probe(struct platform_device *pdev)
dev_err(dev, "failed to allocate trigger\n");
return -ENOMEM;
}
- dln2->trig->ops = &dln2_adc_trigger_ops;
iio_trigger_set_drvdata(dln2->trig, dln2);
devm_iio_trigger_register(dev, dln2->trig);
iio_trigger_set_immutable(indio_dev, dln2->trig);
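Deleting the empty dln2_adc_trigger_ops (and the .owner assignments dropped throughout this series) assumes the trigger core both derives the owner at registration time and tolerates a NULL ops pointer. A sketch of the guarded call this relies on (illustrative of the core behaviour, not copied from it):

/* Sketch: core-side trigger state change once drivers may omit ops. */
static int sketch_trigger_set_state(struct iio_trigger *trig, bool state)
{
	if (trig->ops && trig->ops->set_trigger_state)
		return trig->ops->set_trigger_state(trig, state);

	return 0;	/* no ops registered: nothing to do */
}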
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index fef15c0d7c9c..4ebda8ab54fe 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -322,7 +322,6 @@ static const struct iio_chan_spec envelope_detector_iio_channel = {
static const struct iio_info envelope_detector_info = {
.read_raw = &envelope_detector_read_raw,
- .driver_module = THIS_MODULE,
};
static int envelope_detector_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index a179ac476c6d..81c901507ad2 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -150,7 +150,6 @@ static int ep93xx_read_raw(struct iio_dev *iiodev,
}
static const struct iio_info ep93xx_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = ep93xx_read_raw,
};
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 6c5a7be9f8c1..f10443f92e4c 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -657,7 +657,6 @@ static int exynos_adc_reg_access(struct iio_dev *indio_dev,
static const struct iio_info exynos_adc_iio_info = {
.read_raw = &exynos_read_raw,
.debugfs_reg_access = &exynos_adc_reg_access,
- .driver_module = THIS_MODULE,
};
#define ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index adf7dc712937..6f6c9a348158 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -408,7 +408,6 @@ static const struct iio_chan_spec hi8435_channels[] = {
};
static const struct iio_info hi8435_info = {
- .driver_module = THIS_MODULE,
.read_raw = hi8435_read_raw,
.read_event_config = hi8435_read_event_config,
.write_event_config = hi8435_write_event_config,
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 27005d84ed73..d10b9f13d557 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -374,7 +374,6 @@ static const struct attribute_group hx711_attribute_group = {
};
static const struct iio_info hx711_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = hx711_read_raw,
.write_raw = hx711_write_raw,
.write_raw_get_fmt = hx711_write_raw_get_fmt,
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 254b29a68b9d..cfab31162845 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -412,7 +412,6 @@ static int imx7d_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info imx7d_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &imx7d_adc_read_raw,
.debugfs_reg_access = &imx7d_adc_reg_access,
};
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index f387b972e4f4..84a43871f7dc 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -123,7 +123,7 @@ struct ina2xx_chip_info {
struct task_struct *task;
const struct ina2xx_config *config;
struct mutex state_lock;
- unsigned int shunt_resistor;
+ unsigned int shunt_resistor_uohm;
int avg;
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
@@ -436,7 +436,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
/*
* Set current LSB to 1mA, shunt is in uOhms
* (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-6. The only remaining parameter is RShunt.
+ * of 1.0 x10-3. The only remaining parameter is RShunt.
* There is no need to expose the CALIBRATION register
* to the user for now. But we need to reset this register
* if the user updates RShunt after driver init, e.g. upon
@@ -445,7 +445,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
{
u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
+ chip->shunt_resistor_uohm);
return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
}
@@ -455,7 +455,7 @@ static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
if (val <= 0 || val > chip->config->calibration_factor)
return -EINVAL;
- chip->shunt_resistor = val;
+ chip->shunt_resistor_uohm = val;
return 0;
}
@@ -465,8 +465,9 @@ static ssize_t ina2xx_shunt_resistor_show(struct device *dev,
char *buf)
{
struct ina2xx_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
+ int vals[2] = { chip->shunt_resistor_uohm, 1000000 };
- return sprintf(buf, "%d\n", chip->shunt_resistor);
+ return iio_format_value(buf, IIO_VAL_FRACTIONAL, 1, vals);
}
static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
@@ -474,14 +475,13 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
const char *buf, size_t len)
{
struct ina2xx_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
- unsigned long val;
- int ret;
+ int val, val_fract, ret;
- ret = kstrtoul((const char *) buf, 10, &val);
+ ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
if (ret)
return ret;
- ret = set_shunt_resistor(chip, val);
+ ret = set_shunt_resistor(chip, val * 1000000 + val_fract);
if (ret)
return ret;
@@ -778,7 +778,6 @@ static const struct attribute_group ina226_attribute_group = {
};
static const struct iio_info ina219_info = {
- .driver_module = THIS_MODULE,
.attrs = &ina219_attribute_group,
.read_raw = ina2xx_read_raw,
.write_raw = ina2xx_write_raw,
@@ -786,7 +785,6 @@ static const struct iio_info ina219_info = {
};
static const struct iio_info ina226_info = {
- .driver_module = THIS_MODULE,
.attrs = &ina226_attribute_group,
.read_raw = ina2xx_read_raw,
.write_raw = ina2xx_write_raw,
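Because the shunt value is now kept in micro-ohms internally, the sysfs attribute round-trips it as a fractional resistance in ohms rather than a raw integer. A worked sketch of the conversion the hunks above imply, using the existing iio_str_to_fixpoint()/iio_format_value() helpers (the wrapper function and its name are illustrative):

/* Sketch: writing "0.015" parses to val = 0, val_fract = 15000 with a
 * fract multiplier of 100000, i.e. 0 * 1000000 + 15000 = 15000 uOhm
 * (15 mOhm).  Reading back formats { 15000, 1000000 } as
 * IIO_VAL_FRACTIONAL, which prints "0.015000".
 */
static int sketch_parse_shunt(const char *buf, unsigned int *uohm)
{
	int val, val_fract, ret;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
	if (ret)
		return ret;

	*uohm = val * 1000000 + val_fract;
	return 0;
}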
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index 152cfc8e1c7b..3bc4df916420 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -125,7 +125,6 @@ static int lp8788_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lp8788_adc_info = {
.read_raw = &lp8788_adc_read_raw,
- .driver_module = THIS_MODULE,
};
#define LP8788_CHAN(_id, _type) { \
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index 3ef18f4b27f0..041dc4a3f66c 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -116,7 +116,6 @@ static int lpc18xx_adc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc18xx_adc_info = {
.read_raw = lpc18xx_adc_read_raw,
- .driver_module = THIS_MODULE,
};
static int lpc18xx_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index 6a5b9a9bc662..20b36690fa4f 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -104,7 +104,6 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc32xx_adc_iio_info = {
.read_raw = &lpc32xx_read_raw,
- .driver_module = THIS_MODULE,
};
#define LPC32XX_ADC_CHANNEL(_index) { \
diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c
index 29b7ed60cdb0..b88102b751cf 100644
--- a/drivers/iio/adc/ltc2471.c
+++ b/drivers/iio/adc/ltc2471.c
@@ -98,7 +98,6 @@ static const struct iio_chan_spec ltc2473_channel[] = {
static const struct iio_info ltc2471_info = {
.read_raw = ltc2471_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2471_i2c_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ltc2485.c b/drivers/iio/adc/ltc2485.c
index eab91f12454a..b24c14037fd4 100644
--- a/drivers/iio/adc/ltc2485.c
+++ b/drivers/iio/adc/ltc2485.c
@@ -90,7 +90,6 @@ static const struct iio_chan_spec ltc2485_channel[] = {
static const struct iio_info ltc2485_info = {
.read_raw = ltc2485_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2485_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 5bf8011dcde9..f1f7cdf66fbd 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -186,7 +186,6 @@ static const struct iio_chan_spec ltc2497_channel[] = {
static const struct iio_info ltc2497_info = {
.read_raw = ltc2497_read_raw,
- .driver_module = THIS_MODULE,
};
static int ltc2497_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index ebc715927e63..375da6491499 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -381,13 +381,11 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
}
static const struct iio_trigger_ops max1027_trigger_ops = {
- .owner = THIS_MODULE,
.validate_device = &iio_trigger_validate_own_device,
.set_trigger_state = &max1027_set_trigger_state,
};
static const struct iio_info max1027_info = {
- .driver_module = THIS_MODULE,
.read_raw = &max1027_read_raw,
.validate_trigger = &max1027_validate_trigger,
.debugfs_reg_access = &max1027_debugfs_reg_access,
diff --git a/drivers/iio/adc/max11100.c b/drivers/iio/adc/max11100.c
index 1180bcc22ff1..af59ab2e650c 100644
--- a/drivers/iio/adc/max11100.c
+++ b/drivers/iio/adc/max11100.c
@@ -100,7 +100,6 @@ static int max11100_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max11100_info = {
- .driver_module = THIS_MODULE,
.read_raw = max11100_read_raw,
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 2e9648a078c4..49db9e9ae625 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -155,7 +155,6 @@ static int max1118_read_raw(struct iio_dev *indio_dev,
static const struct iio_info max1118_info = {
.read_raw = max1118_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t max1118_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 80eada4886b3..7f1848dac9bf 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1029,7 +1029,6 @@ static int max1363_update_scan_mode(struct iio_dev *indio_dev,
static const struct iio_info max1238_info = {
.read_raw = &max1363_read_raw,
- .driver_module = THIS_MODULE,
.update_scan_mode = &max1363_update_scan_mode,
};
@@ -1040,7 +1039,6 @@ static const struct iio_info max1363_info = {
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
.update_scan_mode = &max1363_update_scan_mode,
- .driver_module = THIS_MODULE,
.event_attrs = &max1363_event_attribute_group,
};
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index b1dd17cbce58..0538ff8c4ac1 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -460,7 +460,6 @@ static const struct attribute_group max9611_attribute_group = {
};
static const struct iio_info indio_info = {
- .driver_module = THIS_MODULE,
.read_raw = max9611_read_raw,
.attrs = &max9611_attribute_group,
};
@@ -573,7 +572,6 @@ static int max9611_probe(struct i2c_client *client,
static struct i2c_driver max9611_driver = {
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = max9611_of_table,
},
.probe = max9611_probe,
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 071dd23a33d9..a04856d8afdb 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -19,6 +19,11 @@
* ------------
* 13 bit converter
* MCP3301
+ * ------------
+ * 22 bit converter
+ * MCP3550
+ * MCP3551
+ * MCP3553
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -28,6 +33,7 @@
* http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf mcp3202
* http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf mcp3204/08
* http://ww1.microchip.com/downloads/en/DeviceDoc/21700E.pdf mcp3301
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21950D.pdf mcp3550/1/3
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -51,25 +57,45 @@ enum {
mcp3204,
mcp3208,
mcp3301,
+ mcp3550_50,
+ mcp3550_60,
+ mcp3551,
+ mcp3553,
};
struct mcp320x_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
unsigned int resolution;
+ unsigned int conv_time; /* usec */
};
+/**
+ * struct mcp320x - Microchip SPI ADC instance
+ * @spi: SPI slave (parent of the IIO device)
+ * @msg: SPI message to select a channel and receive a value from the ADC
+ * @transfer: SPI transfers used by @msg
+ * @start_conv_msg: SPI message to start a conversion by briefly asserting CS
+ * @start_conv_transfer: SPI transfer used by @start_conv_msg
+ * @reg: regulator generating Vref
+ * @lock: protects read sequences
+ * @chip_info: ADC properties
+ * @tx_buf: buffer for @transfer[0] (not used on single-channel converters)
+ * @rx_buf: buffer for @transfer[1]
+ */
struct mcp320x {
struct spi_device *spi;
struct spi_message msg;
struct spi_transfer transfer[2];
+ struct spi_message start_conv_msg;
+ struct spi_transfer start_conv_transfer;
struct regulator *reg;
struct mutex lock;
const struct mcp320x_chip_info *chip_info;
u8 tx_buf ____cacheline_aligned;
- u8 rx_buf[2];
+ u8 rx_buf[4];
};
static int mcp320x_channel_to_tx_data(int device_index,
@@ -78,10 +104,6 @@ static int mcp320x_channel_to_tx_data(int device_index,
int start_bit = 1;
switch (device_index) {
- case mcp3001:
- case mcp3201:
- case mcp3301:
- return 0;
case mcp3002:
case mcp3202:
return ((start_bit << 4) | (!differential << 3) |
@@ -102,21 +124,24 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
{
int ret;
- adc->rx_buf[0] = 0;
- adc->rx_buf[1] = 0;
- adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
- channel, differential);
-
- if (device_index != mcp3001 && device_index != mcp3201 && device_index != mcp3301) {
- ret = spi_sync(adc->spi, &adc->msg);
- if (ret < 0)
- return ret;
- } else {
- ret = spi_read(adc->spi, &adc->rx_buf, sizeof(adc->rx_buf));
+ if (adc->chip_info->conv_time) {
+ ret = spi_sync(adc->spi, &adc->start_conv_msg);
if (ret < 0)
return ret;
+
+ usleep_range(adc->chip_info->conv_time,
+ adc->chip_info->conv_time + 100);
}
+ memset(&adc->rx_buf, 0, sizeof(adc->rx_buf));
+ if (adc->chip_info->num_channels > 1)
+ adc->tx_buf = mcp320x_channel_to_tx_data(device_index, channel,
+ differential);
+
+ ret = spi_sync(adc->spi, &adc->msg);
+ if (ret < 0)
+ return ret;
+
switch (device_index) {
case mcp3001:
*val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
@@ -138,6 +163,31 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
*val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
| adc->rx_buf[1], 12);
return 0;
+ case mcp3550_50:
+ case mcp3550_60:
+ case mcp3551:
+ case mcp3553: {
+ u32 raw = be32_to_cpup((u32 *)adc->rx_buf);
+
+ if (!(adc->spi->mode & SPI_CPOL))
+ raw <<= 1; /* strip Data Ready bit in SPI mode 0,0 */
+
+ /*
+ * If the input is within -vref and vref, bit 21 is the sign.
+ * Up to 12% overrange or underrange are allowed, in which case
+ * bit 23 is the sign and bit 0 to 21 is the value.
+ */
+ raw >>= 8;
+ if (raw & BIT(22) && raw & BIT(23))
+ return -EIO; /* cannot have overrange AND underrange */
+ else if (raw & BIT(22))
+ raw &= ~BIT(22); /* overrange */
+ else if (raw & BIT(23) || raw & BIT(21))
+ raw |= GENMASK(31, 22); /* underrange or negative */
+
+ *val = (s32)raw;
+ return 0;
+ }
default:
return -EINVAL;
}
@@ -248,7 +298,6 @@ static const struct iio_chan_spec mcp3208_channels[] = {
static const struct iio_info mcp320x_info = {
.read_raw = mcp320x_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
@@ -297,6 +346,31 @@ static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
.num_channels = ARRAY_SIZE(mcp3201_channels),
.resolution = 13
},
+ [mcp3550_50] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ /* 2% max deviation + 144 clock periods to exit shutdown */
+ .conv_time = 80000 * 1.02 + 144000 / 102.4,
+ },
+ [mcp3550_60] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 66670 * 1.02 + 144000 / 122.88,
+ },
+ [mcp3551] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 73100 * 1.02 + 144000 / 112.64,
+ },
+ [mcp3553] = {
+ .channels = mcp3201_channels,
+ .num_channels = ARRAY_SIZE(mcp3201_channels),
+ .resolution = 21,
+ .conv_time = 16670 * 1.02 + 144000 / 122.88,
+ },
};
static int mcp320x_probe(struct spi_device *spi)
@@ -304,7 +378,7 @@ static int mcp320x_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
struct mcp320x *adc;
const struct mcp320x_chip_info *chip_info;
- int ret;
+ int ret, device_index;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
@@ -320,7 +394,8 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->info = &mcp320x_info;
spi_set_drvdata(spi, indio_dev);
- chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
+ device_index = spi_get_device_id(spi)->driver_data;
+ chip_info = &mcp320x_chip_infos[device_index];
indio_dev->channels = chip_info->channels;
indio_dev->num_channels = chip_info->num_channels;
@@ -329,10 +404,41 @@ static int mcp320x_probe(struct spi_device *spi)
adc->transfer[0].tx_buf = &adc->tx_buf;
adc->transfer[0].len = sizeof(adc->tx_buf);
adc->transfer[1].rx_buf = adc->rx_buf;
- adc->transfer[1].len = sizeof(adc->rx_buf);
+ adc->transfer[1].len = DIV_ROUND_UP(chip_info->resolution, 8);
+
+ if (chip_info->num_channels == 1)
+ /* single-channel converters are rx only (no MOSI pin) */
+ spi_message_init_with_transfers(&adc->msg,
+ &adc->transfer[1], 1);
+ else
+ spi_message_init_with_transfers(&adc->msg, adc->transfer,
+ ARRAY_SIZE(adc->transfer));
- spi_message_init_with_transfers(&adc->msg, adc->transfer,
- ARRAY_SIZE(adc->transfer));
+ switch (device_index) {
+ case mcp3550_50:
+ case mcp3550_60:
+ case mcp3551:
+ case mcp3553:
+ /* rx len increases from 24 to 25 bit in SPI mode 0,0 */
+ if (!(spi->mode & SPI_CPOL))
+ adc->transfer[1].len++;
+
+ /* conversions are started by asserting CS pin for 8 usec */
+ adc->start_conv_transfer.delay_usecs = 8;
+ spi_message_init_with_transfers(&adc->start_conv_msg,
+ &adc->start_conv_transfer, 1);
+
+ /*
+ * If CS was previously kept low (continuous conversion mode)
+ * and then changed to high, the chip is in shutdown.
+ * Sometimes it fails to wake from shutdown and clocks out
+ * only 0xffffff. The magic sequence of performing two
+ * conversions without delay between them resets the chip
+ * and ensures all subsequent conversions succeed.
+ */
+ mcp320x_adc_conversion(adc, 0, 1, device_index, &ret);
+ mcp320x_adc_conversion(adc, 0, 1, device_index, &ret);
+ }
adc->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(adc->reg))
@@ -370,62 +476,29 @@ static int mcp320x_remove(struct spi_device *spi)
#if defined(CONFIG_OF)
static const struct of_device_id mcp320x_dt_ids[] = {
/* NOTE: The use of compatibles with no vendor prefix is deprecated. */
- {
- .compatible = "mcp3001",
- .data = &mcp320x_chip_infos[mcp3001],
- }, {
- .compatible = "mcp3002",
- .data = &mcp320x_chip_infos[mcp3002],
- }, {
- .compatible = "mcp3004",
- .data = &mcp320x_chip_infos[mcp3004],
- }, {
- .compatible = "mcp3008",
- .data = &mcp320x_chip_infos[mcp3008],
- }, {
- .compatible = "mcp3201",
- .data = &mcp320x_chip_infos[mcp3201],
- }, {
- .compatible = "mcp3202",
- .data = &mcp320x_chip_infos[mcp3202],
- }, {
- .compatible = "mcp3204",
- .data = &mcp320x_chip_infos[mcp3204],
- }, {
- .compatible = "mcp3208",
- .data = &mcp320x_chip_infos[mcp3208],
- }, {
- .compatible = "mcp3301",
- .data = &mcp320x_chip_infos[mcp3301],
- }, {
- .compatible = "microchip,mcp3001",
- .data = &mcp320x_chip_infos[mcp3001],
- }, {
- .compatible = "microchip,mcp3002",
- .data = &mcp320x_chip_infos[mcp3002],
- }, {
- .compatible = "microchip,mcp3004",
- .data = &mcp320x_chip_infos[mcp3004],
- }, {
- .compatible = "microchip,mcp3008",
- .data = &mcp320x_chip_infos[mcp3008],
- }, {
- .compatible = "microchip,mcp3201",
- .data = &mcp320x_chip_infos[mcp3201],
- }, {
- .compatible = "microchip,mcp3202",
- .data = &mcp320x_chip_infos[mcp3202],
- }, {
- .compatible = "microchip,mcp3204",
- .data = &mcp320x_chip_infos[mcp3204],
- }, {
- .compatible = "microchip,mcp3208",
- .data = &mcp320x_chip_infos[mcp3208],
- }, {
- .compatible = "microchip,mcp3301",
- .data = &mcp320x_chip_infos[mcp3301],
- }, {
- }
+ { .compatible = "mcp3001" },
+ { .compatible = "mcp3002" },
+ { .compatible = "mcp3004" },
+ { .compatible = "mcp3008" },
+ { .compatible = "mcp3201" },
+ { .compatible = "mcp3202" },
+ { .compatible = "mcp3204" },
+ { .compatible = "mcp3208" },
+ { .compatible = "mcp3301" },
+ { .compatible = "microchip,mcp3001" },
+ { .compatible = "microchip,mcp3002" },
+ { .compatible = "microchip,mcp3004" },
+ { .compatible = "microchip,mcp3008" },
+ { .compatible = "microchip,mcp3201" },
+ { .compatible = "microchip,mcp3202" },
+ { .compatible = "microchip,mcp3204" },
+ { .compatible = "microchip,mcp3208" },
+ { .compatible = "microchip,mcp3301" },
+ { .compatible = "microchip,mcp3550-50" },
+ { .compatible = "microchip,mcp3550-60" },
+ { .compatible = "microchip,mcp3551" },
+ { .compatible = "microchip,mcp3553" },
+ { }
};
MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
#endif
@@ -440,6 +513,10 @@ static const struct spi_device_id mcp320x_id[] = {
{ "mcp3204", mcp3204 },
{ "mcp3208", mcp3208 },
{ "mcp3301", mcp3301 },
+ { "mcp3550-50", mcp3550_50 },
+ { "mcp3550-60", mcp3550_60 },
+ { "mcp3551", mcp3551 },
+ { "mcp3553", mcp3553 },
{ }
};
MODULE_DEVICE_TABLE(spi, mcp320x_id);
@@ -456,5 +533,5 @@ static struct spi_driver mcp320x_driver = {
module_spi_driver(mcp320x_driver);
MODULE_AUTHOR("Oskar Andero <oskar.andero@gmail.com>");
-MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08");
+MODULE_DESCRIPTION("Microchip Technology MCP3x01/02/04/08 and MCP3550/1/3");
MODULE_LICENSE("GPL v2");
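For the MCP3550/1/3 entries, a conversion is started by pulsing CS (start_conv_transfer with an 8 usec delay) and the driver then sleeps for conv_time before clocking out the result. A worked figure for the MCP3550-50 table entry above, showing where its conv_time expression lands (the 102.4 kHz divisor is read off the code and taken to be the part's internal clock):

/* Sketch: MCP3550-50 conversion-time budget.
 *   80000 us nominal conversion * 1.02 (2% max deviation)        = 81600 us
 * + 144 clock periods to exit shutdown at ~102.4 kHz (144000/102.4) ~= 1406 us
 *   -> conv_time ~= 83006 us, so the read path waits roughly 83.0-83.1 ms
 *      via usleep_range(conv_time, conv_time + 100).
 */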
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 63de705086ed..819f26011500 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -327,7 +327,6 @@ static const struct iio_info mcp3422_info = {
.write_raw = mcp3422_write_raw,
.write_raw_get_fmt = mcp3422_write_raw_get_fmt,
.attrs = &mcp3422_attribute_group,
- .driver_module = THIS_MODULE,
};
static int mcp3422_probe(struct i2c_client *client,
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 8f3606de4eaf..c80261748d8f 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -80,7 +80,6 @@ static int z188_iio_read_raw(struct iio_dev *iio_dev,
static const struct iio_info z188_adc_info = {
.read_raw = &z188_iio_read_raw,
- .driver_module = THIS_MODULE,
};
static void men_z188_config_channels(void __iomem *addr)
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 2e8dbb89c8c9..9c6932ffc0af 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -840,7 +840,6 @@ out:
static const struct iio_info meson_sar_adc_iio_info = {
.read_raw = meson_sar_adc_iio_info_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 414cf44bf19d..95d76abb64ec 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -180,7 +180,6 @@ static int mt6577_auxadc_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mt6577_auxadc_info = {
- .driver_module = THIS_MODULE,
.read_raw = &mt6577_auxadc_read_raw,
};
@@ -306,6 +305,7 @@ static SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops,
static const struct of_device_id mt6577_auxadc_of_match[] = {
{ .compatible = "mediatek,mt2701-auxadc", },
+ { .compatible = "mediatek,mt2712-auxadc", },
{ .compatible = "mediatek,mt7622-auxadc", },
{ .compatible = "mediatek,mt8173-auxadc", },
{ }
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index d32b34638c2f..c627513d9f0f 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -382,7 +382,6 @@ static const struct attribute_group mxs_lradc_adc_attribute_group = {
};
static const struct iio_info mxs_lradc_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = mxs_lradc_adc_read_raw,
.write_raw = mxs_lradc_adc_write_raw,
.write_raw_get_fmt = mxs_lradc_adc_write_raw_get_fmt,
@@ -455,7 +454,6 @@ static int mxs_lradc_adc_configure_trigger(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops mxs_lradc_adc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &mxs_lradc_adc_configure_trigger,
};
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
index 08f446695f97..8997e74a8847 100644
--- a/drivers/iio/adc/nau7802.c
+++ b/drivers/iio/adc/nau7802.c
@@ -402,7 +402,6 @@ static int nau7802_write_raw_get_fmt(struct iio_dev *indio_dev,
}
static const struct iio_info nau7802_info = {
- .driver_module = THIS_MODULE,
.read_raw = &nau7802_read_raw,
.write_raw = &nau7802_write_raw,
.write_raw_get_fmt = nau7802_write_raw_get_fmt,
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 7d61b566e148..69b9affeef1e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -430,7 +430,6 @@ out:
static const struct iio_info palmas_gpadc_iio_info = {
.read_raw = palmas_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
#define PALMAS_ADC_CHAN_IIO(chan, _type, chan_info) \
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index cea8f1fb444a..b093ecddf1a8 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -728,7 +728,6 @@ static int pm8xxx_of_xlate(struct iio_dev *indio_dev,
}
static const struct iio_info pm8xxx_xoadc_info = {
- .driver_module = THIS_MODULE,
.of_xlate = pm8xxx_of_xlate,
.read_raw = pm8xxx_read_raw,
};
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index fabd24edc2a1..3f062cd61aba 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -356,7 +356,6 @@ static int iadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info iadc_info = {
.read_raw = iadc_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t iadc_isr(int irq, void *dev_id)
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 9e600bfd1765..3680e0d47412 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -506,7 +506,6 @@ static int vadc_of_xlate(struct iio_dev *indio_dev,
static const struct iio_info vadc_info = {
.read_raw = vadc_read_raw,
.of_xlate = vadc_of_xlate,
- .driver_module = THIS_MODULE,
};
struct vadc_channels {
diff --git a/drivers/iio/adc/qcom-vadc-common.c b/drivers/iio/adc/qcom-vadc-common.c
index 102fc51b10aa..47d24ae5462f 100644
--- a/drivers/iio/adc/qcom-vadc-common.c
+++ b/drivers/iio/adc/qcom-vadc-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/drivers/iio/adc/qcom-vadc-common.h b/drivers/iio/adc/qcom-vadc-common.h
index 63c872a70adc..1d5354ff5c72 100644
--- a/drivers/iio/adc/qcom-vadc-common.h
+++ b/drivers/iio/adc/qcom-vadc-common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Code shared between the different Qualcomm PMIC voltage ADCs
*/
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index 27a318164619..dcb50172186f 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -277,7 +277,6 @@ static int rcar_gyroadc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info rcar_gyroadc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = rcar_gyroadc_read_raw,
.debugfs_reg_access = rcar_gyroadc_reg_access,
};
@@ -349,7 +348,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
continue;
}
- childmode = (unsigned int)of_id->data;
+ childmode = (uintptr_t)of_id->data;
switch (childmode) {
case RCAR_GYROADC_MODE_SELECT_1_MB88101A:
sample_width = 12;
@@ -488,8 +487,6 @@ err:
static int rcar_gyroadc_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(rcar_gyroadc_match, &pdev->dev);
struct device *dev = &pdev->dev;
struct rcar_gyroadc *priv;
struct iio_dev *indio_dev;
@@ -526,7 +523,8 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->model = (enum rcar_gyroadc_model)of_id->data;
+ priv->model = (enum rcar_gyroadc_model)
+ of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, indio_dev);
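The rcar-gyroadc hunks replace the open-coded of_match_device() lookups with of_device_get_match_data() and cast integer match data through uintptr_t so the conversion stays clean on 64-bit builds. A minimal sketch of that pattern (probe function and names are illustrative):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sketch: fetch per-compatible data stored as a plain integer in .data. */
static int sketch_probe(struct platform_device *pdev)
{
	unsigned int model;

	model = (uintptr_t)of_device_get_match_data(&pdev->dev);
	dev_dbg(&pdev->dev, "matched model %u\n", model);

	return 0;
}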
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 5f612d694b33..1f98566d5b3c 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -125,7 +125,6 @@ static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
static const struct iio_info rockchip_saradc_iio_info = {
.read_raw = rockchip_saradc_read_raw,
- .driver_module = THIS_MODULE,
};
#define ADC_CHANNEL(_index, _id) { \
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index 5dd61f6a57b9..b1da2c46107c 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -254,7 +254,6 @@ static int spear_adc_configure(struct spear_adc_state *st)
static const struct iio_info spear_adc_info = {
.read_raw = &spear_adc_read_raw,
.write_raw = &spear_adc_write_raw,
- .driver_module = THIS_MODULE,
};
static int spear_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 804198eb0eef..6aefef99f935 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -139,6 +139,11 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
}
rate = clk_get_rate(priv->aclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid clock rate: 0\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
break;
@@ -216,6 +221,10 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
* From spec: PLL output mustn't exceed max rate
*/
rate = clk_get_rate(priv->aclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid adc clock rate: 0\n");
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
@@ -232,6 +241,10 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
/* Synchronous clock modes (e.g. ckmode is 1, 2 or 3) */
rate = clk_get_rate(priv->bclk);
+ if (!rate) {
+ dev_err(&pdev->dev, "Invalid bus clock rate: 0\n");
+ return -EINVAL;
+ }
for (i = 0; i < ARRAY_SIZE(stm32h7_adc_ckmodes_spec); i++) {
ckmode = stm32h7_adc_ckmodes_spec[i].ckmode;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 4df32cf1650e..c9d96f935dba 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -531,6 +531,7 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{ TIM2_TRGO, STM32_EXT11 },
{ TIM4_TRGO, STM32_EXT12 },
{ TIM6_TRGO, STM32_EXT13 },
+ { TIM15_TRGO, STM32_EXT14 },
{ TIM3_CH4, STM32_EXT15 },
{ LPTIM1_OUT, STM32_EXT18 },
{ LPTIM2_OUT, STM32_EXT19 },
@@ -1385,7 +1386,6 @@ static const struct iio_info stm32_adc_iio_info = {
.update_scan_mode = stm32_adc_update_scan_mode,
.debugfs_reg_access = stm32_adc_debugfs_reg_access,
.of_xlate = stm32_adc_of_xlate,
- .driver_module = THIS_MODULE,
};
static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c
index 2da741d27540..17b021f33180 100644
--- a/drivers/iio/adc/stx104.c
+++ b/drivers/iio/adc/stx104.c
@@ -172,7 +172,6 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stx104_info = {
- .driver_module = THIS_MODULE,
.read_raw = stx104_read_raw,
.write_raw = stx104_write_raw
};
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 137f577d9432..04d7147e0110 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -352,7 +352,6 @@ static int sun4i_gpadc_read_raw(struct iio_dev *indio_dev,
static const struct iio_info sun4i_gpadc_iio_info = {
.read_raw = sun4i_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
@@ -502,17 +501,15 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
struct iio_dev *indio_dev)
{
struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
- const struct of_device_id *of_dev;
struct resource *mem;
void __iomem *base;
int ret;
- of_dev = of_match_device(sun4i_gpadc_of_id, &pdev->dev);
- if (!of_dev)
+ info->data = of_device_get_match_data(&pdev->dev);
+ if (!info->data)
return -ENODEV;
info->no_irq = true;
- info->data = (struct gpadc_data *)of_dev->data;
indio_dev->num_channels = ARRAY_SIZE(sun8i_a33_gpadc_channels);
indio_dev->channels = sun8i_a33_gpadc_channels;
@@ -529,17 +526,10 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
return ret;
}
- if (!IS_ENABLED(CONFIG_THERMAL_OF))
- return 0;
+ if (IS_ENABLED(CONFIG_THERMAL_OF))
+ info->sensor_device = &pdev->dev;
- info->sensor_device = &pdev->dev;
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
- info, &sun4i_ts_tz_ops);
- if (IS_ERR(info->tzd))
- dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
- PTR_ERR(info->tzd));
-
- return PTR_ERR_OR_ZERO(info->tzd);
+ return 0;
}
static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -586,15 +576,6 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
* return the temperature.
*/
info->sensor_device = pdev->dev.parent;
- info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
- 0, info,
- &sun4i_ts_tz_ops);
- if (IS_ERR(info->tzd)) {
- dev_err(&pdev->dev,
- "could not register thermal sensor: %ld\n",
- PTR_ERR(info->tzd));
- return PTR_ERR(info->tzd);
- }
} else {
indio_dev->num_channels =
ARRAY_SIZE(sun4i_gpadc_channels_no_temp);
@@ -664,6 +645,22 @@ static int sun4i_gpadc_probe(struct platform_device *pdev)
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ if (IS_ENABLED(CONFIG_THERMAL_OF)) {
+ info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
+ 0, info,
+ &sun4i_ts_tz_ops);
+ /*
+ * Do not fail driver probing when thermal registration fails
+ * only because no thermal DT node is found.
+ */
+ if (IS_ERR(info->tzd) && PTR_ERR(info->tzd) != -ENODEV) {
+ dev_err(&pdev->dev,
+ "could not register thermal sensor: %ld\n",
+ PTR_ERR(info->tzd));
+ return PTR_ERR(info->tzd);
+ }
+ }
+
ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret < 0) {
dev_err(&pdev->dev, "could not register the device\n");
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 319172cf7da8..405e3779c0c5 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -124,7 +124,6 @@ static struct adcxx1c_model adcxx1c_models[] = {
static const struct iio_info adc081c_info = {
.read_raw = adc081c_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t adc081c_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index e952e94a14af..188dae705bf7 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -195,7 +195,6 @@ static int adc0832_read_raw(struct iio_dev *iio,
static const struct iio_info adc0832_info = {
.read_raw = adc0832_read_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t adc0832_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index a355121c11a4..25504640e126 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -186,7 +186,6 @@ static int adc084s021_buffer_postdisable(struct iio_dev *indio_dev)
static const struct iio_info adc084s021_info = {
.read_raw = adc084s021_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_buffer_setup_ops adc084s021_buffer_setup_ops = {
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index de4e5ac98c6e..841203edaac5 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -220,7 +220,6 @@ static int adc108s102_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adc108s102_info = {
.read_raw = &adc108s102_read_raw,
.update_scan_mode = &adc108s102_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adc108s102_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 072f03bfe6a0..703d68ae96b7 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -164,7 +164,7 @@ static int __adc12138_start_conv(struct adc12138 *adc,
void *data, int len)
{
- const u8 ch_to_mux[] = { 0, 4, 1, 5, 2, 6, 3, 7 };
+ static const u8 ch_to_mux[] = { 0, 4, 1, 5, 2, 6, 3, 7 };
u8 mode = (ch_to_mux[channel->channel] << 4) |
(channel->differential ? 0 : 0x80);
@@ -277,7 +277,6 @@ static int adc12138_read_raw(struct iio_dev *iio,
static const struct iio_info adc12138_info = {
.read_raw = adc12138_read_raw,
- .driver_module = THIS_MODULE,
};
static int adc12138_init(struct adc12138 *adc)
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 89dfbd31be5c..7cf39b3e2416 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -130,7 +130,6 @@ static const struct adc128_configuration adc128_config[] = {
static const struct iio_info adc128_info = {
.read_raw = adc128_read_raw,
- .driver_module = THIS_MODULE,
};
static int adc128_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 4836a0d7aef5..10fa7677ac4b 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -173,7 +173,6 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info ti_adc_info = {
- .driver_module = THIS_MODULE,
.read_raw = ti_adc_read_raw,
};
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index e0dc20488335..6a114dcb4a3a 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -332,7 +332,7 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
- int ret, pga, dr, conv_time;
+ int ret, pga, dr, dr_old, conv_time;
unsigned int old, mask, cfg;
if (chan < 0 || chan >= ADS1015_CHANNELS)
@@ -358,17 +358,17 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
}
cfg = (old & ~mask) | (cfg & mask);
-
- ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
- if (ret)
- return ret;
-
- if (old != cfg || data->conv_invalid) {
- int dr_old = (old & ADS1015_CFG_DR_MASK) >>
- ADS1015_CFG_DR_SHIFT;
-
+ if (old != cfg) {
+ ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
+ if (ret)
+ return ret;
+ data->conv_invalid = true;
+ }
+ if (data->conv_invalid) {
+ dr_old = (old & ADS1015_CFG_DR_MASK) >> ADS1015_CFG_DR_SHIFT;
conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ conv_time += conv_time / 10; /* 10% internal clock inaccuracy */
usleep_range(conv_time, conv_time + 1);
data->conv_invalid = false;
}
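The reworked wait above covers one conversion period at the previously programmed data rate (a conversion started under the old configuration may still be in flight) plus one at the new rate, padded by roughly 10% for the internal oscillator tolerance. A sketch of the same arithmetic with illustrative rates of 1600 SPS (old) and 128 SPS (new), values chosen here only as an example:

	unsigned int old_sps = 1600, new_sps = 128;	/* assumed example rates */
	unsigned int wait_us;

	wait_us  = DIV_ROUND_UP(USEC_PER_SEC, old_sps);	/* 625 us           */
	wait_us += DIV_ROUND_UP(USEC_PER_SEC, new_sps);	/* + 7813 us = 8438 */
	wait_us += wait_us / 10;			/* + 10% -> 9281 us */
	usleep_range(wait_us, wait_us + 1);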
@@ -821,7 +821,6 @@ static const struct attribute_group ads1115_attribute_group = {
};
static const struct iio_info ads1015_info = {
- .driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
@@ -832,7 +831,6 @@ static const struct iio_info ads1015_info = {
};
static const struct iio_info ads1115_info = {
- .driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
.read_event_value = ads1015_read_event,
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index a376190914ad..0225c1b333ab 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -372,7 +372,6 @@ static int ti_ads7950_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ti_ads7950_info = {
.read_raw = &ti_ads7950_read_raw,
.update_scan_mode = ti_ads7950_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int ti_ads7950_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 4a163496d9e4..079f133144b0 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -369,7 +369,6 @@ static const struct iio_info ads8688_info = {
.write_raw = &ads8688_write_raw,
.write_raw_get_fmt = &ads8688_write_raw_get_fmt,
.attrs = &ads8688_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct ads8688_chip_info ads8688_chip_info_tbl[] = {
@@ -474,7 +473,6 @@ MODULE_DEVICE_TABLE(of, ads8688_of_match);
static struct spi_driver ads8688_driver = {
.driver = {
.name = "ads8688",
- .owner = THIS_MODULE,
},
.probe = ads8688_probe,
.remove = ads8688_remove,
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 78d91a069ea4..2290024c89fc 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -157,7 +157,6 @@ static int tlc4541_read_raw(struct iio_dev *indio_dev,
static const struct iio_info tlc4541_info = {
.read_raw = &tlc4541_read_raw,
- .driver_module = THIS_MODULE,
};
static int tlc4541_probe(struct spi_device *spi)
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 6cbed7eb118a..b3e573cc6f5f 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -533,7 +533,6 @@ err_unlock:
static const struct iio_info tiadc_info = {
.read_raw = &tiadc_read_raw,
- .driver_module = THIS_MODULE,
};
static int tiadc_request_dma(struct platform_device *pdev,
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index e3cfb91bffc6..8c019bb6625f 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -212,7 +212,6 @@ static int twl4030_madc_read(struct iio_dev *iio_dev,
static const struct iio_info twl4030_madc_iio_info = {
.read_raw = &twl4030_madc_read,
- .driver_module = THIS_MODULE,
};
#define TWL4030_ADC_CHANNEL(_channel, _type, _name) { \
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index bc0e60b9da45..dc83f8f6c3d3 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -843,7 +843,6 @@ static const struct iio_chan_spec twl6032_gpadc_iio_channels[] = {
static const struct iio_info twl6030_gpadc_iio_info = {
.read_raw = &twl6030_gpadc_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct twl6030_gpadc_platform_data twl6030_pdata = {
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index c168e0db329a..bbcb7a4d7edf 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -799,7 +799,6 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
}
static const struct iio_info vf610_adc_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &vf610_read_raw,
.write_raw = &vf610_write_raw,
.debugfs_reg_access = &vf610_adc_reg_access,
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 3be2e35721cc..53eb5a4136fe 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -107,7 +107,6 @@ error:
static const struct iio_info vprbrd_adc_iio_info = {
.read_raw = &vprbrd_iio_read_raw,
- .driver_module = THIS_MODULE,
};
static int vprbrd_adc_probe(struct platform_device *pdev)
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 4a60497a1f19..d4f21d1be6c8 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -675,7 +675,6 @@ err_out:
}
static const struct iio_trigger_ops xadc_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &xadc_trigger_set_state,
};
@@ -1028,7 +1027,6 @@ static const struct iio_info xadc_info = {
.read_event_value = &xadc_read_event_value,
.write_event_value = &xadc_write_event_value,
.update_scan_mode = &xadc_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const struct of_device_id xadc_of_match_table[] = {
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 102c7174da5b..43667866321e 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -117,7 +117,6 @@ static int ad8366_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad8366_info = {
.read_raw = &ad8366_read_raw,
.write_raw = &ad8366_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD8366_CHAN(_channel) { \
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 85beaae831ae..95f9f41c58b7 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the industrial I/O buffer implementations
#
diff --git a/drivers/iio/chemical/ams-iaq-core.c b/drivers/iio/chemical/ams-iaq-core.c
index c948ad2ee9ad..d9e5950ad24a 100644
--- a/drivers/iio/chemical/ams-iaq-core.c
+++ b/drivers/iio/chemical/ams-iaq-core.c
@@ -141,7 +141,6 @@ err_out:
static const struct iio_info ams_iaqcore_info = {
.read_raw = ams_iaqcore_read_raw,
- .driver_module = THIS_MODULE,
};
static int ams_iaqcore_probe(struct i2c_client *client,
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index ef761a508630..8c4e05580091 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -344,7 +344,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
}
static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
@@ -499,7 +498,6 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info atlas_info = {
- .driver_module = THIS_MODULE,
.read_raw = atlas_read_raw,
.write_raw = atlas_write_raw,
};
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 840a6cbd5f0f..97bce8345c6a 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/module.h>
@@ -59,6 +60,8 @@
#define CCS811_MODE_IAQ_60SEC 0x30
#define CCS811_MODE_RAW_DATA 0x40
+#define CCS811_MEAS_MODE_INTERRUPT BIT(3)
+
#define CCS811_VOLTAGE_MASK 0x3FF
struct ccs811_reading {
@@ -73,6 +76,8 @@ struct ccs811_data {
struct i2c_client *client;
struct mutex lock; /* Protect readings */
struct ccs811_reading buffer;
+ struct iio_trigger *drdy_trig;
+ bool drdy_trig_on;
};
static const struct iio_chan_spec ccs811_channels[] = {
@@ -193,10 +198,14 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
mutex_lock(&data->lock);
ret = ccs811_get_measurement(data);
if (ret < 0) {
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -228,6 +237,7 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
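The claim/release pair introduced above is the standard IIO idiom for one-shot reads: iio_device_claim_direct_mode() returns -EBUSY while the triggered buffer is enabled, so a raw sysfs read cannot race with buffered capture. Reduced to its skeleton (a sketch of the pattern, not additional driver code):

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;		/* buffer in use, refuse the raw read */

	/* ... one-shot measurement under data->lock ... */

	iio_device_release_direct_mode(indio_dev);
	return ret;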
@@ -270,7 +280,31 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ccs811_info = {
.read_raw = ccs811_read_raw,
- .driver_module = THIS_MODULE,
+};
+
+static int ccs811_set_trigger_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct ccs811_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, CCS811_MEAS_MODE);
+ if (ret < 0)
+ return ret;
+
+ if (state)
+ ret |= CCS811_MEAS_MODE_INTERRUPT;
+ else
+ ret &= ~CCS811_MEAS_MODE_INTERRUPT;
+
+ data->drdy_trig_on = state;
+
+ return i2c_smbus_write_byte_data(data->client, CCS811_MEAS_MODE, ret);
+}
+
+static const struct iio_trigger_ops ccs811_trigger_ops = {
+ .set_trigger_state = ccs811_set_trigger_state,
};
static irqreturn_t ccs811_trigger_handler(int irq, void *p)
@@ -298,6 +332,17 @@ err:
return IRQ_HANDLED;
}
+static irqreturn_t ccs811_data_rdy_trigger_poll(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct ccs811_data *data = iio_priv(indio_dev);
+
+ if (data->drdy_trig_on)
+ iio_trigger_poll(data->drdy_trig);
+
+ return IRQ_HANDLED;
+}
+
static int ccs811_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -346,16 +391,48 @@ static int ccs811_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = &ccs811_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = ccs811_channels;
indio_dev->num_channels = ARRAY_SIZE(ccs811_channels);
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ccs811_data_rdy_trigger_poll,
+ NULL,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "ccs811_irq", indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ goto err_poweroff;
+ }
+
+ data->drdy_trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!data->drdy_trig) {
+ ret = -ENOMEM;
+ goto err_poweroff;
+ }
+
+ data->drdy_trig->dev.parent = &client->dev;
+ data->drdy_trig->ops = &ccs811_trigger_ops;
+ iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
+ indio_dev->trig = data->drdy_trig;
+ iio_trigger_get(indio_dev->trig);
+ ret = iio_trigger_register(data->drdy_trig);
+ if (ret)
+ goto err_poweroff;
+ }
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
ccs811_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "triggered buffer setup failed\n");
- goto err_poweroff;
+ goto err_trigger_unregister;
}
ret = iio_device_register(indio_dev);
@@ -367,6 +444,9 @@ static int ccs811_probe(struct i2c_client *client,
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ if (data->drdy_trig)
+ iio_trigger_unregister(data->drdy_trig);
err_poweroff:
i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE, CCS811_MODE_IDLE);
@@ -376,9 +456,12 @@ err_poweroff:
static int ccs811_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ccs811_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ if (data->drdy_trig)
+ iio_trigger_unregister(data->drdy_trig);
return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
CCS811_MODE_IDLE);
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index f75eea6822f2..9c9095ba4227 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -326,7 +326,6 @@ static int vz89x_read_raw(struct iio_dev *indio_dev,
static const struct iio_info vz89x_info = {
.attrs = &vz89x_attrs_group,
.read_raw = vz89x_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct vz89x_chip_data vz89x_chips[] = {
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 6fa760e1bdd5..4bc30bb548e2 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the IIO common modules.
# Common modules contains modules, which can be shared among multiple
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 38e8783e4b05..ed8063f2da99 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -185,7 +185,6 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
static const struct iio_info ec_sensors_info = {
.read_raw = &cros_ec_sensors_read,
.write_raw = &cros_ec_sensors_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_sensors_probe(struct platform_device *pdev)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 0e4b379ada45..cfb6588565ba 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -179,6 +179,10 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
int ret;
atomic_set(&st->user_requested_state, state);
+
+ if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
+ pm_runtime_enable(&st->pdev->dev);
+
if (state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
@@ -221,7 +225,8 @@ static void hid_sensor_set_power_work(struct work_struct *work)
if (attrb->latency_ms > 0)
hid_sensor_set_report_latency(attrb, attrb->latency_ms);
- _hid_sensor_power_state(attrb, true);
+ if (atomic_read(&attrb->user_requested_state))
+ _hid_sensor_power_state(attrb, true);
}
static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
@@ -232,7 +237,9 @@ static int hid_sensor_data_rdy_trigger_set_state(struct iio_trigger *trig,
void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
{
- pm_runtime_disable(&attrb->pdev->dev);
+ if (atomic_read(&attrb->runtime_pm_enable))
+ pm_runtime_disable(&attrb->pdev->dev);
+
pm_runtime_set_suspended(&attrb->pdev->dev);
pm_runtime_put_noidle(&attrb->pdev->dev);
@@ -243,7 +250,6 @@ void hid_sensor_remove_trigger(struct hid_sensor_common *attrb)
EXPORT_SYMBOL(hid_sensor_remove_trigger);
static const struct iio_trigger_ops hid_sensor_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &hid_sensor_data_rdy_trigger_set_state,
};
@@ -283,7 +289,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
INIT_WORK(&attrb->work, hid_sensor_set_power_work);
pm_suspend_ignore_children(&attrb->pdev->dev, true);
- pm_runtime_enable(&attrb->pdev->dev);
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
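With pm_runtime_enable() dropped from trigger setup, hid_sensor_power_state() now enables runtime PM lazily on first use and hid_sensor_remove_trigger() only disables it if that ever happened, both keyed on the new runtime_pm_enable counter. atomic_add_unless() bumps the counter from 0 to 1 at most once, so the enable/disable calls stay balanced; condensed from the two hunks above:

	/* power-state path: runs the enable exactly once */
	if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
		pm_runtime_enable(&st->pdev->dev);

	/* teardown path: only undo what was actually enabled */
	if (atomic_read(&attrb->runtime_pm_enable))
		pm_runtime_disable(&attrb->pdev->dev);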
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index ea7adb638d99..2ba2ff5e59c4 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -175,9 +175,9 @@ static void ssp_wdt_work_func(struct work_struct *work)
data->timeout_cnt = 0;
}
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
{
- struct ssp_data *data = (struct ssp_data *)ptr;
+ struct ssp_data *data = from_timer(data, t, wdt_timer);
switch (data->fw_dl_state) {
case SSP_FW_DL_STATE_FAIL:
@@ -571,7 +571,7 @@ static int ssp_probe(struct spi_device *spi)
INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
- setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+ timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
ret = request_threaded_irq(data->spi->irq, NULL,
ssp_irq_thread_fn,
diff --git a/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h b/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h
index 541c6590d69c..4528ab55eb68 100644
--- a/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h
+++ b/drivers/iio/common/ssp_sensors/ssp_iio_sensor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SSP_IIO_SENSOR_H__
#define __SSP_IIO_SENSOR_H__
diff --git a/drivers/iio/common/st_sensors/Makefile b/drivers/iio/common/st_sensors/Makefile
index 9f3e24f3024b..f7fb3b79b64c 100644
--- a/drivers/iio/common/st_sensors/Makefile
+++ b/drivers/iio/common/st_sensors/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the STMicroelectronics sensor common modules.
#
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 02e833b14db0..57db19182e95 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -93,6 +93,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ if (!sdata->sensor_settings->odr.addr)
+ return 0;
+
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
if (err < 0)
goto st_sensors_match_odr_error;
@@ -221,11 +224,14 @@ EXPORT_SYMBOL(st_sensors_set_enable);
int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int err = 0;
- return st_sensors_write_data_with_mask(indio_dev,
+ if (sdata->sensor_settings->enable_axis.addr)
+ err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor_settings->enable_axis.addr,
sdata->sensor_settings->enable_axis.mask,
axis_enable);
+ return err;
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
@@ -283,7 +289,8 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Sensor does not support interrupts */
- if (sdata->sensor_settings->drdy_irq.addr == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int1.addr &&
+ !sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
dev_info(&indio_dev->dev,
"DRDY on pin INT%d specified, but sensor "
@@ -294,7 +301,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
switch (pdata->drdy_int_pin) {
case 1:
- if (sdata->sensor_settings->drdy_irq.mask_int1 == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int1.mask) {
dev_err(&indio_dev->dev,
"DRDY on INT1 not available.\n");
return -EINVAL;
@@ -302,7 +309,7 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
sdata->drdy_int_pin = 1;
break;
case 2:
- if (sdata->sensor_settings->drdy_irq.mask_int2 == 0) {
+ if (!sdata->sensor_settings->drdy_irq.int2.mask) {
dev_err(&indio_dev->dev,
"DRDY on INT2 not available.\n");
return -EINVAL;
@@ -315,7 +322,8 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
}
if (pdata->open_drain) {
- if (!sdata->sensor_settings->drdy_irq.addr_od)
+ if (!sdata->sensor_settings->drdy_irq.int1.addr_od &&
+ !sdata->sensor_settings->drdy_irq.int2.addr_od)
dev_err(&indio_dev->dev,
"open drain requested but unsupported.\n");
else
@@ -442,11 +450,21 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
}
if (sdata->int_pin_open_drain) {
+ u8 addr, mask;
+
+ if (sdata->drdy_int_pin == 1) {
+ addr = sdata->sensor_settings->drdy_irq.int1.addr_od;
+ mask = sdata->sensor_settings->drdy_irq.int1.mask_od;
+ } else {
+ addr = sdata->sensor_settings->drdy_irq.int2.addr_od;
+ mask = sdata->sensor_settings->drdy_irq.int2.mask_od;
+ }
+
dev_info(&indio_dev->dev,
- "set interrupt line to open drain mode\n");
- err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor_settings->drdy_irq.addr_od,
- sdata->sensor_settings->drdy_irq.mask_od, 1);
+ "set interrupt line to open drain mode on pin %d\n",
+ sdata->drdy_int_pin);
+ err = st_sensors_write_data_with_mask(indio_dev, addr,
+ mask, 1);
if (err < 0)
return err;
}
@@ -460,17 +478,18 @@ EXPORT_SYMBOL(st_sensors_init_sensor);
int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
{
int err;
- u8 drdy_mask;
+ u8 drdy_addr, drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
- if (!sdata->sensor_settings->drdy_irq.addr) {
+ if (!sdata->sensor_settings->drdy_irq.int1.addr &&
+ !sdata->sensor_settings->drdy_irq.int2.addr) {
/*
* there are some devices (e.g. LIS3MDL) where drdy line is
* routed to a given pin and it is not possible to select a
* different one. Take into account irq status register
* to understand if irq trigger can be properly supported
*/
- if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ if (sdata->sensor_settings->drdy_irq.stat_drdy.addr)
sdata->hw_irq_trigger = enable;
return 0;
}
@@ -485,18 +504,20 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
goto st_accel_set_dataready_irq_error;
}
- if (sdata->drdy_int_pin == 1)
- drdy_mask = sdata->sensor_settings->drdy_irq.mask_int1;
- else
- drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+ if (sdata->drdy_int_pin == 1) {
+ drdy_addr = sdata->sensor_settings->drdy_irq.int1.addr;
+ drdy_mask = sdata->sensor_settings->drdy_irq.int1.mask;
+ } else {
+ drdy_addr = sdata->sensor_settings->drdy_irq.int2.addr;
+ drdy_mask = sdata->sensor_settings->drdy_irq.int2.mask;
+ }
/* Flag to the poll function that the hardware trigger is in use */
sdata->hw_irq_trigger = enable;
/* Enable/Disable the interrupt generator for data ready. */
- err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor_settings->drdy_irq.addr,
- drdy_mask, (int)enable);
+ err = st_sensors_write_data_with_mask(indio_dev, drdy_addr,
+ drdy_mask, (int)enable);
st_accel_set_dataready_irq_error:
return err;
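The accesses above assume a reworked struct st_sensor_data_ready_irq in which the per-pin DRDY enable and open-drain fields are grouped under int1/int2 and the status register moves under stat_drdy. The declaration itself is not part of this diff, so the following layout is only a sketch inferred from the field names used here:

	struct st_sensor_data_ready_irq {
		struct {
			u8 addr;	/* DRDY enable register for INT1 */
			u8 mask;
			u8 addr_od;	/* open-drain setting for INT1 */
			u8 mask_od;
		} int1;
		struct {
			u8 addr;	/* same, for INT2 */
			u8 mask;
			u8 addr_od;
			u8 mask_od;
		} int2;
		struct {
			u8 addr;	/* interrupt status register */
			u8 mask;	/* bits flagging new samples */
		} stat_drdy;
		/* remaining fields unchanged */
	};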
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.h b/drivers/iio/common/st_sensors/st_sensors_core.h
index cd88098ff6f1..e8894be55660 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.h
+++ b/drivers/iio/common/st_sensors/st_sensors_core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Local functions in the ST Sensors core
*/
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index fa73e6795359..fdcc5a891958 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -31,7 +31,7 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
int ret;
/* How would I know if I can't check it? */
- if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
return -EINVAL;
/* No scan mask, no interrupt */
@@ -39,23 +39,15 @@ static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
return 0;
ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
- sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr,
&status);
if (ret < 0) {
dev_err(sdata->dev,
"error checking samples available\n");
return ret;
}
- /*
- * the lower bits of .active_scan_mask[0] is directly mapped
- * to the channels on the sensor: either bit 0 for
- * one-dimensional sensors, or e.g. x,y,z for accelerometers,
- * gyroscopes or magnetometers. No sensor use more than 3
- * channels, so cut the other status bits here.
- */
- status &= 0x07;
- if (status & (u8)indio_dev->active_scan_mask[0])
+ if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
return 1;
return 0;
@@ -212,7 +204,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
* it was "our" interrupt.
*/
if (sdata->int_pin_open_drain &&
- sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ sdata->sensor_settings->drdy_irq.stat_drdy.addr)
irq_trig |= IRQF_SHARED;
err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index ba3d9030cd51..b56985078d8c 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -185,7 +185,6 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info quad8_info = {
- .driver_module = THIS_MODULE,
.read_raw = quad8_read_raw,
.write_raw = quad8_write_raw
};
diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c
index 1c5909bb1605..81ae5f74216d 100644
--- a/drivers/iio/counter/stm32-lptimer-cnt.c
+++ b/drivers/iio/counter/stm32-lptimer-cnt.c
@@ -178,7 +178,6 @@ static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
static const struct iio_info stm32_lptim_cnt_iio_info = {
.read_raw = stm32_lptim_read_raw,
.write_raw = stm32_lptim_write_raw,
- .driver_module = THIS_MODULE,
};
static const char *const stm32_lptim_quadrature_modes[] = {
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 25bed2d7d2b9..965d5c0d2468 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -60,7 +60,8 @@ config AD5446
Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5601, AD5602, AD5611, AD5612,
- AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs.
+ AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
+ as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
To compile this driver as a module, choose M here: the
module will be called ad5446.
@@ -221,6 +222,15 @@ config DPOT_DAC
To compile this driver as a module, choose M here: the module will be
called dpot-dac.
+config DS4424
+ tristate "Maxim Integrated DS4422/DS4424 DAC driver"
+ depends on I2C
+ help
+ If you say yes here you get support for Maxim chips DS4422, DS4424.
+
+ This driver can also be built as a module. If so, the module
+ will be called ds4424.
+
config LPC18XX_DAC
tristate "NXP LPC18xx DAC driver"
depends on ARCH_LPC18XX || COMPILE_TEST
@@ -300,6 +310,16 @@ config STM32_DAC
config STM32_DAC_CORE
tristate
+config TI_DAC082S085
+ tristate "Texas Instruments 8/10/12-bit 2/4-channel DAC driver"
+ depends on SPI_MASTER
+ help
+ Driver for the Texas Instruments (formerly National Semiconductor)
+ DAC082S085, DAC102S085, DAC122S085, DAC084S085, DAC104S085 and
+ DAC124S085.
+
+ If compiled as a module, it will be called ti-dac082s085.
+
config VF610_DAC
tristate "Vybrid vf610 DAC driver"
depends on OF
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 603587cc2f07..81e710ed7491 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O DAC drivers
#
@@ -23,6 +24,7 @@ obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
+obj-$(CONFIG_DS4424) += ds4424.o
obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_LTC2632) += ltc2632.o
obj-$(CONFIG_M62332) += m62332.o
@@ -32,4 +34,5 @@ obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
obj-$(CONFIG_STM32_DAC) += stm32-dac.o
+obj-$(CONFIG_TI_DAC082S085) += ti-dac082s085.o
obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 3f9399c27869..bf4fc40ec84d 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -366,7 +366,6 @@ static int ad5064_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5064_info = {
.read_raw = ad5064_read_raw,
.write_raw = ad5064_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 8ba0e9c50176..0209316d5566 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -425,7 +425,6 @@ static const struct iio_info ad5360_info = {
.read_raw = ad5360_read_raw,
.write_raw = ad5360_write_raw,
.attrs = &ad5360_attribute_group,
- .driver_module = THIS_MODULE,
};
static const char * const ad5360_vref_name[] = {
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 97d2c5111f43..845fd1c0fd9d 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -237,7 +237,6 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5380_info = {
.read_raw = ad5380_read_raw,
.write_raw = ad5380_write_raw,
- .driver_module = THIS_MODULE,
};
static struct iio_chan_spec_ext_info ad5380_ext_info[] = {
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 559061ab1982..8e9633d8de67 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -465,7 +465,6 @@ static const struct iio_info ad5421_info = {
.read_event_config = ad5421_read_event_config,
.write_event_config = ad5421_write_event_config,
.read_event_value = ad5421_read_event_value,
- .driver_module = THIS_MODULE,
};
static int ad5421_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index b555552a0d80..fd26a4272fc5 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -212,7 +212,6 @@ static int ad5446_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5446_info = {
.read_raw = ad5446_read_raw,
.write_raw = ad5446_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5446_probe(struct device *dev, const char *name,
@@ -461,10 +460,22 @@ static const struct spi_device_id ad5446_spi_ids[] = {
{"ad5660-2500", ID_AD5660_2500},
{"ad5660-1250", ID_AD5660_1250},
{"ad5662", ID_AD5662},
+ {"dac081s101", ID_AD5300}, /* compatible Texas Instruments chips */
+ {"dac101s101", ID_AD5310},
+ {"dac121s101", ID_AD5320},
+ {"dac7512", ID_AD5320},
{}
};
MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
+#ifdef CONFIG_OF
+static const struct of_device_id ad5446_of_ids[] = {
+ { .compatible = "ti,dac7512" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5446_of_ids);
+#endif
+
static int ad5446_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
@@ -481,6 +492,7 @@ static int ad5446_spi_remove(struct spi_device *spi)
static struct spi_driver ad5446_spi_driver = {
.driver = {
.name = "ad5446",
+ .of_match_table = of_match_ptr(ad5446_of_ids),
},
.probe = ad5446_spi_probe,
.remove = ad5446_spi_remove,
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index 5f3202339420..317a74129932 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -193,7 +193,6 @@ static int ad5449_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5449_info = {
.read_raw = ad5449_read_raw,
.write_raw = ad5449_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD5449_CHANNEL(chan, bits) { \
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 712d86b4be09..d9037ea59168 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -232,7 +232,6 @@ static const struct iio_info ad5504_info = {
.write_raw = ad5504_write_raw,
.read_raw = ad5504_read_raw,
.event_attrs = &ad5504_ev_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 69bde5909854..9234c6a09a93 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -474,7 +474,6 @@ static const struct iio_info ad5592r_info = {
.read_raw = ad5592r_read_raw,
.write_raw = ad5592r_write_raw,
.write_raw_get_fmt = ad5592r_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static ssize_t ad5592r_show_scale_available(struct iio_dev *iio_dev,
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 5489ec43b95d..13fdb4dfe356 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -149,7 +149,6 @@ static ssize_t ad5624r_write_dac_powerdown(struct iio_dev *indio_dev,
static const struct iio_info ad5624r_info = {
.write_raw = ad5624r_write_raw,
.read_raw = ad5624r_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index d1d8450c19f6..20254df7f9c7 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -252,7 +252,6 @@ static int ad5686_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5686_info = {
.read_raw = ad5686_read_raw,
.write_raw = ad5686_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 5f7968232564..2d03cc89ba50 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -417,7 +417,6 @@ static ssize_t ad5755_write_powerdown(struct iio_dev *indio_dev, uintptr_t priv,
static const struct iio_info ad5755_info = {
.read_raw = ad5755_read_raw,
.write_raw = ad5755_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5755_ext_info[] = {
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
index d6510d6928b3..05017c8bbd00 100644
--- a/drivers/iio/dac/ad5761.c
+++ b/drivers/iio/dac/ad5761.c
@@ -251,7 +251,6 @@ static int ad5761_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5761_info = {
.read_raw = &ad5761_read_raw,
.write_raw = &ad5761_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD5761_CHAN(_bits) { \
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 9a547bbf7d2b..033f20eca616 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -268,7 +268,6 @@ static int ad5764_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5764_info = {
.read_raw = ad5764_read_raw,
.write_raw = ad5764_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5764_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 33e4ae5c42f8..7569bf6868c2 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -340,7 +340,6 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad5791_info = {
.read_raw = &ad5791_read_raw,
.write_raw = &ad5791_write_raw,
- .driver_module = THIS_MODULE,
};
static int ad5791_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index 4b0f942b8914..8f3bd19b6dc3 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -161,7 +161,6 @@ static int ad7303_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ad7303_info = {
.read_raw = ad7303_read_raw,
.write_raw = ad7303_write_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
diff --git a/drivers/iio/dac/ad8801.c b/drivers/iio/dac/ad8801.c
index f06faa1aec09..aef5808c9865 100644
--- a/drivers/iio/dac/ad8801.c
+++ b/drivers/iio/dac/ad8801.c
@@ -92,7 +92,6 @@ static int ad8801_read_raw(struct iio_dev *indio_dev,
static const struct iio_info ad8801_info = {
.read_raw = ad8801_read_raw,
.write_raw = ad8801_write_raw,
- .driver_module = THIS_MODULE,
};
#define AD8801_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
index a8dffd938615..6898b0c79013 100644
--- a/drivers/iio/dac/cio-dac.c
+++ b/drivers/iio/dac/cio-dac.c
@@ -85,7 +85,6 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cio_dac_info = {
- .driver_module = THIS_MODULE,
.read_raw = cio_dac_read_raw,
.write_raw = cio_dac_write_raw
};
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 960a2b430480..aaa2103d7c2b 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -128,7 +128,6 @@ static const struct iio_info dpot_dac_info = {
.read_raw = dpot_dac_read_raw,
.read_avail = dpot_dac_read_avail,
.write_raw = dpot_dac_write_raw,
- .driver_module = THIS_MODULE,
};
static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
new file mode 100644
index 000000000000..883a47562055
--- /dev/null
+++ b/drivers/iio/dac/ds4424.c
@@ -0,0 +1,341 @@
+/*
+ * Maxim Integrated
+ * 7-bit, Multi-Channel Sink/Source Current DAC Driver
+ * Copyright (C) 2017 Maxim Integrated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
+#include <linux/iio/consumer.h>
+
+#define DS4422_MAX_DAC_CHANNELS 2
+#define DS4424_MAX_DAC_CHANNELS 4
+
+#define DS4424_DAC_ADDR(chan) ((chan) + 0xf8)
+#define DS4424_SOURCE_I 1
+#define DS4424_SINK_I 0
+
+#define DS4424_CHANNEL(chan) { \
+ .type = IIO_CURRENT, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+}
+
+/*
+ * DS4424 DAC control register 8 bits
+ * [7] 0: to sink; 1: to source
+ * [6:0] steps to sink/source
+ * bit[7] looks like a sign bit, but the register value is not a
+ * two's complement code: bits[6:0] hold the absolute distance from
+ * the zero point.
+ */
+union ds4424_raw_data {
+ struct {
+ u8 dx:7;
+ u8 source_bit:1;
+ };
+ u8 bits;
+};
+
+enum ds4424_device_ids {
+ ID_DS4422,
+ ID_DS4424,
+};
+
+struct ds4424_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ uint8_t save[DS4424_MAX_DAC_CHANNELS];
+ struct regulator *vcc_reg;
+ uint8_t raw[DS4424_MAX_DAC_CHANNELS];
+};
+
+static const struct iio_chan_spec ds4424_channels[] = {
+ DS4424_CHANNEL(0),
+ DS4424_CHANNEL(1),
+ DS4424_CHANNEL(2),
+ DS4424_CHANNEL(3),
+};
+
+static int ds4424_get_value(struct iio_dev *indio_dev,
+ int *val, int channel)
+{
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_read_byte_data(data->client, DS4424_DAC_ADDR(channel));
+ if (ret < 0)
+ goto fail;
+
+ *val = ret;
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int ds4424_set_value(struct iio_dev *indio_dev,
+ int val, struct iio_chan_spec const *chan)
+{
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = i2c_smbus_write_byte_data(data->client,
+ DS4424_DAC_ADDR(chan->channel), val);
+ if (ret < 0)
+ goto fail;
+
+ data->raw[chan->channel] = val;
+
+fail:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int ds4424_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ union ds4424_raw_data raw;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ds4424_get_value(indio_dev, val, chan->channel);
+ if (ret < 0) {
+ pr_err("%s : ds4424_get_value returned %d\n",
+ __func__, ret);
+ return ret;
+ }
+ raw.bits = *val;
+ *val = raw.dx;
+ if (raw.source_bit == DS4424_SINK_I)
+ *val = -*val;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ds4424_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ union ds4424_raw_data raw;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < S8_MIN || val > S8_MAX)
+ return -EINVAL;
+
+ if (val > 0) {
+ raw.source_bit = DS4424_SOURCE_I;
+ raw.dx = val;
+ } else {
+ raw.source_bit = DS4424_SINK_I;
+ raw.dx = -val;
+ }
+
+ return ds4424_set_value(indio_dev, raw.bits, chan);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ds4424_verify_chip(struct iio_dev *indio_dev)
+{
+ int ret, val;
+
+ ret = ds4424_get_value(indio_dev, &val, 0);
+ if (ret < 0)
+ dev_err(&indio_dev->dev,
+ "%s failed. ret: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int __maybe_unused ds4424_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ data->save[i] = data->raw[i];
+ ret = ds4424_set_value(indio_dev, 0,
+ &indio_dev->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static int __maybe_unused ds4424_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = ds4424_set_value(indio_dev, data->save[i],
+ &indio_dev->channels[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ds4424_pm_ops, ds4424_suspend, ds4424_resume);
+
+static const struct iio_info ds4424_info = {
+ .read_raw = ds4424_read_raw,
+ .write_raw = ds4424_write_raw,
+};
+
+static int ds4424_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ds4424_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio dev alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ indio_dev->name = id->name;
+ indio_dev->dev.of_node = client->dev.of_node;
+ indio_dev->dev.parent = &client->dev;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev,
+ "Not found DT.\n");
+ return -ENODEV;
+ }
+
+ data->vcc_reg = devm_regulator_get(&client->dev, "vcc");
+ if (IS_ERR(data->vcc_reg)) {
+ dev_err(&client->dev,
+ "Failed to get vcc-supply regulator. err: %ld\n",
+ PTR_ERR(data->vcc_reg));
+ return PTR_ERR(data->vcc_reg);
+ }
+
+ mutex_init(&data->lock);
+ ret = regulator_enable(data->vcc_reg);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Unable to enable the regulator.\n");
+ return ret;
+ }
+
+ usleep_range(1000, 1200);
+ ret = ds4424_verify_chip(indio_dev);
+ if (ret < 0)
+ goto fail;
+
+ switch (id->driver_data) {
+ case ID_DS4422:
+ indio_dev->num_channels = DS4422_MAX_DAC_CHANNELS;
+ break;
+ case ID_DS4424:
+ indio_dev->num_channels = DS4424_MAX_DAC_CHANNELS;
+ break;
+ default:
+ dev_err(&client->dev,
+ "ds4424: Invalid chip id.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ indio_dev->channels = ds4424_channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ds4424_info;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "iio_device_register failed. ret: %d\n", ret);
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ regulator_disable(data->vcc_reg);
+ return ret;
+}
+
+static int ds4424_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ds4424_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(data->vcc_reg);
+
+ return 0;
+}
+
+static const struct i2c_device_id ds4424_id[] = {
+ { "ds4422", ID_DS4422 },
+ { "ds4424", ID_DS4424 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, ds4424_id);
+
+static const struct of_device_id ds4424_of_match[] = {
+ { .compatible = "maxim,ds4422" },
+ { .compatible = "maxim,ds4424" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, ds4424_of_match);
+
+static struct i2c_driver ds4424_driver = {
+ .driver = {
+ .name = "ds4424",
+ .of_match_table = ds4424_of_match,
+ .pm = &ds4424_pm_ops,
+ },
+ .probe = ds4424_probe,
+ .remove = ds4424_remove,
+ .id_table = ds4424_id,
+};
+module_i2c_driver(ds4424_driver);
+
+MODULE_DESCRIPTION("Maxim DS4424 DAC Driver");
+MODULE_AUTHOR("Ismail H. Kose <ismail.kose@maximintegrated.com>");
+MODULE_AUTHOR("Vishal Sood <vishal.sood@maximintegrated.com>");
+MODULE_AUTHOR("David Jung <david.jung@maximintegrated.com>");
+MODULE_LICENSE("GPL v2");
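The ds4424_raw_data union earlier in this file maps a signed step count onto the chip's sign-magnitude register: ds4424_write_raw() stores the magnitude in dx and the direction in source_bit. For illustration, with assumed values and relying on the same little-endian bitfield layout the driver itself depends on, +5 source steps encode as 0x85 and 5 sink steps as 0x05:

	union ds4424_raw_data raw;

	raw.source_bit = DS4424_SOURCE_I;	/* bit 7: source current */
	raw.dx = 5;				/* bits 6:0: magnitude */
	/* raw.bits == 0x85 */

	raw.source_bit = DS4424_SINK_I;		/* sink current */
	raw.dx = 5;
	/* raw.bits == 0x05 */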
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 55d1456a059d..7036f77fdf23 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -103,7 +103,6 @@ static int lpc18xx_dac_write_raw(struct iio_dev *indio_dev,
static const struct iio_info lpc18xx_dac_info = {
.read_raw = lpc18xx_dac_read_raw,
.write_raw = lpc18xx_dac_write_raw,
- .driver_module = THIS_MODULE,
};
static int lpc18xx_dac_probe(struct platform_device *pdev)
diff --git a/drivers/iio/dac/ltc2632.c b/drivers/iio/dac/ltc2632.c
index ac5e05f6eb8b..af2ddd0dd341 100644
--- a/drivers/iio/dac/ltc2632.c
+++ b/drivers/iio/dac/ltc2632.c
@@ -159,7 +159,6 @@ static ssize_t ltc2632_write_dac_powerdown(struct iio_dev *indio_dev,
static const struct iio_info ltc2632_info = {
.write_raw = ltc2632_write_raw,
.read_raw = ltc2632_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ltc2632_ext_info[] = {
diff --git a/drivers/iio/dac/m62332.c b/drivers/iio/dac/m62332.c
index 76e8b044b979..19031943dabe 100644
--- a/drivers/iio/dac/m62332.c
+++ b/drivers/iio/dac/m62332.c
@@ -174,7 +174,6 @@ static SIMPLE_DEV_PM_OPS(m62332_pm_ops, m62332_suspend, m62332_resume);
static const struct iio_info m62332_info = {
.read_raw = m62332_read_raw,
.write_raw = m62332_write_raw,
- .driver_module = THIS_MODULE,
};
#define M62332_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index 5507b3970b4b..1d853247a205 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -137,7 +137,6 @@ static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
static const struct iio_info max517_info = {
.read_raw = max517_read_raw,
.write_raw = max517_write_raw,
- .driver_module = THIS_MODULE,
};
#define MAX517_CHANNEL(chan) { \
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 193fac3059a3..d0ecc1fdd8fc 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -300,7 +300,6 @@ static SIMPLE_DEV_PM_OPS(max5821_pm_ops, max5821_suspend, max5821_resume);
static const struct iio_info max5821_info = {
.read_raw = max5821_read_raw,
.write_raw = max5821_write_raw,
- .driver_module = THIS_MODULE,
};
static int max5821_probe(struct i2c_client *client,
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 6ab1f23e5a79..afa856d10c26 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -363,7 +363,6 @@ static const struct iio_info mcp4725_info = {
.read_raw = mcp4725_read_raw,
.write_raw = mcp4725_write_raw,
.attrs = &mcp4725_attribute_group,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/dac/mcp4922.c b/drivers/iio/dac/mcp4922.c
index 3854d201a5d6..bf9aa3fc0534 100644
--- a/drivers/iio/dac/mcp4922.c
+++ b/drivers/iio/dac/mcp4922.c
@@ -119,7 +119,6 @@ static const struct iio_chan_spec mcp4922_channels[3][MCP4922_NUM_CHANNELS] = {
static const struct iio_info mcp4922_info = {
.read_raw = &mcp4922_read_raw,
.write_raw = &mcp4922_write_raw,
- .driver_module = THIS_MODULE,
};
static int mcp4922_probe(struct spi_device *spi)
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index c1864e8aa851..9ffab02bf9f9 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -156,7 +156,6 @@ static const struct iio_info stm32_dac_iio_info = {
.read_raw = stm32_dac_read_raw,
.write_raw = stm32_dac_write_raw,
.debugfs_reg_access = stm32_dac_debugfs_reg_access,
- .driver_module = THIS_MODULE,
};
static const char * const stm32_dac_powerdown_modes[] = {
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
new file mode 100644
index 000000000000..4e1e28339c84
--- /dev/null
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -0,0 +1,368 @@
+/*
+ * ti-dac082s085.c - Texas Instruments 8/10/12-bit 2/4-channel DAC driver
+ *
+ * Copyright (C) 2017 KUNBUS GmbH
+ *
+ * http://www.ti.com/lit/ds/symlink/dac082s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac102s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac122s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac084s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac104s085.pdf
+ * http://www.ti.com/lit/ds/symlink/dac124s085.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+enum { dual_8bit, dual_10bit, dual_12bit, quad_8bit, quad_10bit, quad_12bit };
+
+struct ti_dac_spec {
+ u8 num_channels;
+ u8 resolution;
+};
+
+static const struct ti_dac_spec ti_dac_spec[] = {
+ [dual_8bit] = { .num_channels = 2, .resolution = 8 },
+ [dual_10bit] = { .num_channels = 2, .resolution = 10 },
+ [dual_12bit] = { .num_channels = 2, .resolution = 12 },
+ [quad_8bit] = { .num_channels = 4, .resolution = 8 },
+ [quad_10bit] = { .num_channels = 4, .resolution = 10 },
+ [quad_12bit] = { .num_channels = 4, .resolution = 12 },
+};
+
+/**
+ * struct ti_dac_chip - TI DAC chip
+ * @lock: protects write sequences
+ * @vref: regulator generating Vref
+ * @mesg: SPI message to perform a write
+ * @xfer: SPI transfer used by @mesg
+ * @val: cached value of each output
+ * @powerdown: whether the chip is powered down
+ * @powerdown_mode: selected by the user
+ * @resolution: resolution of the chip
+ * @buf: buffer for @xfer
+ */
+struct ti_dac_chip {
+ struct mutex lock;
+ struct regulator *vref;
+ struct spi_message mesg;
+ struct spi_transfer xfer;
+ u16 val[4];
+ bool powerdown;
+ u8 powerdown_mode;
+ u8 resolution;
+ u8 buf[2] ____cacheline_aligned;
+};
+
+#define WRITE_NOT_UPDATE(chan) (0x00 | (chan) << 6)
+#define WRITE_AND_UPDATE(chan) (0x10 | (chan) << 6)
+#define WRITE_ALL_UPDATE 0x20
+#define POWERDOWN(mode) (0x30 | ((mode) + 1) << 6)
+
+static int ti_dac_cmd(struct ti_dac_chip *ti_dac, u8 cmd, u16 val)
+{
+ u8 shift = 12 - ti_dac->resolution;
+
+ ti_dac->buf[0] = cmd | (val >> (8 - shift));
+ ti_dac->buf[1] = (val << shift) & 0xff;
+ return spi_sync(ti_dac->mesg.spi, &ti_dac->mesg);
+}
+
+static const char * const ti_dac_powerdown_modes[] = {
+ "2.5kohm_to_gnd", "100kohm_to_gnd", "three_state",
+};
+
+static int ti_dac_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ return ti_dac->powerdown_mode;
+}
+
+static int ti_dac_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (ti_dac->powerdown_mode == mode)
+ return 0;
+
+ mutex_lock(&ti_dac->lock);
+ if (ti_dac->powerdown) {
+ ret = ti_dac_cmd(ti_dac, POWERDOWN(mode), 0);
+ if (ret)
+ goto out;
+ }
+ ti_dac->powerdown_mode = mode;
+
+out:
+ mutex_unlock(&ti_dac->lock);
+ return ret;
+}
+
+static const struct iio_enum ti_dac_powerdown_mode = {
+ .items = ti_dac_powerdown_modes,
+ .num_items = ARRAY_SIZE(ti_dac_powerdown_modes),
+ .get = ti_dac_get_powerdown_mode,
+ .set = ti_dac_set_powerdown_mode,
+};
+
+static ssize_t ti_dac_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", ti_dac->powerdown);
+}
+
+static ssize_t ti_dac_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ bool powerdown;
+ int ret;
+
+ ret = strtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ if (ti_dac->powerdown == powerdown)
+ return len;
+
+ mutex_lock(&ti_dac->lock);
+ if (powerdown)
+ ret = ti_dac_cmd(ti_dac, POWERDOWN(ti_dac->powerdown_mode), 0);
+ else
+ ret = ti_dac_cmd(ti_dac, WRITE_AND_UPDATE(0), ti_dac->val[0]);
+ if (!ret)
+ ti_dac->powerdown = powerdown;
+ mutex_unlock(&ti_dac->lock);
+
+ return ret ? ret : len;
+}
+
+static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ti_dac_read_powerdown,
+ .write = ti_dac_write_powerdown,
+ .shared = IIO_SHARED_BY_TYPE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ { },
+};
+
+#define TI_DAC_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .channel = (chan), \
+ .address = (chan), \
+ .indexed = true, \
+ .output = true, \
+ .datasheet_name = (const char[]){ 'A' + (chan), 0 }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = ti_dac_ext_info, \
+}
+
+static const struct iio_chan_spec ti_dac_channels[] = {
+ TI_DAC_CHANNEL(0),
+ TI_DAC_CHANNEL(1),
+ TI_DAC_CHANNEL(2),
+ TI_DAC_CHANNEL(3),
+};
+
+static int ti_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = ti_dac->val[chan->channel];
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(ti_dac->vref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / 1000;
+ *val2 = ti_dac->resolution;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ti_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (ti_dac->val[chan->channel] == val)
+ return 0;
+
+ if (val >= (1 << ti_dac->resolution) || val < 0)
+ return -EINVAL;
+
+ if (ti_dac->powerdown)
+ return -EBUSY;
+
+ mutex_lock(&ti_dac->lock);
+ ret = ti_dac_cmd(ti_dac, WRITE_AND_UPDATE(chan->channel), val);
+ if (!ret)
+ ti_dac->val[chan->channel] = val;
+ mutex_unlock(&ti_dac->lock);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ti_dac_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info ti_dac_info = {
+ .read_raw = ti_dac_read_raw,
+ .write_raw = ti_dac_write_raw,
+ .write_raw_get_fmt = ti_dac_write_raw_get_fmt,
+};
+
+static int ti_dac_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ const struct ti_dac_spec *spec;
+ struct ti_dac_chip *ti_dac;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*ti_dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &ti_dac_info;
+ indio_dev->name = spi->modalias;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ti_dac_channels;
+ spi_set_drvdata(spi, indio_dev);
+
+ ti_dac = iio_priv(indio_dev);
+ ti_dac->xfer.tx_buf = &ti_dac->buf;
+ ti_dac->xfer.len = sizeof(ti_dac->buf);
+ spi_message_init_with_transfers(&ti_dac->mesg, &ti_dac->xfer, 1);
+ ti_dac->mesg.spi = spi;
+
+ spec = &ti_dac_spec[spi_get_device_id(spi)->driver_data];
+ indio_dev->num_channels = spec->num_channels;
+ ti_dac->resolution = spec->resolution;
+
+ ti_dac->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(ti_dac->vref))
+ return PTR_ERR(ti_dac->vref);
+
+ ret = regulator_enable(ti_dac->vref);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&ti_dac->lock);
+
+ ret = ti_dac_cmd(ti_dac, WRITE_ALL_UPDATE, 0);
+ if (ret) {
+ dev_err(dev, "failed to initialize outputs to 0\n");
+ goto err;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ mutex_destroy(&ti_dac->lock);
+ regulator_disable(ti_dac->vref);
+ return ret;
+}
+
+static int ti_dac_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ti_dac_chip *ti_dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_destroy(&ti_dac->lock);
+ regulator_disable(ti_dac->vref);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ti_dac_of_id[] = {
+ { .compatible = "ti,dac082s085" },
+ { .compatible = "ti,dac102s085" },
+ { .compatible = "ti,dac122s085" },
+ { .compatible = "ti,dac084s085" },
+ { .compatible = "ti,dac104s085" },
+ { .compatible = "ti,dac124s085" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_dac_of_id);
+#endif
+
+static const struct spi_device_id ti_dac_spi_id[] = {
+ { "dac082s085", dual_8bit },
+ { "dac102s085", dual_10bit },
+ { "dac122s085", dual_12bit },
+ { "dac084s085", quad_8bit },
+ { "dac104s085", quad_10bit },
+ { "dac124s085", quad_12bit },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ti_dac_spi_id);
+
+static struct spi_driver ti_dac_driver = {
+ .driver = {
+ .name = "ti-dac082s085",
+ .of_match_table = of_match_ptr(ti_dac_of_id),
+ },
+ .probe = ti_dac_probe,
+ .remove = ti_dac_remove,
+ .id_table = ti_dac_spi_id,
+};
+module_spi_driver(ti_dac_driver);
+
+MODULE_AUTHOR("Lukas Wunner <lukas@wunner.de>");
+MODULE_DESCRIPTION("Texas Instruments 8/10/12-bit 2/4-channel DAC driver");
+MODULE_LICENSE("GPL v2");
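
The new driver packs every register write into one 16-bit SPI frame: the channel and command occupy the top nibble, and the sample is left-justified according to the part's resolution, so ti_dac_cmd() shifts by 12 - resolution bits before splitting the frame into two bytes. A minimal standalone sketch of that packing, assuming the 8-bit DAC082S085 variant (shift = 4) and the hypothetical value 0xAB written to channel B:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only, not part of the patch: mirrors the bit packing done by
 * ti_dac_cmd() for an 8-bit part (resolution = 8, hence shift = 4). */
int main(void)
{
	uint8_t resolution = 8;
	uint8_t shift = 12 - resolution;
	uint8_t cmd = 0x10 | (1 << 6);		/* WRITE_AND_UPDATE(channel B) */
	uint16_t val = 0xAB;			/* 8-bit sample */
	uint8_t buf[2];

	buf[0] = cmd | (val >> (8 - shift));	/* command nibble plus the 4 MSBs */
	buf[1] = (val << shift) & 0xff;		/* remaining bits, left-justified */
	printf("SPI frame: %02x %02x\n", buf[0], buf[1]);	/* prints 5a b0 */
	return 0;
}
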
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
index c4ec7779b394..5dccdd16cab3 100644
--- a/drivers/iio/dac/vf610_dac.c
+++ b/drivers/iio/dac/vf610_dac.c
@@ -167,7 +167,6 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info vf610_dac_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = &vf610_read_raw,
.write_raw = &vf610_write_raw,
};
diff --git a/drivers/iio/dummy/Kconfig b/drivers/iio/dummy/Kconfig
index aa5824d96a43..5a29fbd3c531 100644
--- a/drivers/iio/dummy/Kconfig
+++ b/drivers/iio/dummy/Kconfig
@@ -5,7 +5,7 @@ menu "IIO dummy driver"
depends on IIO
config IIO_DUMMY_EVGEN
- select IRQ_WORK
+ select IRQ_SIM
tristate
config IIO_SIMPLE_DUMMY
diff --git a/drivers/iio/dummy/Makefile b/drivers/iio/dummy/Makefile
index 0765e93d7804..f14fe20f365c 100644
--- a/drivers/iio/dummy/Makefile
+++ b/drivers/iio/dummy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the IIO Dummy Driver
#
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index 9e83f348df51..fe8884543da0 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -24,97 +24,46 @@
#include "iio_dummy_evgen.h"
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/irq_work.h>
+#include <linux/irq_sim.h>
/* Fiddly bit of faking an irq without hardware */
#define IIO_EVENTGEN_NO 10
/**
- * struct iio_dummy_handle_irq - helper struct to simulate interrupt generation
- * @work: irq_work used to run handlers from hardirq context
- * @irq: fake irq line number to trigger an interrupt
- */
-struct iio_dummy_handle_irq {
- struct irq_work work;
- int irq;
-};
-
-/**
- * struct iio_dummy_evgen - evgen state
- * @chip: irq chip we are faking
- * @base: base of irq range
- * @enabled: mask of which irqs are enabled
- * @inuse: mask of which irqs are connected
* @regs: irq regs we are faking
* @lock: protect the evgen state
- * @handler: helper for a 'hardware-like' interrupt simulation
+ * @inuse: mask of which irqs are connected
+ * @irq_sim: interrupt simulator
+ * @base: base of irq range
*/
struct iio_dummy_eventgen {
- struct irq_chip chip;
- int base;
- bool enabled[IIO_EVENTGEN_NO];
- bool inuse[IIO_EVENTGEN_NO];
struct iio_dummy_regs regs[IIO_EVENTGEN_NO];
struct mutex lock;
- struct iio_dummy_handle_irq handler;
+ bool inuse[IIO_EVENTGEN_NO];
+ struct irq_sim irq_sim;
+ int base;
};
/* We can only ever have one instance of this 'device' */
static struct iio_dummy_eventgen *iio_evgen;
-static const char *iio_evgen_name = "iio_dummy_evgen";
-
-static void iio_dummy_event_irqmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = false;
-}
-
-static void iio_dummy_event_irqunmask(struct irq_data *d)
-{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct iio_dummy_eventgen *evgen =
- container_of(chip, struct iio_dummy_eventgen, chip);
-
- evgen->enabled[d->irq - evgen->base] = true;
-}
-
-static void iio_dummy_work_handler(struct irq_work *work)
-{
- struct iio_dummy_handle_irq *irq_handler;
-
- irq_handler = container_of(work, struct iio_dummy_handle_irq, work);
- handle_simple_irq(irq_to_desc(irq_handler->irq));
-}
static int iio_dummy_evgen_create(void)
{
- int ret, i;
+ int ret;
iio_evgen = kzalloc(sizeof(*iio_evgen), GFP_KERNEL);
if (!iio_evgen)
return -ENOMEM;
- iio_evgen->base = irq_alloc_descs(-1, 0, IIO_EVENTGEN_NO, 0);
- if (iio_evgen->base < 0) {
- ret = iio_evgen->base;
+ ret = irq_sim_init(&iio_evgen->irq_sim, IIO_EVENTGEN_NO);
+ if (ret) {
kfree(iio_evgen);
return ret;
}
- iio_evgen->chip.name = iio_evgen_name;
- iio_evgen->chip.irq_mask = &iio_dummy_event_irqmask;
- iio_evgen->chip.irq_unmask = &iio_dummy_event_irqunmask;
- for (i = 0; i < IIO_EVENTGEN_NO; i++) {
- irq_set_chip(iio_evgen->base + i, &iio_evgen->chip);
- irq_set_handler(iio_evgen->base + i, &handle_simple_irq);
- irq_modify_status(iio_evgen->base + i,
- IRQ_NOREQUEST | IRQ_NOAUTOEN,
- IRQ_NOPROBE);
- }
- init_irq_work(&iio_evgen->handler.work, iio_dummy_work_handler);
+
+ iio_evgen->base = irq_sim_irqnum(&iio_evgen->irq_sim, 0);
mutex_init(&iio_evgen->lock);
+
return 0;
}
@@ -132,15 +81,17 @@ int iio_dummy_evgen_get_irq(void)
return -ENODEV;
mutex_lock(&iio_evgen->lock);
- for (i = 0; i < IIO_EVENTGEN_NO; i++)
+ for (i = 0; i < IIO_EVENTGEN_NO; i++) {
if (!iio_evgen->inuse[i]) {
- ret = iio_evgen->base + i;
+ ret = irq_sim_irqnum(&iio_evgen->irq_sim, i);
iio_evgen->inuse[i] = true;
break;
}
+ }
mutex_unlock(&iio_evgen->lock);
if (i == IIO_EVENTGEN_NO)
return -ENOMEM;
+
return ret;
}
EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_irq);
@@ -167,7 +118,7 @@ EXPORT_SYMBOL_GPL(iio_dummy_evgen_get_regs);
static void iio_dummy_evgen_free(void)
{
- irq_free_descs(iio_evgen->base, IIO_EVENTGEN_NO);
+ irq_sim_fini(&iio_evgen->irq_sim);
kfree(iio_evgen);
}
@@ -192,9 +143,7 @@ static ssize_t iio_evgen_poke(struct device *dev,
iio_evgen->regs[this_attr->address].reg_id = this_attr->address;
iio_evgen->regs[this_attr->address].reg_data = event;
- iio_evgen->handler.irq = iio_evgen->base + this_attr->address;
- if (iio_evgen->enabled[this_attr->address])
- irq_work_queue(&iio_evgen->handler.work);
+ irq_sim_fire(&iio_evgen->irq_sim, this_attr->address);
return len;
}
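
The evgen rework drops the private irq_chip and irq_work plumbing in favour of the generic interrupt simulator, so firing a fake event is now a single irq_sim_fire() call. A minimal sketch of the irq_sim life cycle as this patch uses it (irq_sim_init(), irq_sim_irqnum(), irq_sim_fire(), irq_sim_fini()); the handler and names are illustrative, not part of the driver:

#include <linux/interrupt.h>
#include <linux/irq_sim.h>

/* Illustrative only: typical consumer-side use of the simulated lines. */
static struct irq_sim demo_sim;

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup(void)
{
	int ret, irq;

	ret = irq_sim_init(&demo_sim, 4);	/* allocate four simulated lines */
	if (ret)
		return ret;

	irq = irq_sim_irqnum(&demo_sim, 0);	/* Linux irq number of line 0 */
	ret = request_irq(irq, demo_handler, 0, "irq-sim-demo", NULL);
	if (ret) {
		irq_sim_fini(&demo_sim);
		return ret;
	}

	irq_sim_fire(&demo_sim, 0);		/* raise line 0 from process context */
	return 0;
}
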
diff --git a/drivers/iio/dummy/iio_dummy_evgen.h b/drivers/iio/dummy/iio_dummy_evgen.h
index d044b946e74a..e0bf64fe9d67 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.h
+++ b/drivers/iio/dummy/iio_dummy_evgen.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IIO_DUMMY_EVGEN_H_
#define _IIO_DUMMY_EVGEN_H_
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index ad3410e528b6..62052479c349 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -26,7 +26,7 @@
#include <linux/iio/sw_device.h>
#include "iio_simple_dummy.h"
-static struct config_item_type iio_dummy_type = {
+static const struct config_item_type iio_dummy_type = {
.ct_owner = THIS_MODULE,
};
@@ -519,7 +519,6 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
* Device type specific information.
*/
static const struct iio_info iio_dummy_info = {
- .driver_module = THIS_MODULE,
.read_raw = &iio_dummy_read_raw,
.write_raw = &iio_dummy_write_raw,
#ifdef CONFIG_IIO_SIMPLE_DUMMY_EVENTS
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 99eba524f6dd..ddb6a334ae68 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -738,7 +738,6 @@ static const struct iio_info ad9523_info = {
.write_raw = &ad9523_write_raw,
.debugfs_reg_access = &ad9523_reg_access,
.attrs = &ad9523_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9523_setup(struct iio_dev *indio_dev)
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index d2d824b446f5..6d768431d90e 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -374,7 +374,6 @@ static const struct iio_chan_spec adf4350_chan = {
static const struct iio_info adf4350_info = {
.debugfs_reg_access = &adf4350_reg_access,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f0e149a606b0..295ec780c4eb 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O gyroscope sensor drivers
#
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index ad31a1372a04..a551ebde4762 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -163,7 +163,6 @@ static const struct iio_chan_spec adis16080_channels[] = {
static const struct iio_info adis16080_info = {
.read_raw = &adis16080_read_raw,
- .driver_module = THIS_MODULE,
};
enum {
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index e5241f41e65e..aea80ab04122 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -137,7 +137,6 @@ static const struct iio_chan_spec adis16130_channels[] = {
static const struct iio_info adis16130_info = {
.read_raw = &adis16130_read_raw,
- .driver_module = THIS_MODULE,
};
static int adis16130_probe(struct spi_device *spi)
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index b04faf93e1bc..90ec4bed62b7 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -398,7 +398,6 @@ static const struct attribute_group adis16136_attribute_group = {
};
static const struct iio_info adis16136_info = {
- .driver_module = THIS_MODULE,
.attrs = &adis16136_attribute_group,
.read_raw = &adis16136_read_raw,
.write_raw = &adis16136_write_raw,
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 7da8825f4791..a8cb1ca349d9 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -321,7 +321,6 @@ static const struct iio_info adis16260_info = {
.read_raw = &adis16260_read_raw,
.write_raw = &adis16260_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis1620_status_error_msgs[] = {
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index a330d4288bb0..5d39fd008378 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -405,7 +405,6 @@ static const struct iio_chan_spec adxrs450_channels[2][2] = {
};
static const struct iio_info adxrs450_info = {
- .driver_module = THIS_MODULE,
.read_raw = &adxrs450_read_raw,
.write_raw = &adxrs450_write_raw,
};
diff --git a/drivers/iio/gyro/bmg160.h b/drivers/iio/gyro/bmg160.h
index 72db723c8fb6..6bcff6562249 100644
--- a/drivers/iio/gyro/bmg160.h
+++ b/drivers/iio/gyro/bmg160.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BMG160_H_
#define BMG160_H_
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 821919dd245b..15046172e437 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -858,7 +858,6 @@ static const struct iio_info bmg160_info = {
.write_event_value = bmg160_write_event,
.write_event_config = bmg160_write_event_config,
.read_event_config = bmg160_read_event_config,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmg160_accel_scan_masks[] = {
@@ -956,7 +955,6 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
static const struct iio_trigger_ops bmg160_trigger_ops = {
.set_trigger_state = bmg160_data_rdy_trigger_set_state,
.try_reenable = bmg160_trig_try_reen,
- .owner = THIS_MODULE,
};
static irqreturn_t bmg160_event_handler(int irq, void *private)
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index c67ce2ac4715..f59995a90387 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -186,7 +186,6 @@ static int gyro_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info gyro_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &gyro_3d_read_raw,
.write_raw = &gyro_3d_write_raw,
};
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index eef50e91f17c..59770e5b6660 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -101,7 +101,6 @@ error_ret:
}
static const struct iio_trigger_ops itg3200_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &itg3200_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index cfa2db04a8ab..7adecb562c81 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -278,7 +278,6 @@ static const struct iio_chan_spec itg3200_channels[] = {
static const struct iio_info itg3200_info = {
.read_raw = &itg3200_read_raw,
.write_raw = &itg3200_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long itg3200_available_scan_masks[] = { 0xffffffff, 0x0 };
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index e0d241a9aa30..77fac81a3adc 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -742,7 +742,6 @@ static const struct attribute_group mpu3050_attribute_group = {
};
static const struct iio_info mpu3050_info = {
- .driver_module = THIS_MODULE,
.read_raw = mpu3050_read_raw,
.write_raw = mpu3050_write_raw,
.attrs = &mpu3050_attribute_group,
@@ -1032,7 +1031,6 @@ static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops mpu3050_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = mpu3050_drdy_trigger_set_state,
};
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
index bef87a714dc5..835b0249c376 100644
--- a/drivers/iio/gyro/mpu3050.h
+++ b/drivers/iio/gyro/mpu3050.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/iio/iio.h>
#include <linux/mutex.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index e366422e8512..b31064ba37b9 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -111,14 +111,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -181,14 +190,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -246,14 +264,23 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x80,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int2 = 0x08,
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x08,
+ },
/*
* The sensor has IHL (active low) and open
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -326,7 +353,6 @@ static const struct attribute_group st_gyro_attribute_group = {
};
static const struct iio_info gyro_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_gyro_attribute_group,
.read_raw = &st_gyro_read_raw,
.write_raw = &st_gyro_write_raw,
@@ -335,7 +361,6 @@ static const struct iio_info gyro_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_gyro_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
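
The gyro tables now express the data-ready routing and the status register as addr/mask pairs nested under drdy_irq, and add a sim entry holding the SPI serial-interface-mode bit. The matching structure definitions live in include/linux/iio/common/st_sensors.h and are not part of this hunk; a rough, hypothetical sketch of the shape those initializers assume:

/* Hypothetical shape, for illustration only: the fields filled in by the
 * .drdy_irq and .sim initializers above. */
struct st_sensor_int_drdy {
	u8 addr;			/* interrupt routing register */
	u8 mask;			/* DRDY enable bit on that pad */
};

struct st_sensor_data_ready_irq {
	struct st_sensor_int_drdy int1;	/* DRDY routed to INT1 */
	struct st_sensor_int_drdy int2;	/* DRDY routed to INT2 */
	struct {
		u8 addr;		/* status register */
		u8 mask;		/* new-data bits */
	} stat_drdy;
};

struct st_sensor_sim {
	u8 addr;			/* control register holding the SIM bit */
	u8 value;			/* value selecting 3-wire SPI mode */
};
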
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 6bb23a49e81e..a739fff01c6b 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -309,7 +309,6 @@ static const struct iio_info afe4403_iio_info = {
.attrs = &afe440x_attribute_group,
.read_raw = afe4403_read_raw,
.write_raw = afe4403_write_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t afe4403_trigger_handler(int irq, void *private)
@@ -354,7 +353,6 @@ err:
}
static const struct iio_trigger_ops afe4403_trigger_ops = {
- .owner = THIS_MODULE,
};
#define AFE4403_TIMING_PAIRS \
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 964f5231a831..11910922e655 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -328,7 +328,6 @@ static const struct iio_info afe4404_iio_info = {
.attrs = &afe440x_attribute_group,
.read_raw = afe4404_read_raw,
.write_raw = afe4404_write_raw,
- .driver_module = THIS_MODULE,
};
static irqreturn_t afe4404_trigger_handler(int irq, void *private)
@@ -355,7 +354,6 @@ err:
}
static const struct iio_trigger_ops afe4404_trigger_ops = {
- .owner = THIS_MODULE,
};
/* Default timings from data-sheet */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 849d71747f9f..91aef5df24a1 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -420,7 +420,6 @@ static int max30100_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max30100_info = {
- .driver_module = THIS_MODULE,
.read_raw = max30100_read_raw,
};
diff --git a/drivers/iio/health/max30102.c b/drivers/iio/health/max30102.c
index 839b875c29b9..203ffb9cad6a 100644
--- a/drivers/iio/health/max30102.c
+++ b/drivers/iio/health/max30102.c
@@ -381,7 +381,6 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info max30102_info = {
- .driver_module = THIS_MODULE,
.read_raw = max30102_read_raw,
};
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index be0dedeb8f3c..ae4204995017 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for IIO humidity sensor drivers
#
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index ff96b6d0fdae..7d8669dc6547 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -215,7 +215,6 @@ static int am2315_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info am2315_info = {
- .driver_module = THIS_MODULE,
.read_raw = am2315_read_raw,
};
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 2a22ad920333..df6bab40d6fa 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -284,7 +284,6 @@ err:
}
static const struct iio_info dht11_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = dht11_read_raw,
};
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 7851bd90ef64..d8438310b6d4 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -357,7 +357,6 @@ static const struct iio_info hdc100x_info = {
.read_raw = hdc100x_read_raw,
.write_raw = hdc100x_write_raw,
.attrs = &hdc100x_attribute_group,
- .driver_module = THIS_MODULE,
};
static int hdc100x_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index 6e09c1acfe51..beab6d6fd6e1 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -125,7 +125,6 @@ static int humidity_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info humidity_info = {
- .driver_module = THIS_MODULE,
.read_raw = &humidity_read_raw,
.write_raw = &humidity_write_raw,
};
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 9690dfe9a844..e971ea425268 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -47,7 +47,6 @@ static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops hts221_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = hts221_trig_set_state,
};
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index 32524a8dc66f..daef177219b6 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -573,7 +573,6 @@ static const struct attribute_group hts221_attribute_group = {
};
static const struct iio_info hts221_info = {
- .driver_module = THIS_MODULE,
.attrs = &hts221_attribute_group,
.read_raw = hts221_read_raw,
.write_raw = hts221_write_raw,
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 2c4b9be85a05..f5a2701ba6dd 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -175,7 +175,6 @@ static const struct iio_info htu21_info = {
.read_raw = htu21_read_raw,
.write_raw = htu21_write_raw,
.attrs = &htu21_attribute_group,
- .driver_module = THIS_MODULE,
};
static int htu21_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 6297766e93d0..1fd19f035a5d 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -124,7 +124,6 @@ static const struct iio_chan_spec si7005_channels[] = {
static const struct iio_info si7005_info = {
.read_raw = si7005_read_raw,
- .driver_module = THIS_MODULE,
};
static int si7005_probe(struct i2c_client *client,
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 345a7656c5ef..1b2ec8df1a72 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -108,7 +108,6 @@ static const struct iio_chan_spec si7020_channels[] = {
static const struct iio_info si7020_info = {
.read_raw = si7020_read_raw,
- .driver_module = THIS_MODULE,
};
static int si7020_probe(struct i2c_client *client,
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 8b563c3323b5..68629c68b19b 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Inertial Measurement Units
#
diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
index 90c24a23c679..e70a5339acb1 100644
--- a/drivers/iio/imu/adis16400_buffer.c
+++ b/drivers/iio/imu/adis16400_buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 9b697d35dbef..46a569005a13 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -833,7 +833,6 @@ static struct adis16400_chip_info adis16400_chips[] = {
};
static const struct iio_info adis16400_info = {
- .driver_module = THIS_MODULE,
.read_raw = &adis16400_read_raw,
.write_raw = &adis16400_write_raw,
.update_scan_mode = adis16400_update_scan_mode,
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 12898424d838..7a33d6bd60e0 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -720,7 +720,6 @@ static const struct iio_info adis16480_info = {
.read_raw = &adis16480_read_raw,
.write_raw = &adis16480_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adis16480_stop_device(struct iio_dev *indio_dev)
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index f53e9a803a0e..0dd5a381be64 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -25,7 +25,6 @@ static int adis_data_rdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops adis_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &adis_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
index d2ae6ed70271..e7b11e74fd1d 100644
--- a/drivers/iio/imu/bmi160/bmi160.h
+++ b/drivers/iio/imu/bmi160/bmi160.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BMI160_H_
#define BMI160_H_
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index cfd225ed1c8d..c85659ca9507 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -482,7 +482,6 @@ static const struct attribute_group bmi160_attrs_group = {
};
static const struct iio_info bmi160_info = {
- .driver_module = THIS_MODULE,
.read_raw = bmi160_read_raw,
.write_raw = bmi160_write_raw,
.attrs = &bmi160_attrs_group,
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index 734af5e6cef9..70ffe0d13d8c 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Invensense MPU6050 device.
#
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 44830bce13df..7d64be353403 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -542,7 +542,9 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
default:
result = -EINVAL;
+ break;
}
+ break;
default:
result = -EINVAL;
break;
@@ -570,10 +572,12 @@ error_write_raw_unlock:
*/
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{
- const int hz[] = {188, 98, 42, 20, 10, 5};
- const int d[] = {INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
- INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
- INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ};
+ static const int hz[] = {188, 98, 42, 20, 10, 5};
+ static const int d[] = {
+ INV_MPU6050_FILTER_188HZ, INV_MPU6050_FILTER_98HZ,
+ INV_MPU6050_FILTER_42HZ, INV_MPU6050_FILTER_20HZ,
+ INV_MPU6050_FILTER_10HZ, INV_MPU6050_FILTER_5HZ
+ };
int i, h, result;
u8 data;
@@ -795,7 +799,6 @@ static const struct attribute_group inv_attribute_group = {
};
static const struct iio_info mpu_info = {
- .driver_module = THIS_MODULE,
.read_raw = &inv_mpu6050_read_raw,
.write_raw = &inv_mpu6050_write_raw,
.write_raw_get_fmt = &inv_write_raw_get_fmt,
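
The write_raw change closes a switch fall-through: without the added break statements, a successful inner-switch write fell into the outer default label and the result was overwritten with -EINVAL. A minimal standalone sketch of that failure mode, with made-up values standing in for the channel handling:

#include <stdio.h>

/* Illustrative only: why the missing break mattered. */
int main(void)
{
	int mask = 1, chan_type = 1, result = 0;

	switch (mask) {
	case 1:
		switch (chan_type) {
		case 1:
			result = 0;	/* write succeeded */
			break;
		default:
			result = -22;	/* -EINVAL */
			break;
		}
		break;			/* without this, control falls into the outer default */
	default:
		result = -22;		/* clobbers the successful result */
	}
	printf("result = %d\n", result);
	return 0;
}
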
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 540070f0a230..f963f9fc98c0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -114,7 +114,6 @@ static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
}
static const struct iio_trigger_ops inv_mpu_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &inv_mpu_data_rdy_trigger_set_state,
};
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 2e7dd5754a56..44b3f5397343 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1003,7 +1003,6 @@ static int kmx61_mag_validate_trigger(struct iio_dev *indio_dev,
}
static const struct iio_info kmx61_acc_info = {
- .driver_module = THIS_MODULE,
.read_raw = kmx61_read_raw,
.write_raw = kmx61_write_raw,
.attrs = &kmx61_acc_attribute_group,
@@ -1015,7 +1014,6 @@ static const struct iio_info kmx61_acc_info = {
};
static const struct iio_info kmx61_mag_info = {
- .driver_module = THIS_MODULE,
.read_raw = kmx61_read_raw,
.write_raw = kmx61_write_raw,
.attrs = &kmx61_mag_attribute_group,
@@ -1087,7 +1085,6 @@ static int kmx61_trig_try_reenable(struct iio_trigger *trig)
static const struct iio_trigger_ops kmx61_trigger_ops = {
.set_trigger_state = kmx61_data_rdy_trigger_set_state,
.try_reenable = kmx61_trig_try_reenable,
- .owner = THIS_MODULE,
};
static irqreturn_t kmx61_event_handler(int irq, void *private)
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 46352c7bff43..4fdb7fcc3ea8 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -29,8 +29,6 @@ enum st_lsm6dsx_hw_id {
#define ST_LSM6DSX_CHAN_SIZE 2
#define ST_LSM6DSX_SAMPLE_SIZE 6
-#define ST_LSM6DSX_SAMPLE_DEPTH (ST_LSM6DSX_SAMPLE_SIZE / \
- ST_LSM6DSX_CHAN_SIZE)
#if defined(CONFIG_SPI_MASTER)
#define ST_LSM6DSX_RX_MAX_LENGTH 256
@@ -52,10 +50,38 @@ struct st_lsm6dsx_reg {
u8 mask;
};
+/**
+ * struct st_lsm6dsx_fifo_ops - ST IMU FIFO settings
+ * @fifo_th: FIFO threshold register info (addr + mask).
+ * @fifo_diff: FIFO diff status register info (addr + mask).
+ * @th_wl: FIFO threshold word length.
+ */
+struct st_lsm6dsx_fifo_ops {
+ struct {
+ u8 addr;
+ u16 mask;
+ } fifo_th;
+ struct {
+ u8 addr;
+ u16 mask;
+ } fifo_diff;
+ u8 th_wl;
+};
+
+/**
+ * struct st_lsm6dsx_settings - ST IMU sensor settings
+ * @wai: Sensor WhoAmI default value.
+ * @max_fifo_size: Sensor max fifo length in FIFO words.
+ * @id: List of hw id supported by the driver configuration.
+ * @decimator: List of decimator register info (addr + mask).
+ * @fifo_ops: Sensor hw FIFO parameters.
+ */
struct st_lsm6dsx_settings {
u8 wai;
u16 max_fifo_size;
enum st_lsm6dsx_hw_id id[ST_LSM6DSX_MAX_ID];
+ struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
+ struct st_lsm6dsx_fifo_ops fifo_ops;
};
enum st_lsm6dsx_sensor_id {
@@ -79,7 +105,6 @@ enum st_lsm6dsx_fifo_mode {
* @watermark: Sensor watermark level.
* @sip: Number of samples in a given pattern.
* @decimator: FIFO decimation factor.
- * @decimator_mask: Sensor mask for decimation register.
* @delta_ts: Delta time between two consecutive interrupts.
* @ts: Latest timestamp from the interrupt handler.
*/
@@ -94,7 +119,6 @@ struct st_lsm6dsx_sensor {
u16 watermark;
u8 sip;
u8 decimator;
- u8 decimator_mask;
s64 delta_ts;
s64 ts;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index e2737dc71b67..755c472e8a05 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -35,10 +35,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_FIFO_THL_ADDR 0x06
-#define ST_LSM6DSX_REG_FIFO_THH_ADDR 0x07
-#define ST_LSM6DSX_FIFO_TH_MASK GENMASK(11, 0)
-#define ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR 0x08
#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
@@ -46,8 +42,6 @@
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
-#define ST_LSM6DSX_REG_FIFO_DIFFL_ADDR 0x3a
-#define ST_LSM6DSX_FIFO_DIFF_MASK GENMASK(11, 0)
#define ST_LSM6DSX_FIFO_EMPTY_MASK BIT(12)
#define ST_LSM6DSX_REG_FIFO_OUTL_ADDR 0x3e
@@ -110,8 +104,9 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
st_lsm6dsx_get_max_min_odr(hw, &max_odr, &min_odr);
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
- sensor = iio_priv(hw->iio_devs[i]);
+ const struct st_lsm6dsx_reg *dec_reg;
+ sensor = iio_priv(hw->iio_devs[i]);
/* update fifo decimators and sample in pattern */
if (hw->enable_mask & BIT(sensor->id)) {
sensor->sip = sensor->odr / min_odr;
@@ -123,12 +118,13 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
data = 0;
}
- err = st_lsm6dsx_write_with_mask(hw,
- ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR,
- sensor->decimator_mask, data);
- if (err < 0)
- return err;
-
+ dec_reg = &hw->settings->decimator[sensor->id];
+ if (dec_reg->addr) {
+ err = st_lsm6dsx_write_with_mask(hw, dec_reg->addr,
+ dec_reg->mask, data);
+ if (err < 0)
+ return err;
+ }
sip += sensor->sip;
}
hw->sip = sip;
@@ -139,23 +135,10 @@ static int st_lsm6dsx_update_decimators(struct st_lsm6dsx_hw *hw)
int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
enum st_lsm6dsx_fifo_mode fifo_mode)
{
- u8 data;
int err;
- switch (fifo_mode) {
- case ST_LSM6DSX_FIFO_BYPASS:
- data = fifo_mode;
- break;
- case ST_LSM6DSX_FIFO_CONT:
- data = (ST_LSM6DSX_MAX_FIFO_ODR_VAL <<
- __ffs(ST_LSM6DSX_FIFO_ODR_MASK)) | fifo_mode;
- break;
- default:
- return -EINVAL;
- }
-
- err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
- sizeof(data), &data);
+ err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_MODE_MASK, fifo_mode);
if (err < 0)
return err;
@@ -164,9 +147,20 @@ int st_lsm6dsx_set_fifo_mode(struct st_lsm6dsx_hw *hw,
return 0;
}
+static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
+ bool enable)
+{
+ struct st_lsm6dsx_hw *hw = sensor->hw;
+ u8 data;
+
+ data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
+ return st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
+ ST_LSM6DSX_FIFO_ODR_MASK, data);
+}
+
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
{
- u16 fifo_watermark = ~0, cur_watermark, sip = 0;
+ u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
struct st_lsm6dsx_hw *hw = sensor->hw;
struct st_lsm6dsx_sensor *cur_sensor;
__le16 wdata;
@@ -191,20 +185,21 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
fifo_watermark = max_t(u16, fifo_watermark, sip);
fifo_watermark = (fifo_watermark / sip) * sip;
- fifo_watermark = fifo_watermark * ST_LSM6DSX_SAMPLE_DEPTH;
+ fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
mutex_lock(&hw->lock);
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_THH_ADDR,
+ err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_th.addr + 1,
sizeof(data), &data);
if (err < 0)
goto out;
- fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) |
- (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK);
+ fifo_th_mask = hw->settings->fifo_ops.fifo_th.mask;
+ fifo_watermark = ((data << 8) & ~fifo_th_mask) |
+ (fifo_watermark & fifo_th_mask);
wdata = cpu_to_le16(fifo_watermark);
- err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR,
+ err = hw->tf->write(hw->dev, hw->settings->fifo_ops.fifo_th.addr,
sizeof(wdata), (u8 *)&wdata);
out:
mutex_unlock(&hw->lock);
@@ -223,6 +218,7 @@ out:
static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
{
u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
+ u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
int err, acc_sip, gyro_sip, read_len, samples, offset;
struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor;
s64 acc_ts, acc_delta_ts, gyro_ts, gyro_delta_ts;
@@ -230,7 +226,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
u8 buff[pattern_len];
__le16 fifo_status;
- err = hw->tf->read(hw->dev, ST_LSM6DSX_REG_FIFO_DIFFL_ADDR,
+ err = hw->tf->read(hw->dev, hw->settings->fifo_ops.fifo_diff.addr,
sizeof(fifo_status), (u8 *)&fifo_status);
if (err < 0)
return err;
@@ -238,7 +234,7 @@ static int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
- fifo_len = (le16_to_cpu(fifo_status) & ST_LSM6DSX_FIFO_DIFF_MASK) *
+ fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
samples = fifo_len / ST_LSM6DSX_SAMPLE_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -345,6 +341,10 @@ static int st_lsm6dsx_update_fifo(struct iio_dev *iio_dev, bool enable)
return err;
}
+ err = st_lsm6dsx_set_fifo_odr(sensor, enable);
+ if (err < 0)
+ return err;
+
err = st_lsm6dsx_update_decimators(hw);
if (err < 0)
return err;
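
With the threshold and diff registers parameterized per chip, the value programmed into the 12-bit FIFO threshold field is still counted in 16-bit FIFO words: one 6-byte sample occupies th_wl = 3 words on these parts. A small standalone sketch of the conversion, assuming the LSM6DS3-style fifo_ops values introduced in the core file below:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: watermark in samples -> FIFO threshold field. */
int main(void)
{
	const uint16_t th_wl = 3;		/* one 6-byte sample = three 2-byte words */
	const uint16_t fifo_th_mask = 0x0fff;	/* GENMASK(11, 0) */
	uint16_t samples = 32;
	uint16_t words = samples * th_wl;

	printf("threshold field: 0x%03x (%u words)\n",
	       words & fifo_th_mask, words);	/* 0x060, 96 words */
	return 0;
}
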
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b485540da89e..239c735242be 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -42,8 +42,6 @@
#include "st_lsm6dsx.h"
-#define ST_LSM6DSX_REG_ACC_DEC_MASK GENMASK(2, 0)
-#define ST_LSM6DSX_REG_GYRO_DEC_MASK GENMASK(5, 3)
#define ST_LSM6DSX_REG_INT1_ADDR 0x0d
#define ST_LSM6DSX_REG_INT2_ADDR 0x0e
#define ST_LSM6DSX_REG_FIFO_FTH_IRQ_MASK BIT(3)
@@ -54,10 +52,6 @@
#define ST_LSM6DSX_REG_BDU_MASK BIT(6)
#define ST_LSM6DSX_REG_INT2_ON_INT1_ADDR 0x13
#define ST_LSM6DSX_REG_INT2_ON_INT1_MASK BIT(5)
-#define ST_LSM6DSX_REG_ROUNDING_ADDR 0x16
-#define ST_LSM6DSX_REG_ROUNDING_MASK BIT(2)
-#define ST_LSM6DSX_REG_LIR_ADDR 0x58
-#define ST_LSM6DSX_REG_LIR_MASK BIT(0)
#define ST_LSM6DSX_REG_ACC_ODR_ADDR 0x10
#define ST_LSM6DSX_REG_ACC_ODR_MASK GENMASK(7, 4)
@@ -160,25 +154,88 @@ static const struct st_lsm6dsx_fs_table_entry st_lsm6dsx_fs_table[] = {
static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
{
.wai = 0x69,
- .max_fifo_size = 8192,
+ .max_fifo_size = 1365,
.id = {
[0] = ST_LSM6DS3_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
{
.wai = 0x69,
- .max_fifo_size = 4096,
+ .max_fifo_size = 682,
.id = {
[0] = ST_LSM6DS3H_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
{
.wai = 0x6a,
- .max_fifo_size = 4096,
+ .max_fifo_size = 682,
.id = {
[0] = ST_LSM6DSL_ID,
[1] = ST_LSM6DSM_ID,
},
+ .decimator = {
+ [ST_LSM6DSX_ID_ACC] = {
+ .addr = 0x08,
+ .mask = GENMASK(2, 0),
+ },
+ [ST_LSM6DSX_ID_GYRO] = {
+ .addr = 0x08,
+ .mask = GENMASK(5, 3),
+ },
+ },
+ .fifo_ops = {
+ .fifo_th = {
+ .addr = 0x06,
+ .mask = GENMASK(11, 0),
+ },
+ .fifo_diff = {
+ .addr = 0x3a,
+ .mask = GENMASK(11, 0),
+ },
+ .th_wl = 3, /* 1LSB = 2B */
+ },
},
};
@@ -322,7 +379,6 @@ static int st_lsm6dsx_check_odr(struct st_lsm6dsx_sensor *sensor, u16 odr,
return -EINVAL;
*val = st_lsm6dsx_odr_table[sensor->id].odr_avl[i].val;
- sensor->odr = odr;
return 0;
}
@@ -449,6 +505,8 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
u8 data;
err = st_lsm6dsx_check_odr(sensor, val, &data);
+ if (!err)
+ sensor->odr = val;
break;
}
default:
@@ -465,10 +523,9 @@ static int st_lsm6dsx_set_watermark(struct iio_dev *iio_dev, unsigned int val)
{
struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
struct st_lsm6dsx_hw *hw = sensor->hw;
- int err, max_fifo_len;
+ int err;
- max_fifo_len = hw->settings->max_fifo_size / ST_LSM6DSX_SAMPLE_SIZE;
- if (val < 1 || val > max_fifo_len)
+ if (val < 1 || val > hw->settings->max_fifo_size)
return -EINVAL;
err = st_lsm6dsx_update_watermark(sensor, val);
@@ -530,7 +587,6 @@ static const struct attribute_group st_lsm6dsx_acc_attribute_group = {
};
static const struct iio_info st_lsm6dsx_acc_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_lsm6dsx_acc_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
@@ -548,7 +604,6 @@ static const struct attribute_group st_lsm6dsx_gyro_attribute_group = {
};
static const struct iio_info st_lsm6dsx_gyro_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_lsm6dsx_gyro_attribute_group,
.read_raw = st_lsm6dsx_read_raw,
.write_raw = st_lsm6dsx_write_raw,
@@ -608,23 +663,12 @@ static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw)
msleep(200);
- /* latch interrupts */
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_LIR_ADDR,
- ST_LSM6DSX_REG_LIR_MASK, 1);
- if (err < 0)
- return err;
-
/* enable Block Data Update */
err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_BDU_ADDR,
ST_LSM6DSX_REG_BDU_MASK, 1);
if (err < 0)
return err;
- err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_ROUNDING_ADDR,
- ST_LSM6DSX_REG_ROUNDING_MASK, 1);
- if (err < 0)
- return err;
-
/* enable FIFO watermark interrupt */
err = st_lsm6dsx_get_drdy_reg(hw, &drdy_int_reg);
if (err < 0)
@@ -662,7 +706,6 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_acc_channels);
iio_dev->info = &st_lsm6dsx_acc_info;
- sensor->decimator_mask = ST_LSM6DSX_REG_ACC_DEC_MASK;
scnprintf(sensor->name, sizeof(sensor->name), "%s_accel",
name);
break;
@@ -671,7 +714,6 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
iio_dev->num_channels = ARRAY_SIZE(st_lsm6dsx_gyro_channels);
iio_dev->info = &st_lsm6dsx_gyro_info;
- sensor->decimator_mask = ST_LSM6DSX_REG_GYRO_DEC_MASK;
scnprintf(sensor->name, sizeof(sensor->name), "%s_gyro",
name);
break;
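
Note that max_fifo_size changes unit here: the old values were the FIFO depth in bytes, while the new ones are the same depth expressed in 6-byte samples, which is what st_lsm6dsx_set_watermark() now compares against directly. A trivial check of the conversion:

#include <stdio.h>

/* Illustrative only: old byte counts re-expressed in 6-byte samples. */
int main(void)
{
	printf("LSM6DS3:          %d\n", 8192 / 6);	/* 1365 */
	printf("LSM6DS3H/DSL/DSM: %d\n", 4096 / 6);	/* 682 */
	return 0;
}
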
diff --git a/drivers/iio/industrialio-configfs.c b/drivers/iio/industrialio-configfs.c
index 45ce2bc47180..5a0aae119369 100644
--- a/drivers/iio/industrialio-configfs.c
+++ b/drivers/iio/industrialio-configfs.c
@@ -17,7 +17,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/configfs.h>
-static struct config_item_type iio_root_group_type = {
+static const struct config_item_type iio_root_group_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a47428b4d31b..9c4cfd19b739 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1662,14 +1662,11 @@ static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
-/**
- * iio_device_register() - register a device with the IIO subsystem
- * @indio_dev: Device structure filled by the device driver
- **/
-int iio_device_register(struct iio_dev *indio_dev)
+int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
int ret;
+ indio_dev->driver_module = this_mod;
/* If the calling driver did not initialize of_node, do it here */
if (!indio_dev->dev.of_node && indio_dev->dev.parent)
indio_dev->dev.of_node = indio_dev->dev.parent->of_node;
@@ -1715,7 +1712,8 @@ int iio_device_register(struct iio_dev *indio_dev)
indio_dev->setup_ops = &noop_ring_setup_ops;
cdev_init(&indio_dev->chrdev, &iio_buffer_fileops);
- indio_dev->chrdev.owner = indio_dev->info->driver_module;
+
+ indio_dev->chrdev.owner = this_mod;
ret = cdev_device_add(&indio_dev->chrdev, &indio_dev->dev);
if (ret < 0)
@@ -1733,7 +1731,7 @@ error_unreg_debugfs:
iio_device_unregister_debugfs(indio_dev);
return ret;
}
-EXPORT_SYMBOL(iio_device_register);
+EXPORT_SYMBOL(__iio_device_register);
/**
* iio_device_unregister() - unregister a device from the IIO subsystem
@@ -1765,23 +1763,8 @@ static void devm_iio_device_unreg(struct device *dev, void *res)
iio_device_unregister(*(struct iio_dev **)res);
}
-/**
- * devm_iio_device_register - Resource-managed iio_device_register()
- * @dev: Device to allocate iio_dev for
- * @indio_dev: Device structure filled by the device driver
- *
- * Managed iio_device_register. The IIO device registered with this
- * function is automatically unregistered on driver detach. This function
- * calls iio_device_register() internally. Refer to that function for more
- * information.
- *
- * If an iio_dev registered with this function needs to be unregistered
- * separately, devm_iio_device_unregister() must be used.
- *
- * RETURNS:
- * 0 on success, negative error number on failure.
- */
-int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
+int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
+ struct module *this_mod)
{
struct iio_dev **ptr;
int ret;
@@ -1791,7 +1774,7 @@ int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
return -ENOMEM;
*ptr = indio_dev;
- ret = iio_device_register(indio_dev);
+ ret = __iio_device_register(indio_dev, this_mod);
if (!ret)
devres_add(dev, ptr);
else
@@ -1799,7 +1782,7 @@ int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev)
return ret;
}
-EXPORT_SYMBOL_GPL(devm_iio_device_register);
+EXPORT_SYMBOL_GPL(__devm_iio_device_register);
/**
* devm_iio_device_unregister - Resource-managed iio_device_unregister()
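
iio_device_register() and devm_iio_device_register() turn into wrappers that record the calling driver's module in the new indio_dev->driver_module field, which is why the .driver_module assignments disappear from every iio_info in this patch. The header-side counterpart is not shown in this hunk; presumably it looks roughly like this:

/* Presumed counterpart in include/linux/iio/iio.h, for illustration only:
 * callers keep the old names while THIS_MODULE is captured at the call site. */
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);

#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
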
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
index 81b49cfca452..90df97c542f6 100644
--- a/drivers/iio/industrialio-sw-device.c
+++ b/drivers/iio/industrialio-sw-device.c
@@ -19,9 +19,9 @@
#include <linux/configfs.h>
static struct config_group *iio_devices_group;
-static struct config_item_type iio_device_type_group_type;
+static const struct config_item_type iio_device_type_group_type;
-static struct config_item_type iio_devices_group_type = {
+static const struct config_item_type iio_devices_group_type = {
.ct_owner = THIS_MODULE,
};
@@ -156,7 +156,7 @@ static struct configfs_group_operations device_ops = {
.drop_item = &device_drop_group,
};
-static struct config_item_type iio_device_type_group_type = {
+static const struct config_item_type iio_device_type_group_type = {
.ct_group_ops = &device_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-sw-trigger.c b/drivers/iio/industrialio-sw-trigger.c
index 8d24fb159cc9..bc6b7fb43e3a 100644
--- a/drivers/iio/industrialio-sw-trigger.c
+++ b/drivers/iio/industrialio-sw-trigger.c
@@ -19,9 +19,9 @@
#include <linux/configfs.h>
static struct config_group *iio_triggers_group;
-static struct config_item_type iio_trigger_type_group_type;
+static const struct config_item_type iio_trigger_type_group_type;
-static struct config_item_type iio_triggers_group_type = {
+static const struct config_item_type iio_triggers_group_type = {
.ct_owner = THIS_MODULE,
};
@@ -156,7 +156,7 @@ static struct configfs_group_operations trigger_ops = {
.drop_item = &trigger_drop_group,
};
-static struct config_item_type iio_trigger_type_group_type = {
+static const struct config_item_type iio_trigger_type_group_type = {
.ct_group_ops = &trigger_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 4061fed93f1f..ce66699c7fcc 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -66,13 +66,12 @@ ATTRIBUTE_GROUPS(iio_trig_dev);
static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
-int iio_trigger_register(struct iio_trigger *trig_info)
+int __iio_trigger_register(struct iio_trigger *trig_info,
+ struct module *this_mod)
{
int ret;
- /* trig_info->ops is required for the module member */
- if (!trig_info->ops)
- return -EINVAL;
+ trig_info->owner = this_mod;
trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
if (trig_info->id < 0)
@@ -105,7 +104,7 @@ error_unregister_id:
ida_simple_remove(&iio_trigger_ida, trig_info->id);
return ret;
}
-EXPORT_SYMBOL(iio_trigger_register);
+EXPORT_SYMBOL(__iio_trigger_register);
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
@@ -206,7 +205,8 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig);
@@ -250,7 +250,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
/* Prevent the module from being removed whilst attached to a trigger */
- __module_get(pf->indio_dev->info->driver_module);
+ __module_get(pf->indio_dev->driver_module);
/* Get irq number */
pf->irq = iio_trigger_get_irq(trig);
@@ -265,7 +265,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
goto out_put_irq;
/* Enable trigger in driver */
- if (trig->ops->set_trigger_state && notinuse) {
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
if (ret < 0)
goto out_free_irq;
@@ -286,7 +286,7 @@ out_free_irq:
out_put_irq:
iio_trigger_put_irq(trig, pf->irq);
out_put_module:
- module_put(pf->indio_dev->info->driver_module);
+ module_put(pf->indio_dev->driver_module);
return ret;
}
@@ -298,7 +298,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
= (bitmap_weight(trig->pool,
CONFIG_IIO_CONSUMERS_PER_TRIGGER)
== 1);
- if (trig->ops->set_trigger_state && no_other_users) {
+ if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
ret = trig->ops->set_trigger_state(trig, false);
if (ret)
return ret;
@@ -307,7 +307,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
trig->attached_own_device = false;
iio_trigger_put_irq(trig, pf->irq);
free_irq(pf->irq, pf);
- module_put(pf->indio_dev->info->driver_module);
+ module_put(pf->indio_dev->driver_module);
return ret;
}
@@ -428,7 +428,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
goto out_trigger_put;
}
- if (trig && trig->ops->validate_device) {
+ if (trig && trig->ops && trig->ops->validate_device) {
ret = trig->ops->validate_device(trig, indio_dev);
if (ret)
goto out_trigger_put;
@@ -663,9 +663,10 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
}
/**
- * devm_iio_trigger_register - Resource-managed iio_trigger_register()
+ * __devm_iio_trigger_register - Resource-managed iio_trigger_register()
* @dev: device this trigger was allocated for
* @trig_info: trigger to register
+ * @this_mod: module registering the trigger
*
* Managed iio_trigger_register(). The IIO trigger registered with this
* function is automatically unregistered on driver detach. This function
@@ -678,7 +679,9 @@ static void devm_iio_trigger_unreg(struct device *dev, void *res)
* RETURNS:
* 0 on success, negative error number on failure.
*/
-int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
+int __devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info,
+ struct module *this_mod)
{
struct iio_trigger **ptr;
int ret;
@@ -688,7 +691,7 @@ int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
return -ENOMEM;
*ptr = trig_info;
- ret = iio_trigger_register(trig_info);
+ ret = __iio_trigger_register(trig_info, this_mod);
if (!ret)
devres_add(dev, ptr);
else
@@ -696,7 +699,7 @@ int devm_iio_trigger_register(struct device *dev, struct iio_trigger *trig_info)
return ret;
}
-EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
+EXPORT_SYMBOL_GPL(__devm_iio_trigger_register);
/**
* devm_iio_trigger_unregister - Resource-managed iio_trigger_unregister()
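
The trigger side follows the same pattern: __iio_trigger_register() stores the owning module in trig->owner, so trigger drivers drop .owner from their iio_trigger_ops, and every trig->ops dereference gains a NULL check because ops becomes optional. The presumed header-side wrappers, again not part of this hunk:

/* Presumed counterpart in include/linux/iio/trigger.h, for illustration only. */
int __iio_trigger_register(struct iio_trigger *trig_info,
			   struct module *this_mod);
int __devm_iio_trigger_register(struct device *dev,
				struct iio_trigger *trig_info,
				struct module *this_mod);

#define iio_trigger_register(trig_info) \
	__iio_trigger_register((trig_info), THIS_MODULE)
#define devm_iio_trigger_register(dev, trig_info) \
	__devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
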
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index fa32fa459e2e..c263469b7ce9 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for IIO Light sensors
#
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index f0b47c501f4e..c35e2f8df339 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -171,7 +171,6 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info acpi_als_info = {
- .driver_module = THIS_MODULE,
.read_raw = acpi_als_read_raw,
};
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 0113fc843a81..e45bb6a277c2 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -245,7 +245,6 @@ static const struct iio_info adjd_s311_info = {
.read_raw = adjd_s311_read_raw,
.write_raw = adjd_s311_write_raw,
.update_scan_mode = adjd_s311_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static int adjd_s311_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index 6aac6513fd41..66623facea9a 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -168,7 +168,6 @@ static int al3320a_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info al3320a_info = {
- .driver_module = THIS_MODULE,
.read_raw = al3320a_read_raw,
.write_raw = al3320a_write_raw,
.attrs = &al3320a_attribute_group,
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 05eacd1ee40f..5c15736fb93e 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -337,12 +337,10 @@ static int apds9300_write_interrupt_config(struct iio_dev *indio_dev,
}
static const struct iio_info apds9300_info_no_irq = {
- .driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
};
static const struct iio_info apds9300_info = {
- .driver_module = THIS_MODULE,
.read_raw = apds9300_read_raw,
.read_event_value = apds9300_read_thresh,
.write_event_value = apds9300_write_thresh,
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 518a47e9377b..a8fa00e31c39 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -744,7 +744,6 @@ static int apds9960_write_event_config(struct iio_dev *indio_dev,
}
static const struct iio_info apds9960_info = {
- .driver_module = THIS_MODULE,
.attrs = &apds9960_attribute_group,
.read_raw = apds9960_read_raw,
.write_raw = apds9960_write_raw,
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 6c61187e630f..a814828e69f5 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -217,7 +217,6 @@ static const struct attribute_group bh1750_attribute_group = {
};
static const struct iio_info bh1750_info = {
- .driver_module = THIS_MODULE,
.attrs = &bh1750_attribute_group,
.read_raw = bh1750_read_raw,
.write_raw = bh1750_write_raw,
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index b54dcba05a82..036f3bbe323c 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -128,7 +128,6 @@ static int bh1780_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info bh1780_info = {
- .driver_module = THIS_MODULE,
.read_raw = bh1780_read_raw,
.debugfs_reg_access = bh1780_debugfs_reg_access,
};
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index d6fd0dace74f..aebf7dd071af 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -292,7 +292,6 @@ static const struct attribute_group cm32181_attribute_group = {
};
static const struct iio_info cm32181_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm32181_read_raw,
.write_raw = &cm32181_write_raw,
.attrs = &cm32181_attribute_group,
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 263e97235ea0..c639cf276ee6 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -322,7 +322,6 @@ static const struct attribute_group cm3232_attribute_group = {
};
static const struct iio_info cm3232_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm3232_read_raw,
.write_raw = &cm3232_write_raw,
.attrs = &cm3232_attribute_group,
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index d823c112d54b..83b08b6dc60f 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -211,7 +211,6 @@ static int cm3323_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cm3323_info = {
- .driver_module = THIS_MODULE,
.read_raw = cm3323_read_raw,
.write_raw = cm3323_write_raw,
.attrs = &cm3323_attribute_group,
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 980624e9ffb5..e454bc6a33c6 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -126,7 +126,6 @@ static int cm3605_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info cm3605_info = {
- .driver_module = THIS_MODULE,
.read_raw = cm3605_read_raw,
};
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 9d66e89c57ef..1dd8ed0121b3 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -612,7 +612,6 @@ static const struct attribute_group cm36651_attribute_group = {
};
static const struct iio_info cm36651_info = {
- .driver_module = THIS_MODULE,
.read_raw = &cm36651_read_raw,
.write_raw = &cm36651_write_raw,
.read_event_value = &cm36651_read_prox_thresh,
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 721722376fd0..b2a46b390d5c 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -175,7 +175,6 @@ static int cros_ec_light_prox_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_light_prox_info = {
.read_raw = &cros_ec_light_prox_read,
.write_raw = &cros_ec_light_prox_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_light_prox_probe(struct platform_device *pdev)
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 6ada9149f142..44b13fbcd093 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1384,7 +1384,6 @@ static const struct iio_info gp2ap020a00f_info = {
.read_event_config = &gp2ap020a00f_read_event_config,
.write_event_value = &gp2ap020a00f_write_event_val,
.write_event_config = &gp2ap020a00f_write_event_config,
- .driver_module = THIS_MODULE,
};
static int gp2ap020a00f_buffer_postenable(struct iio_dev *indio_dev)
@@ -1481,7 +1480,6 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
};
static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
- .owner = THIS_MODULE,
};
static int gp2ap020a00f_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 059d964772c7..befd693a4a31 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -177,7 +177,6 @@ static int als_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info als_info = {
- .driver_module = THIS_MODULE,
.read_raw = &als_read_raw,
.write_raw = &als_write_raw,
};
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 73fced8a63b7..45107f7537b5 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -156,7 +156,6 @@ static int prox_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info prox_info = {
- .driver_module = THIS_MODULE,
.read_raw = &prox_read_raw,
.write_raw = &prox_write_raw,
};
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
index 61f5924b472d..b45400f8fef4 100644
--- a/drivers/iio/light/isl29018.c
+++ b/drivers/iio/light/isl29018.c
@@ -624,14 +624,12 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
static const struct iio_info isl29018_info = {
.attrs = &isl29018_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29018_read_raw,
.write_raw = isl29018_write_raw,
};
static const struct iio_info isl29023_info = {
.attrs = &isl29023_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29018_read_raw,
.write_raw = isl29018_write_raw,
};
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 3d09c1fc4dad..f9912ab4f65c 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -536,7 +536,6 @@ static const struct iio_chan_spec isl29028_channels[] = {
static const struct iio_info isl29028_info = {
.attrs = &isl29108_group,
- .driver_module = THIS_MODULE,
.read_raw = isl29028_read_raw,
.write_raw = isl29028_write_raw,
};
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 1d2c0c8a1d4f..ed38edcd5efe 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -214,7 +214,6 @@ static const struct iio_info isl29125_info = {
.read_raw = isl29125_read_raw,
.write_raw = isl29125_write_raw,
.attrs = &isl29125_attribute_group,
- .driver_module = THIS_MODULE,
};
static int isl29125_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index e8a8931b4f50..811505d925b3 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -271,7 +271,6 @@ static const struct iio_chan_spec jsa1212_channels[] = {
};
static const struct iio_info jsa1212_info = {
- .driver_module = THIS_MODULE,
.read_raw = &jsa1212_read_raw,
};
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 0443fd2e8757..36208a3652e9 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -827,7 +827,6 @@ static int lm3533_als_disable(struct lm3533_als *als)
static const struct iio_info lm3533_als_info = {
.attrs = &lm3533_als_attribute_group,
.event_attrs = &lm3533_als_event_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = &lm3533_als_read_raw,
};
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 67838edd8b37..830a2d45aa4d 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1158,7 +1158,6 @@ static const struct iio_info ltr501_info_no_irq = {
.read_raw = ltr501_read_raw,
.write_raw = ltr501_write_raw,
.attrs = &ltr501_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr501_info = {
@@ -1169,14 +1168,12 @@ static const struct iio_info ltr501_info = {
.write_event_value = &ltr501_write_event,
.read_event_config = &ltr501_read_event_config,
.write_event_config = &ltr501_write_event_config,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr301_info_no_irq = {
.read_raw = ltr501_read_raw,
.write_raw = ltr501_write_raw,
.attrs = &ltr301_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ltr301_info = {
@@ -1187,7 +1184,6 @@ static const struct iio_info ltr301_info = {
.write_event_value = &ltr501_write_event,
.read_event_config = &ltr501_read_event_config,
.write_event_config = &ltr501_write_event_config,
- .driver_module = THIS_MODULE,
};
static struct ltr501_chip_info ltr501_chip_info_tbl[] = {
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 81bd8e8da4a6..bcdb0eb9e537 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -402,7 +402,6 @@ static const struct attribute_group max44000_attribute_group = {
};
static const struct iio_info max44000_info = {
- .driver_module = THIS_MODULE,
.read_raw = max44000_read_raw,
.write_raw = max44000_write_raw,
.write_raw_get_fmt = max44000_write_raw_get_fmt,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index b91ebc3483ce..54d88b60e303 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -585,7 +585,6 @@ err:
}
static const struct iio_info opt3001_info = {
- .driver_module = THIS_MODULE,
.attrs = &opt3001_attribute_group,
.read_raw = opt3001_read_raw,
.write_raw = opt3001_write_raw,
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 76a9e12b46bc..30ea1a088dd9 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -306,7 +306,6 @@ static int pa12203001_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info pa12203001_info = {
- .driver_module = THIS_MODULE,
.read_raw = pa12203001_read_raw,
.write_raw = pa12203001_write_raw,
.attrs = &pa12203001_attr_group,
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index a6efa12613a2..ffe9ce798ea2 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -538,7 +538,6 @@ static int rpr0521_pxs_drdy_set_state(struct iio_trigger *trigger,
static const struct iio_trigger_ops rpr0521_trigger_ops = {
.set_trigger_state = rpr0521_pxs_drdy_set_state,
- .owner = THIS_MODULE,
};
@@ -830,7 +829,6 @@ static int rpr0521_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info rpr0521_info = {
- .driver_module = THIS_MODULE,
.read_raw = rpr0521_read_raw,
.write_raw = rpr0521_write_raw,
.attrs = &rpr0521_attribute_group,
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 096034c126a4..76f16f9c7616 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -989,14 +989,12 @@ static const struct attribute_group si114x_attribute_group = {
static const struct iio_info si1132_info = {
.read_raw = si1145_read_raw,
.write_raw = si1145_write_raw,
- .driver_module = THIS_MODULE,
.attrs = &si1132_attribute_group,
};
static const struct iio_info si114x_info = {
.read_raw = si1145_read_raw,
.write_raw = si1145_write_raw,
- .driver_module = THIS_MODULE,
.attrs = &si114x_attribute_group,
};
@@ -1237,7 +1235,6 @@ disable:
}
static const struct iio_trigger_ops si1145_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = si1145_trigger_set_state,
};
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 45cf8b0a4363..6e2a169da950 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -409,7 +409,6 @@ static int stk3310_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info stk3310_info = {
- .driver_module = THIS_MODULE,
.read_raw = stk3310_read_raw,
.write_raw = stk3310_write_raw,
.attrs = &stk3310_attribute_group,
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index a795afb7667b..205e5659ce6b 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -241,7 +241,6 @@ static const struct iio_info tcs3414_info = {
.read_raw = tcs3414_read_raw,
.write_raw = tcs3414_write_raw,
.attrs = &tcs3414_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tcs3414_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 09e6ca5e332e..e7923b514d7a 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -13,7 +13,7 @@
*
* Datasheet: http://ams.com/eng/content/download/319364/1117183/file/TCS3472_Datasheet_EN_v2.pdf
*
- * TODO: interrupt support, thresholds, wait time
+ * TODO: wait time
*/
#include <linux/module.h>
@@ -23,6 +23,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
@@ -31,12 +32,15 @@
#define TCS3472_COMMAND BIT(7)
#define TCS3472_AUTO_INCR BIT(5)
+#define TCS3472_SPECIAL_FUNC (BIT(5) | BIT(6))
+
+#define TCS3472_INTR_CLEAR (TCS3472_COMMAND | TCS3472_SPECIAL_FUNC | 0x06)
#define TCS3472_ENABLE (TCS3472_COMMAND | 0x00)
#define TCS3472_ATIME (TCS3472_COMMAND | 0x01)
#define TCS3472_WTIME (TCS3472_COMMAND | 0x03)
-#define TCS3472_AILT (TCS3472_COMMAND | 0x04)
-#define TCS3472_AIHT (TCS3472_COMMAND | 0x06)
+#define TCS3472_AILT (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x04)
+#define TCS3472_AIHT (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x06)
#define TCS3472_PERS (TCS3472_COMMAND | 0x0c)
#define TCS3472_CONFIG (TCS3472_COMMAND | 0x0d)
#define TCS3472_CONTROL (TCS3472_COMMAND | 0x0f)
@@ -47,19 +51,42 @@
#define TCS3472_GDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x18)
#define TCS3472_BDATA (TCS3472_COMMAND | TCS3472_AUTO_INCR | 0x1a)
+#define TCS3472_STATUS_AINT BIT(4)
#define TCS3472_STATUS_AVALID BIT(0)
+#define TCS3472_ENABLE_AIEN BIT(4)
#define TCS3472_ENABLE_AEN BIT(1)
#define TCS3472_ENABLE_PON BIT(0)
#define TCS3472_CONTROL_AGAIN_MASK (BIT(0) | BIT(1))
struct tcs3472_data {
struct i2c_client *client;
+ struct mutex lock;
+ u16 low_thresh;
+ u16 high_thresh;
u8 enable;
u8 control;
u8 atime;
+ u8 apers;
u16 buffer[8]; /* 4 16-bit channels + 64-bit timestamp */
};
+static const struct iio_event_spec tcs3472_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
#define TCS3472_CHANNEL(_color, _si, _addr) { \
.type = IIO_INTENSITY, \
.modified = 1, \
@@ -75,6 +102,8 @@ struct tcs3472_data {
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = _si ? NULL : tcs3472_events, \
+ .num_event_specs = _si ? 0 : ARRAY_SIZE(tcs3472_events), \
}
static const int tcs3472_agains[] = { 1, 4, 16, 60 };
@@ -182,6 +211,166 @@ static int tcs3472_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+/*
+ * Translation from APERS field value to the number of consecutive out-of-range
+ * clear channel values before an interrupt is generated
+ */
+static const int tcs3472_intr_pers[] = {
+ 0, 1, 2, 3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60
+};
+
+static int tcs3472_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int period;
+
+ mutex_lock(&data->lock);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = (dir == IIO_EV_DIR_RISING) ?
+ data->high_thresh : data->low_thresh;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ period = (256 - data->atime) * 2400 *
+ tcs3472_intr_pers[data->apers];
+ *val = period / USEC_PER_SEC;
+ *val2 = period % USEC_PER_SEC;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 command;
+ int period;
+ int i;
+
+ mutex_lock(&data->lock);
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ command = TCS3472_AIHT;
+ break;
+ case IIO_EV_DIR_FALLING:
+ command = TCS3472_AILT;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = i2c_smbus_write_word_data(data->client, command, val);
+ if (ret)
+ goto error;
+
+ if (dir == IIO_EV_DIR_RISING)
+ data->high_thresh = val;
+ else
+ data->low_thresh = val;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ period = val * USEC_PER_SEC + val2;
+ for (i = 1; i < ARRAY_SIZE(tcs3472_intr_pers) - 1; i++) {
+ if (period <= (256 - data->atime) * 2400 *
+ tcs3472_intr_pers[i])
+ break;
+ }
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_PERS, i);
+ if (ret)
+ goto error;
+
+ data->apers = i;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+error:
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = !!(data->enable & TCS3472_ENABLE_AIEN);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int tcs3472_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret = 0;
+ u8 enable_old;
+
+ mutex_lock(&data->lock);
+
+ enable_old = data->enable;
+
+ if (state)
+ data->enable |= TCS3472_ENABLE_AIEN;
+ else
+ data->enable &= ~TCS3472_ENABLE_AIEN;
+
+ if (enable_old != data->enable) {
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable);
+ if (ret)
+ data->enable = enable_old;
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t tcs3472_event_handler(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct tcs3472_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, TCS3472_STATUS);
+ if (ret >= 0 && (ret & TCS3472_STATUS_AINT)) {
+ iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+
+ i2c_smbus_read_byte_data(data->client, TCS3472_INTR_CLEAR);
+ }
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -245,8 +434,11 @@ static const struct attribute_group tcs3472_attribute_group = {
static const struct iio_info tcs3472_info = {
.read_raw = tcs3472_read_raw,
.write_raw = tcs3472_write_raw,
+ .read_event_value = tcs3472_read_event,
+ .write_event_value = tcs3472_write_event,
+ .read_event_config = tcs3472_read_event_config,
+ .write_event_config = tcs3472_write_event_config,
.attrs = &tcs3472_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tcs3472_probe(struct i2c_client *client,
@@ -263,6 +455,7 @@ static int tcs3472_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ mutex_init(&data->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->info = &tcs3472_info;
@@ -292,12 +485,29 @@ static int tcs3472_probe(struct i2c_client *client,
return ret;
data->atime = ret;
+ ret = i2c_smbus_read_word_data(data->client, TCS3472_AILT);
+ if (ret < 0)
+ return ret;
+ data->low_thresh = ret;
+
+ ret = i2c_smbus_read_word_data(data->client, TCS3472_AIHT);
+ if (ret < 0)
+ return ret;
+ data->high_thresh = ret;
+
+ data->apers = 1;
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_PERS,
+ data->apers);
+ if (ret < 0)
+ return ret;
+
ret = i2c_smbus_read_byte_data(data->client, TCS3472_ENABLE);
if (ret < 0)
return ret;
/* enable device */
data->enable = ret | TCS3472_ENABLE_PON | TCS3472_ENABLE_AEN;
+ data->enable &= ~TCS3472_ENABLE_AIEN;
ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
data->enable);
if (ret < 0)
@@ -308,12 +518,24 @@ static int tcs3472_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq, NULL,
+ tcs3472_event_handler,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED |
+ IRQF_ONESHOT,
+ client->name, indio_dev);
+ if (ret)
+ goto buffer_cleanup;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto buffer_cleanup;
+ goto free_irq;
return 0;
+free_irq:
+ free_irq(client->irq, indio_dev);
buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
return ret;
@@ -321,8 +543,19 @@ buffer_cleanup:
static int tcs3472_powerdown(struct tcs3472_data *data)
{
- return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
- data->enable & ~(TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+ int ret;
+ u8 enable_mask = TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable & ~enable_mask);
+ if (!ret)
+ data->enable &= ~enable_mask;
+
+ mutex_unlock(&data->lock);
+
+ return ret;
}
static int tcs3472_remove(struct i2c_client *client)
@@ -330,6 +563,7 @@ static int tcs3472_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
+ free_irq(client->irq, indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
tcs3472_powerdown(iio_priv(indio_dev));
@@ -348,8 +582,19 @@ static int tcs3472_resume(struct device *dev)
{
struct tcs3472_data *data = iio_priv(i2c_get_clientdata(
to_i2c_client(dev)));
- return i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
- data->enable | (TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON));
+ int ret;
+ u8 enable_mask = TCS3472_ENABLE_AEN | TCS3472_ENABLE_PON;
+
+ mutex_lock(&data->lock);
+
+ ret = i2c_smbus_write_byte_data(data->client, TCS3472_ENABLE,
+ data->enable | enable_mask);
+ if (!ret)
+ data->enable |= enable_mask;
+
+ mutex_unlock(&data->lock);
+
+ return ret;
}
#endif
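For reference, the threshold-event period handled by tcs3472_read_event()/tcs3472_write_event() above is the product of the integration time and the APERS persistence count: one integration cycle lasts (256 - ATIME) * 2.4 ms, and tcs3472_intr_pers[] maps the APERS field to the number of consecutive out-of-range cycles. A small illustrative helper, not part of the patch; the example register values are assumptions:

/* Microseconds of filtering for a given ATIME value and APERS table index,
 * mirroring the arithmetic in tcs3472_read_event()/tcs3472_write_event().
 */
static unsigned int tcs3472_persist_us(u8 atime, unsigned int apers_idx)
{
	/* one integration cycle is (256 - ATIME) * 2.4 ms = (256 - ATIME) * 2400 us */
	return (256 - atime) * 2400 * tcs3472_intr_pers[apers_idx];
}

/* Example: ATIME = 214 gives 42 * 2.4 ms = 100.8 ms per cycle; APERS index 4
 * (5 consecutive cycles) gives 504000 us, reported to user space as 0.504000 s.
 */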
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 7599693f7fe9..6bbb0b1e6032 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -697,13 +697,11 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
}
static const struct iio_info tsl2563_info_no_irq = {
- .driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
};
static const struct iio_info tsl2563_info = {
- .driver_module = THIS_MODULE,
.read_raw = &tsl2563_read_raw,
.write_raw = &tsl2563_write_raw,
.read_event_value = &tsl2563_read_thresh,
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index fb711ed4862e..f2e50edaa242 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -804,7 +804,6 @@ static int tsl2583_write_raw(struct iio_dev *indio_dev,
static const struct iio_info tsl2583_info = {
.attrs = &tsl2583_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = tsl2583_read_raw,
.write_raw = tsl2583_write_raw,
};
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index cf94ec72b181..06171cb76e23 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -144,7 +144,6 @@ static const struct iio_info tsl4531_info = {
.read_raw = tsl4531_read_raw,
.write_raw = tsl4531_write_raw,
.attrs = &tsl4531_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tsl4531_check_id(struct i2c_client *client)
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index d571ad7291ed..68e52943879a 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -714,7 +714,6 @@ err:
}
static const struct iio_info us5182d_info = {
- .driver_module = THIS_MODULE,
.read_raw = us5182d_read_raw,
.write_raw = us5182d_write_raw,
.attrs = &us5182d_attr_group,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 360b6e98137a..c599a90506ad 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -155,7 +155,6 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
static const struct iio_info vcnl4000_info = {
.read_raw = vcnl4000_read_raw,
- .driver_module = THIS_MODULE,
};
static int vcnl4000_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
index bc1c4cb782cd..f4bf3c5b5eda 100644
--- a/drivers/iio/light/veml6070.c
+++ b/drivers/iio/light/veml6070.c
@@ -136,7 +136,6 @@ static int veml6070_read_raw(struct iio_dev *indio_dev,
static const struct iio_info veml6070_info = {
.read_raw = veml6070_read_raw,
- .driver_module = THIS_MODULE,
};
static int veml6070_probe(struct i2c_client *client,
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index 6e25b724d941..192c77ef3608 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
+#include <linux/util_macros.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -86,6 +87,8 @@
struct vl6180_data {
struct i2c_client *client;
struct mutex lock;
+ unsigned int als_gain_milli;
+ unsigned int als_it_ms;
};
enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX };
@@ -275,19 +278,17 @@ static const struct iio_chan_spec vl6180_channels[] = {
};
/*
- * Columns 3 & 4 represent the same value in decimal and hex notations.
- * Kept in order to avoid the datatype conversion while reading the
- * hardware_gain.
+ * Available Ambient Light Sensor gain settings, in 1/1000th units, and
+ * the corresponding VL6180_ALS_GAIN register values
*/
-static const int vl6180_als_gain[8][4] = {
- { 1, 0, 70, VL6180_ALS_GAIN_1 },
- { 1, 250000, 69, VL6180_ALS_GAIN_1_25 },
- { 1, 670000, 68, VL6180_ALS_GAIN_1_67 },
- { 2, 500000, 67, VL6180_ALS_GAIN_2_5 },
- { 5, 0, 66, VL6180_ALS_GAIN_5 },
- { 10, 0, 65, VL6180_ALS_GAIN_10 },
- { 20, 0, 64, VL6180_ALS_GAIN_20 },
- { 40, 0, 71, VL6180_ALS_GAIN_40 }
+static const int vl6180_als_gain_tab[8] = {
+ 1000, 1250, 1670, 2500, 5000, 10000, 20000, 40000
+};
+static const u8 vl6180_als_gain_tab_bits[8] = {
+ VL6180_ALS_GAIN_1, VL6180_ALS_GAIN_1_25,
+ VL6180_ALS_GAIN_1_67, VL6180_ALS_GAIN_2_5,
+ VL6180_ALS_GAIN_5, VL6180_ALS_GAIN_10,
+ VL6180_ALS_GAIN_20, VL6180_ALS_GAIN_40
};
static int vl6180_read_raw(struct iio_dev *indio_dev,
@@ -295,7 +296,7 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct vl6180_data *data = iio_priv(indio_dev);
- int ret, i;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -306,19 +307,20 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = vl6180_read_word(data->client, VL6180_ALS_IT);
- if (ret < 0)
- return ret;
- *val = 0; /* 1 count = 1ms (0 = 1ms) */
- *val2 = (ret + 1) * 1000; /* convert to seconds */
+ *val = data->als_it_ms;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
- return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_LIGHT:
- *val = 0; /* one ALS count is 0.32 Lux */
- *val2 = 320000;
- break;
+ /* one ALS count is 0.32 Lux @ gain 1, IT 100 ms */
+ *val = 32000; /* 0.32 * 1000 * 100 */
+ *val2 = data->als_gain_milli * data->als_it_ms;
+
+ return IIO_VAL_FRACTIONAL;
+
case IIO_DISTANCE:
*val = 0; /* sensor reports mm, scale to meter */
*val2 = 1000;
@@ -329,17 +331,11 @@ static int vl6180_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = vl6180_read_byte(data->client, VL6180_ALS_GAIN);
- if (ret < 0)
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
- if (ret == vl6180_als_gain[i][2]) {
- *val = vl6180_als_gain[i][0];
- *val2 = vl6180_als_gain[i][1];
- }
- }
+ *val = data->als_gain_milli;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
- return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -365,37 +361,53 @@ static int vl6180_hold(struct vl6180_data *data, bool hold)
static int vl6180_set_als_gain(struct vl6180_data *data, int val, int val2)
{
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(vl6180_als_gain); i++) {
- if (val == vl6180_als_gain[i][0] &&
- val2 == vl6180_als_gain[i][1]) {
- mutex_lock(&data->lock);
- ret = vl6180_hold(data, true);
- if (ret < 0)
- goto fail;
- ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN,
- vl6180_als_gain[i][3]);
-fail:
- vl6180_hold(data, false);
- mutex_unlock(&data->lock);
- return ret;
- }
- }
+ int i, ret, gain;
- return -EINVAL;
+ if (val < 1 || val > 40)
+ return -EINVAL;
+
+ gain = (val * 1000000 + val2) / 1000;
+ if (gain < 1 || gain > 40000)
+ return -EINVAL;
+
+ i = find_closest(gain, vl6180_als_gain_tab,
+ ARRAY_SIZE(vl6180_als_gain_tab));
+
+ mutex_lock(&data->lock);
+ ret = vl6180_hold(data, true);
+ if (ret < 0)
+ goto fail;
+
+ ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN,
+ vl6180_als_gain_tab_bits[i]);
+
+ if (ret >= 0)
+ data->als_gain_milli = vl6180_als_gain_tab[i];
+
+fail:
+ vl6180_hold(data, false);
+ mutex_unlock(&data->lock);
+ return ret;
}
-static int vl6180_set_it(struct vl6180_data *data, int val2)
+static int vl6180_set_it(struct vl6180_data *data, int val, int val2)
{
- int ret;
+ int ret, it_ms;
+
+ it_ms = (val2 + 500) / 1000; /* round to ms */
+ if (val != 0 || it_ms < 1 || it_ms > 512)
+ return -EINVAL;
mutex_lock(&data->lock);
ret = vl6180_hold(data, true);
if (ret < 0)
goto fail;
- ret = vl6180_write_word(data->client, VL6180_ALS_IT,
- (val2 - 500) / 1000); /* write value in ms */
+
+ ret = vl6180_write_word(data->client, VL6180_ALS_IT, it_ms - 1);
+
+ if (ret >= 0)
+ data->als_it_ms = it_ms;
+
fail:
vl6180_hold(data, false);
mutex_unlock(&data->lock);
@@ -411,10 +423,8 @@ static int vl6180_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- if (val != 0 || val2 < 500 || val2 >= 512500)
- return -EINVAL;
+ return vl6180_set_it(data, val, val2);
- return vl6180_set_it(data, val2);
case IIO_CHAN_INFO_HARDWAREGAIN:
if (chan->type != IIO_LIGHT)
return -EINVAL;
@@ -429,7 +439,6 @@ static const struct iio_info vl6180_info = {
.read_raw = vl6180_read_raw,
.write_raw = vl6180_write_raw,
.attrs = &vl6180_attribute_group,
- .driver_module = THIS_MODULE,
};
static int vl6180_init(struct vl6180_data *data)
@@ -468,11 +477,13 @@ static int vl6180_init(struct vl6180_data *data)
return ret;
/* ALS integration time: 100ms */
+ data->als_it_ms = 100;
ret = vl6180_write_word(client, VL6180_ALS_IT, VL6180_ALS_IT_100);
if (ret < 0)
return ret;
/* ALS gain: 1 */
+ data->als_gain_milli = 1000;
ret = vl6180_write_byte(client, VL6180_ALS_GAIN, VL6180_ALS_GAIN_1);
if (ret < 0)
return ret;
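The reworked vl6180 scale path above replaces per-read register lookups with cached gain (in 1/1000th units) and integration time (in ms), and reports the lux scale as a fraction: one count is 0.32 lux at gain 1 and 100 ms, so scale = 32000 / (als_gain_milli * als_it_ms). A hedged sketch of that arithmetic; the helper name is illustrative and not in the patch:

/* scale = 0.32 lux * (1000 / gain_milli) * (100 ms / it_ms) per count,
 * expressed for IIO_VAL_FRACTIONAL as 32000 / (gain_milli * it_ms).
 */
static void vl6180_scale_fraction(unsigned int gain_milli, unsigned int it_ms,
				  int *val, int *val2)
{
	*val = 32000;			/* 0.32 * 1000 * 100 */
	*val2 = gain_milli * it_ms;	/* gain 10, IT 100 ms -> 0.032 lux/count */
}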
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index b86d6cb7f285..664b2f866472 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O Magnetometer sensor drivers
#
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 0bff76e96950..93be1f4c0f27 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -658,7 +658,6 @@ static const unsigned long ak8974_scan_masks[] = { 0x7, 0 };
static const struct iio_info ak8974_info = {
.read_raw = &ak8974_read_raw,
- .driver_module = THIS_MODULE,
};
static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 4ff883942f7b..c09329069d0a 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -781,7 +781,6 @@ static const unsigned long ak8975_scan_masks[] = { 0x7, 0 };
static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_ACPI
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d104fb8d9379..d91cb845e3d6 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -651,7 +651,6 @@ static const struct iio_info bmc150_magn_info = {
.attrs = &bmc150_magn_attrs_group,
.read_raw = bmc150_magn_read_raw,
.write_raw = bmc150_magn_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long bmc150_magn_scan_masks[] = {
@@ -811,7 +810,6 @@ err_unlock:
static const struct iio_trigger_ops bmc150_magn_trigger_ops = {
.set_trigger_state = bmc150_magn_data_rdy_trigger_set_state,
.try_reenable = bmc150_magn_trig_try_reen,
- .owner = THIS_MODULE,
};
static int bmc150_magn_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/magnetometer/bmc150_magn.h b/drivers/iio/magnetometer/bmc150_magn.h
index 9a8e26812ca8..3b69232afd2c 100644
--- a/drivers/iio/magnetometer/bmc150_magn.h
+++ b/drivers/iio/magnetometer/bmc150_magn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BMC150_MAGN_H_
#define _BMC150_MAGN_H_
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 0e791b02ed4a..a1fd9d591818 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -282,7 +282,6 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info magn_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &magn_3d_read_raw,
.write_raw = &magn_3d_write_raw,
};
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index ba3e2a374ee5..ada142fb7aa3 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -597,7 +597,6 @@ static const struct iio_info hmc5843_info = {
.read_raw = &hmc5843_read_raw,
.write_raw = &hmc5843_write_raw,
.write_raw_get_fmt = &hmc5843_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index dad8d57f7402..b34ace76d31b 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -333,7 +333,6 @@ static const struct iio_info mag3110_info = {
.attrs = &mag3110_group,
.read_raw = &mag3110_read_raw,
.write_raw = &mag3110_write_raw,
- .driver_module = THIS_MODULE,
};
static const unsigned long mag3110_scan_masks[] = {0x7, 0xf, 0};
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 176e14a61558..6b640c6338c9 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -418,7 +418,6 @@ static int mmc35240_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info mmc35240_info = {
- .driver_module = THIS_MODULE,
.read_raw = mmc35240_read_raw,
.write_raw = mmc35240_write_raw,
.attrs = &mmc35240_attribute_group,
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 08aafba4481c..72f6d1335a04 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -317,7 +317,14 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
.drdy_irq = {
/* drdy line is routed drdy pin */
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x22,
+ .value = BIT(2),
},
.multi_read_bit = true,
.bootime = 2,
@@ -359,9 +366,14 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.mask = 0x10,
},
.drdy_irq = {
- .addr = 0x62,
- .mask_int1 = 0x01,
- .addr_stat_drdy = 0x67,
+ .int1 = {
+ .addr = 0x62,
+ .mask = 0x01,
+ },
+ .stat_drdy = {
+ .addr = 0x67,
+ .mask = 0x07,
+ },
},
.multi_read_bit = false,
.bootime = 2,
@@ -438,7 +450,6 @@ static const struct attribute_group st_magn_attribute_group = {
};
static const struct iio_info magn_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_magn_attribute_group,
.read_raw = &st_magn_read_raw,
.write_raw = &st_magn_write_raw,
@@ -447,7 +458,6 @@ static const struct iio_info magn_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_magn_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
diff --git a/drivers/iio/multiplexer/iio-mux.c b/drivers/iio/multiplexer/iio-mux.c
index 37ba007f8dca..60621ccd67e4 100644
--- a/drivers/iio/multiplexer/iio-mux.c
+++ b/drivers/iio/multiplexer/iio-mux.c
@@ -173,7 +173,6 @@ static const struct iio_info mux_info = {
.read_raw = mux_read_raw,
.read_avail = mux_read_avail,
.write_raw = mux_write_raw,
- .driver_module = THIS_MODULE,
};
static ssize_t mux_read_ext_info(struct iio_dev *indio_dev, uintptr_t private,
@@ -285,6 +284,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache = devm_kzalloc(dev,
sizeof(*child->ext_info_cache) *
num_ext_info, GFP_KERNEL);
+ if (!child->ext_info_cache)
+ return -ENOMEM;
+
for (i = 0; i < num_ext_info; ++i) {
child->ext_info_cache[i].size = -1;
@@ -309,6 +311,9 @@ static int mux_configure_channel(struct device *dev, struct mux *mux,
child->ext_info_cache[i].data = devm_kmemdup(dev, page, ret + 1,
GFP_KERNEL);
+ if (!child->ext_info_cache[i].data)
+ return -ENOMEM;
+
child->ext_info_cache[i].data[ret] = 0;
child->ext_info_cache[i].size = ret;
}
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index fd1b3696ee42..1e5451d1ff88 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -186,7 +186,6 @@ static int incl_3d_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info incl_3d_info = {
- .driver_module = THIS_MODULE,
.read_raw = &incl_3d_read_raw,
.write_raw = &incl_3d_write_raw,
};
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index 98fe0c5df380..a69db2002414 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -138,7 +138,6 @@ static int dev_rot_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info dev_rot_info = {
- .driver_module = THIS_MODULE,
.read_raw_multi = &dev_rot_read_raw,
.write_raw = &dev_rot_write_raw,
};
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 2260d40e0936..1afd1e70f8cc 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O potentiometer drivers
#
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
index fb9e2a337dc2..9b0ff4ab2f9c 100644
--- a/drivers/iio/potentiometer/ds1803.c
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -110,7 +110,6 @@ static int ds1803_write_raw(struct iio_dev *indio_dev,
static const struct iio_info ds1803_info = {
.read_raw = ds1803_read_raw,
.write_raw = ds1803_write_raw,
- .driver_module = THIS_MODULE,
};
static int ds1803_probe(struct i2c_client *client,
diff --git a/drivers/iio/potentiometer/max5481.c b/drivers/iio/potentiometer/max5481.c
index 926554991244..ffe2761333a2 100644
--- a/drivers/iio/potentiometer/max5481.c
+++ b/drivers/iio/potentiometer/max5481.c
@@ -119,7 +119,6 @@ static int max5481_write_raw(struct iio_dev *indio_dev,
static const struct iio_info max5481_info = {
.read_raw = max5481_read_raw,
.write_raw = max5481_write_raw,
- .driver_module = THIS_MODULE,
};
#if defined(CONFIG_OF)
@@ -207,7 +206,6 @@ MODULE_DEVICE_TABLE(acpi, max5481_acpi_match);
static struct spi_driver max5481_driver = {
.driver = {
.name = "max5481",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(max5481_match),
.acpi_match_table = ACPI_PTR(max5481_acpi_match),
},
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 6c50939a2e83..5042d3e09b12 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -83,7 +83,6 @@ static int max5487_write_raw(struct iio_dev *indio_dev,
static const struct iio_info max5487_info = {
.read_raw = max5487_read_raw,
.write_raw = max5487_write_raw,
- .driver_module = THIS_MODULE,
};
static int max5487_spi_probe(struct spi_device *spi)
@@ -147,7 +146,6 @@ MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
static struct spi_driver max5487_driver = {
.driver = {
.name = "max5487",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(max5487_acpi_match),
},
.id_table = max5487_id,
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index 4e7e2c6c522c..b3e30db246cc 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -237,7 +237,6 @@ static int mcp4131_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4131_info = {
.read_raw = mcp4131_read_raw,
.write_raw = mcp4131_write_raw,
- .driver_module = THIS_MODULE,
};
static int mcp4131_probe(struct spi_device *spi)
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 314353d7ab59..114ab876fcc6 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -207,7 +207,6 @@ static const struct iio_info mcp4531_info = {
.read_raw = mcp4531_read_raw,
.read_avail = mcp4531_read_avail,
.write_raw = mcp4531_write_raw,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_OF
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 7b6b54531ea2..93f9d4a8c9aa 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -106,7 +106,6 @@ static int tpl0102_write_raw(struct iio_dev *indio_dev,
static const struct iio_info tpl0102_info = {
.read_raw = tpl0102_read_raw,
.write_raw = tpl0102_write_raw,
- .driver_module = THIS_MODULE,
};
static int tpl0102_probe(struct i2c_client *client,
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index afa8de3418d0..007710991f15 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -207,7 +207,6 @@ static int lmp91000_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info lmp91000_info = {
- .driver_module = THIS_MODULE,
.read_raw = lmp91000_read_raw,
};
@@ -283,7 +282,6 @@ static int lmp91000_buffer_cb(const void *val, void *private)
}
static const struct iio_trigger_ops lmp91000_trigger_ops = {
- .owner = THIS_MODULE,
};
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 838642789389..c2058d7b2f93 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O pressure drivers
#
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
index 43bdd0b9155f..46a220c70d6a 100644
--- a/drivers/iio/pressure/abp060mg.c
+++ b/drivers/iio/pressure/abp060mg.c
@@ -168,7 +168,6 @@ static int abp060mg_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info abp060mg_info = {
- .driver_module = THIS_MODULE,
.read_raw = abp060mg_read_raw,
};
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 8f26428804a2..fd1da26a62e4 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -561,7 +561,6 @@ static const struct attribute_group bmp280_attrs_group = {
};
static const struct iio_info bmp280_info = {
- .driver_module = THIS_MODULE,
.read_raw = &bmp280_read_raw,
.write_raw = &bmp280_write_raw,
.attrs = &bmp280_attrs_group,
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
index 6807113ec09f..08c00ac32bda 100644
--- a/drivers/iio/pressure/bmp280-regmap.c
+++ b/drivers/iio/pressure/bmp280-regmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/module.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 61347438b779..eda50ef65706 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index 48b2a30f57ae..4599fde4dd25 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -120,14 +120,12 @@ static int cros_ec_baro_write(struct iio_dev *indio_dev,
static const struct iio_info cros_ec_baro_info = {
.read_raw = &cros_ec_baro_read,
.write_raw = &cros_ec_baro_write,
- .driver_module = THIS_MODULE,
};
static int cros_ec_baro_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
- struct cros_ec_device *ec_device;
struct iio_dev *indio_dev;
struct cros_ec_baro_state *state;
struct iio_chan_spec *channel;
@@ -137,7 +135,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
dev_warn(dev, "No CROS EC device found.\n");
return -EINVAL;
}
- ec_device = ec_dev->ec_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(*state));
if (!indio_dev)
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 6848d8c80eff..4c437918f1d2 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -160,7 +160,6 @@ static int press_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info press_info = {
- .driver_module = THIS_MODULE,
.read_raw = &press_read_raw,
.write_raw = &press_write_raw,
};
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
index 8c7b3ec3d84a..406934ea6228 100644
--- a/drivers/iio/pressure/hp03.c
+++ b/drivers/iio/pressure/hp03.c
@@ -208,7 +208,6 @@ static int hp03_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info hp03_info = {
- .driver_module = THIS_MODULE,
.read_raw = &hp03_read_raw,
};
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index 12f769e86355..c38c19678cf6 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -351,7 +351,6 @@ static const struct iio_info hp206c_info = {
.attrs = &hp206c_attribute_group,
.read_raw = hp206c_read_raw,
.write_raw = hp206c_write_raw,
- .driver_module = THIS_MODULE,
};
static int hp206c_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index 8f2bce213248..ab4786d0102b 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -144,7 +144,6 @@ static const struct iio_chan_spec mpl115_channels[] = {
static const struct iio_info mpl115_info = {
.read_raw = &mpl115_read_raw,
- .driver_module = THIS_MODULE,
};
int mpl115_probe(struct device *dev, const char *name,
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 619b963714c7..7537547fb7ee 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -218,7 +218,6 @@ static const struct iio_chan_spec mpl3115_channels[] = {
static const struct iio_info mpl3115_info = {
.read_raw = &mpl3115_read_raw,
- .driver_module = THIS_MODULE,
};
static int mpl3115_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2a77a2f15752..f950cfde5db9 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -384,7 +384,6 @@ static const struct iio_info ms5611_info = {
.read_raw = &ms5611_read_raw,
.write_raw = &ms5611_write_raw,
.attrs = &ms5611_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ms5611_init(struct iio_dev *indio_dev)
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index c413f8a84a63..e2f73e6dc58f 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -124,7 +124,6 @@ static const struct iio_info ms5637_info = {
.read_raw = ms5637_read_raw,
.write_raw = ms5637_write_raw,
.attrs = &ms5637_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ms5637_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 7d995937adba..e67eb0d971bf 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -19,6 +19,8 @@ enum st_press_type {
LPS25H,
LPS331AP,
LPS22HB,
+ LPS33HW,
+ LPS35HW,
ST_PRESS_MAX,
};
@@ -26,6 +28,8 @@ enum st_press_type {
#define LPS25H_PRESS_DEV_NAME "lps25h"
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
#define LPS22HB_PRESS_DEV_NAME "lps22hb"
+#define LPS33HW_PRESS_DEV_NAME "lps33hw"
+#define LPS35HW_PRESS_DEV_NAME "lps35hw"
/**
* struct st_sensors_platform_data - default press platform data
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 34611a8ea2ce..349e5c713c03 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -280,14 +280,28 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x04,
},
.drdy_irq = {
- .addr = 0x22,
- .mask_int1 = 0x04,
- .mask_int2 = 0x20,
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x04,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
+ .int2 = {
+ .addr = 0x22,
+ .mask = 0x20,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -335,8 +349,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = 0x20,
.mask = 0x04,
},
- .drdy_irq = {
- .addr = 0,
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -388,14 +403,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x04,
},
.drdy_irq = {
- .addr = 0x23,
- .mask_int1 = 0x01,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x23,
+ .mask = 0x01,
+ .addr_od = 0x22,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x22,
.mask_ihl = 0x80,
- .addr_od = 0x22,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x20,
+ .value = BIT(0),
},
.multi_read_bit = true,
.bootime = 2,
@@ -410,6 +433,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LPS22HB_PRESS_DEV_NAME,
+ [1] = LPS33HW_PRESS_DEV_NAME,
+ [2] = LPS35HW_PRESS_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
@@ -447,14 +472,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask = 0x02,
},
.drdy_irq = {
- .addr = 0x12,
- .mask_int1 = 0x04,
- .mask_int2 = 0x00,
+ .int1 = {
+ .addr = 0x12,
+ .mask = 0x04,
+ .addr_od = 0x12,
+ .mask_od = 0x40,
+ },
.addr_ihl = 0x12,
.mask_ihl = 0x80,
- .addr_od = 0x12,
- .mask_od = 0x40,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x03,
+ },
+ },
+ .sim = {
+ .addr = 0x10,
+ .value = BIT(0),
},
.multi_read_bit = false,
.bootime = 2,
@@ -547,7 +580,6 @@ static const struct attribute_group st_press_attribute_group = {
};
static const struct iio_info press_info = {
- .driver_module = THIS_MODULE,
.attrs = &st_press_attribute_group,
.read_raw = &st_press_read_raw,
.write_raw = &st_press_write_raw,
@@ -556,7 +588,6 @@ static const struct iio_info press_info = {
#ifdef CONFIG_IIO_TRIGGER
static const struct iio_trigger_ops st_press_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
.validate_device = st_sensors_validate_device,
};
@@ -605,7 +636,8 @@ int st_press_common_probe(struct iio_dev *indio_dev)
press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
/* Some devices don't support a data ready pin. */
- if (!pdata && press_data->sensor_settings->drdy_irq.addr)
+ if (!pdata && (press_data->sensor_settings->drdy_irq.int1.addr ||
+ press_data->sensor_settings->drdy_irq.int2.addr))
pdata = (struct st_sensors_platform_data *)&default_press_pdata;
err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
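The drdy_irq rework above turns the flat addr/mask_int1/mask_int2 fields into per-pin int1/int2 sub-structures (each carrying its own open-drain address and mask) and gives stat_drdy an explicit mask, which is why the probe check now tests either pin's address. Illustration only, with a hypothetical helper name:

/* True when the settings entry describes at least one data-ready pin. */
static bool st_press_has_drdy(const struct st_sensor_settings *s)
{
	return s->drdy_irq.int1.addr || s->drdy_irq.int2.addr;
}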
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 7f15e927fa2b..fbb59059e942 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -37,6 +37,14 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hb-press",
.data = LPS22HB_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps33hw",
+ .data = LPS33HW_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps35hw",
+ .data = LPS35HW_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -59,6 +67,8 @@ static const struct i2c_device_id st_press_id_table[] = {
{ LPS25H_PRESS_DEV_NAME, LPS25H },
{ LPS331AP_PRESS_DEV_NAME, LPS331AP },
{ LPS22HB_PRESS_DEV_NAME, LPS22HB },
+ { LPS33HW_PRESS_DEV_NAME, LPS33HW },
+ { LPS35HW_PRESS_DEV_NAME, LPS35HW },
{},
};
MODULE_DEVICE_TABLE(i2c, st_press_id_table);
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index f5ebd36bb4bf..9a3441b128e7 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -41,6 +41,14 @@ static const struct of_device_id st_press_of_match[] = {
.compatible = "st,lps22hb-press",
.data = LPS22HB_PRESS_DEV_NAME,
},
+ {
+ .compatible = "st,lps33hw",
+ .data = LPS33HW_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps35hw",
+ .data = LPS35HW_PRESS_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
@@ -83,6 +91,8 @@ static const struct spi_device_id st_press_id_table[] = {
{ LPS25H_PRESS_DEV_NAME },
{ LPS331AP_PRESS_DEV_NAME },
{ LPS22HB_PRESS_DEV_NAME },
+ { LPS33HW_PRESS_DEV_NAME },
+ { LPS35HW_PRESS_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_press_id_table);
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index 2667e71721f5..92c00f603b1d 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -209,7 +209,6 @@ static const struct iio_info t5403_info = {
.read_raw = &t5403_read_raw,
.write_raw = &t5403_write_raw,
.attrs = &t5403_attribute_group,
- .driver_module = THIS_MODULE,
};
static int t5403_probe(struct i2c_client *client,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 91431454eb85..81d8f24eaeb4 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -1390,7 +1390,6 @@ static int zpa2326_set_trigger_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops zpa2326_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = zpa2326_set_trigger_state,
};
@@ -1590,7 +1589,6 @@ static const struct iio_chan_spec zpa2326_channels[] = {
};
static const struct iio_info zpa2326_info = {
- .driver_module = THIS_MODULE,
.attrs = &zpa2326_attribute_group,
.read_raw = zpa2326_read_raw,
.write_raw = zpa2326_write_raw,
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index ae070950f920..fcb1c4ba5e41 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,16 @@ config LIDAR_LITE_V2
To compile this driver as a module, choose M here: the
module will be called pulsedlight-lite-v2
+config RFD77402
+ tristate "RFD77402 ToF sensor"
+ depends on I2C
+ help
+ Say Y to build a driver for the RFD77402 Time-of-Flight (distance)
+ sensor module with an I2C interface.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rfd77402.
+
config SRF04
tristate "Devantech SRF04 ultrasonic ranger sensor"
depends on GPIOLIB
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index ed1b6f4cc209..4f4ed45e87ef 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for IIO proximity sensors
#
@@ -5,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
+obj-$(CONFIG_RFD77402) += rfd77402.o
obj-$(CONFIG_SRF04) += srf04.o
obj-$(CONFIG_SRF08) += srf08.o
obj-$(CONFIG_SX9500) += sx9500.o
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 4a48b7ba3a1c..b6249af48014 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -221,7 +221,6 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info as3935_info = {
- .driver_module = THIS_MODULE,
.attrs = &as3935_attribute_group,
.read_raw = &as3935_read_raw,
};
@@ -247,7 +246,6 @@ err_read:
}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static void as3935_event_work(struct work_struct *work)
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 36c1ddc251aa..4d56f67b24c6 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -249,7 +249,6 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
}
static const struct iio_info lidar_info = {
- .driver_module = THIS_MODULE,
.read_raw = lidar_read_raw,
};
diff --git a/drivers/iio/proximity/rfd77402.c b/drivers/iio/proximity/rfd77402.c
new file mode 100644
index 000000000000..fe29fb1a19a6
--- /dev/null
+++ b/drivers/iio/proximity/rfd77402.c
@@ -0,0 +1,352 @@
+/*
+ * rfd77402.c - Support for RF Digital RFD77402 Time-of-Flight (distance) sensor
+ *
+ * Copyright 2017 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address 0x4c
+ *
+ * TODO: interrupt
+ * https://media.digikey.com/pdf/Data%20Sheets/RF%20Digital%20PDFs/RFD77402.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+
+#define RFD77402_DRV_NAME "rfd77402"
+
+#define RFD77402_ICSR 0x00 /* Interrupt Control Status Register */
+#define RFD77402_ICSR_INT_MODE BIT(2)
+#define RFD77402_ICSR_INT_POL BIT(3)
+#define RFD77402_ICSR_RESULT BIT(4)
+#define RFD77402_ICSR_M2H_MSG BIT(5)
+#define RFD77402_ICSR_H2M_MSG BIT(6)
+#define RFD77402_ICSR_RESET BIT(7)
+
+#define RFD77402_CMD_R 0x04
+#define RFD77402_CMD_SINGLE 0x01
+#define RFD77402_CMD_STANDBY 0x10
+#define RFD77402_CMD_MCPU_OFF 0x11
+#define RFD77402_CMD_MCPU_ON 0x12
+#define RFD77402_CMD_RESET BIT(6)
+#define RFD77402_CMD_VALID BIT(7)
+
+#define RFD77402_STATUS_R 0x06
+#define RFD77402_STATUS_PM_MASK GENMASK(4, 0)
+#define RFD77402_STATUS_STANDBY 0x00
+#define RFD77402_STATUS_MCPU_OFF 0x10
+#define RFD77402_STATUS_MCPU_ON 0x18
+
+#define RFD77402_RESULT_R 0x08
+#define RFD77402_RESULT_DIST_MASK GENMASK(12, 2)
+#define RFD77402_RESULT_ERR_MASK GENMASK(14, 13)
+#define RFD77402_RESULT_VALID BIT(15)
+
+#define RFD77402_PMU_CFG 0x14
+#define RFD77402_PMU_MCPU_INIT BIT(9)
+
+#define RFD77402_I2C_INIT_CFG 0x1c
+#define RFD77402_I2C_ADDR_INCR BIT(0)
+#define RFD77402_I2C_DATA_INCR BIT(2)
+#define RFD77402_I2C_HOST_DEBUG BIT(5)
+#define RFD77402_I2C_MCPU_DEBUG BIT(6)
+
+#define RFD77402_CMD_CFGR_A 0x0c
+#define RFD77402_CMD_CFGR_B 0x0e
+#define RFD77402_HFCFG_0 0x20
+#define RFD77402_HFCFG_1 0x22
+#define RFD77402_HFCFG_2 0x24
+#define RFD77402_HFCFG_3 0x26
+
+#define RFD77402_MOD_CHIP_ID 0x28
+
+/* magic configuration values from datasheet */
+static const struct {
+ u8 reg;
+ u16 val;
+} rf77402_tof_config[] = {
+ {RFD77402_CMD_CFGR_A, 0xe100},
+ {RFD77402_CMD_CFGR_B, 0x10ff},
+ {RFD77402_HFCFG_0, 0x07d0},
+ {RFD77402_HFCFG_1, 0x5008},
+ {RFD77402_HFCFG_2, 0xa041},
+ {RFD77402_HFCFG_3, 0x45d4},
+};
+
+struct rfd77402_data {
+ struct i2c_client *client;
+ /* Serialize reads from the sensor */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec rfd77402_channels[] = {
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int rfd77402_set_state(struct rfd77402_data *data, u8 state, u16 check)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_CMD_R,
+ state | RFD77402_CMD_VALID);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(10000, 20000);
+
+ ret = i2c_smbus_read_word_data(data->client, RFD77402_STATUS_R);
+ if (ret < 0)
+ return ret;
+ if ((ret & RFD77402_STATUS_PM_MASK) != check)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int rfd77402_measure(struct rfd77402_data *data)
+{
+ int ret;
+ int tries = 10;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_ON,
+ RFD77402_STATUS_MCPU_ON);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_CMD_R,
+ RFD77402_CMD_SINGLE |
+ RFD77402_CMD_VALID);
+ if (ret < 0)
+ goto err;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, RFD77402_ICSR);
+ if (ret < 0)
+ goto err;
+ if (ret & RFD77402_ICSR_RESULT)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = i2c_smbus_read_word_data(data->client, RFD77402_RESULT_R);
+ if (ret < 0)
+ goto err;
+
+ if ((ret & RFD77402_RESULT_ERR_MASK) ||
+ !(ret & RFD77402_RESULT_VALID)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ return (ret & RFD77402_RESULT_DIST_MASK) >> 2;
+
+err:
+ rfd77402_set_state(data, RFD77402_CMD_MCPU_OFF,
+ RFD77402_STATUS_MCPU_OFF);
+ return ret;
+}
+
+static int rfd77402_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct rfd77402_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = rfd77402_measure(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ /* 1 LSB is 1 mm */
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info rfd77402_info = {
+ .read_raw = rfd77402_read_raw,
+};
+
+static int rfd77402_init(struct rfd77402_data *data)
+{
+ int ret, i;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+ if (ret < 0)
+ return ret;
+
+ /* configure INT pad as push-pull, active low */
+ ret = i2c_smbus_write_byte_data(data->client, RFD77402_ICSR,
+ RFD77402_ICSR_INT_MODE);
+ if (ret < 0)
+ return ret;
+
+ /* I2C configuration */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_I2C_INIT_CFG,
+ RFD77402_I2C_ADDR_INCR |
+ RFD77402_I2C_DATA_INCR |
+ RFD77402_I2C_HOST_DEBUG |
+ RFD77402_I2C_MCPU_DEBUG);
+ if (ret < 0)
+ return ret;
+
+ /* set initialization */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_PMU_CFG, 0x0500);
+ if (ret < 0)
+ return ret;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_OFF,
+ RFD77402_STATUS_MCPU_OFF);
+ if (ret < 0)
+ return ret;
+
+ /* set initialization */
+ ret = i2c_smbus_write_word_data(data->client, RFD77402_PMU_CFG, 0x0600);
+ if (ret < 0)
+ return ret;
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_MCPU_ON,
+ RFD77402_STATUS_MCPU_ON);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(rf77402_tof_config); i++) {
+ ret = i2c_smbus_write_word_data(data->client,
+ rf77402_tof_config[i].reg,
+ rf77402_tof_config[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+
+ return ret;
+}
+
+static int rfd77402_powerdown(struct rfd77402_data *data)
+{
+ return rfd77402_set_state(data, RFD77402_CMD_STANDBY,
+ RFD77402_STATUS_STANDBY);
+}
+
+static int rfd77402_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rfd77402_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, RFD77402_MOD_CHIP_ID);
+ if (ret < 0)
+ return ret;
+ if (ret != 0xad01 && ret != 0xad02) /* known chip ids */
+ return -ENODEV;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &rfd77402_info;
+ indio_dev->channels = rfd77402_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rfd77402_channels);
+ indio_dev->name = RFD77402_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = rfd77402_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_powerdown;
+
+ return 0;
+
+err_powerdown:
+ rfd77402_powerdown(data);
+ return ret;
+}
+
+static int rfd77402_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ rfd77402_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rfd77402_suspend(struct device *dev)
+{
+ struct rfd77402_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return rfd77402_powerdown(data);
+}
+
+static int rfd77402_resume(struct device *dev)
+{
+ struct rfd77402_data *data = iio_priv(i2c_get_clientdata(
+ to_i2c_client(dev)));
+
+ return rfd77402_init(data);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rfd77402_pm_ops, rfd77402_suspend, rfd77402_resume);
+
+static const struct i2c_device_id rfd77402_id[] = {
+ { "rfd77402", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rfd77402_id);
+
+static struct i2c_driver rfd77402_driver = {
+ .driver = {
+ .name = RFD77402_DRV_NAME,
+ .pm = &rfd77402_pm_ops,
+ },
+ .probe = rfd77402_probe,
+ .remove = rfd77402_remove,
+ .id_table = rfd77402_id,
+};
+
+module_i2c_driver(rfd77402_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("RFD77402 Time-of-Flight sensor driver");
+MODULE_LICENSE("GPL");
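Illustrative only, not part of the patch: with the channel spec above (IIO_DISTANCE with RAW and SCALE info masks), the IIO core exposes the measurement as in_distance_raw and in_distance_scale in sysfs. A minimal userspace sketch in C, assuming the sensor happens to register as iio:device0 (a real program would look the index up by device name):

/* rfd77402_read_distance.c - illustrative sketch, not part of the patch */
#include <stdio.h>
#include <stdlib.h>

/* Assumed sysfs path; the "0" index depends on probe order. */
#define SYSFS_DEV "/sys/bus/iio/devices/iio:device0/"

static int read_sysfs_double(const char *name, double *out)
{
	char path[256];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), SYSFS_DEV "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = (fscanf(f, "%lf", out) == 1) ? 0 : -1;
	fclose(f);
	return ok;
}

int main(void)
{
	double raw, scale;

	/* in_distance_raw: one LSB is 1 mm (see rfd77402_read_raw above). */
	if (read_sysfs_double("in_distance_raw", &raw) ||
	    read_sysfs_double("in_distance_scale", &scale))
		return EXIT_FAILURE;

	/* scale is 0.001, so the product is in metres. */
	printf("distance: %.3f m\n", raw * scale);
	return EXIT_SUCCESS;
}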
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index e37667f933b3..09c7b9c095b0 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -203,7 +203,6 @@ static int srf04_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info srf04_iio_info = {
- .driver_module = THIS_MODULE,
.read_raw = srf04_read_raw,
};
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 9380d545aab1..f2bf783f829a 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -436,7 +436,6 @@ static const struct iio_chan_spec srf08_channels[] = {
static const struct iio_info srf08_info = {
.read_raw = srf08_read_raw,
.attrs = &srf08_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
@@ -445,7 +444,6 @@ static const struct iio_info srf08_info = {
*/
static const struct iio_info srf02_info = {
.read_raw = srf08_read_raw,
- .driver_module = THIS_MODULE,
};
static int srf08_probe(struct i2c_client *client,
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index f42b3a1c75ff..53c5d653e780 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -615,7 +615,6 @@ static const struct attribute_group sx9500_attribute_group = {
};
static const struct iio_info sx9500_info = {
- .driver_module = THIS_MODULE,
.attrs = &sx9500_attribute_group,
.read_raw = &sx9500_read_raw,
.write_raw = &sx9500_write_raw,
@@ -650,7 +649,6 @@ out:
static const struct iio_trigger_ops sx9500_trigger_ops = {
.set_trigger_state = sx9500_set_trigger_state,
- .owner = THIS_MODULE,
};
static irqreturn_t sx9500_trigger_handler(int irq, void *private)
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index ad1d668de546..34bd9023727b 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O temperature drivers
#
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index c01efeca4002..beaf6fd3e337 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -123,7 +123,6 @@ static int temperature_write_raw(struct iio_dev *indio_dev,
}
static const struct iio_info temperature_info = {
- .driver_module = THIS_MODULE,
.read_raw = &temperature_read_raw,
.write_raw = &temperature_write_raw,
};
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index d70e2e53d6a7..e8b7e0b6c8ad 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -208,7 +208,6 @@ static int maxim_thermocouple_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_info maxim_thermocouple_info = {
- .driver_module = THIS_MODULE,
.read_raw = maxim_thermocouple_read_raw,
};
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 2077eef4095c..d619e8634a00 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -400,7 +400,6 @@ static const struct iio_info mlx90614_info = {
.write_raw = mlx90614_write_raw,
.write_raw_get_fmt = mlx90614_write_raw_get_fmt,
.attrs = &mlx90614_attr_group,
- .driver_module = THIS_MODULE,
};
#ifdef CONFIG_PM
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 18c9b43c02cb..a9b5b7cc7836 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -179,7 +179,6 @@ static const struct iio_info tmp006_info = {
.read_raw = tmp006_read_raw,
.write_raw = tmp006_write_raw,
.attrs = &tmp006_attribute_group,
- .driver_module = THIS_MODULE,
};
static bool tmp006_check_identification(struct i2c_client *client)
diff --git a/drivers/iio/temperature/tmp007.c b/drivers/iio/temperature/tmp007.c
index 0615324d054c..0e3f2d432e10 100644
--- a/drivers/iio/temperature/tmp007.c
+++ b/drivers/iio/temperature/tmp007.c
@@ -426,7 +426,6 @@ static const struct iio_info tmp007_info = {
.read_event_value = tmp007_read_thresh,
.write_event_value = tmp007_write_thresh,
.attrs = &tmp007_attribute_group,
- .driver_module = THIS_MODULE,
};
static bool tmp007_identify(struct i2c_client *client)
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index d8aa211d76e4..3799d007c8e7 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -111,7 +111,6 @@ static const struct iio_chan_spec tsys01_channels[] = {
static const struct iio_info tsys01_info = {
.read_raw = tsys01_read_raw,
- .driver_module = THIS_MODULE,
};
static bool tsys01_crc_valid(u16 *n_prom)
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index c0a19a000387..9b2e56fa5fd5 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -120,7 +120,6 @@ static const struct iio_info tsys02d_info = {
.read_raw = tsys02d_read_raw,
.write_raw = tsys02d_write_raw,
.attrs = &tsys02d_attribute_group,
- .driver_module = THIS_MODULE,
};
static int tsys02d_probe(struct i2c_client *client,
diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile
index 0a72a2a76cb2..f3d11acb8a0b 100644
--- a/drivers/iio/trigger/Makefile
+++ b/drivers/iio/trigger/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for triggers not associated with iio-devices
#
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index a1cad6cc2e0f..7accd0187ba1 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -30,7 +30,7 @@ struct iio_hrtimer_info {
ktime_t period;
};
-static struct config_item_type iio_hrtimer_type = {
+static const struct config_item_type iio_hrtimer_type = {
.ct_owner = THIS_MODULE,
};
@@ -114,7 +114,6 @@ static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state)
}
static const struct iio_trigger_ops iio_hrtimer_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = iio_trig_hrtimer_set_state,
};
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index e18f12b74610..171c4ed03543 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -29,7 +29,6 @@ static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private)
}
static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
- .owner = THIS_MODULE,
};
static int iio_interrupt_trigger_probe(struct platform_device *pdev)
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
index dc6be28f96fe..94a90e0a3fdb 100644
--- a/drivers/iio/trigger/iio-trig-loop.c
+++ b/drivers/iio/trigger/iio-trig-loop.c
@@ -36,7 +36,7 @@ struct iio_loop_info {
struct task_struct *task;
};
-static struct config_item_type iio_loop_type = {
+static const struct config_item_type iio_loop_type = {
.ct_owner = THIS_MODULE,
};
@@ -74,7 +74,6 @@ static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
static const struct iio_trigger_ops iio_loop_trigger_ops = {
.set_trigger_state = iio_loop_trigger_set_state,
- .owner = THIS_MODULE,
};
static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index 202e8b89caf2..3f0dc9a1a514 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -127,7 +127,6 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
};
static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
- .owner = THIS_MODULE,
};
static int iio_sysfs_trigger_probe(int id)
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index 241eae6a4306..de361d879929 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -37,7 +37,6 @@ static int stm32_lptim_validate_device(struct iio_trigger *trig,
}
static const struct iio_trigger_ops stm32_lptim_trigger_ops = {
- .owner = THIS_MODULE,
.validate_device = stm32_lptim_validate_device,
};
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index eb212f8c8879..b542dc484969 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -355,7 +355,6 @@ static const struct attribute_group *stm32_trigger_attr_groups[] = {
};
static const struct iio_trigger_ops timer_trigger_ops = {
- .owner = THIS_MODULE,
};
static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
@@ -493,7 +492,6 @@ static int stm32_counter_validate_trigger(struct iio_dev *indio_dev,
}
static const struct iio_info stm32_trigger_info = {
- .driver_module = THIS_MODULE,
.validate_trigger = stm32_counter_validate_trigger,
.read_raw = stm32_counter_read_raw,
.write_raw = stm32_counter_write_raw
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 3726205c8704..98ac46ed7214 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,6 +1,5 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
- depends on PCI || BROKEN
depends on HAS_IOMEM
depends on NET
depends on INET
@@ -46,6 +45,7 @@ config INFINIBAND_EXP_USER_ACCESS
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
+ depends on MMU
default y
config INFINIBAND_ON_DEMAND_PAGING
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index b4df164f71a6..504b926552c6 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
@@ -14,7 +15,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
security.o nldev.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
-ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
+ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
ib_cm-y := cm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 12523f630b61..f4e8185bccd3 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -229,8 +229,9 @@ void rdma_addr_unregister_client(struct rdma_addr_client *client)
}
EXPORT_SYMBOL(rdma_addr_unregister_client);
-int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- const unsigned char *dst_dev_addr)
+void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev,
+ const unsigned char *dst_dev_addr)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
@@ -238,7 +239,6 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
- return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
@@ -247,15 +247,14 @@ int rdma_translate_ip(const struct sockaddr *addr,
u16 *vlan_id)
{
struct net_device *dev;
- int ret = -EADDRNOTAVAIL;
if (dev_addr->bound_dev_if) {
dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!dev)
return -ENODEV;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_put(dev);
- return ret;
+ return 0;
}
switch (addr->sa_family) {
@@ -264,9 +263,9 @@ int rdma_translate_ip(const struct sockaddr *addr,
((const struct sockaddr_in *)addr)->sin_addr.s_addr);
if (!dev)
- return ret;
+ return -EADDRNOTAVAIL;
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -279,7 +278,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
if (ipv6_chk_addr(dev_addr->net,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
- ret = rdma_copy_addr(dev_addr, dev, NULL);
+ rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -290,7 +289,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
break;
#endif
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rdma_translate_ip);
@@ -336,7 +335,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr)
{
struct neighbour *n;
- int ret;
+ int ret = 0;
n = dst_neigh_lookup(dst, daddr);
@@ -346,7 +345,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ rdma_copy_addr(dev_addr, dst->dev, n->ha);
}
rcu_read_unlock();
@@ -494,7 +493,9 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (!(dst->dev->flags & IFF_NOARP))
return fetch_ha(dst, addr, dst_in, seq);
- return rdma_copy_addr(addr, dst->dev, NULL);
+ rdma_copy_addr(addr, dst->dev, NULL);
+
+ return 0;
}
static int addr_resolve(struct sockaddr *src_in,
@@ -852,7 +853,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
+ addr_wq = alloc_ordered_workqueue("ib_addr", 0);
if (!addr_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 4c4b46586af2..f6b159d79977 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1472,31 +1472,29 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(primary_path,
- htonl(ntohs(req_msg->primary_local_lid)));
+ ntohs(req_msg->primary_local_lid));
sa_path_set_slid(primary_path,
- htonl(ntohs(req_msg->primary_remote_lid)));
+ ntohs(req_msg->primary_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
- sa_path_set_dlid(primary_path, cpu_to_be32(lid));
+ sa_path_set_dlid(primary_path, lid);
lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
- sa_path_set_slid(primary_path, cpu_to_be32(lid));
+ sa_path_set_slid(primary_path, lid);
}
if (!cm_req_has_alt_path(req_msg))
return;
if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(alt_path,
- htonl(ntohs(req_msg->alt_local_lid)));
- sa_path_set_slid(alt_path,
- htonl(ntohs(req_msg->alt_remote_lid)));
+ sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
+ sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
- sa_path_set_dlid(alt_path, cpu_to_be32(lid));
+ sa_path_set_dlid(alt_path, lid);
lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
- sa_path_set_slid(alt_path, cpu_to_be32(lid));
+ sa_path_set_slid(alt_path, lid);
}
}
@@ -1575,7 +1573,7 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (req_msg->alt_local_lid)
+ if (cm_req_has_alt_path(req_msg))
param->alternate_path = &work->path[1];
else
param->alternate_path = NULL;
@@ -1856,7 +1854,8 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
memset(&work->path[0], 0, sizeof(work->path[0]));
- memset(&work->path[1], 0, sizeof(work->path[1]));
+ if (cm_req_has_alt_path(req_msg))
+ memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num,
@@ -2810,6 +2809,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
+ /* fall through */
default:
ret = -EINVAL;
goto error1;
@@ -3037,14 +3037,14 @@ static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
u32 lid;
if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
- sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
- sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
+ sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
+ sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
- sa_path_set_dlid(path, cpu_to_be32(lid));
+ sa_path_set_dlid(path, lid);
lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
- sa_path_set_slid(path, cpu_to_be32(lid));
+ sa_path_set_slid(path, lid);
}
}
@@ -3817,14 +3817,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
+ bool alt_path = false;
u16 attr_id;
int paths = 0;
int going_down = 0;
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
- paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
- alt_local_lid != 0);
+ alt_path = cm_req_has_alt_path((struct cm_req_msg *)
+ mad_recv_wc->recv_buf.mad);
+ paths = 1 + (alt_path != 0);
event = IB_CM_REQ_RECEIVED;
break;
case CM_MRA_ATTR_ID:
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 852c8fec8088..1fdb473b5df7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
return id_priv;
}
-static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
+static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
@@ -1846,9 +1846,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1894,9 +1892,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
- if (ret)
- goto err;
+ rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1942,7 +1938,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
- int offset, ret;
+ u8 offset;
+ int ret;
listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
@@ -3440,7 +3437,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
void *private_data;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
@@ -3497,7 +3495,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_route *route;
void *private_data;
struct ib_cm_id *id;
- int offset, ret;
+ u8 offset;
+ int ret;
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 54076a3e8007..31dfee0c8295 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -186,7 +186,7 @@ static struct configfs_attribute *cma_configfs_attributes[] = {
NULL,
};
-static struct config_item_type cma_port_group_type = {
+static const struct config_item_type cma_port_group_type = {
.ct_attrs = cma_configfs_attributes,
.ct_owner = THIS_MODULE
};
@@ -263,7 +263,7 @@ static struct configfs_item_operations cma_ports_item_ops = {
.release = release_cma_ports_group
};
-static struct config_item_type cma_ports_group_type = {
+static const struct config_item_type cma_ports_group_type = {
.ct_item_ops = &cma_ports_item_ops,
.ct_owner = THIS_MODULE
};
@@ -272,7 +272,7 @@ static struct configfs_item_operations cma_device_item_ops = {
.release = release_cma_dev
};
-static struct config_item_type cma_device_group_type = {
+static const struct config_item_type cma_device_group_type = {
.ct_item_ops = &cma_device_item_ops,
.ct_owner = THIS_MODULE
};
@@ -323,7 +323,7 @@ static struct configfs_group_operations cma_subsys_group_ops = {
.make_group = make_cma_dev,
};
-static struct config_item_type cma_subsys_type = {
+static const struct config_item_type cma_subsys_type = {
.ct_group_ops = &cma_subsys_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index fcf42f6bb82a..e9e189ec7502 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -447,9 +447,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
- struct iwcm_id_private *cm_id_priv;
-
- cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f8f53bb90837..cb91245e9163 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
unsigned long flags;
int ret;
+ INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
ret = ib_mad_enforce_security(mad_agent_priv,
mad_recv_wc->wc->pkey_index);
if (ret) {
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
+ return;
}
- INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 3ba24c428c3b..2fae850a3eff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -214,7 +214,9 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, extack);
- if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ if (err ||
+ !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 6ca607e8e293..c8963e91f92a 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -384,21 +384,17 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
count += ret;
prev_wr = &ctx->sig->data.reg_wr.wr;
- if (prot_sg_cnt) {
- ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
- prot_sg, prot_sg_cnt, 0);
- if (ret < 0)
- goto out_destroy_data_mr;
- count += ret;
+ ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
+ prot_sg, prot_sg_cnt, 0);
+ if (ret < 0)
+ goto out_destroy_data_mr;
+ count += ret;
- if (ctx->sig->prot.inv_wr.next)
- prev_wr->next = &ctx->sig->prot.inv_wr;
- else
- prev_wr->next = &ctx->sig->prot.reg_wr.wr;
- prev_wr = &ctx->sig->prot.reg_wr.wr;
- } else {
- ctx->sig->prot.mr = NULL;
- }
+ if (ctx->sig->prot.inv_wr.next)
+ prev_wr->next = &ctx->sig->prot.inv_wr;
+ else
+ prev_wr->next = &ctx->sig->prot.reg_wr.wr;
+ prev_wr = &ctx->sig->prot.reg_wr.wr;
ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
if (!ctx->sig->sig_mr) {
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 88bdafb297f5..23278ed5be45 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
if (ret)
return ret;
- if (qp_sec->qp == qp_sec->qp->real_qp) {
- list_for_each_entry(shared_qp_sec,
- &qp_sec->shared_qp_list,
- shared_qp_list) {
- ret = security_ib_pkey_access(shared_qp_sec->security,
- subnet_prefix,
- pkey);
- if (ret)
- return ret;
- }
+ list_for_each_entry(shared_qp_sec,
+ &qp_sec->shared_qp_list,
+ shared_qp_list) {
+ ret = security_ib_pkey_access(shared_qp_sec->security,
+ subnet_prefix,
+ pkey);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
int ret = 0;
struct ib_ports_pkeys *tmp_pps;
struct ib_ports_pkeys *new_pps;
- bool special_qp = (qp->qp_type == IB_QPT_SMI ||
- qp->qp_type == IB_QPT_GSI ||
- qp->qp_type >= IB_QPT_RESERVED1);
+ struct ib_qp *real_qp = qp->real_qp;
+ bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
+ real_qp->qp_type == IB_QPT_GSI ||
+ real_qp->qp_type >= IB_QPT_RESERVED1);
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
(qp_attr_mask & IB_QP_ALT_PATH));
+ /* The port/pkey settings are maintained only for the real QP. Open
+ * handles on the real QP will be in the shared_qp_list. When
+ * enforcing security on the real QP all the shared QPs will be
+ * enforcing security on the real QP, all the shared QPs will be
+ */
+
if (pps_change && !special_qp) {
- mutex_lock(&qp->qp_sec->mutex);
- new_pps = get_new_pps(qp,
+ mutex_lock(&real_qp->qp_sec->mutex);
+ new_pps = get_new_pps(real_qp,
qp_attr,
qp_attr_mask);
@@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (!ret)
ret = check_qp_port_pkey_settings(new_pps,
- qp->qp_sec);
+ real_qp->qp_sec);
}
if (!ret)
- ret = qp->device->modify_qp(qp->real_qp,
- qp_attr,
- qp_attr_mask,
- udata);
+ ret = real_qp->device->modify_qp(real_qp,
+ qp_attr,
+ qp_attr_mask,
+ udata);
if (pps_change && !special_qp) {
/* Clean up the lists and free the appropriate
@@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
if (ret) {
tmp_pps = new_pps;
} else {
- tmp_pps = qp->qp_sec->ports_pkeys;
- qp->qp_sec->ports_pkeys = new_pps;
+ tmp_pps = real_qp->qp_sec->ports_pkeys;
+ real_qp->qp_sec->ports_pkeys = new_pps;
}
if (tmp_pps) {
@@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
port_pkey_list_remove(&tmp_pps->alt);
}
kfree(tmp_pps);
- mutex_unlock(&qp->qp_sec->mutex);
+ mutex_unlock(&real_qp->qp_sec->mutex);
}
return ret;
}
@@ -692,20 +697,13 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
- int ret;
-
if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
return -EACCES;
- ret = ib_security_pkey_access(map->agent.device,
- map->agent.port_num,
- pkey_index,
- map->agent.security);
-
- if (ret)
- return ret;
-
- return 0;
+ return ib_security_pkey_access(map->agent.device,
+ map->agent.port_num,
+ pkey_index,
+ map->agent.security);
}
#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index abc5ab581f82..e30d86fa1855 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -108,8 +108,22 @@ static ssize_t port_attr_show(struct kobject *kobj,
return port_attr->show(p, port_attr, buf);
}
+static ssize_t port_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct port_attribute *port_attr =
+ container_of(attr, struct port_attribute, attr);
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+
+ if (!port_attr->store)
+ return -EIO;
+ return port_attr->store(p, port_attr, buf, count);
+}
+
static const struct sysfs_ops port_sysfs_ops = {
- .show = port_attr_show
+ .show = port_attr_show,
+ .store = port_attr_store
};
static ssize_t gid_attr_show(struct kobject *kobj,
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 55e8f5ed8b3c..2aadf5813a40 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -39,11 +39,44 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
+#include <linux/interval_tree_generic.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+/*
+ * The ib_umem list keeps track of memory regions for which the HW
+ * device requests to receive notification when the related memory
+ * mapping is changed.
+ *
+ * ib_umem_lock protects the list.
+ */
+
+static u64 node_start(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_start(umem_odp->umem);
+}
+
+/* Note that the representation of the intervals in the interval tree
+ * considers the ending point as contained in the interval, while the
+ * function ib_umem_end returns the first address which is not contained
+ * in the umem.
+ */
+static u64 node_last(struct umem_odp_node *n)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(n, struct ib_umem_odp, interval_tree);
+
+ return ib_umem_end(umem_odp->umem) - 1;
+}
+
+INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
+ node_start, node_last, static, rbt_ib_umem)
+
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
mutex_lock(&item->odp_data->umem_mutex);
@@ -754,3 +787,42 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
+
+/* @last is not a part of the interval. See comment for function
+ * node_last.
+ */
+int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
+ u64 start, u64 last,
+ umem_call_back cb,
+ void *cookie)
+{
+ int ret_val = 0;
+ struct umem_odp_node *node, *next;
+ struct ib_umem_odp *umem;
+
+ if (unlikely(start == last))
+ return ret_val;
+
+ for (node = rbt_ib_umem_iter_first(root, start, last - 1);
+ node; node = next) {
+ next = rbt_ib_umem_iter_next(node, start, last - 1);
+ umem = container_of(node, struct ib_umem_odp, interval_tree);
+ ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ }
+
+ return ret_val;
+}
+EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
+
+struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
+ u64 addr, u64 length)
+{
+ struct umem_odp_node *node;
+
+ node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
+ if (node)
+ return container_of(node, struct ib_umem_odp, interval_tree);
+ return NULL;
+
+}
+EXPORT_SYMBOL(rbt_ib_umem_lookup);
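Illustrative only, not part of the patch: a small sketch of the closed-interval convention described in the comments above. node_last() returns ib_umem_end() - 1 because the interval tree treats the end point as contained, while callers of rbt_ib_umem_for_each_in_range() pass an exclusive last and the tree is queried with last - 1. The addresses below are assumed example values:

/* interval_convention.c - illustrative sketch, not part of the patch */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed ODP umem: mapped at 0x100000, 0x4000 bytes long. */
	uint64_t umem_start = 0x100000;		/* ib_umem_start()          */
	uint64_t umem_end   = 0x104000;		/* ib_umem_end(), exclusive */

	uint64_t node_start = umem_start;	/* node_start()             */
	uint64_t node_last  = umem_end - 1;	/* node_last(), inclusive   */

	/* A caller invalidating one page at 0x103000 passes an exclusive last. */
	uint64_t start = 0x103000, last = 0x104000;

	/* rbt_ib_umem_for_each_in_range() queries the tree over [start, last - 1],
	 * which overlaps [node_start, node_last], so this umem is visited.
	 */
	assert(start <= node_last && last - 1 >= node_start);
	return 0;
}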
diff --git a/drivers/infiniband/core/umem_rbtree.c b/drivers/infiniband/core/umem_rbtree.c
deleted file mode 100644
index fc801920e341..000000000000
--- a/drivers/infiniband/core/umem_rbtree.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interval_tree_generic.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <rdma/ib_umem_odp.h>
-
-/*
- * The ib_umem list keeps track of memory regions for which the HW
- * device request to receive notification when the related memory
- * mapping is changed.
- *
- * ib_umem_lock protects the list.
- */
-
-static inline u64 node_start(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_start(umem_odp->umem);
-}
-
-/* Note that the representation of the intervals in the interval tree
- * considers the ending point as contained in the interval, while the
- * function ib_umem_end returns the first address which is not contained
- * in the umem.
- */
-static inline u64 node_last(struct umem_odp_node *n)
-{
- struct ib_umem_odp *umem_odp =
- container_of(n, struct ib_umem_odp, interval_tree);
-
- return ib_umem_end(umem_odp->umem) - 1;
-}
-
-INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
- node_start, node_last, , rbt_ib_umem)
-
-/* @last is not a part of the interval. See comment for function
- * node_last.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 last,
- umem_call_back cb,
- void *cookie)
-{
- int ret_val = 0;
- struct umem_odp_node *node, *next;
- struct ib_umem_odp *umem;
-
- if (unlikely(start == last))
- return ret_val;
-
- for (node = rbt_ib_umem_iter_first(root, start, last - 1);
- node; node = next) {
- next = rbt_ib_umem_iter_next(node, start, last - 1);
- umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
- }
-
- return ret_val;
-}
-EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
-
-struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
- u64 addr, u64 length)
-{
- struct umem_odp_node *node;
-
- node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
- if (node)
- return container_of(node, struct ib_umem_odp, interval_tree);
- return NULL;
-
-}
-EXPORT_SYMBOL(rbt_ib_umem_lookup);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c1696e6084b2..4b64dd02e090 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.status = 0;
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
- packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
+ /*
+ * On OPA devices it is okay to lose the upper 16 bits of LID as this
+ * information is obtained elsewhere. Mask off the upper 16 bits.
+ */
+ if (agent->device->port_immutable[agent->port_num].core_cap_flags &
+ RDMA_CORE_PORT_INTEL_OPA)
+ packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
+ mad_recv_wc->wc->slid);
+ else
+ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
@@ -506,7 +515,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
}
- ah = rdma_create_ah(agent->qp->pd, &ah_attr);
+ ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_up;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 37c8903e7fd0..deccefb71a6b 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -47,21 +47,28 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (const void __user *) (ibuf); \
- (udata)->outbuf = (void __user *) (obuf); \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ udata->inbuf = ibuf;
+ udata->outbuf = obuf;
+ udata->inlen = ilen;
+ udata->outlen = olen;
+}
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
- do { \
- (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
- (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
- (udata)->inlen = (ilen); \
- (udata)->outlen = (olen); \
- } while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+ const void __user *ibuf,
+ void __user *obuf,
+ size_t ilen, size_t olen)
+{
+ ib_uverbs_init_udata(udata,
+ ilen ? ibuf : NULL, olen ? obuf : NULL,
+ ilen, olen);
+}
/*
* Our lifetime rules for these structs are the following:
@@ -299,5 +306,6 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
IB_UVERBS_DECLARE_EX_CMD(modify_qp);
+IB_UVERBS_DECLARE_EX_CMD(modify_cq);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 52a2cf2d83aa..16d55710b116 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err;
}
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err_fd;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_file;
}
@@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.link_layer = rdma_port_get_link_layer(ib_dev,
cmd.port_num);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
atomic_inc(&xrcd->usecnt);
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
resp.rkey = mr->rkey;
resp.mr_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
resp.lkey = mr->lkey;
resp.rkey = mr->rkey;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
ret = -EFAULT;
else
ret = in_len;
@@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
goto err_free;
}
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
resp.rkey = mw->rkey;
resp.mw_handle = uobj->id;
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp))) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
ret = -EFAULT;
goto err_copy;
}
@@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
uobj_file.uobj);
ib_uverbs_init_event_queue(&ev_file->ev_queue);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
uobj_alloc_abort(uobj);
return -EFAULT;
}
@@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
+ ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+ sizeof(cmd), sizeof(resp));
- INIT_UDATA(&uhw, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
resp.cqe = cq->cqe;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp.cqe))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
ret = -EFAULT;
out:
@@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
return -EINVAL;
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
- header_ptr = (void __user *)(unsigned long) cmd.response;
+ header_ptr = u64_to_user_ptr(cmd.response);
data_ptr = header_ptr + sizeof resp;
memset(&resp, 0, sizeof resp);
@@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
resp.async_events_reported = obj->async_events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -1501,7 +1491,8 @@ static int create_qp(struct ib_uverbs_file *file,
IB_QP_CREATE_MANAGED_RECV |
IB_QP_CREATE_SCATTER_FCS |
IB_QP_CREATE_CVLAN_STRIPPING |
- IB_QP_CREATE_SOURCE_QPN)) {
+ IB_QP_CREATE_SOURCE_QPN |
+ IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
ret = -EINVAL;
goto err_put;
}
@@ -1650,10 +1641,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof(cmd)))
return -EFAULT;
- INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
- resp_size);
- INIT_UDATA(&uhw, buf + sizeof(cmd),
- (unsigned long)cmd.response + resp_size,
+ ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
+ sizeof(cmd), resp_size);
+ ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + resp_size,
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - resp_size);
@@ -1750,8 +1741,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -1795,8 +1786,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
resp.qpn = qp->qp_num;
resp.qp_handle = obj->uevent.uobject.id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_destroy;
}
@@ -1911,8 +1901,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
resp.max_inline_data = init_attr->cap.max_inline_data;
resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2042,7 +2031,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
return -EOPNOTSUPP;
- INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
out_len);
@@ -2126,8 +2115,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
resp.events_reported = obj->uevent.events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -2311,8 +2299,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out_put:
@@ -2460,8 +2447,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
}
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2510,8 +2496,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
ret = -EFAULT;
out:
@@ -2537,7 +2522,6 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
struct rdma_ah_attr attr;
int ret;
struct ib_udata udata;
- u8 *dmac;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -2548,8 +2532,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
return -EINVAL;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long)cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -2580,28 +2564,20 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
} else {
rdma_ah_set_ah_flags(&attr, 0);
}
- dmac = rdma_ah_retrieve_dmac(&attr);
- if (dmac)
- memset(dmac, 0, ETH_ALEN);
-
- ah = pd->device->create_ah(pd, &attr, &udata);
+ ah = rdma_create_user_ah(pd, &attr, &udata);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
goto err_put;
}
- ah->device = pd->device;
- ah->pd = pd;
- atomic_inc(&pd->usecnt);
ah->uobject = uobj;
uobj->user_handle = cmd.user_handle;
uobj->object = ah;
resp.ah_handle = uobj->id;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp)) {
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
goto err_copy;
}
@@ -3627,8 +3603,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
xcmd.max_sge = cmd.max_sge;
xcmd.srq_limit = cmd.srq_limit;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -3654,8 +3630,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof(cmd),
- (unsigned long) cmd.response + sizeof(resp),
+ ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
+ u64_to_user_ptr(cmd.response) + sizeof(resp),
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
out_len - sizeof(resp));
@@ -3680,7 +3656,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
+ ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
@@ -3731,8 +3707,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
resp.max_sge = attr.max_sge;
resp.srq_limit = attr.srq_limit;
- if (copy_to_user((void __user *) (unsigned long) cmd.response,
- &resp, sizeof resp))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
return -EFAULT;
return in_len;
@@ -3773,8 +3748,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
}
resp.events_reported = obj->events_reported;
uverbs_uobject_put(uobj);
- if (copy_to_user((void __user *)(unsigned long)cmd.response,
- &resp, sizeof(resp)))
+ if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
return -EFAULT;
return in_len;
@@ -3878,7 +3852,58 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.tm_caps.max_sge = attr.tm_caps.max_sge;
resp.tm_caps.flags = attr.tm_caps.flags;
resp.response_length += sizeof(resp.tm_caps);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
+ goto end;
+
+ resp.cq_moderation_caps.max_cq_moderation_count =
+ attr.cq_caps.max_cq_moderation_count;
+ resp.cq_moderation_caps.max_cq_moderation_period =
+ attr.cq_caps.max_cq_moderation_period;
+ resp.response_length += sizeof(resp.cq_moderation_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
}
+
+int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
+ struct ib_device *ib_dev,
+ struct ib_udata *ucore,
+ struct ib_udata *uhw)
+{
+ struct ib_uverbs_ex_modify_cq cmd = {};
+ struct ib_cq *cq;
+ size_t required_cmd_sz;
+ int ret;
+
+ required_cmd_sz = offsetof(typeof(cmd), reserved) +
+ sizeof(cmd.reserved);
+ if (ucore->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ /* sanity checks */
+ if (ucore->inlen > sizeof(cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(cmd),
+ ucore->inlen - sizeof(cmd)))
+ return -EOPNOTSUPP;
+
+ ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+ if (ret)
+ return ret;
+
+ if (!cmd.attr_mask || cmd.reserved)
+ return -EINVAL;
+
+ if (cmd.attr_mask > IB_CQ_MODERATE)
+ return -EOPNOTSUPP;
+
+ cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
+ if (!cq)
+ return -EINVAL;
+
+ ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
+
+ uobj_put_obj_read(cq);
+
+ return ret;
+}
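
The dominant change in this file is mechanical: every open-coded (void __user *)(unsigned long)cmd.response cast becomes u64_to_user_ptr(), and the INIT_UDATA() macros become the ib_uverbs_init_udata() helpers. A minimal sketch of the copy-back idiom, assuming only a 64-bit response field carried in a uverbs command; the wrapper function name is illustrative:

/* Sketch (kernel context): return a response through a u64 ABI pointer field. */
static int uverbs_reply(u64 response, const void *resp, size_t len)
{
	if (copy_to_user(u64_to_user_ptr(response), resp, len))
		return -EFAULT;
	return 0;
}

Compared with the open-coded cast, the helper also documents that the field is an ABI-fixed u64 rather than a kernel pointer.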
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 5286ad57d903..71ff2644e053 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -241,9 +241,7 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
struct uverbs_attr *curr_attr;
unsigned long *curr_bitmap;
size_t ctx_size;
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
-#endif
if (hdr->reserved)
return -EINVAL;
@@ -269,13 +267,10 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
(method_spec->num_child_attrs / BITS_PER_LONG +
method_spec->num_buckets);
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
ctx = (void *)data;
-
if (!ctx)
-#endif
- ctx = kmalloc(ctx_size, GFP_KERNEL);
+ ctx = kmalloc(ctx_size, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -311,10 +306,8 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
out:
-#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
- if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
-#endif
- kfree(ctx);
+ if (ctx != (void *)data)
+ kfree(ctx);
return err;
}
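
With the #ifdef removed, the ioctl path always declares the small on-stack buffer and falls back to kmalloc() only when the computed context does not fit; the free is keyed off whether the pointer still aliases the stack array rather than off a size comparison. A minimal sketch of that allocation pattern, assuming an arbitrary caller-supplied size (names here are illustrative):

/* Sketch (kernel context, needs <linux/slab.h>). */
static int with_scratch(size_t ctx_size)
{
	uintptr_t stack_buf[256 / sizeof(uintptr_t)];
	void *ctx = NULL;

	if (ctx_size <= sizeof(stack_buf))
		ctx = stack_buf;
	if (!ctx)
		ctx = kmalloc(ctx_size, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* ... fill and use ctx ... */

	if (ctx != (void *)stack_buf)
		kfree(ctx);
	return 0;
}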
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
index 76ddb6564578..062485f9300d 100644
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
min_id) ||
WARN(attr_obj_with_special_access &&
!(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
- "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+ "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
min_id) ||
WARN(IS_ATTR_OBJECT(attr) &&
attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index dc2aed6fb21b..381fd9c096ae 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -128,6 +128,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
[IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
+ [IB_USER_VERBS_EX_CMD_MODIFY_CQ] = ib_uverbs_ex_modify_cq,
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -763,7 +764,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
if (!access_ok(VERIFY_WRITE,
- (void __user *) (unsigned long) ex_hdr.response,
+ u64_to_user_ptr(ex_hdr.response),
(hdr.out_words + ex_hdr.provider_out_words) * 8)) {
ret = -EFAULT;
goto out;
@@ -775,19 +776,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
}
- INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
- hdr.in_words * 8, hdr.out_words * 8);
+ ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+ u64_to_user_ptr(ex_hdr.response),
+ hdr.in_words * 8, hdr.out_words * 8);
- INIT_UDATA_BUF_OR_NULL(&uhw,
- buf + ucore.inlen,
- (unsigned long) ex_hdr.response + ucore.outlen,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ ib_uverbs_init_udata_buf_or_null(&uhw,
+ buf + ucore.inlen,
+ u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
- ret = uverbs_ex_cmd_table[command](file,
- ib_dev,
- &ucore,
- &uhw);
+ ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
if (!ret)
ret = written_count;
} else {
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index bd0acf376af0..bb372b4713a4 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -69,8 +69,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
- (rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
(!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr)))
src = &conv_ah;
@@ -176,18 +175,18 @@ EXPORT_SYMBOL(ib_copy_path_rec_to_user);
void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
struct ib_user_path_rec *src)
{
- __be32 slid, dlid;
+ u32 slid, dlid;
memset(dst, 0, sizeof(*dst));
if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
(ib_is_opa_gid((union ib_gid *)src->dgid))) {
dst->rec_type = SA_PATH_REC_TYPE_OPA;
- slid = htonl(opa_get_lid_from_gid((union ib_gid *)src->sgid));
- dlid = htonl(opa_get_lid_from_gid((union ib_gid *)src->dgid));
+ slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
+ dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
} else {
dst->rec_type = SA_PATH_REC_TYPE_IB;
- slid = htonl(ntohs(src->slid));
- dlid = htonl(ntohs(src->dlid));
+ slid = ntohs(src->slid);
+ dlid = ntohs(src->dlid);
}
memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 0a98579700ec..c3ee5d9b336d 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -227,26 +227,26 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
* to use uverbs_attr_bundle instead of ib_udata.
* Assume attr == 0 is input and attr == 1 is output.
*/
- void __user *inbuf;
- size_t inbuf_len = 0;
- void __user *outbuf;
- size_t outbuf_len = 0;
const struct uverbs_attr *uhw_in =
uverbs_attr_get(ctx, UVERBS_UHW_IN);
const struct uverbs_attr *uhw_out =
uverbs_attr_get(ctx, UVERBS_UHW_OUT);
if (!IS_ERR(uhw_in)) {
- inbuf = uhw_in->ptr_attr.ptr;
- inbuf_len = uhw_in->ptr_attr.len;
+ udata->inbuf = uhw_in->ptr_attr.ptr;
+ udata->inlen = uhw_in->ptr_attr.len;
+ } else {
+ udata->inbuf = NULL;
+ udata->inlen = 0;
}
if (!IS_ERR(uhw_out)) {
- outbuf = uhw_out->ptr_attr.ptr;
- outbuf_len = uhw_out->ptr_attr.len;
+ udata->outbuf = uhw_out->ptr_attr.ptr;
+ udata->outlen = uhw_out->ptr_attr.len;
+ } else {
+ udata->outbuf = NULL;
+ udata->outlen = 0;
}
-
- INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
}
static int uverbs_create_cq_handler(struct ib_device *ib_dev,
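
create_udata() now writes the struct ib_udata fields directly from the optional UHW attributes instead of staging them in locals and calling INIT_UDATA_BUF_OR_NULL(); an absent attribute simply yields a NULL buffer of length zero. A sketch of the one-direction helper this reduces to, reusing the field names visible in the hunk above (the helper itself is illustrative):

/* Sketch: populate the input side of an ib_udata from an optional attribute. */
static void set_udata_in(struct ib_udata *udata, const struct uverbs_attr *attr)
{
	if (!IS_ERR(attr)) {
		udata->inbuf = attr->ptr_attr.ptr;
		udata->inlen = attr->ptr_attr.len;
	} else {
		udata->inbuf = NULL;
		udata->inlen = 0;
	}
}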
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index de57d6c11a25..3fb8fb6cc824 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -53,6 +53,9 @@
#include "core_priv.h"
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr);
+
static const char * const ib_events[] = {
[IB_EVENT_CQ_ERR] = "CQ error",
[IB_EVENT_QP_FATAL] = "QP fatal error",
@@ -302,11 +305,13 @@ EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */
-struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
+static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct ib_ah *ah;
- ah = pd->device->create_ah(pd, ah_attr, NULL);
+ ah = pd->device->create_ah(pd, ah_attr, udata);
if (!IS_ERR(ah)) {
ah->device = pd->device;
@@ -318,8 +323,42 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
return ah;
}
+
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
+{
+ return _rdma_create_ah(pd, ah_attr, NULL);
+}
EXPORT_SYMBOL(rdma_create_ah);
+/**
+ * rdma_create_user_ah - Creates an address handle for the
+ * given address vector.
+ * It resolves the destination MAC address for RoCE-type address handle attributes.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @udata: pointer to the user's input/output buffer information needed by
+ * the provider driver.
+ *
+ * It returns a valid ib_ah pointer on success and an ERR_PTR-encoded error on failure.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
+{
+ int err;
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ err = ib_resolve_eth_dmac(pd->device, ah_attr);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ return _rdma_create_ah(pd, ah_attr, udata);
+}
+EXPORT_SYMBOL(rdma_create_user_ah);
+
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
@@ -1221,8 +1260,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
-int ib_resolve_eth_dmac(struct ib_device *device,
- struct rdma_ah_attr *ah_attr)
+static int ib_resolve_eth_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
{
int ret = 0;
struct ib_global_route *grh;
@@ -1281,7 +1320,6 @@ int ib_resolve_eth_dmac(struct ib_device *device,
out:
return ret;
}
-EXPORT_SYMBOL(ib_resolve_eth_dmac);
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -1512,12 +1550,12 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
}
EXPORT_SYMBOL(ib_create_cq);
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
return cq->device->modify_cq ?
cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
-EXPORT_SYMBOL(ib_modify_cq);
+EXPORT_SYMBOL(rdma_set_cq_moderation);
int ib_destroy_cq(struct ib_cq *cq)
{
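
Two verbs.c renames matter to the callers earlier in this series: ib_resolve_eth_dmac() loses its export because rdma_create_user_ah() performs the RoCE MAC resolution itself before handing the attribute and udata to the provider, and ib_modify_cq() becomes rdma_set_cq_moderation(). A condensed sketch of how a uverbs-style caller now uses both, assuming valid pd/attr/udata/cq objects (the wrapper function is illustrative):

/* Sketch: user-path AH creation and CQ moderation with the renamed helpers. */
static int example_path(struct ib_pd *pd, struct rdma_ah_attr *attr,
			struct ib_udata *udata, struct ib_cq *cq,
			u16 cq_count, u16 cq_period)
{
	struct ib_ah *ah;

	ah = rdma_create_user_ah(pd, attr, udata);	/* resolves RoCE dmac internally */
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	return rdma_set_cq_moderation(cq, cq_count, cq_period);	/* was ib_modify_cq() */
}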
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 34c93abf0fe0..e4f31c1be8f7 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
index afbaa0e20670..6e3bc25cc140 100644
--- a/drivers/infiniband/hw/bnxt_re/Makefile
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 0d89621d9fe8..2032db7db766 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
ctx->idx = tbl_idx;
ctx->refcnt = 1;
ctx_tbl[tbl_idx] = ctx;
+ *context = ctx;
return rc;
}
@@ -665,7 +666,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
struct bnxt_re_ah *ah;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
int rc;
- u16 vlan_tag;
u8 nw_type;
struct ib_gid_attr sgid_attr;
@@ -711,11 +711,8 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
grh->sgid_index);
goto fail;
}
- if (sgid_attr.ndev) {
- if (is_vlan_dev(sgid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
+ if (sgid_attr.ndev)
dev_put(sgid_attr.ndev);
- }
/* Get network header type for this GID */
nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
switch (nw_type) {
@@ -729,14 +726,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
break;
}
- rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac, &vlan_tag,
- &sgid_attr.ndev->ifindex,
- NULL);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
- goto fail;
- }
}
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
@@ -796,6 +785,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
@@ -1643,7 +1633,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
u8 ip_version = 0;
u16 vlan_id = 0xFFFF;
void *buf;
- int i, rc = 0, size;
+ int i, rc = 0;
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
@@ -1760,7 +1750,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
/* Pack the QP1 to the transmit buffer */
buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
if (buf) {
- size = ib_ud_header_pack(&qp->qp1_hdr, buf);
+ ib_ud_header_pack(&qp->qp1_hdr, buf);
for (i = wqe->num_sge; i; i--) {
wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
@@ -2216,7 +2206,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct ib_recv_wr *wr)
{
struct bnxt_qplib_swqe wqe;
- int rc = 0, payload_sz = 0;
+ int rc = 0;
memset(&wqe, 0, sizeof(wqe));
while (wr) {
@@ -2231,8 +2221,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
rc = -EINVAL;
break;
}
- payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
- wr->num_sge);
+ bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
wqe.wr_id = wr->wr_id;
wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
@@ -2569,7 +2558,7 @@ static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
u16 raweth_qp1_flags2)
{
- bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
+ bool is_ipv6 = false, is_ipv4 = false;
/* raweth_qp1_flags Bit 9-6 indicates itype */
if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
@@ -2580,7 +2569,6 @@ static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
raweth_qp1_flags2 &
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
- is_udp = true;
/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
(raweth_qp1_flags2 &
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
@@ -2781,6 +2769,32 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
wc->wc_flags |= IB_WC_GRH;
}
+static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
+ u16 *vid, u8 *sl)
+{
+ bool ret = false;
+ u32 metadata;
+ u16 tpid;
+
+ metadata = orig_cqe->raweth_qp1_metadata;
+ if (orig_cqe->raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
+ tpid = ((metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
+ if (tpid == ETH_P_8021Q) {
+ *vid = metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
+ *sl = (metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
@@ -2800,12 +2814,14 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
- u32 tbl_idx;
struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_re_qp *qp1_qp = NULL;
struct bnxt_qplib_cqe *orig_cqe = NULL;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
int nw_type;
+ u32 tbl_idx;
+ u16 vlan_id;
+ u8 sl;
tbl_idx = cqe->wr_id;
@@ -2820,6 +2836,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
wc->ex.imm_data = orig_cqe->immdata;
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
@@ -3008,8 +3029,10 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
enum ib_cq_notify_flags ib_cqn_flags)
{
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- int type = 0;
+ int type = 0, rc = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&cq->cq_lock, flags);
/* Trigger on the very next completion */
if (ib_cqn_flags & IB_CQ_NEXT_COMP)
type = DBR_DBR_TYPE_CQ_ARMALL;
@@ -3019,12 +3042,15 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
/* Poll to see if there are missed events */
if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
- return 1;
-
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
+ rc = 1;
+ goto exit;
+ }
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
- return 0;
+exit:
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return rc;
}
/* Memory Regions */
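
bnxt_re_is_vlan_pkt() recovers the VLAN ID and priority from the raw-Ethernet QP1 metadata word with the mask/shift constants from roce_hsi.h, and only reports a VLAN when the embedded TPID is 802.1Q; the shadow-QP completion path then sets IB_WC_WITH_VLAN. A generic sketch of that mask/shift extraction with an invented field layout (the real masks and shifts are the CQ_RES_RAWETH_QP1_* definitions, not these):

/* Hypothetical metadata layout: bits 11:0 VID, 15:13 PCP, 31:16 TPID. */
#define EX_META_VID_MASK	0x00000fffU
#define EX_META_PRI_MASK	0x0000e000U
#define EX_META_PRI_SFT		13
#define EX_META_TPID_MASK	0xffff0000U
#define EX_META_TPID_SFT	16

static bool example_vlan_from_meta(u32 metadata, u16 *vid, u8 *prio)
{
	u16 tpid = (metadata & EX_META_TPID_MASK) >> EX_META_TPID_SFT;

	if (tpid != ETH_P_8021Q)
		return false;
	*vid  = metadata & EX_META_VID_MASK;
	*prio = (metadata & EX_META_PRI_MASK) >> EX_META_PRI_SFT;
	return true;
}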
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e7450ea92aa9..aafc19aa5de1 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,6 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
@@ -92,11 +93,22 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
{
}
+static void bnxt_re_shutdown(void *p)
+{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ bnxt_re_ib_unreg(rdev, false);
+}
+
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
.ulp_async_notifier = NULL,
.ulp_stop = bnxt_re_stop,
.ulp_start = bnxt_re_start,
- .ulp_sriov_config = bnxt_re_sriov_config
+ .ulp_sriov_config = bnxt_re_sriov_config,
+ .ulp_shutdown = bnxt_re_shutdown
};
/* RoCE -> Net driver */
@@ -1071,9 +1083,10 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
*/
rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
BNXT_RE_MAX_QPC_COUNT);
- if (rc)
+ if (rc) {
+ pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
goto fail;
-
+ }
rc = bnxt_re_net_ring_alloc
(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e8afc47f8949..61764f7aa79b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -160,11 +160,6 @@ void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
- struct bnxt_qplib_cq *scq, *rcq;
-
- scq = qp->scq;
- rcq = qp->rcq;
-
if (qp->sq.flushed) {
qp->sq.flushed = false;
list_del(&qp->sq_flush);
@@ -297,6 +292,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
break;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
switch (type) {
case NQ_BASE_TYPE_CQ_NOTIFICATION:
@@ -1118,6 +1119,11 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
continue;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
case CQ_BASE_CQE_TYPE_TERMINAL:
@@ -1360,7 +1366,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
break;
}
- /* else, just fall thru */
+ /* fall thru */
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
{
@@ -1901,6 +1907,11 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
/* If the next hwcqe is a REQ */
if ((peek_hwcqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK) ==
@@ -2107,6 +2118,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2170,6 +2182,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2241,6 +2254,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
+ cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
rq = &qp->rq;
if (wr_id_idx > rq->hwq.max_elements) {
@@ -2257,6 +2271,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
/* Add qp to flush list of the CQ */
bnxt_qplib_lock_buddy_cq(qp, cq);
__bnxt_qplib_add_flush_qp(qp);
@@ -2445,6 +2460,11 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
break;
+ /*
+ * The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
/* From the device's respective CQE format to qplib_wc*/
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
case CQ_BASE_CQE_TYPE_REQ:
@@ -2518,3 +2538,10 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
atomic_set(&cq->arm_state, 1);
spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
+
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
+{
+ flush_workqueue(qp->scq->nq->cqn_wq);
+ if (qp->scq != qp->rcq)
+ flush_workqueue(qp->rcq->nq->cqn_wq);
+}
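
The dma_rmb() calls added above all follow the same rule: once the valid/toggle bit says the DMA'd entry belongs to this pass, the barrier orders that check against every subsequent load from the entry, so the CPU cannot have speculatively read stale payload words. A minimal sketch of the idiom with an illustrative entry layout (kernel context assumed):

struct hw_entry {
	__le32 payload;
	__le32 flags;		/* bit 0: valid/toggle bit written last by HW */
};

static bool example_consume(const struct hw_entry *e, u32 expect_valid, u32 *out)
{
	if ((le32_to_cpu(e->flags) & 1) != expect_valid)
		return false;		/* entry not produced yet */

	/* Order the valid-bit load before any further reads of the entry. */
	dma_rmb();

	*out = le32_to_cpu(e->payload);
	return true;
}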
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 8ead70ca1c1d..c582d4ec8173 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -478,4 +478,5 @@ void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe,
int num_cqes);
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 2bdb1562bd21..bb5574adf195 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -88,7 +88,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
unsigned long flags;
u32 size, opcode;
u16 cookie, cbit;
- int pg, idx;
u8 *preq;
opcode = req->opcode;
@@ -149,9 +148,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
do {
- pg = 0;
- idx = 0;
-
/* Locate the next cmdq slot */
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
@@ -172,14 +168,14 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
rcfw->seq_num++;
cmdq_prod = cmdq->prod;
- if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
+ if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
/* The very first doorbell write
* is required to set this flag
* which prompts the FW to reset
* its internal pointers
*/
- cmdq_prod |= FIRMWARE_FIRST_FLAG;
- rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
+ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+ clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
}
/* ring CMDQ DB */
@@ -306,6 +302,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
+ if (!qp)
+ break;
bnxt_qplib_acquire_cq_locks(qp, &flags);
bnxt_qplib_mark_qp_error(qp);
bnxt_qplib_release_cq_locks(qp, &flags);
@@ -361,6 +359,10 @@ static void bnxt_qplib_service_creq(unsigned long data)
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) {
@@ -622,7 +624,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
/* General */
rcfw->seq_num = 0;
- rcfw->flags = FIRMWARE_FIRST_FLAG;
+ set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
sizeof(unsigned long));
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 85b16da287f9..2946a7cfae82 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,9 +162,9 @@ struct bnxt_qplib_rcfw {
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG BIT(0)
-#define FIRMWARE_FIRST_FLAG BIT(31)
-#define FIRMWARE_TIMED_OUT BIT(3)
+#define FIRMWARE_INITIALIZED_FLAG 0
+#define FIRMWARE_FIRST_FLAG 31
+#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
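
The rcfw flags change swaps BIT() masks updated with plain &=/|= for bit numbers used with test_bit()/set_bit()/clear_bit(), so concurrent updates to the shared unsigned long are atomic; the one place that still needs the mask form, the doorbell word, re-derives it with BIT(). A minimal sketch of the converted style (the struct and helper here are illustrative):

#define EX_FW_FIRST_FLAG	31	/* bit number, not a mask */

struct ex_fw_ctx {
	unsigned long flags;
};

static u32 example_first_doorbell(struct ex_fw_ctx *ctx, u32 cmdq_prod)
{
	if (test_bit(EX_FW_FIRST_FLAG, &ctx->flags)) {
		cmdq_prod |= BIT(EX_FW_FIRST_FLAG);	/* hardware wants the mask form */
		clear_bit(EX_FW_FIRST_FLAG, &ctx->flags);
	}
	return cmdq_prod;
}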
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index e87207526d2c..2e5c052da5a9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -169,7 +169,7 @@ struct bnxt_qplib_ctx {
u32 cq_count;
struct bnxt_qplib_hwq cq_tbl;
struct bnxt_qplib_hwq tim_tbl;
-#define MAX_TQM_ALLOC_REQ 32
+#define MAX_TQM_ALLOC_REQ 48
#define MAX_TQM_ALLOC_BLK_SIZE 8
u8 tqm_count[MAX_TQM_ALLOC_REQ];
struct bnxt_qplib_hwq tqm_pde;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index e277e54a05eb..9543ce51a28a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -720,13 +720,12 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
struct cmdq_map_tc_to_cos req;
struct creq_map_tc_to_cos_resp resp;
u16 cmd_flags = 0;
- int rc = 0;
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
req.cos0 = cpu_to_le16(cids[0]);
req.cos1 = cpu_to_le16(cids[1]);
- rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
- (void *)&resp, NULL, 0);
+ bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
+ 0);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index eeb55b2db57e..c3cba6063a03 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2644,7 +2644,7 @@ struct creq_query_func_resp_sb {
u8 l2_db_space_size;
__le16 max_srq;
__le32 max_gid;
- __le32 tqm_alloc_reqs[8];
+ __le32 tqm_alloc_reqs[12];
};
/* Set resources command response (16 bytes) */
diff --git a/drivers/infiniband/hw/cxgb3/Kconfig b/drivers/infiniband/hw/cxgb3/Kconfig
index 2b6352b85485..431be733fbbe 100644
--- a/drivers/infiniband/hw/cxgb3/Kconfig
+++ b/drivers/infiniband/hw/cxgb3/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB3
tristate "Chelsio RDMA Driver"
- depends on CHELSIO_T3 && INET
+ depends on CHELSIO_T3
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 2761364185af..2c66d35d19bd 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb3
obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 3eff6541bd6f..3328acc53c2a 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
- __u32 ptr;
+ __u32 ptr = wq->sq_rptr + count;
int flushed = 0;
- struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
+ struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
- ptr = wq->sq_rptr + count;
- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
while (ptr != wq->sq_wptr) {
sqp->signaled = 0;
insert_sq_cqe(wq, cq, sqp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 86975370a4c0..1c90c86fc8b8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -107,7 +107,7 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
static void start_ep_timer(struct iwch_ep *ep)
@@ -119,8 +119,6 @@ static void start_ep_timer(struct iwch_ep *ep)
} else
get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
@@ -1399,7 +1397,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- init_timer(&child_ep->timer);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
accept_cr(child_ep, req->peer_ip, skb);
goto out;
@@ -1719,9 +1717,9 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
return CPL_RET_BUF_DONE;
}
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct iwch_ep *ep = (struct iwch_ep *)arg;
+ struct iwch_ep *ep = from_timer(ep, t, timer);
struct iwch_qp_attributes attrs;
unsigned long flags;
int abort = 1;
@@ -1760,8 +1758,8 @@ static void ep_timeout(unsigned long arg)
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
- int err;
struct iwch_ep *ep = to_ep(cm_id);
+
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (state_read(&ep->com) == DEAD) {
@@ -1772,8 +1770,8 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
if (mpa_rev == 0)
abort_connection(ep, NULL, GFP_KERNEL);
else {
- err = send_mpa_reject(ep, pdata, pdata_len);
- err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
+ send_mpa_reject(ep, pdata, pdata_len);
+ iwch_ep_disconnect(ep, 0, GFP_KERNEL);
}
put_ep(&ep->com);
return 0;
@@ -1899,7 +1897,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = -ENOMEM;
goto out;
}
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
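
Both cxgb3 endpoint timers switch from init_timer() plus manual .data/.function assignment to timer_setup(), and the callback recovers its endpoint with from_timer() instead of casting an unsigned long. A minimal sketch of the new-style timer API, with an illustrative container struct:

#include <linux/timer.h>

struct ex_ep {
	struct timer_list timer;
	bool timed_out;
};

static void ex_ep_timeout(struct timer_list *t)
{
	struct ex_ep *ep = from_timer(ep, t, timer);

	ep->timed_out = true;	/* handle the expiry */
}

static void ex_ep_arm(struct ex_ep *ep, unsigned long secs)
{
	timer_setup(&ep->timer, ex_ep_timeout, 0);
	mod_timer(&ep->timer, jiffies + secs * HZ);
}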
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 099e76f3758a..a578ca559e11 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -969,7 +969,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
insert_mmap(ucontext, mm2);
}
qhp->ibqp.qp_num = qhp->wq.qpid;
- init_timer(&(qhp->timer));
pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 9e216edec4c0..2e38ddefea8a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -168,7 +168,6 @@ struct iwch_qp {
atomic_t refcnt;
wait_queue_head_t wait;
enum IWCH_QP_FLAGS flags;
- struct timer_list timer;
};
static inline int qp_quiesced(struct iwch_qp *qhp)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 7f633da0185d..3871e1fd8395 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -722,10 +722,13 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
*/
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
struct iwch_cq *schp)
+ __releases(&qhp->lock)
+ __acquires(&qhp->lock)
{
int count;
int flushed;
+ lockdep_assert_held(&qhp->lock);
pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index afe8b28e0878..0a671a61fc92 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
- depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+ depends on CHELSIO_T4 && INET
select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index daf7a56e5d7e..21db3b48a617 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -99,10 +99,6 @@ module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
"Enable tcp window scaling (default=1)");
-int c4iw_debug;
-module_param(c4iw_debug, int, 0644);
-MODULE_PARM_DESC(c4iw_debug, "obsolete");
-
static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
@@ -144,7 +140,7 @@ static struct workqueue_struct *workq;
static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
-static void ep_timeout(unsigned long arg);
+static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
@@ -180,7 +176,7 @@ static void ref_qp(struct c4iw_ep *ep)
static void start_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
if (timer_pending(&ep->timer)) {
pr_err("%s timer already started! ep %p\n",
__func__, ep);
@@ -189,14 +185,12 @@ static void start_ep_timer(struct c4iw_ep *ep)
clear_bit(TIMEOUT, &ep->com.flags);
c4iw_get_ep(&ep->com);
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
- ep->timer.data = (unsigned long)ep;
- ep->timer.function = ep_timeout;
add_timer(&ep->timer);
}
static int stop_ep_timer(struct c4iw_ep *ep)
{
- pr_debug("%s ep %p stopping\n", __func__, ep);
+ pr_debug("ep %p stopping\n", ep);
del_timer_sync(&ep->timer);
if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
c4iw_put_ep(&ep->com);
@@ -212,7 +206,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
@@ -229,7 +223,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
if (c4iw_fatal_error(rdev)) {
kfree_skb(skb);
- pr_debug("%s - device in error state - dropping\n", __func__);
+ pr_err("%s - device in error state - dropping\n", __func__);
return -EIO;
}
error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
@@ -263,10 +257,10 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
- pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
- ep->mss, ep->emss);
+ pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
+ ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
@@ -287,7 +281,7 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
mutex_lock(&epc->mutex);
- pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
+ pr_debug("%s -> %s\n", states[epc->state], states[new]);
__state_set(epc, new);
mutex_unlock(&epc->mutex);
return;
@@ -318,11 +312,18 @@ static void *alloc_ep(int size, gfp_t gfp)
epc = kzalloc(size, gfp);
if (epc) {
+ epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
+ if (!epc->wr_waitp) {
+ kfree(epc);
+ epc = NULL;
+ goto out;
+ }
kref_init(&epc->kref);
mutex_init(&epc->mutex);
- c4iw_init_wr_wait(&epc->wr_wait);
+ c4iw_init_wr_wait(epc->wr_waitp);
}
- pr_debug("%s alloc ep %p\n", __func__, epc);
+ pr_debug("alloc ep %p\n", epc);
+out:
return epc;
}
@@ -384,7 +385,7 @@ void _c4iw_free_ep(struct kref *kref)
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
+ pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -407,6 +408,7 @@ void _c4iw_free_ep(struct kref *kref)
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
+ c4iw_put_wr_wait(ep->com.wr_waitp);
kfree(ep);
}
@@ -570,7 +572,7 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
- pr_debug("%s rdev %p\n", __func__, rdev);
+ pr_debug("rdev %p\n", rdev);
req->cmd = CPL_ABORT_NO_RST;
skb_get(skb);
ret = c4iw_ofld_send(rdev, skb);
@@ -647,7 +649,7 @@ static int send_halfclose(struct c4iw_ep *ep)
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
@@ -662,7 +664,7 @@ static int send_abort(struct c4iw_ep *ep)
u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
@@ -725,7 +727,7 @@ static int send_connect(struct c4iw_ep *ep)
roundup(sizev4, 16) :
roundup(sizev6, 16);
- pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);
+ pr_debug("ep %p atid %u\n", ep, ep->atid);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@ -824,13 +826,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req->rsvd);
+ pr_debug("snd_isn %u\n", t5req->rsvd);
t5req->opt2 = cpu_to_be32(opt2);
} else {
t6req->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req->rsvd);
+ pr_debug("snd_isn %u\n", t6req->rsvd);
t6req->opt2 = cpu_to_be32(opt2);
}
}
@@ -877,13 +879,13 @@ static int send_connect(struct c4iw_ep *ep)
t5req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t5req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t5req6->rsvd);
+ pr_debug("snd_isn %u\n", t5req6->rsvd);
t5req6->opt2 = cpu_to_be32(opt2);
} else {
t6req6->params =
cpu_to_be64(FILTER_TUPLE_V(params));
t6req6->rsvd = cpu_to_be32(isn);
- pr_debug("%s snd_isn %u\n", __func__, t6req6->rsvd);
+ pr_debug("snd_isn %u\n", t6req6->rsvd);
t6req6->opt2 = cpu_to_be32(opt2);
}
@@ -907,10 +909,8 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
-
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + ep->plen;
if (mpa_rev_to_use == 2)
@@ -961,7 +961,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
if (mpa_rev_to_use == 2) {
mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
sizeof (struct mpa_v2_conn_params));
- pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ pr_debug("initiator ird %u ord %u\n", ep->ird,
ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -994,7 +994,6 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
*/
skb_get(skb);
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
if (ret)
@@ -1014,8 +1013,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1080,7 +1079,6 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb_get(skb);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
- BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ep->snd_seq += mpalen;
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -1094,8 +1092,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
struct sk_buff *skb;
struct mpa_v2_conn_params mpa_v2_params;
- pr_debug("%s ep %p tid %u pd_len %d\n",
- __func__, ep, ep->hwtid, ep->plen);
+ pr_debug("ep %p tid %u pd_len %d\n",
+ ep, ep->hwtid, ep->plen);
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
@@ -1185,7 +1183,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_atid(t, atid);
- pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
+ pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
mutex_lock(&ep->com.mutex);
@@ -1229,7 +1227,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = status;
@@ -1246,7 +1244,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
@@ -1261,7 +1259,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
@@ -1278,8 +1276,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u status %d\n",
- __func__, ep, ep->hwtid, status);
+ pr_debug("ep %p tid %u status %d\n",
+ ep, ep->hwtid, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@@ -1308,7 +1306,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
}
}
- pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
+ pr_debug("ep %p tid %u status %d\n", ep,
ep->hwtid, status);
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -1322,7 +1320,7 @@ static int connect_request_upcall(struct c4iw_ep *ep)
struct iw_cm_event event;
int ret;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
memcpy(&event.local_addr, &ep->com.local_addr,
@@ -1359,13 +1357,13 @@ static void established_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
event.ird = ep->ord;
event.ord = ep->ird;
if (ep->com.cm_id) {
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
set_bit(ESTAB_UPCALL, &ep->com.history);
}
@@ -1377,8 +1375,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
u32 credit_dack;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
pr_err("update_rx_credits - cannot alloc skb!\n");
@@ -1429,7 +1427,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
int err;
int disconnect = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1527,8 +1525,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
- pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
- __func__,
+ pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
resp_ird, resp_ord, ep->ird, ep->ord);
/*
@@ -1573,8 +1570,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
- __func__, ep->mpa_attr.crc_enabled,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
+ ep->mpa_attr.crc_enabled,
ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type, p2p_type);
@@ -1670,7 +1667,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
struct mpa_v2_conn_params *mpa_v2_params;
u16 plen;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/*
* If we get more than the supported amount of private data
@@ -1679,7 +1676,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
goto err_stop_timer;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
/*
* Copy the new data into our accumulation buffer.
@@ -1695,7 +1692,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (ep->mpa_pkt_len < sizeof(*mpa))
return 0;
- pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
+ pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
/*
@@ -1758,8 +1755,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
ep->ord = min_t(u32, ep->ord,
cur_max_read_depth(ep->com.dev));
- pr_debug("%s initiator ird %u ord %u\n",
- __func__, ep->ird, ep->ord);
+ pr_debug("initiator ird %u ord %u\n",
+ ep->ird, ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1776,8 +1773,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (peer2peer)
ep->mpa_attr.p2p_type = p2p_type;
- pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
- __func__,
+ pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type);
@@ -1816,7 +1812,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
+ pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
@@ -1836,7 +1832,6 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
update_rx_credits(ep, dlen);
- BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
" qpid %u ep %p state %d tid %u status %d\n",
@@ -1870,11 +1865,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
pr_warn("Abort rpl to freed endpoint\n");
return 0;
}
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case ABORTING:
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
__state_set(&ep->com, DEAD);
release = 1;
break;
@@ -1994,8 +1989,8 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
ep->rcv_win = rcv_win;
- pr_debug("%s snd_win %d rcv_win %d\n",
- __func__, ep->snd_win, ep->rcv_win);
+ pr_debug("snd_win %d rcv_win %d\n",
+ ep->snd_win, ep->rcv_win);
}
#define ACT_OPEN_RETRY_COUNT 2
@@ -2100,9 +2095,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
int iptype;
__u8 *ra;
- pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
- init_timer(&ep->timer);
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
/* When MPA revision is different on nodes, the node with MPA_rev=2
* tries to reconnect with MPA_rev 1 for the same EP through
@@ -2110,7 +2104,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
* further connection establishment. As we are using the same EP pointer
* for reconnect, few skbs are used during the previous c4iw_connect(),
* which leaves the EP with inadequate skbs for further
- * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+ * c4iw_reconnect(), Further causing a crash due to an empty
* skb_list() during peer_abort(). Allocate skbs which is already used.
*/
size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
@@ -2163,8 +2157,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -2215,12 +2209,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
- pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
+ pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
status, status2errno(status));
if (cxgb_is_neg_adv(status)) {
- pr_debug("%s Connection problems for atid %u status %u (%s)\n",
- __func__, atid, status, neg_adv_str(status));
+ pr_debug("Connection problems for atid %u status %u (%s)\n",
+ atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -2316,12 +2310,12 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- pr_debug("%s ep %p status %d error %d\n", __func__, ep,
+ pr_debug("ep %p status %d error %d\n", ep,
rpl->status, status2errno(rpl->status));
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
return 0;
@@ -2334,11 +2328,11 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
- pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ pr_warn("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
- pr_debug("%s ep %p\n", __func__, ep);
- c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ pr_debug("ep %p\n", ep);
+ c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
out:
return 0;
@@ -2356,8 +2350,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
skb_get(skb);
rpl = cplhdr(skb);
@@ -2427,7 +2420,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
- pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
+ pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
}
rpl->opt0 = cpu_to_be64(opt0);
@@ -2440,8 +2433,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
- BUG_ON(skb_cloned(skb));
+ pr_debug("c4iw_dev %p tid %u\n", dev, hwtid);
skb_trim(skb, sizeof(struct cpl_tid_release));
release_tid(&dev->rdev, hwtid, skb);
return;
@@ -2466,13 +2458,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
if (state_read(&parent_ep->com) != LISTEN) {
- pr_debug("%s - listening ep not in LISTEN\n", __func__);
+ pr_err("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
@@ -2481,16 +2473,16 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
/* Find output route */
if (iptype == 4) {
- pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
*(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port, tos);
} else {
- pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
- , __func__, parent_ep, hwtid,
+ pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -2576,10 +2568,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
+ pr_debug("tx_chan %u smac_idx %u rss_qid %u\n",
child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
- init_timer(&child_ep->timer);
+ timer_setup(&child_ep->timer, ep_timeout, 0);
cxgb4_insert_tid(t, child_ep, hwtid,
child_ep->com.local_addr.ss_family);
insert_ep_tid(child_ep);
@@ -2613,11 +2605,11 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
int ret;
ep = get_ep_from_tid(dev, tid);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
- pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+ pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid,
ntohs(req->tcp_opt));
set_emss(ep, ntohs(req->tcp_opt));
@@ -2650,7 +2642,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
dst_confirm(ep->dst);
set_bit(PEER_CLOSE, &ep->com.history);
@@ -2673,12 +2665,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
*/
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case MPA_REP_SENT:
__state_set(&ep->com, CLOSING);
pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
break;
case FPDU_MODE:
start_ep_timer(ep);
@@ -2714,7 +2706,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
disconnect = 0;
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
@@ -2741,16 +2733,16 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
goto deref_ep;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
ep->com.state);
set_bit(PEER_ABORT, &ep->com.history);
@@ -2760,7 +2752,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* MPA_REQ_SENT
*/
if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
@@ -2783,8 +2775,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
* do some housekeeping so as to re-initiate the
* connection
*/
- pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
- __func__, mpa_rev);
+ pr_info("%s: mpa_rev=%d. Retrying with mpav1\n",
+ __func__, mpa_rev);
ep->retry_with_mpa_v1 = 1;
}
break;
@@ -2810,11 +2802,11 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case ABORTING:
break;
case DEAD:
- pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+ pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
goto deref_ep;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
dst_confirm(ep->dst);
@@ -2875,7 +2867,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
@@ -2901,7 +2893,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case DEAD:
break;
default:
- BUG_ON(1);
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
mutex_unlock(&ep->com.mutex);
@@ -2919,7 +2911,6 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
ep = get_ep_from_tid(dev, tid);
- BUG_ON(!ep);
if (ep && ep->com.qp) {
pr_warn("TERM received tid %u qpid %u\n",
@@ -2950,19 +2941,19 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- pr_debug("%s ep %p tid %u credits %u\n",
- __func__, ep, ep->hwtid, credits);
+ pr_debug("ep %p tid %u credits %u\n",
+ ep, ep->hwtid, credits);
if (credits == 0) {
- pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
- __func__, ep, ep->hwtid, state_read(&ep->com));
+ pr_debug("0 credit ack ep %p tid %u state %u\n",
+ ep, ep->hwtid, state_read(&ep->com));
goto out;
}
dst_confirm(ep->dst);
if (ep->mpa_skb) {
- pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
- __func__, ep, ep->hwtid,
- state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
+ pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
+ ep, ep->hwtid, state_read(&ep->com),
+ ep->mpa_attr.initiator ? 1 : 0);
mutex_lock(&ep->com.mutex);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
@@ -2980,7 +2971,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
int abort;
struct c4iw_ep *ep = to_ep(cm_id);
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3011,7 +3002,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
int abort = 0;
- pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
+ pr_debug("ep %p tid %u\n", ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
if (ep->com.state != MPA_REQ_RCVD) {
@@ -3019,7 +3010,10 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto err_out;
}
- BUG_ON(!qp);
+ if (!qp) {
+ err = -EINVAL;
+ goto err_out;
+ }
set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
@@ -3064,7 +3058,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ird = 1;
}
- pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
+ pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
@@ -3204,7 +3198,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail1;
}
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, ep_timeout, 0);
ep->plen = conn_param->private_data_len;
if (ep->plen)
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
@@ -3220,12 +3214,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
- pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
err = -EINVAL;
goto fail2;
}
ref_qp(ep);
- pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
+ pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn,
ep->com.qp, cm_id);
/*
@@ -3263,8 +3257,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
- __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+ pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port));
ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
laddr->sin_addr.s_addr,
@@ -3285,8 +3279,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
- __func__, laddr6->sin6_addr.s6_addr,
+ pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
@@ -3309,8 +3303,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail4;
}
- pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
- __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+ pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+ ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
@@ -3348,14 +3342,14 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
if (err)
return err;
}
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
ep->stid, &sin6->sin6_addr,
sin6->sin6_port,
ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3391,13 +3385,13 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
}
} while (err == -EBUSY);
} else {
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
ep->stid, sin->sin_addr.s_addr, sin->sin_port,
0, ep->com.dev->rdev.lldi.rxq_ids[0]);
if (!err)
err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
+ ep->com.wr_waitp,
0, 0, __func__);
else if (err > 0)
err = net_xmit_errno(err);
@@ -3424,7 +3418,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail1;
}
skb_queue_head_init(&ep->com.ep_skb_list);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
ep->com.cm_id = cm_id;
ref_cm_id(&ep->com);
ep->com.dev = dev;
@@ -3478,7 +3472,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
int err;
struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
- pr_debug("%s ep %p\n", __func__, ep);
+ pr_debug("ep %p\n", ep);
might_sleep();
state_set(&ep->com, DEAD);
@@ -3489,13 +3483,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
} else {
struct sockaddr_in6 *sin6;
- c4iw_init_wr_wait(&ep->com.wr_wait);
+ c4iw_init_wr_wait(ep->com.wr_waitp);
err = cxgb4_remove_server(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
if (err)
goto done;
- err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
0, 0, __func__);
sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
@@ -3519,7 +3513,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
+ pr_debug("ep %p state %s, abrupt %d\n", ep,
states[ep->com.state], abrupt);
/*
@@ -3573,11 +3567,11 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_debug("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_info("%s ignoring disconnect ep %p state %u\n",
+ __func__, ep, ep->com.state);
break;
default:
- BUG();
+ WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
break;
}
@@ -3636,6 +3630,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
send_fw_act_open_req(ep, atid);
return;
}
+ /* fall through */
case FW_EADDRINUSE:
set_bit(ACT_RETRY_INUSE, &ep->com.history);
if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
@@ -3676,9 +3671,8 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
int ret;
rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
- BUG_ON(!rpl_skb);
if (req->retval) {
- pr_debug("%s passive open failure %d\n", __func__, req->retval);
+ pr_err("%s passive open failure %d\n", __func__, req->retval);
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.pas_ofld_conn_fails++;
mutex_unlock(&dev->rdev.stats.lock);
@@ -3874,7 +3868,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct net_device *pdev;
u16 rss_qid, eth_hdr_len;
int step;
- u32 tx_chan;
struct neighbour *neigh;
/* Drop all non-SYN packets */
@@ -3895,8 +3888,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
- pr_debug("%s connect request on invalid stid %d\n",
- __func__, stid);
+ pr_warn("%s connect request on invalid stid %d\n",
+ __func__, stid);
goto reject;
}
@@ -3933,7 +3926,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
skb_set_transport_header(skb, (void *)tcph - (void *)rss);
skb_get(skb);
- pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+ pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n",
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
@@ -3941,15 +3934,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
iph->daddr, iph->saddr, tcph->dest,
tcph->source, iph->tos);
if (!dst) {
- pr_err("%s - failed to find dst entry!\n",
- __func__);
+ pr_err("%s - failed to find dst entry!\n", __func__);
goto reject;
}
neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) {
- pr_err("%s - failed to allocate neigh!\n",
- __func__);
+ pr_err("%s - failed to allocate neigh!\n", __func__);
goto free_dst;
}
@@ -3958,14 +3949,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
dev_put(pdev);
} else {
pdev = get_real_dev(neigh->dev);
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
- tx_chan = cxgb4_port_chan(pdev);
}
neigh_release(neigh);
if (!e) {
@@ -4032,8 +4021,7 @@ static void process_timeout(struct c4iw_ep *ep)
int abort = 1;
mutex_lock(&ep->com.mutex);
- pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
@@ -4109,7 +4097,6 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- BUG_ON(!work_handlers[opcode]);
ret = work_handlers[opcode](dev, skb);
if (!ret)
kfree_skb(skb);
@@ -4119,9 +4106,9 @@ static void process_work(struct work_struct *work)
static DECLARE_WORK(skb_work, process_work);
-static void ep_timeout(unsigned long arg)
+static void ep_timeout(struct timer_list *t)
{
- struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+ struct c4iw_ep *ep = from_timer(ep, t, timer);
int kickit = 0;
spin_lock(&timeout_lock);
@@ -4176,15 +4163,15 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_wr_wait *wr_waitp;
int ret;
- pr_debug("%s type %u\n", __func__, rpl->type);
+ pr_debug("type %u\n", rpl->type);
switch (rpl->type) {
case FW6_TYPE_WR_RPL:
ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
- pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+ pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret);
if (wr_waitp)
- c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+ c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);
kfree_skb(skb);
break;
case FW6_TYPE_CQE:
@@ -4214,15 +4201,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
- pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
- ep->com.state);
+ pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
out:
sched(dev, skb);
return 0;
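Two mechanical conversions run through the cm.c hunks above: endpoint timers move from init_timer() to timer_setup(), with the callback recovering its endpoint via from_timer() instead of casting an unsigned long, and the embedded ep->com.wr_wait completion becomes the pointer ep->com.wr_waitp, woken through the new _noref helper. A minimal sketch of the timer half, reusing the driver's field names (callback body elided):

	static void ep_timeout(struct timer_list *t)
	{
		struct c4iw_ep *ep = from_timer(ep, t, timer);

		/* queue ep for timeout processing and kick the workqueue */
	}

	/* at endpoint setup, replacing init_timer() plus manual function/data: */
	timer_setup(&ep->timer, ep_timeout, 0);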
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index be07da1997e6..ea55e95cd2c5 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,12 +33,12 @@
#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
+ struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
int ret;
wr_len = sizeof *res_wr + sizeof *res;
@@ -50,17 +50,14 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
- c4iw_init_wr_wait(&wr_wait);
- ret = c4iw_ofld_send(rdev, skb);
- if (!ret) {
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
- }
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
kfree(cq->sw_queue);
dma_free_coherent(&(rdev->lldi.pdev->dev),
@@ -71,13 +68,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
}
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
{
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
int user = (uctx != &rdev->uctx);
- struct c4iw_wr_wait wr_wait;
int ret;
struct sk_buff *skb;
@@ -119,7 +116,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -139,13 +136,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
- c4iw_init_wr_wait(&wr_wait);
-
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto err4;
- pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
if (ret)
goto err4;
@@ -178,7 +170,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
struct t4_cqe cqe;
- pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -196,8 +188,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
int flushed = 0;
int in_use = wq->rq.in_use - count;
- BUG_ON(in_use < 0);
- pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+ pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
wq, cq, wq->rq.in_use, count);
while (in_use--) {
insert_recv_cqe(wq, cq);
@@ -211,7 +202,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
{
struct t4_cqe cqe;
- pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+ pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -239,14 +230,11 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
idx = wq->sq.flush_cidx;
- BUG_ON(idx >= wq->sq.size);
while (idx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[idx];
- BUG_ON(swsqe->flushed);
swsqe->flushed = 1;
insert_sq_cqe(wq, cq, swsqe);
if (wq->sq.oldest_read == swsqe) {
- BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
advance_oldest_read(wq);
}
flushed++;
@@ -267,7 +255,6 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
if (wq->sq.flush_cidx == -1)
wq->sq.flush_cidx = wq->sq.cidx;
cidx = wq->sq.flush_cidx;
- BUG_ON(cidx > wq->sq.size);
while (cidx != wq->sq.pidx) {
swsqe = &wq->sq.sw_sq[cidx];
@@ -276,13 +263,11 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
cidx = 0;
} else if (swsqe->complete) {
- BUG_ON(swsqe->flushed);
-
/*
* Insert this completed cqe into the swcq.
*/
- pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
- __func__, cidx, cq->sw_pidx);
+ pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+ cidx, cq->sw_pidx);
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
@@ -337,7 +322,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
struct t4_swsqe *swsqe;
int ret;
- pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+ pr_debug("cqid 0x%x\n", chp->cq.cqid);
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
/*
@@ -430,7 +415,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
u32 ptr;
*count = 0;
- pr_debug("%s count zero %d\n", __func__, *count);
+ pr_debug("count zero %d\n", *count);
ptr = cq->sw_cidx;
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
@@ -440,7 +425,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
if (++ptr == cq->size)
ptr = 0;
}
- pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+ pr_debug("cq %p count %d\n", cq, *count);
}
/*
@@ -471,8 +456,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (ret)
return ret;
- pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
- __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+ pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+ CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
CQE_WRID_LOW(hw_cqe));
@@ -603,8 +588,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
struct t4_swsqe *swsqe;
- pr_debug("%s out of order completion going in sw_sq at idx %u\n",
- __func__, CQE_WRID_SQ_IDX(hw_cqe));
+ pr_debug("out of order completion going in sw_sq at idx %u\n",
+ CQE_WRID_SQ_IDX(hw_cqe));
swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
swsqe->cqe = *hw_cqe;
swsqe->complete = 1;
@@ -621,7 +606,6 @@ proc_cqe:
*/
if (SQ_TYPE(hw_cqe)) {
int idx = CQE_WRID_SQ_IDX(hw_cqe);
- BUG_ON(idx >= wq->sq.size);
/*
* Account for any unsignaled completions completed by
@@ -635,18 +619,16 @@ proc_cqe:
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
else
wq->sq.in_use -= idx - wq->sq.cidx;
- BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
wq->sq.cidx = (uint16_t)idx;
- pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+ pr_debug("completing sq idx %u\n", wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
- pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+ pr_debug("completing rq idx %u\n", wq->rq.cidx);
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
- BUG_ON(t4_rq_empty(wq));
if (c4iw_wr_log)
c4iw_log_wr_stats(wq, hw_cqe);
t4_rq_consume(wq);
@@ -661,12 +643,12 @@ flush_wq:
skip_cqe:
if (SW_CQE(hw_cqe)) {
- pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->sw_cidx);
+ pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+ cq, cq->cqid, cq->sw_cidx);
t4_swcq_consume(cq);
} else {
- pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
- __func__, cq, cq->cqid, cq->cidx);
+ pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+ cq, cq->cqid, cq->cidx);
t4_hwcq_consume(cq);
}
return ret;
@@ -712,8 +694,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
- pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
- __func__, CQE_QPID(&cqe),
+ pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+ CQE_QPID(&cqe),
CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
CQE_STATUS(&cqe), CQE_LEN(&cqe),
CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +839,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
struct c4iw_cq *chp;
struct c4iw_ucontext *ucontext;
- pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+ pr_debug("ib_cq %p\n", ib_cq);
chp = to_c4iw_cq(ib_cq);
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -868,8 +850,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
: NULL;
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
- chp->destroy_skb);
- chp->destroy_skb = NULL;
+ chp->destroy_skb, chp->wr_waitp);
+ c4iw_put_wr_wait(chp->wr_waitp);
kfree(chp);
return 0;
}
@@ -889,7 +871,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
- pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+ pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)
return ERR_PTR(-EINVAL);
@@ -901,12 +883,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
return ERR_PTR(-ENOMEM);
+ chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!chp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_chp;
+ }
+ c4iw_init_wr_wait(chp->wr_waitp);
wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
if (!chp->destroy_skb) {
ret = -ENOMEM;
- goto err1;
+ goto err_free_wr_wait;
}
if (ib_context)
@@ -947,9 +935,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
chp->cq.vector = vector;
ret = create_cq(&rhp->rdev, &chp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ chp->wr_waitp);
if (ret)
- goto err2;
+ goto err_free_skb;
chp->rhp = rhp;
chp->cq.size--; /* status page */
@@ -960,16 +949,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
if (ret)
- goto err3;
+ goto err_destroy_cq;
if (ucontext) {
ret = -ENOMEM;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
- goto err4;
+ goto err_remove_handle;
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
if (!mm2)
- goto err5;
+ goto err_free_mm;
uresp.qid_mask = rhp->rdev.cqmask;
uresp.cqid = chp->cq.cqid;
@@ -984,7 +973,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
ret = ib_copy_to_udata(udata, &uresp,
sizeof(uresp) - sizeof(uresp.reserved));
if (ret)
- goto err6;
+ goto err_free_mm2;
mm->key = uresp.key;
mm->addr = virt_to_phys(chp->cq.queue);
@@ -996,23 +985,25 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
mm2->len = PAGE_SIZE;
insert_mmap(ucontext, mm2);
}
- pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
- __func__, chp->cq.cqid, chp, chp->cq.size,
+ pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+ chp->cq.cqid, chp, chp->cq.size,
chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
return &chp->ibcq;
-err6:
+err_free_mm2:
kfree(mm2);
-err5:
+err_free_mm:
kfree(mm);
-err4:
+err_remove_handle:
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err3:
+err_destroy_cq:
destroy_cq(&chp->rhp->rdev, &chp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
- chp->destroy_skb);
-err2:
+ chp->destroy_skb, chp->wr_waitp);
+err_free_skb:
kfree_skb(chp->destroy_skb);
-err1:
+err_free_wr_wait:
+ c4iw_put_wr_wait(chp->wr_waitp);
+err_free_chp:
kfree(chp);
return ERR_PTR(ret);
}
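create_cq()/destroy_cq() above stop keeping a c4iw_wr_wait on the stack; the CQ owns a kref-counted wait object for its whole lifetime, and c4iw_ref_send_wait() replaces the open-coded c4iw_ofld_send() plus c4iw_wait_for_reply() pair. A sketch of that lifetime using only helpers introduced by this patch (the function name is illustrative, error unwinding trimmed):

	static int cq_wr_wait_sketch(struct c4iw_rdev *rdev, struct c4iw_cq *chp,
				     struct sk_buff *skb)
	{
		int ret;

		chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);	/* kref starts at 1 */
		if (!chp->wr_waitp)
			return -ENOMEM;

		c4iw_init_wr_wait(chp->wr_waitp);
		/* takes a reference, posts the work request, waits for the reply */
		ret = c4iw_ref_send_wait(rdev, skb, chp->wr_waitp, 0, 0, __func__);

		c4iw_put_wr_wait(chp->wr_waitp);	/* final put frees the object */
		return ret;
	}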
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index fc886f81b885..af77d128d242 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -64,14 +64,9 @@ module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
"Number of entries (log2) in the work request timing log.");
-struct uld_ctx {
- struct list_head entry;
- struct cxgb4_lld_info lldi;
- struct c4iw_dev *dev;
-};
-
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
+struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -811,8 +806,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->qpmask = rdev->lldi.udb_density - 1;
rdev->cqmask = rdev->lldi.ucq_density - 1;
- pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
- __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+ pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
rdev->lldi.vr->pbl.start,
rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
@@ -912,7 +907,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
c4iw_destroy_resource(&rdev->resource);
}
-static void c4iw_dealloc(struct uld_ctx *ctx)
+void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
@@ -935,7 +930,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
static void c4iw_remove(struct uld_ctx *ctx)
{
- pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
+ pr_debug("c4iw_dev %p\n", ctx->dev);
c4iw_unregister_device(ctx->dev);
c4iw_dealloc(ctx);
}
@@ -969,8 +964,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.lldi = *infop;
/* init various hw-queue params based on lld info */
- pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
- __func__, devp->rdev.lldi.sge_ingpadboundary,
+ pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+ devp->rdev.lldi.sge_ingpadboundary,
devp->rdev.lldi.sge_egrstatuspagesize);
devp->rdev.hw_queue.t4_eq_status_entries =
@@ -1069,8 +1064,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
}
ctx->lldi = *infop;
- pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
- __func__, pci_name(ctx->lldi.pdev),
+ pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+ pci_name(ctx->lldi.pdev),
ctx->lldi.nchan, ctx->lldi.nrxq,
ctx->lldi.ntxq, ctx->lldi.nports);
@@ -1102,8 +1097,8 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
if (unlikely(!skb))
return NULL;
- __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
- sizeof(struct rss_header) - pktshift);
+ __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+ sizeof(struct rss_header) - pktshift);
/*
* This skb will contain:
@@ -1203,13 +1198,11 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct uld_ctx *ctx = handle;
- pr_debug("%s new_state %u\n", __func__, new_state);
+ pr_debug("new_state %u\n", new_state);
switch (new_state) {
case CXGB4_STATE_UP:
pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) {
- int ret;
-
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
pr_err("%s: initialization failed: %ld\n",
@@ -1218,12 +1211,9 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
ctx->dev = NULL;
break;
}
- ret = c4iw_register_device(ctx->dev);
- if (ret) {
- pr_err("%s: RDMA registration failed: %d\n",
- pci_name(ctx->lldi.pdev), ret);
- c4iw_dealloc(ctx);
- }
+
+ INIT_WORK(&ctx->reg_work, c4iw_register_device);
+ queue_work(reg_workq, &ctx->reg_work);
}
break;
case CXGB4_STATE_DOWN:
@@ -1518,6 +1508,27 @@ static struct cxgb4_uld_info c4iw_uld_info = {
.control = c4iw_uld_control,
};
+void _c4iw_free_wr_wait(struct kref *kref)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
+ pr_debug("Free wr_wait %p\n", wr_waitp);
+ kfree(wr_waitp);
+}
+
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
+{
+ struct c4iw_wr_wait *wr_waitp;
+
+ wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
+ if (wr_waitp) {
+ kref_init(&wr_waitp->kref);
+ pr_debug("wr_wait %p\n", wr_waitp);
+ }
+ return wr_waitp;
+}
+
static int __init c4iw_init_module(void)
{
int err;
@@ -1530,6 +1541,12 @@ static int __init c4iw_init_module(void)
if (!c4iw_debugfs_root)
pr_warn("could not create debugfs entry, continuing\n");
+ reg_workq = create_singlethread_workqueue("Register_iWARP_device");
+ if (!reg_workq) {
+ pr_err("Failed creating workqueue to register iwarp device\n");
+ return -ENOMEM;
+ }
+
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1546,6 +1563,8 @@ static void __exit c4iw_exit_module(void)
kfree(ctx);
}
mutex_unlock(&dev_mutex);
+ flush_workqueue(reg_workq);
+ destroy_workqueue(reg_workq);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
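device.c now defers RDMA-core registration to a dedicated single-threaded workqueue instead of registering inline from the CXGB4_STATE_UP handler; c4iw_register_device() becomes a work handler and c4iw_dealloc() is made non-static so the failure path can still tear the device down. The handler body is outside these hunks, so the following is only an assumption about how it recovers its context from the reg_work member added to struct uld_ctx:

	void c4iw_register_device(struct work_struct *work)
	{
		struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);

		/* register ctx->dev with the RDMA core; on failure, undo with
		 * c4iw_dealloc(ctx), much as the old inline error path did */
	}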
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 8f963df0bffc..a252d5c40ae3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
- spin_lock_irqsave(&chp->comp_handler_lock, flag);
- (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&chp->cq)) {
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
+ (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ }
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -234,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
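post_qp_event() above now raises the completion handler only when the CQ was actually armed, gated by t4_clear_cq_armed(). That helper is defined in t4.h, outside this diff; the assumption is a test-and-clear of the CQ's armed flag, roughly:

	static inline int t4_clear_cq_armed(struct t4_cq *cq)
	{
		return test_and_clear_bit(CQ_ARMED, &cq->flags);
	}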
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 0161ae6ad629..5c2cfdea06ad 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -73,7 +73,6 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
unsigned long flags;
obj -= alloc->start;
- BUG_ON((int)obj < 0);
spin_lock_irqsave(&alloc->lock, flags);
clear_bit(obj, alloc->table);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 819a30635d53..470f97a79ebb 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -202,18 +202,50 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
struct c4iw_wr_wait {
struct completion completion;
int ret;
+ struct kref kref;
};
+void _c4iw_free_wr_wait(struct kref *kref);
+
+static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
+}
+
+static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+ pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
+ kref_read(&wr_waitp->kref));
+ WARN_ON(kref_read(&wr_waitp->kref) == 0);
+ kref_get(&wr_waitp->kref);
+}
+
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
wr_waitp->ret = 0;
init_completion(&wr_waitp->completion);
}
-static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
+ bool deref)
{
wr_waitp->ret = ret;
complete(&wr_waitp->completion);
+ if (deref)
+ c4iw_put_wr_wait(wr_waitp);
+}
+
+static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, false);
+}
+
+static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+ _c4iw_wake_up(wr_waitp, ret, true);
}
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -230,18 +262,40 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
if (!ret) {
- pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
- func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+ pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+ func, pci_name(rdev->lldi.pdev), hwtid, qpid);
rdev->flags |= T4_FATAL_ERROR;
wr_waitp->ret = -EIO;
+ goto out;
}
-out:
if (wr_waitp->ret)
pr_debug("%s: FW reply %d tid %u qpid %u\n",
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+out:
return wr_waitp->ret;
}
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
+
+static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp,
+ u32 hwtid, u32 qpid,
+ const char *func)
+{
+ int ret;
+
+ pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
+ qpid);
+ c4iw_get_wr_wait(wr_waitp);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret) {
+ c4iw_put_wr_wait(wr_waitp);
+ return ret;
+ }
+ return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
+}
+
enum db_state {
NORMAL = 0,
FLOW_CONTROL = 1,
@@ -268,6 +322,13 @@ struct c4iw_dev {
wait_queue_head_t wait;
};
+struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+ struct c4iw_dev *dev;
+ struct work_struct reg_work;
+};
+
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
return container_of(ibdev, struct c4iw_dev, ibdev);
@@ -310,7 +371,6 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
idr_preload_end();
}
- BUG_ON(ret == -ENOSPC);
return ret < 0 ? ret : 0;
}
@@ -394,6 +454,7 @@ struct c4iw_mr {
dma_addr_t mpl_addr;
u32 max_mpl_len;
u32 mpl_len;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
@@ -407,6 +468,7 @@ struct c4iw_mw {
struct sk_buff *dereg_skb;
u64 kva;
struct tpt_attributes attr;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
@@ -423,6 +485,7 @@ struct c4iw_cq {
spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
@@ -480,10 +543,10 @@ struct c4iw_qp {
struct mutex mutex;
struct kref kref;
wait_queue_head_t wait;
- struct timer_list timer;
int sq_sig_all;
struct work_struct free_work;
struct c4iw_ucontext *ucontext;
+ struct c4iw_wr_wait *wr_waitp;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -537,8 +600,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, key,
+ pr_debug("key 0x%x addr 0x%llx len %d\n", key,
(unsigned long long)mm->addr, mm->len);
return mm;
}
@@ -551,8 +613,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
struct c4iw_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
- pr_debug("%s key 0x%x addr 0x%llx len %d\n",
- __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+ pr_debug("key 0x%x addr 0x%llx len %d\n",
+ mm->key, (unsigned long long)mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);
}
@@ -671,16 +733,14 @@ enum c4iw_mmid_state {
#define MPA_V2_IRD_ORD_MASK 0x3FFF
#define c4iw_put_ep(ep) { \
- pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
- __func__, __LINE__, \
+ pr_debug("put_ep ep %p refcnt %d\n", \
ep, kref_read(&((ep)->kref))); \
WARN_ON(kref_read(&((ep)->kref)) < 1); \
kref_put(&((ep)->kref), _c4iw_free_ep); \
}
#define c4iw_get_ep(ep) { \
- pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
- __func__, __LINE__, \
+ pr_debug("get_ep ep %p, refcnt %d\n", \
ep, kref_read(&((ep)->kref))); \
kref_get(&((ep)->kref)); \
}
@@ -841,7 +901,7 @@ struct c4iw_ep_common {
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
- struct c4iw_wr_wait wr_wait;
+ struct c4iw_wr_wait *wr_waitp;
unsigned long flags;
unsigned long history;
};
@@ -935,7 +995,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
@@ -961,6 +1021,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
+void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
@@ -990,7 +1051,6 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
@@ -1018,5 +1078,6 @@ extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
+struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
#endif
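The iw_cxgb4.h hunks above split wake-up into _noref and _deref flavours: CPL handlers that merely complete a waiter (abort_rpl, peer_close, peer_abort, the listen replies) use c4iw_wake_up_noref(), while fw6_msg(), whose reference was taken by c4iw_ref_send_wait() when the work request was posted, drops it via c4iw_wake_up_deref(). The two call shapes, lifted from the cm.c conversions (surrounding handlers omitted):

	/* CPL handler path: the endpoint still owns its wait object */
	c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);

	/* FW6_TYPE_WR_RPL path: wake the poster and drop the posting reference */
	c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0);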
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index c2fba76becd4..7e0eb201cc26 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -60,18 +60,18 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
u32 len, dma_addr_t data,
- int wait, struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct ulp_mem_io *req;
struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
- struct c4iw_wr_wait wr_wait;
addr &= 0x7FFFFFF;
- if (wait)
- c4iw_init_wr_wait(&wr_wait);
+ if (wr_waitp)
+ c4iw_init_wr_wait(wr_waitp);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
if (!skb) {
@@ -84,8 +84,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req = __skb_put_zero(skb, wr_len);
INIT_ULPTX_WR(req, wr_len, 0, 0);
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
- (wait ? FW_WR_COMPL_F : 0));
- req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
+ (wr_waitp ? FW_WR_COMPL_F : 0));
+ req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
T5_ULP_MEMIO_ORDER_V(1) |
@@ -100,22 +100,21 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- return ret;
- if (wait)
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ if (wr_waitp)
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
return ret;
}
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
struct ulp_mem_io *req;
struct ulptx_idata *sc;
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
- struct c4iw_wr_wait wr_wait;
__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
if (is_t4(rdev->lldi.adapter_type))
@@ -124,9 +123,9 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
addr &= 0x7FFFFFF;
- pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+ pr_debug("addr 0x%x len %u\n", addr, len);
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
- c4iw_init_wr_wait(&wr_wait);
+ c4iw_init_wr_wait(wr_waitp);
for (i = 0; i < num_wqe; i++) {
copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
@@ -147,7 +146,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_COMPL_F);
- req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
+ req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -173,19 +172,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (copy_len % T4_ULPTX_MIN_IO)
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
(copy_len % T4_ULPTX_MIN_IO));
- ret = c4iw_ofld_send(rdev, skb);
- skb = NULL;
+ if (i == (num_wqe-1))
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
+ __func__);
+ else
+ ret = c4iw_ofld_send(rdev, skb);
if (ret)
- return ret;
+ break;
+ skb = NULL;
len -= C4IW_MAX_INLINE_SIZE;
}
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
return ret;
}
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
u32 remain = len;
u32 dmalen;
@@ -208,7 +211,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
- !remain, skb);
+ skb, remain ? NULL : wr_waitp);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -216,7 +219,8 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
daddr += dmalen;
}
if (remain)
- ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
+ wr_waitp);
out:
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
return ret;
@@ -227,23 +231,33 @@ out:
* If data is NULL, clear len byte of memory to zero.
*/
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, struct sk_buff *skb)
+ void *data, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
- if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
- if (len > inline_threshold) {
- if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
- pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
- pci_name(rdev->lldi.pdev));
- return _c4iw_write_mem_inline(rdev, addr, len,
- data, skb);
- } else {
- return 0;
- }
- } else
- return _c4iw_write_mem_inline(rdev, addr,
- len, data, skb);
- } else
- return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
+ int ret;
+
+ if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ if (len <= inline_threshold) {
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ goto out;
+ }
+
+ ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
+ if (ret) {
+ pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
+ wr_waitp);
+ }
+out:
+ return ret;
+
}
/*
@@ -257,7 +271,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
int bind_enabled, u32 zbva, u64 to,
u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
int err;
struct fw_ri_tpte tpt;
@@ -285,8 +299,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
mutex_unlock(&rdev->stats.lock);
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
}
- pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
- __func__, stag_state, type, pdid, stag_idx);
+ pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+ stag_state, type, pdid, stag_idx);
/* write TPT entry */
if (reset_tpt_entry)
@@ -311,7 +325,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
err = write_adapter_mem(rdev, stag_idx +
(rdev->lldi.vr->stag.start >> 5),
- sizeof(tpt), &tpt, skb);
+ sizeof(tpt), &tpt, skb, wr_waitp);
if (reset_tpt_entry) {
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -323,45 +337,50 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
}
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
- u32 pbl_addr, u32 pbl_size)
+ u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
int err;
- pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
- __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+ pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
- err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
+ err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
+ wr_waitp);
return err;
}
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
- u32 pbl_addr, struct sk_buff *skb)
+ u32 pbl_addr, struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
- pbl_size, pbl_addr, skb);
+ pbl_size, pbl_addr, skb, wr_waitp);
}
-static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
+static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+ struct c4iw_wr_wait *wr_waitp)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
- 0UL, 0, 0, 0, 0, NULL);
+ 0UL, 0, 0, 0, 0, NULL, wr_waitp);
}
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct c4iw_wr_wait *wr_waitp)
{
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
- 0, skb);
+ 0, skb, wr_waitp);
}
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
- u32 pbl_size, u32 pbl_addr)
+ u32 pbl_size, u32 pbl_addr,
+ struct c4iw_wr_wait *wr_waitp)
{
*stag = T4_STAG_UNSET;
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
- 0UL, 0, 0, pbl_size, pbl_addr, NULL);
+ 0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
@@ -372,7 +391,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
mhp->attr.stag = stag;
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+ pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}
@@ -388,14 +407,15 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
mhp->attr.mw_bind_enable, mhp->attr.zbva,
mhp->attr.va_fbo, mhp->attr.len ?
mhp->attr.len : -1, shift - 12,
- mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
+ mhp->wr_waitp);
if (ret)
return ret;
ret = finish_mem_reg(mhp, stag);
if (ret) {
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
mhp->dereg_skb = NULL;
}
return ret;
@@ -422,18 +442,24 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
u32 stag = T4_STAG_UNSET;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
php = to_c4iw_pd(pd);
rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
+ }
+ c4iw_init_wr_wait(mhp->wr_waitp);
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
ret = -ENOMEM;
- goto err0;
+ goto err_free_wr_wait;
}
mhp->rhp = rhp;
@@ -449,20 +475,22 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
- NULL);
+ NULL, mhp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_skb;
ret = finish_mem_reg(mhp, stag);
if (ret)
- goto err2;
+ goto err_dereg_mem;
return &mhp->ibmr;
-err2:
+err_dereg_mem:
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
-err1:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_skb:
kfree_skb(mhp->dereg_skb);
-err0:
+err_free_mhp:
kfree(mhp);
return ERR_PTR(ret);
}
@@ -473,13 +501,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
__be64 *pages;
int shift, n, len;
int i, k, entry;
- int err = 0;
+ int err = -ENOMEM;
struct scatterlist *sg;
struct c4iw_dev *rhp;
struct c4iw_pd *php;
struct c4iw_mr *mhp;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
@@ -496,34 +524,31 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp)
+ goto err_free_mhp;
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
- if (!mhp->dereg_skb) {
- kfree(mhp);
- return ERR_PTR(-ENOMEM);
- }
+ if (!mhp->dereg_skb)
+ goto err_free_wr_wait;
mhp->rhp = rhp;
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
- if (IS_ERR(mhp->umem)) {
- err = PTR_ERR(mhp->umem);
- kfree_skb(mhp->dereg_skb);
- kfree(mhp);
- return ERR_PTR(err);
- }
+ if (IS_ERR(mhp->umem))
+ goto err_free_skb;
shift = mhp->umem->page_shift;
n = mhp->umem->nmap;
err = alloc_pbl(mhp, n);
if (err)
- goto err;
+ goto err_umem_release;
pages = (__be64 *) __get_free_page(GFP_KERNEL);
if (!pages) {
err = -ENOMEM;
- goto err_pbl;
+ goto err_pbl_free;
}
i = n = 0;
@@ -536,7 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (i == PAGE_SIZE / sizeof *pages) {
err = write_pbl(&mhp->rhp->rdev,
pages,
- mhp->attr.pbl_addr + (n << 3), i);
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
if (err)
goto pbl_done;
n += i;
@@ -547,12 +573,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (i)
err = write_pbl(&mhp->rhp->rdev, pages,
- mhp->attr.pbl_addr + (n << 3), i);
+ mhp->attr.pbl_addr + (n << 3), i,
+ mhp->wr_waitp);
pbl_done:
free_page((unsigned long) pages);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
mhp->attr.pdid = php->pdid;
mhp->attr.zbva = 0;
@@ -563,17 +590,20 @@ pbl_done:
err = register_mem(rhp, php, mhp, shift);
if (err)
- goto err_pbl;
+ goto err_pbl_free;
return &mhp->ibmr;
-err_pbl:
+err_pbl_free:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-
-err:
+err_umem_release:
ib_umem_release(mhp->umem);
+err_free_skb:
kfree_skb(mhp->dereg_skb);
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
return ERR_PTR(err);
}
@@ -597,13 +627,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
if (!mhp)
return ERR_PTR(-ENOMEM);
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto free_mhp;
+ }
+
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
if (!mhp->dereg_skb) {
ret = -ENOMEM;
- goto free_mhp;
+ goto free_wr_wait;
}
- ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+ ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
if (ret)
goto free_skb;
mhp->rhp = rhp;
@@ -616,13 +652,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
ret = -ENOMEM;
goto dealloc_win;
}
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
return &(mhp->ibmw);
dealloc_win:
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+ mhp->wr_waitp);
free_skb:
kfree_skb(mhp->dereg_skb);
+free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
kfree(mhp);
return ERR_PTR(ret);
@@ -638,10 +677,12 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
rhp = mhp->rhp;
mmid = (mw->rkey) >> 8;
remove_handle(rhp, &rhp->mmidr, mmid);
- deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
+ deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
+ mhp->wr_waitp);
kfree_skb(mhp->dereg_skb);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
- pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+ pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
return 0;
}
@@ -671,23 +712,31 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
goto err;
}
+ mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!mhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_mhp;
+ }
+ c4iw_init_wr_wait(mhp->wr_waitp);
+
mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
length, &mhp->mpl_addr, GFP_KERNEL);
if (!mhp->mpl) {
ret = -ENOMEM;
- goto err_mpl;
+ goto err_free_wr_wait;
}
mhp->max_mpl_len = length;
mhp->rhp = rhp;
ret = alloc_pbl(mhp, max_num_sg);
if (ret)
- goto err1;
+ goto err_free_dma;
mhp->attr.pbl_size = max_num_sg;
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
- mhp->attr.pbl_size, mhp->attr.pbl_addr);
+ mhp->attr.pbl_size, mhp->attr.pbl_addr,
+ mhp->wr_waitp);
if (ret)
- goto err2;
+ goto err_free_pbl;
mhp->attr.pdid = php->pdid;
mhp->attr.type = FW_RI_STAG_NSMR;
mhp->attr.stag = stag;
@@ -696,21 +745,23 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
ret = -ENOMEM;
- goto err3;
+ goto err_dereg;
}
- pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+ pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
return &(mhp->ibmr);
-err3:
+err_dereg:
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
-err2:
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
+err_free_pbl:
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
-err1:
+err_free_dma:
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
-err_mpl:
+err_free_wr_wait:
+ c4iw_put_wr_wait(mhp->wr_waitp);
+err_free_mhp:
kfree(mhp);
err:
return ERR_PTR(ret);
@@ -744,7 +795,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
struct c4iw_mr *mhp;
u32 mmid;
- pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+ pr_debug("ib_mr %p\n", ib_mr);
mhp = to_c4iw_mr(ib_mr);
rhp = mhp->rhp;
@@ -754,7 +805,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
- mhp->attr.pbl_addr, mhp->dereg_skb);
+ mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
if (mhp->attr.pbl_size)
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
mhp->attr.pbl_size << 3);
@@ -762,7 +813,8 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
ib_umem_release(mhp->umem);
- pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+ pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
+ c4iw_put_wr_wait(mhp->wr_waitp);
kfree(mhp);
return 0;
}
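
The mem.c hunks above all apply one pattern: the implicit or on-stack wait state used for firmware replies is replaced by a dynamically allocated, reference-counted c4iw_wr_wait object that is created before the first work request and dropped on every exit path. A minimal sketch of that lifecycle, assuming kernel context and using only the helpers that appear in this diff (the struct and function names below are invented for illustration):

	/* Sketch only: mirrors the alloc/init/send/put flow added above. */
	struct example_obj {
		struct c4iw_wr_wait *wr_waitp;	/* heap object, outlives this call */
	};

	static int example_setup(struct c4iw_rdev *rdev, struct example_obj *obj,
				 struct sk_buff *skb)
	{
		int ret;

		obj->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
		if (!obj->wr_waitp)
			return -ENOMEM;
		c4iw_init_wr_wait(obj->wr_waitp);

		/* post the work request and block for the firmware reply */
		ret = c4iw_ref_send_wait(rdev, skb, obj->wr_waitp, 0, 0, __func__);
		if (ret)
			goto err_put_wr_wait;
		return 0;

	err_put_wr_wait:
		c4iw_put_wr_wait(obj->wr_waitp);	/* drop our reference */
		return ret;
	}

Presumably the reference count is what lets a late firmware reply still complete the wait object after the issuing caller has unwound, which an on-stack c4iw_wr_wait could not guarantee.
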
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 346e8334279a..1b5c6cd2ac4d 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
ucontext = container_of(kref, struct c4iw_ucontext, kref);
rhp = to_c4iw_dev(ucontext->ibucontext.device);
- pr_debug("%s ucontext %p\n", __func__, ucontext);
+ pr_debug("ucontext %p\n", ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
- pr_debug("%s context %p\n", __func__, context);
+ pr_debug("context %p\n", context);
c4iw_put_ucontext(ucontext);
return 0;
}
@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
int ret = 0;
struct c4iw_mm_entry *mm = NULL;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct c4iw_ucontext *ucontext;
u64 addr;
- pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+ pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
key, len);
if (vma->vm_start & (PAGE_SIZE-1))
@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
php = to_c4iw_pd(pd);
rhp = php->rhp;
- pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+ pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
mutex_lock(&rhp->rdev.stats.lock);
rhp->rdev.stats.pd.cur--;
@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
u32 pdid;
struct c4iw_dev *rhp;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
rhp = (struct c4iw_dev *) ibdev;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
mutex_unlock(&rhp->rdev.stats.lock);
- pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+ pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
return &php->ibpd;
}
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
*pkey = 0;
return 0;
}
@@ -308,10 +308,11 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
{
struct c4iw_dev *dev;
- pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
- __func__, ibdev, port, index, gid);
+ pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+ ibdev, port, index, gid);
+ if (!port)
+ return -EINVAL;
dev = to_c4iw_dev(ibdev);
- BUG_ON(port == 0);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
return 0;
@@ -323,7 +324,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
struct c4iw_dev *dev;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
if (uhw->inlen || uhw->outlen)
return -EINVAL;
@@ -364,7 +365,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
struct net_device *netdev;
struct in_device *inetdev;
- pr_debug("%s ibdev %p\n", __func__, ibdev);
+ pr_debug("ibdev %p\n", ibdev);
dev = to_c4iw_dev(ibdev);
netdev = dev->rdev.lldi.ports[port-1];
@@ -406,7 +407,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
@@ -419,7 +420,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
struct ethtool_drvinfo info;
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
@@ -429,7 +430,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
@@ -521,7 +522,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev);
- pr_debug("%s dev 0x%p\n", __func__, dev);
+ pr_debug("dev 0x%p\n", dev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -530,13 +531,14 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}
-int c4iw_register_device(struct c4iw_dev *dev)
+void c4iw_register_device(struct work_struct *work)
{
int ret;
int i;
+ struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
+ struct c4iw_dev *dev = ctx->dev;
- pr_debug("%s c4iw_dev %p\n", __func__, dev);
- BUG_ON(!dev->rdev.lldi.ports[0]);
+ pr_debug("c4iw_dev %p\n", dev);
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
@@ -609,8 +611,10 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
- if (!dev->ibdev.iwcm)
- return -ENOMEM;
+ if (!dev->ibdev.iwcm) {
+ ret = -ENOMEM;
+ goto err_dealloc_ctx;
+ }
dev->ibdev.iwcm->connect = c4iw_connect;
dev->ibdev.iwcm->accept = c4iw_accept_cr;
@@ -625,27 +629,31 @@ int c4iw_register_device(struct c4iw_dev *dev)
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
- goto bail1;
+ goto err_kfree_iwcm;
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
ret = device_create_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
if (ret)
- goto bail2;
+ goto err_unregister_device;
}
- return 0;
-bail2:
+ return;
+err_unregister_device:
ib_unregister_device(&dev->ibdev);
-bail1:
+err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
- return ret;
+err_dealloc_ctx:
+ pr_err("%s - Failed registering iwarp device: %d\n",
+ pci_name(ctx->lldi.pdev), ret);
+ c4iw_dealloc(ctx);
+ return;
}
void c4iw_unregister_device(struct c4iw_dev *dev)
{
int i;
- pr_debug("%s c4iw_dev %p\n", __func__, dev);
+ pr_debug("c4iw_dev %p\n", dev);
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
device_remove_file(&dev->ibdev.dev,
c4iw_class_attributes[i]);
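
The provider.c hunk above turns device registration into a deferred work item: c4iw_register_device() now takes a work_struct, recovers its uld_ctx with container_of(), and on failure logs and tears the context down itself, since there is no caller left to return an error to. A hedged sketch of that deferral pattern (the struct, handler, and setup function below are invented for illustration):

	#include <linux/workqueue.h>

	struct uld_ctx_example {
		struct work_struct reg_work;
		/* ... device state ... */
	};

	static void example_register_work(struct work_struct *work)
	{
		struct uld_ctx_example *ctx =
			container_of(work, struct uld_ctx_example, reg_work);

		/*
		 * Do the registration here; report and clean up failures
		 * locally instead of returning an error code.
		 */
		(void)ctx;
	}

	static void example_defer_registration(struct uld_ctx_example *ctx)
	{
		INIT_WORK(&ctx->reg_work, example_register_work);
		queue_work(system_wq, &ctx->reg_work);	/* or a driver workqueue */
	}
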
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cb7fc0d35d1d..5ee7fe433136 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -194,13 +194,13 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct t4_cq *rcq, struct t4_cq *scq,
- struct c4iw_dev_ucontext *uctx)
+ struct c4iw_dev_ucontext *uctx,
+ struct c4iw_wr_wait *wr_waitp)
{
int user = (uctx != &rdev->uctx);
struct fw_ri_res_wr *res_wr;
struct fw_ri_res *res;
int wr_len;
- struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
int ret = 0;
int eqsize;
@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
ret = -ENOMEM;
goto free_sq;
}
- pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
- __func__, wq->sq.queue,
+ pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+ wq->sq.queue,
(unsigned long long)virt_to_phys(wq->sq.queue),
wq->rq.queue,
(unsigned long long)virt_to_phys(wq->rq.queue));
@@ -299,7 +299,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (uintptr_t)&wr_wait;
+ res_wr->cookie = (uintptr_t)wr_waitp;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -352,17 +352,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
- c4iw_init_wr_wait(&wr_wait);
-
- ret = c4iw_ofld_send(rdev, skb);
- if (ret)
- goto free_dma;
- ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
+ c4iw_init_wr_wait(wr_waitp);
+ ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
if (ret)
goto free_dma;
- pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
- __func__, wq->sq.qid, wq->rq.qid, wq->db,
+ pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+ wq->sq.qid, wq->rq.qid, wq->db,
wq->sq.bar2_va, wq->rq.bar2_va);
return 0;
@@ -693,7 +689,6 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
if (++p == (__be64 *)&sq->queue[sq->size])
p = (__be64 *)sq->queue;
}
- BUG_ON(rem < 0);
while (rem) {
*p = 0;
rem -= sizeof(*p);
@@ -724,12 +719,13 @@ static void free_qp_work(struct work_struct *work)
ucontext = qhp->ucontext;
rhp = qhp->rhp;
- pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ucontext)
c4iw_put_ucontext(ucontext);
+ c4iw_put_wr_wait(qhp->wr_waitp);
kfree(qhp);
}
@@ -738,19 +734,19 @@ static void queue_qp_free(struct kref *kref)
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
- pr_debug("%s qhp %p\n", __func__, qhp);
+ pr_debug("qhp %p\n", qhp);
queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
- pr_debug("%s ib_qp %p\n", __func__, qp);
+ pr_debug("ib_qp %p\n", qp);
kref_get(&to_c4iw_qp(qp)->kref);
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
- pr_debug("%s ib_qp %p\n", __func__, qp);
+ pr_debug("ib_qp %p\n", qp);
kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
@@ -817,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
t4_swcq_produce(cq);
spin_unlock_irqrestore(&schp->lock, flag);
- spin_lock_irqsave(&schp->comp_handler_lock, flag);
- (*schp->ibcq.comp_handler)(&schp->ibcq,
- schp->ibcq.cq_context);
- spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&schp->cq)) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -846,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
t4_swcq_produce(cq);
spin_unlock_irqrestore(&rchp->lock, flag);
- spin_lock_irqsave(&rchp->comp_handler_lock, flag);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq,
- rchp->ibcq.cq_context);
- spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ if (t4_clear_cq_armed(&rchp->cq)) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
}
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
break;
default:
- pr_debug("%s post of type=%d TBD!\n", __func__,
- wr->opcode);
+ pr_warn("%s post of type=%d TBD!\n", __func__,
+ wr->opcode);
err = -EINVAL;
}
if (err) {
@@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
- pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
- __func__,
+ pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
(unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
swsqe->opcode, swsqe->read_len);
wr = wr->next;
@@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wqe->recv.r2[1] = 0;
wqe->recv.r2[2] = 0;
wqe->recv.len16 = len16;
- pr_debug("%s cookie 0x%llx pidx %u\n",
- __func__,
+ pr_debug("cookie 0x%llx pidx %u\n",
(unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
t4_rq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
@@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
struct sk_buff *skb;
struct terminate_message *term;
- pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
qhp->ep->hwtid);
skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
@@ -1255,33 +1253,36 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
int rq_flushed, sq_flushed;
unsigned long flag;
- pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+ pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
- /* locking hierarchy: cq lock first, then qp lock. */
+ /* locking hierarchy: cqs lock first, then qp lock. */
spin_lock_irqsave(&rchp->lock, flag);
+ if (schp != rchp)
+ spin_lock(&schp->lock);
spin_lock(&qhp->lock);
if (qhp->wq.flushed) {
spin_unlock(&qhp->lock);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
return;
}
qhp->wq.flushed = 1;
+ t4_set_wq_in_error(&qhp->wq);
c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
- spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&rchp->lock, flag);
- /* locking hierarchy: cq lock first, then qp lock. */
- spin_lock_irqsave(&schp->lock, flag);
- spin_lock(&qhp->lock);
if (schp != rchp)
c4iw_flush_hw_cq(schp);
sq_flushed = c4iw_flush_sq(qhp);
+
spin_unlock(&qhp->lock);
- spin_unlock_irqrestore(&schp->lock, flag);
+ if (schp != rchp)
+ spin_unlock(&schp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
if (schp == rchp) {
if (t4_clear_cq_armed(&rchp->cq) &&
@@ -1315,8 +1316,8 @@ static void flush_qp(struct c4iw_qp *qhp)
rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
schp = to_c4iw_cq(qhp->ibqp.send_cq);
- t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+ t4_set_wq_in_error(&qhp->wq);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1340,8 +1341,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int ret;
struct sk_buff *skb;
- pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- ep->hwtid);
+ pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
skb = skb_dequeue(&ep->com.ep_skb_list);
if (WARN_ON(!skb))
@@ -1357,23 +1357,20 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)ep->com.wr_waitp;
wqe->u.fini.type = FW_RI_TYPE_FINI;
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto out;
- ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
- qhp->wq.sq.qid, __func__);
-out:
- pr_debug("%s ret %d\n", __func__, ret);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+
+ pr_debug("ret %d\n", ret);
return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
- pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+ pr_debug("p2p_type = %d\n", p2p_type);
memset(&init->u, 0, sizeof init->u);
switch (p2p_type) {
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
int ret;
struct sk_buff *skb;
- pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+ pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
@@ -1427,7 +1424,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1464,18 +1461,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
if (qhp->attr.mpa_attr.initiator)
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
- ret = c4iw_ofld_send(&rhp->rdev, skb);
- if (ret)
- goto err1;
-
- ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
- qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+ ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
+ qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
if (!ret)
goto out;
-err1:
+
free_ird(rhp, qhp->attr.max_ird);
out:
- pr_debug("%s ret %d\n", __func__, ret);
+ pr_debug("ret %d\n", ret);
return ret;
}
@@ -1492,8 +1485,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
int free = 0;
struct c4iw_ep *ep = NULL;
- pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
- __func__,
+ pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
(mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
@@ -1582,7 +1574,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
case C4IW_QP_STATE_RTS:
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
- BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
t4_set_wq_in_error(&qhp->wq);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
@@ -1680,7 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
goto out;
err:
- pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+ pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
qhp->wq.sq.qid);
/* disassociate the LLP connection */
@@ -1691,7 +1682,6 @@ err:
set_state(qhp, C4IW_QP_STATE_ERROR);
free = 1;
abort = 1;
- BUG_ON(!ep);
flush_qp(qhp);
wake_up(&qhp->wait);
out:
@@ -1717,7 +1707,7 @@ out:
*/
if (free)
c4iw_put_ep(&ep->com);
- pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+ pr_debug("exit state %d\n", qhp->attr.state);
return ret;
}
@@ -1747,7 +1737,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_qp_rem_ref(ib_qp);
- pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+ pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
return 0;
}
@@ -1766,7 +1756,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
- pr_debug("%s ib_pd %p\n", __func__, pd);
+ pr_debug("ib_pd %p\n", pd);
if (attrs->qp_type != IB_QPT_RC)
return ERR_PTR(-EINVAL);
@@ -1798,6 +1788,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
+
+ qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
+ if (!qhp->wr_waitp) {
+ ret = -ENOMEM;
+ goto err_free_qhp;
+ }
+
qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize =
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
@@ -1814,9 +1811,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+ qhp->wr_waitp);
if (ret)
- goto err1;
+ goto err_free_wr_wait;
attrs->cap.max_recv_wr = rqsize - 1;
attrs->cap.max_send_wr = sqsize - 1;
@@ -1847,35 +1845,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
- goto err2;
+ goto err_destroy_qp;
- if (udata) {
+ if (udata && ucontext) {
sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
if (!sq_key_mm) {
ret = -ENOMEM;
- goto err3;
+ goto err_remove_handle;
}
rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
if (!rq_key_mm) {
ret = -ENOMEM;
- goto err4;
+ goto err_free_sq_key;
}
sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
if (!sq_db_key_mm) {
ret = -ENOMEM;
- goto err5;
+ goto err_free_rq_key;
}
rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
if (!rq_db_key_mm) {
ret = -ENOMEM;
- goto err6;
+ goto err_free_sq_db_key;
}
if (t4_sq_onchip(&qhp->wq.sq)) {
ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
GFP_KERNEL);
if (!ma_sync_key_mm) {
ret = -ENOMEM;
- goto err7;
+ goto err_free_rq_db_key;
}
uresp.flags = C4IW_QPF_ONCHIP;
} else
@@ -1905,7 +1903,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_unlock(&ucontext->mmap_lock);
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
- goto err8;
+ goto err_free_ma_sync_key;
sq_key_mm->key = uresp.sq_key;
sq_key_mm->addr = qhp->wq.sq.phys_addr;
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
@@ -1935,30 +1933,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->ucontext = ucontext;
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
- init_timer(&(qhp->timer));
INIT_LIST_HEAD(&qhp->db_fc_entry);
- pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
- __func__,
+ pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
-err8:
+err_free_ma_sync_key:
kfree(ma_sync_key_mm);
-err7:
+err_free_rq_db_key:
kfree(rq_db_key_mm);
-err6:
+err_free_sq_db_key:
kfree(sq_db_key_mm);
-err5:
+err_free_rq_key:
kfree(rq_key_mm);
-err4:
+err_free_sq_key:
kfree(sq_key_mm);
-err3:
+err_remove_handle:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-err2:
+err_destroy_qp:
destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-err1:
+err_free_wr_wait:
+ c4iw_put_wr_wait(qhp->wr_waitp);
+err_free_qhp:
kfree(qhp);
return ERR_PTR(ret);
}
@@ -1971,7 +1969,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum c4iw_qp_attr_mask mask = 0;
struct c4iw_qp_attributes attrs;
- pr_debug("%s ib_qp %p\n", __func__, ibqp);
+ pr_debug("ib_qp %p\n", ibqp);
/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2017,7 +2015,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
- pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+ pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
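
The __flush_qp() rework in the qp.c hunks above collapses what used to be two separate lock/unlock rounds (flush the RQ under the recv-CQ lock, then the SQ under the send-CQ lock) into a single pass that holds both CQ locks and then the QP lock. A condensed sketch of the resulting lock order, with the flush bodies elided (variable names as in the diff):

	spin_lock_irqsave(&rchp->lock, flag);		/* recv CQ lock first */
	if (schp != rchp)
		spin_lock(&schp->lock);			/* then send CQ lock, if distinct */
	spin_lock(&qhp->lock);				/* QP lock last */

	/* ... mark the WQ in error, flush the HW CQs, flush RQ and SQ ... */

	spin_unlock(&qhp->lock);
	if (schp != rchp)
		spin_unlock(&schp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
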
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 8ff0cbe5cb16..3cf25997ed2b 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
- pr_debug("%s entry 0x%x\n", __func__, entry);
+ pr_debug("entry 0x%x\n", entry);
c4iw_id_free(id_table, entry);
}
@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids);
@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur;
@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
- pr_debug("%s qid 0x%x\n", __func__, qid);
+ pr_debug("qid 0x%x\n", qid);
entry->qid = qid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
- pr_debug("%s failed to add PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("failed to add PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
pr_warn("Failed to add all PBL chunks (%x/%x)\n",
pbl_start, pbl_top - pbl_start);
@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
}
pbl_chunk >>= 1;
} else {
- pr_debug("%s added PBL chunk (%x/%x)\n",
- __func__, pbl_start, pbl_chunk);
+ pr_debug("added PBL chunk (%x/%x)\n",
+ pbl_start, pbl_chunk);
pbl_start += pbl_chunk;
}
}
@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
if (!addr)
pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+ pr_debug("addr 0x%x size %d\n", addr, size << 6);
mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
- pr_debug("%s failed to add RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("failed to add RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
pr_warn("Failed to add all RQT chunks (%x/%x)\n",
rqt_start, rqt_top - rqt_start);
@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
}
rqt_chunk >>= 1;
} else {
- pr_debug("%s added RQT chunk (%x/%x)\n",
- __func__, rqt_start, rqt_chunk);
+ pr_debug("added RQT chunk (%x/%x)\n",
+ rqt_start, rqt_chunk);
rqt_start += rqt_chunk;
}
}
@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
- pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+ pr_debug("addr 0x%x size %d\n", (u32)addr, size);
if (addr) {
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
+ pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock);
@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) {
chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
- pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("failed to add OCQP chunk (%x/%x)\n",
+ start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
start, top - start);
@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
}
chunk >>= 1;
} else {
- pr_debug("%s added OCQP chunk (%x/%x)\n",
- __func__, start, chunk);
+ pr_debug("added OCQP chunk (%x/%x)\n",
+ start, chunk);
start += chunk;
}
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e765c00303cd..e9ea94268d51 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -171,7 +171,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 stag;
+ __be32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -425,7 +425,6 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_sq_consume(struct t4_wq *wq)
{
- BUG_ON(wq->sq.in_use < 1);
if (wq->sq.cidx == wq->sq.flush_cidx)
wq->sq.flush_cidx = -1;
wq->sq.in_use--;
@@ -466,14 +465,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wmb();
if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
- pr_debug("%s: WC wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe);
} else {
- pr_debug("%s: DB wq->sq.pidx = %d\n",
- __func__, wq->sq.pidx);
+ pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -493,14 +490,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wmb();
if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
- pr_debug("%s: WC wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe);
} else {
- pr_debug("%s: DB wq->rq.pidx = %d\n",
- __func__, wq->rq.pidx);
+ pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
}
@@ -601,10 +596,11 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
if (cq->sw_in_use == cq->size) {
- pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
- BUG_ON(1);
+ cq->sw_in_use--;
+ return;
}
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
@@ -612,7 +608,6 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
static inline void t4_swcq_consume(struct t4_cq *cq)
{
- BUG_ON(cq->sw_in_use < 1);
cq->sw_in_use--;
if (++cq->sw_cidx == cq->size)
cq->sw_cidx = 0;
@@ -658,7 +653,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
ret = -EOVERFLOW;
cq->error = 1;
pr_err("cq overflow cqid %u\n", cq->cqid);
- BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
/* Ensure CQE is flushed to memory */
@@ -673,10 +667,9 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
if (cq->sw_in_use == cq->size) {
- pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
- __func__, cq->cqid);
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
cq->error = 1;
- BUG_ON(1);
return NULL;
}
if (cq->sw_in_use)
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 010c709ba3bb..58c531db4f4a 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
__u16 wrid;
__u8 r1[3];
__u8 len16;
- __u32 r2;
- __u32 stag;
+ __be32 r2;
+ __be32 stag;
struct fw_ri_tpte tpte;
__u64 pbl[2];
};
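
The two struct tweaks above (scqe.stag in t4.h and r2/stag in fw_ri_fr_nsmr_tpte_wr) only change endianness annotations: fields that carry big-endian wire values become __be32, so assignments must go through cpu_to_be32()/be32_to_cpu() and sparse can flag a missing conversion. A small example of what the annotation buys (struct and field names invented):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct wire_hdr_example {
		__be32 stag;			/* big-endian on the wire */
	};

	static inline void set_stag(struct wire_hdr_example *h, u32 cpu_stag)
	{
		h->stag = cpu_to_be32(cpu_stag);	/* correct */
		/* h->stag = cpu_stag; would draw a sparse endianness warning */
	}
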
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 66d538c033b0..ce4010bad982 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# HFI driver
#
diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 522b40ed9937..e8133870ee87 100644
--- a/drivers/infiniband/hw/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
@@ -218,9 +218,9 @@ unlock:
}
/* Timer function for re-enabling ASPM in the absence of interrupt activity */
-static inline void aspm_ctx_timer_function(unsigned long data)
+static inline void aspm_ctx_timer_function(struct timer_list *t)
{
- struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
+ struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
unsigned long flags;
spin_lock_irqsave(&rcd->aspm_lock, flags);
@@ -281,8 +281,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
{
spin_lock_init(&rcd->aspm_lock);
- setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
- (unsigned long)rcd);
+ timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0);
rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
aspm_mode == ASPM_MODE_DYNAMIC &&
rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
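
The aspm.h hunk above (and the rcverr/synth-stats timer hunks in chip.c below) follow the tree-wide timer API conversion: the callback now takes a struct timer_list * and recovers its containing structure with from_timer(), and timer_setup() replaces setup_timer() plus the cast-to-unsigned-long data pointer. A minimal sketch of the converted shape (struct and field names invented):

	#include <linux/timer.h>

	struct ctx_example {
		struct timer_list my_timer;
		/* ... */
	};

	static void example_timer_fn(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer field */
		struct ctx_example *ctx = from_timer(ctx, t, my_timer);

		(void)ctx;		/* handle the expiry */
	}

	static void example_init(struct ctx_example *ctx)
	{
		timer_setup(&ctx->my_timer, example_timer_fn, 0);
		mod_timer(&ctx->my_timer, jiffies + HZ);	/* fire in one second */
	}
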
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 0be42787759f..4f057e8ffe50 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1036,7 +1036,6 @@ static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
u8 *device_rev);
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
u8 *tx_polarity_inversion,
@@ -5538,9 +5537,9 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* associated with them.
*/
#define RCVERR_CHECK_TIME 10
-static void update_rcverr_timer(unsigned long opaque)
+static void update_rcverr_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
struct hfi1_pportdata *ppd = dd->pport;
u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
@@ -5559,7 +5558,7 @@ static void update_rcverr_timer(unsigned long opaque)
static int init_rcverr(struct hfi1_devdata *dd)
{
- setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
+ timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
/* Assume the hardware counter has been reset */
dd->rcv_ovfl_cnt = 0;
return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
@@ -5567,9 +5566,8 @@ static int init_rcverr(struct hfi1_devdata *dd)
static void free_rcverr(struct hfi1_devdata *dd)
{
- if (dd->rcverr_timer.data)
+ if (dd->rcverr_timer.function)
del_timer_sync(&dd->rcverr_timer);
- dd->rcverr_timer.data = 0;
}
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
@@ -6520,12 +6518,11 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /* Take the 8051 out of reset */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
- /* Wait until 8051 is ready */
- if (wait_fm_ready(dd, TIMEOUT_8051_START))
- dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
- __func__);
+ /*
+ * Take the 8051 out of reset, wait until 8051 is ready, and set host
+ * version bit.
+ */
+ release_and_wait_ready_8051_firmware(dd);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -6819,7 +6816,8 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
rcd = hfi1_rcd_get_by_index(dd, i);
/* Ensure all non-user contexts(including vnic) are enabled */
- if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
+ if (!rcd ||
+ (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
hfi1_rcd_put(rcd);
continue;
}
@@ -7199,27 +7197,6 @@ static int lcb_to_port_ltp(int lcb_crc)
return port_ltp;
}
-/*
- * Our neighbor has indicated that we are allowed to act as a fabric
- * manager, so place the full management partition key in the second
- * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
- * that we should already have the limited management partition key in
- * array element 1, and also that the port is not yet up when
- * add_full_mgmt_pkey() is invoked.
- */
-static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
-
- /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
- if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
- dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
- __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
- ppd->pkeys[2] = FULL_MGMT_P_KEY;
- (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
- hfi1_event_pkey_change(ppd->dd, ppd->port);
-}
-
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
{
if (ppd->pkeys[2] != 0) {
@@ -7416,11 +7393,7 @@ void handle_verify_cap(struct work_struct *work)
&partner_supported_crc);
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
read_remote_device_id(dd, &device_id, &device_rev);
- /*
- * And the 'MgmtAllowed' information, which is exchanged during
- * LNI, is also be available at this point.
- */
- read_mgmt_allowed(dd, &ppd->mgmt_allowed);
+
/* print the active widths */
get_link_widths(dd, &active_tx, &active_rx);
dd_dev_info(dd,
@@ -7548,9 +7521,6 @@ void handle_verify_cap(struct work_struct *work)
write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
set_8051_lcb_access(dd);
- if (ppd->mgmt_allowed)
- add_full_mgmt_pkey(ppd);
-
/* tell the 8051 to go to LinkUp */
set_link_state(ppd, HLS_GOING_UP);
}
@@ -8124,8 +8094,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* Check for non-user contexts, including vnic */
- if ((source < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL)))
+ if (source < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
rcd->do_interrupt(rcd, 0);
else
handle_user_interrupt(rcd);
@@ -8155,8 +8124,8 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* only pay attention to user urgent interrupts */
- if ((source >= dd->first_dyn_alloc_ctxt) &&
- (!rcd->sc || (rcd->sc->type == SC_USER)))
+ if (source >= dd->first_dyn_alloc_ctxt &&
+ !rcd->is_vnic)
handle_user_interrupt(rcd);
hfi1_rcd_put(rcd);
@@ -8595,30 +8564,23 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
+ * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
+ * will still continue executing.
+ *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int do_8051_command(
- struct hfi1_devdata *dd,
- u32 type,
- u64 in_data,
- u64 *out_data)
+static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
+ lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
- mutex_lock(&dd->dc8051_lock);
-
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8719,6 +8681,29 @@ static int do_8051_command(
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
+ return return_code;
+}
+
+/*
+ * Returns:
+ * < 0 = Linux error, not able to get access
+ * > 0 = 8051 command RETURN_CODE
+ */
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
+{
+ int return_code;
+
+ mutex_lock(&dd->dc8051_lock);
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
+ return_code = _do_8051_command(dd, type, in_data, out_data);
+
+fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -8728,16 +8713,17 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
+ lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8746,6 +8732,18 @@ int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
+{
+ int return_code;
+
+ mutex_lock(&dd->dc8051_lock);
+ return_code = _load_8051_config(dd, field_id, lane_id, config_data);
+ mutex_unlock(&dd->dc8051_lock);
+
+ return return_code;
+}
+
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8861,13 +8859,14 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
+ lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -8932,14 +8931,6 @@ static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
}
-static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
-{
- u32 frame;
-
- read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
- *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
-}
-
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
{
read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
@@ -9161,25 +9152,6 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
}
/*
- * Set the SerDes to internal loopback mode.
- * Returns 0 on success, -errno on error.
- */
-static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
-{
- int ret;
-
- ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
- if (ret == HCMD_SUCCESS)
- return 0;
- dd_dev_err(dd,
- "Set physical link state to SerDes Loopback failed with return %d\n",
- ret);
- if (ret >= 0)
- ret = -EINVAL;
- return ret;
-}
-
-/*
* Do all special steps to set up loopback.
*/
static int init_loopback(struct hfi1_devdata *dd)
@@ -9204,13 +9176,11 @@ static int init_loopback(struct hfi1_devdata *dd)
return 0;
}
- /* handle serdes loopback */
- if (loopback == LOOPBACK_SERDES) {
- /* internal serdes loopack needs quick linkup on RTL */
- if (dd->icode == ICODE_RTL_SILICON)
- quick_linkup = 1;
- return set_serdes_loopback_mode(dd);
- }
+ /*
+ * SerDes loopback init sequence is handled in set_local_link_attributes
+ */
+ if (loopback == LOOPBACK_SERDES)
+ return 0;
/* LCB loopback - handled at poll time */
if (loopback == LOOPBACK_LCB) {
@@ -9269,7 +9239,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
u8 tx_polarity_inversion;
u8 rx_polarity_inversion;
int ret;
-
+ u32 misc_bits = 0;
/* reset our fabric serdes to clear any lingering problems */
fabric_serdes_reset(dd);
@@ -9315,7 +9285,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
- ret = write_vc_local_link_width(dd, 0, 0,
+ /*
+ * SerDes loopback init sequence requires
+ * setting bit 0 of MISC_CONFIG_BITS
+ */
+ if (loopback == LOOPBACK_SERDES)
+ misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
+
+ ret = write_vc_local_link_width(dd, misc_bits, 0,
opa_to_vc_link_widths(
ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
@@ -9809,9 +9786,9 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
cancel_delayed_work_sync(&ppd->start_link_work);
ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
- OPA_LINKDOWN_REASON_SMA_DISABLED);
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
+ OPA_LINKDOWN_REASON_REBOOT);
set_link_state(ppd, HLS_DN_OFFLINE);
/* disable the port */
@@ -9952,7 +9929,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
goto unimplemented;
case HFI1_IB_CFG_OP_VLS:
- val = ppd->vls_operational;
+ val = ppd->actual_vls_operational;
break;
case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
@@ -9967,7 +9944,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
val = ppd->phy_error_threshold;
break;
case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- val = dd->link_default;
+ val = HLS_DEFAULT;
break;
case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
@@ -10170,6 +10147,10 @@ static const char * const state_complete_reasons[] = {
[0x33] =
"Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
[0x34] = tx_out_of_policy,
+ [0x35] = "Negotiated link width is mutually exclusive",
+ [0x36] =
+ "Timed out before receiving verifycap frames in VerifyCap.Exchange",
+ [0x37] = "Unable to resolve secure data exchange",
};
static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
@@ -10298,9 +10279,6 @@ static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
- /* adjust ppd->statusp, if needed */
- update_statusp(ppd, IB_PORT_DOWN);
-
dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}
@@ -10382,6 +10360,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
force_logical_link_state_down(ppd);
ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ update_statusp(ppd, IB_PORT_DOWN);
/*
* The LNI has a mandatory wait time after the physical state
@@ -10569,7 +10548,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
orig_new_state = state;
if (state == HLS_DN_DOWNDEF)
- state = dd->link_default;
+ state = HLS_DEFAULT;
/* interpret poll -> poll as a link bounce */
poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
@@ -10643,6 +10622,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
handle_linkup_change(dd, 1);
ppd->host_link_state = HLS_UP_INIT;
+ update_statusp(ppd, IB_PORT_INIT);
break;
case HLS_UP_ARMED:
if (ppd->host_link_state != HLS_UP_INIT)
@@ -10664,6 +10644,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
break;
}
ppd->host_link_state = HLS_UP_ARMED;
+ update_statusp(ppd, IB_PORT_ARMED);
/*
* The simulator does not currently implement SMA messages,
* so neighbor_normal is not set. Set it here when we first
@@ -10686,6 +10667,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
/* tell all engines to go running */
sdma_all_running(dd);
ppd->host_link_state = HLS_UP_ACTIVE;
+ update_statusp(ppd, IB_PORT_ACTIVE);
/* Signal the IB layer that the port has gone active */
event.device = &dd->verbs_dev.rdi.ibdev;
@@ -12089,9 +12071,8 @@ static void free_cntrs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;
- if (dd->synth_stats_timer.data)
+ if (dd->synth_stats_timer.function)
del_timer_sync(&dd->synth_stats_timer);
- dd->synth_stats_timer.data = 0;
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
@@ -12367,9 +12348,9 @@ static void do_update_synth_timer(struct work_struct *work)
}
}
-static void update_synth_timer(unsigned long opaque)
+static void update_synth_timer(struct timer_list *t)
{
- struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
+ struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
@@ -12387,8 +12368,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
const int bit_type_32_sz = strlen(bit_type_32);
/* set up the stats timer; the add_timer is done at the end */
- setup_timer(&dd->synth_stats_timer, update_synth_timer,
- (unsigned long)dd);
+ timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
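The two hunks above follow the kernel's timer_list conversion: the callback now receives the timer pointer and recovers its containing structure with from_timer() instead of casting an opaque unsigned long. A minimal sketch of the pattern, using a hypothetical struct foo purely for illustration:

#include <linux/timer.h>

struct foo {
	struct timer_list tmr;
	int hits;
};

static void foo_timeout(struct timer_list *t)
{
	/* recover the containing structure from the embedded timer */
	struct foo *f = from_timer(f, t, tmr);

	f->hits++;
	mod_timer(&f->tmr, jiffies + HZ);	/* re-arm one second out */
}

static void foo_start(struct foo *f)
{
	timer_setup(&f->tmr, foo_timeout, 0);
	mod_timer(&f->tmr, jiffies + HZ);
}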
/***********************/
/* per device counters */
@@ -12701,6 +12681,17 @@ const char *opa_pstate_name(u32 pstate)
return "unknown";
}
+/**
+ * update_statusp - Update userspace status flag
+ * @ppd: Port data structure
+ * @state: port state information
+ *
+ * Actual port status is determined by the host_link_state value
+ * in the ppd.
+ *
+ * host_link_state MUST be updated before updating the user space
+ * statusp.
+ */
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
/*
@@ -12726,9 +12717,11 @@ static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
break;
}
}
+ dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
+ opa_lstate_name(state), state);
}
-/*
+/**
* wait_logical_linkstate - wait for an IB link state change to occur
* @ppd: port device
* @state: the state to wait for
@@ -12759,11 +12752,6 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
msleep(20);
}
- update_statusp(ppd, state);
- dd_dev_info(ppd->dd,
- "logical state changed to %s (0x%x)\n",
- opa_lstate_name(state),
- state);
return 0;
}
@@ -12910,6 +12898,32 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
+/**
+ * get_int_mask - get 64 bit int mask
+ * @dd: the devdata
+ * @i: the csr (relative to CCE_INT_MASK)
+ *
+ * Returns the mask with the urgent interrupt mask
+ * bit clear for kernel receive contexts.
+ */
+static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
+{
+ u64 mask = U64_MAX; /* default to no change */
+
+ if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
+ int j = (i - (IS_RCVURGENT_START / 64)) * 64;
+ int k = !j ? IS_RCVURGENT_START % 64 : 0;
+
+ if (j)
+ j -= IS_RCVURGENT_START % 64;
+ /* j = 0..dd->first_dyn_alloc_ctxt - 1, k = 0..63 */
+ for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
+ /* convert to bit in mask and clear */
+ mask &= ~BIT_ULL(k);
+ }
+ return mask;
+}
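A stand-alone sketch of the index arithmetic in get_int_mask(), with made-up values — the START/END constants and the kernel-context count below are illustrative assumptions, not the chip's real IS_RCVURGENT_* values:

#include <stdint.h>
#include <stdio.h>

#define URGENT_START	200	/* hypothetical IS_RCVURGENT_START */
#define URGENT_END	264	/* hypothetical IS_RCVURGENT_END */
#define KERNEL_CTXTS	5	/* hypothetical dd->first_dyn_alloc_ctxt */

static uint64_t int_mask(uint32_t i)
{
	uint64_t mask = UINT64_MAX;	/* default: leave every source enabled */

	if (i >= URGENT_START / 64 && i < URGENT_END / 64) {
		int j = (i - URGENT_START / 64) * 64;
		int k = !j ? URGENT_START % 64 : 0;

		if (j)
			j -= URGENT_START % 64;
		/* clear the urgent bit of each kernel receive context */
		for (; j < KERNEL_CTXTS && k < 64; j++, k++)
			mask &= ~(1ULL << k);
	}
	return mask;
}

int main(void)
{
	/* with the values above, CSR 3 loses bits 8..12: 0xffffffffffffe0ff */
	printf("csr 3 mask = %#llx\n", (unsigned long long)int_mask(3));
	return 0;
}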
+
/* ========================================================================= */
/*
@@ -12923,9 +12937,12 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable)
* In HFI, the mask needs to be 1 to allow interrupts.
*/
if (enable) {
- /* enable all interrupts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
+ /* enable all interrupts but urgent on kernel contexts */
+ for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
+ u64 mask = get_int_mask(dd, i);
+
+ write_csr(dd, CCE_INT_MASK + (8 * i), mask);
+ }
init_qsfp_int(dd);
} else {
@@ -12980,7 +12997,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
if (!me->arg) /* => no irq, no affinity */
continue;
hfi1_put_irq_affinity(dd, me);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, i, me->arg);
}
/* clean structures */
@@ -12990,7 +13007,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
} else {
/* INTx */
if (dd->requested_intx_irq) {
- free_irq(dd->pcidev->irq, dd);
+ pci_free_irq(dd->pcidev, 0, dd);
dd->requested_intx_irq = 0;
}
disable_intx(dd->pcidev);
@@ -13049,10 +13066,8 @@ static int request_intx_irq(struct hfi1_devdata *dd)
{
int ret;
- snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
- dd->unit);
- ret = request_irq(dd->pcidev->irq, general_interrupt,
- IRQF_SHARED, dd->intx_name, dd);
+ ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
+ DRIVER_NAME "_%d", dd->unit);
if (ret)
dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
ret);
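Both the INTx and MSI-X paths in this patch move to the managed PCI IRQ helpers: pci_request_irq() resolves the Linux IRQ number from the vector index and builds the interrupt name from a printf-style format, and pci_free_irq() is its counterpart, so the driver no longer carries its own name buffers. A rough sketch of the pairing (the device, handler, and unit names below are placeholders):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct pci_dev *pdev, void *ctx, int unit)
{
	int ret;

	/* vector 0; the core looks up pci_irq_vector() and allocates the name */
	ret = pci_request_irq(pdev, 0, demo_handler, NULL, ctx,
			      "demo_%d", unit);
	if (ret)
		return ret;

	/* ... on teardown, release with the same vector index and cookie ... */
	pci_free_irq(pdev, 0, ctx);
	return 0;
}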
@@ -13074,7 +13089,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
first_sdma = last_general;
last_sdma = first_sdma + dd->num_sdma;
first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+ last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
@@ -13095,13 +13110,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
int idx;
struct hfi1_ctxtdata *rcd = NULL;
struct sdma_engine *sde = NULL;
+ char name[MAX_NAME_SIZE];
- /* obtain the arguments to request_irq */
+ /* obtain the arguments to pci_request_irq */
if (first_general <= i && i < last_general) {
idx = i - first_general;
handler = general_interrupt;
arg = dd;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d", dd->unit);
err_info = "general";
me->type = IRQ_GENERAL;
@@ -13110,14 +13126,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
sde = &dd->per_sdma[idx];
handler = sdma_interrupt;
arg = sde;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d sdma%d", dd->unit, idx);
err_info = "sdma";
remap_sdma_interrupts(dd, idx, i);
me->type = IRQ_SDMA;
} else if (first_rx <= i && i < last_rx) {
idx = i - first_rx;
- rcd = hfi1_rcd_get_by_index(dd, idx);
+ rcd = hfi1_rcd_get_by_index_safe(dd, idx);
if (rcd) {
/*
* Set the interrupt register and mask for this
@@ -13129,7 +13145,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
handler = receive_context_interrupt;
thread = receive_context_thread;
arg = rcd;
- snprintf(me->name, sizeof(me->name),
+ snprintf(name, sizeof(name),
DRIVER_NAME "_%d kctxt%d",
dd->unit, idx);
err_info = "receive context";
@@ -13150,18 +13166,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
if (!arg)
continue;
/* make sure the name is terminated */
- me->name[sizeof(me->name) - 1] = 0;
+ name[sizeof(name) - 1] = 0;
me->irq = pci_irq_vector(dd->pcidev, i);
- /*
- * On err return me->irq. Don't need to clear this
- * because 'arg' has not been set, and cleanup will
- * do the right thing.
- */
- if (me->irq < 0)
- return me->irq;
-
- ret = request_threaded_irq(me->irq, handler, thread, 0,
- me->name, arg);
+ ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
+ name);
if (ret) {
dd_dev_err(dd,
"unable to allocate %s interrupt, irq %d, index %d, err %d\n",
@@ -13169,7 +13177,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
return ret;
}
/*
- * assign arg after request_irq call, so it will be
+ * assign arg after pci_request_irq call, so it will be
* cleaned up
*/
me->arg = arg;
@@ -13187,7 +13195,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
int i;
if (!dd->num_msix_entries) {
- synchronize_irq(dd->pcidev->irq);
+ synchronize_irq(pci_irq_vector(dd->pcidev, 0));
return;
}
@@ -13208,7 +13216,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
return;
hfi1_put_irq_affinity(dd, me);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
me->arg = NULL;
}
@@ -13231,28 +13239,21 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
((IS_RCVAVAIL_START + idx) % 64);
-
- snprintf(me->name, sizeof(me->name),
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- me->name[sizeof(me->name) - 1] = 0;
me->type = IRQ_RCVCTXT;
me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
- if (me->irq < 0) {
- dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
- idx, me->irq);
- return;
- }
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
- ret = request_threaded_irq(me->irq, receive_context_interrupt,
- receive_context_thread, 0, me->name, arg);
+ ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
+ receive_context_interrupt,
+ receive_context_thread, arg,
+ DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
if (ret) {
dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
me->irq, idx, ret);
return;
}
/*
- * assign arg after request_irq call, so it will be
+ * assign arg after pci_request_irq call, so it will be
* cleaned up
*/
me->arg = arg;
@@ -13261,7 +13262,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
if (ret) {
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
- free_irq(me->irq, me->arg);
+ pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
}
}
@@ -13294,8 +13295,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
* slow source, SDMACleanupDone)
* N interrupts - one per used SDMA engine
* M interrupt - one per kernel receive context
+ * V interrupt - one for each VNIC context
*/
- total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
/* ask for MSI-X interrupts */
request = request_msix(dd, total);
@@ -13356,15 +13358,18 @@ fail:
* in array of contexts
* freectxts - number of free user contexts
* num_send_contexts - number of PIO send contexts being used
+ * num_vnic_contexts - number of contexts reserved for VNIC
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
unsigned long num_kernel_contexts;
+ u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
int total_contexts;
int ret;
unsigned ngroups;
int qos_rmt_count;
int user_rmt_reduced;
+ u32 n_usr_ctxts;
/*
* Kernel receive contexts:
@@ -13393,59 +13398,63 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts);
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
}
+
+ /* Accommodate VNIC contexts if possible */
+ if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
+ dd_dev_err(dd, "No receive contexts available for VNIC\n");
+ num_vnic_contexts = 0;
+ }
+ total_contexts = num_kernel_contexts + num_vnic_contexts;
+
/*
* User contexts:
* - default to 1 user context per real (non-HT) CPU core if
* num_user_contexts is negative
*/
if (num_user_contexts < 0)
- num_user_contexts =
- cpumask_weight(&node_affinity.real_cpu_mask);
-
- total_contexts = num_kernel_contexts + num_user_contexts;
-
+ n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
+ else
+ n_usr_ctxts = num_user_contexts;
/*
* Adjust the counts given a global max.
*/
- if (total_contexts > dd->chip_rcv_contexts) {
+ if (total_contexts + n_usr_ctxts > dd->chip_rcv_contexts) {
dd_dev_err(dd,
- "Reducing # user receive contexts to: %d, from %d\n",
- (int)(dd->chip_rcv_contexts - num_kernel_contexts),
- (int)num_user_contexts);
- num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
+ "Reducing # user receive contexts to: %d, from %u\n",
+ (int)(dd->chip_rcv_contexts - total_contexts),
+ n_usr_ctxts);
/* recalculate */
- total_contexts = num_kernel_contexts + num_user_contexts;
+ n_usr_ctxts = dd->chip_rcv_contexts - total_contexts;
}
/* each user context requires an entry in the RMT */
qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
- if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
+ if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
dd_dev_err(dd,
- "RMT size is reducing the number of user receive contexts from %d to %d\n",
- (int)num_user_contexts,
+ "RMT size is reducing the number of user receive contexts from %u to %d\n",
+ n_usr_ctxts,
user_rmt_reduced);
/* recalculate */
- num_user_contexts = user_rmt_reduced;
- total_contexts = num_kernel_contexts + num_user_contexts;
+ n_usr_ctxts = user_rmt_reduced;
}
- /* Accommodate VNIC contexts */
- if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
- total_contexts += HFI1_NUM_VNIC_CTXT;
+ total_contexts += n_usr_ctxts;
/* the first N are kernel contexts, the rest are user/vnic contexts */
dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
- dd->num_user_contexts = num_user_contexts;
- dd->freectxts = num_user_contexts;
+ dd->num_vnic_contexts = num_vnic_contexts;
+ dd->num_user_contexts = n_usr_ctxts;
+ dd->freectxts = n_usr_ctxts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
+ "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
(int)dd->chip_rcv_contexts,
(int)dd->num_rcv_contexts,
(int)dd->n_krcv_queues,
- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
+ dd->num_vnic_contexts,
+ dd->num_user_contexts);
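As a worked example of the new accounting (the numbers are illustrative only, not the chip's real limits): with 160 chip receive contexts, 16 kernel contexts and 8 reserved VNIC contexts, total_contexts starts at 24; a request for 200 user contexts is first clipped to 160 - 24 = 136, may be reduced further by the RMT entry check, and whatever remains is added to total_contexts and reported in the "kernel %d, vnic %u, user %u" message above.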
/*
* Receive array allocation:
@@ -14962,8 +14971,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
init_vl_arb_caches(ppd);
}
- dd->link_default = HLS_DN_POLL;
-
/*
* Do remaining PCIe setup and save PCIe values in dd.
* Any error printing is already done by the init code.
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 50b8645d0b87..133e313feca4 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -560,7 +560,7 @@ enum {
/* timeouts */
#define LINK_RESTART_DELAY 1000 /* link restart delay, in ms */
#define TIMEOUT_8051_START 5000 /* 8051 start timeout, in ms */
-#define DC8051_COMMAND_TIMEOUT 20000 /* DC8051 command timeout, in ms */
+#define DC8051_COMMAND_TIMEOUT 1000 /* DC8051 command timeout, in ms */
#define FREEZE_STATUS_TIMEOUT 20 /* wait for freeze indicators, in ms */
#define VL_STATUS_CLEAR_TIMEOUT 5000 /* per-VL status clear, in ms */
#define CCE_STATUS_TIMEOUT 10 /* time to clear CCE Status, in ms */
@@ -583,6 +583,9 @@ enum {
#define LOOPBACK_LCB 2
#define LOOPBACK_CABLE 3 /* external cable */
+/* set up serdes bit in MISC_CONFIG_BITS */
+#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+
/* read and write hardware registers */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
@@ -710,6 +713,7 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 3e27794ec750..7108d4d92259 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -328,6 +328,7 @@ struct diag_pkt {
#define SC15_PACKET 0xF
#define SIZE_OF_CRC 1
#define SIZE_OF_LT 1
+#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
#define LIM_MGMT_P_KEY 0x7FFF
#define FULL_MGMT_P_KEY 0xFFFF
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 36ae1fd86502..2e6e0c516041 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -71,13 +71,13 @@ static ssize_t hfi1_seq_read(
loff_t *ppos)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
ssize_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_read(file, buf, size, ppos);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_read(file, buf, size, ppos);
+ debugfs_file_put(d);
return r;
}
@@ -87,13 +87,13 @@ static loff_t hfi1_seq_lseek(
int whence)
{
struct dentry *d = file->f_path.dentry;
- int srcu_idx;
loff_t r;
- r = debugfs_use_file_start(d, &srcu_idx);
- if (likely(!r))
- r = seq_lseek(file, offset, whence);
- debugfs_use_file_finish(srcu_idx);
+ r = debugfs_file_get(d);
+ if (unlikely(r))
+ return r;
+ r = seq_lseek(file, offset, whence);
+ debugfs_file_put(d);
return r;
}
@@ -165,6 +165,17 @@ static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
{
}
+static int opcode_stats_show(struct seq_file *s, u8 i, u64 packets, u64 bytes)
+{
+ if (!packets && !bytes)
+ return SEQ_SKIP;
+ seq_printf(s, "%02x %llu/%llu\n", i,
+ (unsigned long long)packets,
+ (unsigned long long)bytes);
+
+ return 0;
+}
+
static int _opcode_stats_seq_show(struct seq_file *s, void *v)
{
loff_t *spos = v;
@@ -182,19 +193,49 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
}
hfi1_rcd_put(rcd);
}
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long)n_packets,
- (unsigned long long)n_bytes);
-
- return 0;
+ return opcode_stats_show(s, i, n_packets, n_bytes);
}
DEBUGFS_SEQ_FILE_OPS(opcode_stats);
DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
DEBUGFS_FILE_OPS(opcode_stats);
+static void *_tx_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return _opcode_stats_seq_start(s, pos);
+}
+
+static void *_tx_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return _opcode_stats_seq_next(s, v, pos);
+}
+
+static void _tx_opcode_stats_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _tx_opcode_stats_seq_show(struct seq_file *s, void *v)
+{
+ loff_t *spos = v;
+ loff_t i = *spos;
+ int j;
+ u64 n_packets = 0, n_bytes = 0;
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ for_each_possible_cpu(j) {
+ struct hfi1_opcode_stats_perctx *s =
+ per_cpu_ptr(dd->tx_opstats, j);
+ n_packets += s->stats[i].n_packets;
+ n_bytes += s->stats[i].n_bytes;
+ }
+ return opcode_stats_show(s, i, n_packets, n_bytes);
+}
+
+DEBUGFS_SEQ_FILE_OPS(tx_opcode_stats);
+DEBUGFS_SEQ_FILE_OPEN(tx_opcode_stats)
+DEBUGFS_FILE_OPS(tx_opcode_stats);
+
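dd->tx_opstats (allocated later in this patch with alloc_percpu()) holds one counter block per CPU, and the seq_show handler above folds them together at read time. A minimal sketch of that per-CPU counter pattern, with placeholder names:

#include <linux/percpu.h>

struct demo_stats {
	u64 n_packets;
	u64 n_bytes;
};

static struct demo_stats __percpu *demo;

static int demo_init(void)
{
	demo = alloc_percpu(struct demo_stats);
	return demo ? 0 : -ENOMEM;
}

/* hot path: bump only this CPU's copy, no locking needed */
static void demo_account(u32 bytes)
{
	struct demo_stats *s = get_cpu_ptr(demo);	/* disables preemption */

	s->n_packets++;
	s->n_bytes += bytes;
	put_cpu_ptr(demo);
}

/* read side: sum over all CPUs, as the debugfs show routine does */
static void demo_sum(u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = *bytes = 0;
	for_each_possible_cpu(cpu) {
		struct demo_stats *s = per_cpu_ptr(demo, cpu);

		*packets += s->n_packets;
		*bytes += s->n_bytes;
	}
}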
static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
{
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
@@ -243,7 +284,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
spos = v;
i = *spos;
- rcd = hfi1_rcd_get_by_index(dd, i);
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
if (!rcd)
return SEQ_SKIP;
@@ -402,7 +443,7 @@ static int _rcds_seq_show(struct seq_file *s, void *v)
loff_t *spos = v;
loff_t i = *spos;
- rcd = hfi1_rcd_get_by_index(dd, i);
+ rcd = hfi1_rcd_get_by_index_safe(dd, i);
if (rcd)
seqfile_dump_rcd(s, rcd);
hfi1_rcd_put(rcd);
@@ -1363,6 +1404,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
return;
}
DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(tx_opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 7372cc00cb2d..4f65ac671044 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -433,6 +433,12 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->numpkt = 0;
}
+/* We support only two types - 9B and 16B for now */
+static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &return_cnp,
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+};
+
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
@@ -866,7 +872,7 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
if (rcd) {
rcd->do_interrupt =
&handle_receive_interrupt_nodma_rtail;
@@ -895,7 +901,7 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
if (rcd) {
rcd->do_interrupt =
&handle_receive_interrupt_dma_rtail;
@@ -923,10 +929,9 @@ void set_all_slowpath(struct hfi1_devdata *dd)
rcd = hfi1_rcd_get_by_index(dd, i);
if (!rcd)
continue;
- if ((i < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL))) {
+ if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
rcd->do_interrupt = &handle_receive_interrupt;
- }
+
hfi1_rcd_put(rcd);
}
}
@@ -1252,9 +1257,9 @@ void shutdown_led_override(struct hfi1_pportdata *ppd)
write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}
-static void run_led_override(unsigned long opaque)
+static void run_led_override(struct timer_list *t)
{
- struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
+ struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
struct hfi1_devdata *dd = ppd->dd;
unsigned long timeout;
int phase_idx;
@@ -1298,8 +1303,7 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
* timeout so the handler will be called soon to look at our request.
*/
if (!timer_pending(&ppd->led_override_timer)) {
- setup_timer(&ppd->led_override_timer, run_led_override,
- (unsigned long)ppd);
+ timer_setup(&ppd->led_override_timer, run_led_override, 0);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 1);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index d9a1e9893136..7750a9c38b06 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -78,16 +78,20 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len);
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len);
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
@@ -101,10 +105,11 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- unsigned long events);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
+ unsigned long arg);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- int start_stop);
+ unsigned long arg);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg);
@@ -221,13 +226,8 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_info uinfo;
- struct hfi1_tid_info tinfo;
int ret = 0;
- unsigned long addr;
int uval = 0;
- unsigned long ul_uval = 0;
- u16 uval16 = 0;
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -237,171 +237,55 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
switch (cmd) {
case HFI1_IOCTL_ASSIGN_CTXT:
- if (uctxt)
- return -EINVAL;
-
- if (copy_from_user(&uinfo,
- (struct hfi1_user_info __user *)arg,
- sizeof(uinfo)))
- return -EFAULT;
-
- ret = assign_ctxt(fd, &uinfo);
+ ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_CTXT_INFO:
- ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
- sizeof(struct hfi1_ctxt_info));
+ ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_USER_INFO:
- ret = get_base_info(fd, (void __user *)(unsigned long)arg,
- sizeof(struct hfi1_base_info));
+ ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
break;
+
case HFI1_IOCTL_CREDIT_UPD:
if (uctxt)
sc_return_credits(uctxt->sc);
break;
case HFI1_IOCTL_TID_UPDATE:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
- if (!ret) {
- /*
- * Copy the number of tidlist entries we used
- * and the length of the buffer we registered.
- */
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- return -EFAULT;
-
- addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
- sizeof(tinfo.length)))
- ret = -EFAULT;
- }
+ ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_FREE:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
- if (ret)
- break;
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
+ ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_TID_INVAL_READ:
- if (copy_from_user(&tinfo,
- (struct hfi11_tid_info __user *)arg,
- sizeof(tinfo)))
- return -EFAULT;
-
- ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
- if (ret)
- break;
- addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
- if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt)))
- ret = -EFAULT;
+ ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
break;
case HFI1_IOCTL_RECV_CTRL:
- ret = get_user(uval, (int __user *)arg);
- if (ret != 0)
- return -EFAULT;
- ret = manage_rcvq(uctxt, fd->subctxt, uval);
+ ret = manage_rcvq(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_POLL_TYPE:
- ret = get_user(uval, (int __user *)arg);
- if (ret != 0)
+ if (get_user(uval, (int __user *)arg))
return -EFAULT;
uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
case HFI1_IOCTL_ACK_EVENT:
- ret = get_user(ul_uval, (unsigned long __user *)arg);
- if (ret != 0)
- return -EFAULT;
- ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
+ ret = user_event_ack(uctxt, fd->subctxt, arg);
break;
case HFI1_IOCTL_SET_PKEY:
- ret = get_user(uval16, (u16 __user *)arg);
- if (ret != 0)
- return -EFAULT;
- if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
- else
- return -EPERM;
+ ret = set_ctxt_pkey(uctxt, arg);
break;
- case HFI1_IOCTL_CTXT_RESET: {
- struct send_context *sc;
- struct hfi1_devdata *dd;
-
- if (!uctxt || !uctxt->dd || !uctxt->sc)
- return -EINVAL;
-
- /*
- * There is no protection here. User level has to
- * guarantee that no one will be writing to the send
- * context while it is being re-initialized.
- * If user level breaks that guarantee, it will break
- * it's own context and no one else's.
- */
- dd = uctxt->dd;
- sc = uctxt->sc;
- /*
- * Wait until the interrupt handler has marked the
- * context as halted or frozen. Report error if we time
- * out.
- */
- wait_event_interruptible_timeout(
- sc->halt_wait, (sc->flags & SCF_HALTED),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (!(sc->flags & SCF_HALTED))
- return -ENOLCK;
-
- /*
- * If the send context was halted due to a Freeze,
- * wait until the device has been "unfrozen" before
- * resetting the context.
- */
- if (sc->flags & SCF_FROZEN) {
- wait_event_interruptible_timeout(
- dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
- msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (dd->flags & HFI1_FROZEN)
- return -ENOLCK;
-
- if (dd->flags & HFI1_FORCED_FREEZE)
- /*
- * Don't allow context reset if we are into
- * forced freeze
- */
- return -ENODEV;
-
- sc_disable(sc);
- ret = sc_enable(sc);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
- } else {
- ret = sc_restart(sc);
- }
- if (!ret)
- sc_return_credits(sc);
+ case HFI1_IOCTL_CTXT_RESET:
+ ret = ctxt_reset(uctxt);
break;
- }
case HFI1_IOCTL_GET_VERS:
uval = HFI1_USER_SWVERSION;
@@ -595,9 +479,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* Use the page where this context's flags are. User level
* knows where it's own bitmap is within the page.
*/
- memaddr = (unsigned long)(dd->events +
- ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
+ memaddr = (unsigned long)
+ (dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
memlen = PAGE_SIZE;
/*
* v3.7 removes VM_RESERVED but the effect is kept by
@@ -779,8 +662,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
* Clear any left over, unhandled events so the next process that
* gets this context doesn't get confused.
*/
- ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
+ ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
*ev = 0;
spin_lock_irqsave(&dd->uctxt_lock, flags);
@@ -891,21 +773,29 @@ static int complete_subctxt(struct hfi1_filedata *fd)
return ret;
}
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
int ret;
- unsigned int swmajor, swminor;
+ unsigned int swmajor;
struct hfi1_ctxtdata *uctxt = NULL;
+ struct hfi1_user_info uinfo;
+
+ if (fd->uctxt)
+ return -EINVAL;
+
+ if (sizeof(uinfo) != len)
+ return -EINVAL;
- swmajor = uinfo->userversion >> 16;
+ if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
+ return -EFAULT;
+
+ swmajor = uinfo.userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR)
return -ENODEV;
- if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+ if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
return -EINVAL;
- swminor = uinfo->userversion & 0xffff;
-
/*
* Acquire the mutex to protect against multiple creations of what
* could be a shared base context.
@@ -915,14 +805,14 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
* Get a sub context if available (fd->uctxt will be set).
* ret < 0 error, 0 no context, 1 sub-context found
*/
- ret = find_sub_ctxt(fd, uinfo);
+ ret = find_sub_ctxt(fd, &uinfo);
/*
* Allocate a base context if context sharing is not required or a
* sub context wasn't found.
*/
if (!ret)
- ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
+ ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
mutex_unlock(&hfi1_mutex);
@@ -1230,12 +1120,13 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len)
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_ctxt_info cinfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- int ret = 0;
+
+ if (sizeof(cinfo) != len)
+ return -EINVAL;
memset(&cinfo, 0, sizeof(cinfo));
cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
@@ -1265,10 +1156,10 @@ static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
- if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
- ret = -EFAULT;
+ if (copy_to_user((void __user *)arg, &cinfo, len))
+ return -EFAULT;
- return ret;
+ return 0;
}
static int init_user_ctxt(struct hfi1_filedata *fd,
@@ -1344,18 +1235,18 @@ done:
return ret;
}
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
- __u32 len)
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_base_info binfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- ssize_t sz;
unsigned offset;
- int ret = 0;
trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
+ if (sizeof(binfo) != len)
+ return -EINVAL;
+
memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
@@ -1385,39 +1276,152 @@ static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
fd->subctxt,
uctxt->egrbufs.rcvtids[0].dma);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
- fd->subctxt, 0);
+ fd->subctxt, 0);
/*
* user regs are at
* (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
*/
binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
- fd->subctxt, 0);
- offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
- sizeof(*dd->events));
+ fd->subctxt, 0);
+ offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
+ sizeof(*dd->events));
binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
- fd->subctxt,
- offset);
+ fd->subctxt,
+ offset);
binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
- fd->subctxt,
- dd->status);
+ fd->subctxt,
+ dd->status);
if (HFI1_CAP_IS_USET(DMA_RTAIL))
binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
- fd->subctxt, 0);
+ fd->subctxt, 0);
if (uctxt->subctxt_cnt) {
binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
- uctxt->ctxt,
- fd->subctxt, 0);
- binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
uctxt->ctxt,
fd->subctxt, 0);
+ binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
+ uctxt->ctxt,
+ fd->subctxt, 0);
binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
- uctxt->ctxt,
- fd->subctxt, 0);
+ uctxt->ctxt,
+ fd->subctxt, 0);
}
- sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
- if (copy_to_user(ubase, &binfo, sz))
+
+ if (copy_to_user((void __user *)arg, &binfo, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * user_exp_rcv_setup - Set up the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_setup.
+ *
+ */
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
+ if (!ret) {
+ /*
+ * Copy the number of tidlist entries we used
+ * and the length of the buffer we registered.
+ */
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+
+ addr = arg + offsetof(struct hfi1_tid_info, length);
+ if (copy_to_user((void __user *)addr, &tinfo.length,
+ sizeof(tinfo.length)))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_clear - Clear the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * hfi1_user_exp_rcv_clear() can be called from the error path. Because
+ * of this, we need to use this wrapper to copy the user space information
+ * before doing the clear.
+ */
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
+ if (!ret) {
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+/**
+ * user_exp_rcv_invalid - Invalidate the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argument for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_invalid.
+ *
+ */
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+ u32 len)
+{
+ int ret;
+ unsigned long addr;
+ struct hfi1_tid_info tinfo;
+
+ if (sizeof(tinfo) != len)
+ return -EINVAL;
+
+ if (!fd->invalid_tids)
+ return -EINVAL;
+
+ if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
+ if (ret)
+ return ret;
+
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
ret = -EFAULT;
+
return ret;
}
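The three wrappers above share one shape: check the ioctl size against the expected structure, copy the whole argument in, and copy back only the fields the kernel updated by computing their offset inside the user buffer. A condensed sketch of that pattern with a hypothetical structure (not part of the hfi1 uAPI):

#include <linux/uaccess.h>
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_info {
	__u64 vaddr;
	__u32 count;		/* filled in by the kernel */
};

static int demo_ioctl(unsigned long arg, u32 len)
{
	struct demo_info info;
	unsigned long addr;

	if (sizeof(info) != len)
		return -EINVAL;
	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	info.count = 42;	/* stand-in for the real work */

	/* write back only the field that changed */
	addr = arg + offsetof(struct demo_info, count);
	if (copy_to_user((void __user *)addr, &info.count, sizeof(info.count)))
		return -EFAULT;
	return 0;
}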
@@ -1485,14 +1489,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
ctxt++) {
uctxt = hfi1_rcd_get_by_index(dd, ctxt);
if (uctxt) {
- unsigned long *evs = dd->events +
- (uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS;
+ unsigned long *evs;
int i;
/*
* subctxt_cnt is 0 if not shared, so do base
* separately, first, then remaining subctxt, if any
*/
+ evs = dd->events + uctxt_offset(uctxt);
set_bit(evtbit, evs);
for (i = 1; i < uctxt->subctxt_cnt; i++)
set_bit(evtbit, evs + i);
@@ -1514,13 +1517,18 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
* re-init the software copy of the head register
*/
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- int start_stop)
+ unsigned long arg)
{
struct hfi1_devdata *dd = uctxt->dd;
unsigned int rcvctrl_op;
+ int start_stop;
if (subctxt)
- goto bail;
+ return 0;
+
+ if (get_user(start_stop, (int __user *)arg))
+ return -EFAULT;
+
/* atomically clear receive enable ctxt. */
if (start_stop) {
/*
@@ -1539,7 +1547,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
}
hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
/* always; new head should be equal to new tail; see above */
-bail:
+
return 0;
}
@@ -1549,17 +1557,20 @@ bail:
* set, if desired, and checks again in future.
*/
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
- unsigned long events)
+ unsigned long arg)
{
int i;
struct hfi1_devdata *dd = uctxt->dd;
unsigned long *evs;
+ unsigned long events;
if (!dd->events)
return 0;
- evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + subctxt;
+ if (get_user(events, (unsigned long __user *)arg))
+ return -EFAULT;
+
+ evs = dd->events + uctxt_offset(uctxt) + subctxt;
for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
if (!test_bit(i, &events))
@@ -1569,26 +1580,89 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
return 0;
}
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
{
- int ret = -ENOENT, i, intable = 0;
+ int i;
struct hfi1_pportdata *ppd = uctxt->ppd;
struct hfi1_devdata *dd = uctxt->dd;
+ u16 pkey;
- if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
- ret = -EINVAL;
- goto done;
- }
+ if (!HFI1_CAP_IS_USET(PKEY_CHECK))
+ return -EPERM;
+
+ if (get_user(pkey, (u16 __user *)arg))
+ return -EFAULT;
+
+ if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
+ return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
- if (pkey == ppd->pkeys[i]) {
- intable = 1;
- break;
- }
+ if (pkey == ppd->pkeys[i])
+ return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
+
+ return -ENOENT;
+}
+
+/**
+ * ctxt_reset - Reset the user context
+ * @uctxt: valid user context
+ */
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
+{
+ struct send_context *sc;
+ struct hfi1_devdata *dd;
+ int ret = 0;
+
+ if (!uctxt || !uctxt->dd || !uctxt->sc)
+ return -EINVAL;
+
+ /*
+ * There is no protection here. User level has to guarantee that
+ * no one will be writing to the send context while it is being
+ * re-initialized. If user level breaks that guarantee, it will
+ * break its own context and no one else's.
+ */
+ dd = uctxt->dd;
+ sc = uctxt->sc;
+
+ /*
+ * Wait until the interrupt handler has marked the context as
+ * halted or frozen. Report error if we time out.
+ */
+ wait_event_interruptible_timeout(
+ sc->halt_wait, (sc->flags & SCF_HALTED),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (!(sc->flags & SCF_HALTED))
+ return -ENOLCK;
+
+ /*
+ * If the send context was halted due to a Freeze, wait until the
+ * device has been "unfrozen" before resetting the context.
+ */
+ if (sc->flags & SCF_FROZEN) {
+ wait_event_interruptible_timeout(
+ dd->event_queue,
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
+ msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
+ if (dd->flags & HFI1_FROZEN)
+ return -ENOLCK;
+
+ if (dd->flags & HFI1_FORCED_FREEZE)
+ /*
+ * Don't allow context reset if we are into
+ * forced freeze
+ */
+ return -ENODEV;
+
+ sc_disable(sc);
+ ret = sc_enable(sc);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
+ } else {
+ ret = sc_restart(sc);
+ }
+ if (!ret)
+ sc_return_credits(sc);
- if (intable)
- ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
-done:
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 5aea8f47e670..98868df78a7e 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -70,6 +70,11 @@
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
#define HOST_INTERFACE_VERSION 1
+MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
+MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
+MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
+
static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
@@ -113,6 +118,12 @@ struct css_header {
#define MU_SIZE 8
#define EXPONENT_SIZE 4
+/* size of platform configuration partition */
+#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
+
+/* size of a platform configuration file encoded in format version 4 */
+#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
+
/* the file itself */
struct firmware_file {
struct css_header css_header;
@@ -965,6 +976,46 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
+ * Clear all reset bits, releasing the 8051.
+ * Wait for firmware to be ready to accept host requests.
+ * Then, set host version bit.
+ *
+ * This function executes even if the 8051 is in reset mode when
+ * dd->dc_shutdown == 1.
+ *
+ * Expects dd->dc8051_lock to be held.
+ */
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
+{
+ int ret;
+
+ lockdep_assert_held(&dd->dc8051_lock);
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
+ /*
+ * Wait for firmware to be ready to accept host
+ * requests.
+ */
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) {
+ dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
+ get_firmware_state(dd));
+ return ret;
+ }
+
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1029,31 +1080,22 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
/*
+ * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
+ * Then, set host version bit.
*/
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) { /* timed out */
- dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
- get_firmware_state(dd));
- return -ETIMEDOUT;
- }
+ mutex_lock(&dd->dc8051_lock);
+ ret = release_and_wait_ready_8051_firmware(dd);
+ mutex_unlock(&dd->dc8051_lock);
+ if (ret)
+ return ret;
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
return 0;
}
@@ -1387,7 +1429,14 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
unsigned long timeout;
int try = 0;
u8 mask = 1 << dd->hfi1_id;
- u8 user;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user == mask) {
+ dd_dev_info(dd,
+ "Hardware mutex already acquired, mutex mask %u\n",
+ (u32)mask);
+ return 0;
+ }
retry:
timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
@@ -1418,7 +1467,15 @@ retry:
void release_hw_mutex(struct hfi1_devdata *dd)
{
- write_csr(dd, ASIC_CFG_MUTEX, 0);
+ u8 mask = 1 << dd->hfi1_id;
+ u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
+
+ if (user != mask)
+ dd_dev_warn(dd,
+ "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
+ (u32)user, (u32)mask);
+ else
+ write_csr(dd, ASIC_CFG_MUTEX, 0);
}
/* return the given resource bit(s) as a mask for the given HFI */
@@ -1733,7 +1790,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
ver_start /= 8;
meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
- if (meta_ver < 5) {
+ if (meta_ver < 4) {
dd_dev_info(
dd, "%s:Please update platform config\n", __func__);
return -EINVAL;
@@ -1774,7 +1831,20 @@ int parse_platform_config(struct hfi1_devdata *dd)
/* Field is file size in DWORDs */
file_length = (*ptr) * 4;
- ptr++;
+
+ /*
+ * The length can't be larger than the partition size. Assume the
+ * platform config is in format version 4 and treat the file size
+ * field as part of the header by not advancing the pointer.
+ */
+ if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
+ dd_dev_info(dd,
+ "%s:File length out of bounds, using alternative format\n",
+ __func__);
+ file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
+ } else {
+ ptr++;
+ }
if (file_length > dd->platform_config.size) {
dd_dev_info(dd, "%s:File claims to be larger than read size\n",
@@ -1789,7 +1859,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
/*
* In both cases where we proceed, using the self-reported file length
- * is the safer option
+ * is the safer option. In case of old format a predefined value is
+ * being used.
*/
while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
header1 = *ptr;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 3ac9c307a285..4a9b4d7efe63 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -95,6 +95,9 @@
#define DROP_PACKET_OFF 0
#define DROP_PACKET_ON 1
+#define NEIGHBOR_TYPE_HFI 0
+#define NEIGHBOR_TYPE_SWITCH 1
+
extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
@@ -164,9 +167,7 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
* Below contains all data related to a single context (formerly called port).
*/
-#ifdef CONFIG_DEBUG_FS
struct hfi1_opcode_stats_perctx;
-#endif
struct ctxt_eager_bufs {
ssize_t size; /* total size of eager buffers */
@@ -283,7 +284,7 @@ struct hfi1_ctxtdata {
u64 imask; /* clear interrupt mask */
int ireg; /* clear interrupt register */
unsigned numa_id; /* numa node of this context */
- /* verbs stats per CTX */
+ /* verbs rx_stats per rcd */
struct hfi1_opcode_stats_perctx *opstats;
/* Is ASPM interrupt supported for this context */
@@ -390,6 +391,7 @@ struct hfi1_packet {
/*
* OPA 16B L2/L4 Encodings
*/
+#define OPA_16B_L4_9B 0x00
#define OPA_16B_L2_TYPE 0x02
#define OPA_16B_L4_IB_LOCAL 0x09
#define OPA_16B_L4_IB_GLOBAL 0x0A
@@ -535,6 +537,8 @@ struct rvt_sge_state;
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)
+#define HLS_DEFAULT HLS_DN_POLL
+
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
@@ -616,7 +620,6 @@ struct hfi1_msix_entry {
enum irq_type type;
int irq;
void *arg;
- char name[MAX_NAME_SIZE];
cpumask_t mask;
struct irq_affinity_notify notify;
};
@@ -1047,6 +1050,8 @@ struct hfi1_devdata {
u64 z_send_schedule;
u64 __percpu *send_schedule;
+ /* number of reserved contexts for VNIC usage */
+ u16 num_vnic_contexts;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -1109,8 +1114,7 @@ struct hfi1_devdata {
u16 rcvegrbufsize_shift;
/* both sides of the PCIe link are gen3 capable */
u8 link_gen3_capable;
- /* default link down value (poll/sleep) */
- u8 link_default;
+ u8 dc_shutdown;
/* localbus width (1, 2,4,8,16,32) from config space */
u32 lbus_width;
/* localbus speed in MHz */
@@ -1183,7 +1187,6 @@ struct hfi1_devdata {
/* INTx information */
u32 requested_intx_irq; /* did we request one? */
- char intx_name[MAX_NAME_SIZE]; /* INTx name */
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1274,6 +1277,8 @@ struct hfi1_devdata {
/* receive context data */
struct hfi1_ctxtdata **rcd;
u64 __percpu *int_counter;
+ /* verbs tx opcode stats */
+ struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
/* device (not port) flags, basically device capabilities */
u16 flags;
/* Number of physical ports available */
@@ -1295,7 +1300,6 @@ struct hfi1_devdata {
u8 oui1;
u8 oui2;
u8 oui3;
- u8 dc_shutdown;
/* Timer and counter used to detect RcvBufOvflCnt changes */
struct timer_list rcverr_timer;
@@ -1373,8 +1377,12 @@ struct hfi1_filedata {
extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);
-extern u32 hfi1_cpulist_count;
-extern unsigned long *hfi1_cpulist;
+
+static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
+{
+ return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
+ HFI1_MAX_SHARED_CTXTS;
+}
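uctxt_offset() centralizes the event-array indexing that file_ops.c previously open-coded: each dynamically allocated context owns HFI1_MAX_SHARED_CTXTS consecutive slots in dd->events, and a subcontext indexes within that block. As an illustrative example (the constants are assumptions, not the driver's real values): with first_dyn_alloc_ctxt = 3 and HFI1_MAX_SHARED_CTXTS = 8, context 5 starts at dd->events[16], and its subcontext 2 uses dd->events[16 + 2].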
int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);
@@ -1396,6 +1404,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
@@ -1531,11 +1541,6 @@ typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
u8 sc5, const struct ib_grh *old_grh);
-/* We support only two types - 9B and 16B for now */
-static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
- [HFI1_PKT_TYPE_9B] = &return_cnp,
- [HFI1_PKT_TYPE_16B] = &return_cnp_16B
-};
#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
u8 sc5, int8_t s_pkey_index);
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index fba77001c3a7..8e3b3e7d829a 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -123,8 +123,6 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
static inline u64 encode_rcv_header_entry_size(u16 size);
static struct idr hfi1_unit_table;
-u32 hfi1_cpulist_count;
-unsigned long *hfi1_cpulist;
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
struct hfi1_pportdata *ppd)
@@ -286,6 +284,27 @@ static int allocate_rcd_index(struct hfi1_devdata *dd,
}
/**
+ * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
+ * array
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * This is a wrapper for hfi1_rcd_get_by_index() to check that the given
+ * ctxt index is in range before doing the lookup.
+ *
+ * The caller is responsible for calling hfi1_rcd_put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+ u16 ctxt)
+{
+ if (ctxt < dd->num_rcv_contexts)
+ return hfi1_rcd_get_by_index(dd, ctxt);
+
+ return NULL;
+}
+
+/**
* hfi1_rcd_get_by_index
* @dd: pointer to a valid devdata structure
* @ctxt: the index of a possible rcd
@@ -1006,7 +1025,7 @@ static void stop_timers(struct hfi1_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- if (ppd->led_override_timer.data) {
+ if (ppd->led_override_timer.function) {
del_timer_sync(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 0);
}
@@ -1198,6 +1217,7 @@ static void __hfi1_free_devdata(struct kobject *kobj)
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
free_percpu(dd->send_schedule);
+ free_percpu(dd->tx_opstats);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1272,39 +1292,27 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
goto bail;
}
dd->rcv_limit = alloc_percpu(u64);
if (!dd->rcv_limit) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu rcv_limit\n");
goto bail;
}
dd->send_schedule = alloc_percpu(u64);
if (!dd->send_schedule) {
ret = -ENOMEM;
- hfi1_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
goto bail;
}
- if (!hfi1_cpulist_count) {
- u32 count = num_online_cpus();
-
- hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
- GFP_KERNEL);
- if (hfi1_cpulist)
- hfi1_cpulist_count = count;
- else
- hfi1_early_err(
- &pdev->dev,
- "Could not alloc cpulist info, cpu affinity might be wrong\n");
+ dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
+ if (!dd->tx_opstats) {
+ ret = -ENOMEM;
+ goto bail;
}
+
kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
@@ -1477,8 +1485,6 @@ static void __exit hfi1_mod_cleanup(void)
node_affinity_destroy();
hfi1_wss_exit();
hfi1_dbg_exit();
- hfi1_cpulist_count = 0;
- kfree(hfi1_cpulist);
idr_destroy(&hfi1_unit_table);
dispose_firmware(); /* asymmetric with obtain_firmware() */
@@ -1801,8 +1807,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
sizeof(u32));
- if ((rcd->ctxt < dd->first_dyn_alloc_ctxt) ||
- (rcd->sc && (rcd->sc->type == SC_KERNEL)))
+ if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
gfp_flags = GFP_KERNEL;
else
gfp_flags = GFP_USER;
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 96845dfed5c5..387305b768e9 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -53,6 +53,42 @@
#include "common.h"
#include "sdma.h"
+#define LINK_UP_DELAY 500 /* in microseconds */
+
+static void set_mgmt_allowed(struct hfi1_pportdata *ppd)
+{
+ u32 frame;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (ppd->neighbor_type == NEIGHBOR_TYPE_HFI) {
+ ppd->mgmt_allowed = 1;
+ } else {
+ read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
+ ppd->mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT)
+ & MGMT_ALLOWED_MASK;
+ }
+}
+
+/*
+ * Our neighbor has indicated that we are allowed to act as a fabric
+ * manager, so place the full management partition key in the second
+ * (0-based) pkey array position. Note that we should already have
+ * the limited management partition key in array element 1, and also
+ * that the port is not yet up when add_full_mgmt_pkey() is invoked.
+ */
+static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
+ if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
+ dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
+ __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
+ ppd->pkeys[2] = FULL_MGMT_P_KEY;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+}
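Put differently, the pkey slot convention the comment describes ends up looking like this once the neighbor permits management (slot 1 is assumed to have been populated earlier during initialization; only macros already used above are referenced):

	ppd->pkeys[1] = LIM_MGMT_P_KEY;		/* limited management key, set at init */
	ppd->pkeys[2] = FULL_MGMT_P_KEY;	/* written by add_full_mgmt_pkey() */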
+
/**
* format_hwmsg - format a single hwerror message
 * @msg: message buffer
@@ -102,9 +138,16 @@ static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
ib_dispatch_event(&event);
}
-/*
+/**
+ * handle_linkup_change - finish linkup/down state changes
+ * @dd: valid device
+ * @linkup: link state information
+ *
* Handle a linkup or link down notification.
+ * The HW needs time to finish its link up state change. Give it that chance.
+ *
* This is called outside an interrupt.
+ *
*/
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
{
@@ -151,6 +194,18 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
ppd->neighbor_guid, ppd->neighbor_type,
ppd->neighbor_port_number);
+ /* HW needs LINK_UP_DELAY to settle, give it that chance */
+ udelay(LINK_UP_DELAY);
+
+ /*
+ * 'MgmtAllowed' information, which is exchanged during
+ * LNI, is available at this point.
+ */
+ set_mgmt_allowed(ppd);
+
+ if (ppd->mgmt_allowed)
+ add_full_mgmt_pkey(ppd);
+
/* physical link went up */
ppd->linkup = 1;
ppd->offline_disabled_reason =
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index f4c0ffc040cc..cf8dba34fe30 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -98,6 +98,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
memset(data, 0, size);
}
+static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
+{
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
+ return ppd->pkeys[pkey_idx];
+
+ return 0;
+}
+
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
{
struct ib_event event;
@@ -399,9 +409,9 @@ static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
ib_free_send_mad(send_buf);
}
-void hfi1_handle_trap_timer(unsigned long data)
+void hfi1_handle_trap_timer(struct timer_list *t)
{
- struct hfi1_ibport *ibp = (struct hfi1_ibport *)data;
+ struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
struct trap_node *trap = NULL;
unsigned long flags;
int i;
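The trap timer follows the same timer-API conversion used throughout this series (see sdma_err_progress_check() and mem_timer() below): the callback now receives a struct timer_list * and recovers its container with from_timer(), and setup uses timer_setup() instead of setup_timer(). A minimal sketch of the pattern, with hypothetical names:

	struct foo {
		struct timer_list timer;
		bool fired;
	};

	static void foo_timeout(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer field */
		struct foo *f = from_timer(f, t, timer);

		f->fired = true;
	}

	static void foo_start(struct foo *f)
	{
		timer_setup(&f->timer, foo_timeout, 0);	/* third argument is flags */
		mod_timer(&f->timer, jiffies + HZ);	/* fire in about one second */
	}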
@@ -711,6 +721,7 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
+ /* fall through */
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -1227,8 +1238,7 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
}
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
- u32 logical_state, u32 phys_state,
- int suppress_idle_sma)
+ u32 logical_state, u32 phys_state)
{
struct hfi1_devdata *dd = ppd->dd;
u32 link_state;
@@ -1309,7 +1319,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
break;
case IB_PORT_ARMED:
ret = set_link_state(ppd, HLS_UP_ARMED);
- if ((ret == 0) && (suppress_idle_sma == 0))
+ if (!ret)
send_idle_sma(dd, SMA_IDLE_ARM);
break;
case IB_PORT_ACTIVE:
@@ -1603,8 +1613,10 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
ppd->is_sm_config_started = 1;
} else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
+ if (ppd->is_sm_config_started == 0) {
invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
}
}
@@ -1621,9 +1633,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
* is down or is being set to down.
*/
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new);
+ if (ret)
+ return ret;
+ }
ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
max_len);
@@ -2100,17 +2114,18 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
ppd->is_sm_config_started = 1;
} else if (ls_new == IB_PORT_ARMED) {
- if (ppd->is_sm_config_started == 0)
+ if (ppd->is_sm_config_started == 0) {
invalid = 1;
+ smp->status |= IB_SMP_INVALID_FIELD;
+ }
}
}
- ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
- if (ret)
- return ret;
-
- if (invalid)
- smp->status |= IB_SMP_INVALID_FIELD;
+ if (!invalid) {
+ ret = set_port_states(ppd, smp, ls_new, ps_new);
+ if (ret)
+ return ret;
+ }
return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
max_len);
@@ -2888,7 +2903,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
struct _vls_dctrs *vlinfo;
size_t response_data_size;
u32 num_ports;
- u8 num_pslm;
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
@@ -2898,7 +2912,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
int vfi;
num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
- num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
vl_select_mask = be32_to_cpu(req->vl_select_mask);
res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
@@ -3688,7 +3701,11 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
*new_cc_state = *old_cc_state;
- new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ if (ppd->total_cct_entry)
+ new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ else
+ new_cc_state->cct.ccti_limit = 0;
+
memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
@@ -3751,7 +3768,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
- s64 ts;
+ u64 ts;
int i;
if (am || smp_length_check(sizeof(*cong_log), max_len)) {
@@ -3769,7 +3786,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
ppd->threshold_cong_event_map,
sizeof(cong_log->threshold_cong_event_map));
/* keep timestamp in units of 1.024 usec */
- ts = ktime_to_ns(ktime_get()) / 1024;
+ ts = ktime_get_ns() / 1024;
cong_log->current_time_stamp = cpu_to_be32(ts);
for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
struct opa_hfi1_cong_log_event_internal *cce =
@@ -3781,7 +3798,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
* required to wrap the counter are supposed to
* be zeroed (CA10-49 IBTA, release 1.2.1, V1).
*/
- if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
+ if ((ts - cce->timestamp) / 2 > U32_MAX)
continue;
memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
memcpy(cong_log->events[i].remote_qp_number_cn_entry,
@@ -4260,6 +4277,18 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
}
+static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
+{
+ unsigned int i;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
+ if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
+ return 1;
+
+ return 0;
+}
+
/*
* is_local_mad() returns 1 if 'mad' is sent from, and destined to the
* local node, 0 otherwise.
@@ -4293,7 +4322,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
const struct ib_wc *in_wc)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid = ib_lid_cpu16(in_wc->slid);
u16 pkey;
if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
@@ -4320,10 +4348,71 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- ingress_pkey_table_fail(ppd, pkey, slid);
+ /*
+ * On OPA devices it is okay to lose the upper 16 bits of LID as this
+ * information is obtained elsewhere. Mask off the upper 16 bits.
+ */
+ ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
return 1;
}
+/**
+ * hfi1_pkey_validation_pma - validate PKEYs for incoming PMA MAD packets
+ * @ibp: IB port data
+ * @in_mad: MAD packet with header and data
+ * @in_wc: Work completion data such as source LID, port number, etc.
+ *
+ * These are all the possible logic rules for validating a pkey:
+ *
+ * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
+ * and NOT self-originated packet:
+ * Drop MAD packet as it should always be part of the
+ * management partition unless it's a self-originated packet.
+ *
+ * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
+ * The packet is coming from a management node and the receiving node
+ * is also a management node, so it is safe for the packet to go through.
+ *
+ * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
+ * Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
+ * It could be an FM misconfiguration.
+ *
+ * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
+ * It is safe for the packet to go through since a non-management node is
+ * talking to another non-management node.
+ *
+ * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
+ * Drop the packet because a non-management node is talking to a
+ * management node, and it could be an attack.
+ *
+ * For the implementation, these rules can be simplified to only checking
+ * for (a) and (e). There's no need to check for rule (b) as
+ * the packet doesn't need to be dropped. Rule (c) is not possible in
+ * the driver as LIM_MGMT_P_KEY is always in the pkey table.
+ *
+ * Return:
+ * 0 - pkey is okay, -EINVAL - it's a bad pkey
+ */
+static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
+ const struct opa_mad *in_mad,
+ const struct ib_wc *in_wc)
+{
+ u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
+
+ /* Rule (a) from above */
+ if (!is_local_mad(ibp, in_mad, in_wc) &&
+ pkey_value != LIM_MGMT_P_KEY &&
+ pkey_value != FULL_MGMT_P_KEY)
+ return -EINVAL;
+
+ /* Rule (e) from above */
+ if (pkey_value == LIM_MGMT_P_KEY &&
+ is_full_mgmt_pkey_in_table(ibp))
+ return -EINVAL;
+
+ return 0;
+}
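Condensing the rule list above, only checks (a) and (e) are coded; assuming the helpers behave as documented, the accept/drop behaviour works out roughly to:

	pkey at pkey_index   self-originated?   FULL_MGMT_P_KEY in table?   result
	-------------------  -----------------  --------------------------  --------------------
	neither mgmt key     no                 -                           -EINVAL  (rule a)
	neither mgmt key     yes                -                           0
	FULL_MGMT_P_KEY      either             -                           0        (rule b)
	LIM_MGMT_P_KEY       either             no                          0        (rule d)
	LIM_MGMT_P_KEY       either             yes                         -EINVAL  (rule e)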
+
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
u8 port, const struct opa_mad *in_mad,
struct opa_mad *out_mad,
@@ -4663,8 +4752,11 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
out_mad, &resp_len);
goto bail;
case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf_opa(ibdev, port, in_mad, out_mad,
- &resp_len);
+ ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
+ if (ret)
+ return IB_MAD_RESULT_FAILURE;
+
+ ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
goto bail;
default:
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 4c1245072093..c4938f3d97c8 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -239,7 +239,7 @@ struct opa_hfi1_cong_log_event_internal {
u8 sl;
u8 svc_type;
u32 rlid;
- s64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
+ u64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
};
struct opa_hfi1_cong_log_event {
@@ -428,6 +428,6 @@ struct sc2vlnt {
COUNTER_MASK(1, 4))
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
-void hfi1_handle_trap_timer(unsigned long data);
+void hfi1_handle_trap_timer(struct timer_list *t);
#endif /* _HFI1_MAD_H */
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 175002c046ed..e7b3ce123da6 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -67,12 +67,9 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_range_start(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
- struct mm_struct *,
- unsigned long, unsigned long);
+static void mmu_notifier_range_start(struct mmu_notifier *,
+ struct mm_struct *,
+ unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
@@ -286,17 +283,10 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- mmu_notifier_mem_invalidate(mn, mm, start, end);
-}
-
-static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static void mmu_notifier_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 7108a4b5e94c..4c1198bc5e70 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -703,7 +703,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
{
struct send_context_info *sci;
struct send_context *sc = NULL;
- int req_type = type;
dma_addr_t dma;
unsigned long flags;
u64 reg;
@@ -730,13 +729,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
return NULL;
}
- /*
- * VNIC contexts are dynamically allocated.
- * Hence, pick a user context for VNIC.
- */
- if (type == SC_VNIC)
- type = SC_USER;
-
spin_lock_irqsave(&dd->sc_lock, flags);
ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
if (ret) {
@@ -746,15 +738,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
return NULL;
}
- /*
- * VNIC contexts are used by kernel driver.
- * Hence, mark them as kernel contexts.
- */
- if (req_type == SC_VNIC) {
- dd->send_contexts[sw_index].type = SC_KERNEL;
- type = SC_KERNEL;
- }
-
sci = &dd->send_contexts[sw_index];
sci->sc = sc;
@@ -1423,14 +1406,14 @@ retry:
goto done;
}
/* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
avail =
(unsigned long)sc->credits -
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
sc_release_update(sc);
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
trycount++;
goto retry;
}
@@ -1667,7 +1650,7 @@ void sc_release_update(struct send_context *sc)
/* call sent buffer callbacks */
code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
tail = sc->sr_tail;
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 99ca5edb0b43..058b08f459ab 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -54,12 +54,6 @@
#define SC_USER 3 /* must be the last one: it may take all left */
#define SC_MAX 4 /* count of send context types */
-/*
- * SC_VNIC types are allocated (dynamically) from the user context pool,
- * (SC_USER) and used by kernel driver as kernel contexts (SC_KERNEL).
- */
-#define SC_VNIC SC_MAX
-
/* invalid send context index */
#define INVALID_SCI 0xff
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index e1cf0c08ca6f..fd01a760259f 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -276,7 +276,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (IS_ERR(ps->s_txreq))
goto bail_no_tx;
- ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -1966,7 +1965,7 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
cc_event->svc_type = svc_type;
cc_event->rlid = rlid;
/* keep timestamp in units of 1.024 usec */
- cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
+ cc_event->timestamp = ktime_get_ns() / 1024;
spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
@@ -2175,7 +2174,7 @@ send_middle:
goto no_immediate_data;
if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
goto send_last_inv;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+ /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
@@ -2220,7 +2219,7 @@ send_last:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index b3291f0fde9a..2c7fc6e331ea 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
@@ -560,7 +560,7 @@ do_write:
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
@@ -825,11 +825,9 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp = ps->ibp;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 bth1 = 0;
u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
u16 lrh0 = HFI1_LRH_BTH;
- u16 slid;
u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
extra_bytes) >> 2);
@@ -866,13 +864,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
}
hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
-
- if (!ppd->lid)
- slid = be16_to_cpu(IB_LID_PERMISSIVE);
- else
- slid = ppd->lid |
- (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
- ((1 << ppd->lmc) - 1));
hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
lrh0,
qp->s_hdrwords + nwords,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6781bcdb10b3..31c8f89b5fc8 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -491,10 +491,10 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
}
}
-static void sdma_err_progress_check(unsigned long data)
+static void sdma_err_progress_check(struct timer_list *t)
{
unsigned index;
- struct sdma_engine *sde = (struct sdma_engine *)data;
+ struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
dd_dev_err(sde->dd, "SDE progress check event\n");
for (index = 0; index < sde->dd->num_sdma; index++) {
@@ -1392,6 +1392,13 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
return ret;
idle_cnt = ns_to_cclock(dd, idle_cnt);
+ if (idle_cnt)
+ dd->default_desc1 =
+ SDMA_DESC1_HEAD_TO_HOST_FLAG;
+ else
+ dd->default_desc1 =
+ SDMA_DESC1_INT_REQ_FLAG;
+
if (!sdma_desct_intr)
sdma_desct_intr = SDMA_DESC_INTR;
@@ -1436,13 +1443,6 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->tail_csr =
get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
- if (idle_cnt)
- dd->default_desc1 =
- SDMA_DESC1_HEAD_TO_HOST_FLAG;
- else
- dd->default_desc1 =
- SDMA_DESC1_INT_REQ_FLAG;
-
tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
(unsigned long)sde);
@@ -1453,8 +1453,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->progress_check_head = 0;
- setup_timer(&sde->err_progress_check_timer,
- sdma_err_progress_check, (unsigned long)sde);
+ timer_setup(&sde->err_progress_check_timer,
+ sdma_err_progress_check, 0);
sde->descq = dma_zalloc_coherent(
&dd->pcidev->dev,
@@ -1465,13 +1465,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
if (!sde->descq)
goto bail;
sde->tx_ring =
- kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
- GFP_KERNEL);
- if (!sde->tx_ring)
- sde->tx_ring =
- vzalloc(
- sizeof(struct sdma_txreq *) *
- descq_cnt);
+ kvzalloc_node(sizeof(struct sdma_txreq *) * descq_cnt,
+ GFP_KERNEL, dd->node);
if (!sde->tx_ring)
goto bail;
}
@@ -1725,7 +1720,7 @@ retry:
swhead = sde->descq_head & sde->sdma_mask;
/* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
cnt = sde->descq_cnt;
if (swhead < swtail)
@@ -1872,7 +1867,7 @@ retry:
if ((status & sde->idle_mask) && !idle_check_done) {
u16 swtail;
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
idle_check_done = 1;
@@ -2144,7 +2139,6 @@ void sdma_dumpstate(struct sdma_engine *sde)
static void dump_sdma_state(struct sdma_engine *sde)
{
- struct hw_sdma_desc *descq;
struct hw_sdma_desc *descqp;
u64 desc[2];
u64 addr;
@@ -2155,7 +2149,6 @@ static void dump_sdma_state(struct sdma_engine *sde)
head = sde->descq_head & sde->sdma_mask;
tail = sde->descq_tail & sde->sdma_mask;
cnt = sdma_descq_freecnt(sde);
- descq = sde->descq;
dd_dev_err(sde->dd,
"SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
@@ -2222,7 +2215,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
u16 len;
head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
sde->cpu,
sdma_state_name(sde->state.current_state),
@@ -2593,7 +2586,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
* 7220, e.g.
*/
ss->go_s99_running = 1;
- /* fall through and start dma engine */
+ /* fall through -- and start dma engine */
case sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&sde->state);
@@ -3016,6 +3009,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
case sdma_event_e60_hw_halted:
need_progress = 1;
sdma_err_progress_check_schedule(sde);
+ /* fall through */
case sdma_event_e90_sw_halted:
/*
* SW initiated halt does not perform engines
@@ -3305,7 +3299,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
return -EINVAL;
}
while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ nr = ffz(READ_ONCE(sde->ahg_bits));
if (nr > 31) {
trace_hfi1_ahg_allocate(sde, -ENOSPC);
return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 107011d8613b..374c59784950 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
return sde->descq_cnt -
(sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
+ READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 6d2702ef34ac..25e867393463 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
* give a more accurate picture of total contexts available.
*/
return scnprintf(buf, PAGE_SIZE, "%u\n",
- min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
+ min(dd->num_user_contexts,
(u32)dd->sc_sizes[SC_USER].count));
}
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 9938bb983ce6..959a80429ee9 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -91,12 +91,17 @@ u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
return __get_16b_hdr_len(&opa_hdr->opah);
}
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
+const char *hfi1_trace_get_packet_l4_str(u8 l4)
{
- if (packet->etype != RHF_RCV_TYPE_BYPASS)
- return "IB";
+ if (l4)
+ return "16B";
+ else
+ return "9B";
+}
- switch (hfi1_16B_get_l2(packet->hdr)) {
+const char *hfi1_trace_get_packet_l2_str(u8 l2)
+{
+ switch (l2) {
case 0:
return "0";
case 1:
@@ -109,14 +114,6 @@ const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
return "";
}
-const char *hfi1_trace_get_packet_type_str(u8 l4)
-{
- if (l4)
- return "16B";
- else
- return "9B";
-}
-
#define IMM_PRN "imm:%d"
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
@@ -154,7 +151,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
*opcode = ib_bth_get_opcode(ohdr);
*tver = ib_bth_get_tver(ohdr);
*pkey = ib_bth_get_pkey(ohdr);
- *psn = ib_bth_get_psn(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
@@ -169,7 +166,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
*pad = ib_bth_get_pad(ohdr);
*se = ib_bth_get_se(ohdr);
*tver = ib_bth_get_tver(ohdr);
- *psn = ib_bth_get_psn(ohdr);
+ *psn = mask_psn(ib_bth_get_psn(ohdr));
*qpn = ib_bth_get_qpn(ohdr);
}
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index af50c0793450..8540463ef3f7 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -44,6 +44,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
#include "trace_dbg.h"
#include "trace_misc.h"
#include "trace_ctxts.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 6721f84dafa5..fb631278eccd 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -99,8 +99,7 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
-const char *hfi1_trace_get_packet_type_str(u8 l4);
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_l4_str(u8 l4);
void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
u8 *se, u8 *pad, u8 *opcode, u8 *tver,
@@ -129,6 +128,8 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
u8 se, u8 pad, u8 opcode, const char *opname,
u8 tver, u16 pkey, u32 psn, u32 qpn);
+const char *hfi1_trace_get_packet_l2_str(u8 l2);
+
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
@@ -136,8 +137,6 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
__print_symbolic(lrh, \
lrh_name(LRH_BTH), \
lrh_name(LRH_GRH))
-#define PKT_ENTRY(pkt) __string(ptype, hfi1_trace_get_packet_str(packet))
-#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
TP_PROTO(struct hfi1_devdata *dd,
@@ -146,12 +145,12 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
TP_ARGS(dd, packet, sc5),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
- PKT_ENTRY(packet)
- __field(bool, bypass)
+ __field(u8, etype)
__field(u8, ack)
__field(u8, age)
__field(u8, becn)
__field(u8, fecn)
+ __field(u8, l2)
__field(u8, l4)
__field(u8, lnh)
__field(u8, lver)
@@ -176,10 +175,10 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
),
TP_fast_assign(
DD_DEV_ASSIGN(dd);
- PKT_ASSIGN(packet);
- if (packet->etype == RHF_RCV_TYPE_BYPASS) {
- __entry->bypass = true;
+ __entry->etype = packet->etype;
+ __entry->l2 = hfi1_16B_get_l2(packet->hdr);
+ if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
hfi1_trace_parse_16b_hdr(packet->hdr,
&__entry->age,
&__entry->becn,
@@ -203,7 +202,6 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
&__entry->psn,
&__entry->qpn);
} else {
- __entry->bypass = false;
hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
&__entry->lnh,
&__entry->lver,
@@ -233,9 +231,13 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
- __get_str(ptype),
+ __entry->etype != RHF_RCV_TYPE_BYPASS ?
+ show_packettype(__entry->etype) :
+ hfi1_trace_get_packet_l2_str(
+ __entry->l2),
hfi1_trace_fmt_lrh(p,
- __entry->bypass,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
__entry->age,
__entry->becn,
__entry->fecn,
@@ -252,7 +254,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
__entry->dlid,
__entry->slid),
hfi1_trace_fmt_bth(p,
- __entry->bypass,
+ __entry->etype ==
+ RHF_RCV_TYPE_BYPASS,
__entry->ack,
__entry->becn,
__entry->fecn,
@@ -284,7 +287,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
TP_ARGS(dd, opah, sc5),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
- __field(bool, bypass)
+ __field(u8, hdr_type)
__field(u8, ack)
__field(u8, age)
__field(u8, becn)
@@ -316,8 +319,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
DD_DEV_ASSIGN(dd);
- if (opah->hdr_type) {
- __entry->bypass = true;
+ __entry->hdr_type = opah->hdr_type;
+ if (__entry->hdr_type) {
hfi1_trace_parse_16b_hdr(&opah->opah,
&__entry->age,
&__entry->becn,
@@ -331,7 +334,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->dlid,
&__entry->slid);
- if (entry->l4 == OPA_16B_L4_IB_LOCAL)
+ if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
ohdr = &opah->opah.u.oth;
else
ohdr = &opah->opah.u.l.oth;
@@ -345,7 +348,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->psn,
&__entry->qpn);
} else {
- __entry->bypass = false;
+ __entry->l4 = OPA_16B_L4_9B;
hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
&__entry->lnh,
&__entry->lver,
@@ -354,7 +357,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
&__entry->len,
&__entry->dlid,
&__entry->slid);
- if (entry->lnh == HFI1_LRH_BTH)
+ if (__entry->lnh == HFI1_LRH_BTH)
ohdr = &opah->ibh.u.oth;
else
ohdr = &opah->ibh.u.l.oth;
@@ -378,9 +381,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
),
TP_printk("[%s] (%s) %s %s hlen:%d %s",
__get_str(dev),
- hfi1_trace_get_packet_type_str(__entry->l4),
+ hfi1_trace_get_packet_l4_str(__entry->l4),
hfi1_trace_fmt_lrh(p,
- __entry->bypass,
+ !!__entry->hdr_type,
__entry->age,
__entry->becn,
__entry->fecn,
@@ -397,7 +400,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
__entry->dlid,
__entry->slid),
hfi1_trace_fmt_bth(p,
- __entry->bypass,
+ !!__entry->hdr_type,
__entry->ack,
__entry->becn,
__entry->fecn,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index f9909d240dcc..4d487fee105d 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,15 +62,6 @@ __print_symbolic(type, \
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u32 ctxt,
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 0b646173ca22..991bbee04821 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -93,7 +93,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto done_free_tx;
}
- ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -121,7 +120,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
@@ -463,7 +462,7 @@ last_imm:
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 2ba74fdd6f15..beb5091eccca 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -265,8 +265,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
} else {
wc.pkey_index = 0;
}
- wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
+ wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1))) & U16_MAX;
/* Check for loopback when the port lid is not set */
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
@@ -854,7 +854,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct ib_header *hdr = packet->hdr;
void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
@@ -880,7 +879,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
dlid_is_permissive = (dlid == permissive_lid);
slid_is_permissive = (slid == permissive_lid);
} else {
- hdr = packet->hdr;
pkey = ib_bth_get_pkey(ohdr);
dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
@@ -1039,7 +1037,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
if (slid_is_permissive)
slid = be32_to_cpu(OPA_LID_PERMISSIVE);
- wc.slid = slid;
+ wc.slid = slid & U16_MAX;
wc.sl = sl_from_sc;
/*
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 6f6c14df383e..c1c596adcd01 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -542,14 +542,10 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
unsigned long *ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
+ (uctxt_offset(uctxt) + fd->subctxt);
u32 *array;
int ret = 0;
- if (!fd->invalid_tids)
- return -EINVAL;
-
/*
* copy_to_user() can sleep, which will leave the invalid_lock
* locked and cause the MMU notifier to be blocked on the lock
@@ -942,8 +938,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
* process in question.
*/
ev = uctxt->dd->events +
- (((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
- HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
+ (uctxt_offset(uctxt) + fdata->subctxt);
set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
}
fdata->invalid_tid_idx++;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index c0c0e0445cbf..a3a7b33196d6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (ret != -EBUSY) {
req->status = ret;
WRITE_ONCE(req->has_error, 1);
- if (ACCESS_ONCE(req->seqcomp) ==
+ if (READ_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
if (req->data_len) {
iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
@@ -956,10 +956,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
struct hfi1_user_sdma_pkt_q *pq = req->pq;
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- SDMA_DBG(req, "Failed page array alloc");
+ if (!pages)
return -ENOMEM;
- }
memcpy(pages, node->pages, node->npages * sizeof(*pages));
npages -= node->npages;
@@ -1254,20 +1252,25 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
struct user_sdma_txreq *tx, u32 datalen)
{
u32 ahg[AHG_KDETH_ARRAY_SIZE];
- int diff = 0;
+ int idx = 0;
u8 omfactor; /* KDETH.OM */
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+ size_t array_size = ARRAY_SIZE(ahg);
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
- AHG_HEADER_SET(ahg, diff, 0, 0, 12,
- cpu_to_le16(LRH2PBC(lrhlen)));
+ idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
+ (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
+ if (idx < 0)
+ return idx;
/* LRH.PktLen (we need the full 16 bits due to byte swap) */
- AHG_HEADER_SET(ahg, diff, 3, 0, 16,
- cpu_to_be16(lrhlen >> 2));
+ idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
+ (__force u16)cpu_to_be16(lrhlen >> 2));
+ if (idx < 0)
+ return idx;
}
/*
@@ -1278,12 +1281,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
- AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
- AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+ idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
+ (__force u16)cpu_to_be16(val32 >> 16));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
+ (__force u16)cpu_to_be16(val32 & 0xffff));
+ if (idx < 0)
+ return idx;
/* KDETH.Offset */
- AHG_HEADER_SET(ahg, diff, 15, 0, 16,
- cpu_to_le16(req->koffset & 0xffff));
- AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
+ idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
+ (__force u16)cpu_to_le16(req->koffset & 0xffff));
+ if (idx < 0)
+ return idx;
+ idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
+ (__force u16)cpu_to_le16(req->koffset >> 16));
+ if (idx < 0)
+ return idx;
if (req_opcode(req->info.ctrl) == EXPECTED) {
__le16 val;
@@ -1310,10 +1324,13 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
KDETH_OM_SMALL_SHIFT;
/* KDETH.OM and KDETH.OFFSET (TID) */
- AHG_HEADER_SET(ahg, diff, 7, 0, 16,
- ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+ idx = ahg_header_set(
+ ahg, idx, array_size, 7, 0, 16,
+ ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
((req->tidoffset >> omfactor)
- & 0x7fff)));
+ & 0x7fff)));
+ if (idx < 0)
+ return idx;
/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
(EXP_TID_GET(tidval, IDX) & 0x3ff));
@@ -1330,21 +1347,22 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_KDETH_INTR_SHIFT));
}
- AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
+ idx = ahg_header_set(ahg, idx, array_size,
+ 7, 16, 14, (__force u16)val);
+ if (idx < 0)
+ return idx;
}
- if (diff < 0)
- return diff;
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, req->sde->this_idx,
- req->ahg_idx, ahg, diff, tidval);
+ req->ahg_idx, ahg, idx, tidval);
sdma_txinit_ahg(&tx->txreq,
SDMA_TXREQ_F_USE_AHG,
- datalen, req->ahg_idx, diff,
+ datalen, req->ahg_idx, idx,
ahg, sizeof(req->hdr),
user_sdma_txreq_cb);
- return diff;
+ return idx;
}
/*
@@ -1390,7 +1408,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
} else {
if (status != SDMA_TXREQ_S_OK)
req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
(READ_ONCE(req->done) ||
READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
@@ -1410,6 +1428,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
+ int i;
+
if (!list_empty(&req->txps)) {
struct sdma_txreq *t, *p;
@@ -1421,22 +1441,20 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
kmem_cache_free(req->pq->txreq_cache, tx);
}
}
- if (req->data_iovs) {
- struct sdma_mmu_node *node;
- int i;
-
- for (i = 0; i < req->data_iovs; i++) {
- node = req->iovs[i].node;
- if (!node)
- continue;
-
- if (unpin)
- hfi1_mmu_rb_remove(req->pq->handler,
- &node->rb);
- else
- atomic_dec(&node->refcount);
- }
+
+ for (i = 0; i < req->data_iovs; i++) {
+ struct sdma_mmu_node *node = req->iovs[i].node;
+
+ if (!node)
+ continue;
+
+ if (unpin)
+ hfi1_mmu_rb_remove(req->pq->handler,
+ &node->rb);
+ else
+ atomic_dec(&node->refcount);
}
+
kfree(req->tids);
clear_bit(req->info.comp_idx, req->pq->req_in_use);
}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 9b8bb5634c0d..a3d192424344 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -80,15 +80,26 @@
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
- do { \
- if ((idx) < ARRAY_SIZE((arr))) \
- (arr)[(idx++)] = sdma_build_ahg_descriptor( \
- (__force u16)(value), (dw), (bit), \
- (width)); \
- else \
- return -ERANGE; \
- } while (0)
+/**
+ * Build an SDMA AHG header update descriptor and save it to an array.
+ * @arr - Array to save the descriptor to.
+ * @idx - Index of the array at which the descriptor will be saved.
+ * @array_size - Size of the array arr.
+ * @dw - Update index into the header in DWs.
+ * @bit - Start bit.
+ * @width - Field width.
+ * @value - 16 bits of immediate data to write into the field.
+ * Returns -ERANGE if idx is invalid. If successful, returns the next index
+ * (idx + 1) of the array to be used for the next descriptor.
+ */
+static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
+ u8 dw, u8 bit, u8 width, u16 value)
+{
+ if ((size_t)idx >= array_size)
+ return -ERANGE;
+ arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
+ return idx;
+}
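Because the helper returns the next free slot (or -ERANGE), callers thread the index through successive calls and bail out on the first error, as in the set_txreq_header_ahg() changes above. A short hedged sketch with placeholder field values:

	u32 ahg[AHG_KDETH_ARRAY_SIZE];
	int idx = 0;
	u16 pbc_len_dws = 0, lrh_pkt_len = 0;	/* placeholder 16-bit values */

	/* PBC.PbcLengthDWs: DW 0, start bit 0, 12 bits wide */
	idx = ahg_header_set(ahg, idx, ARRAY_SIZE(ahg), 0, 0, 12, pbc_len_dws);
	if (idx < 0)
		return idx;		/* -ERANGE: descriptor array exhausted */

	/* LRH.PktLen: DW 3, start bit 0, 16 bits wide */
	idx = ahg_header_set(ahg, idx, ARRAY_SIZE(ahg), 3, 0, 16, lrh_pkt_len);
	if (idx < 0)
		return idx;
	/* idx now holds the number of descriptors written so far */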
/* Tx request flag bits */
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index e232f3c608b4..a38785e224cc 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -146,6 +146,9 @@ static int pio_wait(struct rvt_qp *qp,
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
+/* 16B trailing buffer */
+static const u8 trail_buf[MAX_16B_PADDING];
+
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -667,9 +670,9 @@ void hfi1_16B_rcv(struct hfi1_packet *packet)
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
*/
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
{
- struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
+ struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct iowait *wait;
@@ -793,6 +796,27 @@ bail_txadd:
return ret;
}
+/**
+ * update_tx_opstats - record stats by opcode
+ * @qp: the qp
+ * @ps: transmit packet state
+ * @plen: the plen in dwords
+ *
+ * This is a routine to record the tx opstats after a
+ * packet has been presented to the egress mechanism.
+ */
+static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ u32 plen)
+{
+#ifdef CONFIG_DEBUG_FS
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);
+
+ inc_opstats(plen * 4, &s->stats[ps->opcode]);
+ put_cpu_ptr(s);
+#endif
+}
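dd->tx_opstats is the per-CPU array added to hfi1_alloc_devdata() earlier in this patch; get_cpu_ptr()/put_cpu_ptr() pin the caller to one CPU's copy while it bumps the counters. Folding the copies back together for reporting would look roughly like the following sketch (the field name n_bytes is an assumption about what inc_opstats() updates):

	static u64 total_tx_bytes(struct hfi1_devdata *dd, u8 opcode)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct hfi1_opcode_stats_perctx *s =
				per_cpu_ptr(dd->tx_opstats, cpu);

			sum += s->stats[opcode].n_bytes;	/* assumed field name */
		}
		return sum;
	}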
+
/*
* Build the number of DMA descriptors needed to send length bytes of data.
*
@@ -812,9 +836,7 @@ static int build_verbs_tx_desc(
int ret = 0;
struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
- u32 *hdr;
u8 extra_bytes = 0;
- static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
if (tx->phdr.hdr.hdr_type) {
/*
@@ -823,9 +845,6 @@ static int build_verbs_tx_desc(
*/
extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
(SIZE_OF_CRC << 2) + SIZE_OF_LT;
- hdr = (u32 *)&phdr->hdr.opah;
- } else {
- hdr = (u32 *)&phdr->hdr.ibh;
}
if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
@@ -869,9 +888,9 @@ static int build_verbs_tx_desc(
}
/* add icrc, lt byte, and padding to flit */
- if (extra_bytes != 0)
+ if (extra_bytes)
ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
- trail_buf, extra_bytes);
+ (void *)trail_buf, extra_bytes);
bail_txadd:
return ret;
@@ -891,14 +910,12 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u8 sc5 = priv->s_sc;
int ret;
u32 dwords;
- bool bypass = false;
if (ps->s_txreq->phdr.hdr.hdr_type) {
u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
SIZE_OF_LT) >> 2;
- bypass = true;
} else {
dwords = (len + 3) >> 2;
}
@@ -938,6 +955,8 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
goto bail_ecomm;
return ret;
}
+
+ update_tx_opstats(qp, ps, plen);
trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
return ret;
@@ -1033,8 +1052,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int wc_status = IB_WC_SUCCESS;
int ret = 0;
pio_release_cb cb = NULL;
- u32 lrh0_16b;
- bool bypass = false;
u8 extra_bytes = 0;
if (ps->s_txreq->phdr.hdr.hdr_type) {
@@ -1043,8 +1060,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
dwords = (len + extra_bytes) >> 2;
hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
- lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
- bypass = true;
} else {
dwords = (len + 3) >> 2;
hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
@@ -1128,21 +1143,14 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
len -= slen;
}
}
- /*
- * Bypass packet will need to copy additional
- * bytes to accommodate for CRC and LT bytes
- */
- if (extra_bytes) {
- u8 *empty_buf;
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes)
+ seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
- empty_buf = kcalloc(extra_bytes, sizeof(u8),
- GFP_KERNEL);
- seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
- kfree(empty_buf);
- }
seg_pio_copy_end(pbuf);
}
+ update_tx_opstats(qp, ps, plen);
trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
@@ -1636,8 +1644,7 @@ static void init_ibport(struct hfi1_pportdata *ppd)
for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
- setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
- (unsigned long)ibp);
+ timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);
spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
@@ -1844,7 +1851,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
/* Only need to initialize non-zero fields. */
- setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+ timer_setup(&dev->mem_timer, mem_timer, 0);
seqlock_init(&dev->iowait_lock);
seqlock_init(&dev->txwait_lock);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 76216f2ef35a..cec7a4b34d16 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -92,6 +92,8 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->psc = priv->s_sendcontext;
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
+ /* Set the header type */
+ tx->phdr.hdr.hdr_type = priv->hdr_type;
return tx;
}
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index f419cbb05928..5d65582fe4d9 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -67,8 +67,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
unsigned int rcvctrl_ops = 0;
int ret;
- hfi1_init_ctxt(uctxt->sc);
-
uctxt->do_interrupt = &handle_receive_interrupt;
/* Now allocate the RcvHdr queue and eager buffers. */
@@ -96,8 +94,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
-
- uctxt->is_vnic = true;
done:
return ret;
}
@@ -122,20 +118,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_CAP_KGET(NODROP_EGR_FULL) |
HFI1_CAP_KGET(DMA_RTAIL);
uctxt->seq_cnt = 1;
-
- /* Allocate and enable a PIO send context */
- uctxt->sc = sc_alloc(dd, SC_VNIC, uctxt->rcvhdrqentsize,
- uctxt->numa_id);
-
- ret = uctxt->sc ? 0 : -ENOMEM;
- if (ret)
- goto bail;
-
- dd_dev_dbg(dd, "allocated vnic send context %u(%u)\n",
- uctxt->sc->sw_index, uctxt->sc->hw_context);
- ret = sc_enable(uctxt->sc);
- if (ret)
- goto bail;
+ uctxt->is_vnic = true;
if (dd->num_msix_entries)
hfi1_set_vnic_msix_info(uctxt);
@@ -144,11 +127,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
*vnic_ctxt = uctxt;
- return ret;
-bail:
- hfi1_free_ctxt(uctxt);
- dd_dev_dbg(dd, "vnic allocation failed. rc %d\n", ret);
- return ret;
+ return 0;
}
static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
@@ -170,18 +149,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
- /*
- * VNIC contexts are allocated from user context pool.
- * Release them back to user context pool.
- *
- * Reset context integrity checks to default.
- * (writes to CSRs probably belong in chip.c)
- */
- write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
- hfi1_pkt_default_send_ctxt_mask(dd, SC_USER));
- sc_disable(uctxt->sc);
-
- dd->send_contexts[uctxt->sc->sw_index].type = SC_USER;
uctxt->event_flags = 0;
@@ -840,6 +807,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
struct rdma_netdev *rn;
int i, size, rc;
+ if (!dd->num_vnic_contexts)
+ return ERR_PTR(-ENOMEM);
+
if (!port_num || (port_num > dd->num_pports))
return ERR_PTR(-EINVAL);
@@ -848,7 +818,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
- dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
+ dd->chip_sdma_engines, dd->num_vnic_contexts);
if (!netdev)
return ERR_PTR(-ENOMEM);
@@ -856,7 +826,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
vinfo = opa_vnic_dev_priv(netdev);
vinfo->dd = dd;
vinfo->num_tx_q = dd->chip_sdma_engines;
- vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
+ vinfo->num_rx_q = dd->num_vnic_contexts;
vinfo->netdev = netdev;
rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 61c93bbd230d..fddb5fdf92de 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,10 +1,31 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
- depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
- is used in Hisilicon Hi1610 and more further ICT SoC.
+ is used in Hisilicon Hip06 and later ICT SoCs based on the
+ platform device model.
To compile this driver as a module, choose M here: the module
will be called hns-roce.
+
+config INFINIBAND_HNS_HIP06
+ tristate "Hisilicon Hip06 Family RoCE support"
+ depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
+ Hip07 SoC. These RoCE engines are platform devices.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce-hw-v1.
+
+config INFINIBAND_HNS_HIP08
+ tristate "Hisilicon Hip08 Family RoCE support"
+ depends on INFINIBAND_HNS && PCI && HNS3
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+ The RoCE engine is a PCI device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hns-roce-hw-v2.
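With the driver split this way, a build that wants both hardware generations as modules would use a fragment along these lines (assuming the HNS/HNS_DSAF/HNS_ENET and PCI/HNS3 prerequisites are already satisfied):

	CONFIG_INFINIBAND_HNS=m
	CONFIG_INFINIBAND_HNS_HIP06=m
	CONFIG_INFINIBAND_HNS_HIP08=m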
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 7e8ebd24dcae..ff426a625e13 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -2,7 +2,13 @@
# Makefile for the Hisilicon RoCE drivers.
#
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
- hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
+ hns_roce_cq.o hns_roce_alloc.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
+hns-roce-hw-v1-objs := hns_roce_hw_v1.o
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index d545302b8ef8..7dd6a66ea244 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,11 +44,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
- struct in6_addr in6;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
union ib_gid sgid;
int ret;
@@ -58,18 +57,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
return ERR_PTR(-ENOMEM);
/* Get mac address */
- memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
- if (rdma_is_multicast_addr(&in6)) {
- rdma_get_mcast_mac(&in6, ah->av.mac);
- } else {
- u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
-
- if (!dmac) {
- kfree(ah);
- return ERR_PTR(-EINVAL);
- }
- memcpy(ah->av.mac, dmac, ETH_ALEN);
- }
+ memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* Get source gid */
ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index e1b433cdd5e2..3e4c5253ab5c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
{
hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
+EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj)
@@ -160,39 +161,47 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf)
{
int i;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) {
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
} else {
- if (bits_per_long == 64)
+ if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
vunmap(buf->direct.buf);
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
- dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(dev, 1 << buf->page_shift,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_buf_free);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf)
+ struct hns_roce_buf *buf, u32 page_shift)
{
int i = 0;
dma_addr_t t;
struct page **pages;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG;
+ u32 page_size = 1 << page_shift;
+ u32 order;
/* SQ/RQ buf less than one page, SQ + RQ = 8K */
if (size <= max_direct) {
buf->nbufs = 1;
/* Npages calculated by page_size */
- buf->npages = 1 << get_order(size);
- buf->page_shift = PAGE_SHIFT;
+ order = get_order(size);
+ if (order <= page_shift - PAGE_SHIFT)
+ order = 0;
+ else
+ order -= page_shift - PAGE_SHIFT;
+ buf->npages = 1 << order;
+ buf->page_shift = page_shift;
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
if (!buf->direct.buf)
@@ -207,9 +216,9 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
memset(buf->direct.buf, 0, size);
} else {
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->nbufs = (size + page_size - 1) / page_size;
buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
+ buf->page_shift = page_shift;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
@@ -218,16 +227,16 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf = dma_alloc_coherent(dev,
- PAGE_SIZE, &t,
+ page_size, &t,
GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+ memset(buf->page_list[i].buf, 0, page_size);
}
- if (bits_per_long == 64) {
+ if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
pages = kmalloc_array(buf->nbufs, sizeof(*pages),
GFP_KERNEL);
if (!pages)
@@ -241,6 +250,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
kfree(pages);
if (!buf->direct.buf)
goto err_free;
+ } else {
+ buf->direct.buf = NULL;
}
}
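
The direct-buffer branch above rescales get_order()'s result from kernel-page units to (1 << page_shift)-byte pages, so a buffer that fits inside one larger page ends up with a single npages entry. A self-contained sketch of that arithmetic (standalone C, not driver code; the helper name and stdint types are illustrative only, and kernel_page_shift stands in for PAGE_SHIFT):

#include <stdint.h>

/* Number of (1 << page_shift)-byte pages backing a "size"-byte direct buffer,
 * mirroring the order rescaling in hns_roce_buf_alloc() above. */
static uint32_t direct_buf_npages(uint32_t size, uint32_t page_shift,
                                  uint32_t kernel_page_shift)
{
        uint32_t kernel_pages = (size + (1u << kernel_page_shift) - 1)
                                >> kernel_page_shift;
        uint32_t order = 0;

        while ((1u << order) < kernel_pages)    /* get_order()-style round-up */
                order++;

        if (order <= page_shift - kernel_page_shift)
                return 1;                       /* fits in one larger page */

        return 1u << (order - (page_shift - kernel_page_shift));
}

With 4 KiB kernel pages and page_shift = 14 (16 KiB buffer pages), a 24 KiB buffer rounds up to order 3 (32 KiB) and therefore npages = 2.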
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index b94dcd823ad1..1085cb249bc1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -38,69 +38,7 @@
#define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32
-#define STATUS_MASK 0xff
#define CMD_TOKEN_MASK 0x1f
-#define GO_BIT_TIMEOUT_MSECS 10000
-
-enum {
- HCR_TOKEN_OFFSET = 0x14,
- HCR_STATUS_OFFSET = 0x18,
- HCR_GO_BIT = 15,
-};
-
-static int cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
-
- return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-/* this function should be serialized with "hcr_mutex" */
-static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
- u64 in_param, u64 out_param,
- u32 in_modifier, u8 op_modifier, u16 op,
- u16 token, int event)
-{
- struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- struct device *dev = &hr_dev->pdev->dev;
- u32 __iomem *hcr = (u32 *)cmd->hcr;
- int ret = -EAGAIN;
- unsigned long end;
- u32 val = 0;
-
- end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (cmd_pending(hr_dev)) {
- if (time_after(jiffies, end)) {
- dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
- (int)end);
- goto out;
- }
- cond_resched();
- }
-
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
- op);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
- ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
- ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
- __raw_writeq(cpu_to_le64(in_param), hcr + 0);
- __raw_writeq(cpu_to_le64(out_param), hcr + 2);
- __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
- /* Memory barrier */
- wmb();
-
- __raw_writel(cpu_to_le32(val), hcr + 5);
-
- mmiowb();
- ret = 0;
-
-out:
- return ret;
-}
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier,
@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- int ret = -EAGAIN;
+ int ret;
mutex_lock(&cmd->hcr_mutex);
- ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
- in_modifier, op_modifier, op, token,
- event);
+ ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
+ op_modifier, op, token, event);
mutex_unlock(&cmd->hcr_mutex);
return ret;
@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op,
unsigned long timeout)
{
- struct device *dev = &hr_dev->pdev->dev;
- u8 __iomem *hcr = hr_dev->cmd.hcr;
- unsigned long end = 0;
- u32 status = 0;
+ struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
- goto out;
- }
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
-
- if (cmd_pending(hr_dev)) {
- dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- ret = -ETIMEDOUT;
- goto out;
+ return ret;
}
- status = le32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET));
- if ((status & STATUS_MASK) != 0x1) {
- dev_err(dev, "mailbox status 0x%x!\n", status);
- ret = -EBUSY;
- goto out;
- }
-
-out:
- return ret;
+ return hr_dev->hw->chk_mbox(hr_dev, timeout);
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
unsigned long timeout)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
- struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context;
- int ret = 0;
+ struct device *dev = hr_dev->dev;
+ int ret;
spin_lock(&cmd->context_lock);
WARN_ON(cmd->free_head < 0);
@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
in_modifier, op_modifier, op,
timeout);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
- hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0);
@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
return mailbox;
}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox)
@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
+EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index f5a9ee2fc53d..b1c94223c28b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -37,6 +37,60 @@
#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
+ /* QPC BT commands */
+ HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
+ HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
+ HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
+ HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
+ HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
+ HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
+ HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
+ HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
+ HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
+
+ /* QPC operation */
+ HNS_ROCE_CMD_MODIFY_QPC = 0x41,
+ HNS_ROCE_CMD_QUERY_QPC = 0x42,
+
+ HNS_ROCE_CMD_MODIFY_CQC = 0x52,
+ /* CQC BT commands */
+ HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
+ HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
+ HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
+ HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
+ HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
+ HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
+ HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
+ HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
+ HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
+
+ /* MPT BT commands */
+ HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
+ HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
+ HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
+ HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
+ HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
+ HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
+ HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+ /* SRQC BT commands */
+ HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
+ HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
+ HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
+ HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
+ HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
+ HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+};
+
+enum {
/* TPT commands */
HNS_ROCE_CMD_SW2HW_MPT = 0xd,
HNS_ROCE_CMD_HW2SW_MPT = 0xf,
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 4af403e1348c..7ecb7a4147a8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -341,6 +341,7 @@
#define ROCEE_BT_CMD_L_REG 0x200
#define ROCEE_MB1_REG 0x210
+#define ROCEE_MB6_REG 0x224
#define ROCEE_DB_SQ_L_0_REG 0x230
#define ROCEE_DB_OTHERS_L_0_REG 0x238
#define ROCEE_QP1C_CFG0_0_REG 0x270
@@ -362,4 +363,26 @@
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
+/* V2 ROCEE REG */
+#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
+#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
+#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
+#define ROCEE_TX_CMQ_TAIL_REG 0x07010
+#define ROCEE_TX_CMQ_HEAD_REG 0x07014
+
+#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
+#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
+#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
+#define ROCEE_RX_CMQ_TAIL_REG 0x07024
+#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+
+#define ROCEE_VF_SMAC_CFG0_REG 0x12000
+#define ROCEE_VF_SMAC_CFG1_REG 0x12004
+
+#define ROCEE_VF_SGID_CFG0_REG 0x10000
+#define ROCEE_VF_SGID_CFG1_REG 0x10004
+#define ROCEE_VF_SGID_CFG2_REG 0x10008
+#define ROCEE_VF_SGID_CFG3_REG 0x1000c
+#define ROCEE_VF_SGID_CFG4_REG 0x10010
+
#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index b89fd711019e..2111b57a3489 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(&hr_dev->pdev->dev,
+ dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn);
return;
@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector)
{
- struct hns_roce_cmd_mailbox *mailbox = NULL;
- struct hns_roce_cq_table *cq_table = NULL;
- struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_hem_table *mtt_table;
+ struct hns_roce_cq_table *cq_table;
+ struct device *dev = hr_dev->dev;
dma_addr_t dma_handle;
- u64 *mtts = NULL;
- int ret = 0;
+ u64 *mtts;
+ int ret;
cq_table = &hr_dev->cq_table;
/* Get the physical address of cq buf */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ mtt_table = &hr_dev->mr_table.mtt_cqe_table;
+ else
+ mtt_table = &hr_dev->mr_table.mtt_table;
+
+ mtts = hns_roce_table_find(hr_dev, mtt_table,
hr_mtt->first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
@@ -150,6 +156,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
}
hr_cq->cons_index = 0;
+ hr_cq->arm_sn = 1;
hr_cq->uar = hr_uar;
atomic_set(&hr_cq->refcount, 1);
@@ -182,21 +189,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
-
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
+ if (hr_dev->eq_table.eq) {
+ /* Wait for any in-progress interrupt handling to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait until all interrupts have been processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+ }
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -205,6 +213,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
+EXPORT_SYMBOL_GPL(hns_roce_free_cq);
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_ucontext *context,
@@ -212,14 +221,31 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_umem **umem, u64 buf_addr, int cqe)
{
int ret;
+ u32 page_shift;
+ u32 npages;
*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
- (*umem)->page_shift, &buf->hr_mtt);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ else
+ buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
+ if (hr_dev->caps.cqe_buf_pg_sz) {
+ npages = (ib_umem_page_count(*umem) +
+ (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.cqe_buf_pg_sz);
+ page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
+ &buf->hr_mtt);
+ } else {
+ ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
+ (*umem)->page_shift,
+ &buf->hr_mtt);
+ }
if (ret)
goto err_buf;
@@ -241,12 +267,19 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
struct hns_roce_cq_buf *buf, u32 nent)
{
int ret;
+ u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
- PAGE_SIZE * 2, &buf->hr_buf);
+ (1 << page_shift) * 2, &buf->hr_buf,
+ page_shift);
if (ret)
goto out;
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
+ else
+ buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
+
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret)
@@ -281,13 +314,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
- int ret = 0;
+ int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
@@ -295,13 +328,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
return ERR_PTR(-EINVAL);
}
- hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
+ hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq)
return ERR_PTR(-ENOMEM);
- /* In v1 engine, parameter verification */
- if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
- cq_entries = HNS_ROCE_MIN_CQE_NUM;
+ if (hr_dev->caps.min_cqes)
+ cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
@@ -335,8 +367,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
uar = &hr_dev->priv_uar;
- hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
- 0x1000 * uar->index;
+ hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * uar->index;
}
/* Allocate cq index, fill cq_context */
@@ -353,7 +385,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
* problems if tptr is set to zero here, so we initialize it in user
* space.
*/
- if (!context)
+ if (!context && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */
@@ -385,6 +417,7 @@ err_cq:
kfree(hr_cq);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
@@ -410,10 +443,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -423,13 +457,14 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
return;
}
+ ++cq->arm_sn;
cq->comp(cq);
}
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree,
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index e493a61e14e1..01d3d695cbba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -78,6 +78,8 @@
#define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16
+#define HNS_ROCE_HOP_NUM_0 0xff
+
#define BITMAP_NO_RR 0
#define BITMAP_RR 1
@@ -168,6 +170,16 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
+enum {
+ HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+};
+
+enum hns_roce_mtt_type {
+ MTT_TYPE_WQE,
+ MTT_TYPE_CQE,
+};
+
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0
@@ -229,15 +241,21 @@ struct hns_roce_hem_table {
unsigned long num_obj;
/*Single obj size */
unsigned long obj_size;
+ unsigned long table_chunk_size;
int lowmem;
struct mutex mutex;
struct hns_roce_hem **hem;
+ u64 **bt_l1;
+ dma_addr_t *bt_l1_dma_addr;
+ u64 **bt_l0;
+ dma_addr_t *bt_l0_dma_addr;
};
struct hns_roce_mtt {
- unsigned long first_seg;
- int order;
- int page_shift;
+ unsigned long first_seg;
+ int order;
+ int page_shift;
+ enum hns_roce_mtt_type mtt_type;
};
/* Only support 4K page size for mr register */
@@ -255,6 +273,19 @@ struct hns_roce_mr {
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
+ u32 pbl_size;/* PA number in the PBL */
+ u64 pbl_ba;/* page table address */
+ u32 l0_chunk_last_num;/* L0 last number */
+ u32 l1_chunk_last_num;/* L1 last number */
+ u64 **pbl_bt_l2;/* PBL BT L2 */
+ u64 **pbl_bt_l1;/* PBL BT L1 */
+ u64 *pbl_bt_l0;/* PBL BT L0 */
+ dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
+ dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
+ dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
+ u32 pbl_ba_pg_sz;/* BT chunk page size */
+ u32 pbl_buf_pg_sz;/* buf chunk page size */
+ u32 pbl_hop_num;/* multi-hop number */
};
struct hns_roce_mr_table {
@@ -262,6 +293,8 @@ struct hns_roce_mr_table {
struct hns_roce_buddy mtt_buddy;
struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table;
+ struct hns_roce_buddy mtt_cqe_buddy;
+ struct hns_roce_hem_table mtt_cqe_table;
};
struct hns_roce_wq {
@@ -277,6 +310,12 @@ struct hns_roce_wq {
void __iomem *db_reg_l;
};
+struct hns_roce_sge {
+ int sge_cnt; /* SGE num */
+ int offset;
+ int sge_shift;/* SGE size */
+};
+
struct hns_roce_buf_list {
void *buf;
dma_addr_t map;
@@ -308,6 +347,7 @@ struct hns_roce_cq {
u32 cons_index;
void __iomem *cq_db_l;
u16 *tptr_addr;
+ int arm_sn;
unsigned long cqn;
u32 vector;
atomic_t refcount;
@@ -328,6 +368,7 @@ struct hns_roce_qp_table {
spinlock_t lock;
struct hns_roce_hem_table qp_table;
struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
};
struct hns_roce_cq_table {
@@ -367,7 +408,6 @@ struct hns_roce_cmd_context {
struct hns_roce_cmdq {
struct dma_pool *pool;
- u8 __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
/*
@@ -429,6 +469,9 @@ struct hns_roce_qp {
atomic_t refcount;
struct completion free;
+
+ struct hns_roce_sge sge;
+ u32 next_sge;
};
struct hns_roce_sqp {
@@ -439,7 +482,6 @@ struct hns_roce_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
struct notifier_block nb;
- struct notifier_block nb_inet;
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
@@ -477,16 +519,20 @@ struct hns_roce_caps {
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
+ u32 max_srq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
int num_cqs;
int max_cqes;
+ int min_cqes;
+ u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
int num_comp_vectors; /* 32 ceq */
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
+ u32 num_cqe_segs;
int reserved_mrws;
int reserved_uars;
int num_pds;
@@ -498,29 +544,70 @@ struct hns_roce_caps {
int mtpt_entry_sz;
int qpc_entry_sz;
int irrl_entry_sz;
+ int trrl_entry_sz;
int cqc_entry_sz;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+ u32 pbl_hop_num;
int aeqe_depth;
int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 mpt_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+ u32 srqc_ba_pg_sz;
+ u32 srqc_buf_pg_sz;
+ u32 srqc_hop_num;
+ u32 cqc_ba_pg_sz;
+ u32 cqc_buf_pg_sz;
+ u32 cqc_hop_num;
+ u32 mpt_ba_pg_sz;
+ u32 mpt_buf_pg_sz;
+ u32 mpt_hop_num;
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+ u32 chunk_sz; /* chunk size in non multihop mode*/
+ u64 flags;
};
struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
- void (*hw_profile)(struct hns_roce_dev *hr_dev);
+ int (*cmq_init)(struct hns_roce_dev *hr_dev);
+ void (*cmq_exit)(struct hns_roce_dev *hr_dev);
+ int (*hw_profile)(struct hns_roce_dev *hr_dev);
int (*hw_init)(struct hns_roce_dev *hr_dev);
void (*hw_exit)(struct hns_roce_dev *hr_dev);
- void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid);
- void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx);
+ int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags, u32 pdn,
+ int mr_access_flags, u64 iova, u64 size,
+ void *mb_buf);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
+ int (*set_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj);
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -535,12 +622,14 @@ struct hns_roce_hw {
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
- void *priv;
+ int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
};
struct hns_roce_dev {
struct ib_device ib_dev;
struct platform_device *pdev;
+ struct pci_dev *pci_dev;
+ struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
@@ -569,9 +658,12 @@ struct hns_roce_dev {
int cmd_mod;
int loop_idc;
+ u32 sdb_offset;
+ u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/
- struct hns_roce_hw *hw;
+ const struct hns_roce_hw *hw;
+ void *priv;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -635,12 +727,14 @@ static inline struct hns_roce_qp
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
u32 bits_per_long_val = BITS_PER_LONG;
+ u32 page_size = 1 << buf->page_shift;
- if (bits_per_long_val == 64 || buf->nbufs == 1)
+ if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
+ buf->nbufs == 1)
return (char *)(buf->direct.buf) + offset;
else
- return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
- (offset & (PAGE_SIZE - 1));
+ return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
+ (offset & (page_size - 1));
}
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
@@ -702,6 +796,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -711,7 +808,7 @@ unsigned long key_to_hw_index(u32 key);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
- struct hns_roce_buf *buf);
+ struct hns_roce_buf *buf, u32 page_shift);
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem);
@@ -723,6 +820,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
@@ -749,7 +847,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
-
-extern struct hns_roce_hw hns_roce_hw_v1;
+int hns_roce_init(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */
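
With the reworked struct hns_roce_hw above, everything generation-specific (command-queue setup, mailbox posting and completion checking, GID/MAC programming, HEM base-address programming, CQ moderation) is reached through function pointers, and the per-generation private data moves from the ops table into hr_dev->priv. A hypothetical sketch of how a backend might fill the table; the member names come from the struct above, but the example_* handlers are invented for illustration:

static const struct hns_roce_hw example_hw_ops = {
        .cmq_init         = example_cmq_init,      /* command-queue bring-up */
        .cmq_exit         = example_cmq_exit,
        .hw_profile       = example_hw_profile,    /* now returns int */
        .hw_init          = example_hw_init,
        .hw_exit          = example_hw_exit,
        .post_mbox        = example_post_mbox,     /* replaces the common HCR write path */
        .chk_mbox         = example_chk_mbox,      /* hardware-specific completion poll */
        .set_gid          = example_set_gid,
        .set_mac          = example_set_mac,
        .write_mtpt       = example_write_mtpt,
        .rereg_write_mtpt = example_rereg_write_mtpt,
        .set_hem          = example_set_hem,       /* program multi-hop BT base addresses */
        .clear_hem        = example_clear_hem,
        .modify_cq        = example_modify_cq,
        /* ... remaining ops (set_mtu, write_cqc, query_qp, modify_qp, poll_cq,
         *     dereg_mr, destroy_cq) filled in the same way ... */
};

The common code then drives the hardware only through these hooks, as in hns_roce_cmd_mbox_post_hw() above calling hr_dev->hw->post_mbox() instead of writing the HCR registers directly.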
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index b0f43735de1a..d184431e2bf5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -558,7 +558,7 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
writel(eqshift_val, eqc);
/* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
/*
* Configure eq extended address 45~49 bit.
@@ -572,13 +572,13 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
roce_set_field(eqcuridx_val,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, (u8 *)eqc + 8);
+ writel(eqcuridx_val, eqc + 8);
/* Configure eq consumer index */
roce_set_field(eqconsindx_val,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, (u8 *)eqc + 0xc);
+ writel(eqconsindx_val, eqc + 0xc);
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c5104e0b2916..8b733a66fae5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,165 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
-#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
-
#define DMA_ADDR_T_SHIFT 12
#define BT_BA_SHIFT 32
-struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
- gfp_t gfp_mask)
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+{
+ if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
+ (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
+
+static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
+ u32 bt_chunk_num)
+{
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (hem[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
+{
+ int i;
+
+ for (i = 0; i < bt_chunk_num; i++)
+ if (bt[start_idx + i])
+ return false;
+
+ return true;
+}
+
+static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
+{
+ if (check_whether_bt_num_3(table_type, hop_num))
+ return 3;
+ else if (check_whether_bt_num_2(table_type, hop_num))
+ return 2;
+ else if (check_whether_bt_num_1(table_type, hop_num))
+ return 1;
+ else
+ return 0;
+}
+
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop)
+{
+ struct device *dev = hr_dev->dev;
+ u32 chunk_ba_num;
+ u32 table_idx;
+ u32 bt_num;
+ u32 chunk_size;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
+ mhop->hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
+ mhop->hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
+ mhop->hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_MTT:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+ mhop->hop_num = hr_dev->caps.mtt_hop_num;
+ break;
+ case HEM_TYPE_CQE:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
+ + PAGE_SHIFT);
+ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ + PAGE_SHIFT);
+ mhop->ba_l0_num = mhop->bt_chunk_size / 8;
+ mhop->hop_num = hr_dev->caps.cqe_hop_num;
+ break;
+ default:
+ dev_err(dev, "Table %d not support multi-hop addressing!\n",
+ table->type);
+ return -EINVAL;
+ }
+
+ if (!obj)
+ return 0;
+
+ /*
+ * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+ chunk_ba_num = mhop->bt_chunk_size / 8;
+ chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
+ mhop->bt_chunk_size;
+ table_idx = (*obj & (table->num_obj - 1)) /
+ (chunk_size / table->obj_size);
+ switch (bt_num) {
+ case 3:
+ mhop->l2_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
+ break;
+ case 2:
+ mhop->l1_idx = table_idx & (chunk_ba_num - 1);
+ mhop->l0_idx = table_idx / chunk_ba_num;
+ break;
+ case 1:
+ mhop->l0_idx = table_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+ if (mhop->l0_idx >= mhop->ba_l0_num)
+ mhop->l0_idx %= mhop->ba_l0_num;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);
+
+static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
+ int npages,
+ unsigned long hem_alloc_size,
+ gfp_t gfp_mask)
{
struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
@@ -61,7 +212,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
hem->refcount = 0;
INIT_LIST_HEAD(&hem->chunk_list);
- order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
+ order = get_order(hem_alloc_size);
while (npages > 0) {
if (!chunk) {
@@ -84,7 +235,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
* memory, directly return fail.
*/
mem = &chunk->mem[chunk->npages];
- buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+ buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
&sg_dma_address(mem), gfp_mask);
if (!buf)
goto fail;
@@ -115,7 +266,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev,
+ dma_free_coherent(hr_dev->dev,
chunk->mem[i].length,
lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
@@ -128,8 +279,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
spinlock_t *lock = &hr_dev->bt_cmd_lock;
+ struct device *dev = hr_dev->dev;
unsigned long end = 0;
unsigned long flags;
struct hns_roce_hem_iter iter;
@@ -142,7 +293,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
/* Find the HEM(Hardware Entry Memory) entry */
unsigned long i = (obj & (table->num_obj - 1)) /
- (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+ (table->table_chunk_size / table->obj_size);
switch (table->type) {
case HEM_TYPE_QPC:
@@ -209,14 +360,185 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
+static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem_iter iter;
+ u32 buf_chunk_size;
+ u32 bt_chunk_size;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u32 size;
+ u32 bt_num;
+ u64 hem_idx;
+ u64 bt_l1_idx = 0;
+ u64 bt_l0_idx = 0;
+ u64 bt_ba;
+ unsigned long mhop_obj = obj;
+ int bt_l1_allocated = 0;
+ int bt_l0_allocated = 0;
+ int step_idx;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ if (ret)
+ return ret;
+
+ buf_chunk_size = mhop.buf_chunk_size;
+ bt_chunk_size = mhop.bt_chunk_size;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = bt_chunk_size / 8;
+
+ bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ switch (bt_num) {
+ case 3:
+ hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+ bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ bt_l0_idx = mhop.l0_idx;
+ break;
+ case 2:
+ hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ bt_l0_idx = mhop.l0_idx;
+ break;
+ case 1:
+ hem_idx = mhop.l0_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, hop_num);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (table->hem[hem_idx]) {
+ ++table->hem[hem_idx]->refcount;
+ goto out;
+ }
+
+ /* alloc L1 BA's chunk */
+ if ((check_whether_bt_num_3(table->type, hop_num) ||
+ check_whether_bt_num_2(table->type, hop_num)) &&
+ !table->bt_l0[bt_l0_idx]) {
+ table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+ &(table->bt_l0_dma_addr[bt_l0_idx]),
+ GFP_KERNEL);
+ if (!table->bt_l0[bt_l0_idx]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ bt_l0_allocated = 1;
+
+ /* set base address to hardware */
+ if (table->type < HEM_TYPE_MTT) {
+ step_idx = 0;
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_dma_alloc_l1;
+ }
+ }
+ }
+
+ /* alloc L2 BA's chunk */
+ if (check_whether_bt_num_3(table->type, hop_num) &&
+ !table->bt_l1[bt_l1_idx]) {
+ table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
+ &(table->bt_l1_dma_addr[bt_l1_idx]),
+ GFP_KERNEL);
+ if (!table->bt_l1[bt_l1_idx]) {
+ ret = -ENOMEM;
+ goto err_dma_alloc_l1;
+ }
+ bt_l1_allocated = 1;
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
+ table->bt_l1_dma_addr[bt_l1_idx];
+
+ /* set base address to hardware */
+ step_idx = 1;
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_alloc_hem_buf;
+ }
+ }
+
+ /*
+ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+ table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
+ size >> PAGE_SHIFT,
+ size,
+ (table->lowmem ? GFP_KERNEL :
+ GFP_HIGHUSER) | __GFP_NOWARN);
+ if (!table->hem[hem_idx]) {
+ ret = -ENOMEM;
+ goto err_alloc_hem_buf;
+ }
+
+ hns_roce_hem_first(table->hem[hem_idx], &iter);
+ bt_ba = hns_roce_hem_addr(&iter);
+
+ if (table->type < HEM_TYPE_MTT) {
+ if (hop_num == 2) {
+ *(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
+ step_idx = 2;
+ } else if (hop_num == 1) {
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+ step_idx = 1;
+ } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+ step_idx = 0;
+ }
+
+ /* set HEM base address to hardware */
+ if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
+ ret = -ENODEV;
+ dev_err(dev, "set HEM base address to HW failed!\n");
+ goto err_alloc_hem_buf;
+ }
+ } else if (hop_num == 2) {
+ *(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
+ }
+
+ ++table->hem[hem_idx]->refcount;
+ goto out;
+
+err_alloc_hem_buf:
+ if (bt_l1_allocated) {
+ dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
+ table->bt_l1_dma_addr[bt_l1_idx]);
+ table->bt_l1[bt_l1_idx] = NULL;
+ }
+
+err_dma_alloc_l1:
+ if (bt_l0_allocated) {
+ dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
+ table->bt_l0_dma_addr[bt_l0_idx]);
+ table->bt_l0[bt_l0_idx] = NULL;
+ }
+
+out:
+ mutex_unlock(&table->mutex);
+ return ret;
+}
+
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret = 0;
unsigned long i;
- i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
+ if (hns_roce_check_whether_mhop(hr_dev, table->type))
+ return hns_roce_table_mhop_get(hr_dev, table, obj);
+
+ i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
table->obj_size);
mutex_lock(&table->mutex);
@@ -227,7 +549,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
- HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+ table->table_chunk_size >> PAGE_SHIFT,
+ table->table_chunk_size,
(table->lowmem ? GFP_KERNEL :
GFP_HIGHUSER) | __GFP_NOWARN);
if (!table->hem[i]) {
@@ -237,6 +560,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
/* Set HEM base address(128K/page, pa) to Hardware */
if (hns_roce_set_hem(hr_dev, table, obj)) {
+ hns_roce_free_hem(hr_dev, table->hem[i]);
+ table->hem[i] = NULL;
ret = -ENODEV;
dev_err(dev, "set HEM base address to HW failed.\n");
goto out;
@@ -248,20 +573,139 @@ out:
return ret;
}
+static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj,
+ int check_refcount)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_hem_mhop mhop;
+ unsigned long mhop_obj = obj;
+ u32 bt_chunk_size;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u32 start_idx;
+ u32 bt_num;
+ u64 hem_idx;
+ u64 bt_l1_idx = 0;
+ int ret;
+
+ ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ if (ret)
+ return;
+
+ bt_chunk_size = mhop.bt_chunk_size;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = bt_chunk_size / 8;
+
+ bt_num = hns_roce_get_bt_num(table->type, hop_num);
+ switch (bt_num) {
+ case 3:
+ hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
+ bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ break;
+ case 2:
+ hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
+ break;
+ case 1:
+ hem_idx = mhop.l0_idx;
+ break;
+ default:
+ dev_err(dev, "Table %d not support hop_num = %d!\n",
+ table->type, hop_num);
+ return;
+ }
+
+ mutex_lock(&table->mutex);
+
+ if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
+ mutex_unlock(&table->mutex);
+ return;
+ }
+
+ if (table->type < HEM_TYPE_MTT && hop_num == 1) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ } else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ } else if (table->type < HEM_TYPE_MTT &&
+ hop_num == HNS_ROCE_HOP_NUM_0) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+ }
+
+ /*
+ * free buffer space chunk for QPC/MTPT/CQC/SRQC.
+ * free bt space chunk for MTT/CQE.
+ */
+ hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+ table->hem[hem_idx] = NULL;
+
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ start_idx = mhop.l0_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+ chunk_ba_num)) {
+ if (table->type < HEM_TYPE_MTT &&
+ hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l0[mhop.l0_idx],
+ table->bt_l0_dma_addr[mhop.l0_idx]);
+ table->bt_l0[mhop.l0_idx] = NULL;
+ }
+ } else if (check_whether_bt_num_3(table->type, hop_num)) {
+ start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+ chunk_ba_num)) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l1[bt_l1_idx],
+ table->bt_l1_dma_addr[bt_l1_idx]);
+ table->bt_l1[bt_l1_idx] = NULL;
+
+ start_idx = mhop.l0_idx * chunk_ba_num;
+ if (hns_roce_check_bt_null(table->bt_l1, start_idx,
+ chunk_ba_num)) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj,
+ 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+ dma_free_coherent(dev, bt_chunk_size,
+ table->bt_l0[mhop.l0_idx],
+ table->bt_l0_dma_addr[mhop.l0_idx]);
+ table->bt_l0[mhop.l0_idx] = NULL;
+ }
+ }
+ }
+
+ mutex_unlock(&table->mutex);
+}
+
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
unsigned long i;
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_table_mhop_put(hr_dev, table, obj, 1);
+ return;
+ }
+
i = (obj & (table->num_obj - 1)) /
- (HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
+ (table->table_chunk_size / table->obj_size);
mutex_lock(&table->mutex);
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
- if (hr_dev->hw->clear_hem(hr_dev, table, obj))
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -271,23 +715,48 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
mutex_unlock(&table->mutex);
}
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
- dma_addr_t *dma_handle)
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+ unsigned long obj, dma_addr_t *dma_handle)
{
struct hns_roce_hem_chunk *chunk;
- unsigned long idx;
- int i;
- int offset, dma_offset;
+ struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
struct page *page = NULL;
+ unsigned long mhop_obj = obj;
+ unsigned long obj_per_chunk;
+ unsigned long idx_offset;
+ int offset, dma_offset;
+ int i, j;
+ u32 hem_idx = 0;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
- idx = (obj & (table->num_obj - 1)) * table->obj_size;
- hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
- dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ obj_per_chunk = table->table_chunk_size / table->obj_size;
+ hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
+ idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
+ dma_offset = offset = idx_offset * table->obj_size;
+ } else {
+ hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ /* mtt mhop */
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ if (mhop.hop_num == 2)
+ hem_idx = i * (mhop.bt_chunk_size / 8) + j;
+ else if (mhop.hop_num == 1 ||
+ mhop.hop_num == HNS_ROCE_HOP_NUM_0)
+ hem_idx = i;
+
+ hem = table->hem[hem_idx];
+ dma_offset = offset = (obj & (table->num_obj - 1)) *
+ table->obj_size % mhop.bt_chunk_size;
+ if (mhop.hop_num == 2)
+ dma_offset = offset = 0;
+ }
if (!hem)
goto out;
@@ -314,14 +783,21 @@ out:
mutex_unlock(&table->mutex);
return page ? lowmem_page_address(page) + offset : NULL;
}
+EXPORT_SYMBOL_GPL(hns_roce_table_find);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
- unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
- unsigned long i = 0;
- int ret = 0;
+ struct hns_roce_hem_mhop mhop;
+ unsigned long inc = table->table_chunk_size / table->obj_size;
+ unsigned long i;
+ int ret;
+
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ inc = mhop.bt_chunk_size / table->obj_size;
+ }
/* Allocate MTT entry memory according to chunk(128K) */
for (i = start; i <= end; i += inc) {
@@ -344,10 +820,16 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long start, unsigned long end)
{
+ struct hns_roce_hem_mhop mhop;
+ unsigned long inc = table->table_chunk_size / table->obj_size;
unsigned long i;
- for (i = start; i <= end;
- i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ inc = mhop.bt_chunk_size / table->obj_size;
+ }
+
+ for (i = start; i <= end; i += inc)
hns_roce_table_put(hr_dev, table, i);
}
@@ -356,15 +838,120 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
unsigned long obj_size, unsigned long nobj,
int use_lowmem)
{
+ struct device *dev = hr_dev->dev;
unsigned long obj_per_chunk;
unsigned long num_hem;
- obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
- num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ if (!hns_roce_check_whether_mhop(hr_dev, type)) {
+ table->table_chunk_size = hr_dev->caps.chunk_sz;
+ obj_per_chunk = table->table_chunk_size / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
+ if (!table->hem)
+ return -ENOMEM;
+ } else {
+ unsigned long buf_chunk_size;
+ unsigned long bt_chunk_size;
+ unsigned long bt_chunk_num;
+ unsigned long num_bt_l0 = 0;
+ u32 hop_num;
+
+ switch (type) {
+ case HEM_TYPE_QPC:
+ buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.qpc_bt_num;
+ hop_num = hr_dev->caps.qpc_hop_num;
+ break;
+ case HEM_TYPE_MTPT:
+ buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.mpt_bt_num;
+ hop_num = hr_dev->caps.mpt_hop_num;
+ break;
+ case HEM_TYPE_CQC:
+ buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.cqc_bt_num;
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
+ + PAGE_SHIFT);
+ num_bt_l0 = hr_dev->caps.srqc_bt_num;
+ hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ case HEM_TYPE_MTT:
+ buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = buf_chunk_size;
+ hop_num = hr_dev->caps.mtt_hop_num;
+ break;
+ case HEM_TYPE_CQE:
+ buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
+ + PAGE_SHIFT);
+ bt_chunk_size = buf_chunk_size;
+ hop_num = hr_dev->caps.cqe_hop_num;
+ break;
+ default:
+ dev_err(dev,
+ "Table %d not support to init hem table here!\n",
+ type);
+ return -EINVAL;
+ }
+ obj_per_chunk = buf_chunk_size / obj_size;
+ num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ bt_chunk_num = bt_chunk_size / 8;
+ if (table->type >= HEM_TYPE_MTT)
+ num_bt_l0 = bt_chunk_num;
+
+ table->hem = kcalloc(num_hem, sizeof(*table->hem),
+ GFP_KERNEL);
+ if (!table->hem)
+ goto err_kcalloc_hem_buf;
+
+ if (check_whether_bt_num_3(table->type, hop_num)) {
+ unsigned long num_bt_l1;
+
+ num_bt_l1 = (num_hem + bt_chunk_num - 1) /
+ bt_chunk_num;
+ table->bt_l1 = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1),
+ GFP_KERNEL);
+ if (!table->bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ table->bt_l1_dma_addr = kcalloc(num_bt_l1,
+ sizeof(*table->bt_l1_dma_addr),
+ GFP_KERNEL);
+
+ if (!table->bt_l1_dma_addr)
+ goto err_kcalloc_l1_dma;
+ }
- table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
- if (!table->hem)
- return -ENOMEM;
+ if (check_whether_bt_num_2(table->type, hop_num) ||
+ check_whether_bt_num_3(table->type, hop_num)) {
+ table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+ GFP_KERNEL);
+ if (!table->bt_l0)
+ goto err_kcalloc_bt_l0;
+
+ table->bt_l0_dma_addr = kcalloc(num_bt_l0,
+ sizeof(*table->bt_l0_dma_addr),
+ GFP_KERNEL);
+ if (!table->bt_l0_dma_addr)
+ goto err_kcalloc_l0_dma;
+ }
+ }
table->type = type;
table->num_hem = num_hem;
@@ -374,18 +961,72 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
mutex_init(&table->mutex);
return 0;
+
+err_kcalloc_l0_dma:
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+
+err_kcalloc_bt_l0:
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(table->hem);
+ table->hem = NULL;
+
+err_kcalloc_hem_buf:
+ return -ENOMEM;
+}
+
+static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table)
+{
+ struct hns_roce_hem_mhop mhop;
+ u32 buf_chunk_size;
+ int i;
+ u64 obj;
+
+ hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
+ buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
+ mhop.bt_chunk_size;
+
+ for (i = 0; i < table->num_hem; ++i) {
+ obj = i * buf_chunk_size / table->obj_size;
+ if (table->hem[i])
+ hns_roce_table_mhop_put(hr_dev, table, obj, 0);
+ }
+
+ kfree(table->hem);
+ table->hem = NULL;
+ kfree(table->bt_l1);
+ table->bt_l1 = NULL;
+ kfree(table->bt_l1_dma_addr);
+ table->bt_l1_dma_addr = NULL;
+ kfree(table->bt_l0);
+ table->bt_l0 = NULL;
+ kfree(table->bt_l0_dma_addr);
+ table->bt_l0_dma_addr = NULL;
}
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
unsigned long i;
+ if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
+ hns_roce_cleanup_mhop_hem_table(hr_dev, table);
+ return;
+ }
+
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
- i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
+ i * table->table_chunk_size / table->obj_size, 0))
dev_err(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -398,7 +1039,13 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table);
}
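
hns_roce_calc_hem_mhop() above reduces a flat object index to a per-chunk table index and then splits it into up to three base-address-table indices by repeated division by chunk_ba_num (the number of 8-byte base addresses per BT chunk). A self-contained sketch of the three-level split, assuming chunk_ba_num is a power of two as in the driver:

/* Decompose table_idx into L0/L1/L2 indices the way the bt_num == 3 case does. */
static void mhop_split_idx(unsigned long table_idx, unsigned long chunk_ba_num,
                           unsigned long *l0, unsigned long *l1, unsigned long *l2)
{
        *l2 = table_idx & (chunk_ba_num - 1);
        *l1 = (table_idx / chunk_ba_num) & (chunk_ba_num - 1);
        *l0 = table_idx / chunk_ba_num / chunk_ba_num;
}

For a 4 KiB BT chunk (chunk_ba_num = 512) and table_idx = 1000000, this yields l0 = 3, l1 = 417, l2 = 64; hns_roce_table_mhop_get() then walks L0 -> L1 -> L2, allocating any missing chunks, before installing the HEM chunk's base address.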
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 435748858252..db66db12075e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -47,13 +47,27 @@ enum {
/* UNMAP HEM */
HEM_TYPE_MTT,
+ HEM_TYPE_CQE,
HEM_TYPE_IRRL,
+ HEM_TYPE_TRRL,
};
#define HNS_ROCE_HEM_CHUNK_LEN \
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist)))
+#define check_whether_bt_num_3(type, hop_num) \
+ (type < HEM_TYPE_MTT && hop_num == 2)
+
+#define check_whether_bt_num_2(type, hop_num) \
+ ((type < HEM_TYPE_MTT && hop_num == 1) || \
+ (type >= HEM_TYPE_MTT && hop_num == 2))
+
+#define check_whether_bt_num_1(type, hop_num) \
+ ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (type >= HEM_TYPE_MTT && hop_num == 1) || \
+ (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+
enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12,
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
@@ -77,12 +91,23 @@ struct hns_roce_hem_iter {
int page_idx;
};
+struct hns_roce_hem_mhop {
+ u32 hop_num;
+ u32 buf_chunk_size;
+ u32 bt_chunk_size;
+ u32 ba_l0_num;
+ u32 l0_idx;/* level 0 base address table index */
+ u32 l1_idx;/* level 1 base address table index */
+ u32 l2_idx;/* level 2 base address table index */
+};
+
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj);
-void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
+void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
@@ -97,6 +122,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
+int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, unsigned long *obj,
+ struct hns_roce_hem_mhop *mhop);
+bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
@@ -105,7 +134,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
+ iter->page_idx = 0;
}
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 747efd1ae5a6..af27168faf0f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -34,6 +34,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
@@ -56,8 +57,8 @@ static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
rseg->len = 0;
}
-int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
+static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
@@ -316,8 +317,8 @@ out:
return ret;
}
-int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
{
int ret = 0;
int nreq = 0;
@@ -472,7 +473,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
dma_addr_t sdb_dma_addr;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend SDB threshold */
@@ -511,7 +512,7 @@ static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
dma_addr_t odb_dma_addr;
u32 val;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
/* Configure extend ODB threshold */
@@ -547,7 +548,7 @@ static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
dma_addr_t odb_dma_addr;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
@@ -668,7 +669,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
u8 port = 0;
u8 sl;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
/* Reserved cq for loop qp */
@@ -816,7 +817,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
int ret;
int i;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
@@ -850,7 +851,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
u32 odb_evt_mod;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
memset(db, 0, sizeof(*db));
@@ -876,7 +877,7 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
return 0;
}
-void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
struct hns_roce_recreate_lp_qp_work *lp_qp_work;
struct hns_roce_dev *hr_dev;
@@ -906,11 +907,13 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
unsigned long end =
msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
GFP_KERNEL);
+ if (!lp_qp_work)
+ return -ENOMEM;
INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
@@ -982,7 +985,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
hr_dev = to_hr_dev(mr_work->ib_dev);
dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
mr_free_cq = free_mr->mr_free_cq;
@@ -1001,6 +1004,11 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
}
}
+ if (!ne) {
+ dev_err(dev, "Reserved loop qp is absent!\n");
+ goto free_work;
+ }
+
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
if (ret < 0) {
@@ -1025,7 +1033,8 @@ free_work:
kfree(mr_work);
}
-int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr_free_work *mr_work;
@@ -1038,7 +1047,7 @@ int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
int npages;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
if (mr->enabled) {
@@ -1103,7 +1112,7 @@ static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_db_table *db;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
db = &priv->db_table;
if (db->sdb_ext_mod) {
@@ -1133,7 +1142,7 @@ static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
struct hns_roce_raq_table *raq;
struct device *dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
@@ -1210,7 +1219,7 @@ static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_raq_table *raq;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
raq = &priv->raq_table;
dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
@@ -1244,7 +1253,7 @@ static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
@@ -1286,7 +1295,7 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
@@ -1304,7 +1313,7 @@ static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
/*
@@ -1330,7 +1339,7 @@ static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_buf_list *tptr_buf;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
@@ -1344,7 +1353,7 @@ static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
int ret = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
@@ -1368,7 +1377,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
struct hns_roce_free_mr *free_mr;
struct hns_roce_v1_priv *priv;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
free_mr = &priv->free_mr;
flush_workqueue(free_mr->free_mr_wq);
@@ -1383,7 +1392,7 @@ static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
* @enable: true -- drop reset, false -- reset
* return 0 - success , negative --fail
*/
-int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
+static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
struct device_node *dsaf_node;
struct device *dev = &hr_dev->pdev->dev;
@@ -1432,7 +1441,7 @@ static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 1;
@@ -1450,7 +1459,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
struct hns_roce_v1_priv *priv;
struct hns_roce_des_qp *des_qp;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
des_qp = &priv->des_qp;
des_qp->requeue_flag = 0;
@@ -1458,7 +1467,7 @@ static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
destroy_workqueue(des_qp->qp_wq);
}
-void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
int i = 0;
struct hns_roce_caps *caps = &hr_dev->caps;
@@ -1474,7 +1483,9 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
+ caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
+ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
@@ -1503,6 +1514,7 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
+ caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
for (i = 0; i < caps->num_ports; i++)
caps->pkey_table_len[i] = 1;
@@ -1524,9 +1536,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
+
+ return 0;
}
-int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
+static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
int ret;
u32 val;
@@ -1605,7 +1619,7 @@ error_failed_raq_init:
return ret;
}
-void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
+static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_free_mr_free(hr_dev);
@@ -1616,8 +1630,82 @@ void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
hns_roce_db_free(hr_dev);
}
-void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
- union ib_gid *gid)
+static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
+
+ return (!!(status & (1 << HCR_GO_BIT)));
+}
+
+static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
+{
+ u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
+ unsigned long end;
+ u32 val = 0;
+
+ end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (hns_roce_v1_cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
+ (int)jiffies, (int)end);
+ return -EAGAIN;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
+ op);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
+ ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
+ roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
+ roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
+ ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+ __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+ /* Memory barrier */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val), hcr + 5);
+
+ mmiowb();
+
+ return 0;
+}
+
+static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
+ unsigned long timeout)
+{
+ u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
+ unsigned long end = 0;
+ u32 status = 0;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (hns_roce_v1_cmd_pending(hr_dev)) {
+ dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ return -ETIMEDOUT;
+ }
+
+ status = le32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET));
+ if ((status & STATUS_MASK) != 0x1) {
+ dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+
+ return 0;
+}
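+
Both mailbox helpers above revolve around the GO bit in ROCEE_MB6_REG: post_mbox waits for it to clear before writing the payload and the control word, and chk_mbox polls the same bit against a jiffies deadline before reading the status byte. A user-space sketch of that poll-with-deadline idiom, with hw_busy() standing in for the GO-bit read:

        #include <stdbool.h>
        #include <stdio.h>
        #include <time.h>

        static int busy_polls = 3;
        static bool hw_busy(void) { return busy_polls-- > 0; }  /* GO bit clears after a few polls */

        int main(void)
        {
                struct timespec now;
                time_t deadline;

                clock_gettime(CLOCK_MONOTONIC, &now);
                deadline = now.tv_sec + 10;     /* stands in for GO_BIT_TIMEOUT_MSECS */

                while (hw_busy()) {
                        clock_gettime(CLOCK_MONOTONIC, &now);
                        if (now.tv_sec > deadline) {
                                puts("timed out waiting for the GO bit");
                                return 1;
                        }
                }
                puts("mailbox free, payload can be written");
                return 0;
        }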
+
+static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+ int gid_index, union ib_gid *gid,
+ const struct ib_gid_attr *attr)
{
u32 *p = NULL;
u8 gid_idx = 0;
@@ -1639,9 +1727,12 @@ void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
p = (u32 *)&gid->raw[0xc];
roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
(HNS_ROCE_V1_GID_NUM * gid_idx));
+
+ return 0;
}
-void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
+static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ u8 *addr)
{
u32 reg_smac_l;
u16 reg_smac_h;
@@ -1654,8 +1745,13 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
* because of smac not equal to dmac.
* We Need to release and create reserved qp again.
*/
- if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
- dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+ if (hr_dev->hw->dereg_mr) {
+ int ret;
+
+ ret = hns_roce_v1_recreate_lp_qp(hr_dev);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+ }
p = (u32 *)(&addr[0]);
reg_smac_l = *p;
@@ -1670,10 +1766,12 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
val);
+
+ return 0;
}
-void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu)
+static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
+ enum ib_mtu mtu)
{
u32 val;
@@ -1685,8 +1783,8 @@ void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
val);
}
-int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
{
struct hns_roce_v1_mpt_entry *mpt_entry;
struct scatterlist *sg;
@@ -1858,7 +1956,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
u32 doorbell[2];
@@ -1931,9 +2029,10 @@ static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
spin_unlock_irq(&hr_cq->lock);
}
-void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
- dma_addr_t dma_handle, int nent, u32 vector)
+static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf,
+ u64 *mtts, dma_addr_t dma_handle, int nent,
+ u32 vector)
{
struct hns_roce_cq_context *cq_context = NULL;
struct hns_roce_buf_list *tptr_buf;
@@ -1941,7 +2040,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
dma_addr_t tptr_dma_addr;
int offset;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
tptr_buf = &priv->tptr_table.tptr_buf;
cq_context = mb_buf;
@@ -2018,7 +2117,13 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
}
-int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
@@ -2279,8 +2384,9 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret;
}
-int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj)
+static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
{
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_v1_priv *priv;
@@ -2289,7 +2395,7 @@ int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
void __iomem *bt_cmd;
u64 bt_ba = 0;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
switch (table->type) {
case HEM_TYPE_QPC:
@@ -2441,14 +2547,14 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int rq_pa_start;
u32 reg_val;
u64 *mtts;
- u32 *addr;
+ u32 __iomem *addr;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
/* Search QP buf's MTTs */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (!mtts) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2523,8 +2629,9 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
/* Copy context to QP1C register */
- addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
+ addr = (u32 __iomem *)(hr_dev->reg_base +
+ ROCEE_QP1C_CFG0_0_REG +
+ hr_qp->phy_port * sizeof(*context));
writel(context->qp1c_bytes_4, addr);
writel(context->sq_rq_bt_l, addr + 1);
@@ -2595,7 +2702,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return -ENOMEM;
/* Search qp buf's mtts */
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
hr_qp->mtt.first_seg, &dma_handle);
if (mtts == NULL) {
dev_err(dev, "qp buf pa find failed\n");
@@ -2603,8 +2710,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
}
/* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
- &dma_handle_2);
+ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &dma_handle_2);
if (mtts_2 == NULL) {
dev_err(dev, "qp irrl_table find failed\n");
goto out;
@@ -2800,10 +2907,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
- attr->dest_qp_num);
+ if (attr_mask & IB_QP_DEST_QPN)
+ roce_set_field(context->qpc_bytes_36,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
+ QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
+ attr->dest_qp_num);
/* Configure GID index */
port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -3143,7 +3251,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (ibqp->uobject) {
hr_qp->rq.db_reg_l = hr_dev->reg_base +
- ROCEE_DB_OTHERS_L_0_REG +
+ hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
}
@@ -3177,9 +3285,10 @@ out:
return ret;
}
-int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
+static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
{
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
@@ -3270,6 +3379,7 @@ static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->path_mtu = IB_MTU_256;
qp_attr->path_mig_state = IB_MIG_ARMED;
qp_attr->qkey = QKEY_VAL;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
qp_attr->rq_psn = 0;
qp_attr->sq_psn = 0;
qp_attr->dest_qp_num = 1;
@@ -3351,6 +3461,7 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
QP_CONTEXT_QPC_BYTES_48_MTU_M,
QP_CONTEXT_QPC_BYTES_48_MTU_S);
qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
if (hr_qp->ibqp.qp_type == IB_QPT_UD)
qp_attr->qkey = QKEY_VAL;
@@ -3406,10 +3517,10 @@ static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
qp_attr->port_num = hr_qp->port + 1;
qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
+ qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
- qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
+ qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
@@ -3444,8 +3555,9 @@ out:
return ret;
}
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
{
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -3454,6 +3566,53 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}
+static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
+ u32 *old_send, u32 *old_retry,
+ u32 *tsp_st, u32 *success_flags)
+{
+ u32 sdb_retry_cnt;
+ u32 sdb_send_ptr;
+ u32 cur_cnt, old_cnt;
+ u32 send_ptr;
+
+ sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+ sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
+ cur_cnt = roce_get_field(sdb_send_ptr,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+ old_cnt = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(*old_retry,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ *success_flags = 1;
+ } else {
+ old_cnt = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+ if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
+ *success_flags = 1;
+ } else {
+ send_ptr = roce_get_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+ roce_get_field(sdb_retry_cnt,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+ ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+ roce_set_field(*old_send,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+ ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+ send_ptr);
+ }
+ }
+}
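+
This helper pulls the send-pointer bookkeeping that used to be open-coded in check_qp_db_process_status() into one place; progress is declared once the send pointer plus retry count has advanced past the recorded snapshot by more than SDB_ST_CMP_VAL. Because the counters are u32, the subtraction stays correct across wrap-around, as the small standalone check below illustrates (the threshold value is assumed for illustration):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t old_cnt = 0xfffffff0u;  /* snapshot taken just before a 32-bit wrap */
                uint32_t cur_cnt = 0x00000020u;  /* counter read after wrapping */
                uint32_t cmp_val = 8;            /* stand-in for SDB_ST_CMP_VAL */

                /* unsigned subtraction still gives the distance travelled: 0x30 */
                printf("advanced=%u past_threshold=%d\n",
                       cur_cnt - old_cnt, (cur_cnt - old_cnt) > cmp_val);
                return 0;
        }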
+
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
u32 sdb_issue_ptr,
@@ -3461,12 +3620,10 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
u32 *wait_stage)
{
struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_retry_cnt, old_retry;
u32 sdb_send_ptr, old_send;
u32 success_flags = 0;
- u32 cur_cnt, old_cnt;
unsigned long end;
- u32 send_ptr;
+ u32 old_retry;
u32 inv_cnt;
u32 tsp_st;
@@ -3524,47 +3681,9 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
- sdb_send_ptr = roce_read(hr_dev,
- ROCEE_SDB_SEND_PTR_REG);
- sdb_retry_cnt = roce_read(hr_dev,
- ROCEE_SDB_RETRY_CNT_REG);
- cur_cnt = roce_get_field(sdb_send_ptr,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- if (!roce_get_bit(tsp_st,
- ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
- old_cnt = roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(old_retry,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
- success_flags = 1;
- } else {
- old_cnt = roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt >
- SDB_ST_CMP_VAL) {
- success_flags = 1;
- } else {
- send_ptr =
- roce_get_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
- roce_get_field(sdb_retry_cnt,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
- ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
- roce_set_field(old_send,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
- ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
- send_ptr);
- }
- }
+ hns_roce_check_sdb_status(hr_dev, &old_send,
+ &old_retry, &tsp_st,
+ &success_flags);
} while (!success_flags);
}
@@ -3664,7 +3783,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
hr_dev = to_hr_dev(qp_work_entry->ib_dev);
dev = &hr_dev->pdev->dev;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
hr_qp = qp_work_entry->qp;
qpn = hr_qp->qpn;
@@ -3781,7 +3900,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
qp_work->sche_cnt = qp_work_entry.sche_cnt;
- priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
queue_work(priv->des_qp.qp_wq, &qp_work->work);
dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
}
@@ -3789,7 +3908,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
return 0;
}
-int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
@@ -3841,18 +3960,19 @@ int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
-struct hns_roce_v1_priv hr_v1_priv;
-
-struct hns_roce_hw hns_roce_hw_v1 = {
+static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
.hw_init = hns_roce_v1_init,
.hw_exit = hns_roce_v1_exit,
+ .post_mbox = hns_roce_v1_post_mbox,
+ .chk_mbox = hns_roce_v1_chk_mbox,
.set_gid = hns_roce_v1_set_gid,
.set_mac = hns_roce_v1_set_mac,
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .modify_cq = hns_roce_v1_modify_cq,
.clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
@@ -3863,5 +3983,258 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
- .priv = &hr_v1_priv,
};
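+
With the ops table now const and its .priv member gone, per-device state lives in hr_dev->priv (allocated in hns_roce_probe() further down) instead of the old global hr_v1_priv, so several instances can share one read-only ops structure. A toy sketch of that split, using stand-in types rather than the driver's:

        #include <stdlib.h>

        struct ops { int (*reset)(void *dev, int dereset); };
        struct dev { const struct ops *hw; void *priv; };

        static int do_reset(void *dev, int dereset) { (void)dev; (void)dereset; return 0; }
        static const struct ops shared_ops = { .reset = do_reset };

        int main(void)
        {
                struct dev a = { .hw = &shared_ops, .priv = calloc(1, 64) };
                struct dev b = { .hw = &shared_ops, .priv = calloc(1, 64) };

                /* both instances share the read-only ops table but own separate private state */
                a.hw->reset(&a, 1);
                b.hw->reset(&b, 1);
                free(a.priv);
                free(b.priv);
                return 0;
        }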
+
+static const struct of_device_id hns_roce_of_match[] = {
+ { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hns_roce_of_match);
+
+static const struct acpi_device_id hns_roce_acpi_match[] = {
+ { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
+
+static int hns_roce_node_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct
+platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ /* get the 'device' corresponding to the matching 'fwnode' */
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_roce_node_match);
+ /* get the platform device */
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct platform_device *pdev = NULL;
+ struct net_device *netdev = NULL;
+ struct device_node *net_node;
+ struct resource *res;
+ int port_cnt = 0;
+ u8 phy_port;
+ int ret;
+ int i;
+
+ /* check if we are compatible with the underlying SoC */
+ if (dev_of_node(dev)) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(hns_roce_of_match, dev->of_node);
+ if (!of_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific DT data!\n");
+ return -ENXIO;
+ }
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
+ if (!acpi_id) {
+ dev_err(dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+ hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
+ if (!hr_dev->hw) {
+ dev_err(dev, "couldn't get H/W specific ACPI data!\n");
+ return -ENXIO;
+ }
+ } else {
+ dev_err(dev, "can't read compatibility data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ /* get the mapped register base address */
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ hr_dev->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hr_dev->reg_base))
+ return PTR_ERR(hr_dev->reg_base);
+
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* get the RoCE associated ethernet ports or netdevices */
+ for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
+ if (dev_of_node(dev)) {
+ net_node = of_parse_phandle(dev->of_node, "eth-handle",
+ i);
+ if (!net_node)
+ continue;
+ pdev = of_find_device_by_node(net_node);
+ } else if (is_acpi_device_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+ struct fwnode_handle *fwnode;
+
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "eth-handle",
+ i, &args);
+ if (ret)
+ continue;
+ fwnode = acpi_fwnode_handle(args.adev);
+ pdev = hns_roce_find_pdev(fwnode);
+ } else {
+ dev_err(dev, "cannot read data from DT or ACPI\n");
+ return -ENXIO;
+ }
+
+ if (pdev) {
+ netdev = platform_get_drvdata(pdev);
+ phy_port = (u8)i;
+ if (netdev) {
+ hr_dev->iboe.netdevs[port_cnt] = netdev;
+ hr_dev->iboe.phy_port[port_cnt] = phy_port;
+ } else {
+ dev_err(dev, "no netdev found with pdev %s\n",
+ pdev->name);
+ return -ENODEV;
+ }
+ port_cnt++;
+ }
+ }
+
+ if (port_cnt == 0) {
+ dev_err(dev, "unable to get eth-handle for available ports!\n");
+ return -EINVAL;
+ }
+
+ hr_dev->caps.num_ports = port_cnt;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
+
+ /* read the interrupt names from the DT or ACPI */
+ ret = device_property_read_string_array(dev, "interrupt-names",
+ hr_dev->irq_names,
+ HNS_ROCE_MAX_IRQ_NUM);
+ if (ret < 0) {
+ dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
+ return ret;
+ }
+
+ /* fetch the interrupt numbers */
+ for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
+ if (hr_dev->irq[i] <= 0) {
+ dev_err(dev, "platform get of irq[=%d] failed!\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_roce_probe - RoCE driver entrance
+ * @pdev: pointer to platform device
+ * Return : int
+ *
+ */
+static int hns_roce_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hns_roce_dev *hr_dev;
+ struct device *dev = &pdev->dev;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
+ if (!hr_dev->priv) {
+ ret = -ENOMEM;
+ goto error_failed_kzalloc;
+ }
+
+ hr_dev->pdev = pdev;
+ hr_dev->dev = dev;
+ platform_set_drvdata(pdev, hr_dev);
+
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
+ dev_err(dev, "Not usable DMA addressing mode\n");
+ ret = -EIO;
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_get_cfg(hr_dev);
+ if (ret) {
+ dev_err(dev, "Get Configuration failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "RoCE engine init failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ return 0;
+
+error_failed_get_cfg:
+ kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+/**
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
+static int hns_roce_remove(struct platform_device *pdev)
+{
+ struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
+
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return 0;
+}
+
+static struct platform_driver hns_roce_driver = {
+ .probe = hns_roce_probe,
+ .remove = hns_roce_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hns_roce_of_match,
+ .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
+ },
+};
+
+module_platform_driver(hns_roce_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index b213b5e6fef1..21a07ef0afc9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -72,6 +72,8 @@
#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
+#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
+
#define HNS_ROCE_V1_EXT_RAQ_WF 8
#define HNS_ROCE_V1_RAQ_ENTRY 64
#define HNS_ROCE_V1_RAQ_DEPTH 32768
@@ -948,6 +950,11 @@ struct hns_roce_qp_context {
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
+#define STATUS_MASK 0xff
+#define GO_BIT_TIMEOUT_MSECS 10000
+#define HCR_STATUS_OFFSET 0x18
+#define HCR_GO_BIT 15
+
struct hns_roce_rq_db {
u32 u32_4;
u32 u32_8;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
new file mode 100644
index 000000000000..8f719c00467b
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -0,0 +1,3296 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/acpi.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <rdma/ib_umem.h>
+
+#include "hnae3.h"
+#include "hns_roce_common.h"
+#include "hns_roce_device.h"
+#include "hns_roce_cmd.h"
+#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
+
+static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
+ struct ib_sge *sg)
+{
+ dseg->lkey = cpu_to_le32(sg->lkey);
+ dseg->addr = cpu_to_le64(sg->addr);
+ dseg->len = cpu_to_le32(sg->length);
+}
+
+static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_v2_db sq_db;
+ unsigned int sge_ind = 0;
+ unsigned int wqe_sz = 0;
+ unsigned int owner_bit;
+ unsigned long flags;
+ unsigned int ind;
+ void *wqe = NULL;
+ int ret = 0;
+ int nreq;
+ int i;
+
+ if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+ ind = qp->sq_next_wqe;
+ sge_ind = qp->next_sge;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+ dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+ qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
+ wr->wr_id;
+
+ owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
+ owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+ rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
+ }
+
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len >
+ hr_dev->caps.max_sq_inline) {
+ ret = -EINVAL;
+ *bad_wr = wr;
+ dev_err(dev, "inline len(1-%d)=%d, illegal",
+ rc_sq_wqe->msg_len,
+ hr_dev->caps.max_sq_inline);
+ goto out;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ wqe_sz += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + i);
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++)
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + i);
+
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ set_data_seg_v2(dseg + i,
+ wr->sg_list + 2 + i);
+ sge_ind++;
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+ wqe_sz += wr->num_sge *
+ sizeof(struct hns_roce_v2_wqe_data_seg);
+ }
+ ind++;
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
+ sq_db.byte_4 = 0;
+ sq_db.parameter = 0;
+
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
+ roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+ V2_DB_PARAMETER_CONS_IDX_S,
+ qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+ hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
+
+ qp->sq_next_wqe = ind;
+ qp->next_sge = sge_ind;
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return ret;
+}
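+
Two details of the send path are easy to miss: the owner bit is derived from how many times sq.head has wrapped the power-of-two WQE ring, and the doorbell packs the producer index modulo twice the ring size. The owner-bit arithmetic can be checked in isolation (the ring size below is an example value):

        #include <stdio.h>

        int main(void)
        {
                unsigned int shift = 3;         /* ilog2 of an example 8-entry SQ ring */
                unsigned int head;

                for (head = 6; head <= 10; head++)
                        printf("head=%u owner=%u\n", head, ~(head >> shift) & 0x1);
                /* owner stays 1 for head 6..7 and flips to 0 once head crosses 8 */
                return 0;
        }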
+
+static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_v2_db rq_db;
+ unsigned long flags;
+ void *wqe = NULL;
+ int ret = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&hr_qp->rq.lock, flags);
+ ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
+
+ if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
+ hr_qp->ibqp.recv_cq)) {
+ ret = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+ dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
+ wr->num_sge, hr_qp->rq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ wqe = get_recv_wqe(hr_qp, ind);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+ for (i = 0; i < wr->num_sge; i++) {
+ if (!wr->sg_list[i].length)
+ continue;
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+
+ if (i < hr_qp->rq.max_gs) {
+ dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY);
+ dseg[i].addr = 0;
+ }
+
+ hr_qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ hr_qp->rq.head += nreq;
+ /* Memory barrier */
+ wmb();
+
+ rq_db.byte_4 = 0;
+ rq_db.parameter = 0;
+
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
+ V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
+ roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
+ V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
+ roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
+ V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
+
+ hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
+ }
+ spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
+
+ return ret;
+}
+
+static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
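+
hns_roce_cmq_space() keeps one descriptor permanently unused so that next_to_use == next_to_clean can only mean an empty ring; the modulo handles the producer wrapping past the consumer. A standalone rerun of the same math:

        #include <stdio.h>

        static int space(int ntu, int ntc, int desc_num)
        {
                int used = (ntu - ntc + desc_num) % desc_num;

                return desc_num - used - 1;
        }

        int main(void)
        {
                /* 1024-entry CSQ: an empty ring offers 1023 slots; wrap is handled by the modulo */
                printf("%d %d\n", space(0, 0, 1024), space(2, 1020, 1024));
                return 0;
        }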
+
+static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_cmq_ring *ring)
+{
+ dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ DMA_BIDIRECTIONAL);
+ kfree(ring->desc);
+}
+
+static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+ &priv->cmq.csq : &priv->cmq.crq;
+
+ ring->flag = ring_type;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ return hns_roce_alloc_cmq_desc(hr_dev, ring);
+}
+
+static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+ &priv->cmq.csq : &priv->cmq.crq;
+ dma_addr_t dma = ring->desc_dma_addr;
+
+ if (ring_type == TYPE_CSQ) {
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
+ roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+ (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+ HNS_ROCE_CMQ_ENABLE);
+ roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+ } else {
+ roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
+ roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
+ (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+ HNS_ROCE_CMQ_ENABLE);
+ roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
+ roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
+ }
+}
+
+static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ int ret;
+
+ /* Setup the queue entries for command queue */
+ priv->cmq.csq.desc_num = 1024;
+ priv->cmq.crq.desc_num = 1024;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&priv->cmq.csq.lock);
+ spin_lock_init(&priv->cmq.crq.lock);
+
+ /* Setup Tx write back timeout */
+ priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
+
+ /* Init CSQ */
+ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
+ if (ret) {
+ dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Init CRQ */
+ ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
+ if (ret) {
+ dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+ goto err_crq;
+ }
+
+ /* Init CSQ REG */
+ hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
+
+ /* Init CRQ REG */
+ hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
+
+ return 0;
+
+err_crq:
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+
+ return ret;
+}
+
+static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+ hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
+}
+
+static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
+ enum hns_roce_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag =
+ cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+}
+
+static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+
+ return head == priv->cmq.csq.next_to_use;
+}
+
+static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ struct hns_roce_cmq_desc *desc;
+ u16 ntc = csq->next_to_clean;
+ u32 head;
+ int clean = 0;
+
+ desc = &csq->desc[ntc];
+ head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+ while (head != ntc) {
+ memset(desc, 0, sizeof(*desc));
+ ntc++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ desc = &csq->desc[ntc];
+ clean++;
+ }
+ csq->next_to_clean = ntc;
+
+ return clean;
+}
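+
The clean routine walks from the software clean pointer up to the head index the device wrote back, zeroing each retired descriptor and wrapping at desc_num. The counting logic on its own, with example pointer values:

        #include <stdio.h>

        int main(void)
        {
                int desc_num = 1024;
                int ntc = 1020;         /* software next_to_clean */
                int head = 3;           /* head written back by the device */
                int clean = 0;

                while (head != ntc) {
                        ntc = (ntc + 1) % desc_num;
                        clean++;
                }
                printf("cleaned %d descriptors\n", clean);  /* 7: entries 1021..1023 and 0..3 */
                return 0;
        }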
+
+static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ struct hns_roce_cmq_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ u16 desc_ret;
+ int ret = 0;
+ int ntc;
+
+ spin_lock_bh(&csq->lock);
+
+ if (num > hns_roce_cmq_space(csq)) {
+ spin_unlock_bh(&csq->lock);
+ return -EBUSY;
+ }
+
+ /*
+ * Record the location of desc in the cmq for this time
+ * which will be use for hardware to write back
+ */
+ ntc = csq->next_to_use;
+
+ while (handle < num) {
+ desc_to_use = &csq->desc[csq->next_to_use];
+ *desc_to_use = desc[handle];
+ dev_dbg(hr_dev->dev, "set cmq desc:\n");
+ csq->next_to_use++;
+ if (csq->next_to_use == csq->desc_num)
+ csq->next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+
+ /*
+ * If the command is sync, wait for the firmware to write back,
+ * if multi descriptors to be sent, use the first one to check
+ */
+ if ((desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
+ do {
+ if (hns_roce_cmq_csq_done(hr_dev))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < priv->cmq.tx_timeout);
+ }
+
+ if (hns_roce_cmq_csq_done(hr_dev)) {
+ complete = true;
+ handle = 0;
+ while (handle < num) {
+ /* get the result of hardware write back */
+ desc_to_use = &csq->desc[ntc];
+ desc[handle] = *desc_to_use;
+ dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+ desc_ret = desc[handle].retval;
+ if (desc_ret == CMD_EXEC_SUCCESS)
+ ret = 0;
+ else
+ ret = -EIO;
+ priv->cmq.last_status = desc_ret;
+ ntc++;
+ handle++;
+ if (ntc == csq->desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ ret = -EAGAIN;
+
+ /* clean the command send queue */
+ handle = hns_roce_cmq_csq_clean(hr_dev);
+ if (handle != num)
+ dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
+ handle, num);
+
+ spin_unlock_bh(&csq->lock);
+
+ return ret;
+}
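+
The synchronous flow above is paired with hns_roce_cmq_query_hw_info() just below, which shows the expected calling pattern: build a read descriptor with hns_roce_cmq_setup_basic_desc(..., true), submit it through hns_roce_cmq_send(), then decode the firmware's answer out of desc.data.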
+
+static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_version *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_version *)desc.data;
+ hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
+ hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+
+ return 0;
+}
+
+static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cfg_global_param *req;
+ struct hns_roce_cmq_desc desc;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
+ false);
+
+ req = (struct hns_roce_cfg_global_param *)desc.data;
+ memset(req, 0, sizeof(*req));
+ roce_set_field(req->time_cfg_udp_port,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
+ roce_set_field(req->time_cfg_udp_port,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
+ CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
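+
For reference, 0x12b7 is decimal 4791, the IANA-assigned RoCEv2 UDP destination port, and 0x3e8 is decimal 1000, the value written into the 1 us time-reference field.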
+
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_pf_res *res;
+ int ret;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_PF_RES, true);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, 2);
+ if (ret)
+ return ret;
+
+ res = (struct hns_roce_pf_res *)desc[0].data;
+
+ hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
+ PF_RES_DATA_1_PF_QPC_BT_NUM_M,
+ PF_RES_DATA_1_PF_QPC_BT_NUM_S);
+ hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
+ PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
+ PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
+ hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
+ PF_RES_DATA_3_PF_CQC_BT_NUM_M,
+ PF_RES_DATA_3_PF_CQC_BT_NUM_S);
+ hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
+ PF_RES_DATA_4_PF_MPT_BT_NUM_M,
+ PF_RES_DATA_4_PF_MPT_BT_NUM_S);
+
+ return 0;
+}
+
+static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_cmq_desc desc[2];
+ struct hns_roce_vf_res_a *req_a;
+ struct hns_roce_vf_res_b *req_b;
+ int i;
+
+ req_a = (struct hns_roce_vf_res_a *)desc[0].data;
+ req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+ memset(req_a, 0, sizeof(*req_a));
+ memset(req_b, 0, sizeof(*req_b));
+ for (i = 0; i < 2; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_ALLOC_VF_RES, false);
+
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ if (i == 0) {
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_qpc_bt_idx_num,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
+ VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
+ HNS_ROCE_VF_QPC_BT_NUM);
+
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_srqc_bt_idx_num,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
+ VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
+ HNS_ROCE_VF_SRQC_BT_NUM);
+
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_cqc_bt_idx_num,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
+ VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
+ HNS_ROCE_VF_CQC_BT_NUM);
+
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
+ roce_set_field(req_a->vf_mpt_bt_idx_num,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
+ VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
+ HNS_ROCE_VF_MPT_BT_NUM);
+
+ roce_set_field(req_a->vf_eqc_bt_idx_num,
+ VF_RES_A_DATA_5_VF_EQC_IDX_M,
+ VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
+ roce_set_field(req_a->vf_eqc_bt_idx_num,
+ VF_RES_A_DATA_5_VF_EQC_NUM_M,
+ VF_RES_A_DATA_5_VF_EQC_NUM_S,
+ HNS_ROCE_VF_EQC_NUM);
+ } else {
+ roce_set_field(req_b->vf_smac_idx_num,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_M,
+ VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
+ roce_set_field(req_b->vf_smac_idx_num,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_M,
+ VF_RES_B_DATA_1_VF_SMAC_NUM_S,
+ HNS_ROCE_VF_SMAC_NUM);
+
+ roce_set_field(req_b->vf_sgid_idx_num,
+ VF_RES_B_DATA_2_VF_SGID_IDX_M,
+ VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
+ roce_set_field(req_b->vf_sgid_idx_num,
+ VF_RES_B_DATA_2_VF_SGID_NUM_M,
+ VF_RES_B_DATA_2_VF_SGID_NUM_S,
+ HNS_ROCE_VF_SGID_NUM);
+
+ roce_set_field(req_b->vf_qid_idx_sl_num,
+ VF_RES_B_DATA_3_VF_QID_IDX_M,
+ VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
+ roce_set_field(req_b->vf_qid_idx_sl_num,
+ VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S,
+ HNS_ROCE_VF_SL_NUM);
+ }
+ }
+
+ return hns_roce_cmq_send(hr_dev, desc, 2);
+}
+
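+/*
+ * Tell hardware the base address table attributes (BA page size, buffer
+ * page size and hop number) for the QPC, SRQC, CQC and MPT tables; a hop
+ * number of HNS_ROCE_HOP_NUM_0 is encoded as 0.
+ */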
+static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
+{
+ u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
+ u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+ u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+ u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
+ struct hns_roce_cfg_bt_attr *req;
+ struct hns_roce_cmq_desc desc;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
+ req = (struct hns_roce_cfg_bt_attr *)desc.data;
+ memset(req, 0, sizeof(*req));
+
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
+ hr_dev->caps.qpc_ba_pg_sz);
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
+ hr_dev->caps.qpc_buf_pg_sz);
+ roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
+ qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
+
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
+ hr_dev->caps.srqc_ba_pg_sz);
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
+ hr_dev->caps.srqc_buf_pg_sz);
+ roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
+ srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
+
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
+ hr_dev->caps.cqc_ba_pg_sz);
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
+ hr_dev->caps.cqc_buf_pg_sz);
+ roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
+ CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
+ cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
+
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
+ hr_dev->caps.mpt_ba_pg_sz);
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
+ hr_dev->caps.mpt_buf_pg_sz);
+ roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
+ CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+ mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+}
+
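+/*
+ * Initial device profiling: query the hardware version, configure the
+ * global parameters, query PF resources, allocate VF resources, fill in
+ * the fixed v2 capability values and finally configure the BT attributes.
+ */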
+static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_caps *caps = &hr_dev->caps;
+ int ret;
+
+ ret = hns_roce_cmq_query_hw_info(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_config_global_param(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Get the resources owned by this PF */
+ ret = hns_roce_query_pf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ hr_dev->vendor_part_id = 0;
+ hr_dev->sys_image_guid = 0;
+
+ caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
+ caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
+ caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+ caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
+ caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
+ caps->num_uars = HNS_ROCE_V2_UAR_NUM;
+ caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
+ caps->num_aeq_vectors = 1;
+ caps->num_comp_vectors = 63;
+ caps->num_other_vectors = 0;
+ caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
+ caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
+ caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+ caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
+ caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
+ caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
+ caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
+ caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
+ caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
+ caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
+ caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
+ caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
+ caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
+ caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
+ caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
+ caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
+ caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
+ caps->reserved_lkey = 0;
+ caps->reserved_pds = 0;
+ caps->reserved_mrws = 1;
+ caps->reserved_uars = 0;
+ caps->reserved_cqs = 0;
+
+ caps->qpc_ba_pg_sz = 0;
+ caps->qpc_buf_pg_sz = 0;
+ caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+ caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->pbl_ba_pg_sz = 0;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->mtt_ba_pg_sz = 0;
+ caps->mtt_buf_pg_sz = 0;
+ caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
+ caps->cqe_ba_pg_sz = 0;
+ caps->cqe_buf_pg_sz = 0;
+ caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+
+ caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ caps->pkey_table_len[0] = 1;
+ caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->local_ca_ack_delay = 0;
+ caps->max_mtu = IB_MTU_4096;
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+ dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
+ return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
+}
+
+static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
+{
+ u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);
+
+ return status & HNS_ROCE_HW_MB_STATUS_MASK;
+}
+
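+/*
+ * Post a mailbox command through the VF mailbox registers: wait for the
+ * previous command to finish, write the 64-bit in/out parameters, then
+ * (after a write barrier) the tag/command and event/token words.
+ */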
+static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+ u64 out_param, u32 in_modifier, u8 op_modifier,
+ u16 op, u16 token, int event)
+{
+ struct device *dev = hr_dev->dev;
+ u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base +
+ ROCEE_VF_MB_CFG0_REG);
+ unsigned long end;
+ u32 val0 = 0;
+ u32 val1 = 0;
+
+ end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
+ while (hns_roce_v2_cmd_pending(hr_dev)) {
+ if (time_after(jiffies, end)) {
+ dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
+ (int)end);
+ return -EAGAIN;
+ }
+ cond_resched();
+ }
+
+ roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
+ HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
+ roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
+ HNS_ROCE_VF_MB4_CMD_SHIFT, op);
+ roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
+ HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
+ roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
+ HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
+
+ __raw_writeq(cpu_to_le64(in_param), hcr + 0);
+ __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+
+ /* Memory barrier */
+ wmb();
+
+ __raw_writel(cpu_to_le32(val0), hcr + 4);
+ __raw_writel(cpu_to_le32(val1), hcr + 5);
+
+ mmiowb();
+
+ return 0;
+}
+
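+/*
+ * Poll the mailbox status register until the hardware clears the run bit
+ * or the timeout expires; a completion status other than 0x1 is treated
+ * as a failure.
+ */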
+static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
+ unsigned long timeout)
+{
+ struct device *dev = hr_dev->dev;
+ unsigned long end = 0;
+ u32 status;
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
+ cond_resched();
+
+ if (hns_roce_v2_cmd_pending(hr_dev)) {
+ dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
+ return -ETIMEDOUT;
+ }
+
+ status = hns_roce_v2_cmd_complete(hr_dev);
+ if (status != 0x1) {
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
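+/*
+ * Program a 128-bit GID into the four SGID_CFG registers of the given
+ * table index (each index occupies a 0x20 register stride) and record the
+ * SGID type (RoCE v1, RoCEv2/IPv4 or RoCEv2/IPv6) in SGID_CFG4.
+ */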
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+ int gid_index, union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+{
+ enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
+ u32 *p;
+ u32 val;
+
+ if (!gid || !attr)
+ return -EINVAL;
+
+ if (attr->gid_type == IB_GID_TYPE_ROCE)
+ sgid_type = GID_TYPE_FLAG_ROCE_V1;
+
+ if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ if (ipv6_addr_v4mapped((void *)gid))
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
+ else
+ sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
+ }
+
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG0_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[4];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG1_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[8];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG2_REG +
+ 0x20 * gid_index);
+
+ p = (u32 *)&gid->raw[0xc];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_VF_SGID_CFG3_REG +
+ 0x20 * gid_index);
+
+ val = roce_read(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index);
+ roce_set_field(val, ROCEE_VF_SGID_CFG4_SGID_TYPE_M,
+ ROCEE_VF_SGID_CFG4_SGID_TYPE_S, sgid_type);
+
+ roce_write(hr_dev, ROCEE_VF_SGID_CFG4_REG + 0x20 * gid_index, val);
+
+ return 0;
+}
+
+static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
+ u8 *addr)
+{
+ u16 reg_smac_h;
+ u32 reg_smac_l;
+ u32 val;
+
+ reg_smac_l = *(u32 *)(&addr[0]);
+ roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_VF_SMAC_CFG0_REG +
+ 0x08 * phy_port);
+ val = roce_read(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port);
+
+ reg_smac_h = *(u16 *)(&addr[4]);
+ roce_set_field(val, ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M,
+ ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S, reg_smac_h);
+ roce_write(hr_dev, ROCEE_VF_SMAC_CFG1_REG + 0x08 * phy_port, val);
+
+ return 0;
+}
+
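+/*
+ * Fill in an MPT entry for a memory region: state, PBL hop number and
+ * page sizes, PD, access rights, length, key and IOVA. For non-DMA MRs
+ * the PBL base address and the first two page addresses from the umem
+ * scatterlist are also written into the entry.
+ */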
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+ struct scatterlist *sg;
+ u64 *pages;
+ int entry;
+ int i;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, mr->pbl_ba_pg_sz);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+ mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
+ (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
+ (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
+ (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
+ mr->type == MR_TYPE_MR ? 0 : 1);
+ mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
+
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
+ mpt_entry->lkey = cpu_to_le32(mr->key);
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
+
+ if (mr->type == MR_TYPE_DMA)
+ return 0;
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
+
+ pages = (u64 *)__get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ pages[i] = ((u64)sg_dma_address(sg)) >> 6;
+
+ /* Record the first two page addresses directly in the MTPT entry */
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+ break;
+ i++;
+ }
+
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+ roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+ V2_MPT_BYTE_56_PA0_H_S,
+ upper_32_bits(pages[0]));
+ mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
+
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+ roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+ V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+
+ free_page((unsigned long)pages);
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, mr->pbl_buf_pg_sz);
+ mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
+
+ return 0;
+}
+
+static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags,
+ u32 pdn, int mr_access_flags, u64 iova,
+ u64 size, void *mb_buf)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+
+ if (flags & IB_MR_REREG_PD) {
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, pdn);
+ mr->pd = pdn;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+ V2_MPT_BYTE_8_BIND_EN_S,
+ (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
+ V2_MPT_BYTE_8_ATOMIC_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
+ (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
+ (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
+ mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
+ mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
+ mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_ba_l =
+ cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+ mpt_entry->byte_48_mode_ba =
+ cpu_to_le32(mpt_entry->byte_48_mode_ba);
+
+ mr->iova = iova;
+ mr->size = size;
+ }
+
+ return 0;
+}
+
+static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+ return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
+ n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+}
+
+static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
+{
+ struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
+
+ /* The CQE is owned by software when its owner bit differs from the cons_index wrap bit */
+ return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
+ !!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
+}
+
+static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
+{
+ return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
+}
+
+static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
+{
+ struct hns_roce_v2_cq_db cq_db;
+
+ cq_db.byte_4 = 0;
+ cq_db.parameter = 0;
+
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
+ V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
+ roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
+ V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
+
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
+ V2_CQ_DB_PARAMETER_CONS_IDX_S,
+ cons_index & ((hr_cq->cq_depth << 1) - 1));
+ roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
+ V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
+
+ hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
+}
+
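+/*
+ * Remove all CQEs that belong to the given QP from the CQ by compacting
+ * the remaining entries backwards (keeping each destination CQE's owner
+ * bit), then advance the consumer index past the freed entries.
+ */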
+static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ struct hns_roce_v2_cqe *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
+ for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
+ ++prod_index) {
+ if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
+ break;
+ }
+
+ /*
+ * Now walk backwards through the CQ, removing CQ entries
+ * that match our QP by overwriting them with the next entries.
+ */
+ while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
+ cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
+ if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S) &
+ HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
+ /* SRQ is not supported, so there is no SRQ WQE to free; just count the CQE */
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
+ hr_cq->ib_cq.cqe);
+ owner_bit = roce_get_bit(dest->byte_4,
+ V2_CQE_BYTE_4_OWNER_S);
+ memcpy(dest, cqe, sizeof(*cqe));
+ roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
+ owner_bit);
+ }
+ }
+
+ if (nfreed) {
+ hr_cq->cons_index += nfreed;
+ /*
+ * Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+ hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
+}
+
+static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
+ struct hns_roce_srq *srq)
+{
+ spin_lock_irq(&hr_cq->lock);
+ __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
+ spin_unlock_irq(&hr_cq->lock);
+}
+
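+/*
+ * Build the CQ context written to hardware: CQ state, size, completion
+ * EQ number, current/next CQE block addresses, CQE page sizes and hop
+ * number, and the CQE base address.
+ */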
+static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf,
+ u64 *mtts, dma_addr_t dma_handle, int nent,
+ u32 vector)
+{
+ struct hns_roce_v2_cq_context *cq_context;
+
+ cq_context = mb_buf;
+ memset(cq_context, 0, sizeof(*cq_context));
+
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
+ V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
+ V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
+ V2_CQC_BYTE_4_CEQN_S, vector);
+ cq_context->byte_4_pg_ceqn = cpu_to_le32(cq_context->byte_4_pg_ceqn);
+
+ roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
+ V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
+
+ cq_context->cqe_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ cq_context->cqe_cur_blk_addr =
+ cpu_to_le32(cq_context->cqe_cur_blk_addr);
+
+ roce_set_field(cq_context->byte_16_hop_addr,
+ V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
+ V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
+ cpu_to_le32((mtts[0]) >> (32 + PAGE_ADDR_SHIFT)));
+ roce_set_field(cq_context->byte_16_hop_addr,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_M,
+ V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
+ HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
+
+ cq_context->cqe_nxt_blk_addr = (u32)(mtts[1] >> PAGE_ADDR_SHIFT);
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
+ V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
+ cpu_to_le32((mtts[1]) >> (32 + PAGE_ADDR_SHIFT)));
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
+ V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
+ hr_dev->caps.cqe_ba_pg_sz);
+ roce_set_field(cq_context->byte_24_pgsz_addr,
+ V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
+ V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
+ hr_dev->caps.cqe_buf_pg_sz);
+
+ cq_context->cqe_ba = (u32)(dma_handle >> 3);
+
+ roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
+ V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+}
+
+static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
+ notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+ /*
+ * flags == 0: Notification Flag = 1, next
+ * flags == 1: Notification Flag = 0, solicited
+ */
+ roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
+ hr_cq->cqn);
+ roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
+ HNS_ROCE_V2_CQ_DB_NTR);
+ roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
+ V2_CQ_DB_PARAMETER_CONS_IDX_S,
+ hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
+ roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
+ V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
+ roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+ notification_flag);
+
+ hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
+
+ return 0;
+}
+
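+/*
+ * Consume one CQE: look up the QP it belongs to, translate the hardware
+ * status and opcode into ib_wc fields, and advance the SQ or RQ tail
+ * pointer for the completed WQE.
+ */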
+static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
+ struct hns_roce_qp **cur_qp, struct ib_wc *wc)
+{
+ struct hns_roce_dev *hr_dev;
+ struct hns_roce_v2_cqe *cqe;
+ struct hns_roce_qp *hr_qp;
+ struct hns_roce_wq *wq;
+ int is_send;
+ u16 wqe_ctr;
+ u32 opcode;
+ u32 status;
+ int qpn;
+
+ /* Find cqe according to consumer index */
+ cqe = next_cqe_sw_v2(hr_cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ ++hr_cq->cons_index;
+ /* Memory barrier */
+ rmb();
+
+ /* 0->SQ, 1->RQ */
+ is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
+
+ qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+ V2_CQE_BYTE_16_LCL_QPN_S);
+
+ if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
+ hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+ hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+ if (unlikely(!hr_qp)) {
+ dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
+ hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
+ return -EINVAL;
+ }
+ *cur_qp = hr_qp;
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ wc->vendor_err = 0;
+
+ status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
+ V2_CQE_BYTE_4_STATUS_S);
+ switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
+ case HNS_ROCE_CQE_V2_SUCCESS:
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_MW_BIND_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
+ wc->status = IB_WC_REM_ABORT_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* On a CQE status error, return directly */
+ if (wc->status != IB_WC_SUCCESS)
+ return 0;
+
+ if (is_send) {
+ wc->wc_flags = 0;
+ /* SQ corresponding to CQE */
+ switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
+ case HNS_ROCE_SQ_OPCODE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
+ wc->opcode = IB_WC_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
+ wc->opcode = IB_WC_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ case HNS_ROCE_SQ_OPCODE_BIND_MW:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ wq = &(*cur_qp)->sq;
+ if ((*cur_qp)->sq_signal_bits) {
+ /*
+ * If sq_signal_bits is set, move the tail pointer to
+ * the WQE that this CQE corresponds to before
+ * recording its wr_id.
+ */
+ wqe_ctr = (u16)roce_get_field(cqe->byte_4,
+ V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S);
+ wq->tail += (wqe_ctr - (u16)wq->tail) &
+ (wq->wqe_cnt - 1);
+ }
+
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ } else {
+ /* RQ corresponds to CQE */
+ wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+
+ opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+ V2_CQE_BYTE_4_OPCODE_S);
+ switch (opcode & 0x1f) {
+ case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey_immtdata);
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ /* Update tail pointer, record wr_id */
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+
+ wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+ V2_CQE_BYTE_32_SL_S);
+ wc->src_qp = (u8)roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_RMT_QPN_M,
+ V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->wc_flags |= (roce_get_bit(cqe->byte_32,
+ V2_CQE_BYTE_32_GRH_S) ?
+ IB_WC_GRH : 0);
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ struct hns_roce_qp *cur_qp = NULL;
+ unsigned long flags;
+ int npolled;
+
+ spin_lock_irqsave(&hr_cq->lock, flags);
+
+ for (npolled = 0; npolled < num_entries; ++npolled) {
+ if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
+ break;
+ }
+
+ if (npolled) {
+ /* Memory barrier */
+ wmb();
+ hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+ }
+
+ spin_unlock_irqrestore(&hr_cq->lock, flags);
+
+ return npolled;
+}
+
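+/*
+ * Write the base address of a HEM chunk (or of an intermediate L0/L1
+ * base address table, depending on step_idx) to hardware via a mailbox
+ * command whose opcode is derived from the table type.
+ */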
+static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct hns_roce_hem_iter iter;
+ struct hns_roce_hem_mhop mhop;
+ struct hns_roce_hem *hem;
+ unsigned long mhop_obj = obj;
+ int i, j, k;
+ int ret = 0;
+ u64 hem_idx = 0;
+ u64 l1_idx = 0;
+ u64 bt_ba = 0;
+ u32 chunk_ba_num;
+ u32 hop_num;
+ u16 op = 0xff;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
+ i = mhop.l0_idx;
+ j = mhop.l1_idx;
+ k = mhop.l2_idx;
+ hop_num = mhop.hop_num;
+ chunk_ba_num = mhop.bt_chunk_size / 8;
+
+ if (hop_num == 2) {
+ hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
+ k;
+ l1_idx = i * chunk_ba_num + j;
+ } else if (hop_num == 1) {
+ hem_idx = i * chunk_ba_num + j;
+ } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
+ hem_idx = i;
+ }
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ op = HNS_ROCE_CMD_WRITE_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ op = HNS_ROCE_CMD_WRITE_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_WRITE_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+ return 0;
+ }
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+ for (hns_roce_hem_first(hem, &iter);
+ !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
+ bt_ba = hns_roce_hem_addr(&iter);
+
+ /* configure the ba, tag, and op */
+ ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
+ obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+ } else {
+ if (step_idx == 0)
+ bt_ba = table->bt_l0_dma_addr[i];
+ else if (step_idx == 1 && hop_num == 2)
+ bt_ba = table->bt_l1_dma_addr[l1_idx];
+
+ /* configure the ba, tag, and op */
+ ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
+ 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj,
+ int step_idx)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret = 0;
+ u16 op = 0xff;
+
+ if (!hns_roce_check_whether_mhop(hr_dev, table->type))
+ return 0;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
+ break;
+ case HEM_TYPE_MTPT:
+ op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
+ break;
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+ default:
+ dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
+ table->type);
+ return 0;
+ }
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ /* configure the tag and op */
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
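+/*
+ * Issue the MODIFY_QPC mailbox command; the mailbox buffer carries the
+ * QP context immediately followed by the context mask.
+ */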
+static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtt *mtt,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_qp *hr_qp)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, context, sizeof(*context) * 2);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_MODIFY_QPC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
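+/*
+ * Fill the QP context and context mask for the RESET -> INIT transition:
+ * QP type, SQ/RQ/SGE shifts, PD, RX/TX CQNs, SRQ, access flags and VLAN
+ * index, and clear the mask bits of the many fields that must be written
+ * as zero.
+ */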
+static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /*
+ * In the v2 engine, software passes both a context and a context mask
+ * to hardware when modifying a QP. For every field that software wants
+ * to modify, all bits of that field in the context mask must be cleared
+ * to 0; fields whose mask bits stay 1 are left unchanged by hardware.
+ */
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+ V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
+ V2_QPC_BYTE_20_RQWS_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+ /* No VLAN: set the VLAN index to 0xFFF */
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+ V2_QPC_BYTE_24_VLAN_IDX_S, 0xfff);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_IDX_M,
+ V2_QPC_BYTE_24_VLAN_IDX_S, 0);
+
+ /*
+ * The following fields must be zero in the context. Since the context
+ * is already zero-initialized, only the corresponding fields of the
+ * context mask need to be cleared to 0 here.
+ */
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
+ roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
+
+ roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
+ V2_QPC_BYTE_60_MAPID_S, 0);
+
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, 0);
+ if (ibqp->srq) {
+ roce_set_field(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ roce_set_field(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 0);
+ }
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
+ V2_QPC_BYTE_92_SRQ_INFO_S, 0);
+
+ roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+ V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
+ V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
+
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
+
+ qpc_mask->rq_rnr_timer = 0;
+ qpc_mask->rx_msg_len = 0;
+ qpc_mask->rx_rkey_pkt_info = 0;
+ qpc_mask->rx_va = 0;
+
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+ V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+ roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
+ V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
+ V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
+
+ roce_set_field(qpc_mask->byte_144_raq,
+ V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
+ V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
+ roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
+ 0);
+ roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
+ V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
+ roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
+
+ roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
+ V2_QPC_BYTE_148_RQ_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
+ V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
+
+ roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ roce_set_field(qpc_mask->byte_152_raq,
+ V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
+ V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
+
+ roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
+ V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
+
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
+
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
+ V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
+ roce_set_field(qpc_mask->byte_172_sq_psn,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
+ V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
+
+ roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
+ 0);
+
+ roce_set_field(qpc_mask->byte_176_msg_pktn,
+ V2_QPC_BYTE_176_MSG_USE_PKTN_M,
+ V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
+ roce_set_field(qpc_mask->byte_176_msg_pktn,
+ V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
+ V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
+
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
+ V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
+
+ qpc_mask->cur_sge_offset = 0;
+
+ roce_set_field(qpc_mask->byte_192_ext_sge,
+ V2_QPC_BYTE_192_CUR_SGE_IDX_M,
+ V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_192_ext_sge,
+ V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
+ V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
+
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+ roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
+ V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_200_sq_max,
+ V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
+ V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
+
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
+
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+ qpc_mask->sq_timer = 0;
+
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+ qpc_mask->irrl_cur_sge_offset = 0;
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
+ V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
+ 0);
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
+ 0);
+
+ hr_qp->access_flags = attr->qp_access_flags;
+ hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
+ V2_QPC_BYTE_252_ERR_TYPE_S, 0);
+
+ roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+ V2_QPC_BYTE_256_RQ_CQE_IDX_M,
+ V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
+ roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
+ V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
+ V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
+}
+
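+/*
+ * INIT -> INIT transition: re-program the fields that may change while
+ * the QP stays in INIT (access flags, pkey index, PD, CQNs, SRQ).
+ */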
+static void modify_qp_init_to_init(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+ /*
+ * In the v2 engine, software passes both a context and a context mask
+ * to hardware when modifying a QP. For every field that software wants
+ * to modify, all bits of that field in the context mask must be cleared
+ * to 0; fields whose mask bits stay 1 are left unchanged by hardware.
+ */
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
+ V2_QPC_BYTE_4_TST_S, 0);
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ 0);
+ } else {
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ 0);
+ }
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
+ ilog2((unsigned int)hr_qp->rq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
+ V2_QPC_BYTE_16_PD_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
+ V2_QPC_BYTE_80_RX_CQN_S, 0);
+
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, 0);
+
+ if (ibqp->srq) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQ_EN_S, 0);
+ roce_set_field(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
+ to_hr_srq(ibqp->srq)->srqn);
+ roce_set_field(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ context->qkey_xrcd = attr->pkey_index;
+ else
+ context->qkey_xrcd = hr_qp->pkey_index;
+
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, 0);
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
+ ilog2((unsigned int)hr_qp->sq.wqe_cnt));
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
+ V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
+}
+
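+/*
+ * Fill the context and mask for the INIT -> RTR transition: look up the
+ * WQE buffer, IRRL and TRRL addresses, then program the base addresses,
+ * hop numbers and page sizes together with the path attributes (dmac,
+ * sgid index, hop limit, flow label, traffic class, MTU, SL) and the
+ * responder resources.
+ */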
+static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = hr_dev->dev;
+ dma_addr_t dma_handle_3;
+ dma_addr_t dma_handle_2;
+ dma_addr_t dma_handle;
+ u32 page_size;
+ u8 port_num;
+ u64 *mtts_3;
+ u64 *mtts_2;
+ u64 *mtts;
+ u8 *dmac;
+ u8 *smac;
+ int port;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+ dev_err(dev, "qp buf pa find failed\n");
+ return -EINVAL;
+ }
+
+ /* Search IRRL's mtts */
+ mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
+ hr_qp->qpn, &dma_handle_2);
+ if (!mtts_2) {
+ dev_err(dev, "qp irrl_table find failed\n");
+ return -EINVAL;
+ }
+
+ /* Search TRRL's mtts */
+ mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
+ hr_qp->qpn, &dma_handle_3);
+ if (!mtts_3) {
+ dev_err(dev, "qp trrl_table find failed\n");
+ return -EINVAL;
+ }
+
+ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+ dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
+ return -EINVAL;
+ }
+
+ dmac = (u8 *)attr->ah_attr.roce.dmac;
+ context->wqe_sge_ba = (u32)(dma_handle >> 3);
+ qpc_mask->wqe_sge_ba = 0;
+
+ /*
+ * In the v2 engine, software passes both a context and a context mask
+ * to hardware when modifying a QP. For every field that software wants
+ * to modify, all bits of that field in the context mask must be cleared
+ * to 0; fields whose mask bits stay 1 are left unchanged by hardware.
+ */
+ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+ V2_QPC_BYTE_12_WQE_SGE_BA_S, dma_handle >> (32 + 3));
+ roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
+ V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
+
+ roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+ V2_QPC_BYTE_12_SQ_HOP_NUM_S,
+ hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : hr_dev->caps.mtt_hop_num);
+ roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
+ V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_S,
+ hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_M,
+ V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_S,
+ hr_dev->caps.mtt_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : hr_dev->caps.mtt_hop_num);
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_M,
+ V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
+ hr_dev->caps.mtt_ba_pg_sz);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
+
+ roce_set_field(context->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
+ hr_dev->caps.mtt_buf_pg_sz);
+ roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+ roce_set_field(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+ >> PAGE_ADDR_SHIFT);
+ qpc_mask->rq_cur_blk_addr = 0;
+
+ roce_set_field(context->byte_92_srq_info,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
+ mtts[hr_qp->rq.offset / page_size]
+ >> (32 + PAGE_ADDR_SHIFT));
+ roce_set_field(qpc_mask->byte_92_srq_info,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
+
+ context->rq_nxt_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size + 1]
+ >> PAGE_ADDR_SHIFT);
+ qpc_mask->rq_nxt_blk_addr = 0;
+
+ roce_set_field(context->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
+ mtts[hr_qp->rq.offset / page_size + 1]
+ >> (32 + PAGE_ADDR_SHIFT));
+ roce_set_field(qpc_mask->byte_104_rq_sge,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+ roce_set_field(context->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+
+ roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, 0);
+ context->trrl_ba = (u32)(dma_handle_3 >> (16 + 4));
+ qpc_mask->trrl_ba = 0;
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
+ V2_QPC_BYTE_140_TRRL_BA_S,
+ (u32)(dma_handle_3 >> (32 + 16 + 4)));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
+ V2_QPC_BYTE_140_TRRL_BA_S, 0);
+
+ context->irrl_ba = (u32)(dma_handle_2 >> 6);
+ qpc_mask->irrl_ba = 0;
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+ V2_QPC_BYTE_208_IRRL_BA_S,
+ dma_handle_2 >> (32 + 6));
+ roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
+ V2_QPC_BYTE_208_IRRL_BA_S, 0);
+
+ roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
+ roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
+
+ roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+ hr_qp->sq_signal_bits);
+ roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
+ 0);
+
+ port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
+
+ smac = (u8 *)hr_dev->dev_addr[port];
+ /* Enable loopback when dmac equals smac or when loop_idc is 1 */
+ if (ether_addr_equal_unaligned(dmac, smac) ||
+ hr_dev->loop_idc == 0x1) {
+ roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ }
+
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ ilog2((unsigned int)attr->max_dest_rd_atomic));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, 0);
+
+ /* Configure GID index */
+ port_num = rdma_ah_get_port_num(&attr->ah_attr);
+ roce_set_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S,
+ hns_get_gid_index(hr_dev, port_num - 1,
+ grh->sgid_index));
+ roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S, 0);
+ memcpy(&(context->dmac), dmac, 4);
+ roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
+ qpc_mask->dmac = 0;
+ roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
+ V2_QPC_BYTE_52_DMAC_S, 0);
+
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
+ roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
+ V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, grh->flow_label);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S, 0);
+
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+
+ memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+ memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
+
+ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
+ V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
+ roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
+ V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
+ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
+ V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
+
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+ roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+ V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
+
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_LP_SGEN_INI_M,
+ V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_LP_SGEN_INI_M,
+ V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
+
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ ilog2((unsigned int)attr->max_rd_atomic));
+ roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ return 0;
+}
+
+static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct device *dev = hr_dev->dev;
+ dma_addr_t dma_handle;
+ u32 page_size;
+ u64 *mtts;
+
+ /* Search qp buf's mtts */
+ mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
+ hr_qp->mtt.first_seg, &dma_handle);
+ if (!mtts) {
+		dev_err(dev, "failed to find mtt for qp buf\n");
+ return -EINVAL;
+ }
+
+	/* Return an error if any unsupported optional attribute is set */
+ if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
+ (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
+ (attr_mask & IB_QP_CUR_STATE) ||
+ (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+		dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
+ return -EINVAL;
+ }
+
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For every field software
+	 * wants to modify, all bits of that field in the context mask must
+	 * be cleared to 0; fields whose mask bits stay 0x1 are left
+	 * unchanged by hardware.
+	 */
+ roce_set_field(context->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
+ V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
+
+ context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ roce_set_field(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
+ mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ qpc_mask->sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
+
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ ((u32)(mtts[hr_qp->sge.offset / page_size]
+ >> PAGE_ADDR_SHIFT)) : 0;
+ roce_set_field(context->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
+ hr_qp->sq.max_gs > 2 ?
+ (mtts[hr_qp->sge.offset / page_size] >>
+ (32 + PAGE_ADDR_SHIFT)) : 0);
+ qpc_mask->sq_cur_sge_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_184_irrl_idx,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
+ V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
+
+ context->rx_sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
+ roce_set_field(context->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
+ mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+ qpc_mask->rx_sq_cur_blk_addr = 0;
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
+ V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
+
+	/*
+	 * Clear some fields of the context. Because every field in the
+	 * context already defaults to zero, there is no need to write 0
+	 * again; only the corresponding fields of the context mask are
+	 * cleared.
+	 */
+ roce_set_field(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
+ V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_IRRL_PSN_M,
+ V2_QPC_BYTE_248_IRRL_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_240_irrl_tail,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+ roce_set_field(context->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+
+ roce_set_field(context->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_224_retry_msg,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+
+ roce_set_bit(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
+
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+
+ roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+ roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+ V2_QPC_BYTE_244_RNR_CNT_S, 0);
+
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0x100);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S, attr->timeout);
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S, 0);
+ }
+
+ roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S,
+ rdma_ah_get_sl(&attr->ah_attr));
+ roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S, 0);
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+ roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ return 0;
+}
+
+static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context *qpc_mask;
+ struct device *dev = hr_dev->dev;
+ int ret = -EINVAL;
+
+ context = kzalloc(2 * sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
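+	/*
+	 * The context and its mask are allocated back to back in a single
+	 * buffer; qpc_mask simply points at the second half.
+	 */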
+ qpc_mask = context + 1;
+	/*
+	 * In the v2 engine, software passes both the context and a context
+	 * mask to hardware when modifying a QP. For every field software
+	 * wants to modify, all bits of that field in the context mask must
+	 * be cleared to 0; fields whose mask bits stay 0x1 are left
+	 * unchanged by hardware.
+	 */
+ memset(qpc_mask, 0xff, sizeof(*qpc_mask));
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ modify_qp_init_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
+ qpc_mask);
+ if (ret)
+ goto out;
+ } else if ((cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_RTS) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
+ (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
+ (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+ /* Nothing */
+ ;
+ } else {
+ dev_err(dev, "Illegal state for QP!\n");
+ goto out;
+ }
+
+	/* Every state transition must update the QP state field */
+ roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ V2_QPC_BYTE_60_QP_ST_S, new_state);
+ roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ V2_QPC_BYTE_60_QP_ST_S, 0);
+
+ /* SW pass context to HW */
+ ret = hns_roce_v2_qp_modify(hr_dev, &hr_qp->mtt, cur_state, new_state,
+ context, hr_qp);
+ if (ret) {
+		dev_err(dev, "hns_roce_qp_modify failed (%d)\n", ret);
+ goto out;
+ }
+
+ hr_qp->state = new_state;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ hr_qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
+ ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
+ if (ibqp->send_cq != ibqp->recv_cq)
+ hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+ hr_qp->qpn, NULL);
+
+ hr_qp->rq.head = 0;
+ hr_qp->rq.tail = 0;
+ hr_qp->sq.head = 0;
+ hr_qp->sq.tail = 0;
+ hr_qp->sq_next_wqe = 0;
+ hr_qp->next_sge = 0;
+ }
+
+out:
+ kfree(context);
+ return ret;
+}
+
+static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
+{
+ switch (state) {
+ case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
+ case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
+ case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
+ case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
+ case HNS_ROCE_QP_ST_SQ_DRAINING:
+ case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
+ case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
+ case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *hr_context)
+{
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
+ HNS_ROCE_CMD_QUERY_QPC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
+ goto out;
+ }
+
+ memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
+static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct hns_roce_v2_qp_context *context;
+ struct device *dev = hr_dev->dev;
+ int tmp_qp_state;
+ int state;
+ int ret;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ mutex_lock(&hr_qp->mutex);
+
+ if (hr_qp->state == IB_QPS_RESET) {
+ qp_attr->qp_state = IB_QPS_RESET;
+ ret = 0;
+ goto done;
+ }
+
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, context);
+ if (ret) {
+ dev_err(dev, "query qpc error\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ state = roce_get_field(context->byte_60_qpst_mapid,
+ V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
+ tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
+ if (tmp_qp_state == -1) {
+ dev_err(dev, "Illegal ib_qp_state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ hr_qp->state = (u8)tmp_qp_state;
+ qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
+ qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S);
+ qp_attr->path_mig_state = IB_MIG_ARMED;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD)
+ qp_attr->qkey = V2_QKEY_VAL;
+
+ qp_attr->rq_psn = roce_get_field(context->byte_108_rx_reqepsn,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+ V2_QPC_BYTE_108_RX_REQ_EPSN_S);
+ qp_attr->sq_psn = (u32)roce_get_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_S);
+ qp_attr->dest_qp_num = (u8)roce_get_field(context->byte_56_dqpn_err,
+ V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S);
+ qp_attr->qp_access_flags = ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RRE_S)) << 2) |
+ ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RWE_S)) << 1) |
+ ((roce_get_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_ATE_S)) << 3);
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
+ hr_qp->ibqp.qp_type == IB_QPT_UC) {
+ struct ib_global_route *grh =
+ rdma_ah_retrieve_grh(&qp_attr->ah_attr);
+
+ rdma_ah_set_sl(&qp_attr->ah_attr,
+ roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_SL_M,
+ V2_QPC_BYTE_28_SL_S));
+ grh->flow_label = roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_FL_M,
+ V2_QPC_BYTE_28_FL_S);
+ grh->sgid_index = roce_get_field(context->byte_20_smac_sgid_idx,
+ V2_QPC_BYTE_20_SGID_IDX_M,
+ V2_QPC_BYTE_20_SGID_IDX_S);
+ grh->hop_limit = roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_HOP_LIMIT_M,
+ V2_QPC_BYTE_24_HOP_LIMIT_S);
+ grh->traffic_class = roce_get_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M,
+ V2_QPC_BYTE_24_TC_S);
+
+ memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
+ }
+
+ qp_attr->port_num = hr_qp->port + 1;
+ qp_attr->sq_draining = 0;
+ qp_attr->max_rd_atomic = 1 << roce_get_field(context->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S);
+ qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->byte_140_raq,
+ V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S);
+ qp_attr->min_rnr_timer = (u8)roce_get_field(context->byte_80_rnr_rx_cqn,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+ V2_QPC_BYTE_80_MIN_RNR_TIME_S);
+ qp_attr->timeout = (u8)roce_get_field(context->byte_28_at_fl,
+ V2_QPC_BYTE_28_AT_M,
+ V2_QPC_BYTE_28_AT_S);
+ qp_attr->retry_cnt = roce_get_field(context->byte_212_lsn,
+ V2_QPC_BYTE_212_RETRY_CNT_M,
+ V2_QPC_BYTE_212_RETRY_CNT_S);
+ qp_attr->rnr_retry = context->rq_rnr_timer;
+
+done:
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->cap = qp_attr->cap;
+
+out:
+ mutex_unlock(&hr_qp->mutex);
+ kfree(context);
+ return ret;
+}
+
+static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ int is_user)
+{
+ struct hns_roce_cq *send_cq, *recv_cq;
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+ hr_qp->state, IB_QPS_RESET);
+ if (ret) {
+			dev_err(dev, "modify QP %06lx to RESET failed.\n",
+ hr_qp->qpn);
+ return ret;
+ }
+ }
+
+ send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+ recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
+ hns_roce_lock_cqs(send_cq, recv_cq);
+
+ if (!is_user) {
+ __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+ to_hr_srq(hr_qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
+ }
+
+ hns_roce_qp_remove(hr_dev, hr_qp);
+
+ hns_roce_unlock_cqs(send_cq, recv_cq);
+
+ hns_roce_qp_free(hr_dev, hr_qp);
+
+	/* If this is not a special QP, free its QPN */
+ if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
+ (hr_qp->ibqp.qp_type == IB_QPT_UD))
+ hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+
+ hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+ if (is_user) {
+ ib_umem_release(hr_qp->umem);
+ } else {
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+ hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ }
+
+ return 0;
+}
+
+static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ int ret;
+
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+ if (ret) {
+		dev_err(hr_dev->dev, "Destroy qp failed (%d)\n", ret);
+ return ret;
+ }
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ kfree(hr_to_hr_sqp(hr_qp));
+ else
+ kfree(hr_qp);
+
+ return 0;
+}
+
+static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+ struct hns_roce_v2_cq_context *cq_context;
+ struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+ struct hns_roce_v2_cq_context *cqc_mask;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cq_context = mailbox->buf;
+ cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
+
+ memset(cqc_mask, 0xff, sizeof(*cqc_mask));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ cq_count);
+ roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ 0);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
+ cq_period);
+ roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
+ 0);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
+ HNS_ROCE_CMD_MODIFY_CQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret)
+		dev_err(hr_dev->dev, "MODIFY CQ mailbox command failed.\n");
+
+ return ret;
+}
+
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+ .cmq_init = hns_roce_v2_cmq_init,
+ .cmq_exit = hns_roce_v2_cmq_exit,
+ .hw_profile = hns_roce_v2_profile,
+ .post_mbox = hns_roce_v2_post_mbox,
+ .chk_mbox = hns_roce_v2_chk_mbox,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+ .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .write_cqc = hns_roce_v2_write_cqc,
+ .set_hem = hns_roce_v2_set_hem,
+ .clear_hem = hns_roce_v2_clear_hem,
+ .modify_qp = hns_roce_v2_modify_qp,
+ .query_qp = hns_roce_v2_query_qp,
+ .destroy_qp = hns_roce_v2_destroy_qp,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .post_send = hns_roce_v2_post_send,
+ .post_recv = hns_roce_v2_post_recv,
+ .req_notify_cq = hns_roce_v2_req_notify_cq,
+ .poll_cq = hns_roce_v2_poll_cq,
+};
+
+static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ /* required last entry */
+ {0, }
+};
+
+static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+{
+ const struct pci_device_id *id;
+
+ id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+ if (!id) {
+ dev_err(hr_dev->dev, "device is not compatible!\n");
+ return -ENXIO;
+ }
+
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+
+ /* Get info from NIC driver. */
+ hr_dev->reg_base = handle->rinfo.roce_io_base;
+ hr_dev->caps.num_ports = 1;
+ hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
+ hr_dev->iboe.phy_port[0] = 0;
+
+ /* cmd issue mode: 0 is poll, 1 is event */
+ hr_dev->cmd_mod = 0;
+ hr_dev->loop_idc = 0;
+
+ return 0;
+}
+
+static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+{
+ struct hns_roce_dev *hr_dev;
+ int ret;
+
+ hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
+ if (!hr_dev)
+ return -ENOMEM;
+
+ hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
+ if (!hr_dev->priv) {
+ ret = -ENOMEM;
+ goto error_failed_kzalloc;
+ }
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+ handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+		dev_err(hr_dev->dev, "Failed to get configuration!\n");
+ goto error_failed_get_cfg;
+ }
+
+ ret = hns_roce_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
+ goto error_failed_get_cfg;
+ }
+
+ return 0;
+
+error_failed_get_cfg:
+ kfree(hr_dev->priv);
+
+error_failed_kzalloc:
+ ib_dealloc_device(&hr_dev->ib_dev);
+
+ return ret;
+}
+
+static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+{
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+}
+
+static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
+ .init_instance = hns_roce_hw_v2_init_instance,
+ .uninit_instance = hns_roce_hw_v2_uninit_instance,
+};
+
+static struct hnae3_client hns_roce_hw_v2_client = {
+ .name = "hns_roce_hw_v2",
+ .type = HNAE3_CLIENT_ROCE,
+ .ops = &hns_roce_hw_v2_ops,
+};
+
+static int __init hns_roce_hw_v2_init(void)
+{
+ return hnae3_register_client(&hns_roce_hw_v2_client);
+}
+
+static void __exit hns_roce_hw_v2_exit(void)
+{
+ hnae3_unregister_client(&hns_roce_hw_v2_client);
+}
+
+module_init(hns_roce_hw_v2_init);
+module_exit(hns_roce_hw_v2_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
+MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
+MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
+MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
new file mode 100644
index 000000000000..04b7a51b8efb
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _HNS_ROCE_HW_V2_H
+#define _HNS_ROCE_HW_V2_H
+
+#include <linux/bitops.h>
+
+#define HNS_ROCE_VF_QPC_BT_NUM 256
+#define HNS_ROCE_VF_SRQC_BT_NUM 64
+#define HNS_ROCE_VF_CQC_BT_NUM 64
+#define HNS_ROCE_VF_MPT_BT_NUM 64
+#define HNS_ROCE_VF_EQC_NUM 64
+#define HNS_ROCE_VF_SMAC_NUM 32
+#define HNS_ROCE_VF_SGID_NUM 32
+#define HNS_ROCE_VF_SL_NUM 8
+
+#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
+#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
+#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
+#define HNS_ROCE_V2_UAR_NUM 256
+#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
+#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
+#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
+#define HNS_ROCE_V2_MAX_QP_INIT_RDMA 128
+#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
+#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
+#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
+#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
+#define HNS_ROCE_V2_QPC_ENTRY_SZ 256
+#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
+#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
+#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
+#define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
+#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+#define HNS_ROCE_INVALID_LKEY 0x100
+#define HNS_ROCE_CMQ_TX_TIMEOUT 200
+
+#define HNS_ROCE_CONTEXT_HOP_NUM 1
+#define HNS_ROCE_MTT_HOP_NUM 1
+#define HNS_ROCE_CQE_HOP_NUM 1
+#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_V2_GID_INDEX_NUM 256
+
+#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
+
+#define HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT 0
+#define HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HNS_ROCE_CMD_FLAG_NEXT_SHIFT 2
+#define HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT 4
+#define HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HNS_ROCE_CMD_FLAG_IN BIT(HNS_ROCE_CMD_FLAG_IN_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_OUT BIT(HNS_ROCE_CMD_FLAG_OUT_VALID_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NEXT BIT(HNS_ROCE_CMD_FLAG_NEXT_SHIFT)
+#define HNS_ROCE_CMD_FLAG_WR BIT(HNS_ROCE_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HNS_ROCE_CMD_FLAG_NO_INTR BIT(HNS_ROCE_CMD_FLAG_NO_INTR_SHIFT)
+#define HNS_ROCE_CMD_FLAG_ERR_INTR BIT(HNS_ROCE_CMD_FLAG_ERR_INTR_SHIFT)
+
+#define HNS_ROCE_CMQ_DESC_NUM_S 3
+#define HNS_ROCE_CMQ_EN_B 16
+#define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+
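+/*
+ * True when step_idx refers to the last level of a multi-hop base address
+ * table walk for the given hop count.
+ */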
+#define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+ (step_idx == 2 && hop_num == 2))
+
+#define V2_CQ_DB_REQ_NOT_SOL 0
+#define V2_CQ_DB_REQ_NOT 1
+
+#define V2_CQ_STATE_VALID 1
+#define V2_QKEY_VAL 0x80010000
+
+#define GID_LEN_V2 16
+
+#define HNS_ROCE_V2_CQE_QPN_MASK 0x3ffff
+
+enum {
+ HNS_ROCE_V2_WQE_OP_SEND = 0x0,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_INV = 0x1,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE = 0x3,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM = 0x4,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ = 0x5,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP = 0x6,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD = 0x7,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8,
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb,
+ HNS_ROCE_V2_WQE_OP_BIND_MW_TYPE = 0xc,
+ HNS_ROCE_V2_WQE_OP_MASK = 0x1f,
+};
+
+enum {
+ HNS_ROCE_SQ_OPCODE_SEND = 0x0,
+ HNS_ROCE_SQ_OPCODE_SEND_WITH_INV = 0x1,
+ HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_SQ_OPCODE_RDMA_WRITE = 0x3,
+ HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM = 0x4,
+ HNS_ROCE_SQ_OPCODE_RDMA_READ = 0x5,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP = 0x6,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD = 0x7,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP = 0x8,
+ HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD = 0x9,
+ HNS_ROCE_SQ_OPCODE_FAST_REG_WR = 0xa,
+ HNS_ROCE_SQ_OPCODE_LOCAL_INV = 0xb,
+ HNS_ROCE_SQ_OPCODE_BIND_MW = 0xc,
+};
+
+enum {
+ /* rq operations */
+ HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM = 0x0,
+ HNS_ROCE_V2_OPCODE_SEND = 0x1,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_IMM = 0x2,
+ HNS_ROCE_V2_OPCODE_SEND_WITH_INV = 0x3,
+};
+
+enum {
+ HNS_ROCE_V2_SQ_DB = 0x0,
+ HNS_ROCE_V2_RQ_DB = 0x1,
+ HNS_ROCE_V2_SRQ_DB = 0x2,
+ HNS_ROCE_V2_CQ_DB_PTR = 0x3,
+ HNS_ROCE_V2_CQ_DB_NTR = 0x4,
+};
+
+enum {
+ HNS_ROCE_CQE_V2_SUCCESS = 0x00,
+ HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR = 0x01,
+ HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR = 0x02,
+ HNS_ROCE_CQE_V2_LOCAL_PROT_ERR = 0x04,
+ HNS_ROCE_CQE_V2_WR_FLUSH_ERR = 0x05,
+ HNS_ROCE_CQE_V2_MW_BIND_ERR = 0x06,
+ HNS_ROCE_CQE_V2_BAD_RESP_ERR = 0x10,
+ HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR = 0x11,
+ HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR = 0x12,
+ HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR = 0x13,
+ HNS_ROCE_CQE_V2_REMOTE_OP_ERR = 0x14,
+ HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR = 0x16,
+ HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR = 0x22,
+
+ HNS_ROCE_V2_CQE_STATUS_MASK = 0xff,
+};
+
+/* CMQ command */
+enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
+ HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
+ HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
+ HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
+};
+
+enum {
+ TYPE_CRQ,
+ TYPE_CSQ,
+};
+
+enum hns_roce_cmd_return_status {
+ CMD_EXEC_SUCCESS = 0,
+ CMD_NO_AUTH = 1,
+ CMD_NOT_EXEC = 2,
+ CMD_QUEUE_FULL = 3,
+};
+
+enum hns_roce_sgid_type {
+ GID_TYPE_FLAG_ROCE_V1 = 0,
+ GID_TYPE_FLAG_ROCE_V2_IPV4,
+ GID_TYPE_FLAG_ROCE_V2_IPV6,
+};
+
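+/*
+ * CQ context as written to hardware (one HNS_ROCE_V2_CQC_ENTRY_SZ entry).
+ * The V2_CQC_BYTE_* shift/mask macros below address the bit fields packed
+ * into each word.
+ */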
+struct hns_roce_v2_cq_context {
+ u32 byte_4_pg_ceqn;
+ u32 byte_8_cqn;
+ u32 cqe_cur_blk_addr;
+ u32 byte_16_hop_addr;
+ u32 cqe_nxt_blk_addr;
+ u32 byte_24_pgsz_addr;
+ u32 byte_28_cq_pi;
+ u32 byte_32_cq_ci;
+ u32 cqe_ba;
+ u32 byte_40_cqe_ba;
+ u32 byte_44_db_record;
+ u32 db_record_addr;
+ u32 byte_52_cqe_cnt;
+ u32 byte_56_cqe_period_maxcnt;
+ u32 cqe_report_timer;
+ u32 byte_64_se_cqe_idx;
+};
+#define V2_CQC_BYTE_4_CQ_ST_S 0
+#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
+
+#define V2_CQC_BYTE_4_POLL_S 2
+
+#define V2_CQC_BYTE_4_SE_S 3
+
+#define V2_CQC_BYTE_4_OVER_IGNORE_S 4
+
+#define V2_CQC_BYTE_4_COALESCE_S 5
+
+#define V2_CQC_BYTE_4_ARM_ST_S 6
+#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6)
+
+#define V2_CQC_BYTE_4_SHIFT_S 8
+#define V2_CQC_BYTE_4_SHIFT_M GENMASK(12, 8)
+
+#define V2_CQC_BYTE_4_CMD_SN_S 13
+#define V2_CQC_BYTE_4_CMD_SN_M GENMASK(14, 13)
+
+#define V2_CQC_BYTE_4_CEQN_S 15
+#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15)
+
+#define V2_CQC_BYTE_4_PAGE_OFFSET_S 24
+#define V2_CQC_BYTE_4_PAGE_OFFSET_M GENMASK(31, 24)
+
+#define V2_CQC_BYTE_8_CQN_S 0
+#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0
+#define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30
+#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30)
+
+#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S 0
+#define V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_S 24
+#define V2_CQC_BYTE_24_CQE_BA_PG_SZ_M GENMASK(27, 24)
+
+#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S 28
+#define V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0
+#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0
+#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_40_CQE_BA_S 0
+#define V2_CQC_BYTE_40_CQE_BA_M GENMASK(28, 0)
+
+#define V2_CQC_BYTE_44_DB_RECORD_EN_S 0
+
+#define V2_CQC_BYTE_52_CQE_CNT_S 0
+#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
+
+#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0
+#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0)
+
+#define V2_CQC_BYTE_56_CQ_PERIOD_S 16
+#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16)
+
+#define V2_CQC_BYTE_64_SE_CQE_IDX_S 0
+#define V2_CQC_BYTE_64_SE_CQE_IDX_M GENMASK(23, 0)
+
+enum {
+ V2_MPT_ST_VALID = 0x1,
+};
+
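+/*
+ * Hardware QP states as encoded in the QP_ST field of the QP context;
+ * to_ib_qp_st() translates them to the corresponding IB states.
+ */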
+enum hns_roce_v2_qp_state {
+ HNS_ROCE_QP_ST_RST,
+ HNS_ROCE_QP_ST_INIT,
+ HNS_ROCE_QP_ST_RTR,
+ HNS_ROCE_QP_ST_RTS,
+ HNS_ROCE_QP_ST_SQER,
+ HNS_ROCE_QP_ST_SQD,
+ HNS_ROCE_QP_ST_ERR,
+ HNS_ROCE_QP_ST_SQ_DRAINING,
+ HNS_ROCE_QP_NUM_ST
+};
+
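+/*
+ * QP context as written to hardware (one HNS_ROCE_V2_QPC_ENTRY_SZ entry).
+ * Each byte_* word is a packed register; the V2_QPC_BYTE_*_S/_M macros
+ * below select the individual bit fields within it.
+ */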
+struct hns_roce_v2_qp_context {
+ u32 byte_4_sqpn_tst;
+ u32 wqe_sge_ba;
+ u32 byte_12_sq_hop;
+ u32 byte_16_buf_ba_pg_sz;
+ u32 byte_20_smac_sgid_idx;
+ u32 byte_24_mtu_tc;
+ u32 byte_28_at_fl;
+ u8 dgid[GID_LEN_V2];
+ u32 dmac;
+ u32 byte_52_udpspn_dmac;
+ u32 byte_56_dqpn_err;
+ u32 byte_60_qpst_mapid;
+ u32 qkey_xrcd;
+ u32 byte_68_rq_db;
+ u32 rq_db_record_addr;
+ u32 byte_76_srqn_op_en;
+ u32 byte_80_rnr_rx_cqn;
+ u32 byte_84_rq_ci_pi;
+ u32 rq_cur_blk_addr;
+ u32 byte_92_srq_info;
+ u32 byte_96_rx_reqmsn;
+ u32 rq_nxt_blk_addr;
+ u32 byte_104_rq_sge;
+ u32 byte_108_rx_reqepsn;
+ u32 rq_rnr_timer;
+ u32 rx_msg_len;
+ u32 rx_rkey_pkt_info;
+ u64 rx_va;
+ u32 byte_132_trrl;
+ u32 trrl_ba;
+ u32 byte_140_raq;
+ u32 byte_144_raq;
+ u32 byte_148_raq;
+ u32 byte_152_raq;
+ u32 byte_156_raq;
+ u32 byte_160_sq_ci_pi;
+ u32 sq_cur_blk_addr;
+ u32 byte_168_irrl_idx;
+ u32 byte_172_sq_psn;
+ u32 byte_176_msg_pktn;
+ u32 sq_cur_sge_blk_addr;
+ u32 byte_184_irrl_idx;
+ u32 cur_sge_offset;
+ u32 byte_192_ext_sge;
+ u32 byte_196_sq_psn;
+ u32 byte_200_sq_max;
+ u32 irrl_ba;
+ u32 byte_208_irrl;
+ u32 byte_212_lsn;
+ u32 sq_timer;
+ u32 byte_220_retry_psn_msn;
+ u32 byte_224_retry_msg;
+ u32 rx_sq_cur_blk_addr;
+ u32 byte_232_irrl_sge;
+ u32 irrl_cur_sge_offset;
+ u32 byte_240_irrl_tail;
+ u32 byte_244_rnr_rxack;
+ u32 byte_248_ack_psn;
+ u32 byte_252_err_txcqn;
+ u32 byte_256_sqflush_rqcqe;
+};
+
+#define V2_QPC_BYTE_4_TST_S 0
+#define V2_QPC_BYTE_4_TST_M GENMASK(2, 0)
+
+#define V2_QPC_BYTE_4_SGE_SHIFT_S 3
+#define V2_QPC_BYTE_4_SGE_SHIFT_M GENMASK(7, 3)
+
+#define V2_QPC_BYTE_4_SQPN_S 8
+#define V2_QPC_BYTE_4_SQPN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_12_WQE_SGE_BA_S 0
+#define V2_QPC_BYTE_12_WQE_SGE_BA_M GENMASK(28, 0)
+
+#define V2_QPC_BYTE_12_SQ_HOP_NUM_S 29
+#define V2_QPC_BYTE_12_SQ_HOP_NUM_M GENMASK(30, 29)
+
+#define V2_QPC_BYTE_12_RSVD_LKEY_EN_S 31
+
+#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S 0
+#define V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M GENMASK(3, 0)
+
+#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S 4
+#define V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define V2_QPC_BYTE_16_PD_S 8
+#define V2_QPC_BYTE_16_PD_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_20_RQ_HOP_NUM_S 0
+#define V2_QPC_BYTE_20_RQ_HOP_NUM_M GENMASK(1, 0)
+
+#define V2_QPC_BYTE_20_SGE_HOP_NUM_S 2
+#define V2_QPC_BYTE_20_SGE_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_QPC_BYTE_20_RQWS_S 4
+#define V2_QPC_BYTE_20_RQWS_M GENMASK(7, 4)
+
+#define V2_QPC_BYTE_20_SQ_SHIFT_S 8
+#define V2_QPC_BYTE_20_SQ_SHIFT_M GENMASK(11, 8)
+
+#define V2_QPC_BYTE_20_RQ_SHIFT_S 12
+#define V2_QPC_BYTE_20_RQ_SHIFT_M GENMASK(15, 12)
+
+#define V2_QPC_BYTE_20_SGID_IDX_S 16
+#define V2_QPC_BYTE_20_SGID_IDX_M GENMASK(23, 16)
+
+#define V2_QPC_BYTE_20_SMAC_IDX_S 24
+#define V2_QPC_BYTE_20_SMAC_IDX_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_24_HOP_LIMIT_S 0
+#define V2_QPC_BYTE_24_HOP_LIMIT_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_24_TC_S 8
+#define V2_QPC_BYTE_24_TC_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_24_VLAN_IDX_S 16
+#define V2_QPC_BYTE_24_VLAN_IDX_M GENMASK(27, 16)
+
+#define V2_QPC_BYTE_24_MTU_S 28
+#define V2_QPC_BYTE_24_MTU_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_28_FL_S 0
+#define V2_QPC_BYTE_28_FL_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_28_SL_S 20
+#define V2_QPC_BYTE_28_SL_M GENMASK(23, 20)
+
+#define V2_QPC_BYTE_28_CNP_TX_FLAG_S 24
+
+#define V2_QPC_BYTE_28_CE_FLAG_S 25
+
+#define V2_QPC_BYTE_28_LBI_S 26
+
+#define V2_QPC_BYTE_28_AT_S 27
+#define V2_QPC_BYTE_28_AT_M GENMASK(31, 27)
+
+#define V2_QPC_BYTE_52_DMAC_S 0
+#define V2_QPC_BYTE_52_DMAC_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_52_UDPSPN_S 16
+#define V2_QPC_BYTE_52_UDPSPN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_56_DQPN_S 0
+#define V2_QPC_BYTE_56_DQPN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_56_SQ_TX_ERR_S 24
+#define V2_QPC_BYTE_56_SQ_RX_ERR_S 25
+#define V2_QPC_BYTE_56_RQ_TX_ERR_S 26
+#define V2_QPC_BYTE_56_RQ_RX_ERR_S 27
+
+#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
+#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_60_MAPID_S 0
+#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+
+#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+
+#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+
+#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
+
+#define V2_QPC_BYTE_60_TEMPID_S 16
+#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
+
+#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
+
+#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
+#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
+
+#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+
+#define V2_QPC_BYTE_60_QP_ST_S 29
+#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_68_RQ_RECORD_EN_S 0
+
+#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S 1
+#define V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M GENMASK(31, 1)
+
+#define V2_QPC_BYTE_76_SRQN_S 0
+#define V2_QPC_BYTE_76_SRQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_76_SRQ_EN_S 24
+
+#define V2_QPC_BYTE_76_RRE_S 25
+
+#define V2_QPC_BYTE_76_RWE_S 26
+
+#define V2_QPC_BYTE_76_ATE_S 27
+
+#define V2_QPC_BYTE_76_RQIE_S 28
+
+#define V2_QPC_BYTE_80_RX_CQN_S 0
+#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_80_MIN_RNR_TIME_S 27
+#define V2_QPC_BYTE_80_MIN_RNR_TIME_M GENMASK(31, 27)
+
+#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_92_SRQ_INFO_S 20
+#define V2_QPC_BYTE_92_SRQ_INFO_M GENMASK(31, 20)
+
+#define V2_QPC_BYTE_96_RX_REQ_MSN_S 0
+#define V2_QPC_BYTE_96_RX_REQ_MSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S 0
+#define V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S 24
+#define V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_108_INV_CREDIT_S 0
+
+#define V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S 3
+
+#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S 4
+#define V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M GENMASK(6, 4)
+
+#define V2_QPC_BYTE_108_RX_REQ_RNR_S 7
+
+#define V2_QPC_BYTE_108_RX_REQ_EPSN_S 8
+#define V2_QPC_BYTE_108_RX_REQ_EPSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_S 0
+#define V2_QPC_BYTE_132_TRRL_HEAD_MAX_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_S 8
+#define V2_QPC_BYTE_132_TRRL_TAIL_MAX_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_132_TRRL_BA_S 16
+#define V2_QPC_BYTE_132_TRRL_BA_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_140_TRRL_BA_S 0
+#define V2_QPC_BYTE_140_TRRL_BA_M GENMASK(11, 0)
+
+#define V2_QPC_BYTE_140_RR_MAX_S 12
+#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
+
+#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+
+#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
+#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
+
+#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S 24
+#define V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
+
+#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
+#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
+
+#define V2_QPC_BYTE_144_RESP_RTY_FLG_S 31
+
+#define V2_QPC_BYTE_148_RQ_MSN_S 0
+#define V2_QPC_BYTE_148_RQ_MSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+#define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_152_RAQ_PSN_S 8
+#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+#define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_156_RAQ_USE_PKTN_S 0
+#define V2_QPC_BYTE_156_RAQ_USE_PKTN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S 0
+#define V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S 16
+#define V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S 20
+
+#define V2_QPC_BYTE_168_SQ_INVLD_FLG_S 21
+
+#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
+#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
+
+#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
+#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
+
+#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
+#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
+
+#define V2_QPC_BYTE_172_ACK_REQ_FREQ_S 0
+#define V2_QPC_BYTE_172_ACK_REQ_FREQ_M GENMASK(5, 0)
+
+#define V2_QPC_BYTE_172_MSG_RNR_FLG_S 6
+
+#define V2_QPC_BYTE_172_FRE_S 7
+
+#define V2_QPC_BYTE_172_SQ_CUR_PSN_S 8
+#define V2_QPC_BYTE_172_SQ_CUR_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_176_MSG_USE_PKTN_S 0
+#define V2_QPC_BYTE_176_MSG_USE_PKTN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_S 24
+#define V2_QPC_BYTE_176_IRRL_HEAD_PRE_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S 0
+#define V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_184_IRRL_IDX_MSB_S 20
+#define V2_QPC_BYTE_184_IRRL_IDX_MSB_M GENMASK(31, 20)
+
+#define V2_QPC_BYTE_192_CUR_SGE_IDX_S 0
+#define V2_QPC_BYTE_192_CUR_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S 24
+#define V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M GENMASK(31, 24)
+
+#define V2_QPC_BYTE_196_IRRL_HEAD_S 0
+#define V2_QPC_BYTE_196_IRRL_HEAD_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_196_SQ_MAX_PSN_S 8
+#define V2_QPC_BYTE_196_SQ_MAX_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_200_SQ_MAX_IDX_S 0
+#define V2_QPC_BYTE_200_SQ_MAX_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_S 16
+#define V2_QPC_BYTE_200_LCL_OPERATED_CNT_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_208_IRRL_BA_S 0
+#define V2_QPC_BYTE_208_IRRL_BA_M GENMASK(25, 0)
+
+#define V2_QPC_BYTE_208_PKT_RNR_FLG_S 26
+
+#define V2_QPC_BYTE_208_PKT_RTY_FLG_S 27
+
+#define V2_QPC_BYTE_208_RMT_E2E_S 28
+
+#define V2_QPC_BYTE_208_SR_MAX_S 29
+#define V2_QPC_BYTE_208_SR_MAX_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_212_LSN_S 0
+#define V2_QPC_BYTE_212_LSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_212_RETRY_NUM_INIT_S 24
+#define V2_QPC_BYTE_212_RETRY_NUM_INIT_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_212_CHECK_FLG_S 27
+#define V2_QPC_BYTE_212_CHECK_FLG_M GENMASK(28, 27)
+
+#define V2_QPC_BYTE_212_RETRY_CNT_S 29
+#define V2_QPC_BYTE_212_RETRY_CNT_M GENMASK(31, 29)
+
+#define V2_QPC_BYTE_220_RETRY_MSG_MSN_S 0
+#define V2_QPC_BYTE_220_RETRY_MSG_MSN_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_220_RETRY_MSG_PSN_S 16
+#define V2_QPC_BYTE_220_RETRY_MSG_PSN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_224_RETRY_MSG_PSN_S 0
+#define V2_QPC_BYTE_224_RETRY_MSG_PSN_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S 8
+#define V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M GENMASK(31, 8)
+
+#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S 0
+#define V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M GENMASK(19, 0)
+
+#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
+#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+
+#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
+#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
+
+#define V2_QPC_BYTE_240_IRRL_TAIL_RD_S 8
+#define V2_QPC_BYTE_240_IRRL_TAIL_RD_M GENMASK(15, 8)
+
+#define V2_QPC_BYTE_240_RX_ACK_MSN_S 16
+#define V2_QPC_BYTE_240_RX_ACK_MSN_M GENMASK(31, 16)
+
+#define V2_QPC_BYTE_244_RX_ACK_EPSN_S 0
+#define V2_QPC_BYTE_244_RX_ACK_EPSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_244_RNR_NUM_INIT_S 24
+#define V2_QPC_BYTE_244_RNR_NUM_INIT_M GENMASK(26, 24)
+
+#define V2_QPC_BYTE_244_RNR_CNT_S 27
+#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+
+#define V2_QPC_BYTE_248_IRRL_PSN_S 0
+#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_248_ACK_PSN_ERR_S 24
+
+#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S 25
+#define V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M GENMASK(26, 25)
+
+#define V2_QPC_BYTE_248_IRRL_PSN_VLD_S 27
+
+#define V2_QPC_BYTE_248_RNR_RETRY_FLAG_S 28
+
+#define V2_QPC_BYTE_248_CQ_ERR_IND_S 31
+
+#define V2_QPC_BYTE_252_TX_CQN_S 0
+#define V2_QPC_BYTE_252_TX_CQN_M GENMASK(23, 0)
+
+#define V2_QPC_BYTE_252_SIG_TYPE_S 24
+
+#define V2_QPC_BYTE_252_ERR_TYPE_S 25
+#define V2_QPC_BYTE_252_ERR_TYPE_M GENMASK(31, 25)
+
+#define V2_QPC_BYTE_256_RQ_CQE_IDX_S 0
+#define V2_QPC_BYTE_256_RQ_CQE_IDX_M GENMASK(15, 0)
+
+#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_S 16
+#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
+
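+/*
+ * Completion queue entry as reported by hardware
+ * (HNS_ROCE_V2_CQE_ENTRY_SIZE bytes); the V2_CQE_BYTE_* macros below pick
+ * out its bit fields.
+ */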
+struct hns_roce_v2_cqe {
+ u32 byte_4;
+ u32 rkey_immtdata;
+ u32 byte_12;
+ u32 byte_16;
+ u32 byte_cnt;
+ u32 smac;
+ u32 byte_28;
+ u32 byte_32;
+};
+
+#define V2_CQE_BYTE_4_OPCODE_S 0
+#define V2_CQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_CQE_BYTE_4_RQ_INLINE_S 5
+
+#define V2_CQE_BYTE_4_S_R_S 6
+
+#define V2_CQE_BYTE_4_OWNER_S 7
+
+#define V2_CQE_BYTE_4_STATUS_S 8
+#define V2_CQE_BYTE_4_STATUS_M GENMASK(15, 8)
+
+#define V2_CQE_BYTE_4_WQE_INDX_S 16
+#define V2_CQE_BYTE_4_WQE_INDX_M GENMASK(31, 16)
+
+#define V2_CQE_BYTE_12_XRC_SRQN_S 0
+#define V2_CQE_BYTE_12_XRC_SRQN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_16_LCL_QPN_S 0
+#define V2_CQE_BYTE_16_LCL_QPN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_16_SUB_STATUS_S 24
+#define V2_CQE_BYTE_16_SUB_STATUS_M GENMASK(31, 24)
+
+#define V2_CQE_BYTE_28_SMAC_4_S 0
+#define V2_CQE_BYTE_28_SMAC_4_M GENMASK(7, 0)
+
+#define V2_CQE_BYTE_28_SMAC_5_S 8
+#define V2_CQE_BYTE_28_SMAC_5_M GENMASK(15, 8)
+
+#define V2_CQE_BYTE_28_PORT_TYPE_S 16
+#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+
+#define V2_CQE_BYTE_32_RMT_QPN_S 0
+#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
+
+#define V2_CQE_BYTE_32_SL_S 24
+#define V2_CQE_BYTE_32_SL_M GENMASK(26, 24)
+
+#define V2_CQE_BYTE_32_PORTN_S 27
+#define V2_CQE_BYTE_32_PORTN_M GENMASK(29, 27)
+
+#define V2_CQE_BYTE_32_GRH_S 30
+
+#define V2_CQE_BYTE_32_LPK_S 31
+
+struct hns_roce_v2_mpt_entry {
+ __le32 byte_4_pd_hop_st;
+ __le32 byte_8_mw_cnt_en;
+ __le32 byte_12_mw_pa;
+ __le32 bound_lkey;
+ __le32 len_l;
+ __le32 len_h;
+ __le32 lkey;
+ __le32 va_l;
+ __le32 va_h;
+ __le32 pbl_size;
+ __le32 pbl_ba_l;
+ __le32 byte_48_mode_ba;
+ __le32 pa0_l;
+ __le32 byte_56_pa0_h;
+ __le32 pa1_l;
+ __le32 byte_64_buf_pa1;
+};
+
+#define V2_MPT_BYTE_4_MPT_ST_S 0
+#define V2_MPT_BYTE_4_MPT_ST_M GENMASK(1, 0)
+
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_S 2
+#define V2_MPT_BYTE_4_PBL_HOP_NUM_M GENMASK(3, 2)
+
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_S 4
+#define V2_MPT_BYTE_4_PBL_BA_PG_SZ_M GENMASK(7, 4)
+
+#define V2_MPT_BYTE_4_PD_S 8
+#define V2_MPT_BYTE_4_PD_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_8_RA_EN_S 0
+
+#define V2_MPT_BYTE_8_R_INV_EN_S 1
+
+#define V2_MPT_BYTE_8_L_INV_EN_S 2
+
+#define V2_MPT_BYTE_8_BIND_EN_S 3
+
+#define V2_MPT_BYTE_8_ATOMIC_EN_S 4
+
+#define V2_MPT_BYTE_8_RR_EN_S 5
+
+#define V2_MPT_BYTE_8_RW_EN_S 6
+
+#define V2_MPT_BYTE_8_LW_EN_S 7
+
+#define V2_MPT_BYTE_12_PA_S 1
+
+#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
+
+#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
+#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_48_PBL_BA_H_S 0
+#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
+
+#define V2_MPT_BYTE_48_BLK_MODE_S 29
+
+#define V2_MPT_BYTE_56_PA0_H_S 0
+#define V2_MPT_BYTE_56_PA0_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PA1_H_S 0
+#define V2_MPT_BYTE_64_PA1_H_M GENMASK(25, 0)
+
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S 28
+#define V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M GENMASK(31, 28)
+
+#define V2_DB_BYTE_4_TAG_S 0
+#define V2_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define V2_DB_BYTE_4_CMD_S 24
+#define V2_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_DB_PARAMETER_CONS_IDX_S 0
+#define V2_DB_PARAMETER_CONS_IDX_M GENMASK(15, 0)
+
+#define V2_DB_PARAMETER_SL_S 16
+#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
+
+struct hns_roce_v2_cq_db {
+ u32 byte_4;
+ u32 parameter;
+};
+
+#define V2_CQ_DB_BYTE_4_TAG_S 0
+#define V2_CQ_DB_BYTE_4_TAG_M GENMASK(23, 0)
+
+#define V2_CQ_DB_BYTE_4_CMD_S 24
+#define V2_CQ_DB_BYTE_4_CMD_M GENMASK(27, 24)
+
+#define V2_CQ_DB_PARAMETER_CONS_IDX_S 0
+#define V2_CQ_DB_PARAMETER_CONS_IDX_M GENMASK(23, 0)
+
+#define V2_CQ_DB_PARAMETER_CMD_SN_S 25
+#define V2_CQ_DB_PARAMETER_CMD_SN_M GENMASK(26, 25)
+
+#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+
+struct hns_roce_v2_rc_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 inv_key_immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 rkey;
+ u64 va;
+};
+
+#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9
+
+#define V2_RC_SEND_WQE_BYTE_4_SO_S 10
+
+#define V2_RC_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+
+#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
+#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
+
+#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+struct hns_roce_v2_wqe_data_seg {
+ __be32 len;
+ __be32 lkey;
+ __be64 addr;
+};
+
+struct hns_roce_v2_db {
+ u32 byte_4;
+ u32 parameter;
+};
+
+struct hns_roce_query_version {
+ __le16 rocee_vendor_id;
+ __le16 rocee_hw_version;
+ __le32 rsv[5];
+};
+
+struct hns_roce_cfg_global_param {
+ __le32 time_cfg_udp_port;
+ __le32 rsv[5];
+};
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S 0
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M GENMASK(9, 0)
+
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S 16
+#define CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M GENMASK(31, 16)
+
+struct hns_roce_pf_res {
+ __le32 rsv;
+ __le32 qpc_bt_idx_num;
+ __le32 srqc_bt_idx_num;
+ __le32 cqc_bt_idx_num;
+ __le32 mpt_bt_idx_num;
+ __le32 eqc_bt_idx_num;
+};
+
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_S 0
+#define PF_RES_DATA_1_PF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_S 16
+#define PF_RES_DATA_1_PF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_S 0
+#define PF_RES_DATA_2_PF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_S 16
+#define PF_RES_DATA_2_PF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_S 0
+#define PF_RES_DATA_3_PF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_S 16
+#define PF_RES_DATA_3_PF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_S 0
+#define PF_RES_DATA_4_PF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_S 16
+#define PF_RES_DATA_4_PF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_S 0
+#define PF_RES_DATA_5_PF_EQC_BT_IDX_M GENMASK(8, 0)
+
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_S 16
+#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_a {
+ u32 vf_id;
+ u32 vf_qpc_bt_idx_num;
+ u32 vf_srqc_bt_idx_num;
+ u32 vf_cqc_bt_idx_num;
+ u32 vf_mpt_bt_idx_num;
+ u32 vf_eqc_bt_idx_num;
+};
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
+#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_M GENMASK(10, 0)
+
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_S 16
+#define VF_RES_A_DATA_1_VF_QPC_BT_NUM_M GENMASK(27, 16)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S 0
+#define VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S 16
+#define VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_S 0
+#define VF_RES_A_DATA_3_VF_CQC_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_S 16
+#define VF_RES_A_DATA_3_VF_CQC_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_S 0
+#define VF_RES_A_DATA_4_VF_MPT_BT_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_S 16
+#define VF_RES_A_DATA_4_VF_MPT_BT_NUM_M GENMASK(25, 16)
+
+#define VF_RES_A_DATA_5_VF_EQC_IDX_S 0
+#define VF_RES_A_DATA_5_VF_EQC_IDX_M GENMASK(8, 0)
+
+#define VF_RES_A_DATA_5_VF_EQC_NUM_S 16
+#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
+
+struct hns_roce_vf_res_b {
+ u32 rsv0;
+ u32 vf_smac_idx_num;
+ u32 vf_sgid_idx_num;
+ u32 vf_qid_idx_sl_num;
+ u32 rsv[2];
+};
+
+#define VF_RES_B_DATA_0_VF_ID_S 0
+#define VF_RES_B_DATA_0_VF_ID_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_S 0
+#define VF_RES_B_DATA_1_VF_SMAC_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_S 8
+#define VF_RES_B_DATA_1_VF_SMAC_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_2_VF_SGID_IDX_S 0
+#define VF_RES_B_DATA_2_VF_SGID_IDX_M GENMASK(7, 0)
+
+#define VF_RES_B_DATA_2_VF_SGID_NUM_S 8
+#define VF_RES_B_DATA_2_VF_SGID_NUM_M GENMASK(16, 8)
+
+#define VF_RES_B_DATA_3_VF_QID_IDX_S 0
+#define VF_RES_B_DATA_3_VF_QID_IDX_M GENMASK(9, 0)
+
+#define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+#define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
+/* Reg field definition */
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_S 0
+#define ROCEE_VF_SMAC_CFG1_VF_SMAC_H_M GENMASK(15, 0)
+
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_S 0
+#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
+
+struct hns_roce_cfg_bt_attr {
+ u32 vf_qpc_cfg;
+ u32 vf_srqc_cfg;
+ u32 vf_cqc_cfg;
+ u32 vf_mpt_cfg;
+ u32 rsv[2];
+};
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M GENMASK(9, 8)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S 0
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M GENMASK(3, 0)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S 4
+#define CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M GENMASK(7, 4)
+
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
+struct hns_roce_cmq_desc {
+ u16 opcode;
+ u16 flag;
+ u16 retval;
+ u16 rsv;
+ u32 data[6];
+};
+
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
+
+#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
+#define HNS_ROCE_HW_MB_STATUS_MASK 0xFF
+
+#define HNS_ROCE_VF_MB4_TAG_MASK 0xFFFFFF00
+#define HNS_ROCE_VF_MB4_TAG_SHIFT 8
+
+#define HNS_ROCE_VF_MB4_CMD_MASK 0xFF
+#define HNS_ROCE_VF_MB4_CMD_SHIFT 0
+
+#define HNS_ROCE_VF_MB5_EVENT_MASK 0x10000
+#define HNS_ROCE_VF_MB5_EVENT_SHIFT 16
+
+#define HNS_ROCE_VF_MB5_TOKEN_MASK 0xFFFF
+#define HNS_ROCE_VF_MB5_TOKEN_SHIFT 0
+
+struct hns_roce_v2_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hns_roce_cmq_desc *desc;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* command queue lock */
+};
+
+struct hns_roce_v2_cmq {
+ struct hns_roce_v2_cmq_ring csq;
+ struct hns_roce_v2_cmq_ring crq;
+ u16 tx_timeout;
+ u16 last_status;
+};
+
+struct hns_roce_v2_priv {
+ struct hns_roce_v2_cmq cmq;
+};
+
+#endif
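The *_S/*_M pairs above follow the driver's shift-and-mask convention: _S is the bit offset of a field inside a 32-bit command-queue data word and _M is the GENMASK() covering it. Below is a minimal, self-contained sketch of packing and unpacking one such field; the set_field()/get_field() helpers and the sample values are illustrative only, not the driver's own accessors.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK() on 32-bit words. */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Copied from the header above: QPC BT index/number in PF_RES data word 1. */
#define PF_RES_DATA_1_PF_QPC_BT_IDX_S	0
#define PF_RES_DATA_1_PF_QPC_BT_IDX_M	GENMASK(10, 0)
#define PF_RES_DATA_1_PF_QPC_BT_NUM_S	16
#define PF_RES_DATA_1_PF_QPC_BT_NUM_M	GENMASK(27, 16)

/* Illustrative helpers; the driver uses its own field accessors. */
static inline uint32_t get_field(uint32_t word, uint32_t mask, int shift)
{
	return (word & mask) >> shift;
}

static inline uint32_t set_field(uint32_t word, uint32_t mask, int shift,
				 uint32_t val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t word = 0;

	/* Pack: QPC BT base index 5, QPC BT number 128. */
	word = set_field(word, PF_RES_DATA_1_PF_QPC_BT_IDX_M,
			 PF_RES_DATA_1_PF_QPC_BT_IDX_S, 5);
	word = set_field(word, PF_RES_DATA_1_PF_QPC_BT_NUM_M,
			 PF_RES_DATA_1_PF_QPC_BT_NUM_S, 128);

	/* Unpack the same word again. */
	printf("qpc_bt_idx=%u qpc_bt_num=%u\n",
	       get_field(word, PF_RES_DATA_1_PF_QPC_BT_IDX_M,
			 PF_RES_DATA_1_PF_QPC_BT_IDX_S),
	       get_field(word, PF_RES_DATA_1_PF_QPC_BT_NUM_M,
			 PF_RES_DATA_1_PF_QPC_BT_NUM_S));
	return 0;
}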
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d9777b662eba..cf02ac2d3596 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -57,20 +57,21 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
return gid_index * hr_dev->caps.num_ports + port;
}
+EXPORT_SYMBOL_GPL(hns_get_gid_index);
-static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
u32 i = 0;
if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
- return;
+ return 0;
for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
hr_dev->dev_addr[port][i] = addr[i];
phy_port = hr_dev->iboe.phy_port[port];
- hr_dev->hw->set_mac(hr_dev, phy_port, addr);
+ return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
@@ -80,17 +81,19 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
struct hns_roce_dev *hr_dev = to_hr_dev(device);
u8 port = port_num - 1;
unsigned long flags;
+ int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
+ ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
+ attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return 0;
+ return ret;
}
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
@@ -100,24 +103,26 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
union ib_gid zgid = { {0} };
u8 port = port_num - 1;
unsigned long flags;
+ int ret;
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+ ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, NULL);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- return 0;
+ return ret;
}
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
unsigned long event)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct net_device *netdev;
+ int ret = 0;
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
@@ -130,7 +135,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
case NETDEV_CHANGE:
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
- hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
break;
case NETDEV_DOWN:
/*
@@ -142,7 +147,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
break;
}
- return 0;
+ return ret;
}
static int hns_roce_netdev_event(struct notifier_block *self,
@@ -171,12 +176,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
+ int ret;
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
- hr_dev->caps.max_mtu);
- hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
+ if (hr_dev->hw->set_mtu)
+ hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+ hr_dev->caps.max_mtu);
+ ret = hns_roce_set_mac(hr_dev, i,
+ hr_dev->iboe.netdevs[i]->dev_addr);
+ if (ret)
+ return ret;
}
return 0;
@@ -200,7 +210,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_RC_RNR_NAK_GEN;
- props->max_sge = hr_dev->caps.max_sq_sg;
+ props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
@@ -238,7 +248,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct net_device *net_dev;
unsigned long flags;
enum ib_mtu mtu;
@@ -379,7 +389,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
to_hr_ucontext(context)->uar.pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
- } else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
+ hr_dev->tptr_size) {
/* vm_pgoff: 1 -- TPTR */
if (io_remap_pfn_range(vma, vma->vm_start,
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
@@ -398,8 +409,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr attr;
int ret;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
-
ret = ib_query_port(ib_dev, port_num, &attr);
if (ret)
return ret;
@@ -408,6 +417,9 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
return 0;
}
@@ -416,7 +428,6 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
- unregister_inetaddr_notifier(&iboe->nb_inet);
unregister_netdevice_notifier(&iboe->nb);
ib_unregister_device(&hr_dev->ib_dev);
}
@@ -426,7 +437,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
int ret;
struct hns_roce_ib_iboe *iboe = NULL;
struct ib_device *ib_dev = NULL;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
@@ -492,6 +503,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* CQ */
ib_dev->create_cq = hns_roce_ib_create_cq;
+ ib_dev->modify_cq = hr_dev->hw->modify_cq;
ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
ib_dev->poll_cq = hr_dev->hw->poll_cq;
@@ -500,6 +512,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->get_dma_mr = hns_roce_get_dma_mr;
ib_dev->reg_user_mr = hns_roce_reg_user_mr;
ib_dev->dereg_mr = hns_roce_dereg_mr;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
+ ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
+ ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
+ }
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
@@ -531,173 +547,10 @@ error_failed_setup_mtu_mac:
return ret;
}
-static const struct of_device_id hns_roce_of_match[] = {
- { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
- {},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
- { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static int hns_roce_node_match(struct device *dev, void *fwnode)
-{
- return dev->fwnode == fwnode;
-}
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- /* get the 'device'corresponding to matching 'fwnode' */
- dev = bus_find_device(&platform_bus_type, NULL,
- fwnode, hns_roce_node_match);
- /* get the platform device */
- return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
- int i;
- int ret;
- u8 phy_port;
- int port_cnt = 0;
- struct device *dev = &hr_dev->pdev->dev;
- struct device_node *net_node;
- struct net_device *netdev = NULL;
- struct platform_device *pdev = NULL;
- struct resource *res;
-
- /* check if we are compatible with the underlying SoC */
- if (dev_of_node(dev)) {
- const struct of_device_id *of_id;
-
- of_id = of_match_node(hns_roce_of_match, dev->of_node);
- if (!of_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (struct hns_roce_hw *)of_id->data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific DT data!\n");
- return -ENXIO;
- }
- } else if (is_acpi_device_node(dev->fwnode)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
- if (!acpi_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific ACPI data!\n");
- return -ENXIO;
- }
- } else {
- dev_err(dev, "can't read compatibility data from DT or ACPI\n");
- return -ENXIO;
- }
-
- /* get the mapped register base address */
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
- hr_dev->reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(hr_dev->reg_base))
- return PTR_ERR(hr_dev->reg_base);
-
- /* read the node_guid of IB device from the DT or ACPI */
- ret = device_property_read_u8_array(dev, "node-guid",
- (u8 *)&hr_dev->ib_dev.node_guid,
- GUID_LEN);
- if (ret) {
- dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
- return ret;
- }
-
- /* get the RoCE associated ethernet ports or netdevices */
- for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
- if (dev_of_node(dev)) {
- net_node = of_parse_phandle(dev->of_node, "eth-handle",
- i);
- if (!net_node)
- continue;
- pdev = of_find_device_by_node(net_node);
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct acpi_reference_args args;
- struct fwnode_handle *fwnode;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "eth-handle",
- i, &args);
- if (ret)
- continue;
- fwnode = acpi_fwnode_handle(args.adev);
- pdev = hns_roce_find_pdev(fwnode);
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- if (pdev) {
- netdev = platform_get_drvdata(pdev);
- phy_port = (u8)i;
- if (netdev) {
- hr_dev->iboe.netdevs[port_cnt] = netdev;
- hr_dev->iboe.phy_port[port_cnt] = phy_port;
- } else {
- dev_err(dev, "no netdev found with pdev %s\n",
- pdev->name);
- return -ENODEV;
- }
- port_cnt++;
- }
- }
-
- if (port_cnt == 0) {
- dev_err(dev, "unable to get eth-handle for available ports!\n");
- return -EINVAL;
- }
-
- hr_dev->caps.num_ports = port_cnt;
-
- /* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 1;
- hr_dev->loop_idc = 0;
-
- /* read the interrupt names from the DT or ACPI */
- ret = device_property_read_string_array(dev, "interrupt-names",
- hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
- if (ret < 0) {
- dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
- return ret;
- }
-
- /* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
- hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0) {
- dev_err(dev, "platform get of irq[=%d] failed!\n", i);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
int ret;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -707,6 +560,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
return ret;
}
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table,
+ HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
+ hr_dev->caps.num_cqe_segs, 1);
+ if (ret) {
+ dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
+ goto err_unmap_cqe;
+ }
+ }
+
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
hr_dev->caps.num_mtpts, 1);
@@ -733,16 +597,35 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
goto err_unmap_qp;
}
+ if (hr_dev->caps.trrl_entry_sz) {
+ ret = hns_roce_init_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table,
+ HEM_TYPE_TRRL,
+ hr_dev->caps.trrl_entry_sz *
+ hr_dev->caps.max_qp_dest_rdma,
+ hr_dev->caps.num_qps, 1);
+ if (ret) {
+ dev_err(dev,
+ "Failed to init trrl_table memory, aborting.\n");
+ goto err_unmap_irrl;
+ }
+ }
+
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
hr_dev->caps.num_cqs, 1);
if (ret) {
dev_err(dev, "Failed to init CQ context memory, aborting.\n");
- goto err_unmap_irrl;
+ goto err_unmap_trrl;
}
return 0;
+err_unmap_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+
err_unmap_irrl:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
@@ -754,6 +637,12 @@ err_unmap_dmpt:
err_unmap_mtt:
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->mr_table.mtt_cqe_table);
+
+err_unmap_cqe:
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
return ret;
}
@@ -766,7 +655,7 @@ err_unmap_mtt:
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
int ret;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -826,56 +715,45 @@ err_uar_table_free:
return ret;
}
-/**
- * hns_roce_probe - RoCE driver entrance
- * @pdev: pointer to platform device
- * Return : int
- *
- */
-static int hns_roce_probe(struct platform_device *pdev)
+int hns_roce_init(struct hns_roce_dev *hr_dev)
{
int ret;
- struct hns_roce_dev *hr_dev;
- struct device *dev = &pdev->dev;
-
- hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
- if (!hr_dev)
- return -ENOMEM;
-
- hr_dev->pdev = pdev;
- platform_set_drvdata(pdev, hr_dev);
+ struct device *dev = hr_dev->dev;
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
- dev_err(dev, "Not usable DMA addressing mode\n");
- ret = -EIO;
- goto error_failed_get_cfg;
+ if (hr_dev->hw->reset) {
+ ret = hr_dev->hw->reset(hr_dev, true);
+ if (ret) {
+ dev_err(dev, "Reset RoCE engine failed!\n");
+ return ret;
+ }
}
- ret = hns_roce_get_cfg(hr_dev);
- if (ret) {
- dev_err(dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
+ if (hr_dev->hw->cmq_init) {
+ ret = hr_dev->hw->cmq_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "Init RoCE Command Queue failed!\n");
+ goto error_failed_cmq_init;
+ }
}
- ret = hr_dev->hw->reset(hr_dev, true);
+ ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- goto error_failed_get_cfg;
+ dev_err(dev, "Get RoCE engine profile failed!\n");
+ goto error_failed_cmd_init;
}
- hr_dev->hw->hw_profile(hr_dev);
-
ret = hns_roce_cmd_init(hr_dev);
if (ret) {
dev_err(dev, "cmd init failed!\n");
goto error_failed_cmd_init;
}
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
+ if (hr_dev->cmd_mod) {
+ ret = hns_roce_init_eq_table(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
+ }
}
if (hr_dev->cmd_mod) {
@@ -898,10 +776,12 @@ static int hns_roce_probe(struct platform_device *pdev)
goto error_failed_setup_hca;
}
- ret = hr_dev->hw->hw_init(hr_dev);
- if (ret) {
- dev_err(dev, "hw_init failed!\n");
- goto error_failed_engine_init;
+ if (hr_dev->hw->hw_init) {
+ ret = hr_dev->hw->hw_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "hw_init failed!\n");
+ goto error_failed_engine_init;
+ }
}
ret = hns_roce_register_device(hr_dev);
@@ -911,7 +791,8 @@ static int hns_roce_probe(struct platform_device *pdev)
return 0;
error_failed_register_device:
- hr_dev->hw->hw_exit(hr_dev);
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
error_failed_engine_init:
hns_roce_cleanup_bitmap(hr_dev);
@@ -924,58 +805,47 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- hns_roce_cleanup_eq_table(hr_dev);
+ if (hr_dev->cmd_mod)
+ hns_roce_cleanup_eq_table(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
error_failed_cmd_init:
- ret = hr_dev->hw->reset(hr_dev, false);
- if (ret)
- dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
-error_failed_get_cfg:
- ib_dealloc_device(&hr_dev->ib_dev);
+error_failed_cmq_init:
+ if (hr_dev->hw->reset) {
+ ret = hr_dev->hw->reset(hr_dev, false);
+ if (ret)
+ dev_err(dev, "Dereset RoCE engine failed!\n");
+ }
return ret;
}
+EXPORT_SYMBOL_GPL(hns_roce_init);
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
hns_roce_unregister_device(hr_dev);
- hr_dev->hw->hw_exit(hr_dev);
+ if (hr_dev->hw->hw_exit)
+ hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev);
hns_roce_cleanup_hem(hr_dev);
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- hns_roce_cleanup_eq_table(hr_dev);
+ if (hr_dev->cmd_mod)
+ hns_roce_cleanup_eq_table(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
- hr_dev->hw->reset(hr_dev, false);
-
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return 0;
+ if (hr_dev->hw->cmq_exit)
+ hr_dev->hw->cmq_exit(hr_dev);
+ if (hr_dev->hw->reset)
+ hr_dev->hw->reset(hr_dev, false);
}
-
-static struct platform_driver hns_roce_driver = {
- .probe = hns_roce_probe,
- .remove = hns_roce_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = hns_roce_of_match,
- .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
- },
-};
-
-module_platform_driver(hns_roce_driver);
+EXPORT_SYMBOL_GPL(hns_roce_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index e387360e3780..da86a8117bd5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -47,6 +47,7 @@ unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
+EXPORT_SYMBOL_GPL(key_to_hw_index);
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -65,6 +66,7 @@ int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
+EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
unsigned long *seg)
@@ -175,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
}
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
- unsigned long *seg)
+ unsigned long *seg, u32 mtt_type)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- int ret = 0;
+ struct hns_roce_hem_table *table;
+ struct hns_roce_buddy *buddy;
+ int ret;
+
+ if (mtt_type == MTT_TYPE_WQE) {
+ buddy = &mr_table->mtt_buddy;
+ table = &mr_table->mtt_table;
+ } else {
+ buddy = &mr_table->mtt_cqe_buddy;
+ table = &mr_table->mtt_cqe_table;
+ }
- ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+ ret = hns_roce_buddy_alloc(buddy, order, seg);
if (ret == -1)
return -1;
- if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+ if (hns_roce_table_get_range(hr_dev, table, *seg,
*seg + (1 << order) - 1)) {
- hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+ hns_roce_buddy_free(buddy, *seg, order);
return -1;
}
@@ -196,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
struct hns_roce_mtt *mtt)
{
- int ret = 0;
+ int ret;
int i;
/* Page num is zero, correspond to DMA memory register */
@@ -215,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
++mtt->order;
/* Allocate MTT entry */
- ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+ ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+ mtt->mtt_type);
if (ret == -1)
return -ENOMEM;
@@ -229,18 +242,261 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
if (mtt->order < 0)
return;
- hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ if (mtt->mtt_type == MTT_TYPE_WQE) {
+ hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+ mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+ mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+ } else {
+ hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+ mtt->order);
+ hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+ mtt->first_seg,
+ mtt->first_seg + (1 << mtt->order) - 1);
+ }
+}
+EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
+
+static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int err_loop_index,
+ int loop_i, int loop_j)
+{
+ struct device *dev = hr_dev->dev;
+ u32 mhop_num;
+ u32 pbl_bt_sz;
+ u64 bt_idx;
+ int i, j;
+
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ mhop_num = hr_dev->caps.pbl_hop_num;
+
+ i = loop_i;
+ if (mhop_num == 3 && err_loop_index == 2) {
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ if (i == loop_i && j >= loop_j)
+ break;
+
+ bt_idx = i * pbl_bt_sz / 8 + j;
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ } else if (mhop_num == 3 && err_loop_index == 1) {
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * pbl_bt_sz / 8 + j;
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ } else if (mhop_num == 2 && err_loop_index == 1) {
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+ } else {
+ dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
+ mhop_num, err_loop_index);
+ return;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
+ mr->pbl_bt_l0 = NULL;
+ mr->pbl_l0_dma_addr = 0;
+}
+
+/* PBL multi hop addressing */
+static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = hr_dev->dev;
+ int mr_alloc_done = 0;
+ int npages_allocated;
+ int i = 0, j = 0;
+ u32 pbl_bt_sz;
+ u32 mhop_num;
+ u64 pbl_last_bt_num;
+ u64 pbl_bt_cnt = 0;
+ u64 bt_idx;
+ u64 size;
+
+ mhop_num = hr_dev->caps.pbl_hop_num;
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
+
+ if (mhop_num == HNS_ROCE_HOP_NUM_0)
+ return 0;
+
+ /* hop_num = 1 */
+ if (mhop_num == 1) {
+ if (npages > pbl_bt_sz / 8) {
+ dev_err(dev, "npages %d is larger than buf_pg_sz!",
+ npages);
+ return -EINVAL;
+ }
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+
+ mr->pbl_size = npages;
+ mr->pbl_ba = mr->pbl_dma_addr;
+ mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+ return 0;
+ }
+
+ mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
+ sizeof(*mr->pbl_l1_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_l1_dma_addr)
+ return -ENOMEM;
+
+ mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1)
+ goto err_kcalloc_bt_l1;
+
+ if (mhop_num == 3) {
+ mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_l2_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_l2_dma_addr)
+ goto err_kcalloc_l2_dma;
+
+ mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
+ sizeof(*mr->pbl_bt_l2),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l2)
+ goto err_kcalloc_bt_l2;
+ }
+
+ /* alloc L0 BT */
+ mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l0_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 2) {
+ /* alloc L1 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+ size = pbl_bt_sz;
+ } else {
+ npages_allocated = i * (pbl_bt_sz / 8);
+ size = (npages - npages_allocated) * 8;
+ }
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+ pbl_bt_cnt++;
+ if (pbl_bt_cnt >= pbl_last_bt_num)
+ break;
+ }
+ } else if (mhop_num == 3) {
+ /* alloc L1, L2 BT */
+ for (i = 0; i < pbl_bt_sz / 8; i++) {
+ mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
+ &(mr->pbl_l1_dma_addr[i]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l1[i]) {
+ hns_roce_loop_free(hr_dev, mr, 1, i, 0);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * pbl_bt_sz / 8 + j;
+
+ if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
+ size = pbl_bt_sz;
+ } else {
+ npages_allocated = bt_idx *
+ (pbl_bt_sz / 8);
+ size = (npages - npages_allocated) * 8;
+ }
+ mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
+ dev, size,
+ &(mr->pbl_l2_dma_addr[bt_idx]),
+ GFP_KERNEL);
+ if (!mr->pbl_bt_l2[bt_idx]) {
+ hns_roce_loop_free(hr_dev, mr, 2, i, j);
+ goto err_dma_alloc_l0;
+ }
+
+ *(mr->pbl_bt_l1[i] + j) =
+ mr->pbl_l2_dma_addr[bt_idx];
+
+ pbl_bt_cnt++;
+ if (pbl_bt_cnt >= pbl_last_bt_num) {
+ mr_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (mr_alloc_done)
+ break;
+ }
+ }
+
+ mr->l0_chunk_last_num = i + 1;
+ if (mhop_num == 3)
+ mr->l1_chunk_last_num = j + 1;
+
+ mr->pbl_size = npages;
+ mr->pbl_ba = mr->pbl_l0_dma_addr;
+ mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+ return 0;
+
+err_dma_alloc_l0:
+ kfree(mr->pbl_bt_l2);
+ mr->pbl_bt_l2 = NULL;
+
+err_kcalloc_bt_l2:
+ kfree(mr->pbl_l2_dma_addr);
+ mr->pbl_l2_dma_addr = NULL;
+
+err_kcalloc_l2_dma:
+ kfree(mr->pbl_bt_l1);
+ mr->pbl_bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(mr->pbl_l1_dma_addr);
+ mr->pbl_l1_dma_addr = NULL;
+
+ return -ENOMEM;
}
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
u64 size, u32 access, int npages,
struct hns_roce_mr *mr)
{
+ struct device *dev = hr_dev->dev;
unsigned long index = 0;
int ret = 0;
- struct device *dev = &hr_dev->pdev->dev;
/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -258,22 +514,117 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
+ /* PBL multi-hop addressing parameters */
+ mr->pbl_bt_l2 = NULL;
+ mr->pbl_bt_l1 = NULL;
+ mr->pbl_bt_l0 = NULL;
+ mr->pbl_l2_dma_addr = NULL;
+ mr->pbl_l1_dma_addr = NULL;
+ mr->pbl_l0_dma_addr = 0;
} else {
mr->type = MR_TYPE_MR;
- mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
- &(mr->pbl_dma_addr),
- GFP_KERNEL);
- if (!mr->pbl_buf)
- return -ENOMEM;
+ if (!hr_dev->caps.pbl_hop_num) {
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf)
+ return -ENOMEM;
+ } else {
+ ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+ }
}
- return 0;
+ return ret;
+}
+
+static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr)
+{
+ struct device *dev = hr_dev->dev;
+ int npages_allocated;
+ int npages;
+ int i, j;
+ u32 pbl_bt_sz;
+ u32 mhop_num;
+ u64 bt_idx;
+
+ npages = ib_umem_page_count(mr->umem);
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
+ mhop_num = hr_dev->caps.pbl_hop_num;
+
+ if (mhop_num == HNS_ROCE_HOP_NUM_0)
+ return;
+
+ /* hop_num = 1 */
+ if (mhop_num == 1) {
+ dma_free_coherent(dev, (unsigned int)(npages * 8),
+ mr->pbl_buf, mr->pbl_dma_addr);
+ return;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
+ mr->pbl_l0_dma_addr);
+
+ if (mhop_num == 2) {
+ for (i = 0; i < mr->l0_chunk_last_num; i++) {
+ if (i == mr->l0_chunk_last_num - 1) {
+ npages_allocated = i * (pbl_bt_sz / 8);
+
+ dma_free_coherent(dev,
+ (npages - npages_allocated) * 8,
+ mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ break;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+ }
+ } else if (mhop_num == 3) {
+ for (i = 0; i < mr->l0_chunk_last_num; i++) {
+ dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
+ mr->pbl_l1_dma_addr[i]);
+
+ for (j = 0; j < pbl_bt_sz / 8; j++) {
+ bt_idx = i * (pbl_bt_sz / 8) + j;
+
+ if ((i == mr->l0_chunk_last_num - 1)
+ && j == mr->l1_chunk_last_num - 1) {
+ npages_allocated = bt_idx *
+ (pbl_bt_sz / 8);
+
+ dma_free_coherent(dev,
+ (npages - npages_allocated) * 8,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+
+ break;
+ }
+
+ dma_free_coherent(dev, pbl_bt_sz,
+ mr->pbl_bt_l2[bt_idx],
+ mr->pbl_l2_dma_addr[bt_idx]);
+ }
+ }
+ }
+
+ kfree(mr->pbl_bt_l1);
+ kfree(mr->pbl_l1_dma_addr);
+ mr->pbl_bt_l1 = NULL;
+ mr->pbl_l1_dma_addr = NULL;
+ if (mhop_num == 3) {
+ kfree(mr->pbl_bt_l2);
+ kfree(mr->pbl_l2_dma_addr);
+ mr->pbl_bt_l2 = NULL;
+ mr->pbl_l2_dma_addr = NULL;
+ }
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int npages = 0;
int ret;
@@ -286,10 +637,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
- dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
- mr->pbl_dma_addr);
+
+ if (!hr_dev->caps.pbl_hop_num)
+ dma_free_coherent(dev, (unsigned int)(npages * 8),
+ mr->pbl_buf, mr->pbl_dma_addr);
+ else
+ hns_roce_mhop_free(hr_dev, mr);
}
+ if (mr->enabled)
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+ key_to_hw_index(mr->key));
+
hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), BITMAP_NO_RR);
}
@@ -299,7 +658,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
{
int ret;
unsigned long mtpt_idx = key_to_hw_index(mr->key);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
@@ -345,28 +704,44 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, u32 start_index,
u32 npages, u64 *page_list)
{
- u32 i = 0;
- __le64 *mtts = NULL;
+ struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
+ __le64 *mtts;
u32 s = start_index * sizeof(u64);
+ u32 bt_page_size;
+ u32 i;
+
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
+ else
+ bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
/* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof(u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
+ if (start_index / (bt_page_size / sizeof(u64)) !=
+ (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
return -EINVAL;
if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
return -EINVAL;
- mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ table = &hr_dev->mr_table.mtt_table;
+ else
+ table = &hr_dev->mr_table.mtt_cqe_table;
+
+ mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
&dma_handle);
if (!mtts)
return -ENOMEM;
/* Save page addr, low 12 bits : 0 */
- for (i = 0; i < npages; ++i)
- mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ if (!hr_dev->caps.mtt_hop_num)
+ mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
+ else
+ mtts[i] = cpu_to_le64(page_list[i]);
+ }
return 0;
}
@@ -377,12 +752,18 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
{
int chunk;
int ret;
+ u32 bt_page_size;
if (mtt->order < 0)
return -EINVAL;
+ if (mtt->mtt_type == MTT_TYPE_WQE)
+ bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
+ else
+ bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
+
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+ chunk = min_t(int, bt_page_size / sizeof(u64), npages);
ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
page_list);
@@ -400,9 +781,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
- u32 i = 0;
- int ret = 0;
- u64 *page_list = NULL;
+ u64 *page_list;
+ int ret;
+ u32 i;
page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
@@ -425,7 +806,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- int ret = 0;
+ int ret;
ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
hr_dev->caps.num_mtpts,
@@ -439,8 +820,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
if (ret)
goto err_buddy;
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+ ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+ ilog2(hr_dev->caps.num_cqe_segs));
+ if (ret)
+ goto err_buddy_cqe;
+ }
return 0;
+err_buddy_cqe:
+ hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
err_buddy:
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
@@ -451,13 +841,15 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+ if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+ hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
- int ret = 0;
- struct hns_roce_mr *mr = NULL;
+ struct hns_roce_mr *mr;
+ int ret;
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
@@ -489,25 +881,44 @@ err_free:
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
+ struct device *dev = hr_dev->dev;
struct scatterlist *sg;
+ unsigned int order;
int i, k, entry;
+ int npage = 0;
int ret = 0;
+ int len;
+ u64 page_addr;
u64 *pages;
+ u32 bt_page_size;
u32 n;
- int len;
- pages = (u64 *) __get_free_page(GFP_KERNEL);
+ order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
+ hr_dev->caps.cqe_ba_pg_sz;
+ bt_page_size = 1 << (order + PAGE_SHIFT);
+
+ pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
if (!pages)
return -ENOMEM;
i = n = 0;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- (k << umem->page_shift);
- if (i == PAGE_SIZE / sizeof(u64)) {
+ page_addr =
+ sg_dma_address(sg) + (k << umem->page_shift);
+ if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
+ if (page_addr & ((1 << mtt->page_shift) - 1)) {
+ dev_err(dev, "page_addr 0x%llx is not page_shift %d alignment!\n",
+ page_addr, mtt->page_shift);
+ ret = -EINVAL;
+ goto out;
+ }
+ pages[i++] = page_addr;
+ }
+ npage++;
+ if (i == bt_page_size / sizeof(u64)) {
ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
pages);
if (ret)
@@ -526,16 +937,44 @@ out:
return ret;
}
-static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
+static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr,
struct ib_umem *umem)
{
- int i = 0;
- int entry;
struct scatterlist *sg;
+ int i = 0, j = 0, k;
+ int entry;
+ int len;
+ u64 page_addr;
+ u32 pbl_bt_sz;
+
+ if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
+ return 0;
+ pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
- i++;
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (k = 0; k < len; ++k) {
+ page_addr = sg_dma_address(sg) +
+ (k << umem->page_shift);
+
+ if (!hr_dev->caps.pbl_hop_num) {
+ mr->pbl_buf[i++] = page_addr >> 12;
+ } else if (hr_dev->caps.pbl_hop_num == 1) {
+ mr->pbl_buf[i++] = page_addr;
+ } else {
+ if (hr_dev->caps.pbl_hop_num == 2)
+ mr->pbl_bt_l1[i][j] = page_addr;
+ else if (hr_dev->caps.pbl_hop_num == 3)
+ mr->pbl_bt_l2[i][j] = page_addr;
+
+ j++;
+ if (j >= (pbl_bt_sz / 8)) {
+ i++;
+ j = 0;
+ }
+ }
+ }
}
/* Memory barrier */
@@ -549,10 +988,12 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_mr *mr = NULL;
- int ret = 0;
- int n = 0;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ int bt_size;
+ int ret;
+ int n;
+ int i;
mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -573,11 +1014,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_umem;
}
- if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
- dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
- length);
- ret = -EINVAL;
- goto err_umem;
+ if (!hr_dev->caps.pbl_hop_num) {
+ if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
+ dev_err(dev,
+ " MR len %lld err. MR is limited to 4G at most!\n",
+ length);
+ ret = -EINVAL;
+ goto err_umem;
+ }
+ } else {
+ int pbl_size = 1;
+
+ bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
+ for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
+ pbl_size *= bt_size;
+ if (n > pbl_size) {
+ dev_err(dev,
+ " MR len %lld err. MR page num is limited to %d!\n",
+ length, pbl_size);
+ ret = -EINVAL;
+ goto err_umem;
+ }
}
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -585,7 +1042,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (ret)
goto err_umem;
- ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
+ ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret)
goto err_mr;
@@ -608,6 +1065,129 @@ err_free:
return ERR_PTR(ret);
}
+int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ unsigned long mtpt_idx;
+ u32 pdn = 0;
+ int npages;
+ int ret;
+
+ if (!mr->enabled)
+ return -EINVAL;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
+ HNS_ROCE_CMD_QUERY_MPT,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ goto free_cmd_mbox;
+
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
+ if (ret)
+ dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
+
+ mr->enabled = 0;
+
+ if (flags & IB_MR_REREG_PD)
+ pdn = to_hr_pd(pd)->pdn;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+ ib_umem_release(mr->umem);
+
+ mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(mr->umem)) {
+ ret = PTR_ERR(mr->umem);
+ mr->umem = NULL;
+ goto free_cmd_mbox;
+ }
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num) {
+ ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
+ if (ret)
+ goto release_umem;
+ } else {
+ mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
+ &(mr->pbl_dma_addr),
+ GFP_KERNEL);
+ if (!mr->pbl_buf) {
+ ret = -ENOMEM;
+ goto release_umem;
+ }
+ }
+ }
+
+ ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
+ mr_access_flags, virt_addr,
+ length, mailbox->buf);
+ if (ret) {
+ if (flags & IB_MR_REREG_TRANS)
+ goto release_umem;
+ else
+ goto free_cmd_mbox;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
+ if (ret) {
+ if (mr->size != ~0ULL) {
+ npages = ib_umem_page_count(mr->umem);
+
+ if (hr_dev->caps.pbl_hop_num)
+ hns_roce_mhop_free(hr_dev, mr);
+ else
+ dma_free_coherent(dev, npages * 8,
+ mr->pbl_buf,
+ mr->pbl_dma_addr);
+ }
+
+ goto release_umem;
+ }
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
+ if (ret) {
+ dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
+ goto release_umem;
+ }
+
+ mr->enabled = 1;
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access = mr_access_flags;
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+release_umem:
+ ib_umem_release(mr->umem);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
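With multi-hop PBL addressing enabled, hns_roce_reg_user_mr() bounds the MR page count by bt_size = (1 << (pbl_ba_pg_sz + PAGE_SHIFT)) / 8 entries per base-address table, raised to pbl_hop_num — the same product computed by the "pbl_size *= bt_size" loop in the hunk above. A small worked example follows; the 4 KiB page size and pbl_ba_pg_sz = 0 are assumptions chosen only to make the arithmetic concrete.

#include <stdio.h>

int main(void)
{
	/* Assumptions for the worked example: 4 KiB kernel pages
	 * (PAGE_SHIFT = 12) and pbl_ba_pg_sz = 0, i.e. 4 KiB BA pages. */
	const unsigned int page_shift = 12;
	const unsigned int pbl_ba_pg_sz = 0;
	const unsigned long bt_entries = (1UL << (pbl_ba_pg_sz + page_shift)) / 8;

	for (int hop = 1; hop <= 3; hop++) {
		unsigned long max_pages = 1;

		/* One 64-bit entry per page at every hop level. */
		for (int i = 0; i < hop; i++)
			max_pages *= bt_entries;

		printf("hop_num=%d: up to %lu PBL pages (%lu MiB of MR)\n",
		       hop, max_pages, max_pages >> (20 - page_shift));
	}
	return 0;
}

With 4 KiB tables (512 entries each) this prints 512 pages (2 MiB) for one hop, 262144 pages (1 GiB) for two hops and 134217728 pages (512 GiB) for three hops, which is why the old fixed HNS_ROCE_MAX_MTPT_PBL_NUM check is kept only for the non-hop case.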
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index a64500fa1145..bdab2188c04a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -31,6 +31,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pci.h>
#include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd;
int ret;
@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
return &pd->ibpd;
}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
int hns_roce_dealloc_pd(struct ib_pd *pd)
{
@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
return 0;
}
+EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
uar->index = (uar->index - 1) %
(hr_dev->caps.phy_num_uars - 1) + 1;
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
+ if (!dev_is_pci(hr_dev->dev)) {
+ res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
+ uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
+ } else {
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
+ >> PAGE_SHIFT);
}
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
}
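hns_roce_uar_alloc() now derives the doorbell pfn differently for the two back ends: platform (hip06-style) devices take the first memory resource plus a per-UAR page offset, while PCI-backed devices map the start of BAR 2. A stand-alone sketch with assumed example addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example values: 4 KiB pages, a platform device whose
	 * register resource starts at 0xc4000000, and a PCI function
	 * whose doorbell BAR 2 starts at 0x80000000000. */
	const unsigned int page_shift = 12;
	const uint64_t plat_res_start = 0xc4000000ULL;
	const uint64_t pci_bar2_start = 0x80000000000ULL;
	const unsigned long uar_index = 3;

	/* Platform path: one page per UAR, offset by the UAR index. */
	uint64_t plat_pfn = (plat_res_start >> page_shift) + uar_index;

	/* PCI path: the doorbell BAR itself starts at the mapped pfn. */
	uint64_t pci_pfn = pci_bar2_start >> page_shift;

	printf("platform uar pfn=0x%llx, pci uar pfn=0x%llx\n",
	       (unsigned long long)plat_pfn, (unsigned long long)pci_pfn);
	return 0;
}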
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f5dd21c2d275..49586ec8126a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -44,7 +44,7 @@
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_qp *qp;
spin_lock(&qp_table->lock);
@@ -136,6 +136,7 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
return HNS_ROCE_QP_NUM_STATE;
}
}
+EXPORT_SYMBOL_GPL(to_hns_roce_state);
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
@@ -153,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
- dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+ dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
goto err_put_irrl;
}
@@ -171,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret;
if (!qpn)
@@ -193,13 +194,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
goto err_put_qp;
}
+ if (hr_dev->caps.trrl_entry_sz) {
+ /* Alloc memory for TRRL */
+ ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
+ if (ret) {
+ dev_err(dev, "TRRL table get failed\n");
+ goto err_put_irrl;
+ }
+ }
+
spin_lock_irq(&qp_table->lock);
ret = radix_tree_insert(&hr_dev->qp_table_tree,
hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
spin_unlock_irq(&qp_table->lock);
if (ret) {
dev_err(dev, "QPC radix_tree_insert failed\n");
- goto err_put_irrl;
+ goto err_put_trrl;
}
atomic_set(&hr_qp->refcount, 1);
@@ -207,6 +218,10 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
return 0;
+err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+
err_put_irrl:
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -227,6 +242,7 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
hr_qp->qpn & (hr_dev->caps.num_qps - 1));
spin_unlock_irqrestore(&qp_table->lock, flags);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
@@ -237,10 +253,14 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
wait_for_completion(&hr_qp->free);
if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_free);
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
int cnt)
@@ -252,13 +272,14 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
+EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap, int is_user, int has_srq,
struct hns_roce_qp *hr_qp)
{
+ struct device *dev = hr_dev->dev;
u32 max_cnt;
- struct device *dev = &hr_dev->pdev->dev;
/* Check the validity of QP support capacity */
if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -282,20 +303,27 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
return -EINVAL;
}
- /* In v1 engine, parameter verification procession */
- max_cnt = cap->max_recv_wr > HNS_ROCE_MIN_WQE_NUM ?
- cap->max_recv_wr : HNS_ROCE_MIN_WQE_NUM;
+ if (hr_dev->caps.min_wqes)
+ max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
+ else
+ max_cnt = cap->max_recv_wr;
+
hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "hns_roce_set_rq_size rq.wqe_cnt too large\n");
+ dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
return -EINVAL;
}
max_cnt = max(1U, cap->max_recv_sge);
hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
- /* WQE is fixed for 64B */
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
+ if (hr_dev->caps.max_rq_sg <= 2)
+ hr_qp->rq.wqe_shift =
+ ilog2(hr_dev->caps.max_rq_desc_sz);
+ else
+ hr_qp->rq.wqe_shift =
+ ilog2(hr_dev->caps.max_rq_desc_sz
+ * hr_qp->rq.max_gs);
}
cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
@@ -305,32 +333,79 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp,
struct hns_roce_ib_create_qp *ucmd)
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
+ u32 page_size;
+ u32 max_cnt;
/* Sanity check SQ size before proceeding */
if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
ucmd->log_sq_stride > max_sq_stride ||
ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
- dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+ dev_err(hr_dev->dev, "check SQ size error!\n");
+ return -EINVAL;
+ }
+
+ if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+ dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
+ cap->max_send_sge);
return -EINVAL;
}
hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+ max_cnt = max(1U, cap->max_send_sge);
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ else
+ hr_qp->sq.max_gs = max_cnt;
+
+ if (hr_qp->sq.max_gs > 2)
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ (hr_qp->sq.max_gs - 2));
+ hr_qp->sge.sge_shift = 4;
+
/* Get buf size, SQ and RQ are aligned to page_szie */
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ if (hr_dev->caps.max_sq_sg <= 2) {
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
- hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.offset = 0;
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift), PAGE_SIZE);
+ } else {
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
+ hr_qp->rq.wqe_shift), page_size) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift), page_size) +
+ HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift), page_size);
+
+ hr_qp->sq.offset = 0;
+ if (hr_qp->sge.sge_cnt) {
+ hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
+ (hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
+ hr_qp->rq.offset = hr_qp->sge.offset +
+ HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift),
+ page_size);
+ } else {
+ hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
+ (hr_qp->sq.wqe_cnt <<
+ hr_qp->sq.wqe_shift),
+ page_size);
+ }
+ }
return 0;
}
@@ -339,13 +414,15 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
+ u32 page_size;
u32 max_cnt;
+ int size;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
cap->max_inline_data > hr_dev->caps.max_sq_inline) {
- dev_err(dev, "hns_roce_set_kernel_sq_size error1\n");
+ dev_err(dev, "SQ WR or sge or inline data error!\n");
return -EINVAL;
}
@@ -353,27 +430,46 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq_max_wqes_per_wr = 1;
hr_qp->sq_spare_wqes = 0;
- /* In v1 engine, parameter verification procession */
- max_cnt = cap->max_send_wr > HNS_ROCE_MIN_WQE_NUM ?
- cap->max_send_wr : HNS_ROCE_MIN_WQE_NUM;
+ if (hr_dev->caps.min_wqes)
+ max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
+ else
+ max_cnt = cap->max_send_wr;
+
hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
- dev_err(dev, "hns_roce_set_kernel_sq_size sq.wqe_cnt too large\n");
+ dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
return -EINVAL;
}
/* Get data_seg numbers */
max_cnt = max(1U, cap->max_send_sge);
- hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
+ else
+ hr_qp->sq.max_gs = max_cnt;
- /* Get buf size, SQ and RQ are aligned to page_szie */
- hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
- hr_qp->rq.wqe_shift), PAGE_SIZE) +
- HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
+ if (hr_qp->sq.max_gs > 2) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ (hr_qp->sq.max_gs - 2));
+ hr_qp->sge.sge_shift = 4;
+ }
+
+ /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
- hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
- hr_qp->sq.wqe_shift), PAGE_SIZE);
+ size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
+ page_size);
+
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+ hr_qp->sge.offset = size;
+ size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
+ hr_qp->sge.sge_shift, page_size);
+ }
+
+ hr_qp->rq.offset = size;
+ size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
+ page_size);
+ hr_qp->buff_size = size;
/* Get wr and sge number which send */
cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
@@ -391,10 +487,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_udata *udata, unsigned long sqpn,
struct hns_roce_qp *hr_qp)
{
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
unsigned long qpn = 0;
int ret = 0;
+ u32 page_shift;
+ u32 npages;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -421,7 +519,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
- ret = hns_roce_set_user_sq_size(hr_dev, hr_qp, &ucmd);
+ ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
+ &ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
goto err_out;
@@ -436,8 +535,21 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
- ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
- hr_qp->umem->page_shift, &hr_qp->mtt);
+ hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
+ if (hr_dev->caps.mtt_buf_pg_sz) {
+ npages = (ib_umem_page_count(hr_qp->umem) +
+ (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
+ (1 << hr_dev->caps.mtt_buf_pg_sz);
+ page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ ret = hns_roce_mtt_init(hr_dev, npages,
+ page_shift,
+ &hr_qp->mtt);
+ } else {
+ ret = hns_roce_mtt_init(hr_dev,
+ ib_umem_page_count(hr_qp->umem),
+ hr_qp->umem->page_shift,
+ &hr_qp->mtt);
+ }
if (ret) {
dev_err(dev, "hns_roce_mtt_init error for create qp\n");
goto err_buf;
@@ -472,20 +584,22 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
/* QP doorbell register address */
- hr_qp->sq.db_reg_l = hr_dev->reg_base + ROCEE_DB_SQ_L_0_REG +
+ hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
- hr_qp->rq.db_reg_l = hr_dev->reg_base +
- ROCEE_DB_OTHERS_L_0_REG +
+ hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
/* Allocate QP buf */
- if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, PAGE_SIZE * 2,
- &hr_qp->hr_buf)) {
+ page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
+ if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
+ (1 << page_shift) * 2,
+ &hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
goto err_out;
}
+ hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
/* Write MTT */
ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
hr_qp->hr_buf.page_shift, &hr_qp->mtt);
@@ -522,7 +636,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
- if ((init_attr->qp_type) == IB_QPT_GSI) {
+ if (init_attr->qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+ /* In v1 engine, GSI QP context is saved in the RoCE engine's register */
ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
if (ret) {
dev_err(dev, "hns_roce_qp_alloc failed!\n");
@@ -571,7 +687,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
struct hns_roce_sqp *hr_sqp;
struct hns_roce_qp *hr_qp;
int ret;
@@ -629,6 +745,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return &hr_qp->ibqp;
}
+EXPORT_SYMBOL_GPL(hns_roce_create_qp);
int to_hr_qp_type(int qp_type)
{
@@ -647,6 +764,7 @@ int to_hr_qp_type(int qp_type)
return transport_type;
}
+EXPORT_SYMBOL_GPL(to_hr_qp_type);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
@@ -654,7 +772,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
- struct device *dev = &hr_dev->pdev->dev;
+ struct device *dev = hr_dev->dev;
int ret = -EINVAL;
int p;
enum ib_mtu active_mtu;
@@ -692,7 +810,10 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
- if (attr->path_mtu > IB_MTU_2048 ||
+ if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
+ attr->path_mtu > IB_MTU_4096) ||
+ (hr_dev->caps.max_mtu == IB_MTU_2048 &&
+ attr->path_mtu > IB_MTU_2048) ||
attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > active_mtu) {
dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
@@ -716,9 +837,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ret = -EPERM;
- dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
- new_state);
+ ret = 0;
goto out;
}
@@ -745,6 +864,7 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
@@ -761,6 +881,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
spin_unlock_irq(&recv_cq->lock);
}
}
+EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
__be32 send_ieth(struct ib_send_wr *wr)
{
@@ -774,6 +895,7 @@ __be32 send_ieth(struct ib_send_wr *wr)
return 0;
}
}
+EXPORT_SYMBOL_GPL(send_ieth);
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
@@ -785,11 +907,20 @@ void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
+EXPORT_SYMBOL_GPL(get_recv_wqe);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
+EXPORT_SYMBOL_GPL(get_send_wqe);
+
+void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
+{
+ return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
+ (n << hr_qp->sge.sge_shift));
+}
+EXPORT_SYMBOL_GPL(get_send_extend_sge);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq)
@@ -808,6 +939,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
return cur + nreq >= hr_wq->max_post;
}
+EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
@@ -823,7 +955,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
- dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+ dev_err(hr_dev->dev, "qp bitmap init failed! error=%d\n",
ret);
return ret;
}
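As an aside on the hunks above: the reworked hns_roce_set_kernel_sq_size() lays out the SQ, the extended SGE area and the RQ back to back, rounding each region up to the device page size derived from mtt_buf_pg_sz. A minimal user-space sketch of that arithmetic, with made-up wqe counts and shifts (HNS_ROCE_ALOGN_UP is modelled by a plain round-up helper):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for HNS_ROCE_ALOGN_UP(): round x up to a multiple of align (power of two). */
static uint64_t align_up(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Illustrative values only, not real hardware capabilities. */
	unsigned int page_shift = 12 + 0;               /* PAGE_SHIFT + mtt_buf_pg_sz */
	uint64_t page_size = 1ULL << page_shift;
	uint64_t sq_bytes  = 256ULL << 6;               /* sq.wqe_cnt  << sq.wqe_shift  */
	uint64_t sge_bytes = 512ULL << 4;               /* sge.sge_cnt << sge.sge_shift */
	uint64_t rq_bytes  = 256ULL << 7;               /* rq.wqe_cnt  << rq.wqe_shift  */

	uint64_t sq_off   = 0;
	uint64_t sge_off  = sq_off  + align_up(sq_bytes,  page_size);
	uint64_t rq_off   = sge_off + align_up(sge_bytes, page_size);
	uint64_t buf_size = rq_off  + align_up(rq_bytes,  page_size);

	printf("sq@0x%llx sge@0x%llx rq@0x%llx total=%llu bytes\n",
	       (unsigned long long)sq_off, (unsigned long long)sge_off,
	       (unsigned long long)rq_off, (unsigned long long)buf_size);
	return 0;
}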
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index 6e7d27a14061..f6d20ba88c03 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E
+ depends on PCI
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
index 90068c03d217..5a8a7a3f28ae 100644
--- a/drivers/infiniband/hw/i40iw/Makefile
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/net/ethernet/intel/i40e
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index a65e4cbdce2f..4ae9131b6350 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -119,9 +119,6 @@
#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
-#define I40IW_MTU_TO_MSS 40
-#define I40IW_DEFAULT_MSS 1460
-
struct i40iw_cqp_compl_info {
u32 op_ret_val;
u16 maj_err_code;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 5230dd3c938c..493d6ef3d2d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1188,7 +1188,7 @@ static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node
* i40iw_cm_timer_tick - system's timer expired callback
* @pass: Pointing to cm_core
*/
-static void i40iw_cm_timer_tick(unsigned long pass)
+static void i40iw_cm_timer_tick(struct timer_list *t)
{
unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
struct i40iw_cm_node *cm_node;
@@ -1196,10 +1196,9 @@ static void i40iw_cm_timer_tick(unsigned long pass)
struct list_head *list_core_temp;
struct i40iw_sc_vsi *vsi;
struct list_head *list_node;
- struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+ struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
u32 settimer = 0;
unsigned long timetosend;
- struct i40iw_sc_dev *dev;
unsigned long flags;
struct list_head timer_list;
@@ -1267,13 +1266,15 @@ static void i40iw_cm_timer_tick(unsigned long pass)
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
goto done;
}
- cm_node->cm_core->stats_pkt_retrans++;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
vsi = &cm_node->iwdev->vsi;
- dev = cm_node->dev;
- atomic_inc(&send_entry->sqbuf->refcount);
- i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+
+ if (!cm_node->ack_rcvd) {
+ atomic_inc(&send_entry->sqbuf->refcount);
+ i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
if (send_entry->send_retrans) {
send_entry->retranscount--;
@@ -1524,8 +1525,8 @@ static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool acti
break;
}
}
- if (!ret)
- clear_bit(port, cm_core->active_side_ports);
+ if (!ret)
+ clear_bit(port, cm_core->active_side_ports);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
} else {
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
@@ -2181,6 +2182,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->cm_id = cm_info->cm_id;
ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
atomic_set(&cm_node->ref_count, 1);
/* associate our parent CM core */
@@ -2191,7 +2193,8 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
- cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
+ cm_node->tcp_cntxt.mss = (cm_node->ipv4) ? (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6);
cm_node->iwdev = iwdev;
cm_node->dev = &iwdev->sc_dev;
@@ -2406,6 +2409,7 @@ static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
case I40IW_CM_STATE_FIN_WAIT1:
case I40IW_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
case I40IW_CM_STATE_TIME_WAIT:
cm_node->state = I40IW_CM_STATE_CLOSED;
i40iw_rem_ref_cm_node(cm_node);
@@ -2719,7 +2723,10 @@ static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
i40iw_handle_rcv_mpa(cm_node, rbuf);
+ } else {
+ cm_node->ack_rcvd = true;
}
break;
case I40IW_CM_STATE_LISTENING:
@@ -3195,8 +3202,7 @@ void i40iw_setup_cm_core(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cm_core->connected_nodes);
INIT_LIST_HEAD(&cm_core->listen_nodes);
- setup_timer(&cm_core->tcp_timer, i40iw_cm_timer_tick,
- (unsigned long)cm_core);
+ timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
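A note on the cm_node changes above: the MSS is now derived from the VSI MTU when the node is created, subtracting the IPv4+TCP or IPv6+TCP header overhead (the I40IW_MTU_TO_MSS_IPV4/IPV6 constants added in i40iw_d.h further down). A small stand-alone sketch of that calculation with an example MTU:

#include <stdio.h>
#include <stdbool.h>

#define MTU_TO_MSS_IPV4 40   /* 20-byte IPv4 header + 20-byte TCP header */
#define MTU_TO_MSS_IPV6 60   /* 40-byte IPv6 header + 20-byte TCP header */

static unsigned int mss_from_mtu(unsigned int mtu, bool ipv4)
{
	return ipv4 ? mtu - MTU_TO_MSS_IPV4 : mtu - MTU_TO_MSS_IPV6;
}

int main(void)
{
	unsigned int mtu = 1500;   /* example Ethernet MTU */

	printf("mtu=%u: ipv4 mss=%u, ipv6 mss=%u\n",
	       mtu, mss_from_mtu(mtu, true), mss_from_mtu(mtu, false));
	return 0;
}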
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 45abef76295b..0d5840d2c4fc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -360,6 +360,7 @@ struct i40iw_cm_node {
u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
struct i40iw_kmem_info mpa_hdr;
+ bool ack_rcvd;
};
/* structure for client or CM to fill when making CM api calls. */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 42ca5346777d..d88c6cf47cf2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -348,7 +348,10 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
u16 qs_handle;
int i;
- vsi->mss = l2params->mss;
+ if (vsi->mtu != l2params->mtu) {
+ vsi->mtu = l2params->mtu;
+ i40iw_reinitialize_ieq(dev);
+ }
i40iw_fill_qos_list(l2params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -374,7 +377,7 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
* i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
* @qp: qp to be removed from qos
*/
-static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
struct i40iw_sc_vsi *vsi = qp->vsi;
unsigned long flags;
@@ -479,6 +482,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);
i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
"%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
@@ -1774,6 +1781,53 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+
+ switch (info->ae_id) {
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case I40IW_AE_BAD_CLOSE:
+ case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_WQE_UNEXPECTED_OPCODE:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_RESET_SENT:
+ case I40IW_AE_TERMINATE_SENT:
+ case I40IW_AE_RESET_NOT_SENT:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ ae_src = I40IW_AE_SOURCE_RSVD;
+ break;
+ }
+
switch (ae_src) {
case I40IW_AE_SOURCE_RQ:
case I40IW_AE_SOURCE_RQ_0011:
@@ -1807,6 +1861,8 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
info->out_rdrsp = true;
break;
+ case I40IW_AE_SOURCE_RSVD:
+ /* fallthrough */
default:
break;
}
@@ -2357,7 +2413,6 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
qp->rcv_tph_en = info->rcv_tph_en;
qp->xmit_tph_en = info->xmit_tph_en;
qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
- qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
return 0;
}
@@ -2399,7 +2454,6 @@ static enum i40iw_status_code i40iw_sc_qp_create(
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
@@ -2462,7 +2516,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
- LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
@@ -2694,7 +2747,7 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
- LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+ LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
if (info->iwarp_info_valid) {
qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
@@ -4376,10 +4429,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
(LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
break;
- case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
- i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
- (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
- break;
case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
@@ -4395,7 +4444,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
(LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
break;
case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
- case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
(LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
break;
@@ -4541,7 +4589,8 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
vsi->dev = info->dev;
vsi->back_vsi = info->back_vsi;
- vsi->mss = info->params->mss;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_queue = info->exception_lan_queue;
i40iw_fill_qos_list(info->params->qs_handle_list);
for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
@@ -4873,6 +4922,7 @@ enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40
vsi->pestat = info->pestat;
vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
if (info->stats_initialize) {
i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
@@ -5018,14 +5068,12 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
u8 db_size;
spin_lock_init(&dev->cqp_lock);
- INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
i40iw_device_init_uk(&dev->dev_uk);
dev->debug_mask = info->debug_mask;
dev->hmc_fn_id = info->hmc_fn_id;
- dev->exception_lan_queue = info->exception_lan_queue;
dev->is_pf = info->is_pf;
dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2ebaadbed379..65ec39e3746b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -73,6 +73,10 @@
#define I40IW_FIRST_NON_PF_STAT 4
+#define I40IW_MTU_TO_MSS_IPV4 40
+#define I40IW_MTU_TO_MSS_IPV6 60
+#define I40IW_DEFAULT_MTU 1500
+
#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits)
#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits)
#define LS_32_1(val, bits) (u32)(val << bits)
@@ -128,6 +132,7 @@
&_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \
)
+#define I40IW_AE_SOURCE_RSVD 0x0
#define I40IW_AE_SOURCE_RQ 0x1
#define I40IW_AE_SOURCE_RQ_0011 0x3
@@ -539,9 +544,6 @@
#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
-#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
-#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
-
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
(1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
@@ -1105,6 +1107,9 @@
#define I40IWQPC_SNDMSS_SHIFT 16
#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16
+#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT)
+
#define I40IWQPC_VLANTAG_SHIFT 32
#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
@@ -1296,8 +1301,13 @@
(0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
/* wqe size considering 32 bytes per wqe*/
-#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
-#define I40IWQP_SW_MAX_WQSIZE 2048 /* 2048 bytes */
+#define I40IW_QP_SW_MIN_WQSIZE 4 /*in WRs*/
+#define I40IW_SQ_RSVD 2
+#define I40IW_RQ_RSVD 1
+#define I40IW_MAX_QUANTAS_PER_WR 2
+#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1)
#define I40IWQP_OP_RDMA_WRITE 0
#define I40IWQP_OP_RDMA_READ 1
@@ -1636,7 +1646,8 @@ enum i40iw_alignment {
#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
#define I40IW_AE_BAD_CLOSE 0x0201
#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
@@ -1644,12 +1655,10 @@ enum i40iw_alignment {
#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
#define I40IW_AE_STAG_ZERO_INVALID 0x0206
#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define I40IW_AE_SRQ_LIMIT 0x0209
#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302
#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
@@ -1663,12 +1672,10 @@ enum i40iw_alignment {
#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
#define I40IW_AE_STALE_ARP_ENTRY 0x0403
-#define I40IW_AE_INVALID_WQE_LENGTH 0x0404
#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
-#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
@@ -1685,9 +1692,6 @@ enum i40iw_alignment {
#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800
-#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801
-#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802
#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
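The new I40IW_UDA_QPC_MAXFRAMESIZE field and the reworked WQ limits above follow the driver's shift/mask convention: a value is positioned with a left shift and bounded by the corresponding mask, and I40IW_MAX_QP_WRS evaluates to 2048 / 2 - 1 = 1023. A short sketch of both, copying the relevant constants (the 1518-byte frame size is just an example value):

#include <stdio.h>
#include <stdint.h>

#define MAXFRAMESIZE_SHIFT 16
#define MAXFRAMESIZE_MASK  (0x3fffULL << MAXFRAMESIZE_SHIFT)

#define SW_MAX_SQ_QUANTAS  2048
#define MAX_QUANTAS_PER_WR 2
#define MAX_QP_WRS         ((SW_MAX_SQ_QUANTAS / MAX_QUANTAS_PER_WR) - 1)

int main(void)
{
	/* Place an example frame size into the 14-bit field starting at bit 16. */
	uint64_t qw48 = ((uint64_t)1518 << MAXFRAMESIZE_SHIFT) & MAXFRAMESIZE_MASK;

	printf("encoded max frame size field: 0x%llx\n", (unsigned long long)qw48);
	printf("maximum WRs per SQ: %d\n", MAX_QP_WRS);   /* prints 1023 */
	return 0;
}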
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 476867a3f584..e96bdafbcbb3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -408,8 +408,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
ctx_info->err_rq_idx_valid = false;
+ /* fall through */
default:
if (!info->sq && ctx_info->err_rq_idx_valid) {
ctx_info->err_rq_idx = info->wqe_idx;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 27590ae21881..e824296713e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -353,6 +353,8 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
i40iw_destroy_ceq(iwdev, iwceq);
}
+
+ iwdev->sc_dev.ceq_valid = false;
}
/**
@@ -810,17 +812,16 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
iwdev->ceqs_count++;
}
-
exit:
- if (status) {
- if (!iwdev->ceqs_count) {
- kfree(iwdev->ceqlist);
- iwdev->ceqlist = NULL;
- } else {
- status = 0;
- }
+ if (status && !iwdev->ceqs_count) {
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ return status;
+ } else {
+ iwdev->sc_dev.ceq_valid = true;
+ return 0;
}
- return status;
+
}
/**
@@ -958,13 +959,13 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
memset(&info, 0, sizeof(info));
info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
info.cq_id = 2;
- info.qp_id = iwdev->sc_dev.exception_lan_queue;
+ info.qp_id = iwdev->vsi.exception_lan_queue;
info.count = 1;
info.pd_id = 2;
info.sq_size = 8192;
info.rq_size = 8192;
- info.buf_size = 2048;
- info.tx_buf_cnt = 16384;
+ info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
+ info.tx_buf_cnt = 4096;
status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
if (status)
i40iw_pr_err("ieq create fail\n");
@@ -972,6 +973,21 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
}
/**
+ * i40iw_reinitialize_ieq - destroy and re-create ieq
+ * @dev: iwarp device
+ */
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
+ if (i40iw_initialize_ieq(iwdev)) {
+ iwdev->reset = true;
+ i40iw_request_reset(iwdev);
+ }
+}
+
+/**
* i40iw_hmc_setup - create hmc objects for the device
* @iwdev: iwarp device
*
@@ -1327,8 +1343,8 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
info.bar0 = ldev->hw_addr;
info.hw = &iwdev->hw;
info.debug_mask = debug;
- l2params.mss =
- (ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
+ l2params.mtu =
+ (ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
qset = ldev->params.qos.prio_qos[i].qs_handle;
l2params.qs_handle_list[i] = qset;
@@ -1338,7 +1354,6 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
iwdev->dcb = true;
}
i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
- info.exception_lan_queue = 1;
info.vchnl_send = i40iw_virtchnl_send;
status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1348,6 +1363,7 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
vsi_info.dev = &iwdev->sc_dev;
vsi_info.back_vsi = (void *)iwdev;
vsi_info.params = &l2params;
+ vsi_info.exception_lan_queue = 1;
i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
if (dev->is_pf) {
@@ -1748,7 +1764,7 @@ static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *cli
for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
- l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+ l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;
INIT_WORK(&work->work, i40iw_l2params_worker);
queue_work(iwdev->param_wq, &work->work);
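Worth spelling out from the IEQ setup earlier in this file: the receive buffer size now tracks the VSI MTU plus the VLAN-tagged Ethernet header instead of a fixed 2048 bytes, so larger frames fit. A trivial sketch of that sizing (VLAN_ETH_HLEN is 18 bytes in the kernel: a 14-byte Ethernet header plus a 4-byte 802.1Q tag; the MTU below is only an example):

#include <stdio.h>

#define VLAN_ETH_HLEN 18   /* 14-byte Ethernet header + 4-byte 802.1Q tag */

int main(void)
{
	unsigned int mtu = 9000;                       /* example jumbo-frame MTU */
	unsigned int buf_size = mtu + VLAN_ETH_HLEN;   /* mirrors info.buf_size above */

	printf("IEQ buffer size for MTU %u: %u bytes\n", mtu, buf_size);
	return 0;
}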
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 5498ad01c280..11d3a2a72100 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -86,7 +86,7 @@ void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *inf
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
-
+void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp);
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
@@ -123,5 +123,6 @@ enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
struct i40iw_virt_mem *mem);
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 59f70676f0e0..796a815b53fd 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -488,7 +488,7 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
- set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+ set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
set_64bit_val(qp_ctx, 56, 0);
set_64bit_val(qp_ctx, 64, 1);
@@ -611,12 +611,14 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
qp->user_pri = 0;
i40iw_qp_add_qos(qp);
i40iw_puda_qp_setctx(rsrc);
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
else
ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
- if (ret)
+ if (ret) {
+ i40iw_qp_rem_qos(qp);
i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+ }
return ret;
}
@@ -704,7 +706,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
if (ret)
goto error;
- if (rsrc->ceq_valid)
+ if (rsrc->dev->ceq_valid)
ret = i40iw_cqp_cq_create_cmd(dev, cq);
else
ret = i40iw_puda_cq_wqe(dev, cq);
@@ -724,7 +726,7 @@ static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
return;
}
@@ -757,7 +759,7 @@ static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
struct i40iw_ccq_cqe_info compl_info;
struct i40iw_sc_dev *dev = rsrc->dev;
- if (rsrc->ceq_valid) {
+ if (rsrc->dev->ceq_valid) {
i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
return;
}
@@ -813,6 +815,7 @@ void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
switch (rsrc->completion) {
case PUDA_HASH_CRC_COMPLETE:
i40iw_free_hash_desc(rsrc->hash_desc);
+ /* fall through */
case PUDA_QP_CREATED:
if (!reset)
i40iw_puda_free_qp(rsrc);
@@ -921,7 +924,6 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
rsrc->xmit_complete = i40iw_ieq_tx_compl;
}
- rsrc->ceq_valid = info->ceq_valid;
rsrc->type = info->type;
rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
@@ -1400,7 +1402,8 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
pfpdu->rcv_nxt = fps;
pfpdu->fps = fps;
pfpdu->mode = true;
- pfpdu->max_fpdu_data = ieq->vsi->mss;
+ pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
pfpdu->pmode_count++;
INIT_LIST_HEAD(rxlist);
i40iw_ieq_check_first_buf(buf, fps);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index dba05ce7d392..660aa3edae56 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,7 +100,6 @@ struct i40iw_puda_rsrc_info {
enum puda_resource_type type; /* ILQ or IEQ */
u32 count;
u16 pd_id;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
@@ -125,7 +124,6 @@ struct i40iw_puda_rsrc {
enum puda_resource_type type;
u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */
u16 mss;
- bool ceq_valid;
u32 cq_id;
u32 qp_id;
u32 sq_size;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 63118f6d5ab4..a27d392c92a2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -250,6 +250,7 @@ struct i40iw_vsi_pestat {
struct i40iw_dev_hw_stats last_read_hw_stats;
struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
struct timer_list stats_timer;
+ struct i40iw_sc_vsi *vsi;
spinlock_t lock; /* rdma stats lock */
};
@@ -380,7 +381,6 @@ struct i40iw_sc_qp {
u8 *q2_buf;
u64 qp_compl_ctx;
u16 qs_handle;
- u16 exception_lan_queue;
u16 push_idx;
u8 sq_tph_val;
u8 rq_tph_val;
@@ -459,7 +459,8 @@ struct i40iw_sc_vsi {
u32 ieq_count;
struct i40iw_virt_mem ieq_mem;
struct i40iw_puda_rsrc *ieq;
- u16 mss;
+ u16 exception_lan_queue;
+ u16 mtu;
u8 fcn_id;
bool stats_fcn_id_alloc;
struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
@@ -501,10 +502,10 @@ struct i40iw_sc_dev {
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
u32 debug_mask;
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
bool vchnl_up;
+ bool ceq_valid;
u8 vf_id;
wait_queue_head_t vf_reqs;
u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
@@ -534,7 +535,6 @@ struct i40iw_create_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
};
@@ -546,7 +546,6 @@ struct i40iw_modify_qp_info {
bool ord_valid;
bool tcp_ctx_valid;
bool cq_num_valid;
- bool static_rsrc;
bool arp_cache_idx_valid;
bool reset_tcp_conn;
bool remove_hash_idx;
@@ -568,13 +567,14 @@ struct i40iw_ccq_cqe_info {
struct i40iw_l2params {
u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
- u16 mss;
+ u16 mtu;
};
struct i40iw_vsi_init_info {
struct i40iw_sc_dev *dev;
void *back_vsi;
struct i40iw_l2params *params;
+ u16 exception_lan_queue;
};
struct i40iw_vsi_stats_info {
@@ -592,7 +592,6 @@ struct i40iw_device_init_info {
struct i40iw_hw *hw;
void __iomem *bar0;
enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
- u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
u32 debug_mask;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 0aadb7a0d1aa..3ec5389a81a1 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -821,6 +821,18 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
pring = &qp->rq_ring;
} else {
+ if (qp->first_sq_wq) {
+ qp->first_sq_wq = false;
+ if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) {
+ I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ memset(info, 0, sizeof(struct i40iw_cq_poll_info));
+ return i40iw_cq_poll_completion(cq, info);
+ }
+ }
+
if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
@@ -882,8 +894,21 @@ exit:
}
/**
+ * i40iw_qp_round_up - return rounded up QP WQ depth
+ * @wqdepth: WQ depth in quantas to round up
+ */
+static int i40iw_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
+
+/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
- * @wqdepth: depth of wq required.
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
* @shift: Returns the shift needed based on sge
@@ -893,22 +918,48 @@ exit:
* For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
* Shift of 2 otherwise (wqe size of 128 bytes).
*/
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
{
- u32 size;
-
*shift = 0;
if (sge > 1 || inline_data > 16)
*shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+}
- /* check if wqdepth is multiple of 2 or not */
+/*
+ * i40iw_get_sqdepth - get SQ depth (quantas)
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ *
+ */
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
+{
+ *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
- if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+ if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
- size = wqdepth << *shift; /* multiple of 32 bytes count */
- if (size > I40IWQP_SW_MAX_WQSIZE)
+ return 0;
+}
+
+/*
+ * i40iw_get_rqdepth - get RQ depth (quantas)
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ *
+ */
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
+{
+ *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+
+ if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
+ *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
+ else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS)
return I40IW_ERR_INVALID_SIZE;
+
return 0;
}
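The helpers above replace the old shift-based size check: i40iw_qp_round_up() rounds a depth up to the next power of two with the usual bit-smearing trick, and i40iw_get_sqdepth()/i40iw_get_rqdepth() then clamp the result between the minimum WQ size and the per-ring maximum. A user-space sketch of the SQ variant, reusing the constants introduced in i40iw_d.h (the input sizes are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define QP_SW_MIN_WQSIZE     4
#define SQ_RSVD              2
#define QP_SW_MAX_SQ_QUANTAS 2048

/* Round up to the next power of two by smearing the highest set bit downward. */
static uint32_t round_up_pow2(uint32_t v)
{
	int s;

	for (v--, s = 1; s <= 16; s *= 2)
		v |= v >> s;
	return ++v;
}

/* Sketch of the SQ depth calculation: quantas = round_up((size << shift) + reserved), clamped. */
static int get_sqdepth(uint32_t sq_size, uint8_t shift, uint32_t *sqdepth)
{
	*sqdepth = round_up_pow2((sq_size << shift) + SQ_RSVD);

	if (*sqdepth < (uint32_t)(QP_SW_MIN_WQSIZE << shift))
		*sqdepth = QP_SW_MIN_WQSIZE << shift;
	else if (*sqdepth > QP_SW_MAX_SQ_QUANTAS)
		return -1;                /* too big for the hardware ring */
	return 0;
}

int main(void)
{
	uint32_t depth;

	if (!get_sqdepth(100, 1, &depth))   /* (100 << 1) + 2 = 202, rounds up to 256 */
		printf("sq depth: %u quantas\n", depth);
	return 0;
}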
@@ -962,9 +1013,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
return I40IW_ERR_INVALID_FRAG_COUNT;
- ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -988,6 +1037,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_MOVE_TAIL(qp->sq_ring);
I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
qp->swqe_polarity_deferred = 1;
qp->rwqe_polarity = 0;
@@ -997,9 +1047,7 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
switch (info->abi_ver) {
case 4:
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
- if (ret_code)
- return ret_code;
+ i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift);
break;
case 5: /* fallthrough until next ABI version */
default:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 84be6f13b9c5..e73efc59a0ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -204,18 +204,6 @@ struct i40iw_post_inline_send {
u32 len;
};
-struct i40iw_post_send_w_inv {
- i40iw_sgl sg_list;
- u32 num_sges;
- i40iw_stag remote_stag_to_inv;
-};
-
-struct i40iw_post_inline_send_w_inv {
- void *data;
- u32 len;
- i40iw_stag remote_stag_to_inv;
-};
-
struct i40iw_rdma_write {
i40iw_sgl lo_sg_list;
u32 num_lo_sges;
@@ -257,9 +245,6 @@ struct i40iw_post_sq_info {
bool defer_flag;
union {
struct i40iw_post_send send;
- struct i40iw_post_send send_w_sol;
- struct i40iw_post_send_w_inv send_w_inv;
- struct i40iw_post_send_w_inv send_w_sol_inv;
struct i40iw_rdma_write rdma_write;
struct i40iw_rdma_read rdma_read;
struct i40iw_rdma_read rdma_read_inv;
@@ -267,9 +252,6 @@ struct i40iw_post_sq_info {
struct i40iw_inv_local_stag inv_local_stag;
struct i40iw_inline_rdma_write inline_rdma_write;
struct i40iw_post_inline_send inline_send;
- struct i40iw_post_inline_send inline_send_w_sol;
- struct i40iw_post_inline_send_w_inv inline_send_w_inv;
- struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
} op;
};
@@ -376,6 +358,7 @@ struct i40iw_qp_uk {
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
+ bool first_sq_wq;
bool deferred_flag;
};
@@ -442,5 +425,7 @@ enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size);
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift);
+void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift);
+enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth);
+enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index e52dbbb4165e..8845dba7c438 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -168,11 +168,16 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
if (netdev != event_netdev)
return NOTIFY_DONE;
- if (upper_dev)
- local_ipaddr = ntohl(
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
- else
+ if (upper_dev) {
+ struct in_device *in;
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+ local_ipaddr = ntohl(in->ifa_list->ifa_address);
+ rcu_read_unlock();
+ } else {
local_ipaddr = ntohl(ifa->ifa_address);
+ }
switch (event) {
case NETDEV_DOWN:
action = I40IW_ARP_DELETE;
@@ -870,9 +875,9 @@ void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
* i40iw_terminate_timeout - timeout happened
* @context: points to iwarp qp
*/
-static void i40iw_terminate_timeout(unsigned long context)
+static void i40iw_terminate_timeout(struct timer_list *t)
{
- struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+ struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
i40iw_terminate_done(qp, 1);
@@ -889,8 +894,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
iwqp = (struct i40iw_qp *)qp->back_qp;
i40iw_add_ref(&iwqp->ibqp);
- setup_timer(&iwqp->terminate_timer, i40iw_terminate_timeout,
- (unsigned long)iwqp);
+ timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
iwqp->terminate_timer.expires = jiffies + HZ;
add_timer(&iwqp->terminate_timer);
}
@@ -1445,11 +1449,12 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
* i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
* @vsi: pointer to the vsi structure
*/
-static void i40iw_hw_stats_timeout(unsigned long vsi)
+static void i40iw_hw_stats_timeout(struct timer_list *t)
{
- struct i40iw_sc_vsi *sc_vsi = (struct i40iw_sc_vsi *)vsi;
+ struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
+ stats_timer);
+ struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
- struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
struct i40iw_vsi_pestat *vf_devstat = NULL;
u16 iw_vf_idx;
unsigned long flags;
@@ -1480,8 +1485,7 @@ void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
struct i40iw_vsi_pestat *devstat = vsi->pestat;
- setup_timer(&devstat->stats_timer, i40iw_hw_stats_timeout,
- (unsigned long)vsi);
+ timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
mod_timer(&devstat->stats_timer,
jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
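Both timer callbacks in this file switch to the timer_list-based API: timer_setup() registers the handler and from_timer() (a container_of() wrapper) recovers the owning structure from the timer pointer, replacing the old unsigned-long context argument. A minimal user-space sketch of that pattern; my_obj and my_timeout are made-up names and the expiry is simulated by a direct call:

#include <stdio.h>
#include <stddef.h>

/* User-space stand-in for the kernel's container_of(); from_timer() is a thin wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list {
	void (*function)(struct timer_list *t);
};

struct my_obj {
	int id;
	struct timer_list my_timer;
};

/* The callback gets the timer itself and derives the embedding object from it. */
static void my_timeout(struct timer_list *t)
{
	struct my_obj *obj = container_of(t, struct my_obj, my_timer);

	printf("timer fired for object %d\n", obj->id);
}

int main(void)
{
	struct my_obj obj = { .id = 42, .my_timer = { .function = my_timeout } };

	obj.my_timer.function(&obj.my_timer);   /* simulate the timer expiring */
	return 0;
}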
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 62be0a41ad0b..3c6f3ce88f89 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -69,7 +69,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp - iwdev->used_qps;
- props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+ props->max_qp_wr = I40IW_MAX_QP_WRS;
props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
props->max_cq = iwdev->max_cq - iwdev->used_cqs;
props->max_cqe = iwdev->max_cqe;
@@ -381,22 +381,6 @@ static int i40iw_dealloc_pd(struct ib_pd *ibpd)
}
/**
- * i40iw_qp_roundup - return round up qp ring size
- * @wr_ring_size: ring size to round up
- */
-static int i40iw_qp_roundup(u32 wr_ring_size)
-{
- int scount = 1;
-
- if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
- wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
-
- for (wr_ring_size--; scount <= 16; scount *= 2)
- wr_ring_size |= wr_ring_size >> scount;
- return ++wr_ring_size;
-}
-
-/**
* i40iw_get_pbl - Retrieve pbl from a list given a virtual
* address
* @va: user virtual address
@@ -515,21 +499,19 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
{
struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
u32 sqdepth, rqdepth;
- u32 sq_size, rq_size;
u8 sqshift;
u32 size;
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
- rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
-
- status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
+ status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
if (status)
return -ENOMEM;
- sqdepth = sq_size << sqshift;
- rqdepth = rq_size << I40IW_MAX_RQ_WQE_SHIFT;
+ status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
+ if (status)
+ return -ENOMEM;
size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
@@ -559,8 +541,8 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sq_size;
- ukinfo->rq_size = rq_size;
+ ukinfo->sq_size = sqdepth >> sqshift;
+ ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
ukinfo->qp_id = iwqp->ibqp.qp_num;
return 0;
}
@@ -2204,6 +2186,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ukqp = &iwqp->sc_qp.qp_uk;
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
inv_stag = false;
memset(&info, 0, sizeof(info));
@@ -2346,6 +2334,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
ib_wr = ib_wr->next;
}
+out:
if (err)
*bad_wr = ib_wr;
else
@@ -2378,6 +2367,12 @@ static int i40iw_post_recv(struct ib_qp *ibqp,
memset(&post_recv, 0, sizeof(post_recv));
spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (iwqp->flush_issued) {
+ err = -EINVAL;
+ goto out;
+ }
+
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 538c46a73248..6dee4fdc5d67 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -92,12 +92,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd,
int ret;
memcpy(&in6, grh->dgid.raw, sizeof(in6));
- if (rdma_is_multicast_addr(&in6)) {
+ if (rdma_is_multicast_addr(&in6))
is_mcast = 1;
- rdma_get_mcast_mac(&in6, ah->av.eth.mac);
- } else {
- memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
- }
+
+ memcpy(ah->av.eth.mac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_get_cached_gid(pd->device, rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &gid_attr);
if (ret)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index cab796341697..bf4f14a1b4fc 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -140,14 +140,18 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
{
int err;
int cqe_size = dev->dev->caps.cqe_size;
+ int shift;
+ int n;
*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
- (*umem)->page_shift, &buf->mtt);
+ n = ib_umem_page_count(*umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
+
if (err)
goto err_buf;
@@ -768,11 +772,13 @@ repoll:
switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
case MLX4_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX4_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX4_OPCODE_SEND:
case MLX4_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c636842c5be0..8c8a16791a3f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -563,6 +563,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_wq_type_rq = props->max_qp;
}
+ props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
+
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -581,6 +584,23 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx4_wqe_data_seg);
}
+ if (uhw->outlen >= resp.response_length + sizeof(resp.rss_caps)) {
+ resp.response_length += sizeof(resp.rss_caps);
+ if (props->rss_caps.supported_qpts) {
+ resp.rss_caps.rx_hash_function =
+ MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX4_IB_RX_HASH_SRC_IPV4 |
+ MLX4_IB_RX_HASH_DST_IPV4 |
+ MLX4_IB_RX_HASH_SRC_IPV6 |
+ MLX4_IB_RX_HASH_DST_IPV6 |
+ MLX4_IB_RX_HASH_SRC_PORT_TCP |
+ MLX4_IB_RX_HASH_DST_PORT_TCP |
+ MLX4_IB_RX_HASH_SRC_PORT_UDP |
+ MLX4_IB_RX_HASH_DST_PORT_UDP;
+ }
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -2733,6 +2753,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
+
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 70eb9f917303..81ffc007e0a1 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -944,6 +944,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
switch (sa_mad->mad_hdr.method) {
case IB_MGMT_METHOD_SET:
may_create = 1;
+ /* fall through */
case IB_SA_METHOD_DELETE:
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1fa19820355a..e14919c15b06 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -47,6 +47,7 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@@ -644,12 +645,18 @@ enum query_device_resp_mask {
QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};
+struct mlx4_ib_rss_caps {
+ __u64 rx_hash_fields_mask; /* enum mlx4_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
struct mlx4_uverbs_ex_query_device_resp {
- __u32 comp_mask;
- __u32 response_length;
- __u64 hca_core_clock_offset;
- __u32 max_inl_recv_sz;
- __u32 reserved;
+ __u32 comp_mask;
+ __u32 response_length;
+ __u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ struct mlx4_ib_rss_caps rss_caps;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -929,5 +936,7 @@ struct ib_rwq_ind_table
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts);
#endif /* MLX4_IB_H */
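The rss_caps trailer added to mlx4_uverbs_ex_query_device_resp is filled in by the main.c hunk earlier only when the caller's output buffer is long enough, with response_length advanced accordingly, so older userspace that does not know the field keeps working. A stripped-down sketch of that length negotiation; the struct and field names here are illustrative, not the real ABI:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct resp_base { uint32_t comp_mask; uint32_t response_length; };
struct resp_rss  { uint64_t fields_mask; uint8_t func; uint8_t rsvd[7]; };
struct resp_full { struct resp_base base; struct resp_rss rss; };

/* Fill only as much of the response as the caller can hold and report that length. */
static uint32_t fill_response(struct resp_full *resp, size_t user_outlen)
{
	memset(resp, 0, sizeof(*resp));
	resp->base.response_length = sizeof(resp->base);

	if (user_outlen >= resp->base.response_length + sizeof(resp->rss)) {
		resp->rss.fields_mask = 0xffULL;        /* example capability bits */
		resp->base.response_length += sizeof(resp->rss);
	}
	return resp->base.response_length;
}

int main(void)
{
	struct resp_full resp;

	printf("old userspace (8-byte buffer): %u bytes filled\n",
	       fill_response(&resp, 8));
	printf("new userspace (full buffer):   %u bytes filled\n",
	       fill_response(&resp, sizeof(resp)));
	return 0;
}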
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e6f77f63da75..313bfb9ccb71 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -87,50 +87,286 @@ err_free:
return ERR_PTR(err);
}
+enum {
+ MLX4_MAX_MTT_SHIFT = 31
+};
+
+static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
+ struct mlx4_mtt *mtt,
+ u64 mtt_size, u64 mtt_shift, u64 len,
+ u64 cur_start_addr, u64 *pages,
+ int *start_index, int *npages)
+{
+ u64 cur_end_addr = cur_start_addr + len;
+ u64 cur_end_addr_aligned = 0;
+ u64 mtt_entries;
+ int err = 0;
+ int k;
+
+ len += (cur_start_addr & (mtt_size - 1ULL));
+ cur_end_addr_aligned = round_up(cur_end_addr, mtt_size);
+ len += (cur_end_addr_aligned - cur_end_addr);
+ if (len & (mtt_size - 1ULL)) {
+ pr_warn("write_block: len %llx is not aligned to mtt_size %llx\n",
+ len, mtt_size);
+ return -EINVAL;
+ }
+
+ mtt_entries = (len >> mtt_shift);
+
+ /*
+ * Align the MTT start address to the mtt_size.
+ * Required to handle cases when the MR starts in the middle of an MTT
+ * record. Was not required in old code since the physical addresses
+ * provided by the dma subsystem were page aligned, which was also the
+ * MTT size.
+ */
+ cur_start_addr = round_down(cur_start_addr, mtt_size);
+ /* A new block is started ... */
+ for (k = 0; k < mtt_entries; ++k) {
+ pages[*npages] = cur_start_addr + (mtt_size * k);
+ (*npages)++;
+ /*
+ * Be friendly to mlx4_write_mtt() and pass it chunks of
+ * appropriate size.
+ */
+ if (*npages == PAGE_SIZE / sizeof(u64)) {
+ err = mlx4_write_mtt(dev->dev, mtt, *start_index,
+ *npages, pages);
+ if (err)
+ return err;
+
+ (*start_index) += *npages;
+ *npages = 0;
+ }
+ }
+
+ return 0;
+}
+
+static inline u64 alignment_of(u64 ptr)
+{
+ return ilog2(ptr & (~(ptr - 1)));
+}
+
+static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
+ u64 current_block_end,
+ u64 block_shift)
+{
+ /* Check whether the new block is aligned as well as the previous
+ * block was.
+ * The block address must have zero low-order bits up to the entity size.
+ */
+ if ((next_block_start & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the previous block - reduce the
+ * mtt size accordingly. Here we take the lowest bit of the
+ * address that is set to 1.
+ */
+ block_shift = alignment_of(next_block_start);
+
+ /*
+ * Check whether the end of the previous block is aligned as well
+ * as the start of this block.
+ */
+ if (((current_block_end) & ((1ULL << block_shift) - 1ULL)) != 0)
+ /*
+ * It is not as well aligned as the start of the block -
+ * reduce the mtt size accordingly.
+ */
+ block_shift = alignment_of(current_block_end);
+
+ return block_shift;
+}
+
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem)
{
u64 *pages;
- int i, k, entry;
- int n;
- int len;
+ u64 len = 0;
int err = 0;
+ u64 mtt_size;
+ u64 cur_start_addr = 0;
+ u64 mtt_shift;
+ int start_index = 0;
+ int npages = 0;
struct scatterlist *sg;
+ int i;
pages = (u64 *) __get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
- i = n = 0;
+ mtt_shift = mtt->page_shift;
+ mtt_size = 1ULL << mtt_shift;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- len = sg_dma_len(sg) >> mtt->page_shift;
- for (k = 0; k < len; ++k) {
- pages[i++] = sg_dma_address(sg) +
- (k << umem->page_shift);
- /*
- * Be friendly to mlx4_write_mtt() and
- * pass it chunks of appropriate size.
- */
- if (i == PAGE_SIZE / sizeof (u64)) {
- err = mlx4_write_mtt(dev->dev, mtt, n,
- i, pages);
- if (err)
- goto out;
- n += i;
- i = 0;
- }
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ if (cur_start_addr + len == sg_dma_address(sg)) {
+ /* still the same block */
+ len += sg_dma_len(sg);
+ continue;
}
+ /*
+ * A new block is started ...
+ * If len is misaligned, write an extra mtt entry to cover the
+ * misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr,
+ pages, &start_index,
+ &npages);
+ if (err)
+ goto out;
+
+ cur_start_addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
}
- if (i)
- err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);
+ /* Handle the last block */
+ if (len > 0) {
+ /*
+ * If len is misaligned, write an extra mtt entry to cover
+ * the misaligned area (round up the division)
+ */
+ err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
+ mtt_shift, len,
+ cur_start_addr, pages,
+ &start_index, &npages);
+ if (err)
+ goto out;
+ }
+
+ if (npages)
+ err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
out:
free_page((unsigned long) pages);
return err;
}
+/*
+ * Calculate optimal mtt size based on contiguous pages.
+ * The function also returns the number of pages that are not aligned to the
+ * calculated mtt_size, to be added to the total number of pages. For that we
+ * check the first and last chunk lengths and, if they are not aligned to
+ * mtt_size, increment the non_aligned_pages count. All chunks in the middle
+ * are already handled as part of the mtt shift calculation for both their
+ * start and end addresses.
+ */
+int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ int *num_of_mtts)
+{
+ u64 block_shift = MLX4_MAX_MTT_SHIFT;
+ u64 min_shift = umem->page_shift;
+ u64 last_block_aligned_end = 0;
+ u64 current_block_start = 0;
+ u64 first_block_start = 0;
+ u64 current_block_len = 0;
+ u64 last_block_end = 0;
+ struct scatterlist *sg;
+ u64 current_block_end;
+ u64 misalignment_bits;
+ u64 next_block_start;
+ u64 total_len = 0;
+ int i;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ /*
+ * Initialization - save the first chunk start as the
+ * current_block_start - block means contiguous pages.
+ */
+ if (current_block_len == 0 && current_block_start == 0) {
+ current_block_start = sg_dma_address(sg);
+ first_block_start = current_block_start;
+ /*
+ * Find the bits that are different between the physical
+ * address and the virtual address for the start of the
+ * MR.
+ * umem_get aligned the start_va to a page boundary.
+ * Therefore, we need to align the start va to the same
+ * boundary.
+ * misalignment_bits is needed to handle the case of a
+ * single memory region. In this case, the rest of the
+ * logic will not reduce the block size. If we use a
+ * block size which is bigger than the alignment of the
+ * misalignment bits, we might use the virtual page
+ * number instead of the physical page number, resulting
+ * in access to the wrong data.
+ */
+ misalignment_bits =
+ (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL)))
+ ^ current_block_start;
+ block_shift = min(alignment_of(misalignment_bits),
+ block_shift);
+ }
+
+ /*
+ * Go over the scatter entries and check if they continue the
+ * previous scatter entry.
+ */
+ next_block_start = sg_dma_address(sg);
+ current_block_end = current_block_start + current_block_len;
+ /* If we have a split (non-contig.) between two blocks */
+ if (current_block_end != next_block_start) {
+ block_shift = mlx4_ib_umem_calc_block_mtt
+ (next_block_start,
+ current_block_end,
+ block_shift);
+
+ /*
+			 * If we have reached the minimum shift for a 4k
+			 * page, stop the loop.
+ */
+ if (block_shift <= min_shift)
+ goto end;
+
+ /*
+			 * Add this block's length to the total; the first and
+			 * last blocks' misalignment is accounted for after the
+			 * loop.
+ */
+ total_len += current_block_len;
+
+ /* Start a new block */
+ current_block_start = next_block_start;
+ current_block_len = sg_dma_len(sg);
+ continue;
+ }
+		/* The scatter entry is another part of the current block:
+		 * increase the block size.
+		 * A scatter entry can be larger than 4k (one page) because
+		 * the DMA mapping may have merged several pages together.
+ */
+ current_block_len += sg_dma_len(sg);
+ }
+
+ /* Account for the last block in the total len */
+ total_len += current_block_len;
+ /* Add to the first block the misalignment that it suffers from. */
+ total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
+ last_block_end = current_block_start + current_block_len;
+ last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+ total_len += (last_block_aligned_end - last_block_end);
+
+ if (total_len & ((1ULL << block_shift) - 1ULL))
+		pr_warn("misaligned total length detected (%llu, %llu)!\n",
+ total_len, block_shift);
+
+ *num_of_mtts = total_len >> block_shift;
+end:
+ if (block_shift < min_shift) {
+ /*
+		 * If the shift is below the minimum, warn and return the
+		 * minimum shift.
+ */
+		pr_warn("umem_calc_optimal_mtt_size - unexpected shift %llu\n", block_shift);
+
+ block_shift = min_shift;
+ }
+ return block_shift;
+}
+
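To make the block-shift calculation above concrete, here is a minimal userspace sketch; it is not part of the patch, the addresses are made up, alignment_of() is approximated with __builtin_ctzll(), and the 31-bit cap stands in for the driver's MLX4_MAX_MTT_SHIFT. It derives a block shift from the bits that differ between the page-aligned virtual start and the physical start, then counts the resulting MTT entries for the region:

/* illustrative sketch only - not kernel code */
#include <stdint.h>
#include <stdio.h>

#define MAX_MTT_SHIFT 31ULL	/* stand-in for MLX4_MAX_MTT_SHIFT */
#define PAGE_SHIFT_4K 12ULL

static uint64_t alignment_of(uint64_t v)
{
	/* lowest set bit of the XOR, i.e. the largest usable alignment */
	return v ? (uint64_t)__builtin_ctzll(v) : 64;
}

int main(void)
{
	/* hypothetical 2 MiB region */
	uint64_t start_va = 0x7f3a00200000ULL;
	uint64_t start_pa = 0x000080140000ULL;
	uint64_t len      = 2ULL << 20;

	uint64_t va_page  = start_va & ~((1ULL << PAGE_SHIFT_4K) - 1);
	uint64_t mis_bits = va_page ^ start_pa;
	uint64_t shift    = alignment_of(mis_bits);

	if (shift > MAX_MTT_SHIFT)
		shift = MAX_MTT_SHIFT;
	if (shift < PAGE_SHIFT_4K)
		shift = PAGE_SHIFT_4K;

	printf("block_shift = %llu, MTT entries = %llu\n",
	       (unsigned long long)shift,
	       (unsigned long long)((len + (1ULL << shift) - 1) >> shift));
	return 0;
}

With these example addresses the first differing bit is bit 18, so eight 256 KiB MTT entries cover the 2 MiB region instead of 512 page-sized ones.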
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -155,7 +391,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
n = ib_umem_page_count(mr->umem);
- shift = mr->umem->page_shift;
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
convert_access(access_flags), n, shift, &mr->mmr);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b6b33d99b0b4..013049bcdb53 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1038,6 +1038,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_create_wq wq;
} ucmd;
size_t copy_len;
+ int shift;
+ int n;
copy_len = (src == MLX4_IB_QP_SRC) ?
sizeof(struct mlx4_ib_create_qp) :
@@ -1100,8 +1102,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
- qp->umem->page_shift, &qp->mtt);
+ n = ib_umem_page_count(qp->umem);
+ shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
+ err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
+
if (err)
goto err_buf;
@@ -2182,11 +2186,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
- if (rwq_ind_tbl) {
- fill_qp_rss_context(context, qp);
- context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
- }
-
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
else {
@@ -2216,7 +2215,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
- context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
pr_err("path MTU (%u) is invalid\n",
@@ -2387,6 +2386,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
context->pd = cpu_to_be32(pd->pdn);
if (!rwq_ind_tbl) {
+ context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
get_cqs(qp, src_type, &send_cq, &recv_cq);
} else { /* Set dummy CQs to be compatible with HV and PRM */
send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
@@ -2394,7 +2394,6 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
- context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
if (!ibuobject)
@@ -2513,7 +2512,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
/* set QP to receive both tunneled & non-tunneled packets */
- if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ if (!rwq_ind_tbl)
context->srqn = cpu_to_be32(7 << 28);
}
}
@@ -2562,6 +2561,13 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
}
}
+ if (rwq_ind_tbl &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
+ fill_qp_rss_context(context, qp);
+ context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
+ }
+
err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
to_mlx4_state(new_state), context, optpar,
sqd_event, &qp->mqp);
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 3363e29157f6..fe269f680103 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -89,10 +89,6 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
resp.response_length = min_resp_len;
- err = ib_resolve_eth_dmac(pd->device, ah_attr);
- if (err)
- return ERR_PTR(err);
-
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 2aa53f427685..18705cbcdc8c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -124,11 +124,13 @@ static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
case MLX5_OPCODE_RDMA_WRITE_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_RDMA_WRITE:
wc->opcode = IB_WC_RDMA_WRITE;
break;
case MLX5_OPCODE_SEND_IMM:
wc->wc_flags |= IB_WC_WITH_IMM;
+ /* fall through */
case MLX5_OPCODE_SEND:
case MLX5_OPCODE_SEND_INVAL:
wc->opcode = IB_WC_SEND;
@@ -752,13 +754,13 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int err;
ucmdlen = udata->inlen < sizeof(ucmd) ?
- (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);
+ (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
if (ucmdlen == sizeof(ucmd) &&
- ucmd.reserved != 0)
+ (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
return -EINVAL;
if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
@@ -802,8 +804,10 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
*index = to_mucontext(context)->bfregi.sys_pages[0];
if (ucmd.cqe_comp_en == 1) {
- if (unlikely((*cqe_size != 64) ||
- !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+ if (!((*cqe_size == 128 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
+ (*cqe_size == 64 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
err = -EOPNOTSUPP;
mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
*cqe_size);
@@ -826,6 +830,19 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
ilog2(ucmd.cqe_comp_res_format));
}
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
+ if (*cqe_size != 128 ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev,
+ "CQE padding is not supported for CQE size of %dB!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
+ }
+
return 0;
err_cqb:
@@ -985,7 +1002,10 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->cqe_size = cqe_size;
cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
@@ -1129,6 +1149,9 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
return -ENOSYS;
+ if (cq_period > MLX5_MAX_CQ_PERIOD)
+ return -EINVAL;
+
err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
cq_period, cq_count);
if (err)
@@ -1335,7 +1358,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);
- MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
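The create_cq_user() changes above follow a common uverbs pattern: reject unknown user flags outright, then gate each known flag on a device capability and the requested CQE size. A self-contained sketch of that pattern, where the flag bit, capability struct, and sizes are stand-ins rather than the real mlx5 definitions:

/* illustrative sketch only - not kernel code */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define CREATE_CQ_FLAG_128B_PAD	(1u << 0)
#define SUPPORTED_CQ_FLAGS	CREATE_CQ_FLAG_128B_PAD

struct dev_caps {
	bool cqe_128_always;	/* device can pad 128B CQEs */
};

static int validate_cq_flags(const struct dev_caps *caps,
			     unsigned int flags, int cqe_size)
{
	/* unknown bits: invalid request, independent of the device */
	if (flags & ~SUPPORTED_CQ_FLAGS)
		return -EINVAL;

	/* known bit, but device or CQE size cannot honour it */
	if ((flags & CREATE_CQ_FLAG_128B_PAD) &&
	    (cqe_size != 128 || !caps->cqe_128_always))
		return -EOPNOTSUPP;

	return 0;
}

int main(void)
{
	struct dev_caps caps = { .cqe_128_always = false };

	printf("pad on 64B CQE: %d\n",
	       validate_cq_flags(&caps, CREATE_CQ_FLAG_128B_PAD, 64));
	printf("no flags:       %d\n", validate_cq_flags(&caps, 0, 64));
	return 0;
}

Distinguishing EINVAL (malformed request) from EOPNOTSUPP (well-formed but unsupported) lets userspace fall back gracefully when a capability is absent.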
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 552f7bd4ecc3..543d0a4c8bf3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -715,6 +715,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+ if (MLX5_CAP_GEN(mdev, end_pad))
+ props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -787,6 +790,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
}
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period =
+ MLX5_MAX_CQ_PERIOD;
+ }
+
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -824,8 +834,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
- if (field_avail(typeof(resp), reserved, uhw->outlen))
- resp.response_length += sizeof(resp.reserved);
+ if (field_avail(typeof(resp), flags, uhw->outlen)) {
+ resp.response_length += sizeof(resp.flags);
+
+ if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
+
+ if (MLX5_CAP_GEN(mdev, cqe_128_always))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+ }
if (field_avail(typeof(resp), sw_parsing_caps,
uhw->outlen)) {
@@ -848,6 +866,36 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ resp.response_length += sizeof(resp.striding_rq_caps);
+ if (MLX5_CAP_GEN(mdev, striding_rq)) {
+ resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (field_avail(typeof(resp), tunnel_offloads_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -3097,6 +3145,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = MLX5_IB_QPT_REG_UMR;
+ qp->send_cq = init_attr->send_cq;
+ qp->recv_cq = init_attr->recv_cq;
attr->qp_state = IB_QPS_INIT;
attr->port_num = 1;
@@ -3979,7 +4029,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 189e80cd6b2f..6dd8cac78de2 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -228,6 +228,7 @@ struct wr_list {
enum mlx5_ib_rq_flags {
MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
+ MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};
struct mlx5_ib_wq {
@@ -254,8 +255,14 @@ struct mlx5_ib_wq {
enum mlx5_ib_wq_flags {
MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+ MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+
struct mlx5_ib_rwq {
struct ib_wq ibwq;
struct mlx5_core_qp core_qp;
@@ -264,6 +271,9 @@ struct mlx5_ib_rwq {
u32 log_rq_size;
u32 rq_page_offset;
u32 log_page_size;
+ u32 log_num_strides;
+ u32 two_byte_shift_en;
+ u32 single_stride_log_num_of_bytes;
struct ib_umem *umem;
size_t buf_size;
unsigned int page_shift;
@@ -389,6 +399,7 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
u32 rate_limit;
u32 underlay_qpn;
+ bool tunnel_offload_en;
};
struct mlx5_ib_cq_buf {
@@ -411,6 +422,8 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_RSS = 1 << 8,
MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
MLX5_IB_QP_UNDERLAY = 1 << 10,
+ MLX5_IB_QP_PCI_WRITE_END_PADDING = 1 << 11,
+ MLX5_IB_QP_TUNNEL_OFFLOAD = 1 << 12,
};
struct mlx5_umr_wr {
@@ -435,6 +448,10 @@ struct mlx5_shared_mr_info {
struct ib_umem *umem;
};
+enum mlx5_ib_cq_pr_flags {
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+};
+
struct mlx5_ib_cq {
struct ib_cq ibcq;
struct mlx5_core_cq mcq;
@@ -457,6 +474,7 @@ struct mlx5_ib_cq {
struct list_head wc_list;
enum ib_cq_notify_flags notify_flags;
struct work_struct notify_work;
+ u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
};
struct mlx5_ib_wc {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 37bbc543847a..ee0ee1f9994b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -642,9 +642,9 @@ err:
return -ENOMEM;
}
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
dev->fill_delay = 0;
}
@@ -663,7 +663,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
INIT_LIST_HEAD(&ent->head);
@@ -1230,13 +1230,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
- mlx5_ib_dbg(dev, "cache empty for order %d", order);
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
use_umr = false;
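Several files in this series convert timers from setup_timer() with an unsigned long data argument to timer_setup()/from_timer(), as in delay_time_func() above. The conversion works because from_timer() is a container_of(): the callback receives a pointer to the embedded timer and recovers the enclosing structure from it. A userspace approximation, with made-up struct names for illustration:

/* illustrative sketch only - not kernel code */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer_list { unsigned long expires; };

struct fake_ib_dev {
	int fill_delay;
	struct fake_timer_list delay_timer;
};

/* new-style callback: receives the timer, derives the device */
static void delay_time_func(struct fake_timer_list *t)
{
	struct fake_ib_dev *dev =
		container_of(t, struct fake_ib_dev, delay_timer);

	dev->fill_delay = 0;
}

int main(void)
{
	struct fake_ib_dev dev = { .fill_delay = 1 };

	delay_time_func(&dev.delay_timer);
	printf("fill_delay = %d\n", dev.fill_delay);
	return 0;
}

Because the device is recovered from the timer's own address, the old .data field and the cast from unsigned long disappear, which is exactly what the mthca and nes hunks later in this patch rely on.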
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3d701c7a4c91..e2197bdda89c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -32,6 +32,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
+#include <linux/kernel.h>
#include "mlx5_ib.h"
#include "cmd.h"
@@ -929,9 +930,8 @@ static int mlx5_ib_mr_initiator_pfault_handler(
return -EFAULT;
}
- if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
- sizeof(mlx5_ib_odp_opcode_cap[0]) ||
- !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
+ if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
+ !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
opcode);
return -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index acb79d3a4f1d..31ad28853efa 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1178,8 +1178,8 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode,
- MLX5_GET(qpc, qpc, end_padding_mode));
+ if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1204,8 +1204,16 @@ static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
+static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, u32 tdn)
+ struct mlx5_ib_rq *rq, u32 tdn,
+ bool tunnel_offload_en)
{
u32 *in;
void *tirc;
@@ -1221,6 +1229,8 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (tunnel_offload_en)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
@@ -1266,12 +1276,15 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
+ if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
+ rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
- err = create_raw_packet_qp_tir(dev, rq, tdn);
+ err = create_raw_packet_qp_tir(dev, rq, tdn,
+ qp->tunnel_offload_en);
if (err)
goto err_destroy_rq;
}
@@ -1358,7 +1371,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (udata->outlen < min_resp_len)
return -EINVAL;
- required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+ required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -1381,8 +1394,20 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
+ if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ mlx5_ib_dbg(dev, "invalid flags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS &&
+ !tunnel_offload_supported(dev->mdev)) {
+		mlx5_ib_dbg(dev, "tunnel offloads aren't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
return -EOPNOTSUPP;
}
@@ -1405,6 +1430,15 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(tirc, tirc, transport_domain, tdn);
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
switch (ucmd.rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
@@ -1604,6 +1638,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
+ !tunnel_offload_supported(mdev)) {
+ mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->tunnel_offload_en = true;
+ }
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
@@ -1781,6 +1823,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
+ if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ } else {
+ qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING;
+ }
+ }
+
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1825,6 +1880,7 @@ err_create:
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
+err:
kvfree(in);
return err;
}
@@ -2283,8 +2339,12 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
return err;
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
- path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
- grh->sgid_index);
+ if (qp->ibqp.qp_type == IB_QPT_RC ||
+ qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+ qp->ibqp.qp_type == IB_QPT_XRC_TGT)
+ path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+ grh->sgid_index);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
@@ -3858,7 +3918,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned idx;
int err = 0;
- int inl = 0;
int num_sge;
void *seg;
int nreq;
@@ -4053,6 +4112,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
+ /* fall through */
case MLX5_IB_QPT_HW_GSI:
set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg);
@@ -4116,7 +4176,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
goto out;
}
- inl = 1;
size += sz;
} else {
dpseg = seg;
@@ -4707,9 +4766,27 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, wq_type,
+ rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
+ if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ }
+ }
MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+ MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ rwq->single_stride_log_num_of_bytes -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES);
+ }
MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
@@ -4791,7 +4868,8 @@ static int prepare_user_rq(struct ib_pd *pd,
int err;
size_t required_cmd_sz;
- required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+ required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes)
+ + sizeof(ucmd.single_stride_log_num_of_bytes);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
@@ -4809,14 +4887,39 @@ static int prepare_user_rq(struct ib_pd *pd,
return -EFAULT;
}
- if (ucmd.comp_mask) {
+ if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
- }
-
- if (ucmd.reserved) {
- mlx5_ib_dbg(dev, "invalid reserved\n");
- return -EOPNOTSUPP;
+ } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
+ if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
+ mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if ((ucmd.single_stride_log_num_of_bytes <
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
+ (ucmd.single_stride_log_num_of_bytes >
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
+ mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
+ ucmd.single_stride_log_num_of_bytes,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
+ return -EINVAL;
+ }
+ if ((ucmd.single_wqe_log_num_of_strides >
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (ucmd.single_wqe_log_num_of_strides <
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) {
+ mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ return -EINVAL;
+ }
+ rwq->single_stride_log_num_of_bytes =
+ ucmd.single_stride_log_num_of_bytes;
+ rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
+ rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
}
err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
@@ -5054,6 +5157,12 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, vsd,
(wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
}
+
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
}
if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
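prepare_user_rq() above validates the striding-RQ parameters as log2 values within a device-defined range, and create_rq() then programs the hardware fields as offsets from the minimum. A small sketch of that range check and biased encoding; the MIN/MAX constants mirror the ones added to mlx5_ib.h, the rest is illustrative:

/* illustrative sketch only - not kernel code */
#include <errno.h>
#include <stdio.h>

#define MIN_SINGLE_WQE_LOG_NUM_STRIDES	 9
#define MAX_SINGLE_WQE_LOG_NUM_STRIDES	16
#define MIN_SINGLE_STRIDE_LOG_NUM_BYTES	 6
#define MAX_SINGLE_STRIDE_LOG_NUM_BYTES	13

struct striding_rq_params {
	unsigned int log_stride_bytes;	/* log2 of stride size in bytes */
	unsigned int log_num_strides;	/* log2 of strides per WQE */
};

static int encode_striding_rq(const struct striding_rq_params *p,
			      unsigned int *hw_stride, unsigned int *hw_num)
{
	if (p->log_stride_bytes < MIN_SINGLE_STRIDE_LOG_NUM_BYTES ||
	    p->log_stride_bytes > MAX_SINGLE_STRIDE_LOG_NUM_BYTES)
		return -EINVAL;
	if (p->log_num_strides < MIN_SINGLE_WQE_LOG_NUM_STRIDES ||
	    p->log_num_strides > MAX_SINGLE_WQE_LOG_NUM_STRIDES)
		return -EINVAL;

	/* hardware fields are biased by the minimum supported value */
	*hw_stride = p->log_stride_bytes - MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
	*hw_num = p->log_num_strides - MIN_SINGLE_WQE_LOG_NUM_STRIDES;
	return 0;
}

int main(void)
{
	struct striding_rq_params p = { .log_stride_bytes = 6,
					.log_num_strides = 10 };
	unsigned int s, n;

	if (!encode_striding_rq(&p, &s, &n))
		printf("log_wqe_stride_size=%u log_wqe_num_of_strides=%u\n",
		       s, n);
	return 0;
}

Validating against the advertised range keeps garbage user input from reaching the WQ context, while the biased encoding matches how the firmware interprets the narrow log fields.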
diff --git a/drivers/infiniband/hw/mthca/Makefile b/drivers/infiniband/hw/mthca/Makefile
index e388d95d0cf1..3a09e9ffd634 100644
--- a/drivers/infiniband/hw/mthca/Makefile
+++ b/drivers/infiniband/hw/mthca/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index f6474c24f193..ffb98eaaf1c2 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -130,9 +130,9 @@ static void handle_catas(struct mthca_dev *dev)
spin_unlock_irqrestore(&catas_lock, flags);
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+ struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
@@ -149,7 +149,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
{
phys_addr_t addr;
- init_timer(&dev->catas_err.timer);
+ timer_setup(&dev->catas_err.timer, poll_catas, 0);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
@@ -164,8 +164,6 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
return;
}
- dev->catas_err.timer.data = (unsigned long) dev;
- dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index e36a9bc52268..f3e80dec1334 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -473,11 +473,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
goto err_unmap_eqp;
}
- mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
- dev_lim->cqc_entry_sz,
- mdev->limits.num_cqs,
- mdev->limits.reserved_cqs,
- 0, 0);
+ mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
+ dev_lim->cqc_entry_sz,
+ mdev->limits.num_cqs,
+ mdev->limits.reserved_cqs,
+ 0, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 942ca84713c9..42b68aa999fc 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -178,11 +178,16 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
/* fall through */
case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
- if (upper_dev)
- nesvnic->local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
- else
+ if (upper_dev) {
+ struct in_device *in;
+
+ rcu_read_lock();
+ in = __in_dev_get_rcu(upper_dev);
+ nesvnic->local_ipaddr = in->ifa_list->ifa_address;
+ rcu_read_unlock();
+ } else {
nesvnic->local_ipaddr = ifa->ifa_address;
+ }
nes_write_indexed(nesdev,
NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
@@ -757,18 +762,18 @@ static void nes_remove(struct pci_dev *pcidev)
int netdev_index = 0;
unsigned long flags;
- if (nesdev->netdev_count) {
- netdev = nesdev->netdev[netdev_index];
- if (netdev) {
- netif_stop_queue(netdev);
- unregister_netdev(netdev);
- nes_netdev_destroy(netdev);
+ if (nesdev->netdev_count) {
+ netdev = nesdev->netdev[netdev_index];
+ if (netdev) {
+ netif_stop_queue(netdev);
+ unregister_netdev(netdev);
+ nes_netdev_destroy(netdev);
- nesdev->netdev[netdev_index] = NULL;
- nesdev->netdev_count--;
- nesdev->nesadapter->netdev_count--;
- }
+ nesdev->netdev[netdev_index] = NULL;
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
}
+ }
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 3f9e56e8b379..00c27291dc26 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -536,7 +536,7 @@ void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
void nes_recheck_link_status(struct work_struct *work);
-void nes_terminate_timeout(unsigned long context);
+void nes_terminate_timeout(struct timer_list *t);
/* nes_nic.c */
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
@@ -575,8 +575,8 @@ void nes_put_cqp_request(struct nes_device *nesdev,
struct nes_cqp_request *cqp_request);
void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *);
int nes_arp_table(struct nes_device *, u32, u8 *, u32);
-void nes_mh_fix(unsigned long);
-void nes_clc(unsigned long);
+void nes_mh_fix(struct timer_list *t);
+void nes_clc(struct timer_list *t);
void nes_dump_mem(unsigned int, void *, int);
u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index de4025deaa4a..c56ca2a74df5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -840,7 +840,7 @@ static void handle_recv_entry(struct nes_cm_node *cm_node, u32 rem_node)
/**
* nes_cm_timer_tick
*/
-static void nes_cm_timer_tick(unsigned long pass)
+static void nes_cm_timer_tick(struct timer_list *unused)
{
unsigned long flags;
unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@@ -1389,7 +1389,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
struct rtable *rt;
struct neighbour *neigh;
int rc = arpindex;
- struct net_device *netdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
__be32 dst_ipaddr = htonl(dst_ip);
@@ -1400,11 +1399,6 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- if (netif_is_bond_slave(nesvnic->netdev))
- netdev = netdev_master_upper_dev_get(nesvnic->netdev);
- else
- netdev = nesvnic->netdev;
-
neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
rcu_read_lock();
@@ -1768,6 +1762,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_LAST_ACK:
cm_node->cm_id->rem_ref(cm_node->cm_id);
+ /* fall through */
case NES_CM_STATE_TIME_WAIT:
cm_node->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -2670,8 +2665,7 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
return NULL;
INIT_LIST_HEAD(&cm_core->connected_nodes);
- init_timer(&cm_core->tcp_timer);
- cm_core->tcp_timer.function = nes_cm_timer_tick;
+ timer_setup(&cm_core->tcp_timer, nes_cm_timer_tick, 0);
cm_core->mtu = NES_CM_DEFAULT_MTU;
cm_core->state = NES_CM_STATE_INITED;
@@ -3074,7 +3068,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u32 crc_value;
int ret;
int passive_state;
- struct nes_ib_device *nesibdev;
struct ib_mr *ibmr = NULL;
struct nes_pd *nespd;
u64 tagged_offset;
@@ -3157,7 +3150,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
- nesibdev = nesvnic->nesibdev;
nespd = nesqp->nespd;
tagged_offset = (u64)(unsigned long)*start_buff;
ibmr = nes_reg_phys_mr(&nespd->ibpd,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b0adf65e4bdb..18a7de1c3923 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -381,6 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
sizeof nesadapter->pft_mcast_map);
/* populate the new nesadapter */
+ nesadapter->nesdev = nesdev;
nesadapter->devfn = nesdev->pcidev->devfn;
nesadapter->bus_number = nesdev->pcidev->bus->number;
nesadapter->ref_count = 1;
@@ -598,19 +599,15 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
}
if (nesadapter->hw_rev == NE020_REV) {
- init_timer(&nesadapter->mh_timer);
- nesadapter->mh_timer.function = nes_mh_fix;
+ timer_setup(&nesadapter->mh_timer, nes_mh_fix, 0);
nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 1 second */
- nesadapter->mh_timer.data = (unsigned long)nesdev;
add_timer(&nesadapter->mh_timer);
} else {
nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
}
- init_timer(&nesadapter->lc_timer);
- nesadapter->lc_timer.function = nes_clc;
+ timer_setup(&nesadapter->lc_timer, nes_clc, 0);
nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
- nesadapter->lc_timer.data = (unsigned long)nesdev;
add_timer(&nesadapter->lc_timer);
list_add_tail(&nesadapter->list, &nes_adapter_list);
@@ -1623,9 +1620,9 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
/**
* nes_rq_wqes_timeout
*/
-static void nes_rq_wqes_timeout(unsigned long parm)
+static void nes_rq_wqes_timeout(struct timer_list *t)
{
- struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
+ struct nes_vnic *nesvnic = from_timer(nesvnic, t, rq_wqes_timer);
printk("%s: Timer fired.\n", __func__);
atomic_set(&nesvnic->rx_skb_timer_running, 0);
if (atomic_read(&nesvnic->rx_skbs_needed))
@@ -1849,8 +1846,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
wqe_count -= counter;
nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
} while (wqe_count);
- setup_timer(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout,
- (unsigned long)nesvnic);
+ timer_setup(&nesvnic->rq_wqes_timer, nes_rq_wqes_timeout, 0);
nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
if (nesdev->nesadapter->et_use_adaptive_rx_coalesce)
{
@@ -1861,8 +1857,9 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
}
if ((nesdev->nesadapter->allow_unaligned_fpdus) &&
(nes_init_mgt_qp(nesdev, netdev, nesvnic))) {
- nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n", netdev->name);
- nes_destroy_nic_qp(nesvnic);
+ nes_debug(NES_DBG_INIT, "%s: Out of memory for pau nic\n",
+ netdev->name);
+ nes_destroy_nic_qp(nesvnic);
return -ENOMEM;
}
@@ -3474,9 +3471,9 @@ static void nes_terminate_received(struct nes_device *nesdev,
}
/* Timeout routine in case terminate fails to complete */
-void nes_terminate_timeout(unsigned long context)
+void nes_terminate_timeout(struct timer_list *t)
{
- struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+ struct nes_qp *nesqp = from_timer(nesqp, t, terminate_timer);
nes_terminate_done(nesqp, 1);
}
@@ -3631,7 +3628,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
}
-
+ /* fall through */
case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1b66ef1e9937..3c56470816a8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1164,6 +1164,7 @@ struct nes_adapter {
u8 log_port;
/* PCI information */
+ struct nes_device *nesdev;
unsigned int devfn;
unsigned char bus_number;
unsigned char OneG_Mode;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 77226cf4ea02..21e0ebd39a05 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -122,9 +122,10 @@ static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
/**
* nes_mgt_rq_wqes_timeout
*/
-static void nes_mgt_rq_wqes_timeout(unsigned long parm)
+static void nes_mgt_rq_wqes_timeout(struct timer_list *t)
{
- struct nes_vnic_mgt *mgtvnic = (struct nes_vnic_mgt *)parm;
+ struct nes_vnic_mgt *mgtvnic = from_timer(mgtvnic, t,
+ rq_wqes_timer);
atomic_set(&mgtvnic->rx_skb_timer_running, 0);
if (atomic_read(&mgtvnic->rx_skbs_needed))
@@ -1040,8 +1041,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
mgtvnic->mgt.rx_skb[counter] = skb;
}
- setup_timer(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
- (unsigned long)mgtvnic);
+ timer_setup(&mgtvnic->rq_wqes_timer, nes_mgt_rq_wqes_timeout,
+ 0);
wqe_count = NES_MGT_WQ_COUNT - 1;
mgtvnic->mgt.rq_head = wqe_count;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 5921ea3d50ae..0a75164cedea 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -926,11 +926,10 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesadapter->pft_mcast_map[mc_index] !=
nesvnic->nic_index &&
mc_index < max_pft_entries_avaiable) {
- nes_debug(NES_DBG_NIC_RX,
- "mc_index=%d skipping nic_index=%d, "
- "used for=%d \n", mc_index,
- nesvnic->nic_index,
- nesadapter->pft_mcast_map[mc_index]);
+ nes_debug(NES_DBG_NIC_RX,
+ "mc_index=%d skipping nic_index=%d, used for=%d\n",
+ mc_index, nesvnic->nic_index,
+ nesadapter->pft_mcast_map[mc_index]);
mc_index++;
}
if (mc_index >= max_pft_entries_avaiable)
@@ -1746,8 +1745,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
- init_timer(&nesvnic->event_timer);
- nesvnic->event_timer.function = NULL;
+ timer_setup(&nesvnic->event_timer, NULL, 0);
spin_lock_init(&nesvnic->tx_lock);
spin_lock_init(&nesvnic->port_ibevent_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 37331e2fdc5f..21b4a8373acf 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -740,11 +740,11 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
/**
* nes_mh_fix
*/
-void nes_mh_fix(unsigned long parm)
+void nes_mh_fix(struct timer_list *t)
{
+ struct nes_adapter *nesadapter = from_timer(nesadapter, t, mh_timer);
+ struct nes_device *nesdev = nesadapter->nesdev;
unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_vnic *nesvnic;
u32 used_chunks_tx;
u32 temp_used_chunks_tx;
@@ -753,7 +753,6 @@ void nes_mh_fix(unsigned long parm)
u32 mac_tx_frames_low;
u32 mac_tx_frames_high;
u32 mac_tx_pauses;
- u32 serdes_status;
u32 reset_value;
u32 tx_control;
u32 tx_config;
@@ -846,7 +845,7 @@ void nes_mh_fix(unsigned long parm)
}
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
@@ -859,7 +858,7 @@ void nes_mh_fix(unsigned long parm)
} else {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
}
- serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
@@ -881,17 +880,16 @@ no_mh_work:
/**
* nes_clc
*/
-void nes_clc(unsigned long parm)
+void nes_clc(struct timer_list *t)
{
+ struct nes_adapter *nesadapter = from_timer(nesadapter, t, lc_timer);
unsigned long flags;
- struct nes_device *nesdev = (struct nes_device *)parm;
- struct nes_adapter *nesadapter = nesdev->nesadapter;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nesadapter->link_interrupt_count[0] = 0;
- nesadapter->link_interrupt_count[1] = 0;
- nesadapter->link_interrupt_count[2] = 0;
- nesadapter->link_interrupt_count[3] = 0;
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ nesadapter->link_interrupt_count[2] = 0;
+ nesadapter->link_interrupt_count[3] = 0;
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 442b9bdc0f03..162475aeeedd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1304,8 +1304,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
init_completion(&nesqp->rq_drained);
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
- setup_timer(&nesqp->terminate_timer, nes_terminate_timeout,
- (unsigned long)nesqp);
+ timer_setup(&nesqp->terminate_timer, nes_terminate_timeout, 0);
/* update the QP table */
nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
@@ -2865,11 +2864,11 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
- if (nesqp->hte_added) {
- nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
- next_iwarp_state |= NES_CQP_QP_DEL_HTE;
- nesqp->hte_added = 0;
- }
+ if (nesqp->hte_added) {
+ nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
(nesdev->iw_status) &&
(nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
@@ -3560,7 +3559,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
- entry->opcode = IB_WC_RECV;
+ entry->opcode = IB_WC_RECV;
nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
@@ -3788,9 +3787,9 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
/**
* nes_handle_delayed_event
*/
-static void nes_handle_delayed_event(unsigned long data)
+static void nes_handle_delayed_event(struct timer_list *t)
{
- struct nes_vnic *nesvnic = (void *) data;
+ struct nes_vnic *nesvnic = from_timer(nesvnic, t, event_timer);
if (nesvnic->delayed_event != nesvnic->last_dispatched_event) {
struct ib_event event;
@@ -3821,7 +3820,6 @@ void nes_port_ibevent(struct nes_vnic *nesvnic)
ib_dispatch_event(&event);
nesvnic->last_dispatched_event = event.event;
nesvnic->event_timer.function = nes_handle_delayed_event;
- nesvnic->event_timer.data = (unsigned long) nesvnic;
nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
add_timer(&nesvnic->event_timer);
} else {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d0249e463338..dec650930ca6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -201,21 +201,6 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
/* Get network header type for this GID */
ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
- if ((pd->uctx) &&
- (!rdma_is_multicast_addr((struct in6_addr *)grh->dgid.raw)) &&
- (!rdma_link_local_addr((struct in6_addr *)grh->dgid.raw))) {
- status = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- attr->roce.dmac,
- &vlan_tag,
- &sgid_attr.ndev->ifindex,
- NULL);
- if (status) {
- pr_err("%s(): Failed to resolve dmac from gid."
- "status = %d\n", __func__, status);
- goto av_conf_err;
- }
- }
-
status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan, vlan_tag);
if (status)
goto av_conf_err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 65b166cc7437..0ba695a88b62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = &mqe->u.rsp;
if (cqe_status || ext_status) {
- pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
__func__, cqe_status, ext_status);
if (rsp) {
/* This is for embedded cmds. */
@@ -1947,7 +1947,7 @@ mbx_err:
int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
{
- int status = -ENOMEM;
+ int status;
struct ocrdma_dealloc_lkey *cmd;
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
@@ -1956,9 +1956,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
cmd->lkey = lkey;
cmd->rsvd_frmr = fr_mr ? 1 : 0;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
- if (status)
- goto mbx_err;
-mbx_err:
+
kfree(cmd);
return status;
}
@@ -3186,8 +3184,8 @@ void ocrdma_eqd_set_task(struct work_struct *work)
{
struct ocrdma_dev *dev =
container_of(work, struct ocrdma_dev, eqd_work.work);
- struct ocrdma_eq *eq = 0;
- int i, num = 0, status = -EINVAL;
+ struct ocrdma_eq *eq = NULL;
+ int i, num = 0;
u64 eq_intr;
for (i = 0; i < dev->eq_cnt; i++) {
@@ -3209,7 +3207,7 @@ void ocrdma_eqd_set_task(struct work_struct *work)
}
if (num)
- status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+ ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 66056f9a9700..e528d7acb7f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
if (reset) {
status = ocrdma_mbx_rdma_stats(dev, true);
if (status) {
- pr_err("Failed to reset stats = %d", status);
+ pr_err("Failed to reset stats = %d\n", status);
goto err;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 27d5e8d9f08d..7866fd8051f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -66,9 +66,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
int index, union ib_gid *sgid)
{
int ret;
- struct ocrdma_dev *dev;
- dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
@@ -2247,6 +2245,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ /* fall through */
case IB_WR_SEND:
hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
ocrdma_build_send(qp, hdr, wr);
@@ -2260,6 +2259,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM:
hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
hdr->immdt = ntohl(wr->ex.imm_data);
+ /* fall through */
case IB_WR_RDMA_WRITE:
hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
status = ocrdma_build_write(qp, hdr, wr);
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
index 6c9f3923e838..9b9e3b1d2705 100644
--- a/drivers/infiniband/hw/qedr/Kconfig
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -1,7 +1,9 @@
config INFINIBAND_QEDR
tristate "QLogic RoCE driver"
depends on 64BIT && QEDE
+ depends on PCI
select QED_LL2
+ select QED_OOO
select QED_RDMA
---help---
This driver provides low-level InfiniBand over Ethernet
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
index ba7067c77f2f..1c0bc4f78550 100644
--- a/drivers/infiniband/hw/qedr/Makefile
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
-qedr-y := main.o verbs.o qedr_cm.o
+qedr-y := main.o verbs.o qedr_roce_cm.o qedr_iw_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 97d033f51dc9..50812b33291b 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -33,16 +33,20 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>
+#include <linux/idr.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
+#include "qedr_iw_cm.h"
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
@@ -50,8 +54,8 @@ MODULE_LICENSE("Dual BSD/GPL");
#define QEDR_WQ_MULTIPLIER_DFT (3)
-void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
- enum ib_event_type type)
+static void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
{
struct ib_event ibev;
@@ -92,8 +96,84 @@ static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
return qdev->ndev;
}
+static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = 1;
+ immutable->gid_tbl_len = 1;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ immutable->max_mad_size = 0;
+
+ return 0;
+}
+
+static int qedr_iw_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_RNIC;
+ dev->ibdev.query_gid = qedr_iw_query_gid;
+
+ dev->ibdev.get_port_immutable = qedr_iw_port_immutable;
+
+ dev->ibdev.iwcm = kzalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+ if (!dev->ibdev.iwcm)
+ return -ENOMEM;
+
+ dev->ibdev.iwcm->connect = qedr_iw_connect;
+ dev->ibdev.iwcm->accept = qedr_iw_accept;
+ dev->ibdev.iwcm->reject = qedr_iw_reject;
+ dev->ibdev.iwcm->create_listen = qedr_iw_create_listen;
+ dev->ibdev.iwcm->destroy_listen = qedr_iw_destroy_listen;
+ dev->ibdev.iwcm->add_ref = qedr_iw_qp_add_ref;
+ dev->ibdev.iwcm->rem_ref = qedr_iw_qp_rem_ref;
+ dev->ibdev.iwcm->get_qp = qedr_iw_get_qp;
+
+ memcpy(dev->ibdev.iwcm->ifname,
+ dev->ndev->name, sizeof(dev->ibdev.iwcm->ifname));
+
+ return 0;
+}
+
+static void qedr_roce_register_device(struct qedr_dev *dev)
+{
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+ dev->ibdev.query_gid = qedr_query_gid;
+
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.get_port_immutable = qedr_roce_port_immutable;
+}
+
static int qedr_register_device(struct qedr_dev *dev)
{
+ int rc;
+
strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
dev->ibdev.node_guid = dev->attr.node_guid;
@@ -121,18 +201,21 @@ static int qedr_register_device(struct qedr_dev *dev)
QEDR_UVERBS(POST_SEND) |
QEDR_UVERBS(POST_RECV);
+ if (IS_IWARP(dev)) {
+ rc = qedr_iw_register_device(dev);
+ if (rc)
+ return rc;
+ } else {
+ qedr_roce_register_device(dev);
+ }
+
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->num_cnq;
- dev->ibdev.node_type = RDMA_NODE_IB_CA;
dev->ibdev.query_device = qedr_query_device;
dev->ibdev.query_port = qedr_query_port;
dev->ibdev.modify_port = qedr_modify_port;
- dev->ibdev.query_gid = qedr_query_gid;
- dev->ibdev.add_gid = qedr_add_gid;
- dev->ibdev.del_gid = qedr_del_gid;
-
dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
dev->ibdev.mmap = qedr_mmap;
@@ -166,7 +249,7 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.post_recv = qedr_post_recv;
dev->ibdev.process_mad = qedr_process_mad;
- dev->ibdev.get_port_immutable = qedr_port_immutable;
+
dev->ibdev.get_netdev = qedr_get_netdev;
dev->ibdev.dev.parent = &dev->pdev->dev;
@@ -217,6 +300,9 @@ static void qedr_free_resources(struct qedr_dev *dev)
{
int i;
+ if (IS_IWARP(dev))
+ destroy_workqueue(dev->iwarp_wq);
+
for (i = 0; i < dev->num_cnq; i++) {
qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
@@ -241,6 +327,12 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
spin_lock_init(&dev->sgid_lock);
+ if (IS_IWARP(dev)) {
+ spin_lock_init(&dev->idr_lock);
+ idr_init(&dev->qpidr);
+ dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+ }
+
/* Allocate Status blocks for CNQ */
dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
GFP_KERNEL);
@@ -597,12 +689,12 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
return 0;
}
-void qedr_unaffiliated_event(void *context, u8 event_code)
+static void qedr_unaffiliated_event(void *context, u8 event_code)
{
pr_err("unaffiliated event not implemented yet\n");
}
-void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED 0
#define EVENT_TYPE_CQ 1
@@ -716,6 +808,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
in_params->events = &events;
in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
in_params->max_mtu = dev->ndev->mtu;
+ dev->iwarp_max_mtu = dev->ndev->mtu;
ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
rc = dev->ops->rdma_init(dev->cdev, in_params);
@@ -726,7 +819,7 @@ static int qedr_init_hw(struct qedr_dev *dev)
if (rc)
goto out;
- dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_addr = (void __iomem *)(uintptr_t)out_params.dpi_addr;
dev->db_phys_addr = out_params.dpi_phys_addr;
dev->db_size = out_params.dpi_size;
dev->dpi = out_params.dpi;
@@ -740,7 +833,7 @@ out:
return rc;
}
-void qedr_stop_hw(struct qedr_dev *dev)
+static void qedr_stop_hw(struct qedr_dev *dev)
{
dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
dev->ops->rdma_stop(dev->rdma_ctx);
@@ -777,6 +870,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
goto init_err;
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
+ dev->rdma_type = dev_info.rdma_type;
dev->num_hwfns = dev_info.common.num_hwfns;
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 254083b524bd..86d4511e0d75 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -33,6 +33,7 @@
#define __QEDR_H__
#include <linux/pci.h>
+#include <linux/idr.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
@@ -43,6 +44,8 @@
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)
+#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
+#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
#define DP_DEBUG(dev, module, fmt, ...) \
pr_debug("(%s) " module ": " fmt, \
@@ -56,6 +59,7 @@
#define QEDR_MSG_SQ " SQ"
#define QEDR_MSG_QP " QP"
#define QEDR_MSG_GSI " GSI"
+#define QEDR_MSG_IWARP " IW"
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
@@ -160,6 +164,11 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+ enum qed_rdma_type rdma_type;
+ spinlock_t idr_lock; /* Protect the qpidr data structure */
+ struct idr qpidr;
+ struct workqueue_struct *iwarp_wq;
+ u16 iwarp_max_mtu;
unsigned long enet_state;
@@ -317,6 +326,9 @@ struct qedr_qp_hwq_info {
/* DB */
void __iomem *db;
union db_prod32 db_data;
+
+ void __iomem *iwarp_db2;
+ union db_prod32 iwarp_db2_data;
};
#define QEDR_INC_SW_IDX(p_info, index) \
@@ -337,7 +349,7 @@ enum qedr_qp_err_bitmap {
struct qedr_qp {
struct ib_qp ibqp; /* must be first */
struct qedr_dev *dev;
-
+ struct qedr_iw_ep *ep;
struct qedr_qp_hwq_info sq;
struct qedr_qp_hwq_info rq;
@@ -394,6 +406,8 @@ struct qedr_qp {
/* Relevant to qps created from user space only (applications) */
struct qedr_userq usq;
struct qedr_userq urq;
+ atomic_t refcnt;
+ bool destroyed;
};
struct qedr_ah {
@@ -474,6 +488,21 @@ static inline int qedr_get_dmac(struct qedr_dev *dev,
return 0;
}
+struct qedr_iw_listener {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ int backlog;
+ void *qed_handle;
+};
+
+struct qedr_iw_ep {
+ struct qedr_dev *dev;
+ struct iw_cm_id *cm_id;
+ struct qedr_qp *qp;
+ void *qed_context;
+ u8 during_connect;
+};
+
static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index 5c98d2055cad..b7587f10e7de 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -655,8 +655,10 @@ struct rdma_sq_rdma_wqe_1st {
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
-#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT 6
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 7
u8 wqe_size;
u8 prev_wqe_size;
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
new file mode 100644
index 000000000000..478b7317b80a
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -0,0 +1,749 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/addrconf.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include "qedr.h"
+#include "qedr_iw_cm.h"
+
+static inline void
+qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_info->local_port);
+ raddr->sin_port = htons(cm_info->remote_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
+ raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
+}
+
+static inline void
+qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 =
+ (struct sockaddr_in6 *)&event->remote_addr;
+ int i;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_info->local_port);
+ raddr6->sin6_port = htons(cm_info->remote_port);
+
+ for (i = 0; i < 4; i++) {
+ laddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->local_ip[i]);
+ raddr6->sin6_addr.in6_u.u6_addr32[i] =
+ htonl(cm_info->remote_ip[i]);
+ }
+}
+
+static void
+qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
+ struct qedr_dev *dev = listener->dev;
+ struct iw_cm_event event;
+ struct qedr_iw_ep *ep;
+
+ ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ if (!ep)
+ return;
+
+ ep->dev = dev;
+ ep->qed_context = params->ep_context;
+
+ memset(&event, 0, sizeof(event));
+ event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ event.status = params->status;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ params->cm_info->ip_version == QED_TCP_IPV4)
+ qedr_fill_sockaddr4(params->cm_info, &event);
+ else
+ qedr_fill_sockaddr6(params->cm_info, &event);
+
+ event.provider_data = (void *)ep;
+ event.private_data = (void *)params->cm_info->private_data;
+ event.private_data_len = (u8)params->cm_info->private_data_len;
+ event.ord = params->cm_info->ord;
+ event.ird = params->cm_info->ird;
+
+ listener->cm_id->event_handler(listener->cm_id, &event);
+}
+
+static void
+qedr_iw_issue_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum iw_cm_event_type event_type)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct iw_cm_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.status = params->status;
+ event.event = event_type;
+
+ if (params->cm_info) {
+ event.ird = params->cm_info->ird;
+ event.ord = params->cm_info->ord;
+ event.private_data_len = params->cm_info->private_data_len;
+ event.private_data = (void *)params->cm_info->private_data;
+ }
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+}
+
+static void
+qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ if (ep->cm_id) {
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
+
+ ep->cm_id->rem_ref(ep->cm_id);
+ ep->cm_id = NULL;
+ }
+}
+
+static void
+qedr_iw_qp_event(void *context,
+ struct qed_iwarp_cm_event_params *params,
+ enum ib_event_type ib_event, char *str)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct ib_qp *ibqp = &ep->qp->ibqp;
+ struct ib_event event;
+
+ DP_NOTICE(dev, "QP error received: %s\n", str);
+
+ if (ibqp->event_handler) {
+ event.event = ib_event;
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+struct qedr_discon_work {
+ struct work_struct work;
+ struct qedr_iw_ep *ep;
+ enum qed_iwarp_event_type event;
+ int status;
+};
+
+static void qedr_iw_disconnect_worker(struct work_struct *work)
+{
+ struct qedr_discon_work *dwork =
+ container_of(work, struct qedr_discon_work, work);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_iw_ep *ep = dwork->ep;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp = ep->qp;
+ struct iw_cm_event event;
+
+ if (qp->destroyed) {
+ kfree(dwork);
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+ return;
+ }
+
+ memset(&event, 0, sizeof(event));
+ event.status = dwork->status;
+ event.event = IW_CM_EVENT_DISCONNECT;
+
+ /* Success means a graceful disconnect was requested; modifying
+ * to SQD is translated to a graceful disconnect. Otherwise a reset is sent.
+ */
+ if (dwork->status)
+ qp_params.new_state = QED_ROCE_QP_STATE_ERR;
+ else
+ qp_params.new_state = QED_ROCE_QP_STATE_SQD;
+
+ kfree(dwork);
+
+ if (ep->cm_id)
+ ep->cm_id->event_handler(ep->cm_id, &event);
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+
+ dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
+
+ qedr_iw_qp_rem_ref(&qp->ibqp);
+}
+
+static void
+qedr_iw_disconnect_event(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_discon_work *work;
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp = ep->qp;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ qedr_iw_qp_add_ref(&qp->ibqp);
+ work->ep = ep;
+ work->event = params->event;
+ work->status = params->status;
+
+ INIT_WORK(&work->work, qedr_iw_disconnect_worker);
+ queue_work(dev->iwarp_wq, &work->work);
+}
+
+static void
+qedr_iw_passive_complete(void *context,
+ struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ /* We will only reach the following state if MPA_REJECT was called on
+ * the passive side. In this case there will be no associated QP.
+ */
+ if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "PASSIVE connection refused releasing ep...\n");
+ kfree(ep);
+ return;
+ }
+
+ qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
+
+ if (params->status < 0)
+ qedr_iw_close_event(context, params);
+}
+
+static int
+qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_send_rtr_in rtr_in;
+
+ rtr_in.ep_context = params->ep_context;
+
+ return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
+}
+
+static int
+qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+ struct qedr_dev *dev = ep->dev;
+
+ switch (params->event) {
+ case QED_IWARP_EVENT_MPA_REQUEST:
+ qedr_iw_mpa_request(context, params);
+ break;
+ case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
+ qedr_iw_mpa_reply(context, params);
+ break;
+ case QED_IWARP_EVENT_PASSIVE_COMPLETE:
+ ep->during_connect = 0;
+ qedr_iw_passive_complete(context, params);
+ break;
+
+ case QED_IWARP_EVENT_ACTIVE_COMPLETE:
+ ep->during_connect = 0;
+ qedr_iw_issue_event(context,
+ params,
+ IW_CM_EVENT_CONNECT_REPLY);
+ if (params->status < 0) {
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
+
+ ep->cm_id->rem_ref(ep->cm_id);
+ ep->cm_id = NULL;
+ }
+ break;
+ case QED_IWARP_EVENT_DISCONNECT:
+ qedr_iw_disconnect_event(context, params);
+ break;
+ case QED_IWARP_EVENT_CLOSE:
+ ep->during_connect = 0;
+ qedr_iw_close_event(context, params);
+ break;
+ case QED_IWARP_EVENT_RQ_EMPTY:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_RQ_EMPTY");
+ break;
+ case QED_IWARP_EVENT_IRQ_FULL:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_IRQ_FULL");
+ break;
+ case QED_IWARP_EVENT_LLP_TIMEOUT:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_LLP_TIMEOUT");
+ break;
+ case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
+ break;
+ case QED_IWARP_EVENT_CQ_OVERFLOW:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_CQ_OVERFLOW");
+ break;
+ case QED_IWARP_EVENT_QP_CATASTROPHIC:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_QP_CATASTROPHIC");
+ break;
+ case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
+ "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
+ break;
+ case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
+ qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
+ "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
+ break;
+ case QED_IWARP_EVENT_TERMINATE_RECEIVED:
+ DP_NOTICE(dev, "Got terminate message\n");
+ break;
+ default:
+ DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+ break;
+ }
+ return 0;
+}
+
+static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
+{
+ struct net_device *ndev;
+ u16 vlan_id = 0;
+
+ ndev = ip_dev_find(&init_net, htonl(addr[0]));
+
+ if (ndev) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ dev_put(ndev);
+ }
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+ return vlan_id;
+}
+
+static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
+{
+ struct net_device *ndev = NULL;
+ struct in6_addr laddr6;
+ u16 vlan_id = 0;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return vlan_id;
+
+ for (i = 0; i < 4; i++)
+ laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ndev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
+ vlan_id = rdma_vlan_dev_vlan_id(ndev);
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ if (vlan_id == 0xffff)
+ vlan_id = 0;
+
+ return vlan_id;
+}
+
+static int
+qedr_addr4_resolve(struct qedr_dev *dev,
+ struct sockaddr_in *src_in,
+ struct sockaddr_in *dst_in, u8 *dst_mac)
+{
+ __be32 src_ip = src_in->sin_addr.s_addr;
+ __be32 dst_ip = dst_in->sin_addr.s_addr;
+ struct neighbour *neigh = NULL;
+ struct rtable *rt = NULL;
+ int rc = 0;
+
+ rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
+ if (IS_ERR(rt)) {
+ DP_ERR(dev, "ip_route_output returned error\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ip);
+
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+static int
+qedr_addr6_resolve(struct qedr_dev *dev,
+ struct sockaddr_in6 *src_in,
+ struct sockaddr_in6 *dst_in, u8 *dst_mac)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = 0;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+
+ if ((!dst) || dst->error) {
+ if (dst) {
+ DP_ERR(dev,
+ "ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ dst_release(dst);
+ }
+ return -EINVAL;
+ }
+ neigh = dst_neigh_lookup(dst, &dst_in);
+
+ if (neigh) {
+ rcu_read_lock();
+ if (neigh->nud_state & NUD_VALID) {
+ ether_addr_copy(dst_mac, neigh->ha);
+ DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ rcu_read_unlock();
+ neigh_release(neigh);
+ }
+
+ dst_release(dst);
+
+ return rc;
+}
+
+int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qed_iwarp_connect_out out_params;
+ struct qed_iwarp_connect_in in_params;
+ struct qed_iwarp_cm_info *cm_info;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct qedr_iw_ep *ep;
+ struct qedr_qp *qp;
+ int rc = 0;
+ int i;
+
+ qp = idr_find(&dev->qpidr, conn_param->qpn);
+
+ laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Connect source address: %pISpc, remote address: %pISpc\n",
+ &cm_id->local_addr, &cm_id->remote_addr);
+
+ if (!laddr->sin_port || !raddr->sin_port)
+ return -EINVAL;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->dev = dev;
+ ep->qp = qp;
+ qp->ep = ep;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ in_params.event_cb = qedr_iw_event_handler;
+ in_params.cb_context = ep;
+
+ cm_info = &in_params.cm_info;
+ memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
+ memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->remote_addr.ss_family == AF_INET) {
+ cm_info->ip_version = QED_TCP_IPV4;
+
+ cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info->remote_port = ntohs(raddr->sin_port);
+ cm_info->local_port = ntohs(laddr->sin_port);
+ cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);
+
+ rc = qedr_addr4_resolve(dev, laddr, raddr,
+ (u8 *)in_params.remote_mac_addr);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct iphdr) + sizeof(struct tcphdr));
+
+ } else {
+ in_params.cm_info.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ cm_info->remote_ip[i] =
+ ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
+ cm_info->local_ip[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ cm_info->local_port = ntohs(laddr6->sin6_port);
+ cm_info->remote_port = ntohs(raddr6->sin6_port);
+
+ in_params.mss = dev->iwarp_max_mtu -
+ (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));
+
+ cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);
+
+ rc = qedr_addr6_resolve(dev, laddr6, raddr6,
+ (u8 *)in_params.remote_mac_addr);
+ }
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
+ conn_param->ord, conn_param->ird, conn_param->private_data,
+ conn_param->private_data_len, qp->rq_psn);
+
+ cm_info->ord = conn_param->ord;
+ cm_info->ird = conn_param->ird;
+ cm_info->private_data = conn_param->private_data;
+ cm_info->private_data_len = conn_param->private_data_len;
+ in_params.qp = qp->qed_qp;
+ memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
+
+ ep->during_connect = 1;
+ rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
+ if (rc)
+ goto err;
+
+ return rc;
+
+err:
+ cm_id->rem_ref(cm_id);
+ kfree(ep);
+ return rc;
+}
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ struct qedr_iw_listener *listener;
+ struct qed_iwarp_listen_in iparams;
+ struct qed_iwarp_listen_out oparams;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ int rc;
+ int i;
+
+ laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP,
+ "Create Listener address: %pISpc\n", &cm_id->local_addr);
+
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->dev = dev;
+ cm_id->add_ref(cm_id);
+ listener->cm_id = cm_id;
+ listener->backlog = backlog;
+
+ iparams.cb_context = listener;
+ iparams.event_cb = qedr_iw_event_handler;
+ iparams.max_backlog = backlog;
+
+ if (!IS_ENABLED(CONFIG_IPV6) ||
+ cm_id->local_addr.ss_family == AF_INET) {
+ iparams.ip_version = QED_TCP_IPV4;
+ memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));
+
+ iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ iparams.port = ntohs(laddr->sin_port);
+ iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
+ } else {
+ iparams.ip_version = QED_TCP_IPV6;
+
+ for (i = 0; i < 4; i++) {
+ iparams.ip_addr[i] =
+ ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
+ }
+
+ iparams.port = ntohs(laddr6->sin6_port);
+
+ iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
+ }
+ rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
+ if (rc)
+ goto err;
+
+ listener->qed_handle = oparams.handle;
+ cm_id->provider_data = listener;
+ return rc;
+
+err:
+ cm_id->rem_ref(cm_id);
+ kfree(listener);
+ return rc;
+}
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct qedr_iw_listener *listener = cm_id->provider_data;
+ struct qedr_dev *dev = get_qedr_dev(cm_id->device);
+ int rc = 0;
+
+ if (listener->qed_handle)
+ rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
+ listener->qed_handle);
+
+ cm_id->rem_ref(cm_id);
+ return rc;
+}
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qedr_qp *qp;
+ struct qed_iwarp_accept_in params;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
+
+ qp = idr_find(&dev->qpidr, conn_param->qpn);
+ if (!qp) {
+ DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
+ return -EINVAL;
+ }
+
+ ep->qp = qp;
+ qp->ep = ep;
+ cm_id->add_ref(cm_id);
+ ep->cm_id = cm_id;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.qp = ep->qp->qed_qp;
+ params.private_data = conn_param->private_data;
+ params.private_data_len = conn_param->private_data_len;
+ params.ird = conn_param->ird;
+ params.ord = conn_param->ord;
+
+ ep->during_connect = 1;
+ rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
+ if (rc)
+ goto err;
+
+ return rc;
+err:
+ ep->during_connect = 0;
+ cm_id->rem_ref(cm_id);
+ return rc;
+}
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
+ struct qedr_dev *dev = ep->dev;
+ struct qed_iwarp_reject_in params;
+
+ params.ep_context = ep->qed_context;
+ params.cb_context = ep;
+ params.private_data = pdata;
+ params.private_data_len = pdata_len;
+ ep->qp = NULL;
+
+ return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
+}
+
+void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ atomic_inc(&qp->refcnt);
+}
+
+void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+
+ if (atomic_dec_and_test(&qp->refcnt)) {
+ spin_lock_irq(&qp->dev->idr_lock);
+ idr_remove(&qp->dev->qpidr, qp->qp_id);
+ spin_unlock_irq(&qp->dev->idr_lock);
+ kfree(qp);
+ }
+}
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ return idr_find(&dev->qpidr, qpn);
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.h b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
new file mode 100644
index 000000000000..08f4b1067e6c
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.h
@@ -0,0 +1,49 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <rdma/iw_cm.h>
+
+int qedr_iw_connect(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
+
+int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
+
+int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+
+int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+
+void qedr_iw_qp_add_ref(struct ib_qp *qp);
+
+void qedr_iw_qp_rem_ref(struct ib_qp *qp);
+
+struct ib_qp *qedr_iw_get_qp(struct ib_device *dev, int qpn);
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index ad8965397cf7..2bdbb12bfc69 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -48,7 +48,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
@@ -64,11 +64,11 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
dev->gsi_qp = qp;
}
-void qedr_ll2_complete_tx_packet(void *cxt,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet)
+static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
{
struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qed_roce_ll2_packet *pkt = cookie;
@@ -93,8 +93,8 @@ void qedr_ll2_complete_tx_packet(void *cxt,
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
-void qedr_ll2_complete_rx_packet(void *cxt,
- struct qed_ll2_comp_rx_data *data)
+static void qedr_ll2_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
{
struct qedr_dev *dev = (struct qedr_dev *)cxt;
struct qedr_cq *cq = dev->gsi_rqcq;
@@ -122,10 +122,9 @@ void qedr_ll2_complete_rx_packet(void *cxt,
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
-void qedr_ll2_release_rx_packet(void *cxt,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr, bool b_last_packet)
+static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
{
/* Do nothing... */
}
@@ -237,7 +236,7 @@ static int qedr_ll2_post_tx(struct qedr_dev *dev,
return 0;
}
-int qedr_ll2_stop(struct qedr_dev *dev)
+static int qedr_ll2_stop(struct qedr_dev *dev)
{
int rc;
@@ -260,8 +259,8 @@ int qedr_ll2_stop(struct qedr_dev *dev)
return rc;
}
-int qedr_ll2_start(struct qedr_dev *dev,
- struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
+static int qedr_ll2_start(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
@@ -660,7 +659,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dev->gsi_ll2_handle,
wr->sg_list[0].addr,
wr->sg_list[0].length,
- 0 /* cookie */,
+ NULL /* cookie */,
1 /* notify_fw */);
if (rc) {
DP_ERR(dev,
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
index a55916323ea9..a55916323ea9 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.h
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 769ac07c3c8e..b26aa88dab48 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -49,7 +49,7 @@
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_cm.h"
+#include "qedr_roce_cm.h"
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
@@ -70,6 +70,20 @@ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ memset(sgid->raw, 0, sizeof(sgid->raw));
+ ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "QUERY sgid[%d]=%llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return 0;
+}
+
int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *sgid)
{
@@ -263,8 +277,13 @@ int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
attr->sm_lid = 0;
attr->sm_sl = 0;
attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
- attr->gid_tbl_len = QEDR_MAX_SGID;
- attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ attr->gid_tbl_len = 1;
+ attr->pkey_tbl_len = 1;
+ } else {
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ }
attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
attr->qkey_viol_cntr = 0;
get_link_speed_and_width(rdma_port->link_speed,
@@ -770,7 +789,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
struct qedr_dev *dev,
struct qedr_userq *q,
u64 buf_addr, size_t buf_len,
- int access, int dmasync)
+ int access, int dmasync,
+ int alloc_and_init)
{
u32 fw_pages;
int rc;
@@ -791,19 +811,27 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
if (rc)
goto err0;
- q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
- if (IS_ERR(q->pbl_tbl)) {
- rc = PTR_ERR(q->pbl_tbl);
- goto err0;
- }
-
+ if (alloc_and_init) {
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR(q->pbl_tbl)) {
+ rc = PTR_ERR(q->pbl_tbl);
+ goto err0;
+ }
qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
FW_PAGE_SHIFT);
+ } else {
+ q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
+ if (!q->pbl_tbl) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+ }
return 0;
err0:
ib_umem_release(q->umem);
+ q->umem = NULL;
return rc;
}
@@ -929,7 +957,8 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->cq_type = QEDR_CQ_TYPE_USER;
rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
- ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ ureq.len, IB_ACCESS_LOCAL_WRITE,
+ 1, 1);
if (rc)
goto err0;
@@ -1222,18 +1251,34 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
return 0;
}
-static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_rq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
- uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ /* iWARP requires two doorbells per RQ. */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ } else {
+ uresp->rq_db_offset =
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ }
+
uresp->rq_icid = qp->icid;
}
-static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+static void qedr_copy_sq_uresp(struct qedr_dev *dev,
+ struct qedr_create_qp_uresp *uresp,
struct qedr_qp *qp)
{
uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
- uresp->sq_icid = qp->icid + 1;
+
+ /* iWARP uses the same cid for rq and sq */
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ uresp->sq_icid = qp->icid;
+ else
+ uresp->sq_icid = qp->icid + 1;
}
static int qedr_copy_qp_uresp(struct qedr_dev *dev,
@@ -1243,8 +1288,8 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
int rc;
memset(&uresp, 0, sizeof(uresp));
- qedr_copy_sq_uresp(&uresp, qp);
- qedr_copy_rq_uresp(&uresp, qp);
+ qedr_copy_sq_uresp(dev, &uresp, qp);
+ qedr_copy_rq_uresp(dev, &uresp, qp);
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
@@ -1264,6 +1309,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
struct ib_qp_init_attr *attrs)
{
spin_lock_init(&qp->q_lock);
+ atomic_set(&qp->refcnt, 1);
qp->pd = pd;
qp->qp_type = attrs->qp_type;
qp->max_inline_data = attrs->cap.max_inline_data;
@@ -1334,6 +1380,52 @@ static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
}
+static int qedr_idr_add(struct qedr_dev *dev, void *ptr, u32 id)
+{
+ int rc;
+
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ return 0;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&dev->idr_lock);
+
+ rc = idr_alloc(&dev->qpidr, ptr, id, id + 1, GFP_ATOMIC);
+
+ spin_unlock_irq(&dev->idr_lock);
+ idr_preload_end();
+
+ return rc < 0 ? rc : 0;
+}
+
+static void qedr_idr_remove(struct qedr_dev *dev, u32 id)
+{
+ if (!rdma_protocol_iwarp(&dev->ibdev, 1))
+ return;
+
+ spin_lock_irq(&dev->idr_lock);
+ idr_remove(&dev->qpidr, id);
+ spin_unlock_irq(&dev->idr_lock);
+}
+
+static inline void
+qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
+ qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
+
+ qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
+ &qp->usq.pbl_info, FW_PAGE_SHIFT);
+
+ qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
+ qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
+
+ qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
+ &qp->urq.pbl_info, FW_PAGE_SHIFT);
+}
+
static void qedr_cleanup_user(struct qedr_dev *dev, struct qedr_qp *qp)
{
if (qp->usq.umem)
@@ -1355,12 +1447,11 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
struct qed_rdma_create_qp_out_params out_params;
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct ib_ucontext *ib_ctx = NULL;
- struct qedr_ucontext *ctx = NULL;
struct qedr_create_qp_ureq ureq;
+ int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
int rc = -EINVAL;
ib_ctx = ibpd->uobject->context;
- ctx = get_qedr_ucontext(ib_ctx);
memset(&ureq, 0, sizeof(ureq));
rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
@@ -1371,14 +1462,13 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* SQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq.sq_addr,
- ureq.sq_len, 0, 0);
+ ureq.sq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
/* RQ - read access only (0), dma sync not required (0) */
rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq.rq_addr,
- ureq.rq_len, 0, 0);
-
+ ureq.rq_len, 0, 0, alloc_and_init);
if (rc)
return rc;
@@ -1399,6 +1489,9 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
goto err1;
}
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ qedr_iwarp_populate_user_qp(dev, qp, &out_params);
+
qp->qp_id = out_params.qp_id;
qp->icid = out_params.icid;
@@ -1419,6 +1512,21 @@ err1:
return rc;
}
+static void qedr_set_iwarp_db_info(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid;
+
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2 = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
+ qp->rq.iwarp_db2_data.data.icid = qp->icid;
+ qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
+}
+
static int
qedr_roce_create_kernel_qp(struct qedr_dev *dev,
struct qedr_qp *qp,
@@ -1465,8 +1573,71 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
qp->icid = out_params.icid;
qedr_set_roce_db_info(dev, qp);
+ return rc;
+}
- return 0;
+static int
+qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qed_rdma_create_qp_in_params *in_params,
+ u32 n_sq_elems, u32 n_rq_elems)
+{
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_ext_pbl ext_pbl;
+ int rc;
+
+ in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ QED_CHAIN_MODE_PBL);
+ in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ QED_CHAIN_MODE_PBL);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ in_params, &out_params);
+
+ if (!qp->qed_qp)
+ return -EINVAL;
+
+ /* Now we allocate the chain */
+ ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
+ ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl, &ext_pbl);
+
+ if (rc)
+ goto err;
+
+ ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
+ ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl, &ext_pbl);
+
+ if (rc)
+ goto err;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+
+ qedr_set_iwarp_db_info(dev, qp);
+ return rc;
+
+err:
+ dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+
+ return rc;
}
static void qedr_cleanup_kernel(struct qedr_dev *dev, struct qedr_qp *qp)
@@ -1541,8 +1712,12 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
- rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
- n_sq_elems, n_rq_elems);
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ rc = qedr_iwarp_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
+ else
+ rc = qedr_roce_create_kernel_qp(dev, qp, &in_params,
+ n_sq_elems, n_rq_elems);
if (rc)
qedr_cleanup_kernel(dev, qp);
@@ -1602,6 +1777,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
qp->ibqp.qp_num = qp->qp_id;
+ rc = qedr_idr_add(dev, qp, qp->qp_id);
+ if (rc)
+ goto err;
+
return &qp->ibqp;
err:
@@ -1689,10 +1868,13 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* Update doorbell (in case post_recv was
* done before move to RTR)
*/
- wmb();
- writel(qp->rq.db_data.raw, qp->rq.db);
- /* Make sure write takes effect */
- mmiowb();
+
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ }
break;
case QED_ROCE_QP_STATE_ERR:
break;
@@ -1786,16 +1968,18 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
else
new_qp_state = old_qp_state;
- if (!ib_modify_qp_is_ok
- (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
- DP_ERR(dev,
- "modify qp: invalid attribute mask=0x%x specified for\n"
- "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
- attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
- new_qp_state);
- rc = -EINVAL;
- goto err;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
+ ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type,
+ old_qp_state, new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
}
/* Translate the masks... */
@@ -2082,7 +2266,7 @@ err:
return rc;
}
-int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
+static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
{
int rc = 0;
@@ -2111,15 +2295,34 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
qp, qp->qp_type);
- if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
- (qp->state != QED_ROCE_QP_STATE_ERR) &&
- (qp->state != QED_ROCE_QP_STATE_INIT)) {
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_INIT)) {
- attr.qp_state = IB_QPS_ERR;
- attr_mask |= IB_QP_STATE;
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
- /* Change the QP state to ERROR */
- qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+ } else {
+ /* Wait for the connect/accept to complete */
+ if (qp->ep) {
+ int wait_count = 1;
+
+ while (qp->ep->during_connect) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "Still in during connect/accept\n");
+
+ msleep(100);
+ if (wait_count++ > 200) {
+ DP_NOTICE(dev,
+ "during connect timeout\n");
+ break;
+ }
+ }
+ }
}
if (qp->qp_type == IB_QPT_GSI)
@@ -2127,8 +2330,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
qedr_free_qp_resources(dev, qp);
- kfree(qp);
-
+ if (atomic_dec_and_test(&qp->refcnt)) {
+ qedr_idr_remove(dev, qp->qp_id);
+ kfree(qp);
+ }
return rc;
}
@@ -2395,7 +2600,6 @@ err0:
struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
enum ib_mr_type mr_type, u32 max_num_sg)
{
- struct qedr_dev *dev;
struct qedr_mr *mr;
if (mr_type != IB_MR_TYPE_MEM_REG)
@@ -2406,8 +2610,6 @@ struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
if (IS_ERR(mr))
return ERR_PTR(-EINVAL);
- dev = mr->dev;
-
return &mr->ibmr;
}
@@ -2740,6 +2942,7 @@ static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
case IB_WR_SEND_WITH_INV:
return IB_WC_SEND;
case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_READ_WITH_INV:
return IB_WC_RDMA_READ;
case IB_WR_ATOMIC_CMP_AND_SWP:
return IB_WC_COMP_SWAP;
@@ -2900,11 +3103,8 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
break;
case IB_WR_RDMA_READ_WITH_INV:
- DP_ERR(dev,
- "RDMA READ WITH INVALIDATE not supported\n");
- *bad_wr = wr;
- rc = -EINVAL;
- break;
+ SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
+ /* fallthrough -- handled identically to RDMA READ */
case IB_WR_RDMA_READ:
wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
@@ -3014,15 +3214,17 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->q_lock, flags);
- if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
- (qp->state != QED_ROCE_QP_STATE_ERR) &&
- (qp->state != QED_ROCE_QP_STATE_SQD)) {
- spin_unlock_irqrestore(&qp->q_lock, flags);
- *bad_wr = wr;
- DP_DEBUG(dev, QEDR_MSG_CQ,
- "QP in wrong state! QP icid=0x%x state %d\n",
- qp->icid, qp->state);
- return -EINVAL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
+ (qp->state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->state != QED_ROCE_QP_STATE_SQD)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
}
while (wr) {
@@ -3142,6 +3344,11 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
/* Make sure write sticks */
mmiowb();
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
+ writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
+ mmiowb(); /* for second doorbell */
+ }
+
wr = wr->next;
}
@@ -3603,23 +3810,3 @@ int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
return IB_MAD_RESULT_SUCCESS;
}
-
-int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
- RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
-
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
- return 0;
-}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 0f8ab49d5a1a..1a94425dea33 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -39,6 +39,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
struct ib_port_modify *props);
int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid);
int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index e0fdb9201423..cb06314a2ae2 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_QIB
tristate "Intel PCIe HCA support"
depends on 64BIT && INFINIBAND_RDMAVT
+ depends on PCI
---help---
This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
index 79ebd79e8405..80ffab88fbca 100644
--- a/drivers/infiniband/hw/qib/Makefile
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index f9e1c69603a5..092ed8103842 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -102,18 +102,6 @@ extern const struct pci_error_handlers qib_pci_err_handler;
#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define QIB_EEP_LOG_CNT (4)
-struct qib_eep_log_mask {
- u64 errs_to_log;
- u64 hwerrs_to_log;
-};
-
-/*
* Below contains all data related to a single context (formerly called port).
*/
@@ -443,14 +431,12 @@ struct qib_irq_notify;
#endif
struct qib_msix_entry {
- int irq;
void *arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca;
int rcv;
struct qib_irq_notify *notifier;
#endif
- char name[MAX_NAME_SIZE];
cpumask_var_t mask;
};
@@ -1081,11 +1067,6 @@ struct qib_devdata {
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
- /*
- * masks for which bits of errs, hwerrs that cause
- * each of the counters to increment.
- */
- struct qib_eep_log_mask eep_st_masks[QIB_EEP_LOG_CNT];
struct qib_diag_client *diag_client;
spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
struct diag_observer_list_elt *diag_observer_list;
@@ -1188,7 +1169,7 @@ int qib_set_lid(struct qib_pportdata *, u32, u8);
void qib_hol_down(struct qib_pportdata *);
void qib_hol_init(struct qib_pportdata *);
void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(unsigned long);
+void qib_hol_event(struct timer_list *);
void qib_disable_after_error(struct qib_devdata *);
int qib_set_uevent_bits(struct qib_pportdata *, const int);
@@ -1299,10 +1280,9 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
-#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(unsigned long opaque);
+void qib_clear_symerror_on_linkup(struct timer_list *t);
/*
* Set LED override, only the two LSBs have "public" meaning, but
@@ -1434,10 +1414,8 @@ int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
const struct pci_device_id *);
void qib_pcie_ddcleanup(struct qib_devdata *);
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
-int qib_reinit_intr(struct qib_devdata *);
-void qib_enable_intx(struct qib_devdata *dd);
-void qib_nomsi(struct qib_devdata *);
-void qib_nomsix(struct qib_devdata *);
+void qib_free_irq(struct qib_devdata *dd);
+int qib_reinit_intr(struct qib_devdata *dd);
void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
/* interrupts for device */
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index a5356cb4252e..9ecaab6232e3 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -67,7 +67,6 @@ struct qib_chip_specific {
u32 lastbuf_for_pio;
u32 updthresh; /* current AvailUpdThld */
u32 updthresh_dflt; /* default AvailUpdThld */
- int irq;
u8 presets_needed;
u8 relock_timer_active;
char emsgbuf[128];
@@ -75,6 +74,7 @@ struct qib_chip_specific {
char bitsmsgbuf[64];
struct timer_list relock_timer;
unsigned int relock_interval; /* in jiffies */
+ struct qib_devdata *dd;
};
struct qib_chippport_specific {
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 775018b32b0d..a9377eee8734 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -761,7 +761,6 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
{
struct qib_diag_client *dc = fp->private_data;
struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
ssize_t ret;
if (dc->pid != current->pid) {
@@ -769,8 +768,6 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
goto bail;
}
- kreg_base = dd->kregbase;
-
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
@@ -838,7 +835,6 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
{
struct qib_diag_client *dc = fp->private_data;
struct qib_devdata *dd = dc->dd;
- void __iomem *kreg_base;
ssize_t ret;
if (dc->pid != current->pid) {
@@ -846,8 +842,6 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
goto bail;
}
- kreg_base = dd->kregbase;
-
if (count == 0)
ret = 0;
else if ((count % 4) || (*off % 4))
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 719906a9fd51..33d3335385e8 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -682,9 +682,10 @@ int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)
-static void qib_run_led_override(unsigned long opaque)
+static void qib_run_led_override(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t,
+ led_override_timer);
struct qib_devdata *dd = ppd->dd;
int timeoff;
int ph_idx;
@@ -735,9 +736,7 @@ void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
*/
if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
/* Need to start timer */
- init_timer(&ppd->led_override_timer);
- ppd->led_override_timer.function = qib_run_led_override;
- ppd->led_override_timer.data = (unsigned long) ppd;
+ timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
} else {
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9396c1807cc3..2d6a191afec0 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -696,15 +696,8 @@ static void qib_clean_part_key(struct qib_ctxtdata *rcd,
struct qib_devdata *dd)
{
int i, j, pchanged = 0;
- u64 oldpkey;
struct qib_pportdata *ppd = rcd->ppd;
- /* for debugging only */
- oldpkey = (u64) ppd->pkeys[0] |
- ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
-
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i])
continue;
@@ -1817,7 +1810,6 @@ static int qib_close(struct inode *in, struct file *fp)
struct qib_devdata *dd;
unsigned long flags;
unsigned ctxt;
- pid_t pid;
mutex_lock(&qib_mutex);
@@ -1859,7 +1851,6 @@ static int qib_close(struct inode *in, struct file *fp)
spin_lock_irqsave(&dd->uctxt_lock, flags);
ctxt = rcd->ctxt;
dd->rcd[ctxt] = NULL;
- pid = rcd->pid;
rcd->pid = 0;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 3259a60e4f4f..8a15e5c7dd91 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -245,7 +245,6 @@ struct qib_chip_specific {
u64 iblnkerrsnap;
u64 ibcctrl; /* shadow for kr_ibcctrl */
u32 lastlinkrecov; /* link recovery issue */
- int irq;
u32 cntrnamelen;
u32 portcntrnamelen;
u32 ncntrs;
@@ -266,6 +265,7 @@ struct qib_chip_specific {
u64 rpkts; /* total packets received (sample result) */
u64 xmit_wait; /* # of ticks no data sent (sample result) */
struct timer_list pma_timer;
+ struct qib_pportdata *ppd;
char emsgbuf[128];
char bitsmsgbuf[64];
u8 pma_sample_status;
@@ -749,7 +749,6 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char *bitsmsg;
- int log_idx;
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
if (!hwerrs)
@@ -771,11 +770,6 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
hwerrs &= dd->cspec->hwerrmask;
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
-
/*
* Make sure we get this much out, unless told to be quiet,
* or it's occurred within the last 5 seconds.
@@ -1005,7 +999,6 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
char *msg;
u64 ignore_this_time = 0;
u64 iserr = 0;
- int log_idx;
struct qib_pportdata *ppd = dd->pport;
u64 mask;
@@ -1016,10 +1009,6 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (errs & ~IB_E_BITSEXTANT)
qib_dev_err(dd,
@@ -1485,15 +1474,6 @@ static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
}
-static void qib_6120_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
/**
* qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
* @dd: the qlogic_ib device
@@ -1502,7 +1482,7 @@ static void qib_6120_free_irq(struct qib_devdata *dd)
*/
static void qib_6120_setup_cleanup(struct qib_devdata *dd)
{
- qib_6120_free_irq(dd);
+ qib_free_irq(dd);
kfree(dd->cspec->cntrs);
kfree(dd->cspec->portcntrs);
if (dd->cspec->dummy_hdrq) {
@@ -1706,6 +1686,8 @@ bail:
*/
static void qib_setup_6120_interrupt(struct qib_devdata *dd)
{
+ int ret;
+
/*
* If the chip supports added error indication via GPIO pins,
* enable interrupts on those bits so the interrupt routine
@@ -1719,19 +1701,12 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
}
- if (!dd->cspec->irq)
+ ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
+ QIB_DRV_NAME);
+ if (ret)
qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret;
-
- ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
- QIB_DRV_NAME, dd);
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup interrupt (irq=%d): %d\n",
- dd->cspec->irq, ret);
- }
+ "Couldn't setup interrupt (irq=%d): %d\n",
+ pci_irq_vector(dd->pcidev, 0), ret);
}
/**
@@ -1929,7 +1904,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
u32 type, unsigned long pa)
{
u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- u32 tidx;
if (!dd->kregbase)
return;
@@ -1953,7 +1927,6 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
else /* for now, always full 4KB page */
pa |= 2 << 29;
}
- tidx = tidptr - dd->egrtidbase;
writel(pa, tidp32);
mmiowb();
}
@@ -2647,9 +2620,9 @@ static void qib_chk_6120_errormask(struct qib_devdata *dd)
* need traffic_wds done the way it is
* called from add_timer
*/
-static void qib_get_6120_faststats(unsigned long opaque)
+static void qib_get_6120_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
@@ -2937,10 +2910,10 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
return ret;
}
-static void pma_6120_timer(unsigned long data)
+static void pma_6120_timer(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)data;
- struct qib_chip_specific *cs = ppd->dd->cspec;
+ struct qib_chip_specific *cs = from_timer(cs, t, pma_timer);
+ struct qib_pportdata *ppd = cs->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
unsigned long flags;
@@ -3205,6 +3178,7 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->num_pports = 1;
dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
+ dd->cspec->ppd = ppd;
ppd->cpspec = NULL; /* not used in this chip */
spin_lock_init(&dd->cspec->kernel_tid_lock);
@@ -3242,20 +3216,6 @@ static int init_6120_variables(struct qib_devdata *dd)
if (qib_unordered_wc())
dd->flags |= QIB_PIO_FLUSH_WC;
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- /* Ignore errors in PIO/PBC on systems with unordered write-combining */
- if (qib_unordered_wc())
- dd->eep_st_masks[0].hwerrs_to_log &= ~TXE_PIO_PARITY;
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
ret = qib_init_pportdata(ppd, dd, 0, 1);
if (ret)
goto bail;
@@ -3289,11 +3249,8 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->rhdrhead_intr_off = 1ULL << 32;
/* setup the stats timer; the add_timer is done at end of init */
- setup_timer(&dd->stats_timer, qib_get_6120_faststats,
- (unsigned long)dd);
-
- setup_timer(&dd->cspec->pma_timer, pma_6120_timer,
- (unsigned long)ppd);
+ timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
+ timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
dd->ureg_align = qib_read_kreg32(dd, kr_palign);
@@ -3490,7 +3447,7 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
dd->f_bringup_serdes = qib_6120_bringup_serdes;
dd->f_cleanup = qib_6120_setup_cleanup;
dd->f_clear_tids = qib_6120_clear_tids;
- dd->f_free_irq = qib_6120_free_irq;
+ dd->f_free_irq = qib_free_irq;
dd->f_get_base_info = qib_6120_get_base_info;
dd->f_get_msgheader = qib_6120_get_msgheader;
dd->f_getsendbuf = qib_6120_getsendbuf;
@@ -3559,8 +3516,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
if (qib_pcie_params(dd, 8, NULL))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
- dd->cspec->irq = pdev->irq; /* save IRQ */
-
/* clear diagctrl register, in case diags were running and crashed */
qib_write_kreg(dd, kr_hwdiagctrl, 0);
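
The 6120 hunks above convert the driver's timers from setup_timer() with an unsigned-long callback argument to timer_setup() with from_timer(); where the timer is not embedded in the structure the callback ultimately needs (pma_timer lives in qib_chip_specific), the patch adds a back-pointer (cspec->ppd) in place of the old .data field. A minimal, self-contained sketch of the pattern, using hypothetical foo_* names that are not part of this patch:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct foo_devdata {
	struct timer_list stats_timer;	/* timer embedded in its owner */
	u64 samples;
};

static void foo_stats_timer(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the named timer member */
	struct foo_devdata *fd = from_timer(fd, t, stats_timer);

	fd->samples++;
	mod_timer(&fd->stats_timer, jiffies + HZ);
}

static void foo_start_stats(struct foo_devdata *fd)
{
	/* no .data field any more; context travels via the containing struct */
	timer_setup(&fd->stats_timer, foo_stats_timer, 0);
	mod_timer(&fd->stats_timer, jiffies + HZ);
}
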
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 04bdd3d487b1..bdff2326731e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1042,9 +1042,11 @@ done:
return iserr;
}
-static void reenable_7220_chase(unsigned long opaque)
+static void reenable_7220_chase(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_chippport_specific *cpspec = from_timer(cpspec, t,
+ chase_timer);
+ struct qib_pportdata *ppd = &cpspec->pportdata;
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -1094,7 +1096,6 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
char *msg;
u64 ignore_this_time = 0;
u64 iserr = 0;
- int log_idx;
struct qib_pportdata *ppd = dd->pport;
u64 mask;
@@ -1105,10 +1106,6 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
/* do these first, they are most important */
if (errs & ERR_MASK(HardwareErr))
qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (errs & QLOGIC_IB_E_SDMAERRS)
sdma_7220_errors(ppd, errs);
@@ -1302,7 +1299,6 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
u32 bits, ctrl;
int isfatal = 0;
char *bitsmsg;
- int log_idx;
hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
if (!hwerrs)
@@ -1326,10 +1322,6 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
hwerrs &= dd->cspec->hwerrmask;
- /* We log some errors to EEPROM, check if we have any of those. */
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (hwerrs & dd->eep_st_masks[log_idx].hwerrs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
RXE_PARITY))
qib_devinfo(dd->pcidev,
@@ -1663,7 +1655,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
dd->control | QLOGIC_IB_C_FREEZEMODE);
ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
+ if (ppd->cpspec->chase_timer.function) /* if initted */
del_timer_sync(&ppd->cpspec->chase_timer);
if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
@@ -1780,15 +1772,6 @@ static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
}
-static void qib_7220_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_nomsi(dd);
-}
-
/*
* qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
* @dd: the qlogic_ib device
@@ -1798,7 +1781,7 @@ static void qib_7220_free_irq(struct qib_devdata *dd)
*/
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
{
- qib_7220_free_irq(dd);
+ qib_free_irq(dd);
kfree(dd->cspec->cntrs);
kfree(dd->cspec->portcntrs);
}
@@ -2026,20 +2009,14 @@ bail:
*/
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
{
- if (!dd->cspec->irq)
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- else {
- int ret = request_irq(dd->cspec->irq, qib_7220intr,
- dd->msi_lo ? 0 : IRQF_SHARED,
- QIB_DRV_NAME, dd);
+ int ret;
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup %s interrupt (irq=%d): %d\n",
- dd->msi_lo ? "MSI" : "INTx",
- dd->cspec->irq, ret);
- }
+ ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
+ QIB_DRV_NAME);
+ if (ret)
+ qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
+ dd->pcidev->msi_enabled ? "MSI" : "INTx",
+ pci_irq_vector(dd->pcidev, 0), ret);
}
/**
@@ -3263,9 +3240,9 @@ done:
* need traffic_wds done the way it is
* called from add_timer
*/
-static void qib_get_7220_faststats(unsigned long opaque)
+static void qib_get_7220_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd = dd->pport;
unsigned long flags;
u64 traffic_wds;
@@ -3302,16 +3279,12 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
return 0;
qib_devinfo(dd->pcidev,
- "MSI interrupt not detected, trying INTx interrupts\n");
- qib_7220_free_irq(dd);
- qib_enable_intx(dd);
- /*
- * Some newer kernels require free_irq before disable_msi,
- * and irq can be changed during disable and INTx enable
- * and we need to therefore use the pcidev->irq value,
- * not our saved MSI value.
- */
- dd->cspec->irq = dd->pcidev->irq;
+ "MSI interrupt not detected, trying INTx interrupts\n");
+
+ qib_free_irq(dd);
+ dd->msi_lo = 0;
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7220_interrupt(dd);
return 1;
}
@@ -3543,7 +3516,6 @@ static void autoneg_7220_work(struct work_struct *work)
{
struct qib_pportdata *ppd;
struct qib_devdata *dd;
- u64 startms;
u32 i;
unsigned long flags;
@@ -3551,8 +3523,6 @@ static void autoneg_7220_work(struct work_struct *work)
autoneg_work.work)->pportdata;
dd = ppd->dd;
- startms = jiffies_to_msecs(jiffies);
-
/*
* Busy wait for this first part, it should be at most a
* few hundred usec, since we scheduled ourselves for 2msec.
@@ -3997,6 +3967,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->num_pports = 1;
dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
+ dd->cspec->dd = dd;
ppd->cpspec = cpspec;
spin_lock_init(&dd->cspec->sdepb_lock);
@@ -4035,16 +4006,6 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->flags |= qib_special_trigger ?
QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
- /*
- * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
- * 2 is Some Misc, 3 is reserved for future.
- */
- dd->eep_st_masks[0].hwerrs_to_log = HWE_MASK(TXEMemParityErr);
-
- dd->eep_st_masks[1].hwerrs_to_log = HWE_MASK(RXEMemParityErr);
-
- dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated);
-
init_waitqueue_head(&cpspec->autoneg_wait);
INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
@@ -4069,8 +4030,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
if (!qib_mini_init)
qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
- setup_timer(&ppd->cpspec->chase_timer, reenable_7220_chase,
- (unsigned long)ppd);
+ timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);
qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
@@ -4095,9 +4055,7 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->rhdrhead_intr_off = 1ULL << 32;
/* setup the stats timer; the add_timer is done at end of init */
- init_timer(&dd->stats_timer);
- dd->stats_timer.function = qib_get_7220_faststats;
- dd->stats_timer.data = (unsigned long) dd;
+ timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
/*
@@ -4535,7 +4493,7 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
dd->f_bringup_serdes = qib_7220_bringup_serdes;
dd->f_cleanup = qib_setup_7220_cleanup;
dd->f_clear_tids = qib_7220_clear_tids;
- dd->f_free_irq = qib_7220_free_irq;
+ dd->f_free_irq = qib_free_irq;
dd->f_get_base_info = qib_7220_get_base_info;
dd->f_get_msgheader = qib_7220_get_msgheader;
dd->f_getsendbuf = qib_7220_getsendbuf;
@@ -4618,9 +4576,6 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
- /* save IRQ for possible later use */
- dd->cspec->irq = pdev->irq;
-
if (qib_read_kreg64(dd, kr_hwerrstatus) &
QLOGIC_IB_HWE_SERDESPLLFAILED)
qib_write_kreg(dd, kr_hwerrclear,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 14cadf6d6214..6265dac415fc 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -150,7 +150,7 @@ static struct kparam_string kp_txselect = {
.string = txselect_list,
.maxlen = MAX_ATTEN_LEN
};
-static int setup_txselect(const char *, struct kernel_param *);
+static int setup_txselect(const char *, const struct kernel_param *);
module_param_call(txselect, setup_txselect, param_get_string,
&kp_txselect, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(txselect,
@@ -553,7 +553,6 @@ struct qib_chip_specific {
u32 updthresh; /* current AvailUpdThld */
u32 updthresh_dflt; /* default AvailUpdThld */
u32 r1;
- int irq;
u32 num_msix_entries;
u32 sdmabufcnt;
u32 lastbuf_for_pio;
@@ -756,10 +755,8 @@ static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
-static void setup_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
-static void reset_dca_notifier(struct qib_devdata *dd,
- struct qib_msix_entry *m);
+static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
+static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
#endif
/**
@@ -1647,7 +1644,6 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
u64 iserr = 0;
u64 errs;
u64 mask;
- int log_idx;
qib_stats.sps_errints++;
errs = qib_read_kreg64(dd, kr_errstatus);
@@ -1665,10 +1661,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
if (errs & QIB_E_HARDWARE) {
*msg = '\0';
qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- } else
- for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
- if (errs & dd->eep_st_masks[log_idx].errs_to_log)
- qib_inc_eeprom_err(dd, log_idx, 1);
+ }
if (errs & QIB_E_SPKTERRS) {
qib_disarm_7322_senderrbufs(dd->pport);
@@ -1739,9 +1732,10 @@ static void qib_error_tasklet(unsigned long data)
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}
-static void reenable_chase(unsigned long opaque)
+static void reenable_chase(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
+ struct qib_pportdata *ppd = cp->ppd;
ppd->cpspec->chase_timer.expires = 0;
qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2531,7 +2525,7 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.data) /* if initted */
+ if (ppd->cpspec->chase_timer.function) /* if initted */
del_timer_sync(&ppd->cpspec->chase_timer);
/*
@@ -2778,7 +2772,7 @@ static void qib_setup_dca(struct qib_devdata *dd)
qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
cspec->dca_rcvhdr_ctrl[i]);
for (i = 0; i < cspec->num_msix_entries; i++)
- setup_dca_notifier(dd, &cspec->msix_entries[i]);
+ setup_dca_notifier(dd, i);
}
static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
@@ -2820,49 +2814,41 @@ static void qib_irq_notifier_release(struct kref *ref)
}
#endif
-/*
- * Disable MSIx interrupt if enabled, call generic MSIx code
- * to cleanup, and clear pending MSIx interrupts.
- * Used for fallback to INTx, after reset, and when MSIx setup fails.
- */
-static void qib_7322_nomsix(struct qib_devdata *dd)
+static void qib_7322_free_irq(struct qib_devdata *dd)
{
u64 intgranted;
- int n;
+ int i;
dd->cspec->main_int_mask = ~0ULL;
- n = dd->cspec->num_msix_entries;
- if (n) {
- int i;
- dd->cspec->num_msix_entries = 0;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < dd->cspec->num_msix_entries; i++) {
+ /* only free IRQs that were allocated */
+ if (dd->cspec->msix_entries[i].arg) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
- reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
+ reset_dca_notifier(dd, i);
#endif
- irq_set_affinity_hint(
- dd->cspec->msix_entries[i].irq, NULL);
+ irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
+ NULL);
free_cpumask_var(dd->cspec->msix_entries[i].mask);
- free_irq(dd->cspec->msix_entries[i].irq,
- dd->cspec->msix_entries[i].arg);
+ pci_free_irq(dd->pcidev, i,
+ dd->cspec->msix_entries[i].arg);
}
- qib_nomsix(dd);
}
+
+ /* If num_msix_entries was 0, disable the INTx IRQ */
+ if (!dd->cspec->num_msix_entries)
+ pci_free_irq(dd->pcidev, 0, dd);
+ else
+ dd->cspec->num_msix_entries = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+
/* make sure no MSIx interrupts are left pending */
intgranted = qib_read_kreg64(dd, kr_intgranted);
if (intgranted)
qib_write_kreg(dd, kr_intgranted, intgranted);
}
-static void qib_7322_free_irq(struct qib_devdata *dd)
-{
- if (dd->cspec->irq) {
- free_irq(dd->cspec->irq, dd);
- dd->cspec->irq = 0;
- }
- qib_7322_nomsix(dd);
-}
-
static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
int i;
@@ -3329,22 +3315,20 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
#ifdef CONFIG_INFINIBAND_QIB_DCA
-static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
{
- if (!m->dca)
+ if (!dd->cspec->msix_entries[msixnum].dca)
return;
- qib_devinfo(dd->pcidev,
- "Disabling notifier on HCA %d irq %d\n",
- dd->unit,
- m->irq);
- irq_set_affinity_notifier(
- m->irq,
- NULL);
- m->notifier = NULL;
+
+ qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
+ dd->unit, pci_irq_vector(dd->pcidev, msixnum));
+ irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
+ dd->cspec->msix_entries[msixnum].notifier = NULL;
}
-static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
+static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
{
+ struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
struct qib_irq_notify *n;
if (!m->dca)
@@ -3354,7 +3338,7 @@ static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
int ret;
m->notifier = n;
- n->notify.irq = m->irq;
+ n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
n->notify.notify = qib_irq_notifier_notify;
n->notify.release = qib_irq_notifier_release;
n->arg = m->arg;
@@ -3415,22 +3399,17 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
if (!dd->cspec->num_msix_entries) {
/* Try to get INTx interrupt */
try_intx:
- if (!dd->pcidev->irq) {
- qib_dev_err(dd,
- "irq is 0, BIOS error? Interrupts won't work\n");
- goto bail;
- }
- ret = request_irq(dd->pcidev->irq, qib_7322intr,
- IRQF_SHARED, QIB_DRV_NAME, dd);
+ ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
+ QIB_DRV_NAME);
if (ret) {
- qib_dev_err(dd,
+ qib_dev_err(
+ dd,
"Couldn't setup INTx interrupt (irq=%d): %d\n",
- dd->pcidev->irq, ret);
- goto bail;
+ pci_irq_vector(dd->pcidev, 0), ret);
+ return;
}
- dd->cspec->irq = dd->pcidev->irq;
dd->cspec->main_int_mask = ~0ULL;
- goto bail;
+ return;
}
/* Try to get MSIx interrupts */
@@ -3453,15 +3432,10 @@ try_intx:
for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
irq_handler_t handler;
void *arg;
- u64 val;
int lsb, reg, sh;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca = 0;
#endif
-
- dd->cspec->msix_entries[msixnum].
- name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
- = '\0';
if (i < ARRAY_SIZE(irq_table)) {
if (irq_table[i].port) {
/* skip if for a non-configured port */
@@ -3475,11 +3449,10 @@ try_intx:
#endif
lsb = irq_table[i].lsb;
handler = irq_table[i].handler;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d%s", dd->unit,
- irq_table[i].name);
+ ret = pci_request_irq(dd->pcidev, msixnum, handler,
+ NULL, arg, QIB_DRV_NAME "%d%s",
+ dd->unit,
+ irq_table[i].name);
} else {
unsigned ctxt;
@@ -3495,37 +3468,25 @@ try_intx:
#endif
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
- snprintf(dd->cspec->msix_entries[msixnum].name,
- sizeof(dd->cspec->msix_entries[msixnum].name)
- - 1,
- QIB_DRV_NAME "%d (kctx)", dd->unit);
+ ret = pci_request_irq(dd->pcidev, msixnum, handler,
+ NULL, arg,
+ QIB_DRV_NAME "%d (kctx)",
+ dd->unit);
}
- dd->cspec->msix_entries[msixnum].irq = pci_irq_vector(
- dd->pcidev, msixnum);
- if (dd->cspec->msix_entries[msixnum].irq < 0) {
- qib_dev_err(dd,
- "Couldn't get MSIx irq (vec=%d): %d\n",
- msixnum,
- dd->cspec->msix_entries[msixnum].irq);
- qib_7322_nomsix(dd);
- goto try_intx;
- }
- ret = request_irq(dd->cspec->msix_entries[msixnum].irq,
- handler, 0,
- dd->cspec->msix_entries[msixnum].name,
- arg);
if (ret) {
/*
* Shouldn't happen since the enable said we could
* have as many as we are trying to setup here.
*/
qib_dev_err(dd,
- "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
- msixnum,
- dd->cspec->msix_entries[msixnum].irq,
- ret);
- qib_7322_nomsix(dd);
+ "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
+ msixnum,
+ pci_irq_vector(dd->pcidev, msixnum),
+ ret);
+ qib_7322_free_irq(dd);
+ pci_alloc_irq_vectors(dd->pcidev, 1, 1,
+ PCI_IRQ_LEGACY);
goto try_intx;
}
dd->cspec->msix_entries[msixnum].arg = arg;
@@ -3541,8 +3502,8 @@ try_intx:
mask &= ~(1ULL << lsb);
redirect[reg] |= ((u64) msixnum) << sh;
}
- val = qib_read_kreg64(dd, 2 * msixnum + 1 +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+ qib_read_kreg64(dd, 2 * msixnum + 1 +
+ (QIB_7322_MsixTable_OFFS / sizeof(u64)));
if (firstcpu < nr_cpu_ids &&
zalloc_cpumask_var(
&dd->cspec->msix_entries[msixnum].mask,
@@ -3559,7 +3520,7 @@ try_intx:
dd->cspec->msix_entries[msixnum].mask);
}
irq_set_affinity_hint(
- dd->cspec->msix_entries[msixnum].irq,
+ pci_irq_vector(dd->pcidev, msixnum),
dd->cspec->msix_entries[msixnum].mask);
}
msixnum++;
@@ -3570,7 +3531,6 @@ try_intx:
dd->cspec->main_int_mask = mask;
tasklet_init(&dd->error_tasklet, qib_error_tasklet,
(unsigned long)dd);
-bail:;
}
/**
@@ -3674,8 +3634,9 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
/* no interrupts till re-initted */
qib_7322_set_intr_state(dd, 0);
+ qib_7322_free_irq(dd);
+
if (msix_entries) {
- qib_7322_nomsix(dd);
/* can be up to 512 bytes, too big for stack */
msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
sizeof(u64), GFP_KERNEL);
@@ -3765,11 +3726,11 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
write_7322_init_portregs(&dd->pport[i]);
write_7322_initregs(dd);
- if (qib_pcie_params(dd, dd->lbus_width,
- &dd->cspec->num_msix_entries))
+ if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
qib_dev_err(dd,
"Reset failed to setup PCIe or interrupts; continuing anyway\n");
+ dd->cspec->num_msix_entries = msix_entries;
qib_setup_7322_interrupt(dd, 1);
for (i = 0; i < dd->num_pports; ++i) {
@@ -5138,9 +5099,9 @@ done:
*
* called from add_timer
*/
-static void qib_get_7322_faststats(unsigned long opaque)
+static void qib_get_7322_faststats(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, stats_timer);
struct qib_pportdata *ppd;
unsigned long flags;
u64 traffic_wds;
@@ -5197,8 +5158,9 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
qib_devinfo(dd->pcidev,
"MSIx interrupt not detected, trying INTx interrupts\n");
- qib_7322_nomsix(dd);
- qib_enable_intx(dd);
+ qib_7322_free_irq(dd);
+ if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ qib_dev_err(dd, "Failed to enable INTx\n");
qib_setup_7322_interrupt(dd, 0);
return 1;
}
@@ -5396,16 +5358,11 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
static void autoneg_7322_work(struct work_struct *work)
{
struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u64 startms;
u32 i;
unsigned long flags;
ppd = container_of(work, struct qib_chippport_specific,
autoneg_work.work)->ppd;
- dd = ppd->dd;
-
- startms = jiffies_to_msecs(jiffies);
/*
* Busy wait for this first part, it should be at most a
@@ -6169,7 +6126,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
}
/* handle the txselect parameter changing */
-static int setup_txselect(const char *str, struct kernel_param *kp)
+static int setup_txselect(const char *str, const struct kernel_param *kp)
{
struct qib_devdata *dd;
unsigned long val;
@@ -6614,8 +6571,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
if (!qib_mini_init)
write_7322_init_portregs(ppd);
- setup_timer(&cp->chase_timer, reenable_chase,
- (unsigned long)ppd);
+ timer_setup(&cp->chase_timer, reenable_chase, 0);
ppd++;
}
@@ -6641,8 +6597,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
/* setup the stats timer; the add_timer is done at end of init */
- setup_timer(&dd->stats_timer, qib_get_7322_faststats,
- (unsigned long)dd);
+ timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
dd->ureg_align = 0x10000; /* 64KB alignment */
@@ -7845,13 +7800,12 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
{
struct qib_devdata *dd = ppd->dd;
int chan;
- u32 rbc;
for (chan = 0; chan < SERDES_CHANS; ++chan) {
ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
data, mask);
- rbc = ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- addr, 0, 0);
+ ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
+ 0, 0);
}
}
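
The 7322 conversion above drops the cached cspec->irq and msix_entries[i].irq fields in favour of the managed PCI IRQ helpers: pci_alloc_irq_vectors() selects MSI-X, MSI, or INTx, pci_request_irq() requests and names each vector, pci_irq_vector() maps a vector index to its Linux IRQ number, and pci_free_irq()/pci_free_irq_vectors() tear everything down. A hedged sketch of that flow with hypothetical foo_* names, not taken from this patch:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

static irqreturn_t foo_vec_handler(int irq, void *data)
{
	/* real handlers would inspect device state here */
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev, void *ctx, int want)
{
	int nvec, i, ret;

	/* fall back automatically: MSI-X, then MSI, then legacy INTx */
	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = pci_request_irq(pdev, i, foo_vec_handler, NULL, ctx,
				      "foo-vec%d", i);
		if (ret) {
			while (--i >= 0)
				pci_free_irq(pdev, i, ctx);
			pci_free_irq_vectors(pdev);
			return ret;
		}
		/* pci_irq_vector() replaces any cached ->irq field */
		pr_debug("vector %d -> irq %d\n", i, pci_irq_vector(pdev, i));
	}
	return nvec;
}
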
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index c5a4c65636d6..85dfbba427f6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -93,7 +93,7 @@ unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-static void verify_interrupt(unsigned long);
+static void verify_interrupt(struct timer_list *);
static struct idr qib_unit_table;
u32 qib_cpulist_count;
@@ -233,8 +233,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
spin_lock_init(&ppd->cc_shadow_lock);
init_waitqueue_head(&ppd->state_wait);
- setup_timer(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup,
- (unsigned long)ppd);
+ timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);
ppd->qib_wq = NULL;
ppd->ibport_data.pmastats =
@@ -428,8 +427,7 @@ static int loadtime_init(struct qib_devdata *dd)
qib_get_eeprom_info(dd);
/* setup time (don't start yet) to verify we got interrupt */
- setup_timer(&dd->intrchk_timer, verify_interrupt,
- (unsigned long)dd);
+ timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
done:
return ret;
}
@@ -493,9 +491,9 @@ static void enable_chip(struct qib_devdata *dd)
}
}
-static void verify_interrupt(unsigned long opaque)
+static void verify_interrupt(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *) opaque;
+ struct qib_devdata *dd = from_timer(dd, t, intrchk_timer);
u64 int_counter;
if (!dd)
@@ -753,8 +751,7 @@ done:
continue;
if (dd->flags & QIB_HAS_SEND_DMA)
ret = qib_setup_sdma(ppd);
- setup_timer(&ppd->hol_timer, qib_hol_event,
- (unsigned long)ppd);
+ timer_setup(&ppd->hol_timer, qib_hol_event, 0);
ppd->hol_state = QIB_HOL_UP;
}
@@ -815,23 +812,19 @@ static void qib_stop_timers(struct qib_devdata *dd)
struct qib_pportdata *ppd;
int pidx;
- if (dd->stats_timer.data) {
+ if (dd->stats_timer.function)
del_timer_sync(&dd->stats_timer);
- dd->stats_timer.data = 0;
- }
- if (dd->intrchk_timer.data) {
+ if (dd->intrchk_timer.function)
del_timer_sync(&dd->intrchk_timer);
- dd->intrchk_timer.data = 0;
- }
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- if (ppd->hol_timer.data)
+ if (ppd->hol_timer.function)
del_timer_sync(&ppd->hol_timer);
- if (ppd->led_override_timer.data) {
+ if (ppd->led_override_timer.function) {
del_timer_sync(&ppd->led_override_timer);
atomic_set(&ppd->led_override_timer_active, 0);
}
- if (ppd->symerr_clear_timer.data)
+ if (ppd->symerr_clear_timer.function)
del_timer_sync(&ppd->symerr_clear_timer);
}
}
@@ -1674,8 +1667,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
}
if (!rcd->rcvegrbuf_phys) {
rcd->rcvegrbuf_phys =
- kmalloc_node(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
+ kmalloc_array_node(chunk,
+ sizeof(rcd->rcvegrbuf_phys[0]),
+ GFP_KERNEL, rcd->node_id);
if (!rcd->rcvegrbuf_phys)
goto bail_rcvegrbuf;
}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a014fd4cd076..65c3b964ad1b 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -141,7 +141,7 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
qib_hol_up(ppd); /* useful only for 6120 now */
*ppd->statusp |=
QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
- qib_clear_symerror_on_linkup((unsigned long)ppd);
+ qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
ppd->lflags &= ~(QIBL_LINKINIT |
@@ -170,9 +170,9 @@ skip_ibchange:
signal_ib_event(ppd, ev);
}
-void qib_clear_symerror_on_linkup(unsigned long opaque)
+void qib_clear_symerror_on_linkup(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, symerr_clear_timer);
if (ppd->lflags & QIBL_LINKACTIVE)
return;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 82d9da9b6997..4845d000c22f 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -280,7 +280,7 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
{
struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 vendor, majrev, minrev;
+ u32 majrev, minrev;
unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
/* GUID 0 is illegal */
@@ -303,7 +303,6 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
minrev = dd->minrev;
nip->revision = cpu_to_be32((majrev << 16) | minrev);
nip->local_port_num = port;
- vendor = dd->vendorid;
nip->vendor_id[0] = QIB_SRC_OUI_1;
nip->vendor_id[1] = QIB_SRC_OUI_2;
nip->vendor_id[2] = QIB_SRC_OUI_3;
@@ -434,6 +433,7 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
/* Bad mkey not a violation below level 2 */
if (ibp->rvp.mkeyprot < 2)
break;
+ /* fall through */
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
if (ibp->rvp.mkey_violations != 0xFFFF)
@@ -2446,9 +2446,9 @@ bail:
return ret;
}
-static void xmit_wait_timer_func(unsigned long opaque)
+static void xmit_wait_timer_func(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, cong_stats.timer);
struct qib_devdata *dd = dd_from_ppd(ppd);
unsigned long flags;
u8 status;
@@ -2478,10 +2478,8 @@ void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
/* Initialize xmit_wait structure */
dd->pport[port_idx].cong_stats.counter = 0;
- init_timer(&dd->pport[port_idx].cong_stats.timer);
- dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
- dd->pport[port_idx].cong_stats.timer.data =
- (unsigned long)(&dd->pport[port_idx]);
+ timer_setup(&dd->pport[port_idx].cong_stats.timer,
+ xmit_wait_timer_func, 0);
dd->pport[port_idx].cong_stats.timer.expires = 0;
add_timer(&dd->pport[port_idx].cong_stats.timer);
}
@@ -2492,7 +2490,7 @@ void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
struct qib_devdata *dd = container_of(ibdev,
struct qib_devdata, verbs_dev);
- if (dd->pport[port_idx].cong_stats.timer.data)
+ if (dd->pport[port_idx].cong_stats.timer.function)
del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
if (dd->pport[port_idx].ibport_data.smi_ah)
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index d90403e31a9d..5ac7b31c346b 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -193,7 +193,7 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
* chip reset (the kernel PCI infrastructure doesn't yet handle that
* correctly.
*/
-static void qib_msi_setup(struct qib_devdata *dd, int pos)
+static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
{
struct pci_dev *pdev = dd->pcidev;
u16 control;
@@ -208,64 +208,39 @@ static void qib_msi_setup(struct qib_devdata *dd, int pos)
&dd->msi_data);
}
-static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec)
-{
- unsigned int flags = PCI_IRQ_LEGACY;
-
- /* Check our capabilities */
- if (dd->pcidev->msix_cap) {
- flags |= PCI_IRQ_MSIX;
- } else {
- if (dd->pcidev->msi_cap) {
- flags |= PCI_IRQ_MSI;
- /* Get msi_lo and msi_hi */
- qib_msi_setup(dd, dd->pcidev->msi_cap);
- }
- }
-
- if (!(flags & (PCI_IRQ_MSIX | PCI_IRQ_MSI)))
- qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
-
- return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
-}
-
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
u16 linkstat, speed;
int nvec;
int maxvec;
- int ret = 0;
+ unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;
if (!pci_is_pcie(dd->pcidev)) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
dd->lbus_width = 1;
dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- ret = -1;
+ nvec = -1;
goto bail;
}
+ if (dd->flags & QIB_HAS_INTX)
+ flags |= PCI_IRQ_LEGACY;
maxvec = (nent && *nent) ? *nent : 1;
- nvec = qib_allocate_irqs(dd, maxvec);
- if (nvec < 0) {
- ret = nvec;
+ nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
+ if (nvec < 0)
goto bail;
- }
/*
- * If nent exists, make sure to record how many vectors were allocated
+ * If nent exists, make sure to record how many vectors were allocated.
+ * If msix_enabled is false, return 0 so the fallback code works
+ * correctly.
*/
- if (nent) {
- *nent = nvec;
+ if (nent)
+ *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
- /*
- * If we requested (nent) MSIX, but msix_enabled is not set,
- * pci_alloc_irq_vectors() enabled INTx.
- */
- if (!dd->pcidev->msix_enabled)
- qib_dev_err(dd,
- "no msix vectors allocated, using INTx\n");
- }
+ if (dd->pcidev->msi_enabled)
+ qib_cache_msi_info(dd, dd->pcidev->msi_cap);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
/*
@@ -306,7 +281,21 @@ bail:
/* fill in string, even on errors */
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
- return ret;
+ return nvec < 0 ? nvec : 0;
+}
+
+/**
+ * qib_free_irq - Cleanup INTx and MSI interrupts
+ * @dd: valid pointer to qib dev data
+ *
+ * Since cleanup for INTx and MSI interrupts is trivial, have a common
+ * routine.
+ *
+ */
+void qib_free_irq(struct qib_devdata *dd)
+{
+ pci_free_irq(dd->pcidev, 0, dd);
+ pci_free_irq_vectors(dd->pcidev);
}
/*
@@ -351,10 +340,10 @@ int qib_reinit_intr(struct qib_devdata *dd)
dd->msi_data);
ret = 1;
bail:
- if (!ret && (dd->flags & QIB_HAS_INTX)) {
- qib_enable_intx(dd);
+ qib_free_irq(dd);
+
+ if (!ret && (dd->flags & QIB_HAS_INTX))
ret = 1;
- }
/* and now set the pci master bit again */
pci_set_master(dd->pcidev);
@@ -363,56 +352,6 @@ bail:
}
/*
- * Disable msi interrupt if enabled, and clear msi_lo.
- * This is used primarily for the fallback to INTx, but
- * is also used in reinit after reset, and during cleanup.
- */
-void qib_nomsi(struct qib_devdata *dd)
-{
- dd->msi_lo = 0;
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Same as qib_nosmi, but for MSIx.
- */
-void qib_nomsix(struct qib_devdata *dd)
-{
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Similar to pci_intx(pdev, 1), except that we make sure
- * msi(x) is off.
- */
-void qib_enable_intx(struct qib_devdata *dd)
-{
- u16 cw, new;
- int pos;
- struct pci_dev *pdev = dd->pcidev;
-
- if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
-
- pos = pdev->msi_cap;
- if (pos) {
- /* then turn off MSI */
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
- new = cw & ~PCI_MSI_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
- }
- pos = pdev->msix_cap;
- if (pos) {
- /* then turn off MSIx */
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
- new = cw & ~PCI_MSIX_FLAGS_ENABLE;
- if (new != cw)
- pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
- }
-}
-
-/*
* These two routines are helper routines for the device reset code
* to move all the pcie code out of the chip-specific driver code.
*/
@@ -458,7 +397,6 @@ MODULE_PARM_DESC(pcie_coalesce, "tune PCIe colescing on some Intel chipsets");
*/
static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
- int r;
struct pci_dev *parent;
u16 devid;
u32 mask, bits, val;
@@ -513,7 +451,7 @@ static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
pci_read_config_dword(parent, 0x48, &val);
val &= ~mask;
val |= bits;
- r = pci_write_config_dword(parent, 0x48, val);
+ pci_write_config_dword(parent, 0x48, val);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e9a91736b12d..8f5754fb8579 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1869,7 +1869,7 @@ send_middle:
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto no_immediate_data;
- /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
+ /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 53efbb0b40c4..9a37e844d4c8 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index c72775f27212..12caf3db8c34 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -755,7 +755,6 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
int addr;
int ret;
unsigned long flags;
- const char *op;
/* Pick appropriate transaction reg and "Chip select" for this serdes */
switch (sdnum) {
@@ -775,7 +774,6 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
return -1;
}
- op = rd_notwr ? "Rd" : "Wr";
spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
owned = epb_access(dd, sdnum, 1);
@@ -1390,11 +1388,11 @@ module_param_named(relock_by_timer, qib_relock_by_timer, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-static void qib_run_relock(unsigned long opaque)
+static void qib_run_relock(struct timer_list *t)
{
- struct qib_devdata *dd = (struct qib_devdata *)opaque;
+ struct qib_chip_specific *cs = from_timer(cs, t, relock_timer);
+ struct qib_devdata *dd = cs->dd;
struct qib_pportdata *ppd = dd->pport;
- struct qib_chip_specific *cs = dd->cspec;
int timeoff;
/*
@@ -1440,9 +1438,7 @@ void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
/* If timer has not yet been started, do so. */
if (!cs->relock_timer_active) {
cs->relock_timer_active = 1;
- init_timer(&cs->relock_timer);
- cs->relock_timer.function = qib_run_relock;
- cs->relock_timer.data = (unsigned long) dd;
+ timer_setup(&cs->relock_timer, qib_run_relock, 0);
cs->relock_interval = timeout;
cs->relock_timer.expires = jiffies + timeout;
add_timer(&cs->relock_timer);
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 891873b38a1e..c3690bd51582 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -808,7 +808,7 @@ void __qib_sdma_process_event(struct qib_pportdata *ppd,
* bringing the link up with traffic active on
* 7220, e.g. */
ss->go_s99_running = 1;
- /* fall through and start dma engine */
+ /* fall through -- and start dma engine */
case qib_sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&ppd->sdma_state);
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
index eface3b3dacf..29785eb84646 100644
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -179,8 +179,6 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
pppd[i] = NULL;
for (i = 0; i < cnt; i++) {
- int which;
-
if (!test_bit(i, mask))
continue;
/*
@@ -201,9 +199,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
(!test_bit(i << 1, dd->pioavailkernel) &&
find_ctxt(dd, i))) {
__set_bit(i, dd->pio_need_disarm);
- which = 0;
} else {
- which = 1;
dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
}
spin_unlock_irqrestore(&dd->pioavail_lock, flags);
@@ -552,9 +548,9 @@ void qib_hol_up(struct qib_pportdata *ppd)
/*
* This is only called via the timer.
*/
-void qib_hol_event(unsigned long opaque)
+void qib_hol_event(struct timer_list *t)
{
- struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+ struct qib_pportdata *ppd = from_timer(ppd, t, hol_timer);
/* If hardware error, etc, skip. */
if (!(ppd->dd->flags & QIB_INITTED))
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 498e2202e72c..bddcc37ace44 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index be4907453ac4..15962ed193ce 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
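
The qib_rc/ruc/uc/ud hunks replace ACCESS_ONCE() with READ_ONCE() when sampling ring indices that another context updates; both force a single, untorn load, READ_ONCE() being the current spelling. A minimal illustrative sketch (hypothetical foo_ring, not part of this patch):

#include <linux/compiler.h>
#include <linux/types.h>

struct foo_ring {
	u32 head;	/* advanced by the producer */
	u32 tail;	/* owned by the consumer */
};

static bool foo_ring_empty(struct foo_ring *r)
{
	/* read the producer's index exactly once for this comparison */
	return r->tail == READ_ONCE(r->head);
}
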
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9d92aeb8d9a1..c55000501582 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -389,9 +389,9 @@ drop:
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
*/
-static void mem_timer(unsigned long data)
+static void mem_timer(struct timer_list *t)
{
- struct qib_ibdev *dev = (struct qib_ibdev *) data;
+ struct qib_ibdev *dev = from_timer(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct qib_qp_priv *priv = NULL;
@@ -701,7 +701,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
*/
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
- struct rvt_qp *qp, *nqp;
+ struct rvt_qp *qp;
struct qib_qp_priv *qpp, *nqpp;
struct rvt_qp *qps[20];
struct qib_ibdev *dev;
@@ -714,7 +714,6 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
/* Search wait list for first QP wanting DMA descriptors. */
list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
qp = qpp->owner;
- nqp = nqpp->owner;
if (qp->port_num != ppd->port)
continue;
if (n == ARRAY_SIZE(qps))
@@ -1532,7 +1531,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+ timer_setup(&dev->mem_timer, mem_timer, 0);
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
diff --git a/drivers/infiniband/hw/usnic/Makefile b/drivers/infiniband/hw/usnic/Makefile
index 99fb2db47cd5..94ae7a1a6950 100644
--- a/drivers/infiniband/hw/usnic/Makefile
+++ b/drivers/infiniband/hw/usnic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/net/ethernet/cisco/enic
obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 092d4e11a633..912d8ef04352 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -392,14 +392,12 @@ int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
void *data)
{
int status = 0;
- int vnic_idx;
struct ib_event ib_event;
enum ib_qp_state old_state;
struct usnic_transport_spec *trans_spec;
struct usnic_ib_qp_grp_flow *qp_flow;
old_state = qp_grp->state;
- vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
trans_spec = (struct usnic_transport_spec *) data;
spin_lock(&qp_grp->lock);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
index b1458be1d402..a8a2314c9531 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -84,30 +84,7 @@ struct usnic_ib_qp_grp_flow {
char dentry_name[32];
};
-static const struct
-usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
- { /*USNIC_TRANSPORT_UNKNOWN*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
- { /*USNIC_TRANSPORT_IPV4_UDP*/
- .resources = {
- {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
- {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
- },
- },
-};
+extern const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX];
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state);
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 32956f9f5715..685ef2293cb8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -43,6 +43,7 @@
#include "usnic_ib_qp_grp.h"
#include "usnic_vnic.h"
#include "usnic_ib_verbs.h"
+#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
#include "usnic_ib_sysfs.h"
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index e4113ef09315..aa2456a4f9bd 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -42,6 +42,7 @@
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
+#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
@@ -50,6 +51,30 @@
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
+const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
+ { /*USNIC_TRANSPORT_UNKNOWN*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_ROCE_CUSTOM*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+ { /*USNIC_TRANSPORT_IPV4_UDP*/
+ .resources = {
+ {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,},
+ {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,},
+ },
+ },
+};
+
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
*fw_ver = *((u64 *)fw_ver_str);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
index 0194ed19f542..2f52e0a044a0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/Makefile
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
-vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_srq.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 984aa3484928..63bc2efc34eb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -162,6 +162,22 @@ struct pvrdma_ah {
struct pvrdma_av av;
};
+struct pvrdma_srq {
+ struct ib_srq ibsrq;
+ int offset;
+ spinlock_t lock; /* SRQ lock. */
+ int wqe_cnt;
+ int wqe_size;
+ int max_gs;
+ struct ib_umem *umem;
+ struct pvrdma_ring_state *ring;
+ struct pvrdma_page_dir pdir;
+ u32 srq_handle;
+ int npages;
+ refcount_t refcnt;
+ wait_queue_head_t wait;
+};
+
struct pvrdma_qp {
struct ib_qp ibqp;
u32 qp_handle;
@@ -171,6 +187,7 @@ struct pvrdma_qp {
struct ib_umem *rumem;
struct ib_umem *sumem;
struct pvrdma_page_dir pdir;
+ struct pvrdma_srq *srq;
int npages;
int npages_send;
int npages_recv;
@@ -210,6 +227,8 @@ struct pvrdma_dev {
struct pvrdma_page_dir cq_pdir;
struct pvrdma_cq **cq_tbl;
spinlock_t cq_tbl_lock;
+ struct pvrdma_srq **srq_tbl;
+ spinlock_t srq_tbl_lock;
struct pvrdma_qp **qp_tbl;
spinlock_t qp_tbl_lock;
struct pvrdma_uar_table uar_table;
@@ -221,6 +240,7 @@ struct pvrdma_dev {
bool ib_active;
atomic_t num_qps;
atomic_t num_cqs;
+ atomic_t num_srqs;
atomic_t num_pds;
atomic_t num_ahs;
@@ -256,6 +276,11 @@ static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
return container_of(ibcq, struct pvrdma_cq, ibcq);
}
+static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct pvrdma_srq, ibsrq);
+}
+
static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct pvrdma_user_mr, ibmr);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index df0a6b525021..6fd5a8f4e2f6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -339,6 +339,10 @@ enum {
PVRDMA_CMD_DESTROY_UC,
PVRDMA_CMD_CREATE_BIND,
PVRDMA_CMD_DESTROY_BIND,
+ PVRDMA_CMD_CREATE_SRQ,
+ PVRDMA_CMD_MODIFY_SRQ,
+ PVRDMA_CMD_QUERY_SRQ,
+ PVRDMA_CMD_DESTROY_SRQ,
PVRDMA_CMD_MAX,
};
@@ -361,6 +365,10 @@ enum {
PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+ PVRDMA_CMD_CREATE_SRQ_RESP,
+ PVRDMA_CMD_MODIFY_SRQ_RESP,
+ PVRDMA_CMD_QUERY_SRQ_RESP,
+ PVRDMA_CMD_DESTROY_SRQ_RESP,
PVRDMA_CMD_MAX_RESP,
};
@@ -495,6 +503,46 @@ struct pvrdma_cmd_destroy_cq {
u8 reserved[4];
};
+struct pvrdma_cmd_create_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u64 pdir_dma;
+ u32 pd_handle;
+ u32 nchunks;
+ struct pvrdma_srq_attr attrs;
+ u8 srq_type;
+ u8 reserved[7];
+};
+
+struct pvrdma_cmd_create_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ u32 srqn;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_modify_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u32 attr_mask;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_query_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
+struct pvrdma_cmd_query_srq_resp {
+ struct pvrdma_cmd_resp_hdr hdr;
+ struct pvrdma_srq_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_srq {
+ struct pvrdma_cmd_hdr hdr;
+ u32 srq_handle;
+ u8 reserved[4];
+};
+
struct pvrdma_cmd_create_qp {
struct pvrdma_cmd_hdr hdr;
u64 pdir_dma;
@@ -594,6 +642,10 @@ union pvrdma_cmd_req {
struct pvrdma_cmd_destroy_qp destroy_qp;
struct pvrdma_cmd_create_bind create_bind;
struct pvrdma_cmd_destroy_bind destroy_bind;
+ struct pvrdma_cmd_create_srq create_srq;
+ struct pvrdma_cmd_modify_srq modify_srq;
+ struct pvrdma_cmd_query_srq query_srq;
+ struct pvrdma_cmd_destroy_srq destroy_srq;
};
union pvrdma_cmd_resp {
@@ -608,6 +660,8 @@ union pvrdma_cmd_resp {
struct pvrdma_cmd_create_qp_resp create_qp_resp;
struct pvrdma_cmd_query_qp_resp query_qp_resp;
struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+ struct pvrdma_cmd_create_srq_resp create_srq_resp;
+ struct pvrdma_cmd_query_srq_resp query_srq_resp;
};
#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 6ce709a67959..1f4e18717a00 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -118,6 +118,7 @@ static int pvrdma_init_device(struct pvrdma_dev *dev)
spin_lock_init(&dev->cmd_lock);
sema_init(&dev->cmd_sema, 1);
atomic_set(&dev->num_qps, 0);
+ atomic_set(&dev->num_srqs, 0);
atomic_set(&dev->num_cqs, 0);
atomic_set(&dev->num_pds, 0);
atomic_set(&dev->num_ahs, 0);
@@ -254,9 +255,32 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
goto err_cq_free;
spin_lock_init(&dev->qp_tbl_lock);
+ /* Check if SRQ is supported by backend */
+ if (dev->dsr->caps.max_srq) {
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+
+ dev->ib_dev.create_srq = pvrdma_create_srq;
+ dev->ib_dev.modify_srq = pvrdma_modify_srq;
+ dev->ib_dev.query_srq = pvrdma_query_srq;
+ dev->ib_dev.destroy_srq = pvrdma_destroy_srq;
+ dev->ib_dev.post_srq_recv = pvrdma_post_srq_recv;
+
+ dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
+ sizeof(struct pvrdma_srq *),
+ GFP_KERNEL);
+ if (!dev->srq_tbl)
+ goto err_qp_free;
+ }
+ spin_lock_init(&dev->srq_tbl_lock);
+
ret = ib_register_device(&dev->ib_dev, NULL);
if (ret)
- goto err_qp_free;
+ goto err_srq_free;
for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
ret = device_create_file(&dev->ib_dev.dev,
@@ -271,6 +295,8 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
err_class:
ib_unregister_device(&dev->ib_dev);
+err_srq_free:
+ kfree(dev->srq_tbl);
err_qp_free:
kfree(dev->qp_tbl);
err_cq_free:
@@ -353,6 +379,35 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
}
}
+static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
+{
+ struct pvrdma_srq *srq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ if (dev->srq_tbl)
+ srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
+ else
+ srq = NULL;
+ if (srq)
+ refcount_inc(&srq->refcnt);
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ if (srq && srq->ibsrq.event_handler) {
+ struct ib_srq *ibsrq = &srq->ibsrq;
+ struct ib_event e;
+
+ e.device = ibsrq->device;
+ e.element.srq = ibsrq;
+ e.event = type; /* 1:1 mapping for now. */
+ ibsrq->event_handler(&e, ibsrq->srq_context);
+ }
+ if (srq) {
+ if (refcount_dec_and_test(&srq->refcnt))
+ wake_up(&srq->wait);
+ }
+}
+
static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
enum ib_event_type event)
{
@@ -423,6 +478,7 @@ static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
case PVRDMA_EVENT_SRQ_ERR:
case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+ pvrdma_srq_event(dev, eqe->info, eqe->type);
break;
case PVRDMA_EVENT_PORT_ACTIVE:
@@ -1059,6 +1115,7 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
iounmap(dev->regs);
kfree(dev->sgid_tbl);
kfree(dev->cq_tbl);
+ kfree(dev->srq_tbl);
kfree(dev->qp_tbl);
pvrdma_uar_table_cleanup(dev);
iounmap(dev->driver_uar.map);
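
pvrdma_srq_event() above pins the SRQ with refcount_inc() before invoking the user's event handler and wakes the destroy path once the last temporary reference drops. A rough sketch of that pin-while-dispatching pattern with hypothetical foo_* names; the creator holding an initial reference is an assumption for illustration, not spelled out in the hunk:

#include <linux/refcount.h>
#include <linux/wait.h>

struct foo_obj {
	refcount_t refcnt;		/* assumed: one reference held by the creator */
	wait_queue_head_t wait;
};

static void foo_obj_init(struct foo_obj *obj)
{
	refcount_set(&obj->refcnt, 1);
	init_waitqueue_head(&obj->wait);
}

static void foo_obj_event(struct foo_obj *obj)
{
	refcount_inc(&obj->refcnt);	/* pin across the callback */
	/* ... dispatch the event while the object cannot go away ... */
	if (refcount_dec_and_test(&obj->refcnt))
		wake_up(&obj->wait);
}

static void foo_obj_destroy(struct foo_obj *obj)
{
	/* drop the creator's reference, then wait out in-flight events */
	if (!refcount_dec_and_test(&obj->refcnt))
		wait_event(obj->wait, !refcount_read(&obj->refcnt));
	/* the object may be freed now */
}
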
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index ed34d5a581fa..10420a18d02f 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -198,6 +198,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct pvrdma_create_qp ucmd;
unsigned long flags;
int ret;
+ bool is_srq = !!init_attr->srq;
if (init_attr->create_flags) {
dev_warn(&dev->pdev->dev,
@@ -214,6 +215,12 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (is_srq && !dev->dsr->caps.max_srq) {
+ dev_warn(&dev->pdev->dev,
+ "SRQs not supported by device\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
return ERR_PTR(-ENOMEM);
@@ -252,26 +259,36 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
goto err_qp;
}
- /* set qp->sq.wqe_cnt, shift, buf_size.. */
- qp->rumem = ib_umem_get(pd->uobject->context,
- ucmd.rbuf_addr,
- ucmd.rbuf_size, 0, 0);
- if (IS_ERR(qp->rumem)) {
- ret = PTR_ERR(qp->rumem);
- goto err_qp;
+ if (!is_srq) {
+ /* set qp->sq.wqe_cnt, shift, buf_size.. */
+ qp->rumem = ib_umem_get(pd->uobject->context,
+ ucmd.rbuf_addr,
+ ucmd.rbuf_size, 0, 0);
+ if (IS_ERR(qp->rumem)) {
+ ret = PTR_ERR(qp->rumem);
+ goto err_qp;
+ }
+ qp->srq = NULL;
+ } else {
+ qp->rumem = NULL;
+ qp->srq = to_vsrq(init_attr->srq);
}
qp->sumem = ib_umem_get(pd->uobject->context,
ucmd.sbuf_addr,
ucmd.sbuf_size, 0, 0);
if (IS_ERR(qp->sumem)) {
- ib_umem_release(qp->rumem);
+ if (!is_srq)
+ ib_umem_release(qp->rumem);
ret = PTR_ERR(qp->sumem);
goto err_qp;
}
qp->npages_send = ib_umem_page_count(qp->sumem);
- qp->npages_recv = ib_umem_page_count(qp->rumem);
+ if (!is_srq)
+ qp->npages_recv = ib_umem_page_count(qp->rumem);
+ else
+ qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
qp->is_kernel = true;
@@ -312,12 +329,14 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
if (!qp->is_kernel) {
pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
- pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
- qp->npages_send);
+ if (!is_srq)
+ pvrdma_page_dir_insert_umem(&qp->pdir,
+ qp->rumem,
+ qp->npages_send);
} else {
/* Ring state is always the first page. */
qp->sq.ring = qp->pdir.pages[0];
- qp->rq.ring = &qp->sq.ring[1];
+ qp->rq.ring = is_srq ? NULL : &qp->sq.ring[1];
}
break;
default:
@@ -333,6 +352,10 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+ if (is_srq)
+ cmd->srq_handle = to_vsrq(init_attr->srq)->srq_handle;
+ else
+ cmd->srq_handle = 0;
cmd->max_send_wr = init_attr->cap.max_send_wr;
cmd->max_recv_wr = init_attr->cap.max_recv_wr;
cmd->max_send_sge = init_attr->cap.max_send_sge;
@@ -340,6 +363,8 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
cmd->max_inline_data = init_attr->cap.max_inline_data;
cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+ cmd->is_srq = is_srq;
+ cmd->lkey = 0;
cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
cmd->total_chunks = qp->npages;
cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES;
@@ -815,6 +840,12 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
return -EINVAL;
}
+ if (qp->srq) {
+ dev_warn(&dev->pdev->dev, "QP associated with SRQ\n");
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&qp->rq.lock, flags);
while (wr) {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
new file mode 100644
index 000000000000..826ccb864596
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016-2017 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ /* No support for kernel clients. */
+ return -EOPNOTSUPP;
+}
+
+/**
+ * pvrdma_query_srq - query shared receive queue
+ * @ibsrq: the shared receive queue to query
+ * @srq_attr: attributes to query and return to client
+ *
+ * @return: 0 for success, otherwise returns an errno.
+ */
+int pvrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ struct pvrdma_srq *srq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_query_srq *cmd = &req.query_srq;
+ struct pvrdma_cmd_query_srq_resp *resp = &rsp.query_srq_resp;
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_QUERY_SRQ;
+ cmd->srq_handle = srq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not query shared receive queue, error: %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ srq_attr->srq_limit = resp->attrs.srq_limit;
+ srq_attr->max_wr = resp->attrs.max_wr;
+ srq_attr->max_sge = resp->attrs.max_sge;
+
+ return 0;
+}
+
+/**
+ * pvrdma_create_srq - create shared receive queue
+ * @pd: protection domain
+ * @init_attr: shared receive queue attributes
+ * @udata: user data
+ *
+ * @return: the ib_srq pointer on success, otherwise returns an errno.
+ */
+struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct pvrdma_srq *srq = NULL;
+ struct pvrdma_dev *dev = to_vdev(pd->device);
+ union pvrdma_cmd_req req;
+ union pvrdma_cmd_resp rsp;
+ struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
+ struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+ struct pvrdma_create_srq ucmd;
+ unsigned long flags;
+ int ret;
+
+ if (!(pd->uobject && udata)) {
+ /* No support for kernel clients. */
+ dev_warn(&dev->pdev->dev,
+ "no shared receive queue support for kernel client\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->srq_type != IB_SRQT_BASIC) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue type %d not supported\n",
+ init_attr->srq_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (init_attr->attr.max_wr > dev->dsr->caps.max_srq_wr ||
+ init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
+ dev_warn(&dev->pdev->dev,
+ "shared receive queue size invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
+ return ERR_PTR(-ENOMEM);
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ ret = -ENOMEM;
+ goto err_srq;
+ }
+
+ spin_lock_init(&srq->lock);
+ refcount_set(&srq->refcnt, 1);
+ init_waitqueue_head(&srq->wait);
+
+ dev_dbg(&dev->pdev->dev,
+ "create shared receive queue from user space\n");
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+ ret = -EFAULT;
+ goto err_srq;
+ }
+
+ srq->umem = ib_umem_get(pd->uobject->context,
+ ucmd.buf_addr,
+ ucmd.buf_size, 0, 0);
+ if (IS_ERR(srq->umem)) {
+ ret = PTR_ERR(srq->umem);
+ goto err_srq;
+ }
+
+ srq->npages = ib_umem_page_count(srq->umem);
+
+ if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ dev_warn(&dev->pdev->dev,
+ "overflow pages in shared receive queue\n");
+ ret = -EINVAL;
+ goto err_umem;
+ }
+
+ ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false);
+ if (ret) {
+ dev_warn(&dev->pdev->dev,
+ "could not allocate page directory\n");
+ goto err_umem;
+ }
+
+ pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
+ cmd->srq_type = init_attr->srq_type;
+ cmd->nchunks = srq->npages;
+ cmd->pd_handle = to_vpd(pd)->pd_handle;
+ cmd->attrs.max_wr = init_attr->attr.max_wr;
+ cmd->attrs.max_sge = init_attr->attr.max_sge;
+ cmd->attrs.srq_limit = init_attr->attr.srq_limit;
+ cmd->pdir_dma = srq->pdir.dir_dma;
+
+ ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_SRQ_RESP);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not create shared receive queue, error: %d\n",
+ ret);
+ goto err_page_dir;
+ }
+
+ srq->srq_handle = resp->srqn;
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ /* Copy udata back. */
+ if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+ dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
+ pvrdma_destroy_srq(&srq->ibsrq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &srq->ibsrq;
+
+err_page_dir:
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+err_umem:
+ ib_umem_release(srq->umem);
+err_srq:
+ kfree(srq);
+ atomic_dec(&dev->num_srqs);
+
+ return ERR_PTR(ret);
+}
+
+static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->srq_tbl_lock, flags);
+ dev->srq_tbl[srq->srq_handle] = NULL;
+ spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
+
+ refcount_dec(&srq->refcnt);
+ wait_event(srq->wait, !refcount_read(&srq->refcnt));
+
+ /* There is no support for kernel clients, so this is safe. */
+ ib_umem_release(srq->umem);
+
+ pvrdma_page_dir_cleanup(dev, &srq->pdir);
+
+ kfree(srq);
+
+ atomic_dec(&dev->num_srqs);
+}
+
+/**
+ * pvrdma_destroy_srq - destroy shared receive queue
+ * @srq: the shared receive queue to destroy
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_srq(struct ib_srq *srq)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(srq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_destroy_srq *cmd = &req.destroy_srq;
+ struct pvrdma_dev *dev = to_vdev(srq->device);
+ int ret;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_DESTROY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0)
+ dev_warn(&dev->pdev->dev,
+ "destroy shared receive queue failed, error: %d\n",
+ ret);
+
+ pvrdma_free_srq(dev, vsrq);
+
+ return 0;
+}
+
+/**
+ * pvrdma_modify_srq - modify shared receive queue attributes
+ * @ibsrq: the shared receive queue to modify
+ * @attr: the shared receive queue's new attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct pvrdma_srq *vsrq = to_vsrq(ibsrq);
+ union pvrdma_cmd_req req;
+ struct pvrdma_cmd_modify_srq *cmd = &req.modify_srq;
+ struct pvrdma_dev *dev = to_vdev(ibsrq->device);
+ int ret;
+
+ /* Only support SRQ limit. */
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return -EINVAL;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.cmd = PVRDMA_CMD_MODIFY_SRQ;
+ cmd->srq_handle = vsrq->srq_handle;
+ cmd->attrs.srq_limit = attr->srq_limit;
+ cmd->attr_mask = attr_mask;
+
+ ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&dev->pdev->dev,
+ "could not modify shared receive queue, error: %d\n",
+ ret);
+
+ return -EINVAL;
+ }
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 48776f5ffb0e..16b96616ef7e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -85,6 +85,9 @@ int pvrdma_query_device(struct ib_device *ibdev,
props->max_sge = dev->dsr->caps.max_sge;
props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
dev->dsr->caps.max_sge_rd);
+ props->max_srq = dev->dsr->caps.max_srq;
+ props->max_srq_wr = dev->dsr->caps.max_srq_wr;
+ props->max_srq_sge = dev->dsr->caps.max_srq_sge;
props->max_cq = dev->dsr->caps.max_cq;
props->max_cqe = dev->dsr->caps.max_cqe;
props->max_mr = dev->dsr->caps.max_mr;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 002a9b066e70..b7b25728a7e5 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -324,6 +324,13 @@ enum pvrdma_mw_type {
PVRDMA_MW_TYPE_2 = 2,
};
+struct pvrdma_srq_attr {
+ u32 max_wr;
+ u32 max_sge;
+ u32 srq_limit;
+ u32 reserved;
+};
+
struct pvrdma_qp_attr {
enum pvrdma_qp_state qp_state;
enum pvrdma_qp_state cur_qp_state;
@@ -420,6 +427,17 @@ int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int pvrdma_destroy_ah(struct ib_ah *ah);
+
+struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int pvrdma_destroy_srq(struct ib_srq *srq);
+int pvrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+
struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index fdd001ce13d8..2b5513da7e83 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
depends on 64BIT
+ depends on PCI
select DMA_VIRT_OPS
---help---
This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 1f12b69a0d07..b3a38c5e4cad 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -351,7 +351,7 @@ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int last = 0;
int ret = 0;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ if (ibqp->qp_num <= 1)
return -EINVAL;
spin_lock_irq(&ibp->lock);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22df09ae809e..9177df60742a 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -57,7 +57,7 @@
#include "vt.h"
#include "trace.h"
-static void rvt_rc_timeout(unsigned long arg);
+static void rvt_rc_timeout(struct timer_list *t);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -238,7 +238,7 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
rdi->qp_dev->qp_table =
- kmalloc_node(rdi->qp_dev->qp_table_size *
+ kmalloc_array_node(rdi->qp_dev->qp_table_size,
sizeof(*rdi->qp_dev->qp_table),
GFP_KERNEL, rdi->dparms.node);
if (!rdi->qp_dev->qp_table)
@@ -717,7 +717,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
/* take qp out the hash and wait for it to be unused */
rvt_remove_qp(rdi, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
/* grab the lock b/c it was locked at call time */
spin_lock_irq(&qp->r_lock);
@@ -807,6 +806,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->port_num == 0 ||
init_attr->port_num > ibpd->device->phys_port_cnt)
return ERR_PTR(-EINVAL);
+ /* fall through */
case IB_QPT_UC:
case IB_QPT_RC:
case IB_QPT_UD:
@@ -845,7 +845,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_qp;
}
/* initialize timers needed for rc qp */
- setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
+ timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
qp->s_rnr_timer.function = rvt_rc_rnr_retry;
@@ -894,8 +894,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
atomic_set(&qp->refcount, 0);
atomic_set(&qp->local_ops_pending, 0);
init_waitqueue_head(&qp->wait);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
INIT_LIST_HEAD(&qp->rspwait);
qp->state = IB_QPS_RESET;
qp->s_wq = swq;
@@ -1073,7 +1071,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
rdi->driver_f.notify_error_qp(qp);
/* Schedule the sending tasklet to drain the send work queue. */
- if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ if (READ_ONCE(qp->s_last) != qp->s_head)
rdi->driver_f.schedule_send(qp);
rvt_clear_mr_refs(qp, 0);
@@ -1443,6 +1441,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
@@ -1686,7 +1685,7 @@ static inline int rvt_qp_is_avail(
if (likely(qp->s_avail))
return 0;
smp_read_barrier_depends(); /* see rc.c */
- slast = ACCESS_ONCE(qp->s_last);
+ slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
else
@@ -1917,7 +1916,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* ahead and kick the send engine into gear. Otherwise we will always
* just schedule the send to happen later.
*/
- call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
for (; wr; wr = wr->next) {
err = rvt_post_one_wr(qp, wr, &call_send);
@@ -2132,9 +2131,9 @@ EXPORT_SYMBOL(rvt_del_timers_sync);
/**
* This is called from s_timer for missing responses.
*/
-static void rvt_rc_timeout(unsigned long arg)
+static void rvt_rc_timeout(struct timer_list *t)
{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
+ struct rvt_qp *qp = from_timer(qp, t, s_timer);
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
unsigned long flags;
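
The setup_timer()/init_timer() removals above are part of the timer_list API conversion: the callback now receives the struct timer_list pointer and recovers its containing object with from_timer() instead of casting an unsigned long back. A minimal sketch of the pattern, with made-up my_qp/my_rc_timeout names rather than the rdmavt ones:

#include <linux/timer.h>

struct my_qp {
	struct timer_list s_timer;
	/* ... */
};

/* New-style callback: from_timer() is container_of() keyed on the
 * timer_list member, so no opaque data pointer is needed. */
static void my_rc_timeout(struct timer_list *t)
{
	struct my_qp *qp = from_timer(qp, t, s_timer);

	/* handle the retransmit timeout for qp here */
	(void)qp;
}

static void my_qp_timer_init(struct my_qp *qp)
{
	/* Previously: setup_timer(&qp->s_timer, cb, (unsigned long)qp); */
	timer_setup(&qp->s_timer, my_rc_timeout, 0);
}
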
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 3f12beb7076f..66af72dca759 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o
rdma_rxe-y := \
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 9eb12c2e3c74..6cdc40ed8a9f 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -136,9 +136,9 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-void retransmit_timer(unsigned long data)
+void retransmit_timer(struct timer_list *t)
{
- struct rxe_qp *qp = (struct rxe_qp *)data;
+ struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
if (qp->valid) {
qp->comp.timeout = 1;
@@ -270,8 +270,8 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
if ((syn & AETH_TYPE_MASK) != AETH_ACK)
return COMPST_ERROR;
- /* Fall through (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE
- * doesn't have an AETH)
+ /* fall through */
+ /* (IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE doesn't have an AETH)
*/
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 77b3ed0df936..d7472a442a2c 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -218,8 +218,8 @@ static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
qp->resp.res_head = 0;
}
-void retransmit_timer(unsigned long data);
-void rnr_nak_timer(unsigned long data);
+void retransmit_timer(struct timer_list *t);
+void rnr_nak_timer(struct timer_list *t);
/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index c1b5f38f31a5..b4a8acc7bb7d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -394,21 +394,25 @@ void *rxe_alloc(struct rxe_pool *pool)
kref_get(&pool->rxe->ref_cnt);
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem) {
- atomic_dec(&pool->num_elem);
- rxe_dev_put(pool->rxe);
- rxe_pool_put(pool);
- return NULL;
- }
+ if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ goto out_put_pool;
elem = kmem_cache_zalloc(pool_cache(pool),
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
+ if (!elem)
+ goto out_put_pool;
elem->pool = pool;
kref_init(&elem->ref_cnt);
return elem;
+
+out_put_pool:
+ atomic_dec(&pool->num_elem);
+ rxe_dev_put(pool->rxe);
+ rxe_pool_put(pool);
+ return NULL;
}
void rxe_elem_release(struct kref *kref)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 00bda9380a2e..4469592b839d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -275,8 +275,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) {
- setup_timer(&qp->rnr_nak_timer, rnr_nak_timer, (unsigned long)qp);
- setup_timer(&qp->retrans_timer, retransmit_timer, (unsigned long)qp);
+ timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
+ timer_setup(&qp->retrans_timer, retransmit_timer, 0);
}
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d84222f9d5d2..26a7f923045b 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -118,9 +118,9 @@ static void req_retry(struct rxe_qp *qp)
}
}
-void rnr_nak_timer(unsigned long data)
+void rnr_nak_timer(struct timer_list *t)
{
- struct rxe_qp *qp = (struct rxe_qp *)data;
+ struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
rxe_run_task(&qp->req.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index ea3810b29273..08f05ac5f5d5 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -71,7 +71,7 @@ void rxe_do_task(unsigned long data)
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
- /* fall through to */
+ /* fall through */
case TASK_STATE_ARMED:
spin_unlock_irqrestore(&task->state_lock, flags);
return;
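
The comment changes above (and the similar ones in rxe_comp.c, rxe_verbs.c, rdmavt and isert) normalise fall-through annotations to a bare /* fall through */ placed immediately before the case label being fallen into, which is the form static checkers and GCC's -Wimplicit-fallthrough recognise. A small illustration with hypothetical states, not taken from rxe:

enum demo_state { DEMO_BUSY, DEMO_ARMED, DEMO_IDLE };

static enum demo_state demo_advance(enum demo_state state)
{
	switch (state) {
	case DEMO_BUSY:
		state = DEMO_ARMED;
		/* fall through */
	case DEMO_ARMED:
		/* runs for both DEMO_BUSY and DEMO_ARMED inputs */
		state = DEMO_IDLE;
		break;
	default:
		break;
	}
	return state;
}
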
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0b362f49a10a..d03002b9d84d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -644,6 +644,7 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE_WITH_IMM:
wr->ex.imm_data = ibwr->ex.imm_data;
+ /* fall through */
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
@@ -1191,6 +1192,7 @@ int rxe_register_device(struct rxe_dev *rxe)
int err;
int i;
struct ib_device *dev = &rxe->ib_dev;
+ struct crypto_shash *tfm;
strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
@@ -1288,12 +1290,13 @@ int rxe_register_device(struct rxe_dev *rxe)
dev->get_hw_stats = rxe_ib_get_hw_stats;
dev->alloc_hw_stats = rxe_ib_alloc_hw_stats;
- rxe->tfm = crypto_alloc_shash("crc32", 0, 0);
- if (IS_ERR(rxe->tfm)) {
+ tfm = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(tfm)) {
pr_err("failed to allocate crc algorithm err:%ld\n",
- PTR_ERR(rxe->tfm));
- return PTR_ERR(rxe->tfm);
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
+ rxe->tfm = tfm;
err = ib_register_device(dev, NULL);
if (err) {
diff --git a/drivers/infiniband/ulp/Makefile b/drivers/infiniband/ulp/Makefile
index c28af1823a2d..437813c7b481 100644
--- a/drivers/infiniband/ulp/Makefile
+++ b/drivers/infiniband/ulp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_IPOIB) += ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += srp/
obj-$(CONFIG_INFINIBAND_SRPT) += srpt/
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
index e5430dd50764..6ece857ed262 100644
--- a/drivers/infiniband/ulp/ipoib/Makefile
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o
ib_ipoib-y := ipoib_main.o \
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4a5c7a07a631..8033a006277f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -111,7 +111,7 @@ enum {
IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- MAX_SEND_CQE = 16,
+ MAX_SEND_CQE = 64,
IPOIB_CM_COPYBREAK = 256,
IPOIB_NON_CHILD = 0,
@@ -331,7 +331,8 @@ struct ipoib_dev_priv {
struct net_device *dev;
- struct napi_struct napi;
+ struct napi_struct send_napi;
+ struct napi_struct recv_napi;
unsigned long flags;
@@ -381,7 +382,6 @@ struct ipoib_dev_priv {
unsigned tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
- unsigned tx_outstanding;
struct ib_wc send_wc[MAX_SEND_CQE];
struct ib_recv_wr rx_wr;
@@ -409,7 +409,6 @@ struct ipoib_dev_priv {
#endif
u64 hca_caps;
struct ipoib_ethtool_st ethtool;
- struct timer_list poll_timer;
unsigned max_send_sge;
bool sm_fullmember_sendonly_support;
const struct net_device_ops *rn_ops;
@@ -476,9 +475,10 @@ extern struct workqueue_struct *ipoib_workqueue;
/* functions */
-int ipoib_poll(struct napi_struct *napi, int budget);
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
+int ipoib_rx_poll(struct napi_struct *napi, int budget);
+int ipoib_tx_poll(struct napi_struct *napi, int budget);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct rdma_ah_attr *attr);
@@ -500,7 +500,7 @@ void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(struct ib_device *hca, u8 port,
const char *format);
-void ipoib_ib_tx_timer_func(unsigned long ctx);
+void ipoib_ib_tx_timer_func(struct timer_list *t);
void ipoib_ib_dev_flush_light(struct work_struct *work);
void ipoib_ib_dev_flush_normal(struct work_struct *work);
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7774654c2ccb..87f4bd99cdf7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -594,9 +594,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
- ipoib_dbg(priv, "cm recv error "
- "(status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_dbg(priv,
+ "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
+ wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
@@ -757,30 +757,35 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
return;
}
+ if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
+ tx->qp->qp_num);
+ netif_stop_queue(dev);
+ }
+
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) {
+ ipoib_warn(priv, "IPoIB/CM: request notify on send CQ failed\n");
+ napi_schedule(&priv->send_napi);
+ }
+
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
- ipoib_warn(priv, "post_send failed, error %d\n", rc);
+ ipoib_warn(priv, "IPoIB/CM: post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
} else {
netif_trans_update(dev);
++tx->tx_head;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
- ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
- tx->qp->qp_num);
- netif_stop_queue(dev);
- rc = ib_req_notify_cq(priv->send_cq,
- IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
- if (rc < 0)
- ipoib_warn(priv, "request notify on send CQ failed\n");
- else if (rc)
- ipoib_send_comp_handler(priv->send_cq, dev);
- }
+ ++priv->tx_head;
}
}
@@ -814,9 +819,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_tx_lock(dev);
++tx->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+ ++priv->tx_tail;
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
@@ -829,11 +836,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
wc->status == IB_WC_RETRY_EXC_ERR)
ipoib_dbg(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else
ipoib_warn(priv,
- "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
@@ -1045,7 +1052,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
- .send_cq = priv->recv_cq,
+ .send_cq = priv->send_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
@@ -1219,9 +1226,10 @@ timeout:
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
- ++p->tx_tail;
netif_tx_lock_bh(p->dev);
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+ ++p->tx_tail;
+ ++priv->tx_tail;
+ if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 8dc1e6225cc8..2706bf26cbac 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -99,8 +99,9 @@ static int ipoib_set_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames > 0xffff)
return -EINVAL;
- ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
- coal->rx_coalesce_usecs);
+ ret = rdma_set_cq_moderation(priv->recv_cq,
+ coal->rx_max_coalesced_frames,
+ coal->rx_coalesce_usecs);
if (ret && ret != -ENOSYS) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 6cd61638b441..3b96cdaf9a83 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -192,8 +192,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- ipoib_warn(priv, "failed recv event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
@@ -264,7 +264,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&priv->napi, skb);
+ napi_gro_receive(&priv->recv_napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -406,16 +406,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
- if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
- netif_queue_stopped(dev) &&
- test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
- ipoib_warn(priv, "failed send event "
- "(status=%d, wrid=%d vend_err %x)\n",
+ ipoib_warn(priv,
+ "failed send event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
if (!qp_work)
@@ -430,17 +431,23 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
static int poll_tx(struct ipoib_dev_priv *priv)
{
int n, i;
+ struct ib_wc *wc;
n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
- for (i = 0; i < n; ++i)
- ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
-
+ for (i = 0; i < n; ++i) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
+ else
+ ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+ }
return n == MAX_SEND_CQE;
}
-int ipoib_poll(struct napi_struct *napi, int budget)
+int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
- struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
+ struct ipoib_dev_priv *priv =
+ container_of(napi, struct ipoib_dev_priv, recv_napi);
struct net_device *dev = priv->dev;
int done;
int t;
@@ -464,8 +471,9 @@ poll_more:
ipoib_cm_handle_rx_wc(dev, wc);
else
ipoib_ib_handle_rx_wc(dev, wc);
- } else
- ipoib_cm_handle_tx_wc(priv->dev, wc);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
if (n != t)
@@ -484,33 +492,47 @@ poll_more:
return done;
}
-void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
+int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
- struct net_device *dev = dev_ptr;
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
+ send_napi);
+ struct net_device *dev = priv->dev;
+ int n, i;
+ struct ib_wc *wc;
- napi_schedule(&priv->napi);
-}
+poll_more:
+ n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
-static void drain_tx_cq(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ for (i = 0; i < n; i++) {
+ wc = priv->send_wc + i;
+ if (wc->wr_id & IPOIB_OP_CM)
+ ipoib_cm_handle_tx_wc(dev, wc);
+ else
+ ipoib_ib_handle_tx_wc(dev, wc);
+ }
- netif_tx_lock(dev);
- while (poll_tx(priv))
- ; /* nothing */
+ if (n < budget) {
+ napi_complete(napi);
+ if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS)) &&
+ napi_reschedule(napi))
+ goto poll_more;
+ }
+ return n < 0 ? 0 : n;
+}
- if (netif_queue_stopped(dev))
- mod_timer(&priv->poll_timer, jiffies + 1);
+void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
+{
+ struct ipoib_dev_priv *priv = ctx_ptr;
- netif_tx_unlock(dev);
+ napi_schedule(&priv->recv_napi);
}
-void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
+void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
- struct ipoib_dev_priv *priv = ipoib_priv(dev_ptr);
+ struct ipoib_dev_priv *priv = ctx_ptr;
- mod_timer(&priv->poll_timer, jiffies);
+ napi_schedule(&priv->send_napi);
}
static inline int post_send(struct ipoib_dev_priv *priv,
@@ -611,23 +633,25 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
- if (++priv->tx_outstanding == ipoib_sendq_size) {
+ /* increase the tx_head after send success, but use it for queue state */
+ if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
- ipoib_warn(priv, "request notify on send CQ failed\n");
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
+ if (netif_queue_stopped(dev))
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS))
+ ipoib_warn(priv, "request notify on send CQ failed\n");
+
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address, dqpn, tx_req, phead, hlen);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
- --priv->tx_outstanding;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
@@ -639,11 +663,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
rc = priv->tx_head;
++priv->tx_head;
}
-
- if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
- while (poll_tx(priv))
- ; /* nothing */
-
return rc;
}
@@ -732,6 +751,22 @@ static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
new_state, qp_attr.qp_state);
}
+static void ipoib_napi_enable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_enable(&priv->recv_napi);
+ napi_enable(&priv->send_napi);
+}
+
+static void ipoib_napi_disable(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ napi_disable(&priv->recv_napi);
+ napi_disable(&priv->send_napi);
+}
+
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -741,7 +776,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
int i;
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_disable(&priv->napi);
+ ipoib_napi_disable(dev);
ipoib_cm_dev_stop(dev);
@@ -773,7 +808,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
- --priv->tx_outstanding;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
@@ -799,7 +833,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_dbg(priv, "All sends and receives done.\n");
timeout:
- del_timer_sync(&priv->poll_timer);
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
@@ -821,11 +854,6 @@ int ipoib_ib_dev_stop(struct net_device *dev)
return 0;
}
-void ipoib_ib_tx_timer_func(unsigned long ctx)
-{
- drain_tx_cq((struct net_device *)ctx);
-}
-
int ipoib_ib_dev_open_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -850,7 +878,7 @@ int ipoib_ib_dev_open_default(struct net_device *dev)
}
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- napi_enable(&priv->napi);
+ ipoib_napi_enable(dev);
return 0;
out:
@@ -893,13 +921,17 @@ dev_stop:
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ struct rdma_netdev *rn = netdev_priv(dev);
if (!(priv->pkey & 0x7fff) ||
ib_find_pkey(priv->ca, priv->port, priv->pkey,
- &priv->pkey_index))
+ &priv->pkey_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
- else
+ } else {
+ if (rn->set_id)
+ rn->set_id(dev, priv->pkey_index);
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ }
}
void ipoib_ib_dev_up(struct net_device *dev)
@@ -961,8 +993,9 @@ void ipoib_drain_cq(struct net_device *dev)
ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
else
ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
- } else
- ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
+ } else {
+ pr_warn("%s: Got unexpected wqe id\n", __func__);
+ }
}
} while (n == IPOIB_NUM_WC);
@@ -1203,10 +1236,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
+ rtnl_lock();
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
- if (ipoib_ib_dev_open(dev) != 0)
+
+ result = ipoib_ib_dev_open(dev);
+ rtnl_unlock();
+ if (result)
return;
+
if (netif_queue_stopped(dev))
netif_start_queue(dev);
}
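
With the send-side NAPI in place, the explicit tx_outstanding counter is dropped throughout this file: tx_head and tx_tail are free-running unsigned counters, so the number of posts still in flight is simply tx_head - tx_tail (which stays correct across wraparound), the queue is stopped one slot before ipoib_sendq_size is reached, and it is woken once at most half the ring is outstanding. A small sketch of that accounting, with hypothetical helper names:

#include <linux/types.h>

/* Ring occupancy from free-running unsigned producer/consumer counters;
 * unsigned subtraction remains correct after head wraps past 0. */
static inline unsigned int ring_used(unsigned int head, unsigned int tail)
{
	return head - tail;
}

static inline bool ring_should_stop(unsigned int head, unsigned int tail,
				    unsigned int size)
{
	return ring_used(head, tail) == size - 1;	/* stop one slot early */
}

static inline bool ring_can_wake(unsigned int head, unsigned int tail,
				 unsigned int size)
{
	return ring_used(head, tail) <= size >> 1;	/* wake at half full */
}
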
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index dcc77014018d..12b7f911f0e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,7 +51,6 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
-#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
@@ -1617,13 +1616,29 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
wait_for_completion(&priv->ntbl.deleted);
}
+static void ipoib_napi_add(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_add(dev, &priv->recv_napi, ipoib_rx_poll, IPOIB_NUM_WC);
+ netif_napi_add(dev, &priv->send_napi, ipoib_tx_poll, MAX_SEND_CQE);
+}
+
+static void ipoib_napi_del(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ netif_napi_del(&priv->recv_napi);
+ netif_napi_del(&priv->send_napi);
+}
+
static void ipoib_dev_uninit_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_transport_dev_cleanup(dev);
- netif_napi_del(&priv->napi);
+ ipoib_napi_del(dev);
ipoib_cm_dev_cleanup(dev);
@@ -1638,7 +1653,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);
+ ipoib_napi_add(dev);
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
@@ -1666,9 +1681,6 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
- setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
- (unsigned long)dev);
-
return 0;
out_tx_ring_cleanup:
@@ -1678,7 +1690,7 @@ out_rx_ring_cleanup:
kfree(priv->rx_ring);
out:
- netif_napi_del(&priv->napi);
+ ipoib_napi_del(dev);
return -ENOMEM;
}
@@ -2314,7 +2326,8 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- kfree(dev_list);
+ pr_err("Failed to init port, removing it\n");
+ ipoib_remove_one(device, dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index bb64baf25309..a1ed25422b72 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -156,7 +156,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
};
struct ib_cq_init_attr cq_attr = {};
- int ret, size;
+ int ret, size, req_vec;
int i;
size = ipoib_recvq_size + 1;
@@ -171,17 +171,21 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
if (ret != -ENOSYS)
return -ENODEV;
+ req_vec = (priv->port - 1) * 2;
+
cq_attr.cqe = size;
- priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
+ priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
cq_attr.cqe = ipoib_sendq_size;
- priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
- dev, &cq_attr);
+ cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
+ priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
+ priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
@@ -208,6 +212,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
goto out_free_send_cq;
}
+ if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
+ goto out_free_send_cq;
+
for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
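
The CQs are now spread over the device's completion vectors per port: req_vec = (port - 1) * 2, the receive CQ uses req_vec % num_comp_vectors and the send CQ uses (req_vec + 1) % num_comp_vectors. As a worked example, assuming a hypothetical HCA exposing 4 completion vectors, port 1 lands on vectors 0 (recv) and 1 (send), port 2 on 2 and 3, and port 3 wraps back around to 0 and 1.
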
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 55a73b0ed4c6..56b7240a3fc3 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1146,7 +1146,7 @@ void iser_err_comp(struct ib_wc *wc, const char *type)
if (wc->status != IB_WC_WR_FLUSH_ERR) {
struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
- iser_err("%s failure: %s (%d) vend_err %x\n", type,
+ iser_err("%s failure: %s (%d) vend_err %#x\n", type,
ib_wc_status_msg(wc->status), wc->status,
wc->vendor_err);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ceabdb85df8b..720dfb3a1ac2 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -788,10 +788,11 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
* the rdma cm id
*/
return 1;
- case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
- case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
+ /* fall through */
+ case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
break;
@@ -1569,9 +1570,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
- /*
- * Fall-through
- */
+ /* fall through */
default:
iscsit_release_cmd(cmd);
break;
@@ -1749,8 +1748,9 @@ isert_do_control_comp(struct work_struct *work)
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
- case ISTATE_SEND_REJECT: /* FALLTHRU */
- case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
+ /* fall through */
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
ib_dev, false);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 87d994de8c91..d6fd248320ae 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index afa938bd26d6..4be3aef40bd2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -139,6 +139,7 @@ void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
rcu_assign_pointer(adapter->mactbl, NULL);
synchronize_rcu();
opa_vnic_free_mac_tbl(mactbl);
+ adapter->info.vport.mac_tbl_digest = 0;
mutex_unlock(&adapter->mactbl_lock);
}
@@ -405,6 +406,42 @@ u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
return vl;
}
+/* opa_vnic_get_rc - return the routing control */
+static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
+ struct sk_buff *skb)
+{
+ u8 proto, rout_ctrl;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV6_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ if (proto == IPPROTO_TCP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_TCP);
+ else if (proto == IPPROTO_UDP)
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
+ IPV4_UDP);
+ else
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
+ break;
+ default:
+ rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
+ }
+
+ return rout_ctrl;
+}
+
/* opa_vnic_calc_entropy - calculate the packet entropy */
u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
@@ -447,7 +484,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
struct __opa_veswport_info *info = &adapter->info;
struct opa_vnic_skb_mdata *mdata;
- u8 def_port, sc, entropy, *hdr;
+ u8 def_port, sc, rc, entropy, *hdr;
u16 len, l4_hdr;
u32 dlid;
@@ -458,6 +495,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
sc = opa_vnic_get_sc(info, skb);
+ rc = opa_vnic_get_rc(info, skb);
l4_hdr = info->vesw.vesw_id;
mdata = skb_push(skb, sizeof(*mdata));
@@ -470,6 +508,6 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
}
opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
- info->vesw.pkey, entropy, sc, 0,
+ info->vesw.pkey, entropy, sc, rc,
OPA_VNIC_L4_ETHR, l4_hdr);
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
index 4c434b9dd84c..e4c9bf2ef7e2 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.h
@@ -103,6 +103,17 @@
#define OPA_VNIC_ETH_LINK_UP 1
#define OPA_VNIC_ETH_LINK_DOWN 2
+/* routing control */
+#define OPA_VNIC_ENCAP_RC_DEFAULT 0
+#define OPA_VNIC_ENCAP_RC_IPV4 4
+#define OPA_VNIC_ENCAP_RC_IPV4_UDP 8
+#define OPA_VNIC_ENCAP_RC_IPV4_TCP 12
+#define OPA_VNIC_ENCAP_RC_IPV6 16
+#define OPA_VNIC_ENCAP_RC_IPV6_TCP 20
+#define OPA_VNIC_ENCAP_RC_IPV6_UDP 24
+
+#define OPA_VNIC_ENCAP_RC_EXT(w, b) (((w) >> OPA_VNIC_ENCAP_RC_ ## b) & 0x7)
+
/**
* struct opa_vesw_info - OPA vnic switch information
* @fabric_id: 10-bit fabric id
@@ -111,8 +122,8 @@
* @pkey: partition key
* @u_mcast_dlid: unknown multicast dlid
* @u_ucast_dlid: array of unknown unicast dlids
- * @eth_mtu: MTUs for each vlan PCP
- * @eth_mtu_non_vlan: MTU for non vlan packets
+ * @rc: routing control
+ * @eth_mtu: Ethernet MTU
*/
struct opa_vesw_info {
__be16 fabric_id;
@@ -128,9 +139,10 @@ struct opa_vesw_info {
__be32 u_mcast_dlid;
__be32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- __be16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- __be16 eth_mtu_non_vlan;
+ __be32 rc;
+
+ u8 rsvd3[56];
+ __be16 eth_mtu;
u8 rsvd4[2];
} __packed;
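
OPA_VNIC_ENCAP_RC_EXT() token-pastes its second argument onto OPA_VNIC_ENCAP_RC_ to select a bit offset in the new 32-bit rc word and then masks out a 3-bit routing-control value; for instance OPA_VNIC_ENCAP_RC_EXT(w, IPV4_UDP) expands to ((w) >> 8) & 0x7 and OPA_VNIC_ENCAP_RC_EXT(w, IPV6_TCP) to ((w) >> 20) & 0x7. The struct reshuffle also keeps the packed wire layout the same length: assuming OPA_VNIC_MAX_NUM_PCP is 8 as defined elsewhere in this header, the old tail occupied 44 + 8*2 + 2 = 62 bytes (rsvd3, per-PCP eth_mtu[], eth_mtu_non_vlan), and the new tail occupies 4 + 56 + 2 = 62 bytes (rc, rsvd3, eth_mtu).
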
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index ca29e6d5aedc..afd95f432262 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -89,9 +89,10 @@ struct __opa_vesw_info {
u32 u_mcast_dlid;
u32 u_ucast_dlid[OPA_VESW_MAX_NUM_DEF_PORT];
- u8 rsvd3[44];
- u16 eth_mtu[OPA_VNIC_MAX_NUM_PCP];
- u16 eth_mtu_non_vlan;
+ u32 rc;
+
+ u8 rsvd3[56];
+ u16 eth_mtu;
u8 rsvd4[2];
} __packed;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index 1a3c25364b64..ce57e0f10289 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -112,6 +112,27 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
return rc;
}
+static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
+{
+ struct __opa_veswport_info *info = &adapter->info;
+
+ mutex_lock(&adapter->lock);
+ /* Operational state can only be DROP_ALL or FORWARDING */
+ if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
+ info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ } else {
+ info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
+ info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ }
+
+ if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
+ netif_dormant_off(adapter->netdev);
+ else
+ netif_dormant_on(adapter->netdev);
+ mutex_unlock(&adapter->lock);
+}
+
/* opa_vnic_process_vema_config - process vema configuration updates */
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
{
@@ -130,7 +151,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
memcpy(saddr.sa_data, info->vport.base_mac_addr,
ARRAY_SIZE(info->vport.base_mac_addr));
mutex_lock(&adapter->lock);
- eth_mac_addr(netdev, &saddr);
+ eth_commit_mac_addr_change(netdev, &saddr);
memcpy(adapter->vema_mac_addr,
info->vport.base_mac_addr, ETH_ALEN);
mutex_unlock(&adapter->lock);
@@ -140,7 +161,7 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
/* Handle MTU limit change */
rtnl_lock();
- netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu_non_vlan,
+ netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
netdev->min_mtu);
if (netdev->mtu > netdev->max_mtu)
dev_set_mtu(netdev, netdev->max_mtu);
@@ -164,14 +185,8 @@ void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
OPA_VNIC_INVALID_PORT;
- /* Operational state can only be DROP_ALL or FORWARDING */
- if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING) {
- info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
- netif_dormant_off(netdev);
- } else {
- info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
- netif_dormant_on(netdev);
- }
+ /* update state */
+ opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
}
/*
@@ -183,6 +198,7 @@ static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
}
/* opa_vnic_set_mac_addr - change mac address */
@@ -268,8 +284,8 @@ static int opa_netdev_open(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, true);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
@@ -287,8 +303,8 @@ static int opa_netdev_close(struct net_device *netdev)
return rc;
}
- /* Update eth link status and send trap */
- adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
+ /* Update status and send trap */
+ opa_vnic_update_state(adapter, false);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 21f0b481edcc..4b615c1451e7 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -186,6 +186,7 @@ static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
+ port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}
/**
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index c2733964379c..868b5aec1537 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -176,11 +176,10 @@ void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
- memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- info->eth_mtu[i] = cpu_to_be16(src->eth_mtu[i]);
+ info->rc = cpu_to_be32(src->rc);
- info->eth_mtu_non_vlan = cpu_to_be16(src->eth_mtu_non_vlan);
+ memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
+ info->eth_mtu = cpu_to_be16(src->eth_mtu);
memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
}
@@ -211,11 +210,10 @@ void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
- memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
- for (i = 0; i < OPA_VNIC_MAX_NUM_PCP; i++)
- dst->eth_mtu[i] = be16_to_cpu(info->eth_mtu[i]);
+ dst->rc = be32_to_cpu(info->rc);
- dst->eth_mtu_non_vlan = be16_to_cpu(info->eth_mtu_non_vlan);
+ memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
+ dst->eth_mtu = be16_to_cpu(info->eth_mtu);
memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
}
@@ -348,7 +346,7 @@ void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
struct opa_veswport_iface_macs *macs)
{
- u16 start_idx, tot_macs, num_macs, idx = 0, count = 0;
+ u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
struct netdev_hw_addr *ha;
start_idx = be16_to_cpu(macs->start_idx);
@@ -359,8 +357,10 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
/* Do not include EM specified MAC address */
if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
- ARRAY_SIZE(adapter->info.vport.base_mac_addr)))
+ ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
+ em_macs++;
continue;
+ }
if (start_idx > idx++)
continue;
@@ -383,7 +383,7 @@ void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
}
tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
- netdev_uc_count(adapter->netdev);
+ netdev_uc_count(adapter->netdev) - em_macs;
macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
macs->num_macs_in_msg = cpu_to_be16(count);
macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
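The vema_iface.c hunks flatten the per-PCP eth_mtu[] array into a single eth_mtu field and add a routing-control (rc) word, converting each field between host and big-endian order as it crosses the management interface. The conversion itself is the usual cpu_to_be*/be*_to_cpu pairing; a standalone sketch with hypothetical struct names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical host-order and wire-order structures, for illustration only. */
struct host_vesw_info {
        u32 rc;
        u16 eth_mtu;
};

struct wire_vesw_info {
        __be32 rc;
        __be16 eth_mtu;
};

/* Host -> wire: used when answering a "get" query from the EM. */
static void vesw_info_to_wire(const struct host_vesw_info *src,
                              struct wire_vesw_info *dst)
{
        dst->rc = cpu_to_be32(src->rc);
        dst->eth_mtu = cpu_to_be16(src->eth_mtu);
}

/* Wire -> host: used when applying a "set" request from the EM. */
static void vesw_info_from_wire(const struct wire_vesw_info *src,
                                struct host_vesw_info *dst)
{
        dst->rc = be32_to_cpu(src->rc);
        dst->eth_mtu = be16_to_cpu(src->eth_mtu);
}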
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index fa5ccdb3bb2a..972d4b3c5223 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -464,20 +464,20 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
/**
* srp_destroy_qp() - destroy an RDMA queue pair
- * @qp: RDMA queue pair.
+ * @ch: SRP RDMA channel.
*
* Drain the qp before destroying it. This avoids that the receive
* completion handler can access the queue pair while it is
* being destroyed.
*/
-static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
+static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
spin_lock_irq(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
spin_unlock_irq(&ch->lock);
- ib_drain_qp(qp);
- ib_destroy_qp(qp);
+ ib_drain_qp(ch->qp);
+ ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
@@ -550,7 +550,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
}
if (ch->qp)
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
if (ch->recv_cq)
ib_free_cq(ch->recv_cq);
if (ch->send_cq)
@@ -617,7 +617,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
ib_destroy_fmr_pool(ch->fmr_pool);
}
- srp_destroy_qp(ch, ch->qp);
+ srp_destroy_qp(ch);
ib_free_cq(ch->send_cq);
ib_free_cq(ch->recv_cq);
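srp_destroy_qp() now takes the channel and reads ch->qp itself, but the underlying ordering is unchanged: flush pending send completions, drain the QP so no completion handler can run against it afterwards, and only then destroy it. A generic sketch of that ordering (the SRP driver additionally runs ib_process_cq_direct() under ch->lock first):

#include <rdma/ib_verbs.h>

/*
 * Tear down a QP safely: ib_drain_qp() waits until all posted work
 * requests have completed, so completion handlers cannot touch the QP
 * after ib_destroy_qp() returns.
 */
static void example_destroy_qp(struct ib_qp *qp)
{
        ib_drain_qp(qp);
        ib_destroy_qp(qp);
}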
@@ -665,12 +665,19 @@ static void srp_path_rec_completion(int status,
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
- int ret;
+ int ret = -ENODEV;
ch->path.numb_path = 1;
init_completion(&ch->done);
+ /*
+ * Avoid that the SCSI host can be removed by srp_remove_target()
+ * before srp_path_rec_completion() is called.
+ */
+ if (!scsi_host_get(target->scsi_host))
+ goto out;
+
ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
@@ -684,18 +691,41 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->path_query);
- if (ch->path_query_id < 0)
- return ch->path_query_id;
+ ret = ch->path_query_id;
+ if (ret < 0)
+ goto put;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
- return ret;
+ goto put;
- if (ch->status < 0)
+ ret = ch->status;
+ if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed\n");
- return ch->status;
+put:
+ scsi_host_put(target->scsi_host);
+
+out:
+ return ret;
+}
+
+static u8 srp_get_subnet_timeout(struct srp_host *host)
+{
+ struct ib_port_attr attr;
+ int ret;
+ u8 subnet_timeout = 18;
+
+ ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
+ if (ret == 0)
+ subnet_timeout = attr.subnet_timeout;
+
+ if (unlikely(subnet_timeout < 15))
+ pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
+ dev_name(&host->srp_dev->dev->dev), subnet_timeout);
+
+ return subnet_timeout;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
@@ -706,6 +736,9 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
struct srp_login_req priv;
} *req = NULL;
int status;
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
@@ -728,8 +761,8 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* module parameters if anyone cared about setting them.
*/
req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = 20;
- req->param.local_cm_response_timeout = 20;
+ req->param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->param.local_cm_response_timeout = subnet_timeout + 2;
req->param.retry_count = target->tl_retry_count;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
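The hard-coded CM response timeouts of 20 are replaced by subnet_timeout + 2. IB CM timeout fields are exponents: the actual delay is 4.096 us * 2^value, so adding 2 to the queried subnet timeout gives roughly four times the subnet's packet lifetime, and the new srp_get_subnet_timeout() warns below 15 (about 134 ms) because the resulting CM timeout may be too short for login. A quick back-of-the-envelope helper, purely illustrative:

/*
 * Illustrative only: an IB timeout field is an exponent; the real delay
 * is 4.096 us * 2^exp.  Approximate it as 4 us * 2^exp for a quick check.
 */
static inline unsigned long ib_timeout_exp_to_usec(unsigned int exp)
{
        return 4UL << exp;
}

/* subnet_timeout = 18 -> ~1.07 s; +2 -> ~4.3 s CM response timeout. */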
@@ -1279,7 +1312,6 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_pool_fmr *fmr;
u64 io_addr = 0;
@@ -1295,9 +1327,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
if (state->npages == 0)
return 0;
- if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (state->npages == 1 && target->global_rkey) {
srp_map_desc(state, state->base_dma_addr, state->dma_len,
- pd->unsafe_global_rkey);
+ target->global_rkey);
goto reset_state;
}
@@ -1337,7 +1369,6 @@ static int srp_map_finish_fr(struct srp_map_state *state,
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
- struct ib_pd *pd = target->pd;
struct ib_send_wr *bad_wr;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
@@ -1353,12 +1384,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (sg_nents == 1 && target->global_rkey) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
- pd->unsafe_global_rkey);
+ target->global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
@@ -1520,7 +1551,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
- target->pd->unsafe_global_rkey);
+ target->global_rkey);
}
return 0;
@@ -1618,7 +1649,6 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
- struct ib_pd *pd = target->pd;
struct scatterlist *scat;
struct srp_cmd *cmd = req->cmd->buf;
int len, nents, count, ret;
@@ -1654,7 +1684,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
- if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (count == 1 && target->global_rkey) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
@@ -1664,7 +1694,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_direct_buf *buf = (void *) cmd->add_data;
buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
- buf->key = cpu_to_be32(pd->unsafe_global_rkey);
+ buf->key = cpu_to_be32(target->global_rkey);
buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
req->nmdesc = 0;
@@ -1735,14 +1765,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
- if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
+ if (!target->global_rkey) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
- idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
+ idb_rkey = cpu_to_be32(target->global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -2403,7 +2433,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, htonl(ntohs(cpi->redirect_lid)));
+ sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
ch->path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
@@ -3318,8 +3348,8 @@ static ssize_t srp_create_target(struct device *dev,
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
- target->pd = host->srp_dev->pd;
target->lkey = host->srp_dev->pd->local_dma_lkey;
+ target->global_rkey = host->srp_dev->global_rkey;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
@@ -3638,6 +3668,10 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
+ if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+ srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
+ WARN_ON_ONCE(srp_dev->global_rkey == 0);
+ }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
host = srp_add_port(srp_dev, p);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ab9077b81d5a..a814f5ef16f9 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,6 +90,7 @@ struct srp_device {
struct list_head dev_list;
struct ib_device *dev;
struct ib_pd *pd;
+ u32 global_rkey;
u64 mr_page_mask;
int mr_page_size;
int mr_max_size;
@@ -179,7 +180,7 @@ struct srp_target_port {
spinlock_t lock;
/* read only in the hot path */
- struct ib_pd *pd;
+ u32 global_rkey;
struct srp_rdma_ch *ch;
u32 ch_count;
u32 lkey;
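Instead of keeping a pointer to the PD and testing pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY on every mapping, the target now caches the rkey once at device-add time and the hot path only tests a u32 (0 meaning "no global rkey"). A reduced sketch of that idea, with hypothetical names:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-device context, for illustration. */
struct example_dev {
        struct ib_pd    *pd;
        u32             global_rkey;    /* 0 if no unsafe global rkey */
};

static int example_setup_pd(struct example_dev *dev,
                            struct ib_device *ibdev, unsigned int pd_flags)
{
        dev->pd = ib_alloc_pd(ibdev, pd_flags);
        if (IS_ERR(dev->pd))
                return PTR_ERR(dev->pd);

        /* Cache the rkey once; the data path then tests a plain scalar. */
        if (pd_flags & IB_PD_UNSAFE_GLOBAL_RKEY)
                dev->global_rkey = dev->pd->unsafe_global_rkey;

        return 0;
}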
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9e8e9220f816..8a1bd354b1cc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -80,7 +80,7 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
-static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}
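This one-liner is part of the tree-wide constification of kernel_param callbacks (around v4.15): getters and setters passed to module_param_call() now take a const struct kernel_param *. A minimal example of the post-change signature, with hypothetical names:

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static u64 example_stat;

/* Getter with the constified signature expected by module_param_call(). */
static int example_get_u64_x(char *buffer, const struct kernel_param *kp)
{
        return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
}

module_param_call(example_stat, NULL, example_get_u64_x, &example_stat, 0444);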
@@ -295,6 +295,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
{
struct srpt_device *sdev = sport->sdev;
struct ib_dm_ioc_profile *iocp;
+ int send_queue_depth;
iocp = (struct ib_dm_ioc_profile *)mad->data;
@@ -310,6 +311,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
return;
}
+ if (sdev->use_srq)
+ send_queue_depth = sdev->srq_size;
+ else
+ send_queue_depth = min(SRPT_RQ_SIZE,
+ sdev->device->attrs.max_qp_wr);
+
memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
@@ -322,7 +329,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
- iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
@@ -686,6 +693,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
{
int i;
+ if (!ioctx_ring)
+ return;
+
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
kfree(ioctx_ring);
@@ -757,7 +767,7 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
/**
* srpt_post_recv() - Post an IB receive request.
*/
-static int srpt_post_recv(struct srpt_device *sdev,
+static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
@@ -766,7 +776,7 @@ static int srpt_post_recv(struct srpt_device *sdev,
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma;
list.length = srp_max_req_size;
- list.lkey = sdev->pd->local_dma_lkey;
+ list.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_recv_done;
wr.wr_cqe = &ioctx->ioctx.cqe;
@@ -774,7 +784,10 @@ static int srpt_post_recv(struct srpt_device *sdev,
wr.sg_list = &list;
wr.num_sge = 1;
- return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ if (sdev->use_srq)
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+ else
+ return ib_post_recv(ch->qp, &wr, &bad_wr);
}
/**
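With the SRQ made optional, receive buffers must be posted either to the shared receive queue or to the channel's own QP, which is why srpt_post_recv() now takes the channel. The generic shape of such a helper (a sketch, not the driver verbatim):

#include <rdma/ib_verbs.h>

/*
 * Post one receive buffer, either to a shared receive queue or to the
 * per-connection QP.  Sketch only: dma/length/lkey describe a buffer the
 * caller has already DMA-mapped.
 */
static int example_post_recv(struct ib_srq *srq, struct ib_qp *qp,
                             struct ib_cqe *cqe, u64 dma, u32 length, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma,
                .length = length,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_cqe         = cqe,
                .sg_list        = &sge,
                .num_sge        = 1,
        };
        struct ib_recv_wr *bad_wr;

        if (srq)
                return ib_post_srq_recv(srq, &wr, &bad_wr);
        return ib_post_recv(qp, &wr, &bad_wr);
}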
@@ -1517,7 +1530,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
break;
}
- srpt_post_recv(ch->sport->sdev, recv_ioctx);
+ srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
return;
out_wait:
@@ -1616,7 +1629,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
u32 srp_sq_size = sport->port_attrib.srp_sq_size;
- int ret;
+ int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1640,7 +1653,6 @@ retry:
= (void(*)(struct ib_event *, void*))srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
- qp_init->srq = sdev->srq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
/*
@@ -1650,10 +1662,16 @@ retry:
* both both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
+ if (sdev->use_srq) {
+ qp_init->srq = sdev->srq;
+ } else {
+ qp_init->cap.max_recv_wr = ch->rq_size;
+ qp_init->cap.max_recv_sge = qp_init->cap.max_send_sge;
+ }
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
@@ -1679,6 +1697,10 @@ retry:
if (ret)
goto err_destroy_qp;
+ if (!sdev->use_srq)
+ for (i = 0; i < ch->rq_size; i++)
+ srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
+
out:
kfree(qp_init);
return ret;
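The max_send_wr line above also shows the usual trick for the kernel's type-checked min(): attrs->max_qp_wr is an int while srp_sq_size / 2 is u32, so "+ 0U" promotes the attribute to unsigned; min_t() is the more explicit spelling of the same clamp:

#include <linux/kernel.h>

/* Same clamp as above, with the type made explicit instead of '+ 0U'. */
static u32 example_clamp_send_wr(u32 requested, int max_qp_wr)
{
        return min_t(u32, requested / 2, max_qp_wr);
}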
@@ -1765,19 +1787,65 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-static void __srpt_close_all_ch(struct srpt_device *sdev)
+/*
+ * Send DREQ and wait for DREP. Return true if and only if this function
+ * changed the state of @ch.
+ */
+static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+ __must_hold(&sdev->mutex)
{
+ DECLARE_COMPLETION_ONSTACK(release_done);
+ struct srpt_device *sdev = ch->sport->sdev;
+ bool wait;
+
+ lockdep_assert_held(&sdev->mutex);
+
+ pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+ ch->state);
+
+ WARN_ON(ch->release_done);
+ ch->release_done = &release_done;
+ wait = !list_empty(&ch->list);
+ srpt_disconnect_ch(ch);
+ mutex_unlock(&sdev->mutex);
+
+ if (!wait)
+ goto out;
+
+ while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+ ch->sess_name, ch->qp->qp_num, ch->state);
+
+out:
+ mutex_lock(&sdev->mutex);
+ return wait;
+}
+
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sdev->mutex)
+{
+ struct srpt_device *sdev = sport->sdev;
struct srpt_rdma_ch *ch;
lockdep_assert_held(&sdev->mutex);
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (sport->enabled)
+ return;
+
+again:
list_for_each_entry(ch, &sdev->rch_list, list) {
- if (srpt_disconnect_ch(ch) >= 0)
- pr_info("Closing channel %s-%d because target %s has been disabled\n",
- ch->sess_name, ch->qp->qp_num,
- sdev->device->name);
- srpt_close_ch(ch);
+ if (ch->sport == sport) {
+ pr_info("%s: closing channel %s-%d\n",
+ sdev->device->name, ch->sess_name,
+ ch->qp->qp_num);
+ if (srpt_disconnect_ch_sync(ch))
+ goto again;
+ }
}
+
}
static void srpt_free_ch(struct kref *kref)
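srpt_disconnect_ch_sync() moves the "wait for the release work" logic out of srpt_close_session() so it can also be used when a port is disabled. The core of it is an on-stack completion plus a timed wait that logs while it keeps waiting; a generic sketch of that waiting pattern:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

/*
 * Wait for an asynchronous teardown to signal @done, nagging every three
 * minutes instead of blocking silently forever.  Sketch only.
 */
static void example_wait_for_release(struct completion *done, const char *name)
{
        while (wait_for_completion_timeout(done, 180 * HZ) == 0)
                pr_info("%s: still waiting for channel release ...\n", name);
}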
@@ -1818,6 +1886,10 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_size, DMA_TO_DEVICE);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
mutex_lock(&sdev->mutex);
list_del_init(&ch->list);
if (ch->release_done)
@@ -1953,10 +2025,11 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->cm_id = cm_id;
cm_id->context = ch;
/*
- * Avoid QUEUE_FULL conditions by limiting the number of buffers used
- * for the SRP protocol to the command queue size.
+ * ch->rq_size should be at least as large as the initiator queue
+ * depth to avoid that the initiator driver has to report QUEUE_FULL
+ * to the SCSI mid-layer.
*/
- ch->rq_size = SRPT_RQ_SIZE;
+ ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
@@ -1974,6 +2047,19 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ch->ioctx_ring[i]->ch = ch;
list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
}
+ if (!sdev->use_srq) {
+ ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_recv_ring[0]),
+ srp_max_req_size,
+ DMA_FROM_DEVICE);
+ if (!ch->ioctx_recv_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
+ rej->reason =
+ cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ goto free_ring;
+ }
+ }
ret = srpt_create_ch_ib(ch);
if (ret) {
@@ -1981,7 +2067,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
- goto free_ring;
+ goto free_recv_ring;
}
ret = srpt_ch_qp_rtr(ch, ch->qp);
@@ -2072,6 +2158,11 @@ release_channel:
destroy_ib:
srpt_destroy_ch_ib(ch);
+free_recv_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ ch->sport->sdev, ch->rq_size,
+ srp_max_req_size, DMA_FROM_DEVICE);
+
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
@@ -2342,7 +2433,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
sge.addr = ioctx->ioctx.dma;
sge.length = resp_len;
- sge.lkey = sdev->pd->local_dma_lkey;
+ sge.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_send_done;
send_wr.next = NULL;
@@ -2417,8 +2508,7 @@ static int srpt_release_sdev(struct srpt_device *sdev)
mutex_lock(&sdev->mutex);
for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- sdev->port[i].enabled = false;
- __srpt_close_all_ch(sdev);
+ srpt_set_enabled(&sdev->port[i], false);
mutex_unlock(&sdev->mutex);
res = wait_event_interruptible(sdev->ch_releaseQ,
@@ -2465,6 +2555,74 @@ static struct se_wwn *srpt_lookup_wwn(const char *name)
return wwn;
}
+static void srpt_free_srq(struct srpt_device *sdev)
+{
+ if (!sdev->srq)
+ return;
+
+ ib_destroy_srq(sdev->srq);
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+ sdev->srq = NULL;
+}
+
+static int srpt_alloc_srq(struct srpt_device *sdev)
+{
+ struct ib_srq_init_attr srq_attr = {
+ .event_handler = srpt_srq_event,
+ .srq_context = (void *)sdev,
+ .attr.max_wr = sdev->srq_size,
+ .attr.max_sge = 1,
+ .srq_type = IB_SRQT_BASIC,
+ };
+ struct ib_device *device = sdev->device;
+ struct ib_srq *srq;
+ int i;
+
+ WARN_ON_ONCE(sdev->srq);
+ srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(srq)) {
+ pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ return PTR_ERR(srq);
+ }
+
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
+ sdev->device->attrs.max_srq_wr, device->name);
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring) {
+ ib_destroy_srq(srq);
+ return -ENOMEM;
+ }
+
+ sdev->use_srq = true;
+ sdev->srq = srq;
+
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+
+ return 0;
+}
+
+static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
+{
+ struct ib_device *device = sdev->device;
+ int ret = 0;
+
+ if (!use_srq) {
+ srpt_free_srq(sdev);
+ sdev->use_srq = false;
+ } else if (use_srq && !sdev->srq) {
+ ret = srpt_alloc_srq(sdev);
+ }
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
+ sdev->use_srq, ret);
+ return ret;
+}
+
/**
* srpt_add_one() - Infiniband device addition callback function.
*/
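srpt_alloc_srq()/srpt_free_srq() concentrate the SRQ life cycle that used to be spread across srpt_add_one() and srpt_remove_one(). Stripped of the ioctx ring handling, SRQ creation is just ib_create_srq() with an ib_srq_init_attr; a minimal sketch:

#include <rdma/ib_verbs.h>

/* Minimal SRQ creation: one scatter entry per receive, basic SRQ type. */
static struct ib_srq *example_create_srq(struct ib_pd *pd, u32 nr_wr,
                                         void (*handler)(struct ib_event *, void *),
                                         void *ctx)
{
        struct ib_srq_init_attr attr = {
                .event_handler  = handler,
                .srq_context    = ctx,
                .attr.max_wr    = nr_wr,
                .attr.max_sge   = 1,
                .srq_type       = IB_SRQT_BASIC,
        };

        return ib_create_srq(pd, &attr);        /* ERR_PTR() on failure */
}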
@@ -2472,7 +2630,6 @@ static void srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
- struct ib_srq_init_attr srq_attr;
int i;
pr_debug("device = %p\n", device);
@@ -2490,29 +2647,18 @@ static void srpt_add_one(struct ib_device *device)
if (IS_ERR(sdev->pd))
goto free_dev;
- sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
-
- srq_attr.event_handler = srpt_srq_event;
- srq_attr.srq_context = (void *)sdev;
- srq_attr.attr.max_wr = sdev->srq_size;
- srq_attr.attr.max_sge = 1;
- srq_attr.attr.srq_limit = 0;
- srq_attr.srq_type = IB_SRQT_BASIC;
+ sdev->lkey = sdev->pd->local_dma_lkey;
- sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
- if (IS_ERR(sdev->srq))
- goto err_pd;
+ sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
- pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
- __func__, sdev->srq_size, sdev->device->attrs.max_srq_wr,
- device->name);
+ srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
if (!srpt_service_guid)
srpt_service_guid = be64_to_cpu(device->node_guid);
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id))
- goto err_srq;
+ goto err_ring;
/* print out target login information */
pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
@@ -2532,16 +2678,6 @@ static void srpt_add_one(struct ib_device *device)
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
- sdev->ioctx_ring = (struct srpt_recv_ioctx **)
- srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
- sizeof(*sdev->ioctx_ring[0]),
- srp_max_req_size, DMA_FROM_DEVICE);
- if (!sdev->ioctx_ring)
- goto err_event;
-
- for (i = 0; i < sdev->srq_size; ++i)
- srpt_post_recv(sdev, sdev->ioctx_ring[i]);
-
WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
@@ -2551,12 +2687,13 @@ static void srpt_add_one(struct ib_device *device)
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
sdev->device->name, i);
- goto err_ring;
+ goto err_event;
}
}
@@ -2569,17 +2706,12 @@ out:
pr_debug("added %s.\n", device->name);
return;
-err_ring:
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size,
- DMA_FROM_DEVICE);
err_event:
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
ib_destroy_cm_id(sdev->cm_id);
-err_srq:
- ib_destroy_srq(sdev->srq);
-err_pd:
+err_ring:
+ srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
kfree(sdev);
@@ -2622,12 +2754,10 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&srpt_dev_lock);
srpt_release_sdev(sdev);
- ib_destroy_srq(sdev->srq);
+ srpt_free_srq(sdev);
+
ib_dealloc_pd(sdev->pd);
- srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
- sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
- sdev->ioctx_ring = NULL;
kfree(sdev);
}
@@ -2706,27 +2836,12 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
*/
static void srpt_close_session(struct se_session *se_sess)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
-
- pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
- ch->state);
mutex_lock(&sdev->mutex);
- BUG_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
- srpt_disconnect_ch(ch);
+ srpt_disconnect_ch_sync(ch);
mutex_unlock(&sdev->mutex);
-
- if (!wait)
- return;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
- pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
- ch->sess_name, ch->qp->qp_num, ch->state);
}
/**
@@ -2777,7 +2892,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
const char *p;
unsigned len, count, leading_zero_bytes;
- int ret, rc;
+ int ret;
p = name;
if (strncasecmp(p, "0x", 2) == 0)
@@ -2789,10 +2904,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
- rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
- if (rc < 0)
- pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
- ret = 0;
+ ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (ret < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
out:
return ret;
}
@@ -2926,14 +3040,55 @@ static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
return count;
}
+static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+
+ return sprintf(page, "%d\n", sport->port_attrib.use_srq);
+}
+
+static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
+ struct srpt_device *sdev = sport->sdev;
+ unsigned long val;
+ bool enabled;
+ int ret;
+
+ ret = kstrtoul(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val != !!val)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&sdev->mutex);
+ if (ret < 0)
+ return ret;
+ enabled = sport->enabled;
+ /* Log out all initiator systems before changing 'use_srq'. */
+ srpt_set_enabled(sport, false);
+ sport->port_attrib.use_srq = val;
+ srpt_use_srq(sdev, sport->port_attrib.use_srq);
+ srpt_set_enabled(sport, enabled);
+ mutex_unlock(&sdev->mutex);
+
+ return count;
+}
+
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
+CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
&srpt_tpg_attrib_attr_srp_max_rdma_size,
&srpt_tpg_attrib_attr_srp_max_rsp_size,
&srpt_tpg_attrib_attr_srp_sq_size,
+ &srpt_tpg_attrib_attr_use_srq,
NULL,
};
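The new use_srq attribute follows the store pattern of its neighbours: parse the value, reject anything that is not 0 or 1, then quiesce the port (log all initiators out) before flipping the setting and re-enabling. A cut-down sketch of just the parsing step, using the same "val != !!val" test as above (kstrtobool() would be an alternative):

#include <linux/kernel.h>

/* Parse a strict boolean from a configfs/sysfs buffer.  Sketch only. */
static int example_parse_bool(const char *page, bool *res)
{
        unsigned long val;
        int ret;

        ret = kstrtoul(page, 0, &val);
        if (ret < 0)
                return ret;
        if (val != !!val)       /* only 0 and 1 are accepted */
                return -EINVAL;

        *res = val;
        return 0;
}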
@@ -2951,7 +3106,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
struct srpt_device *sdev = sport->sdev;
- struct srpt_rdma_ch *ch;
unsigned long tmp;
int ret;
@@ -2965,24 +3119,11 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
- if (sport->enabled == tmp)
- goto out;
- sport->enabled = tmp;
- if (sport->enabled)
- goto out;
mutex_lock(&sdev->mutex);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_debug("%s: ch %p %s-%d\n", __func__, ch,
- ch->sess_name, ch->qp->qp_num);
- srpt_disconnect_ch(ch);
- srpt_close_ch(ch);
- }
- }
+ srpt_set_enabled(sport, tmp);
mutex_unlock(&sdev->mutex);
-out:
return count;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 1b817e51b84b..673387d365a3 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -252,6 +252,7 @@ enum rdma_ch_state {
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
* @ioctx_ring: Send ring.
+ * @ioctx_recv_ring: Receive I/O context ring.
* @list: Node for insertion in the srpt_device.rch_list list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
@@ -281,6 +282,7 @@ struct srpt_rdma_ch {
struct list_head free_list;
enum rdma_ch_state state;
struct srpt_send_ioctx **ioctx_ring;
+ struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
struct se_session *sess;
@@ -295,11 +297,13 @@ struct srpt_rdma_ch {
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Shared receive queue (SRQ) size.
+ * @use_srq: Whether or not to use SRQ.
*/
struct srpt_port_attrib {
u32 srp_max_rdma_size;
u32 srp_max_rsp_size;
u32 srp_sq_size;
+ bool use_srq;
};
/**
@@ -343,10 +347,11 @@ struct srpt_port {
* struct srpt_device - Information associated by SRPT with a single HCA.
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
- * @mr: L_Key (local key) with write access to all local memory.
+ * @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
* @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
* @ch_releaseQ: Enables waiting for removal from rch_list.
@@ -358,9 +363,11 @@ struct srpt_port {
struct srpt_device {
struct ib_device *device;
struct ib_pd *pd;
+ u32 lkey;
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
struct list_head rch_list;
wait_queue_head_t ch_releaseQ;
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 595820bbabe9..40de6a7be641 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the input core drivers.
#
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index fcc6c3368182..2743ed4656e4 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -412,10 +412,10 @@ static void ml_play_effects(struct ml_device *ml)
ml_schedule_timer(ml);
}
-static void ml_effect_timer(unsigned long timer_data)
+static void ml_effect_timer(struct timer_list *t)
{
- struct input_dev *dev = (struct input_dev *)timer_data;
- struct ml_device *ml = dev->ff->private;
+ struct ml_device *ml = from_timer(ml, t, timer);
+ struct input_dev *dev = ml->dev;
unsigned long flags;
pr_debug("timer: updating effects\n");
@@ -526,7 +526,7 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
ml->private = data;
ml->play_effect = play_effect;
ml->gain = 0xffff;
- setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev);
+ timer_setup(&ml->timer, ml_effect_timer, 0);
set_bit(FF_GAIN, dev->ffbit);
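The input-driver hunks from here on are mostly the v4.15 timer API conversion: setup_timer(&t, fn, (unsigned long)ptr) with a "void fn(unsigned long)" callback becomes timer_setup(&t, fn, 0) with "void fn(struct timer_list *)", and the callback recovers its containing object with from_timer(), a container_of() over the timer member. A generic before/after sketch with a hypothetical struct:

#include <linux/timer.h>

struct example_dev {
        struct timer_list       timer;
        int                     pending;
};

/* New-style callback: receives the timer itself, not an opaque cookie. */
static void example_timer_fn(struct timer_list *t)
{
        struct example_dev *dev = from_timer(dev, t, timer);

        dev->pending = 0;
}

static void example_init(struct example_dev *dev)
{
        /* Replaces setup_timer(&dev->timer, fn, (unsigned long)dev). */
        timer_setup(&dev->timer, example_timer_fn, 0);
}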
diff --git a/drivers/input/gameport/Makefile b/drivers/input/gameport/Makefile
index b6f6097bd8c4..73ad8fe4db79 100644
--- a/drivers/input/gameport/Makefile
+++ b/drivers/input/gameport/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the gameport drivers.
#
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index cedc665364cd..73862a836062 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -202,9 +202,9 @@ void gameport_stop_polling(struct gameport *gameport)
}
EXPORT_SYMBOL(gameport_stop_polling);
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
{
- struct gameport *gameport = (struct gameport *)d;
+ struct gameport *gameport = from_timer(gameport, t, poll_timer);
gameport->poll_handler(gameport);
if (gameport->poll_cnt)
@@ -542,8 +542,7 @@ static void gameport_init_port(struct gameport *gameport)
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
- setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
- (unsigned long)gameport);
+ timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
}
/*
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 762bfb9487dc..e30642db50d5 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -76,7 +76,7 @@ static void input_start_autorepeat(struct input_dev *dev, int code)
{
if (test_bit(EV_REP, dev->evbit) &&
dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
- dev->timer.data) {
+ dev->timer.function) {
dev->repeat_key = code;
mod_timer(&dev->timer,
jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
@@ -179,9 +179,9 @@ static void input_pass_event(struct input_dev *dev,
* dev->event_lock here to avoid racing with input_event
* which may cause keys get "stuck".
*/
-static void input_repeat_key(unsigned long data)
+static void input_repeat_key(struct timer_list *t)
{
- struct input_dev *dev = (void *) data;
+ struct input_dev *dev = from_timer(dev, t, timer);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -1784,7 +1784,7 @@ struct input_dev *input_allocate_device(void)
device_initialize(&dev->dev);
mutex_init(&dev->mutex);
spin_lock_init(&dev->event_lock);
- init_timer(&dev->timer);
+ timer_setup(&dev->timer, NULL, 0);
INIT_LIST_HEAD(&dev->h_list);
INIT_LIST_HEAD(&dev->node);
@@ -2047,7 +2047,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
*/
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
- dev->timer.data = (unsigned long) dev;
dev->timer.function = input_repeat_key;
dev->rep[REP_DELAY] = delay;
dev->rep[REP_PERIOD] = period;
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index 496fd56b3f1b..67651efda2e1 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the input core drivers.
#
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index f4ad83eab67f..de0dd4756c84 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -364,9 +364,9 @@ static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
return 0;
}
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
{
- struct db9 *db9 = (void *) private;
+ struct db9 *db9 = from_timer(db9, t, timer);
struct parport *port = db9->pd->port;
struct input_dev *dev = db9->dev[0];
struct input_dev *dev2 = db9->dev[1];
@@ -609,7 +609,7 @@ static void db9_attach(struct parport *pp)
db9->pd = pd;
db9->mode = mode;
db9->parportno = pp->number;
- setup_timer(&db9->timer, db9_timer, (long)db9);
+ timer_setup(&db9->timer, db9_timer, 0);
for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index c43f087a496d..2ffb2e8bdc3b 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -654,6 +654,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type,
input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04);
input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02);
+ /* fall through */
case GC_PSX_NEGCON:
case GC_PSX_ANALOG:
@@ -742,9 +743,9 @@ static void gc_psx_process_packet(struct gc *gc)
* gc_timer() initiates reads of console pads data.
*/
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
{
- struct gc *gc = (void *) private;
+ struct gc *gc = from_timer(gc, t, timer);
/*
* N64 pads - must be read first, any read confuses them for 200 us
@@ -887,6 +888,7 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_SNES:
for (i = 4; i < 8; i++)
__set_bit(gc_snes_btn[i], input_dev->keybit);
+ /* fall through */
case GC_NES:
for (i = 0; i < 4; i++)
__set_bit(gc_snes_btn[i], input_dev->keybit);
@@ -894,6 +896,7 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type)
case GC_MULTI2:
__set_bit(BTN_THUMB, input_dev->keybit);
+ /* fall through */
case GC_MULTI:
__set_bit(BTN_TRIGGER, input_dev->keybit);
break;
@@ -971,7 +974,7 @@ static void gc_attach(struct parport *pp)
mutex_init(&gc->mutex);
gc->pd = pd;
gc->parportno = pp->number;
- setup_timer(&gc->timer, gc_timer, (long) gc);
+ timer_setup(&gc->timer, gc_timer, 0);
for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
if (!pads[i])
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 4a95b224169f..5e602a6852b7 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -672,16 +672,16 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
switch (i * m) {
case 60:
- sw->number++;
+ sw->number++; /* fall through */
case 45: /* Ambiguous packet length */
if (j <= 40) { /* ID length less or eq 40 -> FSP */
case 43:
sw->type = SW_ID_FSP;
break;
}
- sw->number++;
+ sw->number++; /* fall through */
case 30:
- sw->number++;
+ sw->number++; /* fall through */
case 15:
sw->type = SW_ID_GP;
break;
@@ -697,9 +697,9 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
sw->type = SW_ID_PP;
break;
case 66:
- sw->bits = 3;
+ sw->bits = 3; /* fall through */
case 198:
- sw->length = 22;
+ sw->length = 22; /* fall through */
case 64:
sw->type = SW_ID_3DP;
if (j == 160)
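The "/* fall through */" comments added in gamecon.c, sidewinder.c and spaceball.c mark switch cases that intentionally continue into the next label, which keeps gcc's -Wimplicit-fallthrough quiet without changing behaviour. The idiom in isolation, with made-up case values:

/* Deliberate fall-through: each case adds to what the previous one set up. */
static unsigned int example_button_mask(int pad_type)
{
        unsigned int mask = 0;

        switch (pad_type) {
        case 2:                         /* extended pad */
                mask |= 0xf0;
                /* fall through */
        case 1:                         /* basic pad */
                mask |= 0x0f;
                break;
        }

        return mask;
}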
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index e9712a1b7cad..bb3faeff8cac 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -162,6 +162,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
break;
}
spaceball->escape = 0;
+ /* fall through */
case 'M':
case 'Q':
case 'S':
@@ -169,6 +170,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio,
spaceball->escape = 0;
data &= 0x1f;
}
+ /* fall through */
default:
if (spaceball->escape)
spaceball->escape = 0;
@@ -234,11 +236,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) |
BIT_MASK(BTN_B) | BIT_MASK(BTN_C) |
BIT_MASK(BTN_MODE);
+ /* fall through */
default:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) |
BIT_MASK(BTN_3) | BIT_MASK(BTN_4) |
BIT_MASK(BTN_5) | BIT_MASK(BTN_6) |
BIT_MASK(BTN_7) | BIT_MASK(BTN_8);
+ /* fall through */
case SPACEBALL_3003C:
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) |
BIT_MASK(BTN_8);
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index a1fdc75a438d..e2685753e460 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -89,9 +89,9 @@ static struct tgfx {
* tgfx_timer() reads and analyzes TurboGraFX joystick data.
*/
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
{
- struct tgfx *tgfx = (void *) private;
+ struct tgfx *tgfx = from_timer(tgfx, t, timer);
struct input_dev *dev;
int data1, data2, i;
@@ -200,7 +200,7 @@ static void tgfx_attach(struct parport *pp)
mutex_init(&tgfx->sem);
tgfx->pd = pd;
tgfx->parportno = pp->number;
- setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+ timer_setup(&tgfx->timer, tgfx_timer, 0);
for (i = 0; i < n_devs; i++) {
if (n_buttons[i] < 1)
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index d2338bacdad1..526e68294e6e 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the input core drivers.
#
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 39bcbc38997f..8a07a426f88e 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -127,10 +127,9 @@ static inline void bfin_kpad_clear_irq(void)
bfin_write_KPAD_ROWCOL(0xFFFF);
}
-static void bfin_kpad_timer(unsigned long data)
+static void bfin_kpad_timer(struct timer_list *t)
{
- struct platform_device *pdev = (struct platform_device *) data;
- struct bf54x_kpad *bf54x_kpad = platform_get_drvdata(pdev);
+ struct bf54x_kpad *bf54x_kpad = from_timer(bf54x_kpad, t, timer);
if (bfin_kpad_get_keypressed(bf54x_kpad)) {
/* Try again later */
@@ -298,7 +297,7 @@ static int bfin_kpad_probe(struct platform_device *pdev)
/* Init Keypad Key Up/Release test timer */
- setup_timer(&bf54x_kpad->timer, bfin_kpad_timer, (unsigned long) pdev);
+ timer_setup(&bf54x_kpad->timer, bfin_kpad_timer, 0);
bfin_write_KPAD_PRESCALE(bfin_kpad_get_prescale(TIME_SCALE));
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index e9f0ebf3267a..87e613dc33b8 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -419,9 +419,9 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void gpio_keys_irq_timer(unsigned long _data)
+static void gpio_keys_irq_timer(struct timer_list *t)
{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
+ struct gpio_button_data *bdata = from_timer(bdata, t, release_timer);
struct input_dev *input = bdata->input;
unsigned long flags;
@@ -582,8 +582,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
}
bdata->release_delay = button->debounce_interval;
- setup_timer(&bdata->release_timer,
- gpio_keys_irq_timer, (unsigned long)bdata);
+ timer_setup(&bdata->release_timer, gpio_keys_irq_timer, 0);
isr = gpio_keys_irq_isr;
irqflags = 0;
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 2165f3dd328b..25d61d8d4fc4 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -184,9 +184,9 @@ static void imx_keypad_fire_events(struct imx_keypad *keypad,
/*
* imx_keypad_check_for_events is the timer handler.
*/
-static void imx_keypad_check_for_events(unsigned long data)
+static void imx_keypad_check_for_events(struct timer_list *t)
{
- struct imx_keypad *keypad = (struct imx_keypad *) data;
+ struct imx_keypad *keypad = from_timer(keypad, t, check_matrix_timer);
unsigned short matrix_volatile_state[MAX_MATRIX_KEY_COLS];
unsigned short reg_val;
bool state_changed, is_zero_matrix;
@@ -456,8 +456,8 @@ static int imx_keypad_probe(struct platform_device *pdev)
keypad->irq = irq;
keypad->stable_count = 0;
- setup_timer(&keypad->check_matrix_timer,
- imx_keypad_check_for_events, (unsigned long) keypad);
+ timer_setup(&keypad->check_matrix_timer,
+ imx_keypad_check_for_events, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/input/keyboard/locomokbd.c b/drivers/input/keyboard/locomokbd.c
index 0d74312d5b02..30d610758595 100644
--- a/drivers/input/keyboard/locomokbd.c
+++ b/drivers/input/keyboard/locomokbd.c
@@ -210,9 +210,9 @@ static irqreturn_t locomokbd_interrupt(int irq, void *dev_id)
/*
* LoCoMo timer checking for released keys
*/
-static void locomokbd_timer_callback(unsigned long data)
+static void locomokbd_timer_callback(struct timer_list *t)
{
- struct locomokbd *locomokbd = (struct locomokbd *) data;
+ struct locomokbd *locomokbd = from_timer(locomokbd, t, timer);
locomokbd_scankeyboard(locomokbd);
}
@@ -264,8 +264,7 @@ static int locomokbd_probe(struct locomo_dev *dev)
spin_lock_init(&locomokbd->lock);
- setup_timer(&locomokbd->timer, locomokbd_timer_callback,
- (unsigned long)locomokbd);
+ timer_setup(&locomokbd->timer, locomokbd_timer_callback, 0);
locomokbd->suspend_jiffies = jiffies;
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 146b26f665f6..7bd107910a6e 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -41,7 +41,7 @@
#undef NEW_BOARD_LEARNING_MODE
static void omap_kp_tasklet(unsigned long);
-static void omap_kp_timer(unsigned long);
+static void omap_kp_timer(struct timer_list *);
static unsigned char keypad_state[8];
static DEFINE_MUTEX(kp_enable_mutex);
@@ -74,7 +74,7 @@ static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void omap_kp_timer(unsigned long data)
+static void omap_kp_timer(struct timer_list *unused)
{
tasklet_schedule(&kp_tasklet);
}
@@ -233,7 +233,7 @@ static int omap_kp_probe(struct platform_device *pdev)
col_idx = 0;
row_idx = 0;
- setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp);
+ timer_setup(&omap_kp->timer, omap_kp_timer, 0);
/* get the irq and init timer*/
kp_tasklet.data = (unsigned long) omap_kp;
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 7544888c4749..53c768b95939 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -45,9 +45,9 @@ struct pwrkey_drv_data {
struct input_dev *input;
};
-static void imx_imx_snvs_check_for_events(unsigned long data)
+static void imx_imx_snvs_check_for_events(struct timer_list *t)
{
- struct pwrkey_drv_data *pdata = (struct pwrkey_drv_data *) data;
+ struct pwrkey_drv_data *pdata = from_timer(pdata, t, check_timer);
struct input_dev *input = pdata->input;
u32 state;
@@ -134,8 +134,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
/* clear the unexpected interrupt before driver ready */
regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
- setup_timer(&pdata->check_timer,
- imx_imx_snvs_check_for_events, (unsigned long) pdata);
+ timer_setup(&pdata->check_timer, imx_imx_snvs_check_for_events, 0);
input = devm_input_allocate_device(&pdev->dev);
if (!input) {
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index edc1385ca00b..875205f445b5 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -251,9 +251,9 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
writel(val, kbc->mmio + KBC_CONTROL_0);
}
-static void tegra_kbc_keypress_timer(unsigned long data)
+static void tegra_kbc_keypress_timer(struct timer_list *t)
{
- struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+ struct tegra_kbc *kbc = from_timer(kbc, t, timer);
unsigned long flags;
u32 val;
unsigned int i;
@@ -655,7 +655,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+ timer_setup(&kbc->timer, tegra_kbc_keypress_timer, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
kbc->mmio = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 03fd4262ada9..4b6118d313fe 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the input misc drivers.
#
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 2b2d02f408bb..a3e79bf5a04b 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -796,7 +796,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
if (pdata->watermark) {
ac->int_mask |= WATERMARK;
- if (!FIFO_MODE(pdata->fifo_mode))
+ if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
ac->pdata.fifo_mode |= FIFO_STREAM;
} else {
ac->int_mask |= DATA_READY;
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 2e8f801932be..a1db1e5040dc 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
haptic->suspended = false;
- magnitude = ACCESS_ONCE(haptic->magnitude);
+ magnitude = READ_ONCE(haptic->magnitude);
if (magnitude)
regulator_haptic_set_voltage(haptic, magnitude);
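ACCESS_ONCE() was removed in favour of READ_ONCE()/WRITE_ONCE(), which also work on non-scalar types and document the direction of the access. The pattern for a field written in one context and sampled in another without further locking, sketched with hypothetical helpers:

#include <linux/compiler.h>

/* Writer side (e.g. the force-feedback play callback). */
static void example_set_magnitude(unsigned int *slot, unsigned int value)
{
        WRITE_ONCE(*slot, value);
}

/* Reader side (e.g. the resume handler): one stable snapshot of the value. */
static unsigned int example_get_magnitude(unsigned int *slot)
{
        return READ_ONCE(*slot);
}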
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 443151de90c6..39ddd9a73feb 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -31,6 +31,7 @@
* 0.1 20/06/2002
* - first public version
*/
+#include <uapi/linux/uinput.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -38,10 +39,47 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
-#include <linux/uinput.h>
#include <linux/input/mt.h>
#include "../input-compat.h"
+#define UINPUT_NAME "uinput"
+#define UINPUT_BUFFER_SIZE 16
+#define UINPUT_NUM_REQUESTS 16
+
+enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
+
+struct uinput_request {
+ unsigned int id;
+ unsigned int code; /* UI_FF_UPLOAD, UI_FF_ERASE */
+
+ int retval;
+ struct completion done;
+
+ union {
+ unsigned int effect_id;
+ struct {
+ struct ff_effect *effect;
+ struct ff_effect *old;
+ } upload;
+ } u;
+};
+
+struct uinput_device {
+ struct input_dev *dev;
+ struct mutex mutex;
+ enum uinput_state state;
+ wait_queue_head_t waitq;
+ unsigned char ready;
+ unsigned char head;
+ unsigned char tail;
+ struct input_event buff[UINPUT_BUFFER_SIZE];
+ unsigned int ff_effects_max;
+
+ struct uinput_request *requests[UINPUT_NUM_REQUESTS];
+ wait_queue_head_t requests_waitq;
+ spinlock_t requests_lock;
+};
+
static int uinput_dev_event(struct input_dev *dev,
unsigned int type, unsigned int code, int value)
{
@@ -149,7 +187,11 @@ static int uinput_request_submit(struct uinput_device *udev,
if (retval)
goto out;
- wait_for_completion(&request->done);
+ if (!wait_for_completion_timeout(&request->done, 30 * HZ)) {
+ retval = -ETIMEDOUT;
+ goto out;
+ }
+
retval = request->retval;
out:
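uinput_request_submit() no longer blocks forever on a force-feedback request: wait_for_completion_timeout() returns 0 when the timeout elapsed and the remaining jiffies otherwise, so a stuck consumer is turned into -ETIMEDOUT. The same check in isolation:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Wait up to 30 s for @done; report -ETIMEDOUT instead of hanging. */
static int example_wait(struct completion *done)
{
        if (!wait_for_completion_timeout(done, 30 * HZ))
                return -ETIMEDOUT;
        return 0;
}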
@@ -320,6 +362,10 @@ static int uinput_create_device(struct uinput_device *udev)
dev->flush = uinput_dev_flush;
}
+ dev->event = uinput_dev_event;
+
+ input_set_drvdata(udev->dev, udev);
+
error = input_register_device(udev->dev);
if (error)
goto fail2;
@@ -402,18 +448,6 @@ static int uinput_validate_absbits(struct input_dev *dev)
return 0;
}
-static int uinput_allocate_device(struct uinput_device *udev)
-{
- udev->dev = input_allocate_device();
- if (!udev->dev)
- return -ENOMEM;
-
- udev->dev->event = uinput_dev_event;
- input_set_drvdata(udev->dev, udev);
-
- return 0;
-}
-
static int uinput_dev_setup(struct uinput_device *udev,
struct uinput_setup __user *arg)
{
@@ -489,9 +523,9 @@ static int uinput_setup_device_legacy(struct uinput_device *udev,
return -EINVAL;
if (!udev->dev) {
- retval = uinput_allocate_device(udev);
- if (retval)
- return retval;
+ udev->dev = input_allocate_device();
+ if (!udev->dev)
+ return -ENOMEM;
}
dev = udev->dev;
@@ -822,162 +856,163 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
return retval;
if (!udev->dev) {
- retval = uinput_allocate_device(udev);
- if (retval)
+ udev->dev = input_allocate_device();
+ if (!udev->dev) {
+ retval = -ENOMEM;
goto out;
+ }
}
switch (cmd) {
- case UI_GET_VERSION:
- if (put_user(UINPUT_VERSION,
- (unsigned int __user *)p))
- retval = -EFAULT;
- goto out;
+ case UI_GET_VERSION:
+ if (put_user(UINPUT_VERSION, (unsigned int __user *)p))
+ retval = -EFAULT;
+ goto out;
- case UI_DEV_CREATE:
- retval = uinput_create_device(udev);
- goto out;
+ case UI_DEV_CREATE:
+ retval = uinput_create_device(udev);
+ goto out;
- case UI_DEV_DESTROY:
- uinput_destroy_device(udev);
- goto out;
+ case UI_DEV_DESTROY:
+ uinput_destroy_device(udev);
+ goto out;
- case UI_DEV_SETUP:
- retval = uinput_dev_setup(udev, p);
- goto out;
+ case UI_DEV_SETUP:
+ retval = uinput_dev_setup(udev, p);
+ goto out;
- /* UI_ABS_SETUP is handled in the variable size ioctls */
+ /* UI_ABS_SETUP is handled in the variable size ioctls */
- case UI_SET_EVBIT:
- retval = uinput_set_bit(arg, evbit, EV_MAX);
- goto out;
+ case UI_SET_EVBIT:
+ retval = uinput_set_bit(arg, evbit, EV_MAX);
+ goto out;
- case UI_SET_KEYBIT:
- retval = uinput_set_bit(arg, keybit, KEY_MAX);
- goto out;
+ case UI_SET_KEYBIT:
+ retval = uinput_set_bit(arg, keybit, KEY_MAX);
+ goto out;
- case UI_SET_RELBIT:
- retval = uinput_set_bit(arg, relbit, REL_MAX);
- goto out;
+ case UI_SET_RELBIT:
+ retval = uinput_set_bit(arg, relbit, REL_MAX);
+ goto out;
- case UI_SET_ABSBIT:
- retval = uinput_set_bit(arg, absbit, ABS_MAX);
- goto out;
+ case UI_SET_ABSBIT:
+ retval = uinput_set_bit(arg, absbit, ABS_MAX);
+ goto out;
- case UI_SET_MSCBIT:
- retval = uinput_set_bit(arg, mscbit, MSC_MAX);
- goto out;
+ case UI_SET_MSCBIT:
+ retval = uinput_set_bit(arg, mscbit, MSC_MAX);
+ goto out;
- case UI_SET_LEDBIT:
- retval = uinput_set_bit(arg, ledbit, LED_MAX);
- goto out;
+ case UI_SET_LEDBIT:
+ retval = uinput_set_bit(arg, ledbit, LED_MAX);
+ goto out;
+
+ case UI_SET_SNDBIT:
+ retval = uinput_set_bit(arg, sndbit, SND_MAX);
+ goto out;
- case UI_SET_SNDBIT:
- retval = uinput_set_bit(arg, sndbit, SND_MAX);
+ case UI_SET_FFBIT:
+ retval = uinput_set_bit(arg, ffbit, FF_MAX);
+ goto out;
+
+ case UI_SET_SWBIT:
+ retval = uinput_set_bit(arg, swbit, SW_MAX);
+ goto out;
+
+ case UI_SET_PROPBIT:
+ retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
+ goto out;
+
+ case UI_SET_PHYS:
+ if (udev->state == UIST_CREATED) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_SET_FFBIT:
- retval = uinput_set_bit(arg, ffbit, FF_MAX);
+ phys = strndup_user(p, 1024);
+ if (IS_ERR(phys)) {
+ retval = PTR_ERR(phys);
goto out;
+ }
+
+ kfree(udev->dev->phys);
+ udev->dev->phys = phys;
+ goto out;
- case UI_SET_SWBIT:
- retval = uinput_set_bit(arg, swbit, SW_MAX);
+ case UI_BEGIN_FF_UPLOAD:
+ retval = uinput_ff_upload_from_user(p, &ff_up);
+ if (retval)
goto out;
- case UI_SET_PROPBIT:
- retval = uinput_set_bit(arg, propbit, INPUT_PROP_MAX);
+ req = uinput_request_find(udev, ff_up.request_id);
+ if (!req || req->code != UI_FF_UPLOAD ||
+ !req->u.upload.effect) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_SET_PHYS:
- if (udev->state == UIST_CREATED) {
- retval = -EINVAL;
- goto out;
- }
+ ff_up.retval = 0;
+ ff_up.effect = *req->u.upload.effect;
+ if (req->u.upload.old)
+ ff_up.old = *req->u.upload.old;
+ else
+ memset(&ff_up.old, 0, sizeof(struct ff_effect));
- phys = strndup_user(p, 1024);
- if (IS_ERR(phys)) {
- retval = PTR_ERR(phys);
- goto out;
- }
+ retval = uinput_ff_upload_to_user(p, &ff_up);
+ goto out;
- kfree(udev->dev->phys);
- udev->dev->phys = phys;
+ case UI_BEGIN_FF_ERASE:
+ if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
+ retval = -EFAULT;
goto out;
+ }
- case UI_BEGIN_FF_UPLOAD:
- retval = uinput_ff_upload_from_user(p, &ff_up);
- if (retval)
- goto out;
-
- req = uinput_request_find(udev, ff_up.request_id);
- if (!req || req->code != UI_FF_UPLOAD ||
- !req->u.upload.effect) {
- retval = -EINVAL;
- goto out;
- }
-
- ff_up.retval = 0;
- ff_up.effect = *req->u.upload.effect;
- if (req->u.upload.old)
- ff_up.old = *req->u.upload.old;
- else
- memset(&ff_up.old, 0, sizeof(struct ff_effect));
-
- retval = uinput_ff_upload_to_user(p, &ff_up);
+ req = uinput_request_find(udev, ff_erase.request_id);
+ if (!req || req->code != UI_FF_ERASE) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_BEGIN_FF_ERASE:
- if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
-
- req = uinput_request_find(udev, ff_erase.request_id);
- if (!req || req->code != UI_FF_ERASE) {
- retval = -EINVAL;
- goto out;
- }
-
- ff_erase.retval = 0;
- ff_erase.effect_id = req->u.effect_id;
- if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
-
+ ff_erase.retval = 0;
+ ff_erase.effect_id = req->u.effect_id;
+ if (copy_to_user(p, &ff_erase, sizeof(ff_erase))) {
+ retval = -EFAULT;
goto out;
+ }
- case UI_END_FF_UPLOAD:
- retval = uinput_ff_upload_from_user(p, &ff_up);
- if (retval)
- goto out;
+ goto out;
- req = uinput_request_find(udev, ff_up.request_id);
- if (!req || req->code != UI_FF_UPLOAD ||
- !req->u.upload.effect) {
- retval = -EINVAL;
- goto out;
- }
+ case UI_END_FF_UPLOAD:
+ retval = uinput_ff_upload_from_user(p, &ff_up);
+ if (retval)
+ goto out;
- req->retval = ff_up.retval;
- complete(&req->done);
+ req = uinput_request_find(udev, ff_up.request_id);
+ if (!req || req->code != UI_FF_UPLOAD ||
+ !req->u.upload.effect) {
+ retval = -EINVAL;
goto out;
+ }
- case UI_END_FF_ERASE:
- if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
- retval = -EFAULT;
- goto out;
- }
+ req->retval = ff_up.retval;
+ complete(&req->done);
+ goto out;
- req = uinput_request_find(udev, ff_erase.request_id);
- if (!req || req->code != UI_FF_ERASE) {
- retval = -EINVAL;
- goto out;
- }
+ case UI_END_FF_ERASE:
+ if (copy_from_user(&ff_erase, p, sizeof(ff_erase))) {
+ retval = -EFAULT;
+ goto out;
+ }
- req->retval = ff_erase.retval;
- complete(&req->done);
+ req = uinput_request_find(udev, ff_erase.request_id);
+ if (!req || req->code != UI_FF_ERASE) {
+ retval = -EINVAL;
goto out;
+ }
+
+ req->retval = ff_erase.retval;
+ complete(&req->done);
+ goto out;
}
size = _IOC_SIZE(cmd);
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 56bf0ad877c6..e49f08565076 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the mouse drivers.
#
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 850b00e3ad8e..579b899add26 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1587,10 +1587,10 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
return PSMOUSE_GOOD_DATA;
}
-static void alps_flush_packet(unsigned long data)
+static void alps_flush_packet(struct timer_list *t)
{
- struct psmouse *psmouse = (struct psmouse *)data;
- struct alps_data *priv = psmouse->private;
+ struct alps_data *priv = from_timer(priv, t, timer);
+ struct psmouse *psmouse = priv->psmouse;
serio_pause_rx(psmouse->ps2dev.serio);
@@ -2702,7 +2702,7 @@ static int alps_set_protocol(struct psmouse *psmouse,
{
psmouse->private = priv;
- setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+ timer_setup(&priv->timer, alps_flush_packet, 0);
priv->proto_version = protocol->version;
priv->byte0 = protocol->byte0;
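
The same timer API conversion recurs in several drivers below (byd, hil_mlc, hp_sdc, ad7877, ad7879, atmel-wm97xx, cyttsp4). A minimal sketch of the pattern, with purely illustrative struct and field names:

/* Sketch of the timer_setup()/from_timer() pattern applied throughout
 * this series; "my_priv", "timer" and "pending" are illustrative only.
 */
#include <linux/timer.h>

struct my_priv {
	struct timer_list timer;	/* timer embedded in the private struct */
	int pending;
};

static void my_timer_fn(struct timer_list *t)
{
	/* recover the containing structure from the timer_list pointer */
	struct my_priv *priv = from_timer(priv, t, timer);

	priv->pending = 0;
}

static void my_init(struct my_priv *priv)
{
	/* replaces setup_timer(&priv->timer, fn, (unsigned long)priv) */
	timer_setup(&priv->timer, my_timer_fn, 0);
}
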
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index b64b81599f7e..f2aabf7f906f 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -227,6 +227,7 @@
struct byd_data {
struct timer_list timer;
+ struct psmouse *psmouse;
s32 abs_x;
s32 abs_y;
typeof(jiffies) last_touch_time;
@@ -251,10 +252,10 @@ static void byd_report_input(struct psmouse *psmouse)
input_sync(dev);
}
-static void byd_clear_touch(unsigned long data)
+static void byd_clear_touch(struct timer_list *t)
{
- struct psmouse *psmouse = (struct psmouse *)data;
- struct byd_data *priv = psmouse->private;
+ struct byd_data *priv = from_timer(priv, t, timer);
+ struct psmouse *psmouse = priv->psmouse;
serio_pause_rx(psmouse->ps2dev.serio);
priv->touch = false;
@@ -478,7 +479,8 @@ int byd_init(struct psmouse *psmouse)
if (!priv)
return -ENOMEM;
- setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
+ priv->psmouse = psmouse;
+ timer_setup(&priv->timer, byd_clear_touch, 0);
psmouse->private = priv;
psmouse->disconnect = byd_disconnect;
diff --git a/drivers/input/mouse/byd.h b/drivers/input/mouse/byd.h
index d6c120cf36cd..8cb90d904186 100644
--- a/drivers/input/mouse/byd.h
+++ b/drivers/input/mouse/byd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BYD_H
#define _BYD_H
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
index 81f68aaed7c8..1eaddd818004 100644
--- a/drivers/input/mouse/cypress_ps2.h
+++ b/drivers/input/mouse/cypress_ps2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CYPRESS_PS2_H
#define _CYPRESS_PS2_H
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 6d6b092e2da9..2111a85d0b17 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
@@ -1141,10 +1142,13 @@ static int elan_probe(struct i2c_client *client,
return error;
/*
- * Systems using device tree should set up interrupt via DTS,
- * the rest will use the default falling edge interrupts.
+	 * Platform code (ACPI, DTS) should normally set up the interrupt
+	 * trigger for us, but if it did not, fall back to falling-edge
+	 * interrupts to stay compatible with older Chromebooks.
*/
- irqflags = dev->of_node ? 0 : IRQF_TRIGGER_FALLING;
+ irqflags = irq_get_trigger_type(client->irq);
+ if (!irqflags)
+ irqflags = IRQF_TRIGGER_FALLING;
error = devm_request_threaded_irq(dev, client->irq, NULL, elan_isr,
irqflags | IRQF_ONESHOT,
@@ -1255,9 +1259,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
- { "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index ced07391304b..a26d8be6f795 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -2,6 +2,7 @@
* Driver for simulating a mouse on GPIO lines.
*
* Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,9 +12,35 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input-polldev.h>
-#include <linux/gpio.h>
-#include <linux/gpio_mouse.h>
-
+#include <linux/gpio/consumer.h>
+#include <linux/property.h>
+#include <linux/of.h>
+
+/**
+ * struct gpio_mouse
+ * @scan_ms: the scan interval in milliseconds.
+ * @up: GPIO line for up value.
+ * @down: GPIO line for down value.
+ * @left: GPIO line for left value.
+ * @right: GPIO line for right value.
+ * @bleft: GPIO line for left button.
+ * @bmiddle: GPIO line for middle button.
+ * @bright: GPIO line for right button.
+ *
+ * This struct is allocated by the driver at probe time.
+ * It is used by the gpio_mouse driver to set up GPIO lines and to
+ * calculate mouse movement.
+ */
+struct gpio_mouse {
+ u32 scan_ms;
+ struct gpio_desc *up;
+ struct gpio_desc *down;
+ struct gpio_desc *left;
+ struct gpio_desc *right;
+ struct gpio_desc *bleft;
+ struct gpio_desc *bmiddle;
+ struct gpio_desc *bright;
+};
/*
* Timer function which is run every scan_ms ms when the device is opened.
@@ -21,24 +48,22 @@
*/
static void gpio_mouse_scan(struct input_polled_dev *dev)
{
- struct gpio_mouse_platform_data *gpio = dev->private;
+ struct gpio_mouse *gpio = dev->private;
struct input_dev *input = dev->input;
int x, y;
- if (gpio->bleft >= 0)
+ if (gpio->bleft)
input_report_key(input, BTN_LEFT,
- gpio_get_value(gpio->bleft) ^ gpio->polarity);
- if (gpio->bmiddle >= 0)
+ gpiod_get_value(gpio->bleft));
+ if (gpio->bmiddle)
input_report_key(input, BTN_MIDDLE,
- gpio_get_value(gpio->bmiddle) ^ gpio->polarity);
- if (gpio->bright >= 0)
+ gpiod_get_value(gpio->bmiddle));
+ if (gpio->bright)
input_report_key(input, BTN_RIGHT,
- gpio_get_value(gpio->bright) ^ gpio->polarity);
+ gpiod_get_value(gpio->bright));
- x = (gpio_get_value(gpio->right) ^ gpio->polarity)
- - (gpio_get_value(gpio->left) ^ gpio->polarity);
- y = (gpio_get_value(gpio->down) ^ gpio->polarity)
- - (gpio_get_value(gpio->up) ^ gpio->polarity);
+ x = gpiod_get_value(gpio->right) - gpiod_get_value(gpio->left);
+ y = gpiod_get_value(gpio->down) - gpiod_get_value(gpio->up);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
@@ -47,65 +72,61 @@ static void gpio_mouse_scan(struct input_polled_dev *dev)
static int gpio_mouse_probe(struct platform_device *pdev)
{
- struct gpio_mouse_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct gpio_mouse *gmouse;
struct input_polled_dev *input_poll;
struct input_dev *input;
- int pin, i;
- int error;
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data\n");
- error = -ENXIO;
- goto out;
- }
-
- if (pdata->scan_ms < 0) {
- dev_err(&pdev->dev, "invalid scan time\n");
- error = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) {
- pin = pdata->pins[i];
-
- if (pin < 0) {
-
- if (i <= GPIO_MOUSE_PIN_RIGHT) {
- /* Mouse direction is required. */
- dev_err(&pdev->dev,
- "missing GPIO for directions\n");
- error = -EINVAL;
- goto out_free_gpios;
- }
-
- if (i == GPIO_MOUSE_PIN_BLEFT)
- dev_dbg(&pdev->dev, "no left button defined\n");
-
- } else {
- error = gpio_request(pin, "gpio_mouse");
- if (error) {
- dev_err(&pdev->dev, "fail %d pin (%d idx)\n",
- pin, i);
- goto out_free_gpios;
- }
-
- gpio_direction_input(pin);
- }
+ int ret;
+
+ gmouse = devm_kzalloc(dev, sizeof(*gmouse), GFP_KERNEL);
+ if (!gmouse)
+ return -ENOMEM;
+
+	/* Scan interval from device properties, defaulting to 50 ms */
+ ret = device_property_read_u32(dev, "scan-interval-ms",
+ &gmouse->scan_ms);
+ if (ret || gmouse->scan_ms == 0) {
+ dev_warn(dev, "invalid scan time, set to 50 ms\n");
+ gmouse->scan_ms = 50;
}
- input_poll = input_allocate_polled_device();
+ gmouse->up = devm_gpiod_get(dev, "up", GPIOD_IN);
+ if (IS_ERR(gmouse->up))
+ return PTR_ERR(gmouse->up);
+ gmouse->down = devm_gpiod_get(dev, "down", GPIOD_IN);
+ if (IS_ERR(gmouse->down))
+ return PTR_ERR(gmouse->down);
+ gmouse->left = devm_gpiod_get(dev, "left", GPIOD_IN);
+ if (IS_ERR(gmouse->left))
+ return PTR_ERR(gmouse->left);
+ gmouse->right = devm_gpiod_get(dev, "right", GPIOD_IN);
+ if (IS_ERR(gmouse->right))
+ return PTR_ERR(gmouse->right);
+
+ gmouse->bleft = devm_gpiod_get_optional(dev, "button-left", GPIOD_IN);
+ if (IS_ERR(gmouse->bleft))
+ return PTR_ERR(gmouse->bleft);
+ gmouse->bmiddle = devm_gpiod_get_optional(dev, "button-middle",
+ GPIOD_IN);
+ if (IS_ERR(gmouse->bmiddle))
+ return PTR_ERR(gmouse->bmiddle);
+ gmouse->bright = devm_gpiod_get_optional(dev, "button-right",
+ GPIOD_IN);
+ if (IS_ERR(gmouse->bright))
+ return PTR_ERR(gmouse->bright);
+
+ input_poll = devm_input_allocate_polled_device(dev);
if (!input_poll) {
- dev_err(&pdev->dev, "not enough memory for input device\n");
- error = -ENOMEM;
- goto out_free_gpios;
+ dev_err(dev, "not enough memory for input device\n");
+ return -ENOMEM;
}
platform_set_drvdata(pdev, input_poll);
/* set input-polldev handlers */
- input_poll->private = pdata;
+ input_poll->private = gmouse;
input_poll->poll = gpio_mouse_scan;
- input_poll->poll_interval = pdata->scan_ms;
+ input_poll->poll_interval = gmouse->scan_ms;
input = input_poll->input;
input->name = pdev->name;
@@ -114,63 +135,39 @@ static int gpio_mouse_probe(struct platform_device *pdev)
input_set_capability(input, EV_REL, REL_X);
input_set_capability(input, EV_REL, REL_Y);
- if (pdata->bleft >= 0)
+ if (gmouse->bleft)
input_set_capability(input, EV_KEY, BTN_LEFT);
- if (pdata->bmiddle >= 0)
+ if (gmouse->bmiddle)
input_set_capability(input, EV_KEY, BTN_MIDDLE);
- if (pdata->bright >= 0)
+ if (gmouse->bright)
input_set_capability(input, EV_KEY, BTN_RIGHT);
- error = input_register_polled_device(input_poll);
- if (error) {
- dev_err(&pdev->dev, "could not register input device\n");
- goto out_free_polldev;
+ ret = input_register_polled_device(input_poll);
+ if (ret) {
+ dev_err(dev, "could not register input device\n");
+ return ret;
}
- dev_dbg(&pdev->dev, "%d ms scan time, buttons: %s%s%s\n",
- pdata->scan_ms,
- pdata->bleft < 0 ? "" : "left ",
- pdata->bmiddle < 0 ? "" : "middle ",
- pdata->bright < 0 ? "" : "right");
+ dev_dbg(dev, "%d ms scan time, buttons: %s%s%s\n",
+ gmouse->scan_ms,
+		gmouse->bleft ? "left " : "",
+		gmouse->bmiddle ? "middle " : "",
+		gmouse->bright ? "right" : "");
return 0;
-
- out_free_polldev:
- input_free_polled_device(input_poll);
-
- out_free_gpios:
- while (--i >= 0) {
- pin = pdata->pins[i];
- if (pin)
- gpio_free(pin);
- }
- out:
- return error;
}
-static int gpio_mouse_remove(struct platform_device *pdev)
-{
- struct input_polled_dev *input = platform_get_drvdata(pdev);
- struct gpio_mouse_platform_data *pdata = input->private;
- int pin, i;
-
- input_unregister_polled_device(input);
- input_free_polled_device(input);
-
- for (i = 0; i < GPIO_MOUSE_PIN_MAX; i++) {
- pin = pdata->pins[i];
- if (pin >= 0)
- gpio_free(pin);
- }
-
- return 0;
-}
+static const struct of_device_id gpio_mouse_of_match[] = {
+ { .compatible = "gpio-mouse", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_mouse_of_match);
static struct platform_driver gpio_mouse_device_driver = {
.probe = gpio_mouse_probe,
- .remove = gpio_mouse_remove,
.driver = {
.name = "gpio_mouse",
+ .of_match_table = gpio_mouse_of_match,
}
};
module_platform_driver(gpio_mouse_device_driver);
@@ -179,4 +176,3 @@ MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio_mouse"); /* work with hotplug and coldplug */
-
diff --git a/drivers/input/mouse/hgpk.h b/drivers/input/mouse/hgpk.h
index dd686771cfe0..98b7b384229b 100644
--- a/drivers/input/mouse/hgpk.h
+++ b/drivers/input/mouse/hgpk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OLPC HGPK (XO-1) touchpad PS/2 mouse driver
*/
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 38855e425f01..8cd453808cc7 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PSMOUSE_H
#define _PSMOUSE_H
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index 0f586780ceb4..1ae5c1ef3f5b 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse)
/*
* Array of supported hypervisors.
*/
-static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = {
- &x86_hyper_vmware,
-#ifdef CONFIG_KVM_GUEST
- &x86_hyper_kvm,
-#endif
+static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = {
+ X86_HYPER_VMWARE,
+ X86_HYPER_KVM,
};
/**
@@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void)
int i;
for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++)
- if (vmmouse_supported_hypervisors[i] == x86_hyper)
+ if (vmmouse_supported_hypervisors[i] == x86_hyper_type)
return true;
return false;
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 9aaac3dd8613..f17631656987 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RMI4_CORE) += rmi_core.o
rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index f5206e2c767e..5343f2c08f15 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -73,7 +73,7 @@ enum rmi_f54_report_type {
F54_MAX_REPORT_TYPE,
};
-const char *rmi_f54_report_type_names[] = {
+static const char * const rmi_f54_report_type_names[] = {
[F54_REPORT_NONE] = "Unknown",
[F54_8BIT_IMAGE] = "Normalized 8-Bit Image",
[F54_16BIT_IMAGE] = "Normalized 16-Bit Image",
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 225025a0940c..b6ccf39c6a7b 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -312,7 +312,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.dev = &client->dev;
rmi_smb->xport.pdata = *pdata;
rmi_smb->xport.pdata.irq = client->irq;
- rmi_smb->xport.proto_name = "smb2";
+ rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb);
@@ -322,7 +322,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
- if (smbus_version != 2) {
+ if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 767bd9b6e1ed..a3ca07621542 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the input core drivers.
#
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 65605e4ef3cf..d66d01c5373b 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -784,7 +784,7 @@ static void hil_mlcs_process(unsigned long unused)
/************************* Keepalive timer task *********************/
-static void hil_mlcs_timer(unsigned long data)
+static void hil_mlcs_timer(struct timer_list *unused)
{
hil_mlcs_probe = 1;
tasklet_schedule(&hil_mlcs_tasklet);
@@ -998,7 +998,7 @@ int hil_mlc_unregister(hil_mlc *mlc)
static int __init hil_mlc_init(void)
{
- setup_timer(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
+ timer_setup(&hil_mlcs_kicker, &hil_mlcs_timer, 0);
mod_timer(&hil_mlcs_kicker, jiffies + HZ);
tasklet_enable(&hil_mlcs_tasklet);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 8eef6849d066..1d7c7d81a5ef 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -794,7 +794,7 @@ int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback)
/************************* Keepalive timer task *********************/
-static void hp_sdc_kicker(unsigned long data)
+static void hp_sdc_kicker(struct timer_list *unused)
{
tasklet_schedule(&hp_sdc.task);
/* Re-insert the periodic task. */
@@ -909,9 +909,8 @@ static int __init hp_sdc_init(void)
down(&s_sync); /* Wait for t_sync to complete */
/* Create the keepalive task */
- init_timer(&hp_sdc.kicker);
+ timer_setup(&hp_sdc.kicker, hp_sdc_kicker, 0);
hp_sdc.kicker.expires = jiffies + HZ;
- hp_sdc.kicker.function = &hp_sdc_kicker;
add_timer(&hp_sdc.kicker);
hp_sdc.dev_err = 0;
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 6231d63860ee..796289846204 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _I8042_SPARCIO_H
#define _I8042_SPARCIO_H
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index b50e3817f3c4..c62cceb97bb1 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -366,6 +366,7 @@ static int ps2_gpio_probe(struct platform_device *pdev)
gpiod_cansleep(drvdata->gpio_clk)) {
dev_err(dev, "GPIO data or clk are connected via slow bus\n");
error = -EINVAL;
+ goto err_free_serio;
}
drvdata->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index b3e688911fd9..f9e5c793f4f0 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -47,6 +47,8 @@ struct ps2if {
struct serio *io;
struct sa1111_dev *dev;
void __iomem *base;
+ int rx_irq;
+ int tx_irq;
unsigned int open;
spinlock_t lock;
unsigned int head;
@@ -64,22 +66,22 @@ static irqreturn_t ps2_rxint(int irq, void *dev_id)
struct ps2if *ps2if = dev_id;
unsigned int scancode, flag, status;
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
while (status & PS2STAT_RXF) {
if (status & PS2STAT_STP)
- sa1111_writel(PS2STAT_STP, ps2if->base + PS2STAT);
+ writel_relaxed(PS2STAT_STP, ps2if->base + PS2STAT);
flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
(status & PS2STAT_RXP ? 0 : SERIO_PARITY);
- scancode = sa1111_readl(ps2if->base + PS2DATA) & 0xff;
+ scancode = readl_relaxed(ps2if->base + PS2DATA) & 0xff;
if (hweight8(scancode) & 1)
flag ^= SERIO_PARITY;
serio_interrupt(ps2if->io, scancode, flag);
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
}
return IRQ_HANDLED;
@@ -94,12 +96,12 @@ static irqreturn_t ps2_txint(int irq, void *dev_id)
unsigned int status;
spin_lock(&ps2if->lock);
- status = sa1111_readl(ps2if->base + PS2STAT);
+ status = readl_relaxed(ps2if->base + PS2STAT);
if (ps2if->head == ps2if->tail) {
disable_irq_nosync(irq);
/* done */
} else if (status & PS2STAT_TXE) {
- sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
+ writel_relaxed(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
}
spin_unlock(&ps2if->lock);
@@ -122,11 +124,11 @@ static int ps2_write(struct serio *io, unsigned char val)
/*
* If the TX register is empty, we can go straight out.
*/
- if (sa1111_readl(ps2if->base + PS2STAT) & PS2STAT_TXE) {
- sa1111_writel(val, ps2if->base + PS2DATA);
+ if (readl_relaxed(ps2if->base + PS2STAT) & PS2STAT_TXE) {
+ writel_relaxed(val, ps2if->base + PS2DATA);
} else {
if (ps2if->head == ps2if->tail)
- enable_irq(ps2if->dev->irq[1]);
+ enable_irq(ps2if->tx_irq);
head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1);
if (head != ps2if->tail) {
ps2if->buf[ps2if->head] = val;
@@ -147,30 +149,30 @@ static int ps2_open(struct serio *io)
if (ret)
return ret;
- ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
+ ret = request_irq(ps2if->rx_irq, ps2_rxint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
- ps2if->dev->irq[0], ret);
+ ps2if->rx_irq, ret);
sa1111_disable_device(ps2if->dev);
return ret;
}
- ret = request_irq(ps2if->dev->irq[1], ps2_txint, 0,
+ ret = request_irq(ps2if->tx_irq, ps2_txint, 0,
SA1111_DRIVER_NAME(ps2if->dev), ps2if);
if (ret) {
printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
- ps2if->dev->irq[1], ret);
- free_irq(ps2if->dev->irq[0], ps2if);
+ ps2if->tx_irq, ret);
+ free_irq(ps2if->rx_irq, ps2if);
sa1111_disable_device(ps2if->dev);
return ret;
}
ps2if->open = 1;
- enable_irq_wake(ps2if->dev->irq[0]);
+ enable_irq_wake(ps2if->rx_irq);
- sa1111_writel(PS2CR_ENA, ps2if->base + PS2CR);
+ writel_relaxed(PS2CR_ENA, ps2if->base + PS2CR);
return 0;
}
@@ -178,14 +180,14 @@ static void ps2_close(struct serio *io)
{
struct ps2if *ps2if = io->port_data;
- sa1111_writel(0, ps2if->base + PS2CR);
+ writel_relaxed(0, ps2if->base + PS2CR);
- disable_irq_wake(ps2if->dev->irq[0]);
+ disable_irq_wake(ps2if->rx_irq);
ps2if->open = 0;
- free_irq(ps2if->dev->irq[1], ps2if);
- free_irq(ps2if->dev->irq[0], ps2if);
+ free_irq(ps2if->tx_irq, ps2if);
+ free_irq(ps2if->rx_irq, ps2if);
sa1111_disable_device(ps2if->dev);
}
@@ -198,7 +200,7 @@ static void ps2_clear_input(struct ps2if *ps2if)
int maxread = 100;
while (maxread--) {
- if ((sa1111_readl(ps2if->base + PS2DATA) & 0xff) == 0xff)
+ if ((readl_relaxed(ps2if->base + PS2DATA) & 0xff) == 0xff)
break;
}
}
@@ -208,11 +210,11 @@ static unsigned int ps2_test_one(struct ps2if *ps2if,
{
unsigned int val;
- sa1111_writel(PS2CR_ENA | mask, ps2if->base + PS2CR);
+ writel_relaxed(PS2CR_ENA | mask, ps2if->base + PS2CR);
- udelay(2);
+ udelay(10);
- val = sa1111_readl(ps2if->base + PS2STAT);
+ val = readl_relaxed(ps2if->base + PS2STAT);
return val & (PS2STAT_KBC | PS2STAT_KBD);
}
@@ -243,7 +245,7 @@ static int ps2_test(struct ps2if *ps2if)
ret = -ENODEV;
}
- sa1111_writel(0, ps2if->base + PS2CR);
+ writel_relaxed(0, ps2if->base + PS2CR);
return ret;
}
@@ -264,7 +266,6 @@ static int ps2_probe(struct sa1111_dev *dev)
goto free;
}
-
serio->id.type = SERIO_8042;
serio->write = ps2_write;
serio->open = ps2_open;
@@ -279,6 +280,18 @@ static int ps2_probe(struct sa1111_dev *dev)
spin_lock_init(&ps2if->lock);
+ ps2if->rx_irq = sa1111_get_irq(dev, 0);
+ if (ps2if->rx_irq <= 0) {
+ ret = ps2if->rx_irq ? : -ENXIO;
+ goto free;
+ }
+
+ ps2if->tx_irq = sa1111_get_irq(dev, 1);
+ if (ps2if->tx_irq <= 0) {
+ ret = ps2if->tx_irq ? : -ENXIO;
+ goto free;
+ }
+
/*
* Request the physical region for this PS2 port.
*/
@@ -297,8 +310,8 @@ static int ps2_probe(struct sa1111_dev *dev)
sa1111_enable_device(ps2if->dev);
/* Incoming clock is 8MHz */
- sa1111_writel(0, ps2if->base + PS2CLKDIV);
- sa1111_writel(127, ps2if->base + PS2PRECNT);
+ writel_relaxed(0, ps2if->base + PS2CLKDIV);
+ writel_relaxed(127, ps2if->base + PS2PRECNT);
/*
* Flush any pending input.
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index bb0349fa64bc..fd03e55768c9 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -255,6 +255,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k
case KE_VSW:
input_report_switch(dev, ke->sw.code, value);
+ input_sync(dev);
break;
}
}
diff --git a/drivers/input/tablet/Makefile b/drivers/input/tablet/Makefile
index 200fc4e11987..8279ccc18b0a 100644
--- a/drivers/input/tablet/Makefile
+++ b/drivers/input/tablet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the tablet drivers
#
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 64b30fe273fd..38a226f9fcbd 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -316,6 +316,16 @@ config TOUCHSCREEN_EGALAX_SERIAL
To compile this driver as a module, choose M here: the
module will be called egalax_ts_serial.
+config TOUCHSCREEN_EXC3000
+ tristate "EETI EXC3000 multi-touch panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C connected EETI
+ EXC3000 multi-touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called exc3000.
+
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -344,6 +354,17 @@ config TOUCHSCREEN_GOODIX
To compile this driver as a module, choose M here: the
module will be called goodix.
+config TOUCHSCREEN_HIDEEP
+ tristate "HiDeep Touch IC"
+ depends on I2C
+ help
+	  Say Y here if you have a touchscreen using a HiDeep touch IC.
+
+ If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+ module will be called hideep_ts.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
@@ -383,6 +404,17 @@ config TOUCHSCREEN_S3C2410
To compile this driver as a module, choose M here: the
module will be called s3c2410_ts.
+config TOUCHSCREEN_S6SY761
+ tristate "Samsung S6SY761 Touchscreen driver"
+ depends on I2C
+ help
+	  Say Y if you have a Samsung S6SY761 touchscreen.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+ module will be called s6sy761.
+
config TOUCHSCREEN_GUNZE
tristate "Gunze AHL-51S touchscreen"
select SERIO
@@ -727,7 +759,7 @@ config TOUCHSCREEN_WM831X
config TOUCHSCREEN_WM97XX
tristate "Support for WM97xx AC97 touchscreen controllers"
- depends on AC97_BUS
+ depends on AC97_BUS || AC97_BUS_NEW
help
Say Y here if you have a Wolfson Microelectronics WM97xx
touchscreen connected to your system. Note that this option
@@ -949,7 +981,7 @@ config TOUCHSCREEN_USB_NEXIO
config TOUCHSCREEN_USB_EASYTOUCH
default y
- bool "EasyTouch USB Touch controller device support" if EMBEDDED
+ bool "EasyTouch USB Touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
help
Say Y here if you have an EasyTouch USB Touch controller.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 6badce87037b..d2a2b3b7af27 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the touchscreen drivers.
#
@@ -37,8 +38,10 @@ obj-$(CONFIG_TOUCHSCREEN_ELAN) += elants_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_EGALAX_SERIAL) += egalax_ts_serial.o
+obj-$(CONFIG_TOUCHSCREEN_EXC3000) += exc3000.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX) += goodix.o
+obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_IMX6UL_TSC) += imx6ul_tsc.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
@@ -64,6 +67,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_S6SY761) += s6sy761.o
obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 9c250ae780d9..0381c7809d1b 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -385,9 +385,9 @@ static inline void ad7877_ts_event_release(struct ad7877 *ts)
input_sync(input_dev);
}
-static void ad7877_timer(unsigned long handle)
+static void ad7877_timer(struct timer_list *t)
{
- struct ad7877 *ts = (void *)handle;
+ struct ad7877 *ts = from_timer(ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
@@ -718,7 +718,7 @@ static int ad7877_probe(struct spi_device *spi)
ts->spi = spi;
ts->input = input_dev;
- setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts);
+ timer_setup(&ts->timer, ad7877_timer, 0);
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 196028c45210..6bad23ee47a1 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -237,9 +237,9 @@ static void ad7879_ts_event_release(struct ad7879 *ts)
input_sync(input_dev);
}
-static void ad7879_timer(unsigned long handle)
+static void ad7879_timer(struct timer_list *t)
{
- struct ad7879 *ts = (void *)handle;
+ struct ad7879 *ts = from_timer(ts, t, timer);
ad7879_ts_event_release(ts);
}
@@ -524,13 +524,6 @@ static int ad7879_parse_dt(struct device *dev, struct ad7879 *ts)
return 0;
}
-static void ad7879_cleanup_sysfs(void *_ts)
-{
- struct ad7879 *ts = _ts;
-
- sysfs_remove_group(&ts->dev->kobj, &ad7879_attr_group);
-}
-
int ad7879_probe(struct device *dev, struct regmap *regmap,
int irq, u16 bustype, u8 devid)
{
@@ -577,7 +570,7 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
ts->irq = irq;
ts->regmap = regmap;
- setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
+ timer_setup(&ts->timer, ad7879_timer, 0);
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
input_dev->name = "AD7879 Touchscreen";
@@ -658,11 +651,7 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
__ad7879_disable(ts);
- err = sysfs_create_group(&dev->kobj, &ad7879_attr_group);
- if (err)
- return err;
-
- err = devm_add_action_or_reset(dev, ad7879_cleanup_sysfs, ts);
+ err = devm_device_add_group(dev, &ad7879_attr_group);
if (err)
return err;
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index f9dcbd63e598..b35b640fdadf 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -117,6 +117,7 @@ static int ar1021_i2c_probe(struct i2c_client *client,
input->open = ar1021_i2c_open;
input->close = ar1021_i2c_close;
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
input_set_capability(input, EV_KEY, BTN_TOUCH);
input_set_abs_params(input, ABS_X, 0, AR1021_MAX_X, 0, 0);
input_set_abs_params(input, ABS_Y, 0, AR1021_MAX_Y, 0, 0);
diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c
index 8cf0b2be2df4..9140a43cfe20 100644
--- a/drivers/input/touchscreen/atmel-wm97xx.c
+++ b/drivers/input/touchscreen/atmel-wm97xx.c
@@ -208,9 +208,12 @@ static void atmel_wm97xx_acc_pen_up(struct wm97xx *wm)
}
}
-static void atmel_wm97xx_pen_timer(unsigned long data)
+static void atmel_wm97xx_pen_timer(struct timer_list *t)
{
- atmel_wm97xx_acc_pen_up((struct wm97xx *)data);
+ struct atmel_wm97xx *atmel_wm97xx = from_timer(atmel_wm97xx, t,
+ pen_timer);
+
+ atmel_wm97xx_acc_pen_up(atmel_wm97xx->wm);
}
static int atmel_wm97xx_acc_startup(struct wm97xx *wm)
@@ -348,8 +351,7 @@ static int __init atmel_wm97xx_probe(struct platform_device *pdev)
atmel_wm97xx->gpio_pen = atmel_gpio_line;
atmel_wm97xx->gpio_irq = gpio_to_irq(atmel_wm97xx->gpio_pen);
- setup_timer(&atmel_wm97xx->pen_timer, atmel_wm97xx_pen_timer,
- (unsigned long)wm);
+ timer_setup(&atmel_wm97xx->pen_timer, atmel_wm97xx_pen_timer, 0);
ret = request_irq(atmel_wm97xx->ac97c_irq,
atmel_wm97xx_channel_b_interrupt,
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index beaf61ce775b..727c3232517c 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -201,13 +201,21 @@ static int cyttsp4_si_get_cydata(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.test_ofs <= si->si_ofs.cydata_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset test_ofs: %zu, cydata_ofs: %zu\n",
+ __func__, si->si_ofs.test_ofs, si->si_ofs.cydata_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.cydata_size = si->si_ofs.test_ofs - si->si_ofs.cydata_ofs;
dev_dbg(cd->dev, "%s: cydata size: %zd\n", __func__,
si->si_ofs.cydata_size);
p = krealloc(si->si_ptrs.cydata, si->si_ofs.cydata_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc cydata memory\n", __func__);
+ dev_err(cd->dev, "%s: failed to allocate cydata memory\n",
+ __func__);
return -ENOMEM;
}
si->si_ptrs.cydata = p;
@@ -270,11 +278,19 @@ static int cyttsp4_si_get_test_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.pcfg_ofs <= si->si_ofs.test_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset pcfg_ofs: %zu, test_ofs: %zu\n",
+ __func__, si->si_ofs.pcfg_ofs, si->si_ofs.test_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.test_size = si->si_ofs.pcfg_ofs - si->si_ofs.test_ofs;
p = krealloc(si->si_ptrs.test, si->si_ofs.test_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc test memory\n", __func__);
+ dev_err(cd->dev, "%s: failed to allocate test memory\n",
+ __func__);
return -ENOMEM;
}
si->si_ptrs.test = p;
@@ -321,14 +337,20 @@ static int cyttsp4_si_get_pcfg_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.opcfg_ofs <= si->si_ofs.pcfg_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset opcfg_ofs: %zu, pcfg_ofs: %zu\n",
+ __func__, si->si_ofs.opcfg_ofs, si->si_ofs.pcfg_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.pcfg_size = si->si_ofs.opcfg_ofs - si->si_ofs.pcfg_ofs;
p = krealloc(si->si_ptrs.pcfg, si->si_ofs.pcfg_size, GFP_KERNEL);
if (p == NULL) {
- rc = -ENOMEM;
- dev_err(cd->dev, "%s: fail alloc pcfg memory r=%d\n",
- __func__, rc);
- return rc;
+ dev_err(cd->dev, "%s: failed to allocate pcfg memory\n",
+ __func__);
+ return -ENOMEM;
}
si->si_ptrs.pcfg = p;
@@ -367,13 +389,20 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
void *p;
int rc;
+ if (si->si_ofs.ddata_ofs <= si->si_ofs.opcfg_ofs) {
+ dev_err(cd->dev,
+ "%s: invalid offset ddata_ofs: %zu, opcfg_ofs: %zu\n",
+ __func__, si->si_ofs.ddata_ofs, si->si_ofs.opcfg_ofs);
+ return -EINVAL;
+ }
+
si->si_ofs.opcfg_size = si->si_ofs.ddata_ofs - si->si_ofs.opcfg_ofs;
p = krealloc(si->si_ptrs.opcfg, si->si_ofs.opcfg_size, GFP_KERNEL);
if (p == NULL) {
- dev_err(cd->dev, "%s: fail alloc opcfg memory\n", __func__);
- rc = -ENOMEM;
- goto cyttsp4_si_get_opcfg_data_exit;
+ dev_err(cd->dev, "%s: failed to allocate opcfg memory\n",
+ __func__);
+ return -ENOMEM;
}
si->si_ptrs.opcfg = p;
@@ -382,7 +411,7 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
if (rc < 0) {
dev_err(cd->dev, "%s: fail read opcfg data r=%d\n",
__func__, rc);
- goto cyttsp4_si_get_opcfg_data_exit;
+ return rc;
}
si->si_ofs.cmd_ofs = si->si_ptrs.opcfg->cmd_ofs;
si->si_ofs.rep_ofs = si->si_ptrs.opcfg->rep_ofs;
@@ -447,8 +476,7 @@ static int cyttsp4_si_get_opcfg_data(struct cyttsp4 *cd)
cyttsp4_pr_buf(cd->dev, cd->pr_buf, (u8 *)si->si_ptrs.opcfg,
si->si_ofs.opcfg_size, "sysinfo_opcfg_data");
-cyttsp4_si_get_opcfg_data_exit:
- return rc;
+ return 0;
}
static int cyttsp4_si_get_ddata(struct cyttsp4 *cd)
@@ -1237,9 +1265,9 @@ static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
del_timer_sync(&cd->watchdog_timer);
}
-static void cyttsp4_watchdog_timer(unsigned long handle)
+static void cyttsp4_watchdog_timer(struct timer_list *t)
{
- struct cyttsp4 *cd = (struct cyttsp4 *)handle;
+ struct cyttsp4 *cd = from_timer(cd, t, watchdog_timer);
dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
@@ -2074,8 +2102,7 @@ struct cyttsp4 *cyttsp4_probe(const struct cyttsp4_bus_ops *ops,
}
/* Setup watchdog timer */
- setup_timer(&cd->watchdog_timer, cyttsp4_watchdog_timer,
- (unsigned long)cd);
+ timer_setup(&cd->watchdog_timer, cyttsp4_watchdog_timer, 0);
/*
* call startup directly to ensure that the device
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5bf63f76ddda..c53a3d7239e7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -70,8 +70,10 @@
#define EDT_RAW_DATA_DELAY 1000 /* usec */
enum edt_ver {
- M06,
- M09,
+ EDT_M06,
+ EDT_M09,
+ EDT_M12,
+ GENERIC_FT,
};
struct edt_reg_addr {
@@ -179,14 +181,16 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
int error;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
cmd = 0xf9; /* tell the controller to send touch data */
offset = 5; /* where the actual touch data starts */
tplen = 4; /* data comes in so called frames */
crclen = 1; /* length of the crc data */
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
cmd = 0x0;
offset = 3;
tplen = 6;
@@ -209,8 +213,8 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
goto out;
}
- /* M09 does not send header or CRC */
- if (tsdata->version == M06) {
+ /* M09/M12 does not send header or CRC */
+ if (tsdata->version == EDT_M06) {
if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa ||
rdbuf[2] != datalen) {
dev_err_ratelimited(dev,
@@ -233,7 +237,7 @@ static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
continue;
/* M06 sometimes sends bogus coordinates in TOUCH_DOWN */
- if (tsdata->version == M06 && type == TOUCH_EVENT_DOWN)
+ if (tsdata->version == EDT_M06 && type == TOUCH_EVENT_DOWN)
continue;
x = ((buf[0] << 8) | buf[1]) & 0x0fff;
@@ -264,14 +268,16 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
u8 wrbuf[4];
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
wrbuf[2] = value;
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
wrbuf, 0, NULL);
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
wrbuf[0] = addr;
wrbuf[1] = value;
@@ -290,7 +296,7 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
int error;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
@@ -309,7 +315,9 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
}
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
wrbuf[0] = addr;
error = edt_ft5x06_ts_readwrite(tsdata->client, 1,
wrbuf, 1, rdbuf);
@@ -368,11 +376,13 @@ static ssize_t edt_ft5x06_setting_show(struct device *dev,
}
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
addr = attr->addr_m06;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
addr = attr->addr_m09;
break;
@@ -437,11 +447,13 @@ static ssize_t edt_ft5x06_setting_store(struct device *dev,
}
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
addr = attr->addr_m06;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
+ case GENERIC_FT:
addr = attr->addr_m09;
break;
@@ -466,14 +478,18 @@ out:
return error ?: count;
}
+/* m06, m09: range 0-31, m12: range 0-5 */
static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN,
M09_REGISTER_GAIN, 0, 31);
+/* m06, m09: range 0-31, m12: range 0-16 */
static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET,
M09_REGISTER_OFFSET, 0, 31);
+/* m06: range 20 to 80, m09: range 0 to 30, m12: range 1 to 255... */
static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD,
- M09_REGISTER_THRESHOLD, 0, 80);
+ M09_REGISTER_THRESHOLD, 0, 255);
+/* m06: range 3 to 14, m12: (0x64: 100Hz) */
static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE,
- NO_REGISTER, 3, 14);
+ NO_REGISTER, 0, 255);
static struct attribute *edt_ft5x06_attrs[] = {
&edt_ft5x06_attr_gain.dattr.attr,
@@ -508,7 +524,7 @@ static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
}
/* mode register is 0x3c when in the work mode */
- if (tsdata->version == M09)
+ if (tsdata->version != EDT_M06)
goto m09_out;
error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
@@ -545,7 +561,7 @@ err_out:
return error;
m09_out:
- dev_err(&client->dev, "No factory mode support for M09\n");
+ dev_err(&client->dev, "No factory mode support for M09/M12/GENERIC_FT\n");
return -EINVAL;
}
@@ -770,16 +786,17 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
* to have garbage in there
*/
memset(rdbuf, 0, sizeof(rdbuf));
- error = edt_ft5x06_ts_readwrite(client, 1, "\xbb",
+ error = edt_ft5x06_ts_readwrite(client, 1, "\xBB",
EDT_NAME_LEN - 1, rdbuf);
if (error)
return error;
- /* if we find something consistent, stay with that assumption
- * at least M09 won't send 3 bytes here
+ /* Probe content for something consistent.
+ * M06 starts with a response byte, M12 gives the data directly.
+ * M09/Generic does not provide model number information.
*/
- if (!(strncasecmp(rdbuf + 1, "EP0", 3))) {
- tsdata->version = M06;
+ if (!strncasecmp(rdbuf + 1, "EP0", 3)) {
+ tsdata->version = EDT_M06;
/* remove last '$' end marker */
rdbuf[EDT_NAME_LEN - 1] = '\0';
@@ -792,9 +809,31 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
*p++ = '\0';
strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+ } else if (!strncasecmp(rdbuf, "EP0", 3)) {
+ tsdata->version = EDT_M12;
+
+ /* remove last '$' end marker */
+ rdbuf[EDT_NAME_LEN - 2] = '\0';
+ if (rdbuf[EDT_NAME_LEN - 3] == '$')
+ rdbuf[EDT_NAME_LEN - 3] = '\0';
+
+ /* look for Model/Version separator */
+ p = strchr(rdbuf, '*');
+ if (p)
+ *p++ = '\0';
+ strlcpy(model_name, rdbuf, EDT_NAME_LEN);
+ strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
} else {
- /* since there are only two versions around (M06, M09) */
- tsdata->version = M09;
+ /* If it is not an EDT M06/M12 touchscreen, then the model
+ * detection is a bit hairy. The different ft5x06
+		 * firmwares around don't reliably implement the
+		 * identification registers. Well, we'll take a shot.
+		 *
+		 * The main difference between generic focaltech-based
+		 * touchscreens and EDT M09 is that we know how to retrieve
+ * the max coordinates for the latter.
+ */
+ tsdata->version = GENERIC_FT;
error = edt_ft5x06_ts_readwrite(client, 1, "\xA6",
2, rdbuf);
@@ -808,8 +847,34 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
if (error)
return error;
- snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
- rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ /* This "model identification" is not exact. Unfortunately
+ * not all firmwares for the ft5x06 put useful values in
+ * the identification registers.
+ */
+ switch (rdbuf[0]) {
+ case 0x35: /* EDT EP0350M09 */
+ case 0x43: /* EDT EP0430M09 */
+ case 0x50: /* EDT EP0500M09 */
+ case 0x57: /* EDT EP0570M09 */
+ case 0x70: /* EDT EP0700M09 */
+ tsdata->version = EDT_M09;
+ snprintf(model_name, EDT_NAME_LEN, "EP0%i%i0M09",
+ rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ break;
+ case 0xa1: /* EDT EP1010ML00 */
+ tsdata->version = EDT_M09;
+ snprintf(model_name, EDT_NAME_LEN, "EP%i%i0ML00",
+ rdbuf[0] >> 4, rdbuf[0] & 0x0F);
+ break;
+ case 0x5a: /* Solomon Goldentek Display */
+ snprintf(model_name, EDT_NAME_LEN, "GKTW50SCED1R0");
+ break;
+ default:
+ snprintf(model_name, EDT_NAME_LEN,
+ "generic ft5x06 (%02x)",
+ rdbuf[0]);
+ break;
+ }
}
return 0;
@@ -853,8 +918,17 @@ edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
if (reg_addr->reg_report_rate != NO_REGISTER)
tsdata->report_rate = edt_ft5x06_register_read(tsdata,
reg_addr->reg_report_rate);
- tsdata->num_x = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_x);
- tsdata->num_y = edt_ft5x06_register_read(tsdata, reg_addr->reg_num_y);
+ if (tsdata->version == EDT_M06 ||
+ tsdata->version == EDT_M09 ||
+ tsdata->version == EDT_M12) {
+ tsdata->num_x = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_num_x);
+ tsdata->num_y = edt_ft5x06_register_read(tsdata,
+ reg_addr->reg_num_y);
+ } else {
+ tsdata->num_x = -1;
+ tsdata->num_y = -1;
+ }
}
static void
@@ -863,7 +937,7 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
struct edt_reg_addr *reg_addr = &tsdata->reg_addr;
switch (tsdata->version) {
- case M06:
+ case EDT_M06:
reg_addr->reg_threshold = WORK_REGISTER_THRESHOLD;
reg_addr->reg_report_rate = WORK_REGISTER_REPORT_RATE;
reg_addr->reg_gain = WORK_REGISTER_GAIN;
@@ -872,7 +946,8 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_num_y = WORK_REGISTER_NUM_Y;
break;
- case M09:
+ case EDT_M09:
+ case EDT_M12:
reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
reg_addr->reg_report_rate = NO_REGISTER;
reg_addr->reg_gain = M09_REGISTER_GAIN;
@@ -880,6 +955,13 @@ edt_ft5x06_ts_set_regs(struct edt_ft5x06_ts_data *tsdata)
reg_addr->reg_num_x = M09_REGISTER_NUM_X;
reg_addr->reg_num_y = M09_REGISTER_NUM_Y;
break;
+
+ case GENERIC_FT:
+		/* this is guesswork */
+ reg_addr->reg_threshold = M09_REGISTER_THRESHOLD;
+ reg_addr->reg_gain = M09_REGISTER_GAIN;
+ reg_addr->reg_offset = M09_REGISTER_OFFSET;
+ break;
}
}
@@ -969,10 +1051,20 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
input->id.bustype = BUS_I2C;
input->dev.parent = &client->dev;
- input_set_abs_params(input, ABS_MT_POSITION_X,
- 0, tsdata->num_x * 64 - 1, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- 0, tsdata->num_y * 64 - 1, 0, 0);
+ if (tsdata->version == EDT_M06 ||
+ tsdata->version == EDT_M09 ||
+ tsdata->version == EDT_M12) {
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ 0, tsdata->num_x * 64 - 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ 0, tsdata->num_y * 64 - 1, 0, 0);
+ } else {
+ /* Unknown maximum values. Specify via devicetree */
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ 0, 65535, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ 0, 65535, 0, 0);
+ }
touchscreen_parse_properties(input, true, &tsdata->prop);
@@ -998,13 +1090,13 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ error = devm_device_add_group(&client->dev, &edt_ft5x06_attr_group);
if (error)
return error;
error = input_register_device(input);
if (error)
- goto err_remove_attrs;
+ return error;
edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
device_init_wakeup(&client->dev, 1);
@@ -1016,10 +1108,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client,
tsdata->reset_gpio ? desc_to_gpio(tsdata->reset_gpio) : -1);
return 0;
-
-err_remove_attrs:
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
- return error;
}
static int edt_ft5x06_ts_remove(struct i2c_client *client)
@@ -1027,7 +1115,6 @@ static int edt_ft5x06_ts_remove(struct i2c_client *client)
struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
edt_ft5x06_ts_teardown_debugfs(tsdata);
- sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
return 0;
}
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0f4cda7282a2..e102d7764bc2 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1070,13 +1070,6 @@ static const struct attribute_group elants_attribute_group = {
.attrs = elants_attributes,
};
-static void elants_i2c_remove_sysfs_group(void *_data)
-{
- struct elants_data *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
-}
-
static int elants_i2c_power_on(struct elants_data *ts)
{
int error;
@@ -1289,23 +1282,13 @@ static int elants_i2c_probe(struct i2c_client *client,
if (!client->dev.of_node)
device_init_wakeup(&client->dev, true);
- error = sysfs_create_group(&client->dev.kobj, &elants_attribute_group);
+ error = devm_device_add_group(&client->dev, &elants_attribute_group);
if (error) {
dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
error);
return error;
}
- error = devm_add_action(&client->dev,
- elants_i2c_remove_sysfs_group, ts);
- if (error) {
- elants_i2c_remove_sysfs_group(ts);
- dev_err(&client->dev,
- "Failed to add sysfs cleanup action: %d\n",
- error);
- return error;
- }
-
return 0;
}
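
Several conversions in this series (ad7879 and elants_i2c above, edt-ft5x06 below) switch to devm_device_add_group(). Roughly, it bundles the open-coded sequence being removed; a sketch with illustrative names:

/* Rough sketch of what devm_device_add_group() replaces in these probe
 * paths: creating a sysfs group and registering a devres action that
 * removes it again on driver detach. Names are illustrative only.
 */
#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	NULL,	/* real drivers list their device attributes here */
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static void example_remove_group(void *data)
{
	struct device *dev = data;

	sysfs_remove_group(&dev->kobj, &example_attr_group);
}

static int example_add_group(struct device *dev)
{
	int error;

	error = sysfs_create_group(&dev->kobj, &example_attr_group);
	if (error)
		return error;

	/* devm_device_add_group() folds these two steps into one call */
	return devm_add_action_or_reset(dev, example_remove_group, dev);
}
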
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
new file mode 100644
index 000000000000..37437a53cd1a
--- /dev/null
+++ b/drivers/input/touchscreen/exc3000.c
@@ -0,0 +1,223 @@
+/*
+ * Driver for I2C connected EETI EXC3000 multiple touch controller
+ *
+ * Copyright (C) 2017 Ahmet Inan <inan@distec.de>
+ *
+ * minimal implementation based on egalax_ts.c and egalax_i2c.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#define EXC3000_NUM_SLOTS 10
+#define EXC3000_SLOTS_PER_FRAME 5
+#define EXC3000_LEN_FRAME 66
+#define EXC3000_LEN_POINT 10
+#define EXC3000_MT_EVENT 6
+#define EXC3000_TIMEOUT_MS 100
+
+struct exc3000_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+ struct timer_list timer;
+ u8 buf[2 * EXC3000_LEN_FRAME];
+};
+
+static void exc3000_report_slots(struct input_dev *input,
+ struct touchscreen_properties *prop,
+ const u8 *buf, int num)
+{
+ for (; num--; buf += EXC3000_LEN_POINT) {
+ if (buf[0] & BIT(0)) {
+ input_mt_slot(input, buf[1]);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ touchscreen_report_pos(input, prop,
+ get_unaligned_le16(buf + 2),
+ get_unaligned_le16(buf + 4),
+ true);
+ }
+ }
+}
+
+static void exc3000_timer(struct timer_list *t)
+{
+ struct exc3000_data *data = from_timer(data, t, timer);
+
+ input_mt_sync_frame(data->input);
+ input_sync(data->input);
+}
+
+static int exc3000_read_frame(struct i2c_client *client, u8 *buf)
+{
+ int ret;
+
+ ret = i2c_master_send(client, "'", 2);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 2)
+ return -EIO;
+
+ ret = i2c_master_recv(client, buf, EXC3000_LEN_FRAME);
+ if (ret < 0)
+ return ret;
+
+ if (ret != EXC3000_LEN_FRAME)
+ return -EIO;
+
+ if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME ||
+ buf[2] != EXC3000_MT_EVENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int exc3000_read_data(struct i2c_client *client,
+ u8 *buf, int *n_slots)
+{
+ int error;
+
+ error = exc3000_read_frame(client, buf);
+ if (error)
+ return error;
+
+ *n_slots = buf[3];
+ if (!*n_slots || *n_slots > EXC3000_NUM_SLOTS)
+ return -EINVAL;
+
+ if (*n_slots > EXC3000_SLOTS_PER_FRAME) {
+ /* Read 2nd frame to get the rest of the contacts. */
+ error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME);
+ if (error)
+ return error;
+
+ /* 2nd chunk must have number of contacts set to 0. */
+ if (buf[EXC3000_LEN_FRAME + 3] != 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t exc3000_interrupt(int irq, void *dev_id)
+{
+ struct exc3000_data *data = dev_id;
+ struct input_dev *input = data->input;
+ u8 *buf = data->buf;
+ int slots, total_slots;
+ int error;
+
+ error = exc3000_read_data(data->client, buf, &total_slots);
+ if (error) {
+ /* Schedule a timer to release "stuck" contacts */
+ mod_timer(&data->timer,
+ jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+ goto out;
+ }
+
+ /*
+ * We read full state successfully, no contacts will be "stuck".
+ */
+ del_timer_sync(&data->timer);
+
+ while (total_slots > 0) {
+ slots = min(total_slots, EXC3000_SLOTS_PER_FRAME);
+ exc3000_report_slots(input, &data->prop, buf + 4, slots);
+ total_slots -= slots;
+ buf += EXC3000_LEN_FRAME;
+ }
+
+ input_mt_sync_frame(input);
+ input_sync(input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int exc3000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct exc3000_data *data;
+ struct input_dev *input;
+ int error;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ timer_setup(&data->timer, exc3000_timer, 0);
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ data->input = input;
+
+ input->name = "EETI EXC3000 Touch Screen";
+ input->id.bustype = BUS_I2C;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
+ touchscreen_parse_properties(input, true, &data->prop);
+
+ error = input_mt_init_slots(input, EXC3000_NUM_SLOTS,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return error;
+
+ error = input_register_device(input);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, exc3000_interrupt, IRQF_ONESHOT,
+ client->name, data);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct i2c_device_id exc3000_id[] = {
+ { "exc3000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, exc3000_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id exc3000_of_match[] = {
+ { .compatible = "eeti,exc3000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, exc3000_of_match);
+#endif
+
+static struct i2c_driver exc3000_driver = {
+ .driver = {
+ .name = "exc3000",
+ .of_match_table = of_match_ptr(exc3000_of_match),
+ },
+ .id_table = exc3000_id,
+ .probe = exc3000_probe,
+};
+
+module_i2c_driver(exc3000_driver);
+
+MODULE_AUTHOR("Ahmet Inan <inan@distec.de>");
+MODULE_DESCRIPTION("I2C connected EETI EXC3000 multiple touch controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b3bbad7d2282..69d0b8cbc71f 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -31,9 +31,18 @@
#include <linux/of.h>
#include <asm/unaligned.h>
+struct goodix_ts_data;
+
+struct goodix_chip_data {
+ u16 config_addr;
+ int config_len;
+ int (*check_config)(struct goodix_ts_data *, const struct firmware *);
+};
+
struct goodix_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
+ const struct goodix_chip_data *chip;
int abs_x_max;
int abs_y_max;
bool swapped_x_y;
@@ -41,7 +50,6 @@ struct goodix_ts_data {
bool inverted_y;
unsigned int max_touch_num;
unsigned int int_trigger_type;
- int cfg_len;
struct gpio_desc *gpiod_int;
struct gpio_desc *gpiod_rst;
u16 id;
@@ -69,7 +77,8 @@ struct goodix_ts_data {
#define GOODIX_CMD_SCREEN_OFF 0x05
#define GOODIX_READ_COOR_ADDR 0x814E
-#define GOODIX_REG_CONFIG_DATA 0x8047
+#define GOODIX_GT1X_REG_CONFIG_DATA 0x8050
+#define GOODIX_GT9X_REG_CONFIG_DATA 0x8047
#define GOODIX_REG_ID 0x8140
#define GOODIX_BUFFER_STATUS_READY BIT(7)
@@ -79,6 +88,35 @@ struct goodix_ts_data {
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
+static int goodix_check_cfg_8(struct goodix_ts_data *ts,
+ const struct firmware *cfg);
+static int goodix_check_cfg_16(struct goodix_ts_data *ts,
+ const struct firmware *cfg);
+
+static const struct goodix_chip_data gt1x_chip_data = {
+ .config_addr = GOODIX_GT1X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .check_config = goodix_check_cfg_16,
+};
+
+static const struct goodix_chip_data gt911_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_911_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
+static const struct goodix_chip_data gt967_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_967_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
+static const struct goodix_chip_data gt9x_chip_data = {
+ .config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
+ .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .check_config = goodix_check_cfg_8,
+};
+
static const unsigned long goodix_irq_flags[] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_EDGE_FALLING,
@@ -177,22 +215,25 @@ static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
return goodix_i2c_write(client, reg, &value, sizeof(value));
}
-static int goodix_get_cfg_len(u16 id)
+static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
{
switch (id) {
+ case 1151:
+ return &gt1x_chip_data;
+
case 911:
case 9271:
case 9110:
case 927:
case 928:
- return GOODIX_CONFIG_911_LENGTH;
+ return &gt911_chip_data;
case 912:
case 967:
- return GOODIX_CONFIG_967_LENGTH;
+ return &gt967_chip_data;
default:
- return GOODIX_CONFIG_MAX_LENGTH;
+ return &gt9x_chip_data;
}
}
@@ -332,25 +373,12 @@ static int goodix_request_irq(struct goodix_ts_data *ts)
ts->irq_flags, ts->client->name, ts);
}
-/**
- * goodix_check_cfg - Checks if config fw is valid
- *
- * @ts: goodix_ts_data pointer
- * @cfg: firmware config data
- */
-static int goodix_check_cfg(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static int goodix_check_cfg_8(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
{
- int i, raw_cfg_len;
+ int i, raw_cfg_len = cfg->size - 2;
u8 check_sum = 0;
- if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
- dev_err(&ts->client->dev,
- "The length of the config fw is not correct");
- return -EINVAL;
- }
-
- raw_cfg_len = cfg->size - 2;
for (i = 0; i < raw_cfg_len; i++)
check_sum += cfg->data[i];
check_sum = (~check_sum) + 1;
@@ -369,6 +397,48 @@ static int goodix_check_cfg(struct goodix_ts_data *ts,
return 0;
}
+static int goodix_check_cfg_16(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
+{
+ int i, raw_cfg_len = cfg->size - 3;
+ u16 check_sum = 0;
+
+ for (i = 0; i < raw_cfg_len; i += 2)
+ check_sum += get_unaligned_be16(&cfg->data[i]);
+ check_sum = (~check_sum) + 1;
+ if (check_sum != get_unaligned_be16(&cfg->data[raw_cfg_len])) {
+ dev_err(&ts->client->dev,
+ "The checksum of the config fw is not correct");
+ return -EINVAL;
+ }
+
+ if (cfg->data[raw_cfg_len + 2] != 1) {
+ dev_err(&ts->client->dev,
+ "Config fw must have Config_Fresh register set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * goodix_check_cfg - Checks if config fw is valid
+ *
+ * @ts: goodix_ts_data pointer
+ * @cfg: firmware config data
+ */
+static int goodix_check_cfg(struct goodix_ts_data *ts,
+ const struct firmware *cfg)
+{
+ if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
+ dev_err(&ts->client->dev,
+ "The length of the config fw is not correct");
+ return -EINVAL;
+ }
+
+ return ts->chip->check_config(ts, cfg);
+}
+
/**
* goodix_send_cfg - Write fw config to device
*
@@ -384,7 +454,7 @@ static int goodix_send_cfg(struct goodix_ts_data *ts,
if (error)
return error;
- error = goodix_i2c_write(ts->client, GOODIX_REG_CONFIG_DATA, cfg->data,
+ error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg->data,
cfg->size);
if (error) {
dev_err(&ts->client->dev, "Failed to write config data: %d",
@@ -511,8 +581,8 @@ static void goodix_read_config(struct goodix_ts_data *ts)
u8 config[GOODIX_CONFIG_MAX_LENGTH];
int error;
- error = goodix_i2c_read(ts->client, GOODIX_REG_CONFIG_DATA,
- config, ts->cfg_len);
+ error = goodix_i2c_read(ts->client, ts->chip->config_addr,
+ config, ts->chip->config_len);
if (error) {
dev_warn(&ts->client->dev,
"Error reading config (%d), using defaults\n",
@@ -592,7 +662,7 @@ static int goodix_i2c_test(struct i2c_client *client)
u8 test;
while (retry++ < 2) {
- error = goodix_i2c_read(client, GOODIX_REG_CONFIG_DATA,
+ error = goodix_i2c_read(client, GOODIX_REG_ID,
&test, 1);
if (!error)
return 0;
@@ -762,7 +832,7 @@ static int goodix_ts_probe(struct i2c_client *client,
return error;
}
- ts->cfg_len = goodix_get_cfg_len(ts->id);
+ ts->chip = goodix_get_chip_data(ts->id);
if (ts->gpiod_int && ts->gpiod_rst) {
/* update device config */
@@ -891,6 +961,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
+ { .compatible = "goodix,gt1151" },
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
{ .compatible = "goodix,gt912" },
diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c
new file mode 100644
index 000000000000..fc080a7c2e1f
--- /dev/null
+++ b/drivers/input/touchscreen/hideep.c
@@ -0,0 +1,1120 @@
+/*
+ * Copyright (C) 2012-2017 Hideep, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/regulator/consumer.h>
+#include <asm/unaligned.h>
+
+#define HIDEEP_TS_NAME "HiDeep Touchscreen"
+#define HIDEEP_I2C_NAME "hideep_ts"
+
+#define HIDEEP_MT_MAX 10
+#define HIDEEP_KEY_MAX 3
+
+/* count(2) + touch data(100) + key data(6) */
+#define HIDEEP_MAX_EVENT 108UL
+
+#define HIDEEP_TOUCH_EVENT_INDEX 2
+#define HIDEEP_KEY_EVENT_INDEX 102
+
+/* Touch & key event */
+#define HIDEEP_EVENT_ADDR 0x240
+
+/* command list */
+#define HIDEEP_RESET_CMD 0x9800
+
+/* event bit */
+#define HIDEEP_MT_RELEASED BIT(4)
+#define HIDEEP_KEY_PRESSED BIT(7)
+#define HIDEEP_KEY_FIRST_PRESSED BIT(8)
+#define HIDEEP_KEY_PRESSED_MASK (HIDEEP_KEY_PRESSED | \
+ HIDEEP_KEY_FIRST_PRESSED)
+
+#define HIDEEP_KEY_IDX_MASK 0x0f
+
+/* For NVM */
+#define HIDEEP_YRAM_BASE 0x40000000
+#define HIDEEP_PERIPHERAL_BASE 0x50000000
+#define HIDEEP_ESI_BASE (HIDEEP_PERIPHERAL_BASE + 0x00000000)
+#define HIDEEP_FLASH_BASE (HIDEEP_PERIPHERAL_BASE + 0x01000000)
+#define HIDEEP_SYSCON_BASE (HIDEEP_PERIPHERAL_BASE + 0x02000000)
+
+#define HIDEEP_SYSCON_MOD_CON (HIDEEP_SYSCON_BASE + 0x0000)
+#define HIDEEP_SYSCON_SPC_CON (HIDEEP_SYSCON_BASE + 0x0004)
+#define HIDEEP_SYSCON_CLK_CON (HIDEEP_SYSCON_BASE + 0x0008)
+#define HIDEEP_SYSCON_CLK_ENA (HIDEEP_SYSCON_BASE + 0x000C)
+#define HIDEEP_SYSCON_RST_CON (HIDEEP_SYSCON_BASE + 0x0010)
+#define HIDEEP_SYSCON_WDT_CON (HIDEEP_SYSCON_BASE + 0x0014)
+#define HIDEEP_SYSCON_WDT_CNT (HIDEEP_SYSCON_BASE + 0x0018)
+#define HIDEEP_SYSCON_PWR_CON (HIDEEP_SYSCON_BASE + 0x0020)
+#define HIDEEP_SYSCON_PGM_ID (HIDEEP_SYSCON_BASE + 0x00F4)
+
+#define HIDEEP_FLASH_CON (HIDEEP_FLASH_BASE + 0x0000)
+#define HIDEEP_FLASH_STA (HIDEEP_FLASH_BASE + 0x0004)
+#define HIDEEP_FLASH_CFG (HIDEEP_FLASH_BASE + 0x0008)
+#define HIDEEP_FLASH_TIM (HIDEEP_FLASH_BASE + 0x000C)
+#define HIDEEP_FLASH_CACHE_CFG (HIDEEP_FLASH_BASE + 0x0010)
+#define HIDEEP_FLASH_PIO_SIG (HIDEEP_FLASH_BASE + 0x400000)
+
+#define HIDEEP_ESI_TX_INVALID (HIDEEP_ESI_BASE + 0x0008)
+
+#define HIDEEP_PERASE 0x00040000
+#define HIDEEP_WRONLY 0x00100000
+
+#define HIDEEP_NVM_MASK_OFS 0x0000000C
+#define HIDEEP_NVM_DEFAULT_PAGE 0
+#define HIDEEP_NVM_SFR_WPAGE 1
+#define HIDEEP_NVM_SFR_RPAGE 2
+
+#define HIDEEP_PIO_SIG 0x00400000
+#define HIDEEP_PROT_MODE 0x03400000
+
+#define HIDEEP_NVM_PAGE_SIZE 128
+
+#define HIDEEP_DWZ_INFO 0x000002C0
+
+struct hideep_event {
+ __le16 x;
+ __le16 y;
+ __le16 z;
+ u8 w;
+ u8 flag;
+ u8 type;
+ u8 index;
+};
+
+struct dwz_info {
+ __be32 code_start;
+ u8 code_crc[12];
+
+ __be32 c_code_start;
+ __be16 gen_ver;
+ __be16 c_code_len;
+
+ __be32 vr_start;
+ __be16 rsv0;
+ __be16 vr_len;
+
+ __be32 ft_start;
+ __be16 vr_version;
+ __be16 ft_len;
+
+ __be16 core_ver;
+ __be16 boot_ver;
+
+ __be16 release_ver;
+ __be16 custom_ver;
+
+ u8 factory_id;
+ u8 panel_type;
+ u8 model_name[6];
+
+ __be16 extra_option;
+ __be16 product_code;
+
+ __be16 vendor_id;
+ __be16 product_id;
+};
+
+struct pgm_packet {
+ struct {
+ u8 unused[3];
+ u8 len;
+ __be32 addr;
+ } header;
+ __be32 payload[HIDEEP_NVM_PAGE_SIZE / sizeof(__be32)];
+};
+
+#define HIDEEP_XFER_BUF_SIZE sizeof(struct pgm_packet)
+
+struct hideep_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct regmap *reg;
+
+ struct touchscreen_properties prop;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator *vcc_vdd;
+ struct regulator *vcc_vid;
+
+ struct mutex dev_mutex;
+
+ u32 tch_count;
+ u32 lpm_count;
+
+ /*
+ * Data buffer to read packet from the device (contacts and key
+ * states). We align it on double-word boundary to keep word-sized
+ * fields in contact data and double-word-sized fields in program
+ * packet aligned.
+ */
+ u8 xfer_buf[HIDEEP_XFER_BUF_SIZE] __aligned(4);
+
+ int key_num;
+ u32 key_codes[HIDEEP_KEY_MAX];
+
+ struct dwz_info dwz_info;
+
+ unsigned int fw_size;
+ u32 nvm_mask;
+};
+
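+/*
+ * PGM-mode register access: each transfer carries a length byte (with
+ * 0x80 set for writes), a big-endian 32-bit address and up to one NVM
+ * page (128 bytes) of payload, sent as a raw i2c message rather than
+ * through the regmap.
+ */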
+static int hideep_pgm_w_mem(struct hideep_ts *ts, u32 addr,
+ const __be32 *data, size_t count)
+{
+ struct pgm_packet *packet = (void *)ts->xfer_buf;
+ size_t len = count * sizeof(*data);
+ struct i2c_msg msg = {
+ .addr = ts->client->addr,
+ .len = len + sizeof(packet->header.len) +
+ sizeof(packet->header.addr),
+ .buf = &packet->header.len,
+ };
+ int ret;
+
+ if (len > HIDEEP_NVM_PAGE_SIZE)
+ return -EINVAL;
+
+ packet->header.len = 0x80 | (count - 1);
+ packet->header.addr = cpu_to_be32(addr);
+ memcpy(packet->payload, data, len);
+
+ ret = i2c_transfer(ts->client->adapter, &msg, 1);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int hideep_pgm_r_mem(struct hideep_ts *ts, u32 addr,
+ __be32 *data, size_t count)
+{
+ struct pgm_packet *packet = (void *)ts->xfer_buf;
+ size_t len = count * sizeof(*data);
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .len = sizeof(packet->header.len) +
+ sizeof(packet->header.addr),
+ .buf = &packet->header.len,
+ },
+ {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = (u8 *)data,
+ },
+ };
+ int ret;
+
+ if (len > HIDEEP_NVM_PAGE_SIZE)
+ return -EINVAL;
+
+ packet->header.len = count - 1;
+ packet->header.addr = cpu_to_be32(addr);
+
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg))
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int hideep_pgm_r_reg(struct hideep_ts *ts, u32 addr, u32 *val)
+{
+ __be32 data;
+ int error;
+
+ error = hideep_pgm_r_mem(ts, addr, &data, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "read of register %#08x failed: %d\n",
+ addr, error);
+ return error;
+ }
+
+ *val = be32_to_cpu(data);
+ return 0;
+}
+
+static int hideep_pgm_w_reg(struct hideep_ts *ts, u32 addr, u32 val)
+{
+ __be32 data = cpu_to_be32(val);
+ int error;
+
+ error = hideep_pgm_w_mem(ts, addr, &data, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "write to register %#08x (%#08x) failed: %d\n",
+ addr, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
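+/*
+ * Helpers for PGM mode: SW_RESET_IN_PGM arms the watchdog so the chip
+ * resets itself after 'clk' ticks, SET_FLASH_PIO toggles direct PIO
+ * control of the flash controller, and NVM_W_SFR writes a flash
+ * special-function register through a single PIO pulse.
+ */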
+#define SW_RESET_IN_PGM(clk) \
+{ \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CNT, (clk)); \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x03); \
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x01); \
+}
+
+#define SET_FLASH_PIO(ce) \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CON, \
+ 0x01 | ((ce) << 1))
+
+#define SET_PIO_SIG(x, y) \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_PIO_SIG + (x), (y))
+
+#define SET_FLASH_HWCONTROL() \
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CON, 0x00)
+
+#define NVM_W_SFR(x, y) \
+{ \
+ SET_FLASH_PIO(1); \
+ SET_PIO_SIG(x, y); \
+ SET_FLASH_PIO(0); \
+}
+
+static void hideep_pgm_set(struct hideep_ts *ts)
+{
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_WDT_CON, 0x00);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_SPC_CON, 0x00);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_CLK_ENA, 0xFF);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_CLK_CON, 0x01);
+ hideep_pgm_w_reg(ts, HIDEEP_SYSCON_PWR_CON, 0x01);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_TIM, 0x03);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CACHE_CFG, 0x00);
+}
+
+static int hideep_pgm_get_pattern(struct hideep_ts *ts, u32 *pattern)
+{
+ u16 p1 = 0xAF39;
+ u16 p2 = 0xDF9D;
+ int error;
+
+ error = regmap_bulk_write(ts->reg, p1, &p2, 1);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: regmap_bulk_write() failed with %d\n",
+ __func__, error);
+ return error;
+ }
+
+ usleep_range(1000, 1100);
+
+ /* flush invalid Tx load register */
+ error = hideep_pgm_w_reg(ts, HIDEEP_ESI_TX_INVALID, 0x01);
+ if (error)
+ return error;
+
+ error = hideep_pgm_r_reg(ts, HIDEEP_SYSCON_PGM_ID, pattern);
+ if (error)
+ return error;
+
+ return 0;
+}
+
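+/*
+ * Entering PGM mode: writing the magic word 0xDF9D to register 0xAF39
+ * should make the controller report the PGM ID 0x39AF9DDF; retry a few
+ * times and force a watchdog reset if the handshake never succeeds.
+ */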
+static int hideep_enter_pgm(struct hideep_ts *ts)
+{
+ int retry_count = 10;
+ u32 pattern;
+ int error;
+
+ while (retry_count--) {
+ error = hideep_pgm_get_pattern(ts, &pattern);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "hideep_pgm_get_pattern failed: %d\n", error);
+ } else if (pattern != 0x39AF9DDF) {
+ dev_err(&ts->client->dev, "%s: bad pattern: %#08x\n",
+ __func__, pattern);
+ } else {
+ dev_dbg(&ts->client->dev, "found magic code");
+
+ hideep_pgm_set(ts);
+ usleep_range(1000, 1100);
+
+ return 0;
+ }
+ }
+
+ dev_err(&ts->client->dev, "failed to enter pgm mode\n");
+ SW_RESET_IN_PGM(1000);
+ return -EIO;
+}
+
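+/*
+ * Unlock the NVM for programming: read the mask code from the SFR read
+ * page, strip the protection bits, warn if it does not match the value
+ * expected for this IC, then write the mask back through the SFR write
+ * page.
+ */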
+static void hideep_nvm_unlock(struct hideep_ts *ts)
+{
+ u32 unmask_code;
+
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_RPAGE);
+ hideep_pgm_r_reg(ts, 0x0000000C, &unmask_code);
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+
+ /* make it unprotected code */
+ unmask_code &= ~HIDEEP_PROT_MODE;
+
+ /* compare unmask code */
+ if (unmask_code != ts->nvm_mask)
+ dev_warn(&ts->client->dev,
+ "read mask code different %#08x vs %#08x",
+ unmask_code, ts->nvm_mask);
+
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_SFR_WPAGE);
+ SET_FLASH_PIO(0);
+
+ NVM_W_SFR(HIDEEP_NVM_MASK_OFS, ts->nvm_mask);
+ SET_FLASH_HWCONTROL();
+ hideep_pgm_w_reg(ts, HIDEEP_FLASH_CFG, HIDEEP_NVM_DEFAULT_PAGE);
+}
+
+static int hideep_check_status(struct hideep_ts *ts)
+{
+ int time_out = 100;
+ u32 status;
+ int error;
+
+ while (time_out--) {
+ error = hideep_pgm_r_reg(ts, HIDEEP_FLASH_STA, &status);
+ if (!error && status)
+ return 0;
+
+ usleep_range(1000, 1100);
+ }
+
+ return -ETIMEDOUT;
+}
+
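+/*
+ * Program one 128-byte NVM page: wait for the flash controller to go
+ * idle, erase the page through the PIO window, stream the new payload,
+ * and finally hand control back to the hardware.
+ */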
+static int hideep_program_page(struct hideep_ts *ts, u32 addr,
+ const __be32 *ucode, size_t xfer_count)
+{
+ u32 val;
+ int error;
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ addr &= ~(HIDEEP_NVM_PAGE_SIZE - 1);
+
+ SET_FLASH_PIO(0);
+ SET_FLASH_PIO(1);
+
+ /* erase page */
+ SET_PIO_SIG(HIDEEP_PERASE | addr, 0xFFFFFFFF);
+
+ SET_FLASH_PIO(0);
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ /* write page */
+ SET_FLASH_PIO(1);
+
+ val = be32_to_cpu(ucode[0]);
+ SET_PIO_SIG(HIDEEP_WRONLY | addr, val);
+
+ hideep_pgm_w_mem(ts, HIDEEP_FLASH_PIO_SIG | HIDEEP_WRONLY,
+ ucode, xfer_count);
+
+ val = be32_to_cpu(ucode[xfer_count - 1]);
+ SET_PIO_SIG(124, val);
+
+ SET_FLASH_PIO(0);
+
+ usleep_range(1000, 1100);
+
+ error = hideep_check_status(ts);
+ if (error)
+ return -EBUSY;
+
+ SET_FLASH_HWCONTROL();
+
+ return 0;
+}
+
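+/*
+ * Walk the firmware image one NVM page at a time and reprogram only
+ * the pages whose current contents differ from the new image.
+ */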
+static int hideep_program_nvm(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ struct pgm_packet *packet_r = (void *)ts->xfer_buf;
+ __be32 *current_ucode = packet_r->payload;
+ size_t xfer_len;
+ size_t xfer_count;
+ u32 addr = 0;
+ int error;
+
+ hideep_nvm_unlock(ts);
+
+ while (ucode_len > 0) {
+ xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
+ xfer_count = xfer_len / sizeof(*ucode);
+
+ error = hideep_pgm_r_mem(ts, 0x00000000 + addr,
+ current_ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: failed to read page at offset %#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ /* See if the page needs updating */
+ if (memcmp(ucode, current_ucode, xfer_len)) {
+ error = hideep_program_page(ts, addr,
+ ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: iwrite failure @%#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ usleep_range(1000, 1100);
+ }
+
+ ucode += xfer_count;
+ addr += xfer_len;
+ ucode_len -= xfer_len;
+ }
+
+ return 0;
+}
+
+static int hideep_verify_nvm(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ struct pgm_packet *packet_r = (void *)ts->xfer_buf;
+ __be32 *current_ucode = packet_r->payload;
+ size_t xfer_len;
+ size_t xfer_count;
+ u32 addr = 0;
+ int i;
+ int error;
+
+ while (ucode_len > 0) {
+ xfer_len = min_t(size_t, ucode_len, HIDEEP_NVM_PAGE_SIZE);
+ xfer_count = xfer_len / sizeof(*ucode);
+
+ error = hideep_pgm_r_mem(ts, 0x00000000 + addr,
+ current_ucode, xfer_count);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "%s: failed to read page at offset %#08x: %d\n",
+ __func__, addr, error);
+ return error;
+ }
+
+ if (memcmp(ucode, current_ucode, xfer_len)) {
+ const u8 *ucode_bytes = (const u8 *)ucode;
+ const u8 *current_bytes = (const u8 *)current_ucode;
+
+ for (i = 0; i < xfer_len; i++)
+ if (ucode_bytes[i] != current_bytes[i])
+ dev_err(&ts->client->dev,
+ "%s: mismatch @%#08x: (%#02x vs %#02x)\n",
+ __func__, addr + i,
+ ucode_bytes[i],
+ current_bytes[i]);
+
+ return -EIO;
+ }
+
+ ucode += xfer_count;
+ addr += xfer_len;
+ ucode_len -= xfer_len;
+ }
+
+ return 0;
+}
+
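+/*
+ * The DWZ block at 0x2C0 holds version and product information and is
+ * read via PGM mode: enter PGM, fetch the block, then reset back to
+ * application mode before interpreting the product code.
+ */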
+static int hideep_load_dwz(struct hideep_ts *ts)
+{
+ u16 product_code;
+ int error;
+
+ error = hideep_enter_pgm(ts);
+ if (error)
+ return error;
+
+ msleep(50);
+
+ error = hideep_pgm_r_mem(ts, HIDEEP_DWZ_INFO,
+ (void *)&ts->dwz_info,
+ sizeof(ts->dwz_info) / sizeof(__be32));
+
+ SW_RESET_IN_PGM(10);
+ msleep(50);
+
+ if (error) {
+ dev_err(&ts->client->dev,
+ "failed to fetch DWZ data: %d\n", error);
+ return error;
+ }
+
+ product_code = be16_to_cpu(ts->dwz_info.product_code);
+
+ switch (product_code & 0xF0) {
+ case 0x40:
+ dev_dbg(&ts->client->dev, "used crimson IC");
+ ts->fw_size = 1024 * 48;
+ ts->nvm_mask = 0x00310000;
+ break;
+ case 0x60:
+ dev_dbg(&ts->client->dev, "used lime IC");
+ ts->fw_size = 1024 * 64;
+ ts->nvm_mask = 0x0030027B;
+ break;
+ default:
+ dev_err(&ts->client->dev, "product code is wrong: %#04x",
+ product_code);
+ return -EINVAL;
+ }
+
+ dev_dbg(&ts->client->dev, "firmware release version: %#04x",
+ be16_to_cpu(ts->dwz_info.release_ver));
+
+ return 0;
+}
+
+static int hideep_flash_firmware(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ int retry_cnt = 3;
+ int error;
+
+ while (retry_cnt--) {
+ error = hideep_program_nvm(ts, ucode, ucode_len);
+ if (!error) {
+ error = hideep_verify_nvm(ts, ucode, ucode_len);
+ if (!error)
+ return 0;
+ }
+ }
+
+ return error;
+}
+
+static int hideep_update_firmware(struct hideep_ts *ts,
+ const __be32 *ucode, size_t ucode_len)
+{
+ int error, error2;
+
+ dev_dbg(&ts->client->dev, "starting firmware update");
+
+ /* enter program mode */
+ error = hideep_enter_pgm(ts);
+ if (error)
+ return error;
+
+ error = hideep_flash_firmware(ts, ucode, ucode_len);
+ if (error)
+ dev_err(&ts->client->dev,
+ "firmware update failed: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "firmware updated successfully\n");
+
+ SW_RESET_IN_PGM(1000);
+
+ error2 = hideep_load_dwz(ts);
+ if (error2)
+ dev_err(&ts->client->dev,
+ "failed to load dwz after firmware update: %d\n",
+ error2);
+
+ return error ?: error2;
+}
+
+static int hideep_power_on(struct hideep_ts *ts)
+{
+ int error = 0;
+
+ error = regulator_enable(ts->vcc_vdd);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to enable 'vdd' regulator: %d", error);
+
+ usleep_range(999, 1000);
+
+ error = regulator_enable(ts->vcc_vid);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to enable 'vcc_vid' regulator: %d",
+ error);
+
+ msleep(30);
+
+ if (ts->reset_gpio) {
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ } else {
+ error = regmap_write(ts->reg, HIDEEP_RESET_CMD, 0x01);
+ if (error)
+ dev_err(&ts->client->dev,
+ "failed to send 'reset' command: %d\n", error);
+ }
+
+ msleep(50);
+
+ return error;
+}
+
+static void hideep_power_off(void *data)
+{
+ struct hideep_ts *ts = data;
+
+ if (ts->reset_gpio)
+ gpiod_set_value(ts->reset_gpio, 1);
+
+ regulator_disable(ts->vcc_vid);
+ regulator_disable(ts->vcc_vdd);
+}
+
+#define __GET_MT_TOOL_TYPE(type) ((type) == 0x01 ? MT_TOOL_FINGER : MT_TOOL_PEN)
+
+static void hideep_report_slot(struct input_dev *input,
+ const struct hideep_event *event)
+{
+ input_mt_slot(input, event->index & 0x0f);
+ input_mt_report_slot_state(input,
+ __GET_MT_TOOL_TYPE(event->type),
+ !(event->flag & HIDEEP_MT_RELEASED));
+ if (!(event->flag & HIDEEP_MT_RELEASED)) {
+ input_report_abs(input, ABS_MT_POSITION_X,
+ le16_to_cpup(&event->x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ le16_to_cpup(&event->y));
+ input_report_abs(input, ABS_MT_PRESSURE,
+ le16_to_cpup(&event->z));
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, event->w);
+ }
+}
+
+static void hideep_parse_and_report(struct hideep_ts *ts)
+{
+ const struct hideep_event *events =
+ (void *)&ts->xfer_buf[HIDEEP_TOUCH_EVENT_INDEX];
+ const u8 *keys = &ts->xfer_buf[HIDEEP_KEY_EVENT_INDEX];
+ int touch_count = ts->xfer_buf[0];
+ int key_count = ts->xfer_buf[1] & 0x0f;
+ int lpm_count = ts->xfer_buf[1] & 0xf0;
+ int i;
+
+ /* get touch event count */
+ dev_dbg(&ts->client->dev, "mt = %d, key = %d, lpm = %02x",
+ touch_count, key_count, lpm_count);
+
+ touch_count = min(touch_count, HIDEEP_MT_MAX);
+ for (i = 0; i < touch_count; i++)
+ hideep_report_slot(ts->input_dev, events + i);
+
+ key_count = min(key_count, HIDEEP_KEY_MAX);
+ for (i = 0; i < key_count; i++) {
+ u8 key_data = keys[i * 2];
+
+ input_report_key(ts->input_dev,
+ ts->key_codes[key_data & HIDEEP_KEY_IDX_MASK],
+ key_data & HIDEEP_KEY_PRESSED_MASK);
+ }
+
+ input_mt_sync_frame(ts->input_dev);
+ input_sync(ts->input_dev);
+}
+
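+/*
+ * Read the pending event packet and parse it; the regmap uses 16-bit
+ * registers, so the read count is given in words (HIDEEP_MAX_EVENT / 2).
+ */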
+static irqreturn_t hideep_irq(int irq, void *handle)
+{
+ struct hideep_ts *ts = handle;
+ int error;
+
+ BUILD_BUG_ON(HIDEEP_MAX_EVENT > HIDEEP_XFER_BUF_SIZE);
+
+ error = regmap_bulk_read(ts->reg, HIDEEP_EVENT_ADDR,
+ ts->xfer_buf, HIDEEP_MAX_EVENT / 2);
+ if (error) {
+ dev_err(&ts->client->dev, "failed to read events: %d\n", error);
+ goto out;
+ }
+
+ hideep_parse_and_report(ts);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int hideep_get_axis_info(struct hideep_ts *ts)
+{
+ __le16 val[2];
+ int error;
+
+ error = regmap_bulk_read(ts->reg, 0x28, val, ARRAY_SIZE(val));
+ if (error)
+ return error;
+
+ ts->prop.max_x = le16_to_cpup(val);
+ ts->prop.max_y = le16_to_cpup(val + 1);
+
+ dev_dbg(&ts->client->dev, "X: %d, Y: %d",
+ ts->prop.max_x, ts->prop.max_y);
+
+ return 0;
+}
+
+static int hideep_init_input(struct hideep_ts *ts)
+{
+ struct device *dev = &ts->client->dev;
+ int i;
+ int error;
+
+ ts->input_dev = devm_input_allocate_device(dev);
+ if (!ts->input_dev) {
+ dev_err(dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->name = HIDEEP_TS_NAME;
+ ts->input_dev->id.bustype = BUS_I2C;
+ input_set_drvdata(ts->input_dev, ts);
+
+ input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(ts->input_dev, ABS_MT_PRESSURE, 0, 65535, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
+ touchscreen_parse_properties(ts->input_dev, true, &ts->prop);
+
+ if (ts->prop.max_x == 0 || ts->prop.max_y == 0) {
+ error = hideep_get_axis_info(ts);
+ if (error)
+ return error;
+ }
+
+ error = input_mt_init_slots(ts->input_dev, HIDEEP_MT_MAX,
+ INPUT_MT_DIRECT);
+ if (error)
+ return error;
+
+ ts->key_num = device_property_read_u32_array(dev, "linux,keycodes",
+ NULL, 0);
+ if (ts->key_num > HIDEEP_KEY_MAX) {
+ dev_err(dev, "too many keys defined: %d\n",
+ ts->key_num);
+ return -EINVAL;
+ }
+
+ if (ts->key_num <= 0) {
+ dev_dbg(dev,
+ "missing or malformed 'linux,keycodes' property\n");
+ } else {
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ ts->key_codes,
+ ts->key_num);
+ if (error) {
+ dev_dbg(dev, "failed to read keymap: %d", error);
+ return error;
+ }
+
+ if (ts->key_num) {
+ ts->input_dev->keycode = ts->key_codes;
+ ts->input_dev->keycodesize = sizeof(ts->key_codes[0]);
+ ts->input_dev->keycodemax = ts->key_num;
+
+ for (i = 0; i < ts->key_num; i++)
+ input_set_capability(ts->input_dev, EV_KEY,
+ ts->key_codes[i]);
+ }
+ }
+
+ error = input_register_device(ts->input_dev);
+ if (error) {
+ dev_err(dev, "failed to register input device: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static ssize_t hideep_update_fw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ const struct firmware *fw_entry;
+ char *fw_name;
+ int mode;
+ int error;
+
+ error = kstrtoint(buf, 0, &mode);
+ if (error)
+ return error;
+
+ fw_name = kasprintf(GFP_KERNEL, "hideep_ts_%04x.bin",
+ be16_to_cpu(ts->dwz_info.product_id));
+ if (!fw_name)
+ return -ENOMEM;
+
+ error = request_firmware(&fw_entry, fw_name, dev);
+ if (error) {
+ dev_err(dev, "failed to request firmware %s: %d",
+ fw_name, error);
+ goto out_free_fw_name;
+ }
+
+ if (fw_entry->size % sizeof(__be32)) {
+ dev_err(dev, "invalid firmware size %zu\n", fw_entry->size);
+ error = -EINVAL;
+ goto out_release_fw;
+ }
+
+ if (fw_entry->size > ts->fw_size) {
+ dev_err(dev, "fw size (%zu) is too big (memory size %d)\n",
+ fw_entry->size, ts->fw_size);
+ error = -EFBIG;
+ goto out_release_fw;
+ }
+
+ mutex_lock(&ts->dev_mutex);
+ disable_irq(client->irq);
+
+ error = hideep_update_firmware(ts, (const __be32 *)fw_entry->data,
+ fw_entry->size);
+
+ enable_irq(client->irq);
+ mutex_unlock(&ts->dev_mutex);
+
+out_release_fw:
+ release_firmware(fw_entry);
+out_free_fw_name:
+ kfree(fw_name);
+
+ return error ?: count;
+}
+
+static ssize_t hideep_fw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ ssize_t len;
+
+ mutex_lock(&ts->dev_mutex);
+ len = scnprintf(buf, PAGE_SIZE, "%04x\n",
+ be16_to_cpu(ts->dwz_info.release_ver));
+ mutex_unlock(&ts->dev_mutex);
+
+ return len;
+}
+
+static ssize_t hideep_product_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ ssize_t len;
+
+ mutex_lock(&ts->dev_mutex);
+ len = scnprintf(buf, PAGE_SIZE, "%04x\n",
+ be16_to_cpu(ts->dwz_info.product_id));
+ mutex_unlock(&ts->dev_mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(version, 0664, hideep_fw_version_show, NULL);
+static DEVICE_ATTR(product_id, 0664, hideep_product_id_show, NULL);
+static DEVICE_ATTR(update_fw, 0664, NULL, hideep_update_fw);
+
+static struct attribute *hideep_ts_sysfs_entries[] = {
+ &dev_attr_version.attr,
+ &dev_attr_product_id.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group hideep_ts_attr_group = {
+ .attrs = hideep_ts_sysfs_entries,
+};
+
+static int __maybe_unused hideep_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+ hideep_power_off(ts);
+
+ return 0;
+}
+
+static int __maybe_unused hideep_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hideep_ts *ts = i2c_get_clientdata(client);
+ int error;
+
+ error = hideep_power_on(ts);
+ if (error) {
+ dev_err(&client->dev, "power on failed");
+ return error;
+ }
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hideep_pm_ops, hideep_suspend, hideep_resume);
+
+static const struct regmap_config hideep_regmap_config = {
+ .reg_bits = 16,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = 0xffff,
+};
+
+static int hideep_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hideep_ts *ts;
+ int error;
+
+ /* check i2c bus */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "check i2c device error");
+ return -ENODEV;
+ }
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "missing irq: %d\n", client->irq);
+ return -EINVAL;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ i2c_set_clientdata(client, ts);
+ mutex_init(&ts->dev_mutex);
+
+ ts->reg = devm_regmap_init_i2c(client, &hideep_regmap_config);
+ if (IS_ERR(ts->reg)) {
+ error = PTR_ERR(ts->reg);
+ dev_err(&client->dev,
+ "failed to initialize regmap: %d\n", error);
+ return error;
+ }
+
+ ts->vcc_vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(ts->vcc_vdd))
+ return PTR_ERR(ts->vcc_vdd);
+
+ ts->vcc_vid = devm_regulator_get(&client->dev, "vid");
+ if (IS_ERR(ts->vcc_vid))
+ return PTR_ERR(ts->vcc_vid);
+
+ ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->reset_gpio))
+ return PTR_ERR(ts->reset_gpio);
+
+ error = hideep_power_on(ts);
+ if (error) {
+ dev_err(&client->dev, "power on failed: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action_or_reset(&client->dev, hideep_power_off, ts);
+ if (error)
+ return error;
+
+ error = hideep_load_dwz(ts);
+ if (error) {
+ dev_err(&client->dev, "failed to load dwz: %d", error);
+ return error;
+ }
+
+ error = hideep_init_input(ts);
+ if (error)
+ return error;
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hideep_irq, IRQF_ONESHOT,
+ client->name, ts);
+ if (error) {
+ dev_err(&client->dev, "failed to request irq %d: %d\n",
+ client->irq, error);
+ return error;
+ }
+
+ error = devm_device_add_group(&client->dev, &hideep_ts_attr_group);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to add sysfs attributes: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id hideep_i2c_id[] = {
+ { HIDEEP_I2C_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hideep_i2c_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hideep_acpi_id[] = {
+ { "HIDP0001", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hideep_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id hideep_match_table[] = {
+ { .compatible = "hideep,hideep-ts" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hideep_match_table);
+#endif
+
+static struct i2c_driver hideep_driver = {
+ .driver = {
+ .name = HIDEEP_I2C_NAME,
+ .of_match_table = of_match_ptr(hideep_match_table),
+ .acpi_match_table = ACPI_PTR(hideep_acpi_id),
+ .pm = &hideep_pm_ops,
+ },
+ .id_table = hideep_i2c_id,
+ .probe = hideep_probe,
+};
+
+module_i2c_driver(hideep_driver);
+
+MODULE_DESCRIPTION("Driver for HiDeep Touchscreen Controller");
+MODULE_AUTHOR("anthony.kim@hideep.com");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 05108c2fea93..6892f0e28918 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1433,13 +1433,6 @@ static const struct attribute_group mip4_attr_group = {
.attrs = mip4_attrs,
};
-static void mip4_sysfs_remove(void *_data)
-{
- struct mip4_ts *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &mip4_attr_group);
-}
-
static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct mip4_ts *ts;
@@ -1535,21 +1528,13 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &mip4_attr_group);
+ error = devm_device_add_group(&client->dev, &mip4_attr_group);
if (error) {
dev_err(&client->dev,
"Failed to create sysfs attribute group: %d\n", error);
return error;
}
- error = devm_add_action(&client->dev, mip4_sysfs_remove, ts);
- if (error) {
- mip4_sysfs_remove(ts);
- dev_err(&client->dev,
- "Failed to install sysfs remoce action: %d\n", error);
- return error;
- }
-
return 0;
}
diff --git a/drivers/input/touchscreen/mxs-lradc-ts.c b/drivers/input/touchscreen/mxs-lradc-ts.c
index 3707e927f770..c850b517854e 100644
--- a/drivers/input/touchscreen/mxs-lradc-ts.c
+++ b/drivers/input/touchscreen/mxs-lradc-ts.c
@@ -584,7 +584,7 @@ static void mxs_lradc_ts_hw_init(struct mxs_lradc_ts *ts)
static int mxs_lradc_ts_register(struct mxs_lradc_ts *ts)
{
- struct input_dev *input = ts->ts_input;
+ struct input_dev *input;
struct device *dev = ts->dev;
input = devm_input_allocate_device(dev);
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 4f1d3fd5d412..100538d64fff 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -943,13 +943,6 @@ static const struct attribute_group raydium_i2c_attribute_group = {
.attrs = raydium_i2c_attributes,
};
-static void raydium_i2c_remove_sysfs_group(void *_data)
-{
- struct raydium_data *ts = _data;
-
- sysfs_remove_group(&ts->client->dev.kobj, &raydium_i2c_attribute_group);
-}
-
static int raydium_i2c_power_on(struct raydium_data *ts)
{
int error;
@@ -1120,7 +1113,7 @@ static int raydium_i2c_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj,
+ error = devm_device_add_group(&client->dev,
&raydium_i2c_attribute_group);
if (error) {
dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
@@ -1128,15 +1121,6 @@ static int raydium_i2c_probe(struct i2c_client *client,
return error;
}
- error = devm_add_action(&client->dev,
- raydium_i2c_remove_sysfs_group, ts);
- if (error) {
- raydium_i2c_remove_sysfs_group(ts);
- dev_err(&client->dev,
- "Failed to add sysfs cleanup action: %d\n", error);
- return error;
- }
-
return 0;
}
diff --git a/drivers/input/touchscreen/rohm_bu21023.c b/drivers/input/touchscreen/rohm_bu21023.c
index eeaf6ff03597..bda0500c9b57 100644
--- a/drivers/input/touchscreen/rohm_bu21023.c
+++ b/drivers/input/touchscreen/rohm_bu21023.c
@@ -1103,13 +1103,6 @@ static void rohm_ts_close(struct input_dev *input_dev)
ts->initialized = false;
}
-static void rohm_ts_remove_sysfs_group(void *_dev)
-{
- struct device *dev = _dev;
-
- sysfs_remove_group(&dev->kobj, &rohm_ts_attr_group);
-}
-
static int rohm_bu21023_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1180,20 +1173,12 @@ static int rohm_bu21023_i2c_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&dev->kobj, &rohm_ts_attr_group);
+ error = devm_device_add_group(dev, &rohm_ts_attr_group);
if (error) {
dev_err(dev, "failed to create sysfs group: %d\n", error);
return error;
}
- error = devm_add_action(dev, rohm_ts_remove_sysfs_group, dev);
- if (error) {
- rohm_ts_remove_sysfs_group(dev);
- dev_err(dev, "Failed to add sysfs cleanup action: %d\n",
- error);
- return error;
- }
-
return error;
}
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 3b3db8c868e0..1173890f6719 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -102,7 +102,7 @@ static inline bool get_down(unsigned long data0, unsigned long data1)
!(data1 & S3C2410_ADCDAT0_UPDOWN));
}
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
{
unsigned long data0;
unsigned long data1;
@@ -145,7 +145,7 @@ static void touch_timer_fire(unsigned long data)
}
}
-static DEFINE_TIMER(touch_timer, touch_timer_fire, 0, 0);
+static DEFINE_TIMER(touch_timer, touch_timer_fire);
/**
* stylus_irq - touchscreen stylus event interrupt
diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
new file mode 100644
index 000000000000..26b1cb8a88ec
--- /dev/null
+++ b/drivers/input/touchscreen/s6sy761.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Andi Shyti <andi.shyti@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Samsung S6SY761 Touchscreen device driver
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+/* commands */
+#define S6SY761_SENSE_ON 0x10
+#define S6SY761_SENSE_OFF 0x11
+#define S6SY761_TOUCH_FUNCTION 0x30 /* R/W for get/set */
+#define S6SY761_FIRMWARE_INTEGRITY 0x21
+#define S6SY761_PANEL_INFO 0x23
+#define S6SY761_DEVICE_ID 0x52
+#define S6SY761_BOOT_STATUS 0x55
+#define S6SY761_READ_ONE_EVENT 0x60
+#define S6SY761_READ_ALL_EVENT 0x61
+#define S6SY761_CLEAR_EVENT_STACK 0x62
+#define S6SY761_APPLICATION_MODE 0xe4
+
+/* events */
+#define S6SY761_EVENT_INFO 0x02
+#define S6SY761_EVENT_VENDOR_INFO 0x07
+
+/* info */
+#define S6SY761_INFO_BOOT_COMPLETE 0x00
+
+/* firmware status */
+#define S6SY761_FW_OK 0x80
+
+/*
+ * These functionality bits are listed for reference only; none of them
+ * works on the device used for development, so the driver does not use
+ * them yet.
+ */
+/* touchscreen functionalities */
+#define S6SY761_MASK_TOUCH BIT(0)
+#define S6SY761_MASK_HOVER BIT(1)
+#define S6SY761_MASK_COVER BIT(2)
+#define S6SY761_MASK_GLOVE BIT(3)
+#define S6SY761_MASK_STYLUS BIT(4)
+#define S6SY761_MASK_PALM BIT(5)
+#define S6SY761_MASK_WET BIT(6)
+#define S6SY761_MASK_PROXIMITY BIT(7)
+
+/* boot status (BS) */
+#define S6SY761_BS_BOOT_LOADER 0x10
+#define S6SY761_BS_APPLICATION 0x20
+
+/* event id */
+#define S6SY761_EVENT_ID_COORDINATE 0x00
+#define S6SY761_EVENT_ID_STATUS 0x01
+
+/* event register masks */
+#define S6SY761_MASK_TOUCH_STATE 0xc0 /* byte 0 */
+#define S6SY761_MASK_TID 0x3c
+#define S6SY761_MASK_EID 0x03
+#define S6SY761_MASK_X 0xf0 /* byte 3 */
+#define S6SY761_MASK_Y 0x0f
+#define S6SY761_MASK_Z 0x3f /* byte 6 */
+#define S6SY761_MASK_LEFT_EVENTS 0x3f /* byte 7 */
+#define S6SY761_MASK_TOUCH_TYPE 0xc0 /* MSB in byte 6, LSB in byte 7 */
+
+/* event touch state values */
+#define S6SY761_TS_NONE 0x00
+#define S6SY761_TS_PRESS 0x01
+#define S6SY761_TS_MOVE 0x02
+#define S6SY761_TS_RELEASE 0x03
+
+/* application modes */
+#define S6SY761_APP_NORMAL 0x0
+#define S6SY761_APP_LOW_POWER 0x1
+#define S6SY761_APP_TEST 0x2
+#define S6SY761_APP_FLASH 0x3
+#define S6SY761_APP_SLEEP 0x4
+
+#define S6SY761_EVENT_SIZE 8
+#define S6SY761_EVENT_COUNT 32
+#define S6SY761_DEVID_SIZE 3
+#define S6SY761_PANEL_ID_SIZE 11
+#define S6SY761_TS_STATUS_SIZE 5
+#define S6SY761_MAX_FINGERS 10
+
+#define S6SY761_DEV_NAME "s6sy761"
+
+enum s6sy761_regulators {
+ S6SY761_REGULATOR_VDD,
+ S6SY761_REGULATOR_AVDD,
+};
+
+struct s6sy761_data {
+ struct i2c_client *client;
+ struct regulator_bulk_data regulators[2];
+ struct input_dev *input;
+ struct touchscreen_properties prop;
+
+ u8 data[S6SY761_EVENT_SIZE * S6SY761_EVENT_COUNT];
+
+ u16 devid;
+ u8 tx_channel;
+};
+
+/*
+ * We can't simply use i2c_smbus_read_i2c_block_data because we may
+ * need to read more than the 32-byte SMBus block limit
+ */
+static int s6sy761_read_events(struct s6sy761_data *sdata, u16 n_events)
+{
+ u8 cmd = S6SY761_READ_ALL_EVENT;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = sdata->client->addr,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = sdata->client->addr,
+ .flags = I2C_M_RD,
+ .len = (n_events * S6SY761_EVENT_SIZE),
+ .buf = sdata->data + S6SY761_EVENT_SIZE,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(sdata->client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
+}
+
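+/*
+ * Coordinates are packed across event bytes 1-3: byte 3 carries the
+ * extra X bits in its high nibble and the extra Y bits in its low
+ * nibble; pressure lives in the low six bits of byte 6.
+ */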
+static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
+ u8 *event, u8 tid)
+{
+ u8 major = event[4];
+ u8 minor = event[5];
+ u8 z = event[6] & S6SY761_MASK_Z;
+ u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
+ u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+
+ input_mt_slot(sdata->input, tid);
+
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, true);
+ input_report_abs(sdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(sdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MAJOR, major);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MINOR, minor);
+ input_report_abs(sdata->input, ABS_MT_PRESSURE, z);
+
+ input_sync(sdata->input);
+}
+
+static void s6sy761_report_release(struct s6sy761_data *sdata,
+ u8 *event, u8 tid)
+{
+ input_mt_slot(sdata->input, tid);
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+
+ input_sync(sdata->input);
+}
+
+static void s6sy761_handle_coordinates(struct s6sy761_data *sdata, u8 *event)
+{
+ u8 tid;
+ u8 touch_state;
+
+ if (unlikely(!(event[0] & S6SY761_MASK_TID)))
+ return;
+
+ tid = ((event[0] & S6SY761_MASK_TID) >> 2) - 1;
+ touch_state = (event[0] & S6SY761_MASK_TOUCH_STATE) >> 6;
+
+ switch (touch_state) {
+
+ case S6SY761_TS_NONE:
+ break;
+ case S6SY761_TS_RELEASE:
+ s6sy761_report_release(sdata, event, tid);
+ break;
+ case S6SY761_TS_PRESS:
+ case S6SY761_TS_MOVE:
+ s6sy761_report_coordinates(sdata, event, tid);
+ break;
+ }
+}
+
+static void s6sy761_handle_events(struct s6sy761_data *sdata, u8 n_events)
+{
+ int i;
+
+ for (i = 0; i < n_events; i++) {
+ u8 *event = &sdata->data[i * S6SY761_EVENT_SIZE];
+ u8 event_id = event[0] & S6SY761_MASK_EID;
+
+ if (!event[0])
+ return;
+
+ switch (event_id) {
+
+ case S6SY761_EVENT_ID_COORDINATE:
+ s6sy761_handle_coordinates(sdata, event);
+ break;
+
+ case S6SY761_EVENT_ID_STATUS:
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
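+/*
+ * One event is always read first; its last byte says how many more
+ * events are queued, which are then fetched in a single burst before
+ * the whole batch is processed.
+ */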
+static irqreturn_t s6sy761_irq_handler(int irq, void *dev)
+{
+ struct s6sy761_data *sdata = dev;
+ int ret;
+ u8 n_events;
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_READ_ONE_EVENT,
+ S6SY761_EVENT_SIZE,
+ sdata->data);
+ if (ret < 0) {
+ dev_err(&sdata->client->dev, "failed to read events\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!sdata->data[0])
+ return IRQ_HANDLED;
+
+ n_events = sdata->data[7] & S6SY761_MASK_LEFT_EVENTS;
+ if (unlikely(n_events > S6SY761_EVENT_COUNT - 1))
+ return IRQ_HANDLED;
+
+ if (n_events) {
+ ret = s6sy761_read_events(sdata, n_events);
+ if (ret < 0) {
+ dev_err(&sdata->client->dev, "failed to read events\n");
+ return IRQ_HANDLED;
+ }
+ }
+
+ s6sy761_handle_events(sdata, n_events + 1);
+
+ return IRQ_HANDLED;
+}
+
+static int s6sy761_input_open(struct input_dev *dev)
+{
+ struct s6sy761_data *sdata = input_get_drvdata(dev);
+
+ return i2c_smbus_write_byte(sdata->client, S6SY761_SENSE_ON);
+}
+
+static void s6sy761_input_close(struct input_dev *dev)
+{
+ struct s6sy761_data *sdata = input_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte(sdata->client, S6SY761_SENSE_OFF);
+ if (ret)
+ dev_err(&sdata->client->dev, "failed to turn off sensing\n");
+}
+
+static ssize_t s6sy761_sysfs_devid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", sdata->devid);
+}
+
+static DEVICE_ATTR(devid, 0444, s6sy761_sysfs_devid, NULL);
+
+static struct attribute *s6sy761_sysfs_attrs[] = {
+ &dev_attr_devid.attr,
+ NULL
+};
+
+static struct attribute_group s6sy761_attribute_group = {
+ .attrs = s6sy761_sysfs_attrs
+};
+
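+/*
+ * After enabling the supplies the controller is expected to post a
+ * boot-complete info event and to report application (not bootloader)
+ * mode; only then is the basic touch function enabled.
+ */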
+static int s6sy761_power_on(struct s6sy761_data *sdata)
+{
+ u8 buffer[S6SY761_EVENT_SIZE];
+ u8 event;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (ret)
+ return ret;
+
+ msleep(140);
+
+ /* double check whether the touch is functional */
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_READ_ONE_EVENT,
+ S6SY761_EVENT_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ event = (buffer[0] >> 2) & 0xf;
+
+ if ((event != S6SY761_EVENT_INFO &&
+ event != S6SY761_EVENT_VENDOR_INFO) ||
+ buffer[1] != S6SY761_INFO_BOOT_COMPLETE) {
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_byte_data(sdata->client, S6SY761_BOOT_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* for some reason the device might be stuck in the bootloader */
+ if (ret != S6SY761_BS_APPLICATION)
+ return -ENODEV;
+
+ /* enable touch functionality */
+ ret = i2c_smbus_write_word_data(sdata->client,
+ S6SY761_TOUCH_FUNCTION,
+ S6SY761_MASK_TOUCH);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
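+/*
+ * Read the device ID and the panel information to obtain the axis
+ * maxima and the number of tx channels (used as the slot count), then
+ * check the firmware integrity flag.
+ */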
+static int s6sy761_hw_init(struct s6sy761_data *sdata,
+ unsigned int *max_x, unsigned int *max_y)
+{
+ u8 buffer[S6SY761_PANEL_ID_SIZE]; /* larger read size */
+ int ret;
+
+ ret = s6sy761_power_on(sdata);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_DEVICE_ID,
+ S6SY761_DEVID_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ sdata->devid = get_unaligned_be16(buffer + 1);
+
+ ret = i2c_smbus_read_i2c_block_data(sdata->client,
+ S6SY761_PANEL_INFO,
+ S6SY761_PANEL_ID_SIZE,
+ buffer);
+ if (ret < 0)
+ return ret;
+
+ *max_x = get_unaligned_be16(buffer);
+ *max_y = get_unaligned_be16(buffer + 2);
+
+ /* if no tx channels defined, at least keep one */
+ sdata->tx_channel = max_t(u8, buffer[8], 1);
+
+ ret = i2c_smbus_read_byte_data(sdata->client,
+ S6SY761_FIRMWARE_INTEGRITY);
+ if (ret < 0)
+ return ret;
+ else if (ret != S6SY761_FW_OK)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void s6sy761_power_off(void *data)
+{
+ struct s6sy761_data *sdata = data;
+
+ disable_irq(sdata->client->irq);
+ regulator_bulk_disable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+}
+
+static int s6sy761_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct s6sy761_data *sdata;
+ unsigned int max_x, max_y;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ sdata = devm_kzalloc(&client->dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, sdata);
+ sdata->client = client;
+
+ sdata->regulators[S6SY761_REGULATOR_VDD].supply = "vdd";
+ sdata->regulators[S6SY761_REGULATOR_AVDD].supply = "avdd";
+ err = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&client->dev, s6sy761_power_off, sdata);
+ if (err)
+ return err;
+
+ err = s6sy761_hw_init(sdata, &max_x, &max_y);
+ if (err)
+ return err;
+
+ sdata->input = devm_input_allocate_device(&client->dev);
+ if (!sdata->input)
+ return -ENOMEM;
+
+ sdata->input->name = S6SY761_DEV_NAME;
+ sdata->input->id.bustype = BUS_I2C;
+ sdata->input->open = s6sy761_input_open;
+ sdata->input->close = s6sy761_input_close;
+
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(sdata->input, true, &sdata->prop);
+
+ if (!input_abs_get_max(sdata->input, ABS_X) ||
+ !input_abs_get_max(sdata->input, ABS_Y)) {
+ dev_warn(&client->dev, "the axis have not been set\n");
+ }
+
+ err = input_mt_init_slots(sdata->input, sdata->tx_channel,
+ INPUT_MT_DIRECT);
+ if (err)
+ return err;
+
+ input_set_drvdata(sdata->input, sdata);
+
+ err = input_register_device(sdata->input);
+ if (err)
+ return err;
+
+ err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ s6sy761_irq_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "s6sy761_irq", sdata);
+ if (err)
+ return err;
+
+ err = devm_device_add_group(&client->dev, &s6sy761_attribute_group);
+ if (err)
+ return err;
+
+ pm_runtime_enable(&client->dev);
+
+ return 0;
+}
+
+static int s6sy761_remove(struct i2c_client *client)
+{
+ pm_runtime_disable(&client->dev);
+
+ return 0;
+}
+
+static int __maybe_unused s6sy761_runtime_suspend(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return i2c_smbus_write_byte_data(sdata->client,
+ S6SY761_APPLICATION_MODE, S6SY761_APP_SLEEP);
+}
+
+static int __maybe_unused s6sy761_runtime_resume(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ return i2c_smbus_write_byte_data(sdata->client,
+ S6SY761_APPLICATION_MODE, S6SY761_APP_NORMAL);
+}
+
+static int __maybe_unused s6sy761_suspend(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ s6sy761_power_off(sdata);
+
+ return 0;
+}
+
+static int __maybe_unused s6sy761_resume(struct device *dev)
+{
+ struct s6sy761_data *sdata = dev_get_drvdata(dev);
+
+ enable_irq(sdata->client->irq);
+
+ return s6sy761_power_on(sdata);
+}
+
+static const struct dev_pm_ops s6sy761_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s6sy761_suspend, s6sy761_resume)
+ SET_RUNTIME_PM_OPS(s6sy761_runtime_suspend,
+ s6sy761_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id s6sy761_of_match[] = {
+ { .compatible = "samsung,s6sy761", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s6sy761_of_match);
+#endif
+
+static const struct i2c_device_id s6sy761_id[] = {
+ { "s6sy761", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, s6sy761_id);
+
+static struct i2c_driver s6sy761_driver = {
+ .driver = {
+ .name = S6SY761_DEV_NAME,
+ .of_match_table = of_match_ptr(s6sy761_of_match),
+ .pm = &s6sy761_pm_ops,
+ },
+ .probe = s6sy761_probe,
+ .remove = s6sy761_remove,
+ .id_table = s6sy761_id,
+};
+
+module_i2c_driver(s6sy761_driver);
+
+MODULE_AUTHOR("Andi Shyti <andi.shyti@samsung.com>");
+MODULE_DESCRIPTION("Samsung S6SY761 Touch Screen");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index be5615c6bf8f..d5dfa4053bbf 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -29,7 +29,6 @@
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/platform_data/st1232_pdata.h>
#define ST1232_TS_NAME "st1232-ts"
@@ -152,10 +151,9 @@ static void st1232_ts_power(struct st1232_ts_data *ts, bool poweron)
}
static int st1232_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct st1232_ts_data *ts;
- struct st1232_pdata *pdata = dev_get_platdata(&client->dev);
struct input_dev *input_dev;
int error;
@@ -180,13 +178,7 @@ static int st1232_ts_probe(struct i2c_client *client,
ts->client = client;
ts->input_dev = input_dev;
- if (pdata)
- ts->reset_gpio = pdata->reset_gpio;
- else if (client->dev.of_node)
- ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
- else
- ts->reset_gpio = -ENODEV;
-
+ ts->reset_gpio = of_get_gpio(client->dev.of_node, 0);
if (gpio_is_valid(ts->reset_gpio)) {
error = devm_gpio_request(&client->dev, ts->reset_gpio, NULL);
if (error) {
@@ -281,13 +273,11 @@ static const struct i2c_device_id st1232_ts_id[] = {
};
MODULE_DEVICE_TABLE(i2c, st1232_ts_id);
-#ifdef CONFIG_OF
static const struct of_device_id st1232_ts_dt_ids[] = {
{ .compatible = "sitronix,st1232", },
{ }
};
MODULE_DEVICE_TABLE(of, st1232_ts_dt_ids);
-#endif
static struct i2c_driver st1232_ts_driver = {
.probe = st1232_ts_probe,
@@ -295,7 +285,7 @@ static struct i2c_driver st1232_ts_driver = {
.id_table = st1232_ts_id,
.driver = {
.name = ST1232_TS_NAME,
- .of_match_table = of_match_ptr(st1232_ts_dt_ids),
+ .of_match_table = st1232_ts_dt_ids,
.pm = &st1232_ts_pm_ops,
},
};
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 8c6c6178ec12..c12d01899939 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -725,8 +725,7 @@ static int stmfts_probe(struct i2c_client *client,
}
}
- err = sysfs_create_group(&sdata->client->dev.kobj,
- &stmfts_attribute_group);
+ err = devm_device_add_group(&client->dev, &stmfts_attribute_group);
if (err)
return err;
@@ -738,7 +737,6 @@ static int stmfts_probe(struct i2c_client *client,
static int stmfts_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
- sysfs_remove_group(&client->dev.kobj, &stmfts_attribute_group);
return 0;
}
diff --git a/drivers/input/touchscreen/tsc2007_iio.c b/drivers/input/touchscreen/tsc2007_iio.c
index 27b25a9fce83..e27a956f5f2b 100644
--- a/drivers/input/touchscreen/tsc2007_iio.c
+++ b/drivers/input/touchscreen/tsc2007_iio.c
@@ -104,7 +104,6 @@ static int tsc2007_read_raw(struct iio_dev *indio_dev,
static const struct iio_info tsc2007_iio_info = {
.read_raw = tsc2007_read_raw,
- .driver_module = THIS_MODULE,
};
int tsc2007_iio_configure(struct tsc2007 *ts)
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 88ea5e1b72ae..e0fde590df8e 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -202,9 +202,9 @@ out:
return IRQ_HANDLED;
}
-static void tsc200x_penup_timer(unsigned long data)
+static void tsc200x_penup_timer(struct timer_list *t)
{
- struct tsc200x *ts = (struct tsc200x *)data;
+ struct tsc200x *ts = from_timer(ts, t, penup_timer);
unsigned long flags;
spin_lock_irqsave(&ts->lock, flags);
@@ -506,7 +506,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
mutex_init(&ts->mutex);
spin_lock_init(&ts->lock);
- setup_timer(&ts->penup_timer, tsc200x_penup_timer, (unsigned long)ts);
+ timer_setup(&ts->penup_timer, tsc200x_penup_timer, 0);
INIT_DELAYED_WORK(&ts->esd_work, tsc200x_esd_work);
@@ -531,6 +531,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_drvdata(input_dev, ts);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 49a63a3c6840..a43c08ccfd3d 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TSC200X_CORE_H
#define _TSC200X_CORE_H
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index da6004e97753..638c1d78ca3a 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -146,9 +146,9 @@ static irqreturn_t w90p910_ts_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void w90p910_check_pen_up(unsigned long data)
+static void w90p910_check_pen_up(struct timer_list *t)
{
- struct w90p910_ts *w90p910_ts = (struct w90p910_ts *) data;
+ struct w90p910_ts *w90p910_ts = from_timer(w90p910_ts, t, timer);
unsigned long flags;
spin_lock_irqsave(&w90p910_ts->lock, flags);
@@ -232,8 +232,7 @@ static int w90x900ts_probe(struct platform_device *pdev)
w90p910_ts->input = input_dev;
w90p910_ts->state = TS_IDLE;
spin_lock_init(&w90p910_ts->lock);
- setup_timer(&w90p910_ts->timer, w90p910_check_pen_up,
- (unsigned long)w90p910_ts);
+ timer_setup(&w90p910_ts->timer, w90p910_check_pen_up, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index a9132603ab34..d351efd18f89 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1106,7 +1106,7 @@ static int wdt87xx_ts_probe(struct i2c_client *client,
return error;
}
- error = sysfs_create_group(&client->dev.kobj, &wdt87xx_attr_group);
+ error = devm_device_add_group(&client->dev, &wdt87xx_attr_group);
if (error) {
dev_err(&client->dev, "create sysfs failed: %d\n", error);
return error;
@@ -1115,13 +1115,6 @@ static int wdt87xx_ts_probe(struct i2c_client *client,
return 0;
}
-static int wdt87xx_ts_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &wdt87xx_attr_group);
-
- return 0;
-}
-
static int __maybe_unused wdt87xx_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -1179,7 +1172,6 @@ MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
- .remove = wdt87xx_ts_remove,
.id_table = wdt87xx_dev_id,
.driver = {
.name = WDT87XX_NAME,
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index c9d1c91e1887..fd714ee881f7 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -44,6 +44,7 @@
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
+#include <linux/mfd/wm97xx.h>
#include <linux/workqueue.h>
#include <linux/wm97xx.h>
#include <linux/uaccess.h>
@@ -581,27 +582,85 @@ static void wm97xx_ts_input_close(struct input_dev *idev)
wm->codec->acc_enable(wm, 0);
}
-static int wm97xx_probe(struct device *dev)
+static int wm97xx_register_touch(struct wm97xx *wm)
{
- struct wm97xx *wm;
- struct wm97xx_pdata *pdata = dev_get_platdata(dev);
- int ret = 0, id = 0;
+ struct wm97xx_pdata *pdata = dev_get_platdata(wm->dev);
+ int ret;
- wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL);
- if (!wm)
+ wm->input_dev = devm_input_allocate_device(wm->dev);
+ if (wm->input_dev == NULL)
return -ENOMEM;
- mutex_init(&wm->codec_mutex);
- wm->dev = dev;
- dev_set_drvdata(dev, wm);
- wm->ac97 = to_ac97_t(dev);
+ /* set up touch configuration */
+ wm->input_dev->name = "wm97xx touchscreen";
+ wm->input_dev->phys = "wm97xx";
+ wm->input_dev->open = wm97xx_ts_input_open;
+ wm->input_dev->close = wm97xx_ts_input_close;
+
+ __set_bit(EV_ABS, wm->input_dev->evbit);
+ __set_bit(EV_KEY, wm->input_dev->evbit);
+ __set_bit(BTN_TOUCH, wm->input_dev->keybit);
+
+ input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
+ abs_x[2], 0);
+ input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
+ abs_y[2], 0);
+ input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
+ abs_p[2], 0);
+
+ input_set_drvdata(wm->input_dev, wm);
+ wm->input_dev->dev.parent = wm->dev;
+
+ ret = input_register_device(wm->input_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * register our extended touch device (for machine specific
+ * extensions)
+ */
+ wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
+ if (!wm->touch_dev) {
+ ret = -ENOMEM;
+ goto touch_err;
+ }
+ platform_set_drvdata(wm->touch_dev, wm);
+ wm->touch_dev->dev.parent = wm->dev;
+ wm->touch_dev->dev.platform_data = pdata;
+ ret = platform_device_add(wm->touch_dev);
+ if (ret < 0)
+ goto touch_reg_err;
+
+ return 0;
+touch_reg_err:
+ platform_device_put(wm->touch_dev);
+touch_err:
+ input_unregister_device(wm->input_dev);
+ wm->input_dev = NULL;
+
+ return ret;
+}
+
+static void wm97xx_unregister_touch(struct wm97xx *wm)
+{
+ platform_device_unregister(wm->touch_dev);
+ input_unregister_device(wm->input_dev);
+ wm->input_dev = NULL;
+}
+
+static int _wm97xx_probe(struct wm97xx *wm)
+{
+ int id = 0;
+
+ mutex_init(&wm->codec_mutex);
+ dev_set_drvdata(wm->dev, wm);
/* check that we have a supported codec */
id = wm97xx_reg_read(wm, AC97_VENDOR_ID1);
if (id != WM97XX_ID1) {
- dev_err(dev, "Device with vendor %04x is not a wm97xx\n", id);
- ret = -ENODEV;
- goto alloc_err;
+ dev_err(wm->dev,
+ "Device with vendor %04x is not a wm97xx\n", id);
+ return -ENODEV;
}
wm->id = wm97xx_reg_read(wm, AC97_VENDOR_ID2);
@@ -629,8 +688,7 @@ static int wm97xx_probe(struct device *dev)
default:
dev_err(wm->dev, "Support for wm97%02x not compiled in.\n",
wm->id & 0xff);
- ret = -ENODEV;
- goto alloc_err;
+ return -ENODEV;
}
/* set up physical characteristics */
@@ -644,79 +702,58 @@ static int wm97xx_probe(struct device *dev)
wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS);
wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE);
- wm->input_dev = input_allocate_device();
- if (wm->input_dev == NULL) {
- ret = -ENOMEM;
- goto alloc_err;
- }
-
- /* set up touch configuration */
- wm->input_dev->name = "wm97xx touchscreen";
- wm->input_dev->phys = "wm97xx";
- wm->input_dev->open = wm97xx_ts_input_open;
- wm->input_dev->close = wm97xx_ts_input_close;
-
- __set_bit(EV_ABS, wm->input_dev->evbit);
- __set_bit(EV_KEY, wm->input_dev->evbit);
- __set_bit(BTN_TOUCH, wm->input_dev->keybit);
-
- input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1],
- abs_x[2], 0);
- input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1],
- abs_y[2], 0);
- input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1],
- abs_p[2], 0);
+ return wm97xx_register_touch(wm);
+}
- input_set_drvdata(wm->input_dev, wm);
- wm->input_dev->dev.parent = dev;
+static void wm97xx_remove_battery(struct wm97xx *wm)
+{
+ platform_device_unregister(wm->battery_dev);
+}
- ret = input_register_device(wm->input_dev);
- if (ret < 0)
- goto dev_alloc_err;
+static int wm97xx_add_battery(struct wm97xx *wm,
+ struct wm97xx_batt_pdata *pdata)
+{
+ int ret;
- /* register our battery device */
wm->battery_dev = platform_device_alloc("wm97xx-battery", -1);
- if (!wm->battery_dev) {
- ret = -ENOMEM;
- goto batt_err;
- }
+ if (!wm->battery_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->battery_dev, wm);
- wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
+ wm->battery_dev->dev.parent = wm->dev;
+ wm->battery_dev->dev.platform_data = pdata;
ret = platform_device_add(wm->battery_dev);
- if (ret < 0)
- goto batt_reg_err;
+ if (ret)
+ platform_device_put(wm->battery_dev);
- /* register our extended touch device (for machine specific
- * extensions) */
- wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
- platform_set_drvdata(wm->touch_dev, wm);
- wm->touch_dev->dev.parent = dev;
- wm->touch_dev->dev.platform_data = pdata;
- ret = platform_device_add(wm->touch_dev);
+ return ret;
+}
+
+static int wm97xx_probe(struct device *dev)
+{
+ struct wm97xx *wm;
+ int ret;
+ struct wm97xx_pdata *pdata = dev_get_platdata(dev);
+
+ wm = devm_kzalloc(dev, sizeof(struct wm97xx), GFP_KERNEL);
+ if (!wm)
+ return -ENOMEM;
+
+ wm->dev = dev;
+ wm->ac97 = to_ac97_t(dev);
+
+ ret = _wm97xx_probe(wm);
+ if (ret)
+ return ret;
+
+ ret = wm97xx_add_battery(wm, pdata ? pdata->batt_pdata : NULL);
if (ret < 0)
- goto touch_reg_err;
+ goto batt_err;
return ret;
- touch_reg_err:
- platform_device_put(wm->touch_dev);
- touch_err:
- platform_device_del(wm->battery_dev);
- batt_reg_err:
- platform_device_put(wm->battery_dev);
- batt_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
- dev_alloc_err:
- input_free_device(wm->input_dev);
- alloc_err:
- kfree(wm);
-
+batt_err:
+ wm97xx_unregister_touch(wm);
return ret;
}
@@ -724,14 +761,45 @@ static int wm97xx_remove(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
- platform_device_unregister(wm->battery_dev);
- platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- kfree(wm);
+ wm97xx_remove_battery(wm);
+ wm97xx_unregister_touch(wm);
return 0;
}
+static int wm97xx_mfd_probe(struct platform_device *pdev)
+{
+ struct wm97xx *wm;
+ struct wm97xx_platform_data *mfd_pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ wm = devm_kzalloc(&pdev->dev, sizeof(struct wm97xx), GFP_KERNEL);
+ if (!wm)
+ return -ENOMEM;
+
+ wm->dev = &pdev->dev;
+ wm->ac97 = mfd_pdata->ac97;
+
+ ret = _wm97xx_probe(wm);
+ if (ret)
+ return ret;
+
+ ret = wm97xx_add_battery(wm, mfd_pdata->batt_pdata);
+ if (ret < 0)
+ goto batt_err;
+
+ return ret;
+
+batt_err:
+ wm97xx_unregister_touch(wm);
+ return ret;
+}
+
+static int wm97xx_mfd_remove(struct platform_device *pdev)
+{
+ return wm97xx_remove(&pdev->dev);
+}
+
static int __maybe_unused wm97xx_suspend(struct device *dev)
{
struct wm97xx *wm = dev_get_drvdata(dev);
@@ -828,21 +896,41 @@ EXPORT_SYMBOL_GPL(wm97xx_unregister_mach_ops);
static struct device_driver wm97xx_driver = {
.name = "wm97xx-ts",
+#ifdef CONFIG_AC97_BUS
.bus = &ac97_bus_type,
+#endif
.owner = THIS_MODULE,
.probe = wm97xx_probe,
.remove = wm97xx_remove,
.pm = &wm97xx_pm_ops,
};
+static struct platform_driver wm97xx_mfd_driver = {
+ .driver = {
+ .name = "wm97xx-ts",
+ .pm = &wm97xx_pm_ops,
+ },
+ .probe = wm97xx_mfd_probe,
+ .remove = wm97xx_mfd_remove,
+};
+
static int __init wm97xx_init(void)
{
- return driver_register(&wm97xx_driver);
+ int ret;
+
+ ret = platform_driver_register(&wm97xx_mfd_driver);
+ if (ret)
+ return ret;
+
+ if (IS_BUILTIN(CONFIG_AC97_BUS))
+ ret = driver_register(&wm97xx_driver);
+ return ret;
}
static void __exit wm97xx_exit(void)
{
driver_unregister(&wm97xx_driver);
+ platform_driver_unregister(&wm97xx_mfd_driver);
}
module_init(wm97xx_init);
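wm97xx_register_touch() and wm97xx_add_battery() above both follow the standard child-device idiom: allocate, set the parent and platform data, add, and on failure drop the reference with platform_device_put() rather than platform_device_unregister(). Condensed into one hedged helper (hypothetical names, not part of the patch):

#include <linux/platform_device.h>

static int example_add_child(struct device *parent, const char *name,
			     void *pdata)
{
	struct platform_device *child;
	int ret;

	child = platform_device_alloc(name, -1);
	if (!child)
		return -ENOMEM;

	child->dev.parent = parent;
	child->dev.platform_data = pdata;

	ret = platform_device_add(child);
	if (ret)
		platform_device_put(child);	/* undo only the allocation */

	return ret;
}

Once added successfully, such a child is later torn down with platform_device_unregister(), as wm97xx_unregister_touch() and wm97xx_remove_battery() do.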
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index b910aea813a1..1fb695854809 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8e8874d23717..7d5eb004091d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -63,7 +63,6 @@
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
if (dma_mask > DMA_BIT_MASK(32))
pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)));
+ IOVA_PFN(DMA_BIT_MASK(32)), false);
if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+ pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+ IOVA_PFN(dma_mask), true);
return (pfn << PAGE_SHIFT);
}
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
if (!dma_dom->domain.pt_root)
goto free_dma_dom;
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
- IOVA_START_PFN, DMA_32BIT_PFN);
+ init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
goto free_dma_dom;
@@ -2383,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
size_t size,
int dir)
{
- dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
- flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@@ -2696,8 +2693,7 @@ static int init_reserved_iova_ranges(void)
struct pci_dev *pdev = NULL;
struct iova *val;
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
- IOVA_START_PFN, DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -3155,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
unsigned long start, end;
start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length);
+ end = IOVA_PFN(region->start + region->length - 1);
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
@@ -3663,11 +3659,11 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count, bool align)
{
struct irq_remap_table *table;
+ int index, c, alignment = 1;
unsigned long flags;
- int index, c;
struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
@@ -3677,16 +3673,21 @@ static int alloc_irq_index(u16 devid, int count)
if (!table)
return -ENODEV;
+ if (align)
+ alignment = roundup_pow_of_two(count);
+
spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
- for (c = 0, index = table->min_index;
- index < MAX_IRQS_PER_TABLE;
- ++index) {
- if (!iommu->irte_ops->is_allocated(table, index))
+ for (index = ALIGN(table->min_index, alignment), c = 0;
+ index < MAX_IRQS_PER_TABLE;) {
+ if (!iommu->irte_ops->is_allocated(table, index)) {
c += 1;
- else
- c = 0;
+ } else {
+ c = 0;
+ index = ALIGN(index + 1, alignment);
+ continue;
+ }
if (c == count) {
for (; c != 0; --c)
@@ -3695,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count)
index -= count - 1;
goto out;
}
+
+ index++;
}
index = -ENOSPC;
@@ -4099,7 +4102,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
else
ret = -ENOMEM;
} else {
- index = alloc_irq_index(devid, nr_irqs);
+ bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+ index = alloc_irq_index(devid, nr_irqs, align);
}
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
@@ -4173,16 +4178,26 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
-static void irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg);
+
+static int irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
struct amd_ir_data *data = irq_data->chip_data;
struct irq_2_irte *irte_info = &data->irq_2_irte;
struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+ struct irq_cfg *cfg = irqd_cfg(irq_data);
- if (iommu)
- iommu->irte_ops->activate(data->entry, irte_info->devid,
- irte_info->index);
+ if (!iommu)
+ return 0;
+
+ iommu->irte_ops->activate(data->entry, irte_info->devid,
+ irte_info->index);
+ amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
+ return 0;
}
static void irq_remapping_deactivate(struct irq_domain *domain,
@@ -4269,6 +4284,22 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+ struct amd_ir_data *ir_data,
+ struct irq_2_irte *irte_info,
+ struct irq_cfg *cfg)
+{
+
+ /*
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
+ */
+ iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+ irte_info->index, cfg->vector,
+ cfg->dest_apicid);
+}
+
static int amd_ir_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -4286,13 +4317,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- /*
- * Atomically updates the IRTE with the new destination, vector
- * and flushes the interrupt entry cache.
- */
- iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
- irte_info->index, cfg->vector, cfg->dest_apicid);
-
+ amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
/*
* After this point, all the interrupts will start arriving
* at the new destination. So, time to cleanup the previous
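alloc_irq_index() above now supports aligned allocation for multi-MSI: with align set, a run of count free IRTEs must start on an index aligned to roundup_pow_of_two(count), so a 3-vector request, for instance, may only start on a multiple of 4. The same scan, restated as a self-contained sketch over a plain bitmap rather than the driver's IRTE table:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>

static int find_free_run(const bool *allocated, int table_size,
			 int count, bool align)
{
	int alignment = align ? roundup_pow_of_two(count) : 1;
	int index, c;

	for (index = ALIGN(0, alignment), c = 0; index < table_size;) {
		if (allocated[index]) {
			/* busy slot: restart the run at the next aligned index */
			c = 0;
			index = ALIGN(index + 1, alignment);
			continue;
		}
		if (++c == count)
			return index - (count - 1);	/* run began on an aligned index */
		index++;
	}

	return -ENOSPC;
}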
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e67ba6c40faf..f122071688fd 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -316,6 +316,7 @@
#define ARM64_TCR_TBI0_MASK 0x1UL
#define CTXDESC_CD_0_AA64 (1UL << 41)
+#define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45)
#define CTXDESC_CD_0_A (1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT 47
@@ -377,7 +378,16 @@
#define CMDQ_SYNC_0_CS_SHIFT 12
#define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_CS_IRQ (1UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
+#define CMDQ_SYNC_0_MSH_SHIFT 22
+#define CMDQ_SYNC_0_MSH_ISH (3UL << CMDQ_SYNC_0_MSH_SHIFT)
+#define CMDQ_SYNC_0_MSIATTR_SHIFT 24
+#define CMDQ_SYNC_0_MSIATTR_OIWB (0xfUL << CMDQ_SYNC_0_MSIATTR_SHIFT)
+#define CMDQ_SYNC_0_MSIDATA_SHIFT 32
+#define CMDQ_SYNC_0_MSIDATA_MASK 0xffffffffUL
+#define CMDQ_SYNC_1_MSIADDR_SHIFT 0
+#define CMDQ_SYNC_1_MSIADDR_MASK 0xffffffffffffcUL
/* Event queue */
#define EVTQ_ENT_DWORDS 4
@@ -408,20 +418,12 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
-#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US 1000000 /* 1s! */
+#define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT 10
#define MSI_IOVA_BASE 0x8000000
#define MSI_IOVA_LENGTH 0x100000
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_HISILICON_HI161X
-#define ACPI_IORT_SMMU_HISILICON_HI161X 0x1
-#endif
-
-#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
-#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
-#endif
-
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -504,6 +506,10 @@ struct arm_smmu_cmdq_ent {
} pri;
#define CMDQ_OP_CMD_SYNC 0x46
+ struct {
+ u32 msidata;
+ u64 msiaddr;
+ } sync;
};
};
@@ -604,6 +610,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
#define ARM_SMMU_FEAT_STALLS (1 << 11)
#define ARM_SMMU_FEAT_HYP (1 << 12)
+#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -616,6 +623,7 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
+ atomic_t sync_nr;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -634,6 +642,8 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg;
+ u32 sync_count;
+
/* IOMMU core code handle */
struct iommu_device iommu;
};
@@ -757,26 +767,29 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
* Wait for the SMMU to consume items. If drain is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
-static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
+static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
{
ktime_t timeout;
- unsigned int delay = 1;
+ unsigned int delay = 1, spin_cnt = 0;
- /* Wait longer if it's queue drain */
- timeout = ktime_add_us(ktime_get(), drain ?
- ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US :
+ /* Wait longer if it's a CMD_SYNC */
+ timeout = ktime_add_us(ktime_get(), sync ?
+ ARM_SMMU_CMDQ_SYNC_TIMEOUT_US :
ARM_SMMU_POLL_TIMEOUT_US);
- while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
+ while (queue_sync_cons(q), (sync ? !queue_empty(q) : queue_full(q))) {
if (ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
if (wfe) {
wfe();
- } else {
+ } else if (++spin_cnt < ARM_SMMU_CMDQ_SYNC_SPIN_COUNT) {
cpu_relax();
+ continue;
+ } else {
udelay(delay);
delay *= 2;
+ spin_cnt = 0;
}
}
@@ -878,7 +891,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
}
break;
case CMDQ_OP_CMD_SYNC:
- cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+ if (ent->sync.msiaddr)
+ cmd[0] |= CMDQ_SYNC_0_CS_IRQ;
+ else
+ cmd[0] |= CMDQ_SYNC_0_CS_SEV;
+ cmd[0] |= CMDQ_SYNC_0_MSH_ISH | CMDQ_SYNC_0_MSIATTR_OIWB;
+ cmd[0] |= (u64)ent->sync.msidata << CMDQ_SYNC_0_MSIDATA_SHIFT;
+ cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
break;
default:
return -ENOENT;
@@ -936,13 +955,22 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
+static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
+{
+ struct arm_smmu_queue *q = &smmu->cmdq.q;
+ bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+
+ while (queue_insert_raw(q, cmd) == -ENOSPC) {
+ if (queue_poll_cons(q, false, wfe))
+ dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
+ }
+}
+
static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_ent *ent)
{
u64 cmd[CMDQ_ENT_DWORDS];
unsigned long flags;
- bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
- struct arm_smmu_queue *q = &smmu->cmdq.q;
if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
@@ -951,14 +979,76 @@ static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
}
spin_lock_irqsave(&smmu->cmdq.lock, flags);
- while (queue_insert_raw(q, cmd) == -ENOSPC) {
- if (queue_poll_cons(q, false, wfe))
- dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
- }
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+}
- if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
- dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
+/*
+ * The difference between val and sync_idx is bounded by the maximum size of
+ * a queue at 2^20 entries, so 32 bits is plenty for wrap-safe arithmetic.
+ */
+static int __arm_smmu_sync_poll_msi(struct arm_smmu_device *smmu, u32 sync_idx)
+{
+ ktime_t timeout;
+ u32 val;
+
+ timeout = ktime_add_us(ktime_get(), ARM_SMMU_CMDQ_SYNC_TIMEOUT_US);
+ val = smp_cond_load_acquire(&smmu->sync_count,
+ (int)(VAL - sync_idx) >= 0 ||
+ !ktime_before(ktime_get(), timeout));
+
+ return (int)(val - sync_idx) < 0 ? -ETIMEDOUT : 0;
+}
+
+static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
+{
+ u64 cmd[CMDQ_ENT_DWORDS];
+ unsigned long flags;
+ struct arm_smmu_cmdq_ent ent = {
+ .opcode = CMDQ_OP_CMD_SYNC,
+ .sync = {
+ .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
+ .msiaddr = virt_to_phys(&smmu->sync_count),
+ },
+ };
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+ spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+ return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
+}
+
+static int __arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+ u64 cmd[CMDQ_ENT_DWORDS];
+ unsigned long flags;
+ bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ struct arm_smmu_cmdq_ent ent = { .opcode = CMDQ_OP_CMD_SYNC };
+ int ret;
+
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+
+ spin_lock_irqsave(&smmu->cmdq.lock, flags);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ ret = queue_poll_cons(&smmu->cmdq.q, true, wfe);
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
+
+ return ret;
+}
+
+static void arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
+{
+ int ret;
+ bool msi = (smmu->features & ARM_SMMU_FEAT_MSI) &&
+ (smmu->features & ARM_SMMU_FEAT_COHERENCY);
+
+ ret = msi ? __arm_smmu_cmdq_issue_sync_msi(smmu)
+ : __arm_smmu_cmdq_issue_sync(smmu);
+ if (ret)
+ dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
}
/* Context descriptor manipulation functions */
@@ -996,6 +1086,11 @@ static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
CTXDESC_CD_0_V;
+
+ /* STALL_MODEL==0b10 && CD.S==0 is ILLEGAL */
+ if (smmu->features & ARM_SMMU_FEAT_STALL_FORCE)
+ val |= CTXDESC_CD_0_S;
+
cfg->cdptr[0] = cpu_to_le64(val);
val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
@@ -1029,8 +1124,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
};
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
@@ -1094,7 +1188,11 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
dst[2] = 0; /* Nuke the VMID */
- if (ste_live)
+ /*
+ * The SMMU can perform negative caching, so we must sync
+ * the STE regardless of whether the old value was live.
+ */
+ if (smmu)
arm_smmu_sync_ste_for_sid(smmu, sid);
return;
}
@@ -1112,7 +1210,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#endif
STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
- if (smmu->features & ARM_SMMU_FEAT_STALLS)
+ if (smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
@@ -1275,12 +1374,6 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
-{
- /* We don't actually use CMD_SYNC interrupts for anything */
- return IRQ_HANDLED;
-}
-
static int arm_smmu_device_disable(struct arm_smmu_device *smmu);
static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
@@ -1313,10 +1406,8 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
if (active & GERROR_MSI_EVTQ_ABT_ERR)
dev_warn(smmu->dev, "EVTQ MSI write aborted\n");
- if (active & GERROR_MSI_CMDQ_ABT_ERR) {
+ if (active & GERROR_MSI_CMDQ_ABT_ERR)
dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
- arm_smmu_cmdq_sync_handler(irq, smmu->dev);
- }
if (active & GERROR_PRIQ_ABT_ERR)
dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");
@@ -1345,17 +1436,13 @@ static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev)
static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
{
arm_smmu_gerror_handler(irq, dev);
- arm_smmu_cmdq_sync_handler(irq, dev);
return IRQ_WAKE_THREAD;
}
/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
- struct arm_smmu_cmdq_ent cmd;
-
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_tlb_sync(void *cookie)
@@ -1743,6 +1830,14 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
+
+ if (smmu)
+ __arm_smmu_tlb_sync(smmu);
+}
+
static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
@@ -1963,6 +2058,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
@@ -2147,6 +2244,7 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
+ atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2265,15 +2363,6 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
- irq = smmu->cmdq.q.irq;
- if (irq) {
- ret = devm_request_irq(smmu->dev, irq,
- arm_smmu_cmdq_sync_handler, 0,
- "arm-smmu-v3-cmdq-sync", smmu);
- if (ret < 0)
- dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
- }
-
irq = smmu->gerr_irq;
if (irq) {
ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
@@ -2399,8 +2488,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Invalidate any cached configuration */
cmd.opcode = CMDQ_OP_CFGI_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
/* Invalidate any stale TLB entries */
if (smmu->features & ARM_SMMU_FEAT_HYP) {
@@ -2410,8 +2498,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- cmd.opcode = CMDQ_OP_CMD_SYNC;
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_issue_sync(smmu);
/* Event queue */
writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
@@ -2532,13 +2619,14 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
* register, but warn on mismatch.
*/
if (!!(reg & IDR0_COHACC) != coherent)
- dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
+ dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
coherent ? "true" : "false");
switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
- case IDR0_STALL_MODEL_STALL:
- /* Fallthrough */
case IDR0_STALL_MODEL_FORCE:
+ smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
+ /* Fallthrough */
+ case IDR0_STALL_MODEL_STALL:
smmu->features |= ARM_SMMU_FEAT_STALLS;
}
@@ -2665,7 +2753,7 @@ static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu)
case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX:
smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY;
break;
- case ACPI_IORT_SMMU_HISILICON_HI161X:
+ case ACPI_IORT_SMMU_V3_HISILICON_HI161X:
smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH;
break;
}
@@ -2783,10 +2871,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "cmdq-sync");
- if (irq > 0)
- smmu->cmdq.q.irq = irq;
-
irq = platform_get_irq_byname(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
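In the arm-smmu-v3 rework above, a CMD_SYNC can now complete by MSI: the command carries the next value of sync_nr as its MSI payload, and the SMMU writes that value to sync_count when the sync retires, so waiters poll memory instead of the queue's consumer pointer. A reduced model of the wait side, mirroring __arm_smmu_sync_poll_msi() with the hardware write abstracted away:

#include <linux/errno.h>
#include <linux/ktime.h>
#include <asm/barrier.h>

static int example_sync_poll(u32 *completion_word, u32 my_seq, ktime_t timeout)
{
	u32 val;

	/*
	 * (int)(a - b) is wrap-safe for 32-bit sequence numbers as long as
	 * fewer than 2^31 syncs are outstanding, which the 2^20-entry queue
	 * limit guarantees.  smp_cond_load_acquire() re-evaluates the
	 * condition around the freshly loaded VAL and orders the load
	 * against later accesses.
	 */
	val = smp_cond_load_acquire(completion_word,
				    (int)(VAL - my_seq) >= 0 ||
				    !ktime_before(ktime_get(), timeout));

	return (int)(val - my_seq) < 0 ? -ETIMEDOUT : 0;
}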
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 3bdb799d3b4b..78d4c6b8f1ba 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -59,6 +59,7 @@
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
@@ -119,14 +120,6 @@ enum arm_smmu_implementation {
CAVIUM_SMMUV2,
};
-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
-#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4
-#endif
-#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
-#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5
-#endif
-
struct arm_smmu_s2cr {
struct iommu_group *group;
int count;
@@ -250,6 +243,7 @@ enum arm_smmu_domain_stage {
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
@@ -735,7 +729,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- const struct iommu_gather_ops *tlb_ops;
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
@@ -813,7 +806,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
- tlb_ops = &arm_smmu_s1_tlb_ops;
+ smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -833,9 +826,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
oas = min(oas, 40UL);
}
if (smmu->version == ARM_SMMU_V2)
- tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
else
- tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+ smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
@@ -863,7 +856,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
- .tlb = tlb_ops,
+ .tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
@@ -1259,6 +1252,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -1562,6 +1563,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device,
@@ -1606,7 +1609,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
* Allow unmatched Stream IDs to allocate bypass
* TLB entries for reduced latency.
*/
- reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
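Both SMMU drivers now wire up .flush_iotlb_all and .iotlb_sync, and the io-pgtable changes further down stop syncing on every unmap, so TLB maintenance for a burst of unmaps can be deferred and issued once. A hedged usage sketch, assuming the core deferred-flush helpers (iommu_unmap_fast(), iommu_tlb_range_add(), iommu_tlb_sync()) from include/linux/iommu.h:

#include <linux/iommu.h>

static void example_unmap_batch(struct iommu_domain *domain,
				unsigned long iova, size_t pgsize,
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		unsigned long addr = iova + (unsigned long)i * pgsize;

		iommu_unmap_fast(domain, addr, pgsize);
		iommu_tlb_range_add(domain, addr, pgsize);
	}

	/* one sync covers every unmap queued above */
	iommu_tlb_sync(domain);
}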
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9d1cebe7f6cb..25914d36c5ac 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
/* ...then finally give it a kicking to make sure it fits */
base_pfn = max_t(unsigned long, base_pfn,
domain->geometry.aperture_start >> order);
- end_pfn = min_t(unsigned long, end_pfn,
- domain->geometry.aperture_end >> order);
}
- /*
- * PCI devices may have larger DMA masks, but still prefer allocating
- * within a 32-bit mask to avoid DAC addressing. Such limitations don't
- * apply to the typical platform device, so for those we may as well
- * leave the cache limit at the top of their range to save an rb_last()
- * traversal on every allocation.
- */
- if (dev && dev_is_pci(dev))
- end_pfn &= DMA_BIT_MASK(32) >> order;
/* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
- /*
- * If we have devices with different DMA masks, move the free
- * area cache limit down for the benefit of the smaller one.
- */
- iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
return 0;
}
- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ init_iova_domain(iovad, 1UL << order, base_pfn);
if (!dev)
return 0;
@@ -386,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
- iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+ iova = alloc_iova_fast(iovad, iova_len,
+ DMA_BIT_MASK(32) >> shift, false);
if (!iova)
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
+ true);
return (dma_addr_t)iova << shift;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 57c920c1372d..9a7ffd13c7f0 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -801,13 +801,16 @@ int __init dmar_dev_scope_init(void)
dmar_free_pci_notify_info(info);
}
}
-
- bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}
return dmar_dev_scope_status;
}
+void dmar_register_bus_notifier(void)
+{
+ bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+}
+
int __init dmar_table_init(void)
{
@@ -1676,7 +1679,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
}
- writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+ writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
+ iommu->reg + DMAR_FSTS_REG);
unlock_exit:
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 25c2c75f5332..79c45650f8de 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -263,6 +263,7 @@ struct exynos_iommu_domain {
struct sysmmu_drvdata {
struct device *sysmmu; /* SYSMMU controller device */
struct device *master; /* master device (owner) */
+ struct device_link *link; /* runtime PM link to master */
void __iomem *sfrbase; /* our registers */
struct clk *clk; /* SYSMMU's clock */
struct clk *aclk; /* SYSMMU's aclk clock */
@@ -1250,6 +1251,8 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
static int exynos_iommu_add_device(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
struct iommu_group *group;
if (!has_sysmmu(dev))
@@ -1260,6 +1263,15 @@ static int exynos_iommu_add_device(struct device *dev)
if (IS_ERR(group))
return PTR_ERR(group);
+ list_for_each_entry(data, &owner->controllers, owner_node) {
+ /*
+ * SYSMMU will be runtime activated via device link
+ * (dependency) to its master device, so there are no
+ * direct calls to pm_runtime_get/put in this driver.
+ */
+ data->link = device_link_add(dev, data->sysmmu,
+ DL_FLAG_PM_RUNTIME);
+ }
iommu_group_put(group);
return 0;
@@ -1268,6 +1280,7 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct sysmmu_drvdata *data;
if (!has_sysmmu(dev))
return;
@@ -1283,6 +1296,9 @@ static void exynos_iommu_remove_device(struct device *dev)
}
}
iommu_group_remove_device(dev);
+
+ list_for_each_entry(data, &owner->controllers, owner_node)
+ device_link_del(data->link);
}
static int exynos_iommu_of_xlate(struct device *dev,
@@ -1316,13 +1332,6 @@ static int exynos_iommu_of_xlate(struct device *dev,
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
- /*
- * SYSMMU will be runtime activated via device link (dependency) to its
- * master device, so there are no direct calls to pm_runtime_get/put
- * in this driver.
- */
- device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
-
return 0;
}
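The exynos change moves device_link_add() from of_xlate to add_device and stores the returned link so remove_device can drop it again. A hedged sketch of the supplier/consumer relationship, with hypothetical structures rather than the driver's:

#include <linux/device.h>
#include <linux/errno.h>

struct example_sysmmu {			/* hypothetical per-controller data */
	struct device *sysmmu;		/* the IOMMU (supplier) */
	struct device_link *link;	/* runtime-PM link to the master */
};

static int example_add_device(struct device *master, struct example_sysmmu *data)
{
	/* the master (consumer) now implicitly runtime-resumes the IOMMU */
	data->link = device_link_add(master, data->sysmmu, DL_FLAG_PM_RUNTIME);
	return data->link ? 0 : -ENODEV;
}

static void example_remove_device(struct example_sysmmu *data)
{
	device_link_del(data->link);
}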
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6784a05dd6b2..a0babdbf7146 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -82,8 +82,6 @@
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
/* page table handling */
#define LEVEL_STRIDE (9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
- init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
unsigned long sagaw;
int err;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
err = init_iova_flush_queue(&domain->iovad,
iommu_flush_iova, iova_entry_free);
@@ -2058,7 +2054,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
+ if (did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
@@ -3473,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
* from higher range
*/
iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)));
+ IOVA_PFN(DMA_BIT_MASK(32)), false);
if (iova_pfn)
return iova_pfn;
}
- iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ IOVA_PFN(dma_mask), true);
if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
@@ -4752,6 +4749,16 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
+ up_write(&dmar_global_lock);
+
+ /*
+ * The bus notifier takes the dmar_global_lock, so lockdep will
+ * complain later when we register it under the lock.
+ */
+ dmar_register_bus_notifier();
+
+ down_write(&dmar_global_lock);
+
if (no_iommu || dmar_disabled) {
/*
* We exit the function here to ensure IOMMU's remapping and
@@ -4897,8 +4904,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
- DMA_32BIT_PFN);
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f6697e55c2d4..ed1cf7c5a43b 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -292,7 +292,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
int pasid_max;
int ret;
- if (WARN_ON(!iommu))
+ if (WARN_ON(!iommu || !iommu->pasid_table))
return -EINVAL;
if (dev_is_pci(dev)) {
@@ -458,6 +458,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
+ svm->iommu->pasid_table[svm->pasid].val = 0;
+ wmb();
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index a5b89f6bcdbf..76a193c7fcfc 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "DMAR-IR: " fmt
@@ -1121,6 +1122,24 @@ struct irq_remap_ops intel_irq_remap_ops = {
.get_irq_domain = intel_get_irq_domain,
};
+static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+ struct irte *irte = &ir_data->irte_entry;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+
+ /*
+ * Atomically updates the IRTE with the new destination, vector
+ * and flushes the interrupt entry cache.
+ */
+ irte->vector = cfg->vector;
+ irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+
+ /* Update the hardware only if the interrupt is in remapped mode. */
+ if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ modify_irte(&ir_data->irq_2_iommu, irte);
+}
+
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
@@ -1139,27 +1158,15 @@ static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- struct intel_ir_data *ir_data = data->chip_data;
- struct irte *irte = &ir_data->irte_entry;
- struct irq_cfg *cfg = irqd_cfg(data);
struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
int ret;
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
- /*
- * Atomically updates the IRTE with the new destination, vector
- * and flushes the interrupt entry cache.
- */
- irte->vector = cfg->vector;
- irte->dest_id = IRTE_DEST(cfg->dest_apicid);
-
- /* Update the hardware only if the interrupt is in remapped mode. */
- if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
- modify_irte(&ir_data->irq_2_iommu, irte);
-
+ intel_ir_reconfigure_irte(data, false);
/*
* After this point, all the interrupts will start arriving
* at the new destination. So, time to cleanup the previous
@@ -1389,12 +1396,11 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
-static void intel_irq_remapping_activate(struct irq_domain *domain,
- struct irq_data *irq_data)
+static int intel_irq_remapping_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool early)
{
- struct intel_ir_data *data = irq_data->chip_data;
-
- modify_irte(&data->irq_2_iommu, &data->irte_entry);
+ intel_ir_reconfigure_irte(irq_data, true);
+ return 0;
}
static void intel_irq_remapping_deactivate(struct irq_domain *domain,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 6961fc393f0b..2ca08dc9331c 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -660,16 +660,11 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
- size_t unmapped;
if (WARN_ON(upper_32_bits(iova)))
return 0;
- unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
- if (unmapped)
- io_pgtable_tlb_sync(&data->iop);
-
- return unmapped;
+ return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
}
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index e8018a308868..51e5c43caed1 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -609,7 +609,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
- size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
@@ -617,11 +616,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
return 0;
- unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
- if (unmapped)
- io_pgtable_tlb_sync(&data->iop);
-
- return unmapped;
+ return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a3e667077b14..cd2e1eafffe6 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
index bf3b317ff0c1..1e9ca7789de1 100644
--- a/drivers/iommu/iommu-traces.c
+++ b/drivers/iommu/iommu-traces.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* iommu trace points
*
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 33edfa794ae9..83fe2621effe 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,6 +24,9 @@
#include <linux/bitops.h>
#include <linux/cpu.h>
+/* The anchor node sits above the top of the usable address space */
+#define IOVA_ANCHOR ~0UL
+
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
@@ -33,11 +36,11 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
- unsigned long start_pfn, unsigned long pfn_32bit)
+ unsigned long start_pfn)
{
/*
* IOVA granularity will normally be equal to the smallest
@@ -48,12 +51,16 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
- iovad->cached32_node = NULL;
+ iovad->cached_node = &iovad->anchor.node;
+ iovad->cached32_node = &iovad->anchor.node;
iovad->granule = granule;
iovad->start_pfn = start_pfn;
- iovad->dma_32bit_pfn = pfn_32bit + 1;
+ iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->flush_cb = NULL;
iovad->fq = NULL;
+ iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
+ rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
+ rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -100,7 +107,7 @@ int init_iova_flush_queue(struct iova_domain *iovad,
spin_lock_init(&fq->lock);
}
- setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
return 0;
@@ -108,50 +115,36 @@ int init_iova_flush_queue(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(init_iova_flush_queue);
static struct rb_node *
-__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
+__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
- if ((*limit_pfn > iovad->dma_32bit_pfn) ||
- (iovad->cached32_node == NULL))
- return rb_last(&iovad->rbroot);
- else {
- struct rb_node *prev_node = rb_prev(iovad->cached32_node);
- struct iova *curr_iova =
- rb_entry(iovad->cached32_node, struct iova, node);
- *limit_pfn = curr_iova->pfn_lo;
- return prev_node;
- }
+ if (limit_pfn <= iovad->dma_32bit_pfn)
+ return iovad->cached32_node;
+
+ return iovad->cached_node;
}
static void
-__cached_rbnode_insert_update(struct iova_domain *iovad,
- unsigned long limit_pfn, struct iova *new)
+__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
- if (limit_pfn != iovad->dma_32bit_pfn)
- return;
- iovad->cached32_node = &new->node;
+ if (new->pfn_hi < iovad->dma_32bit_pfn)
+ iovad->cached32_node = &new->node;
+ else
+ iovad->cached_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
struct iova *cached_iova;
- struct rb_node *curr;
- if (!iovad->cached32_node)
- return;
- curr = iovad->cached32_node;
- cached_iova = rb_entry(curr, struct iova, node);
-
- if (free->pfn_lo >= cached_iova->pfn_lo) {
- struct rb_node *node = rb_next(&free->node);
- struct iova *iova = rb_entry(node, struct iova, node);
+ cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+ if (free->pfn_hi < iovad->dma_32bit_pfn &&
+ free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached32_node = rb_next(&free->node);
- /* only cache if it's below 32bit pfn */
- if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
- iovad->cached32_node = node;
- else
- iovad->cached32_node = NULL;
- }
+ cached_iova = rb_entry(iovad->cached_node, struct iova, node);
+ if (free->pfn_lo >= cached_iova->pfn_lo)
+ iovad->cached_node = rb_next(&free->node);
}
/* Insert the iova into domain rbtree by holding writer lock */
@@ -182,63 +175,43 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova,
rb_insert_color(&iova->node, root);
}
-/*
- * Computes the padding size required, to make the start address
- * naturally aligned on the power-of-two order of its size
- */
-static unsigned int
-iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
-{
- return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
-}
-
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
unsigned long size, unsigned long limit_pfn,
struct iova *new, bool size_aligned)
{
- struct rb_node *prev, *curr = NULL;
+ struct rb_node *curr, *prev;
+ struct iova *curr_iova;
unsigned long flags;
- unsigned long saved_pfn;
- unsigned int pad_size = 0;
+ unsigned long new_pfn;
+ unsigned long align_mask = ~0UL;
+
+ if (size_aligned)
+ align_mask <<= fls_long(size - 1);
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- saved_pfn = limit_pfn;
- curr = __get_cached_rbnode(iovad, &limit_pfn);
- prev = curr;
- while (curr) {
- struct iova *curr_iova = rb_entry(curr, struct iova, node);
-
- if (limit_pfn <= curr_iova->pfn_lo) {
- goto move_left;
- } else if (limit_pfn > curr_iova->pfn_hi) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
- break; /* found a free slot */
- }
- limit_pfn = curr_iova->pfn_lo;
-move_left:
+ curr = __get_cached_rbnode(iovad, limit_pfn);
+ curr_iova = rb_entry(curr, struct iova, node);
+ do {
+ limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+ new_pfn = (limit_pfn - size) & align_mask;
prev = curr;
curr = rb_prev(curr);
- }
+ curr_iova = rb_entry(curr, struct iova, node);
+ } while (curr && new_pfn <= curr_iova->pfn_hi);
- if (!curr) {
- if (size_aligned)
- pad_size = iova_get_pad_size(size, limit_pfn);
- if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
/* pfn_lo will point to size aligned address if size_aligned is set */
- new->pfn_lo = limit_pfn - (size + pad_size);
+ new->pfn_lo = new_pfn;
new->pfn_hi = new->pfn_lo + size - 1;
/* If we have 'prev', it's a valid place to start the insertion. */
iova_insert_rbtree(&iovad->rbroot, new, prev);
- __cached_rbnode_insert_update(iovad, saved_pfn, new);
+ __cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -258,7 +231,8 @@ EXPORT_SYMBOL(alloc_iova_mem);
void free_iova_mem(struct iova *iova)
{
- kmem_cache_free(iova_cache, iova);
+ if (iova->pfn_lo != IOVA_ANCHOR)
+ kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);
@@ -342,15 +316,12 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
while (node) {
struct iova *iova = rb_entry(node, struct iova, node);
- /* If pfn falls within iova's range, return iova */
- if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- return iova;
- }
-
if (pfn < iova->pfn_lo)
node = node->rb_left;
- else if (pfn > iova->pfn_lo)
+ else if (pfn > iova->pfn_hi)
node = node->rb_right;
+ else
+ return iova; /* pfn falls within iova's range */
}
return NULL;
@@ -424,18 +395,19 @@ EXPORT_SYMBOL_GPL(free_iova);
* @iovad: - iova domain in question
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
+ * @flush_rcache: - set to flush rcache on regular allocation failure
* This function tries to satisfy an iova allocation from the rcache,
- * and falls back to regular allocation on failure.
+ * and falls back to regular allocation on failure. If regular allocation
+ * fails too and the flush_rcache flag is set then the rcache will be flushed.
*/
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn, bool flush_rcache)
{
- bool flushed_rcache = false;
unsigned long iova_pfn;
struct iova *new_iova;
- iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
if (iova_pfn)
return iova_pfn;
@@ -444,11 +416,11 @@ retry:
if (!new_iova) {
unsigned int cpu;
- if (flushed_rcache)
+ if (!flush_rcache)
return 0;
/* Try replenishing IOVAs by flushing rcache. */
- flushed_rcache = true;
+ flush_rcache = false;
for_each_online_cpu(cpu)
free_cpu_cached_iovas(cpu, iovad);
goto retry;
@@ -547,9 +519,9 @@ static void fq_destroy_all_entries(struct iova_domain *iovad)
}
}
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
{
- struct iova_domain *iovad = (struct iova_domain *)data;
+ struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
int cpu;
atomic_set(&iovad->fq_timer_on, 0);
@@ -570,7 +542,7 @@ void queue_iova(struct iova_domain *iovad,
unsigned long pfn, unsigned long pages,
unsigned long data)
{
- struct iova_fq *fq = get_cpu_ptr(iovad->fq);
+ struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
unsigned long flags;
unsigned idx;
@@ -600,8 +572,6 @@ void queue_iova(struct iova_domain *iovad,
if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
mod_timer(&iovad->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
-
- put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);
@@ -612,21 +582,12 @@ EXPORT_SYMBOL_GPL(queue_iova);
*/
void put_iova_domain(struct iova_domain *iovad)
{
- struct rb_node *node;
- unsigned long flags;
+ struct iova *iova, *tmp;
free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = rb_first(&iovad->rbroot);
- while (node) {
- struct iova *iova = rb_entry(node, struct iova, node);
-
- rb_erase(node, &iovad->rbroot);
+ rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
- node = rb_first(&iovad->rbroot);
- }
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
@@ -695,6 +656,10 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;
+ /* Don't allow nonsensical pfns */
+ if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
+ return NULL;
+
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
@@ -738,6 +703,9 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova;
+ if (iova->pfn_lo == IOVA_ANCHOR)
+ continue;
+
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
if (!new_iova)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
@@ -855,12 +823,21 @@ static bool iova_magazine_empty(struct iova_magazine *mag)
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
unsigned long limit_pfn)
{
+ int i;
+ unsigned long pfn;
+
BUG_ON(iova_magazine_empty(mag));
- if (mag->pfns[mag->size - 1] >= limit_pfn)
- return 0;
+ /* Only fall back to the rbtree if we have no suitable pfns at all */
+ for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
+ if (i == 0)
+ return 0;
- return mag->pfns[--mag->size];
+ /* Swap it to pop it */
+ pfn = mag->pfns[i];
+ mag->pfns[i] = mag->pfns[--mag->size];
+
+ return pfn;
}
static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
@@ -1011,27 +988,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
return 0;
- return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
-}
-
-/*
- * Free a cpu's rcache.
- */
-static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
- struct iova_rcache *rcache)
-{
- struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
- unsigned long flags;
-
- spin_lock_irqsave(&cpu_rcache->lock, flags);
-
- iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
- iova_magazine_free(cpu_rcache->loaded);
-
- iova_magazine_free_pfns(cpu_rcache->prev, iovad);
- iova_magazine_free(cpu_rcache->prev);
-
- spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}
/*
@@ -1040,21 +997,20 @@ static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
static void free_iova_rcaches(struct iova_domain *iovad)
{
struct iova_rcache *rcache;
- unsigned long flags;
+ struct iova_cpu_rcache *cpu_rcache;
unsigned int cpu;
int i, j;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
- for_each_possible_cpu(cpu)
- free_cpu_iova_rcache(cpu, iovad, rcache);
- spin_lock_irqsave(&rcache->lock, flags);
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ iova_magazine_free(cpu_rcache->loaded);
+ iova_magazine_free(cpu_rcache->prev);
+ }
free_percpu(rcache->cpu_rcaches);
- for (j = 0; j < rcache->depot_size; ++j) {
- iova_magazine_free_pfns(rcache->depot[j], iovad);
+ for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
- }
- spin_unlock_irqrestore(&rcache->lock, flags);
}
}
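The rewritten __alloc_and_insert_iova_range() above drops the pad-size computation: the candidate range is placed as high as possible below limit_pfn and rounded down to the request's natural alignment in a single masking step (the caller still rejects the result if limit_pfn < size or the start falls below start_pfn). A worked sketch of that arithmetic, as an illustrative helper rather than the driver code:

#include <linux/bitops.h>

static unsigned long aligned_candidate(unsigned long limit_pfn,
				       unsigned long size, bool size_aligned)
{
	unsigned long align_mask = ~0UL;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/*
	 * e.g. size = 8 pfns, limit_pfn = 0x1000f:
	 *   align_mask = ~0x7, (0x1000f - 8) & ~0x7 = 0x10000,
	 * an 8-aligned start that still leaves room for all 8 pfns
	 * below the limit.
	 */
	return (limit_pfn - size) & align_mask;
}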
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 195d6e93ac71..8dce3a9de9d8 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -19,30 +19,49 @@
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
+#else
+#define arm_iommu_create_mapping(...) NULL
+#define arm_iommu_attach_device(...) -ENODEV
+#define arm_iommu_release_mapping(...) do {} while (0)
+#define arm_iommu_detach_device(...) do {} while (0)
#endif
#include "io-pgtable.h"
-#define IPMMU_CTX_MAX 1
+#define IPMMU_CTX_MAX 8
+
+struct ipmmu_features {
+ bool use_ns_alias_offset;
+ bool has_cache_leaf_nodes;
+ unsigned int number_of_contexts;
+ bool setup_imbuscr;
+ bool twobit_imttbcr_sl0;
+};
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct iommu_device iommu;
-
+ struct ipmmu_vmsa_device *root;
+ const struct ipmmu_features *features;
unsigned int num_utlbs;
+ unsigned int num_ctx;
spinlock_t lock; /* Protects ctx and domains[] */
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+ struct iommu_group *group;
struct dma_iommu_mapping *mapping;
};
@@ -57,18 +76,12 @@ struct ipmmu_vmsa_domain {
spinlock_t lock; /* Protects mappings */
};
-struct ipmmu_vmsa_iommu_priv {
- struct ipmmu_vmsa_device *mmu;
- struct device *dev;
- struct list_head list;
-};
-
static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}
-static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
+static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
return dev->iommu_fwspec ? dev->iommu_fwspec->iommu_priv : NULL;
}
@@ -133,6 +146,10 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT 0
+#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
+#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
+
#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
@@ -194,6 +211,36 @@ static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
+ * Root device handling
+ */
+
+static struct platform_driver ipmmu_driver;
+
+static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
+{
+ return mmu->root == mmu;
+}
+
+static int __ipmmu_check_device(struct device *dev, void *data)
+{
+ struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+ struct ipmmu_vmsa_device **rootp = data;
+
+ if (ipmmu_is_root(mmu))
+ *rootp = mmu;
+
+ return 0;
+}
+
+static struct ipmmu_vmsa_device *ipmmu_find_root(void)
+{
+ struct ipmmu_vmsa_device *root = NULL;
+
+ return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
+ __ipmmu_check_device) == 0 ? root : NULL;
+}
+
+/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@@ -208,15 +255,29 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
-static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
+static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg)
{
- return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_read(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg);
}
-static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
- u32 data)
+static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_write(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg, data);
+}
+
+static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
+ unsigned int reg, u32 data)
+{
+ if (domain->mmu != domain->mmu->root)
+ ipmmu_write(domain->mmu,
+ domain->context_id * IM_CTX_SIZE + reg, data);
+
+ ipmmu_write(domain->mmu->root,
+ domain->context_id * IM_CTX_SIZE + reg, data);
}
/* -----------------------------------------------------------------------------
@@ -228,7 +289,7 @@ static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
unsigned int count = 0;
- while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
+ while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
cpu_relax();
if (++count == TLB_LOOP_TIMEOUT) {
dev_err_ratelimited(domain->mmu->dev,
@@ -243,9 +304,9 @@ static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
u32 reg;
- reg = ipmmu_ctx_read(domain, IMCTR);
+ reg = ipmmu_ctx_read_root(domain, IMCTR);
reg |= IMCTR_FLUSH;
- ipmmu_ctx_write(domain, IMCTR, reg);
+ ipmmu_ctx_write_all(domain, IMCTR, reg);
ipmmu_tlb_sync(domain);
}
@@ -313,11 +374,12 @@ static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
spin_lock_irqsave(&mmu->lock, flags);
- ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
- if (ret != IPMMU_CTX_MAX) {
+ ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
+ if (ret != mmu->num_ctx) {
mmu->domains[ret] = domain;
set_bit(ret, mmu->ctx);
- }
+ } else
+ ret = -EBUSY;
spin_unlock_irqrestore(&mmu->lock, flags);
@@ -340,6 +402,7 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
u64 ttbr;
+ u32 tmp;
int ret;
/*
@@ -364,51 +427,59 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling. For now, delegate it to the io-pgtable code.
*/
- domain->cfg.iommu_dev = domain->mmu->dev;
+ domain->cfg.iommu_dev = domain->mmu->root->dev;
/*
* Find an unused context.
*/
- ret = ipmmu_domain_allocate_context(domain->mmu, domain);
- if (ret == IPMMU_CTX_MAX)
- return -EBUSY;
+ ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+ if (ret < 0)
+ return ret;
domain->context_id = ret;
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
domain);
if (!domain->iop) {
- ipmmu_domain_free_context(domain->mmu, domain->context_id);
+ ipmmu_domain_free_context(domain->mmu->root,
+ domain->context_id);
return -EINVAL;
}
/* TTBR0 */
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
- ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
- ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
+ ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
+ ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
/*
* TTBCR
* We use long descriptors with inner-shareable WBWA tables and allocate
* the whole 32-bit VA space to TTBR0.
*/
- ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
- IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
- IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
+ if (domain->mmu->features->twobit_imttbcr_sl0)
+ tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
+ else
+ tmp = IMTTBCR_SL0_LVL_1;
+
+ ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
+ IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
+ IMTTBCR_IRGN0_WB_WA | tmp);
/* MAIR0 */
- ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ ipmmu_ctx_write_root(domain, IMMAIR0,
+ domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
- ipmmu_ctx_write(domain, IMBUSCR,
- ipmmu_ctx_read(domain, IMBUSCR) &
- ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
+ if (domain->mmu->features->setup_imbuscr)
+ ipmmu_ctx_write_root(domain, IMBUSCR,
+ ipmmu_ctx_read_root(domain, IMBUSCR) &
+ ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
/*
* IMSTR
* Clear all interrupt flags.
*/
- ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));
+ ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
/*
* IMCTR
@@ -417,7 +488,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
* software management as we have no use for it. Flush the TLB as
* required when modifying the context registers.
*/
- ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+ ipmmu_ctx_write_all(domain, IMCTR,
+ IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
return 0;
}
@@ -430,9 +502,9 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
*
* TODO: Is TLB flush really needed ?
*/
- ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
+ ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
ipmmu_tlb_sync(domain);
- ipmmu_domain_free_context(domain->mmu, domain->context_id);
+ ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}
/* -----------------------------------------------------------------------------
@@ -446,11 +518,11 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
u32 status;
u32 iova;
- status = ipmmu_ctx_read(domain, IMSTR);
+ status = ipmmu_ctx_read_root(domain, IMSTR);
if (!(status & err_mask))
return IRQ_NONE;
- iova = ipmmu_ctx_read(domain, IMEAR);
+ iova = ipmmu_ctx_read_root(domain, IMEAR);
/*
* Clear the error status flags. Unlike traditional interrupt flag
@@ -458,7 +530,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
* seems to require 0. The error address register must be read before,
* otherwise its value will be 0.
*/
- ipmmu_ctx_write(domain, IMSTR, 0);
+ ipmmu_ctx_write_root(domain, IMSTR, 0);
/* Log fatal errors. */
if (status & IMSTR_MHIT)
@@ -499,7 +571,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
/*
* Check interrupts for all active contexts.
*/
- for (i = 0; i < IPMMU_CTX_MAX; i++) {
+ for (i = 0; i < mmu->num_ctx; i++) {
if (!mmu->domains[i])
continue;
if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
@@ -528,6 +600,27 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
return &domain->io_domain;
}
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+{
+ struct iommu_domain *io_domain = NULL;
+
+ switch (type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ io_domain = __ipmmu_domain_alloc(type);
+ break;
+
+ case IOMMU_DOMAIN_DMA:
+ io_domain = __ipmmu_domain_alloc(type);
+ if (io_domain && iommu_get_dma_cookie(io_domain)) {
+ kfree(io_domain);
+ io_domain = NULL;
+ }
+ break;
+ }
+
+ return io_domain;
+}
+
static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -536,6 +629,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
* Free the domain resources. We assume that all devices have already
* been detached.
*/
+ iommu_put_dma_cookie(io_domain);
ipmmu_domain_destroy_context(domain);
free_io_pgtable_ops(domain->iop);
kfree(domain);
@@ -544,15 +638,14 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct device *dev)
{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
- struct ipmmu_vmsa_device *mmu = priv->mmu;
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
- if (!priv || !priv->mmu) {
+ if (!mmu) {
dev_err(dev, "Cannot attach to IPMMU\n");
return -ENXIO;
}
@@ -563,6 +656,13 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
/* The domain hasn't been used yet, initialize it. */
domain->mmu = mmu;
ret = ipmmu_domain_init_context(domain);
+ if (ret < 0) {
+ dev_err(dev, "Unable to initialize IPMMU context\n");
+ domain->mmu = NULL;
+ } else {
+ dev_info(dev, "Using IPMMU context %u\n",
+ domain->context_id);
+ }
} else if (domain->mmu != mmu) {
/*
* Something is wrong, we can't attach two devices using
@@ -619,6 +719,14 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
return domain->iop->unmap(domain->iop, iova, size);
}
+static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+{
+ struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
+
+ if (domain->mmu)
+ ipmmu_tlb_flush_all(domain);
+}
+
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
@@ -633,62 +741,53 @@ static int ipmmu_init_platform_device(struct device *dev,
struct of_phandle_args *args)
{
struct platform_device *ipmmu_pdev;
- struct ipmmu_vmsa_iommu_priv *priv;
ipmmu_pdev = of_find_device_by_node(args->np);
if (!ipmmu_pdev)
return -ENODEV;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->mmu = platform_get_drvdata(ipmmu_pdev);
- priv->dev = dev;
- dev->iommu_fwspec->iommu_priv = priv;
+ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
return 0;
}
+static bool ipmmu_slave_whitelist(struct device *dev)
+{
+ /* By default, do not allow use of IPMMU */
+ return false;
+}
+
+static const struct soc_device_attribute soc_r8a7795[] = {
+ { .soc_id = "r8a7795", },
+ { /* sentinel */ }
+};
+
static int ipmmu_of_xlate(struct device *dev,
struct of_phandle_args *spec)
{
+ /* For R-Car Gen3 use a white list to opt-in slave devices */
+ if (soc_device_match(soc_r8a7795) && !ipmmu_slave_whitelist(dev))
+ return -ENODEV;
+
iommu_fwspec_add_ids(dev, spec->args, 1);
/* Initialize once - xlate() will call multiple times */
- if (to_priv(dev))
+ if (to_ipmmu(dev))
return 0;
return ipmmu_init_platform_device(dev, spec);
}
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
-
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+static int ipmmu_init_arm_mapping(struct device *dev)
{
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
-
- return __ipmmu_domain_alloc(type);
-}
-
-static int ipmmu_add_device(struct device *dev)
-{
- struct ipmmu_vmsa_device *mmu = NULL;
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
int ret;
- /*
- * Only let through devices that have been verified in xlate()
- */
- if (!to_priv(dev))
- return -ENODEV;
-
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
- ret = PTR_ERR(group);
- goto error;
+ return PTR_ERR(group);
}
ret = iommu_group_add_device(group, dev);
@@ -696,8 +795,7 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
- group = NULL;
- goto error;
+ return ret;
}
/*
@@ -709,7 +807,6 @@ static int ipmmu_add_device(struct device *dev)
* - Make the mapping size configurable ? We currently use a 2GB mapping
* at a 1GB offset to ensure that NULL VAs will fault.
*/
- mmu = to_priv(dev)->mmu;
if (!mmu->mapping) {
struct dma_iommu_mapping *mapping;
@@ -734,159 +831,73 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
- if (mmu)
+ iommu_group_remove_device(dev);
+ if (mmu->mapping)
arm_iommu_release_mapping(mmu->mapping);
- if (!IS_ERR_OR_NULL(group))
- iommu_group_remove_device(dev);
-
return ret;
}
-static void ipmmu_remove_device(struct device *dev)
-{
- arm_iommu_detach_device(dev);
- iommu_group_remove_device(dev);
-}
-
-static const struct iommu_ops ipmmu_ops = {
- .domain_alloc = ipmmu_domain_alloc,
- .domain_free = ipmmu_domain_free,
- .attach_dev = ipmmu_attach_device,
- .detach_dev = ipmmu_detach_device,
- .map = ipmmu_map,
- .unmap = ipmmu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device,
- .remove_device = ipmmu_remove_device,
- .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
- .of_xlate = ipmmu_of_xlate,
-};
-
-#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */
-
-#ifdef CONFIG_IOMMU_DMA
-
-static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
-static LIST_HEAD(ipmmu_slave_devices);
-
-static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
-{
- struct iommu_domain *io_domain = NULL;
-
- switch (type) {
- case IOMMU_DOMAIN_UNMANAGED:
- io_domain = __ipmmu_domain_alloc(type);
- break;
-
- case IOMMU_DOMAIN_DMA:
- io_domain = __ipmmu_domain_alloc(type);
- if (io_domain)
- iommu_get_dma_cookie(io_domain);
- break;
- }
-
- return io_domain;
-}
-
-static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
-{
- switch (io_domain->type) {
- case IOMMU_DOMAIN_DMA:
- iommu_put_dma_cookie(io_domain);
- /* fall-through */
- default:
- ipmmu_domain_free(io_domain);
- break;
- }
-}
-
-static int ipmmu_add_device_dma(struct device *dev)
+static int ipmmu_add_device(struct device *dev)
{
struct iommu_group *group;
/*
* Only let through devices that have been verified in xlate()
*/
- if (!to_priv(dev))
+ if (!to_ipmmu(dev))
return -ENODEV;
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
+ return ipmmu_init_arm_mapping(dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
- spin_lock(&ipmmu_slave_devices_lock);
- list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
- spin_unlock(&ipmmu_slave_devices_lock);
+ iommu_group_put(group);
return 0;
}
-static void ipmmu_remove_device_dma(struct device *dev)
+static void ipmmu_remove_device(struct device *dev)
{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
-
- spin_lock(&ipmmu_slave_devices_lock);
- list_del(&priv->list);
- spin_unlock(&ipmmu_slave_devices_lock);
-
+ arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
}
-static struct device *ipmmu_find_sibling_device(struct device *dev)
-{
- struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
- struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
- bool found = false;
-
- spin_lock(&ipmmu_slave_devices_lock);
-
- list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
- if (priv == sibling_priv)
- continue;
- if (sibling_priv->mmu == priv->mmu) {
- found = true;
- break;
- }
- }
-
- spin_unlock(&ipmmu_slave_devices_lock);
-
- return found ? sibling_priv->dev : NULL;
-}
-
-static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
+static struct iommu_group *ipmmu_find_group(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
- struct device *sibling;
- sibling = ipmmu_find_sibling_device(dev);
- if (sibling)
- group = iommu_group_get(sibling);
- if (!sibling || IS_ERR(group))
- group = generic_device_group(dev);
+ if (mmu->group)
+ return iommu_group_ref_get(mmu->group);
+
+ group = iommu_group_alloc();
+ if (!IS_ERR(group))
+ mmu->group = group;
return group;
}
static const struct iommu_ops ipmmu_ops = {
- .domain_alloc = ipmmu_domain_alloc_dma,
- .domain_free = ipmmu_domain_free_dma,
+ .domain_alloc = ipmmu_domain_alloc,
+ .domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
+ .flush_iotlb_all = ipmmu_iotlb_sync,
+ .iotlb_sync = ipmmu_iotlb_sync,
.map_sg = default_iommu_map_sg,
.iova_to_phys = ipmmu_iova_to_phys,
- .add_device = ipmmu_add_device_dma,
- .remove_device = ipmmu_remove_device_dma,
- .device_group = ipmmu_find_group_dma,
+ .add_device = ipmmu_add_device,
+ .remove_device = ipmmu_remove_device,
+ .device_group = ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
};
-#endif /* CONFIG_IOMMU_DMA */
-
/* -----------------------------------------------------------------------------
* Probe/remove and init
*/
@@ -896,10 +907,40 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
unsigned int i;
/* Disable all contexts. */
- for (i = 0; i < 4; ++i)
+ for (i = 0; i < mmu->num_ctx; ++i)
ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}
+static const struct ipmmu_features ipmmu_features_default = {
+ .use_ns_alias_offset = true,
+ .has_cache_leaf_nodes = false,
+ .number_of_contexts = 1, /* software only tested with one context */
+ .setup_imbuscr = true,
+ .twobit_imttbcr_sl0 = false,
+};
+
+static const struct ipmmu_features ipmmu_features_r8a7795 = {
+ .use_ns_alias_offset = false,
+ .has_cache_leaf_nodes = true,
+ .number_of_contexts = 8,
+ .setup_imbuscr = false,
+ .twobit_imttbcr_sl0 = true,
+};
+
+static const struct of_device_id ipmmu_of_ids[] = {
+ {
+ .compatible = "renesas,ipmmu-vmsa",
+ .data = &ipmmu_features_default,
+ }, {
+ .compatible = "renesas,ipmmu-r8a7795",
+ .data = &ipmmu_features_r8a7795,
+ }, {
+ /* Terminator */
+ },
+};
+
+MODULE_DEVICE_TABLE(of, ipmmu_of_ids);
+
static int ipmmu_probe(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu;
@@ -917,6 +958,8 @@ static int ipmmu_probe(struct platform_device *pdev)
mmu->num_utlbs = 32;
spin_lock_init(&mmu->lock);
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
+ mmu->features = of_device_get_match_data(&pdev->dev);
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -936,34 +979,71 @@ static int ipmmu_probe(struct platform_device *pdev)
* Offset the registers base unconditionally to point to the non-secure
* alias space for now.
*/
- mmu->base += IM_NS_ALIAS_OFFSET;
+ if (mmu->features->use_ns_alias_offset)
+ mmu->base += IM_NS_ALIAS_OFFSET;
+
+ mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
+ mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ found\n");
- return irq;
- }
- ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
- dev_name(&pdev->dev), mmu);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
- return ret;
- }
+ /*
+ * Determine if this IPMMU instance is a root device by checking for
+ * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
+ */
+ if (!mmu->features->has_cache_leaf_nodes ||
+ !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
+ mmu->root = mmu;
+ else
+ mmu->root = ipmmu_find_root();
- ipmmu_device_reset(mmu);
+ /*
+ * Wait until the root device has been registered for sure.
+ */
+ if (!mmu->root)
+ return -EPROBE_DEFER;
+
+ /* Root devices have mandatory IRQs */
+ if (ipmmu_is_root(mmu)) {
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no IRQ found\n");
+ return irq;
+ }
- ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
+ ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
+ dev_name(&pdev->dev), mmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
+ return ret;
+ }
- iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
- iommu_device_set_fwnode(&mmu->iommu, &pdev->dev.of_node->fwnode);
+ ipmmu_device_reset(mmu);
+ }
- ret = iommu_device_register(&mmu->iommu);
- if (ret)
- return ret;
+ /*
+ * Register the IPMMU to the IOMMU subsystem in the following cases:
+ * - R-Car Gen2 IPMMU (all devices registered)
+ * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
+ */
+ if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
+ ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
+ iommu_device_set_fwnode(&mmu->iommu,
+ &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&mmu->iommu);
+ if (ret)
+ return ret;
+
+#if defined(CONFIG_IOMMU_DMA)
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+#endif
+ }
/*
* We can't create the ARM mapping here as it requires the bus to have
@@ -983,20 +1063,13 @@ static int ipmmu_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&mmu->iommu);
iommu_device_unregister(&mmu->iommu);
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
arm_iommu_release_mapping(mmu->mapping);
-#endif
ipmmu_device_reset(mmu);
return 0;
}
-static const struct of_device_id ipmmu_of_ids[] = {
- { .compatible = "renesas,ipmmu-vmsa", },
- { }
-};
-
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
@@ -1008,15 +1081,22 @@ static struct platform_driver ipmmu_driver = {
static int __init ipmmu_init(void)
{
+ static bool setup_done;
int ret;
+ if (setup_done)
+ return 0;
+
ret = platform_driver_register(&ipmmu_driver);
if (ret < 0)
return ret;
+#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &ipmmu_ops);
+#endif
+ setup_done = true;
return 0;
}
@@ -1028,6 +1108,19 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
+#ifdef CONFIG_IOMMU_DMA
+static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
+{
+ ipmmu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
+ ipmmu_vmsa_iommu_of_setup);
+IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795",
+ ipmmu_vmsa_iommu_of_setup);
+#endif
+
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 16d33ac19db0..f227d73e7bf6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -392,6 +392,11 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
return unmapsz;
}
+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+ mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -491,6 +496,8 @@ static struct iommu_ops mtk_iommu_ops = {
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = mtk_iommu_iotlb_sync,
+ .iotlb_sync = mtk_iommu_iotlb_sync,
.iova_to_phys = mtk_iommu_iova_to_phys,
.add_device = mtk_iommu_add_device,
.remove_device = mtk_iommu_remove_device,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index bc1efbfb9ddf..542930cd183d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = {
.probe = mtk_iommu_probe,
.remove = mtk_iommu_remove,
.driver = {
- .name = "mtk-iommu",
+ .name = "mtk-iommu-v1",
.of_match_table = mtk_iommu_of_ids,
.pm = &mtk_iommu_pm_ops,
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index bd67e1b2c64e..e135ab830ebf 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -2,6 +2,7 @@
* omap iommu: tlb and pagetable primitives
*
* Copyright (C) 2008-2010 Nokia Corporation
+ * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
@@ -71,13 +72,23 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
**/
void omap_iommu_save_ctx(struct device *dev)
{
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- u32 *p = obj->ctx;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *obj;
+ u32 *p;
int i;
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- p[i] = iommu_read_reg(obj, i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ if (!arch_data)
+ return;
+
+ while (arch_data->iommu_dev) {
+ obj = arch_data->iommu_dev;
+ p = obj->ctx;
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
+ p[i]);
+ }
+ arch_data++;
}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -88,13 +99,23 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
**/
void omap_iommu_restore_ctx(struct device *dev)
{
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- u32 *p = obj->ctx;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *obj;
+ u32 *p;
int i;
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- iommu_write_reg(obj, p[i], i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ if (!arch_data)
+ return;
+
+ while (arch_data->iommu_dev) {
+ obj = arch_data->iommu_dev;
+ p = obj->ctx;
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
+ p[i]);
+ }
+ arch_data++;
}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -805,7 +826,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
struct iommu_domain *domain = obj->domain;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- if (!omap_domain->iommu_dev)
+ if (!omap_domain->dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -893,6 +914,24 @@ static void omap_iommu_detach(struct omap_iommu *obj)
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
+static bool omap_iommu_can_register(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
+ return true;
+
+ /*
+ * restrict IOMMU core registration only for processor-port MDMA MMUs
+ * on DRA7 DSPs
+ */
+ if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
+ (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
+ return true;
+
+ return false;
+}
+
static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
struct omap_iommu *obj)
{
@@ -984,19 +1023,22 @@ static int omap_iommu_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, obj);
- obj->group = iommu_group_alloc();
- if (IS_ERR(obj->group))
- return PTR_ERR(obj->group);
+ if (omap_iommu_can_register(pdev)) {
+ obj->group = iommu_group_alloc();
+ if (IS_ERR(obj->group))
+ return PTR_ERR(obj->group);
- err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
- if (err)
- goto out_group;
+ err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
+ obj->name);
+ if (err)
+ goto out_group;
- iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+ iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
- err = iommu_device_register(&obj->iommu);
- if (err)
- goto out_sysfs;
+ err = iommu_device_register(&obj->iommu);
+ if (err)
+ goto out_sysfs;
+ }
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
@@ -1018,11 +1060,13 @@ static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
- iommu_group_put(obj->group);
- obj->group = NULL;
+ if (obj->group) {
+ iommu_group_put(obj->group);
+ obj->group = NULL;
- iommu_device_sysfs_remove(&obj->iommu);
- iommu_device_unregister(&obj->iommu);
+ iommu_device_sysfs_remove(&obj->iommu);
+ iommu_device_unregister(&obj->iommu);
+ }
omap_iommu_debugfs_remove(obj);
@@ -1068,11 +1112,13 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
- struct device *dev = oiommu->dev;
+ struct device *dev = omap_domain->dev;
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
struct iotlb_entry e;
int omap_pgsz;
- u32 ret;
+ u32 ret = -EINVAL;
+ int i;
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
@@ -1084,9 +1130,24 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
iotlb_init_entry(&e, da, pa, omap_pgsz);
- ret = omap_iopgtable_store_entry(oiommu, &e);
- if (ret)
- dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ oiommu = iommu->iommu_dev;
+ ret = omap_iopgtable_store_entry(oiommu, &e);
+ if (ret) {
+ dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
+ ret);
+ break;
+ }
+ }
+
+ if (ret) {
+ while (i--) {
+ iommu--;
+ oiommu = iommu->iommu_dev;
+ iopgtable_clear_entry(oiommu, da);
+ }
+ }
return ret;
}
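A minimal sketch of the mirror-programming-with-rollback pattern used above (hypothetical names, not the OMAP API): apply the change to each unit in order and undo the already-applied ones if any later unit fails.
#include <stdbool.h>
#include <stdio.h>

struct unit { bool programmed; bool fail; };

static int program(struct unit *u)
{
	if (u->fail)
		return -1;
	u->programmed = true;
	return 0;
}

static void unprogram(struct unit *u)
{
	u->programmed = false;
}

/* Program every unit; on failure, unwind the ones already programmed. */
static int program_all(struct unit *units, int n)
{
	int ret = 0;
	int i;

	for (i = 0; i < n; i++) {
		ret = program(&units[i]);
		if (ret) {
			fprintf(stderr, "unit %d failed, rolling back\n", i);
			break;
		}
	}

	if (ret) {
		while (i--)
			unprogram(&units[i]);
	}
	return ret;
}

int main(void)
{
	struct unit units[3] = { { false, false }, { false, false }, { false, true } };

	if (program_all(units, 3))
		printf("all changes rolled back: %d %d %d\n",
		       units[0].programmed, units[1].programmed,
		       units[2].programmed);
	return 0;
}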
@@ -1095,12 +1156,90 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t size)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
- struct device *dev = oiommu->dev;
+ struct device *dev = omap_domain->dev;
+ struct omap_iommu_device *iommu;
+ struct omap_iommu *oiommu;
+ bool error = false;
+ size_t bytes = 0;
+ int i;
dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
- return iopgtable_clear_entry(oiommu, da);
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ oiommu = iommu->iommu_dev;
+ bytes = iopgtable_clear_entry(oiommu, da);
+ if (!bytes)
+ error = true;
+ }
+
+ /*
+ * simplify return - we are only checking if any of the iommus
+ * reported an error, but not if all of them are unmapping the
+ * same number of entries. This should not occur due to the
+ * mirror programming.
+ */
+ return error ? 0 : bytes;
+}
+
+static int omap_iommu_count(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ int count = 0;
+
+ while (arch_data->iommu_dev) {
+ count++;
+ arch_data++;
+ }
+
+ return count;
+}
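The arch_data array handed to the driver is terminated by an entry whose iommu_dev pointer is NULL, so both the count and the save/restore loops walk until that sentinel; a small stand-alone sketch of the same idea, with hypothetical types:
#include <stddef.h>
#include <stdio.h>

struct handle { const char *name; };
struct slot { struct handle *dev; };	/* dev == NULL terminates the array */

static int count_slots(const struct slot *s)
{
	int count = 0;

	while (s->dev) {
		count++;
		s++;
	}
	return count;
}

int main(void)
{
	struct handle a = { "mmu0" }, b = { "mmu1" };
	struct slot slots[] = { { &a }, { &b }, { NULL } };
	const struct slot *s;

	printf("%d instances\n", count_slots(slots));
	for (s = slots; s->dev; s++)	/* same walk as the save/restore loops */
		printf("visiting %s\n", s->dev->name);
	return 0;
}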
+
+/* caller should call cleanup if this function fails */
+static int omap_iommu_attach_init(struct device *dev,
+ struct omap_iommu_domain *odomain)
+{
+ struct omap_iommu_device *iommu;
+ int i;
+
+ odomain->num_iommus = omap_iommu_count(dev);
+ if (!odomain->num_iommus)
+ return -EINVAL;
+
+ odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
+ GFP_ATOMIC);
+ if (!odomain->iommus)
+ return -ENOMEM;
+
+ iommu = odomain->iommus;
+ for (i = 0; i < odomain->num_iommus; i++, iommu++) {
+ iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
+ if (!iommu->pgtable)
+ return -ENOMEM;
+
+ /*
+ * should never fail, but please keep this around to ensure
+ * we keep the hardware happy
+ */
+ if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
+ IOPGD_TABLE_SIZE)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
+{
+ int i;
+ struct omap_iommu_device *iommu = odomain->iommus;
+
+ for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
+ kfree(iommu->pgtable);
+
+ kfree(odomain->iommus);
+ odomain->num_iommus = 0;
+ odomain->iommus = NULL;
}
static int
@@ -1108,8 +1247,10 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
int ret = 0;
+ int i;
if (!arch_data || !arch_data->iommu_dev) {
dev_err(dev, "device doesn't have an associated iommu\n");
@@ -1118,26 +1259,49 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_lock(&omap_domain->lock);
- /* only a single device is supported per domain for now */
- if (omap_domain->iommu_dev) {
+ /* only a single client device can be attached to a domain */
+ if (omap_domain->dev) {
dev_err(dev, "iommu domain is already attached\n");
ret = -EBUSY;
goto out;
}
- oiommu = arch_data->iommu_dev;
-
- /* get a handle to and enable the omap iommu */
- ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
+ ret = omap_iommu_attach_init(dev, omap_domain);
if (ret) {
- dev_err(dev, "can't get omap iommu: %d\n", ret);
- goto out;
+ dev_err(dev, "failed to allocate required iommu data %d\n",
+ ret);
+ goto init_fail;
+ }
+
+ iommu = omap_domain->iommus;
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
+ /* configure and enable the omap iommu */
+ oiommu = arch_data->iommu_dev;
+ ret = omap_iommu_attach(oiommu, iommu->pgtable);
+ if (ret) {
+ dev_err(dev, "can't get omap iommu: %d\n", ret);
+ goto attach_fail;
+ }
+
+ oiommu->domain = domain;
+ iommu->iommu_dev = oiommu;
}
- omap_domain->iommu_dev = oiommu;
omap_domain->dev = dev;
- oiommu->domain = domain;
+ goto out;
+
+attach_fail:
+ while (i--) {
+ iommu--;
+ arch_data--;
+ oiommu = iommu->iommu_dev;
+ omap_iommu_detach(oiommu);
+ iommu->iommu_dev = NULL;
+ oiommu->domain = NULL;
+ }
+init_fail:
+ omap_iommu_detach_fini(omap_domain);
out:
spin_unlock(&omap_domain->lock);
return ret;
@@ -1146,21 +1310,40 @@ out:
static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
- struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu_device *iommu = omap_domain->iommus;
+ struct omap_iommu *oiommu;
+ int i;
+
+ if (!omap_domain->dev) {
+ dev_err(dev, "domain has no attached device\n");
+ return;
+ }
/* only a single device is supported per domain for now */
- if (omap_domain->iommu_dev != oiommu) {
- dev_err(dev, "invalid iommu device\n");
+ if (omap_domain->dev != dev) {
+ dev_err(dev, "invalid attached device\n");
return;
}
- iopgtable_clear_entry_all(oiommu);
+ /*
+ * cleanup in the reverse order of attachment - this addresses
+ * any h/w dependencies between multiple instances, if any
+ */
+ iommu += (omap_domain->num_iommus - 1);
+ arch_data += (omap_domain->num_iommus - 1);
+ for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
+ oiommu = iommu->iommu_dev;
+ iopgtable_clear_entry_all(oiommu);
+
+ omap_iommu_detach(oiommu);
+ iommu->iommu_dev = NULL;
+ oiommu->domain = NULL;
+ }
- omap_iommu_detach(oiommu);
+ omap_iommu_detach_fini(omap_domain);
- omap_domain->iommu_dev = NULL;
omap_domain->dev = NULL;
- oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1182,18 +1365,7 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain)
- goto out;
-
- omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
- if (!omap_domain->pgtable)
- goto fail_nomem;
-
- /*
- * should never fail, but please keep this around to ensure
- * we keep the hardware happy
- */
- if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
- goto fail_align;
+ return NULL;
spin_lock_init(&omap_domain->lock);
@@ -1202,13 +1374,6 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
omap_domain->domain.geometry.force_aperture = true;
return &omap_domain->domain;
-
-fail_align:
- kfree(omap_domain->pgtable);
-fail_nomem:
- kfree(omap_domain);
-out:
- return NULL;
}
static void omap_iommu_domain_free(struct iommu_domain *domain)
@@ -1219,10 +1384,9 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
* An iommu device is still attached
* (currently, only one device can be attached) ?
*/
- if (omap_domain->iommu_dev)
+ if (omap_domain->dev)
_omap_iommu_detach_dev(omap_domain, omap_domain->dev);
- kfree(omap_domain->pgtable);
kfree(omap_domain);
}
@@ -1230,11 +1394,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu = omap_domain->iommu_dev;
+ struct omap_iommu_device *iommu = omap_domain->iommus;
+ struct omap_iommu *oiommu = iommu->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
phys_addr_t ret = 0;
+ /*
+ * all the iommus within the domain will have identical programming,
+ * so perform the lookup using just the first iommu
+ */
iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
if (pte) {
@@ -1260,11 +1429,12 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
static int omap_iommu_add_device(struct device *dev)
{
- struct omap_iommu_arch_data *arch_data;
+ struct omap_iommu_arch_data *arch_data, *tmp;
struct omap_iommu *oiommu;
struct iommu_group *group;
struct device_node *np;
struct platform_device *pdev;
+ int num_iommus, i;
int ret;
/*
@@ -1276,36 +1446,57 @@ static int omap_iommu_add_device(struct device *dev)
if (!dev->of_node)
return 0;
- np = of_parse_phandle(dev->of_node, "iommus", 0);
- if (!np)
+ /*
+ * retrieve the count of IOMMU nodes using phandle size as element size
+ * since #iommu-cells = 0 for OMAP
+ */
+ num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
+ sizeof(phandle));
+ if (num_iommus < 0)
return 0;
- pdev = of_find_device_by_node(np);
- if (WARN_ON(!pdev)) {
- of_node_put(np);
- return -EINVAL;
- }
+ arch_data = kzalloc((num_iommus + 1) * sizeof(*arch_data), GFP_KERNEL);
+ if (!arch_data)
+ return -ENOMEM;
- oiommu = platform_get_drvdata(pdev);
- if (!oiommu) {
- of_node_put(np);
- return -EINVAL;
- }
+ for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
+ np = of_parse_phandle(dev->of_node, "iommus", i);
+ if (!np) {
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ pdev = of_find_device_by_node(np);
+ if (WARN_ON(!pdev)) {
+ of_node_put(np);
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ oiommu = platform_get_drvdata(pdev);
+ if (!oiommu) {
+ of_node_put(np);
+ kfree(arch_data);
+ return -EINVAL;
+ }
+
+ tmp->iommu_dev = oiommu;
- arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
- if (!arch_data) {
of_node_put(np);
- return -ENOMEM;
}
+ /*
+ * use the first IOMMU alone for the sysfs device linking.
+ * TODO: Evaluate if a single iommu_group needs to be
+ * maintained for both IOMMUs
+ */
+ oiommu = arch_data->iommu_dev;
ret = iommu_device_link(&oiommu->iommu, dev);
if (ret) {
kfree(arch_data);
- of_node_put(np);
return ret;
}
- arch_data->iommu_dev = oiommu;
dev->archdata.iommu = arch_data;
/*
@@ -1321,8 +1512,6 @@ static int omap_iommu_add_device(struct device *dev)
}
iommu_group_put(group);
- of_node_put(np);
-
return 0;
}
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index a675af29a6ec..1703159ef5af 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -29,17 +29,26 @@ struct iotlb_entry {
};
/**
+ * struct omap_iommu_device - omap iommu device data
+ * @pgtable: page table used by an omap iommu attached to a domain
+ * @iommu_dev: pointer to store an omap iommu instance attached to a domain
+ */
+struct omap_iommu_device {
+ u32 *pgtable;
+ struct omap_iommu *iommu_dev;
+};
+
+/**
* struct omap_iommu_domain - omap iommu domain
- * @pgtable: the page table
- * @iommu_dev: an omap iommu device attached to this domain. only a single
- * iommu device can be attached for now.
+ * @num_iommus: number of iommus in this domain
+ * @iommus: omap iommu device data for all iommus in this domain
* @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
* @domain: generic domain handle used by iommu core code
*/
struct omap_iommu_domain {
- u32 *pgtable;
- struct omap_iommu *iommu_dev;
+ u32 num_iommus;
+ struct omap_iommu_device *iommus;
struct device *dev;
spinlock_t lock;
struct iommu_domain domain;
@@ -97,17 +106,6 @@ struct iotlb_lock {
short vict;
};
-/**
- * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
- * @dev: iommu client device
- */
-static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
-{
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
-
- return arch_data->iommu_dev;
-}
-
/*
* MMU Register offsets
*/
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c8a587d034b0..e07f02d00c68 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -66,6 +66,7 @@ struct qcom_iommu_ctx {
void __iomem *base;
bool secure_init;
u8 asid; /* asid and ctx bank # are 1:1 */
+ struct iommu_domain *domain;
};
struct qcom_iommu_domain {
@@ -194,12 +195,15 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
- dev_err_ratelimited(ctx->dev,
- "Unhandled context fault: fsr=0x%x, "
- "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
- fsr, iova, fsynr, ctx->asid);
+ if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
+ dev_err_ratelimited(ctx->dev,
+ "Unhandled context fault: fsr=0x%x, "
+ "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
+ fsr, iova, fsynr, ctx->asid);
+ }
iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
+ iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);
return IRQ_HANDLED;
}
@@ -274,12 +278,14 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
- SCTLR_M | SCTLR_S1_ASIDPNE;
+ SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;
if (IS_ENABLED(CONFIG_BIG_ENDIAN))
reg |= SCTLR_E;
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
+
+ ctx->domain = domain;
}
mutex_unlock(&qcom_domain->init_mutex);
@@ -395,6 +401,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+ ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
@@ -443,6 +451,19 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
+ struct io_pgtable, ops);
+ if (!qcom_domain->pgtbl_ops)
+ return;
+
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ qcom_iommu_tlb_sync(pgtable->cookie);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+}
+
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -570,6 +591,8 @@ static const struct iommu_ops qcom_iommu_ops = {
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.map_sg = default_iommu_map_sg,
+ .flush_iotlb_all = qcom_iommu_iotlb_sync,
+ .iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.add_device = qcom_iommu_add_device,
.remove_device = qcom_iommu_remove_device,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 0e2f31f9032b..22d4db302c1c 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU API for s390 PCI devices
*
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9d8a1dd2e2c2..c70476b34a53 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,5 @@
+menu "IRQ chip support"
+
config IRQCHIP
def_bool y
depends on OF_IRQ
@@ -39,8 +41,15 @@ config ARM_GIC_V3
config ARM_GIC_V3_ITS
bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+ bool
+ depends on ARM_GIC_V3_ITS
depends on PCI
depends on PCI_MSI
+ default ARM_GIC_V3_ITS
config ARM_NVIC
bool
@@ -151,6 +160,9 @@ config CLPS711X_IRQCHIP
select SPARSE_IRQ
default y
+config OMPIC
+ bool
+
config OR1K_PIC
bool
select IRQ_DOMAIN
@@ -304,6 +316,7 @@ config EZNPS_GIC
config STM32_EXTI
bool
select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
config QCOM_IRQ_COMBINER
bool "QCOM IRQ combiner support"
@@ -321,3 +334,13 @@ config IRQ_UNIPHIER_AIDET
select IRQ_DOMAIN_HIERARCHY
help
Support for the UniPhier AIDET (ARM Interrupt Detector).
+
+config MESON_IRQ_GPIO
+ bool "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support Meson SoC Family GPIO Interrupt Multiplexer
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 845abc107ad5..d2df34a54d38 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IRQCHIP) += irqchip.o
obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
+obj-$(CONFIG_OMPIC) += irq-ompic.o
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
@@ -28,7 +30,8 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
@@ -79,3 +82,5 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
+obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 815b88dd18f2..f20200af0992 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
return -ENOMEM;
i2c_ic->base = of_iomap(node, 0);
- if (IS_ERR(i2c_ic->base)) {
- ret = PTR_ERR(i2c_ic->base);
+ if (!i2c_ic->base) {
+ ret = -ENOMEM;
goto err_free_ic;
}
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index dc8c1e3eafe7..667b9e14b032 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -19,62 +19,9 @@
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#include <asm/exception.h>
-
-#define LOCAL_CONTROL 0x000
-#define LOCAL_PRESCALER 0x008
+#include <linux/irqchip/irq-bcm2836.h>
-/*
- * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
- * next 2 bits identify the CPU that the GPU FIQ goes to.
- */
-#define LOCAL_GPU_ROUTING 0x00c
-/* When setting bits 0-3, enables PMU interrupts on that CPU. */
-#define LOCAL_PM_ROUTING_SET 0x010
-/* When setting bits 0-3, disables PMU interrupts on that CPU. */
-#define LOCAL_PM_ROUTING_CLR 0x014
-/*
- * The low 4 bits of this are the CPU's timer IRQ enables, and the
- * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
- * bits).
- */
-#define LOCAL_TIMER_INT_CONTROL0 0x040
-/*
- * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
- * the next 4 bits are the CPU's per-mailbox FIQ enables (which
- * override the IRQ bits).
- */
-#define LOCAL_MAILBOX_INT_CONTROL0 0x050
-/*
- * The CPU's interrupt status register. Bits are defined by the the
- * LOCAL_IRQ_* bits below.
- */
-#define LOCAL_IRQ_PENDING0 0x060
-/* Same status bits as above, but for FIQ. */
-#define LOCAL_FIQ_PENDING0 0x070
-/*
- * Mailbox write-to-set bits. There are 16 mailboxes, 4 per CPU, and
- * these bits are organized by mailbox number and then CPU number. We
- * use mailbox 0 for IPIs. The mailbox's interrupt is raised while
- * any bit is set.
- */
-#define LOCAL_MAILBOX0_SET0 0x080
-#define LOCAL_MAILBOX3_SET0 0x08c
-/* Mailbox write-to-clear bits. */
-#define LOCAL_MAILBOX0_CLR0 0x0c0
-#define LOCAL_MAILBOX3_CLR0 0x0cc
-
-#define LOCAL_IRQ_CNTPSIRQ 0
-#define LOCAL_IRQ_CNTPNSIRQ 1
-#define LOCAL_IRQ_CNTHPIRQ 2
-#define LOCAL_IRQ_CNTVIRQ 3
-#define LOCAL_IRQ_MAILBOX0 4
-#define LOCAL_IRQ_MAILBOX1 5
-#define LOCAL_IRQ_MAILBOX2 6
-#define LOCAL_IRQ_MAILBOX3 7
-#define LOCAL_IRQ_GPU_FAST 8
-#define LOCAL_IRQ_PMU_FAST 9
-#define LAST_IRQ LOCAL_IRQ_PMU_FAST
+#include <asm/exception.h>
struct bcm2836_arm_irqchip_intc {
struct irq_domain *domain;
@@ -215,24 +162,6 @@ static int bcm2836_cpu_dying(unsigned int cpu)
cpu);
return 0;
}
-
-#ifdef CONFIG_ARM
-static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
- struct task_struct *idle)
-{
- unsigned long secondary_startup_phys =
- (unsigned long)virt_to_phys((void *)secondary_startup);
-
- writel(secondary_startup_phys,
- intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
-
- return 0;
-}
-
-static const struct smp_operations bcm2836_smp_ops __initconst = {
- .smp_boot_secondary = bcm2836_smp_boot_secondary,
-};
-#endif
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -249,10 +178,6 @@ bcm2836_arm_irqchip_smp_init(void)
bcm2836_cpu_dying);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
-
-#ifdef CONFIG_ARM
- smp_set_ops(&bcm2836_smp_ops);
-#endif
#endif
}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index b009b916a292..691d20eb0bec 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -1,7 +1,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,35 +31,82 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-/* Register offsets in the L2 interrupt controller */
-#define CPU_STATUS 0x00
-#define CPU_SET 0x04
-#define CPU_CLEAR 0x08
-#define CPU_MASK_STATUS 0x0c
-#define CPU_MASK_SET 0x10
-#define CPU_MASK_CLEAR 0x14
+struct brcmstb_intc_init_params {
+ irq_flow_handler_t handler;
+ int cpu_status;
+ int cpu_clear;
+ int cpu_mask_status;
+ int cpu_mask_set;
+ int cpu_mask_clear;
+};
+
+/* Register offsets in the L2 latched interrupt controller */
+static const struct brcmstb_intc_init_params l2_edge_intc_init = {
+ .handler = handle_edge_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = 0x08,
+ .cpu_mask_status = 0x0c,
+ .cpu_mask_set = 0x10,
+ .cpu_mask_clear = 0x14
+};
+
+/* Register offsets in the L2 level interrupt controller */
+static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
+ .handler = handle_level_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = -1, /* Register not present */
+ .cpu_mask_status = 0x04,
+ .cpu_mask_set = 0x08,
+ .cpu_mask_clear = 0x0C
+};
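A minimal sketch of the table-driven init used above: per-variant parameters feed one shared routine, with a negative offset marking a register the variant does not have (names here are illustrative, not the driver's):
#include <stdio.h>

struct variant_params {
	const char *name;
	int status_off;
	int clear_off;		/* -1 means the variant has no clear register */
};

static const struct variant_params edge_params = { "edge", 0x00, 0x08 };
static const struct variant_params level_params = { "level", 0x00, -1 };

/* One init routine serves both variants by consulting the parameter table. */
static void init_variant(const struct variant_params *p)
{
	printf("%s: status at %#x\n", p->name, p->status_off);
	if (p->clear_off >= 0)
		printf("%s: ack via clear register at %#x\n", p->name, p->clear_off);
	else
		printf("%s: no clear register, ack handled by mask/unmask only\n", p->name);
}

int main(void)
{
	init_variant(&edge_params);
	init_variant(&level_params);
	return 0;
}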
/* L2 intc private data structure */
struct brcmstb_l2_intc_data {
- int parent_irq;
- void __iomem *base;
struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int status_offset;
+ int mask_offset;
bool can_wake;
u32 saved_mask; /* for suspend/resume */
};
+/**
+ * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register and pending interrupt is acknowledged by setting a bit.
+ *
+ * Note: This function is generic and could easily be added to the
+ * generic irqchip implementation if there ever becomes a will to do so.
+ * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
+ *
+ * e.g.: https://patchwork.kernel.org/patch/9831047/
+ */
+static void brcmstb_l2_mask_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(gc, mask, ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int irq;
u32 status;
chained_irq_enter(chip, desc);
- status = irq_reg_readl(gc, CPU_STATUS) &
- ~(irq_reg_readl(gc, CPU_MASK_STATUS));
+ status = irq_reg_readl(b->gc, b->status_offset) &
+ ~(irq_reg_readl(b->gc, b->mask_offset));
if (status == 0) {
raw_spin_lock(&desc->lock);
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
do {
irq = ffs(status) - 1;
- /* ack at our level */
- irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
status &= ~(1 << irq);
- generic_handle_irq(irq_find_mapping(b->domain, irq));
+ generic_handle_irq(irq_linear_revmap(b->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
@@ -82,16 +127,17 @@ out:
static void brcmstb_l2_intc_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS);
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
- irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET);
- irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
+ irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
irq_gc_unlock(gc);
}
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
- /* Clear unmasked non-wakeup interrupts */
- irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR);
+ if (ct->chip.irq_ack) {
+ /* Clear unmasked non-wakeup interrupts */
+ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
+ ct->regs.ack);
+ }
/* Restore the saved mask */
- irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET);
- irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
+ irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
irq_gc_unlock(gc);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent)
+ struct device_node *parent,
+ const struct brcmstb_intc_init_params
+ *init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;
- struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret;
unsigned int flags;
+ int parent_irq;
+ void __iomem *base;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->base = of_iomap(np, 0);
- if (!data->base) {
+ base = of_iomap(np, 0);
+ if (!base) {
pr_err("failed to remap intc L2 registers\n");
ret = -ENOMEM;
goto out_free;
}
/* Disable all interrupts by default */
- writel(0xffffffff, data->base + CPU_MASK_SET);
+ writel(0xffffffff, base + init_params->cpu_mask_set);
/* Wakeup interrupts may be retained from S5 (cold boot) */
data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
- if (!data->can_wake)
- writel(0xffffffff, data->base + CPU_CLEAR);
+ if (!data->can_wake && (init_params->cpu_clear >= 0))
+ writel(0xffffffff, base + init_params->cpu_clear);
- data->parent_irq = irq_of_parse_and_map(np, 0);
- if (!data->parent_irq) {
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (!parent_irq) {
pr_err("failed to find parent interrupt\n");
ret = -EINVAL;
goto out_unmap;
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, handle_edge_irq, clr, 0, flags);
+ np->full_name, init_params->handler, clr, 0, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
}
/* Set the IRQ chaining logic */
- irq_set_chained_handler_and_data(data->parent_irq,
+ irq_set_chained_handler_and_data(parent_irq,
brcmstb_l2_intc_irq_handle, data);
- gc = irq_get_domain_generic_chip(data->domain, 0);
- gc->reg_base = data->base;
- gc->private = data;
- ct = gc->chip_types;
-
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->regs.ack = CPU_CLEAR;
+ data->gc = irq_get_domain_generic_chip(data->domain, 0);
+ data->gc->reg_base = base;
+ data->gc->private = data;
+ data->status_offset = init_params->cpu_status;
+ data->mask_offset = init_params->cpu_mask_status;
+
+ ct = data->gc->chip_types;
+
+ if (init_params->cpu_clear >= 0) {
+ ct->regs.ack = init_params->cpu_clear;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ } else {
+ /* No Ack - but still slightly more efficient to define this */
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ }
ct->chip.irq_mask = irq_gc_mask_disable_reg;
- ct->regs.disable = CPU_MASK_SET;
+ ct->regs.disable = init_params->cpu_mask_set;
+ ct->regs.mask = init_params->cpu_mask_status;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
- ct->regs.enable = CPU_MASK_CLEAR;
+ ct->regs.enable = init_params->cpu_mask_clear;
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* This IRQ chip can wake the system, set all child interrupts
* in wake_enabled mask
*/
- gc->wake_enabled = 0xffffffff;
+ data->gc->wake_enabled = 0xffffffff;
ct->chip.irq_set_wake = irq_gc_set_wake;
}
pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
- data->base, data->parent_irq);
+ base, parent_irq);
return 0;
out_free_domain:
irq_domain_remove(data->domain);
out_unmap:
- iounmap(data->base);
+ iounmap(base);
out_free:
kfree(data);
return ret;
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);
+
+int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+}
+IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
+
+int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+}
+IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
+ brcmstb_l2_lvl_intc_of_init);
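The two wrappers above hand the shared init path a per-variant parameter table. Those tables are defined earlier in the file and are not part of this hunk; a rough sketch of what they presumably look like is below. The field names and the handler choice follow the uses visible in this patch; the register offsets are illustrative assumptions (the edge variant reusing the historical CPU_* offsets, the level variant having no ack register), not values taken from this diff.

/* Sketch only: assumed shape of the per-variant init tables */
struct brcmstb_intc_init_params {
        irq_flow_handler_t handler;
        int cpu_status;
        int cpu_clear;
        int cpu_mask_status;
        int cpu_mask_set;
        int cpu_mask_clear;
};

static const struct brcmstb_intc_init_params l2_edge_intc_init = {
        .handler         = handle_edge_irq,
        .cpu_status      = 0x00,        /* was CPU_STATUS */
        .cpu_clear       = 0x08,        /* was CPU_CLEAR */
        .cpu_mask_status = 0x0c,        /* was CPU_MASK_STATUS */
        .cpu_mask_set    = 0x10,        /* was CPU_MASK_SET */
        .cpu_mask_clear  = 0x14,        /* was CPU_MASK_CLEAR */
};

static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
        .handler         = handle_level_irq,
        .cpu_status      = 0x00,
        .cpu_clear       = -1,          /* level block has no ack register */
        .cpu_mask_status = 0x04,
        .cpu_mask_set    = 0x08,
        .cpu_mask_clear  = 0x0c,
};

With cpu_clear negative, the init code above skips setting ct->regs.ack and falls back to irq_gc_mask_disable_reg for irq_mask_ack, which is exactly the branch guarded by the cpu_clear >= 0 checks.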
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index cd2dc8bbbe9c..0bf98425dca5 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* irqchip for the Faraday Technology FTINTC010 Copyright (C) 2017 Linus
* Walleij <linus.walleij@linaro.org>
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae71804b5dd..30017df5b54c 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
for (; quirks->desc; quirks++) {
if (quirks->iidr != (quirks->mask & iidr))
continue;
- quirks->init(data);
- pr_info("GIC: enabling workaround for %s\n", quirks->desc);
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
}
}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 205e5fddf6da..3919cd7c5285 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -23,7 +23,7 @@
struct gic_quirk {
const char *desc;
- void (*init)(void *data);
+ bool (*init)(void *data);
u32 iidr;
u32 mask;
};
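With init returning bool, a quirk callback can now decline to apply itself, and gic_enable_quirks() only prints the "enabling workaround" message when the quirk actually took effect. A minimal sketch of a conditional quirk under the new signature; the property name and flag are hypothetical, the pattern mirrors the Synquacer quirk added later in this series:

static bool its_enable_quirk_example(void *data)
{
        struct its_node *its = data;

        /* Only apply when the firmware advertises the property we need */
        if (!fwnode_property_present(its->fwnode_handle, "vendor,example-quirk"))
                return false;   /* not applied: no workaround message logged */

        its->flags |= ITS_FLAGS_EXAMPLE_QUIRK; /* hypothetical flag */
        return true;
}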
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 18d58d2b4ffe..b4c1924f0255 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Special GIC quirks for the ARM RealView
* Copyright (C) 2015 Linus Walleij
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e88395605e32..4039e64cd342 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -83,6 +83,8 @@ struct its_baser {
u32 psz;
};
+struct its_device;
+
/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
@@ -97,12 +99,18 @@ struct its_node {
struct its_cmd_block *cmd_write;
struct its_baser tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
+ struct fwnode_handle *fwnode_handle;
+ u64 (*get_msi_base)(struct its_device *its_dev);
struct list_head its_device_list;
u64 flags;
+ unsigned long list_nr;
u32 ite_size;
u32 device_ids;
int numa_node;
+ unsigned int msi_domain_flags;
+ u32 pre_its_base; /* for Socionext Synquacer */
bool is_v4;
+ int vlpi_redist_offset;
};
#define ITS_ITT_ALIGN SZ_256
@@ -152,12 +160,6 @@ static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
-/*
- * We have a maximum number of 16 ITSs in the whole system if we're
- * using the ITSList mechanism
- */
-#define ITS_LIST_MAX 16
-
static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);
@@ -272,10 +274,12 @@ struct its_cmd_block {
#define ITS_CMD_QUEUE_SZ SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
-typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
-typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
@@ -379,7 +383,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd)
cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
-static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapd_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long itt_addr;
@@ -399,7 +404,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapc_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_MAPC);
@@ -412,7 +418,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
return desc->its_mapc_cmd.col;
}
-static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -431,7 +438,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_movi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -449,7 +457,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_discard_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -466,7 +475,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_inv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -483,7 +493,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_int_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -500,7 +511,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_clear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -517,7 +529,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_invall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
@@ -528,7 +541,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_VINVALL);
@@ -539,17 +553,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
return desc->its_vinvall_cmd.vpe;
}
-static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long vpt_addr;
+ u64 target;
vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
- its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
@@ -558,7 +575,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
return desc->its_vmapp_cmd.vpe;
}
-static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -580,7 +598,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
return desc->its_vmapti_cmd.vpe;
}
-static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -602,14 +621,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
return desc->its_vmovi_cmd.vpe;
}
-static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ u64 target;
+
+ target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMOVP);
its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
- its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_fixup_cmd(cmd);
@@ -688,9 +711,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
dsb(ishst);
}
-static void its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
- struct its_cmd_block *to)
+static int its_wait_for_range_completion(struct its_node *its,
+ struct its_cmd_block *from,
+ struct its_cmd_block *to)
{
u64 rd_idx, from_idx, to_idx;
u32 count = 1000000; /* 1s! */
@@ -711,12 +734,15 @@ static void its_wait_for_range_completion(struct its_node *its,
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout\n");
- return;
+ pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
+ from_idx, to_idx, rd_idx);
+ return -1;
}
cpu_relax();
udelay(1);
}
+
+ return 0;
}
/* Warning, macro hell follows */
@@ -736,7 +762,7 @@ void name(struct its_node *its, \
raw_spin_unlock_irqrestore(&its->lock, flags); \
return; \
} \
- sync_obj = builder(cmd, desc); \
+ sync_obj = builder(its, cmd, desc); \
its_flush_cmd(its, cmd); \
\
if (sync_obj) { \
@@ -744,7 +770,7 @@ void name(struct its_node *its, \
if (!sync_cmd) \
goto post; \
\
- buildfn(sync_cmd, sync_obj); \
+ buildfn(its, sync_cmd, sync_obj); \
its_flush_cmd(its, sync_cmd); \
} \
\
@@ -752,10 +778,12 @@ post: \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- its_wait_for_range_completion(its, cmd, next_cmd); \
+ if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
-static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_sync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_collection *sync_col)
{
its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
@@ -767,7 +795,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
struct its_collection, its_build_sync_cmd)
-static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_vsync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_vpe *sync_vpe)
{
its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
@@ -899,21 +928,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id)
its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}
-static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+static void its_send_vmapp(struct its_node *its,
+ struct its_vpe *vpe, bool valid)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vmapp_cmd.vpe = vpe;
desc.its_vmapp_cmd.valid = valid;
+ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
-
- desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}
static void its_send_vmovp(struct its_vpe *vpe)
@@ -951,6 +975,9 @@ static void its_send_vmovp(struct its_vpe *vpe)
if (!its->is_v4)
continue;
+ if (!vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
@@ -958,18 +985,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
-static void its_send_vinvall(struct its_vpe *vpe)
+static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vinvall_cmd.vpe = vpe;
-
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
- its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
/*
@@ -991,9 +1012,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
if (irqd_is_forwarded_to_vcpu(d)) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
prop_page = its_dev->event_map.vm->vprop_page;
- hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+ map = &its_dev->event_map.vlpi_maps[event];
+ hwirq = map->vintid;
+
+ /* Remember the updated property */
+ map->properties &= ~clr;
+ map->properties |= set | LPI_PROP_GROUP1;
} else {
prop_page = gic_rdists->prop_page;
hwirq = d->hwirq;
@@ -1099,6 +1126,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->phys_base + GITS_TRANSLATER;
+}
+
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1106,7 +1140,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
u64 addr;
its = its_dev->its;
- addr = its->phys_base + GITS_TRANSLATER;
+ addr = its->get_msi_base(its_dev);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
@@ -1133,6 +1167,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ /*
+ * If the VM wasn't mapped yet, iterate over the vpes and get
+ * them mapped now.
+ */
+ vm->vlpi_count[its->list_nr]++;
+
+ if (vm->vlpi_count[its->list_nr] == 1) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+ struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ its_send_vmapp(its, vm->vpes[i], false);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
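The on-demand VMAPP above relies on a per-ITS reference count kept in the VM structure. Its declaration lives in the GICv4 header rather than in this hunk, but presumably looks roughly like the sketch below (field placement assumed); the first VLPI forwarded to an ITS maps every VPE of the VM there (VMAPP + VINVALL), and dropping the last VLPI unmaps them again, so an ITS only carries vPE state while it actually has VLPIs to deliver.

/* Assumed shape of the refcount backing its_map_vm()/its_unmap_vm() */
struct its_vm {
        /* ... existing fields (vprop_page, vpes, nr_vpes, ...) ... */
        u32     vlpi_count[GICv4_ITS_LIST_MAX]; /* VLPIs forwarded per ITS list entry */
};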
+
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1168,12 +1256,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
/* Already mapped, move it around */
its_send_vmovi(its_dev, event);
} else {
+ /* Ensure all the VPEs are mapped on this ITS */
+ its_map_vm(its_dev->its, info->map->vm);
+
+ /*
+ * Flag the interrupt as forwarded so that we can
+ * start poking the virtual property table.
+ */
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Write out the property to the prop table */
+ lpi_write_config(d, 0xff, info->map->properties);
+
/* Drop the physical mapping */
its_send_discard(its_dev, event);
/* and install the virtual one */
its_send_vmapti(its_dev, event);
- irqd_set_forwarded_to_vcpu(d);
/* Increment the number of VLPIs */
its_dev->event_map.nr_vlpis++;
@@ -1229,6 +1328,9 @@ static int its_vlpi_unmap(struct irq_data *d)
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
/*
* Drop the refcount and make the device available again if
* this was the last VLPI.
@@ -1669,23 +1771,14 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = gic_read_typer(its->base + GITS_TYPER);
- u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_RaWaWb;
u32 psz = SZ_64K;
int err, i;
- if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
- /*
- * erratum 22375: only alloc 8MB table size
- * erratum 24313: ignore memory access type
- */
- cache = GITS_BASER_nCnB;
- ids = 0x14; /* 20 bits, 8MB */
- }
-
- its->device_ids = ids;
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = its->tables + i;
@@ -2209,8 +2302,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
-static void its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2228,6 +2321,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -2394,6 +2488,8 @@ static int its_vpe_set_affinity(struct irq_data *d,
its_vpe_db_proxy_move(vpe, from, cpu);
}
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK_DONE;
}
@@ -2461,6 +2557,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
}
}
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+ struct its_node *its;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
+ /*
+ * Sending a VINVALL to a single ITS is enough, as all
+ * we need is to reach the redistributors.
+ */
+ its_send_vinvall(its, vpe);
+ return;
+ }
+}
+
static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
@@ -2476,7 +2592,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
return 0;
case INVALL_VPE:
- its_send_vinvall(vpe);
+ its_vpe_invall(vpe);
return 0;
default:
@@ -2701,23 +2817,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
return err;
}
-static void its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /* If we use the list map, we issue VMAPP on demand... */
+ if (its_list_map)
+ return 0;
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(vpe, true);
- its_send_vinvall(vpe);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+ return 0;
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map, we unmap the VPE once no VLPIs are
+ * associated with the VM.
+ */
+ if (its_list_map)
+ return;
- its_send_vmapp(vpe, false);
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, false);
+ }
}
static const struct irq_domain_ops its_vpe_domain_ops = {
@@ -2760,26 +2904,85 @@ static int its_force_quiescent(void __iomem *base)
}
}
-static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
+ /* erratum 22375: only alloc 8MB table size */
+ its->device_ids = 0x14; /* 20 bits, 8MB */
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
struct its_node *its = data;
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
its->ite_size = 16;
+
+ return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ /*
+ * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+ * which maps 32-bit writes targeted at a separate window of
+ * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+ * with device ID taken from bits [device_id_bits + 1:2] of
+ * the window offset.
+ */
+ return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+ struct its_node *its = data;
+ u32 pre_its_window[2];
+ u32 ids;
+
+ if (!fwnode_property_read_u32_array(its->fwnode_handle,
+ "socionext,synquacer-pre-its",
+ pre_its_window,
+ ARRAY_SIZE(pre_its_window))) {
+
+ its->pre_its_base = pre_its_window[0];
+ its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+ ids = ilog2(pre_its_window[1]) - 2;
+ if (its->device_ids > ids)
+ its->device_ids = ids;
+
+ /* the pre-ITS breaks isolation, so disable MSI remapping */
+ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return true;
+ }
+ return false;
+}
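To make the pre-ITS remapping concrete, a worked example with made-up firmware values; the window base and size below are illustrative, only the arithmetic follows the code above.

/*
 * Hypothetical "socionext,synquacer-pre-its" = <0x58000000 0x200000>:
 *
 *   pre_its_base = 0x58000000
 *   ids          = ilog2(0x200000) - 2 = 21 - 2 = 19 usable DeviceID bits
 *
 * A device with DeviceID 0x42 then gets its MSI doorbell at
 *   0x58000000 + (0x42 << 2) = 0x58000108
 * instead of GITS_TRANSLATER, and the pre-ITS recovers the DeviceID
 * from bits [20:2] of the window offset.
 */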
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+ struct its_node *its = data;
+
+ /*
+ * Hip07 insists on using the wrong address for the VLPI
+ * page. Trick it into doing the right thing...
+ */
+ its->vlpi_redist_offset = SZ_128K;
+ return true;
}
static const struct gic_quirk its_quirks[] = {
@@ -2807,6 +3010,27 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_qdf2400_e0065,
},
#endif
+#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
+ {
+ /*
+ * The Socionext Synquacer SoC incorporates ARM's own GIC-500
+ * implementation, but with a 'pre-ITS' added that requires
+ * special handling in software.
+ */
+ .desc = "ITS: Socionext Synquacer pre-ITS",
+ .iidr = 0x0001143b,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_socionext_synquacer,
+ },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161600802
+ {
+ .desc = "ITS: Hip07 erratum 161600802",
+ .iidr = 0x00000004,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip07_161600802,
+ },
+#endif
{
}
};
@@ -2835,7 +3059,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent;
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
+ inner_domain->flags |= its->msi_domain_flags;
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
@@ -2896,8 +3120,8 @@ static int __init its_compute_its_list_map(struct resource *res,
* locking. Should this change, we should address
* this.
*/
- its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
- if (its_number >= ITS_LIST_MAX) {
+ its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
+ if (its_number >= GICv4_ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
&res->start);
return -EINVAL;
@@ -2965,6 +3189,7 @@ static int __init its_probe_one(struct resource *res,
its->base = its_base;
its->phys_base = res->start;
its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+ its->device_ids = GITS_TYPER_DEVBITS(typer);
its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
if (its->is_v4) {
if (!(typer & GITS_TYPER_VMOVP)) {
@@ -2972,6 +3197,8 @@ static int __init its_probe_one(struct resource *res,
if (err < 0)
goto out_free_its;
+ its->list_nr = err;
+
pr_info("ITS@%pa: Using ITS number %d\n",
&res->start, err);
} else {
@@ -2988,6 +3215,9 @@ static int __init its_probe_one(struct resource *res,
goto out_free_its;
}
its->cmd_write = its->cmd_base;
+ its->fwnode_handle = handle;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
its_enable_quirks(its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b5df99c6f680..b56c3e23f0af 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -55,6 +55,7 @@ struct gic_chip_data {
struct irq_domain *domain;
u64 redist_stride;
u32 nr_redist_regions;
+ bool has_rss;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
};
@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static struct gic_kvm_info gic_v3_kvm_info;
+static DEFINE_PER_CPU(bool, has_rss);
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void)
static void gic_cpu_sys_reg_init(void)
{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void)
/* ... and let's hit the road... */
gic_write_grpen1(1);
+
+ /* Keep the RSS capability status in per_cpu variable */
+ per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+ /* Check that all the CPUs are capable of sending SGIs to other CPUs */
+ for_each_online_cpu(i) {
+ bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
+
+ need_rss |= MPIDR_RS(cpu_logical_map(i));
+ if (need_rss && (!have_rss))
+ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+ cpu, (unsigned long)mpidr,
+ i, (unsigned long)cpu_logical_map(i));
+ }
+
+ /**
+ * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+ * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+ * UNPREDICTABLE choice of :
+ * - The write is ignored.
+ * - The RS field is treated as 0.
+ */
+ if (need_rss && (!gic_data.has_rss))
+ pr_crit_once("RSS is required but GICD doesn't support it\n");
}
static int gic_dist_supports_lpis(void)
@@ -591,6 +622,9 @@ static void gic_cpu_init(void)
#ifdef CONFIG_SMP
+#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+
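A worked example of how the new macros split an MPIDR when composing ICC_SGI1R_EL1 (the MPIDR value is illustrative):

/*
 * Example: cpu_logical_map(cpu) = 0x123 (Aff0 = 0x23)
 *
 *   MPIDR_RS(0x123)                = (0x123 & 0xF0) >> 4 = 2   -> RS field
 *   target list bit                = 0x123 & 0xf         = 3   -> bit 3 of TLIST
 *   MPIDR_TO_SGI_CLUSTER_ID(0x123) = 0x123 & ~0xFUL      = 0x120
 *
 * The pre-RSS code used (mpidr & ~0xffUL) as the cluster id and refused
 * Aff0 values >= 16; with RSS the upper nibble of Aff0 selects one of 16
 * ranges, so clusters of up to 256 CPUs can be addressed.
 */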
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
@@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
u16 tlist = 0;
while (cpu < nr_cpu_ids) {
- /*
- * If we ever get a cluster of more than 16 CPUs, just
- * scream and skip that CPU.
- */
- if (WARN_ON((mpidr & 0xff) >= 16))
- goto out;
-
tlist |= 1 << (mpidr & 0xf);
next_cpu = cpumask_next(cpu, mask);
@@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
mpidr = cpu_logical_map(cpu);
- if (cluster_id != (mpidr & ~0xffUL)) {
+ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
cpu--;
goto out;
}
@@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
irq << ICC_SGI1R_SGI_ID_SHIFT |
MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ MPIDR_TO_SGI_RS(cluster_id) |
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
smp_wmb();
for_each_cpu(cpu, mask) {
- unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
+ gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
+ pr_info("Distributor has %sRange Selector support\n",
+ gic_data.has_rss ? "" : "no ");
+
set_handle_irq(gic_handle_irq);
gic_update_vlpi_properties();
@@ -1071,18 +1103,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
int nr_parts;
struct partition_affinity *parts;
- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
if (!parts_node)
return;
nr_parts = of_get_child_count(parts_node);
if (!nr_parts)
- return;
+ goto out_put_node;
parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
if (WARN_ON(!parts))
- return;
+ goto out_put_node;
for_each_child_of_node(parts_node, child_part) {
struct partition_affinity *part;
@@ -1149,6 +1181,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
gic_data.ppi_descs[i] = desc;
}
+
+out_put_node:
+ of_node_put(parts_node);
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
@@ -1228,7 +1263,9 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
goto out_unmap_rdist;
gic_populate_ppi_partitions(node);
- gic_of_setup_kvm_info(node);
+
+ if (static_key_true(&supports_deactivate))
+ gic_of_setup_kvm_info(node);
return 0;
out_unmap_rdist:
@@ -1489,7 +1526,7 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
- pr_err("No distributor detected at @%p, giving up",
+ pr_err("No distributor detected at @%p, giving up\n",
acpi_data.dist_base);
goto out_dist_unmap;
}
@@ -1517,7 +1554,9 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
goto out_fwhandle_free;
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
- gic_acpi_setup_kvm_info();
+
+ if (static_key_true(&supports_deactivate))
+ gic_acpi_setup_kvm_info();
return 0;
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index cd0bcc3b7e33..dba9d67cb9c1 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -177,6 +177,7 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
.map = map,
},
};
+ int ret;
/*
* The host will never see that interrupt firing again, so it
@@ -184,7 +185,11 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- return irq_set_vcpu_affinity(irq, &info);
+ ret = irq_set_vcpu_affinity(irq, &info);
+ if (ret)
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return ret;
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 651d726e8b12..121af5cf688f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic)
#ifdef CONFIG_OF
static int gic_cnt __initdata;
+static bool gicv2_force_probe;
+
+static int __init gicv2_force_probe_cfg(char *buf)
+{
+ return strtobool(buf, &gicv2_force_probe);
+}
+early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
+
+static bool gic_check_gicv2(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + GIC_CPU_IDENT);
+ return (val & 0xff0fff) == 0x02043B;
+}
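A short decode of what the 0xff0fff / 0x02043B check accepts, following the usual GICC_IIDR field layout (a sketch of the intent, not text from this patch):

/*
 *   [11:0]  Implementer        -> must be 0x43B (ARM)
 *   [15:12] Revision           -> masked out, any revision accepted
 *   [19:16] Architecture ver.  -> must be 0x2 (GICv2)
 *   [23:20] ProductID (low)    -> must be 0x0
 *
 * i.e. "some ARM GICv2 CPU interface", regardless of revision, which is
 * why it can be used on both the first and the aliased second page.
 */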
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
@@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
if (!is_hyp_mode_available())
return false;
- if (resource_size(&cpuif_res) < SZ_8K)
- return false;
- if (resource_size(&cpuif_res) == SZ_128K) {
- u32 val_low, val_high;
+ if (resource_size(&cpuif_res) < SZ_8K) {
+ void __iomem *alt;
+ /*
+ * Check for a stupid firmware that only exposes the
+ * first page of a GICv2.
+ */
+ if (!gic_check_gicv2(*base))
+ return false;
+
+ if (!gicv2_force_probe) {
+ pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
+ return false;
+ }
+
+ alt = ioremap(cpuif_res.start, SZ_8K);
+ if (!alt)
+ return false;
+ if (!gic_check_gicv2(alt + SZ_4K)) {
+ /*
+ * The first page was that of a GICv2, and
+ * the second was *something*. Let's trust it
+ * to be a GICv2, and update the mapping.
+ */
+ pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
+ &cpuif_res.start);
+ iounmap(*base);
+ *base = alt;
+ return true;
+ }
/*
- * Verify that we have the first 4kB of a GIC400
+ * We detected *two* initial GICv2 pages in a
+ * row. Could be a GICv2 aliased over two 64kB
+ * pages. Update the resource, map the iospace, and
+ * pray.
+ */
+ iounmap(alt);
+ alt = ioremap(cpuif_res.start, SZ_128K);
+ if (!alt)
+ return false;
+ pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
+ &cpuif_res.start);
+ cpuif_res.end = cpuif_res.start + SZ_128K - 1;
+ iounmap(*base);
+ *base = alt;
+ }
+ if (resource_size(&cpuif_res) == SZ_128K) {
+ /*
+ * Verify that we have the first 4kB of a GICv2
* aliased over the first 64kB by checking the
* GICC_IIDR register on both ends.
*/
- val_low = readl_relaxed(*base + GIC_CPU_IDENT);
- val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
- if ((val_low & 0xffff0fff) != 0x0202043B ||
- val_low != val_high)
+ if (!gic_check_gicv2(*base) ||
+ !gic_check_gicv2(*base + 0xf000))
return false;
/*
@@ -1367,7 +1420,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (ret)
return;
- gic_set_kvm_info(&gic_v2_kvm_info);
+ if (static_key_true(&supports_deactivate))
+ gic_set_kvm_info(&gic_v2_kvm_info);
}
int __init
@@ -1599,7 +1653,8 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain);
- gic_acpi_setup_kvm_info();
+ if (static_key_true(&supports_deactivate))
+ gic_acpi_setup_kvm_info();
return 0;
}
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index c02d29c9dc05..e80263e16c4c 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IMG PowerDown Controller (PDC)
*
@@ -324,7 +325,7 @@ static int pdc_intc_probe(struct platform_device *pdev)
/* Ioremap the registers */
priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv->pdc_base)
return -EIO;
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 119f4ef0d421..57e3d900f19e 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -316,6 +316,7 @@ static const struct of_device_id ls_scfg_msi_id[] = {
{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 000000000000..a59bdbc0b9bb
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define NUM_CHANNEL 8
+#define MAX_INPUT_MUX 256
+
+#define REG_EDGE_POL 0x00
+#define REG_PIN_03_SEL 0x04
+#define REG_PIN_47_SEL 0x08
+#define REG_FILTER_SEL 0x0c
+
+#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
+#define REG_EDGE_POL_EDGE(x) BIT(x)
+#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
+#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
+#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+
+struct meson_gpio_irq_params {
+ unsigned int nr_hwirq;
+};
+
+static const struct meson_gpio_irq_params meson8_params = {
+ .nr_hwirq = 134,
+};
+
+static const struct meson_gpio_irq_params meson8b_params = {
+ .nr_hwirq = 119,
+};
+
+static const struct meson_gpio_irq_params gxbb_params = {
+ .nr_hwirq = 133,
+};
+
+static const struct meson_gpio_irq_params gxl_params = {
+ .nr_hwirq = 110,
+};
+
+static const struct of_device_id meson_irq_gpio_matches[] = {
+ { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+ { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+ { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+ { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+ { }
+};
+
+struct meson_gpio_irq_controller {
+ unsigned int nr_hwirq;
+ void __iomem *base;
+ u32 channel_irqs[NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+ spinlock_t lock;
+};
+
+static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned int reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
+}
+
+static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+{
+ return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+}
+
+static int
+meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long hwirq,
+ u32 **channel_hwirq)
+{
+ unsigned int reg, idx;
+
+ spin_lock(&ctl->lock);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
+ if (idx >= NUM_CHANNEL) {
+ spin_unlock(&ctl->lock);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
+ /*
+ * Setup the mux of the channel to route the signal of the pad
+ * to the appropriate input of the GIC
+ */
+ reg = meson_gpio_irq_channel_to_reg(idx);
+ meson_gpio_irq_update_bits(ctl, reg,
+ 0xff << REG_PIN_SEL_SHIFT(idx),
+ hwirq << REG_PIN_SEL_SHIFT(idx));
+
+ /*
+ * Get the hwirq number assigned to this channel through
+ * a pointer to the channel_irqs table. The added benefit of this
+ * method is that we can also retrieve the channel index with
+ * it, using the table base.
+ */
+ *channel_hwirq = &(ctl->channel_irqs[idx]);
+
+ spin_unlock(&ctl->lock);
+
+ pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+ hwirq, idx, **channel_hwirq);
+
+ return 0;
+}
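A worked example of the routing this function sets up; the pad and channel numbers are illustrative, the arithmetic follows the macros above.

/*
 * Example: hwirq 25 (pad interrupt 25) lands on free channel idx = 5.
 *
 *   reg  = REG_PIN_47_SEL                       (channels 4..7 -> 0x08)
 *   mask = 0xff << REG_PIN_SEL_SHIFT(5) = 0xff << 8
 *   val  = 25   << 8
 *
 * so byte 1 of REG_PIN_47_SEL now carries pad 25, and *channel_hwirq
 * points at ctl->channel_irqs[5], the GIC SPI wired to channel 5.
 */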
+
+static unsigned int
+meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ return channel_hwirq - ctl->channel_irqs;
+}
+
+static void
+meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+ clear_bit(idx, ctl->channel_map);
+}
+
+static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
+ unsigned int type,
+ u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ /*
+ * The controller has a filter block to operate in either LEVEL or
+ * EDGE mode, then the signal is sent to the GIC. To enable LEVEL_LOW and
+ * EDGE_FALLING support (which the GIC does not support), the filter
+ * block is also able to invert the input signal it gets before
+ * providing it to the GIC.
+ */
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ if (type == IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_EDGE(idx);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_LOW(idx);
+
+ spin_lock(&ctl->lock);
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ REG_EDGE_POL_MASK(idx), val);
+
+ spin_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static unsigned int meson_gpio_irq_type_output(unsigned int type)
+{
+ unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
+
+ type &= ~IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The polarity of the signal provided to the GIC should always
+ * be high.
+ */
+ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ type |= IRQ_TYPE_EDGE_RISING;
+
+ return type;
+}
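Summarising how the two functions above split the work between the filter block and the GIC, per requested sense (derived from the code, shown here as a quick reference):

/*
 * requested pad trigger     filter block (EDGE_POL)    type passed to GIC
 * -----------------------   ------------------------   -------------------
 * IRQ_TYPE_LEVEL_HIGH       level, non-inverted        IRQ_TYPE_LEVEL_HIGH
 * IRQ_TYPE_LEVEL_LOW        level, inverted            IRQ_TYPE_LEVEL_HIGH
 * IRQ_TYPE_EDGE_RISING      edge,  non-inverted        IRQ_TYPE_EDGE_RISING
 * IRQ_TYPE_EDGE_FALLING     edge,  inverted            IRQ_TYPE_EDGE_RISING
 * IRQ_TYPE_EDGE_BOTH        rejected (-EINVAL)         -
 */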
+
+static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct meson_gpio_irq_controller *ctl = data->domain->host_data;
+ u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(data,
+ meson_gpio_irq_type_output(type));
+}
+
+static struct irq_chip meson_gpio_irq_chip = {
+ .name = "meson-gpio-irqchip",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = meson_gpio_irq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
+ unsigned int virq,
+ u32 hwirq,
+ unsigned int type)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = meson_gpio_irq_type_output(type);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ unsigned long hwirq;
+ u32 *channel_hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
+ *channel_hwirq, type);
+ if (ret < 0) {
+ pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &meson_gpio_irq_chip, channel_hwirq);
+
+ return 0;
+}
+
+static void meson_gpio_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ struct irq_data *irq_data;
+ u32 *channel_hwirq;
+
+ if (WARN_ON(nr_irqs != 1))
+ return;
+
+ irq_domain_free_irqs_parent(domain, virq, 1);
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ channel_hwirq = irq_data_get_irq_chip_data(irq_data);
+
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+}
+
+static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
+ .alloc = meson_gpio_irq_domain_alloc,
+ .free = meson_gpio_irq_domain_free,
+ .translate = meson_gpio_irq_domain_translate,
+};
+
+static int __init meson_gpio_irq_parse_dt(struct device_node *node,
+ struct meson_gpio_irq_controller *ctl)
+{
+ const struct of_device_id *match;
+ const struct meson_gpio_irq_params *params;
+ int ret;
+
+ match = of_match_node(meson_irq_gpio_matches, node);
+ if (!match)
+ return -ENODEV;
+
+ params = match->data;
+ ctl->nr_hwirq = params->nr_hwirq;
+
+ ret = of_property_read_variable_u32_array(node,
+ "amlogic,channel-interrupts",
+ ctl->channel_irqs,
+ NUM_CHANNEL,
+ NUM_CHANNEL);
+ if (ret < 0) {
+ pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init meson_gpio_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct meson_gpio_irq_controller *ctl;
+ int ret;
+
+ if (!parent) {
+ pr_err("missing parent interrupt node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("unable to obtain parent domain\n");
+ return -ENXIO;
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ spin_lock_init(&ctl->lock);
+
+ ctl->base = of_iomap(node, 0);
+ if (!ctl->base) {
+ ret = -ENOMEM;
+ goto free_ctl;
+ }
+
+ ret = meson_gpio_irq_parse_dt(node, ctl);
+ if (ret)
+ goto free_channel_irqs;
+
+ domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
+ of_node_to_fwnode(node),
+ &meson_gpio_irq_domain_ops,
+ ctl);
+ if (!domain) {
+ pr_err("failed to add domain\n");
+ ret = -ENODEV;
+ goto free_channel_irqs;
+ }
+
+ pr_info("%d to %d gpio interrupt mux initialized\n",
+ ctl->nr_hwirq, NUM_CHANNEL);
+
+ return 0;
+
+free_channel_irqs:
+ iounmap(ctl->base);
+free_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
+ meson_gpio_irq_of_init);
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index be7216bfb8dd..e67483161f0f 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Meta External interrupt code.
*
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index a5f053bd2f44..857b946747eb 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Meta internal (HWSTATMETA) interrupt code.
*
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c90976d7e53c..ef92a4d2038e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -6,8 +6,12 @@
* Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
+
+#define pr_fmt(fmt) "irq-mips-gic: " fmt
+
#include <linux/bitmap.h>
#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -48,12 +52,16 @@ static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
-static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
-DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
-DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+
+static struct gic_all_vpes_chip_data {
+ u32 map;
+ bool mask;
+} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
static void gic_clear_pcpu_masks(unsigned int intr)
{
@@ -194,46 +202,46 @@ static void gic_ack_irq(struct irq_data *d)
static int gic_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int irq, pol, trig, dual;
unsigned long flags;
- bool is_edge;
+
+ irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
- change_gic_pol(irq, GIC_POL_FALLING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_FALLING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_RISING:
- change_gic_pol(irq, GIC_POL_RISING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_RISING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_BOTH:
- /* polarity is irrelevant in this case */
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_DUAL);
- is_edge = true;
+ pol = 0; /* Doesn't matter */
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_DUAL;
break;
case IRQ_TYPE_LEVEL_LOW:
- change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_LOW;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_LEVEL_HIGH:
default:
- change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_HIGH;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
}
- if (is_edge)
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+
+ if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
handle_edge_irq, NULL);
else
@@ -338,13 +346,17 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = false;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -352,22 +364,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = true;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
}
+static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned int intr;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+
+ write_gic_vl_map(intr, cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+}
+
static struct irq_chip gic_all_vpes_local_irq_controller = {
- .name = "MIPS GIC Local",
- .irq_mask = gic_mask_local_irq_all_vpes,
- .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -382,39 +412,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
gic_handle_shared_int(true);
}
-static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- int intr = GIC_HWIRQ_TO_LOCAL(hw);
- int i;
- unsigned long flags;
- u32 val;
-
- if (!gic_local_irq_is_routable(intr))
- return -EPERM;
-
- if (intr > GIC_LOCAL_INT_FDC) {
- pr_err("Invalid local IRQ %d\n", intr);
- return -EINVAL;
- }
-
- if (intr == GIC_LOCAL_INT_TIMER) {
- /* CONFIG_MIPS_CMP workaround (see __gic_init) */
- val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
- } else {
- val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
- }
-
- spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- write_gic_vo_map(intr, val);
- }
- spin_unlock_irqrestore(&gic_lock, flags);
-
- return 0;
-}
-
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw, unsigned int cpu)
{
@@ -457,7 +454,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
- int err;
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ unsigned int intr;
+ int err, cpu;
+ u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
/* verify that shared irqs don't conflict with an IPI irq */
@@ -474,8 +475,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
return gic_shared_irq_domain_map(d, virq, hwirq, 0);
}
- switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+ intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+ map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
+ switch (intr) {
case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
+ /* fall-through */
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
@@ -483,9 +490,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
* the rest of the MIPS kernel code does not use the
* percpu IRQ API for them.
*/
+ cd = &gic_all_vpes_chip_data[intr];
+ cd->map = map;
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_all_vpes_local_irq_controller,
- NULL);
+ cd);
if (err)
return err;
@@ -504,7 +513,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
break;
}
- return gic_local_irq_domain_map(d, virq, hwirq);
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_map(intr, map);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -636,11 +655,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match,
};
+static int gic_cpu_startup(unsigned int cpu)
+{
+ /* Enable or disable EIC */
+ change_gic_vl_ctl(GIC_VX_CTL_EIC,
+ cpu_has_veic ? GIC_VX_CTL_EIC : 0);
+
+	/* Clear all local IRQ masks (i.e. disable all local interrupts) */
+ write_gic_vl_rmask(~0);
+
+ /* Invoke irq_cpu_online callbacks to enable desired interrupts */
+ irq_cpu_online();
+
+ return 0;
+}
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
+ unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@@ -655,7 +688,7 @@ static int __init gic_of_init(struct device_node *node,
cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
if (cpu_vec == hweight_long(ST0_IM)) {
- pr_err("No CPU vectors available for GIC\n");
+ pr_err("No CPU vectors available\n");
return -ENODEV;
}
@@ -668,8 +701,10 @@ static int __init gic_of_init(struct device_node *node,
gic_base = read_gcr_gic_base() &
~CM_GCR_GIC_BASE_GICEN;
gic_len = 0x20000;
+ pr_warn("Using inherited base address %pa\n",
+ &gic_base);
} else {
- pr_err("Failed to get GIC memory range\n");
+ pr_err("Failed to get memory range\n");
return -ENODEV;
}
} else {
@@ -690,17 +725,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
- gic_vpes = gicconfig & GIC_CONFIG_PVPS;
- gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
- gic_vpes = gic_vpes + 1;
-
if (cpu_has_veic) {
- /* Set EIC mode for all VPEs */
- for_each_present_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_ctl(GIC_VX_CTL_EIC);
- }
-
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
timer_cpu_pin = gic_cpu_pin;
@@ -737,7 +762,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs, 0,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain) {
- pr_err("Failed to add GIC IRQ domain");
+ pr_err("Failed to add IRQ domain");
return -ENXIO;
}
@@ -746,7 +771,7 @@ static int __init gic_of_init(struct device_node *node,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_ipi_domain_ops, NULL);
if (!gic_ipi_domain) {
- pr_err("Failed to add GIC IPI domain");
+ pr_err("Failed to add IPI domain");
return -ENXIO;
}
@@ -756,10 +781,12 @@ static int __init gic_of_init(struct device_node *node,
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
bitmap_set(ipi_resrv, v[0], v[1]);
} else {
- /* Make the last 2 * gic_vpes available for IPIs */
- bitmap_set(ipi_resrv,
- gic_shared_intrs - 2 * gic_vpes,
- 2 * gic_vpes);
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
}
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
@@ -773,15 +800,8 @@ static int __init gic_of_init(struct device_node *node,
write_gic_rmask(i);
}
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
- if (!gic_local_irq_is_routable(j))
- continue;
- write_gic_vo_rmask(BIT(j));
- }
- }
-
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ "irqchip/mips/gic:starting",
+ gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
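/*
 * Editor's illustrative sketch (not part of the patch above): the MIPS GIC
 * rework replaces boot-time "for each VP(E)" loops with a CPU hotplug
 * callback, so CPUs brought online later get the same per-CPU programming
 * as the boot CPU. A hypothetical driver following the same pattern could
 * look like the code below; my_chip_program_local_regs(), the name string
 * and the use of the generic dynamic online state (instead of a dedicated
 * CPUHP_AP_IRQ_*_STARTING state as in the real patch) are placeholders.
 */
#include <linux/cpuhotplug.h>
#include <linux/irq.h>

static void my_chip_program_local_regs(void)
{
	/* Re-program this CPU's per-CPU interrupt controller registers here */
}

static int my_chip_cpu_online(unsigned int cpu)
{
	my_chip_program_local_regs();

	/* Let irqchips with an .irq_cpu_online hook re-enable their IRQs */
	irq_cpu_online();

	return 0;
}

static int __init my_chip_init(void)
{
	/* The callback runs on every CPU as it comes up, boot CPU included */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				 "irqchip/mychip:online",
				 my_chip_cpu_online, NULL);
}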
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index b283fc90be1e..17a4a7b6cdbb 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
return -ENOMEM;
gicp->dev = &pdev->dev;
+ spin_lock_init(&gicp->spi_lock);
gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!gicp->res)
diff --git a/drivers/irqchip/irq-mvebu-gicp.h b/drivers/irqchip/irq-mvebu-gicp.h
index 98535e886ea5..eaa12fb72102 100644
--- a/drivers/irqchip/irq-mvebu-gicp.h
+++ b/drivers/irqchip/irq-mvebu-gicp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MVEBU_GICP_H__
#define __MVEBU_GICP_H__
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index b04a8ac6e744..d360a6eddd6d 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -25,10 +25,6 @@
#include <linux/irqchip/irq-omap-intc.h>
-/* Define these here for now until we drop all board-files */
-#define OMAP24XX_IC_BASE 0x480fe000
-#define OMAP34XX_IC_BASE 0x48200000
-
/* selected INTC register offsets */
#define INTC_REVISION 0x0000
@@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context;
static struct irq_domain *domain;
static void __iomem *omap_irq_base;
-static int omap_nr_pending = 3;
-static int omap_nr_irqs = 96;
+static int omap_nr_pending;
+static int omap_nr_irqs;
static void intc_writel(u32 reg, u32 val)
{
@@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs)
handle_domain_irq(domain, irqnr, regs);
}
-void __init omap3_init_irq(void)
-{
- omap_nr_irqs = 96;
- omap_nr_pending = 3;
- omap_init_irq(OMAP34XX_IC_BASE, NULL);
- set_handle_irq(omap_intc_handle_irq);
-}
-
static int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 000000000000..cf6d0c455518
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers. The control register for sending
+ * and acking IPIs and the status register for receiving IPIs. The register
+ * layouts are as follows:
+ *
+ * Control register
+ * +---------+---------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * ----------+---------+----------+----------
+ * | IRQ ACK | IRQ GEN | DST CORE | DATA |
+ * +---------+---------+----------+---------+
+ *
+ * Status register
+ * +----------+-------------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * -----------+-------------+----------+---------+
+ * | Reserved | IRQ Pending | SRC CORE | DATA |
+ * +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ * ready. Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ * +---------+ +---------+
+ * | CPU | | CPU |
+ * | Core 0 |<==\ (memory access) /==>| Core 1 |
+ * | [ PIC ]| | | | [ PIC ]|
+ * +----^-^--+ | | +----^-^--+
+ * | | v v | |
+ * <====|=|=================================|=|==> (memory bus)
+ * | | ^ ^ | |
+ * (ipi | +------|---------+--------|-------|-+ (device irq)
+ * irq | | | | |
+ * core0)| +------|---------|--------|-------+ (ipi irq core1)
+ * | | | | |
+ * +----o-o-+ | +--------+ |
+ * | ompic |<===/ | Device |<===/
+ * | IPI | +--------+
+ *   +--------+
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES 8
+#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING (1 << 30)
+
+#define OMPIC_DATA(x) ((x) & 0xffff)
+
+DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+ return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+ iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+ unsigned int ipi_msg)
+{
+ unsigned int dst_cpu;
+ unsigned int src_cpu = smp_processor_id();
+
+ for_each_cpu(dst_cpu, mask) {
+ set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+ /*
+ * On OpenRISC the atomic set_bit() call implies a memory
+ * barrier. Otherwise we would need: smp_wmb(); paired
+ * with the read in ompic_ipi_handler.
+ */
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+ OMPIC_CTRL_IRQ_GEN |
+ OMPIC_CTRL_DST(dst_cpu) |
+ OMPIC_DATA(1));
+ }
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ops = &per_cpu(ops, cpu);
+ unsigned long ops;
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+ while ((ops = xchg(pending_ops, 0)) != 0) {
+
+ /*
+ * On OpenRISC the atomic xchg() call implies a memory
+ * barrier. Otherwise we may need an smp_rmb(); paired
+ * with the write in ompic_raise_softirq.
+ */
+
+ do {
+ unsigned long ipi_msg;
+
+ ipi_msg = __ffs(ops);
+ ops &= ~(1UL << ipi_msg);
+
+ handle_IPI(ipi_msg);
+ } while (ops);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int irq;
+ int ret;
+
+ /* Validate the DT */
+ if (ompic_base) {
+ pr_err("ompic: duplicate ompic's are not supported");
+ return -EEXIST;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("ompic: reg property requires an address and size");
+ return -EINVAL;
+ }
+
+ if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+ pr_err("ompic: reg size, currently %d must be at least %d",
+ resource_size(&res),
+ (num_possible_cpus() * OMPIC_CPUBYTES));
+ return -EINVAL;
+ }
+
+ /* Setup the device */
+	ompic_base = ioremap(res.start, resource_size(&res));
+	if (!ompic_base) {		/* ioremap() returns NULL on failure */
+		pr_err("ompic: unable to map registers\n");
+		return -ENOMEM;
+	}
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("ompic: unable to parse device irq");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+ "ompic_ipi", NULL);
+ if (ret)
+ goto out_irq_disp;
+
+ set_smp_cross_call(ompic_raise_softirq);
+
+ return 0;
+
+out_irq_disp:
+ irq_dispose_mapping(irq);
+out_unmap:
+ iounmap(ompic_base);
+ ompic_base = NULL;
+ return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
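/*
 * Editor's illustrative sketch (not part of the patch above): given the
 * control-register layout documented at the top of irq-ompic.c, an IPI
 * "send" sets IRQ GEN, the destination core and a 16-bit payload in one
 * 32-bit write. The helper below only shows how those fields pack; it
 * reuses the driver's macros (assumed to be in scope) and is hypothetical.
 */
static inline u32 ompic_ipi_word(unsigned int dst_cpu, u16 data)
{
	return OMPIC_CTRL_IRQ_GEN |	/* bit 30: generate an IPI */
	       OMPIC_CTRL_DST(dst_cpu) |	/* bits 29..16: destination core */
	       OMPIC_DATA(data);	/* bits 15..0: message payload */
}

/*
 * e.g. ompic_writereg(ompic_base, OMPIC_CTRL(smp_processor_id()),
 *		       ompic_ipi_word(1, 1));
 */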
diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c
index b8327590ae52..1054d74b7edd 100644
--- a/drivers/irqchip/irq-renesas-h8300h.c
+++ b/drivers/irqchip/irq-renesas-h8300h.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* H8/300H interrupt controller driver
*
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index 71d8139be26c..aed31afb0216 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
 * H8S interrupt controller driver
*
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 713177d97c7a..06f29cf5018a 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
static int intc_irqpin_probe(struct platform_device *pdev)
{
- const struct intc_irqpin_config *config = NULL;
+ const struct intc_irqpin_config *config;
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev)
p->pdev = pdev;
platform_set_drvdata(pdev, p);
- of_id = of_match_device(intc_irqpin_dt_ids, dev);
- if (of_id && of_id->data) {
- config = of_id->data;
+ config = of_device_get_match_data(dev);
+ if (config)
p->needs_clk = config->needs_clk;
- }
p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c25ce5af091a..ec0e6a8cdb75 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -156,7 +156,7 @@ static int s3c_irq_type(struct irq_data *data, unsigned int type)
irq_set_handler(data->irq, handle_level_irq);
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
@@ -204,7 +204,7 @@ static int s3c_irqext_type_set(void __iomem *gpcon_reg,
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 000000000000..1927b2f36ff6
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for Socionext External Interrupt Unit (EXIU)
+ *
+ * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on irq-tegra.c:
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_IRQS 32
+
+#define EIMASK 0x00
+#define EISRCSEL 0x04
+#define EIREQSTA 0x08
+#define EIRAWREQSTA 0x0C
+#define EIREQCLR 0x10
+#define EILVL 0x14
+#define EIEDG 0x18
+#define EISIR 0x1C
+
+struct exiu_irq_data {
+ void __iomem *base;
+ u32 spi_base;
+};
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+ irq_chip_eoi_parent(d);
+}
+
+static void exiu_irq_mask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_mask_parent(d);
+}
+
+static void exiu_irq_unmask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void exiu_irq_enable(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ /* clear interrupts that were latched while disabled */
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_enable_parent(d);
+}
+
+static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EILVL);
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ val |= BIT(d->hwirq);
+ else
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EILVL);
+
+ val = readl_relaxed(data->base + EIEDG);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~BIT(d->hwirq);
+ else
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIEDG);
+
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip exiu_irq_chip = {
+ .name = "EXIU",
+ .irq_eoi = exiu_irq_eoi,
+ .irq_enable = exiu_irq_enable,
+ .irq_mask = exiu_irq_mask,
+ .irq_unmask = exiu_irq_unmask,
+ .irq_set_type = exiu_irq_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int exiu_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct exiu_irq_data *info = domain->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *hwirq = fwspec->param[1] - info->spi_base;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct exiu_irq_data *info = dom->host_data;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ WARN_ON(nr_irqs != 1);
+ hwirq = fwspec->param[1] - info->spi_base;
+ irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = dom->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops exiu_domain_ops = {
+ .translate = exiu_domain_translate,
+ .alloc = exiu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init exiu_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ int err;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
+ pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ data->base = of_iomap(node, 0);
+ if (!data->base) {
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ /* clear and mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
+ writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
+ &exiu_domain_ops, data);
+ if (!domain) {
+ pr_err("%pOF: failed to allocate domain\n", node);
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
+ parent);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+out_free:
+ kfree(data);
+ return err;
+}
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
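/*
 * Editor's illustrative sketch (not part of the patch above): irq-sni-exiu.c
 * is a stacked (hierarchical) irqchip, so its .alloc callback installs its
 * own chip level and then forwards a rewritten fwspec to the parent domain
 * (the GIC). The generic shape of that pattern is sketched below; "my_chip"
 * and the single-cell hwirq encoding are placeholders.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip my_chip = {
	.name = "MYCHIP",
};

static int my_domain_alloc(struct irq_domain *dom, unsigned int virq,
			   unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	unsigned int hwirq = fwspec->param[1];

	/* Install this chip/domain level for the new virq */
	irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &my_chip,
				      dom->host_data);

	/* Rewrite the fwspec so the parent domain sees its own view of it */
	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = dom->parent->fwnode;
	return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs,
					    &parent_fwspec);
}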
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 45363ff8d06f..31ab0dee2ce7 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,27 +14,99 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#define EXTI_IMR 0x0
-#define EXTI_EMR 0x4
-#define EXTI_RTSR 0x8
-#define EXTI_FTSR 0xc
-#define EXTI_SWIER 0x10
-#define EXTI_PR 0x14
+#define IRQS_PER_BANK 32
+
+struct stm32_exti_bank {
+ u32 imr_ofst;
+ u32 emr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 pr_ofst;
+};
+
+static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
+ .imr_ofst = 0x00,
+ .emr_ofst = 0x04,
+ .rtsr_ofst = 0x08,
+ .ftsr_ofst = 0x0C,
+ .swier_ofst = 0x10,
+ .pr_ofst = 0x14,
+};
+
+static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
+ &stm32f4xx_exti_b1,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
+ .imr_ofst = 0x80,
+ .emr_ofst = 0x84,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .pr_ofst = 0x88,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
+ .imr_ofst = 0x90,
+ .emr_ofst = 0x94,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .pr_ofst = 0x98,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .emr_ofst = 0xA4,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .pr_ofst = 0xA8,
+};
+
+static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
+ &stm32h7xx_exti_b1,
+ &stm32h7xx_exti_b2,
+ &stm32h7xx_exti_b3,
+};
+
+static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ return irq_reg_readl(gc, stm32_bank->pr_ofst);
+}
+
+static void stm32_exti_irq_ack(struct irq_chip_generic *gc, u32 mask)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ irq_reg_writel(gc, mask, stm32_bank->pr_ofst);
+}
static void stm32_irq_handler(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = domain->gc->gc[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int virq, nbanks = domain->gc->num_chips;
+ struct irq_chip_generic *gc;
+ const struct stm32_exti_bank *stm32_bank;
unsigned long pending;
- int n;
+ int n, i, irq_base = 0;
chained_irq_enter(chip, desc);
- while ((pending = irq_reg_readl(gc, EXTI_PR))) {
- for_each_set_bit(n, &pending, BITS_PER_LONG) {
- generic_handle_irq(irq_find_mapping(domain, n));
- irq_reg_writel(gc, BIT(n), EXTI_PR);
+ for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
+ gc = irq_get_domain_generic_chip(domain, irq_base);
+ stm32_bank = gc->private;
+
+ while ((pending = stm32_exti_pending(gc))) {
+ for_each_set_bit(n, &pending, IRQS_PER_BANK) {
+ virq = irq_find_mapping(domain, irq_base + n);
+ generic_handle_irq(virq);
+ stm32_exti_irq_ack(gc, BIT(n));
+ }
}
}
@@ -44,13 +116,14 @@ static void stm32_irq_handler(struct irq_desc *desc)
static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
u32 rtsr, ftsr;
irq_gc_lock(gc);
- rtsr = irq_reg_readl(gc, EXTI_RTSR);
- ftsr = irq_reg_readl(gc, EXTI_FTSR);
+ rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
+ ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -70,8 +143,8 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- irq_reg_writel(gc, rtsr, EXTI_RTSR);
- irq_reg_writel(gc, ftsr, EXTI_FTSR);
+ irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
+ irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
irq_gc_unlock(gc);
@@ -81,17 +154,18 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
- u32 emr;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
+ u32 imr;
irq_gc_lock(gc);
- emr = irq_reg_readl(gc, EXTI_EMR);
+ imr = irq_reg_readl(gc, stm32_bank->imr_ofst);
if (on)
- emr |= BIT(pin);
+ imr |= BIT(pin);
else
- emr &= ~BIT(pin);
- irq_reg_writel(gc, emr, EXTI_EMR);
+ imr &= ~BIT(pin);
+ irq_reg_writel(gc, imr, stm32_bank->imr_ofst);
irq_gc_unlock(gc);
@@ -101,11 +175,12 @@ static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *data)
{
- struct irq_chip_generic *gc = d->gc->gc[0];
+ struct irq_chip_generic *gc;
struct irq_fwspec *fwspec = data;
irq_hw_number_t hwirq;
hwirq = fwspec->param[0];
+ gc = irq_get_domain_generic_chip(d, hwirq);
irq_map_generic_chip(d, virq, hwirq);
irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc,
@@ -129,8 +204,9 @@ struct irq_domain_ops irq_exti_domain_ops = {
.free = stm32_exti_free,
};
-static int __init stm32_exti_init(struct device_node *node,
- struct device_node *parent)
+static int
+__init stm32_exti_init(const struct stm32_exti_bank **stm32_exti_banks,
+ int bank_nr, struct device_node *node)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
int nr_irqs, nr_exti, ret, i;
@@ -144,23 +220,16 @@ static int __init stm32_exti_init(struct device_node *node,
return -ENOMEM;
}
- /* Determine number of irqs supported */
- writel_relaxed(~0UL, base + EXTI_RTSR);
- nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
- writel_relaxed(0, base + EXTI_RTSR);
-
- pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
-
- domain = irq_domain_add_linear(node, nr_exti,
+ domain = irq_domain_add_linear(node, bank_nr * IRQS_PER_BANK,
&irq_exti_domain_ops, NULL);
if (!domain) {
pr_err("%s: Could not register interrupt domain.\n",
- node->name);
+ node->name);
ret = -ENOMEM;
goto out_unmap;
}
- ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
+ ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
handle_edge_irq, clr, 0, 0);
if (ret) {
pr_err("%pOF: Could not allocate generic interrupt chip.\n",
@@ -168,18 +237,41 @@ static int __init stm32_exti_init(struct device_node *node,
goto out_free_domain;
}
- gc = domain->gc->gc[0];
- gc->reg_base = base;
- gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types->chip.name = gc->chip_types[0].chip.name;
- gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
- gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
- gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
- gc->chip_types->regs.ack = EXTI_PR;
- gc->chip_types->regs.mask = EXTI_IMR;
- gc->chip_types->handler = handle_edge_irq;
+ for (i = 0; i < bank_nr; i++) {
+ const struct stm32_exti_bank *stm32_bank = stm32_exti_banks[i];
+ u32 irqs_mask;
+
+ gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
+
+ gc->reg_base = base;
+ gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
+ gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
+ gc->chip_types->regs.ack = stm32_bank->pr_ofst;
+ gc->chip_types->regs.mask = stm32_bank->imr_ofst;
+ gc->private = (void *)stm32_bank;
+
+ /* Determine number of irqs supported */
+ writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
+ irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
+ nr_exti = fls(readl_relaxed(base + stm32_bank->rtsr_ofst));
+
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + stm32_bank->imr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
+ writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+ writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+ writel_relaxed(~0UL, base + stm32_bank->pr_ofst);
+
+ pr_info("%s: bank%d, External IRQs available:%#x\n",
+ node->full_name, i, irqs_mask);
+ }
nr_irqs = of_irq_count(node);
for (i = 0; i < nr_irqs; i++) {
@@ -198,4 +290,20 @@ out_unmap:
return ret;
}
-IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init);
+static int __init stm32f4_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32f4xx_exti_banks,
+ ARRAY_SIZE(stm32f4xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
+
+static int __init stm32h7_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32h7xx_exti_banks,
+ ARRAY_SIZE(stm32h7xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
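/*
 * Editor's illustrative sketch (not part of the patch above): with the
 * multi-bank rework, each EXTI bank holds 32 lines, so a global hwirq
 * decomposes into a bank index (which selects an stm32_exti_bank) and a
 * pin inside that bank (the bit position used in that bank's registers).
 * This is why the handlers above now use data->hwirq % IRQS_PER_BANK.
 * The helper is hypothetical.
 */
#define IRQS_PER_BANK	32

static void exti_hwirq_split(unsigned long hwirq,
			     unsigned int *bank, unsigned int *pin)
{
	*bank = hwirq / IRQS_PER_BANK;	/* index into the bank table */
	*pin  = hwirq % IRQS_PER_BANK;	/* bit position within that bank */
}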
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 37dd4645bf18..928858dada75 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Support for Versatile FPGA-based IRQ controllers
*/
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 6aa3ea479214..f31265937439 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
size_t alloc_sz;
- u32 nregs;
+ int nregs;
int err;
nregs = count_registers(pdev);
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index c32e45826c2c..e7d3d8f2ad5a 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the kernel ISDN subsystem and device drivers.
# Object files in subdirectories
diff --git a/drivers/isdn/capi/Makefile b/drivers/isdn/capi/Makefile
index 4d5b4b71db1e..06da3ed2c40a 100644
--- a/drivers/isdn/capi/Makefile
+++ b/drivers/isdn/capi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the CAPI subsystem.
# Ordering constraints: kernelcapi.o first
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 89dd1303a98a..49fef08858c5 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -2235,9 +2235,9 @@ static void send_listen(capidrv_contr *card)
send_message(card, &cmdcmsg);
}
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
{
- capidrv_contr *card = (capidrv_contr *)x;
+ capidrv_contr *card = from_timer(card, t, listentimer);
if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
printk(KERN_ERR "%s: controller dead ??\n", card->name);
send_listen(card);
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
return -1;
}
card->owner = THIS_MODULE;
- setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+ timer_setup(&card->listentimer, listentimerfunc, 0);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
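/*
 * Editor's illustrative sketch (not part of the patches above or below):
 * the ISDN changes in this series repeatedly convert old-style timers,
 * which passed an unsigned long cookie to setup_timer(), to the
 * timer_list-based API, where the callback receives the timer pointer and
 * recovers its container with from_timer(). A minimal generic example of
 * the converted form; "struct foo" and its fields are placeholders.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	int state;
};

static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);	/* container_of() wrapper */

	f->state = -1;
}

static void foo_start(struct foo *f)
{
	timer_setup(&f->timer, foo_timeout, 0);		/* replaces setup_timer() */
	mod_timer(&f->timer, jiffies + HZ);
}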
diff --git a/drivers/isdn/capi/capilib.c b/drivers/isdn/capi/capilib.c
index 33361f833c01..a39ad3796bba 100644
--- a/drivers/isdn/capi/capilib.c
+++ b/drivers/isdn/capi/capilib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 6f423bc49d0d..5620fd2c6009 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -55,10 +55,10 @@ DEFINE_SPINLOCK(divert_lock);
/***************************/
/* timer callback function */
/***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
{
unsigned long flags;
- struct call_struc *cs = (struct call_struc *) arg;
+ struct call_struc *cs = from_timer(cs, t, timer);
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs->timer); /* delete active timer */
@@ -157,7 +157,7 @@ int cf_command(int drvid, int mode,
/* allocate mem for information struct */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (-ENOMEM); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics.driver = drvid;
cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
@@ -450,8 +450,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
return (0); /* no external deflection needed */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (0); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire,
- (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics = *ic; /* copy incoming data */
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index c453b72272a0..ac45a2739f56 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
gigaset-$(CONFIG_GIGASET_CAPI) += capi.o
gigaset-$(CONFIG_GIGASET_I4L) += i4l.o
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 2da3ff650e1d..20d0a080a2b0 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -89,6 +89,7 @@ static int start_cbsend(struct cardstate *);
struct bas_cardstate {
struct usb_device *udev; /* USB device pointer */
+ struct cardstate *cs;
struct usb_interface *interface; /* interface for this device */
unsigned char minor; /* starting minor number */
@@ -433,10 +434,10 @@ static void check_pending(struct bas_cardstate *ucs)
* argument:
* controller state structure
*/
-static void cmd_in_timeout(unsigned long data)
+static void cmd_in_timeout(struct timer_list *t)
{
- struct cardstate *cs = (struct cardstate *) data;
- struct bas_cardstate *ucs = cs->hw.bas;
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in);
+ struct cardstate *cs = ucs->cs;
int rc;
if (!ucs->rcvbuf_size) {
@@ -639,10 +640,10 @@ static void int_in_work(struct work_struct *work)
* argument:
* controller state structure
*/
-static void int_in_resubmit(unsigned long data)
+static void int_in_resubmit(struct timer_list *t)
{
- struct cardstate *cs = (struct cardstate *) data;
- struct bas_cardstate *ucs = cs->hw.bas;
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in);
+ struct cardstate *cs = ucs->cs;
int rc;
if (ucs->retry_int_in++ >= BAS_RETRY) {
@@ -1441,10 +1442,10 @@ error:
* argument:
* controller state structure
*/
-static void req_timeout(unsigned long data)
+static void req_timeout(struct timer_list *t)
{
- struct cardstate *cs = (struct cardstate *) data;
- struct bas_cardstate *ucs = cs->hw.bas;
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl);
+ struct cardstate *cs = ucs->cs;
int pending;
unsigned long flags;
@@ -1837,10 +1838,10 @@ static void write_command_callback(struct urb *urb)
* argument:
* controller state structure
*/
-static void atrdy_timeout(unsigned long data)
+static void atrdy_timeout(struct timer_list *t)
{
- struct cardstate *cs = (struct cardstate *) data;
- struct bas_cardstate *ucs = cs->hw.bas;
+ struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy);
+ struct cardstate *cs = ucs->cs;
dev_warn(cs->dev, "timeout waiting for HD_READY_SEND_ATDATA\n");
@@ -2200,7 +2201,7 @@ static int gigaset_initcshw(struct cardstate *cs)
{
struct bas_cardstate *ucs;
- cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
+ cs->hw.bas = ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
if (!ucs) {
pr_err("out of memory\n");
return -ENOMEM;
@@ -2212,19 +2213,12 @@ static int gigaset_initcshw(struct cardstate *cs)
return -ENOMEM;
}
- ucs->urb_cmd_in = NULL;
- ucs->urb_cmd_out = NULL;
- ucs->rcvbuf = NULL;
- ucs->rcvbuf_size = 0;
-
spin_lock_init(&ucs->lock);
- ucs->pending = 0;
-
- ucs->basstate = 0;
- setup_timer(&ucs->timer_ctrl, req_timeout, (unsigned long) cs);
- setup_timer(&ucs->timer_atrdy, atrdy_timeout, (unsigned long) cs);
- setup_timer(&ucs->timer_cmd_in, cmd_in_timeout, (unsigned long) cs);
- setup_timer(&ucs->timer_int_in, int_in_resubmit, (unsigned long) cs);
+ ucs->cs = cs;
+ timer_setup(&ucs->timer_ctrl, req_timeout, 0);
+ timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0);
+ timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0);
+ timer_setup(&ucs->timer_int_in, int_in_resubmit, 0);
init_waitqueue_head(&ucs->waitqueue);
INIT_WORK(&ucs->int_in_wq, int_in_work);
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 7c7814497e3e..15482c5de33c 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -153,9 +153,9 @@ static int test_timeout(struct at_state_t *at_state)
return 1;
}
-static void timer_tick(unsigned long data)
+static void timer_tick(struct timer_list *t)
{
- struct cardstate *cs = (struct cardstate *) data;
+ struct cardstate *cs = from_timer(cs, t, timer);
unsigned long flags;
unsigned channel;
struct at_state_t *at_state;
@@ -687,7 +687,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
cs->ignoreframes = ignoreframes;
INIT_LIST_HEAD(&cs->temp_at_states);
cs->running = 0;
- init_timer(&cs->timer); /* clear next & prev */
+ timer_setup(&cs->timer, timer_tick, 0);
spin_lock_init(&cs->ev_lock);
cs->ev_tail = 0;
cs->ev_head = 0;
@@ -768,7 +768,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
spin_lock_irqsave(&cs->lock, flags);
cs->running = 1;
spin_unlock_irqrestore(&cs->lock, flags);
- setup_timer(&cs->timer, timer_tick, (unsigned long) cs);
cs->timer.expires = jiffies + msecs_to_jiffies(GIG_TICK);
add_timer(&cs->timer);
diff --git a/drivers/isdn/hardware/avm/Makefile b/drivers/isdn/hardware/avm/Makefile
index b540e8f2efb6..3830a0573fcc 100644
--- a/drivers/isdn/hardware/avm/Makefile
+++ b/drivers/isdn/hardware/avm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the AVM ISDN device drivers
# Each configuration option enables a list of files.
diff --git a/drivers/isdn/hardware/eicon/Makefile b/drivers/isdn/hardware/eicon/Makefile
index 4fa7fdb7df0d..a0ab2e2d7df0 100644
--- a/drivers/isdn/hardware/eicon/Makefile
+++ b/drivers/isdn/hardware/eicon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Eicon DIVA ISDN drivers.
# Each configuration option enables a list of files.
diff --git a/drivers/isdn/hardware/eicon/adapter.h b/drivers/isdn/hardware/eicon/adapter.h
index 71a7c2f084a7..f9b24eb8781d 100644
--- a/drivers/isdn/hardware/eicon/adapter.h
+++ b/drivers/isdn/hardware/eicon/adapter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: adapter.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
#ifndef __DIVA_USER_MODE_IDI_ADAPTER_H__
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 8bc2791bc39c..301788115c4f 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "platform.h"
#include "pc.h"
#include "di_defs.h"
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index d91dd580e978..944a7f338099 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: diva.c,v 1.21.4.1 2004/05/08 14:33:43 armin Exp $ */
#define CARDTYPE_H_WANT_DATA 1
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index e979085d1b89..b067032093a8 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: diva.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
#ifndef __DIVA_XDI_OS_PART_H__
diff --git a/drivers/isdn/hardware/eicon/diva_pci.h b/drivers/isdn/hardware/eicon/diva_pci.h
index bb4b562050f6..7ef5db98ad3c 100644
--- a/drivers/isdn/hardware/eicon/diva_pci.h
+++ b/drivers/isdn/hardware/eicon/diva_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: diva_pci.h,v 1.6 2003/01/04 15:29:45 schindler Exp $ */
#ifndef __DIVA_PCI_INTERFACE_H__
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index c61049585cbd..0033d74a7291 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -78,7 +78,7 @@ static unsigned int um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
/*
* proc entry
@@ -300,8 +300,7 @@ static int um_idi_open_adapter(struct file *file, int adapter_nr)
p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
init_waitqueue_head(&p_os->read_wait);
init_waitqueue_head(&p_os->close_wait);
- setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
- (unsigned long)p_os);
+ timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
p_os->aborted = 0;
p_os->adapter_nr = adapter_nr;
return (1);
@@ -457,9 +456,9 @@ void diva_os_wakeup_close(void *os_context)
}
static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
{
- diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+ diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
p_os->aborted = 1;
wake_up_interruptible(&p_os->read_wait);
diff --git a/drivers/isdn/hardware/eicon/dqueue.h b/drivers/isdn/hardware/eicon/dqueue.h
index 6992c45457a4..2da9799686ab 100644
--- a/drivers/isdn/hardware/eicon/dqueue.h
+++ b/drivers/isdn/hardware/eicon/dqueue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: dqueue.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
#ifndef _DIVA_USER_MODE_IDI_DATA_QUEUE_H__
diff --git a/drivers/isdn/hardware/eicon/dsp_tst.h b/drivers/isdn/hardware/eicon/dsp_tst.h
index fe36f138be8b..85edd3ea50f7 100644
--- a/drivers/isdn/hardware/eicon/dsp_tst.h
+++ b/drivers/isdn/hardware/eicon/dsp_tst.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: dsp_tst.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
#ifndef __DIVA_PRI_HOST_TEST_DSPS_H__
diff --git a/drivers/isdn/hardware/eicon/entity.h b/drivers/isdn/hardware/eicon/entity.h
index fdb83416af31..f9767d321db9 100644
--- a/drivers/isdn/hardware/eicon/entity.h
+++ b/drivers/isdn/hardware/eicon/entity.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: entity.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
#ifndef __DIVAS_USER_MODE_IDI_ENTITY__
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index eadd1ed1e014..def7992a38e6 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4501,6 +4501,7 @@ static void control_rc(PLCI *plci, byte req, byte rc, byte ch, byte global_req,
plci->channels++;
a->ncci_state[ncci] = OUTG_CON_PENDING;
}
+ /* fall through */
default:
if (plci->internal_command_queue[0])
@@ -7020,6 +7021,7 @@ static void nl_ind(PLCI *plci)
plci->NL.RNum = 1;
return;
}
+ /* fall through */
case N_BDATA:
case N_DATA:
if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */
@@ -9626,9 +9628,9 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
{
case DTMF_LISTEN_TONE_START:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_LISTEN_MF_START:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_LISTEN_START:
switch (internal_command)
@@ -9636,6 +9638,7 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
default:
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
B1_FACILITY_DTMFR), DTMF_COMMAND_1);
+ /* fall through */
case DTMF_COMMAND_1:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -9646,6 +9649,7 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
}
if (plci->internal_command)
return;
+ /* fall through */
case DTMF_COMMAND_2:
if (plci_nl_busy(plci))
{
@@ -9673,9 +9677,9 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
case DTMF_LISTEN_TONE_STOP:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_LISTEN_MF_STOP:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_LISTEN_STOP:
switch (internal_command)
@@ -9710,6 +9714,7 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
*/
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3);
+ /* fall through */
case DTMF_COMMAND_3:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -9726,9 +9731,9 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
case DTMF_SEND_TONE:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_SEND_MF:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_DIGITS_SEND:
switch (internal_command)
@@ -9737,6 +9742,7 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
((plci->dtmf_parameter_length != 0) ? B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)),
DTMF_COMMAND_1);
+ /* fall through */
case DTMF_COMMAND_1:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -9747,6 +9753,7 @@ static void dtmf_command(dword Id, PLCI *plci, byte Rc)
}
if (plci->internal_command)
return;
+ /* fall through */
case DTMF_COMMAND_2:
if (plci_nl_busy(plci))
{
@@ -9863,7 +9870,7 @@ static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci
case DTMF_LISTEN_TONE_START:
case DTMF_LISTEN_TONE_STOP:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_LISTEN_MF_START:
case DTMF_LISTEN_MF_STOP:
mask <<= 1;
@@ -9875,6 +9882,7 @@ static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci
PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
break;
}
+ /* fall through */
case DTMF_LISTEN_START:
case DTMF_LISTEN_STOP:
@@ -9904,7 +9912,7 @@ static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci
case DTMF_SEND_TONE:
- mask <<= 1;
+ mask <<= 1; /* fall through */
case DTMF_SEND_MF:
mask <<= 1;
if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
@@ -9915,6 +9923,7 @@ static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci
PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
break;
}
+ /* fall through */
case DTMF_DIGITS_SEND:
if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
@@ -11315,6 +11324,7 @@ static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_MIXER_2:
case ADJUST_B_RESTORE_MIXER_3:
case ADJUST_B_RESTORE_MIXER_4:
@@ -11344,10 +11354,12 @@ static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
plci->internal_command = plci->adjust_b_command;
break;
}
+ /* fall through */
case ADJUST_B_RESTORE_MIXER_5:
xconnect_write_coefs(plci, plci->adjust_b_command);
plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_MIXER_6:
if (!xconnect_write_coefs_process(Id, plci, Rc))
{
@@ -11392,6 +11404,7 @@ static void mixer_command(dword Id, PLCI *plci, byte Rc)
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
B1_FACILITY_MIXER), MIXER_COMMAND_1);
}
+ /* fall through */
case MIXER_COMMAND_1:
if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
{
@@ -11419,6 +11432,7 @@ static void mixer_command(dword Id, PLCI *plci, byte Rc)
mixer_indication_coefs_set(Id, plci);
} while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
}
+ /* fall through */
case MIXER_COMMAND_2:
if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
|| ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
@@ -11450,6 +11464,7 @@ static void mixer_command(dword Id, PLCI *plci, byte Rc)
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
~B1_FACILITY_MIXER), MIXER_COMMAND_3);
}
+ /* fall through */
case MIXER_COMMAND_3:
if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
{
@@ -12602,6 +12617,7 @@ static void ec_command(dword Id, PLCI *plci, byte Rc)
default:
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
B1_FACILITY_EC), EC_COMMAND_1);
+ /* fall through */
case EC_COMMAND_1:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -12612,6 +12628,7 @@ static void ec_command(dword Id, PLCI *plci, byte Rc)
}
if (plci->internal_command)
return;
+ /* fall through */
case EC_COMMAND_2:
if (plci->sig_req)
{
@@ -12650,6 +12667,7 @@ static void ec_command(dword Id, PLCI *plci, byte Rc)
return;
}
Rc = OK;
+ /* fall through */
case EC_COMMAND_2:
if ((Rc != OK) && (Rc != OK_FC))
{
@@ -12660,6 +12678,7 @@ static void ec_command(dword Id, PLCI *plci, byte Rc)
}
adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
~B1_FACILITY_EC), EC_COMMAND_3);
+ /* fall through */
case EC_COMMAND_3:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -13485,6 +13504,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_SAVE_MIXER_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
{
@@ -13496,6 +13516,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_SAVE_DTMF_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
{
@@ -13506,6 +13527,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
+ /* fall through */
case ADJUST_B_REMOVE_L23_1:
if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
&& plci->NL.Id && !plci->nl_remove_id)
@@ -13530,6 +13552,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
Rc = OK;
+ /* fall through */
case ADJUST_B_REMOVE_L23_2:
if ((Rc != OK) && (Rc != OK_FC))
{
@@ -13548,6 +13571,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_SAVE_EC_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
{
@@ -13559,6 +13583,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_SAVE_DTMF_PARAMETER_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
{
@@ -13570,6 +13595,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_SAVE_VOICE_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
{
@@ -13578,6 +13604,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
break;
}
plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
+ /* fall through */
case ADJUST_B_SWITCH_L1_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
{
@@ -13608,6 +13635,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
Rc = OK;
+ /* fall through */
case ADJUST_B_SWITCH_L1_2:
if ((Rc != OK) && (Rc != OK_FC))
{
@@ -13619,6 +13647,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_VOICE_1:
case ADJUST_B_RESTORE_VOICE_2:
if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
@@ -13629,6 +13658,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
@@ -13641,6 +13671,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_EC_1:
case ADJUST_B_RESTORE_EC_2:
if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
@@ -13652,6 +13683,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
+ /* fall through */
case ADJUST_B_ASSIGN_L23_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
{
@@ -13681,6 +13713,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
Rc = ASSIGN_OK;
+ /* fall through */
case ADJUST_B_ASSIGN_L23_2:
if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
{
@@ -13703,6 +13736,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
break;
}
plci->adjust_b_state = ADJUST_B_CONNECT_1;
+ /* fall through */
case ADJUST_B_CONNECT_1:
if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
{
@@ -13716,6 +13750,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_CONNECT_2:
case ADJUST_B_CONNECT_3:
case ADJUST_B_CONNECT_4:
@@ -13751,6 +13786,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
break;
}
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_DTMF_1:
case ADJUST_B_RESTORE_DTMF_2:
if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
@@ -13763,6 +13799,7 @@ static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
}
plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_MIXER_1:
case ADJUST_B_RESTORE_MIXER_2:
case ADJUST_B_RESTORE_MIXER_3:
@@ -13827,6 +13864,7 @@ static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
break;
}
Rc = OK;
+ /* fall through */
case ADJUST_B_RESTORE_1:
if ((Rc != OK) && (Rc != OK_FC))
{
@@ -13841,6 +13879,7 @@ static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case ADJUST_B_RESTORE_2:
if (adjust_b_process(Id, plci, Rc) != GOOD)
{
@@ -13877,6 +13916,7 @@ static void reset_b3_command(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Reset B3...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case RESET_B3_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
@@ -13930,6 +13970,7 @@ static void select_b_command(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: Select B protocol...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case SELECT_B_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
@@ -13965,7 +14006,7 @@ static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc)
switch (internal_command)
{
default:
- plci->command = 0;
+ plci->command = 0; /* fall through */
case FAX_CONNECT_ACK_COMMAND_1:
if (plci_nl_busy(plci))
{
@@ -14013,6 +14054,7 @@ static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc)
{
default:
plci->command = 0;
+ /* fall through */
case FAX_EDATA_ACK_COMMAND_1:
if (plci_nl_busy(plci))
{
@@ -14052,7 +14094,7 @@ static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc)
switch (internal_command)
{
default:
- plci->command = 0;
+ plci->command = 0; /* fall through */
case FAX_CONNECT_INFO_COMMAND_1:
if (plci_nl_busy(plci))
{
@@ -14112,6 +14154,7 @@ static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: FAX adjust B23...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case FAX_ADJUST_B23_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
@@ -14122,6 +14165,7 @@ static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
}
if (plci->internal_command)
return;
+ /* fall through */
case FAX_ADJUST_B23_COMMAND_2:
if (plci_nl_busy(plci))
{
@@ -14194,7 +14238,7 @@ static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc)
switch (internal_command)
{
default:
- plci->command = 0;
+ plci->command = 0; /* fall through */
case RTP_CONNECT_B3_REQ_COMMAND_1:
if (plci_nl_busy(plci))
{
@@ -14245,7 +14289,7 @@ static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc)
switch (internal_command)
{
default:
- plci->command = 0;
+ plci->command = 0; /* fall through */
case RTP_CONNECT_B3_RES_COMMAND_1:
if (plci_nl_busy(plci))
{
@@ -14310,6 +14354,7 @@ static void hold_save_command(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: HOLD save...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case HOLD_SAVE_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
@@ -14349,6 +14394,7 @@ static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc)
plci->adjust_b_state = ADJUST_B_START;
dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore...",
UnMapId(Id), (char *)(FILE_), __LINE__));
+ /* fall through */
case RETRIEVE_RESTORE_COMMAND_1:
Info = adjust_b_process(Id, plci, Rc);
if (Info != GOOD)
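
The /* fall through */ comments added above only document that each case is meant to drop into the next one; they let GCC's -Wimplicit-fallthrough accept the existing control flow without changing it. A minimal sketch of the pattern (state and helper names are hypothetical, not taken from the driver):

enum step { STEP_PREPARE, STEP_RUN, STEP_DONE };

static void advance(enum step s)
{
        switch (s) {
        case STEP_PREPARE:
                /* set up, then continue straight into STEP_RUN */
                /* fall through */
        case STEP_RUN:
                /* do the work shared by both entry points */
                break;
        case STEP_DONE:
        default:
                break;
        }
}

GCC matches the literal comment text against its fall-through patterns, so the comment has to sit directly before the next case label with no statements in between.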
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
index 1891246807ed..87db5f4df27d 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ b/drivers/isdn/hardware/eicon/os_4bri.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: os_4bri.c,v 1.28.4.4 2005/02/11 19:40:25 armin Exp $ */
#include "platform.h"
diff --git a/drivers/isdn/hardware/eicon/os_4bri.h b/drivers/isdn/hardware/eicon/os_4bri.h
index 72253278d4f5..94b2709537d8 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.h
+++ b/drivers/isdn/hardware/eicon/os_4bri.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: os_4bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
#ifndef __DIVA_OS_4_BRI_H__
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
index 20f2653c58fa..de93090bcacb 100644
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ b/drivers/isdn/hardware/eicon/os_bri.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: os_bri.c,v 1.21 2004/03/21 17:26:01 armin Exp $ */
#include "platform.h"
diff --git a/drivers/isdn/hardware/eicon/os_bri.h b/drivers/isdn/hardware/eicon/os_bri.h
index 02e7456f8962..37c92cc53ded 100644
--- a/drivers/isdn/hardware/eicon/os_bri.h
+++ b/drivers/isdn/hardware/eicon/os_bri.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: os_bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
#ifndef __DIVA_OS_BRI_REV_1_H__
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
index da4957abb422..b20f1fb89d14 100644
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ b/drivers/isdn/hardware/eicon/os_pri.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: os_pri.c,v 1.32 2004/03/21 17:26:01 armin Exp $ */
#include "platform.h"
diff --git a/drivers/isdn/hardware/eicon/os_pri.h b/drivers/isdn/hardware/eicon/os_pri.h
index 537c74d042e7..0e91855b171a 100644
--- a/drivers/isdn/hardware/eicon/os_pri.h
+++ b/drivers/isdn/hardware/eicon/os_pri.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: os_pri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
#ifndef __DIVA_OS_PRI_REV_1_H__
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
index e1519718ce67..db4dd4ff3642 100644
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ b/drivers/isdn/hardware/eicon/um_idi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: um_idi.c,v 1.14 2004/03/21 17:54:37 armin Exp $ */
#include "platform.h"
diff --git a/drivers/isdn/hardware/eicon/um_idi.h b/drivers/isdn/hardware/eicon/um_idi.h
index ffb88f7b42fc..9aedd9e351a3 100644
--- a/drivers/isdn/hardware/eicon/um_idi.h
+++ b/drivers/isdn/hardware/eicon/um_idi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: um_idi.h,v 1.6 2004/03/21 17:26:01 armin Exp $ */
#ifndef __DIVA_USER_MODE_IDI_CORE_H__
diff --git a/drivers/isdn/hardware/eicon/um_xdi.h b/drivers/isdn/hardware/eicon/um_xdi.h
index b48fc042a5bc..1f37aa4efd18 100644
--- a/drivers/isdn/hardware/eicon/um_xdi.h
+++ b/drivers/isdn/hardware/eicon/um_xdi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: um_xdi.h,v 1.1.2.2 2002/10/02 14:38:38 armin Exp $ */
#ifndef __DIVA_USER_MODE_XDI_H__
diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
index d303e65dbe6c..b036e217c659 100644
--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: xdi_adapter.h,v 1.7 2004/03/21 17:26:01 armin Exp $ */
#ifndef __DIVA_OS_XDI_ADAPTER_H__
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h
index 2498c349a32e..0646079bf466 100644
--- a/drivers/isdn/hardware/eicon/xdi_msg.h
+++ b/drivers/isdn/hardware/eicon/xdi_msg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile
index 2987d990993f..422f9fd8ab9a 100644
--- a/drivers/isdn/hardware/mISDN/Makefile
+++ b/drivers/isdn/hardware/mISDN/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the modular ISDN hardware drivers
#
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index dce6632daae1..ae2b2669af1b 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -156,7 +156,7 @@ _set_debug(struct fritzcard *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct fritzcard *card;
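
The set_debug() changes in this and the following mISDN hardware drivers only constify the second argument: module parameter setters wired up through module_param_call() are expected to take a const struct kernel_param * in current kernels. A minimal sketch of such a setter under that assumption (the debug variable and the propagation step are illustrative, not the driver's actual code):

#include <linux/moduleparam.h>

static unsigned int debug;

static int set_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_uint(val, kp);      /* parse val into debug */

        if (ret)
                return ret;
        /* a real driver would push the new level to every registered card here */
        return 0;
}

module_param_call(debug, set_debug, param_get_uint, &debug, 0644);
MODULE_PARM_DESC(debug, "debug level (illustrative)");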
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index c601f880141e..5acf826d913c 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* see notice in hfc_multi.c
*/
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
index 8a254747768e..b0d772340e16 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi_8xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* For License see notice in hfc_multi.c
*
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 3cf07b8ced1c..4d85645c87f7 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2855,7 +2855,7 @@ irq_notforus:
*/
static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
{
}
@@ -3877,8 +3877,7 @@ hfcmulti_initmode(struct dchannel *dch)
if (hc->dnum[pt]) {
mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
-1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
}
for (i = 1; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
@@ -3984,8 +3983,7 @@ hfcmulti_initmode(struct dchannel *dch)
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
hc->chan[i - 2].slot_tx = -1;
hc->chan[i - 2].slot_rx = -1;
hc->chan[i - 2].conf = -1;
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index d2e401a8090e..34c93874af23 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -301,8 +301,9 @@ reset_hfcpci(struct hfc_pci *hc)
* Timer function called when kernel timer expires
*/
static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
{
+ struct hfc_pci *hc = from_timer(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
@@ -1241,7 +1242,7 @@ hfcpci_int(int intno, void *dev_id)
* timer callback for D-chan busy resolution. Currently no function
*/
static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
{
}
@@ -1717,8 +1718,7 @@ static void
inithfcpci(struct hfc_pci *hc)
{
printk(KERN_DEBUG "inithfcpci: entered\n");
- setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
- (long)&hc->dch);
+ timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
hc->chanlimit = 2;
mode_hfcpci(&hc->bch[0], 1, -1);
mode_hfcpci(&hc->bch[1], 2, -1);
@@ -2043,7 +2043,7 @@ setup_hw(struct hfc_pci *hc)
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+ timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
/* default PCM master */
test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
return 0;
@@ -2265,7 +2265,7 @@ static struct pci_driver hfc_driver = {
};
static int
-_hfcpci_softirq(struct device *dev, void *arg)
+_hfcpci_softirq(struct device *dev, void *unused)
{
struct hfc_pci *hc = dev_get_drvdata(dev);
struct bchannel *bch;
@@ -2290,9 +2290,9 @@ _hfcpci_softirq(struct device *dev, void *arg)
}
static void
-hfcpci_softirq(void *arg)
+hfcpci_softirq(struct timer_list *unused)
{
- WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, arg,
+ WARN_ON_ONCE(driver_for_each_device(&hfc_driver.driver, NULL, NULL,
_hfcpci_softirq) != 0);
/* if next event would be in the past ... */
@@ -2327,9 +2327,7 @@ HFC_init(void)
if (poll != HFCPCI_BTRANS_THRESHOLD) {
printk(KERN_INFO "%s: Using alternative poll value of %d\n",
__func__, poll);
- hfc_tl.function = (void *)hfcpci_softirq;
- hfc_tl.data = 0;
- init_timer(&hfc_tl);
+ timer_setup(&hfc_tl, hfcpci_softirq, 0);
hfc_tl.expires = jiffies + tics;
hfc_jiffies = hfc_tl.expires;
add_timer(&hfc_tl);
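
From here on, the timer changes all follow one conversion: setup_timer()/init_timer() calls that passed a cast callback and a data cookie become timer_setup(), and the callback recovers its containing structure with from_timer(), a container_of() wrapper keyed on the embedded struct timer_list. A minimal sketch with a hypothetical device structure, not taken from any of these drivers:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_hw {
        struct timer_list poll_timer;   /* timer embedded in the device state */
        unsigned int polls;
};

static void my_hw_poll(struct timer_list *t)
{
        struct my_hw *hw = from_timer(hw, t, poll_timer);       /* timer -> my_hw */

        hw->polls++;
        mod_timer(&hw->poll_timer, jiffies + HZ);       /* re-arm in one second */
}

static void my_hw_start(struct my_hw *hw)
{
        timer_setup(&hw->poll_timer, my_hw_poll, 0);
        mod_timer(&hw->poll_timer, jiffies + HZ);
}

Because the callback derives its context from the timer itself, the repeated init_timer() calls before add_timer() in the fill-FIFO paths become unnecessary, which is why the hunks above and below simply drop them.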
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index 5f8f1d9cac11..e4fa2a2824af 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* hfcsusb.h, HFC-S USB mISDN driver
*/
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index d5bdbaf93a1a..1fc290659e94 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -244,7 +244,7 @@ _set_debug(struct inf_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct inf_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index e240010b93fa..4d78f870435e 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -172,7 +172,6 @@ isac_fill_fifo(struct isac_hw *isac)
pr_debug("%s: %s dbusytimer running\n", isac->name, __func__);
del_timer(&isac->dch.timer);
}
- init_timer(&isac->dch.timer);
isac->dch.timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
add_timer(&isac->dch.timer);
if (isac->dch.debug & DEBUG_HW_DFIFO) {
@@ -727,8 +726,9 @@ isac_release(struct isac_hw *isac)
}
static void
-dbusy_timer_handler(struct isac_hw *isac)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct isac_hw *isac = from_timer(isac, t, dch.timer);
int rbch, star;
u_long flags;
@@ -796,8 +796,7 @@ isac_init(struct isac_hw *isac)
}
isac->mon_tx = NULL;
isac->mon_rx = NULL;
- setup_timer(&isac->dch.timer, (void *)dbusy_timer_handler,
- (long)isac);
+ timer_setup(&isac->dch.timer, dbusy_timer_handler, 0);
isac->mocr = 0xaa;
if (isac->type & IPAC_TYPE_ISACX) {
/* Disable all IRQ */
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 5b078591b6ee..b791688d0228 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1146,9 +1146,9 @@ mISDNisar_irq(struct isar_hw *isar)
EXPORT_SYMBOL(mISDNisar_irq);
static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
{
- struct isar_ch *ch = (struct isar_ch *)data;
+ struct isar_ch *ch = from_timer(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
@@ -1635,11 +1635,9 @@ init_isar(struct isar_hw *isar)
}
if (isar->version != 1)
return -EINVAL;
- setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
- (long)&isar->ch[0]);
+ timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
- setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
- (long)&isar->ch[1]);
+ timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
return 0;
}
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 6a6d848bd18e..89d9ba8ed535 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -111,7 +111,7 @@ _set_debug(struct tiger_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct tiger_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 9815bb4eec9c..1f1446ed8d5f 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -94,7 +94,7 @@ _set_debug(struct sfax_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct sfax_hw *card;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index d80072fef434..5acf6ab67cd3 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -101,7 +101,7 @@ _set_debug(struct w6692_hw *card)
}
static int
-set_debug(const char *val, struct kernel_param *kp)
+set_debug(const char *val, const struct kernel_param *kp)
{
int ret;
struct w6692_hw *card;
@@ -311,7 +311,6 @@ W6692_fill_Dfifo(struct w6692_hw *card)
pr_debug("%s: fill_Dfifo dbusytimer running\n", card->name);
del_timer(&dch->timer);
}
- init_timer(&dch->timer);
dch->timer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
add_timer(&dch->timer);
if (debug & DEBUG_HW_DFIFO) {
@@ -819,8 +818,9 @@ w6692_irq(int intno, void *dev_id)
}
static void
-dbusy_timer_handler(struct dchannel *dch)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct dchannel *dch = from_timer(dch, t, timer);
struct w6692_hw *card = dch->hw;
int rbch, star;
u_long flags;
@@ -852,8 +852,7 @@ static void initW6692(struct w6692_hw *card)
{
u8 val;
- setup_timer(&card->dch.timer, (void *)dbusy_timer_handler,
- (u_long)&card->dch);
+ timer_setup(&card->dch.timer, dbusy_timer_handler, 0);
w6692_mode(&card->bc[0], ISDN_P_NONE);
w6692_mode(&card->bc[1], ISDN_P_NONE);
WriteW6692(card, W_D_CTL, 0x00);
diff --git a/drivers/isdn/hisax/Makefile b/drivers/isdn/hisax/Makefile
index 646368fe41c9..3eca9d23f1c2 100644
--- a/drivers/isdn/hisax/Makefile
+++ b/drivers/isdn/hisax/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the hisax ISDN device driver
# The target object and module list name.
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c
index dcf4c2a9fcea..77debda2221b 100644
--- a/drivers/isdn/hisax/amd7930_fn.c
+++ b/drivers/isdn/hisax/amd7930_fn.c
@@ -398,7 +398,6 @@ Amd7930_fill_Dfifo(struct IsdnCardState *cs)
debugl1(cs, "Amd7930: fill_Dfifo dbusytimer running");
del_timer(&cs->dbusytimer);
}
- init_timer(&cs->dbusytimer);
cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
add_timer(&cs->dbusytimer);
@@ -686,8 +685,9 @@ DC_Close_Amd7930(struct IsdnCardState *cs) {
static void
-dbusy_timer_handler(struct IsdnCardState *cs)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
u_long flags;
struct PStack *stptr;
WORD dtcr, der;
@@ -790,5 +790,5 @@ void Amd7930_init(struct IsdnCardState *cs)
void setup_Amd7930(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, Amd7930_bh);
- setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
+ timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
}
diff --git a/drivers/isdn/hisax/arcofi.c b/drivers/isdn/hisax/arcofi.c
index 9826bad49e2c..2f784f96d439 100644
--- a/drivers/isdn/hisax/arcofi.c
+++ b/drivers/isdn/hisax/arcofi.c
@@ -23,7 +23,6 @@ add_arcofi_timer(struct IsdnCardState *cs) {
if (test_and_set_bit(FLG_ARCOFI_TIMER, &cs->HW_Flags)) {
del_timer(&cs->dc.isac.arcofitimer);
}
- init_timer(&cs->dc.isac.arcofitimer);
cs->dc.isac.arcofitimer.expires = jiffies + ((ARCOFI_TIMER_VALUE * HZ) / 1000);
add_timer(&cs->dc.isac.arcofitimer);
}
@@ -112,7 +111,8 @@ arcofi_fsm(struct IsdnCardState *cs, int event, void *data) {
}
static void
-arcofi_timer(struct IsdnCardState *cs) {
+arcofi_timer(struct timer_list *t) {
+ struct IsdnCardState *cs = from_timer(cs, t, dc.isac.arcofitimer);
arcofi_fsm(cs, ARCOFI_TIMEOUT, NULL);
}
@@ -125,7 +125,7 @@ clear_arcofi(struct IsdnCardState *cs) {
void
init_arcofi(struct IsdnCardState *cs) {
- setup_timer(&cs->dc.isac.arcofitimer, (void *)arcofi_timer, (long)cs);
+ timer_setup(&cs->dc.isac.arcofitimer, arcofi_timer, 0);
init_waitqueue_head(&cs->dc.isac.arcofi_wait);
test_and_set_bit(HW_ARCOFI, &cs->HW_Flags);
}
diff --git a/drivers/isdn/hisax/asuscom.c b/drivers/isdn/hisax/asuscom.c
index 62f9c43e2377..74c871495e81 100644
--- a/drivers/isdn/hisax/asuscom.c
+++ b/drivers/isdn/hisax/asuscom.c
@@ -348,7 +348,7 @@ int setup_asuscom(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
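
The ISAPnP checks changed here and in the later hisax setup routines share one rationale: pnp_irq() returns -1 when the IRQ resource is missing or invalid, and because the result is stored in an unsigned variable, a !value test never catches that, so the comparison is made against -1 instead. A small illustrative helper under that assumption (the function name is hypothetical):

#include <linux/pnp.h>
#include <linux/errno.h>

static int example_get_pnp_irq(struct pnp_dev *pdev)
{
        resource_size_t irq = pnp_irq(pdev, 0);

        if (irq == -1)          /* no valid IRQ resource assigned */
                return -ENODEV;
        return irq;
}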
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index daf3742cdef6..a18b605fb4f2 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -805,7 +805,7 @@ static int avm_pnp_setup(struct IsdnCardState *cs)
cs->hw.avm.cfg_reg =
pnp_port_start(pnp_avm_d, 0);
cs->irq = pnp_irq(pnp_avm_d, 0);
- if (!cs->irq) {
+ if (cs->irq == -1) {
printk(KERN_ERR "FritzPnP:No IRQ\n");
return (0);
}
diff --git a/drivers/isdn/hisax/diva.c b/drivers/isdn/hisax/diva.c
index 3fc94e7741ae..d23df7a7784d 100644
--- a/drivers/isdn/hisax/diva.c
+++ b/drivers/isdn/hisax/diva.c
@@ -798,8 +798,9 @@ reset_diva(struct IsdnCardState *cs)
#define DIVA_ASSIGN 1
static void
-diva_led_handler(struct IsdnCardState *cs)
+diva_led_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.diva.tl);
int blink = 0;
if ((cs->subtyp == DIVA_IPAC_ISA) ||
@@ -828,7 +829,6 @@ diva_led_handler(struct IsdnCardState *cs)
byteout(cs->hw.diva.ctrl, cs->hw.diva.ctrl_reg);
if (blink) {
- init_timer(&cs->hw.diva.tl);
cs->hw.diva.tl.expires = jiffies + ((blink * HZ) / 1000);
add_timer(&cs->hw.diva.tl);
}
@@ -900,7 +900,7 @@ Diva_card_msg(struct IsdnCardState *cs, int mt, void *arg)
(cs->subtyp != DIVA_IPAC_PCI) &&
(cs->subtyp != DIVA_IPACX_PCI)) {
spin_lock_irqsave(&cs->lock, flags);
- diva_led_handler(cs);
+ diva_led_handler(&cs->hw.diva.tl);
spin_unlock_irqrestore(&cs->lock, flags);
}
return (0);
@@ -978,8 +978,7 @@ static int setup_diva_common(struct IsdnCardState *cs)
printk(KERN_INFO "Diva: IPACX Design Id: %x\n",
MemReadISAC_IPACX(cs, IPACX_ID) & 0x3F);
} else { /* DIVA 2.0 */
- setup_timer(&cs->hw.diva.tl, (void *)diva_led_handler,
- (long)cs);
+ timer_setup(&cs->hw.diva.tl, diva_led_handler, 0);
cs->readisac = &ReadISAC;
cs->writeisac = &WriteISAC;
cs->readisacfifo = &ReadISACfifo;
@@ -1094,7 +1093,7 @@ static int setup_diva_isapnp(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Diva PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 03bc5d504e22..0754c0743790 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -606,8 +606,9 @@ check_arcofi(struct IsdnCardState *cs)
#endif /* ARCOFI_USE */
static void
-elsa_led_handler(struct IsdnCardState *cs)
+elsa_led_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.elsa.tl);
int blink = 0;
if (cs->subtyp == ELSA_PCMCIA || cs->subtyp == ELSA_PCMCIA_IPAC)
@@ -640,7 +641,6 @@ elsa_led_handler(struct IsdnCardState *cs)
} else
byteout(cs->hw.elsa.ctrl, cs->hw.elsa.ctrl_reg);
if (blink) {
- init_timer(&cs->hw.elsa.tl);
cs->hw.elsa.tl.expires = jiffies + ((blink * HZ) / 1000);
add_timer(&cs->hw.elsa.tl);
}
@@ -715,7 +715,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
init_modem(cs);
}
#endif
- elsa_led_handler(cs);
+ elsa_led_handler(&cs->hw.elsa.tl);
return (ret);
case (MDL_REMOVE | REQUEST):
cs->hw.elsa.status &= 0;
@@ -767,7 +767,7 @@ Elsa_card_msg(struct IsdnCardState *cs, int mt, void *arg)
else
cs->hw.elsa.status &= ~ELSA_BAD_PWR;
}
- elsa_led_handler(cs);
+ elsa_led_handler(&cs->hw.elsa.tl);
return (ret);
}
@@ -945,7 +945,7 @@ static int setup_elsa_isapnp(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Elsa PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
@@ -1147,7 +1147,7 @@ static int setup_elsa_common(struct IsdnCard *card)
init_arcofi(cs);
#endif
setup_isac(cs);
- setup_timer(&cs->hw.elsa.tl, (void *)elsa_led_handler, (long)cs);
+ timer_setup(&cs->hw.elsa.tl, elsa_led_handler, 0);
/* Teste Timer */
if (cs->hw.elsa.timer) {
byteout(cs->hw.elsa.trig, 0xff);
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index d63266fa8cbd..3e020ec0f65e 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -85,8 +85,9 @@ FsmChangeState(struct FsmInst *fi, int newstate)
}
static void
-FsmExpireTimer(struct FsmTimer *ft)
+FsmExpireTimer(struct timer_list *t)
{
+ struct FsmTimer *ft = from_timer(ft, t, tl);
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
@@ -102,7 +103,7 @@ FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmInitTimer %lx", (long) ft);
#endif
- setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft);
+ timer_setup(&ft->tl, FsmExpireTimer, 0);
}
void
@@ -131,7 +132,6 @@ FsmAddTimer(struct FsmTimer *ft,
ft->fi->printdebug(ft->fi, "FsmAddTimer already active!");
return -1;
}
- init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -152,7 +152,6 @@ FsmRestartTimer(struct FsmTimer *ft,
if (timer_pending(&ft->tl))
del_timer(&ft->tl);
- init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 9090cc1e1f29..e9bb8fb67ad0 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -591,8 +591,9 @@ bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
/* layer 1 timer function */
/**************************/
static void
-hfc_l1_timer(struct hfc4s8s_l1 *l1)
+hfc_l1_timer(struct timer_list *t)
{
+ struct hfc4s8s_l1 *l1 = from_timer(l1, t, l1_timer);
u_long flags;
if (!l1->enabled)
@@ -1396,8 +1397,7 @@ setup_instance(hfc4s8s_hw *hw)
l1p = hw->l1 + i;
spin_lock_init(&l1p->lock);
l1p->hw = hw;
- setup_timer(&l1p->l1_timer, (void *)hfc_l1_timer,
- (long)(l1p));
+ timer_setup(&l1p->l1_timer, hfc_l1_timer, 0);
l1p->st_num = i;
skb_queue_head_init(&l1p->d_tx_queue);
l1p->d_if.ifc.priv = hw->l1 + i;
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.h b/drivers/isdn/hisax/hfc4s8s_l1.h
index 6a8f89113d2f..4665b9d5df16 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.h
+++ b/drivers/isdn/hisax/hfc4s8s_l1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************/
/* $Id: hfc4s8s_l1.h,v 1.1 2005/02/02 17:28:55 martinb1 Exp $ */
/* */
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index ad8597a1a07e..86b82172e992 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -1014,7 +1014,7 @@ setstack_hfcd(struct PStack *st, struct IsdnCardState *cs)
}
static void
-hfc_dbusy_timer(struct IsdnCardState *cs)
+hfc_dbusy_timer(struct timer_list *t)
{
}
@@ -1073,6 +1073,6 @@ set_cs_func(struct IsdnCardState *cs)
cs->writeisacfifo = &dummyf;
cs->BC_Read_Reg = &ReadReg;
cs->BC_Write_Reg = &WriteReg;
- setup_timer(&cs->dbusytimer, (void *)hfc_dbusy_timer, (long)cs);
+ timer_setup(&cs->dbusytimer, hfc_dbusy_timer, 0);
INIT_WORK(&cs->tqueue, hfcd_bh);
}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index f9ca35cc32b1..8e5b03161b2f 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -165,8 +165,9 @@ reset_hfcpci(struct IsdnCardState *cs)
/* Timer function called when kernel timer expires */
/***************************************************/
static void
-hfcpci_Timer(struct IsdnCardState *cs)
+hfcpci_Timer(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.hfcpci.timer);
cs->hw.hfcpci.timer.expires = jiffies + 75;
/* WD RESET */
/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcpci.ctmt | 0x80);
@@ -1095,7 +1096,7 @@ hfcpci_interrupt(int intno, void *dev_id)
/* timer callback for D-chan busy resolution. Currently no function */
/********************************************************************/
static void
-hfcpci_dbusy_timer(struct IsdnCardState *cs)
+hfcpci_dbusy_timer(struct timer_list *t)
{
}
@@ -1582,7 +1583,7 @@ inithfcpci(struct IsdnCardState *cs)
cs->bcs[1].BC_SetStack = setstack_2b;
cs->bcs[0].BC_Close = close_hfcpci;
cs->bcs[1].BC_Close = close_hfcpci;
- setup_timer(&cs->dbusytimer, (void *)hfcpci_dbusy_timer, (long)cs);
+ timer_setup(&cs->dbusytimer, hfcpci_dbusy_timer, 0);
mode_hfcpci(cs->bcs, 0, 0);
mode_hfcpci(cs->bcs + 1, 0, 1);
}
@@ -1744,7 +1745,7 @@ setup_hfcpci(struct IsdnCard *card)
cs->BC_Write_Reg = NULL;
cs->irq_func = &hfcpci_interrupt;
cs->irq_flags |= IRQF_SHARED;
- setup_timer(&cs->hw.hfcpci.timer, (void *)hfcpci_Timer, (long)cs);
+ timer_setup(&cs->hw.hfcpci.timer, hfcpci_Timer, 0);
cs->cardmsg = &hfcpci_card_msg;
cs->auxcmd = &hfcpci_auxcmd;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 3aef8e1a90e4..4d3b4b2f2612 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -418,8 +418,9 @@ reset_hfcsx(struct IsdnCardState *cs)
/* Timer function called when kernel timer expires */
/***************************************************/
static void
-hfcsx_Timer(struct IsdnCardState *cs)
+hfcsx_Timer(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.hfcsx.timer);
cs->hw.hfcsx.timer.expires = jiffies + 75;
/* WD RESET */
/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcsx.ctmt | 0x80);
@@ -860,7 +861,7 @@ hfcsx_interrupt(int intno, void *dev_id)
/* timer callback for D-chan busy resolution. Currently no function */
/********************************************************************/
static void
-hfcsx_dbusy_timer(struct IsdnCardState *cs)
+hfcsx_dbusy_timer(struct timer_list *t)
{
}
@@ -1422,7 +1423,7 @@ int setup_hfcsx(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
@@ -1495,7 +1496,7 @@ int setup_hfcsx(struct IsdnCard *card)
} else
return (0); /* no valid card type */
- setup_timer(&cs->dbusytimer, (void *)hfcsx_dbusy_timer, (long)cs);
+ timer_setup(&cs->dbusytimer, hfcsx_dbusy_timer, 0);
INIT_WORK(&cs->tqueue, hfcsx_bh);
cs->readisac = NULL;
cs->writeisac = NULL;
@@ -1507,7 +1508,7 @@ int setup_hfcsx(struct IsdnCard *card)
cs->hw.hfcsx.b_fifo_size = 0; /* fifo size still unknown */
cs->hw.hfcsx.cirm = ccd_sp_irqtab[cs->irq & 0xF]; /* RAM not evaluated */
- setup_timer(&cs->hw.hfcsx.timer, (void *)hfcsx_Timer, (long)cs);
+ timer_setup(&cs->hw.hfcsx.timer, hfcsx_Timer, 0);
reset_hfcsx(cs);
cs->cardmsg = &hfcsx_card_msg;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index e8212185d386..97ecb3073045 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -343,8 +343,9 @@ handle_led(hfcusb_data *hfc, int event)
/* ISDN l1 timer T3 expires */
static void
-l1_timer_expire_t3(hfcusb_data *hfc)
+l1_timer_expire_t3(struct timer_list *t)
{
+ hfcusb_data *hfc = from_timer(hfc, t, t3_timer);
hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
NULL);
@@ -360,8 +361,9 @@ l1_timer_expire_t3(hfcusb_data *hfc)
/* ISDN l1 timer T4 expires */
static void
-l1_timer_expire_t4(hfcusb_data *hfc)
+l1_timer_expire_t4(struct timer_list *t)
{
+ hfcusb_data *hfc = from_timer(hfc, t, t4_timer);
hfc->d_if.ifc.l1l2(&hfc->d_if.ifc, PH_DEACTIVATE | INDICATION,
NULL);
@@ -1165,10 +1167,10 @@ hfc_usb_init(hfcusb_data *hfc)
hfc->old_led_state = 0;
/* init the t3 timer */
- setup_timer(&hfc->t3_timer, (void *)l1_timer_expire_t3, (long)hfc);
+ timer_setup(&hfc->t3_timer, l1_timer_expire_t3, 0);
/* init the t4 timer */
- setup_timer(&hfc->t4_timer, (void *)l1_timer_expire_t4, (long)hfc);
+ timer_setup(&hfc->t4_timer, l1_timer_expire_t4, 0);
/* init the background machinery for control requests */
hfc->ctrl_read.bRequestType = 0xc0;
diff --git a/drivers/isdn/hisax/hfc_usb.h b/drivers/isdn/hisax/hfc_usb.h
index f987bf89da1a..9a212330e8a8 100644
--- a/drivers/isdn/hisax/hfc_usb.h
+++ b/drivers/isdn/hisax/hfc_usb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* hfc_usb.h
*
diff --git a/drivers/isdn/hisax/hfcscard.c b/drivers/isdn/hisax/hfcscard.c
index 467287096918..91b5219499ca 100644
--- a/drivers/isdn/hisax/hfcscard.c
+++ b/drivers/isdn/hisax/hfcscard.c
@@ -41,8 +41,9 @@ hfcs_interrupt(int intno, void *dev_id)
}
static void
-hfcs_Timer(struct IsdnCardState *cs)
+hfcs_Timer(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.hfcD.timer);
cs->hw.hfcD.timer.expires = jiffies + 75;
/* WD RESET */
/* WriteReg(cs, HFCD_DATA, HFCD_CTMT, cs->hw.hfcD.ctmt | 0x80);
@@ -195,7 +196,7 @@ int setup_hfcs(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "HFC PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
@@ -253,7 +254,7 @@ int setup_hfcs(struct IsdnCard *card)
outb(0x57, cs->hw.hfcD.addr | 1);
}
set_cs_func(cs);
- setup_timer(&cs->hw.hfcD.timer, (void *)hfcs_Timer, (long)cs);
+ timer_setup(&cs->hw.hfcD.timer, hfcs_Timer, 0);
cs->cardmsg = &hfcs_card_msg;
cs->irq_func = &hfcs_interrupt;
return (1);
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index e4f7573ba9bf..7a7137d8664b 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -940,6 +940,8 @@ static int fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
}
adapter->io = pnp_port_start(pdev, 0);
adapter->irq = pnp_irq(pdev, 0);
+ if (!adapter->io || adapter->irq == -1)
+ goto err_free;
printk(KERN_INFO "hisax_fcpcipnp: found adapter %s at IO %#x irq %d\n",
(char *) dev_id->driver_data, adapter->io, adapter->irq);
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.h b/drivers/isdn/hisax/hisax_fcpcipnp.h
index aedef97827fe..1f64e9937aa1 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.h
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "hisax_if.h"
#include "hisax_isac.h"
#include <linux/pci.h>
diff --git a/drivers/isdn/hisax/hisax_isac.h b/drivers/isdn/hisax/hisax_isac.h
index 08890cf4d923..d7301da97991 100644
--- a/drivers/isdn/hisax/hisax_isac.h
+++ b/drivers/isdn/hisax/hisax_isac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HISAX_ISAC_H__
#define __HISAX_ISAC_H__
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 8d1804572b32..831dd1bb81ef 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -168,7 +168,6 @@ icc_fill_fifo(struct IsdnCardState *cs)
debugl1(cs, "icc_fill_fifo dbusytimer running");
del_timer(&cs->dbusytimer);
}
- init_timer(&cs->dbusytimer);
cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
add_timer(&cs->dbusytimer);
if (cs->debug & L1_DEB_ISAC_FIFO) {
@@ -580,8 +579,9 @@ DC_Close_icc(struct IsdnCardState *cs) {
}
static void
-dbusy_timer_handler(struct IsdnCardState *cs)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
struct PStack *stptr;
int rbch, star;
@@ -676,5 +676,5 @@ clear_pending_icc_ints(struct IsdnCardState *cs)
void setup_icc(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, icc_bh);
- setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
+ timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
}
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index c426b4fea28a..c7086c1534bd 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -35,7 +35,7 @@
static void ph_command(struct IsdnCardState *cs, unsigned int command);
static inline void cic_int(struct IsdnCardState *cs);
static void dch_l2l1(struct PStack *st, int pr, void *arg);
-static void dbusy_timer_handler(struct IsdnCardState *cs);
+static void dbusy_timer_handler(struct timer_list *t);
static void dch_empty_fifo(struct IsdnCardState *cs, int count);
static void dch_fill_fifo(struct IsdnCardState *cs);
static inline void dch_int(struct IsdnCardState *cs);
@@ -198,8 +198,9 @@ dch_l2l1(struct PStack *st, int pr, void *arg)
//----------------------------------------------------------
//----------------------------------------------------------
static void
-dbusy_timer_handler(struct IsdnCardState *cs)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
struct PStack *st;
int rbchd, stard;
@@ -298,7 +299,6 @@ dch_fill_fifo(struct IsdnCardState *cs)
debugl1(cs, "dch_fill_fifo dbusytimer running");
del_timer(&cs->dbusytimer);
}
- init_timer(&cs->dbusytimer);
cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
add_timer(&cs->dbusytimer);
@@ -424,7 +424,7 @@ dch_init(struct IsdnCardState *cs)
cs->setstack_d = dch_setstack;
- setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
+ timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
cs->writeisac(cs, IPACX_TR_CONF0, 0x00); // clear LDD
cs->writeisac(cs, IPACX_TR_CONF2, 0x00); // enable transmitter
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index ea965f29a555..bd40e0671ded 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -171,7 +171,6 @@ isac_fill_fifo(struct IsdnCardState *cs)
debugl1(cs, "isac_fill_fifo dbusytimer running");
del_timer(&cs->dbusytimer);
}
- init_timer(&cs->dbusytimer);
cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ)/1000);
add_timer(&cs->dbusytimer);
if (cs->debug & L1_DEB_ISAC_FIFO) {
@@ -584,8 +583,9 @@ DC_Close_isac(struct IsdnCardState *cs)
}
static void
-dbusy_timer_handler(struct IsdnCardState *cs)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
struct PStack *stptr;
int rbch, star;
@@ -677,5 +677,5 @@ void clear_pending_isac_ints(struct IsdnCardState *cs)
void setup_isac(struct IsdnCardState *cs)
{
INIT_WORK(&cs->tqueue, isac_bh);
- setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler, (long)cs);
+ timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
}
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 98b4b67ea337..d01ff116797b 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1267,7 +1267,8 @@ isar_int_main(struct IsdnCardState *cs)
}
static void
-ftimer_handler(struct BCState *bcs) {
+ftimer_handler(struct timer_list *t) {
+ struct BCState *bcs = from_timer(bcs, t, hw.isar.ftimer);
if (bcs->cs->debug)
debugl1(bcs->cs, "ftimer flags %04lx",
bcs->Flag);
@@ -1902,8 +1903,6 @@ void initisar(struct IsdnCardState *cs)
cs->bcs[1].BC_SetStack = setstack_isar;
cs->bcs[0].BC_Close = close_isarstate;
cs->bcs[1].BC_Close = close_isarstate;
- setup_timer(&cs->bcs[0].hw.isar.ftimer, (void *)ftimer_handler,
- (long)&cs->bcs[0]);
- setup_timer(&cs->bcs[1].hw.isar.ftimer, (void *)ftimer_handler,
- (long)&cs->bcs[1]);
+ timer_setup(&cs->bcs[0].hw.isar.ftimer, ftimer_handler, 0);
+ timer_setup(&cs->bcs[1].hw.isar.ftimer, ftimer_handler, 0);
}
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index 569ce52c567b..bb3f9ec62749 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -160,8 +160,9 @@ newl3state(struct l3_process *pc, int state)
}
static void
-L3ExpireTimer(struct L3Timer *t)
+L3ExpireTimer(struct timer_list *timer)
{
+ struct L3Timer *t = from_timer(t, timer, tl);
t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
}
@@ -169,7 +170,7 @@ void
L3InitTimer(struct l3_process *pc, struct L3Timer *t)
{
t->pc = pc;
- setup_timer(&t->tl, (void *)L3ExpireTimer, (long)t);
+ timer_setup(&t->tl, L3ExpireTimer, 0);
}
void
@@ -186,7 +187,6 @@ L3AddTimer(struct L3Timer *t,
printk(KERN_WARNING "L3AddTimer: timer already active!\n");
return -1;
}
- init_timer(&t->tl);
t->event = event;
t->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&t->tl);
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index 1399ddd4f6cb..53e299be4304 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -238,7 +238,7 @@ int setup_isurf(struct IsdnCard *card)
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
- if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
+ if (cs->irq == -1 || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/ix1_micro.c b/drivers/isdn/hisax/ix1_micro.c
index 7ae39f5e865d..bfb79f3f0a49 100644
--- a/drivers/isdn/hisax/ix1_micro.c
+++ b/drivers/isdn/hisax/ix1_micro.c
@@ -256,7 +256,7 @@ int setup_ix1micro(struct IsdnCard *card)
}
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index e4c33cfe3ef4..dfbcd2eaa81a 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -261,7 +261,7 @@ int setup_niccy(struct IsdnCard *card)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1] ||
+ if (card->para[0] == -1 || !card->para[1] ||
!card->para[2]) {
printk(KERN_ERR "NiccyPnP:some resources are "
"missing %ld/%lx/%lx\n",
diff --git a/drivers/isdn/hisax/saphir.c b/drivers/isdn/hisax/saphir.c
index 6b2d0eccdd56..db906cb37a3f 100644
--- a/drivers/isdn/hisax/saphir.c
+++ b/drivers/isdn/hisax/saphir.c
@@ -159,8 +159,9 @@ Start_ISAC:
}
static void
-SaphirWatchDog(struct IsdnCardState *cs)
+SaphirWatchDog(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.saphir.timer);
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
@@ -268,9 +269,7 @@ int setup_saphir(struct IsdnCard *card)
cs->irq, cs->hw.saphir.cfg_reg);
setup_isac(cs);
- cs->hw.saphir.timer.function = (void *) SaphirWatchDog;
- cs->hw.saphir.timer.data = (long) cs;
- init_timer(&cs->hw.saphir.timer);
+ timer_setup(&cs->hw.saphir.timer, SaphirWatchDog, 0);
cs->hw.saphir.timer.expires = jiffies + 4 * HZ;
add_timer(&cs->hw.saphir.timer);
if (saphir_reset(cs)) {
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index f16a47bcef48..c0b97b893495 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -558,7 +558,7 @@ static int setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1]) {
+ if (card->para[0] == -1 || !card->para[1]) {
printk(KERN_ERR "Sedlbauer PnP:some resources are missing %ld/%lx\n",
card->para[0], card->para[1]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/teleint.c b/drivers/isdn/hisax/teleint.c
index 950399f066ef..247aa33076b1 100644
--- a/drivers/isdn/hisax/teleint.c
+++ b/drivers/isdn/hisax/teleint.c
@@ -179,8 +179,9 @@ Start_ISAC:
}
static void
-TeleInt_Timer(struct IsdnCardState *cs)
+TeleInt_Timer(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, hw.hfc.timer);
int stat = 0;
u_long flags;
@@ -278,7 +279,7 @@ int setup_TeleInt(struct IsdnCard *card)
cs->bcs[0].hw.hfc.send = NULL;
cs->bcs[1].hw.hfc.send = NULL;
cs->hw.hfc.fifosize = 7 * 1024 + 512;
- setup_timer(&cs->hw.hfc.timer, (void *)TeleInt_Timer, (long)cs);
+ timer_setup(&cs->hw.hfc.timer, TeleInt_Timer, 0);
if (!request_region(cs->hw.hfc.addr, 2, "TeleInt isdn")) {
printk(KERN_WARNING
"HiSax: TeleInt config port %x-%x already in use\n",
diff --git a/drivers/isdn/hisax/teles3.c b/drivers/isdn/hisax/teles3.c
index 38fb2c1a3f0f..1eef693f04f0 100644
--- a/drivers/isdn/hisax/teles3.c
+++ b/drivers/isdn/hisax/teles3.c
@@ -306,7 +306,7 @@ int setup_teles3(struct IsdnCard *card)
card->para[2] = pnp_port_start(pnp_d, 1);
card->para[1] = pnp_port_start(pnp_d, 0);
card->para[0] = pnp_irq(pnp_d, 0);
- if (!card->para[0] || !card->para[1] || !card->para[2]) {
+ if (card->para[0] == -1 || !card->para[1] || !card->para[2]) {
printk(KERN_ERR "Teles PnP:some resources are missing %ld/%lx/%lx\n",
card->para[0], card->para[1], card->para[2]);
pnp_disable_dev(pnp_d);
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c
index 6f6733b7c1e4..c4be1644f5bb 100644
--- a/drivers/isdn/hisax/w6692.c
+++ b/drivers/isdn/hisax/w6692.c
@@ -188,7 +188,6 @@ W6692_fill_fifo(struct IsdnCardState *cs)
debugl1(cs, "W6692_fill_fifo dbusytimer running");
del_timer(&cs->dbusytimer);
}
- init_timer(&cs->dbusytimer);
cs->dbusytimer.expires = jiffies + ((DBUSY_TIMER_VALUE * HZ) / 1000);
add_timer(&cs->dbusytimer);
if (cs->debug & L1_DEB_ISAC_FIFO) {
@@ -684,8 +683,9 @@ DC_Close_W6692(struct IsdnCardState *cs)
}
static void
-dbusy_timer_handler(struct IsdnCardState *cs)
+dbusy_timer_handler(struct timer_list *t)
{
+ struct IsdnCardState *cs = from_timer(cs, t, dbusytimer);
struct PStack *stptr;
int rbch, star;
u_long flags;
@@ -904,8 +904,7 @@ static void initW6692(struct IsdnCardState *cs, int part)
if (part & 1) {
cs->setstack_d = setstack_W6692;
cs->DC_Close = DC_Close_W6692;
- setup_timer(&cs->dbusytimer, (void *)dbusy_timer_handler,
- (long)cs);
+ timer_setup(&cs->dbusytimer, dbusy_timer_handler, 0);
resetW6692(cs);
ph_command(cs, W_L1CMD_RST);
cs->dc.w6692.ph_state = W_L1CMD_RST;
diff --git a/drivers/isdn/i4l/Makefile b/drivers/isdn/i4l/Makefile
index cb9d3bb9fae0..be77500c9e86 100644
--- a/drivers/isdn/i4l/Makefile
+++ b/drivers/isdn/i4l/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the kernel ISDN subsystem and device drivers.
# Each configuration option enables a list of files.
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 38a5bb764c7b..8b03d618185e 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -231,7 +231,7 @@ static int isdn_timer_cnt2 = 0;
static int isdn_timer_cnt3 = 0;
static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
{
int tf = dev->tflags;
if (tf & ISDN_TIMER_FAST) {
@@ -2294,8 +2294,7 @@ static int __init isdn_init(void)
printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
return -EIO;
}
- init_timer(&dev->timer);
- dev->timer.function = isdn_timer_funct;
+ timer_setup(&dev->timer, isdn_timer_funct, 0);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->timerlock);
#ifdef MODULE
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index f63a110b7bcb..c138f66f2659 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1509,9 +1509,9 @@ static int isdn_net_ioctl(struct net_device *dev,
/* called via cisco_timer.function */
static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
{
- isdn_net_local *lp = (isdn_net_local *) data;
+ isdn_net_local *lp = from_timer(lp, t, cisco_timer);
struct sk_buff *skb;
unsigned char *p;
unsigned long last_cisco_myseq = lp->cisco_myseq;
@@ -1615,9 +1615,8 @@ isdn_net_ciscohdlck_connected(isdn_net_local *lp)
/* send slarp request because interface/seq.no.s reset */
isdn_net_ciscohdlck_slarp_send_request(lp);
- init_timer(&lp->cisco_timer);
- lp->cisco_timer.data = (unsigned long) lp;
- lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+ timer_setup(&lp->cisco_timer,
+ isdn_net_ciscohdlck_slarp_send_keepalive, 0);
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
add_timer(&lp->cisco_timer);
}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index cd2b3c69771a..e07aefb9151d 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -50,7 +50,7 @@ static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
unsigned char id);
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
@@ -2327,10 +2327,10 @@ static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
/* The timer callback function which is called when a ResetReq has timed out,
aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
{
struct ippp_ccp_reset_state *rs =
- (struct ippp_ccp_reset_state *)closure;
+ from_timer(rs, t, timer);
if (!rs) {
printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
@@ -2376,8 +2376,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
rs->state = CCPResetIdle;
rs->is = is;
rs->id = id;
- setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
- (unsigned long)rs);
+ timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
is->reset->rs[id] = rs;
}
return rs;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d30130c8d0f3..960f26348bb5 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -541,9 +541,9 @@ isdn_tty_senddown(modem_info *info)
* into the tty's buffer.
*/
static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
{
- modem_info *info = (modem_info *) data;
+ modem_info *info = from_timer(info, t, nc_timer);
isdn_tty_modem_result(RESULT_NO_CARRIER, info);
}
@@ -1812,8 +1812,7 @@ isdn_tty_modem_init(void)
info->isdn_channel = -1;
info->drv_index = -1;
info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
- setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
- (unsigned long)info);
+ timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
skb_queue_head_init(&info->xmit_queue);
#ifdef CONFIG_ISDN_AUDIO
skb_queue_head_init(&info->dtmf_queue);
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index e97232646ba1..a4597e96c916 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -90,9 +90,9 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
* data = pointer to card struct, set by kernel timer.data
*/
static void
-isdnloop_pollbchan(unsigned long data)
+isdnloop_pollbchan(struct timer_list *t)
{
- isdnloop_card *card = (isdnloop_card *) data;
+ isdnloop_card *card = from_timer(card, t, rb_timer);
unsigned long flags;
if (card->flags & ISDNLOOP_FLAGS_B1ACTIVE)
@@ -305,9 +305,9 @@ isdnloop_putmsg(isdnloop_card *card, unsigned char c)
* data = pointer to card struct
*/
static void
-isdnloop_polldchan(unsigned long data)
+isdnloop_polldchan(struct timer_list *t)
{
- isdnloop_card *card = (isdnloop_card *) data;
+ isdnloop_card *card = from_timer(card, t, st_timer);
struct sk_buff *skb;
int avail;
int left;
@@ -373,8 +373,6 @@ isdnloop_polldchan(unsigned long data)
card->flags |= ISDNLOOP_FLAGS_RBTIMER;
spin_lock_irqsave(&card->isdnloop_lock, flags);
del_timer(&card->rb_timer);
- card->rb_timer.function = isdnloop_pollbchan;
- card->rb_timer.data = (unsigned long) card;
card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
add_timer(&card->rb_timer);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
@@ -588,9 +586,10 @@ isdnloop_atimeout(isdnloop_card *card, int ch)
* Wrapper for isdnloop_atimeout().
*/
static void
-isdnloop_atimeout0(unsigned long data)
+isdnloop_atimeout0(struct timer_list *t)
{
- isdnloop_card *card = (isdnloop_card *) data;
+ isdnloop_card *card = from_timer(card, t, c_timer[0]);
+
isdnloop_atimeout(card, 0);
}
@@ -598,9 +597,10 @@ isdnloop_atimeout0(unsigned long data)
* Wrapper for isdnloop_atimeout().
*/
static void
-isdnloop_atimeout1(unsigned long data)
+isdnloop_atimeout1(struct timer_list *t)
{
- isdnloop_card *card = (isdnloop_card *) data;
+ isdnloop_card *card = from_timer(card, t, c_timer[1]);
+
isdnloop_atimeout(card, 1);
}
@@ -617,13 +617,9 @@ isdnloop_start_ctimer(isdnloop_card *card, int ch)
unsigned long flags;
spin_lock_irqsave(&card->isdnloop_lock, flags);
- init_timer(&card->c_timer[ch]);
+ timer_setup(&card->c_timer[ch], ch ? isdnloop_atimeout1
+ : isdnloop_atimeout0, 0);
card->c_timer[ch].expires = jiffies + ISDNLOOP_TIMER_ALERTWAIT;
- if (ch)
- card->c_timer[ch].function = isdnloop_atimeout1;
- else
- card->c_timer[ch].function = isdnloop_atimeout0;
- card->c_timer[ch].data = (unsigned long) card;
add_timer(&card->c_timer[ch]);
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
@@ -1113,10 +1109,9 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
sdef.ptype);
return -EINVAL;
}
- init_timer(&card->st_timer);
+ timer_setup(&card->rb_timer, isdnloop_pollbchan, 0);
+ timer_setup(&card->st_timer, isdnloop_polldchan, 0);
card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
- card->st_timer.function = isdnloop_polldchan;
- card->st_timer.data = (unsigned long) card;
add_timer(&card->st_timer);
card->flags |= ISDNLOOP_FLAGS_RUNNING;
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
diff --git a/drivers/isdn/mISDN/Makefile b/drivers/isdn/mISDN/Makefile
index 0a6bd2a9e730..f3b4b7fa85f8 100644
--- a/drivers/isdn/mISDN/Makefile
+++ b/drivers/isdn/mISDN/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the modular ISDN driver
#
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index fc1733a08845..fa09d511a8ed 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -259,7 +259,7 @@ extern u8 *dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len,
extern int dsp_tone(struct dsp *dsp, int tone);
extern void dsp_tone_copy(struct dsp *dsp, u8 *data, int len);
-extern void dsp_tone_timeout(void *arg);
+extern void dsp_tone_timeout(struct timer_list *t);
extern void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len);
extern void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len);
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 880e9d367a39..cd036e87335a 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1092,7 +1092,7 @@ dspcreate(struct channel_req *crq)
ndsp->pcm_bank_tx = -1;
ndsp->hfc_conf = -1; /* current conference number */
/* set tone timer */
- setup_timer(&ndsp->tone.tl, (void *)dsp_tone_timeout, (long)ndsp);
+ timer_setup(&ndsp->tone.tl, dsp_tone_timeout, 0);
if (dtmfthreshold < 20 || dtmfthreshold > 500)
dtmfthreshold = 200;
@@ -1202,9 +1202,7 @@ static int __init dsp_init(void)
}
/* set sample timer */
- dsp_spl_tl.function = (void *)dsp_cmx_send;
- dsp_spl_tl.data = 0;
- init_timer(&dsp_spl_tl);
+ timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
dsp_spl_tl.expires = jiffies + dsp_tics;
dsp_spl_jiffies = dsp_spl_tl.expires;
add_timer(&dsp_spl_tl);
diff --git a/drivers/isdn/mISDN/dsp_hwec.h b/drivers/isdn/mISDN/dsp_hwec.h
index bbca1eb5a888..c9cb0ea249da 100644
--- a/drivers/isdn/mISDN/dsp_hwec.h
+++ b/drivers/isdn/mISDN/dsp_hwec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* dsp_hwec.h
*/
diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
index 057e0d6a369b..8389e2105cdc 100644
--- a/drivers/isdn/mISDN/dsp_tones.c
+++ b/drivers/isdn/mISDN/dsp_tones.c
@@ -457,9 +457,9 @@ dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
* timer expires *
*****************/
void
-dsp_tone_timeout(void *arg)
+dsp_tone_timeout(struct timer_list *t)
{
- struct dsp *dsp = arg;
+ struct dsp *dsp = from_timer(dsp, t, tone.tl);
struct dsp_tone *tone = &dsp->tone;
struct pattern *pat = (struct pattern *)tone->pattern;
int index = tone->index;
@@ -478,7 +478,6 @@ dsp_tone_timeout(void *arg)
else
dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
/* set timer */
- init_timer(&tone->tl);
tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
add_timer(&tone->tl);
}
@@ -541,7 +540,6 @@ dsp_tone(struct dsp *dsp, int tone)
/* set timer */
if (timer_pending(&tonet->tl))
del_timer(&tonet->tl);
- init_timer(&tonet->tl);
tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
add_timer(&tonet->tl);
} else {
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 92e6570b1143..cabcb906e0b5 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -100,8 +100,9 @@ mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
EXPORT_SYMBOL(mISDN_FsmChangeState);
static void
-FsmExpireTimer(struct FsmTimer *ft)
+FsmExpireTimer(struct timer_list *t)
{
+ struct FsmTimer *ft = from_timer(ft, t, tl);
#if FSM_TIMER_DEBUG
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
@@ -117,7 +118,7 @@ mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
if (ft->fi->debug)
ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
#endif
- setup_timer(&ft->tl, (void *)FsmExpireTimer, (long)ft);
+ timer_setup(&ft->tl, FsmExpireTimer, 0);
}
EXPORT_SYMBOL(mISDN_FsmInitTimer);
@@ -153,7 +154,6 @@ mISDN_FsmAddTimer(struct FsmTimer *ft,
}
return -1;
}
- init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -175,7 +175,6 @@ mISDN_FsmRestartTimer(struct FsmTimer *ft,
if (timer_pending(&ft->tl))
del_timer(&ft->tl);
- init_timer(&ft->tl);
ft->event = event;
ft->arg = arg;
ft->tl.expires = jiffies + (millisec * HZ) / 1000;
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 661c060ada49..7ea10db20e3a 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* see notice in l1oip.c
*/
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 6be2041248d3..e3654782a3e2 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -440,14 +440,8 @@ l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
#ifdef REORDER_DEBUG
if (hc->chan[channel].disorder_flag) {
- struct sk_buff *skb;
- int cnt;
- skb = hc->chan[channel].disorder_skb;
- hc->chan[channel].disorder_skb = nskb;
- nskb = skb;
- cnt = hc->chan[channel].disorder_cnt;
- hc->chan[channel].disorder_cnt = rx_counter;
- rx_counter = cnt;
+ swap(hc->chan[channel].disorder_skb, nskb);
+ swap(hc->chan[channel].disorder_cnt, rx_counter);
}
hc->chan[channel].disorder_flag ^= 1;
if (nskb)
@@ -842,17 +836,18 @@ l1oip_send_bh(struct work_struct *work)
* timer stuff
*/
static void
-l1oip_keepalive(void *data)
+l1oip_keepalive(struct timer_list *t)
{
- struct l1oip *hc = (struct l1oip *)data;
+ struct l1oip *hc = from_timer(hc, t, keep_tl);
schedule_work(&hc->workq);
}
static void
-l1oip_timeout(void *data)
+l1oip_timeout(struct timer_list *t)
{
- struct l1oip *hc = (struct l1oip *)data;
+ struct l1oip *hc = from_timer(hc, t,
+ timeout_tl);
struct dchannel *dch = hc->chan[hc->d_idx].dch;
if (debug & DEBUG_L1OIP_MSG)
@@ -1437,13 +1432,11 @@ init_card(struct l1oip *hc, int pri, int bundle)
if (ret)
return ret;
- hc->keep_tl.function = (void *)l1oip_keepalive;
- hc->keep_tl.data = (ulong)hc;
- init_timer(&hc->keep_tl);
+ timer_setup(&hc->keep_tl, l1oip_keepalive, 0);
hc->keep_tl.expires = jiffies + 2 * HZ; /* two seconds first time */
add_timer(&hc->keep_tl);
- setup_timer(&hc->timeout_tl, (void *)l1oip_timeout, (ulong)hc);
+ timer_setup(&hc->timeout_tl, l1oip_timeout, 0);
hc->timeout_on = 0; /* state that we have timer off */
return 0;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index b1e135fc1fb5..c50a34340f67 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -162,9 +162,9 @@ mISDN_poll(struct file *filep, poll_table *wait)
}
static void
-dev_expire_timer(unsigned long data)
+dev_expire_timer(struct timer_list *t)
{
- struct mISDNtimer *timer = (void *)data;
+ struct mISDNtimer *timer = from_timer(timer, t, tl);
u_long flags;
spin_lock_irqsave(&timer->dev->lock, flags);
@@ -189,7 +189,7 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
if (!timer)
return -ENOMEM;
timer->dev = dev;
- setup_timer(&timer->tl, dev_expire_timer, (long)timer);
+ timer_setup(&timer->tl, dev_expire_timer, 0);
spin_lock_irq(&dev->lock);
id = timer->id = dev->next_id++;
if (dev->next_id < 0)
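All of the mISDN hunks above (dsp_core, dsp_tones, fsm, l1oip, timerdev) and the LED hunks further down apply the same mechanical conversion: the old setup_timer()/init_timer() calls, which passed a context pointer through the timer's unsigned long data field with a cast, become timer_setup(), and the callback now takes a struct timer_list * and recovers its owning object with from_timer(), a container_of() wrapper. A minimal sketch of the pattern, with hypothetical names (struct foo, foo_timeout) rather than code from any driver above:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list tl;		/* timer embedded in the owning object */
	int ticks;
};

/* New-style callback: gets the timer_list instead of an unsigned long. */
static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, tl);	/* container_of() on the 'tl' member */

	f->ticks++;
	mod_timer(&f->tl, jiffies + HZ);	/* re-arm one second later */
}

static void foo_start(struct foo *f)
{
	/* Replaces setup_timer(&f->tl, (void *)foo_timeout, (long)f). */
	timer_setup(&f->tl, foo_timeout, 0);
	f->tl.expires = jiffies + HZ;
	add_timer(&f->tl);
}

Because the callback no longer carries an arbitrary pointer, callbacks that used to receive something other than the timer's container (the heartbeat and transient triggers below took the led_classdev) gain a back-pointer field in the containing struct so that from_timer() plus that field still reaches the object they need.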
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 52ea34e337cd..318a28fd58fe 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -57,6 +57,16 @@ config LEDS_AAT1290
depends on PINCTRL
help
This option enables support for the LEDs on the AAT1290.
+config LEDS_APU
+ tristate "Front panel LED support for PC Engines APU/APU2 boards"
+ depends on LEDS_CLASS
+ depends on X86 && DMI
+ help
+ This driver makes the PC Engines APU/APU2 front panel LEDs
+ accessible from userspace programs through the LED subsystem.
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-apu.
config LEDS_AS3645A
tristate "AS3645A LED flash controller support"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 7d7b26552923..a2a6b5a4f86d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# LED Core
obj-$(CONFIG_NEW_LEDS) += led-core.o
@@ -8,6 +9,7 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index ef1360445413..fd83c7f77a95 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -45,9 +45,9 @@ static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
return led_cdev->brightness_set_blocking(led_cdev, value);
}
-static void led_timer_function(unsigned long data)
+static void led_timer_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (void *)data;
+ struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer);
unsigned long brightness;
unsigned long delay;
@@ -178,8 +178,7 @@ void led_init_core(struct led_classdev *led_cdev)
{
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
- setup_timer(&led_cdev->blink_timer, led_timer_function,
- (unsigned long)led_cdev);
+ timer_setup(&led_cdev->blink_timer, led_timer_function, 0);
}
EXPORT_SYMBOL_GPL(led_init_core);
diff --git a/drivers/leds/leds-apu.c b/drivers/leds/leds-apu.c
new file mode 100644
index 000000000000..74820aab9497
--- /dev/null
+++ b/drivers/leds/leds-apu.c
@@ -0,0 +1,278 @@
+/*
+ * drivers/leds/leds-apu.c
+ * Copyright (C) 2017 Alan Mizrahi, alan at mizrahi dot com dot ve
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define APU1_FCH_ACPI_MMIO_BASE 0xFED80000
+#define APU1_FCH_GPIO_BASE (APU1_FCH_ACPI_MMIO_BASE + 0x01BD)
+#define APU1_LEDON 0x08
+#define APU1_LEDOFF 0xC8
+#define APU1_NUM_GPIO 3
+#define APU1_IOSIZE sizeof(u8)
+
+#define APU2_FCH_ACPI_MMIO_BASE 0xFED80000
+#define APU2_FCH_GPIO_BASE (APU2_FCH_ACPI_MMIO_BASE + 0x1500)
+#define APU2_GPIO_BIT_WRITE 22
+#define APU2_APU2_NUM_GPIO 4
+#define APU2_IOSIZE sizeof(u32)
+
+/* LED access parameters */
+struct apu_param {
+ void __iomem *addr; /* for ioread/iowrite */
+};
+
+/* LED private data */
+struct apu_led_priv {
+ struct led_classdev cdev;
+ struct apu_param param;
+};
+#define cdev_to_priv(c) container_of(c, struct apu_led_priv, cdev)
+
+/* LED profile */
+struct apu_led_profile {
+ const char *name;
+ enum led_brightness brightness;
+ unsigned long offset; /* for devm_ioremap */
+};
+
+/* Supported platform types */
+enum apu_led_platform_types {
+ APU1_LED_PLATFORM,
+ APU2_LED_PLATFORM,
+};
+
+struct apu_led_pdata {
+ struct platform_device *pdev;
+ struct apu_led_priv *pled;
+ const struct apu_led_profile *profile;
+ enum apu_led_platform_types platform;
+ int num_led_instances;
+ int iosize; /* for devm_ioremap() */
+ spinlock_t lock;
+};
+
+static struct apu_led_pdata *apu_led;
+
+static const struct apu_led_profile apu1_led_profile[] = {
+ { "apu:green:1", LED_ON, APU1_FCH_GPIO_BASE + 0 * APU1_IOSIZE },
+ { "apu:green:2", LED_OFF, APU1_FCH_GPIO_BASE + 1 * APU1_IOSIZE },
+ { "apu:green:3", LED_OFF, APU1_FCH_GPIO_BASE + 2 * APU1_IOSIZE },
+};
+
+static const struct apu_led_profile apu2_led_profile[] = {
+ { "apu2:green:1", LED_ON, APU2_FCH_GPIO_BASE + 68 * APU2_IOSIZE },
+ { "apu2:green:2", LED_OFF, APU2_FCH_GPIO_BASE + 69 * APU2_IOSIZE },
+ { "apu2:green:3", LED_OFF, APU2_FCH_GPIO_BASE + 70 * APU2_IOSIZE },
+};
+
+static const struct dmi_system_id apu_led_dmi_table[] __initconst = {
+ {
+ .ident = "apu",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "APU")
+ }
+ },
+ {
+ .ident = "apu2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"),
+ DMI_MATCH(DMI_BOARD_NAME, "APU2")
+ }
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, apu_led_dmi_table);
+
+static void apu1_led_brightness_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct apu_led_priv *pled = cdev_to_priv(led);
+
+ spin_lock(&apu_led->lock);
+ iowrite8(value ? APU1_LEDON : APU1_LEDOFF, pled->param.addr);
+ spin_unlock(&apu_led->lock);
+}
+
+static void apu2_led_brightness_set(struct led_classdev *led, enum led_brightness value)
+{
+ struct apu_led_priv *pled = cdev_to_priv(led);
+ u32 value_new;
+
+ spin_lock(&apu_led->lock);
+
+ value_new = ioread32(pled->param.addr);
+
+ if (value)
+ value_new &= ~BIT(APU2_GPIO_BIT_WRITE);
+ else
+ value_new |= BIT(APU2_GPIO_BIT_WRITE);
+
+ iowrite32(value_new, pled->param.addr);
+
+ spin_unlock(&apu_led->lock);
+}
+
+static int apu_led_config(struct device *dev, struct apu_led_pdata *apuld)
+{
+ int i;
+ int err;
+
+ apu_led->pled = devm_kzalloc(dev,
+ sizeof(struct apu_led_priv) * apu_led->num_led_instances,
+ GFP_KERNEL);
+
+ if (!apu_led->pled)
+ return -ENOMEM;
+
+ for (i = 0; i < apu_led->num_led_instances; i++) {
+ struct apu_led_priv *pled = &apu_led->pled[i];
+ struct led_classdev *led_cdev = &pled->cdev;
+
+ led_cdev->name = apu_led->profile[i].name;
+ led_cdev->brightness = apu_led->profile[i].brightness;
+ led_cdev->max_brightness = 1;
+ led_cdev->flags = LED_CORE_SUSPENDRESUME;
+ if (apu_led->platform == APU1_LED_PLATFORM)
+ led_cdev->brightness_set = apu1_led_brightness_set;
+ else if (apu_led->platform == APU2_LED_PLATFORM)
+ led_cdev->brightness_set = apu2_led_brightness_set;
+
+ pled->param.addr = devm_ioremap(dev,
+ apu_led->profile[i].offset, apu_led->iosize);
+ if (!pled->param.addr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = led_classdev_register(dev, led_cdev);
+ if (err)
+ goto error;
+
+ led_cdev->brightness_set(led_cdev, apu_led->profile[i].brightness);
+ }
+
+ return 0;
+
+error:
+ while (i-- > 0)
+ led_classdev_unregister(&apu_led->pled[i].cdev);
+
+ return err;
+}
+
+static int __init apu_led_probe(struct platform_device *pdev)
+{
+ apu_led = devm_kzalloc(&pdev->dev, sizeof(*apu_led), GFP_KERNEL);
+
+ if (!apu_led)
+ return -ENOMEM;
+
+ apu_led->pdev = pdev;
+
+ if (dmi_match(DMI_BOARD_NAME, "APU")) {
+ apu_led->profile = apu1_led_profile;
+ apu_led->platform = APU1_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu1_led_profile);
+ apu_led->iosize = APU1_IOSIZE;
+ } else if (dmi_match(DMI_BOARD_NAME, "APU2")) {
+ apu_led->profile = apu2_led_profile;
+ apu_led->platform = APU2_LED_PLATFORM;
+ apu_led->num_led_instances = ARRAY_SIZE(apu2_led_profile);
+ apu_led->iosize = APU2_IOSIZE;
+ }
+
+ spin_lock_init(&apu_led->lock);
+ return apu_led_config(&pdev->dev, apu_led);
+}
+
+static struct platform_driver apu_led_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int __init apu_led_init(void)
+{
+ struct platform_device *pdev;
+ int err;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "PC Engines")) {
+ pr_err("No PC Engines board detected\n");
+ return -ENODEV;
+ }
+ if (!(dmi_match(DMI_PRODUCT_NAME, "APU") || dmi_match(DMI_PRODUCT_NAME, "APU2"))) {
+ pr_err("Unknown PC Engines board: %s\n",
+ dmi_get_system_info(DMI_PRODUCT_NAME));
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ pr_err("Device allocation failed\n");
+ return PTR_ERR(pdev);
+ }
+
+ err = platform_driver_probe(&apu_led_driver, apu_led_probe);
+ if (err) {
+ pr_err("Probe platform driver failed\n");
+ platform_device_unregister(pdev);
+ }
+
+ return err;
+}
+
+static void __exit apu_led_exit(void)
+{
+ int i;
+
+ for (i = 0; i < apu_led->num_led_instances; i++)
+ led_classdev_unregister(&apu_led->pled[i].cdev);
+
+ platform_device_unregister(apu_led->pdev);
+ platform_driver_unregister(&apu_led_driver);
+}
+
+module_init(apu_led_init);
+module_exit(apu_led_exit);
+
+MODULE_AUTHOR("Alan Mizrahi");
+MODULE_DESCRIPTION("PC Engines APU family LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:leds_apu");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 924e50aefb00..52b6f529e278 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -323,7 +323,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
if (status != LP5523_ENG_STATUS_MASK) {
dev_err(&chip->cl->dev,
- "cound not configure LED engine, status = 0x%.2x\n",
+ "could not configure LED engine, status = 0x%.2x\n",
status);
ret = -1;
}
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 905729191d3e..78183f90820e 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -61,6 +61,10 @@
#define PCA955X_LS_BLINK0 0x2 /* Blink at PWM0 rate */
#define PCA955X_LS_BLINK1 0x3 /* Blink at PWM1 rate */
+#define PCA955X_GPIO_INPUT LED_OFF
+#define PCA955X_GPIO_HIGH LED_OFF
+#define PCA955X_GPIO_LOW LED_FULL
+
enum pca955x_type {
pca9550,
pca9551,
@@ -329,9 +333,9 @@ static int pca955x_set_value(struct gpio_chip *gc, unsigned int offset,
struct pca955x_led *led = &pca955x->leds[offset];
if (val)
- return pca955x_led_set(&led->led_cdev, LED_FULL);
- else
- return pca955x_led_set(&led->led_cdev, LED_OFF);
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_HIGH);
+
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_LOW);
}
static void pca955x_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
@@ -355,8 +359,11 @@ static int pca955x_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
static int pca955x_gpio_direction_input(struct gpio_chip *gc,
unsigned int offset)
{
- /* To use as input ensure pin is not driven */
- return pca955x_set_value(gc, offset, 0);
+ struct pca955x *pca955x = gpiochip_get_data(gc);
+ struct pca955x_led *led = &pca955x->leds[offset];
+
+ /* To use as input ensure pin is not driven. */
+ return pca955x_led_set(&led->led_cdev, PCA955X_GPIO_INPUT);
}
static int pca955x_gpio_direction_output(struct gpio_chip *gc,
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 45222a7f4f75..c12c16fb1b9c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -715,7 +715,7 @@ tca6507_led_dt_init(struct i2c_client *client)
if (of_property_match_string(child, "compatible", "gpio") >= 0)
led.flags |= TCA6507_MAKE_GPIO;
ret = of_property_read_u32(child, "reg", &reg);
- if (ret != 0 || reg < 0 || reg >= NUM_LEDS)
+ if (ret != 0 || reg >= NUM_LEDS)
continue;
tca_leds[reg] = led;
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 3f9ddb9fafa7..bb090216b4dc 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -77,6 +77,15 @@ config LEDS_TRIGGER_CPU
If unsure, say N.
+config LEDS_TRIGGER_ACTIVITY
+ tristate "LED activity Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by the immediate CPU usage.
+ The flash frequency and duty cycle vary from faint flashes to
+ intense brightness depending on the instantaneous CPU load.
+ If unsure, say N.
+
config LEDS_TRIGGER_GPIO
tristate "LED GPIO Trigger"
depends on LEDS_TRIGGERS
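Like any LED trigger, the new activity trigger is attached from userspace by writing its name to the LED's trigger attribute; ledtrig-activity.c below also creates an invert attribute once the trigger is active. A hedged sketch using the standard LED class sysfs paths (the LED name is only an example):

#include <stdio.h>

static int write_attr(const char *led, const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/leds/%s/%s", led, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Attach the trigger, then (optionally) invert the blink pattern. */
	if (write_attr("apu:green:1", "trigger", "activity\n"))
		return 1;
	return write_attr("apu:green:1", "invert", "1\n") ? 1 : 0;
}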
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index a72c43cffebf..4a8b6cff7761 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
obj-$(CONFIG_LEDS_TRIGGER_DISK) += ledtrig-disk.o
@@ -6,6 +7,7 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
+obj-$(CONFIG_LEDS_TRIGGER_ACTIVITY) += ledtrig-activity.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
diff --git a/drivers/leds/trigger/ledtrig-activity.c b/drivers/leds/trigger/ledtrig-activity.c
new file mode 100644
index 000000000000..5081894082bd
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-activity.c
@@ -0,0 +1,275 @@
+/*
+ * Activity LED trigger
+ *
+ * Copyright (C) 2017 Willy Tarreau <w@1wt.eu>
+ * Partially based on Atsushi Nemoto's ledtrig-heartbeat.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+static int panic_detected;
+
+struct activity_data {
+ struct timer_list timer;
+ struct led_classdev *led_cdev;
+ u64 last_used;
+ u64 last_boot;
+ int time_left;
+ int state;
+ int invert;
+};
+
+static void led_activity_function(struct timer_list *t)
+{
+ struct activity_data *activity_data = from_timer(activity_data, t,
+ timer);
+ struct led_classdev *led_cdev = activity_data->led_cdev;
+ struct timespec boot_time;
+ unsigned int target;
+ unsigned int usage;
+ int delay;
+ u64 curr_used;
+ u64 curr_boot;
+ s32 diff_used;
+ s32 diff_boot;
+ int cpus;
+ int i;
+
+ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags))
+ led_cdev->blink_brightness = led_cdev->new_blink_brightness;
+
+ if (unlikely(panic_detected)) {
+ /* full brightness in case of panic */
+ led_set_brightness_nosleep(led_cdev, led_cdev->blink_brightness);
+ return;
+ }
+
+ get_monotonic_boottime(&boot_time);
+
+ cpus = 0;
+ curr_used = 0;
+
+ for_each_possible_cpu(i) {
+ curr_used += kcpustat_cpu(i).cpustat[CPUTIME_USER]
+ + kcpustat_cpu(i).cpustat[CPUTIME_NICE]
+ + kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]
+ + kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]
+ + kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+ cpus++;
+ }
+
+ /* We come here every 100ms in the worst case, so that's 100M ns of
+ * cumulated time. By dividing by 2^16, we get the time resolution
+ * down to 16us, ensuring we won't overflow 32-bit computations below
+ * even up to 3k CPUs, while keeping divides cheap on smaller systems.
+ */
+ curr_boot = timespec_to_ns(&boot_time) * cpus;
+ diff_boot = (curr_boot - activity_data->last_boot) >> 16;
+ diff_used = (curr_used - activity_data->last_used) >> 16;
+ activity_data->last_boot = curr_boot;
+ activity_data->last_used = curr_used;
+
+ if (diff_boot <= 0 || diff_used < 0)
+ usage = 0;
+ else if (diff_used >= diff_boot)
+ usage = 100;
+ else
+ usage = 100 * diff_used / diff_boot;
+
+ /*
+ * Now we know the total boot time multiplied by the number of CPUs, and
+ * the total busy (non-idle) time for all CPUs. We'll compare how they
+ * evolved since the last call. The % of overall CPU usage is :
+ *
+ * delta_used / delta_boot
+ *
+ * What we want is that when the CPU usage is zero, the LED must blink
+ * slowly with very faint flashes that are detectable but not disturbing
+ * (typically 10ms every second, or 10ms ON, 990ms OFF). Then we want
+ * blinking frequency to increase up to the point where the load is
+ * enough to saturate one core in multi-core systems or 50% in single
+ * core systems. At this point it should reach 10 Hz with a 10/90 duty
+ * cycle (10ms ON, 90ms OFF). After this point, the blinking frequency
+ * remains stable (10 Hz) and only the duty cycle increases to report
+ * the activity, up to the point where we have 90ms ON, 10ms OFF when
+ * all cores are saturated. It's important that the LED never stays in
+ * a steady state so that it's easy to distinguish an idle or saturated
+ * machine from a hung one.
+ *
+ * This gives us :
+ * - a target CPU usage of min(50%, 100%/#CPU) for a 10% duty cycle
+ * (10ms ON, 90ms OFF)
+ * - below target :
+ * ON_ms = 10
+ * OFF_ms = 90 + (1 - usage/target) * 900
+ * - above target :
+ * ON_ms = 10 + (usage-target)/(100%-target) * 80
+ * OFF_ms = 90 - (usage-target)/(100%-target) * 80
+ *
+ * In order to keep a good responsiveness, we cap the sleep time to
+ * 100 ms and keep track of the sleep time left. This allows us to
+ * quickly change it if needed.
+ */
+
+ activity_data->time_left -= 100;
+ if (activity_data->time_left <= 0) {
+ activity_data->time_left = 0;
+ activity_data->state = !activity_data->state;
+ led_set_brightness_nosleep(led_cdev,
+ (activity_data->state ^ activity_data->invert) ?
+ led_cdev->blink_brightness : LED_OFF);
+ }
+
+ target = (cpus > 1) ? (100 / cpus) : 50;
+
+ if (usage < target)
+ delay = activity_data->state ?
+ 10 : /* ON */
+ 990 - 900 * usage / target; /* OFF */
+ else
+ delay = activity_data->state ?
+ 10 + 80 * (usage - target) / (100 - target) : /* ON */
+ 90 - 80 * (usage - target) / (100 - target); /* OFF */
+
+
+ if (!activity_data->time_left || delay <= activity_data->time_left)
+ activity_data->time_left = delay;
+
+ delay = min_t(int, activity_data->time_left, 100);
+ mod_timer(&activity_data->timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static ssize_t led_invert_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct activity_data *activity_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n", activity_data->invert);
+}
+
+static ssize_t led_invert_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct activity_data *activity_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ activity_data->invert = !!state;
+
+ return size;
+}
+
+static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
+
+static void activity_activate(struct led_classdev *led_cdev)
+{
+ struct activity_data *activity_data;
+ int rc;
+
+ activity_data = kzalloc(sizeof(*activity_data), GFP_KERNEL);
+ if (!activity_data)
+ return;
+
+ led_cdev->trigger_data = activity_data;
+ rc = device_create_file(led_cdev->dev, &dev_attr_invert);
+ if (rc) {
+ kfree(led_cdev->trigger_data);
+ return;
+ }
+
+ activity_data->led_cdev = led_cdev;
+ timer_setup(&activity_data->timer, led_activity_function, 0);
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+ led_activity_function(&activity_data->timer);
+ set_bit(LED_BLINK_SW, &led_cdev->work_flags);
+ led_cdev->activated = true;
+}
+
+static void activity_deactivate(struct led_classdev *led_cdev)
+{
+ struct activity_data *activity_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&activity_data->timer);
+ device_remove_file(led_cdev->dev, &dev_attr_invert);
+ kfree(activity_data);
+ clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
+ led_cdev->activated = false;
+ }
+}
+
+static struct led_trigger activity_led_trigger = {
+ .name = "activity",
+ .activate = activity_activate,
+ .deactivate = activity_deactivate,
+};
+
+static int activity_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&activity_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static int activity_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ panic_detected = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block activity_reboot_nb = {
+ .notifier_call = activity_reboot_notifier,
+};
+
+static struct notifier_block activity_panic_nb = {
+ .notifier_call = activity_panic_notifier,
+};
+
+static int __init activity_init(void)
+{
+ int rc = led_trigger_register(&activity_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &activity_panic_nb);
+ register_reboot_notifier(&activity_reboot_nb);
+ }
+ return rc;
+}
+
+static void __exit activity_exit(void)
+{
+ unregister_reboot_notifier(&activity_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &activity_panic_nb);
+ led_trigger_unregister(&activity_led_trigger);
+}
+
+module_init(activity_init);
+module_exit(activity_exit);
+
+MODULE_AUTHOR("Willy Tarreau <w@1wt.eu>");
+MODULE_DESCRIPTION("Activity LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index e95ea65380c8..f0896de410b8 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -25,19 +25,23 @@
static int panic_heartbeats;
struct heartbeat_trig_data {
+ struct led_classdev *led_cdev;
unsigned int phase;
unsigned int period;
struct timer_list timer;
unsigned int invert;
};
-static void led_heartbeat_function(unsigned long data)
+static void led_heartbeat_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (struct led_classdev *) data;
- struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
+ struct heartbeat_trig_data *heartbeat_data =
+ from_timer(heartbeat_data, t, timer);
+ struct led_classdev *led_cdev;
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
+ led_cdev = heartbeat_data->led_cdev;
+
if (unlikely(panic_heartbeats)) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
return;
@@ -127,18 +131,18 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
return;
led_cdev->trigger_data = heartbeat_data;
+ heartbeat_data->led_cdev = led_cdev;
rc = device_create_file(led_cdev->dev, &dev_attr_invert);
if (rc) {
kfree(led_cdev->trigger_data);
return;
}
- setup_timer(&heartbeat_data->timer,
- led_heartbeat_function, (unsigned long) led_cdev);
+ timer_setup(&heartbeat_data->timer, led_heartbeat_function, 0);
heartbeat_data->phase = 0;
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
- led_heartbeat_function(heartbeat_data->timer.data);
+ led_heartbeat_function(&heartbeat_data->timer);
set_bit(LED_BLINK_SW, &led_cdev->work_flags);
led_cdev->activated = true;
}
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7e6011bd3646..7acce64b692a 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -33,12 +33,14 @@ struct transient_trig_data {
int restore_state;
unsigned long duration;
struct timer_list timer;
+ struct led_classdev *led_cdev;
};
-static void transient_timer_function(unsigned long data)
+static void transient_timer_function(struct timer_list *t)
{
- struct led_classdev *led_cdev = (struct led_classdev *) data;
- struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ struct transient_trig_data *transient_data =
+ from_timer(transient_data, t, timer);
+ struct led_classdev *led_cdev = transient_data->led_cdev;
transient_data->activate = 0;
led_set_brightness_nosleep(led_cdev, transient_data->restore_state);
@@ -169,6 +171,7 @@ static void transient_trig_activate(struct led_classdev *led_cdev)
return;
}
led_cdev->trigger_data = tdata;
+ tdata->led_cdev = led_cdev;
rc = device_create_file(led_cdev->dev, &dev_attr_activate);
if (rc)
@@ -182,8 +185,7 @@ static void transient_trig_activate(struct led_classdev *led_cdev)
if (rc)
goto err_out_state;
- setup_timer(&tdata->timer, transient_timer_function,
- (unsigned long) led_cdev);
+ timer_setup(&tdata->timer, transient_timer_function, 0);
led_cdev->activated = true;
return;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index ead61a93cb4e..2a953efec4e1 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,8 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && HAS_DMA
+ depends on BLOCK && HAS_DMA && PCI
+ select BLK_DEV_NVME
help
Say Y here to enable Open-channel SSDs.
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 82d1a117fb27..2c3fd9d2c08c 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Open-Channel SSDs.
#
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index ddae430b6eae..83249b43dd06 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
@@ -138,7 +139,6 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
int prev_nr_luns;
int i, j;
- nr_chnls = nr_luns / dev->geo.luns_per_chnl;
nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
@@ -226,6 +226,24 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
+static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+{
+ struct nvm_tgt_type *tmp, *tt = NULL;
+
+ if (lock)
+ down_write(&nvm_tgtt_lock);
+
+ list_for_each_entry(tmp, &nvm_tgt_types, list)
+ if (!strcmp(name, tmp->name)) {
+ tt = tmp;
+ break;
+ }
+
+ if (lock)
+ up_write(&nvm_tgtt_lock);
+ return tt;
+}
+
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
struct nvm_ioctl_create_simple *s = &create->conf.s;
@@ -316,6 +334,8 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
list_add_tail(&t->list, &dev->targets);
mutex_unlock(&dev->mlock);
+ __module_get(tt->owner);
+
return 0;
err_sysfs:
if (tt->exit)
@@ -351,6 +371,7 @@ static void __nvm_remove_target(struct nvm_target *t)
nvm_remove_tgt_dev(t->dev, 1);
put_disk(tdisk);
+ module_put(t->type->owner);
list_del(&t->list);
kfree(t);
@@ -532,25 +553,6 @@ void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
}
EXPORT_SYMBOL(nvm_part_to_tgt);
-struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
-{
- struct nvm_tgt_type *tmp, *tt = NULL;
-
- if (lock)
- down_write(&nvm_tgtt_lock);
-
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
-
- if (lock)
- up_write(&nvm_tgtt_lock);
- return tt;
-}
-EXPORT_SYMBOL(nvm_find_target_type);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
@@ -571,9 +573,9 @@ void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
if (!tt)
return;
- down_write(&nvm_lock);
+ down_write(&nvm_tgtt_lock);
list_del(&tt->list);
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);
@@ -602,6 +604,52 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
return NULL;
}
+static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+ const struct ppa_addr *ppas, int nr_ppas)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_geo *geo = &tgt_dev->geo;
+ int i, plane_cnt, pl_idx;
+ struct ppa_addr ppa;
+
+ if (geo->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
+ rqd->nr_ppas = nr_ppas;
+ rqd->ppa_addr = ppas[0];
+
+ return 0;
+ }
+
+ rqd->nr_ppas = nr_ppas;
+ rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
+ if (!rqd->ppa_list) {
+ pr_err("nvm: failed to allocate dma memory\n");
+ return -ENOMEM;
+ }
+
+ plane_cnt = geo->plane_mode;
+ rqd->nr_ppas *= plane_cnt;
+
+ for (i = 0; i < nr_ppas; i++) {
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppa = ppas[i];
+ ppa.g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
+ }
+ }
+
+ return 0;
+}
+
+static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
+ struct nvm_rq *rqd)
+{
+ if (!rqd->ppa_list)
+ return;
+
+ nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
+}
+
+
int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
int nr_ppas, int type)
{
@@ -616,7 +664,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
memset(&rqd, 0, sizeof(struct nvm_rq));
- nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
+ nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
nvm_rq_tgt_to_dev(tgt_dev, &rqd);
ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
@@ -658,12 +706,25 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io);
-static void nvm_end_io_sync(struct nvm_rq *rqd)
+int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- struct completion *waiting = rqd->private;
+ struct nvm_dev *dev = tgt_dev->parent;
+ int ret;
- complete(waiting);
+ if (!dev->ops->submit_io_sync)
+ return -ENODEV;
+
+ nvm_rq_tgt_to_dev(tgt_dev, rqd);
+
+ rqd->dev = tgt_dev;
+
+ /* In case of error, fail with right address format */
+ ret = dev->ops->submit_io_sync(dev, rqd);
+ nvm_rq_dev_to_tgt(tgt_dev, rqd);
+
+ return ret;
}
+EXPORT_SYMBOL(nvm_submit_io_sync);
int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
int nr_ppas)
@@ -671,25 +732,21 @@ int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
struct nvm_geo *geo = &tgt_dev->geo;
struct nvm_rq rqd;
int ret;
- DECLARE_COMPLETION_ONSTACK(wait);
memset(&rqd, 0, sizeof(struct nvm_rq));
rqd.opcode = NVM_OP_ERASE;
- rqd.end_io = nvm_end_io_sync;
- rqd.private = &wait;
rqd.flags = geo->plane_mode >> 1;
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas, 1);
+ ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
if (ret)
return ret;
- ret = nvm_submit_io(tgt_dev, &rqd);
+ ret = nvm_submit_io_sync(tgt_dev, &rqd);
if (ret) {
pr_err("rrpr: erase I/O submission failed: %d\n", ret);
goto free_ppa_list;
}
- wait_for_completion_io(&wait);
free_ppa_list:
nvm_free_rqd_ppalist(tgt_dev, &rqd);
@@ -775,57 +832,6 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
}
EXPORT_SYMBOL(nvm_put_area);
-int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
- const struct ppa_addr *ppas, int nr_ppas, int vblk)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &tgt_dev->geo;
- int i, plane_cnt, pl_idx;
- struct ppa_addr ppa;
-
- if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
- rqd->nr_ppas = nr_ppas;
- rqd->ppa_addr = ppas[0];
-
- return 0;
- }
-
- rqd->nr_ppas = nr_ppas;
- rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("nvm: failed to allocate dma memory\n");
- return -ENOMEM;
- }
-
- if (!vblk) {
- for (i = 0; i < nr_ppas; i++)
- rqd->ppa_list[i] = ppas[i];
- } else {
- plane_cnt = geo->plane_mode;
- rqd->nr_ppas *= plane_cnt;
-
- for (i = 0; i < nr_ppas; i++) {
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
- ppa = ppas[i];
- ppa.g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
- }
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_set_rqd_ppalist);
-
-void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
-{
- if (!rqd->ppa_list)
- return;
-
- nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-}
-EXPORT_SYMBOL(nvm_free_rqd_ppalist);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -1177,7 +1183,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
info->version[1] = NVM_VERSION_MINOR;
info->version[2] = NVM_VERSION_PATCH;
- down_write(&nvm_lock);
+ down_write(&nvm_tgtt_lock);
list_for_each_entry(tt, &nvm_tgt_types, list) {
struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
@@ -1190,7 +1196,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
}
info->tgtsize = tgt_iter;
- up_write(&nvm_lock);
+ up_write(&nvm_tgtt_lock);
if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
kfree(info);
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 024a8fc93069..0d227ef7d1b9 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -43,8 +43,10 @@ retry:
if (unlikely(!bio_has_data(bio)))
goto out;
- w_ctx.flags = flags;
pblk_ppa_set_empty(&w_ctx.ppa);
+ w_ctx.flags = flags;
+ if (bio->bi_opf & REQ_PREFLUSH)
+ w_ctx.flags |= PBLK_FLUSH_ENTRY;
for (i = 0; i < nr_entries; i++) {
void *data = bio_data(bio);
@@ -73,12 +75,11 @@ out:
* On GC the incoming lbas are not necessarily sequential. Also, some of the
* lbas might not be valid entries, which are marked as empty by the GC thread
*/
-int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
- unsigned int nr_entries, unsigned int nr_rec_entries,
- struct pblk_line *gc_line, unsigned long flags)
+int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
struct pblk_w_ctx w_ctx;
unsigned int bpos, pos;
+ void *data = gc_rq->data;
int i, valid_entries;
/* Update the write buffer head (mem) with the entries that we can
@@ -86,28 +87,29 @@ int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
* rollback from here on.
*/
retry:
- if (!pblk_rb_may_write_gc(&pblk->rwb, nr_rec_entries, &bpos)) {
+ if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
io_schedule();
goto retry;
}
- w_ctx.flags = flags;
+ w_ctx.flags = PBLK_IOTYPE_GC;
pblk_ppa_set_empty(&w_ctx.ppa);
- for (i = 0, valid_entries = 0; i < nr_entries; i++) {
- if (lba_list[i] == ADDR_EMPTY)
+ for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
+ if (gc_rq->lba_list[i] == ADDR_EMPTY)
continue;
- w_ctx.lba = lba_list[i];
+ w_ctx.lba = gc_rq->lba_list[i];
pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
- pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_line, pos);
+ pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
+ gc_rq->paddr_list[i], pos);
data += PBLK_EXPOSED_PAGE_SIZE;
valid_entries++;
}
- WARN_ONCE(nr_rec_entries != valid_entries,
+ WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
"pblk: inconsistent GC write\n");
#ifdef CONFIG_NVM_DEBUG
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 81501644fb15..76516ee84e9a 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -18,6 +18,31 @@
#include "pblk.h"
+static void pblk_line_mark_bb(struct work_struct *work)
+{
+ struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
+ ws);
+ struct pblk *pblk = line_ws->pblk;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct ppa_addr *ppa = line_ws->priv;
+ int ret;
+
+ ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+ if (ret) {
+ struct pblk_line *line;
+ int pos;
+
+ line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
+ pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+
+ pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
+ line->id, pos);
+ }
+
+ kfree(ppa);
+ mempool_free(line_ws, pblk->gen_ws_pool);
+}
+
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
struct ppa_addr *ppa)
{
@@ -33,7 +58,8 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
line->id, pos);
- pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, pblk->bb_wq);
+ pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
+ GFP_ATOMIC, pblk->bb_wq);
}
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
@@ -63,7 +89,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
struct pblk *pblk = rqd->private;
__pblk_end_io_erase(pblk, rqd);
- mempool_free(rqd, pblk->g_rq_pool);
+ mempool_free(rqd, pblk->e_rq_pool);
}
void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
@@ -77,11 +103,7 @@ void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
* that newer updates are not overwritten.
*/
spin_lock(&line->lock);
- if (line->state == PBLK_LINESTATE_GC ||
- line->state == PBLK_LINESTATE_FREE) {
- spin_unlock(&line->lock);
- return;
- }
+ WARN_ON(line->state == PBLK_LINESTATE_FREE);
if (test_and_set_bit(paddr, line->invalid_bitmap)) {
WARN_ONCE(1, "pblk: double invalidate\n");
@@ -98,8 +120,7 @@ void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
spin_lock(&l_mg->gc_lock);
spin_lock(&line->lock);
/* Prevent moving a line that has just been chosen for GC */
- if (line->state == PBLK_LINESTATE_GC ||
- line->state == PBLK_LINESTATE_FREE) {
+ if (line->state == PBLK_LINESTATE_GC) {
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
return;
@@ -150,17 +171,25 @@ static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
spin_unlock(&pblk->trans_lock);
}
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
+/* Caller must guarantee that the request is a valid type */
+struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
mempool_t *pool;
struct nvm_rq *rqd;
int rq_size;
- if (rw == WRITE) {
+ switch (type) {
+ case PBLK_WRITE:
+ case PBLK_WRITE_INT:
pool = pblk->w_rq_pool;
rq_size = pblk_w_rq_size;
- } else {
- pool = pblk->g_rq_pool;
+ break;
+ case PBLK_READ:
+ pool = pblk->r_rq_pool;
+ rq_size = pblk_g_rq_size;
+ break;
+ default:
+ pool = pblk->e_rq_pool;
rq_size = pblk_g_rq_size;
}
@@ -170,15 +199,30 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
return rqd;
}
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
+/* Typically used on completion path. Cannot guarantee request consistency */
+void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
mempool_t *pool;
- if (rw == WRITE)
+ switch (type) {
+ case PBLK_WRITE:
+ kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
+ case PBLK_WRITE_INT:
pool = pblk->w_rq_pool;
- else
- pool = pblk->g_rq_pool;
+ break;
+ case PBLK_READ:
+ pool = pblk->r_rq_pool;
+ break;
+ case PBLK_ERASE:
+ pool = pblk->e_rq_pool;
+ break;
+ default:
+ pr_err("pblk: trying to free unknown rqd type\n");
+ return;
+ }
+ nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
mempool_free(rqd, pool);
}
@@ -190,10 +234,9 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
WARN_ON(off + nr_pages != bio->bi_vcnt);
- bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
for (i = off; i < nr_pages + off; i++) {
bv = bio->bi_io_vec[i];
- mempool_free(bv.bv_page, pblk->page_pool);
+ mempool_free(bv.bv_page, pblk->page_bio_pool);
}
}
@@ -205,14 +248,12 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int i, ret;
for (i = 0; i < nr_pages; i++) {
- page = mempool_alloc(pblk->page_pool, flags);
- if (!page)
- goto err;
+ page = mempool_alloc(pblk->page_bio_pool, flags);
ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
if (ret != PBLK_EXPOSED_PAGE_SIZE) {
pr_err("pblk: could not add page to bio\n");
- mempool_free(page, pblk->page_pool);
+ mempool_free(page, pblk->page_bio_pool);
goto err;
}
}
@@ -229,9 +270,9 @@ static void pblk_write_kick(struct pblk *pblk)
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, wtimer);
/* kick the write thread every tick to flush outstanding data */
pblk_write_kick(pblk);
@@ -245,13 +286,6 @@ void pblk_write_should_kick(struct pblk *pblk)
pblk_write_kick(pblk);
}
-void pblk_end_bio_sync(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- complete(waiting);
-}
-
void pblk_end_io_sync(struct nvm_rq *rqd)
{
struct completion *waiting = rqd->private;
@@ -259,7 +293,7 @@ void pblk_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-void pblk_wait_for_meta(struct pblk *pblk)
+static void pblk_wait_for_meta(struct pblk *pblk)
{
do {
if (!atomic_read(&pblk->inflight_io))
@@ -336,17 +370,6 @@ void pblk_discard(struct pblk *pblk, struct bio *bio)
pblk_invalidate_range(pblk, slba, nr_secs);
}
-struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
-{
- struct ppa_addr ppa;
-
- spin_lock(&pblk->trans_lock);
- ppa = pblk_trans_map_get(pblk, lba);
- spin_unlock(&pblk->trans_lock);
-
- return ppa;
-}
-
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
atomic_long_inc(&pblk->write_failed);
@@ -389,39 +412,38 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
struct nvm_tgt_dev *dev = pblk->dev;
#ifdef CONFIG_NVM_DEBUG
- struct ppa_addr *ppa_list;
+ int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
- if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
- WARN_ON(1);
- return -EINVAL;
- }
+ ret = pblk_check_io(pblk, rqd);
+ if (ret)
+ return ret;
+#endif
- if (rqd->opcode == NVM_OP_PWRITE) {
- struct pblk_line *line;
- struct ppa_addr ppa;
- int i;
+ atomic_inc(&pblk->inflight_io);
- for (i = 0; i < rqd->nr_ppas; i++) {
- ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ return nvm_submit_io(dev, rqd);
+}
- spin_lock(&line->lock);
- if (line->state != PBLK_LINESTATE_OPEN) {
- pr_err("pblk: bad ppa: line:%d,state:%d\n",
- line->id, line->state);
- WARN_ON(1);
- spin_unlock(&line->lock);
- return -EINVAL;
- }
- spin_unlock(&line->lock);
- }
- }
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+#ifdef CONFIG_NVM_DEBUG
+ int ret;
+
+ ret = pblk_check_io(pblk, rqd);
+ if (ret)
+ return ret;
#endif
atomic_inc(&pblk->inflight_io);
- return nvm_submit_io(dev, rqd);
+ return nvm_submit_io_sync(dev, rqd);
+}
+
+static void pblk_bio_map_addr_endio(struct bio *bio)
+{
+ bio_put(bio);
}
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
@@ -460,6 +482,8 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
kaddr += PAGE_SIZE;
}
+
+ bio->bi_end_io = pblk_bio_map_addr_endio;
out:
return bio;
}
@@ -486,12 +510,14 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
u64 addr;
int i;
+ spin_lock(&line->lock);
addr = find_next_zero_bit(line->map_bitmap,
pblk->lm.sec_per_line, line->cur_sec);
line->cur_sec = addr - nr_secs;
for (i = 0; i < nr_secs; i++, line->cur_sec--)
WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
+ spin_unlock(&line->lock);
}
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
@@ -565,12 +591,11 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
int cmd_op, bio_op;
int i, j;
int ret;
- DECLARE_COMPLETION_ONSTACK(wait);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
bio_op = REQ_OP_WRITE;
cmd_op = NVM_OP_PWRITE;
- } else if (dir == READ) {
+ } else if (dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
} else
@@ -607,13 +632,11 @@ next_rq:
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = cmd_op;
rqd.nr_ppas = rq_ppas;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
- rqd.flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
for (i = 0; i < rqd.nr_ppas; ) {
spin_lock(&line->lock);
paddr = __pblk_alloc_page(pblk, line, min);
@@ -662,25 +685,17 @@ next_rq:
}
}
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
pr_err("pblk: emeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_rqd_dma;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: emeta I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
-
- if (likely(pblk->l_mg.emeta_alloc_type == PBLK_VMALLOC_META))
- bio_put(bio);
if (rqd.error) {
- if (dir == WRITE)
+ if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
else
pblk_log_read_err(pblk, &rqd);
@@ -721,14 +736,13 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
int i, ret;
int cmd_op, bio_op;
int flags;
- DECLARE_COMPLETION_ONSTACK(wait);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
bio_op = REQ_OP_WRITE;
cmd_op = NVM_OP_PWRITE;
- flags = pblk_set_progr_mode(pblk, WRITE);
+ flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == READ) {
+ } else if (dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -758,15 +772,13 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = cmd_op;
rqd.flags = flags;
rqd.nr_ppas = lm->smeta_sec;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
- if (dir == WRITE) {
+ if (dir == PBLK_WRITE) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
meta_list[i].lba = lba_list[paddr] = addr_empty;
@@ -778,21 +790,17 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
* the write thread is the only one sending write and erase commands,
* there is no need to take the LUN semaphore.
*/
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
pr_err("pblk: smeta I/O submission failed: %d\n", ret);
bio_put(bio);
goto free_ppa_list;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: smeta I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == WRITE)
+ if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
else
pblk_log_read_err(pblk, &rqd);
@@ -808,14 +816,14 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf)
{
return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
- line->emeta_ssec, READ);
+ line->emeta_ssec, PBLK_READ);
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -824,7 +832,7 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd->opcode = NVM_OP_ERASE;
rqd->ppa_addr = ppa;
rqd->nr_ppas = 1;
- rqd->flags = pblk_set_progr_mode(pblk, ERASE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
rqd->bio = NULL;
}
@@ -832,19 +840,15 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_rq rqd;
int ret = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
memset(&rqd, 0, sizeof(struct nvm_rq));
pblk_setup_e_rq(pblk, &rqd, ppa);
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
-
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
- ret = pblk_submit_io(pblk, &rqd);
+ ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -857,11 +861,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
goto out;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: sync erase timed out\n");
- }
-
out:
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -976,7 +975,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
smeta_buf->header.id = cpu_to_le32(line->id);
smeta_buf->header.type = cpu_to_le16(line->type);
- smeta_buf->header.version = cpu_to_le16(1);
+ smeta_buf->header.version = SMETA_VERSION;
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
@@ -1046,7 +1045,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
- if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
+ if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
pr_debug("pblk: line smeta I/O failed. Retry\n");
return 1;
}
@@ -1056,7 +1055,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
/* Mark emeta metadata sectors as bad sectors. We need to consider bad
* blocks to make sure that there are enough sectors to store emeta
*/
- bit = lm->sec_per_line;
off = lm->sec_per_line - lm->emeta_sec[0];
bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
while (nr_bb) {
@@ -1093,25 +1091,21 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
struct pblk_line_meta *lm = &pblk->lm;
int blk_in_line = atomic_read(&line->blk_in_line);
- line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
+ line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_ATOMIC);
if (!line->map_bitmap)
return -ENOMEM;
- memset(line->map_bitmap, 0, lm->sec_bitmap_len);
- /* invalid_bitmap is special since it is used when line is closed. No
- * need to zeroized; it will be initialized using bb info form
- * map_bitmap
- */
- line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
+ /* will be initialized using bb info from map_bitmap */
+ line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_ATOMIC);
if (!line->invalid_bitmap) {
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
return -ENOMEM;
}
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_FREE) {
- mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
+ kfree(line->invalid_bitmap);
spin_unlock(&line->lock);
WARN(1, "pblk: corrupted line %d, state %d\n",
line->id, line->state);
@@ -1163,7 +1157,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1328,6 +1322,41 @@ static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
pblk->state = PBLK_STATE_STOPPING;
}
+static void pblk_line_close_meta_sync(struct pblk *pblk)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_line *line, *tline;
+ LIST_HEAD(list);
+
+ spin_lock(&l_mg->close_lock);
+ if (list_empty(&l_mg->emeta_list)) {
+ spin_unlock(&l_mg->close_lock);
+ return;
+ }
+
+ list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
+ spin_unlock(&l_mg->close_lock);
+
+ list_for_each_entry_safe(line, tline, &list, list) {
+ struct pblk_emeta *emeta = line->emeta;
+
+ while (emeta->mem < lm->emeta_len[0]) {
+ int ret;
+
+ ret = pblk_submit_meta_io(pblk, line);
+ if (ret) {
+ pr_err("pblk: sync meta line %d failed (%d)\n",
+ line->id, ret);
+ return;
+ }
+ }
+ }
+
+ pblk_wait_for_meta(pblk);
+ flush_workqueue(pblk->close_wq);
+}
+
void pblk_pipeline_stop(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1361,17 +1390,17 @@ void pblk_pipeline_stop(struct pblk *pblk)
spin_unlock(&l_mg->free_lock);
}
-void pblk_line_replace_data(struct pblk *pblk)
+struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct pblk_line *cur, *new;
+ struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
if (!new)
- return;
+ goto out;
l_mg->data_line = new;
spin_lock(&l_mg->free_lock);
@@ -1379,7 +1408,7 @@ void pblk_line_replace_data(struct pblk *pblk)
l_mg->data_line = NULL;
l_mg->data_next = NULL;
spin_unlock(&l_mg->free_lock);
- return;
+ goto out;
}
pblk_line_setup_metadata(new, l_mg, &pblk->lm);
@@ -1391,7 +1420,7 @@ retry_erase:
/* If line is not fully erased, erase it */
if (atomic_read(&new->left_eblks)) {
if (pblk_line_erase(pblk, new))
- return;
+ goto out;
} else {
io_schedule();
}
@@ -1402,7 +1431,7 @@ retry_setup:
if (!pblk_line_init_metadata(pblk, new, cur)) {
new = pblk_line_retry(pblk, new);
if (!new)
- return;
+ goto out;
goto retry_setup;
}
@@ -1410,7 +1439,7 @@ retry_setup:
if (!pblk_line_init_bb(pblk, new, 1)) {
new = pblk_line_retry(pblk, new);
if (!new)
- return;
+ goto out;
goto retry_setup;
}
@@ -1434,14 +1463,15 @@ retry_setup:
if (is_next)
pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
+
+out:
+ return new;
}
void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
- if (line->map_bitmap)
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
- if (line->invalid_bitmap)
- mempool_free(line->invalid_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
+ kfree(line->invalid_bitmap);
*line->vsc = cpu_to_le32(EMPTY_ENTRY);
@@ -1451,11 +1481,10 @@ void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
line->emeta = NULL;
}
-void pblk_line_put(struct kref *ref)
+static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
- struct pblk_line *line = container_of(ref, struct pblk_line, ref);
- struct pblk *pblk = line->pblk;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_gc *gc = &pblk->gc;
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
@@ -1464,6 +1493,8 @@ void pblk_line_put(struct kref *ref)
pblk_line_free(pblk, line);
spin_unlock(&line->lock);
+ atomic_dec(&gc->pipeline_gc);
+
spin_lock(&l_mg->free_lock);
list_add_tail(&line->list, &l_mg->free_list);
l_mg->nr_free_lines++;
@@ -1472,13 +1503,49 @@ void pblk_line_put(struct kref *ref)
pblk_rl_free_lines_inc(&pblk->rl, line);
}
+static void pblk_line_put_ws(struct work_struct *work)
+{
+ struct pblk_line_ws *line_put_ws = container_of(work,
+ struct pblk_line_ws, ws);
+ struct pblk *pblk = line_put_ws->pblk;
+ struct pblk_line *line = line_put_ws->line;
+
+ __pblk_line_put(pblk, line);
+ mempool_free(line_put_ws, pblk->gen_ws_pool);
+}
+
+void pblk_line_put(struct kref *ref)
+{
+ struct pblk_line *line = container_of(ref, struct pblk_line, ref);
+ struct pblk *pblk = line->pblk;
+
+ __pblk_line_put(pblk, line);
+}
+
+void pblk_line_put_wq(struct kref *ref)
+{
+ struct pblk_line *line = container_of(ref, struct pblk_line, ref);
+ struct pblk *pblk = line->pblk;
+ struct pblk_line_ws *line_put_ws;
+
+ line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
+ if (!line_put_ws)
+ return;
+
+ line_put_ws->pblk = pblk;
+ line_put_ws->line = line;
+ line_put_ws->priv = NULL;
+
+ INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
+ queue_work(pblk->r_end_wq, &line_put_ws->ws);
+}
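/*
 * Two flavours of the final line put: pblk_line_put() runs __pblk_line_put()
 * inline, while pblk_line_put_wq() defers it to the pblk->r_end_wq workqueue
 * via a pblk_line_ws work item from gen_ws_pool. The deferred variant is the
 * release callback used on the read path (see pblk_read_put_rqd_kref() in
 * pblk-read.c), presumably to keep the lock-heavy free/requeue work out of
 * the read completion context.
 */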
+
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_rq *rqd;
int err;
- rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
- memset(rqd, 0, pblk_g_rq_size);
+ rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
pblk_setup_e_rq(pblk, rqd, ppa);
@@ -1517,41 +1584,6 @@ int pblk_line_is_full(struct pblk_line *line)
return (line->left_msecs == 0);
}
-void pblk_line_close_meta_sync(struct pblk *pblk)
-{
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct pblk_line_meta *lm = &pblk->lm;
- struct pblk_line *line, *tline;
- LIST_HEAD(list);
-
- spin_lock(&l_mg->close_lock);
- if (list_empty(&l_mg->emeta_list)) {
- spin_unlock(&l_mg->close_lock);
- return;
- }
-
- list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
- spin_unlock(&l_mg->close_lock);
-
- list_for_each_entry_safe(line, tline, &list, list) {
- struct pblk_emeta *emeta = line->emeta;
-
- while (emeta->mem < lm->emeta_len[0]) {
- int ret;
-
- ret = pblk_submit_meta_io(pblk, line);
- if (ret) {
- pr_err("pblk: sync meta line %d failed (%d)\n",
- line->id, ret);
- return;
- }
- }
- }
-
- pblk_wait_for_meta(pblk);
- flush_workqueue(pblk->close_wq);
-}
-
static void pblk_line_should_sync_meta(struct pblk *pblk)
{
if (pblk_rl_is_limit(&pblk->rl))
@@ -1582,15 +1614,13 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
list_add_tail(&line->list, move_list);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
-
- pblk_gc_should_kick(pblk);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
@@ -1624,43 +1654,16 @@ void pblk_line_close_ws(struct work_struct *work)
struct pblk_line *line = line_ws->line;
pblk_line_close(pblk, line);
- mempool_free(line_ws, pblk->line_ws_pool);
-}
-
-void pblk_line_mark_bb(struct work_struct *work)
-{
- struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
- ws);
- struct pblk *pblk = line_ws->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
- struct ppa_addr *ppa = line_ws->priv;
- int ret;
-
- ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
- if (ret) {
- struct pblk_line *line;
- int pos;
-
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
-
- pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
- line->id, pos);
- }
-
- kfree(ppa);
- mempool_free(line_ws, pblk->line_ws_pool);
+ mempool_free(line_ws, pblk->gen_ws_pool);
}
-void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
- void (*work)(struct work_struct *),
+void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
+ void (*work)(struct work_struct *), gfp_t gfp_mask,
struct workqueue_struct *wq)
{
struct pblk_line_ws *line_ws;
- line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
- if (!line_ws)
- return;
+ line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
line_ws->pblk = pblk;
line_ws->line = line;
@@ -1689,16 +1692,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
#endif
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
- if (ret) {
- switch (ret) {
- case -ETIME:
- pr_err("pblk: lun semaphore timed out\n");
- break;
- case -EINTR:
- pr_err("pblk: lun semaphore timed out\n");
- break;
- }
- }
+ if (ret == -ETIME || ret == -EINTR)
+ pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret);
}
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
@@ -1758,13 +1753,11 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
rlun = &pblk->luns[bit];
up(&rlun->wr_sem);
}
-
- kfree(lun_bitmap);
}
void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
- struct ppa_addr l2p_ppa;
+ struct ppa_addr ppa_l2p;
/* logic error: lba out-of-bounds. Ignore update */
if (!(lba < pblk->rl.nr_secs)) {
@@ -1773,10 +1766,10 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
}
spin_lock(&pblk->trans_lock);
- l2p_ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
- if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
- pblk_map_invalidate(pblk, l2p_ppa);
+ if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
+ pblk_map_invalidate(pblk, ppa_l2p);
pblk_trans_map_set(pblk, lba, ppa);
spin_unlock(&pblk->trans_lock);
@@ -1784,6 +1777,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
+
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
BUG_ON(!pblk_addr_in_cache(ppa));
@@ -1793,16 +1787,16 @@ void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
pblk_update_map(pblk, lba, ppa);
}
-int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct pblk_line *gc_line)
+int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
+ struct pblk_line *gc_line, u64 paddr_gc)
{
- struct ppa_addr l2p_ppa;
+ struct ppa_addr ppa_l2p, ppa_gc;
int ret = 1;
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
- BUG_ON(!pblk_addr_in_cache(ppa));
- BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
+ BUG_ON(!pblk_addr_in_cache(ppa_new));
+ BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif
/* logic error: lba out-of-bounds. Ignore update */
@@ -1812,36 +1806,41 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
}
spin_lock(&pblk->trans_lock);
- l2p_ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
+
+ if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
+ spin_lock(&gc_line->lock);
+ WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
+ "pblk: corrupted GC update");
+ spin_unlock(&gc_line->lock);
- /* Prevent updated entries to be overwritten by GC */
- if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
- pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
ret = 0;
goto out;
}
- pblk_trans_map_set(pblk, lba, ppa);
+ pblk_trans_map_set(pblk, lba, ppa_new);
out:
spin_unlock(&pblk->trans_lock);
return ret;
}
-void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct ppa_addr entry_line)
+void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
- struct ppa_addr l2p_line;
+ struct ppa_addr ppa_l2p;
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a device address */
- BUG_ON(pblk_addr_in_cache(ppa));
+ BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
/* Invalidate and discard padded entries */
if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->padded_wb);
#endif
- pblk_map_invalidate(pblk, ppa);
+ if (!pblk_ppa_empty(ppa_mapped))
+ pblk_map_invalidate(pblk, ppa_mapped);
return;
}
@@ -1852,22 +1851,22 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
}
spin_lock(&pblk->trans_lock);
- l2p_line = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
/* Do not update L2P if the cacheline has been updated. In this case,
* the mapped ppa must be invalidated
*/
- if (l2p_line.ppa != entry_line.ppa) {
- if (!pblk_ppa_empty(ppa))
- pblk_map_invalidate(pblk, ppa);
+ if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
+ if (!pblk_ppa_empty(ppa_mapped))
+ pblk_map_invalidate(pblk, ppa_mapped);
goto out;
}
#ifdef CONFIG_NVM_DEBUG
- WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
+ WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif
- pblk_trans_map_set(pblk, lba, ppa);
+ pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
spin_unlock(&pblk->trans_lock);
}
@@ -1878,23 +1877,32 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
int i;
spin_lock(&pblk->trans_lock);
- for (i = 0; i < nr_secs; i++)
- ppas[i] = pblk_trans_map_get(pblk, blba + i);
+ for (i = 0; i < nr_secs; i++) {
+ struct ppa_addr ppa;
+
+ ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
+
+ /* If the L2P entry maps to a line, the reference is valid */
+ if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
+ int line_id = pblk_dev_ppa_to_line(ppa);
+ struct pblk_line *line = &pblk->lines[line_id];
+
+ kref_get(&line->ref);
+ }
+ }
spin_unlock(&pblk->trans_lock);
}
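/*
 * pblk_lookup_l2p_seq() takes a reference on every line that a looked-up
 * sector maps to, so a line cannot be freed or recycled while a read that
 * uses the mapping is in flight. The references are dropped on completion in
 * pblk_read_put_rqd_kref() (via pblk_line_put_wq()) or, for partial reads,
 * per sector in pblk_fill_partial_read_bio().
 */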
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs)
{
- sector_t lba;
+ u64 lba;
int i;
spin_lock(&pblk->trans_lock);
for (i = 0; i < nr_secs; i++) {
lba = lba_list[i];
- if (lba == ADDR_EMPTY) {
- ppas[i].ppa = ADDR_EMPTY;
- } else {
+ if (lba != ADDR_EMPTY) {
/* logic error: lba out-of-bounds. Ignore update */
if (!(lba < pblk->rl.nr_secs)) {
WARN(1, "pblk: corrupted L2P map request\n");
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 6090d28f7995..9c8e114c8a54 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -20,7 +20,8 @@
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
- vfree(gc_rq->data);
+ if (gc_rq->data)
+ vfree(gc_rq->data);
kfree(gc_rq);
}
@@ -41,10 +42,7 @@ static int pblk_gc_write(struct pblk *pblk)
spin_unlock(&gc->w_lock);
list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
- pblk_write_gc_to_cache(pblk, gc_rq->data, gc_rq->lba_list,
- gc_rq->nr_secs, gc_rq->secs_to_gc,
- gc_rq->line, PBLK_IOTYPE_GC);
-
+ pblk_write_gc_to_cache(pblk, gc_rq);
list_del(&gc_rq->list);
kref_put(&gc_rq->line->ref, pblk_line_put);
pblk_gc_free_gc_rq(gc_rq);
@@ -58,42 +56,59 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
wake_up_process(gc->gc_writer_ts);
}
-/*
- * Responsible for managing all memory related to a gc request. Also in case of
- * failure
- */
-static int pblk_gc_move_valid_secs(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
+static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct list_head *move_list;
+
+ spin_lock(&line->lock);
+ WARN_ON(line->state != PBLK_LINESTATE_GC);
+ line->state = PBLK_LINESTATE_CLOSED;
+ move_list = pblk_line_gc_list(pblk, line);
+ spin_unlock(&line->lock);
+
+ if (move_list) {
+ spin_lock(&l_mg->gc_lock);
+ list_add_tail(&line->list, move_list);
+ spin_unlock(&l_mg->gc_lock);
+ }
+}
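/*
 * pblk_put_line_back() is the GC bail-out path: a victim line that is already
 * in PBLK_LINESTATE_GC is flipped back to CLOSED and re-queued on the gc
 * group list chosen by pblk_line_gc_list(), so it can be selected again
 * later. It is called from the error paths of pblk_gc_line_prepare_ws()
 * below.
 */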
+
+static void pblk_gc_line_ws(struct work_struct *work)
{
+ struct pblk_line_ws *gc_rq_ws = container_of(work,
+ struct pblk_line_ws, ws);
+ struct pblk *pblk = gc_rq_ws->pblk;
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
- struct pblk_line *line = gc_rq->line;
- void *data;
- unsigned int secs_to_gc;
- int ret = 0;
+ struct pblk_line *line = gc_rq_ws->line;
+ struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
+ int ret;
- data = vmalloc(gc_rq->nr_secs * geo->sec_size);
- if (!data) {
- ret = -ENOMEM;
+ up(&gc->gc_sem);
+
+ gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size);
+ if (!gc_rq->data) {
+ pr_err("pblk: could not GC line:%d (%d/%d)\n",
+ line->id, *line->vsc, gc_rq->nr_secs);
goto out;
}
/* Read from GC victim block */
- if (pblk_submit_read_gc(pblk, gc_rq->lba_list, data, gc_rq->nr_secs,
- &secs_to_gc, line)) {
- ret = -EFAULT;
- goto free_data;
+ ret = pblk_submit_read_gc(pblk, gc_rq);
+ if (ret) {
+ pr_err("pblk: failed GC read in line:%d (err:%d)\n",
+ line->id, ret);
+ goto out;
}
- if (!secs_to_gc)
- goto free_rq;
-
- gc_rq->data = data;
- gc_rq->secs_to_gc = secs_to_gc;
+ if (!gc_rq->secs_to_gc)
+ goto out;
retry:
spin_lock(&gc->w_lock);
- if (gc->w_entries >= PBLK_GC_W_QD) {
+ if (gc->w_entries >= PBLK_GC_RQ_QD) {
spin_unlock(&gc->w_lock);
pblk_gc_writer_kick(&pblk->gc);
usleep_range(128, 256);
@@ -105,53 +120,13 @@ retry:
pblk_gc_writer_kick(&pblk->gc);
- return 0;
+ kfree(gc_rq_ws);
+ return;
-free_rq:
- kfree(gc_rq);
-free_data:
- vfree(data);
out:
+ pblk_gc_free_gc_rq(gc_rq);
kref_put(&line->ref, pblk_line_put);
- return ret;
-}
-
-static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
-{
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- struct list_head *move_list;
-
- spin_lock(&line->lock);
- WARN_ON(line->state != PBLK_LINESTATE_GC);
- line->state = PBLK_LINESTATE_CLOSED;
- move_list = pblk_line_gc_list(pblk, line);
- spin_unlock(&line->lock);
-
- if (move_list) {
- spin_lock(&l_mg->gc_lock);
- list_add_tail(&line->list, move_list);
- spin_unlock(&l_mg->gc_lock);
- }
-}
-
-static void pblk_gc_line_ws(struct work_struct *work)
-{
- struct pblk_line_ws *line_rq_ws = container_of(work,
- struct pblk_line_ws, ws);
- struct pblk *pblk = line_rq_ws->pblk;
- struct pblk_gc *gc = &pblk->gc;
- struct pblk_line *line = line_rq_ws->line;
- struct pblk_gc_rq *gc_rq = line_rq_ws->priv;
-
- up(&gc->gc_sem);
-
- if (pblk_gc_move_valid_secs(pblk, gc_rq)) {
- pr_err("pblk: could not GC all sectors: line:%d (%d/%d)\n",
- line->id, *line->vsc,
- gc_rq->nr_secs);
- }
-
- mempool_free(line_rq_ws, pblk->line_ws_pool);
+ kfree(gc_rq_ws);
}
static void pblk_gc_line_prepare_ws(struct work_struct *work)
@@ -164,17 +139,24 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_gc *gc = &pblk->gc;
struct line_emeta *emeta_buf;
- struct pblk_line_ws *line_rq_ws;
+ struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
__le64 *lba_list;
+ unsigned long *invalid_bitmap;
int sec_left, nr_secs, bit;
int ret;
+ invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ if (!invalid_bitmap) {
+ pr_err("pblk: could not allocate GC invalid bitmap\n");
+ goto fail_free_ws;
+ }
+
emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
GFP_KERNEL);
if (!emeta_buf) {
pr_err("pblk: cannot use GC emeta\n");
- return;
+ goto fail_free_bitmap;
}
ret = pblk_line_read_emeta(pblk, line, emeta_buf);
@@ -193,7 +175,11 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
goto fail_free_emeta;
}
+ spin_lock(&line->lock);
+ bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
sec_left = pblk_line_vsc(line);
+ spin_unlock(&line->lock);
+
if (sec_left < 0) {
pr_err("pblk: corrupted GC line (%d)\n", line->id);
goto fail_free_emeta;
@@ -207,11 +193,12 @@ next_rq:
nr_secs = 0;
do {
- bit = find_next_zero_bit(line->invalid_bitmap, lm->sec_per_line,
+ bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
bit + 1);
if (bit > line->emeta_ssec)
break;
+ gc_rq->paddr_list[nr_secs] = bit;
gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
} while (nr_secs < pblk->max_write_pgs);
@@ -223,19 +210,25 @@ next_rq:
gc_rq->nr_secs = nr_secs;
gc_rq->line = line;
- line_rq_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
- if (!line_rq_ws)
+ gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
+ if (!gc_rq_ws)
goto fail_free_gc_rq;
- line_rq_ws->pblk = pblk;
- line_rq_ws->line = line;
- line_rq_ws->priv = gc_rq;
+ gc_rq_ws->pblk = pblk;
+ gc_rq_ws->line = line;
+ gc_rq_ws->priv = gc_rq;
+
+ /* The write GC path can be much slower than the read GC one due to
+	 * the budget imposed by the rate-limiter. Balance in case we get
+ * back pressure from the write GC path.
+ */
+ while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
+ io_schedule();
- down(&gc->gc_sem);
kref_get(&line->ref);
- INIT_WORK(&line_rq_ws->ws, pblk_gc_line_ws);
- queue_work(gc->gc_line_reader_wq, &line_rq_ws->ws);
+ INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
+ queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);
sec_left -= nr_secs;
if (sec_left > 0)
@@ -243,10 +236,11 @@ next_rq:
out:
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
- mempool_free(line_ws, pblk->line_ws_pool);
+ kfree(line_ws);
+ kfree(invalid_bitmap);
kref_put(&line->ref, pblk_line_put);
- atomic_dec(&gc->inflight_gc);
+ atomic_dec(&gc->read_inflight_gc);
return;
@@ -254,10 +248,14 @@ fail_free_gc_rq:
kfree(gc_rq);
fail_free_emeta:
pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
+fail_free_bitmap:
+ kfree(invalid_bitmap);
+fail_free_ws:
+ kfree(line_ws);
+
pblk_put_line_back(pblk, line);
kref_put(&line->ref, pblk_line_put);
- mempool_free(line_ws, pblk->line_ws_pool);
- atomic_dec(&gc->inflight_gc);
+ atomic_dec(&gc->read_inflight_gc);
pr_err("pblk: Failed to GC line %d\n", line->id);
}
@@ -269,19 +267,40 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);
- line_ws = mempool_alloc(pblk->line_ws_pool, GFP_KERNEL);
+ line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!line_ws)
return -ENOMEM;
line_ws->pblk = pblk;
line_ws->line = line;
+ atomic_inc(&gc->pipeline_gc);
INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
queue_work(gc->gc_reader_wq, &line_ws->ws);
return 0;
}
+static void pblk_gc_reader_kick(struct pblk_gc *gc)
+{
+ wake_up_process(gc->gc_reader_ts);
+}
+
+static void pblk_gc_kick(struct pblk *pblk)
+{
+ struct pblk_gc *gc = &pblk->gc;
+
+ pblk_gc_writer_kick(gc);
+ pblk_gc_reader_kick(gc);
+
+ /* If we're shutting down GC, let's not start it up again */
+ if (gc->gc_enabled) {
+ wake_up_process(gc->gc_ts);
+ mod_timer(&gc->gc_timer,
+ jiffies + msecs_to_jiffies(GC_TIME_MSECS));
+ }
+}
+
static int pblk_gc_read(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
@@ -305,11 +324,6 @@ static int pblk_gc_read(struct pblk *pblk)
return 0;
}
-static void pblk_gc_reader_kick(struct pblk_gc *gc)
-{
- wake_up_process(gc->gc_reader_ts);
-}
-
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
struct list_head *group_list)
{
@@ -338,26 +352,17 @@ static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
}
-/*
- * Lines with no valid sectors will be returned to the free list immediately. If
- * GC is activated - either because the free block count is under the determined
- * threshold, or because it is being forced from user space - only lines with a
- * high count of invalid sectors will be recycled.
- */
-static void pblk_gc_run(struct pblk *pblk)
+void pblk_gc_free_full_lines(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line *line;
- struct list_head *group_list;
- bool run_gc;
- int inflight_gc, gc_group = 0, prev_group = 0;
do {
spin_lock(&l_mg->gc_lock);
if (list_empty(&l_mg->gc_full_list)) {
spin_unlock(&l_mg->gc_lock);
- break;
+ return;
}
line = list_first_entry(&l_mg->gc_full_list,
@@ -371,11 +376,30 @@ static void pblk_gc_run(struct pblk *pblk)
list_del(&line->list);
spin_unlock(&l_mg->gc_lock);
+ atomic_inc(&gc->pipeline_gc);
kref_put(&line->ref, pblk_line_put);
} while (1);
+}
+
+/*
+ * Lines with no valid sectors will be returned to the free list immediately. If
+ * GC is activated - either because the free block count is under the determined
+ * threshold, or because it is being forced from user space - only lines with a
+ * high count of invalid sectors will be recycled.
+ */
+static void pblk_gc_run(struct pblk *pblk)
+{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_gc *gc = &pblk->gc;
+ struct pblk_line *line;
+ struct list_head *group_list;
+ bool run_gc;
+ int read_inflight_gc, gc_group = 0, prev_group = 0;
+
+ pblk_gc_free_full_lines(pblk);
run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
- if (!run_gc || (atomic_read(&gc->inflight_gc) >= PBLK_GC_L_QD))
+ if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
return;
next_gc_group:
@@ -402,14 +426,14 @@ next_gc_group:
list_add_tail(&line->list, &gc->r_list);
spin_unlock(&gc->r_lock);
- inflight_gc = atomic_inc_return(&gc->inflight_gc);
+ read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
pblk_gc_reader_kick(gc);
prev_group = 1;
/* No need to queue up more GC lines than we can handle */
run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
- if (!run_gc || inflight_gc >= PBLK_GC_L_QD)
+ if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
break;
} while (1);
@@ -418,19 +442,9 @@ next_gc_group:
goto next_gc_group;
}
-void pblk_gc_kick(struct pblk *pblk)
+static void pblk_gc_timer(struct timer_list *t)
{
- struct pblk_gc *gc = &pblk->gc;
-
- wake_up_process(gc->gc_ts);
- pblk_gc_writer_kick(gc);
- pblk_gc_reader_kick(gc);
- mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
-}
-
-static void pblk_gc_timer(unsigned long data)
-{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
pblk_gc_kick(pblk);
}
@@ -465,6 +479,7 @@ static int pblk_gc_writer_ts(void *data)
static int pblk_gc_reader_ts(void *data)
{
struct pblk *pblk = data;
+ struct pblk_gc *gc = &pblk->gc;
while (!kthread_should_stop()) {
if (!pblk_gc_read(pblk))
@@ -473,6 +488,18 @@ static int pblk_gc_reader_ts(void *data)
io_schedule();
}
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk: flushing gc pipeline, %d lines left\n",
+ atomic_read(&gc->pipeline_gc));
+#endif
+
+ do {
+ if (!atomic_read(&gc->pipeline_gc))
+ break;
+
+ schedule();
+ } while (1);
+
return 0;
}
@@ -486,10 +513,10 @@ void pblk_gc_should_start(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
- if (gc->gc_enabled && !gc->gc_active)
+ if (gc->gc_enabled && !gc->gc_active) {
pblk_gc_start(pblk);
-
- pblk_gc_kick(pblk);
+ pblk_gc_kick(pblk);
+ }
}
/*
@@ -510,6 +537,11 @@ void pblk_gc_should_stop(struct pblk *pblk)
pblk_gc_stop(pblk, 0);
}
+void pblk_gc_should_kick(struct pblk *pblk)
+{
+ pblk_rl_update_rates(&pblk->rl);
+}
+
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active)
{
@@ -569,14 +601,15 @@ int pblk_gc_init(struct pblk *pblk)
goto fail_free_writer_kthread;
}
- setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+ timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
gc->gc_active = 0;
gc->gc_forced = 0;
gc->gc_enabled = 1;
gc->w_entries = 0;
- atomic_set(&gc->inflight_gc, 0);
+ atomic_set(&gc->read_inflight_gc, 0);
+ atomic_set(&gc->pipeline_gc, 0);
/* Workqueue that reads valid sectors from a line and submit them to the
* GC writer to be recycled.
@@ -602,7 +635,7 @@ int pblk_gc_init(struct pblk *pblk)
spin_lock_init(&gc->w_lock);
spin_lock_init(&gc->r_lock);
- sema_init(&gc->gc_sem, 128);
+ sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);
INIT_LIST_HEAD(&gc->w_list);
INIT_LIST_HEAD(&gc->r_list);
@@ -625,24 +658,24 @@ void pblk_gc_exit(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
- flush_workqueue(gc->gc_reader_wq);
- flush_workqueue(gc->gc_line_reader_wq);
-
- del_timer(&gc->gc_timer);
+ gc->gc_enabled = 0;
+ del_timer_sync(&gc->gc_timer);
pblk_gc_stop(pblk, 1);
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
+ if (gc->gc_reader_ts)
+ kthread_stop(gc->gc_reader_ts);
+
+ flush_workqueue(gc->gc_reader_wq);
if (gc->gc_reader_wq)
destroy_workqueue(gc->gc_reader_wq);
+ flush_workqueue(gc->gc_line_reader_wq);
if (gc->gc_line_reader_wq)
destroy_workqueue(gc->gc_line_reader_wq);
if (gc->gc_writer_ts)
kthread_stop(gc->gc_writer_ts);
-
- if (gc->gc_reader_ts)
- kthread_stop(gc->gc_reader_ts);
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 1b0f61233c21..695826a06b5d 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -20,8 +20,8 @@
#include "pblk.h"
-static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
- *pblk_w_rq_cache, *pblk_line_meta_cache;
+static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
+ *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
@@ -46,7 +46,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
* user I/Os. Unless stalled, the rate limiter leaves at least 256KB
* available for user I/O.
*/
- if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
+ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
blk_queue_split(q, &bio);
return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
@@ -76,6 +76,28 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static size_t pblk_trans_map_size(struct pblk *pblk)
+{
+ int entry_size = 8;
+
+ if (pblk->ppaf_bitsize < 32)
+ entry_size = 4;
+
+ return entry_size * pblk->rl.nr_secs;
+}
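/*
 * Rough sizing of the translation map computed above: one entry per logical
 * sector, 4 bytes when the ppa format needs fewer than 32 bits, 8 bytes
 * otherwise. As a hypothetical example, a 1 TiB target with 4 KiB sectors has
 * 2^28 sectors, i.e. a 1 GiB table with 4-byte entries (2 GiB with 8-byte
 * ones), which is why pblk_l2p_init() below uses vmalloc() for it.
 */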
+
+#ifdef CONFIG_NVM_DEBUG
+static u32 pblk_l2p_crc(struct pblk *pblk)
+{
+ size_t map_size;
+ u32 crc = ~(u32)0;
+
+ map_size = pblk_trans_map_size(pblk);
+ crc = crc32_le(crc, pblk->trans_map, map_size);
+ return crc;
+}
+#endif
+
static void pblk_l2p_free(struct pblk *pblk)
{
vfree(pblk->trans_map);
@@ -85,12 +107,10 @@ static int pblk_l2p_init(struct pblk *pblk)
{
sector_t i;
struct ppa_addr ppa;
- int entry_size = 8;
+ size_t map_size;
- if (pblk->ppaf_bitsize < 32)
- entry_size = 4;
-
- pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
+ map_size = pblk_trans_map_size(pblk);
+ pblk->trans_map = vmalloc(map_size);
if (!pblk->trans_map)
return -ENOMEM;
@@ -132,7 +152,6 @@ static int pblk_rwb_init(struct pblk *pblk)
}
/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
static int pblk_set_ppaf(struct pblk *pblk)
@@ -182,12 +201,10 @@ static int pblk_set_ppaf(struct pblk *pblk)
static int pblk_init_global_caches(struct pblk *pblk)
{
- char cache_name[PBLK_CACHE_NAME_LEN];
-
down_write(&pblk_lock);
- pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
+ pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
sizeof(struct pblk_line_ws), 0, 0, NULL);
- if (!pblk_blk_ws_cache) {
+ if (!pblk_ws_cache) {
up_write(&pblk_lock);
return -ENOMEM;
}
@@ -195,7 +212,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_rec_cache = kmem_cache_create("pblk_rec",
sizeof(struct pblk_rec_ctx), 0, 0, NULL);
if (!pblk_rec_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
up_write(&pblk_lock);
return -ENOMEM;
}
@@ -203,7 +220,7 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
0, 0, NULL);
if (!pblk_g_rq_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
kmem_cache_destroy(pblk_rec_cache);
up_write(&pblk_lock);
return -ENOMEM;
@@ -212,30 +229,25 @@ static int pblk_init_global_caches(struct pblk *pblk)
pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
0, 0, NULL);
if (!pblk_w_rq_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
+ kmem_cache_destroy(pblk_ws_cache);
kmem_cache_destroy(pblk_rec_cache);
kmem_cache_destroy(pblk_g_rq_cache);
up_write(&pblk_lock);
return -ENOMEM;
}
-
- snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
- pblk->disk->disk_name);
- pblk_line_meta_cache = kmem_cache_create(cache_name,
- pblk->lm.sec_bitmap_len, 0, 0, NULL);
- if (!pblk_line_meta_cache) {
- kmem_cache_destroy(pblk_blk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
up_write(&pblk_lock);
return 0;
}
+static void pblk_free_global_caches(struct pblk *pblk)
+{
+ kmem_cache_destroy(pblk_ws_cache);
+ kmem_cache_destroy(pblk_rec_cache);
+ kmem_cache_destroy(pblk_g_rq_cache);
+ kmem_cache_destroy(pblk_w_rq_cache);
+}
+
static int pblk_core_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -247,70 +259,80 @@ static int pblk_core_init(struct pblk *pblk)
if (pblk_init_global_caches(pblk))
return -ENOMEM;
- pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!pblk->page_pool)
- return -ENOMEM;
+ /* Internal bios can be at most the sectors signaled by the device. */
+ pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
+ 0);
+ if (!pblk->page_bio_pool)
+ goto free_global_caches;
- pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
- pblk_blk_ws_cache);
- if (!pblk->line_ws_pool)
- goto free_page_pool;
+ pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
+ pblk_ws_cache);
+ if (!pblk->gen_ws_pool)
+ goto free_page_bio_pool;
pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
if (!pblk->rec_pool)
- goto free_blk_ws_pool;
+ goto free_gen_ws_pool;
- pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_g_rq_cache);
- if (!pblk->g_rq_pool)
+ if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk_g_rq_cache);
+ if (!pblk->e_rq_pool)
+ goto free_r_rq_pool;
+
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
- goto free_g_rq_pool;
-
- pblk->line_meta_pool =
- mempool_create_slab_pool(PBLK_META_POOL_SIZE,
- pblk_line_meta_cache);
- if (!pblk->line_meta_pool)
- goto free_w_rq_pool;
+ goto free_e_rq_pool;
pblk->close_wq = alloc_workqueue("pblk-close-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
if (!pblk->close_wq)
- goto free_line_meta_pool;
+ goto free_w_rq_pool;
pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!pblk->bb_wq)
goto free_close_wq;
- if (pblk_set_ppaf(pblk))
+ pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!pblk->r_end_wq)
goto free_bb_wq;
+ if (pblk_set_ppaf(pblk))
+ goto free_r_end_wq;
+
if (pblk_rwb_init(pblk))
- goto free_bb_wq;
+ goto free_r_end_wq;
INIT_LIST_HEAD(&pblk->compl_list);
return 0;
+free_r_end_wq:
+ destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
destroy_workqueue(pblk->bb_wq);
free_close_wq:
destroy_workqueue(pblk->close_wq);
-free_line_meta_pool:
- mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
mempool_destroy(pblk->w_rq_pool);
-free_g_rq_pool:
- mempool_destroy(pblk->g_rq_pool);
+free_e_rq_pool:
+ mempool_destroy(pblk->e_rq_pool);
+free_r_rq_pool:
+ mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
mempool_destroy(pblk->rec_pool);
-free_blk_ws_pool:
- mempool_destroy(pblk->line_ws_pool);
-free_page_pool:
- mempool_destroy(pblk->page_pool);
+free_gen_ws_pool:
+ mempool_destroy(pblk->gen_ws_pool);
+free_page_bio_pool:
+ mempool_destroy(pblk->page_bio_pool);
+free_global_caches:
+ pblk_free_global_caches(pblk);
return -ENOMEM;
}
@@ -319,21 +341,20 @@ static void pblk_core_free(struct pblk *pblk)
if (pblk->close_wq)
destroy_workqueue(pblk->close_wq);
+ if (pblk->r_end_wq)
+ destroy_workqueue(pblk->r_end_wq);
+
if (pblk->bb_wq)
destroy_workqueue(pblk->bb_wq);
- mempool_destroy(pblk->page_pool);
- mempool_destroy(pblk->line_ws_pool);
+ mempool_destroy(pblk->page_bio_pool);
+ mempool_destroy(pblk->gen_ws_pool);
mempool_destroy(pblk->rec_pool);
- mempool_destroy(pblk->g_rq_pool);
+ mempool_destroy(pblk->r_rq_pool);
+ mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
- mempool_destroy(pblk->line_meta_pool);
- kmem_cache_destroy(pblk_blk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
- kmem_cache_destroy(pblk_line_meta_cache);
+ pblk_free_global_caches(pblk);
}
static void pblk_luns_free(struct pblk *pblk)
@@ -372,13 +393,11 @@ static void pblk_line_meta_free(struct pblk *pblk)
kfree(l_mg->bb_aux);
kfree(l_mg->vsc_list);
- spin_lock(&l_mg->free_lock);
for (i = 0; i < PBLK_DATA_LINES; i++) {
kfree(l_mg->sline_meta[i]);
pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
kfree(l_mg->eline_meta[i]);
}
- spin_unlock(&l_mg->free_lock);
kfree(pblk->lines);
}
@@ -507,6 +526,13 @@ static int pblk_lines_configure(struct pblk *pblk, int flags)
}
}
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#endif
+
+ /* Free full lines directly as GC has not been started yet */
+ pblk_gc_free_full_lines(pblk);
+
if (!line) {
/* Configure next line for user data */
line = pblk_line_get_first_data(pblk);
@@ -630,7 +656,10 @@ static int pblk_lines_alloc_metadata(struct pblk *pblk)
fail_free_emeta:
while (--i >= 0) {
- vfree(l_mg->eline_meta[i]->buf);
+ if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
+ vfree(l_mg->eline_meta[i]->buf);
+ else
+ kfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
@@ -681,8 +710,8 @@ static int pblk_lines_init(struct pblk *pblk)
lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
- lm->high_thrs = lm->sec_per_line / 2;
- lm->mid_thrs = lm->sec_per_line / 4;
+ lm->mid_thrs = lm->sec_per_line / 2;
+ lm->high_thrs = lm->sec_per_line / 4;
lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
@@ -713,9 +742,13 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns - i;
- lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
- geo->sec_per_blk);
+ lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+
+ lm->min_blk_line = 1;
+ if (geo->nr_luns > 1)
+ lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
+ lm->emeta_sec[0], geo->sec_per_blk);
+
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
lm->blk_per_line);
@@ -833,7 +866,7 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
@@ -890,6 +923,11 @@ static void pblk_exit(void *private)
down_write(&pblk_lock);
pblk_gc_exit(pblk);
pblk_tear_down(pblk);
+
+#ifdef CONFIG_NVM_DEBUG
+ pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
+#endif
+
pblk_free(pblk);
up_write(&pblk_lock);
}
@@ -911,7 +949,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
int ret;
if (dev->identity.dom & NVM_RSP_L2P) {
- pr_err("pblk: device-side L2P table not supported. (%x)\n",
+ pr_err("pblk: host-side L2P table not supported. (%x)\n",
dev->identity.dom);
return ERR_PTR(-EINVAL);
}
@@ -923,6 +961,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->dev = dev;
pblk->disk = tdisk;
pblk->state = PBLK_STATE_RUNNING;
+ pblk->gc.gc_enabled = 0;
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
@@ -944,6 +983,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
atomic_long_set(&pblk->recov_writes, 0);
atomic_long_set(&pblk->recov_writes, 0);
atomic_long_set(&pblk->recov_gc_writes, 0);
+ atomic_long_set(&pblk->recov_gc_reads, 0);
#endif
atomic_long_set(&pblk->read_failed, 0);
@@ -1012,6 +1052,10 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->rwb.nr_entries);
wake_up_process(pblk->writer_ts);
+
+ /* Check if we need to start GC */
+ pblk_gc_should_kick(pblk);
+
return pblk;
fail_stop_writer:
@@ -1044,6 +1088,7 @@ static struct nvm_tgt_type tt_pblk = {
.sysfs_init = pblk_sysfs_init,
.sysfs_exit = pblk_sysfs_exit,
+ .owner = THIS_MODULE,
};
static int __init pblk_module_init(void)
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index fddb924f6dde..6f3ecde2140f 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -25,16 +25,28 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
unsigned int valid_secs)
{
struct pblk_line *line = pblk_line_get_data(pblk);
- struct pblk_emeta *emeta = line->emeta;
+ struct pblk_emeta *emeta;
struct pblk_w_ctx *w_ctx;
- __le64 *lba_list = emeta_to_lbas(pblk, emeta->buf);
+ __le64 *lba_list;
u64 paddr;
int nr_secs = pblk->min_write_pgs;
int i;
+ if (pblk_line_is_full(line)) {
+ struct pblk_line *prev_line = line;
+
+ line = pblk_line_replace_data(pblk);
+ pblk_line_close_meta(pblk, prev_line);
+ }
+
+ emeta = line->emeta;
+ lba_list = emeta_to_lbas(pblk, emeta->buf);
+
paddr = pblk_alloc_page(pblk, line, nr_secs);
for (i = 0; i < nr_secs; i++, paddr++) {
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
/* ppa to be sent to the device */
ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
@@ -51,22 +63,14 @@ static void pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
w_ctx->ppa = ppa_list[i];
meta_list[i].lba = cpu_to_le64(w_ctx->lba);
lba_list[paddr] = cpu_to_le64(w_ctx->lba);
- line->nr_valid_lbas++;
+ if (lba_list[paddr] != addr_empty)
+ line->nr_valid_lbas++;
} else {
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
lba_list[paddr] = meta_list[i].lba = addr_empty;
__pblk_map_invalidate(pblk, line, paddr);
}
}
- if (pblk_line_is_full(line)) {
- struct pblk_line *prev_line = line;
-
- pblk_line_replace_data(pblk);
- pblk_line_close_meta(pblk, prev_line);
- }
-
pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
}
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 9bc32578a766..b8f78e401482 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -201,8 +201,7 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
return subm;
}
-static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
- unsigned int to_update)
+static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_line *line;
@@ -213,7 +212,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
int flags;
for (i = 0; i < to_update; i++) {
- entry = &rb->entries[*l2p_upd];
+ entry = &rb->entries[rb->l2p_update];
w_ctx = &entry->w_ctx;
flags = READ_ONCE(entry->w_ctx.flags);
@@ -230,7 +229,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
- *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
+ rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
}
pblk_rl_out(&pblk->rl, user_io, gc_io);
@@ -258,7 +257,7 @@ static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
count = nr_entries - space;
/* l2p_update used exclusively under rb->w_lock */
- ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);
+ ret = __pblk_rb_update_l2p(rb, count);
out:
return ret;
@@ -280,7 +279,7 @@ void pblk_rb_sync_l2p(struct pblk_rb *rb)
sync = smp_load_acquire(&rb->sync);
to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
- __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);
+ __pblk_rb_update_l2p(rb, to_update);
spin_unlock(&rb->w_lock);
}
@@ -325,8 +324,8 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
}
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
- struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
- unsigned int ring_pos)
+ struct pblk_w_ctx w_ctx, struct pblk_line *line,
+ u64 paddr, unsigned int ring_pos)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -341,7 +340,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
__pblk_rb_write_entry(rb, data, w_ctx, entry);
- if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line))
+ if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
entry->w_ctx.lba = ADDR_EMPTY;
flags = w_ctx.flags | PBLK_WRITTEN_DATA;
@@ -355,7 +354,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
{
struct pblk_rb_entry *entry;
unsigned int subm, sync_point;
- int flags;
subm = READ_ONCE(rb->subm);
@@ -369,12 +367,6 @@ static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
entry = &rb->entries[sync_point];
- flags = READ_ONCE(entry->w_ctx.flags);
- flags |= PBLK_FLUSH_ENTRY;
-
- /* Release flags on context. Protect from writes */
- smp_store_release(&entry->w_ctx.flags, flags);
-
/* Protect syncs */
smp_store_release(&rb->sync_point, sync_point);
@@ -454,6 +446,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
/* Protect from read count */
smp_store_release(&rb->mem, mem);
+
return 1;
}
@@ -558,12 +551,13 @@ out:
* persist data on the write buffer to the media.
*/
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
- struct bio *bio, unsigned int pos,
- unsigned int nr_entries, unsigned int count)
+ unsigned int pos, unsigned int nr_entries,
+ unsigned int count)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct request_queue *q = pblk->dev->q;
struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
+ struct bio *bio = rqd->bio;
struct pblk_rb_entry *entry;
struct page *page;
unsigned int pad = 0, to_read = nr_entries;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index d682e89e6493..ca79d8fb3e60 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -39,21 +39,15 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned long *read_bitmap)
+ sector_t blba, unsigned long *read_bitmap)
{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio *bio = rqd->bio;
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
- sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
int i, j = 0;
- /* logic error: lba out-of-bounds. Ignore read request */
- if (blba + nr_secs >= pblk->rl.nr_secs) {
- WARN(1, "pblk: read lbas out of bounds\n");
- return;
- }
-
pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
for (i = 0; i < nr_secs; i++) {
@@ -63,6 +57,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
+ meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
if (unlikely(!advanced_bio)) {
bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
@@ -82,6 +77,7 @@ retry:
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
+ meta_list[i].lba = cpu_to_le64(lba);
advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
@@ -117,10 +113,51 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_OK;
}
-static void pblk_end_io_read(struct nvm_rq *rqd)
+static void pblk_read_check(struct pblk *pblk, struct nvm_rq *rqd,
+ sector_t blba)
+{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
+ int nr_lbas = rqd->nr_ppas;
+ int i;
+
+ for (i = 0; i < nr_lbas; i++) {
+ u64 lba = le64_to_cpu(meta_list[i].lba);
+
+ if (lba == ADDR_EMPTY)
+ continue;
+
+ WARN(lba != blba + i, "pblk: corrupted read LBA\n");
+ }
+}
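/*
 * pblk_read_check() compares the lba carried in each sector's OOB metadata
 * with the lba that was requested (r_ctx->lba plus the sector offset),
 * warning on mismatch. Entries marked ADDR_EMPTY are skipped; the cache-hit
 * paths in pblk_read_rq()/pblk_read_ppalist_rq() pre-fill meta_list[].lba
 * with the requested lba and the empty-sector paths mark it ADDR_EMPTY, so
 * both stay consistent with this check.
 */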
+
+static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int i;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ struct ppa_addr ppa = ppa_list[i];
+ struct pblk_line *line;
+
+ line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ kref_put(&line->ref, pblk_line_put_wq);
+ }
+}
+
+static void pblk_end_user_read(struct bio *bio)
+{
+#ifdef CONFIG_NVM_DEBUG
+ WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
+#endif
+ bio_endio(bio);
+ bio_put(bio);
+}
+
+static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
+ bool put_line)
{
- struct pblk *pblk = rqd->private;
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
@@ -131,47 +168,51 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
#endif
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
+ pblk_read_check(pblk, rqd, r_ctx->lba);
bio_put(bio);
- if (r_ctx->private) {
- struct bio *orig_bio = r_ctx->private;
+ if (r_ctx->private)
+ pblk_end_user_read((struct bio *)r_ctx->private);
-#ifdef CONFIG_NVM_DEBUG
- WARN_ONCE(orig_bio->bi_status, "pblk: corrupted read bio\n");
-#endif
- bio_endio(orig_bio);
- bio_put(orig_bio);
- }
+ if (put_line)
+ pblk_read_put_rqd_kref(pblk, rqd);
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads);
#endif
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
atomic_dec(&pblk->inflight_io);
}
+static void pblk_end_io_read(struct nvm_rq *rqd)
+{
+ struct pblk *pblk = rqd->private;
+
+ __pblk_end_io_read(pblk, rqd, true);
+}
+
static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
unsigned int bio_init_idx,
unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio_vec src_bv, dst_bv;
void *ppa_ptr = NULL;
void *src_p, *dst_p;
dma_addr_t dma_ppa_list = 0;
+ __le64 *lba_list_mem, *lba_list_media;
int nr_secs = rqd->nr_ppas;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
int i, ret, hole;
- DECLARE_COMPLETION_ONSTACK(wait);
+
+ /* Re-use allocated memory for intermediate lbas */
+ lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
+ lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
new_bio = bio_alloc(GFP_KERNEL, nr_holes);
- if (!new_bio) {
- pr_err("pblk: could not alloc read bio\n");
- return NVM_IO_ERR;
- }
if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
goto err;
@@ -181,34 +222,29 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
goto err;
}
+ for (i = 0; i < nr_secs; i++)
+ lba_list_mem[i] = meta_list[i].lba;
+
new_bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
- new_bio->bi_private = &wait;
- new_bio->bi_end_io = pblk_end_bio_sync;
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
- rqd->end_io = NULL;
- if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+ if (unlikely(nr_holes == 1)) {
ppa_ptr = rqd->ppa_list;
dma_ppa_list = rqd->dma_ppa_list;
rqd->ppa_addr = rqd->ppa_list[0];
}
- ret = pblk_submit_read_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
bio_put(rqd->bio);
- pr_err("pblk: read IO submission failed\n");
+ pr_err("pblk: sync read IO submission failed\n");
goto err;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: partial read I/O timed out\n");
- }
-
if (rqd->error) {
atomic_long_inc(&pblk->read_failed);
#ifdef CONFIG_NVM_DEBUG
@@ -216,15 +252,31 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
#endif
}
- if (unlikely(nr_secs > 1 && nr_holes == 1)) {
+ if (unlikely(nr_holes == 1)) {
+ struct ppa_addr ppa;
+
+ ppa = rqd->ppa_addr;
rqd->ppa_list = ppa_ptr;
rqd->dma_ppa_list = dma_ppa_list;
+ rqd->ppa_list[0] = ppa;
+ }
+
+ for (i = 0; i < nr_secs; i++) {
+ lba_list_media[i] = meta_list[i].lba;
+ meta_list[i].lba = lba_list_mem[i];
}
/* Fill the holes in the original bio */
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
+ int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ struct pblk_line *line = &pblk->lines[line_id];
+
+ kref_put(&line->ref, pblk_line_put);
+
+ meta_list[hole].lba = lba_list_media[i];
+
src_bv = new_bio->bi_io_vec[i++];
dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -238,7 +290,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
kunmap_atomic(src_p);
kunmap_atomic(dst_p);
- mempool_free(src_bv.bv_page, pblk->page_pool);
+ mempool_free(src_bv.bv_page, pblk->page_bio_pool);
hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
} while (hole < nr_secs);
@@ -246,34 +298,26 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
bio_put(new_bio);
/* Complete the original bio and associated request */
+ bio_endio(bio);
rqd->bio = bio;
rqd->nr_ppas = nr_secs;
- rqd->private = pblk;
- bio_endio(bio);
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_OK;
err:
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
- rqd->private = pblk;
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_ERR;
}
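/*
 * Note on the scratch lba lists used in the partial-read path above: before
 * the synchronous device read, the lbas that the lookup placed in meta_list[]
 * are saved to lba_list_mem; afterwards the lbas reported by the media go to
 * lba_list_media and the saved values are restored. While filling the holes,
 * meta_list[hole].lba is set from lba_list_media, so pblk_read_check() sees
 * consistent metadata whether a sector came from the write buffer or from the
 * device. Both scratch arrays live in spare space behind rqd->ppa_list in the
 * DMA metadata buffer (an assumption based on the pblk_dma_ppa_size offsets).
 */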
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned long *read_bitmap)
+ sector_t lba, unsigned long *read_bitmap)
{
+ struct pblk_sec_meta *meta_list = rqd->meta_list;
struct bio *bio = rqd->bio;
struct ppa_addr ppa;
- sector_t lba = pblk_get_lba(bio);
-
- /* logic error: lba out-of-bounds. Ignore read request */
- if (lba >= pblk->rl.nr_secs) {
- WARN(1, "pblk: read lba out of bounds\n");
- return;
- }
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
@@ -284,6 +328,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(ppa)) {
WARN_ON(test_and_set_bit(0, read_bitmap));
+ meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
return;
}
@@ -295,9 +340,12 @@ retry:
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
+
WARN_ON(test_and_set_bit(0, read_bitmap));
+ meta_list[0].lba = cpu_to_le64(lba);
+
#ifdef CONFIG_NVM_DEBUG
- atomic_long_inc(&pblk->cache_reads);
+ atomic_long_inc(&pblk->cache_reads);
#endif
} else {
rqd->ppa_addr = ppa;
@@ -309,22 +357,24 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
+ struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
- unsigned long read_bitmap; /* Max 64 ppas per request */
unsigned int bio_init_idx;
+ unsigned long read_bitmap; /* Max 64 ppas per request */
int ret = NVM_IO_ERR;
- if (nr_secs > PBLK_MAX_REQ_ADDRS)
+ /* logic error: lba out-of-bounds. Ignore read request */
+ if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
+ WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
+ (unsigned long long)blba, nr_secs);
return NVM_IO_ERR;
+ }
bitmap_zero(&read_bitmap, nr_secs);
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd)) {
- pr_err_ratelimited("pblk: not able to alloc rqd");
- return NVM_IO_ERR;
- }
+ rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
rqd->bio = bio;
@@ -332,6 +382,9 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
+ r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->lba = blba;
+
/* Save the index for this bio's start. This is needed in case
* we need to fill a partial read.
*/
@@ -348,23 +401,22 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
- pblk_read_ppalist_rq(pblk, rqd, &read_bitmap);
+ pblk_read_ppalist_rq(pblk, rqd, blba, &read_bitmap);
} else {
- pblk_read_rq(pblk, rqd, &read_bitmap);
+ pblk_read_rq(pblk, rqd, blba, &read_bitmap);
}
bio_get(bio);
if (bitmap_full(&read_bitmap, nr_secs)) {
bio_endio(bio);
atomic_inc(&pblk->inflight_io);
- pblk_end_io_read(rqd);
+ __pblk_end_io_read(pblk, rqd, false);
return NVM_IO_OK;
}
/* All sectors are to be read from the device */
if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) {
struct bio *int_bio = NULL;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
/* Clone read bio to deal with read errors internally */
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
@@ -399,40 +451,46 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_OK;
fail_rqd_free:
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, u64 *lba_list,
- unsigned int nr_secs)
+ u64 *paddr_list_gc, unsigned int nr_secs)
{
- struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_gc;
int valid_secs = 0;
int i;
- pblk_lookup_l2p_rand(pblk, ppas, lba_list, nr_secs);
+ pblk_lookup_l2p_rand(pblk, ppa_list_l2p, lba_list, nr_secs);
for (i = 0; i < nr_secs; i++) {
- if (pblk_addr_in_cache(ppas[i]) || ppas[i].g.blk != line->id ||
- pblk_ppa_empty(ppas[i])) {
- lba_list[i] = ADDR_EMPTY;
+ if (lba_list[i] == ADDR_EMPTY)
+ continue;
+
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_list_gc[i], line->id);
+ if (!pblk_ppa_comp(ppa_list_l2p[i], ppa_gc)) {
+ paddr_list_gc[i] = lba_list[i] = ADDR_EMPTY;
continue;
}
- rqd->ppa_list[valid_secs++] = ppas[i];
+ rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];
}
#ifdef CONFIG_NVM_DEBUG
atomic_long_add(valid_secs, &pblk->inflight_reads);
#endif
+
return valid_secs;
}
static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
- struct pblk_line *line, sector_t lba)
+ struct pblk_line *line, sector_t lba,
+ u64 paddr_gc)
{
- struct ppa_addr ppa;
+ struct ppa_addr ppa_l2p, ppa_gc;
int valid_secs = 0;
if (lba == ADDR_EMPTY)
@@ -445,15 +503,14 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
}
spin_lock(&pblk->trans_lock);
- ppa = pblk_trans_map_get(pblk, lba);
+ ppa_l2p = pblk_trans_map_get(pblk, lba);
spin_unlock(&pblk->trans_lock);
- /* Ignore updated values until the moment */
- if (pblk_addr_in_cache(ppa) || ppa.g.blk != line->id ||
- pblk_ppa_empty(ppa))
+ ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, line->id);
+ if (!pblk_ppa_comp(ppa_l2p, ppa_gc))
goto out;
- rqd->ppa_addr = ppa;
+ rqd->ppa_addr = ppa_l2p;
valid_secs = 1;
#ifdef CONFIG_NVM_DEBUG
@@ -464,42 +521,44 @@ out:
return valid_secs;
}
-int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
- unsigned int nr_secs, unsigned int *secs_to_gc,
- struct pblk_line *line)
+int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct bio *bio;
struct nvm_rq rqd;
- int ret, data_len;
- DECLARE_COMPLETION_ONSTACK(wait);
+ int data_len;
+ int ret = NVM_IO_OK;
memset(&rqd, 0, sizeof(struct nvm_rq));
rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
&rqd.dma_meta_list);
if (!rqd.meta_list)
- return NVM_IO_ERR;
+ return -ENOMEM;
- if (nr_secs > 1) {
+ if (gc_rq->nr_secs > 1) {
rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
- *secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, line, lba_list,
- nr_secs);
- if (*secs_to_gc == 1)
+ gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
+ gc_rq->lba_list,
+ gc_rq->paddr_list,
+ gc_rq->nr_secs);
+ if (gc_rq->secs_to_gc == 1)
rqd.ppa_addr = rqd.ppa_list[0];
} else {
- *secs_to_gc = read_rq_gc(pblk, &rqd, line, lba_list[0]);
+ gc_rq->secs_to_gc = read_rq_gc(pblk, &rqd, gc_rq->line,
+ gc_rq->lba_list[0],
+ gc_rq->paddr_list[0]);
}
- if (!(*secs_to_gc))
+ if (!(gc_rq->secs_to_gc))
goto out;
- data_len = (*secs_to_gc) * geo->sec_size;
- bio = pblk_bio_map_addr(pblk, data, *secs_to_gc, data_len,
- PBLK_KMALLOC_META, GFP_KERNEL);
+ data_len = (gc_rq->secs_to_gc) * geo->sec_size;
+ bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len,
+ PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio));
goto err_free_dma;
@@ -509,23 +568,16 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.opcode = NVM_OP_PREAD;
- rqd.end_io = pblk_end_io_sync;
- rqd.private = &wait;
- rqd.nr_ppas = *secs_to_gc;
+ rqd.nr_ppas = gc_rq->secs_to_gc;
rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio;
- ret = pblk_submit_read_io(pblk, &rqd);
- if (ret) {
- bio_endio(bio);
+ if (pblk_submit_io_sync(pblk, &rqd)) {
+ ret = -EIO;
pr_err("pblk: GC read request failed\n");
- goto err_free_dma;
+ goto err_free_bio;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: GC read I/O timed out\n");
- }
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
@@ -536,16 +588,18 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
}
#ifdef CONFIG_NVM_DEBUG
- atomic_long_add(*secs_to_gc, &pblk->sync_reads);
- atomic_long_add(*secs_to_gc, &pblk->recov_gc_reads);
- atomic_long_sub(*secs_to_gc, &pblk->inflight_reads);
+ atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads);
+ atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads);
+ atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads);
#endif
out:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
- return NVM_IO_OK;
+ return ret;
+err_free_bio:
+ bio_put(bio);
err_free_dma:
nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
- return NVM_IO_ERR;
+ return ret;
}
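The GC read path above now takes a single struct pblk_gc_rq instead of separate lba_list/data/nr_secs arguments and submits synchronously via pblk_submit_io_sync(), so the on-stack completion and its timeout are gone. A minimal caller sketch, assuming a helper gc_read_one_chunk() that is not part of the patch (only the pblk_gc_rq fields, pblk_submit_read_gc() and pblk_write_gc_to_cache() come from this series):

/* Hedged sketch: how a GC worker might drive the new interface. */
static int gc_read_one_chunk(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	int ret;

	/* line, nr_secs, lba_list[], paddr_list[] and data are filled
	 * by the GC line scanner before this point.
	 */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret)
		return ret;		/* -ENOMEM, -EIO, ... */

	if (!gc_rq->secs_to_gc)
		return 0;		/* nothing left valid on this line */

	/* Valid sectors are now in gc_rq->data, ready to be rewritten */
	return pblk_write_gc_to_cache(pblk, gc_rq);
}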
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index cb556e06673e..eadb3eb5d4dc 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -34,10 +34,6 @@ void pblk_submit_rec(struct work_struct *work)
max_secs);
bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
- if (!bio) {
- pr_err("pblk: not able to create recovery bio\n");
- return;
- }
bio->bi_iter.bi_sector = 0;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -71,7 +67,7 @@ void pblk_submit_rec(struct work_struct *work)
err:
bio_put(bio);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
}
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
@@ -84,12 +80,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_c_ctx *rec_ctx;
int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;
- rec_rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rec_rqd)) {
- pr_err("pblk: could not create recovery req.\n");
- return -ENOMEM;
- }
-
+ rec_rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
rec_ctx = nvm_rq_to_pdu(rec_rqd);
/* Copy completion bitmap, but exclude the first X completed entries */
@@ -142,19 +133,19 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct pblk_emeta *emeta = line->emeta;
struct line_emeta *emeta_buf = emeta->buf;
__le64 *lba_list;
- int data_start;
- int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
- int i;
+ u64 data_start, data_end;
+ u64 nr_valid_lbas, nr_lbas = 0;
+ u64 i;
lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
if (!lba_list)
return 1;
data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
- nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
+ data_end = line->emeta_ssec;
nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);
- for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
+ for (i = data_start; i < data_end; i++) {
struct ppa_addr ppa;
int pos;
@@ -181,8 +172,8 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
}
if (nr_valid_lbas != nr_lbas)
- pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
- line->id, emeta_buf->nr_valid_lbas, nr_lbas);
+ pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n",
+ line->id, nr_valid_lbas, nr_lbas);
line->left_msecs = 0;
@@ -225,7 +216,6 @@ static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
int rq_ppas, rq_len;
int i, j;
int ret = 0;
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -262,8 +252,6 @@ next_read_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -289,19 +277,13 @@ next_read_rq:
}
/* If read fails, more padding is needed */
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- return -EINTR;
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
@@ -338,13 +320,10 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
{
struct pblk_pad_rq *pad_rq = rqd->private;
struct pblk *pblk = pad_rq->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
- bio_put(rqd->bio);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
atomic_dec(&pblk->inflight_io);
kref_put(&pad_rq->ref, pblk_recov_complete);
@@ -404,25 +383,21 @@ next_pad_rq:
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
- rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rqd)) {
- ret = PTR_ERR(rqd);
- goto fail_free_meta;
- }
-
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_rqd;
+ goto fail_free_meta;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
rqd->meta_list = meta_list;
rqd->nr_ppas = rq_ppas;
rqd->ppa_list = ppa_list;
@@ -490,8 +465,6 @@ free_rq:
fail_free_bio:
bio_put(bio);
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, WRITE);
fail_free_meta:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
@@ -522,7 +495,6 @@ static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
int ret = 0;
int rec_round;
int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -557,8 +529,6 @@ next_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -584,18 +554,13 @@ next_rq:
addr_to_gen_ppa(pblk, w_ptr, line->id);
}
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* This should not happen since the read failed during normal recovery,
* but the media works funny sometimes...
@@ -663,7 +628,6 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
int i, j;
int ret = 0;
int left_ppas = pblk_calc_sec_in_line(pblk, line);
- DECLARE_COMPLETION_ONSTACK(wait);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -696,8 +660,6 @@ next_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
- rqd->end_io = pblk_end_io_sync;
- rqd->private = &wait;
if (pblk_io_aligned(pblk, rq_ppas))
rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -723,19 +685,14 @@ next_rq:
addr_to_gen_ppa(pblk, paddr, line->id);
}
- ret = pblk_submit_io(pblk, rqd);
+ ret = pblk_submit_io_sync(pblk, rqd);
if (ret) {
pr_err("pblk: I/O submission failed: %d\n", ret);
bio_put(bio);
return ret;
}
- if (!wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pr_err("pblk: L2P recovery read timed out\n");
- }
atomic_dec(&pblk->inflight_io);
- reinit_completion(&wait);
/* Reached the end of the written line */
if (rqd->error) {
@@ -785,15 +742,9 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
dma_addr_t dma_ppa_list, dma_meta_list;
int done, ret = 0;
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd))
- return PTR_ERR(rqd);
-
meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
- if (!meta_list) {
- ret = -ENOMEM;
- goto free_rqd;
- }
+ if (!meta_list)
+ return -ENOMEM;
ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
@@ -804,6 +755,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
goto free_meta_list;
}
+ rqd = pblk_alloc_rqd(pblk, PBLK_READ);
+
p.ppa_list = ppa_list;
p.meta_list = meta_list;
p.rqd = rqd;
@@ -832,8 +785,6 @@ out:
kfree(data);
free_meta_list:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
-free_rqd:
- pblk_free_rqd(pblk, rqd, READ);
return ret;
}
@@ -851,11 +802,33 @@ static void pblk_recov_line_add_ordered(struct list_head *head,
__list_add(&line->list, t->list.prev, &t->list);
}
-struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
+static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
+ unsigned int emeta_secs;
+ u64 emeta_start;
+ struct ppa_addr ppa;
+ int pos;
+
+ emeta_secs = lm->emeta_sec[0];
+ emeta_start = lm->sec_per_line;
+
+ while (emeta_secs) {
+ emeta_start--;
+ ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ pos = pblk_ppa_to_pos(geo, ppa);
+ if (!test_bit(pos, line->blk_bitmap))
+ emeta_secs--;
+ }
+
+ return emeta_start;
+}
+
+struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line, *tline, *data_line = NULL;
struct pblk_smeta *smeta;
@@ -900,9 +873,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
continue;
- if (le16_to_cpu(smeta_buf->header.version) != 1) {
+ if (smeta_buf->header.version != SMETA_VERSION) {
pr_err("pblk: found incompatible line version %u\n",
- smeta_buf->header.version);
+ le16_to_cpu(smeta_buf->header.version));
return ERR_PTR(-EINVAL);
}
@@ -954,15 +927,9 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
/* Verify closed blocks and recover this portion of L2P table*/
list_for_each_entry_safe(line, tline, &recov_list, list) {
- int off, nr_bb;
-
recovered_lines++;
- /* Calculate where emeta starts based on the line bb */
- off = lm->sec_per_line - lm->emeta_sec[0];
- nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
- off -= nr_bb * geo->sec_per_pl;
- line->emeta_ssec = off;
+ line->emeta_ssec = pblk_line_emeta_start(pblk, line);
line->emeta = emeta;
memset(line->emeta->buf, 0, lm->emeta_len[0]);
@@ -987,7 +954,7 @@ next:
list_move_tail(&line->list, move_list);
spin_unlock(&l_mg->gc_lock);
- mempool_free(line->map_bitmap, pblk->line_meta_pool);
+ kfree(line->map_bitmap);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
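The new pblk_line_emeta_start() walks backwards from the end of the line and only counts sectors that land on good blocks, whereas the old calculation subtracted a whole plane's worth of sectors per bad block from a fixed offset, which goes wrong when a bad block falls inside the emeta region itself. A small self-contained sketch with toy geometry (the striping model pos = sec % NR_LUNS and all numbers are made up; the real driver maps sectors with addr_to_pblk_ppa()):

#include <stdio.h>

#define NR_LUNS		4
#define SEC_PER_PL	8			/* sectors per block stripe */
#define SEC_PER_LINE	(NR_LUNS * SEC_PER_PL)	/* 32 */
#define EMETA_SEC	6

static const int bad_pos[NR_LUNS] = { 0, 0, 1, 0 };	/* LUN 2 is bad */

int main(void)
{
	int emeta_secs = EMETA_SEC;
	int emeta_start = SEC_PER_LINE;
	int nr_bad = 0, old_start, i;

	/* Old calculation: bad blocks only shrink a fixed tail offset */
	for (i = 0; i < NR_LUNS; i++)
		nr_bad += bad_pos[i];
	old_start = SEC_PER_LINE - EMETA_SEC - nr_bad * SEC_PER_PL;

	/* New calculation: walk back, skipping sectors on bad blocks */
	while (emeta_secs) {
		emeta_start--;
		if (!bad_pos[emeta_start % NR_LUNS])
			emeta_secs--;
	}

	printf("old emeta_ssec=%d, new emeta_ssec=%d\n", old_start, emeta_start);
	return 0;
}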
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index 2e6a5361baf0..dacc71922260 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -96,9 +96,11 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
*
* Only the total number of free blocks is used to configure the rate limiter.
*/
-static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
+void pblk_rl_update_rates(struct pblk_rl *rl)
{
+ struct pblk *pblk = container_of(rl, struct pblk, rl);
unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
+ int max = rl->rb_budget;
if (free_blocks >= rl->high) {
rl->rb_user_max = max;
@@ -124,23 +126,18 @@ static int pblk_rl_update_rates(struct pblk_rl *rl, unsigned long max)
rl->rb_state = PBLK_RL_LOW;
}
- return rl->rb_state;
+ if (rl->rb_state == (PBLK_RL_MID | PBLK_RL_LOW))
+ pblk_gc_should_start(pblk);
+ else
+ pblk_gc_should_stop(pblk);
}
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
- struct pblk *pblk = container_of(rl, struct pblk, rl);
int blk_in_line = atomic_read(&line->blk_in_line);
- int ret;
atomic_add(blk_in_line, &rl->free_blocks);
- /* Rates will not change that often - no need to lock update */
- ret = pblk_rl_update_rates(rl, rl->rb_budget);
-
- if (ret == (PBLK_RL_MID | PBLK_RL_LOW))
- pblk_gc_should_start(pblk);
- else
- pblk_gc_should_stop(pblk);
+ pblk_rl_update_rates(rl);
}
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
@@ -148,19 +145,7 @@ void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
int blk_in_line = atomic_read(&line->blk_in_line);
atomic_sub(blk_in_line, &rl->free_blocks);
-}
-
-void pblk_gc_should_kick(struct pblk *pblk)
-{
- struct pblk_rl *rl = &pblk->rl;
- int ret;
-
- /* Rates will not change that often - no need to lock update */
- ret = pblk_rl_update_rates(rl, rl->rb_budget);
- if (ret == (PBLK_RL_MID | PBLK_RL_LOW))
- pblk_gc_should_start(pblk);
- else
- pblk_gc_should_stop(pblk);
+ pblk_rl_update_rates(rl);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -168,19 +153,14 @@ int pblk_rl_high_thrs(struct pblk_rl *rl)
return rl->high;
}
-int pblk_rl_low_thrs(struct pblk_rl *rl)
-{
- return rl->low;
-}
-
-int pblk_rl_sysfs_rate_show(struct pblk_rl *rl)
+int pblk_rl_max_io(struct pblk_rl *rl)
{
- return rl->rb_user_max;
+ return rl->rb_max_io;
}
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
{
- struct pblk_rl *rl = (struct pblk_rl *)data;
+ struct pblk_rl *rl = from_timer(rl, t, u_timer);
/* Release user I/O state. Protect from GC */
smp_store_release(&rl->rb_user_active, 0);
@@ -214,6 +194,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
/* To start with, all buffer is available to user I/O writers */
rl->rb_budget = budget;
rl->rb_user_max = budget;
+ rl->rb_max_io = budget >> 1;
rl->rb_gc_max = 0;
rl->rb_state = PBLK_RL_HIGH;
@@ -221,7 +202,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
- setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+ timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
rl->rb_user_active = 0;
rl->rb_gc_active = 0;
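The pblk_rl_u_timer() change is part of the tree-wide timer API conversion that also appears below in rrpc.c, smu.c and mailbox-altera.c: the callback now receives the struct timer_list itself and recovers its container with from_timer(), and setup_timer() with a cast data argument becomes timer_setup(). A minimal before/after sketch (the foo names are placeholders, not from the patch):

#include <linux/timer.h>

struct foo {
	struct timer_list my_timer;
	/* ... */
};

/* Old style: callback gets an opaque unsigned long */
static void foo_timer_fn_old(unsigned long data)
{
	struct foo *f = (struct foo *)data;
	/* use f */
}
/* setup_timer(&f->my_timer, foo_timer_fn_old, (unsigned long)f); */

/* New style: callback gets the timer, container recovered by member */
static void foo_timer_fn(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, my_timer);
	/* use f */
}
/* timer_setup(&f->my_timer, foo_timer_fn, 0); */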
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 95fb434e2f01..cd49e8875d4e 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -253,7 +253,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz += snprintf(page + sz, PAGE_SIZE - sz,
"GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, queue:%d\n",
gc_full, gc_high, gc_mid, gc_low, gc_empty,
- atomic_read(&pblk->gc.inflight_gc));
+ atomic_read(&pblk->gc.read_inflight_gc));
sz += snprintf(page + sz, PAGE_SIZE - sz,
"data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 3ad9e56d2473..6c1cafafef53 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -20,7 +20,6 @@
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
- struct nvm_tgt_dev *dev = pblk->dev;
struct bio *original_bio;
unsigned long ret;
int i;
@@ -33,16 +32,18 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
bio_endio(original_bio);
}
+ if (c_ctx->nr_padded)
+ pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
+ c_ctx->nr_padded);
+
#ifdef CONFIG_NVM_DEBUG
- atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
+ atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif
ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-
bio_put(rqd->bio);
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
return ret;
}
@@ -107,10 +108,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
ppa_list = &rqd->ppa_addr;
recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
- if (!recovery) {
- pr_err("pblk: could not allocate recovery context\n");
- return;
- }
+
INIT_LIST_HEAD(&recovery->failed);
bit = -1;
@@ -175,7 +173,6 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
struct pblk *pblk = rqd->private;
- struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
struct pblk_line *line = m_ctx->private;
struct pblk_emeta *emeta = line->emeta;
@@ -187,19 +184,13 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
pblk_log_write_err(pblk, rqd);
pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
}
-#ifdef CONFIG_NVM_DEBUG
- else
- WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
-#endif
sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
if (sync == emeta->nr_entries)
- pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
- pblk->close_wq);
+ pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
+ GFP_ATOMIC, pblk->close_wq);
- bio_put(rqd->bio);
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
atomic_dec(&pblk->inflight_io);
}
@@ -213,7 +204,7 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
/* Setup write request */
rqd->opcode = NVM_OP_PWRITE;
rqd->nr_ppas = nr_secs;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
rqd->private = pblk;
rqd->end_io = end_io;
@@ -229,15 +220,16 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
}
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
- struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
+ struct ppa_addr *erase_ppa)
{
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line *e_line = pblk_line_get_erase(pblk);
+ struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
unsigned int valid = c_ctx->nr_valid;
unsigned int padded = c_ctx->nr_padded;
unsigned int nr_secs = valid + padded;
unsigned long *lun_bitmap;
- int ret = 0;
+ int ret;
lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
if (!lun_bitmap)
@@ -279,7 +271,7 @@ int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);
rqd->ppa_status = (u64)0;
- rqd->flags = pblk_set_progr_mode(pblk, WRITE);
+ rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
return ret;
}
@@ -303,55 +295,6 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
return secs_to_sync;
}
-static inline int pblk_valid_meta_ppa(struct pblk *pblk,
- struct pblk_line *meta_line,
- struct ppa_addr *ppa_list, int nr_ppas)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line *data_line;
- struct ppa_addr ppa, ppa_opt;
- u64 paddr;
- int i;
-
- data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
- paddr = pblk_lookup_page(pblk, meta_line);
- ppa = addr_to_gen_ppa(pblk, paddr, 0);
-
- if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
- return 1;
-
- /* Schedule a metadata I/O that is half the distance from the data I/O
- * with regards to the number of LUNs forming the pblk instance. This
- * balances LUN conflicts across every I/O.
- *
- * When the LUN configuration changes (e.g., due to GC), this distance
- * can align, which would result on a LUN deadlock. In this case, modify
- * the distance to not be optimal, but allow metadata I/Os to succeed.
- */
- ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
- if (unlikely(ppa_opt.ppa == ppa.ppa)) {
- data_line->meta_distance--;
- return 0;
- }
-
- for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
- if (ppa_list[i].g.ch == ppa_opt.g.ch &&
- ppa_list[i].g.lun == ppa_opt.g.lun)
- return 1;
-
- if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
- for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
- if (ppa_list[i].g.ch == ppa.g.ch &&
- ppa_list[i].g.lun == ppa.g.lun)
- return 0;
-
- return 1;
- }
-
- return 0;
-}
-
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -370,11 +313,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
int i, j;
int ret;
- rqd = pblk_alloc_rqd(pblk, READ);
- if (IS_ERR(rqd)) {
- pr_err("pblk: cannot allocate write req.\n");
- return PTR_ERR(rqd);
- }
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+
m_ctx = nvm_rq_to_pdu(rqd);
m_ctx->private = meta_line;
@@ -407,8 +347,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
if (emeta->mem >= lm->emeta_len[0]) {
spin_lock(&l_mg->close_lock);
list_del(&meta_line->list);
- WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
- "pblk: corrupt meta line %d\n", meta_line->id);
spin_unlock(&l_mg->close_lock);
}
@@ -428,18 +366,51 @@ fail_rollback:
pblk_dealloc_page(pblk, meta_line, rq_ppas);
list_add(&meta_line->list, &meta_line->list);
spin_unlock(&l_mg->close_lock);
-
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_free_bio:
- if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
- bio_put(bio);
+ bio_put(bio);
fail_free_rqd:
- pblk_free_rqd(pblk, rqd, READ);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
return ret;
}
-static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
- int prev_n)
+static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
+ struct pblk_line *meta_line,
+ struct nvm_rq *data_rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
+ struct pblk_line *data_line = pblk_line_get_data(pblk);
+ struct ppa_addr ppa, ppa_opt;
+ u64 paddr;
+ int pos_opt;
+
+ /* Schedule a metadata I/O that is half the distance from the data I/O
+ * with regards to the number of LUNs forming the pblk instance. This
+ * balances LUN conflicts across every I/O.
+ *
+ * When the LUN configuration changes (e.g., due to GC), this distance
+	 * can align, which would result in metadata and data I/Os colliding. In
+ * this case, modify the distance to not be optimal, but move the
+ * optimal in the right direction.
+ */
+ paddr = pblk_lookup_page(pblk, meta_line);
+ ppa = addr_to_gen_ppa(pblk, paddr, 0);
+ ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
+ pos_opt = pblk_ppa_to_pos(geo, ppa_opt);
+
+ if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
+ test_bit(pos_opt, data_line->blk_bitmap))
+ return true;
+
+ if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
+ data_line->meta_distance--;
+
+ return false;
+}
+
+static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
+ struct nvm_rq *data_rqd)
{
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -449,57 +420,45 @@ static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
retry:
if (list_empty(&l_mg->emeta_list)) {
spin_unlock(&l_mg->close_lock);
- return 0;
+ return NULL;
}
meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
- if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
+ if (meta_line->emeta->mem >= lm->emeta_len[0])
goto retry;
spin_unlock(&l_mg->close_lock);
- if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
- return 0;
+ if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
+ return NULL;
- return pblk_submit_meta_io(pblk, meta_line);
+ return meta_line;
}
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
struct ppa_addr erase_ppa;
+ struct pblk_line *meta_line;
int err;
ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
- err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
+ err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
if (err) {
pr_err("pblk: could not setup write request: %d\n", err);
return NVM_IO_ERR;
}
- if (likely(ppa_empty(erase_ppa))) {
- /* Submit metadata write for previous data line */
- err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
- if (err) {
- pr_err("pblk: metadata I/O submission failed: %d", err);
- return NVM_IO_ERR;
- }
+ meta_line = pblk_should_submit_meta_io(pblk, rqd);
- /* Submit data write for current data line */
- err = pblk_submit_io(pblk, rqd);
- if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
- return NVM_IO_ERR;
- }
- } else {
- /* Submit data write for current data line */
- err = pblk_submit_io(pblk, rqd);
- if (err) {
- pr_err("pblk: data I/O submission failed: %d\n", err);
- return NVM_IO_ERR;
- }
+ /* Submit data write for current data line */
+ err = pblk_submit_io(pblk, rqd);
+ if (err) {
+ pr_err("pblk: data I/O submission failed: %d\n", err);
+ return NVM_IO_ERR;
+ }
- /* Submit available erase for next data line */
+ if (!ppa_empty(erase_ppa)) {
+ /* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
struct nvm_tgt_dev *dev = pblk->dev;
@@ -512,6 +471,15 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
}
}
+ if (meta_line) {
+ /* Submit metadata write for previous data line */
+ err = pblk_submit_meta_io(pblk, meta_line);
+ if (err) {
+ pr_err("pblk: metadata I/O submission failed: %d", err);
+ return NVM_IO_ERR;
+ }
+ }
+
return NVM_IO_OK;
}
@@ -521,7 +489,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
struct bio *bio = rqd->bio;
if (c_ctx->nr_padded)
- pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
+ pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
+ c_ctx->nr_padded);
}
static int pblk_submit_write(struct pblk *pblk)
@@ -543,31 +512,24 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
- rqd = pblk_alloc_rqd(pblk, WRITE);
- if (IS_ERR(rqd)) {
- pr_err("pblk: cannot allocate write req.\n");
- return 1;
- }
-
- bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
- if (!bio) {
- pr_err("pblk: cannot allocate write bio\n");
- goto fail_free_rqd;
- }
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- rqd->bio = bio;
-
secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
if (secs_to_sync > pblk->max_write_pgs) {
pr_err("pblk: bad buffer sync calculation\n");
- goto fail_put_bio;
+ return 1;
}
secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
- if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
+ bio = bio_alloc(GFP_KERNEL, secs_to_sync);
+
+ bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
+ rqd->bio = bio;
+
+ if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
secs_avail)) {
pr_err("pblk: corrupted write bio\n");
goto fail_put_bio;
@@ -586,8 +548,7 @@ fail_free_bio:
pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
bio_put(bio);
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, WRITE);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE);
return 1;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 67e623bd5c2d..59a64d461a5d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -40,10 +40,6 @@
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)
-#define PBLK_WS_POOL_SIZE (128)
-#define PBLK_META_POOL_SIZE (128)
-#define PBLK_READ_REQ_POOL_SIZE (1024)
-
#define PBLK_NR_CLOSE_JOBS (4)
#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
@@ -59,7 +55,15 @@
for ((i) = 0, rlun = &(pblk)->luns[0]; \
(i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-#define ERASE 2 /* READ = 0, WRITE = 1 */
+/* Static pool sizes */
+#define PBLK_GEN_WS_POOL_SIZE (2)
+
+enum {
+ PBLK_READ = READ,
+ PBLK_WRITE = WRITE,/* Write from write buffer */
+ PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_ERASE,
+};
enum {
/* IO Types */
@@ -95,6 +99,7 @@ enum {
};
#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
+#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
/* write buffer completion context */
struct pblk_c_ctx {
@@ -106,9 +111,10 @@ struct pblk_c_ctx {
unsigned int nr_padded;
};
-/* generic context */
+/* read context */
struct pblk_g_ctx {
void *private;
+ u64 lba;
};
/* Pad context */
@@ -207,6 +213,7 @@ struct pblk_lun {
struct pblk_gc_rq {
struct pblk_line *line;
void *data;
+ u64 paddr_list[PBLK_MAX_REQ_ADDRS];
u64 lba_list[PBLK_MAX_REQ_ADDRS];
int nr_secs;
int secs_to_gc;
@@ -231,7 +238,10 @@ struct pblk_gc {
struct timer_list gc_timer;
struct semaphore gc_sem;
- atomic_t inflight_gc;
+ atomic_t read_inflight_gc; /* Number of lines with inflight GC reads */
+ atomic_t pipeline_gc; /* Number of lines in the GC pipeline -
+ * started reads to finished writes
+ */
int w_entries;
struct list_head w_list;
@@ -267,6 +277,7 @@ struct pblk_rl {
int rb_gc_max; /* Max buffer entries available for GC I/O */
int rb_gc_rsv; /* Reserved buffer entries for GC I/O */
int rb_state; /* Rate-limiter current state */
+ int rb_max_io; /* Maximum size for an I/O giving the config */
atomic_t rb_user_cnt; /* User I/O buffer counter */
atomic_t rb_gc_cnt; /* GC I/O buffer counter */
@@ -310,6 +321,7 @@ enum {
};
#define PBLK_MAGIC 0x70626c6b /*pblk*/
+#define SMETA_VERSION cpu_to_le16(1)
struct line_header {
__le32 crc;
@@ -618,15 +630,16 @@ struct pblk {
struct list_head compl_list;
- mempool_t *page_pool;
- mempool_t *line_ws_pool;
+ mempool_t *page_bio_pool;
+ mempool_t *gen_ws_pool;
mempool_t *rec_pool;
- mempool_t *g_rq_pool;
+ mempool_t *r_rq_pool;
mempool_t *w_rq_pool;
- mempool_t *line_meta_pool;
+ mempool_t *e_rq_pool;
struct workqueue_struct *close_wq;
struct workqueue_struct *bb_wq;
+ struct workqueue_struct *r_end_wq;
struct timer_list wtimer;
@@ -657,15 +670,15 @@ int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
struct pblk_w_ctx w_ctx, unsigned int pos);
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
- struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
- unsigned int pos);
+ struct pblk_w_ctx w_ctx, struct pblk_line *line,
+ u64 paddr, unsigned int pos);
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
void pblk_rb_flush(struct pblk_rb *rb);
void pblk_rb_sync_l2p(struct pblk_rb *rb);
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
- struct bio *bio, unsigned int pos,
- unsigned int nr_entries, unsigned int count);
+ unsigned int pos, unsigned int nr_entries,
+ unsigned int count);
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
@@ -692,24 +705,23 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
/*
* pblk core
*/
-struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw);
+struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
+void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx);
-void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw);
-void pblk_wait_for_meta(struct pblk *pblk);
-struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba);
void pblk_discard(struct pblk *pblk, struct bio *bio);
void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
unsigned int nr_secs, unsigned int len,
int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
-void pblk_line_replace_data(struct pblk *pblk);
+struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
@@ -719,19 +731,18 @@ int pblk_line_is_full(struct pblk_line *line);
void pblk_line_free(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line);
void pblk_line_close(struct pblk *pblk, struct pblk_line *line);
-void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
-void pblk_line_mark_bb(struct work_struct *work);
-void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
- void (*work)(struct work_struct *),
- struct workqueue_struct *wq);
+void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
+ void (*work)(struct work_struct *), gfp_t gfp_mask,
+ struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
+void pblk_line_put_wq(struct kref *ref);
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line);
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line);
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
@@ -745,7 +756,6 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
unsigned long *lun_bitmap);
-void pblk_end_bio_sync(struct bio *bio);
void pblk_end_io_sync(struct nvm_rq *rqd);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int nr_pages);
@@ -760,7 +770,7 @@ void pblk_update_map_cache(struct pblk *pblk, sector_t lba,
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
struct ppa_addr ppa, struct ppa_addr entry_line);
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
- struct pblk_line *gc_line);
+ struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
@@ -771,9 +781,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
*/
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags);
-int pblk_write_gc_to_cache(struct pblk *pblk, void *data, u64 *lba_list,
- unsigned int nr_entries, unsigned int nr_rec_entries,
- struct pblk_line *gc_line, unsigned long flags);
+int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk map
@@ -789,7 +797,7 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
* pblk write thread
*/
int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
/*
@@ -797,9 +805,7 @@ void pblk_write_should_kick(struct pblk *pblk);
*/
extern struct bio_set *pblk_bio_set;
int pblk_submit_read(struct pblk *pblk, struct bio *bio);
-int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
- unsigned int nr_secs, unsigned int *secs_to_gc,
- struct pblk_line *line);
+int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
*/
@@ -815,7 +821,7 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
* pblk gc
*/
#define PBLK_GC_MAX_READERS 8 /* Max number of outstanding GC reader jobs */
-#define PBLK_GC_W_QD 128 /* Queue depth for inflight GC write I/Os */
+#define PBLK_GC_RQ_QD 128 /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
@@ -824,7 +830,7 @@ void pblk_gc_exit(struct pblk *pblk);
void pblk_gc_should_start(struct pblk *pblk);
void pblk_gc_should_stop(struct pblk *pblk);
void pblk_gc_should_kick(struct pblk *pblk);
-void pblk_gc_kick(struct pblk *pblk);
+void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
@@ -834,8 +840,8 @@ int pblk_gc_sysfs_force(struct pblk *pblk, int force);
*/
void pblk_rl_init(struct pblk_rl *rl, int budget);
void pblk_rl_free(struct pblk_rl *rl);
+void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
-int pblk_rl_low_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
@@ -843,10 +849,9 @@ void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
-int pblk_rl_sysfs_rate_show(struct pblk_rl *rl);
+int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_set_space_limit(struct pblk_rl *rl, int entries_left);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -892,13 +897,7 @@ static inline void *emeta_to_vsc(struct pblk *pblk, struct line_emeta *emeta)
static inline int pblk_line_vsc(struct pblk_line *line)
{
- int vsc;
-
- spin_lock(&line->lock);
- vsc = le32_to_cpu(*line->vsc);
- spin_unlock(&line->lock);
-
- return vsc;
+ return le32_to_cpu(*line->vsc);
}
#define NVM_MEM_PAGE_WRITE (8)
@@ -1140,7 +1139,7 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
flags = geo->plane_mode >> 1;
- if (type == WRITE)
+ if (type == PBLK_WRITE)
flags |= NVM_IO_SCRAMBLE_ENABLE;
return flags;
@@ -1200,7 +1199,6 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd,
pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status);
}
-#endif
static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr *ppas, int nr_ppas)
@@ -1221,14 +1219,50 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
ppa->g.sec < geo->sec_per_pg)
continue;
-#ifdef CONFIG_NVM_DEBUG
print_ppa(ppa, "boundary", i);
-#endif
+
return 1;
}
return 0;
}
+static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct ppa_addr *ppa_list;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (rqd->opcode == NVM_OP_PWRITE) {
+ struct pblk_line *line;
+ struct ppa_addr ppa;
+ int i;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ ppa = ppa_list[i];
+ line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+
+ spin_lock(&line->lock);
+ if (line->state != PBLK_LINESTATE_OPEN) {
+ pr_err("pblk: bad ppa: line:%d,state:%d\n",
+ line->id, line->state);
+ WARN_ON(1);
+ spin_unlock(&line->lock);
+ return -EINVAL;
+ }
+ spin_unlock(&line->lock);
+ }
+ }
+
+ return 0;
+}
+#endif
+
static inline int pblk_boundary_paddr_checks(struct pblk *pblk, u64 paddr)
{
struct pblk_line_meta *lm = &pblk->lm;
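With the PBLK_READ/PBLK_WRITE/PBLK_WRITE_INT/PBLK_ERASE request types above, pblk_alloc_rqd() picks a dedicated mempool per type and allocates with GFP_KERNEL, which is why the IS_ERR()/NULL checks disappeared from every caller in this series: a mempool_alloc() that is allowed to sleep is guaranteed to eventually succeed. A hedged sketch of the shape of the allocator (pool and context names follow the header diff; the real pblk_alloc_rqd()/pblk_free_rqd() live in pblk-core.c and also manage the per-request metadata DMA buffers):

static struct nvm_rq *pblk_alloc_rqd_sketch(struct pblk *pblk, int type)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	switch (type) {
	case PBLK_WRITE:
	case PBLK_WRITE_INT:
		pool = pblk->w_rq_pool;
		rq_size = sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx);
		break;
	case PBLK_READ:
		pool = pblk->r_rq_pool;
		rq_size = sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx);
		break;
	default:		/* PBLK_ERASE */
		pool = pblk->e_rq_pool;
		rq_size = sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx);
		break;
	}

	/* May sleep, but cannot fail - callers no longer check for errors */
	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}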
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 267f01ae87e4..0993c14be860 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -267,9 +267,9 @@ static void rrpc_gc_kick(struct rrpc *rrpc)
/*
* timed GC every interval.
*/
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
{
- struct rrpc *rrpc = (struct rrpc *)data;
+ struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
rrpc_gc_kick(rrpc);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
@@ -1063,7 +1063,7 @@ static int rrpc_gc_init(struct rrpc *rrpc)
if (!rrpc->kgc_wq)
return -ENOMEM;
- setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+ timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
return 0;
}
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index 516eb65bcacc..ee803638e595 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Macintosh-specific device drivers.
#
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index f5f4da3d0b67..15db69d8ba69 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* I/O Processor (IOP) ADB Driver
* Written and (C) 1999 by Joshua M. Thompson (funaho@jurai.org)
@@ -29,8 +30,6 @@
/*#define DEBUG_ADB_IOP*/
-extern void iop_ism_irq(int, void *);
-
static struct adb_request *current_req;
static struct adb_request *last_req;
#if 0
@@ -265,7 +264,7 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
if (adb_iop_state == idle) adb_iop_start();
- iop_ism_irq(0, (void *) ADB_IOP);
+ iop_ism_irq_poll(ADB_IOP);
}
int adb_iop_reset_bus(void)
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 039dc8285fc5..289800b5235d 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for the Apple Desktop Bus
* and the /dev/adb device on macintoshes.
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index 09d72bb00d12..e091193104f7 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/macintosh/adbhid.c
*
diff --git a/drivers/macintosh/ams/ams.h b/drivers/macintosh/ams/ams.h
index 90f094d45450..fe8d596f9845 100644
--- a/drivers/macintosh/ams/ams.h
+++ b/drivers/macintosh/ams/ams.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/i2c.h>
#include <linux/input-polldev.h>
#include <linux/kthread.h>
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 281fa9e6fc1f..1de81d922d8a 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* /dev/lcd driver for Apple Network Servers.
*/
diff --git a/drivers/macintosh/ans-lcd.h b/drivers/macintosh/ans-lcd.h
index d795b9fd2db6..f0a6e4c68557 100644
--- a/drivers/macintosh/ans-lcd.h
+++ b/drivers/macintosh/ans-lcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PPC_ANS_LCD_H
#define _PPC_ANS_LCD_H
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index 87de8d9bcfad..9a6223add30e 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the ADB controller in the Mac I/O (Hydra) chip.
*/
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 281f5345661e..ca4fcffe454b 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/stat.h>
#include <asm/macio.h>
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index ea9bdc85a21d..899ec1f4c833 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -103,7 +103,7 @@ static DEFINE_MUTEX(smu_part_access);
static int smu_irq_inited;
static unsigned long smu_cmdbuf_abs;
-static void smu_i2c_retry(unsigned long data);
+static void smu_i2c_retry(struct timer_list *t);
/*
* SMU driver low level stuff
@@ -582,9 +582,7 @@ static int smu_late_init(void)
if (!smu)
return 0;
- init_timer(&smu->i2c_timer);
- smu->i2c_timer.function = smu_i2c_retry;
- smu->i2c_timer.data = (unsigned long)smu;
+ timer_setup(&smu->i2c_timer, smu_i2c_retry, 0);
if (smu->db_node) {
smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
@@ -755,7 +753,7 @@ static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
}
-static void smu_i2c_retry(unsigned long data)
+static void smu_i2c_retry(struct timer_list *unused)
{
struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur;
@@ -795,7 +793,7 @@ static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
BUG_ON(cmd != smu->cmd_i2c_cur);
if (!smu_irq_inited) {
mdelay(5);
- smu_i2c_retry(0);
+ smu_i2c_retry(NULL);
return;
}
mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 147da4edd021..98dd702eb867 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for the Cuda and Egret system controllers found on PowerMacs
* and 68k Macs.
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 415c145c8299..4ba06a1695ea 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for the via ADB on (many) Mac II-class machines
*
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 6f68537c93ce..89ed51571b62 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Backlight code for via-pmu
*
diff --git a/drivers/macintosh/via-pmu-event.h b/drivers/macintosh/via-pmu-event.h
index 72c54de408e8..5e52109eb9a6 100644
--- a/drivers/macintosh/via-pmu-event.h
+++ b/drivers/macintosh/via-pmu-event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VIA_PMU_EVENT_H
#define __VIA_PMU_EVENT_H
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index cce99f72e4ae..c4c2b3b85ebc 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for the via-pmu on Apple Powermacs.
*
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index a411c5cb77a1..25465fb91ec9 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for the PMU on 68K-based Apple PowerBooks
*
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c5731e5e3c6c..ba2f1525f4ee 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -163,9 +163,10 @@ config BCM_PDC_MBOX
config BCM_FLEXRM_MBOX
tristate "Broadcom FlexRM Mailbox"
depends on ARM64
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on HAS_DMA
select GENERIC_MSI_IRQ_DOMAIN
- default ARCH_BCM_IPROC
+ default m if ARCH_BCM_IPROC
help
Mailbox implementation of the Broadcom FlexRM ring manager,
which provides access to various offload engines on Broadcom
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index d54e41206e17..4896f8dcae95 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Generic MAILBOX API
obj-$(CONFIG_MAILBOX) += mailbox.o
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index ae6146311934..a8cf4333a68f 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,18 @@
-/* Broadcom FlexRM Mailbox Driver
- *
+/*
* Copyright (C) 2017 Broadcom
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom FlexRM Mailbox Driver
*
* Each Broadcom FlexSparx4 offload engine is implemented as an
* extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -1116,8 +1124,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
err = flexrm_cmpl_desc_to_error(desc);
if (err < 0) {
dev_warn(ring->mbox->dev,
- "got completion desc=0x%lx with error %d",
- (unsigned long)desc, err);
+ "ring%d got completion desc=0x%lx with error %d\n",
+ ring->num, (unsigned long)desc, err);
}
/* Determine request id from completion descriptor */
@@ -1127,8 +1135,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
msg = ring->requests[reqid];
if (!msg) {
dev_warn(ring->mbox->dev,
- "null msg pointer for completion desc=0x%lx",
- (unsigned long)desc);
+ "ring%d null msg pointer for completion desc=0x%lx\n",
+ ring->num, (unsigned long)desc);
continue;
}
@@ -1238,7 +1246,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
GFP_KERNEL, &ring->bd_dma_base);
if (!ring->bd_base) {
- dev_err(ring->mbox->dev, "can't allocate BD memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate BD memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail;
}
@@ -1261,7 +1271,9 @@ static int flexrm_startup(struct mbox_chan *chan)
ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
GFP_KERNEL, &ring->cmpl_dma_base);
if (!ring->cmpl_base) {
- dev_err(ring->mbox->dev, "can't allocate completion memory\n");
+ dev_err(ring->mbox->dev,
+ "can't allocate completion memory for ring%d\n",
+ ring->num);
ret = -ENOMEM;
goto fail_free_bd_memory;
}
@@ -1269,7 +1281,8 @@ static int flexrm_startup(struct mbox_chan *chan)
/* Request IRQ */
if (ring->irq == UINT_MAX) {
- dev_err(ring->mbox->dev, "ring IRQ not available\n");
+ dev_err(ring->mbox->dev,
+ "ring%d IRQ not available\n", ring->num);
ret = -ENODEV;
goto fail_free_cmpl_memory;
}
@@ -1278,7 +1291,8 @@ static int flexrm_startup(struct mbox_chan *chan)
flexrm_irq_thread,
0, dev_name(ring->mbox->dev), ring);
if (ret) {
- dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
+ dev_err(ring->mbox->dev,
+ "failed to request ring%d IRQ\n", ring->num);
goto fail_free_cmpl_memory;
}
ring->irq_requested = true;
@@ -1291,7 +1305,9 @@ static int flexrm_startup(struct mbox_chan *chan)
&ring->irq_aff_hint);
ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
- dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
+ dev_err(ring->mbox->dev,
+ "failed to set IRQ affinity hint for ring%d\n",
+ ring->num);
goto fail_free_irq;
}
@@ -1365,8 +1381,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
- /* Flush ring with timeout of 1s */
- timeout = 1000;
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
ring->regs + RING_CONTROL);
do {
@@ -1374,7 +1390,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
FLUSH_DONE_MASK)
break;
mdelay(1);
- } while (timeout--);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "setting ring%d flush state timedout\n", ring->num);
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+	writel_relaxed(0x0, ring->regs + RING_CONTROL);
+	do {
+		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ dev_err(ring->mbox->dev,
+ "clearing ring%d flush state timedout\n", ring->num);
/* Abort all in-flight requests */
for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
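The shutdown path above now performs the flush in two phases: set the flush bit and poll FLUSH_DONE until the hardware reports the ring flushed, then clear RING_CONTROL and poll until FLUSH_DONE deasserts, each bounded by a 1s timeout. Stripped of the driver specifics, the pattern is (register names as in the patch, the helper itself is hypothetical):

/* Hedged sketch of the set/poll then clear/poll flush sequence. */
static int ring_flush(void __iomem *regs)
{
	int timeout;

	/* Phase 1: request the flush and wait for FLUSH_DONE to assert */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), regs + RING_CONTROL);
	for (timeout = 1000; timeout; timeout--) {
		if (readl_relaxed(regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK)
			break;
		mdelay(1);
	}
	if (!timeout)
		return -ETIMEDOUT;

	/* Phase 2: leave flush state and wait for FLUSH_DONE to clear */
	writel_relaxed(0x0, regs + RING_CONTROL);
	for (timeout = 1000; timeout; timeout--) {
		if (!(readl_relaxed(regs + RING_FLUSH_DONE) & FLUSH_DONE_MASK))
			break;
		mdelay(1);
	}
	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}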
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index bb682c926b0a..bcb29df9549e 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -57,6 +57,7 @@ struct altera_mbox {
/* If the controller supports only RX polling mode */
struct timer_list rxpoll_timer;
+ struct mbox_chan *chan;
};
static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan)
@@ -138,12 +139,11 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
}
}
-static void altera_mbox_poll_rx(unsigned long data)
+static void altera_mbox_poll_rx(struct timer_list *t)
{
- struct mbox_chan *chan = (struct mbox_chan *)data;
- struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan);
+ struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
- altera_mbox_rx_data(chan);
+ altera_mbox_rx_data(mbox->chan);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
@@ -206,8 +206,8 @@ static int altera_mbox_startup_receiver(struct mbox_chan *chan)
polling:
/* Setup polling timer */
- setup_timer(&mbox->rxpoll_timer, altera_mbox_poll_rx,
- (unsigned long)chan);
+ mbox->chan = chan;
+ timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0);
mod_timer(&mbox->rxpoll_timer,
jiffies + msecs_to_jiffies(MBOX_POLLING_MS));
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 97fb956bb6e0..93f3d4d61fa7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -30,6 +30,7 @@
#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
+static bool mbox_data_ready;
static struct dentry *root_debugfs_dir;
struct mbox_test_device {
@@ -152,16 +153,14 @@ out:
static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
{
- unsigned char data;
+ bool data_ready;
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
- data = tdev->rx_buffer[0];
+ data_ready = mbox_data_ready;
spin_unlock_irqrestore(&tdev->lock, flags);
- if (data != '\0')
- return true;
- return false;
+ return data_ready;
}
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
*(touser + l) = '\0';
memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
+ mbox_data_ready = false;
spin_unlock_irqrestore(&tdev->lock, flags);
@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
+ mbox_data_ready = true;
spin_unlock_irqrestore(&tdev->lock, flags);
wake_up_interruptible(&tdev->waitq);
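The mailbox-test change replaces "first byte of rx_buffer is non-NUL" as the readiness test with an explicit flag, set under the same spinlock as the buffer copy and cleared when the reader consumes the message, so an all-zero payload no longer looks like "no data". The pattern reduced to its core (names follow the patch, the two helpers are illustrative only):

/* Sketch: producer/consumer sides of the explicit readiness flag. */
static bool data_ready;			/* mbox_data_ready in the patch */

static void rx_side(struct mbox_test_device *tdev, void *msg)
{
	unsigned long flags;

	spin_lock_irqsave(&tdev->lock, flags);
	memcpy(tdev->rx_buffer, msg, MBOX_MAX_MSG_LEN);
	data_ready = true;		/* valid even for all-zero payloads */
	spin_unlock_irqrestore(&tdev->lock, flags);

	wake_up_interruptible(&tdev->waitq);
}

static bool reader_side_consume(struct mbox_test_device *tdev)
{
	unsigned long flags;
	bool ready;

	spin_lock_irqsave(&tdev->lock, flags);
	ready = data_ready;
	if (ready) {
		memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
		data_ready = false;
	}
	spin_unlock_irqrestore(&tdev->lock, flags);

	return ready;
}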
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 537f4f6d009b..674b35f402f5 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -418,7 +418,7 @@ void mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
module_put(chan->mbox->dev->driver->owner);
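In both the mailbox core and the PCC driver, a client that signals its own transmission completion (cl->knows_txdone) now switches the channel fully to TXDONE_BY_ACK instead of OR-ing it on top of TXDONE_BY_POLL, and freeing the channel restores plain polling, so the mixed (POLL | ACK) state can no longer occur. A sketch of the two sides (names mirror the mailbox core, the wrapper functions are illustrative):

static void request_side(struct mbox_chan *chan, struct mbox_client *cl)
{
	/* Exclusive assignment; this used to be "|= TXDONE_BY_ACK" */
	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;
}

static void free_side(struct mbox_chan *chan)
{
	/* Compared against ACK alone; this used to test (POLL | ACK) */
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;
}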
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index c5e8b9cb170d..2517038a8452 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -906,7 +906,11 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
- return platform_driver_register(&omap_mbox_driver);
+ err = platform_driver_register(&omap_mbox_driver);
+ if (err)
+ class_unregister(&omap_mbox_class);
+
+ return err;
}
subsys_initcall(omap_mbox_init);
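
The omap_mbox_init() fix is a plain init-unwind pattern: the class registered earlier in the function must be unregistered if the later platform_driver_register() fails, otherwise a failed module init leaks the class. A hedged sketch of the ordering with stub registration functions standing in for the real calls:

#include <stdio.h>

/* Hypothetical stand-ins for the two registration steps; the point is the
 * unwind order, not the real omap APIs. */
static int class_register_stub(void)      { return 0; }
static void class_unregister_stub(void)   { puts("class unregistered"); }
static int driver_register_stub(void)     { return -1; /* simulate failure */ }

static int mbox_init(void)
{
	int err;

	err = class_register_stub();
	if (err)
		return err;

	err = driver_register_stub();
	if (err) {
		/* Undo the earlier step so a failed init leaves no state behind. */
		class_unregister_stub();
		return err;
	}

	return 0;
}

int main(void)
{
	printf("init -> %d\n", mbox_init());
	return 0;
}
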
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 9b7005e1345e..3ef7f036ceea 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -69,7 +69,6 @@
#include "mailbox.h"
-#define MAX_PCC_SUBSPACES 256
#define MBOX_IRQ_NAME "pcc-mbox"
static struct mbox_chan *pcc_mbox_channels;
@@ -266,7 +265,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
init_completion(&chan->tx_complete);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method |= TXDONE_BY_ACK;
+ chan->txdone_method = TXDONE_BY_ACK;
spin_unlock_irqrestore(&chan->lock, flags);
@@ -312,7 +311,7 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
chan->active_req = NULL;
- if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ if (chan->txdone_method == TXDONE_BY_ACK)
chan->txdone_method = TXDONE_BY_POLL;
spin_unlock_irqrestore(&chan->lock, flags);
diff --git a/drivers/mcb/Makefile b/drivers/mcb/Makefile
index bcc7745774ab..77073c5928f4 100644
--- a/drivers/mcb/Makefile
+++ b/drivers/mcb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MCB) += mcb.o
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index d6e6933b19f1..3602cb3b2021 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MCB_INTERNAL
#define __MCB_INTERNAL
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 4a249ee86364..83b9362be09c 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -178,7 +178,7 @@ config MD_FAULTY
config MD_CLUSTER
- tristate "Cluster Support for MD (EXPERIMENTAL)"
+ tristate "Cluster Support for MD"
depends on BLK_DEV_MD
depends on DLM
default n
@@ -188,7 +188,8 @@ config MD_CLUSTER
nodes in the cluster can access the MD devices simultaneously.
This brings the redundancy (and uptime) of RAID levels across the
- nodes of the cluster.
+ nodes of the cluster. Currently, it can work with raid1 and raid10
+ (limited support).
If unsure, say N.
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 786ec9e86d65..f701bb211783 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel software RAID and LVM drivers.
#
@@ -18,9 +19,12 @@ dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-smq-y += dm-cache-policy-smq.o
dm-era-y += dm-era-target.o
dm-verity-y += dm-verity-target.o
-md-mod-y += md.o bitmap.o
+md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+linear-y += md-linear.o
+multipath-y += md-multipath.o
+faulty-y += md-faulty.o
# Note: link order is important. All raid personalities
# must come before md.o, as they each initialise
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index c488b846f831..d26b35195825 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_BCACHE) += bcache.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index cacbe2dbd5c3..a27d85232ce1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Primary bucket allocation code
*
@@ -406,7 +407,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
finish_wait(&ca->set->bucket_wait, &w);
out:
- wake_up_process(ca->alloc_thread);
+ if (ca->alloc_thread)
+ wake_up_process(ca->alloc_thread);
trace_bcache_alloc(ca, reserve);
@@ -441,6 +443,11 @@ out:
b->prio = INITIAL_PRIO;
}
+ if (ca->set->avail_nbuckets > 0) {
+ ca->set->avail_nbuckets--;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
+
return r;
}
@@ -448,6 +455,11 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
SET_GC_MARK(b, 0);
SET_GC_SECTORS_USED(b, 0);
+
+ if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+ ca->set->avail_nbuckets++;
+ bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+ }
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
@@ -600,7 +612,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
/*
* If we had to allocate, we might race and not need to allocate the
- * second time we call find_data_bucket(). If we allocated a bucket but
+ * second time we call pick_data_bucket(). If we allocated a bucket but
* didn't use it, drop the refcount bch_bucket_alloc_set() took:
*/
if (KEY_PTRS(&alloc.key))
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 2ed9bd231d84..843877e017e1 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_H
#define _BCACHE_H
@@ -184,6 +185,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
+#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -265,9 +267,6 @@ struct bcache_device {
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
- unsigned long sectors_dirty_last;
- long sectors_dirty_derivative;
-
struct bio_set *bio_split;
unsigned data_csum:1;
@@ -299,7 +298,7 @@ struct cached_dev {
struct semaphore sb_write_mutex;
/* Refcount on the cache set. Always nonzero when we're caching. */
- atomic_t count;
+ refcount_t count;
struct work_struct detach;
/*
@@ -362,12 +361,14 @@ struct cached_dev {
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
- int64_t writeback_rate_derivative;
- int64_t writeback_rate_change;
+ int64_t writeback_rate_integral;
+ int64_t writeback_rate_integral_scaled;
+ int32_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
- unsigned writeback_rate_d_term;
+ unsigned writeback_rate_i_term_inverse;
unsigned writeback_rate_p_term_inverse;
+ unsigned writeback_rate_minimum;
};
enum alloc_reserve {
@@ -581,6 +582,7 @@ struct cache_set {
uint8_t need_gc;
struct gc_stat gc_stats;
size_t nbuckets;
+ size_t avail_nbuckets;
struct task_struct *gc_thread;
/* Where in the btree gc currently is */
@@ -806,13 +808,13 @@ do { \
static inline void cached_dev_put(struct cached_dev *dc)
{
- if (atomic_dec_and_test(&dc->count))
+ if (refcount_dec_and_test(&dc->count))
schedule_work(&dc->detach);
}
static inline bool cached_dev_get(struct cached_dev *dc)
{
- if (!atomic_inc_not_zero(&dc->count))
+ if (!refcount_inc_not_zero(&dc->count))
return false;
/* Paired with the mb in cached_dev_attach */
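
Switching dc->count from atomic_t to refcount_t keeps the same inc_not_zero/dec_and_test call sites but gains saturation and underflow warnings from the refcount API. The sketch below is a non-atomic user-space model of just the two helpers used here, to show their semantics; it is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Non-atomic model of the refcount_t operations used in this patch; the
 * kernel versions are atomic and saturate instead of overflowing. */
struct refcount { unsigned int val; };

static bool refcount_inc_not_zero(struct refcount *r)
{
	if (r->val == 0)
		return false;	/* object already on its way out: don't resurrect */
	r->val++;
	return true;
}

static bool refcount_dec_and_test(struct refcount *r)
{
	return --r->val == 0;	/* true means the caller must release the object */
}

int main(void)
{
	struct refcount count = { .val = 1 };

	if (refcount_inc_not_zero(&count))
		puts("got a reference");
	refcount_dec_and_test(&count);		/* drop it again: 2 -> 1 */
	if (refcount_dec_and_test(&count))	/* 1 -> 0 */
		puts("last reference dropped: schedule detach");
	return 0;
}
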
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 18526d44688d..e56d3ecdbfcb 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Code for working with individual keys, and sorted sets of keys with in a
* btree node
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index ae964624efb2..fa506c1aa524 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 866dcf78ff8e..11c5503d31dc 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
@@ -1240,6 +1241,11 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
__bch_btree_mark_key(c, level, k);
}
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+ stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
@@ -1651,9 +1657,8 @@ static void btree_gc_start(struct cache_set *c)
mutex_unlock(&c->bucket_lock);
}
-static size_t bch_btree_gc_finish(struct cache_set *c)
+static void bch_btree_gc_finish(struct cache_set *c)
{
- size_t available = 0;
struct bucket *b;
struct cache *ca;
unsigned i;
@@ -1690,6 +1695,7 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
}
rcu_read_unlock();
+ c->avail_nbuckets = 0;
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1711,18 +1717,16 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
- available++;
+ c->avail_nbuckets++;
}
}
mutex_unlock(&c->bucket_lock);
- return available;
}
static void bch_btree_gc(struct cache_set *c)
{
int ret;
- unsigned long available;
struct gc_stat stats;
struct closure writes;
struct btree_op op;
@@ -1745,14 +1749,14 @@ static void bch_btree_gc(struct cache_set *c)
pr_warn("gc failed!");
} while (ret);
- available = bch_btree_gc_finish(c);
+ bch_btree_gc_finish(c);
wake_up_allocators(c);
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
stats.data <<= 9;
- stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
+ bch_update_bucket_in_use(c, &stats);
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
trace_bcache_gc_end(c);
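
With avail_nbuckets maintained incrementally, the in-use percentage no longer has to wait for the end of a full GC pass; bucket allocation and free update the counter and recompute the statistic directly. A simplified model of that bookkeeping, with invented struct layouts:

#include <stddef.h>
#include <stdio.h>

/* Simplified model of the avail_nbuckets bookkeeping. */
struct gc_stat { unsigned int in_use; };	/* percentage of buckets in use */
struct cache_set {
	size_t nbuckets;
	size_t avail_nbuckets;
	struct gc_stat gc_stats;
};

static void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}

static void bucket_alloc(struct cache_set *c)
{
	if (c->avail_nbuckets > 0) {
		c->avail_nbuckets--;
		bch_update_bucket_in_use(c, &c->gc_stats);
	}
}

static void bucket_free(struct cache_set *c)
{
	if (c->avail_nbuckets < c->nbuckets) {
		c->avail_nbuckets++;
		bch_update_bucket_in_use(c, &c->gc_stats);
	}
}

int main(void)
{
	struct cache_set c = { .nbuckets = 1000, .avail_nbuckets = 1000 };

	for (int i = 0; i < 250; i++)
		bucket_alloc(&c);
	printf("in_use = %u%%\n", c.gc_stats.in_use);	/* 25% */
	bucket_free(&c);
	printf("in_use = %u%%\n", c.gc_stats.in_use);	/* 24% (rounded down) */
	return 0;
}
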
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 73da1f5626cb..d211e2c25b6b 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H
@@ -305,5 +306,5 @@ void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
struct bkey *, keybuf_pred_fn *);
-
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 295b7e43f92c..ccfbea6f9f6b 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H
@@ -251,6 +252,12 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
static inline void closure_queue(struct closure *cl)
{
struct workqueue_struct *wq = cl->wq;
+ /**
+ * Changes made to closure, work_struct, or a couple of other structs
+ * may cause work.func to point to the wrong location.
+ */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
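
The BUILD_BUG_ON() added to closure_queue() guards the assumption that closure.fn and work_struct.func sit at the same offset, since the closure is re-submitted as a work item. In plain C11 the same compile-time check can be expressed with _Static_assert; the layouts below are illustrative only:

#include <stddef.h>
#include <stdio.h>

/* Two structures whose first callback member must stay at the same offset,
 * because one is occasionally reinterpreted as the other. Illustrative
 * layouts only, not the kernel's closure/work_struct. */
struct work_struct { void (*func)(struct work_struct *w); long data; };
struct closure     { void (*fn)(struct closure *cl);      long pad;  };

/* Compile-time equivalent of the BUILD_BUG_ON() added in closure_queue(). */
_Static_assert(offsetof(struct closure, fn) == offsetof(struct work_struct, func),
	       "closure.fn must overlay work_struct.func");

int main(void)
{
	printf("fn offset %zu, func offset %zu\n",
	       offsetof(struct closure, fn), offsetof(struct work_struct, func));
	return 0;
}
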
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 61076eda2e6d..c7a02c4900da 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Assorted bcache debug code
*
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index 1f63c195d247..acc48d3fa274 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_DEBUG_H
#define _BCACHE_DEBUG_H
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 243de0bf15cd..41c238fc3733 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
*
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index e2ed54054e7a..0cd3575afa1d 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_EXTENTS_H
#define _BCACHE_EXTENTS_H
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 7e871bdc0097..fac97ec2d0e2 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Some low level IO code, and hacks for various block layer limitations
*
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7e1d1c3ba33a..02a98ddb592d 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache journalling code, for btree insertions
*
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index e3c39457afbb..b5788199188f 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_JOURNAL_H
#define _BCACHE_JOURNAL_H
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f633b30c962e..d50c1c97da68 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Moving/copying garbage collector
*
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 681b4f12b05a..3a7aed7282b2 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Main bcache entry point - handle a read or a write request and decide what to
* do with it; the make_request functions are called by the block layer.
@@ -26,12 +27,12 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+static unsigned cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
-static bool verify(struct cached_dev *dc, struct bio *bio)
+static bool verify(struct cached_dev *dc)
{
return dc->verify;
}
@@ -369,7 +370,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
- unsigned mode = cache_mode(dc, bio);
+ unsigned mode = cache_mode(dc);
unsigned sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@@ -384,6 +385,14 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
op_is_write(bio_op(bio))))
goto skip;
+ /*
+ * Flag for bypass if the IO is for read-ahead or background,
+ * unless the read-ahead request is for metadata (eg, for gfs2).
+ */
+ if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
+ !(bio->bi_opf & REQ_META))
+ goto skip;
+
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
bio_sectors(bio) & (c->sb.block_size - 1)) {
pr_debug("skipping unaligned io");
@@ -462,6 +471,7 @@ struct search {
unsigned recoverable:1;
unsigned write:1;
unsigned read_dirty_data:1;
+ unsigned cache_missed:1;
unsigned long start_time;
@@ -648,6 +658,7 @@ static inline struct search *search_alloc(struct bio *bio,
s->orig_bio = bio;
s->cache_miss = NULL;
+ s->cache_missed = 0;
s->d = d;
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
@@ -697,8 +708,16 @@ static void cached_dev_read_error(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
- if (s->recoverable) {
+ /*
+ * If the cache device is dirty (dc->has_dirty is non-zero), then
+ * recovering a failed read request from the cached device may
+ * return stale data. So read failure recovery is only permitted
+ * when the cache device is clean.
+ */
+ if (s->recoverable &&
+ (dc && !atomic_read(&dc->has_dirty))) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
@@ -739,7 +758,7 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL;
}
- if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+ if (verify(dc) && s->recoverable && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio);
bio_complete(s);
@@ -759,12 +778,12 @@ static void cached_dev_read_done_bh(struct closure *cl)
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
- !s->cache_miss, s->iop.bypass);
+ !s->cache_missed, s->iop.bypass);
trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
if (s->iop.status)
continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
- else if (s->iop.bio || verify(dc, &s->bio.bio))
+ else if (s->iop.bio || verify(dc))
continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
else
continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -778,6 +797,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ s->cache_missed = 1;
+
if (s->cache_miss || s->iop.bypass) {
miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
@@ -891,7 +912,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
s->iop.bypass = true;
if (should_writeback(dc, s->orig_bio,
- cache_mode(dc, bio),
+ cache_mode(dc),
s->iop.bypass)) {
s->iop.bypass = false;
s->iop.writeback = true;
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 7689176951ce..dea0886b81c1 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 0ca072c20d0d..be119326297b 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache stats code
*
@@ -146,9 +147,9 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
}
}
-static void scale_accounting(unsigned long data)
+static void scale_accounting(struct timer_list *t)
{
- struct cache_accounting *acc = (struct cache_accounting *) data;
+ struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \
unsigned t = atomic_xchg(&acc->collector.name, 0); \
@@ -233,9 +234,7 @@ void bch_cache_accounting_init(struct cache_accounting *acc,
kobject_init(&acc->day.kobj, &bch_stats_ktype);
closure_init(&acc->cl, parent);
- init_timer(&acc->timer);
+ timer_setup(&acc->timer, scale_accounting, 0);
acc->timer.expires = jiffies + accounting_delay;
- acc->timer.data = (unsigned long) acc;
- acc->timer.function = scale_accounting;
add_timer(&acc->timer);
}
diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h
index adbff141c887..0b70f9de0c03 100644
--- a/drivers/md/bcache/stats.h
+++ b/drivers/md/bcache/stats.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_STATS_H_
#define _BCACHE_STATS_H_
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index fc0a31b13ac4..b4d28928dec5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -53,12 +53,15 @@ LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);
static int bcache_major;
-static DEFINE_IDA(bcache_minor);
+static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
-#define BCACHE_MINORS 16 /* partition support */
+/* limit on the number of partitions per bcache device */
+#define BCACHE_MINORS 128
+/* limit on the number of bcache devices in a single system */
+#define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
/* Superblock */
@@ -721,6 +724,16 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
closure_get(&c->caching);
}
+static inline int first_minor_to_idx(int first_minor)
+{
+ return (first_minor/BCACHE_MINORS);
+}
+
+static inline int idx_to_first_minor(int idx)
+{
+ return (idx * BCACHE_MINORS);
+}
+
static void bcache_device_free(struct bcache_device *d)
{
lockdep_assert_held(&bch_register_lock);
@@ -734,7 +747,8 @@ static void bcache_device_free(struct bcache_device *d)
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
if (d->disk) {
- ida_simple_remove(&bcache_minor, d->disk->first_minor);
+ ida_simple_remove(&bcache_device_idx,
+ first_minor_to_idx(d->disk->first_minor));
put_disk(d->disk);
}
@@ -751,7 +765,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
{
struct request_queue *q;
size_t n;
- int minor;
+ int idx;
if (!d->stripe_size)
d->stripe_size = 1 << 31;
@@ -776,25 +790,24 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->full_dirty_stripes)
return -ENOMEM;
- minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
- if (minor < 0)
- return minor;
-
- minor *= BCACHE_MINORS;
+ idx = ida_simple_get(&bcache_device_idx, 0,
+ BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
+ if (idx < 0)
+ return idx;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS |
BIOSET_NEED_RESCUER)) ||
!(d->disk = alloc_disk(BCACHE_MINORS))) {
- ida_simple_remove(&bcache_minor, minor);
+ ida_simple_remove(&bcache_device_idx, idx);
return -ENOMEM;
}
set_capacity(d->disk, sectors);
- snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
+ snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
d->disk->major = bcache_major;
- d->disk->first_minor = minor;
+ d->disk->first_minor = idx_to_first_minor(idx);
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
@@ -889,7 +902,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
- BUG_ON(atomic_read(&dc->count));
+ BUG_ON(refcount_read(&dc->count));
mutex_lock(&bch_register_lock);
@@ -1016,7 +1029,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
* dc->c must be set before dc->count != 0 - paired with the mb in
* cached_dev_get()
*/
- atomic_set(&dc->count, 1);
+ refcount_set(&dc->count, 1);
/* Block writeback thread, but spawn it */
down_write(&dc->writeback_lock);
@@ -1028,7 +1041,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
bch_writeback_queue(dc);
}
@@ -1129,9 +1142,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
if (ret)
return ret;
- set_capacity(dc->disk.disk,
- dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
dc->disk.disk->queue->backing_dev_info->ra_pages =
max(dc->disk.disk->queue->backing_dev_info->ra_pages,
q->backing_dev_info->ra_pages);
@@ -2085,6 +2095,7 @@ static void bcache_exit(void)
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
}
static int __init bcache_init(void)
@@ -2103,14 +2114,15 @@ static int __init bcache_init(void)
bcache_major = register_blkdev(0, "bcache");
if (bcache_major < 0) {
unregister_reboot_notifier(&reboot);
+ mutex_destroy(&bch_register_lock);
return bcache_major;
}
if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
- sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
- bch_debug_init(bcache_kobj))
+ bch_debug_init(bcache_kobj) ||
+ sysfs_create_files(bcache_kobj, files))
goto err;
return 0;
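
Minor numbers are now allocated as one index per device, with a fixed block of BCACHE_MINORS minors reserved for each, so idx and first_minor convert by a multiply or divide and the device count is capped so the minor space cannot overflow. A small sketch of the arithmetic, assuming the kernel's MINORBITS value of 20:

#include <stdio.h>

#define MINORBITS		20	/* kernel value from <linux/kdev_t.h> */
#define BCACHE_MINORS		128	/* minors reserved per bcache device */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS) / BCACHE_MINORS)

static int idx_to_first_minor(int idx)   { return idx * BCACHE_MINORS; }
static int first_minor_to_idx(int minor) { return minor / BCACHE_MINORS; }

int main(void)
{
	int idx = 3;	/* e.g. /dev/bcache3 */
	int first_minor = idx_to_first_minor(idx);

	printf("bcache%d: minors %d..%d (max %u devices)\n",
	       idx, first_minor, first_minor + BCACHE_MINORS - 1,
	       BCACHE_DEVICE_IDX_MAX);
	printf("minor %d belongs to bcache%d\n",
	       first_minor + 5, first_minor_to_idx(first_minor + 5));
	return 0;
}
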
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 104c57cd666c..b4184092c727 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bcache sysfs interfaces
*
@@ -81,8 +82,9 @@ rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
-rw_attribute(writeback_rate_d_term);
+rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
+rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -130,15 +132,16 @@ SHOW(__bch_cached_dev)
sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
- var_print(writeback_rate_d_term);
+ var_print(writeback_rate_i_term_inverse);
var_print(writeback_rate_p_term_inverse);
+ var_print(writeback_rate_minimum);
if (attr == &sysfs_writeback_rate_debug) {
char rate[20];
char dirty[20];
char target[20];
char proportional[20];
- char derivative[20];
+ char integral[20];
char change[20];
s64 next_io;
@@ -146,7 +149,7 @@ SHOW(__bch_cached_dev)
bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
bch_hprint(proportional,dc->writeback_rate_proportional << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(integral, dc->writeback_rate_integral_scaled << 9);
bch_hprint(change, dc->writeback_rate_change << 9);
next_io = div64_s64(dc->writeback_rate.next - local_clock(),
@@ -157,11 +160,11 @@ SHOW(__bch_cached_dev)
"dirty:\t\t%s\n"
"target:\t\t%s\n"
"proportional:\t%s\n"
- "derivative:\t%s\n"
+ "integral:\t%s\n"
"change:\t\t%s/sec\n"
"next io:\t%llims\n",
rate, dirty, target, proportional,
- derivative, change, next_io);
+ integral, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -213,7 +216,7 @@ STORE(__cached_dev)
dc->writeback_rate.rate, 1, INT_MAX);
d_strtoul_nonzero(writeback_rate_update_seconds);
- d_strtoul(writeback_rate_d_term);
+ d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
@@ -319,7 +322,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_percent,
&sysfs_writeback_rate,
&sysfs_writeback_rate_update_seconds,
- &sysfs_writeback_rate_d_term,
+ &sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
@@ -745,6 +748,11 @@ static struct attribute *bch_cache_set_internal_files[] = {
};
KTYPE(bch_cache_set_internal);
+static int __bch_cache_cmp(const void *l, const void *r)
+{
+ return *((uint16_t *)r) - *((uint16_t *)l);
+}
+
SHOW(__bch_cache)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -769,9 +777,6 @@ SHOW(__bch_cache)
CACHE_REPLACEMENT(&ca->sb));
if (attr == &sysfs_priority_stats) {
- int cmp(const void *l, const void *r)
- { return *((uint16_t *) r) - *((uint16_t *) l); }
-
struct bucket *b;
size_t n = ca->sb.nbuckets, i;
size_t unused = 0, available = 0, dirty = 0, meta = 0;
@@ -800,7 +805,7 @@ SHOW(__bch_cache)
p[i] = ca->buckets[i].prio;
mutex_unlock(&ca->set->bucket_lock);
- sort(p, n, sizeof(uint16_t), cmp, NULL);
+ sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);
while (n &&
!cached[n - 1])
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 0526fe92a683..b54fe9602529 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_SYSFS_H_
#define _BCACHE_SYSFS_H_
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index b7820b0d2621..a9a73f560c04 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "bcache.h"
#include "btree.h"
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 176d3c2ef5f5..e548b8b51322 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -232,8 +232,14 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
d->next += div_u64(done * NSEC_PER_SEC, d->rate);
- if (time_before64(now + NSEC_PER_SEC, d->next))
- d->next = now + NSEC_PER_SEC;
+ /* Bound the time. Don't let us fall further than 2 seconds behind
+ * (this prevents unnecessary backlog that would make it impossible
+ * to catch up). If we're ahead of the desired writeback rate,
+ * don't let us sleep more than 2.5 seconds (so we can notice/respond
+ * if the control system tells us to speed up!).
+ */
+ if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
+ d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;
if (time_after64(now - NSEC_PER_SEC * 2, d->next))
d->next = now - NSEC_PER_SEC * 2;
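
bch_next_delay() advances the next-IO deadline by done/rate and then clamps it so the writer never falls more than 2 seconds behind nor schedules itself more than 2.5 seconds ahead. A user-space model of that clamping (nanosecond arithmetic only, invented names, no jiffies conversion):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct bch_ratelimit {
	uint64_t next;	/* time (ns) at which the next unit of work is due */
	uint32_t rate;	/* units per second */
};

/* Model of the clamping in bch_next_delay(): keep 'next' within
 * [now - 2s, now + 2.5s] so we neither build an unbounded backlog nor
 * sleep so long that a rate change goes unnoticed. */
static uint64_t next_delay(struct bch_ratelimit *d, uint64_t done, uint64_t now)
{
	d->next += done * NSEC_PER_SEC / d->rate;

	if (d->next > now + NSEC_PER_SEC * 5 / 2)
		d->next = now + NSEC_PER_SEC * 5 / 2;
	if (d->next + NSEC_PER_SEC * 2 < now)
		d->next = now - NSEC_PER_SEC * 2;

	return d->next > now ? d->next - now : 0;	/* how long to sleep, in ns */
}

int main(void)
{
	struct bch_ratelimit d = { .rate = 512 };
	uint64_t now = 10 * NSEC_PER_SEC;

	d.next = now;
	/* 5120 units at 512/s would schedule 10 s ahead; clamped to 2.5 s. */
	printf("sleep %llu ns\n",
	       (unsigned long long)next_delay(&d, 5120, now));
	return 0;
}
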
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index cb8d2ccbb6c6..ed5e8a412eb8 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H
@@ -441,10 +442,10 @@ struct bch_ratelimit {
uint64_t next;
/*
- * Rate at which we want to do work, in units per nanosecond
+ * Rate at which we want to do work, in units per second
* The units here correspond to the units passed to bch_next_delay()
*/
- unsigned rate;
+ uint32_t rate;
};
static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e663ca082183..56a37884ca8b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* background writeback - scan btree for dirty data and write it to the backing
* device
@@ -25,48 +26,63 @@ static void __update_writeback_rate(struct cached_dev *dc)
bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
-
int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
c->cached_dev_sectors);
- /* PD controller */
-
+ /*
+ * PI controller:
+ * Figures out the amount that should be written per second.
+ *
+ * First, the error (number of sectors that are dirty beyond our
+ * target) is calculated. The error is accumulated (numerically
+ * integrated).
+ *
+ * Then, the proportional value and integral value are scaled
+ * based on configured values. These are stored as inverses to
+ * avoid fixed point math and to make configuration easy-- e.g.
+ * the default value of 40 for writeback_rate_p_term_inverse
+ * attempts to write at a rate that would retire all the dirty
+ * blocks in 40 seconds.
+ *
+ * The writeback_rate_i_term_inverse value of 10000 means that 1/10000th
+ * of the error is accumulated in the integral term per second.
+ * This acts as a slow, long-term average that is not subject to
+ * variations in usage like the p term.
+ */
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
- int64_t derivative = dirty - dc->disk.sectors_dirty_last;
- int64_t proportional = dirty - target;
- int64_t change;
-
- dc->disk.sectors_dirty_last = dirty;
-
- /* Scale to sectors per second */
-
- proportional *= dc->writeback_rate_update_seconds;
- proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
-
- derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
-
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- (dc->writeback_rate_d_term /
- dc->writeback_rate_update_seconds) ?: 1, 0);
-
- derivative *= dc->writeback_rate_d_term;
- derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
-
- change = proportional + derivative;
+ int64_t error = dirty - target;
+ int64_t proportional_scaled =
+ div_s64(error, dc->writeback_rate_p_term_inverse);
+ int64_t integral_scaled;
+ uint32_t new_rate;
+
+ if ((error < 0 && dc->writeback_rate_integral > 0) ||
+ (error > 0 && time_before64(local_clock(),
+ dc->writeback_rate.next + NSEC_PER_MSEC))) {
+ /*
+ * Only decrease the integral term if it's more than
+ * zero. Only increase the integral term if the device
+ * is keeping up. (Don't wind up the integral
+ * ineffectively in either case).
+ *
+ * It's necessary to scale this by
+ * writeback_rate_update_seconds to keep the integral
+ * term dimensioned properly.
+ */
+ dc->writeback_rate_integral += error *
+ dc->writeback_rate_update_seconds;
+ }
- /* Don't increase writeback rate if the device isn't keeping up */
- if (change > 0 &&
- time_after64(local_clock(),
- dc->writeback_rate.next + NSEC_PER_MSEC))
- change = 0;
+ integral_scaled = div_s64(dc->writeback_rate_integral,
+ dc->writeback_rate_i_term_inverse);
- dc->writeback_rate.rate =
- clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
- 1, NSEC_PER_MSEC);
+ new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
+ dc->writeback_rate_minimum, NSEC_PER_SEC);
- dc->writeback_rate_proportional = proportional;
- dc->writeback_rate_derivative = derivative;
- dc->writeback_rate_change = change;
+ dc->writeback_rate_proportional = proportional_scaled;
+ dc->writeback_rate_integral_scaled = integral_scaled;
+ dc->writeback_rate_change = new_rate - dc->writeback_rate.rate;
+ dc->writeback_rate.rate = new_rate;
dc->writeback_rate_target = target;
}
@@ -179,13 +195,21 @@ static void write_dirty(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
- io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- bio_set_dev(&io->bio, io->dc->bdev);
- io->bio.bi_end_io = dirty_endio;
+ /*
+ * IO errors are signalled using the dirty bit on the key.
+ * If we failed to read, we should not attempt to write to the
+ * backing device. Instead, immediately go to write_dirty_finish
+ * to clean up.
+ */
+ if (KEY_DIRTY(&w->key)) {
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
+ io->bio.bi_iter.bi_sector = KEY_START(&w->key);
+ bio_set_dev(&io->bio, io->dc->bdev);
+ io->bio.bi_end_io = dirty_endio;
- closure_bio_submit(&io->bio, cl);
+ closure_bio_submit(&io->bio, cl);
+ }
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
@@ -417,6 +441,8 @@ static int bch_writeback_thread(void *arg)
struct cached_dev *dc = arg;
bool searched_full_index;
+ bch_ratelimit_reset(&dc->writeback_rate);
+
while (!kthread_should_stop()) {
down_write(&dc->writeback_lock);
if (!atomic_read(&dc->has_dirty) ||
@@ -444,7 +470,6 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
- bch_ratelimit_reset(&dc->writeback_rate);
read_dirty(dc);
if (searched_full_index) {
@@ -454,6 +479,8 @@ static int bch_writeback_thread(void *arg)
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
delay = schedule_timeout_interruptible(delay);
+
+ bch_ratelimit_reset(&dc->writeback_rate);
}
}
@@ -491,8 +518,6 @@ void bch_sectors_dirty_init(struct bcache_device *d)
bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
-
- d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -506,10 +531,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_percent = 10;
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
+ dc->writeback_rate_minimum = 8;
dc->writeback_rate_update_seconds = 5;
- dc->writeback_rate_d_term = 30;
- dc->writeback_rate_p_term_inverse = 6000;
+ dc->writeback_rate_p_term_inverse = 40;
+ dc->writeback_rate_i_term_inverse = 10000;
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index e35421d20d2e..a9e3ffb4b03c 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
@@ -76,7 +77,9 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (would_skip)
return false;
- return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= CUTOFF_WRITEBACK);
}
static inline void bch_writeback_queue(struct cached_dev *dc)
@@ -89,7 +92,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
{
if (!atomic_read(&dc->has_dirty) &&
!atomic_xchg(&dc->has_dirty, 1)) {
- atomic_inc(&dc->count);
+ refcount_inc(&dc->count);
if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..b8ac591aaaa7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
@@ -974,7 +974,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
buffers = c->minimum_buffers;
*limit_buffers = buffers;
- *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
+ *threshold_buffers = mult_frac(buffers,
+ DM_BUFIO_WRITEBACK_PERCENT, 100);
}
/*
@@ -1600,7 +1601,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
@@ -1647,7 +1648,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
@@ -1818,7 +1819,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
@@ -1910,19 +1911,15 @@ static int __init dm_bufio_init(void)
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
- mem = (__u64)((totalram_pages - totalhigh_pages) *
- DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
+ mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
+ DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
if (mem > ULONG_MAX)
mem = ULONG_MAX;
#ifdef CONFIG_MMU
- /*
- * Get the size of vmalloc space the same way as VMALLOC_TOTAL
- * in fs/proc/internal.h
- */
- if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
- mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
+ if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
+ mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif
dm_bufio_default_cache_size = mem;
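
The dm-bufio conversions replace open-coded `x * percent / 100` with mult_frac(), which divides first and folds the remainder back in so the intermediate product cannot overflow the type. A small sketch using the same identity the kernel macro is built on; the demo values are arbitrary:

#include <stdio.h>

/* Same identity the kernel macro uses: divide first, then fold in the
 * remainder, so x is never multiplied in full before the division. */
#define mult_frac(x, numer, denom)                              \
	({                                                      \
		typeof(x) quot = (x) / (denom);                 \
		typeof(x) rem  = (x) % (denom);                 \
		(quot * (numer)) + ((rem * (numer)) / (denom)); \
	})

int main(void)
{
	unsigned long pages = 0xffffffffUL;	/* large page count */

	/* The naive form multiplies 'pages' in full before dividing and can
	 * overflow a narrower type; mult_frac keeps the intermediate values
	 * bounded by 'denom' and produces the same floor result. */
	printf("naive     = %lu\n", pages * 3 / 100);
	printf("mult_frac = %lu\n", mult_frac(pages, 3, 100));
	return 0;
}
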
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index f092771878c2..8eb52e425141 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "dm-core.h"
/*
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 707233891291..1d0af0a21fc7 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -161,8 +161,17 @@ EXPORT_SYMBOL_GPL(btracker_nr_demotions_queued);
static bool max_work_reached(struct background_tracker *b)
{
- // FIXME: finish
- return false;
+ return atomic_read(&b->pending_promotes) +
+ atomic_read(&b->pending_writebacks) +
+ atomic_read(&b->pending_demotes) >= b->max_work;
+}
+
+struct bt_work *alloc_work(struct background_tracker *b)
+{
+ if (max_work_reached(b))
+ return NULL;
+
+ return kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
}
int btracker_queue(struct background_tracker *b,
@@ -174,10 +183,7 @@ int btracker_queue(struct background_tracker *b,
if (pwork)
*pwork = NULL;
- if (max_work_reached(b))
- return -ENOMEM;
-
- w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
+ w = alloc_work(b);
if (!w)
return -ENOMEM;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4a4e9c75fc4c..0d7212410e21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -13,6 +13,7 @@
#include "persistent-data/dm-transaction-manager.h"
#include <linux/device-mapper.h>
+#include <linux/refcount.h>
/*----------------------------------------------------------------*/
@@ -100,7 +101,7 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
- atomic_t ref_count;
+ refcount_t ref_count;
struct list_head list;
unsigned version;
@@ -753,7 +754,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
}
cmd->version = metadata_version;
- atomic_set(&cmd->ref_count, 1);
+ refcount_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -791,7 +792,7 @@ static struct dm_cache_metadata *lookup(struct block_device *bdev)
list_for_each_entry(cmd, &table, list)
if (cmd->bdev == bdev) {
- atomic_inc(&cmd->ref_count);
+ refcount_inc(&cmd->ref_count);
return cmd;
}
@@ -862,7 +863,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- if (atomic_dec_and_test(&cmd->ref_count)) {
+ if (refcount_dec_and_test(&cmd->ref_count)) {
mutex_lock(&table_lock);
list_del(&cmd->list);
mutex_unlock(&table_lock);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e5eb9c9b4bc8..4ab23d0075f6 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -213,6 +213,19 @@ static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
l->nr_elts--;
}
+static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
+{
+ struct entry *e;
+
+ for (e = l_head(es, l); e; e = l_next(es, e))
+ if (!e->sentinel) {
+ l_del(es, l, e);
+ return e;
+ }
+
+ return NULL;
+}
+
static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
struct entry *e;
@@ -719,7 +732,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
if (l_empty(&ea->free))
return NULL;
- e = l_pop_tail(ea->es, &ea->free);
+ e = l_pop_head(ea->es, &ea->free);
init_entry(e);
ea->nr_allocated++;
@@ -1158,13 +1171,13 @@ static void clear_pending(struct smq_policy *mq, struct entry *e)
e->pending_work = false;
}
-static void queue_writeback(struct smq_policy *mq)
+static void queue_writeback(struct smq_policy *mq, bool idle)
{
int r;
struct policy_work work;
struct entry *e;
- e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
+ e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
if (e) {
mark_pending(mq, e);
q_del(&mq->dirty, e);
@@ -1174,12 +1187,16 @@ static void queue_writeback(struct smq_policy *mq)
work.cblock = infer_cblock(mq, e);
r = btracker_queue(mq->bg_work, &work, NULL);
- WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->dirty, e);
+ }
}
}
static void queue_demotion(struct smq_policy *mq)
{
+ int r;
struct policy_work work;
struct entry *e;
@@ -1189,7 +1206,7 @@ static void queue_demotion(struct smq_policy *mq)
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
if (!e) {
if (!clean_target_met(mq, true))
- queue_writeback(mq);
+ queue_writeback(mq, false);
return;
}
@@ -1199,12 +1216,17 @@ static void queue_demotion(struct smq_policy *mq)
work.op = POLICY_DEMOTE;
work.oblock = e->oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, NULL);
+ r = btracker_queue(mq->bg_work, &work, NULL);
+ if (r) {
+ clear_pending(mq, e);
+ q_push_front(&mq->clean, e);
+ }
}
static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
struct policy_work **workp)
{
+ int r;
struct entry *e;
struct policy_work work;
@@ -1234,7 +1256,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
work.op = POLICY_PROMOTE;
work.oblock = oblock;
work.cblock = infer_cblock(mq, e);
- btracker_queue(mq->bg_work, &work, workp);
+ r = btracker_queue(mq->bg_work, &work, workp);
+ if (r)
+ free_entry(&mq->cache_alloc, e);
}
/*----------------------------------------------------------------*/
@@ -1418,7 +1442,7 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
r = btracker_issue(mq->bg_work, result);
if (r == -ENODATA) {
if (!clean_target_met(mq, idle)) {
- queue_writeback(mq);
+ queue_writeback(mq, idle);
r = btracker_issue(mq->bg_work, result);
}
}
@@ -1778,7 +1802,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
mq->next_hotspot_period = jiffies;
mq->next_cache_period = jiffies;
- mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */
+ mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
if (!mq->bg_work)
goto bad_btracker;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 8785134c9f1f..cf23a14f9c6a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -408,9 +408,7 @@ struct cache {
int sectors_per_block_shift;
spinlock_t lock;
- struct list_head deferred_cells;
struct bio_list deferred_bios;
- struct bio_list deferred_writethrough_bios;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
atomic_t nr_allocated_migrations;
@@ -446,10 +444,10 @@ struct cache {
struct dm_kcopyd_client *copier;
struct workqueue_struct *wq;
struct work_struct deferred_bio_worker;
- struct work_struct deferred_writethrough_worker;
struct work_struct migration_worker;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
+ struct bio_set *bs;
mempool_t *migration_pool;
@@ -490,15 +488,6 @@ struct per_bio_data {
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
-
- /*
- * writethrough fields. These MUST remain at the end of this
- * structure and the 'cache' member must be the first as it
- * is used to determine the offset of the writethrough fields.
- */
- struct cache *cache;
- dm_cblock_t cblock;
- struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -515,19 +504,19 @@ struct dm_cache_migration {
/*----------------------------------------------------------------*/
-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITETHROUGH;
+ return cache->features.io_mode == CM_IO_WRITETHROUGH;
}
-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITEBACK;
+ return cache->features.io_mode == CM_IO_WRITEBACK;
}
-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
{
- return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+ return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}
/*----------------------------------------------------------------*/
@@ -537,14 +526,9 @@ static void wake_deferred_bio_worker(struct cache *cache)
queue_work(cache->wq, &cache->deferred_bio_worker);
}
-static void wake_deferred_writethrough_worker(struct cache *cache)
-{
- queue_work(cache->wq, &cache->deferred_writethrough_worker);
-}
-
static void wake_migration_worker(struct cache *cache)
{
- if (passthrough_mode(&cache->features))
+ if (passthrough_mode(cache))
return;
queue_work(cache->wq, &cache->migration_worker);
@@ -567,10 +551,13 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
struct dm_cache_migration *mg;
mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
- if (mg) {
- mg->cache = cache;
- atomic_inc(&mg->cache->nr_allocated_migrations);
- }
+ if (!mg)
+ return NULL;
+
+ memset(mg, 0, sizeof(*mg));
+
+ mg->cache = cache;
+ atomic_inc(&cache->nr_allocated_migrations);
return mg;
}
@@ -618,27 +605,16 @@ static unsigned lock_level(struct bio *bio)
* Per bio data
*--------------------------------------------------------------*/
-/*
- * If using writeback, leave out struct per_bio_data's writethrough fields.
- */
-#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
-#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
-
-static size_t get_per_bio_data_size(struct cache *cache)
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
- return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
-}
-
-static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
-{
- struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -678,7 +654,6 @@ static void defer_bios(struct cache *cache, struct bio_list *bios)
static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
{
bool r;
- size_t pb_size;
struct per_bio_data *pb;
struct dm_cell_key_v2 key;
dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
@@ -703,8 +678,7 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
if (cell != cell_prealloc)
free_prison_cell(cache, cell_prealloc);
- pb_size = get_per_bio_data_size(cache);
- pb = get_per_bio_data(bio, pb_size);
+ pb = get_per_bio_data(bio);
pb->cell = cell;
return r;
@@ -856,28 +830,35 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
bio_op(bio) != REQ_OP_DISCARD) {
+ pb = get_per_bio_data(bio);
pb->tick = true;
cache->need_tick_bio = false;
}
spin_unlock_irqrestore(&cache->lock, flags);
}
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, bool bio_has_pbd)
{
- // FIXME: this is called way too much.
- check_if_tick_bio_needed(cache, bio);
+ if (bio_has_pbd)
+ check_if_tick_bio_needed(cache, bio);
remap_to_origin(cache, bio);
if (bio_data_dir(bio) == WRITE)
clear_discard(cache, oblock_to_dblock(cache, oblock));
}
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ // FIXME: check_if_tick_bio_needed() is called way too much through this interface
+ __remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
@@ -908,10 +889,10 @@ static bool accountable_bio(struct cache *cache, struct bio *bio)
static void accounted_begin(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb;
if (accountable_bio(cache, bio)) {
+ pb = get_per_bio_data(bio);
pb->len = bio_sectors(bio);
iot_io_begin(&cache->tracker, pb->len);
}
@@ -919,8 +900,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
static void accounted_complete(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
iot_io_end(&cache->tracker, pb->len);
}
@@ -937,57 +917,26 @@ static void issue_op(struct bio *bio, void *context)
accounted_request(cache, bio);
}
-static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_add(&cache->deferred_writethrough_bios, bio);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- wake_deferred_writethrough_worker(cache);
-}
-
-static void writethrough_endio(struct bio *bio)
-{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
-
- dm_unhook_bio(&pb->hook_info, bio);
-
- if (bio->bi_status) {
- bio_endio(bio);
- return;
- }
-
- dm_bio_restore(&pb->bio_details, bio);
- remap_to_cache(pb->cache, bio, pb->cblock);
-
- /*
- * We can't issue this bio directly, since we're in interrupt
- * context. So it gets put on a bio list for processing by the
- * worker thread.
- */
- defer_writethrough_bio(pb->cache, bio);
-}
-
/*
- * FIXME: send in parallel, huge latency as is.
* When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices. In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices. Clone the bio and send them in parallel.
*/
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
+
+ BUG_ON(!origin_bio);
- pb->cache = cache;
- pb->cblock = cblock;
- dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
- dm_bio_record(&pb->bio_details, bio);
+ bio_chain(origin_bio, bio);
+ /*
+ * Passing false to __remap_to_origin_clear_discard() skips
+ * all code that might use per_bio_data (since clone doesn't have it)
+ */
+ __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+ submit_bio(origin_bio);
- remap_to_origin_clear_discard(pb->cache, bio, oblock);
+ remap_to_cache(cache, bio, cblock);
}
/*----------------------------------------------------------------
@@ -1201,6 +1150,18 @@ static void background_work_end(struct cache *cache)
/*----------------------------------------------------------------*/
+static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
+{
+ return (bio_data_dir(bio) == WRITE) &&
+ (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+}
+
+static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
+{
+ return writeback_mode(cache) &&
+ (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
+}
+
static void quiesce(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
@@ -1248,8 +1209,7 @@ static int copy(struct dm_cache_migration *mg, bool promote)
static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
free_prison_cell(cache, pb->cell);
@@ -1260,23 +1220,21 @@ static void overwrite_endio(struct bio *bio)
{
struct dm_cache_migration *mg = bio->bi_private;
struct cache *cache = mg->cache;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_unhook_bio(&pb->hook_info, bio);
if (bio->bi_status)
mg->k.input = bio->bi_status;
- queue_continuation(mg->cache->wq, &mg->k);
+ queue_continuation(cache->wq, &mg->k);
}
static void overwrite(struct dm_cache_migration *mg,
void (*continuation)(struct work_struct *))
{
struct bio *bio = mg->overwrite_bio;
- size_t pb_data_size = get_per_bio_data_size(mg->cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
@@ -1474,13 +1432,51 @@ static void mg_upgrade_lock(struct work_struct *ws)
}
}
+static void mg_full_copy(struct work_struct *ws)
+{
+ struct dm_cache_migration *mg = ws_to_mg(ws);
+ struct cache *cache = mg->cache;
+ struct policy_work *op = mg->op;
+ bool is_policy_promote = (op->op == POLICY_PROMOTE);
+
+ if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
+ is_discarded_oblock(cache, op->oblock)) {
+ mg_upgrade_lock(ws);
+ return;
+ }
+
+ init_continuation(&mg->k, mg_upgrade_lock);
+
+ if (copy(mg, is_policy_promote)) {
+ DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
+ mg->k.input = BLK_STS_IOERR;
+ mg_complete(mg, false);
+ }
+}
+
static void mg_copy(struct work_struct *ws)
{
- int r;
struct dm_cache_migration *mg = ws_to_mg(ws);
if (mg->overwrite_bio) {
/*
+ * No exclusive lock was held when we last checked if the bio
+ * was optimisable. So we have to check again in case things
+ * have changed (e.g., the block may no longer be discarded).
+ */
+ if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
+ /*
+ * Fall back to a real full copy after doing some tidying up.
+ */
+ bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+ BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
+ mg->overwrite_bio = NULL;
+ inc_io_migrations(mg->cache);
+ mg_full_copy(ws);
+ return;
+ }
+
+ /*
* It's safe to do this here, even though it's new data
* because all IO has been locked out of the block.
*
@@ -1489,26 +1485,8 @@ static void mg_copy(struct work_struct *ws)
*/
overwrite(mg, mg_update_metadata_after_copy);
- } else {
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- bool is_policy_promote = (op->op == POLICY_PROMOTE);
-
- if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
- is_discarded_oblock(cache, op->oblock)) {
- mg_upgrade_lock(ws);
- return;
- }
-
- init_continuation(&mg->k, mg_upgrade_lock);
-
- r = copy(mg, is_policy_promote);
- if (r) {
- DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
- mg->k.input = BLK_STS_IOERR;
- mg_complete(mg, false);
- }
- }
+ } else
+ mg_full_copy(ws);
}
static int mg_lock_writes(struct dm_cache_migration *mg)
@@ -1567,9 +1545,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->op = op;
mg->overwrite_bio = bio;
@@ -1703,9 +1678,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return -ENOMEM;
}
- memset(mg, 0, sizeof(*mg));
-
- mg->cache = cache;
mg->overwrite_bio = bio;
mg->invalidate_cblock = cblock;
mg->invalidate_oblock = oblock;
@@ -1748,26 +1720,12 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
/*----------------------------------------------------------------*/
-static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
-{
- return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
-}
-
-static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
-{
- return writeback_mode(&cache->features) &&
- (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
-}
-
static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
bool *commit_needed)
{
int r, data_dir;
bool rb, background_queued;
dm_cblock_t cblock;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
*commit_needed = false;
@@ -1816,6 +1774,8 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
}
if (r == -ENOENT) {
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
/*
* Miss.
*/
@@ -1823,7 +1783,6 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
if (pb->req_nr == 0) {
accounted_begin(cache, bio);
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
/*
* This is a duplicate writethrough io that is no
@@ -1842,18 +1801,17 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
* Passthrough always maps to the origin, invalidating any
* cache blocks that are written to.
*/
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
if (bio_data_dir(bio) == WRITE) {
bio_drop_shared_lock(cache, bio);
atomic_inc(&cache->stats.demotion);
invalidate_start(cache, cblock, block, bio);
} else
remap_to_origin_clear_discard(cache, bio, block);
-
} else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
!is_dirty(cache, cblock)) {
- remap_to_origin_then_cache(cache, bio, block, cblock);
+ remap_to_origin_and_cache(cache, bio, block, cblock);
accounted_begin(cache, bio);
} else
remap_to_cache_dirty(cache, bio, block, cblock);
@@ -1922,8 +1880,7 @@ static blk_status_t commit_op(void *context)
static bool process_flush_bio(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (!pb->req_nr)
remap_to_origin(cache, bio);
@@ -1983,28 +1940,6 @@ static void process_deferred_bios(struct work_struct *ws)
schedule_commit(&cache->committer);
}
-static void process_deferred_writethrough_bios(struct work_struct *ws)
-{
- struct cache *cache = container_of(ws, struct cache, deferred_writethrough_worker);
-
- unsigned long flags;
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock_irqsave(&cache->lock, flags);
- bio_list_merge(&bios, &cache->deferred_writethrough_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
- spin_unlock_irqrestore(&cache->lock, flags);
-
- /*
- * These bios have already been through accounted_begin()
- */
- while ((bio = bio_list_pop(&bios)))
- generic_make_request(bio);
-}
-
/*----------------------------------------------------------------
* Main worker loop
*--------------------------------------------------------------*/
@@ -2112,6 +2047,9 @@ static void destroy(struct cache *cache)
kfree(cache->ctr_args[i]);
kfree(cache->ctr_args);
+ if (cache->bs)
+ bioset_free(cache->bs);
+
kfree(cache);
}
@@ -2555,8 +2493,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discards_supported = true;
ti->split_discard_bios = false;
+ ti->per_io_data_size = sizeof(struct per_bio_data);
+
cache->features = ca->features;
- ti->per_io_data_size = get_per_bio_data_size(cache);
+ if (writethrough_mode(cache)) {
+ /* Create bioset for writethrough bios issued to origin */
+ cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+ if (!cache->bs)
+ goto bad;
+ }
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2618,7 +2563,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
bool all_clean;
r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -2637,9 +2582,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
}
spin_lock_init(&cache->lock);
- INIT_LIST_HEAD(&cache->deferred_cells);
bio_list_init(&cache->deferred_bios);
- bio_list_init(&cache->deferred_writethrough_bios);
atomic_set(&cache->nr_allocated_migrations, 0);
atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
@@ -2678,8 +2621,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
- INIT_WORK(&cache->deferred_writethrough_worker,
- process_deferred_writethrough_bios);
INIT_WORK(&cache->migration_worker, check_migrations);
INIT_DELAYED_WORK(&cache->waker, do_waker);
@@ -2795,9 +2736,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r;
bool commit_needed;
dm_oblock_t block = get_bio_block(cache, bio);
- size_t pb_data_size = get_per_bio_data_size(cache);
- init_per_bio_data(bio, pb_data_size);
+ init_per_bio_data(bio);
if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
@@ -2821,13 +2761,11 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return r;
}
-static int cache_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
struct cache *cache = ti->private;
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->tick) {
policy_tick(cache->policy, false);
@@ -3243,13 +3181,13 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("1 ");
- if (writethrough_mode(&cache->features))
+ if (writethrough_mode(cache))
DMEMIT("writethrough ");
- else if (passthrough_mode(&cache->features))
+ else if (passthrough_mode(cache))
DMEMIT("passthrough ");
- else if (writeback_mode(&cache->features))
+ else if (writeback_mode(cache))
DMEMIT("writeback ");
else {
@@ -3415,7 +3353,7 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
unsigned i;
struct cblock_range range;
- if (!passthrough_mode(&cache->features)) {
+ if (!passthrough_mode(cache)) {
DMERR("%s: cache has to be in passthrough mode for invalidation",
cache_device_name(cache));
return -EPERM;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 203144762f36..6a14f945783c 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -29,7 +29,6 @@ struct dm_kobject_holder {
* DM targets must _not_ dereference a mapped_device to directly access its members!
*/
struct mapped_device {
- struct srcu_struct io_barrier;
struct mutex suspend_lock;
/*
@@ -127,6 +126,8 @@ struct mapped_device {
struct blk_mq_tag_set *tag_set;
bool use_blk_mq:1;
bool init_tio_pdu:1;
+
+ struct srcu_struct io_barrier;
};
void dm_init_md_queue(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 96ab46512e1f..9fc12f556534 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1075,7 +1075,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
@@ -1168,7 +1168,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
int r = 0;
/* Reject unexpected unaligned bio. */
- if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+ if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
return -EIO;
dmreq = dmreq_of_req(cc, req);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2209a9700acd..288386bfbfb5 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -44,9 +44,9 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock);
-static void handle_delayed_timer(unsigned long data)
+static void handle_delayed_timer(struct timer_list *t)
{
- struct delay_c *dc = (struct delay_c *)data;
+ struct delay_c *dc = from_timer(dc, t, delay_timer);
queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
@@ -195,7 +195,7 @@ out:
goto bad_queue;
}
- setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
+ timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
INIT_LIST_HEAD(&dc->delayed_bios);
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ba84b8d62cd0..73a5c198113a 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1513,7 +1513,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
- ti->discards_supported = true;
era->callbacks.congested_fn = era_is_congested;
dm_table_add_target_callbacks(ti->table, &era->callbacks);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 096fe9b66c50..05c7bfd0c9d9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,7 @@
* This file is released under the GPL.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -80,13 +81,13 @@ struct journal_entry {
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
-#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
+#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
- return ACCESS_ONCE(ic->failed);
+ return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
@@ -1093,9 +1094,9 @@ static void sleep_on_endio_wait(struct dm_integrity_c *ic)
__remove_wait_queue(&ic->endio_wait, &wait);
}
-static void autocommit_fn(unsigned long data)
+static void autocommit_fn(struct timer_list *t)
{
- struct dm_integrity_c *ic = (struct dm_integrity_c *)data;
+ struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
if (likely(!dm_integrity_failed(ic)))
queue_work(ic->commit_wq, &ic->commit_work);
@@ -1376,7 +1377,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
struct bvec_iter iter;
struct bio_vec bv;
bio_for_each_segment(bv, bio, iter) {
- if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
+ if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
bv.bv_offset, bv.bv_len, ic->sectors_per_block);
return DM_MAPIO_KILL;
@@ -1545,7 +1546,7 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
queue_work(ic->commit_wq, &ic->commit_work);
} else {
schedule_autocommit(ic);
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w)
ic->n_committed_sections += commit_sections;
spin_unlock_irq(&ic->endio_wait.lock);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (ACCESS_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending))
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2941,7 +2942,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
- setup_timer(&ic->autocommit_timer, autocommit_fn, (unsigned long)ic);
+ timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
ic->io = dm_io_client_create();
if (IS_ERR(ic->io)) {
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 8b80a9ce9ea9..189badbeddaf 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -10,9 +10,11 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
+#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/uio.h>
#define DM_MSG_PREFIX "log-writes"
@@ -246,27 +248,108 @@ error:
return -1;
}
+static int write_inline_data(struct log_writes_c *lc, void *entry,
+ size_t entrylen, void *data, size_t datalen,
+ sector_t sector)
+{
+ int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+ struct page *page;
+ struct bio *bio;
+ size_t ret;
+ void *ptr;
+
+ while (datalen) {
+ num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
+ bio_pages = min(num_pages, BIO_MAX_PAGES);
+
+ atomic_inc(&lc->io_blocks);
+
+ bio = bio_alloc(GFP_KERNEL, bio_pages);
+ if (!bio) {
+ DMERR("Couldn't alloc inline data bio");
+ goto error;
+ }
+
+ bio->bi_iter.bi_size = 0;
+ bio->bi_iter.bi_sector = sector;
+ bio_set_dev(bio, lc->logdev->bdev);
+ bio->bi_end_io = log_end_io;
+ bio->bi_private = lc;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ for (i = 0; i < bio_pages; i++) {
+ pg_datalen = min_t(int, datalen, PAGE_SIZE);
+ pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ DMERR("Couldn't alloc inline data page");
+ goto error_bio;
+ }
+
+ ptr = kmap_atomic(page);
+ memcpy(ptr, data, pg_datalen);
+ if (pg_sectorlen > pg_datalen)
+ memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
+ kunmap_atomic(ptr);
+
+ ret = bio_add_page(bio, page, pg_sectorlen, 0);
+ if (ret != pg_sectorlen) {
+ DMERR("Couldn't add page of inline data");
+ __free_page(page);
+ goto error_bio;
+ }
+
+ datalen -= pg_datalen;
+ data += pg_datalen;
+ }
+ submit_bio(bio);
+
+ sector += bio_pages * PAGE_SECTORS;
+ }
+ return 0;
+error_bio:
+ bio_free_pages(bio);
+ bio_put(bio);
+error:
+ put_io_block(lc);
+ return -1;
+}
+
static int log_one_block(struct log_writes_c *lc,
struct pending_block *block, sector_t sector)
{
struct bio *bio;
struct log_write_entry entry;
- size_t ret;
+ size_t metadatalen, ret;
int i;
entry.sector = cpu_to_le64(block->sector);
entry.nr_sectors = cpu_to_le64(block->nr_sectors);
entry.flags = cpu_to_le64(block->flags);
entry.data_len = cpu_to_le64(block->datalen);
+
+ metadatalen = (block->flags & LOG_MARK_FLAG) ? block->datalen : 0;
if (write_metadata(lc, &entry, sizeof(entry), block->data,
- block->datalen, sector)) {
+ metadatalen, sector)) {
free_pending_block(lc, block);
return -1;
}
+ sector += dev_to_bio_sectors(lc, 1);
+
+ if (block->datalen && metadatalen == 0) {
+ if (write_inline_data(lc, &entry, sizeof(entry), block->data,
+ block->datalen, sector)) {
+ free_pending_block(lc, block);
+ return -1;
+ }
+ /* we don't support both inline data & bio data */
+ goto out;
+ }
+
if (!block->vec_cnt)
goto out;
- sector += dev_to_bio_sectors(lc, 1);
atomic_inc(&lc->io_blocks);
bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
@@ -527,6 +610,51 @@ static int log_mark(struct log_writes_c *lc, char *data)
return 0;
}
+static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pending_block *block;
+
+ if (!bytes)
+ return 0;
+
+ block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
+ if (!block) {
+ DMERR("Error allocating dax pending block");
+ return -ENOMEM;
+ }
+
+ block->data = kzalloc(bytes, GFP_KERNEL);
+ if (!block->data) {
+ DMERR("Error allocating dax data space");
+ kfree(block);
+ return -ENOMEM;
+ }
+
+ /* write data provided via the iterator */
+ if (!copy_from_iter(block->data, bytes, i)) {
+ DMERR("Error copying dax data");
+ kfree(block->data);
+ kfree(block);
+ return -EIO;
+ }
+
+ /* rewind the iterator so that the block driver can use it */
+ iov_iter_revert(i, bytes);
+
+ block->datalen = bytes;
+ block->sector = bio_to_dev_sectors(lc, sector);
+ block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
+
+ atomic_inc(&lc->pending_blocks);
+ spin_lock_irq(&lc->blocks_lock);
+ list_add_tail(&block->list, &lc->unflushed_blocks);
+ spin_unlock_irq(&lc->blocks_lock);
+ wake_up_process(lc->log_kthread);
+
+ return 0;
+}
+
static void log_writes_dtr(struct dm_target *ti)
{
struct log_writes_c *lc = ti->private;
@@ -792,9 +920,46 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->io_min = limits->physical_block_size;
}
+static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int ret;
+
+ ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
+ if (ret)
+ return ret;
+ return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
+}
+
+static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
+ pgoff_t pgoff, void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+ int err;
+
+ if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+
+ /* Don't bother doing anything if logging has been disabled */
+ if (!lc->logging_enabled)
+ goto dax_copy;
+
+ err = log_dax(lc, sector, bytes, i);
+ if (err) {
+ DMWARN("Error %d logging DAX write", err);
+ return 0;
+ }
+dax_copy:
+ return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+}
+
static struct target_type log_writes_target = {
.name = "log-writes",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = log_writes_ctr,
.dtr = log_writes_dtr,
@@ -805,6 +970,8 @@ static struct target_type log_writes_target = {
.message = log_writes_message,
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
+ .direct_access = log_writes_dax_direct_access,
+ .dax_copy_from_iter = log_writes_dax_copy_from_iter,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 11f273d2f018..c8faa2b85842 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
pgpath = path_to_pgpath(path);
- if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
}
/* Were we instructed to switch PG? */
- if (lockless_dereference(m->next_pg)) {
+ if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
/* Don't change PG until it has no remaining paths */
check_current_pg:
- pg = lockless_dereference(m->current_pg);
+ pg = READ_ONCE(m->current_pg);
if (pg) {
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request *clone;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
@@ -499,8 +499,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
bool queue_dying = blk_queue_dying(q);
- DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing",
- PTR_ERR(clone), queue_dying ? " (path offline)" : "");
if (queue_dying) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
@@ -535,7 +533,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bool queue_io;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
pgpath = choose_pgpath(m, nr_bytes);
@@ -641,14 +639,6 @@ static void process_queued_bios(struct work_struct *work)
blk_finish_plug(&plug);
}
-static void assign_bit(bool value, long nr, unsigned long *addr)
-{
- if (value)
- set_bit(nr, addr);
- else
- clear_bit(nr, addr);
-}
-
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
@@ -658,11 +648,11 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit((save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
- (!save_old_value && queue_if_no_path),
- MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
- assign_bit(queue_if_no_path || dm_noflush_suspending(m->ti),
- MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
+ (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
+ (!save_old_value && queue_if_no_path));
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
+ queue_if_no_path || dm_noflush_suspending(m->ti));
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
@@ -1588,8 +1578,8 @@ static void multipath_resume(struct dm_target *ti)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- assign_bit(test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
- MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags,
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1804,7 +1794,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct pgpath *current_pgpath;
int r;
- current_pgpath = lockless_dereference(m->current_pgpath);
+ current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath)
current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1816,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
if (r == -ENOTCONN) {
- if (!lockless_dereference(m->current_pg)) {
+ if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
@@ -1895,9 +1885,9 @@ static int multipath_busy(struct dm_target *ti)
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
- pg = lockless_dereference(m->current_pg);
- next_pg = lockless_dereference(m->next_pg);
- if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = READ_ONCE(m->current_pg);
+ next_pg = READ_ONCE(m->next_pg);
+ if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
pg = next_pg;
if (!pg) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2245d06d2045..6319d846e0ad 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -12,7 +12,7 @@
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include <linux/device-mapper.h>
@@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
struct dm_raid_superblock *refsb;
uint64_t events_sb, events_refsb;
- rdev->sb_start = 0;
- rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
- if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
- DMERR("superblock size of a logical block is no longer valid");
- return -EINVAL;
- }
-
r = read_disk_sb(rdev, rdev->sb_size, false);
if (r)
return r;
@@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(Journal, &rdev->flags))
continue;
+ if (!rdev->meta_bdev)
+ continue;
+
+ /* Set superblock offset/size for metadata device. */
+ rdev->sb_start = 0;
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+ if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
+ DMERR("superblock size of a logical block is no longer valid");
+ return -EINVAL;
+ }
+
/*
* Skipping super_load due to CTR_FLAG_SYNC will cause
* the array to undergo initialization again as
@@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
continue;
- if (!rdev->meta_bdev)
- continue;
-
r = super_load(rdev, freshest);
switch (r) {
@@ -2886,9 +2887,6 @@ static void configure_discard_support(struct raid_set *rs)
bool raid456;
struct dm_target *ti = rs->ti;
- /* Assume discards not supported until after checks below. */
- ti->discards_supported = false;
-
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
@@ -2913,9 +2911,6 @@ static void configure_discard_support(struct raid_set *rs)
}
}
- /* All RAID members properly support discards */
- ti->discards_supported = true;
-
/*
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
@@ -3629,8 +3624,11 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
+ mddev_unlock(&rs->md);
+ }
rs->md.ro = 1;
}
@@ -3887,8 +3885,11 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ mddev_lock_nointr(mddev);
mddev_resume(mddev);
+ mddev_unlock(mddev);
+ }
}
static struct target_type raid_target = {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index c0b82136b2d1..580c49cc8079 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -94,9 +94,9 @@ static void wakeup_mirrord(void *context)
queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}
-static void delayed_wake_fn(unsigned long data)
+static void delayed_wake_fn(struct timer_list *t)
{
- struct mirror_set *ms = (struct mirror_set *) data;
+ struct mirror_set *ms = from_timer(ms, t, timer);
clear_bit(0, &ms->timer_pending);
wakeup_mirrord(ms);
@@ -108,8 +108,6 @@ static void delayed_wake(struct mirror_set *ms)
return;
ms->timer.expires = jiffies + HZ / 5;
- ms->timer.data = (unsigned long) ms;
- ms->timer.function = delayed_wake_fn;
add_timer(&ms->timer);
}
@@ -1133,7 +1131,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_free_context;
}
INIT_WORK(&ms->kmirrord_work, do_mirror);
- init_timer(&ms->timer);
+ timer_setup(&ms->timer, delayed_wake_fn, 0);
ms->timer_pending = 0;
INIT_WORK(&ms->trigger_event, trigger_event);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index eadfcfd106ff..9d32f25489c2 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -56,7 +56,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
int dm_request_based(struct mapped_device *md)
{
- return blk_queue_stackable(md->queue);
+ return queue_is_rq_based(md->queue);
}
static void dm_old_start_queue(struct request_queue *q)
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 6028d8247f58..29bc51084c82 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
@@ -431,7 +432,7 @@ do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
@@ -639,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
@@ -693,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index f1c0956e3843..2ddfae678f32 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DM_STATS_H
#define DM_STATS_H
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
switch_get_position(sctx, region_nr, &region_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ef7b8f201f73..88130b5d95f9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -451,15 +451,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
return r;
}
- atomic_set(&dd->count, 0);
+ refcount_set(&dd->count, 1);
list_add(&dd->list, &t->devices);
} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
+ refcount_inc(&dd->count);
}
- atomic_inc(&dd->count);
*result = dd->dm_dev;
return 0;
@@ -515,7 +515,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
dm_device_name(ti->table->md), d->name);
return;
}
- if (atomic_dec_and_test(&dd->count)) {
+ if (refcount_dec_and_test(&dd->count)) {
dm_put_table_device(ti->table->md, d);
list_del(&dd->list);
kfree(dd);
@@ -1000,7 +1000,7 @@ verify_rq_based:
list_for_each_entry(dd, devices, list) {
struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
- if (!blk_queue_stackable(q)) {
+ if (!queue_is_rq_based(q)) {
DMERR("table load rejected: including"
" non-request-stackable devices");
return -EINVAL;
@@ -1758,13 +1758,12 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
return true;
}
-
-static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && blk_queue_discard(q);
+ return q && !blk_queue_discard(q);
}
static bool dm_table_supports_discards(struct dm_table *t)
@@ -1772,28 +1771,24 @@ static bool dm_table_supports_discards(struct dm_table *t)
struct dm_target *ti;
unsigned i;
- /*
- * Unless any target used by the table set discards_supported,
- * require at least one underlying device to support discards.
- * t->devices includes internal dm devices such as mirror logs
- * so we need to use iterate_devices here, which targets
- * supporting discard selectively must provide.
- */
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!ti->num_discard_bios)
- continue;
-
- if (ti->discards_supported)
- return true;
+ return false;
- if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_discard_capable, NULL))
- return true;
+ /*
+ * Either the target provides discard support (as implied by setting
+ * 'discards_supported') or it relies on _all_ data devices having
+ * discard support.
+ */
+ if (!ti->discards_supported &&
+ (!ti->type->iterate_devices ||
+ ti->type->iterate_devices(ti, device_not_discard_capable, NULL)))
+ return false;
}
- return false;
+ return true;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
@@ -1806,9 +1801,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
q->limits = *limits;
- if (!dm_table_supports_discards(t))
+ if (!dm_table_supports_discards(t)) {
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
- else
+ /* Must also clear discard limits... */
+ q->limits.max_discard_sectors = 0;
+ q->limits.max_hw_discard_sectors = 0;
+ q->limits.discard_granularity = 0;
+ q->limits.discard_alignment = 0;
+ q->limits.discard_misaligned = 0;
+ } else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
@@ -1847,19 +1848,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
-
- /*
- * QUEUE_FLAG_STACKABLE must be set after all queue settings are
- * visible to other CPUs because, once the flag is set, incoming bios
- * are processed by request-based dm, which refers to the queue
- * settings.
- * Until the flag set, bios are passed to bio-based dm and queued to
- * md->deferred where queue settings are not needed yet.
- * Those bios are passed to request-based dm at the resume time.
- */
- smp_mb();
- if (dm_table_request_based(t))
- queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..aedb8222836b 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -92,74 +92,33 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
return block >> (level * v->hash_per_block_bits);
}
-/*
- * Callback function for asynchrnous crypto API completion notification
- */
-static void verity_op_done(struct crypto_async_request *base, int err)
-{
- struct verity_result *res = (struct verity_result *)base->data;
-
- if (err == -EINPROGRESS)
- return;
-
- res->err = err;
- complete(&res->completion);
-}
-
-/*
- * Wait for async crypto API callback
- */
-static inline int verity_complete_op(struct verity_result *res, int ret)
-{
- switch (ret) {
- case 0:
- break;
-
- case -EINPROGRESS:
- case -EBUSY:
- ret = wait_for_completion_interruptible(&res->completion);
- if (!ret)
- ret = res->err;
- reinit_completion(&res->completion);
- break;
-
- default:
- DMERR("verity_wait_hash: crypto op submission failed: %d", ret);
- }
-
- if (unlikely(ret < 0))
- DMERR("verity_wait_hash: crypto op failed: %d", ret);
-
- return ret;
-}
-
static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
struct scatterlist sg;
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
- return verity_complete_op(res, crypto_ahash_update(req));
+ return crypto_wait_req(crypto_ahash_update(req), wait);
}
/*
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
- struct verity_result *res)
+ struct crypto_wait *wait)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
- verity_op_done, (void *)res);
- init_completion(&res->completion);
+ crypto_req_done, (void *)wait);
+ crypto_init_wait(wait);
- r = verity_complete_op(res, crypto_ahash_init(req));
+ r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
DMERR("crypto_ahash_init failed: %d", r);
@@ -167,18 +126,18 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
}
if (likely(v->salt_size && (v->version >= 1)))
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
return r;
}
static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
- u8 *digest, struct verity_result *res)
+ u8 *digest, struct crypto_wait *wait)
{
int r;
if (unlikely(v->salt_size && (!v->version))) {
- r = verity_hash_update(v, req, v->salt, v->salt_size, res);
+ r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
DMERR("verity_hash_final failed updating salt: %d", r);
@@ -187,7 +146,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
}
ahash_request_set_crypt(req, NULL, digest, 0);
- r = verity_complete_op(res, crypto_ahash_final(req));
+ r = crypto_wait_req(crypto_ahash_final(req), wait);
out:
return r;
}
@@ -196,17 +155,17 @@ int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
{
int r;
- struct verity_result res;
+ struct crypto_wait wait;
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_update(v, req, data, len, &res);
+ r = verity_hash_update(v, req, data, len, &wait);
if (unlikely(r < 0))
goto out;
- r = verity_hash_final(v, req, digest, &res);
+ r = verity_hash_final(v, req, digest, &wait);
out:
return r;
@@ -389,7 +348,7 @@ out:
* Calculates the digest for the given bio
*/
int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
- struct bvec_iter *iter, struct verity_result *res)
+ struct bvec_iter *iter, struct crypto_wait *wait)
{
unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
@@ -414,7 +373,7 @@ int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
*/
sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
ahash_request_set_crypt(req, &sg, NULL, len);
- r = verity_complete_op(res, crypto_ahash_update(req));
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
DMERR("verity_for_io_block crypto op failed: %d", r);
@@ -482,7 +441,7 @@ static int verity_verify_io(struct dm_verity_io *io)
struct dm_verity *v = io->v;
struct bvec_iter start;
unsigned b;
- struct verity_result res;
+ struct crypto_wait wait;
for (b = 0; b < io->n_blocks; b++) {
int r;
@@ -507,17 +466,17 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &res);
+ r = verity_hash_init(v, req, &wait);
if (unlikely(r < 0))
return r;
start = io->iter;
- r = verity_for_io_block(v, io, &io->iter, &res);
+ r = verity_for_io_block(v, io, &io->iter, &wait);
if (unlikely(r < 0))
return r;
r = verity_hash_final(v, req, verity_io_real_digest(v, io),
- &res);
+ &wait);
if (unlikely(r < 0))
return r;
@@ -589,7 +548,7 @@ static void verity_prefetch_io(struct work_struct *work)
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a59e0ada6fd3..b675bc015512 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -90,11 +90,6 @@ struct dm_verity_io {
*/
};
-struct verity_result {
- struct completion completion;
- int err;
-};
-
static inline struct ahash_request *verity_io_hash_req(struct dm_verity *v,
struct dm_verity_io *io)
{
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index b87c1741da4b..6d7bda6f8190 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -660,6 +660,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
struct dmz_target *dmz = ti->private;
struct request_queue *q;
struct dmz_dev *dev;
+ sector_t aligned_capacity;
int ret;
/* Get the target device */
@@ -685,15 +686,17 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
goto err;
}
+ q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- if (ti->begin || (ti->len != dev->capacity)) {
+ aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ if (ti->begin ||
+ ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
ret = -EINVAL;
goto err;
}
- q = bdev_get_queue(dev->bdev);
- dev->zone_nr_sectors = q->limits.chunk_sectors;
+ dev->zone_nr_sectors = blk_queue_zone_sectors(q);
dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
@@ -929,8 +932,10 @@ static int dmz_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = dmz->dev;
+ sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
- return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
+ return fn(ti, dmz->ddev, 0, capacity, data);
}
static struct target_type dmz_type = {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be85324f44d..de17b7193299 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
+#include <linux/refcount.h>
#define DM_MSG_PREFIX "core"
@@ -98,7 +99,7 @@ struct dm_md_mempools {
struct table_device {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev dm_dev;
};
@@ -114,7 +115,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
@@ -136,7 +137,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
@@ -685,10 +686,11 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
format_dev_t(td->dm_dev.name, dev);
- atomic_set(&td->count, 0);
+ refcount_set(&td->count, 1);
list_add(&td->list, &md->table_devices);
+ } else {
+ refcount_inc(&td->count);
}
- atomic_inc(&td->count);
mutex_unlock(&md->table_devices_lock);
*result = &td->dm_dev;
@@ -701,7 +703,7 @@ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
struct table_device *td = container_of(d, struct table_device, dm_dev);
mutex_lock(&md->table_devices_lock);
- if (atomic_dec_and_test(&td->count)) {
+ if (refcount_dec_and_test(&td->count)) {
close_table_device(td, md);
list_del(&td->list);
kfree(td);
@@ -718,7 +720,7 @@ static void free_table_devices(struct list_head *devices)
struct table_device *td = list_entry(tmp, struct table_device, list);
DMWARN("dm_destroy: %s still exists with %d references",
- td->dm_dev.name, atomic_read(&td->count));
+ td->dm_dev.name, refcount_read(&td->count));
kfree(td);
}
}
@@ -1619,17 +1621,6 @@ static void dm_wq_work(struct work_struct *work);
void dm_init_md_queue(struct mapped_device *md)
{
/*
- * Request-based dm devices cannot be stacked on top of bio-based dm
- * devices. The type of this dm device may not have been decided yet.
- * The type is decided at the first table loading time.
- * To prevent problematic device stacking, clear the queue flag
- * for request stacking support until then.
- *
- * This queue is new, so no concurrency on the queue_flags.
- */
- queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-
- /*
* Initialize data that will only be used by a non-blk-mq DM queue
* - must do so here (in alloc_dev callchain) before queue is used
*/
@@ -1695,7 +1686,7 @@ static struct mapped_device *alloc_dev(int minor)
struct mapped_device *md;
void *old_md;
- md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
+ md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
@@ -1795,7 +1786,7 @@ bad_io_barrier:
bad_minor:
module_put(THIS_MODULE);
bad_module_get:
- kfree(md);
+ kvfree(md);
return NULL;
}
@@ -1814,7 +1805,7 @@ static void free_dev(struct mapped_device *md)
free_minor(minor);
module_put(THIS_MODULE);
- kfree(md);
+ kvfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
@@ -2072,17 +2063,12 @@ struct mapped_device *dm_get_md(dev_t dev)
spin_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
- if (md) {
- if ((md == MINOR_ALLOCED ||
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
- dm_deleting_md(md) ||
- test_bit(DMF_FREEING, &md->flags))) {
- md = NULL;
- goto out;
- }
- dm_get(md);
+ if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
+ test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
}
-
+ dm_get(md);
out:
spin_unlock(&_minor_lock);
@@ -2709,11 +2695,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
- if (test_bit(DMF_FREEING, &md->flags) ||
- dm_deleting_md(md))
- return NULL;
-
+ spin_lock(&_minor_lock);
+ if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
+ md = NULL;
+ goto out;
+ }
dm_get(md);
+out:
+ spin_unlock(&_minor_lock);
+
return md;
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 38c84c0a35d4..36399bb875dd 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -19,6 +19,7 @@
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/refcount.h>
#include "dm-stats.h"
@@ -38,7 +39,7 @@
*/
struct dm_dev_internal {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct dm_dev *dm_dev;
};
diff --git a/drivers/md/bitmap.c b/drivers/md/md-bitmap.c
index d2121637b4ab..239c7bb3929b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -29,7 +29,7 @@
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
static inline char *bmname(struct bitmap *bitmap)
{
@@ -368,7 +368,7 @@ static int read_page(struct file *file, unsigned long index,
pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
(unsigned long long)index << PAGE_SHIFT);
- bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
+ bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
if (!bh) {
ret = -ENOMEM;
goto out;
@@ -459,7 +459,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* rocking back to read-only */
bitmap->events_cleared = bitmap->mddev->events;
sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
- sb->state = cpu_to_le32(bitmap->flags);
+ /*
+ * clear BITMAP_WRITE_ERROR bit to protect against the case that
+ * a bitmap write error occurred but the later writes succeeded.
+ */
+ sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
/* Just in case these have been changed via sysfs: */
sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
@@ -625,7 +629,7 @@ re_read:
err = read_sb_page(bitmap->mddev,
offset,
sb_page,
- 0, PAGE_SIZE);
+ 0, sizeof(bitmap_super_t));
}
if (err)
return err;
@@ -1816,6 +1820,12 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
BUG_ON(file && mddev->bitmap_info.offset);
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
+ mdname(mddev));
+ return ERR_PTR(-EBUSY);
+ }
+
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
return ERR_PTR(-ENOMEM);
@@ -2123,7 +2133,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (store.sb_page && bitmap->storage.sb_page)
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
- PAGE_SIZE);
+ sizeof(bitmap_super_t));
bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
@@ -2152,6 +2162,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
for (k = 0; k < page; k++) {
kfree(new_bp[k].map);
}
+ kfree(new_bp);
/* restore some fields from old_counts */
bitmap->counts.bp = old_counts.bp;
@@ -2202,6 +2213,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
block += old_blocks;
}
+ if (bitmap->counts.bp != old_counts.bp) {
+ unsigned long k;
+ for (k = 0; k < old_counts.pages; k++)
+ if (!old_counts.bp[k].hijacked)
+ kfree(old_counts.bp[k].map);
+ kfree(old_counts.bp);
+ }
+
if (!init) {
int i;
while (block < (chunks << chunkshift)) {
diff --git a/drivers/md/bitmap.h b/drivers/md/md-bitmap.h
index d15721ac07a6..5df35ca90f58 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
*
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 03082e17c65c..79bfbc840385 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -15,7 +15,7 @@
#include <linux/sched.h>
#include <linux/raid/md_p.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "md-cluster.h"
#define LVB_SIZE 64
@@ -442,10 +442,11 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
+ mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
__remove_suspend_info(cinfo, slot);
spin_unlock_irq(&cinfo->suspend_lock);
- mddev->pers->quiesce(mddev, 2);
+ mddev->pers->quiesce(mddev, 0);
}
@@ -492,13 +493,12 @@ static void process_suspend_info(struct mddev *mddev,
s->lo = lo;
s->hi = hi;
mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
spin_lock_irq(&cinfo->suspend_lock);
/* Remove existing entry (if exists) before adding */
__remove_suspend_info(cinfo, slot);
list_add(&s->list, &cinfo->suspend_list);
spin_unlock_irq(&cinfo->suspend_lock);
- mddev->pers->quiesce(mddev, 2);
+ mddev->pers->quiesce(mddev, 0);
}
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
@@ -1094,7 +1094,7 @@ static void metadata_update_cancel(struct mddev *mddev)
/*
* return 0 if all the bitmaps have the same sync_size
*/
-int cluster_check_sync_size(struct mddev *mddev)
+static int cluster_check_sync_size(struct mddev *mddev)
{
int i, rv;
bitmap_super_t *sb;
@@ -1478,7 +1478,7 @@ static struct md_cluster_operations cluster_ops = {
static int __init cluster_init(void)
{
- pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+ pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
pr_info("Registering Cluster MD functions\n");
register_md_cluster_operations(&cluster_ops, THIS_MODULE);
return 0;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 274016177983..c0240708f443 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MD_CLUSTER_H
diff --git a/drivers/md/faulty.c b/drivers/md/md-faulty.c
index 38264b38420f..38264b38420f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/md-faulty.c
diff --git a/drivers/md/linear.c b/drivers/md/md-linear.c
index c464fb48039a..773fc70dced7 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/md-linear.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
-#include "linear.h"
+#include "md-linear.h"
/*
* find which device holds a particular offset
diff --git a/drivers/md/linear.h b/drivers/md/md-linear.h
index 8d392e6098b3..8381d651d4ed 100644
--- a/drivers/md/linear.h
+++ b/drivers/md/md-linear.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINEAR_H
#define _LINEAR_H
diff --git a/drivers/md/multipath.c b/drivers/md/md-multipath.c
index b68e0666b9b0..e40065bdbfc8 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/md-multipath.c
@@ -25,7 +25,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
-#include "multipath.h"
+#include "md-multipath.h"
#define MAX_WORK_PER_DISK 128
@@ -243,7 +243,6 @@ static void print_multipath_conf (struct mpconf *conf)
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
- struct request_queue *q;
int err = -EEXIST;
int path;
struct multipath_info *p;
@@ -257,7 +256,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
for (path = first; path <= last; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
- q = rdev->bdev->bd_disk->queue;
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/multipath.h b/drivers/md/md-multipath.h
index 717c60f62898..0adb941f485a 100644
--- a/drivers/md/multipath.h
+++ b/drivers/md/md-multipath.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MULTIPATH_H
#define _MULTIPATH_H
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..41c050b59ec4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -69,7 +69,7 @@
#include <trace/events/block.h>
#include "md.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "md-cluster.h"
#ifndef MODULE
@@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
* call has finished, the bio has been linked into some internal structure
* and so is visible to ->quiesce(), so we don't need the refcount any more.
*/
+static bool is_suspended(struct mddev *mddev, struct bio *bio)
+{
+ if (mddev->suspended)
+ return true;
+ if (bio_data_dir(bio) != WRITE)
+ return false;
+ if (mddev->suspend_lo >= mddev->suspend_hi)
+ return false;
+ if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
+ return false;
+ if (bio_end_sector(bio) < mddev->suspend_lo)
+ return false;
+ return true;
+}
+
void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
rcu_read_lock();
- if (mddev->suspended) {
+ if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
+ if (!is_suspended(mddev, bio))
break;
rcu_read_unlock();
schedule();
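The hunk above narrows the wait in md_handle_request(): only writes that overlap the [suspend_lo, suspend_hi) window (or any I/O while the whole array is suspended) are held. A minimal userspace sketch of that overlap test, with all names hypothetical:

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t sector_t;

	/* Model of the is_suspended() check added above; ranges are [start, end). */
	static bool write_is_suspended(bool array_suspended, bool is_write,
				       sector_t suspend_lo, sector_t suspend_hi,
				       sector_t bio_start, sector_t bio_end)
	{
		if (array_suspended)
			return true;		/* whole array is quiesced */
		if (!is_write)
			return false;		/* reads never wait on the window */
		if (suspend_lo >= suspend_hi)
			return false;		/* empty suspend window */
		if (bio_start >= suspend_hi)
			return false;		/* bio entirely above the window */
		if (bio_end < suspend_lo)
			return false;		/* bio entirely below the window */
		return true;			/* overlap: hold the write */
	}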
@@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
void mddev_suspend(struct mddev *mddev)
{
WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (mddev->suspended++)
return;
synchronize_rcu();
wake_up(&mddev->sb_wait);
+ set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ smp_mb__after_atomic();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
+ clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
+ wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
del_timer_sync(&mddev->safemode_timer);
}
@@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (--mddev->suspended)
return;
wake_up(&mddev->sb_wait);
@@ -520,7 +541,7 @@ static void mddev_put(struct mddev *mddev)
bioset_free(sync_bs);
}
-static void md_safemode_timeout(unsigned long data);
+static void md_safemode_timeout(struct timer_list *t);
void mddev_init(struct mddev *mddev)
{
@@ -529,8 +550,7 @@ void mddev_init(struct mddev *mddev)
mutex_init(&mddev->bitmap_info.mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
- setup_timer(&mddev->safemode_timer, md_safemode_timeout,
- (unsigned long) mddev);
+ timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
@@ -663,6 +683,7 @@ void mddev_unlock(struct mddev *mddev)
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
+ wake_up(&mddev->sb_wait);
spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -2313,7 +2334,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
+ lockdep_assert_held(&mddev->lock);
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
@@ -2432,10 +2453,18 @@ repeat:
}
}
- /* First make sure individual recovery_offsets are correct */
+ /*
+	 * First make sure individual recovery_offsets are correct.
+	 * curr_resync_completed can only be used during recovery.
+	 * During reshape/resync it might use array-addresses rather
+	 * than device addresses.
+ */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
mddev->delta_disks >= 0 &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
!test_bit(Journal, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
mddev->curr_resync_completed > rdev->recovery_offset)
@@ -2651,7 +2680,7 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
@@ -4824,7 +4853,7 @@ suspend_lo_show(struct mddev *mddev, char *page)
static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
- unsigned long long old, new;
+ unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
@@ -4840,16 +4869,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
goto unlock;
- old = mddev->suspend_lo;
+ mddev_suspend(mddev);
mddev->suspend_lo = new;
- if (new >= old)
- /* Shrinking suspended region */
- mddev->pers->quiesce(mddev, 2);
- else {
- /* Expanding suspended region - need to wait */
- mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
- }
+ mddev_resume(mddev);
+
err = 0;
unlock:
mddev_unlock(mddev);
@@ -4867,7 +4890,7 @@ suspend_hi_show(struct mddev *mddev, char *page)
static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
- unsigned long long old, new;
+ unsigned long long new;
int err;
err = kstrtoull(buf, 10, &new);
@@ -4880,19 +4903,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
err = -EINVAL;
- if (mddev->pers == NULL ||
- mddev->pers->quiesce == NULL)
+ if (mddev->pers == NULL)
goto unlock;
- old = mddev->suspend_hi;
+
+ mddev_suspend(mddev);
mddev->suspend_hi = new;
- if (new <= old)
- /* Shrinking suspended region */
- mddev->pers->quiesce(mddev, 2);
- else {
- /* Expanding suspended region - need to wait */
- mddev->pers->quiesce(mddev, 1);
- mddev->pers->quiesce(mddev, 0);
- }
+ mddev_resume(mddev);
+
err = 0;
unlock:
mddev_unlock(mddev);
@@ -5357,7 +5374,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
return NULL;
}
-static int add_named_array(const char *val, struct kernel_param *kp)
+static int add_named_array(const char *val, const struct kernel_param *kp)
{
/*
* val must be "md_*" or "mdNNN".
@@ -5386,9 +5403,9 @@ static int add_named_array(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-static void md_safemode_timeout(unsigned long data)
+static void md_safemode_timeout(struct timer_list *t)
{
- struct mddev *mddev = (struct mddev *) data;
+ struct mddev *mddev = from_timer(mddev, t, safemode_timer);
mddev->safemode = 1;
if (mddev->external)
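For reference, the timer_setup()/from_timer() conversion above follows the pattern sketched below; struct foo and its helpers are hypothetical stand-ins for the md code.

	struct foo {
		struct timer_list safemode_timer;
		int safemode;
	};

	static void foo_safemode_timeout(struct timer_list *t)
	{
		/* recover the containing object from the timer_list pointer */
		struct foo *f = from_timer(f, t, safemode_timer);

		f->safemode = 1;
	}

	static void foo_init(struct foo *f)
	{
		/* the callback no longer takes an unsigned long cookie */
		timer_setup(&f->safemode_timer, foo_safemode_timeout, 0);
	}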
@@ -5834,8 +5851,14 @@ void md_stop(struct mddev *mddev)
* This is called from dm-raid
*/
__md_stop(mddev);
- if (mddev->bio_set)
+ if (mddev->bio_set) {
bioset_free(mddev->bio_set);
+ mddev->bio_set = NULL;
+ }
+ if (mddev->sync_set) {
+ bioset_free(mddev->sync_set);
+ mddev->sync_set = NULL;
+ }
}
EXPORT_SYMBOL_GPL(md_stop);
@@ -6362,7 +6385,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
break;
}
}
- if (has_journal) {
+ if (has_journal || mddev->bitmap) {
export_rdev(rdev);
return -EBUSY;
}
@@ -6618,22 +6641,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
return -ENOENT; /* cannot remove what isn't there */
err = 0;
if (mddev->pers) {
- mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
struct bitmap *bitmap;
bitmap = bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
err = bitmap_load(mddev);
} else
err = PTR_ERR(bitmap);
- }
- if (fd < 0 || err) {
+ if (err) {
+ bitmap_destroy(mddev);
+ fd = -1;
+ }
+ mddev_resume(mddev);
+ } else if (fd < 0) {
+ mddev_suspend(mddev);
bitmap_destroy(mddev);
- fd = -1; /* make sure to put the file */
+ mddev_resume(mddev);
}
- mddev->pers->quiesce(mddev, 0);
}
if (fd < 0) {
struct file *f = mddev->bitmap_info.file;
@@ -6735,7 +6762,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
- WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
+ lockdep_assert_held(&mddev->reconfig_mutex);
if (mddev->external_size)
return;
@@ -6917,8 +6944,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- mddev->pers->quiesce(mddev, 1);
bitmap = bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (!IS_ERR(bitmap)) {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
@@ -6926,7 +6953,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
} else {
/* remove the bitmap */
if (!mddev->bitmap) {
@@ -6949,9 +6976,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.nodes = 0;
md_cluster_ops->leave(mddev);
}
- mddev->pers->quiesce(mddev, 1);
+ mddev_suspend(mddev);
bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -7468,8 +7495,8 @@ void md_wakeup_thread(struct md_thread *thread)
{
if (thread) {
pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
- if (!test_and_set_bit(THREAD_WAKEUP, &thread->flags))
- wake_up(&thread->wqueue);
+ set_bit(THREAD_WAKEUP, &thread->flags);
+ wake_up(&thread->wqueue);
}
}
EXPORT_SYMBOL(md_wakeup_thread);
@@ -8039,7 +8066,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
+ mddev->suspended);
if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
percpu_ref_put(&mddev->writes_pending);
return false;
@@ -8110,7 +8138,6 @@ void md_allow_write(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
/* wait for the dirty state to be recorded in the metadata */
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags) &&
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
} else
spin_unlock(&mddev->lock);
@@ -8477,16 +8504,19 @@ void md_do_sync(struct md_thread *thread)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- rcu_read_lock();
- rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < mddev->curr_resync)
- rdev->recovery_offset = mddev->curr_resync;
- rcu_read_unlock();
+ if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ mddev->delta_disks >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < mddev->curr_resync)
+ rdev->recovery_offset = mddev->curr_resync;
+ rcu_read_unlock();
+ }
}
}
skip:
@@ -8813,6 +8843,16 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
+ } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+ /* Write superblock - thread that called mddev_suspend()
+ * holds reconfig_mutex for us.
+ */
+ set_bit(MD_UPDATING_SB, &mddev->flags);
+ smp_mb__after_atomic();
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+ md_update_sb(mddev, 0);
+ clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+ wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);
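Rough sequence of the superblock-update handshake introduced above (illustrative only; the MD_ALLOW_SB_UPDATE/MD_UPDATING_SB flags are added to md.h further down):

	/*
	 *   mddev_suspend()                        md_check_recovery()
	 *   ---------------                        -------------------
	 *   suspended++
	 *   set_bit(MD_ALLOW_SB_UPDATE)
	 *   wait: active_io == 0                   if (MD_ALLOW_SB_UPDATE && sb_flags) {
	 *   pers->quiesce(mddev, 1)                    set_bit(MD_UPDATING_SB);
	 *   clear_bit_unlock(MD_ALLOW_SB_UPDATE)       if (MD_ALLOW_SB_UPDATE)
	 *   wait: !MD_UPDATING_SB                          md_update_sb(mddev, 0);
	 *                                              clear_bit_unlock(MD_UPDATING_SB);
	 *                                              wake_up(&mddev->sb_wait);
	 *                                          }
	 */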
@@ -9274,11 +9314,11 @@ static __exit void md_exit(void)
subsys_initcall(md_init);
module_exit(md_exit)
-static int get_ro(char *buffer, struct kernel_param *kp)
+static int get_ro(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%d", start_readonly);
}
-static int set_ro(const char *val, struct kernel_param *kp)
+static int set_ro(const char *val, const struct kernel_param *kp)
{
return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d8287d3cd1bf..7d6bcf0eba0c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -237,6 +237,12 @@ enum mddev_flags {
*/
MD_HAS_PPL, /* The raid array has PPL feature set */
MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */
+ MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update
+ * the metadata without taking reconfig_mutex.
+ */
+ MD_UPDATING_SB, /* md_check_recovery is updating the metadata
+ * without explicitly holding reconfig_mutex.
+ */
};
enum mddev_sb_flags {
@@ -494,11 +500,6 @@ static inline void mddev_lock_nointr(struct mddev *mddev)
mutex_lock(&mddev->reconfig_mutex);
}
-static inline int mddev_is_locked(struct mddev *mddev)
-{
- return mutex_is_locked(&mddev->reconfig_mutex);
-}
-
static inline int mddev_trylock(struct mddev *mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
@@ -538,12 +539,11 @@ struct md_personality
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
- /* quiesce moves between quiescence states
- * 0 - fully active
- * 1 - no new requests allowed
- * others - reserved
+ /* quiesce suspends or resumes internal processing.
+ * 1 - stop new actions and wait for action io to complete
+ * 0 - return to normal behaviour
*/
- void (*quiesce) (struct mddev *mddev, int state);
+ void (*quiesce) (struct mddev *mddev, int quiesce);
/* takeover is used to transition an array from one
* personality to another. The new personality must be able
* to handle the data in the current layout.
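With the tri-state interface gone, a personality's ->quiesce() reduces to the shape below (a sketch; example_conf and its freeze helpers are hypothetical, mirroring raid1_quiesce further down):

	static void example_quiesce(struct mddev *mddev, int quiesce)
	{
		struct example_conf *conf = mddev->private;

		if (quiesce)
			example_freeze(conf);	/* stop new actions, drain in-flight IO */
		else
			example_unfreeze(conf);	/* return to normal behaviour */
	}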
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index ff528792c358..66be7c66479a 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
dm-array.o \
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 4aed69d9dd17..aec449243966 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
+#include <linux/kernel.h>
#define DM_MSG_PREFIX "space map metadata"
@@ -111,7 +112,7 @@ static bool brb_empty(struct bop_ring_buffer *brb)
static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
unsigned r = old + 1;
- return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+ return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
static int brb_push(struct bop_ring_buffer *brb,
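The brb_next() change simply swaps the open-coded element count for ARRAY_SIZE(); a self-contained userspace equivalent (names hypothetical):

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* as provided by <linux/kernel.h> */

	struct ring { int bops[64]; };

	/* Next slot index, wrapping at the end of the fixed-size array. */
	static unsigned int ring_next(const struct ring *r, unsigned int old)
	{
		unsigned int n = old + 1;

		return n >= ARRAY_SIZE(r->bops) ? 0 : n;
	}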
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 5a00fc118470..5ecba9eef441 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -768,7 +768,7 @@ static void *raid0_takeover(struct mddev *mddev)
return ERR_PTR(-EINVAL);
}
-static void raid0_quiesce(struct mddev *mddev, int state)
+static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 7127a623f5da..540e65d92642 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID0_H
#define _RAID0_H
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 9f2670b45f31..400001b815db 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f3f3e40dc9d8..cc9d337a1ed3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,13 +37,12 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
-#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid1.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
@@ -990,14 +989,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
_wait_barrier(conf, idx);
}
-static void wait_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _wait_barrier(conf, idx);
-}
-
static void _allow_barrier(struct r1conf *conf, int idx)
{
atomic_dec(&conf->nr_pending[idx]);
@@ -1011,14 +1002,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
_allow_barrier(conf, idx);
}
-static void allow_all_barriers(struct r1conf *conf)
-{
- int idx;
-
- for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
- _allow_barrier(conf, idx);
-}
-
/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
@@ -1303,42 +1286,28 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int first_clone;
int max_sectors;
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
-
-
- if ((bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) ||
- (mddev_is_clustered(mddev) &&
+ if (mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+ bio->bi_iter.bi_sector, bio_end_sector(bio))) {
- /*
- * As the suspend_* range is controlled by userspace, we want
- * an interruptible wait.
- */
DEFINE_WAIT(w);
for (;;) {
- sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
- &w, TASK_INTERRUPTIBLE);
- if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi ||
- (mddev_is_clustered(mddev) &&
- !md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector,
- bio_end_sector(bio))))
+ &w, TASK_IDLE);
+ if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))
break;
- sigfillset(&full);
- sigprocmask(SIG_BLOCK, &full, &old);
schedule();
- sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
wait_barrier(conf, bio->bi_iter.bi_sector);
r1_bio = alloc_r1bio(mddev, bio);
@@ -1654,8 +1623,12 @@ static void print_conf(struct r1conf *conf)
static void close_sync(struct r1conf *conf)
{
- wait_all_barriers(conf);
- allow_all_barriers(conf);
+ int idx;
+
+ for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
+ _wait_barrier(conf, idx);
+ _allow_barrier(conf, idx);
+ }
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
@@ -3277,21 +3250,14 @@ static int raid1_reshape(struct mddev *mddev)
return 0;
}
-static void raid1_quiesce(struct mddev *mddev, int state)
+static void raid1_quiesce(struct mddev *mddev, int quiesce)
{
struct r1conf *conf = mddev->private;
- switch(state) {
- case 2: /* wake for suspend */
- wake_up(&conf->wait_barrier);
- break;
- case 1:
+ if (quiesce)
freeze_array(conf, 0);
- break;
- case 0:
+ else
unfreeze_array(conf);
- break;
- }
}
static void *raid1_takeover(struct mddev *mddev)
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c8894ef1e9d2..c7294e7557e0 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID1_H
#define _RAID1_H
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 374df5796649..b9edbc747a95 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -29,7 +29,7 @@
#include "md.h"
#include "raid10.h"
#include "raid0.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
/*
* RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -136,10 +136,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
kfree(r10_bio);
}
+#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
/*
* When performing a resync, we need to read and compare, so
@@ -383,12 +386,11 @@ static void raid10_end_read_request(struct bio *bio)
{
int uptodate = !bio->bi_status;
struct r10bio *r10_bio = bio->bi_private;
- int slot, dev;
+ int slot;
struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
- dev = r10_bio->devs[slot].devnum;
rdev = r10_bio->devs[slot].rdev;
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -748,7 +750,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
- sectors = r10_bio->sectors;
best_slot = -1;
best_rdev = NULL;
best_dist = MaxSector;
@@ -761,8 +762,11 @@ static struct md_rdev *read_balance(struct r10conf *conf,
* the resync window. We take the first readable disk when
* above the resync window.
*/
- if (conf->mddev->recovery_cp < MaxSector
- && (this_sector + sectors >= conf->next_resync))
+ if ((conf->mddev->recovery_cp < MaxSector
+ && (this_sector + sectors >= conf->next_resync)) ||
+ (mddev_is_clustered(conf->mddev) &&
+ md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
+ this_sector + sectors)))
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
@@ -1293,6 +1297,22 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
sector_t sectors;
int max_sectors;
+ if ((mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))) {
+ DEFINE_WAIT(w);
+ for (;;) {
+ prepare_to_wait(&conf->wait_barrier,
+ &w, TASK_IDLE);
+ if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector, bio_end_sector(bio)))
+ break;
+ schedule();
+ }
+ finish_wait(&conf->wait_barrier, &w);
+ }
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -2575,7 +2595,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *bio;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev = r10_bio->devs[slot].rdev;
- sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -2586,7 +2605,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* frozen.
*/
bio = r10_bio->devs[slot].bio;
- bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
@@ -2826,6 +2844,43 @@ static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
}
/*
+ * Set cluster_sync_high since we need other nodes to add the
+ * range [cluster_sync_low, cluster_sync_high] to suspend list.
+ */
+static void raid10_set_cluster_sync_high(struct r10conf *conf)
+{
+ sector_t window_size;
+ int extra_chunk, chunks;
+
+ /*
+	 * First, we define a "stripe" as a unit that spans all member
+	 * devices once, so the number of chunks is raid_disks /
+	 * near_copies. Otherwise, if near_copies were close to
+	 * raid_disks, the resync window could grow linearly with
+	 * raid_disks, which means we would suspend a much larger IO
+	 * window than necessary. If raid_disks is not divisible by
+	 * near_copies, an extra chunk is needed to ensure the whole
+	 * "stripe" is covered.
+ */
+
+ chunks = conf->geo.raid_disks / conf->geo.near_copies;
+ if (conf->geo.raid_disks % conf->geo.near_copies == 0)
+ extra_chunk = 0;
+ else
+ extra_chunk = 1;
+ window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
+
+ /*
+ * At least use a 32M window to align with raid1's resync window
+ */
+ window_size = (CLUSTER_RESYNC_WINDOW_SECTORS > window_size) ?
+ CLUSTER_RESYNC_WINDOW_SECTORS : window_size;
+
+ conf->cluster_sync_high = conf->cluster_sync_low + window_size;
+}
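A compilable userspace model of the window computation above, under the same assumptions (chunks per "stripe" is raid_disks / near_copies, plus one when the division is inexact, with the cluster resync window as a floor):

	#include <stdint.h>

	typedef uint64_t sector_t;

	#define RESYNC_WINDOW			(1024 * 1024)
	#define CLUSTER_RESYNC_WINDOW		(16 * RESYNC_WINDOW)
	#define CLUSTER_RESYNC_WINDOW_SECTORS	(CLUSTER_RESYNC_WINDOW >> 9)

	static sector_t cluster_window_sectors(int raid_disks, int near_copies,
					       sector_t chunk_sectors)
	{
		int chunks = raid_disks / near_copies;
		int extra = (raid_disks % near_copies) ? 1 : 0;
		sector_t window = (sector_t)(chunks + extra) * chunk_sectors;

		/* never shrink below the minimum cluster resync window */
		return window < CLUSTER_RESYNC_WINDOW_SECTORS ?
		       CLUSTER_RESYNC_WINDOW_SECTORS : window;
	}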
+
+/*
* perform a "sync" on one "block"
*
* We need to make sure that no normal I/O request - particularly write
@@ -2897,6 +2952,9 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
+ conf->cluster_sync_low = 0;
+ conf->cluster_sync_high = 0;
+
/* If we aborted, we need to abort the
* sync on the 'current' bitmap chunks (there can
* be several when recovering multiple devices).
@@ -3251,7 +3309,17 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
- bitmap_cond_end_sync(mddev->bitmap, sector_nr, 0);
+ /*
+	 * Since curr_resync_completed might not be updated in time,
+	 * and we set cluster_sync_low based on it, check against
+	 * "sector_nr + 2 * RESYNC_SECTORS" to be safe; this ensures
+	 * curr_resync_completed is updated in bitmap_cond_end_sync.
+ */
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
@@ -3385,6 +3453,52 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
} while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
+ if (mddev_is_clustered(mddev) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /* It is resync not recovery */
+ if (conf->cluster_sync_high < sector_nr + nr_sectors) {
+ conf->cluster_sync_low = mddev->curr_resync_completed;
+ raid10_set_cluster_sync_high(conf);
+ /* Send resync message */
+ md_cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+ } else if (mddev_is_clustered(mddev)) {
+ /* This is recovery not resync */
+ sector_t sect_va1, sect_va2;
+ bool broadcast_msg = false;
+
+ for (i = 0; i < conf->geo.raid_disks; i++) {
+ /*
+			 * sector_nr is a device address for recovery, so we
+			 * need to translate it to an array address before
+			 * comparing it with cluster_sync_high.
+ */
+ sect_va1 = raid10_find_virt(conf, sector_nr, i);
+
+ if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
+ broadcast_msg = true;
+ /*
+				 * curr_resync_completed is similar to
+				 * sector_nr, so apply the same translation.
+ */
+ sect_va2 = raid10_find_virt(conf,
+ mddev->curr_resync_completed, i);
+
+ if (conf->cluster_sync_low == 0 ||
+ conf->cluster_sync_low > sect_va2)
+ conf->cluster_sync_low = sect_va2;
+ }
+ }
+ if (broadcast_msg) {
+ raid10_set_cluster_sync_high(conf);
+ md_cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+ }
+
while (biolist) {
bio = biolist;
biolist = biolist->bi_next;
@@ -3644,6 +3758,18 @@ static int raid10_run(struct mddev *mddev)
if (!conf)
goto out;
+ if (mddev_is_clustered(conf->mddev)) {
+ int fc, fo;
+
+ fc = (mddev->layout >> 8) & 255;
+ fo = mddev->layout & (1<<16);
+ if (fc > 1 || fo > 0) {
+ pr_err("only near layout is supported by clustered"
+ " raid10\n");
+ goto out;
+ }
+ }
+
mddev->thread = conf->thread;
conf->thread = NULL;
@@ -3832,18 +3958,14 @@ static void raid10_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-static void raid10_quiesce(struct mddev *mddev, int state)
+static void raid10_quiesce(struct mddev *mddev, int quiesce)
{
struct r10conf *conf = mddev->private;
- switch(state) {
- case 1:
+ if (quiesce)
raise_barrier(conf, 0);
- break;
- case 0:
+ else
lower_barrier(conf);
- break;
- }
}
static int raid10_resize(struct mddev *mddev, sector_t sectors)
@@ -4578,15 +4700,18 @@ static int handle_reshape_read_error(struct mddev *mddev,
/* Use sync reads to get the blocks from somewhere else */
int sectors = r10_bio->sectors;
struct r10conf *conf = mddev->private;
- struct {
- struct r10bio r10_bio;
- struct r10dev devs[conf->copies];
- } on_stack;
- struct r10bio *r10b = &on_stack.r10_bio;
+ struct r10bio *r10b;
int slot = 0;
int idx = 0;
struct page **pages;
+ r10b = kmalloc(sizeof(*r10b) +
+ sizeof(struct r10dev) * conf->copies, GFP_NOIO);
+ if (!r10b) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return -ENOMEM;
+ }
+
/* reshape IOs share pages from .devs[0].bio */
pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
@@ -4635,11 +4760,13 @@ static int handle_reshape_read_error(struct mddev *mddev,
/* couldn't read this block, must give up */
set_bit(MD_RECOVERY_INTR,
&mddev->recovery);
+ kfree(r10b);
return -EIO;
}
sectors -= s;
idx++;
}
+ kfree(r10b);
return 0;
}
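The on-stack variable-length struct is replaced with a heap allocation sized for conf->copies trailing entries; a minimal userspace sketch of the pattern (types are hypothetical stand-ins):

	#include <stdlib.h>

	struct devslot { void *bio; };		/* stand-in for struct r10dev */

	struct reshape_buf {
		int sectors;
		struct devslot devs[];		/* flexible array member */
	};

	/* Heap allocation replacing the old on-stack VLA struct; every exit
	 * path must free the buffer (kfree() in the kernel code above). */
	static struct reshape_buf *alloc_reshape_buf(int copies)
	{
		return malloc(sizeof(struct reshape_buf) +
			      sizeof(struct devslot) * copies);
	}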
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 735ce1a3d260..db2ac22ac1b4 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID10_H
#define _RAID10_H
@@ -88,6 +89,12 @@ struct r10conf {
* the new thread here until we fully activate the array.
*/
struct md_thread *thread;
+
+ /*
+ * Keep track of cluster resync window to send to other nodes.
+ */
+ sector_t cluster_sync_low;
+ sector_t cluster_sync_high;
};
/*
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0b7406ac8ce1..f1c86d938502 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -23,7 +23,7 @@
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "raid5-log.h"
/*
@@ -539,7 +539,7 @@ static void r5l_log_run_stripes(struct r5l_log *log)
{
struct r5l_io_unit *io, *next;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
/* don't change list order */
@@ -555,7 +555,7 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
{
struct r5l_io_unit *io, *next;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
/* don't change list order */
@@ -693,6 +693,8 @@ static void r5c_disable_writeback_async(struct work_struct *work)
struct r5l_log *log = container_of(work, struct r5l_log,
disable_writeback_work);
struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+ int locked = 0;
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
return;
@@ -701,11 +703,15 @@ static void r5c_disable_writeback_async(struct work_struct *work)
/* wait superblock change before suspend */
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
-
- mddev_suspend(mddev);
- log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
- mddev_resume(mddev);
+ conf->log == NULL ||
+ (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
+ (locked = mddev_trylock(mddev))));
+ if (locked) {
+ mddev_suspend(mddev);
+ log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+ mddev_resume(mddev);
+ mddev_unlock(mddev);
+ }
}
static void r5l_submit_current_io(struct r5l_log *log)
@@ -1194,7 +1200,7 @@ static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
struct stripe_head *sh;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
if (!list_empty(&log->no_mem_stripes)) {
sh = list_first_entry(&log->no_mem_stripes,
@@ -1210,7 +1216,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
struct r5l_io_unit *io, *next;
bool found = false;
- assert_spin_locked(&log->io_list_lock);
+ lockdep_assert_held(&log->io_list_lock);
list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
/* don't change list order */
@@ -1382,7 +1388,7 @@ static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
* raid5_release_stripe() while holding conf->device_lock
*/
BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
- assert_spin_locked(&conf->device_lock);
+ lockdep_assert_held(&conf->device_lock);
list_del_init(&sh->lru);
atomic_inc(&sh->count);
@@ -1409,7 +1415,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
int count;
struct stripe_head *sh, *next;
- assert_spin_locked(&conf->device_lock);
+ lockdep_assert_held(&conf->device_lock);
if (!conf->log)
return;
@@ -1583,21 +1589,21 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
md_wakeup_thread(log->reclaim_thread);
}
-void r5l_quiesce(struct r5l_log *log, int state)
+void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log || state == 2)
+ if (!log)
return;
- if (state == 0)
- kthread_unpark(log->reclaim_thread->tsk);
- else if (state == 1) {
+
+ if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
kthread_park(log->reclaim_thread->tsk);
r5l_wake_reclaim(log, MaxSector);
r5l_do_reclaim(log);
- }
+ } else
+ kthread_unpark(log->reclaim_thread->tsk);
}
bool r5l_log_disk_error(struct r5conf *conf)
@@ -3165,6 +3171,8 @@ void r5l_exit_log(struct r5conf *conf)
conf->log = NULL;
synchronize_rcu();
+ /* Ensure disable_writeback_work wakes up and exits */
+ wake_up(&conf->mddev->sb_wait);
flush_work(&log->disable_writeback_work);
md_unregister_thread(&log->reclaim_thread);
mempool_destroy(log->meta_pool);
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 328d67aedda4..284578b0a349 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_LOG_H
#define _RAID5_LOG_H
@@ -8,7 +9,7 @@ extern void r5l_write_stripe_run(struct r5l_log *log);
extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
extern void r5l_stripe_write_finished(struct stripe_head *sh);
extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
-extern void r5l_quiesce(struct r5l_log *log, int state);
+extern void r5l_quiesce(struct r5l_log *log, int quiesce);
extern bool r5l_log_disk_error(struct r5conf *conf);
extern bool r5c_is_writeback(struct r5l_log *log);
extern int
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index cd026c88f7ef..628c0bf7b9fd 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -758,7 +758,8 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
(unsigned long long)sector);
rdev = conf->disks[dd_idx].rdev;
- if (!rdev) {
+ if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
+ sector >= rdev->recovery_offset)) {
pr_debug("%s:%*s data member disk %d missing\n",
__func__, indent, "", dd_idx);
update_parity = false;
@@ -1296,8 +1297,7 @@ int ppl_init_log(struct r5conf *conf)
if (ret) {
goto err;
- } else if (!mddev->pers &&
- mddev->recovery_cp == 0 && !mddev->degraded &&
+ } else if (!mddev->pers && mddev->recovery_cp == 0 &&
ppl_conf->recovered_entries > 0 &&
ppl_conf->mismatch_count == 0) {
/*
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 928e24a07133..31dc25e2871a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -55,7 +55,6 @@
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
#include <linux/flex_array.h>
-#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/list_sort.h>
@@ -63,7 +62,7 @@
#include "md.h"
#include "raid5.h"
#include "raid0.h"
-#include "bitmap.h"
+#include "md-bitmap.h"
#include "raid5-log.h"
#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
@@ -1818,8 +1817,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
struct r5dev *dev = &sh->dev[i];
if (dev->written || i == pd_idx || i == qd_idx) {
- if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
+ if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
set_bit(R5_UPTODATE, &dev->flags);
+ if (test_bit(STRIPE_EXPAND_READY, &sh->state))
+ set_bit(R5_Expanded, &dev->flags);
+ }
if (fua)
set_bit(R5_WantFUA, &dev->flags);
if (sync)
@@ -5682,28 +5684,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
goto retry;
}
- if (rw == WRITE &&
- logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi) {
- raid5_release_stripe(sh);
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
- */
- prepare_to_wait(&conf->wait_for_overlap,
- &w, TASK_INTERRUPTIBLE);
- if (logical_sector >= mddev->suspend_lo &&
- logical_sector < mddev->suspend_hi) {
- sigset_t full, old;
- sigfillset(&full);
- sigprocmask(SIG_BLOCK, &full, &old);
- schedule();
- sigprocmask(SIG_SETMASK, &old, NULL);
- do_prepare = true;
- }
- goto retry;
- }
-
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
!add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or
@@ -5758,6 +5738,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
*/
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
+ struct md_rdev *rdev;
sector_t first_sector, last_sector;
int raid_disks = conf->previous_raid_disks;
int data_disks = raid_disks - conf->max_degraded;
@@ -5880,6 +5861,15 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
return 0;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
+ if (!mddev->reshape_backwards)
+ /* Can update recovery_offset */
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sector_nr)
+ rdev->recovery_offset = sector_nr;
+
conf->reshape_checkpoint = jiffies;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
@@ -5978,6 +5968,14 @@ finish:
goto ret;
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = sector_nr;
+ if (!mddev->reshape_backwards)
+ /* Can update recovery_offset */
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sector_nr)
+ rdev->recovery_offset = sector_nr;
conf->reshape_checkpoint = jiffies;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
@@ -6072,7 +6070,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
@@ -7156,6 +7154,13 @@ static int raid5_run(struct mddev *mddev)
min_offset_diff = diff;
}
+ if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
+ (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
+ pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape.
* Difficulties arise if the stripe we would write to
@@ -7958,6 +7963,7 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+ struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
@@ -7965,6 +7971,11 @@ static void end_reshape(struct r5conf *conf)
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
+ rdev_for_each(rdev, conf->mddev)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags))
+ rdev->recovery_offset = MaxSector;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
@@ -8020,16 +8031,12 @@ static void raid5_finish_reshape(struct mddev *mddev)
}
}
-static void raid5_quiesce(struct mddev *mddev, int state)
+static void raid5_quiesce(struct mddev *mddev, int quiesce)
{
struct r5conf *conf = mddev->private;
- switch(state) {
- case 2: /* resume for a suspend */
- wake_up(&conf->wait_for_overlap);
- break;
-
- case 1: /* stop all writes */
+ if (quiesce) {
+ /* stop all writes */
lock_all_device_hash_locks_irq(conf);
/* '2' tells resync/reshape to pause so that all
* active stripes can drain
@@ -8045,17 +8052,15 @@ static void raid5_quiesce(struct mddev *mddev, int state)
unlock_all_device_hash_locks_irq(conf);
/* allow reshape to continue */
wake_up(&conf->wait_for_overlap);
- break;
-
- case 0: /* re-enable writes */
+ } else {
+ /* re-enable writes */
lock_all_device_hash_locks_irq(conf);
conf->quiesce = 0;
wake_up(&conf->wait_for_quiescent);
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
- break;
}
- r5l_quiesce(conf->log, state);
+ r5l_quiesce(conf->log, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index f6536399677a..2e6123825095 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID5_H
#define _RAID5_H
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 044503aa8801..594b462ddf0e 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel multimedia device drivers.
#
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 3353c1741961..41ee3325e1ea 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
ifeq ($(CONFIG_CEC_NOTIFIER),y)
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f8a808d45034..98f88c43f62c 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -86,7 +86,7 @@ void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
static const u8 max_events[CEC_NUM_EVENTS] = {
- 1, 1, 64, 64,
+ 1, 1, 64, 64, 8, 8,
};
struct cec_event_entry *entry;
unsigned int ev_idx = new_ev->event - 1;
@@ -170,6 +170,22 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
}
EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
+/* Notify userspace that the HPD pin changed state at the given time. */
+void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_hpd_event);
+
/*
* Queue a new message for this filehandle.
*
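A sketch of how a pin driver might feed the new helper; the handler below is hypothetical, only cec_queue_pin_hpd_event() and ktime_get() come from the code above:

	static void example_hpd_change(struct cec_adapter *adap, bool hpd_is_high)
	{
		/* timestamp the transition and queue CEC_EVENT_PIN_HPD_{HIGH,LOW} */
		cec_queue_pin_hpd_event(adap, hpd_is_high, ktime_get());
	}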
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index a079f7fe018c..3dba3aa34a43 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -32,6 +32,7 @@
#include <media/cec-pin.h>
#include "cec-priv.h"
+#include "cec-pin-priv.h"
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
@@ -529,7 +530,7 @@ static int cec_open(struct inode *inode, struct file *filp)
* Initial events that are automatically sent when the cec device is
* opened.
*/
- struct cec_event ev_state = {
+ struct cec_event ev = {
.event = CEC_EVENT_STATE_CHANGE,
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
@@ -569,9 +570,19 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
/* Queue up initial state events */
- ev_state.state_change.phys_addr = adap->phys_addr;
- ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
- cec_queue_event_fh(fh, &ev_state, 0);
+ ev.state_change.phys_addr = adap->phys_addr;
+ ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+ cec_queue_event_fh(fh, &ev, 0);
+#ifdef CONFIG_CEC_PIN
+ if (adap->pin && adap->pin->ops->read_hpd) {
+ err = adap->pin->ops->read_hpd(adap);
+ if (err >= 0) {
+ ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
+ CEC_EVENT_PIN_HPD_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ }
+ }
+#endif
list_add(&fh->list, &devnode->fhs);
mutex_unlock(&devnode->lock);
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 648136e552d5..5870da6a567f 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -112,10 +112,6 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
int minor;
int ret;
- /* Initialization */
- INIT_LIST_HEAD(&devnode->fhs);
- mutex_init(&devnode->lock);
-
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
@@ -242,6 +238,10 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
INIT_LIST_HEAD(&adap->wait_queue);
init_waitqueue_head(&adap->kthread_waitq);
+ /* adap->devnode initialization */
+ INIT_LIST_HEAD(&adap->devnode.fhs);
+ mutex_init(&adap->devnode.lock);
+
adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
if (IS_ERR(adap->kthread)) {
pr_err("cec-%s: kernel_thread() failed\n", name);
@@ -277,7 +277,6 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->rc->input_id.version = 1;
adap->rc->driver_name = CEC_NAME;
adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
- adap->rc->enabled_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_NS(100);
diff --git a/drivers/media/cec/cec-pin-priv.h b/drivers/media/cec/cec-pin-priv.h
new file mode 100644
index 000000000000..7d0def199762
--- /dev/null
+++ b/drivers/media/cec/cec-pin-priv.h
@@ -0,0 +1,133 @@
+/*
+ * cec-pin-priv.h - internal cec-pin header
+ *
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_PIN_PRIV_H
+#define LINUX_CEC_PIN_PRIV_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <media/cec-pin.h>
+
+enum cec_pin_state {
+ /* CEC is off */
+ CEC_ST_OFF,
+ /* CEC is idle, waiting for Rx or Tx */
+ CEC_ST_IDLE,
+
+ /* Tx states */
+
+ /* Pending Tx, waiting for Signal Free Time to expire */
+ CEC_ST_TX_WAIT,
+ /* Low-drive was detected, wait for bus to go high */
+ CEC_ST_TX_WAIT_FOR_HIGH,
+ /* Drive CEC low for the start bit */
+ CEC_ST_TX_START_BIT_LOW,
+ /* Drive CEC high for the start bit */
+ CEC_ST_TX_START_BIT_HIGH,
+ /* Drive CEC low for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_LOW,
+ /* Drive CEC high for the 0 bit */
+ CEC_ST_TX_DATA_BIT_0_HIGH,
+ /* Drive CEC low for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_LOW,
+ /* Drive CEC high for the 1 bit */
+ CEC_ST_TX_DATA_BIT_1_HIGH,
+ /*
+ * Wait for start of sample time to check for Ack bit or first
+ * four initiator bits to check for Arbitration Lost.
+ */
+ CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE,
+ /* Wait for end of bit period after sampling */
+ CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE,
+
+ /* Rx states */
+
+ /* Start bit low detected */
+ CEC_ST_RX_START_BIT_LOW,
+ /* Start bit high detected */
+ CEC_ST_RX_START_BIT_HIGH,
+ /* Wait for bit sample time */
+ CEC_ST_RX_DATA_SAMPLE,
+ /* Wait for earliest end of bit period after sampling */
+ CEC_ST_RX_DATA_POST_SAMPLE,
+	/* Wait for CEC to go high (i.e. end of bit period) */
+ CEC_ST_RX_DATA_HIGH,
+ /* Drive CEC low to send 0 Ack bit */
+ CEC_ST_RX_ACK_LOW,
+ /* End of 0 Ack time, wait for earliest end of bit period */
+ CEC_ST_RX_ACK_LOW_POST,
+	/* Wait for CEC to go high (i.e. end of bit period) */
+ CEC_ST_RX_ACK_HIGH_POST,
+ /* Wait for earliest end of bit period and end of message */
+ CEC_ST_RX_ACK_FINISH,
+
+ /* Start low drive */
+ CEC_ST_LOW_DRIVE,
+ /* Monitor pin using interrupts */
+ CEC_ST_RX_IRQ,
+
+ /* Total number of pin states */
+ CEC_PIN_STATES
+};
+
+#define CEC_NUM_PIN_EVENTS 128
+
+#define CEC_PIN_IRQ_UNCHANGED 0
+#define CEC_PIN_IRQ_DISABLE 1
+#define CEC_PIN_IRQ_ENABLE 2
+
+struct cec_pin {
+ struct cec_adapter *adap;
+ const struct cec_pin_ops *ops;
+ struct task_struct *kthread;
+ wait_queue_head_t kthread_waitq;
+ struct hrtimer timer;
+ ktime_t ts;
+ unsigned int wait_usecs;
+ u16 la_mask;
+ bool enabled;
+ bool monitor_all;
+ bool rx_eom;
+ bool enable_irq_failed;
+ enum cec_pin_state state;
+ struct cec_msg tx_msg;
+ u32 tx_bit;
+ bool tx_nacked;
+ u32 tx_signal_free_time;
+ struct cec_msg rx_msg;
+ u32 rx_bit;
+
+ struct cec_msg work_rx_msg;
+ u8 work_tx_status;
+ ktime_t work_tx_ts;
+ atomic_t work_irq_change;
+ atomic_t work_pin_events;
+ unsigned int work_pin_events_wr;
+ unsigned int work_pin_events_rd;
+ ktime_t work_pin_ts[CEC_NUM_PIN_EVENTS];
+ bool work_pin_is_high[CEC_NUM_PIN_EVENTS];
+ ktime_t timer_ts;
+ u32 timer_cnt;
+ u32 timer_100ms_overruns;
+ u32 timer_300ms_overruns;
+ u32 timer_max_overrun;
+ u32 timer_sum_overrun;
+};
+
+#endif
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index c003b8eac617..b48dfe844118 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -20,6 +20,7 @@
#include <linux/sched/types.h>
#include <media/cec-pin.h>
+#include "cec-pin-priv.h"
/* All timings are in microseconds */
@@ -132,7 +133,7 @@ static void cec_pin_to_idle(struct cec_pin *pin)
pin->rx_msg.len = 0;
memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
pin->state = CEC_ST_IDLE;
- pin->ts = 0;
+ pin->ts = ns_to_ktime(0);
}
/*
@@ -426,7 +427,7 @@ static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
v = cec_pin_read(pin);
if (v && pin->rx_eom) {
pin->work_rx_msg = pin->rx_msg;
- pin->work_rx_msg.rx_ts = ts;
+ pin->work_rx_msg.rx_ts = ktime_to_ns(ts);
wake_up_interruptible(&pin->kthread_waitq);
pin->ts = ts;
pin->state = CEC_ST_RX_ACK_FINISH;
@@ -457,7 +458,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
s32 delta;
ts = ktime_get();
- if (pin->timer_ts) {
+ if (ktime_to_ns(pin->timer_ts)) {
delta = ktime_us_delta(ts, pin->timer_ts);
pin->timer_cnt++;
if (delta > 100 && pin->state != CEC_ST_IDLE) {
@@ -481,17 +482,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
- hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(pin->wait_usecs * 1000));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -531,7 +534,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
pin->state = CEC_ST_RX_START_BIT_LOW;
break;
}
- if (pin->ts == 0)
+ if (ktime_to_ns(pin->ts) == 0)
pin->ts = ts;
if (pin->tx_msg.len) {
/*
@@ -572,12 +575,13 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || states[pin->state].usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, states[pin->state].usecs);
- hrtimer_forward_now(timer, states[pin->state].usecs * 1000);
+ hrtimer_forward_now(timer,
+ ns_to_ktime(states[pin->state].usecs * 1000));
return HRTIMER_RESTART;
}
pin->wait_usecs = states[pin->state].usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, 100000);
+ hrtimer_forward_now(timer, ns_to_ktime(100000));
return HRTIMER_RESTART;
}
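The hrtimer calls above now build ktime_t values explicitly via ns_to_ktime() instead of passing raw integers; a hypothetical helper showing the same pattern:

	static void example_rearm(struct hrtimer *timer, unsigned int usecs)
	{
		/* convert microseconds to a ktime_t interval before forwarding */
		hrtimer_forward_now(timer, ns_to_ktime((u64)usecs * 1000));
	}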
@@ -596,7 +600,7 @@ static int cec_pin_thread_func(void *_adap)
if (pin->work_rx_msg.len) {
cec_received_msg_ts(adap, &pin->work_rx_msg,
- pin->work_rx_msg.rx_ts);
+ ns_to_ktime(pin->work_rx_msg.rx_ts));
pin->work_rx_msg.len = 0;
}
if (pin->work_tx_status) {
@@ -623,13 +627,15 @@ static int cec_pin_thread_func(void *_adap)
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
break;
default:
@@ -653,7 +659,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
cec_pin_read(pin);
cec_pin_to_idle(pin);
pin->tx_msg.len = 0;
- pin->timer_ts = 0;
+ pin->timer_ts = ns_to_ktime(0);
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
pin->kthread = kthread_run(cec_pin_thread_func, adap,
"cec-pin");
@@ -661,7 +667,8 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
pr_err("cec-pin: kernel_thread() failed\n");
return PTR_ERR(pin->kthread);
}
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
} else {
if (pin->ops->disable_irq)
pin->ops->disable_irq(adap);
@@ -699,7 +706,8 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
pin->ops->disable_irq(adap);
cec_pin_high(pin);
cec_pin_to_idle(pin);
- hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ hrtimer_start(&pin->timer, ns_to_ktime(0),
+ HRTIMER_MODE_REL);
}
return 0;
}
@@ -789,7 +797,7 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
CEC_MAX_LOG_ADDRS);
- if (PTR_ERR_OR_ZERO(adap)) {
+ if (IS_ERR(adap)) {
kfree(pin);
return adap;
}
diff --git a/drivers/media/common/b2c2/Makefile b/drivers/media/common/b2c2/Makefile
index 24993a5b38ba..73df4a334eda 100644
--- a/drivers/media/common/b2c2/Makefile
+++ b/drivers/media/common/b2c2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
b2c2-flexcop-objs += flexcop.o flexcop-fe-tuner.o flexcop-i2c.o
b2c2-flexcop-objs += flexcop-sram.o flexcop-eeprom.o flexcop-misc.o
b2c2-flexcop-objs += flexcop-hw-filter.o
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index 2533574c0cf4..b7e5e4c17acb 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-common.h - common header file for device-specific source files
diff --git a/drivers/media/common/b2c2/flexcop-eeprom.c b/drivers/media/common/b2c2/flexcop-eeprom.c
index 844c7836c2a6..0f2151cd36f2 100644
--- a/drivers/media/common/b2c2/flexcop-eeprom.c
+++ b/drivers/media/common/b2c2/flexcop-eeprom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-eeprom.c - eeprom access methods (currently only MAC address reading)
diff --git a/drivers/media/common/b2c2/flexcop-fe-tuner.c b/drivers/media/common/b2c2/flexcop-fe-tuner.c
index 7636606f0be5..a1ce3e8eb1d3 100644
--- a/drivers/media/common/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/common/b2c2/flexcop-fe-tuner.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-fe-tuner.c - methods for frontend attachment and DiSEqC controlling
diff --git a/drivers/media/common/b2c2/flexcop-hw-filter.c b/drivers/media/common/b2c2/flexcop-hw-filter.c
index 8220257903ef..335f30a54ba8 100644
--- a/drivers/media/common/b2c2/flexcop-hw-filter.c
+++ b/drivers/media/common/b2c2/flexcop-hw-filter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-hw-filter.c - pid and mac address filtering and control functions
diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c
index 58d39a59fc09..564da6fa900d 100644
--- a/drivers/media/common/b2c2/flexcop-i2c.c
+++ b/drivers/media/common/b2c2/flexcop-i2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-i2c.c - flexcop internal 2Wire bus (I2C) and dvb i2c initialization
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index bb0d95fe64f9..83d01d3a81cc 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-misc.c - miscellaneous functions
diff --git a/drivers/media/common/b2c2/flexcop-reg.h b/drivers/media/common/b2c2/flexcop-reg.h
index 835c54d60e74..dd7c962db565 100644
--- a/drivers/media/common/b2c2/flexcop-reg.h
+++ b/drivers/media/common/b2c2/flexcop-reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-reg.h - register abstraction for FlexCopII, FlexCopIIb and FlexCopIII
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index 185c285f70fc..d97962eb0112 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-sram.c - functions for controlling the SRAM
diff --git a/drivers/media/common/b2c2/flexcop.h b/drivers/media/common/b2c2/flexcop.h
index 8942bdacbf61..911ece59ea02 100644
--- a/drivers/media/common/b2c2/flexcop.h
+++ b/drivers/media/common/b2c2/flexcop.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop.h - private header file for all flexcop-chip-source files
diff --git a/drivers/media/common/b2c2/flexcop_ibi_value_be.h b/drivers/media/common/b2c2/flexcop_ibi_value_be.h
index 8f64bdbd72bb..c97a0d6d7b3a 100644
--- a/drivers/media/common/b2c2/flexcop_ibi_value_be.h
+++ b/drivers/media/common/b2c2/flexcop_ibi_value_be.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* register descriptions
* see flexcop.c for copyright information
diff --git a/drivers/media/common/b2c2/flexcop_ibi_value_le.h b/drivers/media/common/b2c2/flexcop_ibi_value_le.h
index c75830d7d942..5db3b46f21ee 100644
--- a/drivers/media/common/b2c2/flexcop_ibi_value_le.h
+++ b/drivers/media/common/b2c2/flexcop_ibi_value_le.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* register descriptions
* see flexcop.c for copyright information
diff --git a/drivers/media/common/btcx-risc.h b/drivers/media/common/btcx-risc.h
index 03583ef90506..76dc16f402b9 100644
--- a/drivers/media/common/btcx-risc.h
+++ b/drivers/media/common/btcx-risc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*/
struct btcx_riscmem {
diff --git a/drivers/media/common/cypress_firmware.c b/drivers/media/common/cypress_firmware.c
index 50e3f76d4847..8895158c1962 100644
--- a/drivers/media/common/cypress_firmware.c
+++ b/drivers/media/common/cypress_firmware.c
@@ -74,11 +74,9 @@ int cypress_load_firmware(struct usb_device *udev,
struct hexline *hx;
int ret, pos = 0;
- hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
- if (!hx) {
- dev_err(&udev->dev, "%s: kmalloc() failed\n", KBUILD_MODNAME);
+ hx = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!hx)
return -ENOMEM;
- }
/* stop the CPU */
hx->data[0] = 1;
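
The cypress_firmware hunk shows two cleanups applied repeatedly in this series: sizeof(*ptr) instead of sizeof(struct type), and dropping the allocation-failure message because the allocator already warns on failure. A hedged fragment of the idiom, with my_ctx as a hypothetical type:

	struct my_ctx *ctx;

	/* sizeof(*ctx) stays correct even if the pointee type is renamed;
	 * no extra dev_err() is needed, kmalloc() already logs on failure. */
	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
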
diff --git a/drivers/media/common/cypress_firmware.h b/drivers/media/common/cypress_firmware.h
index 1e4f27356205..0aa46e602b07 100644
--- a/drivers/media/common/cypress_firmware.h
+++ b/drivers/media/common/cypress_firmware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
* see dvb-usb-init.c for copyright information.
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 930d2c94d5d3..8c87d6837c49 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -163,9 +163,9 @@ void saa7146_buffer_next(struct saa7146_dev *dev,
}
}
-void saa7146_buffer_timeout(unsigned long data)
+void saa7146_buffer_timeout(struct timer_list *t)
{
- struct saa7146_dmaqueue *q = (struct saa7146_dmaqueue*)data;
+ struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
struct saa7146_dev *dev = q->dev;
unsigned long flags;
@@ -559,7 +559,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
vbi->start[1] = 312;
vbi->count[1] = 16;
- init_timer(&vv->vbi_read_timeout);
+ timer_setup(&vv->vbi_read_timeout, NULL, 0);
vv->ov_fb.capability = V4L2_FBUF_CAP_LIST_CLIPPING;
vv->ov_fb.flags = V4L2_FBUF_FLAG_PRIMARY;
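
The saa7146 hunks here and below convert the old init_timer()/setup_timer() interface to timer_setup(), with the callback recovering its context through from_timer() instead of the removed .data cookie. A generic sketch of that shape, using hypothetical names (my_dev, watchdog); when the callback needs extra context beyond the containing structure, as with the VBI read timeout's file pointer below, that context is stored in the structure itself:

#include <linux/timer.h>

struct my_dev {
	struct timer_list watchdog;
};

static void my_dev_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer_list member */
	struct my_dev *dev = from_timer(dev, t, watchdog);

	/* ... handle the timeout using dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	/* replaces init_timer()/setup_timer(); there is no .data field */
	timer_setup(&dev->watchdog, my_dev_timeout, 0);
}
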
diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c
index 75897f95e4b4..f9e099d812c8 100644
--- a/drivers/media/common/saa7146/saa7146_i2c.c
+++ b/drivers/media/common/saa7146/saa7146_i2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/drv-intf/saa7146_vv.h>
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index d79e4d7ecd9f..e1d369b976ed 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <media/drv-intf/saa7146_vv.h>
static int vbi_pixel_to_capture = 720 * 2;
@@ -348,9 +349,10 @@ static void vbi_stop(struct saa7146_fh *fh, struct file *file)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void vbi_read_timeout(unsigned long data)
+static void vbi_read_timeout(struct timer_list *t)
{
- struct file *file = (struct file*)data;
+ struct saa7146_vv *vv = from_timer(vv, t, vbi_read_timeout);
+ struct file *file = vv->vbi_read_timeout_file;
struct saa7146_fh *fh = file->private_data;
struct saa7146_dev *dev = fh->dev;
@@ -365,8 +367,7 @@ static void vbi_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
INIT_LIST_HEAD(&vv->vbi_dmaq.queue);
- setup_timer(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->vbi_dmaq));
+ timer_setup(&vv->vbi_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->vbi_dmaq.dev = dev;
init_waitqueue_head(&vv->vbi_wq);
@@ -402,7 +403,7 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
file, &dev->v4l2_lock);
vv->vbi_read_timeout.function = vbi_read_timeout;
- vv->vbi_read_timeout.data = (unsigned long)file;
+ vv->vbi_read_timeout_file = file;
/* initialize the brs */
if ( 0 != (SAA7146_USE_PORT_B_FOR_VBI & dev->ext_vv_data->flags)) {
@@ -488,7 +489,7 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
return ret;
}
-struct saa7146_use_ops saa7146_vbi_uops = {
+const struct saa7146_use_ops saa7146_vbi_uops = {
.init = vbi_init,
.open = vbi_open,
.release = vbi_close,
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index 37b4654dc21c..2b631eaa65b3 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -1201,8 +1201,7 @@ static void video_init(struct saa7146_dev *dev, struct saa7146_vv *vv)
{
INIT_LIST_HEAD(&vv->video_dmaq.queue);
- setup_timer(&vv->video_dmaq.timeout, saa7146_buffer_timeout,
- (unsigned long)(&vv->video_dmaq));
+ timer_setup(&vv->video_dmaq.timeout, saa7146_buffer_timeout, 0);
vv->video_dmaq.dev = dev;
/* set some default values */
@@ -1303,7 +1302,7 @@ out:
return ret;
}
-struct saa7146_use_ops saa7146_video_uops = {
+const struct saa7146_use_ops saa7146_video_uops = {
.init = video_init,
.open = video_open,
.release = video_close,
diff --git a/drivers/media/common/siano/Makefile b/drivers/media/common/siano/Makefile
index 4c0567f106b2..88e2b7ffc537 100644
--- a/drivers/media/common/siano/Makefile
+++ b/drivers/media/common/siano/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o
smsdvb-objs := smsdvb-main.o
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index e7a0d7798d5b..e4ea2a0c7a24 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -447,7 +447,7 @@ static struct smscore_registry_entry_t *smscore_find_registry(char *devpath)
return entry;
}
}
- entry = kmalloc(sizeof(struct smscore_registry_entry_t), GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (entry) {
entry->mode = default_mode;
strcpy(entry->devpath, devpath);
@@ -536,9 +536,7 @@ int smscore_register_hotplug(hotplug_t hotplug)
int rc = 0;
kmutex_lock(&g_smscore_deviceslock);
-
- notifyee = kmalloc(sizeof(struct smscore_device_notifyee_t),
- GFP_KERNEL);
+ notifyee = kmalloc(sizeof(*notifyee), GFP_KERNEL);
if (notifyee) {
/* now notify callback about existing devices */
first = &g_smscore_devices;
@@ -627,7 +625,7 @@ smscore_buffer_t *smscore_createbuffer(u8 *buffer, void *common_buffer,
{
struct smscore_buffer_t *cb;
- cb = kzalloc(sizeof(struct smscore_buffer_t), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return NULL;
@@ -655,7 +653,7 @@ int smscore_register_device(struct smsdevice_params_t *params,
struct smscore_device_t *dev;
u8 *buffer;
- dev = kzalloc(sizeof(struct smscore_device_t), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -751,7 +749,7 @@ static int smscore_sendrequest_and_wait(struct smscore_device_t *coredev,
void *buffer, size_t size, struct completion *completion) {
int rc;
- if (completion == NULL)
+ if (!completion)
return -EINVAL;
init_completion(completion);
@@ -1153,8 +1151,8 @@ static int smscore_load_firmware_from_file(struct smscore_device_t *coredev,
}
pr_debug("Firmware name: %s\n", fw_filename);
- if (loadfirmware_handler == NULL && !(coredev->device_flags
- & SMS_DEVICE_FAMILY2))
+ if (!loadfirmware_handler &&
+ !(coredev->device_flags & SMS_DEVICE_FAMILY2))
return -EINVAL;
rc = request_firmware(&fw, fw_filename, coredev->device);
@@ -1301,10 +1299,8 @@ static int smscore_init_device(struct smscore_device_t *coredev, int mode)
buffer = kmalloc(sizeof(struct sms_msg_data) +
SMS_DMA_ALIGNMENT, GFP_KERNEL | GFP_DMA);
- if (!buffer) {
- pr_err("Could not allocate buffer for init device message.\n");
+ if (!buffer)
return -ENOMEM;
- }
msg = (struct sms_msg_data *)SMS_ALIGN_ADDRESS(buffer);
SMS_INIT_MSG(&msg->x_msg_header, MSG_SMS_INIT_DEVICE_REQ,
@@ -1686,11 +1682,10 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
pr_err("The msg ID already registered to another client.\n");
return -EEXIST;
}
- listentry = kzalloc(sizeof(struct smscore_idlist_t), GFP_KERNEL);
- if (!listentry) {
- pr_err("Can't allocate memory for client id.\n");
+ listentry = kzalloc(sizeof(*listentry), GFP_KERNEL);
+ if (!listentry)
return -ENOMEM;
- }
+
listentry->id = id;
listentry->data_type = data_type;
list_add_locked(&listentry->entry, &client->idlist,
@@ -1724,11 +1719,9 @@ int smscore_register_client(struct smscore_device_t *coredev,
return -EEXIST;
}
- newclient = kzalloc(sizeof(struct smscore_client_t), GFP_KERNEL);
- if (!newclient) {
- pr_err("Failed to allocate memory for client.\n");
+ newclient = kzalloc(sizeof(*newclient), GFP_KERNEL);
+ if (!newclient)
return -ENOMEM;
- }
INIT_LIST_HEAD(&newclient->idlist);
newclient->coredev = coredev;
@@ -1796,7 +1789,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int rc;
- if (client == NULL) {
+ if (!client) {
pr_err("Got NULL client\n");
return -EINVAL;
}
@@ -1804,7 +1797,7 @@ int smsclient_sendrequest(struct smscore_client_t *client,
coredev = client->coredev;
/* check that no other channel with same id exists */
- if (coredev == NULL) {
+ if (!coredev) {
pr_err("Got NULL coredev\n");
return -EINVAL;
}
@@ -1961,7 +1954,7 @@ int smscore_gpio_configure(struct smscore_device_t *coredev, u8 pin_num,
if (pin_num > MAX_GPIO_PIN_NUMBER)
return -EINVAL;
- if (p_gpio_config == NULL)
+ if (!p_gpio_config)
return -EINVAL;
total_len = sizeof(struct sms_msg_hdr) + (sizeof(u32) * 6);
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index a772976cfe26..f96968c11312 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -238,6 +238,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->color_enc = TGP_COLOR_ENC_RGB;
break;
case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->color_enc = TGP_COLOR_ENC_LUMA;
@@ -352,6 +354,8 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
case V4L2_PIX_FMT_YUV444:
case V4L2_PIX_FMT_YUV555:
case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
tpg->twopixelsize[0] = 2 * 2;
@@ -1056,6 +1060,14 @@ static void gen_twopix(struct tpg_data *tpg,
case V4L2_PIX_FMT_GREY:
buf[0][offset] = r_y_h;
break;
+ case V4L2_PIX_FMT_Y10:
+ buf[0][offset] = (r_y_h << 2) & 0xff;
+ buf[0][offset+1] = r_y_h >> 6;
+ break;
+ case V4L2_PIX_FMT_Y12:
+ buf[0][offset] = (r_y_h << 4) & 0xff;
+ buf[0][offset+1] = r_y_h >> 4;
+ break;
case V4L2_PIX_FMT_Y16:
/*
* Ideally both bytes should be set to r_y_h, but then you won't
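
The Y10/Y12 additions above scale the 8-bit test-pattern luma up to the target bit depth and store it little-endian, two bytes per pixel. A small sketch of the packing, assuming val8 is the 8-bit pattern value (names are illustrative only):

/* V4L2_PIX_FMT_Y10: 10 significant bits, LSB-aligned, little-endian */
static inline void pack_y10(u8 *dst, u8 val8)
{
	u16 v = (u16)val8 << 2;

	dst[0] = v & 0xff;	/* low byte first */
	dst[1] = v >> 8;	/* top two bits */
}

/* V4L2_PIX_FMT_Y12: 12 significant bits, LSB-aligned, little-endian */
static inline void pack_y12(u8 *dst, u8 val8)
{
	u16 v = (u16)val8 << 4;

	dst[0] = v & 0xff;
	dst[1] = v >> 8;
}
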
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile
index 281bc89576e6..47e2e391bfb8 100644
--- a/drivers/media/dvb-core/Makefile
+++ b/drivers/media/dvb-core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel DVB device drivers.
#
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 18e4230865be..3ddd44e1ee77 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -329,9 +329,9 @@ static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter,
return 0;
}
-static void dvb_dmxdev_filter_timeout(unsigned long data)
+static void dvb_dmxdev_filter_timeout(struct timer_list *t)
{
- struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data;
+ struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer);
dmxdevfilter->buffer.error = -ETIMEDOUT;
spin_lock_irq(&dmxdevfilter->dev->lock);
@@ -346,8 +346,6 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
del_timer(&dmxdevfilter->timer);
if (para->timeout) {
- dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout;
- dmxdevfilter->timer.data = (unsigned long)dmxdevfilter;
dmxdevfilter->timer.expires =
jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000;
add_timer(&dmxdevfilter->timer);
@@ -754,7 +752,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
- init_timer(&dmxdevfilter->timer);
+ timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0);
dvbdev->users++;
diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h
index 054fd4eb6192..5e795f5f0f41 100644
--- a/drivers/media/dvb-core/dmxdev.h
+++ b/drivers/media/dvb-core/dmxdev.h
@@ -36,12 +36,33 @@
#include "demux.h"
#include "dvb_ringbuffer.h"
+/**
+ * enum dmxdev_type - type of demux filter type.
+ *
+ * @DMXDEV_TYPE_NONE: no filter set.
+ * @DMXDEV_TYPE_SEC: section filter.
+ * @DMXDEV_TYPE_PES: Program Elementary Stream (PES) filter.
+ */
enum dmxdev_type {
DMXDEV_TYPE_NONE,
DMXDEV_TYPE_SEC,
DMXDEV_TYPE_PES,
};
+/**
+ * enum dmxdev_state - state machine for the dmxdev.
+ *
+ * @DMXDEV_STATE_FREE: indicates that the filter is freed.
+ * @DMXDEV_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMXDEV_STATE_SET: indicates that the filter parameters are set.
+ * @DMXDEV_STATE_GO: indicates that the filter is running.
+ * @DMXDEV_STATE_DONE: indicates that a packet was already filtered
+ * and the filter is now disabled.
+ * Set only if %DMX_ONESHOT. See
+ * &dmx_sct_filter_params.
+ * @DMXDEV_STATE_TIMEDOUT: Indicates a timeout condition.
+ */
enum dmxdev_state {
DMXDEV_STATE_FREE,
DMXDEV_STATE_ALLOCATED,
@@ -51,12 +72,49 @@ enum dmxdev_state {
DMXDEV_STATE_TIMEDOUT
};
+/**
+ * struct dmxdev_feed - digital TV dmxdev feed
+ *
+ * @pid: Program ID to be filtered
+ * @ts: pointer to &struct dmx_ts_feed
+ * @next: &struct list_head pointing to the next feed.
+ */
+
struct dmxdev_feed {
u16 pid;
struct dmx_ts_feed *ts;
struct list_head next;
};
+/**
+ * struct dmxdev_filter - digital TV dmxdev filter
+ *
+ * @filter: a dmxdev filter. Currently used only for section filter:
+ * if the filter is Section, it contains a
+ * &struct dmx_section_filter @sec pointer.
+ * @feed: a dmxdev feed. Depending on the feed type, it can be:
+ * for TS feed: a &struct list_head @ts list of TS and PES
+ * feeds;
+ * for section feed: a &struct dmx_section_feed @sec pointer.
+ * @params: dmxdev filter parameters. Depending on the feed type, it
+ * can be:
+ * for section filter: a &struct dmx_sct_filter_params @sec
+ * embedded struct;
+ * for a TS filter: a &struct dmx_pes_filter_params @pes
+ * embedded struct.
+ * @type: type of the dmxdev filter, as defined by &enum dmxdev_type.
+ * @state: state of the dmxdev filter, as defined by &enum dmxdev_state.
+ * @dev: pointer to &struct dmxdev.
+ * @buffer: an embedded &struct dvb_ringbuffer buffer.
+ * @mutex: protects the access to &struct dmxdev_filter.
+ * @timer: &struct timer_list embedded timer, used to check for
+ * feed timeouts.
+ * Only for section filter.
+ * @todo: index for the @secheader.
+ * Only for section filter.
+ * @secheader: buffer cache to parse the section header.
+ * Only for section filter.
+ */
struct dmxdev_filter {
union {
struct dmx_section_filter *sec;
@@ -86,7 +144,23 @@ struct dmxdev_filter {
u8 secheader[3];
};
-
+/**
+ * struct dmxdev - Describes a digital TV demux device.
+ *
+ * @dvbdev: pointer to &struct dvb_device associated with
+ * the demux device node.
+ * @dvr_dvbdev: pointer to &struct dvb_device associated with
+ * the dvr device node.
+ * @filter: pointer to &struct dmxdev_filter.
+ * @demux: pointer to &struct dmx_demux.
+ * @filternum: number of filters.
+ * @capabilities: demux capabilities as defined by &enum dmx_demux_caps.
+ * @exit: flag to indicate that the demux is being released.
+ * @dvr_orig_fe: pointer to &struct dmx_frontend.
+ * @dvr_buffer: embedded &struct dvb_ringbuffer for DVB output.
+ * @mutex: protects the usage of this structure.
+ * @lock: protects access to &dmxdev->filter->data.
+ */
struct dmxdev {
struct dvb_device *dvbdev;
struct dvb_device *dvr_dvbdev;
@@ -108,8 +182,20 @@ struct dmxdev {
spinlock_t lock;
};
+/**
+ * dvb_dmxdev_init - initializes a digital TV demux and registers both demux
+ * and DVR devices.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ * @adap: pointer to &struct dvb_adapter.
+ */
+int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *adap);
-int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *);
+/**
+ * dvb_dmxdev_release - releases a digital TV demux and unregisters it.
+ *
+ * @dmxdev: pointer to &struct dmxdev.
+ */
void dvb_dmxdev_release(struct dmxdev *dmxdev);
#endif /* _DMXDEV_H_ */
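
Per the kernel-doc added above, dvb_dmxdev_init() registers both the demux and DVR device nodes for an adapter. A hedged sketch of the registration and teardown sequence a bridge driver would follow; the dvb state structure and its field values are hypothetical:

	/* fill the dmxdev before registering it (values are illustrative) */
	dvb->dmxdev.filternum = 256;
	dvb->dmxdev.demux = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;

	ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
	if (ret < 0)
		return ret;

	/* ... later, on driver removal ... */
	dvb_dmxdev_release(&dvb->dmxdev);
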
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 5b6041d462bc..54d7d8a48b18 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* dvb-usb-ids.h is part of the DVB USB library.
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 6628f80d184f..acade7543b82 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -223,10 +223,10 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
* when the second packet arrives.
*
* Fix:
- * when demux is started, let feed->pusi_seen = 0 to
+ * when demux is started, let feed->pusi_seen = false to
* prevent initial feeding of garbage from the end of
* previous section. When you for the first time see PUSI=1
- * then set feed->pusi_seen = 1
+ * then set feed->pusi_seen = true
*/
static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
const u8 *buf, u8 len)
@@ -318,10 +318,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
*/
#endif
/*
- * Discontinuity detected. Reset pusi_seen = 0 to
+ * Discontinuity detected. Reset pusi_seen to
* stop feeding of suspicious data until next PUSI=1 arrives
*/
- feed->pusi_seen = 0;
+ feed->pusi_seen = false;
dvb_dmx_swfilter_section_new(feed);
}
@@ -335,8 +335,8 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
dvb_dmx_swfilter_section_copy_dump(feed, before,
before_len);
- /* before start of new section, set pusi_seen = 1 */
- feed->pusi_seen = 1;
+ /* before start of new section, set pusi_seen */
+ feed->pusi_seen = true;
dvb_dmx_swfilter_section_new(feed);
dvb_dmx_swfilter_section_copy_dump(feed, after,
after_len);
@@ -367,6 +367,7 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
else
feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
}
+ /* Used only on full-featured devices */
if (feed->ts_type & TS_DECODER)
if (feed->demux->write_to_decoder)
feed->demux->write_to_decoder(feed, buf, 188);
@@ -898,14 +899,14 @@ static void prepare_secfilters(struct dvb_demux_feed *dvbdmxfeed)
return;
do {
sf = &f->filter;
- doneq = 0;
+ doneq = false;
for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
mode = sf->filter_mode[i];
mask = sf->filter_mask[i];
f->maskandmode[i] = mask & mode;
doneq |= f->maskandnotmode[i] = mask & ~mode;
}
- f->doneq = doneq ? 1 : 0;
+ f->doneq = doneq ? true : false;
} while ((f = f->next));
}
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 6f572ca8d339..cc048f09aa85 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -26,15 +26,33 @@
#include "demux.h"
-#define DMX_TYPE_TS 0
-#define DMX_TYPE_SEC 1
-#define DMX_TYPE_PES 2
+/**
+ * enum dvb_dmx_filter_type - type of demux feed.
+ *
+ * @DMX_TYPE_TS: feed is in TS mode.
+ * @DMX_TYPE_SEC: feed is in Section mode.
+ */
+enum dvb_dmx_filter_type {
+ DMX_TYPE_TS,
+ DMX_TYPE_SEC,
+};
-#define DMX_STATE_FREE 0
-#define DMX_STATE_ALLOCATED 1
-#define DMX_STATE_SET 2
-#define DMX_STATE_READY 3
-#define DMX_STATE_GO 4
+/**
+ * enum dvb_dmx_state - state machine for a demux filter.
+ *
+ * @DMX_STATE_FREE: indicates that the filter is freed.
+ * @DMX_STATE_ALLOCATED: indicates that the filter was allocated
+ * to be used.
+ * @DMX_STATE_READY: indicates that the filter is ready
+ * to be used.
+ * @DMX_STATE_GO: indicates that the filter is running.
+ */
+enum dvb_dmx_state {
+ DMX_STATE_FREE,
+ DMX_STATE_ALLOCATED,
+ DMX_STATE_READY,
+ DMX_STATE_GO,
+};
#define DVB_DEMUX_MASK_MAX 18
@@ -42,24 +60,66 @@
#define SPEED_PKTS_INTERVAL 50000
+/**
+ * struct dvb_demux_filter - Describes a DVB demux section filter.
+ *
+ * @filter: Section filter as defined by &struct dmx_section_filter.
+ * @maskandmode: logical ``and`` bit mask.
+ * @maskandnotmode: logical ``and not`` bit mask.
+ * @doneq: flag that indicates when a filter is ready.
+ * @next: pointer to the next section filter.
+ * @feed: &struct dvb_demux_feed pointer.
+ * @index: index of the used demux filter.
+ * @state: state of the filter as described by &enum dvb_dmx_state.
+ * @type: type of the filter as described
+ * by &enum dvb_dmx_filter_type.
+ */
+
struct dvb_demux_filter {
struct dmx_section_filter filter;
u8 maskandmode[DMX_MAX_FILTER_SIZE];
u8 maskandnotmode[DMX_MAX_FILTER_SIZE];
- int doneq;
+ bool doneq;
struct dvb_demux_filter *next;
struct dvb_demux_feed *feed;
int index;
- int state;
- int type;
+ enum dvb_dmx_state state;
+ enum dvb_dmx_filter_type type;
+ /* private: used only by av7110 */
u16 hw_handle;
- struct timer_list timer;
};
-#define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head)
-
+/**
+ * struct dvb_demux_feed - describes a DVB demux feed
+ *
+ * @feed: a digital TV feed. It can either be a TS or a section feed:
+ * if the feed is TS, it contains &struct dvb_ts_feed @ts;
+ * if the feed is section, it contains
+ * &struct dmx_section_feed @sec.
+ * @cb: digital TV callbacks. Depending on the feed type, it can be:
+ * if the feed is TS, it contains a dmx_ts_cb() @ts callback;
+ * if the feed is section, it contains a dmx_section_cb() @sec
+ * callback.
+ *
+ * @demux: pointer to &struct dvb_demux.
+ * @priv: private data that can optionally be used by a DVB driver.
+ * @type: type of the filter, as defined by &enum dvb_dmx_filter_type.
+ * @state: state of the filter as defined by &enum dvb_dmx_state.
+ * @pid: PID to be filtered.
+ * @timeout: feed timeout.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @ts_type: type of TS, as defined by &enum ts_filter_type.
+ * @pes_type: type of PES, as defined by &enum dmx_ts_pes.
+ * @cc: MPEG-TS packet continuity counter
+ * @pusi_seen: if true, indicates that a PUSI (Payload Unit Start
+ * Indicator) was seen; it is used to prevent feeding of garbage from
+ * the previous section.
+ * @peslen: length of the PES (Packet Elementary Stream).
+ * @list_head: head for the list of digital TV demux feeds.
+ * @index: a unique index for each feed. Can be used as hardware
+ * pid filter index.
+ */
struct dvb_demux_feed {
union {
struct dmx_ts_feed ts;
@@ -73,25 +133,63 @@ struct dvb_demux_feed {
struct dvb_demux *demux;
void *priv;
- int type;
- int state;
+ enum dvb_dmx_filter_type type;
+ enum dvb_dmx_state state;
u16 pid;
ktime_t timeout;
struct dvb_demux_filter *filter;
- int ts_type;
+ enum ts_filter_type ts_type;
enum dmx_ts_pes pes_type;
int cc;
- int pusi_seen; /* prevents feeding of garbage from previous section */
+ bool pusi_seen;
u16 peslen;
struct list_head list_head;
- unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */
+ unsigned int index;
};
+/**
+ * struct dvb_demux - represents a digital TV demux
+ * @dmx: embedded &struct dmx_demux with demux capabilities
+ * and callbacks.
+ * @priv: private data that can optionally be used by
+ * a DVB driver.
+ * @filternum: maximum amount of DVB filters.
+ * @feednum: maximum amount of DVB feeds.
+ * @start_feed: callback routine to be called in order to start
+ * a DVB feed.
+ * @stop_feed: callback routine to be called in order to stop
+ * a DVB feed.
+ * @write_to_decoder: callback routine to be called if the feed is TS and
+ * it is routed to an A/V decoder, when a new TS packet
+ * is received.
+ * Used only by av7110-av.c.
+ * @check_crc32: callback routine to check CRC. If not initialized,
+ * dvb_demux will use an internal one.
+ * @memcopy: callback routine to memcopy received data.
+ * If not initialized, dvb_demux will default to memcpy().
+ * @users: counter for the number of demux opened file descriptors.
+ * Currently, it is limited to 10 users.
+ * @filter: pointer to &struct dvb_demux_filter.
+ * @feed: pointer to &struct dvb_demux_feed.
+ * @frontend_list: &struct list_head with frontends used by the demux.
+ * @pesfilter: array of &struct dvb_demux_feed with the PES types
+ * that will be filtered.
+ * @pids: list of filtered program IDs.
+ * @feed_list: &struct list_head with feeds.
+ * @tsbuf: temporary buffer used internally to store TS packets.
+ * @tsbufp: temporary buffer index used internally.
+ * @mutex: pointer to &struct mutex used to protect feed set
+ * logic.
+ * @lock: pointer to &spinlock_t, used to protect buffer handling.
+ * @cnt_storage: buffer used for TS/TEI continuity check.
+ * @speed_last_time: &ktime_t used for TS speed check.
+ * @speed_pkts_cnt: packets count used for TS speed check.
+ */
struct dvb_demux {
struct dmx_demux dmx;
void *priv;
@@ -115,8 +213,6 @@ struct dvb_demux {
struct dvb_demux_feed *pesfilter[DMX_PES_OTHER];
u16 pids[DMX_PES_OTHER];
- int playing;
- int recording;
#define DMX_MAX_PID 0x2000
struct list_head feed_list;
@@ -130,15 +226,119 @@ struct dvb_demux {
ktime_t speed_last_time; /* for TS speed check */
uint32_t speed_pkts_cnt; /* for TS speed check */
+
+ /* private: used only on av7110 */
+ int playing;
+ int recording;
};
-int dvb_dmx_init(struct dvb_demux *dvbdemux);
-void dvb_dmx_release(struct dvb_demux *dvbdemux);
-void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf,
+/**
+ * dvb_dmx_init - initialize a digital TV demux struct.
+ *
+ * @demux: &struct dvb_demux to be initialized.
+ *
+ * Before being able to register a digital TV demux struct, drivers
+ * should call this routine. On its typical usage, some fields should
+ * be initialized at the driver before calling it.
+ *
+ * A typical use case is::
+ *
+ * dvb->demux.dmx.capabilities =
+ * DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ * DMX_MEMORY_BASED_FILTERING;
+ * dvb->demux.priv = dvb;
+ * dvb->demux.filternum = 256;
+ * dvb->demux.feednum = 256;
+ * dvb->demux.start_feed = driver_start_feed;
+ * dvb->demux.stop_feed = driver_stop_feed;
+ * ret = dvb_dmx_init(&dvb->demux);
+ * if (ret < 0)
+ * return ret;
+ */
+int dvb_dmx_init(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_release - releases a digital TV demux internal buffers.
+ *
+ * @demux: &struct dvb_demux to be released.
+ *
+ * The DVB core internally allocates data at @demux. This routine
+ * releases that data. Note that the struct itself is not released,
+ * as it can be embedded in other structs.
+ */
+void dvb_dmx_release(struct dvb_demux *demux);
+
+/**
+ * dvb_dmx_swfilter_packets - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * The routine will discard a DVB packet that doesn't start with 0x47.
+ *
+ * Use this routine if the DVB demux fills MPEG-TS buffers that are
+ * already aligned.
+ *
+ * NOTE: The @buf size should be equal to ``count * 188``.
+ */
+void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 188 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 188.
+ *
+ * If a DVB packet doesn't start with 0x47, it will seek for the first
+ * byte that starts with 0x47.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: The @buf size should be equal to ``count * 188``.
+ */
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count);
+
+/**
+ * dvb_dmx_swfilter_204 - use dvb software filter for a buffer with
+ * multiple MPEG-TS packets with 204 bytes each.
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data to be filtered
+ * @count: number of MPEG-TS packets with size of 204.
+ *
+ * If a DVB packet doesn't start with 0x47, it will seek for the first
+ * byte that starts with 0x47.
+ *
+ * Use this routine if the DVB demux fills buffers that may not start with
+ * a packet start mark (0x47).
+ *
+ * NOTE: The @buf size should be equal to ``count * 204``.
+ */
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf,
size_t count);
+
+/**
+ * dvb_dmx_swfilter_raw - make the raw data available to userspace without
+ * filtering
+ *
+ * @demux: pointer to &struct dvb_demux
+ * @buf: buffer with data
+ * @count: number of packets to be passed. The actual size of each packet
+ * depends on the &dvb_demux->feed->cb.ts logic.
+ *
+ * Use it if the driver needs to deliver the raw payload to userspace without
+ * passing through the kernel demux. That is meant to support some
+ * delivery systems that aren't based on MPEG-TS.
+ *
+ * This function relies on &dvb_demux->feed->cb.ts to actually handle the
+ * buffer.
+ */
void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf,
size_t count);
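
As a usage illustration of the software-filter helpers documented above, here is a minimal sketch of a bridge driver handing aligned DMA data to the demux; demux, dma_buf and num_pkts are hypothetical names, not part of the patch:

static void feed_ts_packets(struct dvb_demux *demux, const u8 *dma_buf,
			    size_t num_pkts)
{
	/* dma_buf holds num_pkts aligned 188-byte MPEG-TS packets,
	 * each starting with the 0x47 sync byte */
	dvb_dmx_swfilter_packets(demux, dma_buf, num_pkts);
}
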
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 9139d01ba7ed..3ad83359098b 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- if (!fepriv)
- return;
-
- dvb_free_device(fepriv->dvbdev);
+ if (fepriv)
+ dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
- kfree(fepriv);
- fe->frontend_priv = NULL;
+ if (fepriv)
+ kfree(fepriv);
}
static void dvb_frontend_free(struct kref *ref)
@@ -951,8 +949,6 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
memset(c, 0, offsetof(struct dtv_frontend_properties, strength));
c->delivery_system = delsys;
- c->state = DTV_CLEAR;
-
dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n",
__func__, c->delivery_system);
@@ -1109,39 +1105,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
};
-static void dtv_property_dump(struct dvb_frontend *fe,
- bool is_set,
- struct dtv_property *tvp)
-{
- int i;
-
- if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
- dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
- __func__,
- is_set ? "SET" : "GET",
- tvp->cmd);
- return;
- }
-
- dev_dbg(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x (%s)\n", __func__,
- is_set ? "SET" : "GET",
- tvp->cmd,
- dtv_cmds[tvp->cmd].name);
-
- if (dtv_cmds[tvp->cmd].buffer) {
- dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
- __func__, tvp->u.buffer.len);
-
- for(i = 0; i < tvp->u.buffer.len; i++)
- dev_dbg(fe->dvb->device,
- "%s: tvp.u.buffer.data[0x%02x] = 0x%02x\n",
- __func__, i, tvp->u.buffer.data[i]);
- } else {
- dev_dbg(fe->dvb->device, "%s: tvp.u.data = 0x%08x\n", __func__,
- tvp->u.data);
- }
-}
-
/* Synchronise the legacy tuning parameters into the cache, so that demodulator
* drivers can use a single set_frontend tuning function, regardless of whether
* it's being used for the legacy or new API, reducing code and complexity.
@@ -1315,17 +1278,15 @@ static int dtv_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg);
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg);
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg);
static int dtv_property_process_get(struct dvb_frontend *fe,
const struct dtv_frontend_properties *c,
struct dtv_property *tvp,
struct file *file)
{
- int r, ncaps;
+ int ncaps;
switch(tvp->cmd) {
case DTV_ENUM_DELSYS:
@@ -1536,14 +1497,18 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
return -EINVAL;
}
- /* Allow the frontend to override outgoing properties */
- if (fe->ops.get_property) {
- r = fe->ops.get_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, false, tvp);
+ if (!dtv_cmds[tvp->cmd].buffer)
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) = 0x%08x\n",
+ __func__, tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.data);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: GET cmd 0x%08x (%s) len %d: %*ph\n",
+ __func__,
+ tvp->cmd, dtv_cmds[tvp->cmd].name,
+ tvp->u.buffer.len,
+ tvp->u.buffer.len, tvp->u.buffer.data);
return 0;
}
@@ -1766,23 +1731,36 @@ static int dvbv3_set_delivery_system(struct dvb_frontend *fe)
return emulate_delivery_system(fe, delsys);
}
+/**
+ * dtv_property_process_set - Sets a single DTV property
+ * @fe: Pointer to &struct dvb_frontend
+ * @file: Pointer to &struct file
+ * @cmd: Digital TV command
+ * @data: An unsigned 32-bits number
+ *
+ * This routine assigns the property
+ * value to the corresponding member of
+ * &struct dtv_frontend_properties
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
static int dtv_property_process_set(struct dvb_frontend *fe,
- struct dtv_property *tvp,
- struct file *file)
+ struct file *file,
+ u32 cmd, u32 data)
{
int r = 0;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- /* Allow the frontend to validate incoming properties */
- if (fe->ops.set_property) {
- r = fe->ops.set_property(fe, tvp);
- if (r < 0)
- return r;
- }
-
- dtv_property_dump(fe, true, tvp);
-
- switch(tvp->cmd) {
+ /* Dump the DTV command name and value */
+ if (!cmd || cmd > DTV_MAX_COMMAND)
+ dev_warn(fe->dvb->device, "%s: SET cmd 0x%08x undefined\n",
+ __func__, cmd);
+ else
+ dev_dbg(fe->dvb->device,
+ "%s: SET cmd 0x%08x (%s) to 0x%08x\n",
+ __func__, cmd, dtv_cmds[cmd].name, data);
+ switch (cmd) {
case DTV_CLEAR:
/*
* Reset a cache of data specific to the frontend here. This does
@@ -1791,144 +1769,144 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
dvb_frontend_clear_cache(fe);
break;
case DTV_TUNE:
- /* interpret the cache of data, build either a traditional frontend
- * tunerequest so we can pass validation in the FE_SET_FRONTEND
- * ioctl.
+ /*
+ * Use the cached Digital TV properties to tune the
+ * frontend
*/
- c->state = tvp->cmd;
- dev_dbg(fe->dvb->device, "%s: Finalised property cache\n",
- __func__);
+ dev_dbg(fe->dvb->device,
+ "%s: Setting the frontend from property cache\n",
+ __func__);
r = dtv_set_frontend(fe);
break;
case DTV_FREQUENCY:
- c->frequency = tvp->u.data;
+ c->frequency = data;
break;
case DTV_MODULATION:
- c->modulation = tvp->u.data;
+ c->modulation = data;
break;
case DTV_BANDWIDTH_HZ:
- c->bandwidth_hz = tvp->u.data;
+ c->bandwidth_hz = data;
break;
case DTV_INVERSION:
- c->inversion = tvp->u.data;
+ c->inversion = data;
break;
case DTV_SYMBOL_RATE:
- c->symbol_rate = tvp->u.data;
+ c->symbol_rate = data;
break;
case DTV_INNER_FEC:
- c->fec_inner = tvp->u.data;
+ c->fec_inner = data;
break;
case DTV_PILOT:
- c->pilot = tvp->u.data;
+ c->pilot = data;
break;
case DTV_ROLLOFF:
- c->rolloff = tvp->u.data;
+ c->rolloff = data;
break;
case DTV_DELIVERY_SYSTEM:
- r = dvbv5_set_delivery_system(fe, tvp->u.data);
+ r = dvbv5_set_delivery_system(fe, data);
break;
case DTV_VOLTAGE:
- c->voltage = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE,
+ c->voltage = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_VOLTAGE,
(void *)c->voltage);
break;
case DTV_TONE:
- c->sectone = tvp->u.data;
- r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE,
+ c->sectone = data;
+ r = dvb_frontend_handle_ioctl(file, FE_SET_TONE,
(void *)c->sectone);
break;
case DTV_CODE_RATE_HP:
- c->code_rate_HP = tvp->u.data;
+ c->code_rate_HP = data;
break;
case DTV_CODE_RATE_LP:
- c->code_rate_LP = tvp->u.data;
+ c->code_rate_LP = data;
break;
case DTV_GUARD_INTERVAL:
- c->guard_interval = tvp->u.data;
+ c->guard_interval = data;
break;
case DTV_TRANSMISSION_MODE:
- c->transmission_mode = tvp->u.data;
+ c->transmission_mode = data;
break;
case DTV_HIERARCHY:
- c->hierarchy = tvp->u.data;
+ c->hierarchy = data;
break;
case DTV_INTERLEAVING:
- c->interleaving = tvp->u.data;
+ c->interleaving = data;
break;
/* ISDB-T Support here */
case DTV_ISDBT_PARTIAL_RECEPTION:
- c->isdbt_partial_reception = tvp->u.data;
+ c->isdbt_partial_reception = data;
break;
case DTV_ISDBT_SOUND_BROADCASTING:
- c->isdbt_sb_mode = tvp->u.data;
+ c->isdbt_sb_mode = data;
break;
case DTV_ISDBT_SB_SUBCHANNEL_ID:
- c->isdbt_sb_subchannel = tvp->u.data;
+ c->isdbt_sb_subchannel = data;
break;
case DTV_ISDBT_SB_SEGMENT_IDX:
- c->isdbt_sb_segment_idx = tvp->u.data;
+ c->isdbt_sb_segment_idx = data;
break;
case DTV_ISDBT_SB_SEGMENT_COUNT:
- c->isdbt_sb_segment_count = tvp->u.data;
+ c->isdbt_sb_segment_count = data;
break;
case DTV_ISDBT_LAYER_ENABLED:
- c->isdbt_layer_enabled = tvp->u.data;
+ c->isdbt_layer_enabled = data;
break;
case DTV_ISDBT_LAYERA_FEC:
- c->layer[0].fec = tvp->u.data;
+ c->layer[0].fec = data;
break;
case DTV_ISDBT_LAYERA_MODULATION:
- c->layer[0].modulation = tvp->u.data;
+ c->layer[0].modulation = data;
break;
case DTV_ISDBT_LAYERA_SEGMENT_COUNT:
- c->layer[0].segment_count = tvp->u.data;
+ c->layer[0].segment_count = data;
break;
case DTV_ISDBT_LAYERA_TIME_INTERLEAVING:
- c->layer[0].interleaving = tvp->u.data;
+ c->layer[0].interleaving = data;
break;
case DTV_ISDBT_LAYERB_FEC:
- c->layer[1].fec = tvp->u.data;
+ c->layer[1].fec = data;
break;
case DTV_ISDBT_LAYERB_MODULATION:
- c->layer[1].modulation = tvp->u.data;
+ c->layer[1].modulation = data;
break;
case DTV_ISDBT_LAYERB_SEGMENT_COUNT:
- c->layer[1].segment_count = tvp->u.data;
+ c->layer[1].segment_count = data;
break;
case DTV_ISDBT_LAYERB_TIME_INTERLEAVING:
- c->layer[1].interleaving = tvp->u.data;
+ c->layer[1].interleaving = data;
break;
case DTV_ISDBT_LAYERC_FEC:
- c->layer[2].fec = tvp->u.data;
+ c->layer[2].fec = data;
break;
case DTV_ISDBT_LAYERC_MODULATION:
- c->layer[2].modulation = tvp->u.data;
+ c->layer[2].modulation = data;
break;
case DTV_ISDBT_LAYERC_SEGMENT_COUNT:
- c->layer[2].segment_count = tvp->u.data;
+ c->layer[2].segment_count = data;
break;
case DTV_ISDBT_LAYERC_TIME_INTERLEAVING:
- c->layer[2].interleaving = tvp->u.data;
+ c->layer[2].interleaving = data;
break;
/* Multistream support */
case DTV_STREAM_ID:
case DTV_DVBT2_PLP_ID_LEGACY:
- c->stream_id = tvp->u.data;
+ c->stream_id = data;
break;
/* ATSC-MH */
case DTV_ATSCMH_PARADE_ID:
- fe->dtv_property_cache.atscmh_parade_id = tvp->u.data;
+ fe->dtv_property_cache.atscmh_parade_id = data;
break;
case DTV_ATSCMH_RS_FRAME_ENSEMBLE:
- fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data;
+ fe->dtv_property_cache.atscmh_rs_frame_ensemble = data;
break;
case DTV_LNA:
- c->lna = tvp->u.data;
+ c->lna = data;
if (fe->ops.set_lna)
r = fe->ops.set_lna(fe);
if (r < 0)
@@ -1942,14 +1920,12 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
return r;
}
-static int dvb_frontend_ioctl(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- int err = -EOPNOTSUPP;
+ int err;
dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
if (down_interruptible(&fepriv->sem))
@@ -1960,109 +1936,33 @@ static int dvb_frontend_ioctl(struct file *file,
return -ENODEV;
}
- if ((file->f_flags & O_ACCMODE) == O_RDONLY &&
- (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT ||
- cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
+ /*
+ * If the frontend is opened in read-only mode, only the ioctls
+ * that don't interfere with the tune logic should be accepted.
+ * That allows an external application to monitor the DVB QoS and
+ * statistics parameters.
+ *
+ * That matches all _IOR() ioctls, except for two special cases:
+ * - FE_GET_EVENT is part of the tuning logic on a DVB application;
+ * - FE_DISEQC_RECV_SLAVE_REPLY is part of DiSEqC 2.0
+ * setup
+ * So, those two ioctls should also return -EPERM, as otherwise
+ * reading from them would interfere with a DVB tune application
+ */
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY
+ && (_IOC_DIR(cmd) != _IOC_READ
+ || cmd == FE_GET_EVENT
+ || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) {
up(&fepriv->sem);
return -EPERM;
}
- if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY))
- err = dvb_frontend_ioctl_properties(file, cmd, parg);
- else {
- c->state = DTV_UNDEFINED;
- err = dvb_frontend_ioctl_legacy(file, cmd, parg);
- }
+ err = dvb_frontend_handle_ioctl(file, cmd, parg);
up(&fepriv->sem);
return err;
}
-static int dvb_frontend_ioctl_properties(struct file *file,
- unsigned int cmd, void *parg)
-{
- struct dvb_device *dvbdev = file->private_data;
- struct dvb_frontend *fe = dvbdev->priv;
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = 0;
-
- struct dtv_properties *tvps = parg;
- struct dtv_property *tvp = NULL;
- int i;
-
- dev_dbg(fe->dvb->device, "%s:\n", __func__);
-
- if (cmd == FE_SET_PROPERTY) {
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_set(fe, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (c->state == DTV_TUNE)
- dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__);
-
- } else if (cmd == FE_GET_PROPERTY) {
- struct dtv_frontend_properties getp = fe->dtv_property_cache;
-
- dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num);
- dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props);
-
- /* Put an arbitrary limit on the number of messages that can
- * be sent at once */
- if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS))
- return -EINVAL;
-
- tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
- if (IS_ERR(tvp))
- return PTR_ERR(tvp);
-
- /*
- * Let's use our own copy of property cache, in order to
- * avoid mangling with DTV zigzag logic, as drivers might
- * return crap, if they don't check if the data is available
- * before updating the properties cache.
- */
- if (fepriv->state != FESTATE_IDLE) {
- err = dtv_get_frontend(fe, &getp, NULL);
- if (err < 0)
- goto out;
- }
- for (i = 0; i < tvps->num; i++) {
- err = dtv_property_process_get(fe, &getp, tvp + i, file);
- if (err < 0)
- goto out;
- (tvp + i)->result = err;
- }
-
- if (copy_to_user((void __user *)tvps->props, tvp,
- tvps->num * sizeof(struct dtv_property))) {
- err = -EFAULT;
- goto out;
- }
-
- } else
- err = -EOPNOTSUPP;
-
-out:
- kfree(tvp);
- return err;
-}
-
static int dtv_set_frontend(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -2200,16 +2100,102 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
}
-static int dvb_frontend_ioctl_legacy(struct file *file,
- unsigned int cmd, void *parg)
+static int dvb_frontend_handle_ioctl(struct file *file,
+ unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- int err = -EOPNOTSUPP;
+ int i, err;
+
+ dev_dbg(fe->dvb->device, "%s:\n", __func__);
switch (cmd) {
+ case FE_SET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_set(fe, file,
+ (tvp + i)->cmd,
+ (tvp + i)->u.data);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ kfree(tvp);
+ break;
+ }
+ case FE_GET_PROPERTY: {
+ struct dtv_properties *tvps = parg;
+ struct dtv_property *tvp = NULL;
+ struct dtv_frontend_properties getp = fe->dtv_property_cache;
+
+ dev_dbg(fe->dvb->device, "%s: properties.num = %d\n",
+ __func__, tvps->num);
+ dev_dbg(fe->dvb->device, "%s: properties.props = %p\n",
+ __func__, tvps->props);
+
+ /*
+ * Put an arbitrary limit on the number of messages that can
+ * be sent at once
+ */
+ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS))
+ return -EINVAL;
+
+ tvp = memdup_user(tvps->props, tvps->num * sizeof(*tvp));
+ if (IS_ERR(tvp))
+ return PTR_ERR(tvp);
+
+ /*
+ * Let's use our own copy of property cache, in order to
+ * avoid mangling with DTV zigzag logic, as drivers might
+ * return crap, if they don't check if the data is available
+ * before updating the properties cache.
+ */
+ if (fepriv->state != FESTATE_IDLE) {
+ err = dtv_get_frontend(fe, &getp, NULL);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+ for (i = 0; i < tvps->num; i++) {
+ err = dtv_property_process_get(fe, &getp,
+ tvp + i, file);
+ if (err < 0) {
+ kfree(tvp);
+ return err;
+ }
+ }
+
+ if (copy_to_user((void __user *)tvps->props, tvp,
+ tvps->num * sizeof(struct dtv_property))) {
+ kfree(tvp);
+ return -EFAULT;
+ }
+ kfree(tvp);
+ break;
+ }
+
case FE_GET_INFO: {
struct dvb_frontend_info* info = parg;
@@ -2273,42 +2259,6 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
break;
}
- case FE_READ_BER:
- if (fe->ops.read_ber) {
- if (fepriv->thread)
- err = fe->ops.read_ber(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SIGNAL_STRENGTH:
- if (fe->ops.read_signal_strength) {
- if (fepriv->thread)
- err = fe->ops.read_signal_strength(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_SNR:
- if (fe->ops.read_snr) {
- if (fepriv->thread)
- err = fe->ops.read_snr(fe, (__u16 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
- case FE_READ_UNCORRECTED_BLOCKS:
- if (fe->ops.read_ucblocks) {
- if (fepriv->thread)
- err = fe->ops.read_ucblocks(fe, (__u32 *) parg);
- else
- err = -EAGAIN;
- }
- break;
-
case FE_DISEQC_RESET_OVERLOAD:
if (fe->ops.diseqc_reset_overload) {
err = fe->ops.diseqc_reset_overload(fe);
@@ -2360,6 +2310,23 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
+ case FE_DISEQC_RECV_SLAVE_REPLY:
+ if (fe->ops.diseqc_recv_slave_reply)
+ err = fe->ops.diseqc_recv_slave_reply(fe, parg);
+ break;
+
+ case FE_ENABLE_HIGH_LNB_VOLTAGE:
+ if (fe->ops.enable_high_lnb_voltage)
+ err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ break;
+
+ case FE_SET_FRONTEND_TUNE_MODE:
+ fepriv->tune_mode_flags = (unsigned long) parg;
+ err = 0;
+ break;
+
+ /* DEPRECATED dish control ioctls */
+
case FE_DISHNETWORK_SEND_LEGACY_CMD:
if (fe->ops.dishnetwork_send_legacy_command) {
err = fe->ops.dishnetwork_send_legacy_command(fe,
@@ -2425,16 +2392,46 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
}
break;
- case FE_DISEQC_RECV_SLAVE_REPLY:
- if (fe->ops.diseqc_recv_slave_reply)
- err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg);
+ /* DEPRECATED statistics ioctls */
+
+ case FE_READ_BER:
+ if (fe->ops.read_ber) {
+ if (fepriv->thread)
+ err = fe->ops.read_ber(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
- case FE_ENABLE_HIGH_LNB_VOLTAGE:
- if (fe->ops.enable_high_lnb_voltage)
- err = fe->ops.enable_high_lnb_voltage(fe, (long) parg);
+ case FE_READ_SIGNAL_STRENGTH:
+ if (fe->ops.read_signal_strength) {
+ if (fepriv->thread)
+ err = fe->ops.read_signal_strength(fe, parg);
+ else
+ err = -EAGAIN;
+ }
break;
+ case FE_READ_SNR:
+ if (fe->ops.read_snr) {
+ if (fepriv->thread)
+ err = fe->ops.read_snr(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ case FE_READ_UNCORRECTED_BLOCKS:
+ if (fe->ops.read_ucblocks) {
+ if (fepriv->thread)
+ err = fe->ops.read_ucblocks(fe, parg);
+ else
+ err = -EAGAIN;
+ }
+ break;
+
+ /* DEPRECATED DVBv3 ioctls */
+
case FE_SET_FRONTEND:
err = dvbv3_set_delivery_system(fe);
if (err)
@@ -2461,11 +2458,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
err = dtv_get_frontend(fe, &getp, parg);
break;
}
- case FE_SET_FRONTEND_TUNE_MODE:
- fepriv->tune_mode_flags = (unsigned long) parg;
- err = 0;
- break;
- }
+
+ default:
+ return -ENOTSUPP;
+ } /* switch */
return err;
}
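
The refactor above folds the legacy and properties paths into a single dvb_frontend_handle_ioctl(), where each FE_SET_PROPERTY entry is dispatched through dtv_property_process_set() and DTV_TUNE finally programs the hardware from the property cache. A userspace-side sketch of the DVBv5 call it services follows (the helper name and property choices are illustrative, not part of the patch); note the frontend must be opened read-write, since read-only descriptors are limited to monitoring ioctls as explained in the new comment:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

static int tune_dvbt(int fe_fd, __u32 freq_hz)
{
	struct dtv_property props[] = {
		{ .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT },
		{ .cmd = DTV_FREQUENCY,       .u.data = freq_hz },
		{ .cmd = DTV_TUNE },	/* tune from the cached properties */
	};
	struct dtv_properties cmdseq = {
		.num = sizeof(props) / sizeof(props[0]),
		.props = props,
	};

	return ioctl(fe_fd, FE_SET_PROPERTY, &cmdseq);
}
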
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index 907a05bde162..ace0c2fb26c2 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -180,8 +180,8 @@ enum dvbfe_search {
/**
* struct dvb_tuner_ops - Tuner information and callbacks
*
- * @info: embedded struct dvb_tuner_info with tuner properties
- * @release: callback function called when frontend is dettached.
+ * @info: embedded &struct dvb_tuner_info with tuner properties
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @init: callback function used to initialize the tuner device.
* @sleep: callback function used to put the tuner to sleep.
@@ -191,14 +191,14 @@ enum dvbfe_search {
* resuming from suspend.
* @set_params: callback function used to inform the tuner to tune
* into a digital TV channel. The properties to be used
- * are stored at @dvb_frontend.dtv_property_cache;. The
- * tuner demod can change the parameters to reflect the
- * changes needed for the channel to be tuned, and
+ * are stored at &struct dvb_frontend.dtv_property_cache.
+ * The tuner demod can change the parameters to reflect
+ * the changes needed for the channel to be tuned, and
* update statistics. This is the recommended way to set
* the tuner parameters and should be used on newer
* drivers.
* @set_analog_params: callback function used to tune into an analog TV
- * channel on hybrid tuners. It passes @analog_parameters;
+ * channel on hybrid tuners. It passes @analog_parameters
* to the driver.
* @set_config: callback function used to send some tuner-specific
* parameters.
@@ -207,9 +207,9 @@ enum dvbfe_search {
* @get_if_frequency: get the Intermediate Frequency, in Hz. For baseband,
* should return 0.
* @get_status: returns the frontend lock status
- * @get_rf_strength: returns the RF signal strengh. Used mostly to support
+ * @get_rf_strength: returns the RF signal strength. Used mostly to support
* analog TV and radio. Digital TV should report, instead,
- * via DVBv5 API (@dvb_frontend.dtv_property_cache;).
+ * via DVBv5 API (&struct dvb_frontend.dtv_property_cache).
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @calc_regs: callback function used to pass register data settings
@@ -217,7 +217,7 @@ enum dvbfe_search {
* @set_frequency: Set a new frequency. Shouldn't be used on newer drivers.
* @set_bandwidth: Set a new frequency. Shouldn't be used on newer drivers.
*
- * NOTE: frequencies used on get_frequency and set_frequency are in Hz for
+ * NOTE: frequencies used on @get_frequency and @set_frequency are in Hz for
* terrestrial/cable or kHz for satellite.
*
*/
@@ -283,14 +283,14 @@ struct analog_demod_info {
* @set_params: callback function used to inform the demod to set the
* demodulator parameters needed to decode an analog or
* radio channel. The properties are passed via
- * struct @analog_params;.
+ * &struct analog_params.
* @has_signal: returns 0xffff if has signal, or 0 if it doesn't.
* @get_afc: Used only by analog TV core. Reports the frequency
* drift due to AFC.
* @tuner_status: callback function that returns tuner status bits, e. g.
- * TUNER_STATUS_LOCKED and TUNER_STATUS_STEREO.
+ * %TUNER_STATUS_LOCKED and %TUNER_STATUS_STEREO.
* @standby: set the tuner to standby mode.
- * @release: callback function called when frontend is dettached.
+ * @release: callback function called when frontend is detached.
* drivers should free any allocated memory.
* @i2c_gate_ctrl: controls the I2C gate. Newer drivers should use I2C
* mux support instead.
@@ -321,10 +321,10 @@ struct dtv_frontend_properties;
* struct dvb_frontend_ops - Demodulation information and callbacks for
 * digital TV
*
- * @info: embedded struct dvb_tuner_info with tuner properties
+ * @info: embedded &struct dvb_tuner_info with tuner properties
* @delsys: Delivery systems supported by the frontend
* @detach: callback function called when frontend is detached.
- * drivers should clean up, but not yet free the struct
+ * drivers should clean up, but not yet free the &struct
* dvb_frontend allocation.
* @release: callback function called when frontend is ready to be
* freed.
@@ -338,57 +338,57 @@ struct dtv_frontend_properties;
* allow other drivers to write data into their registers.
* Should not be used on new drivers.
* @tune: callback function used by demod drivers that use
- * @DVBFE_ALGO_HW; to tune into a frequency.
+ * @DVBFE_ALGO_HW to tune into a frequency.
* @get_frontend_algo: returns the desired hardware algorithm.
* @set_frontend: callback function used to inform the demod to set the
* parameters for demodulating a digital TV channel.
- * The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache;. The demod can change
+ * The properties to be used are stored at &struct
+ * dvb_frontend.dtv_property_cache. The demod can change
* the parameters to reflect the changes needed for the
* channel to be decoded, and update statistics.
* @get_tune_settings: callback function
* @get_frontend: callback function used to inform the parameters
 * actually in use. The properties to be used are stored at
- * @dvb_frontend.dtv_property_cache; and update
+ * &struct dvb_frontend.dtv_property_cache and update
* statistics. Please notice that it should not return
* an error code if the statistics are not available
 * because the demod is not locked.
* @read_status: returns the locking status of the frontend.
* @read_ber: legacy callback function to return the bit error rate.
* Newer drivers should provide such info via DVBv5 API,
- * e. g. @set_frontend;/@get_frontend;, implementing this
+ * e. g. @set_frontend/@get_frontend, implementing this
* callback only if DVBv3 API compatibility is wanted.
* @read_signal_strength: legacy callback function to return the signal
* strength. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_snr: legacy callback function to return the Signal/Noise
* rate. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @read_ucblocks: legacy callback function to return the Uncorrected Error
* Blocks. Newer drivers should provide such info via
- * DVBv5 API, e. g. @set_frontend;/@get_frontend;,
+ * DVBv5 API, e. g. @set_frontend/@get_frontend,
* implementing this callback only if DVBv3 API
* compatibility is wanted.
* @diseqc_reset_overload: callback function to implement the
- * FE_DISEQC_RESET_OVERLOAD ioctl (only Satellite)
+ * FE_DISEQC_RESET_OVERLOAD() ioctl (only Satellite)
* @diseqc_send_master_cmd: callback function to implement the
- * FE_DISEQC_SEND_MASTER_CMD ioctl (only Satellite).
+ * FE_DISEQC_SEND_MASTER_CMD() ioctl (only Satellite).
* @diseqc_recv_slave_reply: callback function to implement the
- * FE_DISEQC_RECV_SLAVE_REPLY ioctl (only Satellite)
+ * FE_DISEQC_RECV_SLAVE_REPLY() ioctl (only Satellite)
* @diseqc_send_burst: callback function to implement the
- * FE_DISEQC_SEND_BURST ioctl (only Satellite).
+ * FE_DISEQC_SEND_BURST() ioctl (only Satellite).
* @set_tone: callback function to implement the
- * FE_SET_TONE ioctl (only Satellite).
+ * FE_SET_TONE() ioctl (only Satellite).
* @set_voltage: callback function to implement the
- * FE_SET_VOLTAGE ioctl (only Satellite).
+ * FE_SET_VOLTAGE() ioctl (only Satellite).
* @enable_high_lnb_voltage: callback function to implement the
- * FE_ENABLE_HIGH_LNB_VOLTAGE ioctl (only Satellite).
+ * FE_ENABLE_HIGH_LNB_VOLTAGE() ioctl (only Satellite).
* @dishnetwork_send_legacy_command: callback function to implement the
- * FE_DISHNETWORK_SEND_LEGACY_CMD ioctl (only Satellite).
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl (only Satellite).
* Drivers should not use this, except when the DVB
* core emulation fails to provide proper support (e.g.
* if @set_voltage takes more than 8ms to work), and
@@ -399,15 +399,10 @@ struct dtv_frontend_properties;
* @ts_bus_ctrl: callback function used to take control of the TS bus.
* @set_lna: callback function to power on/off/auto the LNA.
* @search: callback function used on some custom algo search algos.
- * @tuner_ops: pointer to struct dvb_tuner_ops
- * @analog_ops: pointer to struct analog_demod_ops
- * @set_property: callback function to allow the frontend to validade
- * incoming properties. Should not be used on new drivers.
- * @get_property: callback function to allow the frontend to override
- * outcoming properties. Should not be used on new drivers.
+ * @tuner_ops: pointer to &struct dvb_tuner_ops
+ * @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
-
struct dvb_frontend_info info;
u8 delsys[MAX_DELSYS];
@@ -466,9 +461,6 @@ struct dvb_frontend_ops {
struct dvb_tuner_ops tuner_ops;
struct analog_demod_ops analog_ops;
-
- int (*set_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
- int (*get_property)(struct dvb_frontend* fe, struct dtv_property* tvp);
};
#ifdef __DVB_CORE__
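With set_property()/get_property() removed from struct dvb_frontend_ops, a DVBv5 demod driver only fills the remaining callbacks and lets tuning parameters travel through dtv_property_cache. A rough sketch of such an ops table, with hypothetical foodemod_* callbacks assumed to be defined elsewhere:

static const struct dvb_frontend_ops foodemod_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name = "Example DVB-T demodulator",
		.caps = FE_CAN_FEC_AUTO | FE_CAN_QAM_AUTO,
	},
	.release	= foodemod_release,
	.init		= foodemod_init,
	.set_frontend	= foodemod_set_frontend,	/* reads dtv_property_cache */
	.get_frontend	= foodemod_get_frontend,	/* fills dtv_property_cache */
	.read_status	= foodemod_read_status,
};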
@@ -565,15 +557,15 @@ struct dtv_frontend_properties {
enum fe_sec_voltage voltage;
enum fe_sec_tone_mode sectone;
- enum fe_spectral_inversion inversion;
- enum fe_code_rate fec_inner;
+ enum fe_spectral_inversion inversion;
+ enum fe_code_rate fec_inner;
enum fe_transmit_mode transmission_mode;
u32 bandwidth_hz; /* 0 = AUTO */
enum fe_guard_interval guard_interval;
- enum fe_hierarchy hierarchy;
+ enum fe_hierarchy hierarchy;
u32 symbol_rate;
- enum fe_code_rate code_rate_HP;
- enum fe_code_rate code_rate_LP;
+ enum fe_code_rate code_rate_HP;
+ enum fe_code_rate code_rate_LP;
enum fe_pilot pilot;
enum fe_rolloff rolloff;
@@ -628,11 +620,6 @@ struct dtv_frontend_properties {
struct dtv_fe_stats post_bit_count;
struct dtv_fe_stats block_error;
struct dtv_fe_stats block_count;
-
- /* private: */
- /* Cache State */
- u32 state;
-
};
#define DVB_FE_NO_EXIT 0
@@ -643,16 +630,16 @@ struct dtv_frontend_properties {
/**
* struct dvb_frontend - Frontend structure to be used on drivers.
*
- * @refcount: refcount to keep track of struct dvb_frontend
+ * @refcount: refcount to keep track of &struct dvb_frontend
* references
- * @ops: embedded struct dvb_frontend_ops
- * @dvb: pointer to struct dvb_adapter
+ * @ops: embedded &struct dvb_frontend_ops
+ * @dvb: pointer to &struct dvb_adapter
* @demodulator_priv: demod private data
* @tuner_priv: tuner private data
* @frontend_priv: frontend private data
* @sec_priv: SEC private data
* @analog_demod_priv: Analog demod private data
- * @dtv_property_cache: embedded struct dtv_frontend_properties
+ * @dtv_property_cache: embedded &struct dtv_frontend_properties
* @callback: callback function used on some drivers to call
* either the tuner or the demodulator.
* @id: Frontend ID
@@ -681,8 +668,8 @@ struct dvb_frontend {
/**
* dvb_register_frontend() - Registers a DVB frontend at the adapter
*
- * @dvb: pointer to the dvb adapter
- * @fe: pointer to the frontend struct
+ * @dvb: pointer to &struct dvb_adapter
+ * @fe: pointer to &struct dvb_frontend
*
* Allocate and initialize the private data needed by the frontend core to
* manage the frontend and calls dvb_register_device() to register a new
@@ -695,7 +682,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
/**
* dvb_unregister_frontend() - Unregisters a DVB frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Stops the frontend kthread, calls dvb_unregister_device() and frees the
* private frontend data allocated by dvb_register_frontend().
@@ -709,14 +696,14 @@ int dvb_unregister_frontend(struct dvb_frontend *fe);
/**
* dvb_frontend_detach() - Detaches and frees frontend specific data
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function should be called after dvb_unregister_frontend(). It
* calls the SEC, tuner and demod release functions:
* &dvb_frontend_ops.release_sec, &dvb_frontend_ops.tuner_ops.release,
* &dvb_frontend_ops.analog_ops.release and &dvb_frontend_ops.release.
*
- * If the driver is compiled with CONFIG_MEDIA_ATTACH, it also decreases
+ * If the driver is compiled with %CONFIG_MEDIA_ATTACH, it also decreases
* the module reference count, needed to allow userspace to remove the
* previously used DVB frontend modules.
*/
@@ -725,7 +712,7 @@ void dvb_frontend_detach(struct dvb_frontend *fe);
/**
* dvb_frontend_suspend() - Suspends a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function prepares a Digital TV frontend to suspend.
*
@@ -743,7 +730,7 @@ int dvb_frontend_suspend(struct dvb_frontend *fe);
/**
* dvb_frontend_resume() - Resumes a Digital TV frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* This function resumes the usual operation of the tuner after resume.
*
@@ -764,7 +751,7 @@ int dvb_frontend_resume(struct dvb_frontend *fe);
/**
* dvb_frontend_reinitialise() - forces a reinitialisation at the frontend
*
- * @fe: pointer to the frontend struct
+ * @fe: pointer to &struct dvb_frontend
*
* Calls &dvb_frontend_ops.init\(\) and &dvb_frontend_ops.tuner_ops.init\(\),
* and resets SEC tone and voltage (for Satellite systems).
@@ -779,16 +766,16 @@ void dvb_frontend_reinitialise(struct dvb_frontend *fe);
* dvb_frontend_sleep_until() - Sleep for the amount of time given by
* add_usec parameter
*
- * @waketime: pointer to a struct ktime_t
+ * @waketime: pointer to &struct ktime_t
* @add_usec: time to sleep, in microseconds
*
* This function is used to measure the time required for the
- * %FE_DISHNETWORK_SEND_LEGACY_CMD ioctl to work. It needs to be as precise
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() ioctl to work. It needs to be as precise
* as possible, as it affects the detection of the dish tone command at the
* satellite subsystem.
*
* Its used internally by the DVB frontend core, in order to emulate
- * %FE_DISHNETWORK_SEND_LEGACY_CMD using the &dvb_frontend_ops.set_voltage\(\)
+ * FE_DISHNETWORK_SEND_LEGACY_CMD() using the &dvb_frontend_ops.set_voltage\(\)
* callback.
*
* NOTE: it should not be used at the drivers, as the emulation for the
diff --git a/drivers/media/dvb-core/dvb_net.h b/drivers/media/dvb-core/dvb_net.h
index e9b18aa03e02..1eae8bad7cc1 100644
--- a/drivers/media/dvb-core/dvb_net.h
+++ b/drivers/media/dvb-core/dvb_net.h
@@ -30,6 +30,22 @@
#ifdef CONFIG_DVB_NET
+/**
+ * struct dvb_net - describes a DVB network interface
+ *
+ * @dvbdev: pointer to &struct dvb_device.
+ * @device: array of pointers to &struct net_device.
+ * @state: array of integers, one per net device. A value
+ * different from zero means that the interface is
+ * in use.
+ * @exit: flag to indicate when the device is being removed.
+ * @demux: pointer to &struct dmx_demux.
+ * @ioctl_mutex: protect access to this struct.
+ *
+ * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
+ * devices.
+ */
+
struct dvb_net {
struct dvb_device *dvbdev;
struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -39,8 +55,22 @@ struct dvb_net {
struct mutex ioctl_mutex;
};
-void dvb_net_release(struct dvb_net *);
-int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
+/**
+ * dvb_net_init - initializes a digital TV network device and registers it.
+ *
+ * @adap: pointer to &struct dvb_adapter.
+ * @dvbnet: pointer to &struct dvb_net.
+ * @dmxdemux: pointer to &struct dmx_demux.
+ */
+int dvb_net_init(struct dvb_adapter *adap, struct dvb_net *dvbnet,
+ struct dmx_demux *dmxdemux);
+
+/**
+ * dvb_net_release - releases a digital TV network device and unregisters it.
+ *
+ * @dvbnet: pointer to &struct dvb_net.
+ */
+void dvb_net_release(struct dvb_net *dvbnet);
#else
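The two declarations documented above are the whole public surface of dvb_net; a hedged sketch of how a bridge driver might call them during setup and teardown (struct foo_dev and its members are hypothetical):

static int foo_dvb_net_setup(struct foo_dev *dev)
{
	/* Register the network interface on the adapter's demux. */
	return dvb_net_init(&dev->adapter, &dev->dvbnet, &dev->demux.dmx);
}

static void foo_dvb_net_teardown(struct foo_dev *dev)
{
	dvb_net_release(&dev->dvbnet);
}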
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 2322af1b8742..53011629c9ad 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- /* ACCESS_ONCE() to load read pointer on writer side
+ /* READ_ONCE() to load read pointer on writer side
* this pairs with smp_store_release() in dvb_ringbuffer_read(),
* dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
* or dvb_ringbuffer_reset()
*/
- free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ free = READ_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
@@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
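The comment updates above describe a release/acquire style pairing: the writer samples the read pointer with READ_ONCE() to compute free space, while readers publish the advanced read pointer with smp_store_release() only after the data has been copied out. A condensed sketch of the pattern (hypothetical foo_ helpers, wrap-around handling simplified compared with the real ring-buffer code):

/* Writer side: how much space is free?  Samples the reader's
 * position once; pairs with the smp_store_release() below. */
static ssize_t foo_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
	ssize_t free = READ_ONCE(rbuf->pread) - rbuf->pwrite;

	if (free <= 0)
		free += rbuf->size;
	return free - 1;
}

/* Reader side: publish the new read pointer only after the data has
 * been copied out, so the writer never overwrites live data. */
static void foo_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
	memcpy(buf, rbuf->data + rbuf->pread, len);
	smp_store_release(&rbuf->pread, (rbuf->pread + len) % rbuf->size);
}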
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 41aad0f99d73..060c60ddfcc3 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -51,8 +51,15 @@ static LIST_HEAD(dvb_adapter_list);
static DEFINE_MUTEX(dvbdev_register_lock);
static const char * const dnames[] = {
- "video", "audio", "sec", "frontend", "demux", "dvr", "ca",
- "net", "osd"
+ [DVB_DEVICE_VIDEO] = "video",
+ [DVB_DEVICE_AUDIO] = "audio",
+ [DVB_DEVICE_SEC] = "sec",
+ [DVB_DEVICE_FRONTEND] = "frontend",
+ [DVB_DEVICE_DEMUX] = "demux",
+ [DVB_DEVICE_DVR] = "dvr",
+ [DVB_DEVICE_CA] = "ca",
+ [DVB_DEVICE_NET] = "net",
+ [DVB_DEVICE_OSD] = "osd"
};
#ifdef CONFIG_DVB_DYNAMIC_MINORS
@@ -60,7 +67,22 @@ static const char * const dnames[] = {
#define DVB_MAX_IDS MAX_DVB_MINORS
#else
#define DVB_MAX_IDS 4
-#define nums2minor(num, type, id) ((num << 6) | (id << 4) | type)
+
+static const u8 minor_type[] = {
+ [DVB_DEVICE_VIDEO] = 0,
+ [DVB_DEVICE_AUDIO] = 1,
+ [DVB_DEVICE_SEC] = 2,
+ [DVB_DEVICE_FRONTEND] = 3,
+ [DVB_DEVICE_DEMUX] = 4,
+ [DVB_DEVICE_DVR] = 5,
+ [DVB_DEVICE_CA] = 6,
+ [DVB_DEVICE_NET] = 7,
+ [DVB_DEVICE_OSD] = 8,
+};
+
+#define nums2minor(num, type, id) \
+ (((num) << 6) | ((id) << 4) | minor_type[type])
+
#define MAX_DVB_MINORS (DVB_MAX_ADAPTERS*64)
#endif
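With the table above, the static minor layout stays adapter << 6 | id << 4 | type-index, but the type index now comes from minor_type[] instead of the raw enum value. A worked example:

/* nums2minor(1, DVB_DEVICE_DEMUX, 2):
 *   (1 << 6) | (2 << 4) | minor_type[DVB_DEVICE_DEMUX]
 * = 0x40     | 0x20     | 4
 * = 0x64  (minor 100: adapter 1, demux, id 2)
 */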
@@ -426,8 +448,8 @@ static int dvb_register_media_device(struct dvb_device *dvbdev,
}
int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
- const struct dvb_device *template, void *priv, int type,
- int demux_sink_pads)
+ const struct dvb_device *template, void *priv,
+ enum dvb_device_type type, int demux_sink_pads)
{
struct dvb_device *dvbdev;
struct file_operations *dvbdevfops;
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 49189392cf3b..bbc1c20c0529 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -35,15 +35,37 @@
#define DVB_UNSET (-1)
-#define DVB_DEVICE_VIDEO 0
-#define DVB_DEVICE_AUDIO 1
-#define DVB_DEVICE_SEC 2
-#define DVB_DEVICE_FRONTEND 3
-#define DVB_DEVICE_DEMUX 4
-#define DVB_DEVICE_DVR 5
-#define DVB_DEVICE_CA 6
-#define DVB_DEVICE_NET 7
-#define DVB_DEVICE_OSD 8
+/* List of DVB device types */
+
+/**
+ * enum dvb_device_type - type of the Digital TV device
+ *
+ * @DVB_DEVICE_SEC: Digital TV standalone Common Interface (CI)
+ * @DVB_DEVICE_FRONTEND: Digital TV frontend.
+ * @DVB_DEVICE_DEMUX: Digital TV demux.
+ * @DVB_DEVICE_DVR: Digital TV digital video record (DVR).
+ * @DVB_DEVICE_CA: Digital TV Conditional Access (CA).
+ * @DVB_DEVICE_NET: Digital TV network.
+ *
+ * @DVB_DEVICE_VIDEO: Digital TV video decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_AUDIO: Digital TV audio decoder.
+ * Deprecated. Used only on av7110-av.
+ * @DVB_DEVICE_OSD: Digital TV On Screen Display (OSD).
+ * Deprecated. Used only on av7110.
+ */
+enum dvb_device_type {
+ DVB_DEVICE_SEC,
+ DVB_DEVICE_FRONTEND,
+ DVB_DEVICE_DEMUX,
+ DVB_DEVICE_DVR,
+ DVB_DEVICE_CA,
+ DVB_DEVICE_NET,
+
+ DVB_DEVICE_VIDEO,
+ DVB_DEVICE_AUDIO,
+ DVB_DEVICE_OSD,
+};
#define DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr) \
static short adapter_nr[] = \
@@ -104,8 +126,7 @@ struct dvb_adapter {
* @list_head: List head with all DVB devices
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
- * @type: type of the device: DVB_DEVICE_SEC, DVB_DEVICE_FRONTEND,
- * DVB_DEVICE_DEMUX, DVB_DEVICE_DVR, DVB_DEVICE_CA, DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @minor: devnode minor number. Major number is always DVB_MAJOR.
* @id: device ID number, inside the adapter
* @readers: Initialized by the caller. Each call to open() in Read Only mode
@@ -135,7 +156,7 @@ struct dvb_device {
struct list_head list_head;
const struct file_operations *fops;
struct dvb_adapter *adapter;
- int type;
+ enum dvb_device_type type;
int minor;
u32 id;
@@ -194,9 +215,7 @@ int dvb_unregister_adapter(struct dvb_adapter *adap);
* stored
* @template: Template used to create &pdvbdev;
* @priv: private data
- * @type: type of the device: %DVB_DEVICE_SEC, %DVB_DEVICE_FRONTEND,
- * %DVB_DEVICE_DEMUX, %DVB_DEVICE_DVR, %DVB_DEVICE_CA,
- * %DVB_DEVICE_NET
+ * @type: type of the device, as defined by &enum dvb_device_type.
* @demux_sink_pads: Number of demux outputs, to be used to create the TS
* outputs via the Media Controller.
*/
@@ -204,7 +223,7 @@ int dvb_register_device(struct dvb_adapter *adap,
struct dvb_device **pdvbdev,
const struct dvb_device *template,
void *priv,
- int type,
+ enum dvb_device_type type,
int demux_sink_pads);
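Callers now pass the device type as the enum rather than a bare integer. A hedged sketch of a registration call (struct foo_dev, foo_demux_template and the use of dev as private data are hypothetical):

static int foo_register_demux_node(struct foo_dev *dev)
{
	return dvb_register_device(&dev->adapter, &dev->demux_dvbdev,
				   &foo_demux_template, dev,
				   DVB_DEVICE_DEMUX, 0);
}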
/**
@@ -242,7 +261,7 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
* dvb_create_media_graph - Creates media graph for the Digital TV part of the
* device.
*
- * @adap: pointer to struct dvb_adapter
+ * @adap: pointer to &struct dvb_adapter
* @create_rf_connector: if true, it creates the RF connector too
*
* This function checks all DVB-related functions at the media controller
@@ -255,12 +274,23 @@ void dvb_unregister_device(struct dvb_device *dvbdev);
__must_check int dvb_create_media_graph(struct dvb_adapter *adap,
bool create_rf_connector);
+/**
+ * dvb_register_media_controller - registers a media controller at DVB adapter
+ *
+ * @adap: pointer to &struct dvb_adapter
+ * @mdev: pointer to &struct media_device
+ */
static inline void dvb_register_media_controller(struct dvb_adapter *adap,
struct media_device *mdev)
{
adap->mdev = mdev;
}
+/**
+ * dvb_get_media_controller - gets the associated media controller
+ *
+ * @adap: pointer to &struct dvb_adapter
+ */
static inline struct media_device
*dvb_get_media_controller(struct dvb_adapter *adap)
{
@@ -277,20 +307,71 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
#define dvb_get_media_controller(a) NULL
#endif
-int dvb_generic_open (struct inode *inode, struct file *file);
-int dvb_generic_release (struct inode *inode, struct file *file);
-long dvb_generic_ioctl (struct file *file,
- unsigned int cmd, unsigned long arg);
+/**
+ * dvb_generic_open - Digital TV open function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid and if the permissions are
+ * OK, and increments the negative use count.
+ */
+int dvb_generic_open(struct inode *inode, struct file *file);
-/* we don't mess with video_usercopy() any more,
-we simply define out own dvb_usercopy(), which will hopefully become
-generic_usercopy() someday... */
+/**
+ * dvb_generic_release - Digital TV close function, used by DVB devices
+ *
+ * @inode: pointer to &struct inode.
+ * @file: pointer to &struct file.
+ *
+ * Checks if a DVB devnode is still valid and if the permissions are
+ * OK, and decrements the negative use count.
+ */
+int dvb_generic_release(struct inode *inode, struct file *file);
+/**
+ * dvb_generic_ioctl - Digital TV ioctl function, used by DVB devices
+ *
+ * @file: pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ *
+ * Checks if a DVB devnode and &struct dvb_device.kernel_ioctl are still valid.
+ * If so, calls dvb_usercopy().
+ */
+long dvb_generic_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+/**
+ * dvb_usercopy - copies data from/to userspace memory when an ioctl is
+ * issued.
+ *
+ * @file: Pointer to &struct file.
+ * @cmd: Ioctl name.
+ * @arg: Ioctl argument.
+ * @func: function that will actually handle the ioctl
+ *
+ * Ancillary function that uses ioctl direction and size to copy from
+ * userspace. Then, it calls @func, and, if needed, data is copied back
+ * to userspace.
+ */
int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
int (*func)(struct file *file, unsigned int cmd, void *arg));
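Putting the generic helpers together: dvb_generic_ioctl() validates the devnode and then lets dvb_usercopy() do the user/kernel copying around the device's kernel_ioctl callback. A hedged sketch of how a driver might wire this up (the FE_GET_INFO handling and all foo_* names are purely illustrative):

/* The devnode template would set .fops = &foo_fops and
 * .kernel_ioctl = foo_do_ioctl; parg already points to kernel
 * memory prepared by dvb_usercopy(). */
static int foo_do_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	switch (cmd) {
	case FE_GET_INFO:		/* an _IOR ioctl: fill parg */
		memcpy(parg, &foo_fe_info, sizeof(struct dvb_frontend_info));
		return 0;
	default:
		return -ENOTTY;
	}
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_generic_open,
	.release	= dvb_generic_release,
};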
/** generic DVB attach function. */
#ifdef CONFIG_MEDIA_ATTACH
+
+/**
+ * dvb_attach - attaches a DVB frontend into the DVB core.
+ *
+ * @FUNCTION: function on a frontend module to be called.
+ * @ARGS...: @FUNCTION arguments.
+ *
+ * This ancillary function loads a frontend module at runtime and runs
+ * the @FUNCTION function there, with @ARGS.
+ * As it increments the symbol usage count, dvb_detach() should be
+ * called at unregister time.
+ */
#define dvb_attach(FUNCTION, ARGS...) ({ \
void *__r = NULL; \
typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
@@ -304,6 +385,14 @@ int dvb_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
__r; \
})
+/**
+ * dvb_detach - detaches a DVB frontend loaded via dvb_attach()
+ *
+ * @FUNC: attach function
+ *
+ * Decrements usage count for a function previously called via dvb_attach().
+ */
+
#define dvb_detach(FUNC) symbol_put_addr(FUNC)
#else
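Typical use of the pair documented above, as seen from a bridge driver; the foo_* objects are hypothetical, and cx24116_attach() is just one example of an attach function:

/* Sketch: attach a demod at probe time. */
static int foo_frontend_attach(struct foo_dev *dev)
{
	dev->fe = dvb_attach(cx24116_attach, &foo_cx24116_config,
			     &dev->i2c_adap);
	if (!dev->fe)
		return -ENODEV;
	return 0;
}

/* ...and drop the module reference dvb_attach() took at teardown: */
static void foo_frontend_detach(struct foo_dev *dev)
{
	dvb_detach(cx24116_attach);
}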
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2631d0e0a024..d17722eb4456 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -173,7 +173,7 @@ config DVB_STB6000
tristate "ST STB6000 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0299
@@ -187,7 +187,7 @@ config DVB_STV6110
tristate "ST STV6110 silicon tuner"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
- help
+ help
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_STV0900
@@ -902,7 +902,7 @@ config DVB_HELENE
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
- Say Y when you want to support this frontend.
+ Say Y when you want to support this frontend.
comment "Tools to develop new frontends"
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index f45f6a4a4371..d025eb373842 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel DVB frontend device drivers.
#
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 98d575f2744c..b1c84ee914f0 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -455,11 +455,10 @@ struct dvb_frontend *as102_attach(const char *name,
struct as102_state *state;
struct dvb_frontend *fe;
- state = kzalloc(sizeof(struct as102_state), GFP_KERNEL);
- if (state == NULL) {
- pr_err("%s: unable to allocate memory for state\n", __func__);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
return NULL;
- }
+
fe = &state->frontend;
fe->demodulator_priv = state;
state->ops = ops;
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 0118c2658cf7..ee1f704f81f2 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -552,13 +552,11 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
const struct cx24113_config *config, struct i2c_adapter *i2c)
{
/* allocate memory for the internal state */
- struct cx24113_state *state =
- kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
+ struct cx24113_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
int rc;
- if (state == NULL) {
- cx_err("Unable to kzalloc\n");
- goto error;
- }
+
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index e105532bfba8..8fb3f095e21c 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -221,16 +221,13 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
static int cx24116_writeregN(struct cx24116_state *state, int reg,
const u8 *data, u16 len)
{
- int ret = -EREMOTEIO;
+ int ret;
struct i2c_msg msg;
u8 *buf;
buf = kmalloc(len + 1, GFP_KERNEL);
- if (buf == NULL) {
- printk("Unable to kmalloc\n");
- ret = -ENOMEM;
- goto error;
- }
+ if (!buf)
+ return -ENOMEM;
*(buf) = reg;
memcpy(buf + 1, data, len);
@@ -251,7 +248,6 @@ static int cx24116_writeregN(struct cx24116_state *state, int reg,
ret = -EREMOTEIO;
}
-error:
kfree(buf);
return ret;
@@ -1121,15 +1117,15 @@ static const struct dvb_frontend_ops cx24116_ops;
struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
struct i2c_adapter *i2c)
{
- struct cx24116_state *state = NULL;
+ struct cx24116_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct cx24116_state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto error1;
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -1138,8 +1134,9 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
ret = (cx24116_readreg(state, 0xFF) << 8) |
cx24116_readreg(state, 0xFE);
if (ret != 0x0501) {
+ kfree(state);
printk(KERN_INFO "Invalid probe, probably not a CX24116 device\n");
- goto error2;
+ return NULL;
}
/* create dvb_frontend */
@@ -1147,9 +1144,6 @@ struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
sizeof(struct dvb_frontend_ops));
state->frontend.demodulator_priv = state;
return &state->frontend;
-
-error2: kfree(state);
-error1: return NULL;
}
EXPORT_SYMBOL(cx24116_attach);
diff --git a/drivers/media/dvb-frontends/dib7000m.h b/drivers/media/dvb-frontends/dib7000m.h
index 8f84dfa9bb58..df7ecb4314cd 100644
--- a/drivers/media/dvb-frontends/dib7000m.h
+++ b/drivers/media/dvb-frontends/dib7000m.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DIB7000M_H
#define DIB7000M_H
diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
index 205fbbff632b..2e10b5ccce67 100644
--- a/drivers/media/dvb-frontends/dib7000p.h
+++ b/drivers/media/dvb-frontends/dib7000p.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DIB7000P_H
#define DIB7000P_H
diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
index 75cc8e47ec8f..b920fe769021 100644
--- a/drivers/media/dvb-frontends/dib8000.h
+++ b/drivers/media/dvb-frontends/dib8000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DIB8000_H
#define DIB8000_H
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index 40883b41e66b..bb03362ac7a3 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DIB9000_H
#define DIB9000_H
diff --git a/drivers/media/dvb-frontends/dibx000_common.h b/drivers/media/dvb-frontends/dibx000_common.h
index 61f4152f24ee..8784af962eba 100644
--- a/drivers/media/dvb-frontends/dibx000_common.h
+++ b/drivers/media/dvb-frontends/dibx000_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DIBX000_COMMON_H
#define DIBX000_COMMON_H
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 7d04400b18dd..0696bc62dcc9 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -328,7 +328,7 @@ static int WriteTable(struct drxd_state *state, u8 * pTable)
{
int status = 0;
- if (pTable == NULL)
+ if (!pTable)
return 0;
while (!status) {
@@ -640,7 +640,7 @@ static int SetCfgIfAgc(struct drxd_state *state, struct SCfgAgc *cfg)
const u16 maxRur = 8;
static const u16 slowIncrDecLUT[] = {
3, 4, 4, 5, 6 };
- const u16 fastIncrDecLUT[] = {
+ static const u16 fastIncrDecLUT[] = {
14, 15, 15, 16,
17, 18, 18, 19,
20, 21, 22, 23,
@@ -909,9 +909,8 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
}
state->microcode = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (state->microcode == NULL) {
+ if (!state->microcode) {
release_firmware(fw);
- printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
@@ -2630,7 +2629,7 @@ static int DRXD_init(struct drxd_state *state, const u8 *fw, u32 fw_size)
break;
/* Apply I2c address patch to B1 */
- if (!state->type_A && state->m_HiI2cPatch != NULL) {
+ if (!state->type_A && state->m_HiI2cPatch) {
status = WriteTable(state, state->m_HiI2cPatch);
if (status < 0)
break;
diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h
index a629897eb905..eb9bdc9f59c4 100644
--- a/drivers/media/dvb-frontends/drxk.h
+++ b/drivers/media/dvb-frontends/drxk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DRXK_H_
#define _DRXK_H_
diff --git a/drivers/media/dvb-frontends/drxk_hard.h b/drivers/media/dvb-frontends/drxk_hard.h
index 9ed88e014942..a850a876deee 100644
--- a/drivers/media/dvb-frontends/drxk_hard.h
+++ b/drivers/media/dvb-frontends/drxk_hard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "drxk_map.h"
#define DRXK_VERSION_MAJOR 0
diff --git a/drivers/media/dvb-frontends/drxk_map.h b/drivers/media/dvb-frontends/drxk_map.h
index 761613f9fd5a..9234ef4fb68d 100644
--- a/drivers/media/dvb-frontends/drxk_map.h
+++ b/drivers/media/dvb-frontends/drxk_map.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define AUD_COMM_EXEC__A 0x1000000
#define AUD_COMM_EXEC_STOP 0x0
#define FEC_COMM_EXEC__A 0x1C00000
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 0b17a45c5640..bd4f8278c906 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -277,10 +277,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
u8 *buf;
buf = kmalloc(33, GFP_KERNEL);
- if (buf == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
+ if (!buf)
return -ENOMEM;
- }
*(buf) = reg;
@@ -835,17 +833,15 @@ static const struct dvb_frontend_ops ds3000_ops;
struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
struct i2c_adapter *i2c)
{
- struct ds3000_state *state = NULL;
+ struct ds3000_state *state;
int ret;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct ds3000_state), GFP_KERNEL);
- if (state == NULL) {
- printk(KERN_ERR "Unable to kmalloc\n");
- goto error2;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
state->config = config;
state->i2c = i2c;
@@ -854,8 +850,9 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
/* check if the demod is present */
ret = ds3000_readreg(state, 0x00) & 0xfe;
if (ret != 0xe0) {
+ kfree(state);
printk(KERN_ERR "Invalid probe, probably not a DS3000\n");
- goto error3;
+ return NULL;
}
printk(KERN_INFO "DS3000 chip version: %d.%d attached.\n",
@@ -873,11 +870,6 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
*/
ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
return &state->frontend;
-
-error3:
- kfree(state);
-error2:
- return NULL;
}
EXPORT_SYMBOL(ds3000_attach);
diff --git a/drivers/media/dvb-frontends/dvb-pll.h b/drivers/media/dvb-frontends/dvb-pll.h
index bf9602a88b6c..6aaa9c6bff9c 100644
--- a/drivers/media/dvb-frontends/dvb-pll.h
+++ b/drivers/media/dvb-frontends/dvb-pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* descriptions + helper functions for simple dvb plls.
*/
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index 5798079add10..9854096839ae 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1048,16 +1048,6 @@ fail:
return ret;
}
-static int lg216x_get_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- return (DTV_ATSCMH_FIC_VER == tvp->cmd) ?
- lg216x_get_frontend(fe, c) : 0;
-}
-
-
static int lg2160_set_frontend(struct dvb_frontend *fe)
{
struct lg216x_state *state = fe->demodulator_priv;
@@ -1368,8 +1358,6 @@ static const struct dvb_frontend_ops lg2160_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
@@ -1396,8 +1384,6 @@ static const struct dvb_frontend_ops lg2161_ops = {
.init = lg216x_init,
.sleep = lg216x_sleep,
#endif
- .get_property = lg216x_get_property,
-
.set_frontend = lg2160_set_frontend,
.get_frontend = lg216x_get_frontend,
.get_tune_settings = lg216x_get_tune_settings,
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index c9b1eb38444e..724e9aac0f11 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/div64.h>
+#include <linux/kernel.h>
#include <linux/dvb/frontend.h>
#include "dvb_math.h"
#include "lgdt3306a.h"
@@ -2072,7 +2073,7 @@ static const short regtab[] = {
0x30aa, /* MPEGLOCK */
};
-#define numDumpRegs (sizeof(regtab)/sizeof(regtab[0]))
+#define numDumpRegs (ARRAY_SIZE(regtab))
static u8 regval1[numDumpRegs] = {0, };
static u8 regval2[numDumpRegs] = {0, };
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index e8ac8c3e2ec0..bdaf9d235fed 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -2071,12 +2071,9 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
dev_dbg(&i2c->dev, "%s called.\n", __func__);
/* allocate memory for the internal state */
- state = kzalloc(sizeof(struct mb86a20s_state), GFP_KERNEL);
- if (state == NULL) {
- dev_err(&i2c->dev,
- "%s: unable to allocate memory for state\n", __func__);
- goto error;
- }
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
/* setup the state */
state->config = config;
@@ -2089,22 +2086,16 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
/* Check if it is a mb86a20s frontend */
rev = mb86a20s_readreg(state, 0);
-
- if (rev == 0x13) {
- dev_info(&i2c->dev,
- "Detected a Fujitsu mb86a20s frontend\n");
- } else {
+ if (rev != 0x13) {
+ kfree(state);
dev_dbg(&i2c->dev,
"Frontend revision %d is unknown - aborting.\n",
rev);
- goto error;
+ return NULL;
}
+ dev_info(&i2c->dev, "Detected a Fujitsu mb86a20s frontend\n");
return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
}
EXPORT_SYMBOL(mb86a20s_attach);
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 676c96c216c3..53064e11f5f1 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -43,7 +43,7 @@
#define BYTE2(v) ((v >> 16) & 0xff)
#define BYTE3(v) ((v >> 24) & 0xff)
-LIST_HEAD(mxllist);
+static LIST_HEAD(mxllist);
struct mxl_base {
struct list_head mxllist;
diff --git a/drivers/media/dvb-frontends/nxt6000_priv.h b/drivers/media/dvb-frontends/nxt6000_priv.h
index 0422e580038a..d317df02c8d7 100644
--- a/drivers/media/dvb-frontends/nxt6000_priv.h
+++ b/drivers/media/dvb-frontends/nxt6000_priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Public Include File for DRV6000 users
* (ie. NxtWave Communications - NXT6000 demodulator driver)
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 172fc367ccaa..41d9c513b7e8 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -696,7 +696,6 @@ static int si2168_probe(struct i2c_client *client,
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/si21xx.h b/drivers/media/dvb-frontends/si21xx.h
index b1be62f1983a..43d480bb6ea2 100644
--- a/drivers/media/dvb-frontends/si21xx.h
+++ b/drivers/media/dvb-frontends/si21xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SI21XX_H
#define SI21XX_H
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 43d47dfcc7b8..53e66c232d3c 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -357,14 +357,14 @@ static int sp2_exit(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- if (client == NULL)
+ if (!client)
return 0;
s = i2c_get_clientdata(client);
- if (s == NULL)
+ if (!s)
return 0;
- if (s->ca.data == NULL)
+ if (!s->ca.data)
return 0;
dvb_ca_en50221_release(&s->ca);
@@ -381,10 +381,9 @@ static int sp2_probe(struct i2c_client *client,
dev_dbg(&client->dev, "\n");
- s = kzalloc(sizeof(struct sp2), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ret = -ENOMEM;
- dev_err(&client->dev, "kzalloc() failed\n");
goto err;
}
diff --git a/drivers/media/dvb-frontends/sp887x.h b/drivers/media/dvb-frontends/sp887x.h
index 412f011e6dfd..a680cc22379c 100644
--- a/drivers/media/dvb-frontends/sp887x.h
+++ b/drivers/media/dvb-frontends/sp887x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
Driver for the Spase sp887x demodulator
*/
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index 45cbc898ad25..67f91814b9f7 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -447,12 +447,6 @@ static int stv0288_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return 0;
}
-static int stv0288_set_property(struct dvb_frontend *fe, struct dtv_property *p)
-{
- dprintk("%s(..)\n", __func__);
- return 0;
-}
-
static int stv0288_set_frontend(struct dvb_frontend *fe)
{
struct stv0288_state *state = fe->demodulator_priv;
@@ -567,7 +561,6 @@ static const struct dvb_frontend_ops stv0288_ops = {
.set_tone = stv0288_set_tone,
.set_voltage = stv0288_set_voltage,
- .set_property = stv0288_set_property,
.set_frontend = stv0288_set_frontend,
};
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index e4fd9c1b0560..6aad0efa3174 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -258,11 +258,9 @@ static int stv6110_get_frequency(struct dvb_frontend *fe, u32 *frequency)
static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
{
struct stv6110_priv *priv = fe->tuner_priv;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u8 ret = 0x04;
u32 divider, ref, p, presc, i, result_freq, vco_freq;
s32 p_calc, p_calc_opt = 1000, r_div, r_div_opt = 0, p_val;
- s32 srate;
dprintk("%s, freq=%d kHz, mclk=%d Hz\n", __func__,
frequency, priv->mclk);
@@ -273,13 +271,6 @@ static int stv6110_set_frequency(struct dvb_frontend *fe, u32 frequency)
((((priv->mclk / 1000000) - 16) & 0x1f) << 3);
/* BB_GAIN = db/2 */
- if (fe->ops.set_property && fe->ops.get_property) {
- srate = c->symbol_rate;
- dprintk("%s: Get Frontend parameters: srate=%d\n",
- __func__, srate);
- } else
- srate = 15000000;
-
priv->regs[RSTV6110_CTRL2] &= ~0x0f;
priv->regs[RSTV6110_CTRL2] |= (priv->gain & 0x0f);
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.h b/drivers/media/dvb-frontends/tda18271c2dd.h
index e6ccf240f54c..289653db68e4 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TDA18271C2DD_H_
#define _TDA18271C2DD_H_
diff --git a/drivers/media/dvb-frontends/tda18271c2dd_maps.h b/drivers/media/dvb-frontends/tda18271c2dd_maps.h
index f3bca5c237d7..5f75516bc0cb 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd_maps.h
+++ b/drivers/media/dvb-frontends/tda18271c2dd_maps.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
enum HF_S {
HF_None = 0, HF_B, HF_DK, HF_G, HF_I, HF_L, HF_L1, HF_MN, HF_FM_Radio,
HF_AnalogMax, HF_DVBT_6MHZ, HF_DVBT_7MHZ, HF_DVBT_8MHZ,
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 94153895fcd4..3c6d6428f525 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -354,6 +354,14 @@ config VIDEO_TC358743
To compile this driver as a module, choose M here: the
module will be called tc358743.
+config VIDEO_TC358743_CEC
+ bool "Enable Toshiba TC358743 CEC support"
+ depends on VIDEO_TC358743
+ select CEC_CORE
+ ---help---
+ When selected the tc358743 will support the optional
+ HDMI CEC feature.
+
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
@@ -547,6 +555,14 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_IMX274
+ tristate "Sony IMX274 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a V4L2 sensor-level driver for the Sony IMX274
+ CMOS image sensor.
+
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
depends on VIDEO_V4L2 && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index c843c181dfb9..548a9efce966 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
msp3400-objs := msp3400-driver.o msp3400-kthreads.o
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
@@ -92,5 +93,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 3df28f2f9b38..6fb818a775db 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1328,7 +1328,7 @@ static int adv7180_probe(struct i2c_client *client,
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
ret = adv7180_init_controls(state);
if (ret)
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
index b33ccfc08708..4aa8e45b5cd3 100644
--- a/drivers/media/i2c/adv748x/adv748x-afe.c
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -217,6 +217,7 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int afe_std;
int ret;
mutex_lock(&state->mutex);
@@ -235,8 +236,12 @@ static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
/* Read detected standard */
ret = adv748x_afe_status(afe, NULL, std);
+ afe_std = adv748x_afe_std(afe->curr_norm);
+ if (afe_std < 0)
+ goto unlock;
+
/* Restore original state */
- adv748x_afe_set_video_standard(state, afe->curr_norm);
+ adv748x_afe_set_video_standard(state, afe_std);
unlock:
mutex_unlock(&state->mutex);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f289b8aca1da..c786cd125417 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1948,7 +1948,7 @@ static int adv76xx_set_format(struct v4l2_subdev *sd,
return -EINVAL;
info = adv76xx_format_info(state, format->format.code);
- if (info == NULL)
+ if (!info)
info = adv76xx_format_info(state, MEDIA_BUS_FMT_YUYV8_2X8);
adv76xx_fill_format(state, &format->format);
@@ -2256,7 +2256,7 @@ static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
return 0;
}
- if (data == NULL)
+ if (!data)
return -ENODATA;
if (edid->start_block >= state->edid.blocks)
@@ -3316,10 +3316,8 @@ static int adv76xx_probe(struct i2c_client *client,
client->addr << 1);
state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv76xx_state memory!\n");
+ if (!state)
return -ENOMEM;
- }
state->i2c_clients[ADV76XX_PAGE_IO] = client;
@@ -3482,7 +3480,7 @@ static int adv76xx_probe(struct i2c_client *client,
state->i2c_clients[i] =
adv76xx_dummy_client(sd, state->pdata.i2c_addresses[i],
0xf2 + i);
- if (state->i2c_clients[i] == NULL) {
+ if (!state->i2c_clients[i]) {
err = -ENOMEM;
v4l2_err(sd, "failed to create i2c client %u\n", i);
goto err_i2c;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 65f34e7e146f..136aa80a834b 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3467,11 +3467,9 @@ static int adv7842_probe(struct i2c_client *client,
return -ENODEV;
}
- state = devm_kzalloc(&client->dev, sizeof(struct adv7842_state), GFP_KERNEL);
- if (!state) {
- v4l_err(client, "Could not allocate adv7842_state memory!\n");
+ state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
return -ENOMEM;
- }
/* platform data */
state->pdata = *pdata;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 39f51daa7558..f38bf819d805 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -1745,7 +1745,7 @@ static int cx25840_g_std(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- v4l2_std_id stds[] = {
+ static const v4l2_std_id stds[] = {
/* 0000 */ V4L2_STD_UNKNOWN,
/* 0001 */ V4L2_STD_NTSC_M,
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 95af4fc99cd0..ed01e8bd4331 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -21,6 +21,11 @@
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position
+ */
+#define DW9714_FOCUS_STEPS 1
+/*
* This acts as the minimum granularity of lens movement.
* Keep this value power of 2, so the control steps can be
* uniformly adjusted for gradual lens movement, with desired
@@ -137,7 +142,7 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
v4l2_ctrl_handler_init(hdl, 1);
v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
- 0, DW9714_MAX_FOCUS_POS, DW9714_CTRL_STEPS, 0);
+ 0, DW9714_MAX_FOCUS_POS, DW9714_FOCUS_STEPS, 0);
if (hdl->error)
dev_err(&client->dev, "%s fail error: 0x%x\n",
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index c14f0fd6ded3..e9eff9039ef5 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1453,7 +1453,7 @@ static int et8ek8_probe(struct i2c_client *client,
goto err_mutex;
}
- ret = v4l2_async_register_subdev(&sensor->subdev);
+ ret = v4l2_async_register_subdev_sensor_common(&sensor->subdev);
if (ret < 0)
goto err_entity;
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
new file mode 100644
index 000000000000..800b9bf9cdd3
--- /dev/null
+++ b/drivers/media/i2c/imx274.c
@@ -0,0 +1,1811 @@
+/*
+ * imx274.c - IMX274 CMOS Image Sensor driver
+ *
+ * Copyright (C) 2017, Leopard Imaging, Inc.
+ *
+ * Leon Luo <leonl@leopardimaging.com>
+ * Edwin Zou <edwinz@leopardimaging.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+/*
+ * See "SHR, SVR Setting" in datasheet
+ */
+#define IMX274_DEFAULT_FRAME_LENGTH (4550)
+#define IMX274_MAX_FRAME_LENGTH (0x000fffff)
+
+/*
+ * See "Frame Rate Adjustment" in datasheet
+ */
+#define IMX274_PIXCLK_CONST1 (72000000)
+#define IMX274_PIXCLK_CONST2 (1000000)
+
+/*
+ * The input gain is shifted by IMX274_GAIN_SHIFT to get
+ * decimal number. The real gain is
+ * (float)input_gain_value / (1 << IMX274_GAIN_SHIFT)
+ */
+#define IMX274_GAIN_SHIFT (8)
+#define IMX274_GAIN_SHIFT_MASK ((1 << IMX274_GAIN_SHIFT) - 1)
+
+/*
+ * See "Analog Gain" and "Digital Gain" in datasheet
+ * min gain is 1X
+ * max gain is calculated based on IMX274_GAIN_REG_MAX
+ */
+#define IMX274_GAIN_REG_MAX (1957)
+#define IMX274_MIN_GAIN (0x01 << IMX274_GAIN_SHIFT)
+#define IMX274_MAX_ANALOG_GAIN ((2048 << IMX274_GAIN_SHIFT)\
+ / (2048 - IMX274_GAIN_REG_MAX))
+#define IMX274_MAX_DIGITAL_GAIN (8)
+#define IMX274_DEF_GAIN (20 << IMX274_GAIN_SHIFT)
+#define IMX274_GAIN_CONST (2048) /* for gain formula */
+
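The constants above are consistent with the analog-gain mapping gain = 2048 / (2048 - register) expressed in Q8.8 fixed point (see IMX274_MAX_ANALOG_GAIN). A rough sketch of the conversion a driver might use, with range clamping omitted and the helper name hypothetical:

/* Convert a Q8.8 gain value into the analog gain register,
 * i.e. reg = 2048 - (2048 << 8) / gain; gain_q8 = 256 means 1x. */
static u16 imx274_gain_to_reg(u32 gain_q8)
{
	return IMX274_GAIN_CONST -
	       (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT) / gain_q8;
}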
+/*
+ * 1 line time in us = (HMAX / 72), minimum is 4 lines
+ */
+#define IMX274_MIN_EXPOSURE_TIME (4 * 260 / 72)
+
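For reference, with the HMAX of 260 used by the binning modes further down, one line is 260 / 72, roughly 3.6 us, so the 4-line minimum above works out to 4 * 260 / 72 = 14 us (integer division truncates the exact 14.4).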
+#define IMX274_DEFAULT_MODE IMX274_MODE_3840X2160
+#define IMX274_MAX_WIDTH (3840)
+#define IMX274_MAX_HEIGHT (2160)
+#define IMX274_MAX_FRAME_RATE (120)
+#define IMX274_MIN_FRAME_RATE (5)
+#define IMX274_DEF_FRAME_RATE (60)
+
+/*
+ * register SHR is limited to (SVR value + 1) x VMAX value - 4
+ */
+#define IMX274_SHR_LIMIT_CONST (4)
+
+/*
+ * Constants for sensor reset delay
+ */
+#define IMX274_RESET_DELAY1 (2000)
+#define IMX274_RESET_DELAY2 (2200)
+
+/*
+ * shift and mask constants
+ */
+#define IMX274_SHIFT_8_BITS (8)
+#define IMX274_SHIFT_16_BITS (16)
+#define IMX274_MASK_LSB_2_BITS (0x03)
+#define IMX274_MASK_LSB_3_BITS (0x07)
+#define IMX274_MASK_LSB_4_BITS (0x0f)
+#define IMX274_MASK_LSB_8_BITS (0x00ff)
+
+#define DRIVER_NAME "IMX274"
+
+/*
+ * IMX274 register definitions
+ */
+#define IMX274_FRAME_LENGTH_ADDR_1 0x30FA /* VMAX, MSB */
+#define IMX274_FRAME_LENGTH_ADDR_2 0x30F9 /* VMAX */
+#define IMX274_FRAME_LENGTH_ADDR_3 0x30F8 /* VMAX, LSB */
+#define IMX274_SVR_REG_MSB 0x300F /* SVR */
+#define IMX274_SVR_REG_LSB 0x300E /* SVR */
+#define IMX274_HMAX_REG_MSB 0x30F7 /* HMAX */
+#define IMX274_HMAX_REG_LSB 0x30F6 /* HMAX */
+#define IMX274_COARSE_TIME_ADDR_MSB 0x300D /* SHR */
+#define IMX274_COARSE_TIME_ADDR_LSB 0x300C /* SHR */
+#define IMX274_ANALOG_GAIN_ADDR_LSB 0x300A /* ANALOG GAIN LSB */
+#define IMX274_ANALOG_GAIN_ADDR_MSB 0x300B /* ANALOG GAIN MSB */
+#define IMX274_DIGITAL_GAIN_REG 0x3012 /* Digital Gain */
+#define IMX274_VFLIP_REG 0x301A /* VERTICAL FLIP */
+#define IMX274_TEST_PATTERN_REG 0x303D /* TEST PATTERN */
+#define IMX274_STANDBY_REG 0x3000 /* STANDBY */
+
+#define IMX274_TABLE_WAIT_MS 0
+#define IMX274_TABLE_END 1
+
+/*
+ * imx274 I2C operation related structure
+ */
+struct reg_8 {
+ u16 addr;
+ u8 val;
+};
+
+static const struct regmap_config imx274_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+enum imx274_mode {
+ IMX274_MODE_3840X2160,
+ IMX274_MODE_1920X1080,
+ IMX274_MODE_1280X720,
+
+ IMX274_MODE_START_STREAM_1,
+ IMX274_MODE_START_STREAM_2,
+ IMX274_MODE_START_STREAM_3,
+ IMX274_MODE_START_STREAM_4,
+ IMX274_MODE_STOP_STREAM
+};
+
+/*
+ * imx274 format related structure
+ */
+struct imx274_frmfmt {
+ u32 mbus_code;
+ enum v4l2_colorspace colorspace;
+ struct v4l2_frmsize_discrete size;
+ enum imx274_mode mode;
+};
+
+/*
+ * imx274 test pattern related structure
+ */
+enum {
+ TEST_PATTERN_DISABLED = 0,
+ TEST_PATTERN_ALL_000H,
+ TEST_PATTERN_ALL_FFFH,
+ TEST_PATTERN_ALL_555H,
+ TEST_PATTERN_ALL_AAAH,
+ TEST_PATTERN_VSP_5AH, /* VERTICAL STRIPE PATTERN 555H/AAAH */
+ TEST_PATTERN_VSP_A5H, /* VERTICAL STRIPE PATTERN AAAH/555H */
+ TEST_PATTERN_VSP_05H, /* VERTICAL STRIPE PATTERN 000H/555H */
+ TEST_PATTERN_VSP_50H, /* VERTICAL STRIPE PATTERN 555H/000H */
+ TEST_PATTERN_VSP_0FH, /* VERTICAL STRIPE PATTERN 000H/FFFH */
+ TEST_PATTERN_VSP_F0H, /* VERTICAL STRIPE PATTERN FFFH/000H */
+ TEST_PATTERN_H_COLOR_BARS,
+ TEST_PATTERN_V_COLOR_BARS,
+};
+
+static const char * const tp_qmenu[] = {
+ "Disabled",
+ "All 000h Pattern",
+ "All FFFh Pattern",
+ "All 555h Pattern",
+ "All AAAh Pattern",
+ "Vertical Stripe (555h / AAAh)",
+ "Vertical Stripe (AAAh / 555h)",
+ "Vertical Stripe (000h / 555h)",
+ "Vertical Stripe (555h / 000h)",
+ "Vertical Stripe (000h / FFFh)",
+ "Vertical Stripe (FFFh / 000h)",
+ "Horizontal Color Bars",
+ "Vertical Color Bars",
+};
+
+/*
+ * All-pixel scan mode (10-bit)
+ * imx274 mode1(refer to datasheet) register configuration with
+ * 3840x2160 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode1_3840x2160_raw10[] = {
+ {0x3004, 0x01},
+ {0x3005, 0x01},
+ {0x3006, 0x00},
+ {0x3007, 0x02},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x01},
+ {0x30F6, 0x07}, /* HMAX, 263 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* crop to 2160 */
+ {0x30de, 0x06},
+ {0x30df, 0x00},
+ {0x30e0, 0x12},
+ {0x30e1, 0x00},
+ {0x3037, 0x01}, /* to crop to 3840 */
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x86},
+ {0x3131, 0x08},
+ {0x3132, 0x7E},
+ {0x3133, 0x08},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x16},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x1F},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Horizontal/vertical 2/2-line binning
+ * (Horizontal and vertical weighted binning, 10-bit)
+ * imx274 mode3(refer to datasheet) register configuration with
+ * 1920x1080 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode3_1920x1080_raw10[] = {
+ {0x3004, 0x02},
+ {0x3005, 0x21},
+ {0x3006, 0x00},
+ {0x3007, 0x11},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x02},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30dd, 0x01}, /* to crop to 1920x1080 */
+ {0x30de, 0x05},
+ {0x30df, 0x00},
+ {0x30e0, 0x04},
+ {0x30e1, 0x00},
+ {0x3037, 0x01},
+ {0x3038, 0x0c},
+ {0x3039, 0x00},
+ {0x303a, 0x0c},
+ {0x303b, 0x0f},
+
+ {0x30EE, 0x01},
+ {0x3130, 0x4E},
+ {0x3131, 0x04},
+ {0x3132, 0x46},
+ {0x3133, 0x04},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1A},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x1A},
+ {0x366C, 0x19},
+ {0x366D, 0x17},
+ {0x3A41, 0x08},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * Vertical 2/3 subsampling binning horizontal 3 binning
+ * imx274 mode5(refer to datasheet) register configuration with
+ * 1280x720 resolution, raw10 data and mipi four lane output
+ */
+static const struct reg_8 imx274_mode5_1280x720_raw10[] = {
+ {0x3004, 0x03},
+ {0x3005, 0x31},
+ {0x3006, 0x00},
+ {0x3007, 0x09},
+
+ {0x3018, 0xA2}, /* output XVS, HVS */
+
+ {0x306B, 0x05},
+ {0x30E2, 0x03},
+
+ {0x30F6, 0x04}, /* HMAX, 260 */
+ {0x30F7, 0x01}, /* HMAX */
+
+ {0x30DD, 0x01},
+ {0x30DE, 0x07},
+ {0x30DF, 0x00},
+ {0x40E0, 0x04},
+ {0x30E1, 0x00},
+ {0x3030, 0xD4},
+ {0x3031, 0x02},
+ {0x3032, 0xD0},
+ {0x3033, 0x02},
+
+ {0x30EE, 0x01},
+ {0x3130, 0xE2},
+ {0x3131, 0x02},
+ {0x3132, 0xDE},
+ {0x3133, 0x02},
+ {0x3342, 0x0A},
+ {0x3343, 0x00},
+ {0x3344, 0x1B},
+ {0x3345, 0x00},
+ {0x33A6, 0x01},
+ {0x3528, 0x0E},
+ {0x3554, 0x00},
+ {0x3555, 0x01},
+ {0x3556, 0x01},
+ {0x3557, 0x01},
+ {0x3558, 0x01},
+ {0x3559, 0x00},
+ {0x355A, 0x00},
+ {0x35BA, 0x0E},
+ {0x366A, 0x1B},
+ {0x366B, 0x19},
+ {0x366C, 0x17},
+ {0x366D, 0x17},
+ {0x3A41, 0x04},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 first step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_1[] = {
+ {IMX274_STANDBY_REG, 0x12},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 second step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_2[] = {
+ {0x3120, 0xF0}, /* clock settings */
+ {0x3121, 0x00}, /* clock settings */
+ {0x3122, 0x02}, /* clock settings */
+ {0x3129, 0x9C}, /* clock settings */
+ {0x312A, 0x02}, /* clock settings */
+ {0x312D, 0x02}, /* clock settings */
+
+ {0x310B, 0x00},
+
+ /* PLSTMG */
+ {0x304C, 0x00}, /* PLSTMG01 */
+ {0x304D, 0x03},
+ {0x331C, 0x1A},
+ {0x331D, 0x00},
+ {0x3502, 0x02},
+ {0x3529, 0x0E},
+ {0x352A, 0x0E},
+ {0x352B, 0x0E},
+ {0x3538, 0x0E},
+ {0x3539, 0x0E},
+ {0x3553, 0x00},
+ {0x357D, 0x05},
+ {0x357F, 0x05},
+ {0x3581, 0x04},
+ {0x3583, 0x76},
+ {0x3587, 0x01},
+ {0x35BB, 0x0E},
+ {0x35BC, 0x0E},
+ {0x35BD, 0x0E},
+ {0x35BE, 0x0E},
+ {0x35BF, 0x0E},
+ {0x366E, 0x00},
+ {0x366F, 0x00},
+ {0x3670, 0x00},
+ {0x3671, 0x00},
+
+ /* PSMIPI */
+ {0x3304, 0x32}, /* PSMIPI1 */
+ {0x3305, 0x00},
+ {0x3306, 0x32},
+ {0x3307, 0x00},
+ {0x3590, 0x32},
+ {0x3591, 0x00},
+ {0x3686, 0x32},
+ {0x3687, 0x00},
+
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 third step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_3[] = {
+ {IMX274_STANDBY_REG, 0x00},
+ {0x303E, 0x02}, /* SYS_MODE = 2 */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 fourth step register configuration for
+ * starting stream
+ */
+static const struct reg_8 imx274_start_4[] = {
+ {0x30F4, 0x00},
+ {0x3018, 0xA2}, /* XHS VHS OUTUPT */
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 register configuration for stopping the stream
+ */
+static const struct reg_8 imx274_stop[] = {
+ {IMX274_STANDBY_REG, 0x01},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 disable test pattern register configuration
+ */
+static const struct reg_8 imx274_tp_disabled[] = {
+ {0x303C, 0x00},
+ {0x377F, 0x00},
+ {0x3781, 0x00},
+ {0x370B, 0x00},
+ {IMX274_TABLE_END, 0x00}
+};
+
+/*
+ * imx274 test pattern register configuration
+ * reg 0x303D defines the test pattern modes
+ */
+static const struct reg_8 imx274_tp_regs[] = {
+ {0x303C, 0x11},
+ {0x370E, 0x01},
+ {0x377F, 0x01},
+ {0x3781, 0x01},
+ {0x370B, 0x11},
+ {IMX274_TABLE_END, 0x00}
+};
+
+static const struct reg_8 *mode_table[] = {
+ [IMX274_MODE_3840X2160] = imx274_mode1_3840x2160_raw10,
+ [IMX274_MODE_1920X1080] = imx274_mode3_1920x1080_raw10,
+ [IMX274_MODE_1280X720] = imx274_mode5_1280x720_raw10,
+
+ [IMX274_MODE_START_STREAM_1] = imx274_start_1,
+ [IMX274_MODE_START_STREAM_2] = imx274_start_2,
+ [IMX274_MODE_START_STREAM_3] = imx274_start_3,
+ [IMX274_MODE_START_STREAM_4] = imx274_start_4,
+ [IMX274_MODE_STOP_STREAM] = imx274_stop,
+};
+
+/*
+ * imx274 format related structure
+ */
+static const struct imx274_frmfmt imx274_formats[] = {
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {3840, 2160},
+ IMX274_MODE_3840X2160},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1920, 1080},
+ IMX274_MODE_1920X1080},
+ {MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, {1280, 720},
+ IMX274_MODE_1280X720},
+};
+
+/*
+ * minimal frame length for each mode
+ * refer to datasheet section "Frame Rate Adjustment (CSI-2)"
+ */
+static const int min_frame_len[] = {
+ 4550, /* mode 1, 4K */
+ 2310, /* mode 3, 1080p */
+ 2310 /* mode 5, 720p */
+};
+
+/*
+ * minimal numbers of SHR register
+ * refer to datasheet table "Shutter Setting (CSI-2)"
+ */
+static const int min_SHR[] = {
+ 12, /* mode 1, 4K */
+ 8, /* mode 3, 1080p */
+ 8 /* mode 5, 720p */
+};
+
+static const int max_frame_rate[] = {
+ 60, /* mode 1 , 4K */
+ 120, /* mode 3, 1080p */
+ 120 /* mode 5, 720p */
+};
+
+/*
+ * Number of clocks per internal offset period
+ * a constant based on mode
+ * refer to section "Integration Time in Each Readout Drive Mode (CSI-2)"
+ * in the datasheet
+ * for the implemented 3 modes, it happens to be the same number
+ */
+static const int nocpiop[] = {
+ 112, /* mode 1 , 4K */
+ 112, /* mode 3, 1080p */
+ 112 /* mode 5, 720p */
+};
+
+/*
+ * struct imx274_ctrls - imx274 ctrl structure
+ * @handler: V4L2 ctrl handler structure
+ * @exposure: Pointer to exposure ctrl structure
+ * @gain: Pointer to gain ctrl structure
+ * @vflip: Pointer to vflip ctrl structure
+ * @test_pattern: Pointer to test pattern ctrl structure
+ */
+struct imx274_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *test_pattern;
+};
+
+/*
+ * struct stimx274 - imx274 device structure
+ * @sd: V4L2 subdevice structure
+ * @pad: Media pad structure
+ * @client: Pointer to I2C client
+ * @ctrls: imx274 control structure
+ * @format: V4L2 media bus frame format structure
+ * @frame_interval: V4L2 frame interval structure
+ * @regmap: Pointer to regmap structure
+ * @reset_gpio: Pointer to reset gpio
+ * @lock: Mutex structure
+ * @mode_index: Resolution mode index
+ */
+struct stimx274 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct i2c_client *client;
+ struct imx274_ctrls ctrls;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_fract frame_interval;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct mutex lock; /* mutex lock for operations */
+ u32 mode_index;
+};
+
+/*
+ * Function declaration
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl);
+static int imx274_set_exposure(struct stimx274 *priv, int val);
+static int imx274_set_vflip(struct stimx274 *priv, int val);
+static int imx274_set_test_pattern(struct stimx274 *priv, int val);
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval);
+
+static inline void msleep_range(unsigned int delay_base)
+{
+ usleep_range(delay_base * 1000, delay_base * 1000 + 500);
+}
+
+/*
+ * v4l2_ctrl and v4l2_subdev related operations
+ */
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler,
+ struct stimx274, ctrls.handler)->sd;
+}
+
+static inline struct stimx274 *to_imx274(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct stimx274, sd);
+}
+
+/*
+ * imx274_regmap_util_write_table_8 - Function for writing register table
+ * @regmap: Pointer to device reg map structure
+ * @table: Table containing register values
+ * @wait_ms_addr: Flag for performing delay
+ * @end_addr: Flag for indicating end of table
+ *
+ * This is used to write register table into sensor's reg map.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_regmap_util_write_table_8(struct regmap *regmap,
+ const struct reg_8 table[],
+ u16 wait_ms_addr, u16 end_addr)
+{
+ int err = 0;
+ const struct reg_8 *next;
+ u8 val;
+
+ int range_start = -1;
+ int range_count = 0;
+ u8 range_vals[16];
+ int max_range_vals = ARRAY_SIZE(range_vals);
+
+ for (next = table;; next++) {
+ if ((next->addr != range_start + range_count) ||
+ (next->addr == end_addr) ||
+ (next->addr == wait_ms_addr) ||
+ (range_count == max_range_vals)) {
+ if (range_count == 1)
+ err = regmap_write(regmap,
+ range_start, range_vals[0]);
+ else if (range_count > 1)
+ err = regmap_bulk_write(regmap, range_start,
+ &range_vals[0],
+ range_count);
+
+ if (err)
+ return err;
+
+ range_start = -1;
+ range_count = 0;
+
+ /* Handle special address values */
+ if (next->addr == end_addr)
+ break;
+
+ if (next->addr == wait_ms_addr) {
+ msleep_range(next->val);
+ continue;
+ }
+ }
+
+ val = next->val;
+
+ if (range_start == -1)
+ range_start = next->addr;
+
+ range_vals[range_count++] = val;
+ }
+ return 0;
+}
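As a hedged illustration of how the writer above behaves (an editorial sketch, not part of the patch), consider a hypothetical table built from the macros defined earlier in this file; the register addresses and the 10 ms delay are example values only:

static const struct reg_8 example_table[] = {
	{IMX274_STANDBY_REG, 0x12},	/* isolated address: one regmap_write() */
	{0x3120, 0xF0},			/* consecutive addresses are collected... */
	{0x3121, 0x00},			/* ...and flushed as one regmap_bulk_write() */
	{IMX274_TABLE_WAIT_MS, 10},	/* no I2C access: sleeps about 10 ms */
	{IMX274_TABLE_END, 0x00}	/* terminator: ends the walk */
};

Fed through imx274_write_table() (defined below), this would issue one single-register write, one two-register bulk write, and then sleep roughly 10 ms before the table ends.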
+
+static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
+{
+ int err;
+
+ err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c read failed, addr = %x\n", __func__, addr);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, *val);
+ return err;
+}
+
+static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val)
+{
+ int err;
+
+ err = regmap_write(priv->regmap, addr, val);
+ if (err)
+ dev_err(&priv->client->dev,
+ "%s : i2c write failed, %x = %x\n", __func__,
+ addr, val);
+ else
+ dev_dbg(&priv->client->dev,
+ "%s : addr 0x%x, val=0x%x\n", __func__,
+ addr, val);
+ return err;
+}
+
+static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
+{
+ return imx274_regmap_util_write_table_8(priv->regmap,
+ table, IMX274_TABLE_WAIT_MS, IMX274_TABLE_END);
+}
+
+/*
+ * imx274_mode_regs - Function for setting mode registers per mode index
+ * @priv: Pointer to device structure
+ * @mode: Mode index value
+ *
+ * This is used to start the stream for a given mode index.
+ * mode = 0, start stream for sensor Mode 1: 4K/raw10
+ * mode = 1, start stream for sensor Mode 3: 1080p/raw10
+ * mode = 2, start stream for sensor Mode 5: 720p/raw10
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_mode_regs(struct stimx274 *priv, int mode)
+{
+ int err = 0;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_1]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_2]);
+ if (err)
+ return err;
+
+ err = imx274_write_table(priv, mode_table[mode]);
+
+ return err;
+}
+
+/*
+ * imx274_start_stream - Function for starting stream per mode index
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_start_stream(struct stimx274 *priv)
+{
+ int err = 0;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * imx274 datasheet, it should wait 10ms or more here.
+ * give it 1 extra ms for margin
+ */
+ msleep_range(11);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_3]);
+ if (err)
+ return err;
+
+ /*
+ * Refer to "Standby Cancel Sequence when using CSI-2" in
+ * imx274 datasheet, it should wait 7ms or more here.
+ * give it 1 extra ms for margin
+ */
+ msleep_range(8);
+ err = imx274_write_table(priv, mode_table[IMX274_MODE_START_STREAM_4]);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * imx274_reset - Function called to reset the sensor
+ * @priv: Pointer to device structure
+ * @rst: Input value for determining the sensor's end state after reset
+ *
+ * Set the sensor in reset and then
+ * if rst = 0, keep it in reset;
+ * if rst = 1, bring it out of reset.
+ *
+ */
+static void imx274_reset(struct stimx274 *priv, int rst)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+ gpiod_set_value_cansleep(priv->reset_gpio, !!rst);
+ usleep_range(IMX274_RESET_DELAY1, IMX274_RESET_DELAY2);
+}
+
+/**
+ * imx274_s_ctrl - This is used to set the imx274 V4L2 controls
+ * @ctrl: V4L2 control to be set
+ *
+ * This function is used to set the V4L2 controls for the imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = -EINVAL;
+
+ dev_dbg(&imx274->client->dev,
+ "%s : s_ctrl: %s, value: %d\n", __func__,
+ ctrl->name, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_EXPOSURE\n", __func__);
+ ret = imx274_set_exposure(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_GAIN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_GAIN\n", __func__);
+ ret = imx274_set_gain(imx274, ctrl);
+ break;
+
+ case V4L2_CID_VFLIP:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_VFLIP\n", __func__);
+ ret = imx274_set_vflip(imx274, ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ dev_dbg(&imx274->client->dev,
+ "%s : set V4L2_CID_TEST_PATTERN\n", __func__);
+ ret = imx274_set_test_pattern(imx274, ctrl->val);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * imx274_get_fmt - Get the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @fmt: Pointer to pad level media bus format
+ *
+ * This function is used to get the pad format information.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ mutex_lock(&imx274->lock);
+ fmt->format = imx274->format;
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_set_fmt - This is used to set the pad format
+ * @sd: Pointer to V4L2 Sub device structure
+ * @cfg: Pointer to sub device pad information structure
+ * @format: Pointer to pad level media bus format
+ *
+ * This function is used to set the pad format.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct i2c_client *client = imx274->client;
+ int index;
+
+ dev_dbg(&client->dev,
+ "%s: width = %d height = %d code = %d mbus_code = %d\n",
+ __func__, fmt->width, fmt->height, fmt->code,
+ imx274_formats[imx274->mode_index].mbus_code);
+
+ mutex_lock(&imx274->lock);
+
+ for (index = 0; index < ARRAY_SIZE(imx274_formats); index++) {
+ if (imx274_formats[index].size.width == fmt->width &&
+ imx274_formats[index].size.height == fmt->height)
+ break;
+ }
+
+ if (index >= ARRAY_SIZE(imx274_formats)) {
+ /* default to first format */
+ index = 0;
+ }
+
+ imx274->mode_index = index;
+
+ if (fmt->width > IMX274_MAX_WIDTH)
+ fmt->width = IMX274_MAX_WIDTH;
+ if (fmt->height > IMX274_MAX_HEIGHT)
+ fmt->height = IMX274_MAX_HEIGHT;
+ fmt->width = fmt->width & (~IMX274_MASK_LSB_2_BITS);
+ fmt->height = fmt->height & (~IMX274_MASK_LSB_2_BITS);
+ fmt->field = V4L2_FIELD_NONE;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = *fmt;
+ else
+ imx274->format = *fmt;
+
+ mutex_unlock(&imx274->lock);
+ return 0;
+}
+
+/**
+ * imx274_g_frame_interval - Get the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to get the frame interval.
+ *
+ * Return: 0 on success
+ */
+static int imx274_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ fi->interval = imx274->frame_interval;
+ dev_dbg(&imx274->client->dev, "%s frame rate = %d / %d\n",
+ __func__, imx274->frame_interval.numerator,
+ imx274->frame_interval.denominator);
+
+ return 0;
+}
+
+/**
+ * imx274_s_frame_interval - Set the frame interval
+ * @sd: Pointer to V4L2 Sub device structure
+ * @fi: Pointer to V4l2 Sub device frame interval structure
+ *
+ * This function is used to set the frame interval.
+ *
+ * Return: 0 on success
+ */
+static int imx274_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ struct v4l2_ctrl *ctrl = imx274->ctrls.exposure;
+ int min, max, def;
+ int ret;
+
+ mutex_lock(&imx274->lock);
+ ret = imx274_set_frame_interval(imx274, fi->interval);
+
+ if (!ret) {
+ /*
+ * exposure time range is decided by frame interval
+ * need to update it after the frame interval changes
+ */
+ min = IMX274_MIN_EXPOSURE_TIME;
+ max = fi->interval.numerator * 1000000
+ / fi->interval.denominator;
+ def = max;
+ ret = __v4l2_ctrl_modify_range(ctrl, min, max, 1, def);
+ if (ret) {
+ dev_err(&imx274->client->dev,
+ "Exposure ctrl range update failed\n");
+ goto unlock;
+ }
+
+ /* update exposure time accordingly */
+ imx274_set_exposure(imx274, imx274->ctrls.exposure->val);
+
+ dev_dbg(&imx274->client->dev, "set frame interval to %uus\n",
+ fi->interval.numerator * 1000000
+ / fi->interval.denominator);
+ }
+
+unlock:
+ mutex_unlock(&imx274->lock);
+
+ return ret;
+}
+
+/**
+ * imx274_load_default - load default control values
+ * @priv: Pointer to device structure
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_load_default(struct stimx274 *priv)
+{
+ int ret;
+
+ /* load default control values */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+ priv->ctrls.exposure->val = 1000000 / IMX274_DEF_FRAME_RATE;
+ priv->ctrls.gain->val = IMX274_DEF_GAIN;
+ priv->ctrls.vflip->val = 0;
+ priv->ctrls.test_pattern->val = TEST_PATTERN_DISABLED;
+
+ /* update frame rate */
+ ret = imx274_set_frame_interval(priv,
+ priv->frame_interval);
+ if (ret)
+ return ret;
+
+ /* update exposure time */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.exposure, priv->ctrls.exposure->val);
+ if (ret)
+ return ret;
+
+ /* update gain */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.gain, priv->ctrls.gain->val);
+ if (ret)
+ return ret;
+
+ /* update vflip */
+ ret = v4l2_ctrl_s_ctrl(priv->ctrls.vflip, priv->ctrls.vflip->val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * imx274_s_stream - It is used to start/stop the streaming.
+ * @sd: V4L2 Sub device
+ * @on: Flag (True / False)
+ *
+ * This function controls the start or stop of streaming for the
+ * imx274 sensor.
+ *
+ * Return: 0 on success, errors otherwise
+ */
+static int imx274_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct stimx274 *imx274 = to_imx274(sd);
+ int ret = 0;
+
+ dev_dbg(&imx274->client->dev, "%s : %s, mode index = %d\n", __func__,
+ on ? "Stream Start" : "Stream Stop", imx274->mode_index);
+
+ mutex_lock(&imx274->lock);
+
+ if (on) {
+ /* load mode registers */
+ ret = imx274_mode_regs(imx274, imx274->mode_index);
+ if (ret)
+ goto fail;
+
+ /*
+ * update frame rate & exposure. If the last mode is different,
+ * HMAX could be changed. As a result, frame rate & exposure
+ * are changed.
+ * gain is not affected.
+ */
+ ret = imx274_set_frame_interval(imx274,
+ imx274->frame_interval);
+ if (ret)
+ goto fail;
+
+ /* update exposure time */
+ ret = __v4l2_ctrl_s_ctrl(imx274->ctrls.exposure,
+ imx274->ctrls.exposure->val);
+ if (ret)
+ goto fail;
+
+ /* start stream */
+ ret = imx274_start_stream(imx274);
+ if (ret)
+ goto fail;
+ } else {
+ /* stop stream */
+ ret = imx274_write_table(imx274,
+ mode_table[IMX274_MODE_STOP_STREAM]);
+ if (ret)
+ goto fail;
+ }
+
+ mutex_unlock(&imx274->lock);
+ dev_dbg(&imx274->client->dev,
+ "%s : Done: mode = %d\n", __func__, imx274->mode_index);
+ return 0;
+
+fail:
+ mutex_unlock(&imx274->lock);
+ dev_err(&imx274->client->dev, "s_stream failed\n");
+ return ret;
+}
+
+/*
+ * imx274_get_frame_length - Function for obtaining current frame length
+ * @priv: Pointer to device structure
+ * @val: Pointer to the obtained value
+ *
+ * frame_length = vmax x (svr + 1), in unit of hmax.
+ *
+ * Return: 0 on success
+ */
+static int imx274_get_frame_length(struct stimx274 *priv, u32 *val)
+{
+ int err;
+ u16 svr;
+ u32 vmax;
+ u8 reg_val[3];
+
+ /* svr */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ /* vmax */
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_3, &reg_val[0]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_2, &reg_val[1]);
+ if (err)
+ goto fail;
+
+ err = imx274_read_reg(priv, IMX274_FRAME_LENGTH_ADDR_1, &reg_val[2]);
+ if (err)
+ goto fail;
+
+ vmax = ((reg_val[2] & IMX274_MASK_LSB_3_BITS) << IMX274_SHIFT_16_BITS)
+ + (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+
+ *val = vmax * (svr + 1);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+static int imx274_clamp_coarse_time(struct stimx274 *priv, u32 *val,
+ u32 *frame_length)
+{
+ int err;
+
+ err = imx274_get_frame_length(priv, frame_length);
+ if (err)
+ return err;
+
+ if (*frame_length < min_frame_len[priv->mode_index])
+ *frame_length = min_frame_len[priv->mode_index];
+
+ *val = *frame_length - *val; /* convert to raw shr */
+ if (*val > *frame_length - IMX274_SHR_LIMIT_CONST)
+ *val = *frame_length - IMX274_SHR_LIMIT_CONST;
+ else if (*val < min_SHR[priv->mode_index])
+ *val = min_SHR[priv->mode_index];
+
+ return 0;
+}
+
+/*
+ * imx274_set_digital_gain - Function called when setting digital gain
+ * @priv: Pointer to device structure
+ * @dgain: Value of digital gain.
+ *
+ * Digital gain has only 4 steps: 1x, 2x, 4x, and 8x
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_digital_gain(struct stimx274 *priv, u32 dgain)
+{
+ u8 reg_val;
+
+ reg_val = ffs(dgain);
+
+ if (reg_val)
+ reg_val--;
+
+ reg_val = clamp(reg_val, (u8)0, (u8)3);
+
+ return imx274_write_reg(priv, IMX274_DIGITAL_GAIN_REG,
+ reg_val & IMX274_MASK_LSB_4_BITS);
+}
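A quick worked check of the mapping above: dgain = 1 gives ffs(1) = 1, minus one = 0 (1x); dgain = 4 gives ffs(4) = 3, minus one = 2 (4x); dgain = 8 lands on 3 (8x). Values that are not an exact power of two fall back to their lowest set bit, and anything beyond 8 is clamped into the 0..3 register range, i.e. 1x..8x.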
+
+static inline void imx274_calculate_gain_regs(struct reg_8 regs[2], u16 gain)
+{
+ regs->addr = IMX274_ANALOG_GAIN_ADDR_MSB;
+ regs->val = (gain >> IMX274_SHIFT_8_BITS) & IMX274_MASK_LSB_3_BITS;
+
+ (regs + 1)->addr = IMX274_ANALOG_GAIN_ADDR_LSB;
+ (regs + 1)->val = (gain) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_gain - Function called when setting gain
+ * @priv: Pointer to device structure
+ * @ctrl: v4l2 control pointer; ctrl->val holds the requested gain in
+ * fixed point, so the real gain = ctrl->val >> IMX274_GAIN_SHIFT
+ *
+ * Set the gain based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_gain(struct stimx274 *priv, struct v4l2_ctrl *ctrl)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 gain, analog_gain, digital_gain, gain_reg;
+ int i;
+
+ gain = (u32)(ctrl->val);
+
+ dev_dbg(&priv->client->dev,
+ "%s : input gain = %d.%d\n", __func__,
+ gain >> IMX274_GAIN_SHIFT,
+ ((gain & IMX274_GAIN_SHIFT_MASK) * 100) >> IMX274_GAIN_SHIFT);
+
+ if (gain > IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN)
+ gain = IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN;
+ else if (gain < IMX274_MIN_GAIN)
+ gain = IMX274_MIN_GAIN;
+
+ if (gain <= IMX274_MAX_ANALOG_GAIN)
+ digital_gain = 1;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 2)
+ digital_gain = 2;
+ else if (gain <= IMX274_MAX_ANALOG_GAIN * 4)
+ digital_gain = 4;
+ else
+ digital_gain = IMX274_MAX_DIGITAL_GAIN;
+
+ analog_gain = gain / digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : digital gain = %d, analog gain = %d.%d\n",
+ __func__, digital_gain, analog_gain >> IMX274_GAIN_SHIFT,
+ ((analog_gain & IMX274_GAIN_SHIFT_MASK) * 100)
+ >> IMX274_GAIN_SHIFT);
+
+ err = imx274_set_digital_gain(priv, digital_gain);
+ if (err)
+ goto fail;
+
+ /* convert to register value, refer to imx274 datasheet */
+ gain_reg = (u32)IMX274_GAIN_CONST -
+ (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT) / analog_gain;
+ if (gain_reg > IMX274_GAIN_REG_MAX)
+ gain_reg = IMX274_GAIN_REG_MAX;
+
+ imx274_calculate_gain_regs(reg_list, (u16)gain_reg);
+
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ if (IMX274_GAIN_CONST - gain_reg == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ /* convert register value back to gain value */
+ ctrl->val = (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT)
+ / (IMX274_GAIN_CONST - gain_reg) * digital_gain;
+
+ dev_dbg(&priv->client->dev,
+ "%s : GAIN control success, gain_reg = %d, new gain = %d\n",
+ __func__, gain_reg, ctrl->val);
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
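Rearranging the two conversions above gives the analog gain that is actually programmed: analog_gain (still in fixed point) = (IMX274_GAIN_CONST << IMX274_GAIN_SHIFT) / (IMX274_GAIN_CONST - gain_reg). As a sketch with assumed values (both constants are defined outside this hunk): if IMX274_GAIN_CONST were 2048 and IMX274_GAIN_SHIFT were 8, then gain_reg = 1024 reads back as (2048 << 8) / 1024 = 512, i.e. exactly 2.0x, before the digital_gain multiplier is applied.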
+
+static inline void imx274_calculate_coarse_time_regs(struct reg_8 regs[2],
+ u32 coarse_time)
+{
+ regs->addr = IMX274_COARSE_TIME_ADDR_MSB;
+ regs->val = (coarse_time >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 1)->addr = IMX274_COARSE_TIME_ADDR_LSB;
+ (regs + 1)->val = (coarse_time) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_coarse_time - Function called when setting SHR value
+ * @priv: Pointer to device structure
+ * @val: Value for exposure time in number of line_length, or [HMAX]
+ *
+ * Set SHR value based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_coarse_time(struct stimx274 *priv, u32 *val)
+{
+ struct reg_8 reg_list[2];
+ int err;
+ u32 coarse_time, frame_length;
+ int i;
+
+ coarse_time = *val;
+
+ /* convert exposure_time to appropriate SHR value */
+ err = imx274_clamp_coarse_time(priv, &coarse_time, &frame_length);
+ if (err)
+ goto fail;
+
+ /* prepare SHR registers */
+ imx274_calculate_coarse_time_regs(reg_list, coarse_time);
+
+ /* write to SHR registers */
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ *val = frame_length - coarse_time;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_exposure - Function called when setting exposure time
+ * @priv: Pointer to device structure
+ * @val: Variable for exposure time, in the unit of micro-second
+ *
+ * Set exposure time based on input value.
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_exposure(struct stimx274 *priv, int val)
+{
+ int err;
+ u16 hmax;
+ u8 reg_val[2];
+ u32 coarse_time; /* exposure time in unit of line (HMAX) */
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control input = %d\n", __func__, val);
+
+ /* step 1: convert input exposure_time (val) into number of 1[HMAX] */
+
+ /* obtain HMAX value */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ if (hmax == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ coarse_time = (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 * val
+ - nocpiop[priv->mode_index]) / hmax;
+
+ /* step 2: convert exposure_time into SHR value */
+
+ /* set SHR */
+ err = imx274_set_coarse_time(priv, &coarse_time);
+ if (err)
+ goto fail;
+
+ priv->ctrls.exposure->val =
+ (coarse_time * hmax + nocpiop[priv->mode_index])
+ / (IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2);
+
+ dev_dbg(&priv->client->dev,
+ "%s : EXPOSURE control success\n", __func__);
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
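A worked example of the two-step conversion, ignoring the SHR clamping and assuming IMX274_PIXCLK_CONST1 / IMX274_PIXCLK_CONST2 evaluates to 72 clocks per microsecond (consistent with the 72M figure quoted in imx274_set_frame_interval() below) and HMAX = 263 as in the 4K mode table: val = 10000 us gives coarse_time = (72 * 10000 - 112) / 263 = 2737 lines, and the value written back to the control is (2737 * 263 + 112) / 72 = 9999 us, i.e. the requested exposure rounded down to a whole number of lines.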
+
+/*
+ * imx274_set_vflip - Function called when setting vertical flip
+ * @priv: Pointer to device structure
+ * @val: Value for vflip setting
+ *
+ * Set vertical flip based on input value.
+ * val = 0: normal, no vertical flip
+ * val = 1: vertical flip enabled
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_vflip(struct stimx274 *priv, int val)
+{
+ int err;
+
+ err = imx274_write_reg(priv, IMX274_VFLIP_REG, val);
+ if (err) {
+ dev_err(&priv->client->dev, "VFILP control error\n");
+ return err;
+ }
+
+ dev_dbg(&priv->client->dev,
+ "%s : VFLIP control success\n", __func__);
+
+ return 0;
+}
+
+/*
+ * imx274_set_test_pattern - Function called when setting test pattern
+ * @priv: Pointer to device structure
+ * @val: Variable for test pattern
+ *
+ * Set to different test patterns based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_test_pattern(struct stimx274 *priv, int val)
+{
+ int err = 0;
+
+ if (val == TEST_PATTERN_DISABLED) {
+ err = imx274_write_table(priv, imx274_tp_disabled);
+ } else if (val <= TEST_PATTERN_V_COLOR_BARS) {
+ err = imx274_write_reg(priv, IMX274_TEST_PATTERN_REG, val - 1);
+ if (!err)
+ err = imx274_write_table(priv, imx274_tp_regs);
+ } else {
+ err = -EINVAL;
+ }
+
+ if (!err)
+ dev_dbg(&priv->client->dev,
+ "%s : TEST PATTERN control success\n", __func__);
+ else
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+
+ return err;
+}
+
+static inline void imx274_calculate_frame_length_regs(struct reg_8 regs[3],
+ u32 frame_length)
+{
+ regs->addr = IMX274_FRAME_LENGTH_ADDR_1;
+ regs->val = (frame_length >> IMX274_SHIFT_16_BITS)
+ & IMX274_MASK_LSB_4_BITS;
+ (regs + 1)->addr = IMX274_FRAME_LENGTH_ADDR_2;
+ (regs + 1)->val = (frame_length >> IMX274_SHIFT_8_BITS)
+ & IMX274_MASK_LSB_8_BITS;
+ (regs + 2)->addr = IMX274_FRAME_LENGTH_ADDR_3;
+ (regs + 2)->val = (frame_length) & IMX274_MASK_LSB_8_BITS;
+}
+
+/*
+ * imx274_set_frame_length - Function called when setting frame length
+ * @priv: Pointer to device structure
+ * @val: Variable for frame length (= VMAX, i.e. vertical drive period length)
+ *
+ * Set frame length based on input value.
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_length(struct stimx274 *priv, u32 val)
+{
+ struct reg_8 reg_list[3];
+ int err;
+ u32 frame_length;
+ int i;
+
+ dev_dbg(&priv->client->dev, "%s : input length = %d\n",
+ __func__, val);
+
+ frame_length = (u32)val;
+
+ imx274_calculate_frame_length_regs(reg_list, frame_length);
+ for (i = 0; i < ARRAY_SIZE(reg_list); i++) {
+ err = imx274_write_reg(priv, reg_list[i].addr,
+ reg_list[i].val);
+ if (err)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
+
+/*
+ * imx274_set_frame_interval - Function called when setting frame interval
+ * @priv: Pointer to device structure
+ * @frame_interval: Variable for frame interval
+ *
+ * Change frame interval by updating VMAX value
+ * The caller should hold the mutex lock imx274->lock if necessary
+ *
+ * Return: 0 on success
+ */
+static int imx274_set_frame_interval(struct stimx274 *priv,
+ struct v4l2_fract frame_interval)
+{
+ int err;
+ u32 frame_length, req_frame_rate;
+ u16 svr;
+ u16 hmax;
+ u8 reg_val[2];
+
+ dev_dbg(&priv->client->dev, "%s: input frame interval = %d / %d",
+ __func__, frame_interval.numerator,
+ frame_interval.denominator);
+
+ if (frame_interval.numerator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ req_frame_rate = (u32)(frame_interval.denominator
+ / frame_interval.numerator);
+
+ /* boundary check */
+ if (req_frame_rate > max_frame_rate[priv->mode_index]) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator =
+ max_frame_rate[priv->mode_index];
+ } else if (req_frame_rate < IMX274_MIN_FRAME_RATE) {
+ frame_interval.numerator = 1;
+ frame_interval.denominator = IMX274_MIN_FRAME_RATE;
+ }
+
+ /*
+ * VMAX = 1/frame_rate x 72M / (SVR+1) / HMAX
+ * frame_length (i.e. VMAX) = (frame_interval) x 72M / (SVR+1) / HMAX
+ */
+
+ /* SVR */
+ err = imx274_read_reg(priv, IMX274_SVR_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_SVR_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ svr = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register SVR = %d\n", __func__, svr);
+
+ /* HMAX */
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_LSB, &reg_val[0]);
+ if (err)
+ goto fail;
+ err = imx274_read_reg(priv, IMX274_HMAX_REG_MSB, &reg_val[1]);
+ if (err)
+ goto fail;
+ hmax = (reg_val[1] << IMX274_SHIFT_8_BITS) + reg_val[0];
+ dev_dbg(&priv->client->dev,
+ "%s : register HMAX = %d\n", __func__, hmax);
+
+ if (hmax == 0 || frame_interval.denominator == 0) {
+ err = -EINVAL;
+ goto fail;
+ }
+
+ frame_length = IMX274_PIXCLK_CONST1 / (svr + 1) / hmax
+ * frame_interval.numerator
+ / frame_interval.denominator;
+
+ err = imx274_set_frame_length(priv, frame_length);
+ if (err)
+ goto fail;
+
+ priv->frame_interval = frame_interval;
+ return 0;
+
+fail:
+ dev_err(&priv->client->dev, "%s error = %d\n", __func__, err);
+ return err;
+}
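Using the same assumed 72 MHz figure (a sketch, since IMX274_PIXCLK_CONST1 is defined outside this hunk): for mode 1 with SVR = 0 and HMAX = 263, a requested 1/30 s interval yields frame_length = 72000000 / 1 / 263 * 1 / 30 = 9125 lines, comfortably above the 4550-line minimum listed in min_frame_len[] for the 4K mode.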
+
+static const struct v4l2_subdev_pad_ops imx274_pad_ops = {
+ .get_fmt = imx274_get_fmt,
+ .set_fmt = imx274_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops imx274_video_ops = {
+ .g_frame_interval = imx274_g_frame_interval,
+ .s_frame_interval = imx274_s_frame_interval,
+ .s_stream = imx274_s_stream,
+};
+
+static const struct v4l2_subdev_ops imx274_subdev_ops = {
+ .pad = &imx274_pad_ops,
+ .video = &imx274_video_ops,
+};
+
+static const struct v4l2_ctrl_ops imx274_ctrl_ops = {
+ .s_ctrl = imx274_s_ctrl,
+};
+
+static const struct of_device_id imx274_of_id_table[] = {
+ { .compatible = "sony,imx274" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx274_of_id_table);
+
+static const struct i2c_device_id imx274_id[] = {
+ { "IMX274", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, imx274_id);
+
+static int imx274_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct v4l2_subdev *sd;
+ struct stimx274 *imx274;
+ int ret;
+
+ /* initialize imx274 */
+ imx274 = devm_kzalloc(&client->dev, sizeof(*imx274), GFP_KERNEL);
+ if (!imx274)
+ return -ENOMEM;
+
+ mutex_init(&imx274->lock);
+
+ /* initialize regmap */
+ imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
+ if (IS_ERR(imx274->regmap)) {
+ dev_err(&client->dev,
+ "regmap init failed: %ld\n", PTR_ERR(imx274->regmap));
+ ret = -ENODEV;
+ goto err_regmap;
+ }
+
+ /* initialize subdevice */
+ imx274->client = client;
+ sd = &imx274->sd;
+ v4l2_i2c_subdev_init(sd, client, &imx274_subdev_ops);
+ strlcpy(sd->name, DRIVER_NAME, sizeof(sd->name));
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+
+ /* initialize subdev media pad */
+ imx274->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sd->entity, 1, &imx274->pad);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : media entity init Failed %d\n", __func__, ret);
+ goto err_regmap;
+ }
+
+ /* initialize sensor reset gpio */
+ imx274->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(imx274->reset_gpio)) {
+ if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER)
+ dev_err(&client->dev, "Reset GPIO not setup in DT");
+ ret = PTR_ERR(imx274->reset_gpio);
+ goto err_me;
+ }
+
+ /* pull sensor out of reset */
+ imx274_reset(imx274, 1);
+
+ /* initialize controls */
+ ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 2);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : ctrl handler init Failed\n", __func__);
+ goto err_me;
+ }
+
+ imx274->ctrls.handler.lock = &imx274->lock;
+
+ /* add new controls */
+ imx274->ctrls.test_pattern = v4l2_ctrl_new_std_menu_items(
+ &imx274->ctrls.handler, &imx274_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(tp_qmenu) - 1, 0, 0, tp_qmenu);
+
+ imx274->ctrls.gain = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_GAIN, IMX274_MIN_GAIN,
+ IMX274_MAX_DIGITAL_GAIN * IMX274_MAX_ANALOG_GAIN, 1,
+ IMX274_DEF_GAIN);
+
+ imx274->ctrls.exposure = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_EXPOSURE, IMX274_MIN_EXPOSURE_TIME,
+ 1000000 / IMX274_DEF_FRAME_RATE, 1,
+ IMX274_MIN_EXPOSURE_TIME);
+
+ imx274->ctrls.vflip = v4l2_ctrl_new_std(
+ &imx274->ctrls.handler,
+ &imx274_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ imx274->sd.ctrl_handler = &imx274->ctrls.handler;
+ if (imx274->ctrls.handler.error) {
+ ret = imx274->ctrls.handler.error;
+ goto err_ctrls;
+ }
+
+ /* setup default controls */
+ ret = v4l2_ctrl_handler_setup(&imx274->ctrls.handler);
+ if (ret) {
+ dev_err(&client->dev,
+ "Error %d setup default controls\n", ret);
+ goto err_ctrls;
+ }
+
+ /* initialize format */
+ imx274->mode_index = IMX274_MODE_3840X2160;
+ imx274->format.width = imx274_formats[0].size.width;
+ imx274->format.height = imx274_formats[0].size.height;
+ imx274->format.field = V4L2_FIELD_NONE;
+ imx274->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ imx274->format.colorspace = V4L2_COLORSPACE_SRGB;
+ imx274->frame_interval.numerator = 1;
+ imx274->frame_interval.denominator = IMX274_DEF_FRAME_RATE;
+
+ /* load default control values */
+ ret = imx274_load_default(imx274);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s : imx274_load_default failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ /* register subdevice */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s : v4l2_async_register_subdev failed %d\n",
+ __func__, ret);
+ goto err_ctrls;
+ }
+
+ dev_info(&client->dev, "imx274 : imx274 probe success !\n");
+ return 0;
+
+err_ctrls:
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+err_me:
+ media_entity_cleanup(&sd->entity);
+err_regmap:
+ mutex_destroy(&imx274->lock);
+ return ret;
+}
+
+static int imx274_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct stimx274 *imx274 = to_imx274(sd);
+
+ /* stop stream */
+ imx274_write_table(imx274, mode_table[IMX274_MODE_STOP_STREAM]);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ media_entity_cleanup(&sd->entity);
+ mutex_destroy(&imx274->lock);
+ return 0;
+}
+
+static struct i2c_driver imx274_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = imx274_of_id_table,
+ },
+ .probe = imx274_probe,
+ .remove = imx274_remove,
+ .id_table = imx274_id,
+};
+
+module_i2c_driver(imx274_i2c_driver);
+
+MODULE_AUTHOR("Leon Luo <leonl@leopardimaging.com>");
+MODULE_DESCRIPTION("IMX274 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index a374e2a0ac3d..8b5f7d0435e4 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -460,7 +460,6 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
*/
rc->map_name = ir->ir_codes;
rc->allowed_protocols = rc_proto;
- rc->enabled_protocols = rc_proto;
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index bf0e821a2b93..2f1966bdc473 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1345,7 +1345,7 @@ static int max2175_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(sd, client, &max2175_ops);
ctx->client = client;
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* Controls */
hdl = &ctx->ctrl_hdl;
diff --git a/drivers/media/i2c/msp3400-driver.h b/drivers/media/i2c/msp3400-driver.h
index a8702aca187a..b6c7698bce5a 100644
--- a/drivers/media/i2c/msp3400-driver.h
+++ b/drivers/media/i2c/msp3400-driver.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*/
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 99b992e46702..b1665d97e0fd 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -945,7 +945,7 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return -EPROBE_DEFER;
+ return PTR_ERR(mt9m111->clk);
/* Default HIGHPOWER context */
mt9m111->ctx = &context_b;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index af7af0d14c69..bf7d06f3f21a 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -104,7 +104,6 @@ struct ov13858_reg_list {
/* Link frequency config */
struct ov13858_link_freq_config {
- u32 pixel_rate;
u32 pixels_per_line;
/* PLL registers for this link frequency */
@@ -238,11 +237,11 @@ static const struct ov13858_reg mode_4224x3136_regs[] = {
{0x3800, 0x00},
{0x3801, 0x00},
{0x3802, 0x00},
- {0x3803, 0x00},
+ {0x3803, 0x08},
{0x3804, 0x10},
{0x3805, 0x9f},
{0x3806, 0x0c},
- {0x3807, 0x5f},
+ {0x3807, 0x57},
{0x3808, 0x10},
{0x3809, 0x80},
{0x380a, 0x0c},
@@ -948,6 +947,18 @@ static const char * const ov13858_test_pattern_menu[] = {
#define OV13858_LINK_FREQ_INDEX_0 0
#define OV13858_LINK_FREQ_INDEX_1 1
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per pixel => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * 4;
+ do_div(f, 10);
+
+ return f;
+}
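A quick check of the helper, assuming OV13858_LINK_FREQ_540MHZ is 540000000 as the name suggests: 540000000 * 2 * 4 / 10 = 432000000, i.e. a 432 Mpixel/s rate, matching the per-entry .pixel_rate fields removed from link_freq_configs[] below.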
+
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
OV13858_LINK_FREQ_540MHZ,
@@ -958,8 +969,6 @@ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
static const struct ov13858_link_freq_config
link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = {
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_540MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_540MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps),
@@ -967,8 +976,6 @@ static const struct ov13858_link_freq_config
}
},
{
- /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
- .pixel_rate = (OV13858_LINK_FREQ_270MHZ * 2 * 4) / 10,
.pixels_per_line = OV13858_PPL_270MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps),
@@ -1385,6 +1392,8 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
s32 vblank_def;
s32 vblank_min;
s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
mutex_lock(&ov13858->mutex);
@@ -1400,9 +1409,10 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
} else {
ov13858->cur_mode = mode;
__v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index);
- __v4l2_ctrl_s_ctrl_int64(
- ov13858->pixel_rate,
- link_freq_configs[mode->link_freq_index].pixel_rate);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13858->pixel_rate, pixel_rate);
+
/* Update limits and set FPS to default */
vblank_def = ov13858->cur_mode->vts_def -
ov13858->cur_mode->height;
@@ -1617,6 +1627,10 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
s64 exposure_max;
s64 vblank_def;
s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13858_mode *mode;
int ret;
ctrl_hdlr = &ov13858->ctrl_handler;
@@ -1634,29 +1648,30 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
link_freq_menu_items);
ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
/* By default, PIXEL_RATE is read only */
ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops,
- V4L2_CID_PIXEL_RATE, 0,
- link_freq_configs[0].pixel_rate, 1,
- link_freq_configs[0].pixel_rate);
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
- vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height;
- vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height;
+ mode = ov13858->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
ov13858->vblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
- vblank_min,
- OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
+ vblank_min, OV13858_VTS_MAX - mode->height, 1,
vblank_def);
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width,
- 1,
- OV13858_PPL_540MHZ - ov13858->cur_mode->width);
+ hblank, hblank, 1, hblank);
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = ov13858->cur_mode->vts_def - 8;
+ exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops,
V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
@@ -1746,7 +1761,7 @@ static int ov13858_probe(struct i2c_client *client,
goto error_handler_free;
}
- ret = v4l2_async_register_subdev(&ov13858->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov13858->sd);
if (ret < 0)
goto error_media_entity;
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index e6d0c1f64f0b..518868388d65 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -685,7 +685,7 @@ static int ov2640_mask_set(struct i2c_client *client,
static int ov2640_reset(struct i2c_client *client)
{
int ret;
- const struct regval_list reset_seq[] = {
+ static const struct regval_list reset_seq[] = {
{BANK_SEL, BANK_SEL_SENS},
{COM7, COM7_SRST},
ENDMARKER,
@@ -1097,18 +1097,17 @@ static int ov2640_probe(struct i2c_client *client,
return -EIO;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov2640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&adapter->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
if (client->dev.of_node) {
priv->clk = devm_clk_get(&client->dev, "xvclk");
if (IS_ERR(priv->clk))
- return -EPROBE_DEFER;
- clk_prepare_enable(priv->clk);
+ return PTR_ERR(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
}
ret = ov2640_probe_dt(client, priv);
@@ -1116,7 +1115,7 @@ static int ov2640_probe(struct i2c_client *client,
goto err_clk;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
- priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
v4l2_ctrl_handler_init(&priv->hdl, 2);
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 39a2269c0bee..c89ed6609738 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2271,7 +2271,7 @@ static int ov5640_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
- sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index 95ce90fdb876..34179d232a35 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -35,9 +35,18 @@
#define SENSOR_NAME "ov5647"
-#define OV5647_SW_RESET 0x0103
-#define OV5647_REG_CHIPID_H 0x300A
-#define OV5647_REG_CHIPID_L 0x300B
+#define MIPI_CTRL00_CLOCK_LANE_GATE BIT(5)
+#define MIPI_CTRL00_BUS_IDLE BIT(2)
+#define MIPI_CTRL00_CLOCK_LANE_DISABLE BIT(0)
+
+#define OV5647_SW_STANDBY 0x0100
+#define OV5647_SW_RESET 0x0103
+#define OV5647_REG_CHIPID_H 0x300A
+#define OV5647_REG_CHIPID_L 0x300B
+#define OV5640_REG_PAD_OUT 0x300D
+#define OV5647_REG_FRAME_OFF_NUMBER 0x4202
+#define OV5647_REG_MIPI_CTRL00 0x4800
+#define OV5647_REG_MIPI_CTRL14 0x4814
#define REG_TERM 0xfffe
#define VAL_TERM 0xfe
@@ -241,34 +250,43 @@ static int ov5647_set_virtual_channel(struct v4l2_subdev *sd, int channel)
u8 channel_id;
int ret;
- ret = ov5647_read(sd, 0x4814, &channel_id);
+ ret = ov5647_read(sd, OV5647_REG_MIPI_CTRL14, &channel_id);
if (ret < 0)
return ret;
channel_id &= ~(3 << 6);
- return ov5647_write(sd, 0x4814, channel_id | (channel << 6));
+ return ov5647_write(sd, OV5647_REG_MIPI_CTRL14, channel_id | (channel << 6));
}
static int ov5647_stream_on(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_BUS_IDLE);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x00);
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x00);
+ if (ret < 0)
+ return ret;
+
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x00);
}
static int ov5647_stream_off(struct v4l2_subdev *sd)
{
int ret;
- ret = ov5647_write(sd, 0x4202, 0x0f);
+ ret = ov5647_write(sd, OV5647_REG_MIPI_CTRL00, MIPI_CTRL00_CLOCK_LANE_GATE
+ | MIPI_CTRL00_BUS_IDLE | MIPI_CTRL00_CLOCK_LANE_DISABLE);
+ if (ret < 0)
+ return ret;
+
+ ret = ov5647_write(sd, OV5647_REG_FRAME_OFF_NUMBER, 0x0f);
if (ret < 0)
return ret;
- return ov5647_write(sd, 0x300D, 0x01);
+ return ov5647_write(sd, OV5640_REG_PAD_OUT, 0x01);
}
static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
@@ -276,7 +294,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
int ret;
u8 rdval;
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -285,7 +303,7 @@ static int set_sw_standby(struct v4l2_subdev *sd, bool standby)
else
rdval |= 0x01;
- return ov5647_write(sd, 0x0100, rdval);
+ return ov5647_write(sd, OV5647_SW_STANDBY, rdval);
}
static int __sensor_init(struct v4l2_subdev *sd)
@@ -294,7 +312,7 @@ static int __sensor_init(struct v4l2_subdev *sd)
u8 resetval, rdval;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- ret = ov5647_read(sd, 0x0100, &rdval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &rdval);
if (ret < 0)
return ret;
@@ -309,18 +327,21 @@ static int __sensor_init(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = ov5647_read(sd, 0x0100, &resetval);
+ ret = ov5647_read(sd, OV5647_SW_STANDBY, &resetval);
if (ret < 0)
return ret;
if (!(resetval & 0x01)) {
dev_err(&client->dev, "Device was in SW standby");
- ret = ov5647_write(sd, 0x0100, 0x01);
+ ret = ov5647_write(sd, OV5647_SW_STANDBY, 0x01);
if (ret < 0)
return ret;
}
- return ov5647_write(sd, 0x4800, 0x04);
+ /*
+ * stream off to make the clock lane into LP-11 state.
+ */
+ return ov5647_stream_off(sd);
}
static int ov5647_sensor_power(struct v4l2_subdev *sd, int on)
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 6f7a1d6d2200..9f9196568eb8 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -390,7 +390,10 @@ static const struct ov5670_reg mode_2592x1944_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1296x972_regs[] = {
@@ -653,7 +656,10 @@ static const struct ov5670_reg mode_1296x972_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_648x486_regs[] = {
@@ -916,7 +922,10 @@ static const struct ov5670_reg mode_648x486_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_2560x1440_regs[] = {
@@ -1178,7 +1187,10 @@ static const struct ov5670_reg mode_2560x1440_regs[] = {
{0x5791, 0x06},
{0x5792, 0x00},
{0x5793, 0x52},
- {0x5794, 0xa3}
+ {0x5794, 0xa3},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_1280x720_regs[] = {
@@ -1441,7 +1453,10 @@ static const struct ov5670_reg mode_1280x720_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const struct ov5670_reg mode_640x360_regs[] = {
@@ -1704,7 +1719,10 @@ static const struct ov5670_reg mode_640x360_regs[] = {
{0x5792, 0x00},
{0x5793, 0x52},
{0x5794, 0xa3},
- {0x3503, 0x00}
+ {0x3503, 0x00},
+ {0x5045, 0x05},
+ {0x4003, 0x40},
+ {0x5048, 0x40}
};
static const char * const ov5670_test_pattern_menu[] = {
@@ -2323,8 +2341,6 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
return ret;
}
- ov5670->streaming = true;
-
return 0;
}
@@ -2338,8 +2354,6 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
if (ret)
dev_err(&client->dev, "%s failed to set stream\n", __func__);
- ov5670->streaming = false;
-
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
*/
@@ -2370,6 +2384,7 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
ret = ov5670_stop_streaming(ov5670);
pm_runtime_put(&client->dev);
}
+ ov5670->streaming = enable;
goto unlock_and_return;
error:
@@ -2514,7 +2529,7 @@ static int ov5670_probe(struct i2c_client *client)
}
/* Async register for subdev */
- ret = v4l2_async_register_subdev(&ov5670->sd);
+ ret = v4l2_async_register_subdev_sensor_common(&ov5670->sd);
if (ret < 0) {
err_msg = "v4l2_async_register_subdev() error";
goto error_entity_cleanup;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
index 768f2950ea36..8975d16b2b24 100644
--- a/drivers/media/i2c/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -951,11 +951,8 @@ static int ov6650_probe(struct i2c_client *client,
int ret;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e88549f0e704..950a0acf85fb 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -213,6 +213,9 @@ struct ov7670_devtype {
struct ov7670_format_struct; /* coming later */
struct ov7670_info {
struct v4l2_subdev sd;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ struct media_pad pad;
+#endif
struct v4l2_ctrl_handler hdl;
struct {
/* gain cluster */
@@ -229,6 +232,7 @@ struct ov7670_info {
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *hue;
};
+ struct v4l2_mbus_framefmt format;
struct ov7670_format_struct *fmt; /* Current format */
struct clk *clk;
struct gpio_desc *resetb_gpio;
@@ -972,6 +976,9 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd,
fmt->width = wsize->width;
fmt->height = wsize->height;
fmt->colorspace = ov7670_formats[index].colorspace;
+
+ info->format = *fmt;
+
return 0;
}
@@ -985,6 +992,9 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
struct ov7670_format_struct *ovfmt;
struct ov7670_win_size *wsize;
struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
unsigned char com7;
int ret;
@@ -995,8 +1005,13 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
if (ret)
return ret;
- cfg->try_fmt = format->format;
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+ *mbus_fmt = format->format;
return 0;
+#else
+ return -ENOTTY;
+#endif
}
ret = ov7670_try_fmt_internal(sd, &format->format, &ovfmt, &wsize);
@@ -1038,6 +1053,30 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
return 0;
}
+static int ov7670_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov7670_info *info = to_state(sd);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ struct v4l2_mbus_framefmt *mbus_fmt;
+#endif
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ format->format = *mbus_fmt;
+ return 0;
+#else
+ return -ENOTTY;
+#endif
+ } else {
+ format->format = info->format;
+ }
+
+ return 0;
+}
+
/*
* Implement G/S_PARM. There is a "high quality" mode we could try
* to do someday; for now, we just do the frame rate tweak.
@@ -1505,6 +1544,46 @@ static int ov7670_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regis
}
#endif
+static int ov7670_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ if (info->pwdn_gpio)
+ gpiod_set_value(info->pwdn_gpio, !on);
+ if (on && info->resetb_gpio) {
+ gpiod_set_value(info->resetb_gpio, 1);
+ usleep_range(500, 1000);
+ gpiod_set_value(info->resetb_gpio, 0);
+ usleep_range(3000, 5000);
+ }
+
+ return 0;
+}
+
+static void ov7670_get_default_format(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *format)
+{
+ struct ov7670_info *info = to_state(sd);
+
+ format->width = info->devtype->win_sizes[0].width;
+ format->height = info->devtype->win_sizes[0].height;
+ format->colorspace = info->fmt->colorspace;
+ format->code = info->fmt->mbus_code;
+ format->field = V4L2_FIELD_NONE;
+}
+
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ ov7670_get_default_format(sd, format);
+
+ return 0;
+}
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
@@ -1525,6 +1604,7 @@ static const struct v4l2_subdev_pad_ops ov7670_pad_ops = {
.enum_frame_interval = ov7670_enum_frame_interval,
.enum_frame_size = ov7670_enum_frame_size,
.enum_mbus_code = ov7670_enum_mbus_code,
+ .get_fmt = ov7670_get_fmt,
.set_fmt = ov7670_set_fmt,
};
@@ -1534,6 +1614,12 @@ static const struct v4l2_subdev_ops ov7670_ops = {
.pad = &ov7670_pad_ops,
};
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+static const struct v4l2_subdev_internal_ops ov7670_subdev_internal_ops = {
+ .open = ov7670_open,
+};
+#endif
+
/* ----------------------------------------------------------------------- */
static const struct ov7670_devtype ov7670_devdata[] = {
@@ -1586,6 +1672,11 @@ static int ov7670_probe(struct i2c_client *client,
sd = &info->sd;
v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
+#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+ sd->internal_ops = &ov7670_subdev_internal_ops;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+#endif
+
info->clock_speed = 30; /* default: a guess */
if (client->dev.platform_data) {
struct ov7670_config *config = client->dev.platform_data;
@@ -1619,29 +1710,34 @@ static int ov7670_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = ov7670_init_gpio(client, info);
- if (ret)
- goto clk_disable;
-
info->clock_speed = clk_get_rate(info->clk) / 1000000;
if (info->clock_speed < 10 || info->clock_speed > 48) {
ret = -EINVAL;
goto clk_disable;
}
+ ret = ov7670_init_gpio(client, info);
+ if (ret)
+ goto clk_disable;
+
+ ov7670_s_power(sd, 1);
+
/* Make sure it's an ov7670 */
ret = ov7670_detect(sd);
if (ret) {
v4l_dbg(1, debug, client,
"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
client->addr << 1, client->adapter->name);
- goto clk_disable;
+ goto power_off;
}
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
info->devtype = &ov7670_devdata[id->driver_data];
info->fmt = &ov7670_formats[0];
+
+ ov7670_get_default_format(sd, &info->format);
+
info->clkrc = 0;
/* Set default frame rate to 30 fps */
@@ -1688,16 +1784,31 @@ static int ov7670_probe(struct i2c_client *client,
v4l2_ctrl_auto_cluster(2, &info->auto_exposure,
V4L2_EXPOSURE_MANUAL, false);
v4l2_ctrl_cluster(2, &info->saturation);
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ info->pad.flags = MEDIA_PAD_FL_SOURCE;
+ info->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&info->sd.entity, 1, &info->pad);
+ if (ret < 0)
+ goto hdl_free;
+#endif
+
v4l2_ctrl_handler_setup(&info->hdl);
ret = v4l2_async_register_subdev(&info->sd);
if (ret < 0)
- goto hdl_free;
+ goto entity_cleanup;
return 0;
+entity_cleanup:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
hdl_free:
v4l2_ctrl_handler_free(&info->hdl);
+power_off:
+ ov7670_s_power(sd, 0);
clk_disable:
clk_disable_unprepare(info->clk);
return ret;
@@ -1712,6 +1823,10 @@ static int ov7670_remove(struct i2c_client *client)
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&info->hdl);
clk_disable_unprepare(info->clk);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&info->sd.entity);
+#endif
+ ov7670_s_power(sd, 0);
return 0;
}
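The ov7670 hunks above follow the usual TRY/ACTIVE split for pad get_fmt handlers. A minimal sketch of that pattern, using hypothetical my_* names in place of the driver's own symbols (the v4l2 calls themselves match the patch):

#include <media/v4l2-subdev.h>

struct my_info {
	struct v4l2_subdev sd;
	struct v4l2_mbus_framefmt format;	/* last ACTIVE format */
};

static int my_get_fmt(struct v4l2_subdev *sd,
		      struct v4l2_subdev_pad_config *cfg,
		      struct v4l2_subdev_format *format)
{
	struct my_info *info = container_of(sd, struct my_info, sd);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
		/* TRY formats live in the per-file-handle pad config */
		format->format = *v4l2_subdev_get_try_format(sd, cfg, 0);
		return 0;
#else
		return -ENOTTY;
#endif
	}

	/* The ACTIVE format is the copy cached in the driver state */
	format->format = info->format;
	return 0;
}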
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 6ffb460e8589..69433e1e2533 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -985,7 +985,6 @@ static const struct v4l2_ctrl_ops ov965x_ctrl_ops = {
static const char * const test_pattern_menu[] = {
"Disabled",
"Color bars",
- NULL
};
static int ov965x_initialize_controls(struct ov965x *ov965x)
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 700f433261d0..e6b717b83b18 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1239,6 +1239,10 @@ static int smiapp_power_on(struct device *dev)
sleep = SMIAPP_RESET_DELAY(sensor->hwcfg->ext_clk);
usleep_range(sleep, sleep);
+ mutex_lock(&sensor->mutex);
+
+ sensor->active = true;
+
/*
* Failures to respond to the address change command have been noticed.
* Those failures seem to be caused by the sensor requiring a longer
@@ -1313,7 +1317,7 @@ static int smiapp_power_on(struct device *dev)
rval = smiapp_write(sensor, SMIAPP_REG_U8_DPHY_CTRL,
SMIAPP_DPHY_CTRL_UI);
if (rval < 0)
- return rval;
+ goto out_cci_addr_fail;
rval = smiapp_call_quirk(sensor, post_poweron);
if (rval) {
@@ -1321,28 +1325,28 @@ static int smiapp_power_on(struct device *dev)
goto out_cci_addr_fail;
}
- /* Are we still initialising...? If yes, return here. */
- if (!sensor->pixel_array)
- return 0;
+ /* Are we still initialising...? If not, proceed with control setup. */
+ if (sensor->pixel_array) {
+ rval = __v4l2_ctrl_handler_setup(
+ &sensor->pixel_array->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = __v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
+ if (rval)
+ goto out_cci_addr_fail;
- rval = v4l2_ctrl_handler_setup(&sensor->src->ctrl_handler);
- if (rval)
- goto out_cci_addr_fail;
+ rval = smiapp_update_mode(sensor);
+ if (rval < 0)
+ goto out_cci_addr_fail;
+ }
- mutex_lock(&sensor->mutex);
- rval = smiapp_update_mode(sensor);
mutex_unlock(&sensor->mutex);
- if (rval < 0)
- goto out_cci_addr_fail;
return 0;
out_cci_addr_fail:
-
+ mutex_unlock(&sensor->mutex);
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
@@ -1360,6 +1364,8 @@ static int smiapp_power_off(struct device *dev)
struct smiapp_sensor *sensor =
container_of(ssd, struct smiapp_sensor, ssds[0]);
+ mutex_lock(&sensor->mutex);
+
/*
* Currently power/clock to lens are enabled/disabled separately
* but they are essentially the same signals. So if the sensor is
@@ -1372,6 +1378,10 @@ static int smiapp_power_off(struct device *dev)
SMIAPP_REG_U8_SOFTWARE_RESET,
SMIAPP_SOFTWARE_RESET);
+ sensor->active = false;
+
+ mutex_unlock(&sensor->mutex);
+
gpiod_set_value(sensor->xshutdown, 0);
clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
@@ -1381,29 +1391,6 @@ static int smiapp_power_off(struct device *dev)
return 0;
}
-static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
-{
- int rval;
-
- if (!on) {
- pm_runtime_mark_last_busy(subdev->dev);
- pm_runtime_put_autosuspend(subdev->dev);
-
- return 0;
- }
-
- rval = pm_runtime_get_sync(subdev->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(subdev->dev);
-
- pm_runtime_put(subdev->dev);
-
- return rval;
-}
-
/* -----------------------------------------------------------------------------
* Video stream management
*/
@@ -1560,19 +1547,31 @@ out:
static int smiapp_set_stream(struct v4l2_subdev *subdev, int enable)
{
struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
if (sensor->streaming == enable)
return 0;
if (enable) {
+ rval = pm_runtime_get_sync(&client->dev);
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_put(&client->dev);
+ return rval;
+ }
+
sensor->streaming = true;
+
rval = smiapp_start_streaming(sensor);
if (rval < 0)
sensor->streaming = false;
} else {
rval = smiapp_stop_streaming(sensor);
sensor->streaming = false;
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
}
return rval;
@@ -2650,7 +2649,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
struct smiapp_sensor *sensor = ssd->sensor;
unsigned int i;
- int rval;
mutex_lock(&sensor->mutex);
@@ -2677,22 +2675,6 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&sensor->mutex);
- rval = pm_runtime_get_sync(sd->dev);
- if (rval >= 0)
- return 0;
-
- if (rval != -EBUSY && rval != -EAGAIN)
- pm_runtime_set_active(sd->dev);
- pm_runtime_put(sd->dev);
-
- return rval;
-}
-
-static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- pm_runtime_mark_last_busy(sd->dev);
- pm_runtime_put_autosuspend(sd->dev);
-
return 0;
}
@@ -2700,10 +2682,6 @@ static const struct v4l2_subdev_video_ops smiapp_video_ops = {
.s_stream = smiapp_set_stream,
};
-static const struct v4l2_subdev_core_ops smiapp_core_ops = {
- .s_power = smiapp_set_power,
-};
-
static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
.enum_mbus_code = smiapp_enum_mbus_code,
.get_fmt = smiapp_get_format,
@@ -2718,7 +2696,6 @@ static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
};
static const struct v4l2_subdev_ops smiapp_ops = {
- .core = &smiapp_core_ops,
.video = &smiapp_video_ops,
.pad = &smiapp_pad_ops,
.sensor = &smiapp_sensor_ops,
@@ -2732,12 +2709,10 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
.registered = smiapp_registered,
.unregistered = smiapp_unregistered,
.open = smiapp_open,
- .close = smiapp_close,
};
static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
.open = smiapp_open,
- .close = smiapp_close,
};
/* -----------------------------------------------------------------------------
@@ -2829,12 +2804,10 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
/* NVM size is not mandatory */
fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
- rval = fwnode_property_read_u32(fwnode, "clock-frequency",
+ rval = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&hwcfg->ext_clk);
- if (rval) {
- dev_warn(dev, "can't get clock-frequency\n");
- goto out_err;
- }
+ if (rval)
+ dev_info(dev, "can't get clock-frequency\n");
dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
@@ -2894,18 +2867,46 @@ static int smiapp_probe(struct i2c_client *client,
}
sensor->ext_clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
+ if (PTR_ERR(sensor->ext_clk) == -ENOENT) {
+ dev_info(&client->dev, "no clock defined, continuing...\n");
+ sensor->ext_clk = NULL;
+ } else if (IS_ERR(sensor->ext_clk)) {
dev_err(&client->dev, "could not get clock (%ld)\n",
PTR_ERR(sensor->ext_clk));
return -EPROBE_DEFER;
}
- rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
- if (rval < 0) {
- dev_err(&client->dev,
- "unable to set clock freq to %u\n",
+ if (sensor->ext_clk) {
+ if (sensor->hwcfg->ext_clk) {
+ unsigned long rate;
+
+ rval = clk_set_rate(sensor->ext_clk,
+ sensor->hwcfg->ext_clk);
+ if (rval < 0) {
+ dev_err(&client->dev,
+ "unable to set clock freq to %u\n",
+ sensor->hwcfg->ext_clk);
+ return rval;
+ }
+
+ rate = clk_get_rate(sensor->ext_clk);
+ if (rate != sensor->hwcfg->ext_clk) {
+ dev_err(&client->dev,
+ "can't set clock freq, asked for %u but got %lu\n",
+ sensor->hwcfg->ext_clk, rate);
+ return rval;
+ }
+ } else {
+ sensor->hwcfg->ext_clk = clk_get_rate(sensor->ext_clk);
+ dev_dbg(&client->dev, "obtained clock freq %u\n",
+ sensor->hwcfg->ext_clk);
+ }
+ } else if (sensor->hwcfg->ext_clk) {
+ dev_dbg(&client->dev, "assuming clock freq %u\n",
sensor->hwcfg->ext_clk);
- return rval;
+ } else {
+ dev_err(&client->dev, "unable to obtain clock freq\n");
+ return -EINVAL;
}
sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
@@ -3092,7 +3093,7 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
- rval = v4l2_async_register_subdev(&sensor->src->sd);
+ rval = v4l2_async_register_subdev_sensor_common(&sensor->src->sd);
if (rval < 0)
goto out_media_entity_cleanup;
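The smiapp hunks above drop the s_power/open/close based power handling in favour of runtime PM taken directly in s_stream. A generic sketch of that idiom, with hypothetical my_*() helpers and error handling simplified relative to the driver (which uses pm_runtime_set_active()/pm_runtime_put() on failure):

static int my_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	if (enable) {
		ret = pm_runtime_get_sync(&client->dev);
		if (ret < 0) {
			/* get_sync() holds a reference even on failure */
			pm_runtime_put_noidle(&client->dev);
			return ret;
		}
		ret = my_start_streaming(sd);
		if (ret < 0)
			pm_runtime_put(&client->dev);
		return ret;
	}

	my_stop_streaming(sd);
	pm_runtime_mark_last_busy(&client->dev);
	pm_runtime_put_autosuspend(&client->dev);
	return 0;
}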
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index d6779e35d36f..145653dc81da 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -231,6 +231,9 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
len != SMIAPP_REG_32BIT) || flags)
return -EINVAL;
+ if (!sensor->active)
+ return 0;
+
msg.addr = client->addr;
msg.flags = 0; /* Write */
msg.len = 2 + len;
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f74d695018b9..e6a5ab402d7f 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -206,6 +206,7 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
+ bool active; /* is the sensor powered on? */
u16 embedded_start; /* embedded data start line */
u16 embedded_end;
u16 image_start; /* image data start line */
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index 78532a7fb8e2..faa2df8901d2 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
diff --git a/drivers/media/i2c/soc_camera/ov9640.c b/drivers/media/i2c/soc_camera/ov9640.c
index 0146d1f7aacb..c63948989688 100644
--- a/drivers/media/i2c/soc_camera/ov9640.c
+++ b/drivers/media/i2c/soc_camera/ov9640.c
@@ -335,8 +335,8 @@ static void ov9640_res_roundup(u32 *width, u32 *height)
{
int i;
enum { QQCIF, QQVGA, QCIF, QVGA, CIF, VGA, SXGA };
- int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
- int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
+ static const int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 };
+ static const int res_y[] = { 72, 120, 144, 240, 288, 480, 960 };
for (i = 0; i < ARRAY_SIZE(res_x); i++) {
if (res_x[i] >= *width && res_y[i] >= *height) {
@@ -675,12 +675,9 @@ static int ov9640_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9640_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev,
- "Failed to allocate memory for private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops);
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index cc07b7ae5407..755de2289c39 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -935,11 +935,9 @@ static int ov9740_probe(struct i2c_client *client,
return -EINVAL;
}
- priv = devm_kzalloc(&client->dev, sizeof(struct ov9740_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "Failed to allocate private data!\n");
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&priv->subdev, client, &ov9740_subdev_ops);
v4l2_ctrl_handler_init(&priv->hdl, 13);
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index e6f5c363ccab..2b8181469b93 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/hdmi.h>
+#include <media/cec.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
@@ -63,6 +64,7 @@ MODULE_LICENSE("GPL");
#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2)
+#define POLL_INTERVAL_CEC_MS 10
#define POLL_INTERVAL_MS 1000
static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
@@ -106,6 +108,8 @@ struct tc358743_state {
u8 csi_lanes_in_use;
struct gpio_desc *reset_gpio;
+
+ struct cec_adapter *cec_adap;
};
static void tc358743_enable_interrupts(struct v4l2_subdev *sd,
@@ -595,6 +599,7 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
struct tc358743_platform_data *pdata = &state->pdata;
u32 sys_freq;
u32 lockdet_ref;
+ u32 cec_freq;
u16 fh_min;
u16 fh_max;
@@ -626,6 +631,15 @@ static void tc358743_set_ref_clk(struct v4l2_subdev *sd)
i2c_wr8_and_or(sd, NCO_F0_MOD, ~MASK_NCO_F0_MOD,
(pdata->refclk_hz == 27000000) ?
MASK_NCO_F0_MOD_27MHZ : 0x0);
+
+ /*
+ * Trial and error suggests that the default register value
+ * of 656 is for a 42 MHz reference clock. Use that to derive
+ * a new value based on the actual reference clock.
+ */
+ cec_freq = (656 * sys_freq) / 4200;
+ i2c_wr16(sd, CECHCLK, cec_freq);
+ i2c_wr16(sd, CECLCLK, cec_freq);
}
static void tc358743_set_csi_color_space(struct v4l2_subdev *sd)
@@ -814,11 +828,17 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
struct tc358743_state *state = to_state(sd);
struct tc358743_platform_data *pdata = &state->pdata;
- /* CEC and IR are not supported by this driver */
- i2c_wr16_and_or(sd, SYSCTL, ~(MASK_CECRST | MASK_IRRST),
- (MASK_CECRST | MASK_IRRST));
+ /*
+ * IR is not supported by this driver.
+ * CEC is only enabled if needed.
+ */
+ i2c_wr16_and_or(sd, SYSCTL, ~(MASK_IRRST | MASK_CECRST),
+ (MASK_IRRST | MASK_CECRST));
tc358743_reset(sd, MASK_CTXRST | MASK_HDMIRST);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ tc358743_reset(sd, MASK_CECRST);
+#endif
tc358743_sleep_mode(sd, false);
i2c_wr16(sd, FIFOCTL, pdata->fifo_level);
@@ -842,6 +862,133 @@ static void tc358743_initial_setup(struct v4l2_subdev *sd)
i2c_wr8(sd, VOUT_SET3, MASK_VOUT_EXTCNT);
}
+/* --------------- CEC --------------- */
+
+#ifdef CONFIG_VIDEO_TC358743_CEC
+static int tc358743_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+
+ i2c_wr32(sd, CECIMSK, enable ? MASK_CECTIM | MASK_CECRIM : 0);
+ i2c_wr32(sd, CECICLR, MASK_CECTICLR | MASK_CECRICLR);
+ i2c_wr32(sd, CECEN, enable);
+ if (enable)
+ i2c_wr32(sd, CECREN, MASK_CECREN);
+ return 0;
+}
+
+static int tc358743_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ u32 reg;
+
+ reg = i2c_rd32(sd, CECRCTL1);
+ if (enable)
+ reg |= MASK_CECOTH;
+ else
+ reg &= ~MASK_CECOTH;
+ i2c_wr32(sd, CECRCTL1, reg);
+ return 0;
+}
+
+static int tc358743_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int la = 0;
+
+ if (log_addr != CEC_LOG_ADDR_INVALID) {
+ la = i2c_rd32(sd, CECADD);
+ la |= 1 << log_addr;
+ }
+ i2c_wr32(sd, CECADD, la);
+ return 0;
+}
+
+static int tc358743_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct tc358743_state *state = adap->priv;
+ struct v4l2_subdev *sd = &state->sd;
+ unsigned int i;
+
+ i2c_wr32(sd, CECTCTL,
+ (cec_msg_is_broadcast(msg) ? MASK_CECBRD : 0) |
+ (signal_free_time - 1));
+ for (i = 0; i < msg->len; i++)
+ i2c_wr32(sd, CECTBUF1 + i * 4,
+ msg->msg[i] | ((i == msg->len - 1) ? MASK_CECTEOM : 0));
+ i2c_wr32(sd, CECTEN, MASK_CECTEN);
+ return 0;
+}
+
+static const struct cec_adap_ops tc358743_cec_adap_ops = {
+ .adap_enable = tc358743_cec_adap_enable,
+ .adap_log_addr = tc358743_cec_adap_log_addr,
+ .adap_transmit = tc358743_cec_adap_transmit,
+ .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
+};
+
+static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
+ bool *handled)
+{
+ struct tc358743_state *state = to_state(sd);
+ unsigned int cec_rxint, cec_txint;
+ unsigned int clr = 0;
+
+ cec_rxint = i2c_rd32(sd, CECRSTAT);
+ cec_txint = i2c_rd32(sd, CECTSTAT);
+
+ if (intstatus & MASK_CEC_RINT)
+ clr |= MASK_CECRICLR;
+ if (intstatus & MASK_CEC_TINT)
+ clr |= MASK_CECTICLR;
+ i2c_wr32(sd, CECICLR, clr);
+
+ if ((intstatus & MASK_CEC_TINT) && cec_txint) {
+ if (cec_txint & MASK_CECTIEND)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_OK);
+ else if (cec_txint & MASK_CECTIAL)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ARB_LOST);
+ else if (cec_txint & MASK_CECTIACK)
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_NACK);
+ else if (cec_txint & MASK_CECTIUR) {
+ /*
+ * Not sure when this bit is set. Treat
+ * it as an error for now.
+ */
+ cec_transmit_attempt_done(state->cec_adap,
+ CEC_TX_STATUS_ERROR);
+ }
+ *handled = true;
+ }
+ if ((intstatus & MASK_CEC_RINT) &&
+ (cec_rxint & MASK_CECRIEND)) {
+ struct cec_msg msg = {};
+ unsigned int i;
+ unsigned int v;
+
+ v = i2c_rd32(sd, CECRCTR);
+ msg.len = v & 0x1f;
+ for (i = 0; i < msg.len; i++) {
+ v = i2c_rd32(sd, CECRBUF1 + i * 4);
+ msg.msg[i] = v & 0xff;
+ }
+ cec_received_msg(state->cec_adap, &msg);
+ *handled = true;
+ }
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+}
+
+#endif
+
/* --------------- IRQ --------------- */
static void tc358743_format_change(struct v4l2_subdev *sd)
@@ -1296,6 +1443,15 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
intstatus &= ~MASK_HDMI_INT;
}
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
+ tc358743_cec_isr(sd, intstatus, handled);
+ i2c_wr16(sd, INTSTATUS,
+ intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
+ intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
+ }
+#endif
+
if (intstatus & MASK_CSI_INT) {
u32 csi_int = i2c_rd32(sd, CSI_INT);
@@ -1325,13 +1481,18 @@ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
-static void tc358743_irq_poll_timer(unsigned long arg)
+static void tc358743_irq_poll_timer(struct timer_list *t)
{
- struct tc358743_state *state = (struct tc358743_state *)arg;
+ struct tc358743_state *state = from_timer(state, t, timer);
+ unsigned int msecs;
schedule_work(&state->work_i2c_poll);
-
- mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
+ /*
+ * If CEC is present, then we need to poll more frequently,
+ * otherwise we will miss CEC messages.
+ */
+ msecs = state->cec_adap ? POLL_INTERVAL_CEC_MS : POLL_INTERVAL_MS;
+ mod_timer(&state->timer, jiffies + msecs_to_jiffies(msecs));
}
static void tc358743_work_i2c_poll(struct work_struct *work)
@@ -1488,7 +1649,7 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
{
enable_stream(sd, enable);
if (!enable) {
- /* Put all lanes in PL-11 state (STOPSTATE) */
+ /* Put all lanes in LP-11 state (STOPSTATE) */
tc358743_set_csi(sd);
}
@@ -1621,6 +1782,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
{
struct tc358743_state *state = to_state(sd);
u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+ u16 pa;
+ int err;
int i;
v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
@@ -1638,6 +1801,12 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
edid->blocks = EDID_NUM_BLOCKS_MAX;
return -E2BIG;
}
+ pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
+ err = cec_phys_addr_validate(pa, &pa, NULL);
+ if (err)
+ return err;
+
+ cec_phys_addr_invalidate(state->cec_adap);
tc358743_disable_edid(sd);
@@ -1654,6 +1823,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
state->edid_blocks_written = edid->blocks;
+ cec_s_phys_addr(state->cec_adap, pa, false);
+
if (tx_5v_power_present(sd))
tc358743_enable_edid(sd);
@@ -1770,6 +1941,11 @@ static int tc358743_probe_of(struct tc358743_state *state)
goto free_endpoint;
}
+ if (endpoint->bus.mipi_csi2.num_data_lanes > 4) {
+ dev_err(dev, "invalid number of lanes\n");
+ goto free_endpoint;
+ }
+
state->bus = endpoint->bus.mipi_csi2;
ret = clk_prepare_enable(refclk);
@@ -1867,6 +2043,7 @@ static int tc358743_probe(struct i2c_client *client,
struct tc358743_state *state;
struct tc358743_platform_data *pdata = client->dev.platform_data;
struct v4l2_subdev *sd;
+ u16 irq_mask = MASK_HDMI_MSK | MASK_CSI_MSK;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1929,6 +2106,7 @@ static int tc358743_probe(struct i2c_client *client,
}
state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err < 0)
goto err_hdl;
@@ -1945,6 +2123,17 @@ static int tc358743_probe(struct i2c_client *client,
INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
tc358743_delayed_work_enable_hotplug);
+#ifdef CONFIG_VIDEO_TC358743_CEC
+ state->cec_adap = cec_allocate_adapter(&tc358743_cec_adap_ops,
+ state, dev_name(&client->dev),
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL, CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(state->cec_adap)) {
+ err = PTR_ERR(state->cec_adap);
+ goto err_hdl;
+ }
+ irq_mask |= MASK_CEC_RMSK | MASK_CEC_TMSK;
+#endif
+
tc358743_initial_setup(sd);
tc358743_s_dv_timings(sd, &default_timing);
@@ -1964,15 +2153,22 @@ static int tc358743_probe(struct i2c_client *client,
} else {
INIT_WORK(&state->work_i2c_poll,
tc358743_work_i2c_poll);
- state->timer.data = (unsigned long)state;
- state->timer.function = tc358743_irq_poll_timer;
+ timer_setup(&state->timer, tc358743_irq_poll_timer, 0);
state->timer.expires = jiffies +
msecs_to_jiffies(POLL_INTERVAL_MS);
add_timer(&state->timer);
}
+ err = cec_register_adapter(state->cec_adap, &client->dev);
+ if (err < 0) {
+ pr_err("%s: failed to register the cec device\n", __func__);
+ cec_delete_adapter(state->cec_adap);
+ state->cec_adap = NULL;
+ goto err_work_queues;
+ }
+
tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
- i2c_wr16(sd, INTMASK, ~(MASK_HDMI_MSK | MASK_CSI_MSK) & 0xffff);
+ i2c_wr16(sd, INTMASK, ~irq_mask);
err = v4l2_ctrl_handler_setup(sd->ctrl_handler);
if (err)
@@ -1984,6 +2180,7 @@ static int tc358743_probe(struct i2c_client *client,
return 0;
err_work_queues:
+ cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq)
flush_work(&state->work_i2c_poll);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
@@ -2004,6 +2201,7 @@ static int tc358743_remove(struct i2c_client *client)
flush_work(&state->work_i2c_poll);
}
cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cec_unregister_adapter(state->cec_adap);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
mutex_destroy(&state->confctl_mutex);
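For the CEC clock derivation added to tc358743_set_ref_clk() above, the scaling works out as follows, assuming sys_freq is expressed in 10 kHz units as the rest of that function suggests: with the default 42 MHz reference, sys_freq = 4200 and cec_freq = 656 * 4200 / 4200 = 656, i.e. the register default the comment mentions; with a 27 MHz refclk, sys_freq = 2700 and cec_freq = 656 * 2700 / 4200 = 421, which is the value written to both CECHCLK and CECLCLK.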
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 657ef50f215f..227b46471793 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -193,8 +193,98 @@
#define CSI_START 0x0518
#define MASK_STRT 0x00000001
-#define CECEN 0x0600
-#define MASK_CECEN 0x0001
+/* *** CEC (32 bit) *** */
+#define CECHCLK 0x0028 /* 16 bits */
+#define MASK_CECHCLK (0x7ff << 0)
+
+#define CECLCLK 0x002a /* 16 bits */
+#define MASK_CECLCLK (0x7ff << 0)
+
+#define CECEN 0x0600
+#define MASK_CECEN 0x0001
+
+#define CECADD 0x0604
+#define CECRST 0x0608
+#define MASK_CECRESET 0x0001
+
+#define CECREN 0x060c
+#define MASK_CECREN 0x0001
+
+#define CECRCTL1 0x0614
+#define MASK_CECACKDIS (1 << 24)
+#define MASK_CECHNC (3 << 20)
+#define MASK_CECLNC (7 << 16)
+#define MASK_CECMIN (7 << 12)
+#define MASK_CECMAX (7 << 8)
+#define MASK_CECDAT (7 << 4)
+#define MASK_CECTOUT (3 << 2)
+#define MASK_CECRIHLD (1 << 1)
+#define MASK_CECOTH (1 << 0)
+
+#define CECRCTL2 0x0618
+#define MASK_CECSWAV3 (7 << 12)
+#define MASK_CECSWAV2 (7 << 8)
+#define MASK_CECSWAV1 (7 << 4)
+#define MASK_CECSWAV0 (7 << 0)
+
+#define CECRCTL3 0x061c
+#define MASK_CECWAV3 (7 << 20)
+#define MASK_CECWAV2 (7 << 16)
+#define MASK_CECWAV1 (7 << 12)
+#define MASK_CECWAV0 (7 << 8)
+#define MASK_CECACKEI (1 << 4)
+#define MASK_CECMINEI (1 << 3)
+#define MASK_CECMAXEI (1 << 2)
+#define MASK_CECRSTEI (1 << 1)
+#define MASK_CECWAVEI (1 << 0)
+
+#define CECTEN 0x0620
+#define MASK_CECTBUSY (1 << 1)
+#define MASK_CECTEN (1 << 0)
+
+#define CECTCTL 0x0628
+#define MASK_CECSTRS (7 << 20)
+#define MASK_CECSPRD (7 << 16)
+#define MASK_CECDTRS (7 << 12)
+#define MASK_CECDPRD (15 << 8)
+#define MASK_CECBRD (1 << 4)
+#define MASK_CECFREE (15 << 0)
+
+#define CECRSTAT 0x062c
+#define MASK_CECRIWA (1 << 6)
+#define MASK_CECRIOR (1 << 5)
+#define MASK_CECRIACK (1 << 4)
+#define MASK_CECRIMIN (1 << 3)
+#define MASK_CECRIMAX (1 << 2)
+#define MASK_CECRISTA (1 << 1)
+#define MASK_CECRIEND (1 << 0)
+
+#define CECTSTAT 0x0630
+#define MASK_CECTIUR (1 << 4)
+#define MASK_CECTIACK (1 << 3)
+#define MASK_CECTIAL (1 << 2)
+#define MASK_CECTIEND (1 << 1)
+
+#define CECRBUF1 0x0634
+#define MASK_CECRACK (1 << 9)
+#define MASK_CECEOM (1 << 8)
+#define MASK_CECRBYTE (0xff << 0)
+
+#define CECTBUF1 0x0674
+#define MASK_CECTEOM (1 << 8)
+#define MASK_CECTBYTE (0xff << 0)
+
+#define CECRCTR 0x06b4
+#define MASK_CECRCTR (0x1f << 0)
+
+#define CECIMSK 0x06c0
+#define MASK_CECTIM (1 << 1)
+#define MASK_CECRIM (1 << 0)
+
+#define CECICLR 0x06cc
+#define MASK_CECTICLR (1 << 1)
+#define MASK_CECRICLR (1 << 0)
+
#define HDMI_INT0 0x8500
#define MASK_I_KEY 0x80
diff --git a/drivers/media/i2c/tea6415c.h b/drivers/media/i2c/tea6415c.h
index 3a47d697536e..f43228207445 100644
--- a/drivers/media/i2c/tea6415c.h
+++ b/drivers/media/i2c/tea6415c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDED_TEA6415C__
#define __INCLUDED_TEA6415C__
diff --git a/drivers/media/i2c/tea6420.h b/drivers/media/i2c/tea6420.h
index 4aa3edb3e193..07f9d72a86bb 100644
--- a/drivers/media/i2c/tea6420.h
+++ b/drivers/media/i2c/tea6420.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDED_TEA6420__
#define __INCLUDED_TEA6420__
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index ce86534450ac..16a1e08ce06c 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -300,9 +300,9 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
* if available, ...
*/
-static void chip_thread_wake(unsigned long data)
+static void chip_thread_wake(struct timer_list *t)
{
- struct CHIPSTATE *chip = (struct CHIPSTATE*)data;
+ struct CHIPSTATE *chip = from_timer(chip, t, wt);
wake_up_process(chip->thread);
}
@@ -1995,7 +1995,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
v4l2_ctrl_handler_setup(&chip->hdl);
chip->thread = NULL;
- init_timer(&chip->wt);
+ timer_setup(&chip->wt, chip_thread_wake, 0);
if (desc->flags & CHIP_NEED_CHECKMODE) {
if (!desc->getrxsubchans || !desc->setaudmode) {
/* This shouldn't happen. Warn user, but keep working
@@ -2005,8 +2005,6 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
return 0;
}
/* start async thread */
- chip->wt.function = chip_thread_wake;
- chip->wt.data = (unsigned long)chip;
chip->thread = kthread_run(chip_thread, chip, "%s",
client->name);
if (IS_ERR(chip->thread)) {
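The tvaudio, bttv, cx18, ivtv and netup hunks in this series all apply the same timer API conversion. A minimal sketch of the before/after shape, using hypothetical my_dev/my_timeout names:

#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;
	/* ... */
};

static void my_timeout(struct timer_list *t)
{
	/* Recover the containing structure from the timer_list pointer */
	struct my_dev *dev = from_timer(dev, t, timer);

	/* ... handle the timeout using dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	/* Replaces setup_timer(&dev->timer, my_timeout, (unsigned long)dev) */
	timer_setup(&dev->timer, my_timeout, 0);
}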
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 2ace0410d277..f7c6d64e6031 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -214,12 +214,20 @@ void media_gobj_destroy(struct media_gobj *gobj)
gobj->mdev = NULL;
}
+/*
+ * TODO: Get rid of this.
+ */
+#define MEDIA_ENTITY_MAX_PADS 512
+
int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
struct media_pad *pads)
{
struct media_device *mdev = entity->graph_obj.mdev;
unsigned int i;
+ if (num_pads >= MEDIA_ENTITY_MAX_PADS)
+ return -E2BIG;
+
entity->num_pads = num_pads;
entity->pads = pads;
@@ -280,11 +288,6 @@ static struct media_entity *stack_pop(struct media_graph *graph)
#define link_top(en) ((en)->stack[(en)->top].link)
#define stack_top(en) ((en)->stack[(en)->top].entity)
-/*
- * TODO: Get rid of this.
- */
-#define MEDIA_ENTITY_MAX_PADS 512
-
/**
* media_graph_walk_init - Allocate resources for graph walk
* @graph: Media graph structure that will be used to walk the graph
diff --git a/drivers/media/mmc/Makefile b/drivers/media/mmc/Makefile
index 31e297a202fb..e847aa82ae41 100644
--- a/drivers/media/mmc/Makefile
+++ b/drivers/media/mmc/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += siano/
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index a7e8af0f64a7..1ab759e9b1bb 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel multimedia device drivers.
#
diff --git a/drivers/media/pci/b2c2/Kconfig b/drivers/media/pci/b2c2/Kconfig
index 58761a21caa0..7b818d445f39 100644
--- a/drivers/media/pci/b2c2/Kconfig
+++ b/drivers/media/pci/b2c2/Kconfig
@@ -11,5 +11,5 @@ config DVB_B2C2_FLEXCOP_PCI_DEBUG
depends on DVB_B2C2_FLEXCOP_PCI
select DVB_B2C2_FLEXCOP_DEBUG
help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/pci/b2c2/Makefile b/drivers/media/pci/b2c2/Makefile
index b894320a5f21..35d6835ae43e 100644
--- a/drivers/media/pci/b2c2/Makefile
+++ b/drivers/media/pci/b2c2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ifneq ($(CONFIG_DVB_B2C2_FLEXCOP_PCI),)
b2c2-flexcop-pci-objs += flexcop-dma.o
endif
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index 913dc97f8b49..f07610a1646d 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-dma.c - configuring and controlling the DMA of the FlexCop
diff --git a/drivers/media/pci/bt8xx/Makefile b/drivers/media/pci/bt8xx/Makefile
index 2d4c3dd88be1..009f1dc1521f 100644
--- a/drivers/media/pci/bt8xx/Makefile
+++ b/drivers/media/pci/bt8xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
bttv-risc.o bttv-vbi.o bttv-i2c.o bttv-gpio.o \
bttv-input.o bttv-audio-hook.o btcx-risc.o
diff --git a/drivers/media/pci/bt8xx/btcx-risc.h b/drivers/media/pci/bt8xx/btcx-risc.h
index 1ed7a000160a..dc774a64cd1f 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.h
+++ b/drivers/media/pci/bt8xx/btcx-risc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
struct btcx_riscmem {
unsigned int size;
__le32 *cpu;
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 227086a2e99c..b366a7e1d976 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -3652,9 +3652,9 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
wake_up(&wakeup->vb.done);
}
-static void bttv_irq_timeout(unsigned long data)
+static void bttv_irq_timeout(struct timer_list *t)
{
- struct bttv *btv = (struct bttv *)data;
+ struct bttv *btv = from_timer(btv, t, timeout);
struct bttv_buffer_set old,new;
struct bttv_buffer *ovbi;
struct bttv_buffer *item;
@@ -4043,7 +4043,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
INIT_LIST_HEAD(&btv->capture);
INIT_LIST_HEAD(&btv->vcapture);
- setup_timer(&btv->timeout, bttv_irq_timeout, (unsigned long)btv);
+ timer_setup(&btv->timeout, bttv_irq_timeout, 0);
btv->i2c_rc = -1;
btv->tuner_type = UNSET;
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 73d655d073d6..ac7674700685 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -133,10 +133,10 @@ void bttv_input_irq(struct bttv *btv)
ir_handle_key(btv);
}
-static void bttv_input_timer(unsigned long data)
+static void bttv_input_timer(struct timer_list *t)
{
- struct bttv *btv = (struct bttv*)data;
- struct bttv_ir *ir = btv->remote;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
+ struct bttv *btv = ir->btv;
if (btv->c.type == BTTV_BOARD_ENLTV_FM_2)
ir_enltv_handle_key(btv);
@@ -189,9 +189,9 @@ static u32 bttv_rc5_decode(unsigned int code)
return rc5;
}
-static void bttv_rc5_timer_end(unsigned long data)
+static void bttv_rc5_timer_end(struct timer_list *t)
{
- struct bttv_ir *ir = (struct bttv_ir *)data;
+ struct bttv_ir *ir = from_timer(ir, t, timer);
ktime_t tv;
u32 gap, rc5, scancode;
u8 toggle, command, system;
@@ -296,15 +296,15 @@ static int bttv_rc5_irq(struct bttv *btv)
/* ---------------------------------------------------------------------- */
-static void bttv_ir_start(struct bttv *btv, struct bttv_ir *ir)
+static void bttv_ir_start(struct bttv_ir *ir)
{
if (ir->polling) {
- setup_timer(&ir->timer, bttv_input_timer, (unsigned long)btv);
+ timer_setup(&ir->timer, bttv_input_timer, 0);
ir->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
/* set timer_end for code completion */
- setup_timer(&ir->timer, bttv_rc5_timer_end, (unsigned long)ir);
+ timer_setup(&ir->timer, bttv_rc5_timer_end, 0);
ir->shift_by = 1;
ir->rc5_remote_gap = ir_rc5_remote_gap;
}
@@ -531,6 +531,7 @@ int bttv_input_init(struct bttv *btv)
/* init input device */
ir->dev = rc;
+ ir->btv = btv;
snprintf(ir->name, sizeof(ir->name), "bttv IR (card=%d)",
btv->c.type);
@@ -553,7 +554,7 @@ int bttv_input_init(struct bttv *btv)
rc->driver_name = MODULE_NAME;
btv->remote = ir;
- bttv_ir_start(btv, ir);
+ bttv_ir_start(ir);
/* all done */
err = rc_register_device(rc);
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index e77129c92fa0..67c6583f1d79 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -233,7 +233,7 @@ static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer
bttv_dma_free(q,fh->btv,buf);
}
-struct videobuf_queue_ops bttv_vbi_qops = {
+const struct videobuf_queue_ops bttv_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h
index 91301c3cad1e..eb67e362acf7 100644
--- a/drivers/media/pci/bt8xx/bttv.h
+++ b/drivers/media/pci/bt8xx/bttv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* bttv - Bt848 frame grabber driver
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 9efc4559fa8e..cb1b5e611130 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -122,6 +122,7 @@ struct bttv_format {
struct bttv_ir {
struct rc_dev *dev;
+ struct bttv *btv;
struct timer_list timer;
char name[32];
@@ -281,7 +282,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
-extern struct videobuf_queue_ops bttv_vbi_qops;
+extern const struct videobuf_queue_ops bttv_vbi_qops;
/* ---------------------------------------------------------- */
/* bttv-gpio.c */
diff --git a/drivers/media/pci/bt8xx/dst_priv.h b/drivers/media/pci/bt8xx/dst_priv.h
index 3974a4c6ebe7..a4319d41d141 100644
--- a/drivers/media/pci/bt8xx/dst_priv.h
+++ b/drivers/media/pci/bt8xx/dst_priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* dst-bt878.h: part of the DST driver for the TwinHan DST Frontend
*
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 98b6cb9505d1..3f16cf3f6d74 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -738,9 +738,6 @@ static int cobalt_probe(struct pci_dev *pci_dev,
goto err_i2c;
}
- retval = v4l2_device_register_subdev_nodes(&cobalt->v4l2_dev);
- if (retval)
- goto err_i2c;
retval = cobalt_nodes_register(cobalt);
if (retval) {
cobalt_err("Error %d registering device nodes\n", retval);
@@ -767,8 +764,6 @@ err_pci:
err_wq:
destroy_workqueue(cobalt->irq_work_queues);
err:
- if (retval == 0)
- retval = -ENODEV;
cobalt_err("error %d on initialization\n", retval);
v4l2_device_unregister(&cobalt->v4l2_dev);
diff --git a/drivers/media/pci/cx18/Makefile b/drivers/media/pci/cx18/Makefile
index d3ff1545c2c5..98914a40f6ac 100644
--- a/drivers/media/pci/cx18/Makefile
+++ b/drivers/media/pci/cx18/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cx18-objs := cx18-driver.o cx18-cards.o cx18-i2c.o cx18-firmware.o cx18-gpio.o \
cx18-queue.o cx18-streams.o cx18-fileops.o cx18-ioctl.o cx18-controls.o \
cx18-mailbox.o cx18-vbi.o cx18-audio.o cx18-video.o cx18-irq.o \
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8654710464cc..8f314ca320c7 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -255,7 +255,7 @@ static void request_module_async(struct work_struct *work)
request_module("cx18-alsa");
/* Initialize cx18-alsa for this instance of the cx18 device */
- if (cx18_ext_init != NULL)
+ if (cx18_ext_init)
cx18_ext_init(dev);
}
@@ -291,11 +291,11 @@ int cx18_msleep_timeout(unsigned int msecs, int intr)
/* Release ioremapped memory */
static void cx18_iounmap(struct cx18 *cx)
{
- if (cx == NULL)
+ if (!cx)
return;
/* Release io memory */
- if (cx->enc_mem != NULL) {
+ if (cx->enc_mem) {
CX18_DEBUG_INFO("releasing enc_mem\n");
iounmap(cx->enc_mem);
cx->enc_mem = NULL;
@@ -649,15 +649,15 @@ static void cx18_process_options(struct cx18 *cx)
CX18_INFO("User specified %s card\n", cx->card->name);
else if (cx->options.cardtype != 0)
CX18_ERR("Unknown user specified type, trying to autodetect card\n");
- if (cx->card == NULL) {
+ if (!cx->card) {
if (cx->pci_dev->subsystem_vendor == CX18_PCI_ID_HAUPPAUGE) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_INFO("Autodetected Hauppauge card\n");
}
}
- if (cx->card == NULL) {
+ if (!cx->card) {
for (i = 0; (cx->card = cx18_get_card(i)); i++) {
- if (cx->card->pci_list == NULL)
+ if (!cx->card->pci_list)
continue;
for (j = 0; cx->card->pci_list[j].device; j++) {
if (cx->pci_dev->device !=
@@ -676,7 +676,7 @@ static void cx18_process_options(struct cx18 *cx)
}
done:
- if (cx->card == NULL) {
+ if (!cx->card) {
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
CX18_ERR("Unknown card: vendor/device: [%04x:%04x]\n",
cx->pci_dev->vendor, cx->pci_dev->device);
@@ -698,7 +698,7 @@ static int cx18_create_in_workq(struct cx18 *cx)
snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
cx->v4l2_dev.name);
cx->in_work_queue = alloc_ordered_workqueue("%s", 0, cx->in_workq_name);
- if (cx->in_work_queue == NULL) {
+ if (!cx->in_work_queue) {
CX18_ERR("Unable to create incoming mailbox handler thread\n");
return -ENOMEM;
}
@@ -909,12 +909,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
return -ENOMEM;
}
- cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
- if (cx == NULL) {
- printk(KERN_ERR "cx18: cannot manage card %d, out of memory\n",
- i);
+ cx = kzalloc(sizeof(*cx), GFP_ATOMIC);
+ if (!cx)
return -ENOMEM;
- }
+
cx->pci_dev = pci_dev;
cx->instance = i;
@@ -1256,7 +1254,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_STREAMS; i++)
- if (&cx->streams[i].video_dev != NULL)
+ if (&cx->streams[i].video_dev)
cancel_work_sync(&cx->streams[i].out_work_order);
}
@@ -1301,7 +1299,7 @@ static void cx18_remove(struct pci_dev *pci_dev)
pci_disable_device(cx->pci_dev);
- if (cx->vbi.sliced_mpeg_data[0] != NULL)
+ if (cx->vbi.sliced_mpeg_data[0])
for (i = 0; i < CX18_VBI_FRAMES; i++)
kfree(cx->vbi.sliced_mpeg_data[i]);
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 98467b2089fa..4f9c2395941b 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -684,9 +684,9 @@ int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
}
-void cx18_vb_timeout(unsigned long data)
+void cx18_vb_timeout(struct timer_list *t)
{
- struct cx18_stream *s = (struct cx18_stream *)data;
+ struct cx18_stream *s = from_timer(s, t, vb_timeout);
struct cx18_videobuf_buffer *buf;
unsigned long flags;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 58b00b433708..37ef34e866cb 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -29,7 +29,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
void cx18_unmute(struct cx18 *cx);
int cx18_v4l2_mmap(struct file *file, struct vm_area_struct *vma);
-void cx18_vb_timeout(unsigned long data);
+void cx18_vb_timeout(struct timer_list *t);
/* Shared with cx18-alsa module */
int cx18_claim_stream(struct cx18_open_id *id, int type);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 8385411af641..f35f78d66985 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -282,7 +282,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
INIT_WORK(&s->out_work_order, cx18_out_work_handler);
INIT_LIST_HEAD(&s->vb_capture);
- setup_timer(&s->vb_timeout, cx18_vb_timeout, (unsigned long)s);
+ timer_setup(&s->vb_timeout, cx18_vb_timeout, 0);
spin_lock_init(&s->vb_lock);
if (type == CX18_ENC_STREAM_TYPE_YUV) {
spin_lock_init(&s->vbuf_q_lock);
diff --git a/drivers/media/pci/cx23885/Makefile b/drivers/media/pci/cx23885/Makefile
index a2cbdcf15a8c..b8bf7806124b 100644
--- a/drivers/media/pci/cx23885/Makefile
+++ b/drivers/media/pci/cx23885/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cx23885-objs := cx23885-cards.o cx23885-video.o cx23885-vbi.o \
cx23885-core.o cx23885-i2c.o cx23885-dvb.o cx23885-417.o \
cx23885-ioctl.o cx23885-ir.o cx23885-av.o cx23885-input.o \
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 78a8836d03e4..28eab9c518c5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1323,7 +1323,7 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
static void tbs_card_init(struct cx23885_dev *dev)
{
int i;
- const u8 buf[] = {
+ static const u8 buf[] = {
0xe0, 0x06, 0x66, 0x33, 0x65,
0x01, 0x17, 0x06, 0xde};
diff --git a/drivers/media/pci/cx23885/cx23885-f300.h b/drivers/media/pci/cx23885/cx23885-f300.h
index be14d7de7cd8..34aef3610aa9 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.h
+++ b/drivers/media/pci/cx23885/cx23885-f300.h
@@ -1,2 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int f300_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage);
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 0f21467ae88e..ef863492c0ac 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -270,7 +270,7 @@ static const struct i2c_adapter cx23885_i2c_adap_template = {
.algo = &cx23885_i2c_algo_template,
};
-static struct i2c_client cx23885_i2c_client_template = {
+static const struct i2c_client cx23885_i2c_client_template = {
.name = "cx23885 internal",
};
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 369e545cac04..70f9f13bded3 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -254,7 +254,7 @@ static void cx23885_stop_streaming(struct vb2_queue *q)
}
-struct vb2_ops cx23885_vbi_qops = {
+const struct vb2_ops cx23885_vbi_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_finish = buffer_finish,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index cb714ab60d69..6aab713e0476 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -591,7 +591,7 @@ int cx23885_set_tvnorm(struct cx23885_dev *dev, v4l2_std_id norm);
extern int cx23885_vbi_fmt(struct file *file, void *priv,
struct v4l2_format *f);
extern void cx23885_vbi_timeout(unsigned long data);
-extern struct vb2_ops cx23885_vbi_qops;
+extern const struct vb2_ops cx23885_vbi_qops;
extern int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status);
/* cx23885-i2c.c */
diff --git a/drivers/media/pci/cx25821/Makefile b/drivers/media/pci/cx25821/Makefile
index c8f8598a2b86..d14d65b1b042 100644
--- a/drivers/media/pci/cx25821/Makefile
+++ b/drivers/media/pci/cx25821/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cx25821-y := cx25821-core.o cx25821-cards.o cx25821-i2c.o \
cx25821-gpio.o cx25821-medusa-video.o \
cx25821-video.o
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 000049d3c71b..31479a41f359 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -291,7 +291,7 @@ static const struct i2c_adapter cx25821_i2c_adap_template = {
.algo = &cx25821_i2c_algo_template,
};
-static struct i2c_client cx25821_i2c_client_template = {
+static const struct i2c_client cx25821_i2c_client_template = {
.name = "cx25821 internal",
};
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index d3679c3ee248..86646eee4e6b 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cx88xx-objs := cx88-cards.o cx88-core.o cx88-i2c.o cx88-tvaudio.o \
cx88-dsp.o cx88-input.o
cx8800-objs := cx88-video.o cx88-vbi.o
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index e02449bf2041..4e9953e61a12 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -593,11 +593,11 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_proto *protocol,
void cx88_i2c_init_ir(struct cx88_core *core)
{
struct i2c_board_info info;
- const unsigned short default_addr_list[] = {
+ static const unsigned short default_addr_list[] = {
0x18, 0x6b, 0x71,
I2C_CLIENT_END
};
- const unsigned short pvr2000_addr_list[] = {
+ static const unsigned short pvr2000_addr_list[] = {
0x18, 0x1a,
I2C_CLIENT_END
};
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index 2d0ef19e6d65..c637679b01b2 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
*/
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 09703312a3f1..7453b65104ff 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the ddbridge device driver
#
diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h
index a4c6bbe09168..b3646c04f1a7 100644
--- a/drivers/media/pci/ddbridge/ddbridge-io.h
+++ b/drivers/media/pci/ddbridge/ddbridge-io.h
@@ -47,12 +47,12 @@ static inline void ddbwritel(struct ddb *dev, u32 val, u32 adr)
static inline void ddbcpyto(struct ddb *dev, u32 adr, void *src, long count)
{
- return memcpy_toio(dev->regs + adr, src, count);
+ memcpy_toio(dev->regs + adr, src, count);
}
static inline void ddbcpyfrom(struct ddb *dev, void *dst, u32 adr, long count)
{
- return memcpy_fromio(dst, dev->regs + adr, count);
+ memcpy_fromio(dst, dev->regs + adr, count);
}
static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
diff --git a/drivers/media/pci/ivtv/Makefile b/drivers/media/pci/ivtv/Makefile
index 0eaa88298b7e..48f8a23f9a0f 100644
--- a/drivers/media/pci/ivtv/Makefile
+++ b/drivers/media/pci/ivtv/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ivtv-objs := ivtv-routing.o ivtv-cards.o ivtv-controls.o \
ivtv-driver.o ivtv-fileops.o ivtv-firmware.o \
ivtv-gpio.o ivtv-i2c.o ivtv-ioctl.o ivtv-irq.o \
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 54dcac4b2229..6b2ffdc96961 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -770,8 +770,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
init_waitqueue_head(&itv->event_waitq);
init_waitqueue_head(&itv->vsync_waitq);
init_waitqueue_head(&itv->dma_waitq);
- setup_timer(&itv->dma_timer, ivtv_unfinished_dma,
- (unsigned long)itv);
+ timer_setup(&itv->dma_timer, ivtv_unfinished_dma, 0);
itv->cur_dma_stream = -1;
itv->cur_pio_stream = -1;
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index 5a35e366f4c0..893962ac85de 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -700,7 +700,7 @@ static const struct i2c_algo_bit_data ivtv_i2c_algo_template = {
.timeout = IVTV_ALGO_BIT_TIMEOUT * HZ, /* jiffies */
};
-static struct i2c_client ivtv_i2c_client_template = {
+static const struct i2c_client ivtv_i2c_client_template = {
.name = "ivtv internal",
};
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 6efe1f71262c..63b09bf73bf0 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -1074,9 +1074,9 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
-void ivtv_unfinished_dma(unsigned long arg)
+void ivtv_unfinished_dma(struct timer_list *t)
{
- struct ivtv *itv = (struct ivtv *)arg;
+ struct ivtv *itv = from_timer(itv, t, dma_timer);
if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
return;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index 1e84433737cc..bcab5f07d37f 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -48,6 +48,6 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
void ivtv_irq_work_handler(struct kthread_work *work);
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
-void ivtv_unfinished_dma(unsigned long arg);
+void ivtv_unfinished_dma(struct timer_list *t);
#endif
diff --git a/drivers/media/pci/mantis/Makefile b/drivers/media/pci/mantis/Makefile
index f715051e4453..a684dc2ec79e 100644
--- a/drivers/media/pci/mantis/Makefile
+++ b/drivers/media/pci/mantis/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
mantis_core-objs := mantis_ioc.o \
mantis_uart.o \
mantis_dma.o \
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 11e987860b23..ed855e3df558 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -72,7 +72,7 @@ static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
struct mantis_ca *ca;
mantis = (struct mantis_pci *) dev_id;
- if (unlikely(mantis == NULL)) {
+ if (unlikely(!mantis)) {
dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
return IRQ_NONE;
}
@@ -161,11 +161,10 @@ static int hopper_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis) {
err = -ENOMEM;
goto fail0;
}
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index adc980d33711..4ce8a90d69dc 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -171,13 +171,11 @@ static int mantis_pci_probe(struct pci_dev *pdev,
struct mantis_pci_drvdata *drvdata;
struct mantis_pci *mantis;
struct mantis_hwconfig *config;
- int err = 0;
+ int err;
- mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
- if (mantis == NULL) {
- printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ mantis = kzalloc(sizeof(*mantis), GFP_KERNEL);
+ if (!mantis)
return -ENOMEM;
- }
drvdata = (void *)pci_id->driver_data;
mantis->num = devs;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 49e047e4a81e..23999a8cef37 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1626,35 +1626,31 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
meye.mchip_dev = pcidev;
meye.grab_temp = vmalloc(MCHIP_NB_PAGES_MJPEG * PAGE_SIZE);
- if (!meye.grab_temp) {
- v4l2_err(v4l2_dev, "grab buffer allocation failed\n");
+ if (!meye.grab_temp)
goto outvmalloc;
- }
spin_lock_init(&meye.grabq_lock);
if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc1;
- }
+
spin_lock_init(&meye.doneq_lock);
if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS,
- GFP_KERNEL)) {
- v4l2_err(v4l2_dev, "fifo allocation failed\n");
+ GFP_KERNEL))
goto outkfifoalloc2;
- }
meye.vdev = meye_template;
meye.vdev.v4l2_dev = &meye.v4l2_dev;
- ret = -EIO;
- if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
+ ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
goto outsonypienable;
}
- if ((ret = pci_enable_device(meye.mchip_dev))) {
+ ret = pci_enable_device(meye.mchip_dev);
+ if (ret) {
v4l2_err(v4l2_dev, "meye: pci_enable_device failed\n");
goto outenabledev;
}
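
The meye changes also split "if ((ret = fn()))" into two statements, the preferred kernel style because an assignment buried in a condition is easy to misread as a comparison. Roughly, under the same assumptions:

#include <linux/pci.h>

static int my_enable(struct pci_dev *pdev)
{
	int err;

	/* keep the call and the test on separate, obvious lines */
	err = pci_enable_device(pdev);
	if (err)
		return err;

	return 0;
}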
diff --git a/drivers/media/pci/netup_unidvb/Kconfig b/drivers/media/pci/netup_unidvb/Kconfig
index 0ad37714c7fd..b663154d0cc4 100644
--- a/drivers/media/pci/netup_unidvb/Kconfig
+++ b/drivers/media/pci/netup_unidvb/Kconfig
@@ -1,8 +1,8 @@
config DVB_NETUP_UNIDVB
tristate "NetUP Universal DVB card support"
depends on DVB_CORE && VIDEO_DEV && PCI && I2C && SPI_MASTER
- select VIDEOBUF2_DVB
- select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DVB
+ select VIDEOBUF2_VMALLOC
select DVB_HORUS3A if MEDIA_SUBDRV_AUTOSELECT
select DVB_ASCOT2E if MEDIA_SUBDRV_AUTOSELECT
select DVB_HELENE if MEDIA_SUBDRV_AUTOSELECT
@@ -10,8 +10,8 @@ config DVB_NETUP_UNIDVB
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for NetUP PCI express Universal DVB card.
- help
- Say Y when you want to support NetUP Dual Universal DVB card
- Card can receive two independent streams in following standards:
+
+ Say Y when you want to support NetUP Dual Universal DVB card.
+ Card can receive two independent streams in following standards:
DVB-S/S2, T/T2, C/C2
- Two CI slots available for CAM modules.
+ Two CI slots available for CAM modules.
diff --git a/drivers/media/pci/netup_unidvb/Makefile b/drivers/media/pci/netup_unidvb/Makefile
index ee6ae0501eae..07d3f1eb728b 100644
--- a/drivers/media/pci/netup_unidvb/Makefile
+++ b/drivers/media/pci/netup_unidvb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
netup-unidvb-objs += netup_unidvb_core.o
netup-unidvb-objs += netup_unidvb_i2c.o
netup-unidvb-objs += netup_unidvb_ci.o
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60e6cd5b3a03..11829c0fa138 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -638,9 +638,9 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
spin_unlock_irqrestore(&dma->lock, flags);
}
-static void netup_unidvb_dma_timeout(unsigned long data)
+static void netup_unidvb_dma_timeout(struct timer_list *t)
{
- struct netup_dma *dma = (struct netup_dma *)data;
+ struct netup_dma *dma = from_timer(dma, t, timeout);
struct netup_unidvb_dev *ndev = dma->ndev;
dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
@@ -664,8 +664,7 @@ static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
spin_lock_init(&dma->lock);
INIT_WORK(&dma->work, netup_unidvb_dma_worker);
INIT_LIST_HEAD(&dma->free_buffers);
- setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
- (unsigned long)dma);
+ timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
dma->ring_buffer_size = ndev->dma_size / 2;
dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
diff --git a/drivers/media/pci/ngene/Makefile b/drivers/media/pci/ngene/Makefile
index 5c0b5d6b9d69..dbdf284970f8 100644
--- a/drivers/media/pci/ngene/Makefile
+++ b/drivers/media/pci/ngene/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the nGene device driver
#
diff --git a/drivers/media/pci/pt3/Makefile b/drivers/media/pci/pt3/Makefile
index 396f146b1c18..aded8752ac2b 100644
--- a/drivers/media/pci/pt3/Makefile
+++ b/drivers/media/pci/pt3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
earth-pt3-objs += pt3.o pt3_i2c.o pt3_dma.o
diff --git a/drivers/media/pci/saa7134/Makefile b/drivers/media/pci/saa7134/Makefile
index 09c43da67588..dbaadddf4320 100644
--- a/drivers/media/pci/saa7134/Makefile
+++ b/drivers/media/pci/saa7134/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
saa7134-y += saa7134-cards.o saa7134-core.o saa7134-i2c.o
saa7134-y += saa7134-ts.o saa7134-tvaudio.o saa7134-vbi.o
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 7976c5a12ca8..9e76de2411ae 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -338,9 +338,9 @@ void saa7134_buffer_next(struct saa7134_dev *dev,
}
}
-void saa7134_buffer_timeout(unsigned long data)
+void saa7134_buffer_timeout(struct timer_list *t)
{
- struct saa7134_dmaqueue *q = (struct saa7134_dmaqueue *)data;
+ struct saa7134_dmaqueue *q = from_timer(q, t, timeout);
struct saa7134_dev *dev = q->dev;
unsigned long flags;
@@ -378,7 +378,7 @@ void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q)
}
}
spin_unlock_irqrestore(&dev->slock, flags);
- saa7134_buffer_timeout((unsigned long)q); /* also calls del_timer(&q->timeout) */
+ saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */
}
EXPORT_SYMBOL_GPL(saa7134_stop_streaming);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8f2ed632840f..cf1e526de56a 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -345,7 +345,7 @@ static const struct i2c_adapter saa7134_adap_template = {
.algo = &saa7134_algo,
};
-static struct i2c_client saa7134_client_template = {
+static const struct i2c_client saa7134_client_template = {
.name = "saa7134 internal",
};
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 9337e4615519..2d5abeddc079 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -447,10 +447,10 @@ void saa7134_input_irq(struct saa7134_dev *dev)
}
}
-static void saa7134_input_timer(unsigned long data)
+static void saa7134_input_timer(struct timer_list *t)
{
- struct saa7134_dev *dev = (struct saa7134_dev *)data;
- struct saa7134_card_ir *ir = dev->remote;
+ struct saa7134_card_ir *ir = from_timer(ir, t, timer);
+ struct saa7134_dev *dev = ir->dev->priv;
build_key(dev);
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
@@ -507,8 +507,7 @@ static int __saa7134_ir_start(void *priv)
ir->running = true;
if (ir->polling) {
- setup_timer(&ir->timer, saa7134_input_timer,
- (unsigned long)dev);
+ timer_setup(&ir->timer, saa7134_input_timer, 0);
ir->timer.expires = jiffies + HZ;
add_timer(&ir->timer);
}
diff --git a/drivers/media/pci/saa7134/saa7134-reg.h b/drivers/media/pci/saa7134/saa7134-reg.h
index b6ea6f4f9b6c..56b12641d733 100644
--- a/drivers/media/pci/saa7134/saa7134-reg.h
+++ b/drivers/media/pci/saa7134/saa7134-reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* philips saa7134 registers
diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
index 7414878af9e0..2be703617e29 100644
--- a/drivers/media/pci/saa7134/saa7134-ts.c
+++ b/drivers/media/pci/saa7134/saa7134-ts.c
@@ -223,8 +223,7 @@ int saa7134_ts_init1(struct saa7134_dev *dev)
dev->ts.nr_packets = ts_nr_packets;
INIT_LIST_HEAD(&dev->ts_q.queue);
- setup_timer(&dev->ts_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->ts_q));
+ timer_setup(&dev->ts_q.timeout, saa7134_buffer_timeout, 0);
dev->ts_q.dev = dev;
dev->ts_q.need_two = 1;
dev->ts_started = 0;
diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
index bcad9b2d9bb3..57bea543c39b 100644
--- a/drivers/media/pci/saa7134/saa7134-vbi.c
+++ b/drivers/media/pci/saa7134/saa7134-vbi.c
@@ -165,7 +165,7 @@ static int buffer_init(struct vb2_buffer *vb2)
return 0;
}
-struct vb2_ops saa7134_vbi_qops = {
+const struct vb2_ops saa7134_vbi_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -181,8 +181,7 @@ struct vb2_ops saa7134_vbi_qops = {
int saa7134_vbi_init1(struct saa7134_dev *dev)
{
INIT_LIST_HEAD(&dev->vbi_q.queue);
- setup_timer(&dev->vbi_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->vbi_q));
+ timer_setup(&dev->vbi_q.timeout, saa7134_buffer_timeout, 0);
dev->vbi_q.dev = dev;
if (vbibufs < 2)
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 51d42bbf969e..82d2a24644e4 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2145,8 +2145,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
dev->automute = 0;
INIT_LIST_HEAD(&dev->video_q.queue);
- setup_timer(&dev->video_q.timeout, saa7134_buffer_timeout,
- (unsigned long)(&dev->video_q));
+ timer_setup(&dev->video_q.timeout, saa7134_buffer_timeout, 0);
dev->video_q.dev = dev;
dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
dev->width = 720;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 816b5282d671..39c36e6aefbe 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -773,7 +773,7 @@ int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q,
unsigned int state);
void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
-void saa7134_buffer_timeout(unsigned long data);
+void saa7134_buffer_timeout(struct timer_list *t);
void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q);
int saa7134_set_dmabits(struct saa7134_dev *dev);
@@ -870,7 +870,7 @@ int saa7134_ts_stop(struct saa7134_dev *dev);
/* ----------------------------------------------------------- */
/* saa7134-vbi.c */
-extern struct vb2_ops saa7134_vbi_qops;
+extern const struct vb2_ops saa7134_vbi_qops;
extern struct video_device saa7134_vbi_template;
int saa7134_vbi_init1(struct saa7134_dev *dev);
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index f708cab01fef..d31a2d4494d1 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -260,11 +260,10 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE("\n");
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("not enough kernel memory in hexium_attach()\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
+
dev->ext_priv = hexium;
/* enable i2c-port pins */
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index 01f01580c7ca..043318aa19e2 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -219,11 +219,9 @@ static int hexium_probe(struct saa7146_dev *dev)
return -EFAULT;
}
- hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
- if (NULL == hexium) {
- pr_err("hexium_probe: not enough kernel memory\n");
+ hexium = kzalloc(sizeof(*hexium), GFP_KERNEL);
+ if (!hexium)
return -ENOMEM;
- }
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
@@ -268,7 +266,9 @@ static int hexium_probe(struct saa7146_dev *dev)
/* check if this is an old hexium Orion card by looking at
a saa7110 at address 0x4e */
- if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
+ err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ,
+ 0x00, I2C_SMBUS_BYTE_DATA, &data);
+ if (err == 0) {
pr_info("device is a Hexium HV-PCI6/Orion (old)\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
diff --git a/drivers/media/pci/saa7164/Makefile b/drivers/media/pci/saa7164/Makefile
index ba0e33a1ee24..3896bcdb99d2 100644
--- a/drivers/media/pci/saa7164/Makefile
+++ b/drivers/media/pci/saa7164/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
saa7164-objs := saa7164-cards.o saa7164-core.o saa7164-i2c.o saa7164-dvb.o \
saa7164-fw.o saa7164-bus.o saa7164-cmd.o saa7164-api.o \
saa7164-buffer.o saa7164-encoder.o saa7164-vbi.o
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index a0d2129c6ca9..c83b2e914dcb 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -98,11 +98,9 @@ struct saa7164_buffer *saa7164_buffer_alloc(struct saa7164_port *port,
goto ret;
}
- buf = kzalloc(sizeof(struct saa7164_buffer), GFP_KERNEL);
- if (!buf) {
- log_warn("%s() SAA_ERR_NO_RESOURCES\n", __func__);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
goto ret;
- }
buf->idx = -1;
buf->port = port;
@@ -283,7 +281,7 @@ struct saa7164_user_buffer *saa7164_buffer_alloc_user(struct saa7164_dev *dev,
{
struct saa7164_user_buffer *buf;
- buf = kzalloc(sizeof(struct saa7164_user_buffer), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return NULL;
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 4bcde7c79dc3..6d13cbb9d010 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -84,7 +84,7 @@ static const struct i2c_adapter saa7164_i2c_adap_template = {
.algo = &saa7164_i2c_algo_template,
};
-static struct i2c_client saa7164_i2c_client_template = {
+static const struct i2c_client saa7164_i2c_client_template = {
.name = "saa7164 internal",
};
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
index 013bc3fe4294..6006aac3c41f 100644
--- a/drivers/media/pci/smipcie/Makefile
+++ b/drivers/media/pci/smipcie/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
smipcie-objs := smipcie-main.o smipcie-ir.o
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 3cf617737f7c..0b805339c123 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel SAA7146 FULL TS DVB device driver
# and the AV7110 DVB device driver
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index f46947d8adf8..6d415bdeef18 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -347,9 +347,9 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
static inline void print_time(char *s)
{
#ifdef DEBUG_TIMING
- struct timeval tv;
- do_gettimeofday(&tv);
- printk("%s: %d.%d\n", s, (int)tv.tv_sec, (int)tv.tv_usec);
+ struct timespec64 ts;
+ ktime_get_real_ts64(&ts);
+ printk("%s: %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
#endif
}
@@ -1224,7 +1224,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
dprintk(2, "av7110: %p\n", budget);
spin_lock(&budget->feedlock1);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock1);
return status;
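
print_time() above moves from do_gettimeofday()/struct timeval, which overflows in 2038 on 32-bit systems, to ktime_get_real_ts64(). A minimal sketch of the replacement pattern (the function name here is illustrative, the ktime API is real):

#include <linux/timekeeping.h>
#include <linux/printk.h>

static void show_wallclock(const char *tag)
{
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* 64-bit seconds, safe past 2038 */
	pr_debug("%s: %lld.%09ld\n", tag, (s64)ts.tv_sec, ts.tv_nsec);
}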
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 347827925c14..cbb150d6cbb1 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AV7110_H_
#define _AV7110_H_
@@ -93,7 +94,7 @@ struct infrared {
u8 inversion;
u16 last_key;
u16 last_toggle;
- u8 delay_timer_finished;
+ bool keypressed;
};
diff --git a/drivers/media/pci/ttpci/av7110_av.h b/drivers/media/pci/ttpci/av7110_av.h
index f52276f47709..71bbd4391f57 100644
--- a/drivers/media/pci/ttpci/av7110_av.h
+++ b/drivers/media/pci/ttpci/av7110_av.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AV7110_AV_H_
#define _AV7110_AV_H_
diff --git a/drivers/media/pci/ttpci/av7110_ca.h b/drivers/media/pci/ttpci/av7110_ca.h
index 70ee855ece1b..a6e3f2955730 100644
--- a/drivers/media/pci/ttpci/av7110_ca.h
+++ b/drivers/media/pci/ttpci/av7110_ca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AV7110_CA_H_
#define _AV7110_CA_H_
diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/media/pci/ttpci/av7110_hw.h
index ccb148059406..6380d8950c69 100644
--- a/drivers/media/pci/ttpci/av7110_hw.h
+++ b/drivers/media/pci/ttpci/av7110_hw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AV7110_HW_H_
#define _AV7110_HW_H_
diff --git a/drivers/media/pci/ttpci/av7110_ipack.c b/drivers/media/pci/ttpci/av7110_ipack.c
index 699ef8b5b99a..5aff26574fe1 100644
--- a/drivers/media/pci/ttpci/av7110_ipack.c
+++ b/drivers/media/pci/ttpci/av7110_ipack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "dvb_filter.h"
#include "av7110_ipack.h"
#include <linux/string.h> /* for memcpy() */
diff --git a/drivers/media/pci/ttpci/av7110_ipack.h b/drivers/media/pci/ttpci/av7110_ipack.h
index becf94d3fdfa..943ec899bb93 100644
--- a/drivers/media/pci/ttpci/av7110_ipack.h
+++ b/drivers/media/pci/ttpci/av7110_ipack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AV7110_IPACK_H_
#define _AV7110_IPACK_H_
diff --git a/drivers/media/pci/ttpci/av7110_ir.c b/drivers/media/pci/ttpci/av7110_ir.c
index ca05198de2c2..ee414803e6b5 100644
--- a/drivers/media/pci/ttpci/av7110_ir.c
+++ b/drivers/media/pci/ttpci/av7110_ir.c
@@ -84,15 +84,16 @@ static u16 default_key_map [256] = {
/* key-up timer */
-static void av7110_emit_keyup(unsigned long parm)
+static void av7110_emit_keyup(struct timer_list *t)
{
- struct infrared *ir = (struct infrared *) parm;
+ struct infrared *ir = from_timer(ir, t, keyup_timer);
- if (!ir || !test_bit(ir->last_key, ir->input_dev->key))
+ if (!ir || !ir->keypressed)
return;
input_report_key(ir->input_dev, ir->last_key, 0);
input_sync(ir->input_dev);
+ ir->keypressed = false;
}
@@ -152,29 +153,18 @@ static void av7110_emit_key(unsigned long parm)
return;
}
- if (timer_pending(&ir->keyup_timer)) {
- del_timer(&ir->keyup_timer);
- if (ir->last_key != keycode || toggle != ir->last_toggle) {
- ir->delay_timer_finished = 0;
- input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
- input_event(ir->input_dev, EV_KEY, keycode, 1);
- input_sync(ir->input_dev);
- } else if (ir->delay_timer_finished) {
- input_event(ir->input_dev, EV_KEY, keycode, 2);
- input_sync(ir->input_dev);
- }
- } else {
- ir->delay_timer_finished = 0;
- input_event(ir->input_dev, EV_KEY, keycode, 1);
- input_sync(ir->input_dev);
- }
+ if (ir->keypressed &&
+ (ir->last_key != keycode || toggle != ir->last_toggle))
+ input_event(ir->input_dev, EV_KEY, ir->last_key, 0);
+ input_event(ir->input_dev, EV_KEY, keycode, 1);
+ input_sync(ir->input_dev);
+
+ ir->keypressed = true;
ir->last_key = keycode;
ir->last_toggle = toggle;
- ir->keyup_timer.expires = jiffies + UP_TIMEOUT;
- add_timer(&ir->keyup_timer);
-
+ mod_timer(&ir->keyup_timer, jiffies + UP_TIMEOUT);
}
@@ -204,16 +194,6 @@ static void input_register_keys(struct infrared *ir)
ir->input_dev->keycodemax = ARRAY_SIZE(ir->key_map);
}
-
-/* called by the input driver after rep[REP_DELAY] ms */
-static void input_repeat_key(unsigned long parm)
-{
- struct infrared *ir = (struct infrared *) parm;
-
- ir->delay_timer_finished = 1;
-}
-
-
/* check for configuration changes */
int av7110_check_ir_config(struct av7110 *av7110, int force)
{
@@ -333,8 +313,7 @@ int av7110_ir_init(struct av7110 *av7110)
av_list[av_cnt++] = av7110;
av7110_check_ir_config(av7110, true);
- setup_timer(&av7110->ir.keyup_timer, av7110_emit_keyup,
- (unsigned long)&av7110->ir);
+ timer_setup(&av7110->ir.keyup_timer, av7110_emit_keyup, 0);
input_dev = input_allocate_device();
if (!input_dev)
@@ -365,8 +344,13 @@ int av7110_ir_init(struct av7110 *av7110)
input_free_device(input_dev);
return err;
}
- input_dev->timer.function = input_repeat_key;
- input_dev->timer.data = (unsigned long) &av7110->ir;
+
+ /*
+ * Input core's default autorepeat is 33 cps with 250 msec
+ * delay, let's adjust to numbers more suitable for remote
+ * control.
+ */
+ input_enable_softrepeat(input_dev, 250, 125);
if (av_cnt == 1) {
e = proc_create("av7110_ir", S_IWUSR, NULL, &av7110_ir_proc_fops);
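
The av7110 IR rework drops the driver's private repeat bookkeeping: key state becomes a simple keypressed flag plus a key-up timer, and auto-repeat is delegated to the input core via input_enable_softrepeat(). A condensed sketch of that division of labour, with illustrative struct and field names:

#include <linux/input.h>
#include <linux/timer.h>

struct my_ir {
	struct input_dev *input;
	struct timer_list keyup_timer;
	u16 last_key;
	bool keypressed;
};

static void my_ir_keyup(struct timer_list *t)
{
	struct my_ir *ir = from_timer(ir, t, keyup_timer);

	if (!ir->keypressed)
		return;
	input_report_key(ir->input, ir->last_key, 0);
	input_sync(ir->input);
	ir->keypressed = false;
}

static void my_ir_setup(struct my_ir *ir)
{
	timer_setup(&ir->keyup_timer, my_ir_keyup, 0);
	/* let the input core generate repeats: 250 ms delay, 125 ms period */
	input_enable_softrepeat(ir->input, 250, 125);
}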
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 97499b2af714..b3dc45b91101 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -330,7 +330,7 @@ static int budget_start_feed(struct dvb_demux_feed *feed)
return -EINVAL;
spin_lock(&budget->feedlock);
- feed->pusi_seen = 0; /* have a clean section start */
+ feed->pusi_seen = false; /* have a clean section start */
if (budget->feeding++ == 0)
status = start_ts_capture(budget);
spin_unlock(&budget->feedlock);
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index d5ae4438153e..fae83866b199 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BUDGET_DVB__
#define __BUDGET_DVB__
diff --git a/drivers/media/pci/ttpci/dvb_filter.c b/drivers/media/pci/ttpci/dvb_filter.c
index b67127b67d4e..8c2eca5dcdc9 100644
--- a/drivers/media/pci/ttpci/dvb_filter.c
+++ b/drivers/media/pci/ttpci/dvb_filter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/drivers/media/pci/tw5864/tw5864-util.c b/drivers/media/pci/tw5864/tw5864-util.c
index 771eef235755..b9cebe9d1740 100644
--- a/drivers/media/pci/tw5864/tw5864-util.c
+++ b/drivers/media/pci/tw5864/tw5864-util.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "tw5864.h"
void tw5864_indir_writeb(struct tw5864_dev *dev, u16 addr, u8 data)
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
index 336e2f9bc1b6..7fb3f07bf022 100644
--- a/drivers/media/pci/tw686x/tw686x-core.c
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -72,12 +72,12 @@ static const char *dma_mode_name(unsigned int mode)
}
}
-static int tw686x_dma_mode_get(char *buffer, struct kernel_param *kp)
+static int tw686x_dma_mode_get(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "%s", dma_mode_name(dma_mode));
}
-static int tw686x_dma_mode_set(const char *val, struct kernel_param *kp)
+static int tw686x_dma_mode_set(const char *val, const struct kernel_param *kp)
{
if (!strcasecmp(val, dma_mode_name(TW686X_DMA_MODE_MEMCPY)))
dma_mode = TW686X_DMA_MODE_MEMCPY;
@@ -126,9 +126,9 @@ void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
* channels "too fast" which makes some TW686x devices very
* angry and freeze the CPU (see note 1).
*/
-static void tw686x_dma_delay(unsigned long data)
+static void tw686x_dma_delay(struct timer_list *t)
{
- struct tw686x_dev *dev = (struct tw686x_dev *)data;
+ struct tw686x_dev *dev = from_timer(dev, t, dma_delay_timer);
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
@@ -325,8 +325,7 @@ static int tw686x_probe(struct pci_dev *pci_dev,
goto iounmap;
}
- setup_timer(&dev->dma_delay_timer,
- tw686x_dma_delay, (unsigned long) dev);
+ timer_setup(&dev->dma_delay_timer, tw686x_dma_delay, 0);
/*
* This must be set right before initializing v4l2_dev.
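
The tw686x dma_mode handlers gain const struct kernel_param *, matching the prototypes module_param_call() expects after the kernel_param ops were constified. A bare-bones sketch of a module parameter with custom get/set callbacks (parameter name is hypothetical):

#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int my_mode;

static int my_mode_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%u", my_mode);
}

static int my_mode_set(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 0, &my_mode);
}

module_param_call(my_mode, my_mode_set, my_mode_get, NULL, 0644);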
diff --git a/drivers/media/pci/tw686x/tw686x-regs.h b/drivers/media/pci/tw686x/tw686x-regs.h
index 15a956642ef4..8adacc928be1 100644
--- a/drivers/media/pci/tw686x/tw686x-regs.h
+++ b/drivers/media/pci/tw686x/tw686x-regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* DMA controller registers */
#define REG8_1(a0) ((const u16[8]) { a0, a0 + 1, a0 + 2, a0 + 3, \
a0 + 4, a0 + 5, a0 + 6, a0 + 7})
diff --git a/drivers/media/pci/zoran/Makefile b/drivers/media/pci/zoran/Makefile
index 44cc13352c88..21ac29a71458 100644
--- a/drivers/media/pci/zoran/Makefile
+++ b/drivers/media/pci/zoran/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
zr36067-objs := zoran_procfs.o zoran_device.o \
zoran_driver.o zoran_card.o
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 81cba177cd90..0cdb7d34926d 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -37,7 +37,7 @@ extern int zr36067_debug;
/* Anybody who uses more than four? */
#define BUZ_MAX 4
-extern struct video_device zoran_template;
+extern const struct video_device zoran_template;
extern int zoran_check_jpg_settings(struct zoran *zr,
struct zoran_jpg_settings *settings,
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index a11cb501c550..d07840072337 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2839,7 +2839,7 @@ static const struct v4l2_file_operations zoran_fops = {
.poll = zoran_poll,
};
-struct video_device zoran_template = {
+const struct video_device zoran_template = {
.name = ZORAN_NAME,
.fops = &zoran_fops,
.ioctl_ops = &zoran_ioctl_ops,
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 3c4f7fa7b9d8..fd0c99859d6f 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -458,6 +458,21 @@ config VIDEO_RENESAS_VSP1
To compile this driver as a module, choose M here: the module
will be called vsp1.
+config VIDEO_ROCKCHIP_RGA
+ tristate "Rockchip Raster 2d Graphic Acceleration Unit"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ default n
+ ---help---
+ This is a v4l2 driver for Rockchip SOC RGA 2d graphics accelerator.
+ Rockchip RGA is a separate 2D raster graphic acceleration unit.
+ It accelerates 2D graphics operations, such as point/line drawing,
+ image scaling, rotation, BitBLT, alpha blending and image blur/sharpness.
+
+ To compile this driver as a module choose m here.
+
config VIDEO_TI_VPE
tristate "TI VPE (Video Processing Engine) driver"
depends on VIDEO_DEV && VIDEO_V4L2
@@ -553,6 +568,16 @@ config VIDEO_MESON_AO_CEC
This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
generic CEC framework interface.
CEC bus is present in the HDMI connector and enables communication
+
+config CEC_GPIO
+ tristate "Generic GPIO-based CEC driver"
+ depends on PREEMPT
+ select CEC_CORE
+ select CEC_PIN
+ select GPIOLIB
+ ---help---
+ This is a generic GPIO-based CEC driver.
+ The CEC bus is present in the HDMI connector and enables communication
between compatible devices.
config VIDEO_SAMSUNG_S5P_CEC
@@ -589,6 +614,17 @@ config VIDEO_STM32_HDMI_CEC
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+config VIDEO_TEGRA_HDMI_CEC
+ tristate "Tegra HDMI CEC driver"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for the Tegra HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
endif #CEC_PLATFORM_DRIVERS
menuconfig SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index c1ef946bf032..003b0bb2cddf 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the video capture/playback device drivers.
#
@@ -26,6 +27,8 @@ obj-$(CONFIG_VIDEO_CODA) += coda/
obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
+obj-$(CONFIG_CEC_GPIO) += cec-gpio/
+
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
obj-$(CONFIG_VIDEO_MUX) += video-mux.o
@@ -46,6 +49,8 @@ obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
+
obj-y += stm32/
obj-y += blackfin/
@@ -62,6 +67,8 @@ obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip/rga/
+
obj-y += omap/
obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index dfcc484cab89..0997c640191d 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2417,6 +2417,11 @@ static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
return vpfe_probe_complete(vpfe);
}
+static const struct v4l2_async_notifier_operations vpfe_async_ops = {
+ .bound = vpfe_async_bound,
+ .complete = vpfe_async_complete,
+};
+
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
@@ -2590,8 +2595,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->notifier.subdevs = vpfe->cfg->asd;
vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
- vpfe->notifier.bound = vpfe_async_bound;
- vpfe->notifier.complete = vpfe_async_complete;
+ vpfe->notifier.ops = &vpfe_async_ops;
ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
&vpfe->notifier);
if (ret) {
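
vpfe now fills a const struct v4l2_async_notifier_operations and points notifier.ops at it, instead of assigning .bound/.complete on the notifier directly; this is the form the v4l2-async framework moved to. In outline, with callback bodies reduced to comments:

#include <media/v4l2-async.h>

static int my_async_bound(struct v4l2_async_notifier *notifier,
			  struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return 0;	/* hook up the newly bound subdevice here */
}

static int my_async_complete(struct v4l2_async_notifier *notifier)
{
	return 0;	/* all subdevices bound: register video devices */
}

static const struct v4l2_async_notifier_operations my_async_ops = {
	.bound		= my_async_bound,
	.complete	= my_async_complete,
};

/* in probe:
 *	notifier.ops = &my_async_ops;
 *	v4l2_async_notifier_register(&v4l2_dev, &notifier);
 */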
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index 6936ac467609..2aadc19235ea 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ATMEL_ISC_REGS_H
#define __ATMEL_ISC_REGS_H
@@ -42,6 +43,7 @@
/* ISC Clock Status Register */
#define ISC_CLKSR 0x00000020
+#define ISC_CLKSR_SIP BIT(31)
#define ISC_CLK(n) BIT(n)
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index d7103c5f92c3..13f1c1c797b0 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -65,6 +65,7 @@ struct isc_clk {
struct clk_hw hw;
struct clk *clk;
struct regmap *regmap;
+ spinlock_t lock;
u8 id;
u8 parent_id;
u32 div;
@@ -82,41 +83,69 @@ struct isc_subdev_entity {
struct v4l2_subdev *sd;
struct v4l2_async_subdev *asd;
struct v4l2_async_notifier notifier;
- struct v4l2_subdev_pad_config *config;
u32 pfe_cfg0;
struct list_head list;
};
+/* Indicate the format is generated by the sensor */
+#define FMT_FLAG_FROM_SENSOR BIT(0)
+/* Indicate the format is produced by ISC itself */
+#define FMT_FLAG_FROM_CONTROLLER BIT(1)
+/* Indicate a Raw Bayer format */
+#define FMT_FLAG_RAW_FORMAT BIT(2)
+
+#define FMT_FLAG_RAW_FROM_SENSOR (FMT_FLAG_FROM_SENSOR | \
+ FMT_FLAG_RAW_FORMAT)
+
/*
* struct isc_format - ISC media bus format information
* @fourcc: Fourcc code for this format
* @mbus_code: V4L2 media bus format code.
+ * @flags: Indicate format from sensor or converted by controller
* @bpp: Bits per pixel (when stored in memory)
- * @reg_bps: reg value for bits per sample
* (when transferred over a bus)
- * @pipeline: pipeline switch
* @sd_support: Subdev supports this format
* @isc_support: ISC can convert raw format to this format
*/
+
struct isc_format {
u32 fourcc;
u32 mbus_code;
+ u32 flags;
u8 bpp;
- u32 reg_bps;
- u32 reg_bay_cfg;
- u32 reg_rlp_mode;
- u32 reg_dcfg_imode;
- u32 reg_dctrl_dview;
-
- u32 pipeline;
-
bool sd_support;
bool isc_support;
};
+/* Pipeline bitmap */
+#define WB_ENABLE BIT(0)
+#define CFA_ENABLE BIT(1)
+#define CC_ENABLE BIT(2)
+#define GAM_ENABLE BIT(3)
+#define GAM_BENABLE BIT(4)
+#define GAM_GENABLE BIT(5)
+#define GAM_RENABLE BIT(6)
+#define CSC_ENABLE BIT(7)
+#define CBC_ENABLE BIT(8)
+#define SUB422_ENABLE BIT(9)
+#define SUB420_ENABLE BIT(10)
+
+#define GAM_ENABLES (GAM_RENABLE | GAM_GENABLE | GAM_BENABLE | GAM_ENABLE)
+
+struct fmt_config {
+ u32 fourcc;
+
+ u32 pfe_cfg0_bps;
+ u32 cfa_baycfg;
+ u32 rlp_cfg_mode;
+ u32 dcfg_imode;
+ u32 dctrl_dview;
+
+ u32 bits_pipeline;
+};
#define HIST_ENTRIES 512
#define HIST_BAYER (ISC_HIS_CFG_MODE_B + 1)
@@ -181,80 +210,320 @@ struct isc_device {
struct list_head subdev_entities;
};
-#define RAW_FMT_IND_START 0
-#define RAW_FMT_IND_END 11
-#define ISC_FMT_IND_START 12
-#define ISC_FMT_IND_END 14
-
-static struct isc_format isc_formats[] = {
- { V4L2_PIX_FMT_SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8, 8,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10, 16,
- ISC_PFG_CFG0_BPS_TEN, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT10,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GBGB, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_GRGR, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
- { V4L2_PIX_FMT_SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12, 16,
- ISC_PFG_CFG0_BPS_TWELVE, ISC_BAY_CFG_RGRG, ISC_RLP_CFG_MODE_DAT12,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
-
- { V4L2_PIX_FMT_YUV420, 0x0, 12,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC420P, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
- false, false },
- { V4L2_PIX_FMT_YUV422P, 0x0, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC422P, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
- false, false },
- { V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_RGB565_2X8_LE, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_RGB565,
- ISC_DCFG_IMODE_PACKED16, ISC_DCTRL_DVIEW_PACKED, 0x7b,
- false, false },
-
- { V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_YUYV8_2X8, 16,
- ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_DAT8,
- ISC_DCFG_IMODE_PACKED8, ISC_DCTRL_DVIEW_PACKED, 0x0,
- false, false },
+static struct isc_format formats_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .flags = FMT_FLAG_RAW_FROM_SENSOR,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 12,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = 0x0,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_code = MEDIA_BUS_FMT_Y8_1X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 8,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .mbus_code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 16,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .mbus_code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .flags = FMT_FLAG_FROM_CONTROLLER,
+ .bpp = 32,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .flags = FMT_FLAG_FROM_CONTROLLER |
+ FMT_FLAG_FROM_SENSOR,
+ .bpp = 16,
+ },
+};
+
+struct fmt_config fmt_configs_list[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB10,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GBGB,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_GRGR,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB12,
+ .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE,
+ .cfa_baycfg = ISC_BAY_CFG_RGRG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC420P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB420_ENABLE | SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC,
+ .dcfg_imode = ISC_DCFG_IMODE_YC422P,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PLANAR,
+ .bits_pipeline = SUB422_ENABLE |
+ CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = CBC_ENABLE | CSC_ENABLE |
+ GAM_ENABLES |
+ CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED16,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB32,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED32,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = GAM_ENABLES | CFA_ENABLE | WB_ENABLE,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT,
+ .cfa_baycfg = ISC_BAY_CFG_BGBG,
+ .rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8,
+ .dcfg_imode = ISC_DCFG_IMODE_PACKED8,
+ .dctrl_dview = ISC_DCTRL_DVIEW_PACKED,
+ .bits_pipeline = 0x0
+ },
};
#define GAMMA_MAX 2
@@ -307,31 +576,80 @@ module_param(sensor_preferred, uint, 0644);
MODULE_PARM_DESC(sensor_preferred,
"Sensor is preferred to output the specified format (1-on 0-off), default 1");
+static int isc_wait_clk_stable(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+ struct regmap *regmap = isc_clk->regmap;
+ unsigned long timeout = jiffies + usecs_to_jiffies(1000);
+ unsigned int status;
+
+ while (time_before(jiffies, timeout)) {
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (!(status & ISC_CLKSR_SIP))
+ return 0;
+
+ usleep_range(10, 250);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int isc_clk_prepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
+ return isc_wait_clk_stable(hw);
+}
+
+static void isc_clk_unprepare(struct clk_hw *hw)
+{
+ struct isc_clk *isc_clk = to_isc_clk(hw);
+
+ isc_wait_clk_stable(hw);
+
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+}
+
static int isc_clk_enable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
struct regmap *regmap = isc_clk->regmap;
+ unsigned long flags;
+ unsigned int status;
dev_dbg(isc_clk->dev, "ISC CLK: %s, div = %d, parent id = %d\n",
__func__, isc_clk->div, isc_clk->parent_id);
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_update_bits(regmap, ISC_CLKCFG,
ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
(isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
(isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));
regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
- return 0;
+ regmap_read(regmap, ISC_CLKSR, &status);
+ if (status & ISC_CLK(id))
+ return 0;
+ else
+ return -EINVAL;
}
static void isc_clk_disable(struct clk_hw *hw)
{
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 id = isc_clk->id;
+ unsigned long flags;
+ spin_lock_irqsave(&isc_clk->lock, flags);
regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
+ spin_unlock_irqrestore(&isc_clk->lock, flags);
}
static int isc_clk_is_enabled(struct clk_hw *hw)
@@ -339,8 +657,14 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
u32 status;
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_get_sync(isc_clk->dev);
+
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
+ if (isc_clk->id == ISC_ISPCK)
+ pm_runtime_put_sync(isc_clk->dev);
+
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -447,6 +771,8 @@ static int isc_clk_set_rate(struct clk_hw *hw,
}
static const struct clk_ops isc_clk_ops = {
+ .prepare = isc_clk_prepare,
+ .unprepare = isc_clk_unprepare,
.enable = isc_clk_enable,
.disable = isc_clk_disable,
.is_enabled = isc_clk_is_enabled,
@@ -492,6 +818,7 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
isc_clk->regmap = regmap;
isc_clk->id = id;
isc_clk->dev = isc->dev;
+ spin_lock_init(&isc_clk->lock);
isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
if (IS_ERR(isc_clk->clk)) {
@@ -575,11 +902,27 @@ static inline bool sensor_is_preferred(const struct isc_format *isc_fmt)
!isc_fmt->isc_support;
}
+static struct fmt_config *get_fmt_config(u32 fourcc)
+{
+ struct fmt_config *config;
+ int i;
+
+ config = &fmt_configs_list[0];
+ for (i = 0; i < ARRAY_SIZE(fmt_configs_list); i++) {
+ if (config->fourcc == fourcc)
+ return config;
+
+ config++;
+ }
+ return NULL;
+}
+
static void isc_start_dma(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix;
u32 sizeimage = pixfmt->sizeimage;
+ struct fmt_config *config = get_fmt_config(isc->current_fmt->fourcc);
u32 dctrl_dview;
dma_addr_t addr0;
@@ -602,7 +945,7 @@ static void isc_start_dma(struct isc_device *isc)
if (sensor_is_preferred(isc->current_fmt))
dctrl_dview = ISC_DCTRL_DVIEW_PACKED;
else
- dctrl_dview = isc->current_fmt->reg_dctrl_dview;
+ dctrl_dview = config->dctrl_dview;
regmap_write(regmap, ISC_DCTRL, dctrl_dview | ISC_DCTRL_IE_IS);
regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE);
@@ -612,6 +955,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
u32 val, bay_cfg;
const u32 *gamma;
unsigned int i;
@@ -625,7 +969,7 @@ static void isc_set_pipeline(struct isc_device *isc, u32 pipeline)
if (!pipeline)
return;
- bay_cfg = isc->raw_fmt->reg_bay_cfg;
+ bay_cfg = config->cfa_baycfg;
regmap_write(regmap, ISC_WB_CFG, bay_cfg);
regmap_write(regmap, ISC_WB_O_RGR, 0x0);
@@ -678,11 +1022,13 @@ static void isc_set_histogram(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
struct isc_ctrls *ctrls = &isc->ctrls;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
if (ctrls->awb && (ctrls->hist_stat != HIST_ENABLED)) {
- regmap_write(regmap, ISC_HIS_CFG, ISC_HIS_CFG_MODE_R |
- (isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
- ISC_HIS_CFG_RAR);
+ regmap_write(regmap, ISC_HIS_CFG,
+ ISC_HIS_CFG_MODE_R |
+ (config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) |
+ ISC_HIS_CFG_RAR);
regmap_write(regmap, ISC_HIS_CTRL, ISC_HIS_CTRL_EN);
regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE);
ctrls->hist_id = ISC_HIS_CFG_MODE_R;
@@ -699,8 +1045,10 @@ static void isc_set_histogram(struct isc_device *isc)
}
static inline void isc_get_param(const struct isc_format *fmt,
- u32 *rlp_mode, u32 *dcfg)
+ u32 *rlp_mode, u32 *dcfg)
{
+ struct fmt_config *config = get_fmt_config(fmt->fourcc);
+
*dcfg = ISC_DCFG_YMBSIZE_BEATS8;
switch (fmt->fourcc) {
@@ -712,8 +1060,8 @@ static inline void isc_get_param(const struct isc_format *fmt,
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- *rlp_mode = fmt->reg_rlp_mode;
- *dcfg |= fmt->reg_dcfg_imode;
+ *rlp_mode = config->rlp_cfg_mode;
+ *dcfg |= config->dcfg_imode;
break;
default:
*rlp_mode = ISC_RLP_CFG_MODE_DAT8;
@@ -726,20 +1074,22 @@ static int isc_configure(struct isc_device *isc)
{
struct regmap *regmap = isc->regmap;
const struct isc_format *current_fmt = isc->current_fmt;
+ struct fmt_config *curfmt_config = get_fmt_config(current_fmt->fourcc);
+ struct fmt_config *rawfmt_config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_subdev_entity *subdev = isc->current_subdev;
u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
if (sensor_is_preferred(current_fmt)) {
- pfe_cfg0 = current_fmt->reg_bps;
+ pfe_cfg0 = curfmt_config->pfe_cfg0_bps;
pipeline = 0x0;
isc_get_param(current_fmt, &rlp_mode, &dcfg);
isc->ctrls.hist_stat = HIST_INIT;
} else {
- pfe_cfg0 = isc->raw_fmt->reg_bps;
- pipeline = current_fmt->pipeline;
- rlp_mode = current_fmt->reg_rlp_mode;
- dcfg = current_fmt->reg_dcfg_imode | ISC_DCFG_YMBSIZE_BEATS8 |
- ISC_DCFG_CMBSIZE_BEATS8;
+ pfe_cfg0 = rawfmt_config->pfe_cfg0_bps;
+ pipeline = curfmt_config->bits_pipeline;
+ rlp_mode = curfmt_config->rlp_cfg_mode;
+ dcfg = curfmt_config->dcfg_imode |
+ ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
}
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
@@ -941,6 +1291,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
{
struct isc_format *isc_fmt;
struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -971,7 +1322,7 @@ static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f,
v4l2_fill_mbus_format(&format.format, pixfmt, mbus_code);
ret = v4l2_subdev_call(isc->current_subdev->sd, pad, set_fmt,
- isc->current_subdev->config, &format);
+ &pad_cfg, &format);
if (ret < 0)
return ret;
@@ -1323,6 +1674,7 @@ static void isc_awb_work(struct work_struct *w)
struct isc_device *isc =
container_of(w, struct isc_device, awb_work);
struct regmap *regmap = isc->regmap;
+ struct fmt_config *config = get_fmt_config(isc->raw_fmt->fourcc);
struct isc_ctrls *ctrls = &isc->ctrls;
u32 hist_id = ctrls->hist_id;
u32 baysel;
@@ -1340,7 +1692,7 @@ static void isc_awb_work(struct work_struct *w)
}
ctrls->hist_id = hist_id;
- baysel = isc->raw_fmt->reg_bay_cfg << ISC_HIS_CFG_BAYSEL_SHIFT;
+ baysel = config->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;
pm_runtime_get_sync(isc->dev);
@@ -1436,17 +1788,15 @@ static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct isc_device, v4l2_dev);
cancel_work_sync(&isc->awb_work);
video_unregister_device(&isc->video_dev);
- if (isc->current_subdev->config)
- v4l2_subdev_free_pad_config(isc->current_subdev->config);
v4l2_ctrl_handler_free(&isc->ctrls.handler);
}
static struct isc_format *find_format_by_code(unsigned int code, int *index)
{
- struct isc_format *fmt = &isc_formats[0];
+ struct isc_format *fmt = &formats_list[0];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ for (i = 0; i < ARRAY_SIZE(formats_list); i++) {
if (fmt->mbus_code == code) {
*index = i;
return fmt;
@@ -1463,37 +1813,36 @@ static int isc_formats_init(struct isc_device *isc)
struct isc_format *fmt;
struct v4l2_subdev *subdev = isc->current_subdev->sd;
unsigned int num_fmts, i, j;
+ u32 list_size = ARRAY_SIZE(formats_list);
struct v4l2_subdev_mbus_code_enum mbus_code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
- fmt = &isc_formats[0];
- for (i = 0; i < ARRAY_SIZE(isc_formats); i++) {
- fmt->isc_support = false;
- fmt->sd_support = false;
-
- fmt++;
- }
-
while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
NULL, &mbus_code)) {
mbus_code.index++;
+
fmt = find_format_by_code(mbus_code.code, &i);
- if (!fmt)
+ if ((!fmt) || (!(fmt->flags & FMT_FLAG_FROM_SENSOR)))
continue;
fmt->sd_support = true;
- if (i <= RAW_FMT_IND_END) {
- for (j = ISC_FMT_IND_START; j <= ISC_FMT_IND_END; j++)
- isc_formats[j].isc_support = true;
-
+ if (fmt->flags & FMT_FLAG_RAW_FORMAT)
isc->raw_fmt = fmt;
- }
}
- fmt = &isc_formats[0];
- for (i = 0, num_fmts = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0; i < list_size; i++) {
+ if (fmt->flags & FMT_FLAG_FROM_CONTROLLER)
+ fmt->isc_support = true;
+
+ fmt++;
+ }
+
+ fmt = &formats_list[0];
+ num_fmts = 0;
+ for (i = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
num_fmts++;
@@ -1505,15 +1854,13 @@ static int isc_formats_init(struct isc_device *isc)
isc->num_user_formats = num_fmts;
isc->user_formats = devm_kcalloc(isc->dev,
- num_fmts, sizeof(struct isc_format *),
+ num_fmts, sizeof(*isc->user_formats),
GFP_KERNEL);
- if (!isc->user_formats) {
- v4l2_err(&isc->v4l2_dev, "could not allocate memory\n");
+ if (!isc->user_formats)
return -ENOMEM;
- }
- fmt = &isc_formats[0];
- for (i = 0, j = 0; i < ARRAY_SIZE(isc_formats); i++) {
+ fmt = &formats_list[0];
+ for (i = 0, j = 0; i < list_size; i++) {
if (fmt->isc_support || fmt->sd_support)
isc->user_formats[j++] = fmt;
@@ -1550,7 +1897,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
- struct isc_subdev_entity *sd_entity;
struct video_device *vdev = &isc->video_dev;
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
@@ -1563,8 +1909,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
isc->current_subdev = container_of(notifier,
struct isc_subdev_entity, notifier);
- sd_entity = isc->current_subdev;
-
mutex_init(&isc->lock);
init_completion(&isc->comp);
@@ -1591,10 +1935,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
INIT_LIST_HEAD(&isc->dma_queue);
spin_lock_init(&isc->dma_queue_lock);
- sd_entity->config = v4l2_subdev_alloc_pad_config(sd_entity->sd);
- if (sd_entity->config == NULL)
- return -ENOMEM;
-
ret = isc_formats_init(isc);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev,
@@ -1639,6 +1979,12 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations isc_async_ops = {
+ .bound = isc_async_bound,
+ .unbind = isc_async_unbind,
+ .complete = isc_async_complete,
+};
+
static void isc_subdev_cleanup(struct isc_device *isc)
{
struct isc_subdev_entity *subdev_entity;
@@ -1716,7 +2062,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity = devm_kzalloc(dev,
sizeof(*subdev_entity), GFP_KERNEL);
- if (subdev_entity == NULL) {
+ if (!subdev_entity) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1724,7 +2070,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
subdev_entity->asd = devm_kzalloc(dev,
sizeof(*subdev_entity->asd), GFP_KERNEL);
- if (subdev_entity->asd == NULL) {
+ if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
break;
@@ -1815,25 +2161,37 @@ static int atmel_isc_probe(struct platform_device *pdev)
return ret;
}
+ ret = clk_prepare_enable(isc->hclock);
+ if (ret) {
+ dev_err(dev, "failed to enable hclock: %d\n", ret);
+ return ret;
+ }
+
ret = isc_clk_init(isc);
if (ret) {
dev_err(dev, "failed to init isc clock: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_hclk;
}
isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto unprepare_hclk;
+ }
+
/* ispck should be greater than or equal to hclock */
ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
if (ret) {
dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto clean_isc_clk;
+ goto unprepare_clk;
}
ret = isc_parse_dt(dev, isc);
@@ -1851,9 +2209,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
subdev_entity->notifier.subdevs = &subdev_entity->asd;
subdev_entity->notifier.num_subdevs = 1;
- subdev_entity->notifier.bound = isc_async_bound;
- subdev_entity->notifier.unbind = isc_async_unbind;
- subdev_entity->notifier.complete = isc_async_complete;
+ subdev_entity->notifier.ops = &isc_async_ops;
ret = v4l2_async_notifier_register(&isc->v4l2_dev,
&subdev_entity->notifier);
@@ -1866,7 +2222,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
break;
}
+ pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ pm_request_idle(dev);
return 0;
@@ -1876,7 +2234,11 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-clean_isc_clk:
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+unprepare_hclk:
+ clk_disable_unprepare(isc->hclock);
+
isc_clk_cleanup(isc);
return ret;
@@ -1887,6 +2249,8 @@ static int atmel_isc_remove(struct platform_device *pdev)
struct isc_device *isc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(isc->ispck);
+ clk_disable_unprepare(isc->hclock);
isc_subdev_cleanup(isc);
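
The probe changes above follow the usual prepare/enable-plus-unwind pattern: each clock is enabled before the hardware behind it is touched, and the error labels undo the enables in reverse order, with remove() disabling both clocks again. A minimal sketch of that pattern, with illustrative clock names rather than the driver's own:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        struct clk *hclock, *ispck;
        int ret;

        hclock = devm_clk_get(&pdev->dev, "hclock");
        if (IS_ERR(hclock))
            return PTR_ERR(hclock);

        ret = clk_prepare_enable(hclock);       /* gate opened before any HW access */
        if (ret)
            return ret;

        ispck = devm_clk_get(&pdev->dev, "iscck");
        if (IS_ERR(ispck)) {
            ret = PTR_ERR(ispck);
            goto unprepare_hclk;
        }

        ret = clk_prepare_enable(ispck);
        if (ret)
            goto unprepare_hclk;

        ret = clk_set_rate(ispck, clk_get_rate(hclock));
        if (ret)
            goto unprepare_clk;

        /* ... register devices; later failures jump to the labels below ... */
        return 0;

    unprepare_clk:
        clk_disable_unprepare(ispck);
    unprepare_hclk:
        clk_disable_unprepare(hclock);
        return ret;
    }

The pm_runtime_set_active() call before pm_runtime_enable() tells the runtime PM core the device is already powered (its clocks were just enabled), and pm_request_idle() lets it be suspended again once probe finishes.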
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 891fa2505efa..e900995143a3 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -411,7 +411,7 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&isi->irqlock, flags);
list_add_tail(&buf->list, &isi->video_buffer_list);
- if (isi->active == NULL) {
+ if (!isi->active) {
isi->active = buf;
if (vb2_is_streaming(vb->vb2_queue))
start_dma(isi, buf);
@@ -1038,10 +1038,8 @@ static int isi_formats_init(struct atmel_isi *isi)
isi->user_formats = devm_kcalloc(isi->dev,
num_fmts, sizeof(struct isi_format *),
GFP_KERNEL);
- if (!isi->user_formats) {
- dev_err(isi->dev, "could not allocate memory\n");
+ if (!isi->user_formats)
return -ENOMEM;
- }
memcpy(isi->user_formats, isi_fmts,
num_fmts * sizeof(struct isi_format *));
@@ -1105,6 +1103,12 @@ static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
+ .bound = isi_graph_notify_bound,
+ .unbind = isi_graph_notify_unbind,
+ .complete = isi_graph_notify_complete,
+};
+
static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1143,7 +1147,7 @@ static int isi_graph_init(struct atmel_isi *isi)
/* Register the subdevices notifier. */
subdevs = devm_kzalloc(isi->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL) {
+ if (!subdevs) {
of_node_put(isi->entity.node);
return -ENOMEM;
}
@@ -1152,9 +1156,7 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.subdevs = subdevs;
isi->notifier.num_subdevs = 1;
- isi->notifier.bound = isi_graph_notify_bound;
- isi->notifier.unbind = isi_graph_notify_unbind;
- isi->notifier.complete = isi_graph_notify_complete;
+ isi->notifier.ops = &isi_graph_notify_ops;
ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
@@ -1176,10 +1178,8 @@ static int atmel_isi_probe(struct platform_device *pdev)
int ret, i;
isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
- if (!isi) {
- dev_err(&pdev->dev, "Can't allocate interface!\n");
+ if (!isi)
return -ENOMEM;
- }
isi->pclk = devm_clk_get(&pdev->dev, "isi_clk");
if (IS_ERR(isi->pclk))
@@ -1204,7 +1204,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
return ret;
isi->vdev = video_device_alloc();
- if (isi->vdev == NULL) {
+ if (!isi->vdev) {
ret = -ENOMEM;
goto err_vdev_alloc;
}
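
Several conversions in this series, in atmel-isc and atmel-isi above and in the vpif, fimc, pxa and camss hunks further down, share the same shape: the per-driver bound/unbind/complete callbacks move into a const v4l2_async_notifier_operations table and the notifier only keeps an ops pointer. A rough sketch with callback bodies elided and illustrative names:

    #include <media/v4l2-async.h>
    #include <media/v4l2-subdev.h>

    static int ex_notify_bound(struct v4l2_async_notifier *notifier,
                               struct v4l2_subdev *subdev,
                               struct v4l2_async_subdev *asd)
    {
        return 0;       /* store the subdev, create media links, ... */
    }

    static int ex_notify_complete(struct v4l2_async_notifier *notifier)
    {
        return 0;       /* register video nodes once all subdevs are bound */
    }

    static const struct v4l2_async_notifier_operations ex_notify_ops = {
        .bound    = ex_notify_bound,
        .complete = ex_notify_complete,
    };

    /* In probe(), instead of assigning the function pointers one by one:
     *     notifier.ops = &ex_notify_ops;
     *     v4l2_async_notifier_register(&v4l2_dev, &notifier);
     */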
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index 37169054b828..478eb2f7d723 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -338,7 +338,6 @@ struct ppi_if *ppi_create_instance(struct platform_device *pdev,
ppi = kzalloc(sizeof(*ppi), GFP_KERNEL);
if (!ppi) {
peripheral_free_list(info->pin_req);
- dev_err(&pdev->dev, "unable to allocate memory for ppi handle\n");
return NULL;
}
ppi->ops = &ppi_ops;
diff --git a/drivers/media/platform/cec-gpio/Makefile b/drivers/media/platform/cec-gpio/Makefile
new file mode 100644
index 000000000000..e82b258afa55
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CEC_GPIO) += cec-gpio.o
diff --git a/drivers/media/platform/cec-gpio/cec-gpio.c b/drivers/media/platform/cec-gpio/cec-gpio.c
new file mode 100644
index 000000000000..5debdf08fbe7
--- /dev/null
+++ b/drivers/media/platform/cec-gpio/cec-gpio.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <media/cec-pin.h>
+
+struct cec_gpio {
+ struct cec_adapter *adap;
+ struct device *dev;
+
+ struct gpio_desc *cec_gpio;
+ int cec_irq;
+ bool cec_is_low;
+ bool cec_have_irq;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+ bool hpd_is_high;
+ ktime_t hpd_ts;
+};
+
+static bool cec_gpio_read(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return false;
+ return gpiod_get_value(cec->cec_gpio);
+}
+
+static void cec_gpio_high(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->cec_is_low)
+ return;
+ cec->cec_is_low = false;
+ gpiod_set_value(cec->cec_gpio, 1);
+}
+
+static void cec_gpio_low(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_is_low)
+ return;
+ if (WARN_ON_ONCE(cec->cec_have_irq))
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+ cec->cec_is_low = true;
+ gpiod_set_value(cec->cec_gpio, 0);
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler_thread(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_queue_pin_hpd_event(cec->adap, cec->hpd_is_high, cec->hpd_ts);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cec_hpd_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+ bool is_high = gpiod_get_value(cec->hpd_gpio);
+
+ if (is_high == cec->hpd_is_high)
+ return IRQ_HANDLED;
+ cec->hpd_ts = ktime_get();
+ cec->hpd_is_high = is_high;
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cec_gpio_irq_handler(int irq, void *priv)
+{
+ struct cec_gpio *cec = priv;
+
+ cec_pin_changed(cec->adap, gpiod_get_value(cec->cec_gpio));
+ return IRQ_HANDLED;
+}
+
+static bool cec_gpio_enable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ return true;
+
+ if (request_irq(cec->cec_irq, cec_gpio_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ adap->name, cec))
+ return false;
+ cec->cec_have_irq = true;
+ return true;
+}
+
+static void cec_gpio_disable_irq(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (cec->cec_have_irq)
+ free_irq(cec->cec_irq, cec);
+ cec->cec_have_irq = false;
+}
+
+static void cec_gpio_status(struct cec_adapter *adap, struct seq_file *file)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ seq_printf(file, "mode: %s\n", cec->cec_is_low ? "low-drive" : "read");
+ if (cec->cec_have_irq)
+ seq_printf(file, "using irq: %d\n", cec->cec_irq);
+ if (cec->hpd_gpio)
+ seq_printf(file, "hpd: %s\n",
+ cec->hpd_is_high ? "high" : "low");
+}
+
+static int cec_gpio_read_hpd(struct cec_adapter *adap)
+{
+ struct cec_gpio *cec = cec_get_drvdata(adap);
+
+ if (!cec->hpd_gpio)
+ return -ENOTTY;
+ return gpiod_get_value(cec->hpd_gpio);
+}
+
+static void cec_gpio_free(struct cec_adapter *adap)
+{
+ cec_gpio_disable_irq(adap);
+}
+
+static const struct cec_pin_ops cec_gpio_pin_ops = {
+ .read = cec_gpio_read,
+ .low = cec_gpio_low,
+ .high = cec_gpio_high,
+ .enable_irq = cec_gpio_enable_irq,
+ .disable_irq = cec_gpio_disable_irq,
+ .status = cec_gpio_status,
+ .free = cec_gpio_free,
+ .read_hpd = cec_gpio_read_hpd,
+};
+
+static int cec_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cec_gpio *cec;
+ int ret;
+
+ cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = dev;
+
+ cec->cec_gpio = devm_gpiod_get(dev, "cec", GPIOD_IN);
+ if (IS_ERR(cec->cec_gpio))
+ return PTR_ERR(cec->cec_gpio);
+ cec->cec_irq = gpiod_to_irq(cec->cec_gpio);
+
+ cec->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(cec->hpd_gpio))
+ return PTR_ERR(cec->hpd_gpio);
+
+ cec->adap = cec_pin_allocate_adapter(&cec_gpio_pin_ops,
+ cec, pdev->name, CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR |
+ CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ if (cec->hpd_gpio) {
+ cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
+ ret = devm_request_threaded_irq(dev, cec->hpd_irq,
+ cec_hpd_gpio_irq_handler,
+ cec_hpd_gpio_irq_handler_thread,
+ IRQF_ONESHOT |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "hpd-gpio", cec);
+ if (ret)
+ return ret;
+ }
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cec);
+ return 0;
+}
+
+static int cec_gpio_remove(struct platform_device *pdev)
+{
+ struct cec_gpio *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ return 0;
+}
+
+static const struct of_device_id cec_gpio_match[] = {
+ {
+ .compatible = "cec-gpio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cec_gpio_match);
+
+static struct platform_driver cec_gpio_pdrv = {
+ .probe = cec_gpio_probe,
+ .remove = cec_gpio_remove,
+ .driver = {
+ .name = "cec-gpio",
+ .of_match_table = cec_gpio_match,
+ },
+};
+
+module_platform_driver(cec_gpio_pdrv);
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CEC GPIO driver");
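
The HPD handling in the new driver splits the work between a hard handler, which samples the line and timestamps it with ktime_get() before returning IRQ_WAKE_THREAD, and a threaded handler that queues the CEC pin event. The split keeps the timestamp close to the edge while the heavier reporting runs in sleepable context. A stripped-down sketch of that request_threaded_irq() pattern, independent of the CEC specifics:

    #include <linux/interrupt.h>
    #include <linux/ktime.h>

    struct ex_ctx {
        ktime_t ts;
        bool level;
    };

    static irqreturn_t ex_hard_handler(int irq, void *priv)
    {
        struct ex_ctx *ctx = priv;

        ctx->ts = ktime_get();      /* capture as close to the edge as possible */
        ctx->level = true;          /* a real driver reads the GPIO level here */
        return IRQ_WAKE_THREAD;     /* defer the rest to the threaded handler */
    }

    static irqreturn_t ex_thread_handler(int irq, void *priv)
    {
        /* sleepable context: report the event using ctx->ts and ctx->level */
        return IRQ_HANDLED;
    }

    /* ret = devm_request_threaded_irq(dev, irq, ex_hard_handler,
     *                                 ex_thread_handler,
     *                                 IRQF_ONESHOT | IRQF_TRIGGER_RISING |
     *                                 IRQF_TRIGGER_FALLING,
     *                                 "example", ctx);
     */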
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 291c40933935..bfc4ecf6f068 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -417,6 +417,10 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
dev->devtype->product != CODA_DX6)
size += ysize / 4;
name = kasprintf(GFP_KERNEL, "fb%d", i);
+ if (!name) {
+ coda_free_framebuffers(ctx);
+ return -ENOMEM;
+ }
ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i],
size, name);
kfree(name);
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index f20666a4aa89..ca671e315ad0 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM coda
diff --git a/drivers/media/platform/davinci/Makefile b/drivers/media/platform/davinci/Makefile
index d74d9eeb0e9e..05c45bf371aa 100644
--- a/drivers/media/platform/davinci/Makefile
+++ b/drivers/media/platform/davinci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the davinci video device drivers.
#
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index f1b521045d64..3482178cbf01 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -82,8 +82,8 @@ struct ccdc_hw_device {
};
/* Used by CCDC module to register & unregister with vpfe capture driver */
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev);
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev);
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
#endif
#endif
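
Constifying ccdc_hw_device works because the vpfe registration path only reads the structure; once the prototypes take a const pointer, the static definitions in the CCDC drivers can be declared const and placed in read-only data. The same pattern in miniature, with illustrative names:

    struct ex_hw_ops {
        int (*open)(void);
        void (*close)(void);
    };

    struct ex_hw_device {
        const char *name;
        struct ex_hw_ops hw_ops;
    };

    /* registration only stores and reads the pointer, so const is enough */
    int ex_register_device(const struct ex_hw_device *dev);

    static const struct ex_hw_device ex_dev = {
        .name   = "EXAMPLE",
        .hw_ops = { .open = NULL, .close = NULL },
    };

    /* ex_register_device(&ex_dev); */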
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 6d492dc4c3a9..89cb3094d7e6 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -841,7 +841,7 @@ static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
return 0;
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM355 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 3b2d8a9317b8..5fa0a1f32536 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -776,7 +776,7 @@ static void ccdc_restore_context(void)
regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
}
-static struct ccdc_hw_device ccdc_hw_dev = {
+static const struct ccdc_hw_device ccdc_hw_dev = {
.name = "DM6446 CCDC",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 5813b49391ed..d5ff58494c1e 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -1000,7 +1000,7 @@ static int isif_close(struct device *device)
return 0;
}
-static struct ccdc_hw_device isif_hw_dev = {
+static const struct ccdc_hw_device isif_hw_dev = {
.name = "ISIF",
.owner = THIS_MODULE,
.hw_ops = {
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 13d027031ff0..6aabd21fe69f 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -122,7 +122,7 @@ static irqreturn_t venc_isr(int irq, void *arg)
int fid;
int i;
- if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ if (!arg || !disp_dev->dev[0])
return IRQ_HANDLED;
if (venc_is_second_field(disp_dev))
@@ -337,10 +337,10 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
} else {
- if (layer->cur_frm != NULL)
+ if (layer->cur_frm)
vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
- if (layer->next_frm != NULL)
+ if (layer->next_frm)
vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
VB2_BUF_STATE_ERROR);
}
@@ -947,7 +947,7 @@ static int vpbe_display_s_std(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL != vpbe_dev->ops.s_std) {
+ if (vpbe_dev->ops.s_std) {
ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev,
@@ -1000,8 +1000,7 @@ static int vpbe_display_enum_output(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
/* Enumerate outputs */
-
- if (NULL == vpbe_dev->ops.enum_outputs)
+ if (!vpbe_dev->ops.enum_outputs)
return -EINVAL;
ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
@@ -1030,7 +1029,7 @@ static int vpbe_display_s_output(struct file *file, void *priv,
if (vb2_is_busy(&layer->buffer_queue))
return -EBUSY;
- if (NULL == vpbe_dev->ops.set_output)
+ if (!vpbe_dev->ops.set_output)
return -EINVAL;
ret = vpbe_dev->ops.set_output(vpbe_dev, i);
@@ -1077,7 +1076,7 @@ vpbe_display_enum_dv_timings(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
/* Enumerate outputs */
- if (NULL == vpbe_dev->ops.enum_dv_timings)
+ if (!vpbe_dev->ops.enum_dv_timings)
return -EINVAL;
ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
@@ -1292,7 +1291,7 @@ static int vpbe_device_get(struct device *dev, void *data)
if (strcmp("vpbe_controller", pdev->name) == 0)
vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-osd") != NULL)
+ if (strstr(pdev->name, "vpbe-osd"))
vpbe_disp->osd_device = platform_get_drvdata(pdev);
return 0;
@@ -1305,15 +1304,10 @@ static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
struct video_device *vbd = NULL;
/* Allocate memory for four plane display objects */
-
- disp_dev->dev[i] =
- kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
-
- /* If memory allocation fails, return error */
- if (!disp_dev->dev[i]) {
- printk(KERN_ERR "ran out of memory\n");
+ disp_dev->dev[i] = kzalloc(sizeof(*disp_dev->dev[i]), GFP_KERNEL);
+ if (!disp_dev->dev[i])
return -ENOMEM;
- }
+
spin_lock_init(&disp_dev->dev[i]->irqlock);
mutex_init(&disp_dev->dev[i]->opslock);
@@ -1397,8 +1391,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
printk(KERN_DEBUG "vpbe_display_probe\n");
/* Allocate memory for vpbe_display */
- disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
- GFP_KERNEL);
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(*disp_dev), GFP_KERNEL);
if (!disp_dev)
return -ENOMEM;
@@ -1414,7 +1407,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
/* Initialize the vpbe display controller */
- if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ if (disp_dev->vpbe_dev->ops.initialize) {
err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
disp_dev->vpbe_dev);
if (err) {
@@ -1482,7 +1475,7 @@ static int vpbe_display_probe(struct platform_device *pdev)
probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Unregister video device */
- if (disp_dev->dev[k] != NULL) {
+ if (disp_dev->dev[k]) {
video_unregister_device(&disp_dev->dev[k]->video_dev);
kfree(disp_dev->dev[k]);
}
@@ -1504,7 +1497,7 @@ static int vpbe_display_remove(struct platform_device *pdev)
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
/* deinitialize the vpbe display controller */
- if (NULL != vpbe_dev->ops.deinitialize)
+ if (vpbe_dev->ops.deinitialize)
vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
/* un-register device */
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6792da16d9c7..7b3f6f8e3dc8 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -115,7 +115,7 @@ static struct vpfe_config_params config_params = {
};
/* ccdc device registered */
-static struct ccdc_hw_device *ccdc_dev;
+static const struct ccdc_hw_device *ccdc_dev;
/* lock for accessing ccdc information */
static DEFINE_MUTEX(ccdc_lock);
/* ccdc configuration */
@@ -203,7 +203,7 @@ static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
* vpfe_register_ccdc_device. CCDC module calls this to
* register with vpfe capture
*/
-int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
+int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
{
int ret = 0;
printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
@@ -259,7 +259,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
* vpfe_unregister_ccdc_device. CCDC module calls this to
* unregister with vpfe capture
*/
-void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
+void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
{
if (!dev) {
printk(KERN_ERR "invalid ccdc device ptr\n");
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 0ef36cec21d1..a89367ab1e06 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1500,6 +1500,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev)
{
@@ -1691,8 +1696,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 56fe4e5b396e..ff2f75a328c9 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1232,6 +1232,11 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static const struct v4l2_async_notifier_operations vpif_async_ops = {
+ .bound = vpif_async_bound,
+ .complete = vpif_async_complete,
+};
+
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
@@ -1313,8 +1318,7 @@ static __init int vpif_probe(struct platform_device *pdev)
} else {
vpif_obj.notifier.subdevs = vpif_obj.config->asd;
vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
- vpif_obj.notifier.bound = vpif_async_bound;
- vpif_obj.notifier.complete = vpif_async_complete;
+ vpif_obj.notifier.ops = &vpif_async_ops;
err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
&vpif_obj.notifier);
if (err) {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 43801509dabb..17854a379243 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -958,6 +958,51 @@ static struct gsc_pix_max gsc_v_100_max = {
.target_rot_en_h = 2016,
};
+static struct gsc_pix_max gsc_v_5250_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2016,
+ .real_rot_en_h = 2016,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5420_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2048,
+ .real_rot_en_h = 2048,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
+static struct gsc_pix_max gsc_v_5433_max = {
+ .org_scaler_bypass_w = 8192,
+ .org_scaler_bypass_h = 8192,
+ .org_scaler_input_w = 4800,
+ .org_scaler_input_h = 3344,
+ .real_rot_dis_w = 4800,
+ .real_rot_dis_h = 3344,
+ .real_rot_en_w = 2047,
+ .real_rot_en_h = 2047,
+ .target_rot_dis_w = 4800,
+ .target_rot_dis_h = 3344,
+ .target_rot_en_w = 2016,
+ .target_rot_en_h = 2016,
+};
+
static struct gsc_pix_min gsc_v_100_min = {
.org_w = 64,
.org_h = 32,
@@ -992,6 +1037,45 @@ static struct gsc_variant gsc_v_100_variant = {
.local_sc_down = 2,
};
+static struct gsc_variant gsc_v_5250_variant = {
+ .pix_max = &gsc_v_5250_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5420_variant = {
+ .pix_max = &gsc_v_5420_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
+static struct gsc_variant gsc_v_5433_variant = {
+ .pix_max = &gsc_v_5433_max,
+ .pix_min = &gsc_v_100_min,
+ .pix_align = &gsc_v_100_align,
+ .in_buf_cnt = 32,
+ .out_buf_cnt = 32,
+ .sc_up_max = 8,
+ .sc_down_max = 16,
+ .poly_sc_down_max = 4,
+ .pre_sc_down_max = 4,
+ .local_sc_down = 2,
+};
+
static struct gsc_driverdata gsc_v_100_drvdata = {
.variant = {
[0] = &gsc_v_100_variant,
@@ -1004,11 +1088,33 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
.num_clocks = 1,
};
+static struct gsc_driverdata gsc_v_5250_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5250_variant,
+ [1] = &gsc_v_5250_variant,
+ [2] = &gsc_v_5250_variant,
+ [3] = &gsc_v_5250_variant,
+ },
+ .num_entities = 4,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_v_5420_drvdata = {
+ .variant = {
+ [0] = &gsc_v_5420_variant,
+ [1] = &gsc_v_5420_variant,
+ },
+ .num_entities = 2,
+ .clk_names = { "gscl" },
+ .num_clocks = 1,
+};
+
static struct gsc_driverdata gsc_5433_drvdata = {
.variant = {
- [0] = &gsc_v_100_variant,
- [1] = &gsc_v_100_variant,
- [2] = &gsc_v_100_variant,
+ [0] = &gsc_v_5433_variant,
+ [1] = &gsc_v_5433_variant,
+ [2] = &gsc_v_5433_variant,
},
.num_entities = 3,
.clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
@@ -1017,13 +1123,21 @@ static struct gsc_driverdata gsc_5433_drvdata = {
static const struct of_device_id exynos_gsc_match[] = {
{
- .compatible = "samsung,exynos5-gsc",
- .data = &gsc_v_100_drvdata,
+ .compatible = "samsung,exynos5250-gsc",
+ .data = &gsc_v_5250_drvdata,
+ },
+ {
+ .compatible = "samsung,exynos5420-gsc",
+ .data = &gsc_v_5420_drvdata,
},
{
.compatible = "samsung,exynos5433-gsc",
.data = &gsc_5433_drvdata,
},
+ {
+ .compatible = "samsung,exynos5-gsc",
+ .data = &gsc_v_100_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
@@ -1045,6 +1159,9 @@ static int gsc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (drv_data == &gsc_v_100_drvdata)
+ dev_info(dev, "compatible 'exynos5-gsc' is deprecated\n");
+
gsc->id = ret;
if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index c480efb755f5..46a7d242a1a5 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -76,7 +76,7 @@ config VIDEO_EXYNOS4_ISP_DMA_CAPTURE
depends on VIDEO_EXYNOS4_FIMC_IS
select VIDEO_EXYNOS4_IS_COMMON
default y
- help
+ help
This option enables an additional video device node exposing a V4L2
video capture interface for the FIMC-IS ISP raw (Bayer) capture DMA.
diff --git a/drivers/media/platform/exynos4-is/Makefile b/drivers/media/platform/exynos4-is/Makefile
index eed1b185d813..a5ab01c73b95 100644
--- a/drivers/media/platform/exynos4-is/Makefile
+++ b/drivers/media/platform/exynos4-is/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
s5p-fimc-objs := fimc-core.o fimc-reg.o fimc-m2m.o fimc-capture.o media-dev.o
exynos-fimc-lite-objs += fimc-lite-reg.o fimc-lite.o
s5p-csis-objs := mipi-csis.o
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index d4656d5175d7..c15596b56dc9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1405,6 +1405,11 @@ unlock:
return media_device_register(&fmd->media_dev);
}
+static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1479,8 +1484,7 @@ static int fimc_md_probe(struct platform_device *pdev)
if (fmd->num_sensors > 0) {
fmd->subdev_notifier.subdevs = fmd->async_subdevs;
fmd->subdev_notifier.num_subdevs = fmd->num_sensors;
- fmd->subdev_notifier.bound = subdev_notifier_bound;
- fmd->subdev_notifier.complete = subdev_notifier_complete;
+ fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index fb43025df573..dba21215dc84 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -339,9 +339,9 @@ static int restart_video_queue(struct viu_dmaqueue *vidq)
}
}
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
{
- struct viu_dev *dev = (struct viu_dev *)data;
+ struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
struct viu_buf *buf;
struct viu_dmaqueue *vidq = &dev->vidq;
@@ -1466,8 +1466,7 @@ static int viu_of_probe(struct platform_device *op)
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
- setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
- (unsigned long)viu_dev);
+ timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
viu_dev->std = V4L2_STD_NTSC_M;
viu_dev->first = 1;
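
fsl-viu is converted to the timer API introduced for 4.15: the callback now receives the timer_list pointer itself and recovers its container with from_timer(), and timer_setup() replaces setup_timer() with its cast of the data argument. A minimal sketch of the same conversion, assuming a containing structure roughly like viu_dev:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct ex_dev {
        int frames;
        struct timer_list timeout;
    };

    static void ex_timeout(struct timer_list *t)
    {
        /* from_timer() is container_of() keyed on the timer member */
        struct ex_dev *dev = from_timer(dev, t, timeout);

        dev->frames = 0;
    }

    static void ex_init(struct ex_dev *dev)
    {
        timer_setup(&dev->timeout, ex_timeout, 0);
        mod_timer(&dev->timeout, jiffies + HZ);
    }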
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index b07a251e8857..7b7250b1cff8 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The Marvell camera core. This device appears in a number of settings,
* so it needs platform-specific support outside of the core.
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index beb339f5561f..ad8955f9f0a1 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Marvell camera core structures.
*
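
The SPDX tags added across this series follow the kernel's comment-style rules for license identifiers: C source files use the // form on the first line, C headers keep the /* */ form (as the mcam-core.c and mcam-core.h hunks show side by side), and Makefiles and Kconfig use #. For the two C cases:

    // SPDX-License-Identifier: GPL-2.0
    /* SPDX-License-Identifier: GPL-2.0 */

The first form goes at the top of .c files, the second at the top of headers.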
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mtk-mdp/Makefile
index f8025699af99..5982d65c9971 100644
--- a/drivers/media/platform/mtk-mdp/Makefile
+++ b/drivers/media/platform/mtk-mdp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
mtk-mdp-y += mtk_mdp_core.o
mtk-mdp-y += mtk_mdp_comp.o
mtk-mdp-y += mtk_mdp_m2m.o
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index 852d9697ccfa..37b94b555fa1 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
mtk-vcodec-enc.o \
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4d29860d27b4..6f1b0c799e58 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1004,11 +1004,12 @@ static int omap_vout_open(struct file *file)
struct omap_vout_device *vout = NULL;
vout = video_drvdata(file);
- v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
if (vout == NULL)
return -ENODEV;
+ v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
+
/* for now, we only support single open */
if (vout->opened)
return -EBUSY;
diff --git a/drivers/media/platform/omap3isp/Makefile b/drivers/media/platform/omap3isp/Makefile
index 254975a9174e..56e99b4f7d23 100644
--- a/drivers/media/platform/omap3isp/Makefile
+++ b/drivers/media/platform/omap3isp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for OMAP3 ISP driver
ccflags-$(CONFIG_VIDEO_OMAP3_DEBUG) += -DDEBUG
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1a428fe9f070..b7ff3842afc0 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1669,8 +1669,8 @@ static int isp_link_entity(
break;
}
if (i == entity->num_pads) {
- dev_err(isp->dev, "%s: no source pad in external entity\n",
- __func__);
+ dev_err(isp->dev, "%s: no source pad in external entity %s\n",
+ __func__, entity->name);
return -EINVAL;
}
@@ -2001,6 +2001,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
+ v4l2_async_notifier_cleanup(&isp->notifier);
return 0;
}
@@ -2011,44 +2012,41 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
- struct isp_async_subdev *isd)
+static int isp_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
+ struct isp_async_subdev *isd =
+ container_of(asd, struct isp_async_subdev, asd);
struct isp_bus_cfg *buscfg = &isd->bus;
- struct v4l2_fwnode_endpoint vep;
- unsigned int i;
- int ret;
bool csi1 = false;
-
- ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
- if (ret)
- return ret;
+ unsigned int i;
dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_PARALLEL:
buscfg->interface = ISP_INTERFACE_PARALLEL;
buscfg->bus.parallel.data_lane_shift =
- vep.bus.parallel.data_shift;
+ vep->bus.parallel.data_shift;
buscfg->bus.parallel.clk_pol =
- !!(vep.bus.parallel.flags
+ !!(vep->bus.parallel.flags
& V4L2_MBUS_PCLK_SAMPLE_FALLING);
buscfg->bus.parallel.hs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
buscfg->bus.parallel.vs_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
buscfg->bus.parallel.fld_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
+ !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
buscfg->bus.parallel.data_pol =
- !!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
- buscfg->bus.parallel.bt656 = vep.bus_type == V4L2_MBUS_BT656;
+ !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656;
break;
case ISP_OF_PHY_CSIPHY1:
case ISP_OF_PHY_CSIPHY2:
- switch (vep.bus_type) {
+ switch (vep->bus_type) {
case V4L2_MBUS_CCP2:
case V4L2_MBUS_CSI1:
dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
@@ -2060,11 +2058,11 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
break;
default:
dev_err(dev, "unsupported bus type %u\n",
- vep.bus_type);
+ vep->bus_type);
return -EINVAL;
}
- switch (vep.base.port) {
+ switch (vep->base.port) {
case ISP_OF_PHY_CSIPHY1:
if (csi1)
buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
@@ -2080,47 +2078,47 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
}
if (csi1) {
buscfg->bus.ccp2.lanecfg.clk.pos =
- vep.bus.mipi_csi1.clock_lane;
+ vep->bus.mipi_csi1.clock_lane;
buscfg->bus.ccp2.lanecfg.clk.pol =
- vep.bus.mipi_csi1.lane_polarity[0];
+ vep->bus.mipi_csi1.lane_polarity[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.clk.pol,
buscfg->bus.ccp2.lanecfg.clk.pos);
buscfg->bus.ccp2.lanecfg.data[0].pos =
- vep.bus.mipi_csi1.data_lane;
+ vep->bus.mipi_csi1.data_lane;
buscfg->bus.ccp2.lanecfg.data[0].pol =
- vep.bus.mipi_csi1.lane_polarity[1];
+ vep->bus.mipi_csi1.lane_polarity[1];
dev_dbg(dev, "data lane polarity %u, pos %u\n",
buscfg->bus.ccp2.lanecfg.data[0].pol,
buscfg->bus.ccp2.lanecfg.data[0].pos);
buscfg->bus.ccp2.strobe_clk_pol =
- vep.bus.mipi_csi1.clock_inv;
- buscfg->bus.ccp2.phy_layer = vep.bus.mipi_csi1.strobe;
+ vep->bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe;
buscfg->bus.ccp2.ccp2_mode =
- vep.bus_type == V4L2_MBUS_CCP2;
+ vep->bus_type == V4L2_MBUS_CCP2;
buscfg->bus.ccp2.vp_clk_pol = 1;
buscfg->bus.ccp2.crc = 1;
} else {
buscfg->bus.csi2.lanecfg.clk.pos =
- vep.bus.mipi_csi2.clock_lane;
+ vep->bus.mipi_csi2.clock_lane;
buscfg->bus.csi2.lanecfg.clk.pol =
- vep.bus.mipi_csi2.lane_polarities[0];
+ vep->bus.mipi_csi2.lane_polarities[0];
dev_dbg(dev, "clock lane polarity %u, pos %u\n",
buscfg->bus.csi2.lanecfg.clk.pol,
buscfg->bus.csi2.lanecfg.clk.pos);
buscfg->bus.csi2.num_data_lanes =
- vep.bus.mipi_csi2.num_data_lanes;
+ vep->bus.mipi_csi2.num_data_lanes;
for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
buscfg->bus.csi2.lanecfg.data[i].pos =
- vep.bus.mipi_csi2.data_lanes[i];
+ vep->bus.mipi_csi2.data_lanes[i];
buscfg->bus.csi2.lanecfg.data[i].pol =
- vep.bus.mipi_csi2.lane_polarities[i + 1];
+ vep->bus.mipi_csi2.lane_polarities[i + 1];
dev_dbg(dev,
"data lane %u polarity %u, pos %u\n", i,
buscfg->bus.csi2.lanecfg.data[i].pol,
@@ -2137,57 +2135,13 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
default:
dev_warn(dev, "%pOF: invalid interface %u\n",
- to_of_node(fwnode), vep.base.port);
+ to_of_node(vep->base.local_fwnode), vep->base.port);
return -EINVAL;
}
return 0;
}
-static int isp_fwnodes_parse(struct device *dev,
- struct v4l2_async_notifier *notifier)
-{
- struct fwnode_handle *fwnode = NULL;
-
- notifier->subdevs = devm_kcalloc(
- dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
- if (!notifier->subdevs)
- return -ENOMEM;
-
- while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
- (fwnode = fwnode_graph_get_next_endpoint(
- of_fwnode_handle(dev->of_node), fwnode))) {
- struct isp_async_subdev *isd;
-
- isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
- if (!isd)
- goto error;
-
- if (isp_fwnode_parse(dev, fwnode, isd)) {
- devm_kfree(dev, isd);
- continue;
- }
-
- notifier->subdevs[notifier->num_subdevs] = &isd->asd;
-
- isd->asd.match.fwnode.fwnode =
- fwnode_graph_get_remote_port_parent(fwnode);
- if (!isd->asd.match.fwnode.fwnode) {
- dev_warn(dev, "bad remote port parent\n");
- goto error;
- }
-
- isd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
- notifier->num_subdevs++;
- }
-
- return notifier->num_subdevs;
-
-error:
- fwnode_handle_put(fwnode);
- return -EINVAL;
-}
-
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct isp_device *isp = container_of(async, struct isp_device,
@@ -2201,7 +2155,7 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- if (!sd->asd)
+ if (sd->notifier != &isp->notifier)
continue;
ret = isp_link_entity(isp, &sd->entity,
@@ -2217,6 +2171,10 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&isp->media_dev);
}
+static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .complete = isp_subdev_notifier_complete,
+};
+
/*
* isp_probe - Probe ISP platform device
* @pdev: Pointer to ISP platform device
@@ -2256,15 +2214,17 @@ static int isp_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = isp_fwnodes_parse(&pdev->dev, &isp->notifier);
- if (ret < 0)
- return ret;
-
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ &pdev->dev, &isp->notifier, sizeof(struct isp_async_subdev),
+ isp_fwnode_parse);
+ if (ret < 0)
+ goto error;
+
isp->dev = &pdev->dev;
isp->ref_count = 0;
@@ -2385,7 +2345,7 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_register_entities;
- isp->notifier.complete = isp_subdev_notifier_complete;
+ isp->notifier.ops = &isp_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
@@ -2406,6 +2366,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
+ v4l2_async_notifier_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
return ret;
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index e528df6efc09..8b9043db94b3 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -220,14 +220,11 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
-
-#define ISP_MAX_SUBDEVS 8
- struct v4l2_subdev *subdevs[ISP_MAX_SUBDEVS];
};
struct isp_async_subdev {
- struct isp_bus_cfg bus;
struct v4l2_async_subdev asd;
+ struct isp_bus_cfg bus;
};
#define v4l2_subdev_to_bus_cfg(sd) \
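
The omap3isp conversion replaces its hand-rolled endpoint walk with v4l2_async_notifier_parse_fwnode_endpoints(): the core iterates the device's fwnode graph, allocates one descriptor of the size the driver requests per endpoint, and hands each one to the driver's parse callback. The driver-private data rides in a structure that embeds struct v4l2_async_subdev as its first member, which is why isp_async_subdev is reordered in the isp.h hunk above. Roughly, with illustrative names and error handling trimmed:

    #include <linux/kernel.h>
    #include <media/v4l2-async.h>
    #include <media/v4l2-fwnode.h>

    struct ex_async_subdev {
        struct v4l2_async_subdev asd;   /* must come first: the core allocates this */
        unsigned int port;              /* driver-private bus/endpoint information */
    };

    static int ex_fwnode_parse(struct device *dev,
                               struct v4l2_fwnode_endpoint *vep,
                               struct v4l2_async_subdev *asd)
    {
        struct ex_async_subdev *exd =
            container_of(asd, struct ex_async_subdev, asd);

        exd->port = vep->base.port;     /* pick what the driver needs from vep */
        return 0;                       /* -ENOTCONN skips this endpoint */
    }

    /* In probe():
     *     ret = v4l2_async_notifier_parse_fwnode_endpoints(dev, &notifier,
     *                     sizeof(struct ex_async_subdev), ex_fwnode_parse);
     *     ...
     *     v4l2_async_notifier_cleanup(&notifier);   // on error and in remove()
     */

The rcar-vin and omap3isp hunks both gain the matching v4l2_async_notifier_cleanup() calls for exactly this reason: the parsed subdev descriptors now belong to the notifier rather than to devm allocations.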
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index edca993c2b1f..9d3f0cb1d95a 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2221,6 +2221,11 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
mutex_unlock(&pcdev->mlock);
}
+static const struct v4l2_async_notifier_operations pxa_camera_sensor_ops = {
+ .bound = pxa_camera_sensor_bound,
+ .unbind = pxa_camera_sensor_unbind,
+};
+
/*
* Driver probe, remove, suspend and resume operations
*/
@@ -2489,8 +2494,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->asds[0] = &pcdev->asd;
pcdev->notifier.subdevs = pcdev->asds;
pcdev->notifier.num_subdevs = 1;
- pcdev->notifier.bound = pxa_camera_sensor_bound;
- pcdev->notifier.unbind = pxa_camera_sensor_unbind;
+ pcdev->notifier.ops = &pxa_camera_sensor_ops;
if (!of_have_populated_dt())
pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C;
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b22d2dfcd3c2..55232a912950 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -622,6 +622,9 @@ static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
+ } else {
+ /* On current devices output->wm_num is always <= 2 */
+ break;
}
if (output->wm_idx[i] % 2 == 1)
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss-8x16/camss-video.c
index cf4219e871bd..ffaa2849e0c1 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-video.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-video.c
@@ -21,7 +21,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-dma-sg.h>
#include "camss-video.h"
diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss-8x16/camss.c
index a3760b5dd1d1..390a42c17b66 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss.c
@@ -601,6 +601,11 @@ static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
return media_device_register(&camss->media_dev);
}
+static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
+ .bound = camss_subdev_notifier_bound,
+ .complete = camss_subdev_notifier_complete,
+};
+
static const struct media_device_ops camss_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
@@ -655,8 +660,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_register_entities;
if (camss->notifier.num_subdevs) {
- camss->notifier.bound = camss_subdev_notifier_bound;
- camss->notifier.complete = camss_subdev_notifier_complete;
+ camss->notifier.ops = &camss_subdev_notifier_ops;
ret = v4l2_async_notifier_register(&camss->v4l2_dev,
&camss->notifier);
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
index 0fe9afb83697..bfd4edf7c83f 100644
--- a/drivers/media/platform/qcom/venus/Makefile
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for Qualcomm Venus driver
venus-core-objs += core.o helpers.o firmware.o \
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index cba092bcb76d..a0fe80df0cbd 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -194,7 +194,6 @@ struct venus_buffer {
* @fh: a holder of v4l file handle structure
* @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue
- * @cmd_stop: a flag to signal encoder/decoder commands
* @width: current capture width
* @height: current capture height
* @out_width: current output width
@@ -258,7 +257,6 @@ struct venus_inst {
} controls;
struct v4l2_fh fh;
unsigned int streamon_cap, streamon_out;
- bool cmd_stop;
u32 width;
u32 height;
u32 out_width;
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 9b2a401a4891..0ce9559a2924 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -623,13 +623,6 @@ void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
mutex_lock(&inst->lock);
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
- inst->cmd_stop = false;
- goto unlock;
- }
-
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
if (!(inst->streamon_out & inst->streamon_cap))
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index c09490876516..1baf78d3c02d 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -88,12 +88,6 @@ unlock:
return ret;
}
-static int core_deinit_wait_atomic_t(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
int hfi_core_deinit(struct venus_core *core, bool blocking)
{
int ret = 0, empty;
@@ -112,7 +106,7 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)
if (!empty) {
mutex_unlock(&core->lock);
- wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t,
+ wait_on_atomic_t(&core->insts_count, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
mutex_lock(&core->lock);
}
@@ -484,6 +478,7 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(hfi_session_process_buf);
irqreturn_t hfi_isr_thread(int irq, void *dev_id)
{
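
The venus core previously open-coded a wait helper that just called schedule(); the hunk above switches to the generic atomic_t_wait action so the deinit path simply sleeps until the instance count drops. A hedged sketch of the usual pairing (the wake side is what instance teardown is expected to do elsewhere in the driver):

    #include <linux/atomic.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    static atomic_t ex_count = ATOMIC_INIT(0);

    static void ex_wait_for_zero(void)
    {
        /* sleep until the count reaches zero and a waker signals the atomic_t */
        wait_on_atomic_t(&ex_count, atomic_t_wait, TASK_UNINTERRUPTIBLE);
    }

    static void ex_put(void)
    {
        if (atomic_dec_and_test(&ex_count))
            wake_up_atomic_t(&ex_count);
    }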
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 1caae8feaa36..734ce11b0ed0 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -344,7 +344,7 @@ static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
desc->attrs = DMA_ATTR_WRITE_COMBINE;
desc->size = ALIGN(size, SZ_4K);
- desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
+ desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
desc->attrs);
if (!desc->kva)
return -ENOMEM;
@@ -710,10 +710,8 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret)
return ret;
- hdev->ifaceq_table.kva = desc.kva;
- hdev->ifaceq_table.da = desc.da;
- hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
- offset = hdev->ifaceq_table.size;
+ hdev->ifaceq_table = desc;
+ offset = IFACEQ_TABLE_SIZE;
for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i];
@@ -755,9 +753,7 @@ static int venus_interface_queues_init(struct venus_hfi_device *hdev)
if (ret) {
hdev->sfr.da = 0;
} else {
- hdev->sfr.da = desc.da;
- hdev->sfr.kva = desc.kva;
- hdev->sfr.size = ALIGNED_SFR_SIZE;
+ hdev->sfr = desc;
sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE;
}
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index da611a5eb670..c9e9576bb08a 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -469,8 +469,14 @@ static int vdec_subscribe_event(struct v4l2_fh *fh,
static int
vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
- if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ switch (cmd->cmd) {
+ case V4L2_DEC_CMD_STOP:
+ if (cmd->flags & V4L2_DEC_CMD_STOP_TO_BLACK)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
return 0;
}
@@ -479,6 +485,7 @@ static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
+ struct hfi_frame_data fdata = {0};
int ret;
ret = vdec_try_decoder_cmd(file, fh, cmd);
@@ -486,12 +493,23 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
return ret;
mutex_lock(&inst->lock);
- inst->cmd_stop = true;
- mutex_unlock(&inst->lock);
- hfi_session_flush(inst);
+ /*
+ * Implement V4L2_DEC_CMD_STOP by enqueuing an empty buffer on decoder
+ * input to signal EOS.
+ */
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ fdata.device_addr = 0xdeadbeef;
- return 0;
+ ret = hfi_session_process_buf(inst, &fdata);
+
+unlock:
+ mutex_unlock(&inst->lock);
+ return ret;
}
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
@@ -718,7 +736,6 @@ static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
inst->reconfig = false;
inst->sequence_cap = 0;
inst->sequence_out = 0;
- inst->cmd_stop = false;
ret = vdec_init_session(inst);
if (ret)
@@ -807,11 +824,6 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
- if (inst->cmd_stop) {
- vbuf->flags |= V4L2_BUF_FLAG_LAST;
- inst->cmd_stop = false;
- }
-
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
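
With the cmd_stop flag gone, V4L2_DEC_CMD_STOP now reaches the firmware as an empty EOS-flagged input buffer, and the capture side raises V4L2_BUF_FLAG_LAST and V4L2_EVENT_EOS once the firmware drains. From the application side the command itself is unchanged; a hedged sketch of how userspace issues it:

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Ask the decoder to drain: remaining capture buffers are returned and the
     * last one carries V4L2_BUF_FLAG_LAST, with a V4L2_EVENT_EOS queued. */
    static int request_decoder_stop(int fd)
    {
        struct v4l2_decoder_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = V4L2_DEC_CMD_STOP;    /* STOP_TO_BLACK is rejected by this driver */

        if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd) < 0)
            return -errno;
        return 0;
    }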
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6f123a387cf9..3fcf0e9b7b29 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -963,13 +963,12 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
if (!vbuf)
return;
- vb = &vbuf->vb2_buf;
- vb->planes[0].bytesused = bytesused;
- vb->planes[0].data_offset = data_offset;
-
vbuf->flags = flags;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb = &vbuf->vb2_buf;
+ vb2_set_plane_payload(vb, 0, bytesused + data_offset);
+ vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
} else {
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 142de447aaaa..108d776f3265 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
#include "rcar-vin.h"
@@ -77,14 +78,14 @@ static int rvin_digital_notify_complete(struct v4l2_async_notifier *notifier)
int ret;
/* Verify subdevices mbus format */
- if (!rvin_mbus_supported(&vin->digital)) {
+ if (!rvin_mbus_supported(vin->digital)) {
vin_err(vin, "Unsupported media bus format for %s\n",
- vin->digital.subdev->name);
+ vin->digital->subdev->name);
return -EINVAL;
}
vin_dbg(vin, "Found media bus format for %s: %d\n",
- vin->digital.subdev->name, vin->digital.code);
+ vin->digital->subdev->name, vin->digital->code);
ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
if (ret < 0) {
@@ -103,7 +104,7 @@ static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
rvin_v4l2_remove(vin);
- vin->digital.subdev = NULL;
+ vin->digital->subdev = NULL;
}
static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
@@ -120,117 +121,75 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
if (ret < 0)
return ret;
- vin->digital.source_pad = ret;
+ vin->digital->source_pad = ret;
ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
- vin->digital.sink_pad = ret < 0 ? 0 : ret;
+ vin->digital->sink_pad = ret < 0 ? 0 : ret;
- vin->digital.subdev = subdev;
+ vin->digital->subdev = subdev;
vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
- subdev->name, vin->digital.source_pad,
- vin->digital.sink_pad);
+ subdev->name, vin->digital->source_pad,
+ vin->digital->sink_pad);
return 0;
}
+static const struct v4l2_async_notifier_operations rvin_digital_notify_ops = {
+ .bound = rvin_digital_notify_bound,
+ .unbind = rvin_digital_notify_unbind,
+ .complete = rvin_digital_notify_complete,
+};
+
-static int rvin_digitial_parse_v4l2(struct rvin_dev *vin,
- struct device_node *ep,
- struct v4l2_mbus_config *mbus_cfg)
+static int rvin_digital_parse_v4l2(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
{
- struct v4l2_fwnode_endpoint v4l2_ep;
- int ret;
+ struct rvin_dev *vin = dev_get_drvdata(dev);
+ struct rvin_graph_entity *rvge =
+ container_of(asd, struct rvin_graph_entity, asd);
- ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
- if (ret) {
- vin_err(vin, "Could not parse v4l2 endpoint\n");
- return -EINVAL;
- }
+ if (vep->base.port || vep->base.id)
+ return -ENOTCONN;
- mbus_cfg->type = v4l2_ep.bus_type;
+ rvge->mbus_cfg.type = vep->bus_type;
- switch (mbus_cfg->type) {
+ switch (rvge->mbus_cfg.type) {
case V4L2_MBUS_PARALLEL:
vin_dbg(vin, "Found PARALLEL media bus\n");
- mbus_cfg->flags = v4l2_ep.bus.parallel.flags;
+ rvge->mbus_cfg.flags = vep->bus.parallel.flags;
break;
case V4L2_MBUS_BT656:
vin_dbg(vin, "Found BT656 media bus\n");
- mbus_cfg->flags = 0;
+ rvge->mbus_cfg.flags = 0;
break;
default:
vin_err(vin, "Unknown media bus type\n");
return -EINVAL;
}
- return 0;
-}
-
-static int rvin_digital_graph_parse(struct rvin_dev *vin)
-{
- struct device_node *ep, *np;
- int ret;
-
- vin->digital.asd.match.fwnode.fwnode = NULL;
- vin->digital.subdev = NULL;
-
- /*
- * Port 0 id 0 is local digital input, try to get it.
- * Not all instances can or will have this, that is OK
- */
- ep = of_graph_get_endpoint_by_regs(vin->dev->of_node, 0, 0);
- if (!ep)
- return 0;
-
- np = of_graph_get_remote_port_parent(ep);
- if (!np) {
- vin_err(vin, "No remote parent for digital input\n");
- of_node_put(ep);
- return -EINVAL;
- }
- of_node_put(np);
-
- ret = rvin_digitial_parse_v4l2(vin, ep, &vin->digital.mbus_cfg);
- of_node_put(ep);
- if (ret)
- return ret;
-
- vin->digital.asd.match.fwnode.fwnode = of_fwnode_handle(np);
- vin->digital.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ vin->digital = rvge;
return 0;
}
static int rvin_digital_graph_init(struct rvin_dev *vin)
{
- struct v4l2_async_subdev **subdevs = NULL;
int ret;
- ret = rvin_digital_graph_parse(vin);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(
+ vin->dev, &vin->notifier,
+ sizeof(struct rvin_graph_entity), rvin_digital_parse_v4l2);
if (ret)
return ret;
- if (!vin->digital.asd.match.fwnode.fwnode) {
- vin_dbg(vin, "No digital subdevice found\n");
+ if (!vin->digital)
return -ENODEV;
- }
-
- /* Register the subdevices notifier. */
- subdevs = devm_kzalloc(vin->dev, sizeof(*subdevs), GFP_KERNEL);
- if (subdevs == NULL)
- return -ENOMEM;
-
- subdevs[0] = &vin->digital.asd;
vin_dbg(vin, "Found digital subdevice %pOF\n",
- to_of_node(subdevs[0]->match.fwnode.fwnode));
-
- vin->notifier.num_subdevs = 1;
- vin->notifier.subdevs = subdevs;
- vin->notifier.bound = rvin_digital_notify_bound;
- vin->notifier.unbind = rvin_digital_notify_unbind;
- vin->notifier.complete = rvin_digital_notify_complete;
+ to_of_node(vin->digital->asd.match.fwnode.fwnode));
+ vin->notifier.ops = &rvin_digital_notify_ops;
ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
@@ -290,6 +249,8 @@ static int rcar_vin_probe(struct platform_device *pdev)
if (ret)
return ret;
+ platform_set_drvdata(pdev, vin);
+
ret = rvin_digital_graph_init(vin);
if (ret < 0)
goto error;
@@ -297,11 +258,10 @@ static int rcar_vin_probe(struct platform_device *pdev)
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, vin);
-
return 0;
error:
rvin_dma_remove(vin);
+ v4l2_async_notifier_cleanup(&vin->notifier);
return ret;
}
@@ -313,6 +273,7 @@ static int rcar_vin_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
v4l2_async_notifier_unregister(&vin->notifier);
+ v4l2_async_notifier_cleanup(&vin->notifier);
rvin_dma_remove(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index b136844499f6..23fdff7a7370 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -183,7 +183,7 @@ static int rvin_setup(struct rvin_dev *vin)
/*
* Input interface
*/
- switch (vin->digital.code) {
+ switch (vin->digital->code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
/* BT.601/BT.1358 16bit YCbCr422 */
vnmc |= VNMC_INF_YUV16;
@@ -191,7 +191,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
input_is_yuv = true;
break;
@@ -200,7 +200,7 @@ static int rvin_setup(struct rvin_dev *vin)
break;
case MEDIA_BUS_FMT_UYVY10_2X10:
/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
- vnmc |= vin->digital.mbus_cfg.type == V4L2_MBUS_BT656 ?
+ vnmc |= vin->digital->mbus_cfg.type == V4L2_MBUS_BT656 ?
VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
input_is_yuv = true;
break;
@@ -212,11 +212,11 @@ static int rvin_setup(struct rvin_dev *vin)
dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);
/* Hsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_HPS;
/* Vsync Signal Polarity Select */
- if (!(vin->digital.mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ if (!(vin->digital->mbus_cfg.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
dmr2 |= VNDMR2_VPS;
/*
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index dd37ea811680..b479b882da12 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -111,7 +111,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
struct v4l2_mbus_framefmt *mf = &fmt.format;
int ret;
- fmt.pad = vin->digital.source_pad;
+ fmt.pad = vin->digital->source_pad;
ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
if (ret)
@@ -172,13 +172,13 @@ static int __rvin_try_format_source(struct rvin_dev *vin,
sd = vin_to_source(vin);
- v4l2_fill_mbus_format(&format.format, pix, vin->digital.code);
+ v4l2_fill_mbus_format(&format.format, pix, vin->digital->code);
pad_cfg = v4l2_subdev_alloc_pad_config(sd);
if (pad_cfg == NULL)
return -ENOMEM;
- format.pad = vin->digital.source_pad;
+ format.pad = vin->digital->source_pad;
field = pix->field;
@@ -555,7 +555,7 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
if (timings->pad)
return -EINVAL;
- timings->pad = vin->digital.sink_pad;
+ timings->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
@@ -607,7 +607,7 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
if (cap->pad)
return -EINVAL;
- cap->pad = vin->digital.sink_pad;
+ cap->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
@@ -625,7 +625,7 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
@@ -643,7 +643,7 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
if (edid->pad)
return -EINVAL;
- edid->pad = vin->digital.sink_pad;
+ edid->pad = vin->digital->sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 9bfb5a7c4dc4..5382078143fb 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -126,7 +126,7 @@ struct rvin_dev {
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
- struct rvin_graph_entity digital;
+ struct rvin_graph_entity *digital;
struct mutex lock;
struct vb2_queue queue;
@@ -145,7 +145,7 @@ struct rvin_dev {
struct v4l2_rect compose;
};
-#define vin_to_source(vin) vin->digital.subdev
+#define vin_to_source(vin) ((vin)->digital->subdev)
/* Debug */
#define vin_dbg(d, fmt, arg...) dev_dbg(d->dev, fmt, ##arg)
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 522364ff0d5d..63c94f4028a7 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -630,7 +630,7 @@ static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
{
unsigned int i;
u32 ctr;
- int ret;
+ int ret = -EINVAL;
/*
* When both internal channels are enabled, they can be synchronized
@@ -1185,6 +1185,12 @@ error:
return ret;
}
+static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
+ .bound = rcar_drif_notify_bound,
+ .unbind = rcar_drif_notify_unbind,
+ .complete = rcar_drif_notify_complete,
+};
+
/* Read endpoint properties */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
struct fwnode_handle *fwnode)
@@ -1347,9 +1353,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
if (ret)
goto error;
- sdr->notifier.bound = rcar_drif_notify_bound;
- sdr->notifier.unbind = rcar_drif_notify_unbind;
- sdr->notifier.complete = rcar_drif_notify_complete;
+ sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
diff --git a/drivers/media/platform/rockchip/rga/Makefile b/drivers/media/platform/rockchip/rga/Makefile
new file mode 100644
index 000000000000..92fe25490ccd
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/Makefile
@@ -0,0 +1,3 @@
+rockchip-rga-objs := rga.o rga-hw.o rga-buf.o
+
+obj-$(CONFIG_VIDEO_ROCKCHIP_RGA) += rockchip-rga.o
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
new file mode 100644
index 000000000000..49cacc7a48d1
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int
+rga_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vq);
+ struct rga_frame *f = rga_get_frame(ctx, vq->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ if (*nplanes)
+ return sizes[0] < f->size ? -EINVAL : 0;
+
+ sizes[0] = f->size;
+ *nplanes = 1;
+
+ return 0;
+}
+
+static int rga_buf_prepare(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
+
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ vb2_set_plane_payload(vb, 0, f->size);
+
+ return 0;
+}
+
+static void rga_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ int ret, i;
+
+ ret = pm_runtime_get_sync(rga->dev);
+
+	if (ret >= 0)
+ return 0;
+
+ for (i = 0; i < q->num_buffers; ++i) {
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ v4l2_m2m_buf_done(to_vb2_v4l2_buffer(q->bufs[i]),
+ VB2_BUF_STATE_QUEUED);
+ }
+ }
+
+ return ret;
+}
+
+static void rga_buf_stop_streaming(struct vb2_queue *q)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(q);
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+
+ pm_runtime_put(rga->dev);
+}
+
+const struct vb2_ops rga_qops = {
+ .queue_setup = rga_queue_setup,
+ .buf_prepare = rga_buf_prepare,
+ .buf_queue = rga_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = rga_buf_start_streaming,
+ .stop_streaming = rga_buf_stop_streaming,
+};
+
+/*
+ * The RGA MMU is a single-level MMU, so it can't be used through the
+ * IOMMU API. We use it more like a scatter-gather list.
+ */
+void rga_buf_map(struct vb2_buffer *vb)
+{
+ struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct rockchip_rga *rga = ctx->rga;
+ struct sg_table *sgt;
+ struct scatterlist *sgl;
+ unsigned int *pages;
+ unsigned int address, len, i, p;
+ unsigned int mapped_size = 0;
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ pages = rga->src_mmu_pages;
+ else
+ pages = rga->dst_mmu_pages;
+
+ /* Create local MMU table for RGA */
+ sgt = vb2_plane_cookie(vb, 0);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;
+ address = sg_phys(sgl);
+
+ for (p = 0; p < len; p++) {
+ dma_addr_t phys = address + (p << PAGE_SHIFT);
+
+ pages[mapped_size + p] = phys;
+ }
+
+ mapped_size += len;
+ }
+
+ /* sync local MMU table for RGA */
+ dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
+ 8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
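The table rga_buf_map() fills is just one 32-bit entry per page of the buffer,
stored in buffer order, so the hardware walks a flat list rather than a real
page table. A minimal stand-alone sketch of that layout, using two made-up
physically contiguous segments (addresses and sizes are illustrative only, not
taken from the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	int main(void)
	{
		/* Two hypothetical DMA segments: { physical base, length in bytes }. */
		struct { uint64_t base, len; } seg[] = {
			{ 0x40000000, 4 * PAGE_SIZE },
			{ 0x40100000, 2 * PAGE_SIZE },
		};
		uint32_t pages[16];
		unsigned int mapped = 0, i, p;

		for (i = 0; i < 2; i++) {
			unsigned int npages = seg[i].len >> PAGE_SHIFT;

			/* One entry per page, in buffer order. */
			for (p = 0; p < npages; p++)
				pages[mapped + p] = seg[i].base + ((uint64_t)p << PAGE_SHIFT);
			mapped += npages;
		}

		for (i = 0; i < mapped; i++)
			printf("entry %u -> 0x%x\n", i, pages[i]);
		return 0;
	}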
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
new file mode 100644
index 000000000000..96d1b1b3fe8e
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+enum e_rga_start_pos {
+ LT = 0,
+ LB = 1,
+ RT = 2,
+ RB = 3,
+};
+
+struct rga_addr_offset {
+ unsigned int y_off;
+ unsigned int u_off;
+ unsigned int v_off;
+};
+
+struct rga_corners_addr_offset {
+ struct rga_addr_offset left_top;
+ struct rga_addr_offset right_top;
+ struct rga_addr_offset left_bottom;
+ struct rga_addr_offset right_bottom;
+};
+
+static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
+{
+ /*
+ * The rga hw scaling factor is a normalized inverse of the
+ * scaling factor.
+ * For example: When source width is 100 and destination width is 200
+ * (scaling of 2x), then the hw factor is NC * 100 / 200.
+ * The normalization factor (NC) is 2^16 = 0x10000.
+ */
+
+ return (src > dst) ? ((dst << 16) / src) : ((src << 16) / dst);
+}
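A quick sanity check of the formula above with hypothetical widths: upscaling
100 -> 200 takes the src < dst branch, downscaling 200 -> 100 takes the
src > dst branch, and both encode the same 2:1 ratio against the 0x10000
normalization constant:

	rga_get_scaling(100, 200);	/* (100 << 16) / 200 == 0x8000 */
	rga_get_scaling(200, 100);	/* (100 << 16) / 200 == 0x8000 */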
+
+static struct rga_corners_addr_offset
+rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
+ unsigned int w, unsigned int h)
+{
+ struct rga_corners_addr_offset offsets;
+ struct rga_addr_offset *lt, *lb, *rt, *rb;
+	unsigned int x_div, y_div, uv_stride, pixel_width, uv_factor;
+
+ lt = &offsets.left_top;
+ lb = &offsets.left_bottom;
+ rt = &offsets.right_top;
+ rb = &offsets.right_bottom;
+
+ x_div = frm->fmt->x_div;
+ y_div = frm->fmt->y_div;
+ uv_factor = frm->fmt->uv_factor;
+ uv_stride = frm->stride / x_div;
+ pixel_width = frm->stride / frm->width;
+
+ lt->y_off = y * frm->stride + x * pixel_width;
+ lt->u_off =
+ frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
+ lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
+
+ lb->y_off = lt->y_off + (h - 1) * frm->stride;
+ lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
+ lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride;
+
+ rt->y_off = lt->y_off + (w - 1) * pixel_width;
+ rt->u_off = lt->u_off + w / x_div - 1;
+ rt->v_off = lt->v_off + w / x_div - 1;
+
+ rb->y_off = lb->y_off + (w - 1) * pixel_width;
+ rb->u_off = lb->u_off + w / x_div - 1;
+ rb->v_off = lb->v_off + w / x_div - 1;
+
+ return offsets;
+}
+
+static struct rga_addr_offset *
+rga_lookup_draw_pos(struct rga_corners_addr_offset *offsets,
+		    u32 rotate_mode, u32 mirr_mode)
+{
+ static enum e_rga_start_pos rot_mir_point_matrix[4][4] = {
+ {
+ LT, RT, LB, RB,
+ },
+ {
+ RT, LT, RB, LB,
+ },
+ {
+ RB, LB, RT, LT,
+ },
+ {
+ LB, RB, LT, RT,
+ },
+ };
+
+ if (!offsets)
+ return NULL;
+
+ switch (rot_mir_point_matrix[rotate_mode][mirr_mode]) {
+ case LT:
+ return &offsets->left_top;
+ case LB:
+ return &offsets->left_bottom;
+ case RT:
+ return &offsets->right_top;
+ case RB:
+ return &offsets->right_bottom;
+ }
+
+ return NULL;
+}
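Reading the table above: the row is the rotation mode, the column is the
mirror mode, and the entry names which source corner becomes the destination
start address. For instance, a plain 90-degree rotation (hypothetical call,
not part of the patch):

	struct rga_addr_offset *pos;

	pos = rga_lookup_draw_pos(&offsets, RGA_SRC_ROT_MODE_90_DEGREE,
				  RGA_SRC_MIRR_MODE_NO);
	/* rot_mir_point_matrix[1][0] == RT, so pos == &offsets.right_top */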
+
+static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7;
+}
+
+static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 4;
+}
+
+static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int reg;
+
+ reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
+ dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
+
+ reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
+ dest[reg >> 2] |= 0x7 << 8;
+}
+
+static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ unsigned int scale_dst_w, scale_dst_h;
+ unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
+ union rga_src_info src_info;
+ union rga_dst_info dst_info;
+ union rga_src_x_factor x_factor;
+ union rga_src_y_factor y_factor;
+ union rga_src_vir_info src_vir_info;
+ union rga_src_act_info src_act_info;
+ union rga_dst_vir_info dst_vir_info;
+ union rga_dst_act_info dst_act_info;
+
+ struct rga_addr_offset *dst_offset;
+ struct rga_corners_addr_offset offsets;
+ struct rga_corners_addr_offset src_offsets;
+
+ src_h = ctx->in.crop.height;
+ src_w = ctx->in.crop.width;
+ src_x = ctx->in.crop.left;
+ src_y = ctx->in.crop.top;
+ dst_h = ctx->out.crop.height;
+ dst_w = ctx->out.crop.width;
+ dst_x = ctx->out.crop.left;
+ dst_y = ctx->out.crop.top;
+
+ src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
+ x_factor.val = dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ y_factor.val = dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2];
+ src_vir_info.val = dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ src_act_info.val = dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_vir_info.val = dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
+ dst_act_info.val = dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
+
+ src_info.data.format = ctx->in.fmt->hw_format;
+ src_info.data.swap = ctx->in.fmt->color_swap;
+ dst_info.data.format = ctx->out.fmt->hw_format;
+ dst_info.data.swap = ctx->out.fmt->color_swap;
+
+ if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->in.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ src_info.data.csc_mode =
+ RGA_SRC_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+ }
+
+ if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+ switch (ctx->out.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+ break;
+ default:
+ dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT601_R0;
+ break;
+ }
+ }
+
+ if (ctx->vflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_X;
+
+ if (ctx->hflip)
+ src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_Y;
+
+ switch (ctx->rotate) {
+ case 90:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
+ break;
+ case 180:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
+ break;
+ case 270:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
+ break;
+ default:
+ src_info.data.rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
+ break;
+ }
+
+	/*
+	 * Calculate the up/down scaling mode/factor.
+	 *
+	 * The RGA scales the picture first and rotates it second, so the
+	 * width/height must be swapped when the rotation is 90/270 degrees.
+	 */
+ if (src_info.data.rot_mode == RGA_SRC_ROT_MODE_90_DEGREE ||
+ src_info.data.rot_mode == RGA_SRC_ROT_MODE_270_DEGREE) {
+ if (rga->version.major == 0 || rga->version.minor == 0) {
+ if (dst_w == src_h)
+ src_h -= 8;
+ if (abs(src_w - dst_h) < 16)
+ src_w -= 16;
+ }
+
+ scale_dst_h = dst_w;
+ scale_dst_w = dst_h;
+ } else {
+ scale_dst_w = dst_w;
+ scale_dst_h = dst_h;
+ }
+
+ if (src_w == scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_NO;
+ x_factor.val = 0;
+ } else if (src_w > scale_dst_w) {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_DOWN;
+ x_factor.data.down_scale_factor =
+ rga_get_scaling(src_w, scale_dst_w) + 1;
+ } else {
+ src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_UP;
+ x_factor.data.up_scale_factor =
+ rga_get_scaling(src_w - 1, scale_dst_w - 1);
+ }
+
+ if (src_h == scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_NO;
+ y_factor.val = 0;
+ } else if (src_h > scale_dst_h) {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_DOWN;
+ y_factor.data.down_scale_factor =
+ rga_get_scaling(src_h, scale_dst_h) + 1;
+ } else {
+ src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_UP;
+ y_factor.data.up_scale_factor =
+ rga_get_scaling(src_h - 1, scale_dst_h - 1);
+ }
+
+	/*
+	 * Calculate the framebuffer virtual strides and active size.
+	 * Note that vir_stride / vir_width are expressed in 4-byte words.
+	 */
+ src_vir_info.data.vir_stride = ctx->in.stride >> 2;
+ src_vir_info.data.vir_width = ctx->in.stride >> 2;
+
+ src_act_info.data.act_height = src_h - 1;
+ src_act_info.data.act_width = src_w - 1;
+
+ dst_vir_info.data.vir_stride = ctx->out.stride >> 2;
+ dst_act_info.data.act_height = dst_h - 1;
+ dst_act_info.data.act_width = dst_w - 1;
+
+	/*
+	 * Calculate the source framebuffer base address with the pixel offset.
+	 */
+ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
+ src_w, src_h);
+
+	/*
+	 * Configure the destination framebuffer base address with the pixel
+	 * offset.
+	 */
+ offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
+ dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
+ src_info.data.mir_mode);
+
+ dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.y_off;
+ dest[(RGA_SRC_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.u_off;
+ dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ src_offsets.left_top.v_off;
+
+ dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
+ dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
+ dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
+ dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
+
+ dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
+
+ dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->y_off;
+ dest[(RGA_DST_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->u_off;
+ dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
+ dst_offset->v_off;
+
+ dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
+ dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
+
+ dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
+}
+
+static void rga_cmd_set_mode(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+ u32 *dest = rga->cmdbuf_virt;
+ union rga_mode_ctrl mode;
+ union rga_alpha_ctrl0 alpha_ctrl0;
+ union rga_alpha_ctrl1 alpha_ctrl1;
+
+ mode.val = 0;
+ alpha_ctrl0.val = 0;
+ alpha_ctrl1.val = 0;
+
+ mode.data.gradient_sat = 1;
+ mode.data.render = RGA_MODE_RENDER_BITBLT;
+ mode.data.bitblt = RGA_MODE_BITBLT_MODE_SRC_TO_DST;
+
+ /* disable alpha blending */
+ dest[(RGA_ALPHA_CTRL0 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl0.val;
+ dest[(RGA_ALPHA_CTRL1 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl1.val;
+
+ dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
+}
+
+static void rga_cmd_set(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
+
+ rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
+	/*
+	 * Due to a hardware bug, the src1 MMU must also be configured
+	 * when alpha blending is used.
+	 */
+ rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
+
+ rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
+ rga_cmd_set_mode(ctx);
+
+ rga_cmd_set_trans_info(ctx);
+
+ rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
+
+ /* sync CMD buf for RGA */
+ dma_sync_single_for_device(rga->dev, rga->cmdbuf_phy,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+void rga_hw_start(struct rockchip_rga *rga)
+{
+ struct rga_ctx *ctx = rga->curr;
+
+ rga_cmd_set(ctx);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x00);
+
+ rga_write(rga, RGA_SYS_CTRL, 0x22);
+
+ rga_write(rga, RGA_INT, 0x600);
+
+ rga_write(rga, RGA_CMD_CTRL, 0x1);
+}
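Every dest[(REG - RGA_MODE_BASE_REG) >> 2] access above indexes the command
buffer in 32-bit words, one word per mode register. A hypothetical helper (not
in the patch) makes the mapping explicit, and the arithmetic shows why the
mode register window fits the buffer:

	/* Hypothetical helper: word index of a mode register in the CMD buffer. */
	#define RGA_CMDBUF_IDX(reg)	(((reg) - RGA_MODE_BASE_REG) >> 2)

	/*
	 * RGA_CMDBUF_IDX(RGA_MODE_MAX_REG) == (0x017C - 0x0100) >> 2 == 31,
	 * so the 32 mode registers fill RGA_CMDBUF_SIZE (0x20) words exactly,
	 * matching the RGA_CMDBUF_SIZE * 4 byte memset in rga_cmd_set().
	 */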
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h
new file mode 100644
index 000000000000..ca3c204abe42
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga-hw.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_HW_H__
+#define __RGA_HW_H__
+
+#define RGA_CMDBUF_SIZE 0x20
+
+/* Hardware limits */
+#define MAX_WIDTH 8192
+#define MAX_HEIGHT 8192
+
+#define MIN_WIDTH 34
+#define MIN_HEIGHT 34
+
+#define DEFAULT_WIDTH 100
+#define DEFAULT_HEIGHT 100
+
+#define RGA_TIMEOUT 500
+
+/* Registers address */
+#define RGA_SYS_CTRL 0x0000
+#define RGA_CMD_CTRL 0x0004
+#define RGA_CMD_BASE 0x0008
+#define RGA_INT 0x0010
+#define RGA_MMU_CTRL0 0x0014
+#define RGA_VERSION_INFO 0x0028
+
+#define RGA_MODE_BASE_REG 0x0100
+#define RGA_MODE_MAX_REG 0x017C
+
+#define RGA_MODE_CTRL 0x0100
+#define RGA_SRC_INFO 0x0104
+#define RGA_SRC_Y_RGB_BASE_ADDR 0x0108
+#define RGA_SRC_CB_BASE_ADDR 0x010c
+#define RGA_SRC_CR_BASE_ADDR 0x0110
+#define RGA_SRC1_RGB_BASE_ADDR 0x0114
+#define RGA_SRC_VIR_INFO 0x0118
+#define RGA_SRC_ACT_INFO 0x011c
+#define RGA_SRC_X_FACTOR 0x0120
+#define RGA_SRC_Y_FACTOR 0x0124
+#define RGA_SRC_BG_COLOR 0x0128
+#define RGA_SRC_FG_COLOR 0x012c
+#define RGA_SRC_TR_COLOR0 0x0130
+#define RGA_SRC_TR_COLOR1 0x0134
+
+#define RGA_DST_INFO 0x0138
+#define RGA_DST_Y_RGB_BASE_ADDR 0x013c
+#define RGA_DST_CB_BASE_ADDR 0x0140
+#define RGA_DST_CR_BASE_ADDR 0x0144
+#define RGA_DST_VIR_INFO 0x0148
+#define RGA_DST_ACT_INFO 0x014c
+
+#define RGA_ALPHA_CTRL0 0x0150
+#define RGA_ALPHA_CTRL1 0x0154
+#define RGA_FADING_CTRL 0x0158
+#define RGA_PAT_CON 0x015c
+#define RGA_ROP_CON0 0x0160
+#define RGA_ROP_CON1 0x0164
+#define RGA_MASK_BASE 0x0168
+
+#define RGA_MMU_CTRL1 0x016C
+#define RGA_MMU_SRC_BASE 0x0170
+#define RGA_MMU_SRC1_BASE 0x0174
+#define RGA_MMU_DST_BASE 0x0178
+
+/* Registers value */
+#define RGA_MODE_RENDER_BITBLT 0
+#define RGA_MODE_RENDER_COLOR_PALETTE 1
+#define RGA_MODE_RENDER_RECTANGLE_FILL 2
+#define RGA_MODE_RENDER_UPDATE_PALETTE_LUT_RAM 3
+
+#define RGA_MODE_BITBLT_MODE_SRC_TO_DST 0
+#define RGA_MODE_BITBLT_MODE_SRC_SRC1_TO_DST 1
+
+#define RGA_MODE_CF_ROP4_SOLID 0
+#define RGA_MODE_CF_ROP4_PATTERN 1
+
+#define RGA_COLOR_FMT_ABGR8888 0
+#define RGA_COLOR_FMT_XBGR8888 1
+#define RGA_COLOR_FMT_RGB888 2
+#define RGA_COLOR_FMT_BGR565 4
+#define RGA_COLOR_FMT_ABGR1555 5
+#define RGA_COLOR_FMT_ABGR4444 6
+#define RGA_COLOR_FMT_YUV422SP 8
+#define RGA_COLOR_FMT_YUV422P 9
+#define RGA_COLOR_FMT_YUV420SP 10
+#define RGA_COLOR_FMT_YUV420P 11
+/* SRC_COLOR Palette */
+#define RGA_COLOR_FMT_CP_1BPP 12
+#define RGA_COLOR_FMT_CP_2BPP 13
+#define RGA_COLOR_FMT_CP_4BPP 14
+#define RGA_COLOR_FMT_CP_8BPP 15
+#define RGA_COLOR_FMT_MASK 15
+
+#define RGA_COLOR_NONE_SWAP 0
+#define RGA_COLOR_RB_SWAP 1
+#define RGA_COLOR_ALPHA_SWAP 2
+#define RGA_COLOR_UV_SWAP 4
+
+#define RGA_SRC_CSC_MODE_BYPASS 0
+#define RGA_SRC_CSC_MODE_BT601_R0 1
+#define RGA_SRC_CSC_MODE_BT601_R1 2
+#define RGA_SRC_CSC_MODE_BT709_R0 3
+#define RGA_SRC_CSC_MODE_BT709_R1 4
+
+#define RGA_SRC_ROT_MODE_0_DEGREE 0
+#define RGA_SRC_ROT_MODE_90_DEGREE 1
+#define RGA_SRC_ROT_MODE_180_DEGREE 2
+#define RGA_SRC_ROT_MODE_270_DEGREE 3
+
+#define RGA_SRC_MIRR_MODE_NO 0
+#define RGA_SRC_MIRR_MODE_X 1
+#define RGA_SRC_MIRR_MODE_Y 2
+#define RGA_SRC_MIRR_MODE_X_Y 3
+
+#define RGA_SRC_HSCL_MODE_NO 0
+#define RGA_SRC_HSCL_MODE_DOWN 1
+#define RGA_SRC_HSCL_MODE_UP 2
+
+#define RGA_SRC_VSCL_MODE_NO 0
+#define RGA_SRC_VSCL_MODE_DOWN 1
+#define RGA_SRC_VSCL_MODE_UP 2
+
+#define RGA_SRC_TRANS_ENABLE_R 1
+#define RGA_SRC_TRANS_ENABLE_G 2
+#define RGA_SRC_TRANS_ENABLE_B 4
+#define RGA_SRC_TRANS_ENABLE_A 8
+
+#define RGA_SRC_BIC_COE_SELEC_CATROM 0
+#define RGA_SRC_BIC_COE_SELEC_MITCHELL 1
+#define RGA_SRC_BIC_COE_SELEC_HERMITE 2
+#define RGA_SRC_BIC_COE_SELEC_BSPLINE 3
+
+#define RGA_DST_DITHER_MODE_888_TO_666 0
+#define RGA_DST_DITHER_MODE_888_TO_565 1
+#define RGA_DST_DITHER_MODE_888_TO_555 2
+#define RGA_DST_DITHER_MODE_888_TO_444 3
+
+#define RGA_DST_CSC_MODE_BYPASS 0
+#define RGA_DST_CSC_MODE_BT601_R0 1
+#define RGA_DST_CSC_MODE_BT601_R1 2
+#define RGA_DST_CSC_MODE_BT709_R0 3
+
+#define RGA_ALPHA_ROP_MODE_2 0
+#define RGA_ALPHA_ROP_MODE_3 1
+#define RGA_ALPHA_ROP_MODE_4 2
+
+#define RGA_ALPHA_SELECT_ALPHA 0
+#define RGA_ALPHA_SELECT_ROP 1
+
+#define RGA_ALPHA_MASK_BIG_ENDIAN 0
+#define RGA_ALPHA_MASK_LITTLE_ENDIAN 1
+
+#define RGA_ALPHA_NORMAL 0
+#define RGA_ALPHA_REVERSE 1
+
+#define RGA_ALPHA_BLEND_GLOBAL 0
+#define RGA_ALPHA_BLEND_NORMAL 1
+#define RGA_ALPHA_BLEND_MULTIPLY 2
+
+#define RGA_ALPHA_CAL_CUT 0
+#define RGA_ALPHA_CAL_NORMAL 1
+
+#define RGA_ALPHA_FACTOR_ZERO 0
+#define RGA_ALPHA_FACTOR_ONE 1
+#define RGA_ALPHA_FACTOR_OTHER 2
+#define RGA_ALPHA_FACTOR_OTHER_REVERSE 3
+#define RGA_ALPHA_FACTOR_SELF 4
+
+#define RGA_ALPHA_COLOR_NORMAL 0
+#define RGA_ALPHA_COLOR_MULTIPLY_CAL 1
+
+/* Registers union */
+union rga_mode_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:2] */
+ unsigned int render:3;
+ /* [3:6] */
+ unsigned int bitblt:1;
+ unsigned int cf_rop4_pat:1;
+ unsigned int alpha_zero_key:1;
+ unsigned int gradient_sat:1;
+ /* [7:31] */
+ unsigned int reserved:25;
+ } data;
+};
+
+union rga_src_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:7] */
+ unsigned int swap:3;
+ unsigned int cp_endian:1;
+ /* [8:17] */
+ unsigned int csc_mode:2;
+ unsigned int rot_mode:2;
+ unsigned int mir_mode:2;
+ unsigned int hscl_mode:2;
+ unsigned int vscl_mode:2;
+ /* [18:22] */
+ unsigned int trans_mode:1;
+ unsigned int trans_enable:4;
+ /* [23:25] */
+ unsigned int dither_up_en:1;
+ unsigned int bic_coe_sel:2;
+ /* [26:31] */
+ unsigned int reserved:6;
+ } data;
+};
+
+union rga_src_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_width:15;
+ unsigned int reserved:1;
+ /* [16:25] */
+ unsigned int vir_stride:10;
+ /* [26:31] */
+ unsigned int reserved1:6;
+ } data;
+};
+
+union rga_src_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:13;
+ unsigned int reserved:3;
+ /* [16:31] */
+ unsigned int act_height:13;
+ unsigned int reserved1:3;
+ } data;
+};
+
+union rga_src_x_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+union rga_src_y_factor {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int down_scale_factor:16;
+ /* [16:31] */
+ unsigned int up_scale_factor:16;
+ } data;
+};
+
+/* Alpha / Red / Green / Blue */
+union rga_src_cp_gr_color {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int gradient_x:16;
+ /* [16:31] */
+ unsigned int gradient_y:16;
+ } data;
+};
+
+union rga_src_transparency_color0 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmin:8;
+ /* [8:15] */
+ unsigned int trans_gmin:8;
+ /* [16:23] */
+ unsigned int trans_bmin:8;
+ /* [24:31] */
+ unsigned int trans_amin:8;
+ } data;
+};
+
+union rga_src_transparency_color1 {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int trans_rmax:8;
+ /* [8:15] */
+ unsigned int trans_gmax:8;
+ /* [16:23] */
+ unsigned int trans_bmax:8;
+ /* [24:31] */
+ unsigned int trans_amax:8;
+ } data;
+};
+
+union rga_dst_info {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int format:4;
+ /* [4:6] */
+ unsigned int swap:3;
+ /* [7:9] */
+ unsigned int src1_format:3;
+ /* [10:11] */
+ unsigned int src1_swap:2;
+ /* [12:15] */
+ unsigned int dither_up_en:1;
+ unsigned int dither_down_en:1;
+ unsigned int dither_down_mode:2;
+ /* [16:18] */
+ unsigned int csc_mode:2;
+ unsigned int csc_clip:1;
+ /* [19:31] */
+ unsigned int reserved:13;
+ } data;
+};
+
+union rga_dst_vir_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int vir_stride:15;
+ unsigned int reserved:1;
+ /* [16:31] */
+ unsigned int src1_vir_stride:15;
+ unsigned int reserved1:1;
+ } data;
+};
+
+union rga_dst_act_info {
+ unsigned int val;
+ struct {
+ /* [0:15] */
+ unsigned int act_width:12;
+ unsigned int reserved:4;
+ /* [16:31] */
+ unsigned int act_height:12;
+ unsigned int reserved1:4;
+ } data;
+};
+
+union rga_alpha_ctrl0 {
+ unsigned int val;
+ struct {
+ /* [0:3] */
+ unsigned int rop_en:1;
+ unsigned int rop_select:1;
+ unsigned int rop_mode:2;
+ /* [4:11] */
+ unsigned int src_fading_val:8;
+ /* [12:20] */
+ unsigned int dst_fading_val:8;
+ unsigned int mask_endian:1;
+ /* [21:31] */
+ unsigned int reserved:11;
+ } data;
+};
+
+union rga_alpha_ctrl1 {
+ unsigned int val;
+ struct {
+ /* [0:1] */
+ unsigned int dst_color_m0:1;
+ unsigned int src_color_m0:1;
+ /* [2:7] */
+ unsigned int dst_factor_m0:3;
+ unsigned int src_factor_m0:3;
+ /* [8:9] */
+ unsigned int dst_alpha_cal_m0:1;
+ unsigned int src_alpha_cal_m0:1;
+ /* [10:13] */
+ unsigned int dst_blend_m0:2;
+ unsigned int src_blend_m0:2;
+ /* [14:15] */
+ unsigned int dst_alpha_m0:1;
+ unsigned int src_alpha_m0:1;
+ /* [16:21] */
+ unsigned int dst_factor_m1:3;
+ unsigned int src_factor_m1:3;
+ /* [22:23] */
+ unsigned int dst_alpha_cal_m1:1;
+ unsigned int src_alpha_cal_m1:1;
+ /* [24:27] */
+ unsigned int dst_blend_m1:2;
+ unsigned int src_blend_m1:2;
+ /* [28:29] */
+ unsigned int dst_alpha_m1:1;
+ unsigned int src_alpha_m1:1;
+ /* [30:31] */
+ unsigned int reserved:2;
+ } data;
+};
+
+union rga_fading_ctrl {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int fading_offset_r:8;
+ /* [8:15] */
+ unsigned int fading_offset_g:8;
+ /* [16:23] */
+ unsigned int fading_offset_b:8;
+ /* [24:31] */
+ unsigned int fading_en:1;
+ unsigned int reserved:7;
+ } data;
+};
+
+union rga_pat_con {
+ unsigned int val;
+ struct {
+ /* [0:7] */
+ unsigned int width:8;
+ /* [8:15] */
+ unsigned int height:8;
+ /* [16:23] */
+ unsigned int offset_x:8;
+ /* [24:31] */
+ unsigned int offset_y:8;
+ } data;
+};
+
+#endif
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
new file mode 100644
index 000000000000..89296de9cf4a
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -0,0 +1,1010 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "rga-hw.h"
+#include "rga.h"
+
+static int debug;
+module_param(debug, int, 0644);
+
+static void job_abort(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+
+ if (!rga->curr) /* No job currently running */
+ return;
+
+ wait_event_timeout(rga->irq_queue,
+ !rga->curr, msecs_to_jiffies(RGA_TIMEOUT));
+}
+
+static void device_run(void *prv)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_buffer *src, *dst;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rga->ctrl_lock, flags);
+
+ rga->curr = ctx;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ rga_buf_map(src);
+ rga_buf_map(dst);
+
+ rga_hw_start(rga);
+
+ spin_unlock_irqrestore(&rga->ctrl_lock, flags);
+}
+
+static irqreturn_t rga_isr(int irq, void *prv)
+{
+ struct rockchip_rga *rga = prv;
+ int intr;
+
+ intr = rga_read(rga, RGA_INT) & 0xf;
+
+ rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
+
+ if (intr & 0x04) {
+ struct vb2_v4l2_buffer *src, *dst;
+ struct rga_ctx *ctx = rga->curr;
+
+ WARN_ON(!ctx);
+
+ rga->curr = NULL;
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ WARN_ON(!src);
+ WARN_ON(!dst);
+
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ dst->flags |= src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+ v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
+
+ wake_up(&rga->irq_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct v4l2_m2m_ops rga_m2m_ops = {
+ .device_run = device_run,
+ .job_abort = job_abort,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct rga_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rga_qops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->rga->mutex;
+ src_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rga_qops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->rga->mutex;
+ dst_vq->dev = ctx->rga->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rga_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rga_ctx *ctx = container_of(ctrl->handler, struct rga_ctx,
+ ctrl_handler);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rga->ctrl_lock, flags);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ctx->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ ctx->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ ctx->rotate = ctrl->val;
+ break;
+ case V4L2_CID_BG_COLOR:
+ ctx->fill_color = ctrl->val;
+ break;
+ }
+ spin_unlock_irqrestore(&ctx->rga->ctrl_lock, flags);
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rga_ctrl_ops = {
+ .s_ctrl = rga_s_ctrl,
+};
+
+static int rga_setup_ctrls(struct rga_ctx *ctx)
+{
+ struct rockchip_rga *rga = ctx->rga;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
+ V4L2_CID_BG_COLOR, 0, 0xffffffff, 1, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_err(&rga->v4l2_dev, "%s failed\n", __func__);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return err;
+ }
+
+ return 0;
+}
+
+struct rga_fmt formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XRGB32,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .color_swap = RGA_COLOR_ALPHA_SWAP,
+ .hw_format = RGA_COLOR_FMT_XBGR8888,
+ .depth = 32,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_RGB888,
+ .depth = 24,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB444,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR4444,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB555,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_ABGR1555,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .color_swap = RGA_COLOR_RB_SWAP,
+ .hw_format = RGA_COLOR_FMT_BGR565,
+ .depth = 16,
+ .uv_factor = 1,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420SP,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422SP,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .color_swap = RGA_COLOR_NONE_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV422P,
+ .depth = 16,
+ .uv_factor = 2,
+ .y_div = 1,
+ .x_div = 2,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .color_swap = RGA_COLOR_UV_SWAP,
+ .hw_format = RGA_COLOR_FMT_YUV420P,
+ .depth = 12,
+ .uv_factor = 4,
+ .y_div = 2,
+ .x_div = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct rga_frame def_frame = {
+ .width = DEFAULT_WIDTH,
+ .height = DEFAULT_HEIGHT,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+ .crop.left = 0,
+ .crop.top = 0,
+ .crop.width = DEFAULT_WIDTH,
+ .crop.height = DEFAULT_HEIGHT,
+ .fmt = &formats[0],
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->in;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->out;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static int rga_open(struct file *file)
+{
+ struct rockchip_rga *rga = video_drvdata(file);
+ struct rga_ctx *ctx = NULL;
+ int ret = 0;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->rga = rga;
+ /* Set default formats */
+ ctx->in = def_frame;
+ ctx->out = def_frame;
+
+ if (mutex_lock_interruptible(&rga->mutex)) {
+ kfree(ctx);
+ return -ERESTARTSYS;
+ }
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rga->m2m_dev, ctx, &queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ mutex_unlock(&rga->mutex);
+ kfree(ctx);
+ return ret;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ rga_setup_ctrls(ctx);
+
+ /* Write the default values to the ctx struct */
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static int rga_release(struct file *file)
+{
+ struct rga_ctx *ctx =
+ container_of(file->private_data, struct rga_ctx, fh);
+ struct rockchip_rga *rga = ctx->rga;
+
+ mutex_lock(&rga->mutex);
+
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ mutex_unlock(&rga->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rga_fops = {
+ .owner = THIS_MODULE,
+ .open = rga_open,
+ .release = rga_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int
+vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, RGA_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "rockchip-rga", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:rga", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+{
+ struct rga_fmt *fmt;
+
+ if (f->index >= NUM_FORMATS)
+ return -EINVAL;
+
+ fmt = &formats[f->index];
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+
+ f->fmt.pix.width = frm->width;
+ f->fmt.pix.height = frm->height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ f->fmt.pix.pixelformat = frm->fmt->fourcc;
+ f->fmt.pix.bytesperline = frm->stride;
+ f->fmt.pix.sizeimage = frm->size;
+ f->fmt.pix.colorspace = frm->colorspace;
+
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_fmt *fmt;
+
+ fmt = rga_fmt_find(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ f->fmt.pix.pixelformat = fmt->fourcc;
+ }
+
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ if (f->fmt.pix.width > MAX_WIDTH)
+ f->fmt.pix.width = MAX_WIDTH;
+ if (f->fmt.pix.height > MAX_HEIGHT)
+ f->fmt.pix.height = MAX_HEIGHT;
+
+ if (f->fmt.pix.width < MIN_WIDTH)
+ f->fmt.pix.width = MIN_WIDTH;
+ if (f->fmt.pix.height < MIN_HEIGHT)
+ f->fmt.pix.height = MIN_HEIGHT;
+
+ if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ else
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
+
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
+
+ return 0;
+}
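To make the arithmetic above concrete, a hypothetical request run through the
helper (example values only, not defaults of the driver):

	/*
	 * 640x480 NV12 (depth 12 in the format table) after vidioc_try_fmt():
	 *   bytesperline = 640                         (YUV path: width)
	 *   sizeimage    = (640 * 480 * 12) >> 3 = 460800 bytes
	 */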
+
+static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct vb2_queue *vq;
+ struct rga_frame *frm;
+ struct rga_fmt *fmt;
+ int ret = 0;
+
+	/*
+	 * Adjust all values according to the hardware capabilities
+	 * and the chosen format.
+	 */
+ ret = vidioc_try_fmt(file, prv, f);
+ if (ret)
+ return ret;
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_busy(vq)) {
+		v4l2_err(&rga->v4l2_dev, "queue (%d) busy\n", f->type);
+ return -EBUSY;
+ }
+ frm = rga_get_frame(ctx, f->type);
+ if (IS_ERR(frm))
+ return PTR_ERR(frm);
+ fmt = rga_fmt_find(f);
+ if (!fmt)
+ return -EINVAL;
+ frm->width = f->fmt.pix.width;
+ frm->height = f->fmt.pix.height;
+ frm->size = f->fmt.pix.sizeimage;
+ frm->fmt = fmt;
+ frm->stride = f->fmt.pix.bytesperline;
+ frm->colorspace = f->fmt.pix.colorspace;
+
+ /* Reset crop settings */
+ frm->crop.left = 0;
+ frm->crop.top = 0;
+ frm->crop.width = frm->width;
+ frm->crop.height = frm->height;
+
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rga_frame *f;
+ bool use_frame = false;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ use_frame = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_frame) {
+ s->r = f->crop;
+ } else {
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
+{
+ struct rga_ctx *ctx = prv;
+ struct rockchip_rga *rga = ctx->rga;
+ struct rga_frame *f;
+ int ret = 0;
+
+ f = rga_get_frame(ctx, s->type);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * COMPOSE target is only valid for capture buffer type, return
+ * error for output buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * CROP target is only valid for output buffer type, return
+ * error for capture buffer type
+ */
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ /*
+ * bound and default crop/compose targets are invalid targets to
+ * try/set
+ */
+ default:
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev,
+ "doesn't support negative values for top & left.\n");
+ return -EINVAL;
+ }
+
+ if (s->r.left + s->r.width > f->width ||
+ s->r.top + s->r.height > f->height ||
+ s->r.width < MIN_WIDTH || s->r.height < MIN_HEIGHT) {
+ v4l2_dbg(debug, 1, &rga->v4l2_dev, "unsupported crop value.\n");
+ return -EINVAL;
+ }
+
+ f->crop = s->r;
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops rga_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+};
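The ioctl table above exposes the stock V4L2 mem2mem interface, so a
conversion is driven entirely from user space. A hedged sketch of the
format-negotiation step (device path, resolution and pixel formats are made-up
example values; buffer setup and streaming are omitted):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	static int rga_set_formats(const char *devpath)
	{
		struct v4l2_format fmt;
		int fd = open(devpath, O_RDWR);

		if (fd < 0)
			return -1;

		/* Source image: fed by user space on the OUTPUT queue. */
		memset(&fmt, 0, sizeof(fmt));
		fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		fmt.fmt.pix.width = 640;
		fmt.fmt.pix.height = 480;
		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			goto err;

		/* Converted image: written by the RGA to the CAPTURE queue. */
		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB565;
		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
			goto err;

		/* VIDIOC_REQBUFS, VIDIOC_QBUF and VIDIOC_STREAMON would follow. */
		return fd;

	err:
		close(fd);
		return -1;
	}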
+
+static struct video_device rga_videodev = {
+ .name = "rockchip-rga",
+ .fops = &rga_fops,
+ .ioctl_ops = &rga_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_M2M,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static int rga_enable_clocks(struct rockchip_rga *rga)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rga->sclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(rga->aclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
+ goto err_disable_sclk;
+ }
+
+ ret = clk_prepare_enable(rga->hclk);
+ if (ret) {
+ dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ return 0;
+
+err_disable_aclk:
+	clk_disable_unprepare(rga->aclk);
+err_disable_sclk:
+	clk_disable_unprepare(rga->sclk);
+
+ return ret;
+}
+
+static void rga_disable_clocks(struct rockchip_rga *rga)
+{
+ clk_disable_unprepare(rga->sclk);
+ clk_disable_unprepare(rga->hclk);
+ clk_disable_unprepare(rga->aclk);
+}
+
+static int rga_parse_dt(struct rockchip_rga *rga)
+{
+ struct reset_control *core_rst, *axi_rst, *ahb_rst;
+
+ core_rst = devm_reset_control_get(rga->dev, "core");
+ if (IS_ERR(core_rst)) {
+ dev_err(rga->dev, "failed to get core reset controller\n");
+ return PTR_ERR(core_rst);
+ }
+
+ axi_rst = devm_reset_control_get(rga->dev, "axi");
+ if (IS_ERR(axi_rst)) {
+ dev_err(rga->dev, "failed to get axi reset controller\n");
+ return PTR_ERR(axi_rst);
+ }
+
+ ahb_rst = devm_reset_control_get(rga->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(rga->dev, "failed to get ahb reset controller\n");
+ return PTR_ERR(ahb_rst);
+ }
+
+ reset_control_assert(core_rst);
+ udelay(1);
+ reset_control_deassert(core_rst);
+
+ reset_control_assert(axi_rst);
+ udelay(1);
+ reset_control_deassert(axi_rst);
+
+ reset_control_assert(ahb_rst);
+ udelay(1);
+ reset_control_deassert(ahb_rst);
+
+ rga->sclk = devm_clk_get(rga->dev, "sclk");
+ if (IS_ERR(rga->sclk)) {
+ dev_err(rga->dev, "failed to get sclk clock\n");
+ return PTR_ERR(rga->sclk);
+ }
+
+ rga->aclk = devm_clk_get(rga->dev, "aclk");
+ if (IS_ERR(rga->aclk)) {
+ dev_err(rga->dev, "failed to get aclk clock\n");
+ return PTR_ERR(rga->aclk);
+ }
+
+ rga->hclk = devm_clk_get(rga->dev, "hclk");
+ if (IS_ERR(rga->hclk)) {
+ dev_err(rga->dev, "failed to get hclk clock\n");
+ return PTR_ERR(rga->hclk);
+ }
+
+ return 0;
+}
+
+static int rga_probe(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret = 0;
+ int irq;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
+ if (!rga)
+ return -ENOMEM;
+
+ rga->dev = &pdev->dev;
+ spin_lock_init(&rga->ctrl_lock);
+ mutex_init(&rga->mutex);
+
+ init_waitqueue_head(&rga->irq_queue);
+
+ ret = rga_parse_dt(rga);
+ if (ret)
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+
+ pm_runtime_enable(rga->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ rga->regs = devm_ioremap_resource(rga->dev, res);
+ if (IS_ERR(rga->regs)) {
+ ret = PTR_ERR(rga->regs);
+ goto err_put_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(rga->dev, "failed to get irq\n");
+ ret = irq;
+ goto err_put_clk;
+ }
+
+ ret = devm_request_irq(rga->dev, irq, rga_isr, 0,
+ dev_name(rga->dev), rga);
+ if (ret < 0) {
+ dev_err(rga->dev, "failed to request irq\n");
+ goto err_put_clk;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
+ if (ret)
+ goto err_put_clk;
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&rga->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto unreg_v4l2_dev;
+ }
+ *vfd = rga_videodev;
+ vfd->lock = &rga->mutex;
+ vfd->v4l2_dev = &rga->v4l2_dev;
+
+ video_set_drvdata(vfd, rga);
+ snprintf(vfd->name, sizeof(vfd->name), "%s", rga_videodev.name);
+ rga->vfd = vfd;
+
+ platform_set_drvdata(pdev, rga);
+ rga->m2m_dev = v4l2_m2m_init(&rga_m2m_ops);
+ if (IS_ERR(rga->m2m_dev)) {
+ v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(rga->m2m_dev);
+ goto unreg_video_dev;
+ }
+
+ pm_runtime_get_sync(rga->dev);
+
+ rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
+ rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
+
+ v4l2_info(&rga->v4l2_dev, "HW Version: 0x%02x.%02x\n",
+ rga->version.major, rga->version.minor);
+
+ pm_runtime_put(rga->dev);
+
+ /* Create CMD buffer */
+ rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
+ &rga->cmdbuf_phy, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+
+ rga->src_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+ rga->dst_mmu_pages =
+ (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
+
+ def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
+ def_frame.size = def_frame.stride * def_frame.height;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
+ goto rel_vdev;
+ }
+
+ v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
+ vfd->name, video_device_node_name(vfd));
+
+ return 0;
+
+rel_vdev:
+ video_device_release(vfd);
+unreg_video_dev:
+ video_unregister_device(rga->vfd);
+unreg_v4l2_dev:
+ v4l2_device_unregister(&rga->v4l2_dev);
+err_put_clk:
+ pm_runtime_disable(rga->dev);
+
+ return ret;
+}
+
+static int rga_remove(struct platform_device *pdev)
+{
+ struct rockchip_rga *rga = platform_get_drvdata(pdev);
+
+	dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
+ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
+
+ free_pages((unsigned long)rga->src_mmu_pages, 3);
+ free_pages((unsigned long)rga->dst_mmu_pages, 3);
+
+ v4l2_info(&rga->v4l2_dev, "Removing\n");
+
+ v4l2_m2m_release(rga->m2m_dev);
+ video_unregister_device(rga->vfd);
+ v4l2_device_unregister(&rga->v4l2_dev);
+
+ pm_runtime_disable(rga->dev);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_suspend(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ rga_disable_clocks(rga);
+
+ return 0;
+}
+
+static int __maybe_unused rga_runtime_resume(struct device *dev)
+{
+ struct rockchip_rga *rga = dev_get_drvdata(dev);
+
+ return rga_enable_clocks(rga);
+}
+
+static const struct dev_pm_ops rga_pm = {
+ SET_RUNTIME_PM_OPS(rga_runtime_suspend,
+ rga_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_rga_match[] = {
+ {
+ .compatible = "rockchip,rk3288-rga",
+ },
+ {
+ .compatible = "rockchip,rk3399-rga",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rga_match);
+
+static struct platform_driver rga_pdrv = {
+ .probe = rga_probe,
+ .remove = rga_remove,
+ .driver = {
+ .name = RGA_NAME,
+ .pm = &rga_pm,
+ .of_match_table = rockchip_rga_match,
+ },
+};
+
+module_platform_driver(rga_pdrv);
+
+MODULE_AUTHOR("Jacob Chen <jacob-chen@iotwrt.com>");
+MODULE_DESCRIPTION("Rockchip Raster 2d Graphic Acceleration Unit");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
new file mode 100644
index 000000000000..5d43e7ea88af
--- /dev/null
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Jacob Chen <jacob-chen@iotwrt.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __RGA_H__
+#define __RGA_H__
+
+#include <linux/platform_device.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define RGA_NAME "rockchip-rga"
+
+struct rga_fmt {
+ u32 fourcc;
+ int depth;
+ u8 uv_factor;
+ u8 y_div;
+ u8 x_div;
+ u8 color_swap;
+ u8 hw_format;
+};
+
+struct rga_frame {
+ /* Original dimensions */
+ u32 width;
+ u32 height;
+ u32 colorspace;
+
+ /* Crop */
+ struct v4l2_rect crop;
+
+ /* Image format */
+ struct rga_fmt *fmt;
+
+ /* Variables that can be calculated once and reused */
+ u32 stride;
+ u32 size;
+};
+
+struct rockchip_rga_version {
+ u32 major;
+ u32 minor;
+};
+
+struct rga_ctx {
+ struct v4l2_fh fh;
+ struct rockchip_rga *rga;
+ struct rga_frame in;
+ struct rga_frame out;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Control values */
+ u32 op;
+ u32 hflip;
+ u32 vflip;
+ u32 rotate;
+ u32 fill_color;
+};
+
+struct rockchip_rga {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct video_device *vfd;
+
+ struct device *dev;
+ struct regmap *grf;
+ void __iomem *regs;
+ struct clk *sclk;
+ struct clk *aclk;
+ struct clk *hclk;
+ struct rockchip_rga_version version;
+
+ /* vfd lock */
+ struct mutex mutex;
+ /* ctrl parm lock */
+ spinlock_t ctrl_lock;
+
+ wait_queue_head_t irq_queue;
+
+ struct rga_ctx *curr;
+ dma_addr_t cmdbuf_phy;
+ void *cmdbuf_virt;
+ unsigned int *src_mmu_pages;
+ unsigned int *dst_mmu_pages;
+};
+
+struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type);
+
+/* RGA buffer management */
+extern const struct vb2_ops rga_qops;
+void rga_buf_map(struct vb2_buffer *vb);
+
+/* RGA Hardware */
+static inline void rga_write(struct rockchip_rga *rga, u32 reg, u32 value)
+{
+ writel(value, rga->regs + reg);
+}
+
+static inline u32 rga_read(struct rockchip_rga *rga, u32 reg)
+{
+ return readl(rga->regs + reg);
+}
+
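+/* Read-modify-write helper: only the bits selected by mask are updated */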
+static inline void rga_mod(struct rockchip_rga *rga, u32 reg, u32 val, u32 mask)
+{
+ u32 temp = rga_read(rga, reg) & ~(mask);
+
+ temp |= val & mask;
+ rga_write(rga, reg, temp);
+}
+
+void rga_hw_start(struct rockchip_rga *rga);
+
+#endif
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
index 15f59b324fef..0b324af2ab00 100644
--- a/drivers/media/platform/s5p-mfc/Makefile
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 1afde5021ca6..bc68dbbcaec1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -145,9 +145,9 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
}
}
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
{
- struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+ struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt);
@@ -470,7 +470,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
{
mfc_err("Interrupt Error: %08x\n", err);
- if (ctx != NULL) {
+ if (ctx) {
/* Error recovery is dependent on the state of context */
switch (ctx->state) {
case MFCINST_RES_CHANGE_INIT:
@@ -508,7 +508,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
if (ctx->c_ops->post_seq_start) {
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_buf *src_buf;
struct s5p_mfc_dev *dev;
- if (ctx == NULL)
+ if (!ctx)
return;
dev = ctx->dev;
s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
@@ -1043,12 +1043,9 @@ end:
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
- struct s5p_mfc_dev *dev = ctx->dev;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int ret;
- if (mutex_lock_interruptible(&dev->mfc_mutex))
- return -ERESTARTSYS;
if (offset < DST_QUEUE_OFF_BASE) {
mfc_debug(2, "mmaping source\n");
ret = vb2_mmap(&ctx->vq_src, vma);
@@ -1057,7 +1054,6 @@ static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
ret = vb2_mmap(&ctx->vq_dst, vma);
}
- mutex_unlock(&dev->mfc_mutex);
return ret;
}
@@ -1083,7 +1079,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
struct device *child;
int ret;
- child = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
+ child = devm_kzalloc(dev, sizeof(*child), GFP_KERNEL);
if (!child)
return NULL;
@@ -1270,10 +1266,8 @@ static int s5p_mfc_probe(struct platform_device *pdev)
pr_debug("%s++\n", __func__);
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+ if (!dev)
return -ENOMEM;
- }
spin_lock_init(&dev->irqlock);
spin_lock_init(&dev->condlock);
@@ -1291,7 +1285,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
+ if (!res) {
dev_err(&pdev->dev, "failed to get irq resource\n");
return -ENOENT;
}
@@ -1320,9 +1314,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
dev->hw_lock = 0;
INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
atomic_set(&dev->watchdog_cnt, 0);
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = s5p_mfc_watchdog;
+ timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 1f3c450c7a69..916ff68b73d4 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -1391,6 +1391,12 @@ static int soc_camera_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations soc_camera_async_ops = {
+ .bound = soc_camera_async_bound,
+ .unbind = soc_camera_async_unbind,
+ .complete = soc_camera_async_complete,
+};
+
static int scan_async_group(struct soc_camera_host *ici,
struct v4l2_async_subdev **asd, unsigned int size)
{
@@ -1437,9 +1443,7 @@ static int scan_async_group(struct soc_camera_host *ici,
sasc->notifier.subdevs = asd;
sasc->notifier.num_subdevs = size;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
@@ -1537,9 +1541,7 @@ static int soc_of_bind(struct soc_camera_host *ici,
sasc->notifier.subdevs = &info->subdev;
sasc->notifier.num_subdevs = 1;
- sasc->notifier.bound = soc_camera_async_bound;
- sasc->notifier.unbind = soc_camera_async_unbind;
- sasc->notifier.complete = soc_camera_async_complete;
+ sasc->notifier.ops = &soc_camera_async_ops;
icd->sasc = sasc;
icd->parent = ici->v4l2_dev.dev;
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 939da6da7644..7e9ed9c7b3e1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -723,7 +723,7 @@ static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct bdisp_ctx *ctx = fh_to_ctx(fh);
- struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_pix_format *pix;
struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame)) {
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/sti/c8sectpfe/Makefile
index b578c7cb4c34..b642b4fd5045 100644
--- a/drivers/media/platform/sti/c8sectpfe/Makefile
+++ b/drivers/media/platform/sti/c8sectpfe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
c8sectpfe-debugfs.o
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 59280ac31937..a0acee7671b1 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -61,9 +61,9 @@ static int load_c8sectpfe_fw(struct c8sectpfei *fei);
#define FIFO_LEN 1024
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
- struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
struct channel_info *channel;
int chan_num;
@@ -865,8 +865,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
/* Setup timer interrupt */
- setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
- (unsigned long)fei);
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
mutex_init(&fei->lock);
diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c
index e6f247a983c7..a7e5eed17ada 100644
--- a/drivers/media/platform/sti/hva/hva-h264.c
+++ b/drivers/media/platform/sti/hva/hva-h264.c
@@ -999,7 +999,6 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
{
struct hva_h264_ctx *ctx = (struct hva_h264_ctx *)pctx->priv;
struct hva_h264_task *task = (struct hva_h264_task *)ctx->task->vaddr;
- struct hva_buffer *tmp_frame;
u32 stuffing_bytes = 0;
int ret = 0;
@@ -1023,9 +1022,7 @@ static int hva_h264_encode(struct hva_ctx *pctx, struct hva_frame *frame,
&stream->bytesused);
/* switch reference & reconstructed frame */
- tmp_frame = ctx->ref_frame;
- ctx->ref_frame = ctx->rec_frame;
- ctx->rec_frame = tmp_frame;
+ swap(ctx->ref_frame, ctx->rec_frame);
return 0;
err:
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index 35ba6f211b79..ac4c450a6c7d 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -1495,6 +1495,12 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
return 0;
}
+static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
+ .bound = dcmi_graph_notify_bound,
+ .unbind = dcmi_graph_notify_unbind,
+ .complete = dcmi_graph_notify_complete,
+};
+
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
struct device_node *ep = NULL;
@@ -1542,9 +1548,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.subdevs = subdevs;
dcmi->notifier.num_subdevs = 1;
- dcmi->notifier.bound = dcmi_graph_notify_bound;
- dcmi->notifier.unbind = dcmi_graph_notify_unbind;
- dcmi->notifier.complete = dcmi_graph_notify_complete;
+ dcmi->notifier.ops = &dcmi_graph_notify_ops;
ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
diff --git a/drivers/media/platform/tegra-cec/Makefile b/drivers/media/platform/tegra-cec/Makefile
new file mode 100644
index 000000000000..f3d81127589f
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra_cec.o
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
new file mode 100644
index 000000000000..807c94c70049
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -0,0 +1,495 @@
+/*
+ * Tegra CEC implementation
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/clk/tegra.h>
+
+#include <media/cec-notifier.h>
+
+#include "tegra_cec.h"
+
+#define TEGRA_CEC_NAME "tegra-cec"
+
+struct tegra_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *cec_base;
+ struct cec_notifier *notifier;
+ int tegra_cec_irq;
+ bool rx_done;
+ bool tx_done;
+ int tx_status;
+ u8 rx_buf[CEC_MAX_MSG_SIZE];
+ u8 rx_buf_cnt;
+ u32 tx_buf[CEC_MAX_MSG_SIZE];
+ u8 tx_buf_cur;
+ u8 tx_buf_cnt;
+};
+
+static inline u32 cec_read(struct tegra_cec *cec, u32 reg)
+{
+ return readl(cec->cec_base + reg);
+}
+
+static inline void cec_write(struct tegra_cec *cec, u32 reg, u32 val)
+{
+ writel(val, cec->cec_base + reg);
+}
+
+static void tegra_cec_error_recovery(struct tegra_cec *cec)
+{
+ u32 hw_ctrl;
+
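+ /* Save HW_CONTROL, disable the controller, clear all latched interrupt status, then restore it */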
+ hw_ctrl = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, hw_ctrl);
+}
+
+static irqreturn_t tegra_cec_irq_thread_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+
+ if (cec->tx_done) {
+ cec_transmit_attempt_done(cec->adap, cec->tx_status);
+ cec->tx_done = false;
+ }
+ if (cec->rx_done) {
+ struct cec_msg msg = {};
+
+ msg.len = cec->rx_buf_cnt;
+ memcpy(msg.msg, cec->rx_buf, msg.len);
+ cec_received_msg(cec->adap, &msg);
+ cec->rx_done = false;
+ cec->rx_buf_cnt = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct tegra_cec *cec = dev_get_drvdata(dev);
+ u32 status, mask;
+
+ status = cec_read(cec, TEGRA_CEC_INT_STAT);
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+
+ status &= mask;
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN) {
+ dev_err(dev, "TX underrun, interrupt timing issue!\n");
+
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if ((status & TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED) ||
+ (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)) {
+ tegra_cec_error_recovery(cec);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ cec->tx_done = true;
+ if (status & TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED)
+ cec->tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ else
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED);
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD) {
+ tegra_cec_error_recovery(cec);
+
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ } else {
+ cec->tx_done = true;
+ cec->tx_status = CEC_TX_STATUS_OK;
+ }
+ return IRQ_WAKE_THREAD;
+ }
+
+ if (status & TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD)
+ dev_warn(dev, "TX NAKed on the fly!\n");
+
+ if (status & TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY) {
+ if (cec->tx_buf_cur == cec->tx_buf_cnt) {
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask & ~TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+ } else {
+ cec_write(cec, TEGRA_CEC_TX_REGISTER,
+ cec->tx_buf[cec->tx_buf_cur++]);
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY);
+ }
+ }
+
+ if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN |
+ TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
+ TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED));
+ } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
+ u32 v;
+
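+ /* Accumulate received bytes until the EOM bit, then let the threaded handler deliver the message */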
+ cec_write(cec, TEGRA_CEC_INT_STAT,
+ TEGRA_CEC_INT_STAT_RX_REGISTER_FULL);
+ v = cec_read(cec, TEGRA_CEC_RX_REGISTER);
+ if (cec->rx_buf_cnt < CEC_MAX_MSG_SIZE)
+ cec->rx_buf[cec->rx_buf_cnt++] = v & 0xff;
+ if (v & TEGRA_CEC_RX_REGISTER_EOM) {
+ cec->rx_done = true;
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+
+ cec->rx_buf_cnt = 0;
+ cec->tx_buf_cnt = 0;
+ cec->tx_buf_cur = 0;
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, 0);
+ cec_write(cec, TEGRA_CEC_INT_MASK, 0);
+ cec_write(cec, TEGRA_CEC_INT_STAT, 0xffffffff);
+ cec_write(cec, TEGRA_CEC_SW_CONTROL, 0);
+
+ if (!enable)
+ return 0;
+
+ cec_write(cec, TEGRA_CEC_INPUT_FILTER, (1U << 31) | 0x20);
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_0,
+ (0x7a << TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT) |
+ (0x6d << TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT) |
+ (0x93 << TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT) |
+ (0x86 << TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_1,
+ (0x35 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT) |
+ (0x21 << TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT) |
+ (0x56 << TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT) |
+ (0x40 << TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_RX_TIMING_2,
+ (0x50 << TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_0,
+ (0x74 << TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT) |
+ (0x8d << TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT) |
+ (0x08 << TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT) |
+ (0x71 << TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_1,
+ (0x2f << TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT) |
+ (0x13 << TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT) |
+ (0x4b << TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT) |
+ (0x21 << TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_TX_TIMING_2,
+ (0x07 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT) |
+ (0x05 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT) |
+ (0x03 << TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT));
+
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN |
+ TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD |
+ TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED |
+ TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
+ TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
+ TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN);
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
+ return 0;
+}
+
+static int tegra_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 state = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
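+ /* The RX filter is a bitmask of claimed logical addresses; CEC_LOG_ADDR_INVALID clears the whole mask */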
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ state &= ~TEGRA_CEC_HWCTRL_RX_LADDR_MASK;
+ else
+ state |= TEGRA_CEC_HWCTRL_RX_LADDR((1 << logical_addr));
+
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, state);
+ return 0;
+}
+
+static int tegra_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct tegra_cec *cec = adap->priv;
+ u32 reg = cec_read(cec, TEGRA_CEC_HW_CONTROL);
+
+ if (enable)
+ reg |= TEGRA_CEC_HWCTRL_RX_SNOOP;
+ else
+ reg &= ~TEGRA_CEC_HWCTRL_RX_SNOOP;
+ cec_write(cec, TEGRA_CEC_HW_CONTROL, reg);
+ return 0;
+}
+
+static int tegra_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time_ms, struct cec_msg *msg)
+{
+ bool retry_xfer = signal_free_time_ms == CEC_SIGNAL_FREE_TIME_RETRY;
+ struct tegra_cec *cec = adap->priv;
+ unsigned int i;
+ u32 mode = 0;
+ u32 mask;
+
+ if (cec_msg_is_broadcast(msg))
+ mode = TEGRA_CEC_TX_REG_BCAST;
+
+ cec->tx_buf_cur = 0;
+ cec->tx_buf_cnt = msg->len;
+
+ for (i = 0; i < msg->len; i++) {
+ cec->tx_buf[i] = mode | msg->msg[i];
+ if (i == 0)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_START_BIT;
+ if (i == msg->len - 1)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_EOM;
+ if (i == 0 && retry_xfer)
+ cec->tx_buf[i] |= TEGRA_CEC_TX_REG_RETRY;
+ }
+
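+ /* Transmission is interrupt driven: unmasking TX_REGISTER_EMPTY lets the IRQ handler feed tx_buf into the TX register */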
+ mask = cec_read(cec, TEGRA_CEC_INT_MASK);
+ cec_write(cec, TEGRA_CEC_INT_MASK,
+ mask | TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY);
+
+ return 0;
+}
+
+static const struct cec_adap_ops tegra_cec_ops = {
+ .adap_enable = tegra_cec_adap_enable,
+ .adap_log_addr = tegra_cec_adap_log_addr,
+ .adap_transmit = tegra_cec_adap_transmit,
+ .adap_monitor_all_enable = tegra_cec_adap_monitor_all_enable,
+};
+
+static int tegra_cec_probe(struct platform_device *pdev)
+{
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct tegra_cec *cec;
+ struct resource *res;
+ int ret = 0;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node in device tree\n");
+ return -ENODEV;
+ }
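+ /* The CEC notifier tracks the HDMI controller for physical-address updates, so defer until it has probed */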
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
+
+ if (!cec)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to allocate resources for device\n");
+ return -EBUSY;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ pdev->name)) {
+ dev_err(&pdev->dev,
+ "Unable to request mem region for device\n");
+ return -EBUSY;
+ }
+
+ cec->tegra_cec_irq = platform_get_irq(pdev, 0);
+
+ if (cec->tegra_cec_irq <= 0)
+ return -EBUSY;
+
+ cec->cec_base = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+
+ if (!cec->cec_base) {
+ dev_err(&pdev->dev, "Unable to grab IOs for device\n");
+ return -EBUSY;
+ }
+
+ cec->clk = devm_clk_get(&pdev->dev, "cec");
+
+ if (IS_ERR_OR_NULL(cec->clk)) {
+ dev_err(&pdev->dev, "Can't get clock for CEC\n");
+ return -ENOENT;
+ }
+
+ clk_prepare_enable(cec->clk);
+
+ /* set context info. */
+ cec->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, cec);
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->tegra_cec_irq,
+ tegra_cec_irq_handler, tegra_cec_irq_thread_handler,
+ 0, "cec_irq", &pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device\n");
+ goto clk_error;
+ }
+
+ cec->notifier = cec_notifier_get(&hdmi_dev->dev);
+ if (!cec->notifier) {
+ ret = -ENOMEM;
+ goto clk_error;
+ }
+
+ cec->adap = cec_allocate_adapter(&tegra_cec_ops, cec, TEGRA_CEC_NAME,
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Couldn't create cec adapter\n");
+ goto cec_error;
+ }
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register device\n");
+ goto cec_error;
+ }
+
+ cec_register_cec_notifier(cec->adap, cec->notifier);
+
+ return 0;
+
+cec_error:
+ if (cec->notifier)
+ cec_notifier_put(cec->notifier);
+ cec_delete_adapter(cec->adap);
+clk_error:
+ clk_disable_unprepare(cec->clk);
+ return ret;
+}
+
+static int tegra_cec_remove(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_cec_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(cec->clk);
+
+ dev_notice(&pdev->dev, "suspended\n");
+ return 0;
+}
+
+static int tegra_cec_resume(struct platform_device *pdev)
+{
+ struct tegra_cec *cec = platform_get_drvdata(pdev);
+
+ dev_notice(&pdev->dev, "Resuming\n");
+
+ clk_prepare_enable(cec->clk);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id tegra_cec_of_match[] = {
+ { .compatible = "nvidia,tegra114-cec", },
+ { .compatible = "nvidia,tegra124-cec", },
+ { .compatible = "nvidia,tegra210-cec", },
+ {},
+};
+
+static struct platform_driver tegra_cec_driver = {
+ .driver = {
+ .name = TEGRA_CEC_NAME,
+ .of_match_table = of_match_ptr(tegra_cec_of_match),
+ },
+ .probe = tegra_cec_probe,
+ .remove = tegra_cec_remove,
+
+#ifdef CONFIG_PM
+ .suspend = tegra_cec_suspend,
+ .resume = tegra_cec_resume,
+#endif
+};
+
+module_platform_driver(tegra_cec_driver);
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.h b/drivers/media/platform/tegra-cec/tegra_cec.h
new file mode 100644
index 000000000000..e301513daa87
--- /dev/null
+++ b/drivers/media/platform/tegra-cec/tegra_cec.h
@@ -0,0 +1,127 @@
+/*
+ * Tegra CEC register definitions
+ *
+ * The original 3.10 CEC driver using a custom API:
+ *
+ * Copyright (c) 2012-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Conversion to the CEC framework and to the mainline kernel:
+ *
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TEGRA_CEC_H
+#define TEGRA_CEC_H
+
+/* CEC registers */
+#define TEGRA_CEC_SW_CONTROL 0x000
+#define TEGRA_CEC_HW_CONTROL 0x004
+#define TEGRA_CEC_INPUT_FILTER 0x008
+#define TEGRA_CEC_TX_REGISTER 0x010
+#define TEGRA_CEC_RX_REGISTER 0x014
+#define TEGRA_CEC_RX_TIMING_0 0x018
+#define TEGRA_CEC_RX_TIMING_1 0x01c
+#define TEGRA_CEC_RX_TIMING_2 0x020
+#define TEGRA_CEC_TX_TIMING_0 0x024
+#define TEGRA_CEC_TX_TIMING_1 0x028
+#define TEGRA_CEC_TX_TIMING_2 0x02c
+#define TEGRA_CEC_INT_STAT 0x030
+#define TEGRA_CEC_INT_MASK 0x034
+#define TEGRA_CEC_HW_DEBUG_RX 0x038
+#define TEGRA_CEC_HW_DEBUG_TX 0x03c
+
+#define TEGRA_CEC_HWCTRL_RX_LADDR_MASK 0x7fff
+#define TEGRA_CEC_HWCTRL_RX_LADDR(x) \
+ ((x) & TEGRA_CEC_HWCTRL_RX_LADDR_MASK)
+#define TEGRA_CEC_HWCTRL_RX_SNOOP (1 << 15)
+#define TEGRA_CEC_HWCTRL_RX_NAK_MODE (1 << 16)
+#define TEGRA_CEC_HWCTRL_TX_NAK_MODE (1 << 24)
+#define TEGRA_CEC_HWCTRL_FAST_SIM_MODE (1 << 30)
+#define TEGRA_CEC_HWCTRL_TX_RX_MODE (1 << 31)
+
+#define TEGRA_CEC_INPUT_FILTER_MODE (1 << 31)
+#define TEGRA_CEC_INPUT_FILTER_FIFO_LENGTH_SHIFT 0
+
+#define TEGRA_CEC_TX_REG_DATA_SHIFT 0
+#define TEGRA_CEC_TX_REG_EOM (1 << 8)
+#define TEGRA_CEC_TX_REG_BCAST (1 << 12)
+#define TEGRA_CEC_TX_REG_START_BIT (1 << 16)
+#define TEGRA_CEC_TX_REG_RETRY (1 << 17)
+
+#define TEGRA_CEC_RX_REGISTER_SHIFT 0
+#define TEGRA_CEC_RX_REGISTER_EOM (1 << 8)
+#define TEGRA_CEC_RX_REGISTER_ACK (1 << 9)
+
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_LO_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM0_START_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM0_START_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_LO_TIME_SHIFT 0
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_SAMPLE_TIME_SHIFT 8
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MAX_DURATION_SHIFT 16
+#define TEGRA_CEC_RX_TIM1_DATA_BIT_MIN_DURATION_SHIFT 24
+
+#define TEGRA_CEC_RX_TIM2_END_OF_BLOCK_TIME_SHIFT 0
+
+#define TEGRA_CEC_TX_TIM0_START_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM0_START_BIT_DURATION_SHIFT 8
+#define TEGRA_CEC_TX_TIM0_BUS_XITION_TIME_SHIFT 16
+#define TEGRA_CEC_TX_TIM0_BUS_ERROR_LO_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM1_LO_DATA_BIT_LO_TIME_SHIFT 0
+#define TEGRA_CEC_TX_TIM1_HI_DATA_BIT_LO_TIME_SHIFT 8
+#define TEGRA_CEC_TX_TIM1_DATA_BIT_DURATION_SHIFT 16
+#define TEGRA_CEC_TX_TIM1_ACK_NAK_BIT_SAMPLE_TIME_SHIFT 24
+
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_ADDITIONAL_FRAME_SHIFT 0
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_NEW_FRAME_SHIFT 4
+#define TEGRA_CEC_TX_TIM2_BUS_IDLE_TIME_RETRY_FRAME_SHIFT 8
+
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_STAT_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_STAT_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_STAT_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_STAT_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_STAT_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_EMPTY (1 << 0)
+#define TEGRA_CEC_INT_MASK_TX_REGISTER_UNDERRUN (1 << 1)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_OR_BLOCK_NAKD (1 << 2)
+#define TEGRA_CEC_INT_MASK_TX_ARBITRATION_FAILED (1 << 3)
+#define TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED (1 << 4)
+#define TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED (1 << 5)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_FULL (1 << 8)
+#define TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN (1 << 9)
+#define TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED (1 << 10)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ANOMALY_DETECTED (1 << 11)
+#define TEGRA_CEC_INT_MASK_RX_BUS_ERROR_DETECTED (1 << 12)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_H2L (1 << 13)
+#define TEGRA_CEC_INT_MASK_FILTERED_RX_DATA_PIN_TRANSITION_L2H (1 << 14)
+
+#define TEGRA_CEC_HW_DEBUG_TX_DURATION_COUNT_SHIFT 0
+#define TEGRA_CEC_HW_DEBUG_TX_TXBIT_COUNT_SHIFT 17
+#define TEGRA_CEC_HW_DEBUG_TX_STATE_SHIFT 21
+#define TEGRA_CEC_HW_DEBUG_TX_FORCELOOUT (1 << 25)
+#define TEGRA_CEC_HW_DEBUG_TX_TXDATABIT_SAMPLE_TIMER (1 << 26)
+
+#endif /* TEGRA_CEC_H */
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index 32504b724b5d..886ac5ec073f 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 42e383a48ffe..8b586c864524 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1522,6 +1522,11 @@ static int cal_async_complete(struct v4l2_async_notifier *notifier)
return 0;
}
+static const struct v4l2_async_notifier_operations cal_async_ops = {
+ .bound = cal_async_bound,
+ .complete = cal_async_complete,
+};
+
static int cal_complete_ctx(struct cal_ctx *ctx)
{
struct video_device *vfd;
@@ -1736,8 +1741,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ctx->asd_list[0] = asd;
ctx->notifier.subdevs = ctx->asd_list;
ctx->notifier.num_subdevs = 1;
- ctx->notifier.bound = cal_async_bound;
- ctx->notifier.complete = cal_async_complete;
+ ctx->notifier.ops = &cal_async_ops;
ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
&ctx->notifier);
if (ret) {
diff --git a/drivers/media/platform/via-camera.h b/drivers/media/platform/via-camera.h
index b12a4b3d616f..2d67f8ce258d 100644
--- a/drivers/media/platform/via-camera.h
+++ b/drivers/media/platform/via-camera.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* VIA Camera register definitions.
*/
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index b01fba020d5f..7bf9fa2f8534 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -388,9 +388,9 @@ static void device_run(void *priv)
schedule_irq(dev, ctx->transtime);
}
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
{
- struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+ struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
@@ -1024,7 +1024,7 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- setup_timer(&dev->timer, device_isr, (long)dev);
+ timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index 68c5d9804c11..4b2e3de7856e 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vimc-objs := vimc-core.o
vimc_capture-objs := vimc-capture.o
vimc_common-objs := vimc-common.o
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index 51c0eee61ca6..fe088a953860 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -267,11 +267,12 @@ static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
PLATFORM_DEVID_AUTO,
&pdata,
sizeof(pdata));
- if (!vimc->subdevs[i]) {
+ if (IS_ERR(vimc->subdevs[i])) {
+ match = ERR_CAST(vimc->subdevs[i]);
while (--i >= 0)
platform_device_unregister(vimc->subdevs[i]);
- return ERR_PTR(-ENOMEM);
+ return match;
}
component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 29738810e3ee..2f5762e3309a 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index f0f423c7ca41..a651527d80db 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -189,6 +189,22 @@ struct vivid_fmt vivid_formats[] = {
.buffers = 1,
},
{
+ .fourcc = V4L2_PIX_FMT_Y10,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y12,
+ .vdownsampling = { 1 },
+ .bit_depth = { 16 },
+ .color_enc = TGP_COLOR_ENC_LUMA,
+ .planes = 1,
+ .buffers = 1,
+ },
+ {
.fourcc = V4L2_PIX_FMT_Y16,
.vdownsampling = { 1 },
.bit_depth = { 16 },
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
index a33afc385a48..f5cd6f0491cb 100644
--- a/drivers/media/platform/vsp1/Makefile
+++ b/drivers/media/platform/vsp1/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_pipe.o
vsp1-y += vsp1_dl.o vsp1_drm.o vsp1_video.o
vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index ebfdf334d99c..d881cf09876d 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -351,6 +351,11 @@ static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
return -EINVAL;
}
+static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
+ .bound = xvip_graph_notify_bound,
+ .complete = xvip_graph_notify_complete,
+};
+
static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
struct device_node *node)
{
@@ -548,8 +553,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
xdev->notifier.subdevs = subdevs;
xdev->notifier.num_subdevs = num_subdevs;
- xdev->notifier.bound = xvip_graph_notify_bound;
- xdev->notifier.complete = xvip_graph_notify_complete;
+ xdev->notifier.ops = &xvip_graph_notify_ops;
ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 120e791199b2..37e6e8255b57 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel character device drivers.
#
diff --git a/drivers/media/radio/lm7000.h b/drivers/media/radio/lm7000.h
index 139cd6b68824..adb217965620 100644
--- a/drivers/media/radio/lm7000.h
+++ b/drivers/media/radio/lm7000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LM7000_H
#define __LM7000_H
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 6888b7db449d..7575e5370a49 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -281,9 +281,9 @@ static bool cadet_has_rds_data(struct cadet *dev)
}
-static void cadet_handler(unsigned long data)
+static void cadet_handler(struct timer_list *t)
{
- struct cadet *dev = (void *)data;
+ struct cadet *dev = from_timer(dev, t, readtimer);
/* Service the RDS fifo */
if (mutex_trylock(&dev->lock)) {
@@ -309,7 +309,6 @@ static void cadet_handler(unsigned long data)
/*
* Clean up and exit
*/
- setup_timer(&dev->readtimer, cadet_handler, data);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
@@ -318,7 +317,7 @@ static void cadet_start_rds(struct cadet *dev)
{
dev->rdsstat = 1;
outb(0x80, dev->io); /* Select RDS fifo */
- setup_timer(&dev->readtimer, cadet_handler, (unsigned long)dev);
+ timer_setup(&dev->readtimer, cadet_handler, 0);
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 3c0a22a54113..70a2c86774ce 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -254,7 +254,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct raremono_device *radio = video_drvdata(file);
- u32 freq = f->frequency;
+ u32 freq;
unsigned band;
if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index cd76facc22f5..c89a7d5b8c55 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -749,7 +749,7 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
/*
* si470x_viddev_template - video device interface
*/
-struct video_device si470x_viddev_template = {
+const struct video_device si470x_viddev_template = {
.fops = &si470x_fops,
.name = DRIVER_NAME,
.release = video_device_release_empty,
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 7d2defd9d399..eb7b834a0ae5 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -209,7 +209,7 @@ struct si470x_device {
/**************************************************************************
* Common Functions
**************************************************************************/
-extern struct video_device si470x_viddev_template;
+extern const struct video_device si470x_viddev_template;
extern const struct v4l2_ctrl_ops si470x_ctrl_ops;
int si470x_get_register(struct si470x_device *radio, int regnr);
int si470x_set_register(struct si470x_device *radio, int regnr);
diff --git a/drivers/media/radio/wl128x/Kconfig b/drivers/media/radio/wl128x/Kconfig
index c9e349b169c4..2add222ea346 100644
--- a/drivers/media/radio/wl128x/Kconfig
+++ b/drivers/media/radio/wl128x/Kconfig
@@ -7,11 +7,11 @@ config RADIO_WL128X
depends on VIDEO_V4L2 && RFKILL && TTY && TI_ST
depends on GPIOLIB || COMPILE_TEST
help
- Choose Y here if you have this FM radio chip.
+ Choose Y here if you have this FM radio chip.
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/video4linux/API.html>.
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux 2 API. Information on
+ this API and pointers to "v4l2" programs may be found at
+ <file:Documentation/video4linux/API.html>.
endmenu
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index ab3428bf63fe..800d69c3f80b 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -543,13 +543,13 @@ static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
* interrupt process. Therefore reset stage index to re-enable default
* interrupts. So that next interrupt will be processed as usual.
*/
-static void int_timeout_handler(unsigned long data)
+static void int_timeout_handler(struct timer_list *t)
{
struct fmdev *fmdev;
struct fm_irq *fmirq;
fmdbg("irq: timeout,trying to re-enable fm interrupts\n");
- fmdev = (struct fmdev *)data;
+ fmdev = from_timer(fmdev, t, irq_info.timer);
fmirq = &fmdev->irq_info;
fmirq->retry++;
@@ -1550,8 +1550,7 @@ int fmc_prepare(struct fmdev *fmdev)
atomic_set(&fmdev->tx_cnt, 1);
fmdev->resp_comp = NULL;
- setup_timer(&fmdev->irq_info.timer, &int_timeout_handler,
- (unsigned long)fmdev);
+ timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
/*TODO: add FM_STIC_EVENT later */
fmdev->irq_info.mask = FM_MAL_EVENT;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index d9ce8ff55d0c..afb3456d4e20 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -2,7 +2,6 @@
menuconfig RC_CORE
tristate "Remote Controller support"
depends on INPUT
- default y
---help---
Enable support for Remote Controllers on Linux. This is
needed in order to support several video capture adapters,
@@ -179,6 +178,7 @@ config IR_ENE
config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
depends on RC_CORE
+ depends on OF || COMPILE_TEST
help
Say Y here if you want to use hisilicon hix5hd2 remote control.
To compile this driver as a module, choose M here: the module will be
@@ -286,6 +286,7 @@ config IR_REDRAT3
config IR_SPI
tristate "SPI connected IR LED"
depends on SPI && LIRC
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use an IR LED connected through SPI bus.
@@ -393,6 +394,7 @@ config RC_LOOPBACK
config IR_GPIO_CIR
tristate "GPIO IR remote control"
depends on RC_CORE
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to use GPIO based IR Receiver.
@@ -403,6 +405,7 @@ config IR_GPIO_TX
tristate "GPIO IR Bit Banging Transmitter"
depends on RC_CORE
depends on LIRC
+ depends on (OF && GPIOLIB) || COMPILE_TEST
---help---
Say Y if you want to a GPIO based IR transmitter. This is a
bit banging driver.
@@ -415,6 +418,7 @@ config IR_PWM_TX
depends on RC_CORE
depends on LIRC
depends on PWM
+ depends on OF || COMPILE_TEST
---help---
Say Y if you want to use a PWM based IR transmitter. This is
more power efficient than the bit banging gpio driver.
@@ -469,6 +473,16 @@ config IR_SIR
To compile this driver as a module, choose M here: the module will
be called sir-ir.
+config IR_TANGO
+ tristate "Sigma Designs SMP86xx IR decoder"
+ depends on RC_CORE
+ depends on ARCH_TANGO || COMPILE_TEST
+ ---help---
+ Adds support for the HW IR decoder embedded on Sigma Designs
+ Tango-based systems (SMP86xx, SMP87xx).
+ The HW decoder supports NEC, RC-5, RC-6 IR protocols.
+ When compiled as a module, look for tango-ir.
+
config IR_ZX
tristate "ZTE ZX IR remote control"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 9bc6a3980ed0..10026477a677 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rc-core-objs := rc-main.o rc-ir-raw.o
obj-y += keymaps/
@@ -44,3 +45,4 @@ obj-$(CONFIG_IR_SERIAL) += serial_ir.o
obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_ZX) += zx-irdec.o
+obj-$(CONFIG_IR_TANGO) += tango-ir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index d0871d60a723..8e82610ffaad 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -198,7 +198,7 @@ static const struct ati_receiver_type type_firefly = {
.default_keymap = RC_MAP_SNAPSTREAM_FIREFLY
};
-static struct usb_device_id ati_remote_table[] = {
+static const struct usb_device_id ati_remote_table[] = {
{
USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID),
.driver_info = (unsigned long)&type_ati
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index af7ba23e16e1..71b8c9bbf6c4 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -670,9 +670,9 @@ exit:
}
/* timer to simulate tx done interrupt */
-static void ene_tx_irqsim(unsigned long data)
+static void ene_tx_irqsim(struct timer_list *t)
{
- struct ene_device *dev = (struct ene_device *)data;
+ struct ene_device *dev = from_timer(dev, t, tx_sim_timer);
unsigned long flags;
spin_lock_irqsave(&dev->hw_lock, flags);
@@ -1045,8 +1045,7 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
if (!dev->hw_learning_and_tx_capable && txsim) {
dev->hw_learning_and_tx_capable = true;
- setup_timer(&dev->tx_sim_timer, ene_tx_irqsim,
- (long unsigned int)dev);
+ timer_setup(&dev->tx_sim_timer, ene_tx_irqsim, 0);
pr_warn("Simulation of TX activated\n");
}
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 7248b3662285..3d99b51384ac 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -14,119 +14,64 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <media/rc-core.h>
-#include <linux/platform_data/media/gpio-ir-recv.h>
-#define GPIO_IR_DRIVER_NAME "gpio-rc-recv"
#define GPIO_IR_DEVICE_NAME "gpio_ir_recv"
struct gpio_rc_dev {
struct rc_dev *rcdev;
- int gpio_nr;
- bool active_low;
+ struct gpio_desc *gpiod;
+ int irq;
};
-#ifdef CONFIG_OF
-/*
- * Translate OpenFirmware node properties into platform_data
- */
-static int gpio_ir_recv_get_devtree_pdata(struct device *dev,
- struct gpio_ir_recv_platform_data *pdata)
-{
- struct device_node *np = dev->of_node;
- enum of_gpio_flags flags;
- int gpio;
-
- gpio = of_get_gpio_flags(np, 0, &flags);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(dev, "Failed to get gpio flags (%d)\n", gpio);
- return gpio;
- }
-
- pdata->gpio_nr = gpio;
- pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW);
- /* probe() takes care of map_name == NULL or allowed_protos == 0 */
- pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL);
- pdata->allowed_protos = 0;
-
- return 0;
-}
-
-static const struct of_device_id gpio_ir_recv_of_match[] = {
- { .compatible = "gpio-ir-receiver", },
- { },
-};
-MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
-
-#else /* !CONFIG_OF */
-
-#define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS)
-
-#endif
-
static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
{
+ int val;
struct gpio_rc_dev *gpio_dev = dev_id;
- int gval;
- int rc = 0;
-
- gval = gpio_get_value(gpio_dev->gpio_nr);
-
- if (gval < 0)
- goto err_get_value;
-
- if (gpio_dev->active_low)
- gval = !gval;
- rc = ir_raw_event_store_edge(gpio_dev->rcdev, gval == 1);
- if (rc < 0)
- goto err_get_value;
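+ /* Report each edge to the rc-core raw decoder; the gpiod API already handles active-low polarity */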
+ val = gpiod_get_value(gpio_dev->gpiod);
+ if (val >= 0)
+ ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
-err_get_value:
return IRQ_HANDLED;
}
static int gpio_ir_recv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct gpio_rc_dev *gpio_dev;
struct rc_dev *rcdev;
- const struct gpio_ir_recv_platform_data *pdata =
- pdev->dev.platform_data;
int rc;
- if (pdev->dev.of_node) {
- struct gpio_ir_recv_platform_data *dtpdata =
- devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
- if (!dtpdata)
- return -ENOMEM;
- rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
- if (rc)
- return rc;
- pdata = dtpdata;
- }
-
- if (!pdata)
- return -EINVAL;
+ if (!np)
+ return -ENODEV;
- if (pdata->gpio_nr < 0)
- return -EINVAL;
-
- gpio_dev = kzalloc(sizeof(struct gpio_rc_dev), GFP_KERNEL);
+ gpio_dev = devm_kzalloc(dev, sizeof(*gpio_dev), GFP_KERNEL);
if (!gpio_dev)
return -ENOMEM;
- rcdev = rc_allocate_device(RC_DRIVER_IR_RAW);
- if (!rcdev) {
- rc = -ENOMEM;
- goto err_allocate_device;
+ gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN);
+ if (IS_ERR(gpio_dev->gpiod)) {
+ rc = PTR_ERR(gpio_dev->gpiod);
+ /* Just try again if this happens */
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio (%d)\n", rc);
+ return rc;
}
+ gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod);
+ if (gpio_dev->irq < 0)
+ return gpio_dev->irq;
+
+ rcdev = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
rcdev->priv = gpio_dev;
rcdev->device_name = GPIO_IR_DEVICE_NAME;
@@ -135,92 +80,52 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
rcdev->input_id.vendor = 0x0001;
rcdev->input_id.product = 0x0001;
rcdev->input_id.version = 0x0100;
- rcdev->dev.parent = &pdev->dev;
- rcdev->driver_name = GPIO_IR_DRIVER_NAME;
+ rcdev->dev.parent = dev;
+ rcdev->driver_name = KBUILD_MODNAME;
rcdev->min_timeout = 1;
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- if (pdata->allowed_protos)
- rcdev->allowed_protocols = pdata->allowed_protos;
- else
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
+ rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rcdev->map_name = of_get_property(np, "linux,rc-map-name", NULL);
+ if (!rcdev->map_name)
+ rcdev->map_name = RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
- gpio_dev->gpio_nr = pdata->gpio_nr;
- gpio_dev->active_low = pdata->active_low;
-
- rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv");
- if (rc < 0)
- goto err_gpio_request;
- rc = gpio_direction_input(pdata->gpio_nr);
- if (rc < 0)
- goto err_gpio_direction_input;
- rc = rc_register_device(rcdev);
+ rc = devm_rc_register_device(dev, rcdev);
if (rc < 0) {
- dev_err(&pdev->dev, "failed to register rc device\n");
- goto err_register_rc_device;
+ dev_err(dev, "failed to register rc device (%d)\n", rc);
+ return rc;
}
platform_set_drvdata(pdev, gpio_dev);
- rc = request_any_context_irq(gpio_to_irq(pdata->gpio_nr),
- gpio_ir_recv_irq,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "gpio-ir-recv-irq", gpio_dev);
- if (rc < 0)
- goto err_request_irq;
-
- return 0;
-
-err_request_irq:
- rc_unregister_device(rcdev);
- rcdev = NULL;
-err_register_rc_device:
-err_gpio_direction_input:
- gpio_free(pdata->gpio_nr);
-err_gpio_request:
- rc_free_device(rcdev);
-err_allocate_device:
- kfree(gpio_dev);
- return rc;
-}
-
-static int gpio_ir_recv_remove(struct platform_device *pdev)
-{
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
-
- free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev);
- rc_unregister_device(gpio_dev->rcdev);
- gpio_free(gpio_dev->gpio_nr);
- kfree(gpio_dev);
- return 0;
+ return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "gpio-ir-recv-irq", gpio_dev);
}
#ifdef CONFIG_PM
static int gpio_ir_recv_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq_wake(gpio_dev->irq);
else
- disable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq(gpio_dev->irq);
return 0;
}
static int gpio_ir_recv_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
+ struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- disable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
+ disable_irq_wake(gpio_dev->irq);
else
- enable_irq(gpio_to_irq(gpio_dev->gpio_nr));
+ enable_irq(gpio_dev->irq);
return 0;
}
@@ -231,11 +136,16 @@ static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
};
#endif
+static const struct of_device_id gpio_ir_recv_of_match[] = {
+ { .compatible = "gpio-ir-receiver", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+
static struct platform_driver gpio_ir_recv_driver = {
.probe = gpio_ir_recv_probe,
- .remove = gpio_ir_recv_remove,
.driver = {
- .name = GPIO_IR_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index a5ea86be8f44..f563ddd7f739 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -137,9 +137,9 @@ static void igorplugusb_cmd(struct igorplugusb *ir, int cmd)
dev_err(ir->dev, "submit urb failed: %d", ret);
}
-static void igorplugusb_timer(unsigned long data)
+static void igorplugusb_timer(struct timer_list *t)
{
- struct igorplugusb *ir = (struct igorplugusb *)data;
+ struct igorplugusb *ir = from_timer(ir, t, timer);
igorplugusb_cmd(ir, GET_INFRACODE);
}
@@ -174,7 +174,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->dev = &intf->dev;
- setup_timer(&ir->timer, igorplugusb_timer, (unsigned long)ir);
+ timer_setup(&ir->timer, igorplugusb_timer, 0);
ir->request.bRequest = GET_INFRACODE;
ir->request.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
@@ -245,7 +245,7 @@ static void igorplugusb_disconnect(struct usb_interface *intf)
usb_free_urb(ir->urb);
}
-static struct usb_device_id igorplugusb_table[] = {
+static const struct usb_device_id igorplugusb_table[] = {
/* Igor Plug USB (Atmel's Manufact. ID) */
{ USB_DEVICE(0x03eb, 0x0002) },
/* Fit PC2 Infrared Adapter */
diff --git a/drivers/media/rc/img-ir/Makefile b/drivers/media/rc/img-ir/Makefile
index 8e6d458e66ad..741fedc5dceb 100644
--- a/drivers/media/rc/img-ir/Makefile
+++ b/drivers/media/rc/img-ir/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
img-ir-y := img-ir-core.o
img-ir-$(CONFIG_IR_IMG_RAW) += img-ir-raw.o
img-ir-$(CONFIG_IR_IMG_HW) += img-ir-hw.o
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 03fe080278df..bcbabeeab12a 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -92,10 +92,9 @@ static int img_ir_probe(struct platform_device *pdev)
/* Private driver data */
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "cannot allocate device data\n");
+ if (!priv)
return -ENOMEM;
- }
+
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
spin_lock_init(&priv->lock);
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 82fdf4cc0824..f54bc5d23893 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -867,9 +867,9 @@ static void img_ir_handle_data(struct img_ir_priv *priv, u32 len, u64 raw)
}
/* timer function to end waiting for repeat. */
-static void img_ir_end_timer(unsigned long arg)
+static void img_ir_end_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.end_timer);
spin_lock_irq(&priv->lock);
img_ir_end_repeat(priv);
@@ -881,9 +881,9 @@ static void img_ir_end_timer(unsigned long arg)
* cleared when invalid interrupts were generated due to a quirk in the
* img-ir decoder.
*/
-static void img_ir_suspend_timer(unsigned long arg)
+static void img_ir_suspend_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, hw.suspend_timer);
spin_lock_irq(&priv->lock);
/*
@@ -1055,9 +1055,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
img_ir_probe_hw_caps(priv);
/* Set up the end timer */
- setup_timer(&hw->end_timer, img_ir_end_timer, (unsigned long)priv);
- setup_timer(&hw->suspend_timer, img_ir_suspend_timer,
- (unsigned long)priv);
+ timer_setup(&hw->end_timer, img_ir_end_timer, 0);
+ timer_setup(&hw->suspend_timer, img_ir_suspend_timer, 0);
/* Register a clock notifier */
if (!IS_ERR(priv->clk)) {
diff --git a/drivers/media/rc/img-ir/img-ir-raw.c b/drivers/media/rc/img-ir/img-ir-raw.c
index 64714efc1145..6e545680d3b6 100644
--- a/drivers/media/rc/img-ir/img-ir-raw.c
+++ b/drivers/media/rc/img-ir/img-ir-raw.c
@@ -67,9 +67,9 @@ void img_ir_isr_raw(struct img_ir_priv *priv, u32 irq_status)
* order to be assured of the final space. If there are no edges for a certain
* time we use this timer to emit a final sample to satisfy them.
*/
-static void img_ir_echo_timer(unsigned long arg)
+static void img_ir_echo_timer(struct timer_list *t)
{
- struct img_ir_priv *priv = (struct img_ir_priv *)arg;
+ struct img_ir_priv *priv = from_timer(priv, t, raw.timer);
spin_lock_irq(&priv->lock);
@@ -107,7 +107,7 @@ int img_ir_probe_raw(struct img_ir_priv *priv)
int error;
/* Set up the echo timer */
- setup_timer(&raw->timer, img_ir_echo_timer, (unsigned long)priv);
+ timer_setup(&raw->timer, img_ir_echo_timer, 0);
/* Allocate raw decoder */
raw->rdev = rdev = rc_allocate_device(RC_DRIVER_IR_RAW);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 7b3f31cc63d2..b25b35b3f6da 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -346,7 +346,7 @@ static const struct imon_usb_dev_descr imon_ir_raw = {
* devices use the SoundGraph vendor ID (0x15c2). This driver only supports
* the ffdc and later devices, which do onboard decoding.
*/
-static struct usb_device_id imon_usb_id_table[] = {
+static const struct usb_device_id imon_usb_id_table[] = {
/*
* Several devices with this same device ID, all use iMON_PAD.inf
* SoundGraph iMON PAD (IR & VFD)
@@ -602,8 +602,7 @@ static int send_packet(struct imon_context *ictx)
ictx->tx_urb->actual_length = 0;
} else {
/* fill request into kmalloc'ed space: */
- control_req = kmalloc(sizeof(struct usb_ctrlrequest),
- GFP_KERNEL);
+ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL);
if (control_req == NULL)
return -ENOMEM;
@@ -943,7 +942,7 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
int seq;
int retval = 0;
struct imon_context *ictx;
- const unsigned char vfd_packet6[] = {
+ static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
ictx = file->private_data;
@@ -1091,9 +1090,9 @@ static void usb_tx_callback(struct urb *urb)
/**
* report touchscreen input
*/
-static void imon_touch_display_timeout(unsigned long data)
+static void imon_touch_display_timeout(struct timer_list *t)
{
- struct imon_context *ictx = (struct imon_context *)data;
+ struct imon_context *ictx = from_timer(ictx, t, ttimer);
if (ictx->display_type != IMON_DISPLAY_TYPE_VGA)
return;
@@ -2047,8 +2046,8 @@ static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
{
struct rc_dev *rdev;
int ret;
- const unsigned char fp_packet[] = { 0x40, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x88 };
+ static const unsigned char fp_packet[] = {
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 };
rdev = rc_allocate_device(ictx->dev_descr->flags & IMON_IR_RAW ?
RC_DRIVER_IR_RAW : RC_DRIVER_SCANCODE);
@@ -2310,11 +2309,10 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
struct usb_host_interface *iface_desc;
int ret = -ENOMEM;
- ictx = kzalloc(sizeof(struct imon_context), GFP_KERNEL);
- if (!ictx) {
- dev_err(dev, "%s: kzalloc failed for context", __func__);
+ ictx = kzalloc(sizeof(*ictx), GFP_KERNEL);
+ if (!ictx)
goto exit;
- }
+
rx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!rx_urb)
goto rx_urb_alloc_failed;
@@ -2413,8 +2411,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
mutex_lock(&ictx->lock);
if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
- setup_timer(&ictx->ttimer, imon_touch_display_timeout,
- (unsigned long)ictx);
+ timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0);
}
ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
@@ -2517,6 +2514,11 @@ static int imon_probe(struct usb_interface *interface,
mutex_lock(&driver_lock);
first_if = usb_ifnum_to_if(usbdev, 0);
+ if (!first_if) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
first_if_ctx = usb_get_intfdata(first_if);
if (ifnum == 0) {
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index d2223c04e9ad..8f2f37412fc5 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -35,7 +35,7 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
struct lirc_codec *lirc = &dev->raw->lirc;
int sample;
- if (!dev->raw->lirc.drv || !dev->raw->lirc.drv->rbuf)
+ if (!dev->raw->lirc.ldev || !dev->raw->lirc.ldev->buf)
return -EINVAL;
/* Packet start */
@@ -84,8 +84,8 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
(u64)LIRC_VALUE_MASK);
gap_sample = LIRC_SPACE(lirc->gap_duration);
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
- (unsigned char *) &gap_sample);
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
+ (unsigned char *)&gap_sample);
lirc->gap = false;
}
@@ -95,9 +95,9 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
TO_US(ev.duration), TO_STR(ev.pulse));
}
- lirc_buffer_write(dev->raw->lirc.drv->rbuf,
+ lirc_buffer_write(dev->raw->lirc.ldev->buf,
(unsigned char *) &sample);
- wake_up(&dev->raw->lirc.drv->rbuf->wait_poll);
+ wake_up(&dev->raw->lirc.ldev->buf->wait_poll);
return 0;
}
@@ -298,11 +298,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
if (!dev->max_timeout)
return -ENOTTY;
+ /* Check for multiply overflow */
+ if (val > U32_MAX / 1000)
+ return -EINVAL;
+
tmp = val * 1000;
- if (tmp < dev->min_timeout ||
- tmp > dev->max_timeout)
- return -EINVAL;
+ if (tmp < dev->min_timeout || tmp > dev->max_timeout)
+ return -EINVAL;
if (dev->s_timeout)
ret = dev->s_timeout(dev, tmp);
@@ -343,12 +346,12 @@ static const struct file_operations lirc_fops = {
static int ir_lirc_register(struct rc_dev *dev)
{
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int rc = -ENOMEM;
unsigned long features = 0;
- drv = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!drv)
+ ldev = lirc_allocate_device();
+ if (!ldev)
return rc;
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
@@ -380,32 +383,29 @@ static int ir_lirc_register(struct rc_dev *dev)
if (dev->max_timeout)
features |= LIRC_CAN_SET_REC_TIMEOUT;
- snprintf(drv->name, sizeof(drv->name), "ir-lirc-codec (%s)",
+ snprintf(ldev->name, sizeof(ldev->name), "ir-lirc-codec (%s)",
dev->driver_name);
- drv->minor = -1;
- drv->features = features;
- drv->data = &dev->raw->lirc;
- drv->rbuf = NULL;
- drv->code_length = sizeof(struct ir_raw_event) * 8;
- drv->chunk_size = sizeof(int);
- drv->buffer_size = LIRCBUF_SIZE;
- drv->fops = &lirc_fops;
- drv->dev = &dev->dev;
- drv->rdev = dev;
- drv->owner = THIS_MODULE;
-
- drv->minor = lirc_register_driver(drv);
- if (drv->minor < 0) {
- rc = -ENODEV;
+ ldev->features = features;
+ ldev->data = &dev->raw->lirc;
+ ldev->buf = NULL;
+ ldev->code_length = sizeof(struct ir_raw_event) * 8;
+ ldev->chunk_size = sizeof(int);
+ ldev->buffer_size = LIRCBUF_SIZE;
+ ldev->fops = &lirc_fops;
+ ldev->dev.parent = &dev->dev;
+ ldev->rdev = dev;
+ ldev->owner = THIS_MODULE;
+
+ rc = lirc_register_device(ldev);
+ if (rc < 0)
goto out;
- }
- dev->raw->lirc.drv = drv;
+ dev->raw->lirc.ldev = ldev;
dev->raw->lirc.dev = dev;
return 0;
out:
- kfree(drv);
+ lirc_free_device(ldev);
return rc;
}
@@ -413,9 +413,8 @@ static int ir_lirc_unregister(struct rc_dev *dev)
{
struct lirc_codec *lirc = &dev->raw->lirc;
- lirc_unregister_driver(lirc->drv->minor);
- kfree(lirc->drv);
- lirc->drv = NULL;
+ lirc_unregister_device(lirc->ldev);
+ lirc->ldev = NULL;
return 0;
}
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
index 7c572a643656..69d6264d54e6 100644
--- a/drivers/media/rc/ir-mce_kbd-decoder.c
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -115,9 +115,9 @@ static unsigned char kbd_keycodes[256] = {
KEY_RESERVED
};
-static void mce_kbd_rx_timeout(unsigned long data)
+static void mce_kbd_rx_timeout(struct timer_list *t)
{
- struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
+ struct mce_kbd_dec *mce_kbd = from_timer(mce_kbd, t, rx_timeout);
int i;
unsigned char maskcode;
@@ -389,8 +389,7 @@ static int ir_mce_kbd_register(struct rc_dev *dev)
set_bit(EV_MSC, idev->evbit);
set_bit(MSC_SCAN, idev->mscbit);
- setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
- (unsigned long)mce_kbd);
+ timer_setup(&mce_kbd->rx_timeout, mce_kbd_rx_timeout, 0);
input_set_drvdata(idev, mce_kbd);
diff --git a/drivers/media/rc/ir-nec-decoder.c b/drivers/media/rc/ir-nec-decoder.c
index 817c18f2ddd1..a95d09acc22a 100644
--- a/drivers/media/rc/ir-nec-decoder.c
+++ b/drivers/media/rc/ir-nec-decoder.c
@@ -87,8 +87,6 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
data->state = STATE_BIT_PULSE;
return 0;
} else if (eq_margin(ev.duration, NEC_REPEAT_SPACE, NEC_UNIT / 2)) {
- rc_repeat(dev);
- IR_dprintk(1, "Repeat last key\n");
data->state = STATE_TRAILER_PULSE;
return 0;
}
@@ -151,19 +149,26 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
if (!geq_margin(ev.duration, NEC_TRAILER_SPACE, NEC_UNIT / 2))
break;
- address = bitrev8((data->bits >> 24) & 0xff);
- not_address = bitrev8((data->bits >> 16) & 0xff);
- command = bitrev8((data->bits >> 8) & 0xff);
- not_command = bitrev8((data->bits >> 0) & 0xff);
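+		/* A full 32-bit frame is a new keypress; a shorter frame at the trailer is a repeat */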
+ if (data->count == NEC_NBITS) {
+ address = bitrev8((data->bits >> 24) & 0xff);
+ not_address = bitrev8((data->bits >> 16) & 0xff);
+ command = bitrev8((data->bits >> 8) & 0xff);
+ not_command = bitrev8((data->bits >> 0) & 0xff);
+
+ scancode = ir_nec_bytes_to_scancode(address,
+ not_address,
+ command,
+ not_command,
+ &rc_proto);
- scancode = ir_nec_bytes_to_scancode(address, not_address,
- command, not_command,
- &rc_proto);
+ if (data->is_nec_x)
+ data->necx_repeat = true;
- if (data->is_nec_x)
- data->necx_repeat = true;
+ rc_keydown(dev, rc_proto, scancode, 0);
+ } else {
+ rc_repeat(dev);
+ }
- rc_keydown(dev, rc_proto, scancode, 0);
data->state = STATE_INACTIVE;
return 0;
}
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index af6496d709fb..50b319355edf 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -1,7 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-alink-dtu-m.o \
rc-anysee.o \
rc-apac-viewcomp.o \
+ rc-astrometa-t2hybrid.o \
rc-asus-pc39.o \
rc-asus-ps3-100.o \
rc-ati-tv-wonder-hd-600.o \
@@ -47,6 +49,8 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-geekbox.o \
rc-genius-tvgo-a11mce.o \
rc-gotview7135.o \
+ rc-hisi-poplar.o \
+ rc-hisi-tv-demo.o \
rc-imon-mce.o \
rc-imon-pad.o \
rc-iodata-bctv7e.o \
@@ -88,6 +92,7 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-reddo.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
+ rc-tango.o \
rc-tbs-nec.o \
rc-technisat-ts35.o \
rc-technisat-usb2.o \
diff --git a/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
new file mode 100644
index 000000000000..51690960fec4
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
@@ -0,0 +1,70 @@
+/*
+ * Keytable for the Astrometa T2hybrid remote controller
+ *
+ * Copyright (C) 2017 Oleh Kravchenko <oleg@kaa.org.ua>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table t2hybrid[] = {
+ { 0x4d, KEY_POWER2 },
+ { 0x54, KEY_VIDEO }, /* Source */
+ { 0x16, KEY_MUTE },
+
+ { 0x4c, KEY_RECORD },
+ { 0x05, KEY_CHANNELUP },
+	{ 0x0c, KEY_TIME }, /* Timeshift */
+
+ { 0x0a, KEY_VOLUMEDOWN },
+ { 0x40, KEY_ZOOM }, /* Fullscreen */
+ { 0x1e, KEY_VOLUMEUP },
+
+ { 0x12, KEY_0 },
+ { 0x02, KEY_CHANNELDOWN },
+ { 0x1c, KEY_AGAIN }, /* Recall */
+
+ { 0x09, KEY_1 },
+ { 0x1d, KEY_2 },
+ { 0x1f, KEY_3 },
+
+ { 0x0d, KEY_4 },
+ { 0x19, KEY_5 },
+ { 0x1b, KEY_6 },
+
+ { 0x11, KEY_7 },
+ { 0x15, KEY_8 },
+ { 0x17, KEY_9 },
+};
+
+static struct rc_map_list t2hybrid_map = {
+ .map = {
+ .scan = t2hybrid,
+ .size = ARRAY_SIZE(t2hybrid),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_ASTROMETA_T2HYBRID,
+ }
+};
+
+static int __init init_rc_map_t2hybrid(void)
+{
+ return rc_map_register(&t2hybrid_map);
+}
+
+static void __exit exit_rc_map_t2hybrid(void)
+{
+ rc_map_unregister(&t2hybrid_map);
+}
+
+module_init(init_rc_map_t2hybrid)
+module_exit(exit_rc_map_t2hybrid)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
diff --git a/drivers/media/rc/keymaps/rc-avermedia-m135a.c b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
index 9882e2cde975..6d5a73b7ccec 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-m135a.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-m135a.c
@@ -43,7 +43,8 @@ static struct rc_map_table avermedia_m135a[] = {
{ 0x0213, KEY_RIGHT }, /* -> or L */
{ 0x0212, KEY_LEFT }, /* <- or R */
- { 0x0217, KEY_SLEEP }, /* Capturar Imagem or Snapshot */
+ { 0x0215, KEY_MENU },
+ { 0x0217, KEY_CAMERA }, /* Capturar Imagem or Snapshot */
{ 0x0210, KEY_SHUFFLE }, /* Amostra or 16 chan prev */
{ 0x0303, KEY_CHANNELUP },
diff --git a/drivers/media/rc/keymaps/rc-hisi-poplar.c b/drivers/media/rc/keymaps/rc-hisi-poplar.c
new file mode 100644
index 000000000000..78728bc7f63a
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-poplar.c
@@ -0,0 +1,69 @@
+/*
+ * Keytable for remote controller of HiSilicon poplar board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_poplar_keymap[] = {
+ { 0x0000b292, KEY_1},
+ { 0x0000b293, KEY_2},
+ { 0x0000b2cc, KEY_3},
+ { 0x0000b28e, KEY_4},
+ { 0x0000b28f, KEY_5},
+ { 0x0000b2c8, KEY_6},
+ { 0x0000b28a, KEY_7},
+ { 0x0000b28b, KEY_8},
+ { 0x0000b2c4, KEY_9},
+ { 0x0000b287, KEY_0},
+ { 0x0000b282, KEY_HOMEPAGE},
+ { 0x0000b2ca, KEY_UP},
+ { 0x0000b299, KEY_LEFT},
+ { 0x0000b2c1, KEY_RIGHT},
+ { 0x0000b2d2, KEY_DOWN},
+ { 0x0000b2c5, KEY_DELETE},
+ { 0x0000b29c, KEY_MUTE},
+ { 0x0000b281, KEY_VOLUMEDOWN},
+ { 0x0000b280, KEY_VOLUMEUP},
+ { 0x0000b2dc, KEY_POWER},
+ { 0x0000b29a, KEY_MENU},
+ { 0x0000b28d, KEY_SETUP},
+ { 0x0000b2c5, KEY_BACK},
+ { 0x0000b295, KEY_PLAYPAUSE},
+ { 0x0000b2ce, KEY_ENTER},
+ { 0x0000b285, KEY_CHANNELUP},
+ { 0x0000b286, KEY_CHANNELDOWN},
+ { 0x0000b2da, KEY_NUMERIC_STAR},
+ { 0x0000b2d0, KEY_NUMERIC_POUND},
+};
+
+static struct rc_map_list hisi_poplar_map = {
+ .map = {
+ .scan = hisi_poplar_keymap,
+ .size = ARRAY_SIZE(hisi_poplar_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_POPLAR,
+ }
+};
+
+static int __init init_rc_map_hisi_poplar(void)
+{
+ return rc_map_register(&hisi_poplar_map);
+}
+
+static void __exit exit_rc_map_hisi_poplar(void)
+{
+ rc_map_unregister(&hisi_poplar_map);
+}
+
+module_init(init_rc_map_hisi_poplar)
+module_exit(exit_rc_map_hisi_poplar)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-hisi-tv-demo.c b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
new file mode 100644
index 000000000000..4816e3a4a18d
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-hisi-tv-demo.c
@@ -0,0 +1,81 @@
+/*
+ * Keytable for remote controller of HiSilicon tv demo board.
+ *
+ * Copyright (c) 2017 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table hisi_tv_demo_keymap[] = {
+ { 0x00000092, KEY_1},
+ { 0x00000093, KEY_2},
+ { 0x000000cc, KEY_3},
+ { 0x0000009f, KEY_4},
+ { 0x0000008e, KEY_5},
+ { 0x0000008f, KEY_6},
+ { 0x000000c8, KEY_7},
+ { 0x00000094, KEY_8},
+ { 0x0000008a, KEY_9},
+ { 0x0000008b, KEY_0},
+ { 0x000000ce, KEY_ENTER},
+ { 0x000000ca, KEY_UP},
+ { 0x00000099, KEY_LEFT},
+ { 0x00000084, KEY_PAGEUP},
+ { 0x000000c1, KEY_RIGHT},
+ { 0x000000d2, KEY_DOWN},
+ { 0x00000089, KEY_PAGEDOWN},
+ { 0x000000d1, KEY_MUTE},
+ { 0x00000098, KEY_VOLUMEDOWN},
+ { 0x00000090, KEY_VOLUMEUP},
+ { 0x0000009c, KEY_POWER},
+ { 0x000000d6, KEY_STOP},
+ { 0x00000097, KEY_MENU},
+ { 0x000000cb, KEY_BACK},
+ { 0x000000da, KEY_PLAYPAUSE},
+ { 0x00000080, KEY_INFO},
+ { 0x000000c3, KEY_REWIND},
+ { 0x00000087, KEY_HOMEPAGE},
+ { 0x000000d0, KEY_FASTFORWARD},
+ { 0x000000c4, KEY_SOUND},
+ { 0x00000082, BTN_1},
+ { 0x000000c7, BTN_2},
+ { 0x00000086, KEY_PROGRAM},
+ { 0x000000d9, KEY_SUBTITLE},
+ { 0x00000085, KEY_ZOOM},
+ { 0x0000009b, KEY_RED},
+ { 0x0000009a, KEY_GREEN},
+ { 0x000000c0, KEY_YELLOW},
+ { 0x000000c2, KEY_BLUE},
+ { 0x0000009d, KEY_CHANNELDOWN},
+ { 0x000000cf, KEY_CHANNELUP},
+};
+
+static struct rc_map_list hisi_tv_demo_map = {
+ .map = {
+ .scan = hisi_tv_demo_keymap,
+ .size = ARRAY_SIZE(hisi_tv_demo_keymap),
+ .rc_proto = RC_PROTO_NEC,
+ .name = RC_MAP_HISI_TV_DEMO,
+ }
+};
+
+static int __init init_rc_map_hisi_tv_demo(void)
+{
+ return rc_map_register(&hisi_tv_demo_map);
+}
+
+static void __exit exit_rc_map_hisi_tv_demo(void)
+{
+ rc_map_unregister(&hisi_tv_demo_map);
+}
+
+module_init(init_rc_map_hisi_tv_demo)
+module_exit(exit_rc_map_hisi_tv_demo)
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/keymaps/rc-tango.c b/drivers/media/rc/keymaps/rc-tango.c
new file mode 100644
index 000000000000..1c6e8875d46f
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-tango.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2017 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <media/rc-map.h>
+
+static struct rc_map_table tango_table[] = {
+ { 0x4cb4a, KEY_POWER },
+ { 0x4cb48, KEY_FILE },
+ { 0x4cb0f, KEY_SETUP },
+ { 0x4cb4d, KEY_SUSPEND },
+ { 0x4cb4e, KEY_VOLUMEUP },
+ { 0x4cb44, KEY_EJECTCD },
+ { 0x4cb13, KEY_TV },
+ { 0x4cb51, KEY_MUTE },
+ { 0x4cb52, KEY_VOLUMEDOWN },
+
+ { 0x4cb41, KEY_1 },
+ { 0x4cb03, KEY_2 },
+ { 0x4cb42, KEY_3 },
+ { 0x4cb45, KEY_4 },
+ { 0x4cb07, KEY_5 },
+ { 0x4cb46, KEY_6 },
+ { 0x4cb55, KEY_7 },
+ { 0x4cb17, KEY_8 },
+ { 0x4cb56, KEY_9 },
+ { 0x4cb1b, KEY_0 },
+ { 0x4cb59, KEY_DELETE },
+ { 0x4cb5a, KEY_CAPSLOCK },
+
+ { 0x4cb47, KEY_BACK },
+ { 0x4cb05, KEY_SWITCHVIDEOMODE },
+ { 0x4cb06, KEY_UP },
+ { 0x4cb43, KEY_LEFT },
+ { 0x4cb01, KEY_RIGHT },
+ { 0x4cb0a, KEY_DOWN },
+ { 0x4cb02, KEY_ENTER },
+ { 0x4cb4b, KEY_INFO },
+ { 0x4cb09, KEY_HOME },
+
+ { 0x4cb53, KEY_MENU },
+ { 0x4cb12, KEY_PREVIOUS },
+ { 0x4cb50, KEY_PLAY },
+ { 0x4cb11, KEY_NEXT },
+ { 0x4cb4f, KEY_TITLE },
+ { 0x4cb0e, KEY_REWIND },
+ { 0x4cb4c, KEY_STOP },
+ { 0x4cb0d, KEY_FORWARD },
+ { 0x4cb57, KEY_MEDIA_REPEAT },
+ { 0x4cb16, KEY_ANGLE },
+ { 0x4cb54, KEY_PAUSE },
+ { 0x4cb15, KEY_SLOW },
+ { 0x4cb5b, KEY_TIME },
+ { 0x4cb1a, KEY_AUDIO },
+ { 0x4cb58, KEY_SUBTITLE },
+ { 0x4cb19, KEY_ZOOM },
+
+ { 0x4cb5f, KEY_RED },
+ { 0x4cb1e, KEY_GREEN },
+ { 0x4cb5c, KEY_YELLOW },
+ { 0x4cb1d, KEY_BLUE },
+};
+
+static struct rc_map_list tango_map = {
+ .map = {
+ .scan = tango_table,
+ .size = ARRAY_SIZE(tango_table),
+ .rc_proto = RC_PROTO_NECX,
+ .name = RC_MAP_TANGO,
+ }
+};
+
+static int __init init_rc_map_tango(void)
+{
+ return rc_map_register(&tango_map);
+}
+
+static void __exit exit_rc_map_tango(void)
+{
+ rc_map_unregister(&tango_map);
+}
+
+module_init(init_rc_map_tango)
+module_exit(exit_rc_map_tango)
+
+MODULE_AUTHOR("Sigma Designs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/keymaps/rc-twinhan1027.c b/drivers/media/rc/keymaps/rc-twinhan1027.c
index 2275b37c61d2..78bb3143a1a8 100644
--- a/drivers/media/rc/keymaps/rc-twinhan1027.c
+++ b/drivers/media/rc/keymaps/rc-twinhan1027.c
@@ -66,7 +66,7 @@ static struct rc_map_list twinhan_vp1027_map = {
.map = {
.scan = twinhan_vp1027,
.size = ARRAY_SIZE(twinhan_vp1027),
- .rc_proto = RC_PROTO_UNKNOWN, /* Legacy IR type */
+ .rc_proto = RC_PROTO_NEC,
.name = RC_MAP_TWINHAN_VP1027_DVBS,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 9080e39ea391..e16d1138ca48 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -24,96 +24,91 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/idr.h>
#include <media/rc-core.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
-#define NOPLUG -1
#define LOGHEAD "lirc_dev (%s[%d]): "
static dev_t lirc_base_dev;
-struct irctl {
- struct lirc_driver d;
- int attached;
- int open;
+/* Used to keep track of allocated lirc devices */
+#define LIRC_MAX_DEVICES 256
+static DEFINE_IDA(lirc_ida);
- struct mutex irctl_lock;
- struct lirc_buffer *buf;
- bool buf_internal;
- unsigned int chunk_size;
-
- struct device dev;
- struct cdev cdev;
-};
+/* Only used for sysfs but defined to void otherwise */
+static struct class *lirc_class;
-static DEFINE_MUTEX(lirc_dev_lock);
+static void lirc_release_device(struct device *ld)
+{
+ struct lirc_dev *d = container_of(ld, struct lirc_dev, dev);
-static struct irctl *irctls[MAX_IRCTL_DEVICES];
+ put_device(d->dev.parent);
-/* Only used for sysfs but defined to void otherwise */
-static struct class *lirc_class;
+ if (d->buf_internal) {
+ lirc_buffer_free(d->buf);
+ kfree(d->buf);
+ d->buf = NULL;
+ }
+ kfree(d);
+ module_put(THIS_MODULE);
+}
-static void lirc_release(struct device *ld)
+static int lirc_allocate_buffer(struct lirc_dev *d)
{
- struct irctl *ir = container_of(ld, struct irctl, dev);
+ int err;
- put_device(ir->dev.parent);
+ if (d->buf) {
+ d->buf_internal = false;
+ return 0;
+ }
- if (ir->buf_internal) {
- lirc_buffer_free(ir->buf);
- kfree(ir->buf);
+ d->buf = kmalloc(sizeof(*d->buf), GFP_KERNEL);
+ if (!d->buf)
+ return -ENOMEM;
+
+ err = lirc_buffer_init(d->buf, d->chunk_size, d->buffer_size);
+ if (err) {
+ kfree(d->buf);
+ d->buf = NULL;
+ return err;
}
- mutex_lock(&lirc_dev_lock);
- irctls[ir->d.minor] = NULL;
- mutex_unlock(&lirc_dev_lock);
- kfree(ir);
+ d->buf_internal = true;
+ return 0;
}
-static int lirc_allocate_buffer(struct irctl *ir)
+struct lirc_dev *
+lirc_allocate_device(void)
{
- int err = 0;
- int bytes_in_key;
- unsigned int chunk_size;
- unsigned int buffer_size;
- struct lirc_driver *d = &ir->d;
-
- bytes_in_key = BITS_TO_LONGS(d->code_length) +
- (d->code_length % 8 ? 1 : 0);
- buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
- chunk_size = d->chunk_size ? d->chunk_size : bytes_in_key;
-
- if (d->rbuf) {
- ir->buf = d->rbuf;
- ir->buf_internal = false;
- } else {
- ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!ir->buf) {
- err = -ENOMEM;
- goto out;
- }
+ struct lirc_dev *d;
- err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
- if (err) {
- kfree(ir->buf);
- ir->buf = NULL;
- goto out;
- }
-
- ir->buf_internal = true;
- d->rbuf = ir->buf;
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (d) {
+ mutex_init(&d->mutex);
+ device_initialize(&d->dev);
+ d->dev.class = lirc_class;
+ d->dev.release = lirc_release_device;
+ __module_get(THIS_MODULE);
}
- ir->chunk_size = ir->buf->chunk_size;
-out:
- return err;
+ return d;
}
+EXPORT_SYMBOL(lirc_allocate_device);
-int lirc_register_driver(struct lirc_driver *d)
+void lirc_free_device(struct lirc_dev *d)
+{
+ if (!d)
+ return;
+
+ put_device(&d->dev);
+}
+EXPORT_SYMBOL(lirc_free_device);
+
+int lirc_register_device(struct lirc_dev *d)
{
- struct irctl *ir;
int minor;
int err;
@@ -122,8 +117,8 @@ int lirc_register_driver(struct lirc_driver *d)
return -EBADRQC;
}
- if (!d->dev) {
- pr_err("dev pointer not filled in!\n");
+ if (!d->dev.parent) {
+ pr_err("dev parent pointer not filled in!\n");
return -EINVAL;
}
@@ -132,226 +127,146 @@ int lirc_register_driver(struct lirc_driver *d)
return -EINVAL;
}
- if (d->minor >= MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "minor must be between 0 and %d!\n",
- MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
+ if (!d->buf && d->chunk_size < 1) {
+ pr_err("chunk_size must be set!\n");
+ return -EINVAL;
}
- if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
- dev_err(d->dev, "code length must be less than %d bits\n",
- BUFLEN * 8);
- return -EBADRQC;
+ if (!d->buf && d->buffer_size < 1) {
+ pr_err("buffer_size must be set!\n");
+ return -EINVAL;
}
- if (!d->rbuf && !(d->fops && d->fops->read &&
- d->fops->poll && d->fops->unlocked_ioctl)) {
- dev_err(d->dev, "undefined read, poll, ioctl\n");
+ if (d->code_length < 1 || d->code_length > (BUFLEN * 8)) {
+ dev_err(&d->dev, "code length must be less than %d bits\n",
+ BUFLEN * 8);
return -EBADRQC;
}
- mutex_lock(&lirc_dev_lock);
-
- minor = d->minor;
-
- if (minor < 0) {
- /* find first free slot for driver */
- for (minor = 0; minor < MAX_IRCTL_DEVICES; minor++)
- if (!irctls[minor])
- break;
- if (minor == MAX_IRCTL_DEVICES) {
- dev_err(d->dev, "no free slots for drivers!\n");
- err = -ENOMEM;
- goto out_lock;
- }
- } else if (irctls[minor]) {
- dev_err(d->dev, "minor (%d) just registered!\n", minor);
- err = -EBUSY;
- goto out_lock;
- }
-
- ir = kzalloc(sizeof(struct irctl), GFP_KERNEL);
- if (!ir) {
- err = -ENOMEM;
- goto out_lock;
+ if (!d->buf && !(d->fops && d->fops->read &&
+ d->fops->poll && d->fops->unlocked_ioctl)) {
+ dev_err(&d->dev, "undefined read, poll, ioctl\n");
+ return -EBADRQC;
}
- mutex_init(&ir->irctl_lock);
- irctls[minor] = ir;
- d->minor = minor;
-
/* some safety check 8-) */
- d->name[sizeof(d->name)-1] = '\0';
+ d->name[sizeof(d->name) - 1] = '\0';
if (d->features == 0)
d->features = LIRC_CAN_REC_LIRCCODE;
- ir->d = *d;
-
if (LIRC_CAN_REC(d->features)) {
- err = lirc_allocate_buffer(irctls[minor]);
- if (err) {
- kfree(ir);
- goto out_lock;
- }
- d->rbuf = ir->buf;
+ err = lirc_allocate_buffer(d);
+ if (err)
+ return err;
}
- device_initialize(&ir->dev);
- ir->dev.devt = MKDEV(MAJOR(lirc_base_dev), ir->d.minor);
- ir->dev.class = lirc_class;
- ir->dev.parent = d->dev;
- ir->dev.release = lirc_release;
- dev_set_name(&ir->dev, "lirc%d", ir->d.minor);
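+	/* Allocate a free minor dynamically instead of scanning a fixed table of slots */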
+ minor = ida_simple_get(&lirc_ida, 0, LIRC_MAX_DEVICES, GFP_KERNEL);
+ if (minor < 0)
+ return minor;
- cdev_init(&ir->cdev, d->fops);
- ir->cdev.owner = ir->d.owner;
- ir->cdev.kobj.parent = &ir->dev.kobj;
-
- err = cdev_add(&ir->cdev, ir->dev.devt, 1);
- if (err)
- goto out_free_dev;
-
- ir->attached = 1;
-
- err = device_add(&ir->dev);
- if (err)
- goto out_cdev;
-
- mutex_unlock(&lirc_dev_lock);
+ d->minor = minor;
+ d->dev.devt = MKDEV(MAJOR(lirc_base_dev), d->minor);
+ dev_set_name(&d->dev, "lirc%d", d->minor);
- get_device(ir->dev.parent);
+ cdev_init(&d->cdev, d->fops);
+ d->cdev.owner = d->owner;
+ d->attached = true;
- dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
- ir->d.name, ir->d.minor);
+ err = cdev_device_add(&d->cdev, &d->dev);
+ if (err) {
+ ida_simple_remove(&lirc_ida, minor);
+ return err;
+ }
- return minor;
+ get_device(d->dev.parent);
-out_cdev:
- cdev_del(&ir->cdev);
-out_free_dev:
- put_device(&ir->dev);
-out_lock:
- mutex_unlock(&lirc_dev_lock);
+ dev_info(&d->dev, "lirc_dev: driver %s registered at minor = %d\n",
+ d->name, d->minor);
- return err;
+ return 0;
}
-EXPORT_SYMBOL(lirc_register_driver);
+EXPORT_SYMBOL(lirc_register_device);
-int lirc_unregister_driver(int minor)
+void lirc_unregister_device(struct lirc_dev *d)
{
- struct irctl *ir;
+ if (!d)
+ return;
- if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
- pr_err("minor (%d) must be between 0 and %d!\n",
- minor, MAX_IRCTL_DEVICES - 1);
- return -EBADRQC;
- }
-
- ir = irctls[minor];
- if (!ir) {
- pr_err("failed to get irctl\n");
- return -ENOENT;
- }
+ dev_dbg(&d->dev, "lirc_dev: driver %s unregistered from minor = %d\n",
+ d->name, d->minor);
- mutex_lock(&lirc_dev_lock);
+ mutex_lock(&d->mutex);
- if (ir->d.minor != minor) {
- dev_err(ir->d.dev, "lirc_dev: minor %d device not registered\n",
- minor);
- mutex_unlock(&lirc_dev_lock);
- return -ENOENT;
+ d->attached = false;
+ if (d->open) {
+ dev_dbg(&d->dev, LOGHEAD "releasing opened driver\n",
+ d->name, d->minor);
+ wake_up_interruptible(&d->buf->wait_poll);
}
- dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n",
- ir->d.name, ir->d.minor);
-
- ir->attached = 0;
- if (ir->open) {
- dev_dbg(ir->d.dev, LOGHEAD "releasing opened driver\n",
- ir->d.name, ir->d.minor);
- wake_up_interruptible(&ir->buf->wait_poll);
- }
+ mutex_unlock(&d->mutex);
- mutex_unlock(&lirc_dev_lock);
-
- device_del(&ir->dev);
- cdev_del(&ir->cdev);
- put_device(&ir->dev);
-
- return 0;
+ cdev_device_del(&d->cdev, &d->dev);
+ ida_simple_remove(&lirc_ida, d->minor);
+ put_device(&d->dev);
}
-EXPORT_SYMBOL(lirc_unregister_driver);
+EXPORT_SYMBOL(lirc_unregister_device);
int lirc_dev_fop_open(struct inode *inode, struct file *file)
{
- struct irctl *ir;
- int retval = 0;
-
- if (iminor(inode) >= MAX_IRCTL_DEVICES) {
- pr_err("open result for %d is -ENODEV\n", iminor(inode));
- return -ENODEV;
- }
-
- if (mutex_lock_interruptible(&lirc_dev_lock))
- return -ERESTARTSYS;
-
- ir = irctls[iminor(inode)];
- mutex_unlock(&lirc_dev_lock);
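+	/* The cdev is embedded in struct lirc_dev, so the device is recovered directly from the inode */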
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+ int retval;
- if (!ir) {
- retval = -ENODEV;
- goto error;
- }
+ dev_dbg(&d->dev, LOGHEAD "open called\n", d->name, d->minor);
- dev_dbg(ir->d.dev, LOGHEAD "open called\n", ir->d.name, ir->d.minor);
+ retval = mutex_lock_interruptible(&d->mutex);
+ if (retval)
+ return retval;
- if (ir->d.minor == NOPLUG) {
+ if (!d->attached) {
retval = -ENODEV;
- goto error;
+ goto out;
}
- if (ir->open) {
+ if (d->open) {
retval = -EBUSY;
- goto error;
+ goto out;
}
- if (ir->d.rdev) {
- retval = rc_open(ir->d.rdev);
+ if (d->rdev) {
+ retval = rc_open(d->rdev);
if (retval)
- goto error;
+ goto out;
}
- if (ir->buf)
- lirc_buffer_clear(ir->buf);
+ if (d->buf)
+ lirc_buffer_clear(d->buf);
- ir->open++;
+ d->open++;
-error:
+ lirc_init_pdata(inode, file);
nonseekable_open(inode, file);
+ mutex_unlock(&d->mutex);
+
+ return 0;
+out:
+ mutex_unlock(&d->mutex);
return retval;
}
EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
- struct irctl *ir = irctls[iminor(inode)];
- int ret;
-
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -EINVAL;
- }
+ struct lirc_dev *d = file->private_data;
- ret = mutex_lock_killable(&lirc_dev_lock);
- WARN_ON(ret);
+ mutex_lock(&d->mutex);
- rc_close(ir->d.rdev);
+ rc_close(d->rdev);
+ d->open--;
- ir->open--;
- if (!ret)
- mutex_unlock(&lirc_dev_lock);
+ mutex_unlock(&d->mutex);
return 0;
}
@@ -359,29 +274,24 @@ EXPORT_SYMBOL(lirc_dev_fop_close);
unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned int ret;
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return POLLERR;
- }
-
- if (!ir->attached)
+ if (!d->attached)
return POLLHUP | POLLERR;
- if (ir->buf) {
- poll_wait(file, &ir->buf->wait_poll, wait);
+ if (d->buf) {
+ poll_wait(file, &d->buf->wait_poll, wait);
- if (lirc_buffer_empty(ir->buf))
+ if (lirc_buffer_empty(d->buf))
ret = 0;
else
ret = POLLIN | POLLRDNORM;
- } else
+ } else {
ret = POLLERR;
+ }
- dev_dbg(ir->d.dev, LOGHEAD "poll result = %d\n",
- ir->d.name, ir->d.minor, ret);
+ dev_dbg(&d->dev, LOGHEAD "poll result = %d\n", d->name, d->minor, ret);
return ret;
}
@@ -389,48 +299,44 @@ EXPORT_SYMBOL(lirc_dev_fop_poll);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
+ struct lirc_dev *d = file->private_data;
__u32 mode;
- int result = 0;
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ int result;
- if (!ir) {
- pr_err("no irctl found!\n");
- return -ENODEV;
- }
+ dev_dbg(&d->dev, LOGHEAD "ioctl called (0x%x)\n",
+ d->name, d->minor, cmd);
- dev_dbg(ir->d.dev, LOGHEAD "ioctl called (0x%x)\n",
- ir->d.name, ir->d.minor, cmd);
+ result = mutex_lock_interruptible(&d->mutex);
+ if (result)
+ return result;
- if (ir->d.minor == NOPLUG || !ir->attached) {
- dev_err(ir->d.dev, LOGHEAD "ioctl result = -ENODEV\n",
- ir->d.name, ir->d.minor);
- return -ENODEV;
+ if (!d->attached) {
+ result = -ENODEV;
+ goto out;
}
- mutex_lock(&ir->irctl_lock);
-
switch (cmd) {
case LIRC_GET_FEATURES:
- result = put_user(ir->d.features, (__u32 __user *)arg);
+ result = put_user(d->features, (__u32 __user *)arg);
break;
case LIRC_GET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = put_user(LIRC_REC2MODE
- (ir->d.features & LIRC_CAN_REC_MASK),
+ (d->features & LIRC_CAN_REC_MASK),
(__u32 __user *)arg);
break;
case LIRC_SET_REC_MODE:
- if (!LIRC_CAN_REC(ir->d.features)) {
+ if (!LIRC_CAN_REC(d->features)) {
result = -ENOTTY;
break;
}
result = get_user(mode, (__u32 __user *)arg);
- if (!result && !(LIRC_MODE2REC(mode) & ir->d.features))
+ if (!result && !(LIRC_MODE2REC(mode) & d->features))
result = -EINVAL;
/*
* FIXME: We should actually set the mode somehow but
@@ -438,32 +344,14 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
break;
case LIRC_GET_LENGTH:
- result = put_user(ir->d.code_length, (__u32 __user *)arg);
- break;
- case LIRC_GET_MIN_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.min_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.min_timeout, (__u32 __user *)arg);
- break;
- case LIRC_GET_MAX_TIMEOUT:
- if (!(ir->d.features & LIRC_CAN_SET_REC_TIMEOUT) ||
- ir->d.max_timeout == 0) {
- result = -ENOTTY;
- break;
- }
-
- result = put_user(ir->d.max_timeout, (__u32 __user *)arg);
+ result = put_user(d->code_length, (__u32 __user *)arg);
break;
default:
result = -ENOTTY;
}
- mutex_unlock(&ir->irctl_lock);
-
+out:
+ mutex_unlock(&d->mutex);
return result;
}
EXPORT_SYMBOL(lirc_dev_fop_ioctl);
@@ -473,35 +361,34 @@ ssize_t lirc_dev_fop_read(struct file *file,
size_t length,
loff_t *ppos)
{
- struct irctl *ir = irctls[iminor(file_inode(file))];
+ struct lirc_dev *d = file->private_data;
unsigned char *buf;
- int ret = 0, written = 0;
+ int ret, written = 0;
DECLARE_WAITQUEUE(wait, current);
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -ENODEV;
- }
-
- if (!LIRC_CAN_REC(ir->d.features))
- return -EINVAL;
-
- dev_dbg(ir->d.dev, LOGHEAD "read called\n", ir->d.name, ir->d.minor);
-
- buf = kzalloc(ir->chunk_size, GFP_KERNEL);
+ buf = kzalloc(d->buf->chunk_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- goto out_unlocked;
+ dev_dbg(&d->dev, LOGHEAD "read called\n", d->name, d->minor);
+
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ kfree(buf);
+ return ret;
}
- if (!ir->attached) {
+
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
- if (length % ir->chunk_size) {
+ if (!LIRC_CAN_REC(d->features)) {
+ ret = -EINVAL;
+ goto out_locked;
+ }
+
+ if (length % d->buf->chunk_size) {
ret = -EINVAL;
goto out_locked;
}
@@ -511,14 +398,14 @@ ssize_t lirc_dev_fop_read(struct file *file,
* to avoid losing scan code (in case when queue is awaken somewhere
* between while condition checking and scheduling)
*/
- add_wait_queue(&ir->buf->wait_poll, &wait);
+ add_wait_queue(&d->buf->wait_poll, &wait);
/*
* while we didn't provide 'length' bytes, device is opened in blocking
* mode and 'copy_to_user' is happy, wait for data.
*/
while (written < length && ret == 0) {
- if (lirc_buffer_empty(ir->buf)) {
+ if (lirc_buffer_empty(d->buf)) {
/* According to the read(2) man page, 'written' can be
* returned as less than 'length', instead of blocking
* again, returning -EWOULDBLOCK, or returning
@@ -535,36 +422,36 @@ ssize_t lirc_dev_fop_read(struct file *file,
break;
}
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
set_current_state(TASK_INTERRUPTIBLE);
schedule();
set_current_state(TASK_RUNNING);
- if (mutex_lock_interruptible(&ir->irctl_lock)) {
- ret = -ERESTARTSYS;
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ ret = mutex_lock_interruptible(&d->mutex);
+ if (ret) {
+ remove_wait_queue(&d->buf->wait_poll, &wait);
goto out_unlocked;
}
- if (!ir->attached) {
+ if (!d->attached) {
ret = -ENODEV;
goto out_locked;
}
} else {
- lirc_buffer_read(ir->buf, buf);
+ lirc_buffer_read(d->buf, buf);
ret = copy_to_user((void __user *)buffer+written, buf,
- ir->buf->chunk_size);
+ d->buf->chunk_size);
if (!ret)
- written += ir->buf->chunk_size;
+ written += d->buf->chunk_size;
else
ret = -EFAULT;
}
}
- remove_wait_queue(&ir->buf->wait_poll, &wait);
+ remove_wait_queue(&d->buf->wait_poll, &wait);
out_locked:
- mutex_unlock(&ir->irctl_lock);
+ mutex_unlock(&d->mutex);
out_unlocked:
kfree(buf);
@@ -573,9 +460,19 @@ out_unlocked:
}
EXPORT_SYMBOL(lirc_dev_fop_read);
+void lirc_init_pdata(struct inode *inode, struct file *file)
+{
+ struct lirc_dev *d = container_of(inode->i_cdev, struct lirc_dev, cdev);
+
+ file->private_data = d;
+}
+EXPORT_SYMBOL(lirc_init_pdata);
+
void *lirc_get_pdata(struct file *file)
{
- return irctls[iminor(file_inode(file))]->d.data;
+ struct lirc_dev *d = file->private_data;
+
+ return d->data;
}
EXPORT_SYMBOL(lirc_get_pdata);
@@ -590,7 +487,7 @@ static int __init lirc_dev_init(void)
return PTR_ERR(lirc_class);
}
- retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES,
+ retval = alloc_chrdev_region(&lirc_base_dev, 0, LIRC_MAX_DEVICES,
"BaseRemoteCtl");
if (retval) {
class_destroy(lirc_class);
@@ -607,7 +504,7 @@ static int __init lirc_dev_init(void)
static void __exit lirc_dev_exit(void)
{
class_destroy(lirc_class);
- unregister_chrdev_region(lirc_base_dev, MAX_IRCTL_DEVICES);
+ unregister_chrdev_region(lirc_base_dev, LIRC_MAX_DEVICES);
pr_info("module unloaded\n");
}
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index bf7aaff3aa37..a9187b0b46a1 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -188,6 +188,8 @@ enum mceusb_model_type {
TIVO_KIT,
MCE_GEN2_NO_TX,
HAUPPAUGE_CX_HYBRID_TV,
+ EVROMEDIA_FULL_HYBRID_FULLHD,
+ ASTROMETA_T2HYBRID,
};
struct mceusb_model {
@@ -247,9 +249,19 @@ static const struct mceusb_model mceusb_model[] = {
.mce_gen2 = 1,
.rc_map = RC_MAP_TIVO,
},
+ [EVROMEDIA_FULL_HYBRID_FULLHD] = {
+ .name = "Evromedia USB Full Hybrid Full HD",
+ .no_tx = 1,
+ .rc_map = RC_MAP_MSI_DIGIVOX_III,
+ },
+ [ASTROMETA_T2HYBRID] = {
+ .name = "Astrometa T2Hybrid",
+ .no_tx = 1,
+ .rc_map = RC_MAP_ASTROMETA_T2HYBRID,
+ }
};
-static struct usb_device_id mceusb_dev_table[] = {
+static const struct usb_device_id mceusb_dev_table[] = {
/* Original Microsoft MCE IR Transceiver (often HP-branded) */
{ USB_DEVICE(VENDOR_MICROSOFT, 0x006d),
.driver_info = MCE_GEN1 },
@@ -398,6 +410,12 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
/* Adaptec / HP eHome Receiver */
{ USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
+ /* Evromedia USB Full Hybrid Full HD */
+ { USB_DEVICE(0x1b80, 0xd3b2),
+ .driver_info = EVROMEDIA_FULL_HYBRID_FULLHD },
+ /* Astrometa T2hybrid */
+ { USB_DEVICE(0x15f4, 0x0135),
+ .driver_info = ASTROMETA_T2HYBRID },
/* Terminating entry */
{ }
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7da9c96cb058..ae4dd0c27731 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -106,7 +106,7 @@ struct ir_raw_event_ctrl {
} mce_kbd;
struct lirc_codec {
struct rc_dev *dev;
- struct lirc_driver *drv;
+ struct lirc_dev *ldev;
int carrier_low;
ktime_t gap_start;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 503bc425a187..f6e5ba4fbb49 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -471,9 +471,10 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
-static void edge_handle(unsigned long arg)
+static void edge_handle(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)arg;
+ struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
+ struct rc_dev *dev = raw->dev;
ktime_t interval = ktime_sub(ktime_get(), dev->raw->last_event);
if (ktime_to_ns(interval) >= dev->timeout) {
@@ -513,8 +514,7 @@ int ir_raw_event_prepare(struct rc_dev *dev)
dev->raw->dev = dev;
dev->change_protocol = change_protocol;
- setup_timer(&dev->raw->edge_handle, edge_handle,
- (unsigned long)dev);
+ timer_setup(&dev->raw->edge_handle, edge_handle, 0);
INIT_KFIFO(dev->raw->kfifo);
return 0;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 981cccd6b988..17950e29d4e3 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/rc-core.h>
+#include <linux/bsearch.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -439,9 +440,6 @@ static int ir_setkeytable(struct rc_dev *dev,
if (rc)
return rc;
- IR_dprintk(1, "Allocated space for %u keycode entries (%u bytes)\n",
- rc_map->size, rc_map->alloc);
-
for (i = 0; i < from->size; i++) {
index = ir_establish_scancode(dev, rc_map,
from->scan[i].scancode, false);
@@ -460,6 +458,18 @@ static int ir_setkeytable(struct rc_dev *dev,
return rc;
}
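+/* bsearch() helper: compare a scancode key against an rc_map_table entry */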
+static int rc_map_cmp(const void *key, const void *elt)
+{
+ const unsigned int *scancode = key;
+ const struct rc_map_table *e = elt;
+
+ if (*scancode < e->scancode)
+ return -1;
+ else if (*scancode > e->scancode)
+ return 1;
+ return 0;
+}
+
/**
* ir_lookup_by_scancode() - locate mapping by scancode
* @rc_map: the struct rc_map to search
@@ -472,21 +482,14 @@ static int ir_setkeytable(struct rc_dev *dev,
static unsigned int ir_lookup_by_scancode(const struct rc_map *rc_map,
unsigned int scancode)
{
- int start = 0;
- int end = rc_map->len - 1;
- int mid;
-
- while (start <= end) {
- mid = (start + end) / 2;
- if (rc_map->scan[mid].scancode < scancode)
- start = mid + 1;
- else if (rc_map->scan[mid].scancode > scancode)
- end = mid - 1;
- else
- return mid;
- }
+ struct rc_map_table *res;
- return -1U;
+ res = bsearch(&scancode, rc_map->scan, rc_map->len,
+ sizeof(struct rc_map_table), rc_map_cmp);
+ if (!res)
+ return -1U;
+ else
+ return res - rc_map->scan;
}
/**
@@ -627,9 +630,9 @@ EXPORT_SYMBOL_GPL(rc_keyup);
* This routine will generate a keyup event some time after a keydown event
* is generated when no further activity has been detected.
*/
-static void ir_timer_keyup(unsigned long cookie)
+static void ir_timer_keyup(struct timer_list *t)
{
- struct rc_dev *dev = (struct rc_dev *)cookie;
+ struct rc_dev *dev = from_timer(dev, t, timer_keyup);
unsigned long flags;
/*
@@ -1480,6 +1483,8 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
+ if (dev->device_name)
+ ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
return 0;
}
@@ -1487,7 +1492,10 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
/*
* Static device attribute struct with the sysfs attributes for IR's
*/
-static DEVICE_ATTR(protocols, 0644, show_protocols, store_protocols);
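+/* "protocols" is read-only when the driver cannot change protocol, read-write otherwise */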
+static struct device_attribute dev_attr_ro_protocols =
+__ATTR(protocols, 0444, show_protocols, NULL);
+static struct device_attribute dev_attr_rw_protocols =
+__ATTR(protocols, 0644, show_protocols, store_protocols);
static DEVICE_ATTR(wakeup_protocols, 0644, show_wakeup_protocols,
store_wakeup_protocols);
static RC_FILTER_ATTR(filter, S_IRUGO|S_IWUSR,
@@ -1499,13 +1507,22 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
show_filter, store_filter, RC_FILTER_WAKEUP, true);
-static struct attribute *rc_dev_protocol_attrs[] = {
- &dev_attr_protocols.attr,
+static struct attribute *rc_dev_rw_protocol_attrs[] = {
+ &dev_attr_rw_protocols.attr,
+ NULL,
+};
+
+static const struct attribute_group rc_dev_rw_protocol_attr_grp = {
+ .attrs = rc_dev_rw_protocol_attrs,
+};
+
+static struct attribute *rc_dev_ro_protocol_attrs[] = {
+ &dev_attr_ro_protocols.attr,
NULL,
};
-static const struct attribute_group rc_dev_protocol_attr_grp = {
- .attrs = rc_dev_protocol_attrs,
+static const struct attribute_group rc_dev_ro_protocol_attr_grp = {
+ .attrs = rc_dev_ro_protocol_attrs,
};
static struct attribute *rc_dev_filter_attrs[] = {
@@ -1529,7 +1546,7 @@ static const struct attribute_group rc_dev_wakeup_filter_attr_grp = {
.attrs = rc_dev_wakeup_filter_attrs,
};
-static struct device_type rc_dev_type = {
+static const struct device_type rc_dev_type = {
.release = rc_dev_release,
.uevent = rc_dev_uevent,
};
@@ -1553,8 +1570,7 @@ struct rc_dev *rc_allocate_device(enum rc_driver_type type)
dev->input_dev->setkeycode = ir_setkeycode;
input_set_drvdata(dev->input_dev, dev);
- setup_timer(&dev->timer_keyup, ir_timer_keyup,
- (unsigned long)dev);
+ timer_setup(&dev->timer_keyup, ir_timer_keyup, 0);
spin_lock_init(&dev->rc_map.lock);
spin_lock_init(&dev->keylock);
@@ -1638,6 +1654,9 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
rc_proto = BIT_ULL(rc_map->rc_proto);
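+	/* Hardware decoders that cannot switch protocols report all allowed protocols as enabled */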
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->enabled_protocols = dev->allowed_protocols;
+
if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_proto);
if (rc < 0)
@@ -1729,8 +1748,10 @@ int rc_register_device(struct rc_dev *dev)
dev_set_drvdata(&dev->dev, dev);
dev->dev.groups = dev->sysfs_groups;
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
- dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+ if (dev->driver_type == RC_DRIVER_SCANCODE && !dev->change_protocol)
+ dev->sysfs_groups[attr++] = &rc_dev_ro_protocol_attr_grp;
+ else if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
+ dev->sysfs_groups[attr++] = &rc_dev_rw_protocol_attr_grp;
if (dev->s_filter)
dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
if (dev->s_wakeup_filter)
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 6784cb9fc4e7..6bfc24885b5c 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -186,7 +186,7 @@ struct redrat3_error {
} __packed;
/* table of devices that work with this driver */
-static struct usb_device_id redrat3_dev_table[] = {
+static const struct usb_device_id redrat3_dev_table[] = {
/* Original version of the RedRat3 */
{USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)},
/* Second Version/release of the RedRat3 - RetRat3-II */
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 8b66926bc16a..8bf5637b3a69 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -470,7 +470,7 @@ static int hardware_init_port(void)
return 0;
}
-static void serial_ir_timeout(unsigned long arg)
+static void serial_ir_timeout(struct timer_list *unused)
{
DEFINE_IR_RAW_EVENT(ev);
@@ -540,8 +540,7 @@ static int serial_ir_probe(struct platform_device *dev)
serial_ir.rcdev = rcdev;
- setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
- (unsigned long)&serial_ir);
+ timer_setup(&serial_ir.timeout_timer, serial_ir_timeout, 0);
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index bc906fb128d5..76120664b700 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -120,7 +120,7 @@ static void add_read_queue(int flag, unsigned long val)
}
/* SECTION: Hardware */
-static void sir_timeout(unsigned long data)
+static void sir_timeout(struct timer_list *unused)
{
/*
* if last received signal was a pulse, but receiving stopped
@@ -321,7 +321,7 @@ static int sir_ir_probe(struct platform_device *dev)
rcdev->timeout = IR_DEFAULT_TIMEOUT;
rcdev->dev.parent = &sir_ir_dev->dev;
- setup_timer(&timerlist, sir_timeout, 0);
+ timer_setup(&timerlist, sir_timeout, 0);
/* get I/O port access and IRQ line */
if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index f03a174ddf9d..4eebfcfc10f3 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -43,7 +43,7 @@
#define USB_STREAMZAP_PRODUCT_ID 0x0000
/* table of devices that work with this driver */
-static struct usb_device_id streamzap_table[] = {
+static const struct usb_device_id streamzap_table[] = {
/* Streamzap Remote Control */
{ USB_DEVICE(USB_STREAMZAP_VENDOR_ID, USB_STREAMZAP_PRODUCT_ID) },
/* Terminating entry */
diff --git a/drivers/media/rc/tango-ir.c b/drivers/media/rc/tango-ir.c
new file mode 100644
index 000000000000..9d4c17230c3a
--- /dev/null
+++ b/drivers/media/rc/tango-ir.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <media/rc-core.h>
+
+#define DRIVER_NAME "tango-ir"
+
+#define IR_NEC_CTRL 0x00
+#define IR_NEC_DATA 0x04
+#define IR_CTRL 0x08
+#define IR_RC5_CLK_DIV 0x0c
+#define IR_RC5_DATA 0x10
+#define IR_INT 0x14
+
+#define NEC_TIME_BASE 560
+#define RC5_TIME_BASE 1778
+
+#define RC6_CTRL 0x00
+#define RC6_CLKDIV 0x04
+#define RC6_DATA0 0x08
+#define RC6_DATA1 0x0c
+#define RC6_DATA2 0x10
+#define RC6_DATA3 0x14
+#define RC6_DATA4 0x18
+
+#define RC6_CARRIER 36000
+#define RC6_TIME_BASE 16
+
+#define NEC_CAP(n) ((n) << 24)
+#define GPIO_SEL(n) ((n) << 16)
+#define DISABLE_NEC (BIT(4) | BIT(8))
+#define ENABLE_RC5 (BIT(0) | BIT(9))
+#define ENABLE_RC6 (BIT(0) | BIT(7))
+#define ACK_IR_INT (BIT(0) | BIT(1))
+#define ACK_RC6_INT (BIT(31))
+
+#define NEC_ANY (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32)
+
+struct tango_ir {
+ void __iomem *rc5_base;
+ void __iomem *rc6_base;
+ struct rc_dev *rc;
+ struct clk *clk;
+};
+
+static void tango_ir_handle_nec(struct tango_ir *ir)
+{
+ u32 v, code;
+ enum rc_proto proto;
+
+ v = readl_relaxed(ir->rc5_base + IR_NEC_DATA);
+ if (!v) {
+ rc_repeat(ir->rc);
+ return;
+ }
+
+ code = ir_nec_bytes_to_scancode(v, v >> 8, v >> 16, v >> 24, &proto);
+ rc_keydown(ir->rc, proto, code, 0);
+}
+
+static void tango_ir_handle_rc5(struct tango_ir *ir)
+{
+ u32 data, field, toggle, addr, cmd, code;
+
+ data = readl_relaxed(ir->rc5_base + IR_RC5_DATA);
+ if (data & BIT(31))
+ return;
+
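+	/* RC5: bit 12 = field (inverted command bit 6), bit 11 = toggle, bits 10:6 = address, bits 5:0 = command */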
+ field = data >> 12 & 1;
+ toggle = data >> 11 & 1;
+ addr = data >> 6 & 0x1f;
+ cmd = (data & 0x3f) | (field ^ 1) << 6;
+
+ code = RC_SCANCODE_RC5(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC5, code, toggle);
+}
+
+static void tango_ir_handle_rc6(struct tango_ir *ir)
+{
+ u32 data0, data1, toggle, mode, addr, cmd, code;
+
+ data0 = readl_relaxed(ir->rc6_base + RC6_DATA0);
+ data1 = readl_relaxed(ir->rc6_base + RC6_DATA1);
+
+ mode = data0 >> 1 & 7;
+ if (mode != 0)
+ return;
+
+ toggle = data0 & 1;
+ addr = data0 >> 16;
+ cmd = data1;
+
+ code = RC_SCANCODE_RC6_0(addr, cmd);
+ rc_keydown(ir->rc, RC_PROTO_RC6_0, code, toggle);
+}
+
+static irqreturn_t tango_ir_irq(int irq, void *dev_id)
+{
+ struct tango_ir *ir = dev_id;
+ unsigned int rc5_stat;
+ unsigned int rc6_stat;
+
+ rc5_stat = readl_relaxed(ir->rc5_base + IR_INT);
+ writel_relaxed(rc5_stat, ir->rc5_base + IR_INT);
+
+ rc6_stat = readl_relaxed(ir->rc6_base + RC6_CTRL);
+ writel_relaxed(rc6_stat, ir->rc6_base + RC6_CTRL);
+
+ if (!(rc5_stat & 3) && !(rc6_stat & BIT(31)))
+ return IRQ_NONE;
+
+ if (rc5_stat & BIT(0))
+ tango_ir_handle_rc5(ir);
+
+ if (rc5_stat & BIT(1))
+ tango_ir_handle_nec(ir);
+
+ if (rc6_stat & BIT(31))
+ tango_ir_handle_rc6(ir);
+
+ return IRQ_HANDLED;
+}
+
+static int tango_change_protocol(struct rc_dev *dev, u64 *rc_type)
+{
+ struct tango_ir *ir = dev->priv;
+ u32 rc5_ctrl = DISABLE_NEC;
+ u32 rc6_ctrl = 0;
+
+ if (*rc_type & NEC_ANY)
+ rc5_ctrl = 0;
+
+ if (*rc_type & RC_PROTO_BIT_RC5)
+ rc5_ctrl |= ENABLE_RC5;
+
+ if (*rc_type & RC_PROTO_BIT_RC6_0)
+ rc6_ctrl = ENABLE_RC6;
+
+ writel_relaxed(rc5_ctrl, ir->rc5_base + IR_CTRL);
+ writel_relaxed(rc6_ctrl, ir->rc6_base + RC6_CTRL);
+
+ return 0;
+}
+
+static int tango_ir_probe(struct platform_device *pdev)
+{
+ const char *map_name = RC_MAP_TANGO;
+ struct device *dev = &pdev->dev;
+ struct rc_dev *rc;
+ struct tango_ir *ir;
+ struct resource *rc5_res;
+ struct resource *rc6_res;
+ u64 clkrate, clkdiv;
+ int irq, err;
+ u32 val;
+
+ rc5_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!rc5_res)
+ return -EINVAL;
+
+ rc6_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!rc6_res)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ ir = devm_kzalloc(dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ ir->rc5_base = devm_ioremap_resource(dev, rc5_res);
+ if (IS_ERR(ir->rc5_base))
+ return PTR_ERR(ir->rc5_base);
+
+ ir->rc6_base = devm_ioremap_resource(dev, rc6_res);
+ if (IS_ERR(ir->rc6_base))
+ return PTR_ERR(ir->rc6_base);
+
+ ir->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ir->clk))
+ return PTR_ERR(ir->clk);
+
+ rc = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
+ if (!rc)
+ return -ENOMEM;
+
+ of_property_read_string(dev->of_node, "linux,rc-map-name", &map_name);
+
+ rc->device_name = DRIVER_NAME;
+ rc->driver_name = DRIVER_NAME;
+ rc->input_phys = DRIVER_NAME "/input0";
+ rc->map_name = map_name;
+ rc->allowed_protocols = NEC_ANY | RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_0;
+ rc->change_protocol = tango_change_protocol;
+ rc->priv = ir;
+ ir->rc = rc;
+
+ err = clk_prepare_enable(ir->clk);
+ if (err)
+ return err;
+
+ clkrate = clk_get_rate(ir->clk);
+
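+	/* Scale the 560us NEC time base into IR clock cycles */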
+ clkdiv = clkrate * NEC_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ val = NEC_CAP(31) | GPIO_SEL(12) | clkdiv;
+ writel_relaxed(val, ir->rc5_base + IR_NEC_CTRL);
+
+ clkdiv = clkrate * RC5_TIME_BASE;
+ do_div(clkdiv, 1000000);
+
+ writel_relaxed(DISABLE_NEC, ir->rc5_base + IR_CTRL);
+ writel_relaxed(clkdiv, ir->rc5_base + IR_RC5_CLK_DIV);
+ writel_relaxed(ACK_IR_INT, ir->rc5_base + IR_INT);
+
+ clkdiv = clkrate * RC6_TIME_BASE;
+ do_div(clkdiv, RC6_CARRIER);
+
+ writel_relaxed(ACK_RC6_INT, ir->rc6_base + RC6_CTRL);
+ writel_relaxed((clkdiv >> 2) << 18 | clkdiv, ir->rc6_base + RC6_CLKDIV);
+
+ err = devm_request_irq(dev, irq, tango_ir_irq, IRQF_SHARED,
+ dev_name(dev), ir);
+ if (err)
+ goto err_clk;
+
+ err = devm_rc_register_device(dev, rc);
+ if (err)
+ goto err_clk;
+
+ platform_set_drvdata(pdev, ir);
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(ir->clk);
+ return err;
+}
+
+static int tango_ir_remove(struct platform_device *pdev)
+{
+ struct tango_ir *ir = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ir->clk);
+ return 0;
+}
+
+static const struct of_device_id tango_ir_dt_ids[] = {
+ { .compatible = "sigma,smp8642-ir" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tango_ir_dt_ids);
+
+static struct platform_driver tango_ir_driver = {
+ .probe = tango_ir_probe,
+ .remove = tango_ir_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = tango_ir_dt_ids,
+ },
+};
+module_platform_driver(tango_ir_driver);
+
+MODULE_DESCRIPTION("SMP86xx IR decoder driver");
+MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
+MODULE_LICENSE("GPL");
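
For reference, a minimal sketch of the clock-divider arithmetic used in tango_ir_probe() above. The helper name and parameters are illustrative only, not part of the driver; do_div() is used so the 64-by-32 division also works on 32-bit targets.

#include <linux/types.h>
#include <asm/div64.h>

/* clkrate in Hz, time base in microseconds: returns clkrate * time_base / 1e6 */
static u32 ir_clkdiv_example(unsigned long clkrate, unsigned int time_base_us)
{
	u64 div = (u64)clkrate * time_base_us;

	do_div(div, 1000000);	/* do_div() stores the quotient back in div */
	return div;
}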
diff --git a/drivers/media/tuners/Makefile b/drivers/media/tuners/Makefile
index 06a9ab65e5fa..7be96511532d 100644
--- a/drivers/media/tuners/Makefile
+++ b/drivers/media/tuners/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for common V4L/DVB tuners
#
diff --git a/drivers/media/tuners/fc0011.h b/drivers/media/tuners/fc0011.h
index 438cf897acd1..a36871c44c8c 100644
--- a/drivers/media/tuners/fc0011.h
+++ b/drivers/media/tuners/fc0011.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_FC0011_H_
#define LINUX_FC0011_H_
diff --git a/drivers/media/tuners/mt2063.h b/drivers/media/tuners/mt2063.h
index e55e0a6dd1be..0e3e3b0525bb 100644
--- a/drivers/media/tuners/mt2063.h
+++ b/drivers/media/tuners/mt2063.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MT2063_H__
#define __MT2063_H__
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 738b993ec8b0..21e46b10caa5 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the USB media device drivers
#
diff --git a/drivers/media/usb/as102/Makefile b/drivers/media/usb/as102/Makefile
index 22f43eee4a3b..56bd2d00b920 100644
--- a/drivers/media/usb/as102/Makefile
+++ b/drivers/media/usb/as102/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dvb-as102-objs := as102_drv.o as102_fw.o as10x_cmd.o as10x_cmd_stream.o \
as102_usb_drv.o as10x_cmd_cfg.o
diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
index 5a28ce3a1d49..38dbc128340d 100644
--- a/drivers/media/usb/as102/as102_fw.c
+++ b/drivers/media/usb/as102/as102_fw.c
@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
unsigned char *cmd,
const struct firmware *firmware) {
- struct as10x_fw_pkt_t fw_pkt;
+ struct as10x_fw_pkt_t *fw_pkt;
int total_read_bytes = 0, errno = 0;
unsigned char addr_has_changed = 0;
+ fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
+ if (!fw_pkt)
+ return -ENOMEM;
+
+
for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
int read_bytes = 0, data_len = 0;
/* parse intel hex line */
read_bytes = parse_hex_line(
(u8 *) (firmware->data + total_read_bytes),
- fw_pkt.raw.address,
- fw_pkt.raw.data,
+ fw_pkt->raw.address,
+ fw_pkt->raw.data,
&data_len,
&addr_has_changed);
@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
/* detect the end of file */
total_read_bytes += read_bytes;
if (total_read_bytes == firmware->size) {
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x03;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x03;
/* send EOF command */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt, 2, 0);
+ fw_pkt, 2, 0);
if (errno < 0)
goto error;
} else {
if (!addr_has_changed) {
/* prepare command to send */
- fw_pkt.u.request[0] = 0x00;
- fw_pkt.u.request[1] = 0x01;
+ fw_pkt->u.request[0] = 0x00;
+ fw_pkt->u.request[1] = 0x01;
- data_len += sizeof(fw_pkt.u.request);
- data_len += sizeof(fw_pkt.raw.address);
+ data_len += sizeof(fw_pkt->u.request);
+ data_len += sizeof(fw_pkt->raw.address);
/* send cmd to device */
errno = bus_adap->ops->upload_fw_pkt(bus_adap,
(uint8_t *)
- &fw_pkt,
+ fw_pkt,
data_len,
0);
if (errno < 0)
@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
}
}
error:
+ kfree(fw_pkt);
return (errno == 0) ? total_read_bytes : errno;
}
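
The hunk above replaces a large on-stack firmware packet with a heap allocation. A rough, generic sketch of that pattern follows; the struct, its size, and the function name are placeholders, not the as102 code.

#include <linux/slab.h>
#include <linux/firmware.h>

struct fw_pkt_example {		/* stand-in for a struct too big for the stack */
	u8 raw[4096];
};

static int upload_example(const struct firmware *fw)
{
	struct fw_pkt_example *pkt;
	int ret = 0;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);	/* was: an on-stack variable */
	if (!pkt)
		return -ENOMEM;

	/* ... parse fw->data into pkt->raw and send it to the device ... */

	kfree(pkt);					/* freed on every exit path */
	return ret;
}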
diff --git a/drivers/media/usb/au0828/Makefile b/drivers/media/usb/au0828/Makefile
index 3dc7539a5c4e..c06ef6601f2d 100644
--- a/drivers/media/usb/au0828/Makefile
+++ b/drivers/media/usb/au0828/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
au0828-objs := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o
ifeq ($(CONFIG_VIDEO_AU0828_V4L2),y)
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 34dc7e062471..d9093a3c57c5 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -105,9 +105,9 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
static void au0828_restart_dvb_streaming(struct work_struct *work);
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
dprintk(1, "%s called\n", __func__);
dev->bulk_timeout_running = 0;
@@ -648,9 +648,7 @@ int au0828_dvb_register(struct au0828_dev *dev)
return ret;
}
- dev->bulk_timeout.function = au0828_bulk_timeout;
- dev->bulk_timeout.data = (unsigned long) dev;
- init_timer(&dev->bulk_timeout);
+ timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
return 0;
}
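
The au0828 change above follows the generic timer API conversion: the callback now receives the timer_list pointer and recovers its container with from_timer(). A minimal sketch with made-up struct and function names:

#include <linux/timer.h>

struct my_dev {
	struct timer_list bulk_timeout;
};

static void my_bulk_timeout(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, bulk_timeout);

	/* ... handle the timeout using dev ... */
}

static void my_dev_init(struct my_dev *dev)
{
	/* replaces setup_timer()/init_timer() plus the unsigned long cookie */
	timer_setup(&dev->bulk_timeout, my_bulk_timeout, 0);
}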
diff --git a/drivers/media/usb/au0828/au0828-i2c.c b/drivers/media/usb/au0828/au0828-i2c.c
index ef7d1b830ca3..1b8ec5d9e7ab 100644
--- a/drivers/media/usb/au0828/au0828-i2c.c
+++ b/drivers/media/usb/au0828/au0828-i2c.c
@@ -342,7 +342,7 @@ static const struct i2c_adapter au0828_i2c_adap_template = {
.algo = &au0828_i2c_algo_template,
};
-static struct i2c_client au0828_i2c_client_template = {
+static const struct i2c_client au0828_i2c_client_template = {
.name = "au0828 internal",
};
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 7996eb83a54e..af68afe085b5 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -269,7 +269,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
static int au0828_probe_i2c_ir(struct au0828_dev *dev)
{
int i = 0;
- const unsigned short addr_list[] = {
+ static const unsigned short addr_list[] = {
0x47, I2C_CLIENT_END
};
diff --git a/drivers/media/usb/au0828/au0828-vbi.c b/drivers/media/usb/au0828/au0828-vbi.c
index e0930ce59b8d..9dd6bdb7304f 100644
--- a/drivers/media/usb/au0828/au0828-vbi.c
+++ b/drivers/media/usb/au0828/au0828-vbi.c
@@ -79,7 +79,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops au0828_vbi_qops = {
+const struct vb2_ops au0828_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 9342402b92f7..a240153821e0 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -67,10 +67,10 @@ static inline void print_err_status(struct au0828_dev *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -954,9 +954,9 @@ int au0828_analog_unregister(struct au0828_dev *dev)
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
struct au0828_dmaqueue *dma_q = &dev->vidq;
struct au0828_buffer *buf;
unsigned char *vid_data;
@@ -978,9 +978,9 @@ static void au0828_vid_buffer_timeout(unsigned long data)
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
struct au0828_dmaqueue *dma_q = &dev->vbiq;
struct au0828_buffer *buf;
unsigned char *vbi_data;
@@ -1953,10 +1953,8 @@ int au0828_analog_register(struct au0828_dev *dev,
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vbiq.active);
- setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
- (unsigned long)dev);
- setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
- (unsigned long)dev);
+ timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+ timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
dev->width = NTSC_STD_W;
dev->height = NTSC_STD_H;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 05e445fe0b77..f6f37e8ef51d 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -358,7 +358,7 @@ void au0828_dvb_suspend(struct au0828_dev *dev);
void au0828_dvb_resume(struct au0828_dev *dev);
/* au0828-vbi.c */
-extern struct vb2_ops au0828_vbi_qops;
+extern const struct vb2_ops au0828_vbi_qops;
#define dprintk(level, fmt, arg...)\
do { if (au0828_debug & level)\
diff --git a/drivers/media/usb/b2c2/Kconfig b/drivers/media/usb/b2c2/Kconfig
index 17d35833980c..a620ae42dfc8 100644
--- a/drivers/media/usb/b2c2/Kconfig
+++ b/drivers/media/usb/b2c2/Kconfig
@@ -10,6 +10,6 @@ config DVB_B2C2_FLEXCOP_USB_DEBUG
bool "Enable debug for the B2C2 FlexCop drivers"
depends on DVB_B2C2_FLEXCOP_USB
select DVB_B2C2_FLEXCOP_DEBUG
- help
- Say Y if you want to enable the module option to control debug messages
- of all B2C2 FlexCop drivers.
+ help
+ Say Y if you want to enable the module option to control debug messages
+ of all B2C2 FlexCop drivers.
diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h
index 25ad43166e78..e86faa0e06ca 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.h
+++ b/drivers/media/usb/b2c2/flexcop-usb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
* flexcop-usb.h - header file for the USB part
diff --git a/drivers/media/usb/cx231xx/Makefile b/drivers/media/usb/cx231xx/Makefile
index 52cf76935e69..19e8c35d6a77 100644
--- a/drivers/media/usb/cx231xx/Makefile
+++ b/drivers/media/usb/cx231xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cx231xx-y += cx231xx-video.o cx231xx-i2c.o cx231xx-cards.o cx231xx-core.o
cx231xx-y += cx231xx-avcore.o cx231xx-417.o cx231xx-pcb-cfg.o cx231xx-vbi.o
cx231xx-$(CONFIG_VIDEO_CX231XX_RC) += cx231xx-input.o
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e0daa9b6c2a0..54d9d0cb326f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -847,7 +847,7 @@ struct cx231xx_board cx231xx_boards[] = {
.demod_addr = 0x64, /* 0xc8 >> 1 */
.demod_i2c_master = I2C_1_MUX_3,
.has_dvb = 1,
- .ir_i2c_master = I2C_0,
+ .decoder = CX231XX_AVDECODER,
.norm = V4L2_STD_PAL,
.output_mode = OUT_MODE_VIP11,
.tuner_addr = 0x60, /* 0xc0 >> 1 */
@@ -872,6 +872,7 @@ struct cx231xx_board cx231xx_boards[] = {
.name = "Astrometa T2hybrid",
.tuner_type = TUNER_ABSENT,
.has_dvb = 1,
+ .decoder = CX231XX_AVDECODER,
.output_mode = OUT_MODE_VIP11,
.agc_analog_digital_select_gpio = 0x01,
.ctl_pin_status_mask = 0xffffffc4,
@@ -1684,7 +1685,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
nr = dev->devno;
assoc_desc = udev->actconfig->intf_assoc[0];
- if (assoc_desc->bFirstInterface != ifnum) {
+ if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
dev_err(d, "Not found matching IAD interface\n");
retval = -ENODEV;
goto err_if;
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index c18bb33e060e..54abc1a7c8e1 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -179,10 +179,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c
index 76e901920f6f..d3bfe8e23b1f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c
@@ -43,10 +43,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -285,7 +285,7 @@ static void vbi_buffer_release(struct videobuf_queue *vq,
free_buffer(vq, buf);
}
-struct videobuf_queue_ops cx231xx_vbi_qops = {
+const struct videobuf_queue_ops cx231xx_vbi_qops = {
.buf_setup = vbi_buffer_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.h b/drivers/media/usb/cx231xx/cx231xx-vbi.h
index 16c7d20a22a4..b33d2bdb621c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-vbi.h
+++ b/drivers/media/usb/cx231xx/cx231xx-vbi.h
@@ -22,7 +22,7 @@
#ifndef _CX231XX_VBI_H
#define _CX231XX_VBI_H
-extern struct videobuf_queue_ops cx231xx_vbi_qops;
+extern const struct videobuf_queue_ops cx231xx_vbi_qops;
#define NTSC_VBI_START_LINE 10 /* line 10 - 21 */
#define NTSC_VBI_END_LINE 21
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 179b8481a870..226059fc672b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -199,10 +199,10 @@ static inline void print_err_status(struct cx231xx *dev, int packet, int status)
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 969f68e55265..bed44601f324 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dvb_usb_v2-objs := dvb_usb_core.o dvb_usb_urb.o usb_urb.o
obj-$(CONFIG_DVB_USB_V2) += dvb_usb_v2.o
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 096bb75a24e5..2bf3bd81280a 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -628,8 +628,7 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
}
ret = dvb_usbv2_device_power_ctrl(d, 0);
- if (ret < 0)
- goto err;
+
err:
if (!adap->suspend_resume_active) {
adap->active_fe = -1;
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.h b/drivers/media/usb/dvb-usb-v2/gl861.h
index b0b80d87bb7e..b651b857e034 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.h
+++ b/drivers/media/usb/dvb-usb-v2/gl861.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_GL861_H_
#define _DVB_USB_GL861_H_
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 0eb33e043079..a221bb8a12b4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -516,7 +516,6 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
data required to program */
block_len = (msg->len / 8);
left_over_len = (msg->len % 8);
- index = 0;
mxl_i2c("block_len %d, left_over_len %d",
block_len, left_over_len);
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 95a7b9123f8e..c76e78f9638a 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1598,7 +1598,7 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
struct rtl28xxu_dev *dev = d->priv;
u8 buf[5];
u32 rc_code;
- struct rtl28xxu_reg_val rc_nec_tab[] = {
+ static const struct rtl28xxu_reg_val rc_nec_tab[] = {
{ 0x3033, 0x80 },
{ 0x3020, 0x43 },
{ 0x3021, 0x16 },
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 55136cde38f5..dce2b97efce4 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* usb-urb.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/Makefile b/drivers/media/usb/dvb-usb/Makefile
index 3b3f32b426d1..16de1e4f36a4 100644
--- a/drivers/media/usb/dvb-usb/Makefile
+++ b/drivers/media/usb/dvb-usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
dvb-usb-objs += dvb-usb-firmware.o dvb-usb-init.o dvb-usb-urb.o dvb-usb-i2c.o
dvb-usb-objs += dvb-usb-dvb.o dvb-usb-remote.o usb-urb.o
obj-$(CONFIG_DVB_USB) += dvb-usb.o
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 7ba975bea96a..540886b3bb29 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -37,48 +37,9 @@ static int a800_identify_state(struct usb_device *udev, struct dvb_usb_device_pr
return 0;
}
-static struct rc_map_table rc_map_a800_table[] = {
- { 0x0201, KEY_MODE }, /* SOURCE */
- { 0x0200, KEY_POWER2 }, /* POWER */
- { 0x0205, KEY_1 }, /* 1 */
- { 0x0206, KEY_2 }, /* 2 */
- { 0x0207, KEY_3 }, /* 3 */
- { 0x0209, KEY_4 }, /* 4 */
- { 0x020a, KEY_5 }, /* 5 */
- { 0x020b, KEY_6 }, /* 6 */
- { 0x020d, KEY_7 }, /* 7 */
- { 0x020e, KEY_8 }, /* 8 */
- { 0x020f, KEY_9 }, /* 9 */
- { 0x0212, KEY_LEFT }, /* L / DISPLAY */
- { 0x0211, KEY_0 }, /* 0 */
- { 0x0213, KEY_RIGHT }, /* R / CH RTN */
- { 0x0217, KEY_CAMERA }, /* SNAP SHOT */
- { 0x0210, KEY_LAST }, /* 16-CH PREV */
- { 0x021e, KEY_VOLUMEDOWN }, /* VOL DOWN */
- { 0x020c, KEY_ZOOM }, /* FULL SCREEN */
- { 0x021f, KEY_VOLUMEUP }, /* VOL UP */
- { 0x0214, KEY_MUTE }, /* MUTE */
- { 0x0208, KEY_AUDIO }, /* AUDIO */
- { 0x0219, KEY_RECORD }, /* RECORD */
- { 0x0218, KEY_PLAY }, /* PLAY */
- { 0x021b, KEY_STOP }, /* STOP */
- { 0x021a, KEY_PLAYPAUSE }, /* TIMESHIFT / PAUSE */
- { 0x021d, KEY_BACK }, /* << / RED */
- { 0x021c, KEY_FORWARD }, /* >> / YELLOW */
- { 0x0203, KEY_TEXT }, /* TELETEXT */
- { 0x0204, KEY_EPG }, /* EPG */
- { 0x0215, KEY_MENU }, /* MENU */
-
- { 0x0303, KEY_CHANNELUP }, /* CH UP */
- { 0x0302, KEY_CHANNELDOWN }, /* CH DOWN */
- { 0x0301, KEY_FIRST }, /* |<< / GREEN */
- { 0x0300, KEY_LAST }, /* >>| / BLUE */
-
-};
-
-static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int a800_rc_query(struct dvb_usb_device *d)
{
- int ret;
+ int ret = 0;
u8 *key = kmalloc(5, GFP_KERNEL);
if (!key)
return -ENOMEM;
@@ -90,11 +51,12 @@ static int a800_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
goto out;
}
- /* call the universal NEC remote processor, to find out the key's state and event */
- dvb_usb_nec_rc_key_to_event(d,key,event,state);
- if (key[0] != 0)
- deb_rc("key: %*ph\n", 5, key);
- ret = 0;
+ /* Note that extended nec and nec32 are dropped */
+ if (key[0] == 1)
+ rc_keydown(d->rc_dev, RC_PROTO_NEC,
+ RC_SCANCODE_NEC(key[1], key[3]), 0);
+ else if (key[0] == 2)
+ rc_repeat(d->rc_dev);
out:
kfree(key);
return ret;
@@ -157,11 +119,12 @@ static struct dvb_usb_device_properties a800_properties = {
.power_ctrl = a800_power_ctrl,
.identify_state = a800_identify_state,
- .rc.legacy = {
- .rc_interval = DEFAULT_RC_INTERVAL,
- .rc_map_table = rc_map_a800_table,
- .rc_map_size = ARRAY_SIZE(rc_map_a800_table),
- .rc_query = a800_rc_query,
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_AVERMEDIA_M135A,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = a800_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
},
.i2c_algo = &dibusb_i2c_algo,
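
The a800 conversion above drops the legacy keymap table in favour of rc-core reporting. A hedged sketch of such a query callback is shown below; the read helper and the meaning of the status byte are hypothetical and device-specific, only the rc_keydown()/rc_repeat() calls mirror the patch.

#include <media/rc-core.h>

static int example_rc_query(struct dvb_usb_device *d)
{
	u8 buf[5];

	if (example_read_remote(d, buf, sizeof(buf)))	/* hypothetical helper */
		return -EIO;

	if (buf[0] == 1)		/* new key press */
		rc_keydown(d->rc_dev, RC_PROTO_NEC,
			   RC_SCANCODE_NEC(buf[1], buf[3]), 0);
	else if (buf[0] == 2)		/* NEC repeat frame */
		rc_repeat(d->rc_dev);

	return 0;
}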
diff --git a/drivers/media/usb/dvb-usb/af9005-script.h b/drivers/media/usb/dvb-usb/af9005-script.h
index 4d69045426dd..870cb59cd904 100644
--- a/drivers/media/usb/dvb-usb/af9005-script.h
+++ b/drivers/media/usb/dvb-usb/af9005-script.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
File automatically generated by createinit.py using data
extracted from AF05BDA.sys (windows driver):
diff --git a/drivers/media/usb/dvb-usb/az6027.h b/drivers/media/usb/dvb-usb/az6027.h
index f3afe17f3f3d..95b056b36030 100644
--- a/drivers/media/usb/dvb-usb/az6027.h
+++ b/drivers/media/usb/dvb-usb/az6027.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_VP6027_H_
#define _DVB_USB_VP6027_H_
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 66429d7f69b5..88f9b9804b25 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_CXUSB_H_
#define _DVB_USB_CXUSB_H_
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 6020170fe99a..92098c1b78e5 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -291,7 +291,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -325,7 +325,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
stk7700d_dib7000p_mt2266_config)
!= 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -478,7 +478,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
&stk7700ph_dib7700_xc3028_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7070p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
&dib7770p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3056,7 +3056,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
@@ -3109,7 +3109,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
/* initialize IC 0 */
if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3139,7 +3139,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3214,7 +3214,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
1, 0x10, &tfe7790p_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
@@ -3309,7 +3309,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
@@ -3384,7 +3384,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
stk7070pd_dib7000p_config) != 0) {
err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n",
__func__);
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
}
@@ -3620,7 +3620,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
/* Demodulator not found for some reason? */
- dvb_detach(&state->dib7000p_ops);
+ dvb_detach(state->dib7000p_ops.set_wbd_ref);
return -ENODEV;
}
diff --git a/drivers/media/usb/dvb-usb/dib07x0.h b/drivers/media/usb/dvb-usb/dib07x0.h
index 7e62c1018520..2e67f794fe37 100644
--- a/drivers/media/usb/dvb-usb/dib07x0.h
+++ b/drivers/media/usb/dvb-usb/dib07x0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DIB07X0_H_
#define _DIB07X0_H_
diff --git a/drivers/media/usb/dvb-usb/digitv.h b/drivers/media/usb/dvb-usb/digitv.h
index 581e09c25491..2af9fedfad70 100644
--- a/drivers/media/usb/dvb-usb/digitv.h
+++ b/drivers/media/usb/dvb-usb/digitv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_DIGITV_H_
#define _DVB_USB_DIGITV_H_
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-common.h b/drivers/media/usb/dvb-usb/dvb-usb-common.h
index 7e619d638809..8c51ac4493dd 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-common.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb-common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* dvb-usb-common.h is part of the DVB USB library.
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index e5675da286cb..3a66e732e0d8 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-dvb.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index 04033efe7ad5..15c153e49382 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-firmware.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
index 4f0b0adce7f5..ca0b734e009b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-i2c.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 0b03f9bd9c26..65e2c9e2cdc9 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-remote.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
@@ -279,10 +280,11 @@ static int rc_core_dvb_usb_remote_init(struct dvb_usb_device *d)
dev->change_protocol = d->props.rc.core.change_protocol;
dev->allowed_protocols = d->props.rc.core.allowed_protos;
usb_to_input_id(d->udev, &dev->input_id);
- dev->device_name = "IR-receiver inside an USB DVB receiver";
+ dev->device_name = d->desc->name;
dev->input_phys = d->rc_phys;
dev->dev.parent = &d->udev->dev;
dev->priv = d;
+ dev->scancode_mask = d->props.rc.core.scancode_mask;
err = rc_register_device(dev);
if (err < 0) {
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-urb.c b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
index 95f9097498cb..c1b4e94a37f8 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-urb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* dvb-usb-urb.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 72468fdffa18..e71fc86b4fb2 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* dvb-usb.h is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
@@ -207,6 +208,7 @@ struct dvb_rc {
int (*rc_query) (struct dvb_usb_device *d);
int rc_interval;
bool bulk_mode; /* uses bulk mode */
+ u32 scancode_mask;
};
/**
diff --git a/drivers/media/usb/dvb-usb/dw2102.h b/drivers/media/usb/dvb-usb/dw2102.h
index 5cd0b0eb6ce1..f64cf79b7934 100644
--- a/drivers/media/usb/dvb-usb/dw2102.h
+++ b/drivers/media/usb/dvb-usb/dw2102.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DW2102_H_
#define _DW2102_H_
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 0251a4e91d47..41261317bd5c 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -261,28 +261,6 @@ static int jdvbt90502_read_signal_strength(struct dvb_frontend *fe,
return 0;
}
-
-/* filter out un-supported properties to notify users */
-static int jdvbt90502_set_property(struct dvb_frontend *fe,
- struct dtv_property *tvp)
-{
- int r = 0;
-
- switch (tvp->cmd) {
- case DTV_DELIVERY_SYSTEM:
- if (tvp->u.data != SYS_ISDBT)
- r = -EINVAL;
- break;
- case DTV_CLEAR:
- case DTV_TUNE:
- case DTV_FREQUENCY:
- break;
- default:
- r = -EINVAL;
- }
- return r;
-}
-
static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
@@ -457,8 +435,6 @@ static const struct dvb_frontend_ops jdvbt90502_ops = {
.init = jdvbt90502_init,
.write = _jdvbt90502_write,
- .set_property = jdvbt90502_set_property,
-
.set_frontend = jdvbt90502_set_frontend,
.read_status = jdvbt90502_read_status,
diff --git a/drivers/media/usb/dvb-usb/m920x.h b/drivers/media/usb/dvb-usb/m920x.h
index 3c061518ffc1..bab3c6ac7084 100644
--- a/drivers/media/usb/dvb-usb/m920x.h
+++ b/drivers/media/usb/dvb-usb/m920x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_M920X_H_
#define _DVB_USB_M920X_H_
diff --git a/drivers/media/usb/dvb-usb/usb-urb.c b/drivers/media/usb/dvb-usb/usb-urb.c
index 89173603be67..2804d2d0e83a 100644
--- a/drivers/media/usb/dvb-usb/usb-urb.c
+++ b/drivers/media/usb/dvb-usb/usb-urb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* usb-urb.c is part of the DVB USB library.
*
* Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
diff --git a/drivers/media/usb/dvb-usb/vp702x.h b/drivers/media/usb/dvb-usb/vp702x.h
index 20b90055e7ac..18ad7ced2045 100644
--- a/drivers/media/usb/dvb-usb/vp702x.h
+++ b/drivers/media/usb/dvb-usb/vp702x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DVB_USB_VP7021_H_
#define _DVB_USB_VP7021_H_
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 13340af0d39c..2527b88beb87 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -97,82 +97,22 @@ static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff)
return vp7045_usb_op(d,SET_TUNER_POWER,&v,1,NULL,0,150);
}
-/* remote control stuff */
-
-/* The keymapping struct. Somehow this should be loaded to the driver, but
- * currently it is hardcoded. */
-static struct rc_map_table rc_map_vp7045_table[] = {
- { 0x0016, KEY_POWER },
- { 0x0010, KEY_MUTE },
- { 0x0003, KEY_1 },
- { 0x0001, KEY_2 },
- { 0x0006, KEY_3 },
- { 0x0009, KEY_4 },
- { 0x001d, KEY_5 },
- { 0x001f, KEY_6 },
- { 0x000d, KEY_7 },
- { 0x0019, KEY_8 },
- { 0x001b, KEY_9 },
- { 0x0015, KEY_0 },
- { 0x0005, KEY_CHANNELUP },
- { 0x0002, KEY_CHANNELDOWN },
- { 0x001e, KEY_VOLUMEUP },
- { 0x000a, KEY_VOLUMEDOWN },
- { 0x0011, KEY_RECORD },
- { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */
- { 0x0014, KEY_PLAY },
- { 0x001a, KEY_STOP },
- { 0x0040, KEY_REWIND },
- { 0x0012, KEY_FASTFORWARD },
- { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */
- { 0x004c, KEY_PAUSE },
- { 0x004d, KEY_SCREEN }, /* Full screen mode. */
- { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */
- { 0x000c, KEY_CANCEL }, /* Cancel */
- { 0x001c, KEY_EPG }, /* EPG */
- { 0x0000, KEY_TAB }, /* Tab */
- { 0x0048, KEY_INFO }, /* Preview */
- { 0x0004, KEY_LIST }, /* RecordList */
- { 0x000f, KEY_TEXT }, /* Teletext */
- { 0x0041, KEY_PREVIOUSSONG },
- { 0x0042, KEY_NEXTSONG },
- { 0x004b, KEY_UP },
- { 0x0051, KEY_DOWN },
- { 0x004e, KEY_LEFT },
- { 0x0052, KEY_RIGHT },
- { 0x004f, KEY_ENTER },
- { 0x0013, KEY_CANCEL },
- { 0x004a, KEY_CLEAR },
- { 0x0054, KEY_PRINT }, /* Capture */
- { 0x0043, KEY_SUBTITLE }, /* Subtitle/CC */
- { 0x0008, KEY_VIDEO }, /* A/V */
- { 0x0007, KEY_SLEEP }, /* Hibernate */
- { 0x0045, KEY_ZOOM }, /* Zoom+ */
- { 0x0018, KEY_RED},
- { 0x0053, KEY_GREEN},
- { 0x005e, KEY_YELLOW},
- { 0x005f, KEY_BLUE}
-};
-
-static int vp7045_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+static int vp7045_rc_query(struct dvb_usb_device *d)
{
u8 key;
- int i;
vp7045_usb_op(d,RC_VAL_READ,NULL,0,&key,1,20);
deb_rc("remote query key: %x %d\n",key,key);
- if (key == 0x44) {
- *state = REMOTE_NO_KEY_PRESSED;
- return 0;
+ if (key != 0x44) {
+ /*
+ * The 8 bit address isn't available, but since the remote uses
+ * address 0 we'll use that. nec repeats are ignored too, even
+ * though the remote sends them.
+ */
+ rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0, key), 0);
}
- for (i = 0; i < ARRAY_SIZE(rc_map_vp7045_table); i++)
- if (rc5_data(&rc_map_vp7045_table[i]) == key) {
- *state = REMOTE_KEY_PRESSED;
- *event = rc_map_vp7045_table[i].keycode;
- break;
- }
return 0;
}
@@ -265,11 +205,13 @@ static struct dvb_usb_device_properties vp7045_properties = {
.power_ctrl = vp7045_power_ctrl,
.read_mac_address = vp7045_read_mac_addr,
- .rc.legacy = {
- .rc_interval = 400,
- .rc_map_table = rc_map_vp7045_table,
- .rc_map_size = ARRAY_SIZE(rc_map_vp7045_table),
- .rc_query = vp7045_rc_query,
+ .rc.core = {
+ .rc_interval = 400,
+ .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
+ .module_name = KBUILD_MODNAME,
+ .rc_query = vp7045_rc_query,
+ .allowed_protos = RC_PROTO_BIT_NEC,
+ .scancode_mask = 0xff,
},
.num_device_descs = 2,
diff --git a/drivers/media/usb/em28xx/Makefile b/drivers/media/usb/em28xx/Makefile
index 3f850d5063d0..86bfc35e2ed4 100644
--- a/drivers/media/usb/em28xx/Makefile
+++ b/drivers/media/usb/em28xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
em28xx-y += em28xx-core.o em28xx-i2c.o em28xx-cards.o em28xx-camera.o
em28xx-v4l-objs := em28xx-video.o em28xx-vbi.o
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 4a7db623fe29..9950a740e04e 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -112,10 +112,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 66c5012a628a..9bf49d666e5a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -882,7 +882,7 @@ static const struct i2c_adapter em28xx_adap_template = {
.algo = &em28xx_algo,
};
-static struct i2c_client em28xx_client_template = {
+static const struct i2c_client em28xx_client_template = {
.name = "em28xx internal",
};
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 747525ca7ed5..9e5cdfb25a73 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define EM_GPIO_0 (1 << 0)
#define EM_GPIO_1 (1 << 1)
#define EM_GPIO_2 (1 << 2)
diff --git a/drivers/media/usb/em28xx/em28xx-v4l.h b/drivers/media/usb/em28xx/em28xx-v4l.h
index 8dfcb56bf4b3..9c411aac3878 100644
--- a/drivers/media/usb/em28xx/em28xx-v4l.h
+++ b/drivers/media/usb/em28xx/em28xx-v4l.h
@@ -16,4 +16,4 @@
int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count);
void em28xx_stop_vbi_streaming(struct vb2_queue *vq);
-extern struct vb2_ops em28xx_vbi_qops;
+extern const struct vb2_ops em28xx_vbi_qops;
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 0bac552bbe87..f5123651ef30 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -93,7 +93,7 @@ vbi_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&dev->slock, flags);
}
-struct vb2_ops em28xx_vbi_qops = {
+const struct vb2_ops em28xx_vbi_qops = {
.queue_setup = vbi_queue_setup,
.buf_prepare = vbi_buffer_prepare,
.buf_queue = vbi_buffer_queue,
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 8d253a5df0a9..a2ba2d905952 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -557,10 +557,10 @@ static inline void print_err_status(struct em28xx *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/go7007/Makefile b/drivers/media/usb/go7007/Makefile
index e99287c3b828..3d95bbc4192c 100644
--- a/drivers/media/usb/go7007/Makefile
+++ b/drivers/media/usb/go7007/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_GO7007) += go7007.o
obj-$(CONFIG_VIDEO_GO7007_USB) += go7007-usb.o
obj-$(CONFIG_VIDEO_GO7007_LOADER) += go7007-loader.o
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 3fd94fe7e1eb..d214a21acff7 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -204,11 +204,11 @@ config USB_GSPCA_SE401
tristate "SE401 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- Endpoints (formerly known as AOX) se401 chip.
+ Say Y here if you want support for cameras based on the
+ Endpoints (formerly known as AOX) se401 chip.
- To compile this driver as a module, choose M here: the
- module will be called gspca_se401.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_se401.
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
@@ -224,11 +224,11 @@ config USB_GSPCA_SN9C20X
tristate "SN9C20X USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
help
- Say Y here if you want support for cameras based on the
- sn9c20x chips (SN9C201 and SN9C202).
+ Say Y here if you want support for cameras based on the
+ sn9c20x chips (SN9C201 and SN9C202).
- To compile this driver as a module, choose M here: the
- module will be called gspca_sn9c20x.
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_sn9c20x.
config USB_GSPCA_SONIXB
tristate "SONIX Bayer USB Camera Driver"
diff --git a/drivers/media/usb/gspca/Makefile b/drivers/media/usb/gspca/Makefile
index 9f5ccecb9c8a..3e3ecbffdf9f 100644
--- a/drivers/media/usb/gspca/Makefile
+++ b/drivers/media/usb/gspca/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_GSPCA) += gspca_main.o
obj-$(CONFIG_USB_GSPCA_BENQ) += gspca_benq.o
obj-$(CONFIG_USB_GSPCA_CONEX) += gspca_conex.o
diff --git a/drivers/media/usb/gspca/gl860/Makefile b/drivers/media/usb/gspca/gl860/Makefile
index cf6397415aad..7bcfa36f12ac 100644
--- a/drivers/media/usb/gspca/gl860/Makefile
+++ b/drivers/media/usb/gspca/gl860/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_GL860) += gspca_gl860.o
gspca_gl860-objs := gl860.o \
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 0f141762abf1..961343873fd0 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1075,7 +1075,6 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
/* give an index to each format */
index = 0;
- j = 0;
for (i = gspca_dev->cam.nmodes; --i >= 0; ) {
fmt_tb[index] = gspca_dev->cam.cam_mode[i].pixelformat;
j = 0;
diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
index d39adf90303b..9e0cf711642b 100644
--- a/drivers/media/usb/gspca/gspca.h
+++ b/drivers/media/usb/gspca/gspca.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GSPCAV2_H
#define GSPCAV2_H
diff --git a/drivers/media/usb/gspca/m5602/Makefile b/drivers/media/usb/gspca/m5602/Makefile
index 8e1fb5a1d2a1..95c9db6dc59c 100644
--- a/drivers/media/usb/gspca/m5602/Makefile
+++ b/drivers/media/usb/gspca/m5602/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_M5602) += gspca_m5602.o
gspca_m5602-objs := m5602_core.o \
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index cdb79c5f0c38..f1537daf4e2e 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -2865,7 +2865,7 @@ static void sd_reset_snapshot(struct gspca_dev *gspca_dev)
static void ov51x_upload_quan_tables(struct sd *sd)
{
- const unsigned char yQuanTable511[] = {
+ static const unsigned char yQuanTable511[] = {
0, 1, 1, 2, 2, 3, 3, 4,
1, 1, 1, 2, 2, 3, 4, 4,
1, 1, 2, 2, 3, 4, 4, 4,
@@ -2876,7 +2876,7 @@ static void ov51x_upload_quan_tables(struct sd *sd)
4, 4, 4, 4, 5, 5, 5, 5
};
- const unsigned char uvQuanTable511[] = {
+ static const unsigned char uvQuanTable511[] = {
0, 2, 2, 3, 4, 4, 4, 4,
2, 2, 2, 4, 4, 4, 4, 4,
2, 2, 3, 4, 4, 4, 4, 4,
@@ -2888,13 +2888,13 @@ static void ov51x_upload_quan_tables(struct sd *sd)
};
/* OV518 quantization tables are 8x4 (instead of 8x8) */
- const unsigned char yQuanTable518[] = {
+ static const unsigned char yQuanTable518[] = {
5, 4, 5, 6, 6, 7, 7, 7,
5, 5, 5, 5, 6, 7, 7, 7,
6, 6, 6, 6, 7, 7, 7, 8,
7, 7, 6, 7, 7, 7, 8, 8
};
- const unsigned char uvQuanTable518[] = {
+ static const unsigned char uvQuanTable518[] = {
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 7,
6, 6, 6, 7, 7, 7, 7, 8,
@@ -2943,7 +2943,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 511 and 511+ */
- const struct ov_regvals init_511[] = {
+ static const struct ov_regvals init_511[] = {
{ R51x_SYS_RESET, 0x7f },
{ R51x_SYS_INIT, 0x01 },
{ R51x_SYS_RESET, 0x7f },
@@ -2953,7 +2953,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R51x_SYS_RESET, 0x3d },
};
- const struct ov_regvals norm_511[] = {
+ static const struct ov_regvals norm_511[] = {
{ R511_DRAM_FLOW_CTL, 0x01 },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2963,7 +2963,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals norm_511_p[] = {
+ static const struct ov_regvals norm_511_p[] = {
{ R511_DRAM_FLOW_CTL, 0xff },
{ R51x_SYS_SNAP, 0x00 },
{ R51x_SYS_SNAP, 0x02 },
@@ -2973,7 +2973,7 @@ static void ov511_configure(struct gspca_dev *gspca_dev)
{ R511_COMP_LUT_EN, 0x03 },
};
- const struct ov_regvals compress_511[] = {
+ static const struct ov_regvals compress_511[] = {
{ 0x70, 0x1f },
{ 0x71, 0x05 },
{ 0x72, 0x06 },
@@ -3009,7 +3009,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
/* For 518 and 518+ */
- const struct ov_regvals init_518[] = {
+ static const struct ov_regvals init_518[] = {
{ R51x_SYS_RESET, 0x40 },
{ R51x_SYS_INIT, 0xe1 },
{ R51x_SYS_RESET, 0x3e },
@@ -3020,7 +3020,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x5d, 0x03 },
};
- const struct ov_regvals norm_518[] = {
+ static const struct ov_regvals norm_518[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
@@ -3033,7 +3033,7 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
{ 0x2f, 0x80 },
};
- const struct ov_regvals norm_518_p[] = {
+ static const struct ov_regvals norm_518_p[] = {
{ R51x_SYS_SNAP, 0x02 }, /* Reset */
{ R51x_SYS_SNAP, 0x01 }, /* Enable */
{ 0x31, 0x0f },
diff --git a/drivers/media/usb/gspca/stv06xx/Makefile b/drivers/media/usb/gspca/stv06xx/Makefile
index 3a4b2f899049..c4d7206e0c92 100644
--- a/drivers/media/usb/gspca/stv06xx/Makefile
+++ b/drivers/media/usb/gspca/stv06xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_STV06XX) += gspca_stv06xx.o
gspca_stv06xx-objs := stv06xx.o \
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index a097d3dbc141..65ef755adfdc 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -386,7 +386,7 @@ static void msi2500_isoc_handler(struct urb *urb)
if (unlikely(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
- dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronuously\n",
+ dev_dbg(dev->dev, "URB (%p) unlinked %ssynchronously\n",
urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/pvrusb2/Kconfig b/drivers/media/usb/pvrusb2/Kconfig
index 60a2604e4cb3..1ad913fc30bf 100644
--- a/drivers/media/usb/pvrusb2/Kconfig
+++ b/drivers/media/usb/pvrusb2/Kconfig
@@ -44,7 +44,6 @@ config VIDEO_PVRUSB2_DVB
select MEDIA_TUNER_SIMPLE if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA8290 if MEDIA_SUBDRV_AUTOSELECT
---help---
-
This option enables a DVB interface for the pvrusb2 driver.
If your device does not support digital television, this
feature will have no affect on the driver's operation.
diff --git a/drivers/media/usb/pvrusb2/Makefile b/drivers/media/usb/pvrusb2/Makefile
index ad705547bdce..0d84064036b2 100644
--- a/drivers/media/usb/pvrusb2/Makefile
+++ b/drivers/media/usb/pvrusb2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-pvrusb2-sysfs-$(CONFIG_VIDEO_PVRUSB2_SYSFS) := pvrusb2-sysfs.o
obj-pvrusb2-debugifc-$(CONFIG_VIDEO_PVRUSB2_DEBUGIFC) := pvrusb2-debugifc.o
obj-pvrusb2-dvb-$(CONFIG_VIDEO_PVRUSB2_DVB) := pvrusb2-dvb.o
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.h b/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
index 884ff916a352..b500c86d4178 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PVRUSB2_DVB_H__
#define __PVRUSB2_DVB_H__
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index ad5b25b89699..8289ee482f49 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -330,10 +330,10 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *);
static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
static int pvr2_hdw_commit_setup(struct pvr2_hdw *hdw);
static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw);
-static void pvr2_hdw_quiescent_timeout(unsigned long);
-static void pvr2_hdw_decoder_stabilization_timeout(unsigned long);
-static void pvr2_hdw_encoder_wait_timeout(unsigned long);
-static void pvr2_hdw_encoder_run_timeout(unsigned long);
+static void pvr2_hdw_quiescent_timeout(struct timer_list *);
+static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *);
+static void pvr2_hdw_encoder_wait_timeout(struct timer_list *);
+static void pvr2_hdw_encoder_run_timeout(struct timer_list *);
static int pvr2_issue_simple_cmd(struct pvr2_hdw *,u32);
static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
unsigned int timeout,int probe_fl,
@@ -2373,18 +2373,15 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
}
if (!hdw) goto fail;
- setup_timer(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout,
- (unsigned long)hdw);
+ timer_setup(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout, 0);
- setup_timer(&hdw->decoder_stabilization_timer,
- pvr2_hdw_decoder_stabilization_timeout,
- (unsigned long)hdw);
+ timer_setup(&hdw->decoder_stabilization_timer,
+ pvr2_hdw_decoder_stabilization_timeout, 0);
- setup_timer(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout,
- (unsigned long)hdw);
+ timer_setup(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout,
+ 0);
- setup_timer(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout,
- (unsigned long)hdw);
+ timer_setup(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout, 0);
hdw->master_state = PVR2_STATE_DEAD;
@@ -3539,10 +3536,16 @@ static void pvr2_ctl_read_complete(struct urb *urb)
complete(&hdw->ctl_done);
}
+struct hdw_timer {
+ struct timer_list timer;
+ struct pvr2_hdw *hdw;
+};
-static void pvr2_ctl_timeout(unsigned long data)
+static void pvr2_ctl_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ struct hdw_timer *timer = from_timer(timer, t, timer);
+ struct pvr2_hdw *hdw = timer->hdw;
+
if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
hdw->ctl_timeout_flag = !0;
if (hdw->ctl_write_pend_flag)
@@ -3564,7 +3567,10 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
{
unsigned int idx;
int status = 0;
- struct timer_list timer;
+ struct hdw_timer timer = {
+ .hdw = hdw,
+ };
+
if (!hdw->ctl_lock_held) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Attempted to execute control transfer without lock!!");
@@ -3621,8 +3627,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
hdw->ctl_timeout_flag = 0;
hdw->ctl_write_pend_flag = 0;
hdw->ctl_read_pend_flag = 0;
- setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw);
- timer.expires = jiffies + timeout;
+ timer_setup_on_stack(&timer.timer, pvr2_ctl_timeout, 0);
+ timer.timer.expires = jiffies + timeout;
if (write_len && write_data) {
hdw->cmd_debug_state = 2;
@@ -3677,7 +3683,7 @@ status);
}
/* Start timer */
- add_timer(&timer);
+ add_timer(&timer.timer);
/* Now wait for all I/O to complete */
hdw->cmd_debug_state = 4;
@@ -3687,7 +3693,7 @@ status);
hdw->cmd_debug_state = 5;
/* Stop timer */
- del_timer_sync(&timer);
+ del_timer_sync(&timer.timer);
hdw->cmd_debug_state = 6;
status = 0;
@@ -3769,6 +3775,8 @@ status);
if ((status < 0) && (!probe_fl)) {
pvr2_hdw_render_useless(hdw);
}
+ destroy_timer_on_stack(&timer.timer);
+
return status;
}
@@ -4366,9 +4374,9 @@ static int state_eval_encoder_run(struct pvr2_hdw *hdw)
/* Timeout function for quiescent timer. */
-static void pvr2_hdw_quiescent_timeout(unsigned long data)
+static void pvr2_hdw_quiescent_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ struct pvr2_hdw *hdw = from_timer(hdw, t, quiescent_timer);
hdw->state_decoder_quiescent = !0;
trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent);
hdw->state_stale = !0;
@@ -4377,9 +4385,9 @@ static void pvr2_hdw_quiescent_timeout(unsigned long data)
/* Timeout function for decoder stabilization timer. */
-static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data)
+static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ struct pvr2_hdw *hdw = from_timer(hdw, t, decoder_stabilization_timer);
hdw->state_decoder_ready = !0;
trace_stbit("state_decoder_ready", hdw->state_decoder_ready);
hdw->state_stale = !0;
@@ -4388,9 +4396,9 @@ static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data)
/* Timeout function for encoder wait timer. */
-static void pvr2_hdw_encoder_wait_timeout(unsigned long data)
+static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_wait_timer);
hdw->state_encoder_waitok = !0;
trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok);
hdw->state_stale = !0;
@@ -4399,9 +4407,9 @@ static void pvr2_hdw_encoder_wait_timeout(unsigned long data)
/* Timeout function for encoder run timer. */
-static void pvr2_hdw_encoder_run_timeout(unsigned long data)
+static void pvr2_hdw_encoder_run_timeout(struct timer_list *t)
{
- struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_run_timer);
if (!hdw->state_encoder_runok) {
hdw->state_encoder_runok = !0;
trace_stbit("state_encoder_runok",hdw->state_encoder_runok);
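
The pvrusb2 change above wraps an on-stack timer in a small struct so the new-style callback can still reach its context. A generic sketch of that pattern, with illustrative names only:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct ctx_timer {
	struct timer_list timer;
	void *ctx;			/* extra context the callback needs */
};

static void ctx_timeout(struct timer_list *t)
{
	struct ctx_timer *ct = from_timer(ct, t, timer);

	/* ... act on ct->ctx ... */
}

static void run_with_timeout(void *ctx, unsigned long timeout)
{
	struct ctx_timer ct = { .ctx = ctx };

	timer_setup_on_stack(&ct.timer, ctx_timeout, 0);
	ct.timer.expires = jiffies + timeout;
	add_timer(&ct.timer);

	/* ... submit I/O and wait for it to complete ... */

	del_timer_sync(&ct.timer);
	destroy_timer_on_stack(&ct.timer);
}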
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index eb6921d2743e..54b036d39c5b 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -262,7 +262,8 @@ static void pwc_isoc_handler(struct urb *urb)
if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN) {
- PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
+ PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronously.\n",
+ urb, urb->status == -ENOENT ? "" : "a");
return;
}
diff --git a/drivers/media/usb/pwc/pwc-nala.h b/drivers/media/usb/pwc/pwc-nala.h
index 168c73ef75d8..0fe9d473f4e9 100644
--- a/drivers/media/usb/pwc/pwc-nala.h
+++ b/drivers/media/usb/pwc/pwc-nala.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* SQCIF */
{
{0, 0, {0x04, 0x01, 0x03}},
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index b2f239c4ba42..7fee5766587a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -485,9 +485,10 @@ static void s2255_reset_dsppower(struct s2255_dev *dev)
/* kickstarts the firmware loading. from probe
*/
-static void s2255_timer(unsigned long user_data)
+static void s2255_timer(struct timer_list *t)
{
- struct s2255_fw *data = (struct s2255_fw *)user_data;
+ struct s2255_dev *dev = from_timer(dev, t, timer);
+ struct s2255_fw *data = dev->fw_data;
if (usb_submit_urb(data->fw_urb, GFP_ATOMIC) < 0) {
pr_err("s2255: can't submit urb\n");
atomic_set(&data->fw_state, S2255_FW_FAILED);
@@ -2283,7 +2284,7 @@ static int s2255_probe(struct usb_interface *interface,
dev_err(&interface->dev, "Could not find bulk-in endpoint\n");
goto errorEP;
}
- setup_timer(&dev->timer, s2255_timer, (unsigned long)dev->fw_data);
+ timer_setup(&dev->timer, s2255_timer, 0);
init_waitqueue_head(&dev->fw_data->wait_fw);
for (i = 0; i < MAX_CHANNELS; i++) {
struct s2255_vc *vc = &dev->vc[i];
diff --git a/drivers/media/usb/stk1160/Makefile b/drivers/media/usb/stk1160/Makefile
index 42d05463b353..613471528749 100644
--- a/drivers/media/usb/stk1160/Makefile
+++ b/drivers/media/usb/stk1160/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
stk1160-y := stk1160-core.o \
stk1160-v4l.o \
stk1160-video.o \
diff --git a/drivers/media/usb/stk1160/stk1160-i2c.c b/drivers/media/usb/stk1160/stk1160-i2c.c
index 2c70173e3c82..62a12d5356ad 100644
--- a/drivers/media/usb/stk1160/stk1160-i2c.c
+++ b/drivers/media/usb/stk1160/stk1160-i2c.c
@@ -246,7 +246,7 @@ static const struct i2c_adapter adap_template = {
.algo = &algo,
};
-static struct i2c_client client_template = {
+static const struct i2c_client client_template = {
.name = "stk1160 internal",
};
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index ce8ebbe395a6..423c03a0638d 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -38,10 +38,10 @@ static inline void print_err_status(struct stk1160 *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
diff --git a/drivers/media/usb/tm6000/Makefile b/drivers/media/usb/tm6000/Makefile
index f2644933b8d1..05322a72e862 100644
--- a/drivers/media/usb/tm6000/Makefile
+++ b/drivers/media/usb/tm6000/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
tm6000-y := tm6000-cards.o \
tm6000-core.o \
tm6000-i2c.o \
diff --git a/drivers/media/usb/tm6000/tm6000-cards.c b/drivers/media/usb/tm6000/tm6000-cards.c
index 2537643a1808..77347541904d 100644
--- a/drivers/media/usb/tm6000/tm6000-cards.c
+++ b/drivers/media/usb/tm6000/tm6000-cards.c
@@ -1184,7 +1184,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *usbdev;
- struct tm6000_core *dev = NULL;
+ struct tm6000_core *dev;
int i, rc = 0;
int nr = 0;
char *speed;
@@ -1194,22 +1194,21 @@ static int tm6000_usb_probe(struct usb_interface *interface,
/* Selects the proper interface */
rc = usb_set_interface(usbdev, 0, 1);
if (rc < 0)
- goto err;
+ goto report_failure;
/* Check to see next free device and mark as used */
nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS);
if (nr >= TM6000_MAXBOARDS) {
printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS);
- usb_put_dev(usbdev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto put_device;
}
/* Create and initialize dev struct */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
- printk(KERN_ERR "tm6000" ": out of memory!\n");
- usb_put_dev(usbdev);
- return -ENOMEM;
+ if (!dev) {
+ rc = -ENOMEM;
+ goto put_device;
}
spin_lock_init(&dev->slock);
mutex_init(&dev->usb_lock);
@@ -1313,8 +1312,7 @@ static int tm6000_usb_probe(struct usb_interface *interface,
if (!dev->isoc_in.endp) {
printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n");
rc = -ENODEV;
-
- goto err;
+ goto free_device;
}
/* save our data pointer in this interface device */
@@ -1324,17 +1322,18 @@ static int tm6000_usb_probe(struct usb_interface *interface,
rc = tm6000_init_dev(dev);
if (rc < 0)
- goto err;
+ goto free_device;
return 0;
-err:
+free_device:
+ kfree(dev);
+report_failure:
printk(KERN_ERR "tm6000: Error %d while registering\n", rc);
clear_bit(nr, &tm6000_devused);
+put_device:
usb_put_dev(usbdev);
-
- kfree(dev);
return rc;
}
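The reworked probe above uses fall-through error labels so each failure point releases only what has already been acquired: free_device frees the allocated structure, report_failure logs the error and clears the board-number bit, and put_device drops the usb_device reference taken on every path. A generic, hypothetical sketch of the same unwinding order (my_probe, my_dev and my_init are placeholders, not tm6000 symbols):

	static int my_probe(struct usb_interface *intf, const struct usb_device_id *id)
	{
		struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
		struct my_dev *dev;
		int rc;

		rc = usb_set_interface(udev, 0, 1);
		if (rc < 0)
			goto report_failure;	/* nothing allocated yet */

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev) {
			rc = -ENOMEM;
			goto put_device;	/* no allocation, no log: just drop the ref */
		}

		rc = my_init(dev);
		if (rc < 0)
			goto free_device;	/* unwind everything */

		return 0;

	free_device:
		kfree(dev);
	report_failure:
		dev_err(&intf->dev, "probe failed: %d\n", rc);
	put_device:
		usb_put_dev(udev);
		return rc;
	}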
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 097ac321b7e1..c811fc6cf48a 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -45,10 +45,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -123,7 +123,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
}
dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dvb->bulk_urb == NULL)
+ if (!dvb->bulk_urb)
return -ENOMEM;
pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in.endp->desc.bEndpointAddress
@@ -133,9 +133,8 @@ static int tm6000_start_stream(struct tm6000_core *dev)
size = size * 15; /* 512 x 8 or 12 or 15 */
dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
- if (dvb->bulk_urb->transfer_buffer == NULL) {
+ if (!dvb->bulk_urb->transfer_buffer) {
usb_free_urb(dvb->bulk_urb);
- printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
return -ENOMEM;
}
@@ -361,7 +360,7 @@ static void unregister_dvb(struct tm6000_core *dev)
{
struct tm6000_dvb *dvb = dev->dvb;
- if (dvb->bulk_urb != NULL) {
+ if (dvb->bulk_urb) {
struct urb *bulk_urb = dvb->bulk_urb;
kfree(bulk_urb->transfer_buffer);
@@ -400,10 +399,8 @@ static int dvb_init(struct tm6000_core *dev)
}
dvb = kzalloc(sizeof(struct tm6000_dvb), GFP_KERNEL);
- if (!dvb) {
- printk(KERN_INFO "Cannot allocate memory\n");
+ if (!dvb)
return -ENOMEM;
- }
dev->dvb = dvb;
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 91889ad9cdd7..397990afe00b 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -352,7 +352,7 @@ static int __tm6000_ir_int_start(struct rc_dev *rc)
dprintk(1, "IR max size: %d\n", size);
ir->int_urb->transfer_buffer = kzalloc(size, GFP_ATOMIC);
- if (ir->int_urb->transfer_buffer == NULL) {
+ if (!ir->int_urb->transfer_buffer) {
usb_free_urb(ir->int_urb);
return err;
}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index ec8c4d2534dc..9fa25de6b5a9 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -342,10 +342,10 @@ static inline void print_err_status(struct tm6000_core *dev,
switch (status) {
case -ENOENT:
- errmsg = "unlinked synchronuously";
+ errmsg = "unlinked synchronously";
break;
case -ECONNRESET:
- errmsg = "unlinked asynchronuously";
+ errmsg = "unlinked asynchronously";
break;
case -ENOSR:
errmsg = "Buffer error (overrun)";
@@ -470,20 +470,16 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
int num_bufs = TM6000_NUM_URB_BUF;
int i;
- if (dev->urb_buffer != NULL)
+ if (dev->urb_buffer)
return 0;
dev->urb_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_buffer) {
- tm6000_err("cannot allocate memory for urb buffers\n");
+ if (!dev->urb_buffer)
return -ENOMEM;
- }
dev->urb_dma = kmalloc(sizeof(dma_addr_t *)*num_bufs, GFP_KERNEL);
- if (!dev->urb_dma) {
- tm6000_err("cannot allocate memory for urb dma pointers\n");
+ if (!dev->urb_dma)
return -ENOMEM;
- }
for (i = 0; i < num_bufs; i++) {
dev->urb_buffer[i] = usb_alloc_coherent(
@@ -507,7 +503,7 @@ static int tm6000_free_urb_buffers(struct tm6000_core *dev)
{
int i;
- if (dev->urb_buffer == NULL)
+ if (!dev->urb_buffer)
return 0;
for (i = 0; i < TM6000_NUM_URB_BUF; i++) {
@@ -598,15 +594,12 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
dev->isoc_ctl.num_bufs = num_bufs;
dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
- if (!dev->isoc_ctl.urb) {
- tm6000_err("cannot alloc memory for usb buffers\n");
+ if (!dev->isoc_ctl.urb)
return -ENOMEM;
- }
dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
GFP_KERNEL);
if (!dev->isoc_ctl.transfer_buffer) {
- tm6000_err("cannot allocate memory for usbtransfer\n");
kfree(dev->isoc_ctl.urb);
return -ENOMEM;
}
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index f06f09a0876e..b55b79b8e921 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -84,7 +84,7 @@ static int usbtv_probe(struct usb_interface *intf,
/* Packet size is split into 11 bits of base size and count of
* extra multiplies of it.*/
size = usb_endpoint_maxp(&ep->desc);
- size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
+ size = size * usb_endpoint_maxp_mult(&ep->desc);
/* Device structure */
usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
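The usbtv change relies on usb_endpoint_maxp() now returning only the 11-bit base packet size, which makes the open-coded 0x07ff mask redundant, while usb_endpoint_maxp_mult() yields the 1-3 transaction multiplier encoded in the upper bits of wMaxPacketSize for high-bandwidth endpoints. A small illustrative helper, not part of the patch:

	#include <linux/usb.h>

	/* Bytes an isochronous/interrupt endpoint can move per service interval. */
	static unsigned int ep_bytes_per_interval(const struct usb_endpoint_descriptor *desc)
	{
		unsigned int base = usb_endpoint_maxp(desc);		/* bits 10:0 */
		unsigned int mult = usb_endpoint_maxp_mult(desc);	/* 1..3 */

		return base * mult;
	}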
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 95b5f4319ec2..3668a04359e8 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -718,8 +718,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
*/
if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
ret = usb_control_msg(usbtv->udev,
- usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
if (ret < 0)
goto error;
diff --git a/drivers/media/usb/usbvision/usbvision-cards.h b/drivers/media/usb/usbvision/usbvision-cards.h
index a51cc1185cce..07ec83512743 100644
--- a/drivers/media/usb/usbvision/usbvision-cards.h
+++ b/drivers/media/usb/usbvision/usbvision-cards.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define XANBOO 0
#define BELKIN_VIDEOBUS_II 1
#define BELKIN_VIDEOBUS 2
diff --git a/drivers/media/usb/uvc/Makefile b/drivers/media/usb/uvc/Makefile
index c26d12fdb8f4..a4fe5b5d533f 100644
--- a/drivers/media/usb/uvc/Makefile
+++ b/drivers/media/usb/uvc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
uvc_status.o uvc_isight.o uvc_debugfs.o
ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 6d22b22cb35b..28b91b7d756f 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2230,7 +2230,7 @@ static int uvc_reset_resume(struct usb_interface *intf)
* Module parameters
*/
-static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
+static int uvc_clock_param_get(char *buffer, const struct kernel_param *kp)
{
if (uvc_clock_param == CLOCK_MONOTONIC)
return sprintf(buffer, "CLOCK_MONOTONIC");
@@ -2238,7 +2238,7 @@ static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "CLOCK_REALTIME");
}
-static int uvc_clock_param_set(const char *val, struct kernel_param *kp)
+static int uvc_clock_param_set(const char *val, const struct kernel_param *kp)
{
if (strncasecmp(val, "clock_", strlen("clock_")) == 0)
val += strlen("clock_");
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 34c7ee6cc9e5..05398784d1c8 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _USB_VIDEO_H_
#define _USB_VIDEO_H_
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 4ff8d0aed015..1d888661fd03 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -209,10 +209,8 @@ static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
int status;
unsigned char *transfer_buffer = kmalloc(size, GFP_KERNEL);
- if (!transfer_buffer) {
- dev_err(&udev->dev, "kmalloc(%d) failed\n", size);
+ if (!transfer_buffer)
return -ENOMEM;
- }
memcpy(transfer_buffer, cp, size);
@@ -387,9 +385,9 @@ static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
vb);
int rc;
- DBG("%s, field=%d, fmt name = %s\n", __func__, field, cam->fmt != NULL ?
- cam->fmt->name : "");
- if (cam->fmt == NULL)
+ DBG("%s, field=%d, fmt name = %s\n", __func__, field,
+ cam->fmt ? cam->fmt->name : "");
+ if (!cam->fmt)
return -EINVAL;
buf->vb.size = cam->width * cam->height * (cam->fmt->depth >> 3);
@@ -789,7 +787,7 @@ static int zr364xx_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct zr364xx_camera *cam = video_drvdata(file);
char pixelformat_name[5];
- if (cam == NULL)
+ if (!cam)
return -ENODEV;
if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_JPEG) {
@@ -819,7 +817,7 @@ static int zr364xx_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct zr364xx_camera *cam;
- if (file == NULL)
+ if (!file)
return -ENODEV;
cam = video_drvdata(file);
@@ -981,13 +979,13 @@ static void read_pipe_completion(struct urb *purb)
pipe_info = purb->context;
_DBG("%s %p, status %d\n", __func__, purb, purb->status);
- if (pipe_info == NULL) {
+ if (!pipe_info) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
cam = pipe_info->cam;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": no context!\n");
return;
}
@@ -1071,7 +1069,7 @@ static void zr364xx_stop_readpipe(struct zr364xx_camera *cam)
{
struct zr364xx_pipeinfo *pipe_info;
- if (cam == NULL) {
+ if (!cam) {
printk(KERN_ERR KBUILD_MODNAME ": invalid device\n");
return;
}
@@ -1275,7 +1273,7 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
struct zr364xx_camera *cam = video_drvdata(file);
int ret;
- if (cam == NULL) {
+ if (!cam) {
DBG("%s: cam == NULL\n", __func__);
return -ENODEV;
}
@@ -1359,7 +1357,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
pipe->transfer_buffer = kzalloc(pipe->transfer_size,
GFP_KERNEL);
- if (pipe->transfer_buffer == NULL) {
+ if (!pipe->transfer_buffer) {
DBG("out of memory!\n");
return -ENOMEM;
}
@@ -1375,7 +1373,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
DBG("valloc %p, idx %lu, pdata %p\n",
&cam->buffer.frame[i], i,
cam->buffer.frame[i].lpvbits);
- if (cam->buffer.frame[i].lpvbits == NULL) {
+ if (!cam->buffer.frame[i].lpvbits) {
printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using less frames\n");
break;
}
@@ -1423,11 +1421,9 @@ static int zr364xx_probe(struct usb_interface *intf,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- cam = kzalloc(sizeof(struct zr364xx_camera), GFP_KERNEL);
- if (cam == NULL) {
- dev_err(&udev->dev, "cam: out of memory !\n");
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
return -ENOMEM;
- }
cam->v4l2_dev.release = zr364xx_release;
err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 098ad5fd5231..77303286aef7 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the V4L2 core
#
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index d741a8e0fdac..a7c3464976f2 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -22,8 +22,37 @@
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->bound)
+ return 0;
+
+ return n->ops->bound(n, subdev, asd);
+}
+
+static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ if (!n->ops || !n->ops->unbind)
+ return;
+
+ n->ops->unbind(n, subdev, asd);
+}
+
+static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+{
+ if (!n->ops || !n->ops->complete)
+ return 0;
+
+ return n->ops->complete(n);
+}
+
static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
@@ -60,8 +89,8 @@ static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd)
+static struct v4l2_async_subdev *v4l2_async_find_match(
+ struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd)
{
bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *);
struct v4l2_async_subdev *asd;
@@ -95,22 +124,96 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
return NULL;
}
-static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+/* Find the sub-device notifier registered by a sub-device driver. */
+static struct v4l2_async_notifier *v4l2_async_find_subdev_notifier(
+ struct v4l2_subdev *sd)
{
- int ret;
+ struct v4l2_async_notifier *n;
- if (notifier->bound) {
- ret = notifier->bound(notifier, sd, asd);
- if (ret < 0)
- return ret;
+ list_for_each_entry(n, &notifier_list, list)
+ if (n->sd == sd)
+ return n;
+
+ return NULL;
+}
+
+/* Get v4l2_device related to the notifier if one can be found. */
+static struct v4l2_device *v4l2_async_notifier_find_v4l2_dev(
+ struct v4l2_async_notifier *notifier)
+{
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ return notifier->v4l2_dev;
+}
+
+/*
+ * Return true if all child sub-device notifiers are complete, false otherwise.
+ */
+static bool v4l2_async_notifier_can_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_subdev *sd;
+
+ if (!list_empty(&notifier->waiting))
+ return false;
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier &&
+ !v4l2_async_notifier_can_complete(subdev_notifier))
+ return false;
}
- ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
+ return true;
+}
+
+/*
+ * Complete the master notifier if possible. This is done when all async
+ * sub-devices have been bound; v4l2_device is also available then.
+ */
+static int v4l2_async_notifier_try_complete(
+ struct v4l2_async_notifier *notifier)
+{
+ /* Quick check whether there are still more sub-devices here. */
+ if (!list_empty(&notifier->waiting))
+ return 0;
+
+ /* Check the entire notifier tree; find the root notifier first. */
+ while (notifier->parent)
+ notifier = notifier->parent;
+
+ /* This is root if it has v4l2_dev. */
+ if (!notifier->v4l2_dev)
+ return 0;
+
+ /* Is everything ready? */
+ if (!v4l2_async_notifier_can_complete(notifier))
+ return 0;
+
+ return v4l2_async_notifier_call_complete(notifier);
+}
+
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier);
+
+static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct v4l2_async_notifier *subdev_notifier;
+ int ret;
+
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
if (ret < 0) {
- if (notifier->unbind)
- notifier->unbind(notifier, sd, asd);
+ v4l2_device_unregister_subdev(sd);
return ret;
}
@@ -122,8 +225,55 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
/* Move from the global subdevice list to notifier's done */
list_move(&sd->async_list, &notifier->done);
- if (list_empty(&notifier->waiting) && notifier->complete)
- return notifier->complete(notifier);
+ /*
+ * See if the sub-device has a notifier. If not, return here.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (!subdev_notifier || subdev_notifier->parent)
+ return 0;
+
+ /*
+ * Proceed with checking for the sub-device notifier's async
+ * sub-devices, and return the result. The error will be handled by the
+ * caller.
+ */
+ subdev_notifier->parent = notifier;
+
+ return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+}
+
+/* Test all async sub-devices in a notifier for a match. */
+static int v4l2_async_notifier_try_all_subdevs(
+ struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_subdev *sd;
+
+ if (!v4l2_dev)
+ return 0;
+
+again:
+ list_for_each_entry(sd, &subdev_list, async_list) {
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_async_match_notify() may lead to registering a
+ * new notifier and thus changing the async subdevs
+ * list. In order to proceed safely from here, restart
+ * parsing the list from the beginning.
+ */
+ goto again;
+ }
return 0;
}
@@ -134,24 +284,107 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Subdevice driver will reprobe and put the subdev back onto the list */
list_del_init(&sd->async_list);
sd->asd = NULL;
- sd->dev = NULL;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+/* Unbind all sub-devices in the notifier tree. */
+static void v4l2_async_notifier_unbind_all_subdevs(
+ struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
+
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ struct v4l2_async_notifier *subdev_notifier =
+ v4l2_async_find_subdev_notifier(sd);
+
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ list_move(&sd->async_list, &subdev_list);
+ }
+
+ notifier->parent = NULL;
+}
+
+/* See if an fwnode can be found in a notifier's lists. */
+static bool __v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode)
+{
struct v4l2_async_subdev *asd;
+ struct v4l2_subdev *sd;
+
+ list_for_each_entry(asd, &notifier->waiting, list) {
+ if (asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ list_for_each_entry(sd, &notifier->done, async_list) {
+ if (WARN_ON(!sd->asd))
+ continue;
+
+ if (sd->asd->match_type != V4L2_ASYNC_MATCH_FWNODE)
+ continue;
+
+ if (sd->asd->match.fwnode.fwnode == fwnode)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Find out whether an async sub-device was set up for an fwnode already or
+ * whether it exists in a given notifier before @this_index.
+ */
+static bool v4l2_async_notifier_fwnode_has_async_subdev(
+ struct v4l2_async_notifier *notifier, struct fwnode_handle *fwnode,
+ unsigned int this_index)
+{
+ unsigned int j;
+
+ lockdep_assert_held(&list_lock);
+
+ /* Check that an fwnode is not being added more than once. */
+ for (j = 0; j < this_index; j++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[this_index];
+ struct v4l2_async_subdev *other_asd = notifier->subdevs[j];
+
+ if (other_asd->match_type == V4L2_ASYNC_MATCH_FWNODE &&
+ asd->match.fwnode.fwnode ==
+ other_asd->match.fwnode.fwnode)
+ return true;
+ }
+
+ /* Check that the fwnode does not exist in other notifiers. */

+ list_for_each_entry(notifier, &notifier_list, list)
+ if (__v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, fwnode))
+ return true;
+
+ return false;
+}
+
+static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+{
+ struct device *dev =
+ notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
+ struct v4l2_async_subdev *asd;
+ int ret;
int i;
- if (!v4l2_dev || !notifier->num_subdevs ||
- notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ if (notifier->num_subdevs > V4L2_MAX_SUBDEVS)
return -EINVAL;
- notifier->v4l2_dev = v4l2_dev;
INIT_LIST_HEAD(&notifier->waiting);
INIT_LIST_HEAD(&notifier->done);
+ mutex_lock(&list_lock);
+
for (i = 0; i < notifier->num_subdevs; i++) {
asd = notifier->subdevs[i];
@@ -159,32 +392,32 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
case V4L2_ASYNC_MATCH_CUSTOM:
case V4L2_ASYNC_MATCH_DEVNAME:
case V4L2_ASYNC_MATCH_I2C:
+ break;
case V4L2_ASYNC_MATCH_FWNODE:
+ if (v4l2_async_notifier_fwnode_has_async_subdev(
+ notifier, asd->match.fwnode.fwnode, i)) {
+ dev_err(dev,
+ "fwnode has already been registered or in notifier's subdev list\n");
+ ret = -EEXIST;
+ goto err_unlock;
+ }
break;
default:
- dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
- "Invalid match type %u on %p\n",
+ dev_err(dev, "Invalid match type %u on %p\n",
asd->match_type, asd);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unlock;
}
list_add_tail(&asd->list, &notifier->waiting);
}
- mutex_lock(&list_lock);
+ ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ if (ret < 0)
+ goto err_unbind;
- list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
- int ret;
-
- asd = v4l2_async_belongs(notifier, sd);
- if (!asd)
- continue;
-
- ret = v4l2_async_test_notify(notifier, sd, asd);
- if (ret < 0) {
- mutex_unlock(&list_lock);
- return ret;
- }
- }
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret < 0)
+ goto err_unbind;
/* Keep also completed notifiers on the list */
list_add(&notifier->list, &notifier_list);
@@ -192,90 +425,114 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * On failure, unbind all sub-devices registered through this notifier.
+ */
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
+
+err_unlock:
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+
+int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
+{
+ int ret;
+
+ if (WARN_ON(!v4l2_dev || notifier->sd))
+ return -EINVAL;
+
+ notifier->v4l2_dev = v4l2_dev;
+
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->v4l2_dev = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
- struct v4l2_subdev *sd, *tmp;
- unsigned int notif_n_subdev = notifier->num_subdevs;
- unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
- struct device **dev;
- int i = 0;
+ int ret;
- if (!notifier->v4l2_dev)
- return;
+ if (WARN_ON(!sd || notifier->v4l2_dev))
+ return -EINVAL;
- dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(notifier->v4l2_dev->dev,
- "Failed to allocate device cache!\n");
- }
+ notifier->sd = sd;
- mutex_lock(&list_lock);
+ ret = __v4l2_async_notifier_register(notifier);
+ if (ret)
+ notifier->sd = NULL;
- list_del(&notifier->list);
+ return ret;
+}
+EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
- list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
- struct device *d;
+static void __v4l2_async_notifier_unregister(
+ struct v4l2_async_notifier *notifier)
+{
+ if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ return;
- d = get_device(sd->dev);
+ v4l2_async_notifier_unbind_all_subdevs(notifier);
- v4l2_async_cleanup(sd);
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
- /* If we handled USB devices, we'd have to lock the parent too */
- device_release_driver(d);
+ list_del(&notifier->list);
+}
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+{
+ mutex_lock(&list_lock);
- /*
- * Store device at the device cache, in order to call
- * put_device() on the final step
- */
- if (dev)
- dev[i++] = d;
- else
- put_device(d);
- }
+ __v4l2_async_notifier_unregister(notifier);
mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(v4l2_async_notifier_unregister);
- /*
- * Call device_attach() to reprobe devices
- *
- * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
- * executed.
- */
- while (i--) {
- struct device *d = dev[i];
-
- if (d && device_attach(d) < 0) {
- const char *name = "(none)";
- int lock = device_trylock(d);
-
- if (lock && d->driver)
- name = d->driver->name;
- dev_err(d, "Failed to re-probe to %s\n", name);
- if (lock)
- device_unlock(d);
+void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+{
+ unsigned int i;
+
+ if (!notifier || !notifier->max_subdevs)
+ return;
+
+ for (i = 0; i < notifier->num_subdevs; i++) {
+ struct v4l2_async_subdev *asd = notifier->subdevs[i];
+
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_FWNODE:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ break;
+ default:
+ WARN_ON_ONCE(true);
+ break;
}
- put_device(d);
+
+ kfree(asd);
}
- kvfree(dev);
- notifier->v4l2_dev = NULL;
+ notifier->max_subdevs = 0;
+ notifier->num_subdevs = 0;
- /*
- * Don't care about the waiting list, it is initialised and populated
- * upon notifier registration.
- */
+ kvfree(notifier->subdevs);
+ notifier->subdevs = NULL;
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
+ struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
+ int ret;
/*
* No reference taken. The reference is held by the device
@@ -290,41 +547,74 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
INIT_LIST_HEAD(&sd->async_list);
list_for_each_entry(notifier, &notifier_list, list) {
- struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
- if (asd) {
- int ret = v4l2_async_test_notify(notifier, sd, asd);
- mutex_unlock(&list_lock);
- return ret;
- }
+ struct v4l2_device *v4l2_dev =
+ v4l2_async_notifier_find_v4l2_dev(notifier);
+ struct v4l2_async_subdev *asd;
+
+ if (!v4l2_dev)
+ continue;
+
+ asd = v4l2_async_find_match(notifier, sd);
+ if (!asd)
+ continue;
+
+ ret = v4l2_async_match_notify(notifier, notifier->v4l2_dev, sd,
+ asd);
+ if (ret)
+ goto err_unbind;
+
+ ret = v4l2_async_notifier_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
+
+ goto out_unlock;
}
/* None matched, wait for hot-plugging */
list_add(&sd->async_list, &subdev_list);
+out_unlock:
mutex_unlock(&list_lock);
return 0;
+
+err_unbind:
+ /*
+ * Complete failed. Unbind the sub-devices bound through registering
+ * this async sub-device.
+ */
+ subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ if (subdev_notifier)
+ v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+
+ if (sd->asd)
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
+ mutex_unlock(&list_lock);
+
+ return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_notifier *notifier = sd->notifier;
+ mutex_lock(&list_lock);
- if (!sd->asd) {
- if (!list_empty(&sd->async_list))
- v4l2_async_cleanup(sd);
- return;
- }
+ __v4l2_async_notifier_unregister(sd->subdev_notifier);
+ v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ kfree(sd->subdev_notifier);
+ sd->subdev_notifier = NULL;
- mutex_lock(&list_lock);
+ if (sd->asd) {
+ struct v4l2_async_notifier *notifier = sd->notifier;
- list_add(&sd->asd->list, &notifier->waiting);
+ list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(sd);
+ v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ }
- if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
}
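After this rework a bridge driver no longer fills in .bound/.unbind/.complete on the notifier directly; it points notifier->ops at an operations table, which the v4l2_async_notifier_call_*() helpers above dispatch through (a missing table is tolerated). A hedged sketch of the driver side; the layout of struct v4l2_async_notifier_operations is inferred from those calls, and my_bridge is a hypothetical driver structure:

	static int my_notifier_bound(struct v4l2_async_notifier *notifier,
				     struct v4l2_subdev *subdev,
				     struct v4l2_async_subdev *asd)
	{
		/* Called once per matched sub-device: create links, remember subdev. */
		return 0;
	}

	static int my_notifier_complete(struct v4l2_async_notifier *notifier)
	{
		/* All async sub-devices are bound; register the video device nodes. */
		return 0;
	}

	static const struct v4l2_async_notifier_operations my_notifier_ops = {
		.bound = my_notifier_bound,
		.complete = my_notifier_complete,
	};

	static int my_bridge_register_notifier(struct my_bridge *bridge)
	{
		bridge->notifier.ops = &my_notifier_ops;
		return v4l2_async_notifier_register(&bridge->v4l2_dev,
						    &bridge->notifier);
	}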
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index dd1db678718c..cbb2ef43945f 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1227,6 +1227,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
+static u32 user_flags(const struct v4l2_ctrl *ctrl)
+{
+ u32 flags = ctrl->flags;
+
+ if (ctrl->is_ptr)
+ flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+
+ return flags;
+}
+
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
memset(ev->reserved, 0, sizeof(ev->reserved));
@@ -1234,7 +1244,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
ev->u.ctrl.type = ctrl->type;
- ev->u.ctrl.flags = ctrl->flags;
+ ev->u.ctrl.flags = user_flags(ctrl);
if (ctrl->is_ptr)
ev->u.ctrl.value64 = 0;
else
@@ -2003,10 +2013,6 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, err);
return NULL;
}
- if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
- handler_set_err(hdl, -ERANGE);
- return NULL;
- }
if (is_array &&
(type == V4L2_CTRL_TYPE_BUTTON ||
type == V4L2_CTRL_TYPE_CTRL_CLASS)) {
@@ -2577,10 +2583,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
else
qc->id = ctrl->id;
strlcpy(qc->name, ctrl->name, sizeof(qc->name));
- qc->flags = ctrl->flags;
+ qc->flags = user_flags(ctrl);
qc->type = ctrl->type;
- if (ctrl->is_ptr)
- qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
qc->elem_size = ctrl->elem_size;
qc->elems = ctrl->elems;
qc->nr_of_dims = ctrl->nr_of_dims;
@@ -2818,7 +2822,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
- return list_empty(&hdl->ctrl_refs) ? -EINVAL : 0;
+ return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
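The new user_flags() helper makes sure pointer-type (payload-carrying) controls always report V4L2_CTRL_FLAG_HAS_PAYLOAD, both in VIDIOC_QUERY_EXT_CTRL results and in control-change events. From userspace the flag tells an application to supply a payload buffer rather than read value/value64; an illustrative query, not part of the patch:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Query one control and report whether it carries a payload. */
	static int ctrl_has_payload(int fd, __u32 id)
	{
		struct v4l2_query_ext_ctrl qc;

		memset(&qc, 0, sizeof(qc));
		qc.id = id;
		if (ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &qc) < 0)
			return -1;

		return (qc.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD) ? 1 : 0;
	}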
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 40b2fbfe8865..681b192420d9 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -19,6 +19,7 @@
*/
#include <linux/acpi.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/property.h>
@@ -26,7 +27,9 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
enum v4l2_fwnode_bus_type {
V4L2_FWNODE_BUS_TYPE_GUESS = 0,
@@ -181,25 +184,6 @@ v4l2_fwnode_endpoint_parse_csi1_bus(struct fwnode_handle *fwnode,
vep->bus_type = V4L2_MBUS_CSI1;
}
-/**
- * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- * @vep: pointer to the V4L2 fwnode data structure
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * NOTE: This function does not parse properties the size of which is variable
- * without a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in
- * new drivers instead.
- *
- * Return: 0 on success or a negative error code on failure.
- */
int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint *vep)
{
@@ -239,14 +223,6 @@ int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
-/*
- * v4l2_fwnode_endpoint_free() - free the V4L2 fwnode acquired by
- * v4l2_fwnode_endpoint_alloc_parse()
- * @vep - the V4L2 fwnode the resources of which are to be released
- *
- * It is safe to call this function with NULL argument or on a V4L2 fwnode the
- * parsing of which failed.
- */
void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
{
if (IS_ERR_OR_NULL(vep))
@@ -257,29 +233,6 @@ void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
-/**
- * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
- * @fwnode: pointer to the endpoint's fwnode handle
- *
- * All properties are optional. If none are found, we don't set any flags. This
- * means the port has a static configuration and no properties have to be
- * specified explicitly. If any properties that identify the bus as parallel
- * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
- * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
- * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
- * reference to @fwnode.
- *
- * v4l2_fwnode_endpoint_alloc_parse() has two important differences to
- * v4l2_fwnode_endpoint_parse():
- *
- * 1. It also parses variable size data.
- *
- * 2. The memory it has allocated to store the variable size data must be freed
- * using v4l2_fwnode_endpoint_free() when no longer needed.
- *
- * Return: Pointer to v4l2_fwnode_endpoint if successful, on an error pointer
- * on error.
- */
struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
struct fwnode_handle *fwnode)
{
@@ -322,24 +275,6 @@ out_err:
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
-/**
- * v4l2_fwnode_endpoint_parse_link() - parse a link between two endpoints
- * @__fwnode: pointer to the endpoint's fwnode at the local end of the link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Fill the link structure with the local and remote nodes and port numbers.
- * The local_node and remote_node fields are set to point to the local and
- * remote port's parent nodes respectively (the port parent node being the
- * parent node of the port node if that node isn't a 'ports' node, or the
- * grand-parent node of the port node otherwise).
- *
- * A reference is taken to both the local and remote nodes, the caller must use
- * v4l2_fwnode_endpoint_put_link() to drop the references when done with the
- * link.
- *
- * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be
- * found.
- */
int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
struct v4l2_fwnode_link *link)
{
@@ -374,13 +309,6 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
-/**
- * v4l2_fwnode_put_link() - drop references to nodes in a link
- * @link: pointer to the V4L2 fwnode link data structure
- *
- * Drop references to the local and remote nodes in the link. This function
- * must be called on every link parsed with v4l2_fwnode_parse_link().
- */
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
{
fwnode_handle_put(link->local_node);
@@ -388,6 +316,630 @@ void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
+static int v4l2_async_notifier_realloc(struct v4l2_async_notifier *notifier,
+ unsigned int max_subdevs)
+{
+ struct v4l2_async_subdev **subdevs;
+
+ if (max_subdevs <= notifier->max_subdevs)
+ return 0;
+
+ subdevs = kvmalloc_array(
+ max_subdevs, sizeof(*notifier->subdevs),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!subdevs)
+ return -ENOMEM;
+
+ if (notifier->subdevs) {
+ memcpy(subdevs, notifier->subdevs,
+ sizeof(*subdevs) * notifier->num_subdevs);
+
+ kvfree(notifier->subdevs);
+ }
+
+ notifier->subdevs = subdevs;
+ notifier->max_subdevs = max_subdevs;
+
+ return 0;
+}
+
+static int v4l2_async_notifier_fwnode_parse_endpoint(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint, unsigned int asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct v4l2_async_subdev *asd;
+ struct v4l2_fwnode_endpoint *vep;
+ int ret = 0;
+
+ asd = kzalloc(asd_struct_size, GFP_KERNEL);
+ if (!asd)
+ return -ENOMEM;
+
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode.fwnode =
+ fwnode_graph_get_remote_port_parent(endpoint);
+ if (!asd->match.fwnode.fwnode) {
+ dev_warn(dev, "bad remote port parent\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ vep = v4l2_fwnode_endpoint_alloc_parse(endpoint);
+ if (IS_ERR(vep)) {
+ ret = PTR_ERR(vep);
+ dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n",
+ ret);
+ goto out_err;
+ }
+
+ ret = parse_endpoint ? parse_endpoint(dev, vep, asd) : 0;
+ if (ret == -ENOTCONN)
+ dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep->base.port,
+ vep->base.id);
+ else if (ret < 0)
+ dev_warn(dev,
+ "driver could not parse port@%u/endpoint@%u (%d)\n",
+ vep->base.port, vep->base.id, ret);
+ v4l2_fwnode_endpoint_free(vep);
+ if (ret < 0)
+ goto out_err;
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ notifier->num_subdevs++;
+
+ return 0;
+
+out_err:
+ fwnode_handle_put(asd->match.fwnode.fwnode);
+ kfree(asd);
+
+ return ret == -ENOTCONN ? 0 : ret;
+}
+
+static int __v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port, bool has_port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ struct fwnode_handle *fwnode;
+ unsigned int max_subdevs = notifier->max_subdevs;
+ int ret;
+
+ if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev)))
+ return -EINVAL;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret) {
+ fwnode_handle_put(fwnode);
+ return ret;
+ }
+
+ if (ep.port != port)
+ continue;
+ }
+ max_subdevs++;
+ }
+
+ /* No subdevs to add? Return here. */
+ if (max_subdevs == notifier->max_subdevs)
+ return 0;
+
+ ret = v4l2_async_notifier_realloc(notifier, max_subdevs);
+ if (ret)
+ return ret;
+
+ for (fwnode = NULL; (fwnode = fwnode_graph_get_next_endpoint(
+ dev_fwnode(dev), fwnode)); ) {
+ struct fwnode_handle *dev_fwnode;
+ bool is_available;
+
+ dev_fwnode = fwnode_graph_get_port_parent(fwnode);
+ is_available = fwnode_device_is_available(dev_fwnode);
+ fwnode_handle_put(dev_fwnode);
+ if (!is_available)
+ continue;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (has_port) {
+ struct fwnode_endpoint ep;
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &ep);
+ if (ret)
+ break;
+
+ if (ep.port != port)
+ continue;
+ }
+
+ ret = v4l2_async_notifier_fwnode_parse_endpoint(
+ dev, notifier, fwnode, asd_struct_size, parse_endpoint);
+ if (ret < 0)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_endpoints(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, 0, false, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+
+int v4l2_async_notifier_parse_fwnode_endpoints_by_port(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size, unsigned int port,
+ int (*parse_endpoint)(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd))
+{
+ return __v4l2_async_notifier_parse_fwnode_endpoints(
+ dev, notifier, asd_struct_size, port, true, parse_endpoint);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints_by_port);
+
+/*
+ * v4l2_fwnode_reference_parse - parse references for async sub-devices
+ * @dev: the device node the properties of which are parsed for references
+ * @notifier: the async notifier where the async subdevs will be added
+ * @prop: the name of the property
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries were found
+ * -ENOMEM if memory allocation failed
+ * -EINVAL if property parsing failed
+ */
+static int v4l2_fwnode_reference_parse(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop)
+{
+ struct fwnode_reference_args args;
+ unsigned int index;
+ int ret;
+
+ for (index = 0;
+ !(ret = fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args));
+ index++)
+ fwnode_handle_put(args.fwnode);
+
+ if (!index)
+ return -ENOENT;
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (ret != -ENOENT && ret != -ENODATA)
+ return ret;
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return ret;
+
+ for (index = 0; !fwnode_property_get_reference_args(
+ dev_fwnode(dev), prop, NULL, 0, index, &args);
+ index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(*asd), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = args.fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return 0;
+
+error:
+ fwnode_handle_put(args.fwnode);
+ return ret;
+}
+
+/*
+ * v4l2_fwnode_reference_get_int_prop - parse a reference with integer
+ * arguments
+ * @fwnode: fwnode to read @prop from
+ * @prop: the name of the property
+ * @index: the index of the reference to get
+ * @props: the array of integer property names
+ * @nprops: the number of integer property names in @props
+ *
+ * First find an fwnode referred to by the reference at @index in @prop.
+ *
+ * Then, under that fwnode, descend one level per entry in @props: at each
+ * step, pick the child node whose value for that property equals the
+ * reference's integer argument at the same index, @nprops times in total.
+ *
+ * The child fwnode reached at the end of the iteration is then returned to the
+ * caller.
+ *
+ * The core reason for this is that you cannot refer to just any node in ACPI.
+ * So to refer to an endpoint (easy in DT) you need to refer to a device, then
+ * provide a list of (property name, property value) tuples where each tuple
+ * uniquely identifies a child node. The first tuple identifies a child directly
+ * underneath the device fwnode, the next tuple identifies a child node
+ * underneath the fwnode identified by the previous tuple, etc. until you
+ * reach the fwnode you need.
+ *
+ * An example with a graph, as defined in Documentation/acpi/dsd/graph.txt:
+ *
+ * Scope (\_SB.PCI0.I2C2)
+ * {
+ * Device (CAM0)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () {
+ * "compatible",
+ * Package () { "nokia,smia" }
+ * },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port0", "PRT0" },
+ * }
+ * })
+ * Name (PRT0, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 0 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP00" },
+ * }
+ * })
+ * Name (EP00, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package() {
+ * \_SB.PCI0.ISP, 4, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * Scope (\_SB.PCI0)
+ * {
+ * Device (ISP)
+ * {
+ * Name (_DSD, Package () {
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "port4", "PRT4" },
+ * }
+ * })
+ *
+ * Name (PRT4, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "port", 4 },
+ * },
+ * ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ * Package () {
+ * Package () { "endpoint0", "EP40" },
+ * }
+ * })
+ *
+ * Name (EP40, Package() {
+ * ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ * Package () {
+ * Package () { "endpoint", 0 },
+ * Package () {
+ * "remote-endpoint",
+ * Package () {
+ * \_SB.PCI0.I2C2.CAM0,
+ * 0, 0
+ * }
+ * },
+ * }
+ * })
+ * }
+ * }
+ *
+ * From the EP40 node under ISP device, you could parse the graph remote
+ * endpoint using v4l2_fwnode_reference_get_int_prop with these arguments:
+ *
+ * @fwnode: fwnode referring to EP40 under ISP.
+ * @prop: "remote-endpoint"
+ * @index: 0
+ * @props: "port", "endpoint"
+ * @nprops: 2
+ *
+ * And you'd get back fwnode referring to EP00 under CAM0.
+ *
+ * The same works the other way around: if you use EP00 under CAM0 as the
+ * fwnode, you'll get fwnode referring to EP40 under ISP.
+ *
+ * The same example in DT syntax would look like this:
+ *
+ * cam: cam0 {
+ * compatible = "nokia,smia";
+ *
+ * port {
+ * port = <0>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&isp 4 0>;
+ * };
+ * };
+ * };
+ *
+ * isp: isp {
+ * ports {
+ * port@4 {
+ * port = <4>;
+ * endpoint {
+ * endpoint = <0>;
+ * remote-endpoint = <&cam 0 0>;
+ * };
+ * };
+ * };
+ * };
+ *
+ * Return: the matching fwnode handle on success,
+ *	    or an ERR_PTR()-encoded error:
+ *	    -ENOENT if no entries (or the property itself) were found
+ *	    -EINVAL if property parsing otherwise failed
+ *	    -ENOMEM if memory allocation failed
+ */
+static struct fwnode_handle *v4l2_fwnode_reference_get_int_prop(
+ struct fwnode_handle *fwnode, const char *prop, unsigned int index,
+ const char * const *props, unsigned int nprops)
+{
+ struct fwnode_reference_args fwnode_args;
+ unsigned int *args = fwnode_args.args;
+ struct fwnode_handle *child;
+ int ret;
+
+ /*
+ * Obtain remote fwnode as well as the integer arguments.
+ *
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return -ENOENT in that case.
+ */
+ ret = fwnode_property_get_reference_args(fwnode, prop, NULL, nprops,
+ index, &fwnode_args);
+ if (ret)
+ return ERR_PTR(ret == -ENODATA ? -ENOENT : ret);
+
+ /*
+ * Find a node in the tree under the referred fwnode corresponding to
+ * the integer arguments.
+ */
+ fwnode = fwnode_args.fwnode;
+ while (nprops--) {
+ u32 val;
+
+ /* Loop over all child nodes under fwnode. */
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_property_read_u32(child, *props, &val))
+ continue;
+
+ /* Found property, see if its value matches. */
+ if (val == *args)
+ break;
+ }
+
+ fwnode_handle_put(fwnode);
+
+ /* No property found; return an error here. */
+ if (!child) {
+ fwnode = ERR_PTR(-ENOENT);
+ break;
+ }
+
+ props++;
+ args++;
+ fwnode = child;
+ }
+
+ return fwnode;
+}
+
+/*
+ * v4l2_fwnode_reference_parse_int_props - parse references for async
+ * sub-devices
+ * @dev: struct device pointer
+ * @notifier: notifier for @dev
+ * @prop: the name of the property
+ * @props: the array of integer property names
+ * @nprops: the number of integer properties
+ *
+ * Use v4l2_fwnode_reference_get_int_prop to find fwnodes referenced by
+ * property @prop, whose integer arguments select matching child nodes via the
+ * properties in @props, and set up V4L2 async sub-devices in the notifier for
+ * those fwnodes.
+ *
+ * While it is technically possible to use this function on DT, it is only
+ * meaningful on ACPI. In DT you can refer to any node in the tree, whereas in
+ * ACPI the references are limited to devices.
+ *
+ * Return: 0 on success
+ * -ENOENT if no entries (or the property itself) were found
+ * -EINVAL if property parsing otherwise failed
+ * -ENOMEM if memory allocation failed
+ */
+static int v4l2_fwnode_reference_parse_int_props(
+ struct device *dev, struct v4l2_async_notifier *notifier,
+ const char *prop, const char * const *props, unsigned int nprops)
+{
+ struct fwnode_handle *fwnode;
+ unsigned int index;
+ int ret;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++)
+ fwnode_handle_put(fwnode);
+
+ /*
+ * Note that right now both -ENODATA and -ENOENT may signal
+ * out-of-bounds access. Return the error in cases other than that.
+ */
+ if (PTR_ERR(fwnode) != -ENOENT && PTR_ERR(fwnode) != -ENODATA)
+ return PTR_ERR(fwnode);
+
+ ret = v4l2_async_notifier_realloc(notifier,
+ notifier->num_subdevs + index);
+ if (ret)
+ return -ENOMEM;
+
+ for (index = 0; !IS_ERR((fwnode = v4l2_fwnode_reference_get_int_prop(
+ dev_fwnode(dev), prop, index, props,
+ nprops))); index++) {
+ struct v4l2_async_subdev *asd;
+
+ if (WARN_ON(notifier->num_subdevs >= notifier->max_subdevs)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ asd = kzalloc(sizeof(struct v4l2_async_subdev), GFP_KERNEL);
+ if (!asd) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ notifier->subdevs[notifier->num_subdevs] = asd;
+ asd->match.fwnode.fwnode = fwnode;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+ }
+
+ return PTR_ERR(fwnode) == -ENOENT ? 0 : PTR_ERR(fwnode);
+
+error:
+ fwnode_handle_put(fwnode);
+ return ret;
+}
+
+int v4l2_async_notifier_parse_fwnode_sensor_common(
+ struct device *dev, struct v4l2_async_notifier *notifier)
+{
+ static const char * const led_props[] = { "led" };
+ static const struct {
+ const char *name;
+ const char * const *props;
+ unsigned int nprops;
+ } props[] = {
+ { "flash-leds", led_props, ARRAY_SIZE(led_props) },
+ { "lens-focus", NULL, 0 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (props[i].props && is_acpi_node(dev_fwnode(dev)))
+ ret = v4l2_fwnode_reference_parse_int_props(
+ dev, notifier, props[i].name,
+ props[i].props, props[i].nprops);
+ else
+ ret = v4l2_fwnode_reference_parse(
+ dev, notifier, props[i].name);
+ if (ret && ret != -ENOENT) {
+ dev_warn(dev, "parsing property \"%s\" failed (%d)\n",
+ props[i].name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_sensor_common);
+
+int v4l2_async_register_subdev_sensor_common(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_notifier *notifier;
+ int ret;
+
+ if (WARN_ON(!sd->dev))
+ return -ENODEV;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ ret = v4l2_async_notifier_parse_fwnode_sensor_common(sd->dev,
+ notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ if (ret < 0)
+ goto out_cleanup;
+
+ ret = v4l2_async_register_subdev(sd);
+ if (ret < 0)
+ goto out_unregister;
+
+ sd->subdev_notifier = notifier;
+
+ return 0;
+
+out_unregister:
+ v4l2_async_notifier_unregister(notifier);
+
+out_cleanup:
+ v4l2_async_notifier_cleanup(notifier);
+ kfree(notifier);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_register_subdev_sensor_common);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index b60a6b0841d1..79614992ee21 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -730,9 +730,12 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
break;
case V4L2_FRMSIZE_TYPE_STEPWISE:
pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
- p->stepwise.min_width, p->stepwise.min_height,
- p->stepwise.step_width, p->stepwise.step_height,
- p->stepwise.max_width, p->stepwise.max_height);
+ p->stepwise.min_width,
+ p->stepwise.min_height,
+ p->stepwise.max_width,
+ p->stepwise.max_height,
+ p->stepwise.step_width,
+ p->stepwise.step_height);
break;
case V4L2_FRMSIZE_TYPE_CONTINUOUS:
/* fall through */
diff --git a/drivers/media/v4l2-core/v4l2-trace.c b/drivers/media/v4l2-core/v4l2-trace.c
index 7416010542c1..95f3b02e1f84 100644
--- a/drivers/media/v4l2-core/v4l2-trace.c
+++ b/drivers/media/v4l2-core/v4l2-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
#include <media/videobuf2-v4l2.h>
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/v4l2-core/vb2-trace.c
index 61e74f5936b3..4c0f39d271f0 100644
--- a/drivers/media/v4l2-core/vb2-trace.c
+++ b/drivers/media/v4l2-core/vb2-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <media/videobuf2-core.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index e88097fbc085..929a601d4cd1 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for memory devices
#
@@ -8,6 +9,7 @@ endif
obj-$(CONFIG_ARM_PL172_MPMC) += pl172.o
obj-$(CONFIG_ATMEL_SDRAMC) += atmel-sdramc.o
obj-$(CONFIG_ATMEL_EBI) += atmel-ebi.o
+obj-$(CONFIG_ARCH_BRCMSTB) += brcmstb_dpfe.o
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
obj-$(CONFIG_OMAP_GPMC) += omap-gpmc.o
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
new file mode 100644
index 000000000000..0a7bdbed3a6f
--- /dev/null
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -0,0 +1,722 @@
+/*
+ * DDR PHY Front End (DPFE) driver for Broadcom set top box SoCs
+ *
+ * Copyright (c) 2017 Broadcom
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/*
+ * This driver provides access to the DPFE interface of Broadcom STB SoCs.
+ * The firmware running on the DCPU inside the DDR PHY can provide current
+ * information about the system's RAM, for instance the DRAM refresh rate.
+ * This can be used as an indirect indicator for the DRAM's temperature.
+ * Slower refresh rate means cooler RAM, higher refresh rate means hotter
+ * RAM.
+ *
+ * Throughout the driver, we use readl_relaxed() and writel_relaxed(), which
+ * already contain the appropriate le32_to_cpu()/cpu_to_le32() calls.
+ *
+ * Note regarding the loading of the firmware image: we use be32_to_cpu()
+ * and le32_to_cpu(), so we can support the following four cases:
+ * - LE kernel + LE firmware image (the most common case)
+ * - LE kernel + BE firmware image
+ * - BE kernel + LE firmware image
+ * - BE kernel + BE firmware image
+ *
+ * The DCPU always runs in big endian mode. The firmware image, however, can
+ * be in either format. Also, communication between host CPU and DCPU is
+ * always in little endian.
+ */
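To make the endianness handling described above concrete, the snippet below is a minimal sketch (the helper name is made up, not part of the driver) of how a single firmware word is brought into CPU byte order once the image's magic value has identified its endianness; it mirrors what __write_firmware() does further down.

/* Illustrative sketch: convert one firmware word based on the image magic. */
static u32 fw_word_to_cpu(u32 word, bool fw_is_big_endian)
{
	/*
	 * be32_to_cpu()/le32_to_cpu() are no-ops when the kernel already runs
	 * in the matching byte order, so all four kernel/firmware combinations
	 * listed above reduce to one of these two calls.
	 */
	return fw_is_big_endian ? be32_to_cpu(word) : le32_to_cpu(word);
}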
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#define DRVNAME "brcmstb-dpfe"
+#define FIRMWARE_NAME "dpfe.bin"
+
+/* DCPU register offsets */
+#define REG_DCPU_RESET 0x0
+#define REG_TO_DCPU_MBOX 0x10
+#define REG_TO_HOST_MBOX 0x14
+
+/* Message RAM */
+#define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32))
+
+/* DRAM Info Offsets & Masks */
+#define DRAM_INFO_INTERVAL 0x0
+#define DRAM_INFO_MR4 0x4
+#define DRAM_INFO_ERROR 0x8
+#define DRAM_INFO_MR4_MASK 0xff
+
+/* DRAM MR4 Offsets & Masks */
+#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */
+#define DRAM_MR4_SR_ABORT 0x3 /* Self Refresh Abort */
+#define DRAM_MR4_PPRE 0x4 /* Post-package repair entry/exit */
+#define DRAM_MR4_TH_OFFS 0x5 /* Thermal Offset; vendor specific */
+#define DRAM_MR4_TUF 0x7 /* Temperature Update Flag */
+
+#define DRAM_MR4_REFRESH_MASK 0x7
+#define DRAM_MR4_SR_ABORT_MASK 0x1
+#define DRAM_MR4_PPRE_MASK 0x1
+#define DRAM_MR4_TH_OFFS_MASK 0x3
+#define DRAM_MR4_TUF_MASK 0x1
+
+/* DRAM Vendor Offsets & Masks */
+#define DRAM_VENDOR_MR5 0x0
+#define DRAM_VENDOR_MR6 0x4
+#define DRAM_VENDOR_MR7 0x8
+#define DRAM_VENDOR_MR8 0xc
+#define DRAM_VENDOR_ERROR 0x10
+#define DRAM_VENDOR_MASK 0xff
+
+/* Reset register bits & masks */
+#define DCPU_RESET_SHIFT 0x0
+#define DCPU_RESET_MASK 0x1
+#define DCPU_CLK_DISABLE_SHIFT 0x2
+
+/* DCPU return codes */
+#define DCPU_RET_ERROR_BIT BIT(31)
+#define DCPU_RET_SUCCESS 0x1
+#define DCPU_RET_ERR_HEADER (DCPU_RET_ERROR_BIT | BIT(0))
+#define DCPU_RET_ERR_INVAL (DCPU_RET_ERROR_BIT | BIT(1))
+#define DCPU_RET_ERR_CHKSUM (DCPU_RET_ERROR_BIT | BIT(2))
+#define DCPU_RET_ERR_COMMAND (DCPU_RET_ERROR_BIT | BIT(3))
+/* This error code is not firmware defined and only used in the driver. */
+#define DCPU_RET_ERR_TIMEDOUT (DCPU_RET_ERROR_BIT | BIT(4))
+
+/* Firmware magic */
+#define DPFE_BE_MAGIC 0xfe1010fe
+#define DPFE_LE_MAGIC 0xfe0101fe
+
+/* Error codes */
+#define ERR_INVALID_MAGIC -1
+#define ERR_INVALID_SIZE -2
+#define ERR_INVALID_CHKSUM -3
+
+/* Message types */
+#define DPFE_MSG_TYPE_COMMAND 1
+#define DPFE_MSG_TYPE_RESPONSE 2
+
+#define DELAY_LOOP_MAX 200000
+
+enum dpfe_msg_fields {
+ MSG_HEADER,
+ MSG_COMMAND,
+ MSG_ARG_COUNT,
+ MSG_ARG0,
+ MSG_CHKSUM,
+ MSG_FIELD_MAX /* Last entry */
+};
+
+enum dpfe_commands {
+ DPFE_CMD_GET_INFO,
+ DPFE_CMD_GET_REFRESH,
+ DPFE_CMD_GET_VENDOR,
+ DPFE_CMD_MAX /* Last entry */
+};
+
+struct dpfe_msg {
+ u32 header;
+ u32 command;
+ u32 arg_count;
+ u32 arg0;
+ u32 chksum; /* This is the sum of all other entries. */
+};
+
+/*
+ * Format of the binary firmware file:
+ *
+ * entry
+ * 0 header
+ * value: 0xfe0101fe <== little endian
+ * 0xfe1010fe <== big endian
+ * 1 sequence:
+ * [31:16] total segments on this build
+ * [15:0] this segment sequence.
+ * 2 FW version
+ * 3 IMEM byte size
+ * 4 DMEM byte size
+ * IMEM
+ * DMEM
+ * last checksum ==> sum of everything
+ */
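Given this layout, the location of the trailing checksum follows directly from the header fields; the helper below is an illustrative sketch (the name is made up) of the pointer arithmetic __verify_firmware() performs when it validates the blob.

/* Illustrative sketch: the checksum word sits after header, IMEM and DMEM. */
static const u32 *fw_chksum_ptr(const void *data, u32 imem_size, u32 dmem_size)
{
	return data + sizeof(struct dpfe_firmware_header) + imem_size + dmem_size;
}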
+struct dpfe_firmware_header {
+ u32 magic;
+ u32 sequence;
+ u32 version;
+ u32 imem_size;
+ u32 dmem_size;
+};
+
+/* Things we only need during initialization. */
+struct init_data {
+ unsigned int dmem_len;
+ unsigned int imem_len;
+ unsigned int chksum;
+ bool is_big_endian;
+};
+
+/* Things we need for as long as we are active. */
+struct private_data {
+ void __iomem *regs;
+ void __iomem *dmem;
+ void __iomem *imem;
+ struct device *dev;
+ unsigned int index;
+ struct mutex lock;
+};
+
+static const char *error_text[] = {
+ "Success", "Header code incorrect", "Unknown command or argument",
+ "Incorrect checksum", "Malformed command", "Timed out",
+};
+
+/* List of supported firmware commands */
+static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 1,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 4,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 1,
+ [MSG_CHKSUM] = 5,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 2,
+ [MSG_ARG_COUNT] = 1,
+ [MSG_ARG0] = 2,
+ [MSG_CHKSUM] = 6,
+ },
+};
+
+static bool is_dcpu_enabled(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+
+ return !(val & DCPU_RESET_MASK);
+}
+
+static void __disable_dcpu(void __iomem *regs)
+{
+ u32 val;
+
+ if (!is_dcpu_enabled(regs))
+ return;
+
+ /* Put DCPU in reset if it's running. */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val |= (1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+}
+
+static void __enable_dcpu(void __iomem *regs)
+{
+ u32 val;
+
+ /* Clear mailbox registers. */
+ writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
+ writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+
+ /* Disable DCPU clock gating */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ /* Take DCPU out of reset */
+ val = readl_relaxed(regs + REG_DCPU_RESET);
+ val &= ~(1 << DCPU_RESET_SHIFT);
+ writel_relaxed(val, regs + REG_DCPU_RESET);
+}
+
+static unsigned int get_msg_chksum(const u32 msg[])
+{
+ unsigned int sum = 0;
+ unsigned int i;
+
+ /* Don't include the last field in the checksum. */
+ for (i = 0; i < MSG_FIELD_MAX - 1; i++)
+ sum += msg[i];
+
+ return sum;
+}
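As a quick cross-check against the command table above: for DPFE_CMD_GET_INFO the first four fields are 1, 1, 1 and 1, so get_msg_chksum() yields 4, matching that entry's MSG_CHKSUM; likewise GET_REFRESH sums to 1 + 2 + 1 + 1 = 5 and GET_VENDOR to 1 + 2 + 1 + 2 = 6. The same sum computed over a response is later compared against the checksum field the firmware writes back.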
+
+static int __send_command(struct private_data *priv, unsigned int cmd,
+ u32 result[])
+{
+ const u32 *msg = dpfe_commands[cmd];
+ void __iomem *regs = priv->regs;
+ unsigned int i, chksum;
+ int ret = 0;
+ u32 resp;
+
+ if (cmd >= DPFE_CMD_MAX)
+ return -1;
+
+ mutex_lock(&priv->lock);
+
+ /* Write command and arguments to message area */
+ for (i = 0; i < MSG_FIELD_MAX; i++)
+ writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+
+ /* Tell DCPU there is a command waiting */
+ writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
+
+ /* Wait for DCPU to process the command */
+ for (i = 0; i < DELAY_LOOP_MAX; i++) {
+ /* Read response code */
+ resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
+ if (resp > 0)
+ break;
+ udelay(5);
+ }
+
+ if (i == DELAY_LOOP_MAX) {
+ resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
+ ret = -ffs(resp);
+ } else {
+ /* Read response data */
+ for (i = 0; i < MSG_FIELD_MAX; i++)
+ result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
+ }
+
+ /* Tell DCPU we are done */
+ writel_relaxed(0, regs + REG_TO_HOST_MBOX);
+
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return ret;
+
+ /* Verify response */
+ chksum = get_msg_chksum(result);
+ if (chksum != result[MSG_CHKSUM])
+ resp = DCPU_RET_ERR_CHKSUM;
+
+ if (resp != DCPU_RET_SUCCESS) {
+ resp &= ~DCPU_RET_ERROR_BIT;
+ ret = -ffs(resp);
+ }
+
+ return ret;
+}
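To illustrate the return-code mapping used above with a hypothetical failure: DCPU_RET_ERR_CHKSUM is DCPU_RET_ERROR_BIT | BIT(2), so after clearing bit 31 the remaining value is 0x4, ffs(0x4) is 3, and -ret indexes error_text[3], "Incorrect checksum". The driver-internal DCPU_RET_ERR_TIMEDOUT maps to error_text[5], "Timed out", the same way.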
+
+/* Ensure that the firmware file loaded meets all the requirements. */
+static int __verify_firmware(struct init_data *init,
+ const struct firmware *fw)
+{
+ const struct dpfe_firmware_header *header = (void *)fw->data;
+ unsigned int dmem_size, imem_size, total_size;
+ bool is_big_endian = false;
+ const u32 *chksum_ptr;
+
+ if (header->magic == DPFE_BE_MAGIC)
+ is_big_endian = true;
+ else if (header->magic != DPFE_LE_MAGIC)
+ return ERR_INVALID_MAGIC;
+
+ if (is_big_endian) {
+ dmem_size = be32_to_cpu(header->dmem_size);
+ imem_size = be32_to_cpu(header->imem_size);
+ } else {
+ dmem_size = le32_to_cpu(header->dmem_size);
+ imem_size = le32_to_cpu(header->imem_size);
+ }
+
+ /* Data and instruction sections are 32 bit words. */
+ if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
+ return ERR_INVALID_SIZE;
+
+ /*
+ * The header + the data section + the instruction section + the
+ * checksum must be equal to the total firmware size.
+ */
+ total_size = dmem_size + imem_size + sizeof(*header) +
+ sizeof(*chksum_ptr);
+ if (total_size != fw->size)
+ return ERR_INVALID_SIZE;
+
+ /* The checksum comes at the very end. */
+ chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;
+
+ init->is_big_endian = is_big_endian;
+ init->dmem_len = dmem_size;
+ init->imem_len = imem_size;
+ init->chksum = (is_big_endian)
+ ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);
+
+ return 0;
+}
+
+/* Verify checksum by reading back the firmware from co-processor RAM. */
+static int __verify_fw_checksum(struct init_data *init,
+ struct private_data *priv,
+ const struct dpfe_firmware_header *header,
+ u32 checksum)
+{
+ u32 magic, sequence, version, sum;
+ u32 __iomem *dmem = priv->dmem;
+ u32 __iomem *imem = priv->imem;
+ unsigned int i;
+
+ if (init->is_big_endian) {
+ magic = be32_to_cpu(header->magic);
+ sequence = be32_to_cpu(header->sequence);
+ version = be32_to_cpu(header->version);
+ } else {
+ magic = le32_to_cpu(header->magic);
+ sequence = le32_to_cpu(header->sequence);
+ version = le32_to_cpu(header->version);
+ }
+
+ sum = magic + sequence + version + init->dmem_len + init->imem_len;
+
+ for (i = 0; i < init->dmem_len / sizeof(u32); i++)
+ sum += readl_relaxed(dmem + i);
+
+ for (i = 0; i < init->imem_len / sizeof(u32); i++)
+ sum += readl_relaxed(imem + i);
+
+ return (sum == checksum) ? 0 : -1;
+}
+
+static int __write_firmware(u32 __iomem *mem, const u32 *fw,
+ unsigned int size, bool is_big_endian)
+{
+ unsigned int i;
+
+ /* Convert size to 32-bit words. */
+ size /= sizeof(u32);
+
+ /* It is recommended to clear the firmware area first. */
+ for (i = 0; i < size; i++)
+ writel_relaxed(0, mem + i);
+
+ /* Now copy it. */
+ if (is_big_endian) {
+ for (i = 0; i < size; i++)
+ writel_relaxed(be32_to_cpu(fw[i]), mem + i);
+ } else {
+ for (i = 0; i < size; i++)
+ writel_relaxed(le32_to_cpu(fw[i]), mem + i);
+ }
+
+ return 0;
+}
+
+static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
+ struct init_data *init)
+{
+ const struct dpfe_firmware_header *header;
+ unsigned int dmem_size, imem_size;
+ struct device *dev = &pdev->dev;
+ bool is_big_endian = false;
+ struct private_data *priv;
+ const struct firmware *fw;
+ const u32 *dmem, *imem;
+ const void *fw_blob;
+ int ret;
+
+ priv = platform_get_drvdata(pdev);
+
+ /*
+ * Skip downloading the firmware if the DCPU is already running and
+ * responding to commands.
+ */
+ if (is_dcpu_enabled(priv->regs)) {
+ u32 response[MSG_FIELD_MAX];
+
+ ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
+ if (!ret)
+ return 0;
+ }
+
+ ret = request_firmware(&fw, FIRMWARE_NAME, dev);
+ /* request_firmware() prints its own error messages. */
+ if (ret)
+ return ret;
+
+ ret = __verify_firmware(init, fw);
+ if (ret)
+ return -EFAULT;
+
+ __disable_dcpu(priv->regs);
+
+ is_big_endian = init->is_big_endian;
+ dmem_size = init->dmem_len;
+ imem_size = init->imem_len;
+
+ /* At the beginning of the firmware blob is a header. */
+ header = (struct dpfe_firmware_header *)fw->data;
+ /* Void pointer to the beginning of the actual firmware. */
+ fw_blob = fw->data + sizeof(*header);
+ /* IMEM comes right after the header. */
+ imem = fw_blob;
+ /* DMEM follows after IMEM. */
+ dmem = fw_blob + imem_size;
+
+ ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
+ if (ret)
+ return ret;
+ ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
+ if (ret)
+ return ret;
+
+ ret = __verify_fw_checksum(init, priv, header, init->chksum);
+ if (ret)
+ return ret;
+
+ __enable_dcpu(priv->regs);
+
+ return 0;
+}
+
+static ssize_t generic_show(unsigned int command, u32 response[],
+ struct device *dev, char *buf)
+{
+ struct private_data *priv;
+ int ret;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return sprintf(buf, "ERROR: driver private data not set\n");
+
+ ret = __send_command(priv, command, response);
+ if (ret < 0)
+ return sprintf(buf, "ERROR: %s\n", error_text[-ret]);
+
+ return 0;
+}
+
+static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ unsigned int info;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
+ if (ret)
+ return ret;
+
+ info = response[MSG_ARG0];
+
+ return sprintf(buf, "%u.%u.%u.%u\n",
+ (info >> 24) & 0xff,
+ (info >> 16) & 0xff,
+ (info >> 8) & 0xff,
+ info & 0xff);
+}
+
+static ssize_t show_refresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ void __iomem *info;
+ struct private_data *priv;
+ unsigned int offset;
+ u8 refresh, sr_abort, ppre, thermal_offs, tuf;
+ u32 mr4;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
+ if (ret)
+ return ret;
+
+ priv = dev_get_drvdata(dev);
+ offset = response[MSG_ARG0];
+ info = priv->dmem + offset;
+
+ mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
+
+ refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
+ sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
+ ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
+ thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
+ tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
+ readl_relaxed(info + DRAM_INFO_INTERVAL),
+ refresh, sr_abort, ppre, thermal_offs, tuf,
+ readl_relaxed(info + DRAM_INFO_ERROR));
+}
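As a purely hypothetical decode of the fields above: if the masked MR4 byte were 0x85 (binary 1000 0101), the output would report refresh = 0x5, sr_abort = 0, ppre = 0, thermal_offs = 0 and tuf = 0x1.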
+
+static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
+ void __iomem *info;
+ unsigned int offset;
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ priv = dev_get_drvdata(dev);
+
+ ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
+ if (ret)
+ return ret;
+
+ offset = response[MSG_ARG0];
+ info = priv->dmem + offset;
+ writel_relaxed(val, info + DRAM_INFO_INTERVAL);
+
+ return count;
+}
+
+static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ u32 response[MSG_FIELD_MAX];
+ struct private_data *priv;
+ void __iomem *info;
+ unsigned int offset;
+ int ret;
+
+ ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
+ if (ret)
+ return ret;
+
+ offset = response[MSG_ARG0];
+ priv = dev_get_drvdata(dev);
+ info = priv->dmem + offset;
+
+ return sprintf(buf, "%#x %#x %#x %#x %#x\n",
+ readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
+ readl_relaxed(info + DRAM_VENDOR_ERROR));
+}
+
+static int brcmstb_dpfe_resume(struct platform_device *pdev)
+{
+ struct init_data init;
+
+ return brcmstb_dpfe_download_firmware(pdev, &init);
+}
+
+static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
+static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
+static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
+static struct attribute *dpfe_attrs[] = {
+ &dev_attr_dpfe_info.attr,
+ &dev_attr_dpfe_refresh.attr,
+ &dev_attr_dpfe_vendor.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(dpfe);
+
+static int brcmstb_dpfe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct private_data *priv;
+ struct device *dpfe_dev;
+ struct init_data init;
+ struct resource *res;
+ u32 index;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ /* Cell index is optional; default to 0 if not present. */
+ ret = of_property_read_u32(dev->of_node, "cell-index", &index);
+ if (ret)
+ index = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "couldn't map DCPU registers\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
+ priv->dmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->dmem)) {
+ dev_err(dev, "Couldn't map DCPU data memory\n");
+ return -ENOENT;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
+ priv->imem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->imem)) {
+ dev_err(dev, "Couldn't map DCPU instruction memory\n");
+ return -ENOENT;
+ }
+
+ ret = brcmstb_dpfe_download_firmware(pdev, &init);
+ if (ret)
+ goto err;
+
+ dpfe_dev = devm_kzalloc(dev, sizeof(*dpfe_dev), GFP_KERNEL);
+ if (!dpfe_dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ priv->dev = dpfe_dev;
+ priv->index = index;
+
+ dpfe_dev->parent = dev;
+ dpfe_dev->groups = dpfe_groups;
+ dpfe_dev->of_node = dev->of_node;
+ dev_set_drvdata(dpfe_dev, priv);
+ dev_set_name(dpfe_dev, "dpfe%u", index);
+
+ ret = device_register(dpfe_dev);
+ if (ret)
+ goto err;
+
+ dev_info(dev, "registered.\n");
+
+ return 0;
+
+err:
+ dev_err(dev, "failed to initialize -- error %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id brcmstb_dpfe_of_match[] = {
+ { .compatible = "brcm,dpfe-cpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
+
+static struct platform_driver brcmstb_dpfe_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = brcmstb_dpfe_of_match,
+ },
+ .probe = brcmstb_dpfe_probe,
+ .resume = brcmstb_dpfe_resume,
+};
+
+module_platform_driver(brcmstb_dpfe_driver);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 7059bbda2fac..a385a35c7de9 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1075,11 +1075,33 @@ int gpmc_configure(int cmd, int wval)
}
EXPORT_SYMBOL(gpmc_configure);
-void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+static bool gpmc_nand_writebuffer_empty(void)
+{
+ if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
+ return true;
+
+ return false;
+}
+
+static struct gpmc_nand_ops nand_ops = {
+ .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
+};
+
+/**
+ * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
+ * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @cs: GPMC chip select number on which the NAND sits. The
+ * register map returned will be specific to this chip select.
+ *
+ * Returns NULL on error e.g. invalid cs.
+ */
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
{
int i;
- reg->gpmc_status = NULL; /* deprecated */
+ if (cs >= gpmc_cs_num)
+ return NULL;
+
reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
@@ -1111,34 +1133,6 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
i * GPMC_BCH_SIZE;
}
-}
-
-static bool gpmc_nand_writebuffer_empty(void)
-{
- if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
- return true;
-
- return false;
-}
-
-static struct gpmc_nand_ops nand_ops = {
- .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
-};
-
-/**
- * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
- * @regs: the GPMC NAND register map exclusive for NAND use.
- * @cs: GPMC chip select number on which the NAND sits. The
- * register map returned will be specific to this chip select.
- *
- * Returns NULL on error e.g. invalid cs.
- */
-struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
-{
- if (cs >= gpmc_cs_num)
- return NULL;
-
- gpmc_update_nand_reg(reg, cs);
return &nand_ops;
}
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index c2cb671ffc4a..b44e8627a5e0 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
tegra-mc-y := mc.o
tegra-mc-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30.o
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 22de7f5ed032..57b13dfbd21e 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1492,9 +1492,9 @@ static int msb_ftl_scan(struct msb_data *msb)
return 0;
}
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
{
- struct msb_data *msb = (struct msb_data *)data;
+ struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
@@ -1514,8 +1514,7 @@ static void msb_cache_discard(struct msb_data *msb)
static int msb_cache_init(struct msb_data *msb)
{
- setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
- (unsigned long)msb);
+ timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
if (!msb->cache)
msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
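This hunk follows the tree-wide timer API conversion: the callback now receives the struct timer_list pointer and recovers its containing structure with from_timer(), so the (unsigned long) cookie previously passed to setup_timer() disappears. A generic sketch of the pattern, with made-up names:

#include <linux/timer.h>

struct my_data {
	struct timer_list timer;
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the embedded timer member. */
	struct my_data *d = from_timer(d, t, timer);

	(void)d;	/* act on the containing structure here */
}

static void my_init(struct my_data *d)
{
	/* Replaces setup_timer(&d->timer, my_timer_fn, (unsigned long)d). */
	timer_setup(&d->timer, my_timer_fn, 0);
}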
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index 491c9557441d..1abaa03ee68c 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for MemoryStick host controller drivers
#
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 48db922075e2..bcdca9fbef51 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -59,6 +59,7 @@ struct jmb38x_ms_host {
unsigned int block_pos;
unsigned long timeout_jiffies;
struct timer_list timer;
+ struct memstick_host *msh;
struct memstick_request *req;
unsigned char cmd_flags;
unsigned char io_pos;
@@ -592,10 +593,10 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void jmb38x_ms_abort(unsigned long data)
+static void jmb38x_ms_abort(struct timer_list *t)
{
- struct memstick_host *msh = (struct memstick_host *)data;
- struct jmb38x_ms_host *host = memstick_priv(msh);
+ struct jmb38x_ms_host *host = from_timer(host, t, timer);
+ struct memstick_host *msh = host->msh;
unsigned long flags;
dev_dbg(&host->chip->pdev->dev, "abort\n");
@@ -878,6 +879,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
return NULL;
host = memstick_priv(msh);
+ host->msh = msh;
host->chip = jm;
host->addr = ioremap(pci_resource_start(jm->pdev, cnt),
pci_resource_len(jm->pdev, cnt));
@@ -897,7 +899,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
- setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh);
+ timer_setup(&host->timer, jmb38x_ms_abort, 0);
if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id,
msh))
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index d5cfb503b9d6..627d6e62fe31 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -616,9 +616,9 @@ static void r592_update_card_detect(struct r592_device *dev)
}
/* Timer routine that fires 1 second after last card detection event, */
-static void r592_detect_timer(long unsigned int data)
+static void r592_detect_timer(struct timer_list *t)
{
- struct r592_device *dev = (struct r592_device *)data;
+ struct r592_device *dev = from_timer(dev, t, detect_timer);
r592_update_card_detect(dev);
memstick_detect_change(dev->host);
}
@@ -770,8 +770,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&dev->io_thread_lock);
init_completion(&dev->dma_done);
INIT_KFIFO(dev->pio_fifo);
- setup_timer(&dev->detect_timer,
- r592_detect_timer, (long unsigned int)dev);
+ timer_setup(&dev->detect_timer, r592_detect_timer, 0);
/* Host initialization */
host->caps = MEMSTICK_CAP_PAR4;
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 7bafa72f8f57..bed205849d02 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -538,9 +538,9 @@ static int tifm_ms_set_param(struct memstick_host *msh,
return 0;
}
-static void tifm_ms_abort(unsigned long data)
+static void tifm_ms_abort(struct timer_list *t)
{
- struct tifm_ms *host = (struct tifm_ms *)data;
+ struct tifm_ms *host = from_timer(host, t, timer);
dev_dbg(&host->dev->dev, "status %x\n",
readl(host->dev->addr + SOCK_MS_STATUS));
@@ -575,7 +575,7 @@ static int tifm_ms_probe(struct tifm_dev *sock)
host->dev = sock;
host->timeout_jiffies = msecs_to_jiffies(1000);
- setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_ms_abort, 0);
tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh);
msh->request = tifm_ms_submit_req;
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index d182a24b3195..e2d98b5c6f98 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Fusion MPT drivers; recognized debug defines...
# enable verbose logging
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 11c0f461320e..a575545d681f 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index d9bcfba6b049..4e9c0ce94f27 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
index 7d663ce76f8c..bdea95e0cde2 100644
--- a/drivers/message/fusion/lsi/mpi_fc.h
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index 4295d062caa7..bc6326ff2f22 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 19fb21b8f0ce..c249f2994fc1 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
index f41fcb69b359..d06f9928684a 100644
--- a/drivers/message/fusion/lsi/mpi_lan.h
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_log_fc.h b/drivers/message/fusion/lsi/mpi_log_fc.h
index 03be8b217709..f1e75dd5dd58 100644
--- a/drivers/message/fusion/lsi/mpi_log_fc.h
+++ b/drivers/message/fusion/lsi/mpi_log_fc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation. All rights reserved.
*
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index f62960b5d527..27fe17a75eaa 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/***************************************************************************
* *
* Copyright (c) 2000-2008 LSI Corporation. All rights reserved. *
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
index add60cc85be1..36688a921ef2 100644
--- a/drivers/message/fusion/lsi/mpi_raid.h
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2001-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
index ab410036bbfc..56013f288aaa 100644
--- a/drivers/message/fusion/lsi/mpi_sas.h
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2004-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
index c3dea7f6909d..97e6eead6cb4 100644
--- a/drivers/message/fusion/lsi/mpi_targ.h
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
index 53cd715aa7e4..b11456fb8277 100644
--- a/drivers/message/fusion/lsi/mpi_tool.h
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2001-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
index 888b26dbc413..073e637cfdb4 100644
--- a/drivers/message/fusion/lsi/mpi_type.h
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2008 LSI Corporation.
*
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 84eab28665f3..7a93400eea2a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -99,7 +99,7 @@ module_param(mpt_channel_mapping, int, 0);
MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
static int mpt_debug_level;
-static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
+static int mpt_set_debug_level(const char *val, const struct kernel_param *kp);
module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
&mpt_debug_level, 0600);
MODULE_PARM_DESC(mpt_debug_level,
@@ -242,7 +242,7 @@ pci_enable_io_access(struct pci_dev *pdev)
pci_write_config_word(pdev, PCI_COMMAND, command_reg);
}
-static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
+static int mpt_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
MPT_ADAPTER *ioc;
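The signature change here is part of the move to const struct kernel_param pointers for module parameter callbacks; a minimal sketch of such a callback, with made-up names:

#include <linux/moduleparam.h>

static int my_level;

static int my_set_level(const char *val, const struct kernel_param *kp)
{
	/* param_set_int() takes the same const kernel_param pointer. */
	int ret = param_set_int(val, kp);

	if (!ret)
		pr_debug("my_level is now %d\n", my_level);
	return ret;
}
module_param_call(my_level, my_set_level, param_get_int, &my_level, 0600);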
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
index 28e478879284..2205dcab0adb 100644
--- a/drivers/message/fusion/mptdebug.h
+++ b/drivers/message/fusion/mptdebug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/message/fusion/mptdebug.h
* For use with LSI PCI chip/adapter(s)
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index fc5e4fef89d2..1d20a800e967 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -510,6 +510,19 @@ config INTEL_SOC_PMIC_CHTWC
available before any devices using it are probed. This option also
causes the designware-i2c driver to be builtin for the same reason.
+config INTEL_SOC_PMIC_CHTDC_TI
+ tristate "Support for Intel Cherry Trail Dollar Cove TI PMIC"
+ depends on GPIOLIB
+ depends on I2C
+ depends on ACPI
+ depends on X86
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Select this option for supporting the Dollar Cove (TI version) PMIC
+ device found on some Intel Cherry Trail systems.
+
config MFD_INTEL_LPSS
tristate
select COMMON_CLK
@@ -1057,6 +1070,22 @@ config MFD_SMSC
To compile this driver as a module, choose M here: the
module will be called smsc.
+config MFD_SC27XX_PMIC
+ tristate "Spreadtrum SC27xx PMICs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ help
+ This enables support for the Spreadtrum SC27xx PMICs with an SPI
+ interface. The SC27xx series PMICs integrate power management, an
+ audio codec, battery management and user interface support functions
+ (such as RTC, Type-C and indicators) in a single chip.
+
+ This driver provides common support for accessing the SC27xx PMICs,
+ and it also adds the irq_chip support for handling PMIC chip events.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500 || COMPILE_TEST
@@ -1338,7 +1367,7 @@ config MFD_TPS65090
config MFD_TPS65217
tristate "TI TPS65217 Power Management / White LED chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select IRQ_DOMAIN
@@ -1400,7 +1429,7 @@ config MFD_TI_LP87565
config MFD_TPS65218
tristate "TI TPS65218 Power Management chips"
- depends on I2C
+ depends on I2C && OF
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1408,8 +1437,7 @@ config MFD_TPS65218
If you say yes here you get support for the TPS65218 series of
Power Management chips.
These include voltage regulators, gpio and other features
- that are often used in portable devices. Only regulator
- component is currently supported.
+ that are often used in portable devices.
This driver can also be built as a module. If so, the module
will be called tps65218.
@@ -1746,6 +1774,20 @@ config MFD_WM8994
core support for the WM8994, in order to use the actual
functionaltiy of the device other drivers must be enabled.
+config MFD_WM97xx
+ tristate "Wolfson Microelectronics WM97xx"
+ select MFD_CORE
+ select REGMAP_AC97
+ select AC97_BUS_COMPAT
+ depends on AC97_BUS_NEW
+ help
+ The WM9705, WM9712 and WM9713 are highly integrated hi-fi CODECs
+ designed for smartphone applications. As well as audio functionality
+ they have on-board GPIO and touchscreen functionality, which is
+ supported via the relevant subsystems. This driver provides core
+ support for the WM97xx; in order to use the actual functionality of
+ the device, other drivers must be enabled.
+
config MFD_STW481X
tristate "Support for ST Microelectronics STw481x"
depends on I2C && (ARCH_NOMADIK || COMPILE_TEST)
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c3d0a1b39bb6..d9474ade32e6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for multifunction miscellaneous devices
#
@@ -73,6 +74,7 @@ obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
wm8994-objs := wm8994-core.o wm8994-irq.o wm8994-regmap.o
obj-$(CONFIG_MFD_WM8994) += wm8994.o
+obj-$(CONFIG_MFD_WM97xx) += wm97xx-core.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
@@ -218,6 +220,7 @@ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
+obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
@@ -226,3 +229,4 @@ obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o
obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
+obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 8d46e3ad9529..77875250abe5 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -797,12 +797,7 @@ EXPORT_SYMBOL_GPL(arizona_of_get_type);
static int arizona_of_get_core_pdata(struct arizona *arizona)
{
struct arizona_pdata *pdata = &arizona->pdata;
- struct property *prop;
- const __be32 *cur;
- u32 val;
- u32 pdm_val[ARIZONA_MAX_PDM_SPK];
int ret, i;
- int count = 0;
pdata->reset = of_get_named_gpio(arizona->dev->of_node, "wlf,reset", 0);
if (pdata->reset == -EPROBE_DEFER) {
@@ -836,64 +831,6 @@ static int arizona_of_get_core_pdata(struct arizona *arizona)
ret);
}
- of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->inmode))
- break;
-
- pdata->inmode[count] = val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->dmic_ref))
- break;
-
- pdata->dmic_ref[count] = val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node, "wlf,out-mono", prop,
- cur, val) {
- if (count == ARRAY_SIZE(pdata->out_mono))
- break;
-
- pdata->out_mono[count] = !!val;
- count++;
- }
-
- count = 0;
- of_property_for_each_u32(arizona->dev->of_node,
- "wlf,max-channels-clocked",
- prop, cur, val) {
- if (count == ARRAY_SIZE(pdata->max_channels_clocked))
- break;
-
- pdata->max_channels_clocked[count] = val;
- count++;
- }
-
- ret = of_property_read_u32_array(arizona->dev->of_node,
- "wlf,spk-fmt",
- pdm_val,
- ARRAY_SIZE(pdm_val));
-
- if (ret >= 0)
- for (count = 0; count < ARRAY_SIZE(pdata->spk_fmt); ++count)
- pdata->spk_fmt[count] = pdm_val[count];
-
- ret = of_property_read_u32_array(arizona->dev->of_node,
- "wlf,spk-mute",
- pdm_val,
- ARRAY_SIZE(pdm_val));
-
- if (ret >= 0)
- for (count = 0; count < ARRAY_SIZE(pdata->spk_mute); ++count)
- pdata->spk_mute[count] = pdm_val[count];
-
return 0;
}
@@ -1026,7 +963,7 @@ int arizona_dev_init(struct arizona *arizona)
const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
- unsigned int reg, val, mask;
+ unsigned int reg, val;
int (*apply_patch)(struct arizona *) = NULL;
const struct mfd_cell *subdevs = NULL;
int n_subdevs, ret, i;
@@ -1429,73 +1366,6 @@ int arizona_dev_init(struct arizona *arizona)
ARIZONA_MICB1_RATE, val);
}
- for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
- /* Default for both is 0 so noop with defaults */
- val = arizona->pdata.dmic_ref[i]
- << ARIZONA_IN1_DMIC_SUP_SHIFT;
- if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
- val |= 1 << ARIZONA_IN1_MODE_SHIFT;
-
- switch (arizona->type) {
- case WM8998:
- case WM1814:
- regmap_update_bits(arizona->regmap,
- ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
- ARIZONA_IN1L_SRC_SE_MASK,
- (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- << ARIZONA_IN1L_SRC_SE_SHIFT);
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
- ARIZONA_IN1R_SRC_SE_MASK,
- (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- << ARIZONA_IN1R_SRC_SE_SHIFT);
-
- mask = ARIZONA_IN1_DMIC_SUP_MASK |
- ARIZONA_IN1_MODE_MASK;
- break;
- default:
- if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
- val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
-
- mask = ARIZONA_IN1_DMIC_SUP_MASK |
- ARIZONA_IN1_MODE_MASK |
- ARIZONA_IN1_SINGLE_ENDED_MASK;
- break;
- }
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_IN1L_CONTROL + (i * 8),
- mask, val);
- }
-
- for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
- /* Default is 0 so noop with defaults */
- if (arizona->pdata.out_mono[i])
- val = ARIZONA_OUT1_MONO;
- else
- val = 0;
-
- regmap_update_bits(arizona->regmap,
- ARIZONA_OUTPUT_PATH_CONFIG_1L + (i * 8),
- ARIZONA_OUT1_MONO, val);
- }
-
- for (i = 0; i < ARIZONA_MAX_PDM_SPK; i++) {
- if (arizona->pdata.spk_mute[i])
- regmap_update_bits(arizona->regmap,
- ARIZONA_PDM_SPK1_CTRL_1 + (i * 2),
- ARIZONA_SPK1_MUTE_ENDIAN_MASK |
- ARIZONA_SPK1_MUTE_SEQ1_MASK,
- arizona->pdata.spk_mute[i]);
-
- if (arizona->pdata.spk_fmt[i])
- regmap_update_bits(arizona->regmap,
- ARIZONA_PDM_SPK1_CTRL_2 + (i * 2),
- ARIZONA_SPK1_FMT_MASK,
- arizona->pdata.spk_fmt[i]);
- }
-
pm_runtime_set_active(arizona->dev);
pm_runtime_enable(arizona->dev);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 336de66ca408..2468b431bb22 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -876,6 +876,8 @@ static struct mfd_cell axp813_cells[] = {
.name = "axp221-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-regulator",
}
};
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index b3767c3141e5..dbb85caaafed 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -84,8 +84,7 @@ static int mx25_tsadc_setup_irq(struct platform_device *pdev,
return -ENOMEM;
}
- irq_set_chained_handler(irq, mx25_tsadc_irq_handler);
- irq_set_handler_data(irq, tsadc);
+ irq_set_chained_handler_and_data(irq, mx25_tsadc_irq_handler, tsadc);
return 0;
}
@@ -180,6 +179,19 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
+static int mx25_tsadc_remove(struct platform_device *pdev)
+{
+ struct mx25_tsadc *tsadc = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq) {
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ irq_domain_remove(tsadc->domain);
+ }
+
+ return 0;
+}
+
static const struct of_device_id mx25_tsadc_ids[] = {
{ .compatible = "fsl,imx25-tsadc" },
{ /* Sentinel */ }
@@ -192,6 +204,7 @@ static struct platform_driver mx25_tsadc_driver = {
.of_match_table = of_match_ptr(mx25_tsadc_ids),
},
.probe = mx25_tsadc_probe,
+ .remove = mx25_tsadc_remove,
};
module_platform_driver(mx25_tsadc_driver);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 694116630ffa..865bbeaaf00c 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -38,12 +38,7 @@ int intel_lpss_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
#define INTEL_LPSS_SLEEP_PM_OPS \
.prepare = intel_lpss_prepare, \
- .suspend = intel_lpss_suspend, \
- .resume = intel_lpss_resume, \
- .freeze = intel_lpss_suspend, \
- .thaw = intel_lpss_resume, \
- .poweroff = intel_lpss_suspend, \
- .restore = intel_lpss_resume,
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_lpss_suspend, intel_lpss_resume)
#else
#define INTEL_LPSS_SLEEP_PM_OPS
#endif
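For context, SET_LATE_SYSTEM_SLEEP_PM_OPS() populates the late/early variants of the system sleep callbacks rather than the plain suspend/resume assignments it replaces here; its definition in include/linux/pm.h is roughly:

#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend_late = suspend_fn, \
	.resume_early = resume_fn, \
	.freeze_late = suspend_fn, \
	.thaw_early = resume_fn, \
	.poweroff_late = suspend_fn, \
	.restore_early = resume_fn,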
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
new file mode 100644
index 000000000000..861277c6580a
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -0,0 +1,184 @@
+/*
+ * Device access for Dollar Cove TI PMIC
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * Cleanup and forward-ported
+ * Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define CHTDC_TI_IRQLVL1 0x01
+#define CHTDC_TI_MASK_IRQLVL1 0x02
+
+/* Level 1 IRQs */
+enum {
+ CHTDC_TI_PWRBTN = 0, /* power button */
+ CHTDC_TI_DIETMPWARN, /* thermal */
+ CHTDC_TI_ADCCMPL, /* ADC */
+ /* No IRQ 3 */
+ CHTDC_TI_VBATLOW = 4, /* battery */
+ CHTDC_TI_VBUSDET, /* power source */
+ /* No IRQ 6 */
+ CHTDC_TI_CCEOCAL = 7, /* battery */
+};
+
+static struct resource power_button_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_PWRBTN),
+};
+
+static struct resource thermal_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_DIETMPWARN),
+};
+
+static struct resource adc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_ADCCMPL),
+};
+
+static struct resource pwrsrc_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBUSDET),
+};
+
+static struct resource battery_resources[] = {
+ DEFINE_RES_IRQ(CHTDC_TI_VBATLOW),
+ DEFINE_RES_IRQ(CHTDC_TI_CCEOCAL),
+};
+
+static struct mfd_cell chtdc_ti_dev[] = {
+ {
+ .name = "chtdc_ti_pwrbtn",
+ .num_resources = ARRAY_SIZE(power_button_resources),
+ .resources = power_button_resources,
+ }, {
+ .name = "chtdc_ti_adc",
+ .num_resources = ARRAY_SIZE(adc_resources),
+ .resources = adc_resources,
+ }, {
+ .name = "chtdc_ti_thermal",
+ .num_resources = ARRAY_SIZE(thermal_resources),
+ .resources = thermal_resources,
+ }, {
+ .name = "chtdc_ti_pwrsrc",
+ .num_resources = ARRAY_SIZE(pwrsrc_resources),
+ .resources = pwrsrc_resources,
+ }, {
+ .name = "chtdc_ti_battery",
+ .num_resources = ARRAY_SIZE(battery_resources),
+ .resources = battery_resources,
+ },
+ { .name = "chtdc_ti_region", },
+};
+
+static const struct regmap_config chtdc_ti_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 128,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_irq chtdc_ti_irqs[] = {
+ REGMAP_IRQ_REG(CHTDC_TI_PWRBTN, 0, BIT(CHTDC_TI_PWRBTN)),
+ REGMAP_IRQ_REG(CHTDC_TI_DIETMPWARN, 0, BIT(CHTDC_TI_DIETMPWARN)),
+ REGMAP_IRQ_REG(CHTDC_TI_ADCCMPL, 0, BIT(CHTDC_TI_ADCCMPL)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBATLOW, 0, BIT(CHTDC_TI_VBATLOW)),
+ REGMAP_IRQ_REG(CHTDC_TI_VBUSDET, 0, BIT(CHTDC_TI_VBUSDET)),
+ REGMAP_IRQ_REG(CHTDC_TI_CCEOCAL, 0, BIT(CHTDC_TI_CCEOCAL)),
+};
+
+static const struct regmap_irq_chip chtdc_ti_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irqs = chtdc_ti_irqs,
+ .num_irqs = ARRAY_SIZE(chtdc_ti_irqs),
+ .num_regs = 1,
+ .status_base = CHTDC_TI_IRQLVL1,
+ .mask_base = CHTDC_TI_MASK_IRQLVL1,
+ .ack_base = CHTDC_TI_IRQLVL1,
+};
+
+static int chtdc_ti_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct intel_soc_pmic *pmic;
+ int ret;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, &chtdc_ti_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+ pmic->irq = i2c->irq;
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ IRQF_ONESHOT, 0,
+ &chtdc_ti_irq_chip,
+ &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, chtdc_ti_dev,
+ ARRAY_SIZE(chtdc_ti_dev), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+}
+
+static void chtdc_ti_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(i2c);
+
+ disable_irq(pmic->irq);
+}
+
+static int __maybe_unused chtdc_ti_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int __maybe_unused chtdc_ti_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(chtdc_ti_pm_ops, chtdc_ti_suspend, chtdc_ti_resume);
+
+static const struct acpi_device_id chtdc_ti_acpi_ids[] = {
+ { "INT33F5" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, chtdc_ti_acpi_ids);
+
+static struct i2c_driver chtdc_ti_i2c_driver = {
+ .driver = {
+ .name = "intel_soc_pmic_chtdc_ti",
+ .pm = &chtdc_ti_pm_ops,
+ .acpi_match_table = chtdc_ti_acpi_ids,
+ },
+ .probe_new = chtdc_ti_probe,
+ .shutdown = chtdc_ti_shutdown,
+};
+module_i2c_driver(chtdc_ti_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC Dollar Cove TI PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 450ae36645aa..cf1120abbf52 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -522,6 +522,7 @@ static struct lpc_ich_info lpc_chipset_info[] = {
.name = "Avoton SoC",
.iTCO_version = 3,
.gpio_version = AVOTON_GPIO,
+ .spi_type = INTEL_SPI_BYT,
},
[LPC_BAYTRAIL] = {
.name = "Bay Trail SoC",
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 662ae0d9e334..1c05ea0cba61 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -48,7 +48,10 @@ static const struct mfd_cell max77693_devs[] = {
.name = "max77693-charger",
.of_compatible = "maxim,max77693-charger",
},
- { .name = "max77693-muic", },
+ {
+ .name = "max77693-muic",
+ .of_compatible = "maxim,max77693-muic",
+ },
{
.name = "max77693-haptic",
.of_compatible = "maxim,max77693-haptic",
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 630bd19b2c0a..98e732a7ae96 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -196,8 +196,10 @@ static int mxs_lradc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lradc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOMEM;
+ if (!res) {
+ ret = -ENOMEM;
+ goto err_clk;
+ }
switch (lradc->soc) {
case IMX23_LRADC:
diff --git a/drivers/mfd/rts5249.c b/drivers/mfd/rts5249.c
index 40f8bb14fc59..7fcf37ba922c 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/mfd/rts5249.c
@@ -103,8 +103,64 @@ static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
+static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+ u32 lval;
+
+ if (CHK_PCI_PID(pcr, PID_524A))
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG1, &lval);
+ else
+ rtsx_pci_read_config_dword(pcr,
+ PCR_ASPM_SETTING_REG2, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+}
+
+static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+
+ return 0;
+}
+
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ rts5249_init_from_cfg(pcr);
+ rts5249_init_from_hw(pcr);
+
rtsx_pci_init_cmd(pcr);
/* Rest L1SUB Config */
@@ -125,7 +181,18 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
- return rtsx_pci_send_cmd(pcr, 100);
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
}
static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
@@ -285,6 +352,31 @@ static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
return rtsx_pci_send_cmd(pcr, 100);
}
+static void rts5249_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr,
+ pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
@@ -297,6 +389,7 @@ static const struct pcr_ops rts5249_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_aspm = rts5249_set_aspm,
};
/* SD Pull Control Enable:
@@ -353,6 +446,8 @@ static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
void rts5249_init_params(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &(pcr->option);
+
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5249_pcr_ops;
@@ -372,6 +467,20 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
@@ -459,6 +568,40 @@ static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
return 0;
}
+static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* Run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* L1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
static const struct pcr_ops rts524a_pcr_ops = {
.write_phy = rts524a_write_phy,
.read_phy = rts524a_read_phy,
@@ -473,11 +616,16 @@ static const struct pcr_ops rts524a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
@@ -576,11 +724,16 @@ static const struct pcr_ops rts525a_pcr_ops = {
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rtsx_base_force_power_down,
+ .set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
+ .set_aspm = rts5249_set_aspm,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
+ pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ pcr->option.ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 3cf69e5c5703..590fb9aad77d 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -79,6 +79,96 @@ static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
0xFC, 0);
}
+int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ rtsx_pci_write_register(pcr, MSGTXDATA0,
+ MASK_8_BIT_DEF, (u8) (latency & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA1,
+ MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA2,
+ MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
+ rtsx_pci_write_register(pcr, MSGTXDATA3,
+ MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
+ rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
+ LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
+
+ return 0;
+}
+
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
+{
+ if (pcr->ops->set_ltr_latency)
+ return pcr->ops->set_ltr_latency(pcr, latency);
+ else
+ return rtsx_comm_set_ltr_latency(pcr, latency);
+}
+
+static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ rtsx_pci_enable_aspm(pcr);
+ else
+ rtsx_pci_disable_aspm(pcr);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK;
+ u8 val = 0;
+
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, false);
+ else
+ rtsx_comm_set_aspm(pcr, false);
+}
+
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
+{
+ rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
+
+ return 0;
+}
+
+void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
+{
+ if (pcr->ops->set_l1off_cfg_sub_d0)
+ pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
+}
+
+static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rtsx_disable_aspm(pcr);
+
+ if (option->ltr_enabled)
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 1);
+}
+
+void rtsx_pm_full_on(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->full_on)
+ pcr->ops->full_on(pcr);
+ else
+ rtsx_comm_pm_full_on(pcr);
+}
+
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
/* If pci device removed, don't queue idle work any more */
@@ -89,9 +179,7 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr)
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
-
- if (pcr->aspm_en)
- rtsx_pci_disable_aspm(pcr);
+ rtsx_pm_full_on(pcr);
}
mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
@@ -958,6 +1046,41 @@ static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
return 0;
}
+static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->set_aspm)
+ pcr->ops->set_aspm(pcr, true);
+ else
+ rtsx_comm_set_aspm(pcr, true);
+}
+
+static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ltr_enabled) {
+ u32 latency = option->ltr_l1off_latency;
+
+ if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
+ mdelay(option->l1_snooze_delay);
+
+ rtsx_set_ltr_latency(pcr, latency);
+ }
+
+ if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
+ rtsx_set_l1off_sub_cfg_d0(pcr, 0);
+
+ rtsx_enable_aspm(pcr);
+}
+
+void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->power_saving)
+ pcr->ops->power_saving(pcr);
+ else
+ rtsx_comm_pm_power_saving(pcr);
+}
+
static void rtsx_pci_idle_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -974,8 +1097,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
- if (pcr->aspm_en)
- rtsx_pci_enable_aspm(pcr);
+ rtsx_pm_power_saving(pcr);
mutex_unlock(&pcr->pcr_mutex);
}
@@ -1063,6 +1185,16 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
if (err < 0)
return err;
+ switch (PCI_PID(pcr)) {
+ case PID_5250:
+ case PID_524A:
+ case PID_525A:
+ rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
+ break;
+ default:
+ break;
+ }
+
/* Enable clk_request_n to enable clock power management */
rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
/* Enter L1 when host tx idle */
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/mfd/rtsx_pcr.h
index 931d1ae3ce32..ec784e04fe20 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/mfd/rtsx_pcr.h
@@ -32,6 +32,18 @@
#define RTS524A_PME_FORCE_CTL 0xFF78
#define RTS524A_PM_CTRL3 0xFF7E
+#define LTR_ACTIVE_LATENCY_DEF 0x883C
+#define LTR_IDLE_LATENCY_DEF 0x892C
+#define LTR_L1OFF_LATENCY_DEF 0x9003
+#define L1_SNOOZE_DELAY_DEF 1
+#define LTR_L1OFF_SSPWRGATE_5249_DEF 0xAF
+#define LTR_L1OFF_SSPWRGATE_5250_DEF 0xFF
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF 0xAC
+#define LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF 0xF8
+#define CMD_TIMEOUT_DEF 100
+#define ASPM_MASK_NEG 0xFC
+#define MASK_8_BIT_DEF 0xFF
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -85,5 +97,7 @@ do { \
/* generic operations */
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
+int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
+int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
#endif
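rtsx_comm_set_ltr_latency() in the rtsx_pcr.c hunk above splits the 32-bit LTR latency across four byte-wide message registers and is only used when a chip does not supply its own ->set_ltr_latency hook. A standalone sketch of both halves of that pattern (the register writer is a stub, not the driver's rtsx_pci_write_register()):

#include <stdint.h>
#include <stdio.h>

/* stub standing in for the byte-register write */
static void write_reg(int reg, uint8_t mask, uint8_t val)
{
	printf("MSGTXDATA%d <- 0x%02X (mask 0x%02X)\n", reg, val, mask);
}

/* common path: one byte of the latency value per register */
static int comm_set_ltr_latency(uint32_t latency)
{
	write_reg(0, 0xFF, (uint8_t)(latency & 0xFF));
	write_reg(1, 0xFF, (uint8_t)((latency >> 8) & 0xFF));
	write_reg(2, 0xFF, (uint8_t)((latency >> 16) & 0xFF));
	write_reg(3, 0xFF, (uint8_t)((latency >> 24) & 0xFF));
	return 0;
}

struct pcr_ops {
	int (*set_ltr_latency)(uint32_t latency);	/* optional per-chip hook */
};

/* dispatcher: prefer the chip hook, fall back to the common helper */
static int set_ltr_latency(const struct pcr_ops *ops, uint32_t latency)
{
	if (ops->set_ltr_latency)
		return ops->set_ltr_latency(latency);
	return comm_set_ltr_latency(latency);
}

int main(void)
{
	struct pcr_ops ops = { NULL };

	return set_ltr_latency(&ops, 0x883C);	/* LTR_ACTIVE_LATENCY_DEF */
}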
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index 691dab791f7a..59d61b04c197 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -40,9 +40,9 @@ static const struct mfd_cell rtsx_usb_cells[] = {
},
};
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
- struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+ struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
@@ -663,7 +663,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
goto out_init_fail;
/* initialize USB SG transfer timer */
- setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+ timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
ARRAY_SIZE(rtsx_usb_cells));
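The rtsx_usb hunk is the standard conversion to the v4.14+ timer API: timer_setup() replaces setup_timer(), the callback now takes the timer itself, and from_timer() (a container_of() wrapper) recovers the enclosing structure. A condensed, hedged sketch of the pattern in kernel C, independent of this driver:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

struct my_dev {
	struct timer_list timeout;	/* timer embedded in the device struct */
	bool busy;
};

static void my_timeout(struct timer_list *t)
{
	/* recover the containing struct from the timer pointer */
	struct my_dev *dev = from_timer(dev, t, timeout);

	dev->busy = false;
}

static void my_dev_start(struct my_dev *dev)
{
	dev->busy = true;
	timer_setup(&dev->timeout, my_timeout, 0);
	mod_timer(&dev->timeout, jiffies + msecs_to_jiffies(200));
}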
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 40534352e574..ad774161a22d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/i2c-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/slab.h>
#include <linux/sm501.h>
@@ -1107,14 +1108,6 @@ static void sm501_gpio_remove(struct sm501_devdata *sm)
kfree(gpio->regs_res);
}
-static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
-{
- struct sm501_gpio *gpio = &sm->gpio;
- int base = (pin < 32) ? gpio->low.gpio.base : gpio->high.gpio.base;
-
- return (pin % 32) + base;
-}
-
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return sm->gpio.registered;
@@ -1129,11 +1122,6 @@ static inline void sm501_gpio_remove(struct sm501_devdata *sm)
{
}
-static inline int sm501_gpio_pin2nr(struct sm501_devdata *sm, unsigned int pin)
-{
- return -1;
-}
-
static inline int sm501_gpio_isregistered(struct sm501_devdata *sm)
{
return 0;
@@ -1145,20 +1133,37 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
{
struct i2c_gpio_platform_data *icd;
struct platform_device *pdev;
+ struct gpiod_lookup_table *lookup;
pdev = sm501_create_subdev(sm, "i2c-gpio", 0,
sizeof(struct i2c_gpio_platform_data));
if (!pdev)
return -ENOMEM;
- icd = dev_get_platdata(&pdev->dev);
-
- /* We keep the pin_sda and pin_scl fields relative in case the
- * same platform data is passed to >1 SM501.
- */
+ /* Create a gpiod lookup using gpiochip-local offsets */
+ lookup = devm_kzalloc(&pdev->dev,
+ sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
+ GFP_KERNEL);
+ lookup->dev_id = "i2c-gpio";
+ if (iic->pin_sda < 32)
+ lookup->table[0].chip_label = "SM501-LOW";
+ else
+ lookup->table[0].chip_label = "SM501-HIGH";
+ lookup->table[0].chip_hwnum = iic->pin_sda % 32;
+ lookup->table[0].con_id = NULL;
+ lookup->table[0].idx = 0;
+ lookup->table[0].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ if (iic->pin_scl < 32)
+ lookup->table[1].chip_label = "SM501-LOW";
+ else
+ lookup->table[1].chip_label = "SM501-HIGH";
+ lookup->table[1].chip_hwnum = iic->pin_scl % 32;
+ lookup->table[1].con_id = NULL;
+ lookup->table[1].idx = 1;
+ lookup->table[1].flags = GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN;
+ gpiod_add_lookup_table(lookup);
- icd->sda_pin = sm501_gpio_pin2nr(sm, iic->pin_sda);
- icd->scl_pin = sm501_gpio_pin2nr(sm, iic->pin_scl);
+ icd = dev_get_platdata(&pdev->dev);
icd->timeout = iic->timeout;
icd->udelay = iic->udelay;
@@ -1170,9 +1175,9 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm,
pdev->id = iic->bus_num;
- dev_info(sm->dev, "registering i2c-%d: sda=%d (%d), scl=%d (%d)\n",
+ dev_info(sm->dev, "registering i2c-%d: sda=%d, scl=%d\n",
iic->bus_num,
- icd->sda_pin, iic->pin_sda, icd->scl_pin, iic->pin_scl);
+ iic->pin_sda, iic->pin_scl);
return sm501_register_device(sm, pdev);
}
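The sm501 hunk fills its gpiod_lookup_table field by field because the chip label ("SM501-LOW" vs "SM501-HIGH") depends on the pin number at runtime. When the mapping is fixed up front, the same table is normally written with the GPIO_LOOKUP_IDX() helper; a hedged sketch with illustrative pin offsets:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table i2c_gpio_lookup = {
	.dev_id = "i2c-gpio",		/* consumer device name */
	.table = {
		/* chip label, hw offset, con_id, index, flags */
		GPIO_LOOKUP_IDX("SM501-LOW", 6, NULL, 0,
				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
		GPIO_LOOKUP_IDX("SM501-LOW", 7, NULL, 1,
				GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
		{ },
	},
};

static void i2c_gpio_register_lookup(void)
{
	gpiod_add_lookup_table(&i2c_gpio_lookup);
}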
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
new file mode 100644
index 000000000000..56a4782f0569
--- /dev/null
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#define SPRD_PMIC_INT_MASK_STATUS 0x0
+#define SPRD_PMIC_INT_RAW_STATUS 0x4
+#define SPRD_PMIC_INT_EN 0x8
+
+#define SPRD_SC2731_IRQ_BASE 0x140
+#define SPRD_SC2731_IRQ_NUMS 16
+
+struct sprd_pmic {
+ struct regmap *regmap;
+ struct device *dev;
+ struct regmap_irq *irqs;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+};
+
+struct sprd_pmic_data {
+ u32 irq_base;
+ u32 num_irqs;
+};
+
+/*
+ * Different PMICs in the SC27xx series can have different interrupt base
+ * addresses and interrupt counts, so the IRQ base and the number of IRQs
+ * are kept in the per-variant device data.
+ */
+static const struct sprd_pmic_data sc2731_data = {
+ .irq_base = SPRD_SC2731_IRQ_BASE,
+ .num_irqs = SPRD_SC2731_IRQ_NUMS,
+};
+
+static const struct mfd_cell sprd_pmic_devs[] = {
+ {
+ .name = "sc27xx-wdt",
+ .of_compatible = "sprd,sc27xx-wdt",
+ }, {
+ .name = "sc27xx-rtc",
+ .of_compatible = "sprd,sc27xx-rtc",
+ }, {
+ .name = "sc27xx-charger",
+ .of_compatible = "sprd,sc27xx-charger",
+ }, {
+ .name = "sc27xx-chg-timer",
+ .of_compatible = "sprd,sc27xx-chg-timer",
+ }, {
+ .name = "sc27xx-fast-chg",
+ .of_compatible = "sprd,sc27xx-fast-chg",
+ }, {
+ .name = "sc27xx-chg-wdt",
+ .of_compatible = "sprd,sc27xx-chg-wdt",
+ }, {
+ .name = "sc27xx-typec",
+ .of_compatible = "sprd,sc27xx-typec",
+ }, {
+ .name = "sc27xx-flash",
+ .of_compatible = "sprd,sc27xx-flash",
+ }, {
+ .name = "sc27xx-eic",
+ .of_compatible = "sprd,sc27xx-eic",
+ }, {
+ .name = "sc27xx-efuse",
+ .of_compatible = "sprd,sc27xx-efuse",
+ }, {
+ .name = "sc27xx-thermal",
+ .of_compatible = "sprd,sc27xx-thermal",
+ }, {
+ .name = "sc27xx-adc",
+ .of_compatible = "sprd,sc27xx-adc",
+ }, {
+ .name = "sc27xx-audio-codec",
+ .of_compatible = "sprd,sc27xx-audio-codec",
+ }, {
+ .name = "sc27xx-regulator",
+ .of_compatible = "sprd,sc27xx-regulator",
+ }, {
+ .name = "sc27xx-vibrator",
+ .of_compatible = "sprd,sc27xx-vibrator",
+ }, {
+ .name = "sc27xx-keypad-led",
+ .of_compatible = "sprd,sc27xx-keypad-led",
+ }, {
+ .name = "sc27xx-bltc",
+ .of_compatible = "sprd,sc27xx-bltc",
+ }, {
+ .name = "sc27xx-fgu",
+ .of_compatible = "sprd,sc27xx-fgu",
+ }, {
+ .name = "sc27xx-7sreset",
+ .of_compatible = "sprd,sc27xx-7sreset",
+ }, {
+ .name = "sc27xx-poweroff",
+ .of_compatible = "sprd,sc27xx-poweroff",
+ },
+};
+
+static int sprd_pmic_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int sprd_pmic_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u32 rx_buf[2] = { 0 };
+ int ret;
+
+	/* For now, only one PMIC register can be read at a time. */
+ if (reg_size != sizeof(u32) || val_size != sizeof(u32))
+ return -EINVAL;
+
+ /* Copy address to read from into first element of SPI buffer. */
+ memcpy(rx_buf, reg, sizeof(u32));
+ ret = spi_read(spi, rx_buf, 1);
+ if (ret < 0)
+ return ret;
+
+ memcpy(val, rx_buf, val_size);
+ return 0;
+}
+
+static struct regmap_bus sprd_pmic_regmap = {
+ .write = sprd_pmic_spi_write,
+ .read = sprd_pmic_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static const struct regmap_config sprd_pmic_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xffff,
+};
+
+static int sprd_pmic_probe(struct spi_device *spi)
+{
+ struct sprd_pmic *ddata;
+ const struct sprd_pmic_data *pdata;
+ int ret, i;
+
+ pdata = of_device_get_match_data(&spi->dev);
+ if (!pdata) {
+ dev_err(&spi->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->regmap = devm_regmap_init(&spi->dev, &sprd_pmic_regmap,
+ &spi->dev, &sprd_pmic_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, ddata);
+ ddata->dev = &spi->dev;
+ ddata->irq = spi->irq;
+
+ ddata->irq_chip.name = dev_name(&spi->dev);
+ ddata->irq_chip.status_base =
+ pdata->irq_base + SPRD_PMIC_INT_MASK_STATUS;
+ ddata->irq_chip.mask_base = pdata->irq_base + SPRD_PMIC_INT_EN;
+ ddata->irq_chip.ack_base = 0;
+ ddata->irq_chip.num_regs = 1;
+ ddata->irq_chip.num_irqs = pdata->num_irqs;
+ ddata->irq_chip.mask_invert = true;
+
+ ddata->irqs = devm_kzalloc(&spi->dev, sizeof(struct regmap_irq) *
+ pdata->num_irqs, GFP_KERNEL);
+ if (!ddata->irqs)
+ return -ENOMEM;
+
+ ddata->irq_chip.irqs = ddata->irqs;
+ for (i = 0; i < pdata->num_irqs; i++) {
+ ddata->irqs[i].reg_offset = i / pdata->num_irqs;
+ ddata->irqs[i].mask = BIT(i % pdata->num_irqs);
+ }
+
+ ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
+ &ddata->irq_chip, &ddata->irq_data);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO,
+ sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs),
+ NULL, 0,
+ regmap_irq_get_domain(ddata->irq_data));
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register device %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sprd_pmic_match[] = {
+ { .compatible = "sprd,sc2731", .data = &sc2731_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sprd_pmic_match);
+
+static struct spi_driver sprd_pmic_driver = {
+ .driver = {
+ .name = "sc27xx-pmic",
+ .bus = &spi_bus_type,
+ .of_match_table = sprd_pmic_match,
+ },
+ .probe = sprd_pmic_probe,
+};
+
+static int __init sprd_pmic_init(void)
+{
+ return spi_register_driver(&sprd_pmic_driver);
+}
+subsys_initcall(sprd_pmic_init);
+
+static void __exit sprd_pmic_exit(void)
+{
+ spi_unregister_driver(&sprd_pmic_driver);
+}
+module_exit(sprd_pmic_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum SC27xx PMICs driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
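sprd-sc27xx-spi.c keeps only the interrupt base and count in a per-variant structure and resolves it through of_device_get_match_data(), so supporting another SC27xx part is one more table entry. A minimal, hedged sketch of that per-variant match-data pattern (compatibles and values here are invented):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/types.h>

struct variant_data {
	u32 irq_base;
	u32 num_irqs;
};

static const struct variant_data foo_data = { .irq_base = 0x140, .num_irqs = 16 };
static const struct variant_data bar_data = { .irq_base = 0x180, .num_irqs = 32 };

static const struct of_device_id my_pmic_match[] = {
	{ .compatible = "vendor,foo-pmic", .data = &foo_data },
	{ .compatible = "vendor,bar-pmic", .data = &bar_data },
	{ }
};

static int my_pmic_setup(struct device *dev)
{
	const struct variant_data *vdata = of_device_get_match_data(dev);

	if (!vdata)
		return -EINVAL;
	/* vdata->irq_base / vdata->num_irqs now describe this exact variant */
	return 0;
}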
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 27986f641f7d..36b96fee4ce6 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -314,7 +314,7 @@ static int ssbi_probe(struct platform_device *pdev)
spin_lock_init(&ssbi->lock);
- return of_platform_populate(np, NULL, NULL, &pdev->dev);
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id ssbi_match_table[] = {
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index ab949eaca6ad..3cc80956260e 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -72,10 +72,12 @@ static int stw481x_get_pctl_reg(struct stw481x *stw481x, u8 reg)
static int stw481x_startup(struct stw481x *stw481x)
{
/* Voltages multiplied by 100 */
- u8 vcore_val[] = { 100, 105, 110, 115, 120, 122, 124, 126, 128,
- 130, 132, 134, 136, 138, 140, 145 };
- u8 vpll_val[] = { 105, 120, 130, 180 };
- u8 vaux_val[] = { 15, 18, 25, 28 };
+ static const u8 vcore_val[] = {
+ 100, 105, 110, 115, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 145
+ };
+ static const u8 vpll_val[] = { 105, 120, 130, 180 };
+ static const u8 vaux_val[] = { 15, 18, 25, 28 };
u8 vcore;
u8 vcore_slp;
u8 vpll;
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f769c7d4e335..7566ce4457a0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -311,37 +311,20 @@ static const struct regmap_config tps65217_regmap_config = {
};
static const struct of_device_id tps65217_of_match[] = {
- { .compatible = "ti,tps65217", .data = (void *)TPS65217 },
+ { .compatible = "ti,tps65217"},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, tps65217_of_match);
-static int tps65217_probe(struct i2c_client *client,
- const struct i2c_device_id *ids)
+static int tps65217_probe(struct i2c_client *client)
{
struct tps65217 *tps;
unsigned int version;
- unsigned long chip_id = ids->driver_data;
- const struct of_device_id *match;
bool status_off = false;
int ret;
- if (client->dev.of_node) {
- match = of_match_device(tps65217_of_match, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
- chip_id = (unsigned long)match->data;
- status_off = of_property_read_bool(client->dev.of_node,
- "ti,pmic-shutdown-controller");
- }
-
- if (!chip_id) {
- dev_err(&client->dev, "id is null.\n");
- return -ENODEV;
- }
+ status_off = of_property_read_bool(client->dev.of_node,
+ "ti,pmic-shutdown-controller");
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -349,7 +332,6 @@ static int tps65217_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
tps->dev = &client->dev;
- tps->id = chip_id;
tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
@@ -430,7 +412,7 @@ static struct i2c_driver tps65217_driver = {
.of_match_table = tps65217_of_match,
},
.id_table = tps65217_id_table,
- .probe = tps65217_probe,
+ .probe_new = tps65217_probe,
.remove = tps65217_remove,
};
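With the of_match_device() boilerplate gone, tps65217 can switch to .probe_new, the i2c probe variant that drops the struct i2c_device_id argument. A stripped-down, hedged sketch of a driver built around it (names are placeholders, not this driver's):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static int my_pmic_probe(struct i2c_client *client)
{
	/* DT-only configuration; no id-table lookup needed any more */
	bool off = of_property_read_bool(client->dev.of_node,
					 "vendor,shutdown-controller");

	dev_info(&client->dev, "probed, shutdown-controller=%d\n", off);
	return 0;
}

static const struct of_device_id my_pmic_of_match[] = {
	{ .compatible = "vendor,my-pmic" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, my_pmic_of_match);

static struct i2c_driver my_pmic_driver = {
	.driver = {
		.name = "my-pmic",
		.of_match_table = my_pmic_of_match,
	},
	.probe_new = my_pmic_probe,
};
module_i2c_driver(my_pmic_driver);

MODULE_LICENSE("GPL");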
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 13834a0d2817..910f569ff77c 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -215,17 +215,9 @@ static int tps65218_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65218 *tps;
- const struct of_device_id *match;
int ret;
unsigned int chipid;
- match = of_match_device(of_tps65218_match_table, &client->dev);
- if (!match) {
- dev_err(&client->dev,
- "Failed to find matching dt id\n");
- return -EINVAL;
- }
-
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
diff --git a/drivers/mfd/twl-core.h b/drivers/mfd/twl-core.h
index 6ff99dce714f..6f96c2009a9f 100644
--- a/drivers/mfd/twl-core.h
+++ b/drivers/mfd/twl-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TWL_CORE_H__
#define __TWL_CORE_H__
diff --git a/drivers/mfd/wm97xx-core.c b/drivers/mfd/wm97xx-core.c
new file mode 100644
index 000000000000..4141ee52a70b
--- /dev/null
+++ b/drivers/mfd/wm97xx-core.c
@@ -0,0 +1,366 @@
+/*
+ * Wolfson WM97xx -- Core device
+ *
+ * Copyright (C) 2017 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Features:
+ * - an AC97 audio codec
+ * - a touchscreen driver
+ * - a GPIO block
+ */
+
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/wm97xx.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/wm97xx.h>
+#include <sound/ac97/codec.h>
+#include <sound/ac97/compat.h>
+
+#define WM9705_VENDOR_ID 0x574d4c05
+#define WM9712_VENDOR_ID 0x574d4c12
+#define WM9713_VENDOR_ID 0x574d4c13
+#define WM97xx_VENDOR_ID_MASK 0xffffffff
+
+struct wm97xx_priv {
+ struct regmap *regmap;
+ struct snd_ac97 *ac97;
+ struct device *dev;
+ struct wm97xx_platform_data codec_pdata;
+};
+
+static bool wm97xx_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_RESET ... AC97_PCM_SURR_DAC_RATE:
+ case AC97_PCM_LR_ADC_RATE:
+ case AC97_CENTER_LFE_MASTER:
+ case AC97_SPDIF ... AC97_LINE1_LEVEL:
+ case AC97_GPIO_CFG ... 0x5c:
+ case AC97_CODEC_CLASS_REV ... AC97_PCI_SID:
+ case 0x74 ... AC97_VENDOR_ID2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm97xx_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ return false;
+ default:
+ return wm97xx_readable_reg(dev, reg);
+ }
+}
+
+static const struct reg_default wm9705_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x0a, 0x8000 },
+ { 0x0c, 0x8008 },
+ { 0x0e, 0x8008 },
+ { 0x10, 0x8808 },
+ { 0x12, 0x8808 },
+ { 0x14, 0x8808 },
+ { 0x16, 0x8808 },
+ { 0x18, 0x8808 },
+ { 0x1a, 0x0000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0000 },
+ { 0x2c, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x5a, 0x0000 },
+ { 0x5c, 0x0000 },
+ { 0x72, 0x0808 },
+ { 0x74, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0000 },
+ { 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9705_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9705_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
+ .volatile_reg = regmap_ac97_default_volatile,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9705_cells[] = {
+ { .name = "wm9705-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static bool wm9712_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_REC_GAIN:
+ return true;
+ default:
+ return regmap_ac97_default_volatile(dev, reg);
+ }
+}
+
+static const struct reg_default wm9712_reg_defaults[] = {
+ { 0x02, 0x8000 },
+ { 0x04, 0x8000 },
+ { 0x06, 0x8000 },
+ { 0x08, 0x0f0f },
+ { 0x0a, 0xaaa0 },
+ { 0x0c, 0xc008 },
+ { 0x0e, 0x6808 },
+ { 0x10, 0xe808 },
+ { 0x12, 0xaaa0 },
+ { 0x14, 0xad00 },
+ { 0x16, 0x8000 },
+ { 0x18, 0xe808 },
+ { 0x1a, 0x3000 },
+ { 0x1c, 0x8000 },
+ { 0x20, 0x0000 },
+ { 0x22, 0x0000 },
+ { 0x26, 0x000f },
+ { 0x28, 0x0605 },
+ { 0x2a, 0x0410 },
+ { 0x2c, 0xbb80 },
+ { 0x2e, 0xbb80 },
+ { 0x32, 0xbb80 },
+ { 0x34, 0x2000 },
+ { 0x4c, 0xf83e },
+ { 0x4e, 0xffff },
+ { 0x50, 0x0000 },
+ { 0x52, 0x0000 },
+ { 0x56, 0xf83e },
+ { 0x58, 0x0008 },
+ { 0x5c, 0x0000 },
+ { 0x60, 0xb032 },
+ { 0x62, 0x3e00 },
+ { 0x64, 0x0000 },
+ { 0x76, 0x0006 },
+ { 0x78, 0x0001 },
+ { 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9712_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9712_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
+ .volatile_reg = wm9712_volatile_reg,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9712_cells[] = {
+ { .name = "wm9712-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static const struct reg_default wm9713_reg_defaults[] = {
+ { 0x02, 0x8080 }, /* Speaker Output Volume */
+ { 0x04, 0x8080 }, /* Headphone Output Volume */
+ { 0x06, 0x8080 }, /* Out3/OUT4 Volume */
+ { 0x08, 0xc880 }, /* Mono Volume */
+ { 0x0a, 0xe808 }, /* LINEIN Volume */
+ { 0x0c, 0xe808 }, /* DAC PGA Volume */
+ { 0x0e, 0x0808 }, /* MIC PGA Volume */
+ { 0x10, 0x00da }, /* MIC Routing Control */
+ { 0x12, 0x8000 }, /* Record PGA Volume */
+ { 0x14, 0xd600 }, /* Record Routing */
+ { 0x16, 0xaaa0 }, /* PCBEEP Volume */
+ { 0x18, 0xaaa0 }, /* VxDAC Volume */
+ { 0x1a, 0xaaa0 }, /* AUXDAC Volume */
+ { 0x1c, 0x0000 }, /* Output PGA Mux */
+ { 0x1e, 0x0000 }, /* DAC 3D control */
+	{ 0x20, 0x0f0f }, /* DAC Tone Control */
+ { 0x22, 0x0040 }, /* MIC Input Select & Bias */
+ { 0x24, 0x0000 }, /* Output Volume Mapping & Jack */
+	{ 0x26, 0x7f00 }, /* Powerdown Ctrl/Stat */
+ { 0x28, 0x0405 }, /* Extended Audio ID */
+ { 0x2a, 0x0410 }, /* Extended Audio Start/Ctrl */
+ { 0x2c, 0xbb80 }, /* Audio DACs Sample Rate */
+ { 0x2e, 0xbb80 }, /* AUXDAC Sample Rate */
+ { 0x32, 0xbb80 }, /* Audio ADCs Sample Rate */
+ { 0x36, 0x4523 }, /* PCM codec control */
+ { 0x3a, 0x2000 }, /* SPDIF control */
+ { 0x3c, 0xfdff }, /* Powerdown 1 */
+ { 0x3e, 0xffff }, /* Powerdown 2 */
+ { 0x40, 0x0000 }, /* General Purpose */
+ { 0x42, 0x0000 }, /* Fast Power-Up Control */
+ { 0x44, 0x0080 }, /* MCLK/PLL Control */
+ { 0x46, 0x0000 }, /* MCLK/PLL Control */
+
+ { 0x4c, 0xfffe }, /* GPIO Pin Configuration */
+ { 0x4e, 0xffff }, /* GPIO Pin Polarity / Type */
+ { 0x50, 0x0000 }, /* GPIO Pin Sticky */
+ { 0x52, 0x0000 }, /* GPIO Pin Wake-Up */
+ /* GPIO Pin Status */
+ { 0x56, 0xfffe }, /* GPIO Pin Sharing */
+ { 0x58, 0x4000 }, /* GPIO PullUp/PullDown */
+ { 0x5a, 0x0000 }, /* Additional Functions 1 */
+ { 0x5c, 0x0000 }, /* Additional Functions 2 */
+ { 0x60, 0xb032 }, /* ALC Control */
+ { 0x62, 0x3e00 }, /* ALC / Noise Gate Control */
+ { 0x64, 0x0000 }, /* AUXDAC input control */
+ { 0x74, 0x0000 }, /* Digitiser Reg 1 */
+ { 0x76, 0x0006 }, /* Digitiser Reg 2 */
+ { 0x78, 0x0001 }, /* Digitiser Reg 3 */
+ { 0x7a, 0x0000 }, /* Digitiser Read Back */
+};
+
+static const struct regmap_config wm9713_regmap_config = {
+ .reg_bits = 16,
+ .reg_stride = 2,
+ .val_bits = 16,
+ .max_register = 0x7e,
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm9713_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm9713_reg_defaults),
+ .volatile_reg = regmap_ac97_default_volatile,
+ .readable_reg = wm97xx_readable_reg,
+ .writeable_reg = wm97xx_writeable_reg,
+};
+
+static struct mfd_cell wm9713_cells[] = {
+ { .name = "wm9713-codec", },
+ { .name = "wm97xx-ts", },
+};
+
+static int wm97xx_ac97_probe(struct ac97_codec_device *adev)
+{
+ struct wm97xx_priv *wm97xx;
+ const struct regmap_config *config;
+ struct wm97xx_platform_data *codec_pdata;
+ struct mfd_cell *cells;
+ int ret = -ENODEV, nb_cells, i;
+ struct wm97xx_pdata *pdata = snd_ac97_codec_get_platdata(adev);
+
+ wm97xx = devm_kzalloc(ac97_codec_dev2dev(adev),
+ sizeof(*wm97xx), GFP_KERNEL);
+ if (!wm97xx)
+ return -ENOMEM;
+
+ wm97xx->dev = ac97_codec_dev2dev(adev);
+ wm97xx->ac97 = snd_ac97_compat_alloc(adev);
+ if (IS_ERR(wm97xx->ac97))
+ return PTR_ERR(wm97xx->ac97);
+
+
+ ac97_set_drvdata(adev, wm97xx);
+ dev_info(wm97xx->dev, "wm97xx core found, id=0x%x\n",
+ adev->vendor_id);
+
+ codec_pdata = &wm97xx->codec_pdata;
+ codec_pdata->ac97 = wm97xx->ac97;
+ codec_pdata->batt_pdata = pdata->batt_pdata;
+
+ switch (adev->vendor_id) {
+ case WM9705_VENDOR_ID:
+ config = &wm9705_regmap_config;
+ cells = wm9705_cells;
+ nb_cells = ARRAY_SIZE(wm9705_cells);
+ break;
+ case WM9712_VENDOR_ID:
+ config = &wm9712_regmap_config;
+ cells = wm9712_cells;
+ nb_cells = ARRAY_SIZE(wm9712_cells);
+ break;
+ case WM9713_VENDOR_ID:
+ config = &wm9713_regmap_config;
+ cells = wm9713_cells;
+ nb_cells = ARRAY_SIZE(wm9713_cells);
+ break;
+ default:
+ goto err_free_compat;
+ }
+
+ for (i = 0; i < nb_cells; i++) {
+ cells[i].platform_data = codec_pdata;
+ cells[i].pdata_size = sizeof(*codec_pdata);
+ }
+
+ codec_pdata->regmap = devm_regmap_init_ac97(wm97xx->ac97, config);
+ if (IS_ERR(codec_pdata->regmap)) {
+ ret = PTR_ERR(codec_pdata->regmap);
+ goto err_free_compat;
+ }
+
+ ret = devm_mfd_add_devices(wm97xx->dev, PLATFORM_DEVID_NONE,
+ cells, nb_cells, NULL, 0, NULL);
+ if (ret)
+ goto err_free_compat;
+
+ return ret;
+
+err_free_compat:
+ snd_ac97_compat_release(wm97xx->ac97);
+ return ret;
+}
+
+static int wm97xx_ac97_remove(struct ac97_codec_device *adev)
+{
+ struct wm97xx_priv *wm97xx = ac97_get_drvdata(adev);
+
+ snd_ac97_compat_release(wm97xx->ac97);
+
+ return 0;
+}
+
+static const struct ac97_id wm97xx_ac97_ids[] = {
+ { .id = WM9705_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { .id = WM9712_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { .id = WM9713_VENDOR_ID, .mask = WM97xx_VENDOR_ID_MASK },
+ { }
+};
+
+static struct ac97_codec_driver wm97xx_ac97_driver = {
+ .driver = {
+ .name = "wm97xx-core",
+ },
+ .probe = wm97xx_ac97_probe,
+ .remove = wm97xx_ac97_remove,
+ .id_table = wm97xx_ac97_ids,
+};
+
+static int __init wm97xx_module_init(void)
+{
+ return snd_ac97_codec_driver_register(&wm97xx_ac97_driver);
+}
+module_init(wm97xx_module_init);
+
+static void __exit wm97xx_module_exit(void)
+{
+ snd_ac97_codec_driver_unregister(&wm97xx_ac97_driver);
+}
+module_exit(wm97xx_module_exit);
+
+MODULE_DESCRIPTION("WM9712, WM9713 core driver");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_LICENSE("GPL");
+
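All three wm97xx register maps above share one idea: the cache may cover the whole register space, but the readable_reg/writeable_reg callbacks keep the vendor ID registers out of the writeable set. A generic, hedged sketch of that regmap idiom (register numbers are illustrative):

#include <linux/device.h>
#include <linux/regmap.h>

#define CHIP_ID_REG	0x7c

static bool chip_readable_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x7e;		/* everything in range can be read */
}

static bool chip_writeable_reg(struct device *dev, unsigned int reg)
{
	if (reg == CHIP_ID_REG)		/* ID register stays read-only */
		return false;
	return chip_readable_reg(dev, reg);
}

static const struct regmap_config chip_regmap_config = {
	.reg_bits	= 16,
	.val_bits	= 16,
	.reg_stride	= 2,
	.max_register	= 0x7e,
	.cache_type	= REGCACHE_RBTREE,
	.readable_reg	= chip_readable_reg,
	.writeable_reg	= chip_writeable_reg,
};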
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8136dc7e863d..f1a5c2357b14 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -393,16 +393,6 @@ config SPEAR13XX_PCIE_GADGET
entry will be created for that controller. User can use these
sysfs node to configure PCIe EP as per his requirements.
-config TI_DAC7512
- tristate "Texas Instruments DAC7512"
- depends on SPI && SYSFS
- help
- If you say yes here you get support for the Texas Instruments
- DAC7512 16-bit digital-to-analog converter.
-
- This driver can also be built as a module. If so, the module
- will be called ti_dac7512.
-
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d84819dc2468..5ca5f64df478 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for misc devices that really don't fit anywhere else.
#
@@ -30,7 +31,6 @@ obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_DS1682) += ds1682.o
-obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
diff --git a/drivers/misc/altera-stapl/Kconfig b/drivers/misc/altera-stapl/Kconfig
index 7f01d8e93992..8a828fe41fad 100644
--- a/drivers/misc/altera-stapl/Kconfig
+++ b/drivers/misc/altera-stapl/Kconfig
@@ -1,4 +1,5 @@
-comment "Altera FPGA firmware download module"
+comment "Altera FPGA firmware download module (requires I2C)"
+ depends on !I2C
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 1922cb8f6b88..1c5b7aec13d4 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/idr.h>
@@ -904,7 +903,6 @@ struct c2port_device *c2port_device_register(char *name,
return ERR_PTR(-EINVAL);
c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
- kmemcheck_annotate_bitfield(c2dev, flags);
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 0b5fd749d96d..502d41fc9ea5 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := $(call cc-disable-warning, unused-const-variable)
ccflags-$(CONFIG_PPC_WERROR) += -Werror
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index a0c44d16bf30..7c11bad5cded 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include "cxl.h"
@@ -331,9 +332,12 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
- /* decrement the use count */
- if (ctx->mm)
+ if (ctx->mm) {
+ /* decrement the use count from above */
mmput(ctx->mm);
+ /* make TLBIs for this context global */
+ mm_context_add_copro(ctx->mm);
+ }
}
/*
@@ -342,13 +346,19 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
*/
cxl_ctx_get();
+ /* See the comment in afu_ioctl_start_work() */
+ smp_mb();
+
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
ctx->pid = NULL;
cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
- if (task)
+ if (task) {
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
+ }
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 8c32040b9c09..12a41b2753f0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -267,6 +268,8 @@ int __detach_context(struct cxl_context *ctx)
/* Decrease the mm count on the context */
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
ctx->mm = NULL;
return 0;
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index b1afeccbb97f..e46a4062904a 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -100,9 +100,12 @@ static const cxl_p1_reg_t CXL_XSL_FEC = {0x0158};
static const cxl_p1_reg_t CXL_XSL_DSNCTL = {0x0168};
/* PSL registers - CAIA 2 */
static const cxl_p1_reg_t CXL_PSL9_CONTROL = {0x0020};
+static const cxl_p1_reg_t CXL_XSL9_INV = {0x0110};
+static const cxl_p1_reg_t CXL_XSL9_DBG = {0x0130};
+static const cxl_p1_reg_t CXL_XSL9_DEF = {0x0140};
static const cxl_p1_reg_t CXL_XSL9_DSNCTL = {0x0168};
static const cxl_p1_reg_t CXL_PSL9_FIR1 = {0x0300};
-static const cxl_p1_reg_t CXL_PSL9_FIR2 = {0x0308};
+static const cxl_p1_reg_t CXL_PSL9_FIR_MASK = {0x0308};
static const cxl_p1_reg_t CXL_PSL9_Timebase = {0x0310};
static const cxl_p1_reg_t CXL_PSL9_DEBUG = {0x0320};
static const cxl_p1_reg_t CXL_PSL9_FIR_CNTL = {0x0348};
@@ -112,6 +115,7 @@ static const cxl_p1_reg_t CXL_PSL9_TRACECFG = {0x0368};
static const cxl_p1_reg_t CXL_PSL9_APCDEDALLOC = {0x0378};
static const cxl_p1_reg_t CXL_PSL9_APCDEDTYPE = {0x0380};
static const cxl_p1_reg_t CXL_PSL9_TNR_ADDR = {0x0388};
+static const cxl_p1_reg_t CXL_PSL9_CTCCFG = {0x0390};
static const cxl_p1_reg_t CXL_PSL9_GP_CT = {0x0398};
static const cxl_p1_reg_t CXL_XSL9_IERAT = {0x0588};
static const cxl_p1_reg_t CXL_XSL9_ILPP = {0x0590};
@@ -414,6 +418,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+#define CXL_PSL9_TRACEID_MAX 0xAU
+#define CXL_PSL9_TRACESTATE_FIN 0x3U
+
enum cxl_context_status {
CLOSED,
OPENED,
@@ -938,8 +945,6 @@ int cxl_debugfs_adapter_add(struct cxl *adapter);
void cxl_debugfs_adapter_remove(struct cxl *adapter);
int cxl_debugfs_afu_add(struct cxl_afu *afu);
void cxl_debugfs_afu_remove(struct cxl_afu *afu);
-void cxl_stop_trace_psl9(struct cxl *cxl);
-void cxl_stop_trace_psl8(struct cxl *cxl);
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir);
void cxl_debugfs_add_adapter_regs_xsl(struct cxl *adapter, struct dentry *dir);
@@ -975,14 +980,6 @@ static inline void cxl_debugfs_afu_remove(struct cxl_afu *afu)
{
}
-static inline void cxl_stop_trace_psl9(struct cxl *cxl)
-{
-}
-
-static inline void cxl_stop_trace_psl8(struct cxl *cxl)
-{
-}
-
static inline void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter,
struct dentry *dir)
{
@@ -1070,7 +1067,8 @@ u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
-void cxl_native_err_irq_dump_regs(struct cxl *adapter);
+void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter);
+void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
void cxl_release_mapping(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index eae9d749f967..1643850d2302 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -15,28 +15,6 @@
static struct dentry *cxl_debugfs;
-void cxl_stop_trace_psl9(struct cxl *adapter)
-{
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x4480000000000000ULL);
-}
-
-void cxl_stop_trace_psl8(struct cxl *adapter)
-{
- int slice;
-
- /* Stop the trace */
- cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
-
- /* Stop the slice traces */
- spin_lock(&adapter->afu_list_lock);
- for (slice = 0; slice < adapter->slices; slice++) {
- if (adapter->afu[slice])
- cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL);
- }
- spin_unlock(&adapter->afu_list_lock);
-}
-
/* Helpers to export CXL mmaped IO registers via debugfs */
static int debugfs_io_u64_get(void *data, u64 *val)
{
@@ -62,9 +40,14 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
{
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
- debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR2));
+ debugfs_create_io_x64("fir_mask", 0400, dir,
+ _cxl_p1_addr(adapter, CXL_PSL9_FIR_MASK));
debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
+ debugfs_create_io_x64("debug", 0600, dir,
+ _cxl_p1_addr(adapter, CXL_PSL9_DEBUG));
+ debugfs_create_io_x64("xsl-debug", 0600, dir,
+ _cxl_p1_addr(adapter, CXL_XSL9_DBG));
}
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index f17f72ea0545..70dbb6de102c 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -220,22 +220,11 @@ static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
- u64 crs; /* Translation Checkout Response Status */
-
if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM))
return true;
- if (cxl_is_power9()) {
- crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK);
- if ((crs == CXL_PSL9_DSISR_An_PF_SLR) ||
- (crs == CXL_PSL9_DSISR_An_PF_RGC) ||
- (crs == CXL_PSL9_DSISR_An_PF_RGP) ||
- (crs == CXL_PSL9_DSISR_An_PF_HRH) ||
- (crs == CXL_PSL9_DSISR_An_PF_STEG) ||
- (crs == CXL_PSL9_DSISR_An_URTCH)) {
- return true;
- }
- }
+ if (cxl_is_power9())
+ return true;
return false;
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 4bfad9f6dc9f..76c0b0ca9388 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>
@@ -220,9 +221,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
/* ensure this mm_struct can't be freed */
cxl_context_mm_count_get(ctx);
- /* decrement the use count */
- if (ctx->mm)
+ if (ctx->mm) {
+ /* decrement the use count from above */
mmput(ctx->mm);
+ /* make TLBIs for this context global */
+ mm_context_add_copro(ctx->mm);
+ }
/*
* Increment driver use count. Enables global TLBIs for hash
@@ -230,6 +234,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
*/
cxl_ctx_get();
+ /*
+ * A barrier is needed to make sure all TLBIs are global
+ * before we attach and the context starts being used by the
+ * adapter.
+ *
+ * Needed after mm_context_add_copro() for radix and
+ * cxl_ctx_get() for hash/p8.
+ *
+ * The barrier should really be mb(), since it involves a
+ * device. However, it's only useful when we have local
+	 * vs. global TLBIs, i.e. SMP=y. So keep smp_mb().
+ */
+ smp_mb();
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
@@ -240,6 +258,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = NULL;
cxl_ctx_put();
cxl_context_mm_count_put(ctx);
+ if (ctx->mm)
+ mm_context_remove_copro(ctx->mm);
goto out;
}
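The comment block added to afu_ioctl_start_work() is about publication ordering: everything done to prepare the context (making TLBIs global, taking the driver use count) must be visible before the adapter is attached and starts using it. As a loose userspace analogue only (C11 release/acquire rather than the kernel's smp_mb() before a device-visible attach), the shape of that publish step is:

#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
	int tlbis_global;	/* prepared before publication */
	atomic_bool attached;	/* consumers act only once this reads true */
};

static void publish(struct ctx *c)
{
	c->tlbis_global = 1;
	/* release: all prior stores are visible once 'attached' is seen */
	atomic_store_explicit(&c->attached, true, memory_order_release);
}

static bool consume(struct ctx *c)
{
	if (!atomic_load_explicit(&c->attached, memory_order_acquire))
		return false;
	return c->tlbis_global == 1;	/* guaranteed by acquire/release pairing */
}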
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 3aa216bf0939..43917898fb9a 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 4a82c313cf71..02b6b45b4c20 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -897,6 +897,14 @@ int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
+ ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
+ /*
+ * Ideally we should do a wmb() here to make sure the changes to the
+ * PE are visible to the card before we call afu_enable.
+	 * On ppc64 though, all mmios are preceded by a 'sync' instruction,
+	 * hence we don't need one here.
+ */
+
result = cxl_ops->afu_reset(afu);
if (result)
return result;
@@ -1077,13 +1085,11 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
- u64 fir1, fir2, serr;
+ u64 fir1, serr;
fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2);
dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
cxl_afu_decode_psl_serr(ctx->afu, serr);
@@ -1257,14 +1263,23 @@ static irqreturn_t native_slice_irq_err(int irq, void *data)
return IRQ_HANDLED;
}
-void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
+{
+ u64 fir1;
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
+ dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
+}
+
+void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
u64 fir1, fir2;
fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
-
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+ dev_crit(&adapter->dev,
+ "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
+ fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 3ba04f371380..bb7fd3f4edab 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -401,7 +401,8 @@ int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
*capp_unit_id = get_capp_unit_id(np, *phb_index);
of_node_put(np);
if (!*capp_unit_id) {
- pr_err("cxl: invalid capp unit id\n");
+ pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
+ *phb_index);
return -ENODEV;
}
@@ -475,37 +476,37 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
psl_fircntl |= 0x1ULL; /* ce_thresh */
cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
- /* vccredits=0x1 pcklat=0x4 */
- cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0000000000001810ULL);
-
- /*
- * For debugging with trace arrays.
- * Configure RX trace 0 segmented mode.
- * Configure CT trace 0 segmented mode.
- * Configure LA0 trace 0 segmented mode.
- * Configure LA1 trace 0 segmented mode.
+	/* Set up the PSL to transmit packets on the PCIe before the
+	 * CAPP is enabled
*/
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000000ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000003ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000005ULL);
- cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x8040800080000006ULL);
+ cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);
/*
* A response to an ASB_Notify request is returned by the
* system as an MMIO write to the address defined in
- * the PSL_TNR_ADDR register
+ * the PSL_TNR_ADDR register.
+	 * Keep the reset value: 0x00020000E0000000
*/
- /* PSL_TNR_ADDR */
- /* NORST */
- cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
+ /* Enable XSL rty limit */
+ cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
+
+ /* Change XSL_INV dummy read threshold */
+ cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
+
+ if (phb_index == 3) {
+ /* disable machines 31-47 and 20-27 for DMA */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
+ }
- /* allocate the apc machines */
- cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL);
+ /* Snoop machines */
+ cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
- /* Disable vc dd1 fix */
- if (cxl_is_power9_dd1())
- cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL);
+ if (cxl_is_power9_dd1()) {
+ /* Disabling deadlock counter CAR */
+ cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
+ } else
+ cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL);
return 0;
}
@@ -1746,6 +1747,44 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
pci_disable_device(pdev);
}
+static void cxl_stop_trace_psl9(struct cxl *adapter)
+{
+ int traceid;
+ u64 trace_state, trace_mask;
+ struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+
+	/* read each trace array state and issue an mmio to stop it if needed */
+ for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
+ trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
+ trace_mask = (0x3ULL << (62 - traceid * 2));
+ trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
+ dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
+ traceid, trace_state);
+
+ /* issue mmio if the trace array isn't in FIN state */
+ if (trace_state != CXL_PSL9_TRACESTATE_FIN)
+ cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
+ 0x8400000000000000ULL | traceid);
+ }
+}
+
+static void cxl_stop_trace_psl8(struct cxl *adapter)
+{
+ int slice;
+
+ /* Stop the trace */
+ cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
+
+ /* Stop the slice traces */
+ spin_lock(&adapter->afu_list_lock);
+ for (slice = 0; slice < adapter->slices; slice++) {
+ if (adapter->afu[slice])
+ cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
+ 0x8000000000000000LL);
+ }
+ spin_unlock(&adapter->afu_list_lock);
+}
+
static const struct cxl_service_layer_ops psl9_ops = {
.adapter_regs_init = init_implementation_adapter_regs_psl9,
.invalidate_all = cxl_invalidate_all_psl9,
@@ -1762,6 +1801,7 @@ static const struct cxl_service_layer_ops psl9_ops = {
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
.debugfs_stop_trace = cxl_stop_trace_psl9,
.write_timebase_ctrl = write_timebase_ctrl_psl9,
.timebase_read = timebase_read_psl9,
@@ -1785,7 +1825,7 @@ static const struct cxl_service_layer_ops psl8_ops = {
.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
- .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+ .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
.debugfs_stop_trace = cxl_stop_trace_psl8,
.write_timebase_ctrl = write_timebase_ctrl_psl8,
.timebase_read = timebase_read_psl8,
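cxl_stop_trace_psl9() above reads CXL_PSL9_CTCCFG once per trace array and extracts a 2-bit state field packed downward from bit 63. The mask/shift arithmetic on its own, as a standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend register: trace array 0 in state 0x3, array 1 in state 0x1 */
	uint64_t ctccfg = (0x3ULL << 62) | (0x1ULL << 60);
	int traceid;

	for (traceid = 0; traceid <= 0xA; ++traceid) {
		uint64_t mask = 0x3ULL << (62 - traceid * 2);
		uint64_t state = (ctccfg & mask) >> (62 - traceid * 2);

		printf("traceid %d state %llu\n", traceid,
		       (unsigned long long)state);
	}
	return 0;
}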
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 90a52624ddeb..2aab60ef3e3e 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_EEPROM_AT24) += at24.o
obj-$(CONFIG_EEPROM_AT25) += at25.o
obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 764ff5df0dbc..e0b4b36ef010 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
@@ -24,6 +25,7 @@
#include <linux/i2c.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_data/at24.h>
+#include <linux/pm_runtime.h>
/*
* I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
@@ -175,6 +177,64 @@ static const struct i2c_device_id at24_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, at24_ids);
+static const struct of_device_id at24_of_match[] = {
+ {
+ .compatible = "atmel,24c00",
+ .data = (void *)AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR)
+ },
+ {
+ .compatible = "atmel,24c01",
+ .data = (void *)AT24_DEVICE_MAGIC(1024 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c02",
+ .data = (void *)AT24_DEVICE_MAGIC(2048 / 8, 0)
+ },
+ {
+ .compatible = "atmel,spd",
+ .data = (void *)AT24_DEVICE_MAGIC(2048 / 8,
+ AT24_FLAG_READONLY | AT24_FLAG_IRUGO)
+ },
+ {
+ .compatible = "atmel,24c04",
+ .data = (void *)AT24_DEVICE_MAGIC(4096 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c08",
+ .data = (void *)AT24_DEVICE_MAGIC(8192 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c16",
+ .data = (void *)AT24_DEVICE_MAGIC(16384 / 8, 0)
+ },
+ {
+ .compatible = "atmel,24c32",
+ .data = (void *)AT24_DEVICE_MAGIC(32768 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c64",
+ .data = (void *)AT24_DEVICE_MAGIC(65536 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c128",
+ .data = (void *)AT24_DEVICE_MAGIC(131072 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c256",
+ .data = (void *)AT24_DEVICE_MAGIC(262144 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c512",
+ .data = (void *)AT24_DEVICE_MAGIC(524288 / 8, AT24_FLAG_ADDR16)
+ },
+ {
+ .compatible = "atmel,24c1024",
+ .data = (void *)AT24_DEVICE_MAGIC(1048576 / 8, AT24_FLAG_ADDR16)
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, at24_of_match);
+
static const struct acpi_device_id at24_acpi_ids[] = {
{ "INT3499", AT24_DEVICE_MAGIC(8192 / 8, 0) },
{ }
@@ -501,11 +561,21 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
+ struct i2c_client *client;
char *buf = val;
+ int ret;
if (unlikely(!count))
return count;
+ client = at24_translate_offset(at24, &off);
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ return ret;
+ }
+
/*
* Read data from chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -518,6 +588,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
status = at24->read_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
return status;
}
buf += status;
@@ -527,17 +598,29 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
+
return 0;
}
static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
struct at24_data *at24 = priv;
+ struct i2c_client *client;
char *buf = val;
+ int ret;
if (unlikely(!count))
return -EINVAL;
+ client = at24_translate_offset(at24, &off);
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ return ret;
+ }
+
/*
* Write data to chip, protecting against concurrent updates
* from this host, but not from other I2C masters.
@@ -550,6 +633,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
status = at24->write_func(at24, buf, off, count);
if (status < 0) {
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
return status;
}
buf += status;
@@ -559,6 +643,8 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
mutex_unlock(&at24->lock);
+ pm_runtime_put(&client->dev);
+
return 0;
}
@@ -570,6 +656,10 @@ static void at24_get_pdata(struct device *dev, struct at24_platform_data *chip)
if (device_property_present(dev, "read-only"))
chip->flags |= AT24_FLAG_READONLY;
+ err = device_property_read_u32(dev, "size", &val);
+ if (!err)
+ chip->byte_len = val;
+
err = device_property_read_u32(dev, "pagesize", &val);
if (!err) {
chip->page_size = val;
@@ -598,7 +688,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
} else {
- if (id) {
+ /*
+	 * The I2C core allows OF node compatibles to match against the
+ * I2C device ID table as a fallback, so check not only if an OF
+ * node is present but also if it matches an OF device ID entry.
+ */
+ if (client->dev.of_node &&
+ of_match_device(at24_of_match, &client->dev)) {
+ magic = (kernel_ulong_t)
+ of_device_get_match_data(&client->dev);
+ } else if (id) {
magic = id->driver_data;
} else {
const struct acpi_device_id *aid;
@@ -739,11 +838,16 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, at24);
+ /* enable runtime pm */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
/*
* Perform a one-byte test read to verify that the
* chip is functional.
*/
err = at24_read(at24, 0, &test_byte, 1);
+ pm_runtime_idle(&client->dev);
if (err) {
err = -ENODEV;
goto err_clients;
@@ -791,6 +895,8 @@ err_clients:
if (at24->client[i])
i2c_unregister_device(at24->client[i]);
+ pm_runtime_disable(&client->dev);
+
return err;
}
@@ -806,6 +912,9 @@ static int at24_remove(struct i2c_client *client)
for (i = 1; i < at24->num_addresses; i++)
i2c_unregister_device(at24->client[i]);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
return 0;
}
@@ -814,6 +923,7 @@ static int at24_remove(struct i2c_client *client)
static struct i2c_driver at24_driver = {
.driver = {
.name = "at24",
+ .of_match_table = at24_of_match,
.acpi_match_table = ACPI_PTR(at24_acpi_ids),
},
.probe = at24_probe,
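The at24 change wraps every EEPROM access in a runtime-PM reference so the device can suspend between transfers; note that a failed pm_runtime_get_sync() still needs a pm_runtime_put_noidle() to rebalance the usage count. A hedged sketch of just that bracket:

#include <linux/pm_runtime.h>

static int do_transfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume the device, or fail trying */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* drop the usage count only */
		return ret;
	}

	/* ... talk to the hardware here ... */

	pm_runtime_put(dev);		/* allow it to idle/suspend again */
	return 0;
}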
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5813b5f25006..3743c87f8ab9 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -182,6 +182,7 @@ struct dma_mapping {
struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
+ int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
@@ -189,6 +190,7 @@ static inline void genwqe_mapping_init(struct dma_mapping *m,
{
memset(m, 0, sizeof(*m));
m->type = type;
+ m->write = 1; /* Assume the maps we create are R/W */
}
/**
@@ -347,6 +349,7 @@ enum genwqe_requ_state {
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address partial pages
+ * @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
@@ -356,6 +359,8 @@ struct genwqe_sgl {
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
+ int write;
+
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
@@ -369,7 +374,7 @@ struct genwqe_sgl {
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size);
+ void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index dd4617764f14..3ecfa35457e0 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -942,6 +942,10 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
+
+ if (ats_flags == ATS_TYPE_SGL_RD)
+ m->write = 0;
+
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
@@ -954,7 +958,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
- u_size);
+ u_size, m->write);
if (rc != 0)
goto err_out;
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 147b83011b58..5c0d917636f7 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -296,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
- void __user *user_addr, size_t user_size)
+ void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
@@ -312,6 +312,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->user_addr = user_addr;
sgl->user_size = user_size;
+ sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
@@ -476,14 +477,20 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
+ size_t offset;
+ unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
- if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
- sgl->fpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ res = copy_to_user(sgl->user_addr,
+ sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying fpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
@@ -491,12 +498,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
- if (copy_to_user(sgl->user_addr + sgl->user_size -
- sgl->lpage_size, sgl->lpage,
- sgl->lpage_size)) {
- dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
- __func__);
- rc = -EFAULT;
+ if (sgl->write) {
+ offset = sgl->user_size - sgl->lpage_size;
+ res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
+ sgl->lpage_size);
+ if (res) {
+ dev_err(&pci_dev->dev,
+ "[%s] err: copying lpage! (res=%lu)\n",
+ __func__, res);
+ rc = -EFAULT;
+ }
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
@@ -599,14 +610,14 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
- 1, /* write by caller */
+ m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
- free_user_pages(m->page_list, rc, 0);
+ free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
@@ -618,7 +629,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
- free_user_pages(m->page_list, m->nr_pages, 0);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
@@ -651,7 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
- free_user_pages(m->page_list, m->nr_pages, 1);
+ free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
diff --git a/drivers/misc/ibmasm/Makefile b/drivers/misc/ibmasm/Makefile
index 9e63ade5ffd6..1b9dd0f44411 100644
--- a/drivers/misc/ibmasm/Makefile
+++ b/drivers/misc/ibmasm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IBM_ASM) := ibmasm.o
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 8e540f4e9d52..7e33025b4854 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -155,7 +155,7 @@ int ibmasm_event_buffer_init(struct service_processor *sp)
buffer = kmalloc(sizeof(struct event_buffer), GFP_KERNEL);
if (!buffer)
- return 1;
+ return -ENOMEM;
buffer->next_index = 0;
buffer->next_serial_number = 1;
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index c5a456b0a564..e914b8c80943 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -94,12 +94,14 @@ static int ibmasm_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(sp->dirname, IBMASM_NAME_SIZE, "%d", sp->number);
snprintf(sp->devname, IBMASM_NAME_SIZE, "%s%d", DRIVER_NAME, sp->number);
- if (ibmasm_event_buffer_init(sp)) {
+ result = ibmasm_event_buffer_init(sp);
+ if (result) {
dev_err(sp->dev, "Failed to allocate event buffer\n");
goto error_eventbuffer;
}
- if (ibmasm_heartbeat_init(sp)) {
+ result = ibmasm_heartbeat_init(sp);
+ if (result) {
dev_err(sp->dev, "Failed to allocate heartbeat command\n");
goto error_heartbeat;
}
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fc7efedbc4be..24108bfad889 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1132,7 +1132,8 @@ static void kgdbts_put_char(u8 chr)
ts.run_test(0, chr);
}
-static int param_set_kgdbts_var(const char *kmessage, struct kernel_param *kp)
+static int param_set_kgdbts_var(const char *kmessage,
+ const struct kernel_param *kp)
{
int len = strlen(kmessage);
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index bfb6c45b6130..687a0dbbe199 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LKDTM_H
#define __LKDTM_H
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index 9e0b4f959987..7eebbdfbcacd 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to logic bugs (e.g. bad dereferences,
* bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
@@ -62,9 +63,11 @@ void lkdtm_BUG(void)
BUG();
}
+static int warn_counter;
+
void lkdtm_WARNING(void)
{
- WARN_ON(1);
+ WARN(1, "Warning message trigger count: %d\n", warn_counter++);
}
void lkdtm_EXCEPTION(void)
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 981b3ef71e47..ba92291508dc 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -56,122 +56,54 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
#ifdef CONFIG_KPROBES
-static void lkdtm_handler(void);
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
-
-
-/* jprobe entry point handlers. */
-static unsigned int jp_do_irq(unsigned int irq)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
- struct irqaction *action)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone,
- struct scan_control *sc)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-# ifdef CONFIG_IDE
-static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev, unsigned int cmd,
- unsigned long arg)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-# endif
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
#endif
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
- struct jprobe jprobe;
+ struct kprobe kprobe;
};
-#define CRASHPOINT(_name, _write, _symbol, _entry) \
+#define CRASHPOINT(_name, _symbol) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
- .write = _write, \
- }, \
- .jprobe = { \
- .kp.symbol_name = _symbol, \
- .entry = (kprobe_opcode_t *)_entry, \
+ .write = CRASHPOINT_WRITE(_symbol) \
}, \
+ CRASHPOINT_KPROBE(_symbol) \
}
/* Define the possible places where we can trigger a crash point. */
-struct crashpoint crashpoints[] = {
- CRASHPOINT("DIRECT", direct_entry,
- NULL, NULL),
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
#ifdef CONFIG_KPROBES
- CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
- "do_IRQ", jp_do_irq),
- CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
- "handle_IRQ_event", jp_handle_irq_event),
- CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
- "tasklet_action", jp_tasklet_action),
- CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
- "ll_rw_block", jp_ll_rw_block),
- CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
- "shrink_inactive_list", jp_shrink_inactive_list),
- CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
- "hrtimer_start", jp_hrtimer_start),
- CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
- "scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_IRQ_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
# ifdef CONFIG_IDE
- CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
- "generic_ide_ioctl", jp_generic_ide_ioctl),
+ CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
# endif
#endif
};
@@ -190,7 +122,7 @@ struct crashtype {
}
/* Define the possible types of crashes that can be triggered. */
-struct crashtype crashtypes[] = {
+static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
@@ -254,10 +186,10 @@ struct crashtype crashtypes[] = {
};
-/* Global jprobe entry and crashtype. */
-static struct jprobe *lkdtm_jprobe;
-struct crashpoint *lkdtm_crashpoint;
-struct crashtype *lkdtm_crashtype;
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
@@ -280,7 +212,7 @@ MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
/* Return the crashtype number or NULL if the name is invalid */
-static struct crashtype *find_crashtype(const char *name)
+static const struct crashtype *find_crashtype(const char *name)
{
int i;
@@ -296,34 +228,35 @@ static struct crashtype *find_crashtype(const char *name)
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
-static noinline void lkdtm_do_action(struct crashtype *crashtype)
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
{
- BUG_ON(!crashtype || !crashtype->func);
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
crashtype->func();
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
- struct crashtype *crashtype)
+ const struct crashtype *crashtype)
{
int ret;
/* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->jprobe.kp.symbol_name) {
+ if (!crashpoint->kprobe.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
}
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
- lkdtm_jprobe = &crashpoint->jprobe;
- ret = register_jprobe(lkdtm_jprobe);
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
if (ret < 0) {
- pr_info("Couldn't register jprobe %s\n",
- crashpoint->jprobe.kp.symbol_name);
- lkdtm_jprobe = NULL;
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
@@ -336,13 +269,14 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
-/* Called by jprobe entry points. */
-static void lkdtm_handler(void)
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
{
unsigned long flags;
bool do_it = false;
- BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
@@ -357,6 +291,8 @@ static void lkdtm_handler(void)
if (do_it)
lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
}
static ssize_t lkdtm_debugfs_entry(struct file *f,
@@ -364,7 +300,7 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
char *buf;
int err;
@@ -432,7 +368,7 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
- struct crashtype *crashtype;
+ const struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
@@ -468,7 +404,7 @@ static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
- struct crashtype *crashtype = NULL;
+ const struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int i;
@@ -556,8 +492,8 @@ static void __exit lkdtm_module_exit(void)
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
pr_info("Crash point unregistered\n");
}
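With jprobes removed, each crash point is hooked by a plain kprobe whose pre-handler fires before the probed symbol runs. A bare-bones sketch of that registration pattern (the handler and module names are illustrative):

#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/printk.h>

static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	pr_info("hit %s\n", kp->symbol_name);
	return 0;	/* 0: continue into the probed function as normal */
}

static struct kprobe my_kprobe = {
	.symbol_name = "do_IRQ",	/* resolved when the probe is registered */
	.pre_handler = my_pre_handler,
};

static int __init my_probe_init(void)
{
	return register_kprobe(&my_kprobe);
}
module_init(my_probe_init);

static void __exit my_probe_exit(void)
{
	unregister_kprobe(&my_kprobe);
}
module_exit(my_probe_exit);

MODULE_LICENSE("GPL");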
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
index ffb6aeac07b3..f5494a6d4be5 100644
--- a/drivers/misc/lkdtm_heap.c
+++ b/drivers/misc/lkdtm_heap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests relating directly to heap memory, including
* page allocation and slab allocations.
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index c7635a79341f..53b85c9d16b8 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to validating kernel memory
* permissions: non-executable regions, non-writable regions, and
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
index 3564477b8c2d..58d180af72cf 100644
--- a/drivers/misc/lkdtm_rodata.c
+++ b/drivers/misc/lkdtm_rodata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This includes functions that are meant to live entirely in .rodata
* (via objcopy tricks), to validate the non-executability of .rodata.
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index df6ac985fbb5..a64372cc148d 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This is for all the tests related to copy_to_user() and copy_from_user()
* hardening.
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 12cceb011a23..cd6825afa8e1 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
# Copyright (c) 2010-2014, Intel Corporation.
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index e19e6acb191b..374edde72a14 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -23,5 +23,4 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
-EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 7d2d5d4a1624..b52e9b97a7c0 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -83,25 +83,6 @@ TRACE_EVENT(mei_pci_cfg_read,
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
-TRACE_EVENT(mei_pci_cfg_write,
- TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
- TP_ARGS(dev, reg, offs, val),
- TP_STRUCT__entry(
- __string(dev, dev_name(dev))
- __field(const char *, reg)
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- __assign_str(dev, dev_name(dev))
- __entry->reg = reg;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%s] pci cfg write %s[%#x] = %#x",
- __get_str(dev), __entry->reg, __entry->offs, __entry->val)
-);
-
#endif /* _MEI_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 78b3172c8e6e..f4f17552c9b8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -225,7 +225,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
/*
* ME maps runtime suspend/resume to D0i states,
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 0566f9bfa7de..e1b909123fb0 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -141,7 +141,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* MEI requires to resume from runtime suspend mode
* in order to perform link reset flow upon system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
/*
* TXE maps runtime suspend/resume to own power gating states,
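Both MEI probe paths swap the PCI-specific PCI_DEV_FLAGS_NEEDS_RESUME bit for the generic driver-flags interface. A minimal sketch of the new call in a probe routine; everything except the kernel APIs is illustrative:

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/*
	 * Ask the PM core never to skip this device's system suspend/resume
	 * callbacks, so the link reset flow always runs.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
	return 0;
}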
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 6fd9d367dea7..227cc7443671 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -1,3 +1,5 @@
+menu "Intel MIC & related support"
+
comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
@@ -150,3 +152,5 @@ config VOP
if VOP
source "drivers/vhost/Kconfig.vringh"
endif
+
+endmenu
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index f2b1323ff96c..1a43622b183f 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel MIC Linux driver.
# Copyright(c) 2013, Intel Corporation.
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
index 6e9675e12a09..921a7e7e0fbd 100644
--- a/drivers/misc/mic/card/Makefile
+++ b/drivers/misc/mic/card/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel MIC Linux driver.
# Copyright(c) 2013, Intel Corporation.
diff --git a/drivers/misc/mic/cosm/Makefile b/drivers/misc/mic/cosm/Makefile
index b85d4d49df46..97d74cb12030 100644
--- a/drivers/misc/mic/cosm/Makefile
+++ b/drivers/misc/mic/cosm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel MIC Coprocessor State Management (COSM) Driver
# Copyright(c) 2015, Intel Corporation.
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
index f3b502333ded..25f153367980 100644
--- a/drivers/misc/mic/host/Makefile
+++ b/drivers/misc/mic/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - Intel MIC Linux driver.
# Copyright(c) 2013, Intel Corporation.
diff --git a/drivers/misc/mic/scif/Makefile b/drivers/misc/mic/scif/Makefile
index 29cfc3e51ac9..ff372555d118 100644
--- a/drivers/misc/mic/scif/Makefile
+++ b/drivers/misc/mic/scif/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile - SCIF driver.
# Copyright(c) 2014, Intel Corporation.
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index 637cc4686742..b665757ca89a 100644
--- a/drivers/misc/mic/scif/scif_rb.c
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
* the read barrier in scif_rb_count(..)
*/
wmb();
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* scif_rb_space(..)
*/
mb();
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
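The scif_rb hunks are part of the tree-wide move from ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE(). The publish pattern they implement, sketched with a hypothetical ring descriptor:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct ring {
	u32 *write_ptr;			/* offset word polled by the peer */
	u32 current_write_offset;
};

static void ring_commit(struct ring *rb)
{
	wmb();				/* payload stores before the index update */
	/* single, tear-free store of the new offset */
	WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
}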
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 329727e00e97..c824329f7012 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
struct scif_endpt_rma_info *rma = &ep->rma_info;
mutex_init(&rma->rma_lock);
- init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
- SCIF_DMA_64BIT_PFN);
+ init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
spin_lock_init(&rma->tc_lock);
mutex_init(&rma->mmn_lock);
INIT_LIST_HEAD(&rma->reg_list);
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
index e1ef8daedd5a..a036dbb4101e 100644
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -277,7 +277,7 @@ retry:
* Need to restart list traversal if there has been
* an asynchronous list entry deletion.
*/
- if (ACCESS_ONCE(ep->rma_info.async_list_del))
+ if (READ_ONCE(ep->rma_info.async_list_del))
goto retry;
}
mutex_unlock(&ep->rma_info.rma_lock);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index deb203026496..320276f42653 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -92,6 +92,7 @@ struct pci_endpoint_test {
void __iomem *bar[6];
struct completion irq_raised;
int last_irq;
+ int num_irqs;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
@@ -226,6 +227,9 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
u32 src_crc32;
u32 dst_crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
@@ -311,6 +315,9 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
size_t alignment = test->alignment;
u32 crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -369,6 +376,9 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
size_t alignment = test->alignment;
u32 crc32;
+ if (size > SIZE_MAX - alignment)
+ goto err;
+
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@@ -504,6 +514,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
dev_err(dev, "failed to get MSI interrupts\n");
+ test->num_irqs = irq;
}
err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
@@ -533,6 +544,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->base = test->bar[test_reg_bar];
if (!test->base) {
+ err = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
@@ -542,6 +554,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
+ err = id;
dev_err(dev, "unable to get id\n");
goto err_iounmap;
}
@@ -549,17 +562,24 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
- misc_device->name = name;
+ misc_device->name = kstrdup(name, GFP_KERNEL);
+ if (!misc_device->name) {
+ err = -ENOMEM;
+ goto err_ida_remove;
+ }
misc_device->fops = &pci_endpoint_test_fops,
err = misc_register(misc_device);
if (err) {
dev_err(dev, "failed to register device\n");
- goto err_ida_remove;
+ goto err_kfree_name;
}
return 0;
+err_kfree_name:
+ kfree(misc_device->name);
+
err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
@@ -569,6 +589,9 @@ err_iounmap:
pci_iounmap(pdev, test->bar[bar]);
}
+ for (i = 0; i < irq; i++)
+ devm_free_irq(dev, pdev->irq + i, test);
+
err_disable_msi:
pci_disable_msi(pdev);
pci_release_regions(pdev);
@@ -582,19 +605,25 @@ err_disable_pdev:
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
+ int i;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
return;
+ if (id < 0)
+ return;
misc_deregister(&test->miscdev);
+ kfree(misc_device->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
+ for (i = 0; i < test->num_irqs; i++)
+ devm_free_irq(&pdev->dev, pdev->irq + i, test);
pci_disable_msi(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
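The three new size checks guard the size + alignment addition against wrap-around before it reaches dma_alloc_coherent(). The idiom in isolation, as a hedged sketch with a hypothetical helper:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

static void *alloc_aligned_buffer(struct device *dev, size_t size,
				  size_t alignment, dma_addr_t *phys)
{
	/* Reject requests where size + alignment would wrap past SIZE_MAX */
	if (size > SIZE_MAX - alignment)
		return NULL;

	return dma_alloc_coherent(dev, size + alignment, phys, GFP_KERNEL);
}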
diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile
index 4fc40d8e1bcc..bbb622c19c06 100644
--- a/drivers/misc/sgi-xp/Makefile
+++ b/drivers/misc/sgi-xp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for SGI's XP devices.
#
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 7f327121e6d7..0c775d6fcf59 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -172,9 +172,9 @@ struct xpc_arch_operations xpc_arch_ops;
* Timer function to enforce the timelimit on the partition disengage.
*/
static void
-xpc_timeout_partition_disengage(unsigned long data)
+xpc_timeout_partition_disengage(struct timer_list *t)
{
- struct xpc_partition *part = (struct xpc_partition *)data;
+ struct xpc_partition *part = from_timer(part, t, disengage_timer);
DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
@@ -190,7 +190,7 @@ xpc_timeout_partition_disengage(unsigned long data)
* specify when the next timeout should occur.
*/
static void
-xpc_hb_beater(unsigned long dummy)
+xpc_hb_beater(struct timer_list *unused)
{
xpc_arch_ops.increment_heartbeat();
@@ -205,8 +205,7 @@ static void
xpc_start_hb_beater(void)
{
xpc_arch_ops.heartbeat_init();
- init_timer(&xpc_hb_timer);
- xpc_hb_timer.function = xpc_hb_beater;
+ timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
xpc_hb_beater(0);
}
@@ -931,10 +930,8 @@ xpc_setup_partitions(void)
part->act_state = XPC_P_AS_INACTIVE;
XPC_SET_REASON(part, 0, 0);
- init_timer(&part->disengage_timer);
- part->disengage_timer.function =
- xpc_timeout_partition_disengage;
- part->disengage_timer.data = (unsigned long)part;
+ timer_setup(&part->disengage_timer,
+ xpc_timeout_partition_disengage, 0);
part->setup_state = XPC_P_SS_UNSET;
init_waitqueue_head(&part->teardown_wq);
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 7d71c04fc938..5a12d2a54049 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -323,16 +323,16 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
* was received.
*/
static void
-xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
+xpc_check_for_dropped_notify_IRQ_sn2(struct timer_list *t)
{
- struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
+ struct xpc_partition *part =
+ from_timer(part, t, sn.sn2.dropped_notify_IRQ_timer);
if (xpc_part_ref(part)) {
xpc_check_for_sent_chctl_flags_sn2(part);
- part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
- XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
- add_timer(&part_sn2->dropped_notify_IRQ_timer);
+ t->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
+ add_timer(t);
xpc_part_deref(part);
}
}
@@ -1232,10 +1232,7 @@ xpc_setup_ch_structures_sn2(struct xpc_partition *part)
/* Setup a timer to check for dropped notify IRQs */
timer = &part_sn2->dropped_notify_IRQ_timer;
- init_timer(timer);
- timer->function =
- (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
- timer->data = (unsigned long)part;
+ timer_setup(timer, xpc_check_for_dropped_notify_IRQ_sn2, 0);
timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
add_timer(timer);
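The sgi-xp hunks follow the tree-wide timer conversion: callbacks now take the timer itself and recover their container with from_timer(), and init_timer() plus manual .function/.data assignment becomes timer_setup(). The pattern, sketched with a hypothetical structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo {
	struct timer_list timer;
	int pending;
};

static void foo_timeout(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);	/* container_of() helper */

	f->pending = 0;
}

static void foo_start(struct foo *f)
{
	timer_setup(&f->timer, foo_timeout, 0);		/* replaces init_timer() */
	mod_timer(&f->timer, jiffies + HZ);
}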
diff --git a/drivers/misc/ti_dac7512.c b/drivers/misc/ti_dac7512.c
deleted file mode 100644
index f5456fb7d773..000000000000
--- a/drivers/misc/ti_dac7512.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * dac7512.c - Linux kernel module for
- * Texas Instruments DAC7512
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/of.h>
-
-static ssize_t dac7512_store_val(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char tmp[2];
- unsigned long val;
- int ret;
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- tmp[0] = val >> 8;
- tmp[1] = val & 0xff;
- spi_write(spi, tmp, sizeof(tmp));
- return count;
-}
-
-static DEVICE_ATTR(value, S_IWUSR, NULL, dac7512_store_val);
-
-static struct attribute *dac7512_attributes[] = {
- &dev_attr_value.attr,
- NULL
-};
-
-static const struct attribute_group dac7512_attr_group = {
- .attrs = dac7512_attributes,
-};
-
-static int dac7512_probe(struct spi_device *spi)
-{
- int ret;
-
- spi->bits_per_word = 8;
- spi->mode = SPI_MODE_0;
- ret = spi_setup(spi);
- if (ret < 0)
- return ret;
-
- return sysfs_create_group(&spi->dev.kobj, &dac7512_attr_group);
-}
-
-static int dac7512_remove(struct spi_device *spi)
-{
- sysfs_remove_group(&spi->dev.kobj, &dac7512_attr_group);
- return 0;
-}
-
-static const struct spi_device_id dac7512_id_table[] = {
- { "dac7512", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(spi, dac7512_id_table);
-
-#ifdef CONFIG_OF
-static const struct of_device_id dac7512_of_match[] = {
- { .compatible = "ti,dac7512", },
- { }
-};
-MODULE_DEVICE_TABLE(of, dac7512_of_match);
-#endif
-
-static struct spi_driver dac7512_driver = {
- .driver = {
- .name = "dac7512",
- .of_match_table = of_match_ptr(dac7512_of_match),
- },
- .probe = dac7512_probe,
- .remove = dac7512_remove,
- .id_table = dac7512_id_table,
-};
-
-module_spi_driver(dac7512_driver);
-
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("DAC7512 16-bit DAC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1e688bfec567..9047c0a529b2 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void)
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
- if (x86_hyper != &x86_hyper_vmware)
+ if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 7e3ed1aeada2..abba078f7f49 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel mmc core.
#
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2ad7b5c69156..ea80ff4cd7f9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
+#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -86,6 +87,7 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
/*
* There is one mmc_blk_data per slot.
@@ -96,6 +98,7 @@ struct mmc_blk_data {
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
+ struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
@@ -121,6 +124,32 @@ struct mmc_blk_data {
int area_type;
};
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 on first)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
@@ -299,6 +328,7 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
@@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- bool is_rpmb = false;
+ unsigned int target_part;
u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
+ /*
+ * The RPMB accesses come in from the character device, so we
+ * need to target these explicitly. Otherwise we just target the
+ * partition type for the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
@@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
- err = mmc_blk_part_switch(card, md->part_type);
+ err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
+ if (idata->rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
@@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ if (idata->rpmb) {
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_cmd __user *ic_ptr)
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
@@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
@@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
idatas[0] = idata;
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -596,7 +641,8 @@ cmd_done:
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_multi_cmd __user *user)
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
@@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
num_of_cmds = i;
goto cmd_err;
}
+ /* This will be NULL on non-RPMB ioctl()s */
+ idata[i]->rpmb = rpmb;
}
card = md->queue.card;
@@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
- (struct mmc_ioc_cmd __user *)arg);
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
@@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
- (struct mmc_ioc_multi_cmd __user *)arg);
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
default:
@@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
- struct mmc_blk_data *md = mq->blkdata;
- /*
- * If this is a RPMB partition access, return ture
- */
- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
- return true;
-
- return false;
-}
-
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
/* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, main_md->part_type);
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
/*
* The block layer doesn't support all sector count
@@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req && !mq->qcnt)
/* claim host only for the first request */
- mmc_get_card(card);
+ mmc_get_card(card, NULL);
ret = mmc_blk_part_switch(card, md->part_type);
if (ret) {
@@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if (!mq->qcnt)
- mmc_put_card(card);
+ mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
@@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
return 0;
}
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This essentially just redirects the incoming ioctl()s to the main
+ * block device that spawned the RPMB character device.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ put_device(&rpmb->dev);
+ mmc_blk_put(rpmb->md);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
/* MMC Physical partitions consist of two boot partitions and
* up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
@@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int idx, ret = 0;
+ int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
- if (card->part[idx].size) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+ * RPMB partitions do not provide block access; they
+ * are only accessed using ioctl()s. Thus create
+ * special RPMB block devices that do not have a
+ * backing block queue for these.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
@@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
}
}
- return ret;
+ return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
@@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
@@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void)
{
int res;
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
@@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void)
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
- goto out;
+ goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
- goto out2;
+ goto out_blkdev_unreg;
return 0;
- out2:
+
+out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
return res;
}
@@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
}
module_init(mmc_blk_init);
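The RPMB character device reuses the existing MMC_IOC_CMD/MMC_IOC_MULTI_CMD interface, only routed through the chardev so the ioctl targets the RPMB partition. A hypothetical userspace sketch; the device node name and the command contents are assumptions for illustration, not part of this patch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

static int rpmb_ioctl_example(const char *devnode)	/* e.g. "/dev/mmcblk0rpmb" */
{
	struct mmc_ioc_cmd cmd;
	unsigned char frame[512] = { 0 };
	int fd, ret;

	fd = open(devnode, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 18;		/* READ_MULTIPLE_BLOCK, purely illustrative */
	cmd.blksz = 512;
	cmd.blocks = 1;
	cmd.write_flag = 0;		/* read; a real RPMB exchange needs more steps */
	mmc_ioc_cmd_set_data(cmd, frame);

	ret = ioctl(fd, MMC_IOC_CMD, &cmd);
	close(fd);
	return ret;
}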
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 860ca7c8df86..5946636101ef 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMC_CORE_BLOCK_H
#define _MMC_CORE_BLOCK_H
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 301246513a37..a4b49e25fe96 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
+
#ifdef CONFIG_DEBUG_FS
mmc_remove_card_debugfs(card);
#endif
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+EXPORT_SYMBOL(mmc_start_request);
/*
* mmc_wait_data_done() - done callback for data request
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning first if needed and possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected to happen seldom, if at all, but it reduces
+ * performance, so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_STOP_TRANSMISSION,
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC,
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+ cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT,
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
}
EXPORT_SYMBOL(mmc_align_data_size);
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
+
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
{
+ struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed || host->claimer == current)
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
- host->claimer = current;
+ mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
+ host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
- mmc_claim_host(card->host);
+ __mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
- mmc_release_host(card->host);
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
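The new ctx argument lets one logical context, rather than one task, own the host, so different tasks belonging to the same queue can nest claims. A small sketch of how a caller inside the mmc core might use the reworked mmc_get_card()/mmc_put_card(); the my_mmc_queue struct and the embedded struct mmc_ctx are assumptions for illustration, not part of this patch.

	#include <linux/mmc/card.h>
	#include <linux/mmc/host.h>

	struct my_mmc_queue {
		struct mmc_card	*card;
		struct mmc_ctx	ctx;	/* identifies this queue as the claimer */
	};

	static void my_mmc_queue_work(struct my_mmc_queue *mq)
	{
		/* claim on behalf of the context, not of the current task */
		mmc_get_card(mq->card, &mq->ctx);

		/* ... issue requests; any task passing &mq->ctx may re-claim ... */

		mmc_put_card(mq->card, &mq->ctx);
	}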
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#endif /* CONFIG_REGULATOR */
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or an errno. A non-zero errno must be handled; it is either a
+ * critical error or -EPROBE_DEFER. A return of 0 means no critical error, but
+ * it does not mean all regulators were found, because they are all optional.
+ * If you require certain regulators, check separately in your driver whether
+ * they were populated after calling this function.
+ */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
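Given the return convention documented above, a caller propagates any non-zero value (including -EPROBE_DEFER) and then checks the individual supplies itself, since 0 does not guarantee they exist. A hedged sketch of a host probe helper under that convention (names hypothetical); this is the same pattern the cavium, dw_mmc and meson-gx hunks below switch to when they replace the -EPROBE_DEFER comparison with a plain error check.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mmc/host.h>

	static int my_host_get_regulators(struct mmc_host *mmc)
	{
		int ret;

		ret = mmc_regulator_get_supply(mmc);
		if (ret)
			return ret;	/* critical error or -EPROBE_DEFER */

		/* 0 does not imply the supplies were found; both are optional */
		if (IS_ERR(mmc->supply.vmmc))
			dev_dbg(mmc_dev(mmc), "no vmmc regulator found\n");
		if (IS_ERR(mmc->supply.vqmmc))
			dev_dbg(mmc_dev(mmc), "no vqmmc regulator found\n");

		return 0;
	}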
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
- u32 clock;
/*
* If we cannot switch voltages, return failure so the caller
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EAGAIN;
goto power_cycle;
}
- /*
- * During a signal voltage level switch, the clock must be gated
- * for 5 ms according to the SD spec
- */
- clock = host->ios.clock;
- host->ios.clock = 0;
- mmc_set_ios(host);
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+ if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
- mmc_delay(10);
- host->ios.clock = clock;
- mmc_set_ios(host);
-
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca861091a776..71e6c6d7ceb7 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
@@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
struct mmc_async_req;
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
@@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
void mmc_release_host(struct mmc_host *host);
-void mmc_get_card(struct mmc_card *card);
-void mmc_put_card(struct mmc_card *card);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
/**
* mmc_claim_host - exclusively claim a host
@@ -141,7 +145,11 @@ void mmc_put_card(struct mmc_card *card);
*/
static inline void mmc_claim_host(struct mmc_host *host)
{
- __mmc_claim_host(host, NULL);
+ __mmc_claim_host(host, NULL, NULL);
}
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ad88deb2e8f3..64b03d6eaf18 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host)
host->hold_retune += 1;
}
-void mmc_retune_hold_now(struct mmc_host *host)
-{
- host->retune_now = 0;
- host->hold_retune += 1;
-}
-
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
@@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host)
else
WARN_ON(1);
}
+EXPORT_SYMBOL(mmc_retune_release);
int mmc_retune(struct mmc_host *host)
{
@@ -165,9 +160,9 @@ out:
return err;
}
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = (struct mmc_host *)data;
+ struct mmc_host *host = from_timer(host, t, retune_timer);
mmc_retune_needed(host);
}
@@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data)
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width;
+ u32 bus_width, drv_type;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
@@ -385,7 +389,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
- setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+ timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
* By default, hosts do not support SGIO or large requests.
@@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
+ host->fixed_drv_type = -EINVAL;
+
return host;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 77d6f60d1bf9..fb689a1065ed 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -19,12 +19,17 @@ void mmc_unregister_host_class(void);
void mmc_retune_enable(struct mmc_host *host);
void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
-void mmc_retune_hold_now(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
static inline void mmc_retune_recheck(struct mmc_host *host)
{
if (host->hold_retune <= 1)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 36217ad5e9b1..a552f61060d2 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
@@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
@@ -1289,13 +1291,18 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
+ int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
card->drive_strength = drive_strength;
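A worked example of the new fixed-type branch, using hypothetical values:

	/*
	 * Worked example (hypothetical): fixed-emmc-driver-type = <1> in DT
	 * gives fixed_drv_type = 1.  mmc_driver_type_mask(1) is BIT(1), so:
	 *
	 *   card_drv_type & BIT(1) != 0  ->  drive_strength = 1
	 *   card_drv_type & BIT(1) == 0  ->  drive_strength = 0 (default type)
	 */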
@@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
* disabled for a time, so a flag is needed to indicate to re-enable the
* Command Queue.
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
+ if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->cqe_enabled = true;
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 54686ca4bfb7..908e4db03535 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
from_exception)
return;
- mmc_claim_host(card->host);
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
timeout = MMC_OPS_TIMEOUT_MS;
use_busy_signal = true;
@@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
- goto out;
+ return;
}
/*
@@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_card_set_doing_bkops(card);
else
mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
}
+EXPORT_SYMBOL(mmc_start_bkops);
/*
* Flush the cache to the non-volatile storage.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ if (mq && mmc_card_removed(mq->card))
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ /* Initialize thread_sem even if it is not used */
+ sema_init(&mq->thread_sem, 1);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
int ret = -ENOMEM;
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
}
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
- sema_init(&mq->thread_sem, 1);
+ mmc_setup_queue(mq, card);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index f18d3f656baa..547b457c4251 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
@@ -35,12 +36,14 @@ struct mmc_blk_request {
/**
* enum mmc_drv_op - enumerates the operations in the mmc_queue_req
* @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
* @MMC_DRV_OP_BOOT_WP: write protect boot partitions
* @MMC_DRV_OP_GET_CARD_STATUS: get card status
* @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
*/
enum mmc_drv_op {
MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
MMC_DRV_OP_BOOT_WP,
MMC_DRV_OP_GET_CARD_STATUS,
MMC_DRV_OP_GET_EXT_CSD,
@@ -81,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern int mmc_access_rpmb(struct mmc_queue *);
-
#endif
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index fb725934fa21..f664e9cbc9f8 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains work-arounds for many known SD/MMC
* and SDIO hardware bugs.
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4fd1620b732d..45bf78f32716 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
int err;
u32 cid[4];
u32 rocr = 0;
+ bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
-
+retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ /*
+ * Re-read switch information in case it has changed since
+ * oldcard was initialized.
+ */
+ if (oldcard) {
+ err = mmc_read_switch(card);
+ if (err)
+ goto free_card;
+ }
+ if (mmc_sd_card_using_v18(card)) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto done;
+ }
+ }
+
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A) {
err = mmc_sd_init_uhs_card(card);
@@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+done:
host->card = card;
return 0;
@@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 1ada9808c329..497c026a5c5a 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MMC_CORE_SD_H
#define _MMC_CORE_SD_H
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c771843e4c15..7a2eaf8410a3 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host)
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
- ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c15637178ff..567028c9219a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -352,6 +352,19 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDIO
+ tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on HAS_DMA
+ depends on OF
+ help
+ This selects support for the SD/MMC Host Controller on
+ Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -429,6 +442,7 @@ config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON
config MMC_CAVIUM_THUNDERX
tristate "Cavium ThunderX SD/MMC Card Interface support"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- depends on GPIOLIB
+ depends on GPIO_THUNDERX
depends on OF_ADDRESS
help
This selects Cavium ThunderX SD/MMC Card Interface.
@@ -899,3 +913,15 @@ config MMC_SDHCI_XENON
This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
+ depends on MMC_SDHCI_PLTFM && OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in TI's DRA7 SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 303f5cd46cd9..a43cf0d5a5d3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for MMC/SD host controller drivers
#
@@ -64,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
+obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0a0ebf3a096d..e55f3932d580 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
return 0;
}
-static void atmci_timeout_timer(unsigned long data)
+static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
- host = (struct atmel_mci *)data;
+ host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
@@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = 0;
}
-static void atmci_detect_change(unsigned long data)
+static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
@@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host,
if (gpio_is_valid(slot->detect_pin)) {
int ret;
- setup_timer(&slot->detect_timer, atmci_detect_change,
- (unsigned long)slot);
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
@@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
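This timer conversion repeats across the hosts touched by this series (atmel-mci above, jz4740 and dw_mmc below): the callback now receives the struct timer_list pointer and recovers its containing structure with from_timer(), and setup_timer() with an unsigned long cookie becomes timer_setup(). A generic sketch of the pattern with hypothetical names:

	#include <linux/timer.h>

	struct my_host {
		struct timer_list	timeout_timer;
		/* ... */
	};

	static void my_timeout(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the timer_list member */
		struct my_host *host = from_timer(host, t, timeout_timer);

		/* ... handle the timeout using host ... */
	}

	static void my_host_init_timer(struct my_host *host)
	{
		/* no (unsigned long)host cookie any more */
		timer_setup(&host->timeout_timer, my_timeout, 0);
	}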
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index fbd29f00fca0..ed5cefb83768 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..73fd75c3c824 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -75,7 +75,7 @@ struct hs_timing {
u32 smpl_phase_min;
};
-struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
{ /* reserved */ },
{ /* SD */
{7, 0, 15, 15,}, /* 0: LEGACY 400k */
diff --git a/drivers/mmc/host/dw_mmc-zx.h b/drivers/mmc/host/dw_mmc-zx.h
index f369997a39ec..09ac52766f14 100644
--- a/drivers/mmc/host/dw_mmc-zx.h
+++ b/drivers/mmc/host/dw_mmc-zx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DW_MMC_ZX_H_
#define _DW_MMC_ZX_H_
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 860313bd952a..0aa39975f33b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -401,16 +401,37 @@ static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
static inline void dw_mci_set_cto(struct dw_mci *host)
{
unsigned int cto_clks;
+ unsigned int cto_div;
unsigned int cto_ms;
+ unsigned long irqflags;
cto_clks = mci_readl(host, TMOUT) & 0xff;
- cto_ms = DIV_ROUND_UP(cto_clks, host->bus_hz / 1000);
+ cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+ if (cto_div == 0)
+ cto_div = 1;
+ cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz);
/* add a bit spare time */
cto_ms += 10;
- mod_timer(&host->cto_timer,
- jiffies + msecs_to_jiffies(cto_ms) + 1);
+ /*
+ * The durations we're working with are fairly short so we have to be
+ * extra careful about synchronization here. Specifically in hardware a
+ * command timeout is _at most_ 5.1 ms, so that means we expect an
+ * interrupt (either command done or timeout) to come rather quickly
+ * after the mci_writel. ...but just in case we have a long interrupt
+ * latency let's add a bit of paranoia.
+ *
+ * In general we'll assume that at least an interrupt will be asserted
+ * in hardware by the time the cto_timer runs. ...and if it hasn't
+ * been asserted in hardware by that time then we'll assume it'll never
+ * come.
+ */
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+ mod_timer(&host->cto_timer,
+ jiffies + msecs_to_jiffies(cto_ms) + 1);
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
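The reworked calculation scales the TMOUT clock count by the card clock divider instead of assuming the count runs at bus_hz. A worked example with hypothetical register values:

	/*
	 * Hypothetical values: cto_clks = 255 (TMOUT[7:0]), CLKDIV = 1 so
	 * cto_div = 2, bus_hz = 100 MHz:
	 *
	 *   cto_ms = DIV_ROUND_UP(1000 * 255 * 2, 100000000)
	 *          = DIV_ROUND_UP(510000, 100000000) = 1
	 *
	 * plus the 10 ms of spare time, so the timer is armed ~11 ms out.
	 */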
static void dw_mci_start_command(struct dw_mci *host,
@@ -425,11 +446,11 @@ static void dw_mci_start_command(struct dw_mci *host,
wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
+ mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+
/* response expected command only */
if (cmd_flags & SDMMC_CMD_RESP_EXP)
dw_mci_set_cto(host);
-
- mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
@@ -796,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
@@ -1003,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
@@ -1915,15 +1936,55 @@ static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
static void dw_mci_set_drto(struct dw_mci *host)
{
unsigned int drto_clks;
+ unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
- drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);
+ drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
+ if (drto_div == 0)
+ drto_div = 1;
+ drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
+ host->bus_hz);
/* add a bit spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+}
+
+static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
+ return false;
+
+ /*
+ * Really be certain that the timer has stopped. This is a bit of
+ * paranoia and could only really happen if we had really bad
+ * interrupt latency and the interrupt routine and timeout were
+ * running concurrently so that the del_timer() in the interrupt
+ * handler couldn't run.
+ */
+ WARN_ON(del_timer_sync(&host->cto_timer));
+ clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
}
static void dw_mci_tasklet_func(unsigned long priv)
@@ -1952,8 +2013,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
- if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
- &host->pending_events))
+ if (!dw_mci_clear_pending_cmd_complete(host))
break;
cmd = host->cmd;
@@ -2068,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2122,8 +2181,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_SENDING_STOP:
- if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
- &host->pending_events))
+ if (!dw_mci_clear_pending_cmd_complete(host))
break;
/* CMD error in data command */
@@ -2570,6 +2628,8 @@ done:
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
+ del_timer(&host->cto_timer);
+
if (!host->cmd_status)
host->cmd_status = status;
@@ -2594,6 +2654,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
struct dw_mci *host = dev_id;
u32 pending;
struct dw_mci_slot *slot = host->slot;
+ unsigned long irqflags;
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
@@ -2601,8 +2662,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
/* Check volt switch first, since it can look like an error */
if ((host->state == STATE_SENDING_CMD11) &&
(pending & SDMMC_INT_VOLT_SWITCH)) {
- unsigned long irqflags;
-
mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
pending &= ~SDMMC_INT_VOLT_SWITCH;
@@ -2618,11 +2677,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->cto_timer);
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -2635,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2647,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -2662,9 +2729,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_CMD_DONE) {
- del_timer(&host->cto_timer);
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
dw_mci_cmd_interrupt(host, pending);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_CD) {
@@ -2741,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto err_host_allocated;
if (!mmc->ocr_avail)
@@ -2921,9 +2991,9 @@ no_dma:
host->use_dma = TRANS_MODE_PIO;
}
-static void dw_mci_cmd11_timer(unsigned long arg)
+static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2935,10 +3005,38 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
-static void dw_mci_cto_timer(unsigned long arg)
+static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /*
+ * If somehow we have very bad interrupt latency it's remotely possible
+ * that the timer could fire while the interrupt is still pending or
+ * while the interrupt is midway through running. Let's be paranoid
+ * and detect those two cases. Note that this paranoia is somewhat
+ * justified because in this function we don't actually cancel the
+ * pending command in the controller; we just assume it will never come.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "CTO timeout when already completed\n");
+ goto exit;
+ }
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_CMD11:
case STATE_SENDING_CMD:
@@ -2957,12 +3055,39 @@ static void dw_mci_cto_timer(unsigned long arg)
host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_dto_timer(unsigned long arg)
+static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, dto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -2977,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
@@ -3127,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
-
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
-
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
+ timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
+ timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..e3124f06a47e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -74,7 +74,8 @@ struct dw_mci_dma_slave {
 * @stop_abort: The command currently prepared for stopping transfer.
* @prev_blksz: The former transfer blksz record.
* @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
+ * @use_dma: Which DMA channel is in use for the current transfer; zero
+ * denotes PIO mode.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7db8c7a8d38d..712e08d9a45e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -586,9 +586,9 @@ poll_timeout:
return true;
}
-static void jz4740_mmc_timeout(unsigned long data)
+static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+ struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_reset(host);
jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
+ timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 85745ef179e2..e0862d3f65b3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto free_host;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
new file mode 100644
index 000000000000..09cb89645d06
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -0,0 +1,768 @@
+/*
+ * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define MESON_MX_SDIO_ARGU 0x00
+
+#define MESON_MX_SDIO_SEND 0x04
+ #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
+ #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
+ #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
+ #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
+ #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
+ #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
+ #define MESON_MX_SDIO_SEND_DATA BIT(20)
+ #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
+ #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
+
+#define MESON_MX_SDIO_CONF 0x08
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
+ #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
+ #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
+ #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
+ #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
+ #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
+ #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
+ #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
+ #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
+ #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
+
+#define MESON_MX_SDIO_IRQS 0x0c
+ #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
+ #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
+ #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
+ #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
+ #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
+ #define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
+ #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
+ #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
+ #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
+ #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
+
+#define MESON_MX_SDIO_IRQC 0x10
+ #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
+ #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+
+#define MESON_MX_SDIO_MULT 0x14
+ #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
+ #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
+ #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
+ #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
+ #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
+ #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
+ #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
+
+#define MESON_MX_SDIO_ADDR 0x18
+
+#define MESON_MX_SDIO_EXT 0x1c
+ #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
+
+#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
+#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
+#define MESON_MX_SDIO_MAX_SLOTS 3
+
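The driver packs and unpacks these register fields with the linux/bitfield.h helpers. A tiny self-contained illustration of FIELD_PREP()/FIELD_GET() with one of the masks above (the value 45 matches the R1 response length used later in meson_mx_mmc_start_cmd(); the example function name is hypothetical):

	#include <linux/bitfield.h>

	/* Pack a 45-bit response length into SEND[15:8] and read it back. */
	static u32 meson_mx_sdio_resp_bits_example(void)
	{
		u32 send = 0;

		send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
		/* send == 45 << 8 == 0x2d00 */

		return FIELD_GET(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, send); /* 45 */
	}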
+struct meson_mx_mmc_host {
+ struct device *controller_dev;
+
+ struct clk *parent_clk;
+ struct clk *core_clk;
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+ struct clk_fixed_factor fixed_factor;
+ struct clk *fixed_factor_clk;
+
+ void __iomem *base;
+ int irq;
+ spinlock_t irq_lock;
+
+ struct timer_list cmd_timeout;
+
+ unsigned int slot_id;
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+};
+
+static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
+ u32 val)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 regval;
+
+ regval = readl(host->base + reg);
+ regval &= ~mask;
+ regval |= (val & mask);
+
+ writel(regval, host->base + reg);
+}
+
+static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
+{
+ writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ udelay(2);
+}
+
+static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+ return cmd->mrq->cmd;
+ else if (mmc_op_multi(cmd->opcode) &&
+ (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+ return cmd->mrq->stop;
+ else
+ return NULL;
+}
+
+static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned int pack_size;
+ unsigned long irqflags, timeout;
+ u32 mult, send = 0, ext = 0;
+
+ host->cmd = cmd;
+
+ if (cmd->busy_timeout)
+ timeout = msecs_to_jiffies(cmd->busy_timeout);
+ else
+ timeout = msecs_to_jiffies(1000);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
+ break;
+ case MMC_RSP_R2:
+ /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
+ send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
+ break;
+ default:
+ break;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
+
+ if (cmd->data) {
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ (cmd->data->blocks - 1));
+
+ pack_size = cmd->data->blksz * BITS_PER_BYTE;
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
+ else
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
+
+ ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ pack_size);
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_MX_SDIO_SEND_DATA;
+ else
+ send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
+
+ cmd->data->bytes_xfered = 0;
+ }
+
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
+ (0x40 | cmd->opcode));
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
+ mult |= BIT(31);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ /* enable the CMD done interrupt */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+
+ /* clear pending interrupts */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT,
+ MESON_MX_SDIO_IRQS_CMD_INT);
+
+ writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
+ writel(ext, host->base + MESON_MX_SDIO_EXT);
+ writel(send, host->base + MESON_MX_SDIO_SEND);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ mod_timer(&host->cmd_timeout, jiffies + timeout);
+}
+
+static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
+{
+ struct mmc_request *mrq;
+
+ mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+ unsigned long clk_rate = ios->clock;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+
+ host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
+ if (host->error) {
+ dev_warn(mmc_dev(mmc),
+ "failed to set MMC clock to %lu: %d\n",
+ clk_rate, host->error);
+ return;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ /* fall through */
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+ break;
+ }
+}
+
+static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+ struct scatterlist *sg;
+
+ if (!data)
+ return 0;
+
+ sg = data->sg;
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(mmc),
+ "unaligned scatterlist: offset %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_mmc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ writel(sg_dma_address(mrq->data->sg),
+ host->base + MESON_MX_SDIO_ADDR);
+
+ if (mrq->sbc)
+ meson_mx_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mx_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+
+ return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
+}
+
+static void meson_mx_mmc_read_response(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 mult;
+ int i, resp[4];
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
+ mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i <= 3; i++)
+ resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
+ cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
+ cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
+ cmd->resp[3] = (resp[3] << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ }
+}
+
+static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
+ u32 irqs, u32 send)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ /*
+ * NOTE: even though it shouldn't happen, we sometimes get command
+ * interrupts twice (at least this is what it looks like). Ideally
+ * we should find out why this happens and warn here as soon as it occurs.
+ */
+ if (!cmd)
+ return IRQ_HANDLED;
+
+ cmd->error = 0;
+ meson_mx_mmc_read_response(host->mmc, cmd);
+
+ if (cmd->data) {
+ if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
+ (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
+ cmd->error = -EILSEQ;
+ } else {
+ if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
+ (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
+ cmd->error = -EILSEQ;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
+{
+ struct meson_mx_mmc_host *host = (void *) data;
+ u32 irqs, send;
+ unsigned long irqflags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ irqs = readl(host->base + MESON_MX_SDIO_IRQS);
+ send = readl(host->base + MESON_MX_SDIO_SEND);
+
+ if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
+ ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
+ else
+ ret = IRQ_HANDLED;
+
+ /* finally ACK all pending interrupts */
+ writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ return ret;
+}
+
+static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_mmc_host *host = (void *) irq_data;
+ struct mmc_command *cmd = host->cmd, *next_cmd;
+
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ del_timer_sync(&host->cmd_timeout);
+
+ if (cmd->data) {
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ next_cmd = meson_mx_mmc_get_next_cmd(cmd);
+ if (next_cmd)
+ meson_mx_mmc_start_cmd(host->mmc, next_cmd);
+ else
+ meson_mx_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_mmc_timeout(struct timer_list *t)
+{
+ struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ unsigned long irqflags;
+ u32 irqc;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* disable the CMD interrupt */
+ irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+ irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
+ writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ /*
+ * skip the timeout handling if the interrupt handler already processed
+ * the command.
+ */
+ if (!host->cmd)
+ return;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
+ host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
+ readl(host->base + MESON_MX_SDIO_ARGU));
+
+ host->cmd->error = -ETIMEDOUT;
+
+ meson_mx_mmc_request_done(host);
+}
+
+static struct mmc_host_ops meson_mx_mmc_ops = {
+ .request = meson_mx_mmc_request,
+ .set_ios = meson_mx_mmc_set_ios,
+ .card_busy = meson_mx_mmc_card_busy,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
+{
+ struct device_node *slot_node;
+
+ /*
+ * TODO: the MMC core framework currently does not properly support
+ * controllers with multiple slots, so we only register the first
+ * slot for now.
+ */
+ slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ if (!slot_node) {
+ dev_warn(parent, "no 'mmc-slot' sub-node found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ return of_platform_device_create(slot_node, NULL, parent);
+}
+
+static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *slot_dev = mmc_dev(mmc);
+ int ret;
+
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
+ dev_err(slot_dev, "missing 'reg' property\n");
+ return -EINVAL;
+ }
+
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
+ dev_err(slot_dev, "invalid 'reg' property value %d\n",
+ host->slot_id);
+ return -EINVAL;
+ }
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ return ret;
+
+ mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count =
+ FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ 0xffffffff);
+ mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ 0xffffffff);
+ mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
+ mmc->max_blk_size /= BITS_PER_BYTE;
+
+ /* Get the min and max supported clock rates */
+ mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
+ mmc->f_max = clk_round_rate(host->cfg_div_clk,
+ clk_get_rate(host->parent_clk));
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->ops = &meson_mx_mmc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+{
+ struct clk_init_data init;
+ const char *clk_div_parent, *clk_fixed_factor_parent;
+
+ clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+ init.num_parents = 1;
+ host->fixed_factor.div = 2;
+ host->fixed_factor.mult = 1;
+ host->fixed_factor.hw.init = &init;
+
+ host->fixed_factor_clk = devm_clk_register(host->controller_dev,
+ &host->fixed_factor.hw);
+ if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
+ return PTR_ERR(host->fixed_factor_clk);
+
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+ init.num_parents = 1;
+ host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
+ host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->controller_dev,
+ &host->cfg_div.hw);
+ if (WARN_ON(IS_ERR(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ return 0;
+}
+
+static int meson_mx_mmc_probe(struct platform_device *pdev)
+{
+ struct platform_device *slot_pdev;
+ struct mmc_host *mmc;
+ struct meson_mx_mmc_host *host;
+ struct resource *res;
+ int ret, irq;
+ u32 conf;
+
+ slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
+ if (!slot_pdev)
+ return -ENODEV;
+ else if (IS_ERR(slot_pdev))
+ return PTR_ERR(slot_pdev);
+
+ mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto error_unregister_slot_pdev;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->controller_dev = &pdev->dev;
+
+ spin_lock_init(&host->irq_lock);
+ timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
+
+ platform_set_drvdata(pdev, host);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(host->controller_dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto error_free_mmc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(host->controller_dev, irq,
+ meson_mx_mmc_irq,
+ meson_mx_mmc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto error_free_mmc;
+
+ host->core_clk = devm_clk_get(host->controller_dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto error_free_mmc;
+ }
+
+ host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
+ if (IS_ERR(host->parent_clk)) {
+ ret = PTR_ERR(host->parent_clk);
+ goto error_free_mmc;
+ }
+
+ ret = meson_mx_mmc_register_clks(host);
+ if (ret)
+ goto error_free_mmc;
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable core clock\n");
+ goto error_free_mmc;
+ }
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable MMC clock\n");
+ goto error_disable_core_clk;
+ }
+
+ conf = 0;
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
+ writel(conf, host->base + MESON_MX_SDIO_CONF);
+
+ meson_mx_mmc_soft_reset(host);
+
+ ret = meson_mx_mmc_add_host(host);
+ if (ret)
+ goto error_disable_clks;
+
+ return 0;
+
+error_disable_clks:
+ clk_disable_unprepare(host->cfg_div_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(host->core_clk);
+error_free_mmc:
+ mmc_free_host(mmc);
+error_unregister_slot_pdev:
+ of_platform_device_destroy(&slot_pdev->dev, NULL);
+ return ret;
+}
+
+static int meson_mx_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
+ struct device *slot_dev = mmc_dev(host->mmc);
+
+ del_timer_sync(&host->cmd_timeout);
+
+ mmc_remove_host(host->mmc);
+
+ of_platform_device_destroy(slot_dev, NULL);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+ { .compatible = "amlogic,meson8-sdio", },
+ { .compatible = "amlogic,meson8b-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+ .probe = meson_mx_mmc_probe,
+ .remove = meson_mx_mmc_remove,
+ .driver = {
+ .name = "meson-mx-sdio",
+ .of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
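For reference, the new driver's interrupt path splits work between a hard handler (which takes host->irq_lock, reads and ACKs the pending bits, and returns IRQ_WAKE_THREAD when a command completed) and a threaded handler that finishes the request in sleepable context. A minimal sketch of that split, with hypothetical foo_* names and a made-up status register offset, might look like this:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_host {
	void __iomem *base;
	spinlock_t irq_lock;
};

/* hard handler: runs in interrupt context, only touches registers */
static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo_host *host = data;
	u32 status;

	spin_lock(&host->irq_lock);
	status = readl(host->base + 0x00);	/* hypothetical IRQ status register */
	writel(status, host->base + 0x00);	/* ACK all pending bits */
	spin_unlock(&host->irq_lock);

	/* hand the sleepable completion work over to the threaded handler */
	return status ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

/* threaded handler: may sleep, e.g. to unmap DMA and complete the request */
static irqreturn_t foo_irq_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

Both handlers would be wired up with devm_request_threaded_irq(dev, irq, foo_irq, foo_irq_thread, IRQF_ONESHOT, NULL, host), as the driver does above; IRQF_ONESHOT keeps the line masked until the thread has run.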
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1f54a818489..e8a1bb1ae694 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev,
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
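Several drivers touched by this series (mmci, mtk-sd, mxcmmc, omap_hsmmc) switch from checking only -EPROBE_DEFER to propagating every error from mmc_regulator_get_supply(), which suggests the core helper now handles the optional-regulator cases itself and only returns errors that must abort probing. A minimal probe-time sketch of the resulting pattern (foo_probe and the label are illustrative, not from the patch):

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/*
	 * Propagate any error: a non-zero return (for example -EPROBE_DEFER)
	 * means probing cannot continue, while the "no regulators described"
	 * case is assumed to be handled inside the core helper and returns 0.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto free_host;

	/* ... remaining host setup ... */
	return 0;

free_host:
	mmc_free_host(mmc);
	return ret;
}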
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab08420..6457a7d8880f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -67,6 +67,7 @@
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
+#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define MSDC_DMA_SA 0x90
@@ -74,10 +75,14 @@
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
+#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
+#define EMMC50_CFG3 0x220
+#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
/* Register Mask */
@@ -95,6 +100,9 @@
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -183,6 +191,9 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+/* SDC_ADV_CFG0 mask */
+#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
@@ -212,11 +223,22 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
+#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
+
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
@@ -228,6 +250,11 @@
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+
+#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -290,9 +317,23 @@ struct msdc_save_para {
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
+ u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
+ u32 emmc50_cfg3;
+ u32 sdc_fifo_cfg;
+};
+
+struct mtk_mmc_compatible {
+ u8 clk_div_bits;
+ bool hs400_tune; /* only used for MT8173 */
+ u32 pad_tune_reg;
+ bool async_fifo;
+ bool data_tune;
+ bool busy_check;
+ bool stop_clk_fix;
+ bool enhance_rx;
};
struct msdc_tune_para {
@@ -309,6 +350,7 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
+ const struct mtk_mmc_compatible *dev_comp;
struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
@@ -334,11 +376,13 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
+ u32 latch_ck;
u32 hs400_ds_delay;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
@@ -350,6 +394,59 @@ struct msdc_host {
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2712_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
+
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
@@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
- sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD, &mode);
+ else
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA, &mode);
/* DDR mode will double the clk cycles for data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
@@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
static void msdc_gate_clock(struct msdc_host *host)
{
+ clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
}
@@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->src_clk);
+ clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
}
@@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
- sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_clr_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
@@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
- sdr_set_bits(host->base + MSDC_CFG,
- MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
div = 0; /* div is ignored when bit18 is set */
}
@@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sclk = (host->src_clk_freq >> 2) / div;
}
}
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ /*
+ * src_clk and HCLK use the same bit to gate/ungate, so gating
+ * only src_clk requires gating its parent (the mux) instead.
+ */
+ if (host->src_clk_cg)
+ clk_disable_unprepare(host->src_clk_cg);
+ else
+ clk_disable_unprepare(clk_get_parent(host->src_clk));
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | div);
+ else
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+ (mode << 12) | div);
+ if (host->src_clk_cg)
+ clk_prepare_enable(host->src_clk_cg);
+ else
+ clk_prepare_enable(clk_get_parent(host->src_clk));
+
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mclk = hz;
host->timing = timing;
@@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
*/
if (host->sclk <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
@@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + tune_reg);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+ if (host->dev_comp->stop_clk_fix) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_STOP_DLY, 3);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_RDVALIDSEL);
+ }
+
+ if (host->dev_comp->busy_check)
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+
+ if (host->dev_comp->async_fifo) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPWAIT, 3);
+ if (host->dev_comp->enhance_rx) {
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
+ } else {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPSTSENSEL, 2);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_CRCSTSENSEL, 2);
+ }
+ /* with the async FIFO in use there is no need to tune the internal delay */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGRESP);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGCRCSTS);
+ }
+
+ if (host->dev_comp->data_tune) {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ } else {
+ /* choose clock tune */
+ sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ }
+
/* Configure to enable SDIO mode.
* this is required, otherwise SDIO CMD5 fails
*/
@@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1403,20 +1581,20 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
+ if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
@@ -1424,7 +1602,7 @@ skip_fall:
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
@@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1519,14 +1700,14 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
@@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->hs400_mode)
+ if (host->hs400_mode &&
+ host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
@@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
return ret;
}
@@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ /* HS400 mode requires this bit to be cleared to 0 */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ /* to improve read performance, set outstanding to 2 */
+ sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
+
return 0;
}
@@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
+ &host->latch_ck);
+
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
@@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
+ const struct of_device_id *of_id;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
+
+ of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
@@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto host_free;
host->src_clk = devm_clk_get(&pdev->dev, "source");
@@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* the source clock control gate is an optional clock */
+ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
+ if (IS_ERR(host->src_clk_cg))
+ host->src_clk_cg = NULL;
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
@@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
+ host->dev_comp = of_id->data;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ if (host->dev_comp->clk_div_bits == 8)
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+ host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
+ host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
}
static void msdc_restore_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+ writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
+ writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
}
static int msdc_runtime_suspend(struct device *dev)
@@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
-static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
-
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove = msdc_drv_remove,
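The mtk-sd changes key all per-SoC differences (divider width, pad tune register, async FIFO, busy check, and so on) off a mtk_mmc_compatible structure attached to the OF match table, which probe then retrieves through of_match_node(). A condensed sketch of that lookup pattern, using made-up foo_*/vendor names:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_compatible {
	u8 clk_div_bits;	/* 8-bit legacy vs. 12-bit extended divider */
	bool async_fifo;
};

static const struct foo_compatible foo_v1 = { .clk_div_bits = 8 };
static const struct foo_compatible foo_v2 = { .clk_div_bits = 12, .async_fifo = true };

static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct foo_compatible *dev_comp;

	of_id = of_match_node(foo_of_ids, pdev->dev.of_node);
	if (!of_id)
		return -EINVAL;

	/* every register-layout decision is keyed off dev_comp at runtime */
	dev_comp = of_id->data;

	if (dev_comp->clk_div_bits == 8)
		dev_info(&pdev->dev, "legacy 8-bit clock divider\n");

	return 0;
}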
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 58d74b8d6c79..210247b3d11a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
return IRQ_NONE;
}
-static void mvsd_timeout_timer(unsigned long data)
+static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = (struct mvsd_host *)data;
+ struct mvsd_host *host = from_timer(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
}
- setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, mvsd_timeout_timer, 0);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
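The mvsdio, mxcmmc and omap hunks all perform the same mechanical conversion to the new timer API: callbacks now take the struct timer_list pointer and recover their container via from_timer(), while setup_timer()/init_timer() become timer_setup(). A minimal sketch of the converted pattern, with a hypothetical foo_host:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo_host {
	struct timer_list timer;
	bool busy;
};

/* new-style callback: receives the timer itself instead of an unsigned long */
static void foo_timeout(struct timer_list *t)
{
	struct foo_host *host = from_timer(host, t, timer);

	host->busy = false;	/* e.g. abort the request that timed out */
}

static void foo_start(struct foo_host *host)
{
	/* replaces setup_timer(&host->timer, foo_timeout, (unsigned long)host) */
	timer_setup(&host->timer, foo_timeout, 0);
	mod_timer(&host->timer, jiffies + HZ);
}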
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 1d5418e4efae..5ff8ef7223cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static void mxcmci_watchdog(unsigned long data)
+static void mxcmci_watchdog(struct timer_list *t)
{
- struct mmc_host *mmc = (struct mmc_host *)data;
- struct mxcmci_host *host = mmc_priv(mmc);
+ struct mxcmci_host *host = from_timer(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev)
dat3_card_detect = true;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto out_free;
if (!mmc->ocr_avail) {
@@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_dma;
}
- init_timer(&host->watchdog);
- host->watchdog.function = &mxcmci_watchdog;
- host->watchdog.data = (unsigned long)mmc;
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
mmc_add_host(mmc);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bd49f34d7654..adf32682f27a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work)
}
static void
-mmc_omap_cmd_timer(unsigned long data)
+mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
}
static void
-mmc_omap_clk_timer(unsigned long data)
+mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
tasklet_hi_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_timer(unsigned long arg)
+static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_seg_size = mmc->max_req_size;
if (slot->pdata->get_cover_state != NULL) {
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
+ timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
(unsigned long)slot);
}
@@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
- setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
- (unsigned long) host);
+ timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
spin_lock_init(&host->clk_lock);
- setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+ timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
spin_lock_init(&host->dma_lock);
spin_lock_init(&host->slot_lock);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 3b5e6d11069b..071693ebfe18 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -147,10 +147,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define VDD_1V8 1800000 /* 180000 uV */
-#define VDD_3V0 3000000 /* 300000 uV */
-#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -308,8 +304,7 @@ err_set_ocr:
return ret;
}
-static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
- int vdd)
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
@@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
if (power_on) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0) {
- dev_err(host->dev, "pbias set voltage fail\n");
- return ret;
- }
-
if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
@@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
- int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
@@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- ret = omap_hsmmc_set_pbias(host, false, 0);
+ ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
@@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (ret)
return ret;
- ret = omap_hsmmc_set_pbias(host, true, vdd);
+ ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
@@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/* Allow an aux regulator */
@@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host, 1, vdd);
+ ret = omap_hsmmc_set_power(host, 1);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host, 0, 0);
+ omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
diff --git a/drivers/mmc/host/pxamci.h b/drivers/mmc/host/pxamci.h
index f6c2e2fcce37..d301ca18c5d4 100644
--- a/drivers/mmc/host/pxamci.h
+++ b/drivers/mmc/host/pxamci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define MMC_STRPCL 0x0000
#define STOP_CLOCK (1 << 0)
#define START_CLOCK (2 << 0)
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f905f2361d12..41cbe84c1d18 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
@@ -146,11 +147,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
WARN_ON(host->sg_len > 1);
/* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg->offset, 8)) {
- host->force_pio = true;
- renesas_sdhi_internal_dmac_enable_dma(host, false);
- return;
- }
+ if (!IS_ALIGNED(sg->offset, 8))
+ goto force_pio;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +161,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
}
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
- if (ret < 0)
- return;
+ if (ret == 0)
+ goto force_pio;
renesas_sdhi_internal_dmac_enable_dma(host, true);
@@ -176,6 +174,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
dtran_mode);
renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
sg->dma_address);
+
+ return;
+
+force_pio:
+ host->force_pio = true;
+ renesas_sdhi_internal_dmac_enable_dma(host, false);
}
static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index df4465439e13..9ab10436e4b8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
@@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 41b57713b620..0848dc0f882e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
- int err;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPRX_CTL,
+ PHASE_SELECT_MASK, sample_point);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPTX_CTL, 0x1F, sample_point);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, SD_VPTX_CTL,
+ PHASE_SELECT_MASK, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
return 0;
}
@@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
{
int err;
struct mmc_command cmd = {};
+ struct rtsx_pcr *pcr = host->pcr;
- err = sd_change_phase(host, sample_point, true);
- if (err < 0)
- return err;
+ sd_change_phase(host, sample_point, true);
+
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
cmd.opcode = opcode;
err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
@@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
sd_clear_error(host);
+ rtsx_pci_write_register(pcr, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
return err;
}
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 08ae0ff13513..b988997a1e80 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -73,6 +73,7 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
};
@@ -82,13 +83,118 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
+ unsigned long private[0] ____cacheline_aligned;
};
+static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+{
+ return (void *)c->private;
+}
+
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
+ INTEL_DSM_V33_SWITCH = 4,
+};
+
+struct intel_host {
+ u32 dsm_fns;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+ 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *result = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
+ size_t len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+ } else {
+ dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
+ __func__, fn, obj->type, obj->buffer.length);
+ err = -EINVAL;
+ }
+
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ struct mmc_host *mmc)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ if (err) {
+ pr_debug("%s: DSM not supported, error %d\n",
+ mmc_hostname(mmc), err);
+ return;
+ }
+
+ pr_debug("%s: DSM function mask %#x\n",
+ mmc_hostname(mmc), intel_host->dsm_fns);
+}
+
+static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ unsigned int fn;
+ u32 result = 0;
+ int err;
+
+ err = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (err)
+ return err;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ fn = INTEL_DSM_V33_SWITCH;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ fn = INTEL_DSM_V18_SWITCH;
+ break;
+ default:
+ return 0;
+ }
+
+ err = intel_dsm(intel_host, dev, fn, &result);
+ pr_debug("%s: %s DSM fn %u error %d result %u\n",
+ mmc_hostname(mmc), __func__, fn, err, result);
+
+ return 0;
+}
+
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -269,56 +375,26 @@ out:
return ret;
}
-static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during emmc probe slot goes here */
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ struct sdhci_host *host = c->host;
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- return 0;
-}
-
-static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
-
- if (!c || !c->host)
- return 0;
-
- /* Platform specific code during sdio probe slot goes here */
-
- return 0;
-}
-
-static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host || !c->slot)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during sd probe slot goes here */
-
if (hid && !strcmp(hid, "80865ACA"))
host->mmc_host_ops.get_cd = bxt_get_cd;
+ intel_dsm_init(intel_host, &pdev->dev, host->mmc);
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ intel_start_signal_voltage_switch;
+
return 0;
}
@@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
- .probe_slot = sdhci_acpi_emmc_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
- .probe_slot = sdhci_acpi_sdio_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
@@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
- .probe_slot = sdhci_acpi_sd_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
@@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct sdhci_acpi_slot *slot;
struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
+ size_t priv_size;
const char *hid;
const char *uid;
int err;
@@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
+ uid = acpi_device_uid(device);
+
+ slot = sdhci_acpi_get_slot(hid, uid);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
@@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ priv_size = slot ? slot->priv_size : 0;
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
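The sdhci-acpi rework lets each slot declare extra private storage (priv_size) that is appended to the host allocation and reached through a trailing zero-length array, as in sdhci_acpi_host/sdhci_acpi_priv() above. A compressed sketch of that layout trick with illustrative names:

#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_ctx {
	int flags;
	/* per-variant private data lives directly behind the common part */
	unsigned long private[0] ____cacheline_aligned;
};

static inline void *foo_priv(struct foo_ctx *ctx)
{
	return (void *)ctx->private;
}

static struct foo_ctx *foo_alloc(size_t priv_size)
{
	/* one allocation covers the common context plus the variant data */
	return kzalloc(sizeof(struct foo_ctx) + priv_size, GFP_KERNEL);
}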
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 56529c3d389a..0f589e26ee63 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -27,15 +28,14 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
-#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
-#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
-#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
-#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
@@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
- tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
- (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
@@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
- tmp |= mode;
+ tmp &= ~SDHCI_CDNS_HRS06_MODE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
@@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
- if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
- tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
- tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp &= ~SDHCI_CDNS_HRS06_TUNE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
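The sdhci-cadence hunks replace open-coded shift-and-mask arithmetic with GENMASK() field definitions manipulated through FIELD_PREP()/FIELD_GET()/FIELD_FIT() from <linux/bitfield.h>, which derive the shift from the mask. A small self-contained illustration (the FOO_* fields are made up):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FOO_MODE	GENMASK(2, 0)	/* bits 2..0 */
#define FOO_TUNE	GENMASK(13, 8)	/* bits 13..8 */

static u32 foo_pack(u32 reg, unsigned int mode, unsigned int tune)
{
	/* clears both fields, then packs the new values without manual shifts */
	reg &= ~(FOO_MODE | FOO_TUNE);
	reg |= FIELD_PREP(FOO_MODE, mode) | FIELD_PREP(FOO_TUNE, tune);
	return reg;
}

static unsigned int foo_get_mode(u32 reg)
{
	/* equivalent to the open-coded (reg >> 0) & 0x7 */
	return FIELD_GET(FOO_MODE, reg);
}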
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fc73e56eb1e2..3fb7d2eec93f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -123,14 +123,17 @@
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -138,6 +141,10 @@ struct sdhci_msm_host {
bool calibration_done;
u8 saved_tuning_phase;
bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
};
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(msm_host->clk, clock);
+ rc = clk_set_rate(core_clk, clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
}
msm_host->clk_rate = clock;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
curr_ios.timing);
}
@@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_msm_hs400(host, &mmc->ios);
}
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * sdhci_msm_check_power_status() should be called whenever a register write
+ * can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW. The
+ * state that the register write will drive the IO lines into is passed as
+ * the req_type argument. This function checks whether the IO lines are
+ * already in the expected state and waits for the power irq only if a
+ * power irq is expected to be triggered, based on the current and the
+ * expected IO line states.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The IRQ for the IO HIGH/LOW request types is generated when there is
+ * a state change in the 1.8V enable bit (bit 3) of the
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
+ * which indicates 3.3V IO voltage. So, when MMC core layer tries
+ * to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where register writes will
+ * not change the current bus state or io level of the controller.
+	 * In this case, no power irq will be triggered and we should
+	 * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ int pwr_state = 0, io_level = 0;
+
irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
irq_status &= INT_MASK;
writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ /*
+	 * There is a rare HW scenario where the first clear pulse could be
+	 * lost when the actual reset and the clear/read of the status register
+	 * happen at the same time. Hence, retry up to 10 times to make sure
+	 * the status register is cleared. Otherwise, a spurious power IRQ
+	 * would be raised, resulting in system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
+				mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+	/* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
/*
* The driver has to acknowledge the interrupt, switch voltages and
@@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host)
* switches are handled by the sdhci core, so just report success.
*/
writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
- sdhci_msm_voltage_switch(host);
return IRQ_HANDLED;
}
@@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
- return clk_round_rate(msm_host->clk, ULONG_MAX);
+ return clk_round_rate(core_clk, ULONG_MAX);
}
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
@@ -1092,6 +1236,69 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*
+ * Platform specific register write functions. If any register write needs
+ * to be followed up by platform specific actions, they can be added here.
+ * These functions can go to sleep when writes to certain registers are
+ * done. They rely on sdhci_set_ios not taking a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = {
.get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
struct resource *core_memres;
+ struct clk *clk;
int ret;
u16 host_version, core_minor;
u32 core_version, config;
@@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ msm_host->bulk_clks[1].clk = clk;
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_host->clk)) {
- ret = PTR_ERR(msm_host->clk);
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
- goto pclk_disable;
+ goto bus_clk_disable;
}
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
- ret = clk_prepare_enable(msm_host->clk);
- if (ret)
- goto pclk_disable;
-
core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
@@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /*
+ * Power on reset state may trigger power irq if previous status of
+ * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
+ * interrupt in GIC, any pending power irq interrupt should be
+ * acknowledged. Otherwise power irq interrupt handler would be
+ * fired prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+	 * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
@@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
@@ -1290,9 +1527,8 @@ pm_runtime_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
-pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
@@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
return 0;
}
@@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = clk_prepare_enable(msm_host->clk);
- if (ret) {
- dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret) {
- dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
- clk_disable_unprepare(msm_host->clk);
- return ret;
- }
-
- return 0;
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
}
#endif
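
The power-IRQ handshake added to sdhci-msm above boils down to a flag plus a wait queue: the register-write path clears the flag, the threaded IRQ handler sets it and wakes any waiter, and the writer then blocks with a timeout. A minimal sketch of that pattern, with illustrative names only (not the driver's real symbols):

#include <linux/wait.h>
#include <linux/jiffies.h>

#define DEMO_PWR_IRQ_TIMEOUT_MS	5000	/* assumed timeout, for illustration */

struct demo_host {
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
};

static void demo_init(struct demo_host *h)
{
	init_waitqueue_head(&h->pwr_irq_wait);
}

/* Called before a register write that may raise the power IRQ. */
static void demo_prepare_wait(struct demo_host *h)
{
	h->pwr_irq_flag = false;
}

/* Called from the threaded IRQ handler after the IRQ is acknowledged. */
static void demo_signal_irq(struct demo_host *h)
{
	h->pwr_irq_flag = true;
	wake_up(&h->pwr_irq_wait);
}

/* Called after the register write; returns false on timeout. */
static bool demo_wait_for_irq(struct demo_host *h)
{
	return wait_event_timeout(h->pwr_irq_wait, h->pwr_irq_flag,
				  msecs_to_jiffies(DEMO_PWR_IRQ_TIMEOUT_MS)) != 0;
}
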
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 4e47ed6bc716..682c573e20a7 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
{
if (timing == MMC_TIMING_MMC_DDR52)
sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d96a057a7db8..1f424374bbbb 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
return clock / 256 / 16;
}
+static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+{
+ u32 val;
+ ktime_t timeout;
+
+ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+
+ if (enable)
+ val |= ESDHC_CLOCK_SDCLKEN;
+ else
+ val &= ~ESDHC_CLOCK_SDCLKEN;
+
+ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ udelay(10);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- if (clock == 0)
+ if (clock == 0) {
+ esdhc_clock_enable(host, false);
return;
+ }
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
if (esdhc->vendor_ver < VENDOR_V_23)
@@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
-static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
-{
- u32 val;
- ktime_t timeout;
-
- val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-
- if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
- else
- val &= ~ESDHC_CLOCK_SDCLKEN;
-
- sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- break;
- }
- udelay(10);
- }
-}
-
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
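
The esdhc_clock_enable() helper moved above uses the usual ktime-based bounded poll: compute a deadline, spin on the status bit with a small delay, and give up once the deadline passes. The same idiom in isolation (the register pointer and bit are placeholders, not the driver's symbols):

#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll until the register at @reg has @bit set, for at most @ms milliseconds. */
static int demo_poll_bit(void __iomem *reg, u32 bit, unsigned int ms)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), ms);

	while (!(readl(reg) & bit)) {
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		udelay(10);
	}

	return 0;
}
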
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
new file mode 100644
index 000000000000..628bfe9a3d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -0,0 +1,607 @@
+/**
+ * SDHCI Controller driver for TI's OMAP SoCs
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDHCI_OMAP_CON 0x12c
+#define CON_DW8 BIT(5)
+#define CON_DMA_MASTER BIT(20)
+#define CON_INIT BIT(1)
+#define CON_OD BIT(0)
+
+#define SDHCI_OMAP_CMD 0x20c
+
+#define SDHCI_OMAP_HCTL 0x228
+#define HCTL_SDBP BIT(8)
+#define HCTL_SDVS_SHIFT 9
+#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
+
+#define SDHCI_OMAP_SYSCTL 0x22c
+#define SYSCTL_CEN BIT(2)
+#define SYSCTL_CLKD_SHIFT 6
+#define SYSCTL_CLKD_MASK 0x3ff
+
+#define SDHCI_OMAP_STAT 0x230
+
+#define SDHCI_OMAP_IE 0x234
+#define INT_CC_EN BIT(0)
+
+#define SDHCI_OMAP_AC12 0x23c
+#define AC12_V1V8_SIGEN BIT(19)
+
+#define SDHCI_OMAP_CAPA 0x240
+#define CAPA_VS33 BIT(24)
+#define CAPA_VS30 BIT(25)
+#define CAPA_VS18 BIT(26)
+
+#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
+
+#define SYSCTL_CLKD_MAX 0x3FF
+
+#define IOV_1V8	1800000		/* 1800000 uV */
+#define IOV_3V0	3000000		/* 3000000 uV */
+#define IOV_3V3	3300000		/* 3300000 uV */
+
+struct sdhci_omap_data {
+ u32 offset;
+};
+
+struct sdhci_omap_host {
+ void __iomem *base;
+ struct device *dev;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ struct sdhci_host *host;
+ u8 bus_mode;
+ u8 power_mode;
+};
+
+static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
+ unsigned int offset)
+{
+ return readl(host->base + offset);
+}
+
+static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
+ unsigned int offset, u32 data)
+{
+ writel(data, host->base + offset);
+}
+
+static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
+ bool power_on, unsigned int iov)
+{
+ int ret;
+ struct device *dev = omap_host->dev;
+
+ if (IS_ERR(omap_host->pbias))
+ return 0;
+
+ if (power_on) {
+ ret = regulator_set_voltage(omap_host->pbias, iov, iov);
+ if (ret) {
+ dev_err(dev, "pbias set voltage failed\n");
+ return ret;
+ }
+
+ if (omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_enable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg enable fail\n");
+ return ret;
+ }
+
+ omap_host->pbias_enabled = true;
+ } else {
+ if (!omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_disable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ omap_host->pbias_enabled = false;
+ }
+
+ return 0;
+}
+
+static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
+ unsigned int iov)
+{
+ int ret;
+ struct sdhci_host *host = omap_host->host;
+ struct mmc_host *mmc = host->mmc;
+
+ ret = sdhci_omap_set_pbias(omap_host, false, 0);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
+ return ret;
+ }
+ }
+
+ ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+ unsigned char signal_voltage)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ reg &= ~HCTL_SDVS_MASK;
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ reg |= HCTL_SDVS_33;
+ else
+ reg |= HCTL_SDVS_18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ reg |= HCTL_SDBP;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+}
+
+static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+ int ret;
+ unsigned int iov;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct device *dev;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ dev = omap_host->dev;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS33))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg &= ~AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_3V3;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS18))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg |= AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_1V8;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = sdhci_omap_enable_iov(omap_host, iov);
+ if (ret) {
+		dev_err(dev, "failed to switch IO voltage to %duV\n", iov);
+ return ret;
+ }
+
+	dev_dbg(dev, "IO voltage switched to %duV\n", iov);
+ return 0;
+}
+
+static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
+ unsigned int mode)
+{
+ u32 reg;
+
+ if (omap_host->bus_mode == mode)
+ return;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (mode == MMC_BUSMODE_OPENDRAIN)
+ reg |= CON_OD;
+ else
+ reg &= ~CON_OD;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ omap_host->bus_mode = mode;
+}
+
+static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
+ sdhci_set_ios(mmc, ios);
+}
+
+static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
+ unsigned int clock)
+{
+ u16 dsor;
+
+ dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
+ if (dsor > SYSCTL_CLKD_MAX)
+ dsor = SYSCTL_CLKD_MAX;
+
+ return dsor;
+}
+
+static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg |= SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg &= ~SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long clkdiv;
+
+ sdhci_omap_stop_clock(omap_host);
+
+ if (!clock)
+ return;
+
+ clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
+ clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
+ sdhci_enable_clk(host, clkdiv);
+
+ sdhci_omap_start_clock(omap_host);
+}
+
+static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+static int sdhci_omap_enable_dma(struct sdhci_host *host)
+{
+ u32 reg;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_DMA_MASTER;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ return 0;
+}
+
+static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
+}
+
+static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (width == MMC_BUS_WIDTH_8)
+ reg |= CON_DW8;
+ else
+ reg &= ~CON_DW8;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ sdhci_set_bus_width(host, width);
+}
+
+static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ u32 reg;
+ ktime_t timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->power_mode == power_mode)
+ return;
+
+ if (power_mode != MMC_POWER_ON)
+ return;
+
+ disable_irq(host->irq);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg &= ~CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
+
+ enable_irq(host->irq);
+
+ omap_host->power_mode = power_mode;
+}
+
+static struct sdhci_ops sdhci_omap_ops = {
+ .set_clock = sdhci_omap_set_clock,
+ .set_power = sdhci_omap_set_power,
+ .enable_dma = sdhci_omap_enable_dma,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = sdhci_omap_get_min_clock,
+ .set_bus_width = sdhci_omap_set_bus_width,
+ .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+ int ret = 0;
+ struct device *dev = omap_host->dev;
+ struct regulator *vqmmc;
+
+ vqmmc = regulator_get(dev, "vqmmc");
+ if (IS_ERR(vqmmc)) {
+ ret = PTR_ERR(vqmmc);
+ goto reg_put;
+ }
+
+	/* voltage capabilities might be set by the boot loader, clear them */
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
+
+ if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
+ reg |= CAPA_VS33;
+ if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ reg |= CAPA_VS18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
+
+reg_put:
+ regulator_put(vqmmc);
+
+ return ret;
+}
+
+static const struct sdhci_pltfm_data sdhci_omap_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V |
+ SDHCI_QUIRK2_ACMD23_BROKEN |
+ SDHCI_QUIRK2_RSP_136_HAS_CRC,
+ .ops = &sdhci_omap_ops,
+};
+
+static const struct sdhci_omap_data dra7_data = {
+ .offset = 0x200,
+};
+
+static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sdhci_match);
+
+static int sdhci_omap_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 offset;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct mmc_host *mmc;
+ const struct of_device_id *match;
+ struct sdhci_omap_data *data;
+
+ match = of_match_device(omap_sdhci_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct sdhci_omap_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no sdhci omap data\n");
+ return -EINVAL;
+ }
+ offset = data->offset;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
+ sizeof(*omap_host));
+ if (IS_ERR(host)) {
+ dev_err(dev, "Failed sdhci_pltfm_init\n");
+ return PTR_ERR(host);
+ }
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ omap_host->host = host;
+ omap_host->base = host->ioaddr;
+ omap_host->dev = dev;
+ host->ioaddr += offset;
+
+ mmc = host->mmc;
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ pltfm_host->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err_pltfm_free;
+ }
+
+ ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
+ if (ret) {
+ dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
+ goto err_pltfm_free;
+ }
+
+ omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
+ if (IS_ERR(omap_host->pbias)) {
+ ret = PTR_ERR(omap_host->pbias);
+ if (ret != -ENODEV)
+ goto err_pltfm_free;
+ dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
+ }
+ omap_host->pbias_enabled = false;
+
+ /*
+ * omap_device_pm_domain has callbacks to enable the main
+ * functional clock, interface clock and also configure the
+ * SYSCONFIG register of omap devices. The callback will be invoked
+ * as part of pm_runtime_get_sync.
+ */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(dev);
+ goto err_rpm_disable;
+ }
+
+ ret = sdhci_omap_set_capabilities(omap_host);
+ if (ret) {
+ dev_err(dev, "failed to set system capabilities\n");
+ goto err_put_sync;
+ }
+
+ host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_omap_start_signal_voltage_switch;
+ host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
+
+ sdhci_read_caps(host);
+ host->caps |= SDHCI_CAN_DO_ADMA2;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_put_sync;
+
+ return 0;
+
+err_put_sync:
+ pm_runtime_put_sync(dev);
+
+err_rpm_disable:
+ pm_runtime_disable(dev);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, true);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_omap_driver = {
+ .probe = sdhci_omap_probe,
+ .remove = sdhci_omap_remove,
+ .driver = {
+ .name = "sdhci-omap",
+ .of_match_table = omap_sdhci_match,
+ },
+};
+
+module_platform_driver(sdhci_omap_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci_omap");
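
The clock handling in the new sdhci-omap driver computes its divisor by ceiling division of the functional clock rate by the requested rate, clamped to the 10-bit CLKD field. A standalone sketch of that arithmetic (the constant mirrors SYSCTL_CLKD_MAX; the helper name and the values in the comment are illustrative only):

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_CLKD_MAX	0x3ff	/* 10-bit divisor field */

static u16 demo_calc_divisor(unsigned long fck_rate, unsigned int clock)
{
	unsigned long dsor = DIV_ROUND_UP(fck_rate, clock);

	if (dsor > DEMO_CLKD_MAX)
		dsor = DEMO_CLKD_MAX;

	return dsor;
}

/* e.g. a 192 MHz functional clock and a 400 kHz target give dsor = 480 */
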
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 67d787fa3306..3e4f04fd5175 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,7 +32,6 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -798,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
-/* O2Micro extra registers */
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1290,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 14273ca00641..555970a29c94 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -19,7 +19,40 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
deleted file mode 100644
index 770f53857211..000000000000
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2013 BayHub Technology Ltd.
- *
- * Authors: Peter Guo <peter.guo@bayhubtech.com>
- * Adam Lee <adam.lee@canonical.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __SDHCI_PCI_O2MICRO_H
-#define __SDHCI_PCI_O2MICRO_H
-
-#include "sdhci-pci.h"
-
-/*
- * O2Micro device IDs
- */
-
-#define PCI_DEVICE_ID_O2_SDS0 0x8420
-#define PCI_DEVICE_ID_O2_SDS1 0x8421
-#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
-#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
-#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
-
-/*
- * O2Micro device registers
- */
-
-#define O2_SD_MISC_REG5 0x64
-#define O2_SD_LD0_CTRL 0x68
-#define O2_SD_DEV_CTRL 0x88
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_TEST_REG 0xD4
-#define O2_SD_FUNC_REG0 0xDC
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-#define O2_SD_MISC_CTRL4 0xFC
-#define O2_SD_TUNING_CTRL 0x300
-#define O2_SD_PLL_SETTING 0x304
-#define O2_SD_CLK_SETTING 0x328
-#define O2_SD_CAP_REG2 0x330
-#define O2_SD_CAP_REG0 0x334
-#define O2_SD_UHS1_CAP_SETTING 0x33C
-#define O2_SD_DELAY_CTRL 0x350
-#define O2_SD_UHS2_L1_CTRL 0x35C
-#define O2_SD_FUNC_REG3 0x3E0
-#define O2_SD_FUNC_REG4 0x3E4
-#define O2_SD_LED_ENABLE BIT(6)
-#define O2_SD_FREG0_LEDOFF BIT(13)
-#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
-
-#define O2_SD_VENDOR_SETTING 0x110
-#define O2_SD_VENDOR_SETTING2 0x1C8
-
-extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-
-extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-
-extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-
-#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 75196a2b5289..0056f08a29cc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SDHCI_PCI_H
#define __SDHCI_PCI_H
@@ -5,6 +6,12 @@
* PCI device IDs, sub IDs
*/
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
@@ -25,6 +32,7 @@
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
@@ -163,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+#endif
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d328fcf284d1..cda83ccb2702 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
NULL)
};
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
-static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .no_divider = true,
-};
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
-#else
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
-#endif
-
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
- }, {
- .name = "exynos4-sdhci",
- .driver_data = EXYNOS4_SDHCI_DRV_DATA,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
- .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ .data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0cd6fa80db66..b877c13184c2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+		   /* SDHCI controllers on Tegra186 support 40-bit addressing,
+		    * while IOVA addresses are 48-bit wide on Tegra186.
+		    * With a 64-bit DMA mask used for SDHCI, accesses can
+		    * be broken. Disable 64-bit DMA, which falls back to the
+		    * 32-bit DMA mask. Ideally a 40-bit DMA mask would work,
+		    * but it is not supported as of now.
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.ops = &tegra114_sdhci_ops,
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0d5fcca18c9e..2f14334e42df 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param)
;
}
-static void sdhci_timeout_timer(unsigned long data)
+static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = from_timer(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_timeout_data_timer(unsigned long data)
+static void sdhci_timeout_data_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host *)data;
+ host = from_timer(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* available.
*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
DBG("Version: 0x%08x | Present: 0x%08x\n",
@@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host)
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
- setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- setup_timer(&host->data_timer, sdhci_timeout_data_timer,
- (unsigned long)host);
+ timer_setup(&host->timer, sdhci_timeout_timer, 0);
+ timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
init_waitqueue_head(&host->buf_ready_int);
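
The sdhci.c hunk above, like the tifm_sd, via-sdmmc, vub300 and wbsd hunks later in this series, converts to the timer API in which the callback receives the struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long. A minimal sketch with a made-up host structure:

#include <linux/timer.h>

struct demo_host {
	struct timer_list timer;
	bool timed_out;
};

/* New-style callback: the expired timer is passed in directly. */
static void demo_timeout(struct timer_list *t)
{
	struct demo_host *host = from_timer(host, t, timer);

	host->timed_out = true;
}

static void demo_init(struct demo_host *host)
{
	/* Replaces setup_timer(&host->timer, fn, (unsigned long)host). */
	timer_setup(&host->timer, demo_timeout, 0);
}
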
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 111b66f5439b..04ca0d33a521 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include "sdhci-pltfm.h"
@@ -47,6 +48,7 @@ struct f_sdhost_priv {
struct clk *clk;
u32 vendor_hs200;
struct device *dev;
+ bool enable_cmd_dat_delay;
};
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
@@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 53c970fe0873..cc98355dbdb9 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ if (ret)
return ret;
- }
host->reg_base = devm_ioremap_resource(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 93c4b40df90a..a3d8380ab480 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data)
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(unsigned long data)
+static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
- setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a7293e186e03..583bf3262df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -47,6 +47,7 @@
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/workqueue.h>
#include "tmio_mmc.h"
@@ -166,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ usleep_range(10000, 11000);
}
}
@@ -178,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -186,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
@@ -218,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
tmio_mmc_clk_start(host);
}
@@ -229,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
@@ -1112,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
+ int err;
- mmc_regulator_get_supply(mmc);
+ err = mmc_regulator_get_supply(mmc);
+ if (err)
+ return err;
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
@@ -1215,6 +1219,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->max_blk_count = pdata->max_blk_count ? :
(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ /*
+	 * Since swiotlb has a memory size limitation, calculate the maximum
+	 * size locally (because no API exposes it yet), compare it with the
+	 * current max_req_size, and cap max_req_size if needed as a
+	 * workaround.
+ */
+ if (swiotlb_max_segment()) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
mmc->max_seg_size = mmc->max_req_size;
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
@@ -1286,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
pm_runtime_enable(&pdev->dev);
ret = mmc_add_host(mmc);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
+
mmc_gpiod_request_cd_irq(mmc);
}
return 0;
+
+remove_host:
+ tmio_mmc_host_remove(_host);
+ return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
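
The swiotlb clamp added to tmio_mmc_host_probe() above caps a request at one swiotlb segment, i.e. IO_TLB_SEGSIZE slots of (1 << IO_TLB_SHIFT) bytes each; with the usual constants (IO_TLB_SHIFT = 11, IO_TLB_SEGSIZE = 128) that is 128 * 2048 = 262144 bytes, or 256 KiB. Reduced to a helper (hypothetical name), the check is:

#include <linux/kernel.h>
#include <linux/swiotlb.h>

static unsigned int demo_swiotlb_cap(unsigned int max_req_size)
{
	unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

	return min(max_req_size, max_size);
}
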
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 64da6a88cfb9..cdfeb15b6f05 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto e_free_mmc;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a838bf5480d8..32c4211506fc 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -932,12 +932,12 @@ out:
return result;
}
-static void via_sdc_timeout(unsigned long ulongdata)
+static void via_sdc_timeout(struct timer_list *t)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = (struct via_crdr_mmc_host *)ulongdata;
+ sdhost = from_timer(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
u32 lenreg;
u32 status;
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = via_sdc_timeout;
+ timer_setup(&host->timer, via_sdc_timeout, 0);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 8f569d257405..1fe68137a30f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_inactivity_timer_expired(unsigned long data)
+static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300)
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
-static void vub300_sg_timed_out(unsigned long data)
+static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
sizeof(vub300->vub_name));
return;
}
+
+ return;
+
+copy_error_message:
+ strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
}
/*
@@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface,
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
- init_timer(&vub300->sg_transfer_timer);
- vub300->sg_transfer_timer.data = (unsigned long)vub300;
- vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
- init_timer(&vub300->inactivity_timer);
- vub300->inactivity_timer.data = (unsigned long)vub300;
- vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ timer_setup(&vub300->inactivity_timer,
+ vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 546aaf8d1507..f4233576153b 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = {
* Helper function to reset detection ignore
*/
-static void wbsd_reset_ignore(unsigned long data)
+static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = (struct wbsd_host *)data;
+ struct wbsd_host *host = from_timer(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Set up timers
*/
- init_timer(&host->ignore_timer);
- host->ignore_timer.data = (unsigned long)host;
- host->ignore_timer.function = wbsd_reset_ignore;
+ timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0);
/*
* Maximum number of segments. Worst case is one sector per segment
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5a2d71729b9a..2a8ac6829d42 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,5 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 151d60df303a..d6f8f625e1ff 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the memory technology device drivers.
#
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 36582412ccda..1f4e84f1cd88 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# linux/drivers/chips/Makefile
#
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 800b0e853e86..53e6b2d5932b 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FWH_LOCK_H
#define FWH_LOCK_H
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index afb43d5e1782..1cd0fff0e940 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
-static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
- mtd->_get_unmapped_area = mapram_unmapped_area;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
+ mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
+ mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
return mtd;
}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index e67f73ab44c9..20e3604b4d71 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
-static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_point = maprom_point;
+ mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
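Both chip drivers above trade their ->_get_unmapped_area hook for a ->_point/->_unpoint pair. As a minimal sketch of how a caller consumes that pair (not part of the patch; example_peek() and its error handling are illustrative only):

#include <linux/mtd/mtd.h>

static int example_peek(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	size_t retlen;
	void *virt;
	int ret;

	/* ask for a direct mapping of [ofs, ofs + len) */
	ret = mtd_point(mtd, ofs, len, &retlen, &virt, NULL);
	if (ret)
		return ret;	/* e.g. -EOPNOTSUPP when the device has no ->_point */

	/* [virt, virt + retlen) is directly addressable device memory here */

	return mtd_unpoint(mtd, ofs, retlen);
}

mtd_point() may legitimately map less than requested (retlen < len), which is exactly the property the reworked mtd_get_unmapped_area() further down in this diff relies on.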
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f0f767624cc6..94895eab3066 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# linux/drivers/mtd/devices/Makefile
#
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index b2d7b38f75fd..fef0d5e42e2a 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BCM47XXSFLASH_H
#define __BCM47XXSFLASH_H
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7c887f111a7d..62fd6905c648 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -431,7 +431,7 @@ static int block2mtd_setup2(const char *val)
}
-static int block2mtd_setup(const char *val, struct kernel_param *kp)
+static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return block2mtd_setup2(val);
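The change above (and the matching one in phram.c further down) only constifies the kernel_param argument of the module-parameter setter. A minimal sketch of the resulting signature, with made-up names (not part of the patch):

#include <linux/moduleparam.h>

static int example_setup(const char *val, const struct kernel_param *kp)
{
	/* parse 'val' here; 'kp' may now only be inspected, not modified */
	return 0;
}

module_param_call(example, example_setup, NULL, NULL, 0200);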
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 84b16133554b..0806f72102c0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
- if (IS_ERR_OR_NULL(root))
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ dev_warn(floor->dev.parent,
+ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
+ }
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 268aae45b514..555b94406e0b 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-static struct mtd_partition lart_partitions[] = {
+static const struct mtd_partition lart_partitions[] = {
/* blob */
{
.name = "blob",
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 00eea6fd379c..dbe6a1de2bb8 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index cbd8547d7aad..0bf4aeaf0cb8 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
*virt = mtd->priv + from;
*retlen = len;
+
+ if (phys) {
+ /* limit retlen to the number of contiguous physical pages */
+ unsigned long page_ofs = offset_in_page(*virt);
+ void *addr = *virt - page_ofs;
+ unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+ *phys = __pfn_to_phys(pfn0) + page_ofs;
+ len += page_ofs;
+ while (len > PAGE_SIZE) {
+ len -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pfn0++;
+ pfn1 = vmalloc_to_pfn(addr);
+ if (pfn1 != pfn0) {
+ *retlen = addr - *virt;
+ break;
+ }
+ }
+ }
+
return 0;
}
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return 0;
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- return (unsigned long) mtd->priv + offset;
-}
-
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
- mtd->_get_unmapped_area = ram_get_unmapped_area;
mtd->_read = ram_read;
mtd->_write = ram_write;
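A quick worked example of the contiguity check added to ram_point() above, assuming PAGE_SIZE == 4096 (not part of the patch): if the requested offset lands 512 bytes into a vmalloc page (page_ofs = 512) and len is 10000, the mapping spans three pages. Should the second page not be physically adjacent to the first, the loop stops after one iteration and *retlen is truncated to 4096 - 512 = 3584; if only the third page breaks contiguity, *retlen becomes 8192 - 512 = 7680; if all three pages are contiguous, the full 10000 bytes are reported. A caller that needs more data simply calls mtd_point() again at from + retlen.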
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 8b66e52ca3cc..7287696a21f9 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -266,7 +266,7 @@ static int phram_setup(const char *val)
return ret;
}
-static int phram_param_call(const char *val, struct kernel_param *kp)
+static int phram_param_call(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
return phram_setup(val);
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c
index f5396f26ddb4..26f9feaa5d17 100644
--- a/drivers/mtd/devices/powernv_flash.c
+++ b/drivers/mtd/devices/powernv_flash.c
@@ -47,6 +47,11 @@ enum flash_op {
FLASH_OP_ERASE,
};
+/*
+ * Don't return -ERESTARTSYS if we can't get a token: the MTD core
+ * might have split up the call from userspace and called into the
+ * driver more than once, so we'll already have done some amount of work.
+ */
static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
loff_t offset, size_t len, size_t *retlen, u_char *buf)
{
@@ -63,7 +68,8 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
if (token < 0) {
if (token != -ERESTARTSYS)
dev_err(dev, "Failed to get an async token\n");
-
+ else
+ token = -EINTR;
return token;
}
@@ -78,32 +84,53 @@ static int powernv_flash_async_op(struct mtd_info *mtd, enum flash_op op,
rc = opal_flash_erase(info->id, offset, len, token);
break;
default:
- BUG_ON(1);
- }
-
- if (rc != OPAL_ASYNC_COMPLETION) {
- dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
- op, rc);
+ WARN_ON_ONCE(1);
opal_async_release_token(token);
return -EIO;
}
- rc = opal_async_wait_response(token, &msg);
- opal_async_release_token(token);
- if (rc) {
- dev_err(dev, "opal async wait failed (rc %d)\n", rc);
- return -EIO;
+ if (rc == OPAL_ASYNC_COMPLETION) {
+ rc = opal_async_wait_response_interruptible(token, &msg);
+ if (rc) {
+ /*
+ * If we return the mtd core will free the
+ * buffer we've just passed to OPAL but OPAL
+ * will continue to read or write from that
+ * memory.
+ * It may be tempting to ultimately return 0
+ * if we're doing a read or a write since we
+ * are going to end up waiting until OPAL is
+ * done. However, because the MTD core sends
+ * us the userspace request in chunks, we need
+ * it to know we've been interrupted.
+ */
+ rc = -EINTR;
+ if (opal_async_wait_response(token, &msg))
+ dev_err(dev, "opal_async_wait_response() failed\n");
+ goto out;
+ }
+ rc = opal_get_async_rc(msg);
}
- rc = opal_get_async_rc(msg);
- if (rc == OPAL_SUCCESS) {
- rc = 0;
- if (retlen)
- *retlen = len;
- } else {
- rc = -EIO;
- }
+ /*
+ * OPAL does mutual exclusion on the flash: if it is already in
+ * use, OPAL will return OPAL_BUSY.
+ * During firmware updates by the service processor, OPAL may
+ * be (temporarily) prevented from accessing the flash; in
+ * this case OPAL will also return OPAL_BUSY.
+ * Neither case is exactly an error, but the flash could have
+ * changed, so userspace should be informed.
+ */
+ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY)
+ dev_err(dev, "opal_flash_async_op(op=%d) failed (rc %d)\n",
+ op, rc);
+
+ if (rc == OPAL_SUCCESS && retlen)
+ *retlen = len;
+ rc = opal_error_code(rc);
+out:
+ opal_async_release_token(token);
return rc;
}
@@ -220,21 +247,20 @@ static int powernv_flash_probe(struct platform_device *pdev)
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!data)
+ return -ENOMEM;
+
data->mtd.priv = data;
ret = of_property_read_u32(dev->of_node, "ibm,opal-id", &(data->id));
if (ret) {
dev_err(dev, "no device property 'ibm,opal-id'\n");
- goto out;
+ return ret;
}
ret = powernv_flash_set_driver_info(dev, &data->mtd);
if (ret)
- goto out;
+ return ret;
dev_set_drvdata(dev, data);
@@ -243,10 +269,7 @@ static int powernv_flash_probe(struct platform_device *pdev)
* with an ffs partition at the start, it should prove easier for users
* to deal with partitions or not as they see fit
*/
- ret = mtd_device_register(&data->mtd, NULL, 0);
-
-out:
- return ret;
+ return mtd_device_register(&data->mtd, NULL, 0);
}
/**
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8087c36dc693..0ec85f316d24 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length
}
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
- ioremap(start, length))) {
- E("slram: ioremap failed\n");
+ memremap(start, length,
+ MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+ E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
- iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
@@ -206,7 +207,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
- iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
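The slram conversion above swaps ioremap()/iounmap() for memremap()/memunmap(), which is the right interface for RAM-like memory: the returned pointer is ordinary kernel memory, not an __iomem cookie. A minimal sketch of the idiom (not part of the patch; start and length are placeholders):

#include <linux/io.h>

	void *va = memremap(start, length,
			    MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);
	if (!va)			/* memremap() returns NULL, not ERR_PTR */
		return -EIO;

	/* va can be dereferenced and memcpy()'d directly */

	memunmap(va);

The flag combination lists the acceptable cache attributes: write-back is preferred, with write-through and then write-combine as fallbacks.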
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 5a09a72ab112..b849aaf85c34 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# linux/drivers/maps/Makefile
#
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d504b3d1791d..70f488628464 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
.bankwidth = 2,
};
-static struct mtd_partition flagadm_parts[] = {
+static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 15bbda03be65..a0b8fa7849a9 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition partitions[] =
+static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 81dc2598bc0a..3528497f96c7 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -52,7 +52,7 @@
/* partition_info gives details on the logical partitions that the single
 * flash device is split into. If the size is zero we use up to the end of
 * the device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a577ef8553d0..729579fb654f 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
.bankwidth = AMD_BUSWIDTH,
};
-static struct mtd_partition nettel_amd_partitions[] = {
+static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c
index 4ed1a6bb4d3c..830b1b7e702b 100644
--- a/drivers/mtd/maps/physmap_of_gemini.c
+++ b/drivers/mtd/maps/physmap_of_gemini.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cortina Systems Gemini OF physmap add-on
* Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/mtd/maps/physmap_of_gemini.h b/drivers/mtd/maps/physmap_of_gemini.h
index c675025288dd..60e13a689d6a 100644
--- a/drivers/mtd/maps/physmap_of_gemini.h
+++ b/drivers/mtd/maps/physmap_of_gemini.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/of.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/physmap_of_versatile.h b/drivers/mtd/maps/physmap_of_versatile.h
index 5b86f6dc6b3d..0302502c9462 100644
--- a/drivers/mtd/maps/physmap_of_versatile.h
+++ b/drivers/mtd/maps/physmap_of_versatile.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/of.h>
#include <linux/mtd/map.h>
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 51572895c02c..6d9a4d6f9839 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -43,7 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
- /* release resources */
-
- if (info->area) {
- release_resource(info->area);
- kfree(info->area);
- }
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
kfree(info);
return 0;
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource specified\n");
- err = -ENOENT;
+ info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
- /* register our usage of the memory area */
-
- info->area = request_mem_region(res->start, info->map.size, pdev->name);
- if (info->area == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- err = -EIO;
- goto exit_free;
- }
-
- /* remap the memory area */
-
- info->map.virt = ioremap(res->start, info->map.size);
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
- if (info->map.virt == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() region\n");
- err = -EIO;
- goto exit_free;
- }
-
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 556a2dfe94c5..4337d279ad83 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the single
 * flash device is split into. If the size is zero we use up to the end of
 * the device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 9969fedb1f13..8f177e0acb8c 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
.phys = WINDOW_ADDR
};
-static struct mtd_partition ts5500_partitions[] = {
+static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index da2cdb5fd6db..9fc1f727aa76 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* tsunami_flash.c
*
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 00a8190797ec..aef030ca8601 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-static struct mtd_partition uclinux_romfs[] = {
+static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 3568294d4854..de8c902059b8 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -375,12 +375,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
return -EINVAL;
if (!mtd->_write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
-
- if (ret)
- return ret;
+ return -EOPNOTSUPP;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
@@ -419,9 +414,6 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
ops.datbuf = NULL;
@@ -618,9 +610,6 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
usr_data = (const void __user *)(uintptr_t)req.usr_data;
usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
- if (!access_ok(VERIFY_READ, usr_data, req.len) ||
- !access_ok(VERIFY_READ, usr_oob, req.ooblen))
- return -EFAULT;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
@@ -662,21 +651,10 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
struct mtd_info *mtd = mfi->mtd;
void __user *argp = (void __user *)arg;
int ret = 0;
- u_long size;
struct mtd_info_user info;
pr_debug("MTD_ioctl\n");
- size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
- if (cmd & IOC_IN) {
- if (!access_ok(VERIFY_READ, argp, size))
- return -EFAULT;
- }
- if (cmd & IOC_OUT) {
- if (!access_ok(VERIFY_WRITE, argp, size))
- return -EFAULT;
- }
-
switch (cmd) {
case MEMGETREGIONCOUNT:
if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d573606b91c2..60bf53df5454 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/*
- * try to support NOMMU mmaps on concatenated devices
- * - we don't support subdev spanning as we can't guarantee it'll work
- */
-static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_concat *concat = CONCAT(mtd);
- int i;
-
- for (i = 0; i < concat->num_subdev; i++) {
- struct mtd_info *subdev = concat->subdev[i];
-
- if (offset >= subdev->size) {
- offset -= subdev->size;
- continue;
- }
-
- return mtd_get_unmapped_area(subdev, len, offset, flags);
- }
-
- return (unsigned long) -ENOSYS;
-}
-
-/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
* stored to *new_dev upon success. This function does _not_
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._unlock = concat_unlock;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
- concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e7ea842ba3db..f80e911b8843 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
- if (!mtd->_get_unmapped_area)
- return -EOPNOTSUPP;
- if (offset >= mtd->size || len > mtd->size - offset)
- return -EINVAL;
- return mtd->_get_unmapped_area(mtd, len, offset, flags);
+ size_t retlen;
+ void *virt;
+ int ret;
+
+ ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+ if (ret)
+ return ret;
+ if (retlen != len) {
+ mtd_unpoint(mtd, offset, retlen);
+ return -ENOSYS;
+ }
+ return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+ if (offs < 0 || offs + ops->len >= mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ u64 maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ int ret;
+
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
ledtrig_mtd_activity();
return mtd->_write_oob(mtd, to, ops);
}
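To make the bounds enforced by mtd_check_oob_ops() above concrete, a worked example (not from the patch; the geometry is made up): take a 256 MiB NAND with 2048-byte pages and 64-byte OOB areas, accessed with ops->mode != MTD_OPS_AUTO_OOB so that mtd_oobavail() returns the full 64 bytes per page. Starting two pages from the end of the device (offs = mtd->size - 2 * 2048) with ops->ooboffs = 0, maxooblen = (131072 - 131070) * 64 - 0 = 128, so an ops->ooblen of 128 is accepted while 192 is rejected with -EINVAL.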
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 55fdb8e1fd2a..37accfd0400e 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
* You should not use them for _anything_ else.
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a308e707392d..be088bccd593 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return part->parent->_unpoint(part->parent, from + part->offset, len);
}
-static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- offset += part->offset;
- return part->parent->_get_unmapped_area(part->parent, len, offset,
- flags);
-}
-
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd._unpoint = part_unpoint;
}
- if (parent->_get_unmapped_area)
- slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (parent->_write_oob)
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index e43fea896d1e..d58a61c09304 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -79,14 +79,14 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
return ERR_PTR(ret);
}
/* go */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
@@ -202,7 +202,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 7d9080e33865..f07492c6f4b2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -50,7 +50,7 @@
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
-#define LOW_FRAG_GC_TRESHOLD 5
+#define LOW_FRAG_GC_THRESHOLD 5
/*
 * Wear level cost amortization. We want to do wear leveling in the background
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
- if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3f2036f31da4..bb48aafed9a2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
+
This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+ PXA3xx processors (NFCv1) and also on 32-bit Armada
+ platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+ platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index ade5fc4c3819..118a1349aad3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# linux/drivers/nand/Makefile
#
@@ -58,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index dcec9cf4983f..d60ada45c549 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
* Define partitions for flash devices
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index f25eca79f4e5..90a71a56bc23 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
- if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
- (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+ if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
list_for_each_entry(nand, &nc->chips, node) {
int i;
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 8268636675ef..fcbe4fd6e684 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
- struct atmel_pmecc *pmecc = user->pmecc;
-
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
- /* Reset the ECC engine */
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(pmecc);
return pmecc;
}
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
index a8ddbfca2ea5..817e0dd9fd15 100644
--- a/drivers/mtd/nand/atmel/pmecc.h
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9d4a28fa6b73..8ab827edf94e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8));
- /* One more address cycle for devices > 32MiB */
- if (this->chipsize > (32 << 20))
+ if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
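The au1550nd change above (and the atmel and diskonchip ones elsewhere in this diff) replaces per-driver chip-size checks with a single NAND_ROW_ADDR_3 option bit. The predicate the drivers used to open-code is the one visible in the removed atmel lines; as a sketch with a made-up helper name (where the NAND core actually sets the flag is not part of this diff):

#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>

static bool needs_third_row_addr_cycle(struct nand_chip *chip,
				       struct mtd_info *mtd)
{
	return (mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
	       (mtd->writesize <= 512 && chip->chipsize > SZ_32M);
}

With the core recording this once in chip->options, each driver only has to test chip->options & NAND_ROW_ADDR_3 before emitting the third row-address cycle.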
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
index c8834767ab6d..201b9baa52a0 100644
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BCM47XXNFLASH_H
#define __BCM47XXNFLASH_H
diff --git a/drivers/mtd/nand/brcmnand/Makefile b/drivers/mtd/nand/brcmnand/Makefile
index b28ffb59eb43..195b845e48b8 100644
--- a/drivers/mtd/nand/brcmnand/Makefile
+++ b/drivers/mtd/nand/brcmnand/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# link order matters; don't link the more generic brcmstb_nand.o before the
# more specific iproc_nand.o, for instance
obj-$(CONFIG_MTD_NAND_BRCMNAND) += iproc_nand.o
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1fc435f994e1..b01c9804590e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
/*
* Define static partitions for flash device
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
[0] = {
.name = "cmx270-0",
.offset = 0,
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3087b0ba7b7f..5124f8ae8c04 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "denali.h"
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR 0x00
-#define DENALI_HOST_DATA 0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-static void denali_host_write(struct denali_nand_info *denali,
- uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
{
- iowrite32(addr, denali->host + DENALI_HOST_ADDR);
- iowrite32(data, denali->host + DENALI_HOST_DATA);
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
- denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
- denali->irq_mask);
+ irq_mask);
return 0;
}
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare)
-{
- int ecc_en_flag, transfer_spare_flag;
-
- /* set ECC, transfer spare bits if needed */
- ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
- transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
- /* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
- max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
- err_sector = ECC_SECTOR(err_addr);
- err_byte = ECC_BYTE(err_addr);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
- err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
- err_device = ECC_ERR_DEVICE(err_cor_info);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
- if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
- } while (!ECC_LAST_ERR(err_cor_info));
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
- * Once handle all ecc errors, controller will trigger a
- * ECC_TRANSACTION_DONE interrupt, so here just wait for
- * a while for this interrupt
+ * Once all ECC errors have been handled, the controller will trigger
+ * an ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
- ioread32(denali->reg + DMA_ENABLE);
-}
-
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- denali_host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- denali_host_write(denali, mode, dma_addr);
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
- denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- denali_host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
-{
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, dma_addr, page, write);
- else
- denali_setup_dma32(denali, dma_addr, page, write);
+ denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+ *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
- denali_enable_dma(denali, true);
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
- denali_setup_dma(denali, dma_addr, page, write);
+ denali->setup_dma(denali, dma_addr, page, write);
- /* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
- denali_enable_dma(denali, false);
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
- setup_ecc_for_xfer(denali, !raw, raw);
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
- tmp |= acc_clks;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
- tmp |= re_2_we;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
- tmp |= re_2_re;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
- /* tWHR -> WE_2_RE */
- we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+ t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
- tmp |= we_2_re;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
- tmp &= ~addr_2_data_mask;
- tmp |= addr_2_data;
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
- tmp |= rdwr_en_hi;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
- tmp |= rdwr_en_lo;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
- tmp |= cs_setup;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- detect_max_banks(denali);
+ denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
- /* Should set value for these registers when init */
- iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
- /*
- * the completion object will be used to notify
- * the callee that the interrupt is done
- */
- init_completion(&denali->complete);
-
- /*
- * the spinlock will be used to synchronize the ISR with any
- * element that might be access shared data (interrupt status)
- */
- spin_lock_init(&denali->irq_lock);
-}
-
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
- denali_drv_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
- /* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- /* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
- /*
- * scan for NAND devices attached to the controller
- * this is the first stage in a two step process to register
- * with the nand subsystem
- */
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
}
- /*
- * second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management.
- */
-
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
-
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
- /* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
-/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 9239e6793e6e..2911066dacac 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
-#define MAKE_ECC_CORRECTION(val, thresh) \
- (((val) & (ECC_CORRECTION__VALUE)) | \
- (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
-#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
-#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
-#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
-
- /* elements used by ISR */
struct completion complete;
- spinlock_t irq_lock;
- uint32_t irq_mask;
- uint32_t irq_status;
+ spinlock_t irq_lock; /* protect irq_mask and irq_status */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
int irq;
-
- void *buf;
+ void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
- int dma_avail;
+ int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes;
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
- unsigned int revision;
- unsigned int caps;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 56e2e177644d..cfd33e6ca77f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "denali.h"
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
-
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 81370c79aa48..57fb7ae31412 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
+#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
-/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
-
module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index c3aa53caab5c..72671dc52e2e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
+ if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index fd3648952b5a..484f7fbc3f7d 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce,
- !(ctrl & NAND_NCE));
- gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
- gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ if (gpiomtd->nce)
+ gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
- plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
- plat->gpio_nce = of_get_gpio(dev->of_node, 1);
- plat->gpio_ale = of_get_gpio(dev->of_node, 2);
- plat->gpio_cle = of_get_gpio(dev->of_node, 3);
- plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
+ struct device *dev = &pdev->dev;
int ret = 0;
- if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
- gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
- if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
- "NAND NCE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
- "NAND NWP");
- if (ret)
- return ret;
+	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+	if (IS_ERR(gpiomtd->ale)) {
+		ret = PTR_ERR(gpiomtd->ale);
+		goto out_ce;
}
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
- "NAND RDY");
- if (ret)
- return ret;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
- chip->dev_ready = gpio_nand_devready;
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
}
+ /* Using RDY pin */
+ if (gpiomtd->rdy)
+ chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
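
For context, a minimal sketch (not part of this patch) of the gpiod consumer pattern the converted probe() relies on: devm_gpiod_get_optional() returns NULL when the line is simply not described and an ERR_PTR() on a real error, and the polarity (e.g. an active-low nCE) comes from the firmware description, so callers only deal in logical values. The "foo" con_id below is purely illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Hypothetical consumer of an optional, possibly active-low, "foo" line. */
static int example_claim_optional_gpio(struct device *dev)
{
	struct gpio_desc *foo;

	/* NULL if no "foo-gpios" is described; ERR_PTR() on a real error. */
	foo = devm_gpiod_get_optional(dev, "foo", GPIOD_OUT_HIGH);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	if (foo)
		gpiod_set_value(foo, 1); /* logical value; polarity handled by gpiolib */

	return 0;
}
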
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index d9ee1a7e6956..0897261c3e17 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 7f3b065b6b8f..c51d214d169e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
- writel(0, ecc->regs + ECC_IRQ_REG(op));
-
return IRQ_HANDLED;
}
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE)
+ /*
+		 * Clear the decode IRQ status in case the wait for the
+		 * decode IRQ timed out.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 53e5e0337c3e..f3be0b2a8869 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- pr_debug("%s: RESET failed\n", __func__);
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
- printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+ dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
- pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
- /* One more address cycle for higher density devices */
- if (mtd->size >= 0x4000000) {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 12edaae17d81..6135d007a068 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (section)
+ if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
chip->badblockbits = 8;
chip->erase = single_erase;
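
The new check encodes, in one place, the rule the drivers in this series no longer hard-code: a third row-address cycle is needed once a chip holds more than 64K pages, since two address bytes can only index 2^16 pages; the old per-driver chipsize thresholds (32 MiB, 128 MiB, ...) only matched that rule for one particular page size. A small stand-alone sketch of the same rule, using made-up geometries:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * A chip needs a third row-address cycle when it has more than 2^16 pages,
 * which is what "chip_shift - page_shift > 16" expresses for power-of-two
 * sizes.
 */
static bool needs_third_row_cycle(uint64_t chipsize, uint32_t writesize)
{
	return (chipsize / writesize) > (1ULL << 16);
}

int main(void)
{
	/* 128 MiB, 512 B pages: 256K pages -> 1 (matches the old >32MiB test) */
	printf("%d\n", needs_third_row_cycle(128ULL << 20, 512));
	/* 128 MiB, 2 KiB pages:  64K pages -> 0 (matches the old >128MiB test) */
	printf("%d\n", needs_third_row_cycle(128ULL << 20, 2048));
	return 0;
}
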
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kind of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 246b4393118e..44322a363ba5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent;
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
-
- if (IS_ERR_OR_NULL(root))
- return -1;
+ }
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7bb4d2ea9342..af5b32c9a791 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 54540c8fa1a2..dad438c4906a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
*
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector within a page.
+ * The sector number is passed in @i.
*/
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
- int i, j;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector. This is used
+ * when software-based correction is required, since the ECC is then needed
+ * for one sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
- ecc_code = ecc_calc;
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH8_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
- *ecc_code++ = (bch_val4 & 0xFF);
- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
- *ecc_code++ = (bch_val3 & 0xFF);
- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
- *ecc_code++ = (bch_val2 & 0xFF);
- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
- *ecc_code++ = (bch_val1 & 0xFF);
- break;
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH4_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
- ((bch_val1 >> 28) & 0xF);
- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val1 & 0xF) << 4);
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- val = readl(gpmc_regs->gpmc_bch_result6[i]);
- ecc_code[0] = ((val >> 8) & 0xFF);
- ecc_code[1] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result5[i]);
- ecc_code[2] = ((val >> 24) & 0xFF);
- ecc_code[3] = ((val >> 16) & 0xFF);
- ecc_code[4] = ((val >> 8) & 0xFF);
- ecc_code[5] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result4[i]);
- ecc_code[6] = ((val >> 24) & 0xFF);
- ecc_code[7] = ((val >> 16) & 0xFF);
- ecc_code[8] = ((val >> 8) & 0xFF);
- ecc_code[9] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result3[i]);
- ecc_code[10] = ((val >> 24) & 0xFF);
- ecc_code[11] = ((val >> 16) & 0xFF);
- ecc_code[12] = ((val >> 8) & 0xFF);
- ecc_code[13] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result2[i]);
- ecc_code[14] = ((val >> 24) & 0xFF);
- ecc_code[15] = ((val >> 16) & 0xFF);
- ecc_code[16] = ((val >> 8) & 0xFF);
- ecc_code[17] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result1[i]);
- ecc_code[18] = ((val >> 24) & 0xFF);
- ecc_code[19] = ((val >> 16) & 0xFF);
- ecc_code[20] = ((val >> 8) & 0xFF);
- ecc_code[21] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result0[i]);
- ecc_code[22] = ((val >> 24) & 0xFF);
- ecc_code[23] = ((val >> 16) & 0xFF);
- ecc_code[24] = ((val >> 8) & 0xFF);
- ecc_code[25] = ((val >> 0) & 0xFF);
- break;
- default:
- return -EINVAL;
- }
-
- /* ECC scheme specific syndrome customizations */
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch4_polynomial[j];
- break;
- case OMAP_ECC_BCH4_CODE_HW:
- /* Set 8th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch8_polynomial[j];
- break;
- case OMAP_ECC_BCH8_CODE_HW:
- /* Set 14th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- break;
- default:
- return -EINVAL;
- }
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
- ecc_calc += eccbytes;
+ ecc_calc += eccbytes;
}
return 0;
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write entire page at one go as it would be optimal
+ * as ECC is calculated by hardware.
+ * ECC is calculated for all subpages but we choose
+ * only what we want.
+ */
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+	/*
+	 * Copy the calculated ECC for the whole page to chip->oob_poi;
+	 * this includes the masked value (0xFF) for unwritten subpages.
+	 */
+ ecc_calc = chip->buffers->ecccalc;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
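
A worked example of the step masking in omap_write_subpage_bch() above, assuming a 2048-byte page with 512-byte ECC steps (values chosen only for illustration): a 1024-byte subpage write at offset 512 keeps the hardware ECC for steps 1 and 2 and pads steps 0 and 3 with 0xFF so their OOB bytes stay erased.

#include <stdio.h>

int main(void)
{
	const unsigned int ecc_size = 512;	/* assumed chip->ecc.size   */
	const unsigned int offset = 512;	/* example subpage offset   */
	const unsigned int data_len = 1024;	/* example subpage length   */
	unsigned int start_step = offset / ecc_size;
	unsigned int end_step = (offset + data_len - 1) / ecc_size;

	/* Prints "ECC kept for steps 1..2"; other steps are padded with 0xFF. */
	printf("ECC kept for steps %u..%u\n", start_step, end_step);
	return 0;
}
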
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
- struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
- if (dev->of_node) {
- if (omap_get_dt_info(dev, info))
- return -EINVAL;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->ecc_opt = pdata->ecc_opt;
- if (pdata->dev_ready)
- dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
- info->xfer_type = pdata->xfer_type;
- info->devsize = pdata->devsize;
- info->elm_of_node = pdata->elm_of_node;
- info->flash_bbt = pdata->flash_bbt;
- }
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
- platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (!omap2_nand_ecc_check(info, pdata)) {
+ if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
- if (dev->of_node)
- mtd_device_register(mtd, NULL, 0);
- else
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto return_error;
platform_set_drvdata(pdev, mtd);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 85cff68643e0..90b9a9ccbe60 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
+ /*
+	 * Some SoCs like A7k/A8k need the NAND controller to be enabled
+	 * manually, so that the driver does not depend on the bootloader
+	 * having done it. This is done through a single bit in the System
+	 * Functions registers.
+ */
+ if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 3baddfc997d1..2656c1ac5646 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
+ * @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the first command element
+ *		   of the current sgl. It is used to calculate the size of
+ *		   the current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
- * @base_dma: physical base address of controller registers
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
}
/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the commands
+ * to be formed as command elements, so this function takes command elements
+ * from the bam transaction ce array and fills them with the required data.
+ * A single SGL can contain multiple command elements, so NAND_BAM_NEXT_SGL
+ * is used to start a separate SGL after the current command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
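
As a rough illustration (the exact call below is hypothetical, not quoted from the driver), this is what a register read turns into on a BAM-based controller, assuming reg_read_pos is 0 when it is issued:

/*
 *	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, NAND_BAM_NEXT_SGL);
 *
 * ends up in prep_bam_dma_desc_cmd(), which fills two consecutive command
 * elements:
 *
 *	bam_ce[0]: BAM_READ_COMMAND, register at base_phys + NAND_FLASH_STATUS,
 *	           result DMA-ed to reg_read_dma + 0
 *	bam_ce[1]: BAM_READ_COMMAND, register at base_phys + NAND_FLASH_STATUS + 4,
 *	           result DMA-ed to reg_read_dma + 4
 *
 * and, because NAND_BAM_NEXT_SGL is set, wraps both elements into a single
 * entry of cmd_sgl and advances bam_ce_start to bam_ce_pos.
 */
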
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
- int size;
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
- size = num_regs * sizeof(u32);
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
- int size;
vaddr = offset_to_nandc_reg(regs, first);
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- size = num_regs * sizeof(u32);
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr, size,
- flow_control);
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (r)
return r;
}
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
+ nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e7f3c98487e6..3c5008a4f5f3 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
- const struct of_device_id *match;
- struct flctl_soc_config *config;
+ const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- match = of_match_device(of_flctl_match, dev);
- if (match)
- config = (struct flctl_soc_config *)match->data;
- else {
+ config = of_device_get_match_data(dev);
+ if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}
diff --git a/drivers/mtd/onenand/Makefile b/drivers/mtd/onenand/Makefile
index 9d6540e8b3d2..f8b624aca9cc 100644
--- a/drivers/mtd/onenand/Makefile
+++ b/drivers/mtd/onenand/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the OneNAND MTD
#
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 680188a88130..420260c25ca0 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/mtd/onenand/onenand_bbt.c
*
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index d206b3c533bc..ee5ab994132f 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX
may contain up to 3/4 partitions (depending on the version).
This driver will parse TRX header and report at least two partitions:
kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+ tristate "Sharp SL Series NAND flash partition parser"
+ depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
+ help
+ This provides the read-only FTL logic necessary to read the partition
+ table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
+ partition parser using this code.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 4d9024e0be3b..5b1bcc3d90d9 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644
index 000000000000..5fe0079ea5ed
--- /dev/null
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -0,0 +1,398 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ * http://support.ezaurus.com/developer/source/source_dl.asp
+ * drivers/mtd/nand/sharp_sl_logical.c
+ * linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00 8
+#define NAND_NOOB_LOGADDR_01 9
+#define NAND_NOOB_LOGADDR_10 10
+#define NAND_NOOB_LOGADDR_11 11
+#define NAND_NOOB_LOGADDR_20 12
+#define NAND_NOOB_LOGADDR_21 13
+
+#define BLOCK_IS_RESERVED 0xffff
+#define BLOCK_UNMASK_COMPLEMENT 1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS 3
+#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR 0x00060000
+#define SHARPSL_PARTINFO2_LADDR 0x00064000
+
+#define BOOT_MAGIC 0x424f4f54
+#define FSRO_MAGIC 0x4653524f
+#define FSRW_MAGIC 0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax: number of logical blocks
+ * @log2phy: the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+ unsigned int logmax;
+ unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+ u8 freebytes = 0;
+ int section = 0;
+
+ while (true) {
+ struct mtd_oob_region oobfree = { };
+ int ret, i;
+
+ ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+ if (ret)
+ break;
+
+ if (!oobfree.length || oobfree.offset > 15 ||
+ (oobfree.offset + oobfree.length) < 8)
+ continue;
+
+ i = oobfree.offset >= 8 ? oobfree.offset : 8;
+ for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+ freebytes |= BIT(i - 8);
+
+ if (freebytes == 0xff)
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret != 0 || mtd->oobsize != ops.oobretlen)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
+ * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
+ * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
+ */
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+ u16 us;
+ int good0, good1;
+
+ if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+ oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+ good0 = NAND_NOOB_LOGADDR_00;
+ good1 = NAND_NOOB_LOGADDR_01;
+ } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+ oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+ good0 = NAND_NOOB_LOGADDR_10;
+ good1 = NAND_NOOB_LOGADDR_11;
+ } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+ oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+ good0 = NAND_NOOB_LOGADDR_20;
+ good1 = NAND_NOOB_LOGADDR_21;
+ } else {
+ return -EINVAL;
+ }
+
+ us = oob[good0] | oob[good1] << 8;
+
+ /* parity check */
+ if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+ return -EINVAL;
+
+ /* reserved */
+ if (us == BLOCK_IS_RESERVED)
+ return BLOCK_IS_RESERVED;
+
+ return (us >> 1) & GENMASK(9, 0);
+}
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+ unsigned int block_num, log_num, phymax;
+ loff_t block_adr;
+ u8 *oob;
+ int i, ret;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+ return -ENOMEM;
+
+ phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+ /* FTL reserves 5% of the blocks + 1 spare */
+ ftl->logmax = ((phymax * 95) / 100) - 1;
+
+ ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+ GFP_KERNEL);
+ if (!ftl->log2phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize ftl->log2phy */
+ for (i = 0; i < ftl->logmax; i++)
+ ftl->log2phy[i] = UINT_MAX;
+
+ /* create physical-logical table */
+ for (block_num = 0; block_num < phymax; block_num++) {
+ block_adr = block_num * mtd->erasesize;
+
+ if (mtd_block_isbad(mtd, block_adr))
+ continue;
+
+ if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+ continue;
+
+ /* get logical block */
+ log_num = sharpsl_nand_get_logical_num(oob);
+
+ /* cut-off errors and skip the out-of-range values */
+ if (log_num > 0 && log_num < ftl->logmax) {
+ if (ftl->log2phy[log_num] == UINT_MAX)
+ ftl->log2phy[log_num] = block_num;
+ }
+ }
+
+ pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+ phymax, ftl->logmax, phymax - ftl->logmax);
+
+ ret = 0;
+exit:
+ kfree(oob);
+ return ret;
+}
+
+void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+ kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+ loff_t from,
+ size_t len,
+ void *buf,
+ struct sharpsl_ftl *ftl)
+{
+ unsigned int log_num, final_log_num;
+ unsigned int block_num;
+ loff_t block_adr;
+ loff_t block_ofs;
+ size_t retlen;
+ int err;
+
+ log_num = mtd_div_by_eb((u32)from, mtd);
+ final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+ if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+ return -EINVAL;
+
+ block_num = ftl->log2phy[log_num];
+ block_adr = block_num * mtd->erasesize;
+ block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+ err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (!err && retlen != len)
+ err = -EIO;
+
+ if (err)
+ pr_err("sharpslpart: error, read failed at %#llx\n",
+ block_adr + block_ofs);
+
+ return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev: size erasesize name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+ __le32 start;
+ __le32 end;
+ __be32 magic;
+ u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+ loff_t from,
+ size_t len,
+ struct sharpsl_nand_partinfo *buf,
+ struct sharpsl_ftl *ftl)
+{
+ int ret;
+
+ ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+ if (ret)
+ return ret;
+
+ /* check for magics */
+ if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+ be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+ be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+ pr_err("sharpslpart: magic values mismatch\n");
+ return -EINVAL;
+ }
+
+ /* fixup for hardcoded value 64 MiB (for older models) */
+ buf[2].end = cpu_to_le32(master->size);
+
+ /* extra sanity check */
+ if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+ le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+ le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+ le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+ le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+ pr_err("sharpslpart: partition sizes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct sharpsl_ftl ftl;
+ struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+ struct mtd_partition *sharpsl_nand_parts;
+ int err;
+
+ /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+ err = sharpsl_nand_check_ooblayout(master);
+ if (err)
+ return err;
+
+ /* init logical mgmt (FTL) */
+ err = sharpsl_nand_init_ftl(master, &ftl);
+ if (err)
+ return err;
+
+ /* read and validate first partition table */
+ pr_info("sharpslpart: try reading first partition table\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO1_LADDR,
+ sizeof(buf), buf, &ftl);
+ if (err) {
+ /* fallback: read second partition table */
+ pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO2_LADDR,
+ sizeof(buf), buf, &ftl);
+ }
+
+ /* cleanup logical mgmt (FTL) */
+ sharpsl_nand_cleanup_ftl(&ftl);
+
+ if (err) {
+ pr_err("sharpslpart: both partition tables are invalid\n");
+ return err;
+ }
+
+ sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
+ SHARPSL_NAND_PARTS, GFP_KERNEL);
+ if (!sharpsl_nand_parts)
+ return -ENOMEM;
+
+ /* original names */
+ sharpsl_nand_parts[0].name = "smf";
+ sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+ sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+ le32_to_cpu(buf[0].start);
+
+ sharpsl_nand_parts[1].name = "root";
+ sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+ sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+ le32_to_cpu(buf[1].start);
+
+ sharpsl_nand_parts[2].name = "home";
+ sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+ sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+ le32_to_cpu(buf[2].start);
+
+ *pparts = sharpsl_nand_parts;
+ return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+ .parse_fn = sharpsl_parse_mtd_partitions,
+ .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 3692dd547879..4237c7cebf02 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -989,9 +989,9 @@ restart:
/* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = (struct sm_ftl *)data;
+ struct sm_ftl *ftl = from_timer(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
@@ -1139,7 +1139,7 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
mutex_init(&ftl->mutex);
- setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
init_completion(&ftl->erase_completion);
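
The same timer-API conversion recurs in several hunks further down (cops, ltpc, arcnet, caif_hsi, grcan): the callback now takes a struct timer_list * and, where the timer is embedded in a private structure, recovers its container with from_timer() instead of casting an unsigned long cookie; drivers whose timer is a standalone static (cops, ltpc) keep the device in a file-scope pointer instead. A minimal sketch of the embedded case, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical context; only the timer plumbing matters here. */
struct my_ctx {
	struct timer_list timer;
	struct work_struct work;
};

static void my_timer_fn(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer member */
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	schedule_work(&ctx->work);
}

static void my_ctx_arm(struct my_ctx *ctx)
{
	timer_setup(&ctx->timer, my_timer_fn, 0);
	mod_timer(&ctx->timer, jiffies + HZ);
}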
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 69c638dd0484..89da88e59121 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || COMPILE_TEST)
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash platform driver"
depends on X86
select SPI_INTEL_SPI
help
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 7d84c5108e17..f4c61d282abd 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 53c7d8e0327a..75a2bc447a99 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@@ -38,6 +39,9 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -75,7 +79,9 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
+ bool rclk_en;
u32 trigger_address;
+ u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, a couple of cycles of
+ * QSPI_REF_CLK delay are required for the above bit to
+ * be internally synchronized by the QSPI module. Provide 5
+ * cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
- const unsigned int bypass,
+ const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(nor);
- cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
}
if (switch_cs || switch_ck)
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return -ENXIO;
}
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
return 0;
}
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
+ unsigned long data;
int ret;
int irq;
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
- return ret;
+ goto probe_clk_failed;
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ data = (unsigned long)of_device_get_match_data(dev);
+ if (data & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
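
For reference, the wr_delay added above is five QSPI reference-clock periods expressed in nanoseconds: assuming, say, a 384 MHz master_ref_clk_hz, DIV_ROUND_UP(NSEC_PER_SEC, 384000000) is 3 ns, so wr_delay becomes 15 ns, which cqspi_indirect_write_execute() then passes to ndelay() right after setting the indirect-write start bit.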
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
}
return ret;
-probe_irq_failed:
- cqspi_controller_enable(cqspi, 0);
probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct of_device_id cqspi_dt_ids[] = {
- {.compatible = "cdns,qspi-nor",},
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = (void *)0,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ },
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index e82652335ede..c0976f2e3dd1 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 8a596bfeddff..ef034d898a23 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -67,8 +67,6 @@
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE BIT(15)
#define PR_BASE_MASK 0x3fff
-/* Last PR is GPR0 */
-#define PR_NUM (5 + 1)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@@ -90,20 +88,35 @@
#define OPMENU0 0x08
#define OPMENU1 0x0c
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_BCR 0xfc
#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -117,8 +130,11 @@
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
* @writeable: Is the chip writeable
- * @swseq: Use SW sequencer in register reads/writes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
void __iomem *pregs;
void __iomem *sregs;
size_t nregions;
+ size_t pr_num;
bool writeable;
- bool swseq;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
- for (i = 0; i < PR_NUM; i++)
+ for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
dev_dbg(ispi->dev, "Protected regions:\n");
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
- ispi->swseq ? 'S' : 'H');
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
static int intel_spi_init(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, val;
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
int i;
switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
if (writeable) {
/* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
ispi->erase_64k = true;
break;
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
return -EINVAL;
}
- /* Disable #SMI generation */
+ /* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
- * BIOS programs allowed opcodes and then locks down the register.
- * So read back what opcodes it decided to support. That's the set
- * we are going to support as well.
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
*/
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ ispi->erase_64k = false;
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
- * using software sequencer. If we find that BIOS has programmed
- * opcodes for the software sequencer we use that over the hardware
- * sequencer.
+ * using software sequencer.
*/
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
-
+ if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
- ispi->swseq = true;
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+
+ val = readl(ispi->sregs + PREOP_OPTYPE);
+ ispi->preopcodes[0] = val;
+ ispi->preopcodes[1] = val >> 8;
+ }
}
intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
return 0;
}
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
+ int preop;
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
- if (ispi->opcodes[i] == opcode)
- return i;
- return -EINVAL;
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
u32 val, status;
int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return -EINVAL;
}
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
{
- u32 val, status;
+ u32 val = 0, status;
+ u16 preop;
int ret;
- ret = intel_spi_opcode_index(ispi, opcode);
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
- val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if (preop) {
+ val |= SSFSTS_CTL_ACS;
+ if (preop >> 8)
+ val |= SSFSTS_CTL_SPOP;
+ }
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
- status = readl(ispi->base + SSFSTS_CTL);
+ status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/* Address of the first chip */
writel(0, ispi->base + FADDR);
- if (ispi->swseq)
- ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, len);
if (ret)
return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
- * controller so skip it here now.
+ * controller so skip it here now. If the controller is not locked,
+ * program the opcode to the PREOP register for later use.
*/
- if (opcode == SPINOR_OP_WREN)
+ if (opcode == SPINOR_OP_WREN) {
+ if (!ispi->locked)
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+
return 0;
+ }
writel(0, ispi->base + FADDR);
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
if (ret)
return ret;
- if (ispi->swseq)
- return intel_spi_sw_cycle(ispi, opcode, buf, len);
- return intel_spi_hw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, len);
}
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
- /* Write enable */
- if (ispi->preopcodes[1] == SPINOR_OP_WREN)
- val |= SSFSTS_CTL_SPOP;
- val |= SSFSTS_CTL_ACS;
- writel(val, ispi->base + HSFSTS_CTL);
-
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
}
/* Start the write now */
- val = readl(ispi->base + HSFSTS_CTL);
- writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
erase_size = SZ_4K;
}
+ if (ispi->swseq_erase) {
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+ 0, OPTYPE_WRITE_WITH_ADDR);
+ if (ret)
+ return ret;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+ }
+
while (len > 0) {
writel(offs, ispi->base + FADDR);
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
{
int i;
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index c258c7adf1c5..abe455ccd68b 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
+static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+{
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mt8173_nor->nor_clk);
+}
+
+static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ if (ret) {
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
return PTR_ERR(mt8173_nor->nor_clk);
mt8173_nor->dev = &pdev->dev;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+
+ ret = mt8173_nor_enable_clk(mt8173_nor);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- return ret;
- }
/* only support one attached flash */
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = mtk_nor_init(mt8173_nor, flash_np);
nor_free:
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
- }
+ if (ret)
+ mt8173_nor_disable_clk(mt8173_nor);
+
return ret;
}
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
{
struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ mt8173_nor_disable_clk(mt8173_nor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ mt8173_nor_disable_clk(mt8173_nor);
+
return 0;
}
+static int mtk_nor_resume(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ return mt8173_nor_enable_clk(mt8173_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+ .suspend = mtk_nor_suspend,
+ .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS NULL
+#endif
+
static const struct of_device_id mtk_nor_of_ids[] = {
{ .compatible = "mediatek,mt8173-nor"},
{ /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
.remove = mtk_nor_drv_remove,
.driver = {
.name = "mtk-nor",
+ .pm = MTK_NOR_DEV_PM_OPS,
.of_match_table = mtk_nor_of_ids,
},
};
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 19c000722cbc..bc266f70a15b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -89,6 +89,8 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+
+ int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int macronix_quad_enable(struct spi_nor *nor);
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
+ { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ .quad_enable = macronix_quad_enable,
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR ||
- header.minor < SFDP_JESD216_MINOR)
+ header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = spansion_quad_enable;
break;
}
+
+ /*
+ * Some manufacturers, like GigaDevice, may use a different
+ * bit to set QE on different memories, so the MFR alone can't
+ * indicate the quad_enable method for this case; we need to
+ * set it in the flash info list.
+ */
+ if (info->quad_enable)
+ params->quad_enable = info->quad_enable;
}
/* Override the parameters with data read from SFDP tables. */
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
/* Enable Quad I/O if needed. */
enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4);
- if (enable_quad_io && params->quad_enable) {
- err = params->quad_enable(nor);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to power up
+ * with the software protection bits set
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
if (err) {
dev_err(nor->dev, "quad mode not supported\n");
return err;
}
}
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 1);
+
return 0;
}
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set
- */
-
- if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST ||
- info->flags & SPI_NOR_HAS_LOCK) {
- write_enable(nor);
- write_sr(nor, 0);
- spi_nor_wait_till_ready(nor);
- }
-
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
info->flags & SPI_NOR_4B_OPCODES)
spi_nor_set_4byte_opcodes(nor, info);
- else
- set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
}
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 86c0931543c5..b3c7f6addba7 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -1,9 +1,22 @@
/*
- * stm32_quadspi.c
+ * Driver for stm32 quadspi controller
*
- * Copyright (C) 2017, Ludovic Barre
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre <ludovic.barre@st.com>.
*
- * License terms: GNU General Public License (GPL), version 2
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/errno.h>
@@ -113,6 +126,7 @@
#define STM32_MAX_MMAP_SZ SZ_256M
#define STM32_MAX_NORCHIP 2
+#define STM32_QSPI_FIFO_SZ 32
#define STM32_QSPI_FIFO_TIMEOUT_US 30000
#define STM32_QSPI_BUSY_TIMEOUT_US 100000
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
u32 presc;
u32 read_mode;
bool registered;
+ u32 prefetch_limit;
};
struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
STM32_QSPI_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
- break;
+ return ret;
}
tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
}
- return ret;
+ return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
{
struct stm32_qspi *qspi = flash->qspi;
u32 ccr, dcr, cr;
+ u32 last_byte;
int err;
err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
if (err)
goto abort;
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+ } else {
+ last_byte = cmd->addr + cmd->len;
+ if (last_byte > flash->prefetch_limit)
+ goto abort;
}
return err;
@@ -322,7 +342,9 @@ abort:
cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
- dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+ if (err)
+ dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
return err;
}
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
}
flash->fsize = FSIZE_VAL(mtd->size);
+ flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
flash->read_mode = CCR_FMODE_MM;
if (mtd->size > qspi->mm_size)
diff --git a/drivers/mtd/tests/Makefile b/drivers/mtd/tests/Makefile
index 937a829bb701..5de0378f90db 100644
--- a/drivers/mtd/tests/Makefile
+++ b/drivers/mtd/tests/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTD_TESTS) += mtd_oobtest.o
obj-$(CONFIG_MTD_TESTS) += mtd_pagetest.o
obj-$(CONFIG_MTD_TESTS) += mtd_readtest.o
diff --git a/drivers/mtd/tests/mtd_test.c b/drivers/mtd/tests/mtd_test.c
index 34736bbcc07b..3d0b8b5c1a53 100644
--- a/drivers/mtd/tests/mtd_test.c
+++ b/drivers/mtd/tests/mtd_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "mtd_test: " fmt
#include <linux/module.h>
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
index 04afd0e7074f..5a6e3bbe0474 100644
--- a/drivers/mtd/tests/mtd_test.h
+++ b/drivers/mtd/tests/mtd_test.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/mtd/mtd.h>
#include <linux/sched/signal.h>
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 4e3c3d70d8c3..543673605ca7 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTD_UBI) += ubi.o
ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 842550b5712a..136ce05d2328 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1334,7 +1334,7 @@ static int bytes_str_to_int(const char *str)
* This function returns zero in case of success and a negative error code in
* case of error.
*/
-static int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
{
int i, len;
struct mtd_dev_param *p;
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index bd1f07e5ce9a..2aaa3f7f2ba9 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index aba0d652095b..0936da592e12 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,6 +483,18 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
+config THUNDERBOLT_NET
+ tristate "Networking over Thunderbolt cable"
+ depends on THUNDERBOLT && INET
+ help
+ Select this if you want to create a network between two
+ computers over a Thunderbolt cable. The driver supports the Apple
+ ThunderboltIP protocol and allows communication with any host
+ supporting the same protocol, including Windows and macOS.
+
+ To compile this driver as a module, choose M here. The module will be
+ called thunderbolt-net.
+
source "drivers/net/hyperv/Kconfig"
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8dff900085d6..766f62d02a0b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux network device drivers.
#
@@ -74,3 +75,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
+
+thunderbolt-net-y += thunderbolt.o
+obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 486e1e6997fc..bb49f6e40a19 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -155,6 +155,7 @@ static int cops_irqlist[] = {
};
static struct timer_list cops_timer;
+static struct net_device *cops_timer_dev;
/* use 0 for production, 1 for verification, 2 for debug, 3 for verbose debug */
#ifndef COPS_DEBUG
@@ -187,7 +188,7 @@ static void cops_load (struct net_device *dev);
static int cops_nodeid (struct net_device *dev, int nodeid);
static irqreturn_t cops_interrupt (int irq, void *dev_id);
-static void cops_poll (unsigned long ltdev);
+static void cops_poll(struct timer_list *t);
static void cops_timeout(struct net_device *dev);
static void cops_rx (struct net_device *dev);
static netdev_tx_t cops_send_packet (struct sk_buff *skb,
@@ -424,9 +425,8 @@ static int cops_open(struct net_device *dev)
*/
if(lp->board==TANGENT) /* Poll 20 times per second */
{
- init_timer(&cops_timer);
- cops_timer.function = cops_poll;
- cops_timer.data = (unsigned long)dev;
+ cops_timer_dev = dev;
+ timer_setup(&cops_timer, cops_poll, 0);
cops_timer.expires = jiffies + HZ/20;
add_timer(&cops_timer);
}
@@ -673,12 +673,11 @@ static int cops_nodeid (struct net_device *dev, int nodeid)
* Poll the Tangent type cards to see if we have work.
*/
-static void cops_poll(unsigned long ltdev)
+static void cops_poll(struct timer_list *unused)
{
int ioaddr, status;
int boguscount = 0;
-
- struct net_device *dev = (struct net_device *)ltdev;
+ struct net_device *dev = cops_timer_dev;
del_timer(&cops_timer);
diff --git a/drivers/net/appletalk/cops.h b/drivers/net/appletalk/cops.h
index fd2750b269c8..7a0bfb351929 100644
--- a/drivers/net/appletalk/cops.h
+++ b/drivers/net/appletalk/cops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* cops.h: LocalTalk driver for Linux.
*
* Authors:
diff --git a/drivers/net/appletalk/ipddp.h b/drivers/net/appletalk/ipddp.h
index 531519da99a3..9a8e45a46925 100644
--- a/drivers/net/appletalk/ipddp.h
+++ b/drivers/net/appletalk/ipddp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ipddp.h: Header for IP-over-DDP driver for Linux.
*/
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index ac755d2950a6..75a5a9b87c5a 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -694,6 +694,7 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
/* end of idle handlers -- what should be seen is do_read, do_write */
static struct timer_list ltpc_timer;
+static struct net_device *ltpc_timer_dev;
static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -867,10 +868,8 @@ static void set_multicast_list(struct net_device *dev)
static int ltpc_poll_counter;
-static void ltpc_poll(unsigned long l)
+static void ltpc_poll(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *) l;
-
del_timer(&ltpc_timer);
if(debug & DEBUG_VERBOSE) {
@@ -880,14 +879,10 @@ static void ltpc_poll(unsigned long l)
}
ltpc_poll_counter--;
}
-
- if (!dev)
- return; /* we've been downed */
/* poll 20 times per second */
- idle(dev);
+ idle(ltpc_timer_dev);
ltpc_timer.expires = jiffies + HZ/20;
-
add_timer(&ltpc_timer);
}
@@ -1165,9 +1160,8 @@ struct net_device * __init ltpc_probe(void)
dev->irq = 0;
/* polled mode -- 20 times per second */
/* this is really, really slow... should it poll more often? */
- init_timer(&ltpc_timer);
- ltpc_timer.function=ltpc_poll;
- ltpc_timer.data = (unsigned long) dev;
+ ltpc_timer_dev = dev;
+ timer_setup(&ltpc_timer, ltpc_poll, 0);
ltpc_timer.expires = jiffies + HZ/20;
add_timer(&ltpc_timer);
@@ -1254,8 +1248,6 @@ static void __exit ltpc_cleanup(void)
if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
unregister_netdev(dev_ltpc);
- ltpc_timer.data = 0; /* signal the poll routine that we're done */
-
del_timer_sync(&ltpc_timer);
if(debug & DEBUG_VERBOSE) printk("freeing irq\n");
diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h
index cd30544a3729..58cf945732a4 100644
--- a/drivers/net/appletalk/ltpc.h
+++ b/drivers/net/appletalk/ltpc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*** ltpc.h
*
*
diff --git a/drivers/net/arcnet/Makefile b/drivers/net/arcnet/Makefile
index 5ce8ee63e435..53525e8ea130 100644
--- a/drivers/net/arcnet/Makefile
+++ b/drivers/net/arcnet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for linux/drivers/net/arcnet
#
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index fcfccbb3d9a2..8459115d9d4e 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -382,9 +382,10 @@ static void arcdev_setup(struct net_device *dev)
dev->flags = IFF_BROADCAST;
}
-static void arcnet_timer(unsigned long data)
+static void arcnet_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
+ struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct net_device *dev = lp->dev;
if (!netif_carrier_ok(dev)) {
netif_carrier_on(dev);
@@ -450,9 +451,7 @@ struct net_device *alloc_arcdev(const char *name)
lp->dev = dev;
spin_lock_init(&lp->lock);
- init_timer(&lp->timer);
- lp->timer.data = (unsigned long) dev;
- lp->timer.function = arcnet_timer;
+ timer_setup(&lp->timer, arcnet_timer, 0);
}
return dev;
diff --git a/drivers/net/arcnet/com9026.h b/drivers/net/arcnet/com9026.h
index efcaf6707214..6adbc18711e0 100644
--- a/drivers/net/arcnet/com9026.h
+++ b/drivers/net/arcnet/com9026.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __COM9026_H
#define __COM9026_H
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..1ed9529e7bd1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index e52e25a977fa..3868e1a5126d 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c99dc59d729b..c669554d70bb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- recv_probe = ACCESS_ONCE(bond->recv_probe);
+ recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
@@ -1217,25 +1217,21 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
}
}
-static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave)
+static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
+ struct netlink_ext_ack *extack)
{
struct netdev_lag_upper_info lag_upper_info;
- int err;
lag_upper_info.tx_type = bond_lag_tx_type(bond);
- err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
- &lag_upper_info);
- if (err)
- return err;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
- return 0;
+
+ return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
+ &lag_upper_info, extack);
}
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
netdev_upper_dev_unlink(slave->dev, bond->dev);
slave->dev->flags &= ~IFF_SLAVE;
- rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
}
static struct slave *bond_alloc_slave(struct bonding *bond)
@@ -1328,7 +1324,8 @@ void bond_lower_state_changed(struct slave *slave)
}
/* enslave device <slave> to bond device <master> */
-int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ struct netlink_ext_ack *extack)
{
struct bonding *bond = netdev_priv(bond_dev);
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
@@ -1346,12 +1343,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
+ NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
netdev_err(bond_dev,
"Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
if (bond_dev == slave_dev) {
+ NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
netdev_err(bond_dev, "cannot enslave bond to itself.\n");
return -EPERM;
}
@@ -1362,6 +1361,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
slave_dev->name);
if (vlan_uses_dev(bond_dev)) {
+ NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
slave_dev->name, bond_dev->name);
return -EPERM;
@@ -1381,6 +1381,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if (slave_dev->flags & IFF_UP) {
+ NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
return -EPERM;
@@ -1421,6 +1422,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
+ NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
slave_dev->name, slave_dev->type, bond_dev->type);
return -EINVAL;
@@ -1428,6 +1430,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
slave_dev->type);
res = -EOPNOTSUPP;
@@ -1443,6 +1446,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
+ NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
res = -EOPNOTSUPP;
goto err_undo_flags;
@@ -1709,7 +1713,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_detach;
}
- res = bond_master_upper_dev_link(bond, new_slave);
+ res = bond_master_upper_dev_link(bond, new_slave, extack);
if (res) {
netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister;
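
These hunks thread a struct netlink_ext_ack pointer through bond_enslave() so refusals reach the netlink caller as readable strings rather than only as netdev_err() log lines. A minimal sketch of the pattern, using a hypothetical helper:

#include <linux/netdevice.h>
#include <linux/netlink.h>

/* Hypothetical check; the pattern mirrors the hunks above. */
static int my_check_slave(struct net_device *slave_dev,
			  struct netlink_ext_ack *extack)
{
	if (slave_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
		return -EPERM;
	}
	return 0;
}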
@@ -2042,6 +2046,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
+ slave->link_new_state = slave->link;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2491,7 +2496,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+ int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+ unsigned int alen;
if (!slave_do_arp_validate(bond, slave)) {
if ((slave_do_arp_validate_only(bond) && is_arp) ||
@@ -3072,7 +3078,16 @@ static int bond_slave_netdev_event(unsigned long event,
break;
case NETDEV_UP:
case NETDEV_CHANGE:
- bond_update_speed_duplex(slave);
+ /* For 802.3ad mode only:
+ * Getting invalid Speed/Duplex values here will put the slave
+ * in a weird state. So mark it as link-down for the time
+ * being and let link-monitoring (miimon) set it right when
+ * correct speeds/duplex are available.
+ */
+ if (bond_update_speed_duplex(slave) &&
+ BOND_MODE(bond) == BOND_MODE_8023AD)
+ slave->link = BOND_LINK_DOWN;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
/* Fallthrough */
@@ -3253,7 +3268,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash;
+ return hash >> 1;
}
/*-------------------------- Device entry points ----------------------------*/
@@ -3482,7 +3497,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
switch (cmd) {
case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
- res = bond_enslave(bond_dev, slave_dev);
+ res = bond_enslave(bond_dev, slave_dev, NULL);
break;
case BOND_RELEASE_OLD:
case SIOCBONDRELEASE:
@@ -3810,7 +3825,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
@@ -3972,7 +3987,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 5931aa2fe997..8a9b085c2a98 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1383,7 +1383,7 @@ static int bond_option_slaves_set(struct bonding *bond,
switch (command[0]) {
case '+':
netdev_dbg(bond->dev, "Adding slave %s\n", dev->name);
- ret = bond_enslave(bond->dev, dev);
+ ret = bond_enslave(bond->dev, dev, NULL);
break;
case '-':
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index d8d4ada034b7..f7799321dffb 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <net/net_namespace.h>
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 9bbd45391f6c..54ae1165d60a 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 438966bf51c2..b8029ea03307 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -66,9 +66,9 @@ static const struct cfhsi_config hsi_default_config = {
static LIST_HEAD(cfhsi_list);
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -737,9 +737,9 @@ out_of_sync:
schedule_work(&cfhsi->out_of_sync_work);
}
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -997,9 +997,9 @@ static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
wake_up_interruptible(&cfhsi->wake_down_wait);
}
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
@@ -1211,17 +1211,11 @@ static int cfhsi_open(struct net_device *ndev)
init_waitqueue_head(&cfhsi->flush_fifo_wait);
/* Setup the inactivity timer. */
- init_timer(&cfhsi->inactivity_timer);
- cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
- cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
+ timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
/* Setup the slowpath RX timer. */
- init_timer(&cfhsi->rx_slowpath_timer);
- cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
- cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
+ timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
/* Setup the aggregation timer. */
- init_timer(&cfhsi->aggregation_timer);
- cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
- cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
+ timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
/* Activate HSI interface. */
res = cfhsi->ops->cfhsi_up(cfhsi->ops);
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 4aabbee133b8..02b8ed794564 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Controller Area Network drivers.
#
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c18947189..d065c0e2d18e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 46a746ee80bb..b5145a7f874c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index a7be12d9a139..897c6b113d3f 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -807,10 +807,10 @@ static irqreturn_t grcan_interrupt(int irq, void *dev_id)
 * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
* for single shot)
*/
-static void grcan_running_reset(unsigned long data)
+static void grcan_running_reset(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_priv *priv = from_timer(priv, t, rr_timer);
+ struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -898,10 +898,10 @@ static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
}
/* Disable channels and schedule a running reset */
-static void grcan_initiate_running_reset(unsigned long data)
+static void grcan_initiate_running_reset(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct grcan_priv *priv = netdev_priv(dev);
+ struct grcan_priv *priv = from_timer(priv, t, hang_timer);
+ struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
@@ -1626,13 +1626,8 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
spin_lock_init(&priv->lock);
if (priv->need_txbug_workaround) {
- init_timer(&priv->rr_timer);
- priv->rr_timer.function = grcan_running_reset;
- priv->rr_timer.data = (unsigned long)dev;
-
- init_timer(&priv->hang_timer);
- priv->hang_timer.function = grcan_initiate_running_reset;
- priv->hang_timer.data = (unsigned long)dev;
+ timer_setup(&priv->rr_timer, grcan_running_reset, 0);
+ timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
}
netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 4d1fe8d95042..2772d05ff11c 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 51c2d182a33a..b4efd711f824 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -29,14 +29,19 @@
#include "peak_canfd_user.h"
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
MODULE_LICENSE("GPL v2");
#define PCIEFD_DRV_NAME "peak_pciefd"
#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
+#define PCAN_M2_ID 0x001a /* for M2 slot cards */
/* PEAK PCIe board access description */
#define PCIEFD_BAR0_SIZE (64 * 1024)
@@ -203,6 +208,11 @@ struct pciefd_board {
/* supported device ids. */
static const struct pci_device_id peak_pciefd_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,}
};
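
The new PEAK device IDs above are matched through the driver's pci_device_id table; such a table is normally exported to the module loader so the driver autoloads when one of the listed devices is enumerated. A sketch of the usual pattern, with hypothetical names (MY_VENDOR_ID, MY_NEW_DEV_ID, my_pci_tbl are illustrative only):

	#include <linux/module.h>
	#include <linux/pci.h>

	#define MY_VENDOR_ID	0x001c	/* hypothetical, mirrors PEAK_PCI_VENDOR_ID */
	#define MY_NEW_DEV_ID	0x001a	/* hypothetical new device */

	static const struct pci_device_id my_pci_tbl[] = {
		{ MY_VENDOR_ID, MY_NEW_DEV_ID, PCI_ANY_ID, PCI_ANY_ID, },
		{ 0, }				/* sentinel entry terminates the table */
	};
	/* lets udev/modprobe autoload the module when the device appears */
	MODULE_DEVICE_TABLE(pci, my_pci_tbl);
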
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index be11ddd11b87..9253aaf9e739 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the SJA1000 CAN controller drivers.
#
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index dd56133cc461..485b19c9ae47 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -381,9 +381,9 @@ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff)
/*
* set leds state according to channel activity
*/
-static void pcan_led_timer(unsigned long arg)
+static void pcan_led_timer(struct timer_list *t)
{
- struct pcan_pccard *card = (struct pcan_pccard *)arg;
+ struct pcan_pccard *card = from_timer(card, t, led_timer);
struct net_device *netdev;
int i, up_count = 0;
u8 ccr;
@@ -692,9 +692,7 @@ static int pcan_probe(struct pcmcia_device *pdev)
}
/* init the timer which controls the leds */
- init_timer(&card->led_timer);
- card->led_timer.function = pcan_led_timer;
- card->led_timer.data = (unsigned long)card;
+ timer_setup(&card->led_timer, pcan_led_timer, 0);
/* request the given irq */
err = request_irq(pdev->irq, &pcan_isr, IRQF_SHARED, PCC_NAME, card);
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index 35f062282dbd..2893007ea05e 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* softing common interfaces
*
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
index ebbf69815623..68a161547644 100644
--- a/drivers/net/can/softing/softing_platform.h
+++ b/drivers/net/can/softing/softing_platform.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/platform_device.h>
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b0c80859f746..1ac2090a1721 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode
+ * ignoring timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 164453fd55d0..49ac7b99ba32 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Controller Area Network USB drivers.
#
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 838545ce468d..25a9b79cc42d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -259,10 +259,13 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
/*
* handle end of waiting for the device to reset
*/
-static void pcan_usb_restart(unsigned long arg)
+static void pcan_usb_restart(struct timer_list *t)
{
+ struct pcan_usb *pdev = from_timer(pdev, t, restart_timer);
+ struct peak_usb_device *dev = &pdev->dev;
+
/* notify candev and netdev */
- peak_usb_restart_complete((struct peak_usb_device *)arg);
+ peak_usb_restart_complete(dev);
}
/*
@@ -798,9 +801,7 @@ static int pcan_usb_init(struct peak_usb_device *dev)
int err;
/* initialize a timer needed to wait for hardware restart */
- init_timer(&pdev->restart_timer);
- pdev->restart_timer.function = pcan_usb_restart;
- pdev->restart_timer.data = (unsigned long)dev;
+ timer_setup(&pdev->restart_timer, pcan_usb_restart, 0);
/*
* explicit use of dev_xxx() instead of netdev_xxx() here:
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 017f48cdcab9..8b1a859f5140 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* e100net.c: A network driver for the ETRAX 100LX network controller.
*
@@ -164,9 +165,16 @@ static unsigned int network_rec_config_shadow = 0;
static unsigned int network_tr_ctrl_shadow = 0;
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
/* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL, 0, 0);
-static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
@@ -174,7 +182,6 @@ static int led_active;
static int rx_queue_len;
/* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
static int full_duplex;
static enum duplex current_duplex;
@@ -199,9 +206,7 @@ static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
-static void e100_check_speed(unsigned long priv);
static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
static void e100_set_duplex(struct net_device* dev, enum duplex);
static void e100_negotiate(struct net_device* dev);
@@ -213,7 +218,6 @@ static void e100_send_mdio_bit(unsigned char bit);
static unsigned char e100_receive_mdio_bit(void);
static void e100_reset_transceiver(struct net_device* net);
-static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
@@ -380,17 +384,12 @@ etrax_ethernet_init(void)
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- speed_timer.data = (unsigned long)dev;
- speed_timer.function = e100_check_speed;
-
- clear_led_timer.function = e100_clear_network_leds;
- clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
- duplex_timer.function = e100_check_duplex;
+
+ timer_dev = dev;
/* Initialize mii interface */
np->mii_if.phy_id_mask = 0x1f;
@@ -679,9 +678,9 @@ intel_check_speed(struct net_device* dev)
}
#endif
static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
{
- struct net_device* dev = (struct net_device*)priv;
+ struct net_device* dev = timer_dev;
struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
@@ -798,9 +797,9 @@ e100_set_speed(struct net_device* dev, unsigned long speed)
}
static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)priv;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
int old_duplex;
@@ -1668,9 +1667,9 @@ e100_hardware_send_packet(struct net_local *np, char *buf, int length)
}
static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)dummy;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
spin_lock(&np->led_lock);
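
Because eth_v10.c uses statically defined timers, its callbacks cannot recover a containing structure with from_timer(); the patch instead parks the single net_device in a file-scope timer_dev pointer. A sketch of that approach under assumed names (the_dev, my_check, check_timer are hypothetical), showing the new two-argument DEFINE_TIMER():

	#include <linux/timer.h>
	#include <linux/netdevice.h>
	#include <linux/jiffies.h>

	static struct net_device *the_dev;	/* single device, set once at probe time */

	static void my_check(struct timer_list *unused);
	/* DEFINE_TIMER() now takes only the timer name and the callback */
	static DEFINE_TIMER(check_timer, my_check);

	static void my_check(struct timer_list *unused)
	{
		struct net_device *dev = the_dev;	/* context comes from the static pointer */

		netdev_dbg(dev, "periodic check\n");
		mod_timer(&check_timer, jiffies + HZ);
	}
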
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 4a5b5bd297ee..d040aeb45172 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 27f32a50df57..2f988216dab9 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -1,6 +1,8 @@
menuconfig B53
tristate "Broadcom BCM53xx managed switch support"
depends on NET_DSA
+ select NET_DSA_TAG_BRCM
+ select NET_DSA_TAG_BRCM_PREPEND
help
This driver adds support for Broadcom managed switch chips. It supports
BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile
index 7e6f9a8bfd75..4256fb42a4dd 100644
--- a/drivers/net/dsa/b53/Makefile
+++ b/drivers/net/dsa/b53/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_B53) += b53_common.o
obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 274f3679f33d..f5a8dd96fd75 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -325,7 +325,6 @@ static void b53_get_vlan_entry(struct b53_device *dev, u16 vid,
static void b53_set_forwarding(struct b53_device *dev, int enable)
{
- struct dsa_switch *ds = dev->ds;
u8 mgmt;
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -337,14 +336,11 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
- /* Include IMP port in dumb forwarding mode when no tagging protocol is
- * set
+ /* Include IMP port in dumb forwarding mode
*/
- if (ds->ops->get_tag_protocol(ds) == DSA_TAG_PROTO_NONE) {
- b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
- mgmt |= B53_MII_DUMB_FWDG_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, &mgmt);
+ mgmt |= B53_MII_DUMB_FWDG_EN;
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
}
static void b53_enable_vlan(struct b53_device *dev, bool enable)
@@ -484,7 +480,7 @@ static int b53_fast_age_vlan(struct b53_device *dev, u16 vid)
return b53_flush_arl(dev, FAST_AGE_VLAN);
}
-static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
+void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
{
struct b53_device *dev = ds->priv;
unsigned int i;
@@ -500,12 +496,12 @@ static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan);
}
}
+EXPORT_SYMBOL(b53_imp_vlan_setup);
-static int b53_enable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
- unsigned int cpu_port = dev->cpu_port;
+ unsigned int cpu_port = ds->ports[port].cpu_dp->index;
u16 pvlan;
/* Clear the Rx and Tx disable bits and set to no spanning tree */
@@ -523,11 +519,15 @@ static int b53_enable_port(struct dsa_switch *ds, int port,
b53_imp_vlan_setup(ds, cpu_port);
+ /* If EEE was enabled, restore it */
+ if (dev->ports[port].eee.eee_enabled)
+ b53_eee_enable_set(ds, port, true);
+
return 0;
}
+EXPORT_SYMBOL(b53_enable_port);
-static void b53_disable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
u8 reg;
@@ -537,20 +537,80 @@ static void b53_disable_port(struct dsa_switch *ds, int port,
reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
+EXPORT_SYMBOL(b53_disable_port);
-static void b53_enable_cpu_port(struct b53_device *dev)
+void b53_brcm_hdr_setup(struct dsa_switch *ds, int port)
+{
+ bool tag_en = !(ds->ops->get_tag_protocol(ds, port) ==
+ DSA_TAG_PROTO_NONE);
+ struct b53_device *dev = ds->priv;
+ u8 hdr_ctl, val;
+ u16 reg;
+
+ /* Resolve which bit controls the Broadcom tag */
+ switch (port) {
+ case 8:
+ val = BRCM_HDR_P8_EN;
+ break;
+ case 7:
+ val = BRCM_HDR_P7_EN;
+ break;
+ case 5:
+ val = BRCM_HDR_P5_EN;
+ break;
+ default:
+ val = 0;
+ break;
+ }
+
+ /* Enable Broadcom tags for IMP port */
+ b53_read8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, &hdr_ctl);
+ if (tag_en)
+ hdr_ctl |= val;
+ else
+ hdr_ctl &= ~val;
+ b53_write8(dev, B53_MGMT_PAGE, B53_BRCM_HDR, hdr_ctl);
+
+ /* Registers below are only accessible on newer devices */
+ if (!is58xx(dev))
+ return;
+
+ /* Enable reception Broadcom tag for CPU TX (switch RX) to
+ * allow us to tag outgoing frames
+ */
+ b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, &reg);
+ if (tag_en)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_RX_DIS, reg);
+
+ /* Enable transmission of Broadcom tags from the switch (CPU RX) to
+ * allow delivering frames to the per-port net_devices
+ */
+ b53_read16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, &reg);
+ if (tag_en)
+ reg &= ~BIT(port);
+ else
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, B53_BRCM_HDR_TX_DIS, reg);
+}
+EXPORT_SYMBOL(b53_brcm_hdr_setup);
+
+static void b53_enable_cpu_port(struct b53_device *dev, int port)
{
- unsigned int cpu_port = dev->cpu_port;
u8 port_ctrl;
/* BCM5325 CPU port is at 8 */
- if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25)
- cpu_port = B53_CPU_PORT;
+ if ((is5325(dev) || is5365(dev)) && port == B53_CPU_PORT_25)
+ port = B53_CPU_PORT;
port_ctrl = PORT_CTRL_RX_BCST_EN |
PORT_CTRL_RX_MCST_EN |
PORT_CTRL_RX_UCST_EN;
- b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl);
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
+
+ b53_brcm_hdr_setup(dev->ds, port);
}
static void b53_enable_mib(struct b53_device *dev)
@@ -562,8 +622,9 @@ static void b53_enable_mib(struct b53_device *dev)
b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
}
-static int b53_configure_vlan(struct b53_device *dev)
+int b53_configure_vlan(struct dsa_switch *ds)
{
+ struct b53_device *dev = ds->priv;
struct b53_vlan vl = { 0 };
int i;
@@ -586,6 +647,7 @@ static int b53_configure_vlan(struct b53_device *dev)
return 0;
}
+EXPORT_SYMBOL(b53_configure_vlan);
static void b53_switch_reset_gpio(struct b53_device *dev)
{
@@ -700,7 +762,7 @@ static int b53_apply_config(struct b53_device *priv)
/* disable switching */
b53_set_forwarding(priv, 0);
- b53_configure_vlan(priv);
+ b53_configure_vlan(priv->ds);
/* enable switching */
b53_set_forwarding(priv, 1);
@@ -816,12 +878,13 @@ static int b53_setup(struct dsa_switch *ds)
if (ret)
dev_err(ds->dev, "failed to apply configuration\n");
+ /* Configure IMP/CPU port, disable unused ports. Enabled
+ * ports will be configured with .port_enable
+ */
for (port = 0; port < dev->num_ports; port++) {
- if (BIT(port) & ds->enabled_port_mask)
- b53_enable_port(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- b53_enable_cpu_port(dev);
- else
+ if (dsa_is_cpu_port(ds, port))
+ b53_enable_cpu_port(dev, port);
+ else if (dsa_is_unused_port(ds, port))
b53_disable_port(ds, port, NULL);
}
@@ -832,6 +895,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
if (!phy_is_pseudo_fixed_link(phydev))
@@ -953,6 +1017,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po);
}
}
+
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
}
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
@@ -986,7 +1053,6 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- unsigned int cpu_port = dev->cpu_port;
struct b53_vlan *vl;
u16 vid;
@@ -995,12 +1061,11 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
if (untagged)
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
- vl->untag &= ~BIT(cpu_port);
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
@@ -1280,7 +1345,7 @@ EXPORT_SYMBOL(b53_fdb_dump);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- s8 cpu_port = ds->dst->cpu_dp->index;
+ s8 cpu_port = ds->ports[port].cpu_dp->index;
u16 pvlan, reg;
unsigned int i;
@@ -1298,7 +1363,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (ds->ports[i].bridge_dev != br)
+ if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this local port to the remote port VLAN control
@@ -1326,7 +1391,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
- s8 cpu_port = ds->dst->cpu_dp->index;
+ s8 cpu_port = ds->ports[port].cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1334,7 +1399,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (ds->ports[i].bridge_dev != br)
+ if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -1364,8 +1429,8 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
} else {
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(dev->cpu_port);
- vl->untag |= BIT(port) | BIT(dev->cpu_port);
+ vl->members |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port) | BIT(cpu_port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
@@ -1414,9 +1479,40 @@ void b53_br_fast_age(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_br_fast_age);
-static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
+static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
+{
+ /* Broadcom switches will accept enabling Broadcom tags on the
+ * following ports: 5, 7 and 8, any other port is not supported
+ */
+ switch (port) {
+ case B53_CPU_PORT_25:
+ case 7:
+ case B53_CPU_PORT:
+ return true;
+ }
+
+ dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port);
+ return false;
+}
+
+static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
- return DSA_TAG_PROTO_NONE;
+ struct b53_device *dev = ds->priv;
+
+ /* Older models support a different tag format that we do not
+ * support in net/dsa/tag_brcm.c yet.
+ */
+ if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
+ return DSA_TAG_PROTO_NONE;
+
+ /* Broadcom BCM58xx chips have a flow accelerator on Port 8
+ * which requires us to use the prepended Broadcom tag type
+ */
+ if (dev->chip_id == BCM58XX_DEVICE_ID && port == B53_CPU_PORT)
+ return DSA_TAG_PROTO_BRCM_PREPEND;
+
+ return DSA_TAG_PROTO_BRCM;
}
int b53_mirror_add(struct dsa_switch *ds, int port,
@@ -1484,6 +1580,69 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
+void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+EXPORT_SYMBOL(b53_eee_enable_set);
+
+
+/* Returns 0 if EEE was not enabled, or 1 otherwise
+ */
+int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
+{
+ int ret;
+
+ ret = phy_init_eee(phy, 0);
+ if (ret)
+ return 0;
+
+ b53_eee_enable_set(ds, port, true);
+
+ return 1;
+}
+EXPORT_SYMBOL(b53_eee_init);
+
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+ u16 reg;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
+ e->eee_enabled = p->eee_enabled;
+ e->eee_active = !!(reg & BIT(port));
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_get_mac_eee);
+
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+{
+ struct b53_device *dev = ds->priv;
+ struct ethtool_eee *p = &dev->ports[port].eee;
+
+ if (is5325(dev) || is5365(dev))
+ return -EOPNOTSUPP;
+
+ p->eee_enabled = e->eee_enabled;
+ b53_eee_enable_set(ds, port, e->eee_enabled);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_set_mac_eee);
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -1495,6 +1654,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
.adjust_link = b53_adjust_link,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
+ .get_mac_eee = b53_get_mac_eee,
+ .set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_stp_state_set = b53_br_set_stp_state,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 01bd8cbe9a3f..daaaa1ecb996 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -70,6 +70,7 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
+ struct ethtool_eee eee;
};
struct b53_vlan {
@@ -186,11 +187,6 @@ static inline int is58xx(struct b53_device *dev)
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
-static inline int is_cpu_port(struct b53_device *dev, int port)
-{
- return dev->cpu_port;
-}
-
struct b53_device *b53_switch_alloc(struct device *base,
const struct b53_io_ops *ops,
void *priv);
@@ -204,119 +200,30 @@ static inline void b53_switch_remove(struct b53_device *dev)
dsa_unregister_switch(dev->ds);
}
-static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read8(dev, page, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
+#define b53_build_op(type_op_size, val_type) \
+static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \
+ u8 reg, val_type val) \
+{ \
+ int ret; \
+ \
+ mutex_lock(&dev->reg_mutex); \
+ ret = dev->ops->type_op_size(dev, page, reg, val); \
+ mutex_unlock(&dev->reg_mutex); \
+ \
+ return ret; \
}
-static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read16(dev, page, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read32(dev, page, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read48(dev, page, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read64(dev, page, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
+b53_build_op(read8, u8 *);
+b53_build_op(read16, u16 *);
+b53_build_op(read32, u32 *);
+b53_build_op(read48, u64 *);
+b53_build_op(read64, u64 *);
-static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write8(dev, page, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg,
- u16 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write16(dev, page, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg,
- u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write32(dev, page, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg,
- u64 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write48(dev, page, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg,
- u64 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write64(dev, page, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
+b53_build_op(write8, u8);
+b53_build_op(write16, u16);
+b53_build_op(write32, u32);
+b53_build_op(write48, u64);
+b53_build_op(write64, u64);
struct b53_arl_entry {
u8 port;
@@ -377,6 +284,8 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
#endif
/* Exported functions towards other drivers */
+void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
+int b53_configure_vlan(struct dsa_switch *ds);
void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds);
@@ -403,5 +312,12 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
void b53_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
+int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
#endif
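
The b53_build_op() macro above collapses the ten near-identical locked accessors into one template. For reference, a sketch of what one generated accessor expands to, matching the b53_read16() helper that the patch removes (struct b53_device and its reg_mutex/ops fields come from b53_priv.h):

	/* b53_build_op(read16, u16 *) expands to roughly: */
	static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
	{
		int ret;

		mutex_lock(&dev->reg_mutex);		/* serialize switch register access */
		ret = dev->ops->read16(dev, page, reg, val);
		mutex_unlock(&dev->reg_mutex);

		return ret;
	}
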
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index e5c86d44667a..2a9f421680aa 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -50,6 +50,9 @@
/* Jumbo Frame Registers */
#define B53_JUMBO_PAGE 0x40
+/* EEE Control Registers Page */
+#define B53_EEE_PAGE 0x92
+
/* CFP Configuration Registers Page */
#define B53_CFP_PAGE 0xa1
@@ -210,6 +213,7 @@
#define B53_BRCM_HDR 0x03
#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+#define BRCM_HDR_P7_EN BIT(2) /* Enable tagging on port 7 */
/* Mirror capture control register (16 bit) */
#define B53_MIR_CAP_CTL 0x10
@@ -249,6 +253,12 @@
/* Revision ID register (8 bit) */
#define B53_REV_ID 0x40
+/* Broadcom header RX control (16 bit) */
+#define B53_BRCM_HDR_RX_DIS 0x60
+
+/* Broadcom header TX control (16 bit) */
+#define B53_BRCM_HDR_TX_DIS 0x62
+
/*************************************************************************
* ARL Access Page Registers
*************************************************************************/
@@ -465,6 +475,44 @@
#define JMS_MAX_SIZE 9724
/*************************************************************************
+ * EEE Configuration Page Registers
+ *************************************************************************/
+
+/* EEE Enable control register (16 bit) */
+#define B53_EEE_EN_CTRL 0x00
+
+/* EEE LPI assert status register (16 bit) */
+#define B53_EEE_LPI_ASSERT_STS 0x02
+
+/* EEE LPI indicate status register (16 bit) */
+#define B53_EEE_LPI_INDICATE 0x4
+
+/* EEE Receiving idle symbols status register (16 bit) */
+#define B53_EEE_RX_IDLE_SYM_STS 0x6
+
+/* EEE Pipeline timer register (32 bit) */
+#define B53_EEE_PIP_TIMER 0xC
+
+/* EEE Sleep timer Gig register (32 bit) */
+#define B53_EEE_SLEEP_TIMER_GIG(i) (0x10 + 4 * (i))
+
+/* EEE Sleep timer FE register (32 bit) */
+#define B53_EEE_SLEEP_TIMER_FE(i) (0x34 + 4 * (i))
+
+/* EEE Minimum LP timer Gig register (32 bit) */
+#define B53_EEE_MIN_LP_TIMER_GIG(i) (0x58 + 4 * (i))
+
+/* EEE Minimum LP timer FE register (32 bit) */
+#define B53_EEE_MIN_LP_TIMER_FE(i) (0x7c + 4 * (i))
+
+/* EEE Wake timer Gig register (16 bit) */
+#define B53_EEE_WAKE_TIMER_GIG(i) (0xa0 + 2 * (i))
+
+/* EEE Wake timer FE register (16 bit) */
+#define B53_EEE_WAKE_TIMER_FE(i) (0xb2 + 2 * (i))
+
+
+/*************************************************************************
* CFP Configuration Page Registers
*************************************************************************/
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index d7b53d53c116..ea01f24f15e7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -35,71 +35,12 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
-static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
return DSA_TAG_PROTO_BRCM;
}
-static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
-{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int i;
- u32 reg;
-
- /* Enable the IMP Port to be in the same VLAN as the other ports
- * on a per-port basis such that we only have Port i and IMP in
- * the same VLAN.
- */
- for (i = 0; i < priv->hw_params.num_ports; i++) {
- if (!((1 << i) & ds->enabled_port_mask))
- continue;
-
- reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
- reg |= (1 << cpu_port);
- core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
- }
-}
-
-static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
-{
- u32 reg, val;
-
- /* Resolve which bit controls the Broadcom tag */
- switch (port) {
- case 8:
- val = BRCM_HDR_EN_P8;
- break;
- case 7:
- val = BRCM_HDR_EN_P7;
- break;
- case 5:
- val = BRCM_HDR_EN_P5;
- break;
- default:
- val = 0;
- break;
- }
-
- /* Enable Broadcom tags for IMP port */
- reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
- reg |= val;
- core_writel(priv, reg, CORE_BRCM_HDR_CTRL);
-
- /* Enable reception Broadcom tag for CPU TX (switch RX) to
- * allow us to tag outgoing frames
- */
- reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
- reg &= ~(1 << port);
- core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);
-
- /* Enable transmission of Broadcom tags from the switch (CPU RX) to
- * allow delivering frames to the per-port net_devices
- */
- reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
- reg &= ~(1 << port);
- core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
-}
-
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -138,7 +79,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
- bcm_sf2_brcm_hdr_setup(priv, port);
+ b53_brcm_hdr_setup(ds, port);
/* Force link status for IMP port */
reg = core_readl(priv, offset);
@@ -146,19 +87,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
core_writel(priv, reg, offset);
}
-static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 reg;
-
- reg = core_readl(priv, CORE_EEE_EN_CTRL);
- if (enable)
- reg |= 1 << port;
- else
- reg &= ~(1 << port);
- core_writel(priv, reg, CORE_EEE_EN_CTRL);
-}
-
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -167,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
@@ -236,7 +164,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst->cpu_dp->index;
unsigned int i;
u32 reg;
@@ -247,7 +174,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
/* Enable Broadcom tags for that port if requested */
if (priv->brcm_tag_mask & BIT(port))
- bcm_sf2_brcm_hdr_setup(priv, port);
+ b53_brcm_hdr_setup(ds, port);
/* Configure Traffic Class to QoS mapping, allow each priority to map
* to a different queue number
@@ -257,9 +184,6 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg |= i << (PRT_TO_QID_SHIFT * i);
core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
- /* Clear the Rx and Tx disable bits and set to no spanning tree */
- core_writel(priv, 0, CORE_G_PCTL_PORT(port));
-
/* Re-enable the GPHY and re-apply workarounds */
if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
bcm_sf2_gphy_enable_set(ds, true);
@@ -282,23 +206,20 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
if (port == priv->moca_port)
bcm_sf2_port_intr_enable(priv, port);
- /* Set this port, and only this one to be in the default VLAN,
- * if member of a bridge, restore its membership prior to
- * bringing down this port.
- */
- reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
- reg &= ~PORT_VLAN_CTRL_MASK;
- reg |= (1 << port);
- reg |= priv->dev->ports[port].vlan_ctl_mask;
- core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
-
- bcm_sf2_imp_vlan_setup(ds, cpu_port);
-
- /* If EEE was enabled, restore it */
- if (priv->port_sts[port].eee.eee_enabled)
- bcm_sf2_eee_enable_set(ds, port, true);
+ /* Set per-queue pause threshold to 32 */
+ core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
+
+ /* Set ACB threshold to 24 */
+ for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
+ reg = acb_readl(priv, ACB_QUEUE_CFG(port *
+ SF2_NUM_EGRESS_QUEUES + i));
+ reg &= ~XOFF_THRESHOLD_MASK;
+ reg |= 24;
+ acb_writel(priv, reg, ACB_QUEUE_CFG(port *
+ SF2_NUM_EGRESS_QUEUES + i));
+ }
- return 0;
+ return b53_enable_port(ds, port, phy);
}
static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
@@ -321,9 +242,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
else
off = CORE_G_PCTL_PORT(port);
- reg = core_readl(priv, off);
- reg |= RX_DIS | TX_DIS;
- core_writel(priv, reg, off);
+ b53_disable_port(ds, port, phy);
/* Power down the port memory */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -331,47 +250,6 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
}
-/* Returns 0 if EEE was not enabled, or 1 otherwise
- */
-static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- int ret;
-
- ret = phy_init_eee(phy, 0);
- if (ret)
- return 0;
-
- bcm_sf2_eee_enable_set(ds, port, true);
-
- return 1;
-}
-
-static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
-{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->port_sts[port].eee;
- u32 reg;
-
- reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & (1 << port));
-
- return 0;
-}
-
-static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
-{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->port_sts[port].eee;
-
- p->eee_enabled = e->eee_enabled;
- bcm_sf2_eee_enable_set(ds, port, e->eee_enabled);
-
- return 0;
-}
static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
int regnum, u16 val)
@@ -606,7 +484,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->port_sts[port].eee;
+ struct ethtool_eee *p = &priv->dev->ports[port].eee;
u32 id_mode_dis = 0, port_mode;
const char *str = NULL;
u32 reg, offset;
@@ -688,7 +566,7 @@ force_link:
core_writel(priv, reg, offset);
if (!phydev->is_pseudo_fixed_link)
- p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
}
static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
@@ -724,7 +602,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
* state machine and make it go in PHY_FORCING state instead.
*/
if (!status->link)
- netif_carrier_off(ds->ports[port].netdev);
+ netif_carrier_off(ds->ports[port].slave);
status->duplex = 1;
} else {
status->link = 1;
@@ -749,6 +627,20 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
status->pause = 1;
}
+static void bcm_sf2_enable_acb(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg;
+
+ /* Enable ACB globally */
+ reg = acb_readl(priv, ACB_CONTROL);
+ reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+ acb_writel(priv, reg, ACB_CONTROL);
+ reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+ reg |= ACB_EN | ACB_ALGORITHM;
+ acb_writel(priv, reg, ACB_CONTROL);
+}
+
static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -761,8 +653,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
* bcm_sf2_sw_setup
*/
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->enabled_port_mask ||
- dsa_is_cpu_port(ds, port))
+ if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port, NULL);
}
@@ -785,19 +676,21 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
bcm_sf2_gphy_enable_set(ds, true);
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->enabled_port_mask)
+ if (dsa_is_user_port(ds, port))
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
}
+ bcm_sf2_enable_acb(ds);
+
return 0;
}
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->dst->cpu_dp->netdev;
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol;
@@ -820,9 +713,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = ds->dst->cpu_dp->netdev;
+ struct net_device *p = ds->ports[port].cpu_dp->master;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- s8 cpu_port = ds->dst->cpu_dp->index;
+ s8 cpu_port = ds->ports[port].cpu_dp->index;
struct ethtool_wolinfo pwol;
p->ethtool_ops->get_wol(p, &pwol);
@@ -846,45 +739,6 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
return p->ethtool_ops->set_wol(p, wol);
}
-static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
-{
- unsigned int timeout = 10;
- u32 reg;
-
- do {
- reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
- if (!(reg & ARLA_VTBL_STDN))
- return 0;
-
- usleep_range(1000, 2000);
- } while (timeout--);
-
- return -ETIMEDOUT;
-}
-
-static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
-{
- core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
-
- return bcm_sf2_vlan_op_wait(priv);
-}
-
-static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
-{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
-
- /* Clear all VLANs */
- bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
-
- for (port = 0; port < priv->hw_params.num_ports; port++) {
- if (!((1 << port) & ds->enabled_port_mask))
- continue;
-
- core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
- }
-}
-
static int bcm_sf2_sw_setup(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -893,7 +747,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* Enable all valid ports and disable those unused */
for (port = 0; port < priv->hw_params.num_ports; port++) {
/* IMP port receives special treatment */
- if ((1 << port) & ds->enabled_port_mask)
+ if (dsa_is_user_port(ds, port))
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
@@ -901,7 +755,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
bcm_sf2_port_disable(ds, port, NULL);
}
- bcm_sf2_sw_configure_vlan(ds);
+ b53_configure_vlan(ds);
+ bcm_sf2_enable_acb(ds);
return 0;
}
@@ -1020,8 +875,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.set_wol = bcm_sf2_sw_set_wol,
.port_enable = bcm_sf2_port_setup,
.port_disable = bcm_sf2_port_disable,
- .get_mac_eee = bcm_sf2_sw_get_mac_eee,
- .set_mac_eee = bcm_sf2_sw_set_mac_eee,
+ .get_mac_eee = b53_get_mac_eee,
+ .set_mac_eee = b53_set_mac_eee,
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_stp_state_set = b53_br_set_stp_state,
@@ -1173,6 +1028,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
* permanently used
*/
set_bit(0, priv->cfp.used);
+ set_bit(0, priv->cfp.unique);
bcm_sf2_identify_ports(priv, dn->child);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 02c499f9c56b..cc31e986e6e3 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -48,14 +48,13 @@ struct bcm_sf2_hw_params {
struct bcm_sf2_port_status {
unsigned int link;
-
- struct ethtool_eee eee;
};
struct bcm_sf2_cfp_priv {
/* Mutex protecting concurrent accesses to the CFP registers */
struct mutex lock;
DECLARE_BITMAP(used, CFP_NUM_RULES);
+ DECLARE_BITMAP(unique, CFP_NUM_RULES);
unsigned int rules_cnt;
};
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 8a1da7e67707..b721a2009b50 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,37 +20,102 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
-struct cfp_udf_layout {
- u8 slices[UDF_NUM_SLICES];
+struct cfp_udf_slice_layout {
+ u8 slices[UDFS_PER_SLICE];
u32 mask_value;
+ u32 base_offset;
+};
+struct cfp_udf_layout {
+ struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};
+static const u8 zero_slice[UDFS_PER_SLICE] = { };
+
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
- .slices = {
- /* End of L2, byte offset 12, src IP[0:15] */
- CFG_UDF_EOL2 | 6,
- /* End of L2, byte offset 14, src IP[16:31] */
- CFG_UDF_EOL2 | 7,
- /* End of L2, byte offset 16, dst IP[0:15] */
- CFG_UDF_EOL2 | 8,
- /* End of L2, byte offset 18, dst IP[16:31] */
- CFG_UDF_EOL2 | 9,
- /* End of L3, byte offset 0, src port */
- CFG_UDF_EOL3 | 0,
- /* End of L3, byte offset 2, dst port */
- CFG_UDF_EOL3 | 1,
- 0, 0, 0
+ .udfs = {
+ [1] = {
+ .slices = {
+ /* End of L2, byte offset 12, src IP[0:15] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[16:31] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, dst IP[0:15] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, dst IP[16:31] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ 0, 0, 0
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
+ },
+ },
+};
+
+/* UDF slices layout for a TCPv6/UDPv6 specification */
+static const struct cfp_udf_layout udf_tcpip6_layout = {
+ .udfs = {
+ [0] = {
+ .slices = {
+ /* End of L2, byte offset 8, src IP[0:15] */
+ CFG_UDF_EOL2 | 4,
+ /* End of L2, byte offset 10, src IP[16:31] */
+ CFG_UDF_EOL2 | 5,
+ /* End of L2, byte offset 12, src IP[32:47] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[48:63] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, src IP[64:79] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, src IP[80:95] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L2, byte offset 20, src IP[96:111] */
+ CFG_UDF_EOL2 | 10,
+ /* End of L2, byte offset 22, src IP[112:127] */
+ CFG_UDF_EOL2 | 11,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_B_0_8_PORT_0,
+ },
+ [3] = {
+ .slices = {
+ /* End of L2, byte offset 24, dst IP[0:15] */
+ CFG_UDF_EOL2 | 12,
+ /* End of L2, byte offset 26, dst IP[16:31] */
+ CFG_UDF_EOL2 | 13,
+ /* End of L2, byte offset 28, dst IP[32:47] */
+ CFG_UDF_EOL2 | 14,
+ /* End of L2, byte offset 30, dst IP[48:63] */
+ CFG_UDF_EOL2 | 15,
+ /* End of L2, byte offset 32, dst IP[64:79] */
+ CFG_UDF_EOL2 | 16,
+ /* End of L2, byte offset 34, dst IP[80:95] */
+ CFG_UDF_EOL2 | 17,
+ /* End of L2, byte offset 36, dst IP[96:111] */
+ CFG_UDF_EOL2 | 18,
+ /* End of L2, byte offset 38, dst IP[112:127] */
+ CFG_UDF_EOL2 | 19,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+ .base_offset = CORE_UDF_0_D_0_11_PORT_0,
+ },
},
- .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
};
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
unsigned int i, count = 0;
- for (i = 0; i < UDF_NUM_SLICES; i++) {
+ for (i = 0; i < UDFS_PER_SLICE; i++) {
if (layout[i] != 0)
count++;
}
@@ -58,15 +123,42 @@ static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
return count;
}
+static inline u32 udf_upper_bits(unsigned int num_udf)
+{
+ return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
+}
+
+static inline u32 udf_lower_bits(unsigned int num_udf)
+{
+ return (u8)GENMASK(num_udf - 1, 0);
+}
+
+static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
+ unsigned int start)
+{
+ const struct cfp_udf_slice_layout *slice_layout;
+ unsigned int slice_idx;
+
+ for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
+ slice_layout = &l->udfs[slice_idx];
+ if (memcmp(slice_layout->slices, zero_slice,
+ sizeof(zero_slice)))
+ break;
+ }
+
+ return slice_idx;
+}
+
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
- unsigned int slice_num,
- const u8 *layout)
+ const struct cfp_udf_layout *layout,
+ unsigned int slice_num)
{
- u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+ u32 offset = layout->udfs[slice_num].base_offset;
unsigned int i;
- for (i = 0; i < UDF_NUM_SLICES; i++)
- core_writel(priv, layout[i], offset + i * 4);
+ for (i = 0; i < UDFS_PER_SLICE; i++)
+ core_writel(priv, layout->udfs[slice_num].slices[i],
+ offset + i * 4);
}
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
@@ -112,69 +204,177 @@ static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
return priv->num_cfp_rules - 1;
}
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
- struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
+ unsigned int rule_index,
+ unsigned int port_num,
+ unsigned int queue_num,
+ bool fwd_map_change)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_tcpip4_spec *v4_spec;
- const struct cfp_udf_layout *layout;
- unsigned int slice_num, rule_index;
- unsigned int queue_num, port_num;
- u8 ip_proto, ip_frag;
- u8 num_udf;
- u32 reg;
int ret;
+ u32 reg;
- /* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) &&
- (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
- return -EINVAL;
+ /* Replace ARL derived destination with DST_MAP derived, define
+ * which port and queue this should be forwarded to.
+ */
+ if (fwd_map_change)
+ reg = CHANGE_FWRD_MAP_IB_REP_ARL |
+ BIT(port_num + DST_MAP_IB_SHIFT) |
+ CHANGE_TC | queue_num << NEW_TC_SHIFT;
+ else
+ reg = 0;
- if (fs->location != RX_CLS_LOC_ANY &&
- test_bit(fs->location, priv->cfp.used))
- return -EBUSY;
+ core_writel(priv, reg, CORE_ACT_POL_DATA0);
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
+ /* Set classification ID that needs to be put in Broadcom tag */
+ core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);
- ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+ core_writel(priv, 0, CORE_ACT_POL_DATA2);
- /* We do not support discarding packets, check that the
- * destination port is enabled and that we are within the
- * number of ports supported by the switch
+ /* Configure policer RAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+ if (ret) {
+ pr_err("Policer entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Disable the policer */
+ core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+ /* Now the rate meter */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+ if (ret) {
+ pr_err("Meter entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
+ struct ethtool_tcpip4_spec *v4_spec,
+ unsigned int slice_num,
+ bool mask)
+{
+ u32 reg, offset;
+
+ /* C-Tag [31:24]
+ * UDF_n_A8 [23:8]
+ * UDF_n_A7 [7:0]
*/
- port_num = fs->ring_cookie / 8;
+ reg = 0;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(4);
+ else
+ offset = CORE_CFP_DATA_PORT(4);
+ core_writel(priv, reg, offset);
- if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
- !(BIT(port_num) & ds->enabled_port_mask) ||
- port_num >= priv->hw_params.num_ports)
- return -EINVAL;
+ /* UDF_n_A7 [31:24]
+ * UDF_n_A6 [23:8]
+ * UDF_n_A5 [7:0]
+ */
+ reg = be16_to_cpu(v4_spec->pdst) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A5 [31:24]
+ * UDF_n_A4 [23:8]
+ * UDF_n_A3 [7:0]
+ */
+ reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
+ (u32)be16_to_cpu(v4_spec->psrc) << 8 |
+ (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A3 [31:24]
+ * UDF_n_A2 [23:8]
+ * UDF_n_A1 [7:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
+ (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+ core_writel(priv, reg, offset);
+
+ /* UDF_n_A1 [31:24]
+ * UDF_n_A0 [23:8]
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ core_writel(priv, reg, offset);
+}
+
+static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
+ unsigned int port_num,
+ unsigned int queue_num,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
+ const struct cfp_udf_layout *layout;
+ unsigned int slice_num, rule_index;
+ u8 ip_proto, ip_frag;
+ u8 num_udf;
+ u32 reg;
+ int ret;
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
ip_proto = IPPROTO_TCP;
v4_spec = &fs->h_u.tcp_ip4_spec;
+ v4_m_spec = &fs->m_u.tcp_ip4_spec;
break;
case UDP_V4_FLOW:
ip_proto = IPPROTO_UDP;
v4_spec = &fs->h_u.udp_ip4_spec;
+ v4_m_spec = &fs->m_u.udp_ip4_spec;
break;
default:
return -EINVAL;
}
- /* We only use one UDF slice for now */
- slice_num = 1;
+ ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+ /* Locate the first rule available */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index = find_first_zero_bit(priv->cfp.used,
+ bcm_sf2_cfp_rule_size(priv));
+ else
+ rule_index = fs->location;
+
layout = &udf_tcpip4_layout;
- num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+ /* We only use one UDF slice for now */
+ slice_num = bcm_sf2_get_slice_number(layout, 0);
+ if (slice_num == UDF_NUM_SLICES)
+ return -EINVAL;
+
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
/* Apply the UDF layout for this filter */
- bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
/* Apply to all packets received through this port */
core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
/* S-Tag status [31:30]
* C-Tag status [29:28]
* L2 framing [27:26]
@@ -189,143 +389,398 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
* Reserved [1]
* UDF_Valid[8] [0]
*/
- core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
+ core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
+ ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
+ udf_upper_bits(num_udf),
CORE_CFP_DATA_PORT(6));
+ /* Mask with the specific layout for IPv4 packets */
+ core_writel(priv, layout->udfs[slice_num].mask_value |
+ udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));
+
/* UDF_Valid[7:0] [31:24]
* S-Tag [23:8]
* C-Tag [7:0]
*/
- core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+ core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
+
+ /* Mask all but valid UDFs */
+ core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
+
+ /* Program the match and the mask */
+ bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
+ bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Insert into Action and policer RAMs now */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
+ queue_num, true);
+ if (ret)
+ return ret;
+
+ /* Turn on CFP for this rule now */
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ /* Flag the rule as being used and return it */
+ set_bit(rule_index, priv->cfp.used);
+ set_bit(rule_index, priv->cfp.unique);
+ fs->location = rule_index;
+
+ return 0;
+}
+
+static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
+ const __be32 *ip6_addr, const __be16 port,
+ unsigned int slice_num,
+ bool mask)
+{
+ u32 reg, tmp, val, offset;
/* C-Tag [31:24]
- * UDF_n_A8 [23:8]
- * UDF_n_A7 [7:0]
+ * UDF_n_B8 [23:8] (port)
+ * UDF_n_B7 (upper) [7:0] (addr[15:8])
*/
- core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
+ reg = be32_to_cpu(ip6_addr[3]);
+ val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(4);
+ else
+ offset = CORE_CFP_DATA_PORT(4);
+ core_writel(priv, val, offset);
- /* UDF_n_A7 [31:24]
- * UDF_n_A6 [23:8]
- * UDF_n_A5 [7:0]
+ /* UDF_n_B7 (lower) [31:24] (addr[7:0])
+ * UDF_n_B6 [23:8] (addr[31:16])
+ * UDF_n_B5 (upper) [7:0] (addr[47:40])
*/
- core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
- CORE_CFP_DATA_PORT(3));
+ tmp = be32_to_cpu(ip6_addr[2]);
+ val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
+ ((tmp >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
+ core_writel(priv, val, offset);
- /* UDF_n_A5 [31:24]
- * UDF_n_A4 [23:8]
- * UDF_n_A3 [7:0]
+ /* UDF_n_B5 (lower) [31:24] (addr[39:32])
+ * UDF_n_B4 [23:8] (addr[63:48])
+ * UDF_n_B3 (upper) [7:0] (addr[79:72])
*/
- reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
- (u32)be16_to_cpu(v4_spec->psrc) << 8 |
- (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
- core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
+ reg = be32_to_cpu(ip6_addr[1]);
+ val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
+ ((reg >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+ core_writel(priv, val, offset);
- /* UDF_n_A3 [31:24]
- * UDF_n_A2 [23:8]
- * UDF_n_A1 [7:0]
+ /* UDF_n_B3 (lower) [31:24] (addr[71:64])
+ * UDF_n_B2 [23:8] (addr[95:80])
+ * UDF_n_B1 (upper) [7:0] (addr[111:104])
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
- (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
- core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
+ tmp = be32_to_cpu(ip6_addr[0]);
+ val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
+ ((tmp >> 8) & 0xff);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+ core_writel(priv, val, offset);
- /* UDF_n_A1 [31:24]
- * UDF_n_A0 [23:8]
+ /* UDF_n_B1 (lower) [31:24] (addr[103:96])
+ * UDF_n_B0 [23:8] (addr[127:112])
* Reserved [7:4]
* Slice ID [3:2]
* Slice valid [1:0]
*/
- reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
- (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
- SLICE_NUM(slice_num) | SLICE_VALID;
- core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+ reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ core_writel(priv, reg, offset);
+}
- /* Source port map match */
- core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ unsigned int port_num,
+ unsigned int queue_num,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
+ unsigned int slice_num, rule_index[2];
+ const struct cfp_udf_layout *layout;
+ u8 ip_proto, ip_frag;
+ int ret = 0;
+ u8 num_udf;
+ u32 reg;
- /* Mask with the specific layout for IPv4 packets */
- core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ ip_proto = IPPROTO_TCP;
+ v6_spec = &fs->h_u.tcp_ip6_spec;
+ v6_m_spec = &fs->m_u.tcp_ip6_spec;
+ break;
+ case UDP_V6_FLOW:
+ ip_proto = IPPROTO_UDP;
+ v6_spec = &fs->h_u.udp_ip6_spec;
+ v6_m_spec = &fs->m_u.udp_ip6_spec;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Mask all but valid UDFs */
- core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+ ip_frag = be32_to_cpu(fs->m_ext.data[0]);
- /* Mask all */
- core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
+ layout = &udf_tcpip6_layout;
+ slice_num = bcm_sf2_get_slice_number(layout, 0);
+ if (slice_num == UDF_NUM_SLICES)
+ return -EINVAL;
- /* All other UDFs should be matched with the filter */
- core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
- core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
- core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
- core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
- /* Locate the first rule available */
+ /* Negotiate two indexes: one for the second half, which is what we
+ * will return to user-space, and a second one which is used to store
+ * its first half. That first half does not allow any choice of
+ * placement, so it just needs to find the next available bit. We
+ * return the second half as fs->location because that helps with the
+ * rule lookup later on: since the second half is chained from its
+ * first half, we can easily identify IPv6 CFP rules by checking
+ * whether they carry a CHAIN_ID.
+ *
+ * We also want the second half to have a lower rule_index than its
+ * first half because the HW search is by incrementing addresses.
+ */
if (fs->location == RX_CLS_LOC_ANY)
- rule_index = find_first_zero_bit(priv->cfp.used,
- bcm_sf2_cfp_rule_size(priv));
+ rule_index[0] = find_first_zero_bit(priv->cfp.used,
+ bcm_sf2_cfp_rule_size(priv));
else
- rule_index = fs->location;
+ rule_index[0] = fs->location;
- /* Insert into TCAM now */
- bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+ /* Flag it as used (cleared on error path) such that we can immediately
+ * obtain a second one to chain from.
+ */
+ set_bit(rule_index[0], priv->cfp.used);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
- if (ret) {
- pr_err("TCAM entry at addr %d failed\n", rule_index);
- return ret;
+ rule_index[1] = find_first_zero_bit(priv->cfp.used,
+ bcm_sf2_cfp_rule_size(priv));
+ if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
+ ret = -ENOSPC;
+ goto out_err;
}
- /* Replace ARL derived destination with DST_MAP derived, define
- * which port and queue this should be forwarded to.
- *
- * We have a small oddity where Port 6 just does not have a
- * valid bit here (so we subtract by one).
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
+
+ /* Apply to all packets received through this port */
+ core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+ /* S-Tag status [31:30]
+ * C-Tag status [29:28]
+ * L2 framing [27:26]
+ * L3 framing [25:24]
+ * IP ToS [23:16]
+ * IP proto [15:08]
+ * IP Fragm [7]
+ * Non 1st frag [6]
+ * IP Authen [5]
+ * TTL range [4:3]
+ * PPPoE session [2]
+ * Reserved [1]
+ * UDF_Valid[8] [0]
*/
- queue_num = fs->ring_cookie % 8;
- if (port_num >= 7)
- port_num -= 1;
+ reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
+ ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
- reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
- CHANGE_TC | queue_num << NEW_TC_SHIFT;
+ /* Mask with the specific layout for IPv6 packets including
+ * UDF_Valid[8]
+ */
+ reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
- core_writel(priv, reg, CORE_ACT_POL_DATA0);
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));
- /* Set classification ID that needs to be put in Broadcom tag */
- core_writel(priv, rule_index << CHAIN_ID_SHIFT,
- CORE_ACT_POL_DATA1);
+ /* Mask all but valid UDFs */
+ core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
- core_writel(priv, 0, CORE_ACT_POL_DATA2);
+ /* Slice the IPv6 source address and port */
+ bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
+ slice_num, false);
+ bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
+ slice_num, true);
- /* Configure policer RAM now */
- ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+ /* Insert into TCAM now because we need to insert a second rule */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
- pr_err("Policer entry at %d failed\n", rule_index);
- return ret;
+ pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
+ goto out_err;
}
- /* Disable the policer */
- core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+ /* Insert into Action and policer RAMs now */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+ queue_num, false);
+ if (ret)
+ goto out_err;
- /* Now the rate meter */
- ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+ /* Now deal with the second slice to chain this rule */
+ slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
+ if (slice_num == UDF_NUM_SLICES) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, layout, slice_num);
+
+ /* Chained rule, source port match is coming from the rule we are
+ * chained from.
+ */
+ core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
+ core_writel(priv, 0, CORE_CFP_MASK_PORT(7));
+
+ /*
+ * CHAIN ID [31:24] chain to previous slice
+ * Reserved [23:20]
+ * UDF_Valid[11:8] [19:16]
+ * UDF_Valid[7:0] [15:8]
+ * UDF_n_D11 [7:0]
+ */
+ reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
+ udf_lower_bits(num_udf) << 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
+
+ /* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
+ reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
+ udf_lower_bits(num_udf) << 8;
+ core_writel(priv, reg, CORE_CFP_MASK_PORT(6));
+
+ /* Don't care */
+ core_writel(priv, 0, CORE_CFP_DATA_PORT(5));
+
+ /* Mask all */
+ core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
+
+ bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
+ false);
+ bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
+ SLICE_NUM_MASK, true);
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
if (ret) {
- pr_err("Meter entry at %d failed\n", rule_index);
- return ret;
+ pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
+ goto out_err;
}
+ /* Insert into Action and policer RAMs now, set chain ID to
+ * the one we are chained to
+ */
+ ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
+ queue_num, true);
+ if (ret)
+ goto out_err;
+
/* Turn on CFP for this rule now */
reg = core_readl(priv, CORE_CFP_CTL_REG);
reg |= BIT(port);
core_writel(priv, reg, CORE_CFP_CTL_REG);
- /* Flag the rule as being used and return it */
- set_bit(rule_index, priv->cfp.used);
- fs->location = rule_index;
+ /* Flag the second half rule as being used now, return it as the
+ * location, and flag it as unique while dumping rules
+ */
+ set_bit(rule_index[1], priv->cfp.used);
+ set_bit(rule_index[1], priv->cfp.unique);
+ fs->location = rule_index[1];
- return 0;
+ return ret;
+
+out_err:
+ clear_bit(rule_index[0], priv->cfp.used);
+ return ret;
}
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
- u32 loc)
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int queue_num, port_num;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ /* We do not support discarding packets, check that the
+ * destination port is enabled and that we are within the
+ * number of ports supported by the switch
+ */
+ port_num = fs->ring_cookie / SF2_NUM_EGRESS_QUEUES;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
+ !dsa_is_user_port(ds, port_num) ||
+ port_num >= priv->hw_params.num_ports)
+ return -EINVAL;
+ /*
+ * We have a small oddity where Port 6 just does not have a
+ * valid bit here (so we subtract by one).
+ */
+ queue_num = fs->ring_cookie % SF2_NUM_EGRESS_QUEUES;
+ if (port_num >= 7)
+ port_num -= 1;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
+ queue_num, fs);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
+ queue_num, fs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
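+
+/* Illustration only: a rule handled by the dispatcher above would typically
+ * be installed from user-space with something along the lines of
+ *
+ *   ethtool -N <iface> flow-type tcp4 src-ip 10.0.0.2 dst-ip 192.168.1.1 \
+ *           dst-port 80 action 16 loc 2
+ *
+ * where "action" is fs->ring_cookie, decoded here as destination switch
+ * port * SF2_NUM_EGRESS_QUEUES (8) + egress queue, i.e. port 2, queue 0 in
+ * this example (assuming port 2 is an enabled user port).
+ */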
+
+static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
+ u32 loc, u32 *next_loc)
{
int ret;
u32 reg;
@@ -341,6 +796,14 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
if (ret)
return ret;
+ /* Check if this is possibly an IPv6 rule that would
+ * indicate we need to delete its companion rule
+ * as well
+ */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ if (next_loc)
+ *next_loc = (reg >> 24) & CHAIN_ID_MASK;
+
/* Clear its valid bits */
reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
reg &= ~SLICE_VALID;
@@ -352,10 +815,28 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
return ret;
clear_bit(loc, priv->cfp.used);
+ clear_bit(loc, priv->cfp.unique);
return 0;
}
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
+{
+ u32 next_loc = 0;
+ int ret;
+
+ ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
+ if (ret)
+ return ret;
+
+ /* If this was an IPv6 rule, delete its companion rule too */
+ if (next_loc)
+ ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
+
+ return ret;
+}
+
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
unsigned int i;
@@ -369,93 +850,63 @@ static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
-static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rxnfc *nfc, bool search)
+static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
+ struct ethtool_tcpip4_spec *v4_spec,
+ bool mask)
{
- struct ethtool_tcpip4_spec *v4_spec;
- unsigned int queue_num;
+ u32 reg, offset, ipv4;
u16 src_dst_port;
- u32 reg, ipv4;
- int ret;
-
- if (!search) {
- bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
- } else {
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
- }
-
- /* Extract the destination port */
- nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
- DST_MAP_IB_MASK) - 1;
-
- /* There is no Port 6, so we compensate for that here */
- if (nfc->fs.ring_cookie >= 6)
- nfc->fs.ring_cookie++;
- nfc->fs.ring_cookie *= 8;
-
- /* Extract the destination queue */
- queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
- nfc->fs.ring_cookie += queue_num;
-
- /* Extract the IP protocol */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- nfc->fs.flow_type = TCP_V4_FLOW;
- v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
- break;
- case IPPROTO_UDP:
- nfc->fs.flow_type = UDP_V4_FLOW;
- v4_spec = &nfc->fs.h_u.udp_ip4_spec;
- break;
- default:
- /* Clear to exit the search process */
- if (search)
- core_readl(priv, CORE_CFP_DATA_PORT(7));
- return -EINVAL;
- }
-
- v4_spec->tos = (reg >> 16) & IPPROTO_MASK;
- nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
- reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
+ reg = core_readl(priv, offset);
/* src port [15:8] */
src_dst_port = reg << 8;
- reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+
+ reg = core_readl(priv, offset);
/* src port [7:0] */
src_dst_port |= (reg >> 24);
v4_spec->pdst = cpu_to_be16(src_dst_port);
- nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
- nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
/* IPv4 dst [15:8] */
ipv4 = (reg & 0xff) << 8;
- reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
+
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+
+ reg = core_readl(priv, offset);
/* IPv4 dst [31:16] */
ipv4 |= ((reg >> 8) & 0xffff) << 16;
/* IPv4 dst [7:0] */
ipv4 |= (reg >> 24) & 0xff;
v4_spec->ip4dst = cpu_to_be32(ipv4);
- nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
/* IPv4 src [15:8] */
ipv4 = (reg & 0xff) << 8;
- reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
- if (!(reg & SLICE_VALID))
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ reg = core_readl(priv, offset);
+
+ /* Once the TCAM is programmed, the mask reflects the slice number
+ * being matched, don't bother checking it when reading back the
+ * mask spec
+ */
+ if (!mask && !(reg & SLICE_VALID))
return -EINVAL;
/* IPv4 src [7:0] */
@@ -463,7 +914,233 @@ static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
/* IPv4 src [31:16] */
ipv4 |= ((reg >> 8) & 0xffff) << 16;
v4_spec->ip4src = cpu_to_be32(ipv4);
- nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ return 0;
+}
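+
+/* Reading back with the helper above is the inverse of
+ * bcm_sf2_cfp_slice_ipv4(): the same example words shown earlier would
+ * reproduce pdst = 0x0050, psrc = 0x1234, ip4dst = 192.168.1.1 and
+ * ip4src = 10.0.0.2 (illustrative values only).
+ */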
+
+static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
+ u32 reg;
+ int ret;
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+
+ switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+ case IPPROTO_TCP:
+ fs->flow_type = TCP_V4_FLOW;
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ v4_m_spec = &fs->m_u.tcp_ip4_spec;
+ break;
+ case IPPROTO_UDP:
+ fs->flow_type = UDP_V4_FLOW;
+ v4_spec = &fs->h_u.udp_ip4_spec;
+ v4_m_spec = &fs->m_u.udp_ip4_spec;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
+ v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
+
+ ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
+ if (ret)
+ return ret;
+
+ return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
+}
+
+static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
+ __be32 *ip6_addr, __be16 *port,
+ bool mask)
+{
+ u32 reg, tmp, offset;
+
+ /* C-Tag [31:24]
+ * UDF_n_B8 [23:8] (port)
+ * UDF_n_B7 (upper) [7:0] (addr[15:8])
+ */
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(4);
+ else
+ offset = CORE_CFP_DATA_PORT(4);
+ reg = core_readl(priv, offset);
+ *port = cpu_to_be32(reg) >> 8;
+ tmp = (u32)(reg & 0xff) << 8;
+
+ /* UDF_n_B7 (lower) [31:24] (addr[7:0])
+ * UDF_n_B6 [23:8] (addr[31:16])
+ * UDF_n_B5 (upper) [7:0] (addr[47:40])
+ */
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(3);
+ else
+ offset = CORE_CFP_DATA_PORT(3);
+ reg = core_readl(priv, offset);
+ tmp |= (reg >> 24) & 0xff;
+ tmp |= (u32)((reg >> 8) << 16);
+ ip6_addr[3] = cpu_to_be32(tmp);
+ tmp = (u32)(reg & 0xff) << 8;
+
+ /* UDF_n_B5 (lower) [31:24] (addr[39:32])
+ * UDF_n_B4 [23:8] (addr[63:48])
+ * UDF_n_B3 (upper) [7:0] (addr[79:72])
+ */
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(2);
+ else
+ offset = CORE_CFP_DATA_PORT(2);
+ reg = core_readl(priv, offset);
+ tmp |= (reg >> 24) & 0xff;
+ tmp |= (u32)((reg >> 8) << 16);
+ ip6_addr[2] = cpu_to_be32(tmp);
+ tmp = (u32)(reg & 0xff) << 8;
+
+ /* UDF_n_B3 (lower) [31:24] (addr[71:64])
+ * UDF_n_B2 [23:8] (addr[95:80])
+ * UDF_n_B1 (upper) [7:0] (addr[111:104])
+ */
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(1);
+ else
+ offset = CORE_CFP_DATA_PORT(1);
+ reg = core_readl(priv, offset);
+ tmp |= (reg >> 24) & 0xff;
+ tmp |= (u32)((reg >> 8) << 16);
+ ip6_addr[1] = cpu_to_be32(tmp);
+ tmp = (u32)(reg & 0xff) << 8;
+
+ /* UDF_n_B1 (lower) [31:24] (addr[103:96])
+ * UDF_n_B0 [23:8] (addr[127:112])
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ if (mask)
+ offset = CORE_CFP_MASK_PORT(0);
+ else
+ offset = CORE_CFP_DATA_PORT(0);
+ reg = core_readl(priv, offset);
+ tmp |= (reg >> 24) & 0xff;
+ tmp |= (u32)((reg >> 8) << 16);
+ ip6_addr[0] = cpu_to_be32(tmp);
+
+ if (!mask && !(reg & SLICE_VALID))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs,
+ u32 next_loc)
+{
+ struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
+ u32 reg;
+ int ret;
+
+ /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec, so we are fine
+ * assuming tcp_ip6_spec here, it being a union.
+ */
+ v6_spec = &fs->h_u.tcp_ip6_spec;
+ v6_m_spec = &fs->m_u.tcp_ip6_spec;
+
+ /* Read the second half first */
+ ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
+ false);
+ if (ret)
+ return ret;
+
+ ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
+ &v6_m_spec->pdst, true);
+ if (ret)
+ return ret;
+
+ /* Read last to avoid next entry clobbering the results during search
+ * operations. We would not have the port enabled for this rule, so
+ * don't bother checking it.
+ */
+ (void)core_readl(priv, CORE_CFP_DATA_PORT(7));
+
+ /* The slice number is valid, so read the rule we are chained from now
+ * which is our first half.
+ */
+ bcm_sf2_cfp_rule_addr_set(priv, next_loc);
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+
+ switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+ case IPPROTO_TCP:
+ fs->flow_type = TCP_V6_FLOW;
+ break;
+ case IPPROTO_UDP:
+ fs->flow_type = UDP_V6_FLOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
+ false);
+ if (ret)
+ return ret;
+
+ return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
+ &v6_m_spec->psrc, true);
+}
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 reg, ipv4_or_chain_id;
+ unsigned int queue_num;
+ int ret;
+
+ bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
+ if (ret)
+ return ret;
+
+ reg = core_readl(priv, CORE_ACT_POL_DATA0);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ /* Extract the destination port */
+ nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
+ DST_MAP_IB_MASK) - 1;
+
+ /* There is no Port 6, so we compensate for that here */
+ if (nfc->fs.ring_cookie >= 6)
+ nfc->fs.ring_cookie++;
+ nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
+
+ /* Extract the destination queue */
+ queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
+ nfc->fs.ring_cookie += queue_num;
+
+ /* Extract the L3_FRAMING or CHAIN_ID */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+
+ /* With IPv6 rules this would contain a non-zero chain ID since
+ * we reserve entry 0 and it cannot be used. So if we read 0 here
+ * this means an IPv4 rule.
+ */
+ ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
+ if (ipv4_or_chain_id == 0)
+ ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
+ else
+ ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
+ ipv4_or_chain_id);
+ if (ret)
+ return ret;
/* Read last to avoid next entry clobbering the results during search
* operations
@@ -486,44 +1163,11 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
u32 *rule_locs)
{
unsigned int index = 1, rules_cnt = 0;
- int ret;
- u32 reg;
- /* Do not poll on OP_STR_DONE to be self-clearing for search
- * operations, we cannot use bcm_sf2_cfp_op here because it completes
- * on clearing OP_STR_DONE which won't clear until the entire search
- * operation is over.
- */
- reg = core_readl(priv, CORE_CFP_ACC);
- reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
- reg |= index << XCESS_ADDR_SHIFT;
- reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
- reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
- core_writel(priv, reg, CORE_CFP_ACC);
-
- do {
- /* Wait for results to be ready */
- reg = core_readl(priv, CORE_CFP_ACC);
-
- /* Extract the address we are searching */
- index = reg >> XCESS_ADDR_SHIFT;
- index &= XCESS_ADDR_MASK;
-
- /* We have a valid search result, so flag it accordingly */
- if (reg & SEARCH_STS) {
- ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
- if (ret)
- continue;
-
- rule_locs[rules_cnt] = index;
- rules_cnt++;
- }
-
- /* Search is over break out */
- if (!(reg & OP_STR_DONE))
- break;
-
- } while (index < priv->num_cfp_rules);
+ for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
+ rule_locs[rules_cnt] = index;
+ rules_cnt++;
+ }
/* Put the TCAM size here */
nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -543,13 +1187,13 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
switch (nfc->cmd) {
case ETHTOOL_GRXCLSRLCNT:
/* Subtract the default, unusable rule */
- nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+ nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
priv->num_cfp_rules) - 1;
/* We support specifying rule locations */
nfc->data |= RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRULE:
- ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
break;
case ETHTOOL_GRXCLSRLALL:
ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 49695fcc2ea8..3ccd5a865dcb 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -115,6 +115,24 @@ enum bcm_sf2_reg_offs {
#define P7_IRQ_OFF 0
#define P_IRQ_OFF(x) ((6 - (x)) * P_NUM_IRQ)
+/* Register set relative to 'ACB' */
+#define ACB_CONTROL 0x00
+#define ACB_EN (1 << 0)
+#define ACB_ALGORITHM (1 << 1)
+#define ACB_FLUSH_SHIFT 2
+#define ACB_FLUSH_MASK 0x3
+
+#define ACB_QUEUE_0_CFG 0x08
+#define XOFF_THRESHOLD_MASK 0x7ff
+#define XON_EN (1 << 11)
+#define TOTAL_XOFF_THRESHOLD_SHIFT 12
+#define TOTAL_XOFF_THRESHOLD_MASK 0x7ff
+#define TOTAL_XOFF_EN (1 << 23)
+#define TOTAL_XON_EN (1 << 24)
+#define PKTLEN_SHIFT 25
+#define PKTLEN_MASK 0x3f
+#define ACB_QUEUE_CFG(x) (ACB_QUEUE_0_CFG + ((x) * 0x4))
+
/* Register set relative to 'CORE' */
#define CORE_G_PCTL_PORT0 0x00000
#define CORE_G_PCTL_PORT(x) (CORE_G_PCTL_PORT0 + (x * 0x4))
@@ -205,16 +223,8 @@ enum bcm_sf2_reg_offs {
#define CORE_IMP0_PRT_ID 0x0804
-#define CORE_BRCM_HDR_CTRL 0x0080c
-#define BRCM_HDR_EN_P8 (1 << 0)
-#define BRCM_HDR_EN_P5 (1 << 1)
-#define BRCM_HDR_EN_P7 (1 << 2)
-
#define CORE_RST_MIB_CNT_EN 0x0950
-#define CORE_BRCM_HDR_RX_DIS 0x0980
-#define CORE_BRCM_HDR_TX_DIS 0x0988
-
#define CORE_ARLA_VTBL_RWCTRL 0x1600
#define ARLA_VTBL_CMD_WRITE 0
#define ARLA_VTBL_CMD_READ 1
@@ -245,6 +255,11 @@ enum bcm_sf2_reg_offs {
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
+#define CORE_TXQ_THD_PAUSE_QN_PORT_0 0x2c80
+#define TXQ_PAUSE_THD_MASK 0x7ff
+#define CORE_TXQ_THD_PAUSE_QN_PORT(x) (CORE_TXQ_THD_PAUSE_QN_PORT_0 + \
+ (x) * 0x8)
+
#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8))
#define CFI_SHIFT 12
#define PRI_SHIFT 13
@@ -252,9 +267,6 @@ enum bcm_sf2_reg_offs {
#define CORE_JOIN_ALL_VLAN_EN 0xd140
-#define CORE_EEE_EN_CTRL 0x24800
-#define CORE_EEE_LPI_INDICATE 0x24810
-
#define CORE_CFP_ACC 0x28000
#define OP_STR_DONE (1 << 0)
#define OP_SEL_SHIFT 1
@@ -290,14 +302,18 @@ enum bcm_sf2_reg_offs {
/* UDF_DATA7 */
#define L3_FRAMING_SHIFT 24
#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT)
+#define IPTOS_SHIFT 16
+#define IPTOS_MASK 0xff
#define IPPROTO_SHIFT 8
#define IPPROTO_MASK (0xff << IPPROTO_SHIFT)
-#define IP_FRAG (1 << 7)
+#define IP_FRAG_SHIFT 7
+#define IP_FRAG (1 << IP_FRAG_SHIFT)
/* UDF_DATA0 */
#define SLICE_VALID 3
#define SLICE_NUM_SHIFT 2
#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT)
+#define SLICE_NUM_MASK 0x3
#define CORE_CFP_MASK_PORT_0 0x280c0
@@ -393,8 +409,15 @@ enum bcm_sf2_reg_offs {
#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT)
#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT)
+/* IPv6 slices */
+#define CORE_UDF_0_B_0_8_PORT_0 0x28500
+
+/* IPv6 chained slices */
+#define CORE_UDF_0_D_0_11_PORT_0 0x28680
+
/* Number of slices for IPv4, IPv6 and non-IP */
-#define UDF_NUM_SLICES 9
+#define UDF_NUM_SLICES 4
+#define UDFS_PER_SLICE 9
/* Spacing between different slices */
#define UDF_SLICE_OFFSET 0x40
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index d55051abf4ed..bb71d3d6f65b 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -64,7 +64,8 @@ struct dsa_loop_priv {
static struct phy_device *phydevs[PHY_MAX_ADDR];
-static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
+ int port)
{
dev_dbg(ds->dev, "%s\n", __func__);
@@ -110,13 +111,6 @@ static void dsa_loop_get_ethtool_stats(struct dsa_switch *ds, int port,
data[i] = ps->ports[port].mib[i].val;
}
-static int dsa_loop_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- dev_dbg(ds->dev, "%s\n", __func__);
-
- return 0;
-}
-
static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct dsa_loop_priv *ps = ds->priv;
@@ -263,7 +257,6 @@ static const struct dsa_switch_ops dsa_loop_driver = {
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
- .set_addr = dsa_loop_set_addr,
.phy_read = dsa_loop_phy_read,
.phy_write = dsa_loop_phy_write,
.port_bridge_join = dsa_loop_port_bridge_join,
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
index dc396877fc95..93e5c15d0efd 100644
--- a/drivers/net/dsa/dsa_loop.h
+++ b/drivers/net/dsa/dsa_loop.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DSA_LOOP_H
#define __DSA_LOOP_H
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b471413d3df9..b24566bb74d2 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -17,6 +17,9 @@
#include <linux/regmap.h>
#include <linux/mutex.h>
#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -57,6 +60,7 @@
#define LAN9303_SWITCH_CSR_CMD_LANES (BIT(19) | BIT(18) | BIT(17) | BIT(16))
#define LAN9303_VIRT_PHY_BASE 0x70
#define LAN9303_VIRT_SPECIAL_CTRL 0x77
+#define LAN9303_VIRT_SPECIAL_TURBO BIT(10) /* Turbo MII Enable */
/*13.4 Switch Fabric Control and Status Registers
* Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA.
@@ -121,6 +125,21 @@
#define LAN9303_MAC_RX_CFG_2 0x0c01
#define LAN9303_MAC_TX_CFG_2 0x0c40
#define LAN9303_SWE_ALR_CMD 0x1800
+# define LAN9303_ALR_CMD_MAKE_ENTRY BIT(2)
+# define LAN9303_ALR_CMD_GET_FIRST BIT(1)
+# define LAN9303_ALR_CMD_GET_NEXT BIT(0)
+#define LAN9303_SWE_ALR_WR_DAT_0 0x1801
+#define LAN9303_SWE_ALR_WR_DAT_1 0x1802
+# define LAN9303_ALR_DAT1_VALID BIT(26)
+# define LAN9303_ALR_DAT1_END_OF_TABL BIT(25)
+# define LAN9303_ALR_DAT1_AGE_OVERRID BIT(25)
+# define LAN9303_ALR_DAT1_STATIC BIT(24)
+# define LAN9303_ALR_DAT1_PORT_BITOFFS 16
+# define LAN9303_ALR_DAT1_PORT_MASK (7 << LAN9303_ALR_DAT1_PORT_BITOFFS)
+#define LAN9303_SWE_ALR_RD_DAT_0 0x1805
+#define LAN9303_SWE_ALR_RD_DAT_1 0x1806
+#define LAN9303_SWE_ALR_CMD_STS 0x1808
+# define ALR_STS_MAKE_PEND BIT(0)
#define LAN9303_SWE_VLAN_CMD 0x180b
# define LAN9303_SWE_VLAN_CMD_RNW BIT(5)
# define LAN9303_SWE_VLAN_CMD_PVIDNVLAN BIT(4)
@@ -134,6 +153,8 @@
# define LAN9303_SWE_VLAN_UNTAG_PORT0 BIT(12)
#define LAN9303_SWE_VLAN_CMD_STS 0x1810
#define LAN9303_SWE_GLB_INGRESS_CFG 0x1840
+# define LAN9303_SWE_GLB_INGR_IGMP_TRAP BIT(7)
+# define LAN9303_SWE_GLB_INGR_IGMP_PORT(p) BIT(10 + p)
#define LAN9303_SWE_PORT_STATE 0x1843
# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT2 (0)
# define LAN9303_SWE_PORT_STATE_LEARNING_PORT2 BIT(5)
@@ -144,6 +165,7 @@
# define LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 (0)
# define LAN9303_SWE_PORT_STATE_LEARNING_PORT0 BIT(1)
# define LAN9303_SWE_PORT_STATE_BLOCKING_PORT0 BIT(0)
+# define LAN9303_SWE_PORT_STATE_DISABLED_PORT0 (3)
#define LAN9303_SWE_PORT_MIRROR 0x1846
# define LAN9303_SWE_PORT_MIRROR_SNIFF_ALL BIT(8)
# define LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT2 BIT(7)
@@ -154,7 +176,9 @@
# define LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT0 BIT(2)
# define LAN9303_SWE_PORT_MIRROR_ENABLE_RX_MIRRORING BIT(1)
# define LAN9303_SWE_PORT_MIRROR_ENABLE_TX_MIRRORING BIT(0)
+# define LAN9303_SWE_PORT_MIRROR_DISABLED 0
#define LAN9303_SWE_INGRESS_PORT_TYPE 0x1847
+#define LAN9303_SWE_INGRESS_PORT_TYPE_VLAN 3
#define LAN9303_BM_CFG 0x1c00
#define LAN9303_BM_EGRSS_PORT_TYPE 0x1c0c
# define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT2 (BIT(17) | BIT(16))
@@ -262,7 +286,7 @@ static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
}
if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EIO;
@@ -354,7 +378,7 @@ static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
}
if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EIO;
@@ -428,6 +452,21 @@ on_error:
return ret;
}
+static int lan9303_write_switch_reg_mask(struct lan9303 *chip, u16 regnum,
+ u32 val, u32 mask)
+{
+ int ret;
+ u32 reg;
+
+ ret = lan9303_read_switch_reg(chip, regnum, &reg);
+ if (ret)
+ return ret;
+
+ reg = (reg & ~mask) | val;
+
+ return lan9303_write_switch_reg(chip, regnum, reg);
+}
+
static int lan9303_write_switch_port(struct lan9303 *chip, int port,
u16 regnum, u32 val)
{
@@ -472,6 +511,220 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
return 0;
}
+/* Map ALR-port bits to port bitmap, and back */
+static const int alrport_2_portmap[] = {1, 2, 4, 0, 3, 5, 6, 7 };
+static const int portmap_2_alrport[] = {3, 0, 1, 4, 2, 5, 6, 7 };
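+
+/* Example (illustrative): an entry learned on port 2 alone has an ALR port
+ * field of 2, which maps to port bitmap 0b100 (BIT(2)); conversely, a
+ * static entry for ports 0 and 1 (bitmap 0b011) is stored with ALR port
+ * field 4. The two tables above are inverses of each other.
+ */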
+
+/* Return pointer to first free ALR cache entry, return NULL if none */
+static struct lan9303_alr_cache_entry *
+lan9303_alr_cache_find_free(struct lan9303 *chip)
+{
+ int i;
+ struct lan9303_alr_cache_entry *entr = chip->alr_cache;
+
+ for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
+ if (entr->port_map == 0)
+ return entr;
+
+ return NULL;
+}
+
+/* Return pointer to ALR cache entry matching MAC address */
+static struct lan9303_alr_cache_entry *
+lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
+{
+ int i;
+ struct lan9303_alr_cache_entry *entr = chip->alr_cache;
+
+ BUILD_BUG_ON_MSG(sizeof(struct lan9303_alr_cache_entry) & 1,
+ "ether_addr_equal require u16 alignment");
+
+ for (i = 0; i < LAN9303_NUM_ALR_RECORDS; i++, entr++)
+ if (ether_addr_equal(entr->mac_addr, mac_addr))
+ return entr;
+
+ return NULL;
+}
+
+/* Wait a while until mask & reg == value. Otherwise return timeout. */
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
+ int mask, char value)
+{
+ int i;
+
+ for (i = 0; i < 0x1000; i++) {
+ u32 reg;
+
+ lan9303_read_switch_reg(chip, regno, &reg);
+ if ((reg & mask) == value)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+ return -ETIMEDOUT;
+}
+
+static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
+{
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_0, dat0);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_MAKE_ENTRY);
+ lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
+ 0);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+
+ return 0;
+}
+
+typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1,
+ int portmap, void *ctx);
+
+static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
+{
+ int i;
+
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_GET_FIRST);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+
+ for (i = 1; i < LAN9303_NUM_ALR_RECORDS; i++) {
+ u32 dat0, dat1;
+ int alrport, portmap;
+
+ lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_0, &dat0);
+ lan9303_read_switch_reg(chip, LAN9303_SWE_ALR_RD_DAT_1, &dat1);
+ if (dat1 & LAN9303_ALR_DAT1_END_OF_TABL)
+ break;
+
+ alrport = (dat1 & LAN9303_ALR_DAT1_PORT_MASK) >>
+ LAN9303_ALR_DAT1_PORT_BITOFFS;
+ portmap = alrport_2_portmap[alrport];
+
+ cb(chip, dat0, dat1, portmap, ctx);
+
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
+ LAN9303_ALR_CMD_GET_NEXT);
+ lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
+ }
+}
+
+static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
+{
+ mac[0] = (dat0 >> 0) & 0xff;
+ mac[1] = (dat0 >> 8) & 0xff;
+ mac[2] = (dat0 >> 16) & 0xff;
+ mac[3] = (dat0 >> 24) & 0xff;
+ mac[4] = (dat1 >> 0) & 0xff;
+ mac[5] = (dat1 >> 8) & 0xff;
+}
+
+struct del_port_learned_ctx {
+ int port;
+};
+
+/* Clear learned (non-static) entry on given port */
+static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
+{
+ struct del_port_learned_ctx *del_ctx = ctx;
+ int port = del_ctx->port;
+
+ if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC))
+ return;
+
+ /* learned entries have only one port, we can just delete */
+ dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */
+ lan9303_alr_make_entry_raw(chip, dat0, dat1);
+}
+
+struct port_fdb_dump_ctx {
+ int port;
+ void *data;
+ dsa_fdb_dump_cb_t *cb;
+};
+
+static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0,
+ u32 dat1, int portmap, void *ctx)
+{
+ struct port_fdb_dump_ctx *dump_ctx = ctx;
+ u8 mac[ETH_ALEN];
+ bool is_static;
+
+ if ((BIT(dump_ctx->port) & portmap) == 0)
+ return;
+
+ alr_reg_to_mac(dat0, dat1, mac);
+ is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC);
+ dump_ctx->cb(mac, 0, is_static, dump_ctx->data);
+}
+
+/* Set a static ALR entry. Delete entry if port_map is zero */
+static void lan9303_alr_set_entry(struct lan9303 *chip, const u8 *mac,
+ u8 port_map, bool stp_override)
+{
+ u32 dat0, dat1, alr_port;
+
+ dev_dbg(chip->dev, "%s(%pM, %d)\n", __func__, mac, port_map);
+ dat1 = LAN9303_ALR_DAT1_STATIC;
+ if (port_map)
+ dat1 |= LAN9303_ALR_DAT1_VALID;
+ /* otherwise no ports: delete entry */
+ if (stp_override)
+ dat1 |= LAN9303_ALR_DAT1_AGE_OVERRID;
+
+ alr_port = portmap_2_alrport[port_map & 7];
+ dat1 &= ~LAN9303_ALR_DAT1_PORT_MASK;
+ dat1 |= alr_port << LAN9303_ALR_DAT1_PORT_BITOFFS;
+
+ dat0 = 0;
+ dat0 |= (mac[0] << 0);
+ dat0 |= (mac[1] << 8);
+ dat0 |= (mac[2] << 16);
+ dat0 |= (mac[3] << 24);
+
+ dat1 |= (mac[4] << 0);
+ dat1 |= (mac[5] << 8);
+
+ lan9303_alr_make_entry_raw(chip, dat0, dat1);
+}
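+
+/* Packing example (illustrative): for mac = 02:00:00:00:00:01 the raw entry
+ * is dat0 = 0x00000002 (mac[0..3], low byte first) and dat1 = 0x00000100
+ * (mac[4..5]) OR'ed with the VALID/STATIC/port bits computed above.
+ */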
+
+/* Add port to static ALR entry, create new static entry if needed */
+static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
+ bool stp_override)
+{
+ struct lan9303_alr_cache_entry *entr;
+
+ entr = lan9303_alr_cache_find_mac(chip, mac);
+ if (!entr) { /* New entry */
+ entr = lan9303_alr_cache_find_free(chip);
+ if (!entr)
+ return -ENOSPC;
+ ether_addr_copy(entr->mac_addr, mac);
+ }
+ entr->port_map |= BIT(port);
+ entr->stp_override = stp_override;
+ lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+
+ return 0;
+}
+
+/* Delete static port from ALR entry, delete entry if last port */
+static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
+{
+ struct lan9303_alr_cache_entry *entr;
+
+ entr = lan9303_alr_cache_find_mac(chip, mac);
+ if (!entr)
+ return 0; /* no static entry found */
+
+ entr->port_map &= ~BIT(port);
+ if (entr->port_map == 0) /* zero means it's free again */
+ eth_zero_addr(entr->mac_addr);
+ lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
+
+ return 0;
+}
+
static int lan9303_disable_processing_port(struct lan9303 *chip,
unsigned int port)
{
@@ -508,16 +761,36 @@ static int lan9303_enable_processing_port(struct lan9303 *chip,
LAN9303_MAC_TX_CFG_X_TX_ENABLE);
}
+/* forward special tagged packets from port 0 to port 1 *or* port 2 */
+static int lan9303_setup_tagging(struct lan9303 *chip)
+{
+ int ret;
+ u32 val;
+ /* enable defining the destination port via special VLAN tagging
+ * for port 0
+ */
+ ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
+ LAN9303_SWE_INGRESS_PORT_TYPE_VLAN);
+ if (ret)
+ return ret;
+
+ /* tag incoming packets at port 1 and 2 on their way to port 0 to be
+ * able to discover their source port
+ */
+ val = LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0;
+ return lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE, val);
+}
+
/* We want a special working switch:
* - do not forward packets between port 1 and 2
* - forward everything from port 1 to port 0
* - forward everything from port 2 to port 0
- * - forward special tagged packets from port 0 to port 1 *or* port 2
*/
static int lan9303_separate_ports(struct lan9303 *chip)
{
int ret;
+ lan9303_alr_del_port(chip, eth_stp_addr, 0);
ret = lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
LAN9303_SWE_PORT_MIRROR_SNIFFER_PORT0 |
LAN9303_SWE_PORT_MIRROR_MIRRORED_PORT1 |
@@ -527,22 +800,6 @@ static int lan9303_separate_ports(struct lan9303 *chip)
if (ret)
return ret;
- /* enable defining the destination port via special VLAN tagging
- * for port 0
- */
- ret = lan9303_write_switch_reg(chip, LAN9303_SWE_INGRESS_PORT_TYPE,
- 0x03);
- if (ret)
- return ret;
-
- /* tag incoming packets at port 1 and 2 on their way to port 0 to be
- * able to discover their source port
- */
- ret = lan9303_write_switch_reg(chip, LAN9303_BM_EGRSS_PORT_TYPE,
- LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0);
- if (ret)
- return ret;
-
/* prevent port 1 and 2 from forwarding packets by their own */
return lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
LAN9303_SWE_PORT_STATE_FORWARDING_PORT0 |
@@ -550,6 +807,17 @@ static int lan9303_separate_ports(struct lan9303 *chip)
LAN9303_SWE_PORT_STATE_BLOCKING_PORT2);
}
+static void lan9303_bridge_ports(struct lan9303 *chip)
+{
+ /* ports bridged: remove mirroring */
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_MIRROR,
+ LAN9303_SWE_PORT_MIRROR_DISABLED);
+
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ chip->swe_port_state);
+ lan9303_alr_add_port(chip, eth_stp_addr, 0, true);
+}
+
static int lan9303_handle_reset(struct lan9303 *chip)
{
if (!chip->reset_gpio)
@@ -569,7 +837,7 @@ static int lan9303_disable_processing(struct lan9303 *chip)
{
int p;
- for (p = 0; p < LAN9303_NUM_PORTS; p++) {
+ for (p = 1; p < LAN9303_NUM_PORTS; p++) {
int ret = lan9303_disable_processing_port(chip, p);
if (ret)
@@ -626,7 +894,8 @@ static int lan9303_check_device(struct lan9303 *chip)
/* ---------------------------- DSA -----------------------------------*/
-static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol lan9303_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
return DSA_TAG_PROTO_LAN9303;
}
@@ -642,6 +911,10 @@ static int lan9303_setup(struct dsa_switch *ds)
return -EINVAL;
}
+ ret = lan9303_setup_tagging(chip);
+ if (ret)
+ dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
+
ret = lan9303_separate_ports(chip);
if (ret)
dev_err(chip->dev, "failed to separate ports %d\n", ret);
@@ -650,6 +923,15 @@ static int lan9303_setup(struct dsa_switch *ds)
if (ret)
dev_err(chip->dev, "failed to re-enable switching %d\n", ret);
+ /* Trap IGMP to port 0 */
+ ret = lan9303_write_switch_reg_mask(chip, LAN9303_SWE_GLB_INGRESS_CFG,
+ LAN9303_SWE_GLB_INGR_IGMP_TRAP |
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(0),
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(1) |
+ LAN9303_SWE_GLB_INGR_IGMP_PORT(2));
+ if (ret)
+ dev_err(chip->dev, "failed to setup IGMP trap %d\n", ret);
+
return 0;
}
@@ -760,22 +1042,49 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
return chip->ops->phy_write(chip, phy, regnum, val);
}
-static int lan9303_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+static void lan9303_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
{
struct lan9303 *chip = ds->priv;
+ int ctl, res;
- /* enable internal packet processing */
- switch (port) {
- case 1:
- case 2:
- return lan9303_enable_processing_port(chip, port);
- default:
- dev_dbg(chip->dev,
- "Error: request to power up invalid port %d\n", port);
+ if (!phy_is_pseudo_fixed_link(phydev))
+ return;
+
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+
+ ctl &= ~BMCR_ANENABLE;
+
+ if (phydev->speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ else if (phydev->speed == SPEED_10)
+ ctl &= ~BMCR_SPEED100;
+ else
+ dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
+
+ if (phydev->duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ else
+ ctl &= ~BMCR_FULLDPLX;
+
+ res = lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ if (port == chip->phy_addr_sel_strap) {
+ /* Virtual Phy: Remove Turbo 200Mbit mode */
+ lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
+
+ ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ res = regmap_write(chip->regmap,
+ LAN9303_VIRT_SPECIAL_CTRL, ctl);
}
+}
- return -ENODEV;
+static int lan9303_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct lan9303 *chip = ds->priv;
+
+ return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port,
@@ -783,18 +1092,171 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
{
struct lan9303 *chip = ds->priv;
- /* disable internal packet processing */
- switch (port) {
- case 1:
- case 2:
- lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
- MII_BMCR, BMCR_PDOWN);
+ lan9303_disable_processing_port(chip, port);
+ lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
+ MII_BMCR, BMCR_PDOWN);
+}
+
+static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
+ if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) {
+ lan9303_bridge_ports(chip);
+ chip->is_bridged = true; /* unleash stp_state_set() */
+ }
+
+ return 0;
+}
+
+static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
+ if (chip->is_bridged) {
+ lan9303_separate_ports(chip);
+ chip->is_bridged = false;
+ }
+}
+
+static void lan9303_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ int portmask, portstate;
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(port %d, state %d)\n",
+ __func__, port, state);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ portstate = LAN9303_SWE_PORT_STATE_BLOCKING_PORT0;
+ break;
+ case BR_STATE_LEARNING:
+ portstate = LAN9303_SWE_PORT_STATE_LEARNING_PORT0;
+ break;
+ case BR_STATE_FORWARDING:
+ portstate = LAN9303_SWE_PORT_STATE_FORWARDING_PORT0;
break;
default:
- dev_dbg(chip->dev,
- "Error: request to power down invalid port %d\n", port);
+ portstate = LAN9303_SWE_PORT_STATE_DISABLED_PORT0;
+ dev_err(chip->dev, "unknown stp state: port %d, state %d\n",
+ port, state);
}
+
+ portmask = 0x3 << (port * 2);
+ portstate <<= (port * 2);
+
+ chip->swe_port_state = (chip->swe_port_state & ~portmask) | portstate;
+
+ if (chip->is_bridged)
+ lan9303_write_switch_reg(chip, LAN9303_SWE_PORT_STATE,
+ chip->swe_port_state);
+ /* else: touching SWE_PORT_STATE would break port separation */
+}
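+
+/* Shift example (illustrative): the per-port state fields are 2 bits wide,
+ * so BR_STATE_LEARNING on port 2 gives portstate = BIT(1) << 4 = BIT(5),
+ * which is LAN9303_SWE_PORT_STATE_LEARNING_PORT2.
+ */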
+
+static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct lan9303 *chip = ds->priv;
+ struct del_port_learned_ctx del_ctx = {
+ .port = port,
+ };
+
+ dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
+ lan9303_alr_loop(chip, alr_loop_cb_del_port_learned, &del_ctx);
+}
+
+static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+ if (vid)
+ return -EOPNOTSUPP;
+
+ return lan9303_alr_add_port(chip, addr, port, false);
+}
+
+static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
+ if (vid)
+ return -EOPNOTSUPP;
+ lan9303_alr_del_port(chip, addr, port);
+
+ return 0;
+}
+
+static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct lan9303 *chip = ds->priv;
+ struct port_fdb_dump_ctx dump_ctx = {
+ .port = port,
+ .data = data,
+ .cb = cb,
+ };
+
+ dev_dbg(chip->dev, "%s(%d)\n", __func__, port);
+ lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx);
+
+ return 0;
+}
+
+static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ if (mdb->vid)
+ return -EOPNOTSUPP;
+ if (lan9303_alr_cache_find_mac(chip, mdb->addr))
+ return 0;
+ if (!lan9303_alr_cache_find_free(chip))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ lan9303_alr_add_port(chip, mdb->addr, port, false);
+}
+
+static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, mdb->addr,
+ mdb->vid);
+ if (mdb->vid)
+ return -EOPNOTSUPP;
+ lan9303_alr_del_port(chip, mdb->addr, port);
+
+ return 0;
}
static const struct dsa_switch_ops lan9303_switch_ops = {
@@ -803,10 +1265,21 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.get_strings = lan9303_get_strings,
.phy_read = lan9303_phy_read,
.phy_write = lan9303_phy_write,
+ .adjust_link = lan9303_adjust_link,
.get_ethtool_stats = lan9303_get_ethtool_stats,
.get_sset_count = lan9303_get_sset_count,
.port_enable = lan9303_port_enable,
.port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_prepare = lan9303_port_mdb_prepare,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
@@ -828,7 +1301,7 @@ static void lan9303_probe_reset_gpio(struct lan9303 *chip,
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
GPIOD_OUT_LOW);
- if (!chip->reset_gpio) {
+ if (IS_ERR(chip->reset_gpio)) {
dev_dbg(chip->dev, "No reset GPIO defined\n");
return;
}
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
index 4d8be555ff4d..11f590b64701 100644
--- a/drivers/net/dsa/lan9303.h
+++ b/drivers/net/dsa/lan9303.h
@@ -1,27 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/regmap.h>
#include <linux/device.h>
#include <net/dsa.h>
-struct lan9303;
-
-struct lan9303_phy_ops {
- /* PHY 1 and 2 access*/
- int (*phy_read)(struct lan9303 *chip, int port, int regnum);
- int (*phy_write)(struct lan9303 *chip, int port,
- int regnum, u16 val);
-};
-
-struct lan9303 {
- struct device *dev;
- struct regmap *regmap;
- struct regmap_irq_chip_data *irq_data;
- struct gpio_desc *reset_gpio;
- u32 reset_duration; /* in [ms] */
- bool phy_addr_sel_strap;
- struct dsa_switch *ds;
- struct mutex indirect_mutex; /* protect indexed register access */
- const struct lan9303_phy_ops *ops;
-};
+#include <linux/dsa/lan9303.h>
extern const struct regmap_access_table lan9303_register_set;
extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 24ec20f7f444..909a7e864246 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -50,7 +50,7 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return -ENOMEM;
sw_dev->chip.regmap = devm_regmap_init_i2c(client,
- &lan9303_i2c_regmap_config);
+ &lan9303_i2c_regmap_config);
if (IS_ERR(sw_dev->chip.regmap)) {
ret = PTR_ERR(sw_dev->chip.regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index fc16668a487f..cc9c2ea1c4fe 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -67,14 +67,15 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
return 0;
}
-int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg, u16 val)
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+ u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
}
-int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
@@ -115,7 +116,7 @@ static int lan9303_mdio_probe(struct mdio_device *mdiodev)
return -ENOMEM;
sw_dev->chip.regmap = devm_regmap_init(&mdiodev->dev, NULL, sw_dev,
- &lan9303_mdio_regmap_config);
+ &lan9303_mdio_regmap_config);
if (IS_ERR(sw_dev->chip.regmap)) {
ret = PTR_ERR(sw_dev->chip.regmap);
dev_err(&mdiodev->dev, "regmap init failed: %d\n", ret);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 56cd6d365352..b5be93a1e0df 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -394,7 +394,8 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
}
-static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
return DSA_TAG_PROTO_KSZ;
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index c142b97add2c..2820d69810b3 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -564,7 +564,8 @@ static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
return mdiobus_read_nested(priv->bus, port, regnum);
}
-int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 val)
{
struct mt7530_priv *priv = ds->priv;
@@ -687,7 +688,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
* the switch
*/
mt7530_write(priv, MT7530_PCR_P(port),
- PCR_MATRIX(priv->ds->enabled_port_mask));
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
return 0;
}
@@ -780,8 +781,8 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
* same bridge. If the port is disabled, port matrix is kept
* and not being setup until the port becomes enabled.
*/
- if (ds->enabled_port_mask & BIT(i) && i != port) {
- if (ds->ports[i].bridge_dev != bridge)
+ if (dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
mt7530_set(priv, MT7530_PCR_P(i),
@@ -817,8 +818,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
*/
- if (ds->enabled_port_mask & BIT(i) && i != port) {
- if (ds->ports[i].bridge_dev != bridge)
+ if (dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
mt7530_clear(priv, MT7530_PCR_P(i),
@@ -906,11 +907,11 @@ err:
}
static enum dsa_tag_protocol
-mtk_get_tag_protocol(struct dsa_switch *ds)
+mtk_get_tag_protocol(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_cpu_port(ds, MT7530_CPU_PORT)) {
+ if (port != MT7530_CPU_PORT) {
dev_warn(priv->dev,
"port not matched with tagging CPU port\n");
return DSA_TAG_PROTO_NONE;
@@ -928,11 +929,11 @@ mt7530_setup(struct dsa_switch *ds)
struct device_node *dn;
struct mt7530_dummy_poll p;
- /* The parent node of cpu_dp->netdev which holds the common system
+ /* The parent node of master netdev which holds the common system
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = ds->dst->cpu_dp->netdev->dev.of_node->parent;
+ dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent;
priv->ethernet = syscon_node_to_regmap(dn);
if (IS_ERR(priv->ethernet))
return PTR_ERR(priv->ethernet);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index f123ed57630d..65f10fec25b3 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -9,6 +9,7 @@
*/
#include <linux/delay.h>
+#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -69,7 +70,8 @@ static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
return NULL;
}
-static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
return DSA_TAG_PROTO_TRAILER;
}
@@ -174,9 +176,8 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
*/
REG_WRITE(addr, PORT_VLAN_MAP,
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
- (dsa_is_cpu_port(ds, p) ?
- ds->enabled_port_mask :
- BIT(ds->dst->cpu_dp->index)));
+ (dsa_is_cpu_port(ds, p) ? dsa_user_ports(ds) :
+ BIT(dsa_to_port(ds, p)->cpu_dp->index)));
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
@@ -188,6 +189,27 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
return 0;
}
+static int mv88e6060_setup_addr(struct dsa_switch *ds)
+{
+ u8 addr[ETH_ALEN];
+ u16 val;
+
+ eth_random_addr(addr);
+
+ val = addr[0] << 8 | addr[1];
+
+ /* The multicast bit is always transmitted as a zero, so the switch uses
+ * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+ */
+ val &= 0xfeff;
+
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
static int mv88e6060_setup(struct dsa_switch *ds)
{
int ret;
@@ -203,6 +225,10 @@ static int mv88e6060_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
+ ret = mv88e6060_setup_addr(ds);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < MV88E6060_PORTS; i++) {
ret = mv88e6060_setup_port(ds, i);
if (ret < 0)
@@ -212,22 +238,6 @@ static int mv88e6060_setup(struct dsa_switch *ds)
return 0;
}
-static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- u16 val = addr[0] << 8 | addr[1];
-
- /* The multicast bit is always transmitted as a zero, so the switch uses
- * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
- */
- val &= 0xfeff;
-
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
-
- return 0;
-}
-
static int mv88e6060_port_to_phy_addr(int port)
{
if (port >= 0 && port < MV88E6060_PORTS)
@@ -262,7 +272,6 @@ static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
.probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
- .set_addr = mv88e6060_set_addr,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
};
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 5cd5551461e3..58a4a0014e59 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
mv88e6xxx-objs += global1.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d74c7335c512..8171055fde7a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -851,7 +851,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
if (dsa_is_cpu_port(chip->ds, i) ||
dsa_is_dsa_port(chip->ds, i) ||
- (br && chip->ds->ports[i].bridge_dev == br))
+ (br && dsa_to_port(chip->ds, i)->bridge_dev == br))
pvlan |= BIT(i);
return pvlan;
@@ -932,6 +932,19 @@ static int mv88e6xxx_irl_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+static int mv88e6xxx_mac_setup(struct mv88e6xxx_chip *chip)
+{
+ if (chip->info->ops->set_switch_mac) {
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+
+ return chip->info->ops->set_switch_mac(chip, addr);
+ }
+
+ return 0;
+}
+
static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
{
u16 pvlan = 0;
@@ -1124,23 +1137,23 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
- if (!ds->ports[port].netdev)
+ if (!ds->ports[i].slave)
continue;
if (vlan.member[i] ==
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (ds->ports[i].bridge_dev ==
+ if (dsa_to_port(ds, i)->bridge_dev ==
ds->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
- if (!ds->ports[i].bridge_dev)
+ if (!dsa_to_port(ds, i)->bridge_dev)
continue;
- dev_err(ds->dev, "p%d: hw VLAN %d already used by %s\n",
- port, vlan.vid,
- netdev_name(ds->ports[i].bridge_dev));
+ dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
+ port, vlan.vid, i,
+ netdev_name(dsa_to_port(ds, i)->bridge_dev));
err = -EOPNOTSUPP;
goto unlock;
}
@@ -1195,6 +1208,73 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_vtu_entry vlan;
+ struct mv88e6xxx_atu_entry entry;
+ int err;
+
+ /* Null VLAN ID corresponds to the port private database */
+ if (vid == 0)
+ err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
+ else
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ if (err)
+ return err;
+
+ entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
+ ether_addr_copy(entry.mac, addr);
+ eth_addr_dec(entry.mac);
+
+ err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
+ if (err)
+ return err;
+
+ /* Initialize a fresh ATU entry if it isn't found */
+ if (entry.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED ||
+ !ether_addr_equal(entry.mac, addr)) {
+ memset(&entry, 0, sizeof(entry));
+ ether_addr_copy(entry.mac, addr);
+ }
+
+ /* Purge the ATU entry only if no port is using it anymore */
+ if (state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
+ entry.portvec &= ~BIT(port);
+ if (!entry.portvec)
+ entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
+ } else {
+ entry.portvec |= BIT(port);
+ entry.state = state;
+ }
+
+ return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
+}
+
+static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
+ u16 vid)
+{
+ const char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u8 state = MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC;
+
+ return mv88e6xxx_port_db_load_purge(chip, port, broadcast, vid, state);
+}
+
+static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
+{
+ int port;
+ int err;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_port_add_broadcast(chip, port, vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
u16 vid, u8 member)
{
@@ -1207,7 +1287,11 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
vlan.member[port] = member;
- return mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
+ return mv88e6xxx_broadcast_setup(chip, vid);
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
@@ -1311,50 +1395,6 @@ unlock:
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
-{
- struct mv88e6xxx_vtu_entry vlan;
- struct mv88e6xxx_atu_entry entry;
- int err;
-
- /* Null VLAN ID corresponds to the port private database */
- if (vid == 0)
- err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
- else
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
- if (err)
- return err;
-
- entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
-
- err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
- if (err)
- return err;
-
- /* Initialize a fresh ATU entry if it isn't found */
- if (entry.state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED ||
- !ether_addr_equal(entry.mac, addr)) {
- memset(&entry, 0, sizeof(entry));
- ether_addr_copy(entry.mac, addr);
- }
-
- /* Purge the ATU entry only if no port is using it anymore */
- if (state == MV88E6XXX_G1_ATU_DATA_STATE_UNUSED) {
- entry.portvec &= ~BIT(port);
- if (!entry.portvec)
- entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
- } else {
- entry.portvec |= BIT(port);
- entry.state = state;
- }
-
- return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
-}
-
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
@@ -1663,7 +1703,7 @@ static int mv88e6xxx_setup_port_mode(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_dsa_port(chip->ds, port))
return mv88e6xxx_set_port_mode_dsa(chip, port);
- if (dsa_is_normal_port(chip->ds, port))
+ if (dsa_is_user_port(chip->ds, port))
return mv88e6xxx_set_port_mode_normal(chip, port);
/* Setup CPU port mode depending on its supported tag format */
@@ -1964,19 +2004,7 @@ static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
if (err)
return err;
- /* Clear the statistics counters for all ports */
- err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP,
- MV88E6XXX_G1_STATS_OP_BUSY |
- MV88E6XXX_G1_STATS_OP_FLUSH_ALL);
- if (err)
- return err;
-
- /* Wait for the flush to complete. */
- err = mv88e6xxx_g1_stats_wait(chip);
- if (err)
- return err;
-
- return 0;
+ return mv88e6xxx_g1_stats_clear(chip);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -1992,6 +2020,9 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
+ if (dsa_is_unused_port(ds, i))
+ continue;
+
err = mv88e6xxx_setup_port(chip, i);
if (err)
goto unlock;
@@ -2013,6 +2044,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ err = mv88e6xxx_mac_setup(chip);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_phy_setup(chip);
if (err)
goto unlock;
@@ -2029,6 +2064,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ err = mv88e6xxx_broadcast_setup(chip, 0);
+ if (err)
+ goto unlock;
+
err = mv88e6xxx_pot_setup(chip);
if (err)
goto unlock;
@@ -2043,21 +2082,6 @@ unlock:
return err;
}
-static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (!chip->info->ops->set_switch_mac)
- return -EOPNOTSUPP;
-
- mutex_lock(&chip->reg_lock);
- err = chip->info->ops->set_switch_mac(chip, addr);
- mutex_unlock(&chip->reg_lock);
-
- return err;
-}
-
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
@@ -2263,6 +2287,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2290,6 +2315,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2320,6 +2346,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2347,6 +2374,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2377,6 +2405,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2413,6 +2442,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -2445,6 +2475,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2470,6 +2501,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2503,6 +2535,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2538,6 +2571,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2572,6 +2606,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2607,6 +2642,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2634,6 +2670,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2775,6 +2812,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2846,6 +2884,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
@@ -2879,6 +2918,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6320_stats_get_stats,
@@ -2911,6 +2951,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -2944,6 +2985,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -2977,6 +3019,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -3012,6 +3055,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
@@ -3687,7 +3731,8 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
-static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds)
+static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
+ int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -3785,7 +3830,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.probe = mv88e6xxx_drv_probe,
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
- .set_addr = mv88e6xxx_set_addr,
.adjust_link = mv88e6xxx_adjust_link,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index d76d7c7ea819..b43bd6476632 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -374,6 +374,22 @@ int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
MV88E6XXX_G1_STATS_OP_BUSY);
}
+int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
+
+ return err;
+}
+
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port)
{
int err;
@@ -444,3 +460,22 @@ void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val)
*val = value | reg;
}
+
+int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
+{
+ int err;
+ u16 val;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STATS_OP, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
+ if (err)
+ return err;
+
+ /* Wait for the flush to complete. */
+ return mv88e6xxx_g1_stats_wait(chip);
+}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 950b914f9251..b0dc7518b47f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -235,8 +235,10 @@ int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val);
+int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip);
int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 436668bd50dc..46af8052e535 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -149,9 +149,9 @@ static void mv88e6xxx_phy_ppu_reenable_work(struct work_struct *ugly)
mutex_unlock(&chip->reg_lock);
}
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = (void *)_ps;
+ struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
@@ -193,8 +193,7 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip)
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
- (unsigned long)chip);
+ timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
}
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 5ada7a41449c..9df22ebee822 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -506,7 +506,7 @@ qca8k_setup(struct dsa_switch *ds)
pr_warn("regmap initialization failed");
/* Initialize CPU port pad mode (xMII type, delays...) */
- phy_mode = of_get_phy_mode(ds->dst->cpu_dp->dn);
+ phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
if (phy_mode < 0) {
pr_err("Can't find phy-mode for master device\n");
return phy_mode;
@@ -536,7 +536,7 @@ qca8k_setup(struct dsa_switch *ds)
/* Disable MAC by default on all user ports */
for (i = 1; i < QCA8K_NUM_PORTS; i++)
- if (ds->enabled_port_mask & BIT(i))
+ if (dsa_is_user_port(ds, i))
qca8k_port_set_status(priv, i, 0);
/* Forward all unknown frames to CPU port for Linux processing */
@@ -551,12 +551,11 @@ qca8k_setup(struct dsa_switch *ds)
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_LOOKUP_MEMBER,
- ds->enabled_port_mask);
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
}
/* Individual user ports get connected to CPU port only */
- if (ds->enabled_port_mask & BIT(i)) {
+ if (dsa_is_user_port(ds, i)) {
int shift = 16 * (i % 2);
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
@@ -700,7 +699,7 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
int i;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (ds->ports[i].bridge_dev != br)
+ if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
@@ -725,7 +724,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
int i;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (ds->ports[i].bridge_dev != br)
+ if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port from the portvlan mask of the other ports
* in the bridge
@@ -824,7 +823,7 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds)
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port)
{
return DSA_TAG_PROTO_QCA;
}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d0a1f9ce3168..58483af80bdb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -353,7 +353,7 @@ static void dummy_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->max_mtu = 0;
}
static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -388,7 +388,7 @@ static int __init dummy_init_one(void)
int err;
dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
- "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
+ "dummy%d", NET_NAME_ENUM, dummy_setup);
if (!dev_dummy)
return -ENOMEM;
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index fe13bfea30ac..74263f8efe1a 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -139,9 +139,9 @@ static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = (equalizer_t *) param;
+ equalizer_t *eql = from_timer(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
@@ -178,10 +178,8 @@ static void __init eql_setup(struct net_device *dev)
{
equalizer_t *eql = netdev_priv(dev);
- init_timer(&eql->timer);
- eql->timer.data = (unsigned long) eql;
+ timer_setup(&eql->timer, eql_timer, 0);
eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
- eql->timer.function = eql_timer;
spin_lock_init(&eql->queue.lock);
INIT_LIST_HEAD(&eql->queue.all_slaves);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index c5987f518cb2..b648e3f95c01 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -367,7 +367,7 @@ static struct net_device *corkscrew_scan(int unit);
static int corkscrew_setup(struct net_device *dev, int ioaddr,
struct pnp_dev *idev, int card_number);
static int corkscrew_open(struct net_device *dev);
-static void corkscrew_timer(unsigned long arg);
+static void corkscrew_timer(struct timer_list *t);
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int corkscrew_rx(struct net_device *dev);
@@ -627,7 +627,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
spin_lock_init(&vp->lock);
- setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev);
+ timer_setup(&vp->timer, corkscrew_timer, 0);
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
@@ -869,11 +869,11 @@ static int corkscrew_open(struct net_device *dev)
return 0;
}
-static void corkscrew_timer(unsigned long data)
+static void corkscrew_timer(struct timer_list *t)
{
#ifdef AUTOMEDIA
- struct net_device *dev = (struct net_device *) data;
- struct corkscrew_private *vp = netdev_priv(dev);
+ struct corkscrew_private *vp = from_timer(vp, t, timer);
+ struct net_device *dev = vp->our_dev;
int ioaddr = dev->base_addr;
unsigned long flags;
int ok = 0;
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index 47c844cc9d27..3044a6f35f04 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -225,7 +225,7 @@ static unsigned short read_eeprom(unsigned int ioaddr, int index);
static void tc574_wait_for_completion(struct net_device *dev, int cmd);
static void tc574_reset(struct net_device *dev);
-static void media_check(unsigned long arg);
+static void media_check(struct timer_list *t);
static int el3_open(struct net_device *dev);
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct net_device *dev);
@@ -377,7 +377,7 @@ static int tc574_config(struct pcmcia_device *link)
lp->autoselect = config & Autoselect ? 1 : 0;
}
- init_timer(&lp->media);
+ timer_setup(&lp->media, media_check, 0);
{
int phy;
@@ -681,8 +681,6 @@ static int el3_open(struct net_device *dev)
netif_start_queue(dev);
tc574_reset(dev);
- lp->media.function = media_check;
- lp->media.data = (unsigned long) dev;
lp->media.expires = jiffies + HZ;
add_timer(&lp->media);
@@ -859,10 +857,10 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
(and as a last resort, poll the NIC for events), and to monitor
the MII, reporting changes in cable status.
*/
-static void media_check(unsigned long arg)
+static void media_check(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct el3_private *lp = netdev_priv(dev);
+ struct el3_private *lp = from_timer(lp, t, media);
+ struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
unsigned long flags;
unsigned short /* cable, */ media, partner;
@@ -1048,6 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data->phy_id = phy;
+ /* fall through */
case SIOCGMIIREG: /* Read the specified MII register. */
{
int saved_window;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index e28254a00599..2b2695311bda 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -163,7 +163,7 @@ static void tc589_release(struct pcmcia_device *link);
static u16 read_eeprom(unsigned int ioaddr, int index);
static void tc589_reset(struct net_device *dev);
-static void media_check(unsigned long arg);
+static void media_check(struct timer_list *t);
static int el3_config(struct net_device *dev, struct ifmap *map);
static int el3_open(struct net_device *dev);
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
@@ -517,7 +517,7 @@ static int el3_open(struct net_device *dev)
netif_start_queue(dev);
tc589_reset(dev);
- setup_timer(&lp->media, media_check, (unsigned long)dev);
+ timer_setup(&lp->media, media_check, 0);
mod_timer(&lp->media, jiffies + HZ);
dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
@@ -676,10 +676,10 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static void media_check(unsigned long arg)
+static void media_check(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct el3_private *lp = netdev_priv(dev);
+ struct el3_private *lp = from_timer(lp, t, media);
+ struct net_device *dev = lp->p_dev->priv;
unsigned int ioaddr = dev->base_addr;
u16 media, errs;
unsigned long flags;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 402d9090ad29..f4e13a7014bd 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -759,8 +759,8 @@ static int vortex_open(struct net_device *dev);
static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
-static void vortex_timer(unsigned long arg);
-static void rx_oom_timer(unsigned long arg);
+static void vortex_timer(struct timer_list *t);
+static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@@ -1599,9 +1599,9 @@ vortex_up(struct net_device *dev)
dev->name, media_tbl[dev->if_port].name);
}
- setup_timer(&vp->timer, vortex_timer, (unsigned long)dev);
+ timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
- setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev);
+ timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@@ -1784,10 +1784,10 @@ out:
}
static void
-vortex_timer(unsigned long data)
+vortex_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct vortex_private *vp = netdev_priv(dev);
+ struct vortex_private *vp = from_timer(vp, t, timer);
+ struct net_device *dev = vp->mii.dev;
void __iomem *ioaddr = vp->ioaddr;
int next_tick = 60*HZ;
int ok = 0;
@@ -2687,10 +2687,10 @@ boomerang_rx(struct net_device *dev)
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
-rx_oom_timer(unsigned long arg)
+rx_oom_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)arg;
- struct vortex_private *vp = netdev_priv(dev);
+ struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
+ struct net_device *dev = vp->mii.dev;
spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
index 74046afab993..f8b73babc510 100644
--- a/drivers/net/ethernet/3com/Makefile
+++ b/drivers/net/ethernet/3com/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 3Com Ethernet device drivers
#
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index ff3b31894188..f975c2fc88a3 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 8390 network device drivers.
#
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3da1fc539ef9..7bddb8efb6d5 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -85,7 +85,7 @@ static struct net_device_stats *get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void axnet_tx_timeout(struct net_device *dev);
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
-static void ei_watchdog(u_long arg);
+static void ei_watchdog(struct timer_list *t);
static void axnet_reset_8390(struct net_device *dev);
static int mdio_read(unsigned int addr, int phy_id, int loc);
@@ -483,7 +483,7 @@ static int axnet_open(struct net_device *dev)
link->open++;
info->link_status = 0x00;
- setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ timer_setup(&info->watchdog, ei_watchdog, 0);
mod_timer(&info->watchdog, jiffies + HZ);
return ax_open(dev);
@@ -547,10 +547,10 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
return ax_interrupt(irq, dev_id);
}
-static void ei_watchdog(u_long arg)
+static void ei_watchdog(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct axnet_dev *info = PRIV(dev);
+ struct axnet_dev *info = from_timer(info, t, watchdog);
+ struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + AXNET_MII_EEP;
u_short link;
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index bd0a2a14b649..bcad4a7fac9f 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -99,7 +99,7 @@ static int pcnet_open(struct net_device *dev);
static int pcnet_close(struct net_device *dev);
static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
-static void ei_watchdog(u_long arg);
+static void ei_watchdog(struct timer_list *t);
static void pcnet_reset_8390(struct net_device *dev);
static int set_config(struct net_device *dev, struct ifmap *map);
static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
@@ -917,7 +917,7 @@ static int pcnet_open(struct net_device *dev)
info->phy_id = info->eth_phy;
info->link_status = 0x00;
- setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+ timer_setup(&info->watchdog, ei_watchdog, 0);
mod_timer(&info->watchdog, jiffies + HZ);
return ei_open(dev);
@@ -1006,10 +1006,10 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id)
return ret;
}
-static void ei_watchdog(u_long arg)
+static void ei_watchdog(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)arg;
- struct pcnet_dev *info = PRIV(dev);
+ struct pcnet_dev *info = from_timer(info, t, watchdog);
+ struct net_device *dev = info->p_dev->priv;
unsigned int nic_base = dev->base_addr;
unsigned int mii_addr = nic_base + DLINK_GPIO;
u_short link;
@@ -1107,6 +1107,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = info->phy_id;
+ /* fall through */
case SIOCGMIIREG: /* Read MII PHY register. */
data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f);
return 0;
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index a0a03d4d939a..39f6273358ed 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux network Ethernet device drivers.
#
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a251de8d9a91..7120f2b9c6ef 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1092,9 +1092,11 @@ static void tx_reclaim_skb(struct bfin_mac_local *lp)
return;
}
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
{
- tx_reclaim_skb((struct bfin_mac_local *)lp);
+ struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+ tx_reclaim_skb(lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
@@ -1650,9 +1652,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
- init_timer(&lp->tx_reclaim_timer);
- lp->tx_reclaim_timer.data = (unsigned long)lp;
- lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+ timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
lp->flags = 0;
netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 9c07140a5d8d..a1e04c9e932e 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GRETH_H
#define GRETH_H
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 54eff90e2f02..48220b6c600d 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3080,9 +3080,9 @@ err_out:
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+ struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
@@ -3624,11 +3624,9 @@ static int et131x_open(struct net_device *netdev)
int result;
/* Start the timer to track NIC errors */
- init_timer(&adapter->error_timer);
+ timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
adapter->error_timer.expires = jiffies +
msecs_to_jiffies(TX_ERROR_PERIOD);
- adapter->error_timer.function = et131x_error_timer_handler;
- adapter->error_timer.data = (unsigned long)adapter;
add_timer(&adapter->error_timer);
result = request_irq(irq, et131x_isr,
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 08931b4afc96..d0c388cfd52f 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SLIC_H
#define _SLIC_H
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 15a8096c60df..0b60921c392f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -355,10 +355,10 @@ static void slic_xmit_complete(struct slic_device *sdev)
{
struct slic_tx_queue *txq = &sdev->txq;
struct net_device *dev = sdev->netdev;
- unsigned int idx = txq->done_idx;
struct slic_tx_buffer *buff;
unsigned int frames = 0;
unsigned int bytes = 0;
+ unsigned int idx;
/* Limit processing to SLIC_MAX_TX_COMPLETIONS frames to avoid that new
* completions during processing keep the loop running endlessly.
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 51c486cfbb8c..c670067b1541 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ACENIC_H_
#define _ACENIC_H_
#include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 305dc1996b4e..4532e574ebcd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -627,6 +627,12 @@ enum ena_admin_flow_hash_proto {
ENA_ADMIN_RSS_NOT_IP = 7,
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
ENA_ADMIN_RSS_PROTO_NUM = 16,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 52beba8c7a39..bf2de5298005 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -63,6 +63,8 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
+#define ENA_POLL_MS 5
+
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
@@ -315,7 +317,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue
cmd_size_in_bytes,
comp,
comp_size_in_bytes);
- if (unlikely(IS_ERR(comp_ctx)))
+ if (IS_ERR(comp_ctx))
admin_queue->running_state = false;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -533,7 +535,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- msleep(100);
+ msleep(ENA_POLL_MS);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -746,6 +748,9 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
{
u32 val, i;
+ /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+ timeout = (timeout * 100) / ENA_POLL_MS;
+
for (i = 0; i < timeout; i++) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
@@ -758,8 +763,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
exp_state)
return 0;
- /* The resolution of the timeout is 100ms */
- msleep(100);
+ msleep(ENA_POLL_MS);
}
return -ETIME;
@@ -1130,7 +1134,7 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
- if (unlikely(IS_ERR(comp_ctx))) {
+ if (IS_ERR(comp_ctx)) {
if (comp_ctx == ERR_PTR(-ENODEV))
pr_debug("Failed to submit command [%ld]\n",
PTR_ERR(comp_ctx));
@@ -1253,7 +1257,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- msleep(20);
+ msleep(ENA_POLL_MS);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 967020fb26ee..060cb18fa659 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -60,8 +60,8 @@ struct ena_stats {
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
- ENA_STAT_GLOBAL_ENTRY(io_suspend),
- ENA_STAT_GLOBAL_ENTRY(io_resume),
+ ENA_STAT_GLOBAL_ENTRY(suspend),
+ ENA_STAT_GLOBAL_ENTRY(resume),
ENA_STAT_GLOBAL_ENTRY(wd_expired),
ENA_STAT_GLOBAL_ENTRY(interface_up),
ENA_STAT_GLOBAL_ENTRY(interface_down),
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(doorbells),
ENA_STAT_TX_ENTRY(prepare_ctx_err),
ENA_STAT_TX_ENTRY(bad_req_id),
+ ENA_STAT_TX_ENTRY(missed_tx),
};
static const struct ena_stats ena_stats_rx_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c6bd5e24005d..97c5a89a9cf7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -517,7 +517,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rc = ena_alloc_rx_page(rx_ring, rx_info,
- __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
+ GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"failed to alloc buffer for rx queue %d\n",
@@ -2361,38 +2361,6 @@ static const struct net_device_ops ena_netdev_ops = {
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
-static void ena_device_io_suspend(struct work_struct *work)
-{
- struct ena_adapter *adapter =
- container_of(work, struct ena_adapter, suspend_io_task);
- struct net_device *netdev = adapter->netdev;
-
- /* ena_napi_disable_all disables only the IO handling.
- * We are still subject to AENQ keep alive watchdog.
- */
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.io_suspend++;
- u64_stats_update_begin(&adapter->syncp);
- ena_napi_disable_all(adapter);
- netif_tx_lock(netdev);
- netif_device_detach(netdev);
- netif_tx_unlock(netdev);
-}
-
-static void ena_device_io_resume(struct work_struct *work)
-{
- struct ena_adapter *adapter =
- container_of(work, struct ena_adapter, resume_io_task);
- struct net_device *netdev = adapter->netdev;
-
- u64_stats_update_begin(&adapter->syncp);
- adapter->dev_stats.io_resume++;
- u64_stats_update_end(&adapter->syncp);
-
- netif_device_attach(netdev);
- ena_napi_enable_all(adapter);
-}
-
static int ena_device_validate_params(struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -2561,38 +2529,31 @@ err_disable_msix:
return rc;
}
-static void ena_fw_reset_device(struct work_struct *work)
+static void ena_destroy_device(struct ena_adapter *adapter)
{
- struct ena_com_dev_get_features_ctx get_feat_ctx;
- struct ena_adapter *adapter =
- container_of(work, struct ena_adapter, reset_task);
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
- struct pci_dev *pdev = adapter->pdev;
- bool dev_up, wd_state;
- int rc;
-
- if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- dev_err(&pdev->dev,
- "device reset schedule while reset bit is off\n");
- return;
- }
+ bool dev_up;
netif_carrier_off(netdev);
del_timer_sync(&adapter->timer_service);
- rtnl_lock();
-
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ adapter->dev_up_before_reset = dev_up;
+
ena_com_set_admin_running_state(ena_dev, false);
- /* After calling ena_close the tx queues and the napi
- * are disabled so no one can interfere or touch the
- * data structures
- */
ena_close(netdev);
+ /* Before releasing the ENA resources, a device reset is required.
+ * (to prevent the device from accessing them).
+ * In case the reset flag is set and the device is up, ena_close
+ * already perform the reset, so it can be skipped.
+ */
+ if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
+ ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
+
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
@@ -2606,10 +2567,19 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_com_mmio_reg_read_request_destroy(ena_dev);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
+
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
- /* Finish with the destroy part. Start the init part */
+static int ena_restore_device(struct ena_adapter *adapter)
+{
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct pci_dev *pdev = adapter->pdev;
+ bool wd_state;
+ int rc;
+ set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
@@ -2623,6 +2593,11 @@ static void ena_fw_reset_device(struct work_struct *work)
goto err_device_destroy;
}
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ /* Make sure we don't have a race with AENQ Links state handler */
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2630,7 +2605,7 @@ static void ena_fw_reset_device(struct work_struct *work)
goto err_device_destroy;
}
/* If the interface was up before the reset bring it up */
- if (dev_up) {
+ if (adapter->dev_up_before_reset) {
rc = ena_up(adapter);
if (rc) {
dev_err(&pdev->dev, "Failed to create I/O queues\n");
@@ -2639,24 +2614,38 @@ static void ena_fw_reset_device(struct work_struct *work)
}
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
-
- rtnl_unlock();
-
dev_err(&pdev->dev, "Device reset completed successfully\n");
- return;
+ return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_device_destroy:
ena_com_admin_destroy(ena_dev);
err:
- rtnl_unlock();
-
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
-
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
dev_err(&pdev->dev,
"Reset attempt failed. Can not reset the device\n");
+
+ return rc;
+}
+
+static void ena_fw_reset_device(struct work_struct *work)
+{
+ struct ena_adapter *adapter =
+ container_of(work, struct ena_adapter, reset_task);
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ dev_err(&pdev->dev,
+ "device reset schedule while reset bit is off\n");
+ return;
+ }
+ rtnl_lock();
+ ena_destroy_device(adapter);
+ ena_restore_device(adapter);
+ rtnl_unlock();
}
static int check_missing_comp_in_queue(struct ena_adapter *adapter,
@@ -2665,7 +2654,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
u32 missed_tx = 0;
- int i;
+ int i, rc = 0;
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
@@ -2679,21 +2668,25 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf->print_once = 1;
missed_tx++;
-
- if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
- netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
- missed_tx,
- adapter->missing_tx_completion_threshold);
- adapter->reset_reason =
- ENA_REGS_RESET_MISS_TX_CMPL;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- return -EIO;
- }
}
}
- return 0;
+ if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
+ netif_err(adapter, tx_err, adapter->netdev,
+ "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ missed_tx,
+ adapter->missing_tx_completion_threshold);
+ adapter->reset_reason =
+ ENA_REGS_RESET_MISS_TX_CMPL;
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ rc = -EIO;
+ }
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->tx_stats.missed_tx = missed_tx;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ return rc;
}
static void check_for_missing_tx_completions(struct ena_adapter *adapter)
@@ -2866,9 +2859,9 @@ static void ena_update_host_info(struct ena_admin_host_info *host_info,
(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
@@ -3276,8 +3269,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_rss;
}
- INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
- INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies;
@@ -3287,8 +3278,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_update_hints(adapter, &get_feat_ctx.hw_hints);
- setup_timer(&adapter->timer_service, ena_timer_service,
- (unsigned long)adapter);
+ timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
@@ -3311,8 +3301,6 @@ err_free_msix:
err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service);
- cancel_work_sync(&adapter->suspend_io_task);
- cancel_work_sync(&adapter->resume_io_task);
err_netdev_destroy:
free_netdev(netdev);
err_device_destroy:
@@ -3382,10 +3370,6 @@ static void ena_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->suspend_io_task);
-
- cancel_work_sync(&adapter->resume_io_task);
-
/* Reset the device only if the device is running. */
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
ena_com_dev_reset(ena_dev, adapter->reset_reason);
@@ -3419,11 +3403,59 @@ static void ena_remove(struct pci_dev *pdev)
vfree(ena_dev);
}
+#ifdef CONFIG_PM
+/* ena_suspend - PM suspend callback
+ * @pdev: PCI device information struct
+ * @state: power state
+ */
+static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct ena_adapter *adapter = pci_get_drvdata(pdev);
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.suspend++;
+ u64_stats_update_end(&adapter->syncp);
+
+ rtnl_lock();
+ if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+ dev_err(&pdev->dev,
+ "ignoring device reset request as the device is being suspended\n");
+ clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ }
+ ena_destroy_device(adapter);
+ rtnl_unlock();
+ return 0;
+}
+
+/* ena_resume - PM resume callback
+ * @pdev: PCI device information struct
+ *
+ */
+static int ena_resume(struct pci_dev *pdev)
+{
+ struct ena_adapter *adapter = pci_get_drvdata(pdev);
+ int rc;
+
+ u64_stats_update_begin(&adapter->syncp);
+ adapter->dev_stats.resume++;
+ u64_stats_update_end(&adapter->syncp);
+
+ rtnl_lock();
+ rc = ena_restore_device(adapter);
+ rtnl_unlock();
+ return rc;
+}
+#endif
+
static struct pci_driver ena_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = ena_pci_tbl,
.probe = ena_probe,
.remove = ena_remove,
+#ifdef CONFIG_PM
+ .suspend = ena_suspend,
+ .resume = ena_resume,
+#endif
.sriov_configure = ena_sriov_configure,
};
@@ -3468,7 +3500,8 @@ static void ena_update_on_link_change(void *adapter_data,
if (status) {
netdev_dbg(adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
- netif_carrier_on(adapter->netdev);
+ if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
} else {
clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
netif_carrier_off(adapter->netdev);
@@ -3504,16 +3537,6 @@ static void ena_notification(void *adapter_data,
ENA_ADMIN_NOTIFICATION);
switch (aenq_e->aenq_common_desc.syndrom) {
- case ENA_ADMIN_SUSPEND:
- /* Suspend just the IO queues.
- * We deliberately don't suspend admin so the timer and
- * the keep_alive events should remain.
- */
- queue_work(ena_wq, &adapter->suspend_io_task);
- break;
- case ENA_ADMIN_RESUME:
- queue_work(ena_wq, &adapter->resume_io_task);
- break;
case ENA_ADMIN_UPDATE_HINTS:
hints = (struct ena_admin_ena_hw_hints *)
(&aenq_e->inline_data_w4);
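Aside (illustrative, not part of the patch): the timer conversions in this file and in the AMD, Apple, Aquantia, Atheros and Broadcom hunks below all follow the same kernel pattern — setup_timer()/init_timer() with an unsigned long cookie becomes timer_setup(), and the callback recovers its containing structure with from_timer() instead of casting the cookie. A minimal sketch, assuming a kernel build environment; struct foo_priv, foo_timer_cb and foo_setup are hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct timer_list watchdog;	/* embedded timer */
	struct net_device *dev;		/* back-pointer used by the callback */
};

static void foo_timer_cb(struct timer_list *t)
{
	/* from_timer() maps the timer_list pointer back to its container */
	struct foo_priv *priv = from_timer(priv, t, watchdog);

	/* use priv->dev here instead of casting an unsigned long cookie */
	mod_timer(&priv->watchdog, jiffies + HZ);	/* re-arm in one second */
}

static void foo_setup(struct foo_priv *priv)
{
	timer_setup(&priv->watchdog, foo_timer_cb, 0);
	mod_timer(&priv->watchdog, jiffies + HZ);
}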
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 29bb5704260b..3bbc003871de 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
#include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 2
+#define DRV_MODULE_VER_MINOR 3
#define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena"
@@ -52,7 +52,7 @@
#define DRV_MODULE_VERSION \
__stringify(DRV_MODULE_VER_MAJOR) "." \
__stringify(DRV_MODULE_VER_MINOR) "." \
- __stringify(DRV_MODULE_VER_SUBMINOR) "k"
+ __stringify(DRV_MODULE_VER_SUBMINOR) "K"
#endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)"
@@ -185,6 +185,7 @@ struct ena_stats_tx {
u64 tx_poll;
u64 doorbells;
u64 bad_req_id;
+ u64 missed_tx;
};
struct ena_stats_rx {
@@ -257,8 +258,8 @@ struct ena_ring {
struct ena_stats_dev {
u64 tx_timeout;
- u64 io_suspend;
- u64 io_resume;
+ u64 suspend;
+ u64 resume;
u64 wd_expired;
u64 interface_up;
u64 interface_down;
@@ -271,7 +272,8 @@ enum ena_flags_t {
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
ENA_FLAG_MSIX_ENABLED,
- ENA_FLAG_TRIGGER_RESET
+ ENA_FLAG_TRIGGER_RESET,
+ ENA_FLAG_ONGOING_RESET
};
/* adapter specific private data structure */
@@ -326,11 +328,10 @@ struct ena_adapter {
/* timer service */
struct work_struct reset_task;
- struct work_struct suspend_io_task;
- struct work_struct resume_io_task;
struct timer_list timer_service;
bool wd_state;
+ bool dev_up_before_reset;
unsigned long last_keep_alive_jiffies;
struct u64_stats_sync syncp;
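Aside (illustrative, not part of the patch): the syncp member above backs the u64_stats_update_begin()/u64_stats_update_end() calls used in check_missing_comp_in_queue() and the suspend/resume stats, keeping 64-bit counters consistent on 32-bit machines. A minimal sketch of the pattern, assuming a kernel build environment; struct foo_stats and the foo_* helpers are hypothetical:

#include <linux/u64_stats_sync.h>

struct foo_stats {
	u64 missed_tx;
	struct u64_stats_sync syncp;	/* initialised once with u64_stats_init() */
};

/* writer side: bracket the update so 32-bit readers never see a torn value */
static void foo_record_missed_tx(struct foo_stats *s, u64 n)
{
	u64_stats_update_begin(&s->syncp);
	s->missed_tx = n;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry until a consistent snapshot is observed */
static u64 foo_read_missed_tx(struct foo_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->missed_tx;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}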
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
index e9e0be313804..741cdc392c6b 100644
--- a/drivers/net/ethernet/amd/7990.h
+++ b/drivers/net/ethernet/amd/7990.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* 7990.h -- LANCE ethernet IC generic routines.
* This is an attempt to separate out the bits of various ethernet
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index a38a2dce3eb3..45f86822a5f7 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the AMD network device drivers.
#
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index e22f976a0d18..212fe72a190b 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -123,6 +123,7 @@ struct lance_private {
int burst_sizes; /* ledma SBus burst sizes */
#endif
struct timer_list multicast_timer;
+ struct net_device *dev;
};
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
@@ -638,6 +639,13 @@ static void lance_set_multicast(struct net_device *dev)
netif_wake_queue(dev);
}
+static void lance_set_multicast_retry(struct timer_list *t)
+{
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+
+ lance_set_multicast(lp->dev);
+}
+
static int a2065_init_one(struct zorro_dev *z,
const struct zorro_device_id *ent);
static void a2065_remove_one(struct zorro_dev *z);
@@ -728,15 +736,13 @@ static int a2065_init_one(struct zorro_dev *z,
priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
+ priv->dev = dev;
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
dev->dma = 0;
- init_timer(&priv->multicast_timer);
- priv->multicast_timer.data = (unsigned long) dev;
- priv->multicast_timer.function =
- (void (*)(unsigned long))lance_set_multicast;
+ timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index b11e910850f7..01d132c02ff9 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -302,10 +302,10 @@ am79c961_init_for_open(struct net_device *dev)
write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
}
-static void am79c961_timer(unsigned long data)
+static void am79c961_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct dev_priv *priv = netdev_priv(dev);
+ struct dev_priv *priv = from_timer(priv, t, timer);
+ struct net_device *dev = priv->dev;
unsigned int lnkstat, carrier;
unsigned long flags;
@@ -728,9 +728,8 @@ static int am79c961_probe(struct platform_device *pdev)
am79c961_banner();
spin_lock_init(&priv->chip_lock);
- init_timer(&priv->timer);
- priv->timer.data = (unsigned long)dev;
- priv->timer.function = am79c961_timer;
+ priv->dev = dev;
+ timer_setup(&priv->timer, am79c961_timer, 0);
if (am79c961_hw_init(dev))
goto release;
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
index 9f384b79507b..fc5088c70731 100644
--- a/drivers/net/ethernet/amd/am79c961a.h
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -140,6 +140,7 @@ struct dev_priv {
unsigned long txhdr;
spinlock_t chip_lock;
struct timer_list timer;
+ struct net_device *dev;
};
#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 7b5df562f30f..358f7ab77c70 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1669,9 +1669,9 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
return 0;
}
-static void amd8111e_config_ipg(struct net_device *dev)
+static void amd8111e_config_ipg(struct timer_list *t)
{
- struct amd8111e_priv *lp = netdev_priv(dev);
+ struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
struct ipg_info *ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
@@ -1883,9 +1883,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initialize software ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){
- init_timer(&lp->ipg_data.ipg_timer);
- lp->ipg_data.ipg_timer.data = (unsigned long) dev;
- lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
+ timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
lp->ipg_data.ipg_timer.expires = jiffies +
IPG_CONVERGE_JIFFIES;
lp->ipg_data.ipg = DEFAULT_IPG;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 82cc81385033..116997a8b593 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -260,6 +260,7 @@ struct lance_private {
unsigned short busmaster_regval;
struct timer_list multicast_timer;
+ struct net_device *dev;
/* Pointers to the ring buffers as seen from the CPU */
char *rx_buf_ptr_cpu[RX_RING_SIZE];
@@ -1000,9 +1001,10 @@ static void lance_set_multicast(struct net_device *dev)
netif_wake_queue(dev);
}
-static void lance_set_multicast_retry(unsigned long _opaque)
+static void lance_set_multicast_retry(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) _opaque;
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct net_device *dev = lp->dev;
lance_set_multicast(dev);
}
@@ -1246,9 +1248,9 @@ static int dec_lance_probe(struct device *bdev, const int type)
* can occur from interrupts (ex. IPv6). So we
* use a timer to try again later when necessary. -DaveM
*/
- init_timer(&lp->multicast_timer);
- lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = lance_set_multicast_retry;
+ lp->dev = dev;
+ timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);
+
ret = register_netdev(dev);
if (ret) {
diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h
index 04aee9e0376a..bc845a2c60c1 100644
--- a/drivers/net/ethernet/amd/hplance.h
+++ b/drivers/net/ethernet/amd/hplance.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Random defines and structures for the HP Lance driver.
* Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
* Based on the Sun Lance driver and the NetBSD HP Lance driver
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 7f60d17819ce..a561705f232c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -321,7 +321,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
-static void pcnet32_watchdog(struct net_device *);
+static void pcnet32_watchdog(struct timer_list *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
int val);
@@ -1970,9 +1970,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->options |= PCNET32_PORT_MII;
}
- init_timer(&lp->watchdog_timer);
- lp->watchdog_timer.data = (unsigned long)dev;
- lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
+ timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0);
/* The PCNET32-specific entries in the device structure. */
dev->netdev_ops = &pcnet32_netdev_ops;
@@ -2902,9 +2900,10 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
* Could possibly be changed to use mii_check_media instead.
*/
-static void pcnet32_watchdog(struct net_device *dev)
+static void pcnet32_watchdog(struct timer_list *t)
{
- struct pcnet32_private *lp = netdev_priv(dev);
+ struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
+ struct net_device *dev = lp->dev;
unsigned long flags;
/* Print the link status if it has changed */
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 291ca5187f12..cdd7a611479b 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1248,9 +1248,10 @@ static void lance_set_multicast(struct net_device *dev)
netif_wake_queue(dev);
}
-static void lance_set_multicast_retry(unsigned long _opaque)
+static void lance_set_multicast_retry(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) _opaque;
+ struct lance_private *lp = from_timer(lp, t, multicast_timer);
+ struct net_device *dev = lp->dev;
lance_set_multicast(dev);
}
@@ -1459,9 +1460,7 @@ no_link_test:
* can occur from interrupts (ex. IPv6). So we
* use a timer to try again later when necessary. -DaveM
*/
- init_timer(&lp->multicast_timer);
- lp->multicast_timer.data = (unsigned long) dev;
- lp->multicast_timer.function = lance_set_multicast_retry;
+ timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0);
if (register_netdev(dev)) {
printk(KERN_ERR "SunLance: Cannot register device.\n");
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 0dea8f5da899..620785ffbd51 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 45d92304068e..cc1e4f820e64 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -295,7 +295,7 @@ again:
order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */
- gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages_node(node, gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 608693d11bd7..a74a8fbad53a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -642,9 +642,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void xgbe_tx_timer(unsigned long data)
+static void xgbe_tx_timer(struct timer_list *t)
{
- struct xgbe_channel *channel = (struct xgbe_channel *)data;
+ struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
struct xgbe_prv_data *pdata = channel->pdata;
struct napi_struct *napi;
@@ -680,9 +680,9 @@ static void xgbe_service(struct work_struct *work)
pdata->phy_if.phy_status(pdata);
}
-static void xgbe_service_timer(unsigned long data)
+static void xgbe_service_timer(struct timer_list *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
queue_work(pdata->dev_workqueue, &pdata->service_work);
@@ -694,16 +694,14 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
- setup_timer(&pdata->service_timer, xgbe_service_timer,
- (unsigned long)pdata);
+ timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
if (!channel->tx_ring)
break;
- setup_timer(&channel->tx_timer, xgbe_tx_timer,
- (unsigned long)channel);
+ timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
}
}
@@ -2208,7 +2206,7 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type,
struct tc_mqprio_qopt *mqprio = type_data;
u8 tc;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index eac740c476ce..5a655d289dd5 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -157,7 +157,7 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
-static void bmac_tx_timeout(unsigned long data);
+static void bmac_tx_timeout(struct timer_list *t);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
@@ -555,8 +555,6 @@ static inline void bmac_set_timeout(struct net_device *dev)
if (bp->timeout_active)
del_timer(&bp->tx_timeout);
bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
- bp->tx_timeout.function = bmac_tx_timeout;
- bp->tx_timeout.data = (unsigned long) dev;
add_timer(&bp->tx_timeout);
bp->timeout_active = 1;
spin_unlock_irqrestore(&bp->lock, flags);
@@ -1321,7 +1319,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
skb_queue_head_init(bp->queue);
- init_timer(&bp->tx_timeout);
+ timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0);
ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
if (ret) {
@@ -1471,10 +1469,10 @@ bmac_output(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void bmac_tx_timeout(unsigned long data)
+static void bmac_tx_timeout(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct bmac_data *bp = netdev_priv(dev);
+ struct bmac_data *bp = from_timer(bp, t, tx_timeout);
+ struct net_device *dev = macio_get_drvdata(bp->mdev);
volatile struct dbdma_regs __iomem *td = bp->tx_dma;
volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
volatile struct dbdma_cmd *cp;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e58b157b7d7c..0b5429d76bcf 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -86,7 +86,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
-static void mace_tx_timeout(unsigned long data);
+static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);
@@ -196,7 +196,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
memset((char *) mp->tx_cmds, 0,
(NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
- init_timer(&mp->tx_timeout);
+ timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
spin_lock_init(&mp->lock);
mp->timeout_active = 0;
@@ -521,8 +521,6 @@ static inline void mace_set_timeout(struct net_device *dev)
if (mp->timeout_active)
del_timer(&mp->tx_timeout);
mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
- mp->tx_timeout.function = mace_tx_timeout;
- mp->tx_timeout.data = (unsigned long) dev;
add_timer(&mp->tx_timeout);
mp->timeout_active = 1;
}
@@ -801,10 +799,10 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mace_tx_timeout(unsigned long data)
+static void mace_tx_timeout(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct mace_data *mp = netdev_priv(dev);
+ struct mace_data *mp = from_timer(mp, t, tx_timeout);
+ struct net_device *dev = macio_get_drvdata(mp->mdev);
volatile struct mace __iomem *mb = mp->mace;
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index cdf78e069a39..7d623e90dc19 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_AQUANTIA
Set this to y if you have an Ethernet network card that uses the aQuantia
AQC107/AQC108 chipset.
- This option does not build any drivers; it casues the aQuantia
+ This option does not build any drivers; it causes the aQuantia
drivers that can be built to appear in the list of Ethernet drivers.
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index d5e99b468870..70efb7467bf3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -221,8 +221,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
return err;
}
-int aq_ethtool_get_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
+static int aq_ethtool_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
@@ -242,8 +242,8 @@ int aq_ethtool_get_coalesce(struct net_device *ndev,
return 0;
}
-int aq_ethtool_set_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
+static int aq_ethtool_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 483e97691eea..78dfb2ab78ce 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -163,9 +163,9 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
return 0;
}
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
@@ -201,9 +201,9 @@ err_exit:
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, polling_timer);
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
@@ -440,14 +440,12 @@ int aq_nic_start(struct aq_nic_s *self)
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
- setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
mod_timer(&self->service_timer, jiffies +
AQ_CFG_SERVICE_TIMER_INTERVAL);
if (self->aq_nic_cfg.is_polling) {
- setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0654e0c76bc2..519ca6534b85 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -304,8 +304,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
- __GFP_COMP, pages_order);
+ buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
if (!buff->page) {
err = -ENOMEM;
goto err_exit;
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index e4feb712d4f2..3c63b16d485f 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
*
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index a22403c688c9..0187dbf3b87d 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
*
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index 5cf1c65bbce9..aa3d394b87e6 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Atheros network device drivers.
#
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8c9986f3fc01..94270f654b3b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -222,9 +222,10 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2613,8 +2614,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4f7e195af0bc..9dc6da039a6d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -130,9 +130,10 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
@@ -2361,8 +2362,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
/* get user settings */
atl1e_check_options(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 83d2db2abb45..b81fbf119bce 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2575,9 +2575,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
@@ -3071,8 +3072,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* assume we have no link for now */
netif_carrier_off(netdev);
- setup_timer(&adapter->phy_config_timer, atl1_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
adapter->phy_timer_pending = false;
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 77a1c03255de..db4bcc51023a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1028,9 +1028,9 @@ static void atl2_tx_timeout(struct net_device *netdev)
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
@@ -1053,9 +1053,10 @@ static void atl2_watchdog(unsigned long data)
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
@@ -1434,11 +1435,9 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_check_options(adapter);
- setup_timer(&adapter->watchdog_timer, atl2_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
- setup_timer(&adapter->phy_config_timer, atl2_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index 6ec4a956e1e5..aacc3cce2cc0 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NB8800_H_
#define _NB8800_H_
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 67134ece1107..af75156919ed 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -184,6 +184,7 @@ config BGMAC_PLATFORM
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on OF
+ depends on NET_DSA || !NET_DSA
select MII
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 79f2372c66ec..7046ad6d3d0e 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Broadcom network device drivers.
#
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a1125d10c825..e445ab724827 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -599,9 +599,9 @@ static void b44_check_phy(struct b44 *bp)
}
}
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = (struct b44 *) __opaque;
+ struct b44 *bp = from_timer(bp, t, timer);
spin_lock_irq(&bp->lock);
@@ -1474,10 +1474,8 @@ static int b44_open(struct net_device *dev)
goto out;
}
- init_timer(&bp->timer);
+ timer_setup(&bp->timer, b44_timer, 0);
bp->timer.expires = jiffies + HZ;
- bp->timer.data = (unsigned long) bp;
- bp->timer.function = b44_timer;
add_timer(&bp->timer);
b44_enable_ints(bp);
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 89d2cf341163..b3e36ca0fd19 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _B44_H
#define _B44_H
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 4f3845a58126..d9346e2ac720 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -295,16 +295,13 @@ static int bcm_enet_refill_rx(struct net_device *dev)
/*
* timer callback to defer refill rx queue in case we're OOM
*/
-static void bcm_enet_refill_rx_timer(unsigned long data)
+static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
- struct net_device *dev;
- struct bcm_enet_priv *priv;
-
- dev = (struct net_device *)data;
- priv = netdev_priv(dev);
+ struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
+ struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx((struct net_device *)data);
+ bcm_enet_refill_rx(dev);
spin_unlock(&priv->rx_lock);
}
@@ -1062,7 +1059,8 @@ static int bcm_enet_open(struct net_device *dev)
val = enet_readl(priv, ENET_CTL_REG);
val |= ENET_CTL_ENABLE_MASK;
enet_writel(priv, val, ENET_CTL_REG);
- enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+ if (priv->dma_has_sram)
+ enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->rx_chan);
@@ -1721,10 +1719,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
const char *clk_name;
int i, ret;
- /* stop if shared driver failed, assume driver->probe will be
- * called in the same order we register devices (correct ?) */
if (!bcm_enet_shared_base[0])
- return -ENODEV;
+ return -EPROBE_DEFER;
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
@@ -1768,12 +1764,14 @@ static int bcm_enet_probe(struct platform_device *pdev)
clk_name = "enet1";
}
- priv->mac_clk = clk_get(&pdev->dev, clk_name);
+ priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
goto out;
}
- clk_prepare_enable(priv->mac_clk);
+ ret = clk_prepare_enable(priv->mac_clk);
+ if (ret)
+ goto out;
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1801,13 +1799,15 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
/* using internal PHY, enable clock */
- priv->phy_clk = clk_get(&pdev->dev, "ephy");
+ priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
if (IS_ERR(priv->phy_clk)) {
ret = PTR_ERR(priv->phy_clk);
priv->phy_clk = NULL;
- goto out_put_clk_mac;
+ goto out_disable_clk_mac;
}
- clk_prepare_enable(priv->phy_clk);
+ ret = clk_prepare_enable(priv->phy_clk);
+ if (ret)
+ goto out_disable_clk_mac;
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1857,9 +1857,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
spin_lock_init(&priv->rx_lock);
/* init rx timeout (used for oom) */
- init_timer(&priv->rx_timeout);
- priv->rx_timeout.function = bcm_enet_refill_rx_timer;
- priv->rx_timeout.data = (unsigned long)dev;
+ timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
/* init the mib update lock&work */
mutex_init(&priv->mib_update_lock);
@@ -1901,14 +1899,10 @@ out_free_mdio:
out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
- if (priv->phy_clk) {
- clk_disable_unprepare(priv->phy_clk);
- clk_put(priv->phy_clk);
- }
+ clk_disable_unprepare(priv->phy_clk);
-out_put_clk_mac:
+out_disable_clk_mac:
clk_disable_unprepare(priv->mac_clk);
- clk_put(priv->mac_clk);
out:
free_netdev(dev);
return ret;
@@ -1944,12 +1938,8 @@ static int bcm_enet_remove(struct platform_device *pdev)
}
/* disable hw block clocks */
- if (priv->phy_clk) {
- clk_disable_unprepare(priv->phy_clk);
- clk_put(priv->phy_clk);
- }
+ clk_disable_unprepare(priv->phy_clk);
clk_disable_unprepare(priv->mac_clk);
- clk_put(priv->mac_clk);
free_netdev(dev);
return 0;
@@ -2021,9 +2011,9 @@ static inline int bcm_enet_port_is_rgmii(int portid)
/*
* enet sw PHY polling
*/
-static void swphy_poll_timer(unsigned long data)
+static void swphy_poll_timer(struct timer_list *t)
{
- struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
+ struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
unsigned int i;
for (i = 0; i < priv->num_ports; i++) {
@@ -2332,11 +2322,8 @@ static int bcm_enetsw_open(struct net_device *dev)
}
/* start phy polling timer */
- init_timer(&priv->swphy_poll);
- priv->swphy_poll.function = swphy_poll_timer;
- priv->swphy_poll.data = (unsigned long)priv;
- priv->swphy_poll.expires = jiffies;
- add_timer(&priv->swphy_poll);
+ timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
+ mod_timer(&priv->swphy_poll, jiffies);
return 0;
out:
@@ -2692,11 +2679,8 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
struct resource *res_mem;
int ret, irq_rx, irq_tx;
- /* stop if shared driver failed, assume driver->probe will be
- * called in the same order we register devices (correct ?)
- */
if (!bcm_enet_shared_base[0])
- return -ENODEV;
+ return -EPROBE_DEFER;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_rx = platform_get_irq(pdev, 0);
@@ -2735,33 +2719,27 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
if (ret)
goto out;
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- "bcm63xx_enetsw")) {
- ret = -EBUSY;
+ priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto out;
}
- priv->base = ioremap(res_mem->start, resource_size(res_mem));
- if (priv->base == NULL) {
- ret = -ENOMEM;
- goto out_release_mem;
- }
-
- priv->mac_clk = clk_get(&pdev->dev, "enetsw");
+ priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ ret = clk_prepare_enable(priv->mac_clk);
+ if (ret)
+ goto out;
priv->rx_chan = 0;
priv->tx_chan = 1;
spin_lock_init(&priv->rx_lock);
/* init rx timeout (used for oom) */
- init_timer(&priv->rx_timeout);
- priv->rx_timeout.function = bcm_enet_refill_rx_timer;
- priv->rx_timeout.data = (unsigned long)dev;
+ timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
@@ -2773,7 +2751,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
ret = register_netdev(dev);
if (ret)
- goto out_put_clk;
+ goto out_disable_clk;
netif_carrier_off(dev);
platform_set_drvdata(pdev, dev);
@@ -2782,14 +2760,8 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
return 0;
-out_put_clk:
- clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, resource_size(res_mem));
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_clk);
out:
free_netdev(dev);
return ret;
@@ -2801,17 +2773,13 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
priv = netdev_priv(dev);
unregister_netdev(dev);
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
+ clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 0a1b7b2e55bd..5a66728d4776 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BCM63XX_ENET_H_
#define BCM63XX_ENET_H_
@@ -8,7 +9,6 @@
#include <linux/platform_device.h>
#include <bcm63xx_regs.h>
-#include <bcm63xx_irq.h>
#include <bcm63xx_io.h>
#include <bcm63xx_iudma.h>
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 83eec9a8c275..087f01b4dc3a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1416,9 +1416,24 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
- tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+
+ /* Configure QID and port mapping */
+ reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
+ reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
+ if (ring->inspect) {
+ reg |= ring->switch_queue & RING_QID_MASK;
+ reg |= ring->switch_port << RING_PORT_ID_SHIFT;
+ } else {
+ reg |= RING_IGNORE_STATUS;
+ }
+ tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+ /* Enable ACB algorithm 2 */
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ reg |= tdma_control_bit(priv, ACB_ALGO);
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
* with the original definition of ACB_ALGO
*/
@@ -1447,8 +1462,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
napi_enable(&ring->napi);
netif_dbg(priv, hw, priv->netdev,
- "TDMA cfg, size=%d, desc_cpu=%p\n",
- ring->size, ring->desc_cpu);
+ "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
+ ring->size, ring->desc_cpu, ring->switch_queue,
+ ring->switch_port);
return 0;
}
@@ -1809,15 +1825,17 @@ static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
- u32 __maybe_unused reg;
+ u32 reg;
- /* Include Broadcom tag in pad extension */
+ reg = gib_readl(priv, GIB_CONTROL);
+ /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
if (netdev_uses_dsa(priv->netdev)) {
- reg = gib_readl(priv, GIB_CONTROL);
reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
- gib_writel(priv, reg, GIB_CONTROL);
}
+ reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
+ reg |= 12 << GIB_IPG_LEN_SHIFT;
+ gib_writel(priv, reg, GIB_CONTROL);
}
static int bcm_sysport_open(struct net_device *dev)
@@ -2011,6 +2029,29 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
+static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u16 queue = skb_get_queue_mapping(skb);
+ struct bcm_sysport_tx_ring *tx_ring;
+ unsigned int q, port;
+
+ if (!netdev_uses_dsa(dev))
+ return fallback(dev, skb);
+
+ /* DSA tagging layer will have configured the correct queue */
+ q = BRCM_TAG_GET_QUEUE(queue);
+ port = BRCM_TAG_GET_PORT(queue);
+ tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
+
+ if (unlikely(!tx_ring))
+ return fallback(dev, skb);
+
+ return tx_ring->index;
+}
+
static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_start_xmit = bcm_sysport_xmit,
.ndo_tx_timeout = bcm_sysport_tx_timeout,
@@ -2023,8 +2064,79 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
.ndo_get_stats64 = bcm_sysport_get_stats64,
+ .ndo_select_queue = bcm_sysport_select_queue,
};
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct dsa_notifier_register_info *info)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *ring;
+ struct net_device *slave_dev;
+ unsigned int num_tx_queues;
+ unsigned int q, start, port;
+
+ /* We can't set up queue inspection for switches that are not
+ * directly attached
+ */
+ if (info->switch_number)
+ return 0;
+
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return 0;
+
+ port = info->port_number;
+ slave_dev = info->info.dev;
+
+ /* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
+ * 1:1 mapping and can only do a 2:1 mapping. Reducing the number of
+ * per-port (slave_dev) network device queues achieves just that.
+ * This needs to happen now, before any slave network device is used,
+ * so that it accurately reflects the number of real TX queues.
+ */
+ if (priv->is_lite)
+ netif_set_real_num_tx_queues(slave_dev,
+ slave_dev->num_tx_queues / 2);
+ num_tx_queues = slave_dev->real_num_tx_queues;
+
+ if (priv->per_port_num_tx_queues &&
+ priv->per_port_num_tx_queues != num_tx_queues)
+ netdev_warn(slave_dev, "asymetric number of per-port queues\n");
+
+ priv->per_port_num_tx_queues = num_tx_queues;
+
+ start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
+ for (q = 0; q < num_tx_queues; q++) {
+ ring = &priv->tx_rings[q + start];
+
+ /* Just remember the mapping; the actual programming is done
+ * during bcm_sysport_init_tx_ring
+ */
+ ring->switch_queue = q;
+ ring->switch_port = port;
+ ring->inspect = true;
+ priv->ring_map[q + port * num_tx_queues] = ring;
+
+ /* Set all queues as being used now */
+ set_bit(q + start, &priv->queue_bitmap);
+ }
+
+ return 0;
+}
+
+static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct dsa_notifier_register_info *info;
+
+ if (event != DSA_PORT_REGISTER)
+ return NOTIFY_DONE;
+
+ info = ptr;
+
+ return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+}
+
#define REV_FMT "v%2x.%02x"
static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
@@ -2172,10 +2284,18 @@ static int bcm_sysport_probe(struct platform_device *pdev)
u64_stats_init(&priv->syncp);
+ priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+
+ ret = register_dsa_notifier(&priv->dsa_notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register DSA notifier\n");
+ goto err_deregister_fixed_link;
+ }
+
ret = register_netdev(dev);
if (ret) {
dev_err(&pdev->dev, "failed to register net_device\n");
- goto err_deregister_fixed_link;
+ goto err_deregister_notifier;
}
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -2188,6 +2308,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_notifier:
+ unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2199,11 +2321,13 @@ err_free_netdev:
static int bcm_sysport_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device_node *dn = pdev->dev.of_node;
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
+ unregister_dsa_notifier(&priv->dsa_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
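Aside (illustrative, not part of the patch): the ring_map[] slots populated in bcm_sysport_map_queues() are indexed as q + port * per_port_num_tx_queues, and bcm_sysport_select_queue() recomputes the same index from BRCM_TAG_GET_QUEUE()/BRCM_TAG_GET_PORT() at transmit time. As a hypothetical worked example (values assumed, not taken from the patch): with 4 TX queues per switch port, queue 1 of switch port 2 maps to ring_map[1 + 2 * 4] = ring_map[9], so frames tagged for that port and queue are steered to the matching SYSTEMPORT TX ring.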
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 82e401df199e..f5a984c1c986 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -404,7 +404,7 @@ struct bcm_rsb {
#define RING_CONS_INDEX_MASK 0xffff
#define RING_MAPPING 0x14
-#define RING_QID_MASK 0x3
+#define RING_QID_MASK 0x7
#define RING_PORT_ID_SHIFT 3
#define RING_PORT_ID_MASK 0x7
#define RING_IGNORE_STATUS (1 << 6)
@@ -712,6 +712,9 @@ struct bcm_sysport_tx_ring {
struct bcm_sysport_priv *priv; /* private context backpointer */
unsigned long packets; /* packets statistics */
unsigned long bytes; /* bytes statistics */
+ unsigned int switch_queue; /* switch port queue number */
+ unsigned int switch_port; /* switch port number */
+ bool inspect; /* inspect switch port and queue */
};
/* Driver private structure */
@@ -765,5 +768,12 @@ struct bcm_sysport_priv {
/* For atomic update generic 64bit value on 32bit Machine */
struct u64_stats_sync syncp;
+
+ /* map information between switch port queues and local queues */
+ struct notifier_block dsa_notifier;
+ unsigned int per_port_num_tx_queues;
+ unsigned long queue_bitmap;
+ struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
+
};
#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6322594ab260..6fe074c1588b 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -184,13 +184,19 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core) &&
!(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
+ struct phy_device *phydev;
+
mii_bus = bcma_mdio_mii_register(bgmac);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
}
-
bgmac->mii_bus = mii_bus;
+
+ phydev = mdiobus_get_phy(bgmac->mii_bus, bgmac->phyaddr);
+ if (ci->id == BCMA_CHIP_ID_BCM53573 && phydev &&
+ (phydev->drv->phy_id & phydev->drv->phy_id_mask) == PHY_ID_BCM54210E)
+ phydev->dev_flags |= PHY_BRCM_EN_MASTER_MODE;
}
if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index d937083db9a4..894eda5b13cf 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -131,6 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
switch (bgmac->net_dev->phydev->speed) {
default:
netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n");
+ /* fall through */
case SPEED_1000:
val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT;
break;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 48d672b204a4..1d96cd594ade 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -15,6 +15,7 @@
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <net/dsa.h>
#include "bgmac.h"
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
@@ -127,6 +128,8 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
dma_desc->ctl1 = cpu_to_le32(ctl1);
}
+#define ENET_BRCM_TAG_LEN 4
+
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
@@ -139,6 +142,18 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
u32 flags;
int i;
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS), otherwise they will be discarded when
+ * they enter the switch port logic. When Broadcom tags are enabled, we
+ * need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done after
+ * the Broadcom tag is stripped off the ingress packet.
+ */
+ if (netdev_uses_dsa(net_dev)) {
+ if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
+ goto err_stats;
+ }
+
if (skb->len > BGMAC_DESC_CTL1_LEN) {
netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
@@ -225,6 +240,7 @@ err_dma_head:
err_drop:
dev_kfree_skb(skb);
+err_stats:
net_dev->stats.tx_dropped++;
net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
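Aside on the padding arithmetic in the comment above (assuming the standard kernel value ETH_ZLEN = 60, the minimum frame length excluding FCS): skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN) pads the frame to 60 + 4 = 64 bytes, and the 4-byte FCS appended by the MAC then yields the 68 bytes (including FCS and tag) that the switch's post-tag-strip length check requires.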
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 443d57b10264..4040d846da8e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BGMAC_H
#define _BGMAC_H
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e3af1f3cb61f..7919f6112ecf 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6183,9 +6183,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
}
static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = (struct bnx2 *) data;
+ struct bnx2 *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -8462,10 +8462,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
- init_timer(&bp->timer);
+ timer_setup(&bp->timer, bnx2_timer, 0);
bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
- bp->timer.data = (unsigned long) bp;
- bp->timer.function = bnx2_timer;
#ifdef BCM_CNIC
if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1216c1f1e052..4c739d5355d2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4289,7 +4289,7 @@ int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
{
struct tc_mqprio_qopt *mqprio = type_data;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c12b4d3e946e..91e2a7560b48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5761,9 +5761,9 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = (struct bnx2x *) data;
+ struct bnx2x *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
@@ -9332,7 +9332,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
- else
+ else if (bp->slowpath)
bnx2x_set_storm_rx_mode(bp);
/* Cleanup multicast configuration */
@@ -10271,8 +10271,15 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
smp_mb();
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
- bnx2x_nic_load(bp, LOAD_NORMAL);
-
+ /* If the load fails because of an allocation failure, the NIC
+ * is unloaded and reloaded once more. If open still fails, an
+ * error message notifies the user.
+ */
+ if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ BNX2X_ERR("Open the NIC fails again!\n");
+ }
rtnl_unlock();
return;
}
@@ -12414,10 +12421,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
- init_timer(&bp->timer);
+ timer_setup(&bp->timer, bnx2x_timer, 0);
bp->timer.expires = jiffies + bp->current_interval;
- bp->timer.data = (unsigned long) bp;
- bp->timer.function = bnx2x_timer;
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ca994d0bab6..3591077a5f6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1074,11 +1074,6 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
}
}
-static int bnx2x_ari_enabled(struct pci_dev *dev)
-{
- return dev->bus->self && dev->bus->self->ari_enabled;
-}
-
static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
@@ -1212,7 +1207,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
err = -EIO;
/* verify ari is enabled */
- if (!bnx2x_ari_enabled(bp->pdev)) {
+ if (!pci_ari_enabled(bp->pdev->bus)) {
BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 4f0cb8e1ffc0..59c8ec9c1cad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dc5de275352a..c5c38d4b7d1c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -61,6 +61,7 @@
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
+#include "bnxt_devlink.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -107,9 +108,11 @@ enum board_idx {
BCM57452,
BCM57454,
BCM58802,
+ BCM58804,
BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
+ NETXTREME_S_VF,
};
/* indexed by enum above */
@@ -145,9 +148,11 @@ static const struct {
[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+ [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -185,6 +190,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
+ { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
@@ -194,6 +200,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
};
@@ -218,7 +225,8 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
+ return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
+ idx == NETXTREME_S_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -1509,7 +1517,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
(struct rx_tpa_end_cmp *)rxcmp,
(struct rx_tpa_end_cmp_ext *)rxcmp1, event);
- if (unlikely(IS_ERR(skb)))
+ if (IS_ERR(skb))
return -EBUSY;
rc = -ENOMEM;
@@ -2827,7 +2835,8 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
if (page_mode) {
if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
return -EOPNOTSUPP;
- bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
bp->dev->hw_features &= ~NETIF_F_LRO;
@@ -2835,7 +2844,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_dir = DMA_BIDIRECTIONAL;
bp->rx_skb_func = bnxt_rx_page_skb;
} else {
- bp->dev->max_mtu = BNXT_MAX_MTU;
+ bp->dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
@@ -4528,19 +4537,46 @@ static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
return 0;
}
-static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
- u32 buf_tmrs, u16 flags,
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
+ u16 val, tmr, max, flags;
+
+ max = hw_coal->bufs_per_record * 128;
+ if (hw_coal->budget)
+ max = hw_coal->bufs_per_record * hw_coal->budget;
+
+ val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
+ req->num_cmpl_aggr_int = cpu_to_le16(val);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ val = min_t(u16, val, 63);
+ req->num_cmpl_dma_aggr = cpu_to_le16(val);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
+ req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
+
+ tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
+ tmr = max_t(u16, tmr, 1);
+ req->int_lat_tmr_max = cpu_to_le16(tmr);
+
+ /* min timer set to 1/2 of interrupt timer */
+ val = tmr / 2;
+ req->int_lat_tmr_min = cpu_to_le16(val);
+
+ /* buf timer set to 1/4 of interrupt timer */
+ val = max_t(u16, tmr / 4, 1);
+ req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
+
+ tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
+ tmr = max_t(u16, tmr, 1);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
req->flags = cpu_to_le16(flags);
- req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
- req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
- req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
- req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
- /* Minimum time between 2 interrupts set to buf_tmr x 2 */
- req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
- req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
- req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}
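As a worked example of the rewritten helper, take the RX defaults introduced later in this patch (coal_ticks = 14, coal_bufs = 30, coal_ticks_irq = 1, coal_bufs_irq = 2, idle_thresh = 25, bufs_per_record = 2, budget = 64) together with BNXT_USEC_TO_COAL_TIMER(x) = (x) * 25 / 2; the figures below are derived from this hunk and are illustrative only:

	max                          = 2 * 64 = 128            (budget-limited)
	num_cmpl_aggr_int            = clamp(30, 1, 128) = 30
	num_cmpl_dma_aggr            = min(30, 63) = 30
	num_cmpl_dma_aggr_during_int = clamp(2, 1, 63) = 2
	int_lat_tmr_max              = 14 * 25 / 2 = 175
	int_lat_tmr_min              = 175 / 2 = 87
	cmpl_aggr_dma_tmr            = max(175 / 4, 1) = 43
	cmpl_aggr_dma_tmr_during_int = max(1 * 25 / 2, 1) = 12
	flags                        = TIMER_RESET | RING_IDLE  (since 14 < idle_thresh of 25)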
int bnxt_hwrm_set_coal(struct bnxt *bp)
@@ -4548,51 +4584,14 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
int i, rc = 0;
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
req_tx = {0}, *req;
- u16 max_buf, max_buf_irq;
- u16 buf_tmr, buf_tmr_irq;
- u32 flags;
bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- /* Each rx completion (2 records) should be DMAed immediately.
- * DMA 1/4 of the completion buffers at a time.
- */
- max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
- /* max_buf must not be zero */
- max_buf = clamp_t(u16, max_buf, 1, 63);
- max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
- buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
- /* buf timer set to 1/4 of interrupt timer */
- buf_tmr = max_t(u16, buf_tmr / 4, 1);
- buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
- buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-
- flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
-
- /* RING_IDLE generates more IRQs for lower latency. Enable it only
- * if coal_ticks is less than 25 us.
- */
- if (bp->rx_coal_ticks < 25)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
-
- bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
- buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
-
- /* max_buf must not be zero */
- max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
- max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
- buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
- /* buf timer set to 1/4 of interrupt timer */
- buf_tmr = max_t(u16, buf_tmr / 4, 1);
- buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
- buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
-
- flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
- bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
- buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
+ bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
+ bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -4724,6 +4723,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
else
bp->br_mode = BRIDGE_MODE_UNDEF;
+ bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
+ if (!bp->max_mtu)
+ bp->max_mtu = BNXT_MAX_MTU;
+
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4884,9 +4887,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
- resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+ resp->hwrm_fw_rsvd);
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
@@ -4912,16 +4915,14 @@ hwrm_ver_get_exit:
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
-#if IS_ENABLED(CONFIG_RTC_LIB)
struct hwrm_fw_set_time_input req = {0};
- struct rtc_time tm;
- struct timeval tv;
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
if (bp->hwrm_spec_code < 0x10400)
return -EOPNOTSUPP;
- do_gettimeofday(&tv);
- rtc_time_to_tm(tv.tv_sec, &tm);
+ time64_to_tm(now, 0, &tm);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
req.year = cpu_to_le16(1900 + tm.tm_year);
req.month = 1 + tm.tm_mon;
@@ -4930,9 +4931,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp)
req.minute = tm.tm_min;
req.second = tm.tm_sec;
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-#else
- return -EOPNOTSUPP;
-#endif
}
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
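The hunk above removes the CONFIG_RTC_LIB dependency by switching to the 64-bit time interfaces: ktime_get_real_seconds() returns wall-clock time as a time64_t and time64_to_tm() breaks it into calendar fields, avoiding the 32-bit struct timeval. A minimal, self-contained sketch of the same pattern (the helper name and header choices are assumptions, not taken from this patch):

	#include <linux/timekeeping.h>	/* ktime_get_real_seconds() */
	#include <linux/time.h>		/* time64_to_tm(), struct tm */

	static void example_fill_date(u16 *year, u8 *month, u8 *day)
	{
		time64_t now = ktime_get_real_seconds();
		struct tm tm;

		time64_to_tm(now, 0, &tm);	/* offset 0 == UTC */
		*year  = 1900 + tm.tm_year;	/* tm_year counts from 1900 */
		*month = 1 + tm.tm_mon;		/* tm_mon is 0..11 */
		*day   = tm.tm_mday;
	}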
@@ -6964,9 +6962,9 @@ static void bnxt_poll_controller(struct net_device *dev)
}
#endif
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = (struct bnxt *)data;
+ struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev))
@@ -6980,6 +6978,11 @@ static void bnxt_timer(unsigned long data)
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
+
+ if (bnxt_tc_flower_enabled(bp)) {
+ set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -7070,6 +7073,10 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_get_port_module_status(bp);
mutex_unlock(&bp->link_lock);
}
+
+ if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_tc_flow_stats_work(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -7133,6 +7140,32 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
pci_disable_device(bp->pdev);
}
+static void bnxt_init_dflt_coal(struct bnxt *bp)
+{
+ struct bnxt_coal *coal;
+
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ coal = &bp->rx_coal;
+ coal->coal_ticks = 14;
+ coal->coal_bufs = 30;
+ coal->coal_ticks_irq = 1;
+ coal->coal_bufs_irq = 2;
+ coal->idle_thresh = 25;
+ coal->bufs_per_record = 2;
+ coal->budget = 64; /* NAPI budget */
+
+ coal = &bp->tx_coal;
+ coal->coal_ticks = 28;
+ coal->coal_bufs = 30;
+ coal->coal_ticks_irq = 2;
+ coal->coal_bufs_irq = 2;
+ coal->bufs_per_record = 1;
+
+ bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -7201,22 +7234,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- /* tick values in micro seconds */
- bp->rx_coal_ticks = 12;
- bp->rx_coal_bufs = 30;
- bp->rx_coal_ticks_irq = 1;
- bp->rx_coal_bufs_irq = 2;
+ bnxt_init_dflt_coal(bp);
- bp->tx_coal_ticks = 25;
- bp->tx_coal_bufs = 30;
- bp->tx_coal_ticks_irq = 2;
- bp->tx_coal_bufs_irq = 2;
-
- bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
-
- init_timer(&bp->timer);
- bp->timer.data = (unsigned long)bp;
- bp->timer.function = bnxt_timer;
+ timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
@@ -7243,13 +7263,13 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+ return 0;
+
rc = bnxt_approve_mac(bp, addr->sa_data);
if (rc)
return rc;
- if (ether_addr_equal(addr->sa_data, dev->dev_addr))
- return 0;
-
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
@@ -7321,24 +7341,49 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return 0;
}
-static int bnxt_setup_flower(struct net_device *dev,
- struct tc_cls_flower_offload *cls_flower)
+static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct bnxt *bp = cb_priv;
+
+ if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
{
struct bnxt *bp = netdev_priv(dev);
- if (BNXT_VF(bp))
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
- return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
+ bp, bp);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
- case TC_SETUP_CLSFLOWER:
- return bnxt_setup_flower(dev, type_data);
- case TC_SETUP_MQPRIO: {
+ case TC_SETUP_BLOCK:
+ return bnxt_setup_tc_block(dev, type_data);
+ case TC_SETUP_QDISC_MQPRIO: {
struct tc_mqprio_qopt *mqprio = type_data;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
@@ -7725,7 +7770,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
- .ndo_xdp = bnxt_xdp,
+ .ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
.ndo_get_phys_port_name = bnxt_get_phys_port_name
@@ -8064,10 +8109,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_UNICAST_FLT;
- /* MTU range: 60 - 9500 */
- dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = BNXT_MAX_MTU;
-
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
@@ -8115,6 +8156,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
+ /* MTU range: 60 - FW defined max */
+ dev->min_mtu = ETH_ZLEN;
+ dev->max_mtu = bp->max_mtu;
+
rc = bnxt_probe_phy(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c911e69ff25f..5359a1f0045f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -944,6 +944,22 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+struct bnxt_coal {
+ u16 coal_ticks;
+ u16 coal_ticks_irq;
+ u16 coal_bufs;
+ u16 coal_bufs_irq;
+ /* RING_IDLE enabled when coal ticks < idle_thresh */
+ u16 idle_thresh;
+ u8 bufs_per_record;
+ u8 budget;
+};
+
+struct bnxt_tc_flow_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct bnxt_tc_info {
bool enabled;
@@ -954,12 +970,29 @@ struct bnxt_tc_info {
/* hash table to store L2 keys of TC flows */
struct rhashtable l2_table;
struct rhashtable_params l2_ht_params;
+ /* hash table to store L2 keys for TC tunnel decap */
+ struct rhashtable decap_l2_table;
+ struct rhashtable_params decap_l2_ht_params;
+ /* hash table to store tunnel decap entries */
+ struct rhashtable decap_table;
+ struct rhashtable_params decap_ht_params;
+ /* hash table to store tunnel encap entries */
+ struct rhashtable encap_table;
+ struct rhashtable_params encap_ht_params;
/* lock to atomically add/del an l2 node when a flow is
* added or deleted.
*/
struct mutex lock;
+ /* Fields used for batching stats query */
+ struct rhashtable_iter iter;
+#define BNXT_FLOW_STATS_BATCH_MAX 10
+ struct bnxt_tc_stats_batch {
+ void *flow_node;
+ struct bnxt_tc_flow_stats hw_stats;
+ } stats_batch[BNXT_FLOW_STATS_BATCH_MAX];
+
/* Stat counter mask (width) */
u64 bytes_mask;
u64 packets_mask;
@@ -1013,6 +1046,7 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
#define CHIP_NUM_58802 0xd802
+#define CHIP_NUM_58804 0xd804
#define CHIP_NUM_58808 0xd808
#define BNXT_CHIP_NUM_5730X(chip_num) \
@@ -1048,6 +1082,7 @@ struct bnxt {
#define BNXT_CHIP_NUM_588XX(chip_num) \
((chip_num) == CHIP_NUM_58802 || \
+ (chip_num) == CHIP_NUM_58804 || \
(chip_num) == CHIP_NUM_58808)
struct net_device *dev;
@@ -1170,6 +1205,7 @@ struct bnxt {
int nr_vnics;
u32 rss_hash_cfg;
+ u16 max_mtu;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
@@ -1232,14 +1268,8 @@ struct bnxt {
u8 port_count;
u16 br_mode;
- u16 rx_coal_ticks;
- u16 rx_coal_ticks_irq;
- u16 rx_coal_bufs;
- u16 rx_coal_bufs_irq;
- u16 tx_coal_ticks;
- u16 tx_coal_ticks_irq;
- u16 tx_coal_bufs;
- u16 tx_coal_bufs_irq;
+ struct bnxt_coal rx_coal;
+ struct bnxt_coal tx_coal;
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
@@ -1265,6 +1295,7 @@ struct bnxt {
#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
+#define BNXT_FLOW_STATS_SP_EVENT 15
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1315,7 +1346,7 @@ struct bnxt {
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
- struct bnxt_tc_info tc_info;
+ struct bnxt_tc_info *tc_info;
};
#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
new file mode 100644
index 000000000000..402fa32f7a88
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -0,0 +1,65 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_vfr.h"
+#include "bnxt_devlink.h"
+
+static const struct devlink_ops bnxt_dl_ops = {
+#ifdef CONFIG_BNXT_SRIOV
+ .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
+ .eswitch_mode_get = bnxt_dl_eswitch_mode_get,
+#endif /* CONFIG_BNXT_SRIOV */
+};
+
+int bnxt_dl_register(struct bnxt *bp)
+{
+ struct devlink *dl;
+ int rc;
+
+ if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10803) {
+ netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ return -ENOTSUPP;
+ }
+
+ dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ if (!dl) {
+ netdev_warn(bp->dev, "devlink_alloc failed");
+ return -ENOMEM;
+ }
+
+ bnxt_link_bp_to_dl(bp, dl);
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ rc = devlink_register(dl, &bp->pdev->dev);
+ if (rc) {
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+ netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnxt_dl_unregister(struct bnxt *bp)
+{
+ struct devlink *dl = bp->dl;
+
+ if (!dl)
+ return;
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
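The private area handed to devlink_alloc() above is the struct bnxt_dl declared in the new header that follows, which simply stores a back pointer to the driver. A hypothetical callback, not part of this patch, showing how an eswitch op would typically recover the driver context from the devlink handle (the real bnxt_dl_eswitch_mode_get() lives in bnxt_vfr.c):

	static int example_eswitch_mode_get(struct devlink *devlink, u16 *mode)
	{
		struct bnxt *bp = bnxt_get_bp_from_dl(devlink);	/* devlink_priv(dl)->bp */

		*mode = bp->eswitch_mode;
		return 0;
	}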
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
new file mode 100644
index 000000000000..e92a35d8b642
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -0,0 +1,39 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_DEVLINK_H
+#define BNXT_DEVLINK_H
+
+/* Struct to hold housekeeping info needed by devlink interface */
+struct bnxt_dl {
+ struct bnxt *bp; /* back ptr to the controlling dev */
+};
+
+static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->bp;
+}
+
+/* To clear devlink pointer from bp, pass NULL dl */
+static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
+{
+ bp->dl = dl;
+
+ /* add a back pointer in dl to bp */
+ if (dl) {
+ struct bnxt_dl *bp_dl = devlink_priv(dl);
+
+ bp_dl->bp = bp;
+ }
+}
+
+int bnxt_dl_register(struct bnxt *bp);
+void bnxt_dl_unregister(struct bnxt *bp);
+
+#endif /* BNXT_DEVLINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 3cbe771b3352..7ce1d4b7e67d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -26,8 +26,6 @@
#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
-
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -46,19 +44,24 @@ static int bnxt_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_coal *hw_coal;
+ u16 mult;
memset(coal, 0, sizeof(*coal));
- coal->rx_coalesce_usecs = bp->rx_coal_ticks;
- /* 2 completion records per rx packet */
- coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
- coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
- coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
+ hw_coal = &bp->rx_coal;
+ mult = hw_coal->bufs_per_record;
+ coal->rx_coalesce_usecs = hw_coal->coal_ticks;
+ coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
+ coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
+ coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
- coal->tx_coalesce_usecs = bp->tx_coal_ticks;
- coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
- coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
- coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ hw_coal = &bp->tx_coal;
+ mult = hw_coal->bufs_per_record;
+ coal->tx_coalesce_usecs = hw_coal->coal_ticks;
+ coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
+ coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
+ coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
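The bufs_per_record multiplier replaces the old hard-coded "2 completion records per rx packet" conversion: ethtool reports frames, the hardware is programmed in completion buffers, and coal_bufs = frames * bufs_per_record (2 for RX, 1 for TX). With the RX default coal_bufs = 30 from this patch, for example, ethtool -c reports rx-frames = 30 / 2 = 15, and setting rx-frames to 15 stores 15 * 2 = 30 back into coal_bufs.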
@@ -70,18 +73,23 @@ static int bnxt_set_coalesce(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
bool update_stats = false;
+ struct bnxt_coal *hw_coal;
int rc = 0;
-
- bp->rx_coal_ticks = coal->rx_coalesce_usecs;
- /* 2 completion records per rx packet */
- bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
- bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
- bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
-
- bp->tx_coal_ticks = coal->tx_coalesce_usecs;
- bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
- bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
- bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
+ u16 mult;
+
+ hw_coal = &bp->rx_coal;
+ mult = hw_coal->bufs_per_record;
+ hw_coal->coal_ticks = coal->rx_coalesce_usecs;
+ hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
+ hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
+ hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+
+ hw_coal = &bp->tx_coal;
+ mult = hw_coal->bufs_per_record;
+ hw_coal->coal_ticks = coal->tx_coalesce_usecs;
+ hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
+ hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
+ hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -822,20 +830,10 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
- char *pkglog;
- char *pkgver = NULL;
- pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
- if (pkglog)
- pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- if (pkgver && *pkgver != 0 && isdigit(*pkgver))
- snprintf(info->fw_version, sizeof(info->fw_version) - 1,
- "%s pkg %s", bp->fw_ver_str, pkgver);
- else
- strlcpy(info->fw_version, bp->fw_ver_str,
- sizeof(info->fw_version));
+ strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
@@ -843,7 +841,6 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
info->regdump_len = 0;
- kfree(pkglog);
}
static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1350,7 +1347,6 @@ static int bnxt_firmware_reset(struct net_device *dev,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
- /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */
/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
/* (e.g. when firmware isn't already running) */
switch (dir_type) {
@@ -1376,6 +1372,10 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_BONO_PATCH:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
break;
+ case BNXT_FW_RESET_CHIP:
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ break;
default:
return -EINVAL;
}
@@ -1773,6 +1773,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
dma_addr_t dma_handle;
struct hwrm_nvm_read_input req = {0};
+ if (!length)
+ return -EINVAL;
+
buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
GFP_KERNEL);
if (!buf) {
@@ -2495,13 +2498,59 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
}
+static int bnxt_reset(struct net_device *dev, u32 *flags)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ if (!BNXT_PF(bp)) {
+ netdev_err(dev, "Reset is not supported from a VF\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pci_vfs_assigned(bp->pdev)) {
+ netdev_err(dev,
+ "Reset not allowed when VFs are assigned to VMs\n");
+ return -EBUSY;
+ }
+
+ if (*flags == ETH_RESET_ALL) {
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10803)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
+ if (!rc)
+ netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ } else {
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_selftest_qlist_input req = {0};
struct bnxt_test_info *test_info;
+ struct net_device *dev = bp->dev;
+ char *pkglog;
int i, rc;
+ pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+ if (pkglog) {
+ char *pkgver;
+ int len;
+
+ pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ len = strlen(bp->fw_ver_str);
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ "/pkg %s", pkgver);
+ }
+ kfree(pkglog);
+ }
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -2592,4 +2641,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
+ .reset = bnxt_reset,
};
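The new .reset op maps ETH_RESET_ALL to a chip-level firmware reset (BNXT_FW_RESET_CHIP) on the PF and rejects the request while VFs are assigned. Assuming an ethtool build that supports --reset, the path can be exercised from userspace roughly as follows ("eth0" is a placeholder device name):

	# ethtool --reset eth0 all
	# modprobe -r bnxt_en && modprobe bnxt_en	# reload the driver to complete the reset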
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index f1bc90b6fb5b..ff601b42fcc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,8 @@ struct bnxt_led_cfg {
#define BNXT_LED_DFLT_ENABLES(x) \
cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+#define BNXT_FW_RESET_CHIP 0xffff
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index cb04cc76e8ad..c99f4d0880e4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,21 +11,21 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.8.1 */
+/* HSI and HWRM Specification 1.8.3 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */
+#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
-#define HWRM_VERSION_STR "1.8.1.4"
+#define HWRM_VERSION_STR "1.8.3.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
@@ -111,6 +111,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
@@ -835,8 +836,7 @@ struct hwrm_func_qcfg_output {
u8 port_pf_cnt;
#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
- u8 unused_0;
- u8 unused_1;
+ __le16 max_mtu_configured;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -873,12 +873,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_2;
+ u8 unused_0;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
- u8 unused_3;
+ u8 unused_1;
u8 valid;
};
@@ -3407,6 +3407,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -3463,6 +3464,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
__le32 unused_2;
u8 unused_3;
u8 unused_4;
@@ -3994,6 +3996,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
@@ -4122,6 +4125,14 @@ struct hwrm_cfa_l2_set_rx_mask_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_tunnel_filter_alloc */
/* Input (88 bytes) */
struct hwrm_cfa_tunnel_filter_alloc_input {
@@ -4161,6 +4172,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
@@ -4323,6 +4335,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
@@ -4355,6 +4368,14 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_ntuple_filter_free */
/* Input (24 bytes) */
struct hwrm_cfa_ntuple_filter_free_input {
@@ -4413,6 +4434,116 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};
+/* hwrm_cfa_decap_filter_alloc */
+/* Input (104 bytes) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2;
+ u8 unused_3;
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ u8 unused_4;
+ u8 unused_5;
+ u8 unused_6[3];
+ u8 unused_7;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_decap_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_decap_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_cfa_flow_alloc */
/* Input (128 bytes) */
struct hwrm_cfa_flow_alloc_input {
@@ -4634,6 +4765,7 @@ struct hwrm_tunnel_dst_port_query_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0[7];
};
@@ -4662,9 +4794,10 @@ struct hwrm_tunnel_dst_port_alloc_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0;
__be16 tunnel_dst_port_val;
- __le32 unused_1;
+ __be32 unused_1;
};
/* Output (16 bytes) */
@@ -4693,6 +4826,7 @@ struct hwrm_tunnel_dst_port_free_input {
u8 tunnel_type;
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -4848,6 +4982,8 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
@@ -4888,6 +5024,8 @@ struct hwrm_fw_qstatus_input {
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
u8 unused_0[7];
};
@@ -5324,6 +5462,32 @@ struct hwrm_wol_reason_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_read_direct */
+/* Input (32 bytes) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* Output (16 bytes) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -5676,6 +5840,105 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
+/* hwrm_nvm_get_variable */
+/* Input (40 bytes) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable */
+/* Input (40 bytes) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1
+ u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ u8 unused_0[7];
+};
+
/* hwrm_selftest_qlist */
/* Input (16 bytes) */
struct hwrm_selftest_qlist_input {
@@ -5686,7 +5949,7 @@ struct hwrm_selftest_qlist_input {
__le64 resp_addr;
};
-/* Output (248 bytes) */
+/* Output (280 bytes) */
struct hwrm_selftest_qlist_output {
__le16 error_code;
__le16 req_type;
@@ -5698,15 +5961,15 @@ struct hwrm_selftest_qlist_output {
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 offline_tests;
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL
- #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 unused_0;
__le16 test_timeout;
u8 unused_1;
@@ -5719,6 +5982,11 @@ struct hwrm_selftest_qlist_output {
char test5_name[32];
char test6_name[32];
char test7_name[32];
+ __le32 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 unused_6;
+ u8 valid;
};
/* hwrm_selftest_exec */
@@ -5734,8 +6002,8 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL
- #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
u8 unused_0[7];
};
@@ -5750,16 +6018,21 @@ struct hwrm_selftest_exec_output {
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
u8 test_success;
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL
- #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL
- __le16 unused_0[3];
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
};
/* hwrm_selftest_irq */
@@ -5772,12 +6045,50 @@ struct hwrm_selftest_irq_input {
__le64 resp_addr;
};
-/* Output (8 bytes) */
+/* Output (16 bytes) */
struct hwrm_selftest_irq_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_selftest_retrieve_serdes_data */
+/* Input (32 bytes) */
+struct hwrm_selftest_retrieve_serdes_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 resp_data_addr;
+ __le32 resp_data_offset;
+ __le16 data_len;
+ u8 flags;
+ #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL
+ #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0
+ #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_retrieve_serdes_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 total_data_len;
+ __le16 copied_data_len;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
};
/* Hardware Resource Manager Specification */
@@ -5938,10 +6249,16 @@ struct cmd_nums {
#define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
#define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
#define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL)
+ #define HWRM_CFA_PAIR_ALLOC (0x10dUL)
+ #define HWRM_CFA_PAIR_FREE (0x10eUL)
+ #define HWRM_CFA_PAIR_INFO (0x10fUL)
+ #define HWRM_FW_IPC_MSG (0x110UL)
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
- #define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL)
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -5949,6 +6266,9 @@ struct cmd_nums {
#define HWRM_DBG_DUMP (0xff14UL)
#define HWRM_DBG_ERASE_NVM (0xff15UL)
#define HWRM_DBG_CFG (0xff16UL)
+ #define HWRM_DBG_COREDUMP_LIST (0xff17UL)
+ #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL)
+ #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL)
#define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
@@ -6123,6 +6443,58 @@ struct rx_port_stats {
__le64 rx_stat_err;
};
+/* VXLAN IPv4 encapsulation structure (16 bytes) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* VXLAN IPv6 encapsulation structure (32 bytes) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* VXLAN encapsulation structure (72 bytes) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+};
+
/* Periodic Statistics Context DMA to host (160 bytes) */
struct ctx_hw_stats {
__le64 rx_ucast_pkts;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 7dd3d131043a..d5031f436f83 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -16,6 +16,7 @@
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -23,8 +24,6 @@
#include "bnxt_tc.h"
#include "bnxt_vfr.h"
-#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
-
#define BNXT_FID_INVALID 0xffff
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
@@ -91,6 +90,23 @@ static void bnxt_tc_parse_vlan(struct bnxt *bp,
}
}
+static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
+{
+ struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
+ struct ip_tunnel_key *tun_key = &tun_info->key;
+
+ if (ip_tunnel_info_af(tun_info) != AF_INET) {
+ netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
+ return -EOPNOTSUPP;
+ }
+
+ actions->tun_encap_key = *tun_key;
+ actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
+ return 0;
+}
+
static int bnxt_tc_parse_actions(struct bnxt *bp,
struct bnxt_tc_actions *actions,
struct tcf_exts *tc_exts)
@@ -125,9 +141,35 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
bnxt_tc_parse_vlan(bp, actions, tc_act);
continue;
}
+
+ /* Tunnel encap */
+ if (is_tcf_tunnel_set(tc_act)) {
+ rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ /* Tunnel decap */
+ if (is_tcf_tunnel_release(tc_act)) {
+ actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
+ continue;
+ }
}
- return 0;
+ if (rc)
+ return rc;
+
+ /* Tunnel encap/decap action must be accompanied by a redirect action */
+ if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
+ actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
+ !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
+ netdev_info(bp->dev,
+ "error: no redir action along with encap/decap");
+ return -EINVAL;
+ }
+
+ return rc;
}
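The check above means a tunnel_key set/unset action is only accepted together with a redirect. An illustrative tc command that satisfies this for the encap direction, assuming a clsact qdisc is already installed (device names, addresses and VNI are placeholders):

	# tc filter add dev pf0vf0 ingress protocol ip flower \
		action tunnel_key set src_ip 192.168.1.1 dst_ip 192.168.1.2 id 42 dst_port 4789 \
		action mirred egress redirect dev vxlan0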
#define GET_KEY(flow_cmd, key_type) \
@@ -254,6 +296,54 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
flow->l4_mask.icmp.code = mask->code;
}
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
+
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ GET_MASK(tc_flow_cmd,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
+ flow->tun_key.u.ipv4.dst = key->dst;
+ flow->tun_mask.u.ipv4.dst = mask->dst;
+ flow->tun_key.u.ipv4.src = key->src;
+ flow->tun_mask.u.ipv4.src = mask->src;
+ } else if (dissector_uses_key(dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
+ struct flow_dissector_key_keyid *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
+ flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
+ flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
+ struct flow_dissector_key_ports *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
+ flow->tun_key.tp_dst = key->dst;
+ flow->tun_mask.tp_dst = mask->dst;
+ flow->tun_key.tp_src = key->src;
+ flow->tun_mask.tp_src = mask->src;
+ }
+
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
@@ -295,7 +385,8 @@ static bool is_wildcard(void *mask, int len)
}
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
- __le16 ref_flow_handle, __le16 *flow_handle)
+ __le16 ref_flow_handle,
+ __le32 tunnel_handle, __le16 *flow_handle)
{
struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions;
@@ -309,6 +400,14 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
req.src_fid = cpu_to_le16(flow->src_fid);
req.ref_flow_handle = ref_flow_handle;
+
+ if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
+ actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
+ req.tunnel_handle = tunnel_handle;
+ flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
+ action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
+ }
+
req.ethertype = flow->l2_key.ether_type;
req.ip_proto = flow->l4_key.ip_proto;
@@ -405,78 +504,153 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
return rc;
}
-/* Add val to accum while handling a possible wraparound
- * of val. Eventhough val is of type u64, its actual width
- * is denoted by mask and will wrap-around beyond that width.
- */
-static void accumulate_val(u64 *accum, u64 val, u64 mask)
+static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_l2_key *l2_info,
+ __le32 ref_decap_handle,
+ __le32 *decap_filter_handle)
{
-#define low_bits(x, mask) ((x) & (mask))
-#define high_bits(x, mask) ((x) & ~(mask))
- bool wrapped = val < low_bits(*accum, mask);
+ struct hwrm_cfa_decap_filter_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+ struct ip_tunnel_key *tun_key = &flow->tun_key;
+ u32 enables = 0;
+ int rc;
- *accum = high_bits(*accum, mask) + val;
- if (wrapped)
- *accum += (mask + 1);
-}
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
-/* The HW counters' width is much less than 64bits.
- * Handle possible wrap-around while updating the stat counters
- */
-static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
- struct bnxt_tc_flow_stats *stats,
- struct bnxt_tc_flow_stats *hw_stats)
-{
- accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
- accumulate_val(&stats->packets, hw_stats->packets,
- tc_info->packets_mask);
+ req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
+ CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
+ req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
+ /* tunnel_id is wrongly defined in the HSI definition as __le32 */
+ req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
+ }
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
+ CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
+ ether_addr_copy(req.dst_macaddr, l2_info->dmac);
+ ether_addr_copy(req.src_macaddr, l2_info->smac);
+ }
+ if (l2_info->num_vlans) {
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
+ req.t_ivlan_vid = l2_info->inner_vlan_tci;
+ }
+
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
+ req.ethertype = htons(ETH_P_IP);
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
+ CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
+ CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
+ req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+ req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
+ req.src_ipaddr[0] = tun_key->u.ipv4.src;
+ }
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
+ enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
+ req.dst_port = tun_key->tp_dst;
+ }
+
+ /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
+ * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
+ */
+ req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
+ req.enables = cpu_to_le32(enables);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *decap_filter_handle = resp->decap_filter_id;
+ else
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
}
-/* Fix possible wraparound of the stats queried from HW, calculate
- * the delta from prev_stats, and also update the prev_stats.
- * The HW flow stats are fetched under the hwrm_cmd_lock mutex.
- * This routine is best called while under the mutex so that the
- * stats processing happens atomically.
- */
-static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
- struct bnxt_tc_flow *flow,
- struct bnxt_tc_flow_stats *stats)
+static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
+ __le32 decap_filter_handle)
{
- struct bnxt_tc_flow_stats *acc_stats, *prev_stats;
+ struct hwrm_cfa_decap_filter_free_input req = { 0 };
+ int rc;
- acc_stats = &flow->stats;
- bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
+ req.decap_filter_id = decap_filter_handle;
- prev_stats = &flow->prev_stats;
- stats->bytes = acc_stats->bytes - prev_stats->bytes;
- stats->packets = acc_stats->packets - prev_stats->packets;
- *prev_stats = *acc_stats;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ return rc;
}
-static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
- __le16 flow_handle,
- struct bnxt_tc_flow *flow,
- struct bnxt_tc_flow_stats *stats)
+static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
+ struct ip_tunnel_key *encap_key,
+ struct bnxt_tc_l2_key *l2_info,
+ __le32 *encap_record_handle)
{
- struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_flow_stats_input req = { 0 };
+ struct hwrm_cfa_encap_record_alloc_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+ struct hwrm_cfa_encap_data_vxlan *encap =
+ (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
+ struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
+ (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
int rc;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
- req.num_flows = cpu_to_le16(1);
- req.flow_handle_0 = flow_handle;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc) {
- stats->packets = le64_to_cpu(resp->packet_0);
- stats->bytes = le64_to_cpu(resp->byte_0);
- bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
- } else {
- netdev_info(bp->dev, "error rc=%d", rc);
+ req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
+
+ ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
+ ether_addr_copy(encap->src_mac_addr, l2_info->smac);
+ if (l2_info->num_vlans) {
+ encap->num_vlan_tags = l2_info->num_vlans;
+ encap->ovlan_tci = l2_info->inner_vlan_tci;
+ encap->ovlan_tpid = l2_info->inner_vlan_tpid;
}
+ encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
+ encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
+ encap_ipv4->ttl = encap_key->ttl;
+
+ encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
+ encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
+ encap_ipv4->protocol = IPPROTO_UDP;
+
+ encap->dst_port = encap_key->tp_dst;
+ encap->vni = tunnel_id_to_key32(encap_key->tun_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *encap_record_handle = resp->encap_record_id;
+ else
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+static int hwrm_cfa_encap_record_free(struct bnxt *bp,
+ __le32 encap_record_handle)
+{
+ struct hwrm_cfa_encap_record_free_input req = { 0 };
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
+ req.encap_record_id = encap_record_handle;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
return rc;
}
@@ -484,7 +658,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
int rc;
/* remove flow_node from the L2 shared flow list */
@@ -521,7 +695,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
rc = rhashtable_insert_fast(l2_table, &l2_node->node,
ht_params);
if (rc) {
- kfree(l2_node);
+ kfree_rcu(l2_node, rcu);
netdev_err(bp->dev,
"Error: %s: rhashtable_insert_fast: %d",
__func__, rc);
@@ -540,7 +714,7 @@ bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
struct bnxt_tc_flow_node *flow_node,
__le16 *ref_flow_handle)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *ref_flow_node;
struct bnxt_tc_l2_node *l2_node;
@@ -590,10 +764,386 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
return true;
}
+/* Returns the final refcount of the node on success
+ * or a negative error code on failure
+ */
+static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
+ struct rhashtable *tunnel_table,
+ struct rhashtable_params *ht_params,
+ struct bnxt_tc_tunnel_node *tunnel_node)
+{
+ int rc;
+
+ if (--tunnel_node->refcount == 0) {
+ rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
+ *ht_params);
+ if (rc) {
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ rc = -1;
+ }
+ kfree_rcu(tunnel_node, rcu);
+ return rc;
+ } else {
+ return tunnel_node->refcount;
+ }
+}
+
+/* Get (or add) either encap or decap tunnel node from/to the supplied
+ * hash table.
+ */
+static struct bnxt_tc_tunnel_node *
+bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
+ struct rhashtable_params *ht_params,
+ struct ip_tunnel_key *tun_key)
+{
+ struct bnxt_tc_tunnel_node *tunnel_node;
+ int rc;
+
+ tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
+ if (!tunnel_node) {
+ tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
+ if (!tunnel_node) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ tunnel_node->key = *tun_key;
+ tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
+ rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
+ *ht_params);
+ if (rc) {
+ kfree_rcu(tunnel_node, rcu);
+ goto err;
+ }
+ }
+ tunnel_node->refcount++;
+ return tunnel_node;
+err:
+ netdev_info(bp->dev, "error rc=%d", rc);
+ return NULL;
+}
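For reference, a minimal sketch of the expected get/put pairing for the two helpers above (illustrative only, not part of the patch; bp, tc_info and key stand for locals already in scope):

	struct bnxt_tc_tunnel_node *n1, *n2;

	n1 = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, &key); /* refcount == 1, tunnel_handle == INVALID_TUNNEL_HANDLE */
	n2 = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, &key); /* same node, refcount == 2 */

	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, n1);       /* returns 1, node kept */
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, n2);       /* returns 0, node freed via kfree_rcu() */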
+
+static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_l2_key *l2_key,
+ struct bnxt_tc_flow_node *flow_node,
+ __le32 *ref_decap_handle)
+{
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ struct bnxt_tc_flow_node *ref_flow_node;
+ struct bnxt_tc_l2_node *decap_l2_node;
+
+ decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
+ tc_info->decap_l2_ht_params,
+ l2_key);
+ if (!decap_l2_node)
+ return -1;
+
+ /* If any other flow is using this decap_l2_node, use its decap_handle
+ * as the ref_decap_handle
+ */
+ if (decap_l2_node->refcount > 0) {
+ ref_flow_node =
+ list_first_entry(&decap_l2_node->common_l2_flows,
+ struct bnxt_tc_flow_node,
+ decap_l2_list_node);
+ *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
+ } else {
+ *ref_decap_handle = INVALID_TUNNEL_HANDLE;
+ }
+
+ /* Insert the l2_node into the flow_node so that subsequent flows
+ * with a matching decap l2 key can use the decap_filter_handle of
+ * this flow as their ref_decap_handle
+ */
+ flow_node->decap_l2_node = decap_l2_node;
+ list_add(&flow_node->decap_l2_list_node,
+ &decap_l2_node->common_l2_flows);
+ decap_l2_node->refcount++;
+ return 0;
+}
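To make the handle sharing above concrete, a hypothetical trace (flow and filter names are invented for illustration):

	/* flow F1: first user of this tunnel L2 key */
	bnxt_tc_get_ref_decap_handle(...);	/* *ref_decap_handle = INVALID_TUNNEL_HANDLE */
	hwrm_cfa_decap_filter_alloc(...);	/* FW allocates decap filter D1 */

	/* flow F2: same tunnel L2 key */
	bnxt_tc_get_ref_decap_handle(...);	/* finds F1 on the decap_l2_node list, *ref_decap_handle = D1 */
	hwrm_cfa_decap_filter_alloc(...);	/* D1 is passed to FW as l2_ctxt_ref_id */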
+
+static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
+{
+ struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ int rc;
+
+ /* remove flow_node from the decap L2 sharing flow list */
+ list_del(&flow_node->decap_l2_list_node);
+ if (--decap_l2_node->refcount == 0) {
+ rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
+ &decap_l2_node->node,
+ tc_info->decap_l2_ht_params);
+ if (rc)
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ kfree_rcu(decap_l2_node, rcu);
+ }
+}
+
+static void bnxt_tc_put_decap_handle(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
+{
+ __le32 decap_handle = flow_node->decap_node->tunnel_handle;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ int rc;
+
+ if (flow_node->decap_l2_node)
+ bnxt_tc_put_decap_l2_node(bp, flow_node);
+
+ rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
+ &tc_info->decap_ht_params,
+ flow_node->decap_node);
+ if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
+ hwrm_cfa_decap_filter_free(bp, decap_handle);
+}
+
+static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
+ struct ip_tunnel_key *tun_key,
+ struct bnxt_tc_l2_key *l2_info,
+ struct net_device *real_dst_dev)
+{
+#ifdef CONFIG_INET
+ struct flowi4 flow = { {0} };
+ struct net_device *dst_dev;
+ struct neighbour *nbr;
+ struct rtable *rt;
+ int rc;
+
+ flow.flowi4_proto = IPPROTO_UDP;
+ flow.fl4_dport = tun_key->tp_dst;
+ flow.daddr = tun_key->u.ipv4.dst;
+
+ rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
+ if (IS_ERR(rt)) {
+ netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
+ return -EOPNOTSUPP;
+ }
+
+ /* The route must either point to the real_dst_dev or a dst_dev that
+ * uses the real_dst_dev.
+ */
+ dst_dev = rt->dst.dev;
+ if (is_vlan_dev(dst_dev)) {
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
+
+ if (vlan->real_dev != real_dst_dev) {
+ netdev_info(bp->dev,
+ "dst_dev(%s) doesn't use PF-if(%s)",
+ netdev_name(dst_dev),
+ netdev_name(real_dst_dev));
+ rc = -EOPNOTSUPP;
+ goto put_rt;
+ }
+ l2_info->inner_vlan_tci = htons(vlan->vlan_id);
+ l2_info->inner_vlan_tpid = vlan->vlan_proto;
+ l2_info->num_vlans = 1;
+#endif
+ } else if (dst_dev != real_dst_dev) {
+ netdev_info(bp->dev,
+ "dst_dev(%s) for %pI4b is not PF-if(%s)",
+ netdev_name(dst_dev), &flow.daddr,
+ netdev_name(real_dst_dev));
+ rc = -EOPNOTSUPP;
+ goto put_rt;
+ }
+
+ nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ if (!nbr) {
+ netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
+ &flow.daddr);
+ rc = -EOPNOTSUPP;
+ goto put_rt;
+ }
+
+ tun_key->u.ipv4.src = flow.saddr;
+ tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
+ neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
+ ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
+ neigh_release(nbr);
+ ip_rt_put(rt);
+
+ return 0;
+put_rt:
+ ip_rt_put(rt);
+ return rc;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_node *flow_node,
+ __le32 *decap_filter_handle)
+{
+ struct ip_tunnel_key *decap_key = &flow->tun_key;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ struct bnxt_tc_l2_key l2_info = { {0} };
+ struct bnxt_tc_tunnel_node *decap_node;
+ struct ip_tunnel_key tun_key = { 0 };
+ struct bnxt_tc_l2_key *decap_l2_info;
+ __le32 ref_decap_handle;
+ int rc;
+
+ /* Check if there's another flow using the same tunnel decap.
+ * If not, add this tunnel to the table and resolve the other
+ * tunnel header fields
+ */
+ decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
+ &tc_info->decap_ht_params,
+ decap_key);
+ if (!decap_node)
+ return -ENOMEM;
+
+ flow_node->decap_node = decap_node;
+
+ if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
+ goto done;
+
+ /* Resolve the L2 fields for tunnel decap
+ * Resolve the route for remote vtep (saddr) of the decap key
+ * Find its next-hop MAC addresses
+ */
+ tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
+ tun_key.tp_dst = flow->tun_key.tp_dst;
+ rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
+ if (rc)
+ goto put_decap;
+
+ decap_key->ttl = tun_key.ttl;
+ decap_l2_info = &decap_node->l2_info;
+ ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
+ ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
+ if (l2_info.num_vlans) {
+ decap_l2_info->num_vlans = l2_info.num_vlans;
+ decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
+ decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
+ }
+ flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
+
+ /* For getting a decap_filter_handle we first need to check if
+ * there are any other decap flows that share the same tunnel L2
+ * key and if so, pass that flow's decap_filter_handle as the
+ * ref_decap_handle for this flow.
+ */
+ rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
+ &ref_decap_handle);
+ if (rc)
+ goto put_decap;
+
+ /* Issue the hwrm cmd to allocate a decap filter handle */
+ rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
+ ref_decap_handle,
+ &decap_node->tunnel_handle);
+ if (rc)
+ goto put_decap_l2;
+
+done:
+ *decap_filter_handle = decap_node->tunnel_handle;
+ return 0;
+
+put_decap_l2:
+ bnxt_tc_put_decap_l2_node(bp, flow_node);
+put_decap:
+ bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
+ &tc_info->decap_ht_params,
+ flow_node->decap_node);
+ return rc;
+}
+
+static void bnxt_tc_put_encap_handle(struct bnxt *bp,
+ struct bnxt_tc_tunnel_node *encap_node)
+{
+ __le32 encap_handle = encap_node->tunnel_handle;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ int rc;
+
+ rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
+ &tc_info->encap_ht_params, encap_node);
+ if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
+ hwrm_cfa_encap_record_free(bp, encap_handle);
+}
+
+/* Look up the tunnel encap table and check if there's an encap_handle
+ * alloc'd already.
+ * If not, query L2 info via a route lookup and issue an encap_record_alloc
+ * cmd to FW.
+ */
+static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_node *flow_node,
+ __le32 *encap_handle)
+{
+ struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ struct bnxt_tc_tunnel_node *encap_node;
+ int rc;
+
+ /* Check if there's another flow using the same tunnel encap.
+ * If not, add this tunnel to the table and resolve the other
+ * tunnel header fields
+ */
+ encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
+ &tc_info->encap_ht_params,
+ encap_key);
+ if (!encap_node)
+ return -ENOMEM;
+
+ flow_node->encap_node = encap_node;
+
+ if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
+ goto done;
+
+ rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
+ flow->actions.dst_dev);
+ if (rc)
+ goto put_encap;
+
+ /* Allocate a new tunnel encap record */
+ rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
+ &encap_node->tunnel_handle);
+ if (rc)
+ goto put_encap;
+
+done:
+ *encap_handle = encap_node->tunnel_handle;
+ return 0;
+
+put_encap:
+ bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
+ &tc_info->encap_ht_params, encap_node);
+ return rc;
+}
+
+static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_node *flow_node)
+{
+ if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+ bnxt_tc_put_decap_handle(bp, flow_node);
+ else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
+ bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
+}
+
+static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_node *flow_node,
+ __le32 *tunnel_handle)
+{
+ if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
+ return bnxt_tc_get_decap_handle(bp, flow, flow_node,
+ tunnel_handle);
+ else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
+ return bnxt_tc_get_encap_handle(bp, flow, flow_node,
+ tunnel_handle);
+ else
+ return 0;
+}
static int __bnxt_tc_del_flow(struct bnxt *bp,
struct bnxt_tc_flow_node *flow_node)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
int rc;
/* send HWRM cmd to free the flow-id */
@@ -601,6 +1151,9 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
mutex_lock(&tc_info->lock);
+ /* release references to any tunnel encap/decap nodes */
+ bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
+
/* release reference to l2 node */
bnxt_tc_put_l2_node(bp, flow_node);
@@ -633,8 +1186,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *tc_flow_cmd)
{
struct bnxt_tc_flow_node *new_node, *old_node;
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow *flow;
+ __le32 tunnel_handle = 0;
__le16 ref_flow_handle;
int rc;
@@ -672,12 +1226,19 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (rc)
goto unlock;
+ /* If the flow involves tunnel encap/decap, get tunnel_handle */
+ rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
+ if (rc)
+ goto put_l2;
+
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
- &new_node->flow_handle);
+ tunnel_handle, &new_node->flow_handle);
if (rc)
- goto put_l2;
+ goto put_tunnel;
+ flow->lastused = jiffies;
+ spin_lock_init(&flow->stats_lock);
/* add new flow to flow-table */
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
tc_info->flow_ht_params);
@@ -689,12 +1250,14 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
hwrm_flow_free:
bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+put_tunnel:
+ bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
bnxt_tc_put_l2_node(bp, new_node);
unlock:
mutex_unlock(&tc_info->lock);
free_node:
- kfree(new_node);
+ kfree_rcu(new_node, rcu);
done:
netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
__func__, tc_flow_cmd->cookie, rc);
@@ -704,7 +1267,7 @@ done:
static int bnxt_tc_del_flow(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
@@ -722,10 +1285,11 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
- struct bnxt_tc_flow_stats stats;
- int rc;
+ struct bnxt_tc_flow *flow;
+ unsigned long lastused;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -736,22 +1300,189 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return -1;
}
- rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
- &flow_node->flow, &stats);
+ flow = &flow_node->flow;
+ curr_stats = &flow->stats;
+ prev_stats = &flow->prev_stats;
+
+ spin_lock(&flow->stats_lock);
+ stats.packets = curr_stats->packets - prev_stats->packets;
+ stats.bytes = curr_stats->bytes - prev_stats->bytes;
+ *prev_stats = *curr_stats;
+ lastused = flow->lastused;
+ spin_unlock(&flow->stats_lock);
+
+ tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
+ lastused);
+ return 0;
+}
+
+static int
+bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
+ struct bnxt_tc_stats_batch stats_batch[])
+{
+ struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_flow_stats_input req = { 0 };
+ __le16 *req_flow_handles = &req.flow_handle_0;
+ int rc, i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
+ req.num_flows = cpu_to_le16(num_flows);
+ for (i = 0; i < num_flows; i++) {
+ struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
+
+ req_flow_handles[i] = flow_node->flow_handle;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ __le64 *resp_packets = &resp->packet_0;
+ __le64 *resp_bytes = &resp->byte_0;
+
+ for (i = 0; i < num_flows; i++) {
+ stats_batch[i].hw_stats.packets =
+ le64_to_cpu(resp_packets[i]);
+ stats_batch[i].hw_stats.bytes =
+ le64_to_cpu(resp_bytes[i]);
+ }
+ } else {
+ netdev_info(bp->dev, "error rc=%d", rc);
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+/* Add val to accum while handling a possible wraparound
+ * of val. Even though val is of type u64, its actual width
+ * is denoted by mask and will wrap-around beyond that width.
+ */
+static void accumulate_val(u64 *accum, u64 val, u64 mask)
+{
+#define low_bits(x, mask) ((x) & (mask))
+#define high_bits(x, mask) ((x) & ~(mask))
+ bool wrapped = val < low_bits(*accum, mask);
+
+ *accum = high_bits(*accum, mask) + val;
+ if (wrapped)
+ *accum += (mask + 1);
+}
+
+/* The HW counters' width is much less than 64 bits.
+ * Handle possible wrap-around while updating the stat counters
+ */
+static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
+ struct bnxt_tc_flow_stats *acc_stats,
+ struct bnxt_tc_flow_stats *hw_stats)
+{
+ accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
+ accumulate_val(&acc_stats->packets, hw_stats->packets,
+ tc_info->packets_mask);
+}
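A worked example of the wraparound handling above, using an 8-bit mask for brevity (the real masks come from the FW-programmed counter widths):

	/* mask = 0xff, accumulated value so far *accum = 0x1f0
	 * (high bits 0x100, low bits 0xf0).  The HW counter wraps from
	 * 0xf0 past 0xff and the next reading is val = 0x10:
	 *
	 *	wrapped = 0x10 < 0xf0		-> true
	 *	*accum  = 0x100 + 0x10		-> 0x110
	 *	*accum += mask + 1 (0x100)	-> 0x210
	 *
	 * 0x210 == 0x1f0 + 0x20, i.e. the 0x20 units counted across the
	 * wrap are accumulated correctly.
	 */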
+
+static int
+bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
+ struct bnxt_tc_stats_batch stats_batch[])
+{
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ int rc, i;
+
+ rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
if (rc)
return rc;
- tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
+ for (i = 0; i < num_flows; i++) {
+ struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
+ struct bnxt_tc_flow *flow = &flow_node->flow;
+
+ spin_lock(&flow->stats_lock);
+ bnxt_flow_stats_accum(tc_info, &flow->stats,
+ &stats_batch[i].hw_stats);
+ if (flow->stats.packets != flow->prev_stats.packets)
+ flow->lastused = jiffies;
+ spin_unlock(&flow->stats_lock);
+ }
+
return 0;
}
+static int
+bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
+ struct bnxt_tc_stats_batch stats_batch[],
+ int *num_flows)
+{
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ struct rhashtable_iter *iter = &tc_info->iter;
+ void *flow_node;
+ int rc, i;
+
+ rc = rhashtable_walk_start(iter);
+ if (rc && rc != -EAGAIN) {
+ i = 0;
+ goto done;
+ }
+
+ rc = 0;
+ for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
+ flow_node = rhashtable_walk_next(iter);
+ if (IS_ERR(flow_node)) {
+ i = 0;
+ if (PTR_ERR(flow_node) == -EAGAIN) {
+ continue;
+ } else {
+ rc = PTR_ERR(flow_node);
+ goto done;
+ }
+ }
+
+ /* No more flows */
+ if (!flow_node)
+ goto done;
+
+ stats_batch[i].flow_node = flow_node;
+ }
+done:
+ rhashtable_walk_stop(iter);
+ *num_flows = i;
+ return rc;
+}
+
+void bnxt_tc_flow_stats_work(struct bnxt *bp)
+{
+ struct bnxt_tc_info *tc_info = bp->tc_info;
+ int num_flows, rc;
+
+ num_flows = atomic_read(&tc_info->flow_table.nelems);
+ if (!num_flows)
+ return;
+
+ rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
+
+ for (;;) {
+ rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
+ &num_flows);
+ if (rc) {
+ if (rc == -EAGAIN)
+ continue;
+ break;
+ }
+
+ if (!num_flows)
+ break;
+
+ bnxt_tc_flow_stats_batch_update(bp, num_flows,
+ tc_info->stats_batch);
+ }
+
+ rhashtable_walk_exit(&tc_info->iter);
+}
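bnxt_tc_flow_stats_work() only makes one pass over the flow table; how it gets invoked periodically is outside this hunk. One plausible way to drive it is from a delayed work item, sketched below under that assumption (the tc_stats_work member and the one-second interval are invented for illustration and are not part of this patch):

/* Assumed scheduling sketch: bp->tc_stats_work does not exist in this patch */
static void bnxt_tc_stats_poll(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, tc_stats_work.work);

	bnxt_tc_flow_stats_work(bp);
	/* re-arm roughly once a second while flower offload is active */
	schedule_delayed_work(&bp->tc_stats_work, HZ);
}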
+
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{
int rc = 0;
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
@@ -784,19 +1515,37 @@ static const struct rhashtable_params bnxt_tc_l2_ht_params = {
.automatic_shrinking = true
};
+static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
+ .head_offset = offsetof(struct bnxt_tc_l2_node, node),
+ .key_offset = offsetof(struct bnxt_tc_l2_node, key),
+ .key_len = BNXT_TC_L2_KEY_LEN,
+ .automatic_shrinking = true
+};
+
+static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
+ .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
+ .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
+ .key_len = sizeof(struct ip_tunnel_key),
+ .automatic_shrinking = true
+};
+
/* convert counter width in bits to a mask */
#define mask(width) ((u64)~0 >> (64 - (width)))
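A quick check of the macro above, computed directly from its definition:

	/* mask(24) == 0x0000000000ffffff, mask(32) == 0x00000000ffffffff */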
int bnxt_init_tc(struct bnxt *bp)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info;
int rc;
- if (bp->hwrm_spec_code < 0x10800) {
+ if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev,
"Firmware does not support TC flower offload.\n");
return -ENOTSUPP;
}
+
+ tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
+ if (!tc_info)
+ return -ENOMEM;
mutex_init(&tc_info->lock);
/* Counter widths are programmed by FW */
@@ -806,33 +1555,62 @@ int bnxt_init_tc(struct bnxt *bp)
tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
if (rc)
- return rc;
+ goto free_tc_info;
tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
if (rc)
goto destroy_flow_table;
+ tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
+ rc = rhashtable_init(&tc_info->decap_l2_table,
+ &tc_info->decap_l2_ht_params);
+ if (rc)
+ goto destroy_l2_table;
+
+ tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
+ rc = rhashtable_init(&tc_info->decap_table,
+ &tc_info->decap_ht_params);
+ if (rc)
+ goto destroy_decap_l2_table;
+
+ tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
+ rc = rhashtable_init(&tc_info->encap_table,
+ &tc_info->encap_ht_params);
+ if (rc)
+ goto destroy_decap_table;
+
tc_info->enabled = true;
bp->dev->hw_features |= NETIF_F_HW_TC;
bp->dev->features |= NETIF_F_HW_TC;
+ bp->tc_info = tc_info;
return 0;
+destroy_decap_table:
+ rhashtable_destroy(&tc_info->decap_table);
+destroy_decap_l2_table:
+ rhashtable_destroy(&tc_info->decap_l2_table);
+destroy_l2_table:
+ rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
rhashtable_destroy(&tc_info->flow_table);
+free_tc_info:
+ kfree(tc_info);
return rc;
}
void bnxt_shutdown_tc(struct bnxt *bp)
{
- struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_info *tc_info = bp->tc_info;
- if (!tc_info->enabled)
+ if (!bnxt_tc_flower_enabled(bp))
return;
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
+ rhashtable_destroy(&tc_info->decap_l2_table);
+ rhashtable_destroy(&tc_info->decap_table);
+ rhashtable_destroy(&tc_info->encap_table);
+ kfree(tc_info);
+ bp->tc_info = NULL;
}
-
-#else
-#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 6c4c1ed279ef..97e09a880693 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -12,6 +12,8 @@
#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+#include <net/ip_tunnels.h>
+
/* Structs used for storing the filter/actions of the TC cmd.
*/
struct bnxt_tc_l2_key {
@@ -50,6 +52,13 @@ struct bnxt_tc_l4_key {
};
};
+struct bnxt_tc_tunnel_key {
+ struct bnxt_tc_l2_key l2;
+ struct bnxt_tc_l3_key l3;
+ struct bnxt_tc_l4_key l4;
+ __be32 id;
+};
+
struct bnxt_tc_actions {
u32 flags;
#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
@@ -57,16 +66,16 @@ struct bnxt_tc_actions {
#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
+#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6)
+#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7)
u16 dst_fid;
struct net_device *dst_dev;
__be16 push_vlan_tpid;
__be16 push_vlan_tci;
-};
-struct bnxt_tc_flow_stats {
- u64 packets;
- u64 bytes;
+ /* tunnel encap */
+ struct ip_tunnel_key tun_encap_key;
};
struct bnxt_tc_flow {
@@ -76,6 +85,16 @@ struct bnxt_tc_flow {
#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
+#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6)
+#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7)
+#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8)
+#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9)
+#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10)
+#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \
+ BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \
+ BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\
+ BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\
+ BNXT_TC_FLOW_FLAGS_TUNL_ID)
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
@@ -85,6 +104,8 @@ struct bnxt_tc_flow {
struct bnxt_tc_l3_key l3_mask;
struct bnxt_tc_l4_key l4_key;
struct bnxt_tc_l4_key l4_mask;
+ struct ip_tunnel_key tun_key;
+ struct ip_tunnel_key tun_mask;
struct bnxt_tc_actions actions;
@@ -93,13 +114,39 @@ struct bnxt_tc_flow {
/* previous snap-shot of stats */
struct bnxt_tc_flow_stats prev_stats;
unsigned long lastused; /* jiffies */
+ /* for calculating delta from prev_stats and
+ * updating prev_stats atomically.
+ */
+ spinlock_t stats_lock;
+};
+
+/* Tunnel encap/decap hash table
+ * This table is used to maintain a list of flows that use
+ * the same tunnel encap/decap params (ip_daddrs, vni, udp_dport)
+ * and the FW returned handle.
+ * A separate table is maintained for encap and decap
+ */
+struct bnxt_tc_tunnel_node {
+ struct ip_tunnel_key key;
+ struct rhash_head node;
+
+ /* tunnel l2 info */
+ struct bnxt_tc_l2_key l2_info;
+
+#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff)
+ /* tunnel handle returned by FW */
+ __le32 tunnel_handle;
+
+ u32 refcount;
+ struct rcu_head rcu;
};
/* L2 hash table
- * This data-struct is used for L2-flow table.
- * The L2 part of a flow is stored in a hash table.
+ * The same data-struct is used for L2-flow table and L2-tunnel table.
+ * The L2 part of a flow or tunnel is stored in a hash table.
* A flow that shares the same L2 key/mask with an
- * already existing flow must refer to it's flow handle.
+ * already existing flow/tunnel must refer to its flow handle or
+ * decap_filter_id respectively.
*/
struct bnxt_tc_l2_node {
/* hash key: first 16b of key */
@@ -110,7 +157,7 @@ struct bnxt_tc_l2_node {
/* a linked list of flows that share the same l2 key */
struct list_head common_l2_flows;
- /* number of flows sharing the l2 key */
+ /* number of flows/tunnels sharing the l2 key */
u16 refcount;
struct rcu_head rcu;
@@ -130,6 +177,16 @@ struct bnxt_tc_flow_node {
/* for the shared_flows list maintained in l2_node */
struct list_head l2_list_node;
+ /* tunnel encap related */
+ struct bnxt_tc_tunnel_node *encap_node;
+
+ /* tunnel decap related */
+ struct bnxt_tc_tunnel_node *decap_node;
+ /* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */
+ struct bnxt_tc_l2_node *decap_l2_node;
+ /* for the shared_flows list maintained in tunnel decap l2_node */
+ struct list_head decap_l2_list_node;
+
struct rcu_head rcu;
};
@@ -137,6 +194,12 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower);
int bnxt_init_tc(struct bnxt *bp);
void bnxt_shutdown_tc(struct bnxt *bp);
+void bnxt_tc_flow_stats_work(struct bnxt *bp);
+
+static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
+{
+ return bp->tc_info && bp->tc_info->enabled;
+}
#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
@@ -154,5 +217,14 @@ static inline int bnxt_init_tc(struct bnxt *bp)
static inline void bnxt_shutdown_tc(struct bnxt *bp)
{
}
+
+static inline void bnxt_tc_flow_stats_work(struct bnxt *bp)
+{
+}
+
+static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
+{
+ return false;
+}
#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
#endif /* BNXT_TC_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index e75db04c6cdc..69186d188c43 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -16,6 +16,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
+#include "bnxt_devlink.h"
#include "bnxt_tc.h"
#ifdef CONFIG_BNXT_SRIOV
@@ -115,13 +116,17 @@ bnxt_vf_rep_get_stats64(struct net_device *dev,
stats->tx_bytes = vf_rep->tx_stats.bytes;
}
-static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data,
+ void *cb_priv)
{
- struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ struct bnxt_vf_rep *vf_rep = cb_priv;
struct bnxt *bp = vf_rep->bp;
int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
+ if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return bnxt_tc_setup_flower(bp, vf_fid, type_data);
@@ -130,6 +135,39 @@ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ bnxt_vf_rep_setup_tc_block_cb,
+ vf_rep, vf_rep);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ bnxt_vf_rep_setup_tc_block_cb, vf_rep);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return bnxt_vf_rep_setup_tc_block(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
u16 vf_idx;
@@ -416,7 +454,7 @@ err:
}
/* Devlink related routines */
-static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
@@ -424,7 +462,7 @@ static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
return 0;
}
-static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
int rc = 0;
@@ -462,52 +500,4 @@ done:
return rc;
}
-static const struct devlink_ops bnxt_dl_ops = {
- .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
- .eswitch_mode_get = bnxt_dl_eswitch_mode_get
-};
-
-int bnxt_dl_register(struct bnxt *bp)
-{
- struct devlink *dl;
- int rc;
-
- if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
- return 0;
-
- if (bp->hwrm_spec_code < 0x10800) {
- netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
- return -ENOTSUPP;
- }
-
- dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
- if (!dl) {
- netdev_warn(bp->dev, "devlink_alloc failed");
- return -ENOMEM;
- }
-
- bnxt_link_bp_to_dl(bp, dl);
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl, &bp->pdev->dev);
- if (rc) {
- bnxt_link_bp_to_dl(bp, NULL);
- devlink_free(dl);
- netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
- return rc;
- }
-
- return 0;
-}
-
-void bnxt_dl_unregister(struct bnxt *bp)
-{
- struct devlink *dl = bp->dl;
-
- if (!dl)
- return;
-
- devlink_unregister(dl);
- devlink_free(dl);
-}
-
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 7787cd24606a..fb06bbe70e42 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -14,31 +14,6 @@
#define MAX_CFA_CODE 65536
-/* Struct to hold housekeeping info needed by devlink interface */
-struct bnxt_dl {
- struct bnxt *bp; /* back ptr to the controlling dev */
-};
-
-static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
-{
- return ((struct bnxt_dl *)devlink_priv(dl))->bp;
-}
-
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
-{
- bp->dl = dl;
-
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
-
- bp_dl->bp = bp;
- }
-}
-
-int bnxt_dl_register(struct bnxt *bp);
-void bnxt_dl_unregister(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
@@ -53,16 +28,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
return bp->pf.vf[vf_rep->vf_idx].fw_fid;
}
-#else
-
-static inline int bnxt_dl_register(struct bnxt *bp)
-{
- return 0;
-}
+int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode);
-static inline void bnxt_dl_unregister(struct bnxt *bp)
-{
-}
+#else
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index d8f0c837b72c..261e5847557a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -94,6 +94,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
xdp.data_hard_start = *data_ptr - offset;
xdp.data = *data_ptr;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = *data_ptr + *len;
orig_data = xdp.data;
mapping = rx_buf->mapping - bp->rx_dma_offset;
@@ -207,7 +208,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
return 0;
}
-int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 12a5ad66b564..414b748038ca 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
u8 *event);
-int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 9cebca896913..24b4f4ceceef 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -488,15 +488,13 @@ static void bcmgenet_complete(struct net_device *dev)
static int bcmgenet_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- phy_ethtool_ksettings_get(priv->phydev, cmd);
+ phy_ethtool_ksettings_get(dev->phydev, cmd);
return 0;
}
@@ -504,15 +502,13 @@ static int bcmgenet_get_link_ksettings(struct net_device *dev,
static int bcmgenet_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_ksettings_set(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -1042,11 +1038,14 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
+ if (!dev->phydev)
+ return -ENODEV;
+
e->eee_enabled = p->eee_enabled;
e->eee_active = p->eee_active;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
- return phy_ethtool_get_eee(priv->phydev, e);
+ return phy_ethtool_get_eee(dev->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1058,12 +1057,15 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
+ if (!dev->phydev)
+ return -ENODEV;
+
p->eee_enabled = e->eee_enabled;
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(priv->phydev, 0);
+ ret = phy_init_eee(dev->phydev, 0);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -1073,7 +1075,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
bcmgenet_eee_enable_set(dev, true);
}
- return phy_ethtool_set_eee(priv->phydev, e);
+ return phy_ethtool_set_eee(dev->phydev, e);
}
/* standard ethtool support functions. */
@@ -1107,7 +1109,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_CABLE_SENSE:
- phy_detach(priv->phydev);
+ phy_detach(priv->dev->phydev);
break;
case GENET_POWER_WOL_MAGIC:
@@ -1172,7 +1174,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
break;
case GENET_POWER_CABLE_SENSE:
@@ -1193,15 +1194,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1405,11 +1404,10 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_tx_ring *ring)
{
unsigned int released;
- unsigned long flags;
- spin_lock_irqsave(&ring->lock, flags);
+ spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
- spin_unlock_irqrestore(&ring->lock, flags);
+ spin_unlock_bh(&ring->lock);
return released;
}
@@ -1420,15 +1418,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
container_of(napi, struct bcmgenet_tx_ring, napi);
unsigned int work_done = 0;
struct netdev_queue *txq;
- unsigned long flags;
- spin_lock_irqsave(&ring->lock, flags);
+ spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
netif_tx_wake_queue(txq);
}
- spin_unlock_irqrestore(&ring->lock, flags);
+ spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
@@ -1523,7 +1520,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
struct bcmgenet_tx_ring *ring = NULL;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
- unsigned long flags = 0;
int nr_frags, index;
dma_addr_t mapping;
unsigned int size;
@@ -1550,7 +1546,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
nr_frags = skb_shinfo(skb)->nr_frags;
- spin_lock_irqsave(&ring->lock, flags);
+ spin_lock(&ring->lock);
if (ring->free_bds <= (nr_frags + 1)) {
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
@@ -1584,8 +1580,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i <= nr_frags; i++) {
tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
- if (unlikely(!tx_cb_ptr))
- BUG();
+ BUG_ON(!tx_cb_ptr);
if (!i) {
/* Transmit single SKB or head of fragment list */
@@ -1645,7 +1640,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
out:
- spin_unlock_irqrestore(&ring->lock, flags);
+ spin_unlock(&ring->lock);
return ret;
@@ -1935,12 +1930,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
usleep_range(1000, 2000);
}
-static int reset_umac(struct bcmgenet_priv *priv)
+static void reset_umac(struct bcmgenet_priv *priv)
{
- struct device *kdev = &priv->pdev->dev;
- unsigned int timeout = 0;
- u32 reg;
-
/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
bcmgenet_rbuf_ctrl_set(priv, 0);
udelay(10);
@@ -1948,23 +1939,10 @@ static int reset_umac(struct bcmgenet_priv *priv)
/* disable MAC while updating its registers */
bcmgenet_umac_writel(priv, 0, UMAC_CMD);
- /* issue soft reset, wait for it to complete */
- bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
- while (timeout++ < 1000) {
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (!(reg & CMD_SW_RESET))
- return 0;
-
- udelay(1);
- }
-
- if (timeout == 1000) {
- dev_err(kdev,
- "timeout waiting for MAC to come out of reset\n");
- return -ETIMEDOUT;
- }
-
- return 0;
+ /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
+ bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
+ udelay(2);
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -1994,20 +1972,16 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
-static int init_umac(struct bcmgenet_priv *priv)
+static void init_umac(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
- int ret;
u32 reg;
u32 int0_enable = 0;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
- ret = reset_umac(priv);
- if (ret)
- return ret;
+ reset_umac(priv);
- bcmgenet_umac_writel(priv, 0, UMAC_CMD);
/* clear tx/rx counter */
bcmgenet_umac_writel(priv,
MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
@@ -2046,8 +2020,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
dev_dbg(kdev, "done init umac\n");
-
- return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
@@ -2104,6 +2076,10 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
DMA_END_ADDR);
+
+ /* Initialize Tx NAPI */
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+ NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
@@ -2135,6 +2111,10 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
if (ret)
return ret;
+ /* Initialize Rx NAPI */
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
+ NAPI_POLL_WEIGHT);
+
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
@@ -2159,50 +2139,27 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
-{
- unsigned int i;
- struct bcmgenet_tx_ring *ring;
-
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
- ring = &priv->tx_rings[i];
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
- }
-
- ring = &priv->tx_rings[DESC_INDEX];
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
-}
-
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
- u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
- u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- int1_enable |= (1 << i);
+ ring->int_enable(ring);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
-
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+ ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
- u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
- u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
- bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
- bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
-
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2286,9 +2243,6 @@ static void bcmgenet_init_tx_queues(struct net_device *dev)
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Initialize Tx NAPI */
- bcmgenet_init_tx_napi(priv);
-
/* Enable Tx queues */
bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
@@ -2298,50 +2252,27 @@ static void bcmgenet_init_tx_queues(struct net_device *dev)
bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
-static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
-{
- unsigned int i;
- struct bcmgenet_rx_ring *ring;
-
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
- ring = &priv->rx_rings[i];
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
- }
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64);
-}
-
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
- u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
- u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
+ ring->int_enable(ring);
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
-
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+ ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
- u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
- u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
- bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
- bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
-
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2414,9 +2345,6 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
ring_cfg |= (1 << DESC_INDEX);
dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- /* Initialize Rx NAPI */
- bcmgenet_init_rx_napi(priv);
-
/* Enable rings */
bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
@@ -2505,9 +2433,6 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- /* disable DMA */
- bcmgenet_dma_teardown(priv);
-
for (i = 0; i < priv->num_tx_bds; i++) {
cb = priv->tx_cbs + i;
skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
@@ -2590,27 +2515,20 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
- unsigned long flags;
unsigned int status;
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irq(&priv->lock);
status = priv->irq0_stat;
priv->irq0_stat = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (status & UMAC_IRQ_MPD_R) {
- netif_dbg(priv, wol, priv->dev,
- "magic packet detected, waking up\n");
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
- }
+ spin_unlock_irq(&priv->lock);
/* Link UP/DOWN event */
if (status & UMAC_IRQ_LINK_EVENT)
- phy_mac_interrupt(priv->phydev,
+ phy_mac_interrupt(priv->dev->phydev,
!!(status & UMAC_IRQ_LINK_UP));
}
@@ -2698,23 +2616,13 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
- UMAC_IRQ_PHY_DET_F |
- UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_HFB_SM |
- UMAC_IRQ_HFB_MM)) {
- /* all other interested interrupts handled in bottom half */
- schedule_work(&priv->bcmgenet_irq_work);
- }
-
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
}
/* all other interested interrupts handled in bottom half */
- status &= (UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_MPD_R);
+ status &= UMAC_IRQ_LINK_EVENT;
if (status) {
/* Save irq status for bottom-half processing. */
spin_lock_irqsave(&priv->lock, flags);
@@ -2849,16 +2757,16 @@ static void bcmgenet_netif_start(struct net_device *dev)
/* Start the network engine */
bcmgenet_enable_rx_napi(priv);
- bcmgenet_enable_tx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
netif_tx_start_all_queues(dev);
+ bcmgenet_enable_tx_napi(priv);
/* Monitor link interrupts now */
bcmgenet_link_intr_enable(priv);
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
}
static int bcmgenet_open(struct net_device *dev)
@@ -2882,12 +2790,7 @@ static int bcmgenet_open(struct net_device *dev)
/* take MAC out of reset */
bcmgenet_umac_reset(priv);
- ret = init_umac(priv);
- if (ret)
- goto err_clk_disable;
-
- /* disable ethernet MAC while updating its registers */
- umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+ init_umac(priv);
/* Make sure we reflect the value of CRC_CMD_FWD */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
@@ -2946,6 +2849,7 @@ err_irq1:
err_irq0:
free_irq(priv->irq0, priv);
err_fini_dma:
+ bcmgenet_dma_teardown(priv);
bcmgenet_fini_dma(priv);
err_clk_disable:
if (priv->internal_phy)
@@ -2958,11 +2862,20 @@ static void bcmgenet_netif_stop(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ bcmgenet_disable_tx_napi(priv);
netif_tx_stop_all_queues(dev);
- phy_stop(priv->phydev);
- bcmgenet_intr_disable(priv);
+
+ /* Disable MAC receive */
+ umac_enable_set(priv, CMD_RX_EN, false);
+
+ bcmgenet_dma_teardown(priv);
+
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
+ umac_enable_set(priv, CMD_TX_EN, false);
+
+ phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
- bcmgenet_disable_tx_napi(priv);
+ bcmgenet_intr_disable(priv);
/* Wait for pending work items to complete. Since interrupts are
* disabled no new work will be scheduled.
@@ -2973,33 +2886,23 @@ static void bcmgenet_netif_stop(struct net_device *dev)
priv->old_speed = -1;
priv->old_duplex = -1;
priv->old_pause = -1;
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_dma(priv);
}
static int bcmgenet_close(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret;
+ int ret = 0;
netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
bcmgenet_netif_stop(dev);
/* Really kill the PHY state machine and disconnect from it */
- phy_disconnect(priv->phydev);
-
- /* Disable MAC receive */
- umac_enable_set(priv, CMD_RX_EN, false);
-
- ret = bcmgenet_dma_teardown(priv);
- if (ret)
- return ret;
-
- /* Disable MAC transmit. TX DMA disabled must be done before this */
- umac_enable_set(priv, CMD_TX_EN, false);
-
- /* tx reclaim */
- bcmgenet_tx_reclaim_all(dev);
- bcmgenet_fini_dma(priv);
+ phy_disconnect(dev->phydev);
free_irq(priv->irq0, priv);
free_irq(priv->irq1, priv);
@@ -3018,7 +2921,6 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
u32 p_index, c_index, intsts, intmsk;
struct netdev_queue *txq;
unsigned int free_bds;
- unsigned long flags;
bool txq_stopped;
if (!netif_msg_tx_err(priv))
@@ -3026,7 +2928,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
txq = netdev_get_tx_queue(priv->dev, ring->queue);
- spin_lock_irqsave(&ring->lock, flags);
+ spin_lock(&ring->lock);
if (ring->index == DESC_INDEX) {
intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
@@ -3038,7 +2940,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
free_bds = ring->free_bds;
- spin_unlock_irqrestore(&ring->lock, flags);
+ spin_unlock(&ring->lock);
netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
"TX queue status: %s, interrupts: %s\n"
@@ -3564,9 +3466,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
!strcasecmp(phy_mode_str, "internal"))
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
- err = reset_umac(priv);
- if (err)
- goto err_clk_disable;
+ reset_umac(priv);
err = bcmgenet_mii_init(dev);
if (err)
@@ -3614,7 +3514,7 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret;
+ int ret = 0;
if (!netif_running(dev))
return 0;
@@ -3622,24 +3522,10 @@ static int bcmgenet_suspend(struct device *d)
bcmgenet_netif_stop(dev);
if (!device_may_wakeup(d))
- phy_suspend(priv->phydev);
+ phy_suspend(dev->phydev);
netif_device_detach(dev);
- /* Disable MAC receive */
- umac_enable_set(priv, CMD_RX_EN, false);
-
- ret = bcmgenet_dma_teardown(priv);
- if (ret)
- return ret;
-
- /* Disable MAC transmit. TX DMA disabled must be done before this */
- umac_enable_set(priv, CMD_TX_EN, false);
-
- /* tx reclaim */
- bcmgenet_tx_reclaim_all(dev);
- bcmgenet_fini_dma(priv);
-
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3678,21 +3564,17 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_umac_reset(priv);
- ret = init_umac(priv);
- if (ret)
- goto out_clk_disable;
+ init_umac(priv);
/* From WOL-enabled suspend, switch to regular clock */
if (priv->wolopts)
clk_disable_unprepare(priv->clk_wol);
- phy_init_hw(priv->phydev);
+ phy_init_hw(dev->phydev);
+
/* Speed settings must be restored */
bcmgenet_mii_config(priv->dev, false);
- /* disable ethernet MAC while updating its registers */
- umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
-
bcmgenet_set_hw_addr(priv, dev->dev_addr);
if (priv->internal_phy) {
@@ -3720,7 +3602,7 @@ static int bcmgenet_resume(struct device *d)
netif_device_attach(dev);
if (!device_may_wakeup(d))
- phy_resume(priv->phydev);
+ phy_resume(dev->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
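The bcmgenet changes above drop the driver-private phydev pointer and use the one phylib already keeps in struct net_device. A minimal sketch of that pattern, assuming a driver that attaches its PHY through the generic phylib calls (everything except the phylib API itself is illustrative):

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Attach the PHY once; phylib records it in ndev->phydev, so the driver
 * no longer needs to cache its own copy in private state.
 */
static int example_connect_phy(struct net_device *ndev,
			       struct phy_device *phydev,
			       void (*adjust_link)(struct net_device *))
{
	int ret;

	ret = phy_connect_direct(ndev, phydev, adjust_link,
				 PHY_INTERFACE_MODE_RGMII);
	if (ret)
		return ret;

	/* From here on, every path uses ndev->phydev. */
	phy_start(ndev->phydev);
	return 0;
}

static void example_disconnect_phy(struct net_device *ndev)
{
	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);	/* clears ndev->phydev */
}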
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 4c49d0b97748..3c50431ccd2a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,7 +617,6 @@ struct bcmgenet_priv {
/* MDIO bus variables */
wait_queue_head_t wq;
- struct phy_device *phydev;
bool internal_phy;
struct device_node *phy_dn;
struct device_node *mdio_dn;
@@ -711,7 +710,6 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 18f5723be2c9..5333274a283c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -34,7 +34,7 @@
void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
bool status_changed = false;
@@ -121,22 +121,6 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
-/* Perform a voluntary PHY software reset, since the EPHY is very finicky about
- * not doing it and will start corrupting packets
- */
-void bcmgenet_mii_reset(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- if (GENET_IS_V4(priv))
- return;
-
- if (priv->phydev) {
- phy_init_hw(priv->phydev);
- phy_start_aneg(priv->phydev);
- }
-}
-
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -182,14 +166,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
}
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
- fixed_phy_set_link_update(priv->phydev,
+ fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
struct device *kdev = &priv->pdev->dev;
const char *phy_name = NULL;
u32 id_mode_dis = 0;
@@ -236,7 +220,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ if ((dev->phydev->supported & PHY_BASIC_FEATURES) ==
PHY_BASIC_FEATURES)
port_ctrl = PORT_MODE_EXT_RVMII_25;
else
@@ -306,7 +290,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return -ENODEV;
}
} else {
- phydev = priv->phydev;
+ phydev = dev->phydev;
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
@@ -317,8 +301,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
}
}
- priv->phydev = phydev;
-
/* Configure port multiplexer based on what the probed PHY device since
* reading the 'max-speed' property determines the maximum supported
* PHY speed which is needed for bcmgenet_mii_config() to configure
@@ -326,7 +308,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
*/
ret = bcmgenet_mii_config(dev, true);
if (ret) {
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
return ret;
}
@@ -336,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* Ethernet MAC ISRs
*/
if (priv->internal_phy)
- priv->phydev->irq = PHY_IGNORE_INTERRUPT;
+ dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
}
@@ -545,7 +527,6 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
}
- priv->phydev = phydev;
priv->phy_interface = pd->phy_interface;
return 0;
@@ -590,5 +571,4 @@ void bcmgenet_mii_exit(struct net_device *dev)
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
platform_device_unregister(priv->mii_pdev);
- platform_device_put(priv->mii_pdev);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 656e6af70f0a..de51c2177d03 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10931,9 +10931,9 @@ static void tg3_chk_missed_msi(struct tg3 *tp)
}
}
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = (struct tg3 *) __opaque;
+ struct tg3 *tp = from_timer(tp, t, timer);
spin_lock(&tp->lock);
@@ -11087,9 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp)
tp->asf_multiplier = (HZ / tp->timer_offset) *
TG3_FW_UPDATE_FREQ_SEC;
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long) tp;
- tp->timer.function = tg3_timer;
+ timer_setup(&tp->timer, tg3_timer, 0);
}
static void tg3_timer_start(struct tg3 *tp)
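The tg3 hunk above is one instance of the tree-wide move from data-cast timer callbacks to timer_setup()/from_timer(). A minimal sketch of the converted shape, assuming a timer embedded in the owning structure (the structure and function names are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_adapter {
	struct timer_list timer;	/* embedded in the owning object */
	unsigned long ticks;
};

/* New-style callback: it receives the timer_list pointer, and from_timer()
 * recovers the containing structure from it.
 */
static void example_timer_fn(struct timer_list *t)
{
	struct example_adapter *ad = from_timer(ad, t, timer);

	ad->ticks++;
	mod_timer(&ad->timer, jiffies + HZ);	/* re-arm for one second */
}

static void example_timer_init(struct example_adapter *ad)
{
	timer_setup(&ad->timer, example_timer_fn, 0);
	mod_timer(&ad->timer, jiffies + HZ);
}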
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 3b5e98ecba00..c2d02d02d1e6 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
* tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
*
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 6e13c937d715..a843076597ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1693,9 +1693,9 @@ err_return:
/* Timer callbacks */
/* a) IOC timer */
static void
-bnad_ioc_timeout(unsigned long data)
+bnad_ioc_timeout(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1704,9 +1704,9 @@ bnad_ioc_timeout(unsigned long data)
}
static void
-bnad_ioc_hb_check(unsigned long data)
+bnad_ioc_hb_check(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1715,9 +1715,9 @@ bnad_ioc_hb_check(unsigned long data)
}
static void
-bnad_iocpf_timeout(unsigned long data)
+bnad_iocpf_timeout(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1726,9 +1726,9 @@ bnad_iocpf_timeout(unsigned long data)
}
static void
-bnad_iocpf_sem_timeout(unsigned long data)
+bnad_iocpf_sem_timeout(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -1748,9 +1748,9 @@ bnad_iocpf_sem_timeout(unsigned long data)
/* b) Dynamic Interrupt Moderation Timer */
static void
-bnad_dim_timeout(unsigned long data)
+bnad_dim_timeout(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, dim_timer);
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
int i, j;
@@ -1781,9 +1781,9 @@ bnad_dim_timeout(unsigned long data)
/* c) Statistics Timer */
static void
-bnad_stats_timeout(unsigned long data)
+bnad_stats_timeout(struct timer_list *t)
{
- struct bnad *bnad = (struct bnad *)data;
+ struct bnad *bnad = from_timer(bnad, t, stats_timer);
unsigned long flags;
if (!netif_running(bnad->netdev) ||
@@ -1804,8 +1804,7 @@ bnad_dim_timer_start(struct bnad *bnad)
{
if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
!test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
- setup_timer(&bnad->dim_timer, bnad_dim_timeout,
- (unsigned long)bnad);
+ timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
mod_timer(&bnad->dim_timer,
jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
@@ -1823,8 +1822,7 @@ bnad_stats_timer_start(struct bnad *bnad)
spin_lock_irqsave(&bnad->bna_lock, flags);
if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
- setup_timer(&bnad->stats_timer, bnad_stats_timeout,
- (unsigned long)bnad);
+ timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
mod_timer(&bnad->stats_timer,
jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}
@@ -3692,14 +3690,11 @@ bnad_pci_probe(struct pci_dev *pdev,
goto res_free;
/* Set up timers */
- setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
- (unsigned long)bnad);
- setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
- (unsigned long)bnad);
- setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
- (unsigned long)bnad);
- setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
- (unsigned long)bnad);
+ timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
+ timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
+ timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
+ timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
+ 0);
/*
* Start the chip
diff --git a/drivers/net/ethernet/cadence/Makefile b/drivers/net/ethernet/cadence/Makefile
index 1d66ddb68969..1f33cdca9a3c 100644
--- a/drivers/net/ethernet/cadence/Makefile
+++ b/drivers/net/ethernet/cadence/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Atmel network device drivers.
#
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6df2cad61647..72a67f74b97b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -611,6 +611,9 @@ static int macb_mii_init(struct macb *bp)
err_out_unregister_bus:
mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
+ of_node_put(bp->phy_node);
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
err_out:
return err;
@@ -1218,8 +1221,6 @@ static int macb_poll(struct napi_struct *napi, int budget)
status = macb_readl(bp, RSR);
macb_writel(bp, RSR, status);
- work_done = 0;
-
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
@@ -3552,6 +3553,9 @@ static int macb_probe(struct platform_device *pdev)
err_out_unregister_mdio:
phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
+ of_node_put(bp->phy_node);
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
mdiobus_free(bp->mii_bus);
/* Shutdown the PHY if there is a GPIO reset */
@@ -3574,6 +3578,7 @@ static int macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
+ struct device_node *np = pdev->dev.of_node;
dev = platform_get_drvdata(pdev);
@@ -3582,6 +3587,8 @@ static int macb_remove(struct platform_device *pdev)
if (dev->phydev)
phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
+ if (np && of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
dev->phydev = NULL;
mdiobus_free(bp->mii_bus);
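The macb hunks add the fixed-link teardown that was missing on the MDIO error path and in remove. A minimal sketch of the symmetric register/deregister pattern for a devicetree fixed link (the helper names are illustrative; the of_phy_* calls are the real API):

#include <linux/of.h>
#include <linux/of_mdio.h>

static int example_setup_fixed_link(struct device_node *np)
{
	int ret;

	if (of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret)
			return ret;
	}

	/* ... further MDIO/PHY setup; any failure must undo the above ... */

	return 0;
}

static void example_teardown_fixed_link(struct device_node *np)
{
	/* Must mirror the probe path on both the error and remove paths. */
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
}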
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index dcbce6cac63e..63be75eb34d2 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -53,6 +53,7 @@ config THUNDER_NIC_RGX
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
+ depends on MAY_USE_DEVLINK
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
index c4d411d1aa28..e3fc4645cd8a 100644
--- a/drivers/net/ethernet/cavium/liquidio/Makefile
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Cavium Liquidio ethernet device driver
#
@@ -17,7 +18,7 @@ liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \
octeon_droq.o \
octeon_nic.o
-liquidio-objs := lio_main.o octeon_console.o $(liquidio-y)
+liquidio-objs := lio_main.o octeon_console.o lio_vf_rep.o $(liquidio-y)
obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 23f6b60030c5..32ae63b6f20e 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -91,7 +91,7 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype,
*bytes_compl += skb->len;
}
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
struct octnet_buf_free_info *finfo;
struct sk_buff *skb;
@@ -112,11 +112,13 @@ void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
break;
default:
- return;
+ return 0;
}
txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
netdev_tx_sent_queue(txq, skb->len);
+
+ return netif_xmit_stopped(txq);
}
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
@@ -141,6 +143,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
case OCTNET_CMD_SET_MULTI_LIST:
+ case OCTNET_CMD_SET_UC_LIST:
break;
case OCTNET_CMD_CHANGE_MACADDR:
@@ -464,7 +467,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
if (netdev) {
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- int packet_was_received;
/* Do not proceed if the interface is not in RUNNING state. */
if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -567,18 +569,10 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
}
- packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
-
- if (packet_was_received) {
- droq->stats.rx_bytes_received += len;
- droq->stats.rx_pkts_received++;
- } else {
- droq->stats.rx_dropped++;
- netif_info(lio, rx_err, lio->netdev,
- "droq:%d error rx_dropped:%llu\n",
- droq->q_no, droq->stats.rx_dropped);
- }
+ napi_gro_receive(napi, skb);
+ droq->stats.rx_bytes_received += len;
+ droq->stats.rx_pkts_received++;
} else {
recv_buffer_free(skb);
}
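The lio_core.c change makes the BQL helper return whether the queue was stopped, so the transmit path can still ring the doorbell when skb->xmit_more is set but byte-queue limits have throttled the queue. A minimal sketch of both sides of BQL accounting in a generic driver (the helper names are illustrative; the netdev_tx_* calls are the stock API):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit side: account the bytes handed to hardware and report whether
 * BQL stopped the queue, so a deferred doorbell can still be rung.
 */
static bool example_bql_sent(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

/* Completion side: report what the hardware finished so BQL can open the
 * queue again.
 */
static void example_bql_completed(struct net_device *dev, int qidx,
				  unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

	netdev_tx_completed_queue(txq, pkts, bytes);
}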
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 5b19826a7e16..6aa0eee88ea5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -21,6 +21,7 @@
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
+#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -34,6 +35,7 @@
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
+#include "lio_vf_rep.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
@@ -59,9 +61,9 @@ static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
-static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC;
+static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
-MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash.");
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default \"auto\"): use firmware already in flash if present, otherwise load \"nic\".");
static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
@@ -83,6 +85,11 @@ static int octeon_console_debug_enabled(u32 console)
/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
+/* Update local time on the octeon firmware every 60 seconds.
+ * Keeping the firmware on the same time reference as the host makes it
+ * easy to correlate firmware-logged events/errors with host events when
+ * debugging.
+ */
+#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
struct liquidio_if_cfg_context {
int octeon_id;
@@ -901,6 +908,121 @@ static inline void update_link_status(struct net_device *netdev,
}
}
+/**
+ * lio_sync_octeon_time_cb - callback that is invoked when soft command
+ * sent by lio_sync_octeon_time() has completed successfully or failed
+ *
+ * @oct - octeon device structure
+ * @status - indicates success or failure
+ * @buf - pointer to the command that was sent to firmware
+ **/
+static void lio_sync_octeon_time_cb(struct octeon_device *oct,
+ u32 status, void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+ if (status)
+ dev_err(&oct->pci_dev->dev,
+ "Failed to sync time to octeon; error=%d\n", status);
+
+ octeon_free_soft_command(oct, sc);
+}
+
+/**
+ * lio_sync_octeon_time - send the latest local time to the octeon firmware so
+ * that the firmware can correct its time in case of a skew
+ *
+ * @work: work scheduled to send time update to octeon firmware
+ **/
+static void lio_sync_octeon_time(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct timespec64 ts;
+ struct lio_time *lt;
+ int ret;
+
+ sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to sync time to octeon: soft command allocation failed\n");
+ return;
+ }
+
+ lt = (struct lio_time *)sc->virtdptr;
+
+ /* Get time of the day */
+ getnstimeofday64(&ts);
+ lt->sec = ts.tv_sec;
+ lt->nsec = ts.tv_nsec;
+ octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
+
+ sc->callback = lio_sync_octeon_time_cb;
+ sc->callback_arg = sc;
+ sc->wait_time = 1000;
+
+ ret = octeon_send_soft_command(oct, sc);
+ if (ret == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to sync time to octeon: failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ }
+
+ queue_delayed_work(lio->sync_octeon_time_wq.wq,
+ &lio->sync_octeon_time_wq.wk.work,
+ msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
+}
+
+/**
+ * setup_sync_octeon_time_wq - Sets up the work to periodically update
+ * local time to octeon firmware
+ *
+ * @netdev - network device which should send time update to firmware
+ **/
+static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->sync_octeon_time_wq.wq =
+ alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ if (!lio->sync_octeon_time_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
+ lio_sync_octeon_time);
+ lio->sync_octeon_time_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->sync_octeon_time_wq.wq,
+ &lio->sync_octeon_time_wq.wk.work,
+ msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
+
+ return 0;
+}
+
+/**
+ * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
+ * to periodically update local time to octeon firmware
+ *
+ * @netdev - network device which should send time update to firmware
+ **/
+static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
+
+ if (time_wq->wq) {
+ cancel_delayed_work_sync(&time_wq->wk.work);
+ destroy_workqueue(time_wq->wq);
+ }
+}
+
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
struct octeon_device *other_oct;
@@ -1076,19 +1198,13 @@ liquidio_probe(struct pci_dev *pdev,
}
if (OCTEON_CN23XX_PF(oct_dev)) {
- u64 scratch1;
u8 bus, device, function;
- scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
- if (!(scratch1 & 4ULL)) {
- /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
- * the lio watchdog kernel thread is running for this
- * NIC. Each NIC gets one watchdog kernel thread.
+ if (atomic_read(oct_dev->adapter_refcount) == 1) {
+ /* Each NIC gets one watchdog kernel thread. The first
+ * PF (of each NIC) that gets pci_driver->probe()'d
+ * creates that thread.
*/
- scratch1 |= 4ULL;
- octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
- scratch1);
-
bus = pdev->bus->number;
device = PCI_SLOT(pdev->devfn);
function = PCI_FUNC(pdev->devfn);
@@ -1115,10 +1231,10 @@ liquidio_probe(struct pci_dev *pdev,
return 0;
}
-static bool fw_type_is_none(void)
+static bool fw_type_is_auto(void)
{
- return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
- sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
+ return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
+ sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}
/**
@@ -1302,7 +1418,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* Implementation note: only soft-reset the device
* if it is a CN6XXX OR the LAST CN23XX device.
*/
- if (fw_type_is_none())
+ if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
octeon_pci_flr(oct);
else if (OCTEON_CN6XXX(oct) || !refcount)
oct->fn_list.soft_reset(oct);
@@ -1455,6 +1571,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
+ cleanup_sync_octeon_time_wq(netdev);
cleanup_link_status_change_wq(netdev);
cleanup_rx_oom_poll_fn(netdev);
@@ -1487,6 +1604,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
oct->cmd_resp_state = OCT_DRV_OFFLINE;
spin_unlock_bh(&oct->cmd_resp_wqlock);
+ lio_vf_rep_destroy(oct);
+
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
for (j = 0; j < oct->num_oqs; j++)
@@ -1497,6 +1616,12 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++)
liquidio_destroy_nic_device(oct, i);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+
dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
return 0;
}
@@ -1514,6 +1639,10 @@ static void liquidio_remove(struct pci_dev *pdev)
if (oct_dev->watchdog_task)
kthread_stop(oct_dev->watchdog_task);
+ if (!oct_dev->octeon_id &&
+ oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
+ lio_vf_rep_modexit();
+
if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
liquidio_stop_nic_module(oct_dev);
@@ -1934,10 +2063,12 @@ static int load_firmware(struct octeon_device *oct)
char fw_name[LIO_MAX_FW_FILENAME_LEN];
char *tmp_fw_type;
- if (fw_type[0] == '\0')
+ if (fw_type_is_auto()) {
tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
- else
+ strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
+ } else {
tmp_fw_type = fw_type;
+ }
sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
octeon_get_conf(oct)->card_name, tmp_fw_type,
@@ -2477,7 +2608,8 @@ static void handle_timestamp(struct octeon_device *oct,
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
- struct octnet_buf_free_info *finfo)
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
{
int retval;
struct octeon_soft_command *sc;
@@ -2512,7 +2644,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
len = (u32)((struct octeon_instr_ih2 *)
(&sc->cmd.cmd2.ih2))->dlengsz;
- ring_doorbell = 1;
+ ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
@@ -2546,7 +2678,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
union tx_info *tx_info;
int status = 0;
int q_idx = 0, iq_no = 0;
- int j;
+ int j, xmit_more = 0;
u64 dptr = 0;
u32 tag = 0;
@@ -2751,17 +2883,19 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
+ xmit_more = skb->xmit_more;
+
if (unlikely(cmdsetup.s.timestamp))
- status = send_nic_timestamp_pkt(oct, &ndata, finfo);
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
else
- status = octnet_send_nic_data_pkt(oct, &ndata);
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
if (status == IQ_SEND_STOP)
- stop_q(lio->netdev, q_idx);
+ stop_q(netdev, q_idx);
netif_trans_update(netdev);
@@ -2780,6 +2914,9 @@ lio_xmit_failed:
if (dptr)
dma_unmap_single(&oct->pci_dev->dev, dptr,
ndata.datasize, DMA_TO_DEVICE);
+
+ octeon_ring_doorbell_locked(oct, iq_no);
+
tx_buffer_free(skb);
return NETDEV_TX_OK;
}
@@ -3186,6 +3323,86 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
return 0;
}
+static int
+liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct lio_devlink_priv *priv;
+ struct octeon_device *oct;
+
+ priv = devlink_priv(devlink);
+ oct = priv->oct;
+
+ *mode = oct->eswitch_mode;
+
+ return 0;
+}
+
+static int
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct lio_devlink_priv *priv;
+ struct octeon_device *oct;
+ int ret = 0;
+
+ priv = devlink_priv(devlink);
+ oct = priv->oct;
+
+ if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
+ return -EINVAL;
+
+ if (oct->eswitch_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ oct->eswitch_mode = mode;
+ ret = lio_vf_rep_create(oct);
+ break;
+
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ lio_vf_rep_destroy(oct);
+ oct->eswitch_mode = mode;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct devlink_ops liquidio_devlink_ops = {
+ .eswitch_mode_get = liquidio_eswitch_mode_get,
+ .eswitch_mode_set = liquidio_eswitch_mode_set,
+};
+
+static int
+lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = ETH_ALEN;
+ ether_addr_copy(attr->u.ppid.id,
+ (void *)&lio->linfo.hw_addr + 2);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct switchdev_ops lio_pf_switchdev_ops = {
+ .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
+};
+
static const struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
@@ -3303,7 +3520,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
{
struct lio *lio = NULL;
struct net_device *netdev;
- u8 mac[6], i, j;
+ u8 mac[6], i, j, *fw_ver;
struct octeon_soft_command *sc;
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
@@ -3315,6 +3532,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
u32 resp_size, ctx_size, data_size;
u32 ifidx_or_pfnum;
struct lio_version *vdata;
+ struct devlink *devlink;
+ struct lio_devlink_priv *lio_devlink;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3414,6 +3633,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_fail;
}
+ /* Verify f/w version (in case of 'auto' loading from flash) */
+ fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
+ if (memcmp(LIQUIDIO_BASE_VERSION,
+ fw_ver,
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION, fw_ver);
+ goto setup_nic_dev_fail;
+ } else if (atomic_read(octeon_dev->adapter_fw_state) ==
+ FW_IS_PRELOADED) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "Using auto-loaded firmware version %s.\n",
+ fw_ver);
+ }
+
octeon_swap_8B_data((u64 *)(&resp->cfg_info),
(sizeof(struct liquidio_if_cfg_info)) >> 3);
@@ -3444,6 +3679,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
* netdev tasks.
*/
netdev->netdev_ops = &lionetdevops;
+ SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
lio = GET_LIO(netdev);
@@ -3593,6 +3829,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if (setup_link_status_change_wq(netdev))
goto setup_nic_dev_fail;
+ if ((octeon_dev->fw_info.app_cap_flags &
+ LIQUIDIO_TIME_SYNC_CAP) &&
+ setup_sync_octeon_time_wq(netdev))
+ goto setup_nic_dev_fail;
+
if (setup_rx_oom_poll_fn(netdev))
goto setup_nic_dev_fail;
@@ -3625,6 +3866,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
octeon_free_soft_command(octeon_dev, sc);
}
+ devlink = devlink_alloc(&liquidio_devlink_ops,
+ sizeof(struct lio_devlink_priv));
+ if (!devlink) {
+ dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
+ goto setup_nic_wait_intr;
+ }
+
+ lio_devlink = devlink_priv(devlink);
+ lio_devlink->oct = octeon_dev;
+
+ if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
+ devlink_free(devlink);
+ dev_err(&octeon_dev->pci_dev->dev,
+ "devlink registration failed\n");
+ goto setup_nic_wait_intr;
+ }
+
+ octeon_dev->devlink = devlink;
+ octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
return 0;
setup_nic_dev_fail:
@@ -3719,6 +3980,7 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
}
if (!num_vfs) {
+ lio_vf_rep_destroy(oct);
ret = lio_pci_sriov_disable(oct);
} else if (num_vfs > oct->sriov_info.max_vfs) {
dev_err(&oct->pci_dev->dev,
@@ -3730,6 +3992,10 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
ret = octeon_enable_sriov(oct);
dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
oct->pf_num, num_vfs);
+ ret = lio_vf_rep_create(oct);
+ if (ret)
+ dev_info(&oct->pci_dev->dev,
+ "vf representor create failed");
}
return ret;
@@ -3767,6 +4033,18 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
goto octnet_init_failure;
}
+ /* Call vf_rep_modinit if the firmware is switchdev capable
+ * and do it from the first liquidio function probed.
+ */
+ if (!oct->octeon_id &&
+ oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
+ retval = lio_vf_rep_modinit();
+ if (retval) {
+ liquidio_stop_nic_module(oct);
+ goto octnet_init_failure;
+ }
+ }
+
liquidio_ptp_init(oct);
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
@@ -3882,9 +4160,9 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
- int fw_loaded = 0;
char bootcmd[] = "\n";
char *dbg_enb = NULL;
+ enum lio_fw_state fw_state;
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3916,24 +4194,40 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
octeon_dev->app_mode = CVM_DRV_INVALID_APP;
- if (OCTEON_CN23XX_PF(octeon_dev)) {
- if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) {
- fw_loaded = 0;
- /* Do a soft reset of the Octeon device. */
- if (octeon_dev->fn_list.soft_reset(octeon_dev))
- return 1;
- /* things might have changed */
- if (!cn23xx_fw_loaded(octeon_dev))
- fw_loaded = 0;
- else
- fw_loaded = 1;
- } else {
- fw_loaded = 1;
- }
- } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
- return 1;
+ /* CN23XX supports preloaded firmware if the following is true:
+ *
+ * The adapter indicates that firmware is currently running AND
+ * 'fw_type' is 'auto'.
+ *
+ * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
+ */
+ if (OCTEON_CN23XX_PF(octeon_dev) &&
+ cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
+ atomic_cmpxchg(octeon_dev->adapter_fw_state,
+ FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
}
+ /* If loading firmware, only first device of adapter needs to do so. */
+ fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
+ FW_NEEDS_TO_BE_LOADED,
+ FW_IS_BEING_LOADED);
+
+ /* Here, [local variable] 'fw_state' is set to one of:
+ *
+ * FW_IS_PRELOADED: No firmware is to be loaded (see above)
+ * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
+ * firmware to the adapter.
+ * FW_IS_BEING_LOADED: The driver's second instance will not load
+ * firmware to the adapter.
+ */
+
+ /* Prior to f/w load, perform a soft reset of the Octeon device;
+ * if error resetting, return w/error.
+ */
+ if (fw_state == FW_NEEDS_TO_BE_LOADED)
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+
/* Initialize the dispatch mechanism used to push packets arriving on
* Octeon Output queues.
*/
@@ -4063,7 +4357,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
- if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
+ if (fw_state == FW_NEEDS_TO_BE_LOADED) {
dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
if (!ddr_timeout) {
dev_info(&octeon_dev->pci_dev->dev,
@@ -4125,6 +4419,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
return 1;
}
+
+ atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
}
handshake[octeon_dev->octeon_id].init_ok = 1;
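The probe rework above replaces the SLI_SCRATCH_1 flag with a per-adapter firmware state advanced through atomic_cmpxchg(), so exactly one PF instance resets the device and loads firmware. A minimal sketch of that claim-the-work pattern (the state names follow the driver; the surrounding code is illustrative):

#include <linux/atomic.h>

enum example_fw_state {
	FW_NEEDS_TO_BE_LOADED,
	FW_IS_BEING_LOADED,
	FW_HAS_BEEN_LOADED,
	FW_IS_PRELOADED,
};

static int example_maybe_load_fw(atomic_t *fw_state)
{
	/* atomic_cmpxchg() returns the old value, so only the instance that
	 * observed FW_NEEDS_TO_BE_LOADED wins the transition and loads.
	 */
	int prev = atomic_cmpxchg(fw_state, FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	if (prev != FW_NEEDS_TO_BE_LOADED)
		return 0;	/* preloaded, or another PF is loading */

	/* ... soft-reset the device and download the firmware image ... */

	atomic_set(fw_state, FW_HAS_BEEN_LOADED);
	return 1;
}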
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 2e993ce43b66..fd70a4844e2d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -435,8 +435,7 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g)
- kfree(g);
+ kfree(g);
} while (g);
if (lio->glists_virt_base && lio->glists_virt_base[i] &&
@@ -748,7 +747,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (lio_wait_for_oq_pkts(oct))
dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
-
+ /* fall through */
case OCT_DEV_INTR_SET_DONE:
/* Disable interrupts */
oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -1289,6 +1288,9 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
struct napi_struct *napi, *n;
+ /* tell Octeon to stop forwarding packets to host */
+ send_rx_ctrl_cmd(lio, 0);
+
if (oct->props[lio->ifidx].napi_enabled) {
list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
napi_disable(napi);
@@ -1306,9 +1308,6 @@ static int liquidio_stop(struct net_device *netdev)
netif_carrier_off(netdev);
lio->link_changes++;
- /* tell Octeon to stop forwarding packets to host */
- send_rx_ctrl_cmd(lio, 0);
-
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
txqs_stop(netdev);
@@ -1691,7 +1690,8 @@ static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
*/
static int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
- struct octnet_buf_free_info *finfo)
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
{
struct octeon_soft_command *sc;
int ring_doorbell;
@@ -1721,7 +1721,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct,
len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
- ring_doorbell = 1;
+ ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
sc, len, ndata->reqtype);
@@ -1753,6 +1753,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct octeon_device *oct;
int q_idx = 0, iq_no = 0;
union tx_info *tx_info;
+ int xmit_more = 0;
struct lio *lio;
int status = 0;
u64 dptr = 0;
@@ -1941,10 +1942,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
+ xmit_more = skb->xmit_more;
+
if (unlikely(cmdsetup.s.timestamp))
- status = send_nic_timestamp_pkt(oct, &ndata, finfo);
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
else
- status = octnet_send_nic_data_pkt(oct, &ndata);
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
goto lio_xmit_failed;
@@ -1953,7 +1956,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (status == IQ_SEND_STOP) {
dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
iq_no);
- stop_q(lio->netdev, q_idx);
+ stop_q(netdev, q_idx);
}
netif_trans_update(netdev);
@@ -1973,6 +1976,9 @@ lio_xmit_failed:
if (dptr)
dma_unmap_single(&oct->pci_dev->dev, dptr,
ndata.datasize, DMA_TO_DEVICE);
+
+ octeon_ring_doorbell_locked(oct, iq_no);
+
tx_buffer_free(skb);
return NETDEV_TX_OK;
}
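Both the PF and VF xmit paths now defer the hardware doorbell while skb->xmit_more is set, and ring it explicitly on the failure path so earlier batched packets are not left waiting. A minimal sketch of the batching decision itself (the helper name is illustrative; skb->xmit_more is the field this kernel generation uses):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Ring the TX doorbell only for the last packet of a batch, but always
 * flush if byte-queue limits have stopped the queue.
 */
static bool example_should_ring_doorbell(const struct sk_buff *skb,
					 const struct netdev_queue *txq)
{
	return !skb->xmit_more || netif_xmit_stopped(txq);
}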
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
new file mode 100644
index 000000000000..2adafa366d3f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -0,0 +1,695 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include <net/switchdev.h>
+#include "lio_vf_rep.h"
+#include "octeon_network.h"
+
+static int lio_vf_rep_open(struct net_device *ndev);
+static int lio_vf_rep_stop(struct net_device *ndev);
+static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev);
+static void lio_vf_rep_tx_timeout(struct net_device *netdev);
+static int lio_vf_rep_phys_port_name(struct net_device *dev,
+ char *buf, size_t len);
+static void lio_vf_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64);
+static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
+
+static const struct net_device_ops lio_vf_rep_ndev_ops = {
+ .ndo_open = lio_vf_rep_open,
+ .ndo_stop = lio_vf_rep_stop,
+ .ndo_start_xmit = lio_vf_rep_pkt_xmit,
+ .ndo_tx_timeout = lio_vf_rep_tx_timeout,
+ .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
+ .ndo_get_stats64 = lio_vf_rep_get_stats64,
+ .ndo_change_mtu = lio_vf_rep_change_mtu,
+};
+
+static void
+lio_vf_rep_send_sc_complete(struct octeon_device *oct,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct lio_vf_rep_sc_ctx *ctx =
+ (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
+ struct lio_vf_rep_resp *resp =
+ (struct lio_vf_rep_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
+ WRITE_ONCE(resp->status, 0);
+
+ complete(&ctx->complete);
+}
+
+static int
+lio_vf_rep_send_soft_command(struct octeon_device *oct,
+ void *req, int req_size,
+ void *resp, int resp_size)
+{
+ int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
+ int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
+ struct octeon_soft_command *sc = NULL;
+ struct lio_vf_rep_resp *rep_resp;
+ struct lio_vf_rep_sc_ctx *ctx;
+ void *sc_req;
+ int err;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, req_size,
+ tot_resp_size, ctx_size);
+ if (!sc)
+ return -ENOMEM;
+
+ ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
+ memset(ctx, 0, ctx_size);
+ init_completion(&ctx->complete);
+
+ sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
+ memcpy(sc_req, req, req_size);
+
+ rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
+ memset(rep_resp, 0, tot_resp_size);
+ WRITE_ONCE(rep_resp->status, 1);
+
+ sc->iq_no = 0;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
+ sc->callback = lio_vf_rep_send_sc_complete;
+ sc->callback_arg = sc;
+ sc->wait_time = LIO_VF_REP_REQ_TMO_MS;
+
+ err = octeon_send_soft_command(oct, sc);
+ if (err == IQ_SEND_FAILED)
+ goto free_buff;
+
+ wait_for_completion_timeout(&ctx->complete,
+ msecs_to_jiffies
+ (2 * LIO_VF_REP_REQ_TMO_MS));
+ err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
+ if (err)
+ dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
+
+ if (resp)
+ memcpy(resp, (rep_resp + 1), resp_size);
+free_buff:
+ octeon_free_soft_command(oct, sc);
+
+ return err;
+}
+
+static int
+lio_vf_rep_open(struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "VF_REP open failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
+ LIO_IFSTATE_RUNNING));
+
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int
+lio_vf_rep_stop(struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "VF_REP dev stop failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
+ ~LIO_IFSTATE_RUNNING));
+
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static void
+lio_vf_rep_tx_timeout(struct net_device *ndev)
+{
+ netif_trans_update(ndev);
+
+ netif_wake_queue(ndev);
+}
+
+static void
+lio_vf_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+
+ stats64->tx_packets = vf_rep->stats.tx_packets;
+ stats64->tx_bytes = vf_rep->stats.tx_bytes;
+ stats64->tx_dropped = vf_rep->stats.tx_dropped;
+
+ stats64->rx_packets = vf_rep->stats.rx_packets;
+ stats64->rx_bytes = vf_rep->stats.rx_bytes;
+ stats64->rx_dropped = vf_rep->stats.rx_dropped;
+}
+
+static int
+lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Change MTU failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int
+lio_vf_rep_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+ struct octeon_device *oct = vf_rep->oct;
+ int ret;
+
+ ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
+ vf_rep->ifidx - oct->pf_num * 64 - 1);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static struct net_device *
+lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
+{
+ int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
+ int vfid_mask = max_vfs - 1;
+
+ if (ifidx <= oct->pf_num * max_vfs ||
+ ifidx >= oct->pf_num * max_vfs + max_vfs)
+ return NULL;
+
+ /* ifidx 1-63 for PF0 VFs
+ * ifidx 65-127 for PF1 VFs
+ */
+ vf_id = (ifidx & vfid_mask) - 1;
+
+ return oct->vf_rep_list.ndev[vf_id];
+}
+
+static void
+lio_vf_rep_copy_packet(struct octeon_device *oct,
+ struct sk_buff *skb,
+ int len)
+{
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+
+ skb_copy_to_linear_data(skb, page_address(pg_info->page) +
+ pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+}
+
+static int
+lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *vf_ndev;
+ struct octeon_device *oct;
+ union octeon_rh *rh;
+ struct sk_buff *skb;
+ int i, ifidx;
+
+ oct = lio_get_device(recv_pkt->octeon_id);
+ if (!oct)
+ goto free_buffers;
+
+ skb = recv_pkt->buffer_ptr[0];
+ rh = &recv_pkt->rh;
+ ifidx = rh->r.ossp;
+
+ vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
+ if (!vf_ndev)
+ goto free_buffers;
+
+ vf_rep = netdev_priv(vf_ndev);
+ if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
+ recv_pkt->buffer_count > 1)
+ goto free_buffers;
+
+ skb->dev = vf_ndev;
+
+ /* Multiple buffers are not used for vf_rep packets.
+ * So just buffer_size[0] is valid.
+ */
+ lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(skb);
+
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+
+free_buffers:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+}
+
+static void
+lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
+ u32 status, void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct sk_buff *skb = sc->ctxptr;
+ struct net_device *ndev = skb->dev;
+
+ dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
+ sc->datasize, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ octeon_free_soft_command(oct, sc);
+
+ if (octnet_iq_is_full(oct, sc->iq_no))
+ return;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static int
+lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct net_device *parent_ndev = vf_rep->parent_ndev;
+ struct octeon_device *oct = vf_rep->oct;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_soft_command *sc;
+ struct lio *parent_lio;
+ int status;
+
+ parent_lio = GET_LIO(parent_ndev);
+
+ if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
+ skb->len <= 0)
+ goto xmit_failed;
+
+ if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
+ dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, 0, 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
+ goto xmit_failed;
+ }
+
+ /* Multiple buffers are not used for vf_rep packets. */
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
+ goto xmit_failed;
+ }
+
+ sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
+ dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
+ goto xmit_failed;
+ }
+
+ sc->virtdptr = skb->data;
+ sc->datasize = skb->len;
+ sc->ctxptr = skb;
+ sc->iq_no = parent_lio->txq;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
+ vf_rep->ifidx, 0, 0);
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+ pki_ih3->tagtype = ORDERED_TAG;
+
+ sc->callback = lio_vf_rep_packet_sent_callback;
+ sc->callback_arg = sc;
+
+ status = octeon_send_soft_command(oct, sc);
+ if (status == IQ_SEND_FAILED) {
+ dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
+ sc->datasize, DMA_TO_DEVICE);
+ goto xmit_failed;
+ }
+
+ if (status == IQ_SEND_STOP)
+ netif_stop_queue(ndev);
+
+ netif_trans_update(ndev);
+
+ return NETDEV_TX_OK;
+
+xmit_failed:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int
+lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+ struct net_device *parent_ndev = vf_rep->parent_ndev;
+ struct lio *lio = GET_LIO(parent_ndev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = ETH_ALEN;
+ ether_addr_copy(attr->u.ppid.id,
+ (void *)&lio->linfo.hw_addr + 2);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
+ .switchdev_port_attr_get = lio_vf_rep_attr_get,
+};
+
+static void
+lio_vf_rep_fetch_stats(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
+ struct lio_vf_rep_stats stats;
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
+ rep_cfg.ifidx = vf_rep->ifidx;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
+ &stats, sizeof(stats));
+
+ if (!ret) {
+ octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
+ memcpy(&vf_rep->stats, &stats, sizeof(stats));
+ }
+
+ schedule_delayed_work(&vf_rep->stats_wk.work,
+ msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
+}
+
+int
+lio_vf_rep_create(struct octeon_device *oct)
+{
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *ndev;
+ int i, num_vfs;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ if (!oct->sriov_info.sriov_enabled)
+ return 0;
+
+ num_vfs = oct->sriov_info.num_vfs_alloced;
+
+ oct->vf_rep_list.num_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
+ ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));
+
+ if (!ndev) {
+ dev_err(&oct->pci_dev->dev,
+ "VF rep device %d creation failed\n", i);
+ goto cleanup;
+ }
+
+ ndev->min_mtu = LIO_MIN_MTU_SIZE;
+ ndev->max_mtu = LIO_MAX_MTU_SIZE;
+ ndev->netdev_ops = &lio_vf_rep_ndev_ops;
+ SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);
+
+ vf_rep = netdev_priv(ndev);
+ memset(vf_rep, 0, sizeof(*vf_rep));
+
+ vf_rep->ndev = ndev;
+ vf_rep->oct = oct;
+ vf_rep->parent_ndev = oct->props[0].netdev;
+ vf_rep->ifidx = (oct->pf_num * 64) + i + 1;
+
+ eth_hw_addr_random(ndev);
+
+ if (register_netdev(ndev)) {
+ dev_err(&oct->pci_dev->dev, "VF rep nerdev registration failed\n");
+
+ free_netdev(ndev);
+ goto cleanup;
+ }
+
+ netif_carrier_off(ndev);
+
+ INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
+ lio_vf_rep_fetch_stats);
+ vf_rep->stats_wk.ctxptr = (void *)vf_rep;
+ schedule_delayed_work(&vf_rep->stats_wk.work,
+ msecs_to_jiffies
+ (LIO_VF_REP_STATS_POLL_TIME_MS));
+ oct->vf_rep_list.num_vfs++;
+ oct->vf_rep_list.ndev[i] = ndev;
+ }
+
+ if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
+ OPCODE_NIC_VF_REP_PKT,
+ lio_vf_rep_pkt_recv, oct)) {
+ dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");
+
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
+ ndev = oct->vf_rep_list.ndev[i];
+ oct->vf_rep_list.ndev[i] = NULL;
+ if (ndev) {
+ vf_rep = netdev_priv(ndev);
+ cancel_delayed_work_sync
+ (&vf_rep->stats_wk.work);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ }
+ }
+
+ oct->vf_rep_list.num_vfs = 0;
+
+ return -1;
+}
+
+void
+lio_vf_rep_destroy(struct octeon_device *oct)
+{
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *ndev;
+ int i;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ if (!oct->sriov_info.sriov_enabled)
+ return;
+
+ for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
+ ndev = oct->vf_rep_list.ndev[i];
+ oct->vf_rep_list.ndev[i] = NULL;
+ if (ndev) {
+ vf_rep = netdev_priv(ndev);
+ cancel_delayed_work_sync
+ (&vf_rep->stats_wk.work);
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ }
+ }
+
+ oct->vf_rep_list.num_vfs = 0;
+}
+
+static int
+lio_vf_rep_netdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct lio_vf_rep_desc *vf_rep;
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGENAME:
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
+ return NOTIFY_DONE;
+
+ vf_rep = netdev_priv(ndev);
+ oct = vf_rep->oct;
+
+ if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
+ dev_err(&oct->pci_dev->dev,
+ "Device name change sync failed as the size is > %d\n",
+ LIO_IF_NAME_SIZE);
+ return NOTIFY_DONE;
+ }
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+ if (ret)
+ dev_err(&oct->pci_dev->dev,
+ "vf_rep netdev name change failed with err %d\n", ret);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lio_vf_rep_netdev_notifier = {
+ .notifier_call = lio_vf_rep_netdev_event,
+};
+
+int
+lio_vf_rep_modinit(void)
+{
+ if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
+ pr_err("netdev notifier registration failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void
+lio_vf_rep_modexit(void)
+{
+ if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
+ pr_err("netdev notifier unregister failed\n");
+}
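lio_vf_rep_send_soft_command() above follows the usual fire-and-wait shape: the completion lives in the command context, the send callback signals it, and the submitter blocks with a timeout. A minimal, self-contained sketch of that shape outside the LiquidIO command machinery (every name here is illustrative):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct example_request {
	struct completion done;
	int status;		/* filled in by the completion side */
};

/* Called from the response/interrupt path when the device answers. */
static void example_request_complete(struct example_request *req, int status)
{
	req->status = status;
	complete(&req->done);
}

/* Called from the submitting context. */
static int example_request_wait(struct example_request *req,
				unsigned long timeout_ms)
{
	init_completion(&req->done);

	/* ... hand the request to the device here ... */

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return req->status;
}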
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h
new file mode 100644
index 000000000000..bb3cedc63c63
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h
@@ -0,0 +1,49 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file lio_vf_rep.h
+ * \brief Host Driver: This file defines vf_rep-related macros and structures
+ */
+#ifndef __LIO_VF_REP_H__
+#define __LIO_VF_REP_H__
+#define LIO_VF_REP_REQ_TMO_MS 5000
+#define LIO_VF_REP_STATS_POLL_TIME_MS 200
+
+struct lio_vf_rep_desc {
+ struct net_device *parent_ndev;
+ struct net_device *ndev;
+ struct octeon_device *oct;
+ struct lio_vf_rep_stats stats;
+ struct cavium_wk stats_wk;
+ atomic_t ifstate;
+ int ifidx;
+};
+
+struct lio_vf_rep_sc_ctx {
+ struct completion complete;
+};
+
+int lio_vf_rep_create(struct octeon_device *oct);
+void lio_vf_rep_destroy(struct octeon_device *oct);
+int lio_vf_rep_modinit(void);
+void lio_vf_rep_modexit(void);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 3788c8cd082a..522dcc4dcff7 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -27,8 +27,8 @@
#define LIQUIDIO_PACKAGE ""
#define LIQUIDIO_BASE_MAJOR_VERSION 1
-#define LIQUIDIO_BASE_MINOR_VERSION 6
-#define LIQUIDIO_BASE_MICRO_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 7
+#define LIQUIDIO_BASE_MICRO_VERSION 0
#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
__stringify(LIQUIDIO_BASE_MINOR_VERSION)
#define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION)
@@ -84,10 +84,14 @@ enum octeon_tag_type {
#define OPCODE_NIC_IF_CFG 0x09
#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
+#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14
#define VF_DRV_LOADED 1
#define VF_DRV_REMOVED -1
#define VF_DRV_MACADDR_CHANGED 2
+#define OPCODE_NIC_VF_REP_PKT 0x15
+#define OPCODE_NIC_VF_REP_CMD 0x16
+
#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
/* Application codes advertised by the core driver initialization packet. */
@@ -108,6 +112,10 @@ enum octeon_tag_type {
#define SCR2_BIT_FW_LOADED 63
+/* App specific capabilities from firmware to pf driver */
+#define LIQUIDIO_TIME_SYNC_CAP 0x1
+#define LIQUIDIO_SWITCHDEV_CAP 0x2
+
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
if ((index + count) >= max)
@@ -901,4 +909,60 @@ union oct_nic_if_cfg {
} s;
};
+struct lio_time {
+ s64 sec; /* seconds */
+ s64 nsec; /* nanoseconds */
+};
+
+struct lio_vf_rep_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+};
+
+enum lio_vf_rep_req_type {
+ LIO_VF_REP_REQ_NONE,
+ LIO_VF_REP_REQ_STATE,
+ LIO_VF_REP_REQ_MTU,
+ LIO_VF_REP_REQ_STATS,
+ LIO_VF_REP_REQ_DEVNAME
+};
+
+enum {
+ LIO_VF_REP_STATE_DOWN,
+ LIO_VF_REP_STATE_UP
+};
+
+#define LIO_IF_NAME_SIZE 16
+struct lio_vf_rep_req {
+ u8 req_type;
+ u8 ifidx;
+ u8 rsvd[6];
+
+ union {
+ struct lio_vf_rep_name {
+ char name[LIO_IF_NAME_SIZE];
+ } rep_name;
+
+ struct lio_vf_rep_mtu {
+ u32 mtu;
+ u32 rsvd;
+ } rep_mtu;
+
+ struct lio_vf_rep_state {
+ u8 state;
+ u8 rsvd[7];
+ } rep_state;
+ };
+};
+
+struct lio_vf_rep_resp {
+ u64 rh;
+ u8 status;
+ u8 rsvd[7];
+};
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
index 78a3685f6fe0..5bf5e8791dfb 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -24,6 +24,7 @@
#define LIO_FW_BASE_NAME "lio_"
#define LIO_FW_NAME_SUFFIX ".bin"
#define LIO_FW_NAME_TYPE_NIC "nic"
+#define LIO_FW_NAME_TYPE_AUTO "auto"
#define LIO_FW_NAME_TYPE_NONE "none"
#define LIO_MAX_FIRMWARE_VERSION_LEN 16
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 63bd9c94e547..ceac74388e09 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -37,6 +37,8 @@
#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
#define MAX_OCTEON_MULTICAST_ADDR 32
+#define MAX_OCTEON_FILL_COUNT 8
+
/* CN6xxx IQ configuration macros */
#define CN6XXX_MAX_INPUT_QUEUES 32
#define CN6XXX_MAX_IQ_DESCRIPTORS 2048
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index ec3dd69cd6b2..7f97ae48efed 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -803,15 +803,18 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num,
}
#define FBUF_SIZE (4 * 1024 * 1024)
+#define MAX_BOOTTIME_SIZE 80
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
- int ret = 0;
+ struct octeon_firmware_file_header *h;
+ char boottime[MAX_BOOTTIME_SIZE];
+ struct timespec64 ts;
u32 crc32_result;
u64 load_addr;
u32 image_len;
- struct octeon_firmware_file_header *h;
+ int ret = 0;
u32 i, rem;
if (size < sizeof(struct octeon_firmware_file_header)) {
@@ -890,11 +893,34 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
load_addr += size;
}
}
+
+ /* Pass date and time information to NIC at the time of loading
+ * firmware and periodically update the NIC firmware with the host time.
+ * This makes the NIC firmware use the same time reference as the host,
+ * so that it is easy to correlate logs from firmware and host for
+ * debugging.
+ *
+ * Octeon always uses UTC time, so timezone information is not sent.
+ */
+ getnstimeofday64(&ts);
+ ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
+ " time_sec=%lld time_nsec=%ld",
+ (s64)ts.tv_sec, ts.tv_nsec);
+ if ((sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))) <
+ ret) {
+ dev_err(&oct->pci_dev->dev, "Boot command buffer too small\n");
+ return -EINVAL;
+ }
+ strncat(h->bootcmd, boottime,
+ sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd)));
+
dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
h->bootcmd);
/* Invoke the bootcmd */
ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+ if (ret)
+ dev_info(&oct->pci_dev->dev, "Boot command send failed\n");
- return 0;
+ return ret;
}
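The hunk above appends the formatted boot-time string to h->bootcmd only after checking that the remaining room in the buffer covers the new text. A minimal, self-contained sketch of that bounded strncat() pattern (illustrative names only; this variant explicitly reserves a byte for the terminating NUL):

#include <linux/errno.h>
#include <linux/string.h>

/* Append 'extra' to 'dst' only if it fits, including the NUL terminator. */
static int append_bounded(char *dst, size_t dst_size, const char *extra)
{
	size_t used = strnlen(dst, dst_size);
	size_t need = strlen(extra);

	if (dst_size - used <= need)
		return -EINVAL;		/* not enough room left */

	strncat(dst, extra, dst_size - used);
	return 0;
}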
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 29d53b1763a7..2c615ab09e64 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -541,6 +541,7 @@ static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
+static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
@@ -770,6 +771,10 @@ int octeon_register_device(struct octeon_device *oct,
oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
atomic_set(oct->adapter_refcount, 0);
+ /* Like the reference count, the f/w state is shared 'per-adapter' */
+ oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
+ atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);
+
spin_lock(&octeon_devices_lock);
for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
if (!octeon_device[idx]) {
@@ -780,11 +785,15 @@ int octeon_register_device(struct octeon_device *oct,
atomic_inc(oct->adapter_refcount);
return 1; /* here, refcount is guaranteed to be 1 */
}
- /* if another device is at same bus/dev, use its refcounter */
+ /* If another device is at the same bus/dev, use its refcounter
+ * (and f/w state variable).
+ */
if ((octeon_device[idx]->loc.bus == bus) &&
(octeon_device[idx]->loc.dev == dev)) {
oct->adapter_refcount =
octeon_device[idx]->adapter_refcount;
+ oct->adapter_fw_state =
+ octeon_device[idx]->adapter_fw_state;
break;
}
}
@@ -1171,6 +1180,10 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
spin_unlock_bh(&oct->dispatch.lock);
} else {
+ if (pfn == fn &&
+ octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
+ return 0;
+
dev_err(&oct->pci_dev->dev,
"Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
opcode, subcode);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 894af199ddef..63b0c758a0a6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -23,6 +23,7 @@
#define _OCTEON_DEVICE_H_
#include <linux/interrupt.h>
+#include <net/devlink.h>
/** PCI VendorId Device Id */
#define OCTEON_CN68XX_PCIID 0x91177d
@@ -50,6 +51,13 @@ enum octeon_pci_swap_mode {
OCTEON_PCI_32BIT_LW_SWAP = 3
};
+enum lio_fw_state {
+ FW_IS_PRELOADED = 0,
+ FW_NEEDS_TO_BE_LOADED = 1,
+ FW_IS_BEING_LOADED = 2,
+ FW_HAS_BEEN_LOADED = 3,
+};
+
enum {
OCTEON_CONFIG_TYPE_DEFAULT = 0,
NUM_OCTEON_CONFS,
@@ -384,6 +392,15 @@ struct octeon_ioq_vector {
u32 ioq_num;
};
+struct lio_vf_rep_list {
+ int num_vfs;
+ struct net_device *ndev[CN23XX_MAX_VFS_PER_PF];
+};
+
+struct lio_devlink_priv {
+ struct octeon_device *oct;
+};
+
/** The Octeon device.
* Each Octeon device has this structure to represent all its
* components.
@@ -557,7 +574,14 @@ struct octeon_device {
} loc;
atomic_t *adapter_refcount; /* reference count of adapter */
+
+ atomic_t *adapter_fw_state; /* per-adapter, lio_fw_state */
+
bool ptp_enable;
+
+ struct lio_vf_rep_list vf_rep_list;
+ struct devlink *devlink;
+ enum devlink_eswitch_mode eswitch_mode;
};
#define OCT_DRV_ONLINE 1
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 9372d4ce9954..3461d65ff4eb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -52,8 +52,8 @@ struct __dispatch {
* @return Failure: NULL
*
*/
-static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
- u16 opcode, u16 subcode)
+void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+ u16 opcode, u16 subcode)
{
int idx;
struct list_head *dispatch;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index f91bc84d1719..815a9f56fd59 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -400,6 +400,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
+void *octeon_get_dispatch_arg(struct octeon_device *oct,
+ u16 opcode, u16 subcode);
+
void octeon_droq_print_stats(void);
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 5c3c8da976f7..81c987682941 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -343,6 +343,9 @@ int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
int lio_wait_for_instr_fetch(struct octeon_device *oct);
+void
+octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no);
+
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
void (*fn)(void *));
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 32ef3a7d88d8..c846eec11a45 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -63,7 +63,7 @@ struct octnet_buf_free_info {
};
/* BQL-related functions */
-void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 9e36319cead6..f2d1a076a038 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -136,6 +136,9 @@ struct lio {
/* work queue for link status */
struct cavium_wq link_status_wq;
+ /* work queue to regularly send local time to octeon firmware */
+ struct cavium_wq sync_octeon_time_wq;
+
int netdev_uc_count;
};
@@ -195,7 +198,7 @@ static inline void
struct sk_buff *skb;
struct octeon_skb_page_info *skb_pg_info;
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC);
if (unlikely(!page))
return NULL;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index b457cf23fce6..150609bd8849 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -82,9 +82,10 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
}
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata)
+ struct octnic_data_pkt *ndata,
+ int xmit_more)
{
- int ring_doorbell = 1;
+ int ring_doorbell = !xmit_more;
return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
ndata->buf, ndata->datasize,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 6480ef863441..de4130d26a98 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -279,7 +279,8 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int octnet_send_nic_data_pkt(struct octeon_device *oct,
- struct octnic_data_pkt *ndata);
+ struct octnic_data_pkt *ndata,
+ int xmit_more);
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 1e0fbce86d60..e07d2093b971 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -278,6 +278,18 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
}
}
+void
+octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq;
+
+ iq = oct->instr_queue[iq_no];
+ spin_lock(&iq->post_lock);
+ if (iq->fill_cnt)
+ ring_doorbell(oct, iq);
+ spin_unlock(&iq->post_lock);
+}
+
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
u8 *cmd)
{
@@ -477,8 +489,6 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
}
tot_inst_processed += inst_processed;
- inst_processed = 0;
-
} while (tot_inst_processed < napi_budget);
if (napi_budget && (tot_inst_processed >= napi_budget))
@@ -543,6 +553,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
u32 datasize, u32 reqtype)
{
+ int xmit_stopped;
struct iq_post_status st;
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
@@ -554,12 +565,13 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
- octeon_report_sent_bytes_to_bql(buf, reqtype);
+ xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
__add_to_request_list(iq, st.index, buf, reqtype);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
- if (force_db)
+ if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
+ xmit_stopped || st.status == IQ_SEND_STOP)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 2887bcaf6af5..3f6afb54a5eb 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -705,14 +705,15 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
if (!ptp.s.ptp_en)
cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
- pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
- (NSEC_PER_SEC << 32) / clock_comp);
+ netdev_info(netdev,
+ "PTP Clock using sclk reference @ %lldHz\n",
+ (NSEC_PER_SEC << 32) / clock_comp);
} else {
/* The clock is already programmed to use a GPIO */
u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
- pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
- ptp.s.ext_clk_in,
- (NSEC_PER_SEC << 32) / clock_comp);
+ netdev_info(netdev,
+ "PTP Clock using GPIO%d @ %lld Hz\n",
+ ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
}
/* Enable the clock if it wasn't done already */
@@ -926,14 +927,11 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
- if (link_changed > 0) {
- pr_info("%s: Link is up - %d/%s\n", netdev->name,
- phydev->speed,
- phydev->duplex == DUPLEX_FULL ?
- "Full" : "Half");
- } else {
- pr_info("%s: Link is down\n", netdev->name);
- }
+ if (link_changed > 0)
+ netdev_info(netdev, "Link is up - %d/%s\n",
+ phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
+ else
+ netdev_info(netdev, "Link is down\n");
}
}
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
index 6b4d4add7353..2fc6142d1634 100644
--- a/drivers/net/ethernet/cavium/thunder/Makefile
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Cavium's Thunder ethernet device
#
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index fb770b0182d3..8f1dd55b3e08 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -361,17 +361,8 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
}
}
-static void nic_free_lmacmem(struct nicpf *nic)
+static void nic_get_hw_info(struct nicpf *nic)
{
- kfree(nic->vf_lmac_map);
- kfree(nic->link);
- kfree(nic->duplex);
- kfree(nic->speed);
-}
-
-static int nic_get_hw_info(struct nicpf *nic)
-{
- u8 max_lmac;
u16 sdevid;
struct hw_info *hw = nic->hw;
@@ -419,41 +410,16 @@ static int nic_get_hw_info(struct nicpf *nic)
break;
}
hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
-
- /* Allocate memory for LMAC tracking elements */
- max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX;
- nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->vf_lmac_map)
- goto error;
- nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->link)
- goto error;
- nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL);
- if (!nic->duplex)
- goto error;
- nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL);
- if (!nic->speed)
- goto error;
- return 0;
-
-error:
- nic_free_lmacmem(nic);
- return -ENOMEM;
}
#define BGX0_BLOCK 8
#define BGX1_BLOCK 9
-static int nic_init_hw(struct nicpf *nic)
+static void nic_init_hw(struct nicpf *nic)
{
- int i, err;
+ int i;
u64 cqm_cfg;
- /* Get HW capability info */
- err = nic_get_hw_info(nic);
- if (err)
- return err;
-
/* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -498,8 +464,6 @@ static int nic_init_hw(struct nicpf *nic)
cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
-
- return 0;
}
/* Channel parse index configuration */
@@ -584,9 +548,6 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
union nic_mbx mbx = {};
- u64 *msg;
-
- msg = (u64 *)&mbx;
mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
@@ -608,7 +569,6 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
rssi = rssi_base;
- qset = cfg->vf_id;
for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
u8 svf = cfg->ind_tbl[idx] >> 3;
@@ -1273,6 +1233,7 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct nicpf *nic;
+ u8 max_lmac;
int err;
BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
@@ -1282,10 +1243,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
- if (!nic->hw) {
- devm_kfree(dev, nic);
+ if (!nic->hw)
return -ENOMEM;
- }
pci_set_drvdata(pdev, nic);
@@ -1326,11 +1285,33 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->node = nic_get_node_id(pdev);
- /* Initialize hardware */
- err = nic_init_hw(nic);
- if (err)
+ /* Get HW capability info */
+ nic_get_hw_info(nic);
+
+ /* Allocate memory for LMAC tracking elements */
+ err = -ENOMEM;
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ nic->vf_lmac_map = devm_kmalloc_array(dev, max_lmac, sizeof(u8),
+ GFP_KERNEL);
+ if (!nic->vf_lmac_map)
+ goto err_release_regions;
+
+ nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->link)
+ goto err_release_regions;
+
+ nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
+ if (!nic->duplex)
+ goto err_release_regions;
+
+ nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
+ if (!nic->speed)
goto err_release_regions;
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
nic_set_lmac_vf_mapping(nic);
/* Register interrupts */
@@ -1364,9 +1345,6 @@ err_unregister_interrupts:
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
- nic_free_lmacmem(nic);
- devm_kfree(dev, nic->hw);
- devm_kfree(dev, nic);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
@@ -1388,10 +1366,6 @@ static void nic_remove(struct pci_dev *pdev)
nic_unregister_interrupts(nic);
pci_release_regions(pdev);
- nic_free_lmacmem(nic);
- devm_kfree(&pdev->dev, nic->hw);
- devm_kfree(&pdev->dev, nic);
-
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
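The allocation changes above switch the LMAC tracking arrays to devm_kmalloc_array() and drop nic_free_lmacmem() plus the explicit devm_kfree() calls: device-managed memory is released automatically when the driver unbinds, so error paths and .remove need no manual frees. A small, hypothetical probe sketch of that pattern (names are illustrative):

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	u8 *lmac_map;

	/* Tied to the device's lifetime; freed automatically on unbind. */
	lmac_map = devm_kmalloc_array(dev, 16, sizeof(u8), GFP_KERNEL);
	if (!lmac_map)
		return -ENOMEM;		/* nothing to unwind */

	/* ... remainder of probe ... */
	return 0;
}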
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 805ab45e9b5a..a063c36c4c58 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -523,6 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
xdp.data_hard_start = page_address(page);
xdp.data = (void *)cpu_addr;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
@@ -1740,7 +1741,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
return 0;
}
-static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nicvf *nic = netdev_priv(netdev);
@@ -1773,7 +1774,7 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_tx_timeout = nicvf_tx_timeout,
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
- .ndo_xdp = nicvf_xdp,
+ .ndo_bpf = nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index b6a5eec6ed8e..c0f978d2e8a7 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Chelsio network device drivers.
#
diff --git a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
index ccdb2bc9ae98..e9c65d812c4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/fpga_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: fpga_defs.h,v $ $Revision: 1.4 $ */
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
index 76ce6e538326..30b003484fc1 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Date: 2005/10/24 23:18:13 $ $RCSfile: mv88e1xxx.c,v $ $Revision: 1.49 $ */
#include "common.h"
#include "mv88e1xxx.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
index 967cc4286359..11bf0f7c2f90 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88e1xxx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: mv88e1xxx.h,v $ $Revision: 1.13 $ */
#ifndef CHELSIO_MV8E1XXX_H
#define CHELSIO_MV8E1XXX_H
diff --git a/drivers/net/ethernet/chelsio/cxgb/my3126.c b/drivers/net/ethernet/chelsio/cxgb/my3126.c
index d546f46c8ef7..20c09cc4b323 100644
--- a/drivers/net/ethernet/chelsio/cxgb/my3126.c
+++ b/drivers/net/ethernet/chelsio/cxgb/my3126.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Date: 2005/11/12 02:13:49 $ $RCSfile: my3126.c,v $ $Revision: 1.15 $ */
#include "cphy.h"
#include "elmer0.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 0f13a7f7c1d3..30de26ef3da4 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1882,10 +1882,10 @@ send:
/*
* Callback for the Tx buffer reclaim timer. Runs with softirqs disabled.
*/
-static void sge_tx_reclaim_cb(unsigned long data)
+static void sge_tx_reclaim_cb(struct timer_list *t)
{
int i;
- struct sge *sge = (struct sge *)data;
+ struct sge *sge = from_timer(sge, t, tx_reclaim_timer);
for (i = 0; i < SGE_CMDQ_N; ++i) {
struct cmdQ *q = &sge->cmdQ[i];
@@ -1978,10 +1978,10 @@ void t1_sge_start(struct sge *sge)
/*
* Callback for the T2 ESPI 'stuck packet feature' workaround
*/
-static void espibug_workaround_t204(unsigned long data)
+static void espibug_workaround_t204(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *sge = adapter->sge;
+ struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct adapter *adapter = sge->adapter;
unsigned int nports = adapter->params.nports;
u32 seop[MAX_NPORTS];
@@ -2021,10 +2021,10 @@ static void espibug_workaround_t204(unsigned long data)
mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
-static void espibug_workaround(unsigned long data)
+static void espibug_workaround(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
- struct sge *sge = adapter->sge;
+ struct sge *sge = from_timer(sge, t, espibug_timer);
+ struct adapter *adapter = sge->adapter;
if (netif_running(adapter->port[0].dev)) {
struct sk_buff *skb = sge->espibug_skb[0];
@@ -2075,19 +2075,15 @@ struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
goto nomem_port;
}
- init_timer(&sge->tx_reclaim_timer);
- sge->tx_reclaim_timer.data = (unsigned long)sge;
- sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+ timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0);
if (is_T2(sge->adapter)) {
- init_timer(&sge->espibug_timer);
+ timer_setup(&sge->espibug_timer,
+ adapter->params.nports > 1 ? espibug_workaround_t204 : espibug_workaround,
+ 0);
- if (adapter->params.nports > 1) {
+ if (adapter->params.nports > 1)
tx_sched_init(sge);
- sge->espibug_timer.function = espibug_workaround_t204;
- } else
- sge->espibug_timer.function = espibug_workaround;
- sge->espibug_timer.data = (unsigned long)sge->adapter;
sge->espibug_timeout = 1;
/* for T204, every 10ms */
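The timer changes above follow the 4.15-era conversion from init_timer() plus an 'unsigned long data' cookie to timer_setup() with from_timer(), which recovers the enclosing structure from the timer pointer. A self-contained sketch of the pattern with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_state {
	struct timer_list poll_timer;
	unsigned long polls;
};

static void my_poll_cb(struct timer_list *t)
{
	/* from_timer() is container_of() specialised for timer callbacks */
	struct my_state *st = from_timer(st, t, poll_timer);

	st->polls++;
	mod_timer(&st->poll_timer, jiffies + HZ);	/* re-arm in ~1s */
}

static void my_state_init(struct my_state *st)
{
	st->polls = 0;
	timer_setup(&st->poll_timer, my_poll_cb, 0);
	mod_timer(&st->poll_timer, jiffies + HZ);
}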
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
index b146acabf982..4337cee0763e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.c
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Date: 2006/02/07 04:21:54 $ $RCSfile: tp.c,v $ $Revision: 1.73 $ */
#include "common.h"
#include "regs.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.h b/drivers/net/ethernet/chelsio/cxgb/tp.h
index dfd8ce25106a..ba15675d56df 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.h
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Date: 2005/03/07 23:59:05 $ $RCSfile: tp.h,v $ $Revision: 1.20 $ */
#ifndef CHELSIO_TP_H
#define CHELSIO_TP_H
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index bdc895bd2a46..873c1c7b4ca0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Date: 2006/04/28 19:20:06 $ $RCSfile: vsc7326.c,v $ $Revision: 1.19 $ */
/* Driver for Vitesse VSC7326 (Schaumburg) MAC */
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
index 479edbcabe68..04503857c6a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Date: 2006/04/28 19:20:17 $ $RCSfile: vsc7326_reg.h,v $ $Revision: 1.5 $ */
#ifndef _VSC7321_REG_H_
#define _VSC7321_REG_H_
diff --git a/drivers/net/ethernet/chelsio/cxgb3/regs.h b/drivers/net/ethernet/chelsio/cxgb3/regs.h
index 81029b872bdd..174eb45100a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define A_SG_CONTROL 0x0
#define S_CONGMODE 29
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e2d342647b19..e988caa797cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
+ __free_pages(q->pg_chunk.page, order);
+ q->pg_chunk.page = NULL;
+ return -EIO;
+ }
q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -949,40 +954,78 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
return flits_to_desc(flits);
}
+/* map_skb - map a packet main body and its page fragments
+ * @pdev: the PCI device
+ * @skb: the packet
+ * @addr: placeholder to save the mapped addresses
+ *
+ * Map the main body of an sk_buff and its page fragments, if any.
+ */
+static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
+ dma_addr_t *addr)
+{
+ const skb_frag_t *fp, *end;
+ const struct skb_shared_info *si;
+
+ if (skb_headlen(skb)) {
+ *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, *addr))
+ goto out_err;
+ addr++;
+ }
+
+ si = skb_shinfo(skb);
+ end = &si->frags[si->nr_frags];
+
+ for (fp = si->frags; fp < end; fp++) {
+ *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+ if (pci_dma_mapping_error(pdev, *addr))
+ goto unwind;
+ addr++;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
+ DMA_TO_DEVICE);
+
+ pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
+out_err:
+ return -ENOMEM;
+}
+
/**
- * make_sgl - populate a scatter/gather list for a packet
+ * write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @sgp: the SGL to populate
* @start: start address of skb main body data to include in the SGL
* @len: length of skb main body data to include in the SGL
- * @pdev: the PCI device
+ * @addr: the list of the mapped addresses
*
- * Generates a scatter/gather list for the buffers that make up a packet
+ * Copies the scatter/gather list for the buffers that make up a packet
* and returns the SGL size in 8-byte words. The caller must size the SGL
* appropriately.
*/
-static inline unsigned int make_sgl(const struct sk_buff *skb,
- struct sg_ent *sgp, unsigned char *start,
- unsigned int len, struct pci_dev *pdev)
+static inline unsigned int write_sgl(const struct sk_buff *skb,
+ struct sg_ent *sgp, unsigned char *start,
+ unsigned int len, const dma_addr_t *addr)
{
- dma_addr_t mapping;
- unsigned int i, j = 0, nfrags;
+ unsigned int i, j = 0, k = 0, nfrags;
if (len) {
- mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
- sgp->addr[0] = cpu_to_be64(mapping);
- j = 1;
+ sgp->addr[j++] = cpu_to_be64(addr[k++]);
}
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
- sgp->addr[j] = cpu_to_be64(mapping);
+ sgp->addr[j] = cpu_to_be64(addr[k++]);
j ^= 1;
if (j == 0)
++sgp;
@@ -1138,7 +1181,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
- unsigned int compl)
+ unsigned int compl, const dma_addr_t *addr)
{
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1196,7 +1239,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
+ sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1227,6 +1270,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct sge_qset *qs;
struct sge_txq *q;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
/*
* The chip min packet length is 9 octets but play safe and reject
@@ -1255,6 +1299,14 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ /* Map the packet if it can't be sent as immediate data */
+ if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
+ if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_tx_queue(txq, qs, q);
@@ -1312,7 +1364,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!skb_shared(skb)))
skb_orphan(skb);
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
}
@@ -1577,7 +1629,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
*/
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
- unsigned int gen, unsigned int ndesc)
+ unsigned int gen, unsigned int ndesc,
+ const dma_addr_t *addr)
{
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
@@ -1598,10 +1651,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
- skb_tail_pointer(skb) -
- skb_transport_header(skb),
- adap->pdev);
+ sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
+ skb_tail_pointer(skb) - skb_transport_header(skb),
+ addr);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
@@ -1659,6 +1711,12 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
goto again;
}
+ if (!immediate(skb) &&
+ map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
+ spin_unlock(&q->lock);
+ return NET_XMIT_SUCCESS;
+ }
+
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
@@ -1669,7 +1727,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
}
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
}
@@ -1687,6 +1745,7 @@ static void restart_offloadq(unsigned long data)
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
+ unsigned int written = 0;
spin_lock(&q->lock);
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1706,10 +1765,15 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
break;
}
+ if (!immediate(skb) &&
+ map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
+ break;
+
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
+ written += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
@@ -1717,7 +1781,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
+ (dma_addr_t *)skb->head);
spin_lock(&q->lock);
}
spin_unlock(&q->lock);
@@ -1727,8 +1792,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
wmb();
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ if (likely(written))
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
@@ -2853,9 +2919,9 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
* bother cleaning them up here.
*
*/
-static void sge_timer_tx(unsigned long data)
+static void sge_timer_tx(struct timer_list *t)
{
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
@@ -2893,10 +2959,10 @@ static void sge_timer_tx(unsigned long data)
* starved.
*
*/
-static void sge_timer_rx(unsigned long data)
+static void sge_timer_rx(struct timer_list *t)
{
spinlock_t *lock;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
u32 status;
@@ -2976,8 +3042,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
struct sge_qset *q = &adapter->sge.qs[id];
init_qset_cntxt(q, id);
- setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q);
- setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q);
+ timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
+ timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
sizeof(struct rx_desc),
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
index 29b6c800b238..c31ce8dc95fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is automatically generated --- any changes will be lost.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
index 705713b56636..3c3e6cf6aca6 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3cdev.h
@@ -60,7 +60,7 @@ struct t3cdev {
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
void *priv; /* driver private data */
- void *l2opt; /* optional layer 2 data */
+ void __rcu *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 817212702f0a..8c9c6b0d2e5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -1,10 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Chelsio T4 driver
#
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o cxgb4_ptp.o
+cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
+ cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
+ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
+ cudbg_common.o cudbg_lib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 3103ef9b561d..290039026ece 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -96,7 +96,8 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
if (!ret) {
ce = cte;
read_unlock_bh(&ctbl->lock);
- goto found;
+ refcount_inc(&ce->refcnt);
+ return 0;
}
}
read_unlock_bh(&ctbl->lock);
@@ -108,7 +109,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
list_del(&ce->list);
INIT_LIST_HEAD(&ce->list);
spin_lock_init(&ce->lock);
- atomic_set(&ce->refcnt, 0);
+ refcount_set(&ce->refcnt, 0);
atomic_dec(&ctbl->nfree);
list_add_tail(&ce->list, &ctbl->hash_list[hash]);
if (v6) {
@@ -138,9 +139,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
return -ENOMEM;
}
write_unlock_bh(&ctbl->lock);
-found:
- atomic_inc(&ce->refcnt);
-
+ refcount_set(&ce->refcnt, 1);
return 0;
}
EXPORT_SYMBOL(cxgb4_clip_get);
@@ -179,7 +178,7 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
found:
write_lock_bh(&ctbl->lock);
spin_lock_bh(&ce->lock);
- if (atomic_dec_and_test(&ce->refcnt)) {
+ if (refcount_dec_and_test(&ce->refcnt)) {
list_del(&ce->list);
INIT_LIST_HEAD(&ce->list);
list_add_tail(&ce->list, &ctbl->ce_free_head);
@@ -266,7 +265,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
ip[0] = '\0';
sprintf(ip, "%pISc", &ce->addr);
seq_printf(seq, "%-25s %u\n", ip,
- atomic_read(&ce->refcnt));
+ refcount_read(&ce->refcnt));
}
}
seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 35eb43c6bcbb..a0e0ae19649f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -10,9 +10,11 @@
* release for licensing terms and conditions.
*/
+#include <linux/refcount.h>
+
struct clip_entry {
spinlock_t lock; /* Hold while modifying clip reference */
- atomic_t refcnt;
+ refcount_t refcnt;
struct list_head list;
union {
struct sockaddr_in addr;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
new file mode 100644
index 000000000000..f78ba1743b5a
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+
+int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+ struct cudbg_buffer *pin_buff)
+{
+ u32 offset;
+
+ offset = pdbg_buff->offset;
+ if (offset + size > pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + offset;
+ pin_buff->offset = offset;
+ pin_buff->size = size;
+ pdbg_buff->size -= size;
+ return 0;
+}
+
+void cudbg_put_buff(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pdbg_buff)
+{
+ pdbg_buff->size += pin_buff->size;
+ pin_buff->data = NULL;
+ pin_buff->offset = 0;
+ pin_buff->size = 0;
+}
+
+void cudbg_update_buff(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ /* We already write to the buffer provided by ethtool, so just
+ * increment the offset to the next free space.
+ */
+ pout_buff->offset += pin_buff->size;
+}
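cudbg_get_buff() reserves a region of the ethtool dump buffer as a scratch view, cudbg_update_buff() commits it by advancing the dump offset, and cudbg_put_buff() releases the reservation (cudbg_lib.c below pairs the last two as cudbg_write_and_release_buff()). A hypothetical collector built on these helpers might look like this (example_collect and the memset fill are illustrative only):

#include <linux/types.h>
#include <linux/string.h>
#include "cudbg_if.h"
#include "cudbg_lib_common.h"

static int example_collect(struct cudbg_buffer *dbg_buff, u32 size)
{
	struct cudbg_buffer scratch = { 0 };
	int rc;

	rc = cudbg_get_buff(dbg_buff, size, &scratch);
	if (rc)
		return rc;	/* e.g. CUDBG_STATUS_NO_MEM */

	/* A real collector would copy hardware state into scratch.data. */
	memset(scratch.data, 0, size);

	cudbg_update_buff(&scratch, dbg_buff);	/* advance dump offset */
	cudbg_put_buff(&scratch, dbg_buff);	/* release the reservation */
	return 0;
}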
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
new file mode 100644
index 000000000000..605689957496
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ENTITY_H__
+#define __CUDBG_ENTITY_H__
+
+#define EDC0_FLAG 3
+#define EDC1_FLAG 4
+
+#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
+
+struct card_mem {
+ u16 size_edc0;
+ u16 size_edc1;
+ u16 mem_flag;
+};
+
+struct cudbg_mbox_log {
+ struct mbox_cmd entry;
+ u32 hi[MBOX_LEN / 8];
+ u32 lo[MBOX_LEN / 8];
+};
+
+struct cudbg_cim_qcfg {
+ u8 chip;
+ u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+ u16 thres[CIM_NUM_IBQ];
+ u32 obq_wr[2 * CIM_NUM_OBQ_T5];
+ u32 stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)];
+};
+
+struct cudbg_rss_vf_conf {
+ u32 rss_vf_vfl;
+ u32 rss_vf_vfh;
+};
+
+struct cudbg_pm_stats {
+ u32 tx_cnt[T6_PM_NSTATS];
+ u32 rx_cnt[T6_PM_NSTATS];
+ u64 tx_cyc[T6_PM_NSTATS];
+ u64 rx_cyc[T6_PM_NSTATS];
+};
+
+struct cudbg_hw_sched {
+ u32 kbps[NTX_SCHED];
+ u32 ipg[NTX_SCHED];
+ u32 pace_tab[NTX_SCHED];
+ u32 mode;
+ u32 map;
+};
+
+struct ireg_field {
+ u32 ireg_addr;
+ u32 ireg_data;
+ u32 ireg_local_offset;
+ u32 ireg_offset_range;
+};
+
+struct ireg_buf {
+ struct ireg_field tp_pio;
+ u32 outbuf[32];
+};
+
+struct cudbg_ulprx_la {
+ u32 data[ULPRX_LA_SIZE * 8];
+ u32 size;
+};
+
+struct cudbg_tp_la {
+ u32 size;
+ u32 mode;
+ u8 data[0];
+};
+
+struct cudbg_cim_pif_la {
+ int size;
+ u8 data[0];
+};
+
+struct cudbg_clk_info {
+ u64 retransmit_min;
+ u64 retransmit_max;
+ u64 persist_timer_min;
+ u64 persist_timer_max;
+ u64 keepalive_idle_timer;
+ u64 keepalive_interval;
+ u64 initial_srtt;
+ u64 finwait2_timer;
+ u32 dack_timer;
+ u32 res;
+ u32 cclk_ps;
+ u32 tre;
+ u32 dack_re;
+};
+
+struct cudbg_tid_info_region {
+ u32 ntids;
+ u32 nstids;
+ u32 stid_base;
+ u32 hash_base;
+
+ u32 natids;
+ u32 nftids;
+ u32 ftid_base;
+ u32 aftid_base;
+ u32 aftid_end;
+
+ u32 sftid_base;
+ u32 nsftids;
+
+ u32 uotid_base;
+ u32 nuotids;
+
+ u32 sb;
+ u32 flags;
+ u32 le_db_conf;
+ u32 ip_users;
+ u32 ipv6_users;
+
+ u32 hpftid_base;
+ u32 nhpftids;
+};
+
+#define CUDBG_TID_INFO_REV 1
+
+struct cudbg_tid_info_region_rev1 {
+ struct cudbg_ver_hdr ver_hdr;
+ struct cudbg_tid_info_region tid;
+ u32 tid_start;
+ u32 reserved[16];
+};
+
+#define CUDBG_MAX_FL_QIDS 1024
+
+struct cudbg_ch_cntxt {
+ u32 cntxt_type;
+ u32 cntxt_id;
+ u32 data[SGE_CTXT_SIZE / 4];
+};
+
+#define CUDBG_MAX_RPLC_SIZE 128
+
+struct cudbg_mps_tcam {
+ u64 mask;
+ u32 rplc[8];
+ u32 idx;
+ u32 cls_lo;
+ u32 cls_hi;
+ u32 rplc_size;
+ u32 vniy;
+ u32 vnix;
+ u32 dip_hit;
+ u32 vlan_vld;
+ u32 repli;
+ u16 ivlan;
+ u8 addr[ETH_ALEN];
+ u8 lookup_type;
+ u8 port_num;
+ u8 reserved[2];
+};
+
+#define CUDBG_VPD_PF_SIZE 0x800
+#define CUDBG_SCFG_VER_ADDR 0x06
+#define CUDBG_SCFG_VER_LEN 4
+#define CUDBG_VPD_VER_ADDR 0x18c7
+#define CUDBG_VPD_VER_LEN 2
+
+struct cudbg_vpd_data {
+ u8 sn[SERNUM_LEN + 1];
+ u8 bn[PN_LEN + 1];
+ u8 na[MACADDR_LEN + 1];
+ u8 mn[ID_LEN + 1];
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_micro;
+ u16 fw_build;
+ u32 scfg_vers;
+ u32 vpd_vers;
+};
+
+#define CUDBG_MAX_TCAM_TID 0x800
+
+enum cudbg_le_entry_types {
+ LE_ET_UNKNOWN = 0,
+ LE_ET_TCAM_CON = 1,
+ LE_ET_TCAM_SERVER = 2,
+ LE_ET_TCAM_FILTER = 3,
+ LE_ET_TCAM_CLIP = 4,
+ LE_ET_TCAM_ROUTING = 5,
+ LE_ET_HASH_CON = 6,
+ LE_ET_INVALID_TID = 8,
+};
+
+struct cudbg_tcam {
+ u32 filter_start;
+ u32 server_start;
+ u32 clip_start;
+ u32 routing_start;
+ u32 tid_hash_base;
+ u32 max_tid;
+};
+
+struct cudbg_tid_data {
+ u32 tid;
+ u32 dbig_cmd;
+ u32 dbig_conf;
+ u32 dbig_rsp_stat;
+ u32 data[NUM_LE_DB_DBGI_RSP_DATA_INSTANCES];
+};
+
+#define CUDBG_NUM_ULPTX 11
+#define CUDBG_NUM_ULPTX_READ 512
+
+struct cudbg_ulptx_la {
+ u32 rdptr[CUDBG_NUM_ULPTX];
+ u32 wrptr[CUDBG_NUM_ULPTX];
+ u32 rddata[CUDBG_NUM_ULPTX];
+ u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ];
+};
+
+#define CUDBG_CHAC_PBT_ADDR 0x2800
+#define CUDBG_CHAC_PBT_LRF 0x3000
+#define CUDBG_CHAC_PBT_DATA 0x3800
+#define CUDBG_PBT_DYNAMIC_ENTRIES 8
+#define CUDBG_PBT_STATIC_ENTRIES 16
+#define CUDBG_LRF_ENTRIES 8
+#define CUDBG_PBT_DATA_ENTRIES 512
+
+struct cudbg_pbt_tables {
+ u32 pbt_dynamic[CUDBG_PBT_DYNAMIC_ENTRIES];
+ u32 pbt_static[CUDBG_PBT_STATIC_ENTRIES];
+ u32 lrf_table[CUDBG_LRF_ENTRIES];
+ u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
+};
+
+#define IREG_NUM_ELEM 4
+
+static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
+ {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
+ {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
+ {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
+ {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
+ {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
+ {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
+ {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
+ {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
+ {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
+ {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
+ {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
+ {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
+};
+
+static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
+ {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
+ {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
+ {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
+ {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
+ {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
+ {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
+ {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
+ {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
+ {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
+ {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
+ {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
+};
+
+static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
+ {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
+ {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
+ {0x7e50, 0x7e54, 0x0, 13},
+ {0x7e50, 0x7e54, 0x10, 6},
+ {0x7e50, 0x7e54, 0x18, 21},
+ {0x7e50, 0x7e54, 0x30, 32},
+ {0x7e50, 0x7e54, 0x50, 22},
+ {0x7e50, 0x7e54, 0x68, 12}
+};
+
+static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
+ {0x7e50, 0x7e54, 0x0, 13},
+ {0x7e50, 0x7e54, 0x10, 6},
+ {0x7e50, 0x7e54, 0x18, 8},
+ {0x7e50, 0x7e54, 0x20, 13},
+ {0x7e50, 0x7e54, 0x30, 16},
+ {0x7e50, 0x7e54, 0x40, 16},
+ {0x7e50, 0x7e54, 0x50, 16},
+ {0x7e50, 0x7e54, 0x60, 6},
+ {0x7e50, 0x7e54, 0x68, 4}
+};
+
+static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
+ {0x10cc, 0x10d0, 0x0, 16},
+ {0x10cc, 0x10d4, 0x0, 16},
+};
+
+static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
+ {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
+ {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
+ {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
+};
+
+static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
+ {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
+ {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
+};
+
+static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
+ {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
+ {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
+};
+
+static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
+ {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
+ {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
+};
+
+static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
+ {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
+ {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
+ {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
+};
+
+static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
+ {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
+ {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
+};
+
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+};
+
+static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
+ {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
+};
+#endif /* __CUDBG_ENTITY_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
new file mode 100644
index 000000000000..e10ff1ee62c5
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_IF_H__
+#define __CUDBG_IF_H__
+
+/* Error codes */
+#define CUDBG_STATUS_NO_MEM -19
+#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_SYSTEM_ERROR -29
+#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
+
+#define CUDBG_MAJOR_VERSION 1
+#define CUDBG_MINOR_VERSION 14
+
+enum cudbg_dbg_entity_type {
+ CUDBG_REG_DUMP = 1,
+ CUDBG_DEV_LOG = 2,
+ CUDBG_CIM_LA = 3,
+ CUDBG_CIM_MA_LA = 4,
+ CUDBG_CIM_QCFG = 5,
+ CUDBG_CIM_IBQ_TP0 = 6,
+ CUDBG_CIM_IBQ_TP1 = 7,
+ CUDBG_CIM_IBQ_ULP = 8,
+ CUDBG_CIM_IBQ_SGE0 = 9,
+ CUDBG_CIM_IBQ_SGE1 = 10,
+ CUDBG_CIM_IBQ_NCSI = 11,
+ CUDBG_CIM_OBQ_ULP0 = 12,
+ CUDBG_CIM_OBQ_ULP1 = 13,
+ CUDBG_CIM_OBQ_ULP2 = 14,
+ CUDBG_CIM_OBQ_ULP3 = 15,
+ CUDBG_CIM_OBQ_SGE = 16,
+ CUDBG_CIM_OBQ_NCSI = 17,
+ CUDBG_EDC0 = 18,
+ CUDBG_EDC1 = 19,
+ CUDBG_RSS = 22,
+ CUDBG_RSS_VF_CONF = 25,
+ CUDBG_PATH_MTU = 27,
+ CUDBG_PM_STATS = 30,
+ CUDBG_HW_SCHED = 31,
+ CUDBG_TP_INDIRECT = 36,
+ CUDBG_SGE_INDIRECT = 37,
+ CUDBG_ULPRX_LA = 41,
+ CUDBG_TP_LA = 43,
+ CUDBG_CIM_PIF_LA = 45,
+ CUDBG_CLK = 46,
+ CUDBG_CIM_OBQ_RXQ0 = 47,
+ CUDBG_CIM_OBQ_RXQ1 = 48,
+ CUDBG_PCIE_INDIRECT = 50,
+ CUDBG_PM_INDIRECT = 51,
+ CUDBG_TID_INFO = 54,
+ CUDBG_DUMP_CONTEXT = 56,
+ CUDBG_MPS_TCAM = 57,
+ CUDBG_VPD_DATA = 58,
+ CUDBG_LE_TCAM = 59,
+ CUDBG_CCTRL = 60,
+ CUDBG_MA_INDIRECT = 61,
+ CUDBG_ULPTX_LA = 62,
+ CUDBG_UP_CIM_INDIRECT = 64,
+ CUDBG_PBT_TABLE = 65,
+ CUDBG_MBOX_LOG = 66,
+ CUDBG_HMA_INDIRECT = 67,
+ CUDBG_MAX_ENTITY = 70,
+};
+
+struct cudbg_init {
+ struct adapter *adap; /* Pointer to adapter structure */
+ void *outbuf; /* Output buffer */
+ u32 outbuf_size; /* Output buffer size */
+};
+
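+/* Convert a size in megabytes to bytes. */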
+static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
+{
+ return size * 1024 * 1024;
+}
+#endif /* __CUDBG_IF_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
new file mode 100644
index 000000000000..d699bf88d18f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -0,0 +1,1929 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "t4_regs.h"
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_lib.h"
+#include "cudbg_entity.h"
+
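+/* Commit the filled scratch buffer to the main debug buffer and release the
+ * scratch buffer.
+ */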
+static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ cudbg_update_buff(pin_buff, dbg_buff);
+ cudbg_put_buff(pin_buff, dbg_buff);
+}
+
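+/* Return 1 if firmware is initialized and mailbox access is allowed,
+ * 0 if collection must fall back to direct register access.
+ */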
+static int is_fw_attached(struct cudbg_init *pdbg_init)
+{
+ struct adapter *padap = pdbg_init->adap;
+
+ if (!(padap->flags & FW_OK) || padap->use_bd)
+ return 0;
+
+ return 1;
+}
+
+/* Add padding bytes to debug_buffer so that the entity data ends on a
+ * 4-byte aligned offset.
+ */
+void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
+ struct cudbg_entity_hdr *entity_hdr)
+{
+ u8 zero_buf[4] = {0};
+ u8 padding, remain;
+
+ remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
+ padding = 4 - remain;
+ if (remain) {
+ memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
+ padding);
+ dbg_buff->offset += padding;
+ entity_hdr->num_pad = padding;
+ }
+ entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
+}
+
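+/* Return a pointer to the header of entity 'i' (1-based) in the entity
+ * header array that follows the main header in 'outbuf'.
+ */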
+struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
+{
+ struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
+
+ return (struct cudbg_entity_hdr *)
+ ((char *)outbuf + cudbg_hdr->hdr_len +
+ (sizeof(struct cudbg_entity_hdr) * (i - 1)));
+}
+
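+/* Read 'len' bytes of VPD data at EEPROM physical address 'addr' into
+ * 'dest'.
+ */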
+static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
+ void *dest)
+{
+ int vaddr, rc;
+
+ vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
+ if (vaddr < 0)
+ return vaddr;
+
+ rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 buf_size = 0;
+ int rc = 0;
+
+ if (is_t4(padap->params.chip))
+ buf_size = T4_REGMAP_SIZE;
+ else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
+ buf_size = T5_REGMAP_SIZE;
+
+ rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ if (rc)
+ return rc;
+ t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct devlog_params *dparams;
+ int rc = 0;
+
+ rc = t4_init_devlog_params(padap);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ return rc;
+ }
+
+ dparams = &padap->params.devlog;
+ rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ if (rc)
+ return rc;
+
+ /* Collect FW devlog */
+ if (dparams->start != 0) {
+ spin_lock(&padap->win0_lock);
+ rc = t4_memory_rw(padap, padap->params.drv_memwin,
+ dparams->memtype, dparams->start,
+ dparams->size,
+ (__be32 *)(char *)temp_buff.data,
+ 1);
+ spin_unlock(&padap->win0_lock);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int size, rc;
+ u32 cfg = 0;
+
+ if (is_t6(padap->params.chip)) {
+ size = padap->params.cim_la_size / 10 + 1;
+ size *= 11 * sizeof(u32);
+ } else {
+ size = padap->params.cim_la_size / 8;
+ size *= 8 * sizeof(u32);
+ }
+
+ size += sizeof(cfg);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+
+ memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
+ rc = t4_cim_read_la(padap,
+ (u32 *)((char *)temp_buff.data + sizeof(cfg)),
+ NULL);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int size, rc;
+
+ size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ t4_cim_read_ma_la(padap,
+ (u32 *)temp_buff.data,
+ (u32 *)((char *)temp_buff.data +
+ 5 * CIM_MALA_SIZE));
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_cim_qcfg *cim_qcfg_data;
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
+ cim_qcfg_data->chip = padap->params.chip;
+ rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
+ ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+
+ rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
+ ARRAY_SIZE(cim_qcfg_data->obq_wr),
+ cim_qcfg_data->obq_wr);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+
+ t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
+ cim_qcfg_data->thres);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err, int qid)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int no_of_read_words, rc = 0;
+ u32 qsize;
+
+ /* collect CIM IBQ */
+ qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
+ rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ if (rc)
+ return rc;
+
+ /* t4_read_cim_ibq() returns the number of words read or a negative error */
+ no_of_read_words = t4_read_cim_ibq(padap, qid,
+ (u32 *)temp_buff.data, qsize);
+ /* A count of zero or less indicates an error */
+ if (no_of_read_words <= 0) {
+ if (!no_of_read_words)
+ rc = CUDBG_SYSTEM_ERROR;
+ else
+ rc = no_of_read_words;
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
+}
+
+int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
+}
+
+int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
+}
+
+int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
+}
+
+int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
+}
+
+int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
+}
+
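+/* Return the size of CIM OBQ 'qid' in bytes. */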
+u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
+{
+ u32 value;
+
+ t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
+ QUENUMSELECT_V(qid));
+ value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
+ value = CIMQSIZE_G(value) * 64; /* size in number of words */
+ return value * sizeof(u32);
+}
+
+static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err, int qid)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int no_of_read_words, rc = 0;
+ u32 qsize;
+
+ /* collect CIM OBQ */
+ qsize = cudbg_cim_obq_size(padap, qid);
+ rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ if (rc)
+ return rc;
+
+ /* t4_read_cim_obq() returns the number of words read or a negative error */
+ no_of_read_words = t4_read_cim_obq(padap, qid,
+ (u32 *)temp_buff.data, qsize);
+ /* A count of zero or less indicates an error */
+ if (no_of_read_words <= 0) {
+ if (!no_of_read_words)
+ rc = CUDBG_SYSTEM_ERROR;
+ else
+ rc = no_of_read_words;
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
+}
+
+int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
+}
+
+int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
+}
+
+int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
+}
+
+int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
+}
+
+int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
+}
+
+int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
+}
+
+int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
+}
+
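+/* Read 'tot_len' bytes of adapter memory of type 'mem_type' into the debug
+ * buffer, CUDBG_CHUNK_SIZE bytes at a time.
+ */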
+static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff, u8 mem_type,
+ unsigned long tot_len,
+ struct cudbg_error *cudbg_err)
+{
+ unsigned long bytes, bytes_left, bytes_read = 0;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int rc = 0;
+
+ bytes_left = tot_len;
+ while (bytes_left > 0) {
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ if (rc)
+ return rc;
+ spin_lock(&padap->win0_lock);
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
+ bytes_read, bytes,
+ (__be32 *)temp_buff.data,
+ 1);
+ spin_unlock(&padap->win0_lock);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ }
+ return rc;
+}
+
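+/* Fill 'mem_info' with the EDC0/EDC1 sizes and availability flags read from
+ * the memory arbiter registers.
+ */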
+static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
+ struct card_mem *mem_info)
+{
+ struct adapter *padap = pdbg_init->adap;
+ u32 value;
+
+ value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+ value = EDRAM0_SIZE_G(value);
+ mem_info->size_edc0 = (u16)value;
+
+ value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
+ value = EDRAM1_SIZE_G(value);
+ mem_info->size_edc1 = (u16)value;
+
+ value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EDRAM0_ENABLE_F)
+ mem_info->mem_flag |= (1 << EDC0_FLAG);
+ if (value & EDRAM1_ENABLE_F)
+ mem_info->mem_flag |= (1 << EDC1_FLAG);
+}
+
+static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc;
+
+ if (is_fw_attached(pdbg_init)) {
+ /* Flush uP dcache before reading edcX/mcX */
+ rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
+ if (rc)
+ cudbg_err->sys_warn = rc;
+ }
+}
+
+static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err,
+ u8 mem_type)
+{
+ struct card_mem mem_info = {0};
+ unsigned long flag, size;
+ int rc;
+
+ cudbg_t4_fwcache(pdbg_init, cudbg_err);
+ cudbg_collect_mem_info(pdbg_init, &mem_info);
+ switch (mem_type) {
+ case MEM_EDC0:
+ flag = (1 << EDC0_FLAG);
+ size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
+ break;
+ case MEM_EDC1:
+ flag = (1 << EDC1_FLAG);
+ size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
+ break;
+ default:
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ goto err;
+ }
+
+ if (mem_info.mem_flag & flag) {
+ rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
+ size, cudbg_err);
+ if (rc)
+ goto err;
+ } else {
+ rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
+ goto err;
+ }
+err:
+ return rc;
+}
+
+int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_EDC0);
+}
+
+int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_EDC1);
+}
+
+int cudbg_collect_rss(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+ if (rc)
+ return rc;
+
+ rc = t4_read_rss(padap, (u16 *)temp_buff.data);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_rss_vf_conf *vfconf;
+ int vf, rc, vf_count;
+
+ vf_count = padap->params.arch.vfcount;
+ rc = cudbg_get_buff(dbg_buff,
+ vf_count * sizeof(struct cudbg_rss_vf_conf),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
+ for (vf = 0; vf < vf_count; vf++)
+ t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
+ &vfconf[vf].rss_vf_vfh, true);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ if (rc)
+ return rc;
+
+ t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_pm_stats *pm_stats_buff;
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
+ t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
+ t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_hw_sched *hw_sched_buff;
+ int i, rc = 0;
+
+ if (!padap->params.vpd.cclk)
+ return CUDBG_STATUS_CCLK_NOT_DEFINED;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
+ hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
+ hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
+ t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
+ for (i = 0; i < NTX_SCHED; ++i)
+ t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
+ &hw_sched_buff->ipg[i], true);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *ch_tp_pio;
+ int i, rc, n = 0;
+ u32 size;
+
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_tp_pio_array) +
+ sizeof(t5_tp_tm_pio_array) +
+ sizeof(t5_tp_mib_index_array);
+ else
+ n = sizeof(t6_tp_pio_array) +
+ sizeof(t6_tp_tm_pio_array) +
+ sizeof(t6_tp_mib_index_array);
+
+ n = n / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ ch_tp_pio = (struct ireg_buf *)temp_buff.data;
+
+ /* TP_PIO */
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap->params.chip)) {
+ tp_pio->ireg_addr = t5_tp_pio_array[i][0];
+ tp_pio->ireg_data = t5_tp_pio_array[i][1];
+ tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
+ tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
+ } else if (is_t6(padap->params.chip)) {
+ tp_pio->ireg_addr = t6_tp_pio_array[i][0];
+ tp_pio->ireg_data = t6_tp_pio_array[i][1];
+ tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
+ tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
+ }
+ t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+ ch_tp_pio++;
+ }
+
+ /* TP_TM_PIO */
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
+
+ for (i = 0; i < n; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap->params.chip)) {
+ tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
+ tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
+ tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
+ tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
+ } else if (is_t6(padap->params.chip)) {
+ tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
+ tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
+ tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
+ tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
+ }
+ t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+ ch_tp_pio++;
+ }
+
+ /* TP_MIB_INDEX */
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_tp_mib_index_array) /
+ (IREG_NUM_ELEM * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_tp_mib_index_array) /
+ (IREG_NUM_ELEM * sizeof(u32));
+
+ for (i = 0; i < n ; i++) {
+ struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
+ u32 *buff = ch_tp_pio->outbuf;
+
+ if (is_t5(padap->params.chip)) {
+ tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
+ tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
+ tp_pio->ireg_local_offset =
+ t5_tp_mib_index_array[i][2];
+ tp_pio->ireg_offset_range =
+ t5_tp_mib_index_array[i][3];
+ } else if (is_t6(padap->params.chip)) {
+ tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
+ tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
+ tp_pio->ireg_local_offset =
+ t6_tp_mib_index_array[i][2];
+ tp_pio->ireg_offset_range =
+ t6_tp_mib_index_array[i][3];
+ }
+ t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
+ tp_pio->ireg_local_offset, true);
+ ch_tp_pio++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *ch_sge_dbg;
+ int i, rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ if (rc)
+ return rc;
+
+ ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
+ for (i = 0; i < 2; i++) {
+ struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
+ u32 *buff = ch_sge_dbg->outbuf;
+
+ sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
+ sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
+ sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
+ sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
+ t4_read_indirect(padap,
+ sge_pio->ireg_addr,
+ sge_pio->ireg_data,
+ buff,
+ sge_pio->ireg_offset_range,
+ sge_pio->ireg_local_offset);
+ ch_sge_dbg++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_ulprx_la *ulprx_la_buff;
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
+ t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
+ ulprx_la_buff->size = ULPRX_LA_SIZE;
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_tp_la *tp_la_buff;
+ int size, rc;
+
+ size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
+ tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
+ t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct cudbg_cim_pif_la *cim_pif_la_buff;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ int size, rc;
+
+ size = sizeof(struct cudbg_cim_pif_la) +
+ 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
+ cim_pif_la_buff->size = CIM_PIFLA_SIZE;
+ t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
+ (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
+ NULL, NULL);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_clk_info *clk_info_buff;
+ u64 tp_tick_us;
+ int rc;
+
+ if (!padap->params.vpd.cclk)
+ return CUDBG_STATUS_CCLK_NOT_DEFINED;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
+ clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
+ clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
+ clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
+ clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
+ tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
+
+ clk_info_buff->dack_timer =
+ (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
+ t4_read_reg(padap, TP_DACK_TIMER_A);
+ clk_info_buff->retransmit_min =
+ tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
+ clk_info_buff->retransmit_max =
+ tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
+ clk_info_buff->persist_timer_min =
+ tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
+ clk_info_buff->persist_timer_max =
+ tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
+ clk_info_buff->keepalive_idle_timer =
+ tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
+ clk_info_buff->keepalive_interval =
+ tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
+ clk_info_buff->initial_srtt =
+ tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
+ clk_info_buff->finwait2_timer =
+ tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *ch_pcie;
+ int i, rc, n;
+ u32 size;
+
+ n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ ch_pcie = (struct ireg_buf *)temp_buff.data;
+ /* PCIE_PDBG */
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
+ u32 *buff = ch_pcie->outbuf;
+
+ pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
+ pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
+ pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
+ pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
+ t4_read_indirect(padap,
+ pcie_pio->ireg_addr,
+ pcie_pio->ireg_data,
+ buff,
+ pcie_pio->ireg_offset_range,
+ pcie_pio->ireg_local_offset);
+ ch_pcie++;
+ }
+
+ /* PCIE_CDBG */
+ n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
+ u32 *buff = ch_pcie->outbuf;
+
+ pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
+ pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
+ pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
+ pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
+ t4_read_indirect(padap,
+ pcie_pio->ireg_addr,
+ pcie_pio->ireg_data,
+ buff,
+ pcie_pio->ireg_offset_range,
+ pcie_pio->ireg_local_offset);
+ ch_pcie++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *ch_pm;
+ int i, rc, n;
+ u32 size;
+
+ n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ ch_pm = (struct ireg_buf *)temp_buff.data;
+ /* PM_RX */
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pm_pio = &ch_pm->tp_pio;
+ u32 *buff = ch_pm->outbuf;
+
+ pm_pio->ireg_addr = t5_pm_rx_array[i][0];
+ pm_pio->ireg_data = t5_pm_rx_array[i][1];
+ pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
+ pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
+ t4_read_indirect(padap,
+ pm_pio->ireg_addr,
+ pm_pio->ireg_data,
+ buff,
+ pm_pio->ireg_offset_range,
+ pm_pio->ireg_local_offset);
+ ch_pm++;
+ }
+
+ /* PM_TX */
+ n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
+ for (i = 0; i < n; i++) {
+ struct ireg_field *pm_pio = &ch_pm->tp_pio;
+ u32 *buff = ch_pm->outbuf;
+
+ pm_pio->ireg_addr = t5_pm_tx_array[i][0];
+ pm_pio->ireg_data = t5_pm_tx_array[i][1];
+ pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
+ pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
+ t4_read_indirect(padap,
+ pm_pio->ireg_addr,
+ pm_pio->ireg_data,
+ buff,
+ pm_pio->ireg_offset_range,
+ pm_pio->ireg_local_offset);
+ ch_pm++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_tid(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_tid_info_region_rev1 *tid1;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_tid_info_region *tid;
+ u32 para[2], val[2];
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
+ tid = &tid1->tid;
+ tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
+ tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
+ tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
+ sizeof(struct cudbg_ver_hdr);
+
+#define FW_PARAM_PFVF_A(param) \
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
+ FW_PARAMS_PARAM_Y_V(0) | \
+ FW_PARAMS_PARAM_Z_V(0))
+
+ para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
+ para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
+ rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ tid->uotid_base = val[0];
+ tid->nuotids = val[1] - val[0] + 1;
+
+ if (is_t5(padap->params.chip)) {
+ tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
+ } else if (is_t6(padap->params.chip)) {
+ tid1->tid_start =
+ t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+ tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
+
+ para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
+ para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
+ rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
+ para, val);
+ if (rc < 0) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ tid->hpftid_base = val[0];
+ tid->nhpftids = val[1] - val[0] + 1;
+ }
+
+ tid->ntids = padap->tids.ntids;
+ tid->nstids = padap->tids.nstids;
+ tid->stid_base = padap->tids.stid_base;
+ tid->hash_base = padap->tids.hash_base;
+
+ tid->natids = padap->tids.natids;
+ tid->nftids = padap->tids.nftids;
+ tid->ftid_base = padap->tids.ftid_base;
+ tid->aftid_base = padap->tids.aftid_base;
+ tid->aftid_end = padap->tids.aftid_end;
+
+ tid->sftid_base = padap->tids.sftid_base;
+ tid->nsftids = padap->tids.nsftids;
+
+ tid->flags = padap->flags;
+ tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
+ tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
+ tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
+
+#undef FW_PARAM_PFVF_A
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
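+/* Return the number of bytes needed to dump all freelist (FLM) and
+ * congestion manager (CNM) contexts.
+ */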
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ u32 value, size;
+ u8 flq;
+
+ value = t4_read_reg(padap, SGE_FLM_CFG_A);
+
+ /* Get number of data freelist queues */
+ flq = HDRSTARTFLQ_G(value);
+ size = CUDBG_MAX_FL_QIDS >> flq;
+
+ /* Add extra space for congestion manager contexts.
+ * The number of CONM contexts is the same as the number of
+ * freelist queues.
+ */
+ size += size;
+ return size * sizeof(struct cudbg_ch_cntxt);
+}
+
+static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int rc = -1;
+
+ /* Under heavy traffic, the SGE Queue context registers are
+ * frequently accessed by firmware.
+ *
+ * To avoid conflicts with firmware, always ask firmware to fetch
+ * the SGE Queue contexts via mailbox. On failure, fall back to
+ * accessing hardware registers directly.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
+ if (rc)
+ t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
+}
+
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_ch_cntxt *buff;
+ u32 size, i = 0;
+ int rc;
+
+ rc = cudbg_dump_context_size(padap);
+ if (rc <= 0)
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ size = rc;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ buff = (struct cudbg_ch_cntxt *)temp_buff.data;
+ while (size > 0) {
+ buff->cntxt_type = CTXT_FLM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = i;
+ cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
+ buff++;
+ size -= sizeof(struct cudbg_ch_cntxt);
+
+ i++;
+ }
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
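+/* Convert a TCAM (x, y) pair into the MAC address and mask it represents. */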
+static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
+{
+ *mask = x | y;
+ y = (__force u64)cpu_to_be64(y);
+ memcpy(addr, (char *)&y + 2, ETH_ALEN);
+}
+
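+/* Read the MPS VF replication map directly from hardware registers, as a
+ * backdoor for when the firmware mailbox read fails.
+ */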
+static void cudbg_mps_rpl_backdoor(struct adapter *padap,
+ struct fw_ldst_mps_rplc *mps_rplc)
+{
+ if (is_t5(padap->params.chip)) {
+ mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP3_A));
+ mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP2_A));
+ mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP1_A));
+ mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP0_A));
+ } else {
+ mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP7_A));
+ mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP6_A));
+ mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP5_A));
+ mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
+ MPS_VF_RPLCT_MAP4_A));
+ }
+ mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
+ mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
+ mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
+ mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
+}
+
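+/* Read MPS TCAM entry 'idx' along with its classification SRAM and
+ * replication information into 'tcam'.
+ */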
+static int cudbg_collect_tcam_index(struct adapter *padap,
+ struct cudbg_mps_tcam *tcam, u32 idx)
+{
+ u64 tcamy, tcamx, val;
+ u32 ctl, data2;
+ int rc = 0;
+
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
+ /* CtlReqID - 1: use Host Driver Requester ID
+ * CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+ * CtlXYBitSel- 0: Y bit, 1: X bit
+ */
+
+ /* Read tcamy */
+ ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+ if (idx < 256)
+ ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+ else
+ ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
+
+ t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
+ tcamy = DMACH_G(val) << 32;
+ tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
+ data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
+ tcam->lookup_type = DATALKPTYPE_G(data2);
+
+ /* 0 - Outer header, 1 - Inner header
+ * [71:48] bit locations are overloaded for
+ * outer vs. inner lookup types.
+ */
+ if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
+ /* Inner header VNI */
+ tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
+ tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
+ tcam->dip_hit = data2 & DATADIPHIT_F;
+ } else {
+ tcam->vlan_vld = data2 & DATAVIDH2_F;
+ tcam->ivlan = VIDL_G(val);
+ }
+
+ tcam->port_num = DATAPORTNUM_G(data2);
+
+ /* Read tcamx. Change the control param */
+ ctl |= CTLXYBITSEL_V(1);
+ t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
+ tcamx = DMACH_G(val) << 32;
+ tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
+ data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
+ if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
+ /* Inner header VNI mask */
+ tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
+ tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
+ }
+ } else {
+ tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
+ tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
+ }
+
+ /* If no entry, return */
+ if (tcamx & tcamy)
+ return rc;
+
+ tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
+ tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
+
+ if (is_t5(padap->params.chip))
+ tcam->repli = (tcam->cls_lo & REPLICATE_F);
+ else if (is_t6(padap->params.chip))
+ tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
+
+ if (tcam->repli) {
+ struct fw_ldst_cmd ldst_cmd;
+ struct fw_ldst_mps_rplc mps_rplc;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htonl(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
+ FW_LDST_CMD_IDX_V(idx));
+
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_cmd);
+ if (rc)
+ cudbg_mps_rpl_backdoor(padap, &mps_rplc);
+ else
+ mps_rplc = ldst_cmd.u.mps.rplc;
+
+ tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
+ tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
+ tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
+ tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
+ tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
+ tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
+ tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
+ tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
+ }
+ cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
+ tcam->idx = idx;
+ tcam->rplc_size = padap->params.arch.mps_rplc_size;
+ return rc;
+}
+
+int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size = 0, i, n, total_size = 0;
+ struct cudbg_mps_tcam *tcam;
+ int rc;
+
+ n = padap->params.arch.mps_tcam_size;
+ size = sizeof(struct cudbg_mps_tcam) * n;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ tcam = (struct cudbg_mps_tcam *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ rc = cudbg_collect_tcam_index(padap, tcam, i);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ total_size += sizeof(struct cudbg_mps_tcam);
+ tcam++;
+ }
+
+ if (!total_size) {
+ rc = CUDBG_SYSTEM_ERROR;
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ char vpd_str[CUDBG_VPD_VER_LEN + 1];
+ u32 scfg_vers, vpd_vers, fw_vers;
+ struct cudbg_vpd_data *vpd_data;
+ struct vpd_params vpd = { 0 };
+ int rc, ret;
+
+ rc = t4_get_raw_vpd_params(padap, &vpd);
+ if (rc)
+ return rc;
+
+ rc = t4_get_fw_version(padap, &fw_vers);
+ if (rc)
+ return rc;
+
+ /* The Serial Configuration Version is located beyond the PF's vpd size.
+ * Temporarily give access to the entire EEPROM to read it.
+ */
+ rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
+ if (rc < 0)
+ return rc;
+
+ ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
+ &scfg_vers);
+
+ /* Restore the original PF's vpd size */
+ rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
+ if (rc < 0)
+ return rc;
+
+ if (ret)
+ return ret;
+
+ rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
+ vpd_str);
+ if (rc)
+ return rc;
+
+ vpd_str[CUDBG_VPD_VER_LEN] = '\0';
+ rc = kstrtouint(vpd_str, 0, &vpd_vers);
+ if (rc)
+ return rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
+ memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
+ memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
+ memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
+ memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
+ vpd_data->scfg_vers = scfg_vers;
+ vpd_data->vpd_vers = vpd_vers;
+ vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
+ vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
+ vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
+ vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
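+/* Issue a debug interface (DBGI) read for 'tid' and copy the response
+ * into 'tid_data'.
+ */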
+static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
+ struct cudbg_tid_data *tid_data)
+{
+ struct adapter *padap = pdbg_init->adap;
+ int i, cmd_retry = 8;
+ u32 val;
+
+ /* Fill REQ_DATA regs with 0's */
+ for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
+ t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
+
+ /* Write DBIG command */
+ val = DBGICMD_V(4) | DBGITID_V(tid);
+ t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
+ tid_data->dbig_cmd = val;
+
+ val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
+ t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
+ tid_data->dbig_conf = val;
+
+ /* Poll the DBGICMDBUSY bit */
+ val = 1;
+ while (val) {
+ val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
+ val = val & DBGICMDBUSY_F;
+ cmd_retry--;
+ if (!cmd_retry)
+ return CUDBG_SYSTEM_ERROR;
+ }
+
+ /* Check RESP status */
+ val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
+ tid_data->dbig_rsp_stat = val;
+ if (!(val & 1))
+ return CUDBG_SYSTEM_ERROR;
+
+ /* Read RESP data */
+ for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
+ tid_data->data[i] = t4_read_reg(padap,
+ LE_DB_DBGI_RSP_DATA_A +
+ (i << 2));
+ tid_data->tid = tid;
+ return 0;
+}
+
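+/* Classify 'tid' into its LE TCAM region type using the region boundaries
+ * in 'tcam_region'.
+ */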
+static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
+{
+ int type = LE_ET_UNKNOWN;
+
+ if (tid < tcam_region.server_start)
+ type = LE_ET_TCAM_CON;
+ else if (tid < tcam_region.filter_start)
+ type = LE_ET_TCAM_SERVER;
+ else if (tid < tcam_region.clip_start)
+ type = LE_ET_TCAM_FILTER;
+ else if (tid < tcam_region.routing_start)
+ type = LE_ET_TCAM_CLIP;
+ else if (tid < tcam_region.tid_hash_base)
+ type = LE_ET_TCAM_ROUTING;
+ else if (tid < tcam_region.max_tid)
+ type = LE_ET_HASH_CON;
+ else
+ type = LE_ET_INVALID_TID;
+
+ return type;
+}
+
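+/* Return non-zero if the entry in 'tid_data' is an IPv6 entry, which
+ * occupies two TIDs.
+ */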
+static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
+ struct cudbg_tcam tcam_region)
+{
+ int ipv6 = 0;
+ int le_type;
+
+ le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
+ if (tid_data->tid & 1)
+ return 0;
+
+ if (le_type == LE_ET_HASH_CON) {
+ ipv6 = tid_data->data[16] & 0x8000;
+ } else if (le_type == LE_ET_TCAM_CON) {
+ ipv6 = tid_data->data[16] & 0x8000;
+ if (ipv6)
+ ipv6 = tid_data->data[9] == 0x00C00000;
+ } else {
+ ipv6 = 0;
+ }
+ return ipv6;
+}
+
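+/* Populate 'tcam_region' with the LE region start indices and the maximum
+ * TID.
+ */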
+void cudbg_fill_le_tcam_info(struct adapter *padap,
+ struct cudbg_tcam *tcam_region)
+{
+ u32 value;
+
+ /* Get the LE regions */
+ value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
+ tcam_region->tid_hash_base = value;
+
+ /* Get routing table index */
+ value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
+ tcam_region->routing_start = value;
+
+ /* Get clip table index */
+ value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
+ tcam_region->clip_start = value;
+
+ /* Get filter table index */
+ value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
+ tcam_region->filter_start = value;
+
+ /* Get server table index */
+ value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
+ tcam_region->server_start = value;
+
+ /* Check whether hash is enabled and calculate the max tids */
+ value = t4_read_reg(padap, LE_DB_CONFIG_A);
+ if ((value >> HASHEN_S) & 1) {
+ value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+ tcam_region->max_tid = (value & 0xFFFFF) +
+ tcam_region->tid_hash_base;
+ } else {
+ value = HASHTIDSIZE_G(value);
+ value = 1 << value;
+ tcam_region->max_tid = value +
+ tcam_region->tid_hash_base;
+ }
+ } else { /* hash not enabled */
+ tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
+ }
+}
+
+int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_tcam tcam_region = { 0 };
+ struct cudbg_tid_data *tid_data;
+ u32 bytes = 0;
+ int rc, size;
+ u32 i;
+
+ cudbg_fill_le_tcam_info(padap, &tcam_region);
+
+ size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+ size += sizeof(struct cudbg_tcam);
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
+ bytes = sizeof(struct cudbg_tcam);
+ tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
+ /* read all tids */
+ for (i = 0; i < tcam_region.max_tid; ) {
+ rc = cudbg_read_tid(pdbg_init, i, tid_data);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+
+ /* ipv6 takes two tids */
+ cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++;
+
+ tid_data++;
+ bytes += sizeof(struct cudbg_tid_data);
+ }
+
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size;
+ int rc;
+
+ size = sizeof(u16) * NMTUS * NCCTRL_WIN;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ t4_read_cong_tbl(padap, (void *)temp_buff.data);
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *ma_indr;
+ int i, rc, n;
+ u32 size, j;
+
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n * 2;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ ma_indr = (struct ireg_buf *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ struct ireg_field *ma_fli = &ma_indr->tp_pio;
+ u32 *buff = ma_indr->outbuf;
+
+ ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
+ ma_fli->ireg_data = t6_ma_ireg_array[i][1];
+ ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
+ ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
+ t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
+ buff, ma_fli->ireg_offset_range,
+ ma_fli->ireg_local_offset);
+ ma_indr++;
+ }
+
+ n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
+ for (i = 0; i < n; i++) {
+ struct ireg_field *ma_fli = &ma_indr->tp_pio;
+ u32 *buff = ma_indr->outbuf;
+
+ ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
+ ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
+ ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
+ for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
+ t4_read_indirect(padap, ma_fli->ireg_addr,
+ ma_fli->ireg_data, buff, 1,
+ ma_fli->ireg_local_offset);
+ buff++;
+ ma_fli->ireg_local_offset += 0x20;
+ }
+ ma_indr++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_ulptx_la *ulptx_la_buff;
+ u32 i, j;
+ int rc;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
+ for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
+ ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
+ ULP_TX_LA_RDPTR_0_A +
+ 0x10 * i);
+ ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
+ ULP_TX_LA_WRPTR_0_A +
+ 0x10 * i);
+ ulptx_la_buff->rddata[i] = t4_read_reg(padap,
+ ULP_TX_LA_RDDATA_0_A +
+ 0x10 * i);
+ for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
+ ulptx_la_buff->rd_data[i][j] =
+ t4_read_reg(padap,
+ ULP_TX_LA_RDDATA_0_A + 0x10 * i);
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *up_cim;
+ int i, rc, n;
+ u32 size;
+
+ n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ up_cim = (struct ireg_buf *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ struct ireg_field *up_cim_reg = &up_cim->tp_pio;
+ u32 *buff = up_cim->outbuf;
+
+ if (is_t5(padap->params.chip)) {
+ up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
+ up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
+ up_cim_reg->ireg_local_offset =
+ t5_up_cim_reg_array[i][2];
+ up_cim_reg->ireg_offset_range =
+ t5_up_cim_reg_array[i][3];
+ } else if (is_t6(padap->params.chip)) {
+ up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
+ up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
+ up_cim_reg->ireg_local_offset =
+ t6_up_cim_reg_array[i][2];
+ up_cim_reg->ireg_offset_range =
+ t6_up_cim_reg_array[i][3];
+ }
+
+ rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
+ up_cim_reg->ireg_offset_range, buff);
+ if (rc) {
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ up_cim++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_pbt_tables *pbt;
+ int i, rc;
+ u32 addr;
+
+ rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ pbt = (struct cudbg_pbt_tables *)temp_buff.data;
+ /* PBT dynamic entries */
+ addr = CUDBG_CHAC_PBT_ADDR;
+ for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
+ rc = t4_cim_read(padap, addr + (i * 4), 1,
+ &pbt->pbt_dynamic[i]);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ }
+
+ /* PBT static entries */
+ /* static entries start when bit 6 is set */
+ addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
+ for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
+ rc = t4_cim_read(padap, addr + (i * 4), 1,
+ &pbt->pbt_static[i]);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ }
+
+ /* LRF entries */
+ addr = CUDBG_CHAC_PBT_LRF;
+ for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
+ rc = t4_cim_read(padap, addr + (i * 4), 1,
+ &pbt->lrf_table[i]);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ }
+
+ /* PBT data entries */
+ addr = CUDBG_CHAC_PBT_DATA;
+ for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
+ rc = t4_cim_read(padap, addr + (i * 4), 1,
+ &pbt->pbt_data[i]);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(&temp_buff, dbg_buff);
+ return rc;
+ }
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_mbox_log *mboxlog = NULL;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct mbox_cmd_log *log = NULL;
+ struct mbox_cmd *entry;
+ unsigned int entry_idx;
+ u16 mbox_cmds;
+ int i, k, rc;
+ u64 flit;
+ u32 size;
+
+ log = padap->mbox_log;
+ mbox_cmds = padap->mbox_log->size;
+ size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
+ for (k = 0; k < mbox_cmds; k++) {
+ entry_idx = log->cursor + k;
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+
+ entry = mbox_cmd_log_entry(log, entry_idx);
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ continue;
+
+ memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ flit = entry->cmd[i];
+ mboxlog->hi[i] = (u32)(flit >> 32);
+ mboxlog->lo[i] = (u32)flit;
+ }
+ mboxlog++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
+
+int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct ireg_buf *hma_indr;
+ int i, rc, n;
+ u32 size;
+
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ size = sizeof(struct ireg_buf) * n;
+ rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ hma_indr = (struct ireg_buf *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ struct ireg_field *hma_fli = &hma_indr->tp_pio;
+ u32 *buff = hma_indr->outbuf;
+
+ hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
+ hma_fli->ireg_data = t6_hma_ireg_array[i][1];
+ hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
+ hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
+ t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
+ buff, hma_fli->ireg_offset_range,
+ hma_fli->ireg_local_offset);
+ hma_indr++;
+ }
+ cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ return rc;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
new file mode 100644
index 000000000000..caeee8e33e86
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_LIB_H__
+#define __CUDBG_LIB_H__
+
+int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_rss(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_tid(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+
+struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
+void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
+ struct cudbg_entity_hdr *entity_hdr);
+u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
+int cudbg_dump_context_size(struct adapter *padap);
+
+struct cudbg_tcam;
+void cudbg_fill_le_tcam_info(struct adapter *padap,
+ struct cudbg_tcam *tcam_region);
+#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
new file mode 100644
index 000000000000..24b33f28e548
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_LIB_COMMON_H__
+#define __CUDBG_LIB_COMMON_H__
+
+#define CUDBG_SIGNATURE 67856866 /* CUDB in ascii */
+
+enum cudbg_dump_type {
+ CUDBG_DUMP_TYPE_MINI = 1,
+};
+
+enum cudbg_compression_type {
+ CUDBG_COMPRESSION_NONE = 1,
+};
+
+struct cudbg_hdr {
+ u32 signature;
+ u32 hdr_len;
+ u16 major_ver;
+ u16 minor_ver;
+ u32 data_len;
+ u32 hdr_flags;
+ u16 max_entities;
+ u8 chip_ver;
+ u8 dump_type:3;
+ u8 reserved1:1;
+ u8 compress_type:4;
+ u32 reserved[8];
+};
+
+struct cudbg_entity_hdr {
+ u32 entity_type;
+ u32 start_offset;
+ u32 size;
+ int hdr_flags;
+ u32 sys_warn;
+ u32 sys_err;
+ u8 num_pad;
+ u8 flag; /* bit 0 is used to indicate ext data */
+ u8 reserved1[2];
+ u32 next_ext_offset; /* pointer to next extended entity meta data */
+ u32 reserved[5];
+};
+
+struct cudbg_ver_hdr {
+ u32 signature;
+ u16 revision;
+ u16 size;
+};
+
+struct cudbg_buffer {
+ u32 size;
+ u32 offset;
+ char *data;
+};
+
+struct cudbg_error {
+ int sys_err;
+ int sys_warn;
+ int app_err;
+};
+
+#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
+#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
+
+int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+ struct cudbg_buffer *pin_buff);
+void cudbg_put_buff(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pdbg_buff);
+void cudbg_update_buff(struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_LIB_COMMON_H__ */
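
For reference, the chunk size defined above works out to a whole number of KB just under the CDUMP_MAX_COMP_BUF_SIZE limit: with (64 * 1024) - 1 = 65535, the integer division gives (65535 / 1024) * 1024 = 63 * 1024 = 64512 bytes, i.e. 63 KB per chunk.
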
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ea72d2d2e1b4..6f9fa6e3c42a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -287,10 +287,18 @@ struct tp_params {
* places we store their offsets here, or a -1 if the field isn't
* present.
*/
- int vlan_shift;
- int vnic_shift;
+ int fcoe_shift;
int port_shift;
+ int vnic_shift;
+ int vlan_shift;
+ int tos_shift;
int protocol_shift;
+ int ethertype_shift;
+ int macmatch_shift;
+ int matchtype_shift;
+ int frag_shift;
+
+ u64 hash_filter_mask;
};
struct vpd_params {
@@ -358,6 +366,7 @@ struct adapter_params {
unsigned char crypto; /* HW capability for crypto */
unsigned char bypass;
+ unsigned char hash_filter;
unsigned int ofldq_wr_cred;
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
@@ -367,6 +376,7 @@ struct adapter_params {
unsigned int max_ird_adapter; /* Max read depth per adapter */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
u8 fw_caps_support; /* 32-bit Port Capabilities */
+ bool filter2_wr_support; /* FW support for FILTER2_WR */
/* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is
* used by the Port
@@ -549,6 +559,7 @@ enum { /* adapter flags */
MASTER_PF = (1 << 7),
FW_OFLD_CONN = (1 << 9),
ROOT_NO_RELAXED_ORDERING = (1 << 10),
+ SHUTTING_DOWN = (1 << 11),
};
enum {
@@ -857,6 +868,7 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ struct smt_data *smt;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -904,6 +916,15 @@ struct adapter {
/* TC u32 offload */
struct cxgb4_tc_u32_table *tc_u32;
struct chcr_stats_debug chcr_stats;
+
+ /* TC flower offload */
+ struct rhashtable flower_tbl;
+ struct rhashtable_params flower_ht_params;
+ struct timer_list flower_stats_timer;
+ struct work_struct flower_stats_work;
+
+ /* Ethtool Dump */
+ struct ethtool_dump eth_dump;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1031,6 +1052,7 @@ struct ch_filter_specification {
* matching that doesn't exist as a (value, mask) tuple.
*/
uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ u32 hash:1; /* 0 => wild-card, 1 => exact-match */
/* Packet dispatch information. Ingress packets which match the
* filter rules will be dropped, passed to the host or switched back
@@ -1055,10 +1077,19 @@ struct ch_filter_specification {
uint32_t newdmac:1; /* rewrite destination MAC address */
uint32_t newsmac:1; /* rewrite source MAC address */
uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint32_t nat_mode:3; /* specify NAT operation mode */
uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
uint8_t smac[ETH_ALEN]; /* new source MAC address */
uint16_t vlan; /* VLAN Tag to insert */
+ u8 nat_lip[16]; /* local IP to use after NAT'ing */
+ u8 nat_fip[16]; /* foreign IP to use after NAT'ing */
+ u16 nat_lport; /* local port to use after NAT'ing */
+ u16 nat_fport; /* foreign port to use after NAT'ing */
+
+ /* reservation for future additions */
+ u8 rsvd[24];
+
/* Filter rule value/mask pairs.
*/
struct ch_filter_tuple val;
@@ -1078,6 +1109,17 @@ enum {
VLAN_REWRITE
};
+enum {
+ NAT_MODE_NONE = 0, /* No NAT performed */
+ NAT_MODE_DIP, /* NAT on Dst IP */
+ NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */
+ NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */
+ NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */
+ NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */
+ NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */
+ NAT_MODE_ALL /* NAT on entire 4-tuple */
+};
+
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware or the
* firmware command. The use of bit-field structure elements is purely to
@@ -1090,9 +1132,9 @@ struct filter_entry {
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
struct filter_ctx *ctx; /* Caller's completion hook */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct smt_entry *smt; /* Source Mac Table entry for smac */
struct net_device *dev; /* Associated net device */
u32 tid; /* This will store the actual tid */
@@ -1109,6 +1151,11 @@ static inline int is_offload(const struct adapter *adap)
return adap->params.offload;
}
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
static inline int is_pci_uld(const struct adapter *adap)
{
return adap->params.crypto;
@@ -1312,6 +1359,12 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
adapter->params.vpd.cclk);
}
+static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
+ unsigned int ticks)
+{
+ return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
+}
+
void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
@@ -1406,6 +1459,7 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
unsigned int t4_get_regs_len(struct adapter *adapter);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
int t4_seeprom_wp(struct adapter *adapter, bool enable);
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
@@ -1451,7 +1505,7 @@ unsigned int qtimer_val(const struct adapter *adap,
int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
-int t4_init_tp_params(struct adapter *adap);
+int t4_init_tp_params(struct adapter *adap, bool sleep_ok);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_init_portinfo(struct port_info *pi, int mbox,
@@ -1465,14 +1519,15 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq);
int t4_read_rss(struct adapter *adapter, u16 *entries);
-void t4_read_rss_key(struct adapter *adapter, u32 *key);
-void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
+ bool sleep_ok);
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
- u32 *valp);
+ u32 *valp, bool sleep_ok);
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
- u32 *vfl, u32 *vfh);
-u32 t4_read_rss_pf_map(struct adapter *adapter);
-u32 t4_read_rss_pf_mask(struct adapter *adapter);
+ u32 *vfl, u32 *vfh, bool sleep_ok);
+u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
+u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
@@ -1503,14 +1558,18 @@ void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
-void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
-void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
-void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
+ bool sleep_ok);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
+ bool sleep_ok);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
+ bool sleep_ok);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
+ bool sleep_ok);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
- struct tp_tcp_stats *v6);
+ struct tp_tcp_stats *v6, bool sleep_ok);
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
- struct tp_fcoe_stats *st);
+ struct tp_fcoe_stats *st, bool sleep_ok);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
@@ -1608,6 +1667,13 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
u32 addr, u32 val);
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
+ unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data);
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize);
@@ -1619,6 +1685,13 @@ void t4_idma_monitor(struct adapter *adapter,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr);
+void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok);
+void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok);
+void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok);
+
void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
new file mode 100644
index 000000000000..29cc625e9833
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "t4_regs.h"
+#include "cxgb4.h"
+#include "cxgb4_cudbg.h"
+#include "cudbg_entity.h"
+
+static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
+ { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
+ { CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+};
+
+static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
+ { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
+ { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
+ { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
+ { CUDBG_CIM_LA, cudbg_collect_cim_la },
+ { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
+ { CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
+ { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
+ { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
+ { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
+ { CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
+ { CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
+ { CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
+ { CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
+ { CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
+ { CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
+ { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
+ { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
+ { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
+ { CUDBG_RSS, cudbg_collect_rss },
+ { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
+ { CUDBG_PATH_MTU, cudbg_collect_path_mtu },
+ { CUDBG_PM_STATS, cudbg_collect_pm_stats },
+ { CUDBG_HW_SCHED, cudbg_collect_hw_sched },
+ { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
+ { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
+ { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
+ { CUDBG_TP_LA, cudbg_collect_tp_la },
+ { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
+ { CUDBG_CLK, cudbg_collect_clk_info },
+ { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
+ { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
+ { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
+ { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
+ { CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
+ { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
+ { CUDBG_VPD_DATA, cudbg_collect_vpd_data },
+ { CUDBG_LE_TCAM, cudbg_collect_le_tcam },
+ { CUDBG_CCTRL, cudbg_collect_cctrl },
+ { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
+ { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
+ { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
+ { CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
+ { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
+};
+
+static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
+{
+ struct cudbg_tcam tcam_region = { 0 };
+ u32 value, n = 0, len = 0;
+
+ switch (entity) {
+ case CUDBG_REG_DUMP:
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T4:
+ len = T4_REGMAP_SIZE;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ len = T5_REGMAP_SIZE;
+ break;
+ default:
+ break;
+ }
+ break;
+ case CUDBG_DEV_LOG:
+ len = adap->params.devlog.size;
+ break;
+ case CUDBG_CIM_LA:
+ if (is_t6(adap->params.chip)) {
+ len = adap->params.cim_la_size / 10 + 1;
+ len *= 11 * sizeof(u32);
+ } else {
+ len = adap->params.cim_la_size / 8;
+ len *= 8 * sizeof(u32);
+ }
+ len += sizeof(u32); /* for reading CIM LA configuration */
+ break;
+ case CUDBG_CIM_MA_LA:
+ len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+ break;
+ case CUDBG_CIM_QCFG:
+ len = sizeof(struct cudbg_cim_qcfg);
+ break;
+ case CUDBG_CIM_IBQ_TP0:
+ case CUDBG_CIM_IBQ_TP1:
+ case CUDBG_CIM_IBQ_ULP:
+ case CUDBG_CIM_IBQ_SGE0:
+ case CUDBG_CIM_IBQ_SGE1:
+ case CUDBG_CIM_IBQ_NCSI:
+ len = CIM_IBQ_SIZE * 4 * sizeof(u32);
+ break;
+ case CUDBG_CIM_OBQ_ULP0:
+ len = cudbg_cim_obq_size(adap, 0);
+ break;
+ case CUDBG_CIM_OBQ_ULP1:
+ len = cudbg_cim_obq_size(adap, 1);
+ break;
+ case CUDBG_CIM_OBQ_ULP2:
+ len = cudbg_cim_obq_size(adap, 2);
+ break;
+ case CUDBG_CIM_OBQ_ULP3:
+ len = cudbg_cim_obq_size(adap, 3);
+ break;
+ case CUDBG_CIM_OBQ_SGE:
+ len = cudbg_cim_obq_size(adap, 4);
+ break;
+ case CUDBG_CIM_OBQ_NCSI:
+ len = cudbg_cim_obq_size(adap, 5);
+ break;
+ case CUDBG_CIM_OBQ_RXQ0:
+ len = cudbg_cim_obq_size(adap, 6);
+ break;
+ case CUDBG_CIM_OBQ_RXQ1:
+ len = cudbg_cim_obq_size(adap, 7);
+ break;
+ case CUDBG_EDC0:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EDRAM0_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+ len = EDRAM0_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_EDC1:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EDRAM1_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+ len = EDRAM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_RSS:
+ len = RSS_NENTRIES * sizeof(u16);
+ break;
+ case CUDBG_RSS_VF_CONF:
+ len = adap->params.arch.vfcount *
+ sizeof(struct cudbg_rss_vf_conf);
+ break;
+ case CUDBG_PATH_MTU:
+ len = NMTUS * sizeof(u16);
+ break;
+ case CUDBG_PM_STATS:
+ len = sizeof(struct cudbg_pm_stats);
+ break;
+ case CUDBG_HW_SCHED:
+ len = sizeof(struct cudbg_hw_sched);
+ break;
+ case CUDBG_TP_INDIRECT:
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ n = sizeof(t5_tp_pio_array) +
+ sizeof(t5_tp_tm_pio_array) +
+ sizeof(t5_tp_mib_index_array);
+ break;
+ case CHELSIO_T6:
+ n = sizeof(t6_tp_pio_array) +
+ sizeof(t6_tp_tm_pio_array) +
+ sizeof(t6_tp_mib_index_array);
+ break;
+ default:
+ break;
+ }
+ n = n / (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n;
+ break;
+ case CUDBG_SGE_INDIRECT:
+ len = sizeof(struct ireg_buf) * 2;
+ break;
+ case CUDBG_ULPRX_LA:
+ len = sizeof(struct cudbg_ulprx_la);
+ break;
+ case CUDBG_TP_LA:
+ len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
+ break;
+ case CUDBG_CIM_PIF_LA:
+ len = sizeof(struct cudbg_cim_pif_la);
+ len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+ break;
+ case CUDBG_CLK:
+ len = sizeof(struct cudbg_clk_info);
+ break;
+ case CUDBG_PCIE_INDIRECT:
+ n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n * 2;
+ break;
+ case CUDBG_PM_INDIRECT:
+ n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n * 2;
+ break;
+ case CUDBG_TID_INFO:
+ len = sizeof(struct cudbg_tid_info_region_rev1);
+ break;
+ case CUDBG_DUMP_CONTEXT:
+ len = cudbg_dump_context_size(adap);
+ break;
+ case CUDBG_MPS_TCAM:
+ len = sizeof(struct cudbg_mps_tcam) *
+ adap->params.arch.mps_tcam_size;
+ break;
+ case CUDBG_VPD_DATA:
+ len = sizeof(struct cudbg_vpd_data);
+ break;
+ case CUDBG_LE_TCAM:
+ cudbg_fill_le_tcam_info(adap, &tcam_region);
+ len = sizeof(struct cudbg_tcam) +
+ sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+ break;
+ case CUDBG_CCTRL:
+ len = sizeof(u16) * NMTUS * NCCTRL_WIN;
+ break;
+ case CUDBG_MA_INDIRECT:
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ n = sizeof(t6_ma_ireg_array) /
+ (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n * 2;
+ }
+ break;
+ case CUDBG_ULPTX_LA:
+ len = sizeof(struct cudbg_ulptx_la);
+ break;
+ case CUDBG_UP_CIM_INDIRECT:
+ n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n;
+ break;
+ case CUDBG_PBT_TABLE:
+ len = sizeof(struct cudbg_pbt_tables);
+ break;
+ case CUDBG_MBOX_LOG:
+ len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
+ break;
+ case CUDBG_HMA_INDIRECT:
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ n = sizeof(t6_hma_ireg_array) /
+ (IREG_NUM_ELEM * sizeof(u32));
+ len = sizeof(struct ireg_buf) * n;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return len;
+}
+
+u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
+{
+ u32 i, entity;
+ u32 len = 0;
+
+ if (flag & CXGB4_ETH_DUMP_HW) {
+ for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
+ entity = cxgb4_collect_hw_dump[i].entity;
+ len += cxgb4_get_entity_length(adap, entity);
+ }
+ }
+
+ if (flag & CXGB4_ETH_DUMP_MEM) {
+ for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
+ entity = cxgb4_collect_mem_dump[i].entity;
+ len += cxgb4_get_entity_length(adap, entity);
+ }
+ }
+
+ return len;
+}
+
+static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ const struct cxgb4_collect_entity *e_arr,
+ u32 arr_size, void *buf, u32 *tot_size)
+{
+ struct adapter *adap = pdbg_init->adap;
+ struct cudbg_error cudbg_err = { 0 };
+ struct cudbg_entity_hdr *entity_hdr;
+ u32 entity_size, i;
+ u32 total_size = 0;
+ int ret;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct cxgb4_collect_entity *e = &e_arr[i];
+
+ /* Skip entities that won't fit in output buffer */
+ entity_size = cxgb4_get_entity_length(adap, e->entity);
+ if (entity_size >
+ pdbg_init->outbuf_size - *tot_size - total_size)
+ continue;
+
+ entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
+ entity_hdr->entity_type = e->entity;
+ entity_hdr->start_offset = dbg_buff->offset;
+ memset(&cudbg_err, 0, sizeof(struct cudbg_error));
+ ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
+ if (ret) {
+ entity_hdr->size = 0;
+ dbg_buff->offset = entity_hdr->start_offset;
+ } else {
+ cudbg_align_debug_buffer(dbg_buff, entity_hdr);
+ }
+
+ /* Log error and continue with next entity */
+ if (cudbg_err.sys_err)
+ ret = CUDBG_SYSTEM_ERROR;
+
+ entity_hdr->hdr_flags = ret;
+ entity_hdr->sys_err = cudbg_err.sys_err;
+ entity_hdr->sys_warn = cudbg_err.sys_warn;
+ total_size += entity_hdr->size;
+ }
+
+ *tot_size += total_size;
+}
+
+int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
+ u32 flag)
+{
+ struct cudbg_init cudbg_init = { 0 };
+ struct cudbg_buffer dbg_buff = { 0 };
+ u32 size, min_size, total_size = 0;
+ struct cudbg_hdr *cudbg_hdr;
+
+ size = *buf_size;
+
+ cudbg_init.adap = adap;
+ cudbg_init.outbuf = buf;
+ cudbg_init.outbuf_size = size;
+
+ dbg_buff.data = buf;
+ dbg_buff.size = size;
+ dbg_buff.offset = 0;
+
+ cudbg_hdr = (struct cudbg_hdr *)buf;
+ cudbg_hdr->signature = CUDBG_SIGNATURE;
+ cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
+ cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
+ cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
+ cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
+ cudbg_hdr->chip_ver = adap->params.chip;
+ cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
+ cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
+
+ min_size = sizeof(struct cudbg_hdr) +
+ sizeof(struct cudbg_entity_hdr) *
+ cudbg_hdr->max_entities;
+ if (size < min_size)
+ return -ENOMEM;
+
+ dbg_buff.offset += min_size;
+ total_size = dbg_buff.offset;
+
+ if (flag & CXGB4_ETH_DUMP_HW)
+ cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
+ cxgb4_collect_hw_dump,
+ ARRAY_SIZE(cxgb4_collect_hw_dump),
+ buf,
+ &total_size);
+
+ if (flag & CXGB4_ETH_DUMP_MEM)
+ cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
+ cxgb4_collect_mem_dump,
+ ARRAY_SIZE(cxgb4_collect_mem_dump),
+ buf,
+ &total_size);
+
+ cudbg_hdr->data_len = total_size;
+ *buf_size = total_size;
+ return 0;
+}
+
+void cxgb4_init_ethtool_dump(struct adapter *adapter)
+{
+ adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
+ adapter->eth_dump.version = adapter->params.fw_vers;
+ adapter->eth_dump.len = 0;
+}
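
The blob produced by cxgb4_cudbg_collect() therefore starts with the cudbg_hdr filled in above, followed by max_entities fixed-size entity headers (the min_size region reserved before collection starts), with each entity's payload at the start_offset recorded by the collection loop; data_len covers the header region plus all collected data. A hedged sketch of walking such a dump, assuming only that the entity headers are packed immediately after the main header as the min_size computation suggests:

/* Illustrative walker only; assumes the [cudbg_hdr][entity hdrs][data]
 * layout implied by cxgb4_cudbg_collect() above.
 */
static void example_walk_cudbg(void *buf)
{
	struct cudbg_hdr *hdr = buf;
	struct cudbg_entity_hdr *ent;
	u16 i;

	if (hdr->signature != CUDBG_SIGNATURE)
		return;

	ent = (struct cudbg_entity_hdr *)((u8 *)buf + hdr->hdr_len);
	for (i = 0; i < hdr->max_entities; i++, ent++) {
		if (!ent->size)
			continue;	/* entity skipped or collection failed */
		/* ent->size bytes of data start at (u8 *)buf + ent->start_offset */
	}
}
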
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
new file mode 100644
index 000000000000..c099b5aa2214
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CXGB4_CUDBG_H__
+#define __CXGB4_CUDBG_H__
+
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_lib.h"
+
+typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+
+struct cxgb4_collect_entity {
+ enum cudbg_dbg_entity_type entity;
+ cudbg_collect_callback_t collect_cb;
+};
+
+enum CXGB4_ETHTOOL_DUMP_FLAGS {
+ CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE,
+ CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip Memory Dumps */
+ CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */
+};
+
+u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag);
+int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
+ u32 flag);
+void cxgb4_init_ethtool_dump(struct adapter *adapter);
+#endif /* __CXGB4_CUDBG_H__ */
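
Taken together with cudbg_lib.h and cxgb4_cudbg.c, this header shows the pattern for exposing a new dump entity: a collector with the cudbg_collect_callback_t signature, an entry pairing it with its enum cudbg_dbg_entity_type value in one of the cxgb4_collect_*_dump[] tables, and a matching size case in cxgb4_get_entity_length(), which both the skip-if-too-big check in cxgb4_cudbg_collect_entity() and the ethtool dump-length calculation later in this patch rely on.
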
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6ee2ed30626b..4e7f72b17e82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -40,8 +40,7 @@ static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
return false;
}
-/* Initialize a port's Data Center Bridging state. Typically used after a
- * Link Down event.
+/* Initialize a port's Data Center Bridging state.
*/
void cxgb4_dcb_state_init(struct net_device *dev)
{
@@ -106,6 +105,15 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
}
}
+/* Reset a port's Data Center Bridging state. Typically used after a
+ * Link Down event.
+ */
+void cxgb4_dcb_reset(struct net_device *dev)
+{
+ cxgb4_dcb_cleanup_apps(dev);
+ cxgb4_dcb_state_init(dev);
+}
+
/* Finite State machine for Data Center Bridging.
*/
void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -194,8 +202,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
* state. We need to reset back to a ground state
* of incomplete.
*/
- cxgb4_dcb_cleanup_apps(dev);
- cxgb4_dcb_state_init(dev);
+ cxgb4_dcb_reset(dev);
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
dcb->supported = CXGB4_DCBX_FW_SUPPORT;
linkwatch_fire_event(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index ccf24d3dc982..02040b99c78a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -131,6 +131,7 @@ struct port_dcb_info {
void cxgb4_dcb_state_init(struct net_device *);
void cxgb4_dcb_version_init(struct net_device *);
+void cxgb4_dcb_reset(struct net_device *dev);
void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 76540b0e082d..917663b35603 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2211,7 +2211,7 @@ static int rss_key_show(struct seq_file *seq, void *v)
{
u32 key[10];
- t4_read_rss_key(seq->private, key);
+ t4_read_rss_key(seq->private, key, true);
seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n",
key[9], key[8], key[7], key[6], key[5], key[4], key[3],
key[2], key[1], key[0]);
@@ -2248,7 +2248,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf,
}
}
- t4_write_rss_key(adap, key, -1);
+ t4_write_rss_key(adap, key, -1, true);
return count;
}
@@ -2325,12 +2325,13 @@ static int rss_pf_config_open(struct inode *inode, struct file *file)
return -ENOMEM;
pfconf = (struct rss_pf_conf *)p->data;
- rss_pf_map = t4_read_rss_pf_map(adapter);
- rss_pf_mask = t4_read_rss_pf_mask(adapter);
+ rss_pf_map = t4_read_rss_pf_map(adapter, true);
+ rss_pf_mask = t4_read_rss_pf_mask(adapter, true);
for (pf = 0; pf < 8; pf++) {
pfconf[pf].rss_pf_map = rss_pf_map;
pfconf[pf].rss_pf_mask = rss_pf_mask;
- t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config);
+ t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config,
+ true);
}
return 0;
}
@@ -2393,7 +2394,7 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
vfconf = (struct rss_vf_conf *)p->data;
for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
- &vfconf[vf].rss_vf_vfh);
+ &vfconf[vf].rss_vf_vfh, true);
}
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index a71af1e587e2..eb338212f5af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -21,6 +21,7 @@
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
+#include "cxgb4_cudbg.h"
#define EEPROM_MAGIC 0x38E2F10C
@@ -335,10 +336,10 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
memset(s, 0, sizeof(*s));
spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, &v4, &v6);
- t4_tp_get_rdma_stats(adap, &rdma_stats);
- t4_get_usm_stats(adap, &usm_stats);
- t4_tp_get_err_stats(adap, &err_stats);
+ t4_tp_get_tcp_stats(adap, &v4, &v6, false);
+ t4_tp_get_rdma_stats(adap, &rdma_stats, false);
+ t4_get_usm_stats(adap, &usm_stats, false);
+ t4_tp_get_err_stats(adap, &err_stats, false);
spin_unlock(&adap->stats_lock);
s->db_drop = adap->db_stats.db_drop;
@@ -388,9 +389,9 @@ static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
memset(s, 0, sizeof(*s));
spin_lock(&adap->stats_lock);
- t4_tp_get_cpl_stats(adap, &cpl_stats);
- t4_tp_get_err_stats(adap, &err_stats);
- t4_get_fcoe_stats(adap, i, &fcoe_stats);
+ t4_tp_get_cpl_stats(adap, &cpl_stats, false);
+ t4_tp_get_err_stats(adap, &err_stats, false);
+ t4_get_fcoe_stats(adap, i, &fcoe_stats, false);
spin_unlock(&adap->stats_lock);
s->cpl_req = cpl_stats.req[i];
@@ -1063,40 +1064,11 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
return 0;
}
-/**
- * eeprom_ptov - translate a physical EEPROM address to virtual
- * @phys_addr: the physical EEPROM address
- * @fn: the PCI function number
- * @sz: size of function-specific area
- *
- * Translate a physical EEPROM address to virtual. The first 1K is
- * accessed through virtual addresses starting at 31K, the rest is
- * accessed through virtual addresses starting at 0.
- *
- * The mapping is as follows:
- * [0..1K) -> [31K..32K)
- * [1K..1K+A) -> [31K-A..31K)
- * [1K+A..ES) -> [0..ES-A-1K)
- *
- * where A = @fn * @sz, and ES = EEPROM size.
- */
-static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
-{
- fn *= sz;
- if (phys_addr < 1024)
- return phys_addr + (31 << 10);
- if (phys_addr < 1024 + fn)
- return 31744 - fn + phys_addr - 1024;
- if (phys_addr < EEPROMSIZE)
- return phys_addr - 1024 - fn;
- return -EINVAL;
-}
-
/* The next two routines implement eeprom read/write from physical addresses.
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+ int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1105,7 +1077,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+ int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1374,6 +1346,56 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return -EOPNOTSUPP;
}
+static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 len = 0;
+
+ len = sizeof(struct cudbg_hdr) +
+ sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
+ len += cxgb4_get_dump_length(adapter, eth_dump->flag);
+
+ adapter->eth_dump.flag = eth_dump->flag;
+ adapter->eth_dump.len = len;
+ return 0;
+}
+
+static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ eth_dump->flag = adapter->eth_dump.flag;
+ eth_dump->len = adapter->eth_dump.len;
+ eth_dump->version = adapter->eth_dump.version;
+ return 0;
+}
+
+static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
+ void *buf)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 len = 0;
+ int ret = 0;
+
+ if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE)
+ return -ENOENT;
+
+ len = sizeof(struct cudbg_hdr) +
+ sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
+ len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag);
+ if (eth_dump->len < len)
+ return -ENOMEM;
+
+ ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag);
+ if (ret)
+ return ret;
+
+ eth_dump->flag = adapter->eth_dump.flag;
+ eth_dump->len = len;
+ eth_dump->version = adapter->eth_dump.version;
+ return 0;
+}
+
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1404,7 +1426,10 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_rxfh = get_rss_table,
.set_rxfh = set_rss_table,
.flash_device = set_flash,
- .get_ts_info = get_ts_info
+ .get_ts_info = get_ts_info,
+ .set_dump = set_dump,
+ .get_dump_flag = get_dump_flag,
+ .get_dump_data = get_dump_data,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
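
With these ethtool hooks in place the dump should be reachable from a reasonably recent ethtool binary (the interface name and flag value below are just examples): "ethtool -W eth0 3" selects CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW via set_dump(), "ethtool -w eth0" reports the flag, version and length from get_dump_flag(), and "ethtool -w eth0 data dump.bin" runs cxgb4_cudbg_collect() through get_dump_data() and writes the blob to a file.
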
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 45b5853ca2f1..5980f308a253 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -31,10 +31,15 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <net/ipv6.h>
#include "cxgb4.h"
#include "t4_regs.h"
+#include "t4_tcb.h"
+#include "t4_values.h"
+#include "clip_tbl.h"
#include "l2t.h"
+#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"
@@ -48,6 +53,194 @@ static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
return !(conf & conf_mask) && is_field_set(val, mask);
}
+static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
+ unsigned int ftid, u16 word, u64 mask, u64 val,
+ int no_reply)
+{
+ struct cpl_set_tcb_field *req;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = htons(REPLY_CHAN_V(0) |
+ QUEUENO_V(adap->sge.fw_evtq.abs_id) |
+ NO_REPLY_V(no_reply));
+ req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adap, skb);
+ return 0;
+}
+
+/* Set one of the t_flags bits in the TCB.
+ */
+static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
+ unsigned int ftid, unsigned int bit_pos,
+ unsigned int val, int no_reply)
+{
+ return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
+ (unsigned long long)val << bit_pos, no_reply);
+}
+
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = htonl(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+}
+
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = htonl(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+}
+
+static void mk_set_tcb_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word, u64 mask, u64 val,
+ u8 cookie, int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+ txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
+ QUEUENO_V(0));
+ req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
+ sc->len = htonl(0);
+}
+
+static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
+{
+ int err;
+
+ /* do a set-tcb for smac-sel and CWR bit.. */
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
+ TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
+ TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (!err)
+ return 0;
+
+smac_err:
+ dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
+ f->tid, err);
+ return err;
+}
+
+static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ unsigned int tid, bool dip, bool sip, bool dp,
+ bool sp)
+{
+ if (dip) {
+ if (f->fs.type) {
+ set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
+ WORD_MASK, f->fs.nat_lip[15] |
+ f->fs.nat_lip[14] << 8 |
+ f->fs.nat_lip[13] << 16 |
+ f->fs.nat_lip[12] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
+ WORD_MASK, f->fs.nat_lip[11] |
+ f->fs.nat_lip[10] << 8 |
+ f->fs.nat_lip[9] << 16 |
+ f->fs.nat_lip[8] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
+ WORD_MASK, f->fs.nat_lip[7] |
+ f->fs.nat_lip[6] << 8 |
+ f->fs.nat_lip[5] << 16 |
+ f->fs.nat_lip[4] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
+ WORD_MASK, f->fs.nat_lip[3] |
+ f->fs.nat_lip[2] << 8 |
+ f->fs.nat_lip[1] << 16 |
+ f->fs.nat_lip[0] << 24, 1);
+ } else {
+ set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
+ WORD_MASK, f->fs.nat_lip[3] |
+ f->fs.nat_lip[2] << 8 |
+ f->fs.nat_lip[1] << 16 |
+ f->fs.nat_lip[0] << 24, 1);
+ }
+ }
+
+ if (sip) {
+ if (f->fs.type) {
+ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
+ WORD_MASK, f->fs.nat_fip[15] |
+ f->fs.nat_fip[14] << 8 |
+ f->fs.nat_fip[13] << 16 |
+ f->fs.nat_fip[12] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
+ WORD_MASK, f->fs.nat_fip[11] |
+ f->fs.nat_fip[10] << 8 |
+ f->fs.nat_fip[9] << 16 |
+ f->fs.nat_fip[8] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
+ WORD_MASK, f->fs.nat_fip[7] |
+ f->fs.nat_fip[6] << 8 |
+ f->fs.nat_fip[5] << 16 |
+ f->fs.nat_fip[4] << 24, 1);
+
+ set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
+ WORD_MASK, f->fs.nat_fip[3] |
+ f->fs.nat_fip[2] << 8 |
+ f->fs.nat_fip[1] << 16 |
+ f->fs.nat_fip[0] << 24, 1);
+
+ } else {
+ set_tcb_field(adap, f, tid,
+ TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
+ WORD_MASK, f->fs.nat_fip[3] |
+ f->fs.nat_fip[2] << 8 |
+ f->fs.nat_fip[1] << 16 |
+ f->fs.nat_fip[0] << 24, 1);
+ }
+ }
+
+ set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
+ (dp ? f->fs.nat_lport : 0) |
+ (sp ? f->fs.nat_fport << 16 : 0), 1);
+}
+
/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
struct ch_filter_specification *fs)
@@ -148,6 +341,116 @@ static int get_filter_steerq(struct net_device *dev,
return iq;
}
+static int get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *pkts, u64 *bytes, bool hash)
+{
+ unsigned int tcb_base, tcbaddr;
+ unsigned int word_offset;
+ struct filter_entry *f;
+ __be64 be64_byte_count;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+ } else {
+ return -E2BIG;
+ }
+ } else {
+ if ((fidx != (adapter->tids.nftids +
+ adapter->tids.nsftids - 1)) &&
+ fidx >= adapter->tids.nftids)
+ return -E2BIG;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+ }
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+
+ spin_lock(&adapter->win0_lock);
+ if (is_t4(adapter->params.chip)) {
+ __be64 be64_count;
+
+ /* T4 doesn't maintain byte counts in hw */
+ *bytes = 0;
+
+ /* Get pkts */
+ word_offset = 4;
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr + (word_offset * sizeof(__be32)),
+ sizeof(be64_count),
+ (__be32 *)&be64_count,
+ T4_MEMORY_READ);
+ if (ret < 0)
+ goto out;
+ *pkts = be64_to_cpu(be64_count);
+ } else {
+ __be32 be32_count;
+
+ /* Get bytes */
+ word_offset = 4;
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr + (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ if (ret < 0)
+ goto out;
+ *bytes = be64_to_cpu(be64_byte_count);
+
+ /* Get pkts */
+ word_offset = 6;
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr + (word_offset * sizeof(__be32)),
+ sizeof(be32_count),
+ &be32_count,
+ T4_MEMORY_READ);
+ if (ret < 0)
+ goto out;
+ *pkts = (u64)be32_to_cpu(be32_count);
+ }
+
+out:
+ spin_unlock(&adapter->win0_lock);
+ return ret;
+}
+
+int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
+ u64 *hitcnt, u64 *bytecnt, bool hash)
+{
+ struct adapter *adapter = netdev2adap(dev);
+
+ return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
+}
+
+int cxgb4_get_free_ftid(struct net_device *dev, int family)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ int ftid;
+
+ spin_lock_bh(&t->ftid_lock);
+ if (family == PF_INET) {
+ ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
+ if (ftid >= t->nftids)
+ ftid = -1;
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region unallocated */
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
+out_unlock:
+ spin_unlock_bh(&t->ftid_lock);
+ return ftid;
+}
+
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
{
spin_lock_bh(&t->ftid_lock);
@@ -191,7 +494,8 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+ t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
+ : adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -210,7 +514,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
int set_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct fw_filter_wr *fwr;
+ struct fw_filter2_wr *fwr;
struct sk_buff *skb;
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
@@ -231,6 +535,21 @@ int set_filter_wr(struct adapter *adapter, int fidx)
}
}
+ /* If the new filter requires loopback Source MAC rewriting then
+ * we need to allocate a SMT entry for the filter.
+ */
+ if (f->fs.newsmac) {
+ f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
+ if (!f->smt) {
+ if (f->l2t) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ }
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+
fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
@@ -241,7 +560,10 @@ int set_filter_wr(struct adapter *adapter, int fidx)
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ if (adapter->params.filter2_wr_support)
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
+ else
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
fwr->tid_to_iq =
htonl(FW_FILTER_WR_TID_V(f->tid) |
@@ -256,7 +578,6 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -303,8 +624,18 @@ int set_filter_wr(struct adapter *adapter, int fidx)
fwr->lpm = htons(f->fs.mask.lport);
fwr->fp = htons(f->fs.val.fport);
fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ if (adapter->params.filter2_wr_support) {
+ fwr->natmode_to_ulp_type =
+ FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
+ ULP_MODE_TCPDDP :
+ ULP_MODE_NONE) |
+ FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
+ memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
+ memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
+ fwr->newlport = htons(f->fs.nat_lport);
+ fwr->newfport = htons(f->fs.nat_fport);
+ }
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -354,14 +685,18 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
/* If the new or old filter has loopback rewriting rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
+ * need to free any existing L2T, SMT, or CLIP entries of the filter
+ * rule.
*/
if (f->l2t)
cxgb4_l2t_release(f->l2t);
+ if (f->smt)
+ cxgb4_smt_release(f->smt);
+
+ if (f->fs.hash && f->fs.type)
+ cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
@@ -431,6 +766,418 @@ static void fill_default_mask(struct ch_filter_specification *fs)
fs->mask.fport = ~0;
}
+static bool is_addr_all_mask(u8 *ipmask, int family)
+{
+ if (family == AF_INET) {
+ struct in_addr *addr;
+
+ addr = (struct in_addr *)ipmask;
+ if (addr->s_addr == 0xffffffff)
+ return true;
+ } else if (family == AF_INET6) {
+ struct in6_addr *addr6;
+
+ addr6 = (struct in6_addr *)ipmask;
+ if (addr6->s6_addr32[0] == 0xffffffff &&
+ addr6->s6_addr32[1] == 0xffffffff &&
+ addr6->s6_addr32[2] == 0xffffffff &&
+ addr6->s6_addr32[3] == 0xffffffff)
+ return true;
+ }
+ return false;
+}
+
+static bool is_inaddr_any(u8 *ip, int family)
+{
+ int addr_type;
+
+ if (family == AF_INET) {
+ struct in_addr *addr;
+
+ addr = (struct in_addr *)ip;
+ if (addr->s_addr == htonl(INADDR_ANY))
+ return true;
+ } else if (family == AF_INET6) {
+ struct in6_addr *addr6;
+
+ addr6 = (struct in6_addr *)ip;
+ addr_type = ipv6_addr_type(addr6);
+ if (addr_type == IPV6_ADDR_ANY)
+ return true;
+ }
+ return false;
+}
+
+bool is_filter_exact_match(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u32 mask;
+
+ if (!is_hashfilter(adap))
+ return false;
+
+ if (fs->type) {
+ if (is_inaddr_any(fs->val.fip, AF_INET6) ||
+ !is_addr_all_mask(fs->mask.fip, AF_INET6))
+ return false;
+
+ if (is_inaddr_any(fs->val.lip, AF_INET6) ||
+ !is_addr_all_mask(fs->mask.lip, AF_INET6))
+ return false;
+ } else {
+ if (is_inaddr_any(fs->val.fip, AF_INET) ||
+ !is_addr_all_mask(fs->mask.fip, AF_INET))
+ return false;
+
+ if (is_inaddr_any(fs->val.lip, AF_INET) ||
+ !is_addr_all_mask(fs->mask.lip, AF_INET))
+ return false;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return false;
+
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return false;
+
+ if (tp->fcoe_shift >= 0) {
+ mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
+ if (mask && !fs->mask.fcoe)
+ return false;
+ }
+
+ if (tp->port_shift >= 0) {
+ mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
+ if (mask && !fs->mask.iport)
+ return false;
+ }
+
+ if (tp->vnic_shift >= 0) {
+ mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
+
+ if ((adap->params.tp.ingress_config & VNIC_F)) {
+ if (mask && !fs->mask.pfvf_vld)
+ return false;
+ } else {
+ if (mask && !fs->mask.ovlan_vld)
+ return false;
+ }
+ }
+
+ if (tp->vlan_shift >= 0) {
+ mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
+ if (mask && !fs->mask.ivlan)
+ return false;
+ }
+
+ if (tp->tos_shift >= 0) {
+ mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
+ if (mask && !fs->mask.tos)
+ return false;
+ }
+
+ if (tp->protocol_shift >= 0) {
+ mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
+ if (mask && !fs->mask.proto)
+ return false;
+ }
+
+ if (tp->ethertype_shift >= 0) {
+ mask = (hash_filter_mask >> tp->ethertype_shift) &
+ FT_ETHERTYPE_W;
+ if (mask && !fs->mask.ethtype)
+ return false;
+ }
+
+ if (tp->macmatch_shift >= 0) {
+ mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
+ if (mask && !fs->mask.macidx)
+ return false;
+ }
+
+ if (tp->matchtype_shift >= 0) {
+ mask = (hash_filter_mask >> tp->matchtype_shift) &
+ FT_MPSHITTYPE_W;
+ if (mask && !fs->mask.matchtype)
+ return false;
+ }
+ if (tp->frag_shift >= 0) {
+ mask = (hash_filter_mask >> tp->frag_shift) &
+ FT_FRAGMENTATION_W;
+ if (mask && !fs->mask.frag)
+ return false;
+ }
+ return true;
+}
+
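+/* Construct the 64-bit compressed filter tuple that is carried in the
+ * FILTER_TUPLE field of the active-open request for a hash filter. Field
+ * positions come from the per-adapter TP shift values.
+ */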
+static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
+ struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+
+ /* Initialize each of the Compressed Filter Tuple fields that we
+ * care about.
+ */
+ if (tp->vlan_shift >= 0 && fs->mask.ivlan)
+ ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0 && fs->mask.iport)
+ ntuple |= (u64)fs->val.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!fs->val.proto)
+ ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+ else
+ ntuple |= (u64)fs->val.proto << tp->protocol_shift;
+ }
+
+ if (tp->tos_shift >= 0 && fs->mask.tos)
+ ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
+
+ if (tp->vnic_shift >= 0) {
+ if ((adap->params.tp.ingress_config & VNIC_F) &&
+ fs->mask.pfvf_vld)
+ ntuple |= (u64)((fs->val.pfvf_vld << 16) |
+ (fs->val.pf << 13) |
+ (fs->val.vf)) << tp->vnic_shift;
+ else
+ ntuple |= (u64)((fs->val.ovlan_vld << 16) |
+ (fs->val.ovlan)) << tp->vnic_shift;
+ }
+
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx)
+ ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
+
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
+ ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
+
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
+ ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
+
+ if (tp->frag_shift >= 0 && fs->mask.frag)
+ ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
+
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
+ ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
+ return ntuple;
+}
+
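+/* Build the T6 active-open request (IPv6) that installs a hash filter.
+ * The compressed filter tuple and the filter actions are carried in the
+ * FILTER_TUPLE parameter and the opt0/opt2 fields.
+ */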
+static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *t6req = NULL;
+ struct cpl_act_open_req6 *req = NULL;
+
+ t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
+ INIT_TP_WR(t6req, 0);
+ req = (struct cpl_act_open_req6 *)t6req;
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
+ req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
+ req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
+ req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
+ req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ DELACK_V(f->fs.hitcnts) |
+ L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
+ SMAC_SEL_V((cxgb4_port_viid(f->dev) &
+ 0x7F) << 1) |
+ TX_CHAN_V(f->fs.eport) |
+ NO_CONG_V(f->fs.rpttid) |
+ ULP_MODE_V(f->fs.nat_mode ?
+ ULP_MODE_TCPDDP : ULP_MODE_NONE) |
+ TCAM_BYPASS_F | NON_OFFLOAD_F);
+ t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
+ f->dev)));
+ t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
+ RSS_QUEUE_V(f->fs.iq) |
+ TX_QUEUE_V(f->fs.nat_mode) |
+ T5_OPT_2_VALID_F |
+ RX_CHANNEL_F |
+ CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ PACE_V((f->fs.maskhash) |
+ ((f->fs.dirsteerhash) << 1)) |
+ CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+}
+
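+/* IPv4 variant of the active-open request used to install a hash filter. */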
+static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *t6req = NULL;
+ struct cpl_act_open_req *req = NULL;
+
+ t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
+ INIT_TP_WR(t6req, 0);
+ req = (struct cpl_act_open_req *)t6req;
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ DELACK_V(f->fs.hitcnts) |
+ L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
+ SMAC_SEL_V((cxgb4_port_viid(f->dev) &
+ 0x7F) << 1) |
+ TX_CHAN_V(f->fs.eport) |
+ NO_CONG_V(f->fs.rpttid) |
+ ULP_MODE_V(f->fs.nat_mode ?
+ ULP_MODE_TCPDDP : ULP_MODE_NONE) |
+ TCAM_BYPASS_F | NON_OFFLOAD_F);
+
+ t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
+ f->dev)));
+ t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
+ RSS_QUEUE_V(f->fs.iq) |
+ TX_QUEUE_V(f->fs.nat_mode) |
+ T5_OPT_2_VALID_F |
+ RX_CHANNEL_F |
+ CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ PACE_V((f->fs.maskhash) |
+ ((f->fs.dirsteerhash) << 1)) |
+ CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+}
+
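+/* Install an exact-match filter in the hash table: validate the request,
+ * allocate any L2T/SMT/CLIP resources it needs, grab an ATID and send a
+ * TCAM-bypass active-open; completion is signalled from hash_filter_rpl().
+ */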
+static int cxgb4_set_hash_filter(struct net_device *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sk_buff *skb;
+ int iq, atid, size;
+ int ret = 0;
+ u32 iconf;
+
+ fill_default_mask(fs);
+ ret = validate_filter(dev, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+ if (iq < 0)
+ return iq;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return -ENOMEM;
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ /* If the new filter requires loopback Source MAC rewriting then
+ * we need to allocate an SMT entry for the filter.
+ */
+ if (f->fs.newsmac) {
+ f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
+ if (!f->smt) {
+ ret = -ENOMEM;
+ goto free_l2t;
+ }
+ }
+
+ atid = cxgb4_alloc_atid(t, f);
+ if (atid < 0) {
+ ret = atid;
+ goto free_smt;
+ }
+
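+ /* When the VNIC bit is set in the ingress config, the outer-VLAN
+ * slot of the compressed tuple carries the PF/VF identifiers, so
+ * translate the PF/VF match into ovlan/ovlan_vld.
+ */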
+ iconf = adapter->params.tp.ingress_config;
+ if (iconf & VNIC_F) {
+ f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+ f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ size = sizeof(struct cpl_t6_act_open_req);
+ if (f->fs.type) {
+ ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
+ if (ret)
+ goto free_atid;
+
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mk_act_open_req6(f, skb,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mk_act_open_req(f, skb,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+
+free_clip:
+ cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+
+free_atid:
+ cxgb4_free_atid(t, atid);
+
+free_smt:
+ if (f->smt) {
+ cxgb4_smt_release(f->smt);
+ f->smt = NULL;
+ }
+
+free_l2t:
+ if (f->l2t) {
+ cxgb4_l2t_release(f->l2t);
+ f->l2t = NULL;
+ }
+
+out_err:
+ kfree(f);
+ return ret;
+}
+
/* Check a Chelsio Filter Request for validity, convert it into our internal
* format and send it to the hardware. Return 0 on success, an error number
* otherwise. We attach any provided filter operation context to the internal
@@ -447,6 +1194,14 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
u32 iconf;
int iq, ret;
+ if (fs->hash) {
+ if (is_hashfilter(adapter))
+ return cxgb4_set_hash_filter(dev, fs, ctx);
+ netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
+ __func__);
+ return -EINVAL;
+ }
+
max_fidx = adapter->tids.nftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
@@ -568,12 +1323,74 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return ret;
}
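+/* Remove a hash filter. Deletion is done by sending a chained ULP_TX work
+ * request: a SET_TCB_FIELD that points the entry's RSS info at the FW
+ * event queue, followed by an ABORT_REQ/ABORT_RPL pair to tear the entry
+ * down. The reply arrives as CPL_ABORT_RPL_RSS and is handled in
+ * hash_del_filter_rpl().
+ */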
+static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+ struct cpl_set_tcb_field *req;
+ struct ulptx_idata *aligner;
+ struct work_request_hdr *wr;
+ struct filter_entry *f;
+ struct sk_buff *skb;
+ unsigned int wrlen;
+ int ret;
+
+ netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
+ __func__, filter_id, adapter->tids.nftids);
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ netdev_err(dev, "%s: no filter entry for filter_id = %d",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (!f->valid)
+ return -EINVAL;
+
+ f->ctx = ctx;
+ f->pending = 1;
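+ /* One skb carries the whole chain: WR header, SET_TCB_FIELD (plus
+ * alignment idata), ABORT_REQ and ABORT_RPL, rounded up to 16 bytes.
+ */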
+ wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
+ + sizeof(*abort_req) + sizeof(*abort_rpl), 16);
+ skb = alloc_skb(wrlen, GFP_KERNEL);
+ if (!skb) {
+ netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
+ return -ENOMEM;
+ }
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
+ TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
/* Check a delete filter request for validity and send it to the hardware.
* Return 0 on success, an error number otherwise. We attach any provided
* filter operation context to the internal filter specification in order to
* facilitate signaling completion of the operation.
*/
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
@@ -581,6 +1398,14 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
unsigned int max_fidx;
int ret;
+ if (fs && fs->hash) {
+ if (is_hashfilter(adapter))
+ return cxgb4_del_hash_filter(dev, filter_id, ctx);
+ netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
+ __func__);
+ return -EINVAL;
+ }
+
max_fidx = adapter->tids.nftids;
if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
filter_id >= max_fidx)
@@ -631,14 +1456,19 @@ out:
return ret;
}
-int cxgb4_del_filter(struct net_device *dev, int filter_id)
+int cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs)
{
struct filter_ctx ctx;
int ret;
+ /* If we are shutting down the adapter do not wait for completion */
+ if (netdev2adap(dev)->flags & SHUTTING_DOWN)
+ return __cxgb4_del_filter(dev, filter_id, fs, NULL);
+
init_completion(&ctx.completion);
- ret = __cxgb4_del_filter(dev, filter_id, &ctx);
+ ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
if (ret)
goto out;
@@ -652,6 +1482,157 @@ out:
return ret;
}
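+/* Once a hash filter has been installed, per-filter behaviour (hit
+ * counters, DMAC/VLAN rewrite, SMAC rewrite and NAT) is enabled by
+ * rewriting fields of the entry's TCB.
+ */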
+static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
+ struct filter_entry *f)
+{
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
+ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
+ TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
+ TCB_TIMESTAMP_V(0ULL) |
+ TCB_RTT_TS_RECENT_AGE_V(0ULL),
+ 1);
+
+ if (f->fs.newdmac)
+ set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
+ 1);
+
+ if (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE)
+ set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
+ 1);
+ if (f->fs.newsmac)
+ configure_filter_smac(adap, f);
+
+ if (f->fs.nat_mode) {
+ switch (f->fs.nat_mode) {
+ case NAT_MODE_DIP:
+ set_nat_params(adap, f, tid, true, false, false, false);
+ break;
+
+ case NAT_MODE_DIP_DP:
+ set_nat_params(adap, f, tid, true, false, true, false);
+ break;
+
+ case NAT_MODE_DIP_DP_SIP:
+ set_nat_params(adap, f, tid, true, true, true, false);
+ break;
+ case NAT_MODE_DIP_DP_SP:
+ set_nat_params(adap, f, tid, true, false, true, true);
+ break;
+
+ case NAT_MODE_SIP_SP:
+ set_nat_params(adap, f, tid, false, true, false, true);
+ break;
+
+ case NAT_MODE_DIP_SIP_SP:
+ set_nat_params(adap, f, tid, true, true, false, true);
+ break;
+
+ case NAT_MODE_ALL:
+ set_nat_params(adap, f, tid, true, true, true, true);
+ break;
+
+ default:
+ pr_err("%s: Invalid NAT mode: %d\n",
+ __func__, f->fs.nat_mode);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
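+/* Handle the CPL_ABORT_RPL_RSS reply to a hash filter delete request:
+ * release the filter's resources, free its TID and complete any waiting
+ * context.
+ */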
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ unsigned int status = rpl->status;
+ struct tid_info *t = &adap->tids;
+ unsigned int tid = GET_TID(rpl);
+ struct filter_ctx *ctx = NULL;
+ struct filter_entry *f;
+
+ dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
+ __func__, status, tid);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_err(adap->pdev_dev, "%s:could not find filter entry",
+ __func__);
+ return;
+ }
+ ctx = f->ctx;
+ f->ctx = NULL;
+ clear_filter(adap, f);
+ cxgb4_remove_tid(t, 0, tid, 0);
+ kfree(f);
+ if (ctx) {
+ ctx->result = 0;
+ complete(&ctx->completion);
+ }
+}
+
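+/* Handle the CPL_ACT_OPEN_RPL reply to a hash filter create request. On
+ * success the filter moves from its ATID to the allocated TID and its TCB
+ * is programmed; on failure the resources are released, with -EAGAIN
+ * reported for a full TCAM so the caller may retry.
+ */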
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+ unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
+ unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
+ struct tid_info *t = &adap->tids;
+ unsigned int tid = GET_TID(rpl);
+ struct filter_ctx *ctx = NULL;
+ struct filter_entry *f;
+
+ dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
+ __func__, tid, ftid, status);
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_err(adap->pdev_dev, "%s:could not find filter entry",
+ __func__);
+ return;
+ }
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE:
+ f->tid = tid;
+ f->pending = 0;
+ f->valid = 1;
+ cxgb4_insert_tid(t, f, f->tid, 0);
+ cxgb4_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (configure_filter_tcb(adap, tid, f)) {
+ clear_filter(adap, f);
+ cxgb4_remove_tid(t, 0, tid, 0);
+ kfree(f);
+ if (ctx) {
+ ctx->result = -EINVAL;
+ complete(&ctx->completion);
+ }
+ return;
+ }
+ break;
+
+ default:
+ dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+ clear_filter(adap, f);
+ cxgb4_free_atid(t, ftid);
+ kfree(f);
+ }
+ if (ctx)
+ complete(&ctx->completion);
+}
+
/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
@@ -690,19 +1671,23 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
clear_filter(adap, f);
if (ctx)
ctx->result = 0;
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- if (ctx)
- ctx->result = -ENOMEM;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
+ int err = 0;
+
+ if (f->fs.newsmac)
+ err = configure_filter_smac(adap, f);
+
+ if (!err) {
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
+ }
+ } else {
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = err;
}
} else {
/* Something went wrong. Issue a warning about the
@@ -718,3 +1703,25 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
complete(&ctx->completion);
}
}
+
+int init_hash_filter(struct adapter *adap)
+{
+ /* On T6, verify the necessary register configs and warn the user in
+ * case of improper config
+ */
+ if (is_t6(adap->params.chip)) {
+ if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4)
+ goto err;
+
+ if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4)
+ goto err;
+ } else {
+ dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
+ return -EINVAL;
+ }
+ adap->params.hash_filter = 1;
+ return 0;
+err:
+ dev_warn(adap->pdev_dev, "Invalid hash filter config!\n");
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
index 23742cb1c69f..8db5fca6dcc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -37,7 +37,12 @@
#include "t4_msg.h"
+#define WORD_MASK 0xffffffff
+
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
void clear_filter(struct adapter *adap, struct filter_entry *f);
int set_filter_wr(struct adapter *adapter, int fidx);
@@ -45,4 +50,7 @@ int delete_filter(struct adapter *adapter, unsigned int fidx);
int writable_filter(struct filter_entry *f);
void clear_all_filters(struct adapter *adapter);
+int init_hash_filter(struct adapter *adap);
+bool is_filter_exact_match(struct adapter *adap,
+ struct ch_filter_specification *fs);
#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 92d9d795d874..6f900ffe25cc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -77,9 +77,12 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
+#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -280,7 +283,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
else {
#ifdef CONFIG_CHELSIO_T4_DCB
if (cxgb4_dcb_enabled(dev)) {
- cxgb4_dcb_state_init(dev);
+ cxgb4_dcb_reset(dev);
dcb_tx_queue_prio_enable(dev, false);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
@@ -561,10 +564,22 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
+ } else if (opcode == CPL_SMT_WRITE_RPL) {
+ const struct cpl_smt_write_rpl *p = (void *)rsp;
+
+ do_smt_write_rpl(q->adap, p);
} else if (opcode == CPL_SET_TCB_RPL) {
const struct cpl_set_tcb_rpl *p = (void *)rsp;
filter_rpl(q->adap, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (void *)rsp;
+
+ hash_filter_rpl(q->adap, p);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (void *)rsp;
+
+ hash_del_filter_rpl(q->adap, p);
} else
dev_err(q->adap->pdev_dev,
"unexpected CPL %#x on FW event queue\n", opcode);
@@ -1637,7 +1652,7 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct adapter *adap = pci_get_drvdata(pdev);
spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, v4, v6);
+ t4_tp_get_tcp_stats(adap, v4, v6, false);
spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
@@ -2303,10 +2318,16 @@ static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ int ret;
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
+ ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ cxgb4_dcb_reset(dev);
+ dcb_tx_queue_prio_enable(dev, false);
+#endif
+ return ret;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
@@ -2873,11 +2894,28 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static int cxgb_setup_tc_flower(struct net_device *dev,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return cxgb4_tc_flower_replace(dev, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return cxgb4_tc_flower_destroy(dev, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return cxgb4_tc_flower_stats(dev, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
struct tc_cls_u32_offload *cls_u32)
{
- if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
- cls_u32->common.chain_index)
+ if (cls_u32->common.chain_index)
return -EOPNOTSUPP;
switch (cls_u32->command) {
@@ -2891,9 +2929,10 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev,
}
}
-static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
+ struct net_device *dev = cb_priv;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
@@ -2904,9 +2943,45 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
return -EINVAL;
}
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
switch (type) {
case TC_SETUP_CLSU32:
return cxgb_setup_tc_cls_u32(dev, type_data);
+ case TC_SETUP_CLSFLOWER:
+ return cxgb_setup_tc_flower(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cxgb_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
+ pi, dev);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return cxgb_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3876,6 +3951,16 @@ static int adap_init0(struct adapter *adap)
1, params, val);
adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
+ /* See if FW supports FW_FILTER2 work request */
+ if (is_t4(adap->params.chip)) {
+ adap->params.filter2_wr_support = 0;
+ } else {
+ params[0] = FW_PARAM_DEV(FILTER2_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
+ }
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -3889,7 +3974,8 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
- if (caps_cmd.ofldcaps) {
+ if (caps_cmd.ofldcaps ||
+ (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
/* query offload-related parameters */
params[0] = FW_PARAM_DEV(NTID);
params[1] = FW_PARAM_PFVF(SERVER_START);
@@ -3926,8 +4012,13 @@ static int adap_init0(struct adapter *adap)
adap->vres.ddp.size = val[4] - val[3] + 1;
adap->params.ofldq_wr_cred = val[5];
- adap->params.offload = 1;
- adap->num_ofld_uld += 1;
+ if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
+ if (init_hash_filter(adap) < 0)
+ goto bye;
+ } else {
+ adap->params.offload = 1;
+ adap->num_ofld_uld += 1;
+ }
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -4048,7 +4139,7 @@ static int adap_init0(struct adapter *adap)
}
t4_init_sge_params(adap);
adap->flags |= FW_OK;
- t4_init_tp_params(adap);
+ t4_init_tp_params(adap, true);
return 0;
/*
@@ -4612,9 +4703,11 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
+ kvfree(adapter->smt);
kvfree(adapter->l2t);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_flower(adapter);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
@@ -4995,7 +5088,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 81 - 9600 */
- netdev->min_mtu = 81;
+ netdev->min_mtu = 81; /* accommodate SACK */
netdev->max_mtu = MAX_MTU;
netdev->netdev_ops = &cxgb4_netdev_ops;
@@ -5006,6 +5099,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
cxgb4_set_ethtool_ops(netdev);
}
+ cxgb4_init_ethtool_dump(adapter);
+
pci_set_drvdata(pdev, adapter);
if (adapter->flags & FW_OK) {
@@ -5035,6 +5130,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
+ adapter->smt = t4_init_smt();
+ if (!adapter->smt) {
+ /* We tolerate a lack of SMT, giving up some functionality */
+ dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
+ }
+
adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */
@@ -5083,9 +5184,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->tc_u32)
dev_warn(&pdev->dev,
"could not offload tc u32, continuing\n");
+
+ if (cxgb4_init_tc_flower(adapter))
+ dev_warn(&pdev->dev,
+ "could not offload tc flower, continuing\n");
}
- if (is_offload(adapter)) {
+ if (is_offload(adapter) || is_hashfilter(adapter)) {
if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
u32 hash_base, hash_reg;
@@ -5254,6 +5359,8 @@ static void remove_one(struct pci_dev *pdev)
return;
}
+ adapter->flags |= SHUTTING_DOWN;
+
if (adapter->pf == 4) {
int i;
@@ -5339,6 +5446,8 @@ static void shutdown_one(struct pci_dev *pdev)
return;
}
+ adapter->flags |= SHUTTING_DOWN;
+
if (adapter->pf == 4) {
int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
new file mode 100644
index 000000000000..d4a548a6a55c
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -0,0 +1,876 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "cxgb4.h"
+#include "cxgb4_filter.h"
+#include "cxgb4_tc_flower.h"
+
+#define STATS_CHECK_PERIOD (HZ / 2)
+
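+/* Map each supported pedit field to its size and to the offset of the
+ * corresponding member inside struct ch_filter_specification, so that
+ * offload_pedit() can copy the rewritten value straight into the filter
+ * spec.
+ */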
+struct ch_tc_pedit_fields pedits[] = {
+ PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
+ PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
+ PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
+ PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
+ PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
+ PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
+ PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
+ PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
+ PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
+ PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
+ PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
+ PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
+ PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
+ PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
+ PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
+ PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
+ PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
+ PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
+};
+
+static struct ch_tc_flower_entry *allocate_flower_entry(void)
+{
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+
+ /* only initialize the lock if the allocation succeeded */
+ if (new)
+ spin_lock_init(&new->lock);
+ return new;
+}
+
+/* Must be called with either RTNL or rcu_read_lock */
+static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
+ unsigned long flower_cookie)
+{
+ return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
+ adap->flower_ht_params);
+}
+
+static void cxgb4_process_flow_match(struct net_device *dev,
+ struct tc_cls_flower_offload *cls,
+ struct ch_filter_specification *fs)
+{
+ u16 addr_type = 0;
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ cls->key);
+
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->mask);
+ u16 ethtype_key = ntohs(key->n_proto);
+ u16 ethtype_mask = ntohs(mask->n_proto);
+
+ if (ethtype_key == ETH_P_ALL) {
+ ethtype_key = 0;
+ ethtype_mask = 0;
+ }
+
+ fs->val.ethtype = ethtype_key;
+ fs->mask.ethtype = ethtype_mask;
+ fs->val.proto = key->ip_proto;
+ fs->mask.proto = mask->ip_proto;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ cls->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ cls->mask);
+ fs->type = 0;
+ memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
+ memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
+ memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
+ memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+
+ /* also initialize nat_lip/fip to same values */
+ memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
+ memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
+
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ cls->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ cls->mask);
+
+ fs->type = 1;
+ memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
+ memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
+ memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
+ memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+
+ /* also initialize nat_lip/fip to same values */
+ memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
+ memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key, *mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ cls->mask);
+ fs->val.lport = cpu_to_be16(key->dst);
+ fs->mask.lport = cpu_to_be16(mask->dst);
+ fs->val.fport = cpu_to_be16(key->src);
+ fs->mask.fport = cpu_to_be16(mask->src);
+
+ /* also initialize nat_lport/fport to same values */
+ fs->nat_lport = cpu_to_be16(key->dst);
+ fs->nat_fport = cpu_to_be16(key->src);
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *key, *mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ cls->mask);
+ fs->val.tos = key->tos;
+ fs->mask.tos = mask->tos;
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key, *mask;
+ u16 vlan_tci, vlan_tci_mask;
+
+ key = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ cls->key);
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ cls->mask);
+ vlan_tci = key->vlan_id | (key->vlan_priority <<
+ VLAN_PRIO_SHIFT);
+ vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
+ VLAN_PRIO_SHIFT);
+ fs->val.ivlan = cpu_to_be16(vlan_tci);
+ fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
+
+ /* Chelsio adapters use the ivlan_vld bit to match VLAN (802.1Q)
+ * packets, and when a VLAN tag is present the ethtype field is
+ * matched against the ethtype of the inner header, i.e. the header
+ * following the VLAN header. So if TC supplies an ethtype of
+ * 802.1Q, set ivlan_vld and clear the ethtype; otherwise the
+ * hardware would try to match the supplied 802.1Q ethtype against
+ * the inner header's ethtype.
+ */
+ if (fs->val.ethtype == ETH_P_8021Q) {
+ fs->val.ivlan_vld = 1;
+ fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ }
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs->val.iport = netdev2pinfo(dev)->port_id;
+ fs->mask.iport = ~0;
+}
+
+static int cxgb4_validate_flow_match(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ u16 ethtype_mask = 0;
+ u16 ethtype_key = 0;
+
+ if (cls->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IP))) {
+ netdev_warn(dev, "Unsupported key used: 0x%x\n",
+ cls->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ cls->mask);
+ ethtype_key = ntohs(key->n_proto);
+ ethtype_mask = ntohs(mask->n_proto);
+ }
+
+ if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ u16 eth_ip_type = ethtype_key & ethtype_mask;
+ struct flow_dissector_key_ip *mask;
+
+ if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
+ netdev_err(dev, "IP Key supported only with IPv4/v6");
+ return -EINVAL;
+ }
+
+ mask = skb_flow_dissector_target(cls->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ cls->mask);
+ if (mask->ttl) {
+ netdev_warn(dev, "ttl match unsupported for offload");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
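+/* Copy a pedit "set" value into the filter specification: look up the
+ * field in pedits[] and write the bits being set (those not covered by
+ * the pedit mask) at the recorded offset.
+ */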
+static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
+ u8 field)
+{
+ u32 set_val = val & ~mask;
+ u32 offset = 0;
+ u8 size = 1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pedits); i++) {
+ if (pedits[i].field == field) {
+ offset = pedits[i].offset;
+ size = pedits[i].size;
+ break;
+ }
+ }
+ memcpy((u8 *)fs + offset, &set_val, size);
+}
+
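+/* Translate a single pedit key into filter-spec rewrites. MAC edits set
+ * the newdmac/newsmac flags, while IP address and L4 port edits populate
+ * the NAT fields and force nat_mode to NAT_MODE_ALL.
+ */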
+static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
+ u32 mask, u32 offset, u8 htype)
+{
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ switch (offset) {
+ case PEDIT_ETH_DMAC_31_0:
+ fs->newdmac = 1;
+ offload_pedit(fs, val, mask, ETH_DMAC_31_0);
+ break;
+ case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+ if (~mask & PEDIT_ETH_DMAC_MASK)
+ offload_pedit(fs, val, mask, ETH_DMAC_47_32);
+ else
+ offload_pedit(fs, val >> 16, mask >> 16,
+ ETH_SMAC_15_0);
+ break;
+ case PEDIT_ETH_SMAC_47_16:
+ fs->newsmac = 1;
+ offload_pedit(fs, val, mask, ETH_SMAC_47_16);
+ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ offload_pedit(fs, val, mask, IP4_SRC);
+ break;
+ case PEDIT_IP4_DST:
+ offload_pedit(fs, val, mask, IP4_DST);
+ }
+ fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ offload_pedit(fs, val, mask, IP6_SRC_31_0);
+ break;
+ case PEDIT_IP6_SRC_63_32:
+ offload_pedit(fs, val, mask, IP6_SRC_63_32);
+ break;
+ case PEDIT_IP6_SRC_95_64:
+ offload_pedit(fs, val, mask, IP6_SRC_95_64);
+ break;
+ case PEDIT_IP6_SRC_127_96:
+ offload_pedit(fs, val, mask, IP6_SRC_127_96);
+ break;
+ case PEDIT_IP6_DST_31_0:
+ offload_pedit(fs, val, mask, IP6_DST_31_0);
+ break;
+ case PEDIT_IP6_DST_63_32:
+ offload_pedit(fs, val, mask, IP6_DST_63_32);
+ break;
+ case PEDIT_IP6_DST_95_64:
+ offload_pedit(fs, val, mask, IP6_DST_95_64);
+ break;
+ case PEDIT_IP6_DST_127_96:
+ offload_pedit(fs, val, mask, IP6_DST_127_96);
+ }
+ fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ offload_pedit(fs, cpu_to_be32(val) >> 16,
+ cpu_to_be32(mask) >> 16,
+ TCP_SPORT);
+ else
+ offload_pedit(fs, cpu_to_be32(val),
+ cpu_to_be32(mask), TCP_DPORT);
+ }
+ fs->nat_mode = NAT_MODE_ALL;
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ offload_pedit(fs, cpu_to_be32(val) >> 16,
+ cpu_to_be32(mask) >> 16,
+ UDP_SPORT);
+ else
+ offload_pedit(fs, cpu_to_be32(val),
+ cpu_to_be32(mask), UDP_DPORT);
+ }
+ fs->nat_mode = NAT_MODE_ALL;
+ }
+}
+
+static void cxgb4_process_flow_actions(struct net_device *in,
+ struct tc_cls_flower_offload *cls,
+ struct ch_filter_specification *fs)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_ok(a)) {
+ fs->action = FILTER_PASS;
+ } else if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *out = __dev_get_by_index(dev_net(in),
+ ifindex);
+ struct port_info *pi = netdev_priv(out);
+
+ fs->action = FILTER_SWITCH;
+ fs->eport = pi->port_id;
+ } else if (is_tcf_vlan(a)) {
+ u32 vlan_action = tcf_vlan_action(a);
+ u8 prio = tcf_vlan_push_prio(a);
+ u16 vid = tcf_vlan_push_vid(a);
+ u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
+
+ switch (vlan_action) {
+ case TCA_VLAN_ACT_POP:
+ fs->newvlan |= VLAN_REMOVE;
+ break;
+ case TCA_VLAN_ACT_PUSH:
+ fs->newvlan |= VLAN_INSERT;
+ fs->vlan = vlan_tci;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ fs->newvlan |= VLAN_REWRITE;
+ fs->vlan = vlan_tci;
+ break;
+ default:
+ break;
+ }
+ } else if (is_tcf_pedit(a)) {
+ u32 mask, val, offset;
+ int nkeys, i;
+ u8 htype;
+
+ nkeys = tcf_pedit_nkeys(a);
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ mask = tcf_pedit_mask(a, i);
+ val = tcf_pedit_val(a, i);
+ offset = tcf_pedit_offset(a, i);
+
+ process_pedit_field(fs, val, mask, offset,
+ htype);
+ }
+ }
+ }
+}
+
+static bool valid_l4_mask(u32 mask)
+{
+ u16 hi, lo;
+
+ /* Either the upper 16-bits (SPORT) OR the lower
+ * 16-bits (DPORT) can be set, but NOT BOTH.
+ */
+ hi = (mask >> 16) & 0xFFFF;
+ lo = mask & 0xFFFF;
+
+ return !(hi && lo);
+}
+
+static bool valid_pedit_action(struct net_device *dev,
+ const struct tc_action *a)
+{
+ u32 mask, offset;
+ u8 cmd, htype;
+ int nkeys, i;
+
+ nkeys = tcf_pedit_nkeys(a);
+ for (i = 0; i < nkeys; i++) {
+ htype = tcf_pedit_htype(a, i);
+ cmd = tcf_pedit_cmd(a, i);
+ mask = tcf_pedit_mask(a, i);
+ offset = tcf_pedit_offset(a, i);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
+ netdev_err(dev, "%s: Unsupported pedit cmd\n",
+ __func__);
+ return false;
+ }
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ switch (offset) {
+ case PEDIT_ETH_DMAC_31_0:
+ case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+ case PEDIT_ETH_SMAC_47_16:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ switch (offset) {
+ case PEDIT_IP4_SRC:
+ case PEDIT_IP4_DST:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ switch (offset) {
+ case PEDIT_IP6_SRC_31_0:
+ case PEDIT_IP6_SRC_63_32:
+ case PEDIT_IP6_SRC_95_64:
+ case PEDIT_IP6_SRC_127_96:
+ case PEDIT_IP6_DST_31_0:
+ case PEDIT_IP6_DST_63_32:
+ case PEDIT_IP6_DST_95_64:
+ case PEDIT_IP6_DST_127_96:
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ switch (offset) {
+ case PEDIT_TCP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
+ __func__);
+ return false;
+ }
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ switch (offset) {
+ case PEDIT_UDP_SPORT_DPORT:
+ if (!valid_l4_mask(~mask)) {
+ netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
+ __func__);
+ return false;
+ }
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit field\n",
+ __func__);
+ return false;
+ }
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported pedit type\n",
+ __func__);
+ return false;
+ }
+ }
+ return true;
+}
+
+static int cxgb4_validate_flow_actions(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ const struct tc_action *a;
+ bool act_redir = false;
+ bool act_pedit = false;
+ bool act_vlan = false;
+ LIST_HEAD(actions);
+
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_ok(a)) {
+ /* Do nothing */
+ } else if (is_tcf_gact_shot(a)) {
+ /* Do nothing */
+ } else if (is_tcf_mirred_egress_redirect(a)) {
+ struct adapter *adap = netdev2adap(dev);
+ struct net_device *n_dev;
+ unsigned int i, ifindex;
+ bool found = false;
+
+ ifindex = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (ifindex == n_dev->ifindex) {
+ found = true;
+ break;
+ }
+ }
+
+ /* If interface doesn't belong to our hw, then
+ * the provided output port is not valid
+ */
+ if (!found) {
+ netdev_err(dev, "%s: Out port invalid\n",
+ __func__);
+ return -EINVAL;
+ }
+ act_redir = true;
+ } else if (is_tcf_vlan(a)) {
+ u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
+ u32 vlan_action = tcf_vlan_action(a);
+
+ switch (vlan_action) {
+ case TCA_VLAN_ACT_POP:
+ break;
+ case TCA_VLAN_ACT_PUSH:
+ case TCA_VLAN_ACT_MODIFY:
+ if (proto != ETH_P_8021Q) {
+ netdev_err(dev, "%s: Unsupported vlan proto\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ netdev_err(dev, "%s: Unsupported vlan action\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+ act_vlan = true;
+ } else if (is_tcf_pedit(a)) {
+ bool pedit_valid = valid_pedit_action(dev, a);
+
+ if (!pedit_valid)
+ return -EOPNOTSUPP;
+ act_pedit = true;
+ } else {
+ netdev_err(dev, "%s: Unsupported action\n", __func__);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if ((act_pedit || act_vlan) && !act_redir) {
+ netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxgb4_tc_flower_replace(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_tc_flower_entry *ch_flower;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int fidx;
+ int ret;
+
+ if (cxgb4_validate_flow_actions(dev, cls))
+ return -EOPNOTSUPP;
+
+ if (cxgb4_validate_flow_match(dev, cls))
+ return -EOPNOTSUPP;
+
+ ch_flower = allocate_flower_entry();
+ if (!ch_flower) {
+ netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
+ return -ENOMEM;
+ }
+
+ fs = &ch_flower->fs;
+ fs->hitcnts = 1;
+ cxgb4_process_flow_match(dev, cls, fs);
+ cxgb4_process_flow_actions(dev, cls, fs);
+
+ fs->hash = is_filter_exact_match(adap, fs);
+ if (fs->hash) {
+ fidx = 0;
+ } else {
+ fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
+ if (fidx < 0) {
+ netdev_err(dev, "%s: No fidx for offload.\n", __func__);
+ ret = -ENOMEM;
+ goto free_entry;
+ }
+ }
+
+ init_completion(&ctx.completion);
+ ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
+ if (ret) {
+ netdev_err(dev, "%s: filter creation err %d\n",
+ __func__, ret);
+ goto free_entry;
+ }
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto free_entry;
+ }
+
+ ret = ctx.result;
+ /* Check if hw returned error for filter creation */
+ if (ret) {
+ netdev_err(dev, "%s: filter creation err %d\n",
+ __func__, ret);
+ goto free_entry;
+ }
+
+ ch_flower->tc_flower_cookie = cls->cookie;
+ ch_flower->filter_id = ctx.tid;
+ ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+ if (ret)
+ goto del_filter;
+
+ return 0;
+
+del_filter:
+ cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
+
+free_entry:
+ kfree(ch_flower);
+ return ret;
+}
+
+int cxgb4_tc_flower_destroy(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_tc_flower_entry *ch_flower;
+ int ret;
+
+ ch_flower = ch_flower_lookup(adap, cls->cookie);
+ if (!ch_flower)
+ return -ENOENT;
+
+ ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
+ if (ret)
+ goto err;
+
+ ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+ if (ret) {
+ netdev_err(dev, "Flow remove from rhashtable failed");
+ goto err;
+ }
+ kfree_rcu(ch_flower, rcu);
+
+err:
+ return ret;
+}
+
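+/* Periodic worker: walk the flower hash table and poll the hardware hit
+ * counters so that each entry's last_used timestamp stays current even
+ * when user space does not query stats.
+ */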
+static void ch_flower_stats_handler(struct work_struct *work)
+{
+ struct adapter *adap = container_of(work, struct adapter,
+ flower_stats_work);
+ struct ch_tc_flower_entry *flower_entry;
+ struct ch_tc_flower_stats *ofld_stats;
+ struct rhashtable_iter iter;
+ u64 packets;
+ u64 bytes;
+ int ret;
+
+ rhashtable_walk_enter(&adap->flower_tbl, &iter);
+ do {
+ flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
+ if (IS_ERR(flower_entry))
+ goto walk_stop;
+
+ while ((flower_entry = rhashtable_walk_next(&iter)) &&
+ !IS_ERR(flower_entry)) {
+ ret = cxgb4_get_filter_counters(adap->port[0],
+ flower_entry->filter_id,
+ &packets, &bytes,
+ flower_entry->fs.hash);
+ if (!ret) {
+ spin_lock(&flower_entry->lock);
+ ofld_stats = &flower_entry->stats;
+
+ if (ofld_stats->prev_packet_count != packets) {
+ ofld_stats->prev_packet_count = packets;
+ ofld_stats->last_used = jiffies;
+ }
+ spin_unlock(&flower_entry->lock);
+ }
+ }
+walk_stop:
+ rhashtable_walk_stop(&iter);
+ } while (flower_entry == ERR_PTR(-EAGAIN));
+ rhashtable_walk_exit(&iter);
+ mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+}
+
+static void ch_flower_stats_cb(struct timer_list *t)
+{
+ struct adapter *adap = from_timer(adap, t, flower_stats_timer);
+
+ schedule_work(&adap->flower_stats_work);
+}
+
+int cxgb4_tc_flower_stats(struct net_device *dev,
+ struct tc_cls_flower_offload *cls)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct ch_tc_flower_stats *ofld_stats;
+ struct ch_tc_flower_entry *ch_flower;
+ u64 packets;
+ u64 bytes;
+ int ret;
+
+ ch_flower = ch_flower_lookup(adap, cls->cookie);
+ if (!ch_flower) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
+ &packets, &bytes,
+ ch_flower->fs.hash);
+ if (ret < 0)
+ goto err;
+
+ spin_lock_bh(&ch_flower->lock);
+ ofld_stats = &ch_flower->stats;
+ if (ofld_stats->packet_count != packets) {
+ if (ofld_stats->prev_packet_count != packets)
+ ofld_stats->last_used = jiffies;
+ tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
+ packets - ofld_stats->packet_count,
+ ofld_stats->last_used);
+
+ ofld_stats->packet_count = packets;
+ ofld_stats->byte_count = bytes;
+ ofld_stats->prev_packet_count = packets;
+ }
+ spin_unlock_bh(&ch_flower->lock);
+ return 0;
+
+err:
+ return ret;
+}
+
+static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
+ .nelem_hint = 384,
+ .head_offset = offsetof(struct ch_tc_flower_entry, node),
+ .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
+ .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
+ .max_size = 524288,
+ .min_size = 512,
+ .automatic_shrinking = true
+};
+
+int cxgb4_init_tc_flower(struct adapter *adap)
+{
+ int ret;
+
+ adap->flower_ht_params = cxgb4_tc_flower_ht_params;
+ ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
+ timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
+ mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
+ return 0;
+}
+
+void cxgb4_cleanup_tc_flower(struct adapter *adap)
+{
+ if (adap->flower_stats_timer.function)
+ del_timer_sync(&adap->flower_stats_timer);
+ cancel_work_sync(&adap->flower_stats_work);
+ rhashtable_destroy(&adap->flower_tbl);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
new file mode 100644
index 000000000000..050c8a50ae41
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -0,0 +1,120 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_FLOWER_H
+#define __CXGB4_TC_FLOWER_H
+
+#include <net/pkt_cls.h>
+
+struct ch_tc_flower_stats {
+ u64 prev_packet_count;
+ u64 packet_count;
+ u64 byte_count;
+ u64 last_used;
+};
+
+struct ch_tc_flower_entry {
+ struct ch_filter_specification fs;
+ struct ch_tc_flower_stats stats;
+ unsigned long tc_flower_cookie;
+ struct rhash_head node;
+ struct rcu_head rcu;
+ spinlock_t lock; /* lock for stats */
+ u32 filter_id;
+};
+
+enum {
+ ETH_DMAC_31_0, /* dmac bits 0.. 31 */
+ ETH_DMAC_47_32, /* dmac bits 32..47 */
+ ETH_SMAC_15_0, /* smac bits 0.. 15 */
+ ETH_SMAC_47_16, /* smac bits 16..47 */
+
+ IP4_SRC, /* 32-bit IPv4 src */
+ IP4_DST, /* 32-bit IPv4 dst */
+
+ IP6_SRC_31_0, /* src bits 0.. 31 */
+ IP6_SRC_63_32, /* src bits 63.. 32 */
+ IP6_SRC_95_64, /* src bits 95.. 64 */
+ IP6_SRC_127_96, /* src bits 127..96 */
+
+ IP6_DST_31_0, /* dst bits 0.. 31 */
+ IP6_DST_63_32, /* dst bits 63.. 32 */
+ IP6_DST_95_64, /* dst bits 95.. 64 */
+ IP6_DST_127_96, /* dst bits 127..96 */
+
+ TCP_SPORT, /* 16-bit TCP sport */
+ TCP_DPORT, /* 16-bit TCP dport */
+
+ UDP_SPORT, /* 16-bit UDP sport */
+ UDP_DPORT, /* 16-bit UDP dport */
+};
+
+struct ch_tc_pedit_fields {
+ u8 field;
+ u8 size;
+ u32 offset;
+};
+
+#define PEDIT_FIELDS(type, field, size, fs_field, offset) \
+ { type## field, size, \
+ offsetof(struct ch_filter_specification, fs_field) + (offset) }
+
+#define PEDIT_ETH_DMAC_MASK 0xffff
+#define PEDIT_TCP_UDP_SPORT_MASK 0xffff
+#define PEDIT_ETH_DMAC_31_0 0x0
+#define PEDIT_ETH_DMAC_47_32_SMAC_15_0 0x4
+#define PEDIT_ETH_SMAC_47_16 0x8
+#define PEDIT_IP4_SRC 0xC
+#define PEDIT_IP4_DST 0x10
+#define PEDIT_IP6_SRC_31_0 0x8
+#define PEDIT_IP6_SRC_63_32 0xC
+#define PEDIT_IP6_SRC_95_64 0x10
+#define PEDIT_IP6_SRC_127_96 0x14
+#define PEDIT_IP6_DST_31_0 0x18
+#define PEDIT_IP6_DST_63_32 0x1C
+#define PEDIT_IP6_DST_95_64 0x20
+#define PEDIT_IP6_DST_127_96 0x24
+#define PEDIT_TCP_SPORT_DPORT 0x0
+#define PEDIT_UDP_SPORT_DPORT 0x0
+
+int cxgb4_tc_flower_replace(struct net_device *dev,
+ struct tc_cls_flower_offload *cls);
+int cxgb4_tc_flower_destroy(struct net_device *dev,
+ struct tc_cls_flower_offload *cls);
+int cxgb4_tc_flower_stats(struct net_device *dev,
+ struct tc_cls_flower_offload *cls);
+
+int cxgb4_init_tc_flower(struct adapter *adap);
+void cxgb4_cleanup_tc_flower(struct adapter *adap);
+#endif /* __CXGB4_TC_FLOWER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 48970ba08bdc..cd0cd13a964d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -380,7 +380,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
return -EINVAL;
}
- ret = cxgb4_del_filter(dev, filter_id);
+ ret = cxgb4_del_filter(dev, filter_id, NULL);
if (ret)
goto out;
@@ -399,7 +399,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
if (!test_bit(j, link->tid_map))
continue;
- ret = __cxgb4_del_filter(dev, j, NULL);
+ ret = __cxgb4_del_filter(dev, j, NULL, NULL);
if (ret)
goto out;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 84541fce94c5..08e709ab6dd4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -212,14 +212,19 @@ struct filter_ctx {
struct ch_filter_specification;
+int cxgb4_get_free_ftid(struct net_device *dev, int family);
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
struct filter_ctx *ctx);
int cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs);
-int cxgb4_del_filter(struct net_device *dev, int filter_id);
+int cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs);
+int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
+ u64 *hitcnt, u64 *bytecnt, bool hash);
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index f7ef8871dd0b..1817a0307d26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -422,7 +422,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
u8 lport;
u16 vlan;
struct l2t_entry *e;
- int addr_len = neigh->tbl->key_len;
+ unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *)neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = addr_hash(d, addr, addr_len, ifidx);
@@ -536,7 +536,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
struct l2t_entry *e;
struct sk_buff_head *arpq = NULL;
struct l2t_data *d = adap->l2t;
- int addr_len = neigh->tbl->key_len;
+ unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = addr_hash(d, addr, addr_len, ifidx);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4ef68f69b58c..922f2f937789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb);
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
@@ -1537,7 +1537,13 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN;
+ struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
+ unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
+
+ if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ return skb->len <= SGE_MAX_WR_LEN;
+ else
+ return skb->len <= MAX_IMM_TX_PKT_LEN;
}
/**
@@ -2583,11 +2589,11 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
return t4_intr_intx;
}
-static void sge_rx_timer_cb(unsigned long data)
+static void sge_rx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i;
- struct adapter *adap = (struct adapter *)data;
+ struct adapter *adap = from_timer(adap, t, sge.rx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -2620,11 +2626,11 @@ done:
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
-static void sge_tx_timer_cb(unsigned long data)
+static void sge_tx_timer_cb(struct timer_list *t)
{
unsigned long m;
unsigned int i, budget;
- struct adapter *adap = (struct adapter *)data;
+ struct adapter *adap = from_timer(adap, t, sge.tx_timer);
struct sge *s = &adap->sge;
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
@@ -3458,8 +3464,8 @@ int t4_sge_init(struct adapter *adap)
/* Set up timers used for recurring callbacks to process RX and TX
* administrative tasks.
*/
- setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
- setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+ timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
+ timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
spin_lock_init(&s->intrq_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
new file mode 100644
index 000000000000..7b2207a2a130
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -0,0 +1,247 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "cxgb4.h"
+#include "smt.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "t4_regs.h"
+#include "t4_values.h"
+
+struct smt_data *t4_init_smt(void)
+{
+ unsigned int smt_size;
+ struct smt_data *s;
+ int i;
+
+ smt_size = SMT_SIZE;
+
+ s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
+ GFP_KERNEL);
+ if (!s)
+ return NULL;
+ s->smt_size = smt_size;
+ rwlock_init(&s->lock);
+ for (i = 0; i < s->smt_size; ++i) {
+ s->smtab[i].idx = i;
+ s->smtab[i].state = SMT_STATE_UNUSED;
+ memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
+ spin_lock_init(&s->smtab[i].lock);
+ atomic_set(&s->smtab[i].refcnt, 0);
+ }
+ return s;
+}
+
+static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
+{
+ struct smt_entry *first_free = NULL;
+ struct smt_entry *e, *end;
+
+ for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
+ if (atomic_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (e->state == SMT_STATE_SWITCHING) {
+ /* This entry is actually in use. See if we can
+ * re-use it?
+ */
+ if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
+ goto found_reuse;
+ }
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto found;
+ }
+ return NULL;
+
+found:
+ e->state = SMT_STATE_UNUSED;
+
+found_reuse:
+ return e;
+}
+
+static void t4_smte_free(struct smt_entry *e)
+{
+ spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ e->state = SMT_STATE_UNUSED;
+ }
+ spin_unlock_bh(&e->lock);
+}
+
+/**
+ * cxgb4_smt_release - release an SMT entry
+ * @e: smt entry to release
+ *
+ * Releases the reference count and frees up an SMT entry from the SMT table
+ */
+void cxgb4_smt_release(struct smt_entry *e)
+{
+ if (atomic_dec_and_test(&e->refcnt))
+ t4_smte_free(e);
+}
+EXPORT_SYMBOL(cxgb4_smt_release);
+
+void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
+{
+ unsigned int smtidx = TID_TID_G(GET_TID(rpl));
+ struct smt_data *s = adap->smt;
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ struct smt_entry *e = &s->smtab[smtidx];
+
+ dev_err(adap->pdev_dev,
+ "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ rpl->status, smtidx);
+ spin_lock(&e->lock);
+ e->state = SMT_STATE_ERROR;
+ spin_unlock(&e->lock);
+ return;
+ }
+}
+
+static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
+{
+ struct cpl_t6_smt_write_req *t6req;
+ struct smt_data *s = adapter->smt;
+ struct cpl_smt_write_req *req;
+ struct sk_buff *skb;
+ int size;
+ u8 row;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ size = sizeof(*req);
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ /* Source MAC Table (SMT) contains 256 SMAC entries
+ * organized in 128 rows of 2 entries each.
+ */
+ req = (struct cpl_smt_write_req *)__skb_put(skb, size);
+ INIT_TP_WR(req, 0);
+
+ /* Each row contains an SMAC pair.
+ * LSB selects the SMAC entry within a row
+ */
+ row = (e->idx >> 1);
+ if (e->idx & 1) {
+ req->pfvf1 = 0x0;
+ memcpy(req->src_mac1, e->src_mac, ETH_ALEN);
+
+ /* fill pfvf0/src_mac0 with entry
+ * at prev index from smt-tab.
+ */
+ req->pfvf0 = 0x0;
+ memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
+ ETH_ALEN);
+ } else {
+ req->pfvf0 = 0x0;
+ memcpy(req->src_mac0, e->src_mac, ETH_ALEN);
+
+ /* fill pfvf1/src_mac1 with entry
+ * at next index from smt-tab
+ */
+ req->pfvf1 = 0x0;
+ memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
+ ETH_ALEN);
+ }
+ } else {
+ size = sizeof(*t6req);
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ /* Source MAC Table (SMT) contains 256 SMAC entries */
+ t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
+ INIT_TP_WR(t6req, 0);
+ req = (struct cpl_smt_write_req *)t6req;
+
+ /* fill pfvf0/src_mac0 from smt-tab */
+ req->pfvf0 = 0x0;
+ memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
+ row = e->idx;
+ }
+
+ OPCODE_TID(req) =
+ htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
+ TID_QID_V(adapter->sge.fw_evtq.abs_id)));
+ req->params = htonl(SMTW_NORPL_V(0) |
+ SMTW_IDX_V(row) |
+ SMTW_OVLAN_IDX_V(0));
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
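To make the T4/T5 pairing above concrete: an entry with e->idx = 5 sits in row = 5 >> 1 = 2, and since the index is odd it fills the src_mac1/pfvf1 slot of the request while src_mac0/pfvf0 are refreshed from the neighbouring entry at index 4; an even index does the mirror image with the entry at index + 1.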
+static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
+ u8 *smac)
+{
+ struct smt_data *s = adap->smt;
+ struct smt_entry *e;
+
+ write_lock_bh(&s->lock);
+ e = find_or_alloc_smte(s, smac);
+ if (e) {
+ spin_lock(&e->lock);
+ if (!atomic_read(&e->refcnt)) {
+ atomic_set(&e->refcnt, 1);
+ e->state = SMT_STATE_SWITCHING;
+ e->pfvf = pfvf;
+ memcpy(e->src_mac, smac, ETH_ALEN);
+ write_smt_entry(adap, e);
+ } else {
+ atomic_inc(&e->refcnt);
+ }
+ spin_unlock(&e->lock);
+ }
+ write_unlock_bh(&s->lock);
+ return e;
+}
+
+/**
+ * cxgb4_smt_alloc_switching - allocate an SMT entry for a switching rule
+ * @dev: net_device pointer
+ * @smac: MAC address to add to SMT
+ *
+ * Allocates an SMT entry to be used by the switching rule of a filter.
+ * Returns a pointer to the SMT entry created.
+ */
+struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ return t4_smt_alloc_switching(adap, 0x0, smac);
+}
+EXPORT_SYMBOL(cxgb4_smt_alloc_switching);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
new file mode 100644
index 000000000000..d6c2cc271398
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -0,0 +1,76 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_SMT_H
+#define __CXGB4_SMT_H
+
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <linux/atomic.h>
+
+struct adapter;
+struct cpl_smt_write_rpl;
+
+/* SMT related handling. Heavily adapted based on l2t ops in l2t.h/l2t.c
+ */
+enum {
+ SMT_STATE_SWITCHING,
+ SMT_STATE_UNUSED,
+ SMT_STATE_ERROR
+};
+
+enum {
+ SMT_SIZE = 256
+};
+
+struct smt_entry {
+ u16 state;
+ u16 idx;
+ u16 pfvf;
+ u8 src_mac[ETH_ALEN];
+ atomic_t refcnt;
+ spinlock_t lock; /* protect smt entry add,removal */
+};
+
+struct smt_data {
+ unsigned int smt_size;
+ rwlock_t lock;
+ struct smt_entry smtab[0];
+};
+
+struct smt_data *t4_init_smt(void);
+struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac);
+void cxgb4_smt_release(struct smt_entry *e);
+void do_smt_write_rpl(struct adapter *p, const struct cpl_smt_write_rpl *rpl);
+#endif /* __CXGB4_SMT_H */
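A rough usage sketch of the SMT interface declared above, assuming a filter-setup path that rewrites the source MAC; the helper names are illustrative only and error handling is minimal.

/* Illustrative only: allocate a switching SMT entry for a source-MAC
 * rewrite, remember it in the caller's state, and release it when the
 * filter is torn down.
 */
static int example_attach_smt(struct net_device *dev, u8 *new_smac,
			      struct smt_entry **smtp)
{
	struct smt_entry *e;

	e = cxgb4_smt_alloc_switching(dev, new_smac);
	if (!e)
		return -ENOMEM;
	*smtp = e;		/* later: use e->idx to program the filter */
	return 0;
}

static void example_detach_smt(struct smt_entry *e)
{
	if (e)
		cxgb4_smt_release(e);	/* drops refcnt, frees when unused */
}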
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index b65ce26ff72f..f63210f15579 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2639,6 +2639,35 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
+ * t4_eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return 31744 - fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ return -EINVAL;
+}
+
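A worked example of the mapping, assuming @fn = 1 and @sz = 0x800 so that A = 2K: physical address 0x200 lies in [0..1K) and maps to 31K + 0x200 = 0x7E00; 0x500 lies in [1K..1K+A) and maps to 31K - A + (0x500 - 1K) = 0x7500; 0x1000 lies in [1K+A..ES) and maps to 0x1000 - 1K - A = 0x400.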
+/**
* t4_seeprom_wp - enable/disable EEPROM write protection
* @adapter: the adapter
* @enable: whether to enable or disable write protection
@@ -5052,23 +5081,26 @@ static unsigned int t4_use_ldst(struct adapter *adap)
}
/**
- * t4_fw_tp_pio_rw - Access TP PIO through LDST
- * @adap: the adapter
- * @vals: where the indirect register values are stored/written
- * @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
- * @rw: Read (1) or Write (0)
+ * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
+ * @adap: the adapter
+ * @cmd: TP fw ldst address space type
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_idx: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
- * Access TP PIO registers through LDST
+ * Access TP indirect registers through LDST
*/
-static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
- unsigned int start_index, unsigned int rw)
+static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
+ unsigned int nregs, unsigned int start_index,
+ unsigned int rw, bool sleep_ok)
{
- int ret, i;
- int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ int ret = 0;
+ unsigned int i;
struct fw_ldst_cmd c;
- for (i = 0 ; i < nregs; i++) {
+ for (i = 0; i < nregs; i++) {
memset(&c, 0, sizeof(c));
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
@@ -5079,26 +5111,147 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
c.u.addrval.addr = cpu_to_be32(start_index + i);
c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
- if (!ret && rw)
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
+ sleep_ok);
+ if (ret)
+ return ret;
+
+ if (rw)
vals[i] = be32_to_cpu(c.u.addrval.val);
}
+ return 0;
+}
+
+/**
+ * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
+ * @adap: the adapter
+ * @reg_addr: Address Register
+ * @reg_data: Data register
+ * @buff: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: READ(1) or WRITE(0)
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Read/Write TP indirect registers through LDST if possible.
+ * Else, use backdoor access
+ **/
+static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
+ u32 *buff, u32 nregs, u32 start_index, int rw,
+ bool sleep_ok)
+{
+ int rc = -EINVAL;
+ int cmd;
+
+ switch (reg_addr) {
+ case TP_PIO_ADDR_A:
+ cmd = FW_LDST_ADDRSPC_TP_PIO;
+ break;
+ case TP_TM_PIO_ADDR_A:
+ cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
+ break;
+ case TP_MIB_INDEX_A:
+ cmd = FW_LDST_ADDRSPC_TP_MIB;
+ break;
+ default:
+ goto indirect_access;
+ }
+
+ if (t4_use_ldst(adap))
+ rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
+ sleep_ok);
+
+indirect_access:
+
+ if (rc) {
+ if (rw)
+ t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
+ start_index);
+ else
+ t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
+ start_index);
+ }
+}
+
+/**
+ * t4_tp_pio_read - Read TP PIO registers
+ * @adap: the adapter
+ * @buff: where the indirect register values are written
+ * @nregs: how many indirect registers to read
+ * @start_index: index of first indirect register to read
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Read TP PIO Registers
+ **/
+void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok)
+{
+ t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
+ start_index, 1, sleep_ok);
+}
+
+/**
+ * t4_tp_pio_write - Write TP PIO registers
+ * @adap: the adapter
+ * @buff: where the indirect register values are stored
+ * @nregs: how many indirect registers to write
+ * @start_index: index of first indirect register to write
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Write TP PIO Registers
+ **/
+static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok)
+{
+ t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
+ start_index, 0, sleep_ok);
+}
+
+/**
+ * t4_tp_tm_pio_read - Read TP TM PIO registers
+ * @adap: the adapter
+ * @buff: where the indirect register values are written
+ * @nregs: how many indirect registers to read
+ * @start_index: index of first indirect register to read
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Read TP TM PIO Registers
+ **/
+void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
+ u32 start_index, bool sleep_ok)
+{
+ t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
+ nregs, start_index, 1, sleep_ok);
+}
+
+/**
+ * t4_tp_mib_read - Read TP MIB registers
+ * @adap: the adapter
+ * @buff: where the indirect register values are written
+ * @nregs: how many indirect registers to read
+ * @start_index: index of first indirect register to read
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Read TP MIB Registers
+ **/
+void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
+ bool sleep_ok)
+{
+ t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
+ start_index, 1, sleep_ok);
}
/**
* t4_read_rss_key - read the global RSS key
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the global 320-bit RSS key.
*/
-void t4_read_rss_key(struct adapter *adap, u32 *key)
+void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
- if (t4_use_ldst(adap))
- t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
- else
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
}
/**
@@ -5106,12 +5259,14 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
* @adap: the adapter
* @key: 10-entry array holding the 320-bit RSS key
* @idx: which RSS key to write
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Writes one of the RSS keys with the given 320-bit value. If @idx is
* 0..15 the corresponding entry in the RSS key table is written,
* otherwise the global RSS key is written.
*/
-void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
+ bool sleep_ok)
{
u8 rss_key_addr_cnt = 16;
u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
@@ -5124,11 +5279,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
(vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
rss_key_addr_cnt = 32;
- if (t4_use_ldst(adap))
- t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
- else
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
- TP_RSS_SECRET_KEY0_A);
+ t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
if (idx >= 0 && idx < rss_key_addr_cnt) {
if (rss_key_addr_cnt > 16)
@@ -5146,19 +5297,15 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
* @adapter: the adapter
* @index: the entry in the PF RSS table to read
* @valp: where to store the returned value
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the PF RSS Configuration Table at the specified index and returns
* the value found there.
*/
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
- u32 *valp)
+ u32 *valp, bool sleep_ok)
{
- if (t4_use_ldst(adapter))
- t4_fw_tp_pio_rw(adapter, valp, 1,
- TP_RSS_PF0_CONFIG_A + index, 1);
- else
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- valp, 1, TP_RSS_PF0_CONFIG_A + index);
+ t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
}
/**
@@ -5167,12 +5314,13 @@ void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
* @index: the entry in the VF RSS table to read
* @vfl: where to store the returned VFL
* @vfh: where to store the returned VFH
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the VF RSS Configuration Table at the specified index and returns
* the (VFL, VFH) values found there.
*/
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
- u32 *vfl, u32 *vfh)
+ u32 *vfl, u32 *vfh, bool sleep_ok)
{
u32 vrt, mask, data;
@@ -5193,50 +5341,37 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
/* Grab the VFL/VFH values ...
*/
- if (t4_use_ldst(adapter)) {
- t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
- t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
- } else {
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfl, 1, TP_RSS_VFL_CONFIG_A);
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- vfh, 1, TP_RSS_VFH_CONFIG_A);
- }
+ t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
+ t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
}
/**
* t4_read_rss_pf_map - read PF RSS Map
* @adapter: the adapter
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the PF RSS Map register and returns its value.
*/
-u32 t4_read_rss_pf_map(struct adapter *adapter)
+u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
{
u32 pfmap;
- if (t4_use_ldst(adapter))
- t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
- else
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmap, 1, TP_RSS_PF_MAP_A);
+ t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
return pfmap;
}
/**
* t4_read_rss_pf_mask - read PF RSS Mask
* @adapter: the adapter
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Reads the PF RSS Mask register and returns its value.
*/
-u32 t4_read_rss_pf_mask(struct adapter *adapter)
+u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
{
u32 pfmask;
- if (t4_use_ldst(adapter))
- t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
- else
- t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &pfmask, 1, TP_RSS_PF_MSK_A);
+ t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
return pfmask;
}
@@ -5245,12 +5380,13 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
* @adap: the adapter
* @v4: holds the TCP/IP counter values
* @v6: holds the TCP/IPv6 counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
* Either @v4 or @v6 may be %NULL to skip the corresponding stats.
*/
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
- struct tp_tcp_stats *v6)
+ struct tp_tcp_stats *v6, bool sleep_ok)
{
u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
@@ -5259,16 +5395,16 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
if (v4) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
- ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
+ t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
+ TP_MIB_TCP_OUT_RST_A, sleep_ok);
v4->tcp_out_rsts = STAT(OUT_RST);
v4->tcp_in_segs = STAT64(IN_SEG);
v4->tcp_out_segs = STAT64(OUT_SEG);
v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
- ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
+ t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
+ TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
v6->tcp_out_rsts = STAT(OUT_RST);
v6->tcp_in_segs = STAT64(IN_SEG);
v6->tcp_out_segs = STAT64(OUT_SEG);
@@ -5283,63 +5419,66 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
* t4_tp_get_err_stats - read TP's error MIB counters
* @adap: the adapter
* @st: holds the counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's error counters.
*/
-void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
+ bool sleep_ok)
{
int nchan = adap->params.arch.nchan;
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
-
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
- &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+ t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
+ sleep_ok);
+ t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
+ sleep_ok);
+ t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
+ sleep_ok);
+ t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
+ TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
+ t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
+ TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
+ t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
+ sleep_ok);
+ t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
+ TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
+ t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
+ TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
+ t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
+ sleep_ok);
}
/**
* t4_tp_get_cpl_stats - read TP's CPL MIB counters
* @adap: the adapter
* @st: holds the counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's CPL counters.
*/
-void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
+ bool sleep_ok)
{
int nchan = adap->params.arch.nchan;
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
- nchan, TP_MIB_CPL_IN_REQ_0_A);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
- nchan, TP_MIB_CPL_OUT_RSP_0_A);
+ t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
+ t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
}
/**
* t4_tp_get_rdma_stats - read TP's RDMA MIB counters
* @adap: the adapter
* @st: holds the counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's RDMA counters.
*/
-void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
+ bool sleep_ok)
{
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
- 2, TP_MIB_RQE_DFR_PKT_A);
+ t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
+ sleep_ok);
}
/**
@@ -5347,20 +5486,24 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
* @adap: the adapter
* @idx: the port index
* @st: holds the counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's FCoE counters for the selected port.
*/
void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
- struct tp_fcoe_stats *st)
+ struct tp_fcoe_stats *st, bool sleep_ok)
{
u32 val[2];
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
- 1, TP_MIB_FCOE_DDP_0_A + idx);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
- 1, TP_MIB_FCOE_DROP_0_A + idx);
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
- 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+ t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
+ sleep_ok);
+
+ t4_tp_mib_read(adap, &st->frames_drop, 1,
+ TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
+
+ t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
+ sleep_ok);
+
st->octets_ddp = ((u64)val[0] << 32) | val[1];
}
@@ -5368,15 +5511,16 @@ void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
* t4_get_usm_stats - read TP's non-TCP DDP MIB counters
* @adap: the adapter
* @st: holds the counter values
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Returns the values of TP's counters for non-TCP directly-placed packets.
*/
-void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
+ bool sleep_ok)
{
u32 val[4];
- t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
- TP_MIB_USM_PKTS_A);
+ t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
st->frames = val[0];
st->drops = val[1];
st->octets = ((u64)val[2] << 32) | val[3];
@@ -8205,7 +8349,7 @@ struct flash_desc {
u32 size_mb;
};
-static int get_flash_params(struct adapter *adap)
+static int t4_get_flash_params(struct adapter *adap)
{
/* Table for non-Numonix supported flash parts. Numonix parts are left
* to the preexisting code. All flash parts have 64KB sectors.
@@ -8214,40 +8358,137 @@ static int get_flash_params(struct adapter *adap)
{ 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
+ unsigned int part, manufacturer;
+ unsigned int density, size;
+ u32 flashid = 0;
int ret;
- u32 info;
+
+ /* Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but many
+ * Flash parts don't support it.
+ */
ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = sf1_read(adap, 3, 0, 1, &info);
+ ret = sf1_read(adap, 3, 0, 1, &flashid);
t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
- for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
- if (supported_flash[ret].vendor_and_model_id == info) {
- adap->params.sf_size = supported_flash[ret].size_mb;
+ /* Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adap->params.sf_size = supported_flash[part].size_mb;
adap->params.sf_nsec =
adap->params.sf_size / SF_SEC_SIZE;
- return 0;
+ goto found;
}
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
- return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- adap->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- adap->params.sf_nsec = 64;
- else
+ /* Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /* This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14: /* 1MB */
+ size = 1 << 20;
+ break;
+ case 0x15: /* 2MB */
+ size = 1 << 21;
+ break;
+ case 0x16: /* 4MB */
+ size = 1 << 22;
+ break;
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ case 0x19: /* 32MB */
+ size = 1 << 25;
+ break;
+ case 0x20: /* 64MB */
+ size = 1 << 26;
+ break;
+ case 0x21: /* 128MB */
+ size = 1 << 27;
+ break;
+ case 0x22: /* 256MB */
+ size = 1 << 28;
+ break;
+
+ default:
+ dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+ break;
+ }
+ case 0xc2: { /* Macronix */
+ /* This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+ break;
+ }
+ case 0xef: { /* Winbond */
+ /* This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
+ flashid);
return -EINVAL;
- adap->params.sf_size = 1 << info;
- adap->params.sf_fw_start =
- t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
+ }
+
+ /* Store decoded Flash size and fall through into vetting code. */
+ adap->params.sf_size = size;
+ adap->params.sf_nsec = size / SF_SEC_SIZE;
+found:
if (adap->params.sf_size < FLASH_MIN_SIZE)
- dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
- adap->params.sf_size, FLASH_MIN_SIZE);
+ dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adap->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
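As a worked example of the decoding above (the ID value is illustrative): a flashid of 0x18ba20 yields manufacturer = flashid & 0xff = 0x20 (Micron/Numonix) and density = (flashid >> 16) & 0xff = 0x18, so size = 1 << 24 = 16MB and, with 64KB sectors, adap->params.sf_nsec = size / SF_SEC_SIZE = 256.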
@@ -8285,7 +8526,7 @@ int t4_prep_adapter(struct adapter *adapter)
get_pci_mode(adapter, &adapter->params.pci);
pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
- ret = get_flash_params(adapter);
+ ret = t4_get_flash_params(adapter);
if (ret < 0) {
dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
return ret;
@@ -8567,10 +8808,11 @@ int t4_init_sge_params(struct adapter *adapter)
/**
* t4_init_tp_params - initialize adap->params.tp
* @adap: the adapter
+ * @sleep_ok: if true we may sleep while awaiting command completion
*
* Initialize various fields of the adapter's TP Parameters structure.
*/
-int t4_init_tp_params(struct adapter *adap)
+int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
int chan;
u32 v;
@@ -8586,19 +8828,11 @@ int t4_init_tp_params(struct adapter *adap)
/* Cache the adapter's Compressed Filter Mode and global Ingress
* Configuration.
*/
- if (t4_use_ldst(adap)) {
- t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A, 1);
- t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A, 1);
- } else {
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.vlan_pri_map, 1,
- TP_VLAN_PRI_MAP_A);
- t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &adap->params.tp.ingress_config, 1,
- TP_INGRESS_CONFIG_A);
- }
+ t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP_A, sleep_ok);
+ t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG_A, sleep_ok);
+
/* For T6, cache the adapter's compressed error vector
* and passing outer header info for encapsulated packets.
*/
@@ -8611,11 +8845,21 @@ int t4_init_tp_params(struct adapter *adap)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
- adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+ adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
+ adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
PROTOCOL_F);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ ETHERTYPE_F);
+ adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
+ MACMATCH_F);
+ adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
+ MPSHITTYPE_F);
+ adap->params.tp.frag_shift = t4_filter_field_shift(adap,
+ FRAGMENTATION_F);
/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
* represents the presence of an Outer VLAN instead of a VNIC ID.
@@ -8623,6 +8867,10 @@ int t4_init_tp_params(struct adapter *adap)
if ((adap->params.tp.ingress_config & VNIC_F) == 0)
adap->params.tp.vnic_shift = -1;
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
return 0;
}
@@ -9342,6 +9590,125 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
+/**
+ * t4_read_pace_tbl - read the pace table
+ * @adap: the adapter
+ * @pace_vals: holds the returned values
+ *
+ * Returns the values of TP's pace table in microseconds.
+ */
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
+{
+ unsigned int i, v;
+
+ for (i = 0; i < NTX_SCHED; i++) {
+ t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
+ v = t4_read_reg(adap, TP_PACE_TABLE_A);
+ pace_vals[i] = dack_ticks_to_usec(adap, v);
+ }
+}
+
+/**
+ * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ * @adap: the adapter
+ * @sched: the scheduler index
+ * @kbps: the byte rate in Kbps
+ * @ipg: the interpacket delay in tenths of nanoseconds
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Return the current configuration of a HW Tx scheduler.
+ */
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
+ unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
+{
+ unsigned int v, addr, bpt, cpt;
+
+ if (kbps) {
+ addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
+ t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
+ if (sched & 1)
+ v >>= 16;
+ bpt = (v >> 8) & 0xff;
+ cpt = v & 0xff;
+ if (!cpt) {
+ *kbps = 0; /* scheduler disabled */
+ } else {
+ v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
+ *kbps = (v * bpt) / 125;
+ }
+ }
+ if (ipg) {
+ addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
+ t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
+ if (sched & 1)
+ v >>= 16;
+ v &= 0xffff;
+ *ipg = (10000 * v) / core_ticks_per_usec(adap);
+ }
+}
+
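To sanity-check the rate arithmetic above with illustrative numbers (the register contents are assumptions, and cclk is taken to be stored in kHz): with cclk = 250000, cpt = 10 and bpt = 64, v = (250000 * 1000) / 10 = 25,000,000 ticks per second, and *kbps = (25,000,000 * 64) / 125 = 12,800,000 kbit/s, the division by 125 converting bytes per second into kilobits per second.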
+/**
+ * t4_sge_ctxt_rd - read an SGE context through FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Issues a FW command through the given mailbox to read an SGE context.
+ */
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ struct fw_ldst_cmd c;
+ int ret;
+
+ if (ctype == CTXT_FLM)
+ ret = FW_LDST_ADDRSPC_SGE_FLMC;
+ else
+ ret = FW_LDST_ADDRSPC_SGE_CONMC;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(ret));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.idctxt.physid = cpu_to_be32(cid);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
+ data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
+ data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
+ data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
+ data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
+ data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ }
+ return ret;
+}
+
+/**
+ * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ * @adap: the adapter
+ * @cid: the context id
+ * @ctype: the context type
+ * @data: where to store the context data
+ *
+ * Reads an SGE context directly, bypassing FW. This is only for
+ * debugging when FW is unavailable.
+ */
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
+ enum ctxt_type ctype, u32 *data)
+{
+ int i, ret;
+
+ t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
+ ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
+ if (!ret)
+ for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
+ *data++ = t4_read_reg(adap, i);
+ return ret;
+}
+
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
int rateunit, int ratemode, int channel, int class,
int minrate, int maxrate, int weight, int pktsize)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 7f59ca458431..a964ed184356 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,6 +47,7 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PM_NSTATS = 5, /* # of PM stats */
T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MBOX_LEN = 64, /* mailbox size in bytes */
@@ -67,6 +68,12 @@ enum {
ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */
};
+/* SGE context types */
+enum ctxt_type {
+ CTXT_FLM = 2,
+ CTXT_CNM,
+};
+
enum {
SF_PAGE_SIZE = 256, /* serial flash page size */
SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
@@ -78,6 +85,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_CTXT_SIZE = 24, /* size of SGE context */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_MAX_IQ_SIZE = 65520,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b0ff78da8aa2..7e12f241145b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -50,6 +50,7 @@ enum {
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
+ CPL_SMT_WRITE_REQ = 0x14,
CPL_TID_RELEASE = 0x1A,
CPL_TX_DATA_ISO = 0x1F,
@@ -60,6 +61,7 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32,
@@ -284,6 +286,7 @@ struct work_request_hdr {
#define RX_CHANNEL_S 26
#define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S)
+#define RX_CHANNEL_F RX_CHANNEL_V(1U)
#define WND_SCALE_EN_S 28
#define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S)
@@ -313,6 +316,10 @@ struct cpl_pass_open_req {
#define DELACK_V(x) ((x) << DELACK_S)
#define DELACK_F DELACK_V(1U)
+#define NON_OFFLOAD_S 7
+#define NON_OFFLOAD_V(x) ((x) << NON_OFFLOAD_S)
+#define NON_OFFLOAD_F NON_OFFLOAD_V(1U)
+
#define DSCP_S 22
#define DSCP_M 0x3F
#define DSCP_V(x) ((x) << DSCP_S)
@@ -681,8 +688,8 @@ struct cpl_set_tcb_field {
};
/* cpl_set_tcb_field.word_cookie fields */
-#define TCB_WORD_S 0
-#define TCB_WORD(x) ((x) << TCB_WORD_S)
+#define TCB_WORD_S 0
+#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
#define TCB_COOKIE_S 5
#define TCB_COOKIE_M 0x7
@@ -1266,6 +1273,44 @@ struct cpl_l2t_write_rpl {
u8 rsvd[3];
};
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __be16 pfvf1;
+ u8 src_mac1[6];
+ __be16 pfvf0;
+ u8 src_mac0[6];
+};
+
+struct cpl_t6_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __be64 tag;
+ __be16 pfvf0;
+ u8 src_mac0[6];
+ __be32 local_ip;
+ __be32 rsvd;
+};
+
+struct cpl_smt_write_rpl {
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+/* cpl_smt_{read,write}_req.params fields */
+#define SMTW_OVLAN_IDX_S 16
+#define SMTW_OVLAN_IDX_V(x) ((x) << SMTW_OVLAN_IDX_S)
+
+#define SMTW_IDX_S 20
+#define SMTW_IDX_V(x) ((x) << SMTW_IDX_S)
+
+#define SMTW_NORPL_S 31
+#define SMTW_NORPL_V(x) ((x) << SMTW_NORPL_S)
+#define SMTW_NORPL_F SMTW_NORPL_V(1U)
+
struct cpl_rdma_terminate {
union opcode_tid ot;
__be16 rsvd;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index aa28299aef5f..60cf9e02de5d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -176,6 +176,13 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
/* T6 adapters:
*/
@@ -197,6 +204,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index dac90837842b..a7cfece72828 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -65,6 +65,9 @@
#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17
+#define NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17
+
#define SGE_PF_KDOORBELL_A 0x0
#define QID_S 15
@@ -150,6 +153,23 @@
#define T6_DBVFIFO_SIZE_M 0x1fffU
#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+#define SGE_CTXT_CMD_A 0x11fc
+
+#define BUSY_S 31
+#define BUSY_V(x) ((x) << BUSY_S)
+#define BUSY_F BUSY_V(1U)
+
+#define CTXTTYPE_S 24
+#define CTXTTYPE_M 0x3U
+#define CTXTTYPE_V(x) ((x) << CTXTTYPE_S)
+
+#define CTXTQID_S 0
+#define CTXTQID_M 0x1ffffU
+#define CTXTQID_V(x) ((x) << CTXTQID_S)
+
+#define SGE_CTXT_DATA0_A 0x1200
+#define SGE_CTXT_DATA5_A 0x1214
+
#define GLOBALENABLE_S 0
#define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
#define GLOBALENABLE_F GLOBALENABLE_V(1U)
@@ -319,6 +339,16 @@
#define SGE_IMSG_CTXT_BADDR_A 0x1088
#define SGE_FLM_CACHE_BADDR_A 0x108c
+#define SGE_FLM_CFG_A 0x1090
+
+#define NOHDR_S 18
+#define NOHDR_V(x) ((x) << NOHDR_S)
+#define NOHDR_F NOHDR_V(1U)
+
+#define HDRSTARTFLQ_S 11
+#define HDRSTARTFLQ_M 0x7U
+#define HDRSTARTFLQ_G(x) (((x) >> HDRSTARTFLQ_S) & HDRSTARTFLQ_M)
+
#define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
#define THRESHOLD_0_S 24
@@ -1415,6 +1445,7 @@
#define ROWINDEX_V(x) ((x) << ROWINDEX_S)
#define TP_CCTRL_TABLE_A 0x7ddc
+#define TP_PACE_TABLE_A 0x7dd8
#define TP_MTU_TABLE_A 0x7de4
#define MTUINDEX_S 24
@@ -1447,6 +1478,17 @@
#define LKPTBLQUEUE0_M 0x3ffU
#define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M)
+#define TP_TM_PIO_ADDR_A 0x7e18
+#define TP_TM_PIO_DATA_A 0x7e1c
+#define TP_MOD_CONFIG_A 0x7e24
+
+#define TIMERMODE_S 8
+#define TIMERMODE_M 0xffU
+#define TIMERMODE_G(x) (((x) >> TIMERMODE_S) & TIMERMODE_M)
+
+#define TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A 0x3
+#define TP_TX_MOD_Q1_Q0_RATE_LIMIT_A 0x8
+
#define TP_PIO_ADDR_A 0x7e40
#define TP_PIO_DATA_A 0x7e44
#define TP_MIB_INDEX_A 0x7e50
@@ -1627,6 +1669,10 @@
#define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S)
#define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U)
+#define ULP_TX_LA_RDPTR_0_A 0x8ec0
+#define ULP_TX_LA_RDDATA_0_A 0x8ec4
+#define ULP_TX_LA_WRPTR_0_A 0x8ec8
+
#define PMRX_E_PCMD_PAR_ERROR_S 0
#define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S)
#define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U)
@@ -2257,6 +2303,35 @@
#define CHNENABLE_V(x) ((x) << CHNENABLE_S)
#define CHNENABLE_F CHNENABLE_V(1U)
+#define LE_DB_DBGI_CONFIG_A 0x19cf0
+
+#define DBGICMDBUSY_S 3
+#define DBGICMDBUSY_V(x) ((x) << DBGICMDBUSY_S)
+#define DBGICMDBUSY_F DBGICMDBUSY_V(1U)
+
+#define DBGICMDSTRT_S 2
+#define DBGICMDSTRT_V(x) ((x) << DBGICMDSTRT_S)
+#define DBGICMDSTRT_F DBGICMDSTRT_V(1U)
+
+#define DBGICMDMODE_S 0
+#define DBGICMDMODE_M 0x3U
+#define DBGICMDMODE_V(x) ((x) << DBGICMDMODE_S)
+
+#define LE_DB_DBGI_REQ_TCAM_CMD_A 0x19cf4
+
+#define DBGICMD_S 20
+#define DBGICMD_M 0xfU
+#define DBGICMD_V(x) ((x) << DBGICMD_S)
+
+#define DBGITID_S 0
+#define DBGITID_M 0xfffffU
+#define DBGITID_V(x) ((x) << DBGITID_S)
+
+#define LE_DB_DBGI_REQ_DATA_A 0x19d00
+#define LE_DB_DBGI_RSP_STATUS_A 0x19d94
+
+#define LE_DB_DBGI_RSP_DATA_A 0x19da0
+
#define PRTENABLE_S 29
#define PRTENABLE_V(x) ((x) << PRTENABLE_S)
#define PRTENABLE_F PRTENABLE_V(1U)
@@ -2433,6 +2508,18 @@
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
+#define CTLREQID_S 30
+#define CTLREQID_V(x) ((x) << CTLREQID_S)
+
+#define MPS_VF_RPLCT_MAP0_A 0x1111c
+#define MPS_VF_RPLCT_MAP1_A 0x11120
+#define MPS_VF_RPLCT_MAP2_A 0x11124
+#define MPS_VF_RPLCT_MAP3_A 0x11128
+#define MPS_VF_RPLCT_MAP4_A 0x11300
+#define MPS_VF_RPLCT_MAP5_A 0x11304
+#define MPS_VF_RPLCT_MAP6_A 0x11308
+#define MPS_VF_RPLCT_MAP7_A 0x1130c
+
#define VIDL_S 16
#define VIDL_M 0xffffU
#define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M)
@@ -2457,6 +2544,10 @@
#define DATAVIDH1_M 0x7fU
#define DATAVIDH1_G(x) (((x) >> DATAVIDH1_S) & DATAVIDH1_M)
+#define MPS_CLS_TCAM_RDATA0_REQ_ID1_A 0xf020
+#define MPS_CLS_TCAM_RDATA1_REQ_ID1_A 0xf024
+#define MPS_CLS_TCAM_RDATA2_REQ_ID1_A 0xf028
+
#define USED_S 16
#define USED_M 0x7ffU
#define USED_G(x) (((x) >> USED_S) & USED_M)
@@ -2850,10 +2941,20 @@
#define T6_LIPMISS_F T6_LIPMISS_V(1U)
#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_ROUTING_TABLE_INDEX_A 0x19c10
+#define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10
+#define LE_DB_FILTER_TABLE_INDEX_A 0x19c14
#define LE_DB_SERVER_INDEX_A 0x19c18
#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_CLIP_TABLE_INDEX_A 0x19c1c
#define LE_DB_ACT_CNT_IPV4_A 0x19c20
#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_CONFIG_A 0x19c28
+
+#define HASHTIDSIZE_S 16
+#define HASHTIDSIZE_M 0x3fU
+#define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M)
+
#define LE_DB_HASH_TID_BASE_A 0x19c30
#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
#define LE_DB_INT_CAUSE_A 0x19c3c
@@ -2900,6 +3001,23 @@
#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
+#define LE_DB_RSP_CODE_0_A 0x19c74
+
+#define TCAM_ACTV_HIT_S 0
+#define TCAM_ACTV_HIT_M 0x1fU
+#define TCAM_ACTV_HIT_V(x) ((x) << TCAM_ACTV_HIT_S)
+#define TCAM_ACTV_HIT_G(x) (((x) >> TCAM_ACTV_HIT_S) & TCAM_ACTV_HIT_M)
+
+#define LE_DB_RSP_CODE_1_A 0x19c78
+
+#define HASH_ACTV_HIT_S 25
+#define HASH_ACTV_HIT_M 0x1fU
+#define HASH_ACTV_HIT_V(x) ((x) << HASH_ACTV_HIT_S)
+#define HASH_ACTV_HIT_G(x) (((x) >> HASH_ACTV_HIT_S) & HASH_ACTV_HIT_M)
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
+
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
new file mode 100644
index 000000000000..3297ce025e8b
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_TCB_H
+#define __T4_TCB_H
+
+#define TCB_SMAC_SEL_W 0
+#define TCB_SMAC_SEL_S 24
+#define TCB_SMAC_SEL_M 0xffULL
+#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
+
+#define TCB_T_FLAGS_W 1
+
+#define TF_CCTRL_ECE_S 60
+#define TF_CCTRL_CWR_S 61
+#define TF_CCTRL_RFR_S 62
+
+#define TCB_RSS_INFO_W 3
+#define TCB_RSS_INFO_S 0
+#define TCB_RSS_INFO_M 0x3ffULL
+#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S)
+
+#define TCB_TIMESTAMP_W 5
+#define TCB_TIMESTAMP_S 0
+#define TCB_TIMESTAMP_M 0xffffffffULL
+#define TCB_TIMESTAMP_V(x) ((x) << TCB_TIMESTAMP_S)
+
+#define TCB_RTT_TS_RECENT_AGE_W 6
+#define TCB_RTT_TS_RECENT_AGE_S 0
+#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL
+#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S)
+
+#define TCB_SND_UNA_RAW_W 10
+#define TCB_RX_FRAG2_PTR_RAW_W 27
+#define TCB_RX_FRAG3_LEN_RAW_W 29
+#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
+#define TCB_PDU_HDR_LEN_W 31
+#endif /* __T4_TCB_H */
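A minimal sketch of how the *_W/*_S/*_M/*_V constants above compose into the (word, mask, value) triple of a TCB field update (SMAC selection here); the helper is hypothetical and shows only the bit arithmetic, not the CPL_SET_TCB_FIELD plumbing.

/* Illustrative only: compute the (word, mask, value) triple used to
 * rewrite the SMAC selection in a connection's TCB.
 */
static void example_smac_sel_update(unsigned int smt_idx,
				    u16 *word, u64 *mask, u64 *val)
{
	*word = TCB_SMAC_SEL_W;				/* TCB word index */
	*mask = TCB_SMAC_SEL_M << TCB_SMAC_SEL_S;	/* bits 24..31 of that word */
	*val  = TCB_SMAC_SEL_V((u64)smt_idx);		/* new SMT index, shifted in place */
}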
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ca2756dcefc5..57eb4ad3485d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -105,7 +105,8 @@ enum fw_wr_opcodes {
FW_ISCSI_TX_DATA_WR = 0x45,
FW_PTP_TX_PKT_WR = 0x46,
FW_CRYPTO_LOOKASIDE_WR = 0X6d,
- FW_LASTC2E_WR = 0x70
+ FW_LASTC2E_WR = 0x70,
+ FW_FILTER2_WR = 0x77
};
struct fw_wr_hdr {
@@ -201,6 +202,51 @@ struct fw_filter_wr {
__u8 sma[6];
};
+struct fw_filter2_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+ __be16 r8;
+ __u8 filter_type_swapmac;
+ __u8 natmode_to_ulp_type;
+ __be16 newlport;
+ __be16 newfport;
+ __u8 newlip[16];
+ __u8 newfip[16];
+ __be32 natseqcheck;
+ __be32 r9;
+ __be64 r10;
+ __be64 r11;
+ __be64 r12;
+ __be64 r13;
+};
+
#define FW_FILTER_WR_TID_S 12
#define FW_FILTER_WR_TID_M 0xfffff
#define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S)
@@ -385,6 +431,32 @@ struct fw_filter_wr {
#define FW_FILTER_WR_RX_RPL_IQ_G(x) \
(((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M)
+#define FW_FILTER2_WR_FILTER_TYPE_S 1
+#define FW_FILTER2_WR_FILTER_TYPE_M 0x1
+#define FW_FILTER2_WR_FILTER_TYPE_V(x) ((x) << FW_FILTER2_WR_FILTER_TYPE_S)
+#define FW_FILTER2_WR_FILTER_TYPE_G(x) \
+ (((x) >> FW_FILTER2_WR_FILTER_TYPE_S) & FW_FILTER2_WR_FILTER_TYPE_M)
+#define FW_FILTER2_WR_FILTER_TYPE_F FW_FILTER2_WR_FILTER_TYPE_V(1U)
+
+#define FW_FILTER2_WR_NATMODE_S 5
+#define FW_FILTER2_WR_NATMODE_M 0x7
+#define FW_FILTER2_WR_NATMODE_V(x) ((x) << FW_FILTER2_WR_NATMODE_S)
+#define FW_FILTER2_WR_NATMODE_G(x) \
+ (((x) >> FW_FILTER2_WR_NATMODE_S) & FW_FILTER2_WR_NATMODE_M)
+
+#define FW_FILTER2_WR_NATFLAGCHECK_S 4
+#define FW_FILTER2_WR_NATFLAGCHECK_M 0x1
+#define FW_FILTER2_WR_NATFLAGCHECK_V(x) ((x) << FW_FILTER2_WR_NATFLAGCHECK_S)
+#define FW_FILTER2_WR_NATFLAGCHECK_G(x) \
+ (((x) >> FW_FILTER2_WR_NATFLAGCHECK_S) & FW_FILTER2_WR_NATFLAGCHECK_M)
+#define FW_FILTER2_WR_NATFLAGCHECK_F FW_FILTER2_WR_NATFLAGCHECK_V(1U)
+
+#define FW_FILTER2_WR_ULP_TYPE_S 0
+#define FW_FILTER2_WR_ULP_TYPE_M 0xf
+#define FW_FILTER2_WR_ULP_TYPE_V(x) ((x) << FW_FILTER2_WR_ULP_TYPE_S)
+#define FW_FILTER2_WR_ULP_TYPE_G(x) \
+ (((x) >> FW_FILTER2_WR_ULP_TYPE_S) & FW_FILTER2_WR_ULP_TYPE_M)
+
#define FW_FILTER_WR_MACI_S 23
#define FW_FILTER_WR_MACI_M 0x1ff
#define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S)
@@ -1020,6 +1092,7 @@ enum fw_caps_config_switch {
enum fw_caps_config_nic {
FW_CAPS_CONFIG_NIC = 0x00000001,
FW_CAPS_CONFIG_NIC_VM = 0x00000002,
+ FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
};
enum fw_caps_config_ofld {
@@ -1127,6 +1200,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A,
FW_PARAMS_PARAM_DEV_VPDREV = 0x1B,
FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C,
+ FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E,
};
@@ -1171,9 +1245,12 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
+ FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F,
FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
- FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32,
+ FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
+ FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
+ FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
};
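struct fw_filter2_wr packs several sub-fields into single bytes; for natmode_to_ulp_type the helpers above place NATMODE in bits 7:5, NATFLAGCHECK in bit 4 and ULP_TYPE in bits 3:0. A hedged sketch of composing that byte; the function and parameter names are illustrative only:

/* Illustration only: filling natmode_to_ulp_type from the helpers above. */
static void example_fill_filter2_nat(struct fw_filter2_wr *fwr,
				     u8 nat_mode, u8 ulp_type)
{
	fwr->natmode_to_ulp_type =
		FW_FILTER2_WR_NATMODE_V(nat_mode) |	/* bits 7:5 */
		FW_FILTER2_WR_NATFLAGCHECK_F |		/* bit 4 */
		FW_FILTER2_WR_ULP_TYPE_V(ulp_type);	/* bits 3:0 */
}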
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index f2d623a7aee0..123e2c1b65f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2D
+#define T4FW_VERSION_MICRO 0x3F
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2D
+#define T5FW_VERSION_MICRO 0x3F
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MICRO 0x3F
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 8996ebbd222e..b48361cfdc78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1401,6 +1401,63 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
return 0;
}
+/* Translate the Firmware FEC value into the ethtool value. */
+static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
+{
+ unsigned int eth_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate Common Code FEC value into ethtool value. */
+static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
+{
+ unsigned int eth_fec = 0;
+
+ if (cc_fec & FEC_AUTO)
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (cc_fec & FEC_RS)
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+static int cxgb4vf_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fec)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ const struct link_config *lc = &pi->link_cfg;
+
+ /* Translate the Firmware FEC Support into the ethtool value. We
+ * always support IEEE 802.3 "automatic" selection of Link FEC type if
+ * any FEC is supported.
+ */
+ fec->fec = fwcap_to_eth_fec(lc->pcaps);
+ if (fec->fec != ETHTOOL_FEC_OFF)
+ fec->fec |= ETHTOOL_FEC_AUTO;
+
+ /* Translate the current internal FEC parameters into the
+ * ethtool values.
+ */
+ fec->active_fec = cc_to_eth_fec(lc->fec);
+ return 0;
+}
+
/*
* Return our driver information.
*/
@@ -1774,6 +1831,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_link_ksettings = cxgb4vf_get_link_ksettings,
+ .get_fecparam = cxgb4vf_get_fecparam,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
.set_msglevel = cxgb4vf_set_msglevel,
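For reference, a worked example of the translation done by cxgb4vf_get_fecparam() above; the capability values are illustrative, not taken from real hardware:

/* Worked example (illustrative values): a port whose firmware capabilities
 * advertise RS FEC only gives
 *	fwcap_to_eth_fec(lc->pcaps)	-> ETHTOOL_FEC_RS
 *	fec->fec			-> ETHTOOL_FEC_RS | ETHTOOL_FEC_AUTO
 * and, with RS currently negotiated,
 *	cc_to_eth_fec(lc->fec)		-> ETHTOOL_FEC_RS
 * which is what "ethtool --show-fec <ifname>" reports.
 */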
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 05498e7f2840..14d7e673c656 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2058,9 +2058,9 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
* when out of memory a queue can become empty. We schedule NAPI to do
* the actual refill.
*/
-static void sge_rx_timer_cb(unsigned long data)
+static void sge_rx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
struct sge *s = &adapter->sge;
unsigned int i;
@@ -2117,9 +2117,9 @@ static void sge_rx_timer_cb(unsigned long data)
* when no new packets are being submitted. This is essential for pktgen,
* at least.
*/
-static void sge_tx_timer_cb(unsigned long data)
+static void sge_tx_timer_cb(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
struct sge *s = &adapter->sge;
unsigned int i, budget;
@@ -2676,8 +2676,8 @@ int t4vf_sge_init(struct adapter *adapter)
/*
* Set up tasklet timers.
*/
- setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
- setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
+ timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
+ timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
/*
* Initialize Forwarded Interrupt Queue lock.
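The sge.c hunks above are part of the kernel-wide timer API conversion: setup_timer() with an unsigned long cookie becomes timer_setup(), and the callback now takes the struct timer_list pointer and recovers its container with from_timer(). A generic sketch of the pattern with hypothetical structure and function names:

/* Illustration only: the timer_setup()/from_timer() idiom used above. */
struct example_adapter {
	struct timer_list rx_timer;
};

static void example_rx_timer_cb(struct timer_list *t)
{
	struct example_adapter *adap = from_timer(adap, t, rx_timer);

	/* ... refill work using adap ... */
}

static void example_init(struct example_adapter *adap)
{
	timer_setup(&adap->rx_timer, example_rx_timer_cb, 0);
	mod_timer(&adap->rx_timer, jiffies + HZ / 2);
}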
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index a8d94963b4d0..67aec59a14e6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1812,7 +1812,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
*
* Returns a string representation of the Link Down Reason Code.
*/
-const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
+static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
{
static const char * const reason[] = {
"Link Down",
@@ -1838,8 +1838,8 @@ const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
*
* Processes a GET_PORT_INFO FW reply message.
*/
-void t4vf_handle_get_port_info(struct port_info *pi,
- const struct fw_port_cmd *cmd)
+static void t4vf_handle_get_port_info(struct port_info *pi,
+ const struct fw_port_cmd *cmd)
{
int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
struct adapter *adapter = pi->adapter;
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index ba032ac9ae86..6a9527004cb1 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.42"
+#define DRV_VERSION "2.3.0.45"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 3c677ed3c29e..973c1fb70d09 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
@@ -122,9 +123,9 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
}
#ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
index 6aa9f89d073b..8c4ce50da6e1 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.h
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ENIC_CLSF_H_
#define _ENIC_CLSF_H_
@@ -15,13 +16,11 @@ struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
static inline void enic_rfs_timer_start(struct enic *enic)
{
- init_timer(&enic->rfs_h.rfs_may_expire);
- enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
- enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+ timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index fd3980cc1e34..462d0ce51240 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -176,6 +176,81 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
}
}
+static void enic_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_enet_config *c = &enic->config;
+
+ ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_pending = c->rq_desc_count;
+ ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_pending = c->wq_desc_count;
+}
+
+static int enic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_enet_config *c = &enic->config;
+ int running = netif_running(netdev);
+ unsigned int rx_pending;
+ unsigned int tx_pending;
+ int err = 0;
+
+ if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
+ netdev_info(netdev,
+ "modifying mini ring params is not supported");
+ return -EINVAL;
+ }
+ if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
+ netdev_info(netdev,
+ "modifying jumbo ring params is not supported");
+ return -EINVAL;
+ }
+ rx_pending = c->rq_desc_count;
+ tx_pending = c->wq_desc_count;
+ if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ ring->rx_pending < ENIC_MIN_RQ_DESCS) {
+ netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
+ ring->rx_pending, ENIC_MIN_RQ_DESCS,
+ ENIC_MAX_RQ_DESCS);
+ return -EINVAL;
+ }
+ if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ ring->tx_pending < ENIC_MIN_WQ_DESCS) {
+ netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
+ ring->tx_pending, ENIC_MIN_WQ_DESCS,
+ ENIC_MAX_WQ_DESCS);
+ return -EINVAL;
+ }
+ if (running)
+ dev_close(netdev);
+ c->rq_desc_count =
+ ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
+ c->wq_desc_count =
+ ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
+ enic_free_vnic_resources(enic);
+ err = enic_alloc_vnic_resources(enic);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to alloc vNIC resources, aborting\n");
+ enic_free_vnic_resources(enic);
+ goto err_out;
+ }
+ enic_init_vnic_resources(enic);
+ if (running) {
+ err = dev_open(netdev);
+ if (err)
+ goto err_out;
+ }
+ return 0;
+err_out:
+ c->rq_desc_count = rx_pending;
+ c->wq_desc_count = tx_pending;
+ return err;
+}
+
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -509,6 +584,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_msglevel = enic_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = enic_get_strings,
+ .get_ringparam = enic_get_ringparam,
+ .set_ringparam = enic_set_ringparam,
.get_sset_count = enic_get_sset_count,
.get_ethtool_stats = enic_get_ethtool_stats,
.get_coalesce = enic_get_coalesce,
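enic_set_ringparam() above rounds the requested descriptor counts down to a multiple of 32 before reallocating the vNIC resources. A small worked example of the masking; the request values are made up:

/* Worked example: "& 0xffffffe0" clears the low five bits, i.e. rounds
 * down to a multiple of 32.
 *	requested rx_pending = 1000  ->  rq_desc_count = 992 (31 * 32)
 *	requested tx_pending =  100  ->  wq_desc_count =  96 ( 3 * 32)
 */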
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d24ee1ad3be1..e130fb757e7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1676,9 +1676,9 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
return work_done;
}
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, notify_timer);
enic_notify_check(enic);
@@ -2846,9 +2846,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notification timer, HW reset task, and wq locks
*/
- init_timer(&enic->notify_timer);
- enic->notify_timer.function = enic_notify_timer;
- enic->notify_timer.data = (unsigned long)enic;
+ timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 36bc2c71fba9..f8aa326d1d58 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -139,20 +139,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index = 0;
-
- /* Use current fetch_index as the ring starting point */
- fetch_index = ioread32(&rq->ctrl->fetch_index);
-
- if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
- /* Hardware surprise removal: reset fetch_index */
- fetch_index = 0;
- }
-
- vnic_rq_init_start(rq, cq_index,
- fetch_index, fetch_index,
- error_interrupt_enable,
- error_interrupt_offset);
+ vnic_rq_init_start(rq, cq_index, 0, 0, error_interrupt_enable,
+ error_interrupt_offset);
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h
index 9ce058adabab..581b35ad44ef 100644
--- a/drivers/net/ethernet/davicom/dm9000.h
+++ b/drivers/net/ethernet/davicom/dm9000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* dm9000 Ethernet
*/
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 5e8be38b45bb..8aab37564d5d 100644
--- a/drivers/net/ethernet/dec/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux "Tulip" family network device drivers.
#
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index c87b8cc42963..13430f75496c 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -333,8 +333,8 @@ static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
-static void de21040_media_timer (unsigned long data);
-static void de21041_media_timer (unsigned long data);
+static void de21040_media_timer (struct timer_list *t);
+static void de21041_media_timer (struct timer_list *t);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
@@ -959,9 +959,9 @@ static void de_next_media (struct de_private *de, const u32 *media,
}
}
-static void de21040_media_timer (unsigned long data)
+static void de21040_media_timer (struct timer_list *t)
{
- struct de_private *de = (struct de_private *) data;
+ struct de_private *de = from_timer(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1040,9 +1040,9 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
return 1;
}
-static void de21041_media_timer (unsigned long data)
+static void de21041_media_timer (struct timer_list *t)
{
- struct de_private *de = (struct de_private *) data;
+ struct de_private *de = from_timer(de, t, media_timer);
struct net_device *dev = de->dev;
u32 status = dr32(SIAStatus);
unsigned int carrier;
@@ -1999,12 +1999,9 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
de->board_idx = board_idx;
spin_lock_init (&de->lock);
- init_timer(&de->media_timer);
- if (de->de21040)
- de->media_timer.function = de21040_media_timer;
- else
- de->media_timer.function = de21041_media_timer;
- de->media_timer.data = (unsigned long) de;
+ timer_setup(&de->media_timer,
+ de->de21040 ? de21040_media_timer : de21041_media_timer,
+ 0);
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 0affee9c8aa2..a31b4df3e7ff 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -912,7 +912,7 @@ static int de4x5_init(struct net_device *dev);
static int de4x5_sw_reset(struct net_device *dev);
static int de4x5_rx(struct net_device *dev);
static int de4x5_tx(struct net_device *dev);
-static void de4x5_ast(struct net_device *dev);
+static void de4x5_ast(struct timer_list *t);
static int de4x5_txur(struct net_device *dev);
static int de4x5_rx_ovfc(struct net_device *dev);
@@ -1147,9 +1147,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
lp->timeout = -1;
lp->gendev = gendev;
spin_lock_init(&lp->lock);
- init_timer(&lp->timer);
- lp->timer.function = (void (*)(unsigned long))de4x5_ast;
- lp->timer.data = (unsigned long)dev;
+ timer_setup(&lp->timer, de4x5_ast, 0);
de4x5_parse_params(dev);
/*
@@ -1742,9 +1740,10 @@ de4x5_tx(struct net_device *dev)
}
static void
-de4x5_ast(struct net_device *dev)
+de4x5_ast(struct timer_list *t)
{
- struct de4x5_private *lp = netdev_priv(dev);
+ struct de4x5_private *lp = from_timer(lp, t, timer);
+ struct net_device *dev = dev_get_drvdata(lp->gendev);
int next_tick = DE4X5_AUTOSENSE_MS;
int dt;
@@ -2370,7 +2369,7 @@ autoconf_media(struct net_device *dev)
lp->media = INIT;
lp->tcount = 0;
- de4x5_ast(dev);
+ de4x5_ast(&lp->timer);
return lp->media;
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 07e10a45beaa..17ef7a28873d 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -331,7 +331,7 @@ static void dmfe_phy_write_1bit(void __iomem *, u32);
static u16 dmfe_phy_read_1bit(void __iomem *);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
-static void dmfe_timer(unsigned long);
+static void dmfe_timer(struct timer_list *);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
@@ -596,10 +596,8 @@ static int dmfe_open(struct net_device *dev)
netif_wake_queue(dev);
/* set and activate a timer process */
- init_timer(&db->timer);
+ timer_setup(&db->timer, dmfe_timer, 0);
db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
- db->timer.data = (unsigned long)dev;
- db->timer.function = dmfe_timer;
add_timer(&db->timer);
return 0;
@@ -1130,10 +1128,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
* Dynamic media sense, allocate Rx buffer...
*/
-static void dmfe_timer(unsigned long data)
+static void dmfe_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct dmfe_board_info *db = netdev_priv(dev);
+ struct dmfe_board_info *db = from_timer(db, t, timer);
+ struct net_device *dev = pci_get_drvdata(db->pdev);
void __iomem *ioaddr = db->ioaddr;
u32 tmp_cr8;
unsigned char tmp_cr12;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 8df80880ecaa..c1ca0765d56d 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -102,10 +102,10 @@ int tulip_refill_rx(struct net_device *dev)
#ifdef CONFIG_TULIP_NAPI
-void oom_timer(unsigned long data)
+void oom_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, oom_timer);
+
napi_schedule(&tp->napi);
}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 7bcccf5cac7a..3fb39e32e1b4 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -84,10 +84,10 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
}
}
-void pnic_timer(unsigned long data)
+void pnic_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
index 5895fc43f6e0..412adaa7fdf8 100644
--- a/drivers/net/ethernet/dec/tulip/pnic2.c
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -76,10 +76,10 @@
#include <linux/delay.h>
-void pnic2_timer(unsigned long data)
+void pnic2_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 523d9dde50a2..642e9dfc5451 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -137,10 +137,10 @@ void tulip_media_task(struct work_struct *work)
}
-void mxic_timer(unsigned long data)
+void mxic_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
void __iomem *ioaddr = tp->base_addr;
int next_tick = 60*HZ;
@@ -154,10 +154,10 @@ void mxic_timer(unsigned long data)
}
-void comet_timer(unsigned long data)
+void comet_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
int next_tick = 2*HZ;
if (tulip_debug > 1)
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 06660dbc44b7..b458140aeaef 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -43,7 +43,7 @@ struct tulip_chip_table {
int io_size;
int valid_intrs; /* CSR7 interrupt enable settings */
int flags;
- void (*media_timer) (unsigned long);
+ void (*media_timer) (struct timer_list *);
work_func_t media_task;
};
@@ -476,7 +476,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5);
/* PNIC2.c */
void pnic2_lnk_change(struct net_device *dev, int csr5);
-void pnic2_timer(unsigned long data);
+void pnic2_timer(struct timer_list *t);
void pnic2_start_nway(struct net_device *dev);
void pnic2_lnk_change(struct net_device *dev, int csr5);
@@ -504,19 +504,19 @@ void tulip_find_mii (struct net_device *dev, int board_idx);
/* pnic.c */
void pnic_do_nway(struct net_device *dev);
void pnic_lnk_change(struct net_device *dev, int csr5);
-void pnic_timer(unsigned long data);
+void pnic_timer(struct timer_list *t);
/* timer.c */
void tulip_media_task(struct work_struct *work);
-void mxic_timer(unsigned long data);
-void comet_timer(unsigned long data);
+void mxic_timer(struct timer_list *t);
+void comet_timer(struct timer_list *t);
/* tulip_core.c */
extern int tulip_debug;
extern const char * const medianame[];
extern const char tulip_media_cap[];
extern const struct tulip_chip_table tulip_tbl[];
-void oom_timer(unsigned long data);
+void oom_timer(struct timer_list *t);
extern u8 t21040_csr13[];
static inline void tulip_start_rxtx(struct tulip_private *tp)
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 851b6d1f5a42..00d02a0967d0 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -123,10 +123,10 @@ int tulip_debug = TULIP_DEBUG;
int tulip_debug = 1;
#endif
-static void tulip_timer(unsigned long data)
+static void tulip_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct tulip_private *tp = netdev_priv(dev);
+ struct tulip_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
if (netif_running(dev))
schedule_work(&tp->media_work);
@@ -505,7 +505,7 @@ media_picked:
tp->timer.expires = RUN_AT(next_tick);
add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev);
+ timer_setup(&tp->oom_timer, oom_timer, 0);
#endif
}
@@ -780,8 +780,7 @@ static void tulip_down (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
- setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
- (unsigned long)dev);
+ timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
dev->if_port = tp->saved_if_port;
@@ -1470,8 +1469,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->csr0 = csr0;
spin_lock_init(&tp->lock);
spin_lock_init(&tp->mii_lock);
- setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
- (unsigned long)dev);
+ timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7fc248efc4ba..488a744084c9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -241,7 +241,7 @@ static void phy_write_1bit(struct uli526x_board_info *db, u32);
static u16 phy_read_1bit(struct uli526x_board_info *db);
static u8 uli526x_sense_speed(struct uli526x_board_info *);
static void uli526x_process_mode(struct uli526x_board_info *);
-static void uli526x_timer(unsigned long);
+static void uli526x_timer(struct timer_list *t);
static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
@@ -491,10 +491,8 @@ static int uli526x_open(struct net_device *dev)
netif_wake_queue(dev);
/* set and activate a timer process */
- init_timer(&db->timer);
+ timer_setup(&db->timer, uli526x_timer, 0);
db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
- db->timer.data = (unsigned long)dev;
- db->timer.function = uli526x_timer;
add_timer(&db->timer);
return 0;
@@ -1023,10 +1021,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
* Dynamic media sense, allocate Rx buffer...
*/
-static void uli526x_timer(unsigned long data)
+static void uli526x_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct uli526x_board_info *db = netdev_priv(dev);
+ struct uli526x_board_info *db = from_timer(db, t, timer);
+ struct net_device *dev = pci_get_drvdata(db->pdev);
struct uli_phy_ops *phy = &db->phy;
void __iomem *ioaddr = db->ioaddr;
unsigned long flags;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 32d7229544fa..70cb2d689c2c 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -327,7 +327,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int update_link(struct net_device *dev);
-static void netdev_timer(unsigned long data);
+static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
@@ -655,10 +655,8 @@ static int netdev_open(struct net_device *dev)
netdev_dbg(dev, "Done netdev_open()\n");
/* Set the timer to check for link beat. */
- init_timer(&np->timer);
+ timer_setup(&np->timer, netdev_timer, 0);
np->timer.expires = jiffies + 1*HZ;
- np->timer.data = (unsigned long)dev;
- np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
out_err:
@@ -774,10 +772,10 @@ static inline void update_csr6(struct net_device *dev, int new)
np->mii_if.full_duplex = 1;
}
-static void netdev_timer(unsigned long data)
+static void netdev_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = pci_get_drvdata(np->pci_dev);
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 778f974e2928..f0536b16b3c3 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -68,7 +68,7 @@ static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
-static void rio_timer (unsigned long data);
+static void rio_timer (struct timer_list *t);
static void rio_tx_timeout (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
@@ -313,7 +313,7 @@ find_miiphy (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int i, phy_found = 0;
- np = netdev_priv(dev);
+
np->phy_addr = 1;
for (i = 31; i >= 0; i--) {
@@ -644,7 +644,7 @@ static int rio_open(struct net_device *dev)
return i;
}
- setup_timer(&np->timer, rio_timer, (unsigned long)dev);
+ timer_setup(&np->timer, rio_timer, 0);
np->timer.expires = jiffies + 1 * HZ;
add_timer(&np->timer);
@@ -655,10 +655,10 @@ static int rio_open(struct net_device *dev)
}
static void
-rio_timer (unsigned long data)
+rio_timer (struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = pci_get_drvdata(np->pdev);
unsigned int entry;
int next_tick = 1*HZ;
unsigned long flags;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 2704bcf023be..1a27176381fb 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -431,7 +431,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int mdio_wait_link(struct net_device *dev, int wait);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
-static void netdev_timer(unsigned long data);
+static void netdev_timer(struct timer_list *t);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
@@ -913,10 +913,8 @@ static int netdev_open(struct net_device *dev)
ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
/* Set the timer to check for link beat. */
- init_timer(&np->timer);
+ timer_setup(&np->timer, netdev_timer, 0);
np->timer.expires = jiffies + 3*HZ;
- np->timer.data = (unsigned long)dev;
- np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
/* Enable interrupts by setting the interrupt mask. */
@@ -953,10 +951,10 @@ static void check_duplex(struct net_device *dev)
}
}
-static void netdev_timer(unsigned long data)
+static void netdev_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = np->mii_if.dev;
void __iomem *ioaddr = np->base;
int next_tick = 10*HZ;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0e3d9f39a807..c6e859a27ee6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
if (wrapped)
newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
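The be_main.c change above belongs to the tree-wide retirement of ACCESS_ONCE(): stores move from the lvalue-assignment form to the explicit two-argument WRITE_ONCE(). A minimal sketch of the store side of that conversion; the function name is hypothetical:

/* Illustration only: old and new forms of a once-store. */
static void example_publish(u32 *acc, u32 newacc)
{
	/* was:  ACCESS_ONCE(*acc) = newacc; */
	WRITE_ONCE(*acc, newacc);	/* single store the compiler may not tear or elide */
}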
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 9ed8e4b81530..78db8e62a83f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -59,6 +60,9 @@
/* Min number of tx ring entries before stopping queue */
#define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
+#define FTGMAC_100MHZ 100000000
+#define FTGMAC_25MHZ 25000000
+
struct ftgmac100 {
/* Registers */
struct resource *res;
@@ -96,6 +100,7 @@ struct ftgmac100 {
struct napi_struct napi;
struct work_struct reset_task;
struct mii_bus *mii_bus;
+ struct clk *clk;
/* Link management */
int cur_speed;
@@ -1734,6 +1739,22 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd->link_up ? "up" : "down");
}
+static void ftgmac100_setup_clk(struct ftgmac100 *priv)
+{
+ priv->clk = devm_clk_get(priv->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return;
+
+ clk_prepare_enable(priv->clk);
+
+ /* Aspeed specifies a 100MHz clock is required for up to
+ * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
+ * is sufficient
+ */
+ clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
+ FTGMAC_100MHZ);
+}
+
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1830,6 +1851,9 @@ static int ftgmac100_probe(struct platform_device *pdev)
goto err_setup_mdio;
}
+ if (priv->is_aspeed)
+ ftgmac100_setup_clk(priv);
+
/* Default ring sizes */
priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
@@ -1883,6 +1907,8 @@ static int ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
+ clk_disable_unprepare(priv->clk);
+
/* There's a small chance the reset task will have been re-queued,
* during stop, make sure it's gone before we free the structure.
*/
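ftgmac100_setup_clk() above uses the usual devm clock sequence: fetch the (unnamed) clock with devm_clk_get(), enable it, then choose the rate depending on whether NCSI caps the link at 100Mbit. A hedged sketch of the same sequence in isolation, with simplified error handling and illustrative names:

/* Illustration only: devm clock get/enable/set-rate sequence. */
static int example_setup_clk(struct device *dev, bool ncsi_100mbit)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* the driver above treats a missing clock as optional */

	clk_prepare_enable(clk);
	return clk_set_rate(clk, ncsi_100mbit ? 25000000 : 100000000);
}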
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 66928a922824..aecc76504b69 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -402,6 +402,7 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
struct page *page;
dma_addr_t map;
int length;
+ bool ret;
rxdes = ftmac100_rx_locate_first_segment(priv);
if (!rxdes)
@@ -416,8 +417,8 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
* It is impossible to get multi-segment packets
* because we always provide big enough receive buffers.
*/
- if (unlikely(!ftmac100_rxdes_last_segment(rxdes)))
- BUG();
+ ret = ftmac100_rxdes_last_segment(rxdes);
+ BUG_ON(!ret);
/* start processing */
skb = netdev_alloc_skb_ip_align(netdev, 128);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index e92859dab7ae..ae55da60ed0e 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
RXFSD = 0x00000800, /* first descriptor */
RXLSD = 0x00000400, /* last descriptor */
ErrorSummary = 0x80, /* error summary */
- RUNT = 0x40, /* runt packet received */
- LONG = 0x20, /* long packet received */
+ RUNTPKT = 0x40, /* runt packet received */
+ LONGPKT = 0x20, /* long packet received */
FAE = 0x10, /* frame align error */
CRC = 0x08, /* crc error */
RXER = 0x04, /* receive error */
@@ -426,8 +426,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
-static void netdev_timer(unsigned long data);
-static void reset_timer(unsigned long data);
+static void netdev_timer(struct timer_list *t);
+static void reset_timer(struct timer_list *t);
static void fealnx_tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
@@ -909,17 +909,13 @@ static int netdev_open(struct net_device *dev)
printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
/* Set the timer to check for link beat. */
- init_timer(&np->timer);
+ timer_setup(&np->timer, netdev_timer, 0);
np->timer.expires = RUN_AT(3 * HZ);
- np->timer.data = (unsigned long) dev;
- np->timer.function = netdev_timer;
/* timer handler */
add_timer(&np->timer);
- init_timer(&np->reset_timer);
- np->reset_timer.data = (unsigned long) dev;
- np->reset_timer.function = reset_timer;
+ timer_setup(&np->reset_timer, reset_timer, 0);
np->reset_timer_armed = 0;
return rc;
}
@@ -1082,10 +1078,10 @@ static void allocate_rx_buffers(struct net_device *dev)
}
-static void netdev_timer(unsigned long data)
+static void netdev_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = np->mii.dev;
void __iomem *ioaddr = np->mem;
int old_crvalue = np->crvalue;
unsigned int old_linkok = np->linkok;
@@ -1171,10 +1167,10 @@ static void enable_rxtx(struct net_device *dev)
}
-static void reset_timer(unsigned long data)
+static void reset_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, reset_timer);
+ struct net_device *dev = np->mii.dev;
unsigned long flags;
printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
@@ -1632,7 +1628,7 @@ static int netdev_rx(struct net_device *dev)
dev->name, rx_status);
dev->stats.rx_errors++; /* end of a packet. */
- if (rx_status & (LONG | RUNT))
+ if (rx_status & (LONGPKT | RUNTPKT))
dev->stats.rx_length_errors++;
if (rx_status & RXER)
dev->stats.rx_frame_errors++;
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index c46df5c82af5..ed8ad0fefbda 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Freescale network device drivers.
#
diff --git a/drivers/net/ethernet/freescale/dpaa/Makefile b/drivers/net/ethernet/freescale/dpaa/Makefile
index 7db50bccb137..4f23e79232fa 100644
--- a/drivers/net/ethernet/freescale/dpaa/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Freescale DPAA Ethernet controllers
#
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 42258060f142..7caa8da48421 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -351,7 +351,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
u8 num_tc;
int i;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
@@ -385,34 +385,19 @@ out:
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
- struct platform_device *of_dev;
struct dpaa_eth_data *eth_data;
- struct device *dpaa_dev, *dev;
- struct device_node *mac_node;
+ struct device *dpaa_dev;
struct mac_device *mac_dev;
dpaa_dev = &pdev->dev;
eth_data = dpaa_dev->platform_data;
- if (!eth_data)
+ if (!eth_data) {
+ dev_err(dpaa_dev, "eth_data missing\n");
return ERR_PTR(-ENODEV);
-
- mac_node = eth_data->mac_node;
-
- of_dev = of_find_device_by_node(mac_node);
- if (!of_dev) {
- dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n",
- mac_node);
- of_node_put(mac_node);
- return ERR_PTR(-EINVAL);
}
- of_node_put(mac_node);
-
- dev = &of_dev->dev;
-
- mac_dev = dev_get_drvdata(dev);
+ mac_dev = eth_data->mac_dev;
if (!mac_dev) {
- dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
- dev_name(dev));
+ dev_err(dpaa_dev, "mac_dev missing\n");
return ERR_PTR(-EINVAL);
}
@@ -1736,6 +1721,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
/* Iterate through the SGT entries and add data buffers to the skb */
sgt = vaddr + fd_off;
+ skb = NULL;
for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
/* Extension bit is not supported */
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
@@ -1753,7 +1739,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
DMA_FROM_DEVICE);
- if (i == 0) {
+ if (!skb) {
sz = dpaa_bp->size +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
skb = build_skb(sg_vaddr, sz);
@@ -2435,6 +2421,44 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
}
}
+static void dpaa_adjust_link(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+ mac_dev->adjust_link(mac_dev);
+}
+
+static int dpaa_phy_init(struct net_device *net_dev)
+{
+ struct mac_device *mac_dev;
+ struct phy_device *phy_dev;
+ struct dpaa_priv *priv;
+
+ priv = netdev_priv(net_dev);
+ mac_dev = priv->mac_dev;
+
+ phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ &dpaa_adjust_link, 0,
+ mac_dev->phy_if);
+ if (!phy_dev) {
+ netif_err(priv, ifup, net_dev, "init_phy() failed\n");
+ return -ENODEV;
+ }
+
+ /* Remove any features not supported by the controller */
+ phy_dev->supported &= mac_dev->if_support;
+ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phy_dev->advertising = phy_dev->supported;
+
+ mac_dev->phy_dev = phy_dev;
+ net_dev->phydev = phy_dev;
+
+ return 0;
+}
+
static int dpaa_open(struct net_device *net_dev)
{
struct mac_device *mac_dev;
@@ -2445,12 +2469,9 @@ static int dpaa_open(struct net_device *net_dev)
mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
- if (!net_dev->phydev) {
- netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- err = -ENODEV;
+ err = dpaa_phy_init(net_dev);
+ if (err)
goto phy_init_failed;
- }
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
err = fman_port_enable(mac_dev->port[i]);
@@ -2649,7 +2670,6 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
static int dpaa_eth_probe(struct platform_device *pdev)
{
struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
- struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev = NULL;
struct dpaa_fq *dpaa_fq, *tmp;
struct dpaa_priv *priv = NULL;
@@ -2658,7 +2678,13 @@ static int dpaa_eth_probe(struct platform_device *pdev)
int err = 0, i, channel;
struct device *dev;
- dev = &pdev->dev;
+ /* device used for DMA mapping */
+ dev = pdev->dev.parent;
+ err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err) {
+ dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+ return err;
+ }
/* Allocate this early, so we can store relevant information in
* the private area
@@ -2666,7 +2692,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
- goto alloc_etherdev_mq_failed;
+ return -ENOMEM;
}
/* Do this here, so we can be verbose early */
@@ -2682,7 +2708,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
if (IS_ERR(mac_dev)) {
dev_err(dev, "dpaa_mac_dev_get() failed\n");
err = PTR_ERR(mac_dev);
- goto mac_probe_failed;
+ goto free_netdev;
}
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
@@ -2700,21 +2726,13 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
- /* device used for DMA mapping */
- set_dma_ops(dev, get_dma_ops(&pdev->dev));
- err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
- if (err) {
- dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
- goto dev_mask_failed;
- }
-
/* bp init */
for (i = 0; i < DPAA_BPS_NUM; i++) {
- int err;
-
dpaa_bps[i] = dpaa_bp_alloc(dev);
- if (IS_ERR(dpaa_bps[i]))
- return PTR_ERR(dpaa_bps[i]);
+ if (IS_ERR(dpaa_bps[i])) {
+ err = PTR_ERR(dpaa_bps[i]);
+ goto free_dpaa_bps;
+ }
/* the raw size of the buffers used for reception */
dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
/* avoid runtime computations by keeping the usable size here */
@@ -2722,11 +2740,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
dpaa_bps[i]->dev = dev;
err = dpaa_bp_alloc_pool(dpaa_bps[i]);
- if (err < 0) {
- dpaa_bps_free(priv);
- priv->dpaa_bps[i] = NULL;
- goto bp_create_failed;
- }
+ if (err < 0)
+ goto free_dpaa_bps;
priv->dpaa_bps[i] = dpaa_bps[i];
}
@@ -2737,7 +2752,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
if (err < 0) {
dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
- goto fq_probe_failed;
+ goto free_dpaa_bps;
}
priv->mac_dev = mac_dev;
@@ -2746,7 +2761,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
if (channel < 0) {
dev_err(dev, "dpaa_get_channel() failed\n");
err = channel;
- goto get_channel_failed;
+ goto free_dpaa_bps;
}
priv->channel = (u16)channel;
@@ -2766,20 +2781,20 @@ static int dpaa_eth_probe(struct platform_device *pdev)
err = dpaa_eth_cgr_init(priv);
if (err < 0) {
dev_err(dev, "Error initializing CGR\n");
- goto tx_cgr_init_failed;
+ goto free_dpaa_bps;
}
err = dpaa_ingress_cgr_init(priv);
if (err < 0) {
dev_err(dev, "Error initializing ingress CGR\n");
- goto rx_cgr_init_failed;
+ goto delete_egress_cgr;
}
/* Add the FQs to the interface, and make them active */
list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
err = dpaa_fq_init(dpaa_fq, false);
if (err < 0)
- goto fq_alloc_failed;
+ goto free_dpaa_fqs;
}
priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
@@ -2789,7 +2804,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
&priv->buf_layout[0], dev);
if (err)
- goto init_ports_failed;
+ goto free_dpaa_fqs;
/* Rx traffic distribution based on keygen hashing defaults to on */
priv->keygen_in_use = true;
@@ -2798,11 +2813,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
if (!priv->percpu_priv) {
dev_err(dev, "devm_alloc_percpu() failed\n");
err = -ENOMEM;
- goto alloc_percpu_failed;
- }
- for_each_possible_cpu(i) {
- percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- memset(percpu_priv, 0, sizeof(*percpu_priv));
+ goto free_dpaa_fqs;
}
priv->num_tc = 1;
@@ -2811,11 +2822,11 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
if (err < 0)
- goto napi_add_failed;
+ goto delete_dpaa_napi;
err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
if (err < 0)
- goto netdev_init_failed;
+ goto delete_dpaa_napi;
dpaa_eth_sysfs_init(&net_dev->dev);
@@ -2824,32 +2835,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
return 0;
-netdev_init_failed:
-napi_add_failed:
+delete_dpaa_napi:
dpaa_napi_del(net_dev);
-alloc_percpu_failed:
-init_ports_failed:
+free_dpaa_fqs:
dpaa_fq_free(dev, &priv->dpaa_fq_list);
-fq_alloc_failed:
qman_delete_cgr_safe(&priv->ingress_cgr);
qman_release_cgrid(priv->ingress_cgr.cgrid);
-rx_cgr_init_failed:
+delete_egress_cgr:
qman_delete_cgr_safe(&priv->cgr_data.cgr);
qman_release_cgrid(priv->cgr_data.cgr.cgrid);
-tx_cgr_init_failed:
-get_channel_failed:
+free_dpaa_bps:
dpaa_bps_free(priv);
-bp_create_failed:
-fq_probe_failed:
-dev_mask_failed:
-mac_probe_failed:
+free_netdev:
dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
-alloc_etherdev_mq_failed:
- for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
- if (atomic_read(&dpaa_bps[i]->refs) == 0)
- devm_kfree(dev, dpaa_bps[i]);
- }
+
return err;
}
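The dpaa_eth_probe() rework above renames the error labels after the cleanup they perform rather than the step that failed, so each failure jumps to the first undo it needs and falls through the rest in reverse order of acquisition. A generic sketch of that goto-unwind convention; every name below is hypothetical:

/* Illustration only: error labels named for the undo action. */
static int example_probe(struct device *dev)
{
	int err;

	err = example_alloc_bps(dev);
	if (err)
		return err;

	err = example_alloc_fqs(dev);
	if (err)
		goto free_bps;

	err = example_register(dev);
	if (err)
		goto free_fqs;

	return 0;

free_fqs:
	example_free_fqs(dev);
free_bps:
	example_free_bps(dev);
	return err;
}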
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ede1876a9a19..5385074b3b7d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/****************************************************************************/
/*
@@ -582,12 +583,11 @@ struct fec_enet_private {
u64 ethtool_stats[0];
};
-void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_init(struct platform_device *pdev, int irq_idx);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
-uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3dc2d771a222..610573855213 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1602,10 +1602,6 @@ fec_enet_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
complete(&fep->mdio_done);
}
-
- if (fep->ptp_clock)
- if (fec_ptp_check_pps_event(fep))
- ret = IRQ_HANDLED;
return ret;
}
@@ -3312,6 +3308,19 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
}
+static int fec_enet_get_irq_cnt(struct platform_device *pdev)
+{
+ int irq_cnt = platform_irq_count(pdev);
+
+ if (irq_cnt > FEC_IRQ_NUM)
+ irq_cnt = FEC_IRQ_NUM; /* last for pps */
+ else if (irq_cnt == 2)
+ irq_cnt = 1; /* last for pps */
+ else if (irq_cnt <= 0)
+ irq_cnt = 1; /* At least 1 irq is needed */
+ return irq_cnt;
+}
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -3325,6 +3334,8 @@ fec_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node, *phy_node;
int num_tx_qs;
int num_rx_qs;
+ char irq_name[8];
+ int irq_cnt;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3465,18 +3476,20 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_reset;
+ irq_cnt = fec_enet_get_irq_cnt(pdev);
if (fep->bufdesc_ex)
- fec_ptp_init(pdev);
+ fec_ptp_init(pdev, irq_cnt);
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
+ for (i = 0; i < irq_cnt; i++) {
+ sprintf(irq_name, "int%d", i);
+ irq = platform_get_irq_byname(pdev, irq_name);
+ if (irq < 0)
+ irq = platform_get_irq(pdev, i);
if (irq < 0) {
- if (i)
- break;
ret = irq;
goto failed_irq;
}
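The probe changes above look interrupts up by name ("int0", "int1", ...) first and fall back to index-based lookup for device trees that predate the named resources. A hedged sketch of that fallback; the wrapper name is illustrative:

/* Illustration only: prefer a named IRQ, fall back to the index. */
static int example_get_irq(struct platform_device *pdev, int i)
{
	char name[8];
	int irq;

	snprintf(name, sizeof(name), "int%d", i);
	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0)
		irq = platform_get_irq(pdev, i);
	return irq;	/* still negative if neither lookup succeeds */
}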
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 6ebad3fac81d..f81439796ac7 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -549,6 +549,37 @@ static void fec_time_keep(struct work_struct *work)
schedule_delayed_work(&fep->time_keep, HZ);
}
+/* This function checks the pps event and reloads the timer compare counter. */
+static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u32 val;
+ u8 channel = fep->pps_channel;
+ struct ptp_clock_event event;
+
+ val = readl(fep->hwp + FEC_TCSR(channel));
+ if (val & FEC_T_TF_MASK) {
+ /* Write the next next compare value to the register (not just the
+ * next one, as the spec would suggest)
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
+ do {
+ writel(val, fep->hwp + FEC_TCSR(channel));
+ } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
+
+ /* Update the counter; */
+ fep->next_counter = (fep->next_counter + fep->reload_period) &
+ fep->cc.mask;
+
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
/**
* fec_ptp_init
* @ndev: The FEC network adapter
@@ -558,10 +589,12 @@ static void fec_time_keep(struct work_struct *work)
* cyclecounter init routine and exits.
*/
-void fec_ptp_init(struct platform_device *pdev)
+void fec_ptp_init(struct platform_device *pdev, int irq_idx)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int irq;
+ int ret;
fep->ptp_caps.owner = THIS_MODULE;
snprintf(fep->ptp_caps.name, 16, "fec ptp");
@@ -587,6 +620,20 @@ void fec_ptp_init(struct platform_device *pdev)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+ irq = platform_get_irq_byname(pdev, "pps");
+ if (irq < 0)
+ irq = platform_get_irq(pdev, irq_idx);
+ /* Failure to get an irq is not fatal,
+ * only the PTP_CLOCK_PPS clock events should stop
+ */
+ if (irq >= 0) {
+ ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
+ 0, pdev->name, ndev);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "request for pps irq failed(%d)\n",
+ ret);
+ }
+
fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
if (IS_ERR(fep->ptp_clock)) {
fep->ptp_clock = NULL;
@@ -605,36 +652,3 @@ void fec_ptp_stop(struct platform_device *pdev)
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
-
-/**
- * fec_ptp_check_pps_event
- * @fep: the fec_enet_private structure handle
- *
- * This function check the pps event and reload the timer compare counter.
- */
-uint fec_ptp_check_pps_event(struct fec_enet_private *fep)
-{
- u32 val;
- u8 channel = fep->pps_channel;
- struct ptp_clock_event event;
-
- val = readl(fep->hwp + FEC_TCSR(channel));
- if (val & FEC_T_TF_MASK) {
- /* Write the next next compare(not the next according the spec)
- * value to the register
- */
- writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
- do {
- writel(val, fep->hwp + FEC_TCSR(channel));
- } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
-
- /* Update the counter; */
- fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
-
- event.type = PTP_CLOCK_PPS;
- ptp_clock_event(fep->ptp_clock, &event);
- return 1;
- }
-
- return 0;
-}
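The new fec_pps_interrupt() handler above follows the usual pattern for signalling a pulse-per-second event to the PTP core from hard-IRQ context. A stripped-down, hypothetical variant of that pattern (the device structure and status register are invented; only the ptp_clock_event() API is real):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/ptp_clock_kernel.h>

struct example_phc {
	struct ptp_clock *ptp_clock;	/* from ptp_clock_register() */
	void __iomem *status_reg;	/* hypothetical PPS status register */
};

static irqreturn_t example_pps_irq(int irq, void *dev_id)
{
	struct example_phc *phc = dev_id;
	struct ptp_clock_event event;

	if (!(readl(phc->status_reg) & BIT(0)))
		return IRQ_NONE;		/* not our event */

	writel(BIT(0), phc->status_reg);	/* ack the hardware */

	event.type = PTP_CLOCK_PPS;		/* PPS events carry no payload */
	ptp_clock_event(phc->ptp_clock, &event);

	return IRQ_HANDLED;
}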
diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile
index 2c38119b172c..b618091db091 100644
--- a/drivers/net/ethernet/freescale/fman/Makefile
+++ b/drivers/net/ethernet/freescale/fman/Makefile
@@ -1,9 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman
-obj-$(CONFIG_FSL_FMAN) += fsl_fman.o
-obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o
-obj-$(CONFIG_FSL_FMAN) += fsl_mac.o
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman.o
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman_port.o
+obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_mac.o
-fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
-fsl_fman_port-objs := fman_port.o
-fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
+fsl_dpaa_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o
+fsl_dpaa_fman_port-objs := fman_port.o
+fsl_dpaa_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 1789b206be58..6552d68ea6e1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1339,8 +1339,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
switch (port->port_type) {
case FMAN_PORT_TYPE_RX:
set_rx_dflt_cfg(port, params);
+ /* fall through */
case FMAN_PORT_TYPE_TX:
set_tx_dflt_cfg(port, params, &port->dts_params);
+ /* fall through */
default:
set_dflt_cfg(port, params);
}
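The two comments added to fman_port_config() mark the missing break statements as intentional, so reviewers and -Wimplicit-fallthrough builds do not flag them. The same cascade pattern in a self-contained sketch (all names are placeholders):

enum example_port_type { EXAMPLE_PORT_RX, EXAMPLE_PORT_TX, EXAMPLE_PORT_OTHER };

struct example_port { int rx_cfg, tx_cfg, common_cfg; };

static void example_port_config(struct example_port *port,
				enum example_port_type type)
{
	switch (type) {
	case EXAMPLE_PORT_RX:
		port->rx_cfg = 1;
		/* fall through - RX ports also get the TX defaults */
	case EXAMPLE_PORT_TX:
		port->tx_cfg = 1;
		/* fall through - every port gets the common defaults */
	default:
		port->common_cfg = 1;
		break;
	}
}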
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 387eb4a88b72..88c0a0636b44 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -57,9 +57,7 @@ struct mac_priv_s {
struct device *dev;
void __iomem *vaddr;
u8 cell_index;
- phy_interface_t phy_if;
struct fman *fman;
- struct device_node *phy_node;
struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
@@ -106,7 +104,7 @@ static void set_fman_mac_params(struct mac_device *mac_dev,
resource_size(mac_dev->res));
memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
params->max_speed = priv->max_speed;
- params->phy_if = priv->phy_if;
+ params->phy_if = mac_dev->phy_if;
params->basex_if = false;
params->mac_id = priv->cell_index;
params->fm = (void *)priv->fman;
@@ -419,15 +417,12 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
}
EXPORT_SYMBOL(fman_get_pause_cfg);
-static void adjust_link_void(struct net_device *net_dev)
+static void adjust_link_void(struct mac_device *mac_dev)
{
}
-static void adjust_link_dtsec(struct net_device *net_dev)
+static void adjust_link_dtsec(struct mac_device *mac_dev)
{
- struct device *dev = net_dev->dev.parent;
- struct dpaa_eth_data *eth_data = dev->platform_data;
- struct mac_device *mac_dev = eth_data->mac_dev;
struct phy_device *phy_dev = mac_dev->phy_dev;
struct fman_mac *fman_mac;
bool rx_pause, tx_pause;
@@ -444,14 +439,12 @@ static void adjust_link_dtsec(struct net_device *net_dev)
fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
if (err < 0)
- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
+ dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-static void adjust_link_memac(struct net_device *net_dev)
+static void adjust_link_memac(struct mac_device *mac_dev)
{
- struct device *dev = net_dev->dev.parent;
- struct dpaa_eth_data *eth_data = dev->platform_data;
- struct mac_device *mac_dev = eth_data->mac_dev;
struct phy_device *phy_dev = mac_dev->phy_dev;
struct fman_mac *fman_mac;
bool rx_pause, tx_pause;
@@ -463,60 +456,12 @@ static void adjust_link_memac(struct net_device *net_dev)
fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
if (err < 0)
- netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err);
-}
-
-/* Initializes driver's PHY state, and attaches to the PHY.
- * Returns 0 on success.
- */
-static struct phy_device *init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev,
- void (*adj_lnk)(struct net_device *))
-{
- struct phy_device *phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0,
- priv->phy_if);
- if (!phy_dev) {
- netdev_err(net_dev, "Could not connect to PHY\n");
- return NULL;
- }
-
- /* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
- /* Enable the symmetric and asymmetric PAUSE frame advertisements,
- * as most of the PHY drivers do not enable them by default.
- */
- phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
- phy_dev->advertising = phy_dev->supported;
-
- mac_dev->phy_dev = phy_dev;
-
- return phy_dev;
-}
-
-static struct phy_device *dtsec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
-{
- return init_phy(net_dev, mac_dev, &adjust_link_dtsec);
-}
-
-static struct phy_device *tgec_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
-{
- return init_phy(net_dev, mac_dev, adjust_link_void);
-}
-
-static struct phy_device *memac_init_phy(struct net_device *net_dev,
- struct mac_device *mac_dev)
-{
- return init_phy(net_dev, mac_dev, &adjust_link_memac);
+ dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
static void setup_dtsec(struct mac_device *mac_dev)
{
- mac_dev->init_phy = dtsec_init_phy;
mac_dev->init = dtsec_initialization;
mac_dev->set_promisc = dtsec_set_promiscuous;
mac_dev->change_addr = dtsec_modify_mac_address;
@@ -528,14 +473,13 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
-
+ mac_dev->adjust_link = adjust_link_dtsec;
mac_dev->priv->enable = dtsec_enable;
mac_dev->priv->disable = dtsec_disable;
}
static void setup_tgec(struct mac_device *mac_dev)
{
- mac_dev->init_phy = tgec_init_phy;
mac_dev->init = tgec_initialization;
mac_dev->set_promisc = tgec_set_promiscuous;
mac_dev->change_addr = tgec_modify_mac_address;
@@ -547,14 +491,13 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
-
+ mac_dev->adjust_link = adjust_link_void;
mac_dev->priv->enable = tgec_enable;
mac_dev->priv->disable = tgec_disable;
}
static void setup_memac(struct mac_device *mac_dev)
{
- mac_dev->init_phy = memac_init_phy;
mac_dev->init = memac_initialization;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -566,7 +509,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
-
+ mac_dev->adjust_link = adjust_link_memac;
mac_dev->priv->enable = memac_enable;
mac_dev->priv->disable = memac_disable;
}
@@ -599,8 +542,7 @@ static const u16 phy2speed[] = {
};
static struct platform_device *dpaa_eth_add_device(int fman_id,
- struct mac_device *mac_dev,
- struct device_node *node)
+ struct mac_device *mac_dev)
{
struct platform_device *pdev;
struct dpaa_eth_data data;
@@ -613,17 +555,14 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
data.mac_dev = mac_dev;
data.mac_hw_id = priv->cell_index;
data.fman_hw_id = fman_id;
- data.mac_node = node;
mutex_lock(&eth_lock);
-
pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
if (!pdev) {
ret = -ENOMEM;
goto no_mem;
}
- pdev->dev.of_node = node;
pdev->dev.parent = priv->dev;
set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
@@ -676,7 +615,6 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
if (!mac_dev) {
err = -ENOMEM;
- dev_err(dev, "devm_kzalloc() = %d\n", err);
goto _return;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -706,9 +644,6 @@ static int mac_probe(struct platform_device *_of_dev)
goto _return;
}
- /* Register mac_dev */
- dev_set_drvdata(dev, mac_dev);
-
INIT_LIST_HEAD(&priv->mc_addr_list);
/* Get the FM node */
@@ -717,7 +652,7 @@ static int mac_probe(struct platform_device *_of_dev)
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
err = -EINVAL;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
of_dev = of_find_device_by_node(dev_node);
@@ -751,7 +686,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (err < 0) {
dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
mac_node, err);
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
mac_dev->res = __devm_request_region(dev,
@@ -761,7 +696,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!mac_dev->res) {
dev_err(dev, "__devm_request_mem_region(mac) failed\n");
err = -EBUSY;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
@@ -769,16 +704,12 @@ static int mac_probe(struct platform_device *_of_dev)
if (!priv->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
err = -EIO;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
if (!of_device_is_available(mac_node)) {
- devm_iounmap(dev, priv->vaddr);
- __devm_release_region(dev, fman_get_mem_region(priv->fman),
- res.start, res.end + 1 - res.start);
- devm_kfree(dev, mac_dev);
- dev_set_drvdata(dev, NULL);
- return -ENODEV;
+ err = -ENODEV;
+ goto _return_of_get_parent;
}
/* Get the cell-index */
@@ -786,7 +717,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
err = -EINVAL;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
priv->cell_index = (u8)val;
@@ -795,7 +726,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!mac_addr) {
dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
err = -EINVAL;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr));
@@ -805,14 +736,14 @@ static int mac_probe(struct platform_device *_of_dev)
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
err = nph;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
err = -EINVAL;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -851,13 +782,13 @@ static int mac_probe(struct platform_device *_of_dev)
mac_node);
phy_if = PHY_INTERFACE_MODE_SGMII;
}
- priv->phy_if = phy_if;
+ mac_dev->phy_if = phy_if;
- priv->speed = phy2speed[priv->phy_if];
+ priv->speed = phy2speed[mac_dev->phy_if];
priv->max_speed = priv->speed;
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
- if (priv->phy_if == PHY_INTERFACE_MODE_SGMII)
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
SUPPORTED_100baseT_Half);
@@ -866,30 +797,31 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
- if (priv->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
mac_dev->if_support = SUPPORTED_10000baseT_Full;
/* Get the rest of the PHY information */
- priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) {
+ mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
struct phy_device *phy;
err = of_phy_register_fixed_link(mac_node);
if (err)
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
GFP_KERNEL);
if (!priv->fixed_link) {
err = -ENOMEM;
- goto _return_dev_set_drvdata;
+ goto _return_of_get_parent;
}
- priv->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(priv->phy_node);
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
if (!phy) {
err = -EINVAL;
- goto _return_dev_set_drvdata;
+ of_node_put(mac_dev->phy_node);
+ goto _return_of_get_parent;
}
priv->fixed_link->link = phy->link;
@@ -904,8 +836,8 @@ static int mac_probe(struct platform_device *_of_dev)
err = mac_dev->init(mac_dev);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
- of_node_put(priv->phy_node);
- goto _return_dev_set_drvdata;
+ of_node_put(mac_dev->phy_node);
+ goto _return_of_get_parent;
}
/* pause frame autonegotiation enabled */
@@ -926,7 +858,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2],
mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]);
- priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node);
+ priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
priv->cell_index);
@@ -937,9 +869,8 @@ static int mac_probe(struct platform_device *_of_dev)
_return_of_node_put:
of_node_put(dev_node);
-_return_dev_set_drvdata:
+_return_of_get_parent:
kfree(priv->fixed_link);
- dev_set_drvdata(dev, NULL);
_return:
return err;
}
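With init_phy() removed from mac.c and phy_node/phy_if moved into struct mac_device, connecting the PHY is expected to happen in the Ethernet driver that owns the net_device, calling back into the MAC layer through the new adjust_link hook. A hypothetical consumer-side sketch, not part of this patch (example_netdev_to_mac() stands in for however the driver reaches its mac_device):

#include <linux/of_mdio.h>
#include <linux/phy.h>
#include "mac.h"

/* phylib callback wrapper: forward link changes to the MAC layer hook. */
static void example_adjust_link(struct net_device *net_dev)
{
	struct mac_device *mac_dev = example_netdev_to_mac(net_dev); /* hypothetical */

	mac_dev->adjust_link(mac_dev);
}

static int example_connect_phy(struct net_device *net_dev,
			       struct mac_device *mac_dev)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
				 example_adjust_link, 0, mac_dev->phy_if);
	if (!phy_dev)
		return -ENODEV;

	/* Keep only modes the MAC supports, and advertise pause frames. */
	phy_dev->supported &= mac_dev->if_support;
	phy_dev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phy_dev->advertising = phy_dev->supported;

	mac_dev->phy_dev = phy_dev;
	return 0;
}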
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index d7313f0c5135..eefb3357e304 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -50,6 +50,8 @@ struct mac_device {
struct fman_port *port[2];
u32 if_support;
struct phy_device *phy_dev;
+ phy_interface_t phy_if;
+ struct device_node *phy_node;
bool autoneg_pause;
bool rx_pause_req;
@@ -58,11 +60,10 @@ struct mac_device {
bool tx_pause_active;
bool promisc;
- struct phy_device *(*init_phy)(struct net_device *net_dev,
- struct mac_device *mac_dev);
int (*init)(struct mac_device *mac_dev);
int (*start)(struct mac_device *mac_dev);
int (*stop)(struct mac_device *mac_dev);
+ void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
int (*set_multi)(struct net_device *net_dev,
@@ -82,7 +83,6 @@ struct mac_device {
};
struct dpaa_eth_data {
- struct device_node *mac_node;
struct mac_device *mac_dev;
int mac_hw_id;
int fman_hw_id;
diff --git a/drivers/net/ethernet/freescale/fs_enet/Makefile b/drivers/net/ethernet/freescale/fs_enet/Makefile
index d4a305ee3455..1821f94ef619 100644
--- a/drivers/net/ethernet/freescale/fs_enet/Makefile
+++ b/drivers/net/ethernet/freescale/fs_enet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Freescale Ethernet controllers
#
diff --git a/drivers/net/ethernet/freescale/fs_enet/fec.h b/drivers/net/ethernet/freescale/fs_enet/fec.h
index b9fe5bde432a..7832db71dcb9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fec.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_ENET_FEC_H
#define FS_ENET_FEC_H
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 753259091b22..7892f2f0c6b5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1023,8 +1023,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->ethtool_ops = &fs_ethtool_ops;
- init_timer(&fep->phy_timer_list);
-
netif_carrier_off(ndev);
ndev->features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ce516c8a62a..92e06b37a199 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef FS_ENET_H
#define FS_ENET_H
@@ -137,7 +138,6 @@ struct fs_enet_private {
cbd_t __iomem *cur_rx;
cbd_t __iomem *cur_tx;
int tx_free;
- struct timer_list phy_timer_list;
const struct phy_info *phy;
u32 msg_enable;
struct mii_if_info mii_if;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f77ba9fa257b..a96b838cffce 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3857,8 +3857,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
}
if (netif_msg_probe(&debug))
- pr_info("UCC%1d at 0x%8x (irq = %d)\n",
- ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ pr_info("UCC%1d at 0x%8llx (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1,
+ (u64)ug_info->uf_info.regs,
ug_info->uf_info.irq);
/* Create an ethernet device instance */
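The ucc_geth change above fixes a printk format mismatch by widening regs to u64 before printing it with %llx, presumably because the register base can be wider than unsigned int on 64-bit configurations. Two equivalent ways to print such a value, with made-up arguments:

#include <linux/kernel.h>
#include <linux/types.h>

/* When a register base may be 32 or 64 bits wide depending on the platform,
 * either cast it up and use %llx, or pass it by reference with %pa.
 */
static void example_report_regs(phys_addr_t regs, int irq)
{
	pr_info("controller at 0x%llx (irq = %d)\n", (u64)regs, irq);
	pr_info("controller at %pa (irq = %d)\n", &regs, irq);
}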
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 91c7bdb9b43c..30000b6aa7b8 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -78,7 +78,7 @@ config HNS_ENET
config HNS3
tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
- depends on PCI
+ depends on PCI
---help---
This selects the framework support for Hisilicon Network Subsystem 3.
This layer facilitates clients like ENET, RoCE and user-space ethernet
@@ -87,7 +87,7 @@ config HNS3
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
+ depends on PCI_MSI
depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
@@ -96,11 +96,20 @@ config HNS3_HCLGE
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
+ depends on 64BIT && PCI
depends on HNS3 && HNS3_HCLGE
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
devices and their associated operations.
+config HNS3_DCB
+ bool "Hisilicon HNS3 Data Center Bridge Support"
+ default n
+ depends on HNS3 && HNS3_HCLGE && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
+
+ If unsure, say N.
+
endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 3828c435c18f..7f76d412047a 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the HISILICON network device drivers.
#
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0cec06bec63e..340e28211135 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
unsigned int count;
smp_rmb();
- count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
if (count == 0)
goto out;
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t phys;
smp_rmb();
- count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
if (count == (TX_DESC_NUM - 1)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
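The hip04_eth hunks are part of the tree-wide switch from ACCESS_ONCE() to READ_ONCE(), which keeps the single-access semantics but also works on non-scalar types. The lock-free head/tail sampling the driver relies on, reduced to an illustrative ring helper:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct example_ring {
	unsigned int head;	/* written by the producer */
	unsigned int tail;	/* written by the consumer */
};

/* Consumer side: sample the producer's index exactly once, without tearing. */
static unsigned int example_pending(struct example_ring *r, unsigned int size)
{
	unsigned int head;

	smp_rmb();			/* pairs with the producer's barrier */
	head = READ_ONCE(r->head);

	return (head - READ_ONCE(r->tail)) & (size - 1);
}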
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
index 6010c83e38d8..7aa623b9c82a 100644
--- a/drivers/net/ethernet/hisilicon/hns/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the HISILICON network device drivers.
#
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 36520634c96a..1ccb6443d2ed 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2159,9 +2159,9 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv)
(void)schedule_work(&priv->service_task);
}
-static void hns_nic_service_timer(unsigned long data)
+static void hns_nic_service_timer(struct timer_list *t)
{
- struct hns_nic_priv *priv = (struct hns_nic_priv *)data;
+ struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -2369,8 +2369,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->enet_ver = AE_VERSION_2;
ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
- if (IS_ERR_OR_NULL(ae_node)) {
- ret = PTR_ERR(ae_node);
+ if (!ae_node) {
+ ret = -ENODEV;
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
@@ -2451,8 +2451,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(ndev);
- setup_timer(&priv->service_timer, hns_nic_service_timer,
- (unsigned long)priv);
+ timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
INIT_WORK(&priv->service_task, hns_nic_service_task);
set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
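The hns_enet change follows the timer-API conversion: the callback now receives the struct timer_list pointer and recovers its container with from_timer() instead of an unsigned long cookie. A minimal sketch with an invented private structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_priv {
	struct timer_list service_timer;
	/* ... driver state ... */
};

static void example_service_timer(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer field name. */
	struct example_priv *priv = from_timer(priv, t, service_timer);

	/* do periodic work, then re-arm */
	mod_timer(&priv->service_timer, jiffies + HZ);
}

static void example_init(struct example_priv *priv)
{
	/* Replaces setup_timer(&priv->service_timer, fn, (unsigned long)priv). */
	timer_setup(&priv->service_timer, example_service_timer, 0);
}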
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 1a01cadfe5f3..67c59e1039f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -28,6 +28,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dcbnl.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -109,6 +110,21 @@ enum hnae3_media_type {
HNAE3_MEDIA_TYPE_BACKPLANE,
};
+enum hnae3_reset_notify_type {
+ HNAE3_UP_CLIENT,
+ HNAE3_DOWN_CLIENT,
+ HNAE3_INIT_CLIENT,
+ HNAE3_UNINIT_CLIENT,
+};
+
+enum hnae3_reset_type {
+ HNAE3_FUNC_RESET,
+ HNAE3_CORE_RESET,
+ HNAE3_GLOBAL_RESET,
+ HNAE3_IMP_RESET,
+ HNAE3_NONE_RESET,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -131,6 +147,9 @@ struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
void (*link_status_change)(struct hnae3_handle *handle, bool state);
+ int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
+ int (*reset_notify)(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -337,6 +356,10 @@ struct hnae3_ae_ops {
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
+ int (*set_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
@@ -361,6 +384,23 @@ struct hnae3_ae_ops {
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
u16 vlan, u8 qos, __be16 proto);
+ void (*reset_event)(struct hnae3_handle *handle,
+ enum hnae3_reset_type reset);
+};
+
+struct hnae3_dcb_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct hnae3_handle *);
+ u8 (*setdcbx)(struct hnae3_handle *, u8);
+
+ int (*map_update)(struct hnae3_handle *);
+ int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
};
struct hnae3_ae_algo {
@@ -394,6 +434,7 @@ struct hnae3_knic_private_info {
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
+ const struct hnae3_dcb_ops *dcb_ops;
};
struct hnae3_roce_private_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 162e8a42acd0..d2b20d01a58c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,5 +7,9 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
+
obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8b511e6e0ce9..ff13d1876d9e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -62,7 +62,7 @@ static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
ring->desc = NULL;
}
-static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
+static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
struct hclge_hw *hw = &hdev->hw;
struct hclge_cmq_ring *ring =
@@ -79,12 +79,18 @@ static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
return ret;
}
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
-
return 0;
}
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+}
+
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read)
{
@@ -208,7 +214,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = desc[0].opcode;
+ opcode = le16_to_cpu(desc[0].opcode);
while (handle < num) {
desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
*desc_to_use = desc[handle];
@@ -225,7 +231,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* If the command is sync, wait for the firmware to write back,
* if multi descriptors to be sent, use the first one to check
*/
- if (HCLGE_SEND_SYNC(desc->flag)) {
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
if (hclge_cmd_csq_done(hw))
break;
@@ -244,9 +250,9 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = desc[handle].retval;
+ desc_ret = le16_to_cpu(desc[handle].retval);
else
- desc_ret = desc[0].retval;
+ desc_ret = le16_to_cpu(desc[0].retval);
if ((enum hclge_cmd_return_status)desc_ret ==
HCLGE_CMD_EXEC_SUCCESS)
@@ -276,15 +282,15 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return retval;
}
-enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
- u32 *version)
+static enum hclge_cmd_status hclge_cmd_query_firmware_version(
+ struct hclge_hw *hw, u32 *version)
{
- struct hclge_query_version *resp;
+ struct hclge_query_version_cmd *resp;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version *)desc.data;
+ resp = (struct hclge_query_version_cmd *)desc.data;
ret = hclge_cmd_send(hw, &desc, 1);
if (!ret)
@@ -293,37 +299,52 @@ enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
return ret;
}
-int hclge_cmd_init(struct hclge_dev *hdev)
+int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
- u32 version;
int ret;
/* Setup the queue entries for use cmd queue */
hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
/* Setup Tx write back timeout */
hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
/* Setup queue rings */
- ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ);
+ ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
"CSQ ring setup error %d\n", ret);
return ret;
}
- ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ);
+ ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
"CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclge_cmd_init(struct hclge_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hclge_cmd_init_regs(&hdev->hw);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
@@ -337,9 +358,6 @@ int hclge_cmd_init(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_csq:
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
}
static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
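Several hclge_cmd.c hunks wrap descriptor fields in le16_to_cpu()/le32_to_cpu(). On little-endian hosts the conversion is a no-op, which is how the missing conversions went unnoticed; a small sketch of the corrected access pattern over an invented descriptor layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct example_desc {
	__le16 opcode;		/* written by firmware in little-endian */
	__le16 retval;
	__le32 data[6];
};

static int example_check_desc(const struct example_desc *desc)
{
	u16 opcode = le16_to_cpu(desc->opcode);	/* correct on BE and LE hosts */
	u16 retval = le16_to_cpu(desc->retval);

	/* A plain "desc->retval == EXPECTED" comparison only happens to work
	 * on little-endian machines; sparse flags it when __le16 is used.
	 */
	if (retval != 0)
		return -EIO;

	return opcode;
}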
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 758cf3948131..ce5ed8845042 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -63,6 +63,11 @@ enum hclge_cmd_status {
HCLGE_ERR_CSQ_ERROR = -3,
};
+struct hclge_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+};
+
struct hclge_cmq {
struct hclge_cmq_ring csq;
struct hclge_cmq_ring crq;
@@ -221,12 +226,12 @@ enum hclge_opcode_type {
#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
#define HCLGE_RCB_INIT_FLAG_EN_B 0
#define HCLGE_RCB_INIT_FLAG_FINI_B 8
-struct hclge_config_rcb_init {
+struct hclge_config_rcb_init_cmd {
__le16 rcb_init_flag;
u8 rsv[22];
};
-struct hclge_tqp_map {
+struct hclge_tqp_map_cmd {
__le16 tqp_id; /* Absolute tqp id for in this pf */
u8 tqp_vf; /* VF id */
#define HCLGE_TQP_MAP_TYPE_PF 0
@@ -246,15 +251,15 @@ enum hclge_int_type {
HCLGE_INT_EVENT,
};
-struct hclge_ctrl_vector_chain {
+struct hclge_ctrl_vector_chain_cmd {
u8 int_vector_id;
u8 int_cause_num;
#define HCLGE_INT_TYPE_S 0
-#define HCLGE_INT_TYPE_M 0x3
+#define HCLGE_INT_TYPE_M GENMASK(1, 0)
#define HCLGE_TQP_ID_S 2
-#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M GENMASK(12, 2)
#define HCLGE_INT_GL_IDX_S 13
-#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
+#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
u8 vfid;
u8 rsv;
@@ -263,18 +268,18 @@ struct hclge_ctrl_vector_chain {
#define HCLGE_TC_NUM 8
#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */
#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */
-struct hclge_tx_buff_alloc {
+struct hclge_tx_buff_alloc_cmd {
__le16 tx_pkt_buff[HCLGE_TC_NUM];
u8 tx_buff_rsv[8];
};
-struct hclge_rx_priv_buff {
+struct hclge_rx_priv_buff_cmd {
__le16 buf_num[HCLGE_TC_NUM];
__le16 shared_buf;
u8 rsv[6];
};
-struct hclge_query_version {
+struct hclge_query_version_cmd {
__le32 firmware;
__le32 firmware_rsv[5];
};
@@ -311,6 +316,7 @@ struct hclge_tc_thrd {
struct hclge_priv_buf {
struct hclge_waterline wl; /* Waterline for low and high*/
u32 buf_size; /* TC private buffer size */
+ u32 tx_buf_size;
u32 enable; /* Enable TC private buffer or not */
};
@@ -321,15 +327,20 @@ struct hclge_shared_buf {
u32 buf_size;
};
+struct hclge_pkt_buf_alloc {
+ struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM];
+ struct hclge_shared_buf s_buf;
+};
+
#define HCLGE_RX_COM_WL_EN_B 15
-struct hclge_rx_com_wl_buf {
+struct hclge_rx_com_wl_buf_cmd {
__le16 high_wl;
__le16 low_wl;
u8 rsv[20];
};
#define HCLGE_RX_PKT_EN_B 15
-struct hclge_rx_pkt_buf {
+struct hclge_rx_pkt_buf_cmd {
__le16 high_pkt;
__le16 low_pkt;
u8 rsv[20];
@@ -342,7 +353,7 @@ struct hclge_rx_pkt_buf {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
-struct hclge_func_status {
+struct hclge_func_status_cmd {
__le32 vf_rst_state[4];
u8 pf_state;
u8 mac_id;
@@ -353,7 +364,7 @@ struct hclge_func_status {
u8 rsv[2];
};
-struct hclge_pf_res {
+struct hclge_pf_res_cmd {
__le16 tqp_num;
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
@@ -366,30 +377,30 @@ struct hclge_pf_res {
};
#define HCLGE_CFG_OFFSET_S 0
-#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */
+#define HCLGE_CFG_OFFSET_M GENMASK(19, 0)
#define HCLGE_CFG_RD_LEN_S 24
-#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S)
+#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24)
#define HCLGE_CFG_RD_LEN_BYTES 16
#define HCLGE_CFG_RD_LEN_UNIT 4
#define HCLGE_CFG_VMDQ_S 0
-#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S)
+#define HCLGE_CFG_VMDQ_M GENMASK(7, 0)
#define HCLGE_CFG_TC_NUM_S 8
-#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S)
+#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
#define HCLGE_CFG_TQP_DESC_N_S 16
-#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S)
+#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16)
#define HCLGE_CFG_PHY_ADDR_S 0
-#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S)
+#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0)
#define HCLGE_CFG_MEDIA_TP_S 8
-#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S)
+#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8)
#define HCLGE_CFG_RX_BUF_LEN_S 16
-#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S)
+#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16)
#define HCLGE_CFG_MAC_ADDR_H_S 0
-#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S)
+#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
-#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S)
+#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
-struct hclge_cfg_param {
+struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
__le32 param[4];
@@ -399,7 +410,7 @@ struct hclge_cfg_param {
#define HCLGE_DESC_NUM 0x40
#define HCLGE_ALLOC_VALID_B 0
-struct hclge_vf_num {
+struct hclge_vf_num_cmd {
u8 alloc_valid;
u8 rsv[23];
};
@@ -407,13 +418,13 @@ struct hclge_vf_num {
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config {
+struct hclge_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
};
-struct hclge_rss_input_tuple {
+struct hclge_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
@@ -427,26 +438,26 @@ struct hclge_rss_input_tuple {
#define HCLGE_RSS_CFG_TBL_SIZE 16
-struct hclge_rss_indirection_table {
- u16 start_table_index;
- u16 rss_set_bitmap;
+struct hclge_rss_indirection_table_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
};
#define HCLGE_RSS_TC_OFFSET_S 0
-#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S)
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0)
#define HCLGE_RSS_TC_SIZE_S 12
-#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S)
+#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode {
- u16 rss_tc_mode[HCLGE_MAX_TC_NUM];
+struct hclge_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGE_LINK_STS_B 0
#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
-struct hclge_link_status {
+struct hclge_link_status_cmd {
u8 status;
u8 rsv[23];
};
@@ -461,7 +472,7 @@ struct hclge_promisc_param {
#define HCLGE_PROMISC_EN_UC 0x1
#define HCLGE_PROMISC_EN_MC 0x2
#define HCLGE_PROMISC_EN_BC 0x4
-struct hclge_promisc_cfg {
+struct hclge_promisc_cfg_cmd {
u8 flag;
u8 vf_id;
__le16 rsv0;
@@ -489,18 +500,18 @@ enum hclge_promisc_type {
#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21
#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22
-struct hclge_config_mac_mode {
+struct hclge_config_mac_mode_cmd {
__le32 txrx_pad_fcs_loop_en;
u8 rsv[20];
};
#define HCLGE_CFG_SPEED_S 0
-#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S)
+#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
#define HCLGE_CFG_DUPLEX_B 7
#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B)
-struct hclge_config_mac_speed_dup {
+struct hclge_config_mac_speed_dup_cmd {
u8 speed_dup;
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
@@ -512,17 +523,17 @@ struct hclge_config_mac_speed_dup {
#define HCLGE_QUERY_AN_B 0
#define HCLGE_QUERY_DUPLEX_B 2
-#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S)
+#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
-struct hclge_query_an_speed_dup {
+struct hclge_query_an_speed_dup_cmd {
u8 an_syn_dup_speed;
u8 pause;
u8 rsv[23];
};
-#define HCLGE_RING_ID_MASK 0x3ff
+#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
#define HCLGE_MAC_CFG_AN_EN_B 0
@@ -533,7 +544,7 @@ struct hclge_query_an_speed_dup {
#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B)
-struct hclge_config_auto_neg {
+struct hclge_config_auto_neg_cmd {
__le32 cfg_an_cmd_flag;
u8 rsv[20];
};
@@ -542,7 +553,7 @@ struct hclge_config_auto_neg {
#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
-struct hclge_config_max_frm_size {
+struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
u8 rsv[22];
};
@@ -559,10 +570,10 @@ enum hclge_mac_vlan_tbl_opcode {
#define HCLGE_MAC_EPORT_SW_EN_B 0xc
#define HCLGE_MAC_EPORT_TYPE_B 0xb
#define HCLGE_MAC_EPORT_VFID_S 0x3
-#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S)
+#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
#define HCLGE_MAC_EPORT_PFID_S 0x0
-#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S)
-struct hclge_mac_vlan_tbl_entry {
+#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
+struct hclge_mac_vlan_tbl_entry_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
@@ -577,15 +588,15 @@ struct hclge_mac_vlan_tbl_entry {
};
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
-#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S)
+#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
-struct hclge_mta_filter_mode {
+struct hclge_mta_filter_mode_cmd {
u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */
u8 rsv[23];
};
#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
-struct hclge_cfg_func_mta_filter {
+struct hclge_cfg_func_mta_filter_cmd {
u8 accept; /* Only used lowest 1 bit */
u8 function_id;
u8 rsv[22];
@@ -593,14 +604,14 @@ struct hclge_cfg_func_mta_filter {
#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
-#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S)
-struct hclge_cfg_func_mta_item {
- u16 item_idx; /* Only used lowest 12 bit */
+#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
+struct hclge_cfg_func_mta_item_cmd {
+ __le16 item_idx; /* Only used lowest 12 bit */
u8 accept; /* Only used lowest 1 bit */
u8 rsv[21];
};
-struct hclge_mac_vlan_add {
+struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
@@ -613,7 +624,7 @@ struct hclge_mac_vlan_add {
};
#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove {
+struct hclge_mac_vlan_remove_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
@@ -625,21 +636,21 @@ struct hclge_mac_vlan_remove {
u8 rsv[4];
};
-struct hclge_vlan_filter_ctrl {
+struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
u8 rsv[22];
};
-struct hclge_vlan_filter_pf_cfg {
+struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
u8 vlan_offset_bitmap[20];
};
-struct hclge_vlan_filter_vf_cfg {
- u16 vlan_id;
+struct hclge_vlan_filter_vf_cfg_cmd {
+ __le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
@@ -647,14 +658,14 @@ struct hclge_vlan_filter_vf_cfg {
u8 vf_bitmap[16];
};
-struct hclge_cfg_com_tqp_queue {
+struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
u8 enable;
u8 rsv[19];
};
-struct hclge_cfg_tx_queue_pointer {
+struct hclge_cfg_tx_queue_pointer_cmd {
__le16 tqp_id;
__le16 tx_tail;
__le16 tx_head;
@@ -664,12 +675,12 @@ struct hclge_cfg_tx_queue_pointer {
};
#define HCLGE_TSO_MSS_MIN_S 0
-#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S)
+#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
#define HCLGE_TSO_MSS_MAX_S 16
-#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S)
+#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16)
-struct hclge_cfg_tso_status {
+struct hclge_cfg_tso_status_cmd {
__le16 tso_mss_min;
__le16 tso_mss_max;
u8 rsv[20];
@@ -679,13 +690,20 @@ struct hclge_cfg_tso_status {
#define HCLGE_TSO_MSS_MAX 9668
#define HCLGE_TQP_RESET_B 0
-struct hclge_reset_tqp_queue {
+struct hclge_reset_tqp_queue_cmd {
__le16 tqp_id;
u8 reset_req;
u8 ready_to_reset;
u8 rsv[20];
};
+#define HCLGE_CFG_RESET_MAC_B 3
+#define HCLGE_CFG_RESET_FUNC_B 7
+struct hclge_reset_cmd {
+ u8 mac_func_reset;
+ u8 fun_reset_vfid;
+ u8 rsv[22];
+};
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
@@ -733,6 +751,7 @@ struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
struct hclge_promisc_param *param);
@@ -743,4 +762,5 @@ enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
struct hclge_desc *desc);
void hclge_destroy_cmd_queue(struct hclge_hw *hw);
+int hclge_cmd_queue_init(struct hclge_dev *hdev);
#endif
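The header rewrite above replaces hand-rolled shifted masks with GENMASK(), which builds a contiguous mask from its high and low bit numbers and keeps the field boundaries visible. A small equivalence sketch using placeholder field names:

#include <linux/types.h>
#include <linux/bitops.h>	/* GENMASK() */

#define EXAMPLE_TC_NUM_S	8
#define EXAMPLE_TC_NUM_M	GENMASK(15, 8)	/* same bits as (0xff << 8) */

/* Extract and insert a field using the shift/mask pair. */
static inline u32 example_get_tc_num(u32 reg)
{
	return (reg & EXAMPLE_TC_NUM_M) >> EXAMPLE_TC_NUM_S;
}

static inline u32 example_set_tc_num(u32 reg, u32 val)
{
	return (reg & ~EXAMPLE_TC_NUM_M) |
	       ((val << EXAMPLE_TC_NUM_S) & EXAMPLE_TC_NUM_M);
}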
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
new file mode 100644
index 000000000000..5018d6633133
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+#define BW_PERCENT 100
+
+static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_SP;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] =
+ ets->tc_tx_bw[i];
+ break;
+ default:
+ /* The hardware only supports SP (strict priority) and
+ * ETS (enhanced transmission selection) scheduling; any
+ * other value from dcbnl is rejected with an error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+}
+
+static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u32 i;
+
+ memset(ets, 0, sizeof(*ets));
+ ets->willing = 1;
+ ets->ets_cap = hdev->tc_max;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
+ ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+
+ if (hdev->tm_info.tc_info[i].tc_sch_mode ==
+ HCLGE_SCH_MODE_SP)
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
+ else
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ }
+}
+
+/* IEEE std */
+static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_tm_info_to_ieee_ets(hdev, ets);
+
+ return 0;
+}
+
+static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ u8 *tc, bool *changed)
+{
+ u32 total_ets_bw = 0;
+ u8 max_tc = 0;
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (ets->prio_tc[i] >= hdev->tc_max ||
+ i >= hdev->tc_max)
+ return -EINVAL;
+
+ if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
+ *changed = true;
+
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_SP)
+ *changed = true;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_DWRR)
+ *changed = true;
+
+ total_ets_bw += ets->tc_tx_bw[i];
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (total_ets_bw != BW_PERCENT)
+ return -EINVAL;
+
+ *tc = max_tc + 1;
+ if (*tc != hdev->tm_info.num_tc)
+ *changed = true;
+
+ return 0;
+}
+
+static int hclge_map_update(struct hnae3_handle *h)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_tm_map_cfg(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_schd_mode_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_rss_init_hw(hdev);
+}
+
+static int hclge_client_setup_tc(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_client *client;
+ struct hnae3_handle *handle;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ handle = &vport[i].nic;
+ client = handle->client;
+
+ if (!client || !client->ops || !client->ops->setup_tc)
+ continue;
+
+ ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ bool map_changed = false;
+ u8 num_tc = 0;
+ int ret;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ return -EINVAL;
+
+ ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
+ if (ret)
+ return ret;
+
+ hclge_tm_schd_info_update(hdev, num_tc);
+
+ ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ if (ret)
+ return ret;
+
+ if (map_changed) {
+ ret = hclge_client_setup_tc(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return hclge_tm_dwrr_cfg(hdev);
+}
+
+static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ u8 i, j, pfc_map, *prio_tc;
+
+ memset(pfc, 0, sizeof(*pfc));
+ pfc->pfc_cap = hdev->pfc_max;
+ prio_tc = hdev->tm_info.prio_tc;
+ pfc_map = hdev->tm_info.hw_pfc_map;
+
+ /* PFC setting is based on TC */
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+ if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
+ pfc->pfc_en |= BIT(j);
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ u8 i, j, pfc_map, *prio_tc;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ return -EINVAL;
+
+ prio_tc = hdev->tm_info.prio_tc;
+ pfc_map = 0;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+ if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
+ pfc_map |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ if (pfc_map == hdev->tm_info.hw_pfc_map)
+ return 0;
+
+ hdev->tm_info.hw_pfc_map = pfc_map;
+
+ return hclge_pause_setup_hw(hdev);
+}
+
+/* DCBX configuration */
+static u8 hclge_getdcbx(struct hnae3_handle *h)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ return 0;
+
+ return hdev->dcbx_cap;
+}
+
+static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ /* No support for LLD_MANAGED modes or CEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ (mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ hdev->dcbx_cap = mode;
+
+ return 0;
+}
+
+/* Set up TC for hardware offloaded mqprio in channel mode */
+static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ return -EINVAL;
+
+ if (tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "setup tc failed, tc(%u) > tc_max(%u)\n",
+ tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ hclge_tm_schd_info_update(hdev, tc);
+
+ ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_init_hw(hdev);
+ if (ret)
+ return ret;
+
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ if (tc > 1)
+ hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
+
+ return 0;
+}
+
+static const struct hnae3_dcb_ops hns3_dcb_ops = {
+ .ieee_getets = hclge_ieee_getets,
+ .ieee_setets = hclge_ieee_setets,
+ .ieee_getpfc = hclge_ieee_getpfc,
+ .ieee_setpfc = hclge_ieee_setpfc,
+ .getdcbx = hclge_getdcbx,
+ .setdcbx = hclge_setdcbx,
+ .map_update = hclge_map_update,
+ .setup_tc = hclge_setup_tc,
+};
+
+void hclge_dcb_ops_set(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+
+ /* If the device does not support DCB, or the vport is
+ * not a PF, dcb_ops is left unset.
+ */
+ if (!hnae3_dev_dcb_supported(hdev) ||
+ vport->vport_id != 0)
+ return;
+
+ kinfo = &vport->nic.kinfo;
+ kinfo->dcb_ops = &hns3_dcb_ops;
+ hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
+}
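hclge_ets_validate() above accepts an ETS configuration only when every priority maps to a valid TC and the DWRR bandwidths of the ETS-scheduled TCs sum to exactly 100. Assuming hardware that reports eight TCs, a configuration that would pass the check could look like this (the values are purely an example):

#include <linux/dcbnl.h>

/* Two ETS TCs sharing the link 60/40, everything else strict priority with
 * zero DWRR weight; all eight priorities map to TC 0 or TC 1, so
 * total_ets_bw == 100 and max_tc + 1 == 2.
 */
static const struct ieee_ets example_ets = {
	.willing  = 1,
	.ets_cap  = 8,
	.tc_tx_bw = { 60, 40, 0, 0, 0, 0, 0, 0 },
	.tc_tsa   = {
		IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS,
		IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT,
		IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT,
		IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT,
	},
	.prio_tc  = { 0, 0, 1, 1, 0, 0, 1, 1 },
};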
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
new file mode 100644
index 000000000000..7d808ee96694
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2016~2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HCLGE_DCB_H__
+#define __HCLGE_DCB_H__
+
+#include "hclge_main.h"
+
+#ifdef CONFIG_HNS3_DCB
+void hclge_dcb_ops_set(struct hclge_dev *hdev);
+#else
+static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {}
+#endif
+
+#endif /* __HCLGE_DCB_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c1cdbfd83bdb..59ed806a52c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include "hclge_cmd.h"
+#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
@@ -30,11 +31,11 @@
#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f))
#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f))
-static int hclge_rss_init_hw(struct hclge_dev *hdev);
static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static struct hnae3_ae_algo ae_algo;
@@ -362,7 +363,7 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
#define HCLGE_64_BIT_RTN_DATANUM 4
u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
- u64 *desc_data;
+ __le64 *desc_data;
int i, k, n;
int ret;
@@ -376,14 +377,14 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
if (unlikely(i == 0)) {
- desc_data = (u64 *)(&desc[i].data[0]);
+ desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_64_BIT_RTN_DATANUM - 1;
} else {
- desc_data = (u64 *)(&desc[i]);
+ desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_RTN_DATANUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le64(*desc_data);
+ *data++ += le64_to_cpu(*desc_data);
desc_data++;
}
}
@@ -411,7 +412,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
struct hclge_32_bit_stats *all_32_bit_stats;
- u32 *desc_data;
+ __le32 *desc_data;
int i, k, n;
u64 *data;
int ret;
@@ -431,21 +432,27 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
hclge_reset_partial_32bit_counter(all_32_bit_stats);
for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
if (unlikely(i == 0)) {
+ __le16 *desc_data_16bit;
+
all_32_bit_stats->igu_rx_err_pkt +=
- cpu_to_le32(desc[i].data[0]);
+ le32_to_cpu(desc[i].data[0]);
+
+ desc_data_16bit = (__le16 *)&desc[i].data[1];
all_32_bit_stats->igu_rx_no_eof_pkt +=
- cpu_to_le32(desc[i].data[1] & 0xffff);
+ le16_to_cpu(*desc_data_16bit);
+
+ desc_data_16bit++;
all_32_bit_stats->igu_rx_no_sof_pkt +=
- cpu_to_le32((desc[i].data[1] >> 16) & 0xffff);
+ le16_to_cpu(*desc_data_16bit);
- desc_data = (u32 *)(&desc[i].data[2]);
+ desc_data = &desc[i].data[2];
n = HCLGE_32_BIT_RTN_DATANUM - 4;
} else {
- desc_data = (u32 *)(&desc[i]);
+ desc_data = (__le32 *)&desc[i];
n = HCLGE_32_BIT_RTN_DATANUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le32(*desc_data);
+ *data++ += le32_to_cpu(*desc_data);
desc_data++;
}
}
@@ -460,7 +467,7 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
- u64 *desc_data;
+ __le64 *desc_data;
int i, k, n;
int ret;
@@ -475,14 +482,14 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
if (unlikely(i == 0)) {
- desc_data = (u64 *)(&desc[i].data[0]);
+ desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_RTN_DATA_NUM - 2;
} else {
- desc_data = (u64 *)(&desc[i]);
+ desc_data = (__le64 *)(&desc[i]);
n = HCLGE_RTN_DATA_NUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le64(*desc_data);
+ *data++ += le64_to_cpu(*desc_data);
desc_data++;
}
}
@@ -508,7 +515,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
HCLGE_OPC_QUERY_RX_STATUS,
true);
- desc[0].data[0] = (tqp->index & 0x1ff);
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -517,7 +524,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- cpu_to_le32(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[4]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -528,7 +535,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
HCLGE_OPC_QUERY_TX_STATUS,
true);
- desc[0].data[0] = (tqp->index & 0x1ff);
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -537,7 +544,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- cpu_to_le32(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[4]);
}
return 0;
@@ -552,12 +559,12 @@ static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
@@ -820,7 +827,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
- struct hclge_func_status *status)
+ struct hclge_func_status_cmd *status)
{
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -831,19 +838,18 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
- hdev->num_req_vfs = status->vf_num / status->pf_num;
return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
- struct hclge_func_status *req;
+ struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
- req = (struct hclge_func_status *)desc.data;
+ req = (struct hclge_func_status_cmd *)desc.data;
do {
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -868,7 +874,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
- struct hclge_pf_res *req;
+ struct hclge_pf_res_cmd *req;
struct hclge_desc desc;
int ret;
@@ -880,19 +886,19 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_pf_res *)desc.data;
+ req = (struct hclge_pf_res_cmd *)desc.data;
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (hnae3_dev_roce_supported(hdev)) {
- hdev->num_roce_msix =
+ hdev->num_roce_msi =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET;
+ hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET;
} else {
hdev->num_msi =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
@@ -938,12 +944,12 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
- struct hclge_cfg_param *req;
+ struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
int i;
- req = (struct hclge_cfg_param *)desc[0].data;
+ req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
@@ -978,7 +984,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
- req = (struct hclge_cfg_param *)desc[1].data;
+ req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
@@ -989,20 +995,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
- struct hclge_cfg_param *req;
+ struct hclge_cfg_param_cmd *req;
int i, ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
- req = (struct hclge_cfg_param *)desc[i].data;
+ u32 offset = 0;
+
+ req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
+ hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Len should be in units of 4 bytes when sent to hardware */
- hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
- HCLGE_CFG_RD_LEN_S,
+ hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
- req->offset = cpu_to_le32(req->offset);
+ req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
@@ -1058,7 +1065,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.phy_addr = cfg.phy_addr;
hdev->num_desc = cfg.tqp_desc_num;
hdev->tm_info.num_pg = 1;
- hdev->tm_info.num_tc = cfg.tc_num;
+ hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
@@ -1067,15 +1074,25 @@ static int hclge_configure(struct hclge_dev *hdev)
return ret;
}
- if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) ||
- (hdev->tm_info.num_tc < 1)) {
+ if ((hdev->tc_max > HNAE3_MAX_TC) ||
+ (hdev->tc_max < 1)) {
dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
- hdev->tm_info.num_tc);
- hdev->tm_info.num_tc = 1;
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
}
+ hdev->tm_info.num_tc = hdev->tc_max;
+
/* Currently does not support uncontiguous tc */
- for (i = 0; i < cfg.tc_num; i++)
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae_set_bit(hdev->hw_tc_map, i, 1);
if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
@@ -1089,16 +1106,23 @@ static int hclge_configure(struct hclge_dev *hdev)
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
int tso_mss_max)
{
- struct hclge_cfg_tso_status *req;
+ struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
+ u16 tso_mss;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
- req = (struct hclge_cfg_tso_status *)desc.data;
- hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
+ req = (struct hclge_cfg_tso_status_cmd *)desc.data;
+
+ tso_mss = 0;
+ hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
HCLGE_TSO_MSS_MIN_S, tso_mss_min);
- hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M,
+ req->tso_mss_min = cpu_to_le16(tso_mss);
+
+ tso_mss = 0;
+ hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1134,15 +1158,15 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
- struct hclge_tqp_map *req;
+ struct hclge_tqp_map_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
- req = (struct hclge_tqp_map *)desc.data;
+ req = (struct hclge_tqp_map_cmd *)desc.data;
req->tqp_id = cpu_to_le16(tqp_pid);
- req->tqp_vf = cpu_to_le16(func_id);
+ req->tqp_vf = func_id;
req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1 << HCLGE_TQP_MAP_EN_B;
req->tqp_vid = cpu_to_le16(tqp_vid);
@@ -1161,11 +1185,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport,
struct hnae3_queue **tqp, u16 num_tqps)
{
struct hclge_dev *hdev = vport->back;
- int i, alloced, func_id, ret;
- bool is_pf;
-
- func_id = vport->vport_id;
- is_pf = (vport->vport_id == 0) ? true : false;
+ int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
alloced < num_tqps; i++) {
@@ -1174,12 +1194,6 @@ static int hclge_assign_tqp(struct hclge_vport *vport,
hdev->htqp[i].q.tqp_index = alloced;
tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
- ret = hclge_map_tqps_to_func(hdev, func_id,
- hdev->htqp[i].index,
- alloced, is_pf);
- if (ret)
- return ret;
-
alloced++;
}
}
@@ -1231,6 +1245,49 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps)
return 0;
}
+static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 i;
+
+ kinfo = &nic->kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ bool is_pf;
+ int ret;
+
+ is_pf = !(vport->vport_id);
+ ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
+ i, is_pf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_map_tqp(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, num_vport;
+
+ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ for (i = 0; i < num_vport; i++) {
+ int ret;
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
{
/* this would be initialized later */
@@ -1324,23 +1381,27 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
return 0;
}
-static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size)
+static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
- struct hclge_tx_buff_alloc *req;
+ struct hclge_tx_buff_alloc_cmd *req;
struct hclge_desc desc;
int ret;
u8 i;
- req = (struct hclge_tx_buff_alloc *)desc.data;
+ req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
- for (i = 0; i < HCLGE_TC_NUM; i++)
+ for (i = 0; i < HCLGE_TC_NUM; i++) {
+ u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
+
req->tx_pkt_buff[i] =
cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
HCLGE_BUF_SIZE_UPDATE_EN_MSK);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -1352,9 +1413,10 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size)
return 0;
}
-static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size)
+static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size);
+ int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -1387,13 +1449,14 @@ static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
}
/* Get the number of pfc enabled TCs, which have private buffer */
-static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
+static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
int i, cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
priv->enable)
cnt++;
@@ -1403,13 +1466,14 @@ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev)
}
/* Get the number of pfc disabled TCs, which have private buffer */
-static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
+static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
int i, cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
!(hdev->tm_info.hw_pfc_map & BIT(i)) &&
priv->enable)
@@ -1419,21 +1483,33 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev)
return cnt;
}
-static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev)
+static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
u32 rx_priv = 0;
int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if (priv->enable)
rx_priv += priv->buf_size;
}
return rx_priv;
}
-static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
+static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_tx_size = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
+
+ return total_tx_size;
+}
+
+static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc,
+ u32 rx_all)
{
u32 shared_buf_min, shared_buf_tc, shared_std;
int tc_num, pfc_enable_num;
@@ -1454,46 +1530,74 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
hdev->mps;
shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
- rx_priv = hclge_get_rx_priv_buff_alloced(hdev);
+ rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
if (rx_all <= rx_priv + shared_std)
return false;
shared_buf = rx_all - rx_priv;
- hdev->s_buf.buf_size = shared_buf;
- hdev->s_buf.self.high = shared_buf;
- hdev->s_buf.self.low = 2 * hdev->mps;
+ buf_alloc->s_buf.buf_size = shared_buf;
+ buf_alloc->s_buf.self.high = shared_buf;
+ buf_alloc->s_buf.self.low = 2 * hdev->mps;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
if ((hdev->hw_tc_map & BIT(i)) &&
(hdev->tm_info.hw_pfc_map & BIT(i))) {
- hdev->s_buf.tc_thrd[i].low = hdev->mps;
- hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
} else {
- hdev->s_buf.tc_thrd[i].low = 0;
- hdev->s_buf.tc_thrd[i].high = hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].low = 0;
+ buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
}
}
return true;
}
+static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_size;
+
+ total_size = hdev->pkt_buf_size;
+
+ /* alloc tx buffer for all enabled tc */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ if (total_size < HCLGE_DEFAULT_TX_BUF)
+ return -ENOMEM;
+
+ if (hdev->hw_tc_map & BIT(i))
+ priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+ else
+ priv->tx_buf_size = 0;
+
+ total_size -= priv->tx_buf_size;
+ }
+
+ return 0;
+}
+
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
- * @tx_size: the allocated tx buffer for all TCs
+ * @buf_alloc: pointer to buffer calculation data
* @return: 0: calculation successful, negative: fail
*/
-int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- u32 rx_all = hdev->pkt_buf_size - tx_size;
+ u32 rx_all = hdev->pkt_buf_size;
int no_pfc_priv_num, pfc_priv_num;
struct hclge_priv_buf *priv;
int i;
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
+
/* When DCB is not supported, rx private
* buffer is not allocated.
*/
if (!hnae3_dev_dcb_supported(hdev)) {
- if (!hclge_is_rx_buf_ok(hdev, rx_all))
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return -ENOMEM;
return 0;
@@ -1501,7 +1605,7 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
/* step 1, try to alloc private buffer for all enabled tc */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i)) {
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
@@ -1522,14 +1626,14 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
}
}
- if (hclge_is_rx_buf_ok(hdev, rx_all))
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return 0;
/* step 2, try to decrease the buffer size of
* no pfc TC's private buffer
*/
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
priv->enable = 0;
priv->wl.low = 0;
@@ -1552,18 +1656,18 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
}
}
- if (hclge_is_rx_buf_ok(hdev, rx_all))
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return 0;
/* step 3, try to reduce the number of pfc disabled TCs,
* which have private buffer
*/
/* get the total number of pfc disabled TCs, which have a private buffer */
- no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev);
+ no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
/* let the last one be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
!(hdev->tm_info.hw_pfc_map & BIT(i))) {
@@ -1575,22 +1679,22 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
no_pfc_priv_num--;
}
- if (hclge_is_rx_buf_ok(hdev, rx_all) ||
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
no_pfc_priv_num == 0)
break;
}
- if (hclge_is_rx_buf_ok(hdev, rx_all))
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return 0;
/* step 4, try to reduce the number of pfc enabled TCs
* which have private buffer.
*/
- pfc_priv_num = hclge_get_pfc_priv_num(hdev);
+ pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
/* let the last one be cleared first */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
- priv = &hdev->priv_buf[i];
+ priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
hdev->tm_info.hw_pfc_map & BIT(i)) {
@@ -1602,38 +1706,39 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
pfc_priv_num--;
}
- if (hclge_is_rx_buf_ok(hdev, rx_all) ||
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
pfc_priv_num == 0)
break;
}
- if (hclge_is_rx_buf_ok(hdev, rx_all))
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return 0;
return -ENOMEM;
}
-static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
+static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- struct hclge_rx_priv_buff *req;
+ struct hclge_rx_priv_buff_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
- req = (struct hclge_rx_priv_buff *)desc.data;
+ req = (struct hclge_rx_priv_buff_cmd *)desc.data;
/* Alloc private buffer TCs */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- struct hclge_priv_buf *priv = &hdev->priv_buf[i];
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
req->buf_num[i] =
cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
req->buf_num[i] |=
- cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
+ cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
}
req->shared_buf =
- cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+ cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
(1 << HCLGE_TC0_PRI_BUF_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -1648,7 +1753,8 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
-static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
+static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_rx_priv_wl_buf *req;
struct hclge_priv_buf *priv;
@@ -1668,7 +1774,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
- priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j];
+ u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
+
+ priv = &buf_alloc->priv_buf[idx];
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
@@ -1693,9 +1801,10 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev)
return 0;
}
-static int hclge_common_thrd_config(struct hclge_dev *hdev)
+static int hclge_common_thrd_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- struct hclge_shared_buf *s_buf = &hdev->s_buf;
+ struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
struct hclge_rx_com_thrd *req;
struct hclge_desc desc[2];
struct hclge_tc_thrd *tc;
@@ -1739,9 +1848,10 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev)
return 0;
}
-static int hclge_common_wl_config(struct hclge_dev *hdev)
+static int hclge_common_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
- struct hclge_shared_buf *buf = &hdev->s_buf;
+ struct hclge_shared_buf *buf = &buf_alloc->s_buf;
struct hclge_rx_com_wl *req;
struct hclge_desc desc;
int ret;
@@ -1771,63 +1881,68 @@ static int hclge_common_wl_config(struct hclge_dev *hdev)
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
- u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+ struct hclge_pkt_buf_alloc *pkt_buf;
int ret;
- hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM,
- sizeof(struct hclge_priv_buf),
- GFP_KERNEL | __GFP_ZERO);
- if (!hdev->priv_buf)
+ pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
+ if (!pkt_buf)
return -ENOMEM;
- ret = hclge_tx_buffer_alloc(hdev, tx_buf_size);
+ ret = hclge_tx_buffer_calc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not calc tx buffer size for all TCs %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not alloc tx buffers %d\n", ret);
- return ret;
+ goto out;
}
- ret = hclge_rx_buffer_calc(hdev, tx_buf_size);
+ ret = hclge_rx_buffer_calc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not calc rx priv buffer size for all TCs %d\n",
ret);
- return ret;
+ goto out;
}
- ret = hclge_rx_priv_buf_alloc(hdev);
+ ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
ret);
- return ret;
+ goto out;
}
if (hnae3_dev_dcb_supported(hdev)) {
- ret = hclge_rx_priv_wl_config(hdev);
+ ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not configure rx private waterline %d\n",
ret);
- return ret;
+ goto out;
}
- ret = hclge_common_thrd_config(hdev);
+ ret = hclge_common_thrd_config(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not configure common threshold %d\n",
ret);
- return ret;
+ goto out;
}
}
- ret = hclge_common_wl_config(hdev);
- if (ret) {
+ ret = hclge_common_wl_config(hdev, pkt_buf);
+ if (ret)
dev_err(&hdev->pdev->dev,
"could not configure common waterline %d\n", ret);
- return ret;
- }
- return 0;
+out:
+ kfree(pkt_buf);
+ return ret;
}
static int hclge_init_roce_base_info(struct hclge_vport *vport)
@@ -1835,7 +1950,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
struct hnae3_handle *roce = &vport->roce;
struct hnae3_handle *nic = &vport->nic;
- roce->rinfo.num_vectors = vport->back->num_roce_msix;
+ roce->rinfo.num_vectors = vport->back->num_roce_msi;
if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
vport->back->num_msi_left == 0)
@@ -1853,67 +1968,47 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
return 0;
}
-static int hclge_init_msix(struct hclge_dev *hdev)
+static int hclge_init_msi(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- int ret, i;
-
- hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!hdev->msix_entries)
- return -ENOMEM;
-
- hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
- sizeof(u16), GFP_KERNEL);
- if (!hdev->vector_status)
- return -ENOMEM;
+ int vectors;
+ int i;
- for (i = 0; i < hdev->num_msi; i++) {
- hdev->msix_entries[i].entry = i;
- hdev->vector_status[i] = HCLGE_INVALID_VPORT;
+ vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
}
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
- hdev->num_msi_left = hdev->num_msi;
- hdev->base_msi_vector = hdev->pdev->irq;
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+ hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
HCLGE_ROCE_VECTOR_OFFSET;
- ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries,
- hdev->num_msi, hdev->num_msi);
- if (ret < 0) {
- dev_info(&hdev->pdev->dev,
- "MSI-X vector alloc failed: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int hclge_init_msi(struct hclge_dev *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- int vectors;
- int i;
-
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
- if (!hdev->vector_status)
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
return -ENOMEM;
+ }
for (i = 0; i < hdev->num_msi; i++)
hdev->vector_status[i] = HCLGE_INVALID_VPORT;
- vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI);
- if (vectors < 0) {
- dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors);
- return -EINVAL;
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- hdev->num_msi = vectors;
- hdev->num_msi_left = vectors;
- hdev->base_msi_vector = pdev->irq;
- hdev->roce_base_vector = hdev->base_msi_vector +
- HCLGE_ROCE_VECTOR_OFFSET;
return 0;
}
@@ -1932,11 +2027,11 @@ static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
- struct hclge_config_mac_speed_dup *req;
+ struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_config_mac_speed_dup *)desc.data;
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
@@ -2007,12 +2102,12 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
u8 *duplex)
{
- struct hclge_query_an_speed_dup *req;
+ struct hclge_query_an_speed_dup_cmd *req;
struct hclge_desc desc;
int speed_tmp;
int ret;
- req = (struct hclge_query_an_speed_dup *)desc.data;
+ req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2040,11 +2135,11 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup *req;
+ struct hclge_query_an_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_query_an_speed_dup *)desc.data;
+ req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2061,14 +2156,16 @@ static int hclge_query_autoneg_result(struct hclge_dev *hdev)
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
- struct hclge_config_auto_neg *req;
+ struct hclge_config_auto_neg_cmd *req;
struct hclge_desc desc;
+ u32 flag = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
- req = (struct hclge_config_auto_neg *)desc.data;
- hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ req = (struct hclge_config_auto_neg_cmd *)desc.data;
+ hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2112,13 +2209,6 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- ret = hclge_mac_mdio_config(hdev);
- if (ret) {
- dev_warn(&hdev->pdev->dev,
- "mdio config fail ret=%d\n", ret);
- return ret;
- }
-
/* Initialize the MTA table work mode */
hdev->accept_mta_mc = true;
hdev->enable_mta = true;
@@ -2146,7 +2236,7 @@ static void hclge_task_schedule(struct hclge_dev *hdev)
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
- struct hclge_link_status *req;
+ struct hclge_link_status_cmd *req;
struct hclge_desc desc;
int link_status;
int ret;
@@ -2159,7 +2249,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_link_status *)desc.data;
+ req = (struct hclge_link_status_cmd *)desc.data;
link_status = req->status & HCLGE_LINK_STATUS;
return !!link_status;
@@ -2215,18 +2305,7 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev)
/* get the speed and duplex as the autoneg result from the mac cmd when the phy
* doesn't exist.
*/
- if (mac.phydev)
- return 0;
-
- /* update mac->antoneg. */
- ret = hclge_query_autoneg_result(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "autoneg result query failed %d\n", ret);
- return ret;
- }
-
- if (!mac.autoneg)
+ if (mac.phydev || !mac.autoneg)
return 0;
ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
@@ -2266,11 +2345,11 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(unsigned long data)
+static void hclge_service_timer(struct timer_list *t)
{
- struct hclge_dev *hdev = (struct hclge_dev *)data;
- (void)mod_timer(&hdev->service_timer, jiffies + HZ);
+ struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
hclge_task_schedule(hdev);
}
@@ -2283,11 +2362,275 @@ static void hclge_service_complete(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
+static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
+{
+ writel(enable ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
+{
+ struct hclge_dev *hdev = data;
+
+ hclge_enable_vector(&hdev->misc_vector, false);
+ if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->service_task);
+
+ return IRQ_HANDLED;
+}
+
+static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
+{
+ hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static void hclge_get_misc_vector(struct hclge_dev *hdev)
+{
+ struct hclge_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
+
+ vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ hdev->vector_status[0] = 0;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static int hclge_misc_irq_init(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hclge_get_misc_vector(hdev);
+
+ ret = devm_request_irq(&hdev->pdev->dev,
+ hdev->misc_vector.vector_irq,
+ hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+ if (ret) {
+ hclge_free_vector(hdev, 0);
+ dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
+ hdev->misc_vector.vector_irq);
+ }
+
+ return ret;
+}
+
+static int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].nic;
+ int ret;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_reset_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_RESET_WATI_MS 100
+#define HCLGE_RESET_WAIT_CNT 5
+ u32 val, reg, reg_bit;
+ u32 cnt = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_GLOBAL_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_GLOBAL_RESET_BIT;
+ break;
+ case HNAE3_CORE_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_CORE_RESET_BIT;
+ break;
+ case HNAE3_FUNC_RESET:
+ reg = HCLGE_FUN_RST_ING;
+ reg_bit = HCLGE_FUN_RST_ING_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Wait for unsupported reset type: %d\n",
+ hdev->reset_type);
+ return -EINVAL;
+ }
+
+ val = hclge_read_dev(&hdev->hw, reg);
+ while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+ msleep(HCLGE_RESET_WATI_MS);
+ val = hclge_read_dev(&hdev->hw, reg);
+ cnt++;
+ }
+
+ /* must clear the reset status register to
+ * prevent the driver from detecting the reset interrupt again
+ */
+ reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
+
+ if (cnt >= HCLGE_RESET_WAIT_CNT) {
+ dev_warn(&hdev->pdev->dev,
+ "Wait for reset timeout: %d\n", hdev->reset_type);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
+{
+ struct hclge_desc desc;
+ struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0);
+ hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ req->fun_reset_vfid = func_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "send function reset cmd fail, status =%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+
+ switch (type) {
+ case HNAE3_GLOBAL_RESET:
+ val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+ hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+ dev_info(&pdev->dev, "Global Reset requested\n");
+ break;
+ case HNAE3_CORE_RESET:
+ val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+ hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+ dev_info(&pdev->dev, "Core Reset requested\n");
+ break;
+ case HNAE3_FUNC_RESET:
+ dev_info(&pdev->dev, "PF Reset requested\n");
+ hclge_func_reset_cmd(hdev, 0);
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "Unsupported reset type: %d\n", type);
+ break;
+ }
+}
+
+static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ u32 rst_reg_val;
+
+ rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+ rst_level = HNAE3_GLOBAL_RESET;
+ else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+ rst_level = HNAE3_CORE_RESET;
+ else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+ rst_level = HNAE3_IMP_RESET;
+
+ return rst_level;
+}
+
+static void hclge_reset_event(struct hnae3_handle *handle,
+ enum hnae3_reset_type reset)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ dev_info(&hdev->pdev->dev,
+ "Receive reset event , reset_type is %d", reset);
+
+ switch (reset) {
+ case HNAE3_FUNC_RESET:
+ case HNAE3_CORE_RESET:
+ case HNAE3_GLOBAL_RESET:
+ if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "Already in reset state");
+ return;
+ }
+ hdev->reset_type = reset;
+ set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ schedule_work(&hdev->service_task);
+ break;
+ default:
+ dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
+ break;
+ }
+}
+
+static void hclge_reset_subtask(struct hclge_dev *hdev)
+{
+ bool do_reset;
+
+ do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+
+ /* Reset is detected by interrupt */
+ if (hdev->reset_type == HNAE3_NONE_RESET)
+ hdev->reset_type = hclge_detected_reset_event(hdev);
+
+ if (hdev->reset_type == HNAE3_NONE_RESET)
+ return;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ case HNAE3_CORE_RESET:
+ case HNAE3_GLOBAL_RESET:
+ case HNAE3_IMP_RESET:
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ if (do_reset)
+ hclge_do_reset(hdev, hdev->reset_type);
+ else
+ set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+
+ if (!hclge_reset_wait(hdev)) {
+ hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ hclge_reset_ae_dev(hdev->ae_dev);
+ hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
+ }
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
+ hdev->reset_type);
+ break;
+ }
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
+
+static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+{
+ hclge_reset_subtask(hdev);
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task);
+ hclge_misc_irq_service_task(hdev);
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
hclge_update_stats_for_all(hdev);
@@ -2341,6 +2684,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
vport->vport_id *
HCLGE_VECTOR_VF_OFFSET;
hdev->vector_status[i] = vport->vport_id;
+ hdev->vector_irq[i] = vector->vector;
vector++;
alloc++;
@@ -2359,15 +2703,10 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
int i;
- for (i = 0; i < hdev->num_msi; i++) {
- if (hdev->msix_entries) {
- if (vector == hdev->msix_entries[i].vector)
- return i;
- } else {
- if (vector == (hdev->base_msi_vector + i))
- return i;
- }
- }
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
return -EINVAL;
}
@@ -2383,7 +2722,7 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
- struct hclge_rss_config *req;
+ struct hclge_rss_config_cmd *req;
struct hclge_desc desc;
int rss_hash_algo;
int ret;
@@ -2397,7 +2736,7 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_rss_config *)desc.data;
+ req = (struct hclge_rss_config_cmd *)desc.data;
rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
@@ -2409,13 +2748,13 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev)
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
- struct hclge_rss_config *req;
+ struct hclge_rss_config_cmd *req;
struct hclge_desc desc;
int key_offset;
int key_size;
int ret;
- req = (struct hclge_rss_config *)desc.data;
+ req = (struct hclge_rss_config_cmd *)desc.data;
for (key_offset = 0; key_offset < 3; key_offset++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
@@ -2446,19 +2785,20 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
- struct hclge_rss_indirection_table *req;
+ struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
int i, j;
int ret;
- req = (struct hclge_rss_indirection_table *)desc.data;
+ req = (struct hclge_rss_indirection_table_cmd *)desc.data;
for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
- req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
- req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
@@ -2478,21 +2818,24 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
u16 *tc_size, u16 *tc_offset)
{
- struct hclge_rss_tc_mode *req;
+ struct hclge_rss_tc_mode_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode *)desc.data;
+ req = (struct hclge_rss_tc_mode_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
+ u16 mode = 0;
+
+ hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
+ hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2507,15 +2850,13 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
-#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
-#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
- struct hclge_rss_input_tuple *req;
+ struct hclge_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
- req = (struct hclge_rss_input_tuple *)desc.data;
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
@@ -2589,6 +2930,161 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
return ret;
}
+static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+
+ tuple_sets = hclge_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = req->ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = req->ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = req->ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = req->ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = req->ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = req->ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGE_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -2597,7 +3093,7 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)
return hdev->rss_size_max;
}
-static int hclge_rss_init_hw(struct hclge_dev *hdev)
+int hclge_rss_init_hw(struct hclge_dev *hdev)
{
const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
@@ -2682,7 +3178,7 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain *req;
+ struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
int ret;
@@ -2690,20 +3186,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
- req = (struct hclge_ctrl_vector_chain *)desc.data;
+ req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
+ u16 type_and_id = 0;
+
+ hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ node->tqp_index);
+ hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
@@ -2739,9 +3236,9 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
return 0;
}
-int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
- int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_handle_ring_to_vector(
+ struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -2763,7 +3260,7 @@ static int hclge_unmap_ring_from_vector(
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain *req;
+ struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
int i, vector_id;
@@ -2778,21 +3275,22 @@ static int hclge_unmap_ring_from_vector(
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
- req = (struct hclge_ctrl_vector_chain *)desc.data;
+ req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
+ u16 type_and_id = 0;
+
+ hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ node->tqp_index);
+ hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
@@ -2830,13 +3328,13 @@ static int hclge_unmap_ring_from_vector(
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
struct hclge_promisc_param *param)
{
- struct hclge_promisc_cfg *req;
+ struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
- req = (struct hclge_promisc_cfg *)desc.data;
+ req = (struct hclge_promisc_cfg_cmd *)desc.data;
req->vf_id = param->vf_id;
req->flag = (param->enable << HCLGE_PROMISC_EN_B);
@@ -2878,29 +3376,27 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
- struct hclge_config_mac_mode *req =
- (struct hclge_config_mac_mode *)desc.data;
+ struct hclge_config_mac_mode_cmd *req =
+ (struct hclge_config_mac_mode_cmd *)desc.data;
+ u32 loop_en = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -2908,12 +3404,65 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ switch (loop_mode) {
+ case HNAE3_MAC_INTER_LOOP_MAC:
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_CONFIG_MAC_MODE,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ if (en)
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1);
+ else
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+ /* 3 Config mac work mode with loopback flag
+ * and its original configuration parameters
+ */
+ hclge_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+ "loop_mode %d is not supported\n", loop_mode);
+ break;
+ }
+
+ return ret;
+}
+
static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
- struct hclge_cfg_com_tqp_queue *req =
- (struct hclge_cfg_com_tqp_queue *)desc.data;
+ struct hclge_cfg_com_tqp_queue_cmd *req =
+ (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
@@ -2963,7 +3512,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
- (void)mod_timer(&hdev->service_timer, jiffies + HZ);
+ mod_timer(&hdev->service_timer, jiffies + HZ);
ret = hclge_mac_start_phy(hdev);
if (ret)
@@ -3077,16 +3626,16 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= ~(1 << bit_num);
+ desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
else
- desc[1].data[word_num] |= (1 << bit_num);
+ desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - 192) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= ~(1 << bit_num);
+ desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
else
- desc[2].data[word_num] |= (1 << bit_num);
+ desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
return 0;
@@ -3106,7 +3655,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
return true;
}
-static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
+static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
const u8 *addr)
{
const unsigned char *mac_addr = addr;
@@ -3118,8 +3667,8 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
+static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
+ const u8 *addr)
{
u16 high_val = addr[1] | (addr[0] << 8);
struct hclge_dev *hdev = vport->back;
@@ -3133,11 +3682,11 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable)
{
- struct hclge_mta_filter_mode *req;
+ struct hclge_mta_filter_mode_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_mta_filter_mode *)desc.data;
+ req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
@@ -3160,11 +3709,11 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable)
{
- struct hclge_cfg_func_mta_filter *req;
+ struct hclge_cfg_func_mta_filter_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_cfg_func_mta_filter *)desc.data;
+ req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
@@ -3187,17 +3736,18 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
bool enable)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item *req;
+ struct hclge_cfg_func_mta_item_cmd *req;
struct hclge_desc desc;
+ u16 item_idx = 0;
int ret;
- req = (struct hclge_cfg_func_mta_item *)desc.data;
+ req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(req->item_idx);
+ req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3211,16 +3761,17 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req)
+ struct hclge_mac_vlan_tbl_entry_cmd *req)
{
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
u8 resp_code;
+ u16 retval;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
- memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
+ memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3229,19 +3780,21 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
ret);
return ret;
}
- resp_code = (desc.data[0] >> 8) & 0xff;
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
- return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
struct hclge_desc *desc,
bool is_mc)
{
struct hclge_dev *hdev = vport->back;
u8 resp_code;
+ u16 retval;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
@@ -3249,7 +3802,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -3261,7 +3814,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
} else {
memcpy(desc[0].data,
req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
}
if (ret) {
@@ -3270,19 +3823,21 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
ret);
return ret;
}
- resp_code = (desc[0].data[0] >> 8) & 0xff;
+ resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc[0].retval);
- return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
struct hclge_desc *mc_desc)
{
struct hclge_dev *hdev = vport->back;
int cfg_status;
u8 resp_code;
+ u16 retval;
int ret;
if (!mc_desc) {
@@ -3291,25 +3846,29 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc,
HCLGE_OPC_MAC_VLAN_ADD,
false);
- memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
+ memcpy(desc.data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- resp_code = (desc.data[0] >> 8) & 0xff;
- cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+ hclge_cmd_reuse_desc(&mc_desc[0], false);
mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+ hclge_cmd_reuse_desc(&mc_desc[1], false);
mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+ hclge_cmd_reuse_desc(&mc_desc[2], false);
mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
- resp_code = (mc_desc[0].data[0] >> 8) & 0xff;
- cfg_status = hclge_get_mac_vlan_cmd_status(vport,
- mc_desc[0].retval,
+ resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(mc_desc[0].retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
resp_code,
HCLGE_MAC_VLAN_ADD);
}
@@ -3336,8 +3895,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
+ u16 egress_port = 0;
/* mac addr check */
if (is_zero_ether_addr(addr) ||
@@ -3357,15 +3917,15 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.egress_port,
- HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(req.egress_port,
- HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
+
+ hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
+ hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
+ hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
+ hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
HCLGE_MAC_EPORT_PFID_S, 0);
- req.egress_port = cpu_to_le16(req.egress_port);
+
+ req.egress_port = cpu_to_le16(egress_port);
hclge_prepare_mac_addr(&req, addr);
@@ -3386,7 +3946,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
/* mac addr check */
@@ -3420,7 +3980,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
u16 tbl_idx;
int status;
@@ -3471,7 +4031,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
struct hclge_desc desc[3];
u16 tbl_idx;
@@ -3554,13 +4114,13 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
bool filter_en)
{
- struct hclge_vlan_filter_ctrl *req;
+ struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
- req = (struct hclge_vlan_filter_ctrl *)desc.data;
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
req->vlan_fe = filter_en;
@@ -3578,8 +4138,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
- struct hclge_vlan_filter_vf_cfg *req0;
- struct hclge_vlan_filter_vf_cfg *req1;
+ struct hclge_vlan_filter_vf_cfg_cmd *req0;
+ struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
@@ -3595,10 +4155,10 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
- req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
- req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
+ req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+ req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
- req0->vlan_id = vlan;
+ req0->vlan_id = cpu_to_le16(vlan);
req0->vlan_cfg = is_kill;
if (vf_byte_off < HCLGE_MAX_VF_BYTES)
@@ -3639,7 +4199,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_vlan_filter_pf_cfg *req;
+ struct hclge_vlan_filter_pf_cfg_cmd *req;
struct hclge_desc desc;
u8 vlan_offset_byte_val;
u8 vlan_offset_byte;
@@ -3652,7 +4212,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
vlan_offset_byte = (vlan_id % 160) / 8;
vlan_offset_byte_val = 1 << (vlan_id % 8);
- req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
+ req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
req->vlan_cfg = is_kill;
req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
@@ -3714,7 +4274,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_config_max_frm_size *req;
+ struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
int ret;
@@ -3725,7 +4285,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
hdev->mps = new_mtu;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
- req = (struct hclge_config_max_frm_size *)desc.data;
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(new_mtu);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3740,13 +4300,13 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
bool enable)
{
- struct hclge_reset_tqp_queue *req;
+ struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
- req = (struct hclge_reset_tqp_queue *)desc.data;
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
@@ -3762,13 +4322,13 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
- struct hclge_reset_tqp_queue *req;
+ struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
- req = (struct hclge_reset_tqp_queue *)desc.data;
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3981,7 +4541,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
vport->roce.client = client;
}
- if (hdev->roce_client) {
+ if (hdev->roce_client && hdev->nic_client) {
ret = hclge_init_roce_base_info(vport);
if (ret)
goto err;
@@ -4007,13 +4567,19 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
vport = &hdev->vport[i];
- if (hdev->roce_client)
+ if (hdev->roce_client) {
hdev->roce_client->ops->uninit_instance(&vport->roce,
0);
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ }
if (client->type == HNAE3_CLIENT_ROCE)
return;
- if (client->ops->uninit_instance)
+ if (client->ops->uninit_instance) {
client->ops->uninit_instance(&vport->nic, 0);
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ }
}
}
@@ -4056,6 +4622,8 @@ static int hclge_pci_init(struct hclge_dev *hdev)
goto err_clr_master;
}
+ hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
+
return 0;
err_clr_master:
pci_clear_master(pdev);
@@ -4072,14 +4640,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->flag & HCLGE_FLAG_USE_MSIX) {
- pci_disable_msix(pdev);
- devm_kfree(&pdev->dev, hdev->msix_entries);
- hdev->msix_entries = NULL;
- } else {
- pci_disable_msi(pdev);
- }
-
+ pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
@@ -4097,9 +4658,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_hclge_dev;
}
- hdev->flag |= HCLGE_FLAG_USE_MSIX;
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
+ hdev->reset_type = HNAE3_NONE_RESET;
ae_dev->priv = hdev;
ret = hclge_pci_init(hdev);
@@ -4108,7 +4669,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_pci_init;
}
- /* Command queue initialize */
+ /* Firmware command queue initialize */
+ ret = hclge_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Firmware command initialize */
ret = hclge_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -4126,12 +4694,17 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- if (hdev->flag & HCLGE_FLAG_USE_MSIX)
- ret = hclge_init_msix(hdev);
- else
- ret = hclge_init_msi(hdev);
+ ret = hclge_init_msi(hdev);
if (ret) {
- dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret);
+ dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_misc_irq_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Misc IRQ(vector0) init error, ret = %d.\n",
+ ret);
return ret;
}
@@ -4147,6 +4720,19 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_map_tqp(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_mac_mdio_config(hdev);
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "mdio config fail ret=%d\n", ret);
+ return ret;
+ }
+
ret = hclge_mac_init(hdev);
if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
@@ -4182,10 +4768,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- setup_timer(&hdev->service_timer, hclge_service_timer,
- (unsigned long)hdev);
+ hclge_dcb_ops_set(hdev);
+
+ timer_setup(&hdev->service_timer, hclge_service_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
+ /* Enable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, true);
+
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -4200,6 +4790,91 @@ err_hclge_dev:
return ret;
}
+static void hclge_stats_clear(struct hclge_dev *hdev)
+{
+ memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
+}
+
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct pci_dev *pdev = ae_dev->pdev;
+ int ret;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ hclge_stats_clear(hdev);
+
+ ret = hclge_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed\n");
+ return ret;
+ }
+
+ ret = hclge_get_cap(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_map_tqp(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_mac_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ /* Enable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ return 0;
+}
+
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -4210,7 +4885,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (IS_ENABLED(CONFIG_PCI_IOV))
hclge_disable_sriov(hdev);
- if (hdev->service_timer.data)
+ if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
@@ -4218,6 +4893,9 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
if (mac->phydev)
mdiobus_unregister(mac->mdio_bus);
+ /* Disable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ hclge_free_vector(hdev, 0);
hclge_destroy_cmd_queue(&hdev->hw);
hclge_pci_uninit(hdev);
ae_dev->priv = NULL;
@@ -4232,6 +4910,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
.get_vector = hclge_get_vector,
.set_promisc_mode = hclge_set_promisc_mode,
+ .set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
.get_status = hclge_get_status,
@@ -4243,6 +4922,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
+ .set_rss_tuple = hclge_set_rss_tuple,
+ .get_rss_tuple = hclge_get_rss_tuple,
.get_tc_size = hclge_get_tc_size,
.get_mac_addr = hclge_get_mac_addr,
.set_mac_addr = hclge_set_mac_addr,
@@ -4263,6 +4944,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_mdix_mode = hclge_get_mdix_mode,
.set_vlan_filter = hclge_set_port_vlan_filter,
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .reset_event = hclge_reset_event,
};
static struct hnae3_ae_algo ae_algo = {
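The hunks above repeatedly replace read-modify-write on little-endian descriptor fields with a host-order local that is converted exactly once on store, and convert on load before shifting (item_idx, egress_port, resp_code, retval). A minimal sketch of the pattern, reusing the kernel's byte-order helpers; the 12-bit index width here is a hypothetical stand-in for the driver's HCLGE_CFG_MTA_ITEM_IDX_M/_S masks:

	u16 item_idx = 0;
	u8 resp_code;

	/* Build the field in CPU byte order... */
	item_idx |= idx & 0x0fff;		/* hypothetical 12-bit index */

	/* ...and convert exactly once when storing into the descriptor. */
	req->item_idx = cpu_to_le16(item_idx);

	/* Read side: convert first, then shift/mask in CPU order. */
	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;

Applying the bit operations directly to a __le16/__le32 member only happens to work on little-endian hosts; the local-variable form is endian-clean, which is what these hunks switch to.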
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 9fcfd9395424..7027814ea5d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -27,12 +27,13 @@
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
#define HCLGE_VECTOR_REG_BASE 0x20000
+#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
@@ -41,6 +42,14 @@
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
+#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGE_D_PORT_BIT BIT(0)
+#define HCLGE_S_PORT_BIT BIT(1)
+#define HCLGE_D_IP_BIT BIT(2)
+#define HCLGE_S_IP_BIT BIT(3)
+#define HCLGE_V_TAG_BIT BIT(4)
+
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
@@ -65,11 +74,24 @@
#define HCLGE_PHY_CSS_REG 17
#define HCLGE_PHY_MDIX_CTRL_S (5)
-#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S)
+#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
+/* Reset related Registers */
+#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_GLOBAL_RESET_REG 0x20A00
+#define HCLGE_GLOBAL_RESET_BIT 0x0
+#define HCLGE_CORE_RESET_BIT 0x1
+#define HCLGE_FUN_RST_ING 0x20C00
+#define HCLGE_FUN_RST_ING_B 0
+
+/* Vector0 register bits define */
+#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
+#define HCLGE_VECTOR0_CORERESET_INT_B 6
+#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
HCLGE_STATE_DOWN,
@@ -79,6 +101,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_MBX_IRQ,
+ HCLGE_STATE_RESET_INT,
HCLGE_STATE_MAX
};
@@ -392,17 +415,16 @@ struct hclge_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
struct hclge_hw hw;
+ struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ enum hnae3_reset_type reset_type;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
u16 num_req_vfs; /* Num VFs requested for this PF */
- u16 num_roce_msix; /* Num of roce vectors for this PF */
- int roce_base_vector;
-
/* Base task tqp physical id of this PF */
u16 base_tqp_pid;
u16 alloc_rss_size; /* Allocated RSS task queue */
@@ -421,16 +443,21 @@ struct hclge_dev {
#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
u8 tx_sch_mode;
+ u8 tc_max;
+ u8 pfc_max;
u8 default_up;
+ u8 dcbx_cap;
struct hclge_tm_info tm_info;
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
u32 base_msi_vector;
- struct msix_entry *msix_entries;
u16 *vector_status;
+ int *vector_irq;
+ u16 num_roce_msi; /* Num of roce vectors for this PF */
+ int roce_base_vector;
u16 pending_udp_bitmap;
@@ -454,17 +481,14 @@ struct hclge_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
-#define HCLGE_FLAG_USE_MSI 0x00000001
-#define HCLGE_FLAG_USE_MSIX 0x00000002
-#define HCLGE_FLAG_MAIN 0x00000004
-#define HCLGE_FLAG_DCB_CAPABLE 0x00000008
-#define HCLGE_FLAG_DCB_ENABLE 0x00000010
+#define HCLGE_FLAG_MAIN BIT(0)
+#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
+#define HCLGE_FLAG_DCB_ENABLE BIT(2)
+#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
- struct hclge_priv_buf *priv_buf;
- struct hclge_shared_buf s_buf;
enum hclge_mta_dmac_sel_type mta_mac_sel_type;
bool enable_mta; /* Multicast filter enable */
@@ -517,4 +541,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto);
+
+int hclge_buffer_alloc(struct hclge_dev *hdev);
+int hclge_rss_init_hw(struct hclge_dev *hdev);
#endif
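The header changes swap hand-written masks for the kernel's BIT()/GENMASK() helpers without changing any values; a compile-time check of the kind one could drop into any function locally (not part of the patch) makes the equivalences explicit:

	/* GENMASK(h, l) sets bits h..l inclusive; BIT(n) sets bit n. */
	BUILD_BUG_ON(GENMASK(15, 0) != 0xffff);		/* HCLGE_RSS_SET_BITMAP_MSK */
	BUILD_BUG_ON(GENMASK(6, 5) != (3 << 5));	/* HCLGE_PHY_MDIX_CTRL_M */
	BUILD_BUG_ON(BIT(0) != 0x1);			/* HCLGE_D_PORT_BIT */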
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index f32d719c4f77..7069e9408d7d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -14,6 +14,13 @@
#include "hclge_main.h"
#include "hclge_mdio.h"
+#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_Pause | \
+ PHY_10BT_FEATURES | \
+ PHY_100BT_FEATURES | \
+ PHY_1000BT_FEATURES)
+
enum hclge_mdio_c22_op_seq {
HCLGE_MDIO_C22_WRITE = 1,
HCLGE_MDIO_C22_READ = 2
@@ -195,6 +202,9 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
return ret;
}
+ phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ phydev->advertising = phydev->supported;
+
phy_start(phydev);
return 0;
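On this kernel, phydev->supported and phydev->advertising are plain SUPPORTED_* bitmasks, so masking supported with HCLGE_PHY_SUPPORTED_FEATURES and copying it into advertising limits both what ethtool reports and what the PHY autonegotiates to modes the MAC can actually drive. A hypothetical extension of the same pattern (not in the patch) that would additionally drop half-duplex modes:

	phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
	/* Hypothetical extra restriction, same mechanism: */
	phydev->supported &= ~(SUPPORTED_10baseT_Half |
			       SUPPORTED_100baseT_Half);
	phydev->advertising = phydev->supported;

	phy_start(phydev);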
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 73a75d7cc551..7bfa2e5497cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -124,6 +124,20 @@ static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
+{
+ struct hclge_desc desc;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
+
+ pfc->tx_rx_en_bitmap = tx_rx_bitmap;
+ pfc->pri_en_bitmap = pfc_bitmap;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
@@ -269,6 +283,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
+ u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING;
@@ -278,11 +293,41 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para = 0;
+ u8 ir_u, ir_b, ir_s;
+ int ret;
+
+ ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
+ HCLGE_SHAPER_LVL_PORT,
+ &ir_b, &ir_u, &ir_s);
+ if (ret)
+ return ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
+ shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
+ hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);
+
+ shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -295,6 +340,7 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
+ u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING;
@@ -305,11 +351,13 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -346,13 +394,13 @@ static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
+static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
struct hclge_desc desc;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
- if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
+ if (mode == HCLGE_SCH_MODE_DWRR)
desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
else
desc.data[1] = 0;
@@ -386,7 +434,6 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
u8 i;
- kinfo = &vport->nic.kinfo;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
kinfo->num_tc =
min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
@@ -444,7 +491,11 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ /* DCB is enabled if we have more than 1 TC */
+ if (hdev->tm_info.num_tc > 1)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -470,6 +521,24 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
}
}
+static void hclge_pfc_info_init(struct hclge_dev *hdev)
+{
+ if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+ if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
+ dev_warn(&hdev->pdev->dev,
+ "DCB is disable, but last mode is FC_PFC\n");
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ /* fc_mode_last_time records the last fc_mode when
+ * DCB is enabled, so that fc_mode can be set to
+ * the correct value when DCB is disabled.
+ */
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
@@ -482,8 +551,7 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
- hdev->tm_info.fc_mode = HCLGE_FC_NONE;
- hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hclge_pfc_info_init(hdev);
return 0;
}
@@ -596,17 +664,18 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u32 i;
+ u32 i, k;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
/* Cfg qs -> pri mapping, one by one mapping */
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
- if (ret)
- return ret;
- }
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(
+ hdev, vport[k].qs_offset + i, i);
+ if (ret)
+ return ret;
+ }
} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
- int k;
/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
for (k = 0; k < hdev->num_alloc_vport; k++)
for (i = 0; i < HNAE3_MAX_TC; i++) {
@@ -696,13 +765,11 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
- struct hnae3_tc_info *v_tc_info;
u8 ir_u, ir_b, ir_s;
u32 i;
int ret;
for (i = 0; i < kinfo->num_tc; i++) {
- v_tc_info = &kinfo->tc_info[i];
ret = hclge_shaper_para_calc(
hdev->tm_info.tc_info[i].bw_limit,
HCLGE_SHAPER_LVL_QSET,
@@ -755,10 +822,11 @@ static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
+ struct hclge_vport *vport = hdev->vport;
struct hclge_pg_info *pg_info;
u8 dwrr;
int ret;
- u32 i;
+ u32 i, k;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
pg_info =
@@ -769,9 +837,13 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
- if (ret)
- return ret;
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ ret = hclge_tm_qs_weight_cfg(
+ hdev, vport[k].qs_offset + i,
+ vport[k].dwrr);
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -835,10 +907,14 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
return 0;
}
-static int hclge_tm_map_cfg(struct hclge_dev *hdev)
+int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
int ret;
+ ret = hclge_up_to_tc_map(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -850,6 +926,10 @@ static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
int ret;
+ ret = hclge_tm_port_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_tm_pg_shaper_cfg(hdev);
if (ret)
return ret;
@@ -898,7 +978,10 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
return ret;
for (i = 0; i < kinfo->num_tc; i++) {
- ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
+ u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
+
+ ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
+ sch_mode);
if (ret)
return ret;
}
@@ -910,7 +993,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u8 i;
+ u8 i, k;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
for (i = 0; i < hdev->tm_info.num_tc; i++) {
@@ -918,9 +1001,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
- if (ret)
- return ret;
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ ret = hclge_tm_qs_schd_mode_cfg(
+ hdev, vport[k].qs_offset + i,
+ HCLGE_SCH_MODE_DWRR);
+ if (ret)
+ return ret;
+ }
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
@@ -935,7 +1022,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
-static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
int ret;
@@ -969,27 +1056,109 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
return hclge_tm_schd_mode_hw(hdev);
}
+static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
+{
+ u8 enable_bitmap = 0;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
+ HCLGE_RX_MAC_PAUSE_EN_MSK;
+
+ return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
+ hdev->tm_info.hw_pfc_map);
+}
+
+static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+{
+ bool tx_en, rx_en;
+
+ switch (hdev->tm_info.fc_mode) {
+ case HCLGE_FC_NONE:
+ tx_en = false;
+ rx_en = false;
+ break;
+ case HCLGE_FC_RX_PAUSE:
+ tx_en = false;
+ rx_en = true;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ tx_en = true;
+ rx_en = false;
+ break;
+ case HCLGE_FC_FULL:
+ tx_en = true;
+ rx_en = true;
+ break;
+ default:
+ tx_en = true;
+ rx_en = true;
+ }
+
+ return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+}
+
int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
- bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
int ret;
u8 i;
- ret = hclge_mac_pause_en_cfg(hdev, en, en);
- if (ret)
- return ret;
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
+ return hclge_mac_pause_setup_hw(hdev);
- /* Only DCB-supported dev supports qset back pressure setting */
+ /* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev))
return 0;
+ /* When the MAC is in GE mode, the hdev does not support PFC setting */
+ ret = hclge_pfc_setup_hw(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);
+
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_tm_qs_bp_cfg(hdev, i);
if (ret)
return ret;
}
- return hclge_up_to_tc_map(hdev);
+ return 0;
+}
+
+int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, k;
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= hdev->tm_info.num_tc)
+ return -EINVAL;
+ hdev->tm_info.prio_tc[i] = prio_tc[i];
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ kinfo = &vport[k].nic.kinfo;
+ kinfo->prio_tc[i] = prio_tc[i];
+ }
+ }
+ return 0;
+}
+
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+{
+ u8 i, bit_map = 0;
+
+ hdev->tm_info.num_tc = num_tc;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ bit_map |= BIT(i);
+
+ if (!bit_map) {
+ bit_map = 1;
+ hdev->tm_info.num_tc = 1;
+ }
+
+ hdev->hw_tc_map = bit_map;
+
+ hclge_tm_schd_info_init(hdev);
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
@@ -1013,8 +1182,13 @@ int hclge_tm_init_hw(struct hclge_dev *hdev)
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
- int ret = hclge_tm_schd_info_init(hdev);
+ int ret;
+
+ /* fc_mode is HCLGE_FC_FULL on reset */
+ hdev->tm_info.fc_mode = HCLGE_FC_FULL;
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ ret = hclge_tm_schd_info_init(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 85158b0d73fe..bf59961918ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -94,6 +94,15 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
+struct hclge_pfc_en_cmd {
+ u8 tx_rx_en_bitmap;
+ u8 pri_en_bitmap;
+};
+
+struct hclge_port_shapping_cmd {
+ __le32 port_shapping_para;
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
@@ -103,4 +112,10 @@ struct hclge_bp_to_qs_map_cmd {
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
+int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
+int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
+int hclge_tm_map_cfg(struct hclge_dev *hdev);
+int hclge_tm_init_hw(struct hclge_dev *hdev);
#endif
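The shaper hunks in hclge_tm.c above pack ir_b/ir_u/ir_s/bs_b/bs_s into a host-order u32 through hclge_tm_set_field() and convert once on store. Expanding one call by hand (using the per-field HCLGE_TM_SHAP_*_MSK/_LSH constants the macro references) shows what it does:

	u32 shapping_para = 0;

	/* hclge_tm_set_field(shapping_para, IR_B, ir_b) token-pastes into: */
	hnae_set_field(shapping_para, HCLGE_TM_SHAP_IR_B_MSK,
		       HCLGE_TM_SHAP_IR_B_LSH, ir_b);

	/* All five fields are ORed together in CPU byte order; the descriptor
	 * then gets a single, well-defined conversion on store:
	 */
	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

The previous code applied hnae_set_field() directly to the __le32 descriptor member, which is only correct on little-endian hosts; the local-plus-cpu_to_le32() form is the same endian cleanup seen in hclge_main.c.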
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
new file mode 100644
index 000000000000..925619a7c50a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016-2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+static
+int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->ieee_getets)
+ return h->kinfo.dcb_ops->ieee_getets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static
+int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->ieee_setets)
+ return h->kinfo.dcb_ops->ieee_setets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static
+int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->ieee_getpfc)
+ return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+static
+int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->ieee_setpfc)
+ return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+/* DCBX configuration */
+static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->getdcbx)
+ return h->kinfo.dcb_ops->getdcbx(h);
+
+ return 0;
+}
+
+/* return 0 if successful, otherwise fail */
+static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->setdcbx)
+ return h->kinfo.dcb_ops->setdcbx(h, mode);
+
+ return 1;
+}
+
+static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
+ .ieee_getets = hns3_dcbnl_ieee_getets,
+ .ieee_setets = hns3_dcbnl_ieee_setets,
+ .ieee_getpfc = hns3_dcbnl_ieee_getpfc,
+ .ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .getdcbx = hns3_dcbnl_getdcbx,
+ .setdcbx = hns3_dcbnl_setdcbx,
+};
+
+/* hns3_dcbnl_setup - DCBNL setup
+ * @handle: the corresponding vport handle
+ * Set up DCBNL
+ */
+void hns3_dcbnl_setup(struct hnae3_handle *handle)
+{
+ struct net_device *dev = handle->kinfo.netdev;
+
+ if (!handle->kinfo.dcb_ops)
+ return;
+
+ dev->dcbnl_ops = &hns3_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
index 35369e1c8036..59415090ff0f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -19,12 +19,13 @@
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
+#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "hnae3.h"
#include "hns3_enet.h"
-const char hns3_driver_name[] = "hns3";
+static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
"Hisilicon Ethernet Network Driver for Hip08 Family";
@@ -196,6 +197,31 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
}
+static int hns3_nic_set_real_num_queue(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
+ int ret;
+
+ ret = netif_set_real_num_tx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_tx_queues fail, ret=%d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -233,25 +259,13 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
int ret;
netif_carrier_off(netdev);
- ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
- if (ret) {
- netdev_err(netdev,
- "netif_set_real_num_tx_queues fail, ret=%d!\n",
- ret);
+ ret = hns3_nic_set_real_num_queue(netdev);
+ if (ret)
return ret;
- }
-
- ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
- if (ret) {
- netdev_err(netdev,
- "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
- return ret;
- }
ret = hns3_nic_net_up(netdev);
if (ret) {
@@ -260,6 +274,7 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret;
}
+ priv->last_reset_time = jiffies;
return 0;
}
@@ -292,24 +307,10 @@ static int hns3_nic_net_stop(struct net_device *netdev)
return 0;
}
-void hns3_set_multicast_list(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
- struct netdev_hw_addr *ha = NULL;
-
- if (h->ae_algo->ops->set_mc_addr) {
- netdev_for_each_mc_addr(ha, netdev)
- if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
- netdev_err(netdev, "set multicast fail\n");
- }
-}
-
static int hns3_nic_uc_sync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->add_uc_addr)
return h->ae_algo->ops->add_uc_addr(h, addr);
@@ -320,8 +321,7 @@ static int hns3_nic_uc_sync(struct net_device *netdev,
static int hns3_nic_uc_unsync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->rm_uc_addr)
return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -332,8 +332,7 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
static int hns3_nic_mc_sync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->add_mc_addr)
return h->ae_algo->ops->add_mc_addr(h, addr);
@@ -344,8 +343,7 @@ static int hns3_nic_mc_sync(struct net_device *netdev,
static int hns3_nic_mc_unsync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->rm_mc_addr)
return h->ae_algo->ops->rm_mc_addr(h, addr);
@@ -353,10 +351,9 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
return 0;
}
-void hns3_nic_set_rx_mode(struct net_device *netdev)
+static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->set_promisc_mode) {
if (netdev->flags & IFF_PROMISC)
@@ -721,7 +718,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
HNS3_TXD_BDTYPE_M, 0);
hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
- hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
+ hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
@@ -755,7 +752,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
- paylen = cpu_to_le16(skb->len);
+ paylen = skb->len;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
@@ -789,7 +786,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len =
cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le16(paylen);
+ desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
}
@@ -905,8 +902,7 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
}
}
-static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_nic_ring_data *ring_data =
@@ -1012,8 +1008,7 @@ out_net_tx_busy:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -1193,61 +1188,80 @@ static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
}
}
-static int hns3_setup_tc(struct net_device *netdev, u8 tc)
+static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
+ u8 tc = mqprio_qopt->qopt.num_tc;
+ u16 mode = mqprio_qopt->mode;
+ u8 hw = mqprio_qopt->qopt.hw;
+ bool if_running;
unsigned int i;
int ret;
+ if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
+ mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
+ return -EOPNOTSUPP;
+
if (tc > HNAE3_MAX_TC)
return -EINVAL;
- if (kinfo->num_tc == tc)
- return 0;
-
if (!netdev)
return -EINVAL;
- if (!tc) {
- netdev_reset_tc(netdev);
- return 0;
+ if_running = netif_running(netdev);
+ if (if_running) {
+ hns3_nic_net_stop(netdev);
+ msleep(100);
}
- /* Set num_tc for netdev */
- ret = netdev_set_num_tc(netdev, tc);
+ ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
if (ret)
- return ret;
+ goto out;
+
+ if (tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, tc);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
- /* Set per TC queues for the VSI */
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (kinfo->tc_info[i].enable)
netdev_set_tc_queue(netdev,
kinfo->tc_info[i].tc,
kinfo->tc_info[i].tqp_count,
kinfo->tc_info[i].tqp_offset);
+ }
}
- return 0;
+ ret = hns3_nic_set_real_num_queue(netdev);
+
+out:
+ if (if_running)
+ hns3_nic_net_open(netdev);
+
+ return ret;
}
static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- struct tc_mqprio_qopt *mqprio = type_data;
-
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
- return hns3_setup_tc(dev, mqprio->num_tc);
+ return hns3_setup_tc(dev, type_data);
}
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
@@ -1259,8 +1273,7 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
@@ -1272,8 +1285,7 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vf_vlan_filter)
@@ -1285,8 +1297,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
bool if_running = netif_running(netdev);
int ret;
@@ -1313,10 +1324,91 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *tx_ring = NULL;
+ int timeout_queue = 0;
+ int hw_head, hw_tail;
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+ for (i = 0; i < ndev->real_num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(ndev, i);
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + ndev->watchdog_timeo))) {
+ timeout_queue = i;
+ break;
+ }
+ }
+
+ if (i == ndev->real_num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ tx_ring = priv->ring_data[timeout_queue].ring;
+
+ hw_head = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ hw_tail = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
+ priv->tx_timeout_count,
+ timeout_queue,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ hw_head,
+ hw_tail,
+ readl(tx_ring->tqp_vector->mask_addr));
+
+ return true;
+}
+
+static void hns3_nic_net_timeout(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ unsigned long last_reset_time = priv->last_reset_time;
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (!hns3_get_tx_timeo_queue_info(ndev))
+ return;
+
+ priv->tx_timeout_count++;
+
+ /* This timeout is far enough away from the last timeout;
+ * if it times out again, set the reset type to PF reset.
+ */
+ if (time_after(jiffies, (last_reset_time + 20 * HZ)))
+ priv->reset_level = HNAE3_FUNC_RESET;
+
+ /* Don't do any new action before the next timeout */
+ else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
+ return;
+
+ priv->last_reset_time = jiffies;
+
+ if (h->ae_algo->ops->reset_event)
+ h->ae_algo->ops->reset_event(h, priv->reset_level);
+
+ priv->reset_level++;
+ if (priv->reset_level > HNAE3_GLOBAL_RESET)
+ priv->reset_level = HNAE3_GLOBAL_RESET;
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
.ndo_start_xmit = hns3_nic_net_xmit,
+ .ndo_tx_timeout = hns3_nic_net_timeout,
.ndo_set_mac_address = hns3_nic_net_set_mac_address,
.ndo_change_mtu = hns3_nic_change_mtu,
.ndo_set_features = hns3_nic_set_features,
@@ -1435,8 +1527,6 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->length = hnae_page_size(ring);
cb->type = DESC_TYPE_PAGE;
- memset(cb->buf, 0, cb->length);
-
return 0;
}
@@ -1546,7 +1636,7 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
return 0;
out_with_buf:
- hns3_free_buffers(ring);
+ hns3_free_buffer(ring, cb);
out:
return ret;
}
@@ -1586,7 +1676,7 @@ out_buffer_fail:
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
struct hns3_desc_cb *res_cb)
{
- hns3_map_buffer(ring, &ring->desc_cb[i]);
+ hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
}
@@ -1622,7 +1712,7 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
-int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
struct netdev_queue *dev_queue;
@@ -1633,7 +1723,7 @@ int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
rmb(); /* Make sure head is ready before touch any data */
if (is_ring_empty(ring) || head == ring->next_to_clean)
- return 0; /* no data to poll */
+ return true; /* no data to poll */
if (!is_valid_clean_head(ring, head)) {
netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
@@ -1642,7 +1732,7 @@ int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
u64_stats_update_begin(&ring->syncp);
ring->stats.io_err_cnt++;
u64_stats_update_end(&ring->syncp);
- return -EIO;
+ return true;
}
bytes = 0;
@@ -1933,6 +2023,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
}
}
+static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ napi_gro_receive(&ring->tqp_vector->napi, skb);
+}
+
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
{
@@ -2067,7 +2162,9 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return 0;
}
-static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
+int hns3_clean_rx_ring(
+ struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
@@ -2105,7 +2202,7 @@ static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
- (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
+ rx_fn(ring, skb);
recv_pkts++;
}
@@ -2248,7 +2345,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
- int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
+ int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
+ hns3_rx_skb);
if (rx_cleaned >= rx_budget)
clean_complete = false;
@@ -2460,9 +2558,8 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
(void)irq_set_affinity_hint(
priv->tqp_vector[i].vector_irq,
NULL);
- devm_free_irq(&pdev->dev,
- priv->tqp_vector[i].vector_irq,
- &priv->tqp_vector[i]);
+ free_irq(priv->tqp_vector[i].vector_irq,
+ &priv->tqp_vector[i]);
}
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -2489,16 +2586,16 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
if (ring_type == HNAE3_RING_TYPE_TX) {
ring_data[q->tqp_index].ring = ring;
+ ring_data[q->tqp_index].queue_index = q->tqp_index;
ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
ring_data[q->tqp_index + queue_num].ring = ring;
+ ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
ring->io_base = q->io_base;
}
hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
- ring_data[q->tqp_index].queue_index = q->tqp_index;
-
ring->tqp = q;
ring->desc = NULL;
ring->desc_cb = NULL;
@@ -2596,7 +2693,7 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->next_to_use = 0;
}
-int hns3_buf_size2type(u32 buf_size)
+static int hns3_buf_size2type(u32 buf_size)
{
int bd_size_type;
@@ -2649,7 +2746,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
}
}
-static int hns3_init_all_ring(struct hns3_nic_priv *priv)
+int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int ring_num = h->kinfo.num_tqps * 2;
@@ -2673,12 +2770,12 @@ static int hns3_init_all_ring(struct hns3_nic_priv *priv)
out_when_alloc_ring_memory:
for (j = i - 1; j >= 0; j--)
- hns3_fini_ring(priv->ring_data[i].ring);
+ hns3_fini_ring(priv->ring_data[j].ring);
return -ENOMEM;
}
-static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -2748,6 +2845,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
+ priv->last_reset_time = jiffies;
+ priv->reset_level = HNAE3_FUNC_RESET;
+ priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -2790,6 +2890,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ hns3_dcbnl_setup(handle);
+
/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
@@ -2846,10 +2948,224 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
}
}
-const struct hnae3_client_ops client_ops = {
+static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct net_device *ndev = kinfo->netdev;
+ bool if_running;
+ int ret;
+ u8 i;
+
+ if (tc > HNAE3_MAX_TC)
+ return -EINVAL;
+
+ if (!ndev)
+ return -ENODEV;
+
+ if_running = netif_running(ndev);
+
+ ret = netdev_set_num_tc(ndev, tc);
+ if (ret)
+ return ret;
+
+ if (if_running) {
+ (void)hns3_nic_net_stop(ndev);
+ msleep(100);
+ }
+
+ ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
+ kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
+ if (ret)
+ goto err_out;
+
+ if (tc <= 1) {
+ netdev_reset_tc(ndev);
+ goto out;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+
+ if (tc_info->enable)
+ netdev_set_tc_queue(ndev,
+ tc_info->tc,
+ tc_info->tqp_count,
+ tc_info->tqp_offset);
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ netdev_set_prio_tc_map(ndev, i,
+ kinfo->prio_tc[i]);
+ }
+
+out:
+ ret = hns3_nic_set_real_num_queue(ndev);
+
+err_out:
+ if (if_running)
+ (void)hns3_nic_net_open(ndev);
+
+ return ret;
+}
+
+static void hns3_recover_hw_addr(struct net_device *ndev)
+{
+ struct netdev_hw_addr_list *list;
+ struct netdev_hw_addr *ha, *tmp;
+
+ /* go through and sync uc_addr entries to the device */
+ list = &ndev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ hns3_nic_uc_sync(ndev, ha->addr);
+
+ /* go through and sync mc_addr entries to the device */
+ list = &ndev->mc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ hns3_nic_mc_sync(ndev, ha->addr);
+}
+
+static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+static void hns3_clear_all_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ struct netdev_queue *dev_queue;
+ struct hns3_enet_ring *ring;
+
+ ring = priv->ring_data[i].ring;
+ hns3_clean_tx_ring(ring, ring->desc_num);
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring_data[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+
+ ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
+ }
+}
+
+static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct net_device *ndev = kinfo->netdev;
+
+ if (!netif_running(ndev))
+ return -EIO;
+
+ return hns3_nic_net_stop(ndev);
+}
+
+static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
+ int ret = 0;
+
+ if (netif_running(kinfo->netdev)) {
+ ret = hns3_nic_net_up(kinfo->netdev);
+ if (ret) {
+ netdev_err(kinfo->netdev,
+ "hns net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ priv->last_reset_time = jiffies;
+ }
+
+ return ret;
+}
+
+static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ priv->reset_level = 1;
+ hns3_init_mac_addr(netdev);
+ hns3_nic_set_rx_mode(netdev);
+ hns3_recover_hw_addr(netdev);
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+ }
+
+ return ret;
+}
+
+static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ hns3_clear_all_ring(handle);
+
+ ret = hns3_nic_uninit_vector_data(priv);
+ if (ret) {
+ netdev_err(netdev, "uninit vector error\n");
+ return ret;
+ }
+
+ ret = hns3_uninit_all_ring(priv);
+ if (ret)
+ netdev_err(netdev, "uninit ring error\n");
+
+ priv->ring_data = NULL;
+
+ return ret;
+}
+
+static int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_UP_CLIENT:
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
+ case HNAE3_DOWN_CLIENT:
+ ret = hns3_reset_notify_down_enet(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns3_reset_notify_init_enet(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns3_reset_notify_uninit_enet(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
+ .setup_tc = hns3_client_setup_tc,
+ .reset_notify = hns3_reset_notify,
};
/* hns3_init_module - Driver registration routine
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
index 7e8746189747..8a9de759957b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
@@ -76,6 +76,8 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MIN_PENDING 8
+#define HNS3_RING_BD_MULTIPLE 8
#define HNS3_MAX_MTU 9728
#define HNS3_BD_SIZE_512_TYPE 0
@@ -516,6 +518,8 @@ struct hns3_nic_priv {
/* The most recently read link state */
int link;
u64 tx_timeout_count;
+ enum hnae3_reset_type reset_level;
+ unsigned long last_reset_time;
unsigned long state;
@@ -587,7 +591,23 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+#define hns3_get_handle(ndev) \
+ (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+
void hns3_ethtool_set_ops(struct net_device *netdev);
-int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+int hns3_init_all_ring(struct hns3_nic_priv *priv);
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+int hns3_clean_rx_ring(
+ struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+
+#ifdef CONFIG_HNS3_DCB
+void hns3_dcbnl_setup(struct hnae3_handle *handle);
+#else
+static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
+#endif
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
index d636399232fb..a21470c72da3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/phy.h>
#include "hns3_enet.h"
@@ -59,6 +60,16 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
+#define HNS3_SELF_TEST_TYPE_NUM 1
+#define HNS3_NIC_LB_TEST_PKT_NUM 1
+#define HNS3_NIC_LB_TEST_RING_ID 0
+#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+
+/* Nic loopback test err */
+#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
+#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
+#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+
struct hns3_link_mode_mapping {
u32 hns3_link_mode;
u32 ethtool_link_mode;
@@ -77,6 +88,268 @@ static const struct hns3_link_mode_mapping hns3_lm_map[] = {
{HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
};
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ int ret;
+
+ if (!h->ae_algo->ops->set_loopback ||
+ !h->ae_algo->ops->set_promisc_mode)
+ return -EOPNOTSUPP;
+
+ switch (loop) {
+ case HNAE3_MAC_INTER_LOOP_MAC:
+ ret = h->ae_algo->ops->set_loopback(h, loop, true);
+ break;
+ case HNAE3_MAC_LOOP_NONE:
+ ret = h->ae_algo->ops->set_loopback(h,
+ HNAE3_MAC_INTER_LOOP_MAC, false);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ if (loop == HNAE3_MAC_LOOP_NONE)
+ h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC);
+ else
+ h->ae_algo->ops->set_promisc_mode(h, 1);
+
+ return ret;
+}
+
+static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ int ret;
+
+ if (!h->ae_algo->ops->start)
+ return -EOPNOTSUPP;
+
+ ret = h->ae_algo->ops->start(h);
+ if (ret) {
+ netdev_err(ndev,
+ "hns3_lb_up ae start return error: %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_lp_setup(ndev, loop_mode);
+ usleep_range(10000, 20000);
+
+ return ret;
+}
+
+static int hns3_lp_down(struct net_device *ndev)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ int ret;
+
+ if (!h->ae_algo->ops->stop)
+ return -EOPNOTSUPP;
+
+ ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE);
+ if (ret) {
+ netdev_err(ndev, "lb_setup return error: %d\n", ret);
+ return ret;
+ }
+
+ h->ae_algo->ops->stop(h);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static void hns3_lp_setup_skb(struct sk_buff *skb)
+{
+ struct net_device *ndev = skb->dev;
+ unsigned char *packet;
+ struct ethhdr *ethh;
+ unsigned int i;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ ethh = skb_put(skb, sizeof(struct ethhdr));
+ packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+ eth_zero_addr(ethh->h_source);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_reset_mac_header(skb);
+
+ for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+}
+
+static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
+ unsigned char *packet = skb->data;
+ u32 i;
+
+ for (i = 0; i < skb->len; i++)
+ if (packet[i] != (unsigned char)(i & 0xff))
+ break;
+
+ /* The packet is correctly received */
+ if (i == skb->len)
+ tqp_vector->rx_group.total_packets++;
+ else
+ print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+
+ dev_kfree_skb_any(skb);
+}
+
+static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, rcv_good_pkt_total = 0;
+
+ kinfo = &h->kinfo;
+ for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
+ struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+ struct hns3_enet_ring_group *rx_group;
+ u64 pre_rx_pkt;
+
+ rx_group = &ring->tqp_vector->rx_group;
+ pre_rx_pkt = rx_group->total_packets;
+
+ hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+
+ rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
+ rx_group->total_packets = pre_rx_pkt;
+ }
+ return rcv_good_pkt_total;
+}
+
+static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
+ u32 end_ringid, u32 budget)
+{
+ u32 i;
+
+ for (i = start_ringid; i <= end_ringid; i++) {
+ struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+
+ hns3_clean_tx_ring(ring, budget);
+ }
+}
+
+/**
+ * hns3_lp_run_test - run loopback test
+ * @ndev: net device
+ * @mode: loopback type
+ */
+static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 i, good_cnt;
+ int ret_val = 0;
+
+ skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!skb)
+ return HNS3_NIC_LB_TEST_NO_MEM_ERR;
+
+ skb->dev = ndev;
+ hns3_lp_setup_skb(skb);
+ skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;
+
+ good_cnt = 0;
+ for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
+ netdev_tx_t tx_ret;
+
+ skb_get(skb);
+ tx_ret = hns3_nic_net_xmit(skb, ndev);
+ if (tx_ret == NETDEV_TX_OK)
+ good_cnt++;
+ else
+ netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
+ tx_ret);
+ }
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
+ netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ goto out;
+ }
+
+ /* Allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
+ netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ }
+
+out:
+ hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
+ HNS3_NIC_LB_TEST_RING_ID,
+ HNS3_NIC_LB_TEST_PKT_NUM);
+
+ kfree_skb(skb);
+ return ret_val;
+}
+
+/**
+ * hns3_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ bool if_running = netif_running(ndev);
+ int test_index = 0;
+ u32 i;
+
+ /* Only do offline selftest, or pass by default */
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ return;
+
+ st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC;
+ st_param[HNAE3_MAC_INTER_LOOP_MAC][1] =
+ h->flags & HNAE3_SUPPORT_MAC_LOOPBACK;
+
+ if (if_running)
+ dev_close(ndev);
+
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+
+ if (!st_param[i][1])
+ continue;
+
+ data[test_index] = hns3_lp_up(ndev, loop_type);
+ if (!data[test_index]) {
+ data[test_index] = hns3_lp_run_test(ndev, loop_type);
+ hns3_lp_down(ndev);
+ }
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ dev_open(ndev);
+}
+
static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
bool is_advertised)
{
@@ -86,24 +359,18 @@ static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
if (!(caps & hns3_lm_map[i].hns3_link_mode))
continue;
- if (is_advertised) {
- ethtool_link_ksettings_zero_link_mode(cmd,
- advertising);
+ if (is_advertised)
__set_bit(hns3_lm_map[i].ethtool_link_mode,
cmd->link_modes.advertising);
- } else {
- ethtool_link_ksettings_zero_link_mode(cmd,
- supported);
+ else
__set_bit(hns3_lm_map[i].ethtool_link_mode,
cmd->link_modes.supported);
- }
}
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
if (!ops->get_sset_count)
@@ -164,8 +431,7 @@ static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
char *buff = (char *)data;
@@ -217,11 +483,10 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
- u64 *data)
+static void hns3_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
u64 *p = data;
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
@@ -262,10 +527,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
static u32 hns3_get_link(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h;
-
- h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h);
@@ -277,7 +539,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -289,8 +552,7 @@ static void hns3_get_ringparam(struct net_device *netdev,
static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
@@ -300,32 +562,30 @@ static void hns3_get_pauseparam(struct net_device *netdev,
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
u8 link_stat;
- u8 auto_neg;
- u8 duplex;
- u32 speed;
if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
/* 1.auto_neg & speed & duplex from cmd */
- if (h->ae_algo->ops->get_ksettings_an_result) {
- h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg,
- &speed, &duplex);
- cmd->base.autoneg = auto_neg;
- cmd->base.speed = speed;
- cmd->base.duplex = duplex;
-
- link_stat = hns3_get_link(netdev);
- if (!link_stat) {
- cmd->base.speed = (u32)SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
+ if (netdev->phydev)
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ else if (h->ae_algo->ops->get_ksettings_an_result)
+ h->ae_algo->ops->get_ksettings_an_result(h,
+ &cmd->base.autoneg,
+ &cmd->base.speed,
+ &cmd->base.duplex);
+ else
+ return -EOPNOTSUPP;
+
+ link_stat = hns3_get_link(netdev);
+ if (!link_stat) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
/* 2.media_type get from bios parameter block */
@@ -375,6 +635,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
break;
}
+ if (!cmd->base.autoneg)
+ advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
+
/* now, map driver link modes to ethtool link modes */
hns3_driv_to_eth_caps(supported_caps, cmd, false);
hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -390,10 +653,19 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static int hns3_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ /* Only support ksettings_set for netdev with phy attached for now */
+ if (netdev->phydev)
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_key_size)
@@ -404,8 +676,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_indir_size)
@@ -417,8 +688,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
@@ -429,8 +699,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -454,16 +723,17 @@ static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size)
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->ae_algo->ops->get_tc_size(h);
+ cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
break;
+ case ETHTOOL_GRXFH:
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
default:
return -EOPNOTSUPP;
}
@@ -471,20 +741,133 @@ static int hns3_get_rxnfc(struct net_device *netdev,
return 0;
}
+static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 new_desc_num)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.num_desc = new_desc_num;
+
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ priv->ring_data[i].ring->desc_num = new_desc_num;
+
+ return hns3_init_all_ring(priv);
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
+ u32 old_desc_num, new_desc_num;
+ int ret;
+
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (param->tx_pending != param->rx_pending) {
+ netdev_err(ndev,
+ "Descriptors of tx and rx must be equal");
+ return -EINVAL;
+ }
+
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
+ param->tx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev,
+ "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
+ param->tx_pending, HNS3_RING_MIN_PENDING,
+ HNS3_RING_MAX_PENDING);
+ return -EINVAL;
+ }
+
+ new_desc_num = param->tx_pending;
+
+ /* Hardware requires that the number of descriptors be a multiple of eight */
+ new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
+ old_desc_num = h->kinfo.num_desc;
+ if (old_desc_num == new_desc_num)
+ return 0;
+
+ netdev_info(ndev,
+ "Changing descriptor count from %d to %d.\n",
+ old_desc_num, new_desc_num);
+
+ if (if_running)
+ dev_close(ndev);
+
+ ret = hns3_uninit_all_ring(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
+ if (ret) {
+ ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
+ if (ret) {
+ netdev_err(ndev,
+ "Revert to old bd num fail, ret=%d.\n", ret);
+ return ret;
+ }
+ }
+
+ if (if_running)
+ ret = dev_open(ndev);
+
+ return ret;
+}
+
+static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hns3_nway_reset(struct net_device *netdev)
+{
+ struct phy_device *phy = netdev->phydev;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ /* Only support nway_reset for netdev with phy attached for now */
+ if (!phy)
+ return -EOPNOTSUPP;
+
+ if (phy->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ return genphy_restart_aneg(phy);
+}
+
static const struct ethtool_ops hns3_ethtool_ops = {
+ .self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
.get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
.set_rxfh = hns3_set_rss,
.get_link_ksettings = hns3_get_link_ksettings,
+ .set_link_ksettings = hns3_set_link_ksettings,
+ .nway_reset = hns3_nway_reset,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 1d4f712b15a8..e2e5cdc7119c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
+#include <linux/cpumask.h>
#include <asm/barrier.h>
#include "hinic_common.h"
@@ -171,11 +172,10 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq)
struct hinic_sge sge;
dma_addr_t dma_addr;
struct sk_buff *skb;
- int i, alloc_more;
u16 prod_idx;
+ int i;
free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
- alloc_more = 0;
/* Limit the allocation chunks */
if (free_wqebbs > nic_dev->rx_weight)
@@ -185,7 +185,6 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq)
skb = rx_alloc_skb(rxq, &dma_addr);
if (!skb) {
netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
- alloc_more = 1;
goto skb_out;
}
@@ -195,7 +194,6 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq)
&prod_idx);
if (!rq_wqe) {
rx_free_skb(rxq, skb, dma_addr);
- alloc_more = 1;
goto skb_out;
}
@@ -211,9 +209,7 @@ skb_out:
hinic_rq_update(rxq->rq, prod_idx);
}
- if (alloc_more)
- tasklet_schedule(&rxq->rx_task);
-
+ tasklet_schedule(&rxq->rx_task);
return i;
}
@@ -357,7 +353,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
}
if (pkts)
- tasklet_schedule(&rxq->rx_task); /* hinic_rx_alloc_pkts */
+ tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */
u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq->rxq_stats.pkts += pkts;
@@ -417,6 +413,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_rq *rq = rxq->rq;
+ struct hinic_qp *qp;
+ struct cpumask mask;
int err;
rx_add_napi(rxq);
@@ -432,7 +430,9 @@ static int rx_request_irq(struct hinic_rxq *rxq)
return err;
}
- return 0;
+ qp = container_of(rq, struct hinic_qp, rq);
+ cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
+ return irq_set_affinity_hint(rq->irq, &mask);
}
static void rx_free_irq(struct hinic_rxq *rxq)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index abe3e38cd342..9128858479c4 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -212,10 +212,19 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
if (!sq_wqe) {
- tx_unmap_skb(nic_dev, skb, txq->sges);
-
netif_stop_subqueue(netdev, qp->q_id);
+ /* Check for the case where free_tx_poll is called on another cpu
+ * and we stopped the subqueue after the free_tx_poll check.
+ */
+ sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+ if (sq_wqe) {
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ goto process_sq_wqe;
+ }
+
+ tx_unmap_skb(nic_dev, skb, txq->sges);
+
u64_stats_update_begin(&txq->txq_stats.syncp);
txq->txq_stats.tx_busy++;
u64_stats_update_end(&txq->txq_stats.syncp);
@@ -223,6 +232,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto flush_skbs;
}
+process_sq_wqe:
hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index 8c8dcd29c40d..422a19a5d94f 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Intel 82586/82593/82596 chipset device drivers.
#
diff --git a/drivers/net/ethernet/ibm/emac/Makefile b/drivers/net/ethernet/ibm/emac/Makefile
index 98768ba0955a..ddf1ce3c8cca 100644
--- a/drivers/net/ethernet/ibm/emac/Makefile
+++ b/drivers/net/ethernet/ibm/emac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the PowerPC 4xx on-chip ethernet driver
#
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c66abd476023..1dc4aef37d3a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -75,6 +75,7 @@
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
+#include <linux/utsname.h>
#include "ibmvnic.h"
@@ -115,6 +116,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
+static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -553,6 +555,10 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
+ rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb);
+ if (rc)
+ return rc;
+
memset(tx_pool->tx_buff, 0,
adapter->req_tx_entries_per_subcrq *
sizeof(struct ibmvnic_tx_buff));
@@ -562,11 +568,21 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
+ tx_pool->tso_index = 0;
}
return 0;
}
+static void release_vpd_data(struct ibmvnic_adapter *adapter)
+{
+ if (!adapter->vpd)
+ return;
+
+ kfree(adapter->vpd->buff);
+ kfree(adapter->vpd);
+}
+
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -581,6 +597,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
tx_pool = &adapter->tx_pool[i];
kfree(tx_pool->tx_buff);
free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_long_term_buff(adapter, &tx_pool->tso_ltb);
kfree(tx_pool->free_map);
}
@@ -625,6 +642,16 @@ static int init_tx_pools(struct net_device *netdev)
return -1;
}
+ /* alloc TSO ltb */
+ if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
+ IBMVNIC_TSO_BUFS *
+ IBMVNIC_TSO_BUF_SZ)) {
+ release_tx_pools(adapter);
+ return -1;
+ }
+
+ tx_pool->tso_index = 0;
+
tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) {
@@ -736,6 +763,8 @@ static void release_resources(struct ibmvnic_adapter *adapter)
{
int i;
+ release_vpd_data(adapter);
+
release_tx_pools(adapter);
release_rx_pools(adapter);
@@ -816,6 +845,56 @@ static int set_real_num_queues(struct net_device *netdev)
return rc;
}
+static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+ int len = 0;
+
+ if (adapter->vpd->buff)
+ len = adapter->vpd->len;
+
+ reinit_completion(&adapter->fw_done);
+ crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
+ crq.get_vpd_size.cmd = GET_VPD_SIZE;
+ ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
+
+ if (!adapter->vpd->len)
+ return -ENODATA;
+
+ if (!adapter->vpd->buff)
+ adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
+ else if (adapter->vpd->len != len)
+ adapter->vpd->buff =
+ krealloc(adapter->vpd->buff,
+ adapter->vpd->len, GFP_KERNEL);
+
+ if (!adapter->vpd->buff) {
+ dev_err(dev, "Could allocate VPD buffer\n");
+ return -ENOMEM;
+ }
+
+ adapter->vpd->dma_addr =
+ dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
+ dev_err(dev, "Could not map VPD buffer\n");
+ kfree(adapter->vpd->buff);
+ return -ENOMEM;
+ }
+
+ reinit_completion(&adapter->fw_done);
+ crq.get_vpd.first = IBMVNIC_CRQ_CMD;
+ crq.get_vpd.cmd = GET_VPD;
+ crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
+ crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
+ ibmvnic_send_crq(adapter, &crq);
+ wait_for_completion(&adapter->fw_done);
+
+ return 0;
+}
+
static int init_resources(struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -833,6 +912,10 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
+ adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
+ if (!adapter->vpd)
+ return -ENOMEM;
+
adapter->map_id = 1;
adapter->napi = kcalloc(adapter->req_rx_queues,
sizeof(struct napi_struct), GFP_KERNEL);
@@ -906,10 +989,15 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int rc;
+ int rc, vpd;
mutex_lock(&adapter->reset_lock);
+ if (adapter->mac_change_pending) {
+ __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+ adapter->mac_change_pending = false;
+ }
+
if (adapter->state != VNIC_CLOSED) {
rc = ibmvnic_login(netdev);
if (rc) {
@@ -927,6 +1015,13 @@ static int ibmvnic_open(struct net_device *netdev)
}
rc = __ibmvnic_open(netdev);
+ netif_carrier_on(netdev);
+
+ /* Vital Product Data (VPD) */
+ vpd = ibmvnic_get_vpd(adapter);
+ if (vpd)
+ netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
+
mutex_unlock(&adapter->reset_lock);
return rc;
@@ -1200,11 +1295,41 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
index = tx_pool->free_map[tx_pool->consumer_index];
- offset = index * adapter->req_mtu;
- dst = tx_pool->long_term_buff.buff + offset;
- memset(dst, 0, adapter->req_mtu);
- skb_copy_from_linear_data(skb, dst, skb->len);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+
+ if (skb_is_gso(skb)) {
+ offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
+ dst = tx_pool->tso_ltb.buff + offset;
+ memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
+ data_dma_addr = tx_pool->tso_ltb.addr + offset;
+ tx_pool->tso_index++;
+ if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
+ tx_pool->tso_index = 0;
+ } else {
+ offset = index * adapter->req_mtu;
+ dst = tx_pool->long_term_buff.buff + offset;
+ memset(dst, 0, adapter->req_mtu);
+ data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ }
+
+ if (skb_shinfo(skb)->nr_frags) {
+ int cur, i;
+
+ /* Copy the head */
+ skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
+ cur = skb_headlen(skb);
+
+ /* Copy the frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(dst + cur,
+ page_address(skb_frag_page(frag)) +
+ frag->page_offset, skb_frag_size(frag));
+ cur += skb_frag_size(frag);
+ }
+ } else {
+ skb_copy_from_linear_data(skb, dst, skb->len);
+ }
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
@@ -1225,7 +1350,10 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.n_sge = 1;
tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ if (skb_is_gso(skb))
+ tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
+ else
+ tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -1250,6 +1378,11 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
hdrs += 2;
}
+ if (skb_is_gso(skb)) {
+ tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
+ tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ hdrs += 2;
+ }
/* determine if l2/3/4 headers are sent to firmware */
if ((*hdrs >> 7) & 1 &&
(skb->protocol == htons(ETH_P_IP) ||
@@ -1371,7 +1504,7 @@ static void ibmvnic_set_multi(struct net_device *netdev)
}
}
-static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
@@ -1389,6 +1522,22 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (adapter->state != VNIC_OPEN) {
+ memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
+ adapter->mac_change_pending = true;
+ return 0;
+ }
+
+ __ibmvnic_set_mac(netdev, addr);
+
+ return 0;
+}
+
/**
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
@@ -1415,6 +1564,13 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (rc)
return rc;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
+ adapter->wait_for_reset) {
+ release_resources(adapter);
+ release_sub_crqs(adapter);
+ release_crq_queue(adapter);
+ }
+
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
/* remove the closed state so when we call open it appears
* we are coming from the probed state.
@@ -1423,7 +1579,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = ibmvnic_init(adapter);
if (rc)
- return 0;
+ return IBMVNIC_INIT_FAILED;
/* If the adapter was in PROBE state prior to the reset,
* exit here.
@@ -1437,16 +1593,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
return 0;
}
- rc = reset_tx_pools(adapter);
- if (rc)
- return rc;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
+ adapter->wait_for_reset) {
+ rc = init_resources(adapter);
+ if (rc)
+ return rc;
+ } else {
+ rc = reset_tx_pools(adapter);
+ if (rc)
+ return rc;
- rc = reset_rx_pools(adapter);
- if (rc)
- return rc;
+ rc = reset_rx_pools(adapter);
+ if (rc)
+ return rc;
- if (reset_state == VNIC_CLOSED)
- return 0;
+ if (reset_state == VNIC_CLOSED)
+ return 0;
+ }
}
rc = __ibmvnic_open(netdev);
@@ -1506,7 +1669,7 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
u32 reset_state;
- int rc;
+ int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
netdev = adapter->netdev;
@@ -1519,12 +1682,18 @@ static void __ibmvnic_reset(struct work_struct *work)
while (rwi) {
rc = do_reset(adapter, rwi, reset_state);
kfree(rwi);
- if (rc)
+ if (rc && rc != IBMVNIC_INIT_FAILED)
break;
rwi = get_next_rwi(adapter);
}
+ if (adapter->wait_for_reset) {
+ adapter->wait_for_reset = false;
+ adapter->reset_done_rc = rc;
+ complete(&adapter->reset_done);
+ }
+
if (rc) {
netdev_dbg(adapter->netdev, "Reset failed\n");
free_all_rwi(adapter);
@@ -1704,9 +1873,42 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
}
#endif
+static int wait_for_reset(struct ibmvnic_adapter *adapter)
+{
+ adapter->fallback.mtu = adapter->req_mtu;
+ adapter->fallback.rx_queues = adapter->req_rx_queues;
+ adapter->fallback.tx_queues = adapter->req_tx_queues;
+ adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
+ adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
+
+ init_completion(&adapter->reset_done);
+ ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ adapter->wait_for_reset = true;
+ wait_for_completion(&adapter->reset_done);
+
+ if (adapter->reset_done_rc) {
+ adapter->desired.mtu = adapter->fallback.mtu;
+ adapter->desired.rx_queues = adapter->fallback.rx_queues;
+ adapter->desired.tx_queues = adapter->fallback.tx_queues;
+ adapter->desired.rx_entries = adapter->fallback.rx_entries;
+ adapter->desired.tx_entries = adapter->fallback.tx_entries;
+
+ init_completion(&adapter->reset_done);
+ ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+ wait_for_completion(&adapter->reset_done);
+ }
+ adapter->wait_for_reset = false;
+
+ return adapter->reset_done_rc;
+}
+
static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
- return -EOPNOTSUPP;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->desired.mtu = new_mtu + ETH_HLEN;
+
+ return wait_for_reset(adapter);
}
static const struct net_device_ops ibmvnic_netdev_ops = {
@@ -1748,11 +1950,15 @@ static int ibmvnic_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static void ibmvnic_get_drvinfo(struct net_device *dev,
+static void ibmvnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, adapter->fw_version,
+ sizeof(info->fw_version));
}
static u32 ibmvnic_get_msglevel(struct net_device *netdev)
@@ -1794,6 +2000,27 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
ring->rx_jumbo_pending = 0;
}
+static int ibmvnic_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+ netdev_err(netdev, "Max tx buffers = %llu\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ netdev_err(netdev, "Max rx buffers = %llu\n",
+ adapter->max_tx_entries_per_subcrq);
+ return -EINVAL;
+ }
+
+ adapter->desired.rx_entries = ring->rx_pending;
+ adapter->desired.tx_entries = ring->tx_pending;
+
+ return wait_for_reset(adapter);
+}
+
static void ibmvnic_get_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
@@ -1809,6 +2036,17 @@ static void ibmvnic_get_channels(struct net_device *netdev,
channels->combined_count = 0;
}
+static int ibmvnic_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->desired.rx_queues = channels->rx_count;
+ adapter->desired.tx_queues = channels->tx_count;
+
+ return wait_for_reset(adapter);
+}
+
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct ibmvnic_adapter *adapter = netdev_priv(dev);
@@ -1905,7 +2143,9 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.set_msglevel = ibmvnic_set_msglevel,
.get_link = ibmvnic_get_link,
.get_ringparam = ibmvnic_get_ringparam,
+ .set_ringparam = ibmvnic_set_ringparam,
.get_channels = ibmvnic_get_channels,
+ .set_channels = ibmvnic_set_channels,
.get_strings = ibmvnic_get_strings,
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
@@ -2371,6 +2611,7 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
+ int max_entries;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
@@ -2382,21 +2623,60 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
return;
}
- /* Get the minimum between the queried max and the entries
- * that fit in our PAGE_SIZE
- */
- adapter->req_tx_entries_per_subcrq =
- adapter->max_tx_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_tx_entries_per_subcrq;
- adapter->req_rx_add_entries_per_subcrq =
- adapter->max_rx_add_entries_per_subcrq > entries_page ?
- entries_page : adapter->max_rx_add_entries_per_subcrq;
-
- adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
- adapter->req_rx_queues = adapter->opt_rx_comp_queues;
- adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ if (adapter->desired.mtu)
+ adapter->req_mtu = adapter->desired.mtu;
+ else
+ adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+
+ if (!adapter->desired.tx_entries)
+ adapter->desired.tx_entries =
+ adapter->max_tx_entries_per_subcrq;
+ if (!adapter->desired.rx_entries)
+ adapter->desired.rx_entries =
+ adapter->max_rx_add_entries_per_subcrq;
+
+ max_entries = IBMVNIC_MAX_LTB_SIZE /
+ (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
- adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
+ if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
+ adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries = max_entries;
+ }
+
+ if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
+ adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries = max_entries;
+ }
+
+ if (adapter->desired.tx_entries)
+ adapter->req_tx_entries_per_subcrq =
+ adapter->desired.tx_entries;
+ else
+ adapter->req_tx_entries_per_subcrq =
+ adapter->max_tx_entries_per_subcrq;
+
+ if (adapter->desired.rx_entries)
+ adapter->req_rx_add_entries_per_subcrq =
+ adapter->desired.rx_entries;
+ else
+ adapter->req_rx_add_entries_per_subcrq =
+ adapter->max_rx_add_entries_per_subcrq;
+
+ if (adapter->desired.tx_queues)
+ adapter->req_tx_queues =
+ adapter->desired.tx_queues;
+ else
+ adapter->req_tx_queues =
+ adapter->opt_tx_comp_sub_queues;
+
+ if (adapter->desired.rx_queues)
+ adapter->req_rx_queues =
+ adapter->desired.rx_queues;
+ else
+ adapter->req_rx_queues =
+ adapter->opt_rx_comp_queues;
+
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
}
memset(&crq, 0, sizeof(crq));
@@ -2609,6 +2889,55 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
return ibmvnic_send_crq(adapter, &crq);
}
+struct vnic_login_client_data {
+ u8 type;
+ __be16 len;
+ char name;
+} __packed;
+
+static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
+{
+ int len;
+
+ /* Calculate the amount of buffer space needed for the
+ * vnic client data in the login buffer. There are four entries:
+ * OS name, LPAR name, device name, and a null last entry.
+ */
+ len = 4 * sizeof(struct vnic_login_client_data);
+ len += 6; /* "Linux" plus NULL */
+ len += strlen(utsname()->nodename) + 1;
+ len += strlen(adapter->netdev->name) + 1;
+
+ return len;
+}
+
+static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
+ struct vnic_login_client_data *vlcd)
+{
+ const char *os_name = "Linux";
+ int len;
+
+ /* Type 1 - LPAR OS */
+ vlcd->type = 1;
+ len = strlen(os_name) + 1;
+ vlcd->len = cpu_to_be16(len);
+ strncpy(&vlcd->name, os_name, len);
+ vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+
+ /* Type 2 - LPAR name */
+ vlcd->type = 2;
+ len = strlen(utsname()->nodename) + 1;
+ vlcd->len = cpu_to_be16(len);
+ strncpy(&vlcd->name, utsname()->nodename, len);
+ vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
+
+ /* Type 3 - device name */
+ vlcd->type = 3;
+ len = strlen(adapter->netdev->name) + 1;
+ vlcd->len = cpu_to_be16(len);
+ strncpy(&vlcd->name, adapter->netdev->name, len);
+}
+
static void send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -2621,13 +2950,18 @@ static void send_login(struct ibmvnic_adapter *adapter)
size_t buffer_size;
__be64 *tx_list_p;
__be64 *rx_list_p;
+ int client_data_len;
+ struct vnic_login_client_data *vlcd;
int i;
+ client_data_len = vnic_client_data_len(adapter);
+
buffer_size =
sizeof(struct ibmvnic_login_buffer) +
- sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
+ sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
+ client_data_len;
- login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
+ login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
if (!login_buffer)
goto buf_alloc_failed;
@@ -2694,6 +3028,15 @@ static void send_login(struct ibmvnic_adapter *adapter)
}
}
+ /* Insert vNIC login client data */
+ vlcd = (struct vnic_login_client_data *)
+ ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
+ login_buffer->client_data_offset =
+ cpu_to_be32((char *)vlcd - (char *)login_buffer);
+ login_buffer->client_data_len = cpu_to_be32(client_data_len);
+
+ vnic_add_client_data(adapter, vlcd);
+
netdev_dbg(adapter->netdev, "Login Buffer:\n");
for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -2872,6 +3215,73 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
+static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+
+ if (crq->get_vpd_size_rsp.rc.code) {
+ dev_err(dev, "Error retrieving VPD size, rc=%x\n",
+ crq->get_vpd_size_rsp.rc.code);
+ complete(&adapter->fw_done);
+ return;
+ }
+
+ adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
+ complete(&adapter->fw_done);
+}
+
+static void handle_vpd_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ unsigned char *substr = NULL, *ptr = NULL;
+ u8 fw_level_len = 0;
+
+ memset(adapter->fw_version, 0, 32);
+
+ dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
+ DMA_FROM_DEVICE);
+
+ if (crq->get_vpd_rsp.rc.code) {
+ dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
+ crq->get_vpd_rsp.rc.code);
+ goto complete;
+ }
+
+ /* get the position of the firmware version info
+ * located after the ASCII 'RM' substring in the buffer
+ */
+ substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
+ if (!substr) {
+ dev_info(dev, "No FW level provided by VPD\n");
+ goto complete;
+ }
+
+ /* get length of firmware level ASCII substring */
+ if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
+ fw_level_len = *(substr + 2);
+ } else {
+ dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
+ goto complete;
+ }
+
+ /* copy firmware version string from vpd into adapter */
+ if ((substr + 3 + fw_level_len) <
+ (adapter->vpd->buff + adapter->vpd->len)) {
+ ptr = strncpy((char *)adapter->fw_version,
+ substr + 3, fw_level_len);
+
+ if (!ptr)
+ dev_err(dev, "Failed to isolate FW level string\n");
+ } else {
+ dev_info(dev, "FW substr extrapolated VPD buff\n");
+ }
+
+complete:
+ complete(&adapter->fw_done);
+}
+
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
@@ -2940,14 +3350,14 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
+ adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
+ adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
- /* large_tx/rx disabled for now, additional features needed */
- adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
- adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
+ /* large_rx disabled for now, additional features needed */
adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
- adapter->netdev->features = NETIF_F_GSO;
+ adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
adapter->netdev->features |= NETIF_F_IP_CSUM;
@@ -2959,6 +3369,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
adapter->netdev->features |= NETIF_F_RXCSUM;
+ if (buf->large_tx_ipv4)
+ adapter->netdev->features |= NETIF_F_TSO;
+ if (buf->large_tx_ipv6)
+ adapter->netdev->features |= NETIF_F_TSO6;
+
+ adapter->netdev->hw_features |= adapter->netdev->features;
+
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -3210,6 +3627,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
int i;
@@ -3229,6 +3647,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -3593,6 +4013,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
netdev_dbg(netdev, "Got Collect firmware trace Response\n");
complete(&adapter->fw_done);
break;
+ case GET_VPD_SIZE_RSP:
+ handle_vpd_size_rsp(crq, adapter);
+ break;
+ case GET_VPD_RSP:
+ handle_vpd_rsp(crq, adapter);
+ break;
default:
netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
gen_crq->cmd);
@@ -3784,7 +4210,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
unsigned long timeout = msecs_to_jiffies(30000);
int rc;
- if (adapter->resetting) {
+ if (adapter->resetting && !adapter->wait_for_reset) {
rc = ibmvnic_reset_crq(adapter);
if (!rc)
rc = vio_enable_interrupts(adapter->vdev);
@@ -3818,7 +4244,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (adapter->resetting)
+ if (adapter->resetting && !adapter->wait_for_reset)
rc = reset_sub_crq_queues(adapter);
else
rc = init_sub_crqs(adapter);
@@ -3887,6 +4313,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
mutex_init(&adapter->rwi_lock);
adapter->resetting = false;
+ adapter->mac_change_pending = false;
+
do {
rc = ibmvnic_init(adapter);
if (rc && rc != EAGAIN)
@@ -3894,11 +4322,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
} while (rc == EAGAIN);
netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
goto ibmvnic_init_fail;
+ netif_carrier_off(netdev);
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
@@ -3907,6 +4338,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
dev_info(&dev->dev, "ibmvnic registered\n");
adapter->state = VNIC_PROBED;
+
+ adapter->wait_for_reset = false;
+
return 0;
ibmvnic_register_fail:
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index d02257ccc377..4487f1e2c266 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -30,6 +30,8 @@
#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
+#define IBMVNIC_INIT_FAILED 2
+
/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
@@ -39,6 +41,12 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_TX_QUEUES 5
+#define IBMVNIC_TSO_BUF_SZ 65536
+#define IBMVNIC_TSO_BUFS 64
+
+#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
+#define IBMVNIC_BUFFER_HLEN 500
+
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -49,6 +57,8 @@ struct ibmvnic_login_buffer {
__be32 off_rxcomp_subcrqs;
__be32 login_rsp_ioba;
__be32 login_rsp_len;
+ __be32 client_data_offset;
+ __be32 client_data_len;
} __packed __aligned(8);
struct ibmvnic_login_rsp_buffer {
@@ -550,6 +560,12 @@ struct ibmvnic_multicast_ctrl {
struct ibmvnic_rc rc;
} __packed __aligned(8);
+struct ibmvnic_get_vpd_size {
+ u8 first;
+ u8 cmd;
+ u8 reserved[14];
+} __packed __aligned(8);
+
struct ibmvnic_get_vpd_size_rsp {
u8 first;
u8 cmd;
@@ -567,6 +583,13 @@ struct ibmvnic_get_vpd {
u8 reserved[4];
} __packed __aligned(8);
+struct ibmvnic_get_vpd_rsp {
+ u8 first;
+ u8 cmd;
+ u8 reserved[10];
+ struct ibmvnic_rc rc;
+} __packed __aligned(8);
+
struct ibmvnic_acl_change_indication {
u8 first;
u8 cmd;
@@ -692,10 +715,10 @@ union ibmvnic_crq {
struct ibmvnic_change_mac_addr change_mac_addr_rsp;
struct ibmvnic_multicast_ctrl multicast_ctrl;
struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
- struct ibmvnic_generic_crq get_vpd_size;
+ struct ibmvnic_get_vpd_size get_vpd_size;
struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
struct ibmvnic_get_vpd get_vpd;
- struct ibmvnic_generic_crq get_vpd_rsp;
+ struct ibmvnic_get_vpd_rsp get_vpd_rsp;
struct ibmvnic_acl_change_indication acl_change_indication;
struct ibmvnic_acl_query acl_query;
struct ibmvnic_generic_crq acl_query_rsp;
@@ -896,6 +919,8 @@ struct ibmvnic_tx_pool {
wait_queue_head_t ibmvnic_tx_comp_q;
struct task_struct *work_thread;
struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_long_term_buff tso_ltb;
+ int tso_index;
};
struct ibmvnic_rx_buff {
@@ -927,6 +952,12 @@ struct ibmvnic_error_buff {
__be32 error_id;
};
+struct ibmvnic_vpd {
+ unsigned char *buff;
+ dma_addr_t dma_addr;
+ u64 len;
+};
+
enum vnic_state {VNIC_PROBING = 1,
VNIC_PROBED,
VNIC_OPENING,
@@ -940,13 +971,23 @@ enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
VNIC_RESET_NON_FATAL,
- VNIC_RESET_TIMEOUT};
+ VNIC_RESET_TIMEOUT,
+ VNIC_RESET_CHANGE_PARAM};
struct ibmvnic_rwi {
enum ibmvnic_reset_reason reset_reason;
struct list_head list;
};
+struct ibmvnic_tunables {
+ u64 rx_queues;
+ u64 tx_queues;
+ u64 rx_entries;
+ u64 tx_entries;
+ u64 mtu;
+ struct sockaddr mac;
+};
+
struct ibmvnic_adapter {
struct vio_dev *vdev;
struct net_device *netdev;
@@ -958,6 +999,10 @@ struct ibmvnic_adapter {
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ /* Vital Product Data (VPD) */
+ struct ibmvnic_vpd *vpd;
+ char fw_version[32];
+
/* Statistics */
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
@@ -1007,6 +1052,10 @@ struct ibmvnic_adapter {
struct completion fw_done;
int fw_done_rc;
+ struct completion reset_done;
+ int reset_done_rc;
+ bool wait_for_reset;
+
/* partner capabilities */
u64 min_tx_queues;
u64 min_rx_queues;
@@ -1051,4 +1100,9 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_reset;
bool resetting;
bool napi_enabled, from_passive_init;
+
+ bool mac_change_pending;
+
+ struct ibmvnic_tunables desired;
+ struct ibmvnic_tunables fallback;
};
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5ea764d85ec3..90af7757a885 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Intel network device drivers.
#
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 4d10270ddf8f..44b3937f7e81 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1710,9 +1710,9 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
}
}
-static void e100_watchdog(unsigned long data)
+static void e100_watchdog(struct timer_list *t)
{
- struct nic *nic = (struct nic *)data;
+ struct nic *nic = from_timer(nic, t, watchdog);
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
u32 speed;
@@ -1910,11 +1910,10 @@ static int e100_alloc_cbs(struct nic *nic)
nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
nic->cbs_avail = 0;
- nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
- &nic->cbs_dma_addr);
+ nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
+ &nic->cbs_dma_addr);
if (!nic->cbs)
return -ENOMEM;
- memset(nic->cbs, 0, count * sizeof(struct cb));
for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
@@ -2921,7 +2920,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
+ timer_setup(&nic->watchdog, e100_watchdog, 0);
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
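
The e100 hunks above follow the kernel-wide conversion from setup_timer() with an unsigned long cookie to timer_setup() plus from_timer(), where the callback recovers its owning structure from the embedded struct timer_list. A minimal sketch of that pattern, with hypothetical structure and function names rather than e100's, is:

/* Minimal sketch of the timer API conversion used in the hunks above;
 * names here are hypothetical, not taken from e100.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_nic {
	struct timer_list watchdog;	/* timer embedded in its owner */
};

static void my_watchdog(struct timer_list *t)
{
	/* recover the owning structure from the embedded timer */
	struct my_nic *nic = from_timer(nic, t, watchdog);

	/* ... periodic work ..., then rearm */
	mod_timer(&nic->watchdog, jiffies + HZ);
}

static void my_nic_init(struct my_nic *nic)
{
	/* replaces setup_timer(&nic->watchdog, fn, (unsigned long)nic) */
	timer_setup(&nic->watchdog, my_watchdog, 0);
}
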
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0641c0098738..afb7ebe20b24 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -398,6 +398,7 @@
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
/* If this bit asserted, the driver should claim the interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 98e68888abb1..2311b31bdcac 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -94,10 +94,6 @@ struct e1000_info;
*/
#define E1000_CHECK_RESET_COUNT 25
-#define DEFAULT_RDTR 0
-#define DEFAULT_RADV 8
-#define BURST_RDTR 0x20
-#define BURST_RADV 0x20
#define PCICFG_DESC_RING_STATUS 0xe4
#define FLUSH_DESC_REQUIRED 0x100
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b322011ec282..f457c5703d0c 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
/**
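
With this change e1000e_check_for_copper_link() reports a tri-state result: a negative -E1000_ERR_* code, 0 for link down, or 1 for link up (including the early-out when get_link_status is already false). Callers therefore test the return value directly instead of re-reading get_link_status, as the netdev.c hunk further down does. A hedged sketch of such a caller, with a hypothetical wrapper name and assuming e1000e's e1000.h is in scope:

/* Hedged sketch of a consumer of the new return convention; the
 * wrapper below is hypothetical, not driver code.
 */
static bool my_copper_link_is_up(struct e1000_hw *hw)
{
	s32 ret_val;

	if (!hw->mac.get_link_status)
		return true;		/* nothing changed since last check */

	ret_val = e1000e_check_for_copper_link(hw);
	if (ret_val < 0)
		return false;		/* treat -E1000_ERR_* as link down */

	return ret_val > 0;		/* 1 == link up, 0 == link down */
}
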
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 327dfe5bedc0..f2f49239b015 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1071,7 +1071,8 @@ next_desc:
}
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
- struct e1000_buffer *buffer_info)
+ struct e1000_buffer *buffer_info,
+ bool drop)
{
struct e1000_adapter *adapter = tx_ring->adapter;
@@ -1085,7 +1086,10 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
buffer_info->dma = 0;
}
if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
+ if (drop)
+ dev_kfree_skb_any(buffer_info->skb);
+ else
+ dev_consume_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
@@ -1199,7 +1203,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
wmb(); /* force write prior to skb_tstamp_tx */
skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -1254,7 +1258,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
}
}
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, false);
tx_desc->upper.data = 0;
i++;
@@ -1910,14 +1914,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 icr;
+ bool enable = true;
+
+ icr = er32(ICR);
+ if (icr & E1000_ICR_RXO) {
+ ew32(ICR, E1000_ICR_RXO);
+ enable = false;
+ /* napi poll will re-enable Other, make sure it runs */
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ if (icr & E1000_ICR_LSC) {
+ ew32(ICR, E1000_ICR_LSC);
+ hw->mac.get_link_status = true;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
- hw->mac.get_link_status = true;
-
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ if (enable && !test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, E1000_IMS_OTHER);
- }
return IRQ_HANDLED;
}
@@ -2421,7 +2441,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, false);
}
netdev_reset_queue(adapter->netdev);
@@ -2687,7 +2707,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val);
+ ew32(IMS, adapter->rx_ring->ims_val |
+ E1000_IMS_OTHER);
else
e1000_irq_enable(adapter);
}
@@ -3004,8 +3025,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
hw->mac.ops.config_collision_dist(hw);
- /* SPT and CNP Si errata workaround to avoid data corruption */
- if (hw->mac.type >= e1000_pch_spt) {
+ /* SPT and KBL Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
u32 reg_val;
reg_val = er32(IOSFPC);
@@ -3013,7 +3034,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
- reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+ /* SPT and KBL Si errata workaround to avoid Tx hang */
+ reg_val &= ~BIT(28);
+ reg_val |= BIT(29);
ew32(TARC(0), reg_val);
}
}
@@ -3223,14 +3246,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*/
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
-
- /* override the delay timers for enabling bursting, only if
- * the value was not set by the user via module options
- */
- if (adapter->rx_int_delay == DEFAULT_RDTR)
- adapter->rx_int_delay = BURST_RDTR;
- if (adapter->rx_abs_int_delay == DEFAULT_RADV)
- adapter->rx_abs_int_delay = BURST_RADV;
}
/* set the Receive Delay Timer Register */
@@ -4204,7 +4219,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries)
- ew32(ICS, E1000_ICS_OTHER);
+ ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
else
ew32(ICS, E1000_ICS_LSC);
}
@@ -4808,9 +4823,9 @@ static void e1000e_update_phy_task(struct work_struct *work)
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
**/
-static void e1000_update_phy_info(unsigned long data)
+static void e1000_update_phy_info(struct timer_list *t)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -5074,14 +5089,14 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
/* get_link_status is set on LSC (link status) interrupt or
* Rx sequence error interrupt. get_link_status will stay
- * false until the check_for_link establishes link
+ * true until the check_for_link establishes link
* for copper adapters ONLY
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
+ link_active = ret_val > 0;
} else {
link_active = true;
}
@@ -5092,14 +5107,14 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
break;
case e1000_media_type_internal_serdes:
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = adapter->hw.mac.serdes_has_link;
+ link_active = hw->mac.serdes_has_link;
break;
default:
case e1000_media_type_unknown:
break;
}
- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
(er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
e_info("Gigabit has been disabled, downgrading speed\n");
@@ -5144,9 +5159,9 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void e1000_watchdog(unsigned long data)
+static void e1000_watchdog(struct timer_list *t)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -5614,7 +5629,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, true);
}
return 0;
@@ -7252,13 +7267,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long)adapter;
-
- init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long)adapter;
+ timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
+ timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -7411,7 +7421,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ dev_consume_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
}
}
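
The drop parameter added to e1000_put_txbuf() above separates normal Tx completion from error and unwind paths: completed skbs are reported with dev_consume_skb_any() so drop-monitoring tools no longer count them as losses, while DMA-mapping failures and ring cleanup on error still use dev_kfree_skb_any(). A minimal sketch of that split, with a hypothetical helper name:

/* Hedged illustration of the consume-vs-drop split introduced above;
 * this helper is hypothetical, not part of e1000e.
 */
#include <linux/netdevice.h>

static void my_free_tx_skb(struct sk_buff *skb, bool drop)
{
	if (drop)
		dev_kfree_skb_any(skb);		/* error/unwind path: counts as a drop */
	else
		dev_consume_skb_any(skb);	/* normal completion: not a drop */
}
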
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 6d8c39abee16..47da51864543 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -73,17 +73,25 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
+ * Burst variant is used as default if device has FLAG2_DMA_BURST.
+ *
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define BURST_RDTR 0x20
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
+ * Burst variant is used as default if device has FLAG2_DMA_BURST.
+ *
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define BURST_RADV 0x20
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
@@ -297,6 +305,9 @@ void e1000e_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY } }
};
+ if (adapter->flags2 & FLAG2_DMA_BURST)
+ opt.def = BURST_RDTR;
+
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
@@ -307,7 +318,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
}
/* Receive Absolute Interrupt Delay */
{
- static const struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -317,6 +328,9 @@ void e1000e_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY } }
};
+ if (adapter->flags2 & FLAG2_DMA_BURST)
+ opt.def = BURST_RADV;
+
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index d78d47b41a71..86ff0969efb6 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
s32 ret_val = 0;
u16 i, phy_status;
+ *success = false;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & BMSR_LSTATUS)
+ if (phy_status & BMSR_LSTATUS) {
+ *success = true;
break;
+ }
if (usec_interval >= 1000)
msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
- *success = (i < iterations);
-
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 689c413b7782..46973fb234c5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -248,6 +248,29 @@ struct fm10k_udp_port {
__be16 port;
};
+enum fm10k_macvlan_request_type {
+ FM10K_UC_MAC_REQUEST,
+ FM10K_MC_MAC_REQUEST,
+ FM10K_VLAN_REQUEST
+};
+
+struct fm10k_macvlan_request {
+ enum fm10k_macvlan_request_type type;
+ struct list_head list;
+ union {
+ struct fm10k_mac_request {
+ u8 addr[ETH_ALEN];
+ u16 glort;
+ u16 vid;
+ } mac;
+ struct fm10k_vlan_request {
+ u32 vid;
+ u8 vsi;
+ } vlan;
+ };
+ bool set;
+};
+
/* one work queue for entire driver */
extern struct workqueue_struct *fm10k_workqueue;
@@ -270,11 +293,15 @@ enum fm10k_flags_t {
enum fm10k_state_t {
__FM10K_RESETTING,
+ __FM10K_RESET_DETACHED,
+ __FM10K_RESET_SUSPENDED,
__FM10K_DOWN,
__FM10K_SERVICE_SCHED,
__FM10K_SERVICE_REQUEST,
__FM10K_SERVICE_DISABLE,
- __FM10K_MBX_LOCK,
+ __FM10K_MACVLAN_SCHED,
+ __FM10K_MACVLAN_REQUEST,
+ __FM10K_MACVLAN_DISABLE,
__FM10K_LINK_DOWN,
__FM10K_UPDATING_STATS,
/* This value must be last and determines the BITMAP size */
@@ -344,6 +371,8 @@ struct fm10k_intfc {
struct fm10k_hw_stats stats;
struct fm10k_hw hw;
+ /* Mailbox lock */
+ spinlock_t mbx_lock;
u32 __iomem *uc_addr;
u32 __iomem *sw_addr;
u16 msg_enable;
@@ -365,6 +394,12 @@ struct fm10k_intfc {
struct list_head vxlan_port;
struct list_head geneve_port;
+ /* MAC/VLAN update queue */
+ struct list_head macvlan_requests;
+ struct delayed_work macvlan_task;
+ /* MAC/VLAN update queue lock */
+ spinlock_t macvlan_lock;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
#endif /* CONFIG_DEBUG_FS */
@@ -384,23 +419,17 @@ struct fm10k_intfc {
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
- /* busy loop if we cannot obtain the lock as some calls
- * such as ndo_set_rx_mode may be made in atomic context
- */
- while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state))
- udelay(20);
+ spin_lock(&interface->mbx_lock);
}
static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
- /* flush memory to make sure state is correct */
- smp_mb__before_atomic();
- clear_bit(__FM10K_MBX_LOCK, interface->state);
+ spin_unlock(&interface->mbx_lock);
}
static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
- return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state);
+ return spin_trylock(&interface->mbx_lock);
}
/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
@@ -490,6 +519,7 @@ void fm10k_up(struct fm10k_intfc *interface);
void fm10k_down(struct fm10k_intfc *interface);
void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
+void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
#ifdef CONFIG_NET_POLL_CONTROLLER
void fm10k_netpoll(struct net_device *netdev);
@@ -510,6 +540,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *);
int fm10k_setup_tc(struct net_device *dev, u8 tc);
int fm10k_open(struct net_device *netdev);
int fm10k_close(struct net_device *netdev);
+int fm10k_queue_vlan_request(struct fm10k_intfc *interface, u32 vid,
+ u8 vsi, bool set);
+int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
+ const unsigned char *addr, u16 vid, bool set);
+void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
+ u16 glort, bool vlans);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
@@ -526,8 +562,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
-int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
- int unused);
+int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
+ int __always_unused min_rate, int max_rate);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
int vf_idx, struct ifla_vf_info *ivi);
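
The new fm10k_macvlan_request list, macvlan_lock spinlock, and macvlan_task delayed work replace direct mailbox calls with a producer/consumer queue: callers, sometimes in atomic context, append requests, and a background worker drains them toward the switch manager at a controlled rate. The real implementation appears later in this patch; the following is only a simplified, stand-alone sketch of the producer side with hypothetical names:

/* Generic sketch of the producer side of a deferred request queue;
 * simplified stand-ins, not the fm10k implementation.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_request {
	struct list_head list;
	u16 glort;
	u16 vid;
	bool set;
};

struct my_intfc {
	struct list_head requests;	/* pending updates */
	spinlock_t lock;		/* protects the list */
	struct delayed_work task;	/* drains the list later */
};

static int my_queue_request(struct my_intfc *intfc, u16 glort, u16 vid,
			    bool set)
{
	struct my_request *req;
	unsigned long flags;

	/* GFP_ATOMIC: callers may hold the netdev addr_list_lock */
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->glort = glort;
	req->vid = vid;
	req->set = set;

	spin_lock_irqsave(&intfc->lock, flags);
	list_add_tail(&req->list, &intfc->requests);
	spin_unlock_irqrestore(&intfc->lock, flags);

	/* let several updates accumulate before the worker runs */
	schedule_delayed_work(&intfc->task, 10);
	return 0;
}
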
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 62a6ad9b3eed..736a9f087bc9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -517,8 +517,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
}
- /* verify Mailbox is still valid */
- if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
+ /* verify Mailbox is still open */
+ if (mbx->state != FM10K_STATE_OPEN)
goto out;
/* interface cannot receive traffic without logical ports */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5116fd043630..14df09e2d964 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -52,9 +52,9 @@ static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s,
static void fm10k_dbg_desc_break(struct seq_file *s, int i)
{
while (i--)
- seq_puts(s, "-");
+ seq_putc(s, '-');
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 5f4dac0d36ef..ea3ab24265ee 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -35,10 +35,133 @@ static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
return fm10k_tlv_msg_error(hw, results, mbx);
}
+/**
+ * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
+ * @hw: Pointer to hardware structure
+ * @results: Pointer array to message, results[0] is pointer to message
+ * @mbx: Pointer to mailbox information structure
+ *
+ * This function is a custom handler for MAC/VLAN requests from the VF. The
+ * assumption is that it is acceptable to directly hand off the message from
+ * the VF to the PF's switch manager. However, we use a MAC/VLAN message
+ * queue to avoid overloading the mailbox when a large number of requests
+ * come in.
+ **/
+static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
+ struct fm10k_mbx_info *mbx)
+{
+ struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
+ struct fm10k_intfc *interface = hw->back;
+ u8 mac[ETH_ALEN];
+ u32 *result;
+ int err = 0;
+ bool set;
+ u16 vlan;
+ u32 vid;
+
+ /* we shouldn't be updating rules on a disabled interface */
+ if (!FM10K_VF_FLAG_ENABLED(vf_info))
+ err = FM10K_ERR_PARAM;
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
+ result = results[FM10K_MAC_VLAN_MSG_VLAN];
+
+ /* record VLAN id requested */
+ err = fm10k_tlv_attr_get_u32(result, &vid);
+ if (err)
+ return err;
+
+ set = !(vid & FM10K_VLAN_CLEAR);
+ vid &= ~FM10K_VLAN_CLEAR;
+
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
+
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
+
+ /* update VSI info for VF in regards to VLAN table */
+ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
+ result = results[FM10K_MAC_VLAN_MSG_MAC];
+
+ /* record unicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* block attempts to set MAC for a locked device */
+ if (is_valid_ether_addr(vf_info->mac) &&
+ !ether_addr_equal(mac, vf_info->mac))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* Add this request to the MAC/VLAN queue */
+ err = fm10k_queue_mac_request(interface, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
+ result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
+
+ /* record multicast MAC address requested */
+ err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
+ if (err)
+ return err;
+
+ /* verify that the VF is allowed to request multicast */
+ if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
+ return FM10K_ERR_PARAM;
+
+ set = !(vlan & FM10K_VLAN_CLEAR);
+ vlan &= ~FM10K_VLAN_CLEAR;
+
+ err = fm10k_iov_select_vid(vf_info, vlan);
+ if (err < 0)
+ return err;
+
+ vlan = (u16)err;
+
+ /* Add this request to the MAC/VLAN queue */
+ err = fm10k_queue_mac_request(interface, vf_info->glort,
+ mac, vlan, set);
+ }
+
+ return err;
+}
+
static const struct fm10k_msg_data iov_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
- FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
+ FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
@@ -66,25 +189,21 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
goto read_unlock;
/* read VFLRE to determine if any VFs have been reset */
- do {
- vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
- vflre <<= 32;
- vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
- vflre = (vflre << 32) | (vflre >> 32);
- vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));
+ vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
+ vflre <<= 32;
+ vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));
- i = iov_data->num_vfs;
+ i = iov_data->num_vfs;
- for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
- struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
+ for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
+ struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
- if (vflre >= 0)
- continue;
+ if (vflre >= 0)
+ continue;
- hw->iov.ops.reset_resources(hw, vf_info);
- vf_info->mbx.ops.connect(hw, &vf_info->mbx);
- }
- } while (i != iov_data->num_vfs);
+ hw->iov.ops.reset_resources(hw, vf_info);
+ vf_info->mbx.ops.connect(hw, &vf_info->mbx);
+ }
read_unlock:
rcu_read_unlock();
@@ -126,9 +245,14 @@ process_mbx:
struct fm10k_mbx_info *mbx = &vf_info->mbx;
u16 glort = vf_info->glort;
+ /* process the SM mailbox first to drain outgoing messages */
+ hw->mbx.ops.process(hw, &hw->mbx);
+
/* verify port mapping is valid, if not reset port */
- if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
+ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
hw->iov.ops.reset_lport(hw, vf_info);
+ fm10k_clear_macvlan_queue(interface, glort, false);
+ }
/* reset VFs that have mailbox timed out */
if (!mbx->timeout) {
@@ -140,6 +264,10 @@ process_mbx:
if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
/* keep track of how many times this occurs */
interface->hw_sm_mbx_full++;
+
+ /* make sure we try again momentarily */
+ fm10k_service_event_schedule(interface);
+
break;
}
@@ -187,6 +315,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev)
hw->iov.ops.reset_resources(hw, vf_info);
hw->iov.ops.reset_lport(hw, vf_info);
+ fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
}
}
@@ -411,6 +540,8 @@ static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
/* disable LPORT for this VF which clears switch rules */
hw->iov.ops.reset_lport(hw, vf_info);
+ fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
+
/* assign new MAC+VLAN for this VF */
hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
@@ -482,7 +613,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
}
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
- int __always_unused unused, int rate)
+ int __always_unused min_rate, int max_rate)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -493,14 +624,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
return -EINVAL;
/* rate limit cannot be less than 10Mbs or greater than link speed */
- if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
+ if (max_rate &&
+ (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
return -EINVAL;
/* store values */
- iov_data->vf_info[vf_idx].rate = rate;
+ iov_data->vf_info[vf_idx].rate = max_rate;
/* update hardware configuration */
- hw->iov.ops.configure_tc(hw, vf_idx, rate);
+ hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
return 0;
}
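
The reworked fm10k_iov_event() above now assembles VFLRE from exactly two register reads (PFVFLRE(1) into the high word, PFVFLRE(0) into the low word) and then walks VFs from the highest index down by parking each VF's bit in the sign position of a signed 64-bit value. A hedged, stand-alone sketch of that sign-bit walk, with hypothetical names and relying on the kernel's -fno-strict-overflow build for the signed shifts:

/* Illustration only: walk set bits from MSB down using the sign bit,
 * mirroring the loop in fm10k_iov_event().
 */
#include <linux/types.h>

static void my_for_each_flr_vf(s64 vflre, int num_vfs,
			       void (*handle)(int vf_idx))
{
	int i = num_vfs;

	/* Shift so the bit for VF (num_vfs - 1) lands in the sign bit. */
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		/* Sign bit clear: this VF did not report an FLR event. */
		if (vflre >= 0)
			continue;

		handle(i);	/* i is the VF whose bit is in the sign position */
	}
}
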
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 9dffaba85ae6..538b42d5c187 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,7 +28,7 @@
#include "fm10k.h"
-#define DRV_VERSION "0.21.7-k"
+#define DRV_VERSION "0.22.1-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
@@ -806,9 +806,10 @@ static int fm10k_tso(struct fm10k_ring *tx_ring,
tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
return 1;
+
err_vxlan:
tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
- if (!net_ratelimit())
+ if (net_ratelimit())
netdev_err(tx_ring->netdev,
"TSO requested for unsupported tunnel, disabling offload\n");
return -1;
@@ -876,6 +877,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
case IPPROTO_GRE:
if (skb->encapsulation)
break;
+ /* fall through */
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
@@ -1229,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 334088a101c3..244d3ad58ca7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1586,7 +1586,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
break;
}
- /* fallthough */
+ /* fall through */
default:
return FM10K_MBX_ERR_NO_MBX;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index e69d49d91d67..adc62fb38c49 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -643,9 +643,13 @@ int fm10k_close(struct net_device *netdev)
static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
struct fm10k_intfc *interface = netdev_priv(dev);
+ int num_tx_queues = READ_ONCE(interface->num_tx_queues);
unsigned int r_idx = skb->queue_mapping;
int err;
+ if (!num_tx_queues)
+ return NETDEV_TX_BUSY;
+
if ((skb->protocol == htons(ETH_P_8021Q)) &&
!skb_vlan_tag_present(skb)) {
/* FM10K only supports hardware tagging, any tags in frame
@@ -698,8 +702,8 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
__skb_put(skb, pad_len);
}
- if (r_idx >= interface->num_tx_queues)
- r_idx %= interface->num_tx_queues;
+ if (r_idx >= num_tx_queues)
+ r_idx %= num_tx_queues;
err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);
@@ -754,11 +758,132 @@ static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
}
+/**
+ * fm10k_queue_vlan_request - Queue a VLAN update request
+ * @interface: the fm10k interface structure
+ * @vid: the VLAN vid
+ * @vsi: VSI index number
+ * @set: whether to set or clear
+ *
+ * This function queues up a VLAN update. For VFs, this must be sent to the
+ * managing PF over the mailbox. For PFs, we'll use the same handling so that
+ * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
+ * many VLAN updates during reset.
+ */
+int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
+ u32 vid, u8 vsi, bool set)
+{
+ struct fm10k_macvlan_request *request;
+ unsigned long flags;
+
+ /* This must be atomic since we may be called while the netdev
+ * addr_list_lock is held
+ */
+ request = kzalloc(sizeof(*request), GFP_ATOMIC);
+ if (!request)
+ return -ENOMEM;
+
+ request->type = FM10K_VLAN_REQUEST;
+ request->vlan.vid = vid;
+ request->vlan.vsi = vsi;
+ request->set = set;
+
+ spin_lock_irqsave(&interface->macvlan_lock, flags);
+ list_add_tail(&request->list, &interface->macvlan_requests);
+ spin_unlock_irqrestore(&interface->macvlan_lock, flags);
+
+ fm10k_macvlan_schedule(interface);
+
+ return 0;
+}
+
+/**
+ * fm10k_queue_mac_request - Queue a MAC update request
+ * @interface: the fm10k interface structure
+ * @glort: the target glort for this update
+ * @addr: the address to update
+ * @vid: the vid to update
+ * @set: whether to add or remove
+ *
+ * This function queues up a MAC request for sending to the switch manager.
+ * A separate thread monitors the queue and sends updates to the switch
+ * manager. Return 0 on success, and negative error code on failure.
+ **/
+int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
+ const unsigned char *addr, u16 vid, bool set)
+{
+ struct fm10k_macvlan_request *request;
+ unsigned long flags;
+
+ /* This must be atomic since we may be called while the netdev
+ * addr_list_lock is held
+ */
+ request = kzalloc(sizeof(*request), GFP_ATOMIC);
+ if (!request)
+ return -ENOMEM;
+
+ if (is_multicast_ether_addr(addr))
+ request->type = FM10K_MC_MAC_REQUEST;
+ else
+ request->type = FM10K_UC_MAC_REQUEST;
+
+ ether_addr_copy(request->mac.addr, addr);
+ request->mac.glort = glort;
+ request->mac.vid = vid;
+ request->set = set;
+
+ spin_lock_irqsave(&interface->macvlan_lock, flags);
+ list_add_tail(&request->list, &interface->macvlan_requests);
+ spin_unlock_irqrestore(&interface->macvlan_lock, flags);
+
+ fm10k_macvlan_schedule(interface);
+
+ return 0;
+}
+
+/**
+ * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
+ * @interface: the fm10k interface structure
+ * @glort: the target glort to clear
+ * @vlans: true to clear VLAN messages, false to ignore them
+ *
+ * Cancel any outstanding MAC/VLAN requests for a given glort. This is
+ * expected to be called when a logical port goes down.
+ **/
+void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
+ u16 glort, bool vlans)
+
+{
+ struct fm10k_macvlan_request *r, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&interface->macvlan_lock, flags);
+
+ /* Free any outstanding MAC/VLAN requests for this interface */
+ list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) {
+ switch (r->type) {
+ case FM10K_MC_MAC_REQUEST:
+ case FM10K_UC_MAC_REQUEST:
+ /* Don't free requests for other interfaces */
+ if (r->mac.glort != glort)
+ break;
+ /* fall through */
+ case FM10K_VLAN_REQUEST:
+ if (vlans) {
+ list_del(&r->list);
+ kfree(r);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&interface->macvlan_lock, flags);
+}
+
static int fm10k_uc_vlan_unsync(struct net_device *netdev,
const unsigned char *uc_addr)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
@@ -767,10 +892,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- if (fm10k_host_mbx_ready(interface))
- err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr,
- vid, set, 0);
-
+ err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set);
if (err)
return err;
@@ -782,7 +904,6 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
const unsigned char *mc_addr)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
@@ -791,9 +912,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
- if (fm10k_host_mbx_ready(interface))
- err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set);
-
+ err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set);
if (err)
return err;
@@ -851,18 +970,14 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
/* only need to update the VLAN if not in promiscuous mode */
if (!(netdev->flags & IFF_PROMISC)) {
- err = hw->mac.ops.update_vlan(hw, vid, 0, set);
+ err = fm10k_queue_vlan_request(interface, vid, 0, set);
if (err)
goto err_out;
}
- /* update our base MAC address if host's mailbox is ready */
- if (fm10k_host_mbx_ready(interface))
- err = hw->mac.ops.update_uc_addr(hw, interface->glort,
- hw->mac.addr, vid, set, 0);
- else
- err = -EHOSTDOWN;
-
+ /* Update our base MAC address */
+ err = fm10k_queue_mac_request(interface, interface->glort,
+ hw->mac.addr, vid, set);
if (err)
goto err_out;
@@ -906,7 +1021,6 @@ static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
- struct fm10k_hw *hw = &interface->hw;
u32 vid, prev_vid;
/* loop through and find any gaps in the table */
@@ -918,7 +1032,7 @@ static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
/* send request to clear multiple bits at a time */
prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
- hw->mac.ops.update_vlan(hw, prev_vid, 0, false);
+ fm10k_queue_vlan_request(interface, prev_vid, 0, false);
}
}
@@ -933,15 +1047,11 @@ static int __fm10k_uc_sync(struct net_device *dev,
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- /* update table with current entries if host's mailbox is ready */
- if (!fm10k_host_mbx_ready(interface))
- return -EHOSTDOWN;
-
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
- err = hw->mac.ops.update_uc_addr(hw, glort, addr,
- vid, sync, 0);
+ err = fm10k_queue_mac_request(interface, glort,
+ addr, vid, sync);
if (err)
return err;
}
@@ -998,15 +1108,18 @@ static int __fm10k_mc_sync(struct net_device *dev,
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_hw *hw = &interface->hw;
u16 vid, glort = interface->glort;
+ s32 err;
- /* update table with current entries if host's mailbox is ready */
- if (!fm10k_host_mbx_ready(interface))
- return 0;
+ if (!is_multicast_ether_addr(addr))
+ return -EADDRNOTAVAIL;
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
- hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
+ err = fm10k_queue_mac_request(interface, glort,
+ addr, vid, sync);
+ if (err)
+ return err;
}
return 0;
@@ -1046,7 +1159,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
if (interface->xcast_mode != xcast_mode) {
/* update VLAN table */
if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
- hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true);
+ fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
+ 0, true);
if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
fm10k_clear_unused_vlans(interface);
@@ -1094,22 +1208,20 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
interface->glort_count, true);
/* update VLAN table */
- hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0,
- xcast_mode == FM10K_XCAST_MODE_PROMISC);
+ fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
+ xcast_mode == FM10K_XCAST_MODE_PROMISC);
/* Add filter for VLAN 0 */
- hw->mac.ops.update_vlan(hw, 0, 0, true);
+ fm10k_queue_vlan_request(interface, 0, 0, true);
/* update table with current entries */
for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
- hw->mac.ops.update_vlan(hw, vid, 0, true);
+ fm10k_queue_vlan_request(interface, vid, 0, true);
- /* Update unicast entries if host's mailbox is ready */
- if (fm10k_host_mbx_ready(interface))
- hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr,
- vid, true, 0);
+ fm10k_queue_mac_request(interface, glort,
+ hw->mac.addr, vid, true);
}
/* update xcast mode before synchronizing addresses if host's mailbox
@@ -1136,6 +1248,13 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
+ /* Wait for MAC/VLAN work to finish */
+ while (test_bit(__FM10K_MACVLAN_SCHED, interface->state))
+ usleep_range(1000, 2000);
+
+ /* Cancel pending MAC/VLAN requests */
+ fm10k_clear_macvlan_queue(interface, interface->glort, true);
+
fm10k_mbx_lock(interface);
/* clear the logical port state on lower device if host's mailbox is
@@ -1270,7 +1389,7 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
{
struct tc_mqprio_qopt *mqprio = type_data;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
@@ -1370,8 +1489,8 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
if (fm10k_host_mbx_ready(interface)) {
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_MULTI);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
- 0, true, 0);
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ 0, true);
}
fm10k_mbx_unlock(interface);
@@ -1410,8 +1529,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
if (fm10k_host_mbx_ready(interface)) {
hw->mac.ops.update_xcast_mode(hw, glort,
FM10K_XCAST_MODE_NONE);
- hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr,
- 0, false, 0);
+ fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
+ 0, false);
}
fm10k_mbx_unlock(interface);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 63784576ae8b..7f605221a686 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -91,6 +91,76 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface)
return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0;
}
+/**
+ * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
+ * @interface: fm10k private interface structure
+ *
+ * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
+ * started immediately, request that it be restarted when possible.
+ */
+void fm10k_macvlan_schedule(struct fm10k_intfc *interface)
+{
+ /* Avoid processing the MAC/VLAN queue when the service task is
+ * disabled, or when we're resetting the device.
+ */
+ if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) &&
+ !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) {
+ clear_bit(__FM10K_MACVLAN_REQUEST, interface->state);
+ /* We delay the actual start of execution in order to allow
+ * multiple MAC/VLAN updates to accumulate before handling
+ * them, and to allow some time to let the mailbox drain
+ * between runs.
+ */
+ queue_delayed_work(fm10k_workqueue,
+ &interface->macvlan_task, 10);
+ } else {
+ set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
+ }
+}
+
+/**
+ * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
+ * @interface: fm10k private interface structure
+ *
+ * Wait until the MAC/VLAN queue task has stopped, and cancel any future
+ * requests.
+ */
+static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface)
+{
+ /* Disable the MAC/VLAN work item */
+ set_bit(__FM10K_MACVLAN_DISABLE, interface->state);
+
+ /* Make sure we waited until any current invocations have stopped */
+ cancel_delayed_work_sync(&interface->macvlan_task);
+
+ /* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
+ * However, it may not be unset if the MAC/VLAN task never actually
+ * got a chance to run. Since we've canceled the task here, and it
+ * cannot be rescheduled right now, we need to ensure the scheduled bit
+ * gets unset.
+ */
+ clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
+}
+
+/**
+ * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
+ * @interface: fm10k private interface structure
+ *
+ * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
+ * the MAC/VLAN work monitor.
+ */
+static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface)
+{
+ /* Re-enable the MAC/VLAN work item */
+ clear_bit(__FM10K_MACVLAN_DISABLE, interface->state);
+
+ /* We might have received a MAC/VLAN request while disabled. If so,
+ * kick off the queue now.
+ */
+ if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
+ fm10k_macvlan_schedule(interface);
+}
+
void fm10k_service_event_schedule(struct fm10k_intfc *interface)
{
if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) &&
@@ -118,13 +188,35 @@ static void fm10k_service_event_complete(struct fm10k_intfc *interface)
fm10k_service_event_schedule(interface);
}
+static void fm10k_stop_service_event(struct fm10k_intfc *interface)
+{
+ set_bit(__FM10K_SERVICE_DISABLE, interface->state);
+ cancel_work_sync(&interface->service_task);
+
+ /* It's possible that cancel_work_sync stopped the service task from
+ * running before it could actually start. In this case the
+ * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
+ * the service task cannot be running at this point, we need to clear
+ * the scheduled bit, as otherwise the service task may never be
+ * restarted.
+ */
+ clear_bit(__FM10K_SERVICE_SCHED, interface->state);
+}
+
+static void fm10k_start_service_event(struct fm10k_intfc *interface)
+{
+ clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
+ fm10k_service_event_schedule(interface);
+}
+
/**
* fm10k_service_timer - Timer Call-back
* @data: pointer to interface cast into an unsigned long
**/
-static void fm10k_service_timer(unsigned long data)
+static void fm10k_service_timer(struct timer_list *t)
{
- struct fm10k_intfc *interface = (struct fm10k_intfc *)data;
+ struct fm10k_intfc *interface = from_timer(interface, t,
+ service_timer);
/* Reset the timer */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
@@ -132,36 +224,15 @@ static void fm10k_service_timer(unsigned long data)
fm10k_service_event_schedule(interface);
}
-static void fm10k_detach_subtask(struct fm10k_intfc *interface)
-{
- struct net_device *netdev = interface->netdev;
- u32 __iomem *hw_addr;
- u32 value;
-
- /* do nothing if device is still present or hw_addr is set */
- if (netif_device_present(netdev) || interface->hw.hw_addr)
- return;
-
- /* check the real address space to see if we've recovered */
- hw_addr = READ_ONCE(interface->uc_addr);
- value = readl(hw_addr);
- if (~value) {
- interface->hw.hw_addr = interface->uc_addr;
- netif_device_attach(netdev);
- set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
- netdev_warn(netdev, "PCIe link restored, device now attached\n");
- return;
- }
-
- rtnl_lock();
-
- if (netif_running(netdev))
- dev_close(netdev);
-
- rtnl_unlock();
-}
-
-static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
+/**
+ * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
+ * @interface: fm10k private data structure
+ *
+ * This function prepares for a device reset by shutting as much down as we
+ * can. It does nothing and returns false if __FM10K_RESETTING was already set
+ * prior to calling this function. It returns true if it actually did work.
+ */
+static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
@@ -170,8 +241,15 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
- while (test_and_set_bit(__FM10K_RESETTING, interface->state))
- usleep_range(1000, 2000);
+ /* Nothing to do if a reset is already in progress */
+ if (test_and_set_bit(__FM10K_RESETTING, interface->state))
+ return false;
+
+ /* As the MAC/VLAN task will be accessing registers, it must not be
+ * running while we reset. Although the task will not be scheduled
+ * once we start resetting, it may already be running.
+ */
+ fm10k_stop_macvlan_task(interface);
rtnl_lock();
@@ -189,6 +267,8 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
interface->last_reset = jiffies + (10 * HZ);
rtnl_unlock();
+
+ return true;
}
static int fm10k_handle_reset(struct fm10k_intfc *interface)
@@ -197,6 +277,8 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
int err;
+ WARN_ON(!test_bit(__FM10K_RESETTING, interface->state));
+
rtnl_lock();
pci_set_master(interface->pdev);
@@ -253,6 +335,8 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
rtnl_unlock();
+ fm10k_resume_macvlan_task(interface);
+
clear_bit(__FM10K_RESETTING, interface->state);
return err;
@@ -270,27 +354,80 @@ reinit_err:
return err;
}
-static void fm10k_reinit(struct fm10k_intfc *interface)
+static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
+ struct net_device *netdev = interface->netdev;
+ u32 __iomem *hw_addr;
+ u32 value;
int err;
- fm10k_prepare_for_reset(interface);
+ /* do nothing if netdev is still present or hw_addr is set */
+ if (netif_device_present(netdev) || interface->hw.hw_addr)
+ return;
- err = fm10k_handle_reset(interface);
- if (err)
- dev_err(&interface->pdev->dev,
- "fm10k_handle_reset failed: %d\n", err);
+ /* We've lost the PCIe register space, and can no longer access the
+ * device. Shut everything except the detach subtask down and prepare
+ * to reset the device in case we recover. If we actually prepare for
+ * reset, indicate that we're detached.
+ */
+ if (fm10k_prepare_for_reset(interface))
+ set_bit(__FM10K_RESET_DETACHED, interface->state);
+
+ /* check the real address space to see if we've recovered */
+ hw_addr = READ_ONCE(interface->uc_addr);
+ value = readl(hw_addr);
+ if (~value) {
+ /* Make sure the reset was initiated because we detached,
+ * otherwise we might race with a different reset flow.
+ */
+ if (!test_and_clear_bit(__FM10K_RESET_DETACHED,
+ interface->state))
+ return;
+
+ /* Restore the hardware address */
+ interface->hw.hw_addr = interface->uc_addr;
+
+ /* PCIe link has been restored, and the device is active
+ * again. Restore everything and reset the device.
+ */
+ err = fm10k_handle_reset(interface);
+ if (err) {
+ netdev_err(netdev, "Unable to reset device: %d\n", err);
+ interface->hw.hw_addr = NULL;
+ return;
+ }
+
+ /* Re-attach the netdev */
+ netif_device_attach(netdev);
+ netdev_warn(netdev, "PCIe link restored, device now attached\n");
+ return;
+ }
}
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
{
+ int err;
+
if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED,
interface->flags))
return;
+ /* If another thread has already prepared to reset the device, we
+ * should not attempt to handle a reset here, since we'd race with
+ * that thread. This may happen if we suspend the device or if the
+ * PCIe link is lost. In this case, we'll just ignore the RESET
+ * request, as it will (eventually) be taken care of when the thread
+ * which actually started the reset is finished.
+ */
+ if (!fm10k_prepare_for_reset(interface))
+ return;
+
netdev_err(interface->netdev, "Reset interface\n");
- fm10k_reinit(interface);
+ err = fm10k_handle_reset(interface);
+ if (err)
+ dev_err(&interface->pdev->dev,
+ "fm10k_handle_reset failed: %d\n", err);
}
/**
@@ -360,6 +497,10 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface)
**/
static void fm10k_mbx_subtask(struct fm10k_intfc *interface)
{
+ /* If we're resetting, bail out */
+ if (test_bit(__FM10K_RESETTING, interface->state))
+ return;
+
/* process upstream mailbox and update device state */
fm10k_watchdog_update_host_state(interface);
@@ -609,9 +750,11 @@ static void fm10k_service_task(struct work_struct *work)
interface = container_of(work, struct fm10k_intfc, service_task);
+ /* Check whether we're detached first */
+ fm10k_detach_subtask(interface);
+
/* tasks run even when interface is down */
fm10k_mbx_subtask(interface);
- fm10k_detach_subtask(interface);
fm10k_reset_subtask(interface);
/* tasks only run when interface is up */
@@ -623,6 +766,112 @@ static void fm10k_service_task(struct work_struct *work)
}
/**
+ * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
+ * @work: pointer to work_struct containing our data
+ *
+ * This work item handles sending MAC/VLAN updates to the switch manager. When
+ * the interface is up, it will attempt to queue mailbox messages to the
+ * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
+ * mailbox is full, it will reschedule itself to try again in a short while.
+ * This ensures that the driver does not overload the switch mailbox with too
+ * many simultaneous requests, causing an unnecessary reset.
+ **/
+static void fm10k_macvlan_task(struct work_struct *work)
+{
+ struct fm10k_macvlan_request *item;
+ struct fm10k_intfc *interface;
+ struct delayed_work *dwork;
+ struct list_head *requests;
+ struct fm10k_hw *hw;
+ unsigned long flags;
+
+ dwork = to_delayed_work(work);
+ interface = container_of(dwork, struct fm10k_intfc, macvlan_task);
+ hw = &interface->hw;
+ requests = &interface->macvlan_requests;
+
+ do {
+ /* Pop the first item off the list */
+ spin_lock_irqsave(&interface->macvlan_lock, flags);
+ item = list_first_entry_or_null(requests,
+ struct fm10k_macvlan_request,
+ list);
+ if (item)
+ list_del_init(&item->list);
+
+ spin_unlock_irqrestore(&interface->macvlan_lock, flags);
+
+ /* We have no more items to process */
+ if (!item)
+ goto done;
+
+ fm10k_mbx_lock(interface);
+
+ /* Check that we have plenty of space to send the message. We
+ * want to ensure that the mailbox stays low enough to avoid a
+ * change in the host state, otherwise we may see spurious
+ * link up / link down notifications.
+ */
+ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) {
+ hw->mbx.ops.process(hw, &hw->mbx);
+ set_bit(__FM10K_MACVLAN_REQUEST, interface->state);
+ fm10k_mbx_unlock(interface);
+
+ /* Put the request back on the list */
+ spin_lock_irqsave(&interface->macvlan_lock, flags);
+ list_add(&item->list, requests);
+ spin_unlock_irqrestore(&interface->macvlan_lock, flags);
+ break;
+ }
+
+ switch (item->type) {
+ case FM10K_MC_MAC_REQUEST:
+ hw->mac.ops.update_mc_addr(hw,
+ item->mac.glort,
+ item->mac.addr,
+ item->mac.vid,
+ item->set);
+ break;
+ case FM10K_UC_MAC_REQUEST:
+ hw->mac.ops.update_uc_addr(hw,
+ item->mac.glort,
+ item->mac.addr,
+ item->mac.vid,
+ item->set,
+ 0);
+ break;
+ case FM10K_VLAN_REQUEST:
+ hw->mac.ops.update_vlan(hw,
+ item->vlan.vid,
+ item->vlan.vsi,
+ item->set);
+ break;
+ default:
+ break;
+ }
+
+ fm10k_mbx_unlock(interface);
+
+ /* Free the item now that we've sent the update */
+ kfree(item);
+ } while (true);
+
+done:
+ WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state));
+
+ /* flush memory to make sure state is correct */
+ smp_mb__before_atomic();
+ clear_bit(__FM10K_MACVLAN_SCHED, interface->state);
+
+ /* If a MAC/VLAN request was scheduled since we started, we should
+ * re-schedule. However, there is no reason to re-schedule if there is
+ * no work to do.
+ */
+ if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state))
+ fm10k_macvlan_schedule(interface);
+}
+
+/**
* fm10k_configure_tx_ring - Configure Tx ring after Reset
* @interface: board private structure
* @ring: structure containing ring specific data
@@ -1544,7 +1793,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
struct net_device *dev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
struct msix_entry *entry;
- int ri = 0, ti = 0;
+ unsigned int ri = 0, ti = 0;
int vector, err;
entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
@@ -1554,15 +1803,15 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
/* name the vector */
if (q_vector->tx.count && q_vector->rx.count) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", dev->name, ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-TxRx-%u", dev->name, ri++);
ti++;
} else if (q_vector->rx.count) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-rx-%d", dev->name, ri++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-rx-%u", dev->name, ri++);
} else if (q_vector->tx.count) {
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-tx-%d", dev->name, ti++);
+ snprintf(q_vector->name, sizeof(q_vector->name),
+ "%s-tx-%u", dev->name, ti++);
} else {
/* skip this unused q_vector */
continue;
@@ -1800,9 +2049,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- /* delay any future reset requests */
- interface->last_reset = jiffies + (10 * HZ);
-
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw);
if (err) {
@@ -1857,9 +2103,16 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
INIT_LIST_HEAD(&interface->vxlan_port);
INIT_LIST_HEAD(&interface->geneve_port);
+ /* Initialize the MAC/VLAN queue */
+ INIT_LIST_HEAD(&interface->macvlan_requests);
+
netdev_rss_key_fill(rss_key, sizeof(rss_key));
memcpy(interface->rssrk, rss_key, sizeof(rss_key));
+ /* Initialize the mailbox lock */
+ spin_lock_init(&interface->mbx_lock);
+ spin_lock_init(&interface->macvlan_lock);
+
/* Start off interface as being down */
set_bit(__FM10K_DOWN, interface->state);
set_bit(__FM10K_UPDATING_STATS, interface->state);
@@ -2063,10 +2316,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Initialize service timer and service task late in order to avoid
* cleanup issues.
*/
- setup_timer(&interface->service_timer, &fm10k_service_timer,
- (unsigned long)interface);
+ timer_setup(&interface->service_timer, fm10k_service_timer, 0);
INIT_WORK(&interface->service_task, fm10k_service_task);
+ /* Setup the MAC/VLAN queue */
+ INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task);
+
/* kick off service timer now, even when interface is down */
mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
@@ -2079,8 +2334,9 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable SR-IOV after registering netdev to enforce PF/VF ordering */
fm10k_iov_configure(pdev, 0);
- /* clear the service task disable bit to allow service task to start */
+ /* clear the service task disable bit and kick off service task */
clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
+ fm10k_service_event_schedule(interface);
return 0;
@@ -2118,8 +2374,11 @@ static void fm10k_remove(struct pci_dev *pdev)
del_timer_sync(&interface->service_timer);
- set_bit(__FM10K_SERVICE_DISABLE, interface->state);
- cancel_work_sync(&interface->service_task);
+ fm10k_stop_service_event(interface);
+ fm10k_stop_macvlan_task(interface);
+
+ /* Remove all pending MAC/VLAN requests */
+ fm10k_clear_macvlan_queue(interface, interface->glort, true);
/* free netdev, this may bounce the interrupts due to setup_tc */
if (netdev->reg_state == NETREG_REGISTERED)
@@ -2156,11 +2415,14 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
* a surprise remove if the PCIe device is disabled while we're
* stopped. We stop the watchdog task until after we resume software
* activity.
+ *
+ * Note that the MAC/VLAN task will be stopped as part of preparing
+ * for reset so we don't need to handle it here.
*/
- set_bit(__FM10K_SERVICE_DISABLE, interface->state);
- cancel_work_sync(&interface->service_task);
+ fm10k_stop_service_event(interface);
- fm10k_prepare_for_reset(interface);
+ if (fm10k_prepare_for_reset(interface))
+ set_bit(__FM10K_RESET_SUSPENDED, interface->state);
}
static int fm10k_handle_resume(struct fm10k_intfc *interface)
@@ -2168,6 +2430,13 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
struct fm10k_hw *hw = &interface->hw;
int err;
+ /* Even if we didn't properly prepare for reset in
+ * fm10k_prepare_suspend, we'll attempt to resume anyway.
+ */
+ if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
+ dev_warn(&interface->pdev->dev,
+ "Device was shut down as part of suspend... Attempting to recover\n");
+
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
@@ -2185,45 +2454,30 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
interface->link_down_event = jiffies + (HZ);
set_bit(__FM10K_LINK_DOWN, interface->state);
- /* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, interface->state);
- fm10k_service_event_schedule(interface);
+ /* restart the service task */
+ fm10k_start_service_event(interface);
+
+ /* Restart the MAC/VLAN request queue in case of outstanding events */
+ fm10k_macvlan_schedule(interface);
return err;
}
#ifdef CONFIG_PM
/**
- * fm10k_resume - Restore device to pre-sleep state
- * @pdev: PCI device information struct
+ * fm10k_resume - Generic PM resume hook
+ * @dev: generic device structure
*
- * fm10k_resume is called after the system has powered back up from a sleep
- * state and is ready to resume operation. This function is meant to restore
- * the device back to its pre-sleep state.
+ * Generic PM hook used when waking the device from a low power state after
+ * suspend or hibernation. This function does not need to handle lower PCIe
+ * device state as the stack takes care of that for us.
**/
-static int fm10k_resume(struct pci_dev *pdev)
+static int fm10k_resume(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+ struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
- u32 err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- /* pci_restore_state clears dev->state_saved so call
- * pci_save_state to restore it.
- */
- pci_save_state(pdev);
-
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
-
- pci_wake_from_d3(pdev, false);
+ int err;
/* refresh hw_addr in case it was dropped */
hw->hw_addr = interface->uc_addr;
@@ -2238,36 +2492,27 @@ static int fm10k_resume(struct pci_dev *pdev)
}
/**
- * fm10k_suspend - Prepare the device for a system sleep state
- * @pdev: PCI device information struct
+ * fm10k_suspend - Generic PM suspend hook
+ * @dev: generic device structure
*
- * fm10k_suspend is meant to shutdown the device prior to the system entering
- * a sleep state. The fm10k hardware does not support wake on lan so the
- * driver simply needs to shut down the device so it is in a low power state.
+ * Generic PM hook used when setting the device into a low power state for
+ * system suspend or hibernation. This function does not need to handle lower
+ * PCIe device state as the stack takes care of that for us.
**/
-static int fm10k_suspend(struct pci_dev *pdev,
- pm_message_t __always_unused state)
+static int fm10k_suspend(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+ struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
struct net_device *netdev = interface->netdev;
- int err = 0;
netif_device_detach(netdev);
fm10k_prepare_suspend(interface);
- err = pci_save_state(pdev);
- if (err)
- return err;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
-
return 0;
}
#endif /* CONFIG_PM */
+
/**
* fm10k_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -2343,11 +2588,18 @@ static void fm10k_io_resume(struct pci_dev *pdev)
if (err)
dev_warn(&pdev->dev,
- "fm10k_io_resume failed: %d\n", err);
+ "%s failed: %d\n", __func__, err);
else
netif_device_attach(netdev);
}
+/**
+ * fm10k_io_reset_prepare - called when PCI function is about to be reset
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the PCI function is about to be reset,
+ * allowing the device driver to prepare for it.
+ */
static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
/* warn in case we have any active VF devices */
@@ -2357,6 +2609,13 @@ static void fm10k_io_reset_prepare(struct pci_dev *pdev)
fm10k_prepare_suspend(pci_get_drvdata(pdev));
}
+/**
+ * fm10k_io_reset_done - called when PCI function has finished resetting
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called just after the PCI function is reset, such as via
+ * /sys/class/net/<enpX>/device/reset or similar.
+ */
static void fm10k_io_reset_done(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
@@ -2364,7 +2623,7 @@ static void fm10k_io_reset_done(struct pci_dev *pdev)
if (err) {
dev_warn(&pdev->dev,
- "fm10k_io_reset_notify failed: %d\n", err);
+ "%s failed: %d\n", __func__, err);
netif_device_detach(interface->netdev);
}
}
@@ -2377,15 +2636,18 @@ static const struct pci_error_handlers fm10k_err_handler = {
.reset_done = fm10k_io_reset_done,
};
+static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
+
static struct pci_driver fm10k_driver = {
.name = fm10k_driver_name,
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
#ifdef CONFIG_PM
- .suspend = fm10k_suspend,
- .resume = fm10k_resume,
-#endif
+ .driver = {
+ .pm = &fm10k_pm_ops,
+ },
+#endif /* CONFIG_PM */
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
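
The hunk above converts fm10k from the legacy pci_driver .suspend/.resume callbacks to generic dev_pm_ops, so the PCI core now saves/restores config space and handles D-states instead of the driver. A minimal sketch of the same pattern, assuming a hypothetical "foo" driver (foo_priv, foo_stop, foo_start and the driver name are illustrative, not from this patch):

	static int foo_suspend(struct device *dev)
	{
		struct foo_priv *priv = pci_get_drvdata(to_pci_dev(dev));

		/* only quiesce the device; the PCI core handles power states */
		foo_stop(priv);
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_priv *priv = pci_get_drvdata(to_pci_dev(dev));

		return foo_start(priv);
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		/* .probe/.remove/.id_table omitted for brevity */
		.driver = {
			.pm = &foo_pm_ops,
		},
	};
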
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 40ee0242a80a..425d814aed4d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1186,7 +1186,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
* Will report an error if the VLAN ID is out of range. For VID = 0, it will
* return either the pf_vid or sw_vid depending on which one is set.
*/
-static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
{
if (!vid)
return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
@@ -1334,19 +1334,19 @@ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
case FM10K_XCAST_MODE_PROMISC:
if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
return FM10K_XCAST_MODE_PROMISC;
- /* fallthough */
+ /* fall through */
case FM10K_XCAST_MODE_ALLMULTI:
if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
return FM10K_XCAST_MODE_ALLMULTI;
- /* fallthough */
+ /* fall through */
case FM10K_XCAST_MODE_MULTI:
if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
return FM10K_XCAST_MODE_MULTI;
- /* fallthough */
+ /* fall through */
case FM10K_XCAST_MODE_NONE:
if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
return FM10K_XCAST_MODE_NONE;
- /* fallthough */
+ /* fall through */
default:
break;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index 3336d3c10760..e04d41f1a532 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2017 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -114,6 +114,7 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
+s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid);
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d0c1bf5441d8..e019baa905c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -54,6 +54,9 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
@@ -77,6 +80,7 @@
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_MAX_VF_QUEUES 16
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
#define i40e_pf_get_max_q_per_tc(pf) \
(((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
@@ -125,6 +129,11 @@
/* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+/* BW rate limiting */
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */
+
/* driver state flags */
enum i40e_state_t {
__I40E_TESTING,
@@ -136,6 +145,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
@@ -155,6 +165,8 @@ enum i40e_state_t {
__I40E_STATE_SIZE__,
};
+#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+
/* VSI state flags */
enum i40e_vsi_state_t {
__I40E_VSI_DOWN,
@@ -242,6 +254,58 @@ struct i40e_fdir_filter {
u32 fd_id;
};
+#define I40E_CLOUD_FIELD_OMAC 0x01
+#define I40E_CLOUD_FIELD_IMAC 0x02
+#define I40E_CLOUD_FIELD_IVLAN 0x04
+#define I40E_CLOUD_FIELD_TEN_ID 0x08
+#define I40E_CLOUD_FIELD_IIP 0x10
+
+#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_IVLAN)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \
+ I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \
+ I40E_CLOUD_FIELD_IVLAN | \
+ I40E_CLOUD_FIELD_TEN_ID)
+#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP
+
+struct i40e_cloud_filter {
+ struct hlist_node cloud_node;
+ unsigned long cookie;
+ /* cloud filter input set follows */
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 vlan_id;
+ u16 seid; /* filter control */
+ __be16 dst_port;
+ __be16 src_port;
+ u32 tenant_id;
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+ u16 n_proto; /* Ethernet Protocol */
+ u8 ip_proto; /* IPPROTO value */
+ u8 flags;
+#define I40E_CLOUD_TNL_TYPE_NONE 0xff
+ u8 tunnel_type;
+};
+
#define I40E_ETH_P_LLDP 0x88cc
#define I40E_DCB_PRIO_TYPE_STRICT 0
@@ -336,6 +400,25 @@ struct i40e_flex_pit {
u8 pit_index;
};
+struct i40e_channel {
+ struct list_head list;
+ bool initialized;
+ u8 type;
+ u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */
+ u16 stat_counter_idx;
+ u16 base_queue;
+ u16 num_queue_pairs; /* Requested by user */
+ u16 seid;
+
+ u8 enabled_tc;
+ struct i40e_aqc_vsi_properties_data info;
+
+ u64 max_tx_rate;
+
+ /* track which VSI this channel belongs to */
+ struct i40e_vsi *parent_vsi;
+};
+
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
@@ -348,7 +431,7 @@ struct i40e_pf {
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
- u16 num_req_vfs; /* num VFs requested for this VF */
+ u16 num_req_vfs; /* num VFs requested for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
@@ -390,6 +473,9 @@ struct i40e_pf {
struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
u16 pending_udp_bitmap;
+ struct hlist_head cloud_filter_list;
+ u16 num_cloud_filters;
+
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -401,55 +487,60 @@ struct i40e_pf {
struct timer_list service_timer;
struct work_struct service_task;
- u64 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0)
-#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1)
-#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4)
-#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5)
-#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6)
-#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7)
-#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9)
-#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12)
-#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14)
-#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15)
-#define I40E_HW_STOP_FW_LLDP BIT_ULL(16)
-#define I40E_HW_PORT_ID_VALID BIT_ULL(17)
-#define I40E_HW_RESTART_AUTONEG BIT_ULL(18)
-
- u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
-#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
-#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
-#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
-#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
-#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
-#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
-#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
-#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
-#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
-#define I40E_FLAG_PTP BIT_ULL(25)
-#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
-#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
-#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
-#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
+ u32 hw_features;
+#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
+#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
+#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
+#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
+#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
+#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
+#define I40E_HW_NO_DCB_SUPPORT BIT(7)
+#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
+#define I40E_HW_PTP_L4_CAPABLE BIT(10)
+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
+#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12)
+#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
+#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
+#define I40E_HW_STOP_FW_LLDP BIT(16)
+#define I40E_HW_PORT_ID_VALID BIT(17)
+#define I40E_HW_RESTART_AUTONEG BIT(18)
+
+ u32 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40E_FLAG_MSI_ENABLED BIT(1)
+#define I40E_FLAG_MSIX_ENABLED BIT(2)
+#define I40E_FLAG_RSS_ENABLED BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT(4)
+#define I40E_FLAG_FILTER_SYNC BIT(5)
+#define I40E_FLAG_SRIOV_ENABLED BIT(6)
+#define I40E_FLAG_DCB_CAPABLE BIT(7)
+#define I40E_FLAG_DCB_ENABLED BIT(8)
+#define I40E_FLAG_FD_SB_ENABLED BIT(9)
+#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
+#define I40E_FLAG_MFP_ENABLED BIT(13)
+#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
+#define I40E_FLAG_LEGACY_RX BIT(21)
+#define I40E_FLAG_PTP BIT(22)
+#define I40E_FLAG_IWARP_ENABLED BIT(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
+#define I40E_FLAG_CLIENT_RESET BIT(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
+#define I40E_FLAG_TC_MQPRIO BIT(29)
+#define I40E_FLAG_FD_SB_INACTIVE BIT(30)
+#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -530,6 +621,10 @@ struct i40e_pf {
u32 ioremap_len;
u32 fd_inv;
u16 phy_led_val;
+
+ u16 override_q_count;
+ u16 last_sw_conf_flags;
+ u16 last_sw_conf_valid_flags;
};
/**
@@ -673,6 +768,7 @@ struct i40e_vsi {
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
s16 vf_id; /* Virtual function ID for SRIOV VSIs */
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
@@ -694,6 +790,17 @@ struct i40e_vsi {
bool current_isup; /* Sync 'link up' logging */
enum i40e_aq_link_speed current_speed; /* Sync link speed logging */
+ /* channel specific fields */
+ u16 cnt_q_avail; /* num of queues available for channel usage */
+ u16 orig_rss_size;
+ u16 current_rss_size;
+ bool reconfig_rss;
+
+ u16 next_base_queue; /* next queue to be used for channel setup */
+
+ struct list_head ch_list;
+ u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS];
+
void *priv; /* client driver data reference. */
/* VSI specific handlers */
@@ -945,9 +1052,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
struct i40e_hw *hw = &pf->hw;
u32 val;
- /* definitely clear the PBA here, as this function is meant to
- * clean out all previous interrupts AND enable the interrupt
- */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -956,7 +1060,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
}
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
@@ -1001,4 +1105,7 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
}
+
+int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
#endif /* _I40E_H_ */
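
The new BW rate limiting constants in this header encode the unit conversions: I40E_BW_MBPS_DIVISOR (125000, i.e. 1000000/8) converts a byte-per-second rate to Mbps, and I40E_BW_CREDIT_DIVISOR (50) converts Mbps to 50 Mbps scheduler credits. A hedged sketch of that arithmetic, assuming the input rate is in bytes per second; the actual i40e_set_bw_limit() body is not part of this hunk and the helper name is illustrative:

	/* Illustrative only: bytes/s -> 50 Mbps scheduler credits */
	static u64 i40e_rate_bytes_to_credits(u64 max_tx_rate)
	{
		u64 mbps = max_tx_rate / I40E_BW_MBPS_DIVISOR;	/* bytes/s -> Mbps */

		return mbps / I40E_BW_CREDIT_DIVISOR;		/* Mbps -> credits */
	}
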
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ba04988e0598..9af74253c3f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -607,6 +607,24 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 7))
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
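
Each of the capability checks added to i40e_init_adminq() is a comparison of the negotiated AQ API version against a minimum major.minor. A hedged sketch of that comparison factored into a helper (i40e_aq_api_ge() is illustrative and does not exist in the driver):

	/* Illustrative only: true if the firmware AQ API is at least maj.min */
	static bool i40e_aq_api_ge(struct i40e_hw *hw, u16 maj, u16 min)
	{
		return hw->aq.api_maj_ver > maj ||
		       (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min);
	}

	/* inside i40e_init_adminq(), the checks above would then read: */
	if (i40e_aq_api_ge(hw, 1, 5))	/* NVM reads need the lock from API 1.5 */
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
	if (i40e_aq_api_ge(hw, 1, 7))	/* 802.1ad RX support from API 1.7 */
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
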
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 5d5f422cbae5..b0188b8f91ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,15 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@@ -236,6 +244,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -765,7 +775,50 @@ struct i40e_aqc_set_switch_config {
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ /* Next byte is split into the following:
+ * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
+ * Bit 6 : 0 : Destination Port, 1: source port
+ * Bit 5..4 : L4 type
+ * 0: rsvd
+ * 1: TCP
+ * 2: UDP
+ * 3: Both TCP and UDP
+ * Bits 3:0 Mode
+ * 0: default mode
+ * 1: L4 port only mode
+ * 2: non-tunneled mode
+ * 3: tunneled mode
+ */
+#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
+
+#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
+
+#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
+#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
+#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
+#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
+
+#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
+#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
+#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
+#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
+ u8 mode;
+ u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
@@ -1318,14 +1371,16 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-struct i40e_aqc_add_remove_cloud_filters_element_data {
+struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
@@ -1337,6 +1392,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
+ struct {
+ __le16 data[8];
+ } raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
@@ -1355,6 +1413,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1389,6 +1451,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * the I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1400,6 +1505,60 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1726,6 +1885,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1744,7 +1905,12 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
@@ -1801,6 +1967,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -1934,19 +2102,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7, the loopback field also carries power class info */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -2029,6 +2209,22 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_address;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
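
The mode byte added to struct i40e_aqc_set_switch_config packs a valid bit (bit 7), an L4 source/destination bit (bit 6), an L4 type (bits 5:4) and a switch mode (bits 3:0), per the comment above. A hedged sketch of composing it for "L4 port only" mode over TCP with destination-port matching; how a caller actually chooses the mode lives outside this header:

	/* Illustrative only: bit 7 valid, bits 5:4 = TCP, bits 3:0 = L4 port
	 * mode, bit 6 left clear to match on the destination port.
	 */
	u8 mode = I40E_AQ_SET_SWITCH_BIT7_VALID |
		  I40E_AQ_SET_SWITCH_L4_TYPE_TCP |
		  I40E_AQ_SET_SWITCH_MODE_L4_PORT;

	/* mode is then passed to i40e_aq_set_switch_config() along with
	 * flags/valid_flags (see the i40e_common.c change in this series).
	 */
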
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 111426ba5fbc..095965f268bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1180,6 +1181,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1266,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
@@ -1277,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@@ -1567,34 +1577,57 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
struct i40e_aq_desc desc;
i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ usleep_range(1000, 2000);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status)
+ return status;
if (report_init) {
- hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
- hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ } else {
+ hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+ hw->phy.phy_types |=
+ ((u64)abilities->phy_type_ext << 32);
+ }
}
return status;
@@ -1819,7 +1852,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
- hw_link_info->loopback = resp->loopback;
+ hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@@ -1850,6 +1883,15 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+ if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= 7) {
+ __le32 tmp;
+
+ memcpy(&tmp, resp->link_type, sizeof(tmp));
+ hw->phy.phy_types = le32_to_cpu(tmp);
+ hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
+ }
+
/* save link status information */
if (link)
*link = *hw_link_info;
@@ -2373,13 +2415,14 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
* @valid_flags: which bit flags to set
+ * @mode: cloud filter mode
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags,
- u16 valid_flags,
+ u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2391,7 +2434,12 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = cpu_to_le16(flags);
scfg->valid_flags = cpu_to_le16(valid_flags);
-
+ scfg->mode = mode;
+ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
+ scfg->switch_tag = cpu_to_le16(hw->switch_tag);
+ scfg->first_tag = cpu_to_le16(hw->first_tag);
+ scfg->second_tag = cpu_to_le16(hw->second_tag);
+ }
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4826,6 +4874,74 @@ phy_blinking_end:
}
/**
+ * i40e_led_get_reg - read LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: read register value
+ **/
+static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+ u8 port_num;
+ u32 i;
+
+ *reg_val = 0;
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16 *)reg_val);
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_reg - write LED register
+ * @hw: pointer to the HW structure
+ * @led_addr: LED register address
+ * @reg_val: register value to write
+ **/
+static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
+{
+ enum i40e_status_code status;
+ u8 phy_addr = 0;
+ u8 port_num;
+ u32 i;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ reg_val, NULL);
+ } else {
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ (u16)reg_val);
+ }
+
+ return status;
+}
+
+/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
* @led_addr: address of led register to use
@@ -4842,7 +4958,19 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 temp_addr;
u8 port_num;
u32 i;
-
+ u32 reg_val_aq;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ status =
+ i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_LED_PROV_REG_1,
+ &reg_val_aq, NULL);
+ if (status == I40E_SUCCESS)
+ *val = (u16)reg_val_aq;
+ return status;
+ }
temp_addr = I40E_PHY_LED_PROV_REG_1;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
@@ -4877,51 +5005,38 @@ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
i40e_status status = 0;
- u16 led_ctl = 0;
- u16 led_reg = 0;
- u8 phy_addr = 0;
- u8 port_num;
- u32 i;
+ u32 led_ctl = 0;
+ u32 led_reg = 0;
- i = rd32(hw, I40E_PFGEN_PORTNUM);
- port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
- phy_addr = i40e_get_phy_address(hw, port_num);
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+
+ status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register_clause45(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
}
return status;
+
restore_config:
- status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_ctl);
+ status = i40e_led_set_reg(hw, led_addr, led_ctl);
return status;
}
@@ -5052,6 +5167,75 @@ do_retry:
}
/**
+ * i40e_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the external PHY register.
+ **/
+i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+ cmd->reg_value = cpu_to_le32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the external PHY register.
+ **/
+i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = le32_to_cpu(cmd->reg_value);
+
+ return status;
+}
+
+/**
* i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
* @hw: pointer to the hw struct
* @buff: command buffer (size in bytes = buff_size)
@@ -5260,5 +5444,194 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
track_id, &offset, &info, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
+ *
+ **/
+enum i40e_status_code
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters to
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+i40e_status
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ i40e_status status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one byte further than is normally used for the Tenant ID in
+ * other tunnel types.
+ */
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_rem_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
+ *
+ **/
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_rem_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+i40e_status
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ i40e_status status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = cpu_to_le16(buff_len);
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = cpu_to_le16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (le16_to_cpu(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one byte further than is normally used for the Tenant ID in
+ * other tunnel types.
+ */
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = le32_to_cpu(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = cpu_to_le32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
return status;
}
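
The new i40e_aq_get_phy_register()/i40e_aq_set_phy_register() wrappers give the LED helpers an AQ path to external PHY registers when I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE is set. A hedged usage sketch of a read-modify-write through them, reusing the page/register constants from the LED code above (error handling trimmed):

	u32 val;
	i40e_status status;

	status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					  I40E_PHY_COM_REG_PAGE,
					  I40E_PHY_LED_PROV_REG_1, &val, NULL);
	if (!status) {
		/* force the LED on (cf. I40E_PHY_LED_MANUAL_ON in i40e_led_set_phy()) */
		val = I40E_PHY_LED_MANUAL_ON;
		status = i40e_aq_set_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
						  I40E_PHY_COM_REG_PAGE,
						  I40E_PHY_LED_PROV_REG_1, val, NULL);
	}
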
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8f326f87a815..4c3b4243cf65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
@@ -278,8 +278,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->netdev,
rx_ring->rx_bi);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, rx_ring->state,
+ " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *rx_ring->state,
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
@@ -334,8 +334,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->netdev,
tx_ring->tx_bi);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, tx_ring->state,
+ " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *tx_ring->state,
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
@@ -798,8 +798,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
*/
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf,
- BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index f141e78d409e..76ed56641864 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -36,7 +36,9 @@
static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
u32 reg, u32 mask)
{
- const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 patterns[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
u32 pat, val, orig_val;
int i;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 05e89864f781..5f6cf7212d4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -227,6 +227,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_PRIV_FLAG("disable-source-pruning",
+ I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -251,428 +253,557 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
/**
* i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
- * @phy_types: PHY types to convert
- * @supported: pointer to the ethtool supported variable to fill in
- * @advertising: pointer to the ethtool advertising variable to fill in
+ * @pf: PF struct with phy_types
+ * @ks: ethtool link ksettings struct to fill out
*
**/
-static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
- u32 *advertising)
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
+ struct ethtool_link_ksettings *ks)
{
struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
u64 phy_types = pf->hw.phy.phy_types;
- *supported = 0x0;
- *advertising = 0x0;
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- *supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- *advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- *advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
- *supported |= SUPPORTED_100baseT_Full;
- *advertising |= ADVERTISED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
}
}
if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
phy_types & I40E_CAP_PHY_TYPE_XFI ||
phy_types & I40E_CAP_PHY_TYPE_SFI ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- *supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- *supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- *advertising |= ADVERTISED_Autoneg;
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- *advertising |= ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- *supported |= SUPPORTED_40000baseCR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- *supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- *advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
- *advertising |= ADVERTISED_40000baseCR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
- *supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- *advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- *advertising |= ADVERTISED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- *supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- *advertising |= ADVERTISED_Autoneg;
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- *advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- *supported |= SUPPORTED_40000baseSR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- *supported |= SUPPORTED_40000baseLR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- *supported |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- *supported |= SUPPORTED_20000baseKR2_Full |
- SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 20000baseKR2_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
- *advertising |= ADVERTISED_20000baseKR2_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 20000baseKR2_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
- *supported |= SUPPORTED_10000baseKR_Full |
- SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_Autoneg;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKX4_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
- *advertising |= ADVERTISED_10000baseKR_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKX4_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- *supported |= SUPPORTED_10000baseKX4_Full |
- SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_Autoneg;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- *advertising |= ADVERTISED_10000baseKX4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
- *supported |= SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_Autoneg;
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
+ !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER))
- *advertising |= ADVERTISED_1000baseKX_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
- phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
- phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ /* need to add 25G PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
- *supported |= SUPPORTED_Autoneg;
- *advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ }
+ /* need to add new 10G PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseCR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseCR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ }
+ /* Autoneg PHY types */
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
+ phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
+ phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Autoneg);
}
}
/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
- * @ecmd: ethtool command to fill in
+ * @ks: ethtool ksettings to fill in
* @netdev: network interface device structure
- *
+ * @pf: pointer to physical function struct
**/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
- struct ethtool_link_ksettings *cmd,
+ struct ethtool_link_ksettings *ks,
struct net_device *netdev,
struct i40e_pf *pf)
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ struct ethtool_link_ksettings cap_ksettings;
u32 link_speed = hw_link_info->link_speed;
- u32 e_advertising = 0x0;
- u32 e_supported = 0x0;
- u32 supported, advertising;
-
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
- supported = SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- advertising = ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
break;
case I40E_PHY_TYPE_XLAUI:
case I40E_PHY_TYPE_XLPPI:
case I40E_PHY_TYPE_40GBASE_AOC:
- supported = SUPPORTED_40000baseCR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseCR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- supported = SUPPORTED_40000baseSR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseSR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_LR4:
- supported = SUPPORTED_40000baseLR4_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseLR4_Full);
break;
+ case I40E_PHY_TYPE_25GBASE_SR:
+ case I40E_PHY_TYPE_25GBASE_LR:
case I40E_PHY_TYPE_10GBASE_SR:
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- supported = SUPPORTED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
if (hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_SX ||
hw_link_info->module_type[2] &
I40E_MODULE_TYPE_1000BASE_LX) {
- supported |= SUPPORTED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_1GB)
- advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(
+ ks, advertising, 1000baseT_Full);
}
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- advertising |= ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
- supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- advertising = ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
- advertising |= ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- advertising |= ADVERTISED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 100baseT_Full);
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
- supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
- supported = SUPPORTED_10000baseT_Full;
- advertising = SUPPORTED_10000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseT_Full);
break;
case I40E_PHY_TYPE_SGMII:
- supported = SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
- advertising |= ADVERTISED_1000baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseT_Full);
if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
- supported |= SUPPORTED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100baseT_Full);
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_100MB)
- advertising |= ADVERTISED_100baseT_Full;
+ ethtool_link_ksettings_add_link_mode(
+ ks, advertising, 100baseT_Full);
}
break;
case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_25GBASE_KR:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
- supported |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_20000baseKR2_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_1000baseKX_Full |
- SUPPORTED_Autoneg;
- advertising |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_20000baseKR2_Full |
- ADVERTISED_10000baseKR_Full |
- ADVERTISED_10000baseKX4_Full |
- ADVERTISED_1000baseKX_Full |
- ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 20000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseKR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 20000baseKR2_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseKX4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 1000baseKX_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
break;
- case I40E_PHY_TYPE_25GBASE_KR:
case I40E_PHY_TYPE_25GBASE_CR:
- case I40E_PHY_TYPE_25GBASE_SR:
- case I40E_PHY_TYPE_25GBASE_LR:
- supported = SUPPORTED_Autoneg;
- advertising = ADVERTISED_Autoneg;
- /* TODO: add speeds when ethtool is ready to support*/
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ break;
+ case I40E_PHY_TYPE_25GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_ACC:
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 25000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 25000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 10000baseCR_Full);
break;
default:
/* if we got here and link is up something bad is afoot */
- netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
+ netdev_info(netdev,
+ "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
hw_link_info->phy_type);
}
/* Now that we've worked out everything that could be supported by the
- * current PHY type, get what is supported by the NVM and them to
- * get what is truly supported
+ * current PHY type, get what is supported by the NVM and intersect
+ * them to get what is truly supported
*/
- i40e_phy_type_to_ethtool(pf, &e_supported,
- &e_advertising);
-
- supported = supported & e_supported;
- advertising = advertising & e_advertising;
+ memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
+ i40e_phy_type_to_ethtool(pf, &cap_ksettings);
+ ethtool_intersect_link_masks(ks, &cap_ksettings);
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
- cmd->base.speed = SPEED_40000;
+ ks->base.speed = SPEED_40000;
break;
case I40E_LINK_SPEED_25GB:
-#ifdef SPEED_25000
- cmd->base.speed = SPEED_25000;
-#else
- netdev_info(netdev,
- "Speed is 25G, display not supported by this version of ethtool.\n");
-#endif
+ ks->base.speed = SPEED_25000;
break;
case I40E_LINK_SPEED_20GB:
- cmd->base.speed = SPEED_20000;
+ ks->base.speed = SPEED_20000;
break;
case I40E_LINK_SPEED_10GB:
- cmd->base.speed = SPEED_10000;
+ ks->base.speed = SPEED_10000;
break;
case I40E_LINK_SPEED_1GB:
- cmd->base.speed = SPEED_1000;
+ ks->base.speed = SPEED_1000;
break;
case I40E_LINK_SPEED_100MB:
- cmd->base.speed = SPEED_100;
+ ks->base.speed = SPEED_100;
break;
default:
break;
}
- cmd->base.duplex = DUPLEX_FULL;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ ks->base.duplex = DUPLEX_FULL;
}
/**
* i40e_get_settings_link_down - Get the Link settings for when link is down
* @hw: hw structure
- * @ecmd: ethtool command to fill in
+ * @ks: ethtool ksettings to fill in
+ * @pf: pointer to physical function struct
*
* Reports link settings that can be determined when link is down
**/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
- struct ethtool_link_ksettings *cmd,
+ struct ethtool_link_ksettings *ks,
struct i40e_pf *pf)
{
- u32 supported, advertising;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- i40e_phy_type_to_ethtool(pf, &supported, &advertising);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ i40e_phy_type_to_ethtool(pf, ks);
	/* With no link, speed and duplex are unknown */
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
}
/**
- * i40e_get_settings - Get Link Speed and Duplex settings
+ * i40e_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @ks: ethtool ksettings
*
* Reports speed/duplex settings based on media_type
**/
static int i40e_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
+ struct ethtool_link_ksettings *ks)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
- u32 advertising;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
if (link_up)
- i40e_get_settings_link_up(hw, cmd, netdev, pf);
+ i40e_get_settings_link_up(hw, ks, netdev, pf);
else
- i40e_get_settings_link_down(hw, cmd, pf);
+ i40e_get_settings_link_down(hw, ks, pf);
/* Now set the settings that don't rely on link being up/down */
/* Set autoneg settings */
- cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE);
+ ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+ /* Set media type settings */
switch (hw->phy.media_type) {
case I40E_MEDIA_TYPE_BACKPLANE:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- Backplane);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
- cmd->base.port = PORT_NONE;
+ ks->base.port = PORT_NONE;
break;
case I40E_MEDIA_TYPE_BASET:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- cmd->base.port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(ks, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
+ ks->base.port = PORT_TP;
break;
case I40E_MEDIA_TYPE_DA:
case I40E_MEDIA_TYPE_CX4:
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
- cmd->base.port = PORT_DA;
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
+ ks->base.port = PORT_DA;
break;
case I40E_MEDIA_TYPE_FIBER:
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
- cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ks->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
default:
- cmd->base.port = PORT_OTHER;
+ ks->base.port = PORT_OTHER;
break;
}
/* Set flow control settings */
- ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Pause);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
break;
case I40E_FC_TX_PAUSE:
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
break;
case I40E_FC_RX_PAUSE:
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- Pause);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
break;
default:
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, cmd->link_modes.advertising);
-
- advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
-
- ethtool_convert_legacy_u32_to_link_mode(
- cmd->link_modes.advertising, advertising);
+ ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, advertising,
+ Asym_Pause);
break;
}
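
The hunk above is the core of the ksettings conversion: the legacy u32 SUPPORTED_*/ADVERTISED_* masks are replaced by the ethtool link-mode bitmaps, which is what makes the 25G modes representable at all, and the modes derived from the PHY-type capabilities are intersected with what the current PHY reports via ethtool_intersect_link_masks(). The standalone, userspace-only sketch below illustrates the bitmap idea; the bit numbers are invented for the demo and are not the real ETHTOOL_LINK_MODE_*_BIT values.

/* Illustrative only: link modes as a multi-word bitmap instead of one u32. */
#include <stdbool.h>
#include <stdio.h>

#define NBITS          96
#define BITS_PER_LONG  (8 * (int)sizeof(unsigned long))
#define NWORDS         ((NBITS + BITS_PER_LONG - 1) / BITS_PER_LONG)

enum { DEMO_10000baseT_Full = 12, DEMO_25000baseCR_Full = 35, DEMO_40000baseKR4_Full = 40 };

static void mode_add(unsigned long *map, int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool mode_test(const unsigned long *map, int bit)
{
	return map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG));
}

/* Rough equivalent of ethtool_intersect_link_masks(): keep only the modes
 * present in both masks.
 */
static void mode_intersect(unsigned long *dst, const unsigned long *other)
{
	for (int i = 0; i < NWORDS; i++)
		dst[i] &= other[i];
}

int main(void)
{
	unsigned long phy_caps[NWORDS] = { 0 }, link_modes[NWORDS] = { 0 };

	mode_add(phy_caps, DEMO_10000baseT_Full);
	mode_add(phy_caps, DEMO_25000baseCR_Full);	/* bits above 31 now fit */
	mode_add(link_modes, DEMO_25000baseCR_Full);
	mode_add(link_modes, DEMO_40000baseKR4_Full);

	mode_intersect(link_modes, phy_caps);
	printf("25G after intersect: %d\n", mode_test(link_modes, DEMO_25000baseCR_Full)); /* 1 */
	printf("40G after intersect: %d\n", mode_test(link_modes, DEMO_40000baseKR4_Full)); /* 0 */
	return 0;
}

In the kernel the same operations are wrapped by ethtool_link_ksettings_zero/add/test_link_mode(), and the intersection is effectively a word-by-word AND of the two masks.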
@@ -680,30 +811,28 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
}
/**
- * i40e_set_settings - Set Speed and Duplex
+ * i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
- * @ecmd: ethtool command
+ * @ks: ethtool ksettings
*
* Set speed/duplex per media_types advertised/forced
**/
static int i40e_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
+ const struct ethtool_link_ksettings *ks)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
+ struct ethtool_link_ksettings safe_ks;
+ struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
- struct ethtool_link_ksettings safe_cmd;
- struct ethtool_link_ksettings copy_cmd;
+ bool autoneg_changed = false;
i40e_status status = 0;
- bool change = false;
int timeout = 50;
int err = 0;
- u32 autoneg;
- u32 advertise;
- u32 tmp;
+ u8 autoneg;
/* Changing port settings is not supported if this isn't the
* port's controlling PF
@@ -712,17 +841,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
i40e_partition_setting_complaint(pf);
return -EOPNOTSUPP;
}
-
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
-
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
-
if (hw->device_id == I40E_DEV_ID_KX_B ||
hw->device_id == I40E_DEV_ID_KX_C ||
hw->device_id == I40E_DEV_ID_20G_KR2 ||
@@ -731,31 +857,37 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
}
- /* copy the cmd to copy_cmd to avoid modifying the origin */
- memcpy(&copy_cmd, cmd, sizeof(struct ethtool_link_ksettings));
+ /* copy the ksettings to copy_ks to avoid modifying the origin */
+ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));
- /* get our own copy of the bits to check against */
- memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings));
- i40e_get_link_ksettings(netdev, &safe_cmd);
+ /* save autoneg out of ksettings */
+ autoneg = copy_ks.base.autoneg;
- /* save autoneg and speed out of cmd */
- autoneg = cmd->base.autoneg;
- ethtool_convert_link_mode_to_legacy_u32(&advertise,
- cmd->link_modes.advertising);
+ memset(&safe_ks, 0, sizeof(safe_ks));
+ /* Get link modes supported by hardware and check against modes
+ * requested by the user. Return an error if unsupported mode was set.
+ */
+ i40e_phy_type_to_ethtool(pf, &safe_ks);
+ if (!bitmap_subset(copy_ks.link_modes.advertising,
+ safe_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
- /* set autoneg and speed back to what they currently are */
- copy_cmd.base.autoneg = safe_cmd.base.autoneg;
- ethtool_convert_link_mode_to_legacy_u32(
- &tmp, safe_cmd.link_modes.advertising);
- ethtool_convert_legacy_u32_to_link_mode(
- copy_cmd.link_modes.advertising, tmp);
+ /* get our own copy of the bits to check against */
+ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
+ safe_ks.base.cmd = copy_ks.base.cmd;
+ safe_ks.base.link_mode_masks_nwords =
+ copy_ks.base.link_mode_masks_nwords;
+ i40e_get_link_ksettings(netdev, &safe_ks);
- copy_cmd.base.cmd = safe_cmd.base.cmd;
+ /* set autoneg back to what it currently is */
+ copy_ks.base.autoneg = safe_ks.base.autoneg;
- /* If copy_cmd and safe_cmd are not the same now, then they are
- * trying to set something that we do not support
+ /* If copy_ks.base and safe_ks.base are not the same now, then they are
+ * trying to set something that we do not support.
*/
- if (memcmp(&copy_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings)))
+ if (memcmp(&copy_ks.base, &safe_ks.base,
+ sizeof(struct ethtool_link_settings)))
return -EOPNOTSUPP;
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
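
Before touching the hardware, the rewritten i40e_set_link_ksettings() now rejects any request whose advertised modes are not a subset of what i40e_phy_type_to_ethtool() reports as supported, using bitmap_subset(). A simplified standalone model of that subset test follows; the real helper takes a bit count and masks off unused bits in the final word, which this sketch omits.

#include <stdbool.h>
#include <stdio.h>

/* sub is a subset of super when no word of sub has a bit that super lacks */
static bool masks_subset(const unsigned long *sub, const unsigned long *super,
			 int nwords)
{
	for (int i = 0; i < nwords; i++)
		if (sub[i] & ~super[i])
			return false;
	return true;
}

int main(void)
{
	unsigned long supported[2]   = { 0x30UL, 0x0UL };	/* two supported modes */
	unsigned long advertising[2] = { 0x10UL, 0x0UL };	/* request one of them */

	printf("valid request: %d\n", masks_subset(advertising, supported, 2)); /* 1 */

	advertising[1] = 0x1UL;			/* request an unsupported mode */
	printf("valid request: %d\n", masks_subset(advertising, supported, 2)); /* 0 */
	return 0;
}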
@@ -784,8 +916,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* If autoneg was not already enabled */
if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
/* If autoneg is not supported, return error */
- if (!ethtool_link_ksettings_test_link_mode(
- &safe_cmd, supported, Autoneg)) {
+ if (!ethtool_link_ksettings_test_link_mode(&safe_ks,
+ supported,
+ Autoneg)) {
netdev_info(netdev, "Autoneg not supported on this phy\n");
err = -EINVAL;
goto done;
@@ -793,7 +926,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* Autoneg is allowed to change */
config.abilities = abilities.abilities |
I40E_AQ_PHY_ENABLE_AN;
- change = true;
+ autoneg_changed = true;
}
} else {
/* If autoneg is currently enabled */
@@ -801,8 +934,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
- if (ethtool_link_ksettings_test_link_mode(
- &safe_cmd, supported, Autoneg) &&
+ if (ethtool_link_ksettings_test_link_mode(&safe_ks,
+ supported,
+ Autoneg) &&
hw->phy.link_info.phy_type !=
I40E_PHY_TYPE_10GBASE_T) {
netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
@@ -812,32 +946,49 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* Autoneg is allowed to change */
config.abilities = abilities.abilities &
~I40E_AQ_PHY_ENABLE_AN;
- change = true;
+ autoneg_changed = true;
}
}
- ethtool_convert_link_mode_to_legacy_u32(&tmp,
- safe_cmd.link_modes.supported);
- if (advertise & ~tmp) {
- err = -EINVAL;
- goto done;
- }
-
- if (advertise & ADVERTISED_100baseT_Full)
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100baseT_Full))
config.link_speed |= I40E_LINK_SPEED_100MB;
- if (advertise & ADVERTISED_1000baseT_Full ||
- advertise & ADVERTISED_1000baseKX_Full)
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
config.link_speed |= I40E_LINK_SPEED_1GB;
- if (advertise & ADVERTISED_10000baseT_Full ||
- advertise & ADVERTISED_10000baseKX4_Full ||
- advertise & ADVERTISED_10000baseKR_Full)
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKX4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseSR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
- if (advertise & ADVERTISED_20000baseKR2_Full)
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 20000baseKR2_Full))
config.link_speed |= I40E_LINK_SPEED_20GB;
- if (advertise & ADVERTISED_40000baseKR4_Full ||
- advertise & ADVERTISED_40000baseCR4_Full ||
- advertise & ADVERTISED_40000baseSR4_Full ||
- advertise & ADVERTISED_40000baseLR4_Full)
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseCR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 25000baseSR_Full))
+ config.link_speed |= I40E_LINK_SPEED_25GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseKR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseCR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseSR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
/* If speed didn't get set, set it to what it currently is.
@@ -846,8 +997,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
*/
if (!config.link_speed)
config.link_speed = abilities.link_speed;
-
- if (change || (abilities.link_speed != config.link_speed)) {
+ if (autoneg_changed || abilities.link_speed != config.link_speed) {
/* copy over the rest of the abilities */
config.phy_type = abilities.phy_type;
config.phy_type_ext = abilities.phy_type_ext;
@@ -874,7 +1024,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* make the aq call */
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
- netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+ netdev_info(netdev,
+ "Set phy config failed, err %s aq_err %s\n",
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
@@ -883,7 +1034,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
- netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+ netdev_dbg(netdev,
+ "Updating link info failed with err %s aq_err %s\n",
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
@@ -1570,7 +1722,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
@@ -2008,7 +2160,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
+ NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2033,7 +2187,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
- i40e_aq_set_phy_debug(hw, 0, NULL);
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, 0, NULL);
}
break;
default:
@@ -2071,14 +2226,13 @@ static int __i40e_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
- /* rx and tx usecs has per queue value. If user doesn't specify the queue,
- * return queue 0's value to represent.
+	/* rx and tx usecs have a per-queue value. If user doesn't specify the
+	 * queue, return queue 0's value as representative.
*/
- if (queue < 0) {
+ if (queue < 0)
queue = 0;
- } else if (queue >= vsi->num_queue_pairs) {
+ else if (queue >= vsi->num_queue_pairs)
return -EINVAL;
- }
rx_ring = vsi->rx_rings[queue];
tx_ring = vsi->tx_rings[queue];
@@ -2092,7 +2246,6 @@ static int __i40e_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
-
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
* fits the original intent of the ethtool variable,
@@ -2142,7 +2295,6 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
*
* Change the ITR settings for a specific queue.
**/
-
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2264,8 +2416,8 @@ static int __i40e_set_coalesce(struct net_device *netdev,
vsi->int_rate_limit);
}
- /* rx and tx usecs has per queue value. If user doesn't specify the queue,
- * apply to all queues.
+	/* rx and tx usecs have a per-queue value. If user doesn't specify the
+ * queue, apply to all queues.
*/
if (queue < 0) {
for (i = 0; i < vsi->num_queue_pairs; i++)
@@ -2647,7 +2799,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = vsi->num_queue_pairs;
+ cmd->data = vsi->rss_size;
ret = 0;
break;
case ETHTOOL_GRXFH:
@@ -3892,6 +4044,12 @@ static int i40e_set_channels(struct net_device *dev,
if (vsi->type != I40E_VSI_MAIN)
return -EINVAL;
+ /* We do not support setting channels via ethtool when TCs are
+ * configured through mqprio
+ */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ return -EINVAL;
+
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
@@ -3959,6 +4117,16 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
return I40E_HLUT_ARRAY_SIZE;
}
+/**
+ * i40e_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware. Returns 0 on
+ * success.
+ **/
static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -4090,7 +4258,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u64 orig_flags, new_flags, changed_flags;
+ u32 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
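
i40e_set_priv_flags() keeps its lockless update scheme: snapshot pf->flags with READ_ONCE(), compute the new value, and publish it with a compare-and-exchange that fails if another thread modified the flags in between; the patch only narrows this from cmpxchg64 to cmpxchg. The driver bails out with -EAGAIN when it loses the race; the standalone C11 sketch below shows the same pattern with a retry loop instead, and the flag bit is a made-up value for the demo.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_LEGACY_RX	(1u << 3)	/* demo bit position only */

static _Atomic unsigned int pf_flags;

/* Set or clear one flag without a lock; retry if another thread raced us. */
static void change_flag(unsigned int flag, int enable)
{
	unsigned int old, new;

	old = atomic_load(&pf_flags);
	do {
		new = enable ? (old | flag) : (old & ~flag);
		/* on failure, 'old' is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(&pf_flags, &old, new));
}

int main(void)
{
	change_flag(FLAG_LEGACY_RX, 1);
	printf("flags = %#x\n", atomic_load(&pf_flags));
	change_flag(FLAG_LEGACY_RX, 0);
	printf("flags = %#x\n", atomic_load(&pf_flags));
	return 0;
}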
@@ -4142,12 +4310,12 @@ flags_complete:
return -EOPNOTSUPP;
/* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg64 returns anything but the old value, this means that
+ * is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
- if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
+ if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
@@ -4175,7 +4343,7 @@ flags_complete:
sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
- NULL);
+ 0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
"couldn't set switch config bits, err %s aq_err %s\n",
@@ -4189,13 +4357,166 @@ flags_complete:
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
- ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX |
+ I40E_FLAG_SOURCE_PRUNING_DISABLED))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
}
+/**
+ * i40e_get_module_info - get (Q)SFP+ module type info
+ * @netdev: network interface device structure
+ * @modinfo: module EEPROM size and layout information structure
+ **/
+static int i40e_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 sff8472_comp = 0;
+ u32 sff8472_swap = 0;
+ u32 sff8636_rev = 0;
+ i40e_status status;
+ u32 type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n");
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ netdev_err(vsi->netdev, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
+ * @netdev: network interface device structure
+ * @ee: EEPROM dump request structure
+ * @data: buffer to be filled with EEPROM contents
+ **/
+static int i40e_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ bool is_sfp = false;
+ i40e_status status;
+ u32 value = 0;
+ int i;
+
+ if (!ee || !ee->len || !data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < ee->len; i++) {
+ u32 offset = i + ee->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= ETH_MODULE_SFF_8079_LEN) {
+ offset -= ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = value;
+ }
+ return 0;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
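
The new i40e_get_module_eeprom() maps the flat ethtool offset onto the module's address space: SFP offsets past the first 256 bytes (ETH_MODULE_SFF_8079_LEN) switch to the second I2C device address, while QSFP offsets are folded into 128-byte upper pages by repeatedly subtracting half of ETH_MODULE_SFF_8436_LEN. The standalone sketch below reproduces only that arithmetic; the numeric constants are assumptions standing in for the kernel definitions.

#include <stdio.h>

#define SFF_8079_LEN	256	/* assumption: ETH_MODULE_SFF_8079_LEN      */
#define SFF_8436_LEN	256	/* assumption: ETH_MODULE_SFF_8436_LEN      */
#define SFP_ADDR_A0	0xA0	/* assumption: I40E_I2C_EEPROM_DEV_ADDR     */
#define SFP_ADDR_A2	0xA2	/* assumption: I40E_I2C_EEPROM_DEV_ADDR2    */

/* Flat ethtool offset -> (device address or page number, offset within it) */
static void map_offset(int is_sfp, unsigned int offset,
		       unsigned int *addr, unsigned int *off)
{
	*off = offset;
	if (is_sfp) {
		*addr = SFP_ADDR_A0;
		if (*off >= SFF_8079_LEN) {		/* second EEPROM device */
			*off -= SFF_8079_LEN;
			*addr = SFP_ADDR_A2;
		}
	} else {
		*addr = 0;				/* QSFP: page number */
		while (*off >= SFF_8436_LEN) {		/* fold into upper pages */
			*off -= SFF_8436_LEN / 2;
			(*addr)++;
		}
	}
}

int main(void)
{
	unsigned int addr, off;

	map_offset(1, 300, &addr, &off);
	printf("SFP  offset 300 -> dev %#x, off %u\n", addr, off);	/* 0xa2, 44 */
	map_offset(0, 300, &addr, &off);
	printf("QSFP offset 300 -> page %u, off %u\n", addr, off);	/* 1, 172  */
	return 0;
}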
@@ -4228,6 +4549,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_rxfh = i40e_set_rxfh,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
.get_ts_info = i40e_get_ts_info,
.get_priv_flags = i40e_get_priv_flags,
.set_priv_flags = i40e_set_priv_flags,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6498da8806cb..4c08cc86463e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -69,6 +69,15 @@ static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
+static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+static int i40e_get_capabilities(struct i40e_pf *pf,
+ enum i40e_admin_queue_opc list_type);
+
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -455,7 +464,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
u64 bytes, packets;
unsigned int start;
- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
i40e_get_netdev_stats_struct_tx(tx_ring, stats);
@@ -600,6 +609,20 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
}
/**
+ * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read and clear
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
+{
+ u32 new_data = rd32(hw, reg);
+
+ wr32(hw, reg, 1); /* must write a nonzero value to clear register */
+ *stat += new_data;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
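
The new i40e_stat_update_and_clear32() switches the flow director counters from offset-based accumulation to read-and-clear: read the 32-bit register, clear it by writing a nonzero value, and add the delta into a 64-bit software counter so the narrow register can wrap many times between polls without losing counts. A small standalone sketch of that accumulation against a fake register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;		/* stands in for the FDIR match counter */

static uint32_t rd32_fake(void)
{
	return fake_reg;
}

static void wr32_fake(uint32_t v)
{
	if (v)				/* a nonzero write clears the counter */
		fake_reg = 0;
}

/* Read the count accumulated since the last visit, clear it, add it up. */
static void stat_update_and_clear32(uint64_t *stat)
{
	uint32_t new_data = rd32_fake();

	wr32_fake(1);
	*stat += new_data;
}

int main(void)
{
	uint64_t fd_atr_match = 0;

	fake_reg = 40000;		/* pretend 40000 matches happened */
	stat_update_and_clear32(&fd_atr_match);
	fake_reg = 2500;		/* 2500 more since the last poll */
	stat_update_and_clear32(&fd_atr_match);

	printf("fd_atr_match = %llu\n", (unsigned long long)fd_atr_match); /* 42500 */
	return 0;
}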
@@ -791,7 +814,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
- p = ACCESS_ONCE(vsi->tx_rings[q]);
+ p = READ_ONCE(vsi->tx_rings[q]);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -1040,18 +1063,15 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_jabber, &nsd->rx_jabber);
/* FDIR stats */
- i40e_stat_update32(hw,
- I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
- pf->stat_offsets_loaded,
- &osd->fd_atr_match, &nsd->fd_atr_match);
- i40e_stat_update32(hw,
- I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
- pf->stat_offsets_loaded,
- &osd->fd_sb_match, &nsd->fd_sb_match);
- i40e_stat_update32(hw,
- I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
- pf->stat_offsets_loaded,
- &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
+ &nsd->fd_atr_match);
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
+ &nsd->fd_sb_match);
+ i40e_stat_update_and_clear32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
+ &nsd->fd_atr_tunnel_match);
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
@@ -1578,6 +1598,170 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
}
/**
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret = 0;
+
+ if (seed) {
+ struct i40e_aqc_get_set_rss_key_data *seed_dw =
+ (struct i40e_aqc_get_set_rss_key_data *)seed;
+ ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS key, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+ ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ u8 *lut;
+ int ret;
+
+ if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
+ return 0;
+ if (!vsi->rss_size)
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Use the user configured hash keys and lookup table if there is one,
+ * otherwise use default
+ */
+ if (vsi->rss_lut_user)
+ memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+ else
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+ ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
+ kfree(lut);
+ return ret;
+}
+
+/**
+ * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured,
+ * @ctxt: VSI context structure
+ * @enabled_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ **/
+static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc)
+{
+ u16 qcount = 0, max_qcount, qmap, sections = 0;
+ int i, override_q, pow, num_qps, ret;
+ u8 netdev_tc = 0, offset = 0;
+
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+ vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ num_qps = vsi->mqprio_qopt.qopt.count[0];
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = ilog2(num_qps);
+ if (!is_power_of_2(num_qps))
+ pow++;
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ max_qcount = vsi->mqprio_qopt.qopt.count[0];
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount = vsi->mqprio_qopt.qopt.count[i];
+ if (qcount > max_qcount)
+ max_qcount = qcount;
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_queue_pairs = offset + qcount;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+
+ /* Reconfigure RSS for main VSI with max queue count */
+ vsi->rss_size = max_qcount;
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reconfig rss for num_queues (%u)\n",
+ max_qcount);
+ return ret;
+ }
+ vsi->reconfig_rss = true;
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reconfigured rss with num_queues (%u)\n", max_qcount);
+
+ /* Find queue count available for channel VSIs and starting offset
+ * for channel VSIs
+ */
+ override_q = vsi->mqprio_qopt.qopt.count[0];
+ if (override_q && override_q < vsi->num_queue_pairs) {
+ vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
+ vsi->next_base_queue = override_q;
+ }
+ return 0;
+}
+
+/**
* i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
* @vsi: the VSI being setup
* @ctxt: VSI context structure
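
In i40e_vsi_setup_queue_map_mqprio() above, the TC0 queue count taken from the mqprio options is encoded in the qmap field as a power-of-2 exponent, so it is rounded up to the next power of two using ilog2(). The standalone sketch below shows the rounding and the shift/offset packing; the shift values are assumptions standing in for the I40E_AQ_VSI_TC_QUE_* definitions.

#include <stdio.h>

#define QUE_OFFSET_SHIFT 0	/* assumption: I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT */
#define QUE_NUMBER_SHIFT 9	/* assumption: I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT */

static int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static int is_power_of_2(unsigned int v)
{
	return v && !(v & (v - 1));
}

/* Round the TC0 queue count up to the next power of two and pack the qmap. */
static unsigned short build_qmap(unsigned int offset, unsigned int num_qps)
{
	int pow = ilog2_u32(num_qps);

	if (!is_power_of_2(num_qps))
		pow++;
	return (offset << QUE_OFFSET_SHIFT) | (pow << QUE_NUMBER_SHIFT);
}

int main(void)
{
	/* 6 queues on TC0 -> exponent 3, i.e. 8 queues reserved in the map */
	printf("qmap = %#x\n", build_qmap(0, 6));
	return 0;
}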
@@ -1615,7 +1799,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
numtc = 1;
}
} else {
- /* At least TC0 is enabled in case of non-DCB case */
+ /* At least TC0 is enabled in non-DCB, non-MQPRIO case */
numtc = 1;
}
@@ -1765,11 +1949,6 @@ static void i40e_set_rx_mode(struct net_device *netdev)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
-
- /* schedule our worker thread which will take care of
- * applying the new filter changes
- */
- i40e_service_event_schedule(vsi->back);
}
/**
@@ -1988,6 +2167,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
}
/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -2288,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
- if ((vsi->type == I40E_VSI_MAIN) &&
- (pf->lan_veb != I40E_NO_VEB) &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and VLAN
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
- */
- if (pf->cur_promisc != cur_promisc) {
- pf->cur_promisc = cur_promisc;
- if (cur_promisc)
- aq_ret =
- i40e_aq_set_default_vsi(hw,
- vsi->seid,
- NULL);
- else
- aq_ret =
- i40e_aq_clear_default_vsi(hw,
- vsi->seid,
- NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "Set default VSI failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set unicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set multicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
@@ -2873,22 +3054,18 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
**/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
- struct i40e_vsi *vsi = ring->vsi;
+ int cpu;
- if (!ring->q_vector || !ring->netdev)
+ if (!ring->q_vector || !ring->netdev || ring->ch)
return;
- if ((vsi->tc_config.numtc <= 1) &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
- netif_set_xps_queue(ring->netdev,
- get_cpu_mask(ring->q_vector->v_idx),
- ring->queue_index);
- }
+ /* We only initialize XPS once, so as not to overwrite user settings */
+ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+ return;
- /* schedule our worker thread which will take care of
- * applying the new filter changes
- */
- i40e_service_event_schedule(vsi->back);
+ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+ ring->queue_index);
}
/**
@@ -2942,7 +3119,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
* initialization. This has to be done regardless of
* DCB as by default everything is mapped to TC0.
*/
- tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+
+ if (ring->ch)
+ tx_ctx.rdylist =
+ le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
+
+ else
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+
tx_ctx.rdylist_act = 0;
/* clear the context in the HMC */
@@ -2964,12 +3148,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- if (vsi->type == I40E_VSI_VMDQ2) {
- qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
- qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
- I40E_QTX_CTL_VFVM_INDX_MASK;
+ if (ring->ch) {
+ if (ring->ch->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ return -EINVAL;
+
+ qtx_ctl |= (ring->ch->vsi_number <<
+ I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
} else {
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ if (vsi->type == I40E_VSI_VMDQ2) {
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
+ I40E_QTX_CTL_VFVM_INDX_MASK;
+ } else {
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ }
}
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
@@ -2998,7 +3193,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
i40e_status err = 0;
- ring->state = 0;
+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
@@ -3023,7 +3218,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
@@ -3138,6 +3333,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
rx_ring->dcb_tc = 0;
tx_ring->dcb_tc = 0;
}
+ return;
}
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
@@ -3396,15 +3592,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
- * @clearpba: true when all pending interrupt events should be cleared
**/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, val);
@@ -3471,6 +3666,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int tx_int_idx = 0;
int vector, err;
int irq_num;
+ int cpu;
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
@@ -3506,10 +3702,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* get_cpu_mask returns a static constant mask with
- * a permanent lifetime so it's ok to use here.
+ /* Spread affinity hints out across online CPUs.
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+ * irq_set_affinity_hint without making a copy.
*/
- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3585,7 +3785,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
}
i40e_flush(&pf->hw);
@@ -3593,14 +3793,20 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
}
/**
- * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * i40e_free_misc_vector - Free the vector that handles non-queue events
* @pf: board private structure
**/
-static void i40e_stop_misc_vector(struct i40e_pf *pf)
+static void i40e_free_misc_vector(struct i40e_pf *pf)
{
/* Disable ICR 0 */
wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
i40e_flush(&pf->hw);
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
+ }
}
/**
@@ -3728,7 +3934,7 @@ enable_intr:
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
}
return ret;
@@ -3760,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
@@ -4455,11 +4661,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
+ i40e_free_misc_vector(pf);
i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
I40E_IWARP_IRQ_PILE_ID);
@@ -4848,6 +5050,24 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
}
/**
+ * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Query the current MQPRIO configuration and return the number of
+ * traffic classes enabled.
+ **/
+static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ u8 enabled_tc = 1, i;
+
+ for (i = 1; i < num_tc; i++)
+ enabled_tc |= BIT(i);
+ return enabled_tc;
+}
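For illustration only (not part of the patch): the helper above simply expands the MQPRIO TC count into a contiguous bitmap, so three traffic classes yield 0x7. A minimal sketch of that mapping, using only the bit arithmetic shown above:

	/* illustration only: num_tc -> enabled_tc bitmap */
	u8 num_tc = 3;
	u8 enabled_tc = 1, i;		/* TC0 is always enabled */

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);	/* 0x1 -> 0x3 -> 0x7 */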
+
+/**
* i40e_pf_get_num_tc - Get enabled traffic classes for PF
* @pf: PF being queried
*
@@ -4860,7 +5080,10 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- /* If DCB is not enabled then always in single TC */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+
+ /* If neither MQPRIO nor DCB is enabled, then always use single TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return 1;
@@ -4889,7 +5112,12 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
- /* If DCB is not enabled for this PF then just return default TC */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ return i40e_mqprio_get_enabled_tc(pf);
+
+ /* If neither MQPRIO nor DCB is enabled for this PF then just return
+ * default TC
+ */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return I40E_DEFAULT_TRAFFIC_CLASS;
@@ -4979,6 +5207,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
i40e_status ret;
int i;
+ if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+ return 0;
+ if (!vsi->mqprio_qopt.qopt.hw) {
+ ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
+ if (ret)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reset tx rate for vsi->seid %u\n",
+ vsi->seid);
+ return ret;
+ }
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
@@ -5041,6 +5279,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset);
}
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ return;
+
/* Assign UP2TC map for the VSI */
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
/* Get the actual TC# for the UP */
@@ -5091,7 +5332,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
int i;
/* Check if enabled_tc is same as existing or new TCs */
- if (vsi->tc_config.enabled_tc == enabled_tc)
+ if (vsi->tc_config.enabled_tc == enabled_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
return ret;
/* Enable ETS TCs with equal BW Share for now across all VSIs */
@@ -5114,15 +5356,37 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info;
- i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+ ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
+ if (ret)
+ goto out;
+ } else {
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ }
+ /* On destroying the qdisc, reset vsi->rss_size, as number of enabled
+ * queues changed.
+ */
+ if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
+ vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
+ vsi->num_queue_pairs);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reconfig rss for num_queues\n");
+ return ret;
+ }
+ vsi->reconfig_rss = false;
+ }
if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
}
- /* Update the VSI after updating the VSI queue-mapping information */
+ /* Update the VSI after updating the VSI queue-mapping
+ * information
+ */
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -5154,6 +5418,817 @@ out:
}
/**
+ * i40e_get_link_speed - Returns link speed for the interface
+ * @vsi: VSI to be configured
+ *
+ **/
+int i40e_get_link_speed(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ return 40000;
+ case I40E_LINK_SPEED_25GB:
+ return 25000;
+ case I40E_LINK_SPEED_20GB:
+ return 20000;
+ case I40E_LINK_SPEED_10GB:
+ return 10000;
+ case I40E_LINK_SPEED_1GB:
+ return 1000;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @seid: seid of the channel/VSI
+ * @max_tx_rate: max TX rate to be configured as BW limit
+ *
+ * Helper function to set BW limit for a given VSI
+ **/
+int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+{
+ struct i40e_pf *pf = vsi->back;
+ u64 credits = 0;
+ int speed = 0;
+ int ret = 0;
+
+ speed = i40e_get_link_speed(vsi);
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev,
+ "Invalid max tx rate %llu specified for VSI seid %d.",
+ max_tx_rate, seid);
+ return -EINVAL;
+ }
+ if (max_tx_rate && max_tx_rate < 50) {
+ dev_warn(&pf->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = 50;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+ credits = max_tx_rate;
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
+ max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return ret;
+}
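As a worked example of the credit arithmetic above (illustration only, not part of the patch): Tx rate credits are in units of 50 Mbps (I40E_BW_CREDIT_DIVISOR), so a 4000 Mbps limit becomes 80 credits, and any non-zero request below 50 Mbps is first raised to the 50 Mbps minimum:

	/* illustration only: 50 Mbps per credit */
	u64 max_tx_rate = 4000;				/* Mbps */
	u64 credits = max_tx_rate;

	do_div(credits, I40E_BW_CREDIT_DIVISOR);	/* 4000 / 50 = 80 credits */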
+
+/**
+ * i40e_remove_queue_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ *
+ * Remove queue channels for the TCs
+ **/
+static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+{
+ enum i40e_admin_queue_err last_aq_status;
+ struct i40e_cloud_filter *cfilter;
+ struct i40e_channel *ch, *ch_tmp;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+ int ret, i;
+
+ /* Reset rss size that was stored when reconfiguring rss for
+ * channel VSIs with non-power-of-2 queue count.
+ */
+ vsi->current_rss_size = 0;
+
+ /* perform cleanup for channels if they exist */
+ if (list_empty(&vsi->ch_list))
+ return;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct i40e_vsi *p_vsi;
+
+ list_del(&ch->list);
+ p_vsi = ch->parent_vsi;
+ if (!p_vsi || !ch->initialized) {
+ kfree(ch);
+ continue;
+ }
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = NULL;
+
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = NULL;
+ }
+
+ /* Reset BW configured for this VSI via mqprio */
+ ret = i40e_set_bw_limit(vsi, ch->seid, 0);
+ if (ret)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to reset tx rate for ch->seid %u\n",
+ ch->seid);
+
+ /* delete cloud filters associated with this channel */
+ hlist_for_each_entry_safe(cfilter, node,
+ &pf->cloud_filter_list, cloud_node) {
+ if (cfilter->seid != ch->seid)
+ continue;
+
+ hash_del(&cfilter->cloud_node);
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi,
+ cfilter,
+ false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter,
+ false);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Failed to delete cloud filter, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, last_aq_status));
+ kfree(cfilter);
+ }
+
+ /* delete VSI from FW */
+ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
+ NULL);
+ if (ret)
+ dev_err(&vsi->back->pdev->dev,
+ "unable to remove channel (%d) for parent VSI(%d)\n",
+ ch->seid, p_vsi->seid);
+ kfree(ch);
+ }
+ INIT_LIST_HEAD(&vsi->ch_list);
+}
+
+/**
+ * i40e_is_any_channel - does any channel exist
+ * @vsi: ptr to VSI with which channels are associated
+ *
+ * Returns true if any initialized channel exists for the given VSI, false otherwise
+ **/
+static bool i40e_is_any_channel(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ if (ch->initialized)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_get_max_queues_for_channel
+ * @vsi: ptr to VSI with which channels are associated
+ *
+ * Helper function which returns max value among the queue counts set on the
+ * channels/TCs created.
+ **/
+static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ int max = 0;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ if (!ch->initialized)
+ continue;
+ if (ch->num_queue_pairs > max)
+ max = ch->num_queue_pairs;
+ }
+
+ return max;
+}
+
+/**
+ * i40e_validate_num_queues - validate num_queues w.r.t. channels
+ * @pf: ptr to PF device
+ * @num_queues: number of queues
+ * @vsi: the parent VSI
+ * @reconfig_rss: indicates whether RSS should be reconfigured
+ *
+ * This function validates the number of queues in the context of the new
+ * channel being established and determines whether RSS needs to be
+ * reconfigured for the parent VSI.
+ **/
+static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
+ struct i40e_vsi *vsi, bool *reconfig_rss)
+{
+ int max_ch_queues;
+
+ if (!reconfig_rss)
+ return -EINVAL;
+
+ *reconfig_rss = false;
+ if (vsi->current_rss_size) {
+ if (num_queues > vsi->current_rss_size) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) > vsi's current_size(%d)\n",
+ num_queues, vsi->current_rss_size);
+ return -EINVAL;
+ } else if ((num_queues < vsi->current_rss_size) &&
+ (!is_power_of_2(num_queues))) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
+ num_queues, vsi->current_rss_size);
+ return -EINVAL;
+ }
+ }
+
+ if (!is_power_of_2(num_queues)) {
+ /* Find the max num_queues configured for channels, if any
+ * exist. If channels exist, then enforce 'num_queues' to be no
+ * less than the largest queue count ever configured for a channel.
+ */
+ max_ch_queues = i40e_get_max_queues_for_channel(vsi);
+ if (num_queues < max_ch_queues) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: num_queues (%d) < max queues configured for channel(%d)\n",
+ num_queues, max_ch_queues);
+ return -EINVAL;
+ }
+ *reconfig_rss = true;
+ }
+
+ return 0;
+}
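To make the validation rules above concrete (illustration only, hypothetical values): with vsi->current_rss_size of 8, asking for 10 queues fails (more than the current RSS size), asking for 6 fails (smaller but not a power of 2), and asking for 4 succeeds; a non-power-of-2 request is only accepted when it is at least the largest queue count already configured on any channel, and then *reconfig_rss is set so the caller reprograms RSS:

	/* illustration only: pf and vsi are hypothetical here */
	bool reconfig_rss;
	int err;

	err = i40e_validate_num_queues(pf, 4, vsi, &reconfig_rss);	/* 0 */
	err = i40e_validate_num_queues(pf, 6, vsi, &reconfig_rss);	/* -EINVAL */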
+
+/**
+ * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
+ * @vsi: the VSI being setup
+ * @rss_size: size of RSS; the LUT gets reprogrammed accordingly
+ *
+ * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
+ **/
+static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 seed[I40E_HKEY_ARRAY_SIZE];
+ struct i40e_hw *hw = &pf->hw;
+ int local_rss_size;
+ u8 *lut;
+ int ret;
+
+ if (!vsi->rss_size)
+ return -EINVAL;
+
+ if (rss_size > vsi->rss_size)
+ return -EINVAL;
+
+ local_rss_size = min_t(int, vsi->rss_size, rss_size);
+ lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
+ if (!lut)
+ return -ENOMEM;
+
+ /* Ignoring user configured lut if there is one */
+ i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
+
+ /* Use user configured hash key if there is one, otherwise
+ * use default.
+ */
+ if (vsi->rss_hkey_user)
+ memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+ else
+ netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+
+ ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ kfree(lut);
+ return ret;
+ }
+ kfree(lut);
+
+ /* Do the update w.r.t. storing rss_size */
+ if (!vsi->orig_rss_size)
+ vsi->orig_rss_size = vsi->rss_size;
+ vsi->current_rss_size = local_rss_size;
+
+ return ret;
+}
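As an illustration of the LUT reprogramming above (not part of the patch, and assuming i40e_fill_rss_lut() wraps queue indices modulo the requested RSS size, as it does elsewhere in this driver): shrinking a channel's RSS size to 4 leaves the lookup table cycling over queues 0..3:

	/* illustration only: LUT entries wrap around the new rss_size */
	u16 rss_table_size = 512, local_rss_size = 4;
	u8 lut[512];
	int i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % local_rss_size;	/* 0, 1, 2, 3, 0, 1, ... */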
+
+/**
+ * i40e_channel_setup_queue_map - Setup a channel queue map
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @ch: ptr to channel structure
+ *
+ * Setup queue map for a specific channel
+ **/
+static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
+ struct i40e_vsi_context *ctxt,
+ struct i40e_channel *ch)
+{
+ u16 qcount, qmap, sections = 0;
+ u8 offset = 0;
+ int pow;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
+ ch->num_queue_pairs = qcount;
+
+ /* find the next higher power-of-2 of num queue pairs */
+ pow = ilog2(qcount);
+ if (!is_power_of_2(qcount))
+ pow++;
+
+ qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+
+ ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
+ ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
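For illustration of the qmap encoding above (not part of the patch): the TC queue-number field stores a power-of-2 exponent, so a channel requesting 6 queue pairs is advertised as 2^3 = 8 queues in the map, since ilog2(6) is 2 and the count is not a power of 2:

	/* illustration only: next-higher power-of-2 rounding */
	u16 qcount = 6;
	int pow = ilog2(qcount);	/* 2 */

	if (!is_power_of_2(qcount))
		pow++;			/* 3, i.e. the map advertises 8 queues */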
+
+/**
+ * i40e_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ **/
+static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ struct i40e_channel *ch)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int ret;
+
+ if (ch->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "add new vsi failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ if (ch->type == I40E_VSI_VMDQ2)
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
+
+ /* Set queue map for a given VSI context */
+ i40e_channel_setup_queue_map(pf, &ctxt, ch);
+
+ /* Now time to create VSI */
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "add new vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ /* Success, update channel */
+ ch->enabled_tc = enabled_tc;
+ ch->seid = ctxt.seid;
+ ch->vsi_number = ctxt.vsi_number;
+ ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
+
+ /* copy just the sections touched, not the entire info,
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ ch->info.mapping_flags = ctxt.info.mapping_flags;
+ memcpy(&ch->info.queue_mapping,
+ &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
+ memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
+ sizeof(ctxt.info.tc_mapping));
+
+ return 0;
+}
+
+static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ i40e_status ret;
+ int i;
+
+ bw_data.tc_valid_bits = ch->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
+ vsi->back->hw.aq.asq_last_status, ch->seid);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ ch->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return 0;
+}
+
+/**
+ * i40e_channel_config_tx_ring - config TX ring associated with new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure TX rings associated with the channel (VSI), since its queues
+ * are taken from the parent VSI.
+ **/
+static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ i40e_status ret;
+ int i;
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (ch->enabled_tc & BIT(i))
+ bw_share[i] = 1;
+ }
+
+ /* configure BW for new VSI */
+ ret = i40e_channel_config_bw(vsi, ch, bw_share);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed configuring TC map %d for channel (seid %u)\n",
+ ch->enabled_tc, ch->seid);
+ return ret;
+ }
+
+ for (i = 0; i < ch->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring, *rx_ring;
+ u16 pf_q;
+
+ pf_q = ch->base_queue + i;
+
+ /* Get to TX ring ptr of main VSI, for re-setup TX queue
+ * context
+ */
+ tx_ring = vsi->tx_rings[pf_q];
+ tx_ring->ch = ch;
+
+ /* Get the RX ring ptr */
+ rx_ring = vsi->rx_rings[pf_q];
+ rx_ring->ch = ch;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @uplink_seid: underlying HW switching element (VEB) ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and configures TX rings accordingly
+ **/
+static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_channel *ch,
+ u16 uplink_seid, u8 type)
+{
+ int ret;
+
+ ch->initialized = false;
+ ch->base_queue = vsi->next_base_queue;
+ ch->type = type;
+
+ /* Proceed with creation of channel (VMDq2) VSI */
+ ret = i40e_add_channel(pf, uplink_seid, ch);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to add_channel using uplink_seid %u\n",
+ uplink_seid);
+ return ret;
+ }
+
+ /* Mark the successful creation of channel */
+ ch->initialized = true;
+
+ /* Reconfigure TX queues using QTX_CTL register */
+ ret = i40e_channel_config_tx_ring(pf, vsi, ch);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to configure TX rings for channel %u\n",
+ ch->seid);
+ return ret;
+ }
+
+ /* update 'next_base_queue' */
+ vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
+ dev_dbg(&pf->pdev->dev,
+ "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
+ ch->seid, ch->vsi_number, ch->stat_counter_idx,
+ ch->num_queue_pairs,
+ vsi->next_base_queue);
+ return ret;
+}
+
+/**
+ * i40e_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: pointer to the VSI under which the channel is created
+ * @ch: ptr to channel structure
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and uplink switching element (uplink_seid)
+ **/
+static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ u8 vsi_type;
+ u16 seid;
+ int ret;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ vsi_type = I40E_VSI_VMDQ2;
+ } else {
+ dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
+ vsi->type);
+ return false;
+ }
+
+ /* underlying switching element */
+ seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+
+ /* create channel (VSI), configure TX rings */
+ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
+ return false;
+ }
+
+ return ch->initialized ? true : false;
+}
+
+/**
+ * i40e_validate_and_set_switch_mode - sets up switch mode correctly
+ * @vsi: ptr to VSI which has PF backing
+ *
+ * Sets up the switch mode correctly if it needs to be changed, restricting
+ * it to the allowed modes.
+ **/
+static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+{
+ u8 mode;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
+ if (ret)
+ return -EINVAL;
+
+ if (hw->dev_caps.switch_mode) {
+ /* if switch mode is set, support mode2 (non-tunneled for
+ * cloud filter) for now
+ */
+ u32 switch_mode = hw->dev_caps.switch_mode &
+ I40E_SWITCH_MODE_MASK;
+ if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
+ if (switch_mode == I40E_CLOUD_FILTER_MODE2)
+ return 0;
+ dev_err(&pf->pdev->dev,
+ "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
+ hw->dev_caps.switch_mode);
+ return -EINVAL;
+ }
+ }
+
+ /* Set Bit 7 to be valid */
+ mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
+
+ /* Set L4type to both TCP and UDP support */
+ mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+
+ /* Set cloud filter mode */
+ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
+
+ /* Prep mode field for set_switch_config */
+ ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
+ pf->last_sw_conf_valid_flags,
+ mode, NULL);
+ if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ dev_err(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+
+ return ret;
+}
+
+/**
+ * i40e_create_queue_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates a channel (VSI) using the num_queues specified by
+ * the user and reconfigures RSS if needed.
+ **/
+int i40e_create_queue_channel(struct i40e_vsi *vsi,
+ struct i40e_channel *ch)
+{
+ struct i40e_pf *pf = vsi->back;
+ bool reconfig_rss;
+ int err;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (!ch->num_queue_pairs) {
+ dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* validate user requested num_queues for channel */
+ err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
+ &reconfig_rss);
+ if (err) {
+ dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* By default we are in VEPA mode; if this is the first VF/VMDq
+ * VSI to be added, switch to VEB mode.
+ */
+ if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
+ (!i40e_is_any_channel(vsi))) {
+ if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to create channel. Override queues (%u) not power of 2\n",
+ vsi->tc_config.tc_info[0].qcount);
+ return -EINVAL;
+ }
+
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG,
+ true);
+ else
+ i40e_do_reset_safe(pf,
+ I40E_PF_RESET_FLAG);
+ }
+ }
+ /* From now on, the main VSI's number of queues is the value of
+ * TC0's queue count
+ */
+ }
+
+ /* By this time, vsi->cnt_q_avail should be non-zero and
+ * at least num_queues
+ */
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
+ dev_dbg(&pf->pdev->dev,
+ "Error: cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_queue_pairs);
+ return -EINVAL;
+ }
+
+ /* reconfig_rss only if vsi type is MAIN_VSI */
+ if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
+ err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "Error: unable to reconfig rss for num_queues (%u)\n",
+ ch->num_queue_pairs);
+ return -EINVAL;
+ }
+ }
+
+ if (!i40e_setup_channel(pf, vsi, ch)) {
+ dev_info(&pf->pdev->dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "Setup channel (id:%u) utilizing num_queues %d\n",
+ ch->seid, ch->num_queue_pairs);
+
+ /* configure VSI for BW limit */
+ if (ch->max_tx_rate) {
+ u64 credits = ch->max_tx_rate;
+
+ if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
+ return -EINVAL;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&pf->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ ch->max_tx_rate,
+ credits,
+ ch->seid);
+ }
+
+ /* in case of VF, this will be main SRIOV VSI */
+ ch->parent_vsi = vsi;
+
+ /* and update main_vsi's count for queue_available to use */
+ vsi->cnt_q_avail -= ch->num_queue_pairs;
+
+ return 0;
+}
+
+/**
+ * i40e_configure_queue_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ **/
+static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch;
+ u64 max_rate = 0;
+ int ret = 0, i;
+
+ /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
+ vsi->tc_seid_map[0] = vsi->seid;
+ for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->tc_config.enabled_tc & BIT(i)) {
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_queue_pairs =
+ vsi->tc_config.tc_info[i].qcount;
+ ch->base_queue =
+ vsi->tc_config.tc_info[i].qoffset;
+
+ /* Bandwidth limit through tc interface is in bytes/s,
+ * change to Mbit/s
+ */
+ max_rate = vsi->mqprio_qopt.max_rate[i];
+ do_div(max_rate, I40E_BW_MBPS_DIVISOR);
+ ch->max_tx_rate = max_rate;
+
+ list_add_tail(&ch->list, &vsi->ch_list);
+
+ ret = i40e_create_queue_channel(vsi, ch);
+ if (ret) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed creating queue channel with TC%d: queues %d\n",
+ i, ch->num_queue_pairs);
+ goto err_free;
+ }
+ vsi->tc_seid_map[i] = ch->seid;
+ }
+ }
+ return ret;
+
+err_free:
+ i40e_remove_queue_channels(vsi);
+ return ret;
+}
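As a worked example of the rate conversion above (illustration only; it assumes I40E_BW_MBPS_DIVISOR converts bytes per second to Mbit/s, i.e. 125000 bytes/s per Mbit/s): a tc max_rate of 500000000 bytes/s becomes a 4000 Mbps channel limit:

	/* illustration only: tc hands rates down in bytes per second */
	u64 max_rate = 500000000ULL;		/* bytes/s from mqprio */

	do_div(max_rate, I40E_BW_MBPS_DIVISOR);	/* 4000 Mbit/s, assuming the
						 * divisor is 125000
						 */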
+
+/**
* i40e_veb_config_tc - Configure TCs for given VEB
* @veb: given VEB
* @enabled_tc: TC bitmap
@@ -5346,13 +6421,14 @@ out:
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
enum i40e_aq_link_speed new_speed;
+ struct i40e_pf *pf = vsi->back;
char *speed = "Unknown";
char *fc = "Unknown";
char *fec = "";
char *req_fec = "";
char *an = "";
- new_speed = vsi->back->hw.phy.link_info.link_speed;
+ new_speed = pf->hw.phy.link_info.link_speed;
if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
return;
@@ -5366,13 +6442,13 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
/* Warn user if link speed on NPAR enabled partition is not at
* least 10GB
*/
- if (vsi->back->hw.func_caps.npar_enable &&
- (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
- vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
+ if (pf->hw.func_caps.npar_enable &&
+ (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
+ pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
netdev_warn(vsi->netdev,
"The partition detected link speed that is less than 10Gbps\n");
- switch (vsi->back->hw.phy.link_info.link_speed) {
+ switch (pf->hw.phy.link_info.link_speed) {
case I40E_LINK_SPEED_40GB:
speed = "40 G";
break;
@@ -5395,7 +6471,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
break;
}
- switch (vsi->back->hw.fc.current_mode) {
+ switch (pf->hw.fc.current_mode) {
case I40E_FC_FULL:
fc = "RX/TX";
break;
@@ -5410,18 +6486,18 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
break;
}
- if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+ if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
req_fec = ", Requested FEC: None";
fec = ", FEC: None";
an = ", Autoneg: False";
- if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
an = ", Autoneg: True";
- if (vsi->back->hw.phy.link_info.fec_info &
+ if (pf->hw.phy.link_info.fec_info &
I40E_AQ_CONFIG_FEC_KR_ENA)
fec = ", FEC: CL74 FC-FEC/BASE-R";
- else if (vsi->back->hw.phy.link_info.fec_info &
+ else if (pf->hw.phy.link_info.fec_info &
I40E_AQ_CONFIG_FEC_RS_ENA)
fec = ", FEC: CL108 RS-FEC";
@@ -5470,15 +6546,6 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
i40e_print_link_message(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- } else if (vsi->netdev) {
- i40e_print_link_message(vsi, false);
- /* need to check for qualified module here*/
- if ((pf->hw.phy.link_info.link_info &
- I40E_AQ_MEDIA_AVAILABLE) &&
- (!(pf->hw.phy.link_info.an_info &
- I40E_AQ_QUALIFIED_MODULE)))
- netdev_err(vsi->netdev,
- "the driver failed to link because an unqualified module was detected.");
}
/* replay FDIR SB filters */
@@ -5562,74 +6629,928 @@ void i40e_down(struct i40e_vsi *vsi)
}
/**
+ * i40e_validate_mqprio_qopt - validate queue mapping info
+ * @vsi: the VSI being configured
+ * @mqprio_qopt: queue parameters
+ **/
+static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0;
+ u64 max_rate = 0;
+ int i;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
+ return -EINVAL;
+ for (i = 0; ; i++) {
+ if (!mqprio_qopt->qopt.count[i])
+ return -EINVAL;
+ if (mqprio_qopt->min_rate[i]) {
+ dev_err(&vsi->back->pdev->dev,
+ "Invalid min tx rate (greater than 0) specified\n");
+ return -EINVAL;
+ }
+ max_rate = mqprio_qopt->max_rate[i];
+ do_div(max_rate, I40E_BW_MBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ }
+ if (vsi->num_queue_pairs <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
+ return -EINVAL;
+ }
+ if (sum_max_rate > i40e_get_link_speed(vsi)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Invalid max tx rate specified\n");
+ return -EINVAL;
+ }
+ return 0;
+}
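To make the checks above concrete (illustration only, hypothetical values): a two-TC layout with count = {4, 4} and offset = {0, 4} passes, because TC0 starts at queue 0 and each TC's offset equals the previous offset plus its count; min_rate must stay 0, and the per-TC max_rate values (bytes/s) may not sum to more than the link speed:

	/* illustration only: a layout that passes the offset checks */
	struct tc_mqprio_qopt_offload q;

	memset(&q, 0, sizeof(q));
	q.qopt.num_tc = 2;
	q.qopt.count[0] = 4;	q.qopt.offset[0] = 0;
	q.qopt.count[1] = 4;	q.qopt.offset[1] = 4;	/* offset[0] + count[0] */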
+
+/**
+ * i40e_vsi_set_default_tc_config - set default values for tc configuration
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
+{
+ u16 qcount;
+ int i;
+
+ /* Only TC0 is enabled */
+ vsi->tc_config.numtc = 1;
+ vsi->tc_config.enabled_tc = 1;
+ qcount = min_t(int, vsi->alloc_queue_pairs,
+ i40e_pf_get_max_q_per_tc(vsi->back));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* For the TC that is not enabled set the offset to the default
+ * queue and allocate one queue for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ if (i == 0)
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ else
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+ }
+}
+
+/**
* i40e_setup_tc - configure multiple traffic classes
* @netdev: net device to configure
- * @tc: number of traffic classes to enable
+ * @type_data: tc offload data
**/
-static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u8 enabled_tc = 0;
+ u8 enabled_tc = 0, num_tc, hw;
+ bool need_reset = false;
int ret = -EINVAL;
+ u16 mode;
int i;
- /* Check if DCB enabled to continue */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
- netdev_info(netdev, "DCB is not enabled for adapter\n");
- goto exit;
+ num_tc = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tc;
}
/* Check if MFP enabled */
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
- netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
- goto exit;
+ netdev_info(netdev,
+ "Configuring TC not supported in MFP mode\n");
+ return ret;
}
+ switch (mode) {
+ case TC_MQPRIO_MODE_DCB:
+ pf->flags &= ~I40E_FLAG_TC_MQPRIO;
- /* Check whether tc count is within enabled limit */
- if (tc > i40e_pf_get_num_tc(pf)) {
- netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
- goto exit;
+ /* Check if DCB enabled to continue */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev,
+ "DCB is not enabled for adapter\n");
+ return ret;
+ }
+
+ /* Check whether tc count is within enabled limit */
+ if (num_tc > i40e_pf_get_num_tc(pf)) {
+ netdev_info(netdev,
+ "TC count greater than enabled on link for adapter\n");
+ return ret;
+ }
+ break;
+ case TC_MQPRIO_MODE_CHANNEL:
+ if (pf->flags & I40E_FLAG_DCB_ENABLED) {
+ netdev_info(netdev,
+ "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
+ return ret;
+ }
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return ret;
+ ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret)
+ return ret;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt,
+ sizeof(*mqprio_qopt));
+ pf->flags |= I40E_FLAG_TC_MQPRIO;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ break;
+ default:
+ return -EINVAL;
}
+config_tc:
/* Generate TC map for number of tc requested */
- for (i = 0; i < tc; i++)
+ for (i = 0; i < num_tc; i++)
enabled_tc |= BIT(i);
/* Requesting same TC configuration as already enabled */
- if (enabled_tc == vsi->tc_config.enabled_tc)
+ if (enabled_tc == vsi->tc_config.enabled_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
return 0;
/* Quiesce VSI queues */
i40e_quiesce_vsi(vsi);
+ if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+ i40e_remove_queue_channels(vsi);
+
/* Configure VSI for enabled TCs */
ret = i40e_vsi_config_tc(vsi, enabled_tc);
if (ret) {
netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
vsi->seid);
+ need_reset = true;
goto exit;
}
+ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (vsi->mqprio_qopt.max_rate[0]) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (!ret) {
+ u64 credits = max_tx_rate;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ max_tx_rate,
+ credits,
+ vsi->seid);
+ } else {
+ need_reset = true;
+ goto exit;
+ }
+ }
+ ret = i40e_configure_queue_channels(vsi);
+ if (ret) {
+ netdev_info(netdev,
+ "Failed configuring queue channels\n");
+ need_reset = true;
+ goto exit;
+ }
+ }
+
+exit:
+ /* Reset the configuration data to defaults, only TC0 is enabled */
+ if (need_reset) {
+ i40e_vsi_set_default_tc_config(vsi);
+ need_reset = false;
+ }
+
/* Unquiesce VSI */
i40e_unquiesce_vsi(vsi);
+ return ret;
+}
-exit:
+/**
+ * i40e_set_cld_element - sets cloud filter element data
+ * @filter: cloud filter rule
+ * @cld: ptr to cloud filter element data
+ *
+ * This is a helper function to copy data into the cloud filter element
+ **/
+static inline void
+i40e_set_cld_element(struct i40e_cloud_filter *filter,
+ struct i40e_aqc_cloud_filters_element_data *cld)
+{
+ int i, j;
+ u32 ipa;
+
+ memset(cld, 0, sizeof(*cld));
+ ether_addr_copy(cld->outer_mac, filter->dst_mac);
+ ether_addr_copy(cld->inner_mac, filter->src_mac);
+
+ if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
+ return;
+
+ if (filter->n_proto == ETH_P_IPV6) {
+#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
+ for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
+ i++, j += 2) {
+ ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
+ ipa = cpu_to_le32(ipa);
+ memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
+ }
+ } else {
+ ipa = be32_to_cpu(filter->dst_ipv4);
+ memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
+ }
+
+ cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
+
+ /* tenant_id is not supported by FW now, once the support is enabled
+ * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
+ */
+ if (filter->tenant_id)
+ return;
+}
+
+/**
+ * i40e_add_del_cloud_filter - Add/del cloud filter
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec.
+ * Returns 0 if the filter was successfully added or deleted.
+ **/
+static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter, bool add)
+{
+ struct i40e_aqc_cloud_filters_element_data cld_filter;
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+ static const u16 flag_table[128] = {
+ [I40E_CLOUD_FILTER_FLAGS_OMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_OMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
+ [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
+ I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
+ [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
+ I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
+ [I40E_CLOUD_FILTER_FLAGS_IIP] =
+ I40E_AQC_ADD_CLOUD_FILTER_IIP,
+ };
+
+ if (filter->flags >= ARRAY_SIZE(flag_table))
+ return I40E_ERR_CONFIG;
+
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter);
+
+ if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
+ cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
+
+ if (filter->n_proto == ETH_P_IPV6)
+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+ else
+ cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+
+ if (add)
+ ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ if (ret)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
+ add ? "add" : "delete", filter->dst_port, ret,
+ pf->hw.aq.asq_last_status);
+ else
+ dev_info(&pf->pdev->dev,
+ "%s cloud filter for VSI: %d\n",
+ add ? "Added" : "Deleted", filter->seid);
return ret;
}
-static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
+/**
+ * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
+ * @vsi: pointer to VSI
+ * @filter: cloud filter rule
+ * @add: if true, add, if false, delete
+ *
+ * Add or delete a cloud filter for a specific flow spec using big buffer.
+ * Returns 0 if the filter was successfully added or deleted.
+ **/
+static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add)
{
- struct tc_mqprio_qopt *mqprio = type_data;
+ struct i40e_aqc_cloud_filters_element_bb cld_filter;
+ struct i40e_pf *pf = vsi->back;
+ int ret;
+
+ /* Both (src/dst) valid mac_addr are not supported */
+ if ((is_valid_ether_addr(filter->dst_mac) &&
+ is_valid_ether_addr(filter->src_mac)) ||
+ (is_multicast_ether_addr(filter->dst_mac) &&
+ is_multicast_ether_addr(filter->src_mac)))
+ return -EINVAL;
- if (type != TC_SETUP_MQPRIO)
+ /* Make sure a port is specified; otherwise bail out, as a channel
+ * specific cloud filter needs the L4 port to be non-zero
+ */
+ if (!filter->dst_port)
+ return -EINVAL;
+
+ /* adding filter using src_port/src_ip is not supported at this stage */
+ if (filter->src_port || filter->src_ipv4 ||
+ !ipv6_addr_any(&filter->ip.v6.src_ip6))
+ return -EINVAL;
+
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter.element);
+
+ if (is_valid_ether_addr(filter->dst_mac) ||
+ is_valid_ether_addr(filter->src_mac) ||
+ is_multicast_ether_addr(filter->dst_mac) ||
+ is_multicast_ether_addr(filter->src_mac)) {
+ /* MAC + IP : unsupported mode */
+ if (filter->dst_ipv4)
+ return -EINVAL;
+
+ /* Since we already validated that the L4 port is set before
+ * getting here, start with the corresponding "flags" value
+ * and update it depending on whether a VLAN is present
+ */
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
+
+ if (filter->vlan_id) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+ }
+
+ } else if (filter->dst_ipv4 ||
+ !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+ if (filter->n_proto == ETH_P_IPV6)
+ cld_filter.element.flags |=
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
+ else
+ cld_filter.element.flags |=
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "either mac or ip has to be valid for cloud filter\n");
+ return -EINVAL;
+ }
+
+ /* Now copy L4 port in Byte 6..7 in general fields */
+ cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
+ be16_to_cpu(filter->dst_port);
+
+ if (add) {
+ /* Validate current device switch mode, change if necessary */
+ ret = i40e_validate_and_set_switch_mode(vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "failed to set switch mode, ret %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ } else {
+ ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
+ &cld_filter, 1);
+ }
+
+ if (ret)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
+ add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
+ else
+ dev_info(&pf->pdev->dev,
+ "%s cloud filter for VSI: %d, L4 port: %d\n",
+ add ? "add" : "delete", filter->seid,
+ ntohs(filter->dst_port));
+ return ret;
+}
+
+/**
+ * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ * @filter: Pointer to cloud filter structure
+ *
+ **/
+static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
+ struct tc_cls_flower_offload *f,
+ struct i40e_cloud_filter *filter)
+{
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct i40e_pf *pf = vsi->back;
+ u8 field_flags = 0;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
+ f->dissector->used_keys);
return -EOPNOTSUPP;
+ }
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
- return i40e_setup_tc(netdev, mqprio->num_tc);
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+
+ if (mask->keyid != 0)
+ field_flags |= I40E_CLOUD_FIELD_TEN_ID;
+
+ filter->tenant_id = be32_to_cpu(key->keyid);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+
+ n_proto_key = ntohs(key->n_proto);
+ n_proto_mask = ntohs(mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ filter->n_proto = n_proto_key & n_proto_mask;
+ filter->ip_proto = key->ip_proto;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+
+ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+
+ /* use is_broadcast and is_zero to check for all 0xff or all 0 */
+ if (!is_zero_ether_addr(mask->dst)) {
+ if (is_broadcast_ether_addr(mask->dst)) {
+ field_flags |= I40E_CLOUD_FIELD_OMAC;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
+ mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(mask->src)) {
+ if (is_broadcast_ether_addr(mask->src)) {
+ field_flags |= I40E_CLOUD_FIELD_IMAC;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
+ mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+ ether_addr_copy(filter->dst_mac, key->dst);
+ ether_addr_copy(filter->src_mac, key->src);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+
+ if (mask->vlan_id) {
+ if (mask->vlan_id == VLAN_VID_MASK) {
+ field_flags |= I40E_CLOUD_FIELD_IVLAN;
+
+ } else {
+ dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
+ mask->vlan_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ filter->vlan_id = cpu_to_be16(key->vlan_id);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ addr_type = key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ mask->dst = be32_to_cpu(mask->dst);
+ dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
+ &mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ mask->src = be32_to_cpu(mask->src);
+ dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
+ &mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
+ dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
+ return I40E_ERR_CONFIG;
+ }
+ filter->dst_ipv4 = key->dst;
+ filter->src_ipv4 = key->src;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ /* src and dest IPV6 address should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&key->dst) ||
+ ipv6_addr_loopback(&key->src)) {
+ dev_err(&pf->pdev->dev,
+ "Bad ipv6, addr is LOOPBACK\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+
+ memcpy(&filter->src_ipv6, &key->src.s6_addr32,
+ sizeof(filter->src_ipv6));
+ memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
+ sizeof(filter->dst_ipv6));
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be16(0xffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
+ be16_to_cpu(mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be16(0xffff)) {
+ field_flags |= I40E_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
+ be16_to_cpu(mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ filter->dst_port = key->dst;
+ filter->src_port = key->src;
+
+ switch (filter->ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ dev_err(&pf->pdev->dev,
+ "Only UDP and TCP transport are supported\n");
+ return -EINVAL;
+ }
+ }
+ filter->flags = field_flags;
+ return 0;
+}
+
+/**
+ * i40e_handle_tclass: Forward to a traffic class on the device
+ * @vsi: Pointer to VSI
+ * @tc: traffic class index on the device
+ * @filter: Pointer to cloud filter structure
+ *
+ **/
+static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
+ struct i40e_cloud_filter *filter)
+{
+ struct i40e_channel *ch, *ch_tmp;
+
+ /* direct to a traffic class on the same device */
+ if (tc == 0) {
+ filter->seid = vsi->seid;
+ return 0;
+ } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
+ if (!filter->dst_port) {
+ dev_err(&vsi->back->pdev->dev,
+ "Specify destination port to direct to traffic class that is not default\n");
+ return -EINVAL;
+ }
+ if (list_empty(&vsi->ch_list))
+ return -EINVAL;
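+ /* walk the channel list and use the seid of the channel VSI that
+ * backs this traffic class as the filter destination
+ */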
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
+ list) {
+ if (ch->seid == vsi->tc_seid_map[tc])
+ filter->seid = ch->seid;
+ }
+ return 0;
+ }
+ dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
+ return -EINVAL;
+}
+
+/**
+ * i40e_configure_clsflower - Configure tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ *
+ **/
+static int i40e_configure_clsflower(struct i40e_vsi *vsi,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct i40e_cloud_filter *filter = NULL;
+ struct i40e_pf *pf = vsi->back;
+ int err = 0;
+
+ if (tc < 0) {
+ dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
+ return -EBUSY;
+
+ if (pf->fdir_pf_active_filters ||
+ (!hlist_empty(&pf->fdir_filter_list))) {
+ dev_err(&vsi->back->pdev->dev,
+ "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
+ return -EINVAL;
+ }
+
+ if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
+ dev_err(&vsi->back->pdev->dev,
+ "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
+ vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->cookie = cls_flower->cookie;
+
+ err = i40e_parse_cls_flower(vsi, cls_flower, filter);
+ if (err < 0)
+ goto err;
+
+ err = i40e_handle_tclass(vsi, tc, filter);
+ if (err < 0)
+ goto err;
+
+ /* Add cloud filter */
+ if (filter->dst_port)
+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
+ else
+ err = i40e_add_del_cloud_filter(vsi, filter, true);
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add cloud filter, err %s\n",
+ i40e_stat_str(&pf->hw, err));
+ err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ goto err;
+ }
+
+ /* add filter to the PF's cloud filter list */
+ INIT_HLIST_NODE(&filter->cloud_node);
+
+ hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
+
+ pf->num_cloud_filters++;
+
+ return err;
+err:
+ kfree(filter);
+ return err;
+}
+
+/**
+ * i40e_find_cloud_filter - Find the cloud filter in the list
+ * @vsi: Pointer to VSI
+ * @cookie: filter specific cookie
+ *
+ **/
+static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
+ unsigned long *cookie)
+{
+ struct i40e_cloud_filter *filter = NULL;
+ struct hlist_node *node2;
+
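+ /* the tc flower cookie stored at configure time uniquely identifies
+ * each offloaded filter
+ */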
+ hlist_for_each_entry_safe(filter, node2,
+ &vsi->back->cloud_filter_list, cloud_node)
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ return NULL;
+}
+
+/**
+ * i40e_delete_clsflower - Remove tc flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ *
+ **/
+static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct i40e_cloud_filter *filter = NULL;
+ struct i40e_pf *pf = vsi->back;
+ int err = 0;
+
+ filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
+
+ if (!filter)
+ return -EINVAL;
+
+ hash_del(&filter->cloud_node);
+
+ if (filter->dst_port)
+ err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
+ else
+ err = i40e_add_del_cloud_filter(vsi, filter, false);
+
+ kfree(filter);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to delete cloud filter, err %s\n",
+ i40e_stat_str(&pf->hw, err));
+ return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ }
+
+ pf->num_cloud_filters--;
+ if (!pf->num_cloud_filters)
+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
+ return 0;
+}
+
+/**
+ * i40e_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device private structure
+ * @cls_flower: pointer to struct tc_cls_flower_offload
+ **/
+static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return i40e_configure_clsflower(vsi, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return i40e_delete_clsflower(vsi, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct i40e_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return i40e_setup_tc_cls_flower(np, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int i40e_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
+ np, np);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return i40e_setup_tc(netdev, type_data);
+ case TC_SETUP_BLOCK:
+ return i40e_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -5747,7 +7668,7 @@ err_setup_rx:
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
if (vsi == pf->vsi[pf->lan_vsi])
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err;
}
@@ -5810,6 +7731,33 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
}
/**
+ * i40e_cloud_filter_exit - Cleans up the cloud filters
+ * @pf: Pointer to PF
+ *
+ * This function destroys the hlist where all the cloud filters
+ * were saved.
+ **/
+static void i40e_cloud_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_cloud_filter *cfilter;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &pf->cloud_filter_list, cloud_node) {
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ }
+ pf->num_cloud_filters = 0;
+
+ if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
+ !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
+}
+
+/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -5875,7 +7823,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
+ } else if (reset_flags & I40E_PF_RESET_FLAG) {
/* Request a PF Reset
*
@@ -6226,6 +8174,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
pf->fdir_pf_active_filters--;
+ pf->fd_inv = 0;
}
}
}
@@ -6429,8 +8378,7 @@ static void i40e_link_event(struct i40e_pf *pf)
new_link == netif_carrier_ok(vsi->netdev)))
return;
- if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- i40e_print_link_message(vsi, new_link);
+ i40e_print_link_message(vsi, new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -6553,12 +8501,26 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
*/
i40e_link_event(pf);
- /* check for unqualified module, if link is down */
- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
- (!(status->link_info & I40E_AQ_LINK_UP)))
+ /* Check if module meets thermal requirements */
+ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
dev_err(&pf->pdev->dev,
- "The driver failed to link because an unqualified module was detected.\n");
+ "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
+ dev_err(&pf->pdev->dev,
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ } else {
+ /* check for an unqualified module when the link is down, but
+ * suppress the message if the link was forced down.
+ */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)) &&
+ (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ dev_err(&pf->pdev->dev,
+ "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
+ dev_err(&pf->pdev->dev,
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ }
+ }
}
/**
@@ -6900,7 +8862,8 @@ end_reconstitute:
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
**/
-static int i40e_get_capabilities(struct i40e_pf *pf)
+static int i40e_get_capabilities(struct i40e_pf *pf,
+ enum i40e_admin_queue_opc list_type)
{
struct i40e_aqc_list_capabilities_element_resp *cap_buf;
u16 data_size;
@@ -6915,9 +8878,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
/* this loads the data into the hw struct for us */
err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
- &data_size,
- i40e_aqc_opc_list_func_capabilities,
- NULL);
+ &data_size, list_type,
+ NULL);
/* data loaded, buffer no longer needed */
kfree(cap_buf);
@@ -6934,26 +8896,44 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- if (pf->hw.debug_mask & I40E_DEBUG_USER)
- dev_info(&pf->pdev->dev,
- "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
- pf->hw.pf_id, pf->hw.func_caps.num_vfs,
- pf->hw.func_caps.num_msix_vectors,
- pf->hw.func_caps.num_msix_vectors_vf,
- pf->hw.func_caps.fd_filters_guaranteed,
- pf->hw.func_caps.fd_filters_best_effort,
- pf->hw.func_caps.num_tx_qp,
- pf->hw.func_caps.num_vsis);
-
+ if (pf->hw.debug_mask & I40E_DEBUG_USER) {
+ if (list_type == i40e_aqc_opc_list_func_capabilities) {
+ dev_info(&pf->pdev->dev,
+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+ pf->hw.func_caps.num_msix_vectors,
+ pf->hw.func_caps.num_msix_vectors_vf,
+ pf->hw.func_caps.fd_filters_guaranteed,
+ pf->hw.func_caps.fd_filters_best_effort,
+ pf->hw.func_caps.num_tx_qp,
+ pf->hw.func_caps.num_vsis);
+ } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
+ dev_info(&pf->pdev->dev,
+ "switch_mode=0x%04x, function_valid=0x%08x\n",
+ pf->hw.dev_caps.switch_mode,
+ pf->hw.dev_caps.valid_functions);
+ dev_info(&pf->pdev->dev,
+ "SR-IOV=%d, num_vfs for all function=%u\n",
+ pf->hw.dev_caps.sr_iov_1_1,
+ pf->hw.dev_caps.num_vfs);
+ dev_info(&pf->pdev->dev,
+ "num_vsis=%u, num_rx:%u, num_tx=%u\n",
+ pf->hw.dev_caps.num_vsis,
+ pf->hw.dev_caps.num_rx_qp,
+ pf->hw.dev_caps.num_tx_qp);
+ }
+ }
+ if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ pf->hw.func_caps.num_vfs)
- if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
- dev_info(&pf->pdev->dev,
- "got num_vsis %d, setting num_vsis to %d\n",
- pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
- pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ if (pf->hw.revision_id == 0 &&
+ pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
}
-
return 0;
}
@@ -6995,6 +8975,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
return;
}
}
@@ -7017,6 +8998,95 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
+ * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
+ * @vsi: PF main vsi
+ * @seid: seid of main or channel VSIs
+ *
+ * Rebuilds cloud filters associated with main VSI and channel VSIs if they
+ * existed before reset
+ **/
+static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+{
+ struct i40e_cloud_filter *cfilter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+ i40e_status ret;
+
+ /* Add cloud filters back if they exist */
+ hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
+ cloud_node) {
+ if (cfilter->seid != seid)
+ continue;
+
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+ true);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+
+ if (ret) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to rebuild cloud filter, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
+ * @vsi: PF main vsi
+ *
+ * Rebuilds channel VSIs if they existed before reset
+ **/
+static int i40e_rebuild_channels(struct i40e_vsi *vsi)
+{
+ struct i40e_channel *ch, *ch_tmp;
+ i40e_status ret;
+
+ if (list_empty(&vsi->ch_list))
+ return 0;
+
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ if (!ch->initialized)
+ break;
+ /* Proceed with creation of channel (VMDq2) VSI */
+ ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "failed to rebuild channels using uplink_seid %u\n",
+ vsi->uplink_seid);
+ return ret;
+ }
+ if (ch->max_tx_rate) {
+ u64 credits = ch->max_tx_rate;
+
+ if (i40e_set_bw_limit(vsi, ch->seid,
+ ch->max_tx_rate))
+ return -EINVAL;
+
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ ch->max_tx_rate,
+ credits,
+ ch->seid);
+ }
+ ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
+ if (ret) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Failed to rebuild cloud filters for channel VSI %u\n",
+ ch->seid);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
* @lock_acquired: indicates whether or not the lock has been acquired
@@ -7152,6 +9222,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
i40e_status ret;
@@ -7177,7 +9248,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
i40e_verify_eeprom(pf);
i40e_clear_pxe_mode(hw);
- ret = i40e_get_capabilities(pf);
+ ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (ret)
goto end_core_reset;
@@ -7234,7 +9305,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* If there were VEBs but the reconstitution failed, we'll try
* try to recover minimal use by getting the basic PF VSI working.
*/
- if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+ if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) {
@@ -7258,8 +9329,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
dev_info(&pf->pdev->dev,
"rebuild of switch failed: %d, will try to set up simple PF connection\n",
ret);
- pf->vsi[pf->lan_vsi]->uplink_seid
- = pf->mac_seid;
+ vsi->uplink_seid = pf->mac_seid;
break;
} else if (pf->veb[v]->uplink_seid == 0) {
dev_info(&pf->pdev->dev,
@@ -7270,10 +9340,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
}
- if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+ if (vsi->uplink_seid == pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
/* no VEB, so rebuild only the Main VSI */
- ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+ ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of Main VSI failed: %d\n", ret);
@@ -7281,6 +9351,35 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
}
+ if (vsi->mqprio_qopt.max_rate[0]) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 credits = 0;
+
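+ /* scale the configured max rate to Mbps, then to 50 Mbps credit
+ * increments, before restoring the BW limit
+ */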
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
+ goto end_unlock;
+
+ credits = max_tx_rate;
+ do_div(credits, I40E_BW_CREDIT_DIVISOR);
+ dev_dbg(&vsi->back->pdev->dev,
+ "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
+ max_tx_rate,
+ credits,
+ vsi->seid);
+ }
+
+ ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
+ if (ret)
+ goto end_unlock;
+
+ /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
+ * for this main VSI if they exist
+ */
+ ret = i40e_rebuild_channels(vsi);
+ if (ret)
+ goto end_unlock;
+
/* Reconfigure hardware for allowing smaller MSS in the case
* of TSO, so that we avoid the MDD being fired and causing
* a reset in the case of small MSS+TSO.
@@ -7324,6 +9423,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired)
rtnl_unlock();
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
@@ -7615,9 +9723,9 @@ static void i40e_service_task(struct work_struct *work)
* i40e_service_timer - timer callback
* @data: pointer to PF struct
**/
-static void i40e_service_timer(unsigned long data)
+static void i40e_service_timer(struct timer_list *t)
{
- struct i40e_pf *pf = (struct i40e_pf *)data;
+ struct i40e_pf *pf = from_timer(pf, t, service_timer);
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
@@ -7674,7 +9782,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
/**
* i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @type: VSI pointer
+ * @vsi: VSI pointer
* @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
@@ -8139,7 +10247,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (!vectors_left) {
+ } else if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
@@ -8148,7 +10256,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vec;
dev_info(&pf->pdev->dev,
- "MSI-X vector limit reached, attempting to redistribute vectors\n");
+ "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
+ v_actual, v_budget);
/* reserve the misc vector */
vec = v_actual - 1;
@@ -8196,6 +10305,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
(pf->num_fdsb_msix == 0)) {
dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
}
if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
(pf->num_vmdq_msix == 0)) {
@@ -8313,6 +10423,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -8351,6 +10462,55 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
}
/**
+ * i40e_restore_interrupt_scheme - Restore the interrupt scheme
+ * @pf: private board data structure
+ *
+ * Restore the interrupt scheme that was cleared when we suspended the
+ * device. This should be called during resume to re-allocate the q_vectors
+ * and reacquire IRQs.
+ */
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
+{
+ int err, i;
+
+ /* We cleared the MSI and MSI-X flags when disabling the old interrupt
+ * scheme. We need to re-enable them here in order to attempt to
+ * re-acquire the MSI or MSI-X vectors.
+ */
+ pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ return err;
+
+ /* Now that we've re-acquired IRQs, we need to remap the vectors and
+ * rings together again.
+ */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i]) {
+ err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
+ if (err)
+ goto err_unwind;
+ i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
+ }
+ }
+
+ err = i40e_setup_misc_vector(pf);
+ if (err)
+ goto err_unwind;
+
+ return 0;
+
+err_unwind:
+ while (i--) {
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ }
+
+ return err;
+}
+
+/**
* i40e_setup_misc_vector - Setup the misc vector to handle non queue events
* @pf: board private structure
*
@@ -8363,13 +10523,12 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
- /* Only request the irq if this is the first time through, and
- * not when we're rebuilding after a Reset
- */
- if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
+ /* Only request the IRQ once, the first time through. */
+ if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf);
if (err) {
+ clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
dev_info(&pf->pdev->dev,
"request_irq for %s failed: %d\n",
pf->int_name, err);
@@ -8385,51 +10544,12 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_flush(hw);
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
return err;
}
/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- **/
-static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int ret = 0;
-
- if (seed) {
- struct i40e_aqc_get_set_rss_key_data *seed_dw =
- (struct i40e_aqc_get_set_rss_key_data *)seed;
- ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
- if (lut) {
- bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
-
- ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
- return ret;
-}
-
-/**
* i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
* @vsi: Pointer to vsi structure
* @seed: Buffter to store the hash keys
@@ -8476,46 +10596,6 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
}
/**
- * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
- * @vsi: VSI structure
- **/
-static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
-{
- u8 seed[I40E_HKEY_ARRAY_SIZE];
- struct i40e_pf *pf = vsi->back;
- u8 *lut;
- int ret;
-
- if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
- return 0;
-
- if (!vsi->rss_size)
- vsi->rss_size = min_t(int, pf->alloc_rss_size,
- vsi->num_queue_pairs);
- if (!vsi->rss_size)
- return -EINVAL;
-
- lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
- /* Use the user configured hash keys and lookup table if there is one,
- * otherwise use default
- */
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
- else
- i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
- else
- netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
- ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
- kfree(lut);
-
- return ret;
-}
-
-/**
* i40e_config_rss_reg - Configure RSS keys and lut by writing registers
* @vsi: Pointer to vsi structure
* @seed: RSS hash seed
@@ -8913,8 +10993,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSIX_ENABLED;
/* Set default ITR */
- pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
- pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+ pf->rx_itr_default = I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_TX_DEF;
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
@@ -9014,6 +11094,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.fw_maj_ver >= 5)))
pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+ /* Enable PTP L4 if FW > v6.0 */
+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
+ pf->hw.aq.fw_maj_ver >= 6)
+ pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -9079,9 +11164,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
- /* enable FD_SB only if there is MSI-X vector */
- if (pf->num_fdsb_msix > 0)
+ /* enable FD_SB only if there is MSI-X vector and no cloud
+ * filters exist
+ */
+ if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
+ }
} else {
/* turn off filters, mark for reset and clear SW filter list */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -9090,6 +11179,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
}
pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_SB_AUTO_DISABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
+
/* reset fd counters */
pf->fd_add_err = 0;
pf->fd_atr_cnt = 0;
@@ -9151,10 +11242,16 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
+ if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ dev_err(&pf->pdev->dev,
+ "Offloaded tc filters active, can't turn hw_tc_offload off");
+ return -EINVAL;
+ }
+
need_reset = i40e_set_ntuple(pf, features);
if (need_reset)
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return 0;
}
@@ -9406,8 +11503,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
- true);
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
break;
}
}
@@ -9555,12 +11651,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
}
/**
- * i40e_xdp - implements ndo_xdp for i40e
+ * i40e_xdp - implements ndo_bpf for i40e
* @dev: netdevice
* @xdp: XDP command
**/
static int i40e_xdp(struct net_device *dev,
- struct netdev_xdp *xdp)
+ struct netdev_bpf *xdp)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -9612,7 +11708,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_features_check = i40e_features_check,
.ndo_bridge_getlink = i40e_ndo_bridge_getlink,
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
- .ndo_xdp = i40e_xdp,
+ .ndo_bpf = i40e_xdp,
};
/**
@@ -9671,7 +11767,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
hw_features = hw_enc_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -9849,6 +11946,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
enabled_tc = i40e_pf_get_tc_map(pf);
+ /* Source pruning is enabled by default, so the flag is
+ * negative logic - if it's set, we need to fiddle with
+ * the VSI to disable source pruning.
+ */
+ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+ goto err;
+ }
+ }
+
/* MFP mode setup queue map and update VSI */
if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
!(pf->hw.func_caps.iscsi)) { /* NIC type PF */
@@ -10951,14 +13073,16 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
*/
if ((pf->hw.pf_id == 0) &&
- !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ pf->last_sw_conf_flags = flags;
+ }
if (pf->hw.pf_id == 0) {
u16 valid_flags;
valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
- ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
@@ -10968,6 +13092,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
}
+ pf->last_sw_conf_valid_flags = valid_flags;
}
/* first time setup */
@@ -10988,6 +13113,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+ i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
return -EAGAIN;
}
@@ -11039,6 +13165,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
int queues_left;
+ int q_max;
pf->num_lan_qps = 0;
@@ -11063,6 +13190,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -11077,6 +13205,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
} else {
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
@@ -11085,10 +13214,12 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
- pf->num_lan_qps = max_t(int, pf->rss_size_max,
- num_online_cpus());
- pf->num_lan_qps = min_t(int, pf->num_lan_qps,
- pf->hw.func_caps.num_tx_qp);
+
+ /* limit lan qps to the smaller of qps, cpus or msix */
+ q_max = max_t(int, pf->rss_size_max, num_online_cpus());
+ q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
+ q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
+ pf->num_lan_qps = q_max;
queues_left -= pf->num_lan_qps;
}
@@ -11098,6 +13229,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= 1; /* save 1 queue for FD */
} else {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
}
@@ -11304,6 +13436,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.bus_id = pdev->bus->number;
pf->instance = pfs_found;
+ /* Select something other than the 802.1ad ethertype for the
+ * switch to use internally and drop on ingress.
+ */
+ hw->switch_tag = 0xffff;
+ hw->first_tag = ETH_P_8021AD;
+ hw->second_tag = ETH_P_8021Q;
+
INIT_LIST_HEAD(&pf->l3_flex_pit_list);
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
@@ -11380,11 +13519,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_nvm_version_str(hw));
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+ hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
dev_info(&pdev->dev,
"The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
- else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+ else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
@@ -11395,7 +13533,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
i40e_clear_pxe_mode(hw);
- err = i40e_get_capabilities(pf);
+ err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (err)
goto err_adminq_setup;
@@ -11454,7 +13592,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
- setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
pf->service_timer_period = HZ;
INIT_WORK(&pf->service_task, i40e_service_task);
@@ -11506,6 +13644,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* Make sure flow control is set according to current settings */
err = i40e_set_fc(hw, &set_fc_aq_fail, true);
@@ -11777,7 +13916,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
- if (pf->service_timer.data)
+ if (pf->service_timer.function)
del_timer_sync(&pf->service_timer);
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
@@ -11812,6 +13951,8 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ i40e_cloud_filter_exit(pf);
+
/* remove attached clients */
if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
ret_code = i40e_lan_del_device(pf);
@@ -11937,6 +14078,28 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
}
/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ i40e_prep_for_reset(pf, false);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ i40e_reset_and_rebuild(pf, false, false);
+}
+
+/**
* i40e_pci_error_resume - restart operations after PCI error recovery
* @pdev: PCI device information struct
*
@@ -12021,6 +14184,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
/* Client close must be called explicitly here because the timer
@@ -12046,20 +14210,26 @@ static void i40e_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
/**
- * i40e_suspend - PCI callback for moving to D3
- * @pdev: PCI device information struct
+ * i40e_suspend - PM callback for moving to D3
+ * @dev: generic device information structure
**/
-static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused i40e_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- int retval = 0;
- set_bit(__I40E_SUSPENDED, pf->state);
+ /* If we're already suspended, then there is nothing to do */
+ if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
+ return 0;
+
set_bit(__I40E_DOWN, pf->state);
+ /* Ensure service task will not be running */
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
@@ -12068,81 +14238,70 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
- i40e_stop_misc_vector(pf);
- if (pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- pci_wake_from_d3(pdev, pf->wol_en);
- pci_set_power_state(pdev, PCI_D3hot);
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
- return retval;
+ return 0;
}
/**
- * i40e_resume - PCI callback for waking up from D3
- * @pdev: PCI device information struct
+ * i40e_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
**/
-static int i40e_resume(struct pci_dev *pdev)
+static int __maybe_unused i40e_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct i40e_pf *pf = pci_get_drvdata(pdev);
- u32 err;
+ int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- /* pci_restore_state() clears dev->state_saves, so
- * call pci_save_state() again to restore it.
- */
- pci_save_state(pdev);
+ /* If we're not suspended, then there is nothing to do */
+ if (!test_bit(__I40E_SUSPENDED, pf->state))
+ return 0;
- err = pci_enable_device_mem(pdev);
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
- return err;
+ dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+ err);
}
- pci_set_master(pdev);
- /* no wakeup events while running */
- pci_wake_from_d3(pdev, false);
-
- /* handling the reset will rebuild the device state */
- if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
- clear_bit(__I40E_DOWN, pf->state);
- if (pf->msix_entries) {
- err = request_irq(pf->msix_entries[0].vector,
- i40e_intr, 0, pf->int_name, pf);
- if (err) {
- dev_err(&pf->pdev->dev,
- "request_irq for %s failed: %d\n",
- pf->int_name, err);
- }
- }
- i40e_reset_and_rebuild(pf, false, false);
- }
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, false);
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
return 0;
}
-#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
+ .reset_prepare = i40e_pci_error_reset_prepare,
+ .reset_done = i40e_pci_error_reset_done,
.resume = i40e_pci_error_resume,
};
+static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+
static struct pci_driver i40e_driver = {
.name = i40e_driver_name,
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
-#ifdef CONFIG_PM
- .suspend = i40e_suspend,
- .resume = i40e_resume,
-#endif
+ .driver = {
+ .pm = &i40e_pm_ops,
+ },
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index d591b3e6bd7c..7689c2ee0d46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -311,13 +311,10 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset, u16 *data)
{
- i40e_status ret_code = 0;
-
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- return ret_code;
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
@@ -333,13 +330,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
i40e_status ret_code = 0;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- i40e_release_nvm(hw);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
@@ -446,13 +445,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset, u16 *words,
u16 *data)
{
- i40e_status ret_code = 0;
-
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
- else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- return ret_code;
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index a39b13197891..3bb6659db822 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -190,7 +190,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags,
- u16 valid_flags,
+ u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
@@ -283,6 +283,22 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+i40e_status
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
/* i40e_common */
@@ -360,6 +376,15 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index d8456c381c99..97381238eb7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb(); /* Force any pending update before accessing. */
- adj = ACCESS_ONCE(pf->ptp_base_adj);
+ adj = READ_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustement value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ WRITE_ONCE(pf->ptp_base_adj, incval);
smp_mb(); /* Force the above update. */
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 86ca27f72f02..c234758dad15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -2794,7 +2794,7 @@
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 120c68f78951..4566d66ffc7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
@@ -960,14 +960,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
u32 new_itr = rc->itr;
- int bytes_per_int;
+ int bytes_per_usec;
unsigned int usecs, estimated_usecs;
if (rc->total_packets == 0 || !rc->itr)
return false;
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
- bytes_per_int = rc->total_bytes / usecs;
+ bytes_per_usec = rc->total_bytes / usecs;
/* The calculations in this algorithm depend on interrupts actually
* firing at the ITR rate. This may not happen if the packet rate is
@@ -993,18 +993,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
*/
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- if (bytes_per_int > 10)
+ if (bytes_per_usec > 10)
new_latency_range = I40E_LOW_LATENCY;
break;
case I40E_LOW_LATENCY:
- if (bytes_per_int > 20)
+ if (bytes_per_usec > 20)
new_latency_range = I40E_BULK_LATENCY;
- else if (bytes_per_int <= 10)
+ else if (bytes_per_usec <= 10)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
default:
- if (bytes_per_int <= 20)
+ if (bytes_per_usec <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
@@ -2117,6 +2117,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (!skb) {
xdp.data = page_address(rx_buffer->page) +
rx_buffer->page_offset;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_hard_start = xdp.data -
i40e_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
@@ -2211,9 +2212,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- /* Don't clear PBA because that can cause lost interrupts that
- * came in while we were cleaning/polling
- */
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
@@ -2250,7 +2249,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
- i40e_irq_dynamic_enable_icr0(vsi->back, false);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
@@ -3176,38 +3175,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
+ /* We OR these values together to check both against 4 (WB_STRIDE)
+ * below. This is safe since we don't re-use desc_count afterwards.
*/
desc_count |= ++tx_ring->packet_stride;
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
+ if (desc_count >= WB_STRIDE) {
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
@@ -3228,7 +3201,7 @@ do_rs:
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2f848bc5e391..fbae1182e2ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -38,8 +38,10 @@
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_20K
-#define I40E_ITR_TX_DEF I40E_ITR_20K
+#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -206,7 +208,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
do { \
(i)++; \
@@ -342,6 +344,7 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -366,7 +369,7 @@ struct i40e_ring {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
};
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
@@ -423,6 +426,8 @@ struct i40e_ring {
* i40e_clean_rx_ring_irq() is called
* for this ring.
*/
+
+ struct i40e_channel *ch;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index fd4bbdd88b57..0e8568719b4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -46,6 +46,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
@@ -268,6 +271,10 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -276,6 +283,16 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+ /* Cloud filter modes:
+ * Mode1: Filter on L4 port only
+ * Mode2: Filter for non-tunneled traffic
+ * Mode3: Filter for tunnel traffic
+ */
+#define I40E_CLOUD_FILTER_MODE1 0x6
+#define I40E_CLOUD_FILTER_MODE2 0x7
+#define I40E_CLOUD_FILTER_MODE3 0x8
+#define I40E_SWITCH_MODE_MASK 0xF
+
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
@@ -428,6 +445,18 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@@ -598,8 +627,16 @@ struct i40e_hw {
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 4d1e670f490e..a3dc9b932946 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -154,15 +154,30 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
/**
* i40e_vc_disable_vf
- * @pf: pointer to the PF info
* @vf: pointer to the VF info
*
- * Disable the VF through a SW reset
+ * Disable the VF through a SW reset.
**/
-static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
+ int i;
+
i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+
+ /* We want to ensure that an actual reset is initiated after this
+ * function is called. However, we do not want to wait forever, so
+ * we'll give a reasonable time and print a message if we failed to
+ * ensure a reset.
+ */
+ for (i = 0; i < 20; i++) {
+ if (i40e_reset_vf(vf, false))
+ return;
+ usleep_range(10000, 20000);
+ }
+
+ dev_warn(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
}
/**
@@ -258,7 +273,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
struct i40e_hw *hw = &pf->hw;
u16 vsi_queue_id, pf_queue_id;
enum i40e_queue_type qtype;
- u16 next_q, vector_id;
+ u16 next_q, vector_id, size;
u32 reg, reg_idx;
u16 itr_idx = 0;
@@ -288,9 +303,11 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
vsi_queue_id + 1));
}
- next_q = find_first_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ next_q = find_first_bit(&linklistmap, size);
+ if (unlikely(next_q == size))
+ goto irq_list_done;
+
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
@@ -298,7 +315,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
wr32(hw, reg_idx, reg);
- while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ while (next_q < size) {
switch (qtype) {
case I40E_QUEUE_TYPE_RX:
reg_idx = I40E_QINT_RQCTL(pf_queue_id);
@@ -312,12 +329,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
break;
}
- next_q = find_next_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES),
- next_q + 1);
- if (next_q <
- (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ next_q = find_next_bit(&linklistmap, size, next_q + 1);
+ if (next_q < size) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
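For reference, the bit index stored in linklistmap packs a (VSI queue, queue type) pair, which is why the divide/modulo by I40E_VIRTCHNL_SUPPORTED_QTYPES above recovers both values. A minimal encoding sketch (helper name hypothetical):

static inline u16 example_linklist_bit(u16 vsi_queue_id, u16 qtype)
{
	/* RX and TX bits for the same queue are adjacent, so one
	 * find_next_bit() walk visits them in order.
	 */
	return vsi_queue_id * I40E_VIRTCHNL_SUPPORTED_QTYPES + qtype;
}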
@@ -423,6 +436,9 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
(sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+ if (!vf->qvlist_info)
+ return -ENOMEM;
+
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -621,7 +637,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
rx_ctx.dsize = 1;
/* default values */
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.prefena = 1;
rx_ctx.l2tsel = 1;
@@ -815,6 +831,14 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+	/* It's possible the VF had requested more queues than the default, so
+	 * do the accounting here when we're about to free them.
+	 */
+ if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
+ pf->queues_left += vf->num_queue_pairs -
+ I40E_DEFAULT_QUEUES_PER_VF;
+ }
+
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_idx) {
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
@@ -853,7 +877,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
}
/* reset some of the state variables keeping track of the resources */
vf->num_queue_pairs = 0;
- vf->vf_states = 0;
+ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
/**
@@ -868,12 +893,27 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
int total_queue_pairs = 0;
int ret;
+ if (vf->num_req_queues &&
+ vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
+ pf->num_vf_qps = vf->num_req_queues;
+ else
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+
/* allocate hw vsi context & associated resources */
ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+	/* Each VF is accounted for with a default number of queue pairs. If
+	 * the VF has now requested more, account for the difference here so
+	 * we never request more queues than we actually have left in HW.
+	 */
+ if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
+ pf->queues_left -=
+ total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
+
if (vf->trusted)
set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -1008,8 +1048,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
/* Do not notify the client during VF init */
- if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
- &vf->vf_states))
+ if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
+ &vf->vf_states))
i40e_notify_client_of_vf_reset(pf, abs_vf_id);
vf->num_vlan = 0;
}
@@ -1026,9 +1066,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * reset the VF
+ * Returns true if the VF is reset, false otherwise.
**/
-void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
@@ -1036,9 +1076,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
u32 reg;
int i;
- /* If VFs have been disabled, there is no need to reset */
+ /* If the VFs have been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue.
+ */
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
- return;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -1075,6 +1117,8 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
+
+ return true;
}
/**
@@ -1086,8 +1130,10 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
* VF, then do all the waiting in one chunk, and finally finish restoring each
* VF after the wait. This is useful during PF routines which need to reset
* all VFs, as otherwise it must perform these resets in a serialized fashion.
+ *
+ * Returns true if any VFs were reset, and false otherwise.
**/
-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
@@ -1096,11 +1142,11 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
/* If we don't have any VFs, then there is nothing to reset */
if (!pf->num_alloc_vfs)
- return;
+ return false;
/* If VFs have been disabled, there is no need to reset */
if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
- return;
+ return false;
/* Begin reset on all VFs at once */
for (v = 0; v < pf->num_alloc_vfs; v++)
@@ -1175,6 +1221,8 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, pf->state);
+
+ return true;
}
/**
@@ -1308,7 +1356,7 @@ err_alloc:
i40e_free_vfs(pf);
err_iov:
/* Re-enable interrupt 0. */
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -1377,8 +1425,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf,
- BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
return i40e_pci_sriov_enable(pdev, num_vfs);
}
@@ -1386,7 +1433,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL;
@@ -1537,6 +1584,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ } else {
+ clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
}
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -1579,6 +1628,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
}
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1987,6 +2039,57 @@ error_param:
}
/**
+ * i40e_vc_request_queues_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, the PF will reset the VF
+ * and return 0. If unsuccessful, the PF will send a message informing the VF
+ * of the number of available queues and return the result of sending that
+ * message to the VF.
+ **/
+static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
+{
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ int req_pairs = vfres->num_queue_pairs;
+ int cur_pairs = vf->num_queue_pairs;
+ struct i40e_pf *pf = vf->pf;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ return -EINVAL;
+
+ if (req_pairs <= 0) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request %d queues. Ignoring.\n",
+ vf->vf_id, req_pairs);
+ } else if (req_pairs > I40E_MAX_VF_QUEUES) {
+ dev_err(&pf->pdev->dev,
+ "VF %d tried to request more than %d queues.\n",
+ vf->vf_id,
+ I40E_MAX_VF_QUEUES);
+ vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
+ } else if (req_pairs - cur_pairs > pf->queues_left) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but only %d left.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs,
+ pf->queues_left);
+ vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else {
+ /* successful request */
+ vf->num_req_queues = req_pairs;
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+ return 0;
+ }
+
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
+				      (u8 *)vfres, sizeof(*vfres));
+}
+
+/**
* i40e_vc_get_stats_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2115,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f)
+ if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add MAC filter %pM for VF %d\n",
- al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- goto error_param;
- } else {
- vf->num_mac++;
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ } else {
+ vf->num_mac++;
+ }
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2708,6 +2812,9 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ ret = i40e_vc_request_queues_msg(vf, msg, msglen);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
@@ -2779,6 +2886,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ struct hlist_node *h;
int bkt;
/* validate the request */
@@ -2817,7 +2925,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
__i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2840,7 +2948,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* Force the VF driver stop so it has to reload with new MAC address */
- i40e_vc_disable_vf(pf, vf);
+ i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
error_param:
@@ -2848,6 +2956,34 @@ error_param:
}
/**
+ * i40e_vsi_has_vlans - True if VSI has configured VLANs
+ * @vsi: pointer to the vsi
+ *
+ * Check whether a VSI has any VLANs configured. Returns false if there is a
+ * port VLAN or if no VLANs are configured. Must not be called while holding
+ * the mac_filter_hash_lock.
+ */
+static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
+{
+ bool have_vlans;
+
+ /* If we have a port VLAN, then the VSI cannot have any VLANs
+ * configured, as all MAC/VLAN filters will be assigned to the PVID.
+ */
+ if (vsi->info.pvid)
+ return false;
+
+ /* Since we don't have a PVID, we know that if the device is in VLAN
+ * mode it must be because of a VLAN filter configured on this VSI.
+ */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+ have_vlans = i40e_is_vsi_in_vlan(vsi);
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ return have_vlans;
+}
+
+/**
* i40e_ndo_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -2899,10 +3035,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
- /* Locked once because multiple functions below iterate list */
- spin_lock_bh(&vsi->mac_filter_hash_lock);
-
- if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ if (i40e_vsi_has_vlans(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
@@ -2910,11 +3043,14 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* the right thing by reconfiguring his network correctly
* and then reloading the VF driver.
*/
- i40e_vc_disable_vf(pf, vf);
+ i40e_vc_disable_vf(vf);
/* During reset the VF got a new VSI, so refresh the pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
}
+ /* Locked once because multiple functions below iterate list */
+ spin_lock_bh(&vsi->mac_filter_hash_lock);
+
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
* Additionally check for the condition where there was a port
@@ -2987,8 +3123,6 @@ error_pvid:
return ret;
}
-#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
-#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
@@ -3004,7 +3138,6 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
- int speed = 0;
int ret = 0;
/* validate the request */
@@ -3029,48 +3162,10 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
goto error;
}
- switch (pf->hw.phy.link_info.link_speed) {
- case I40E_LINK_SPEED_40GB:
- speed = 40000;
- break;
- case I40E_LINK_SPEED_25GB:
- speed = 25000;
- break;
- case I40E_LINK_SPEED_20GB:
- speed = 20000;
- break;
- case I40E_LINK_SPEED_10GB:
- speed = 10000;
- break;
- case I40E_LINK_SPEED_1GB:
- speed = 1000;
- break;
- default:
- break;
- }
-
- if (max_tx_rate > speed) {
- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
- max_tx_rate, vf->vf_id);
- ret = -EINVAL;
+ ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
+ if (ret)
goto error;
- }
- if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
- dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
- }
-
- /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
- ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
- max_tx_rate / I40E_BW_CREDIT_DIVISOR,
- I40E_MAX_BW_INACTIVE_ACCUM, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
- ret);
- ret = -EIO;
- goto error;
- }
vf->tx_rate = max_tx_rate;
error:
return ret;
@@ -3279,14 +3374,11 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
vf = &pf->vf[vf_id];
- if (!vf)
- return -EINVAL;
if (setting == vf->trusted)
goto out;
vf->trusted = setting;
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
out:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 1f4b0c504368..5efc4f92bb37 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -56,7 +56,6 @@ enum i40e_vf_states {
I40E_VF_STATE_INIT = 0,
I40E_VF_STATE_ACTIVE,
I40E_VF_STATE_IWARPENA,
- I40E_VF_STATE_FCOEENA,
I40E_VF_STATE_DISABLED,
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
@@ -97,6 +96,7 @@ struct i40e_vf {
u16 lan_vsi_id; /* ID as used by firmware */
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
+ u8 num_req_queues; /* num of requested qps */
u64 num_mdd_events; /* num of mdd events detected */
/* num of continuous malformed or invalid msgs detected */
u64 num_invalid_msgs;
@@ -121,8 +121,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-void i40e_reset_vf(struct i40e_vf *vf, bool flr);
-void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
+bool i40e_reset_vf(struct i40e_vf *vf, bool flr);
+bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 83e63e55c4b4..06b04572c518 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,7 +34,15 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
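A minimal sketch (assuming the api_maj_ver/api_min_ver fields kept in the admin queue info, which are not shown in this hunk) of gating 1.7-only features such as PHY register access on the negotiated firmware API version:

static bool example_fw_has_17_api(struct i40e_hw *hw)
{
	return hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR ||
	       (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710);
}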
struct i40e_aq_desc {
__le16 flags;
@@ -236,6 +244,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -761,7 +771,22 @@ struct i40e_aqc_set_switch_config {
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
- u8 reserved[12];
+ /* The ethertype in switch_tag is dropped on ingress and used
+ * internally by the switch. Set this to zero for the default
+ * of 0x88a8 (802.1ad). Should be zero for firmware API
+ * versions lower than 1.7.
+ */
+ __le16 switch_tag;
+ /* The ethertypes in first_tag and second_tag are used to
+ * match the outer and inner VLAN tags (respectively) when HW
+ * double VLAN tagging is enabled via the set port parameters
+ * AQ command. Otherwise these are both ignored. Set them to
+ * zero for their defaults of 0x8100 (802.1Q). Should be zero
+ * for firmware API versions lower than 1.7.
+ */
+ __le16 first_tag;
+ __le16 second_tag;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
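As a sketch of how the new fields might be filled when building this descriptor (the helper is hypothetical; per the comments above, leaving a tag at zero selects the firmware default of 0x88a8 or 0x8100):

static void example_fill_switch_tags(struct i40e_hw *hw,
				     struct i40e_aqc_set_switch_config *scfg)
{
	/* The tag values cached in struct i40e_hw are copied into the
	 * descriptor as little-endian.
	 */
	scfg->switch_tag = cpu_to_le16(hw->switch_tag);
	scfg->first_tag = cpu_to_le16(hw->first_tag);
	scfg->second_tag = cpu_to_le16(hw->second_tag);
}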
@@ -1314,14 +1339,16 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-struct i40e_aqc_add_remove_cloud_filters_element_data {
+struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
@@ -1333,6 +1360,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
+ struct {
+ __le16 data[8];
+ } raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
@@ -1351,6 +1381,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 are reserved for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1385,6 +1419,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * I40E_AQC_ADD_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1396,6 +1473,60 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters_cmd,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1722,6 +1853,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
+ I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1740,7 +1873,12 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
- I40E_PHY_TYPE_MAX
+ I40E_PHY_TYPE_25GBASE_AOC = 0x23,
+ I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
+ I40E_PHY_TYPE_EMPTY = 0xFE,
+ I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
@@ -1797,6 +1935,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
+#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -1930,19 +2070,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+/* Since firmware API 1.7, the loopback field also carries power class info */
+#define I40E_AQ_LOOPBACK_MASK 0x07
+#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
+#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 power_desc;
+ union {
+ struct {
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
- u8 reserved[4];
+ u8 reserved[4];
+ };
+ struct {
+ u8 link_type[4];
+ u8 link_type_ext;
+ };
+ };
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
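A small sketch (hypothetical helper) of extracting the module power class from the loopback byte on firmware API 1.7 and newer:

static u8 example_link_power_class(struct i40e_aqc_get_link_status *ls)
{
	/* The low bits keep the loopback mode; the upper bits carry the
	 * power class starting with firmware API 1.7.
	 */
	return (ls->loopback & I40E_AQ_PWR_CLASS_MASK_LB) >>
	       I40E_AQ_PWR_CLASS_SHIFT_LB;
}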
@@ -2022,6 +2174,22 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+/* Set PHY Register command (0x0628) */
+/* Get PHY Register command (0x0629) */
+struct i40e_aqc_phy_register_access {
+ u8 phy_interface;
+#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
+#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
+ u8 dev_address;
+ u8 reserved1[2];
+ __le32 reg_address;
+ __le32 reg_value;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 8d3a2bfe186a..7d70bf69b249 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1042,6 +1042,75 @@ do_retry:
}
/**
+ * i40evf_aq_set_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: new register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write the specified value to an external PHY register.
+ **/
+i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+ cmd->reg_value = cpu_to_le32(reg_val);
+
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40evf_aq_get_phy_register
+ * @hw: pointer to the hw struct
+ * @phy_select: select which phy should be accessed
+ * @dev_addr: PHY device address
+ * @reg_addr: PHY register address
+ * @reg_val: read register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the current value of an external PHY register.
+ **/
+i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_phy_register_access *cmd =
+ (struct i40e_aqc_phy_register_access *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_register);
+
+ cmd->phy_interface = phy_select;
+ cmd->dev_address = dev_addr;
+ cmd->reg_address = cpu_to_le32(reg_addr);
+
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (!status)
+ *reg_val = le32_to_cpu(cmd->reg_value);
+
+ return status;
+}
+
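As a usage sketch (the helper is hypothetical), the new get command can read (Q)SFP module EEPROM bytes such as the SFF-8472 compliance byte defined in i40e_type.h, assuming the external-module interface maps dev_address/reg_address to the I2C address and byte offset:

static i40e_status example_read_sff8472_comp(struct i40e_hw *hw, u8 *comp)
{
	i40e_status status;
	u32 val = 0;

	status = i40evf_aq_get_phy_register(hw,
					    I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
					    I40E_I2C_EEPROM_DEV_ADDR,
					    I40E_MODULE_SFF_8472_COMP,
					    &val, NULL);
	if (!status)
		*comp = (u8)val;

	return status;
}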
+/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index c9836bba487d..b624b5994075 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -111,6 +111,15 @@ i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index c32c62462c84..50864f99446d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
@@ -358,14 +358,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
enum i40e_latency_range new_latency_range = rc->latency_range;
u32 new_itr = rc->itr;
- int bytes_per_int;
+ int bytes_per_usec;
unsigned int usecs, estimated_usecs;
if (rc->total_packets == 0 || !rc->itr)
return false;
usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
- bytes_per_int = rc->total_bytes / usecs;
+ bytes_per_usec = rc->total_bytes / usecs;
/* The calculations in this algorithm depend on interrupts actually
* firing at the ITR rate. This may not happen if the packet rate is
@@ -391,18 +391,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
*/
switch (new_latency_range) {
case I40E_LOWEST_LATENCY:
- if (bytes_per_int > 10)
+ if (bytes_per_usec > 10)
new_latency_range = I40E_LOW_LATENCY;
break;
case I40E_LOW_LATENCY:
- if (bytes_per_int > 20)
+ if (bytes_per_usec > 20)
new_latency_range = I40E_BULK_LATENCY;
- else if (bytes_per_int <= 10)
+ else if (bytes_per_usec <= 10)
new_latency_range = I40E_LOWEST_LATENCY;
break;
case I40E_BULK_LATENCY:
default:
- if (bytes_per_int <= 20)
+ if (bytes_per_usec <= 20)
new_latency_range = I40E_LOW_LATENCY;
break;
}
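For scale, these thresholds are in bytes per microsecond: 10 bytes/usec equals 80 Mbit/s and 20 bytes/usec equals 160 Mbit/s, so a ring only leaves the lowest-latency range once it is carrying tens of megabits per second.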
@@ -1409,9 +1409,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- /* Don't clear PBA because that can cause lost interrupts that
- * came in while we were cleaning/polling
- */
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
(itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0d9f98bc07bd..8d26c85d12e1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -38,8 +38,10 @@
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_20K
-#define I40E_ITR_TX_DEF I40E_ITR_20K
+#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -189,7 +191,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
do { \
(i)++; \
@@ -325,6 +327,7 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -348,7 +351,7 @@ struct i40e_ring {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
};
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
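A sketch only (hypothetical helper): with the state converted to a real bitmap sized by __I40E_RING_STATE_NBITS, the usual bitops keep working on it, e.g.:

static inline bool example_ring_xps_done(struct i40e_ring *ring)
{
	/* ring->state is DECLARE_BITMAP(..., __I40E_RING_STATE_NBITS), so it
	 * is passed to test_bit() and friends directly.
	 */
	return test_bit(__I40E_TX_XPS_INIT_DONE, ring->state);
}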
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 2ea919d9cdcf..213b773dfad6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -46,6 +46,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
@@ -401,6 +404,18 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* (Q)SFP module access definitions */
+#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
+#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
+#define I40E_MODULE_TYPE_ADDR 0x00
+#define I40E_MODULE_REVISION_ADDR 0x01
+#define I40E_MODULE_SFF_8472_COMP 0x5E
+#define I40E_MODULE_SFF_8472_SWAP 0x5C
+#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
+#define I40E_MODULE_TYPE_QSFP28 0x11
+#define I40E_MODULE_QSFP_MAX_LEN 640
+
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@@ -556,11 +571,19 @@ struct i40e_hw {
/* LLDP/DCBX Status */
u16 dcbx_status;
+#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+
/* DCBX info */
struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+ /* Used in set switch config AQ command */
+ u16 switch_tag;
+ u16 first_tag;
+ u16 second_tag;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 82f69031e5cd..de0af521d602 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -102,6 +102,7 @@ struct i40e_vsi {
#define I40E_TX_CTXTDESC(R, i) \
(&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
#define MAX_QUEUES 16
+#define I40EVF_MAX_REQ_QUEUES 4
#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
@@ -200,6 +201,7 @@ struct i40evf_adapter {
struct list_head vlan_filter_list;
char misc_vector_name[IFNAMSIZ + 9];
int num_active_queues;
+ int num_req_queues;
/* TX */
struct i40e_ring *tx_rings;
@@ -220,21 +222,22 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
-#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
-#define I40EVF_FLAG_RESET_PENDING BIT(9)
-#define I40EVF_FLAG_RESET_NEEDED BIT(10)
-#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
-#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
-#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
-#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
-#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
-#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
-#define I40EVF_FLAG_PROMISC_ON BIT(18)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
-#define I40EVF_FLAG_LEGACY_RX BIT(20)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(1)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(2)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3)
+#define I40EVF_FLAG_RESET_PENDING BIT(4)
+#define I40EVF_FLAG_RESET_NEEDED BIT(5)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(7)
+#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+#define I40EVF_FLAG_PROMISC_ON BIT(13)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(14)
+#define I40EVF_FLAG_LEGACY_RX BIT(15)
+#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16)
/* duplicates for common code */
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
@@ -349,6 +352,7 @@ void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
void i40evf_enable_queues(struct i40evf_adapter *adapter);
void i40evf_disable_queues(struct i40evf_adapter *adapter);
void i40evf_map_queues(struct i40evf_adapter *adapter);
+int i40evf_request_queues(struct i40evf_adapter *adapter, int num);
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
void i40evf_add_vlans(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index 93cf5fd17d91..da60ce12b33d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/list.h>
#include <linux/errno.h>
@@ -25,6 +26,26 @@ static struct i40e_ops i40evf_lan_ops = {
};
/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ int i;
+
+ memset(params, 0, sizeof(struct i40e_params));
+ params->mtu = vsi->netdev->mtu;
+ params->link_up = vsi->back->link_up;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ params->qos.prio_qos[i].tc = 0;
+ params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+ }
+}
+
+/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
@@ -65,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
return;
cinst = vsi->back->cinst;
- memset(&params, 0, sizeof(params));
- params.mtu = vsi->netdev->mtu;
- params.link_up = vsi->back->link_up;
- params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
@@ -76,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
@@ -165,9 +184,9 @@ static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_params params;
if (!vf_registered_client)
goto out;
@@ -191,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- cinst->lan_info.params.qos.prio_qos[i].tc = 0;
- cinst->lan_info.params.qos.prio_qos[i].qs_handle =
- vsi->qs_handle;
- }
-
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
index 7d283c7506a5..15a10da5bd4a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 65874d6b3ab9..da006fa3fec1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -669,7 +669,7 @@ static void i40evf_get_channels(struct net_device *netdev,
struct i40evf_adapter *adapter = netdev_priv(netdev);
/* Report maximum channels */
- ch->max_combined = adapter->num_active_queues;
+ ch->max_combined = I40EVF_MAX_REQ_QUEUES;
ch->max_other = NONQ_VECS;
ch->other_count = NONQ_VECS;
@@ -678,6 +678,41 @@ static void i40evf_get_channels(struct net_device *netdev,
}
/**
+ * i40evf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with the PF, then do a reset. During
+ * the reset we'll reallocate queues and fix the RSS table. Returns 0 on
+ * success, negative on failure.
+ **/
+static int i40evf_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int num_req = ch->combined_count;
+
+ if (num_req != adapter->num_active_queues &&
+ !(adapter->vf_res->vf_cap_flags &
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+ dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
+ return -EINVAL;
+ }
+
+ /* All of these should have already been checked by ethtool before this
+ * even gets to us, but just to be sure.
+ */
+ if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES)
+ return -EINVAL;
+
+ if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
+ return -EINVAL;
+
+ adapter->num_req_queues = num_req;
+ return i40evf_request_queues(adapter, num_req);
+}
+
+/**
* i40evf_get_rxfh_key_size - get the RSS hash key size
* @netdev: network interface device structure
*
@@ -785,6 +820,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_rxfh = i40evf_get_rxfh,
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
+ .set_channels = i40evf_set_channels,
.get_rxfh_key_size = i40evf_get_rxfh_key_size,
.get_link_ksettings = i40evf_get_link_ksettings,
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 1825d956bb00..7b2a4eba92e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -46,7 +46,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 0
+#define DRV_VERSION_BUILD 1
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -430,57 +430,26 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
* group the rings as "efficiently" as possible. You would add new
* mapping configurations in here.
**/
-static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
+ int rings_remaining = adapter->num_active_queues;
+ int ridx = 0, vidx = 0;
int q_vectors;
- int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_active_queues;
- int txr_remaining = adapter->num_active_queues;
- int i, j;
- int rqpv, tqpv;
- int err = 0;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- /* The ideal configuration...
- * We have enough vectors to map one per queue.
- */
- if (q_vectors >= (rxr_remaining * 2)) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
- i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
-
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
- goto out;
- }
+ for (; ridx < rings_remaining; ridx++) {
+ i40evf_map_vector_to_rxq(adapter, vidx, ridx);
+ i40evf_map_vector_to_txq(adapter, vidx, ridx);
- /* If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
- * Re-adjusting *qpv takes care of the remainder.
- */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- }
- for (i = v_start; i < q_vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- i40evf_map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
+ /* In the case where we have more queues than vectors, continue
+ * round-robin on vectors until all queues are mapped.
+ */
+ if (++vidx >= q_vectors)
+ vidx = 0;
}
-out:
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
-
- return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -546,6 +515,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
+ int cpu;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -584,10 +554,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector->affinity_notify.release =
i40evf_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* get_cpu_mask returns a static constant mask with
- * a permanent lifetime so it's ok to use here.
+ /* Spread the IRQ affinity hints across online CPUs. Note that
+ * get_cpu_mask returns a mask with a permanent lifetime so
+ * it's safe to use as a hint for irq_set_affinity_hint.
*/
- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -908,6 +880,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ } else {
+ f->remove = false;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
@@ -1217,9 +1191,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
int i, num_active_queues;
- num_active_queues = min_t(int,
- adapter->vsi_res->num_queue_pairs,
- (int)(num_online_cpus()));
+	/* If we're in a reset and reallocating queues, we don't yet know for
+	 * certain that the PF gave us the number of queues we asked for, but
+	 * we'll assume it did. Once the basic reset is finished, we'll
+	 * confirm this when we start negotiating the config with the PF.
+ */
+ if (adapter->num_req_queues)
+ num_active_queues = adapter->num_req_queues;
+ else
+ num_active_queues = min_t(int,
+ adapter->vsi_res->num_queue_pairs,
+ (int)(num_online_cpus()));
+
adapter->tx_rings = kcalloc(num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
@@ -1240,7 +1223,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
- tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
+ tx_ring->tx_itr_setting = I40E_ITR_TX_DEF;
if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1249,7 +1232,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
+ rx_ring->rx_itr_setting = I40E_ITR_RX_DEF;
}
adapter->num_active_queues = num_active_queues;
@@ -1568,12 +1551,53 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev))
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_q_vectors(adapter);
+ i40evf_free_queues(adapter);
+
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err;
+
+ netif_tx_stop_all_queues(netdev);
+
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err;
+
+ set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
+
+ i40evf_map_rings_to_vectors(adapter);
+
+ if (RSS_AQ(adapter))
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+ else
+ err = i40evf_init_rss(adapter);
+err:
+ return err;
+}
+
+/**
* i40evf_watchdog_timer - Periodic call-back timer
* @data: pointer to adapter disguised as unsigned long
**/
-static void i40evf_watchdog_timer(unsigned long data)
+static void i40evf_watchdog_timer(struct timer_list *t)
{
- struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ struct i40evf_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
schedule_work(&adapter->watchdog_task);
/* timer will be rescheduled in watchdog task */
@@ -1913,8 +1937,15 @@ continue_reset:
if (err)
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
+ adapter->aq_required = 0;
+
+ if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+ err = i40evf_reinit_interrupt_scheme(adapter);
+ if (err)
+ goto reset_err;
+ }
- adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+ adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
/* re-add all MAC filters */
@@ -1944,6 +1975,15 @@ continue_reset:
if (err)
goto reset_err;
+ if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) {
+ err = i40evf_request_traffic_irqs(adapter,
+ netdev->name);
+ if (err)
+ goto reset_err;
+
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ }
+
i40evf_configure(adapter);
i40evf_up_complete(adapter);
@@ -2070,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2078,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
@@ -2386,10 +2426,6 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
- NETIF_F_HW_VLAN_CTAG_RX |\
- NETIF_F_HW_VLAN_CTAG_FILTER)
-
/**
* i40evf_fix_features - fix up the netdev feature bits
* @netdev: our net device
@@ -2402,9 +2438,11 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- features |= I40EVF_VLAN_FEATURES;
+ if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+
return features;
}
@@ -2459,9 +2497,9 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
int i40evf_process_config(struct i40evf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
+ int i, num_req_queues = adapter->num_req_queues;
struct net_device *netdev = adapter->netdev;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
@@ -2475,6 +2513,23 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
return -ENODEV;
}
+ if (num_req_queues &&
+ num_req_queues != adapter->vsi_res->num_queue_pairs) {
+		/* Problem. The PF gave us a different number of queues than
+		 * we had negotiated in our request. We need a reset to see
+		 * whether we can get back to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ i40evf_schedule_reset(adapter);
+ return -ENODEV;
+ }
+ adapter->num_req_queues = 0;
+
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -2518,9 +2573,17 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
*/
hw_features = hw_enc_features;
+ /* Enable VLAN features if supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
netdev->hw_features |= hw_features;
- netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features;
+
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2686,9 +2749,7 @@ static void i40evf_init_task(struct work_struct *work)
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &i40evf_watchdog_timer;
- adapter->watchdog_timer.data = (unsigned long)adapter;
+ timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0);
mod_timer(&adapter->watchdog_timer, jiffies + 1);
adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 85876f4fb1fb..46c8b8a3907c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -52,7 +52,7 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
op, i40evf_stat_str(hw, err),
i40evf_aq_str(hw, hw->aq.asq_last_status));
return err;
@@ -160,7 +160,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
- VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+ VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -385,6 +386,33 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_request_queues
+ * @adapter: adapter structure
+ * @num: number of requested queues
+ *
+ * We get a default number of queues from the PF. This enables us to request a
+ * different number. Returns 0 on success, negative on failure
+ **/
+int i40evf_request_queues(struct i40evf_adapter *adapter, int num)
+{
+ struct virtchnl_vf_res_request vfres;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
+ adapter->current_op);
+ return -EBUSY;
+ }
+
+ vfres.num_queue_pairs = num;
+
+ adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
+ (u8 *)&vfres, sizeof(vfres));
+}
+
+/**
* i40evf_add_ether_addrs
* @adapter: adapter structure
* @addrs: the MAC address filters to add (contiguous)
@@ -1068,6 +1096,19 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
"Invalid message %d from PF\n", v_opcode);
}
break;
+ case VIRTCHNL_OP_REQUEST_QUEUES: {
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ if (vfres->num_queue_pairs != adapter->num_req_queues) {
+ dev_info(&adapter->pdev->dev,
+ "Requested %d queues, PF can support %d\n",
+ adapter->num_req_queues,
+ vfres->num_queue_pairs);
+ adapter->num_req_queues = 0;
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ }
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 1de82f247312..83cabff1e0ab 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -353,7 +353,18 @@
#define E1000_RXPBS_CFG_TS_EN 0x80000000
#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_RXPBSIZE_MASK 0x0000003F
+#define I210_RXPBSIZE_PB_32KB 0x00000020
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+#define I210_TXPBSIZE_MASK 0xC0FFFFFF
+#define I210_TXPBSIZE_PB0_8KB (8 << 0)
+#define I210_TXPBSIZE_PB1_8KB (8 << 6)
+#define I210_TXPBSIZE_PB2_4KB (4 << 12)
+#define I210_TXPBSIZE_PB3_4KB (4 << 18)
+
+#define I210_DTXMXPKTSZ_DEFAULT 0x00000098
+
+#define I210_SR_QUEUES_NUM 2
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -1051,4 +1062,16 @@
#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
#define E1000_VLAPQF_QUEUE_MASK 0x03
+/* TX Qav Control fields */
+#define E1000_TQAVCTRL_XMIT_MODE BIT(0)
+#define E1000_TQAVCTRL_DATAFETCHARB BIT(4)
+#define E1000_TQAVCTRL_DATATRANARB BIT(8)
+
+/* TX Qav Credit Control fields */
+#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF
+#define E1000_TQAVCC_QUEUEMODE BIT(31)
+
+/* Transmit Descriptor Control fields */
+#define E1000_TXDCTL_PRIORITY BIT(27)
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 58adbf234e07..568c96842f28 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!E1000_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
@@ -421,6 +421,14 @@ do { \
#define E1000_I210_FLA 0x1201C
+#define E1000_I210_DTXMXPKTSZ 0x355C
+
+#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40))
+
+#define E1000_I210_TQAVCTRL 0x3570
+#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40))
+#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40))
+
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 06ffb2bc713e..92845692087a 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -281,6 +281,11 @@ struct igb_ring {
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
+ bool cbs_enable; /* indicates if CBS is enabled */
+ s32 idleslope; /* idleSlope in kbps */
+ s32 sendslope; /* sendSlope in kbps */
+ s32 hicredit; /* hiCredit in bytes */
+ s32 locredit; /* loCredit in bytes */
/* everything past this point are written often */
u16 next_to_clean;
@@ -621,6 +626,7 @@ struct igb_adapter {
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
#define IGB_FLAG_RX_LEGACY BIT(16)
+#define IGB_FLAG_FQTSS BIT(17)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea69af267d63..c208753ff5b7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/pkt_sched.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -62,6 +63,17 @@
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
+
+enum queue_mode {
+ QUEUE_MODE_STRICT_PRIORITY,
+ QUEUE_MODE_STREAM_RESERVATION,
+};
+
+enum tx_queue_prio {
+ TX_QUEUE_PRIO_HIGH,
+ TX_QUEUE_PRIO_LOW,
+};
+
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
@@ -133,8 +145,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
-static void igb_update_phy_info(unsigned long);
-static void igb_watchdog(unsigned long);
+static void igb_update_phy_info(struct timer_list *);
+static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
@@ -750,7 +762,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
- u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (E1000_REMOVED(hw_addr))
@@ -1271,6 +1283,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
+ ring->cbs_enable = false;
+ ring->idleslope = 0;
+ ring->sendslope = 0;
+ ring->hicredit = 0;
+ ring->locredit = 0;
+
u64_stats_init(&ring->tx_syncp);
u64_stats_init(&ring->tx_syncp2);
@@ -1598,6 +1616,284 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
+static void enable_fqtss(struct igb_adapter *adapter, bool enable)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ WARN_ON(hw->mac.type != e1000_i210);
+
+ if (enable)
+ adapter->flags |= IGB_FLAG_FQTSS;
+ else
+ adapter->flags &= ~IGB_FLAG_FQTSS;
+
+ if (netif_running(netdev))
+ schedule_work(&adapter->reset_task);
+}
+
+static bool is_fqtss_enabled(struct igb_adapter *adapter)
+{
+ return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
+}
+
+static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
+ enum tx_queue_prio prio)
+{
+ u32 val;
+
+ WARN_ON(hw->mac.type != e1000_i210);
+ WARN_ON(queue < 0 || queue > 4);
+
+ val = rd32(E1000_I210_TXDCTL(queue));
+
+ if (prio == TX_QUEUE_PRIO_HIGH)
+ val |= E1000_TXDCTL_PRIORITY;
+ else
+ val &= ~E1000_TXDCTL_PRIORITY;
+
+ wr32(E1000_I210_TXDCTL(queue), val);
+}
+
+static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
+{
+ u32 val;
+
+ WARN_ON(hw->mac.type != e1000_i210);
+ WARN_ON(queue < 0 || queue > 1);
+
+ val = rd32(E1000_I210_TQAVCC(queue));
+
+ if (mode == QUEUE_MODE_STREAM_RESERVATION)
+ val |= E1000_TQAVCC_QUEUEMODE;
+ else
+ val &= ~E1000_TQAVCC_QUEUEMODE;
+
+ wr32(E1000_I210_TQAVCC(queue), val);
+}
+
+/**
+ * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
+ * @adapter: pointer to adapter struct
+ * @queue: queue number
+ * @enable: true = enable CBS, false = disable CBS
+ * @idleslope: idleSlope in kbps
+ * @sendslope: sendSlope in kbps
+ * @hicredit: hiCredit in bytes
+ * @locredit: loCredit in bytes
+ *
+ * Configure CBS for a given hardware queue. When disabling, the idleslope,
+ * sendslope, hicredit and locredit arguments are ignored.
+ **/
+static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
+ bool enable, int idleslope, int sendslope,
+ int hicredit, int locredit)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 tqavcc;
+ u16 value;
+
+ WARN_ON(hw->mac.type != e1000_i210);
+ WARN_ON(queue < 0 || queue > 1);
+
+ if (enable) {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+
+ /* According to i210 datasheet section 7.2.7.7, we should set
+ * the 'idleSlope' field from TQAVCC register following the
+ * equation:
+ *
+ * For 100 Mbps link speed:
+ *
+ * value = BW * 0x7735 * 0.2 (E1)
+ *
+ * For 1000Mbps link speed:
+ *
+ * value = BW * 0x7735 * 2 (E2)
+ *
+ * E1 and E2 can be merged into one equation as shown below.
+ * Note that 'link-speed' is in Mbps.
+ *
+ * value = BW * 0x7735 * 2 * link-speed
+ * -------------- (E3)
+ * 1000
+ *
+ * 'BW' is the percentage bandwidth out of full link speed
+ * which can be found with the following equation. Note that
+ * idleSlope here is the parameter from this function which
+ * is in kbps.
+ *
+ * BW = idleSlope
+ * ----------------- (E4)
+ * link-speed * 1000
+ *
+ * That said, we can come up with a generic equation to
+ * calculate the value we should set in the TQAVCC register by
+ * replacing 'BW' in E3 by E4. The resulting equation is:
+ *
+ * value = idleSlope * 0x7735 * 2 * link-speed
+ * ----------------- -------------- (E5)
+ * link-speed * 1000 1000
+ *
+ * 'link-speed' is present in both sides of the fraction so
+ * it is canceled out. The final equation is the following:
+ *
+ * value = idleSlope * 61034
+ * ----------------- (E6)
+ * 1000000
+ */
+ value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);
+
+ tqavcc = rd32(E1000_I210_TQAVCC(queue));
+ tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
+ tqavcc |= value;
+ wr32(E1000_I210_TQAVCC(queue), tqavcc);
+
+ wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
+ } else {
+ set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
+ set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
+
+ /* Set idleSlope to zero. */
+ tqavcc = rd32(E1000_I210_TQAVCC(queue));
+ tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
+ wr32(E1000_I210_TQAVCC(queue), tqavcc);
+
+ /* Set hiCredit to zero. */
+ wr32(E1000_I210_TQAVHC(queue), 0);
+ }
+
+ /* XXX: In i210 controller the sendSlope and loCredit parameters from
+ * CBS are not configurable by software so we don't do any 'controller
+ * configuration' in respect to these parameters.
+ */
+
+ netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ (enable) ? "enabled" : "disabled", queue,
+ idleslope, sendslope, hicredit, locredit);
+}
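A quick way to sanity-check equation E6 is to evaluate it outside the driver. The snippet below is a minimal standalone sketch, not part of the patch; the helper name and the 20 Mbps example reservation are illustrative, and the rounding mirrors the DIV_ROUND_UP_ULL() call above.

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch of equation E6:
 * TQAVCC.idleSlope = idleslope_kbps * 61034 / 1000000, rounded up.
 */
static uint32_t tqavcc_idleslope(uint64_t idleslope_kbps)
{
	return (uint32_t)((idleslope_kbps * 61034ULL + 999999ULL) / 1000000ULL);
}

int main(void)
{
	/* e.g. a 20 Mbps (20000 kbps) class A reservation */
	printf("idleSlope field = 0x%04x (%u)\n",
	       tqavcc_idleslope(20000), tqavcc_idleslope(20000));
	/* prints 0x04c5 (1221) */
	return 0;
}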
+
+static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
+ bool enable, int idleslope, int sendslope,
+ int hicredit, int locredit)
+{
+ struct igb_ring *ring;
+
+ if (queue < 0 || queue > adapter->num_tx_queues)
+ return -EINVAL;
+
+ ring = adapter->tx_ring[queue];
+
+ ring->cbs_enable = enable;
+ ring->idleslope = idleslope;
+ ring->sendslope = sendslope;
+ ring->hicredit = hicredit;
+ ring->locredit = locredit;
+
+ return 0;
+}
+
+static bool is_any_cbs_enabled(struct igb_adapter *adapter)
+{
+ struct igb_ring *ring;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ring = adapter->tx_ring[i];
+
+ if (ring->cbs_enable)
+ return true;
+ }
+
+ return false;
+}
+
+static void igb_setup_tx_mode(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+
+ /* Only i210 controller supports changing the transmission mode. */
+ if (hw->mac.type != e1000_i210)
+ return;
+
+ if (is_fqtss_enabled(adapter)) {
+ int i, max_queue;
+
+ /* Configure TQAVCTRL register: set transmit mode to 'Qav',
+ * set data fetch arbitration to 'round robin' and set data
+ * transfer arbitration to 'credit shaper algorithm'.
+ */
+ val = rd32(E1000_I210_TQAVCTRL);
+ val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
+ val &= ~E1000_TQAVCTRL_DATAFETCHARB;
+ wr32(E1000_I210_TQAVCTRL, val);
+
+ /* Configure Tx and Rx packet buffers sizes as described in
+ * i210 datasheet section 7.2.7.7.
+ */
+ val = rd32(E1000_TXPBS);
+ val &= ~I210_TXPBSIZE_MASK;
+ val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
+ I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
+ wr32(E1000_TXPBS, val);
+
+ val = rd32(E1000_RXPBS);
+ val &= ~I210_RXPBSIZE_MASK;
+ val |= I210_RXPBSIZE_PB_32KB;
+ wr32(E1000_RXPBS, val);
+
+ /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
+ * register should not exceed the buffer size programmed in
+ * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
+ * so according to the datasheet we should set MAX_TPKT_SIZE to
+ * 4kB / 64.
+ *
+ * However, when we do so, no frames from queues 2 and 3 are
+ * transmitted. It seems MAX_TPKT_SIZE should not be greater than
+ * or _equal_ to the buffer size programmed in TXPBS. For this
+ * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
+ */
+ val = (4096 - 1) / 64;
+ wr32(E1000_I210_DTXMXPKTSZ, val);
+
+ /* Since FQTSS mode is enabled, apply any CBS configuration
+ * previously set. If no previous CBS configuration has been
+ * done, then the initial configuration is applied, which means
+ * CBS is disabled.
+ */
+ max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
+ adapter->num_tx_queues : I210_SR_QUEUES_NUM;
+
+ for (i = 0; i < max_queue; i++) {
+ struct igb_ring *ring = adapter->tx_ring[i];
+
+ igb_configure_cbs(adapter, i, ring->cbs_enable,
+ ring->idleslope, ring->sendslope,
+ ring->hicredit, ring->locredit);
+ }
+ } else {
+ wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
+
+ val = rd32(E1000_I210_TQAVCTRL);
+ /* According to Section 8.12.21, the other flags we've set when
+ * enabling FQTSS are not relevant when disabling FQTSS so we
+ * don't touch them here.
+ */
+ val &= ~E1000_TQAVCTRL_XMIT_MODE;
+ wr32(E1000_I210_TQAVCTRL, val);
+ }
+
+ netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
+ "enabled" : "disabled");
+}
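As a quick check of the MAX_TPKT_SIZE value above (not part of the patch): the register is programmed in 64-byte units, so val = (4096 - 1) / 64 = 63 with integer division, which caps fetched packets at 63 * 64 = 4032 bytes, just under the 4 KB buffers programmed for queues 2 and 3.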
+
/**
* igb_configure - configure the hardware for RX and TX
* @adapter: private board structure
@@ -1609,6 +1905,7 @@ static void igb_configure(struct igb_adapter *adapter)
igb_get_hw_control(adapter);
igb_set_rx_mode(netdev);
+ igb_setup_tx_mode(adapter);
igb_restore_vlan(adapter);
@@ -2150,6 +2447,55 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
return features;
}
+static int igb_offload_cbs(struct igb_adapter *adapter,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ /* CBS offloading is only supported by i210 controller. */
+ if (hw->mac.type != e1000_i210)
+ return -EOPNOTSUPP;
+
+ /* CBS offloading is only supported by queue 0 and queue 1. */
+ if (qopt->queue < 0 || qopt->queue > 1)
+ return -EINVAL;
+
+ err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
+ qopt->idleslope, qopt->sendslope,
+ qopt->hicredit, qopt->locredit);
+ if (err)
+ return err;
+
+ if (is_fqtss_enabled(adapter)) {
+ igb_configure_cbs(adapter, qopt->queue, qopt->enable,
+ qopt->idleslope, qopt->sendslope,
+ qopt->hicredit, qopt->locredit);
+
+ if (!is_any_cbs_enabled(adapter))
+ enable_fqtss(adapter, false);
+
+ } else {
+ enable_fqtss(adapter, true);
+ }
+
+ return 0;
+}
+
+static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return igb_offload_cbs(adapter, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2175,6 +2521,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = igb_features_check,
+ .ndo_setup_tc = igb_setup_tc,
};
/**
@@ -2538,10 +2885,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
}
- setup_timer(&adapter->watchdog_timer, igb_watchdog,
- (unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
- (unsigned long) adapter);
+ timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
+ timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
INIT_WORK(&adapter->reset_task, igb_reset_task);
INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -3162,6 +3507,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
GFP_ATOMIC);
+ if (!adapter->shadow_vfta)
+ return -ENOMEM;
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
@@ -4423,9 +4770,9 @@ static void igb_spoof_check(struct igb_adapter *adapter)
/* Need to wait a few seconds after link up to get diagnostic information from
* the phy
*/
-static void igb_update_phy_info(unsigned long data)
+static void igb_update_phy_info(struct timer_list *t)
{
- struct igb_adapter *adapter = (struct igb_adapter *) data;
+ struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
igb_get_phy_info(&adapter->hw);
}
@@ -4512,9 +4859,9 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
* igb_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void igb_watchdog(unsigned long data)
+static void igb_watchdog(struct timer_list *t)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
}
@@ -6970,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 1ed556911b14..4214c1519a87 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
@@ -1915,9 +1915,9 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
* igbvf_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void igbvf_watchdog(unsigned long data)
+static void igbvf_watchdog(struct timer_list *t)
{
- struct igbvf_adapter *adapter = (struct igbvf_adapter *)data;
+ struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer);
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -2878,8 +2878,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->addr_len);
}
- setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0);
INIT_WORK(&adapter->reset_task, igbvf_reset_task);
INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5a713199653c..2353c383f0a7 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -83,7 +83,7 @@ static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
-static void ixgb_watchdog(unsigned long data);
+static void ixgb_watchdog(struct timer_list *t);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
@@ -508,9 +508,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = ixgb_watchdog;
- adapter->watchdog_timer.data = (unsigned long)adapter;
+ timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
@@ -1152,9 +1150,9 @@ alloc_failed:
**/
static void
-ixgb_watchdog(unsigned long data)
+ixgb_watchdog(struct timer_list *t)
{
- struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
+ struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct ixgb_desc_ring *txdr = &adapter->tx_ring;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dd5578756ae0..468c3555a629 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -275,6 +275,7 @@ struct ixgbe_rx_queue_stats {
u64 rsc_count;
u64 rsc_flush;
u64 non_eop_descs;
+ u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
@@ -434,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define IXGBE_ITR_ADAPTIVE_MIN_INC 2
+#define IXGBE_ITR_ADAPTIVE_MIN_USECS 10
+#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126
+#define IXGBE_ITR_ADAPTIVE_LATENCY 0x80
+#define IXGBE_ITR_ADAPTIVE_BULK 0x00
+
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
+ unsigned long next_update; /* jiffies value of last update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
@@ -655,6 +663,7 @@ struct ixgbe_adapter {
u64 rsc_total_count;
u64 rsc_total_flush;
u64 non_eop_descs;
+ u32 alloc_rx_page;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 6e6ab6f6875e..9bef255f6a18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3781,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
@@ -4081,8 +4081,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
return false;
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
- fwsm &= IXGBE_FWSM_MODE_MASK;
- return fwsm == IXGBE_FWSM_FW_MODE_PT;
+
+ return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index e083732adf64..a01409e2e06c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)
static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index c3e7a8191128..0aad1c2a3667 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -104,6 +104,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
+ {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -1916,8 +1917,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
unsigned int size)
{
union ixgbe_adv_rx_desc *rx_desc;
- struct ixgbe_rx_buffer *rx_buffer;
- struct ixgbe_tx_buffer *tx_buffer;
u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
@@ -1925,7 +1924,38 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
+ while (tx_ntc != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_buffer;
+
+ tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
+
+ /* if DD is not set transmit has not completed */
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ return count;
+
+ /* unmap buffer on Tx side */
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* increment Tx next to clean counter */
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+ }
+
while (rx_desc->wb.upper.length) {
+ struct ixgbe_rx_buffer *rx_buffer;
+
/* check Rx buffer */
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
@@ -1938,6 +1968,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
/* verify contents of skb */
if (ixgbe_check_lbtest_frame(rx_buffer, size))
count++;
+ else
+ break;
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
@@ -1945,26 +1977,10 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
- /* unmap buffer on Tx side */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
-
- /* increment Rx/Tx next to clean counters */
+ /* increment Rx next to clean counter */
rx_ntc++;
if (rx_ntc == rx_ring->count)
rx_ntc = 0;
- tx_ntc++;
- if (tx_ntc == tx_ring->count)
- tx_ntc = 0;
/* fetch next descriptor */
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index f1bfae0c41d0..8e2a957aca18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
ring->next = head->ring;
head->ring = ring;
head->count++;
+ head->next_update = jiffies + 1;
}
/**
@@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
- /* initialize pointer to rings */
- ring = q_vector->ring;
+ /* Initialize setting for adaptive ITR */
+ q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
+ q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
/* initialize ITR */
if (txr_count && !rxr_count) {
@@ -897,6 +901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->itr = adapter->rx_itr_setting;
}
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d5f31e94358..62a18914f00f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
*/
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -1620,6 +1620,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->page = page;
bi->page_offset = ixgbe_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
+ rx_ring->rx_stats.alloc_rx_page++;
return true;
}
@@ -2133,6 +2134,21 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
#if L1_CACHE_BYTES < 128
prefetch(xdp->data + L1_CACHE_BYTES);
#endif
+ /* Note, we get here by enabling legacy-rx via:
+ *
+ * ethtool --set-priv-flags <dev> legacy-rx on
+ *
+ * In this mode, we currently get 0 extra XDP headroom as
+ * opposed to having legacy-rx off, where we process XDP
+ * packets going to stack via ixgbe_build_skb(). The latter
+ * provides us currently with 192 bytes of headroom.
+ *
+ * For ixgbe_construct_skb() mode it means that the
+ * xdp->data_meta will always point to xdp->data, since
+ * the helper cannot expand the head. Should this ever
+ * change in the future for legacy-rx mode on, then let's also
+ * add xdp->data_meta handling here.
+ */
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
@@ -2165,6 +2181,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp,
union ixgbe_adv_rx_desc *rx_desc)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
@@ -2174,10 +2191,14 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
#endif
struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(xdp->data);
+ /* Prefetch first cache line of first page. If xdp->data_meta
+ * is unused, this points exactly to xdp->data, otherwise we
+ * likely have a consumer accessing first few bytes of meta
+ * data, and then actual data.
+ */
+ prefetch(xdp->data_meta);
#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
+ prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
/* build an skb around the page buffer */
@@ -2188,6 +2209,8 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
/* update pointers within the skb to store the data */
skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
/* record DMA address if this is the start of a chain of buffers */
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
@@ -2326,6 +2349,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (!skb) {
xdp.data = page_address(rx_buffer->page) +
rx_buffer->page_offset;
+ xdp.data_meta = xdp.data;
xdp.data_hard_start = xdp.data -
ixgbe_rx_offset(rx_ring);
xdp.data_end = xdp.data + size;
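The data_meta plumbing above only matters if an XDP program attached to the device actually reserves metadata. Below is a minimal sketch of such a program, assuming a selftests-style bpf_helpers.h header; the program name, section name, mark value, and header path are illustrative, not part of the patch. bpf_xdp_adjust_meta() reserves bytes immediately in front of the packet data, which ixgbe_build_skb() then hands to the stack via skb_metadata_set().

/* Hypothetical XDP program sketch: reserve 4 bytes of metadata per packet. */
#include <linux/bpf.h>
#include "bpf_helpers.h"	/* for SEC(); header path may differ per tree */

SEC("xdp")
int xdp_mark_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* grow the metadata area by 4 bytes (it sits right before data) */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	*meta = 0x42;			/* illustrative mark for later consumers */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";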
@@ -2516,50 +2540,174 @@ enum latency_range {
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
{
- int bytes = ring_container->total_bytes;
- int packets = ring_container->total_packets;
- u32 timepassed_us;
- u64 bytes_perint;
- u8 itr_setting = ring_container->itr;
+ unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
+ unsigned int avg_wire_size, packets, bytes;
+ unsigned long next_update = jiffies;
- if (packets == 0)
+ /* If we don't have any rings just leave ourselves set for maximum
+ * possible latency so we take ourselves out of the equation.
+ */
+ if (!ring_container->ring)
return;
- /* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
- * 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (12000 ints/s)
+ /* If we didn't update within up to 1 - 2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
*/
- /* what was last interrupt timeslice? */
- timepassed_us = q_vector->itr >> 2;
- if (timepassed_us == 0)
- return;
+ if (time_after(next_update, ring_container->next_update))
+ goto clear_counts;
- bytes_perint = bytes / timepassed_us; /* bytes/usec */
+ packets = ring_container->total_packets;
- switch (itr_setting) {
- case lowest_latency:
- if (bytes_perint > 10)
- itr_setting = low_latency;
- break;
- case low_latency:
- if (bytes_perint > 20)
- itr_setting = bulk_latency;
- else if (bytes_perint <= 10)
- itr_setting = lowest_latency;
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * When this occurs just tick up our delay by the minimum value
+ * and hope that this extra delay will prevent us from being called
+ * without any work on our queue.
+ */
+ if (!packets) {
+ itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
+ if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
+ itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
+ goto clear_counts;
+ }
+
+ bytes = ring_container->total_bytes;
+
+ /* If packets are less than 4 or bytes are less than 9000, assume
+ * insufficient data to use the bulk rate limiting approach. We are
+ * likely latency driven.
+ */
+ if (packets < 4 && bytes < 9000) {
+ itr = IXGBE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+
+ /* Between 4 and 48 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 48) {
+ itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
+ if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
+ goto clear_counts;
+ }
+
+ /* Between 48 and 96 is our "goldilocks" zone where we are working
+ * out "just right". Just report that our current ITR is good for us.
+ */
+ if (packets < 96) {
+ itr = q_vector->itr >> 2;
+ goto clear_counts;
+ }
+
+ /* If packet count is 96 or greater we are likely looking at a slight
+ * overrun of the delay we want. Try halving our delay to see if that
+ * will cut the number of packets in half per interrupt.
+ */
+ if (packets < 256) {
+ itr = q_vector->itr >> 3;
+ if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since number
+ * of packets is 256 or greater. We are just going to have to compute
+ * a value and try to bring the count under control, though for smaller
+ * packet sizes there isn't much we can do as NAPI polling will likely
+ * be kicking in sooner rather than later.
+ */
+ itr = IXGBE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to fine
+ * tune it just use the formula below to try and dial in an exact value
+ * given the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
+ *
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
+ */
+ if (avg_wire_size <= 60) {
+ /* Start at 50k ints/sec */
+ avg_wire_size = 5120;
+ } else if (avg_wire_size <= 316) {
+ /* 50K ints/sec to 16K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 2720;
+ } else if (avg_wire_size <= 1084) {
+ /* 16K ints/sec to 9.2K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 9.2K ints/sec to 8K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 8K ints/sec */
+ avg_wire_size = 32256;
+ }
+
+ /* If we are in low latency mode, halve our delay, which doubles the rate
+ * to somewhere between 100K and 16K ints/sec
+ */
+ if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size >>= 1;
+
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ switch (q_vector->adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ case IXGBE_LINK_SPEED_100_FULL:
+ default:
+ itr += DIV_ROUND_UP(avg_wire_size,
+ IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
+ IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
- case bulk_latency:
- if (bytes_perint <= 20)
- itr_setting = low_latency;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ case IXGBE_LINK_SPEED_10_FULL:
+ itr += DIV_ROUND_UP(avg_wire_size,
+ IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
+ IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
}
- /* clear work counters since we have the values we need */
+clear_counts:
+ /* write back value */
+ ring_container->itr = itr;
+
+ /* next update should occur within next jiffy */
+ ring_container->next_update = next_update + 1;
+
ring_container->total_bytes = 0;
ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itr_setting;
}
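To make the size-based estimate concrete, here is a minimal standalone sketch of the bulk-mode calculation, assuming the 10G divisor of 256; the function and macro names are local to the example rather than driver symbols.

#include <stdio.h>

#define ITR_ADAPTIVE_MIN_INC	2
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Sketch of the avg_wire_size -> ITR mapping above for a 10G link. */
static unsigned int itr_from_avg_size(unsigned int avg_wire_size)
{
	if (avg_wire_size <= 60)
		avg_wire_size = 5120;
	else if (avg_wire_size <= 316)
		avg_wire_size = avg_wire_size * 40 + 2720;
	else if (avg_wire_size <= 1084)
		avg_wire_size = avg_wire_size * 15 + 11452;
	else if (avg_wire_size <= 1980)
		avg_wire_size = avg_wire_size * 5 + 22420;
	else
		avg_wire_size = 32256;

	return DIV_ROUND_UP(avg_wire_size, ITR_ADAPTIVE_MIN_INC * 256) *
	       ITR_ADAPTIVE_MIN_INC;
}

int main(void)
{
	/* 1500-byte frames in bulk mode: 1500 * 5 + 22420 = 29920,
	 * DIV_ROUND_UP(29920, 512) * 2 = 118 usecs between interrupts.
	 */
	printf("%u usecs\n", itr_from_avg_size(1500));
	return 0;
}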
/**
@@ -2601,34 +2749,19 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
- u32 new_itr = q_vector->itr;
- u8 current_itr;
+ u32 new_itr;
ixgbe_update_itr(q_vector, &q_vector->tx);
ixgbe_update_itr(q_vector, &q_vector->rx);
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+ /* use the smallest value of new ITR delay calculations */
+ new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IXGBE_100K_ITR;
- break;
- case low_latency:
- new_itr = IXGBE_20K_ITR;
- break;
- case bulk_latency:
- new_itr = IXGBE_12K_ITR;
- break;
- default:
- break;
- }
+ /* Clear latency flag if set, shift into correct position */
+ new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
+ new_itr <<= 2;
if (new_itr != q_vector->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * q_vector->itr) /
- ((9 * new_itr) + q_vector->itr);
-
/* save the algorithm value here */
q_vector->itr = new_itr;
@@ -6771,6 +6904,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 alloc_rx_page = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
@@ -6791,6 +6925,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
hw_csum_rx_error += rx_ring->rx_stats.csum_err;
@@ -6798,6 +6933,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets += rx_ring->stats.packets;
}
adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page = alloc_rx_page;
adapter->alloc_rx_page_failed = alloc_rx_page_failed;
adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
adapter->hw_csum_rx_error = hw_csum_rx_error;
@@ -7554,9 +7690,9 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
* ixgbe_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void ixgbe_service_timer(unsigned long data)
+static void ixgbe_service_timer(struct timer_list *t)
{
- struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+ struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer);
unsigned long next_event_offset;
/* poll faster when waiting for link */
@@ -8624,7 +8760,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
@@ -8640,12 +8776,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
@@ -9223,13 +9359,10 @@ free_jump:
return err;
}
-static int ixgbe_setup_tc_cls_u32(struct net_device *dev,
+static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls_u32)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
- cls_u32->common.chain_index)
+ if (cls_u32->common.chain_index)
return -EOPNOTSUPP;
switch (cls_u32->command) {
@@ -9248,6 +9381,43 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev,
}
}
+static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ixgbe_adapter *adapter = cb_priv;
+
+ if (!tc_can_offload(adapter->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ return ixgbe_setup_tc_cls_u32(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int ixgbe_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb,
+ adapter, adapter);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb,
+ adapter);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ixgbe_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt *mqprio)
{
@@ -9259,9 +9429,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
- case TC_SETUP_CLSU32:
- return ixgbe_setup_tc_cls_u32(dev, type_data);
- case TC_SETUP_MQPRIO:
+ case TC_SETUP_BLOCK:
+ return ixgbe_setup_tc_block(dev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
return ixgbe_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
@@ -9733,6 +9903,17 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
limit = find_last_bit(&adapter->fwd_bitmask, 32);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+
+ /* go back to full RSS if we're done with our VMQs */
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+ int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
+
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+ }
+
ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
fwd_adapter->pool, adapter->num_rx_pools,
@@ -9823,7 +10004,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return 0;
}
-static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -9932,7 +10113,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port,
.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port,
.ndo_features_check = ixgbe_features_check,
- .ndo_xdp = ixgbe_xdp,
+ .ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
.ndo_xdp_flush = ixgbe_xdp_flush,
};
@@ -10355,8 +10536,7 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
- setup_timer(&adapter->service_timer, &ixgbe_service_timer,
- (unsigned long) adapter);
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
@@ -10712,6 +10892,9 @@ skip_bad_vf_detection:
if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
return PCI_ERS_RESULT_DISCONNECT;
+ if (!netif_device_present(netdev))
+ return PCI_ERS_RESULT_DISCONNECT;
+
rtnl_lock();
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 86d6924a2b71..ae312c45696a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb();
- incval = ACCESS_ONCE(adapter->base_incval);
+ incval = READ_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ WRITE_ONCE(adapter->base_incval, incval);
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 6ea0d6a5fb90..b8c5fd2a2115 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -619,12 +619,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
usleep_range(5000, 10000);
}
- /* Failed to get SW only semaphore */
- if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- hw_dbg(hw, "Failed to get SW only semaphore\n");
- return IXGBE_ERR_SWFW_SYNC;
- }
-
/* If the resource is not released by the FW/HW the SW can assume that
* the FW/HW malfunctions. In that case the SW should set the SW bit(s)
* of the requested resource(s) while ignoring the corresponding FW/HW
@@ -647,7 +641,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*/
if (swfw_sync & swmask) {
u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
- IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
@@ -763,6 +758,8 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
**/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
+ u32 rmask;
+
/* First try to grab the semaphore but we don't need to bother
* looking to see whether we got the lock or not since we do
* the same thing regardless of whether we got the lock or not.
@@ -771,6 +768,14 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
*/
ixgbe_get_swfw_sync_semaphore(hw);
ixgbe_release_swfw_sync_semaphore(hw);
+
+ /* Acquire and release all software resources. */
+ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK;
+
+ ixgbe_acquire_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_X540(hw, rmask);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 19fbb2f28ea4..cb7da5f9c4da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/* convert offset from words to bytes */
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT);
@@ -3192,6 +3194,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ return ret_val;
/* Setup function pointers based on detected hardware */
ixgbe_init_mac_link_ops_X550em(hw);
@@ -3394,9 +3399,10 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
/* PHY ops must be identified and initialized prior to reset */
-
- /* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ status == IXGBE_ERR_PHY_ADDR_INVALID)
+ return status;
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
@@ -3884,7 +3890,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};
-static struct ixgbe_mac_operations mac_ops_x550em_a = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
.led_off = ixgbe_led_off_t_x550em,
@@ -3905,7 +3911,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
-static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_generic,
.led_off = ixgbe_led_off_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 032f8ac06357..1f4a69134ade 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (IXGBE_REMOVED(reg_addr))
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -2747,9 +2747,10 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
* ixgbevf_service_timer - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-static void ixgbevf_service_timer(unsigned long data)
+static void ixgbevf_service_timer(struct timer_list *t)
{
- struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbevf_adapter *adapter = from_timer(adapter, t,
+ service_timer);
/* Reset the timer */
mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
@@ -4120,8 +4121,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
}
- setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
- (unsigned long)adapter);
+ timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0);
INIT_WORK(&adapter->service_task, ixgbevf_service_task);
set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 04d8d4ee4f04..c651fefcc3d2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -182,7 +182,7 @@ struct ixgbevf_info {
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (IXGBE_REMOVED(reg_addr))
return;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 3c0a6451273d..ae195f8adff5 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -4,6 +4,7 @@
* Copyright 2004 IDT Inc. (rischelp@idt.com)
* Copyright 2006 Felix Fietkau <nbd@openwrt.org>
* Copyright 2008 Florian Fainelli <florian@openwrt.org>
+ * Copyright 2017 Roman Yeryomin <roman@advem.lv>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -64,9 +65,9 @@
#include <asm/mach-rc32434/eth.h>
#include <asm/mach-rc32434/dma_v.h>
-#define DRV_NAME "korina"
-#define DRV_VERSION "0.10"
-#define DRV_RELDATE "04Mar2008"
+#define DRV_NAME "korina"
+#define DRV_VERSION "0.20"
+#define DRV_RELDATE "15Sep2017"
#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
((dev)->dev_addr[1]))
@@ -75,7 +76,7 @@
((dev)->dev_addr[4] << 8) | \
((dev)->dev_addr[5]))
-#define MII_CLOCK 1250000 /* no more than 2.5MHz */
+#define MII_CLOCK 1250000 /* no more than 2.5MHz */
/* the following must be powers of two */
#define KORINA_NUM_RDS 64 /* number of receive descriptors */
@@ -87,15 +88,19 @@
#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */
#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
-#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
+#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc))
-#define TX_TIMEOUT (6000 * HZ / 1000)
+#define TX_TIMEOUT (6000 * HZ / 1000)
-enum chain_status { desc_filled, desc_empty };
-#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
-#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
-#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
+enum chain_status {
+ desc_filled,
+ desc_empty
+};
+
+#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
+#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
+#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
/* Information that need to be kept for each board. */
struct korina_private {
@@ -122,10 +127,8 @@ struct korina_private {
int rx_irq;
int tx_irq;
- int ovr_irq;
- int und_irq;
- spinlock_t lock; /* NIC xmit lock */
+ spinlock_t lock; /* NIC xmit lock */
int dma_halt_cnt;
int dma_run_cnt;
@@ -148,17 +151,17 @@ static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
static inline void korina_abort_dma(struct net_device *dev,
struct dma_reg *ch)
{
- if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
- writel(0x10, &ch->dmac);
+ if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
+ writel(0x10, &ch->dmac);
- while (!(readl(&ch->dmas) & DMA_STAT_HALT))
- netif_trans_update(dev);
+ while (!(readl(&ch->dmas) & DMA_STAT_HALT))
+ netif_trans_update(dev);
- writel(0, &ch->dmas);
- }
+ writel(0, &ch->dmas);
+ }
- writel(0, &ch->dmadptr);
- writel(0, &ch->dmandptr);
+ writel(0, &ch->dmadptr);
+ writel(0, &ch->dmandptr);
}
static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
@@ -365,59 +368,60 @@ static int korina_rx(struct net_device *dev, int limit)
if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
break;
- /* Update statistics counters */
- if (devcs & ETH_RX_CRC)
- dev->stats.rx_crc_errors++;
- if (devcs & ETH_RX_LOR)
- dev->stats.rx_length_errors++;
- if (devcs & ETH_RX_LE)
- dev->stats.rx_length_errors++;
- if (devcs & ETH_RX_OVR)
- dev->stats.rx_fifo_errors++;
- if (devcs & ETH_RX_CV)
- dev->stats.rx_frame_errors++;
- if (devcs & ETH_RX_CES)
- dev->stats.rx_length_errors++;
- if (devcs & ETH_RX_MP)
- dev->stats.multicast++;
+ /* check that this is a whole packet
+ * WARNING: DMA_FD bit incorrectly set
+ * in Rc32434 (errata ref #077) */
+ if (!(devcs & ETH_RX_LD))
+ goto next;
- if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
- /* check that this is a whole packet
- * WARNING: DMA_FD bit incorrectly set
- * in Rc32434 (errata ref #077) */
+ if (!(devcs & ETH_RX_ROK)) {
+ /* Update statistics counters */
dev->stats.rx_errors++;
dev->stats.rx_dropped++;
- } else if ((devcs & ETH_RX_ROK)) {
- pkt_len = RCVPKT_LENGTH(devcs);
+ if (devcs & ETH_RX_CRC)
+ dev->stats.rx_crc_errors++;
+ if (devcs & ETH_RX_LE)
+ dev->stats.rx_length_errors++;
+ if (devcs & ETH_RX_OVR)
+ dev->stats.rx_fifo_errors++;
+ if (devcs & ETH_RX_CV)
+ dev->stats.rx_frame_errors++;
+ if (devcs & ETH_RX_CES)
+ dev->stats.rx_frame_errors++;
+
+ goto next;
+ }
- /* must be the (first and) last
- * descriptor then */
- pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+ pkt_len = RCVPKT_LENGTH(devcs);
- /* invalidate the cache */
- dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+ /* must be the (first and) last
+ * descriptor then */
+ pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
- /* Malloc up new buffer. */
- skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
+ /* invalidate the cache */
+ dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
- if (!skb_new)
- break;
- /* Do not count the CRC */
- skb_put(skb, pkt_len - 4);
- skb->protocol = eth_type_trans(skb, dev);
+ /* Malloc up new buffer. */
+ skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
- /* Pass the packet to upper layers */
- netif_receive_skb(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
+ if (!skb_new)
+ break;
+ /* Do not count the CRC */
+ skb_put(skb, pkt_len - 4);
+ skb->protocol = eth_type_trans(skb, dev);
- /* Update the mcast stats */
- if (devcs & ETH_RX_MP)
- dev->stats.multicast++;
+ /* Pass the packet to upper layers */
+ napi_gro_receive(&lp->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
- lp->rx_skb[lp->rx_next_done] = skb_new;
- }
+ /* Update the mcast stats */
+ if (devcs & ETH_RX_MP)
+ dev->stats.multicast++;
+
+ lp->rx_skb[lp->rx_next_done] = skb_new;
+next:
rd->devcs = 0;
/* Restore descriptor's curr_addr */
@@ -649,10 +653,10 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media)
&lp->eth_regs->ethmac2);
}
-static void korina_poll_media(unsigned long data)
+static void korina_poll_media(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct korina_private *lp = netdev_priv(dev);
+ struct korina_private *lp = from_timer(lp, t, media_check_timer);
+ struct net_device *dev = lp->dev;
korina_check_media(dev, 0);
mod_timer(&lp->media_check_timer, jiffies + HZ);
@@ -686,7 +690,7 @@ static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* ethtool helpers */
static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
struct korina_private *lp = netdev_priv(dev);
@@ -729,10 +733,10 @@ static u32 netdev_get_link(struct net_device *dev)
}
static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_link = netdev_get_link,
- .get_link_ksettings = netdev_get_link_ksettings,
- .set_link_ksettings = netdev_set_link_ksettings,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_link = netdev_get_link,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int korina_alloc_ring(struct net_device *dev)
@@ -864,7 +868,7 @@ static int korina_init(struct net_device *dev)
/* Management Clock Prescaler Divisor
* Clock independent setting */
writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
- &lp->eth_regs->ethmcp);
+ &lp->eth_regs->ethmcp);
/* don't transmit until fifo contains 48b */
writel(48, &lp->eth_regs->ethfifott);
@@ -891,8 +895,6 @@ static void korina_restart_task(struct work_struct *work)
*/
disable_irq(lp->rx_irq);
disable_irq(lp->tx_irq);
- disable_irq(lp->ovr_irq);
- disable_irq(lp->und_irq);
writel(readl(&lp->tx_dma_regs->dmasm) |
DMA_STAT_FINI | DMA_STAT_ERR,
@@ -911,40 +913,10 @@ static void korina_restart_task(struct work_struct *work)
}
korina_multicast_list(dev);
- enable_irq(lp->und_irq);
- enable_irq(lp->ovr_irq);
enable_irq(lp->tx_irq);
enable_irq(lp->rx_irq);
}
-static void korina_clear_and_restart(struct net_device *dev, u32 value)
-{
- struct korina_private *lp = netdev_priv(dev);
-
- netif_stop_queue(dev);
- writel(value, &lp->eth_regs->ethintfc);
- schedule_work(&lp->restart_task);
-}
-
-/* Ethernet Tx Underflow interrupt */
-static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct korina_private *lp = netdev_priv(dev);
- unsigned int und;
-
- spin_lock(&lp->lock);
-
- und = readl(&lp->eth_regs->ethintfc);
-
- if (und & ETH_INT_FC_UND)
- korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
-
- spin_unlock(&lp->lock);
-
- return IRQ_HANDLED;
-}
-
static void korina_tx_timeout(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
@@ -952,25 +924,6 @@ static void korina_tx_timeout(struct net_device *dev)
schedule_work(&lp->restart_task);
}
-/* Ethernet Rx Overflow interrupt */
-static irqreturn_t
-korina_ovr_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct korina_private *lp = netdev_priv(dev);
- unsigned int ovr;
-
- spin_lock(&lp->lock);
- ovr = readl(&lp->eth_regs->ethintfc);
-
- if (ovr & ETH_INT_FC_OVR)
- korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
-
- spin_unlock(&lp->lock);
-
- return IRQ_HANDLED;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void korina_poll_controller(struct net_device *dev)
{
@@ -993,48 +946,26 @@ static int korina_open(struct net_device *dev)
}
/* Install the interrupt handler
- * that handles the Done Finished
- * Ovr and Und Events */
+ * that handles the Done Finished */
ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
0, "Korina ethernet Rx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
- dev->name, lp->rx_irq);
+ dev->name, lp->rx_irq);
goto err_release;
}
ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
0, "Korina ethernet Tx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
- dev->name, lp->tx_irq);
+ dev->name, lp->tx_irq);
goto err_free_rx_irq;
}
- /* Install handler for overrun error. */
- ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
- 0, "Ethernet Overflow", dev);
- if (ret < 0) {
- printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
- dev->name, lp->ovr_irq);
- goto err_free_tx_irq;
- }
-
- /* Install handler for underflow error. */
- ret = request_irq(lp->und_irq, korina_und_interrupt,
- 0, "Ethernet Underflow", dev);
- if (ret < 0) {
- printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
- dev->name, lp->und_irq);
- goto err_free_ovr_irq;
- }
mod_timer(&lp->media_check_timer, jiffies + 1);
out:
return ret;
-err_free_ovr_irq:
- free_irq(lp->ovr_irq, dev);
-err_free_tx_irq:
- free_irq(lp->tx_irq, dev);
err_free_rx_irq:
free_irq(lp->rx_irq, dev);
err_release:
@@ -1052,8 +983,6 @@ static int korina_close(struct net_device *dev)
/* Disable interrupts */
disable_irq(lp->rx_irq);
disable_irq(lp->tx_irq);
- disable_irq(lp->ovr_irq);
- disable_irq(lp->und_irq);
korina_abort_tx(dev);
tmp = readl(&lp->tx_dma_regs->dmasm);
@@ -1073,8 +1002,6 @@ static int korina_close(struct net_device *dev)
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
- free_irq(lp->ovr_irq, dev);
- free_irq(lp->und_irq, dev);
return 0;
}
@@ -1113,8 +1040,6 @@ static int korina_probe(struct platform_device *pdev)
lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
- lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
- lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
dev->base_addr = r->start;
@@ -1162,7 +1087,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, 64);
+ netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
lp->mii_if.dev = dev;
@@ -1178,7 +1103,7 @@ static int korina_probe(struct platform_device *pdev)
": cannot register net device: %d\n", rc);
goto probe_err_register;
}
- setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
+ timer_setup(&lp->media_check_timer, korina_poll_media, 0);
INIT_WORK(&lp->restart_task, korina_restart_task);
@@ -1226,5 +1151,6 @@ module_platform_driver(korina_driver);
MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Roman Yeryomin <roman@advem.lv>");
MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
MODULE_LICENSE("GPL");
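The korina rx rework above also moves packet delivery to napi_gro_receive() and registers the poll handler with NAPI_POLL_WEIGHT. For orientation, a generic NAPI poll skeleton using GRO is sketched below; it is not korina's actual code, and bar_rx_one()/bar_enable_rx_irq() are hypothetical helpers standing in for the driver's ring handling:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct bar_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static struct sk_buff *bar_rx_one(struct bar_priv *bp);	/* hypothetical: pull one frame off the ring */
static void bar_enable_rx_irq(struct bar_priv *bp);		/* hypothetical: unmask rx interrupts */

static int bar_poll(struct napi_struct *napi, int budget)
{
	struct bar_priv *bp = container_of(napi, struct bar_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = bar_rx_one(bp);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, bp->dev);
		napi_gro_receive(napi, skb);	/* GRO-capable replacement for netif_receive_skb() */
		work_done++;
	}

	if (work_done < budget && napi_complete_done(napi, work_done))
		bar_enable_rx_irq(bp);

	return work_done;
}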
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ff1bffa74803..9498ed26dbe5 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Marvell device drivers.
#
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 81c1fac00d33..62f204f32316 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1346,9 +1346,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
spin_unlock_bh(&mp->mib_counters_lock);
}
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)_mp;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
@@ -2321,9 +2321,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)data;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
@@ -3178,8 +3178,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
- (unsigned long)mp);
+ timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3188,7 +3187,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+ timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 64a04975bcf8..bc93b69cfd1e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
{
u32 val;
- /* Only 255 descriptors can be added at once ; Assume caller
- * process TX desriptors in quanta less than 256
- */
- val = pend_desc + txq->pending;
- mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc += txq->pending;
+
+ /* Only 255 Tx descriptors can be added at once */
+ do {
+ val = min(pend_desc, 255);
+ mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ pend_desc -= val;
+ } while (pend_desc > 0);
txq->pending = 0;
}
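The mvneta hunk above works around an 8-bit hardware field: MVNETA_TXQ_UPDATE_REG accepts at most 255 pending descriptors per write, so larger batches are flushed in slices (600 pending descriptors become writes of 255, 255 and 90). A stand-alone sketch of the same chunking, where write_pend() is a hypothetical stand-in for the register write:

#include <linux/kernel.h>

static void write_pend(unsigned int val);	/* hypothetical register helper */

static void txq_pend_desc_add(unsigned int pend_desc)
{
	unsigned int val;

	/* the hardware field is 8 bits wide: at most 255 descriptors per write */
	do {
		val = min(pend_desc, 255U);
		write_pend(val);
		pend_desc -= val;
	} while (pend_desc > 0);
}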
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a37af5813f33..6c20e811f973 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -38,11 +38,12 @@
#include <net/ipv6.h>
#include <net/tso.h>
-/* RX Fifo Registers */
+/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
@@ -82,6 +83,16 @@
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
+/* RSS Registers */
+#define MVPP22_RSS_INDEX 0x1500
+#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) ((idx) << 8)
+#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
+#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
+#define MVPP22_RSS_TABLE_ENTRY 0x1508
+#define MVPP22_RSS_TABLE 0x1510
+#define MVPP22_RSS_TABLE_POINTER(p) (p)
+#define MVPP22_RSS_WIDTH 0x150c
+
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
@@ -482,6 +493,13 @@
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
+/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
+ * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
+ * multiply this value by two to count the maximum number of skb descs needed.
+ */
+#define MVPP2_MAX_TSO_SEGS 300
+#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4
@@ -504,9 +522,17 @@
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
-#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
-#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
+#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
+
+/* TX FIFO constants */
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
+#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
@@ -737,6 +763,10 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64
+#define MVPP2_CLS_RX_QUEUES 256
+
+/* RSS constants */
+#define MVPP22_RSS_TABLE_ENTRIES 32
/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
@@ -769,6 +799,42 @@ enum mvpp2_bm_type {
MVPP2_BM_SWF_SHORT
};
+/* GMAC MIB Counters register definitions */
+#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
+#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
+#define MVPP22_MIB_COUNTERS_OFFSET 0x0
+#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
+
+#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
+#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
+#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
+#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
+#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
+#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
+#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
+#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
+#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
+#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
+#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
+#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
+#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
+#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
+#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
+#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
+#define MVPP2_MIB_FC_SENT 0x54
+#define MVPP2_MIB_FC_RCVD 0x58
+#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
+#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
+#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
+#define MVPP2_MIB_OVERSIZE_RCVD 0x68
+#define MVPP2_MIB_JABBER_RCVD 0x6c
+#define MVPP2_MIB_MAC_RCV_ERROR 0x70
+#define MVPP2_MIB_BAD_CRC_EVENT 0x74
+#define MVPP2_MIB_COLLISION 0x78
+#define MVPP2_MIB_LATE_COLLISION 0x7c
+
+#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
+
/* Definitions */
/* Shared Packet Processor resources */
@@ -796,6 +862,7 @@ struct mvpp2 {
struct clk *axi_clk;
/* List of pointers to port structures */
+ int port_count;
struct mvpp2_port **port_list;
/* Aggregated TXQs */
@@ -817,6 +884,10 @@ struct mvpp2 {
/* Maximum number of RXQs per port */
unsigned int max_port_rxqs;
+
+ /* Workqueue to gather hardware statistics */
+ char queue_name[30];
+ struct workqueue_struct *stats_queue;
};
struct mvpp2_pcpu_stats {
@@ -861,6 +932,7 @@ struct mvpp2_port {
/* Per-port registers' base address */
void __iomem *base;
+ void __iomem *stats_base;
struct mvpp2_rx_queue **rxqs;
unsigned int nrxqs;
@@ -879,6 +951,11 @@ struct mvpp2_port {
u16 tx_ring_size;
u16 rx_ring_size;
struct mvpp2_pcpu_stats __percpu *stats;
+ u64 *ethtool_stats;
+
+ /* Per-port work and its lock to gather hardware statistics */
+ struct mutex gather_stats_lock;
+ struct delayed_work stats_work;
phy_interface_t phy_interface;
struct device_node *phy_node;
@@ -1022,6 +1099,9 @@ struct mvpp2_txq_pcpu {
*/
int count;
+ int wake_threshold;
+ int stop_threshold;
+
/* Number of Tx DMA descriptors reserved for each CPU */
int reserved_num;
@@ -1257,13 +1337,20 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc,
dma_addr_t dma_addr)
{
+ dma_addr_t addr, offset;
+
+ addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
+ offset = dma_addr & MVPP2_TX_DESC_ALIGN;
+
if (port->priv->hw_version == MVPP21) {
- tx_desc->pp21.buf_dma_addr = dma_addr;
+ tx_desc->pp21.buf_dma_addr = addr;
+ tx_desc->pp21.packet_offset = offset;
} else {
- u64 val = (u64)dma_addr;
+ u64 val = (u64)addr;
tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
tx_desc->pp22.buf_dma_addr_ptp |= val;
+ tx_desc->pp22.packet_offset = offset;
}
}
@@ -1306,16 +1393,6 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
tx_desc->pp22.command = command;
}
-static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
- struct mvpp2_tx_desc *tx_desc,
- unsigned int offset)
-{
- if (port->priv->hw_version == MVPP21)
- tx_desc->pp21.packet_offset = offset;
- else
- tx_desc->pp22.packet_offset = offset;
-}
-
static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
struct mvpp2_tx_desc *tx_desc)
{
@@ -4748,9 +4825,131 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port)
writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
+struct mvpp2_ethtool_counter {
+ unsigned int offset;
+ const char string[ETH_GSTRING_LEN];
+ bool reg_is_64b;
+};
+
+static u64 mvpp2_read_count(struct mvpp2_port *port,
+ const struct mvpp2_ethtool_counter *counter)
+{
+ u64 val;
+
+ val = readl(port->stats_base + counter->offset);
+ if (counter->reg_is_64b)
+ val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
+
+ return val;
+}
+
+/* Due to the fact that software statistics and hardware statistics are, by
+ * design, incremented at different moments in the chain of packet processing,
+ * it is very likely that incoming packets could have been dropped after being
+ * counted by hardware but before reaching software statistics (most probably
+ * multicast packets), and in the opposite way, during transmission, FCS bytes
+ * are added in between, and TSO skbs are split with header bytes added.
+ * Hence, statistics gathered from userspace with ifconfig (software) and
+ * ethtool (hardware) cannot be compared.
+ */
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
+ { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
+ { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
+ { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
+ { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
+ { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
+ { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
+ { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
+ { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
+ { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
+ { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
+ { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
+ { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
+ { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
+ { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
+ { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
+ { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
+ { MVPP2_MIB_FC_SENT, "fc_sent" },
+ { MVPP2_MIB_FC_RCVD, "fc_received" },
+ { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
+ { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
+ { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
+ { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
+ { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
+ { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
+ { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
+ { MVPP2_MIB_COLLISION, "collision" },
+ { MVPP2_MIB_LATE_COLLISION, "late_collision" },
+};
+
+static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static void mvpp2_gather_hw_statistics(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
+ stats_work);
+ u64 *pstats;
+ int i;
+
+ mutex_lock(&port->gather_stats_lock);
+
+ pstats = port->ethtool_stats;
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
+
+ /* No need to read the counters again right after this function if it
+ * was called asynchronously by the user (i.e. via ethtool).
+ */
+ cancel_delayed_work(&port->stats_work);
+ queue_delayed_work(port->priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static void mvpp2_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ /* Update statistics for the given port, then take the lock to avoid
+ * concurrent accesses to the ethtool_stats structure while it is copied.
+ */
+ mvpp2_gather_hw_statistics(&port->stats_work.work);
+
+ mutex_lock(&port->gather_stats_lock);
+ memcpy(data, port->ethtool_stats,
+ sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
+ mutex_unlock(&port->gather_stats_lock);
+}
+
+static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(mvpp2_ethtool_regs);
+
+ return -EOPNOTSUPP;
+}
+
static void mvpp2_port_reset(struct mvpp2_port *port)
{
u32 val;
+ unsigned int i;
+
+ /* Read the GOP statistics to reset the hardware counters */
+ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
+ mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
~MVPP2_GMAC_PORT_RESET_MASK;
@@ -5022,7 +5221,7 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
struct mvpp2_tx_queue *aggr_txq, int num)
{
- if ((aggr_txq->count + num) > aggr_txq->size) {
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
/* Update number of occupied aggregated Tx descriptors */
int cpu = smp_processor_id();
u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
@@ -5030,7 +5229,7 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
}
- if ((aggr_txq->count + num) > aggr_txq->size)
+ if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
return -ENOMEM;
return 0;
@@ -5370,7 +5569,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
txq_pcpu->count -= tx_done;
if (netif_tx_queue_stopped(nq))
- if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
+ if (txq_pcpu->count <= txq_pcpu->wake_threshold)
netif_tx_wake_queue(nq);
}
@@ -5414,7 +5613,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
if (!aggr_txq->descs)
return -ENOMEM;
- aggr_txq->last_desc = aggr_txq->size - 1;
+ aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
/* Aggr TXQ no reset WA */
aggr_txq->next_desc_to_proc = mvpp2_read(priv,
@@ -5613,6 +5812,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->txq_put_index = 0;
txq_pcpu->txq_get_index = 0;
+ txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
+ txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
+
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
txq_pcpu->size * TSO_HEADER_SIZE,
@@ -6256,10 +6458,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
goto cleanup;
}
- mvpp2_txdesc_offset_set(port, tx_desc,
- buf_dma_addr & MVPP2_TX_DESC_ALIGN);
- mvpp2_txdesc_dma_addr_set(port, tx_desc,
- buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
@@ -6302,8 +6501,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
addr = txq_pcpu->tso_headers_dma +
txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
- mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN);
- mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
MVPP2_TXD_F_DESC |
@@ -6332,10 +6530,7 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb,
return -ENOMEM;
}
- mvpp2_txdesc_offset_set(port, tx_desc,
- buf_dma_addr & MVPP2_TX_DESC_ALIGN);
- mvpp2_txdesc_dma_addr_set(port, tx_desc,
- buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
if (!left) {
mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
@@ -6447,10 +6642,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- mvpp2_txdesc_offset_set(port, tx_desc,
- buf_dma_addr & MVPP2_TX_DESC_ALIGN);
- mvpp2_txdesc_dma_addr_set(port, tx_desc,
- buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
+ mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
tx_cmd = mvpp2_skb_tx_csum(port, skb);
@@ -6469,7 +6661,6 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
tx_desc_unmap_put(port, txq, tx_desc);
frags = 0;
- goto out;
}
}
@@ -6486,7 +6677,7 @@ out:
wmb();
mvpp2_aggr_txq_pend_desc_add(port, frags);
- if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1)
+ if (txq_pcpu->count >= txq_pcpu->stop_threshold)
netif_tx_stop_queue(nq);
u64_stats_update_begin(&stats->syncp);
@@ -6747,6 +6938,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err)
goto err;
@@ -6776,10 +6970,44 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv);
}
}
+static void mvpp22_init_rss(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ int i;
+
+ /* Set the table width: replace the whole classifier Rx queue number
+ * with the ones configured in RSS table entries.
+ */
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
+
+ /* Loop through the classifier Rx Queues and map them to an RSS table.
+ * Map them all to the first table (0) by default.
+ */
+ for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
+ mvpp2_write(priv, MVPP22_RSS_TABLE,
+ MVPP22_RSS_TABLE_POINTER(0));
+ }
+
+ /* Configure the first table to evenly distribute the packets across
+ * real Rx Queues. The table entries map a hash to a port Rx Queue.
+ */
+ for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
+ u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
+ MVPP22_RSS_INDEX_TABLE_ENTRY(i);
+ mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
+
+ mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
+ }
+}
+
static int mvpp2_open(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -6854,6 +7082,13 @@ static int mvpp2_open(struct net_device *dev)
mvpp2_start_dev(port);
+ if (priv->hw_version == MVPP22)
+ mvpp22_init_rss(port);
+
+ /* Start hardware statistics gathering */
+ queue_delayed_work(priv->stats_queue, &port->stats_work,
+ MVPP2_MIB_COUNTERS_STATS_DELAY);
+
return 0;
err_free_link_irq:
@@ -6898,6 +7133,8 @@ static int mvpp2_stop(struct net_device *dev)
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
+ cancel_delayed_work_sync(&port->stats_work);
+
return 0;
}
@@ -7209,6 +7446,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
.get_ringparam = mvpp2_ethtool_get_ringparam,
.set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_strings = mvpp2_ethtool_get_strings,
+ .get_ethtool_stats = mvpp2_ethtool_get_stats,
+ .get_sset_count = mvpp2_ethtool_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -7612,6 +7852,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
err = PTR_ERR(port->base);
goto err_free_irq;
}
+
+ port->stats_base = port->priv->lms_base +
+ MVPP21_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
} else {
if (of_property_read_u32(port_node, "gop-port-id",
&port->gop_id)) {
@@ -7621,15 +7865,29 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
+ port->stats_base = port->priv->iface_base +
+ MVPP22_MIB_COUNTERS_OFFSET +
+ port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
}
- /* Alloc per-cpu stats */
+ /* Alloc per-cpu and ethtool stats */
port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
if (!port->stats) {
err = -ENOMEM;
goto err_free_irq;
}
+ port->ethtool_stats = devm_kcalloc(&pdev->dev,
+ ARRAY_SIZE(mvpp2_ethtool_regs),
+ sizeof(u64), GFP_KERNEL);
+ if (!port->ethtool_stats) {
+ err = -ENOMEM;
+ goto err_free_stats;
+ }
+
+ mutex_init(&port->gather_stats_lock);
+ INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
+
mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
port->tx_ring_size = MVPP2_MAX_TXD;
@@ -7674,6 +7932,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->features = features | NETIF_F_RXCSUM;
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
dev->vlan_features |= features;
+ dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
@@ -7765,9 +8024,42 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
for (port = 0; port < MVPP2_MAX_PORTS; port++) {
mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE);
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ }
+
+ mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+ MVPP2_RX_FIFO_PORT_MIN_PKT);
+ mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int port;
+
+ /* The FIFO size parameters are set depending on the maximum speed a
+ * given port can handle:
+ * - Port 0: 10Gbps
+ * - Port 1: 2.5Gbps
+ * - Ports 2 and 3: 1Gbps
+ */
+
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+
+ for (port = 2; port < MVPP2_MAX_PORTS; port++) {
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+ MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
}
mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
@@ -7775,6 +8067,16 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+/* Initialize Tx FIFO's */
+static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+{
+ int port;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++)
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
+ MVPP22_TX_FIFO_DATA_SIZE_3KB);
+}
+
static void mvpp2_axi_init(struct mvpp2 *priv)
{
u32 val, rdval, wrval;
@@ -7870,8 +8172,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
return err;
}
- /* Rx Fifo Init */
- mvpp2_rx_fifo_init(priv);
+ /* Fifo Init */
+ if (priv->hw_version == MVPP21) {
+ mvpp2_rx_fifo_init(priv);
+ } else {
+ mvpp22_rx_fifo_init(priv);
+ mvpp22_tx_fifo_init(priv);
+ }
if (priv->hw_version == MVPP21)
writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
@@ -7903,7 +8210,7 @@ static int mvpp2_probe(struct platform_device *pdev)
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
- int port_count, i;
+ int i;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -8018,14 +8325,14 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_mg_clk;
}
- port_count = of_get_available_child_count(dn);
- if (port_count == 0) {
+ priv->port_count = of_get_available_child_count(dn);
+ if (priv->port_count == 0) {
dev_err(&pdev->dev, "no ports enabled\n");
err = -ENODEV;
goto err_mg_clk;
}
- priv->port_list = devm_kcalloc(&pdev->dev, port_count,
+ priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count,
sizeof(*priv->port_list),
GFP_KERNEL);
if (!priv->port_list) {
@@ -8042,6 +8349,21 @@ static int mvpp2_probe(struct platform_device *pdev)
i++;
}
+ /* Statistics must be gathered regularly because some of them (like
+ * packet counters) are 32-bit registers and could overflow quite
+ * quickly. For instance, a 10Gb link used at full bandwidth with the
+ * smallest packets (64B) will overflow a 32-bit counter in less than
+ * 30 seconds. Then, use a workqueue to fill 64-bit counters.
+ */
+ snprintf(priv->queue_name, sizeof(priv->queue_name),
+ "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
+ priv->port_count > 1 ? "+" : "");
+ priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
+ if (!priv->stats_queue) {
+ err = -ENOMEM;
+ goto err_mg_clk;
+ }
+
platform_set_drvdata(pdev, priv);
return 0;
@@ -8063,9 +8385,14 @@ static int mvpp2_remove(struct platform_device *pdev)
struct device_node *port_node;
int i = 0;
+ flush_workqueue(priv->stats_queue);
+ destroy_workqueue(priv->stats_queue);
+
for_each_available_child_of_node(dn, port_node) {
- if (priv->port_list[i])
+ if (priv->port_list[i]) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
mvpp2_port_remove(priv->port_list[i]);
+ }
i++;
}
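The workqueue comment in mvpp2_probe() can be backed by a quick calculation: at 10 Gb/s the good-octets counter advances by 1.25e9 bytes per second, so a 32-bit register (2^32 ≈ 4.29e9) wraps after roughly 3.4 seconds, and even the 64-byte-frame packet counter (~14.88 Mpps) wraps in under five minutes. Hence the 1-second delayed work that folds the clear-on-read registers into 64-bit software totals. A hedged sketch of that accumulate-on-read idea, with an illustrative struct port_stats rather than mvpp2's real layout:

#include <linux/workqueue.h>
#include <linux/io.h>

struct port_stats {
	struct delayed_work work;
	void __iomem *base;
	const unsigned int *offsets;	/* register offsets of the hardware counters */
	unsigned int n_counters;
	u64 *totals;			/* 64-bit accumulators, one per counter */
};

static void stats_work_fn(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct port_stats *ps = container_of(dw, struct port_stats, work);
	unsigned int i;

	for (i = 0; i < ps->n_counters; i++)
		ps->totals[i] += readl(ps->base + ps->offsets[i]);	/* reading clears the register */

	/* re-arm well inside the ~3.4 s worst-case wrap time of the octet counters */
	schedule_delayed_work(&ps->work, HZ);
}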
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 993724959a7c..7bbd86f08e5f 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -362,9 +362,9 @@ static void rxq_refill(struct net_device *dev)
}
}
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = (void *)data;
+ struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
napi_schedule(&pep->napi);
}
@@ -1496,9 +1496,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
- init_timer(&pep->timeout);
- pep->timeout.function = rxq_refill_timer_wrapper;
- pep->timeout.data = (unsigned long)pep;
+ timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
pep->smi_bus = mdiobus_alloc();
if (!pep->smi_bus) {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index eef35bf3e849..6e423f098a60 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -1495,9 +1495,9 @@ static int xm_check_link(struct net_device *dev)
* get an interrupt when carrier is detected, need to poll for
* link coming up.
*/
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = (struct skge_port *) arg;
+ struct skge_port *skge = from_timer(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
@@ -3897,7 +3897,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* Only used for Genesis XMAC */
if (is_genesis(hw))
- setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/marvell/skge.h b/drivers/net/ethernet/marvell/skge.h
index 3ea151ff9c43..6fa7b6a34c08 100644
--- a/drivers/net/ethernet/marvell/skge.h
+++ b/drivers/net/ethernet/marvell/skge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the new Marvell Yukon / SysKonnect driver.
*/
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 1145cde2274a..9efe1771423c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2974,9 +2974,9 @@ static int sky2_rx_hung(struct net_device *dev)
}
}
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
@@ -5083,7 +5083,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
sky2_show_addr(dev1);
}
- setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+ timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 0fe160796842..b02b6523083c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the new Marvell Yukon 2 driver.
*/
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5e81a7263654..54adfd967858 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1817,7 +1817,7 @@ static int mtk_open(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
/* we run 2 netdevs on the same dma ring so we only bring it up once */
- if (!atomic_read(&eth->dma_refcnt)) {
+ if (!refcount_read(&eth->dma_refcnt)) {
int err = mtk_start_dma(eth);
if (err)
@@ -1827,8 +1827,10 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ refcount_set(&eth->dma_refcnt, 1);
}
- atomic_inc(&eth->dma_refcnt);
+ else
+ refcount_inc(&eth->dma_refcnt);
phy_start(dev->phydev);
netif_start_queue(dev);
@@ -1868,7 +1870,7 @@ static int mtk_stop(struct net_device *dev)
phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */
- if (!atomic_dec_and_test(&eth->dma_refcnt))
+ if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3d3c24a28112..a3af4660de81 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -15,6 +15,8 @@
#ifndef MTK_ETH_H
#define MTK_ETH_H
+#include <linux/refcount.h>
+
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_TX_DMA_BUF_LEN 0x3fff
@@ -632,7 +634,7 @@ struct mtk_eth {
struct regmap *pctl;
u32 chip_id;
bool hwlro;
- atomic_t dma_refcnt;
+ refcount_t dma_refcnt;
struct mtk_tx_ring tx_ring;
struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct mtk_rx_ring rx_ring_qdma;
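The mtk_eth_soc change swaps the bare atomic_t usage count for refcount_t, which saturates instead of wrapping on overflow or underflow. The visible subtlety is that refcount_inc() must not take a counter from 0 to 1, so the open path uses refcount_set(..., 1) for the first user; like mtk_open(), the sketch below assumes callers are already serialized (e.g. under rtnl), and struct shared_res is hypothetical:

#include <linux/refcount.h>

struct shared_res {
	refcount_t users;
};

static void shared_res_get(struct shared_res *r)
{
	/* callers serialized externally; 0 -> 1 is not allowed through refcount_inc() */
	if (!refcount_read(&r->users))
		refcount_set(&r->users, 1);
	else
		refcount_inc(&r->users);
}

static bool shared_res_put(struct shared_res *r)
{
	/* true when the last user is gone and teardown may proceed */
	return refcount_dec_and_test(&r->users);
}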
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 22b1cc012bc9..36054e6fb9d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -38,3 +38,11 @@ config MLX4_DEBUG
mlx4_core driver. The output can be turned on via the
debug_level module parameter (which can also be set after
the driver is loaded through sysfs).
+
+config MLX4_CORE_GEN2
+ bool "Support for old gen2 Mellanox PCI IDs" if (MLX4_CORE)
+ depends on MLX4_CORE
+ default y
+ ---help---
+ Say Y here if you want to use old gen2 Mellanox devices in the
+ driver.
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index c82217e0d22d..16b10d01fcf4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 53daa6ca5d83..e2b6b0cac1ac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -231,10 +231,10 @@ static void dump_err_buf(struct mlx4_dev *dev)
i, swab32(readl(priv->catas_err.map + i)));
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
- struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
+ struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
if (mlx4_is_slave(dev)) {
@@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
- init_timer(&priv->catas_err.timer);
+ timer_setup(&priv->catas_err.timer, poll_catas, 0);
priv->catas_err.map = NULL;
if (!mlx4_is_slave(dev)) {
@@ -293,8 +293,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
}
}
- priv->catas_err.timer.data = (unsigned long) dev;
- priv->catas_err.timer.function = poll_catas;
priv->catas_err.timer.expires =
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
add_timer(&priv->catas_err.timer);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 72eb50cd5ecd..d8e9a323122e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data)
list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (atomic_dec_and_test(&mcq->refcount))
+ if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
if (kick)
@@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar = uar;
- atomic_set(&cq->refcount, 1);
+ refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
@@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3d4e4a5d00d1..bf1f04164885 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1742,13 +1742,18 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
+{
+ return min_t(int, num_online_cpus(), MAX_RX_RINGS);
+}
+
static void mlx4_en_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- channel->max_rx = MAX_RX_RINGS;
- channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+ channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
+ channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
channel->rx_count = priv->rx_ring_num;
channel->tx_count = priv->tx_ring_num[TX] /
@@ -1777,7 +1782,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
if (channel->tx_count * priv->prof->num_up + xdp_count >
- MAX_TX_RINGS) {
+ priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 686e18de9a97..2c2965497ed3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -153,7 +153,7 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
int i;
params->udp_rss = udp_rss;
- params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+ params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
MLX4_EN_MIN_TX_RING_P_UP :
min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
@@ -170,8 +170,8 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
- params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
- params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
+ params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
+ params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9c218f1cfc6c..99051a294fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -135,7 +135,7 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
{
struct tc_mqprio_qopt *mqprio = type_data;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
@@ -1752,6 +1752,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_arm_cq(priv, cq);
} else {
+ mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
mlx4_en_init_recycle_ring(priv, i);
/* XDP TX CQ should never be armed */
}
@@ -2915,7 +2916,7 @@ static u32 mlx4_xdp_query(struct net_device *dev)
return prog_id;
}
-static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -2957,7 +2958,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
- .ndo_xdp = mlx4_xdp,
+ .ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2994,7 +2995,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
- .ndo_xdp = mlx4_xdp,
+ .ndo_bpf = mlx4_xdp,
};
struct mlx4_en_bond {
@@ -3305,7 +3306,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
- priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
+ priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 5a47f9669621..6883ac75d37f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -53,7 +53,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
if (is_tx) {
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
- context->params2 |= MLX4_QP_BIT_FPP;
+ context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
} else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b97a55c827eb..85e28efcda33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -193,7 +193,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
- GFP_KERNEL | __GFP_COLD)) {
+ GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
@@ -254,8 +254,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
- min_t(int, num_of_eqs,
- netif_get_num_default_rss_queues());
+ min_t(int, num_of_eqs, num_online_cpus());
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(num_rx_rings);
}
@@ -552,8 +551,7 @@ static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
- GFP_ATOMIC | __GFP_COLD |
- __GFP_MEMALLOC))
+ GFP_ATOMIC | __GFP_MEMALLOC))
break;
ring->prod++;
} while (likely(--missing));
@@ -762,6 +760,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp.data_hard_start = va - frags[0].page_offset;
xdp.data = va;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + length;
orig_data = xdp.data;
@@ -778,7 +777,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (likely(!mlx4_en_xmit_frame(ring, frags, dev,
+ if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
&doorbell_pending))) {
frags[0].page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8a32a8f7f9c0..6b6853773848 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done < budget;
@@ -718,7 +718,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- ring->doorbell_qpn,
+ (__force u32)ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
@@ -1085,13 +1085,35 @@ tx_drop:
#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
/ 16) & 0x3f)
+void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->size; i++) {
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf +
+ (i << LOG_TXBB_SIZE);
+
+ tx_info->map0_byte_count = PAGE_SIZE;
+ tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
+ tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
+ tx_info->ts_requested = 0;
+ tx_info->nr_maps = 1;
+ tx_info->linear = 1;
+ tx_info->inl = 0;
+
+ tx_desc->data.lkey = ring->mr_key;
+ tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+ }
+}
+
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
- struct net_device *dev, unsigned int length,
+ struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending)
{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_en_tx_info *tx_info;
struct mlx4_wqe_data_seg *data;
@@ -1123,25 +1145,16 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_info->page = frame->page;
frame->page = NULL;
tx_info->map0_dma = dma;
- tx_info->map0_byte_count = PAGE_SIZE;
- tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
- tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
- tx_info->ts_requested = 0;
- tx_info->nr_maps = 1;
- tx_info->linear = 1;
- tx_info->inl = 0;
dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
length, PCI_DMA_TODEVICE);
data->addr = cpu_to_be64(dma + frame->page_offset);
- data->lkey = ring->mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(length);
/* tx completion can avoid cache line miss for common cases */
- tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
@@ -1152,10 +1165,13 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
ring->prod += MLX4_EN_XDP_TX_NRTXBB;
- qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ ring->xmit_more++;
- mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0,
- op_own, false, false);
*doorbell_pending = true;
return NETDEV_TX_OK;
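
The ACCESS_ONCE() conversions in the en_tx.c hunks above follow the tree-wide move to READ_ONCE()/WRITE_ONCE(): the completion path publishes ring->cons exactly once, and the transmit path samples it once into a local snapshot. A minimal userspace sketch of that pattern, assuming simplified volatile-cast stand-ins for the kernel helpers (not the kernel's exact macro definitions):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() helpers. */
#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

struct ring {
	uint32_t prod;	/* written by the transmit path   */
	uint32_t cons;	/* written by the completion path */
	uint32_t size;
};

/* Completion path: publish the new consumer index in one store. */
static void ring_complete(struct ring *r, uint32_t done)
{
	WRITE_ONCE(r->cons, r->cons + done);
}

/* Transmit path: sample the consumer index once and reuse the snapshot. */
static int ring_is_full(struct ring *r)
{
	uint32_t cons = READ_ONCE(r->cons);

	return (r->prod - cons) >= r->size;
}

int main(void)
{
	struct ring r = { .prod = 8, .cons = 0, .size = 8 };

	printf("full=%d\n", ring_is_full(&r));	/* 1: nothing completed yet */
	ring_complete(&r, 4);
	printf("full=%d\n", ring_is_full(&r));	/* 0: four slots freed */
	return 0;
}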
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 16c09949afd5..634f603f941c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -57,12 +57,12 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
- u64 val; \
- switch (sizeof(dest)) { \
+ __be64 val; \
+ switch (sizeof(dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
- case 8: val = get_unaligned((u64 *)__p); \
+ case 8: val = get_unaligned((__be64 *)__p); \
(dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
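
The MLX4_GET() change above keeps the 8-byte temporary typed as __be64, so the value stays annotated as big-endian until be64_to_cpu() converts it and sparse can flag any missed swap. A rough userspace equivalent of reading an unaligned big-endian field, assuming glibc's <endian.h>; the buffer layout is illustrative only:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a 64-bit big-endian field from an unaligned buffer offset. */
static uint64_t get_be64_field(const void *buf, size_t offset)
{
	uint64_t be;	/* still in wire byte order here */

	memcpy(&be, (const char *)buf + offset, sizeof(be));
	return be64toh(be);	/* convert exactly once, at the boundary */
}

int main(void)
{
	unsigned char fw_reply[16] = { 0 };
	static const unsigned char field[8] = {
		0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88
	};

	/* Pretend the firmware put 0x1122334455667788 at (unaligned) offset 5. */
	memcpy(&fw_reply[5], field, sizeof(field));

	printf("0x%llx\n", (unsigned long long)get_be64_field(fw_reply, 5));
	return 0;
}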
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e61c99ef741d..4d84cab77105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4066,6 +4066,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
static const struct pci_device_id mlx4_pci_table[] = {
+#ifdef CONFIG_MLX4_CORE_GEN2
/* MT25408 "Hermon" */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
@@ -4085,6 +4086,7 @@ static const struct pci_device_id mlx4_pci_table[] = {
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
/* MT25400 Family [ConnectX-2] */
MLX_VF(0x1002), /* Virtual Function */
+#endif /* CONFIG_MLX4_CORE_GEN2 */
/* MT27500 Family [ConnectX-3] */
MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
MLX_VF(0x1004), /* Virtual Function */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fdb3ad0cbe54..1856e279a7e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -399,7 +399,7 @@ struct mlx4_en_profile {
u32 active_ports;
u32 small_pkt_int;
u8 no_reset;
- u8 num_tx_rings_p_up;
+ u8 max_num_tx_rings_p_up;
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
@@ -693,7 +693,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
- struct net_device *dev, unsigned int length,
+ struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
@@ -705,6 +705,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring);
+void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 926f3c3f3665..aab28eb27a30 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MLX4_STATS_
#define _MLX4_STATS_
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 728a2fb1f5c0..769598f7b6c8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp = __mlx4_qp_lookup(dev, qpn);
if (qp)
- atomic_inc(&qp->refcount);
+ refcount_inc(&qp->refcount);
spin_unlock(&qp_table->lock);
@@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
qp->event(qp, event_type);
- if (atomic_dec_and_test(&qp->refcount))
+ if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
}
@@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
if (err)
goto err_icm;
- atomic_set(&qp->refcount, 1);
+ refcount_set(&qp->refcount, 1);
init_completion(&qp->free);
return 0;
@@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- if (atomic_dec_and_test(&qp->refcount))
+ if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
@@ -925,7 +925,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
if (states[i + 1] != MLX4_QP_STATE_RTR)
- context->params2 &= ~MLX4_QP_BIT_FPP;
+ context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index fabb53379727..04304dd894c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3185,7 +3185,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
if (slave != mlx4_master_func_num(dev)) {
- qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+ qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
/* setting QP rate-limit is disallowed for VFs */
if (qp_ctx->rate_limit_params)
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index bedf52126824..cbe4d9746ddf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
rcu_read_unlock();
if (srq)
- atomic_inc(&srq->refcount);
+ refcount_inc(&srq->refcount);
else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
@@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
srq->event(srq, event_type);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
@@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
if (err)
goto err_radix;
- atomic_set(&srq->refcount, 1);
+ refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return 0;
@@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
- if (atomic_dec_and_test(&srq->refcount))
+ if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
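
The atomic_t to refcount_t conversions in qp.c and srq.c above keep the existing lifetime scheme: the object starts with one reference, event handlers take a reference under the table lock, and the free path drops the initial reference and then waits on a completion until the count reaches zero. A userspace sketch of that pattern, assuming a mutex/condvar pair stands in for the kernel completion (refcount_t's saturation checks are not modeled):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint	refcount;
	pthread_mutex_t	lock;
	pthread_cond_t	freed;
	bool		done;
};

static void obj_init(struct obj *o)
{
	atomic_init(&o->refcount, 1);	/* creator holds the first reference */
	pthread_mutex_init(&o->lock, NULL);
	pthread_cond_init(&o->freed, NULL);
	o->done = false;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_mutex_lock(&o->lock);
		o->done = true;			/* complete(&obj->free) */
		pthread_cond_signal(&o->freed);
		pthread_mutex_unlock(&o->lock);
	}
}

static void obj_free(struct obj *o)
{
	obj_put(o);				/* drop the creator's reference */
	pthread_mutex_lock(&o->lock);
	while (!o->done)			/* wait_for_completion() */
		pthread_cond_wait(&o->freed, &o->lock);
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct obj o;

	obj_init(&o);
	atomic_fetch_add(&o.refcount, 1);	/* event handler takes a reference */
	obj_put(&o);				/* ...and drops it */
	obj_free(&o);				/* teardown waits for the count to hit zero */
	printf("freed\n");
	return 0;
}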
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fdaef00465d7..25deaa5a534c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,6 +6,7 @@ config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on MAY_USE_DEVLINK
depends on PCI
+ imply PTP_1588_CLOCK
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -29,7 +30,6 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
- imply PTP_1588_CLOCK
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 87a3099808f3..19b21b40ab07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,10 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
@@ -13,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
@@ -22,7 +23,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 336d4738b807..1016e05c7ec7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (atomic_dec_and_test(&mcq->refcount))
+ if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
if (!cq) {
@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
cq->comp(cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq = radix_tree_lookup(&table->tree, cqn);
if (cq)
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq->event(cq, event_type);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
- atomic_set(&cq->refcount, 1);
+ refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index fc281712869b..17b723218b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx,
list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context)
goto out;
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+ list_for_each_entry_safe(de, n, &temp, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
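
The dev.c fix above hinges on list_splice_init(): it moves every waiting event onto the local temp head and leaves the original list empty, so the subsequent walk must iterate temp rather than priv->waiting_events_list. A minimal singly-linked sketch of the same point (all names illustrative):

#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Move the whole chain from *src onto *dst, leaving *src empty. */
static void splice_init(struct node **dst, struct node **src)
{
	*dst = *src;
	*src = NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *waiting = &a;
	struct node *temp = NULL;

	splice_init(&temp, &waiting);

	/* Iterating 'waiting' here would visit nothing; iterate 'temp'. */
	for (struct node *n = temp; n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}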
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index cc13d3dbd366..c0872b3284cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,6 +57,7 @@
#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
+#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -67,7 +68,7 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
@@ -105,6 +106,7 @@
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
@@ -126,6 +128,16 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
+
+#define mlx5e_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msglevel) \
+ netdev_warn(priv->netdev, format, \
+ ##__VA_ARGS__); \
+} while (0)
+
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -187,12 +199,14 @@ extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
+ "tx_cqe_moder",
"rx_cqe_compress",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
- MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
+ MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
+ MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -212,6 +226,7 @@ enum mlx5e_priv_flag {
struct mlx5e_cq_moder {
u16 usec;
u16 pkts;
+ u8 cq_period_mode;
};
struct mlx5e_params {
@@ -223,7 +238,6 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
- u8 rx_cq_period_mode;
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
@@ -260,34 +274,18 @@ enum {
struct mlx5e_dcbx {
enum mlx5_dcbx_oper_mode mode;
struct mlx5e_cee_config cee_cfg; /* pending configuration */
+ u8 dscp_app_cnt;
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
};
-#endif
-#define MAX_PIN_NUM 8
-struct mlx5e_pps {
- u8 pin_caps[MAX_PIN_NUM];
- struct work_struct out_work;
- u64 start[MAX_PIN_NUM];
- u8 enabled;
-};
-
-struct mlx5e_tstamp {
- rwlock_t lock;
- struct cyclecounter cycles;
- struct timecounter clock;
- struct hwtstamp_config hwtstamp_config;
- u32 nominal_c_mult;
- unsigned long overflow_period;
- struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
- struct mlx5e_pps pps_info;
+struct mlx5e_dcbx_dp {
+ u8 dscp2prio[MLX5E_MAX_DSCP];
+ u8 trust_state;
};
+#endif
enum {
MLX5E_RQ_STATE_ENABLED,
@@ -375,9 +373,10 @@ struct mlx5e_txqsq {
u8 min_inline_mode;
u16 edge;
struct device *pdev;
- struct mlx5e_tstamp *tstamp;
__be32 mkey_be;
unsigned long state;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -543,10 +542,11 @@ struct mlx5e_rq {
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
- struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
struct mlx5e_page_cache page_cache;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
@@ -588,7 +588,7 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct mlx5e_tstamp *tstamp;
+ struct hwtstamp_config *tstamp;
int ix;
};
@@ -655,12 +655,14 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
- bool filter_disabled;
+ bool cvlan_filter_disabled;
};
struct mlx5e_l2_table {
@@ -762,8 +764,12 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ struct mlx5e_dcbx_dp dcbx_dp;
+#endif
/* priv data path fields - end */
+ u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
@@ -789,7 +795,7 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
- struct mlx5e_tstamp tstamp;
+ struct hwtstamp_config tstamp;
u16 q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -820,6 +826,8 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
+ void (*netdev_registered_init)(struct mlx5e_priv *priv);
+ void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
@@ -873,12 +881,6 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
- struct skb_shared_hwtstamps *hwts);
-void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
@@ -887,8 +889,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_timestamp_set(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -928,6 +931,8 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
@@ -993,6 +998,8 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
@@ -1045,6 +1052,9 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1081,6 +1091,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
@@ -1091,5 +1104,5 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
-
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
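
The new mlx5e_dbg() macro in en.h above gates its output on bits in priv->msglevel, pasting the NETIF_MSG_* name from the mlevel argument. A small userspace sketch of the same token-pasting pattern, assuming made-up MSG_* bits in place of the netif_msg flags:

#include <stdio.h>

#define MSG_LINK	(1u << 0)
#define MSG_HW		(1u << 1)

struct priv {
	unsigned int msglevel;
};

#define dbg(mlevel, priv, fmt, ...)				\
do {								\
	if (MSG_##mlevel & (priv)->msglevel)			\
		fprintf(stderr, fmt, ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	struct priv p = { .msglevel = MSG_HW };

	dbg(HW, &p, "prio_%d <=> tc_%d\n", 0, 3);	/* printed */
	dbg(LINK, &p, "link up\n");			/* suppressed */
	return 0;
}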
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 4614ddfa91eb..6a7c8b04447e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -256,7 +256,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mdata = mlx5e_ipsec_add_metadata(skb);
- if (unlikely(IS_ERR(mdata))) {
+ if (IS_ERR(mdata)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 12d3ced61114..610d485c4b03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -92,7 +92,7 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
@@ -126,7 +126,7 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv)
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
int err = 0;
int tt;
int i;
@@ -175,7 +175,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
@@ -466,7 +466,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
@@ -557,7 +557,7 @@ out:
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule, u16 rxq)
{
- struct mlx5_flow_destination dst;
+ struct mlx5_flow_destination dst = {};
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
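
The "= {}" changes in en_arfs.c above zero the on-stack flow destination before only some of its members are assigned, so the unset fields read as zero instead of stack garbage. A trivial illustration with a made-up struct:

#include <stdio.h>

struct dest {
	int type;
	int tir_num;
	int flags;	/* would otherwise hold whatever was on the stack */
};

int main(void)
{
	struct dest d = {};	/* every member starts at zero */

	d.type = 2;
	d.tir_num = 7;
	printf("type=%d tir=%d flags=%d\n", d.type, d.tir_num, d.flags);
	return 0;
}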
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
deleted file mode 100644
index 84dd63e74041..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/clocksource.h>
-#include "en.h"
-
-enum {
- MLX5E_CYCLES_SHIFT = 23
-};
-
-enum {
- MLX5E_PIN_MODE_IN = 0x0,
- MLX5E_PIN_MODE_OUT = 0x1,
-};
-
-enum {
- MLX5E_OUT_PATTERN_PULSE = 0x0,
- MLX5E_OUT_PATTERN_PERIODIC = 0x1,
-};
-
-enum {
- MLX5E_EVENT_MODE_DISABLE = 0x0,
- MLX5E_EVENT_MODE_REPETETIVE = 0x1,
- MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
-};
-
-enum {
- MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
- MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
- MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
- MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
- MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
- MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
-};
-
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
- struct skb_shared_hwtstamps *hwts)
-{
- u64 nsec;
-
- read_lock(&tstamp->lock);
- nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
- read_unlock(&tstamp->lock);
-
- hwts->hwtstamp = ns_to_ktime(nsec);
-}
-
-static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
-{
- struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
- cycles);
-
- return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
-}
-
-static void mlx5e_pps_out(struct work_struct *work)
-{
- struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
- out_work);
- struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
- pps_info);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- unsigned long flags;
- int i;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- u64 tstart;
-
- write_lock_irqsave(&tstamp->lock, flags);
- tstart = tstamp->pps_info.start[i];
- tstamp->pps_info.start[i] = 0;
- write_unlock_irqrestore(&tstamp->lock, flags);
- if (!tstart)
- continue;
-
- MLX5_SET(mtpps_reg, in, pin, i);
- MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
- MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
- mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
- }
-}
-
-static void mlx5e_timestamp_overflow(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
- overflow_work);
- struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
- queue_delayed_work(priv->wq, &tstamp->overflow_work,
- msecs_to_jiffies(tstamp->overflow_period * 1000));
-}
-
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
- int err;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* TX HW timestamp */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&priv->state_lock);
- /* RX HW timestamp */
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- /* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- /* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
- if (err) {
- netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
- mutex_unlock(&priv->state_lock);
- return err;
- }
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
- }
-
- memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
- mutex_unlock(&priv->state_lock);
-
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
-}
-
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
-}
-
-static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns = timespec64_to_ns(ts);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns;
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- ns = timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- *ts = ns_to_timespec64(ns);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_adjtime(&tstamp->clock, delta);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
-{
- u64 adj;
- u32 diff;
- unsigned long flags;
- int neg_adj = 0;
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
-
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
-
- adj = tstamp->nominal_c_mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
- tstamp->nominal_c_mult + diff;
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->extts.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
- pin_mode = MLX5E_PIN_MODE_IN;
- pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE;
- } else {
- pin = rq->extts.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp = 0;
- u64 cycles_now, cycles_delta;
- struct timespec64 ts;
- unsigned long flags;
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
- s64 ns;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->perout.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
-
- pin_mode = MLX5E_PIN_MODE_OUT;
- pattern = MLX5E_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
-
- if ((ns >> 1) != 500000000LL)
- return -EINVAL;
-
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- write_unlock_irqrestore(&tstamp->lock, flags);
- time_stamp = cycles_now + cycles_delta;
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE |
- MLX5E_MTPPS_FS_TIME_STAMP;
- } else {
- pin = rq->perout.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
-
- tstamp->pps_info.enabled = !!on;
- return 0;
-}
-
-static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- switch (rq->type) {
- case PTP_CLK_REQ_EXTTS:
- return mlx5e_extts_configure(ptp, rq, on);
- case PTP_CLK_REQ_PEROUT:
- return mlx5e_perout_configure(ptp, rq, on);
- case PTP_CLK_REQ_PPS:
- return mlx5e_pps_configure(ptp, rq, on);
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
-}
-
-static const struct ptp_clock_info mlx5e_ptp_clock_info = {
- .owner = THIS_MODULE,
- .max_adj = 100000000,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .adjfreq = mlx5e_ptp_adjfreq,
- .adjtime = mlx5e_ptp_adjtime,
- .gettime64 = mlx5e_ptp_gettime,
- .settime64 = mlx5e_ptp_settime,
- .enable = NULL,
- .verify = NULL,
-};
-
-static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
-{
- tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
- tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-}
-
-static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
-{
- int i;
-
- tstamp->ptp_info.pin_config =
- kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
- tstamp->ptp_info.n_pins, GFP_KERNEL);
- if (!tstamp->ptp_info.pin_config)
- return -ENOMEM;
- tstamp->ptp_info.enable = mlx5e_ptp_enable;
- tstamp->ptp_info.verify = mlx5e_ptp_verify;
- tstamp->ptp_info.pps = 1;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- snprintf(tstamp->ptp_info.pin_config[i].name,
- sizeof(tstamp->ptp_info.pin_config[i].name),
- "mlx5_pps%d", i);
- tstamp->ptp_info.pin_config[i].index = i;
- tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
- tstamp->ptp_info.pin_config[i].chan = i;
- }
-
- return 0;
-}
-
-static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
- struct mlx5e_tstamp *tstamp)
-{
- u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
- mlx5_query_mtpps(priv->mdev, out, sizeof(out));
-
- tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
- cap_number_of_pps_pins);
- tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_in_pins);
- tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_out_pins);
-
- tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
- tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
- tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
- tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
- tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
- tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
- tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
- tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
-}
-
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event)
-{
- struct net_device *netdev = priv->netdev;
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- struct timespec64 ts;
- u64 nsec_now, nsec_delta;
- u64 cycles_now, cycles_delta;
- int pin = event->index;
- s64 ns;
- unsigned long flags;
-
- switch (tstamp->ptp_info.pin_config[pin].func) {
- case PTP_PF_EXTTS:
- if (tstamp->pps_info.enabled) {
- event->type = PTP_CLOCK_PPSUSR;
- event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
- } else {
- event->type = PTP_CLOCK_EXTTS;
- }
- ptp_clock_event(tstamp->ptp, event);
- break;
- case PTP_PF_PEROUT:
- mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- ts.tv_sec += 1;
- ts.tv_nsec = 0;
- ns = timespec64_to_ns(&ts);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
- queue_work(priv->wq, &tstamp->pps_info.out_work);
- write_unlock_irqrestore(&tstamp->lock, flags);
- break;
- default:
- netdev_err(netdev, "%s: Unhandled event\n", __func__);
- }
-}
-
-void mlx5e_timestamp_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- u64 ns;
- u64 frac = 0;
- u32 dev_freq;
-
- mlx5e_timestamp_init_config(tstamp);
- dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
- if (!dev_freq) {
- mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
- rwlock_init(&tstamp->lock);
- tstamp->cycles.read = mlx5e_read_internal_timer;
- tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
- tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
- tstamp->cycles.shift);
- tstamp->nominal_c_mult = tstamp->cycles.mult;
- tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
- tstamp->mdev = priv->mdev;
-
- timecounter_init(&tstamp->clock, &tstamp->cycles,
- ktime_to_ns(ktime_get_real()));
-
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
- frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- tstamp->overflow_period = ns;
-
- INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
- INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
- if (tstamp->overflow_period)
- queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
- else
- mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
-
- /* Configure the PHC */
- tstamp->ptp_info = mlx5e_ptp_clock_info;
- snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
-
- /* Initialize 1PPS data structures */
- if (MLX5_PPS_CAP(priv->mdev))
- mlx5e_get_pps_caps(priv, tstamp);
- if (tstamp->ptp_info.n_pins)
- mlx5e_init_pin_config(tstamp);
-
- tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
- &priv->mdev->pdev->dev);
- if (IS_ERR(tstamp->ptp)) {
- mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
- PTR_ERR(tstamp->ptp));
- tstamp->ptp = NULL;
- }
-}
-
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return;
-
- if (priv->tstamp.ptp) {
- ptp_clock_unregister(priv->tstamp.ptp);
- priv->tstamp.ptp = NULL;
- }
-
- cancel_work_sync(&tstamp->pps_info.out_work);
- cancel_delayed_work_sync(&tstamp->overflow_work);
- kfree(tstamp->ptp_info.pin_config);
-}
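
The deleted en_clock.c (whose role moves to lib/clock.c per the Makefile hunk above) converted the free-running hardware counter to nanoseconds with a cyclecounter/timecounter pair: ns = base + (((cycles - last) & mask) * mult >> shift), re-reading the counter from the overflow work often enough that the masked delta never wraps. A userspace sketch of that arithmetic with illustrative mult/shift values (roughly a 156.25 MHz counter at shift 23):

#include <stdint.h>
#include <stdio.h>

struct cycle_clock {
	uint64_t mask;		/* width of the hardware counter */
	uint32_t mult;
	uint32_t shift;
	uint64_t last_cycles;	/* last counter snapshot */
	uint64_t base_ns;	/* nanoseconds at that snapshot */
};

static uint64_t cyc2ns(const struct cycle_clock *c, uint64_t cycles)
{
	/* The delta must stay small enough that delta * mult fits in 64 bits,
	 * which is why the driver refreshes last_cycles/base_ns periodically. */
	uint64_t delta = (cycles - c->last_cycles) & c->mask;

	return c->base_ns + ((delta * c->mult) >> c->shift);
}

int main(void)
{
	struct cycle_clock c = {
		.mask = (1ULL << 41) - 1,
		.mult = 53687091,	/* ~6.4 ns per cycle at shift 23 */
		.shift = 23,
		.last_cycles = 1000,
		.base_ns = 0,
	};

	printf("%llu ns\n", (unsigned long long)cyc2ns(&c, 2000));
	return 0;
}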
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index ece3fb147e3e..784e282803db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -134,6 +134,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
+ memset(res, 0, sizeof(*res));
}
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
@@ -170,3 +171,15 @@ out:
return err;
}
+
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
+{
+ u8 min_inline_mode;
+
+ mlx5_query_min_inline(mdev, &min_inline_mode);
+ if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ min_inline_mode = MLX5_INLINE_MODE_L2;
+
+ return min_inline_mode;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 51c4cc00a186..c6d90b6dd80e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -46,6 +46,13 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
+#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
+ MLX5_CAP_QCAM_REG(mdev, qpts) && \
+ MLX5_CAP_QCAM_REG(mdev, qpdpm))
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
+
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -234,7 +241,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
- int err;
+ int err, i;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
@@ -253,6 +260,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
+ __func__, i, ets->prio_tc[i]);
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
+ __func__, i, tc_tx_bw[i], tc_group[i]);
+ }
+
return err;
}
@@ -338,6 +353,11 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
mlx5_toggle_port_link(mdev);
+ if (!ret) {
+ mlx5e_dbg(HW, priv,
+ "%s: PFC per priority bit mask: 0x%x\n",
+ __func__, pfc->pfc_en);
+ }
return ret;
}
@@ -381,6 +401,113 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 0;
}
+static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct dcb_app temp;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Save the old entry info */
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ temp.protocol = app->protocol;
+ temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
+
+ /* Check if need to switch to dscp trust state */
+ if (!priv->dcbx.dscp_app_cnt) {
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+ /* Skip the fw command if new and old mapping are the same */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
+ err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
+ if (err)
+ goto fw_err;
+ }
+
+ /* Delete the old entry if exists */
+ is_new = false;
+ err = dcb_ieee_delapp(dev, &temp);
+ if (err)
+ is_new = true;
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ priv->dcbx.dscp_app_cnt++;
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
+static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Skip if no dscp app entry */
+ if (!priv->dcbx.dscp_app_cnt)
+ return -ENOENT;
+
+ /* Check if the entry matches fw setting */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Delete the app entry */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Reset the priority mapping back to zero */
+ err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
+ if (err)
+ goto fw_err;
+
+ priv->dcbx.dscp_app_cnt--;
+
+ /* Check if need to switch to pcp trust state */
+ if (!priv->dcbx.dscp_app_cnt)
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
@@ -446,6 +573,11 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
}
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
+ __func__, i, max_bw_value[i]);
+ }
+
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
@@ -471,6 +603,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
+ mlx5e_dbg(HW, priv,
+ "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
+ __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
+ ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets);
@@ -740,6 +876,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
+ .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
@@ -801,10 +939,135 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
mlx5e_dcbnl_ieee_setets_core(priv, &ets);
}
+enum {
+ INIT,
+ DELETE,
+};
+
+static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
+{
+ struct dcb_app temp;
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return;
+
+ /* No SEL_DSCP entry in non DSCP state */
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
+ return;
+
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ for (i = 0; i < MLX5E_MAX_DSCP; i++) {
+ temp.protocol = i;
+ temp.priority = priv->dcbx_dp.dscp2prio[i];
+ if (action == INIT)
+ dcb_ieee_setapp(priv->netdev, &temp);
+ else
+ dcb_ieee_delapp(priv->netdev, &temp);
+ }
+
+ priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
+}
+
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, INIT);
+}
+
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, DELETE);
+}
+
+static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+ params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
+ params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
+}
+
+static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ new_channels.params = priv->channels.params;
+ mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+
+ /* Skip if tx_min_inline is the same */
+ if (new_channels.params.tx_min_inline_mode ==
+ priv->channels.params.tx_min_inline_mode)
+ goto out;
+
+ if (mlx5e_open_channels(priv, &new_channels))
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
+{
+ int err;
+
+ err = mlx5_set_trust_state(priv->mdev, trust_state);
+ if (err)
+ return err;
+ priv->dcbx_dp.trust_state = trust_state;
+ mlx5e_trust_update_sq_inline_mode(priv);
+
+ return err;
+}
+
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
+{
+ int err;
+
+ err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
+ if (err)
+ return err;
+
+ priv->dcbx_dp.dscp2prio[dscp] = prio;
+ return err;
+}
+
+static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!MLX5_DSCP_SUPPORTED(mdev))
+ return 0;
+
+ err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ if (err)
+ return err;
+
+ mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+
+ err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
+ if (err)
+ return err;
+
+ return 0;
+}
+
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ mlx5e_trust_initialize(priv);
+
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
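
The setapp/delapp handlers added to en_dcbnl.c above keep one piece of bookkeeping: the port trusts DSCP only while at least one DSCP app entry is installed, so the first add and the last delete flip the trust state, and a deleted entry's dscp2prio mapping is reset to zero. A userspace sketch of that bookkeeping, with illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DSCP 64

enum trust { TRUST_PCP, TRUST_DSCP };

struct state {
	enum trust trust;
	bool present[MAX_DSCP];		/* which DSCP values have an app entry */
	unsigned char dscp2prio[MAX_DSCP];
	int app_cnt;
};

static int app_add(struct state *s, int dscp, int prio)
{
	if (dscp < 0 || dscp >= MAX_DSCP)
		return -1;
	if (!s->app_cnt)
		s->trust = TRUST_DSCP;	/* first entry: trust DSCP, not PCP */
	if (!s->present[dscp]) {
		s->present[dscp] = true;
		s->app_cnt++;
	}
	s->dscp2prio[dscp] = prio;
	return 0;
}

static int app_del(struct state *s, int dscp)
{
	if (dscp < 0 || dscp >= MAX_DSCP || !s->present[dscp])
		return -1;
	s->present[dscp] = false;
	s->dscp2prio[dscp] = 0;		/* reset the mapping back to zero */
	if (!--s->app_cnt)
		s->trust = TRUST_PCP;	/* last entry gone: fall back to PCP */
	return 0;
}

int main(void)
{
	struct state s = { .trust = TRUST_PCP };

	app_add(&s, 46, 5);
	printf("trust=%d cnt=%d\n", s.trust, s.app_cnt);
	app_del(&s, 46);
	printf("trust=%d cnt=%d\n", s.trust, s.app_cnt);
	return 0;
}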
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d12e9fc0d76b..23425f028405 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,7 +31,6 @@
*/
#include "en.h"
-#include "en_accel/ipsec.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -136,59 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u8 pfc_en_tx;
- u8 pfc_en_rx;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
- err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
-
- return err ? 0 : pfc_en_tx | pfc_en_rx;
-}
-
-static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 rx_pause;
- u32 tx_pause;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return false;
-
- err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
-
- return err ? false : rx_pause | tx_pause;
-}
-
-#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
-#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
-#define MLX5E_NUM_PFC_COUNTERS(priv) \
- ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
- NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
+ int i, num_stats = 0;
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_SW_COUNTERS +
- MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
- NUM_PCIE_COUNTERS(priv) +
- MLX5E_NUM_RQ_STATS(priv) +
- MLX5E_NUM_SQ_STATS(priv) +
- MLX5E_NUM_PFC_COUNTERS(priv) +
- ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc) +
- mlx5e_ipsec_get_count(priv);
-
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
+ return num_stats;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
case ETH_SS_TEST:
@@ -208,104 +163,10 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
{
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
-
- /* SW counters */
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
-
- /* Q counters */
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
-
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].format);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- char pfc_string[ETH_GSTRING_LEN];
-
- snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
- }
- }
-
- /* port module event counters */
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ int i, idx = 0;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
@@ -340,10 +201,7 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- struct mlx5e_channels *channels;
- struct mlx5_priv *mlx5_priv;
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
+ int i, idx = 0;
if (!data)
return;
@@ -351,102 +209,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv, true);
- channels = &priv->channels;
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_stats_desc, i);
-
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
-
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- /* port module event counters */
- mlx5_priv = &priv->mdev->priv;
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_stats(priv, data + idx);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
- rq_stats_desc, j);
-
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
- sq_stats_desc, j);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
@@ -1417,14 +1183,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
- info->phc_index = priv->tstamp.ptp ?
- ptp_clock_index(priv->tstamp.ptp) : -1;
+ info->phc_index = mdev->clock.ptp ?
+ ptp_clock_index(mdev->clock.ptp) : -1;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return 0;
@@ -1573,6 +1340,16 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static u32 mlx5e_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
+}
+
+static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
+}
+
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -1677,29 +1454,36 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
+ bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- bool rx_mode_changed;
- u8 rx_cq_period_mode;
+ bool mode_changed;
+ u8 cq_period_mode, current_cq_period_mode;
int err = 0;
- rx_cq_period_mode = enable ?
+ cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
+ current_cq_period_mode = is_rx_cq ?
+ priv->channels.params.rx_cq_moderation.cq_period_mode :
+ priv->channels.params.tx_cq_moderation.cq_period_mode;
+ mode_changed = cq_period_mode != current_cq_period_mode;
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- if (!rx_mode_changed)
+ if (!mode_changed)
return 0;
new_channels.params = priv->channels.params;
- mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
+ if (is_rx_cq)
+ mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
+ else
+ mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1714,6 +1498,16 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, false);
+}
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, true);
+}
+
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
@@ -1754,7 +1548,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
@@ -1802,6 +1596,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ set_pflag_tx_cqe_based_moder);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
@@ -1905,4 +1705,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
+ .get_msglevel = mlx5e_get_msglevel,
+ .set_msglevel = mlx5e_set_msglevel,
+
};
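The three ethtool callbacks above (sset count, strings, stats) now just walk a table of per-group handlers instead of open-coding every counter family three times. A minimal sketch of the descriptor that loop assumes — the real definition is added elsewhere in this patch (presumably en_stats.h, with the group array in the new en_stats.c shown further down) and may differ slightly in naming:

struct mlx5e_priv;

struct mlx5e_stats_grp {
	/* number of counters the group currently exposes */
	int (*get_num_stats)(struct mlx5e_priv *priv);
	/* copy one ETH_GSTRING_LEN name per counter, return updated idx */
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	/* copy one u64 value per counter, return updated idx */
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
};

extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;

Because each fill_* callback returns the updated index, adding a new counter family only requires appending an entry to mlx5e_stats_grps[].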
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 850cdc980ab5..def513484845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -154,7 +154,8 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
- MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
@@ -162,7 +163,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
@@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ /* cvlan_tag enabled in match criteria and
+ * disabled in match value means both S & C tags
+ * don't exist (untagged of both)
+ */
rule_p = &priv->fs.vlan.untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
@@ -190,8 +195,18 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
- default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
+ rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -223,7 +238,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+ if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
mlx5e_vport_context_update_vlans(priv);
err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
@@ -255,11 +270,17 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.any_svlan_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ if (priv->fs.vlan.active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
+ priv->fs.vlan.active_svlans_rule[vid] = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->fs.vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
- priv->fs.vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
+ priv->fs.vlan.active_cvlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -283,46 +304,83 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = false;
+ priv->fs.vlan.cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.filter_disabled)
+ if (priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = true;
+ priv->fs.vlan.cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_cvlans);
- set_bit(vid, priv->fs.vlan.active_vlans);
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ if (err)
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ return err;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+{
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_svlans);
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ if (err) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ return err;
+ }
+
+ /* Some features need fixing up: C-tag stripping must be turned off while S-tag VLANs are active, see mlx5e_fix_features() */
+ netdev_update_features(netdev);
+ return err;
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->fs.vlan.active_vlans);
+ if (be16_to_cpu(proto) == ETH_P_8021Q)
+ return mlx5e_vlan_rx_add_cvid(priv, vid);
+ else if (be16_to_cpu(proto) == ETH_P_8021AD)
+ return mlx5e_vlan_rx_add_svid(priv, vid);
+
+ return -EOPNOTSUPP;
+}
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (be16_to_cpu(proto) == ETH_P_8021Q) {
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(dev);
+ }
return 0;
}
@@ -333,11 +391,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_add_any_vid_rules(priv);
}
@@ -348,11 +409,14 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_del_any_vid_rules(priv);
}
@@ -365,21 +429,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
+ u8 mac_addr[ETH_ALEN];
int l2_err = 0;
+ ether_addr_copy(mac_addr, hn->ai.addr);
+
switch (action) {
case MLX5E_ACTION_ADD:
mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
- if (!is_multicast_ether_addr(hn->ai.addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr)) {
+ l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
+ l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
mlx5e_del_l2_flow_rule(priv, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
@@ -387,7 +454,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
if (l2_err)
netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
@@ -545,8 +612,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn_once(ndev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
@@ -561,7 +631,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
@@ -738,7 +808,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
@@ -909,7 +979,7 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
@@ -1005,7 +1075,7 @@ err:
return err;
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
struct mlx5_flow_table_attr ft_attr = {};
@@ -1041,7 +1111,7 @@ err:
return err;
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
@@ -1106,7 +1176,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -1265,13 +1335,15 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 3
+#define MLX5E_NUM_VLAN_GROUPS 4
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
-#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
-#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
- MLX5E_VLAN_GROUP2_SIZE)
+ MLX5E_VLAN_GROUP2_SIZE +\
+ MLX5E_VLAN_GROUP3_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -1294,7 +1366,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1305,7 +1378,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1314,6 +1387,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
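With the added S-tag group, the VLAN steering table grows from BIT(12) + BIT(1) + BIT(0) = 4099 to BIT(12) + BIT(12) + BIT(1) + BIT(0) = 8195 flow entries: 4096 exact C-tag VID rules (group 0, unchanged here, matching cvlan_tag plus first_vid), 4096 exact S-tag VID rules (the new svlan_tag plus first_vid group), two rules matching only cvlan_tag (the untagged and any-C-tag rules), and one matching only svlan_tag (the any-S-tag rule). The exact-VID groups are created first, so they occupy the lower flow indices ahead of the catch-all cvlan/svlan groups.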
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cc11bbbd0309..d2b057a3e512 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
@@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_tso_bytes += sq_stats->tso_bytes;
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -373,8 +375,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
- struct ptp_clock_event ptp_event;
- struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -384,14 +384,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
- case MLX5_DEV_EVENT_PPS:
- eqe = (struct mlx5_eqe *)param;
- ptp_event.index = eqe->data.pps.pin;
- ptp_event.timestamp =
- timecounter_cyc2time(&priv->tstamp.clock,
- be64_to_cpu(eqe->data.pps.time_stamp));
- mlx5e_pps_event_handler(vpriv, &ptp_event);
- break;
default:
break;
}
@@ -585,6 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
@@ -690,7 +683,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_period_mode;
+ rq->am.mode = params->rx_cq_moderation.cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -1123,6 +1116,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->tstamp = c->tstamp;
+ sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->txq_ix = txq_ix;
@@ -1982,7 +1976,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->rx_cq_period_mode;
+ param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1994,8 +1988,7 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -2678,6 +2671,12 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
+void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+{
+ priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
+ priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2693,7 +2692,7 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_init(priv);
+ mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2731,7 +2730,6 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -3086,13 +3084,10 @@ out:
}
#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
+static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
@@ -3106,17 +3101,54 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
return -EOPNOTSUPP;
}
}
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
+ priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
#endif
-static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
- case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_setup_tc_block(dev, type_data);
#endif
- case TC_SETUP_MQPRIO:
+ case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
@@ -3230,14 +3262,14 @@ out:
return err;
}
-static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_vlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv);
else
- mlx5e_disable_vlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv);
return 0;
}
@@ -3348,7 +3380,7 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_lro);
err |= mlx5e_handle_feature(netdev, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
- set_feature_vlan_filter);
+ set_feature_cvlan_filter);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
@@ -3365,6 +3397,25 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}
+static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+ /* HW strips the outer C-tag header, this is a problem
+ * for S-tag traffic.
+ */
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
+ }
+ mutex_unlock(&priv->state_lock);
+
+ return features;
+}
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3403,6 +3454,80 @@ out:
return err;
}
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ /* RX HW timestamp */
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling cqe compression\n");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err) {
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ mutex_unlock(&priv->state_lock);
+ return -ERANGE;
+ }
+
+ memcpy(&priv->tstamp, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config *cfg = &priv->tstamp;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+}
+
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -3726,7 +3851,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
return prog_id;
}
-static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -3768,6 +3893,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
+ .ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
@@ -3778,7 +3904,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
- .ndo_xdp = mlx5e_xdp,
+ .ndo_bpf = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -3882,14 +4008,32 @@ static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw <= 16000) && (pci_bw < link_speed));
}
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->tx_cq_moderation.cq_period_mode = cq_period_mode;
+
+ params->tx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
- params->rx_cq_period_mode = cq_period_mode;
+ params->rx_cq_moderation.cq_period_mode = cq_period_mode;
params->rx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->rx_cq_moderation.usec =
@@ -3897,10 +4041,11 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (params->rx_am_enabled)
params->rx_cq_moderation =
- mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+ mlx5e_am_get_def_profile(cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ params->rx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -3960,16 +4105,11 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
-
- params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
@@ -3989,6 +4129,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4055,6 +4196,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
@@ -4112,6 +4254,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -4269,7 +4412,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (netdev->reg_state != NETREG_REGISTERED)
return;
-
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
/* Device already registered: sync netdev system state */
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
@@ -4290,6 +4435,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->netdev->reg_state == NETREG_REGISTERED)
+ mlx5e_dcbnl_delete_app(priv);
+#endif
+
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
@@ -4510,6 +4660,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
return priv;
err_detach:
@@ -4526,6 +4679,9 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
void *ppriv = priv->ppriv;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_delete_app(priv);
+#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
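The hwtstamp configuration is now kept directly in priv->tstamp as a struct hwtstamp_config and served by mlx5e_hwstamp_set()/mlx5e_hwstamp_get(), with the PHC index coming from mdev->clock instead of the removed mlx5e_tstamp code. A userspace sketch of the SIOCSHWTSTAMP path this implements (the helper name and the AF_INET socket are illustrative, not part of the patch); note that the driver rewrites any PTP rx_filter to HWTSTAMP_FILTER_ALL and disables RX CQE compression while timestamping is on:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Enable HW timestamping on @ifname; returns 0 on success. */
static int enable_hwtstamp(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int sock, err;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	err = ioctl(sock, SIOCSHWTSTAMP, &ifr);
	close(sock);

	/* On mlx5e, cfg.rx_filter comes back as HWTSTAMP_FILTER_ALL. */
	return err;
}

The PHC index reported by ethtool -T (and used by ptp4l/phc2sys) is the one mlx5e_ethtool_get_ts_info() now derives from mdev->clock.ptp.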
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 45e03c427faf..2c43606c26b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
+#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
@@ -658,23 +659,12 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
}
static int
-mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
+mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
- if (cls_flower->egress_dev) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- dev = mlx5_eswitch_get_uplink_netdev(esw);
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- cls_flower);
- }
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -687,12 +677,48 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
}
}
+static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -986,6 +1012,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
+ struct mlx5e_priv *upriv;
int err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
@@ -1017,15 +1044,25 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+ if (err)
+ goto err_neigh_cleanup;
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_egdev_cleanup;
}
return 0;
+err_egdev_cleanup:
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1045,9 +1082,12 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
void *ppriv = priv->ppriv;
+ struct mlx5e_priv *upriv;
unregister_netdev(rep->netdev);
-
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15a1687483cc..5b499c7a698f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,10 +42,11 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
-static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
- return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
@@ -215,22 +216,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- struct page *page;
-
if (mlx5e_rx_cache_get(rq, dma_info))
return 0;
- page = dev_alloc_pages(rq->buff.page_order);
- if (unlikely(!page))
+ dma_info->page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!dma_info->page))
return -ENOMEM;
- dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- put_page(page);
+ put_page(dma_info->page);
+ dma_info->page = NULL;
return -ENOMEM;
}
- dma_info->page = page;
return 0;
}
@@ -562,7 +561,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
- skb->mac_len = ETH_HLEN;
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -609,10 +607,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
-static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
{
__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+ ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}
@@ -622,6 +621,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
struct sk_buff *skb,
bool lro)
{
+ int network_depth = 0;
+
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
@@ -631,9 +632,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_first_ethertype_ip(skb)) {
+ if (is_last_ethertype_ip(skb, &network_depth)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). This will add
+ * the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
rq->stats.csum_complete++;
return;
}
@@ -661,9 +670,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
+ skb->mac_len = ETH_HLEN;
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -676,17 +685,20 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
rq->stats.lro_bytes += cqe_bcnt;
}
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
mlx5e_skb_set_hash(cqe, skb);
- if (cqe_has_vlan(cqe))
+ if (cqe_has_vlan(cqe)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->vlan_info));
+ rq->stats.removed_vlan_packets++;
+ }
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -797,6 +809,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
return false;
xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
@@ -1162,12 +1175,25 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
+ struct net_device *netdev;
char *pseudo_header;
+ u32 qpn;
u8 *dgid;
u8 g;
+ qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
+ netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
+
+ /* No mapping present, cannot process SKB. This might happen if a child
+ * interface is going down while having unprocessed CQEs on parent RQ
+ */
+ if (unlikely(!netdev)) {
+ /* TODO: add drop counters support */
+ skb->dev = NULL;
+ pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
+ return;
+ }
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1188,8 +1214,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1229,6 +1256,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_free_wqe;
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!skb->dev)) {
+ dev_kfree_skb_any(skb);
+ goto wq_free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
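The CHECKSUM_COMPLETE fix above can be checked with a single 802.1Q-tagged frame: __vlan_get_protocol() reports network_depth = ETH_HLEN + VLAN_HLEN = 14 + 4 = 18, so the driver executes skb->csum = csum_partial(skb->data + 14, 4, skb->csum), folding in the four bytes between the Ethernet header and the IP header (the VLAN TCI plus the encapsulated EtherType) that the CQE checksum, which starts at the IP header, does not cover. Untagged frames keep network_depth == ETH_HLEN and are left untouched.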
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index acf32fe952cd..e401d9d245f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -63,7 +63,11 @@ profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
{
- return profile[cq_period_mode][ix];
+ struct mlx5e_cq_moder cq_moder;
+
+ cq_moder = profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
}
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
@@ -75,7 +79,7 @@ struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
- return profile[rx_cq_period_mode][default_profile_ix];
+ return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
}
/* Adaptive moderation logic */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
new file mode 100644
index 000000000000..b74ddc7984bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -0,0 +1,899 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+
+static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_SW_COUNTERS;
+}
+
+static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
+ return idx;
+}
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+
+static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+{
+ return priv->q_counter ? NUM_Q_COUNTERS : 0;
+}
+
+static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ return idx;
+}
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+
+static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_VPORT_COUNTERS;
+}
+
+static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+
+static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_802_3_COUNTERS;
+}
+
+static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+
+static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2863_COUNTERS;
+}
+
+static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+
+static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2819_COUNTERS;
+}
+
+static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
+#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+
+static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_COUNTERS : 0;
+}
+
+static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_ETH_EXT_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_eth_ext_stats_desc[] = {
+ { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
+};
+
+#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
+
+static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+{
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ return NUM_PPORT_ETH_EXT_COUNTERS;
+
+ return 0;
+}
+
+static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_eth_ext_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i);
+ return idx;
+}
+
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+#define PCIE_PERF_OFF64(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pcie_perf_stats_desc64[] = {
+ { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
+static const struct counter_desc pcie_perf_stall_stats_desc[] = {
+ { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
+ { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
+ { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
+ { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
+};
+
+#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
+#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
+
+static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+{
+ int num_stats = 0;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ num_stats += NUM_PCIE_PERF_COUNTERS;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ num_stats += NUM_PCIE_PERF_COUNTERS64;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
+
+ return num_stats;
+}
+
+static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc64[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stall_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+
+static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
+static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (mlx5e_query_global_pause_combined(priv) +
+ hweight8(mlx5e_query_pfc_combined(priv))) *
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
+ }
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ return idx;
+}
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+ { "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_high_temp", 48 }, /* high temperature */
+ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+};
+
+#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
+#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
+
+static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
+}
+
+static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
+ return idx;
+}
+
+static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ mlx5e_pme_status_desc, i);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ mlx5e_pme_error_desc, i);
+
+ return idx;
+}
+
+static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_ipsec_get_count(priv);
+}
+
+static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_strings(priv,
+ data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_stats(priv, data + idx);
+}
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+}
+
+static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channel_tc2txq[i][tc]);
+
+ return idx;
+}
+
+static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5e_channels *channels = &priv->channels;
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+ rq_stats_desc, j);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+ sq_stats_desc, j);
+
+ return idx;
+}
+
+const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
+ {
+ .get_num_stats = mlx5e_grp_sw_get_num_stats,
+ .fill_strings = mlx5e_grp_sw_fill_strings,
+ .fill_stats = mlx5e_grp_sw_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_q_get_num_stats,
+ .fill_strings = mlx5e_grp_q_fill_strings,
+ .fill_stats = mlx5e_grp_q_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_vport_get_num_stats,
+ .fill_strings = mlx5e_grp_vport_fill_strings,
+ .fill_stats = mlx5e_grp_vport_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_802_3_get_num_stats,
+ .fill_strings = mlx5e_grp_802_3_fill_strings,
+ .fill_stats = mlx5e_grp_802_3_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2863_get_num_stats,
+ .fill_strings = mlx5e_grp_2863_fill_strings,
+ .fill_stats = mlx5e_grp_2863_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2819_get_num_stats,
+ .fill_strings = mlx5e_grp_2819_fill_strings,
+ .fill_stats = mlx5e_grp_2819_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_phy_get_num_stats,
+ .fill_strings = mlx5e_grp_phy_fill_strings,
+ .fill_stats = mlx5e_grp_phy_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
+ .fill_strings = mlx5e_grp_eth_ext_fill_strings,
+ .fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pcie_get_num_stats,
+ .fill_strings = mlx5e_grp_pcie_fill_strings,
+ .fill_stats = mlx5e_grp_pcie_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pme_get_num_stats,
+ .fill_strings = mlx5e_grp_pme_fill_strings,
+ .fill_stats = mlx5e_grp_pme_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
+ .fill_strings = mlx5e_grp_ipsec_fill_strings,
+ .fill_stats = mlx5e_grp_ipsec_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_channels_get_num_stats,
+ .fill_strings = mlx5e_grp_channels_fill_strings,
+ .fill_stats = mlx5e_grp_channels_fill_stats,
+ }
+};
+
+const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
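A minimal sketch (not part of the patch) of how a callback table like mlx5e_stats_grps above is typically consumed: the total counter count is the sum of get_num_stats() over all groups, and the string/value buffers are filled by threading a running index through each group's fill_strings()/fill_stats(). All names below (example_*) are hypothetical stand-ins, not the driver's real ethtool handlers.

#include <stdint.h>

struct example_priv;

struct example_stats_grp {
	int (*get_num_stats)(struct example_priv *priv);
	int (*fill_strings)(struct example_priv *priv, char *data, int idx);
	int (*fill_stats)(struct example_priv *priv, uint64_t *data, int idx);
};

/* Sum of all group sizes; groups that are not supported simply report 0. */
static int example_get_count(const struct example_stats_grp *grps, int n,
			     struct example_priv *priv)
{
	int i, total = 0;

	for (i = 0; i < n; i++)
		total += grps[i].get_num_stats(priv);
	return total;
}

/* Each group writes its values starting at idx and returns the new idx. */
static void example_fill(const struct example_stats_grp *grps, int n,
			 struct example_priv *priv, uint64_t *data)
{
	int i, idx = 0;

	for (i = 0; i < n; i++)
		idx = grps[i].fill_stats(priv, data, idx);
}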
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f8637213afc0..d679e21f686e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -59,8 +59,10 @@ struct mlx5e_sw_stats {
u64 tx_tso_bytes;
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
+ u64 tx_added_vlan_packets;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
@@ -91,54 +93,10 @@ struct mlx5e_sw_stats {
u64 link_down_events_phy;
};
-static const struct counter_desc sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
-};
-
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
};
-static const struct counter_desc q_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
-};
-
-#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
vstats->query_vport_out, c)
@@ -146,83 +104,22 @@ struct mlx5e_vport_stats {
__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
-static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_unicast_packets",
- VPORT_COUNTER_OFF(received_eth_unicast.packets) },
- { "rx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(received_eth_unicast.octets) },
- { "tx_vport_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
- { "tx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
- { "rx_vport_multicast_packets",
- VPORT_COUNTER_OFF(received_eth_multicast.packets) },
- { "rx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(received_eth_multicast.octets) },
- { "tx_vport_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
- { "tx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
- { "rx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
- { "rx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
- { "tx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
- { "tx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
- { "rx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(received_ib_unicast.packets) },
- { "rx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(received_ib_unicast.octets) },
- { "tx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
- { "tx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
- { "rx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(received_ib_multicast.packets) },
- { "rx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(received_ib_multicast.octets) },
- { "tx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
- { "tx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
-};
-
-#define PPORT_802_3_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_PHY_STATISTICAL_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
-#define PPORT_PER_PRIO_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
-#define PPORT_ETH_EXT_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -237,82 +134,10 @@ struct mlx5e_pport_stats {
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
-static const struct counter_desc pport_802_3_stats_desc[] = {
- { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
- { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
- { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
-};
-
-static const struct counter_desc pport_2863_stats_desc[] = {
- { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
- { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
- { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
-};
-
-static const struct counter_desc pport_2819_stats_desc[] = {
- { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
- { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
- { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
-};
-
-static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
- { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
-};
-
-static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
-};
-
-static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- /* %s is "global" or "prio{i}" */
- { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
-};
-
-static const struct counter_desc pport_eth_ext_stats_desc[] = {
- { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
-};
-
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_OFF64(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
@@ -321,22 +146,6 @@ struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_perf_stats_desc64[] = {
- { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
-};
-
-static const struct counter_desc pcie_perf_stall_stats_desc[] = {
- { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
- { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
- { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
- { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -346,6 +155,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_tx;
u64 xdp_tx_full;
@@ -362,31 +172,6 @@ struct mlx5e_rq_stats {
u64 cache_waive;
};
-static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
-};
-
struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
@@ -398,6 +183,7 @@ struct mlx5e_sq_stats {
u64 tso_inner_bytes;
u64 csum_partial;
u64 csum_partial_inner;
+ u64 added_vlan_packets;
u64 nop;
/* less likely accessed in data path */
u64 csum_none;
@@ -406,61 +192,6 @@ struct mlx5e_sq_stats {
u64 dropped;
};
-static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
-};
-
-#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
-#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
- (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-#define NUM_PCIE_PERF_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
-#define NUM_PCIE_PERF_COUNTERS64(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc64) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
-#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
-#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
- (ARRAY_SIZE(pport_eth_ext_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
-#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
- NUM_PPORT_2863_COUNTERS + \
- NUM_PPORT_2819_COUNTERS + \
- NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
- NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
- NUM_PPORT_PRIO + \
- NUM_PPORT_ETH_EXT_COUNTERS(priv))
-#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \
- NUM_PCIE_PERF_COUNTERS64(priv) +\
- NUM_PCIE_PERF_STALL_COUNTERS(priv))
-#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
-#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
-
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -470,14 +201,14 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
};
-static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
-};
+extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
+extern const int mlx5e_num_stats_grps;
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba1f72060aa..55979ec2e88a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -90,8 +90,8 @@ enum {
MLX5_HEADER_TYPE_NVGRE = 0x1,
};
-#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mod_hdr_key {
int num_actions;
@@ -263,10 +263,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_PRIO,
- MLX5E_TC_TABLE_NUM_ENTRIES,
+ tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
0, 0);
if (IS_ERR(priv->fs.tc.t)) {
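A worked example of the new table sizing (illustrative capability values, not taken from the patch): if the device reports max_flow_counter_31_16 = 1 and max_flow_counter_15_0 = 0, then max_flow_counter = (1 << 16) | 0 = 65536, so tc_grp_size = min(65536, MLX5E_TC_TABLE_MAX_GROUP_SIZE) = 65536. Multiplying by MLX5E_TC_TABLE_NUM_GROUPS (4) requests 262144 entries, but with log_max_ft_size = 17 the result is clamped to BIT(17) = 131072, giving tc_tbl_size = 131072. The fixed MLX5E_TC_TABLE_NUM_ENTRIES value of 1024 that this replaces no longer limits the table.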
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1d6925d4369a..569b42a01026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,9 +32,11 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -85,6 +87,20 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -96,8 +112,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!netdev_get_num_tc(dev))
return channel_ix;
- if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+ up = mlx5e_get_dscp_up(priv, skb);
+ else
+#endif
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
@@ -340,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
ihs += VLAN_HLEN;
+ sq->stats.added_vlan_packets++;
} else {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
@@ -348,7 +370,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
+ eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+ sq->stats.added_vlan_packets++;
}
headlen = skb_len - skb->data_len;
@@ -452,8 +477,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
SKBTX_HW_TSTAMP)) {
struct skb_shared_hwtstamps hwts = {};
- mlx5e_fill_hwstamp(sq->tstamp,
- get_cqe_ts(cqe), &hwts);
+ hwts.hwtstamp =
+ mlx5_timecounter_cyc2time(sq->clock,
+ get_cqe_ts(cqe));
skb_tstamp_tx(skb, &hwts);
}
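The DSCP trust path added above works because the DSCP codepoint occupies the upper six bits of the 8-bit IPv4 ToS / IPv6 Traffic Class byte, so shifting the DS field right by two yields a value in 0..63 that indexes dscp2prio. A standalone illustration of that extraction (hypothetical helper, not driver code):

#include <stdint.h>

/* DSCP is the top 6 bits of the DS field; the low 2 bits carry ECN. */
static inline uint8_t dsfield_to_dscp(uint8_t dsfield)
{
	return dsfield >> 2;	/* 0..63 */
}

/* Example: dsfield 0xb8 (Expedited Forwarding, ECN bits clear) -> DSCP 46. */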
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e906b754415c..ab92298eafc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
bool busy = false;
- int work_done;
+ int work_done = 0;
int i;
for (i = 0; i < c->num_tc; i++)
@@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
- busy |= work_done == budget;
+ if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+ work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ busy |= work_done == budget;
+ }
busy |= c->rq.post_wqes(&c->rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
- if (work_done == budget)
+ if (budget && work_done == budget)
work_done--;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc606bfd1d6e..60771865c99c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -491,8 +491,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_PPS_EVENT:
- if (dev->event)
- dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+ mlx5_pps_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_FPGA_ERROR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c77f4c0c7769..bbb140f517c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -157,7 +157,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d9fd8570b07c..1143d80119bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -306,7 +306,7 @@ static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -395,7 +395,7 @@ out_err:
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -670,7 +670,7 @@ struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 36ecc2b2e187..881e2e55840c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,7 +40,8 @@
#include "eswitch.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft, u32 underlay_qpn)
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
@@ -52,7 +53,15 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+
+ if (disconnect) {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ MLX5_SET(set_flow_table_root_in, in, table_id, 0);
+ } else {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ }
+
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index c6d7bdf255b6..71e2d0f37ad9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,8 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- u32 underlay_qpn);
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect);
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a7bea688ec8..c70fd663a633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -145,10 +145,10 @@ static struct init_tree_node {
}
};
-enum fs_i_mutex_lock_class {
- FS_MUTEX_GRANDPARENT,
- FS_MUTEX_PARENT,
- FS_MUTEX_CHILD
+enum fs_i_lock_class {
+ FS_LOCK_GRANDPARENT,
+ FS_LOCK_PARENT,
+ FS_LOCK_CHILD
};
static const struct rhashtable_params rhash_fte = {
@@ -168,10 +168,16 @@ static const struct rhashtable_params rhash_fg = {
};
-static void del_rule(struct fs_node *node);
-static void del_flow_table(struct fs_node *node);
-static void del_flow_group(struct fs_node *node);
-static void del_fte(struct fs_node *node);
+static void del_hw_flow_table(struct fs_node *node);
+static void del_hw_flow_group(struct fs_node *node);
+static void del_hw_fte(struct fs_node *node);
+static void del_sw_flow_table(struct fs_node *node);
+static void del_sw_flow_group(struct fs_node *node);
+static void del_sw_fte(struct fs_node *node);
+/* Deleting a rule (destination) is a special case that
+ * requires holding the FTE lock for the whole deletion process.
+ */
+static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
@@ -179,20 +185,22 @@ find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
- unsigned int refcount,
- void (*remove_func)(struct fs_node *))
+ void (*del_hw_func)(struct fs_node *),
+ void (*del_sw_func)(struct fs_node *))
{
- atomic_set(&node->refcount, refcount);
+ refcount_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
- mutex_init(&node->lock);
- node->remove_func = remove_func;
+ init_rwsem(&node->lock);
+ node->del_hw_func = del_hw_func;
+ node->del_sw_func = del_sw_func;
+ node->active = false;
}
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
if (parent)
- atomic_inc(&parent->refcount);
+ refcount_inc(&parent->refcount);
node->parent = parent;
/* Parent is the root */
@@ -202,58 +210,78 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent)
node->root = parent->root;
}
-static void tree_get_node(struct fs_node *node)
+static int tree_get_node(struct fs_node *node)
{
- atomic_inc(&node->refcount);
+ return refcount_inc_not_zero(&node->refcount);
}
-static void nested_lock_ref_node(struct fs_node *node,
- enum fs_i_mutex_lock_class class)
+static void nested_down_read_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock_nested(&node->lock, class);
- atomic_inc(&node->refcount);
+ down_read_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void lock_ref_node(struct fs_node *node)
+static void nested_down_write_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock(&node->lock);
- atomic_inc(&node->refcount);
+ down_write_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void unlock_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node)
{
if (node) {
- atomic_dec(&node->refcount);
- mutex_unlock(&node->lock);
+ down_write(&node->lock);
+ refcount_inc(&node->refcount);
}
}
+static void up_read_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_read(&node->lock);
+}
+
+static void up_write_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_write(&node->lock);
+}
+
static void tree_put_node(struct fs_node *node)
{
struct fs_node *parent_node = node->parent;
- lock_ref_node(parent_node);
- if (atomic_dec_and_test(&node->refcount)) {
- if (parent_node)
+ if (refcount_dec_and_test(&node->refcount)) {
+ if (node->del_hw_func)
+ node->del_hw_func(node);
+ if (parent_node) {
+ /* Only the root namespace has no parent; for it we just
+ * need to free its node.
+ */
+ down_write_ref_node(parent_node);
list_del_init(&node->list);
- if (node->remove_func)
- node->remove_func(node);
- kfree(node);
+ if (node->del_sw_func)
+ node->del_sw_func(node);
+ up_write_ref_node(parent_node);
+ } else {
+ kfree(node);
+ }
node = NULL;
}
- unlock_ref_node(parent_node);
if (!node && parent_node)
tree_put_node(parent_node);
}
static int tree_remove_node(struct fs_node *node)
{
- if (atomic_read(&node->refcount) > 1) {
- atomic_dec(&node->refcount);
+ if (refcount_read(&node->refcount) > 1) {
+ refcount_dec(&node->refcount);
return -EEXIST;
}
tree_put_node(node);
@@ -362,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
return container_of(ns, struct mlx5_flow_root_namespace, ns);
}
+static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev->priv.steering;
+ return NULL;
+}
+
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root = find_root(node);
@@ -371,26 +408,36 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
-static void del_flow_table(struct fs_node *node)
+static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- struct fs_prio *prio;
int err;
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
- err = mlx5_cmd_destroy_flow_table(dev, ft);
- if (err)
- mlx5_core_warn(dev, "flow steering can't destroy ft\n");
- ida_destroy(&ft->fte_allocator);
+ if (node->active) {
+ err = mlx5_cmd_destroy_flow_table(dev, ft);
+ if (err)
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+ }
+}
+
+static void del_sw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct fs_prio *prio;
+
+ fs_get_obj(ft, node);
+
rhltable_destroy(&ft->fgs_hash);
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
+ kfree(ft);
}
-static void del_rule(struct fs_node *node)
+static void del_sw_hw_rule(struct fs_node *node)
{
struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
@@ -406,7 +453,6 @@ static void del_rule(struct fs_node *node)
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_rule(rule);
- list_del(&rule->node.list);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
@@ -434,117 +480,203 @@ out:
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
}
+ kfree(rule);
}
-static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg)
+static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_table *ft;
- int ret;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
- ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- WARN_ON(ret);
- fte->status = 0;
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
- ida_simple_remove(&ft->fte_allocator, fte->index);
+
+ trace_mlx5_fs_del_fte(fte);
+ dev = get_dev(&ft->node);
+ if (node->active) {
+ err = mlx5_cmd_delete_fte(dev, ft,
+ fte->index);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ }
}
-static void del_fte(struct fs_node *node)
+static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev;
struct fs_fte *fte;
int err;
fs_get_obj(fte, node);
fs_get_obj(fg, fte->node.parent);
- fs_get_obj(ft, fg->node.parent);
- trace_mlx5_fs_del_fte(fte);
- dev = get_dev(&ft->node);
- err = mlx5_cmd_delete_fte(dev, ft,
- fte->index);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
-
- destroy_fte(fte, fg);
+ err = rhashtable_remove_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ WARN_ON(err);
+ ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ kmem_cache_free(steering->ftes_cache, fte);
}
-static void del_flow_group(struct fs_node *node)
+static void del_hw_flow_group(struct fs_node *node)
{
struct mlx5_flow_group *fg;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- int err;
fs_get_obj(fg, node);
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
trace_mlx5_fs_del_fg(fg);
- if (ft->autogroup.active)
- ft->autogroup.num_groups--;
+ if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static void del_sw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
rhashtable_destroy(&fg->ftes_hash);
+ ida_destroy(&fg->fte_allocator);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
rhash_fg);
WARN_ON(err);
- if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
+{
+ int index;
+ int ret;
+
+ index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ fte->index = index + fg->start_index;
+ ret = rhashtable_insert_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ if (ret)
+ goto err_ida_remove;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add_tail(&fte->node.list, &fg->node.children);
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&fg->fte_allocator, index);
+ return ret;
}
-static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
+static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
u32 *match_value,
- unsigned int index)
+ struct mlx5_flow_act *flow_act)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct fs_fte *fte;
- fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+ fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
if (!fte)
return ERR_PTR(-ENOMEM);
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->flow_tag = flow_act->flow_tag;
- fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
fte->modify_id = flow_act->modify_id;
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+
return fte;
}
-static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+static void dealloc_flow_group(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_group *fg)
+{
+ rhashtable_destroy(&fg->ftes_hash);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index)
{
struct mlx5_flow_group *fg;
- void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- create_fg_in, match_criteria);
- u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
- create_fg_in,
- match_criteria_enable);
int ret;
- fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+ fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
if (!fg)
return ERR_PTR(-ENOMEM);
ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
if (ret) {
- kfree(fg);
+ kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
- }
+	}
+ ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
sizeof(fg->mask.match_criteria));
fg->node.type = FS_TYPE_FLOW_GROUP;
- fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
- start_flow_index);
- fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
- end_flow_index) - fg->start_index + 1;
+ fg->start_index = start_index;
+ fg->max_ftes = end_index - start_index + 1;
+
+ return fg;
+}
+
+static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index,
+ struct list_head *prev)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *fg;
+ int ret;
+
+ fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
+ start_index, end_index);
+ if (IS_ERR(fg))
+ return fg;
+
+ /* initialize refcnt, add to parent list */
+ ret = rhltable_insert(&ft->fgs_hash,
+ &fg->hash,
+ rhash_fg);
+ if (ret) {
+ dealloc_flow_group(steering, fg);
+ return ERR_PTR(ret);
+ }
+
+ tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, prev);
+ atomic_inc(&ft->node.version);
+
return fg;
}
@@ -575,7 +707,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
- ida_init(&ft->fte_allocator);
return ft;
}
@@ -693,8 +824,10 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
*prio)
{
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_ft_underlay_qp *uqp;
int min_level = INT_MAX;
int err;
+ u32 qpn;
if (root->root_ft)
min_level = root->root_ft->level;
@@ -702,10 +835,24 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (ft->level >= min_level)
return 0;
- err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
+ false);
+ if (err)
+ break;
+ }
+ }
+
if (err)
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
else
root->root_ft = ft;
@@ -724,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- lock_ref_node(&fte->node);
+ down_write_ref_node(&fte->node);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -733,7 +880,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
ft, fg->id,
modify_mask,
fte);
- unlock_ref_node(&fte->node);
+ up_write_ref_node(&fte->node);
return err;
}
@@ -765,7 +912,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
struct mlx5_flow_table *new_next_ft,
struct mlx5_flow_table *old_next_ft)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_rule *iter;
int err = 0;
@@ -870,7 +1017,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
goto unlock_root;
}
- tree_init_node(&ft->node, 1, del_flow_table);
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
@@ -882,17 +1029,17 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = connect_flow_table(root->dev, ft, fs_prio);
if (err)
goto destroy_ft;
- lock_ref_node(&fs_prio->node);
+ ft->node.active = true;
+ down_write_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- unlock_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
return ft;
destroy_ft:
mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
- ida_destroy(&ft->fte_allocator);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -960,54 +1107,6 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-/* Flow table should be locked */
-static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
- u32 *fg_in,
- struct list_head
- *prev_fg,
- bool is_auto_fg)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev = get_dev(&ft->node);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- fg = alloc_flow_group(fg_in);
- if (IS_ERR(fg))
- return fg;
-
- err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg);
- if (err)
- goto err_free_fg;
-
- err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
- if (err)
- goto err_remove_fg;
-
- if (ft->autogroup.active)
- ft->autogroup.num_groups++;
- /* Add node to tree */
- tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
- tree_add_node(&fg->node, &ft->node);
- /* Add node to group list */
- list_add(&fg->node.list, prev_fg);
-
- trace_mlx5_fs_add_fg(fg);
- return fg;
-
-err_remove_fg:
- WARN_ON(rhltable_remove(&ft->fgs_hash,
- &fg->hash,
- rhash_fg));
-err_free_fg:
- rhashtable_destroy(&fg->ftes_hash);
- kfree(fg);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u32 *fg_in)
{
@@ -1016,7 +1115,13 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
fg_in,
match_criteria_enable);
+ int start_index = MLX5_GET(create_flow_group_in, fg_in,
+ start_flow_index);
+ int end_index = MLX5_GET(create_flow_group_in, fg_in,
+ end_flow_index);
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
+ int err;
if (!check_valid_mask(match_criteria_enable, match_criteria))
return ERR_PTR(-EINVAL);
@@ -1024,9 +1129,21 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
- unlock_ref_node(&ft->node);
+ down_write_ref_node(&ft->node);
+ fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
+ start_index, end_index,
+ ft->node.children.prev);
+ up_write_ref_node(&ft->node);
+ if (IS_ERR(fg))
+ return fg;
+
+ err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ if (err) {
+ tree_put_node(&fg->node);
+ return ERR_PTR(err);
+ }
+ trace_mlx5_fs_add_fg(fg);
+ fg->node.active = true;
return fg;
}
@@ -1067,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
int i)
{
for (; --i >= 0;) {
- if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+ if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
@@ -1098,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte,
if (dest) {
rule = find_flow_rule(fte, dest + i);
if (rule) {
- atomic_inc(&rule->node.refcount);
+ refcount_inc(&rule->node.refcount);
goto rule_found;
}
}
@@ -1111,7 +1228,7 @@ create_flow_handle(struct fs_fte *fte,
/* Add dest to dests list- we need flow tables to be in the
* end of the list for forward to next prio rules.
*/
- tree_init_node(&rule->node, 1, del_rule);
+ tree_init_node(&rule->node, NULL, del_sw_hw_rule);
if (dest &&
dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
list_add(&rule->node.list, &fte->node.children);
@@ -1167,7 +1284,9 @@ add_rule_fte(struct fs_fte *fte,
if (err)
goto free_handle;
+ fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
+ atomic_inc(&fte->node.version);
out:
return handle;
@@ -1177,59 +1296,17 @@ free_handle:
return ERR_PTR(err);
}
-static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
- u32 *match_value,
- struct mlx5_flow_act *flow_act)
-{
- struct mlx5_flow_table *ft;
- struct fs_fte *fte;
- int index;
- int ret;
-
- fs_get_obj(ft, fg->node.parent);
- index = ida_simple_get(&ft->fte_allocator, fg->start_index,
- fg->start_index + fg->max_ftes,
- GFP_KERNEL);
- if (index < 0)
- return ERR_PTR(index);
-
- fte = alloc_fte(flow_act, match_value, index);
- if (IS_ERR(fte)) {
- ret = PTR_ERR(fte);
- goto err_alloc;
- }
- ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- if (ret)
- goto err_hash;
-
- return fte;
-
-err_hash:
- kfree(fte);
-err_alloc:
- ida_simple_remove(&ft->fte_allocator, index);
- return ERR_PTR(ret);
-}
-
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria)
+static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = &ft->node.children;
- unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
- void *match_criteria_addr;
+ unsigned int candidate_index = 0;
unsigned int group_size = 0;
- u32 *in;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return ERR_PTR(-ENOMEM);
-
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
/* We save place for flow groups in addition to max types */
group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
@@ -1247,25 +1324,55 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte) {
- fg = ERR_PTR(-ENOSPC);
+ if (candidate_index + group_size > ft->max_fte)
+ return ERR_PTR(-ENOSPC);
+
+ fg = alloc_insert_flow_group(ft,
+ spec->match_criteria_enable,
+ spec->match_criteria,
+ candidate_index,
+ candidate_index + group_size - 1,
+ prev);
+ if (IS_ERR(fg))
goto out;
- }
+
+ ft->autogroup.num_groups++;
+
+out:
+ return fg;
+}
+
+static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *match_criteria_addr;
+ int err;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable);
- MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
- MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
- group_size - 1);
+ fg->mask.match_criteria_enable);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
+ fg->max_ftes - 1);
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
- memcpy(match_criteria_addr, match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(match_criteria_addr, fg->mask.match_criteria,
+ sizeof(fg->mask.match_criteria));
+
+ err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
+ if (!err) {
+ fg->node.active = true;
+ trace_mlx5_fs_add_fg(fg);
+ }
- fg = create_flow_group_common(ft, in, prev, true);
-out:
kvfree(in);
- return fg;
+ return err;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -1340,60 +1447,30 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
struct mlx5_flow_handle *handle;
- struct mlx5_flow_table *ft;
+ int old_action;
int i;
+ int ret;
- if (fte) {
- int old_action;
- int ret;
-
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- ret = check_conflicting_ftes(fte, flow_act);
- if (ret) {
- handle = ERR_PTR(ret);
- goto unlock_fte;
- }
-
- old_action = fte->action;
- fte->action |= flow_act->action;
- handle = add_rule_fte(fte, fg, dest, dest_num,
- old_action != flow_act->action);
- if (IS_ERR(handle)) {
- fte->action = old_action;
- goto unlock_fte;
- } else {
- trace_mlx5_fs_set_fte(fte, false);
- goto add_rules;
- }
- }
- fs_get_obj(ft, fg->node.parent);
+ ret = check_conflicting_ftes(fte, flow_act);
+ if (ret)
+ return ERR_PTR(ret);
- fte = create_fte(fg, match_value, flow_act);
- if (IS_ERR(fte))
- return (void *)fte;
- tree_init_node(&fte->node, 0, del_fte);
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- handle = add_rule_fte(fte, fg, dest, dest_num, false);
+ old_action = fte->action;
+ fte->action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
if (IS_ERR(handle)) {
- unlock_ref_node(&fte->node);
- destroy_fte(fte, fg);
- kfree(fte);
+ fte->action = old_action;
return handle;
}
+ trace_mlx5_fs_set_fte(fte, false);
- tree_add_node(&fte->node, &fg->node);
- /* fte list isn't sorted */
- list_add_tail(&fte->node.list, &fg->node.children);
- trace_mlx5_fs_set_fte(fte, true);
-add_rules:
for (i = 0; i < handle->num_rules; i++) {
- if (atomic_read(&handle->rule[i]->node.refcount) == 1) {
+ if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
}
-unlock_fte:
- unlock_ref_node(&fte->node);
return handle;
}
@@ -1441,93 +1518,197 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
}
-static struct mlx5_flow_handle *
-try_add_to_existing_fg(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest,
- int dest_num)
-{
+struct match_list {
+ struct list_head list;
struct mlx5_flow_group *g;
- struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT);
+};
+
+struct match_list_head {
+ struct list_head list;
+ struct match_list first;
+};
+
+static void free_match_list(struct match_list_head *head)
+{
+ if (!list_empty(&head->list)) {
+ struct match_list *iter, *match_tmp;
+
+ list_del(&head->first.list);
+ tree_put_node(&head->first.g->node);
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+}
+
+static int build_match_list(struct match_list_head *match_head,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
+{
struct rhlist_head *tmp, *list;
- struct match_list {
- struct list_head list;
- struct mlx5_flow_group *g;
- } match_list, *iter;
- LIST_HEAD(match_head);
+ struct mlx5_flow_group *g;
+ int err = 0;
rcu_read_lock();
+ INIT_LIST_HEAD(&match_head->list);
/* Collect all fgs which has a matching match_criteria */
list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
+ /* RCU is atomic, we can't execute FW commands here */
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head))) {
- match_list.g = g;
- list_add_tail(&match_list.list, &match_head);
+ if (likely(list_empty(&match_head->list))) {
+ if (!tree_get_node(&g->node))
+ continue;
+ match_head->first.g = g;
+ list_add_tail(&match_head->first.list,
+ &match_head->list);
continue;
}
- curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- rcu_read_unlock();
- rule = ERR_PTR(-ENOMEM);
- goto free_list;
+ free_match_list(match_head);
+ err = -ENOMEM;
+ goto out;
+ }
+ if (!tree_get_node(&g->node)) {
+ kfree(curr_match);
+ continue;
}
curr_match->g = g;
- list_add_tail(&curr_match->list, &match_head);
+ list_add_tail(&curr_match->list, &match_head->list);
}
+out:
rcu_read_unlock();
+ return err;
+}
+
+static u64 matched_fgs_get_version(struct list_head *match_head)
+{
+ struct match_list *iter;
+ u64 version = 0;
+
+ list_for_each_entry(iter, match_head, list)
+ version += (u64)atomic_read(&iter->g->node.version);
+ return version;
+}
+static struct mlx5_flow_handle *
+try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ struct list_head *match_head,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int ft_version)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+ struct match_list *iter;
+ bool take_write = false;
+ struct fs_fte *fte;
+ u64 version;
+ int err;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte))
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(iter, match_head, list) {
+ nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
+ ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
+ }
+
+search_again_locked:
+ version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
- list_for_each_entry(iter, &match_head, list) {
- struct fs_fte *fte;
+ list_for_each_entry(iter, match_head, list) {
+ struct fs_fte *fte_tmp;
g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (fte) {
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte);
- unlock_ref_node(&g->node);
- goto free_list;
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ continue;
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ } else {
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
}
- unlock_ref_node(&g->node);
+
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte_tmp);
+ up_write_ref_node(&fte_tmp->node);
+ tree_put_node(&fte_tmp->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
}
/* No group with matching fte found. Try to add a new fte to any
* matching fg.
*/
- list_for_each_entry(iter, &match_head, list) {
- g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, NULL);
- if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) {
- unlock_ref_node(&g->node);
- goto free_list;
- }
- unlock_ref_node(&g->node);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ list_for_each_entry(iter, match_head, list)
+ nested_down_write_ref_node(&iter->g->node,
+ FS_LOCK_PARENT);
+ take_write = true;
}
-free_list:
- if (!list_empty(&match_head)) {
- struct match_list *match_tmp;
+	/* Check the ft version, in case a new flow group
+	 * was added while the fgs weren't locked
+	 */
+ if (atomic_read(&ft->node.version) != ft_version) {
+ rule = ERR_PTR(-EAGAIN);
+ goto out;
+ }
- /* The most common case is having one FG. Since we want to
- * optimize this case, we save the first on the stack.
- * Therefore, no need to free it.
- */
- list_del(&list_first_entry(&match_head, typeof(*iter), list)->list);
- list_for_each_entry_safe(iter, match_tmp, &match_head, list) {
- list_del(&iter->list);
- kfree(iter);
+	/* Check the fgs version, in case a new FTE with the
+	 * same values was added while the fgs weren't locked
+	 */
+ if (version != matched_fgs_get_version(match_head))
+ goto search_again_locked;
+
+ list_for_each_entry(iter, match_head, list) {
+ g = iter->g;
+
+ if (!g->node.active)
+ continue;
+ err = insert_fte(g, fte);
+ if (err) {
+ if (err == -ENOSPC)
+ continue;
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return ERR_PTR(err);
}
- }
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+out:
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1539,8 +1720,14 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
+ struct match_list_head match_head;
+ bool take_write = false;
+ struct fs_fte *fte;
+ int version;
+ int err;
int i;
if (!check_valid_spec(spec))
@@ -1550,33 +1737,73 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
+ nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+search_again_locked:
+ version = atomic_read(&ft->node.version);
+
+ /* Collect all fgs which has a matching match_criteria */
+ err = build_match_list(&match_head, ft, spec);
+ if (err)
+ return ERR_PTR(err);
- nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
- rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
- if (!IS_ERR(rule))
- goto unlock;
+ if (!take_write)
+ up_read_ref_node(&ft->node);
- g = create_autogroup(ft, spec->match_criteria_enable,
- spec->match_criteria);
+ rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
+ dest_num, version);
+ free_match_list(&match_head);
+ if (!IS_ERR(rule) ||
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ return rule;
+
+ if (!take_write) {
+ nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+ take_write = true;
+ }
+
+ if (PTR_ERR(rule) == -EAGAIN ||
+ version != atomic_read(&ft->node.version))
+ goto search_again_locked;
+
+ g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = (void *)g;
- goto unlock;
+ up_write_ref_node(&ft->node);
+ return rule;
}
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
- dest_num, NULL);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- unlock_ref_node(&ft->node);
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- return rule;
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ up_write_ref_node(&ft->node);
+
+ err = create_auto_flow_group(ft, g);
+ if (err)
+ goto err_release_fg;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ goto err_release_fg;
}
-unlock:
- unlock_ref_node(&ft->node);
+
+ err = insert_fte(g, fte);
+ if (err) {
+ kmem_cache_free(steering->ftes_cache, fte);
+ goto err_release_fg;
+ }
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest,
+ dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ tree_put_node(&g->node);
return rule;
+
+err_release_fg:
+ up_write_ref_node(&g->node);
+ tree_put_node(&g->node);
+ return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
@@ -1593,7 +1820,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
- struct mlx5_flow_destination gen_dest;
+ struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
@@ -1661,23 +1888,43 @@ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct mlx5_ft_underlay_qp *uqp;
struct mlx5_flow_table *new_root_ft = NULL;
+ int err = 0;
+ u32 qpn;
if (root->root_ft != ft)
return 0;
new_root_ft = find_next_ft(ft);
- if (new_root_ft) {
- int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
- root->underlay_qpn);
- if (err) {
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
- return err;
+ if (!new_root_ft) {
+ root->root_ft = NULL;
+ return 0;
+ }
+
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
+ false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
+ qpn, false);
+ if (err)
+ break;
}
}
- root->root_ft = new_root_ft;
+
+ if (err)
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
+ else
+ root->root_ft = new_root_ft;
+
return 0;
}
@@ -1817,7 +2064,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, 1, NULL);
+ tree_init_node(&fs_prio->node, NULL, NULL);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -1843,7 +2090,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -1965,10 +2212,12 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
root_ns->dev = steering->dev;
root_ns->table_type = table_type;
+ INIT_LIST_HEAD(&root_ns->underlay_qpns);
+
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2066,8 +2315,10 @@ static void clean_tree(struct fs_node *node)
struct fs_node *iter;
struct fs_node *temp;
+ tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
+ tree_put_node(node);
tree_remove_node(node);
}
}
@@ -2091,6 +2342,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
kfree(steering);
}
@@ -2196,6 +2449,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -2245,17 +2508,76 @@ err:
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *new_uqp;
+ int err = 0;
+
+ new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
+ if (!new_uqp)
+ return -ENOMEM;
+
+ mutex_lock(&root->chain_lock);
+
+ if (!root->root_ft) {
+ err = -EINVAL;
+ goto update_ft_fail;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
+ underlay_qpn, err);
+ goto update_ft_fail;
+ }
+
+ new_uqp->qpn = underlay_qpn;
+ list_add_tail(&new_uqp->list, &root->underlay_qpns);
+
+ mutex_unlock(&root->chain_lock);
- root->underlay_qpn = underlay_qpn;
return 0;
+
+update_ft_fail:
+ mutex_unlock(&root->chain_lock);
+ kfree(new_uqp);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *uqp;
+ bool found = false;
+ int err = 0;
+
+ mutex_lock(&root->chain_lock);
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ if (uqp->qpn == underlay_qpn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
+ underlay_qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
+ if (err)
+ mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
+ underlay_qpn, err);
+
+ list_del(&uqp->list);
+ mutex_unlock(&root->chain_lock);
+ kfree(uqp);
- root->underlay_qpn = 0;
return 0;
+
+out:
+ mutex_unlock(&root->chain_lock);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
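For reference, a minimal sketch (not part of this patch; the example_* names are invented) of how a consumer is expected to use the new underlay-QPN list API: add the QPN once the underlay QP is ready, and remove it before tearing the QP down, exactly as the IPoIB mlx5i_open()/mlx5i_close() changes further below do.

	/* Hypothetical consumer of the new underlay-QPN list API */
	static int example_enable_rx(struct mlx5_core_dev *mdev, u32 qpn)
	{
		int err;

		/* Fails with -EINVAL if no root FT exists, or with the FW
		 * error if the root FT update command fails.
		 */
		err = mlx5_fs_add_rx_underlay_qpn(mdev, qpn);
		if (err)
			return err;

		/* ... open channels / start traffic ... */
		return 0;
	}

	static void example_disable_rx(struct mlx5_core_dev *mdev, u32 qpn)
	{
		/* Must pair with the add above; removing a QPN that was
		 * never added warns and returns -EINVAL.
		 */
		mlx5_fs_remove_rx_underlay_qpn(mdev, qpn);
	}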
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 48dd78975062..397d24a621a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -33,6 +33,7 @@
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
+#include <linux/refcount.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
@@ -66,6 +67,8 @@ enum fs_fte_status {
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
+ struct kmem_cache *fgs_cache;
+ struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
@@ -81,9 +84,12 @@ struct fs_node {
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
- struct mutex lock;
- atomic_t refcount;
- void (*remove_func)(struct fs_node *);
+ struct rw_semaphore lock;
+ refcount_t refcount;
+ bool active;
+ void (*del_hw_func)(struct fs_node *);
+ void (*del_sw_func)(struct fs_node *);
+ atomic_t version;
};
struct mlx5_flow_rule {
@@ -120,7 +126,6 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
- struct ida fte_allocator;
struct rhltable fgs_hash;
};
@@ -147,6 +152,11 @@ struct mlx5_fc {
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+struct mlx5_ft_underlay_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
@@ -200,6 +210,7 @@ struct mlx5_flow_group {
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
+ struct ida fte_allocator;
u32 id;
struct rhashtable ftes_hash;
struct rhlist_head hash;
@@ -212,7 +223,7 @@ struct mlx5_flow_root_namespace {
struct mlx5_flow_table *root_ft;
/* Should be held when chaining flow tables */
struct mutex chain_lock;
- u32 underlay_qpn;
+ struct list_head underlay_qpns;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
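A short sketch of how the reworked fs_node is meant to be used, based on the fs_core.c hunks above (obj and the example_* names are invented): each node now carries a refcount_t plus an rw_semaphore, destruction is split into del_hw_func (issues the FW destroy command) and del_sw_func (frees software state while the parent is write-locked), and node->version lets lockless lookups detect concurrent inserts and retry.

	/* Illustrative wiring of a new fs_node-based object under a parent */
	tree_init_node(&obj->node, example_del_hw, example_del_sw);

	down_write_ref_node(&parent->node);
	tree_add_node(&obj->node, &parent->node);
	list_add_tail(&obj->node.list, &parent->node.children);
	atomic_inc(&parent->node.version);	/* make concurrent lookups retry */
	up_write_ref_node(&parent->node);

	/* ... once the FW object has been created ... */
	obj->node.active = true;

	/* Teardown: del_hw_func runs when the refcount hits zero, del_sw_func
	 * runs with the parent write-locked (see tree_put_node() above).
	 */
	tree_put_node(&obj->node);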
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2c71557d1cee..5ef1b56b6a96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -106,6 +106,13 @@ static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
MLX5_MCAM_REGS_FIRST_128);
}
+static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_qcam_reg(dev, dev->caps.qcam,
+ MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_QCAM_REGS_FIRST_128);
+}
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -182,6 +189,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, qcam_reg))
+ mlx5_get_qcam_reg(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index db86e1506c8b..1a0e797ad001 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -285,9 +285,9 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
-static void poll_health(unsigned long data)
+static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
+ struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
u32 count;
@@ -320,15 +320,13 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- init_timer(&health->timer);
+ timer_setup(&health->timer, poll_health, 0);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.data = (unsigned long)dev;
- health->timer.function = poll_health;
health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
add_timer(&health->timer);
}
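The health poll change above follows the generic kernel timer API migration: init_timer() plus manual .data/.function assignment is replaced by timer_setup(), and the callback recovers its containing structure with from_timer() instead of casting an unsigned long. A minimal sketch of the same pattern, with an invented example_dev type:

	struct example_dev {
		struct timer_list timer;
		unsigned int polls;
	};

	static void example_poll(struct timer_list *t)
	{
		struct example_dev *dev = from_timer(dev, t, timer);

		dev->polls++;
		mod_timer(&dev->timer, jiffies + HZ);	/* re-arm */
	}

	static void example_start(struct example_dev *dev)
	{
		timer_setup(&dev->timer, example_poll, 0);
		dev->timer.expires = round_jiffies(jiffies + HZ);
		add_timer(&dev->timer);
	}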
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 43c126c63955..6f338a9219c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -250,3 +250,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
+
+const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 145e392ab849..d2a66dc4adc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -40,8 +40,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
-static int mlx5i_dev_init(struct net_device *dev);
-static void mlx5i_dev_cleanup(struct net_device *dev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -70,10 +68,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
@@ -108,11 +106,69 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv)
/* Do nothing .. */
}
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_qp *qp = &ipriv->qp;
+ struct mlx5_qp_context *context;
+ int ret;
+
+ /* QP states */
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+
+ context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ context->pri_path.port = 1;
+ context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
+ context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+ memset(context, 0, sizeof(*context));
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ kfree(context);
+ return 0;
+
+err_qp_modify_to_err:
+	mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
+ kfree(context);
+ return ret;
+}
+
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_qp_context context;
+ int err;
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
+ &ipriv->qp);
+ if (err)
+ mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+}
+
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
- struct mlx5_qp_context *context = NULL;
u32 *in = NULL;
void *addr_path;
int ret = 0;
@@ -140,43 +196,12 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
goto out;
}
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = -ENOMEM;
- goto out;
- }
-
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto out;
- }
- memset(context, 0, sizeof(*context));
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto out;
- }
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto out;
- }
-
out:
- kfree(context);
kvfree(in);
return ret;
}
-static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
mlx5_core_destroy_qp(mdev, qp);
}
@@ -195,10 +220,14 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
- return err;
+ goto err_destroy_underlay_qp;
}
return 0;
+
+err_destroy_underlay_qp:
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ return err;
}
static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
@@ -226,15 +255,24 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
+ err = mlx5e_create_inner_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
+
err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
- goto err_destroy_arfs_tables;
+ goto err_destroy_inner_ttc_table;
}
return 0;
+err_destroy_inner_ttc_table:
+ mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -244,12 +282,12 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
err = mlx5e_create_indirect_rqt(priv);
@@ -268,18 +306,12 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
- if (err)
- goto err_destroy_direct_tirs;
-
err = mlx5i_create_flow_steering(priv);
if (err)
- goto err_remove_rx_underlay_qpn;
+ goto err_destroy_direct_tirs;
return 0;
-err_remove_rx_underlay_qpn:
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
@@ -293,9 +325,6 @@ err_destroy_indirect_rqts:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
-
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
@@ -351,7 +380,7 @@ out:
return err;
}
-static int mlx5i_dev_init(struct net_device *dev)
+int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -361,6 +390,9 @@ static int mlx5i_dev_init(struct net_device *dev)
dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ /* Add QPN to net-device mapping to HT */
+	mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);
+
return 0;
}
@@ -378,63 +410,84 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-static void mlx5i_dev_cleanup(struct net_device *dev)
+void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_qp_context context;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_uninit_underlay_qp(priv);
- /* detach qp from flow-steering by reset it */
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp);
+ /* Delete QPN to net-device mapping from HT */
+ mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}
static int mlx5i_open(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
int err;
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- set_bit(MLX5E_STATE_OPENED, &priv->state);
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
- err = mlx5e_open_channels(priv, &priv->channels);
- if (err)
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
goto err_clear_state_opened_flag;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
+ goto err_reset_qp;
+ }
- mlx5e_refresh_tirs(priv, false);
- mlx5e_activate_priv_channels(priv);
- mlx5e_timestamp_init(priv);
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err)
+ goto err_remove_fs_underlay_qp;
- mutex_unlock(&priv->state_lock);
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mlx5e_timestamp_set(epriv);
+
+ mutex_unlock(&epriv->state_lock);
return 0;
+err_remove_fs_underlay_qp:
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_reset_qp:
+ mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mutex_unlock(&priv->state_lock);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
return err;
}
static int mlx5i_close(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g RX/TX queue size change) that involves close&open failed.
*/
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
goto unlock;
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
- mlx5e_timestamp_cleanup(priv);
- netif_carrier_off(priv->netdev);
- mlx5e_deactivate_priv_channels(priv);
- mlx5e_close_channels(&priv->channels);
+ netif_carrier_off(epriv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(epriv);
+ mlx5e_deactivate_priv_channels(epriv);
+	mlx5e_close_channels(&epriv->channels);
unlock:
- mutex_unlock(&priv->state_lock);
+ mutex_unlock(&epriv->state_lock);
return 0;
}
@@ -492,6 +545,13 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}
+static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ ipriv->pkey_index = (u16)id;
+}
+
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
@@ -510,12 +570,13 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
const char *name,
void (*setup)(struct net_device *))
{
- const struct mlx5e_profile *profile = &mlx5i_nic_profile;
- int nch = profile->max_nch(mdev);
+ const struct mlx5e_profile *profile;
struct net_device *netdev;
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
+ bool sub_interface;
+ int nch;
int err;
if (mlx5i_check_required_hca_cap(mdev)) {
@@ -523,10 +584,15 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
return ERR_PTR(-EOPNOTSUPP);
}
- /* This function should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
- if (err)
- return NULL;
+	/* TODO: Need to find a better way to check if this is a child device */
+ sub_interface = (mdev->mlx5e_res.pdn != 0);
+
+ if (sub_interface)
+ profile = mlx5i_pkey_get_profile();
+ else
+ profile = &mlx5i_nic_profile;
+
+ nch = profile->max_nch(mdev);
netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
name, NET_NAME_UNKNOWN,
@@ -535,7 +601,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
nch);
if (!netdev) {
mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- goto free_mdev_resources;
+ return NULL;
}
ipriv = netdev_priv(netdev);
@@ -545,6 +611,20 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
if (!epriv->wq)
goto err_free_netdev;
+ ipriv->sub_interface = sub_interface;
+ if (!ipriv->sub_interface) {
+ err = mlx5i_pkey_qpn_ht_init(netdev);
+ if (err) {
+ mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
+ goto destroy_wq;
+ }
+
+ /* This should only be called once per mdev */
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ goto destroy_ht;
+ }
+
profile->init(mdev, netdev, profile, ipriv);
mlx5e_attach_netdev(epriv);
@@ -556,13 +636,16 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
rn->send = mlx5i_xmit;
rn->attach_mcast = mlx5i_attach_mcast;
rn->detach_mcast = mlx5i_detach_mcast;
+ rn->set_id = mlx5i_set_pkey_index;
return netdev;
+destroy_ht:
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+destroy_wq:
+ destroy_workqueue(epriv->wq);
err_free_netdev:
free_netdev(netdev);
-free_mdev_resources:
- mlx5e_destroy_mdev_resources(mdev);
return NULL;
}
@@ -570,15 +653,18 @@ EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
void mlx5_rdma_netdev_free(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
const struct mlx5e_profile *profile = priv->profile;
- struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
destroy_workqueue(priv->wq);
- free_netdev(netdev);
- mlx5e_destroy_mdev_resources(mdev);
+ if (!ipriv->sub_interface) {
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ mlx5e_destroy_mdev_resources(priv->mdev);
+ }
+ free_netdev(netdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index a0f405f520f7..49008022c306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -39,6 +39,7 @@
#define MLX5I_MAX_NUM_TC 1
extern const struct ethtool_ops mlx5i_ethtool_ops;
+extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
#define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4
@@ -49,10 +50,45 @@ extern const struct ethtool_ops mlx5i_ethtool_ops;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
struct mlx5_core_qp qp;
+ bool sub_interface;
u32 qkey;
+ u16 pkey_index;
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
char *mlx5e_priv[0];
};
+/* Underlay QP create/destroy functions */
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+
+/* Underlay QP state modification init/uninit functions */
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv);
+
+/* Allocate/Free underlay QPN to net-device hash table */
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev);
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev);
+
+/* Add/Remove an underlay QPN to net-device mapping to/from the hash table */
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn);
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
+
+/* Get the net-device corresponding to the given underlay QPN */
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
+
+/* Shared ndo functions */
+int mlx5i_dev_init(struct net_device *dev);
+void mlx5i_dev_cleanup(struct net_device *dev);
+
+/* Parent profile functions */
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+
+/* Get child interface nic profile */
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
+
/* Extract mlx5e_priv from IPoIB netdev */
#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
new file mode 100644
index 000000000000..531b02cc979b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hash.h>
+#include "ipoib.h"
+
+#define MLX5I_MAX_LOG_PKEY_SUP 7
+
+struct qpn_to_netdev {
+ struct net_device *netdev;
+ struct hlist_node hlist;
+ u32 underlay_qpn;
+};
+
+struct mlx5i_pkey_qpn_ht {
+ struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];
+ spinlock_t ht_lock; /* Synchronise with NAPI */
+};
+
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
+
+ qpn_htbl = kzalloc(sizeof(*qpn_htbl), GFP_KERNEL);
+ if (!qpn_htbl)
+ return -ENOMEM;
+
+ ipriv->qpn_htbl = qpn_htbl;
+ spin_lock_init(&qpn_htbl->ht_lock);
+
+ return 0;
+}
+
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ kfree(ipriv->qpn_htbl);
+}
+
+static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,
+ u32 qpn)
+{
+ struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
+ struct qpn_to_netdev *node;
+
+ hlist_for_each_entry(node, h, hlist) {
+ if (node->underlay_qpn == qpn)
+ return node;
+ }
+
+ return NULL;
+}
+
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP);
+ struct qpn_to_netdev *new_node;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->netdev = netdev;
+ new_node->underlay_qpn = qpn;
+ spin_lock_bh(&ht->ht_lock);
+ hlist_add_head(&new_node->hlist, &ht->buckets[key]);
+ spin_unlock_bh(&ht->ht_lock);
+
+ return 0;
+}
+
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
+ if (!node) {
+ mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ht->ht_lock);
+ hlist_del_init(&node->hlist);
+ spin_unlock_bh(&ht->ht_lock);
+ kfree(node);
+
+ return 0;
+}
+
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
+ if (!node)
+ return NULL;
+
+ return node->netdev;
+}
+
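+/* Illustrative (hypothetical) use of the QPN hash table on the shared RX
+ * path: a completion arriving on the parent's RQ carries the underlay QPN,
+ * which is looked up to find the child netdev the skb belongs to. The
+ * surrounding handler below is only a sketch of how a CQE handler might
+ * consume mlx5i_pkey_get_netdev(); it is not part of this patch:
+ *
+ *	netdev = mlx5i_pkey_get_netdev(parent_netdev, qpn_from_cqe);
+ *	if (netdev)
+ *		skb->dev = netdev;
+ *	else
+ *		dev_kfree_skb_any(skb);
+ */
+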
+static int mlx5i_pkey_open(struct net_device *netdev);
+static int mlx5i_pkey_close(struct net_device *netdev);
+static int mlx5i_pkey_dev_init(struct net_device *dev);
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+
+static const struct net_device_ops mlx5i_pkey_netdev_ops = {
+ .ndo_open = mlx5i_pkey_open,
+ .ndo_stop = mlx5i_pkey_close,
+ .ndo_init = mlx5i_pkey_dev_init,
+ .ndo_uninit = mlx5i_pkey_dev_cleanup,
+ .ndo_change_mtu = mlx5i_pkey_change_mtu,
+};
+
+/* Child NDOs */
+static int mlx5i_pkey_dev_init(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv, *parent_ipriv;
+ struct net_device *parent_dev;
+ int parent_ifindex;
+
+ ipriv = priv->ppriv;
+
+ /* Get QPN to netdevice hash table from parent */
+ parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
+ parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+ if (!parent_dev) {
+ mlx5_core_warn(priv->mdev, "failed to get parent device\n");
+ return -EINVAL;
+ }
+
+ parent_ipriv = netdev_priv(parent_dev);
+ ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
+ dev_put(parent_dev);
+
+ return mlx5i_dev_init(dev);
+}
+
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
+{
+ return mlx5i_dev_cleanup(netdev);
+}
+
+static int mlx5i_pkey_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ int err;
+
+ mutex_lock(&epriv->state_lock);
+
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
+
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n", err);
+ goto err_release_lock;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
+ goto err_uninit_underlay_qp;
+ }
+
+ err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
+ goto err_remove_rx_underlay_qp;
+ }
+
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err) {
+ mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
+ goto err_clear_state_opened_flag;
+ }
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mutex_unlock(&epriv->state_lock);
+
+ return 0;
+
+err_clear_state_opened_flag:
+ mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+err_remove_rx_underlay_qp:
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_uninit_underlay_qp:
+ mlx5i_uninit_underlay_qp(epriv);
+err_release_lock:
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
+ return err;
+}
+
+static int mlx5i_pkey_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ netif_carrier_off(priv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(priv);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+ mlx5e_destroy_tis(mdev, priv->tisn[0]);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+/* Called directly after IPoIB netdevice was created to initialize SW structs */
+static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mlx5i_init(mdev, netdev, profile, ppriv);
+
+ /* Override parent ndo */
+ netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
+
+ /* Set child limited ethtool support */
+ netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
+
+ /* Use dummy rqs */
+ priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+}
+
+/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
+static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
+{
+ /* Do nothing .. */
+}
+
+static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+}
+
+static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
+{
+ /* The RX resources are shared between the child and parent interfaces,
+ * so the parent takes care of RX resource allocation and init.
+ */
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_rx(struct mlx5e_priv *priv)
+{
+ /* The RX resources are shared between the child and parent interfaces,
+ * so the parent takes care of RX resource free and de-init.
+ */
+}
+
+static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
+ .init = mlx5i_pkey_init,
+ .cleanup = mlx5i_pkey_cleanup,
+ .init_tx = mlx5i_pkey_init_tx,
+ .cleanup_tx = mlx5i_pkey_cleanup_tx,
+ .init_rx = mlx5i_pkey_init_rx,
+ .cleanup_rx = mlx5i_pkey_cleanup_rx,
+ .enable = NULL,
+ .disable = NULL,
+ .update_stats = NULL,
+ .max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .max_tc = MLX5I_MAX_NUM_TC,
+};
+
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
+{
+ return &mlx5i_pkey_nic_profile;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
new file mode 100644
index 000000000000..fa8aed62b231
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/clocksource.h>
+#include "en.h"
+
+enum {
+ MLX5_CYCLES_SHIFT = 23
+};
+
+enum {
+ MLX5_PIN_MODE_IN = 0x0,
+ MLX5_PIN_MODE_OUT = 0x1,
+};
+
+enum {
+ MLX5_OUT_PATTERN_PULSE = 0x0,
+ MLX5_OUT_PATTERN_PERIODIC = 0x1,
+};
+
+enum {
+ MLX5_EVENT_MODE_DISABLE = 0x0,
+ MLX5_EVENT_MODE_REPETETIVE = 0x1,
+ MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+};
+
+enum {
+ MLX5_MTPPS_FS_ENABLE = BIT(0x0),
+ MLX5_MTPPS_FS_PATTERN = BIT(0x2),
+ MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
+ MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
+ MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+};
+
+static u64 read_internal_timer(const struct cyclecounter *cc)
+{
+ struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+
+ return mlx5_read_internal_timer(mdev) & cc->mask;
+}
+
+static void mlx5_pps_out(struct work_struct *work)
+{
+ struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
+ out_work);
+ struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
+ pps_info);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ u64 tstart;
+
+ write_lock_irqsave(&clock->lock, flags);
+ tstart = clock->pps_info.start[i];
+ clock->pps_info.start[i] = 0;
+ write_unlock_irqrestore(&clock->lock, flags);
+ if (!tstart)
+ continue;
+
+ MLX5_SET(mtpps_reg, in, pin, i);
+ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+ MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
+ mlx5_set_mtpps(mdev, in, sizeof(in));
+ }
+}
+
+static void mlx5_timestamp_overflow(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
+ overflow_work);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+ schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+}
+
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_init(&clock->tc, &clock->cycles, ns);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns;
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ ns = timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_adjtime(&clock->tc, delta);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
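+/* Frequency adjustment scales the cyclecounter multiplier by the requested
+ * offset in parts per billion (ppb):
+ *
+ *	diff = nominal_c_mult * |delta| / 10^9
+ *	mult = nominal_c_mult +/- diff
+ *
+ * For example, a +100 ppb request grows the multiplier by one part in ten
+ * million, so converted timestamps run 100 ns per second faster.
+ */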
+static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ adj = clock->nominal_c_mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
+ clock->nominal_c_mult + diff;
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_extts_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ pin_mode = MLX5_PIN_MODE_IN;
+ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE;
+ } else {
+ pin = rq->extts.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5_perout_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u64 nsec_now, nsec_delta, time_stamp = 0;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+ s64 ns;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+
+ pin_mode = MLX5_PIN_MODE_OUT;
+ pattern = MLX5_OUT_PATTERN_PERIODIC;
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+
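+ /* Only a one-second period (i.e. a 1 Hz output) is accepted; the
+ * right shift in the check below tolerates an odd trailing
+ * nanosecond but rejects any other requested period.
+ */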
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(mdev);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ write_unlock_irqrestore(&clock->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE |
+ MLX5_MTPPS_FS_TIME_STAMP;
+ } else {
+ pin = rq->perout.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+
+ clock->pps_info.enabled = !!on;
+ return 0;
+}
+
+static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mlx5_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5_perout_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return mlx5_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
+static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlx5_p2p",
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = mlx5_ptp_adjfreq,
+ .adjtime = mlx5_ptp_adjtime,
+ .gettime64 = mlx5_ptp_gettime,
+ .settime64 = mlx5_ptp_settime,
+ .enable = NULL,
+ .verify = NULL,
+};
+
+static int mlx5_init_pin_config(struct mlx5_clock *clock)
+{
+ int i;
+
+ clock->ptp_info.pin_config =
+ kzalloc(sizeof(*clock->ptp_info.pin_config) *
+ clock->ptp_info.n_pins, GFP_KERNEL);
+ if (!clock->ptp_info.pin_config)
+ return -ENOMEM;
+ clock->ptp_info.enable = mlx5_ptp_enable;
+ clock->ptp_info.verify = mlx5_ptp_verify;
+ clock->ptp_info.pps = 1;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ snprintf(clock->ptp_info.pin_config[i].name,
+ sizeof(clock->ptp_info.pin_config[i].name),
+ "mlx5_pps%d", i);
+ clock->ptp_info.pin_config[i].index = i;
+ clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
+ clock->ptp_info.pin_config[i].chan = i;
+ }
+
+ return 0;
+}
+
+static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ mlx5_query_mtpps(mdev, out, sizeof(out));
+
+ clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+ cap_number_of_pps_pins);
+ clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_in_pins);
+ clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+ clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5_pps_event(struct mlx5_core_dev *mdev,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ u64 nsec_now, nsec_delta;
+ u64 cycles_now, cycles_delta;
+ int pin = eqe->data.pps.pin;
+ s64 ns;
+ unsigned long flags;
+
+ switch (clock->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ if (clock->pps_info.enabled) {
+ ptp_event.type = PTP_CLOCK_PPSUSR;
+ ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ } else {
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ }
+ ptp_clock_event(clock->ptp, &ptp_event);
+ break;
+ case PTP_PF_PEROUT:
+ mlx5_ptp_gettime(&clock->ptp_info, &ts);
+ cycles_now = mlx5_read_internal_timer(mdev);
+ ts.tv_sec += 1;
+ ts.tv_nsec = 0;
+ ns = timespec64_to_ns(&ts);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ clock->pps_info.start[pin] = cycles_now + cycles_delta;
+ schedule_work(&clock->pps_info.out_work);
+ write_unlock_irqrestore(&clock->lock, flags);
+ break;
+ default:
+ mlx5_core_err(mdev, "Unhandled event\n");
+ }
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u64 ns;
+ u64 frac = 0;
+ u32 dev_freq;
+
+ dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ if (!dev_freq) {
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return;
+ }
+ rwlock_init(&clock->lock);
+ clock->cycles.read = read_internal_timer;
+ clock->cycles.shift = MLX5_CYCLES_SHIFT;
+ clock->cycles.mult = clocksource_khz2mult(dev_freq,
+ clock->cycles.shift);
+ clock->nominal_c_mult = clock->cycles.mult;
+ clock->cycles.mask = CLOCKSOURCE_MASK(41);
+
+ timecounter_init(&clock->tc, &clock->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Calculate period in seconds to call the overflow watchdog - to make
+ * sure counter is checked at least once every wrap around.
+ */
+ ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ frac, &frac);
+ do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ clock->overflow_period = ns;
+
+ INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
+ INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
+ if (clock->overflow_period)
+ schedule_delayed_work(&clock->overflow_work, 0);
+ else
+ mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
+
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ /* Initialize 1PPS data structures */
+ if (MLX5_PPS_CAP(mdev))
+ mlx5_get_pps_caps(mdev);
+ if (clock->ptp_info.n_pins)
+ mlx5_init_pin_config(clock);
+
+ clock->ptp = ptp_clock_register(&clock->ptp_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(clock->ptp)) {
+ mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(clock->ptp));
+ clock->ptp = NULL;
+ }
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (clock->ptp) {
+ ptp_clock_unregister(clock->ptp);
+ clock->ptp = NULL;
+ }
+
+ cancel_work_sync(&clock->pps_info.out_work);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock->ptp_info.pin_config);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
new file mode 100644
index 000000000000..a8eecedd46c2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_CLOCK_H__
+#define __LIB_CLOCK_H__
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev);
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+
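+/* Convert a raw device cycle counter value (e.g. a CQE timestamp) into
+ * nanoseconds on the timecounter's timeline. The read lock guards against
+ * observing a concurrent time or frequency adjustment half-applied.
+ */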
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ u64 nsec;
+
+ read_lock(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ read_unlock(&clock->lock);
+
+ return ns_to_ktime(nsec);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0d2c8dcd6eae..5f323442cc5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -59,6 +59,7 @@
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "accel/ipsec.h"
+#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -889,6 +890,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_reserved_gids(dev);
+ mlx5_init_clock(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -949,6 +952,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
@@ -1482,9 +1486,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
return -EAGAIN;
}
+ /* The panic teardown firmware command stops PCI bus communication
+ * with the HCA, so the health poll is no longer needed.
+ */
+ mlx5_drain_health_wq(dev);
+ mlx5_stop_health_poll(dev);
+
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {
mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b7c2900b75f9..ff4a0b889a6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -93,6 +93,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
@@ -121,6 +122,8 @@ int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e07061f565d6..c37d00cd472a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -98,6 +98,18 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
}
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(qcam_reg);
+
+ MLX5_SET(qcam_reg, in, feature_group, feature_group);
+ MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0);
+}
+
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -959,3 +971,102 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 1);
}
+
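+/* QPTS (QoS Port Trust State) helpers. The trust state selects whether a
+ * packet's priority is derived from the L2 PCP bits or from the IP DSCP
+ * field; the trust_state encodings themselves come from the device
+ * interface definitions and are not repeated here.
+ */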
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
+ MLX5_SET(qpts_reg, in, local_port, 1);
+ MLX5_SET(qpts_reg, in, trust_state, trust_state);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 1);
+ return err;
+}
+
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
+ MLX5_SET(qpts_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 0);
+ if (!err)
+ *trust_state = MLX5_GET(qpts_reg, out, trust_state);
+
+ return err;
+}
+
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+ memcpy(in, out, sz);
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+
+ /* Update the corresponding dscp entry */
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[dscp]);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, prio);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+
+/* dscp2prio[i]: priority that dscp i is mapped to */
+#define MLX5E_SUPPORTED_DSCP 64
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+ int i;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+ for (i = 0; i < (MLX5E_SUPPORTED_DSCP); i++) {
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]);
+ dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio);
+ }
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 891ff418bb5e..9463c3fa254f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o
mlxsw_core-objs := core.o core_acl_flex_keys.o \
core_acl_flex_actions.o
@@ -17,7 +18,9 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
spectrum_cnt.o spectrum_fid.o \
- spectrum_ipip.o
+ spectrum_ipip.o spectrum_acl_flex_actions.o \
+ spectrum_mr.o spectrum_mr_tcam.o \
+ spectrum_qdisc.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 5ae110172c22..6a979a09ab72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -399,23 +399,25 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
-void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
- if (WARN_ON(block->finished))
- return;
+ if (block->finished)
+ return -EINVAL;
mlxsw_afa_set_goto_set(block->cur_set,
MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
block->finished = true;
+ return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);
-void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
- if (WARN_ON(block->finished))
- return;
+ if (block->finished)
+ return -EINVAL;
mlxsw_afa_set_goto_set(block->cur_set,
MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
block->finished = true;
+ return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);
@@ -674,6 +676,7 @@ enum mlxsw_afa_trapdisc_trap_action {
MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4);
enum mlxsw_afa_trapdisc_forward_action {
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};
@@ -712,7 +715,7 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
-int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_TRAPDISC_CODE,
@@ -722,11 +725,27 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block)
return -ENOBUFS;
mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
- MLXSW_TRAP_ID_ACL0);
+ trap_id);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_TRAPDISC_CODE,
+ MLXSW_AFA_TRAPDISC_SIZE);
+
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
+ MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
+ trap_id);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward);
+
/* Forwarding Action
* -----------------
* Forwarding Action can be used to implement Policy Based Switching (PBS)
@@ -891,3 +910,74 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
+
+/* MC Routing Action
+ * -----------------
+ * The multicast router action. Used by the RMFT_V2 (Router Multicast
+ * Forwarding Table Version 2) register.
+ */
+
+#define MLXSW_AFA_MCROUTER_CODE 0x10
+#define MLXSW_AFA_MCROUTER_SIZE 2
+
+enum mlxsw_afa_mcrouter_rpf_action {
+ MLXSW_AFA_MCROUTER_RPF_ACTION_NOP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR,
+};
+
+/* afa_mcrouter_rpf_action */
+MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3);
+
+/* afa_mcrouter_expected_irif */
+MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16);
+
+/* afa_mcrouter_min_mtu */
+MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16);
+
+enum mlxsw_afa_mcrouter_vrmid {
+ MLXSW_AFA_MCROUTER_VRMID_INVALID,
+ MLXSW_AFA_MCROUTER_VRMID_VALID
+};
+
+/* afa_mcrouter_vrmid
+ * Valid RMID: rigr_rmid_index is used as RMID
+ */
+MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1);
+
+/* afa_mcrouter_rigr_rmid_index
+ * When the vrmid field is set to invalid, the field is used as pointer to
+ * Router Interface Group (RIGR) Table in the KVD linear.
+ * When the vrmid is set to valid, the field is used as RMID index, ranged
+ * from 0 to max_mid - 1. The index is to the Port Group Table.
+ */
+MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_afa_mcrouter_pack(char *payload,
+ enum mlxsw_afa_mcrouter_rpf_action rpf_action,
+ u16 expected_irif, u16 min_mtu,
+ enum mlxsw_afa_mcrouter_vrmid vrmid, u32 rigr_rmid_index)
+{
+ mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action);
+ mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif);
+ mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu);
+ mlxsw_afa_mcrouter_vrmid_set(payload, vrmid);
+ mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index);
+}
+
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index)
+{
+ char *act = mlxsw_afa_block_append_action(block,
+ MLXSW_AFA_MCROUTER_CODE,
+ MLXSW_AFA_MCROUTER_SIZE);
+ if (!act)
+ return -ENOBUFS;
+ mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
+ expected_irif, min_mtu, rmid_valid, kvdl_index);
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index f99c341b2497..a8d3314c3a24 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -57,10 +57,12 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
-void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
-void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
-int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
+int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
+ u16 trap_id);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
@@ -68,5 +70,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
u32 counter_index);
int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid);
+int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
+ u16 expected_irif, u16 min_mtu,
+ bool rmid_valid, u32 kvdl_index);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 12c3a4449120..c0dcfa05b077 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -294,7 +294,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
mlxsw_i2c_set_slave_addr(tran_buf, off);
memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
- chunk_size * i, chunk_size);
+ MLXSW_I2C_BLK_MAX * i, chunk_size);
j = 0;
end = jiffies + timeout;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 4afc8486eb9a..6c4e08b8058a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1758,6 +1758,191 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* CWTP - Congestion WRED ECN TClass Profile
+ * ----------------------------------------
+ * Configures the profiles for queues of egress port and traffic class
+ */
+#define MLXSW_REG_CWTP_ID 0x2802
+#define MLXSW_REG_CWTP_BASE_LEN 0x28
+#define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08
+#define MLXSW_REG_CWTP_LEN 0x40
+
+MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN);
+
+/* reg_cwtp_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8);
+
+/* reg_cwtp_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8);
+
+/* reg_cwtp_profile_min
+ * Minimum Average Queue Size of the profile in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false);
+
+/* reg_cwtp_profile_percent
+ * Percentage of WRED and ECN marking for maximum Average Queue size
+ * Range is 0 to 100, units of integer percentage
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN,
+ 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+/* reg_cwtp_profile_max
+ * Maximum Average Queue size of the profile in cells
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN,
+ 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false);
+
+#define MLXSW_REG_CWTP_MIN_VALUE 64
+#define MLXSW_REG_CWTP_MAX_PROFILE 2
+#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1
+
+static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port,
+ u8 traffic_class)
+{
+ int i;
+
+ MLXSW_REG_ZERO(cwtp, payload);
+ mlxsw_reg_cwtp_local_port_set(payload, local_port);
+ mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class);
+
+ for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) {
+ mlxsw_reg_cwtp_profile_min_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ mlxsw_reg_cwtp_profile_max_set(payload, i,
+ MLXSW_REG_CWTP_MIN_VALUE);
+ }
+}
+
+#define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) ((profile) - 1)
+
+static inline void
+mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max,
+ u32 probability)
+{
+ u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile);
+
+ mlxsw_reg_cwtp_profile_min_set(payload, index, min);
+ mlxsw_reg_cwtp_profile_max_set(payload, index, max);
+ mlxsw_reg_cwtp_profile_percent_set(payload, index, probability);
+}
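+
+/* Illustrative (hypothetical) use of the CWTP packing helpers: configure
+ * WRED/ECN profile 1 on port 1, traffic class 0, marking between 1000 and
+ * 2000 cells with a 10% maximum probability. The surrounding register-write
+ * call is only a usage sketch and is not part of this patch:
+ *
+ *	char cwtp_pl[MLXSW_REG_CWTP_LEN];
+ *
+ *	mlxsw_reg_cwtp_pack(cwtp_pl, 1, 0);
+ *	mlxsw_reg_cwtp_profile_pack(cwtp_pl, MLXSW_REG_CWTP_DEFAULT_PROFILE,
+ *				    1000, 2000, 10);
+ *	mlxsw_reg_write(mlxsw_core, MLXSW_REG(cwtp), cwtp_pl);
+ */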
+
+/* CWTPM - Congestion WRED ECN TClass and Pool Mapping
+ * ---------------------------------------------------
+ * The CWTPM register maps each egress port and traffic class to profile num.
+ */
+#define MLXSW_REG_CWTPM_ID 0x2803
+#define MLXSW_REG_CWTPM_LEN 0x44
+
+MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN);
+
+/* reg_cwtpm_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8);
+
+/* reg_cwtpm_traffic_class
+ * Traffic Class to configure
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8);
+
+/* reg_cwtpm_ew
+ * Control enablement of WRED for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1);
+
+/* reg_cwtpm_ee
+ * Control enablement of ECN for traffic class:
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1);
+
+/* reg_cwtpm_tcp_g
+ * TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2);
+
+/* reg_cwtpm_tcp_y
+ * TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2);
+
+/* reg_cwtpm_tcp_r
+ * TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2);
+
+/* reg_cwtpm_ntcp_g
+ * Non-TCP Green Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2);
+
+/* reg_cwtpm_ntcp_y
+ * Non-TCP Yellow Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2);
+
+/* reg_cwtpm_ntcp_r
+ * Non-TCP Red Profile.
+ * Index of the profile within {port, traffic class} to use.
+ * 0 for disabling both WRED and ECN for this type of traffic.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2);
+
+#define MLXSW_REG_CWTPM_RESET_PROFILE 0
+
+static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port,
+ u8 traffic_class, u8 profile,
+ bool wred, bool ecn)
+{
+ MLXSW_REG_ZERO(cwtpm, payload);
+ mlxsw_reg_cwtpm_local_port_set(payload, local_port);
+ mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class);
+ mlxsw_reg_cwtpm_ew_set(payload, wred);
+ mlxsw_reg_cwtpm_ee_set(payload, ecn);
+ mlxsw_reg_cwtpm_tcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_tcp_r_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_g_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_y_set(payload, profile);
+ mlxsw_reg_cwtpm_ntcp_r_set(payload, profile);
+}
+
/* PPBT - Policy-Engine Port Binding Table
* ---------------------------------------
* This register is used for configuration of the Port Binding Table.
@@ -2142,15 +2327,14 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
*/
MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
-#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8
+#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8
/* reg_pefa_flex_action_set
* Action-set to perform when rule is matched.
* Must be zero padded if action set is shorter.
* Access: RW
*/
-MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08,
- MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
const char *flex_action_set)
@@ -2243,7 +2427,7 @@ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
* Access: RW
*/
MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
- MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
enum mlxsw_reg_ptce2_op op,
@@ -3124,6 +3308,7 @@ static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
*/
#define MLXSW_REG_PPCNT_ID 0x5008
#define MLXSW_REG_PPCNT_LEN 0x100
+#define MLXSW_REG_PPCNT_COUNTERS_OFFSET 0x08
MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN);
@@ -3156,8 +3341,10 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_EXT_CNT = 0x5,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
+ MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
};
/* reg_ppcnt_grp
@@ -3173,6 +3360,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
* 0x12: Physical Layer Counters
+ * 0x13: Per Traffic Class Congestion Counters
* Access: Index
*/
MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
@@ -3201,162 +3389,179 @@ MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
- 0x08 + 0x00, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_a_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
- 0x08 + 0x08, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
/* reg_ppcnt_a_frame_check_sequence_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
- 0x08 + 0x10, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
/* reg_ppcnt_a_alignment_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
- 0x08 + 0x18, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
/* reg_ppcnt_a_octets_transmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
- 0x08 + 0x20, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
/* reg_ppcnt_a_octets_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
- 0x08 + 0x28, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
/* reg_ppcnt_a_multicast_frames_xmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
- 0x08 + 0x30, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
/* reg_ppcnt_a_broadcast_frames_xmitted_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
- 0x08 + 0x38, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
/* reg_ppcnt_a_multicast_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
- 0x08 + 0x40, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
/* reg_ppcnt_a_broadcast_frames_received_ok
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
- 0x08 + 0x48, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
/* reg_ppcnt_a_in_range_length_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
- 0x08 + 0x50, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
/* reg_ppcnt_a_out_of_range_length_field
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
- 0x08 + 0x58, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
/* reg_ppcnt_a_frame_too_long_errors
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
- 0x08 + 0x60, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
/* reg_ppcnt_a_symbol_error_during_carrier
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
- 0x08 + 0x68, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
/* reg_ppcnt_a_mac_control_frames_transmitted
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
- 0x08 + 0x70, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
/* reg_ppcnt_a_mac_control_frames_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
- 0x08 + 0x78, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64);
/* reg_ppcnt_a_unsupported_opcodes_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
- 0x08 + 0x80, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64);
/* reg_ppcnt_a_pause_mac_ctrl_frames_received
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
- 0x08 + 0x88, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64);
/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
* Access: RO
*/
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
- 0x08 + 0x90, 0, 64);
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+
+/* Ethernet Extended Counter Group Counters */
+
+/* reg_ppcnt_ecn_marked
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_rx_frames
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
/* reg_ppcnt_tx_octets
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_octets,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
/* reg_ppcnt_tx_frames
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64);
/* reg_ppcnt_rx_pause
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
/* reg_ppcnt_rx_pause_duration
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
/* reg_ppcnt_tx_pause
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
/* reg_ppcnt_tx_pause_duration
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
/* reg_ppcnt_rx_pause_transition
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
/* Ethernet Per Traffic Group Counters */
@@ -3366,14 +3571,24 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
* The field cannot be cleared.
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
/* reg_ppcnt_tc_no_buffer_discard_uc
* The number of unicast packets dropped due to lack of shared
* buffer resources.
* Access: RO
*/
-MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
+MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* Ethernet Per Traffic Class Congestion Group Counters */
+
+/* reg_ppcnt_wred_discard
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, wred_discard,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
@@ -3682,12 +3897,15 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
@@ -3992,6 +4210,12 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+/* reg_ritr_ipv4_mc
+ * IPv4 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
+
enum mlxsw_reg_ritr_if_type {
/* VLAN interface. */
MLXSW_REG_RITR_VLAN_IF,
@@ -4049,6 +4273,14 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+/* reg_ritr_ipv4_mc_fe
+ * IPv4 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
+
/* reg_ritr_lb_en
* Loop-back filter enable for unicast packets.
* If the flag is set then loop-back filter for unicast packets is
@@ -4271,11 +4503,13 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_enable_set(payload, enable);
mlxsw_reg_ritr_ipv4_set(payload, 1);
mlxsw_reg_ritr_ipv6_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
mlxsw_reg_ritr_type_set(payload, type);
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
+ mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -4311,6 +4545,57 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+/* RTAR - Router TCAM Allocation Register
+ * --------------------------------------
+ * This register is used for allocation of regions in the TCAM table.
+ */
+#define MLXSW_REG_RTAR_ID 0x8004
+#define MLXSW_REG_RTAR_LEN 0x20
+
+MLXSW_REG_DEFINE(rtar, MLXSW_REG_RTAR_ID, MLXSW_REG_RTAR_LEN);
+
+enum mlxsw_reg_rtar_op {
+ MLXSW_REG_RTAR_OP_ALLOCATE,
+ MLXSW_REG_RTAR_OP_RESIZE,
+ MLXSW_REG_RTAR_OP_DEALLOCATE,
+};
+
+/* reg_rtar_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, op, 0x00, 28, 4);
+
+enum mlxsw_reg_rtar_key_type {
+ MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST = 1,
+ MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST = 3
+};
+
+/* reg_rtar_key_type
+ * TCAM key type for the region.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, key_type, 0x00, 0, 8);
+
+/* reg_rtar_region_size
+ * TCAM region size. When allocating/resizing this is the requested
+ * size, the response is the actual size.
+ * Note: Actual size may be larger than requested.
+ * Reserved for op = Deallocate
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rtar, region_size, 0x04, 0, 16);
+
+static inline void mlxsw_reg_rtar_pack(char *payload,
+ enum mlxsw_reg_rtar_op op,
+ enum mlxsw_reg_rtar_key_type key_type,
+ u16 region_size)
+{
+ MLXSW_REG_ZERO(rtar, payload);
+ mlxsw_reg_rtar_op_set(payload, op);
+ mlxsw_reg_rtar_key_type_set(payload, key_type);
+ mlxsw_reg_rtar_region_size_set(payload, region_size);
+}
+
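As a rough usage sketch (not part of this patch; the helper name is hypothetical), a caller could allocate an IPv4 multicast TCAM region by packing RTAR as defined above and issuing it through mlxsw_reg_write():

/* Hypothetical sketch: allocate an IPv4 multicast TCAM region of
 * 'size' entries via the RTAR register defined above.
 */
static int example_rtar_ipv4_mc_region_alloc(struct mlxsw_core *core, u16 size)
{
	char rtar_pl[MLXSW_REG_RTAR_LEN];

	mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
			    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST, size);
	return mlxsw_reg_write(core, MLXSW_REG(rtar), rtar_pl);
}

Deallocation would be the same call with MLXSW_REG_RTAR_OP_DEALLOCATE and a zero size, since the size field is reserved for that op.
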
/* RATR - Router Adjacency Table Register
* --------------------------------------
* The RATR register is used to configure the Router Adjacency (next-hop)
@@ -4480,6 +4765,27 @@ MLXSW_ITEM32(reg, ratr, ipip_ipv4_udip, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ratr, ipip_ipv6_ptr, 0x1C, 0, 24);
+enum mlxsw_reg_flow_counter_set_type {
+ /* No count */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00,
+ /* Count packets and bytes */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
+ /* Count only packets */
+ MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05,
+};
+
+/* reg_ratr_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_set_type, 0x28, 24, 8);
+
+/* reg_ratr_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, counter_index, 0x28, 0, 24);
+
static inline void
mlxsw_reg_ratr_pack(char *payload,
enum mlxsw_reg_ratr_op op, bool valid,
@@ -4507,6 +4813,20 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
+ bool counter_enable)
+{
+ enum mlxsw_reg_flow_counter_set_type set_type;
+
+ if (counter_enable)
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES;
+ else
+ set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT;
+
+ mlxsw_reg_ratr_counter_index_set(payload, counter_index);
+ mlxsw_reg_ratr_counter_set_type_set(payload, set_type);
+}
+
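A flow counter is bound to a next-hop entry by stamping the counter index and set type into the RATR payload before it is written; a minimal sketch, assuming the counter index was allocated elsewhere and the rest of the entry was packed by the existing RATR helpers (the function name is hypothetical):

/* Hypothetical sketch: attach flow counter 'counter_index' to an
 * already packed RATR payload and commit it.
 */
static int example_ratr_write_with_counter(struct mlxsw_core *core,
					   char *ratr_pl, u64 counter_index)
{
	mlxsw_reg_ratr_counter_pack(ratr_pl, counter_index, true);
	return mlxsw_reg_write(core, MLXSW_REG(ratr), ratr_pl);
}
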
/* RICNT - Router Interface Counter Register
* -----------------------------------------
* The RICNT register retrieves per port performance counters
@@ -4630,6 +4950,65 @@ static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index,
MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC);
}
+/* RRCR - Router Rules Copy Register Layout
+ * ----------------------------------------
+ * This register is used for moving and copying route entry rules.
+ */
+#define MLXSW_REG_RRCR_ID 0x800F
+#define MLXSW_REG_RRCR_LEN 0x24
+
+MLXSW_REG_DEFINE(rrcr, MLXSW_REG_RRCR_ID, MLXSW_REG_RRCR_LEN);
+
+enum mlxsw_reg_rrcr_op {
+ /* Move rules */
+ MLXSW_REG_RRCR_OP_MOVE,
+ /* Copy rules */
+ MLXSW_REG_RRCR_OP_COPY,
+};
+
+/* reg_rrcr_op
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, op, 0x00, 28, 4);
+
+/* reg_rrcr_offset
+ * Offset within the region from which to copy/move.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, offset, 0x00, 0, 16);
+
+/* reg_rrcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, rrcr, size, 0x04, 0, 16);
+
+/* reg_rrcr_table_id
+ * Identifier of the table on which to perform the operation. Encoding is the
+ * same as in RTAR.key_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, table_id, 0x10, 0, 4);
+
+/* reg_rrcr_dest_offset
+ * Offset within the region to which to copy/move
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rrcr, dest_offset, 0x20, 0, 16);
+
+static inline void mlxsw_reg_rrcr_pack(char *payload, enum mlxsw_reg_rrcr_op op,
+ u16 offset, u16 size,
+ enum mlxsw_reg_rtar_key_type table_id,
+ u16 dest_offset)
+{
+ MLXSW_REG_ZERO(rrcr, payload);
+ mlxsw_reg_rrcr_op_set(payload, op);
+ mlxsw_reg_rrcr_offset_set(payload, offset);
+ mlxsw_reg_rrcr_size_set(payload, size);
+ mlxsw_reg_rrcr_table_id_set(payload, table_id);
+ mlxsw_reg_rrcr_dest_offset_set(payload, dest_offset);
+}
+
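A minimal sketch of using RRCR to shift rules inside an IPv4 multicast region, for instance to open a hole at a given priority (hypothetical helper, assuming the region was allocated with RTAR above):

/* Hypothetical sketch: move 'size' IPv4 multicast rules from
 * 'src_offset' to 'dst_offset' within the region.
 */
static int example_rrcr_move_ipv4_mc(struct mlxsw_core *core, u16 src_offset,
				     u16 size, u16 dst_offset)
{
	char rrcr_pl[MLXSW_REG_RRCR_LEN];

	mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, src_offset, size,
			    MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST,
			    dest_offset);
	return mlxsw_reg_write(core, MLXSW_REG(rrcr), rrcr_pl);
}
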
/* RALTA - Router Algorithmic LPM Tree Allocation Register
* -------------------------------------------------------
* RALTA is used to allocate the LPM trees of the SHSPM method.
@@ -5169,15 +5548,6 @@ enum mlxsw_reg_rauht_trap_id {
*/
MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
-enum mlxsw_reg_flow_counter_set_type {
- /* No count */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00,
- /* Count packets and bytes */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
- /* Count only packets */
- MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05,
-};
-
/* reg_rauht_counter_set_type
* Counter set type for flow counters
* Access: RW
@@ -5596,6 +5966,360 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+/* RIGR-V2 - Router Interface Group Register Version 2
+ * ---------------------------------------------------
+ * The RIGR_V2 register is used to add, remove and query egress interface list
+ * of a multicast forwarding entry.
+ */
+#define MLXSW_REG_RIGR2_ID 0x8023
+#define MLXSW_REG_RIGR2_LEN 0xB0
+
+#define MLXSW_REG_RIGR2_MAX_ERIFS 32
+
+MLXSW_REG_DEFINE(rigr2, MLXSW_REG_RIGR2_ID, MLXSW_REG_RIGR2_LEN);
+
+/* reg_rigr2_rigr_index
+ * KVD Linear index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rigr2, rigr_index, 0x04, 0, 24);
+
+/* reg_rigr2_vnext
+ * Next RIGR Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vnext, 0x08, 31, 1);
+
+/* reg_rigr2_next_rigr_index
+ * Next RIGR Index. The index is to the KVD linear.
+ * Reserved when vnext = '0'.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, next_rigr_index, 0x08, 0, 24);
+
+/* reg_rigr2_vrmid
+ * RMID Index is valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, vrmid, 0x20, 31, 1);
+
+/* reg_rigr2_rmid_index
+ * RMID Index.
+ * Range 0 .. max_mid - 1
+ * Reserved when vrmid = '0'.
+ * The index is to the Port Group Table (PGT)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rigr2, rmid_index, 0x20, 0, 16);
+
+/* reg_rigr2_erif_entry_v
+ * Egress Router Interface is valid.
+ * Note that lower entries must be set if higher entries are set. For
+ * example: if erif_entry[2].v is set then erif_entry[1].v and
+ * erif_entry[0].v must be set.
+ * Index can be from 0 to cap_mc_erif_list_entries-1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_v, 0x24, 31, 1, 4, 0, false);
+
+/* reg_rigr2_erif_entry_erif
+ * Egress Router Interface.
+ * Valid range is from 0 to cap_max_router_interfaces - 1
+ * Index can be from 0 to MLXSW_REG_RIGR2_MAX_ERIFS - 1
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_erif, 0x24, 0, 16, 4, 0, false);
+
+static inline void mlxsw_reg_rigr2_pack(char *payload, u32 rigr_index,
+ bool vnext, u32 next_rigr_index)
+{
+ MLXSW_REG_ZERO(rigr2, payload);
+ mlxsw_reg_rigr2_rigr_index_set(payload, rigr_index);
+ mlxsw_reg_rigr2_vnext_set(payload, vnext);
+ mlxsw_reg_rigr2_next_rigr_index_set(payload, next_rigr_index);
+ mlxsw_reg_rigr2_vrmid_set(payload, 0);
+ mlxsw_reg_rigr2_rmid_index_set(payload, 0);
+}
+
+static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index,
+ bool v, u16 erif)
+{
+ mlxsw_reg_rigr2_erif_entry_v_set(payload, index, v);
+ mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif);
+}
+
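A minimal sketch of programming a single, non-chained egress RIF list with RIGR-V2 (hypothetical helper; 'rigr_index' is assumed to come from a KVD linear allocation and num_erifs must not exceed MLXSW_REG_RIGR2_MAX_ERIFS):

/* Hypothetical sketch: write 'num_erifs' egress RIFs to the list at
 * KVD linear index 'rigr_index', with no next-RIGR chaining.
 */
static int example_rigr2_write_erif_list(struct mlxsw_core *core,
					 u32 rigr_index, const u16 *erifs,
					 unsigned int num_erifs)
{
	char rigr2_pl[MLXSW_REG_RIGR2_LEN];
	unsigned int i;

	mlxsw_reg_rigr2_pack(rigr2_pl, rigr_index, false, 0);
	for (i = 0; i < num_erifs; i++)
		mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true, erifs[i]);
	return mlxsw_reg_write(core, MLXSW_REG(rigr2), rigr2_pl);
}

Lists longer than MLXSW_REG_RIGR2_MAX_ERIFS entries would be chained by setting vnext and next_rigr_index to point at another RIGR entry.
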
+/* RECR-V2 - Router ECMP Configuration Version 2 Register
+ * ------------------------------------------------------
+ */
+#define MLXSW_REG_RECR2_ID 0x8025
+#define MLXSW_REG_RECR2_LEN 0x38
+
+MLXSW_REG_DEFINE(recr2, MLXSW_REG_RECR2_ID, MLXSW_REG_RECR2_LEN);
+
+/* reg_recr2_pp
+ * Per-port configuration
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, recr2, pp, 0x00, 24, 1);
+
+/* reg_recr2_sh
+ * Symmetric hash
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, sh, 0x00, 8, 1);
+
+/* reg_recr2_seed
+ * Seed
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, recr2, seed, 0x08, 0, 32);
+
+enum {
+ /* Enable IPv4 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP = 3,
+ /* Enable IPv4 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV4_EN_TCP_UDP = 4,
+ /* Enable IPv6 fields if packet is not TCP and not UDP */
+ MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP = 5,
+ /* Enable IPv6 fields if packet is TCP or UDP */
+ MLXSW_REG_RECR2_IPV6_EN_TCP_UDP = 6,
+ /* Enable TCP/UDP header fields if packet is IPv4 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7,
+ /* Enable TCP/UDP header fields if packet is IPv6 */
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8,
+};
+
+/* reg_recr2_outer_header_enables
+ * Bit mask where each bit enables a specific layer to be included in
+ * the hash calculation.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_enables, 0x10, 0x04, 1);
+
+enum {
+ /* IPv4 Source IP */
+ MLXSW_REG_RECR2_IPV4_SIP0 = 9,
+ MLXSW_REG_RECR2_IPV4_SIP3 = 12,
+ /* IPv4 Destination IP */
+ MLXSW_REG_RECR2_IPV4_DIP0 = 13,
+ MLXSW_REG_RECR2_IPV4_DIP3 = 16,
+ /* IP Protocol */
+ MLXSW_REG_RECR2_IPV4_PROTOCOL = 17,
+ /* IPv6 Source IP */
+ MLXSW_REG_RECR2_IPV6_SIP0_7 = 21,
+ MLXSW_REG_RECR2_IPV6_SIP8 = 29,
+ MLXSW_REG_RECR2_IPV6_SIP15 = 36,
+ /* IPv6 Destination IP */
+ MLXSW_REG_RECR2_IPV6_DIP0_7 = 37,
+ MLXSW_REG_RECR2_IPV6_DIP8 = 45,
+ MLXSW_REG_RECR2_IPV6_DIP15 = 52,
+ /* IPv6 Next Header */
+ MLXSW_REG_RECR2_IPV6_NEXT_HEADER = 53,
+ /* IPv6 Flow Label */
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL = 57,
+ /* TCP/UDP Source Port */
+ MLXSW_REG_RECR2_TCP_UDP_SPORT = 74,
+ /* TCP/UDP Destination Port */
+ MLXSW_REG_RECR2_TCP_UDP_DPORT = 75,
+};
+
+/* reg_recr2_outer_header_fields_enable
+ * Packet fields to enable for the ECMP hash, subject to outer_header_enables.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);
+
+static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload)
+{
+ int i;
+
+ for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_SIP3; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload)
+{
+ int i;
+
+ for (i = MLXSW_REG_RECR2_IPV4_DIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload)
+{
+ int i = MLXSW_REG_RECR2_IPV6_SIP0_7;
+
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+
+ i = MLXSW_REG_RECR2_IPV6_SIP8;
+ for (; i <= MLXSW_REG_RECR2_IPV6_SIP15; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload)
+{
+ int i = MLXSW_REG_RECR2_IPV6_DIP0_7;
+
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
+
+ i = MLXSW_REG_RECR2_IPV6_DIP8;
+ for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++)
+ mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
+ true);
+}
+
+static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed)
+{
+ MLXSW_REG_ZERO(recr2, payload);
+ mlxsw_reg_recr2_pp_set(payload, false);
+ mlxsw_reg_recr2_sh_set(payload, true);
+ mlxsw_reg_recr2_seed_set(payload, seed);
+}
+
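A minimal sketch of seeding the ECMP hash and enabling the IPv4/IPv6 address fields using the helpers above (hypothetical function; a complete configuration would also set the relevant outer_header_enables bits for the layers being hashed):

/* Hypothetical sketch: seed the ECMP hash and include IPv4/IPv6
 * source and destination addresses in the calculation.
 */
static int example_recr2_hash_init(struct mlxsw_core *core, u32 seed)
{
	char recr2_pl[MLXSW_REG_RECR2_LEN];

	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	return mlxsw_reg_write(core, MLXSW_REG(recr2), recr2_pl);
}
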
+/* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register
+ * --------------------------------------------------------------
+ * The RMFT_V2 register is used to configure and query the multicast table.
+ */
+#define MLXSW_REG_RMFT2_ID 0x8027
+#define MLXSW_REG_RMFT2_LEN 0x174
+
+MLXSW_REG_DEFINE(rmft2, MLXSW_REG_RMFT2_ID, MLXSW_REG_RMFT2_LEN);
+
+/* reg_rmft2_v
+ * Valid
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, v, 0x00, 31, 1);
+
+enum mlxsw_reg_rmft2_type {
+ MLXSW_REG_RMFT2_TYPE_IPV4,
+ MLXSW_REG_RMFT2_TYPE_IPV6
+};
+
+/* reg_rmft2_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, type, 0x00, 28, 2);
+
+enum mlxsw_sp_reg_rmft2_op {
+ /* For Write:
+ * Write operation. Used to write a new entry to the table. All RW
+ * fields are relevant for the new entry. The activity bit is set for
+ * new entries. Note that a write with v (Valid) cleared deletes the
+ * entry.
+ * For Query:
+ * Read operation.
+ */
+ MLXSW_REG_RMFT2_OP_READ_WRITE,
+};
+
+/* reg_rmft2_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rmft2, op, 0x00, 20, 2);
+
+/* reg_rmft2_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the specific
+ * entry.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rmft2, a, 0x00, 16, 1);
+
+/* reg_rmft2_offset
+ * Offset within the multicast forwarding table to write to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rmft2, offset, 0x00, 0, 16);
+
+/* reg_rmft2_virtual_router
+ * Virtual Router ID. Range from 0..cap_max_virtual_routers-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, virtual_router, 0x04, 0, 16);
+
+enum mlxsw_reg_rmft2_irif_mask {
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE,
+ MLXSW_REG_RMFT2_IRIF_MASK_COMPARE
+};
+
+/* reg_rmft2_irif_mask
+ * Ingress RIF mask.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
+
+/* reg_rmft2_irif
+ * Ingress RIF index.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
+
+/* reg_rmft2_dip4
+ * Destination IPv4 address
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
+
+/* reg_rmft2_dip4_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
+
+/* reg_rmft2_sip4
+ * Source IPv4 address
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
+
+/* reg_rmft2_sip4_mask
+ * A bit that is set directs the TCAM to compare the corresponding bit in key. A
+ * bit that is clear directs the TCAM to ignore the corresponding bit in key.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
+
+/* reg_rmft2_flexible_action_set
+ * ACL action set. The only supported action types in this field and in any
+ * action-set pointed from here are as follows:
+ * 00h: ACTION_NULL
+ * 01h: ACTION_MAC_TTL, only TTL configuration is supported.
+ * 03h: ACTION_TRAP
+ * 06h: ACTION_QOS
+ * 08h: ACTION_POLICING_MONITORING
+ * 10h: ACTION_ROUTER_MC
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
+ MLXSW_REG_FLEX_ACTION_SET_LEN);
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+ enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+ u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+ const char *flexible_action_set)
+{
+ MLXSW_REG_ZERO(rmft2, payload);
+ mlxsw_reg_rmft2_v_set(payload, v);
+ mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
+ mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
+ mlxsw_reg_rmft2_offset_set(payload, offset);
+ mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
+ mlxsw_reg_rmft2_irif_set(payload, irif);
+ mlxsw_reg_rmft2_dip4_set(payload, dip4);
+ mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
+ mlxsw_reg_rmft2_sip4_set(payload, sip4);
+ mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
+ if (flexible_action_set)
+ mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+ flexible_action_set);
+}
+
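A minimal sketch of installing an IPv4 (S, G) entry through RMFT-V2 (hypothetical helper; 'act_set' is assumed to point at a pre-built flexible action set and the addresses are given in network byte order):

/* Hypothetical sketch: write an exact-match (S, G) IPv4 multicast
 * route at TCAM offset 'offset', ignoring the ingress RIF.
 */
static int example_rmft2_write_sg_route(struct mlxsw_core *core, u16 offset,
					u16 vr_id, __be32 group, __be32 source,
					const char *act_set)
{
	char rmft2_pl[MLXSW_REG_RMFT2_LEN];

	mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, offset, vr_id,
				  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
				  be32_to_cpu(group), 0xffffffff,
				  be32_to_cpu(source), 0xffffffff, act_set);
	return mlxsw_reg_write(core, MLXSW_REG(rmft2), rmft2_pl);
}

A (*, G) entry would instead pass a zero source with a zero sip4_mask so the TCAM ignores the source address bits.
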
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -5827,6 +6551,29 @@ MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
*/
MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
+/* reg_mtmp_tee
+ * Temperature Event Enable.
+ * 0 - Do not generate event
+ * 1 - Generate event
+ * 2 - Generate single event
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
+
+#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
+
+/* reg_mtmp_temperature_threshold_hi
+ * High threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
+
+/* reg_mtmp_temperature_threshold_lo
+ * Low threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
+
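The thresholds are written in units of 0.125 Celsius, so 105 Celsius corresponds to 105 * 8 = 840 = 0x348, matching MLXSW_REG_MTMP_THRESH_HI above; a trivial conversion sketch (hypothetical helper):

/* Hypothetical sketch: convert whole degrees Celsius to the
 * 0.125-degree units used by the MTMP threshold fields.
 */
static inline u16 example_mtmp_celsius_to_units(unsigned int celsius)
{
	return celsius * 8;
}
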
#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
/* reg_mtmp_sensor_name
@@ -5843,6 +6590,8 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
+ mlxsw_reg_mtmp_temperature_threshold_hi_set(payload,
+ MLXSW_REG_MTMP_THRESH_HI);
}
static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
@@ -6860,6 +7609,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(cwtp),
+ MLXSW_REG(cwtpm),
MLXSW_REG(ppbt),
MLXSW_REG(pacl),
MLXSW_REG(pagt),
@@ -6886,9 +7637,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
MLXSW_REG(ritr),
+ MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
MLXSW_REG(ricnt),
+ MLXSW_REG(rrcr),
MLXSW_REG(ralta),
MLXSW_REG(ralst),
MLXSW_REG(raltb),
@@ -6896,6 +7649,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rauht),
MLXSW_REG(raleu),
MLXSW_REG(rauhtd),
+ MLXSW_REG(rigr2),
+ MLXSW_REG(recr2),
+ MLXSW_REG(rmft2),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 9556d934714b..087aad52c195 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -63,6 +63,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
+ MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
MLXSW_RES_ID_MAX_LPM_TREES,
/* Internal resources.
@@ -100,6 +101,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
+ [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 696b99e65a5a..2d0897b7d860 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -53,6 +53,7 @@
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -69,11 +70,12 @@
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
+#include "spectrum_acl_flex_actions.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1420
-#define MLXSW_FWREV_SUBMINOR 122
+#define MLXSW_FWREV_MINOR 1530
+#define MLXSW_FWREV_SUBMINOR 152
static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
.major = MLXSW_FWREV_MAJOR,
@@ -1322,20 +1324,54 @@ out:
return err;
}
+static void
+mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
+ struct mlxsw_sp_port_xstats *xstats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err, i;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+ ppcnt_pl);
+ if (!err)
+ xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);
+
+ for (i = 0; i < TC_MAX_QUEUE; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev,
+ MLXSW_REG_PPCNT_TC_CONG_TC,
+ i, ppcnt_pl);
+ if (!err)
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->backlog[i] =
+ mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
+ xstats->tail_drop[i] =
+ mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
+ }
+}
+
static void update_stats_cache(struct work_struct *work)
{
struct mlxsw_sp_port *mlxsw_sp_port =
container_of(work, struct mlxsw_sp_port,
- hw_stats.update_dw.work);
+ periodic_hw_stats.update_dw.work);
if (!netif_carrier_ok(mlxsw_sp_port->dev))
goto out;
mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
- mlxsw_sp_port->hw_stats.cache);
+ &mlxsw_sp_port->periodic_hw_stats.stats);
+ mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
+ &mlxsw_sp_port->periodic_hw_stats.xstats);
out:
- mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
MLXSW_HW_STATS_UPDATE_TIME);
}
@@ -1348,7 +1384,7 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+ memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1695,17 +1731,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f)
+ struct tc_cls_matchall_offload *f,
+ bool ingress)
{
- bool ingress;
-
- if (is_classid_clsact_ingress(f->common.classid))
- ingress = true;
- else if (is_classid_clsact_egress(f->common.classid))
- ingress = false;
- else
- return -EOPNOTSUPP;
-
if (f->common.chain_index)
return -EOPNOTSUPP;
@@ -1723,17 +1751,9 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ bool ingress)
{
- bool ingress;
-
- if (is_classid_clsact_ingress(f->common.classid))
- ingress = true;
- else if (is_classid_clsact_egress(f->common.classid))
- ingress = false;
- else
- return -EOPNOTSUPP;
-
switch (f->command) {
case TC_CLSFLOWER_REPLACE:
return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
@@ -1747,16 +1767,72 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
+static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv, bool ingress)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
+
+ if (!tc_can_offload(mlxsw_sp_port->dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSMATCHALL:
+ return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
+ ingress);
+ case TC_SETUP_CLSFLOWER:
+ return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data,
+ ingress);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true);
+}
+
+static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false);
+}
+
+static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_block_offload *f)
+{
+ tc_setup_cb_t *cb;
+
+ if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ cb = mlxsw_sp_setup_tc_block_cb_ig;
+ else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ cb = mlxsw_sp_setup_tc_block_cb_eg;
+ else
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, cb, mlxsw_sp_port,
+ mlxsw_sp_port);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
switch (type) {
- case TC_SETUP_CLSMATCHALL:
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
- case TC_SETUP_CLSFLOWER:
- return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
+ case TC_SETUP_BLOCK:
+ return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
+ case TC_SETUP_QDISC_RED:
+ return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -2868,14 +2944,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_sample;
}
- mlxsw_sp_port->hw_stats.cache =
- kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
-
- if (!mlxsw_sp_port->hw_stats.cache) {
- err = -ENOMEM;
- goto err_alloc_hw_stats;
- }
- INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
&update_stats_cache);
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
@@ -2974,6 +3043,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
+ err = PTR_ERR(mlxsw_sp_port_vlan);
goto err_port_vlan_get;
}
@@ -2989,7 +3059,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
mlxsw_sp_port, dev, mlxsw_sp_port->split,
module);
- mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
err_register_netdev:
@@ -3012,8 +3082,6 @@ err_dev_addr_init:
err_port_swid_set:
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
- kfree(mlxsw_sp_port->hw_stats.cache);
-err_alloc_hw_stats:
kfree(mlxsw_sp_port->sample);
err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -3028,7 +3096,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
+ cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -3038,7 +3106,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
- kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
@@ -3075,13 +3142,17 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
+ GFP_KERNEL);
if (!mlxsw_sp->port_to_module) {
err = -ENOMEM;
goto err_port_to_module_alloc;
}
for (i = 1; i < max_ports; i++) {
+ /* Mark as invalid */
+ mlxsw_sp->port_to_module[i] = -1;
+
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
@@ -3149,6 +3220,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
+ if (mlxsw_sp->port_to_module[local_port] < 0)
+ continue;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
@@ -3311,6 +3384,14 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
+static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
+ u8 local_port, void *priv)
+{
+ skb->offload_mr_fwd_mark = 1;
+ skb->offload_fwd_mark = 1;
+ return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
+}
+
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
@@ -3354,6 +3435,10 @@ out:
MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
+#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \
+ _is_ctrl, SP_##_trap_group, DISCARD)
+
#define MLXSW_SP_EVENTL(_func, _trap_id) \
MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
@@ -3420,6 +3505,11 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
false, SP_IP2ME, DISCARD),
/* ACL trap */
MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
+ MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3445,6 +3535,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
rate = 128;
burst_size = 7;
break;
@@ -3460,6 +3552,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
rate = 1024;
burst_size = 7;
break;
@@ -3505,6 +3598,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
priority = 5;
tc = 5;
break;
@@ -3521,12 +3615,14 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
priority = 2;
tc = 2;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
priority = 1;
tc = 1;
break;
@@ -3642,6 +3738,9 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
@@ -3663,10 +3762,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return err;
}
+ err = mlxsw_sp_kvdl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
+ return err;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
- return err;
+ goto err_fids_init;
}
err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -3693,12 +3798,34 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_switchdev_init;
}
+ err = mlxsw_sp_counter_pool_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
+ goto err_counter_pool_init;
+ }
+
+ err = mlxsw_sp_afa_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
+ goto err_afa_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
}
+ /* Initialize netdevice notifier after router is initialized, so that
+ * the event handler can use router structures.
+ */
+ mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
+ err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
+ goto err_netdev_notifier;
+ }
+
err = mlxsw_sp_span_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
@@ -3711,12 +3838,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
- err = mlxsw_sp_counter_pool_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
- goto err_counter_pool_init;
- }
-
err = mlxsw_sp_dpipe_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
@@ -3734,14 +3855,18 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_ports_create:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- mlxsw_sp_counter_pool_fini(mlxsw_sp);
-err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
+ unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
+err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_afa_fini(mlxsw_sp);
+err_afa_init:
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
+err_counter_pool_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
mlxsw_sp_lag_fini(mlxsw_sp);
@@ -3751,6 +3876,8 @@ err_buffers_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
mlxsw_sp_fids_fini(mlxsw_sp);
+err_fids_init:
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
return err;
}
@@ -3760,15 +3887,18 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
+ unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_afa_fini(mlxsw_sp);
+ mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_kvdl_fini(mlxsw_sp);
}
static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -3791,8 +3921,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_pkey = 0,
.used_kvd_split_data = 1,
.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
- .kvd_hash_single_parts = 2,
- .kvd_hash_double_parts = 1,
+ .kvd_hash_single_parts = 59,
+ .kvd_hash_double_parts = 41,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
.swid_config = {
{
@@ -3986,14 +4116,21 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
- struct netdev_lag_upper_info *lag_upper_info)
+ struct netdev_lag_upper_info *lag_upper_info,
+ struct netlink_ext_ack *extack)
{
u16 lag_id;
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
+ if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Exceeded number of supported LAG devices");
return false;
- if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ }
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: LAG device using unsupported Tx type");
return false;
+ }
return true;
}
@@ -4198,6 +4335,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
{
struct netdev_notifier_changeupper_info *info;
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct netlink_ext_ack *extack;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
@@ -4205,6 +4343,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
info = ptr;
+ extack = netdev_notifier_info_to_extack(&info->info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
@@ -4212,25 +4351,43 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (!is_vlan_dev(upper_dev) &&
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
- !netif_is_ovs_master(upper_dev))
+ !netif_is_ovs_master(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Unknown upper device type");
return -EINVAL;
+ }
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev))
+ if (netdev_has_any_upper_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
+ }
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
- info->upper_info))
+ info->upper_info, extack))
return -EINVAL;
- if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Master device is a LAG master and this device has a VLAN");
return -EINVAL;
+ }
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
- !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Can not put a VLAN on a LAG port");
return -EINVAL;
- if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
+ }
+ if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Master device is an OVS master and this device has a VLAN");
return -EINVAL;
- if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
+ }
+ if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "spectrum: Can not put a VLAN on an OVS port");
return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4238,7 +4395,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
lower_dev,
@@ -4329,18 +4487,25 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
struct net_device *upper_dev;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
+ if (!netif_is_bridge_master(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
return -EINVAL;
+ }
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev))
+ if (netdev_has_any_upper_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4348,7 +4513,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
@@ -4411,13 +4577,21 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
return netif_is_l3_master(info->upper_dev);
}
-static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp *mlxsw_sp;
int err = 0;
- if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+ mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
+ err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
+ event, ptr);
+ else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
err = mlxsw_sp_netdevice_router_port_event(dev);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
@@ -4431,21 +4605,20 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
- .notifier_call = mlxsw_sp_netdevice_event,
+static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_valid_event,
};
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.notifier_call = mlxsw_sp_inetaddr_event,
- .priority = 10, /* Must be called before FIB notifier block */
};
-static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inet6addr_event,
+static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inet6addr_valid_event,
};
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
- .notifier_call = mlxsw_sp_router_netevent_event,
+static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inet6addr_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
@@ -4462,10 +4635,10 @@ static int __init mlxsw_sp_module_init(void)
{
int err;
- register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
- register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
@@ -4480,10 +4653,10 @@ static int __init mlxsw_sp_module_init(void)
err_pci_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
- unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
return err;
}
@@ -4491,10 +4664,10 @@ static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
+ unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
- unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
module_init(mlxsw_sp_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 84ce83acdc19..432ab9b12b7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -48,6 +48,7 @@
#include <linux/notifier.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
+#include <net/red.h>
#include "port.h"
#include "core.h"
@@ -62,7 +63,7 @@
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
-#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
+#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
struct mlxsw_sp_port;
@@ -94,7 +95,8 @@ struct mlxsw_sp_mid {
unsigned char addr[ETH_ALEN];
u16 fid;
u16 mid;
- unsigned int ref_count;
+ bool in_hw;
+ unsigned long *ports_in_mid; /* bits array */
};
enum mlxsw_sp_span_type {
@@ -138,9 +140,11 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_sb;
struct mlxsw_sp_bridge;
struct mlxsw_sp_router;
+struct mlxsw_sp_mr;
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
+struct mlxsw_sp_kvdl;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -148,15 +152,16 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
struct mlxsw_sp_upper *lags;
- u8 *port_to_module;
+ int *port_to_module;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
+ struct mlxsw_sp_mr *mr;
+ struct mlxsw_afa *afa;
struct mlxsw_sp_acl *acl;
struct mlxsw_sp_fid_core *fid_core;
- struct {
- DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
- } kvdl;
+ struct mlxsw_sp_kvdl *kvdl;
+ struct notifier_block netdevice_nb;
struct mlxsw_sp_counter_pool *counter_pool;
struct {
@@ -199,6 +204,37 @@ struct mlxsw_sp_port_vlan {
struct list_head bridge_vlan_node;
};
+enum mlxsw_sp_qdisc_type {
+ MLXSW_SP_QDISC_NO_QDISC,
+ MLXSW_SP_QDISC_RED,
+};
+
+struct mlxsw_sp_qdisc {
+ u32 handle;
+ enum mlxsw_sp_qdisc_type type;
+ struct red_stats xstats_base;
+ union {
+ struct {
+ u64 tail_drop_base;
+ u64 ecn_base;
+ u64 wred_drop_base;
+ } red;
+ } xstats;
+
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 drops;
+ u64 overlimits;
+};
+
+/* No need an internal lock; At worse - miss a single periodic iteration */
+struct mlxsw_sp_port_xstats {
+ u64 ecn;
+ u64 wred_drop[TC_MAX_QUEUE];
+ u64 tail_drop[TC_MAX_QUEUE];
+ u64 backlog[TC_MAX_QUEUE];
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
@@ -227,11 +263,13 @@ struct mlxsw_sp_port {
struct list_head mall_tc_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
- struct rtnl_link_stats64 *cache;
+ struct rtnl_link_stats64 stats;
+ struct mlxsw_sp_port_xstats xstats;
struct delayed_work update_dw;
- } hw_stats;
+ } periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
+ struct mlxsw_sp_qdisc root_qdisc;
};
static inline bool
@@ -322,7 +360,8 @@ void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev);
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
@@ -380,23 +419,43 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
/* spectrum_router.c */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
unsigned long event, void *ptr);
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
struct netdev_notifier_changeupper_info *info);
+bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev);
+int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info);
+int
+mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
+int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
@@ -466,9 +525,9 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
enum mlxsw_afk_element element,
const char *key_value,
const char *mask_value, unsigned int len);
-void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
-void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
- u16 group_id);
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id);
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -521,6 +580,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
+/* spectrum_qdisc.c */
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p);
+
/* spectrum_fid.c */
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 4b2455e3e079..93dcd315f7d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -52,7 +52,6 @@
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
- struct mlxsw_afa *afa;
struct mlxsw_sp_fid *dummy_fid;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
@@ -333,7 +332,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
- rulei->act_block = mlxsw_afa_block_create(acl->afa);
+ rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);
goto err_afa_block_create;
@@ -379,15 +378,15 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
key_value, mask_value, len);
}
-void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
+int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
- mlxsw_afa_block_continue(rulei->act_block);
+ return mlxsw_afa_block_continue(rulei->act_block);
}
-void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
- u16 group_id)
+int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+ u16 group_id)
{
- mlxsw_afa_block_jump(rulei->act_block, group_id);
+ return mlxsw_afa_block_jump(rulei->act_block, group_id);
}
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
@@ -397,7 +396,8 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
- return mlxsw_afa_block_append_trap(rulei->act_block);
+ return mlxsw_afa_block_append_trap(rulei->act_block,
+ MLXSW_TRAP_ID_ACL0);
}
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
@@ -653,85 +653,6 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
-
-static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
- char *enc_actions, bool is_first)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
- char pefa_pl[MLXSW_REG_PEFA_LEN];
- u32 kvdl_index;
- int err;
-
- /* The first action set of a TCAM entry is stored directly in TCAM,
- * not KVD linear area.
- */
- if (is_first)
- return 0;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
- &kvdl_index);
- if (err)
- return err;
- mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
- if (err)
- goto err_pefa_write;
- *p_kvdl_index = kvdl_index;
- return 0;
-
-err_pefa_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
- return err;
-}
-
-static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
- bool is_first)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- if (is_first)
- return;
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
-}
-
-static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
- u8 local_port)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
- char ppbs_pl[MLXSW_REG_PPBS_LEN];
- u32 kvdl_index;
- int err;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
- if (err)
- return err;
- mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
- if (err)
- goto err_ppbs_write;
- *p_kvdl_index = kvdl_index;
- return 0;
-
-err_ppbs_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
- return err;
-}
-
-static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
-{
- struct mlxsw_sp *mlxsw_sp = priv;
-
- mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
-}
-
-static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
- .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
- .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
- .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
- .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
-};
-
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
@@ -753,14 +674,6 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
goto err_afk_create;
}
- acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_ACTIONS_PER_SET),
- &mlxsw_sp_act_afa_ops, mlxsw_sp);
- if (IS_ERR(acl->afa)) {
- err = PTR_ERR(acl->afa);
- goto err_afa_create;
- }
-
err = rhashtable_init(&acl->ruleset_ht,
&mlxsw_sp_acl_ruleset_ht_params);
if (err)
@@ -792,8 +705,6 @@ err_acl_ops_init:
err_fid_get:
rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
- mlxsw_afa_destroy(acl->afa);
-err_afa_create:
mlxsw_afk_destroy(acl->afk);
err_afk_create:
kfree(acl);
@@ -810,7 +721,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
WARN_ON(!list_empty(&acl->rules));
mlxsw_sp_fid_put(acl->dummy_fid);
rhashtable_destroy(&acl->ruleset_ht);
- mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
kfree(acl);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
new file mode 100644
index 000000000000..4d3340ed0291
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -0,0 +1,129 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spectrum_acl_flex_actions.h"
+#include "core_acl_flex_actions.h"
+
+#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
+
+static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+ char *enc_actions, bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char pefa_pl[MLXSW_REG_PEFA_LEN];
+ u32 kvdl_index;
+ int err;
+
+ /* The first action set of a TCAM entry is stored directly in TCAM,
+ * not KVD linear area.
+ */
+ if (is_first)
+ return 0;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
+ &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+ if (err)
+ goto err_pefa_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_pefa_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
+ bool is_first)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ if (is_first)
+ return;
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
+ u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ char ppbs_pl[MLXSW_REG_PPBS_LEN];
+ u32 kvdl_index;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
+ if (err)
+ return err;
+ mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
+ if (err)
+ goto err_ppbs_write;
+ *p_kvdl_index = kvdl_index;
+ return 0;
+
+err_ppbs_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+ return err;
+}
+
+static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+
+ mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
+ .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
+ .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
+ .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
+ .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
+};
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_ACTIONS_PER_SET),
+ &mlxsw_sp_act_afa_ops, mlxsw_sp);
+ return PTR_ERR_OR_ZERO(mlxsw_sp->afa);
+}
+
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_afa_destroy(mlxsw_sp->afa);
+}
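
mlxsw_sp_afa_init() and mlxsw_sp_afa_fini() are the only entry points this file exports; the ops structure above is how the generic flex-action code reaches back into the Spectrum KVDL allocator. A minimal caller sketch, assuming a hypothetical setup path (the example_* names are illustrative, not part of the driver):

/* Hypothetical caller sketch -- example_* names are illustrative only. */
static int example_afa_setup(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_afa_init(mlxsw_sp);	/* allocates mlxsw_sp->afa */
	if (err)
		return err;

	/* ... bring up the users of mlxsw_sp->afa (ACL, etc.) here ... */

	return 0;
}

static void example_afa_teardown(struct mlxsw_sp *mlxsw_sp)
{
	/* ... tear down the users of mlxsw_sp->afa first ... */
	mlxsw_sp_afa_fini(mlxsw_sp);		/* destroys mlxsw_sp->afa */
}
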
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
new file mode 100644
index 000000000000..2726192836ad
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
@@ -0,0 +1,44 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H
+
+#include "spectrum.h"
+
+int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50b40de1fb91..7e8284b46968 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -608,7 +608,10 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
goto err_rulei_create;
}
- mlxsw_sp_acl_rulei_act_continue(rulei);
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (WARN_ON(err))
+ goto err_rulei_act_continue;
+
err = mlxsw_sp_acl_rulei_commit(rulei);
if (err)
goto err_rulei_commit;
@@ -623,6 +626,7 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
err_rule_insert:
err_rulei_commit:
+err_rulei_act_continue:
mlxsw_sp_acl_rulei_destroy(rulei);
err_rulei_create:
parman_item_remove(region->parman, parman_prio, parman_item);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 51e6846da72b..96fdba78acab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -43,21 +43,42 @@ enum mlxsw_sp_field_metadata_id {
MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
};
static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = {
- { .name = "erif_port",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
- .bitwidth = 32,
- .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+ {
+ .name = "erif_port",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT,
+ .bitwidth = 32,
+ .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
},
- { .name = "l3_forward",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
- .bitwidth = 1,
+ {
+ .name = "l3_forward",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD,
+ .bitwidth = 1,
},
- { .name = "l3_drop",
- .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
- .bitwidth = 1,
+ {
+ .name = "l3_drop",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP,
+ .bitwidth = 1,
+ },
+ {
+ .name = "adj_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_size",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE,
+ .bitwidth = 32,
+ },
+ {
+ .name = "adj_hash_index",
+ .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX,
+ .bitwidth = 32,
},
};
@@ -826,6 +847,390 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp)
MLXSW_SP_DPIPE_TABLE_NAME_HOST6);
}
+static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_match match = {0};
+ int err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ err = devlink_dpipe_match_put(skb, &match);
+ if (err)
+ return err;
+
+ match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match.header = &mlxsw_sp_dpipe_header_metadata;
+ match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ return devlink_dpipe_match_put(skb, &match);
+}
+
+static int mlxsw_sp_dpipe_table_adj_actions_dump(void *priv,
+ struct sk_buff *skb)
+{
+ struct devlink_dpipe_action action = {0};
+ int err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &devlink_dpipe_header_ethernet;
+ action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ err = devlink_dpipe_action_put(skb, &action);
+ if (err)
+ return err;
+
+ action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action.header = &mlxsw_sp_dpipe_header_metadata;
+ action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+
+ return devlink_dpipe_action_put(skb, &action);
+}
+
+static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ u64 size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
+ if (mlxsw_sp_nexthop_offload(nh) &&
+ !mlxsw_sp_nexthop_group_has_ipip(nh))
+ size++;
+ return size;
+}
+
+enum mlxsw_sp_dpipe_table_adj_match {
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX,
+ MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT,
+};
+
+enum mlxsw_sp_dpipe_table_adj_action {
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT,
+ MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT,
+};
+
+static void
+mlxsw_sp_dpipe_table_adj_match_action_prepare(struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_action *actions)
+{
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT;
+ match->header = &mlxsw_sp_dpipe_header_metadata;
+ match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &devlink_dpipe_header_ethernet;
+ action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY;
+ action->header = &mlxsw_sp_dpipe_header_metadata;
+ action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry,
+ struct devlink_dpipe_value *match_values,
+ struct devlink_dpipe_match *matches,
+ struct devlink_dpipe_value *action_values,
+ struct devlink_dpipe_action *actions)
+{
+	struct devlink_dpipe_value *action_value;
+ struct devlink_dpipe_value *match_value;
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+
+ entry->match_values = match_values;
+ entry->match_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT;
+
+ entry->action_values = action_values;
+ entry->action_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+
+ match_value->match = match;
+ match_value->value_size = sizeof(u32);
+ match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);
+ if (!match_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u64);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+
+ action_value->action = action;
+ action_value->value_size = sizeof(u32);
+ action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);
+ if (!action_value->value)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+__mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry,
+ u32 adj_index, u32 adj_size,
+ u32 adj_hash_index, unsigned char *ha,
+ struct mlxsw_sp_rif *rif)
+{
+ struct devlink_dpipe_value *value;
+ u32 *p_rif_value;
+ u32 *p_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX];
+ p_index = value->value;
+ *p_index = adj_index;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE];
+ p_index = value->value;
+ *p_index = adj_size;
+
+ value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX];
+ p_index = value->value;
+ *p_index = adj_hash_index;
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC];
+ ether_addr_copy(value->value, ha);
+
+ value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT];
+ p_rif_value = value->value;
+ *p_rif_value = mlxsw_sp_rif_index(rif);
+ value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
+ value->mapping_valid = true;
+}
+
+static void mlxsw_sp_dpipe_table_adj_entry_fill(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct devlink_dpipe_entry *entry)
+{
+ struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
+ unsigned char *ha = mlxsw_sp_nexthop_ha(nh);
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+ int err;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, &adj_hash_index);
+ __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, adj_size,
+ adj_hash_index, ha, rif);
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter);
+ if (!err)
+ entry->counter_valid = true;
+}
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp,
+ struct devlink_dpipe_entry *entry,
+ bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int entry_index = 0;
+ int nh_count_max;
+ int nh_count = 0;
+ int nh_skip;
+ int j;
+ int err;
+
+ rtnl_lock();
+ nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+start_again:
+ err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
+ if (err)
+ goto err_ctx_prepare;
+ j = 0;
+ nh_skip = nh_count;
+ nh_count = 0;
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_offload(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ if (nh_count < nh_skip)
+ goto skip;
+
+ mlxsw_sp_dpipe_table_adj_entry_fill(mlxsw_sp, nh, entry);
+ entry->index = entry_index;
+ err = devlink_dpipe_entry_ctx_append(dump_ctx, entry);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!j)
+ goto err_entry_append;
+ break;
+ }
+ goto err_entry_append;
+ }
+ entry_index++;
+ j++;
+skip:
+ nh_count++;
+ }
+
+ devlink_dpipe_entry_ctx_close(dump_ctx);
+ if (nh_count != nh_count_max)
+ goto start_again;
+ rtnl_unlock();
+
+ return 0;
+
+err_ctx_prepare:
+err_entry_append:
+ rtnl_unlock();
+ return err;
+}
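
The dump above has to cope with devlink_dpipe_entry_ctx_append() running out of skb space: on -EMSGSIZE it breaks out, closes the context, and restarts the walk while skipping the entries already emitted. A standalone userspace sketch of that restart-on-overflow pattern (illustrative only, not driver code):

#include <stdio.h>

#define BUF_MAX 4	/* stands in for the skb size limit */

static void flush(const int *buf, int n)
{
	for (int i = 0; i < n; i++)
		printf("%d ", buf[i]);
	printf("<- flushed\n");
}

static void dump_all(const int *items, int total)
{
	int skip = 0;	/* entries already emitted in previous passes */

	while (skip < total) {
		int buf[BUF_MAX];
		int count = 0;

		for (int i = skip; i < total; i++) {
			if (count == BUF_MAX)
				break;	/* "buffer full": flush and restart after i - 1 */
			buf[count++] = items[i];
		}
		flush(buf, count);
		skip += count;
	}
}

int main(void)
{
	int items[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };

	dump_all(items, 9);
	return 0;
}
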
+
+static int
+mlxsw_sp_dpipe_table_adj_entries_dump(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink_dpipe_value action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_action actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT];
+ struct devlink_dpipe_match matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT];
+ struct devlink_dpipe_entry entry = {0};
+ struct mlxsw_sp *mlxsw_sp = priv;
+ int err;
+
+ memset(matches, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(matches[0]));
+ memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT *
+ sizeof(match_values[0]));
+ memset(actions, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(actions[0]));
+ memset(action_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT *
+ sizeof(action_values[0]));
+
+ mlxsw_sp_dpipe_table_adj_match_action_prepare(matches, actions);
+ err = mlxsw_sp_dpipe_table_adj_entry_prepare(&entry,
+ match_values, matches,
+ action_values, actions);
+ if (err)
+ goto out;
+
+ err = mlxsw_sp_dpipe_table_adj_entries_get(mlxsw_sp, &entry,
+ counters_enabled, dump_ctx);
+out:
+ devlink_dpipe_entry_clear(&entry);
+ return err;
+}
+
+static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_nexthop *nh;
+ u32 adj_hash_index = 0;
+ u32 adj_index = 0;
+ u32 adj_size = 0;
+
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!mlxsw_sp_nexthop_offload(nh) ||
+ mlxsw_sp_nexthop_group_has_ipip(nh))
+ continue;
+
+ mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
+ &adj_hash_index);
+ if (enable)
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_update(mlxsw_sp,
+ adj_index + adj_hash_index, nh);
+ }
+ return 0;
+}
+
+static u64
+mlxsw_sp_dpipe_table_adj_size_get(void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ u64 size;
+
+ rtnl_lock();
+ size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp);
+ rtnl_unlock();
+
+ return size;
+}
+
+static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+ .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
+ .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
+ .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
+ .counters_set_update = mlxsw_sp_dpipe_table_adj_counters_update,
+ .size_get = mlxsw_sp_dpipe_table_adj_size_get,
+};
+
+static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ return devlink_dpipe_table_register(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ,
+ &mlxsw_sp_dpipe_table_adj_ops,
+ mlxsw_sp, false);
+}
+
+static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_dpipe_table_unregister(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ);
+}
+
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -846,8 +1251,14 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_dpipe_host6_table_init(mlxsw_sp);
if (err)
goto err_host6_table_init;
- return 0;
+ err = mlxsw_sp_dpipe_adj_table_init(mlxsw_sp);
+ if (err)
+ goto err_adj_table_init;
+
+ return 0;
+err_adj_table_init:
+ mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
err_host6_table_init:
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
err_host4_table_init:
@@ -861,6 +1272,7 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ mlxsw_sp_dpipe_adj_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp);
mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index 283fde4e6783..815d543cf114 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -56,5 +56,6 @@ static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6"
+#define MLXSW_SP_DPIPE_TABLE_NAME_ADJ "mlxsw_adj"
#endif /* _MLXSW_PIPELINE_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 8aace9a06a5d..2f0e57857ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -63,7 +63,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- if (is_tcf_gact_shot(a)) {
+ if (is_tcf_gact_ok(a)) {
+ err = mlxsw_sp_acl_rulei_act_continue(rulei);
+ if (err)
+ return err;
+ } else if (is_tcf_gact_shot(a)) {
err = mlxsw_sp_acl_rulei_act_drop(rulei);
if (err)
return err;
@@ -84,7 +88,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(ruleset);
group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
- mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
+ err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
+ if (err)
+ return err;
} else if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 702fe945227c..7502e53447bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -36,36 +36,123 @@
#include "spectrum_ipip.h"
-static bool
-mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
- return !!(tun->parms.i_flags & TUNNEL_KEY);
+ return tun->parms;
}
-static bool
-mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return !!(parms.i_flags & TUNNEL_KEY);
+}
- return !!(tun->parms.o_flags & TUNNEL_KEY);
+static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms)
+{
+ return !!(parms.o_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
+static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return mlxsw_sp_ipip_parms_has_ikey(parms) ?
+ be32_to_cpu(parms.i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms)
+{
+ return mlxsw_sp_ipip_parms_has_okey(parms) ?
+ be32_to_cpu(parms.o_key) : 0;
+}
- return mlxsw_sp_ipip_netdev_has_ikey(ol_dev) ?
- be32_to_cpu(tun->parms.i_key) : 0;
+static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms)
+{
+ return parms.iph.saddr;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto,
+ struct ip_tunnel_parm parms)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = mlxsw_sp_ipip_parms_saddr4(parms),
+ };
+ case MLXSW_SP_L3_PROTO_IPV6:
+ break;
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = 0,
+ };
+}
+
+static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms)
+{
+ return parms.iph.daddr;
+}
+
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto,
+ struct ip_tunnel_parm parms)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = mlxsw_sp_ipip_parms_daddr4(parms),
+ };
+ case MLXSW_SP_L3_PROTO_IPV6:
+ break;
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {
+ .addr4 = 0,
+ };
+}
+
+static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
}
static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev)
{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
+ return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_saddr(proto,
+ mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
+
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev));
+}
- return mlxsw_sp_ipip_netdev_has_okey(ol_dev) ?
- be32_to_cpu(tun->parms.o_key) : 0;
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
+{
+ return mlxsw_sp_ipip_parms_daddr(proto,
+ mlxsw_sp_ipip_netdev_parms(ol_dev));
}
static int
@@ -200,6 +287,73 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
};
}
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ union mlxsw_sp_l3addr old_saddr, new_saddr;
+ union mlxsw_sp_l3addr old_daddr, new_daddr;
+ struct ip_tunnel_parm new_parms;
+ bool update_tunnel = false;
+ bool update_decap = false;
+ bool update_nhs = false;
+ int err = 0;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+
+ new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+ new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ new_parms);
+ old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
+ ipip_entry->parms);
+
+ if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
+
+ /* Since the local address has changed, if there is another
+ * tunnel with a matching saddr, both need to be demoted.
+ */
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
+ MLXSW_SP_L3_PROTO_IPV4,
+ new_saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ update_tunnel = true;
+ } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_okey(new_parms)) ||
+ ipip_entry->parms.link != new_parms.link) {
+ update_tunnel = true;
+ } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ update_nhs = true;
+ } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
+ mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ update_decap = true;
+ }
+
+ if (update_tunnel)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, true,
+ extack);
+ else if (update_nhs)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true,
+ extack);
+ else if (update_decap)
+ err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, false,
+ extack);
+
+ ipip_entry->parms = new_parms;
+ return err;
+}
+
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
@@ -207,6 +361,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
+ .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
};
const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 1c2db831d83b..04b08d9d76e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -38,6 +38,13 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
+struct ip_tunnel_parm
+mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev);
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev);
+
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
MLXSW_SP_IPIP_TYPE_MAX,
@@ -47,9 +54,9 @@ struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
- unsigned int ref_count; /* Number of next hops using the tunnel. */
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
+ struct ip_tunnel_parm parms;
};
struct mlxsw_sp_ipip_ops {
@@ -72,6 +79,10 @@ struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry,
enum mlxsw_reg_ralue_op op,
u32 tunnel_index);
+
+ int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack);
};
extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 26c26cd30c3d..310c38247b5c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -39,55 +39,276 @@
#define MLXSW_SP_KVDL_SINGLE_BASE 0
#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_SINGLE_END \
+ (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
+
#define MLXSW_SP_KVDL_CHUNKS_BASE \
(MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
-#define MLXSW_SP_KVDL_CHUNKS_SIZE \
- (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
+#define MLXSW_SP_KVDL_CHUNKS_END \
+ (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
+
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
+ (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
+#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
+ (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
+
#define MLXSW_SP_CHUNK_MAX 32
+#define MLXSW_SP_LARGE_CHUNK_MAX 512
+
+struct mlxsw_sp_kvdl_part_info {
+ unsigned int part_index;
+ unsigned int start_index;
+ unsigned int end_index;
+ unsigned int alloc_size;
+};
+
+struct mlxsw_sp_kvdl_part {
+ struct list_head list;
+ const struct mlxsw_sp_kvdl_part_info *info;
+ unsigned long usage[0]; /* Entries */
+};
+
+struct mlxsw_sp_kvdl {
+ struct list_head parts_list;
+};
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
+ unsigned int alloc_size)
+{
+ struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
+
+ list_for_each_entry(part, &kvdl->parts_list, list) {
+ if (alloc_size <= part->info->alloc_size &&
+ (!min_part ||
+ part->info->alloc_size <= min_part->info->alloc_size))
+ min_part = part;
+ }
+
+ return min_part ?: ERR_PTR(-ENOBUFS);
+}
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ list_for_each_entry(part, &kvdl->parts_list, list) {
+ if (kvdl_index >= part->info->start_index &&
+ kvdl_index <= part->info->end_index)
+ return part;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static u32
+mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
+ unsigned int entry_index)
+{
+ return info->start_index + entry_index * info->alloc_size;
+}
+
+static unsigned int
+mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
+ u32 kvdl_index)
+{
+ return (kvdl_index - info->start_index) / info->alloc_size;
+}
+
+static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
+ u32 *p_kvdl_index)
+{
+ const struct mlxsw_sp_kvdl_part_info *info = part->info;
+ unsigned int entry_index, nr_entries;
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ entry_index = find_first_zero_bit(part->usage, nr_entries);
+ if (entry_index == nr_entries)
+ return -ENOBUFS;
+ __set_bit(entry_index, part->usage);
+
+ *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info,
+ entry_index);
+
+ return 0;
+}
+
+static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
+ u32 kvdl_index)
+{
+ unsigned int entry_index;
+
+ entry_index = mlxsw_sp_kvdl_index_entry_index(part->info,
+ kvdl_index);
+ __clear_bit(entry_index, part->usage);
+}
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index)
{
- int entry_index;
- int size;
- int type_base;
- int type_size;
- int type_entries;
-
- if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
- return -EINVAL;
- } else if (entry_count == 1) {
- type_base = MLXSW_SP_KVDL_SINGLE_BASE;
- type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
- type_entries = 1;
- } else {
- type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
- type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
- type_entries = MLXSW_SP_CHUNK_MAX;
+ struct mlxsw_sp_kvdl_part *part;
+
+	/* Find the partition with the smallest allocation size that satisfies
+	 * the requested size.
+	 */
+ part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
+ if (IS_ERR(part))
+ return;
+ mlxsw_sp_kvdl_part_free(part, entry_index);
+}
+
+int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
+ unsigned int entry_count,
+ unsigned int *p_alloc_size)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
+ if (IS_ERR(part))
+ return PTR_ERR(part);
+
+ *p_alloc_size = part->info->alloc_size;
+
+ return 0;
+}
+
+static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
+ {
+ .part_index = 0,
+ .start_index = MLXSW_SP_KVDL_SINGLE_BASE,
+ .end_index = MLXSW_SP_KVDL_SINGLE_END,
+ .alloc_size = 1,
+ },
+ {
+ .part_index = 1,
+ .start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
+ .end_index = MLXSW_SP_KVDL_CHUNKS_END,
+ .alloc_size = MLXSW_SP_CHUNK_MAX,
+ },
+ {
+ .part_index = 2,
+ .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
+ .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
+ .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
+ },
+};
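
Working the macros at the top of the file through this table gives the concrete index ranges below; MLXSW_SP_KVD_LINEAR_SIZE is defined elsewhere in the driver, so the last bound is left symbolic.

/* Resulting KVD linear layout (derived from the macros above):
 *
 *   part 0: singles,      alloc_size 1,   indices     0 .. 16383
 *   part 1: chunks,       alloc_size 32,  indices 16384 .. 65535
 *   part 2: large chunks, alloc_size 512, indices 65536 .. MLXSW_SP_KVD_LINEAR_SIZE - 1
 *
 * For example, the chunks partition provides (65535 - 16384 + 1) / 32 = 1536
 * allocations, each MLXSW_SP_CHUNK_MAX entries wide.
 */
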
+
+static struct mlxsw_sp_kvdl_part *
+mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) {
+ if (part->info->part_index == part_index)
+ return part;
}
- entry_index = type_base;
- size = type_base + type_size;
- for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
- int i;
+ return NULL;
+}
+
+static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
+ unsigned int part_index)
+{
+ const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part *part;
+ unsigned int nr_entries;
+ size_t usage_size;
+
+ info = &kvdl_parts_info[part_index];
+
+ nr_entries = (info->end_index - info->start_index + 1) /
+ info->alloc_size;
+ usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
+ part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
+ if (!part)
+ return -ENOMEM;
+
+ part->info = info;
+ list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+
+ return 0;
+}
+
+static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
+ unsigned int part_index)
+{
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index);
+ if (!part)
+ return;
+
+ list_del(&part->list);
+ kfree(part);
+}
+
+static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err, i;
+
+ INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list);
- for (i = 0; i < type_entries; i++)
- set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
- *p_entry_index = entry_index;
- return 0;
+ for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) {
+ err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i);
+ if (err)
+ goto err_kvdl_part_init;
}
- return -ENOBUFS;
+
+ return 0;
+
+err_kvdl_part_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+ return err;
}
-void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
{
- int type_entries;
int i;
- if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
- type_entries = 1;
- else
- type_entries = MLXSW_SP_CHUNK_MAX;
- for (i = 0; i < type_entries; i++)
- clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+ for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--)
+ mlxsw_sp_kvdl_part_fini(mlxsw_sp, i);
+}
+
+int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_kvdl *kvdl;
+ int err;
+
+ kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
+ if (!kvdl)
+ return -ENOMEM;
+ mlxsw_sp->kvdl = kvdl;
+
+ err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
+ if (err)
+ goto err_kvdl_parts_init;
+
+ return 0;
+
+err_kvdl_parts_init:
+ kfree(mlxsw_sp->kvdl);
+ return err;
+}
+
+void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
+ kfree(mlxsw_sp->kvdl);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
new file mode 100644
index 000000000000..d20b143de3b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -0,0 +1,1012 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/rhashtable.h>
+
+#include "spectrum_mr.h"
+#include "spectrum_router.h"
+
+struct mlxsw_sp_mr {
+ const struct mlxsw_sp_mr_ops *mr_ops;
+ void *catchall_route_priv;
+ struct delayed_work stats_update_dw;
+ struct list_head table_list;
+#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
+ unsigned long priv[0];
+	/* priv always has to be the last item */
+};
+
+struct mlxsw_sp_mr_vif {
+ struct net_device *dev;
+ const struct mlxsw_sp_rif *rif;
+ unsigned long vif_flags;
+
+	/* A list of route_vif_entry structs that point to routes in which the
+	 * VIF instance is used as one of the egress VIFs.
+	 */
+ struct list_head route_evif_list;
+
+	/* A list of route_vif_entry structs that point to routes in which the
+	 * VIF instance is used as the ingress VIF.
+	 */
+ struct list_head route_ivif_list;
+};
+
+struct mlxsw_sp_mr_route_vif_entry {
+ struct list_head vif_node;
+ struct list_head route_node;
+ struct mlxsw_sp_mr_vif *mr_vif;
+ struct mlxsw_sp_mr_route *mr_route;
+};
+
+struct mlxsw_sp_mr_table {
+ struct list_head node;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 vr_id;
+ struct mlxsw_sp_mr_vif vifs[MAXVIFS];
+ struct list_head route_list;
+ struct rhashtable route_ht;
+ char catchall_route_priv[0];
+	/* catchall_route_priv always has to be the last item */
+};
+
+struct mlxsw_sp_mr_route {
+ struct list_head node;
+ struct rhash_head ht_node;
+ struct mlxsw_sp_mr_route_key key;
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 min_mtu;
+ struct mfc_cache *mfc4;
+ void *route_priv;
+ const struct mlxsw_sp_mr_table *mr_table;
+ /* A list of route_vif_entry structs that point to the egress VIFs */
+ struct list_head evif_list;
+	/* A route_vif_entry struct that points to the ingress VIF */
+ struct mlxsw_sp_mr_route_vif_entry ivif;
+};
+
+static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
+ .key_len = sizeof(struct mlxsw_sp_mr_route_key),
+ .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
+ .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
+ .automatic_shrinking = true,
+};
+
+static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
+{
+ return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
+}
+
+static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
+{
+ return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
+}
+
+static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif)
+{
+ return vif->dev;
+}
+
+static bool
+mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
+{
+ vifi_t ivif;
+
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ ivif = mr_route->mfc4->mfc_parent;
+ return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return false;
+}
+
+static int
+mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ int valid_evifs;
+
+ valid_evifs = 0;
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ valid_evifs++;
+ return valid_evifs;
+}
+
+static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
+{
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return false;
+}
+
+static enum mlxsw_sp_mr_route_action
+mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+	/* If the ingress VIF is not a resolved, regular VIF, trap the route */
+ if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+	/* The kernel does not match a (*,G) route whose ingress interface is
+	 * not one of the egress interfaces, so trap this kind of route.
+	 */
+ if (mlxsw_sp_mr_route_starg(mr_route) &&
+ !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If the route has no valid eVIFs, trap it. */
+ if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+
+ /* If one of the eVIFs has no RIF, trap-and-forward the route as there
+ * is some more routing to do in software too.
+ */
+ list_for_each_entry(rve, &mr_route->evif_list, route_node)
+ if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif)
+ return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD;
+
+ return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
+}
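
Summarizing the ladder above, the chosen action falls out as follows (a sketch of the cases, in the order they are tested):

/*
 *   ingress VIF not a valid, regular, resolved VIF   -> TRAP
 *   (*,G) route whose iVIF is not among the eVIFs    -> TRAP
 *   no valid eVIFs                                   -> TRAP
 *   an eVIF has a netdev but no RIF yet              -> TRAP_AND_FORWARD
 *   otherwise                                        -> FORWARD
 */
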
+
+static enum mlxsw_sp_mr_route_prio
+mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
+{
+ return mlxsw_sp_mr_route_starg(mr_route) ?
+ MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
+}
+
+static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_key *key,
+ const struct mfc_cache *mfc)
+{
+ bool starg = (mfc->mfc_origin == htonl(INADDR_ANY));
+
+ memset(key, 0, sizeof(*key));
+ key->vrid = mr_table->vr_id;
+ key->proto = mr_table->proto;
+ key->group.addr4 = mfc->mfc_mcastgrp;
+ key->group_mask.addr4 = htonl(0xffffffff);
+ key->source.addr4 = mfc->mfc_origin;
+ key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff);
+}
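
The key distinguishes (S,G) from (*,G) routes purely through the source mask; the group is always matched exactly. For example, with illustrative addresses:

/* Illustrative key contents (example addresses only):
 *
 *   (S,G): mfc_origin 198.51.100.1, mfc_mcastgrp 233.252.0.1
 *          -> group 233.252.0.1/32, source 198.51.100.1/32
 *
 *   (*,G): mfc_origin 0.0.0.0 (INADDR_ANY), mfc_mcastgrp 233.252.0.1
 *          -> group 233.252.0.1/32, source 0.0.0.0/0 (any source matches)
 */
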
+
+static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ rve = kzalloc(sizeof(*rve), GFP_KERNEL);
+ if (!rve)
+ return -ENOMEM;
+ rve->mr_route = mr_route;
+ rve->mr_vif = mr_vif;
+ list_add_tail(&rve->route_node, &mr_route->evif_list);
+ list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ list_del(&rve->route_node);
+ list_del(&rve->vif_node);
+ kfree(rve);
+}
+
+static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ mr_route->ivif.mr_route = mr_route;
+ mr_route->ivif.mr_vif = mr_vif;
+ list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
+}
+
+static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
+{
+ list_del(&mr_route->ivif.vif_node);
+}
+
+static int
+mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ u16 *erif_indices;
+ u16 irif_index;
+ u16 erif = 0;
+
+ erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
+ GFP_KERNEL);
+ if (!erif_indices)
+ return -ENOMEM;
+
+ list_for_each_entry(rve, &mr_route->evif_list, route_node) {
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+
+ erif_indices[erif++] = rifi;
+ }
+ }
+
+ if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
+ irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
+ else
+ irif_index = 0;
+
+ route_info->irif_index = irif_index;
+ route_info->erif_indices = erif_indices;
+ route_info->min_mtu = mr_route->min_mtu;
+ route_info->route_action = mr_route->route_action;
+ route_info->erif_num = erif;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
+{
+ kfree(route_info->erif_indices);
+}
+
+static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route,
+ bool replace)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_info route_info;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ int err;
+
+ err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
+ if (err)
+ return err;
+
+ if (!replace) {
+ struct mlxsw_sp_mr_route_params route_params;
+
+ mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_route->route_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ route_params.key = mr_route->key;
+ route_params.value = route_info;
+ route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_route->route_priv,
+ &route_params);
+ if (err)
+ kfree(mr_route->route_priv);
+ } else {
+ err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
+ &route_info);
+ }
+out:
+ mlxsw_sp_mr_route_info_destroy(&route_info);
+ return err;
+}
+
+static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
+ kfree(mr_route->route_priv);
+}
+
+static struct mlxsw_sp_mr_route *
+mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err = 0;
+ int i;
+
+ /* Allocate and init a new route and fill it with parameters */
+ mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
+ if (!mr_route)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&mr_route->evif_list);
+ mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
+
+ /* Find min_mtu and link iVIF and eVIFs */
+ mr_route->min_mtu = ETH_MAX_MTU;
+ ipmr_cache_hold(mfc);
+ mr_route->mfc4 = mfc;
+ mr_route->mr_table = mr_table;
+ for (i = 0; i < MAXVIFS; i++) {
+ if (mfc->mfc_un.res.ttls[i] != 255) {
+ err = mlxsw_sp_mr_route_evif_link(mr_route,
+ &mr_table->vifs[i]);
+ if (err)
+ goto err;
+ if (mr_table->vifs[i].dev &&
+ mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
+ mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
+ }
+ }
+ mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
+
+ mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
+ return mr_route;
+err:
+ ipmr_cache_put(mfc);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
+
+ mlxsw_sp_mr_route_ivif_unlink(mr_route);
+ ipmr_cache_put(mr_route->mfc4);
+ list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
+ mlxsw_sp_mr_route_evif_unlink(rve);
+ kfree(mr_route);
+}
+
+static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ switch (mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
+ bool offload)
+{
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ if (offload)
+ mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
+ else
+ mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
+{
+ bool offload;
+
+ offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
+}
+
+static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ mlxsw_sp_mr_mfc_offload_set(mr_route, false);
+ mlxsw_sp_mr_route_erase(mr_table, mr_route);
+ rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_route->node);
+ mlxsw_sp_mr_route_destroy(mr_table, mr_route);
+}
+
+int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc, bool replace)
+{
+ struct mlxsw_sp_mr_route *mr_orig_route = NULL;
+ struct mlxsw_sp_mr_route *mr_route;
+ int err;
+
+	/* If the route is a (*,*) route, abort, as this kind of route is
+	 * used for proxy routes.
+	 */
+ if (mfc->mfc_origin == htonl(INADDR_ANY) &&
+ mfc->mfc_mcastgrp == htonl(INADDR_ANY)) {
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Create a new route */
+ mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
+ if (IS_ERR(mr_route))
+ return PTR_ERR(mr_route);
+
+ /* Find any route with a matching key */
+ mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
+ &mr_route->key,
+ mlxsw_sp_mr_route_ht_params);
+ if (replace) {
+		/* In the replace case, make the new route point to the
+		 * existing route_priv.
+		 */
+ if (WARN_ON(!mr_orig_route)) {
+ err = -ENOENT;
+ goto err_no_orig_route;
+ }
+ mr_route->route_priv = mr_orig_route->route_priv;
+ } else if (mr_orig_route) {
+		/* In the non-replace case, if another route with the same key
+		 * was found, abort, as duplicate routes are used for proxy
+		 * routes.
+		 */
+ dev_warn(mr_table->mlxsw_sp->bus_info->dev,
+ "Offloading proxy routes is not supported.\n");
+ err = -EINVAL;
+ goto err_duplicate_route;
+ }
+
+ /* Put it in the table data-structures */
+ list_add_tail(&mr_route->node, &mr_table->route_list);
+ err = rhashtable_insert_fast(&mr_table->route_ht,
+ &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ /* Write the route to the hardware */
+ err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
+ if (err)
+ goto err_mr_route_write;
+
+ /* Destroy the original route */
+ if (replace) {
+ rhashtable_remove_fast(&mr_table->route_ht,
+ &mr_orig_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+ list_del(&mr_orig_route->node);
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
+ }
+
+ mlxsw_sp_mr_mfc_offload_update(mr_route);
+ return 0;
+
+err_mr_route_write:
+ rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
+ mlxsw_sp_mr_route_ht_params);
+err_rhashtable_insert:
+ list_del(&mr_route->node);
+err_no_orig_route:
+err_duplicate_route:
+ mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
+ return err;
+}
+
+void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc)
+{
+ struct mlxsw_sp_mr_route *mr_route;
+ struct mlxsw_sp_mr_route_key key;
+
+ mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
+ mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
+ mlxsw_sp_mr_route_ht_params);
+ if (mr_route)
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+}
+
+/* Should be called after the VIF struct is updated */
+static int
+mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 irif_index;
+ int err;
+
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return 0;
+
+ /* rve->mr_vif->rif is guaranteed to be valid at this stage */
+ irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
+ irif_index);
+ if (err)
+ return err;
+
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+		/* No need to roll back here, because the iRIF change only
+		 * takes effect once the route action has been updated.
+		 */
+ return err;
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP);
+ rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
+/* Should be called after the RIF struct is updated */
+static int
+mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 erif_index = 0;
+ int err;
+
+	/* Update the route action, as the new eVIF can be a tunnel or a pimreg
+	 * device, which would require updating the action.
+	 */
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action) {
+ err = mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+ if (err)
+ return err;
+ }
+
+ /* Add the eRIF */
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
+ erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ err = mr->mr_ops->route_erif_add(mlxsw_sp,
+ rve->mr_route->route_priv,
+ erif_index);
+ if (err)
+ goto err_route_erif_add;
+ }
+
+ /* Update the minimum MTU */
+ if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
+ err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->min_mtu);
+ if (err)
+ goto err_route_min_mtu_update;
+ }
+
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+ return 0;
+
+err_route_min_mtu_update:
+ if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
+ erif_index);
+err_route_erif_add:
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ rve->mr_route->route_action);
+ return err;
+}
+
+/* Should be called before the RIF struct is updated */
+static void
+mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct mlxsw_sp_mr_route_vif_entry *rve)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ enum mlxsw_sp_mr_route_action route_action;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u16 rifi;
+
+	/* If the VIF being unresolved is not valid, there is no eRIF to
+	 * delete.
+	 */
+ if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
+ return;
+
+ /* Update the route action: if there is only one valid eVIF in the
+ * route, set the action to trap as the VIF deletion will lead to zero
+	 * valid eVIFs. Otherwise, use mlxsw_sp_mr_route_action() to
+ * determine the route action.
+ */
+ if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
+ route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
+ else
+ route_action = mlxsw_sp_mr_route_action(rve->mr_route);
+ if (route_action != rve->mr_route->route_action)
+ mr->mr_ops->route_action_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ route_action);
+
+ /* Delete the erif from the route */
+ rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
+ mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
+ rve->mr_route->route_action = route_action;
+ mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
+}
+
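+/* Bind the VIF to a netdevice and an optional RIF and re-resolve every route
+ * in which this VIF takes part, first as an iVIF and then as an eVIF. On
+ * failure, all the routes that were already resolved are rolled back.
+ */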
+static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
+ int err;
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = rif;
+ mr_vif->vif_flags = vif_flags;
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
+ err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
+ if (err)
+ goto err_irif_unresolve;
+ }
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
+ err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
+ if (err)
+ goto err_erif_unresolve;
+ }
+ return 0;
+
+err_erif_unresolve:
+ list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
+ vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
+err_irif_unresolve:
+ list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
+ vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
+ mr_vif->rif = NULL;
+ return err;
+}
+
+static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev,
+ struct mlxsw_sp_mr_vif *mr_vif)
+{
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+
+ /* Update all routes where this VIF is used as an unresolved eRIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
+ mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
+
+ /* Update all routes where this VIF is used as an unresolved iRIF */
+ list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
+ mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
+
+ /* Update the VIF */
+ mr_vif->dev = dev;
+ mr_vif->rif = NULL;
+}
+
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return -EINVAL;
+ if (mr_vif->dev)
+ return -EEXIST;
+ return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
+}
+
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
+{
+ struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
+
+ if (WARN_ON(vif_index >= MAXVIFS))
+ return;
+ if (WARN_ON(!mr_vif->dev))
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
+}
+
+static struct mlxsw_sp_mr_vif *
+mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
+ const struct net_device *dev)
+{
+ vifi_t vif_index;
+
+ for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
+ if (mr_table->vifs[vif_index].dev == dev)
+ return &mr_table->vifs[vif_index];
+ return NULL;
+}
+
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return 0;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return 0;
+ return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
+ mr_vif->vif_flags, rif);
+}
+
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+ mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
+}
+
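+/* Propagate a RIF MTU change to the multicast routes: every route that uses
+ * the corresponding VIF as an eVIF has its minimum MTU lowered if the new
+ * MTU is smaller than the current minimum.
+ */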
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu)
+{
+ const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr_route_vif_entry *rve;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_vif *mr_vif;
+
+ if (!rif_dev)
+ return;
+
+	/* Search for a VIF that uses that RIF */
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ if (!mr_vif)
+ return;
+
+	/* Update all the routes that use that VIF as an eVIF */
+ list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
+ if (mtu < rve->mr_route->min_mtu) {
+ rve->mr_route->min_mtu = mtu;
+ mr->mr_ops->route_min_mtu_update(mlxsw_sp,
+ rve->mr_route->route_priv,
+ mtu);
+ }
+ }
+}
+
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 vr_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_mr_route_params catchall_route_params = {
+ .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ .key = {
+ .vrid = vr_id,
+ },
+ .value = {
+ .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ }
+ };
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ struct mlxsw_sp_mr_table *mr_table;
+ int err;
+ int i;
+
+ mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
+ GFP_KERNEL);
+ if (!mr_table)
+ return ERR_PTR(-ENOMEM);
+
+ mr_table->vr_id = vr_id;
+ mr_table->mlxsw_sp = mlxsw_sp;
+ mr_table->proto = proto;
+ INIT_LIST_HEAD(&mr_table->route_list);
+
+ err = rhashtable_init(&mr_table->route_ht,
+ &mlxsw_sp_mr_route_ht_params);
+ if (err)
+ goto err_route_rhashtable_init;
+
+ for (i = 0; i < MAXVIFS; i++) {
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
+ INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
+ }
+
+ err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
+ mr_table->catchall_route_priv,
+ &catchall_route_params);
+ if (err)
+ goto err_ops_route_create;
+ list_add_tail(&mr_table->node, &mr->table_list);
+ return mr_table;
+
+err_ops_route_create:
+ rhashtable_destroy(&mr_table->route_ht);
+err_route_rhashtable_init:
+ kfree(mr_table);
+ return ERR_PTR(err);
+}
+
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
+ list_del(&mr_table->node);
+ mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
+ &mr_table->catchall_route_priv);
+ rhashtable_destroy(&mr_table->route_ht);
+ kfree(mr_table);
+}
+
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
+{
+ struct mlxsw_sp_mr_route *mr_route, *tmp;
+ int i;
+
+ list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
+ __mlxsw_sp_mr_route_del(mr_table, mr_route);
+
+ for (i = 0; i < MAXVIFS; i++) {
+ mr_table->vifs[i].dev = NULL;
+ mr_table->vifs[i].rif = NULL;
+ }
+}
+
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
+{
+ int i;
+
+ for (i = 0; i < MAXVIFS; i++)
+ if (mr_table->vifs[i].dev)
+ return false;
+ return list_empty(&mr_table->route_list);
+}
+
+static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_route *mr_route)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+ u64 packets, bytes;
+
+ if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return;
+
+ mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
+ &bytes);
+
+ switch (mr_route->mr_table->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ if (mr_route->mfc4->mfc_un.res.pkt != packets)
+ mr_route->mfc4->mfc_un.res.lastuse = jiffies;
+ mr_route->mfc4->mfc_un.res.pkt = packets;
+ mr_route->mfc4->mfc_un.res.bytes = bytes;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ /* fall through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
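+/* Delayed work that periodically copies the hardware counters of all
+ * offloaded routes into the kernel MFC cache entries, so that the packet and
+ * byte statistics seen by user space stay up to date.
+ */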
+static void mlxsw_sp_mr_stats_update(struct work_struct *work)
+{
+ struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
+ stats_update_dw.work);
+ struct mlxsw_sp_mr_table *mr_table;
+ struct mlxsw_sp_mr_route *mr_route;
+ unsigned long interval;
+
+ rtnl_lock();
+ list_for_each_entry(mr_table, &mr->table_list, node)
+ list_for_each_entry(mr_route, &mr_table->route_list, node)
+ mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
+ mr_route);
+ rtnl_unlock();
+
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+}
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops)
+{
+ struct mlxsw_sp_mr *mr;
+ unsigned long interval;
+ int err;
+
+ mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
+ if (!mr)
+ return -ENOMEM;
+ mr->mr_ops = mr_ops;
+ mlxsw_sp->mr = mr;
+ INIT_LIST_HEAD(&mr->table_list);
+
+ err = mr_ops->init(mlxsw_sp, mr->priv);
+ if (err)
+ goto err;
+
+ /* Create the delayed work for counter updates */
+ INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
+ interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
+ mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
+ return 0;
+err:
+ kfree(mr);
+ return err;
+}
+
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
+
+ cancel_delayed_work_sync(&mr->stats_update_dw);
+ mr->mr_ops->fini(mr->priv);
+ kfree(mr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
new file mode 100644
index 000000000000..5d26a122af49
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
@@ -0,0 +1,134 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_H
+#define _MLXSW_SPECTRUM_MCROUTER_H
+
+#include <linux/mroute.h>
+#include "spectrum_router.h"
+#include "spectrum.h"
+
+enum mlxsw_sp_mr_route_action {
+ MLXSW_SP_MR_ROUTE_ACTION_FORWARD,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP,
+ MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD,
+};
+
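+/* Route lookup priorities. Lower values are placed earlier in the TCAM
+ * region and therefore take precedence: (S,G) entries match before (*,G)
+ * entries, which match before the per-table catch-all entry.
+ */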
+enum mlxsw_sp_mr_route_prio {
+ MLXSW_SP_MR_ROUTE_PRIO_SG,
+ MLXSW_SP_MR_ROUTE_PRIO_STARG,
+ MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
+ __MLXSW_SP_MR_ROUTE_PRIO_MAX
+};
+
+#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1)
+
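+/* A multicast route is keyed by the virtual router, the L3 protocol and the
+ * masked source and group addresses.
+ */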
+struct mlxsw_sp_mr_route_key {
+ int vrid;
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr group;
+ union mlxsw_sp_l3addr group_mask;
+ union mlxsw_sp_l3addr source;
+ union mlxsw_sp_l3addr source_mask;
+};
+
+struct mlxsw_sp_mr_route_info {
+ enum mlxsw_sp_mr_route_action route_action;
+ u16 irif_index;
+ u16 *erif_indices;
+ size_t erif_num;
+ u16 min_mtu;
+};
+
+struct mlxsw_sp_mr_route_params {
+ struct mlxsw_sp_mr_route_key key;
+ struct mlxsw_sp_mr_route_info value;
+ enum mlxsw_sp_mr_route_prio prio;
+};
+
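+/* Operations implemented by the low-level multicast routing backend
+ * (currently the TCAM based implementation in spectrum_mr_tcam.c).
+ * priv_size bytes of backend private data are allocated at init time and
+ * route_priv_size bytes are allocated per route and passed back as
+ * route_priv to the per-route callbacks.
+ */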
+struct mlxsw_sp_mr_ops {
+ int priv_size;
+ int route_priv_size;
+ int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+ int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params);
+ int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info);
+ int (*route_stats)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u64 *packets, u64 *bytes);
+ int (*route_action_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action);
+ int (*route_min_mtu_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 min_mtu);
+ int (*route_irif_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 irif_index);
+ int (*route_erif_add)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ int (*route_erif_del)(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ u16 erif_index);
+ void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv);
+ void (*fini)(void *priv);
+};
+
+struct mlxsw_sp_mr;
+struct mlxsw_sp_mr_table;
+
+int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_mr_ops *mr_ops);
+void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc, bool replace);
+void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
+ struct mfc_cache *mfc);
+int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
+ struct net_device *dev, vifi_t vif_index,
+ unsigned long vif_flags,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index);
+int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif);
+void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
+ const struct mlxsw_sp_rif *rif, int mtu);
+struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto);
+void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table);
+void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table);
+bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
new file mode 100644
index 000000000000..34a0b632e5dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -0,0 +1,839 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/parman.h>
+
+#include "spectrum_mr_tcam.h"
+#include "reg.h"
+#include "spectrum.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_mr.h"
+
+struct mlxsw_sp_mr_tcam_region {
+ struct mlxsw_sp *mlxsw_sp;
+ enum mlxsw_reg_rtar_key_type rtar_key_type;
+ struct parman *parman;
+ struct parman_prio *parman_prios;
+};
+
+struct mlxsw_sp_mr_tcam {
+ struct mlxsw_sp_mr_tcam_region ipv4_tcam_region;
+};
+
+/* This struct maps to one RIGR2 register entry */
+struct mlxsw_sp_mr_erif_sublist {
+ struct list_head list;
+ u32 rigr2_kvdl_index;
+ int num_erifs;
+ u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
+ bool synced;
+};
+
+struct mlxsw_sp_mr_tcam_erif_list {
+ struct list_head erif_sublists;
+ u32 kvdl_index;
+};
+
+static bool
+mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MC_ERIF_LIST_ENTRIES);
+
+ return erif_sublist->num_erifs == erif_list_entries;
+}
+
+static void
+mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ INIT_LIST_HEAD(&erif_list->erif_sublists);
+}
+
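+/* Each eRIF sublist is backed by a single KVD linear entry, which is
+ * configured via the RIGR2 register.
+ */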
+#define MLXSW_SP_KVDL_RIGR2_SIZE 1
+
+static struct mlxsw_sp_mr_erif_sublist *
+mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ int err;
+
+ erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
+ if (!erif_sublist)
+ return ERR_PTR(-ENOMEM);
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
+ &erif_sublist->rigr2_kvdl_index);
+ if (err) {
+ kfree(erif_sublist);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
+ return erif_sublist;
+}
+
+static void
+mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist)
+{
+ list_del(&erif_sublist->list);
+ mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
+ kfree(erif_sublist);
+}
+
+static int
+mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ u16 erif_index)
+{
+ struct mlxsw_sp_mr_erif_sublist *sublist;
+
+	/* If there is no sublist yet, or the last one is full, allocate a
+ * new one.
+ */
+ if (list_empty(&erif_list->erif_sublists)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ erif_list->kvdl_index = sublist->rigr2_kvdl_index;
+ } else {
+ sublist = list_last_entry(&erif_list->erif_sublists,
+ struct mlxsw_sp_mr_erif_sublist,
+ list);
+ sublist->synced = false;
+ if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
+ sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
+ erif_list);
+ if (IS_ERR(sublist))
+ return PTR_ERR(sublist);
+ }
+ }
+
+	/* Append the eRIF to the last sublist's next free index */
+ sublist->erif_indices[sublist->num_erifs++] = erif_index;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
+
+ list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
+ list)
+ mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
+}
+
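+/* Write all not yet synced sublists to the hardware. Every sublist is packed
+ * into one RIGR2 register entry, and every sublist except the last one also
+ * carries a pointer to the next sublist's KVD linear index, forming a linked
+ * list of eRIF entries.
+ */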
+static int
+mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_sp_mr_erif_sublist *curr_sublist;
+ char rigr2_pl[MLXSW_REG_RIGR2_LEN];
+ int err;
+ int i;
+
+ list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
+ if (curr_sublist->synced)
+ continue;
+
+		/* If this is the last sublist, pack it with no next pointer;
+		 * otherwise pack the next sublist's KVD linear index.
+		 */
+ if (list_is_last(&curr_sublist->list,
+ &erif_list->erif_sublists)) {
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ false, 0);
+ } else {
+ struct mlxsw_sp_mr_erif_sublist *next_sublist;
+
+ next_sublist = list_next_entry(curr_sublist, list);
+ mlxsw_reg_rigr2_pack(rigr2_pl,
+ curr_sublist->rigr2_kvdl_index,
+ true,
+ next_sublist->rigr2_kvdl_index);
+ }
+
+ /* Pack all the erifs */
+ for (i = 0; i < curr_sublist->num_erifs; i++) {
+ u16 erif_index = curr_sublist->erif_indices[i];
+
+ mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
+ erif_index);
+ }
+
+ /* Write the entry */
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
+ rigr2_pl);
+ if (err)
+			/* No rollback is needed here because this hardware
+			 * entry is not yet pointed to by any TCAM rule.
+			 */
+ return err;
+ curr_sublist->synced = true;
+ }
+ return 0;
+}
+
+static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
+ struct mlxsw_sp_mr_tcam_erif_list *from)
+{
+ list_splice(&from->erif_sublists, &to->erif_sublists);
+ to->kvdl_index = from->kvdl_index;
+}
+
+struct mlxsw_sp_mr_tcam_route {
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ u32 counter_index;
+ struct parman_item parman_item;
+ struct parman_prio *parman_prio;
+ enum mlxsw_sp_mr_route_action action;
+ struct mlxsw_sp_mr_route_key key;
+ u16 irif_index;
+ u16 min_mtu;
+};
+
+static struct mlxsw_afa_block *
+mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_mr_route_action route_action,
+ u16 irif_index, u32 counter_index,
+ u16 min_mtu,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list)
+{
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
+ if (!afa_block)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_afa_block_append_counter(afa_block, counter_index);
+ if (err)
+ goto err;
+
+ switch (route_action) {
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
+ err = mlxsw_afa_block_append_trap(afa_block,
+ MLXSW_TRAP_ID_ACL1);
+ if (err)
+ goto err;
+ break;
+ case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
+ case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
+ /* If we are about to append a multicast router action, commit
+ * the erif_list.
+ */
+ err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
+ if (err)
+ goto err;
+
+ err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
+ min_mtu, false,
+ erif_list->kvdl_index);
+ if (err)
+ goto err;
+
+ if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
+ err = mlxsw_afa_block_append_trap_and_forward(afa_block,
+ MLXSW_TRAP_ID_ACL2);
+ if (err)
+ goto err;
+ }
+ break;
+ default:
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mlxsw_afa_block_commit(afa_block);
+ if (err)
+ goto err;
+ return afa_block;
+err:
+ mlxsw_afa_block_destroy(afa_block);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
+{
+ mlxsw_afa_block_destroy(afa_block);
+}
+
+static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
+ struct parman_item *parman_item,
+ struct mlxsw_sp_mr_route_key *key,
+ struct mlxsw_afa_block *afa_block)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
+ key->vrid,
+ MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
+ ntohl(key->group.addr4),
+ ntohl(key->group_mask.addr4),
+ ntohl(key->source.addr4),
+ ntohl(key->source_mask.addr4),
+ mlxsw_afa_block_first_set(afa_block));
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
+ struct parman_item *parman_item)
+{
+ char rmft2_pl[MLXSW_REG_RMFT2_LEN];
+
+ mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid,
+ 0, 0, 0, 0, 0, 0, NULL);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
+}
+
+static int
+mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_erif_list *erif_list,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < route_info->erif_num; i++) {
+ u16 erif_index = route_info->erif_indices[i];
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
+ erif_index);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
+ struct mlxsw_sp_mr_tcam_route *route,
+ enum mlxsw_sp_mr_route_prio prio)
+{
+ struct parman_prio *parman_prio = NULL;
+ int err;
+
+ switch (route->key.proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio];
+ err = parman_item_add(mr_tcam->ipv4_tcam_region.parman,
+ parman_prio, &route->parman_item);
+ if (err)
+ return err;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+ route->parman_prio = parman_prio;
+ return 0;
+}
+
+static void
+mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
+ struct mlxsw_sp_mr_tcam_route *route)
+{
+ switch (route->key.proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parman_item_remove(mr_tcam->ipv4_tcam_region.parman,
+ route->parman_prio, &route->parman_item);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static int
+mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
+ void *route_priv,
+ struct mlxsw_sp_mr_route_params *route_params)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+ int err;
+
+ route->key = route_params->key;
+ route->irif_index = route_params->value.irif_index;
+ route->min_mtu = route_params->value.min_mtu;
+ route->action = route_params->value.route_action;
+
+ /* Create the egress RIFs list */
+ mlxsw_sp_mr_erif_list_init(&route->erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
+ &route_params->value);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flow counter */
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ /* Create the flexible action block */
+ route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(route->afa_block)) {
+ err = PTR_ERR(route->afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Allocate place in the TCAM */
+ err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
+ route_params->prio);
+ if (err)
+ goto err_parman_item_add;
+
+ /* Write the route to the TCAM */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, route->afa_block);
+ if (err)
+ goto err_route_replace;
+ return 0;
+
+err_route_replace:
+ mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+err_parman_item_add:
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+err_afa_block_create:
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+err_erif_populate:
+err_counter_alloc:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ return err;
+}
+
+static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
+ void *priv, void *route_priv)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
+ &route->parman_item);
+ mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+}
+
+static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u64 *packets,
+ u64 *bytes)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
+ packets, bytes);
+}
+
+static int
+mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv,
+ enum mlxsw_sp_mr_route_action route_action)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->action = route_action;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 min_mtu)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new flexible action block */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route->action,
+ route->irif_index,
+ route->counter_index,
+ min_mtu,
+ &route->erif_list);
+ if (IS_ERR(afa_block))
+ return PTR_ERR(afa_block);
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err;
+
+ /* Delete the old one */
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ route->afa_block = afa_block;
+ route->min_mtu = min_mtu;
+ return 0;
+err:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+ return err;
+}
+
+static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 irif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return -EINVAL;
+ route->irif_index = irif_index;
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ int err;
+
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
+ erif_index);
+ if (err)
+ return err;
+
+	/* Commit the erif_list only if the route action is not TRAP, as
+	 * TRAP routes do not point at an erif_list in the hardware.
+	 */
+ if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
+ return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
+ &route->erif_list);
+ return 0;
+}
+
+static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
+ void *route_priv, u16 erif_index)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_erif_sublist *erif_sublist;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+ int i;
+
+ /* Create a copy of the original erif_list without the deleted entry */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
+ for (i = 0; i < erif_sublist->num_erifs; i++) {
+ u16 curr_erif = erif_sublist->erif_indices[i];
+
+ if (curr_erif == erif_index)
+ continue;
+ err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
+ curr_erif);
+ if (err)
+ goto err_erif_list_add;
+ }
+ }
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
+ route->irif_index,
+ route->counter_index,
+ route->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_list_add:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
+static int
+mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
+ struct mlxsw_sp_mr_route_info *route_info)
+{
+ struct mlxsw_sp_mr_tcam_route *route = route_priv;
+ struct mlxsw_sp_mr_tcam_erif_list erif_list;
+ struct mlxsw_afa_block *afa_block;
+ int err;
+
+ /* Create a new erif_list */
+ mlxsw_sp_mr_erif_list_init(&erif_list);
+ err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
+ if (err)
+ goto err_erif_populate;
+
+ /* Create the flexible action block pointing to the new erif_list */
+ afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
+ route_info->route_action,
+ route_info->irif_index,
+ route->counter_index,
+ route_info->min_mtu,
+ &erif_list);
+ if (IS_ERR(afa_block)) {
+ err = PTR_ERR(afa_block);
+ goto err_afa_block_create;
+ }
+
+ /* Update the TCAM route entry */
+ err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
+ &route->key, afa_block);
+ if (err)
+ goto err_route_write;
+
+ mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
+ route->afa_block = afa_block;
+ mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
+ route->action = route_info->route_action;
+ route->irif_index = route_info->irif_index;
+ route->min_mtu = route_info->min_mtu;
+ return 0;
+
+err_route_write:
+ mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
+err_afa_block_create:
+err_erif_populate:
+ mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
+ return err;
+}
+
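+/* The multicast TCAM region is allocated with room for 16 rules and is
+ * resized in steps of 16 rules when needed, as done by the parman callbacks
+ * below.
+ */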
+#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
+ mr_tcam_region->rtar_key_type,
+ MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void
+mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
+ mr_tcam_region->rtar_key_type, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
+ unsigned long new_count)
+{
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rtar_pl[MLXSW_REG_RTAR_LEN];
+ u64 max_tcam_rules;
+
+ max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+ if (new_count > max_tcam_rules)
+ return -EINVAL;
+ mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
+ mr_tcam_region->rtar_key_type, new_count);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
+}
+
+static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
+ unsigned long from_index,
+ unsigned long to_index,
+ unsigned long count)
+{
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
+ struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
+ char rrcr_pl[MLXSW_REG_RRCR_LEN];
+
+ mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
+ from_index, count,
+ mr_tcam_region->rtar_key_type, to_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
+}
+
+static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
+ .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
+ .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
+ .resize = mlxsw_sp_mr_tcam_region_parman_resize,
+ .move = mlxsw_sp_mr_tcam_region_parman_move,
+ .algo = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static int
+mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
+ enum mlxsw_reg_rtar_key_type rtar_key_type)
+{
+ struct parman_prio *parman_prios;
+ struct parman *parman;
+ int err;
+ int i;
+
+ mr_tcam_region->rtar_key_type = rtar_key_type;
+ mr_tcam_region->mlxsw_sp = mlxsw_sp;
+
+ err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
+ if (err)
+ return err;
+
+ parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
+ mr_tcam_region);
+ if (!parman) {
+ err = -ENOMEM;
+ goto err_parman_create;
+ }
+ mr_tcam_region->parman = parman;
+
+ parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
+ sizeof(*parman_prios), GFP_KERNEL);
+ if (!parman_prios) {
+ err = -ENOMEM;
+ goto err_parman_prios_alloc;
+ }
+ mr_tcam_region->parman_prios = parman_prios;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_init(mr_tcam_region->parman,
+ &mr_tcam_region->parman_prios[i], i);
+ return 0;
+
+err_parman_prios_alloc:
+ parman_destroy(parman);
+err_parman_create:
+ mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
+ return err;
+}
+
+static void
+mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
+ parman_prio_fini(&mr_tcam_region->parman_prios[i]);
+ kfree(mr_tcam_region->parman_prios);
+ parman_destroy(mr_tcam_region->parman);
+ mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
+}
+
+static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
+ return -EIO;
+
+ return mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
+ &mr_tcam->ipv4_tcam_region,
+ MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST);
+}
+
+static void mlxsw_sp_mr_tcam_fini(void *priv)
+{
+ struct mlxsw_sp_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region);
+}
+
+const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp_mr_tcam),
+ .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
+ .init = mlxsw_sp_mr_tcam_init,
+ .route_create = mlxsw_sp_mr_tcam_route_create,
+ .route_update = mlxsw_sp_mr_tcam_route_update,
+ .route_stats = mlxsw_sp_mr_tcam_route_stats,
+ .route_action_update = mlxsw_sp_mr_tcam_route_action_update,
+ .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
+ .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
+ .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
+ .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
+ .route_destroy = mlxsw_sp_mr_tcam_route_destroy,
+ .fini = mlxsw_sp_mr_tcam_fini,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
new file mode 100644
index 000000000000..f9b59ee25406
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
@@ -0,0 +1,43 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H
+
+#include "spectrum.h"
+#include "spectrum_mr.h"
+
+extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
new file mode 100644
index 000000000000..c33beac5def0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -0,0 +1,276 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Nogah Frankel <nogahf@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/pkt_cls.h>
+#include <net/red.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static int
+mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num, u32 min, u32 max,
+ u32 probability, bool is_ecn)
+{
+ char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err;
+
+ mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
+ mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
+ roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
+ roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
+ probability);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
+ if (err)
+ return err;
+
+ mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+}
+
+static int
+mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+ int tclass_num)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+
+ mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
+ MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
+}
+
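+/* Snapshot the current hardware counters as the stats base of this qdisc, so
+ * that subsequent stats reads report only the deltas accumulated since the
+ * qdisc was (re)configured.
+ */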
+static void
+mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num)
+{
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+
+ mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
+ mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+
+ switch (mlxsw_sp_qdisc->type) {
+ case MLXSW_SP_QDISC_RED:
+ xstats_base->prob_mark = xstats->ecn;
+ xstats_base->prob_drop = xstats->wred_drop[tclass_num];
+ xstats_base->pdrop = xstats->tail_drop[tclass_num];
+
+ mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
+ xstats_base->prob_mark;
+ mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
+ xstats_base->pdrop;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num)
+{
+ int err;
+
+ if (mlxsw_sp_qdisc->handle != handle)
+ return 0;
+
+ err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
+ mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+ mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
+
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num,
+ struct tc_red_qopt_offload_params *p)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 min, max;
+ u64 prob;
+ int err = 0;
+
+ if (p->min > p->max) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: min %u is bigger then max %u\n", p->min,
+ p->max);
+ goto err_bad_param;
+ }
+ if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: max value %u is too big\n", p->max);
+ goto err_bad_param;
+ }
+ if (p->min == 0 || p->max == 0) {
+ dev_err(mlxsw_sp->bus_info->dev,
+ "spectrum: RED: 0 value is illegal for min and max\n");
+ goto err_bad_param;
+ }
+
+	/* Convert the probability from a fraction scaled by 2^32 to a
+	 * percentage, rounding up.
+	 */
+ prob = p->probability;
+ prob *= 100;
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ prob = DIV_ROUND_UP(prob, 1 << 16);
+ min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
+ max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
+ err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+ max, prob, p->is_ecn);
+ if (err)
+ goto err_config;
+
+ mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
+ if (mlxsw_sp_qdisc->handle != handle)
+ mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc,
+ tclass_num);
+
+ mlxsw_sp_qdisc->handle = handle;
+ return 0;
+
+err_bad_param:
+ err = -EINVAL;
+err_config:
+ mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
+ mlxsw_sp_qdisc, tclass_num);
+ return err;
+}
+
+static int
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num, struct red_stats *res)
+{
+ struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+ struct mlxsw_sp_port_xstats *xstats;
+
+ if (mlxsw_sp_qdisc->handle != handle ||
+ mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
+ return -EOPNOTSUPP;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+
+ res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ res->prob_mark = xstats->ecn - xstats_base->prob_mark;
+ res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+ return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ int tclass_num,
+ struct tc_red_qopt_offload_stats *res)
+{
+ u64 tx_bytes, tx_packets, overlimits, drops;
+ struct mlxsw_sp_port_xstats *xstats;
+ struct rtnl_link_stats64 *stats;
+
+ if (mlxsw_sp_qdisc->handle != handle ||
+ mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
+ return -EOPNOTSUPP;
+
+ xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+ stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+
+ tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
+ tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+ overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
+ mlxsw_sp_qdisc->overlimits;
+ drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
+ mlxsw_sp_qdisc->drops;
+
+ _bstats_update(res->bstats, tx_bytes, tx_packets);
+ res->qstats->overlimits += overlimits;
+ res->qstats->drops += drops;
+ res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+ xstats->backlog[tclass_num]);
+
+ mlxsw_sp_qdisc->drops += drops;
+ mlxsw_sp_qdisc->overlimits += overlimits;
+ mlxsw_sp_qdisc->tx_bytes += tx_bytes;
+ mlxsw_sp_qdisc->tx_packets += tx_packets;
+ return 0;
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_red_qopt_offload *p)
+{
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int tclass_num;
+
+ if (p->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
+ tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+ switch (p->command) {
+ case TC_RED_REPLACE:
+ return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ &p->set);
+ case TC_RED_DESTROY:
+ return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num);
+ case TC_RED_XSTATS:
+ return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ p->xstats);
+ case TC_RED_STATS:
+ return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
+ mlxsw_sp_qdisc, tclass_num,
+ &p->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 5189022a1c8c..632c7b229054 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -46,6 +46,8 @@
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
+#include <linux/gcd.h>
+#include <linux/random.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -65,6 +67,8 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
+#include "spectrum_mr.h"
+#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
struct mlxsw_sp_vr;
@@ -78,6 +82,7 @@ struct mlxsw_sp_router {
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
struct rhashtable nexthop_ht;
+ struct list_head nexthop_list;
struct {
struct mlxsw_sp_lpm_tree *trees;
unsigned int tree_count;
@@ -92,6 +97,7 @@ struct mlxsw_sp_router {
struct list_head ipip_list;
bool aborted;
struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
@@ -458,6 +464,7 @@ struct mlxsw_sp_vr {
unsigned int rif_count;
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
+ struct mlxsw_sp_mr_table *mr4_table;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
@@ -652,7 +659,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
- return !!vr->fib4 || !!vr->fib6;
+ return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
@@ -692,8 +699,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
- /* For our purpose, squash main and local table into one */
- if (tb_id == RT_TABLE_LOCAL)
+ /* For our purpose, squash main, default and local tables into one */
+ if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
tb_id = RT_TABLE_MAIN;
return tb_id;
}
@@ -727,14 +734,17 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
- u32 tb_id)
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
- if (!vr)
+ if (!vr) {
+ NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
+ }
vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
@@ -743,9 +753,18 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(vr->fib6);
goto err_fib6_create;
}
+ vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr->mr4_table)) {
+ err = PTR_ERR(vr->mr4_table);
+ goto err_mr_table_create;
+ }
vr->tb_id = tb_id;
return vr;
+err_mr_table_create:
+ mlxsw_sp_fib_destroy(vr->fib6);
+ vr->fib6 = NULL;
err_fib6_create:
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
@@ -754,27 +773,31 @@ err_fib6_create:
static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
+ mlxsw_sp_mr_table_destroy(vr->mr4_table);
+ vr->mr4_table = NULL;
mlxsw_sp_fib_destroy(vr->fib6);
vr->fib6 = NULL;
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
}
-static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_vr *vr;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
if (!vr)
- vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
+ vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
return vr;
}
static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
- list_empty(&vr->fib6->node_list))
+ list_empty(&vr->fib6->node_list) &&
+ mlxsw_sp_mr_table_empty(vr->mr4_table))
mlxsw_sp_vr_destroy(vr);
}
@@ -920,7 +943,7 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
return __dev_get_by_index(net, tun->parms.link);
}
-static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
@@ -932,12 +955,14 @@ static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif_params *params);
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack);
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_ipip_type ipipt,
- struct net_device *ol_dev)
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_params_ipip_lb lb_params;
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -950,7 +975,7 @@ mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
};
- rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
if (IS_ERR(rif))
return ERR_CAST(rif);
return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
@@ -969,7 +994,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
- ol_dev);
+ ol_dev, NULL);
if (IS_ERR(ipip_entry->ol_lb)) {
ret = ERR_CAST(ipip_entry->ol_lb);
goto err_ol_ipip_lb_create;
@@ -977,6 +1002,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
return ipip_entry;
@@ -986,72 +1012,12 @@ err_ol_ipip_lb_create:
}
static void
-mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
{
- WARN_ON(ipip_entry->ref_count > 0);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
-static __be32
-mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
-
- return tun->parms.iph.saddr;
-}
-
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
-{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- };
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
-
- return tun->parms.iph.daddr;
-}
-
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
-{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- };
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
- const union mlxsw_sp_l3addr *addr2)
-{
- return !memcmp(addr1, addr2, sizeof(*addr1));
-}
-
static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
const enum mlxsw_sp_l3proto ul_proto,
@@ -1184,60 +1150,28 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
}
static struct mlxsw_sp_ipip_entry *
-mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_ipip_type ipipt,
- struct net_device *ol_dev)
+mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt,
+ struct net_device *ol_dev)
{
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
- struct mlxsw_sp_router *router = mlxsw_sp->router;
- struct mlxsw_sp_fib_entry *decap_fib_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
- enum mlxsw_sp_l3proto ul_proto;
- union mlxsw_sp_l3addr saddr;
-
- list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
- ipip_list_node) {
- if (ipip_entry->ol_dev == ol_dev)
- goto inc_ref_count;
-
- /* The configuration where several tunnels have the same local
- * address in the same underlay table needs special treatment in
- * the HW. That is currently not implemented in the driver.
- */
- ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
- saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
- if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
- ul_tb_id, ipip_entry))
- return ERR_PTR(-EEXIST);
- }
ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
if (IS_ERR(ipip_entry))
return ipip_entry;
- decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
- if (decap_fib_entry)
- mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
- decap_fib_entry);
-
list_add_tail(&ipip_entry->ipip_list_node,
&mlxsw_sp->router->ipip_list);
-inc_ref_count:
- ++ipip_entry->ref_count;
return ipip_entry;
}
static void
-mlxsw_sp_ipip_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry)
+mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
- if (--ipip_entry->ref_count == 0) {
- list_del(&ipip_entry->ipip_list_node);
- if (ipip_entry->decap_fib_entry)
- mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
- mlxsw_sp_ipip_entry_destroy(ipip_entry);
- }
+ list_del(&ipip_entry->ipip_list_node);
+ mlxsw_sp_ipip_entry_dealloc(ipip_entry);
}
static bool
@@ -1279,6 +1213,455 @@ mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
return NULL;
}
+static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev,
+ enum mlxsw_sp_ipip_type *p_type)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ enum mlxsw_sp_ipip_type ipipt;
+
+ for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
+ ipip_ops = router->ipip_ops_arr[ipipt];
+ if (dev->type == ipip_ops->dev_type) {
+ if (p_type)
+ *p_type = ipipt;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node)
+ if (ipip_entry->ol_dev == ol_dev)
+ return ipip_entry;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_ipip_entry *
+mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ul_dev,
+ struct mlxsw_sp_ipip_entry *start)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
+ ipip_list_node);
+ list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ipip_ul_dev =
+ __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+
+ if (ipip_ul_dev == ul_dev)
+ return ipip_entry;
+ }
+
+ return NULL;
+}
+
+bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
+}
+
+static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ops
+ = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* For deciding whether decap should be offloaded, we don't care about
+ * overlay protocol, so ask whether either one is supported.
+ */
+ return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
+ ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ enum mlxsw_sp_l3proto ul_proto;
+ enum mlxsw_sp_ipip_type ipipt;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
+
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
+ if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ NULL)) {
+ ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
+ ol_dev);
+ if (IS_ERR(ipip_entry))
+ return PTR_ERR(ipip_entry);
+ }
+ }
+
+ return 0;
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct mlxsw_sp_fib_entry *decap_fib_entry;
+
+ decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
+ if (decap_fib_entry)
+ mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
+ decap_fib_entry);
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+}
+
+static void
+mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (ipip_entry)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+}
+
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+static int
+mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool keep_encap,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
+ struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
+
+ new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
+ ipip_entry->ipipt,
+ ipip_entry->ol_dev,
+ extack);
+ if (IS_ERR(new_lb_rif))
+ return PTR_ERR(new_lb_rif);
+ ipip_entry->ol_lb = new_lb_rif;
+
+ if (keep_encap) {
+ list_splice_init(&old_lb_rif->common.nexthop_list,
+ &new_lb_rif->common.nexthop_list);
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
+ }
+
+ mlxsw_sp_rif_destroy(&old_lb_rif->common);
+
+ return 0;
+}
+
+/**
+ * Update the offload related to an IPIP entry. This always updates decap, and
+ * in addition to that it also:
+ * @recreate_loopback: recreates the associated loopback RIF
+ * @keep_encap: updates next hops that use the tunnel netdevice. This is only
+ * relevant when recreate_loopback is true.
+ * @update_nexthops: updates next hops, keeping the current loopback RIF. This
+ * is only relevant when recreate_loopback is false.
+ */
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* RIFs can't be edited, so to update loopback, we need to destroy and
+ * recreate it. That creates a window of opportunity where RALUE and
+ * RATR registers end up referencing a RIF that's already gone. RATRs
+ * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
+ * of RALUE, demote the decap route back.
+ */
+ if (ipip_entry->decap_fib_entry)
+ mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
+
+ if (recreate_loopback) {
+ err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
+ keep_encap, extack);
+ if (err)
+ return err;
+ } else if (update_nexthops) {
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp,
+ &ipip_entry->ol_lb->common);
+ }
+
+ if (ipip_entry->ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry =
+ mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+
+ if (!ipip_entry)
+ return 0;
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, false, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ struct netlink_ext_ack *extack)
+{
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ true, true, false, extack);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev)
+{
+ /* A down underlay device causes encapsulated packets to not be
+ * forwarded, but decap still works. So refresh next hops without
+ * touching anything else.
+ */
+ return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
+ false, false, true, NULL);
+}
+
+static int
+mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ struct netlink_ext_ack *extack)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+ int err;
+
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!ipip_entry)
+ /* A change might make a tunnel eligible for offloading, but
+ * that is currently not implemented. What falls to the slow path
+ * stays there.
+ */
+ return 0;
+
+ /* A change might make a tunnel not eligible for offloading. */
+ if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
+ ipip_entry->ipipt)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
+ return err;
+}
+
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ struct net_device *ol_dev = ipip_entry->ol_dev;
+
+ if (ol_dev->flags & IFF_UP)
+ mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
+ mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
+}
+
+/* The configuration where several tunnels have the same local address in the
+ * same underlay table needs special treatment in the HW. That is currently not
+ * implemented in the driver. This function finds and demotes the first tunnel
+ * with a given source address, except the one passed in via the argument
+ * `except'.
+ */
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ if (ipip_entry != except &&
+ mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
+ ul_tb_id, ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
+
+ list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
+ ipip_list_node) {
+ struct net_device *ipip_ul_dev =
+ __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+
+ if (ipip_ul_dev == ul_dev)
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ }
+}
+
+int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ol_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
+ case NETDEV_UNREGISTER:
+ mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_UP:
+ mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_DOWN:
+ mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
+ return 0;
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
+ ol_dev,
+ extack);
+ return 0;
+ case NETDEV_CHANGE:
+ extack = info->extack;
+ return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
+ ol_dev, extack);
+ }
+ return 0;
+}
+
+static int
+__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct net_device *ul_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct netdev_notifier_changeupper_info *chup;
+ struct netlink_ext_ack *extack;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ chup = container_of(info, typeof(*chup), info);
+ extack = info->extack;
+ if (netif_is_l3_master(chup->upper_dev))
+ return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev,
+ extack);
+ break;
+
+ case NETDEV_UP:
+ return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
+ ul_dev);
+ case NETDEV_DOWN:
+ return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
+ ipip_entry,
+ ul_dev);
+ }
+ return 0;
+}
+
+int
+mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *ul_dev,
+ unsigned long event,
+ struct netdev_notifier_info *info)
+{
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ int err;
+
+ while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
+ ul_dev,
+ ipip_entry))) {
+ err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
+ ul_dev, event, info);
+ if (err) {
+ mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
+ ul_dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
struct mlxsw_sp_neigh_key {
struct neighbour *n;
};
@@ -1316,7 +1699,7 @@ mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
typeof(*neigh_entry),
rif_list_node);
}
- if (neigh_entry->rif_list_node.next == &rif->neigh_list)
+ if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
return NULL;
return list_next_entry(neigh_entry, rif_list_node);
}
@@ -1664,7 +2047,7 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
rauhtd_pl);
if (err) {
- dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
break;
}
num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
@@ -1857,7 +2240,7 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}
-struct mlxsw_sp_neigh_event_work {
+struct mlxsw_sp_netevent_work {
struct work_struct work;
struct mlxsw_sp *mlxsw_sp;
struct neighbour *n;
@@ -1865,11 +2248,11 @@ struct mlxsw_sp_neigh_event_work {
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
- struct mlxsw_sp_neigh_event_work *neigh_work =
- container_of(work, struct mlxsw_sp_neigh_event_work, work);
- struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
struct mlxsw_sp_neigh_entry *neigh_entry;
- struct neighbour *n = neigh_work->n;
+ struct neighbour *n = net_work->n;
unsigned char ha[ETH_ALEN];
bool entry_connected;
u8 nud_state, dead;
@@ -1905,18 +2288,32 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
out:
rtnl_unlock();
neigh_release(n);
- kfree(neigh_work);
+ kfree(net_work);
}
-int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
+
+static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_netevent_work *net_work =
+ container_of(work, struct mlxsw_sp_netevent_work, work);
+ struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
+
+ mlxsw_sp_mp_hash_init(mlxsw_sp);
+ kfree(net_work);
+}
+
+static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- struct mlxsw_sp_neigh_event_work *neigh_work;
+ struct mlxsw_sp_netevent_work *net_work;
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp *mlxsw_sp;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
+ struct net *net;
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -1950,24 +2347,39 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
if (!mlxsw_sp_port)
return NOTIFY_DONE;
- neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
- if (!neigh_work) {
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work) {
mlxsw_sp_port_dev_put(mlxsw_sp_port);
return NOTIFY_BAD;
}
- INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
- neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- neigh_work->n = n;
+ INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
+ net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ net_work->n = n;
/* Take a reference to ensure the neighbour won't be
* destructed until we drop the reference in delayed
* work.
*/
neigh_clone(n);
- mlxsw_core_schedule_work(&neigh_work->work);
+ mlxsw_core_schedule_work(&net_work->work);
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
+ case NETEVENT_MULTIPATH_HASH_UPDATE:
+ net = ptr;
+
+ if (!net_eq(net, &init_net))
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (!net_work)
+ return NOTIFY_BAD;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
+ INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work);
+ net_work->mlxsw_sp = router->mlxsw_sp;
+ mlxsw_core_schedule_work(&net_work->work);
+ break;
}
return NOTIFY_DONE;
@@ -2004,16 +2416,25 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
+static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif *rif)
+{
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+ mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
+ rif->rif_index, rif->addr);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+ mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
- rif_list_node) {
- mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
+ rif_list_node)
mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
- }
}
enum mlxsw_sp_nexthop_type {
@@ -2028,6 +2449,7 @@ struct mlxsw_sp_nexthop_key {
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head rif_list_node;
+ struct list_head router_list_node;
struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
* this belongs to
*/
@@ -2035,6 +2457,9 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_nexthop_key key;
unsigned char gw_addr[sizeof(struct in6_addr)];
int ifindex;
+ int nh_weight;
+ int norm_nh_weight;
+ int num_adj_entries;
struct mlxsw_sp_rif *rif;
u8 should_offload:1, /* set indicates this neigh is connected and
* should be put to KVD linear area of this group.
@@ -2050,6 +2475,8 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
};
+ unsigned int counter_index;
+ bool counter_valid;
};
struct mlxsw_sp_nexthop_group {
@@ -2062,10 +2489,118 @@ struct mlxsw_sp_nexthop_group {
u32 adj_index;
u16 ecmp_size;
u16 count;
+ int sum_norm_weight;
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ if (!devlink_dpipe_table_counter_enabled(devlink,
+ MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
+ return;
+
+ if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
+ return;
+
+ nh->counter_valid = true;
+}
+
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->counter_valid)
+ return;
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
+ nh->counter_valid = false;
+}
+
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter)
+{
+ if (!nh->counter_valid)
+ return -EINVAL;
+
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
+ p_counter, NULL);
+}
+
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh) {
+ if (list_empty(&router->nexthop_list))
+ return NULL;
+ else
+ return list_first_entry(&router->nexthop_list,
+ typeof(*nh), router_list_node);
+ }
+ if (list_is_last(&nh->router_list_node, &router->nexthop_list))
+ return NULL;
+ return list_next_entry(nh, router_list_node);
+}
+
+bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
+{
+ return nh->offloaded;
+}
+
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->offloaded)
+ return NULL;
+ return nh->neigh_entry->ha;
+}
+
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ u32 adj_hash_index = 0;
+ int i;
+
+ if (!nh->offloaded || !nh_grp->adj_index_valid)
+ return -EINVAL;
+
+ *p_adj_index = nh_grp->adj_index;
+ *p_adj_size = nh_grp->ecmp_size;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+
+ if (nh_iter == nh)
+ break;
+ if (nh_iter->offloaded)
+ adj_hash_index += nh_iter->num_adj_entries;
+ }
+
+ *p_adj_hash_index = adj_hash_index;
+ return 0;
+}
+
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
+{
+ return nh->rif;
+}
+
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ int i;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+
+ if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
+ return true;
+ }
+ return false;
+}
+
static struct fib_info *
mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
{
@@ -2323,8 +2858,8 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
char ratr_pl[MLXSW_REG_RATR_LEN];
@@ -2333,12 +2868,33 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
true, MLXSW_REG_RATR_TYPE_ETHERNET,
adj_index, neigh_entry->rif);
mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ if (nh->counter_valid)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ else
+ mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
+
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
-static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
- u32 adj_index,
- struct mlxsw_sp_nexthop *nh)
+int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -2346,6 +2902,24 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
}
+static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
+ u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ int i;
+
+ for (i = 0; i < nh->num_adj_entries; i++) {
+ int err;
+
+ err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
+ nh);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
@@ -2367,7 +2941,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
- err = mlxsw_sp_nexthop_mac_update
+ err = mlxsw_sp_nexthop_update
(mlxsw_sp, adj_index, nh);
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
@@ -2380,7 +2954,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
nh->update = 0;
nh->offloaded = 1;
}
- adj_index++;
+ adj_index += nh->num_adj_entries;
}
return 0;
}
@@ -2425,17 +2999,118 @@ mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
}
}
+static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
+{
+ /* Valid sizes for an adjacency group are:
+ * 1-64, 512, 1024, 2048 and 4096.
+ */
+ if (*p_adj_grp_size <= 64)
+ return;
+ else if (*p_adj_grp_size <= 512)
+ *p_adj_grp_size = 512;
+ else if (*p_adj_grp_size <= 1024)
+ *p_adj_grp_size = 1024;
+ else if (*p_adj_grp_size <= 2048)
+ *p_adj_grp_size = 2048;
+ else
+ *p_adj_grp_size = 4096;
+}
+
+static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
+ unsigned int alloc_size)
+{
+ if (alloc_size >= 4096)
+ *p_adj_grp_size = 4096;
+ else if (alloc_size >= 2048)
+ *p_adj_grp_size = 2048;
+ else if (alloc_size >= 1024)
+ *p_adj_grp_size = 1024;
+ else if (alloc_size >= 512)
+ *p_adj_grp_size = 512;
+}
+
+static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
+ u16 *p_adj_grp_size)
+{
+ unsigned int alloc_size;
+ int err;
+
+ /* Round up the requested group size to the next size supported
+ * by the device and make sure the request can be satisfied.
+ */
+ mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
+ err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size,
+ &alloc_size);
+ if (err)
+ return err;
+ /* It is possible the allocation results in more allocated
+ * entries than requested. Try to use as many of them as
+ * possible.
+ */
+ mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
+
+ return 0;
+}
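(Editorial aside, not part of the patch.) The two rounding helpers above bracket the requested ECMP group size to what the device supports: anything up to 64 entries is used as-is, larger requests snap up to the next of 512, 1024, 2048 or 4096, and mlxsw_sp_fix_adj_grp_size() then picks the largest supported size that still fits the block the KVD linear allocator actually handed back, so surplus allocated entries are not wasted. A minimal user-space sketch of that arithmetic follows; the 2048-entry allocation result is an assumed value standing in for mlxsw_sp_kvdl_alloc_size_query():

#include <stdio.h>

typedef unsigned short u16;

/* Mirror of mlxsw_sp_adj_grp_size_round_up(): snap a requested group size
 * to the next size the device supports (1-64, 512, 1024, 2048, 4096).
 */
static void adj_grp_size_round_up(u16 *p_size)
{
	if (*p_size <= 64)
		return;
	else if (*p_size <= 512)
		*p_size = 512;
	else if (*p_size <= 1024)
		*p_size = 1024;
	else if (*p_size <= 2048)
		*p_size = 2048;
	else
		*p_size = 4096;
}

/* Mirror of mlxsw_sp_adj_grp_size_round_down(): use the largest supported
 * size that fits the block the allocator actually handed back.
 */
static void adj_grp_size_round_down(u16 *p_size, unsigned int alloc_size)
{
	if (alloc_size >= 4096)
		*p_size = 4096;
	else if (alloc_size >= 2048)
		*p_size = 2048;
	else if (alloc_size >= 1024)
		*p_size = 1024;
	else if (alloc_size >= 512)
		*p_size = 512;
}

int main(void)
{
	u16 size = 600;	/* e.g. sum of normalized next-hop weights */

	adj_grp_size_round_up(&size);			/* 600 -> 1024 */
	/* Assume the KVD linear allocator returned a 2048-entry block. */
	adj_grp_size_round_down(&size, 2048);		/* 1024 -> 2048 */
	printf("ecmp_size = %u\n", (unsigned int)size);	/* prints 2048 */
	return 0;
}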
+
+static void
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int i, g = 0, sum_norm_weight = 0;
+ struct mlxsw_sp_nexthop *nh;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ if (g > 0)
+ g = gcd(nh->nh_weight, g);
+ else
+ g = nh->nh_weight;
+ }
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload)
+ continue;
+ nh->norm_nh_weight = nh->nh_weight / g;
+ sum_norm_weight += nh->norm_nh_weight;
+ }
+
+ nh_grp->sum_norm_weight = sum_norm_weight;
+}
+
+static void
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ int total = nh_grp->sum_norm_weight;
+ u16 ecmp_size = nh_grp->ecmp_size;
+ int i, weight = 0, lower_bound = 0;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ int upper_bound;
+
+ if (!nh->should_offload)
+ continue;
+ weight += nh->norm_nh_weight;
+ upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
+ nh->num_adj_entries = upper_bound - lower_bound;
+ lower_bound = upper_bound;
+ }
+}
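(Editorial aside, not part of the patch.) mlxsw_sp_nexthop_group_normalize() and mlxsw_sp_nexthop_group_rebalance() above turn the kernel's per-nexthop weights into per-nexthop adjacency entry counts: the weights of offloadable next hops are divided by their greatest common divisor, the ECMP group size is derived from the normalized sum, and each next hop then gets a share of the group proportional to its normalized weight, using cumulative bounds rounded to the closest entry. The resulting num_adj_entries values are also what mlxsw_sp_nexthop_indexes() later sums to report a next hop's offset within the group. A small stand-alone sketch of the arithmetic, assuming three offloaded next hops with weights 10/20/30 and no group-size rounding (gcd() and DIV_ROUND_CLOSEST() are reimplemented here in place of the kernel helpers):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Stand-in for the kernel's gcd() helper. */
static int gcd(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	int weights[] = { 10, 20, 30 };	/* fib_nh->nh_weight of each next hop */
	int count = 3;
	int norm[3], g = 0, sum_norm_weight = 0;
	int ecmp_size, weight = 0, lower_bound = 0;
	int i;

	/* Normalization: divide every weight by the group's GCD. */
	for (i = 0; i < count; i++)
		g = g ? gcd(weights[i], g) : weights[i];
	for (i = 0; i < count; i++) {
		norm[i] = weights[i] / g;	/* 1, 2, 3 */
		sum_norm_weight += norm[i];	/* 6 */
	}
	ecmp_size = sum_norm_weight;	/* <= 64, so no further rounding here */

	/* Rebalance: hand out adjacency entries proportionally, using
	 * cumulative bounds rounded to the closest entry.
	 */
	for (i = 0; i < count; i++) {
		int upper_bound;

		weight += norm[i];
		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight,
						sum_norm_weight);
		printf("nh%d: %d adjacency entries\n",
		       i, upper_bound - lower_bound);	/* 1, 2, 3 */
		lower_bound = upper_bound;
	}
	return 0;
}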
+
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ u16 ecmp_size, old_ecmp_size;
struct mlxsw_sp_nexthop *nh;
bool offload_change = false;
u32 adj_index;
- u16 ecmp_size = 0;
bool old_adj_index_valid;
u32 old_adj_index;
- u16 old_ecmp_size;
int i;
int err;
@@ -2452,8 +3127,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
if (nh->should_offload)
nh->update = 1;
}
- if (nh->should_offload)
- ecmp_size++;
}
if (!offload_change) {
/* Nothing was added or removed, so no need to reallocate. Just
@@ -2466,12 +3139,19 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
}
return;
}
- if (!ecmp_size)
+ mlxsw_sp_nexthop_group_normalize(nh_grp);
+ if (!nh_grp->sum_norm_weight)
/* No neigh of this group is connected so we just set
* the trap and let everything flow through kernel.
*/
goto set_trap;
+ ecmp_size = nh_grp->sum_norm_weight;
+ err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
+ if (err)
+ /* No valid allocation size available. */
+ goto set_trap;
+
err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
if (err) {
/* We ran out of KVD linear space, just set the
@@ -2486,6 +3166,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nh_grp);
err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
@@ -2655,38 +3336,28 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
neigh_release(n);
}
-static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *dev,
- enum mlxsw_sp_ipip_type *p_type)
+static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
- const struct mlxsw_sp_ipip_ops *ipip_ops;
- enum mlxsw_sp_ipip_type ipipt;
+ struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
- for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
- ipip_ops = router->ipip_ops_arr[ipipt];
- if (dev->type == ipip_ops->dev_type) {
- if (p_type)
- *p_type = ipipt;
- return true;
- }
- }
- return false;
+ return ul_dev ? (ul_dev->flags & IFF_UP) : true;
}
static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_ipip_type ipipt,
struct mlxsw_sp_nexthop *nh,
struct net_device *ol_dev)
{
+ bool removing;
+
if (!nh->nh_grp->gateway || nh->ipip_entry)
return 0;
- nh->ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev);
- if (IS_ERR(nh->ipip_entry))
- return PTR_ERR(nh->ipip_entry);
+ nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ if (!nh->ipip_entry)
+ return -ENOENT;
- __mlxsw_sp_nexthop_neigh_update(nh, false);
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
return 0;
}
@@ -2699,7 +3370,6 @@ static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
return;
__mlxsw_sp_nexthop_neigh_update(nh, true);
- mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry);
nh->ipip_entry = NULL;
}
@@ -2743,7 +3413,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
if (err)
return err;
mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
@@ -2784,11 +3454,19 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
nh->nh_grp = nh_grp;
nh->key.fib_nh = fib_nh;
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+ nh->nh_weight = fib_nh->nh_weight;
+#else
+ nh->nh_weight = 1;
+#endif
memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+
if (!dev)
return 0;
@@ -2812,6 +3490,8 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
@@ -2841,6 +3521,30 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool removing;
+
+ list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ removing = false;
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
+ break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
@@ -3121,7 +3825,7 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
return;
if (mlxsw_sp_fib_entry_should_offload(fib_entry))
mlxsw_sp_fib_entry_offload_set(fib_entry);
- else if (!mlxsw_sp_fib_entry_should_offload(fib_entry))
+ else
mlxsw_sp_fib_entry_offload_unset(fib_entry);
return;
default:
@@ -3576,7 +4280,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
struct mlxsw_sp_vr *vr;
int err;
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
if (IS_ERR(vr))
return ERR_CAST(vr);
fib = mlxsw_sp_vr_fib(vr, proto);
@@ -4000,7 +4704,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV6)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+ err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
if (err)
return err;
mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
@@ -4038,7 +4742,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev = rt->dst.dev;
nh->nh_grp = nh_grp;
+ nh->nh_weight = 1;
memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!dev)
return 0;
@@ -4051,6 +4759,8 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
@@ -4601,6 +5311,75 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info,
+ bool replace)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace);
+}
+
+static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
+ struct mfc_entry_notifier_info *men_info)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc);
+ mlxsw_sp_vr_put(vr);
+}
+
+static int
+mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
+ if (IS_ERR(vr))
+ return PTR_ERR(vr);
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
+ return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev,
+ ven_info->vif_index,
+ ven_info->vif_flags, rif);
+}
+
+static void
+mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
+ struct vif_entry_notifier_info *ven_info)
+{
+ struct mlxsw_sp_vr *vr;
+
+ if (mlxsw_sp->router->aborted)
+ return;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
+ if (WARN_ON(!vr))
+ return;
+
+ mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index);
+ mlxsw_sp_vr_put(vr);
+}
+
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
@@ -4611,6 +5390,10 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
+ /* The multicast router code does not need an abort trap as, by default,
+ * packets that don't match any routes are trapped to the CPU.
+ */
+
proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
@@ -4692,6 +5475,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp_vr_is_used(vr))
continue;
+
+ mlxsw_sp_mr_table_flush(vr->mr4_table);
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
/* If virtual router was only used for IPv4, then it's no
@@ -4724,6 +5509,8 @@ struct mlxsw_sp_fib_event_work {
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
+ struct mfc_entry_notifier_info men_info;
+ struct vif_entry_notifier_info ven_info;
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
@@ -4734,7 +5521,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- struct fib_rule *rule;
bool replace, append;
int err;
@@ -4756,12 +5542,11 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
fib_info_put(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- rule = fib_work->fr_info.rule;
- if (!fib4_rule_default(rule) && !rule->l3mdev)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_rule_put(rule);
+ case FIB_EVENT_RULE_ADD:
+ /* If we get here, a rule was added that we do not support.
+ * Just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
@@ -4779,7 +5564,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- struct fib_rule *rule;
bool replace;
int err;
@@ -4798,12 +5582,58 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- rule = fib_work->fr_info.rule;
- if (!fib6_rule_default(rule) && !rule->l3mdev)
+ case FIB_EVENT_RULE_ADD:
+ /* If we get here, a rule was added that we do not support.
+ * Just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ break;
+ }
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_fib_event_work *fib_work =
+ container_of(work, struct mlxsw_sp_fib_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+ bool replace;
+ int err;
+
+ rtnl_lock();
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD:
+ replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
+ replace);
+ if (err)
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
+ ipmr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
+ ipmr_cache_put(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD:
+ err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
+ &fib_work->ven_info);
+ if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_rule_put(rule);
+ dev_put(fib_work->ven_info.dev);
+ break;
+ case FIB_EVENT_VIF_DEL:
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
+ &fib_work->ven_info);
+ dev_put(fib_work->ven_info.dev);
+ break;
+ case FIB_EVENT_RULE_ADD:
+ /* If we get here, a rule was added that we do not support.
+ * Just do the fib_abort.
+ */
+ mlxsw_sp_router_fib_abort(mlxsw_sp);
break;
}
rtnl_unlock();
@@ -4813,25 +5643,27 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_nh_notifier_info *fnh_info;
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info));
- /* Take referece on fib_info to prevent it from being
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ fib_work->fen_info = *fen_info;
+ /* Take reference on fib_info to prevent it from being
* freed while work is queued. Release it afterwards.
*/
fib_info_hold(fib_work->fen_info.fi);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
- fib_rule_get(fib_work->fr_info.rule);
- break;
case FIB_EVENT_NH_ADD: /* fall through */
case FIB_EVENT_NH_DEL:
- memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info));
+ fnh_info = container_of(info, struct fib_nh_notifier_info,
+ info);
+ fib_work->fnh_info = *fnh_info;
fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
break;
}
@@ -4840,21 +5672,79 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
struct fib_notifier_info *info)
{
+ struct fib6_entry_notifier_info *fen6_info;
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info));
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ fib_work->fen6_info = *fen6_info;
rt6_hold(fib_work->fen6_info.rt);
break;
- case FIB_EVENT_RULE_ADD: /* fall through */
- case FIB_EVENT_RULE_DEL:
- memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
- fib_rule_get(fib_work->fr_info.rule);
+ }
+}
+
+static void
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+ struct fib_notifier_info *info)
+{
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+ memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
+ ipmr_cache_hold(fib_work->men_info.mfc);
+ break;
+ case FIB_EVENT_VIF_ADD: /* fall through */
+ case FIB_EVENT_VIF_DEL:
+ memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
+ dev_hold(fib_work->ven_info.dev);
break;
}
}
+static int mlxsw_sp_router_fib_rule_event(unsigned long event,
+ struct fib_notifier_info *info,
+ struct mlxsw_sp *mlxsw_sp)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct fib_rule_notifier_info *fr_info;
+ struct fib_rule *rule;
+ int err = 0;
+
+ /* nothing to do at the moment */
+ if (event == FIB_EVENT_RULE_DEL)
+ return 0;
+
+ if (mlxsw_sp->router->aborted)
+ return 0;
+
+ fr_info = container_of(info, struct fib_rule_notifier_info, info);
+ rule = fr_info->rule;
+
+ switch (info->family) {
+ case AF_INET:
+ if (!fib4_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ case AF_INET6:
+ if (!fib6_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ case RTNL_FAMILY_IPMR:
+ if (!ipmr_rule_default(rule) && !rule->l3mdev)
+ err = -1;
+ break;
+ }
+
+ if (err < 0)
+ NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
+
+ return err;
+}
+
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
@@ -4862,16 +5752,28 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
struct mlxsw_sp_fib_event_work *fib_work;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
+ int err;
if (!net_eq(info->net, &init_net) ||
- (info->family != AF_INET && info->family != AF_INET6))
+ (info->family != AF_INET && info->family != AF_INET6 &&
+ info->family != RTNL_FAMILY_IPMR))
return NOTIFY_DONE;
+ router = container_of(nb, struct mlxsw_sp_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+ err = mlxsw_sp_router_fib_rule_event(event, info,
+ router->mlxsw_sp);
+ if (!err)
+ return NOTIFY_DONE;
+ }
+
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
if (WARN_ON(!fib_work))
return NOTIFY_BAD;
- router = container_of(nb, struct mlxsw_sp_router, fib_nb);
fib_work->mlxsw_sp = router->mlxsw_sp;
fib_work->event = event;
@@ -4884,6 +5786,10 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
mlxsw_sp_router_fib6_event(fib_work, info);
break;
+ case RTNL_FAMILY_IPMR:
+ INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
+ mlxsw_sp_router_fibmr_event(fib_work, info);
+ break;
}
mlxsw_core_schedule_work(&fib_work->work);
@@ -5044,9 +5950,15 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
return rif->dev->ifindex;
}
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
+{
+ return rif->dev;
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_rif_params *params)
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
{
u32 tb_id = l3mdev_fib_table(params->dev);
const struct mlxsw_sp_rif_ops *ops;
@@ -5060,14 +5972,16 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
ops = mlxsw_sp->router->rif_ops_arr[type];
- vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
+ vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
if (IS_ERR(vr))
return ERR_CAST(vr);
vr->rif_count++;
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
- if (err)
+ if (err) {
+ NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
+ }
rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
if (!rif) {
@@ -5093,11 +6007,17 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
+ err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif);
+ if (err)
+ goto err_mr_rif_add;
+
mlxsw_sp_rif_counters_alloc(rif);
mlxsw_sp->router->rifs[rif_index] = rif;
return rif;
+err_mr_rif_add:
+ ops->deconfigure(rif);
err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
@@ -5122,6 +6042,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
mlxsw_sp_rif_counters_free(rif);
+ mlxsw_sp_mr_rif_del(vr->mr4_table, rif);
ops->deconfigure(rif);
if (fid)
/* Loopback RIFs are not associated with a FID. */
@@ -5147,7 +6068,8 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev)
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -5163,7 +6085,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
};
mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
}
@@ -5218,7 +6140,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
struct net_device *port_dev,
- unsigned long event, u16 vid)
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -5230,7 +6153,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
- l3_dev);
+ l3_dev, extack);
case NETDEV_DOWN:
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
break;
@@ -5240,19 +6163,22 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (netif_is_bridge_port(port_dev) ||
netif_is_lag_port(port_dev) ||
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
+ extack);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
struct net_device *lag_dev,
- unsigned long event, u16 vid)
+ unsigned long event, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct net_device *port_dev;
struct list_head *iter;
@@ -5262,7 +6188,8 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
if (mlxsw_sp_port_dev_check(port_dev)) {
err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
port_dev,
- event, vid);
+ event, vid,
+ extack);
if (err)
return err;
}
@@ -5272,16 +6199,19 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (netif_is_bridge_port(lag_dev))
return 0;
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
+ extack);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_rif_params params = {
@@ -5291,7 +6221,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
switch (event) {
case NETDEV_UP:
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
+ rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
break;
@@ -5305,7 +6235,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
u16 vid = vlan_dev_vlan_id(vlan_dev);
@@ -5315,27 +6246,28 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
if (mlxsw_sp_port_dev_check(real_dev))
return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
- event, vid);
+ event, vid, extack);
else if (netif_is_lag_master(real_dev))
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
- vid);
+ vid, extack);
else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
return 0;
}
static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
- unsigned long event)
+ unsigned long event,
+ struct netlink_ext_ack *extack)
{
if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_inetaddr_port_event(dev, event);
+ return mlxsw_sp_inetaddr_port_event(dev, event, extack);
else if (netif_is_lag_master(dev))
- return mlxsw_sp_inetaddr_lag_event(dev, event);
+ return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, event);
+ return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
else if (is_vlan_dev(dev))
- return mlxsw_sp_inetaddr_vlan_event(dev, event);
+ return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
else
return 0;
}
@@ -5349,6 +6281,32 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
struct mlxsw_sp_rif *rif;
int err = 0;
+ /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
+ if (event == NETDEV_UP)
+ goto out;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
+int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *) ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
mlxsw_sp = mlxsw_sp_lower_get(dev);
if (!mlxsw_sp)
goto out;
@@ -5357,7 +6315,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event);
+ err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
out:
return notifier_from_errno(err);
}
@@ -5386,7 +6344,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(dev, event);
+ __mlxsw_sp_inetaddr_event(dev, event, NULL);
out:
rtnl_unlock();
dev_put(dev);
@@ -5401,6 +6359,10 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
struct net_device *dev = if6->idev->dev;
+ /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
+ if (event == NETDEV_UP)
+ return NOTIFY_DONE;
+
if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
return NOTIFY_DONE;
@@ -5417,6 +6379,28 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
+ struct net_device *dev = i6vi->i6vi_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(rif, dev, event))
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
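These two new *_valid_event handlers consume the NETDEV_UP events that the regular notifiers now skip: the inetaddr/inet6addr validator chains run before an address is committed and carry an extack, so the driver can veto the address with a human-readable reason. A minimal, driver-agnostic sketch of that pattern (the my_* names are hypothetical stand-ins, not mlxsw code):

/* Sketch only: a validator notifier that can veto an IPv4 address before it
 * is committed, reporting the reason through extack. my_can_offload() is a
 * hypothetical predicate standing in for the real driver checks.
 */
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/notifier.h>

static bool my_can_offload(const struct net_device *dev)
{
	return dev->mtu >= 68;	/* placeholder check */
}

static int my_inetaddr_valid_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = ptr;	/* only sent for NETDEV_UP */
	struct net_device *dev = ivi->ivi_dev->dev;

	if (!my_can_offload(dev)) {
		NL_SET_ERR_MSG(ivi->extack, "my_drv: address cannot be offloaded");
		return notifier_from_errno(-EOPNOTSUPP);
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_inetaddr_valid_nb = {
	.notifier_call = my_inetaddr_valid_event,
};

/* register_inetaddr_validator_notifier(&my_inetaddr_valid_nb) at init,
 * unregister_inetaddr_validator_notifier() at exit.
 */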
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
const char *mac, int mtu)
{
@@ -5463,6 +6447,17 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
if (err)
goto err_rif_fdb_op;
+ if (rif->mtu != dev->mtu) {
+ struct mlxsw_sp_vr *vr;
+
+ /* The RIF is relevant only to its mr_table instance, as unlike
+ * unicast routing, in multicast routing a RIF cannot be shared
+ * between several multicast routing tables.
+ */
+ vr = &mlxsw_sp->router->vrs[rif->vr_id];
+ mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu);
+ }
+
ether_addr_copy(rif->addr, dev->dev_addr);
rif->mtu = dev->mtu;
@@ -5478,7 +6473,8 @@ err_rif_edit:
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
- struct net_device *l3_dev)
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif *rif;
@@ -5487,9 +6483,9 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
- return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
+ return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -5500,7 +6496,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
+ __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
}
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -5516,10 +6512,14 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
case NETDEV_PRECHANGEUPPER:
return 0;
case NETDEV_CHANGEUPPER:
- if (info->linking)
- err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
- else
+ if (info->linking) {
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
+ } else {
mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
+ }
break;
}
@@ -5625,7 +6625,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
@@ -5826,7 +6826,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
struct mlxsw_sp_vr *ul_vr;
int err;
- ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id);
+ ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
if (IS_ERR(ul_vr))
return PTR_ERR(ul_vr);
@@ -5930,6 +6930,64 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
+{
+ mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
+}
+
+static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
+{
+ mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
+}
+
+static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
+{
+ bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy;
+
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
+ mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
+ mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
+ if (only_l3)
+ return;
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
+}
+
+static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
+{
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
+ mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
+ mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
+ mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
+ mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
+}
+
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char recr2_pl[MLXSW_REG_RECR2_LEN];
+ u32 seed;
+
+ get_random_bytes(&seed, sizeof(seed));
+ mlxsw_reg_recr2_pack(recr2_pl, seed);
+ mlxsw_sp_mp4_hash_init(recr2_pl);
+ mlxsw_sp_mp6_hash_init(recr2_pl);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
+}
+#else
+static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return 0;
+}
+#endif
+
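mlxsw_sp_mp4_hash_init() keys off init_net.ipv4.sysctl_fib_multipath_hash_policy: with the default policy the hardware hashes ECMP traffic on the IPv4 addresses only, and only when the L4 policy is selected are the protocol and TCP/UDP ports enabled as well. A plain-C illustration of that decision (not mlxsw code; the mixing function is arbitrary and only stands in for the RECR2-seeded hardware hash):

/* Illustrative sketch: how the fib_multipath_hash_policy sysctl maps to the
 * fields fed into an ECMP hash. Policy 0 hashes on the L3 addresses only;
 * policy 1 additionally mixes in the protocol and L4 ports.
 */
#include <stdint.h>

struct flow {
	uint32_t saddr, daddr;	/* IPv4 addresses */
	uint8_t  proto;
	uint16_t sport, dport;	/* meaningful only for TCP/UDP */
};

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;
	h *= 0x9e3779b1u;	/* simple multiplicative mixing, illustration only */
	return h ^ (h >> 16);
}

static uint32_t ecmp_hash(const struct flow *fl, int l4_policy, uint32_t seed)
{
	uint32_t h = seed;

	h = mix(h, fl->saddr);
	h = mix(h, fl->daddr);
	if (l4_policy) {	/* sysctl_fib_multipath_hash_policy != 0 */
		h = mix(h, fl->proto);
		h = mix(h, ((uint32_t)fl->sport << 16) | fl->dport);
	}
	return h;
}

/* nexthop index = ecmp_hash(&fl, policy, seed) % num_nexthops */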
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -5990,10 +7048,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_nexthop_group_ht_init;
+ INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
err = mlxsw_sp_lpm_init(mlxsw_sp);
if (err)
goto err_lpm_init;
+ err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
+ if (err)
+ goto err_mr_init;
+
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
@@ -6002,6 +7065,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_neigh_init;
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ err = mlxsw_sp_mp_hash_init(mlxsw_sp);
+ if (err)
+ goto err_mp_hash_init;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
mlxsw_sp_router_fib_dump_flush);
@@ -6011,10 +7084,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_register_fib_notifier:
+err_mp_hash_init:
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+err_register_netevent_notifier:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
+ mlxsw_sp_mr_fini(mlxsw_sp);
+err_mr_init:
mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
@@ -6034,8 +7112,10 @@ err_router_init:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
+ unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
+ mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 345fcc4f38e9..1fb82246ce96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -62,13 +62,18 @@ enum mlxsw_sp_rif_counter_dir {
};
struct mlxsw_sp_neigh_entry;
+struct mlxsw_sp_nexthop;
+struct mlxsw_sp_ipip_entry;
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index);
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
+u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
+const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
@@ -100,12 +105,44 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
bool adding);
bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry);
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev);
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev);
-__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev);
+int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool recreate_loopback,
+ bool keep_encap,
+ bool update_nexthops,
+ struct netlink_ext_ack *extack);
+void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+bool
+mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_l3proto ul_proto,
+ union mlxsw_sp_l3addr saddr,
+ u32 ul_tb_id,
+ const struct mlxsw_sp_ipip_entry *except);
+struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh);
+unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh);
+int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
+ u32 *p_adj_size, u32 *p_adj_hash_index);
+struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
+#define mlxsw_sp_nexthop_for_each(nh, router) \
+ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
+ nh = mlxsw_sp_nexthop_next(router, nh))
+int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh, u64 *p_counter);
+int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
+
+static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
+ const union mlxsw_sp_l3addr *addr2)
+{
+ return !memcmp(addr1, addr2, sizeof(*addr1));
+}
#endif /* _MLXSW_ROUTER_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index d39ffbfcc436..7b8548e25ae7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -46,8 +46,10 @@
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
+#include "spectrum_router.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"
@@ -67,7 +69,6 @@ struct mlxsw_sp_bridge {
u32 ageing_time;
bool vlan_enabled_exists;
struct list_head bridges_list;
- struct list_head mids_list;
DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
@@ -77,8 +78,10 @@ struct mlxsw_sp_bridge_device {
struct net_device *dev;
struct list_head list;
struct list_head ports_list;
+ struct list_head mids_list;
u8 vlan_enabled:1,
- multicast_enabled:1;
+ multicast_enabled:1,
+ mrouter:1;
const struct mlxsw_sp_bridge_ops *ops;
};
@@ -107,7 +110,8 @@ struct mlxsw_sp_bridge_vlan {
struct mlxsw_sp_bridge_ops {
int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
@@ -121,6 +125,20 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_bridge_port *bridge_port,
u16 fid_index);
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port);
+
+static void
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_device
+ *bridge_device);
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add);
+
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
const struct net_device *br_dev)
@@ -154,6 +172,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ bridge_device->mrouter = br_multicast_router(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
bridge->vlan_enabled_exists = true;
@@ -161,6 +180,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
+ INIT_LIST_HEAD(&bridge_device->mids_list);
list_add(&bridge_device->list, &bridge->bridges_list);
return bridge_device;
@@ -174,6 +194,7 @@ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
if (bridge_device->vlan_enabled)
bridge->vlan_enabled_exists = false;
WARN_ON(!list_empty(&bridge_device->ports_list));
+ WARN_ON(!list_empty(&bridge_device->mids_list));
kfree(bridge_device);
}
@@ -249,7 +270,8 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
bridge_port->dev = brport_dev;
bridge_port->bridge_device = bridge_device;
bridge_port->stp_state = BR_STATE_DISABLED;
- bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
+ bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
bridge_port->ref_count = 1;
@@ -455,7 +477,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev,
&attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
- attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
+ BR_MCAST_FLOOD;
break;
default:
return -EOPNOTSUPP;
@@ -640,8 +663,18 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;
- memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
+ if (bridge_port->bridge_device->multicast_enabled)
+ goto out;
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ brport_flags &
+ BR_MCAST_FLOOD);
+ if (err)
+ return err;
+
+out:
+ memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
return 0;
}
@@ -699,10 +732,10 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
-static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
- struct net_device *orig_dev,
- bool is_port_mc_router)
+static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
@@ -720,15 +753,26 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
MLXSW_SP_FLOOD_TYPE_MC,
- is_port_mc_router);
+ is_port_mrouter);
if (err)
return err;
+ mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
+ is_port_mrouter);
out:
- bridge_port->mrouter = is_port_mc_router;
+ bridge_port->mrouter = is_port_mrouter;
return 0;
}
+static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
+{
+ const struct mlxsw_sp_bridge_device *bridge_device;
+
+ bridge_device = bridge_port->bridge_device;
+ return bridge_device->multicast_enabled ? bridge_port->mrouter :
+ bridge_port->flags & BR_MCAST_FLOOD;
+}
+
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
@@ -749,9 +793,15 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_device)
return 0;
+ if (bridge_device->multicast_enabled != !mc_disabled) {
+ bridge_device->multicast_enabled = !mc_disabled;
+ mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
+ bridge_device);
+ }
+
list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
- bool member = mc_disabled ? true : bridge_port->mrouter;
+ bool member = mlxsw_sp_mc_flood(bridge_port);
err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
bridge_port,
@@ -765,6 +815,60 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
+ u16 mid_idx, bool add)
+{
+ char *smid_pl;
+ int err;
+
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid_pack(smid_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool add)
+{
+ struct mlxsw_sp_mid *mid;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list)
+ mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+}
+
+static int
+mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool is_mrouter)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ if (bridge_device->mrouter != is_mrouter)
+ mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
+ is_mrouter);
+ bridge_device->mrouter = is_mrouter;
+ return 0;
+}
+
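Like the other attribute handlers in this file, the new SWITCHDEV_ATTR_ID_BRIDGE_MROUTER callback follows switchdev's two-phase commit: it bails out in the prepare phase and only touches state in the commit phase. A hedged sketch of the shape of such a handler (my_* names are hypothetical, not mlxsw code):

/* Sketch of the two-phase switchdev attribute commit: prepare may only
 * validate/reserve, the hardware write happens on commit. my_hw_set_mrouter()
 * stands in for the SMID updates done by the real driver.
 */
#include <net/switchdev.h>

struct my_bridge {
	bool mrouter;
};

static int my_hw_set_mrouter(struct my_bridge *br, bool on)
{
	br->mrouter = on;	/* a real driver would program the device here */
	return 0;
}

static int my_attr_br_mrouter_set(struct my_bridge *br,
				  struct switchdev_trans *trans,
				  bool is_mrouter)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;	/* nothing to allocate for this attribute */

	if (br->mrouter != is_mrouter)
		return my_hw_set_mrouter(br, is_mrouter);
	return 0;
}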
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -793,15 +897,20 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
- attr->orig_dev,
- attr->u.mrouter);
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
attr->orig_dev,
attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -810,14 +919,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
-static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
-{
- const struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = bridge_port->bridge_device;
- return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
-}
-
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct mlxsw_sp_bridge_port *bridge_port)
@@ -955,24 +1056,28 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
struct mlxsw_sp_bridge_vlan *bridge_vlan;
struct mlxsw_sp_bridge_port *bridge_port;
u16 vid = mlxsw_sp_port_vlan->vid;
- bool last;
+ bool last_port, last_vlan;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
return;
bridge_port = mlxsw_sp_port_vlan->bridge_port;
+ last_vlan = list_is_singular(&bridge_port->vlans_list);
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
- last = list_is_singular(&bridge_vlan->port_vlan_list);
+ last_port = list_is_singular(&bridge_vlan->port_vlan_list);
list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
mlxsw_sp_bridge_vlan_put(bridge_vlan);
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
- if (last)
+ if (last_port)
mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
bridge_port,
mlxsw_sp_fid_index(fid));
+ if (last_vlan)
+ mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
+
mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
@@ -1182,7 +1287,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
- u16 fid, u16 mid, bool adding)
+ u16 fid, u16 mid_idx, bool adding)
{
char *sfd_pl;
int err;
@@ -1193,16 +1298,16 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
- MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+ MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
return err;
}
-static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
- bool add, bool clear_all_ports)
+static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
+ long *ports_bitmap,
+ bool set_router_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char *smid_pl;
int err, i;
@@ -1210,66 +1315,208 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
if (!smid_pl)
return -ENOMEM;
- mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
- if (clear_all_ports) {
- for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
- if (mlxsw_sp->ports[i])
- mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
+ mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
+ for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
+ if (mlxsw_sp->ports[i])
+ mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
}
+
+ mlxsw_reg_smid_port_mask_set(smid_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
+
+ for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
+ mlxsw_reg_smid_port_set(smid_pl, i, 1);
+
+ mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
+
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ return err;
+}
+
+static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 mid_idx, bool add)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char *smid_pl;
+ int err;
+
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
kfree(smid_pl);
return err;
}
-static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
- const unsigned char *addr,
- u16 fid)
+static struct
+mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr,
+ u16 fid)
{
struct mlxsw_sp_mid *mid;
- list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
return mid;
}
return NULL;
}
-static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
- const unsigned char *addr,
- u16 fid)
+static void
+mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ unsigned long *ports_bitmap)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u64 max_lag_members, i;
+ int lag_id;
+
+ if (!bridge_port->lagged) {
+ set_bit(bridge_port->system_port, ports_bitmap);
+ } else {
+ max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_LAG_MEMBERS);
+ lag_id = bridge_port->lag_id;
+ for (i = 0; i < max_lag_members; i++) {
+ mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
+ lag_id, i);
+ if (mlxsw_sp_port)
+ set_bit(mlxsw_sp_port->local_port,
+ ports_bitmap);
+ }
+ }
+}
+
+static void
+mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp *mlxsw_sp)
{
- struct mlxsw_sp_mid *mid;
+ struct mlxsw_sp_bridge_port *bridge_port;
+
+ list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
+ if (bridge_port->mrouter) {
+ mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
+ bridge_port,
+ flood_bitmap);
+ }
+ }
+}
+
+static bool
+mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mid *mid,
+ struct mlxsw_sp_bridge_device *bridge_device)
+{
+ long *flood_bitmap;
+ int num_of_ports;
+ int alloc_size;
u16 mid_idx;
+ int err;
mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
MLXSW_SP_MID_MAX);
if (mid_idx == MLXSW_SP_MID_MAX)
- return NULL;
+ return false;
+
+ num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
+ alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
+ flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
+ if (!flood_bitmap)
+ return false;
+
+ bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
+ mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
+
+ mid->mid = mid_idx;
+ err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
+ bridge_device->mrouter);
+ kfree(flood_bitmap);
+ if (err)
+ return false;
+
+ err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
+ true);
+ if (err)
+ return false;
+
+ set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
+ mid->in_hw = true;
+ return true;
+}
+
+static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mid *mid)
+{
+ if (!mid->in_hw)
+ return 0;
+
+ clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ mid->in_hw = false;
+ return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
+ false);
+}
+
+static struct
+mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const unsigned char *addr,
+ u16 fid)
+{
+ struct mlxsw_sp_mid *mid;
+ size_t alloc_size;
mid = kzalloc(sizeof(*mid), GFP_KERNEL);
if (!mid)
return NULL;
- set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
+ alloc_size = sizeof(unsigned long) *
+ BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
+
+ mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
+ if (!mid->ports_in_mid)
+ goto err_ports_in_mid_alloc;
+
ether_addr_copy(mid->addr, addr);
mid->fid = fid;
- mid->mid = mid_idx;
- mid->ref_count = 0;
- list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);
+ mid->in_hw = false;
+
+ if (!bridge_device->multicast_enabled)
+ goto out;
+ if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
+ goto err_write_mdb_entry;
+
+out:
+ list_add_tail(&mid->list, &bridge_device->mids_list);
return mid;
+
+err_write_mdb_entry:
+ kfree(mid->ports_in_mid);
+err_ports_in_mid_alloc:
+ kfree(mid);
+ return NULL;
}
-static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mid *mid)
+static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_mid *mid)
{
- if (--mid->ref_count == 0) {
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int err = 0;
+
+ clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
+ if (bitmap_empty(mid->ports_in_mid,
+ mlxsw_core_max_ports(mlxsw_sp->core))) {
+ err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+ kfree(mid->ports_in_mid);
kfree(mid);
- return 1;
}
- return 0;
+ return err;
}
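The reference count on a MID is replaced here by a per-port membership bitmap: ports_in_mid is sized to the maximum number of local ports, a joining port sets its bit, and the hardware MDB entry is removed only once the bitmap becomes empty. A minimal sketch of that bookkeeping, using hypothetical my_* names:

/* Sketch of the per-MID port bitmap bookkeeping used above. */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_mid {
	unsigned long *ports_in_mid;
	unsigned int max_ports;
};

static int my_mid_init(struct my_mid *mid, unsigned int max_ports)
{
	mid->ports_in_mid = kcalloc(BITS_TO_LONGS(max_ports),
				    sizeof(unsigned long), GFP_KERNEL);
	if (!mid->ports_in_mid)
		return -ENOMEM;
	mid->max_ports = max_ports;
	return 0;
}

static void my_mid_port_join(struct my_mid *mid, u8 local_port)
{
	set_bit(local_port, mid->ports_in_mid);
}

/* Returns true when the last member left and the MDB entry can be removed. */
static bool my_mid_port_leave(struct my_mid *mid, u8 local_port)
{
	clear_bit(local_port, mid->ports_in_mid);
	return bitmap_empty(mid->ports_in_mid, mid->max_ports);
}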
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1302,39 +1549,72 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
if (!mid) {
- mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
+ fid_index);
if (!mid) {
netdev_err(dev, "Unable to allocate MC group\n");
return -ENOMEM;
}
}
- mid->ref_count++;
+ set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
+
+ if (!bridge_device->multicast_enabled)
+ return 0;
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
- mid->ref_count == 1);
+ if (bridge_port->mrouter)
+ return 0;
+
+ err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
if (err) {
netdev_err(dev, "Unable to set SMID\n");
goto err_out;
}
- if (mid->ref_count == 1) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
- mid->mid, true);
- if (err) {
- netdev_err(dev, "Unable to set MC SFD\n");
- goto err_out;
- }
- }
-
return 0;
err_out:
- __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
+ mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
return err;
}
+static void
+mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_device
+ *bridge_device)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_mid *mid;
+ bool mc_enabled;
+
+ mc_enabled = bridge_device->multicast_enabled;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
+ if (mc_enabled)
+ mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
+ bridge_device);
+ else
+ mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
+ }
+}
+
+static void
+mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ bool add)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_mid *mid;
+
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list) {
+ if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
+ mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
+ }
+}
+
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
@@ -1399,6 +1679,30 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int
+__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_mid *mid)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+	if (bridge_port->bridge_device->multicast_enabled) {
+		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
+					     false);
+		if (err)
+			netdev_err(dev, "Unable to remove port from SMID\n");
+	}
+
+ err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
+ if (err)
+ netdev_err(dev, "Unable to remove MC SFD\n");
+
+ return err;
+}
+
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -1410,8 +1714,6 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
struct mlxsw_sp_mid *mid;
u16 fid_index;
- u16 mid_idx;
- int err = 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
@@ -1426,25 +1728,33 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
- mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
+ mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
if (!mid) {
netdev_err(dev, "Unable to remove port from MC DB\n");
return -EINVAL;
}
- err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
- if (err)
- netdev_err(dev, "Unable to remove port from SMID\n");
+ return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
+}
- mid_idx = mid->mid;
- if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
- err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
- mid_idx, false);
- if (err)
- netdev_err(dev, "Unable to remove MC SFD\n");
- }
+static void
+mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_bridge_port *bridge_port)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_mid *mid, *tmp;
- return err;
+ bridge_device = bridge_port->bridge_device;
+
+ list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
+ if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
+ __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
+ mid);
+ } else if (bridge_device->multicast_enabled &&
+ bridge_port->mrouter) {
+ mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
+ }
+ }
}
static int mlxsw_sp_port_obj_del(struct net_device *dev,
@@ -1497,12 +1807,15 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (is_vlan_dev(bridge_port->dev))
+ if (is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
+ }
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_port_vlan))
@@ -1559,13 +1872,16 @@ mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev))
+ if (!is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
return -EINVAL;
+ }
vid = vlan_dev_vlan_id(bridge_port->dev);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -1573,7 +1889,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1616,7 +1932,8 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
@@ -1629,7 +1946,7 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_device = bridge_port->bridge_device;
err = bridge_device->ops->port_join(bridge_device, bridge_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, extack);
if (err)
goto err_port_join;
@@ -1981,17 +2298,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
}
-static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_mid *mid, *tmp;
-
- list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
- list_del(&mid->list);
- clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
- kfree(mid);
- }
-}
-
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge;
@@ -2003,7 +2309,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->mlxsw_sp = mlxsw_sp;
INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
- INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
@@ -2014,7 +2319,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- mlxsw_sp_mids_fini(mlxsw_sp);
WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
kfree(mlxsw_sp->bridge);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index f396a1fef633..ec6cef8267ae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -62,6 +62,8 @@ enum {
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
MLXSW_TRAP_ID_IPV4_OSPF = 0x55,
+ MLXSW_TRAP_ID_IPV4_PIM = 0x58,
+ MLXSW_TRAP_ID_RPF = 0x5C,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60,
MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61,
@@ -89,6 +91,10 @@ enum {
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
+ /* Multicast trap used for routes with trap action */
+ MLXSW_TRAP_ID_ACL1 = 0x1C1,
+ /* Multicast trap used for routes with trap-and-forward action */
+ MLXSW_TRAP_ID_ACL2 = 0x1C2,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index c83e4bc50c73..848fc1c5a5dc 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Micrel network device drivers.
#
diff --git a/drivers/net/ethernet/micrel/ks8695net.h b/drivers/net/ethernet/micrel/ks8695net.h
index 80eff6ea5163..b18fad4ad5fd 100644
--- a/drivers/net/ethernet/micrel/ks8695net.h
+++ b/drivers/net/ethernet/micrel/ks8695net.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Micrel KS8695 (Centaur) Ethernet.
*
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index e798fbe08600..52207508744c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4338,11 +4338,11 @@ static void ksz_stop_timer(struct ksz_timer_info *info)
}
static void ksz_init_timer(struct ksz_timer_info *info, int period,
- void (*function)(unsigned long), void *data)
+ void (*function)(struct timer_list *))
{
info->max = 0;
info->period = period;
- setup_timer(&info->timer, function, (unsigned long)data);
+ timer_setup(&info->timer, function, 0);
}
static void ksz_update_timer(struct ksz_timer_info *info)
@@ -6689,9 +6689,9 @@ static void mib_read_work(struct work_struct *work)
}
}
-static void mib_monitor(unsigned long ptr)
+static void mib_monitor(struct timer_list *t)
{
- struct dev_info *hw_priv = (struct dev_info *) ptr;
+ struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
mib_read_work(&hw_priv->mib_read);
@@ -6716,10 +6716,10 @@ static void mib_monitor(unsigned long ptr)
*
* This routine is run in a kernel timer to monitor the network device.
*/
-static void dev_monitor(unsigned long ptr)
+static void dev_monitor(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) ptr;
- struct dev_priv *priv = netdev_priv(dev);
+ struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer);
+ struct net_device *dev = priv->mii_if.dev;
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
struct ksz_port *port = &priv->port;
@@ -6789,7 +6789,7 @@ static int __init netdev_init(struct net_device *dev)
/* 500 ms timeout */
ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
- dev_monitor, dev);
+ dev_monitor);
/* 500 ms timeout */
dev->watchdog_timeo = HZ / 2;
@@ -7065,7 +7065,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
/* 500 ms timeout */
ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
- mib_monitor, hw_priv);
+ mib_monitor);
for (i = 0; i < hw->dev_count; i++) {
dev = alloc_etherdev(sizeof(struct dev_priv));
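This ksz884x conversion, and the myri10ge, natsemi, ns83820, s2io and vxge hunks that follow, are the same mechanical switch to the new timer API: the callback takes a struct timer_list * and recovers its container with from_timer(), and timer_setup() replaces init_timer()/setup_timer() plus the unsigned long data cast. A minimal sketch of the pattern (my_* names are hypothetical):

/* Minimal sketch of the timer API conversion applied throughout this patch. */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_priv {
	struct timer_list watchdog;
	int missed_beats;
};

static void my_watchdog(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer member name. */
	struct my_priv *priv = from_timer(priv, t, watchdog);

	priv->missed_beats++;
	mod_timer(&priv->watchdog, jiffies + HZ / 2);	/* re-arm, 500 ms */
}

static void my_watchdog_start(struct my_priv *priv)
{
	/* Replaces init_timer()/setup_timer() and the unsigned long data cast. */
	timer_setup(&priv->watchdog, my_watchdog, 0);
	mod_timer(&priv->watchdog, jiffies + HZ / 2);
}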
diff --git a/drivers/net/ethernet/microchip/enc28j60_hw.h b/drivers/net/ethernet/microchip/enc28j60_hw.h
index 25b41de49f0e..da4ab172527d 100644
--- a/drivers/net/ethernet/microchip/enc28j60_hw.h
+++ b/drivers/net/ethernet/microchip/enc28j60_hw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* enc28j60_hw.h: EDTP FrameThrower style enc28j60 registers
*
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index 4be73d5553f8..f604a260ede7 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* encx24j600_hw.h: Register definitions
*
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index b171ed2015fe..2521c8c40015 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3501,7 +3501,7 @@ static void myri10ge_watchdog(struct work_struct *work)
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
@@ -3509,7 +3509,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
u32 rx_pause_cnt;
u16 cmd;
- mgp = (struct myri10ge_priv *)arg;
+ mgp = from_timer(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
@@ -3930,8 +3930,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_save_state(pdev);
/* Setup the watchdog timer */
- setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
- (unsigned long)mgp);
+ timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index b7fc26c4f738..cf73810608a2 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MYRI10GE_MCP_H__
#define __MYRI10GE_MCP_H__
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
index 75ec5e7cf50d..95a0095a4a75 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp_gen_header.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
#define __MYRI10GE_MCP_GEN_HEADER_H__
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index 764c532a96d1..cc664977596e 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the National Semi-conductor Sonic devices.
#
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index a6caeb567c0d..d5b28884e21e 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* jazzsonic.c
*
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 3ca6ae7caf55..a42433fb6949 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* macsonic.c
*
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 18af2a23a933..b9a1a9f999ea 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -610,7 +610,7 @@ static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
-static void netdev_timer(unsigned long data);
+static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev);
static int alloc_ring(struct net_device *dev);
@@ -1571,10 +1571,8 @@ static int netdev_open(struct net_device *dev)
dev->name, (int)readl(ioaddr + ChipCmd));
/* Set the timer to check for link beat. */
- init_timer(&np->timer);
+ timer_setup(&np->timer, netdev_timer, 0);
np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
- np->timer.data = (unsigned long)dev;
- np->timer.function = netdev_timer; /* timer handler */
add_timer(&np->timer);
return 0;
@@ -1789,10 +1787,10 @@ static void init_registers(struct net_device *dev)
* this check via dspcfg_workaround sysfs option.
* 3) check of death of the RX path due to OOM
*/
-static void netdev_timer(unsigned long data)
+static void netdev_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_timer(np, t, timer);
+ struct net_device *dev = np->dev;
void __iomem * ioaddr = ns_ioaddr(dev);
int next_tick = NATSEMI_TIMER_FREQ;
const int irq = np->pci_dev->irq;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 729095db3e08..958fced4dacf 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1600,10 +1600,10 @@ static void ns83820_tx_timeout(struct net_device *ndev)
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
-static void ns83820_tx_watch(unsigned long data)
+static void ns83820_tx_watch(struct timer_list *t)
{
- struct net_device *ndev = (void *)data;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = from_timer(dev, t, tx_watchdog);
+ struct net_device *ndev = dev->ndev;
#if defined(DEBUG)
printk("ns83820_tx_watch: %u %u %d\n",
@@ -1652,9 +1652,7 @@ static int ns83820_open(struct net_device *ndev)
writel(0, dev->base + TXDP_HI);
writel(desc, dev->base + TXDP);
- init_timer(&dev->tx_watchdog);
- dev->tx_watchdog.data = (unsigned long)ndev;
- dev->tx_watchdog.function = ns83820_tx_watch;
+ timer_setup(&dev->tx_watchdog, ns83820_tx_watch, 0);
mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
netif_start_queue(ndev); /* FIXME: wait for phy to come up */
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 7b0a8db57af9..421b1a283fed 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for sonic.c
*
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 9ee0f69a83c0..1817deea98a4 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xtsonic.c
*
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 462eda926b1c..b8983e73265a 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -337,12 +337,6 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
-#define S2IO_TIMER_CONF(timer, handle, arg, exp) \
- init_timer(&timer); \
- timer.function = handle; \
- timer.data = (unsigned long)arg; \
- mod_timer(&timer, (jiffies + exp)) \
-
/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
@@ -4193,9 +4187,9 @@ pci_map_failed:
}
static void
-s2io_alarm_handle(unsigned long data)
+s2io_alarm_handle(struct timer_list *t)
{
- struct s2io_nic *sp = (struct s2io_nic *)data;
+ struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
struct net_device *dev = sp->dev;
s2io_handle_errors(dev);
@@ -7186,7 +7180,8 @@ static int s2io_card_up(struct s2io_nic *sp)
return -ENODEV;
}
- S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+ timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
+ mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
set_bit(__S2IO_STATE_CARD_UP, &sp->state);
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 6c5997dc8afc..1a24a7218794 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1094,7 +1094,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
-static void s2io_alarm_handle(unsigned long data);
+static void s2io_alarm_handle(struct timer_list *t);
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id);
static irqreturn_t
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50ea69d88480..b2299f2b2155 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1122,7 +1122,6 @@ static void vxge_set_multicast(struct net_device *dev)
struct netdev_hw_addr *ha;
struct vxgedev *vdev;
int i, mcast_cnt = 0;
- struct __vxge_hw_device *hldev;
struct vxge_vpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1136,7 +1135,6 @@ static void vxge_set_multicast(struct net_device *dev)
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
@@ -1283,7 +1281,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct vxgedev *vdev;
- struct __vxge_hw_device *hldev;
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info_new, mac_info_old;
int vpath_idx = 0;
@@ -1291,7 +1288,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = vdev->devh;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -1534,7 +1530,7 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for"
"vpath:%d", vp_id);
- return status;
+ return status;
}
} else
return VXGE_HW_FAIL;
@@ -1954,19 +1950,19 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
* for all VPATHs. The h/w only uses the lowest numbered VPATH
* when steering frames.
*/
- for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
+ if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
vdev->vpaths[index].device_id);
return status;
- }
- }
+ }
+ }
return status;
}
@@ -1995,7 +1991,7 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return status;
}
}
}
@@ -2177,7 +2173,6 @@ static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
*/
static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
{
- struct net_device *dev;
struct __vxge_hw_device *hldev;
u64 reason;
enum vxge_hw_status status;
@@ -2185,7 +2180,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- dev = vdev->ndev;
hldev = pci_get_drvdata(vdev->pdev);
if (pci_channel_offline(vdev->pdev))
@@ -2480,32 +2474,31 @@ static int vxge_add_isr(struct vxgedev *vdev)
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
+ vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].fifo;
irq_req = 1;
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle,
- 0,
+ vdev->entries[intr_cnt].vector,
+ vxge_rx_msix_napi_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].ring;
irq_req = 1;
break;
@@ -2518,9 +2511,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
vxge_rem_msix_isr(vdev);
vdev->config.intr_type = INTA;
vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA"
- , vdev->ndev->name);
- goto INTA_MODE;
+ "%s: Defaulting to INTA",
+ vdev->ndev->name);
+ goto INTA_MODE;
}
if (irq_req) {
@@ -2597,9 +2590,9 @@ INTA_MODE:
return VXGE_HW_OK;
}
-static void vxge_poll_vp_reset(unsigned long data)
+static void vxge_poll_vp_reset(struct timer_list *t)
{
- struct vxgedev *vdev = (struct vxgedev *)data;
+ struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer);
int i, j = 0;
for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -2616,9 +2609,9 @@ static void vxge_poll_vp_reset(unsigned long data)
mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
}
-static void vxge_poll_vp_lockup(unsigned long data)
+static void vxge_poll_vp_lockup(struct timer_list *t)
{
- struct vxgedev *vdev = (struct vxgedev *)data;
+ struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer);
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
struct vxge_ring *ring;
@@ -2629,7 +2622,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
ring = &vdev->vpaths[i].ring;
/* Truncated to machine word size number of frames */
- rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+ rx_frms = READ_ONCE(ring->stats.rx_frms);
/* Did this vpath received any packets */
if (ring->stats.prev_rx_frms == rx_frms) {
@@ -2713,14 +2706,13 @@ static int vxge_open(struct net_device *dev)
struct vxge_vpath *vpath;
int ret = 0;
int i;
- u64 val64, function_mode;
+ u64 val64;
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d", dev->name, __func__, __LINE__);
vdev = netdev_priv(dev);
hldev = pci_get_drvdata(vdev->pdev);
- function_mode = vdev->config.device_hw_info.function_mode;
/* make sure you have link off by default every time Nic is
* initialized */
@@ -2858,12 +2850,12 @@ static int vxge_open(struct net_device *dev)
vdev->config.rx_pause_enable);
if (vdev->vp_reset_timer.function == NULL)
- vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
+ vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset,
HZ / 2);
/* There is no need to check for RxD leak and RxD lookup on Titan1A */
if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
- vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+ vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup,
HZ / 2);
set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -4512,8 +4504,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
+ ret = -EINVAL;
+ goto _exit3;
}
if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 3a79d93b8445..59a57ff5e96a 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -417,12 +417,10 @@ struct vxge_tx_priv {
module_param(p, int, 0)
static inline
-void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
- struct vxgedev *vdev, unsigned long timeout)
+void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *),
+ unsigned long timeout)
{
- init_timer(timer);
- timer->function = func;
- timer->data = (unsigned long)vdev;
+ timer_setup(timer, func, 0);
mod_timer(timer, jiffies + timeout);
}
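The hunk above converts vxge_os_timer() to the timer_setup()/from_timer() API, matching the callback changes to vxge_poll_vp_reset() and vxge_poll_vp_lockup() earlier in this diff. A minimal stand-alone sketch of the same pattern, using a hypothetical my_dev structure rather than this driver's types, might look like:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
    	struct timer_list poll_timer;	/* embedded timer, like vp_reset_timer */
    	unsigned long polls;
    };

    /* Callback now receives the timer itself, not an unsigned long cookie. */
    static void my_dev_poll(struct timer_list *t)
    {
    	struct my_dev *dev = from_timer(dev, t, poll_timer);

    	dev->polls++;
    	mod_timer(&dev->poll_timer, jiffies + HZ / 2);
    }

    static void my_dev_start(struct my_dev *dev)
    {
    	/* Replaces init_timer() plus manual ->function/->data assignment. */
    	timer_setup(&dev->poll_timer, my_dev_poll, 0);
    	mod_timer(&dev->poll_timer, jiffies + HZ / 2);
    }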
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5f630a24e491..0c3b5dea2858 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1209,9 +1209,6 @@ void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
- struct __vxge_hw_channel *channel;
-
- channel = &ring->channel;
rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
@@ -1359,11 +1356,8 @@ exit:
enum vxge_hw_status vxge_hw_ring_handle_tcode(
struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
- struct __vxge_hw_channel *channel;
enum vxge_hw_status status = VXGE_HW_OK;
- channel = &ring->channel;
-
/* If the t_code is not supported and if the
* t_code is other than 0x5 (unparseable packet
* such as unknown UPV6 header), Drop it !!!
@@ -1399,10 +1393,6 @@ exit:
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
-
writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
@@ -1506,9 +1496,6 @@ void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
{
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
struct vxge_hw_fifo_txd *txdp, *txdp_last;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
@@ -1554,9 +1541,6 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
struct __vxge_hw_fifo_txdl_priv *txdl_priv;
struct vxge_hw_fifo_txd *txdp_last;
struct vxge_hw_fifo_txd *txdp_first;
- struct __vxge_hw_channel *channel;
-
- channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
txdp_first = txdlh;
@@ -1672,10 +1656,7 @@ enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
void *txdlh,
enum vxge_hw_fifo_tcode t_code)
{
- struct __vxge_hw_channel *channel;
-
enum vxge_hw_status status = VXGE_HW_OK;
- channel = &fifo->channel;
if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
status = VXGE_HW_ERR_INVALID_TCODE;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 96e579a15cbe..24c4408b5734 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NFP) += nfp.o
nfp-objs := \
@@ -14,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
@@ -26,8 +28,6 @@ nfp-objs := \
nfp_net_sriov.o \
nfp_netvf_main.o \
nfp_port.o \
- bpf/main.o \
- bpf/offload.o \
nic/main.o
ifeq ($(CONFIG_NFP_APP_FLOWER),y)
@@ -37,11 +37,14 @@ nfp-objs += \
flower/main.o \
flower/match.o \
flower/metadata.o \
- flower/offload.o
+ flower/offload.o \
+ flower/tunnel_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
+ bpf/main.o \
+ bpf/offload.o \
bpf/verifier.o \
bpf/jit.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 239dfbe8a0a1..995e95410b11 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -77,17 +77,6 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return meta->l.prev != &nfp_prog->insns;
}
-static void nfp_prog_free(struct nfp_prog *nfp_prog)
-{
- struct nfp_insn_meta *meta, *tmp;
-
- list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
- list_del(&meta->l);
- kfree(meta);
- }
- kfree(nfp_prog);
-}
-
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
@@ -110,150 +99,7 @@ nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
return offset - nfp_prog->start_off;
}
-/* --- SW reg --- */
-struct nfp_insn_ur_regs {
- enum alu_dst_ab dst_ab;
- u16 dst;
- u16 areg, breg;
- bool swap;
- bool wr_both;
-};
-
-struct nfp_insn_re_regs {
- enum alu_dst_ab dst_ab;
- u8 dst;
- u8 areg, breg;
- bool swap;
- bool wr_both;
- bool i8;
-};
-
-static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_NNR:
- return UR_REG_NN | val;
- case NN_REG_XFER:
- return UR_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~0xff) {
- pr_err("immediate too large\n");
- return 0;
- }
- return UR_REG_IMM_encode(val);
- case NN_REG_NONE:
- return is_dst ? UR_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding %08x\n", swreg);
- return 0;
- }
-}
-
-static int
-swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_unreg(dst, true);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_unreg(rreg, false);
- reg->breg = nfp_swreg_to_unreg(lreg, false);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_unreg(lreg, false);
- reg->breg = nfp_swreg_to_unreg(rreg, false);
- }
-
- return 0;
-}
-
-static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_XFER:
- return RE_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~(0x7f | has_imm8 << 7)) {
- pr_err("immediate too large\n");
- return 0;
- }
- *i8 = val & 0x80;
- return RE_REG_IMM_encode(val & 0x7f);
- case NN_REG_NONE:
- return is_dst ? RE_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding\n");
- return 0;
- }
-}
-
-static int
-swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
- bool has_imm8)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- }
-
- return 0;
-}
-
/* --- Emitters --- */
-static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
- [CMD_TGT_WRITE8] = { 0x00, 0x42 },
- [CMD_TGT_READ8] = { 0x01, 0x43 },
- [CMD_TGT_READ_LE] = { 0x01, 0x40 },
- [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
-};
-
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
@@ -281,7 +127,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+ u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
struct nfp_insn_re_regs reg;
int err;
@@ -296,6 +142,11 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
nfp_prog->error = -EFAULT;
return;
}
+ if (reg.dst_lmextn || reg.src_lmextn) {
+ pr_err("cmd can't use LMextn\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
@@ -340,49 +191,10 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
}
static void
-__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
- u8 byte, bool equal, u16 addr, u8 defer)
-{
- u16 addr_lo, addr_hi;
- u64 insn;
-
- addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
- addr_hi = addr != addr_lo;
-
- insn = OP_BBYTE_BASE |
- FIELD_PREP(OP_BB_A_SRC, areg) |
- FIELD_PREP(OP_BB_BYTE, byte) |
- FIELD_PREP(OP_BB_B_SRC, breg) |
- FIELD_PREP(OP_BB_I8, imm8) |
- FIELD_PREP(OP_BB_EQ, equal) |
- FIELD_PREP(OP_BB_DEFBR, defer) |
- FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
- FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
-
- nfp_prog_push(nfp_prog, insn);
-}
-
-static void
-emit_br_byte_neq(struct nfp_prog *nfp_prog,
- u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
-{
- struct nfp_insn_re_regs reg;
- int err;
-
- err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
- if (err) {
- nfp_prog->error = err;
- return;
- }
-
- __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
- defer);
-}
-
-static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
- enum immed_shift shift, bool wr_both)
+ enum immed_shift shift, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -393,19 +205,21 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
FIELD_PREP(OP_IMMED_WIDTH, width) |
FIELD_PREP(OP_IMMED_INV, invert) |
FIELD_PREP(OP_IMMED_SHIFT, shift) |
- FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
+ FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
enum immed_width width, bool invert, enum immed_shift shift)
{
struct nfp_insn_ur_regs reg;
int err;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ if (swreg_type(dst) == NN_REG_IMM) {
nfp_prog->error = -EFAULT;
return;
}
@@ -417,13 +231,15 @@ emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
}
__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both);
+ invert, shift, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
enum shf_sc sc, u8 shift,
- u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -445,14 +261,16 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_SHF_SHIFT, shift) |
FIELD_PREP(OP_SHF_OP, op) |
FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
- FIELD_PREP(OP_SHF_WR_AB, wr_both);
+ FIELD_PREP(OP_SHF_WR_AB, wr_both) |
+ FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
- enum shf_sc sc, u8 shift)
+emit_shf(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
struct nfp_insn_re_regs reg;
int err;
@@ -464,12 +282,14 @@ emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
}
__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
- reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
- u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -480,13 +300,16 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_ALU_SW, swap) |
FIELD_PREP(OP_ALU_OP, op) |
FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
- FIELD_PREP(OP_ALU_WR_AB, wr_both);
+ FIELD_PREP(OP_ALU_WR_AB, wr_both) |
+ FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+emit_alu(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum alu_op op, swreg rreg)
{
struct nfp_insn_ur_regs reg;
int err;
@@ -498,13 +321,15 @@ emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
}
__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
- reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
- bool zero, bool swap, bool wr_both)
+ bool zero, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -517,33 +342,84 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
FIELD_PREP(OP_LDF_ZF, zero) |
FIELD_PREP(OP_LDF_BMASK, bmask) |
FIELD_PREP(OP_LDF_SHF, shift) |
- FIELD_PREP(OP_LDF_WR_AB, wr_both);
+ FIELD_PREP(OP_LDF_WR_AB, wr_both) |
+ FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
- u32 dst, u8 bmask, u32 src, bool zero)
+emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
+ enum shf_sc sc, u8 shift, bool zero)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ /* Note: ld_field is special as it uses one of the src regs as dst */
+ err = swreg_to_restricted(dst, dst, src, &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
- reg.i8, zero, reg.swap, reg.wr_both);
+ reg.i8, zero, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
-emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
enum shf_sc sc, u8 shift)
{
- emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+ emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
+}
+
+static void
+__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
+ bool dst_lmextn, bool src_lmextn)
+{
+ u64 insn;
+
+ insn = OP_LCSR_BASE |
+ FIELD_PREP(OP_LCSR_A_SRC, areg) |
+ FIELD_PREP(OP_LCSR_B_SRC, breg) |
+ FIELD_PREP(OP_LCSR_WRITE, wr) |
+ FIELD_PREP(OP_LCSR_ADDR, addr) |
+ FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ /* This instruction takes immeds instead of reg_none() for the ignored
+ * operand, but we can't encode 2 immeds in one instr with our normal
+ * swreg infra, so if the param is an immed, we encode it as reg_none() and
+ * copy the immed to both operands.
+ */
+ if (swreg_type(src) == NN_REG_IMM) {
+ err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
+ reg.breg = reg.areg;
+ } else {
+ err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
+ }
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4,
+ false, reg.src_lmextn);
+}
+
+static void emit_nop(struct nfp_prog *nfp_prog)
+{
+ __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}
/* --- Wrappers --- */
@@ -565,7 +441,7 @@ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
return true;
}
-static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
enum immed_shift shift;
u16 val;
@@ -586,7 +462,7 @@ static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
* If the @imm is small enough, encode it directly in the operand and return it,
* otherwise load @imm to a spare register and return its encoding.
*/
-static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(UR_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -599,7 +475,7 @@ static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
* If the @imm is small enough, encode it directly in the operand and return it,
* otherwise load @imm to a spare register and return its encoding.
*/
-static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(RE_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -608,6 +484,12 @@ static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
return tmp_reg;
}
+static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
+{
+ while (count--)
+ emit_nop(nfp_prog);
+}
+
static void
wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
enum br_special special)
@@ -618,78 +500,374 @@ wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
FIELD_PREP(OP_BR_SPECIAL, special);
}
+static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
+{
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
+}
+
static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
- emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+ wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}
static int
-construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
- u16 src, bool src_valid, u8 size)
+data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
unsigned int i;
u16 shift, sz;
- u32 tmp_reg;
/* We load the value from the address indicated in @offset and then
* shift out the data we don't need. Note: this is big endian!
*/
- sz = size < 4 ? 4 : size;
+ sz = max(size, 4);
shift = size < 4 ? 4 - size : 0;
- if (src_valid) {
- /* Calculate the true offset (src_reg + imm) */
- tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_alu(nfp_prog, imm_both(nfp_prog),
- reg_a(src), ALU_OP_ADD, tmp_reg);
- /* Check packet length (size guaranteed to fit b/c it's u8) */
- emit_alu(nfp_prog, imm_a(nfp_prog),
- imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
- } else {
- /* Check packet length */
- tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
- imm_a(nfp_prog));
- emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
- wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
- /* Load data */
- tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
- emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
- }
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pptr_reg(nfp_prog), offset, sz - 1, true);
i = 0;
if (shift)
- emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
reg_xfer(0), SHF_SC_R_SHF, shift * 8);
else
for (; i * 4 < size; i++)
- emit_alu(nfp_prog, reg_both(i),
- reg_none(), ALU_OP_NONE, reg_xfer(i));
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
+
+ return 0;
+}
+
+static int
+data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
+ u8 dst_gpr, int size)
+{
+ unsigned int i;
+ u8 mask, sz;
+
+ /* We load the value from the address indicated in @offset and then
+ * mask out the data we don't need. Note: this is little endian!
+ */
+ sz = max(size, 4);
+ mask = size < 4 ? GENMASK(size - 1, 0) : 0;
+
+ emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0,
+ reg_a(src_gpr), offset, sz / 4 - 1, true);
+
+ i = 0;
+ if (mask)
+ emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
+ reg_xfer(0), SHF_SC_NONE, 0, true);
+ else
+ for (; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
if (i < 2)
- wrp_immed(nfp_prog, reg_both(1), 0);
+ wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
return 0;
}
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
+{
+ swreg tmp_reg;
+
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
+
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
+}
+
static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
- return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+ swreg tmp_reg;
+
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ return data_ld(nfp_prog, tmp_reg, 0, size);
+}
+
+static int
+data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u8 src_gpr, u8 size)
+{
+ unsigned int i;
+
+ for (i = 0; i * 4 < size; i++)
+ wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
+
+ return 0;
}
-static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+static int
+data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
+ u64 imm, u8 size)
{
- emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
- reg_none(), ALU_OP_NONE, reg_b(src));
- emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
- NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+ wrp_immed(nfp_prog, reg_xfer(0), imm);
+ if (size == 8)
+ wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
+
+ emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+ reg_a(dst_gpr), offset, size - 1, true);
+
+ return 0;
+}
+
+typedef int
+(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc);
+
+static int
+wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc)
+{
+ bool should_inc = needs_inc && new_gpr && !last;
+ u32 idx, src_byte;
+ enum shf_sc sc;
+ swreg reg;
+ int shf;
+ u8 mask;
+
+ if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
+ return -EOPNOTSUPP;
+
+ idx = off / 4;
+
+ /* Move the entire word */
+ if (size == 4) {
+ wrp_mov(nfp_prog, reg_both(dst),
+ should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
+ return -EOPNOTSUPP;
+
+ src_byte = off % 4;
+
+ mask = (1 << size) - 1;
+ mask <<= dst_byte;
+
+ if (WARN_ON_ONCE(mask > 0xf))
+ return -EOPNOTSUPP;
+
+ shf = abs(src_byte - dst_byte) * 8;
+ if (src_byte == dst_byte) {
+ sc = SHF_SC_NONE;
+ } else if (src_byte < dst_byte) {
+ shf = 32 - shf;
+ sc = SHF_SC_L_SHF;
+ } else {
+ sc = SHF_SC_R_SHF;
+ }
+
+ /* ld_field can address fewer indexes; if the offset is too large, do RMW.
+ * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
+ */
+ if (idx <= RE_REG_LM_IDX_MAX) {
+ reg = reg_lm(lm3 ? 3 : 0, idx);
+ } else {
+ reg = imm_a(nfp_prog);
+ /* If it's not the first part of the load and we start a new GPR
+ * that means we are loading a second part of the LMEM word into
+ * a new GPR. IOW we've already looked at that LMEM word and
+ * therefore it has been loaded into imm_a().
+ */
+ if (first || !new_gpr)
+ wrp_mov(nfp_prog, reg, reg_lm(0, idx));
+ }
+
+ emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);
+
+ if (should_inc)
+ wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
+
+ return 0;
+}
+
+static int
+wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
+ unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
+ bool needs_inc)
+{
+ bool should_inc = needs_inc && new_gpr && !last;
+ u32 idx, dst_byte;
+ enum shf_sc sc;
+ swreg reg;
+ int shf;
+ u8 mask;
+
+ if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
+ return -EOPNOTSUPP;
+
+ idx = off / 4;
+
+ /* Move the entire word */
+ if (size == 4) {
+ wrp_mov(nfp_prog,
+ should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
+ reg_b(src));
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
+ return -EOPNOTSUPP;
+
+ dst_byte = off % 4;
+
+ mask = (1 << size) - 1;
+ mask <<= dst_byte;
+
+ if (WARN_ON_ONCE(mask > 0xf))
+ return -EOPNOTSUPP;
+
+ shf = abs(src_byte - dst_byte) * 8;
+ if (src_byte == dst_byte) {
+ sc = SHF_SC_NONE;
+ } else if (src_byte < dst_byte) {
+ shf = 32 - shf;
+ sc = SHF_SC_L_SHF;
+ } else {
+ sc = SHF_SC_R_SHF;
+ }
+
+ /* ld_field can address fewer indexes; if the offset is too large, do RMW.
+ * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
+ */
+ if (idx <= RE_REG_LM_IDX_MAX) {
+ reg = reg_lm(lm3 ? 3 : 0, idx);
+ } else {
+ reg = imm_a(nfp_prog);
+ /* Only first and last LMEM locations are going to need RMW,
+ * the middle location will be overwritten fully.
+ */
+ if (first || last)
+ wrp_mov(nfp_prog, reg, reg_lm(0, idx));
+ }
+
+ emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);
+
+ if (new_gpr || last) {
+ if (idx > RE_REG_LM_IDX_MAX)
+ wrp_mov(nfp_prog, reg_lm(0, idx), reg);
+ if (should_inc)
+ wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
+ }
+
+ return 0;
+}
+
+static int
+mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
+ bool clr_gpr, lmem_step step)
+{
+ s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off;
+ bool first = true, last;
+ bool needs_inc = false;
+ swreg stack_off_reg;
+ u8 prev_gpr = 255;
+ u32 gpr_byte = 0;
+ bool lm3 = true;
+ int ret;
+
+ if (meta->ptr_not_const) {
+ /* Use of the last encountered ptr_off is OK, they all have
+ * the same alignment. Depend on low bits of value being
+ * discarded when written to LMaddr register.
+ */
+ stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
+ stack_imm(nfp_prog));
+
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);
+
+ needs_inc = true;
+ } else if (off + size <= 64) {
+ /* We can reach bottom 64B with LMaddr0 */
+ lm3 = false;
+ } else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
+ /* We have to set up a new pointer. If we know the offset
+ * and the entire access falls into a single 32 byte aligned
+ * window we won't have to increment the LM pointer.
+ * The 32 byte alignment is important because the offset is ORed in,
+ * not added, when doing *l$indexN[off].
+ */
+ stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
+
+ off %= 32;
+ } else {
+ stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
+ stack_imm(nfp_prog));
+
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
+
+ needs_inc = true;
+ }
+ if (lm3) {
+ emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
+ /* For size < 4 one slot will be filled by zeroing of upper. */
+ wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+ }
+
+ if (clr_gpr && size < 8)
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+
+ while (size) {
+ u32 slice_end;
+ u8 slice_size;
+
+ slice_size = min(size, 4 - gpr_byte);
+ slice_end = min(off + slice_size, round_up(off + 1, 4));
+ slice_size = slice_end - off;
+
+ last = slice_size == size;
+
+ if (needs_inc)
+ off %= 4;
+
+ ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
+ first, gpr != prev_gpr, last, lm3, needs_inc);
+ if (ret)
+ return ret;
+
+ prev_gpr = gpr;
+ first = false;
+
+ gpr_byte += slice_size;
+ if (gpr_byte >= 4) {
+ gpr_byte -= 4;
+ gpr++;
+ }
+
+ size -= slice_size;
+ off += slice_size;
+ }
return 0;
}
@@ -697,7 +875,7 @@ static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
- u32 tmp_reg;
+ swreg tmp_reg;
if (alu_op == ALU_OP_AND) {
if (!imm)
@@ -714,7 +892,7 @@ wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
if (alu_op == ALU_OP_XOR) {
if (!~imm)
emit_alu(nfp_prog, reg_both(dst), reg_none(),
- ALU_OP_NEG, reg_b(dst));
+ ALU_OP_NOT, reg_b(dst));
if (!imm || !~imm)
return;
}
@@ -815,7 +993,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
u8 reg = insn->dst_reg * 2;
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -844,7 +1022,10 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
enum br_mask br_mask, bool swap)
{
const struct bpf_insn *insn = &meta->insn;
- u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+ u8 areg, breg;
+
+ areg = insn->dst_reg * 2;
+ breg = insn->src_reg * 2;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -863,13 +1044,34 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
+static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
+{
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
+ SHF_SC_R_ROT, 16);
+}
+
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+ u8 dst = insn->dst_reg * 2;
+ u8 src = insn->src_reg * 2;
+
+ if (insn->src_reg == BPF_REG_10) {
+ swreg stack_depth_reg;
+
+ stack_depth_reg = ur_load_imm_any(nfp_prog,
+ nfp_prog->stack_depth,
+ stack_imm(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst),
+ stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else {
+ wrp_reg_mov(nfp_prog, dst, src);
+ wrp_reg_mov(nfp_prog, dst + 1, src + 1);
+ }
return 0;
}
@@ -964,28 +1166,64 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
-static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
+ ALU_OP_SUB, reg_b(insn->dst_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
+ ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
return 0;
}
-static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, 32 - insn->imm);
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst + 1, dst);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ }
- if (insn->imm != 32)
- return 1; /* TODO */
+ return 0;
+}
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, insn->imm);
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst, dst + 1);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ }
return 0;
}
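The new shl_imm64()/shr_imm64() above split each 64-bit eBPF shift across the two 32-bit GPRs backing the register, using a double shift (SHF_SC_R_DSHF) to pull bits across the word boundary when the shift amount is below 32. A rough C model of the value transformation, assuming 0 < imm < 64 and a hypothetical (lo, hi) pair standing in for the GPR pair, is:

    #include <stdint.h>

    struct reg64 { uint32_t lo, hi; };	/* low and high 32-bit halves */

    static void model_shl_imm64(struct reg64 *r, unsigned int imm)
    {
    	if (imm < 32) {
    		/* double shift builds the new high word from both halves */
    		r->hi = (r->hi << imm) | (r->lo >> (32 - imm));
    		r->lo <<= imm;
    	} else if (imm == 32) {
    		r->hi = r->lo;
    		r->lo = 0;
    	} else {		/* 32 < imm < 64 */
    		r->hi = r->lo << (imm - 32);
    		r->lo = 0;
    	}
    }

    static void model_shr_imm64(struct reg64 *r, unsigned int imm)
    {
    	if (imm < 32) {
    		r->lo = (r->lo >> imm) | (r->hi << (32 - imm));
    		r->hi >>= imm;
    	} else if (imm == 32) {
    		r->lo = r->hi;
    		r->hi = 0;
    	} else {		/* 32 < imm < 64 */
    		r->lo = r->hi >> (imm - 32);
    		r->hi = 0;
    	}
    }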
@@ -1060,6 +1298,16 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
}
+static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u8 dst = meta->insn.dst_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -1075,21 +1323,59 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 gpr = insn->dst_reg * 2;
+
+ switch (insn->imm) {
+ case 16:
+ emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
+ SHF_SC_R_ROT, 8);
+ emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
+ SHF_SC_R_SHF, 16);
+
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 32:
+ wrp_end32(nfp_prog, reg_a(gpr), gpr);
+ wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
+ break;
+ case 64:
+ wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
+
+ wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
+ wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
+ break;
+ }
+
+ return 0;
+}
+
static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
- meta->insn.imm);
+ struct nfp_insn_meta *prev = nfp_meta_prev(meta);
+ u32 imm_lo, imm_hi;
+ u8 dst;
+
+ dst = prev->insn.dst_reg * 2;
+ imm_lo = prev->insn.imm;
+ imm_hi = meta->insn.imm;
+
+ wrp_immed(nfp_prog, reg_both(dst), imm_lo);
+
+ /* mov is always 1 insn, load imm may be two, so try to use mov */
+ if (imm_hi == imm_lo)
+ wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
+ else
+ wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
return 0;
}
static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- const struct bpf_insn *insn = &meta->insn;
-
meta->double_cb = imm_ld8_part2;
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
-
return 0;
}
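end_reg32(), added in the hunk above, implements the BPF_ALU | BPF_END byte swap by combining ld_field rotations; the 16- and 32-bit forms also clear the upper half of the 64-bit register. A rough model of the resulting values, again assuming the register is held as a hypothetical (lo, hi) pair of 32-bit GPRs, is:

    #include <stdint.h>

    static void model_end_reg32(uint32_t *lo, uint32_t *hi, int imm)
    {
    	uint32_t tmp;

    	switch (imm) {
    	case 16:	/* swap the low 16 bits, zero the rest */
    		*lo = __builtin_bswap16((uint16_t)*lo);
    		*hi = 0;
    		break;
    	case 32:	/* byte-swap the low word, zero the high word */
    		*lo = __builtin_bswap32(*lo);
    		*hi = 0;
    		break;
    	case 64:	/* words trade places and each is byte-swapped */
    		tmp = *hi;
    		*hi = __builtin_bswap32(*lo);
    		*lo = __builtin_bswap32(tmp);
    		break;
    	}
    }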
@@ -1111,82 +1397,235 @@ static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 1);
+ meta->insn.src_reg * 2, 1);
}
static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 2);
+ meta->insn.src_reg * 2, 2);
}
static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return construct_data_ind_ld(nfp_prog, meta->insn.imm,
- meta->insn.src_reg * 2, true, 4);
+ meta->insn.src_reg * 2, 4);
}
-static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int
+mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off)
{
- if (meta->insn.off == offsetof(struct sk_buff, len))
- emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
- else
+ return mem_op_stack(nfp_prog, meta, size, ptr_off,
+ meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
+ true, wrp_lmem_load);
+}
+
+static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
+{
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
+
+ switch (meta->insn.off) {
+ case offsetof(struct __sk_buff, len):
+ if (size != FIELD_SIZEOF(struct __sk_buff, len))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
+ break;
+ case offsetof(struct __sk_buff, data):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct __sk_buff, data_end):
+ if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
+ return -EOPNOTSUPP;
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
return 0;
}
-static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ u8 size)
{
- u32 dst = reg_both(meta->insn.dst_reg * 2);
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
- if (meta->insn.off != offsetof(struct xdp_md, data) &&
- meta->insn.off != offsetof(struct xdp_md, data_end))
+ switch (meta->insn.off) {
+ case offsetof(struct xdp_md, data):
+ if (size != FIELD_SIZEOF(struct xdp_md, data))
+ return -EOPNOTSUPP;
+ wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
+ break;
+ case offsetof(struct xdp_md, data_end):
+ if (size != FIELD_SIZEOF(struct xdp_md, data_end))
+ return -EOPNOTSUPP;
+ emit_alu(nfp_prog, dst,
+ plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+ break;
+ default:
return -EOPNOTSUPP;
+ }
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
- if (meta->insn.off == offsetof(struct xdp_md, data))
- return 0;
+ return 0;
+}
- emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
+static int
+mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg tmp_reg;
- return 0;
+ tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg,
+ meta->insn.dst_reg * 2, size);
+}
+
+static int
+mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_CTX) {
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP)
+ return mem_ldx_xdp(nfp_prog, meta, size);
+ else
+ return mem_ldx_skb(nfp_prog, meta, size);
+ }
+
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_ldx_data(nfp_prog, meta, size);
+
+ if (meta->ptr.type == PTR_TO_STACK)
+ return mem_ldx_stack(nfp_prog, meta, size,
+ meta->ptr.off + meta->ptr.var_off.value);
+
+ return -EOPNOTSUPP;
+}
+
+static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 1);
+}
+
+static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 2);
}
static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- int ret;
+ return mem_ldx(nfp_prog, meta, 4);
+}
- if (nfp_prog->act == NN_ACT_XDP)
- ret = mem_ldx4_xdp(nfp_prog, meta);
- else
- ret = mem_ldx4_skb(nfp_prog, meta);
+static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_ldx(nfp_prog, meta, 8);
+}
- wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+static int
+mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+ swreg off_reg;
+
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
- return ret;
+ return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ imm, size);
}
-static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
{
- if (meta->insn.off == offsetof(struct sk_buff, mark))
- return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_st_data(nfp_prog, meta, size);
return -EOPNOTSUPP;
}
-static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 1);
+}
+
+static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 2);
+}
+
+static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_st(nfp_prog, meta, 4);
+}
+
+static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ return mem_st(nfp_prog, meta, 8);
+}
+
+static int
+mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ swreg off_reg;
+
+ off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+
+ return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
+ meta->insn.src_reg * 2, size);
+}
+
+static int
+mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size, unsigned int ptr_off)
+{
+ return mem_op_stack(nfp_prog, meta, size, ptr_off,
+ meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
+ false, wrp_lmem_store);
+}
+
+static int
+mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int size)
+{
+ if (meta->ptr.type == PTR_TO_PACKET)
+ return mem_stx_data(nfp_prog, meta, size);
+
+ if (meta->ptr.type == PTR_TO_STACK)
+ return mem_stx_stack(nfp_prog, meta, size,
+ meta->ptr.off + meta->ptr.var_off.value);
+
return -EOPNOTSUPP;
}
+static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 1);
+}
+
+static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 2);
+}
+
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (nfp_prog->act == NN_ACT_XDP)
- return mem_stx4_xdp(nfp_prog, meta);
- return mem_stx4_skb(nfp_prog, meta);
+ return mem_stx(nfp_prog, meta, 4);
+}
+
+static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return mem_stx(nfp_prog, meta, 8);
}
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1202,8 +1641,10 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
- u32 tmp_reg;
+ swreg or1, or2, tmp_reg;
+
+ or1 = reg_a(insn->dst_reg * 2);
+ or2 = reg_b(insn->dst_reg * 2 + 1);
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1230,29 +1671,29 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
}
static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
}
static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
}
static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
}
static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1283,7 +1724,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1292,6 +1733,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
}
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
@@ -1327,22 +1769,22 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
}
static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
}
static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
}
static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true);
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
}
static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -1375,6 +1817,7 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
[BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
[BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_NEG] = neg_reg64,
[BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
[BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
[BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
@@ -1389,7 +1832,9 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_ADD | BPF_K] = add_imm,
[BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
[BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
[BPF_LD | BPF_ABS | BPF_H] = data_ld2,
@@ -1397,8 +1842,18 @@ static const instr_cb_t instr_cb[256] = {
[BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
[BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
[BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
+ [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
[BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
+ [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
+ [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
+ [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
+ [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
+ [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
+ [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
[BPF_JMP | BPF_JA | BPF_K] = jump,
[BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
[BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
@@ -1510,37 +1965,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
static void nfp_intro(struct nfp_prog *nfp_prog)
{
- emit_alu(nfp_prog, pkt_reg(nfp_prog),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
-}
-
-static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
-{
- const u8 act2code[] = {
- [NN_ACT_TC_DROP] = 0x22,
- [NN_ACT_TC_REDIR] = 0x24
- };
- /* Target for aborts */
- nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
- wrp_immed(nfp_prog, reg_both(0), 0);
-
- /* Target for normal exits */
- nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
- /* Legacy TC mode:
- * 0 0x11 -> pass, count as stat0
- * -1 drop 0x22 -> drop, count as stat1
- * redir 0x24 -> redir, count as stat1
- * ife mark 0x21 -> pass, count as stat1
- * ife + tx 0x24 -> redir, count as stat1
- */
- emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
- emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
-
- emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
- emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
- SHF_SC_L_SHF, 16);
+ wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
@@ -1562,8 +1989,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1572,8 +1998,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
/* if R0 > 7 jump to abort */
emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
wrp_immed(nfp_prog, reg_b(2), 0x41221211);
wrp_immed(nfp_prog, reg_b(3), 0x41001211);
@@ -1610,8 +2035,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
/* Target for normal exits */
@@ -1632,24 +2056,21 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
- emit_alu(nfp_prog, reg_a(0),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
static void nfp_outro(struct nfp_prog *nfp_prog)
{
- switch (nfp_prog->act) {
- case NN_ACT_DIRECT:
+ switch (nfp_prog->type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
nfp_outro_tc_da(nfp_prog);
break;
- case NN_ACT_TC_DROP:
- case NN_ACT_TC_REDIR:
- nfp_outro_tc_legacy(nfp_prog);
- break;
- case NN_ACT_XDP:
+ case BPF_PROG_TYPE_XDP:
nfp_outro_xdp(nfp_prog);
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1688,29 +2109,11 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
if (nfp_prog->error)
return nfp_prog->error;
- return nfp_fixup_branches(nfp_prog);
-}
-
-static int
-nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
- unsigned int cnt)
-{
- unsigned int i;
-
- for (i = 0; i < cnt; i++) {
- struct nfp_insn_meta *meta;
-
- meta = kzalloc(sizeof(*meta), GFP_KERNEL);
- if (!meta)
- return -ENOMEM;
-
- meta->insn = prog[i];
- meta->n = i;
-
- list_add_tail(&meta->l, &nfp_prog->insns);
- }
+ wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
+ if (nfp_prog->error)
+ return nfp_prog->error;
- return 0;
+ return nfp_fixup_branches(nfp_prog);
}
/* --- Optimizations --- */
@@ -1737,38 +2140,6 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
-/* Try to rename registers so that program uses only low ones */
-static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
-{
- bool reg_used[MAX_BPF_REG] = {};
- u8 tgt_reg[MAX_BPF_REG] = {};
- struct nfp_insn_meta *meta;
- unsigned int i, j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
- continue;
-
- reg_used[meta->insn.src_reg] = true;
- reg_used[meta->insn.dst_reg] = true;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
- if (!reg_used[i])
- continue;
-
- tgt_reg[i] = j++;
- }
- nfp_prog->num_regs = j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
- meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
- }
-
- return 0;
-}
-
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -1845,79 +2216,47 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
- int ret;
-
nfp_bpf_opt_reg_init(nfp_prog);
- ret = nfp_bpf_opt_reg_rename(nfp_prog);
- if (ret)
- return ret;
-
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
return 0;
}
-/**
- * nfp_bpf_jit() - translate BPF code into NFP assembly
- * @filter: kernel BPF filter struct
- * @prog_mem: memory to store assembler instructions
- * @act: action attached to this eBPF program
- * @prog_start: offset of the first instruction when loaded
- * @prog_done: where to jump on exit
- * @prog_sz: size of @prog_mem in instructions
- * @res: achieved parameters of translation results
- */
-int
-nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
- enum nfp_bpf_action_type act,
- unsigned int prog_start, unsigned int prog_done,
- unsigned int prog_sz, struct nfp_bpf_result *res)
+static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
{
- struct nfp_prog *nfp_prog;
- int ret;
+ int i;
- nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
- if (!nfp_prog)
- return -ENOMEM;
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ int err;
- INIT_LIST_HEAD(&nfp_prog->insns);
- nfp_prog->act = act;
- nfp_prog->start_off = prog_start;
- nfp_prog->tgt_done = prog_done;
+ err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ if (err)
+ return err;
- ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
- if (ret)
- goto out;
+ nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
- ret = nfp_prog_verify(nfp_prog, filter);
- if (ret)
- goto out;
+ ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ }
- ret = nfp_bpf_optimize(nfp_prog);
- if (ret)
- goto out;
+ return 0;
+}
- if (nfp_prog->num_regs <= 7)
- nfp_prog->regs_per_thread = 16;
- else
- nfp_prog->regs_per_thread = 32;
+int nfp_bpf_jit(struct nfp_prog *nfp_prog)
+{
+ int ret;
- nfp_prog->prog = prog_mem;
- nfp_prog->__prog_alloc_len = prog_sz;
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ return ret;
ret = nfp_translate(nfp_prog);
if (ret) {
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
- ret = -EINVAL;
+ return -EINVAL;
}
- res->n_instr = nfp_prog->prog_len;
- res->dense_mode = nfp_prog->num_regs <= 7;
-out:
- nfp_prog_free(nfp_prog);
-
- return ret;
+ return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index be2cf10a2cd7..e379b78e86ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -42,9 +42,11 @@
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
+#ifdef __LITTLE_ENDIAN
if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
return true;
+#endif
return false;
}
@@ -52,28 +54,25 @@ static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
- struct tc_cls_bpf_offload cmd = {
- .prog = prog,
- };
+ bool running, xdp_running;
int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
- if (!nn->dp.bpf_offload_xdp)
- return prog ? -EBUSY : 0;
- cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
- } else {
- if (!prog)
- return 0;
- cmd.command = TC_CLSBPF_ADD;
- }
+ running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+ xdp_running = running && nn->dp.bpf_offload_xdp;
+
+ if (!prog && !xdp_running)
+ return 0;
+ if (prog && running && !xdp_running)
+ return -EBUSY;
- ret = nfp_net_bpf_offload(nn, &cmd);
+ ret = nfp_net_bpf_offload(nn, prog, running);
/* Stop offload if replace not possible */
- if (ret && cmd.command == TC_CLSBPF_REPLACE)
+ if (ret && prog)
nfp_bpf_xdp_offload(app, nn, NULL);
+
nn->dp.bpf_offload_xdp = prog && !ret;
return ret;
}
@@ -83,59 +82,78 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}
-static int
-nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
-{
- struct nfp_net_bpf_priv *priv;
- int ret;
-
- /* Limit to single port, otherwise it's just a NIC */
- if (id > 0) {
- nfp_warn(app->cpp,
- "BPF NIC doesn't support more than one port right now\n");
- nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
- return PTR_ERR_OR_ZERO(nn->port);
- }
-
- priv = kmalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nn->app_priv = priv;
- spin_lock_init(&priv->rx_filter_lock);
- setup_timer(&priv->rx_filter_stats_timer,
- nfp_net_filter_stats_timer, (unsigned long)nn);
-
- ret = nfp_app_nic_vnic_alloc(app, nn, id);
- if (ret)
- kfree(priv);
-
- return ret;
-}
-
static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
if (nn->dp.bpf_offload_xdp)
nfp_bpf_xdp_offload(app, nn, NULL);
- kfree(nn->app_priv);
}
-static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, void *type_data)
+static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_net *nn = cb_priv;
- if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) ||
- !is_classid_clsact_ingress(cls_bpf->common.classid) ||
+ if (type != TC_SETUP_CLSBPF ||
+ !tc_can_offload(nn->dp.netdev) ||
+ !nfp_net_ebpf_capable(nn) ||
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
-
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
- return nfp_net_bpf_offload(nn, cls_bpf);
+ /* Only support TC direct action */
+ if (!cls_bpf->exts_integrated ||
+ tcf_exts_has_actions(cls_bpf->exts)) {
+ nn_err(nn, "only direct action with no legacy actions supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
+ case TC_CLSBPF_ADD:
+ return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_offload(nn, NULL, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_bpf_setup_tc_block(struct net_device *netdev,
+ struct tc_block_offload *f)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_bpf_setup_tc_block_cb,
+ nn, nn);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_bpf_setup_tc_block_cb,
+ nn);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_bpf_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
@@ -149,10 +167,14 @@ const struct nfp_app_type app_bpf = {
.extra_cap = nfp_bpf_extra_cap,
- .vnic_alloc = nfp_bpf_vnic_alloc,
+ .vnic_alloc = nfp_app_nic_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.xdp_offload = nfp_bpf_xdp_offload,
+
+ .bpf_verifier_prep = nfp_bpf_verifier_prep,
+ .bpf_translate = nfp_bpf_translate,
+ .bpf_destroy = nfp_bpf_destroy,
};
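
Note: the reworked nfp_bpf_xdp_offload() above replaces the old TC command plumbing with three booleans — new program present, BPF offload already running, and whether XDP currently owns it. A minimal userspace sketch of that decision table, with a hypothetical helper name (1 stands for "call nfp_net_bpf_offload(nn, prog, running)"):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the reworked flow: nothing to do, busy (TC offload owns the
 * unit), or hand the (prog, running) pair to the offload routine.
 */
static int xdp_offload_decision(bool prog, bool running, bool xdp_running)
{
	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;
	return 1;
}

int main(void)
{
	printf("%d\n", xdp_offload_decision(true, true, false));   /* TC busy */
	printf("%d\n", xdp_offload_decision(false, false, false)); /* no-op */
	printf("%d\n", xdp_offload_decision(true, false, false));  /* offload */
	return 0;
}
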
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4051e943f363..082a15f6dfb5 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -36,10 +36,11 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/list.h>
#include <linux/types.h>
-#include "../nfp_net.h"
+#include "../nfp_asm.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
@@ -53,51 +54,29 @@ enum br_special {
};
enum static_regs {
- STATIC_REG_PKT = 1,
-#define REG_PKT_BANK ALU_DST_A
- STATIC_REG_IMM = 2, /* Bank AB */
+ STATIC_REG_IMM = 21, /* Bank AB */
+ STATIC_REG_STACK = 22, /* Bank A */
+ STATIC_REG_PKT_LEN = 22, /* Bank B */
};
-enum nfp_bpf_action_type {
- NN_ACT_TC_DROP,
- NN_ACT_TC_REDIR,
- NN_ACT_DIRECT,
- NN_ACT_XDP,
+enum pkt_vec {
+ PKT_VEC_PKT_LEN = 0,
+ PKT_VEC_PKT_PTR = 2,
};
-/* Software register representation, hardware encoding in asm.h */
-#define NN_REG_TYPE GENMASK(31, 24)
-#define NN_REG_VAL GENMASK(7, 0)
-
-enum nfp_bpf_reg_type {
- NN_REG_GPR_A = BIT(0),
- NN_REG_GPR_B = BIT(1),
- NN_REG_NNR = BIT(2),
- NN_REG_XFER = BIT(3),
- NN_REG_IMM = BIT(4),
- NN_REG_NONE = BIT(5),
-};
-
-#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
-
-#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
-#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
-#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
-#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
-#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
-#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
-#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
+#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
-#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
-#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+#define stack_reg(np) reg_a(STATIC_REG_STACK)
+#define stack_imm(np) imm_b(np)
+#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
+#define pptr_reg(np) pv_ctm_ptr(np)
+#define imm_a(np) reg_a(STATIC_REG_IMM)
+#define imm_b(np) reg_b(STATIC_REG_IMM)
+#define imm_both(np) reg_both(STATIC_REG_IMM)
-#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
-#define NFP_BPF_ABI_MARK reg_nnr(1)
-#define NFP_BPF_ABI_PKT reg_nnr(2)
-#define NFP_BPF_ABI_LEN reg_nnr(3)
struct nfp_prog;
struct nfp_insn_meta;
@@ -113,6 +92,8 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
/**
* struct nfp_insn_meta - BPF instruction wrapper
* @insn: BPF instruction
+ * @ptr: pointer type for memory operations
+ * @ptr_not_const: pointer is not always constant
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @skip: skip this instruction (optimized out)
@@ -121,6 +102,8 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
*/
struct nfp_insn_meta {
struct bpf_insn insn;
+ struct bpf_reg_state ptr;
+ bool ptr_not_const;
unsigned int off;
unsigned short n;
bool skip;
@@ -156,15 +139,15 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
- * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
- * @num_regs: number of registers used by this program
- * @regs_per_thread: number of basic registers allocated per thread
+ * @verifier_meta: temporary storage for verifier's insn meta
+ * @type: BPF program type
* @start_off: address of the first instruction in the memory
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
* @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong
+ * @stack_depth: max stack depth from the verifier
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -172,10 +155,9 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
- enum nfp_bpf_action_type act;
+ struct nfp_insn_meta *verifier_meta;
- unsigned int num_regs;
- unsigned int regs_per_thread;
+ enum bpf_prog_type type;
unsigned int start_off;
unsigned int tgt_out;
@@ -185,40 +167,26 @@ struct nfp_prog {
unsigned int n_translated;
int error;
- struct list_head insns;
-};
+ unsigned int stack_depth;
-struct nfp_bpf_result {
- unsigned int n_instr;
- bool dense_mode;
+ struct list_head insns;
};
-int
-nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
- unsigned int prog_start, unsigned int prog_done,
- unsigned int prog_sz, struct nfp_bpf_result *res);
+int nfp_bpf_jit(struct nfp_prog *prog);
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+struct netdev_bpf;
+struct nfp_app;
struct nfp_net;
-struct tc_cls_bpf_offload;
-
-/**
- * struct nfp_net_bpf_priv - per-vNIC BPF private data
- * @rx_filter: Filter offload statistics - dropped packets/bytes
- * @rx_filter_prev: Filter offload statistics - values from previous update
- * @rx_filter_change: Jiffies when statistics last changed
- * @rx_filter_stats_timer: Timer for polling filter offload statistics
- * @rx_filter_lock: Lock protecting timer state changes (teardown)
- */
-struct nfp_net_bpf_priv {
- struct nfp_stat_pair rx_filter, rx_filter_prev;
- unsigned long rx_filter_change;
- struct timer_list rx_filter_stats_timer;
- spinlock_t rx_filter_lock;
-};
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
-void nfp_net_filter_stats_timer(unsigned long data);
+int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
+ bool old_prog);
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index a88bb5bc0082..bc879aeb62d4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -51,112 +51,114 @@
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
-void nfp_net_filter_stats_timer(unsigned long data)
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
{
- struct nfp_net *nn = (void *)data;
- struct nfp_net_bpf_priv *priv;
- struct nfp_stat_pair latest;
-
- priv = nn->app_priv;
-
- spin_lock_bh(&priv->rx_filter_lock);
+ unsigned int i;
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
- mod_timer(&priv->rx_filter_stats_timer,
- jiffies + NFP_NET_STAT_POLL_IVL);
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
- spin_unlock_bh(&priv->rx_filter_lock);
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
- latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ meta->insn = prog[i];
+ meta->n = i;
- if (latest.pkts != priv->rx_filter.pkts)
- priv->rx_filter_change = jiffies;
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
- priv->rx_filter = latest;
+ return 0;
}
-static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
+ struct nfp_insn_meta *meta, *tmp;
- priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
- priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
- priv->rx_filter_prev = priv->rx_filter;
- priv->rx_filter_change = jiffies;
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
}
-static int
-nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
- u64 bytes, pkts;
+ struct bpf_prog *prog = bpf->verifier.prog;
+ struct nfp_prog *nfp_prog;
+ int ret;
- pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts;
- bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes;
- bytes -= pkts * ETH_HLEN;
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+ prog->aux->offload->dev_priv = nfp_prog;
- priv->rx_filter_prev = priv->rx_filter;
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->type = prog->type;
- tcf_exts_stats_update(cls_bpf->exts,
- bytes, pkts, priv->rx_filter_change);
+ ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
+ if (ret)
+ goto err_free;
- return 0;
-}
+ nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
+ bpf->verifier.ops = &nfp_bpf_analyzer_ops;
-static int
-nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
-{
- const struct tc_action *a;
- LIST_HEAD(actions);
+ return 0;
- if (!cls_bpf->exts)
- return NN_ACT_XDP;
+err_free:
+ nfp_prog_free(nfp_prog);
- /* TC direct action */
- if (cls_bpf->exts_integrated) {
- if (!tcf_exts_has_actions(cls_bpf->exts))
- return NN_ACT_DIRECT;
+ return ret;
+}
+int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+ unsigned int stack_size;
+ unsigned int max_instr;
+
+ stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
+ if (prog->aux->stack_depth > stack_size) {
+ nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
+ prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP;
}
- /* TC legacy mode */
- if (!tcf_exts_has_one_action(cls_bpf->exts))
- return -EOPNOTSUPP;
+ nfp_prog->stack_depth = prog->aux->stack_depth;
+ nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
- tcf_exts_to_list(cls_bpf->exts, &actions);
- list_for_each_entry(a, &actions, list) {
- if (is_tcf_gact_shot(a))
- return NN_ACT_TC_DROP;
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
- if (is_tcf_mirred_egress_redirect(a) &&
- tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
- return NN_ACT_TC_REDIR;
- }
+ nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+ if (!nfp_prog->prog)
+ return -ENOMEM;
- return -EOPNOTSUPP;
+ return nfp_bpf_jit(nfp_prog);
}
-static int
-nfp_net_bpf_offload_prepare(struct nfp_net *nn,
- struct tc_cls_bpf_offload *cls_bpf,
- struct nfp_bpf_result *res,
- void **code, dma_addr_t *dma_addr, u16 max_instr)
+int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
{
- unsigned int code_sz = max_instr * sizeof(u64);
- enum nfp_bpf_action_type act;
- u16 start_off, done_off;
- unsigned int max_mtu;
- int ret;
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
- return -EOPNOTSUPP;
+ kfree(nfp_prog->prog);
+ nfp_prog_free(nfp_prog);
- ret = nfp_net_bpf_get_act(nn, cls_bpf);
- if (ret < 0)
- return ret;
- act = ret;
+ return 0;
+}
+
+static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
+{
+ struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
+ unsigned int max_mtu;
+ dma_addr_t dma_addr;
+ int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
@@ -164,134 +166,86 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return -EOPNOTSUPP;
}
- start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
- done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
-
- *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
- if (!*code)
+ dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+ nfp_prog->prog_len * sizeof(u64),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(nn->dp.dev, dma_addr))
return -ENOMEM;
- ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
- max_instr, res);
- if (ret)
- goto out;
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
- return 0;
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
-out:
- dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
- return ret;
+ dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
+ DMA_TO_DEVICE);
+
+ return err;
}
-static void
-nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
- void *code, dma_addr_t dma_addr,
- unsigned int code_sz, unsigned int n_instr,
- bool dense_mode)
+static void nfp_net_bpf_start(struct nfp_net *nn)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
- u64 bpf_addr = dma_addr;
int err;
- nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
-
- if (dense_mode)
- bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
-
- nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
- nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
-
- /* Load up the JITed code */
- err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
- if (err)
- nn_err(nn, "FW command error while loading BPF: %d\n", err);
-
/* Enable passing packets through BPF function */
nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
-
- dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
-
- nfp_net_bpf_stats_reset(nn);
- mod_timer(&priv->rx_filter_stats_timer,
- jiffies + NFP_NET_STAT_POLL_IVL);
}
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
- struct nfp_net_bpf_priv *priv = nn->app_priv;
-
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
- spin_lock_bh(&priv->rx_filter_lock);
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
- spin_unlock_bh(&priv->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
- del_timer_sync(&priv->rx_filter_stats_timer);
- nn->dp.bpf_offload_skip_sw = 0;
-
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
+ bool old_prog)
{
- struct nfp_bpf_result res;
- dma_addr_t dma_addr;
- u16 max_instr;
- void *code;
int err;
- max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+ if (prog) {
+ struct bpf_dev_offload *offload = prog->aux->offload;
- switch (cls_bpf->command) {
- case TC_CLSBPF_REPLACE:
- /* There is nothing stopping us from implementing seamless
- * replace but the simple method of loading I adopted in
- * the firmware does not handle atomic replace (i.e. we have to
- * stop the BPF offload and re-enable it). Leaking-in a few
- * frames which didn't have BPF applied in the hardware should
- * be fine if software fallback is available, though.
- */
- if (nn->dp.bpf_offload_skip_sw)
- return -EBUSY;
-
- err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
- &dma_addr, max_instr);
- if (err)
- return err;
+ if (!offload)
+ return -EINVAL;
+ if (offload->netdev != nn->dp.netdev)
+ return -EINVAL;
+ }
- nfp_net_bpf_stop(nn);
- nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
- dma_addr, max_instr * sizeof(u64),
- res.n_instr, res.dense_mode);
- return 0;
+ if (prog && old_prog) {
+ u8 cap;
- case TC_CLSBPF_ADD:
- if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
+ if (!(cap & NFP_NET_BPF_CAP_RELO)) {
+ nn_err(nn, "FW does not support live reload\n");
return -EBUSY;
+ }
+ }
- err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
- &dma_addr, max_instr);
- if (err)
- return err;
-
- nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
- dma_addr, max_instr * sizeof(u64),
- res.n_instr, res.dense_mode);
- return 0;
+ /* Something else is loaded, different program type? */
+ if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
- case TC_CLSBPF_DESTROY:
+ if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
- case TC_CLSBPF_STATS:
- return nfp_net_bpf_stats_update(nn, cls_bpf);
+ err = nfp_net_bpf_load(nn, prog);
+ if (err)
+ return err;
+
+ if (!old_prog)
+ nfp_net_bpf_start(nn);
- default:
- return -EOPNOTSUPP;
- }
+ return 0;
}
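
Note: nfp_net_bpf_load() above switches from a long-lived coherent buffer to a streaming mapping that only spans the firmware reconfig. A minimal sketch of that map -> program registers -> unmap pattern, assuming only a struct device and a kmalloc'ed image (hypothetical helper; the register writes and reconfig call are elided):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int push_image(struct device *dev, void *image, size_t len)
{
	dma_addr_t addr;
	int err = 0;

	addr = dma_map_single(dev, image, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... write addr/len to device registers and kick the reconfig ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return err;
}
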
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 5b783a91b115..8d43491ddd6b 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -40,12 +40,6 @@
#include "main.h"
-/* Analyzer/verifier definitions */
-struct nfp_bpf_analyzer_priv {
- struct nfp_prog *prog;
- struct nfp_insn_meta *meta;
-};
-
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
@@ -76,12 +70,12 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
- const struct bpf_verifier_env *env)
+ struct bpf_verifier_env *env)
{
- const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+ const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
u64 imm;
- if (nfp_prog->act == NN_ACT_XDP)
+ if (nfp_prog->type == BPF_PROG_TYPE_XDP)
return 0;
if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
@@ -94,13 +88,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
imm = reg0->var_off.value;
- if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) {
- pr_info("unsupported exit state: %d, imm: %llx\n",
- reg0->type, imm);
- return -EINVAL;
- }
-
- if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT &&
+ if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
+ imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) {
pr_info("unsupported exit state: %d, imm: %llx\n",
@@ -112,29 +101,76 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
}
static int
-nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
- const struct bpf_verifier_env *env, u8 reg)
+nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
+ struct nfp_insn_meta *meta,
+ const struct bpf_reg_state *reg)
{
- if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ s32 old_off, new_off;
+
+ if (!tnum_is_const(reg->var_off)) {
+ pr_info("variable ptr stack access\n");
return -EINVAL;
+ }
- return 0;
+ if (meta->ptr.type == NOT_INIT)
+ return 0;
+
+ old_off = meta->ptr.off + meta->ptr.var_off.value;
+ new_off = reg->off + reg->var_off.value;
+
+ meta->ptr_not_const |= old_off != new_off;
+
+ if (!meta->ptr_not_const)
+ return 0;
+
+ if (old_off % 4 == new_off % 4)
+ return 0;
+
+ pr_info("stack access changed location was:%d is:%d\n",
+ old_off, new_off);
+ return -EINVAL;
}
static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env, u8 reg_no)
{
- struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
- struct nfp_insn_meta *meta = priv->meta;
+ const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
+ int err;
- meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
- priv->meta = meta;
+ if (reg->type != PTR_TO_CTX &&
+ reg->type != PTR_TO_STACK &&
+ reg->type != PTR_TO_PACKET) {
+ pr_info("unsupported ptr type: %d\n", reg->type);
+ return -EINVAL;
+ }
- if (meta->insn.src_reg == BPF_REG_10 ||
- meta->insn.dst_reg == BPF_REG_10) {
- pr_err("stack not yet supported\n");
+ if (reg->type == PTR_TO_STACK) {
+ err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+ if (err)
+ return err;
+ }
+
+ if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
+ pr_info("ptr type changed for instruction %d -> %d\n",
+ meta->ptr.type, reg->type);
return -EINVAL;
}
+
+ meta->ptr = *reg;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
+ struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
+
+ meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
+ nfp_prog->verifier_meta = meta;
+
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
pr_err("program uses extended registers - jit hardening?\n");
@@ -142,37 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
}
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
- return nfp_bpf_check_exit(priv->prog, env);
+ return nfp_bpf_check_exit(nfp_prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.src_reg);
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
+ meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
- return nfp_bpf_check_ctx_ptr(priv->prog, env,
- meta->insn.dst_reg);
+ return nfp_bpf_check_ptr(nfp_prog, meta, env,
+ meta->insn.dst_reg);
return 0;
}
-static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
-
-int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
-{
- struct nfp_bpf_analyzer_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->prog = nfp_prog;
- priv->meta = nfp_prog_first_meta(nfp_prog);
-
- ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
-
- kfree(priv);
-
- return ret;
-}
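
Note: nfp_bpf_check_stack_access() above lets a stack pointer's offset differ between verifier passes only if it lands on the same byte lane within a 4-byte word — presumably the generated code can recompute the word index at runtime but not the sub-word byte select. A small standalone sketch of that congruence test (hypothetical helper name):

#include <stdbool.h>
#include <stdio.h>

/* A pointer may move between passes as long as its offset keeps the
 * same position within a 4-byte word.
 */
static bool stack_off_compatible(int old_off, int new_off)
{
	if (old_off == new_off)
		return true;
	return old_off % 4 == new_off % 4;
}

int main(void)
{
	printf("%d\n", stack_off_compatible(-8, -16)); /* 1: both word-aligned */
	printf("%d\n", stack_off_compatible(-8, -13)); /* 0: byte lane changed */
	return 0;
}
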
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 8ea9320014ee..c1c595f8bb87 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -36,7 +36,9 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
#include "cmsg.h"
#include "main.h"
@@ -45,13 +47,9 @@
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
- u16 tmp_pop_vlan_op;
- tmp_pop_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN);
-
- pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op);
+ pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
+ pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
pop_vlan->reserved = 0;
}
@@ -60,64 +58,373 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
const struct tc_action *action)
{
size_t act_size = sizeof(struct nfp_fl_push_vlan);
- struct tcf_vlan *vlan = to_vlan(action);
u16 tmp_push_vlan_tci;
- u16 tmp_push_vlan_op;
-
- tmp_push_vlan_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN);
- push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op);
- /* Set action push vlan parameters. */
+ push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
+ push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
push_vlan->reserved = 0;
push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
tmp_push_vlan_tci =
- FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
NFP_FL_PUSH_VLAN_CFI;
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
+static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (!out_dev->rtnl_link_ops)
+ return false;
+
+ if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+
+ return false;
+}
+
static int
nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
struct nfp_fl_payload *nfp_flow, bool last,
- struct net_device *in_dev)
+ struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
+ int *tun_out_cnt)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct net_device *out_dev;
- u16 tmp_output_op;
+ u16 tmp_flags;
int ifindex;
- /* Set action opcode to output action. */
- tmp_output_op =
- FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) |
- FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT);
-
- output->a_op = cpu_to_be16(tmp_output_op);
-
- /* Set action output parameters. */
- output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0);
+ output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
+ output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
ifindex = tcf_mirred_ifindex(action);
out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
if (!out_dev)
return -EOPNOTSUPP;
- /* Only offload egress ports are on the same device as the ingress
- * port.
+ tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
+
+ if (tun_type) {
+ /* Verify the egress netdev matches the tunnel type. */
+ if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+ return -EOPNOTSUPP;
+
+ if (*tun_out_cnt)
+ return -EOPNOTSUPP;
+ (*tun_out_cnt)++;
+
+ output->flags = cpu_to_be16(tmp_flags |
+ NFP_FL_OUT_FLAGS_USE_TUN);
+ output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ } else {
+ /* Set action output parameters. */
+ output->flags = cpu_to_be16(tmp_flags);
+
+ /* Only offload if egress ports are on the same device as the
+ * ingress port.
+ */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ if (!nfp_netdev_is_nfp_repr(out_dev))
+ return -EOPNOTSUPP;
+
+ output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
+ if (!output->port)
+ return -EOPNOTSUPP;
+ }
+ nfp_flow->meta.shortcut = output->port;
+
+ return 0;
+}
+
+static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+{
+ struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+
+ return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+}
+
+static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
+{
+ size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
+ struct nfp_fl_pre_tunnel *pre_tun_act;
+
+ /* Pre_tunnel action must be first on action list.
+	 * If other actions already exist they need to be pushed forward.
+	 * If other actions already exist they need to be pushed forward.
*/
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ if (act_len)
+ memmove(act_data + act_size, act_data, act_len);
+
+ pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;
+
+ memset(pre_tun_act, 0, act_size);
+
+ pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
+ pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ return pre_tun_act;
+}
+
+static int
+nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
+ const struct tc_action *action,
+ struct nfp_fl_pre_tunnel *pre_tun)
+{
+ struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
+ size_t act_size = sizeof(struct nfp_fl_set_vxlan);
+ u32 tmp_set_vxlan_type_index = 0;
+ /* Currently support one pre-tunnel so index is always 0. */
+ int pretun_idx = 0;
+
+ if (vxlan->options_len) {
+ /* Do not support options e.g. vxlan gpe. */
return -EOPNOTSUPP;
- if (!nfp_netdev_is_nfp_repr(out_dev))
+ }
+
+ set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* Set tunnel type and pre-tunnel index. */
+ tmp_set_vxlan_type_index |=
+ FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+ FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+
+ set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
+
+ set_vxlan->tun_id = vxlan->key.tun_id;
+ set_vxlan->tun_flags = vxlan->key.tun_flags;
+ set_vxlan->ipv4_ttl = vxlan->key.ttl;
+ set_vxlan->ipv4_tos = vxlan->key.tos;
+
+ /* Complete pre_tunnel action. */
+ pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+
+ return 0;
+}
+
+static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
+{
+ u32 oldvalue = get_unaligned((u32 *)p_exact);
+ u32 oldmask = get_unaligned((u32 *)p_mask);
+
+ value &= mask;
+ value |= oldvalue & ~mask;
+
+ put_unaligned(oldmask | mask, (u32 *)p_mask);
+ put_unaligned(value, (u32 *)p_exact);
+}
+
+static int
+nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_eth *set_eth)
+{
+ u32 exact, mask;
+
+ if (off + 4 > ETH_ALEN * 2)
return -EOPNOTSUPP;
- output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
- if (!output->port)
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
return -EOPNOTSUPP;
- nfp_flow->meta.shortcut = output->port;
+ nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
+ &set_eth->eth_addr_mask[off]);
+
+ set_eth->reserved = cpu_to_be16(0);
+ set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
+ set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ip4_addrs *set_ip_addr)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ switch (off) {
+ case offsetof(struct iphdr, daddr):
+ set_ip_addr->ipv4_dst_mask = mask;
+ set_ip_addr->ipv4_dst = exact;
+ break;
+ case offsetof(struct iphdr, saddr):
+ set_ip_addr->ipv4_src_mask = mask;
+ set_ip_addr->ipv4_src = exact;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_ip_addr->reserved = cpu_to_be16(0);
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static void
+nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_addr *ip6)
+{
+ ip6->ipv6[idx % 4].mask = mask;
+ ip6->ipv6[idx % 4].exact = exact;
+
+ ip6->reserved = cpu_to_be16(0);
+ ip6->head.jump_id = opcode_tag;
+ ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
+}
+
+static int
+nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ipv6_addr *ip_dst,
+ struct nfp_fl_set_ipv6_addr *ip_src)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ if (off < offsetof(struct ipv6hdr, saddr))
+ return -EOPNOTSUPP;
+ else if (off < offsetof(struct ipv6hdr, daddr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ exact, mask, ip_src);
+ else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ exact, mask, ip_dst);
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_tport *set_tport, int opcode)
+{
+ u32 exact, mask;
+
+ if (off)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
+ set_tport->tp_port_mask);
+
+ set_tport->reserved = cpu_to_be16(0);
+ set_tport->head.jump_id = opcode;
+ set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
+static int
+nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+{
+ struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ip4_addrs set_ip_addr;
+ struct nfp_fl_set_tport set_tport;
+ struct nfp_fl_set_eth set_eth;
+ enum pedit_header_type htype;
+ int idx, nkeys, err;
+ size_t act_size;
+ u32 offset, cmd;
+
+ memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
+ memset(&set_ip6_src, 0, sizeof(set_ip6_src));
+ memset(&set_ip_addr, 0, sizeof(set_ip_addr));
+ memset(&set_tport, 0, sizeof(set_tport));
+ memset(&set_eth, 0, sizeof(set_eth));
+ nkeys = tcf_pedit_nkeys(action);
+
+ for (idx = 0; idx < nkeys; idx++) {
+ cmd = tcf_pedit_cmd(action, idx);
+ htype = tcf_pedit_htype(action, idx);
+ offset = tcf_pedit_offset(action, idx);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
+ return -EOPNOTSUPP;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(action, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
+ &set_ip6_src);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (err)
+ return err;
+ }
+
+ if (set_eth.head.len_lw) {
+ act_size = sizeof(set_eth);
+ memcpy(nfp_action, &set_eth, act_size);
+ *a_len += act_size;
+ } else if (set_ip_addr.head.len_lw) {
+ act_size = sizeof(set_ip_addr);
+ memcpy(nfp_action, &set_ip_addr, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ /* TC compiles set src and dst IPv6 address as a single action,
+	 * but the hardware requires this to be 2 separate actions.
+ */
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+
+ act_size = sizeof(set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
+ act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.head.len_lw) {
+ act_size = sizeof(set_ip6_dst);
+ memcpy(nfp_action, &set_ip6_dst, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_src.head.len_lw) {
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+ } else if (set_tport.head.len_lw) {
+ act_size = sizeof(set_tport);
+ memcpy(nfp_action, &set_tport, act_size);
+ *a_len += act_size;
+ }
return 0;
}
@@ -125,8 +432,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
static int
nfp_flower_loop_action(const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
{
+ struct nfp_fl_pre_tunnel *pre_tun;
+ struct nfp_fl_set_vxlan *s_vxl;
struct nfp_fl_push_vlan *psh_v;
struct nfp_fl_pop_vlan *pop_v;
struct nfp_fl_output *output;
@@ -139,7 +449,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, true, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -149,7 +460,8 @@ nfp_flower_loop_action(const struct tc_action *a,
return -EOPNOTSUPP;
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
- err = nfp_fl_output(output, a, nfp_fl, false, netdev);
+ err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type,
+ tun_out_cnt);
if (err)
return err;
@@ -172,6 +484,32 @@ nfp_flower_loop_action(const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
+ } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+ /* Pre-tunnel action is required for tunnel encap.
+ * This checks for next hop entries on NFP.
+ * If none, the packet falls back before applying other actions.
+ */
+ if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
+ sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+ return -EOPNOTSUPP;
+
+ *tun_type = NFP_FL_TUNNEL_VXLAN;
+ pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ *a_len += sizeof(struct nfp_fl_pre_tunnel);
+
+ s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
+ err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+ if (err)
+ return err;
+
+ *a_len += sizeof(struct nfp_fl_set_vxlan);
+ } else if (is_tcf_tunnel_release(a)) {
+ /* Tunnel decap is handled by default so accept action. */
+ return 0;
+ } else if (is_tcf_pedit(a)) {
+ if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ return -EOPNOTSUPP;
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
@@ -184,18 +522,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
- int act_len, act_cnt, err;
+ int act_len, act_cnt, err, tun_out_cnt;
+ enum nfp_flower_tun_type tun_type;
const struct tc_action *a;
LIST_HEAD(actions);
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
nfp_flow->meta.act_len = 0;
+ tun_type = NFP_FL_TUNNEL_NONE;
act_len = 0;
act_cnt = 0;
+ tun_out_cnt = 0;
tcf_exts_to_list(flow->exts, &actions);
list_for_each_entry(a, &actions, list) {
- err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev);
+ err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev,
+ &tun_type, &tun_out_cnt);
if (err)
return err;
act_cnt++;
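
Note: nfp_fl_push_vlan() above packs the pushed tag with FIELD_PREP using the NFP_FL_PUSH_VLAN_* masks from cmsg.h, which follow the standard 802.1Q TCI layout (PCP in bits 15-13, CFI/DEI in bit 12, VID in bits 11-0). A standalone sketch of the same packing with plain shifts, before the byte-order conversion the driver applies:

#include <stdint.h>
#include <stdio.h>

/* Standard 802.1Q TCI layout: PCP[15:13] | CFI[12] | VID[11:0].
 * The CFI bit is set unconditionally, as in the driver.
 */
static uint16_t pack_vlan_tci(unsigned int prio, unsigned int vid)
{
	return (uint16_t)(((prio & 0x7) << 13) | (1u << 12) | (vid & 0xfff));
}

int main(void)
{
	printf("tci=0x%04x\n", pack_vlan_tci(3, 100)); /* prints 0x7064 */
	return 0;
}
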
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index c3ca05d10fe1..e98bb9cdb6a3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -38,17 +38,10 @@
#include <net/dst_metadata.h>
#include "main.h"
-#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"
-#define nfp_flower_cmsg_warn(app, fmt, args...) \
- do { \
- if (net_ratelimit()) \
- nfp_warn((app)->cpp, fmt, ## args); \
- } while (0)
-
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
@@ -57,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type)
+ enum nfp_flower_cmsg_type_port type, gfp_t flag)
{
struct nfp_flower_cmsg_hdr *ch;
struct sk_buff *skb;
size += NFP_FLOWER_CMSG_HLEN;
- skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
+ skb = nfp_app_ctrl_msg_alloc(app, size, flag);
if (!skb)
return NULL;
@@ -85,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
unsigned int size;
size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
- skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR);
+ skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
+ GFP_KERNEL);
if (!skb)
return NULL;
@@ -116,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
struct sk_buff *skb;
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
- NFP_FLOWER_CMSG_TYPE_PORT_MOD);
+ NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -188,6 +182,15 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_FLOW_STATS:
nfp_flower_rx_flow_stats(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
+ nfp_tunnel_request_route(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
+ nfp_tunnel_keep_alive(app, skb);
+ break;
+ case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
+ /* Acks from the NFP that the route is added - ignore. */
+ break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2ec60344236..66070741d55f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -39,6 +39,7 @@
#include <linux/types.h>
#include "../nfp_app.h"
+#include "../nfpcore/nfp_cpp.h"
#define NFP_FLOWER_LAYER_META BIT(0)
#define NFP_FLOWER_LAYER_PORT BIT(1)
@@ -56,6 +57,11 @@
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
+#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -67,13 +73,18 @@
#define NFP_FL_LW_SIZ 2
/* Action opcodes */
-#define NFP_FL_ACTION_OPCODE_OUTPUT 0
-#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
-#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
-#define NFP_FL_ACTION_OPCODE_NUM 32
-
-#define NFP_FL_ACT_JMP_ID GENMASK(15, 8)
-#define NFP_FL_ACT_LEN_LW GENMASK(7, 0)
+#define NFP_FL_ACTION_OPCODE_OUTPUT 0
+#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
+#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
+#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
+#define NFP_FL_ACTION_OPCODE_NUM 32
#define NFP_FL_OUT_FLAGS_LAST BIT(15)
#define NFP_FL_OUT_FLAGS_USE_TUN BIT(4)
@@ -83,21 +94,74 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+/* Tunnel ports */
+#define NFP_FL_PORT_TYPE_TUN 0x50000000
+#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
+#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
+
+#define nfp_flower_cmsg_warn(app, fmt, args...) \
+ do { \
+ if (net_ratelimit()) \
+ nfp_warn((app)->cpp, fmt, ## args); \
+ } while (0)
+
+enum nfp_flower_tun_type {
+ NFP_FL_TUNNEL_NONE = 0,
+ NFP_FL_TUNNEL_VXLAN = 2,
+};
+
+struct nfp_fl_act_head {
+ u8 jump_id;
+ u8 len_lw;
+};
+
+struct nfp_fl_set_eth {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 eth_addr_mask[ETH_ALEN * 2];
+ u8 eth_addr_val[ETH_ALEN * 2];
+};
+
+struct nfp_fl_set_ip4_addrs {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_src_mask;
+ __be32 ipv4_src;
+ __be32 ipv4_dst_mask;
+ __be32 ipv4_dst;
+};
+
+struct nfp_fl_set_ipv6_addr {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ struct {
+ __be32 mask;
+ __be32 exact;
+ } ipv6[4];
+};
+
+struct nfp_fl_set_tport {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ u8 tp_port_mask[4];
+ u8 tp_port_val[4];
+};
+
struct nfp_fl_output {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 flags;
__be32 port;
};
struct nfp_fl_push_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
__be16 vlan_tpid;
__be16 vlan_tci;
};
struct nfp_fl_pop_vlan {
- __be16 a_op;
+ struct nfp_fl_act_head head;
__be16 reserved;
};
@@ -115,6 +179,25 @@ struct nfp_flower_meta_one {
u16 reserved;
};
+struct nfp_fl_pre_tunnel {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 ipv4_dst;
+ /* reserved for use with IPv6 addresses */
+ __be32 extra[3];
+};
+
+struct nfp_fl_set_vxlan {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be64 tun_id;
+ __be32 tun_type_index;
+ __be16 tun_flags;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be32 extra[2];
+} __packed;
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -230,6 +313,36 @@ struct nfp_flower_ipv6 {
struct in6_addr ipv6_dst;
};
+/* Flow Frame VXLAN --> Tunnel details (5W/20B)
+ * -----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_src |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | ipv4_addr_dst |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | tun_flags | tos | ttl |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | gpe_flags | Reserved | Next Protocol |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | VNI | Reserved |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vxlan {
+ __be32 ip_src;
+ __be32 ip_dst;
+ __be16 tun_flags;
+ u8 tos;
+ u8 ttl;
+ u8 gpe_flags;
+ u8 reserved[2];
+ u8 nxt_proto;
+ __be32 tun_id;
+};
+
+#define NFP_FL_TUN_VNI_OFFSET 8
+
/* The base header for a control message packet.
* Defines an 8-bit version, and an 8-bit type, padded
* to a 32-bit word. Rest of the packet is type-specific.
@@ -249,6 +362,11 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
+ NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
+ NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
+ NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13,
+ NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
@@ -282,6 +400,7 @@ enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3,
};
enum nfp_flower_cmsg_port_vnic_type {
@@ -323,6 +442,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN;
}
+static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
+{
+ return skb->len - NFP_FLOWER_CMSG_HLEN;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
@@ -334,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
- enum nfp_flower_cmsg_type_port type);
+ enum nfp_flower_cmsg_type_port type, gfp_t flag);
#endif
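
Note: every action now starts with struct nfp_fl_act_head above, where len_lw is the action length counted in 32-bit long-words — hence the sizeof(...) >> NFP_FL_LW_SIZ computations in action.c. A standalone sketch filling that header for the smallest action, pop VLAN (local struct names mirror the driver's but are re-declared here so the sketch compiles on its own):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NFP_FL_LW_SIZ	2	/* length field is counted in 4-byte long-words */
#define OPCODE_POP_VLAN	2	/* NFP_FL_ACTION_OPCODE_POP_VLAN */

struct act_head {
	uint8_t jump_id;
	uint8_t len_lw;
};

struct act_pop_vlan {		/* same shape as struct nfp_fl_pop_vlan */
	struct act_head head;
	uint16_t reserved;
};

int main(void)
{
	struct act_pop_vlan a;

	memset(&a, 0, sizeof(a));
	a.head.jump_id = OPCODE_POP_VLAN;
	a.head.len_lw = sizeof(a) >> NFP_FL_LW_SIZ;	/* 4 bytes -> 1 long-word */
	printf("len_lw=%u\n", (unsigned)a.head.len_lw);
	return 0;
}
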
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 91fe03617106..8fcc90c0d2d3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -125,6 +125,21 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false);
}
+static int
+nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
+{
+ return tc_setup_cb_egdev_register(netdev,
+ nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
+static void
+nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
+{
+ tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
+ netdev_priv(netdev));
+}
+
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -142,8 +157,8 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
{
u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
struct nfp_flower_priv *priv = app->priv;
- struct nfp_reprs *reprs, *old_reprs;
enum nfp_port_type port_type;
+ struct nfp_reprs *reprs;
const u8 queue = 0;
int i, err;
@@ -194,11 +209,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
reprs->reprs[i]->name);
}
- old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, repr_type, reprs);
return 0;
err_reprs_clean:
@@ -222,8 +233,8 @@ static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
- struct nfp_reprs *reprs, *old_reprs;
struct sk_buff *ctrl_skb;
+ struct nfp_reprs *reprs;
unsigned int i;
int err;
@@ -280,11 +291,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
phys_port, reprs->reprs[phys_port]->name);
}
- old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
- if (IS_ERR(old_reprs)) {
- err = PTR_ERR(old_reprs);
- goto err_reprs_clean;
- }
+ nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
/* The MAC_REPR control message should be sent after the MAC
* representors are registered using nfp_app_reprs_set(). This is
@@ -436,6 +443,16 @@ static void nfp_flower_clean(struct nfp_app *app)
app->priv = NULL;
}
+static int nfp_flower_start(struct nfp_app *app)
+{
+ return nfp_tunnel_config_start(app);
+}
+
+static void nfp_flower_stop(struct nfp_app *app)
+{
+ nfp_tunnel_config_stop(app);
+}
+
const struct nfp_app_type app_flower = {
.id = NFP_APP_FLOWER_NIC,
.name = "flower",
@@ -450,9 +467,15 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
+ .repr_init = nfp_flower_repr_netdev_init,
+ .repr_clean = nfp_flower_repr_netdev_clean,
+
.repr_open = nfp_flower_repr_netdev_open,
.repr_stop = nfp_flower_repr_netdev_stop,
+ .start = nfp_flower_start,
+ .stop = nfp_flower_stop,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c20dd00a1cae..e6b26c5ae6e0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -52,12 +52,13 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
-#define NFP_FL_META_FLAG_NEW_MASK 128
-#define NFP_FL_META_FLAG_LAST_MASK 1
+#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
+#define NFP_FL_VXLAN_PORT 4789
+
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
struct timespec64 *last_used;
@@ -82,6 +83,18 @@ struct nfp_fl_stats_id {
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs: List of skbs for control message processing
+ * @nfp_mac_off_list: List of MAC addresses to offload
+ * @nfp_mac_index_list: List of unique 8-bit indexes for non-NFP netdevs
+ * @nfp_ipv4_off_list: List of IPv4 addresses to offload
+ * @nfp_neigh_off_list: List of neighbour offloads
+ * @nfp_mac_off_lock: Lock for the MAC address list
+ * @nfp_mac_index_lock: Lock for the MAC index list
+ * @nfp_ipv4_off_lock: Lock for the IPv4 address list
+ * @nfp_neigh_off_lock: Lock for the neighbour address list
+ * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
+ * @nfp_mac_off_count: Number of MACs in address list
+ * @nfp_tun_mac_nb: Notifier to monitor link state
+ * @nfp_tun_neigh_nb: Notifier to monitor neighbour state
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -94,6 +107,18 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs;
+ struct list_head nfp_mac_off_list;
+ struct list_head nfp_mac_index_list;
+ struct list_head nfp_ipv4_off_list;
+ struct list_head nfp_neigh_off_list;
+ struct mutex nfp_mac_off_lock;
+ struct mutex nfp_mac_index_lock;
+ struct mutex nfp_ipv4_off_lock;
+ spinlock_t nfp_neigh_off_lock;
+ struct ida nfp_mac_off_ids;
+ int nfp_mac_off_count;
+ struct notifier_block nfp_tun_mac_nb;
+ struct notifier_block nfp_tun_neigh_nb;
};
struct nfp_fl_key_ls {
@@ -126,6 +151,7 @@ struct nfp_fl_payload {
struct rcu_head rcu;
spinlock_t lock; /* lock stats */
struct nfp_fl_stats stats;
+ __be32 nfp_tun_ipv4_addr;
char *unmasked_data;
char *mask_data;
char *action_data;
@@ -163,4 +189,14 @@ nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
+int nfp_tunnel_config_start(struct nfp_app *app);
+void nfp_tunnel_config_stop(struct nfp_app *app);
+void nfp_tunnel_write_macs(struct nfp_app *app);
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index d25b5038c3a2..60614d4f0e22 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -77,14 +77,17 @@ nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
static int
nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
- bool mask_version)
+ bool mask_version, enum nfp_flower_tun_type tun_type)
{
if (mask_version) {
frame->in_port = cpu_to_be32(~0);
return 0;
}
- frame->in_port = cpu_to_be32(cmsg_port);
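+ /* For tunnel matches the port field carries a tunnel-type
+ * pseudo-port rather than the representor port id.
+ */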
+ if (tun_type)
+ frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+ else
+ frame->in_port = cpu_to_be32(cmsg_port);
return 0;
}
@@ -108,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
- if (mask_version)
- frame->mpls_lse = cpu_to_be32(~0);
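+ /* Pack the MPLS label, TC and bottom-of-stack bit into the 32-bit
+ * LSE field; the Q bit flags that an MPLS LSE is present.
+ */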
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_dissector_key_mpls *mpls;
+ u32 t_mpls;
+
+ mpls = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_MPLS,
+ target);
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ frame->mpls_lse = cpu_to_be32(t_mpls);
+ }
}
static void
@@ -140,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
@@ -158,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
@@ -169,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard LABEL/TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
@@ -187,6 +211,51 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
+}
+
+static void
+nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
+ struct tc_cls_flower_offload *flow,
+ bool mask_version, __be32 *tun_dst)
+{
+ struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
+ struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+ struct flow_dissector_key_keyid *vni;
+
+ /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
+ memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ u32 temp_vni;
+
+ vni = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ target);
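+ /* The VNI occupies the upper bits of the 32-bit tunnel id word. */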
+ temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
+ frame->tun_id = cpu_to_be32(temp_vni);
+ }
+
+ if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ vxlan_ips =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ target);
+ frame->ip_src = vxlan_ips->src;
+ frame->ip_dst = vxlan_ips->dst;
+ *tun_dst = vxlan_ips->dst;
+ }
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
@@ -194,10 +263,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
+ enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ __be32 tun_dst, tun_dst_mask = 0;
+ struct nfp_repr *netdev_repr;
int err;
u8 *ext;
u8 *msk;
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
+ tun_type = NFP_FL_TUNNEL_VXLAN;
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -216,14 +291,14 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
- false);
+ false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
nfp_repr_get_port_id(netdev),
- true);
+ true, tun_type);
if (err)
return err;
@@ -291,5 +366,28 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv6);
}
+ if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+ /* Populate Exact VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
+ flow, false, &tun_dst);
+ /* Populate Mask VXLAN Data. */
+ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
+ flow, true, &tun_dst_mask);
+ ext += sizeof(struct nfp_flower_vxlan);
+ msk += sizeof(struct nfp_flower_vxlan);
+
+ /* Configure tunnel end point MAC. */
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ netdev_repr = netdev_priv(netdev);
+ nfp_tunnel_write_macs(netdev_repr->app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 3226ddc55f99..db977cf8e933 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -140,7 +140,7 @@ exit_rcu_unlock:
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
- unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN;
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
struct nfp_fl_stats_frame *stats_frame;
unsigned char *msg;
int i;
@@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
id = nfp_add_mask_table(app, mask_data, mask_len);
if (id < 0)
return false;
- *meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
*mask_id = id;
@@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
+ if (meta_flags)
+ *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
nfp_release_mask_id(app, *mask_id);
kfree(mask_entry);
if (meta_flags)
- *meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
+ *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
}
return true;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index a18b4d2b1d3e..553f94f55dce 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -52,8 +52,26 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
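+/* Tunnel keys that must all be present whenever any tunnel key is matched. */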
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
+ (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+
static int
nfp_flower_xmit_flow(struct net_device *netdev,
struct nfp_fl_payload *nfp_flow, u8 mtype)
@@ -77,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
+ skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -113,11 +131,11 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
static int
nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow,
+ bool egress)
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
- struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -125,15 +143,64 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
return -EOPNOTSUPP;
+ /* If any tun dissector is used then the required set must be used. */
+ if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+ (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+ return -EOPNOTSUPP;
+
+ key_layer_two = 0;
+ key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
+ key_size = sizeof(struct nfp_flower_meta_one) +
+ sizeof(struct nfp_flower_in_port) +
+ sizeof(struct nfp_flower_mac_mpls);
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
+ struct flow_dissector_key_ports *mask_enc_ports = NULL;
+ struct flow_dissector_key_ports *enc_ports = NULL;
struct flow_dissector_key_control *mask_enc_ctl =
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->mask);
- /* We are expecting a tunnel. For now we ignore offloading. */
- if (mask_enc_ctl->addr_type)
+ struct flow_dissector_key_control *enc_ctl =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ flow->key);
+ if (!egress)
+ return -EOPNOTSUPP;
+
+ if (mask_enc_ctl->addr_type != 0xffff ||
+ enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+ return -EOPNOTSUPP;
+
+ /* These fields are already verified as used. */
+ mask_ipv4 =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+ flow->mask);
+ if (mask_ipv4->dst != cpu_to_be32(~0))
return -EOPNOTSUPP;
+
+ mask_enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->mask);
+ enc_ports =
+ skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ flow->key);
+
+ if (mask_enc_ports->dst != cpu_to_be16(~0) ||
+ enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+ return -EOPNOTSUPP;
+
+ key_layer |= NFP_FLOWER_LAYER_VXLAN;
+ key_size += sizeof(struct nfp_flower_vxlan);
+ } else if (egress) {
+ /* Reject non-tunnel matches offloaded to an egress repr. */
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -146,34 +213,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
flow->key);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
- mask_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- flow->mask);
-
- key_layer_two = 0;
- key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
- key_size = sizeof(struct nfp_flower_meta_one) +
- sizeof(struct nfp_flower_in_port) +
- sizeof(struct nfp_flower_mac_mpls);
-
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -184,11 +232,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
- /* Currently we do not offload MPLS. */
- case cpu_to_be16(ETH_P_MPLS_UC):
- case cpu_to_be16(ETH_P_MPLS_MC):
- return -EOPNOTSUPP;
-
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -252,6 +295,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
if (!flow_pay->action_data)
goto err_free_mask;
+ flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
spin_lock_init(&flow_pay->lock);
@@ -271,6 +315,7 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
+ * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -278,7 +323,7 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow)
+ struct tc_cls_flower_offload *flow, bool egress)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
@@ -289,7 +334,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(key_layer, flow);
+ err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
if (err)
goto err_free_key_ls;
@@ -361,6 +406,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_free_flow;
+ if (nfp_flow->nfp_tun_ipv4_addr)
+ nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+
err = nfp_flower_xmit_flow(netdev, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
@@ -407,11 +455,15 @@ nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower)
+ struct tc_cls_flower_offload *flower, bool egress)
{
+ if (!eth_proto_is_802_3(flower->common.protocol) ||
+ flower->common.chain_index)
+ return -EOPNOTSUPP;
+
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower);
+ return nfp_flower_add_offload(app, netdev, flower, egress);
case TC_CLSFLOWER_DESTROY:
return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
@@ -421,16 +473,70 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
-int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
- enum tc_setup_type type, void *type_data)
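+/* Callback for flower rules added on an egress device; such rules are
+ * offloaded with the egress flag set and must match on tunnel fields.
+ */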
+int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct nfp_repr *repr = cb_priv;
+
+ if (!tc_can_offload(repr->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- struct tc_cls_flower_offload *cls_flower = type_data;
+ struct nfp_repr *repr = cb_priv;
- if (type != TC_SETUP_CLSFLOWER ||
- !is_classid_clsact_ingress(cls_flower->common.classid) ||
- !eth_proto_is_802_3(cls_flower->common.protocol) ||
- cls_flower->common.chain_index)
+ if (!tc_can_offload(repr->netdev))
return -EOPNOTSUPP;
- return nfp_flower_repr_offload(app, netdev, cls_flower);
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(repr->app, repr->netdev,
+ type_data, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nfp_flower_setup_tc_block(struct net_device *netdev,
+ struct tc_block_offload *f)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr, repr);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
new file mode 100644
index 000000000000..b03f22f29612
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <net/netevent.h>
+#include <linux/idr.h>
+#include <net/dst_metadata.h>
+#include <net/arp.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_net_repr.h"
+#include "../nfp_net.h"
+
+#define NFP_FL_MAX_ROUTES 32
+
+/**
+ * struct nfp_tun_active_tuns - periodic message of active tunnels
+ * @seq: sequence number of the message
+ * @count: number of tunnels reported in the message
+ * @flags: options part of the request
+ * @ipv4: dest IPv4 address of active route
+ * @egress_port: port the encapsulated packet egressed
+ * @extra: reserved for future use
+ * @tun_info: tunnels that have sent traffic in reported period
+ */
+struct nfp_tun_active_tuns {
+ __be32 seq;
+ __be32 count;
+ __be32 flags;
+ struct route_ip_info {
+ __be32 ipv4;
+ __be32 egress_port;
+ __be32 extra[2];
+ } tun_info[];
+};
+
+/**
+ * struct nfp_tun_neigh - neighbour/route entry on the NFP
+ * @dst_ipv4: destination IPv4 address
+ * @src_ipv4: source IPv4 address
+ * @dst_addr: destination MAC address
+ * @src_addr: source MAC address
+ * @port_id: NFP port to output packet on - associated with source IPv4
+ */
+struct nfp_tun_neigh {
+ __be32 dst_ipv4;
+ __be32 src_ipv4;
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 port_id;
+};
+
+/**
+ * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
+ * @ingress_port: ingress port of packet that signalled request
+ * @ipv4_addr: destination ipv4 address for route
+ * @reserved: reserved for future use
+ */
+struct nfp_tun_req_route_ipv4 {
+ __be32 ingress_port;
+ __be32 ipv4_addr;
+ __be32 reserved[2];
+};
+
+/**
+ * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP
+ * @ipv4_addr: destination of route
+ * @list: list pointer
+ */
+struct nfp_ipv4_route_entry {
+ __be32 ipv4_addr;
+ struct list_head list;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX 32
+
+/**
+ * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
+ * @count: number of IPs populated in the array
+ * @ipv4_addr: array of NFP_FL_IPV4_ADDRS_MAX 32-bit IPv4 addresses
+ */
+struct nfp_tun_ipv4_addr {
+ __be32 count;
+ __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
+/**
+ * struct nfp_ipv4_addr_entry - cached IPv4 addresses
+ * @ipv4_addr: IP address
+ * @ref_count: number of rules currently using this IP
+ * @list: list pointer
+ */
+struct nfp_ipv4_addr_entry {
+ __be32 ipv4_addr;
+ int ref_count;
+ struct list_head list;
+};
+
+/**
+ * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
+ * @reserved: reserved for future use
+ * @count: number of MAC addresses in the message
+ * @index: index of MAC address in the lookup table
+ * @addr: interface MAC address
+ * @addresses: series of MACs to offload
+ */
+struct nfp_tun_mac_addr {
+ __be16 reserved;
+ __be16 count;
+ struct index_mac_addr {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ } addresses[];
+};
+
+/**
+ * struct nfp_tun_mac_offload_entry - list of MACs to offload
+ * @index: index of MAC address for offloading
+ * @addr: interface MAC address
+ * @list: list pointer
+ */
+struct nfp_tun_mac_offload_entry {
+ __be16 index;
+ u8 addr[ETH_ALEN];
+ struct list_head list;
+};
+
+#define NFP_MAX_MAC_INDEX 0xff
+
+/**
+ * struct nfp_tun_mac_non_nfp_idx - maps a non-NFP netdev ifindex to an 8-bit id
+ * @ifindex: netdev ifindex of the device
+ * @index: index of netdevs mac on NFP
+ * @list: list pointer
+ */
+struct nfp_tun_mac_non_nfp_idx {
+ int ifindex;
+ u8 index;
+ struct list_head list;
+};
+
+void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_active_tuns *payload;
+ struct net_device *netdev;
+ int count, i, pay_len;
+ struct neighbour *n;
+ __be32 ipv4_addr;
+ u32 port;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+ count = be32_to_cpu(payload->count);
+ if (count > NFP_FL_MAX_ROUTES) {
+ nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n");
+ return;
+ }
+
+ pay_len = nfp_flower_cmsg_get_data_len(skb);
+ if (pay_len != sizeof(struct nfp_tun_active_tuns) +
+ sizeof(struct route_ip_info) * count) {
+ nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ ipv4_addr = payload->tun_info[i].ipv4;
+ port = be32_to_cpu(payload->tun_info[i].egress_port);
+ netdev = nfp_app_repr_get(app, port);
+ if (!netdev)
+ continue;
+
+ n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev);
+ if (!n)
+ continue;
+
+ /* Update the used timestamp of neighbour */
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
+
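+/* Only OvS internal ports and VXLAN netdevs are candidates for MAC
+ * offload as potential tunnel endpoints.
+ */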
+static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan"))
+ return true;
+
+ return false;
+}
+
+static int
+nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
+ gfp_t flag)
+{
+ struct sk_buff *skb;
+ unsigned char *msg;
+
+ skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag);
+ if (!skb)
+ return -ENOMEM;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb));
+
+ nfp_ctrl_tx(app->ctrl, skb);
+ return 0;
+}
+
+static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return false;
+}
+
+static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ return;
+ }
+ }
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
+ return;
+ }
+
+ entry->ipv4_addr = ipv4_addr;
+ list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *entry;
+ struct list_head *ptr, *storage;
+
+ spin_lock_bh(&priv->nfp_neigh_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
+ if (entry->ipv4_addr == ipv4_addr) {
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ spin_unlock_bh(&priv->nfp_neigh_off_lock);
+}
+
+static void
+nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
+ struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
+{
+ struct nfp_tun_neigh payload;
+
+ /* Only offload representor IPv4s for now. */
+ if (!nfp_netdev_is_nfp_repr(netdev))
+ return;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_neigh));
+ payload.dst_ipv4 = flow->daddr;
+
+ /* If entry has expired send dst IP with all other fields 0. */
+ if (!(neigh->nud_state & NUD_VALID)) {
+ nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
+ /* Trigger ARP to verify invalid neighbour state. */
+ neigh_event_send(neigh, NULL);
+ goto send_msg;
+ }
+
+ /* Have a valid neighbour so populate rest of entry. */
+ payload.src_ipv4 = flow->saddr;
+ ether_addr_copy(payload.src_addr, netdev->dev_addr);
+ neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
+ payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ /* Add destination of new route to NFP cache. */
+ nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
+
+send_msg:
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH,
+ sizeof(struct nfp_tun_neigh),
+ (unsigned char *)&payload, flag);
+}
+
+static int
+nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct netevent_redirect *redir;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct nfp_app *app;
+ struct rtable *rt;
+ int err;
+
+ switch (event) {
+ case NETEVENT_REDIRECT:
+ redir = (struct netevent_redirect *)ptr;
+ n = redir->neigh;
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = (struct neighbour *)ptr;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+ /* Only concerned with route changes for representors. */
+ if (!nfp_netdev_is_nfp_repr(n->dev))
+ return NOTIFY_DONE;
+
+ app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+ app = app_priv->app;
+
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup to populate flow data. */
+ rt = ip_route_output_key(dev_net(n->dev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ return NOTIFY_DONE;
+#else
+ return NOTIFY_DONE;
+#endif
+
+ flow.flowi4_proto = IPPROTO_UDP;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC);
+
+ return NOTIFY_OK;
+}
+
+void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_tun_req_route_ipv4 *payload;
+ struct net_device *netdev;
+ struct flowi4 flow = {};
+ struct neighbour *n;
+ struct rtable *rt;
+ int err;
+
+ payload = nfp_flower_cmsg_get_data(skb);
+
+ netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ if (!netdev)
+ goto route_fail_warning;
+
+ flow.daddr = payload->ipv4_addr;
+ flow.flowi4_proto = IPPROTO_UDP;
+
+#if IS_ENABLED(CONFIG_INET)
+ /* Do a route lookup on same namespace as ingress port. */
+ rt = ip_route_output_key(dev_net(netdev), &flow);
+ err = PTR_ERR_OR_ZERO(rt);
+ if (err)
+ goto route_fail_warning;
+#else
+ goto route_fail_warning;
+#endif
+
+ /* Get the neighbour entry for the lookup */
+ n = dst_neigh_lookup(&rt->dst, &flow.daddr);
+ ip_rt_put(rt);
+ if (!n)
+ goto route_fail_warning;
+ nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL);
+ neigh_release(n);
+ return;
+
+route_fail_warning:
+ nfp_flower_cmsg_warn(app, "Requested route not found.\n");
+}
+
+static void nfp_tun_write_ipv4_list(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_tun_ipv4_addr payload;
+ struct list_head *ptr, *storage;
+ int count;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
+ return;
+ }
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ payload.ipv4_addr[count++] = entry->ipv4_addr;
+ }
+ payload.count = cpu_to_be32(count);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
+ sizeof(struct nfp_tun_ipv4_addr),
+ &payload, GFP_KERNEL);
+}
+
+void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count++;
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ return;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+ nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
+ return;
+ }
+ entry->ipv4_addr = ipv4;
+ entry->ref_count = 1;
+ list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_ipv4_off_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count--;
+ if (!entry->ref_count) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_ipv4_off_lock);
+
+ nfp_tun_write_ipv4_list(app);
+}
+
+void nfp_tunnel_write_macs(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ struct nfp_tun_mac_addr *payload;
+ struct list_head *ptr, *storage;
+ int mac_count, err, pay_size;
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ if (!priv->nfp_mac_off_count) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ pay_size = sizeof(struct nfp_tun_mac_addr) +
+ sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+
+ payload = kzalloc(pay_size, GFP_KERNEL);
+ if (!payload) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ return;
+ }
+
+ payload->count = cpu_to_be16(priv->nfp_mac_off_count);
+
+ mac_count = 0;
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ payload->addresses[mac_count].index = entry->index;
+ ether_addr_copy(payload->addresses[mac_count].addr,
+ entry->addr);
+ mac_count++;
+ }
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+ pay_size, payload, GFP_KERNEL);
+
+ kfree(payload);
+
+ if (err) {
+ mutex_unlock(&priv->nfp_mac_off_lock);
+ /* Write failed so retain list for future retry. */
+ return;
+ }
+
+ /* If list was successfully offloaded, flush it. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ priv->nfp_mac_off_count = 0;
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
+static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+ int idx;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ idx = entry->index;
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+ }
+
+ idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (idx < 0) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return idx;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ mutex_unlock(&priv->nfp_mac_index_lock);
+ return -ENOMEM;
+ }
+ entry->ifindex = ifindex;
+ entry->index = idx;
+ list_add_tail(&entry->list, &priv->nfp_mac_index_list);
+ mutex_unlock(&priv->nfp_mac_index_lock);
+
+ return idx;
+}
+
+static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_non_nfp_idx *entry;
+ struct list_head *ptr, *storage;
+
+ mutex_lock(&priv->nfp_mac_index_lock);
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
+ if (entry->ifindex == ifindex) {
+ ida_simple_remove(&priv->nfp_mac_off_ids,
+ entry->index);
+ list_del(&entry->list);
+ kfree(entry);
+ break;
+ }
+ }
+ mutex_unlock(&priv->nfp_mac_index_lock);
+}
+
+static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
+ struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_mac_offload_entry *entry;
+ u16 nfp_mac_idx;
+ int port = 0;
+
+ /* Check if MAC should be offloaded. */
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ return;
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_repr_get_port_id(netdev);
+ else if (!nfp_tun_is_netdev_to_offload(netdev))
+ return;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
+ return;
+ }
+
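+ /* The 16-bit index sent to the NFP carries the port (or an assigned
+ * id) in the upper byte and the port type in the lower byte.
+ */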
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+ } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
+ nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
+ } else {
+ /* Must assign our own unique 8-bit index. */
+ int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+
+ if (idx < 0) {
+ nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
+ kfree(entry);
+ return;
+ }
+ nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+ }
+
+ entry->index = cpu_to_be16(nfp_mac_idx);
+ ether_addr_copy(entry->addr, netdev->dev_addr);
+
+ mutex_lock(&priv->nfp_mac_off_lock);
+ priv->nfp_mac_off_count++;
+ list_add_tail(&entry->list, &priv->nfp_mac_off_list);
+ mutex_unlock(&priv->nfp_mac_off_lock);
+}
+
+static int nfp_tun_mac_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nfp_flower_priv *app_priv;
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ /* If non-NFP netdev then free its offload index. */
+ if (nfp_tun_is_netdev_to_offload(netdev))
+ nfp_tun_del_mac_idx(app, netdev->ifindex);
+ } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
+ event == NETDEV_REGISTER) {
+ app_priv = container_of(nb, struct nfp_flower_priv,
+ nfp_tun_mac_nb);
+ app = app_priv->app;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+
+ /* Force a list write to keep NFP up to date. */
+ nfp_tunnel_write_macs(app);
+ }
+ return NOTIFY_OK;
+}
+
+int nfp_tunnel_config_start(struct nfp_app *app)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+ int err;
+
+ /* Initialise priv data for MAC offloading. */
+ priv->nfp_mac_off_count = 0;
+ mutex_init(&priv->nfp_mac_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_off_list);
+ priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
+ mutex_init(&priv->nfp_mac_index_lock);
+ INIT_LIST_HEAD(&priv->nfp_mac_index_list);
+ ida_init(&priv->nfp_mac_off_ids);
+
+ /* Initialise priv data for IPv4 offloading. */
+ mutex_init(&priv->nfp_ipv4_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+
+ /* Initialise priv data for neighbour offloading. */
+ spin_lock_init(&priv->nfp_neigh_off_lock);
+ INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
+ priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+
+ err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ if (err)
+ goto err_free_mac_ida;
+
+ err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+ if (err)
+ goto err_unreg_mac_nb;
+
+ /* Parse already registered netdevs for MACs that need to be offloaded. */
+ rtnl_lock();
+ for_each_netdev(&init_net, netdev)
+ nfp_tun_add_to_mac_offload_list(netdev, app);
+ rtnl_unlock();
+
+ return 0;
+
+err_unreg_mac_nb:
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+err_free_mac_ida:
+ ida_destroy(&priv->nfp_mac_off_ids);
+ return err;
+}
+
+void nfp_tunnel_config_stop(struct nfp_app *app)
+{
+ struct nfp_tun_mac_offload_entry *mac_entry;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_ipv4_route_entry *route_entry;
+ struct nfp_tun_mac_non_nfp_idx *mac_idx;
+ struct nfp_ipv4_addr_entry *ip_entry;
+ struct list_head *ptr, *storage;
+
+ unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
+ unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
+
+ /* Free any memory that may be occupied by MAC list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
+ mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
+ list);
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+
+ /* Free any memory that may be occupied by MAC index list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
+ mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
+ list);
+ list_del(&mac_idx->list);
+ kfree(mac_idx);
+ }
+
+ ida_destroy(&priv->nfp_mac_off_ids);
+
+ /* Free any memory that may be occupied by ipv4 list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+ ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
+ list_del(&ip_entry->list);
+ kfree(ip_entry);
+ }
+
+ /* Free any memory that may be occupied by the route list. */
+ list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+ route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
+ list);
+ list_del(&route_entry->list);
+ kfree(route_entry);
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 82c290763529..955a9f44d244 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/bug.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
@@ -42,10 +43,14 @@
#include "nfp_net_repr.h"
static const struct nfp_app_type *apps[] = {
- &app_nic,
- &app_bpf,
+ [NFP_APP_CORE_NIC] = &app_nic,
+#ifdef CONFIG_BPF_SYSCALL
+ [NFP_APP_BPF_NIC] = &app_bpf,
+#else
+ [NFP_APP_BPF_NIC] = &app_nic,
+#endif
#ifdef CONFIG_NFP_APP_FLOWER
- &app_flower,
+ [NFP_APP_FLOWER_NIC] = &app_flower,
#endif
};
@@ -101,31 +106,21 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
old = rcu_dereference_protected(app->reprs[type],
lockdep_is_held(&app->pf->lock));
- if (reprs && old) {
- old = ERR_PTR(-EBUSY);
- goto exit_unlock;
- }
-
rcu_assign_pointer(app->reprs[type], reprs);
-exit_unlock:
return old;
}
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(apps); i++)
- if (apps[i]->id == id)
- break;
- if (i == ARRAY_SIZE(apps)) {
+ if (id >= ARRAY_SIZE(apps) || !apps[id]) {
nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id);
return ERR_PTR(-EINVAL);
}
- if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc))
+ if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc))
return ERR_PTR(-EINVAL);
app = kzalloc(sizeof(*app), GFP_KERNEL);
@@ -135,7 +130,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
app->pf = pf;
app->cpp = pf->cpp;
app->pdev = pf->pdev;
- app->type = apps[i];
+ app->type = apps[id];
return app;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index af640b5c2108..0e5e0305ad1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -36,10 +36,13 @@
#include <net/devlink.h>
+#include <trace/events/devlink.h>
+
#include "nfp_net_repr.h"
struct bpf_prog;
struct net_device;
+struct netdev_bpf;
struct pci_dev;
struct sk_buff;
struct sk_buff;
@@ -73,6 +76,8 @@ extern const struct nfp_app_type app_flower;
* @vnic_free: free up app's vNIC state
* @vnic_init: vNIC netdev was registered
* @vnic_clean: vNIC netdev about to be unregistered
+ * @repr_init: representor about to be registered
+ * @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback
* @start: start application logic
@@ -81,6 +86,9 @@ extern const struct nfp_app_type app_flower;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
+ * @bpf_verifier_prep: verifier prep for dev-specific BPF programs
+ * @bpf_translate: translate call for dev-specific BPF programs
+ * @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -103,6 +111,9 @@ struct nfp_app_type {
int (*vnic_init)(struct nfp_app *app, struct nfp_net *nn);
void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
+ int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+ void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
+
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
@@ -116,6 +127,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
+ int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf);
+ int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
+ int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -200,6 +217,21 @@ static inline int nfp_app_repr_stop(struct nfp_app *app, struct nfp_repr *repr)
return app->type->repr_stop(app, repr);
}
+static inline int
+nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
+{
+ if (!app->type->repr_init)
+ return 0;
+ return app->type->repr_init(app, netdev);
+}
+
+static inline void
+nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
+{
+ if (app->type->repr_clean)
+ app->type->repr_clean(app, netdev);
+}
+
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{
app->ctrl = ctrl;
@@ -269,13 +301,46 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
+static inline int
+nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+ struct netdev_bpf *bpf)
+{
+ if (!app || !app->type->bpf_verifier_prep)
+ return -EOPNOTSUPP;
+ return app->type->bpf_verifier_prep(app, nn, bpf);
+}
+
+static inline int
+nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_translate)
+ return -EOPNOTSUPP;
+ return app->type->bpf_translate(app, nn, prog);
+}
+
+static inline int
+nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
+ struct bpf_prog *prog)
+{
+ if (!app || !app->type->bpf_destroy)
+ return -EOPNOTSUPP;
+ return app->type->bpf_destroy(app, nn, prog);
+}
+
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
+
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0,
+ skb->data, skb->len);
+
app->type->ctrl_msg_rx(app, skb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
new file mode 100644
index 000000000000..830f6de25f47
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "nfp_asm.h"
+
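+/* Token and target command encodings for each supported command target. */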
+const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ32] = { 0x00, 0x5c },
+ [CMD_TGT_READ32_LE] = { 0x01, 0x5c },
+ [CMD_TGT_READ32_SWAP] = { 0x02, 0x5c },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
+{
+ bool lm_id, lm_dec = false;
+ u16 val = swreg_value(reg);
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ switch (swreg_lm_mode(reg)) {
+ case NN_LM_MOD_NONE:
+ if (val & ~UR_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+ return UR_REG_LM | FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ val;
+ case NN_LM_MOD_DEC:
+ lm_dec = true;
+ /* fall through */
+ case NN_LM_MOD_INC:
+ if (val) {
+ pr_err("LM offset in inc/dev mode\n");
+ return 0;
+ }
+ return UR_REG_LM | UR_REG_LM_POST_MOD |
+ FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ FIELD_PREP(UR_REG_LM_POST_MOD_DEC, lm_dec);
+ default:
+ pr_err("bad LM mode for unrestricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ }
+
+ pr_err("unrecognized reg encoding %08x\n", reg);
+ return 0;
+}
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
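+ /* An A-bank register can only be the A operand and a B-bank register
+ * the B operand, so swap the operands and record it when needed.
+ */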
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = swreg_value(reg);
+ bool lm_id;
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ if (swreg_lm_mode(reg) != NN_LM_MOD_NONE) {
+ pr_err("bad LM mode for restricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+
+ if (val & ~RE_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+
+ return RE_REG_LM | FIELD_PREP(RE_REG_LM_IDX, lm_id) | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ case NN_REG_NNR:
+ pr_err("NNRs used with restricted encoding\n");
+ return 0;
+ }
+
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+}
+
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+#define NFP_USTORE_ECC_POLY_WORDS 7
+#define NFP_USTORE_OP_BITS 45
+
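+/* Each ECC bit is the parity of the 45-bit instruction ANDed with one of
+ * the polynomials below; the resulting 7 bits are placed above the
+ * instruction bits in the ustore word.
+ */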
+static const u64 nfp_ustore_ecc_polynomials[NFP_USTORE_ECC_POLY_WORDS] = {
+ 0x0ff800007fffULL,
+ 0x11f801ff801fULL,
+ 0x1e387e0781e1ULL,
+ 0x17cb8e388e22ULL,
+ 0x1af5b2c93244ULL,
+ 0x1f56d5525488ULL,
+ 0x0daf69a46910ULL,
+};
+
+static bool parity(u64 value)
+{
+ return hweight64(value) & 1;
+}
+
+int nfp_ustore_check_valid_no_ecc(u64 insn)
+{
+ if (insn & ~GENMASK_ULL(NFP_USTORE_OP_BITS, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 nfp_ustore_calc_ecc_insn(u64 insn)
+{
+ u8 ecc = 0;
+ int i;
+
+ for (i = 0; i < NFP_USTORE_ECC_POLY_WORDS; i++)
+ ecc |= parity(nfp_ustore_ecc_polynomials[i] & insn) << i;
+
+ return insn | (u64)ecc << NFP_USTORE_OP_BITS;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index d2b535739d2b..74d0c11ab2f9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,6 +34,8 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
+#include <linux/bitfield.h>
+#include <linux/bug.h>
#include <linux/types.h>
#define REG_NONE 0
@@ -43,23 +45,31 @@
#define RE_REG_IMM_encode(x) \
(RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_LM 0x050
+#define RE_REG_LM_IDX 0x008
+#define RE_REG_LM_IDX_MAX 0x7
#define RE_REG_XFR 0x080
#define UR_REG_XFR 0x180
+#define UR_REG_LM 0x200
+#define UR_REG_LM_IDX 0x020
+#define UR_REG_LM_POST_MOD 0x010
+#define UR_REG_LM_POST_MOD_DEC 0x001
+#define UR_REG_LM_IDX_MAX 0xf
#define UR_REG_NN 0x280
#define UR_REG_NO_DST 0x300
#define UR_REG_IMM UR_REG_NO_DST
#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
#define UR_REG_IMM_MAX 0x0ffULL
-#define OP_BR_BASE 0x0d800000020ULL
-#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
-#define OP_BR_MASK 0x0000000001fULL
-#define OP_BR_EV_PIP 0x00000000300ULL
-#define OP_BR_CSS 0x0000003c000ULL
-#define OP_BR_DEFBR 0x00000300000ULL
-#define OP_BR_ADDR_LO 0x007ffc00000ULL
-#define OP_BR_ADDR_HI 0x10000000000ULL
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
#define nfp_is_br(_insn) \
(((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
@@ -82,30 +92,33 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
-#define OP_BBYTE_BASE 0x0c800000000ULL
-#define OP_BB_A_SRC 0x000000000ffULL
-#define OP_BB_BYTE 0x00000000300ULL
-#define OP_BB_B_SRC 0x0000003fc00ULL
-#define OP_BB_I8 0x00000040000ULL
-#define OP_BB_EQ 0x00000080000ULL
-#define OP_BB_DEFBR 0x00000300000ULL
-#define OP_BB_ADDR_LO 0x007ffc00000ULL
-#define OP_BB_ADDR_HI 0x10000000000ULL
-
-#define OP_BALU_BASE 0x0e800000000ULL
-#define OP_BA_A_SRC 0x000000003ffULL
-#define OP_BA_B_SRC 0x000000ffc00ULL
-#define OP_BA_DEFBR 0x00000300000ULL
-#define OP_BA_ADDR_HI 0x0007fc00000ULL
-
-#define OP_IMMED_A_SRC 0x000000003ffULL
-#define OP_IMMED_B_SRC 0x000000ffc00ULL
-#define OP_IMMED_IMM 0x0000ff00000ULL
-#define OP_IMMED_WIDTH 0x00060000000ULL
-#define OP_IMMED_INV 0x00080000000ULL
-#define OP_IMMED_SHIFT 0x00600000000ULL
-#define OP_IMMED_BASE 0x0f000000000ULL
-#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+#define OP_BB_SRC_LMEXTN 0x40000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_IMMED_SRC_LMEXTN 0x40000000000ULL
+#define OP_IMMED_DST_LMEXTN 0x80000000000ULL
enum immed_width {
IMMED_WIDTH_ALL = 0,
@@ -119,17 +132,19 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
-#define OP_SHF_BASE 0x08000000000ULL
-#define OP_SHF_A_SRC 0x000000000ffULL
-#define OP_SHF_SC 0x00000000300ULL
-#define OP_SHF_B_SRC 0x0000003fc00ULL
-#define OP_SHF_I8 0x00000040000ULL
-#define OP_SHF_SW 0x00000080000ULL
-#define OP_SHF_DST 0x0000ff00000ULL
-#define OP_SHF_SHIFT 0x001f0000000ULL
-#define OP_SHF_OP 0x00e00000000ULL
-#define OP_SHF_DST_AB 0x01000000000ULL
-#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_SRC_LMEXTN 0x40000000000ULL
+#define OP_SHF_DST_LMEXTN 0x80000000000ULL
enum shf_op {
SHF_OP_NONE = 0,
@@ -139,24 +154,27 @@ enum shf_op {
enum shf_sc {
SHF_SC_R_ROT = 0,
+ SHF_SC_NONE = SHF_SC_R_ROT,
SHF_SC_R_SHF = 1,
SHF_SC_L_SHF = 2,
SHF_SC_R_DSHF = 3,
};
-#define OP_ALU_A_SRC 0x000000003ffULL
-#define OP_ALU_B_SRC 0x000000ffc00ULL
-#define OP_ALU_DST 0x0003ff00000ULL
-#define OP_ALU_SW 0x00040000000ULL
-#define OP_ALU_OP 0x00f80000000ULL
-#define OP_ALU_DST_AB 0x01000000000ULL
-#define OP_ALU_BASE 0x0a000000000ULL
-#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
ALU_OP_NONE = 0x00,
ALU_OP_ADD = 0x01,
- ALU_OP_NEG = 0x04,
+ ALU_OP_NOT = 0x04,
ALU_OP_AND = 0x08,
ALU_OP_SUB_C = 0x0d,
ALU_OP_ADD_C = 0x11,
@@ -170,26 +188,28 @@ enum alu_dst_ab {
ALU_DST_B = 1,
};
-#define OP_LDF_BASE 0x0c000000000ULL
-#define OP_LDF_A_SRC 0x000000000ffULL
-#define OP_LDF_SC 0x00000000300ULL
-#define OP_LDF_B_SRC 0x0000003fc00ULL
-#define OP_LDF_I8 0x00000040000ULL
-#define OP_LDF_SW 0x00000080000ULL
-#define OP_LDF_ZF 0x00000100000ULL
-#define OP_LDF_BMASK 0x0000f000000ULL
-#define OP_LDF_SHF 0x001f0000000ULL
-#define OP_LDF_WR_AB 0x20000000000ULL
-
-#define OP_CMD_A_SRC 0x000000000ffULL
-#define OP_CMD_CTX 0x00000000300ULL
-#define OP_CMD_B_SRC 0x0000003fc00ULL
-#define OP_CMD_TOKEN 0x000000c0000ULL
-#define OP_CMD_XFER 0x00001f00000ULL
-#define OP_CMD_CNT 0x0000e000000ULL
-#define OP_CMD_SIG 0x000f0000000ULL
-#define OP_CMD_TGT_CMD 0x07f00000000ULL
-#define OP_CMD_MODE 0x1c0000000000ULL
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+#define OP_LDF_SRC_LMEXTN 0x40000000000ULL
+#define OP_LDF_DST_LMEXTN 0x80000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
u8 token;
@@ -198,12 +218,17 @@ struct cmd_tgt_act {
enum cmd_tgt_map {
CMD_TGT_READ8,
- CMD_TGT_WRITE8,
+ CMD_TGT_WRITE8_SWAP,
+ CMD_TGT_READ32,
+ CMD_TGT_READ32_LE,
+ CMD_TGT_READ32_SWAP,
CMD_TGT_READ_LE,
CMD_TGT_READ_SWAP_LE,
__CMD_TGT_MAP_SIZE,
};
+extern const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE];
+
enum cmd_mode {
CMD_MODE_40b_AB = 0,
CMD_MODE_40b_BA = 1,
@@ -215,11 +240,13 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
-#define OP_LCSR_BASE 0x0fc00000000ULL
-#define OP_LCSR_A_SRC 0x000000003ffULL
-#define OP_LCSR_B_SRC 0x000000ffc00ULL
-#define OP_LCSR_WRITE 0x00000200000ULL
-#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_SRC_LMEXTN 0x40000000000ULL
+#define OP_LCSR_DST_LMEXTN 0x80000000000ULL
enum lcsr_wr_src {
LCSR_WR_AREG,
@@ -227,7 +254,127 @@ enum lcsr_wr_src {
LCSR_WR_IMM,
};
-#define OP_CARB_BASE 0x0e000000000ULL
-#define OP_CARB_OR 0x00000010000ULL
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#define NFP_CSR_ACT_LM_ADDR0 0x64
+#define NFP_CSR_ACT_LM_ADDR1 0x6c
+#define NFP_CSR_ACT_LM_ADDR2 0x94
+#define NFP_CSR_ACT_LM_ADDR3 0x9c
+
+/* Software register representation, independent of operand type */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_LM_IDX GENMASK(23, 22)
+#define NN_REG_LM_IDX_HI BIT(23)
+#define NN_REG_LM_IDX_LO BIT(22)
+#define NN_REG_LM_MOD GENMASK(21, 20)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_GPR_BOTH = NN_REG_GPR_A | NN_REG_GPR_B,
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+ NN_REG_LMEM = BIT(6),
+};
+
+enum nfp_bpf_lm_mode {
+ NN_LM_MOD_NONE = 0,
+ NN_LM_MOD_INC,
+ NN_LM_MOD_DEC,
+};
+
+#define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH)
+#define reg_a(x) __enc_swreg((x), NN_REG_GPR_A)
+#define reg_b(x) __enc_swreg((x), NN_REG_GPR_B)
+#define reg_nnr(x) __enc_swreg((x), NN_REG_NNR)
+#define reg_xfer(x) __enc_swreg((x), NN_REG_XFER)
+#define reg_imm(x) __enc_swreg((x), NN_REG_IMM)
+#define reg_none() __enc_swreg(0, NN_REG_NONE)
+#define reg_lm(x, off) __enc_swreg_lm((x), NN_LM_MOD_NONE, (off))
+#define reg_lm_inc(x) __enc_swreg_lm((x), NN_LM_MOD_INC, 0)
+#define reg_lm_dec(x) __enc_swreg_lm((x), NN_LM_MOD_DEC, 0)
+#define __reg_lm(x, mod, off) __enc_swreg_lm((x), (mod), (off))
+
+typedef __u32 __bitwise swreg;
+
+static inline swreg __enc_swreg(u16 id, u8 type)
+{
+ return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type));
+}
+
+static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off)
+{
+ WARN_ON(id > 3 || (off && mode != NN_LM_MOD_NONE));
+
+ return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) |
+ FIELD_PREP(NN_REG_LM_IDX, id) |
+ FIELD_PREP(NN_REG_LM_MOD, mode) |
+ off);
+}
+
+static inline u32 swreg_raw(swreg reg)
+{
+ return (__force u32)reg;
+}
+
+static inline enum nfp_bpf_reg_type swreg_type(swreg reg)
+{
+ return FIELD_GET(NN_REG_TYPE, swreg_raw(reg));
+}
+
+static inline u16 swreg_value(swreg reg)
+{
+ return FIELD_GET(NN_REG_VAL, swreg_raw(reg));
+}
+
+static inline bool swreg_lm_idx(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_LO, swreg_raw(reg));
+}
+
+static inline bool swreg_lmextn(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_HI, swreg_raw(reg));
+}
+
+static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_MOD, swreg_raw(reg));
+}
+
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg);
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8);
+
+#define NFP_USTORE_PREFETCH_WINDOW 8
+
+int nfp_ustore_check_valid_no_ecc(u64 insn);
+u64 nfp_ustore_calc_ecc_insn(u64 insn);
#endif
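For illustration only, not part of the patch: a minimal sketch of how the swreg helpers defined above compose, assuming only the macros and inline functions shown in this header. Local-memory index 2 also shows why the instruction formats gained the *_LMEXTN bits: the high half of NN_REG_LM_IDX travels separately from the low bit.

/* Hypothetical usage sketch; kernel context and this header assumed. */
static void swreg_lm_example(void)
{
	/* local memory pointer 2, offset 4, no post-increment/decrement */
	swreg r = reg_lm(2, 4);

	WARN_ON(swreg_type(r) != NN_REG_LMEM);
	WARN_ON(swreg_value(r) != 4);      /* NN_REG_VAL carries the offset */
	WARN_ON(swreg_lm_mode(r) != NN_LM_MOD_NONE);
	WARN_ON(!swreg_lmextn(r));         /* bit 23 set: index 2 needs the extension bit */
	WARN_ON(swreg_lm_idx(r));          /* bit 22 clear: low index bit is 0 */
}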
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index f8fa63b66739..35eaccbece36 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -346,6 +346,32 @@ exit_release_fw:
return err < 0 ? err : 1;
}
+static void
+nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf,
+ struct nfp_nsp *nsp)
+{
+ bool needs_reinit = false;
+ int i;
+
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ if (!pf->eth_tbl)
+ return;
+
+ if (!nfp_nsp_has_mac_reinit(nsp))
+ return;
+
+ for (i = 0; i < pf->eth_tbl->count; i++)
+ needs_reinit |= pf->eth_tbl->ports[i].override_changed;
+ if (!needs_reinit)
+ return;
+
+ kfree(pf->eth_tbl);
+ if (nfp_nsp_mac_reinit(nsp))
+ dev_warn(&pdev->dev, "MAC reinit failed\n");
+
+ pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+}
+
static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
struct nfp_nsp *nsp;
@@ -366,7 +392,7 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
if (err < 0)
goto exit_close_nsp;
- pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+ nfp_nsp_init_ports(pdev, pf, nsp);
pf->nspi = __nfp_nsp_identify(nsp);
if (pf->nspi)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index d51d8237b984..7f9857c276b1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -394,6 +394,7 @@ struct nfp_net_rx_ring {
* @tx_lso: Counter of LSO packets sent
* @tx_errors: How many TX errors were encountered
* @tx_busy: How often was TX busy (no space)?
+ * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures
* @irq_vector: Interrupt vector number (use for talking to the OS)
* @handler: Interrupt handler for this ring vector
* @name: Name of the interrupt vector
@@ -437,6 +438,8 @@ struct nfp_net_r_vector {
u64 hw_csum_tx_inner;
u64 tx_gather;
u64 tx_lso;
+
+ u64 rx_replace_buf_alloc_fail;
u64 tx_errors;
u64 tx_busy;
@@ -473,7 +476,6 @@ struct nfp_stat_pair {
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
- * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
 * @chained_metadata_format: Firmware will use new metadata format
* @rx_dma_dir: Mapping direction for RX buffers
@@ -499,7 +501,6 @@ struct nfp_net_dp {
struct net_device *netdev;
u8 is_vf:1;
- u8 bpf_offload_skip_sw:1;
u8 bpf_offload_xdp:1;
u8 chained_metadata_format:1;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e118b5f23996..1a603fdd9e80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -177,9 +177,9 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
return timed_out ? -EIO : 0;
}
-static void nfp_net_reconfig_timer(unsigned long data)
+static void nfp_net_reconfig_timer(struct timer_list *t)
{
- struct nfp_net *nn = (void *)data;
+ struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
spin_lock_bh(&nn->reconfig_lock);
@@ -1185,7 +1185,7 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
} else {
struct page *page;
- page = alloc_page(GFP_KERNEL | __GFP_COLD);
+ page = alloc_page(GFP_KERNEL);
frag = page ? page_address(page) : NULL;
}
if (!frag) {
@@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz);
+ if (unlikely(!frag))
+ return NULL;
} else {
struct page *page;
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- frag = page ? page_address(page) : NULL;
- }
- if (!frag) {
- nn_dp_warn(dp, "Failed to alloc receive page frag\n");
- return NULL;
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return NULL;
+ frag = page_address(page);
}
*dma_addr = nfp_net_dma_map_rx(dp, frag);
@@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
{
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_drops++;
+ /* If we have both skb and rxbuf, the replacement buffer allocation
+ * must have failed; count this as an alloc failure.
+ */
+ if (skb && rxbuf)
+ r_vec->rx_replace_buf_alloc_fail++;
u64_stats_update_end(&r_vec->rx_sync);
 /* skb is built based on the frag, free_skb() would free the frag
@@ -1582,26 +1587,6 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
- unsigned int *off, unsigned int *len)
-{
- struct xdp_buff xdp;
- void *orig_data;
- int ret;
-
- xdp.data_hard_start = hard_start;
- xdp.data = data + *off;
- xdp.data_end = data + *off + *len;
-
- orig_data = xdp.data;
- ret = bpf_prog_run_xdp(prog, &xdp);
-
- *len -= xdp.data - orig_data;
- *off += xdp.data - orig_data;
-
- return ret;
-}
-
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1637,6 +1622,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_meta_parsed meta;
struct net_device *netdev;
dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
void *new_frag;
idx = D_IDX(rx_ring, rx_ring->rd_p);
@@ -1715,16 +1701,24 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp) && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- void *hard_start;
+ struct xdp_buff xdp;
int act;
- hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data = orig_data;
+ xdp.data_meta = orig_data;
+ xdp.data_end = orig_data + pkt_len;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len -= xdp.data - orig_data;
+ pkt_off += xdp.data - orig_data;
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
- &pkt_off, &pkt_len);
switch (act) {
case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
break;
case XDP_TX:
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
@@ -1792,6 +1786,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
@@ -3382,7 +3378,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
return 0;
}
-static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
+static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
{
struct nfp_net *nn = netdev_priv(netdev);
@@ -3397,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
+ case BPF_OFFLOAD_VERIFIER_PREP:
+ return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
+ case BPF_OFFLOAD_TRANSLATE:
+ return nfp_app_bpf_translate(nn->app, nn,
+ xdp->offload.prog);
+ case BPF_OFFLOAD_DESTROY:
+ return nfp_app_bpf_destroy(nn->app, nn,
+ xdp->offload.prog);
default:
return -EINVAL;
}
@@ -3445,7 +3449,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_phys_port_name = nfp_port_get_phys_port_name,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
- .ndo_xdp = nfp_net_xdp,
+ .ndo_bpf = nfp_net_xdp,
};
/**
@@ -3546,8 +3550,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
- setup_timer(&nn->reconfig_timer,
- nfp_net_reconfig_timer, (unsigned long)nn);
+ timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
return nn;
}
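For illustration, not part of the patch: the pointer arithmetic around the bpf_prog_run_xdp() call inlined into nfp_net_rx() above, under the usual XDP convention that the program may move xdp.data with bpf_xdp_adjust_head() and reserve metadata in front of it with bpf_xdp_adjust_meta(). The local variable delta is introduced here only for clarity.

	/* on entry: xdp.data == xdp.data_meta == orig_data == frag + pkt_off,
	 * xdp.data_end == orig_data + pkt_len
	 */
	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	delta    = xdp.data - orig_data;   /* > 0 trims the head, < 0 grows it */
	pkt_len -= delta;                  /* frame shrinks or grows accordingly */
	pkt_off += delta;                  /* payload start moves within the frag */
	meta_len_xdp = xdp.data - xdp.data_meta;  /* metadata bytes in front of data,
						   * later passed to skb_metadata_set() */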
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index b0a452ba9039..782d452e0fc2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -255,7 +255,7 @@
* @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 1
+#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index dc016dfec64d..2801ecd09eab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -104,7 +104,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
{ "rx_frame_too_long_errors",
NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
{ "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
- { "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
+ { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
{ "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
{ "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
{ "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
@@ -181,7 +181,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
-#define NN_ET_RVEC_GATHER_STATS 7
+#define NN_RVEC_GATHER_STATS 8
+#define NN_RVEC_PER_Q_STATS 3
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
@@ -243,6 +244,30 @@ nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}
+static void
+nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
+ struct ethtool_link_ksettings *c)
+{
+ unsigned int modes;
+
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
+ if (!nfp_eth_can_support_fec(eth_port)) {
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
+ return;
+ }
+
+ modes = nfp_eth_supported_fec_modes(eth_port);
+ if (modes & NFP_FEC_BASER) {
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
+ }
+
+ if (modes & NFP_FEC_REED_SOLOMON) {
+ ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
+ }
+}
+
/**
* nfp_net_get_link_ksettings - Get Link Speed settings
* @netdev: network interface device structure
@@ -277,9 +302,11 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
- if (eth_port)
+ if (eth_port) {
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
+ nfp_net_set_fec_link_mode(eth_port, cmd);
+ }
if (!netif_carrier_ok(netdev))
return 0;
@@ -327,7 +354,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
if (netif_running(netdev)) {
- netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
+ netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
return -EBUSY;
}
@@ -427,7 +454,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
+ return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS;
}
static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
@@ -444,6 +471,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_ok");
data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
data = nfp_pr_et(data, "hw_rx_csum_err");
+ data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
@@ -454,9 +482,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
- u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
+ u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev);
- u64 tmp[NN_ET_RVEC_GATHER_STATS];
+ u64 tmp[NN_RVEC_GATHER_STATS];
unsigned int i, j;
for (i = 0; i < nn->dp.num_r_vecs; i++) {
@@ -468,25 +496,26 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
+ tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
- tmp[3] = nn->r_vecs[i].hw_csum_tx;
- tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
- tmp[5] = nn->r_vecs[i].tx_gather;
- tmp[6] = nn->r_vecs[i].tx_lso;
+ tmp[4] = nn->r_vecs[i].hw_csum_tx;
+ tmp[5] = nn->r_vecs[i].hw_csum_tx_inner;
+ tmp[6] = nn->r_vecs[i].tx_gather;
+ tmp[7] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
- data += 3;
+ data += NN_RVEC_PER_Q_STATS;
- for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+ for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
- for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
+ for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
*data++ = gathered_stats[j];
return data;
@@ -683,6 +712,91 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int nfp_port_fec_ethtool_to_nsp(u32 fec)
+{
+ switch (fec) {
+ case ETHTOOL_FEC_AUTO:
+ return NFP_FEC_AUTO_BIT;
+ case ETHTOOL_FEC_OFF:
+ return NFP_FEC_DISABLED_BIT;
+ case ETHTOOL_FEC_RS:
+ return NFP_FEC_REED_SOLOMON_BIT;
+ case ETHTOOL_FEC_BASER:
+ return NFP_FEC_BASER_BIT;
+ default:
+ /* NSP only supports a single mode at a time */
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
+{
+ u32 result = 0;
+
+ if (fec & NFP_FEC_AUTO)
+ result |= ETHTOOL_FEC_AUTO;
+ if (fec & NFP_FEC_BASER)
+ result |= ETHTOOL_FEC_BASER;
+ if (fec & NFP_FEC_REED_SOLOMON)
+ result |= ETHTOOL_FEC_RS;
+ if (fec & NFP_FEC_DISABLED)
+ result |= ETHTOOL_FEC_OFF;
+
+ return result ?: ETHTOOL_FEC_NONE;
+}
+
+static int
+nfp_port_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *param)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+
+ param->active_fec = ETHTOOL_FEC_NONE_BIT;
+ param->fec = ETHTOOL_FEC_NONE_BIT;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nfp_eth_can_support_fec(eth_port))
+ return 0;
+
+ param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
+ param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
+
+ return 0;
+}
+
+static int
+nfp_port_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *param)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err, fec;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (!nfp_eth_can_support_fec(eth_port))
+ return -EOPNOTSUPP;
+
+ fec = nfp_port_fec_ethtool_to_nsp(param->fec);
+ if (fec < 0)
+ return fec;
+
+ err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
+ if (!err)
+ /* Only refresh if we did something */
+ nfp_net_refresh_port_table(port);
+
+ return err < 0 ? err : 0;
+}
+
/* RX network flow classification (RSS, filters, etc)
*/
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
@@ -1141,6 +1255,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_channels = nfp_net_set_channels,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
+ .get_fecparam = nfp_port_get_fecparam,
+ .set_fecparam = nfp_port_set_fecparam,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -1152,6 +1268,10 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_link_ksettings = nfp_net_get_link_ksettings,
+ .set_link_ksettings = nfp_net_set_link_ksettings,
+ .get_fecparam = nfp_port_get_fecparam,
+ .set_fecparam = nfp_port_set_fecparam,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
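For illustration, not part of the patch: a worked example of the two FEC translation helpers added above. Note the asymmetry visible in the code: nfp_port_fec_ethtool_to_nsp() returns a single NSP bit index (the NSP accepts one mode at a time), while nfp_port_fec_nsp_to_ethtool() consumes a bit mask and may report several ethtool flags.

	/* ethtool -> NSP: one mode, returned as an nfp_eth_fec enum index */
	WARN_ON(nfp_port_fec_ethtool_to_nsp(ETHTOOL_FEC_RS) !=
		NFP_FEC_REED_SOLOMON_BIT);
	WARN_ON(nfp_port_fec_ethtool_to_nsp(ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER) !=
		-EOPNOTSUPP);

	/* NSP -> ethtool: a mask of modes maps to a set of flags */
	WARN_ON(nfp_port_fec_nsp_to_ethtool(NFP_FEC_BASER | NFP_FEC_DISABLED) !=
		(ETHTOOL_FEC_BASER | ETHTOOL_FEC_OFF));
	WARN_ON(nfp_port_fec_nsp_to_ethtool(0) != ETHTOOL_FEC_NONE);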
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index ff373acd28f3..c505014121c4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -597,7 +597,7 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
return -EIO;
}
if (eth_port->override_changed) {
- nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
+ nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
port->type = NFP_PORT_INVALID;
}
@@ -611,6 +611,7 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
struct nfp_eth_table *eth_table;
struct nfp_net *nn, *next;
struct nfp_port *port;
+ int err;
lockdep_assert_held(&pf->lock);
@@ -640,6 +641,11 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
kfree(eth_table);
+ /* Resync repr state. This may cause reprs to be removed. */
+ err = nfp_reprs_resync_phys_ports(pf->app);
+ if (err)
+ return err;
+
/* Shoot off the ports which became invalid */
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nn->port || nn->port->type != NFP_PORT_INVALID)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d540a9dc77b3..924a05e05da0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -258,6 +258,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
+ nfp_app_repr_clean(repr->app, repr->netdev);
dst_release((struct dst_entry *)repr->dst);
nfp_port_free(repr->port);
}
@@ -297,6 +298,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->netdev_ops = &nfp_repr_netdev_ops;
netdev->ethtool_ops = &nfp_port_ethtool_ops;
+ netdev->max_mtu = pf_netdev->max_mtu;
+
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
if (nfp_app_has_tc(app)) {
@@ -304,12 +307,18 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->hw_features |= NETIF_F_HW_TC;
}
- err = register_netdev(netdev);
+ err = nfp_app_repr_init(app, netdev);
if (err)
goto err_clean;
+ err = register_netdev(netdev);
+ if (err)
+ goto err_repr_clean;
+
return 0;
+err_repr_clean:
+ nfp_app_repr_clean(app, netdev);
err_clean:
dst_release((struct dst_entry *)repr->dst);
return err;
@@ -390,3 +399,50 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
return reprs;
}
+
+int nfp_reprs_resync_phys_ports(struct nfp_app *app)
+{
+ struct nfp_reprs *reprs, *old_reprs;
+ struct nfp_repr *repr;
+ int i;
+
+ old_reprs =
+ rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT],
+ lockdep_is_held(&app->pf->lock));
+ if (!old_reprs)
+ return 0;
+
+ reprs = nfp_reprs_alloc(old_reprs->num_reprs);
+ if (!reprs)
+ return -ENOMEM;
+
+ for (i = 0; i < old_reprs->num_reprs; i++) {
+ if (!old_reprs->reprs[i])
+ continue;
+
+ repr = netdev_priv(old_reprs->reprs[i]);
+ if (repr->port->type == NFP_PORT_INVALID)
+ continue;
+
+ reprs->reprs[i] = old_reprs->reprs[i];
+ }
+
+ old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
+ synchronize_rcu();
+
+ /* Now we free up removed representors */
+ for (i = 0; i < old_reprs->num_reprs; i++) {
+ if (!old_reprs->reprs[i])
+ continue;
+
+ repr = netdev_priv(old_reprs->reprs[i]);
+ if (repr->port->type != NFP_PORT_INVALID)
+ continue;
+
+ nfp_app_repr_stop(app, repr);
+ nfp_repr_clean(repr);
+ }
+
+ kfree(old_reprs);
+ return 0;
+}
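For illustration, not part of the patch: nfp_reprs_resync_phys_ports() above follows the standard RCU copy/publish/reclaim sequence. A minimal self-contained sketch of the same shape, with a hypothetical bar_table structure; the update side is assumed to be serialized by the caller.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar_table {                        /* hypothetical example structure */
	unsigned int n;
};

static struct bar_table __rcu *bar_active;

static void bar_replace(struct bar_table *new)
{
	struct bar_table *old;

	old = rcu_dereference_protected(bar_active, 1);
	rcu_assign_pointer(bar_active, new);  /* publish the new table         */
	synchronize_rcu();                    /* wait out pre-existing readers */
	kfree(old);                           /* now unreachable, safe to free */
}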
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 32179cad062a..5d4d897bc9c6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -124,5 +124,6 @@ void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
+int nfp_reprs_resync_phys_ports(struct nfp_app *app);
#endif /* NFP_NET_REPR_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index e6d2e06b050c..8b1b962cf1d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -112,7 +112,13 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
writew(get_unaligned_be16(mac + 4),
app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO);
- return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC");
+ err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC");
+ if (!err)
+ nfp_info(app->pf->cpp,
+ "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n",
+ mac, vf);
+
+ return err;
}
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 51dcb9c603ee..21bd4aa32646 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -157,7 +157,7 @@ void nfp_devlink_port_unregister(struct nfp_port *port);
/* unused 0x008 */
#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
-#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
+#define NFP_MAC_STATS_RX_VLAN_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 37364555c42b..14a6d1ba51a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -477,6 +477,11 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
}
+int nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
{
return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index e2f028027c6f..650ca1a5bd21 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,12 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+
+static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 20;
+}
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
@@ -73,6 +79,18 @@ enum nfp_eth_aneg {
NFP_ANEG_DISABLED,
};
+enum nfp_eth_fec {
+ NFP_FEC_AUTO_BIT = 0,
+ NFP_FEC_BASER_BIT,
+ NFP_FEC_REED_SOLOMON_BIT,
+ NFP_FEC_DISABLED_BIT,
+};
+
+#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
@@ -87,6 +105,7 @@ enum nfp_eth_aneg {
* @speed: interface speed (in Mbps)
* @interface: interface (module) plugged in
* @media: media type of the @interface
+ * @fec: forward error correction mode
* @aneg: auto negotiation mode
* @mac_addr: interface MAC address
* @label_port: port id
@@ -99,6 +118,7 @@ enum nfp_eth_aneg {
* @port_type: one of %PORT_* defines for ethtool
* @port_lanes: total number of lanes on the port (sum of lanes of all subports)
* @is_split: is interface part of a split port
+ * @fec_modes_supported: bitmap of FEC modes supported
*/
struct nfp_eth_table {
unsigned int count;
@@ -114,6 +134,7 @@ struct nfp_eth_table {
unsigned int interface;
enum nfp_eth_media media;
+ enum nfp_eth_fec fec;
enum nfp_eth_aneg aneg;
u8 mac_addr[ETH_ALEN];
@@ -133,6 +154,8 @@ struct nfp_eth_table {
unsigned int port_lanes;
bool is_split;
+
+ unsigned int fec_modes_supported;
} ports[0];
};
@@ -143,6 +166,19 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
bool configed);
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+
+static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
+{
+ return !!eth_port->fec_modes_supported;
+}
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+ return eth_port->fec_modes_supported;
+}
struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index f6f7c085f8e0..7ca589660e4d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -55,6 +55,8 @@
#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
#define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES)
@@ -67,6 +69,7 @@
#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -75,6 +78,7 @@
#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -152,6 +156,7 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
unsigned int index, struct nfp_eth_table_port *dst)
{
unsigned int rate;
+ unsigned int fec;
u64 port, state;
port = le64_to_cpu(src->port);
@@ -183,6 +188,18 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 22)
+ return;
+
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT;
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT;
+ if (dst->fec_modes_supported)
+ dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
+
+ dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
}
static void
@@ -469,10 +486,10 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
return nfp_eth_config_commit_end(nsp);
}
-/* Force inline, FIELD_* macroes require masks to be compilation-time known */
-static __always_inline int
+static int
nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
- const u64 mask, unsigned int val, const u64 ctrl_bit)
+ const u64 mask, const unsigned int shift,
+ unsigned int val, const u64 ctrl_bit)
{
union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
unsigned int idx = nfp_nsp_config_idx(nsp);
@@ -489,11 +506,11 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
/* Check if we are already in requested state */
reg = le64_to_cpu(entries[idx].raw[raw_idx]);
- if (val == FIELD_GET(mask, reg))
+ if (val == (reg & mask) >> shift)
return 0;
reg &= ~mask;
- reg |= FIELD_PREP(mask, val);
+ reg |= (val << shift) & mask;
entries[idx].raw[raw_idx] = cpu_to_le64(reg);
entries[idx].control |= cpu_to_le64(ctrl_bit);
@@ -503,6 +520,13 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
return 0;
}
+#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
+ ({ \
+ __BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \
+ nfp_eth_set_bit_config(nsp, raw_idx, mask, __bf_shf(mask), \
+ val, ctrl_bit); \
+ })
+
/**
* __nfp_eth_set_aneg() - set PHY autonegotiation control bit
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
@@ -515,12 +539,59 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
*/
int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
{
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
NSP_ETH_STATE_ANEG, mode,
NSP_ETH_CTRL_SET_ANEG);
}
/**
+ * __nfp_eth_set_fec() - set PHY forward error correction control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired fec mode
+ *
+ * Set the PHY module forward error correction mode.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_FEC, mode,
+ NSP_ETH_CTRL_SET_FEC);
+}
+
+/**
+ * nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @mode: Desired fec mode
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ err = __nfp_eth_set_fec(nsp, mode);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
* __nfp_eth_set_speed() - set interface speed/rate
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
* @speed: Desired speed (per lane)
@@ -544,7 +615,7 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
return -EINVAL;
}
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
NSP_ETH_STATE_RATE, rate,
NSP_ETH_CTRL_SET_RATE);
}
@@ -561,6 +632,6 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
*/
int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
{
- return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
lanes, NSP_ETH_CTRL_SET_LANES);
}
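For illustration, not part of the patch: a worked example of the FEC decoding added to nfp_eth_port_translate() above, for a port whose NSP entry advertises Reed-Solomon support only and currently has FEC disabled.

	/*
	 * port word: NSP_ETH_PORT_FEC_SUPP_RS set, NSP_ETH_PORT_FEC_SUPP_BASER clear
	 *   => fec_modes_supported = NFP_FEC_REED_SOLOMON
	 *                          | NFP_FEC_AUTO | NFP_FEC_DISABLED
	 *      ("auto" and "off" are implied by any hardware mode being present)
	 *
	 * state word: FIELD_GET(NSP_ETH_STATE_FEC, state) == NFP_FEC_DISABLED_BIT (3)
	 *   => fec = 1 << 3 == NFP_FEC_DISABLED
	 */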
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 4a67c55aa9f1..052b3d2c07a1 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -253,10 +253,10 @@ static void update_linkspeed(struct net_device *dev)
netif_carrier_on(dev);
}
-static void w90p910_check_link(unsigned long dev_id)
+static void w90p910_check_link(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct w90p910_ether *ether = netdev_priv(dev);
+ struct w90p910_ether *ether = from_timer(ether, t, check_timer);
+ struct net_device *dev = ether->mii.dev;
update_linkspeed(dev);
mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
@@ -957,8 +957,7 @@ static int w90p910_ether_setup(struct net_device *dev)
ether->mii.mdio_read = w90p910_mdio_read;
ether->mii.mdio_write = w90p910_mdio_write;
- setup_timer(&ether->check_timer, w90p910_check_link,
- (unsigned long)dev);
+ timer_setup(&ether->check_timer, w90p910_check_link, 0);
return 0;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 994a83a1f0a5..ac8439ceea10 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1024,12 +1024,18 @@ static void free_rings(struct net_device *dev)
if (!nv_optimized(np)) {
if (np->rx_ring.orig)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
- np->rx_ring.orig, np->ring_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc) *
+ (np->rx_ring_size +
+ np->tx_ring_size),
+ np->rx_ring.orig, np->ring_addr);
} else {
if (np->rx_ring.ex)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
- np->rx_ring.ex, np->ring_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc_ex) *
+ (np->rx_ring_size +
+ np->tx_ring_size),
+ np->rx_ring.ex, np->ring_addr);
}
kfree(np->rx_skb);
kfree(np->tx_skb);
@@ -1813,12 +1819,12 @@ static int nv_alloc_rx(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
- np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
skb_tailroom(skb),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->put_rx_ctx->dma)) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
@@ -1854,12 +1860,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
if (skb) {
np->put_rx_ctx->skb = skb;
- np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
+ np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data,
skb_tailroom(skb),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->put_rx_ctx->dma)) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
@@ -1884,10 +1890,9 @@ packet_dropped:
}
 /* If rx bufs are exhausted, called after 50ms to attempt to refresh */
-static void nv_do_rx_refill(unsigned long data)
+static void nv_do_rx_refill(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = from_timer(np, t, oom_kick);
/* Just reschedule NAPI rx processing */
napi_schedule(&np->napi);
@@ -1977,9 +1982,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
{
if (tx_skb->dma) {
if (tx_skb->dma_single)
- pci_unmap_single(np->pci_dev, tx_skb->dma,
+ dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
pci_unmap_page(np->pci_dev, tx_skb->dma,
tx_skb->dma_len,
@@ -2047,10 +2052,10 @@ static void nv_drain_rx(struct net_device *dev)
}
wmb();
if (np->rx_skb[i].skb) {
- pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
+ dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
(skb_end_pointer(np->rx_skb[i].skb) -
- np->rx_skb[i].skb->data),
- PCI_DMA_FROMDEVICE);
+ np->rx_skb[i].skb->data),
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skb[i].skb);
np->rx_skb[i].skb = NULL;
}
@@ -2221,13 +2226,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* setup the header buffer */
do {
- prev_tx = put_tx;
- prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->put_tx_ctx->dma)) {
+ np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
+ skb->data + offset, bcnt,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2256,8 +2260,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
offset = 0;
do {
- prev_tx = put_tx;
- prev_tx_ctx = np->put_tx_ctx;
if (!start_tx_ctx)
start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
@@ -2267,7 +2269,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
frag, offset,
bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
@@ -2297,6 +2300,16 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
} while (frag_size);
}
+ if (unlikely(put_tx == np->first_tx.orig))
+ prev_tx = np->last_tx.orig;
+ else
+ prev_tx = put_tx - 1;
+
+ if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ prev_tx_ctx = np->last_tx_ctx;
+ else
+ prev_tx_ctx = np->put_tx_ctx - 1;
+
/* set last fragment flag */
prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
@@ -2370,13 +2383,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* setup the header buffer */
do {
- prev_tx = put_tx;
- prev_tx_ctx = np->put_tx_ctx;
bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
- np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->put_tx_ctx->dma)) {
+ np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
+ skb->data + offset, bcnt,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2406,8 +2418,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
offset = 0;
do {
- prev_tx = put_tx;
- prev_tx_ctx = np->put_tx_ctx;
bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
if (!start_tx_ctx)
start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
@@ -2417,7 +2427,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
@@ -2447,6 +2458,16 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
} while (frag_size);
}
+ if (unlikely(put_tx == np->first_tx.ex))
+ prev_tx = np->last_tx.ex;
+ else
+ prev_tx = put_tx - 1;
+
+ if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+ prev_tx_ctx = np->last_tx_ctx;
+ else
+ prev_tx_ctx = np->put_tx_ctx - 1;
+
/* set last fragment flag */
prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
@@ -2810,9 +2831,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
* TODO: check if a prefetch of the first cacheline improves
* the performance.
*/
- pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
- np->get_rx_ctx->dma_len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
+ DMA_FROM_DEVICE);
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
@@ -2916,9 +2937,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
* TODO: check if a prefetch of the first cacheline improves
* the performance.
*/
- pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
- np->get_rx_ctx->dma_len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
+ np->get_rx_ctx->dma_len,
+ DMA_FROM_DEVICE);
skb = np->get_rx_ctx->skb;
np->get_rx_ctx->skb = NULL;
@@ -4061,10 +4082,10 @@ static void nv_free_irq(struct net_device *dev)
}
}
-static void nv_do_nic_poll(unsigned long data)
+static void nv_do_nic_poll(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = from_timer(np, t, nic_poll);
+ struct net_device *dev = np->dev;
u8 __iomem *base = get_hwbase(dev);
u32 mask = 0;
unsigned long flags;
@@ -4172,16 +4193,18 @@ static void nv_do_nic_poll(unsigned long data)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
- nv_do_nic_poll((unsigned long) dev);
+ struct fe_priv *np = netdev_priv(dev);
+
+ nv_do_nic_poll(&np->nic_poll);
}
#endif
-static void nv_do_stats_poll(unsigned long data)
+static void nv_do_stats_poll(struct timer_list *t)
__acquires(&netdev_priv(dev)->hwstats_lock)
__releases(&netdev_priv(dev)->hwstats_lock)
{
- struct net_device *dev = (struct net_device *) data;
- struct fe_priv *np = netdev_priv(dev);
+ struct fe_priv *np = from_timer(np, t, stats_poll);
+ struct net_device *dev = np->dev;
/* If lock is currently taken, the stats are being refreshed
* and hence fresh enough */
@@ -4591,13 +4614,17 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
/* allocate new rings */
if (!nv_optimized(np)) {
- rxtx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
- &ring_addr);
+ rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc) *
+ (ring->rx_pending +
+ ring->tx_pending),
+ &ring_addr, GFP_ATOMIC);
} else {
- rxtx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
- &ring_addr);
+ rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc_ex) *
+ (ring->rx_pending +
+ ring->tx_pending),
+ &ring_addr, GFP_ATOMIC);
}
rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
@@ -4605,12 +4632,18 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
/* fall back to old rings */
if (!nv_optimized(np)) {
if (rxtx_ring)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
- rxtx_ring, ring_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc) *
+ (ring->rx_pending +
+ ring->tx_pending),
+ rxtx_ring, ring_addr);
} else {
if (rxtx_ring)
- pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
- rxtx_ring, ring_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct ring_desc_ex) *
+ (ring->rx_pending +
+ ring->tx_pending),
+ rxtx_ring, ring_addr);
}
kfree(rx_skbuff);
@@ -5070,11 +5103,11 @@ static int nv_loopback_test(struct net_device *dev)
ret = 0;
goto out;
}
- test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+ test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
skb_tailroom(tx_skb),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- test_dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ test_dma_addr))) {
dev_kfree_skb_any(tx_skb);
goto out;
}
@@ -5129,9 +5162,9 @@ static int nv_loopback_test(struct net_device *dev)
}
}
- pci_unmap_single(np->pci_dev, test_dma_addr,
- (skb_end_pointer(tx_skb) - tx_skb->data),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
+ (skb_end_pointer(tx_skb) - tx_skb->data),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(tx_skb);
out:
/* stop engines */
@@ -5627,10 +5660,9 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
- setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev);
- setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev);
- setup_deferrable_timer(&np->stats_poll, nv_do_stats_poll,
- (unsigned long)dev);
+ timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
+ timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
+ timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
err = pci_enable_device(pci_dev);
if (err)
@@ -5736,16 +5768,21 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
np->tx_ring_size = TX_RING_DEFAULT;
if (!nv_optimized(np)) {
- np->rx_ring.orig = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
- &np->ring_addr);
+ np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
+ sizeof(struct ring_desc) *
+ (np->rx_ring_size +
+ np->tx_ring_size),
+ &np->ring_addr,
+ GFP_ATOMIC);
if (!np->rx_ring.orig)
goto out_unmap;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
- np->rx_ring.ex = pci_alloc_consistent(pci_dev,
- sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
- &np->ring_addr);
+ np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
+ sizeof(struct ring_desc_ex) *
+ (np->rx_ring_size +
+ np->tx_ring_size),
+ &np->ring_addr, GFP_ATOMIC);
if (!np->rx_ring.ex)
goto out_unmap;
np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
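For illustration, not part of the patch: the timer conversions in forcedeth above (and in the other drivers touched by this series) all follow the same shape; a minimal self-contained sketch with a hypothetical foo_priv structure, assuming the timer_setup()/from_timer() API this series converts to.

#include <linux/timer.h>

struct foo_priv {                         /* hypothetical example structure */
	struct timer_list watchdog;
	unsigned long ticks;
};

static void foo_watchdog(struct timer_list *t)
{
	/* recover the containing structure from the embedded timer */
	struct foo_priv *fp = from_timer(fp, t, watchdog);

	fp->ticks++;                              /* periodic work        */
	mod_timer(&fp->watchdog, jiffies + HZ);   /* re-arm in one second */
}

static void foo_init(struct foo_priv *fp)
{
	/* replaces setup_timer(&fp->watchdog, cb, (unsigned long)fp) */
	timer_setup(&fp->watchdog, foo_watchdog, 0);
}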
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 8d710a3b4db0..697e29dd4bd3 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -613,7 +613,6 @@ struct pch_gbe_privdata {
* @rx_ring: Pointer of Rx descriptor ring structure
* @rx_buffer_len: Receive buffer length
* @tx_queue_len: Transmit queue length
- * @have_msi: PCI MSI mode flag
* @pch_gbe_privdata: PCI Device ID driver_data
*/
@@ -623,6 +622,7 @@ struct pch_gbe_adapter {
atomic_t irq_sem;
struct net_device *netdev;
struct pci_dev *pdev;
+ int irq;
struct net_device *polling_netdev;
struct napi_struct napi;
struct pch_gbe_hw hw;
@@ -637,7 +637,6 @@ struct pch_gbe_adapter {
struct pch_gbe_rx_ring *rx_ring;
unsigned long rx_buffer_len;
unsigned long tx_queue_len;
- bool have_msi;
bool rx_stop_flag;
int hwts_tx_en;
int hwts_rx_en;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 5ae9681a2da7..40e52ffb732f 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -781,11 +781,8 @@ static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- free_irq(adapter->pdev->irq, netdev);
- if (adapter->have_msi) {
- pci_disable_msi(adapter->pdev);
- netdev_dbg(netdev, "call pci_disable_msi\n");
- }
+ free_irq(adapter->irq, netdev);
+ pci_free_irq_vectors(adapter->pdev);
}
/**
@@ -799,7 +796,7 @@ static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
atomic_inc(&adapter->irq_sem);
iowrite32(0, &hw->reg->INT_EN);
ioread32(&hw->reg->INT_ST);
- synchronize_irq(adapter->pdev->irq);
+ synchronize_irq(adapter->irq);
netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
ioread32(&hw->reg->INT_EN));
@@ -1092,9 +1089,10 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
* pch_gbe_watchdog - Watchdog process
* @data: Board private structure
*/
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
@@ -1903,30 +1901,23 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
- int flags;
- flags = IRQF_SHARED;
- adapter->have_msi = false;
- err = pci_enable_msi(adapter->pdev);
- netdev_dbg(netdev, "call pci_enable_msi\n");
- if (err) {
- netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err);
- } else {
- flags = 0;
- adapter->have_msi = true;
- }
- err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
- flags, netdev->name, netdev);
+ err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (err < 0)
+ return err;
+
+ adapter->irq = pci_irq_vector(adapter->pdev, 0);
+
+ err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
+ netdev->name, netdev);
if (err)
netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
err);
- netdev_dbg(netdev,
- "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
- adapter->have_msi, flags, err);
+ netdev_dbg(netdev, "have_msi : %d return : 0x%04x\n",
+ pci_dev_msi_enabled(adapter->pdev), err);
return err;
}
-
/**
* pch_gbe_up - Up GbE network device
* @adapter: Board private structure
@@ -2399,9 +2390,9 @@ static void pch_gbe_netpoll(struct net_device *netdev)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- disable_irq(adapter->pdev->irq);
- pch_gbe_intr(adapter->pdev->irq, netdev);
- enable_irq(adapter->pdev->irq);
+ disable_irq(adapter->irq);
+ pch_gbe_intr(adapter->irq, netdev);
+ enable_irq(adapter->irq);
}
#endif
@@ -2654,8 +2645,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "Invalid MAC address, "
"interface disabled.\n");
}
- setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
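The pch_gbe hunks above replace the hand-rolled MSI probe (pci_enable_msi() plus a legacy-INTx fallback tracked in have_msi) with the managed pci_alloc_irq_vectors() API, caching the resolved Linux IRQ number in adapter->irq so request_irq(), synchronize_irq(), disable_irq() and the netpoll path stop reaching for pdev->irq. A minimal sketch of that idiom; my_handler/my_setup_irq/my_teardown_irq are illustrative names, not part of the patch:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_setup_irq(struct pci_dev *pdev, void *ctx, int *out_irq)
{
        int err;

        /* Ask for exactly one vector of any type: MSI-X, MSI or legacy INTx. */
        err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (err < 0)
                return err;

        *out_irq = pci_irq_vector(pdev, 0);     /* resolved Linux IRQ number */

        err = request_irq(*out_irq, my_handler, IRQF_SHARED, "my_dev", ctx);
        if (err)
                pci_free_irq_vectors(pdev);     /* undo the allocation on failure */
        return err;
}

static void my_teardown_irq(struct pci_dev *pdev, int irq, void *ctx)
{
        free_irq(irq, ctx);
        pci_free_irq_vectors(pdev);
}

pci_dev_msi_enabled(), used in the reworked debug print, simply reports whether MSI ended up enabled by that allocation.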
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 482b85e4d665..c9529c29a0a7 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -413,13 +413,13 @@ that case.
/* The rest of these values should never change. */
-static void hamachi_timer(unsigned long data);
+static void hamachi_timer(struct timer_list *t);
enum capability_flags {CanHaveMII=1, };
static const struct chip_info {
u16 vendor_id, device_id, device_id_mask, pad;
const char *name;
- void (*media_timer)(unsigned long data);
+ void (*media_timer)(struct timer_list *t);
int flags;
} chip_tbl[] = {
{0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0},
@@ -547,7 +547,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int hamachi_open(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void hamachi_timer(unsigned long data);
+static void hamachi_timer(struct timer_list *t);
static void hamachi_tx_timeout(struct net_device *dev);
static void hamachi_init_ring(struct net_device *dev);
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
@@ -979,10 +979,8 @@ static int hamachi_open(struct net_device *dev)
dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus));
}
/* Set the timer to check for link beat. */
- init_timer(&hmp->timer);
+ timer_setup(&hmp->timer, hamachi_timer, 0);
hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
- hmp->timer.data = (unsigned long)dev;
- hmp->timer.function = hamachi_timer; /* timer handler */
add_timer(&hmp->timer);
return 0;
@@ -1019,10 +1017,10 @@ static inline int hamachi_tx(struct net_device *dev)
return 0;
}
-static void hamachi_timer(unsigned long data)
+static void hamachi_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct hamachi_private *hmp = netdev_priv(dev);
+ struct hamachi_private *hmp = from_timer(hmp, t, timer);
+ struct net_device *dev = hmp->mii_if.dev;
void __iomem *ioaddr = hmp->base;
int next_tick = 10*HZ;
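The hamachi change is one instance of the tree-wide timer API conversion running through this section: the callback now takes a struct timer_list * and recovers its container with from_timer(), so the old .data/.function assignments (and setup_timer()) disappear. A minimal hedged sketch of the pattern, with an illustrative my_priv container:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
        struct timer_list timer;        /* embedded, so from_timer() can find it */
        unsigned long ticks;
};

static void my_timer_fn(struct timer_list *t)
{
        /* container_of() wrapper: variable, timer_list pointer, member name */
        struct my_priv *priv = from_timer(priv, t, timer);

        priv->ticks++;
        mod_timer(&priv->timer, jiffies + HZ);  /* re-arm one second out */
}

static void my_start(struct my_priv *priv)
{
        timer_setup(&priv->timer, my_timer_fn, 0);
        priv->timer.expires = jiffies + HZ;
        add_timer(&priv->timer);
}

When the callback needs the net_device rather than the private struct, the converted drivers recover it from state they already keep: hamachi reads hmp->mii_if.dev, yellowfin calls pci_get_drvdata(yp->pci_dev), and qlge/qla3xxx embed the timer directly in the adapter they want.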
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa7770da6ef8..54224d1822e3 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -343,7 +343,7 @@ static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
-static void yellowfin_timer(unsigned long data);
+static void yellowfin_timer(struct timer_list *t);
static void yellowfin_tx_timeout(struct net_device *dev);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
@@ -632,10 +632,8 @@ static int yellowfin_open(struct net_device *dev)
}
/* Set the timer to check for link beat. */
- init_timer(&yp->timer);
+ timer_setup(&yp->timer, yellowfin_timer, 0);
yp->timer.expires = jiffies + 3*HZ;
- yp->timer.data = (unsigned long)dev;
- yp->timer.function = yellowfin_timer; /* timer handler */
add_timer(&yp->timer);
out:
return rc;
@@ -645,10 +643,10 @@ err_free_irq:
goto out;
}
-static void yellowfin_timer(unsigned long data)
+static void yellowfin_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct yellowfin_private *yp = netdev_priv(dev);
+ struct yellowfin_private *yp = from_timer(yp, t, timer);
+ struct net_device *dev = pci_get_drvdata(yp->pci_dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 49591d9c2e1b..c9a55b774935 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -943,9 +943,9 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
#define TX_CLEAN_INTERVAL HZ
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+ struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
@@ -1199,8 +1199,7 @@ static int pasemi_mac_open(struct net_device *dev)
if (dev->phydev)
phy_start(dev->phydev);
- setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
- (unsigned long)mac->tx);
+ timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c2e24afbaeb2..26ddf092e3ec 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -117,4 +117,7 @@ config QED_ISCSI
config QED_FCOE
bool
+config QED_OOO
+ bool
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index cee90e05beb8..6cd2e333a5fc 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the QLogic network device drivers.
#
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 82dd47068e18..c70cf2ad81c0 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
@@ -6,5 +7,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
+qed-$(CONFIG_QED_OOO) += qed_ooo.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index af106be8cc08..afd07ad91631 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2069,6 +2069,12 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+ if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
+ DP_NOTICE(p_hwfn,
+ "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+ p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
+ }
+
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_IWARP:
/* Each QP requires one connection */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8f6ccc0c39e5..fe7c1f230028 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1277,11 +1277,10 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_ATOMIC);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC);
if (!dcbx_info)
return NULL;
- memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
@@ -2308,7 +2307,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
app->selector, app->protocol, app->priority);
- if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
+ if (app->priority >= QED_MAX_PFC_PRIORITIES) {
DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 9d989c96278c..409041eab189 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -41,6 +41,7 @@
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_ooo.h"
#define QED_IWARP_ORD_DEFAULT 32
#define QED_IWARP_IRD_DEFAULT 32
@@ -119,6 +120,13 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
+void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_params *p_ramrod)
+{
+ p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+}
+
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
int rc;
@@ -1402,12 +1410,22 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
- return qed_iwarp_prealloc_ep(p_hwfn, true);
+ rc = qed_iwarp_prealloc_ep(p_hwfn, true);
+ if (rc)
+ return rc;
+
+ return qed_ooo_alloc(p_hwfn);
}
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ qed_ooo_free(p_hwfn);
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+ kfree(iwarp_info->mpa_bufs);
+ kfree(iwarp_info->partial_fpdus);
+ kfree(iwarp_info->mpa_intermediate_buf);
}
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
@@ -1705,6 +1723,569 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
return 0;
}
+static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
+ u16 cid)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_fpdu *partial_fpdu;
+ u32 idx;
+
+ idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
+ if (idx >= iwarp_info->max_num_partial_fpdus) {
+ DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
+ iwarp_info->max_num_partial_fpdus);
+ return NULL;
+ }
+
+ partial_fpdu = &iwarp_info->partial_fpdus[idx];
+
+ return partial_fpdu;
+}
+
+enum qed_iwarp_mpa_pkt_type {
+ QED_IWARP_MPA_PKT_PACKED,
+ QED_IWARP_MPA_PKT_PARTIAL,
+ QED_IWARP_MPA_PKT_UNALIGNED
+};
+
+#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
+#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
+#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
+
+/* Pad to multiple of 4 */
+#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
+#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \
+ (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \
+ QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
+ QED_IWARP_MPA_CRC32_DIGEST_SIZE)
+
+/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
+#define QED_IWARP_MAX_BDS_PER_FPDU 3
+
+char *pkt_type_str[] = {
+ "QED_IWARP_MPA_PKT_PACKED",
+ "QED_IWARP_MPA_PKT_PARTIAL",
+ "QED_IWARP_MPA_PKT_UNALIGNED"
+};
+
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf);
+
+static enum qed_iwarp_mpa_pkt_type
+qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ u16 tcp_payload_len, u8 *mpa_data)
+{
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ u16 mpa_len;
+
+ if (fpdu->incomplete_bytes) {
+ pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
+ goto out;
+ }
+
+ /* special case of one byte remaining...
+ * lower byte will be read next packet
+ */
+ if (tcp_payload_len == 1) {
+ fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+ goto out;
+ }
+
+ mpa_len = ntohs(*((u16 *)(mpa_data)));
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+
+ if (fpdu->fpdu_length <= tcp_payload_len)
+ pkt_type = QED_IWARP_MPA_PKT_PACKED;
+ else
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
+ pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
+
+ return pkt_type;
+}
+
+static void
+qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ u16 tcp_payload_size, u8 placement_offset)
+{
+ fpdu->mpa_buf = buf;
+ fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
+ fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
+ fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
+ fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
+
+ if (tcp_payload_size == 1)
+ fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
+ else if (tcp_payload_size < fpdu->fpdu_length)
+ fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
+ else
+ fpdu->incomplete_bytes = 0; /* complete fpdu */
+
+ fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
+}
+
+static int
+qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
+{
+ u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
+ int rc;
+
+ /* need to copy the data from the partial packet stored in fpdu
+ * to the new buf, for this we also need to move the data currently
+ * placed on the buf. The assumption is that the buffer is big enough
+ * since fpdu_length <= mss, we use an intermediate buffer since
+ * we may need to copy the new data to an overlapping location
+ */
+ if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
+ DP_ERR(p_hwfn,
+ "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len,
+ tcp_payload_size, fpdu->incomplete_bytes);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
+ fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + pkt_data->first_mpa_offset,
+ tcp_payload_size);
+
+ memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
+ memcpy(tmp_buf + fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + pkt_data->first_mpa_offset,
+ tcp_payload_size);
+
+ rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
+ if (rc)
+ return rc;
+
+ /* If we managed to post the buffer copy the data to the new buffer
+ * o/w this will occur in the next round...
+ */
+ memcpy((u8 *)(buf->data), tmp_buf,
+ fpdu->mpa_frag_len + tcp_payload_size);
+
+ fpdu->mpa_buf = buf;
+ /* fpdu->pkt_hdr remains as is */
+ /* fpdu->mpa_frag is overridden with new buf */
+ fpdu->mpa_frag = buf->data_phys_addr;
+ fpdu->mpa_frag_virt = buf->data;
+ fpdu->mpa_frag_len += tcp_payload_size;
+
+ fpdu->incomplete_bytes -= tcp_payload_size;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
+ fpdu->incomplete_bytes);
+
+ return 0;
+}
+
+static void
+qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
+{
+ u16 mpa_len;
+
+ /* Update incomplete packets if needed */
+ if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
+ /* Missing lower byte is now available */
+ mpa_len = fpdu->fpdu_length | *mpa_data;
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+ fpdu->mpa_frag_len = fpdu->fpdu_length;
+ /* one byte of hdr */
+ fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
+ mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
+ }
+}
+
+#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
+ (GET_FIELD((_curr_pkt)->flags, \
+ UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
+
+/* This function is used to recycle a buffer using the ll2 drop option. It
+ * uses the mechanism to ensure that all buffers posted to tx before this one
+ * were completed. The buffer sent here will be sent as a cookie in the tx
+ * completion function and can then be reposted to rx chain when done. The flow
+ * that requires this is the flow where a FPDU splits over more than 3 tcp
+ * segments. In this case the driver needs to re-post a rx buffer instead of
+ * the one received, but driver can't simply repost a buffer it copied from
+ * as there is a case where the buffer was originally a packed FPDU, and is
+ * partially posted to FW. Driver needs to ensure FW is done with it.
+ */
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ buf->piggy_buf = NULL;
+ tx_pkt.cookie = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't drop packet rc=%d\n", rc);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, buf, rc);
+
+ return rc;
+}
+
+static int
+qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send right edge rc=%d\n", rc);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
+ tx_pkt.num_of_bds,
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, rc);
+
+ return rc;
+}
+
+static int
+qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *curr_pkt,
+ struct qed_iwarp_ll2_buff *buf,
+ u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+
+ /* An unaligned packet means it's split over two tcp segments. So the
+ * complete packet requires 3 bds, one for the header, one for the
+ * part of the fpdu of the first tcp segment, and the last fragment
+ * will point to the remainder of the fpdu. A packed pdu, requires only
+ * two bds, one for the header and one for the data.
+ */
+ tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
+
+ /* Send the mpa_buf only with the last fpdu (in case of packed) */
+ if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
+ tcp_payload_size <= fpdu->fpdu_length)
+ tx_pkt.cookie = fpdu->mpa_buf;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
+
+ /* special case of unaligned packet and not packed, need to send
+ * both buffers as cookie to release.
+ */
+ if (tcp_payload_size == fpdu->incomplete_bytes)
+ fpdu->mpa_buf->piggy_buf = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ /* Set first fragment to header */
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ goto out;
+
+ /* Set second fragment to first part of packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
+ fpdu->mpa_frag,
+ fpdu->mpa_frag_len);
+ if (rc)
+ goto out;
+
+ if (!fpdu->incomplete_bytes)
+ goto out;
+
+ /* Set third fragment to second part of the packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
+ ll2_handle,
+ buf->data_phys_addr +
+ curr_pkt->first_mpa_offset,
+ fpdu->incomplete_bytes);
+out:
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
+ tx_pkt.num_of_bds,
+ tx_pkt.first_frag_len,
+ fpdu->mpa_frag_len,
+ fpdu->incomplete_bytes, rc);
+
+ return rc;
+}
+
+static void
+qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
+ struct unaligned_opaque_data *curr_pkt,
+ u32 opaque_data0, u32 opaque_data1)
+{
+ u64 opaque_data;
+
+ opaque_data = HILO_64(opaque_data1, opaque_data0);
+ *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
+
+ curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
+ le16_to_cpu(curr_pkt->first_mpa_offset);
+ curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
+}
+
+/* This function is called when an unaligned or incomplete MPA packet arrives
+ * driver needs to align the packet, perhaps using previous data and send
+ * it down to FW once it is aligned.
+ */
+static int
+qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf)
+{
+ struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
+ struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ struct qed_iwarp_fpdu *fpdu;
+ int rc = -EINVAL;
+ u8 *mpa_data;
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
+ if (!fpdu) { /* something corrupt with cid, post rx back */
+ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
+ curr_pkt->cid);
+ goto err;
+ }
+
+ do {
+ mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
+
+ pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
+ mpa_buf->tcp_payload_len,
+ mpa_data);
+
+ switch (pkt_type) {
+ case QED_IWARP_MPA_PKT_PARTIAL:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ case QED_IWARP_MPA_PKT_PACKED:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
+ curr_pkt->first_mpa_offset += fpdu->fpdu_length;
+ break;
+ case QED_IWARP_MPA_PKT_UNALIGNED:
+ qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
+ if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
+ /* special handling of fpdu split over more
+ * than 2 segments
+ */
+ if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ rc = qed_iwarp_win_right_edge(p_hwfn,
+ fpdu);
+ /* packet will be re-processed later */
+ if (rc)
+ return rc;
+ }
+
+ rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
+ buf,
+ mpa_buf->tcp_payload_len);
+ if (rc) /* packet will be re-processed later */
+ return rc;
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:delay rc=%d\n", rc);
+ /* don't reset fpdu -> we need it for next
+ * classify
+ */
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
+ curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
+ /* The framed PDU was sent - no more incomplete bytes */
+ fpdu->incomplete_bytes = 0;
+ break;
+ }
+ } while (mpa_buf->tcp_payload_len && !rc);
+
+ return rc;
+
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn,
+ buf,
+ p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
+ return rc;
+}
+
+static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
+ int rc;
+
+ while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
+ struct qed_iwarp_ll2_mpa_buf,
+ list_entry);
+
+ rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
+
+ /* busy means break and continue processing later, don't
+ * remove the buf from the pending list.
+ */
+ if (rc == -EBUSY)
+ break;
+
+ list_del(&mpa_buf->list_entry);
+ list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
+
+ if (rc) { /* different error, don't continue */
+ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
+ break;
+ }
+ }
+}
+
+static void
+qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
+ struct qed_iwarp_ll2_mpa_buf, list_entry);
+ if (!mpa_buf) {
+ DP_ERR(p_hwfn, "No free mpa buf\n");
+ goto err;
+ }
+
+ list_del(&mpa_buf->list_entry);
+ qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
+ data->opaque_data_0, data->opaque_data_1);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
+ data->length.packet_length, mpa_buf->data.first_mpa_offset,
+ mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
+ mpa_buf->data.cid);
+
+ mpa_buf->ll2_buf = data->cookie;
+ mpa_buf->tcp_payload_len = data->length.packet_length -
+ mpa_buf->data.first_mpa_offset;
+ mpa_buf->data.first_mpa_offset += data->u.placement_offset;
+ mpa_buf->placement_offset = data->u.placement_offset;
+
+ list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
+
+ qed_iwarp_process_pending_pkts(p_hwfn);
+ return;
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
+ iwarp_info->ll2_mpa_handle);
+}
+
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
@@ -1725,6 +2306,14 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
memset(&cm_info, 0, sizeof(cm_info));
ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
+
+ /* Check if packet was received with errors... */
+ if (data->err_flags) {
+ DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
+ data->err_flags);
+ goto err;
+ }
+
if (GET_FIELD(data->parse_flags,
PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
@@ -1839,10 +2428,25 @@ static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
bool b_last_fragment, bool b_last_packet)
{
struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_iwarp_ll2_buff *piggy;
struct qed_hwfn *p_hwfn = cxt;
+ if (!buffer) /* can happen in packed mpa unaligned... */
+ return;
+
/* this was originally an rx packet, post it back */
+ piggy = buffer->piggy_buf;
+ if (piggy) {
+ buffer->piggy_buf = NULL;
+ qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
+ }
+
qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+
+ if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
+ qed_iwarp_process_pending_pkts(p_hwfn);
+
+ return;
}
static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
@@ -1855,12 +2459,44 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
if (!buffer)
return;
+ if (buffer->piggy_buf) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ buffer->piggy_buf->buff_size,
+ buffer->piggy_buf->data,
+ buffer->piggy_buf->data_phys_addr);
+
+ kfree(buffer->piggy_buf);
+ }
+
dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
buffer->data, buffer->data_phys_addr);
kfree(buffer);
}
+/* The only slowpath for iwarp ll2 is unalign flush. When this completion
+ * is received, need to reset the FPDU.
+ */
+void
+qed_iwarp_ll2_slowpath(void *cxt,
+ u8 connection_handle,
+ u32 opaque_data_0, u32 opaque_data_1)
+{
+ struct unaligned_opaque_data unalign_data;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_iwarp_fpdu *fpdu;
+
+ qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
+ opaque_data_0, opaque_data_1);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
+ unalign_data.cid);
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
+ if (fpdu)
+ memset(fpdu, 0, sizeof(*fpdu));
+}
+
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
@@ -1876,6 +2512,26 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
}
+ if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_ooo_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
+ iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
qed_llh_remove_mac_filter(p_hwfn,
p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
return rc;
@@ -1927,10 +2583,15 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
+ u32 mpa_buff_size;
+ u16 n_ooo_bufs;
int rc = 0;
+ int i;
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+ iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
iwarp_info->max_mtu = params->max_mtu;
@@ -1978,6 +2639,91 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
+ /* Start OOO connection */
+ data.input.conn_type = QED_LL2_TYPE_OOO;
+ data.input.mtu = params->max_mtu;
+
+ n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
+ iwarp_info->max_mtu;
+ n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
+
+ data.input.rx_num_desc = n_ooo_bufs;
+ data.input.rx_num_ooo_buffers = n_ooo_bufs;
+
+ data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
+ data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
+ data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc)
+ goto err;
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
+ if (rc)
+ goto err;
+
+ /* Start Unaligned MPA connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
+ cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ data.input.mtu = params->max_mtu;
+ /* FW requires that once a packet arrives OOO, it must have at
+ * least 2 rx buffers available on the unaligned connection
+ * for handling the case that it is a partial fpdu.
+ */
+ data.input.rx_num_desc = n_ooo_bufs * 2;
+ data.input.tx_num_desc = data.input.rx_num_desc;
+ data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+ data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
+ data.input.secondary_queue = true;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc)
+ goto err;
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ data.input.rx_num_desc,
+ mpa_buff_size,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
+ sizeof(*iwarp_info->partial_fpdus),
+ GFP_KERNEL);
+ if (!iwarp_info->partial_fpdus)
+ goto err;
+
+ iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
+
+ iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+ if (!iwarp_info->mpa_intermediate_buf)
+ goto err;
+
+ /* The mpa_bufs array serves for pending RX packets received on the
+ * mpa ll2 that don't have place on the tx ring and require later
+ * processing. We can't fail on allocation of such a struct therefore
+ * we allocate enough to take care of all rx packets
+ */
+ iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
+ sizeof(*iwarp_info->mpa_bufs),
+ GFP_KERNEL);
+ if (!iwarp_info->mpa_bufs)
+ goto err;
+
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
+ for (i = 0; i < data.input.rx_num_desc; i++)
+ list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
+ &iwarp_info->mpa_buf_list);
return rc;
err:
qed_iwarp_ll2_stop(p_hwfn, p_ptt);
@@ -2014,6 +2760,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
qed_iwarp_async_event);
+ qed_ooo_setup(p_hwfn);
return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
}
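The MPA-alignment code added above revolves around one piece of arithmetic: an FPDU on the wire is a 2-byte MPA length field plus payload, padded to a 4-byte boundary, followed by a 4-byte CRC32 digest, which is what QED_IWARP_FPDU_LEN_WITH_PAD() computes and what qed_iwarp_mpa_classify() compares against the TCP payload length. A hedged worked example (the numbers are illustrative, not taken from the patch):

#include <linux/kernel.h>

#define MY_MPA_LEN_FIELD_SIZE   2       /* ULPDU length header */
#define MY_MPA_CRC_SIZE         4       /* trailing CRC32 digest */

static unsigned int my_fpdu_len_with_pad(unsigned int mpa_len)
{
        /* Pad (length field + payload) to 4 bytes, then add the CRC. */
        return ALIGN(mpa_len + MY_MPA_LEN_FIELD_SIZE, 4) + MY_MPA_CRC_SIZE;
}

/*
 * mpa_len = 21:  21 + 2 = 23, padded to 24, plus 4 CRC bytes -> 28.
 * A TCP segment carrying >= 28 payload bytes makes the FPDU PACKED,
 * fewer bytes make it PARTIAL, and once incomplete_bytes is non-zero the
 * following segment is classified UNALIGNED.  A segment carrying a single
 * byte only delivers the high byte of the length field, which is why the
 * code stashes it shifted by BITS_PER_BYTE and waits for the rest.
 */

That padded length also bounds the TX side: a reassembled FPDU never needs more than QED_IWARP_MAX_BDS_PER_FPDU (3) fragments - the header, the previously buffered part, and the remainder of the current segment.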
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 148ef3c33a5d..c1ecd743305f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -47,18 +47,51 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_LL2_SYN_TX_SIZE (128)
#define QED_IWARP_LL2_SYN_RX_SIZE (256)
#define QED_IWARP_MAX_SYN_PKT_SIZE (128)
-#define QED_IWARP_HANDLE_INVAL (0xff)
+
+#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
+#define QED_IWARP_MAX_OOO (16)
+#define QED_IWARP_LL2_OOO_MAX_RX_SIZE (16384)
+
+#define QED_IWARP_HANDLE_INVAL (0xff)
struct qed_iwarp_ll2_buff {
+ struct qed_iwarp_ll2_buff *piggy_buf;
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
+struct qed_iwarp_ll2_mpa_buf {
+ struct list_head list_entry;
+ struct qed_iwarp_ll2_buff *ll2_buf;
+ struct unaligned_opaque_data data;
+ u16 tcp_payload_len;
+ u8 placement_offset;
+};
+
+/* In some cases a fpdu will arrive with only one byte of the header, in this
+ * case the fpdu_length will be partial (contain only higher byte and
+ * incomplete bytes will contain the invalid value
+ */
+#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
+
+struct qed_iwarp_fpdu {
+ struct qed_iwarp_ll2_buff *mpa_buf;
+ void *mpa_frag_virt;
+ dma_addr_t mpa_frag;
+ dma_addr_t pkt_hdr;
+ u16 mpa_frag_len;
+ u16 fpdu_length;
+ u16 incomplete_bytes;
+ u8 pkt_hdr_size;
+};
+
struct qed_iwarp_info {
struct list_head listen_list; /* qed_iwarp_listener */
struct list_head ep_list; /* qed_iwarp_ep */
struct list_head ep_free_list; /* pre-allocated ep's */
+ struct list_head mpa_buf_list; /* list of mpa_bufs */
+ struct list_head mpa_buf_pending_list;
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
@@ -67,9 +100,15 @@ struct qed_iwarp_info {
u8 crc_needed;
u8 tcp_flags;
u8 ll2_syn_handle;
+ u8 ll2_ooo_handle;
+ u8 ll2_mpa_handle;
u8 peer2peer;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
+ struct qed_iwarp_fpdu *partial_fpdus;
+ struct qed_iwarp_ll2_mpa_buf *mpa_bufs;
+ u8 *mpa_intermediate_buf;
+ u16 max_num_partial_fpdus;
};
enum qed_iwarp_ep_state {
@@ -147,6 +186,9 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params);
+void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_params *p_ramrod);
+
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c06ad4f0758e..047f556ca62e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -413,6 +413,7 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_comp_rx_data *data)
{
data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
+ data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
data->length.packet_length =
le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
@@ -422,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
}
static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_slow_path_cqe *sp_cqe;
+
+ sp_cqe = &p_cqe->rx_cqe_sp;
+ if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+ sp_cqe->ramrod_cmd_id);
+ return -EINVAL;
+ }
+
+ if (!p_ll2_conn->cbs.slowpath_cb) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+ return -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+ p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ le32_to_cpu(sp_cqe->opaque_data.data[0]),
+ le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+ return 0;
+}
+
+static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
@@ -494,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
switch (cqe->rx_cqe_sp.type) {
case CORE_RX_CQE_TYPE_SLOW_PATH:
- DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
- rc = -EINVAL;
+ rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+ cqe, &flags);
break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
case CORE_RX_CQE_TYPE_REGULAR:
@@ -893,7 +929,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
- p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
+ p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
@@ -1104,6 +1140,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_tx_packet *p_descq;
+ u32 desc_size;
u32 capacity;
int rc = 0;
@@ -1121,13 +1158,17 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
goto out;
capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
- p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
- GFP_KERNEL);
+ /* First element is part of the packet, rest are flexibly added */
+ desc_size = (sizeof(*p_descq) +
+ (p_ll2_info->input.tx_max_bds_per_packet - 1) *
+ sizeof(p_descq->bds_set));
+
+ p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
if (!p_descq) {
rc = -ENOMEM;
goto out;
}
- p_ll2_info->tx_queue.descq_array = p_descq;
+ p_ll2_info->tx_queue.descq_mem = p_descq;
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
@@ -1208,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+ p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
p_ll2_info->cbs.cookie = cbs->cookie;
return 0;
@@ -1259,6 +1301,11 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+ if (data->input.conn_type == QED_LL2_TYPE_OOO ||
+ data->input.secondary_queue)
+ p_ll2_info->main_func_queue = false;
+ else
+ p_ll2_info->main_func_queue = true;
/* Correct maximum number of Tx BDs */
p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
@@ -1358,11 +1405,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
+ u32 desc_size;
u8 qid;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1396,9 +1445,15 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
INIT_LIST_HEAD(&p_tx->sending_descq);
spin_lock_init(&p_tx->lock);
capacity = qed_chain_get_capacity(&p_tx->txq_chain);
- for (i = 0; i < capacity; i++)
- list_add_tail(&p_tx->descq_array[i].list_entry,
- &p_tx->free_descq);
+ /* First element is part of the packet, rest are flexibly added */
+ desc_size = (sizeof(*p_pkt) +
+ (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
+ sizeof(p_pkt->bds_set));
+
+ for (i = 0; i < capacity; i++) {
+ p_pkt = p_tx->descq_mem + desc_size * i;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ }
p_tx->cur_completing_bd_idx = 0;
p_tx->bds_idx = 0;
p_tx->b_completing_packet = false;
@@ -1578,11 +1633,28 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
: CORE_RROCE;
- tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
- : CORE_TX_DEST_LB;
+ switch (pkt->tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ }
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
+ start_bd->nw_vlan_or_lb_echo =
+ cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
+ else
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
@@ -1590,6 +1662,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
@@ -1697,7 +1772,7 @@ int qed_ll2_prepare_tx_packet(void *cxt,
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -1857,7 +1932,7 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
}
- kfree(p_ll2_conn->tx_queue.descq_array);
+ kfree(p_ll2_conn->tx_queue.descq_mem);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
kfree(p_ll2_conn->rx_queue.descq_array);
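The descq rework above stops allocating a fixed bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET] array per TX descriptor and instead sizes each qed_ll2_tx_packet by the connection's tx_max_bds_per_packet. Because the elements no longer have a uniform compile-time size, the array cannot be indexed as struct qed_ll2_tx_packet[] and is stepped through by byte stride, which is what both the kcalloc() call and the establish-connection loop do. A hedged sketch of that allocation/indexing pattern with illustrative names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_pkt {
        struct list_head list_entry;
        struct {
                dma_addr_t frag;
                u16 frag_len;
        } bds_set[1];                   /* first element lives inside the struct */
};

static void *my_alloc_descq(u32 capacity, u8 max_bds, size_t *stride)
{
        /* One bds_set entry is already in my_pkt, the rest follow it in memory. */
        *stride = sizeof(struct my_pkt) +
                  (max_bds - 1) * sizeof(((struct my_pkt *)0)->bds_set[0]);

        /* kcalloc() still overflow-checks capacity * stride. */
        return kcalloc(capacity, *stride, GFP_KERNEL);
}

static struct my_pkt *my_pkt_at(void *descq_mem, size_t stride, u32 i)
{
        return descq_mem + stride * i;  /* byte-offset walk, not pointer[] indexing */
}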
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index a822528e9c63..f65817012e97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -63,17 +63,14 @@ struct qed_ll2_rx_packet {
struct qed_ll2_tx_packet {
struct list_head list_entry;
u16 bd_used;
- u16 vlan;
- u16 l4_hdr_offset_w;
- u8 bd_flags;
bool notify_fw;
void *cookie;
-
+ /* Flexible Array of bds_set determined by max_bds_per_packet */
struct {
struct core_tx_bd *txq_bd;
dma_addr_t tx_frag;
u16 frag_len;
- } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+ } bds_set[1];
};
struct qed_ll2_rx_queue {
@@ -101,7 +98,7 @@ struct qed_ll2_tx_queue {
struct list_head active_descq;
struct list_head free_descq;
struct list_head sending_descq;
- struct qed_ll2_tx_packet *descq_array;
+ void *descq_mem; /* memory for variable sized qed_ll2_tx_packet*/
struct qed_ll2_tx_packet *cur_send_packet;
struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
@@ -124,6 +121,7 @@ struct qed_ll2_info {
bool b_active;
enum core_tx_dest tx_dest;
u8 tx_stats_en;
+ bool main_func_queue;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 376485d99357..8b99c7d26f34 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1691,12 +1691,12 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
case FW_MB_PARAM_GET_PF_RDMA_ROCE:
*p_proto = QED_PCI_ETH_ROCE;
break;
+ case FW_MB_PARAM_GET_PF_RDMA_IWARP:
+ *p_proto = QED_PCI_ETH_IWARP;
+ break;
case FW_MB_PARAM_GET_PF_RDMA_BOTH:
- DP_NOTICE(p_hwfn,
- "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
- *p_proto = QED_PCI_ETH_ROCE;
+ *p_proto = QED_PCI_ETH_RDMA;
break;
- case FW_MB_PARAM_GET_PF_RDMA_IWARP:
default:
DP_NOTICE(p_hwfn,
"MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 000636530111..6172354b451c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -103,18 +103,28 @@ int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
+ enum protocol_type proto;
u16 max_num_isles = 0;
u32 i;
- if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ break;
+ case QED_PCI_ETH_RDMA:
+ case QED_PCI_ETH_IWARP:
+ proto = PROTOCOLID_IWARP;
+ break;
+ default:
DP_NOTICE(p_hwfn,
"Failed to allocate qed_ooo_info: unknown personality\n");
return -EINVAL;
}
- max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
+ max_num_archipelagos = (u16)qed_cxt_get_proto_cid_count(p_hwfn, proto,
+ NULL);
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
- cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
+ cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, proto);
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index e8ed40b848f5..49c4e75b15b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -83,7 +83,7 @@ struct qed_ooo_info {
u16 cid_base;
};
-#if IS_ENABLED(CONFIG_QED_ISCSI)
+#if IS_ENABLED(CONFIG_QED_OOO)
void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
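qed_ooo.h already provides empty static-inline stubs when its config symbol is disabled; the change above only retargets that guard from CONFIG_QED_ISCSI to the new CONFIG_QED_OOO so both the iSCSI and the iWARP paths can pull qed_ooo.o in. A hedged illustration of the general stubbing pattern, with a hypothetical symbol and function:

/* CONFIG_MY_FEATURE and my_feature_setup() are hypothetical. */
struct my_dev;

#if IS_ENABLED(CONFIG_MY_FEATURE)
void my_feature_setup(struct my_dev *dev);
#else
static inline void my_feature_setup(struct my_dev *dev) {}
#endif

Callers then invoke my_feature_setup() unconditionally and the compiler drops the call when the option is off.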
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 6fb99518a61f..c8c4b3940564 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -156,7 +156,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
return rc;
p_hwfn->p_rdma_info = p_rdma_info;
- p_rdma_info->proto = PROTOCOLID_ROCE;
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ p_rdma_info->proto = PROTOCOLID_IWARP;
+ else
+ p_rdma_info->proto = PROTOCOLID_ROCE;
num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
NULL);
@@ -206,11 +209,11 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
goto free_pd_map;
}
- /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
- * twice the number of QPs.
+ /* Allocate bitmap for cq's. The maximum number of CQs is bound to
+ * the number of connections we support. (num_qps in iWARP or
+ * num_qps/2 in RoCE).
*/
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2, "CQ");
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cq bitmap, rc = %d\n", rc);
@@ -219,10 +222,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate bitmap for toggle bit for cq icids
* We toggle the bit every time we create or resize cq for a given icid.
- * The maximum number of CQs is bounded to twice the number of QPs.
+ * Size needs to equal the size of the cq bmap.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2, "Toggle");
+ num_cons, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate toogle bits, rc = %d\n", rc);
@@ -548,10 +551,13 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ qed_iwarp_init_fw_ramrod(p_hwfn,
+ &p_ent->ramrod.iwarp_init_func.iwarp);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
- else
+ } else {
p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+ }
p_params_header = &p_ramrod->params_header;
p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
index 739ddb730967..ad00d082fec8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _QED_SELFTEST_API_H
#define _QED_SELFTEST_API_H
#include <linux/types.h>
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 46d0c3cb83a5..a1d33f35aad3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -377,6 +377,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_ISCSI;
break;
case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index adb700512baa..a3a70ade411f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -503,7 +503,7 @@ void qede_fill_rss_params(struct qede_dev *edev,
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
-int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
#ifdef CONFIG_DCB
void qede_set_dcbnl_ops(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f79e36e4060a..c1a0708a7d7c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1065,7 +1065,7 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
return 0;
}
-int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
struct qede_dev *edev = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 6fc854b120b0..48ec4c56cddf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1004,6 +1004,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
xdp.data_hard_start = page_address(bd->data);
xdp.data = xdp.data_hard_start + *data_offset;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
/* Queues always have a full reset currently, so for the time
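The qede_rx_xdp() hunk adds xdp_set_data_meta_invalid() because the XDP core in this cycle grew a data_meta pointer that every driver must initialize before running a program; drivers without metadata support mark it invalid (internally it is set to data + 1). A hedged sketch of the receive-side setup for such a driver, with illustrative parameters:

#include <linux/filter.h>

static u32 my_run_xdp(struct bpf_prog *prog, void *page_start,
                      unsigned int headroom, unsigned int len)
{
        struct xdp_buff xdp;

        xdp.data_hard_start = page_start;
        xdp.data = page_start + headroom;
        xdp_set_data_meta_invalid(&xdp);        /* no metadata in front of data */
        xdp.data_end = xdp.data + len;

        /* Real drivers run this under rcu_read_lock(). */
        return bpf_prog_run_xdp(prog, &xdp);    /* XDP_PASS/DROP/TX/... verdict */
}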
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index e5ee9f274a71..8f9b3eb82137 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -556,7 +556,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
- .ndo_xdp = qede_xdp,
+ .ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
@@ -594,7 +594,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_udp_tunnel_add = qede_udp_tunnel_add,
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
- .ndo_xdp = qede_xdp,
+ .ndo_bpf = qede_xdp,
};
/* -------------------------------------------------------------------------
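The .ndo_xdp -> .ndo_bpf rename above is mechanical: the callback (and struct netdev_xdp, now struct netdev_bpf) was renamed because it multiplexes more than XDP program attach. A hedged sketch of what such a handler looked like in this era; my_priv, my_xdp_set() and the stored xdp_prog pointer are illustrative, not qede's code:

#include <linux/netdevice.h>
#include <linux/filter.h>

struct my_priv {
        struct bpf_prog *xdp_prog;
};

static int my_xdp_set(struct my_priv *priv, struct bpf_prog *prog)
{
        priv->xdp_prog = prog;          /* real drivers reconfigure rings here */
        return 0;
}

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct my_priv *priv = netdev_priv(dev);

        switch (bpf->command) {
        case XDP_SETUP_PROG:
                return my_xdp_set(priv, bpf->prog);
        case XDP_QUERY_PROG:
                bpf->prog_attached = !!priv->xdp_prog;
                return 0;
        default:
                return -EINVAL;
        }
}

Handlers of this period could also report bpf->prog_id for XDP_QUERY_PROG, as qede does with edev->xdp_prog->aux->id.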
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2991179c2fd0..9e5264d8773b 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3749,9 +3749,9 @@ static void ql_get_board_info(struct ql3_adapter *qdev)
qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
@@ -3891,10 +3891,8 @@ static int ql3xxx_probe(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
- init_timer(&qdev->adapter_timer);
- qdev->adapter_timer.function = ql3xxx_timer;
+ timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
- qdev->adapter_timer.data = (unsigned long)qdev;
if (!cards_found) {
pr_alert("%s\n", DRV_STRING);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 3c2c2c7c1559..dbaeab344667 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
#
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index fe2599b83d09..31389ab8bdf7 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 31f40148fa5c..5edbd532127d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 9feec7009443..7b97a9969046 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1092,8 +1092,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
{
if (!rx_ring->pg_chunk.page) {
u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
- GFP_ATOMIC,
+ rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
netif_err(qdev, drv, qdev->ndev,
@@ -4725,9 +4724,9 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
-static void ql_timer(unsigned long data)
+static void ql_timer(struct timer_list *t)
{
- struct ql_adapter *qdev = (struct ql_adapter *)data;
+ struct ql_adapter *qdev = from_timer(qdev, t, timer);
u32 var = 0;
var = ql_read32(qdev, STS);
@@ -4806,11 +4805,8 @@ static int qlge_probe(struct pci_dev *pdev,
/* Start up the timer to trigger EEH if
* the bus goes dead
*/
- init_timer_deferrable(&qdev->timer);
- qdev->timer.data = (unsigned long)qdev;
- qdev->timer.function = ql_timer;
- qdev->timer.expires = jiffies + (5*HZ);
- add_timer(&qdev->timer);
+ timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
+ mod_timer(&qdev->timer, jiffies + (5*HZ));
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index 7ad146080c36..4be65d6761b3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "qlge.h"
int ql_unpause_mpi_risc(struct ql_adapter *qdev)
@@ -212,7 +213,6 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
/* Get the status data and start up a thread to
* handle the request.
*/
- mbcp = &qdev->idc_mbc;
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 1847350f48a7..9250976dd884 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Qualcomm network device drivers.
#
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 3ed9033e56db..9cbb27263742 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -309,22 +309,12 @@ void emac_mac_mode_config(struct emac_adapter *adpt)
/* Config descriptor rings */
static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
{
- static const unsigned short tpd_q_offset[] = {
- EMAC_DESC_CTRL_8, EMAC_H1TPD_BASE_ADDR_LO,
- EMAC_H2TPD_BASE_ADDR_LO, EMAC_H3TPD_BASE_ADDR_LO};
- static const unsigned short rfd_q_offset[] = {
- EMAC_DESC_CTRL_2, EMAC_DESC_CTRL_10,
- EMAC_DESC_CTRL_12, EMAC_DESC_CTRL_13};
- static const unsigned short rrd_q_offset[] = {
- EMAC_DESC_CTRL_5, EMAC_DESC_CTRL_14,
- EMAC_DESC_CTRL_15, EMAC_DESC_CTRL_16};
-
/* TPD (Transmit Packet Descriptor) */
writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
adpt->base + EMAC_DESC_CTRL_1);
writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
- adpt->base + tpd_q_offset[0]);
+ adpt->base + EMAC_DESC_CTRL_8);
writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
adpt->base + EMAC_DESC_CTRL_9);
@@ -334,9 +324,9 @@ static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
adpt->base + EMAC_DESC_CTRL_0);
writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
- adpt->base + rfd_q_offset[0]);
+ adpt->base + EMAC_DESC_CTRL_2);
writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
- adpt->base + rrd_q_offset[0]);
+ adpt->base + EMAC_DESC_CTRL_5);
writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
adpt->base + EMAC_DESC_CTRL_3);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 29ba37a08372..e8ab512ee7e3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -68,10 +68,10 @@ static void emac_sgmii_link_init(struct emac_adapter *adpt)
writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
}
-static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
+static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u8 irq_bits)
{
struct emac_sgmii *phy = &adpt->phy;
- u32 status;
+ u8 status;
writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD);
@@ -86,9 +86,8 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
EMAC_SGMII_PHY_INTERRUPT_STATUS,
status, !(status & irq_bits), 1,
SGMII_PHY_IRQ_CLR_WAIT_TIME)) {
- netdev_err(adpt->netdev,
- "error: failed clear SGMII irq: status:0x%x bits:0x%x\n",
- status, irq_bits);
+ net_err_ratelimited("%s: failed to clear SGMII irq: status:0x%x bits:0x%x\n",
+ adpt->netdev->name, status, irq_bits);
return -EIO;
}
@@ -109,7 +108,7 @@ static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
{
struct emac_adapter *adpt = data;
struct emac_sgmii *phy = &adpt->phy;
- u32 status;
+ u8 status;
status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
status &= SGMII_ISR_MASK;
@@ -139,10 +138,8 @@ static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
atomic_set(&phy->decode_error_count, 0);
}
- if (emac_sgmii_irq_clear(adpt, status)) {
- netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n");
+ if (emac_sgmii_irq_clear(adpt, status))
schedule_work(&adpt->work_thread);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 759543512117..70c92b649b29 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -130,7 +130,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb);
}
-irqreturn_t emac_isr(int _irq, void *data)
+static irqreturn_t emac_isr(int _irq, void *data)
{
struct emac_irq *irq = data;
struct emac_adapter *adpt =
@@ -148,9 +148,8 @@ irqreturn_t emac_isr(int _irq, void *data)
goto exit;
if (status & ISR_ERROR) {
- netif_warn(adpt, intr, adpt->netdev,
- "warning: error irq status 0x%lx\n",
- status & ISR_ERROR);
+ net_err_ratelimited("%s: error interrupt 0x%lx\n",
+ adpt->netdev->name, status & ISR_ERROR);
/* reset MAC */
schedule_work(&adpt->work_thread);
}
@@ -169,7 +168,8 @@ irqreturn_t emac_isr(int _irq, void *data)
emac_mac_tx_process(adpt, &adpt->tx_q);
if (status & ISR_OVER)
- net_warn_ratelimited("warning: TX/RX overflow\n");
+ net_warn_ratelimited("%s: TX/RX overflow interrupt\n",
+ adpt->netdev->name);
exit:
/* enable the interrupt */
@@ -615,20 +615,11 @@ static int emac_probe(struct platform_device *pdev)
u32 reg;
int ret;
- /* The EMAC itself is capable of 64-bit DMA, so try that first. */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ /* The TPD buffer address is limited to 45 bits. */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45));
if (ret) {
- /* Some platforms may restrict the EMAC's address bus to less
- * then the size of DDR. In this case, we need to try a
- * smaller mask. We could try every possible smaller mask,
- * but that's overkill. Instead, just fall to 32-bit, which
- * should always work.
- */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pdev->dev, "could not set DMA mask\n");
- return ret;
- }
+ dev_err(&pdev->dev, "could not set DMA mask\n");
+ return ret;
}
netdev = alloc_etherdev(sizeof(struct emac_adapter));
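For reference, the DMA_BIT_MASK(45) requested above covers the low 45 address bits, i.e. 32 TiB of physical address space; because the TPD address field is only 45 bits wide, asking for that mask directly makes the old 64-bit-then-32-bit fallback unnecessary. A minimal userspace check of the arithmetic (the macro is re-declared here purely for illustration, mirroring the kernel's definition):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(45);

	/* 0x1fffffffffff: every address below 32 TiB is reachable. */
	printf("DMA_BIT_MASK(45) = 0x%" PRIx64 " (%" PRIu64 " GiB)\n",
	       mask, (mask + 1) >> 30);
	return 0;
}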
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
index 6e2587af47a4..9bb06d284644 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/Kconfig
+++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
@@ -5,6 +5,7 @@
menuconfig RMNET
tristate "RmNet MAP driver"
default n
+ select GRO_CELLS
---help---
If you select this, you will enable the RMNET module which is used
for handling data in the multiplexing and aggregation protocol (MAP)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 1e33aea59f50..71bee1af71ef 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -61,23 +61,6 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
return rtnl_dereference(real_dev->rx_handler_data);
}
-static struct rmnet_endpoint*
-rmnet_get_endpoint(struct net_device *dev, int config_id)
-{
- struct rmnet_endpoint *ep;
- struct rmnet_port *port;
-
- if (!rmnet_is_real_dev_registered(dev)) {
- ep = rmnet_vnd_get_endpoint(dev);
- } else {
- port = rmnet_get_port_rtnl(dev);
-
- ep = &port->muxed_ep[config_id];
- }
-
- return ep;
-}
-
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
{
@@ -98,7 +81,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
static int rmnet_register_real_device(struct net_device *real_dev)
{
struct rmnet_port *port;
- int rc;
+ int rc, entry;
ASSERT_RTNL();
@@ -119,27 +102,41 @@ static int rmnet_register_real_device(struct net_device *real_dev)
/* hold on to real dev for MAP data */
dev_hold(real_dev);
+ for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+ INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
-static void rmnet_set_endpoint_config(struct net_device *dev,
- u8 mux_id, u8 rmnet_mode,
- struct net_device *egress_dev)
+static void rmnet_unregister_bridge(struct net_device *dev,
+ struct rmnet_port *port)
{
- struct rmnet_endpoint *ep;
+ struct net_device *rmnet_dev, *bridge_dev;
+ struct rmnet_port *bridge_port;
+
+ if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+ return;
- netdev_dbg(dev, "id %d mode %d dev %s\n",
- mux_id, rmnet_mode, egress_dev->name);
+ /* bridge slave handling */
+ if (!port->nr_rmnet_devs) {
+ rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
+ netdev_upper_dev_unlink(dev, rmnet_dev);
- ep = rmnet_get_endpoint(dev, mux_id);
- /* This config is cleared on every set, so its ok to not
- * clear it on a device delete.
- */
- memset(ep, 0, sizeof(struct rmnet_endpoint));
- ep->rmnet_mode = rmnet_mode;
- ep->egress_dev = egress_dev;
- ep->mux_id = mux_id;
+ bridge_dev = port->bridge_ep;
+
+ bridge_port = rmnet_get_port_rtnl(bridge_dev);
+ bridge_port->bridge_ep = NULL;
+ bridge_port->rmnet_mode = RMNET_EPMODE_VND;
+ } else {
+ bridge_dev = port->bridge_ep;
+
+ bridge_port = rmnet_get_port_rtnl(bridge_dev);
+ rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
+ netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+
+ rmnet_unregister_real_device(bridge_dev, bridge_port);
+ }
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@@ -153,6 +150,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
RMNET_EGRESS_FORMAT_MAP;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
+ struct rmnet_endpoint *ep;
struct rmnet_port *port;
int err = 0;
u16 mux_id;
@@ -164,6 +162,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (!data[IFLA_VLAN_ID])
return -EINVAL;
+ ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+ if (!ep)
+ return -ENOMEM;
+
mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
err = rmnet_register_real_device(real_dev);
@@ -171,11 +173,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
goto err0;
port = rmnet_get_port_rtnl(real_dev);
- err = rmnet_vnd_newlink(mux_id, dev, port, real_dev);
+ err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
if (err)
goto err1;
- err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL);
+ err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
if (err)
goto err2;
@@ -183,13 +185,13 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
ingress_format, egress_format);
port->egress_data_format = egress_format;
port->ingress_data_format = ingress_format;
+ port->rmnet_mode = mode;
- rmnet_set_endpoint_config(real_dev, mux_id, mode, dev);
- rmnet_set_endpoint_config(dev, mux_id, mode, real_dev);
+ hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
return 0;
err2:
- rmnet_vnd_dellink(mux_id, port);
+ rmnet_vnd_dellink(mux_id, port, ep);
err1:
rmnet_unregister_real_device(real_dev, port);
err0:
@@ -199,6 +201,7 @@ err0:
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct net_device *real_dev;
+ struct rmnet_endpoint *ep;
struct rmnet_port *port;
u8 mux_id;
@@ -212,8 +215,15 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
port = rmnet_get_port_rtnl(real_dev);
mux_id = rmnet_vnd_get_mux(dev);
- rmnet_vnd_dellink(mux_id, port);
netdev_upper_dev_unlink(dev, real_dev);
+
+ ep = rmnet_get_endpoint(port, mux_id);
+ if (ep) {
+ hlist_del_init_rcu(&ep->hlnode);
+ rmnet_unregister_bridge(dev, port);
+ rmnet_vnd_dellink(mux_id, port, ep);
+ kfree(ep);
+ }
rmnet_unregister_real_device(real_dev, port);
unregister_netdevice_queue(dev, head);
@@ -222,11 +232,16 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
{
struct rmnet_walk_data *d = data;
+ struct rmnet_endpoint *ep;
u8 mux_id;
mux_id = rmnet_vnd_get_mux(rmnet_dev);
-
- rmnet_vnd_dellink(mux_id, d->port);
+ ep = rmnet_get_endpoint(d->port, mux_id);
+ if (ep) {
+ hlist_del_init_rcu(&ep->hlnode);
+ rmnet_vnd_dellink(mux_id, d->port, ep);
+ kfree(ep);
+ }
netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
unregister_netdevice_queue(rmnet_dev, d->head);
@@ -252,6 +267,8 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
d.port = port;
rcu_read_lock();
+ rmnet_unregister_bridge(dev, port);
+
netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
rcu_read_unlock();
unregister_netdevice_many(&list);
@@ -324,6 +341,77 @@ struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
return NULL;
}
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
+{
+ struct rmnet_endpoint *ep;
+
+ hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
+ if (ep->mux_id == mux_id)
+ return ep;
+ }
+
+ return NULL;
+}
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ struct net_device *real_dev = priv->real_dev;
+ struct rmnet_port *port, *slave_port;
+ int err;
+
+ port = rmnet_get_port(real_dev);
+
+ /* If there is more than one rmnet dev attached, it's probably being
+ * used for muxing. Skip the bridging in that case.
+ */
+ if (port->nr_rmnet_devs > 1)
+ return -EINVAL;
+
+ if (rmnet_is_real_dev_registered(slave_dev))
+ return -EBUSY;
+
+ err = rmnet_register_real_device(slave_dev);
+ if (err)
+ return -EBUSY;
+
+ err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+ extack);
+ if (err)
+ return -EINVAL;
+
+ slave_port = rmnet_get_port(slave_dev);
+ slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+ slave_port->bridge_ep = real_dev;
+
+ port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+ port->bridge_ep = slave_dev;
+
+ netdev_dbg(slave_dev, "registered with rmnet as slave\n");
+ return 0;
+}
+
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ struct net_device *real_dev = priv->real_dev;
+ struct rmnet_port *port, *slave_port;
+
+ port = rmnet_get_port(real_dev);
+ port->rmnet_mode = RMNET_EPMODE_VND;
+ port->bridge_ep = NULL;
+
+ netdev_upper_dev_unlink(slave_dev, rmnet_dev);
+ slave_port = rmnet_get_port(slave_dev);
+ rmnet_unregister_real_device(slave_dev, slave_port);
+
+ netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+ return 0;
+}
+
/* Startup/Shutdown */
static int __init rmnet_init(void)
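The hlist conversion above keys each logical endpoint by its MAP mux ID: rmnet_newlink() allocates an endpoint and hangs it off port->muxed_ep[mux_id], and rmnet_get_endpoint() walks that bucket on lookup. Below is a rough userspace analogue of that data layout, using plain singly linked lists instead of the kernel's RCU-protected hlist; all names are invented for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_LOGICAL_EP 255		/* mirrors RMNET_MAX_LOGICAL_EP */

struct endpoint {
	uint8_t mux_id;
	const char *egress_dev;		/* stands in for struct net_device * */
	struct endpoint *next;		/* stands in for the hlist linkage */
};

/* One list head per mux ID, as in struct rmnet_port::muxed_ep[]. */
static struct endpoint *muxed_ep[MAX_LOGICAL_EP];

static void ep_add(uint8_t mux_id, const char *dev)
{
	struct endpoint *ep = calloc(1, sizeof(*ep));

	if (!ep)
		return;
	ep->mux_id = mux_id;
	ep->egress_dev = dev;
	ep->next = muxed_ep[mux_id];
	muxed_ep[mux_id] = ep;
}

/* Same walk rmnet_get_endpoint() does over the per-mux-id bucket. */
static struct endpoint *ep_lookup(uint8_t mux_id)
{
	struct endpoint *ep;

	for (ep = muxed_ep[mux_id]; ep; ep = ep->next)
		if (ep->mux_id == mux_id)
			return ep;
	return NULL;
}

int main(void)
{
	struct endpoint *ep;

	ep_add(3, "rmnet3");
	ep = ep_lookup(3);
	printf("mux 3 -> %s\n", ep ? ep->egress_dev : "none");
	return 0;
}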
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index dde4e9f14f4a..c19259eea99e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -14,19 +14,17 @@
*/
#include <linux/skbuff.h>
+#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
#define RMNET_MAX_LOGICAL_EP 255
-/* Information about the next device to deliver the packet to.
- * Exact usage of this parameter depends on the rmnet_mode.
- */
struct rmnet_endpoint {
- u8 rmnet_mode;
u8 mux_id;
struct net_device *egress_dev;
+ struct hlist_node hlnode;
};
/* One instance of this structure is instantiated for each real_dev associated
@@ -34,22 +32,41 @@ struct rmnet_endpoint {
*/
struct rmnet_port {
struct net_device *dev;
- struct rmnet_endpoint local_ep;
- struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP];
u32 ingress_data_format;
u32 egress_data_format;
- struct net_device *rmnet_devices[RMNET_MAX_LOGICAL_EP];
u8 nr_rmnet_devs;
+ u8 rmnet_mode;
+ struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+ struct net_device *bridge_ep;
};
extern struct rtnl_link_ops rmnet_link_ops;
+struct rmnet_vnd_stats {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+ struct rmnet_vnd_stats stats;
+ struct u64_stats_sync syncp;
+};
+
struct rmnet_priv {
- struct rmnet_endpoint local_ep;
u8 mux_id;
struct net_device *real_dev;
+ struct rmnet_pcpu_stats __percpu *pcpu_stats;
+ struct gro_cells gro_cells;
};
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
-
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack);
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev);
#endif /* _RMNET_CONFIG_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 540c7622dcb1..29842ccc91a9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -43,60 +43,23 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)
/* Generic handler */
-static rx_handler_result_t
-rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep)
+static void
+rmnet_deliver_skb(struct sk_buff *skb)
{
- if (!ep->egress_dev)
- kfree_skb(skb);
- else
- rmnet_egress_handler(skb, ep);
-
- return RX_HANDLER_CONSUMED;
-}
-
-static rx_handler_result_t
-rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
-{
- switch (ep->rmnet_mode) {
- case RMNET_EPMODE_NONE:
- return RX_HANDLER_PASS;
-
- case RMNET_EPMODE_BRIDGE:
- return rmnet_bridge_handler(skb, ep);
-
- case RMNET_EPMODE_VND:
- skb_reset_transport_header(skb);
- skb_reset_network_header(skb);
- rmnet_vnd_rx_fixup(skb, skb->dev);
-
- skb->pkt_type = PACKET_HOST;
- skb_set_mac_header(skb, 0);
- netif_receive_skb(skb);
- return RX_HANDLER_CONSUMED;
-
- default:
- kfree_skb(skb);
- return RX_HANDLER_CONSUMED;
- }
-}
-
-static rx_handler_result_t
-rmnet_ingress_deliver_packet(struct sk_buff *skb,
- struct rmnet_port *port)
-{
- if (!port) {
- kfree_skb(skb);
- return RX_HANDLER_CONSUMED;
- }
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
- skb->dev = port->local_ep.egress_dev;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ rmnet_vnd_rx_fixup(skb, skb->dev);
- return rmnet_deliver_skb(skb, &port->local_ep);
+ skb->pkt_type = PACKET_HOST;
+ skb_set_mac_header(skb, 0);
+ gro_cells_receive(&priv->gro_cells, skb);
}
/* MAP handler */
-static rx_handler_result_t
+static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
@@ -109,53 +72,50 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
& RMNET_INGRESS_FORMAT_MAP_COMMANDS)
return rmnet_map_command(skb, port);
- kfree_skb(skb);
- return RX_HANDLER_CONSUMED;
+ goto free_skb;
}
mux_id = RMNET_MAP_GET_MUX_ID(skb);
len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
- if (mux_id >= RMNET_MAX_LOGICAL_EP) {
- kfree_skb(skb);
- return RX_HANDLER_CONSUMED;
- }
+ if (mux_id >= RMNET_MAX_LOGICAL_EP)
+ goto free_skb;
- ep = &port->muxed_ep[mux_id];
+ ep = rmnet_get_endpoint(port, mux_id);
+ if (!ep)
+ goto free_skb;
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
- skb->dev = ep->egress_dev;
+ skb->dev = ep->egress_dev;
/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
skb_trim(skb, len);
rmnet_set_skb_proto(skb);
- return rmnet_deliver_skb(skb, ep);
+ rmnet_deliver_skb(skb);
+ return;
+
+free_skb:
+ kfree_skb(skb);
}
-static rx_handler_result_t
+static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct sk_buff *skbn;
- int rc;
if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
__rmnet_map_ingress_handler(skbn, port);
consume_skb(skb);
- rc = RX_HANDLER_CONSUMED;
} else {
- rc = __rmnet_map_ingress_handler(skb, port);
+ __rmnet_map_ingress_handler(skb, port);
}
-
- return rc;
}
static int rmnet_map_egress_handler(struct sk_buff *skb,
- struct rmnet_port *port,
- struct rmnet_endpoint *ep,
+ struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len;
@@ -174,10 +134,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
return RMNET_MAP_CONSUMED;
if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
- if (ep->mux_id == 0xff)
+ if (mux_id == 0xff)
map_header->mux_id = 0;
else
- map_header->mux_id = ep->mux_id;
+ map_header->mux_id = mux_id;
}
skb->protocol = htons(ETH_P_MAP);
@@ -185,6 +145,15 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
return RMNET_MAP_SUCCESS;
}
+static void
+rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+{
+ if (bridge_dev) {
+ skb->dev = bridge_dev;
+ dev_queue_xmit(skb);
+ }
+}
+
/* Ingress / Egress Entry Points */
/* Processes packet as per ingress data format for receiving device. Logical
@@ -193,56 +162,45 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
*/
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
- struct rmnet_port *port;
struct sk_buff *skb = *pskb;
+ struct rmnet_port *port;
struct net_device *dev;
- int rc;
if (!skb)
- return RX_HANDLER_CONSUMED;
+ goto done;
dev = skb->dev;
port = rmnet_get_port(dev);
- if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
- rc = rmnet_map_ingress_handler(skb, port);
- } else {
- switch (ntohs(skb->protocol)) {
- case ETH_P_MAP:
- if (port->local_ep.rmnet_mode ==
- RMNET_EPMODE_BRIDGE) {
- rc = rmnet_ingress_deliver_packet(skb, port);
- } else {
- kfree_skb(skb);
- rc = RX_HANDLER_CONSUMED;
- }
- break;
-
- case ETH_P_IP:
- case ETH_P_IPV6:
- rc = rmnet_ingress_deliver_packet(skb, port);
- break;
-
- default:
- rc = RX_HANDLER_PASS;
- }
+ switch (port->rmnet_mode) {
+ case RMNET_EPMODE_VND:
+ if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
+ rmnet_map_ingress_handler(skb, port);
+ break;
+ case RMNET_EPMODE_BRIDGE:
+ rmnet_bridge_handler(skb, port->bridge_ep);
+ break;
}
- return rc;
+done:
+ return RX_HANDLER_CONSUMED;
}
/* Modifies packet as per logical endpoint configuration and egress data format
* for egress device configured in logical endpoint. Packet is then transmitted
* on the egress device.
*/
-void rmnet_egress_handler(struct sk_buff *skb,
- struct rmnet_endpoint *ep)
+void rmnet_egress_handler(struct sk_buff *skb)
{
struct net_device *orig_dev;
struct rmnet_port *port;
+ struct rmnet_priv *priv;
+ u8 mux_id;
orig_dev = skb->dev;
- skb->dev = ep->egress_dev;
+ priv = netdev_priv(orig_dev);
+ skb->dev = priv->real_dev;
+ mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
if (!port) {
@@ -251,7 +209,7 @@ void rmnet_egress_handler(struct sk_buff *skb,
}
if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
- switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) {
+ switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
case RMNET_MAP_CONSUMED:
return;
@@ -264,8 +222,7 @@ void rmnet_egress_handler(struct sk_buff *skb,
}
}
- if (ep->rmnet_mode == RMNET_EPMODE_VND)
- rmnet_vnd_tx_fixup(skb, orig_dev);
+ rmnet_vnd_tx_fixup(skb, orig_dev);
dev_queue_xmit(skb);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index f2638cf5693c..3537e4ceedb3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -18,8 +18,7 @@
#include "rmnet_config.h"
-void rmnet_egress_handler(struct sk_buff *skb,
- struct rmnet_endpoint *ep);
+void rmnet_egress_handler(struct sk_buff *skb);
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index ce2302c25b12..3af3fe7b5457 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb);
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
- struct rmnet_port *port);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index d1ea5e21b982..51e604923ac1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -17,7 +17,7 @@
#include "rmnet_vnd.h"
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
- struct rmnet_port *rdinfo,
+ struct rmnet_port *port,
int enable)
{
struct rmnet_map_control_command *cmd;
@@ -37,7 +37,7 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
return RX_HANDLER_CONSUMED;
}
- ep = &rdinfo->muxed_ep[mux_id];
+ ep = rmnet_get_endpoint(port, mux_id);
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
@@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
- struct rmnet_port *port)
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
@@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc);
- return RX_HANDLER_CONSUMED;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 7967198fdd90..49102f922b31 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,23 +19,15 @@
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
-#define RMNET_EGRESS_FORMAT__RESERVED__ BIT(0)
#define RMNET_EGRESS_FORMAT_MAP BIT(1)
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2)
#define RMNET_EGRESS_FORMAT_MUXING BIT(3)
-#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 BIT(4)
-#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(5)
-#define RMNET_INGRESS_FIX_ETHERNET BIT(0)
#define RMNET_INGRESS_FORMAT_MAP BIT(1)
#define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2)
#define RMNET_INGRESS_FORMAT_DEMUXING BIT(3)
#define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4)
-#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3 BIT(5)
-#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(6)
-/* Pass the frame up the stack with no modifications to skb->dev */
-#define RMNET_EPMODE_NONE (0)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 7f90d5587653..9caa5e387450 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -27,14 +27,28 @@
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_pcpu_stats *pcpu_ptr;
+
+ pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+ u64_stats_update_begin(&pcpu_ptr->syncp);
+ pcpu_ptr->stats.rx_pkts++;
+ pcpu_ptr->stats.rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_ptr->syncp);
}
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_pcpu_stats *pcpu_ptr;
+
+ pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+ u64_stats_update_begin(&pcpu_ptr->syncp);
+ pcpu_ptr->stats.tx_pkts++;
+ pcpu_ptr->stats.tx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_ptr->syncp);
}
/* Network Device Operations */
@@ -45,10 +59,10 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct rmnet_priv *priv;
priv = netdev_priv(dev);
- if (priv->local_ep.egress_dev) {
- rmnet_egress_handler(skb, &priv->local_ep);
+ if (priv->real_dev) {
+ rmnet_egress_handler(skb);
} else {
- dev->stats.tx_dropped++;
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -70,10 +84,72 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
return priv->real_dev->ifindex;
}
+static int rmnet_vnd_init(struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ int err;
+
+ priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+ if (!priv->pcpu_stats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&priv->gro_cells, dev);
+ if (err) {
+ free_percpu(priv->pcpu_stats);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+
+ gro_cells_destroy(&priv->gro_cells);
+ free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_vnd_stats total_stats;
+ struct rmnet_pcpu_stats *pcpu_ptr;
+ unsigned int cpu, start;
+
+ memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+ for_each_possible_cpu(cpu) {
+ pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+ total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+ total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+ total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+ total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+ total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+ }
+
+ s->rx_packets = total_stats.rx_pkts;
+ s->rx_bytes = total_stats.rx_bytes;
+ s->tx_packets = total_stats.tx_pkts;
+ s->tx_bytes = total_stats.tx_bytes;
+ s->tx_dropped = total_stats.tx_drops;
+}
+
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
.ndo_get_iflink = rmnet_vnd_get_iflink,
+ .ndo_add_slave = rmnet_add_bridge,
+ .ndo_del_slave = rmnet_del_bridge,
+ .ndo_init = rmnet_vnd_init,
+ .ndo_uninit = rmnet_vnd_uninit,
+ .ndo_get_stats64 = rmnet_get_stats64,
};
/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
@@ -100,17 +176,19 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
- struct net_device *real_dev)
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep)
{
struct rmnet_priv *priv;
int rc;
- if (port->rmnet_devices[id])
+ if (ep->egress_dev)
return -EINVAL;
rc = register_netdevice(rmnet_dev);
if (!rc) {
- port->rmnet_devices[id] = rmnet_dev;
+ ep->egress_dev = rmnet_dev;
+ ep->mux_id = id;
port->nr_rmnet_devs++;
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
@@ -125,12 +203,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
return rc;
}
-int rmnet_vnd_dellink(u8 id, struct rmnet_port *port)
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+ struct rmnet_endpoint *ep)
{
- if (id >= RMNET_MAX_LOGICAL_EP || !port->rmnet_devices[id])
+ if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
return -EINVAL;
- port->rmnet_devices[id] = NULL;
+ ep->egress_dev = NULL;
port->nr_rmnet_devs--;
return 0;
}
@@ -143,21 +222,6 @@ u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
return priv->mux_id;
}
-/* Gets the logical endpoint configuration for a RmNet virtual network device
- * node. Caller should confirm that devices is a RmNet VND before calling.
- */
-struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev)
-{
- struct rmnet_priv *priv;
-
- if (!rmnet_dev)
- return NULL;
-
- priv = netdev_priv(rmnet_dev);
-
- return &priv->local_ep;
-}
-
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
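The new rmnet_get_stats64() above sums per-CPU counters into one rtnl_link_stats64, wrapping the 64-bit reads in u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() so they stay consistent on 32-bit machines. The sketch below models only the aggregation step in userspace, with no seqcount and a fixed CPU count; the names are invented and this is not the kernel code.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NR_CPUS 4			/* fixed for the sketch */

struct vnd_stats {			/* mirrors struct rmnet_vnd_stats */
	uint64_t rx_pkts, rx_bytes;
	uint64_t tx_pkts, tx_bytes;
	uint32_t tx_drops;
};

/* One instance per CPU, as alloc_percpu() provides in the driver. */
static struct vnd_stats pcpu_stats[NR_CPUS];

static void get_stats64(struct vnd_stats *total)
{
	int cpu;

	*total = (struct vnd_stats){ 0 };
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* userspace stand-in for per_cpu_ptr(priv->pcpu_stats, cpu) */
		const struct vnd_stats *s = &pcpu_stats[cpu];

		/* In the kernel, these 64-bit reads sit inside the
		 * u64_stats_fetch_begin_irq()/retry loop. */
		total->rx_pkts  += s->rx_pkts;
		total->rx_bytes += s->rx_bytes;
		total->tx_pkts  += s->tx_pkts;
		total->tx_bytes += s->tx_bytes;
		total->tx_drops += s->tx_drops;
	}
}

int main(void)
{
	struct vnd_stats total;

	pcpu_stats[0].rx_pkts = 10;
	pcpu_stats[2].rx_pkts = 5;
	get_stats64(&total);
	printf("rx_pkts=%" PRIu64 "\n", total.rx_pkts);	/* prints 15 */
	return 0;
}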
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 8a4042f0f6bf..71e4c3286951 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -17,11 +17,12 @@
#define _RMNET_VND_H_
int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
-struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *dev);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
- struct net_device *real_dev);
-int rmnet_vnd_dellink(u8 id, struct rmnet_port *port);
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+ struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index bed34684994f..7e011c1c1e6e 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -170,6 +170,7 @@ struct net_local {
spinlock_t lock;
struct net_device *next_module;
struct timer_list timer; /* Media selection timer. */
+ struct net_device *dev; /* Timer dev. */
unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
int saved_tx_size;
unsigned int tx_unit_busy:1;
@@ -184,7 +185,7 @@ struct net_local {
#define TIMED_CHECKER (HZ/4)
#ifdef TIMED_CHECKER
#include <linux/timer.h>
-static void atp_timed_checker(unsigned long ignored);
+static void atp_timed_checker(struct timer_list *t);
#endif
/* Index to functions, as function prototypes. */
@@ -438,10 +439,9 @@ static int net_open(struct net_device *dev)
hardware_init(dev);
- init_timer(&lp->timer);
+ lp->dev = dev;
+ timer_setup(&lp->timer, atp_timed_checker, 0);
lp->timer.expires = jiffies + TIMED_CHECKER;
- lp->timer.data = (unsigned long)dev;
- lp->timer.function = atp_timed_checker; /* timer handler */
add_timer(&lp->timer);
netif_start_queue(dev);
@@ -710,11 +710,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
#ifdef TIMED_CHECKER
/* This following code fixes a rare (and very difficult to track down)
problem where the adapter forgets its ethernet address. */
-static void atp_timed_checker(unsigned long data)
+static void atp_timed_checker(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
+ struct net_local *lp = from_timer(lp, t, timer);
+ struct net_device *dev = lp->dev;
long ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
int tickssofar = jiffies - lp->last_rx_time;
int i;
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 32497f0e537c..63f0d2d0e87b 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Linux header file for the ATP pocket ethernet adapter. */
/* v1.09 8/9/2000 becker@scyld.com. */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a3c949ea7d1a..fc0d5fa65ad4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -399,6 +399,12 @@ enum rtl_registers {
RxMaxSize = 0xda,
CPlusCmd = 0xe0,
IntrMitigate = 0xe2,
+
+#define RTL_COALESCE_MASK 0x0f
+#define RTL_COALESCE_SHIFT 4
+#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK)
+#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2)
+
RxDescAddrLow = 0xe4,
RxDescAddrHigh = 0xe8,
EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
@@ -795,6 +801,7 @@ struct rtl8169_private {
u16 cp_cmd;
u16 event_slow;
+ const struct rtl_coalesce_info *coalesce_info;
struct mdio_ops {
void (*write)(struct rtl8169_private *, int, int);
@@ -1975,8 +1982,6 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
rtl_writephy(tp, MII_ADVERTISE, auto_nego);
rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
} else {
- giga_ctrl = 0;
-
if (speed == SPEED_10)
bmcr = 0;
else if (speed == SPEED_100)
@@ -2025,21 +2030,6 @@ out:
return ret;
}
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->advertising);
- rtl_unlock_work(tp);
-
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -2166,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
return rc;
}
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int rc;
+ u32 advertising;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising))
+ return -EINVAL;
+
+ del_timer_sync(&tp->timer);
+
+ rtl_lock_work(tp);
+ rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex, advertising);
+ rtl_unlock_work(tp);
+
+ return rc;
+}
+
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2363,11 +2374,229 @@ static int rtl8169_nway_reset(struct net_device *dev)
return mii_nway_restart(&tp->mii);
}
+/*
+ * Interrupt coalescing
+ *
+ * > 1 - the availability of the IntrMitigate (0xe2) register through the
+ * > 8169, 8168 and 810x line of chipsets
+ *
+ * 8169, 8168, and 8136(810x) serial chipsets support it.
+ *
+ * > 2 - the Tx timer unit at gigabit speed
+ *
+ * The unit of the timer depends on both the speed and the setting of CPlusCmd
+ * (0xe0) bit 1 and bit 0.
+ *
+ * For 8169
+ * bit[1:0] \ speed 1000M 100M 10M
+ * 0 0 320ns 2.56us 40.96us
+ * 0 1 2.56us 20.48us 327.7us
+ * 1 0 5.12us 40.96us 655.4us
+ * 1 1 10.24us 81.92us 1.31ms
+ *
+ * For the other
+ * bit[1:0] \ speed 1000M 100M 10M
+ * 0 0 5us 2.56us 40.96us
+ * 0 1 40us 20.48us 327.7us
+ * 1 0 80us 40.96us 655.4us
+ * 1 1 160us 81.92us 1.31ms
+ */
+
+/* rx/tx scale factors for one particular CPlusCmd[0:1] value */
+struct rtl_coalesce_scale {
+ /* Rx / Tx */
+ u32 nsecs[2];
+};
+
+/* rx/tx scale factors for all CPlusCmd[0:1] cases */
+struct rtl_coalesce_info {
+ u32 speed;
+ struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */
+};
+
+/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */
+#define rxtx_x1822(r, t) { \
+ {{(r), (t)}}, \
+ {{(r)*8, (t)*8}}, \
+ {{(r)*8*2, (t)*8*2}}, \
+ {{(r)*8*2*2, (t)*8*2*2}}, \
+}
+static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
+ /* speed delays: rx00 tx00 */
+ { SPEED_10, rxtx_x1822(40960, 40960) },
+ { SPEED_100, rxtx_x1822( 2560, 2560) },
+ { SPEED_1000, rxtx_x1822( 320, 320) },
+ { 0 },
+};
+
+static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
+ /* speed delays: rx00 tx00 */
+ { SPEED_10, rxtx_x1822(40960, 40960) },
+ { SPEED_100, rxtx_x1822( 2560, 2560) },
+ { SPEED_1000, rxtx_x1822( 5000, 5000) },
+ { 0 },
+};
+#undef rxtx_x1822
+
+/* get rx/tx scale vector corresponding to current speed */
+static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ struct ethtool_link_ksettings ecmd;
+ const struct rtl_coalesce_info *ci;
+ int rc;
+
+ rc = rtl8169_get_link_ksettings(dev, &ecmd);
+ if (rc < 0)
+ return ERR_PTR(rc);
+
+ for (ci = tp->coalesce_info; ci->speed != 0; ci++) {
+ if (ecmd.base.speed == ci->speed) {
+ return ci;
+ }
+ }
+
+ return ERR_PTR(-ELNRNG);
+}
+
+static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ const struct rtl_coalesce_info *ci;
+ const struct rtl_coalesce_scale *scale;
+ struct {
+ u32 *max_frames;
+ u32 *usecs;
+ } coal_settings [] = {
+ { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs },
+ { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs }
+ }, *p = coal_settings;
+ int i;
+ u16 w;
+
+ memset(ec, 0, sizeof(*ec));
+
+ /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
+ ci = rtl_coalesce_info(dev);
+ if (IS_ERR(ci))
+ return PTR_ERR(ci);
+
+ scale = &ci->scalev[RTL_R16(CPlusCmd) & 3];
+
+ /* read IntrMitigate and adjust according to scale */
+ for (w = RTL_R16(IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
+ *p->max_frames = (w & RTL_COALESCE_MASK) << 2;
+ w >>= RTL_COALESCE_SHIFT;
+ *p->usecs = w & RTL_COALESCE_MASK;
+ }
+
+ for (i = 0; i < 2; i++) {
+ p = coal_settings + i;
+ *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000;
+
+ /*
+ * ethtool_coalesce says it is illegal to set both usecs and
+ * max_frames to 0.
+ */
+ if (!*p->usecs && !*p->max_frames)
+ *p->max_frames = 1;
+ }
+
+ return 0;
+}
+
+/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */
+static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
+ struct net_device *dev, u32 nsec, u16 *cp01)
+{
+ const struct rtl_coalesce_info *ci;
+ u16 i;
+
+ ci = rtl_coalesce_info(dev);
+ if (IS_ERR(ci))
+ return ERR_CAST(ci);
+
+ for (i = 0; i < 4; i++) {
+ u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0],
+ ci->scalev[i].nsecs[1]);
+ if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) {
+ *cp01 = i;
+ return &ci->scalev[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ const struct rtl_coalesce_scale *scale;
+ struct {
+ u32 frames;
+ u32 usecs;
+ } coal_settings [] = {
+ { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs },
+ { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs }
+ }, *p = coal_settings;
+ u16 w = 0, cp01;
+ int i;
+
+ scale = rtl_coalesce_choose_scale(dev,
+ max(p[0].usecs, p[1].usecs) * 1000, &cp01);
+ if (IS_ERR(scale))
+ return PTR_ERR(scale);
+
+ for (i = 0; i < 2; i++, p++) {
+ u32 units;
+
+ /*
+ * Accept max_frames=1 as returned by rtl_get_coalesce, and not only
+ * when usecs=0, because of e.g. the following scenario:
+ *
+ * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
+ * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
+ * - then the user does `ethtool -C eth0 rx-usecs 100`
+ *
+ * Since ethtool sends the whole ethtool_coalesce structure to the
+ * kernel, not handling rx_usecs!=0, rx_frames=1 here would make us
+ * reject it below in the `frames % 4 != 0` check.
+ */
+ if (p->frames == 1) {
+ p->frames = 0;
+ }
+
+ units = p->usecs * 1000 / scale->nsecs[i];
+ if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4)
+ return -EINVAL;
+
+ w <<= RTL_COALESCE_SHIFT;
+ w |= units;
+ w <<= RTL_COALESCE_SHIFT;
+ w |= p->frames >> 2;
+ }
+
+ rtl_lock_work(tp);
+
+ RTL_W16(IntrMitigate, swab16(w));
+
+ tp->cp_cmd = (tp->cp_cmd & ~3) | cp01;
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
+
+ rtl_unlock_work(tp);
+
+ return 0;
+}
+
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
.get_link = ethtool_op_get_link,
- .set_settings = rtl8169_set_settings,
+ .get_coalesce = rtl_get_coalesce,
+ .set_coalesce = rtl_set_coalesce,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
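To make the IntrMitigate packing in rtl_set_coalesce() above concrete: each direction contributes one nibble of timer units and one nibble of frames/4, and the length of one timer unit comes from the per-speed scale table (for example, rxtx_x1822(320, 320) for the 8169 at 1000 Mb/s expands to 320, 2560, 5120 and 10240 ns, matching the table in the comment block). The userspace sketch below redoes that arithmetic for one hypothetical ethtool setting, assuming the 5000 ns unit the 8168/8136 table gives at 1000 Mb/s with CPlusCmd[1:0] = 00; it is an illustration only, not the driver code.

#include <stdio.h>
#include <stdint.h>

#define COALESCE_MASK		0x0f
#define COALESCE_SHIFT		4
#define COALESCE_FRAME_MAX	(COALESCE_MASK << 2)	/* 60 frames */

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Rebuild the value rtl_set_coalesce() would write to IntrMitigate.
 * scale_ns is the duration of one timer unit for the current speed and
 * CPlusCmd[1:0] setting; the driver picks it so the units fit a nibble. */
static int encode(uint32_t rx_us, uint32_t rx_frames,
		  uint32_t tx_us, uint32_t tx_frames,
		  uint32_t scale_ns, uint16_t *out)
{
	struct { uint32_t frames, usecs; } p[2] = {
		{ rx_frames, rx_us }, { tx_frames, tx_us }
	};
	uint16_t w = 0;
	int i;

	for (i = 0; i < 2; i++) {
		uint32_t units = p[i].usecs * 1000 / scale_ns;

		if (p[i].frames > COALESCE_FRAME_MAX || p[i].frames % 4)
			return -1;
		w = (uint16_t)((w << COALESCE_SHIFT) | units);
		w = (uint16_t)((w << COALESCE_SHIFT) | (p[i].frames >> 2));
	}
	*out = swab16(w);	/* the driver writes the byte-swapped value */
	return 0;
}

int main(void)
{
	uint16_t reg;

	/* ethtool -C rx-usecs 50 rx-frames 16, TX left at zero:
	 * rx units = 50000 / 5000 = 10 (0xa), rx frames/4 = 4. */
	if (!encode(50, 16, 0, 0, 5000, &reg))
		printf("IntrMitigate = 0x%04x\n", reg);	/* prints 0x00a4 */
	return 0;
}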
@@ -2379,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -3565,27 +3795,32 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
+ rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
rtl_writephy(tp, 0x0e, 0x003c);
rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0006);
rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
+ rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ /* soft-reset phy */
+ rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
@@ -4401,10 +4636,9 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
schedule_work(&tp->wk.work);
}
-static void rtl8169_phy_timer(unsigned long __opaque)
+static void rtl8169_phy_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)__opaque;
- struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl8169_private *tp = from_timer(tp, t, timer);
rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
@@ -8062,6 +8296,7 @@ static const struct rtl_cfg_info {
unsigned int align;
u16 event_slow;
unsigned features;
+ const struct rtl_coalesce_info *coalesce_info;
u8 default_ver;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
@@ -8070,6 +8305,7 @@ static const struct rtl_cfg_info {
.align = 0,
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
.features = RTL_FEATURE_GMII,
+ .coalesce_info = rtl_coalesce_info_8169,
.default_ver = RTL_GIGA_MAC_VER_01,
},
[RTL_CFG_1] = {
@@ -8078,6 +8314,7 @@ static const struct rtl_cfg_info {
.align = 8,
.event_slow = SYSErr | LinkChg | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .coalesce_info = rtl_coalesce_info_8168_8136,
.default_ver = RTL_GIGA_MAC_VER_11,
},
[RTL_CFG_2] = {
@@ -8087,6 +8324,7 @@ static const struct rtl_cfg_info {
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
PCSTimeout,
.features = RTL_FEATURE_MSI,
+ .coalesce_info = rtl_coalesce_info_8168_8136,
.default_ver = RTL_GIGA_MAC_VER_13,
}
};
@@ -8450,11 +8688,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->hw_start = cfg->hw_start;
tp->event_slow = cfg->event_slow;
+ tp->coalesce_info = cfg->coalesce_info;
tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
~(RxBOVF | RxFOVF) : ~0;
- setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev);
+ timer_setup(&tp->timer, rtl8169_phy_timer, 0);
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fdf30bfa403b..2b962d349f5f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -403,8 +403,9 @@ static void ravb_emac_init(struct net_device *ndev)
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
- /* PAUSE prohibition */
+ /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+ (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
ravb_set_rate(ndev);
@@ -520,6 +521,19 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum(struct sk_buff *skb)
+{
+ u8 *hw_csum;
+
+ /* The hardware checksum is 2 bytes appended to packet data */
+ if (unlikely(skb->len < 2))
+ return;
+ hw_csum = skb_tail_pointer(skb) - 2;
+ skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb_trim(skb, skb->len - 2);
+}
+
/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
@@ -587,8 +601,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
ts.tv_nsec = le32_to_cpu(desc->ts_n);
shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
}
+
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum(skb);
napi_gro_receive(&priv->napi[q], skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
@@ -1337,20 +1354,15 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (priv->clk) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
- }
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0;
}
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
- if (!priv->clk || wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -1842,6 +1854,38 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
return phy_mii_ioctl(phydev, req, cmd);
}
+static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX */
+ ravb_rcv_snd_disable(ndev);
+
+ /* Modify RX Checksum setting */
+ ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
+
+ /* Enable TX and RX */
+ ravb_rcv_snd_enable(ndev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int ravb_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_RXCSUM)
+ ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
+
+ ndev->features = features;
+
+ return 0;
+}
+
static const struct net_device_ops ravb_netdev_ops = {
.ndo_open = ravb_open,
.ndo_stop = ravb_close,
@@ -1853,6 +1897,7 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_do_ioctl = ravb_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = ravb_set_features,
};
/* MDIO bus init function */
@@ -1912,22 +1957,12 @@ MODULE_DEVICE_TABLE(of, ravb_match_table);
static int ravb_set_gti(struct net_device *ndev)
{
-
+ struct ravb_private *priv = netdev_priv(ndev);
struct device *dev = ndev->dev.parent;
- struct device_node *np = dev->of_node;
unsigned long rate;
- struct clk *clk;
uint64_t inc;
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- dev_err(dev, "could not get clock\n");
- return PTR_ERR(clk);
- }
-
- rate = clk_get_rate(clk);
- clk_put(clk);
-
+ rate = clk_get_rate(priv->clk);
if (!rate)
return -EINVAL;
@@ -2004,6 +2039,9 @@ static int ravb_probe(struct platform_device *pdev)
if (!ndev)
return -ENOMEM;
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -2073,10 +2111,11 @@ static int ravb_probe(struct platform_device *pdev)
priv->chip_id = chip_id;
- /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- priv->clk = NULL;
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_release;
+ }
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
@@ -2144,8 +2183,7 @@ static int ravb_probe(struct platform_device *pdev)
if (error)
goto out_napi_del;
- if (priv->clk)
- device_set_wakeup_capable(&pdev->dev, 1);
+ device_set_wakeup_capable(&pdev->dev, 1);
/* Print device information */
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
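The EtherAVB RX-checksum offload enabled above has the MAC append a 2-byte checksum after the frame data; ravb_rx_csum() lifts that little-endian trailer into skb->csum, marks the packet CHECKSUM_COMPLETE, and trims the two bytes off the length. Below is a tiny userspace model of the trailer handling; the buffer and names are made up for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Pull the 2-byte little-endian checksum the hardware appended and shorten
 * the frame, mirroring what ravb_rx_csum() does with skb_trim(). Returns 0
 * if the frame is too short to carry a trailer. */
static uint16_t rx_trailer_csum(const uint8_t *data, unsigned int *len)
{
	uint16_t csum;

	if (*len < 2)
		return 0;
	csum = (uint16_t)(data[*len - 2] | (data[*len - 1] << 8));
	*len -= 2;
	return csum;
}

int main(void)
{
	/* 4 payload bytes followed by a fake checksum trailer 0xbeef (LE). */
	uint8_t frame[] = { 0x01, 0x02, 0x03, 0x04, 0xef, 0xbe };
	unsigned int len = sizeof(frame);
	uint16_t csum = rx_trailer_csum(frame, &len);

	printf("payload len=%u csum=0x%04x\n", len, csum);	/* 4, 0xbeef */
	return 0;
}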
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d2e88a30f57b..7e060aa9fbed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -594,7 +594,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
};
/* There is CPU dependent code */
-static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
+static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -608,10 +608,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
}
}
-/* R8A7778/9 */
-static struct sh_eth_cpu_data r8a777x_data = {
+/* R-Car Gen1 */
+static struct sh_eth_cpu_data rcar_gen1_data = {
.set_duplex = sh_eth_set_duplex,
- .set_rate = sh_eth_set_rate_r8a777x,
+ .set_rate = sh_eth_set_rate_rcar,
.register_type = SH_ETH_REG_FAST_RCAR,
@@ -635,10 +635,10 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
-/* R8A7790/1 */
-static struct sh_eth_cpu_data r8a779x_data = {
+/* R-Car Gen2 and RZ/G1 */
+static struct sh_eth_cpu_data rcar_gen2_data = {
.set_duplex = sh_eth_set_duplex,
- .set_rate = sh_eth_set_rate_r8a777x,
+ .set_rate = sh_eth_set_rate_rcar,
.register_type = SH_ETH_REG_FAST_RCAR,
@@ -3086,15 +3086,17 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
static const struct of_device_id sh_eth_match_table[] = {
{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
- { .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data },
- { .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data },
- { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
- { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
- { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
- { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
- { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
- { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
+ { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
+ { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
+ { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
+ { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
+ { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
+ { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
+ { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 0653b70723a3..6d6fb8cf3e7c 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1983,9 +1983,9 @@ err_out:
return err;
}
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = (struct ofdpa *)data;
+ struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
@@ -2368,8 +2368,7 @@ static int ofdpa_init(struct rocker *rocker)
hash_init(ofdpa->neigh_tbl);
spin_lock_init(&ofdpa->neigh_tbl_lock);
- setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
- (unsigned long) ofdpa);
+ timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 89831adb8eb7..fd35d8004a78 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -105,9 +105,9 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
* If there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
-static void sxgbe_eee_ctrl_timer(unsigned long arg)
+static void sxgbe_eee_ctrl_timer(struct timer_list *t)
{
- struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
+ struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer);
sxgbe_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
@@ -134,8 +134,7 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
return false;
priv->eee_active = 1;
- setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer,
- (unsigned long)priv);
+ timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
@@ -1002,13 +1001,13 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
/**
* sxgbe_tx_timer: mitigation sw timer for tx.
- * @data: data pointer
+ * @t: timer pointer
* Description:
* This is the timer handler to directly invoke the sxgbe_tx_clean.
*/
-static void sxgbe_tx_timer(unsigned long data)
+static void sxgbe_tx_timer(struct timer_list *t)
{
- struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
+ struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);
sxgbe_tx_queue_clean(p);
}
@@ -1028,8 +1027,7 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
struct sxgbe_tx_queue *p = priv->txq[queue_num];
p->tx_coal_frames = SXGBE_TX_FRAMES;
p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
- setup_timer(&p->txtimer, sxgbe_tx_timer,
- (unsigned long)&priv->txq[queue_num]);
+ timer_setup(&p->txtimer, sxgbe_tx_timer, 0);
p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
add_timer(&p->txtimer);
}
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 244c1e171017..c5bc124b41a9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -170,9 +170,11 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
/*
* Switch LED off...
*/
-static void ether3_ledoff(unsigned long data)
+static void ether3_ledoff(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
+ struct dev_priv *private = from_timer(private, t, timer);
+ struct net_device *dev = private->dev;
+
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
}
@@ -183,8 +185,6 @@ static inline void ether3_ledon(struct net_device *dev)
{
del_timer(&priv(dev)->timer);
priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
- priv(dev)->timer.data = (unsigned long)dev;
- priv(dev)->timer.function = ether3_ledoff;
add_timer(&priv(dev)->timer);
if (priv(dev)->regs.config2 & CFG2_CTRLO)
ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2);
@@ -783,7 +783,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
ether3_addr(dev->dev_addr, ec);
- init_timer(&priv(dev)->timer);
+ priv(dev)->dev = dev;
+ timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
/* Reset card...
*/
diff --git a/drivers/net/ethernet/seeq/ether3.h b/drivers/net/ethernet/seeq/ether3.h
index 2db63b08bdf3..be19e5fa5cf2 100644
--- a/drivers/net/ethernet/seeq/ether3.h
+++ b/drivers/net/ethernet/seeq/ether3.h
@@ -165,6 +165,7 @@ struct dev_priv {
unsigned char tx_tail; /* buffer nr of transmitting packet */
unsigned int rx_head; /* address to fetch next packet from */
struct timer_list timer;
+ struct net_device *dev;
int broken; /* 0 = ok, 1 = something went wrong */
};
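When the timer lives in a private structure but the callback ultimately needs the net_device, the conversion must make that pointer reachable from the container: ether3 above adds a dev back-pointer (filled in at probe time), falcon below adds an efx back-pointer to falcon_nic_data, while sis190/sis900/epic100 further down simply reuse a net_device pointer their private data already carries (tp->dev, mii_info.dev, mii.dev). Continuing the sketch above, the back-pointer variant looks roughly like:

struct foo_priv {
        struct timer_list timer;
        struct net_device *dev;         /* back-pointer set at probe time */
};

static void foo_led_off(struct timer_list *t)
{
        struct foo_priv *priv = from_timer(priv, t, timer);
        struct net_device *dev = priv->dev;

        netif_wake_queue(dev);          /* illustrative use of the recovered net_device */
}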
diff --git a/drivers/net/ethernet/seeq/sgiseeq.h b/drivers/net/ethernet/seeq/sgiseeq.h
index 2211e2987a8d..2f65c55608df 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.h
+++ b/drivers/net/ethernet/seeq/sgiseeq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* sgiseeq.h: Defines for the Seeq8003 ethernet controller.
*
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 520cfcc17785..3bac58d0f88b 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sfc-y += efx.o nic.o farch.o siena.o ef10.o tx.o rx.o \
selftest.o ethtool.o ptp.o tx_tso.o \
mcdi.o mcdi_port.o mcdi_mon.o
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 13f72f5b18d2..e566dbb3343d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -674,6 +674,10 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
+ efx->net_dev->hw_features |= NETIF_F_RXFCS;
+
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
goto fail5;
@@ -2073,7 +2077,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
- if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
/* Note test interrupts */
if (context->index == efx->irq_level)
efx->last_irq_cpu = raw_smp_processor_id();
@@ -2088,7 +2092,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -3199,11 +3203,15 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
const efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
+ bool handled = false;
if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
- if (!efx->loopback_selftest)
- channel->n_rx_eth_crc_err += n_packets;
- return EFX_RX_PKT_DISCARD;
+ if (!(efx->net_dev->features & NETIF_F_RXALL)) {
+ if (!efx->loopback_selftest)
+ channel->n_rx_eth_crc_err += n_packets;
+ return EFX_RX_PKT_DISCARD;
+ }
+ handled = true;
}
if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
@@ -3274,7 +3282,7 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
return 0;
}
- WARN_ON(1); /* No error bits were recognised */
+ WARN_ON(!handled); /* No error bits were recognised */
return 0;
}
@@ -3291,7 +3299,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
bool rx_cont;
u16 flags = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
/* Basic packet information */
@@ -3428,7 +3436,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_q_label;
int tx_descs = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5316,7 +5324,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (ACCESS_ONCE(table->entry[i].spec) &
+ if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
rc = efx_ef10_filter_remove_internal(efx,
1U << EFX_FILTER_PRI_AUTO, i, true);
@@ -5726,7 +5734,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
* MCFW do not support VFs.
*/
rc = efx_ef10_vport_set_mac_address(efx);
- } else {
+ } else if (rc) {
efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
sizeof(inbuf), NULL, 0, rc);
}
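The ACCESS_ONCE() sites throughout the sfc and falcon code are converted to READ_ONCE() (and, in ptp.c below, the stores become WRITE_ONCE()). Both forms force the compiler to emit exactly one, untorn access to a shared location without adding ordering; READ_ONCE()/WRITE_ONCE() are the current interfaces and, unlike ACCESS_ONCE(), also handle non-scalar types. A small illustration of the idiom, not taken from the driver:

#include <linux/compiler.h>

/* Illustrative flag shared between an IRQ handler and process context. */
static bool irq_soft_enabled;

static void example_irq_path(void)
{
        /* Single real load; the compiler may not re-read or tear it. */
        if (!READ_ONCE(irq_soft_enabled))
                return;
        /* ... handle the interrupt ... */
}

static void example_ctl_path(bool on)
{
        /* Replaces "ACCESS_ONCE(irq_soft_enabled) = on;". */
        WRITE_ONCE(irq_soft_enabled, on);
}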
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b9cb697b2818..e3c492fcaff0 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -471,8 +471,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
return channel;
}
@@ -511,8 +510,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
- (unsigned long)rx_queue);
+ timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
return channel;
}
@@ -2317,8 +2315,11 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return rc;
}
- /* If Rx VLAN filter is changed, update filters via mac_reconfigure */
- if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+ * If rx-fcs is changed, mac_reconfigure updates that too.
+ */
+ if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXFCS)) {
/* efx_set_rx_mode() will schedule MAC work to update filters
* when a new features are finally set in net_dev.
*/
@@ -2809,7 +2810,7 @@ static void efx_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2875,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a
@@ -3244,7 +3245,7 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_RXCSUM);
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Check whether device supports TSO */
@@ -3255,7 +3256,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
- net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+ net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
+
+ /* Disable receiving frames with bad FCS, by default. */
+ net_dev->features &= ~NETIF_F_RXALL;
/* Disable VLAN filtering by default. It may be enforced if
* the feature is fixed (i.e. VLAN filters are required to
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d407adf59610..52c84b782901 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -46,7 +46,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
-void efx_rx_slow_fill(unsigned long context);
+void efx_rx_slow_fill(struct timer_list *t);
void __efx_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
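Several sfc hunks cooperate to add rx-all/rx-fcs support: ef10.c advertises NETIF_F_RXFCS when the firmware capability bit is present, efx.c exposes NETIF_F_RXALL in hw_features (cleared from the default features) and schedules a MAC reconfigure when either bit toggles, mcdi_port.c below passes SET_MAC_IN_FLAG_INCLUDE_FCS to the firmware, and farch.c stops discarding CRC-error frames when RXALL is on. The feature-toggle check itself follows the usual ndo_set_features shape; a sketch under assumed names, not the sfc code verbatim:

#include <linux/netdevice.h>

static int example_set_features(struct net_device *net_dev,
                                netdev_features_t features)
{
        netdev_features_t changed = net_dev->features ^ features;

        /* Reprogram MAC/filters only when one of these bits actually flips. */
        if (changed & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS))
                example_schedule_mac_work(net_dev);     /* hypothetical helper */

        return 0;
}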
diff --git a/drivers/net/ethernet/sfc/falcon/Makefile b/drivers/net/ethernet/sfc/falcon/Makefile
index aa1b45979ca4..39448e5b7f1e 100644
--- a/drivers/net/ethernet/sfc/falcon/Makefile
+++ b/drivers/net/ethernet/sfc/falcon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sfc-falcon-y += efx.o nic.o farch.o falcon.o tx.o rx.o selftest.o \
ethtool.o qt202x_phy.o mdio_10g.o tenxpress.o \
txc43128_phy.o falcon_boards.o
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 29614da91cbf..3d6c91e96589 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -449,8 +449,7 @@ ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
- setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
- (unsigned long)rx_queue);
+ timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
return channel;
}
@@ -489,8 +488,7 @@ ef4_copy_channel(const struct ef4_channel *old_channel)
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill,
- (unsigned long)rx_queue);
+ timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
return channel;
}
@@ -2545,7 +2543,7 @@ static void ef4_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2603,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h
index 4f3bb30661ea..a4e4d8ea4078 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.h
+++ b/drivers/net/ethernet/sfc/falcon/efx.h
@@ -45,7 +45,7 @@ void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue);
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic);
-void ef4_rx_slow_fill(unsigned long context);
+void ef4_rx_slow_fill(struct timer_list *t);
void __ef4_rx_packet(struct ef4_channel *channel);
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 93c713c1f627..6520d7bc8d21 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
ef4_oword_t reg;
int link_speed, isolate;
- isolate = !!ACCESS_ONCE(efx->reset_pending);
+ isolate = !!READ_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
@@ -1454,10 +1454,11 @@ static void falcon_stats_complete(struct ef4_nic *efx)
}
}
-static void falcon_stats_timer_func(unsigned long context)
+static void falcon_stats_timer_func(struct timer_list *t)
{
- struct ef4_nic *efx = (struct ef4_nic *)context;
- struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_nic_data *nic_data = from_timer(nic_data, t,
+ stats_timer);
+ struct ef4_nic *efx = nic_data->efx;
spin_lock(&efx->stats_lock);
@@ -2295,6 +2296,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
if (!nic_data)
return -ENOMEM;
efx->nic_data = nic_data;
+ nic_data->efx = efx;
rc = -ENODEV;
@@ -2402,8 +2404,7 @@ static int falcon_probe_nic(struct ef4_nic *efx)
}
nic_data->stats_disable_count = 1;
- setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
- (unsigned long)efx);
+ timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);
return 0;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 05916c710d8c..494884f6af4a 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
struct ef4_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
struct ef4_rx_queue *rx_queue;
struct ef4_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
struct ef4_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index a4c4592f6023..07c62dc552cb 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -267,6 +267,7 @@ enum {
/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
+ * @efx: ef4_nic pointer
* @board: Board state and functions
* @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
@@ -280,6 +281,7 @@ enum {
*/
struct falcon_nic_data {
struct pci_dev *pci_dev2;
+ struct ef4_nic *efx;
struct falcon_board board;
u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
@@ -464,11 +466,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 6a8406dc0c2b..02456ed13a7d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -163,7 +163,7 @@ static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
do {
page = ef4_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
@@ -376,9 +376,9 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
ef4_nic_notify_rx_desc(rx_queue);
}
-void ef4_rx_slow_fill(unsigned long context)
+void ef4_rx_slow_fill(struct timer_list *t)
{
- struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;
+ struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
ef4_nic_generate_fill_event(rx_queue);
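__GFP_COLD disappears from the page allocations here and in sfc/rx.c below because the cold-page hint was removed from the page allocator around the same kernel release; callers simply keep the remaining flags. Sketch of the resulting call, mirroring the atomic/process-context split the driver uses:

#include <linux/gfp.h>

static struct page *example_alloc_rx_page(bool atomic, unsigned int order)
{
        /* __GFP_COLD is gone; only the compound-page and context flags remain. */
        return alloc_pages(__GFP_COMP | (atomic ? GFP_ATOMIC : GFP_KERNEL),
                           order);
}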
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 6a75f4140a4b..3409bbf5b19f 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -435,7 +435,7 @@ int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
unsigned tc, num_tc;
int rc;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
num_tc = mqprio->num_tc;
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
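Both falcon/tx.c and sfc/tx.c switch their ndo_setup_tc check from TC_SETUP_MQPRIO to TC_SETUP_QDISC_MQPRIO; the enum value was renamed (presumably to distinguish qdisc-level offload requests from the block/classifier setup types), and the handler is otherwise unchanged. Roughly:

static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
                            void *type_data)
{
        struct tc_mqprio_qopt *mqprio = type_data;

        if (type != TC_SETUP_QDISC_MQPRIO)      /* was TC_SETUP_MQPRIO */
                return -EOPNOTSUPP;

        return example_apply_mqprio(dev, mqprio->num_tc);       /* hypothetical */
}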
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index ba45150f53c7..5334dc83d926 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -927,6 +927,10 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
}
#endif
+ if (efx->net_dev->features & NETIF_F_RXALL)
+ /* don't discard frame for CRC error */
+ rx_ev_eth_crc_err = false;
+
/* The frame must be discarded if any of these are true. */
return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
@@ -979,7 +983,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1524,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
@@ -1612,7 +1616,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 3df872f56289..9c2567b0d93e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -48,7 +48,7 @@ struct efx_mcdi_async_param {
/* followed by request/response buffer */
};
-static void efx_mcdi_timeout_async(unsigned long context);
+static void efx_mcdi_timeout_async(struct timer_list *t);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
@@ -87,8 +87,7 @@ int efx_mcdi_init(struct efx_nic *efx)
mcdi->mode = MCDI_MODE_POLL;
spin_lock_init(&mcdi->async_lock);
INIT_LIST_HEAD(&mcdi->async_list);
- setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
- (unsigned long)mcdi);
+ timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
(void) efx_mcdi_poll_reboot(efx);
mcdi->new_epoch = true;
@@ -608,9 +607,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
}
}
-static void efx_mcdi_timeout_async(unsigned long context)
+static void efx_mcdi_timeout_async(struct timer_list *t)
{
- struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+ struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
efx_mcdi_complete_async(mcdi, true);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index c7407d129c7d..6e1f282b2976 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1029,6 +1029,10 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS,
+ !!(efx->net_dev->features & NETIF_F_RXFCS));
+
switch (efx->wanted_fc) {
case EFX_FC_RX | EFX_FC_TX:
fcntl = MC_CMD_FCNTL_BIDIR;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4d7fb8af880d..7b51b6371724 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 60cdb97f58e2..caa89bf7603e 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -648,17 +648,15 @@ static void efx_ptp_send_times(struct efx_nic *efx,
struct pps_event_time now;
struct timespec64 limit;
struct efx_ptp_data *ptp = efx->ptp_data;
- struct timespec64 start;
int *mc_running = ptp->start.addr;
pps_get_ts(&now);
- start = now.ts_real;
limit = now.ts_real;
timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
/* Write host time for specified period or until MC is done */
while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
- ACCESS_ONCE(*mc_running)) {
+ READ_ONCE(*mc_running)) {
struct timespec64 update_time;
unsigned int host_time;
@@ -668,7 +666,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
do {
pps_get_ts(&now);
} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
- ACCESS_ONCE(*mc_running));
+ READ_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +830,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
ptp->start.dma_addr);
/* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ WRITE_ONCE(*start, 0);
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
- while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
udelay(20); /* Usually start MCDI execution quickly */
loops++;
}
@@ -849,7 +847,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
if (!time_before(jiffies, timeout))
++ptp->sync_timeouts;
- if (ACCESS_ONCE(*start))
+ if (READ_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
/* Collect results */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 42443f434569..cfe76aad79ee 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -163,7 +163,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
do {
page = efx_reuse_page(rx_queue);
if (page == NULL) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP |
+ page = alloc_pages(__GFP_COMP |
(atomic ? GFP_ATOMIC : GFP_KERNEL),
efx->rx_buffer_order);
if (unlikely(page == NULL))
@@ -376,9 +376,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
efx_nic_notify_rx_desc(rx_queue);
}
-void efx_rx_slow_fill(unsigned long context)
+void efx_rx_slow_fill(struct timer_list *t)
{
- struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+ struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
/* Post an event to cause NAPI to run and refill the queue */
efx_nic_generate_fill_event(rx_queue);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 32bf1fecf864..0ea7e16f2e6e 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -663,7 +663,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
unsigned tc, num_tc;
int rc;
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
num_tc = mqprio->num_tc;
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 9c0488e0f08e..18d533fdf14c 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -764,9 +764,9 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip)
ioc3_w_emcr(ip->emcr);
}
-static void ioc3_timer(unsigned long data)
+static void ioc3_timer(struct timer_list *t)
{
- struct ioc3_private *ip = (struct ioc3_private *) data;
+ struct ioc3_private *ip = from_timer(ip, t, ioc3_timer);
/* Print the link status if it has changed */
mii_check_media(&ip->mii, 1, 0);
@@ -818,8 +818,6 @@ out:
static void ioc3_mii_start(struct ioc3_private *ip)
{
ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
- ip->ioc3_timer.data = (unsigned long) ip;
- ip->ioc3_timer.function = ioc3_timer;
add_timer(&ip->ioc3_timer);
}
@@ -1291,7 +1289,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
spin_lock_init(&ip->ioc3_lock);
- init_timer(&ip->ioc3_timer);
+ timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
ioc3_stop(ip);
ioc3_init(dev);
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 445109bd6910..c2c50522b96d 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1018,10 +1018,10 @@ out_unlock:
rtnl_unlock();
}
-static void sis190_phy_timer(unsigned long __opaque)
+static void sis190_phy_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)__opaque;
- struct sis190_private *tp = netdev_priv(dev);
+ struct sis190_private *tp = from_timer(tp, t, timer);
+ struct net_device *dev = tp->dev;
if (likely(netif_running(dev)))
schedule_work(&tp->phy_task);
@@ -1039,10 +1039,8 @@ static inline void sis190_request_timer(struct net_device *dev)
struct sis190_private *tp = netdev_priv(dev);
struct timer_list *timer = &tp->timer;
- init_timer(timer);
+ timer_setup(timer, sis190_phy_timer, 0);
timer->expires = jiffies + SIS190_PHY_TIMEOUT;
- timer->data = (unsigned long)dev;
- timer->function = sis190_phy_timer;
add_timer(timer);
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 40bd88362e3d..4bb89f74742c 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -218,7 +218,7 @@ static void sis900_init_rxfilter (struct net_device * net_dev);
static u16 read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
-static void sis900_timer(unsigned long data);
+static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
static void sis900_tx_timeout(struct net_device *net_dev);
static void sis900_init_tx_ring(struct net_device *net_dev);
@@ -1065,10 +1065,8 @@ sis900_open(struct net_device *net_dev)
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
- init_timer(&sis_priv->timer);
+ timer_setup(&sis_priv->timer, sis900_timer, 0);
sis_priv->timer.expires = jiffies + HZ;
- sis_priv->timer.data = (unsigned long)net_dev;
- sis_priv->timer.function = sis900_timer;
add_timer(&sis_priv->timer);
return 0;
@@ -1302,10 +1300,10 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
* link status (ON/OFF) and link mode (10/100/Full/Half)
*/
-static void sis900_timer(unsigned long data)
+static void sis900_timer(struct timer_list *t)
{
- struct net_device *net_dev = (struct net_device *)data;
- struct sis900_private *sis_priv = netdev_priv(net_dev);
+ struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
+ struct net_device *net_dev = sis_priv->mii_info.dev;
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
int speed = 0, duplex = 0;
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index f0da3dc52c01..66e4bf38770f 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* sis900.h Definitions for SiS ethernet controllers including 7014/7016 and 900
* Copyright 1999 Silicon Integrated System Corporation
* References:
diff --git a/drivers/net/ethernet/smsc/Makefile b/drivers/net/ethernet/smsc/Makefile
index f3438dec9d90..4105912b1629 100644
--- a/drivers/net/ethernet/smsc/Makefile
+++ b/drivers/net/ethernet/smsc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the SMSC network device drivers.
#
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 6a0e1d4b597c..949aaef390b6 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -290,7 +290,7 @@ static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
-static void epic_timer(unsigned long data);
+static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
@@ -739,10 +739,8 @@ static int epic_open(struct net_device *dev)
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
- init_timer(&ep->timer);
+ timer_setup(&ep->timer, epic_timer, 0);
ep->timer.expires = jiffies + 3*HZ;
- ep->timer.data = (unsigned long)dev;
- ep->timer.function = epic_timer; /* timer handler */
add_timer(&ep->timer);
return rc;
@@ -845,10 +843,10 @@ static void check_media(struct net_device *dev)
}
}
-static void epic_timer(unsigned long data)
+static void epic_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct epic_private *ep = netdev_priv(dev);
+ struct epic_private *ep = from_timer(ep, t, timer);
+ struct net_device *dev = ep->mii.dev;
void __iomem *ioaddr = ep->ioaddr;
int next_tick = 5*HZ;
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 92c927aec66d..a55f430f6a7b 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -280,7 +280,7 @@ static void set_rx_mode(struct net_device *dev);
static int s9k_config(struct net_device *dev, struct ifmap *map);
static void smc_set_xcvr(struct net_device *dev, int if_port);
static void smc_reset(struct net_device *dev);
-static void media_check(u_long arg);
+static void media_check(struct timer_list *t);
static void mdio_sync(unsigned int addr);
static int mdio_read(struct net_device *dev, int phy_id, int loc);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
@@ -1070,7 +1070,7 @@ static int smc_open(struct net_device *dev)
smc->packets_waiting = 0;
smc_reset(dev);
- setup_timer(&smc->media, media_check, (u_long)dev);
+ timer_setup(&smc->media, media_check, 0);
mod_timer(&smc->media, jiffies + HZ);
return 0;
@@ -1708,10 +1708,10 @@ static void smc_reset(struct net_device *dev)
======================================================================*/
-static void media_check(u_long arg)
+static void media_check(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct smc_private *smc = netdev_priv(dev);
+ struct smc_private *smc = from_timer(smc, t, media);
+ struct net_device *dev = smc->mii_if.dev;
unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
u_short link;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 97035766c291..e28c0d2c58e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -159,6 +159,7 @@ config DWMAC_SUN8I
tristate "Allwinner sun8i GMAC support"
default ARCH_SUNXI
depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
---help---
Support for Allwinner H3 A83T A64 EMAC ethernet controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 238307fadcdb..ff3f83b86d10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 6a9c954492f2..8b50afcdb52d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -118,10 +118,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
return ret;
}
-static void pcs_link_timer_callback(unsigned long data)
+static void pcs_link_timer_callback(struct tse_pcs *pcs)
{
u16 val = 0;
- struct tse_pcs *pcs = (struct tse_pcs *)data;
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
@@ -138,12 +137,11 @@ static void pcs_link_timer_callback(unsigned long data)
}
}
-static void auto_nego_timer_callback(unsigned long data)
+static void auto_nego_timer_callback(struct tse_pcs *pcs)
{
u16 val = 0;
u16 speed = 0;
u16 duplex = 0;
- struct tse_pcs *pcs = (struct tse_pcs *)data;
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
@@ -201,14 +199,14 @@ static void auto_nego_timer_callback(unsigned long data)
}
}
-static void aneg_link_timer_callback(unsigned long data)
+static void aneg_link_timer_callback(struct timer_list *t)
{
- struct tse_pcs *pcs = (struct tse_pcs *)data;
+ struct tse_pcs *pcs = from_timer(pcs, t, aneg_link_timer);
if (pcs->autoneg == AUTONEG_ENABLE)
- auto_nego_timer_callback(data);
+ auto_nego_timer_callback(pcs);
else if (pcs->autoneg == AUTONEG_DISABLE)
- pcs_link_timer_callback(data);
+ pcs_link_timer_callback(pcs);
}
void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
@@ -237,8 +235,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
tse_pcs_reset(tse_pcs_base, pcs);
- setup_timer(&pcs->aneg_link_timer,
- aneg_link_timer_callback, (unsigned long)pcs);
+ timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback,
+ 0);
mod_timer(&pcs->aneg_link_timer, jiffies +
msecs_to_jiffies(AUTONEGO_LINK_TIMER));
} else if (phy_dev->autoneg == AUTONEG_DISABLE) {
@@ -270,8 +268,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
tse_pcs_reset(tse_pcs_base, pcs);
- setup_timer(&pcs->aneg_link_timer,
- aneg_link_timer_callback, (unsigned long)pcs);
+ timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback,
+ 0);
mod_timer(&pcs->aneg_link_timer, jiffies +
msecs_to_jiffies(AUTONEGO_LINK_TIMER));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e82b4b70b7be..e1e5ac053760 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -442,8 +442,9 @@ struct stmmac_dma_ops {
void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
int rxfifosz);
void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
- int fifosz);
- void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
+ int fifosz, u8 qmode);
+ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz, u8 qmode);
/* To track extra statistic (if supported) */
void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr);
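The stmmac dma_rx_mode/dma_tx_mode callbacks now take the per-queue FIFO size and a queue mode. Judging from the MTL_OP_MODE_TXQEN_AV bit added in dwmac4.h below, the intent is to let an implementation derive thresholds from the real FIFO size and program the TX queue as AVB or as a plainly enabled queue depending on its mode. A rough, assumption-laden sketch of what such a callback could do (not the dwmac4 code):

static void example_dma_tx_mode(void __iomem *ioaddr, int mode, u32 channel,
                                int fifosz, u8 qmode)
{
        u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));

        if (mode == SF_DMA_MODE)
                mtl_tx_op |= MTL_OP_MODE_TSF;   /* store-and-forward */
        /* fifosz would otherwise feed the threshold programming (omitted). */

        /* Pick the queue-enable encoding from the queue mode. */
        mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
        if (qmode == MTL_QUEUE_AVB)
                mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
        else
                mtl_tx_op |= MTL_OP_MODE_TXQEN;

        writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}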
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 866444b6c82f..2c6d7c69c8f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -51,15 +51,11 @@
#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
-#define NSS_COMMON_MACSEC_CTL 0x28
-#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
-
#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
-#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
#define NSS_COMMON_CLK_DIV_RGMII_1000 1
#define NSS_COMMON_CLK_DIV_RGMII_100 9
@@ -68,9 +64,6 @@
#define NSS_COMMON_CLK_DIV_SGMII_100 4
#define NSS_COMMON_CLK_DIV_SGMII_10 49
-#define QSGMII_PCS_MODE_CTL 0x68
-#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
-
#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
@@ -83,15 +76,10 @@
#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
#define QSGMII_PHY_QSGMII_EN BIT(7)
#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
-#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
-#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
-#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
-#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
-#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
struct ipq806x_gmac {
struct platform_device *pdev;
@@ -217,7 +205,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
* code and keep it consistent with the Linux convention, we'll number
* them from 0 to 3 here.
*/
- if (gmac->id < 0 || gmac->id > 3) {
+ if (gmac->id > 3) {
dev_err(dev, "invalid gmac id\n");
return -EINVAL;
}
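Dropping the "gmac->id < 0" half of the range check is the usual fix for comparing an unsigned value against zero: if the id field is unsigned, that half can never be true and only draws a compiler warning, so the upper bound alone is sufficient. For instance:

static int example_check_id(u32 id)     /* illustrative only */
{
        if (id > 3)     /* "id < 0" on an unsigned value is always false */
                return -EINVAL;
        return 0;
}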
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 39c2122a4f26..e5ff734d4f9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -41,14 +42,14 @@
* This value is used for disabling properly EMAC
* and used as a good starting value in case of the
* boot process(uboot) leave some stuff.
- * @internal_phy: Does the MAC embed an internal PHY
+ * @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
* @support_rmii: Does the MAC handle RMII
* @support_rgmii: Does the MAC handle RGMII
*/
struct emac_variant {
u32 default_syscon_value;
- int internal_phy;
+ bool soc_has_internal_phy;
bool support_mii;
bool support_rmii;
bool support_rgmii;
@@ -61,7 +62,8 @@ struct emac_variant {
* @rst_ephy: reference to the optional EPHY reset for the internal PHY
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
- * @use_internal_phy: Does the current PHY choice imply using the internal PHY
+ * @internal_phy_powered: Is the internal PHY currently powered
+ * @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
struct clk *tx_clk;
@@ -70,12 +72,13 @@ struct sunxi_priv_data {
struct reset_control *rst_ephy;
const struct emac_variant *variant;
struct regmap *regmap;
- bool use_internal_phy;
+ bool internal_phy_powered;
+ void *mux_handle;
};
static const struct emac_variant emac_variant_h3 = {
.default_syscon_value = 0x58000,
- .internal_phy = PHY_INTERFACE_MODE_MII,
+ .soc_has_internal_phy = true,
.support_mii = true,
.support_rmii = true,
.support_rgmii = true
@@ -83,20 +86,20 @@ static const struct emac_variant emac_variant_h3 = {
static const struct emac_variant emac_variant_v3s = {
.default_syscon_value = 0x38000,
- .internal_phy = PHY_INTERFACE_MODE_MII,
+ .soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
.default_syscon_value = 0,
- .internal_phy = 0,
+ .soc_has_internal_phy = false,
.support_mii = true,
.support_rgmii = true
};
static const struct emac_variant emac_variant_a64 = {
.default_syscon_value = 0,
- .internal_phy = 0,
+ .soc_has_internal_phy = false,
.support_mii = true,
.support_rmii = true,
.support_rgmii = true
@@ -195,6 +198,9 @@ static const struct emac_variant emac_variant_a64 = {
#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */
#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */
#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */
+#define H3_EPHY_MUX_MASK (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT)
+#define DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID 1
+#define DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID 2
/* H3/A64 specific bits */
#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
@@ -634,6 +640,159 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
return 0;
}
+/* Search in mdio-mux node for internal PHY node and get its clk/reset */
+static int get_ephy_nodes(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ struct device_node *mdio_mux, *iphynode;
+ struct device_node *mdio_internal;
+ int ret;
+
+ mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
+ if (!mdio_mux) {
+ dev_err(priv->device, "Cannot get mdio-mux node\n");
+ return -ENODEV;
+ }
+
+ mdio_internal = of_find_compatible_node(mdio_mux, NULL,
+ "allwinner,sun8i-h3-mdio-internal");
+ if (!mdio_internal) {
+ dev_err(priv->device, "Cannot get internal_mdio node\n");
+ return -ENODEV;
+ }
+
+ /* Seek for internal PHY */
+ for_each_child_of_node(mdio_internal, iphynode) {
+ gmac->ephy_clk = of_clk_get(iphynode, 0);
+ if (IS_ERR(gmac->ephy_clk))
+ continue;
+ gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
+ if (IS_ERR(gmac->rst_ephy)) {
+ ret = PTR_ERR(gmac->rst_ephy);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ continue;
+ }
+ dev_info(priv->device, "Found internal PHY node\n");
+ return 0;
+ }
+ return -ENODEV;
+}
+
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ int ret;
+
+ if (gmac->internal_phy_powered) {
+ dev_warn(priv->device, "Internal PHY already powered\n");
+ return 0;
+ }
+
+ dev_info(priv->device, "Powering internal PHY\n");
+ ret = clk_prepare_enable(gmac->ephy_clk);
+ if (ret) {
+ dev_err(priv->device, "Cannot enable internal PHY\n");
+ return ret;
+ }
+
+ /* Make sure the EPHY is properly reset, as U-Boot may leave
+ * it in a deasserted state, and thus it may fail to reset EMAC.
+ */
+ reset_control_assert(gmac->rst_ephy);
+
+ ret = reset_control_deassert(gmac->rst_ephy);
+ if (ret) {
+ dev_err(priv->device, "Cannot deassert internal phy\n");
+ clk_disable_unprepare(gmac->ephy_clk);
+ return ret;
+ }
+
+ gmac->internal_phy_powered = true;
+
+ return 0;
+}
+
+static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+{
+ if (!gmac->internal_phy_powered)
+ return 0;
+
+ clk_disable_unprepare(gmac->ephy_clk);
+ reset_control_assert(gmac->rst_ephy);
+ gmac->internal_phy_powered = false;
+ return 0;
+}
+
+/* MDIO multiplexing switch function
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ * 'current_child' is the current value of the mux register
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ * The first time this function is called, current_child == -1.
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct stmmac_priv *priv = data;
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ u32 reg, val;
+ int ret = 0;
+ bool need_power_ephy = false;
+
+ if (current_child ^ desired_child) {
+ regmap_read(gmac->regmap, SYSCON_EMAC_REG, &reg);
+ switch (desired_child) {
+ case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
+ dev_info(priv->device, "Switch mux to internal PHY");
+ val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
+
+ need_power_ephy = true;
+ break;
+ case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
+ dev_info(priv->device, "Switch mux to external PHY");
+ val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
+ need_power_ephy = false;
+ break;
+ default:
+ dev_err(priv->device, "Invalid child ID %x\n",
+ desired_child);
+ return -EINVAL;
+ }
+ regmap_write(gmac->regmap, SYSCON_EMAC_REG, val);
+ if (need_power_ephy) {
+ ret = sun8i_dwmac_power_internal_phy(priv);
+ if (ret)
+ return ret;
+ } else {
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ }
+ /* After changing the syscon value, the MAC needs a reset or it will
+ * keep using the last value (and so the last PHY setting).
+ */
+ ret = sun8i_dwmac_reset(priv);
+ }
+ return ret;
+}
+
+static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
+{
+ int ret;
+ struct device_node *mdio_mux;
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
+ if (!mdio_mux)
+ return -ENODEV;
+
+ ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
+ &gmac->mux_handle, priv, priv->mii);
+ return ret;
+}
+
static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
{
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
@@ -648,35 +807,25 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
- if (gmac->variant->internal_phy) {
- if (!gmac->use_internal_phy) {
- /* switch to external PHY interface */
- reg &= ~H3_EPHY_SELECT;
- } else {
- reg |= H3_EPHY_SELECT;
- reg &= ~H3_EPHY_SHUTDOWN;
- dev_dbg(priv->device, "Select internal_phy %x\n", reg);
-
- if (of_property_read_bool(priv->plat->phy_node,
- "allwinner,leds-active-low"))
- reg |= H3_EPHY_LED_POL;
- else
- reg &= ~H3_EPHY_LED_POL;
-
- /* Force EPHY xtal frequency to 24MHz. */
- reg |= H3_EPHY_CLK_SEL;
-
- ret = of_mdio_parse_addr(priv->device,
- priv->plat->phy_node);
- if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
- return ret;
- }
- /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
- * address. No need to mask it again.
- */
- reg |= ret << H3_EPHY_ADDR_SHIFT;
+ if (gmac->variant->soc_has_internal_phy) {
+ if (of_property_read_bool(priv->plat->phy_node,
+ "allwinner,leds-active-low"))
+ reg |= H3_EPHY_LED_POL;
+ else
+ reg &= ~H3_EPHY_LED_POL;
+
+ /* Force EPHY xtal frequency to 24MHz. */
+ reg |= H3_EPHY_CLK_SEL;
+
+ ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ if (ret < 0) {
+ dev_err(priv->device, "Could not parse MDIO addr\n");
+ return ret;
}
+ /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+ * address. No need to mask it again.
+ */
+ reg |= 1 << H3_EPHY_ADDR_SHIFT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -746,81 +895,21 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg);
}
-static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
+static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- int ret;
-
- if (!gmac->use_internal_phy)
- return 0;
-
- ret = clk_prepare_enable(gmac->ephy_clk);
- if (ret) {
- dev_err(priv->device, "Cannot enable ephy\n");
- return ret;
- }
-
- /* Make sure the EPHY is properly reseted, as U-Boot may leave
- * it at deasserted state, and thus it may fail to reset EMAC.
- */
- reset_control_assert(gmac->rst_ephy);
+ struct sunxi_priv_data *gmac = priv;
- ret = reset_control_deassert(gmac->rst_ephy);
- if (ret) {
- dev_err(priv->device, "Cannot deassert ephy\n");
- clk_disable_unprepare(gmac->ephy_clk);
- return ret;
+ if (gmac->variant->soc_has_internal_phy) {
+ /* sun8i_dwmac_exit could be called with mdiomux uninit */
+ if (gmac->mux_handle)
+ mdio_mux_uninit(gmac->mux_handle);
+ if (gmac->internal_phy_powered)
+ sun8i_dwmac_unpower_internal_phy(gmac);
}
- return 0;
-}
-
-static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
-{
- if (!gmac->use_internal_phy)
- return 0;
-
- clk_disable_unprepare(gmac->ephy_clk);
- reset_control_assert(gmac->rst_ephy);
- return 0;
-}
-
-/* sun8i_power_phy() - Activate the PHY:
- * In case of error, no need to call sun8i_unpower_phy(),
- * it will be called anyway by sun8i_dwmac_exit()
- */
-static int sun8i_power_phy(struct stmmac_priv *priv)
-{
- int ret;
-
- ret = sun8i_dwmac_power_internal_phy(priv);
- if (ret)
- return ret;
-
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return ret;
-
- /* After changing syscon value, the MAC need reset or it will use
- * the last value (and so the last PHY set.
- */
- ret = sun8i_dwmac_reset(priv);
- if (ret)
- return ret;
- return 0;
-}
-
-static void sun8i_unpower_phy(struct sunxi_priv_data *gmac)
-{
sun8i_dwmac_unset_syscon(gmac);
- sun8i_dwmac_unpower_internal_phy(gmac);
-}
-
-static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct sunxi_priv_data *gmac = priv;
- sun8i_unpower_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
clk_disable_unprepare(gmac->tx_clk);
@@ -849,7 +938,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
if (!mac)
return NULL;
- ret = sun8i_power_phy(priv);
+ ret = sun8i_dwmac_set_syscon(priv);
if (ret)
return NULL;
@@ -889,6 +978,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
int ret;
+ struct stmmac_priv *priv;
+ struct net_device *ndev;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -932,29 +1023,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
}
plat_dat->interface = of_get_phy_mode(dev->of_node);
- if (plat_dat->interface == gmac->variant->internal_phy) {
- dev_info(&pdev->dev, "Will use internal PHY\n");
- gmac->use_internal_phy = true;
- gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
- if (IS_ERR(gmac->ephy_clk)) {
- ret = PTR_ERR(gmac->ephy_clk);
- dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret);
- return -EINVAL;
- }
-
- gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL);
- if (IS_ERR(gmac->rst_ephy)) {
- ret = PTR_ERR(gmac->rst_ephy);
- if (ret == -EPROBE_DEFER)
- return ret;
- dev_err(&pdev->dev, "No EPHY reset control found %d\n",
- ret);
- return -EINVAL;
- }
- } else {
- dev_info(&pdev->dev, "Will use external PHY\n");
- gmac->use_internal_phy = false;
- }
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
@@ -973,12 +1041,45 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+ goto dwmac_exit;
+
+ ndev = dev_get_drvdata(&pdev->dev);
+ priv = netdev_priv(ndev);
+ /* The mux must be registered after parent MDIO
+ * so after stmmac_dvr_probe()
+ */
+ if (gmac->variant->soc_has_internal_phy) {
+ ret = get_ephy_nodes(priv);
+ if (ret)
+ goto dwmac_exit;
+ ret = sun8i_dwmac_register_mdio_mux(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register mux\n");
+ goto dwmac_mux;
+ }
+ } else {
+ ret = sun8i_dwmac_reset(priv);
+ if (ret)
+ goto dwmac_exit;
+ }
return ret;
+dwmac_mux:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_exit:
+ sun8i_dwmac_exit(pdev, plat_dat->bsp_priv);
+return ret;
}
static const struct of_device_id sun8i_dwmac_match[] = {
+ { .compatible = "allwinner,sun8i-h3-emac",
+ .data = &emac_variant_h3 },
+ { .compatible = "allwinner,sun8i-v3s-emac",
+ .data = &emac_variant_v3s },
+ { .compatible = "allwinner,sun8i-a83t-emac",
+ .data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun50i-a64-emac",
+ .data = &emac_variant_a64 },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
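[Editor's note] Each compatible string added above carries a .data pointer to an emac_variant describing the SoC (internal PHY or not, supported interface modes). A minimal sketch of how such a match table is typically consumed, assuming the probe path resolves it with the generic OF helper rather than walking the table by hand:

    #include <linux/of_device.h>

    /* in sun8i_dwmac_probe(), once platform resources are acquired (sketch) */
    gmac->variant = of_device_get_match_data(&pdev->dev);
    if (!gmac->variant) {
            dev_err(dev, "Missing dwmac-sun8i variant\n");   /* message illustrative */
            return -EINVAL;
    }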
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index d74cedf2a397..789dad8a07b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -98,7 +98,7 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
@@ -106,6 +106,7 @@ enum dwmac4_irq_status {
mmc_tx_irq = 0x00000400,
mmc_rx_irq = 0x00000200,
mmc_irq = 0x00000100,
+ lpi_irq = 0x00000020,
pmt_irq = 0x00000010,
};
@@ -132,6 +133,10 @@ enum power_event {
#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
@@ -225,6 +230,8 @@ enum power_event {
#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2)
+#define MTL_OP_MODE_TXQEN_AV BIT(2)
#define MTL_OP_MODE_TXQEN BIT(3)
#define MTL_OP_MODE_TSF BIT(1)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 2f7d7ec59962..f3ed8f7853eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -580,6 +580,25 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
x->irq_receive_pmt_irq_n++;
}
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpi_irq) {
+ /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
+ u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
if (intr_status & PCS_RGSMIIIS_IRQ)
dwmac4_phystatus(ioaddr, x);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index e84831e1b63b..c110f6850ffa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -191,7 +191,7 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
}
static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
- u32 channel, int fifosz)
+ u32 channel, int fifosz, u8 qmode)
{
unsigned int rqs = fifosz / 256 - 1;
u32 mtl_rx_op, mtl_rx_int;
@@ -218,8 +218,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
- /* enable flow control only if each channel gets 4 KiB or more FIFO */
- if (fifosz >= 4096) {
+ /* Enable flow control only if each channel gets 4 KiB or more FIFO and
+ * only if channel is not an AVB channel.
+ */
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
unsigned int rfd, rfa;
mtl_rx_op |= MTL_OP_MODE_EHFC;
@@ -271,9 +273,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
}
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
- u32 channel)
+ u32 channel, int fifosz, u8 qmode)
{
u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable TX store and forward mode\n");
@@ -306,12 +309,18 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
* For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
* with reset values: TXQEN off, TQS 256 bytes.
*
- * Write the bits in both cases, since it will have no effect when RO.
- * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
- * be RO, however, writing the whole TQS field will result in a value
- * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
+ * TXQEN must be written for multi-channel operation and TQS must
+ * reflect the available fifo size per queue (total fifo size / number
+ * of enabled queues).
*/
- mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+ mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
+ mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
+
writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
}
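[Editor's note] The new TQS programming above carves the per-queue FIFO into 256-byte blocks. A worked example under an assumed 16 KiB TX FIFO shared by four enabled queues (the per-queue split itself is done by the caller in stmmac_main.c):

    /* assumed: total TX FIFO 16384 bytes, tx_queues_to_use == 4 */
    int txfifosz = 16384 / 4;                 /* 4096 bytes per queue   */
    unsigned int tqs = txfifosz / 256 - 1;    /* 15 -> MTL_OP_MODE_TQS  */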
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 16bd50929084..f63c2ddced3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -345,9 +345,9 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
@@ -401,9 +401,8 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- setup_timer(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer,
- (unsigned long)priv);
+ timer_setup(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer, 0);
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));
@@ -1749,12 +1748,20 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
u32 rx_channels_count = priv->plat->rx_queues_to_use;
u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
u32 txmode = 0;
u32 rxmode = 0;
u32 chan = 0;
+ u8 qmode = 0;
if (rxfifosz == 0)
rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
if (priv->plat->force_thresh_dma_mode) {
txmode = tc;
@@ -1777,12 +1784,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
- for (chan = 0; chan < rx_channels_count; chan++)
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz);
+ rxfifosz, qmode);
+ }
- for (chan = 0; chan < tx_channels_count; chan++)
- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
+ }
} else {
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz);
@@ -1946,15 +1960,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
u32 rxmode, u32 chan)
{
+ u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
if (rxfifosz == 0)
rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
if (priv->synopsys_id >= DWMAC_CORE_4_00) {
priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
- rxfifosz);
- priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
+ rxfifosz, rxqmode);
+ priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
+ txfifosz, txqmode);
} else {
priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
rxfifosz);
@@ -2194,9 +2220,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct stmmac_priv *priv = from_timer(priv, t, txtimer);
u32 tx_queues_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2217,10 +2243,8 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- init_timer(&priv->txtimer);
+ timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
- priv->txtimer.data = (unsigned long)priv;
- priv->txtimer.function = stmmac_tx_timer;
add_timer(&priv->txtimer);
}
@@ -3724,6 +3748,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+
+ return ret;
+}
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
@@ -3951,7 +3989,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = stmmac_set_mac_address,
};
/**
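[Editor's note] The timer conversions in this file, and in most of the drivers further down, follow the same new-style timer API: the callback takes the timer_list pointer and recovers its container with from_timer(), while timer_setup() replaces init_timer()/setup_timer() plus the manual .function/.data assignments. A minimal sketch with a hypothetical structure:

    #include <linux/timer.h>

    struct foo {                                          /* hypothetical container */
            struct timer_list poll_timer;
            int ticks;
    };

    static void foo_poll(struct timer_list *t)
    {
            struct foo *f = from_timer(f, t, poll_timer); /* container_of() on the timer field */

            f->ticks++;
            mod_timer(&f->poll_timer, jiffies + HZ);      /* re-arm one second later */
    }

    static void foo_start(struct foo *f)
    {
            timer_setup(&f->poll_timer, foo_poll, 0);     /* no more .data/.function writes */
            mod_timer(&f->poll_timer, jiffies + HZ);
    }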
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6383695004a5..05f122b8424a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -168,8 +168,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing RX queues common config */
- if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
- &plat->rx_queues_to_use))
+ if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use))
plat->rx_queues_to_use = 1;
if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
@@ -191,8 +191,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
else
plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
- if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
- &plat->rx_queues_cfg[queue].chan))
+ if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan))
plat->rx_queues_cfg[queue].chan = queue;
/* TODO: Dynamic mapping to be included in the future */
@@ -222,8 +222,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
}
/* Processing TX queues common config */
- if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
- &plat->tx_queues_to_use))
+ if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use))
plat->tx_queues_to_use = 1;
if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
@@ -244,8 +244,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
if (queue >= plat->tx_queues_to_use)
break;
- if (of_property_read_u8(q_node, "snps,weight",
- &plat->tx_queues_cfg[queue].weight))
+ if (of_property_read_u32(q_node, "snps,weight",
+ &plat->tx_queues_cfg[queue].weight))
plat->tx_queues_cfg[queue].weight = 0x10 + queue;
if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
@@ -318,10 +318,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
bool mdio = true;
static const struct of_device_id need_mdio_ids[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10" },
- { .compatible = "allwinner,sun8i-a83t-emac" },
- { .compatible = "allwinner,sun8i-h3-emac" },
- { .compatible = "allwinner,sun8i-v3s-emac" },
- { .compatible = "allwinner,sun50i-a64-emac" },
{},
};
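[Editor's note] The u8-to-u32 accessor changes above matter because these queue properties are ordinary device-tree cells, i.e. 32-bit big-endian integers; of_property_read_u8() reads byte-sized values and would only see the leading 0x00 of such a cell. A short sketch with an illustrative property value:

    /* DT fragment (illustrative): snps,rx-queues-to-use = <2>;  stored as 00 00 00 02 */
    u32 count;

    if (of_property_read_u32(rx_node, "snps,rx-queues-to-use", &count))
            count = 1;      /* property absent: default to a single queue */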
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
index 37855438b3cb..9a5249dee56e 100644
--- a/drivers/net/ethernet/sun/Makefile
+++ b/drivers/net/ethernet/sun/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Sun network device drivers.
#
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 382993c1561c..113bd57e2ea0 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4079,9 +4079,9 @@ done:
#endif
}
-static void cas_link_timer(unsigned long data)
+static void cas_link_timer(struct timer_list *t)
{
- struct cas *cp = (struct cas *) data;
+ struct cas *cp = from_timer(cp, t, link_timer);
int mask, pending = 0, reset = 0;
unsigned long flags;
@@ -5039,9 +5039,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
mutex_init(&cp->pm_mutex);
- init_timer(&cp->link_timer);
- cp->link_timer.function = cas_link_timer;
- cp->link_timer.data = (unsigned long) cp;
+ timer_setup(&cp->link_timer, cas_link_timer, 0);
#if 1
/* Just in case the implementation of atomic operations
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5b56c24b6ed2..5ea037672e6f 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -307,7 +307,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
/* Get (or create) the vnet associated with this port */
vp = vsw_get_vnet(hp, vdev->mp, &handle);
- if (unlikely(IS_ERR(vp))) {
+ if (IS_ERR(vp)) {
err = PTR_ERR(vp);
pr_err("Failed to get vnet for vsw-port\n");
mdesc_release(hp);
@@ -363,8 +363,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
list_add_rcu(&port->list, &vp->port_list);
spin_unlock_irqrestore(&vp->lock, flags);
- setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common,
- (unsigned long)port);
+ timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0);
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6a4e8e1bbd90..06001bacbe0f 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2221,9 +2221,9 @@ static int niu_link_status(struct niu *np, int *link_up_p)
return err;
}
-static void niu_timer(unsigned long __opaque)
+static void niu_timer(struct timer_list *t)
{
- struct niu *np = (struct niu *) __opaque;
+ struct niu *np = from_timer(np, t, timer);
unsigned long off;
int err, link_up;
@@ -6123,10 +6123,8 @@ static int niu_open(struct net_device *dev)
err = niu_init_hw(np);
if (!err) {
- init_timer(&np->timer);
+ timer_setup(&np->timer, niu_timer, 0);
np->timer.expires = jiffies + HZ;
- np->timer.data = (unsigned long) np;
- np->timer.function = niu_timer;
err = niu_enable_interrupts(np, 1);
if (err)
@@ -6245,7 +6243,7 @@ static void niu_get_rx_stats(struct niu *np,
pkts = dropped = errors = bytes = 0;
- rx_rings = ACCESS_ONCE(np->rx_rings);
+ rx_rings = READ_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
@@ -6276,7 +6274,7 @@ static void niu_get_tx_stats(struct niu *np,
pkts = errors = bytes = 0;
- tx_rings = ACCESS_ONCE(np->tx_rings);
+ tx_rings = READ_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
@@ -6775,10 +6773,8 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
err = niu_init_hw(np);
if (!err) {
- init_timer(&np->timer);
+ timer_setup(&np->timer, niu_timer, 0);
np->timer.expires = jiffies + HZ;
- np->timer.data = (unsigned long) np;
- np->timer.function = niu_timer;
err = niu_enable_interrupts(np, 1);
if (err)
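[Editor's note] ACCESS_ONCE() is being retired tree-wide in favour of READ_ONCE()/WRITE_ONCE(); in niu it guards a lockless read of the ring-array pointer while the statistics are summed. A sketch of the reader side of that idiom (type and field names assumed from the surrounding code):

    /* one real load, no compiler re-reads or tearing of the pointer */
    struct rx_ring_info *rx_rings = READ_ONCE(np->rx_rings);

    if (!rx_rings)
            return;         /* rings not (yet) allocated */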
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
index 51e177e1860d..04c215f91fc0 100644
--- a/drivers/net/ethernet/sun/niu.h
+++ b/drivers/net/ethernet/sun/niu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* niu.h: Definitions for Neptune ethernet driver.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 3189722110c2..0b1f41f6bceb 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -523,9 +523,9 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
return -1;
}
-static void bigmac_timer(unsigned long data)
+static void bigmac_timer(struct timer_list *t)
{
- struct bigmac *bp = (struct bigmac *) data;
+ struct bigmac *bp = from_timer(bp, t, bigmac_timer);
void __iomem *tregs = bp->tregs;
int restart_timer = 0;
@@ -613,8 +613,6 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
bp->timer_state = ltrywait;
bp->timer_ticks = 0;
bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
- bp->bigmac_timer.data = (unsigned long) bp;
- bp->bigmac_timer.function = bigmac_timer;
add_timer(&bp->bigmac_timer);
}
@@ -921,7 +919,7 @@ static int bigmac_open(struct net_device *dev)
printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
return ret;
}
- init_timer(&bp->bigmac_timer);
+ timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
ret = bigmac_init_hw(bp, 0);
if (ret)
free_irq(dev->irq, bp);
@@ -1172,7 +1170,7 @@ static int bigmac_ether_init(struct platform_device *op,
"board-version", 1);
/* Init auto-negotiation timer state. */
- init_timer(&bp->bigmac_timer);
+ timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
bp->timer_state = asleep;
bp->timer_ticks = 0;
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index ee56930475a8..d379bd407eca 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
* sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
*
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index fa607d062cb3..a7afcee3c5ae 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1496,9 +1496,9 @@ static int gem_mdio_link_not_up(struct gem *gp)
}
}
-static void gem_link_timer(unsigned long data)
+static void gem_link_timer(struct timer_list *t)
{
- struct gem *gp = (struct gem *) data;
+ struct gem *gp = from_timer(gp, t, link_timer);
struct net_device *dev = gp->dev;
int restart_aneg = 0;
@@ -2910,9 +2910,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
gp->msg_enable = DEFAULT_MSG;
- init_timer(&gp->link_timer);
- gp->link_timer.function = gem_link_timer;
- gp->link_timer.data = (unsigned long) gp;
+ timer_setup(&gp->link_timer, gem_link_timer, 0);
INIT_WORK(&gp->reset_task, gem_reset_task);
diff --git a/drivers/net/ethernet/sun/sungem.h b/drivers/net/ethernet/sun/sungem.h
index 835ce1b3cb9f..626302a9bc89 100644
--- a/drivers/net/ethernet/sun/sungem.h
+++ b/drivers/net/ethernet/sun/sungem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $
* sungem.h: Definitions for Sun GEM ethernet driver.
*
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 9e983e1d8249..0431f1e5f511 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -685,9 +685,9 @@ static int is_lucent_phy(struct happy_meal *hp)
return ret;
}
-static void happy_meal_timer(unsigned long data)
+static void happy_meal_timer(struct timer_list *t)
{
- struct happy_meal *hp = (struct happy_meal *) data;
+ struct happy_meal *hp = from_timer(hp, t, happy_timer);
void __iomem *tregs = hp->tcvregs;
int restart_timer = 0;
@@ -1413,8 +1413,6 @@ force_link:
hp->timer_ticks = 0;
hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
- hp->happy_timer.data = (unsigned long) hp;
- hp->happy_timer.function = happy_meal_timer;
add_timer(&hp->happy_timer);
}
@@ -2819,7 +2817,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
hp->timer_state = asleep;
hp->timer_ticks = 0;
- init_timer(&hp->happy_timer);
+ timer_setup(&hp->happy_timer, happy_meal_timer, 0);
hp->dev = dev;
dev->netdev_ops = &hme_netdev_ops;
@@ -3133,7 +3131,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->timer_state = asleep;
hp->timer_ticks = 0;
- init_timer(&hp->happy_timer);
+ timer_setup(&hp->happy_timer, happy_meal_timer, 0);
hp->irq = pdev->irq;
hp->dev = dev;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index fca1bca7f69d..9118c60c9426 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
* sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
* Also known as the "Happy Meal".
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index ae190b77431b..0daed05b7c83 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
* sunqe.h: Definitions for the Sun QuadEthernet driver.
*
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 0b95105f7060..27fb22638885 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -492,8 +492,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
pr_info("%s: PORT ( remote-mac %pM%s )\n",
vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
- setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common,
- (unsigned long)port);
+ timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0);
napi_enable(&port->napi);
vio_port_up(&port->vio);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index ecf456c7b6d1..8aa3ce46bb81 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1040,9 +1040,9 @@ static inline void vnet_free_skbs(struct sk_buff *skb)
}
}
-void sunvnet_clean_timer_expire_common(unsigned long port0)
+void sunvnet_clean_timer_expire_common(struct timer_list *t)
{
- struct vnet_port *port = (struct vnet_port *)port0;
+ struct vnet_port *port = from_timer(port, t, clean_timer);
struct sk_buff *freeskbs;
unsigned pending;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
index b20d6fa7ef25..1ea0b016580a 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.h
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SUNVNETCOMMON_H
#define _SUNVNETCOMMON_H
@@ -129,7 +130,7 @@ struct vnet {
((__port)->vsw ? (__port)->dev : (__port)->vp->dev)
/* Common funcs */
-void sunvnet_clean_timer_expire_common(unsigned long port0);
+void sunvnet_clean_timer_expire_common(struct timer_list *t);
int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);
void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
index 0ad01916f11e..7a46393abf26 100644
--- a/drivers/net/ethernet/synopsys/Makefile
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Synopsys network device drivers.
#
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index e9672b1f9968..031cf9c3435a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -335,7 +335,7 @@ static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
dma_addr_t pages_dma;
/* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN;
while (order >= 0) {
pages = alloc_pages(gfp, order);
if (pages)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index e1b55b8fb8e0..1f8e9601592a 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -358,9 +358,9 @@ static irqreturn_t xlgmac_dma_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
@@ -391,8 +391,7 @@ static void xlgmac_init_timers(struct xlgmac_pdata *pdata)
if (!channel->tx_ring)
break;
- setup_timer(&channel->tx_timer, xlgmac_tx_timer,
- (unsigned long)channel);
+ timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
}
}
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 10e6b0ce51ba..0be551de821c 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the TI network device drivers.
#
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index db8a4bcfc6c7..a73600dceb8b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -119,8 +119,8 @@ do { \
#define CPDMA_RXCP 0x60
#define CPSW_POLL_WEIGHT 64
-#define CPSW_MIN_PACKET_SIZE 60
-#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
+#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
#define RX_PRIORITY_MAPPING 0x76543210
#define TX_PRIORITY_MAPPING 0x33221100
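[Editor's note] The replacement constants above evaluate to the same upper bound as the old open-coded numbers, while the minimum now reserves room for a VLAN tag:

    /* CPSW_MIN_PACKET_SIZE = VLAN_ETH_ZLEN              = 60 + 4       = 64 (was 60)      */
    /* CPSW_MAX_PACKET_SIZE = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN                             */
    /*                      = (1500 + 14 + 4)    + 4     = 1522 (unchanged)                */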
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ddd43e09111e..b432a75fb874 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -765,9 +765,9 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+ struct cpsw_ale *ale = from_timer(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
@@ -859,9 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale)
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- init_timer(&ale->timer);
- ale->timer.data = (unsigned long)ale;
- ale->timer.function = cpsw_ale_timer;
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
if (ale->ageout) {
ale->timer.expires = jiffies + ale->ageout;
add_timer(&ale->timer);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 437d36289786..ed58c746e4af 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -906,7 +906,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
sw_data[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1887,7 +1887,7 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
- if (type != TC_SETUP_MQPRIO)
+ if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 28cb38af1a34..e831c49713ee 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -2745,9 +2745,9 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+ struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
@@ -3616,9 +3616,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
}
spin_unlock_bh(&gbe_dev->hw_stats_lock);
- init_timer(&gbe_dev->timer);
- gbe_dev->timer.data = (unsigned long)gbe_dev;
- gbe_dev->timer.function = netcp_ethss_timer;
+ timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
*inst_priv = gbe_dev;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index c8d53d8c83ee..5a4e78fde530 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -172,7 +172,8 @@ static u32 tlan_handle_tx_eoc(struct net_device *, u16);
static u32 tlan_handle_status_check(struct net_device *, u16);
static u32 tlan_handle_rx_eoc(struct net_device *, u16);
-static void tlan_timer(unsigned long);
+static void tlan_timer(struct timer_list *t);
+static void tlan_phy_monitor(struct timer_list *t);
static void tlan_reset_lists(struct net_device *);
static void tlan_free_lists(struct net_device *);
@@ -190,7 +191,6 @@ static void tlan_phy_power_up(struct net_device *);
static void tlan_phy_reset(struct net_device *);
static void tlan_phy_start_link(struct net_device *);
static void tlan_phy_finish_auto_neg(struct net_device *);
-static void tlan_phy_monitor(unsigned long);
/*
static int tlan_phy_nop(struct net_device *);
@@ -258,7 +258,6 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
- priv->timer.data = (unsigned long) dev;
priv->timer_set_at = jiffies;
priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
@@ -926,8 +925,8 @@ static int tlan_open(struct net_device *dev)
return err;
}
- init_timer(&priv->timer);
- init_timer(&priv->media_timer);
+ timer_setup(&priv->timer, NULL, 0);
+ timer_setup(&priv->media_timer, tlan_phy_monitor, 0);
tlan_start(dev);
@@ -1427,7 +1426,6 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
priv->timer.function = tlan_timer;
- priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1579,7 +1577,6 @@ drop_and_reuse:
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
priv->timer.function = tlan_timer;
- priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
@@ -1836,10 +1833,10 @@ ThunderLAN driver timer function
*
**************************************************************/
-static void tlan_timer(unsigned long data)
+static void tlan_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_priv *priv = from_timer(priv, t, timer);
+ struct net_device *dev = priv->dev;
u32 elapsed;
unsigned long flags = 0;
@@ -1872,7 +1869,6 @@ static void tlan_timer(unsigned long data)
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK);
} else {
- priv->timer.function = tlan_timer;
priv->timer.expires = priv->timer_set_at
+ TLAN_TIMER_ACT_DELAY;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2317,8 +2313,6 @@ tlan_finish_reset(struct net_device *dev)
} else
netdev_info(dev, "Link active\n");
/* Enabling link beat monitoring */
- priv->media_timer.function = tlan_phy_monitor;
- priv->media_timer.data = (unsigned long) dev;
priv->media_timer.expires = jiffies + HZ;
add_timer(&priv->media_timer);
}
@@ -2763,10 +2757,10 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
*
*******************************************************************/
-static void tlan_phy_monitor(unsigned long data)
+static void tlan_phy_monitor(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) data;
- struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_priv *priv = from_timer(priv, t, media_timer);
+ struct net_device *dev = priv->dev;
u16 phy;
u16 phy_status;
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index 0ef9eefd3211..3d0ae1f07fc9 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the TILE on-chip networking support.
#
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index c00102b8145a..b3e5816a4678 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -40,7 +40,7 @@
#include <linux/tcp.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/tick.h>
+#include <linux/sched/isolation.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -2270,8 +2270,8 @@ static int __init tile_net_init_module(void)
tile_net_dev_init(name, mac);
if (!network_cpus_init())
- cpumask_and(&network_cpus_map, housekeeping_cpumask(),
- cpu_online_mask);
+ cpumask_and(&network_cpus_map,
+ housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask);
return 0;
}
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 49ccee4b9aec..56d06282fbde 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -608,9 +608,9 @@ static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
* ISSUE: Maybe instead track number of expected completions, and free
* only that many, resetting to zero if "pending" is ever false.
*/
-static void tile_net_handle_egress_timer(unsigned long arg)
+static void tile_net_handle_egress_timer(struct timer_list *t)
{
- struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
+ struct tile_net_cpu *info = from_timer(info, t, egress_timer);
struct net_device *dev = info->napi.dev;
/* The timer is no longer scheduled. */
@@ -1004,9 +1004,8 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
- init_timer_pinned(&info->egress_timer);
- info->egress_timer.data = (long)info;
- info->egress_timer.function = tile_net_handle_egress_timer;
+ timer_setup(&info->egress_timer, tile_net_handle_egress_timer,
+ TIMER_PINNED);
u64_stats_init(&info->stats.syncp);
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index a5069008435b..f434fd0f429e 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Toshiba network device drivers.
#
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index cec9e70ab995..d925b8203996 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -912,8 +912,9 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* packets, including updating the queue tail pointer.
*/
static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
{
+ struct spider_net_card *card = from_timer(card, t, tx_timer);
if ((spider_net_release_tx_chain(card, 0) != 0) &&
(card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
@@ -1265,7 +1266,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
- spider_net_cleanup_tx_ring(card);
+ spider_net_cleanup_tx_ring(&card->tx_timer);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
@@ -1977,9 +1978,9 @@ init_firmware_failed:
* @data: used for pointer to card structure
*
*/
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
{
- struct spider_net_card *card = (struct spider_net_card *)data;
+ struct spider_net_card *card = from_timer(card, t, aneg_timer);
struct mii_phy *phy = &card->phy;
/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
@@ -2256,16 +2257,11 @@ spider_net_setup_netdev(struct spider_net_card *card)
pci_set_drvdata(card->pdev, netdev);
- init_timer(&card->tx_timer);
- card->tx_timer.function =
- (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
- card->tx_timer.data = (unsigned long) card;
+ timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
netdev->irq = card->pdev->irq;
card->aneg_count = 0;
- init_timer(&card->aneg_timer);
- card->aneg_timer.function = spider_net_link_phy;
- card->aneg_timer.data = (unsigned long) card;
+ timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index c2d15d9c0c33..0624b71ab5d4 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -164,7 +164,7 @@ static struct platform_driver tsi_eth_driver = {
},
};
-static void tsi108_timed_checker(unsigned long dev_ptr);
+static void tsi108_timed_checker(struct timer_list *t);
#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
@@ -1370,7 +1370,7 @@ static int tsi108_open(struct net_device *dev)
napi_enable(&data->napi);
- setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+ timer_setup(&data->timer, tsi108_timed_checker, 0);
mod_timer(&data->timer, jiffies + 1);
tsi108_restart_rx(data, dev);
@@ -1666,10 +1666,10 @@ regs_fail:
* Thus, we have to do it using a timer.
*/
-static void tsi108_timed_checker(unsigned long dev_ptr)
+static void tsi108_timed_checker(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)dev_ptr;
- struct tsi108_prv_data *data = netdev_priv(dev);
+ struct tsi108_prv_data *data = from_timer(data, t, timer);
+ struct net_device *dev = data->dev;
tsi108_check_phy(dev);
tsi108_check_rxring(dev);
diff --git a/drivers/net/ethernet/xilinx/Makefile b/drivers/net/ethernet/xilinx/Makefile
index 214205e975e3..7d7dc1771423 100644
--- a/drivers/net/ethernet/xilinx/Makefile
+++ b/drivers/net/ethernet/xilinx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Xilink network device drivers.
#
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 7d06e3e1abac..107575225383 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef XILINX_LL_TEMAC_H
#define XILINX_LL_TEMAC_H
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 7714aff78b7d..f5e83ac6f7e2 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO bus driver for the Xilinx TEMAC device
*
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 5ef626331f85..c337400485da 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for Xilinx Axi Ethernet device driver.
*
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 63307ea97846..16c3bfbe1992 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MDIO bus driver for the Xilinx Axi Ethernet device
*
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index a957a1c7e5ba..875eac8a76f3 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the SysKonnect FDDI PCI adapter driver
#
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index ed51018a813e..b718a02a6bb6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1134,24 +1134,11 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
return t;
}
-static bool is_all_zero(const u8 *fp, size_t size)
-{
- int i;
-
- for (i = 0; i < size; i++)
- if (fp[i])
- return false;
- return true;
-}
-
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
- if (info->key.tun_id || info->key.tun_flags || info->key.tos ||
- info->key.ttl || info->key.label || info->key.tp_src ||
- !is_all_zero((const u8 *)&info->key.u, sizeof(info->key.u)))
- return false;
- else
- return true;
+ return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
+ info->key.ttl || info->key.label || info->key.tp_src ||
+ memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}
static bool geneve_dst_addr_equal(struct ip_tunnel_info *a,
@@ -1350,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
return 0;
@@ -1503,6 +1502,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
struct ip_tunnel_info *info = &geneve->info;
+ bool metadata = geneve->collect_md;
__u8 tmp_vni[3];
__u32 vni;
@@ -1511,32 +1511,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
goto nla_put_failure;
- if (rtnl_dereference(geneve->sock4)) {
+ if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
info->key.u.ipv4.dst))
goto nla_put_failure;
-
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
!!(info->key.tun_flags & TUNNEL_CSUM)))
goto nla_put_failure;
- }
-
#if IS_ENABLED(CONFIG_IPV6)
- if (rtnl_dereference(geneve->sock6)) {
+ } else if (!metadata) {
if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
&info->key.u.ipv6.dst))
goto nla_put_failure;
-
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
!(info->key.tun_flags & TUNNEL_CSUM)))
goto nla_put_failure;
-
- if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
- !geneve->use_udp6_rx_checksums))
- goto nla_put_failure;
- }
#endif
+ }
if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
@@ -1546,10 +1538,15 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
- if (geneve->collect_md) {
- if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
- goto nla_put_failure;
- }
+ if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
+ goto nla_put_failure;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+ !geneve->use_udp6_rx_checksums))
+ goto nla_put_failure;
+#endif
+
return 0;
nla_put_failure:
@@ -1667,6 +1664,7 @@ static void __net_exit geneve_exit_net(struct net *net)
/* unregister the devices gathered above */
unregister_netdevice_many(&list);
rtnl_unlock();
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
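[Editor's note] memchr_inv() returns the address of the first byte that differs from the given pattern, or NULL when the whole region matches, which is why a NULL result in the rewritten check above means "the address union is all zeroes". A self-contained sketch of the same test (helper name hypothetical):

    #include <linux/string.h>

    static bool tunnel_addr_is_zero(const struct ip_tunnel_key *key)
    {
            /* NULL from memchr_inv() == every byte of key->u equals 0 */
            return !memchr_inv(&key->u, 0, sizeof(key->u));
    }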
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 021a8ec411ab..32f49c4ce457 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -35,7 +35,7 @@
#include <linux/tcp.h>
#include <linux/semaphore.h>
#include <linux/compat.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -120,7 +120,7 @@ struct sixpack {
struct timer_list tx_t;
struct timer_list resync_t;
- atomic_t refcnt;
+ refcount_t refcnt;
struct semaphore dead_sem;
spinlock_t lock;
};
@@ -136,9 +136,9 @@ static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
* Note that in case of DAMA operation, the data is not sent here.
*/
-static void sp_xmit_on_air(unsigned long channel)
+static void sp_xmit_on_air(struct timer_list *t)
{
- struct sixpack *sp = (struct sixpack *) channel;
+ struct sixpack *sp = from_timer(sp, t, tx_t);
int actual, when = sp->slottime;
static unsigned char random;
@@ -229,7 +229,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
sp->xleft = count;
sp->xhead = sp->xbuff;
sp->status2 = count;
- sp_xmit_on_air((unsigned long)sp);
+ sp_xmit_on_air(&sp->tx_t);
}
return;
@@ -381,7 +381,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
sp = tty->disc_data;
if (sp)
- atomic_inc(&sp->refcnt);
+ refcount_inc(&sp->refcnt);
read_unlock(&disc_data_lock);
return sp;
@@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
static void sp_put(struct sixpack *sp)
{
- if (atomic_dec_and_test(&sp->refcnt))
+ if (refcount_dec_and_test(&sp->refcnt))
up(&sp->dead_sem);
}
@@ -500,9 +500,9 @@ static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state)
__tnc_set_sync_state(sp, new_tnc_state);
}
-static void resync_tnc(unsigned long channel)
+static void resync_tnc(struct timer_list *t)
{
- struct sixpack *sp = (struct sixpack *) channel;
+ struct sixpack *sp = from_timer(sp, t, resync_t);
static char resync_cmd = 0xe8;
/* clear any data that might have been received */
@@ -526,8 +526,6 @@ static void resync_tnc(unsigned long channel)
/* Start resync timer again -- the TNC might be still absent */
del_timer(&sp->resync_t);
- sp->resync_t.data = (unsigned long) sp;
- sp->resync_t.function = resync_tnc;
sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
add_timer(&sp->resync_t);
}
@@ -541,8 +539,6 @@ static inline int tnc_init(struct sixpack *sp)
sp->tty->ops->write(sp->tty, &inbyte, 1);
del_timer(&sp->resync_t);
- sp->resync_t.data = (unsigned long) sp;
- sp->resync_t.function = resync_tnc;
sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
add_timer(&sp->resync_t);
@@ -580,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty)
sp->dev = dev;
spin_lock_init(&sp->lock);
- atomic_set(&sp->refcnt, 1);
+ refcount_set(&sp->refcnt, 1);
sema_init(&sp->dead_sem, 0);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
@@ -623,11 +619,9 @@ static int sixpack_open(struct tty_struct *tty)
netif_start_queue(dev);
- init_timer(&sp->tx_t);
- sp->tx_t.function = sp_xmit_on_air;
- sp->tx_t.data = (unsigned long) sp;
+ timer_setup(&sp->tx_t, sp_xmit_on_air, 0);
- init_timer(&sp->resync_t);
+ timer_setup(&sp->resync_t, resync_tnc, 0);
spin_unlock_bh(&sp->lock);
@@ -676,7 +670,7 @@ static void sixpack_close(struct tty_struct *tty)
* We have now ensured that nobody can start using ap from now on, but
* we have to wait for all existing users to finish.
*/
- if (!atomic_dec_and_test(&sp->refcnt))
+ if (!refcount_dec_and_test(&sp->refcnt))
down(&sp->dead_sem);
/* We must stop the queue to avoid potentially scribbling
@@ -928,8 +922,6 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
if (sp->tnc_state == TNC_IN_SYNC) {
del_timer(&sp->resync_t);
- sp->resync_t.data = (unsigned long) sp;
- sp->resync_t.function = resync_tnc;
sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
add_timer(&sp->resync_t);
}
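[Editor's note] Switching the 6pack disc_data reference count from atomic_t to refcount_t keeps the inc/dec_and_test call shape but adds saturation and WARNs on overflow or use after the count hits zero. A minimal sketch of the pattern with a hypothetical container:

    #include <linux/refcount.h>

    struct sp_ref {                                        /* hypothetical container */
            refcount_t refcnt;
    };

    static void sp_ref_init(struct sp_ref *s)
    {
            refcount_set(&s->refcnt, 1);                   /* reference held by the opener */
    }

    static void sp_ref_get(struct sp_ref *s)
    {
            refcount_inc(&s->refcnt);                      /* WARNs instead of silently wrapping */
    }

    static bool sp_ref_put(struct sp_ref *s)
    {
            return refcount_dec_and_test(&s->refcnt);      /* true when the last user drops */
    }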
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 104096070026..7a1518d763e3 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux AX.25 and HFMODEM device drivers.
#
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 1503f10122f7..1e62d00732f2 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -840,6 +840,7 @@ static int epp_open(struct net_device *dev)
unsigned char tmp[128];
unsigned char stat;
unsigned long tstart;
+ struct pardev_cb par_cb;
if (!pp) {
printk(KERN_ERR "%s: parport at 0x%lx unknown\n", bc_drvname, dev->base_addr);
@@ -859,8 +860,21 @@ static int epp_open(struct net_device *dev)
return -EIO;
}
memset(&bc->modem, 0, sizeof(bc->modem));
- bc->pdev = parport_register_device(pp, dev->name, NULL, epp_wakeup,
- NULL, PARPORT_DEV_EXCL, dev);
+ memset(&par_cb, 0, sizeof(par_cb));
+ par_cb.wakeup = epp_wakeup;
+ par_cb.private = (void *)dev;
+ par_cb.flags = PARPORT_DEV_EXCL;
+ for (i = 0; i < NR_PORTS; i++)
+ if (baycom_device[i] == dev)
+ break;
+
+ if (i == NR_PORTS) {
+ pr_err("%s: no device found\n", bc_drvname);
+ parport_put_port(pp);
+ return -ENODEV;
+ }
+
+ bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
parport_put_port(pp);
if (!bc->pdev) {
printk(KERN_ERR "%s: cannot register parport at 0x%lx\n", bc_drvname, pp->base);
@@ -1185,6 +1199,23 @@ MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
+static int baycom_epp_par_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+ int len = strlen(drv->name);
+
+ if (strncmp(par_dev->name, drv->name, len))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver baycom_epp_par_driver = {
+ .name = "bce",
+ .probe = baycom_epp_par_probe,
+ .devmodel = true,
+};
+
static void __init baycom_epp_dev_setup(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
@@ -1204,10 +1235,15 @@ static void __init baycom_epp_dev_setup(struct net_device *dev)
static int __init init_baycomepp(void)
{
- int i, found = 0;
+ int i, found = 0, ret;
char set_hw = 1;
printk(bc_drvinfo);
+
+ ret = parport_register_driver(&baycom_epp_par_driver);
+ if (ret)
+ return ret;
+
/*
* register net devices
*/
@@ -1241,7 +1277,12 @@ static int __init init_baycomepp(void)
found++;
}
- return found ? 0 : -ENXIO;
+ if (found == 0) {
+ parport_unregister_driver(&baycom_epp_par_driver);
+ return -ENXIO;
+ }
+
+ return 0;
}
static void __exit cleanup_baycomepp(void)
@@ -1260,6 +1301,7 @@ static void __exit cleanup_baycomepp(void)
printk(paranoia_str, "cleanup_module");
}
}
+ parport_unregister_driver(&baycom_epp_par_driver);
}
module_init(init_baycomepp);
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index e1783832d304..1f7ceafd61ff 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -311,7 +311,9 @@ static void par96_wakeup(void *handle)
static int par96_open(struct net_device *dev)
{
struct baycom_state *bc = netdev_priv(dev);
+ struct pardev_cb par_cb;
struct parport *pp;
+ int i;
if (!dev || !bc)
return -ENXIO;
@@ -332,8 +334,21 @@ static int par96_open(struct net_device *dev)
}
memset(&bc->modem, 0, sizeof(bc->modem));
bc->hdrv.par.bitrate = 9600;
- bc->pdev = parport_register_device(pp, dev->name, NULL, par96_wakeup,
- par96_interrupt, PARPORT_DEV_EXCL, dev);
+ memset(&par_cb, 0, sizeof(par_cb));
+ par_cb.wakeup = par96_wakeup;
+ par_cb.irq_func = par96_interrupt;
+ par_cb.private = (void *)dev;
+ par_cb.flags = PARPORT_DEV_EXCL;
+ for (i = 0; i < NR_PORTS; i++)
+ if (baycom_device[i] == dev)
+ break;
+
+ if (i == NR_PORTS) {
+ pr_err("%s: no device found\n", bc_drvname);
+ parport_put_port(pp);
+ return -ENODEV;
+ }
+ bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
parport_put_port(pp);
if (!bc->pdev) {
printk(KERN_ERR "baycom_par: cannot register parport at 0x%lx\n", dev->base_addr);
@@ -490,12 +505,34 @@ MODULE_LICENSE("GPL");
/* --------------------------------------------------------------------- */
+static int baycom_par_probe(struct pardevice *par_dev)
+{
+ struct device_driver *drv = par_dev->dev.driver;
+ int len = strlen(drv->name);
+
+ if (strncmp(par_dev->name, drv->name, len))
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct parport_driver baycom_par_driver = {
+ .name = "bcp",
+ .probe = baycom_par_probe,
+ .devmodel = true,
+};
+
static int __init init_baycompar(void)
{
- int i, found = 0;
+ int i, found = 0, ret;
char set_hw = 1;
printk(bc_drvinfo);
+
+ ret = parport_register_driver(&baycom_par_driver);
+ if (ret)
+ return ret;
+
/*
* register net devices
*/
@@ -524,8 +561,10 @@ static int __init init_baycompar(void)
baycom_device[i] = dev;
}
- if (!found)
+ if (!found) {
+ parport_unregister_driver(&baycom_par_driver);
return -ENXIO;
+ }
return 0;
}
@@ -539,6 +578,7 @@ static void __exit cleanup_baycompar(void)
if (dev)
hdlcdrv_unregister(dev);
}
+ parport_unregister_driver(&baycom_par_driver);
}
module_init(init_baycompar);
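[Editor's note] Both baycom drivers now attach through the parport device model: a parport_driver with .devmodel = true is registered at module init, and each port is then claimed with parport_register_dev_model() plus a filled-in struct pardev_cb instead of the long parport_register_device() argument list. A sketch of the per-port attach (callback and variable names hypothetical):

    #include <linux/parport.h>

    struct pardev_cb cb = {
            .wakeup   = my_wakeup,          /* void (*)(void *)        */
            .irq_func = my_interrupt,       /* optional IRQ callback   */
            .private  = dev,
            .flags    = PARPORT_DEV_EXCL,
    };

    pdev = parport_register_dev_model(pp, dev->name, &cb, unit_nr);
    if (!pdev)
            return -ENODEV;                 /* port busy or no matching driver */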
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aec6c26563cf..c180b480f8ef 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -440,7 +440,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
ax_changedmtu(ax);
if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */
- len = ax->mtu;
printk(KERN_ERR "mkiss: %s: truncating oversized transmit packet!\n", ax->dev->name);
dev->stats.tx_dropped++;
netif_start_queue(dev);
@@ -477,7 +476,8 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
cmd = 0;
}
ax->crcauto = (cmd ? 0 : 1);
- printk(KERN_INFO "mkiss: %s: crc mode %s %d\n", ax->dev->name, (len) ? "set to" : "is", cmd);
+ printk(KERN_INFO "mkiss: %s: crc mode set to %d\n",
+ ax->dev->name, cmd);
}
spin_unlock_bh(&ax->buflock);
netif_start_queue(dev);
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 295f267b73ea..3de272959090 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -185,14 +185,15 @@
static const char banner[] __initconst = KERN_INFO \
"AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
-static void t_dwait(unsigned long);
-static void t_txdelay(unsigned long);
-static void t_tail(unsigned long);
-static void t_busy(unsigned long);
-static void t_maxkeyup(unsigned long);
-static void t_idle(unsigned long);
+static void t_dwait(struct timer_list *t);
+static void t_txdelay(struct timer_list *t);
+static void t_tail(struct timer_list *t);
+static void t_busy(struct timer_list *);
+static void t_maxkeyup(struct timer_list *);
+static void t_idle(struct timer_list *t);
static void scc_tx_done(struct scc_channel *);
-static void scc_start_tx_timer(struct scc_channel *, void (*)(unsigned long), unsigned long);
+static void scc_start_tx_timer(struct scc_channel *,
+ void (*)(struct timer_list *), unsigned long);
static void scc_start_maxkeyup(struct scc_channel *);
static void scc_start_defer(struct scc_channel *);
@@ -992,24 +993,27 @@ static void scc_key_trx(struct scc_channel *scc, char tx)
/* ----> SCC timer interrupt handler and friends. <---- */
-static void __scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
+static void __scc_start_tx_timer(struct scc_channel *scc,
+ void (*handler)(struct timer_list *t),
+ unsigned long when)
{
del_timer(&scc->tx_t);
if (when == 0)
{
- handler((unsigned long) scc);
+ handler(&scc->tx_t);
} else
if (when != TIMER_OFF)
{
- scc->tx_t.data = (unsigned long) scc;
scc->tx_t.function = handler;
scc->tx_t.expires = jiffies + (when*HZ)/100;
add_timer(&scc->tx_t);
}
}
-static void scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
+static void scc_start_tx_timer(struct scc_channel *scc,
+ void (*handler)(struct timer_list *t),
+ unsigned long when)
{
unsigned long flags;
@@ -1027,7 +1031,6 @@ static void scc_start_defer(struct scc_channel *scc)
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
- scc->tx_wdog.data = (unsigned long) scc;
scc->tx_wdog.function = t_busy;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
add_timer(&scc->tx_wdog);
@@ -1044,7 +1047,6 @@ static void scc_start_maxkeyup(struct scc_channel *scc)
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
- scc->tx_wdog.data = (unsigned long) scc;
scc->tx_wdog.function = t_maxkeyup;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
add_timer(&scc->tx_wdog);
@@ -1121,9 +1123,9 @@ static inline int is_grouped(struct scc_channel *scc)
* fulldup == 2: mintime expired, reset status or key trx and start txdelay
*/
-static void t_dwait(unsigned long channel)
+static void t_dwait(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_t);
if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */
{
@@ -1163,9 +1165,9 @@ static void t_dwait(unsigned long channel)
* kick transmission by a fake scc_txint(scc), start 'maxkeyup' watchdog.
*/
-static void t_txdelay(unsigned long channel)
+static void t_txdelay(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_t);
scc_start_maxkeyup(scc);
@@ -1184,9 +1186,9 @@ static void t_txdelay(unsigned long channel)
* transmission after 'mintime' seconds
*/
-static void t_tail(unsigned long channel)
+static void t_tail(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_t);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1211,9 +1213,9 @@ static void t_tail(unsigned long channel)
* throw away send buffers if DCD remains active too long.
*/
-static void t_busy(unsigned long channel)
+static void t_busy(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_wdog);
del_timer(&scc->tx_t);
netif_stop_queue(scc->dev); /* don't pile on the wabbit! */
@@ -1230,9 +1232,9 @@ static void t_busy(unsigned long channel)
* this is our watchdog.
*/
-static void t_maxkeyup(unsigned long channel)
+static void t_maxkeyup(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1264,9 +1266,9 @@ static void t_maxkeyup(unsigned long channel)
* expires.
*/
-static void t_idle(unsigned long channel)
+static void t_idle(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_t);
del_timer(&scc->tx_wdog);
@@ -1397,9 +1399,9 @@ static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
/* * Send calibration pattern * */
/* ******************************************************************* */
-static void scc_stop_calibrate(unsigned long channel)
+static void scc_stop_calibrate(struct timer_list *t)
{
- struct scc_channel *scc = (struct scc_channel *) channel;
+ struct scc_channel *scc = from_timer(scc, t, tx_wdog);
unsigned long flags;
spin_lock_irqsave(&scc->lock, flags);
@@ -1426,7 +1428,6 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
del_timer(&scc->tx_wdog);
- scc->tx_wdog.data = (unsigned long) scc;
scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
add_timer(&scc->tx_wdog);
@@ -1522,8 +1523,8 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
dev->ml_priv = scc;
scc->dev = dev;
spin_lock_init(&scc->lock);
- init_timer(&scc->tx_t);
- init_timer(&scc->tx_wdog);
+ timer_setup(&scc->tx_t, NULL, 0);
+ timer_setup(&scc->tx_wdog, NULL, 0);
err = register_netdevice(dev);
if (err) {
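
The scc.c hunks above are the standard conversion to the timer_list-argument timer API: timer_setup() replaces init_timer() plus the removed .data assignment, callbacks take a struct timer_list * and use from_timer() to get back to the structure the timer is embedded in. A minimal sketch of the pattern, assuming a hypothetical example_chan structure:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct example_chan {
            struct timer_list tx_t;         /* embedded timer, like scc_channel */
            int state;
    };

    static void example_timeout(struct timer_list *t)
    {
            /* the callback receives the timer itself; recover the container */
            struct example_chan *chan = from_timer(chan, t, tx_t);

            chan->state++;
    }

    static void example_arm(struct example_chan *chan, unsigned long when_cs)
    {
            /* the function pointer can still be swapped later through
             * chan->tx_t.function, as __scc_start_tx_timer() keeps doing */
            timer_setup(&chan->tx_t, example_timeout, 0);
            mod_timer(&chan->tx_t, jiffies + (when_cs * HZ) / 100);
    }
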
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 7a7c5224a336..14c3632b8cde 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -157,7 +157,7 @@ static struct net_device *yam_devs[NR_PORTS];
static struct yam_mcs *yam_data;
-static DEFINE_TIMER(yam_timer, NULL, 0, 0);
+static DEFINE_TIMER(yam_timer, NULL);
/* --------------------------------------------------------------------- */
@@ -647,7 +647,7 @@ static void yam_arbitrate(struct net_device *dev)
yam_start_tx(dev, yp);
}
-static void yam_dotimer(unsigned long dummy)
+static void yam_dotimer(struct timer_list *unused)
{
int i;
@@ -1164,7 +1164,7 @@ static int __init yam_init_driver(void)
}
- yam_timer.function = yam_dotimer;
+ timer_setup(&yam_timer, yam_dotimer, 0);
yam_timer.expires = jiffies + HZ / 100;
add_timer(&yam_timer);
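
For the global yam_timer the same conversion shows the two-argument DEFINE_TIMER(): the old data and expiry slots are gone, and a callback that needs no context simply ignores its argument. yam keeps the callback NULL at definition and fills it in with timer_setup() in yam_init_driver(); when the callback is known at compile time it can equally be named in DEFINE_TIMER() directly, as in this small hypothetical sketch:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void example_tick(struct timer_list *unused);
    static DEFINE_TIMER(example_timer, example_tick);

    static void example_tick(struct timer_list *unused)
    {
            /* periodic housekeeping; re-arm so the timer keeps running */
            mod_timer(&example_timer, jiffies + HZ / 100);
    }

    static void example_start(void)
    {
            mod_timer(&example_timer, jiffies + HZ / 100);
    }
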
diff --git a/drivers/net/hamradio/z8530.h b/drivers/net/hamradio/z8530.h
index 8bef548572aa..1655901d713b 100644
--- a/drivers/net/hamradio/z8530.h
+++ b/drivers/net/hamradio/z8530.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* 8530 Serial Communications Controller Register definitions */
#define FLAG 0x7e
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 71ddadbf2368..8483f03d5a41 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1146,10 +1146,10 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
}
}
-static void rr_timer(unsigned long data)
+static void rr_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct rr_private *rrpriv = netdev_priv(dev);
+ struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
+ struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
struct rr_regs __iomem *regs = rrpriv->regs;
unsigned long flags;
@@ -1229,10 +1229,8 @@ static int rr_open(struct net_device *dev)
/* Set the timer to switch to check for link beat and perhaps switch
to an alternate media type. */
- init_timer(&rrpriv->timer);
+ timer_setup(&rrpriv->timer, rr_timer, 0);
rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
- rrpriv->timer.data = (unsigned long)dev;
- rrpriv->timer.function = rr_timer; /* timer handler */
add_timer(&rrpriv->timer);
netif_start_queue(dev);
diff --git a/drivers/net/hippi/rrunner.h b/drivers/net/hippi/rrunner.h
index 28169043ae49..87533784604f 100644
--- a/drivers/net/hippi/rrunner.h
+++ b/drivers/net/hippi/rrunner.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RRUNNER_H_
#define _RRUNNER_H_
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 5176be76ca7d..88ddfb92122b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -179,7 +179,7 @@ struct rndis_device {
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
- u16 ind_table[ITAB_NUM];
+ u16 rx_table[ITAB_NUM];
};
@@ -646,6 +646,10 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
#define NETVSC_SEND_BUFFER_ID 0
+#define NETVSC_SUPPORTED_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | \
+ NETIF_F_TSO | NETIF_F_IPV6_CSUM | \
+ NETIF_F_TSO6)
+
#define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8
@@ -686,6 +690,8 @@ struct netvsc_ethtool_stats {
unsigned long tx_busy;
unsigned long tx_send_full;
unsigned long rx_comp_busy;
+ unsigned long stop_queue;
+ unsigned long wake_queue;
};
struct netvsc_vf_pcpu_stats {
@@ -702,6 +708,14 @@ struct netvsc_reconfig {
u32 event;
};
+/* L4 hash bits for different protocols */
+#define HV_TCP4_L4HASH 1
+#define HV_TCP6_L4HASH 2
+#define HV_UDP4_L4HASH 4
+#define HV_UDP6_L4HASH 8
+#define HV_DEFAULT_L4HASH (HV_TCP4_L4HASH | HV_TCP6_L4HASH | HV_UDP4_L4HASH | \
+ HV_UDP6_L4HASH)
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -721,13 +735,12 @@ struct net_device_context {
u32 tx_checksum_mask;
- u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+ u32 tx_table[VRSS_SEND_TAB_SIZE];
/* Ethtool settings */
- bool udp4_l4_hash;
- bool udp6_l4_hash;
u8 duplex;
u32 speed;
+ u32 l4_hash; /* L4 hash settings */
struct netvsc_ethtool_stats eth_stats;
/* State to manage the associated VF interface. */
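
The hyperv_net.h hunk above folds the separate udp4/udp6 booleans into a single l4_hash bitmask, with one HV_*_L4HASH bit per protocol/family pair and HV_DEFAULT_L4HASH enabling all four. A short sketch of how such a mask is queried and toggled; the helper names are made up, and the real users are netvsc_get_hash() and the ethtool rxnfc handlers further down:

    #include <linux/types.h>

    /* HV_*_L4HASH values as defined in the hunk above */
    static bool example_l4_hash_enabled(u32 l4_hash, u32 pkt_proto)
    {
            return (l4_hash & pkt_proto) != 0;
    }

    static void example_set_udp4_hash(u32 *l4_hash, bool enable)
    {
            if (enable)
                    *l4_hash |= HV_UDP4_L4HASH;     /* hash UDPv4 on ports too */
            else
                    *l4_hash &= ~HV_UDP4_L4HASH;    /* fall back to address-only hash */
    }
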
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8d5077fb0492..bfc79698b8f4 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -100,12 +100,11 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
call_rcu(&nvdev->rcu, free_netvsc_device);
}
-static void netvsc_destroy_buf(struct hv_device *device)
+static void netvsc_revoke_buf(struct hv_device *device,
+ struct netvsc_device *net_device)
{
struct nvsp_message *revoke_packet;
struct net_device *ndev = hv_get_drvdata(device);
- struct net_device_context *ndc = netdev_priv(ndev);
- struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev);
int ret;
/*
@@ -148,28 +147,6 @@ static void netvsc_destroy_buf(struct hv_device *device)
net_device->recv_section_cnt = 0;
}
- /* Teardown the gpadl on the vsp end */
- if (net_device->recv_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(device->channel,
- net_device->recv_buf_gpadl_handle);
-
- /* If we failed here, we might as well return and have a leak
- * rather than continue and a bugchk
- */
- if (ret != 0) {
- netdev_err(ndev,
- "unable to teardown receive buffer's gpadl\n");
- return;
- }
- net_device->recv_buf_gpadl_handle = 0;
- }
-
- if (net_device->recv_buf) {
- /* Free up the receive buffer */
- vfree(net_device->recv_buf);
- net_device->recv_buf = NULL;
- }
-
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
* NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
@@ -210,7 +187,35 @@ static void netvsc_destroy_buf(struct hv_device *device)
}
net_device->send_section_cnt = 0;
}
- /* Teardown the gpadl on the vsp end */
+}
+
+static void netvsc_teardown_gpadl(struct hv_device *device,
+ struct netvsc_device *net_device)
+{
+ struct net_device *ndev = hv_get_drvdata(device);
+ int ret;
+
+ if (net_device->recv_buf_gpadl_handle) {
+ ret = vmbus_teardown_gpadl(device->channel,
+ net_device->recv_buf_gpadl_handle);
+
+ /* If we failed here, we might as well return and have a leak
+ * rather than continue and a bugchk
+ */
+ if (ret != 0) {
+ netdev_err(ndev,
+ "unable to teardown receive buffer's gpadl\n");
+ return;
+ }
+ net_device->recv_buf_gpadl_handle = 0;
+ }
+
+ if (net_device->recv_buf) {
+ /* Free up the receive buffer */
+ vfree(net_device->recv_buf);
+ net_device->recv_buf = NULL;
+ }
+
if (net_device->send_buf_gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
@@ -420,7 +425,8 @@ static int netvsc_init_buf(struct hv_device *device,
goto exit;
cleanup:
- netvsc_destroy_buf(device);
+ netvsc_revoke_buf(device, net_device);
+ netvsc_teardown_gpadl(device, net_device);
exit:
return ret;
@@ -484,7 +490,7 @@ static int netvsc_connect_vsp(struct hv_device *device,
struct netvsc_device *net_device,
const struct netvsc_device_info *device_info)
{
- const u32 ver_list[] = {
+ static const u32 ver_list[] = {
NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
};
@@ -539,11 +545,6 @@ cleanup:
return ret;
}
-static void netvsc_disconnect_vsp(struct hv_device *device)
-{
- netvsc_destroy_buf(device);
-}
-
/*
* netvsc_device_remove - Callback when the root bus device is removed
*/
@@ -557,7 +558,7 @@ void netvsc_device_remove(struct hv_device *device)
cancel_work_sync(&net_device->subchan_work);
- netvsc_disconnect_vsp(device);
+ netvsc_revoke_buf(device, net_device);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
@@ -570,6 +571,8 @@ void netvsc_device_remove(struct hv_device *device)
/* Now, we can close the channel safely */
vmbus_close(device->channel);
+ netvsc_teardown_gpadl(device, net_device);
+
/* And dissassociate NAPI context from device */
for (i = 0; i < net_device->num_chn; i++)
netif_napi_del(&net_device->chan_table[i].napi);
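
The netvsc.c hunks above split the old netvsc_destroy_buf() into netvsc_revoke_buf() and netvsc_teardown_gpadl() so the two halves can be ordered around vmbus_close(): revoke the buffers while the channel is still up, close the channel, and only then tear down the GPADLs and free the memory. Sketched as a bare sequence (the two helpers are the ones introduced here and are static to netvsc.c; the wrapper itself is only illustrative, since netvsc_device_remove() also unlinks the device and deletes the NAPI contexts in between):

    static void example_remove_order(struct hv_device *device,
                                     struct netvsc_device *net_device)
    {
            /* 1. ask the host to stop using the send/receive buffers */
            netvsc_revoke_buf(device, net_device);

            /* 2. close the channel so nothing references the GPADLs any more */
            vmbus_close(device->channel);

            /* 3. only now tear down the GPADLs and free the buffers */
            netvsc_teardown_gpadl(device, net_device);
    }
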
@@ -609,6 +612,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
{
struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
u16 q_idx = 0;
int queue_sends;
@@ -643,8 +647,10 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
(hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
- queue_sends < 1))
+ queue_sends < 1)) {
netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
+ ndev_ctx->eth_stats.wake_queue++;
+ }
}
static void netvsc_send_completion(struct netvsc_device *net_device,
@@ -749,6 +755,7 @@ static inline int netvsc_send_pkt(
&net_device->chan_table[packet->q_idx];
struct vmbus_channel *out_channel = nvchan->channel;
struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
@@ -789,12 +796,16 @@ static inline int netvsc_send_pkt(
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
- if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
netif_tx_stop_queue(txq);
+ ndev_ctx->eth_stats.stop_queue++;
+ }
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
+ ndev_ctx->eth_stats.stop_queue++;
if (atomic_read(&nvchan->queue_sends) < 1) {
netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;
}
} else {
@@ -1102,7 +1113,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);
for (i = 0; i < count; i++)
- net_device_ctx->tx_send_table[i] = tab[i];
+ net_device_ctx->tx_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
@@ -1247,6 +1258,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
if (!net_device)
return ERR_PTR(-ENOMEM);
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+ net_device_ctx->tx_table[i] = 0;
+
net_device->ring_size = ring_size;
/* Because the device uses NAPI, all the interrupt batching and
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a32ae02e1b6c..5129647d420c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -203,7 +203,7 @@ static inline u32 netvsc_get_hash(
const struct net_device_context *ndc)
{
struct flow_keys flow;
- u32 hash;
+ u32 hash, pkt_proto = 0;
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
@@ -211,11 +211,25 @@ static inline u32 netvsc_get_hash(
if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
return 0;
- if (flow.basic.ip_proto == IPPROTO_TCP ||
- (flow.basic.ip_proto == IPPROTO_UDP &&
- ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
- (flow.basic.n_proto == htons(ETH_P_IPV6) &&
- ndc->udp6_l4_hash)))) {
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
return skb_get_hash(skb);
} else {
if (flow.basic.n_proto == htons(ETH_P_IP))
@@ -238,8 +252,8 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
struct sock *sk = skb->sk;
int q_idx;
- q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) &
- (VRSS_SEND_TAB_SIZE - 1)];
+ q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
+ (VRSS_SEND_TAB_SIZE - 1)];
/* If queue index changed record the new value */
if (q_idx != old_idx &&
@@ -898,8 +912,7 @@ static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
- ndc->udp4_l4_hash = true;
- ndc->udp6_l4_hash = true;
+ ndc->l4_hash = HV_DEFAULT_L4HASH;
ndc->speed = SPEED_UNKNOWN;
ndc->duplex = DUPLEX_FULL;
@@ -1126,6 +1139,8 @@ static const struct {
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+ { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
+ { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
@@ -1243,23 +1258,32 @@ static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
{
+ const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
case TCP_V4_FLOW:
+ if (ndc->l4_hash & HV_TCP4_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_TCP6_L4HASH)
+ info->data |= l4_flag;
+
break;
case UDP_V4_FLOW:
- if (ndc->udp4_l4_hash)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_UDP4_L4HASH)
+ info->data |= l4_flag;
break;
case UDP_V6_FLOW:
- if (ndc->udp6_l4_hash)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_UDP6_L4HASH)
+ info->data |= l4_flag;
break;
@@ -1300,23 +1324,51 @@ static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
{
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (info->flow_type == UDP_V4_FLOW)
- ndc->udp4_l4_hash = true;
- else if (info->flow_type == UDP_V6_FLOW)
- ndc->udp6_l4_hash = true;
- else
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash |= HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash |= HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash |= HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash |= HV_UDP6_L4HASH;
+ break;
+
+ default:
return -EOPNOTSUPP;
+ }
return 0;
}
if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
- if (info->flow_type == UDP_V4_FLOW)
- ndc->udp4_l4_hash = false;
- else if (info->flow_type == UDP_V6_FLOW)
- ndc->udp6_l4_hash = false;
- else
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash &= ~HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash &= ~HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash &= ~HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash &= ~HV_UDP6_L4HASH;
+ break;
+
+ default:
return -EOPNOTSUPP;
+ }
return 0;
}
@@ -1382,7 +1434,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
rndis_dev = ndev->extension;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
- indir[i] = rndis_dev->ind_table[i];
+ indir[i] = rndis_dev->rx_table[i];
}
if (key)
@@ -1412,7 +1464,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
- rndis_dev->ind_table[i] = indir[i];
+ rndis_dev->rx_table[i] = indir[i];
}
if (!key) {
@@ -1746,7 +1798,7 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto rx_handler_failed;
}
- ret = netdev_upper_dev_link(vf_netdev, ndev);
+ ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
if (ret != 0) {
netdev_err(vf_netdev,
"can not set master device %s (err = %d)\n",
@@ -1935,6 +1987,12 @@ static int netvsc_probe(struct hv_device *dev,
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
+ /* Initialize the number of queues to be 1, we may change it if more
+ * channels are offered later.
+ */
+ netif_set_real_num_tx_queues(net, 1);
+ netif_set_real_num_rx_queues(net, 1);
+
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
@@ -1953,7 +2011,7 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- /* hw_features computed in rndis_filter_device_add */
+ /* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 065b204d8e17..7b637c7dd1e5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -407,13 +407,13 @@ int rndis_filter_receive(struct net_device *ndev,
/* Make sure the rndis device state is initialized */
if (unlikely(!rndis_dev)) {
- netif_err(net_device_ctx, rx_err, ndev,
+ netif_dbg(net_device_ctx, rx_err, ndev,
"got rndis message but no rndis device!\n");
return NVSP_STAT_FAIL;
}
if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
- netif_err(net_device_ctx, rx_err, ndev,
+ netif_dbg(net_device_ctx, rx_err, ndev,
"got rndis message uninitialized\n");
return NVSP_STAT_FAIL;
}
@@ -759,7 +759,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
- itab[i] = rdev->ind_table[i];
+ itab[i] = rdev->rx_table[i];
/* Set hask key values */
keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
@@ -1114,6 +1114,9 @@ void rndis_set_subchannel(struct work_struct *w)
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
+ for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
+ ndev_ctx->tx_table[i] = i % nvdev->num_chn;
+
rtnl_unlock();
return;
@@ -1128,69 +1131,20 @@ unlock:
rtnl_unlock();
}
-struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
- struct netvsc_device_info *device_info)
+static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
+ struct netvsc_device *nvdev)
{
- struct net_device *net = hv_get_drvdata(dev);
+ struct net_device *net = rndis_device->ndev;
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct netvsc_device *net_device;
- struct rndis_device *rndis_device;
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- struct ndis_recv_scale_cap rsscap;
- u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
unsigned int gso_max_size = GSO_MAX_SIZE;
- u32 mtu, size;
- const struct cpumask *node_cpu_mask;
- u32 num_possible_rss_qs;
- int i, ret;
-
- rndis_device = get_rndis_device();
- if (!rndis_device)
- return ERR_PTR(-ENODEV);
-
- /*
- * Let the inner driver handle this first to create the netvsc channel
- * NOTE! Once the channel is created, we may get a receive callback
- * (RndisFilterOnReceive()) before this call is completed
- */
- net_device = netvsc_device_add(dev, device_info);
- if (IS_ERR(net_device)) {
- kfree(rndis_device);
- return net_device;
- }
-
- /* Initialize the rndis device */
- net_device->max_chn = 1;
- net_device->num_chn = 1;
-
- net_device->extension = rndis_device;
- rndis_device->ndev = net;
-
- /* Send the rndis initialization message */
- ret = rndis_filter_init_device(rndis_device, net_device);
- if (ret != 0)
- goto err_dev_remv;
-
- /* Get the MTU from the host */
- size = sizeof(u32);
- ret = rndis_filter_query_device(rndis_device, net_device,
- RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
- &mtu, &size);
- if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
- net->mtu = mtu;
-
- /* Get the mac address */
- ret = rndis_filter_query_device_mac(rndis_device, net_device);
- if (ret != 0)
- goto err_dev_remv;
-
- memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+ int ret;
/* Find HW offload capabilities */
- ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps);
+ ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
- goto err_dev_remv;
+ return ret;
/* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
@@ -1198,8 +1152,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
/* Linux does not care about IP checksum, always does in kernel */
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+ /* Reset previously set hw_features flags */
+ net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
+ net_device_ctx->tx_checksum_mask = 0;
+
/* Compute tx offload settings based on hw capabilities */
- net->hw_features = NETIF_F_RXCSUM;
+ net->hw_features |= NETIF_F_RXCSUM;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
@@ -1243,10 +1201,75 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
}
}
+ /* In case some hw_features disappeared we need to remove them from
+ * net->features list as they're no longer supported.
+ */
+ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
+
netif_set_gso_max_size(net, gso_max_size);
- ret = rndis_filter_set_offload_params(net, net_device, &offloads);
- if (ret)
+ ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
+
+ return ret;
+}
+
+struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ struct netvsc_device_info *device_info)
+{
+ struct net_device *net = hv_get_drvdata(dev);
+ struct netvsc_device *net_device;
+ struct rndis_device *rndis_device;
+ struct ndis_recv_scale_cap rsscap;
+ u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ u32 mtu, size;
+ const struct cpumask *node_cpu_mask;
+ u32 num_possible_rss_qs;
+ int i, ret;
+
+ rndis_device = get_rndis_device();
+ if (!rndis_device)
+ return ERR_PTR(-ENODEV);
+
+ /* Let the inner driver handle this first to create the netvsc channel
+ * NOTE! Once the channel is created, we may get a receive callback
+ * (RndisFilterOnReceive()) before this call is completed
+ */
+ net_device = netvsc_device_add(dev, device_info);
+ if (IS_ERR(net_device)) {
+ kfree(rndis_device);
+ return net_device;
+ }
+
+ /* Initialize the rndis device */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+
+ net_device->extension = rndis_device;
+ rndis_device->ndev = net;
+
+ /* Send the rndis initialization message */
+ ret = rndis_filter_init_device(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ /* Get the MTU from the host */
+ size = sizeof(u32);
+ ret = rndis_filter_query_device(rndis_device, net_device,
+ RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+ &mtu, &size);
+ if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+ net->mtu = mtu;
+
+ /* Get the mac address */
+ ret = rndis_filter_query_device_mac(rndis_device, net_device);
+ if (ret != 0)
+ goto err_dev_remv;
+
+ memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
+
+ /* Query and set hardware capabilities */
+ ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
+ if (ret != 0)
goto err_dev_remv;
rndis_filter_query_device_link_status(rndis_device, net_device);
@@ -1284,8 +1307,8 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
for (i = 0; i < ITAB_NUM; i++)
- rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
- net_device->num_chn);
+ rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+ i, net_device->num_chn);
atomic_set(&net_device->open_chn, 1);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
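
The rndis_filter.c hunks above pull the offload negotiation out into rndis_netdev_set_hwcaps(), which is called from rndis_filter_device_add() every time it runs. To make that re-entrant, the driver first clears only the bits it owns, using the new NETVSC_SUPPORTED_HW_FEATURES mask from hyperv_net.h, re-adds what the host currently offers, and drops vanished bits from the active feature set as well. A condensed sketch of that masking, with a hypothetical helper name:

    static void example_refresh_features(struct net_device *net,
                                         netdev_features_t offered)
    {
            /* reset only the bits this driver manages */
            net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;

            /* re-add whatever the host still advertises */
            net->hw_features |= offered & NETVSC_SUPPORTED_HW_FEATURES;

            /* features that disappeared must leave the active set too */
            net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
    }
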
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index 8374bb44a145..bea1de5e726c 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 3e4c8b21403c..400fdbd3a120 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -311,8 +311,8 @@ static int adf7242_status(struct adf7242_local *lp, u8 *stat)
return status;
}
-static int adf7242_wait_status(struct adf7242_local *lp, unsigned status,
- unsigned mask, int line)
+static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status,
+ unsigned int mask, int line)
{
int cnt = 0, ret = 0;
u8 stat;
@@ -477,7 +477,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
return status;
}
-static int adf7242_cmd(struct adf7242_local *lp, unsigned cmd)
+static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd)
{
int status;
@@ -920,7 +920,7 @@ static void adf7242_debug(u8 irq1)
static irqreturn_t adf7242_isr(int irq, void *data)
{
struct adf7242_local *lp = data;
- unsigned xmit;
+ unsigned int xmit;
u8 irq1;
adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index ef688518ad77..9fb9b565a002 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -21,6 +21,9 @@
*
* USB initialization is
* Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ *
+ * Busware HUL support is
+ * Copyright (c) 2017 Josef Filzmaier <j.filzmaier@gmx.at>
*/
#include <linux/kernel.h>
@@ -45,6 +48,7 @@
struct atusb {
struct ieee802154_hw *hw;
struct usb_device *usb_dev;
+ struct atusb_chip_data *data;
int shutdown; /* non-zero if shutting down */
int err; /* set by first error */
@@ -57,7 +61,7 @@ struct atusb {
struct usb_ctrlrequest tx_dr;
struct urb *tx_urb;
struct sk_buff *tx_skb;
- uint8_t tx_ack_seq; /* current TX ACK sequence number */
+ u8 tx_ack_seq; /* current TX ACK sequence number */
/* Firmware variable */
unsigned char fw_ver_maj; /* Firmware major version number */
@@ -65,6 +69,14 @@ struct atusb {
unsigned char fw_hw_type; /* Firmware hardware type */
};
+struct atusb_chip_data {
+ u16 t_channel_switch;
+ int rssi_base_val;
+
+ int (*set_channel)(struct ieee802154_hw*, u8, u8);
+ int (*set_txpower)(struct ieee802154_hw*, s32);
+};
+
/* ----- USB commands without data ----------------------------------------- */
/* To reduce the number of error checks in the code, we record the first error
@@ -87,44 +99,43 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
if (ret < 0) {
atusb->err = ret;
dev_err(&usb_dev->dev,
- "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
- request, value, index, ret);
+ "%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+ __func__, request, value, index, ret);
}
return ret;
}
-static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+static int atusb_command(struct atusb *atusb, u8 cmd, u8 arg)
{
struct usb_device *usb_dev = atusb->usb_dev;
- dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+ dev_dbg(&usb_dev->dev, "%s: cmd = 0x%x\n", __func__, cmd);
return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
}
-static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+static int atusb_write_reg(struct atusb *atusb, u8 reg, u8 value)
{
struct usb_device *usb_dev = atusb->usb_dev;
- dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
- reg, value);
+ dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
value, reg, NULL, 0, 1000);
}
-static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+static int atusb_read_reg(struct atusb *atusb, u8 reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
- uint8_t *buffer;
- uint8_t value;
+ u8 *buffer;
+ u8 value;
buffer = kmalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+ dev_dbg(&usb_dev->dev, "%s: reg = 0x%x\n", __func__, reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
0, reg, buffer, 1, 1000);
@@ -139,15 +150,14 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
}
}
-static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
- uint8_t shift, uint8_t value)
+static int atusb_write_subreg(struct atusb *atusb, u8 reg, u8 mask,
+ u8 shift, u8 value)
{
struct usb_device *usb_dev = atusb->usb_dev;
- uint8_t orig, tmp;
+ u8 orig, tmp;
int ret = 0;
- dev_dbg(&usb_dev->dev, "atusb_write_subreg: 0x%02x <- 0x%02x\n",
- reg, value);
+ dev_dbg(&usb_dev->dev, "%s: 0x%02x <- 0x%02x\n", __func__, reg, value);
orig = atusb_read_reg(atusb, reg);
@@ -163,6 +173,18 @@ static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
return ret;
}
+static int atusb_read_subreg(struct atusb *lp,
+ unsigned int addr, unsigned int mask,
+ unsigned int shift)
+{
+ int rc;
+
+ rc = atusb_read_reg(lp, addr);
+ rc = (rc & mask) >> shift;
+
+ return rc;
+}
+
static int atusb_get_and_clear_error(struct atusb *atusb)
{
int err = atusb->err;
@@ -237,12 +259,12 @@ static void atusb_work_urbs(struct work_struct *work)
/* ----- Asynchronous USB -------------------------------------------------- */
-static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+static void atusb_tx_done(struct atusb *atusb, u8 seq)
{
struct usb_device *usb_dev = atusb->usb_dev;
- uint8_t expect = atusb->tx_ack_seq;
+ u8 expect = atusb->tx_ack_seq;
- dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+ dev_dbg(&usb_dev->dev, "%s (0x%02x/0x%02x)\n", __func__, seq, expect);
if (seq == expect) {
/* TODO check for ifs handling in firmware */
ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
@@ -263,7 +285,7 @@ static void atusb_in_good(struct urb *urb)
struct usb_device *usb_dev = urb->dev;
struct sk_buff *skb = urb->context;
struct atusb *atusb = SKB_ATUSB(skb);
- uint8_t len, lqi;
+ u8 len, lqi;
if (!urb->actual_length) {
dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
@@ -302,7 +324,7 @@ static void atusb_in(struct urb *urb)
struct sk_buff *skb = urb->context;
struct atusb *atusb = SKB_ATUSB(skb);
- dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+ dev_dbg(&usb_dev->dev, "%s: status %d len %d\n", __func__,
urb->status, urb->actual_length);
if (urb->status) {
if (urb->status == -ENOENT) { /* being killed */
@@ -310,7 +332,7 @@ static void atusb_in(struct urb *urb)
urb->context = NULL;
return;
}
- dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+ dev_dbg(&usb_dev->dev, "%s: URB error %d\n", __func__, urb->status);
} else {
atusb_in_good(urb);
}
@@ -364,7 +386,7 @@ static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
- dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+ dev_dbg(&usb_dev->dev, "%s (%d)\n", __func__, skb->len);
atusb->tx_skb = skb;
atusb->tx_ack_seq++;
atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
@@ -375,25 +397,13 @@ static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
(unsigned char *)&atusb->tx_dr, skb->data,
skb->len, atusb_xmit_complete, NULL);
ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
- dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+ dev_dbg(&usb_dev->dev, "%s done (%d)\n", __func__, ret);
return ret;
}
-static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
-{
- struct atusb *atusb = hw->priv;
- int ret;
-
- ret = atusb_write_subreg(atusb, SR_CHANNEL, channel);
- if (ret < 0)
- return ret;
- msleep(1); /* @@@ ugly synchronization */
- return 0;
-}
-
static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
}
@@ -408,7 +418,7 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+ dev_vdbg(dev, "%s called for saddr\n", __func__);
atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -416,7 +426,7 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
- dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+ dev_vdbg(dev, "%s called for pan id\n", __func__);
atusb_write_reg(atusb, RG_PAN_ID_0, pan);
atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
}
@@ -425,14 +435,13 @@ static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
- dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+ dev_vdbg(dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
- dev_vdbg(dev,
- "atusb_set_hw_addr_filt called for panc change\n");
+ dev_vdbg(dev, "%s called for panc change\n", __func__);
if (filt->pan_coord)
atusb_write_subreg(atusb, SR_AACK_I_AM_COORD, 1);
else
@@ -448,7 +457,7 @@ static int atusb_start(struct ieee802154_hw *hw)
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
- dev_dbg(&usb_dev->dev, "atusb_start\n");
+ dev_dbg(&usb_dev->dev, "%s\n", __func__);
schedule_delayed_work(&atusb->work, 0);
atusb_command(atusb, ATUSB_RX_MODE, 1);
ret = atusb_get_and_clear_error(atusb);
@@ -462,7 +471,7 @@ static void atusb_stop(struct ieee802154_hw *hw)
struct atusb *atusb = hw->priv;
struct usb_device *usb_dev = atusb->usb_dev;
- dev_dbg(&usb_dev->dev, "atusb_stop\n");
+ dev_dbg(&usb_dev->dev, "%s\n", __func__);
usb_kill_anchored_urbs(&atusb->idle_urbs);
atusb_command(atusb, ATUSB_RX_MODE, 0);
atusb_get_and_clear_error(atusb);
@@ -475,6 +484,17 @@ static const s32 atusb_powers[ATUSB_MAX_TX_POWERS + 1] = {
};
static int
+atusb_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct atusb *atusb = hw->priv;
+
+ if (atusb->data)
+ return atusb->data->set_txpower(hw, mbm);
+ else
+ return -ENOTSUPP;
+}
+
+static int
atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
struct atusb *atusb = hw->priv;
@@ -488,12 +508,43 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
return -EINVAL;
}
+static int
+hulusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ u32 i;
+
+ for (i = 0; i < hw->phy->supported.tx_powers_size; i++) {
+ if (hw->phy->supported.tx_powers[i] == mbm)
+ return atusb_write_subreg(hw->priv, SR_TX_PWR_212, i);
+ }
+
+ return -EINVAL;
+}
+
#define ATUSB_MAX_ED_LEVELS 0xF
static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
-7100, -6900, -6700, -6500, -6300, -6100,
};
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+ 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+ -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+ -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+ -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+ -7800, -7600, -7400, -7200, -7000, -6800,
+};
+
static int
atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
{
@@ -527,6 +578,30 @@ atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
return atusb_write_subreg(atusb, SR_CCA_MODE, val);
}
+static int hulusb_set_cca_ed_level(struct atusb *lp, int rssi_base_val)
+{
+ unsigned int cca_ed_thres;
+
+ cca_ed_thres = atusb_read_subreg(lp, SR_CCA_ED_THRES);
+
+ switch (rssi_base_val) {
+ case -98:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+ break;
+ case -100:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
static int
atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
@@ -541,6 +616,92 @@ atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
return -EINVAL;
}
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct atusb *atusb = hw->priv;
+ int ret = -ENOTSUPP;
+
+ if (atusb->data) {
+ ret = atusb->data->set_channel(hw, page, channel);
+ /* @@@ ugly synchronization */
+ msleep(atusb->data->t_channel_switch);
+ }
+
+ return ret;
+}
+
+static int atusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ ret = atusb_write_subreg(atusb, SR_CHANNEL, channel);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int hulusb_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ int rc;
+ int rssi_base_val;
+
+ struct atusb *lp = hw->priv;
+
+ if (channel == 0)
+ rc = atusb_write_subreg(lp, SR_SUB_MODE, 0);
+ else
+ rc = atusb_write_subreg(lp, SR_SUB_MODE, 1);
+ if (rc < 0)
+ return rc;
+
+ if (page == 0) {
+ rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 0);
+ rssi_base_val = -100;
+ } else {
+ rc = atusb_write_subreg(lp, SR_BPSK_QPSK, 1);
+ rssi_base_val = -98;
+ }
+ if (rc < 0)
+ return rc;
+
+ rc = hulusb_set_cca_ed_level(lp, rssi_base_val);
+ if (rc < 0)
+ return rc;
+
+ /* This sets the symbol_duration according to the frequency on the 212.
+ * TODO: move this handling to where channel and page are set in cfg802154.
+ * We can do that because these timings follow the 802.15.4 standard;
+ * doing it in cfg802154 would make the calculation more generic.
+ *
+ * This should also be protected from the ifs_timer, i.e. cancel the
+ * timer and re-init it with the new value. For now, this is okay.
+ */
+ if (channel == 0) {
+ if (page == 0) {
+ /* SUB:0 and BPSK:0 -> BPSK-20 */
+ lp->hw->phy->symbol_duration = 50;
+ } else {
+ /* SUB:1 and BPSK:0 -> BPSK-40 */
+ lp->hw->phy->symbol_duration = 25;
+ }
+ } else {
+ if (page == 0)
+ /* SUB:0 and BPSK:1 -> OQPSK-100/200/400 */
+ lp->hw->phy->symbol_duration = 40;
+ else
+ /* SUB:1 and BPSK:1 -> OQPSK-250/500/1000 */
+ lp->hw->phy->symbol_duration = 16;
+ }
+
+ lp->hw->phy->lifs_period = IEEE802154_LIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+ lp->hw->phy->sifs_period = IEEE802154_SIFS_PERIOD *
+ lp->hw->phy->symbol_duration;
+
+ return atusb_write_subreg(lp, SR_CHANNEL, channel);
+}
+
static int
atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
{
@@ -559,6 +720,14 @@ atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries
}
static int
+hulusb_set_lbt(struct ieee802154_hw *hw, bool on)
+{
+ struct atusb *atusb = hw->priv;
+
+ return atusb_write_subreg(atusb, SR_CSMA_LBT_MODE, on);
+}
+
+static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
@@ -593,6 +762,20 @@ atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
return 0;
}
+static struct atusb_chip_data atusb_chip_data = {
+ .t_channel_switch = 1,
+ .rssi_base_val = -91,
+ .set_txpower = atusb_set_txpower,
+ .set_channel = atusb_set_channel,
+};
+
+static struct atusb_chip_data hulusb_chip_data = {
+ .t_channel_switch = 11,
+ .rssi_base_val = -100,
+ .set_txpower = hulusb_set_txpower,
+ .set_channel = hulusb_set_channel,
+};
+
static const struct ieee802154_ops atusb_ops = {
.owner = THIS_MODULE,
.xmit_async = atusb_xmit,
@@ -601,7 +784,8 @@ static const struct ieee802154_ops atusb_ops = {
.start = atusb_start,
.stop = atusb_stop,
.set_hw_addr_filt = atusb_set_hw_addr_filt,
- .set_txpower = atusb_set_txpower,
+ .set_txpower = atusb_txpower,
+ .set_lbt = hulusb_set_lbt,
.set_cca_mode = atusb_set_cca_mode,
.set_cca_ed_level = atusb_set_cca_ed_level,
.set_csma_params = atusb_set_csma_params,
@@ -614,6 +798,7 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
+ char *hw_name;
unsigned char *buffer;
int ret;
@@ -630,9 +815,32 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
atusb->fw_ver_min = buffer[1];
atusb->fw_hw_type = buffer[2];
+ switch (atusb->fw_hw_type) {
+ case ATUSB_HW_TYPE_100813:
+ case ATUSB_HW_TYPE_101216:
+ case ATUSB_HW_TYPE_110131:
+ hw_name = "ATUSB";
+ atusb->data = &atusb_chip_data;
+ break;
+ case ATUSB_HW_TYPE_RZUSB:
+ hw_name = "RZUSB";
+ atusb->data = &atusb_chip_data;
+ break;
+ case ATUSB_HW_TYPE_HULUSB:
+ hw_name = "HULUSB";
+ atusb->data = &hulusb_chip_data;
+ break;
+ default:
+ hw_name = "UNKNOWN";
+ atusb->err = -ENOTSUPP;
+ ret = -ENOTSUPP;
+ break;
+ }
+
dev_info(&usb_dev->dev,
- "Firmware: major: %u, minor: %u, hardware type: %u\n",
- atusb->fw_ver_maj, atusb->fw_ver_min, atusb->fw_hw_type);
+ "Firmware: major: %u, minor: %u, hardware type: %s (%d)\n",
+ atusb->fw_ver_maj, atusb->fw_ver_min, hw_name,
+ atusb->fw_hw_type);
}
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 2) {
dev_info(&usb_dev->dev,
@@ -667,11 +875,12 @@ static int atusb_get_and_show_build(struct atusb *atusb)
return ret;
}
-static int atusb_get_and_show_chip(struct atusb *atusb)
+static int atusb_get_and_conf_chip(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- uint8_t man_id_0, man_id_1, part_num, version_num;
+ u8 man_id_0, man_id_1, part_num, version_num;
const char *chip;
+ struct ieee802154_hw *hw = atusb->hw;
man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
@@ -681,6 +890,22 @@ static int atusb_get_and_show_chip(struct atusb *atusb)
if (atusb->err)
return atusb->err;
+ hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
+
+ hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) |
+ BIT(NL802154_CCA_ENERGY_CARRIER);
+ hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ hw->phy->cca.mode = NL802154_CCA_ENERGY;
+
+ hw->phy->current_page = 0;
+
if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
dev_err(&usb_dev->dev,
"non-Atmel transceiver xxxx%02x%02x\n",
@@ -691,9 +916,36 @@ static int atusb_get_and_show_chip(struct atusb *atusb)
switch (part_num) {
case 2:
chip = "AT86RF230";
+ atusb->hw->phy->supported.channels[0] = 0x7FFF800;
+ atusb->hw->phy->current_channel = 11; /* reset default */
+ atusb->hw->phy->symbol_duration = 16;
+ atusb->hw->phy->supported.tx_powers = atusb_powers;
+ atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
+ hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+ hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
break;
case 3:
chip = "AT86RF231";
+ atusb->hw->phy->supported.channels[0] = 0x7FFF800;
+ atusb->hw->phy->current_channel = 11; /* reset default */
+ atusb->hw->phy->symbol_duration = 16;
+ atusb->hw->phy->supported.tx_powers = atusb_powers;
+ atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
+ hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+ hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
+ break;
+ case 7:
+ chip = "AT86RF212";
+ atusb->hw->flags |= IEEE802154_HW_LBT;
+ atusb->hw->phy->supported.channels[0] = 0x00007FF;
+ atusb->hw->phy->supported.channels[2] = 0x00007FF;
+ atusb->hw->phy->current_channel = 5;
+ atusb->hw->phy->symbol_duration = 25;
+ atusb->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+ atusb->hw->phy->supported.tx_powers = at86rf212_powers;
+ atusb->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+ atusb->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ atusb->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
break;
default:
dev_err(&usb_dev->dev,
@@ -702,6 +954,9 @@ static int atusb_get_and_show_chip(struct atusb *atusb)
goto fail;
}
+ hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
+ hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
+
dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num);
return 0;
@@ -720,7 +975,8 @@ static int atusb_set_extended_addr(struct atusb *atusb)
int ret;
/* Firmware versions before 0.3 do not support the EUI64_READ command.
- * Just use a random address and be done */
+ * Just use a random address and be done.
+ */
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
return 0;
@@ -750,7 +1006,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
atusb->hw->phy->perm_extended_addr = extended_addr;
addr = swab64((__force u64)atusb->hw->phy->perm_extended_addr);
dev_info(&usb_dev->dev, "Read permanent extended address %8phC from device\n",
- &addr);
+ &addr);
}
kfree(buffer);
@@ -794,37 +1050,14 @@ static int atusb_probe(struct usb_interface *interface,
goto fail;
hw->parent = &usb_dev->dev;
- hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
-
- hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
- WPAN_PHY_FLAG_CCA_MODE;
-
- hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
- BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
- hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
- BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
-
- hw->phy->supported.cca_ed_levels = atusb_ed_levels;
- hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
-
- hw->phy->cca.mode = NL802154_CCA_ENERGY;
-
- hw->phy->current_page = 0;
- hw->phy->current_channel = 11; /* reset default */
- hw->phy->supported.channels[0] = 0x7FFF800;
- hw->phy->supported.tx_powers = atusb_powers;
- hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
- hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
- hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
atusb_command(atusb, ATUSB_RF_RESET, 0);
- atusb_get_and_show_chip(atusb);
+ atusb_get_and_conf_chip(atusb);
atusb_get_and_show_revision(atusb);
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
- if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
+ if ((atusb->fw_ver_maj == 0 && atusb->fw_ver_min >= 3) || atusb->fw_ver_maj > 0)
hw->flags |= IEEE802154_HW_FRAME_RETRIES;
ret = atusb_get_and_clear_error(atusb);
@@ -895,7 +1128,7 @@ static void atusb_disconnect(struct usb_interface *interface)
{
struct atusb *atusb = usb_get_intfdata(interface);
- dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+ dev_dbg(&atusb->usb_dev->dev, "%s\n", __func__);
atusb->shutdown = 1;
cancel_delayed_work_sync(&atusb->work);
@@ -912,7 +1145,7 @@ static void atusb_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
usb_put_dev(atusb->usb_dev);
- pr_debug("atusb_disconnect done\n");
+ pr_debug("%s done\n", __func__);
}
/* The devices we work with */
@@ -941,5 +1174,6 @@ MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_AUTHOR("Josef Filzmaier <j.filzmaier@gmx.at>");
MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
index b22bbaa77590..555d14bf14a3 100644
--- a/drivers/net/ieee802154/atusb.h
+++ b/drivers/net/ieee802154/atusb.h
@@ -50,6 +50,14 @@ enum atusb_requests {
ATUSB_EUI64_READ,
};
+enum {
+ ATUSB_HW_TYPE_100813, /* 2010-08-13 */
+ ATUSB_HW_TYPE_101216, /* 2010-12-16 */
+ ATUSB_HW_TYPE_110131, /* 2011-01-31, ATmega32U2-based */
+ ATUSB_HW_TYPE_RZUSB, /* Atmel Raven USB dongle with at86rf230 */
+ ATUSB_HW_TYPE_HULUSB, /* Busware HUL USB dongle with at86rf212 */
+};
+
/*
* Direction bRequest wValue wIndex wLength
*
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 24a1eabbbc9d..7900ed066d8a 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -924,7 +924,7 @@ static int ca8210_spi_transfer(
priv = spi_get_drvdata(spi);
reinit_completion(&priv->spi_transfer_complete);
- dev_dbg(&spi->dev, "ca8210_spi_transfer called\n");
+ dev_dbg(&spi->dev, "%s called\n", __func__);
cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
if (!cas_ctl)
@@ -1303,7 +1303,7 @@ static u8 tdme_checkpibattribute(
break;
/* MAC */
case MAC_BATT_LIFE_EXT_PERIODS:
- if ((value < 6) || (value > 41))
+ if (value < 6 || value > 41)
status = MAC_INVALID_PARAMETER;
break;
case MAC_BEACON_PAYLOAD:
@@ -1319,7 +1319,7 @@ static u8 tdme_checkpibattribute(
status = MAC_INVALID_PARAMETER;
break;
case MAC_MAX_BE:
- if ((value < 3) || (value > 8))
+ if (value < 3 || value > 8)
status = MAC_INVALID_PARAMETER;
break;
case MAC_MAX_CSMA_BACKOFFS:
@@ -1335,7 +1335,7 @@ static u8 tdme_checkpibattribute(
status = MAC_INVALID_PARAMETER;
break;
case MAC_RESPONSE_WAIT_TIME:
- if ((value < 2) || (value > 64))
+ if (value < 2 || value > 64)
status = MAC_INVALID_PARAMETER;
break;
case MAC_SUPERFRAME_ORDER:
@@ -1511,7 +1511,7 @@ static u8 mcps_data_request(
psec = (struct secspec *)(command.pdata.data_req.msdu + msdu_length);
command.length = sizeof(struct mcps_data_request_pset) -
MAX_DATA_SIZE + msdu_length;
- if (!security || (security->security_level == 0)) {
+ if (!security || security->security_level == 0) {
psec->security_level = 0;
command.length += 1;
} else {
@@ -1561,7 +1561,7 @@ static u8 mlme_reset_request_sync(
status = response.pdata.status;
/* reset COORD Bit for Channel Filtering as Coordinator */
- if (CA8210_MAC_WORKAROUNDS && set_default_pib && (!status)) {
+ if (CA8210_MAC_WORKAROUNDS && set_default_pib && !status) {
status = tdme_setsfr_request_sync(
0,
CA8210_SFR_MACCON,
@@ -1898,7 +1898,7 @@ static int ca8210_net_rx(struct ieee802154_hw *hw, u8 *command, size_t len)
unsigned long flags;
u8 status;
- dev_dbg(&priv->spi->dev, "ca8210_net_rx(), CmdID = %d\n", command[0]);
+ dev_dbg(&priv->spi->dev, "%s: CmdID = %d\n", __func__, command[0]);
if (command[0] == SPI_MCPS_DATA_INDICATION) {
/* Received data */
@@ -1944,11 +1944,11 @@ static int ca8210_skb_tx(
)
{
int status;
- struct ieee802154_hdr header = { 0 };
+ struct ieee802154_hdr header = { };
struct secspec secspec;
unsigned int mac_len;
- dev_dbg(&priv->spi->dev, "ca8210_skb_tx() called\n");
+ dev_dbg(&priv->spi->dev, "%s called\n", __func__);
/* Get addressing info from skb - ieee802154 layer creates a full
* packet
@@ -2051,7 +2051,7 @@ static int ca8210_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
struct ca8210_priv *priv = hw->priv;
int status;
- dev_dbg(&priv->spi->dev, "calling ca8210_xmit_async()\n");
+ dev_dbg(&priv->spi->dev, "calling %s\n", __func__);
priv->tx_skb = skb;
priv->async_tx_pending = true;
@@ -2369,7 +2369,7 @@ static int ca8210_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
MAC_PROMISCUOUS_MODE,
0,
1,
- (const void*)&on,
+ (const void *)&on,
priv->spi
);
if (status) {
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index d50add705a79..0c89d3edf901 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -517,7 +517,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
}
spin_lock_irqsave(&priv->lock, flags);
- BUG_ON(priv->is_tx);
+ WARN_ON(priv->is_tx);
priv->is_tx = 1;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -643,12 +643,12 @@ cc2520_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
dev_dbg(&priv->spi->dev, "trying to set channel\n");
- BUG_ON(page != 0);
- BUG_ON(channel < CC2520_MINCHANNEL);
- BUG_ON(channel > CC2520_MAXCHANNEL);
+ WARN_ON(page != 0);
+ WARN_ON(channel < CC2520_MINCHANNEL);
+ WARN_ON(channel > CC2520_MAXCHANNEL);
ret = cc2520_write_register(priv, CC2520_FREQCTRL,
- 11 + 5*(channel - 11));
+ 11 + 5 * (channel - 11));
return ret;
}
@@ -663,15 +663,14 @@ cc2520_filter(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 panid = le16_to_cpu(filt->pan_id);
- dev_vdbg(&priv->spi->dev,
- "cc2520_filter called for pan id\n");
+ dev_vdbg(&priv->spi->dev, "%s called for pan id\n", __func__);
ret = cc2520_write_ram(priv, CC2520RAM_PANID,
sizeof(panid), (u8 *)&panid);
}
if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
dev_vdbg(&priv->spi->dev,
- "cc2520_filter called for IEEE addr\n");
+ "%s called for IEEE addr\n", __func__);
ret = cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
sizeof(filt->ieee_addr),
(u8 *)&filt->ieee_addr);
@@ -680,8 +679,7 @@ cc2520_filter(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(&priv->spi->dev,
- "cc2520_filter called for saddr\n");
+ dev_vdbg(&priv->spi->dev, "%s called for saddr\n", __func__);
ret = cc2520_write_ram(priv, CC2520RAM_SHORTADDR,
sizeof(addr), (u8 *)&addr);
}
@@ -690,7 +688,7 @@ cc2520_filter(struct ieee802154_hw *hw,
u8 frmfilt0;
dev_vdbg(&priv->spi->dev,
- "cc2520_filter called for panc change\n");
+ "%s called for panc change\n", __func__);
cc2520_read_register(priv, CC2520_FRMFILT0, &frmfilt0);
@@ -929,6 +927,7 @@ static int cc2520_get_platform_data(struct spi_device *spi,
if (!np) {
struct cc2520_platform_data *spi_pdata = spi->dev.platform_data;
+
if (!spi_pdata)
return -ENOENT;
*pdata = *spi_pdata;
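
The cc2520 hunks above downgrade BUG_ON() to WARN_ON(): the range checks still shout in the log, but a bad page or channel request no longer panics the machine. A stricter variant, shown here only as a hedged alternative (this patch keeps executing after the warning), is to warn and bail out of the operation:

	if (WARN_ON(page != 0 ||
		    channel < CC2520_MINCHANNEL || channel > CC2520_MAXCHANNEL))
		return -EINVAL;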
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ee7084b2d52d..cf4788d840bf 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -635,7 +635,7 @@ static void mrf24j40_stop(struct ieee802154_hw *hw)
/* Set TXNIE and RXIE. Disable Interrupts */
regmap_update_bits(devrec->regmap_short, REG_INTCON,
- BIT_TXNIE | BIT_TXNIE, BIT_TXNIE | BIT_TXNIE);
+ BIT_TXNIE | BIT_RXIE, BIT_TXNIE | BIT_RXIE);
}
static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 8870bd2a2e8a..0008da7e9d4c 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -231,6 +231,9 @@ static void ifb_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->needs_free_netdev = true;
dev->priv_destructor = ifb_dev_free;
+
+ dev->min_mtu = 0;
+ dev->max_mtu = 0;
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
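
The ifb change sets dev->min_mtu and dev->max_mtu to 0 so the core MTU range checking does not constrain the device; with max_mtu == 0 the upper bound is simply skipped. The relevant core check, paraphrased here as an assumption about dev_set_mtu() in this kernel generation:

	if (new_mtu < 0 || new_mtu < dev->min_mtu)
		return -EINVAL;
	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
		return -EINVAL;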
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index ba8173a0b62e..5166575a164d 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -96,6 +96,7 @@ struct ipvl_port {
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
u16 mode;
+ u16 flags;
u16 dev_id_start;
struct work_struct wq;
struct sk_buff_head backlog;
@@ -123,6 +124,36 @@ static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
return rtnl_dereference(d->rx_handler_data);
}
+static inline bool ipvlan_is_private(const struct ipvl_port *port)
+{
+ return !!(port->flags & IPVLAN_F_PRIVATE);
+}
+
+static inline void ipvlan_mark_private(struct ipvl_port *port)
+{
+ port->flags |= IPVLAN_F_PRIVATE;
+}
+
+static inline void ipvlan_clear_private(struct ipvl_port *port)
+{
+ port->flags &= ~IPVLAN_F_PRIVATE;
+}
+
+static inline bool ipvlan_is_vepa(const struct ipvl_port *port)
+{
+ return !!(port->flags & IPVLAN_F_VEPA);
+}
+
+static inline void ipvlan_mark_vepa(struct ipvl_port *port)
+{
+ port->flags |= IPVLAN_F_VEPA;
+}
+
+static inline void ipvlan_clear_vepa(struct ipvl_port *port)
+{
+ port->flags &= ~IPVLAN_F_VEPA;
+}
+
void ipvlan_init_secret(void);
unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
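
The new helpers encode two mutually exclusive port policies on top of the default bridge-like behaviour: private (local slaves cannot reach each other) and VEPA (even slave-to-slave traffic is pushed out through the physical device and hairpinned by an external switch). A condensed, illustrative restatement of how the xmit paths further down consume them (not a literal excerpt from the patch):

	enum { XMIT_EXTERNAL, XMIT_LOCAL, XMIT_DROP };

	static int ipvlan_xmit_policy(const struct ipvl_port *port,
				      const struct ipvl_addr *dst)
	{
		if (ipvlan_is_vepa(port) || !dst)
			return XMIT_EXTERNAL;	/* hand to the physical device */
		if (ipvlan_is_private(port))
			return XMIT_DROP;	/* local peers are isolated */
		return XMIT_LOCAL;		/* default: loop back to the slave */
	}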
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 1f3295e274d0..11c1e7950fe5 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return false;
}
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
case htons(ETH_P_ARP): {
struct arphdr *arph;
- if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
@@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
+ struct icmp6hdr *icmph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+
+ if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Need to access the ipv6 address in body */
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ + sizeof(struct in6_addr))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+ }
+
*type = IPVL_ICMPV6;
- lyr3h = ip6h + 1;
+ lyr3h = icmph;
}
break;
}
@@ -409,7 +427,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct dst_entry *dst;
int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
- .flowi6_iif = dev->ifindex,
+ .flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
.flowi6_flags = FLOWI_FLAG_ANYSRC,
@@ -510,14 +528,20 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
struct ipvl_addr *addr;
int addr_type;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
- addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
- if (addr)
- return ipvlan_rcv_frame(addr, &skb, true);
-
+ if (!ipvlan_is_vepa(ipvlan->port)) {
+ addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
+ if (addr) {
+ if (ipvlan_is_private(ipvlan->port)) {
+ consume_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ return ipvlan_rcv_frame(addr, &skb, true);
+ }
+ }
out:
ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
return ipvlan_process_outbound(skb);
@@ -531,12 +555,18 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
void *lyr3h;
int addr_type;
- if (ether_addr_equal(eth->h_dest, eth->h_source)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!ipvlan_is_vepa(ipvlan->port) &&
+ ether_addr_equal(eth->h_dest, eth->h_source)) {
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
- if (addr)
+ if (addr) {
+ if (ipvlan_is_private(ipvlan->port)) {
+ consume_skb(skb);
+ return NET_XMIT_DROP;
+ }
return ipvlan_rcv_frame(addr, &skb, true);
+ }
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
@@ -594,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
@@ -615,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -654,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
} else {
struct ipvl_addr *addr;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
@@ -705,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
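
The ICMPv6 handling above calls pskb_may_pull() before touching the icmp6 header and again before reading the target address in the NS body, re-deriving ipv6_hdr() after each pull, because a successful pull may reallocate the skb head and invalidate cached header pointers. The same pull-then-reload shape in isolation (a sketch, not part of the patch):

	static struct icmp6hdr *pull_ns_header(struct sk_buff *skb)
	{
		struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(struct icmp6hdr)))
			return NULL;

		ip6h = ipv6_hdr(skb);	/* reload: the head may have moved */
		return (struct icmp6hdr *)(ip6h + 1);
	}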
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index c74893c1e620..30cb803e2fe5 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -107,16 +107,6 @@ static int ipvlan_port_create(struct net_device *dev)
struct ipvl_port *port;
int err, idx;
- if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
- netdev_err(dev, "Master is either lo or non-ether device\n");
- return -EINVAL;
- }
-
- if (netdev_is_rx_handler_busy(dev)) {
- netdev_err(dev, "Device is already in use.\n");
- return -EBUSY;
- }
-
port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -179,8 +169,9 @@ static void ipvlan_port_destroy(struct net_device *dev)
static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- const struct net_device *phy_dev = ipvlan->phy_dev;
- struct ipvl_port *port = ipvlan->port;
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port;
+ int err;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
@@ -196,18 +187,27 @@ static int ipvlan_init(struct net_device *dev)
if (!ipvlan->pcpu_stats)
return -ENOMEM;
+ if (!netif_is_ipvlan_port(phy_dev)) {
+ err = ipvlan_port_create(phy_dev);
+ if (err < 0) {
+ free_percpu(ipvlan->pcpu_stats);
+ return err;
+ }
+ }
+ port = ipvlan_port_get_rtnl(phy_dev);
port->count += 1;
-
return 0;
}
static void ipvlan_uninit(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- struct ipvl_port *port = ipvlan->port;
+ struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port;
free_percpu(ipvlan->pcpu_stats);
+ port = ipvlan_port_get_rtnl(phy_dev);
port->count -= 1;
if (!port->count)
ipvlan_port_destroy(port->dev);
@@ -407,7 +407,7 @@ static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
* while the packets use the mac-addr on the physical device.
*/
return dev_hard_header(skb, phy_dev, type, daddr,
- saddr ? : dev->dev_addr, len);
+ saddr ? : phy_dev->dev_addr, len);
}
static const struct header_ops ipvlan_header_ops = {
@@ -462,11 +462,29 @@ static int ipvlan_nl_changelink(struct net_device *dev,
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
int err = 0;
- if (data && data[IFLA_IPVLAN_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
err = ipvlan_set_port_mode(port, nmode);
}
+
+ if (!err && data[IFLA_IPVLAN_FLAGS]) {
+ u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
+
+ if (flags & IPVLAN_F_PRIVATE)
+ ipvlan_mark_private(port);
+ else
+ ipvlan_clear_private(port);
+
+ if (flags & IPVLAN_F_VEPA)
+ ipvlan_mark_vepa(port);
+ else
+ ipvlan_clear_vepa(port);
+ }
+
return err;
}
@@ -474,18 +492,34 @@ static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
return (0
+ nla_total_size(2) /* IFLA_IPVLAN_MODE */
+ + nla_total_size(2) /* IFLA_IPVLAN_FLAGS */
);
}
static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- if (data && data[IFLA_IPVLAN_MODE]) {
+ if (!data)
+ return 0;
+
+ if (data[IFLA_IPVLAN_MODE]) {
u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
return -EINVAL;
}
+ if (data[IFLA_IPVLAN_FLAGS]) {
+ u16 flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
+
+ /* Only two bits are used at this moment. */
+ if (flags & ~(IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
+ return -EINVAL;
+ /* Also both flags can't be active at the same time. */
+ if ((flags & (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA)) ==
+ (IPVLAN_F_PRIVATE | IPVLAN_F_VEPA))
+ return -EINVAL;
+ }
+
return 0;
}
@@ -502,6 +536,8 @@ static int ipvlan_nl_fillinfo(struct sk_buff *skb,
ret = -EMSGSIZE;
if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
goto err;
+ if (nla_put_u16(skb, IFLA_IPVLAN_FLAGS, port->flags))
+ goto err;
return 0;
@@ -518,7 +554,6 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct net_device *phy_dev;
int err;
u16 mode = IPVLAN_MODE_L3;
- bool create = false;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -532,23 +567,42 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
phy_dev = tmp->phy_dev;
} else if (!netif_is_ipvlan_port(phy_dev)) {
- err = ipvlan_port_create(phy_dev);
- if (err < 0)
- return err;
- create = true;
- }
+ /* Exit early if the underlying link is invalid or busy */
+ if (phy_dev->type != ARPHRD_ETHER ||
+ phy_dev->flags & IFF_LOOPBACK) {
+ netdev_err(phy_dev,
+ "Master is either lo or non-ether device\n");
+ return -EINVAL;
+ }
- if (data && data[IFLA_IPVLAN_MODE])
- mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+ if (netdev_is_rx_handler_busy(phy_dev)) {
+ netdev_err(phy_dev, "Device is already in use.\n");
+ return -EBUSY;
+ }
+ }
- port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
- ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
+ /* TODO Probably put random address here to be presented to the
+ * world but keep using the physical-dev address for the outgoing
+ * packets.
+ */
+ memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+
+ dev->priv_flags |= IFF_IPVLAN_SLAVE;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ return err;
+
+ /* ipvlan_init() would have created the port, if required */
+ port = ipvlan_port_get_rtnl(phy_dev);
+ ipvlan->port = port;
+
/* If the port-id base is at the MAX value, then wrap it around and
* begin from 0x1 again. This may be due to a busy system where lots
* of slaves are getting created and deleted.
@@ -567,31 +621,28 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
GFP_KERNEL);
if (err < 0)
- goto destroy_ipvlan_port;
+ goto unregister_netdev;
dev->dev_id = err;
+
/* Increment id-base to the next slot for the future assignment */
port->dev_id_start = err + 1;
- /* TODO Probably put random address here to be presented to the
- * world but keep using the physical-dev address for the outgoing
- * packets.
- */
- memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+ err = netdev_upper_dev_link(phy_dev, dev, extack);
+ if (err)
+ goto remove_ida;
- dev->priv_flags |= IFF_IPVLAN_SLAVE;
+ /* Flags are per port and latest update overrides. User has
+ * to be consistent in setting it just like the mode attribute.
+ */
+ if (data && data[IFLA_IPVLAN_FLAGS])
+ port->flags = nla_get_u16(data[IFLA_IPVLAN_FLAGS]);
- err = register_netdevice(dev);
- if (err < 0)
- goto remove_ida;
+ if (data && data[IFLA_IPVLAN_MODE])
+ mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- err = netdev_upper_dev_link(phy_dev, dev);
- if (err) {
- goto unregister_netdev;
- }
err = ipvlan_set_port_mode(port, mode);
- if (err) {
+ if (err)
goto unlink_netdev;
- }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
netif_stacked_transfer_operstate(phy_dev, dev);
@@ -599,13 +650,10 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unlink_netdev:
netdev_upper_dev_unlink(phy_dev, dev);
-unregister_netdev:
- unregister_netdevice(dev);
remove_ida:
ida_simple_remove(&port->ida, dev->dev_id);
-destroy_ipvlan_port:
- if (create)
- ipvlan_port_destroy(phy_dev);
+unregister_netdev:
+ unregister_netdevice(dev);
return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
@@ -644,6 +692,7 @@ EXPORT_SYMBOL_GPL(ipvlan_link_setup);
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
[IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
+ [IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipvlan_link_ops = {
@@ -730,6 +779,11 @@ static int ipvlan_device_event(struct notifier_block *unused,
ipvlan_adjust_mtu(ipvlan, dev);
break;
+ case NETDEV_CHANGEADDR:
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode)
+ ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ break;
+
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlying device to change its type. */
return NOTIFY_BAD;
@@ -803,10 +857,6 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)if6->idev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- /* FIXME IPv6 autoconf calls us from bh without RTNL */
- if (in_softirq())
- return NOTIFY_DONE;
-
if (!netif_is_ipvlan(dev))
return NOTIFY_DONE;
@@ -846,8 +896,11 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
- if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true))
+ if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) {
+ NL_SET_ERR_MSG(i6vi->extack,
+ "Address already assigned to an ipvlan device");
return notifier_from_errno(-EADDRINUSE);
+ }
break;
}
@@ -916,8 +969,11 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
- if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false))
+ if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) {
+ NL_SET_ERR_MSG(ivi->extack,
+ "Address already assigned to an ipvlan device");
return notifier_from_errno(-EADDRINUSE);
+ }
break;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 5ab1b8849c30..1d025ab9568f 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -16,6 +16,7 @@
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
@@ -146,7 +147,7 @@ struct macsec_rx_sa {
struct macsec_key key;
spinlock_t lock;
u32 next_pn;
- atomic_t refcnt;
+ refcount_t refcnt;
bool active;
struct macsec_rx_sa_stats __percpu *stats;
struct macsec_rx_sc *sc;
@@ -171,7 +172,7 @@ struct macsec_rx_sc {
bool active;
struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
struct pcpu_rx_sc_stats __percpu *stats;
- atomic_t refcnt;
+ refcount_t refcnt;
struct rcu_head rcu_head;
};
@@ -187,7 +188,7 @@ struct macsec_tx_sa {
struct macsec_key key;
spinlock_t lock;
u32 next_pn;
- atomic_t refcnt;
+ refcount_t refcnt;
bool active;
struct macsec_tx_sa_stats __percpu *stats;
struct rcu_head rcu;
@@ -314,7 +315,7 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
if (!sa || !sa->active)
return NULL;
- if (!atomic_inc_not_zero(&sa->refcnt))
+ if (!refcount_inc_not_zero(&sa->refcnt))
return NULL;
return sa;
@@ -330,12 +331,12 @@ static void free_rx_sc_rcu(struct rcu_head *head)
static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
- return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
+ return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}
static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
- if (atomic_dec_and_test(&sc->refcnt))
+ if (refcount_dec_and_test(&sc->refcnt))
call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}
@@ -350,7 +351,7 @@ static void free_rxsa(struct rcu_head *head)
static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
- if (atomic_dec_and_test(&sa->refcnt))
+ if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_rxsa);
}
@@ -361,7 +362,7 @@ static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
if (!sa || !sa->active)
return NULL;
- if (!atomic_inc_not_zero(&sa->refcnt))
+ if (!refcount_inc_not_zero(&sa->refcnt))
return NULL;
return sa;
@@ -378,7 +379,7 @@ static void free_txsa(struct rcu_head *head)
static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
- if (atomic_dec_and_test(&sa->refcnt))
+ if (refcount_dec_and_test(&sa->refcnt))
call_rcu(&sa->rcu, free_txsa);
}
@@ -1341,7 +1342,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
rx_sa->active = false;
rx_sa->next_pn = 1;
- atomic_set(&rx_sa->refcnt, 1);
+ refcount_set(&rx_sa->refcnt, 1);
spin_lock_init(&rx_sa->lock);
return 0;
@@ -1412,7 +1413,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
rx_sc->sci = sci;
rx_sc->active = true;
- atomic_set(&rx_sc->refcnt, 1);
+ refcount_set(&rx_sc->refcnt, 1);
secy = &macsec_priv(dev)->secy;
rcu_assign_pointer(rx_sc->next, secy->rx_sc);
@@ -1438,7 +1439,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
}
tx_sa->active = false;
- atomic_set(&tx_sa->refcnt, 1);
+ refcount_set(&tx_sa->refcnt, 1);
spin_lock_init(&tx_sa->lock);
return 0;
@@ -2410,7 +2411,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!hdr)
return -EMSGSIZE;
- genl_dump_check_consistent(cb, hdr, &macsec_fam);
+ genl_dump_check_consistent(cb, hdr);
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -3246,7 +3247,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
&macsec_netdev_addr_lock_key,
macsec_get_nest_level(dev));
- err = netdev_upper_dev_link(real_dev, dev);
+ err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto unregister;
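
The macsec hunks above convert the SA and SC reference counts from atomic_t to refcount_t, which saturates instead of wrapping on overflow and pairs naturally with a get-unless-zero lookup. A minimal, self-contained sketch of that pattern (names are illustrative, not from the driver):

	struct obj {
		refcount_t refcnt;
		struct rcu_head rcu;
	};

	static void obj_init(struct obj *o)
	{
		refcount_set(&o->refcnt, 1);		/* initial owner reference */
	}

	static struct obj *obj_get(struct obj *o)
	{
		/* fails once the count has already dropped to zero */
		return refcount_inc_not_zero(&o->refcnt) ? o : NULL;
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refcnt))
			kfree_rcu(o, rcu);		/* free after a grace period */
	}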
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d2aea961e0f4..a178c5efd33e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -413,7 +413,9 @@ static void macvlan_forward_source_one(struct sk_buff *skb,
len = nskb->len + ETH_HLEN;
nskb->dev = dev;
- nskb->pkt_type = PACKET_HOST;
+
+ if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
+ nskb->pkt_type = PACKET_HOST;
ret = netif_rx(nskb);
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
@@ -480,7 +482,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
struct macvlan_dev, list);
else
vlan = macvlan_hash_lookup(port, eth->h_dest);
- if (vlan == NULL)
+ if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
return RX_HANDLER_PASS;
dev = vlan->dev;
@@ -596,8 +598,6 @@ static const struct header_ops macvlan_hard_header_ops = {
.cache_update = eth_header_cache_update,
};
-static struct rtnl_link_ops macvlan_link_ops;
-
static int macvlan_open(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
@@ -613,8 +613,7 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
- dev->rtnl_link_ops == &macvlan_link_ops) {
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
vlan->fwd_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
@@ -1231,11 +1230,14 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return -EADDRNOTAVAIL;
}
- if (data && data[IFLA_MACVLAN_FLAGS] &&
+ if (!data)
+ return 0;
+
+ if (data[IFLA_MACVLAN_FLAGS] &&
nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
return -EINVAL;
- if (data && data[IFLA_MACVLAN_MODE]) {
+ if (data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
case MACVLAN_MODE_VEPA:
@@ -1248,7 +1250,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
- if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
+ if (data[IFLA_MACVLAN_MACADDR_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) {
case MACVLAN_MACADDR_ADD:
case MACVLAN_MACADDR_DEL:
@@ -1260,7 +1262,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
- if (data && data[IFLA_MACVLAN_MACADDR]) {
+ if (data[IFLA_MACVLAN_MACADDR]) {
if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN)
return -EINVAL;
@@ -1268,7 +1270,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return -EADDRNOTAVAIL;
}
- if (data && data[IFLA_MACVLAN_MACADDR_COUNT])
+ if (data[IFLA_MACVLAN_MACADDR_COUNT])
return -EINVAL;
return 0;
@@ -1341,7 +1343,8 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
}
int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -1430,7 +1433,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
goto destroy_macvlan_port;
dev->priv_flags |= IFF_MACVLAN;
- err = netdev_upper_dev_link(lowerdev, dev);
+ err = netdev_upper_dev_link(lowerdev, dev, extack);
if (err)
goto unregister_netdev;
@@ -1453,7 +1456,7 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- return macvlan_common_newlink(src_net, dev, tb, data);
+ return macvlan_common_newlink(src_net, dev, tb, data, extack);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index cba5cb3b849a..9a10029caf83 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = macvlan_common_newlink(src_net, dev, tb, data);
+ err = macvlan_common_newlink(src_net, dev, tb, data, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 0e27920c2b6b..be9aa368639f 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -616,7 +616,7 @@ static struct configfs_item_operations netconsole_target_item_ops = {
.release = netconsole_target_release,
};
-static struct config_item_type netconsole_target_type = {
+static const struct config_item_type netconsole_target_type = {
.ct_attrs = netconsole_target_attrs,
.ct_item_ops = &netconsole_target_item_ops,
.ct_owner = THIS_MODULE,
@@ -682,7 +682,7 @@ static struct configfs_group_operations netconsole_subsys_group_ops = {
.drop_item = drop_netconsole_target,
};
-static struct config_item_type netconsole_subsys_type = {
+static const struct config_item_type netconsole_subsys_type = {
.ct_group_ops = &netconsole_subsys_group_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 0250aa9ae2cb..9f6f7ccd44f7 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -230,10 +230,10 @@ err:
return NETDEV_TX_BUSY;
}
-static void ntb_netdev_tx_timer(unsigned long data)
+static void ntb_netdev_tx_timer(struct timer_list *t)
{
- struct net_device *ndev = (struct net_device *)data;
- struct ntb_netdev *dev = netdev_priv(ndev);
+ struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
+ struct net_device *ndev = dev->ndev;
if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
@@ -269,7 +269,7 @@ static int ntb_netdev_open(struct net_device *ndev)
}
}
- setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);
+ timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0);
netif_carrier_off(ndev);
ntb_transport_link_up(dev->qp);
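
ntb_netdev above moves to the timer_list-based callback API: the callback now receives the timer itself and recovers its container with from_timer() instead of casting an unsigned long cookie. The general shape, reduced to a sketch with made-up names:

	struct my_dev {
		struct net_device *ndev;
		struct timer_list tx_timer;
	};

	static void my_tx_timer(struct timer_list *t)
	{
		struct my_dev *dev = from_timer(dev, t, tx_timer);

		netif_wake_queue(dev->ndev);	/* use the recovered container */
	}

	static void my_start(struct my_dev *dev)
	{
		timer_setup(&dev->tx_timer, my_tx_timer, 0);
		mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(10));
	}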
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cd931cf9dcc2..bdfbabb86ee0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -191,11 +191,14 @@ config LED_TRIGGER_PHY
Adds support for a set of LED trigger events per-PHY. Link
state change will trigger the events, for consumption by an
LED class driver. There are triggers for each link speed currently
- supported by the phy, and are of the form:
+ supported by the PHY and also one common "link" trigger as a
+ logical-or of all the link speed ones.
+ All these triggers are named according to the following pattern:
<mii bus id>:<phy>:<speed>
Where speed is in the form:
- <Speed in megabits>Mbps or <Speed in gigabits>Gbps
+ <Speed in megabits>Mbps OR <Speed in gigabits>Gbps OR link
+ for any speed known to the PHY.
comment "MII PHY device drivers"
@@ -277,6 +280,11 @@ config DAVICOM_PHY
---help---
Currently supports dm9161e and dm9131
+config DP83822_PHY
+ tristate "Texas Instruments DP83822 PHY"
+ ---help---
+ Supports the DP83822 PHY.
+
config DP83848_PHY
tristate "Texas Instruments DP83848 PHY"
---help---
@@ -366,6 +374,11 @@ config REALTEK_PHY
---help---
Supports the Realtek 821x PHY.
+config RENESAS_PHY
+ tristate "Driver for Renesas PHYs"
+ ---help---
+ Supports the Renesas PHYs uPD60620 and uPD60620A.
+
config ROCKCHIP_PHY
tristate "Driver for Rockchip Ethernet PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 416df92fbf4f..01acbcb2c798 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PHY drivers and MDIO bus drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83822_PHY) += dp83822.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
@@ -72,6 +74,7 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index c1e52b9dc58d..5f93e6add563 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -167,7 +167,7 @@ static int at803x_set_wol(struct phy_device *phydev,
mac = (const u8 *) ndev->dev_addr;
if (!is_valid_ether_addr(mac))
- return -EFAULT;
+ return -EINVAL;
for (i = 0; i < 3; i++) {
phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 1e9ad30a35c8..d7ed69deabfb 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -43,6 +43,12 @@ static int bcm54210e_config_init(struct phy_device *phydev)
val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+ if (phydev->dev_flags & PHY_BRCM_EN_MASTER_MODE) {
+ val = phy_read(phydev, MII_CTRL1000);
+ val |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ phy_write(phydev, MII_CTRL1000, val);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 72f4228a63bb..9442db221834 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h
index e7fe41117003..21aa24c741b9 100644
--- a/drivers/net/phy/dp83640_reg.h
+++ b/drivers/net/phy/dp83640_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* dp83640_reg.h
* Generated by regen.tcl on Thu Feb 17 10:02:48 AM CET 2011
*/
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
new file mode 100644
index 000000000000..14335d14e9e4
--- /dev/null
+++ b/drivers/net/phy/dp83822.c
@@ -0,0 +1,344 @@
+/*
+ * Driver for the Texas Instruments DP83822 PHY
+ *
+ * Copyright (C) 2017 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#define DP83822_PHY_ID 0x2000a240
+#define DP83822_DEVADDR 0x1f
+
+#define MII_DP83822_PHYSCR 0x11
+#define MII_DP83822_MISR1 0x12
+#define MII_DP83822_MISR2 0x13
+#define MII_DP83822_RESET_CTRL 0x1f
+
+#define DP83822_HW_RESET BIT(15)
+#define DP83822_SW_RESET BIT(14)
+
+/* PHYSCR Register Fields */
+#define DP83822_PHYSCR_INT_OE BIT(0) /* Interrupt Output Enable */
+#define DP83822_PHYSCR_INTEN BIT(1) /* Interrupt Enable */
+
+/* MISR1 bits */
+#define DP83822_RX_ERR_HF_INT_EN BIT(0)
+#define DP83822_FALSE_CARRIER_HF_INT_EN BIT(1)
+#define DP83822_ANEG_COMPLETE_INT_EN BIT(2)
+#define DP83822_DUP_MODE_CHANGE_INT_EN BIT(3)
+#define DP83822_SPEED_CHANGED_INT_EN BIT(4)
+#define DP83822_LINK_STAT_INT_EN BIT(5)
+#define DP83822_ENERGY_DET_INT_EN BIT(6)
+#define DP83822_LINK_QUAL_INT_EN BIT(7)
+
+/* MISR2 bits */
+#define DP83822_JABBER_DET_INT_EN BIT(0)
+#define DP83822_WOL_PKT_INT_EN BIT(1)
+#define DP83822_SLEEP_MODE_INT_EN BIT(2)
+#define DP83822_MDI_XOVER_INT_EN BIT(3)
+#define DP83822_LB_FIFO_INT_EN BIT(4)
+#define DP83822_PAGE_RX_INT_EN BIT(5)
+#define DP83822_ANEG_ERR_INT_EN BIT(6)
+#define DP83822_EEE_ERROR_CHANGE_INT_EN BIT(7)
+
+/* INT_STAT1 bits */
+#define DP83822_WOL_INT_EN BIT(4)
+#define DP83822_WOL_INT_STAT BIT(12)
+
+#define MII_DP83822_RXSOP1 0x04a5
+#define MII_DP83822_RXSOP2 0x04a6
+#define MII_DP83822_RXSOP3 0x04a7
+
+/* WoL Registers */
+#define MII_DP83822_WOL_CFG 0x04a0
+#define MII_DP83822_WOL_STAT 0x04a1
+#define MII_DP83822_WOL_DA1 0x04a2
+#define MII_DP83822_WOL_DA2 0x04a3
+#define MII_DP83822_WOL_DA3 0x04a4
+
+/* WoL bits */
+#define DP83822_WOL_MAGIC_EN BIT(0)
+#define DP83822_WOL_SECURE_ON BIT(5)
+#define DP83822_WOL_EN BIT(7)
+#define DP83822_WOL_INDICATION_SEL BIT(8)
+#define DP83822_WOL_CLR_INDICATION BIT(11)
+
+static int dp83822_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, MII_DP83822_MISR1);
+ if (err < 0)
+ return err;
+
+ err = phy_read(phydev, MII_DP83822_MISR2);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83822_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ u16 value;
+ const u8 *mac;
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ /* MAC addresses start with byte 5, but stored in mac[0].
+ * 822 PHYs store bytes 4|5, 2|3, 0|1
+ */
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA1,
+ (mac[1] << 8) | mac[0]);
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA2,
+ (mac[3] << 8) | mac[2]);
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_DA3,
+ (mac[5] << 8) | mac[4]);
+
+ value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_WOL_CFG);
+ if (wol->wolopts & WAKE_MAGIC)
+ value |= DP83822_WOL_MAGIC_EN;
+ else
+ value &= ~DP83822_WOL_MAGIC_EN;
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ phy_write_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+ phy_write_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP2,
+ (wol->sopass[3] << 8) | wol->sopass[2]);
+ phy_write_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP3,
+ (wol->sopass[5] << 8) | wol->sopass[4]);
+ value |= DP83822_WOL_SECURE_ON;
+ } else {
+ value &= ~DP83822_WOL_SECURE_ON;
+ }
+
+ value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
+ DP83822_WOL_CLR_INDICATION);
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
+ value);
+ } else {
+ value = phy_read_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_WOL_CFG);
+ value &= ~DP83822_WOL_EN;
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
+ value);
+ }
+
+ return 0;
+}
+
+static void dp83822_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value;
+ u16 sopass_val;
+
+ wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+
+ if (value & DP83822_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83822_WOL_SECURE_ON) {
+ sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP1);
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP2);
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RXSOP3);
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ /* WoL is not enabled so set wolopts to 0 */
+ if (!(value & DP83822_WOL_EN))
+ wol->wolopts = 0;
+}
+
+static int dp83822_config_intr(struct phy_device *phydev)
+{
+ int misr_status;
+ int physcr_status;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ misr_status = phy_read(phydev, MII_DP83822_MISR1);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83822_RX_ERR_HF_INT_EN |
+ DP83822_FALSE_CARRIER_HF_INT_EN |
+ DP83822_ANEG_COMPLETE_INT_EN |
+ DP83822_DUP_MODE_CHANGE_INT_EN |
+ DP83822_SPEED_CHANGED_INT_EN |
+ DP83822_LINK_STAT_INT_EN |
+ DP83822_ENERGY_DET_INT_EN |
+ DP83822_LINK_QUAL_INT_EN);
+
+ err = phy_write(phydev, MII_DP83822_MISR1, misr_status);
+ if (err < 0)
+ return err;
+
+ misr_status = phy_read(phydev, MII_DP83822_MISR2);
+ if (misr_status < 0)
+ return misr_status;
+
+ misr_status |= (DP83822_JABBER_DET_INT_EN |
+ DP83822_WOL_PKT_INT_EN |
+ DP83822_SLEEP_MODE_INT_EN |
+ DP83822_MDI_XOVER_INT_EN |
+ DP83822_LB_FIFO_INT_EN |
+ DP83822_PAGE_RX_INT_EN |
+ DP83822_ANEG_ERR_INT_EN |
+ DP83822_EEE_ERROR_CHANGE_INT_EN);
+
+ err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
+ if (err < 0)
+ return err;
+
+ physcr_status = phy_read(phydev, MII_DP83822_PHYSCR);
+ if (physcr_status < 0)
+ return physcr_status;
+
+ physcr_status |= DP83822_PHYSCR_INT_OE | DP83822_PHYSCR_INTEN;
+
+ } else {
+ err = phy_write(phydev, MII_DP83822_MISR1, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_DP83822_MISR2, 0);
+ if (err < 0)
+ return err;
+
+ physcr_status = phy_read(phydev, MII_DP83822_PHYSCR);
+ if (physcr_status < 0)
+ return physcr_status;
+
+ physcr_status &= ~DP83822_PHYSCR_INTEN;
+ }
+
+ return phy_write(phydev, MII_DP83822_PHYSCR, physcr_status);
+}
+
+static int dp83822_config_init(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ err = genphy_config_init(phydev);
+ if (err < 0)
+ return err;
+
+ value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
+
+ return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
+ value);
+}
+
+static int dp83822_phy_reset(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_HW_RESET);
+ if (err < 0)
+ return err;
+
+ dp83822_config_init(phydev);
+
+ return 0;
+}
+
+static int dp83822_suspend(struct phy_device *phydev)
+{
+ int value;
+
+ value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+
+ if (!(value & DP83822_WOL_EN))
+ genphy_suspend(phydev);
+
+ return 0;
+}
+
+static int dp83822_resume(struct phy_device *phydev)
+{
+ int value;
+
+ genphy_resume(phydev);
+
+ value = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG);
+
+ phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, value |
+ DP83822_WOL_CLR_INDICATION);
+
+ return 0;
+}
+
+static struct phy_driver dp83822_driver[] = {
+ {
+ .phy_id = DP83822_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83822",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = dp83822_config_init,
+ .soft_reset = dp83822_phy_reset,
+ .get_wol = dp83822_get_wol,
+ .set_wol = dp83822_set_wol,
+ .ack_interrupt = dp83822_ack_interrupt,
+ .config_intr = dp83822_config_intr,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = dp83822_suspend,
+ .resume = dp83822_resume,
+ },
+};
+module_phy_driver(dp83822_driver);
+
+static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
+ { DP83822_PHY_ID, 0xfffffff0 },
+ { },
+};
+MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83822 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 3de4fe4dda77..3966d43c5146 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -20,7 +20,6 @@
#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
-#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -80,7 +79,6 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
- { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -110,7 +108,6 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
- DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 15cbcdba618a..4d02b27df044 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -681,9 +681,11 @@ static int m88e1116r_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- err = m88e1121_config_aneg_rgmii_delays(phydev);
- if (err < 0)
- return err;
+ if (phy_interface_is_rgmii(phydev)) {
+ err = m88e1121_config_aneg_rgmii_delays(phydev);
+ if (err < 0)
+ return err;
+ }
err = genphy_soft_reset(phydev);
if (err < 0)
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
index 3a7f143904e8..773bb51399be 100644
--- a/drivers/net/phy/mdio-boardinfo.h
+++ b/drivers/net/phy/mdio-boardinfo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mdio-boardinfo.h - board info interface internal to the mdio_bus
* component
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 94ca42e630bb..39ecad25b201 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -27,12 +27,21 @@ static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
return NULL;
}
+static void phy_led_trigger_no_link(struct phy_device *phy)
+{
+ if (phy->last_triggered) {
+ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+ led_trigger_event(&phy->led_link_trigger->trigger, LED_OFF);
+ phy->last_triggered = NULL;
+ }
+}
+
void phy_led_trigger_change_speed(struct phy_device *phy)
{
struct phy_led_trigger *plt;
if (!phy->link)
- goto out_change_speed;
+ return phy_led_trigger_no_link(phy);
if (phy->speed == 0)
return;
@@ -42,25 +51,28 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
netdev_alert(phy->attached_dev,
"No phy led trigger registered for speed(%d)\n",
phy->speed);
- goto out_change_speed;
+ return phy_led_trigger_no_link(phy);
}
if (plt != phy->last_triggered) {
+ if (!phy->last_triggered)
+ led_trigger_event(&phy->led_link_trigger->trigger,
+ LED_FULL);
+
led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
led_trigger_event(&plt->trigger, LED_FULL);
phy->last_triggered = plt;
}
- return;
-
-out_change_speed:
- if (phy->last_triggered) {
- led_trigger_event(&phy->last_triggered->trigger,
- LED_OFF);
- phy->last_triggered = NULL;
- }
}
EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
+static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
+ size_t size, char *suffix)
+{
+ snprintf(buf, size, PHY_ID_FMT ":%s",
+ phy->mdio.bus->id, phy->mdio.addr, suffix);
+}
+
static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
unsigned int speed)
@@ -77,8 +89,8 @@ static int phy_led_trigger_register(struct phy_device *phy,
snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
DIV_ROUND_CLOSEST(speed, 1000));
- snprintf(plt->name, sizeof(plt->name), PHY_ID_FMT ":%s",
- phy->mdio.bus->id, phy->mdio.addr, name_suffix);
+ phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
+ name_suffix);
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
@@ -99,13 +111,30 @@ int phy_led_triggers_register(struct phy_device *phy)
if (!phy->phy_num_led_triggers)
return 0;
+ phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev,
+ sizeof(*phy->led_link_trigger),
+ GFP_KERNEL);
+ if (!phy->led_link_trigger) {
+ err = -ENOMEM;
+ goto out_clear;
+ }
+
+ phy_led_trigger_format_name(phy, phy->led_link_trigger->name,
+ sizeof(phy->led_link_trigger->name),
+ "link");
+ phy->led_link_trigger->trigger.name = phy->led_link_trigger->name;
+
+ err = led_trigger_register(&phy->led_link_trigger->trigger);
+ if (err)
+ goto out_free_link;
+
phy->phy_led_triggers = devm_kzalloc(&phy->mdio.dev,
sizeof(struct phy_led_trigger) *
phy->phy_num_led_triggers,
GFP_KERNEL);
if (!phy->phy_led_triggers) {
err = -ENOMEM;
- goto out_clear;
+ goto out_unreg_link;
}
for (i = 0; i < phy->phy_num_led_triggers; i++) {
@@ -123,6 +152,11 @@ out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_unreg_link:
+ phy_led_trigger_unregister(phy->led_link_trigger);
+out_free_link:
+ devm_kfree(&phy->mdio.dev, phy->led_link_trigger);
+ phy->led_link_trigger = NULL;
out_clear:
phy->phy_num_led_triggers = 0;
return err;
@@ -135,5 +169,8 @@ void phy_led_triggers_unregister(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
+
+ if (phy->led_link_trigger)
+ phy_led_trigger_unregister(phy->led_link_trigger);
}
EXPORT_SYMBOL_GPL(phy_led_triggers_unregister);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index bcb4755bcd95..e3bbc70372d3 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -357,7 +357,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
* 1 1 0 1 TX
*/
static void phylink_resolve_flow(struct phylink *pl,
- struct phylink_link_state *state)
+ struct phylink_link_state *state)
{
int new_pause = 0;
@@ -506,7 +506,8 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
}
struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
- phy_interface_t iface, const struct phylink_mac_ops *ops)
+ phy_interface_t iface,
+ const struct phylink_mac_ops *ops)
{
struct phylink *pl;
int ret;
@@ -566,7 +567,8 @@ void phylink_destroy(struct phylink *pl)
}
EXPORT_SYMBOL_GPL(phylink_destroy);
-void phylink_phy_change(struct phy_device *phydev, bool up, bool do_carrier)
+static void phylink_phy_change(struct phy_device *phydev, bool up,
+ bool do_carrier)
{
struct phylink *pl = phydev->phylink;
@@ -585,7 +587,7 @@ void phylink_phy_change(struct phy_device *phydev, bool up, bool do_carrier)
phylink_run_resolve(pl);
netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? "up" : "down",
- phy_modes(phydev->interface),
+ phy_modes(phydev->interface),
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex));
}
@@ -823,7 +825,7 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
}
int phylink_ethtool_ksettings_get(struct phylink *pl,
- struct ethtool_link_ksettings *kset)
+ struct ethtool_link_ksettings *kset)
{
struct phylink_link_state link_state;
@@ -870,7 +872,7 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
int phylink_ethtool_ksettings_set(struct phylink *pl,
- const struct ethtool_link_ksettings *kset)
+ const struct ethtool_link_ksettings *kset)
{
struct ethtool_link_ksettings our_kset;
struct phylink_link_state config;
@@ -1337,8 +1339,6 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL_GPL(phylink_mii_ioctl);
-
-
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 9cbe645e3d89..eda0a6e86918 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -22,17 +22,29 @@
#define RTL821x_INER 0x12
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
+#define RTL821x_PAGE_SELECT 0x1f
#define RTL8211E_INER_LINK_STATUS 0x400
#define RTL8211F_INER_LINK_STATUS 0x0010
#define RTL8211F_INSR 0x1d
-#define RTL8211F_PAGE_SELECT 0x1f
#define RTL8211F_TX_DELAY 0x100
+#define RTL8201F_ISR 0x1e
+#define RTL8201F_IER 0x13
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
+static int rtl8201_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL8201F_ISR);
+
+ return (err < 0) ? err : 0;
+}
+
static int rtl821x_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -46,14 +58,33 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
{
int err;
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43);
err = phy_read(phydev, RTL8211F_INSR);
/* restore to default page 0 */
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
return (err < 0) ? err : 0;
}
+static int rtl8201_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ /* switch to page 7 */
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0x7);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL8201F_IER,
+ BIT(13) | BIT(12) | BIT(11));
+ else
+ err = phy_write(phydev, RTL8201F_IER, 0);
+
+ /* restore to default page 0 */
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+
+ return err;
+}
+
static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
@@ -84,11 +115,13 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
{
int err;
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
RTL8211F_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0);
return err;
}
@@ -102,7 +135,7 @@ static int rtl8211f_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08);
reg = phy_read(phydev, 0x11);
/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
@@ -114,7 +147,7 @@ static int rtl8211f_config_init(struct phy_device *phydev)
phy_write(phydev, 0x11, reg);
/* restore to default page 0 */
- phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
return 0;
}
@@ -129,6 +162,18 @@ static struct phy_driver realtek_drvs[] = {
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
}, {
+ .phy_id = 0x001cc816,
+ .name = "RTL8201F 10/100Mbps Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl8201_ack_interrupt,
+ .config_intr = &rtl8201_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ }, {
.phy_id = 0x001cc912,
.name = "RTL8211B Gigabit Ethernet",
.phy_id_mask = 0x001fffff,
@@ -181,6 +226,7 @@ static struct phy_driver realtek_drvs[] = {
module_phy_driver(realtek_drvs);
static struct mdio_device_id __maybe_unused realtek_tbl[] = {
+ { 0x001cc816, 0x001fffff },
{ 0x001cc912, 0x001fffff },
{ 0x001cc914, 0x001fffff },
{ 0x001cc915, 0x001fffff },
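
The realtek changes funnel all page switching through the shared RTL821x_PAGE_SELECT register: select the page, access the paged register, then restore page 0. A small helper capturing that pattern (an illustration, not something the driver defines):

	static int rtl821x_read_paged(struct phy_device *phydev, u16 page, u32 reg)
	{
		int ret;

		phy_write(phydev, RTL821x_PAGE_SELECT, page);
		ret = phy_read(phydev, reg);
		phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);	/* back to page 0 */

		return ret;
	}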
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 5cb5384697ea..8a1b1f4c1b7c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -26,7 +26,6 @@ struct sfp_bus {
bool started;
};
-
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
@@ -208,7 +207,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
}
EXPORT_SYMBOL_GPL(sfp_parse_support);
-
static LIST_HEAD(sfp_buses);
static DEFINE_MUTEX(sfp_mutex);
@@ -295,7 +293,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
bus->registered = false;
}
-
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
{
if (!bus->registered)
@@ -305,7 +302,7 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
EXPORT_SYMBOL_GPL(sfp_get_module_info);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
- u8 *data)
+ u8 *data)
{
if (!bus->registered)
return -ENOIOCTLCMD;
@@ -330,8 +327,8 @@ void sfp_upstream_stop(struct sfp_bus *bus)
EXPORT_SYMBOL_GPL(sfp_upstream_stop);
struct sfp_bus *sfp_register_upstream(struct device_node *np,
- struct net_device *ndev, void *upstream,
- const struct sfp_upstream_ops *ops)
+ struct net_device *ndev, void *upstream,
+ const struct sfp_upstream_ops *ops)
{
struct sfp_bus *bus = sfp_bus_get(np);
int ret = 0;
@@ -368,7 +365,6 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_unregister_upstream);
-
/* Socket driver entry points */
int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev)
{
@@ -395,7 +391,6 @@ void sfp_remove_phy(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_remove_phy);
-
void sfp_link_up(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index baee371bf767..e381811e5f11 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,5 +1,5 @@
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -88,15 +88,12 @@ static const enum gpiod_flags gpio_flags[] = {
#define T_PROBE_INIT msecs_to_jiffies(300)
#define T_PROBE_RETRY msecs_to_jiffies(100)
-/*
- * SFP modules appear to always have their PHY configured for bus address
+/* SFP modules appear to always have their PHY configured for bus address
* 0x56 (which with mdio-i2c, translates to a PHY address of 22).
*/
#define SFP_PHY_ADDR 22
-/*
- * Give this long for the PHY to reset.
- */
+/* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50
static DEFINE_MUTEX(sfp_mutex);
@@ -150,10 +147,10 @@ static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
/* If the module is present, drive the signals */
if (sfp->gpio[GPIO_TX_DISABLE])
gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE],
- state & SFP_F_TX_DISABLE);
+ state & SFP_F_TX_DISABLE);
if (state & SFP_F_RATE_SELECT)
gpiod_direction_output(sfp->gpio[GPIO_RATE_SELECT],
- state & SFP_F_RATE_SELECT);
+ state & SFP_F_RATE_SELECT);
} else {
/* Otherwise, let them float to the pull-ups */
if (sfp->gpio[GPIO_TX_DISABLE])
@@ -164,7 +161,7 @@ static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
}
static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
- void *buf, size_t len)
+ void *buf, size_t len)
{
struct i2c_msg msgs[2];
int ret;
@@ -186,7 +183,7 @@ static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
}
static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf,
- size_t len)
+ size_t len)
{
return sfp__i2c_read(sfp->i2c, a2 ? 0x51 : 0x50, addr, buf, len);
}
@@ -220,7 +217,6 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
return 0;
}
-
/* Interface */
static unsigned int sfp_get_state(struct sfp *sfp)
{
@@ -295,7 +291,8 @@ static void sfp_sm_next(struct sfp *sfp, unsigned int state,
sfp_sm_set_timer(sfp, timeout);
}
-static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state, unsigned int timeout)
+static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state,
+ unsigned int timeout)
{
sfp->sm_mod_state = state;
sfp_sm_set_timer(sfp, timeout);
@@ -370,7 +367,8 @@ static void sfp_sm_link_check_los(struct sfp *sfp)
static void sfp_sm_fault(struct sfp *sfp, bool warn)
{
if (sfp->sm_retries && !--sfp->sm_retries) {
- dev_err(sfp->dev, "module persistently indicates fault, disabling\n");
+ dev_err(sfp->dev,
+ "module persistently indicates fault, disabling\n");
sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0);
} else {
if (warn)
@@ -461,7 +459,8 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
memcpy(date, sfp->id.ext.datecode, 8);
date[8] = '\0';
- dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", vendor, part, rev, sn, date);
+ dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
+ vendor, part, rev, sn, date);
/* We only support SFP modules, not the legacy GBIC modules. */
if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP ||
@@ -651,7 +650,7 @@ static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo)
}
static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
- u8 *data)
+ u8 *data)
{
unsigned int first, last, len;
int ret;
diff --git a/drivers/net/phy/swphy.h b/drivers/net/phy/swphy.h
index 2f09ac324e18..3668ab8c901a 100644
--- a/drivers/net/phy/swphy.h
+++ b/drivers/net/phy/swphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SWPHY_H
#define SWPHY_H
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
new file mode 100644
index 000000000000..96b33475ea5e
--- /dev/null
+++ b/drivers/net/phy/uPD60620.c
@@ -0,0 +1,109 @@
+/*
+ * Driver for the Renesas PHY uPD60620.
+ *
+ * Copyright (C) 2015 Softing Industrial Automation GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define UPD60620_PHY_ID 0xb8242824
+
+/* Extended Registers and values */
+/* PHY Special Control/Status */
+#define PHY_PHYSCR 0x1F /* PHY.31 */
+#define PHY_PHYSCR_10MB 0x0004 /* PHY speed = 10mb */
+#define PHY_PHYSCR_100MB 0x0008 /* PHY speed = 100mb */
+#define PHY_PHYSCR_DUPLEX 0x0010 /* PHY Duplex */
+
+/* PHY Special Modes */
+#define PHY_SPM 0x12 /* PHY.18 */
+
+/* Init PHY */
+
+static int upd60620_config_init(struct phy_device *phydev)
+{
+ /* Enable support for passive HUBs (could be a strap option) */
+ /* PHYMODE: All speeds, HD in parallel detect */
+ return phy_write(phydev, PHY_SPM, 0x0180 | phydev->mdio.addr);
+}
+
+/* Get PHY status from common registers */
+
+static int upd60620_read_status(struct phy_device *phydev)
+{
+ int phy_state;
+
+ /* Read negotiated state */
+ phy_state = phy_read(phydev, MII_BMSR);
+ if (phy_state < 0)
+ return phy_state;
+
+ phydev->link = 0;
+ phydev->lp_advertising = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phy_state & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) {
+ phy_state = phy_read(phydev, PHY_PHYSCR);
+ if (phy_state < 0)
+ return phy_state;
+
+ if (phy_state & (PHY_PHYSCR_10MB | PHY_PHYSCR_100MB)) {
+ phydev->link = 1;
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+
+ if (phy_state & PHY_PHYSCR_100MB)
+ phydev->speed = SPEED_100;
+ if (phy_state & PHY_PHYSCR_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+
+ phy_state = phy_read(phydev, MII_LPA);
+ if (phy_state < 0)
+ return phy_state;
+
+ phydev->lp_advertising
+ = mii_lpa_to_ethtool_lpa_t(phy_state);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ if (phy_state & LPA_PAUSE_CAP)
+ phydev->pause = 1;
+ if (phy_state & LPA_PAUSE_ASYM)
+ phydev->asym_pause = 1;
+ }
+ }
+ }
+ return 0;
+}
+
+MODULE_DESCRIPTION("Renesas uPD60620 PHY driver");
+MODULE_AUTHOR("Bernd Edlinger <bernd.edlinger@hotmail.de>");
+MODULE_LICENSE("GPL");
+
+static struct phy_driver upd60620_driver[1] = { {
+ .phy_id = UPD60620_PHY_ID,
+ .phy_id_mask = 0xfffffffe,
+ .name = "Renesas uPD60620",
+ .features = PHY_BASIC_FEATURES,
+ .flags = 0,
+ .config_init = upd60620_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = upd60620_read_status,
+} };
+
+module_phy_driver(upd60620_driver);
+
+static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+ { UPD60620_PHY_ID, 0xfffffffe },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, upd60620_tbl);
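Editor's note: upd60620_read_status() above derives link, speed and duplex purely from the vendor PHYSCR bits. A rough standalone sketch of that decode (plain C, not driver code; decode_physcr() and the sample values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the PHY_PHYSCR_* defines in the driver above. */
#define PHY_PHYSCR_10MB		0x0004
#define PHY_PHYSCR_100MB	0x0008
#define PHY_PHYSCR_DUPLEX	0x0010

/* Decode a raw PHYSCR value the way upd60620_read_status() does. */
static void decode_physcr(uint16_t physcr)
{
	if (!(physcr & (PHY_PHYSCR_10MB | PHY_PHYSCR_100MB))) {
		printf("link down\n");
		return;
	}
	printf("link up: %d Mb/s, %s duplex\n",
	       (physcr & PHY_PHYSCR_100MB) ? 100 : 10,
	       (physcr & PHY_PHYSCR_DUPLEX) ? "full" : "half");
}

int main(void)
{
	decode_physcr(PHY_PHYSCR_100MB | PHY_PHYSCR_DUPLEX);	/* 100 Mb/s, full */
	decode_physcr(PHY_PHYSCR_10MB);				/* 10 Mb/s, half */
	return 0;
}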
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 3c55ea357f35..feb92ecd1880 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -502,6 +502,7 @@ plip_receive(unsigned short nibble_timeout, struct net_device *dev,
*data_p = (c0 >> 3) & 0x0f;
write_data (dev, 0x10); /* send ACK */
*ns_p = PLIP_NB_1;
+ /* fall through */
case PLIP_NB_1:
cx = nibble_timeout;
@@ -597,6 +598,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
printk(KERN_DEBUG "%s: receive start\n", dev->name);
rcv->state = PLIP_PK_LENGTH_LSB;
rcv->nibble = PLIP_NB_BEGIN;
+ /* fall through */
case PLIP_PK_LENGTH_LSB:
if (snd->state != PLIP_PK_DONE) {
@@ -617,6 +619,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
return TIMEOUT;
}
rcv->state = PLIP_PK_LENGTH_MSB;
+ /* fall through */
case PLIP_PK_LENGTH_MSB:
if (plip_receive(nibble_timeout, dev,
@@ -639,6 +642,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
rcv->state = PLIP_PK_DATA;
rcv->byte = 0;
rcv->checksum = 0;
+ /* fall through */
case PLIP_PK_DATA:
lbuf = rcv->skb->data;
@@ -651,6 +655,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
rcv->checksum += lbuf[--rcv->byte];
} while (rcv->byte);
rcv->state = PLIP_PK_CHECKSUM;
+ /* fall through */
case PLIP_PK_CHECKSUM:
if (plip_receive(nibble_timeout, dev,
@@ -663,6 +668,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
return ERROR;
}
rcv->state = PLIP_PK_DONE;
+ /* fall through */
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
@@ -708,6 +714,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev,
case PLIP_NB_BEGIN:
write_data (dev, data & 0x0f);
*ns_p = PLIP_NB_1;
+ /* fall through */
case PLIP_NB_1:
write_data (dev, 0x10 | (data & 0x0f));
@@ -722,6 +729,7 @@ plip_send(unsigned short nibble_timeout, struct net_device *dev,
}
write_data (dev, 0x10 | (data >> 4));
*ns_p = PLIP_NB_2;
+ /* fall through */
case PLIP_NB_2:
write_data (dev, (data >> 4));
@@ -810,6 +818,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
&snd->nibble, snd->length.b.lsb))
return TIMEOUT;
snd->state = PLIP_PK_LENGTH_MSB;
+ /* fall through */
case PLIP_PK_LENGTH_MSB:
if (plip_send(nibble_timeout, dev,
@@ -818,6 +827,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
snd->state = PLIP_PK_DATA;
snd->byte = 0;
snd->checksum = 0;
+ /* fall through */
case PLIP_PK_DATA:
do {
@@ -829,6 +839,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
snd->checksum += lbuf[--snd->byte];
} while (snd->byte);
snd->state = PLIP_PK_CHECKSUM;
+ /* fall through */
case PLIP_PK_CHECKSUM:
if (plip_send(nibble_timeout, dev,
@@ -839,6 +850,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
dev_kfree_skb(snd->skb);
dev->stats.tx_packets++;
snd->state = PLIP_PK_DONE;
+ /* fall through */
case PLIP_PK_DONE:
/* Close the connection */
@@ -927,6 +939,7 @@ plip_interrupt(void *dev_id)
switch (nl->connection) {
case PLIP_CN_CLOSING:
netif_wake_queue (dev);
+ /* fall through */
case PLIP_CN_NONE:
case PLIP_CN_SEND:
rcv->state = PLIP_PK_TRIGGER;
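Editor's note: the plip.c hunks above only add "/* fall through */" annotations where the receive/transmit state machines deliberately continue into the next case, which documents the intent and keeps -Wimplicit-fallthrough quiet. A minimal standalone sketch of the pattern (the two-state sender here is hypothetical, not PLIP code):

#include <stdio.h>

enum nibble_state { NB_BEGIN, NB_1, NB_DONE };

static enum nibble_state send_step(enum nibble_state s, unsigned char data)
{
	switch (s) {
	case NB_BEGIN:
		printf("write low nibble 0x%x\n", data & 0x0f);
		s = NB_1;
		/* fall through */
	case NB_1:
		printf("write high nibble 0x%x\n", data >> 4);
		s = NB_DONE;
		break;
	default:
		break;
	}
	return s;
}

int main(void)
{
	send_step(NB_BEGIN, 0xa5);
	return 0;
}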
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297b0066..16c457d6b324 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux PPP network device drivers.
#
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 814fd8fae67d..1b28e6e702f5 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -69,7 +69,7 @@ struct asyncppp {
struct tasklet_struct tsk;
- atomic_t refcnt;
+ refcount_t refcnt;
struct semaphore dead_sem;
struct ppp_channel chan; /* interface to generic ppp layer */
unsigned char obuf[OBUFSIZE];
@@ -140,14 +140,14 @@ static struct asyncppp *ap_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
- atomic_inc(&ap->refcnt);
+ refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
}
static void ap_put(struct asyncppp *ap)
{
- if (atomic_dec_and_test(&ap->refcnt))
+ if (refcount_dec_and_test(&ap->refcnt))
up(&ap->dead_sem);
}
@@ -185,7 +185,7 @@ ppp_asynctty_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
- atomic_set(&ap->refcnt, 1);
+ refcount_set(&ap->refcnt, 1);
sema_init(&ap->dead_sem, 0);
ap->chan.private = ap;
@@ -234,7 +234,7 @@ ppp_asynctty_close(struct tty_struct *tty)
* our channel ops (i.e. ppp_async_send/ioctl) are in progress
* by the time it returns.
*/
- if (!atomic_dec_and_test(&ap->refcnt))
+ if (!refcount_dec_and_test(&ap->refcnt))
down(&ap->dead_sem);
tasklet_kill(&ap->tsk);
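Editor's note: the asyncppp conversion above swaps an open-coded atomic_t reference count for refcount_t, whose operations saturate and warn on misuse instead of silently wrapping. A minimal kernel-style sketch of the same get/put pattern (struct foo and its helpers are hypothetical; assumes kernel headers, so it is not a standalone program):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object using the same pattern as asyncppp's refcnt. */
struct foo {
	refcount_t refcnt;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* initial reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);	/* warns if it would increment from zero */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		kfree(f);		/* last reference dropped */
}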
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e365866600ba..d8e5747ff4e3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -51,6 +51,7 @@
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
@@ -84,7 +85,7 @@ struct ppp_file {
struct sk_buff_head xq; /* pppd transmit queue */
struct sk_buff_head rq; /* receive queue for pppd */
wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
- atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
+ refcount_t refcnt; /* # refs (incl /dev/ppp attached) */
int hdrlen; /* space to leave for headers */
int index; /* interface unit / channel number */
int dead; /* unit/channel has been shut down */
@@ -389,7 +390,7 @@ static int ppp_open(struct inode *inode, struct file *file)
/*
* This could (should?) be enforced by the permissions on /dev/ppp.
*/
- if (!capable(CAP_NET_ADMIN))
+ if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
return -EPERM;
return 0;
}
@@ -408,7 +409,7 @@ static int ppp_release(struct inode *unused, struct file *file)
unregister_netdevice(ppp->dev);
rtnl_unlock();
}
- if (atomic_dec_and_test(&pf->refcnt)) {
+ if (refcount_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
case INTERFACE:
ppp_destroy_interface(PF_TO_PPP(pf));
@@ -881,7 +882,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
mutex_lock(&pn->all_ppp_mutex);
ppp = ppp_find_unit(pn, unit);
if (ppp) {
- atomic_inc(&ppp->file.refcnt);
+ refcount_inc(&ppp->file.refcnt);
file->private_data = &ppp->file;
err = 0;
}
@@ -896,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
spin_lock_bh(&pn->all_channels_lock);
chan = ppp_find_channel(pn, unit);
if (chan) {
- atomic_inc(&chan->file.refcnt);
+ refcount_inc(&chan->file.refcnt);
file->private_data = &chan->file;
err = 0;
}
@@ -959,7 +960,10 @@ static __net_exit void ppp_exit_net(struct net *net)
unregister_netdevice_many(&list);
rtnl_unlock();
+ mutex_destroy(&pn->all_ppp_mutex);
idr_destroy(&pn->units_idr);
+ WARN_ON_ONCE(!list_empty(&pn->all_channels));
+ WARN_ON_ONCE(!list_empty(&pn->new_channels));
}
static struct pernet_operations ppp_net_ops = {
@@ -1348,7 +1352,7 @@ static int ppp_dev_init(struct net_device *dev)
* that ppp_destroy_interface() won't run before the device gets
* unregistered.
*/
- atomic_inc(&ppp->file.refcnt);
+ refcount_inc(&ppp->file.refcnt);
return 0;
}
@@ -1377,7 +1381,7 @@ static void ppp_dev_priv_destructor(struct net_device *dev)
struct ppp *ppp;
ppp = netdev_priv(dev);
- if (atomic_dec_and_test(&ppp->file.refcnt))
+ if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
}
@@ -2676,7 +2680,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
- if (atomic_dec_and_test(&pch->file.refcnt))
+ if (refcount_dec_and_test(&pch->file.refcnt))
ppp_destroy_channel(pch);
}
@@ -3046,7 +3050,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
pf->kind = kind;
skb_queue_head_init(&pf->xq);
skb_queue_head_init(&pf->rq);
- atomic_set(&pf->refcnt, 1);
+ refcount_set(&pf->refcnt, 1);
init_waitqueue_head(&pf->rwait);
}
@@ -3164,7 +3168,7 @@ ppp_connect_channel(struct channel *pch, int unit)
list_add_tail(&pch->clist, &ppp->channels);
++ppp->n_channels;
pch->ppp = ppp;
- atomic_inc(&ppp->file.refcnt);
+ refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
@@ -3195,7 +3199,7 @@ ppp_disconnect_channel(struct channel *pch)
if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
- if (atomic_dec_and_test(&ppp->file.refcnt))
+ if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
err = 0;
}
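Editor's note: besides the refcount_t conversion, the ppp_generic.c hunk changes ppp_open() to check CAP_NET_ADMIN with ns_capable() against the user namespace of the opener's credentials rather than the initial namespace. A minimal sketch of that pattern in a hypothetical chardev open handler (assumes kernel headers):

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/fs.h>

/* Hypothetical open(): require CAP_NET_ADMIN in the user namespace
 * that owns the opener's credentials, so a container's root can use
 * the device without holding the capability in the init namespace.
 */
static int foo_open(struct inode *inode, struct file *file)
{
	if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}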
diff --git a/drivers/net/ppp/ppp_mppe.h b/drivers/net/ppp/ppp_mppe.h
index 7a14e058c668..677b3b3f7425 100644
--- a/drivers/net/ppp/ppp_mppe.h
+++ b/drivers/net/ppp/ppp_mppe.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define MPPE_PAD 4 /* MPPE growth per frame */
#define MPPE_MAX_KEY_LEN 16 /* largest key length (128-bit) */
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7868c29071d4..7196f00f0991 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -46,6 +46,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/refcount.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>
@@ -72,7 +73,7 @@ struct syncppp {
struct tasklet_struct tsk;
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion dead_cmp;
struct ppp_channel chan; /* interface to generic ppp layer */
};
@@ -141,14 +142,14 @@ static struct syncppp *sp_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ap = tty->disc_data;
if (ap != NULL)
- atomic_inc(&ap->refcnt);
+ refcount_inc(&ap->refcnt);
read_unlock(&disc_data_lock);
return ap;
}
static void sp_put(struct syncppp *ap)
{
- if (atomic_dec_and_test(&ap->refcnt))
+ if (refcount_dec_and_test(&ap->refcnt))
complete(&ap->dead_cmp);
}
@@ -182,7 +183,7 @@ ppp_sync_open(struct tty_struct *tty)
skb_queue_head_init(&ap->rqueue);
tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
- atomic_set(&ap->refcnt, 1);
+ refcount_set(&ap->refcnt, 1);
init_completion(&ap->dead_cmp);
ap->chan.private = ap;
@@ -232,7 +233,7 @@ ppp_sync_close(struct tty_struct *tty)
* our channel ops (i.e. ppp_sync_send/ioctl) are in progress
* by the time it returns.
*/
- if (!atomic_dec_and_test(&ap->refcnt))
+ if (!refcount_dec_and_test(&ap->refcnt))
wait_for_completion(&ap->dead_cmp);
tasklet_kill(&ap->tsk);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 436dd78c396a..cc63102ca96e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -106,8 +106,8 @@ static int slip_esc6(unsigned char *p, unsigned char *d, int len);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
@@ -763,12 +763,8 @@ static struct slip *sl_alloc(dev_t line)
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
- init_timer(&sl->keepalive_timer);
- sl->keepalive_timer.data = (unsigned long)sl;
- sl->keepalive_timer.function = sl_keepalive;
- init_timer(&sl->outfill_timer);
- sl->outfill_timer.data = (unsigned long)sl;
- sl->outfill_timer.function = sl_outfill;
+ timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+ timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
slip_devs[i] = dev;
return sl;
@@ -1392,9 +1388,9 @@ module_exit(slip_exit);
* added by Stanislav Voronyi. All changes before marked VSV
*/
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, outfill_timer);
spin_lock(&sl->lock);
@@ -1423,9 +1419,9 @@ out:
spin_unlock(&sl->lock);
}
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, keepalive_timer);
spin_lock(&sl->lock);
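Editor's note: the slip.c hunks above are the timer API conversion: init_timer() plus the .data/.function assignments becomes timer_setup(), and the callback recovers its structure with from_timer() instead of casting an unsigned long. A minimal kernel-style sketch of the converted shape (struct foo, foo_poll() and the one-second period are hypothetical; assumes kernel headers):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical state embedding a timer, converted like slip's
 * keepalive/outfill timers.
 */
struct foo {
	struct timer_list poll_timer;
	unsigned long polls;
};

static void foo_poll(struct timer_list *t)
{
	/* Recover the containing structure from the timer pointer. */
	struct foo *f = from_timer(f, t, poll_timer);

	f->polls++;
	mod_timer(&f->poll_timer, jiffies + HZ);	/* re-arm in ~1s */
}

static void foo_init(struct foo *f)
{
	timer_setup(&f->poll_timer, foo_poll, 0);
	mod_timer(&f->poll_timer, jiffies + HZ);
}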
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index cf32aadf508f..c420e5948522 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* slip.h Define the SLIP device driver interface and constants.
*
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 1b10fcc6a58d..e9489b88407c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
* and validate that the result isn't NULL - in case we are
* racing against queue removal.
*/
- int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ int numvtaps = READ_ONCE(tap->numvtaps);
__u32 rxq;
if (!numvtaps)
@@ -1032,6 +1032,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETSNDBUF:
if (get_user(s, sp))
return -EFAULT;
+ if (s <= 0)
+ return -EINVAL;
q->sk.sk_sndbuf = s;
return 0;
@@ -1075,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
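Editor's note: the tap.c hunk replaces ACCESS_ONCE() with READ_ONCE(), the current way to take a single, tear-free snapshot of a value another context updates, and also rejects non-positive TUNSETSNDBUF values. A minimal kernel-style sketch of the READ_ONCE() usage (pick_queue() and its arguments are hypothetical):

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical lockless fast path: the writer updates *numqueues
 * under a lock, but the reader only needs one stable snapshot, so
 * READ_ONCE() stops the compiler from re-loading or tearing it.
 */
static unsigned int pick_queue(unsigned int *numqueues, u32 hash)
{
	unsigned int n = READ_ONCE(*numqueues);

	return n ? hash % n : 0;
}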
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index c57e85889751..f582d81a5091 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the network team driver
#
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ae53e899259f..a468439969df 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1112,7 +1112,7 @@ static int team_upper_dev_link(struct team *team, struct team_port *port)
lag_upper_info.tx_type = team->mode->lag_tx_type;
err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
- &lag_upper_info);
+ &lag_upper_info, NULL);
if (err)
return err;
port->dev->priv_flags |= IFF_TEAM_PORT;
@@ -1914,7 +1914,8 @@ static int team_netpoll_setup(struct net_device *dev,
}
#endif
-static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
+ struct netlink_ext_ack *extack)
{
struct team *team = netdev_priv(dev);
int err;
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 1468ddf424cc..a5ef97010eb3 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -137,7 +137,13 @@ static struct team_port *lb_htpm_select_tx_port(struct team *team,
struct sk_buff *skb,
unsigned char hash)
{
- return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
+ struct team_port *port;
+
+ port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
+ if (likely(port))
+ return port;
+ /* If no valid port in the table, fall back to simple hash */
+ return lb_hash_select_tx_port(team, lb_priv, skb, hash);
}
struct lb_select_tx_port {
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
new file mode 100644
index 000000000000..228d4aa6d9ae
--- /dev/null
+++ b/drivers/net/thunderbolt.c
@@ -0,0 +1,1363 @@
+/*
+ * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Amir Levy <amir.jer.levy@intel.com>
+ * Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/sizes.h>
+#include <linux/thunderbolt.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include <net/ip6_checksum.h>
+
+/* Protocol timeouts in ms */
+#define TBNET_LOGIN_DELAY 4500
+#define TBNET_LOGIN_TIMEOUT 500
+#define TBNET_LOGOUT_TIMEOUT 100
+
+#define TBNET_RING_SIZE 256
+#define TBNET_LOCAL_PATH 0xf
+#define TBNET_LOGIN_RETRIES 60
+#define TBNET_LOGOUT_RETRIES 5
+#define TBNET_MATCH_FRAGS_ID BIT(1)
+#define TBNET_MAX_MTU SZ_64K
+#define TBNET_FRAME_SIZE SZ_4K
+#define TBNET_MAX_PAYLOAD_SIZE \
+ (TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
+/* Rx packets need to hold space for skb_shared_info */
+#define TBNET_RX_MAX_SIZE \
+ (TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define TBNET_RX_PAGE_ORDER get_order(TBNET_RX_MAX_SIZE)
+#define TBNET_RX_PAGE_SIZE (PAGE_SIZE << TBNET_RX_PAGE_ORDER)
+
+#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
+
+/**
+ * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
+ * @frame_size: size of the data in the frame
+ * @frame_index: running index on the frames
+ * @frame_id: ID of the frame to match frames to a specific packet
+ * @frame_count: how many frames assemble a full packet
+ *
+ * Each data frame passed to the high-speed DMA ring has this header. If
+ * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
+ * supported then @frame_id is filled, otherwise it stays %0.
+ */
+struct thunderbolt_ip_frame_header {
+ u32 frame_size;
+ u16 frame_index;
+ u16 frame_id;
+ u32 frame_count;
+};
+
+enum thunderbolt_ip_frame_pdf {
+ TBIP_PDF_FRAME_START = 1,
+ TBIP_PDF_FRAME_END,
+};
+
+enum thunderbolt_ip_type {
+ TBIP_LOGIN,
+ TBIP_LOGIN_RESPONSE,
+ TBIP_LOGOUT,
+ TBIP_STATUS,
+};
+
+struct thunderbolt_ip_header {
+ u32 route_hi;
+ u32 route_lo;
+ u32 length_sn;
+ uuid_t uuid;
+ uuid_t initiator_uuid;
+ uuid_t target_uuid;
+ u32 type;
+ u32 command_id;
+};
+
+#define TBIP_HDR_LENGTH_MASK GENMASK(5, 0)
+#define TBIP_HDR_SN_MASK GENMASK(28, 27)
+#define TBIP_HDR_SN_SHIFT 27
+
+struct thunderbolt_ip_login {
+ struct thunderbolt_ip_header hdr;
+ u32 proto_version;
+ u32 transmit_path;
+ u32 reserved[4];
+};
+
+#define TBIP_LOGIN_PROTO_VERSION 1
+
+struct thunderbolt_ip_login_response {
+ struct thunderbolt_ip_header hdr;
+ u32 status;
+ u32 receiver_mac[2];
+ u32 receiver_mac_len;
+ u32 reserved[4];
+};
+
+struct thunderbolt_ip_logout {
+ struct thunderbolt_ip_header hdr;
+};
+
+struct thunderbolt_ip_status {
+ struct thunderbolt_ip_header hdr;
+ u32 status;
+};
+
+struct tbnet_stats {
+ u64 tx_packets;
+ u64 rx_packets;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_errors;
+ u64 rx_length_errors;
+ u64 rx_over_errors;
+ u64 rx_crc_errors;
+ u64 rx_missed_errors;
+};
+
+struct tbnet_frame {
+ struct net_device *dev;
+ struct page *page;
+ struct ring_frame frame;
+};
+
+struct tbnet_ring {
+ struct tbnet_frame frames[TBNET_RING_SIZE];
+ unsigned int cons;
+ unsigned int prod;
+ struct tb_ring *ring;
+};
+
+/**
+ * struct tbnet - ThunderboltIP network driver private data
+ * @svc: XDomain service the driver is bound to
+ * @xd: XDomain the service belongs to
+ * @handler: ThunderboltIP configuration protocol handler
+ * @dev: Networking device
+ * @napi: NAPI structure for Rx polling
+ * @stats: Network statistics
+ * @skb: Network packet that is currently processed on Rx path
+ * @command_id: ID used for next configuration protocol packet
+ * @login_sent: ThunderboltIP login message successfully sent
+ * @login_received: ThunderboltIP login message received from the remote
+ * host
+ * @transmit_path: HopID the other end needs to use when building
+ * the opposite side path.
+ * @connection_lock: Lock serializing access to @login_sent,
+ * @login_received and @transmit_path.
+ * @login_retries: Number of login retries currently done
+ * @login_work: Worker to send ThunderboltIP login packets
+ * @connected_work: Worker that finalizes the ThunderboltIP connection
+ * setup and enables DMA paths for high speed data
+ * transfers
+ * @rx_hdr: Copy of the currently processed Rx frame. Used when a
+ * network packet consists of multiple Thunderbolt frames.
+ * In host byte order.
+ * @rx_ring: Software ring holding Rx frames
+ * @frame_id: Frame ID used for the next Tx packet
+ * (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
+ * @tx_ring: Software ring holding Tx frames
+ */
+struct tbnet {
+ const struct tb_service *svc;
+ struct tb_xdomain *xd;
+ struct tb_protocol_handler handler;
+ struct net_device *dev;
+ struct napi_struct napi;
+ struct tbnet_stats stats;
+ struct sk_buff *skb;
+ atomic_t command_id;
+ bool login_sent;
+ bool login_received;
+ u32 transmit_path;
+ struct mutex connection_lock;
+ int login_retries;
+ struct delayed_work login_work;
+ struct work_struct connected_work;
+ struct thunderbolt_ip_frame_header rx_hdr;
+ struct tbnet_ring rx_ring;
+ atomic_t frame_id;
+ struct tbnet_ring tx_ring;
+};
+
+/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
+static const uuid_t tbnet_dir_uuid =
+ UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
+ 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
+
+/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
+static const uuid_t tbnet_svc_uuid =
+ UUID_INIT(0x798f589e, 0x3616, 0x8a47,
+ 0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
+
+static struct tb_property_dir *tbnet_dir;
+
+static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
+ u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
+ enum thunderbolt_ip_type type, size_t size, u32 command_id)
+{
+ u32 length_sn;
+
+ /* Length does not include route_hi/lo and length_sn fields */
+ length_sn = (size - 3 * 4) / 4;
+ length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
+
+ hdr->route_hi = upper_32_bits(route);
+ hdr->route_lo = lower_32_bits(route);
+ hdr->length_sn = length_sn;
+ uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
+ uuid_copy(&hdr->initiator_uuid, initiator_uuid);
+ uuid_copy(&hdr->target_uuid, target_uuid);
+ hdr->type = type;
+ hdr->command_id = command_id;
+}
+
+static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
+ u32 command_id)
+{
+ struct thunderbolt_ip_login_response reply;
+ struct tb_xdomain *xd = net->xd;
+
+ memset(&reply, 0, sizeof(reply));
+ tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
+ xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
+ command_id);
+ memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
+ reply.receiver_mac_len = ETH_ALEN;
+
+ return tb_xdomain_response(xd, &reply, sizeof(reply),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tbnet_login_request(struct tbnet *net, u8 sequence)
+{
+ struct thunderbolt_ip_login_response reply;
+ struct thunderbolt_ip_login request;
+ struct tb_xdomain *xd = net->xd;
+
+ memset(&request, 0, sizeof(request));
+ tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
+ xd->remote_uuid, TBIP_LOGIN, sizeof(request),
+ atomic_inc_return(&net->command_id));
+
+ request.proto_version = TBIP_LOGIN_PROTO_VERSION;
+ request.transmit_path = TBNET_LOCAL_PATH;
+
+ return tb_xdomain_request(xd, &request, sizeof(request),
+ TB_CFG_PKG_XDOMAIN_RESP, &reply,
+ sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
+ TBNET_LOGIN_TIMEOUT);
+}
+
+static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
+ u32 command_id)
+{
+ struct thunderbolt_ip_status reply;
+ struct tb_xdomain *xd = net->xd;
+
+ memset(&reply, 0, sizeof(reply));
+ tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
+ xd->remote_uuid, TBIP_STATUS, sizeof(reply),
+ atomic_inc_return(&net->command_id));
+ return tb_xdomain_response(xd, &reply, sizeof(reply),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tbnet_logout_request(struct tbnet *net)
+{
+ struct thunderbolt_ip_logout request;
+ struct thunderbolt_ip_status reply;
+ struct tb_xdomain *xd = net->xd;
+
+ memset(&request, 0, sizeof(request));
+ tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
+ xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
+ atomic_inc_return(&net->command_id));
+
+ return tb_xdomain_request(xd, &request, sizeof(request),
+ TB_CFG_PKG_XDOMAIN_RESP, &reply,
+ sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
+ TBNET_LOGOUT_TIMEOUT);
+}
+
+static void start_login(struct tbnet *net)
+{
+ mutex_lock(&net->connection_lock);
+ net->login_sent = false;
+ net->login_received = false;
+ mutex_unlock(&net->connection_lock);
+
+ queue_delayed_work(system_long_wq, &net->login_work,
+ msecs_to_jiffies(1000));
+}
+
+static void stop_login(struct tbnet *net)
+{
+ cancel_delayed_work_sync(&net->login_work);
+ cancel_work_sync(&net->connected_work);
+}
+
+static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
+{
+ return tf->frame.size ? : TBNET_FRAME_SIZE;
+}
+
+static void tbnet_free_buffers(struct tbnet_ring *ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < TBNET_RING_SIZE; i++) {
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
+ struct tbnet_frame *tf = &ring->frames[i];
+ enum dma_data_direction dir;
+ unsigned int order;
+ size_t size;
+
+ if (!tf->page)
+ continue;
+
+ if (ring->ring->is_tx) {
+ dir = DMA_TO_DEVICE;
+ order = 0;
+ size = tbnet_frame_size(tf);
+ } else {
+ dir = DMA_FROM_DEVICE;
+ order = TBNET_RX_PAGE_ORDER;
+ size = TBNET_RX_PAGE_SIZE;
+ }
+
+ if (tf->frame.buffer_phy)
+ dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
+ dir);
+
+ __free_pages(tf->page, order);
+ tf->page = NULL;
+ }
+
+ ring->cons = 0;
+ ring->prod = 0;
+}
+
+static void tbnet_tear_down(struct tbnet *net, bool send_logout)
+{
+ netif_carrier_off(net->dev);
+ netif_stop_queue(net->dev);
+
+ stop_login(net);
+
+ mutex_lock(&net->connection_lock);
+
+ if (net->login_sent && net->login_received) {
+ int retries = TBNET_LOGOUT_RETRIES;
+
+ while (send_logout && retries-- > 0) {
+ int ret = tbnet_logout_request(net);
+ if (ret != -ETIMEDOUT)
+ break;
+ }
+
+ tb_ring_stop(net->rx_ring.ring);
+ tb_ring_stop(net->tx_ring.ring);
+ tbnet_free_buffers(&net->rx_ring);
+ tbnet_free_buffers(&net->tx_ring);
+
+ if (tb_xdomain_disable_paths(net->xd))
+ netdev_warn(net->dev, "failed to disable DMA paths\n");
+ }
+
+ net->login_retries = 0;
+ net->login_sent = false;
+ net->login_received = false;
+
+ mutex_unlock(&net->connection_lock);
+}
+
+static int tbnet_handle_packet(const void *buf, size_t size, void *data)
+{
+ const struct thunderbolt_ip_login *pkg = buf;
+ struct tbnet *net = data;
+ u32 command_id;
+ int ret = 0;
+ u32 sequence;
+ u64 route;
+
+ /* Make sure the packet is for us */
+ if (size < sizeof(struct thunderbolt_ip_header))
+ return 0;
+ if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
+ return 0;
+ if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
+ return 0;
+
+ route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
+ route &= ~BIT_ULL(63);
+ if (route != net->xd->route)
+ return 0;
+
+ sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
+ sequence >>= TBIP_HDR_SN_SHIFT;
+ command_id = pkg->hdr.command_id;
+
+ switch (pkg->hdr.type) {
+ case TBIP_LOGIN:
+ if (!netif_running(net->dev))
+ break;
+
+ ret = tbnet_login_response(net, route, sequence,
+ pkg->hdr.command_id);
+ if (!ret) {
+ mutex_lock(&net->connection_lock);
+ net->login_received = true;
+ net->transmit_path = pkg->transmit_path;
+
+ /* If we have reached the maximum number of retries or the
+ * other end logged out earlier, schedule another round of
+ * login retries.
+ */
+ if (net->login_retries >= TBNET_LOGIN_RETRIES ||
+ !net->login_sent) {
+ net->login_retries = 0;
+ queue_delayed_work(system_long_wq,
+ &net->login_work, 0);
+ }
+ mutex_unlock(&net->connection_lock);
+
+ queue_work(system_long_wq, &net->connected_work);
+ }
+ break;
+
+ case TBIP_LOGOUT:
+ ret = tbnet_logout_response(net, route, sequence, command_id);
+ if (!ret)
+ tbnet_tear_down(net, false);
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (ret)
+ netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
+
+ return 1;
+}
+
+static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
+{
+ return ring->prod - ring->cons;
+}
+
+static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
+{
+ struct tbnet_ring *ring = &net->rx_ring;
+ int ret;
+
+ while (nbuffers--) {
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
+ unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
+ struct tbnet_frame *tf = &ring->frames[index];
+ dma_addr_t dma_addr;
+
+ if (tf->page)
+ break;
+
+ /* Allocate page (order > 0) so that it can hold maximum
+ * ThunderboltIP frame (4kB) and the additional room for
+ * SKB shared info required by build_skb().
+ */
+ tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
+ if (!tf->page) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ dma_addr = dma_map_page(dma_dev, tf->page, 0,
+ TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ tf->frame.buffer_phy = dma_addr;
+ tf->dev = net->dev;
+
+ tb_ring_rx(ring->ring, &tf->frame);
+
+ ring->prod++;
+ }
+
+ return 0;
+
+err_free:
+ tbnet_free_buffers(ring);
+ return ret;
+}
+
+static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
+{
+ struct tbnet_ring *ring = &net->tx_ring;
+ struct tbnet_frame *tf;
+ unsigned int index;
+
+ if (!tbnet_available_buffers(ring))
+ return NULL;
+
+ index = ring->cons++ & (TBNET_RING_SIZE - 1);
+
+ tf = &ring->frames[index];
+ tf->frame.size = 0;
+ tf->frame.buffer_phy = 0;
+
+ return tf;
+}
+
+static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
+ bool canceled)
+{
+ struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
+ struct device *dma_dev = tb_ring_dma_device(ring);
+ struct tbnet *net = netdev_priv(tf->dev);
+
+ dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
+ DMA_TO_DEVICE);
+ tf->frame.buffer_phy = 0;
+
+ /* Return buffer to the ring */
+ net->tx_ring.prod++;
+
+ if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
+ netif_wake_queue(net->dev);
+}
+
+static int tbnet_alloc_tx_buffers(struct tbnet *net)
+{
+ struct tbnet_ring *ring = &net->tx_ring;
+ unsigned int i;
+
+ for (i = 0; i < TBNET_RING_SIZE; i++) {
+ struct tbnet_frame *tf = &ring->frames[i];
+
+ tf->page = alloc_page(GFP_KERNEL);
+ if (!tf->page) {
+ tbnet_free_buffers(ring);
+ return -ENOMEM;
+ }
+
+ tf->dev = net->dev;
+ tf->frame.callback = tbnet_tx_callback;
+ tf->frame.sof = TBIP_PDF_FRAME_START;
+ tf->frame.eof = TBIP_PDF_FRAME_END;
+ }
+
+ ring->cons = 0;
+ ring->prod = TBNET_RING_SIZE - 1;
+
+ return 0;
+}
+
+static void tbnet_connected_work(struct work_struct *work)
+{
+ struct tbnet *net = container_of(work, typeof(*net), connected_work);
+ bool connected;
+ int ret;
+
+ if (netif_carrier_ok(net->dev))
+ return;
+
+ mutex_lock(&net->connection_lock);
+ connected = net->login_sent && net->login_received;
+ mutex_unlock(&net->connection_lock);
+
+ if (!connected)
+ return;
+
+ /* Both logins successful so enable the high-speed DMA paths and
+ * start the network device queue.
+ */
+ ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
+ net->rx_ring.ring->hop,
+ net->transmit_path,
+ net->tx_ring.ring->hop);
+ if (ret) {
+ netdev_err(net->dev, "failed to enable DMA paths\n");
+ return;
+ }
+
+ tb_ring_start(net->tx_ring.ring);
+ tb_ring_start(net->rx_ring.ring);
+
+ ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
+ if (ret)
+ goto err_stop_rings;
+
+ ret = tbnet_alloc_tx_buffers(net);
+ if (ret)
+ goto err_free_rx_buffers;
+
+ netif_carrier_on(net->dev);
+ netif_start_queue(net->dev);
+ return;
+
+err_free_rx_buffers:
+ tbnet_free_buffers(&net->rx_ring);
+err_stop_rings:
+ tb_ring_stop(net->rx_ring.ring);
+ tb_ring_stop(net->tx_ring.ring);
+}
+
+static void tbnet_login_work(struct work_struct *work)
+{
+ struct tbnet *net = container_of(work, typeof(*net), login_work.work);
+ unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
+ int ret;
+
+ if (netif_carrier_ok(net->dev))
+ return;
+
+ ret = tbnet_login_request(net, net->login_retries % 4);
+ if (ret) {
+ if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
+ queue_delayed_work(system_long_wq, &net->login_work,
+ delay);
+ } else {
+ netdev_info(net->dev, "ThunderboltIP login timed out\n");
+ }
+ } else {
+ net->login_retries = 0;
+
+ mutex_lock(&net->connection_lock);
+ net->login_sent = true;
+ mutex_unlock(&net->connection_lock);
+
+ queue_work(system_long_wq, &net->connected_work);
+ }
+}
+
+static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
+ const struct thunderbolt_ip_frame_header *hdr)
+{
+ u32 frame_id, frame_count, frame_size, frame_index;
+ unsigned int size;
+
+ if (tf->frame.flags & RING_DESC_CRC_ERROR) {
+ net->stats.rx_crc_errors++;
+ return false;
+ } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
+ net->stats.rx_over_errors++;
+ return false;
+ }
+
+ /* Should be greater than just the header, i.e. the frame contains data */
+ size = tbnet_frame_size(tf);
+ if (size <= sizeof(*hdr)) {
+ net->stats.rx_length_errors++;
+ return false;
+ }
+
+ frame_count = le32_to_cpu(hdr->frame_count);
+ frame_size = le32_to_cpu(hdr->frame_size);
+ frame_index = le16_to_cpu(hdr->frame_index);
+ frame_id = le16_to_cpu(hdr->frame_id);
+
+ if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
+ net->stats.rx_length_errors++;
+ return false;
+ }
+
+ /* In case we're in the middle of a packet, validate the frame
+ * header based on the first fragment of the packet.
+ */
+ if (net->skb && net->rx_hdr.frame_count) {
+ /* Check the frame count fits the count field */
+ if (frame_count != net->rx_hdr.frame_count) {
+ net->stats.rx_length_errors++;
+ return false;
+ }
+
+ /* Check the frame identifiers are incremented correctly,
+ * and id is matching.
+ */
+ if (frame_index != net->rx_hdr.frame_index + 1 ||
+ frame_id != net->rx_hdr.frame_id) {
+ net->stats.rx_missed_errors++;
+ return false;
+ }
+
+ if (net->skb->len + frame_size > TBNET_MAX_MTU) {
+ net->stats.rx_length_errors++;
+ return false;
+ }
+
+ return true;
+ }
+
+ /* Start of packet, validate the frame header */
+ if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
+ net->stats.rx_length_errors++;
+ return false;
+ }
+ if (frame_index != 0) {
+ net->stats.rx_missed_errors++;
+ return false;
+ }
+
+ return true;
+}
+
+static int tbnet_poll(struct napi_struct *napi, int budget)
+{
+ struct tbnet *net = container_of(napi, struct tbnet, napi);
+ unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
+ struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
+ unsigned int rx_packets = 0;
+
+ while (rx_packets < budget) {
+ const struct thunderbolt_ip_frame_header *hdr;
+ unsigned int hdr_size = sizeof(*hdr);
+ struct sk_buff *skb = NULL;
+ struct ring_frame *frame;
+ struct tbnet_frame *tf;
+ struct page *page;
+ bool last = true;
+ u32 frame_size;
+
+ /* Return some buffers to hardware; one at a time is too
+ * slow, so allocate MAX_SKB_FRAGS buffers at the same
+ * time.
+ */
+ if (cleaned_count >= MAX_SKB_FRAGS) {
+ tbnet_alloc_rx_buffers(net, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ frame = tb_ring_poll(net->rx_ring.ring);
+ if (!frame)
+ break;
+
+ dma_unmap_page(dma_dev, frame->buffer_phy,
+ TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ tf = container_of(frame, typeof(*tf), frame);
+
+ page = tf->page;
+ tf->page = NULL;
+ net->rx_ring.cons++;
+ cleaned_count++;
+
+ hdr = page_address(page);
+ if (!tbnet_check_frame(net, tf, hdr)) {
+ __free_pages(page, TBNET_RX_PAGE_ORDER);
+ dev_kfree_skb_any(net->skb);
+ net->skb = NULL;
+ continue;
+ }
+
+ frame_size = le32_to_cpu(hdr->frame_size);
+
+ skb = net->skb;
+ if (!skb) {
+ skb = build_skb(page_address(page),
+ TBNET_RX_PAGE_SIZE);
+ if (!skb) {
+ __free_pages(page, TBNET_RX_PAGE_ORDER);
+ net->stats.rx_errors++;
+ break;
+ }
+
+ skb_reserve(skb, hdr_size);
+ skb_put(skb, frame_size);
+
+ net->skb = skb;
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, hdr_size, frame_size,
+ TBNET_RX_PAGE_SIZE - hdr_size);
+ }
+
+ net->rx_hdr.frame_size = frame_size;
+ net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
+ net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
+ net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
+ last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;
+
+ rx_packets++;
+ net->stats.rx_bytes += frame_size;
+
+ if (last) {
+ skb->protocol = eth_type_trans(skb, net->dev);
+ napi_gro_receive(&net->napi, skb);
+ net->skb = NULL;
+ }
+ }
+
+ net->stats.rx_packets += rx_packets;
+
+ if (cleaned_count)
+ tbnet_alloc_rx_buffers(net, cleaned_count);
+
+ if (rx_packets >= budget)
+ return budget;
+
+ napi_complete_done(napi, rx_packets);
+ /* Re-enable the ring interrupt */
+ tb_ring_poll_complete(net->rx_ring.ring);
+
+ return rx_packets;
+}
+
+static void tbnet_start_poll(void *data)
+{
+ struct tbnet *net = data;
+
+ napi_schedule(&net->napi);
+}
+
+static int tbnet_open(struct net_device *dev)
+{
+ struct tbnet *net = netdev_priv(dev);
+ struct tb_xdomain *xd = net->xd;
+ u16 sof_mask, eof_mask;
+ struct tb_ring *ring;
+
+ netif_carrier_off(dev);
+
+ ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
+ RING_FLAG_FRAME);
+ if (!ring) {
+ netdev_err(dev, "failed to allocate Tx ring\n");
+ return -ENOMEM;
+ }
+ net->tx_ring.ring = ring;
+
+ sof_mask = BIT(TBIP_PDF_FRAME_START);
+ eof_mask = BIT(TBIP_PDF_FRAME_END);
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
+ RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
+ eof_mask, tbnet_start_poll, net);
+ if (!ring) {
+ netdev_err(dev, "failed to allocate Rx ring\n");
+ tb_ring_free(net->tx_ring.ring);
+ net->tx_ring.ring = NULL;
+ return -ENOMEM;
+ }
+ net->rx_ring.ring = ring;
+
+ napi_enable(&net->napi);
+ start_login(net);
+
+ return 0;
+}
+
+static int tbnet_stop(struct net_device *dev)
+{
+ struct tbnet *net = netdev_priv(dev);
+
+ napi_disable(&net->napi);
+
+ tbnet_tear_down(net, true);
+
+ tb_ring_free(net->rx_ring.ring);
+ net->rx_ring.ring = NULL;
+ tb_ring_free(net->tx_ring.ring);
+ net->tx_ring.ring = NULL;
+
+ return 0;
+}
+
+static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr))
+ return false;
+
+ tf->frame.buffer_phy = dma_addr;
+ return true;
+}
+
+static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
+ struct tbnet_frame **frames, u32 frame_count)
+{
+ struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
+ struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
+ __wsum wsum = htonl(skb->len - skb_transport_offset(skb));
+ unsigned int i, len, offset = skb_transport_offset(skb);
+ __be16 protocol = skb->protocol;
+ void *data = skb->data;
+ void *dest = hdr + 1;
+ __sum16 *tucso;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ /* No need to calculate checksum so we just update the
+ * total frame count and map the frames for DMA.
+ */
+ for (i = 0; i < frame_count; i++) {
+ hdr = page_address(frames[i]->page);
+ hdr->frame_count = cpu_to_le32(frame_count);
+ if (!tbnet_xmit_map(dma_dev, frames[i]))
+ goto err_unmap;
+ }
+
+ return true;
+ }
+
+ if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, vh;
+
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
+ if (!vhdr)
+ return false;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ }
+
+ /* Data points to the beginning of the packet.
+ * Compute the absolute offset of each checksum in the packet:
+ * ipcso will update the IP checksum.
+ * tucso will update the TCP/UDP checksum.
+ */
+ if (protocol == htons(ETH_P_IP)) {
+ __sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
+
+ *ipcso = 0;
+ *ipcso = ip_fast_csum(dest + skb_network_offset(skb),
+ ip_hdr(skb)->ihl);
+
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
+ else
+ return false;
+
+ *tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, 0,
+ ip_hdr(skb)->protocol, 0);
+ } else if (skb_is_gso_v6(skb)) {
+ tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
+ *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ return false;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
+ *tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr, 0,
+ ipv6_hdr(skb)->nexthdr, 0);
+ } else {
+ return false;
+ }
+
+ /* First frame was headers, rest of the frames contain data.
+ * Calculate checksum over each frame.
+ */
+ for (i = 0; i < frame_count; i++) {
+ hdr = page_address(frames[i]->page);
+ dest = (void *)(hdr + 1) + offset;
+ len = le32_to_cpu(hdr->frame_size) - offset;
+ wsum = csum_partial(dest, len, wsum);
+ hdr->frame_count = cpu_to_le32(frame_count);
+
+ offset = 0;
+ }
+
+ *tucso = csum_fold(wsum);
+
+ /* Checksum is finally calculated and we don't touch the memory
+ * anymore, so DMA map the frames now.
+ */
+ for (i = 0; i < frame_count; i++) {
+ if (!tbnet_xmit_map(dma_dev, frames[i]))
+ goto err_unmap;
+ }
+
+ return true;
+
+err_unmap:
+ while (i--)
+ dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
+
+ return false;
+}
+
+static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
+ unsigned int *len)
+{
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
+
+ *len = skb_frag_size(frag);
+ return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+}
+
+static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct tbnet *net = netdev_priv(dev);
+ struct tbnet_frame *frames[MAX_SKB_FRAGS];
+ u16 frame_id = atomic_read(&net->frame_id);
+ struct thunderbolt_ip_frame_header *hdr;
+ unsigned int len = skb_headlen(skb);
+ unsigned int data_len = skb->len;
+ unsigned int nframes, i;
+ unsigned int frag = 0;
+ void *src = skb->data;
+ u32 frame_index = 0;
+ bool unmap = false;
+ void *dest;
+
+ nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
+ if (tbnet_available_buffers(&net->tx_ring) < nframes) {
+ netif_stop_queue(net->dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ frames[frame_index] = tbnet_get_tx_buffer(net);
+ if (!frames[frame_index])
+ goto err_drop;
+
+ hdr = page_address(frames[frame_index]->page);
+ dest = hdr + 1;
+
+ /* If overall packet is bigger than the frame data size */
+ while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
+ unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;
+
+ hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
+ hdr->frame_index = cpu_to_le16(frame_index);
+ hdr->frame_id = cpu_to_le16(frame_id);
+
+ do {
+ if (len > size_left) {
+ /* Copy a full frame's worth of data into
+ * the Tx buffer, then break and go to the
+ * next frame.
+ */
+ memcpy(dest, src, size_left);
+ len -= size_left;
+ dest += size_left;
+ src += size_left;
+ break;
+ }
+
+ memcpy(dest, src, len);
+ size_left -= len;
+ dest += len;
+
+ if (unmap) {
+ kunmap_atomic(src);
+ unmap = false;
+ }
+
+ /* Ensure all fragments have been processed */
+ if (frag < skb_shinfo(skb)->nr_frags) {
+ /* Map and then unmap quickly */
+ src = tbnet_kmap_frag(skb, frag++, &len);
+ unmap = true;
+ } else if (unlikely(size_left > 0)) {
+ goto err_drop;
+ }
+ } while (size_left > 0);
+
+ data_len -= TBNET_MAX_PAYLOAD_SIZE;
+ frame_index++;
+
+ frames[frame_index] = tbnet_get_tx_buffer(net);
+ if (!frames[frame_index])
+ goto err_drop;
+
+ hdr = page_address(frames[frame_index]->page);
+ dest = hdr + 1;
+ }
+
+ hdr->frame_size = cpu_to_le32(data_len);
+ hdr->frame_index = cpu_to_le16(frame_index);
+ hdr->frame_id = cpu_to_le16(frame_id);
+
+ frames[frame_index]->frame.size = data_len + sizeof(*hdr);
+
+ /* In case the remaining data_len is smaller than a frame */
+ while (len < data_len) {
+ memcpy(dest, src, len);
+ data_len -= len;
+ dest += len;
+
+ if (unmap) {
+ kunmap_atomic(src);
+ unmap = false;
+ }
+
+ if (frag < skb_shinfo(skb)->nr_frags) {
+ src = tbnet_kmap_frag(skb, frag++, &len);
+ unmap = true;
+ } else if (unlikely(data_len > 0)) {
+ goto err_drop;
+ }
+ }
+
+ memcpy(dest, src, data_len);
+
+ if (unmap)
+ kunmap_atomic(src);
+
+ if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
+ goto err_drop;
+
+ for (i = 0; i < frame_index + 1; i++)
+ tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
+
+ if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
+ atomic_inc(&net->frame_id);
+
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ /* We can re-use the buffers */
+ net->tx_ring.cons -= frame_index;
+
+ dev_kfree_skb_any(skb);
+ net->stats.tx_errors++;
+
+ return NETDEV_TX_OK;
+}
+
+static void tbnet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tbnet *net = netdev_priv(dev);
+
+ stats->tx_packets = net->stats.tx_packets;
+ stats->rx_packets = net->stats.rx_packets;
+ stats->tx_bytes = net->stats.tx_bytes;
+ stats->rx_bytes = net->stats.rx_bytes;
+ stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
+ net->stats.rx_over_errors + net->stats.rx_crc_errors +
+ net->stats.rx_missed_errors;
+ stats->tx_errors = net->stats.tx_errors;
+ stats->rx_length_errors = net->stats.rx_length_errors;
+ stats->rx_over_errors = net->stats.rx_over_errors;
+ stats->rx_crc_errors = net->stats.rx_crc_errors;
+ stats->rx_missed_errors = net->stats.rx_missed_errors;
+}
+
+static const struct net_device_ops tbnet_netdev_ops = {
+ .ndo_open = tbnet_open,
+ .ndo_stop = tbnet_stop,
+ .ndo_start_xmit = tbnet_start_xmit,
+ .ndo_get_stats64 = tbnet_get_stats64,
+};
+
+static void tbnet_generate_mac(struct net_device *dev)
+{
+ const struct tbnet *net = netdev_priv(dev);
+ const struct tb_xdomain *xd = net->xd;
+ u8 phy_port;
+ u32 hash;
+
+ phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
+
+ /* Unicast and locally administered MAC */
+ dev->dev_addr[0] = phy_port << 4 | 0x02;
+ hash = jhash2((u32 *)xd->local_uuid, 4, 0);
+ memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
+ hash = jhash2((u32 *)xd->local_uuid, 4, hash);
+ dev->dev_addr[5] = hash & 0xff;
+}
+
+static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
+{
+ struct tb_xdomain *xd = tb_service_parent(svc);
+ struct net_device *dev;
+ struct tbnet *net;
+ int ret;
+
+ dev = alloc_etherdev(sizeof(*net));
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, &svc->dev);
+
+ net = netdev_priv(dev);
+ INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
+ INIT_WORK(&net->connected_work, tbnet_connected_work);
+ mutex_init(&net->connection_lock);
+ atomic_set(&net->command_id, 0);
+ atomic_set(&net->frame_id, 0);
+ net->svc = svc;
+ net->dev = dev;
+ net->xd = xd;
+
+ tbnet_generate_mac(dev);
+
+ strcpy(dev->name, "thunderbolt%d");
+ dev->netdev_ops = &tbnet_netdev_ops;
+
+ /* ThunderboltIP takes advantage of TSO packets but instead of
+ * segmenting them we just split the packet into Thunderbolt
+ * frames (maximum payload size of each frame is 4084 bytes) and
+ * calculate checksum over the whole packet here.
+ *
+ * The receiving side does the opposite if the host OS supports
+ * LRO, otherwise it needs to split the large packet into MTU
+ * sized smaller packets.
+ *
+ * In order to receive large packets from the networking stack,
+ * we need to announce support for most of the offloading
+ * features here.
+ */
+ dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA;
+ dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
+
+ netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
+
+ /* MTU range: 68 - 65522 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
+
+ net->handler.uuid = &tbnet_svc_uuid;
+ net->handler.callback = tbnet_handle_packet;
+ net->handler.data = net;
+ tb_register_protocol_handler(&net->handler);
+
+ tb_service_set_drvdata(svc, net);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ tb_unregister_protocol_handler(&net->handler);
+ free_netdev(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tbnet_remove(struct tb_service *svc)
+{
+ struct tbnet *net = tb_service_get_drvdata(svc);
+
+ unregister_netdev(net->dev);
+ tb_unregister_protocol_handler(&net->handler);
+ free_netdev(net->dev);
+}
+
+static void tbnet_shutdown(struct tb_service *svc)
+{
+ tbnet_tear_down(tb_service_get_drvdata(svc), true);
+}
+
+static int __maybe_unused tbnet_suspend(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tbnet *net = tb_service_get_drvdata(svc);
+
+ stop_login(net);
+ if (netif_running(net->dev)) {
+ netif_device_detach(net->dev);
+ tb_ring_stop(net->rx_ring.ring);
+ tb_ring_stop(net->tx_ring.ring);
+ tbnet_free_buffers(&net->rx_ring);
+ tbnet_free_buffers(&net->tx_ring);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tbnet_resume(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tbnet *net = tb_service_get_drvdata(svc);
+
+ netif_carrier_off(net->dev);
+ if (netif_running(net->dev)) {
+ netif_device_attach(net->dev);
+ start_login(net);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tbnet_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
+};
+
+static const struct tb_service_id tbnet_ids[] = {
+ { TB_SERVICE("network", 1) },
+ { },
+};
+MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
+
+static struct tb_service_driver tbnet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "thunderbolt-net",
+ .pm = &tbnet_pm_ops,
+ },
+ .probe = tbnet_probe,
+ .remove = tbnet_remove,
+ .shutdown = tbnet_shutdown,
+ .id_table = tbnet_ids,
+};
+
+static int __init tbnet_init(void)
+{
+ int ret;
+
+ tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
+ if (!tbnet_dir)
+ return -ENOMEM;
+
+ tb_property_add_immediate(tbnet_dir, "prtcid", 1);
+ tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
+ tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
+ tb_property_add_immediate(tbnet_dir, "prtcstns",
+ TBNET_MATCH_FRAGS_ID);
+
+ ret = tb_register_property_dir("network", tbnet_dir);
+ if (ret) {
+ tb_property_free_dir(tbnet_dir);
+ return ret;
+ }
+
+ return tb_register_service_driver(&tbnet_driver);
+}
+module_init(tbnet_init);
+
+static void __exit tbnet_exit(void)
+{
+ tb_unregister_service_driver(&tbnet_driver);
+ tb_unregister_property_dir("network", tbnet_dir);
+ tb_property_free_dir(tbnet_dir);
+}
+module_exit(tbnet_exit);
+
+MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
+MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Thunderbolt network driver");
+MODULE_LICENSE("GPL v2");
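Editor's note: the ThunderboltIP control header above packs the payload length (in 32-bit words, excluding the route and length_sn words) together with a 2-bit sequence number into length_sn, using the TBIP_HDR_* masks. A standalone sketch of that arithmetic, mirroring tbnet_fill_header() (the 92-byte sample size is just an example value):

#include <stdint.h>
#include <stdio.h>

#define TBIP_HDR_LENGTH_MASK	0x0000003fu	/* GENMASK(5, 0) */
#define TBIP_HDR_SN_MASK	0x18000000u	/* GENMASK(28, 27) */
#define TBIP_HDR_SN_SHIFT	27

/* Pack as tbnet_fill_header() does: length counts the dwords after
 * route_hi, route_lo and length_sn; the sequence number is 2 bits.
 */
static uint32_t pack_length_sn(size_t pkt_size, uint8_t sequence)
{
	uint32_t length_sn = (uint32_t)((pkt_size - 3 * 4) / 4);

	length_sn |= ((uint32_t)sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
	return length_sn;
}

int main(void)
{
	uint32_t v = pack_length_sn(92, 2);	/* 92-byte packet, sequence 2 */

	printf("length=%u dwords, sn=%u\n",
	       v & TBIP_HDR_LENGTH_MASK,
	       (v & TBIP_HDR_SN_MASK) >> TBIP_HDR_SN_SHIFT);
	return 0;
}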
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5550f56cb895..95749006d687 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -75,6 +75,7 @@
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -121,7 +122,8 @@ do { \
#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
- IFF_MULTI_QUEUE)
+ IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
+
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
@@ -172,6 +174,9 @@ struct tun_file {
u16 queue_index;
unsigned int ifindex;
};
+ struct napi_struct napi;
+ bool napi_enabled;
+ struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
struct skb_array tx_array;
@@ -229,6 +234,75 @@ struct tun_struct {
struct bpf_prog __rcu *xdp_prog;
};
+static int tun_napi_receive(struct napi_struct *napi, int budget)
+{
+ struct tun_file *tfile = container_of(napi, struct tun_file, napi);
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ struct sk_buff *skb;
+ int received = 0;
+
+ __skb_queue_head_init(&process_queue);
+
+ spin_lock(&queue->lock);
+ skb_queue_splice_tail_init(queue, &process_queue);
+ spin_unlock(&queue->lock);
+
+ while (received < budget && (skb = __skb_dequeue(&process_queue))) {
+ napi_gro_receive(napi, skb);
+ ++received;
+ }
+
+ if (!skb_queue_empty(&process_queue)) {
+ spin_lock(&queue->lock);
+ skb_queue_splice(&process_queue, queue);
+ spin_unlock(&queue->lock);
+ }
+
+ return received;
+}
+
+static int tun_napi_poll(struct napi_struct *napi, int budget)
+{
+ unsigned int received;
+
+ received = tun_napi_receive(napi, budget);
+
+ if (received < budget)
+ napi_complete_done(napi, received);
+
+ return received;
+}
+
+static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
+ bool napi_en)
+{
+ tfile->napi_enabled = napi_en;
+ if (napi_en) {
+ netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&tfile->napi);
+ mutex_init(&tfile->napi_mutex);
+ }
+}
+
+static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ napi_disable(&tfile->napi);
+}
+
+static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+{
+ if (tfile->napi_enabled)
+ netif_napi_del(&tfile->napi);
+}
+
+static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+{
+ return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+}
+
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
@@ -370,9 +444,9 @@ static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
spin_unlock_bh(&tun->lock);
}
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = (struct tun_struct *)data;
+ struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
@@ -380,25 +454,28 @@ static void tun_flow_cleanup(unsigned long data)
tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
- spin_lock_bh(&tun->lock);
+ spin_lock(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
struct hlist_node *n;
hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
unsigned long this_timer;
- count++;
+
this_timer = e->updated + delay;
- if (time_before_eq(this_timer, jiffies))
+ if (time_before_eq(this_timer, jiffies)) {
tun_flow_delete(tun, e);
- else if (time_before(this_timer, next_timer))
+ continue;
+ }
+ count++;
+ if (time_before(this_timer, next_timer))
next_timer = this_timer;
}
}
if (count)
mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
- spin_unlock_bh(&tun->lock);
+ spin_unlock(&tun->lock);
}
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
@@ -469,7 +546,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
u32 numqueues = 0;
rcu_read_lock();
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
if (txq) {
@@ -541,6 +618,11 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
+ if (tun && clean) {
+ tun_napi_disable(tun, tfile);
+ tun_napi_del(tun, tfile);
+ }
+
if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
@@ -598,6 +680,7 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
+ tun_napi_disable(tun, tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
@@ -613,6 +696,7 @@ static void tun_detach_all(struct net_device *dev)
synchronize_net();
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
+ tun_napi_del(tun, tfile);
/* Drop read queue */
tun_queue_purge(tfile);
sock_put(&tfile->sk);
@@ -631,7 +715,8 @@ static void tun_detach_all(struct net_device *dev)
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
+static int tun_attach(struct tun_struct *tun, struct file *file,
+ bool skip_filter, bool napi)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -677,10 +762,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
- if (tfile->detached)
+ if (tfile->detached) {
tun_enable_queue(tfile);
- else
+ } else {
sock_hold(&tfile->sk);
+ tun_napi_init(tun, tfile, napi);
+ }
tun_set_real_num_queues(tun);
@@ -692,7 +779,7 @@ out:
return err;
}
-static struct tun_struct *__tun_get(struct tun_file *tfile)
+static struct tun_struct *tun_get(struct tun_file *tfile)
{
struct tun_struct *tun;
@@ -705,11 +792,6 @@ static struct tun_struct *__tun_get(struct tun_file *tfile)
return tun;
}
-static struct tun_struct *tun_get(struct file *file)
-{
- return __tun_get(file->private_data);
-}
-
static void tun_put(struct tun_struct *tun)
{
dev_put(tun->dev);
@@ -864,7 +946,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
if (txq >= numqueues)
@@ -956,13 +1038,33 @@ static void tun_poll_controller(struct net_device *dev)
* Tun only receives frames when:
* 1) the char device endpoint gets data from user space
* 2) the tun socket gets a sendmsg call from user space
- * Since both of those are synchronous operations, we are guaranteed
- * never to have pending data when we poll for it
- * so there is nothing to do here but return.
+ * If NAPI is not enabled, since both of those are synchronous
+ * operations, we are guaranteed never to have pending data when we poll
+ * for it so there is nothing to do here but return.
* We need this though so netpoll recognizes us as an interface that
* supports polling, which enables bridge devices in virt setups to
* still use netconsole
+ * If NAPI is enabled, however, we need to schedule polling for all
+ * queues unless we are using napi_gro_frags(), which we call in
+ * process context and not in NAPI context.
*/
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (tun->flags & IFF_NAPI) {
+ struct tun_file *tfile;
+ int i;
+
+ if (tun_napi_frags_enabled(tun))
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < tun->numqueues; i++) {
+ tfile = rcu_dereference(tun->tfiles[i]);
+ if (tfile->napi_enabled)
+ napi_schedule(&tfile->napi);
+ }
+ rcu_read_unlock();
+ }
return;
}
#endif
@@ -1039,7 +1141,7 @@ static u32 tun_xdp_query(struct net_device *dev)
return 0;
}
-static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -1083,7 +1185,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
- .ndo_xdp = tun_xdp,
+ .ndo_bpf = tun_xdp,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1094,7 +1196,7 @@ static void tun_flow_init(struct tun_struct *tun)
INIT_HLIST_HEAD(&tun->flows[i]);
tun->ageing_time = TUN_FLOW_EXPIRE;
- setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
mod_timer(&tun->flow_gc_timer,
round_jiffies_up(jiffies + tun->ageing_time));
}
@@ -1149,7 +1251,7 @@ static void tun_net_init(struct net_device *dev)
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
+ struct tun_struct *tun = tun_get(tfile);
struct sock *sk;
unsigned int mask = 0;
@@ -1178,6 +1280,64 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
return mask;
}
+static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
+ size_t len,
+ const struct iov_iter *it)
+{
+ struct sk_buff *skb;
+ size_t linear;
+ int err;
+ int i;
+
+ if (it->nr_segs > MAX_SKB_FRAGS + 1)
+ return ERR_PTR(-ENOMEM);
+
+ local_bh_disable();
+ skb = napi_get_frags(&tfile->napi);
+ local_bh_enable();
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ linear = iov_iter_single_seg_count(it);
+ err = __skb_grow(skb, linear);
+ if (err)
+ goto free;
+
+ skb->len = len;
+ skb->data_len = len - linear;
+ skb->truesize += skb->data_len;
+
+ for (i = 1; i < it->nr_segs; i++) {
+ size_t fragsz = it->iov[i].iov_len;
+ unsigned long offset;
+ struct page *page;
+ void *data;
+
+ if (fragsz == 0 || fragsz > PAGE_SIZE) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ local_bh_disable();
+ data = napi_alloc_frag(fragsz);
+ local_bh_enable();
+ if (!data) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ page = virt_to_head_page(data);
+ offset = data - page_address(page);
+ skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+ }
+
+ return skb;
+free:
+ /* frees skb and all frags allocated with napi_alloc_frag() */
+ napi_free_frags(&tfile->napi);
+ return ERR_PTR(err);
+}
+
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
@@ -1315,6 +1475,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
xdp.data_hard_start = buf;
xdp.data = buf + pad;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -1326,6 +1487,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
+ rcu_read_unlock();
return NULL;
case XDP_TX:
xdp_xmit = true;
@@ -1358,7 +1520,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
- rcu_read_lock();
+ rcu_read_unlock();
return NULL;
}
@@ -1391,6 +1553,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int err;
u32 rxhash;
int skb_xdp = 1;
+ bool frags = tun_napi_frags_enabled(tun);
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -1448,7 +1611,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
zerocopy = true;
}
- if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
+ if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
/* For the packet that is not easy to be processed
* (e.g gso or jumbo packet), we will do it at after
* skb was created with generic XDP routine.
@@ -1469,10 +1632,24 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
linear = tun16_to_cpu(tun, gso.hdr_len);
}
- skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
+ if (frags) {
+ mutex_lock(&tfile->napi_mutex);
+ skb = tun_napi_alloc_frags(tfile, copylen, from);
+ /* tun_napi_alloc_frags() enforces a layout for the skb.
+ * If zerocopy is enabled, then this layout will be
+ * overwritten by zerocopy_sg_from_iter().
+ */
+ zerocopy = false;
+ } else {
+ skb = tun_alloc_skb(tfile, align, copylen, linear,
+ noblock);
+ }
+
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ if (frags)
+ mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
}
@@ -1484,6 +1661,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
+
return -EFAULT;
}
}
@@ -1491,6 +1673,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
+ if (frags) {
+ tfile->napi.skb = NULL;
+ mutex_unlock(&tfile->napi_mutex);
+ }
+
return -EINVAL;
}
@@ -1518,7 +1705,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb->dev = tun->dev;
break;
case IFF_TAP:
- skb->protocol = eth_type_trans(skb, tun->dev);
+ if (!frags)
+ skb->protocol = eth_type_trans(skb, tun->dev);
break;
}
@@ -1552,11 +1740,41 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
rxhash = __skb_get_hash_symmetric(skb);
-#ifndef CONFIG_4KSTACKS
- tun_rx_batched(tun, tfile, skb, more);
-#else
- netif_rx_ni(skb);
-#endif
+
+ if (frags) {
+ /* Exercise flow dissector code path. */
+ u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+
+ if (unlikely(headlen > skb_headlen(skb))) {
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ napi_free_frags(&tfile->napi);
+ mutex_unlock(&tfile->napi_mutex);
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ local_bh_disable();
+ napi_gro_frags(&tfile->napi);
+ local_bh_enable();
+ mutex_unlock(&tfile->napi_mutex);
+ } else if (tfile->napi_enabled) {
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ int queue_len;
+
+ spin_lock_bh(&queue->lock);
+ __skb_queue_tail(queue, skb);
+ queue_len = skb_queue_len(queue);
+ spin_unlock(&queue->lock);
+
+ if (!more || queue_len > NAPI_POLL_WEIGHT)
+ napi_schedule(&tfile->napi);
+
+ local_bh_enable();
+ } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
+ tun_rx_batched(tun, tfile, skb, more);
+ } else {
+ netif_rx_ni(skb);
+ }
stats = get_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -1572,8 +1790,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct tun_struct *tun = tun_get(file);
struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = tun_get(tfile);
ssize_t result;
if (!tun)
@@ -1757,7 +1975,7 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
+ struct tun_struct *tun = tun_get(tfile);
ssize_t len = iov_iter_count(to), ret;
if (!tun)
@@ -1834,7 +2052,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
int ret;
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
- struct tun_struct *tun = __tun_get(tfile);
+ struct tun_struct *tun = tun_get(tfile);
if (!tun)
return -EBADFD;
@@ -1850,7 +2068,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
int flags)
{
struct tun_file *tfile = container_of(sock, struct tun_file, socket);
- struct tun_struct *tun = __tun_get(tfile);
+ struct tun_struct *tun = tun_get(tfile);
int ret;
if (!tun)
@@ -1882,7 +2100,7 @@ static int tun_peek_len(struct socket *sock)
struct tun_struct *tun;
int ret = 0;
- tun = __tun_get(tfile);
+ tun = tun_get(tfile);
if (!tun)
return 0;
@@ -1962,6 +2180,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tfile->detached)
return -EINVAL;
+ if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!(ifr->ifr_flags & IFF_NAPI) ||
+ (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
+ return -EINVAL;
+ }
+
dev = __dev_get_by_name(net, ifr->ifr_name);
if (dev) {
if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1983,7 +2210,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
return err;
- err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
+ ifr->ifr_flags & IFF_NAPI);
if (err < 0)
return err;
@@ -2072,7 +2300,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
if (err < 0)
goto err_free_flow;
@@ -2144,6 +2372,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
@@ -2222,7 +2452,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false);
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -2271,7 +2501,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = 0;
rtnl_lock();
- tun = __tun_get(tfile);
+ tun = tun_get(tfile);
if (cmd == TUNSETIFF) {
ret = -EEXIST;
if (tun)
@@ -2429,6 +2659,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
break;
}
+ if (sndbuf <= 0) {
+ ret = -EINVAL;
+ break;
+ }
tun->sndbuf = sndbuf;
tun_set_sndbuf(tun);
@@ -2618,15 +2852,16 @@ static int tun_chr_close(struct inode *inode, struct file *file)
}
#ifdef CONFIG_PROC_FS
-static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
+static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
+ struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr));
rtnl_lock();
- tun = tun_get(f);
+ tun = tun_get(tfile);
if (tun)
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
rtnl_unlock();
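
The tun hunks above hook the character-device receive path into NAPI: tun_get_user() queues or GRO-feeds packets, tun_napi_receive() drains a per-queue list under the queue lock, and tun_napi_poll() completes the NAPI instance once it runs under budget. As a rough sketch of that poll pattern — a hypothetical driver, not the tun code itself; struct demo_queue and its fields are invented for illustration:

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
        struct demo_queue *q = container_of(napi, struct demo_queue, napi);
        struct sk_buff *skb;
        int done = 0;

        /* Hand at most 'budget' packets to the GRO engine. */
        while (done < budget && (skb = skb_dequeue(&q->rx_queue))) {
                napi_gro_receive(napi, skb);
                done++;
        }

        /* Draining below budget means the queue is empty: complete the
         * NAPI instance so a later producer can napi_schedule() again.
         */
        if (done < budget)
                napi_complete_done(napi, done);

        return done;
}

The producer side only has to queue the skb and call napi_schedule() when the backlog grows, which is the shape of the napi_enabled branch added to tun_get_user() above.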
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 37fb46aee341..27307a4ab003 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Network drivers
#
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 522d2900cd1d..f4d7362eb325 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -245,7 +245,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
* - We are allowed to put 4 bytes at tail if skb_cloned()
* is false (and if we have 4 bytes of tailroom)
*
- * TCP packets for example are cloned, but skb_header_release()
+ * TCP packets for example are cloned, but __skb_header_release()
* was called in tcp stack, allowing us to use headroom for our needs.
*/
if (!skb_header_cloned(skb) &&
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b2ff88e69a81..3d4f7959dabb 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->suspend)
+ if (priv && priv->suspend)
priv->suspend(dev);
return usbnet_suspend(intf, message);
@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->resume)
+ if (priv && priv->resume)
priv->resume(dev);
return usbnet_resume(intf);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index dbc90313f472..18d36dff97ea 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -611,9 +611,9 @@ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
catc->stats_vals[index >> 1] = data;
}
-static void catc_stats_timer(unsigned long data)
+static void catc_stats_timer(struct timer_list *t)
{
- struct catc *catc = (void *) data;
+ struct catc *catc = from_timer(catc, t, timer);
int i;
for (i = 0; i < 8; i++)
@@ -805,9 +805,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
spin_lock_init(&catc->tx_lock);
spin_lock_init(&catc->ctrl_lock);
- init_timer(&catc->timer);
- catc->timer.data = (long) catc;
- catc->timer.function = catc_stats_timer;
+ timer_setup(&catc->timer, catc_stats_timer, 0);
catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
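
The catc conversion above is one instance of the timer-API migration that recurs throughout this section (lan78xx, sierra_net, usbnet, vxlan, the WAN drivers, ar5523): open-coded init_timer() plus .function/.data assignments become timer_setup(), and the callback takes a struct timer_list pointer, recovering its container with from_timer() instead of casting an unsigned long cookie. A minimal sketch of the pattern, using an invented struct demo_dev purely for illustration:

struct demo_dev {
        struct timer_list poll_timer;
        /* ... driver state ... */
};

static void demo_poll(struct timer_list *t)
{
        struct demo_dev *dev = from_timer(dev, t, poll_timer);

        /* periodic work on dev, then re-arm */
        mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void demo_start(struct demo_dev *dev)
{
        timer_setup(&dev->poll_timer, demo_poll, 0);
        mod_timer(&dev->poll_timer, jiffies + HZ);
}

Drivers whose callback needs the net_device rather than the private struct (hdlc_cisco, hdlc_fr, sbni, sdla below) additionally stash a back-pointer next to the timer, since the unsigned long cookie that used to carry it is gone.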
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3e7a3ac3a362..05dca3e5c93d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -230,7 +230,7 @@ skip:
goto bad_desc;
}
- if (header.usb_cdc_ether_desc) {
+ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
/* because of Zaurus, we may be ignoring the host
* side link address we were given.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 47cab1bde065..9e1b74590682 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -771,7 +771,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
int err;
u8 iface_no;
struct usb_cdc_parsed_header hdr;
- u16 curr_ntb_format;
+ __le16 curr_ntb_format;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -889,7 +889,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
goto error2;
}
- if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
+ if (curr_ntb_format == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT)) {
dev_info(&intf->dev, "resetting NTB format to 16-bit");
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
USB_TYPE_CLASS | USB_DIR_OUT
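
The cdc_ncm hunks above are an endianness fix: the NTB format word returned by the device is little-endian on the wire, so it is now stored as __le16 and compared against a cpu_to_le16() constant instead of a host-order value. A hedged sketch of the idiom, as an illustrative helper rather than the driver code:

static bool ntb32_selected(__le16 wire_val)
{
        /* compare in wire (little-endian) order ... */
        return wire_val == cpu_to_le16(USB_CDC_NCM_NTB32_FORMAT);
        /* ... or, equivalently, convert once and compare in host order:
         * return le16_to_cpu(wire_val) == USB_CDC_NCM_NTB32_FORMAT;
         */
}

Either form is correct on both big- and little-endian hosts; comparing a raw u16 against a host-order constant, as the old code did, only happens to work on little-endian machines.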
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index d7a3379ea668..981c931a7a1f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -76,7 +76,6 @@
#define MOD_AUTHOR "Option Wireless"
#define MOD_DESCRIPTION "USB High Speed Option driver"
-#define MOD_LICENSE "GPL"
#define HSO_MAX_NET_DEVICES 10
#define HSO__MAX_MTU 2048
@@ -2263,7 +2262,6 @@ static void hso_serial_common_free(struct hso_serial *serial)
static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
int rx_size, int tx_size)
{
- struct device *dev;
int minor;
int i;
@@ -2277,7 +2275,6 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->parent->dev = tty_port_register_device_attr(&serial->port,
tty_drv, minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
- dev = serial->parent->dev;
/* fill in specific data for later use */
serial->minor = minor;
@@ -3288,7 +3285,7 @@ module_exit(hso_exit);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESCRIPTION);
-MODULE_LICENSE(MOD_LICENSE);
+MODULE_LICENSE("GPL");
/* change the debug level (eg: insmod hso.ko debug=0x04) */
MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index d49c7103085e..7275761a1177 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -149,6 +149,7 @@ struct ipheth_device {
u8 bulk_in;
u8 bulk_out;
struct delayed_work carrier_work;
+ bool confirmed_pairing;
};
static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
@@ -259,7 +260,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += len;
-
+ dev->confirmed_pairing = true;
netif_rx(skb);
ipheth_rx_submit(dev, GFP_ATOMIC);
}
@@ -281,14 +282,24 @@ static void ipheth_sndbulk_callback(struct urb *urb)
__func__, status);
dev_kfree_skb_irq(dev->tx_skb);
- netif_wake_queue(dev->net);
+ if (status == 0)
+ netif_wake_queue(dev->net);
+ else
+ // on URB error, trigger immediate poll
+ schedule_delayed_work(&dev->carrier_work, 0);
}
static int ipheth_carrier_set(struct ipheth_device *dev)
{
- struct usb_device *udev = dev->udev;
+ struct usb_device *udev;
int retval;
+ if (!dev)
+ return 0;
+ if (!dev->confirmed_pairing)
+ return 0;
+
+ udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
@@ -303,11 +314,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
+ if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
netif_carrier_on(dev->net);
- else
+ if (dev->tx_urb->status != -EINPROGRESS)
+ netif_wake_queue(dev->net);
+ } else {
netif_carrier_off(dev->net);
-
+ netif_stop_queue(dev->net);
+ }
return 0;
}
@@ -387,7 +401,6 @@ static int ipheth_open(struct net_device *net)
return retval;
schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
- netif_start_queue(net);
return retval;
}
@@ -491,7 +504,7 @@ static int ipheth_probe(struct usb_interface *intf,
dev->udev = udev;
dev->net = netdev;
dev->intf = intf;
-
+ dev->confirmed_pairing = false;
/* Set up endpoints */
hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
if (hintf == NULL) {
@@ -542,7 +555,9 @@ static int ipheth_probe(struct usb_interface *intf,
retval = -EIO;
goto err_register_netdev;
}
-
+ // carrier down and transmit queues stopped until packet from device
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
return 0;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 0161f77641fa..94c7804903c4 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3516,11 +3516,9 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
-static void lan78xx_stat_monitor(unsigned long param)
+static void lan78xx_stat_monitor(struct timer_list *t)
{
- struct lan78xx_net *dev;
-
- dev = (struct lan78xx_net *)param;
+ struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
@@ -3571,10 +3569,8 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
netdev->ethtool_ops = &lan78xx_ethtool_ops;
- dev->stat_monitor.function = lan78xx_stat_monitor;
- dev->stat_monitor.data = (unsigned long)dev;
dev->delta = 1;
- init_timer(&dev->stat_monitor);
+ timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
mutex_init(&dev->stats.access_lock);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8c3733608271..c750cf7c042b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -221,7 +221,7 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
/* Account for reference in struct qmimux_priv_priv */
dev_hold(real_dev);
- err = netdev_upper_dev_link(real_dev, new_dev);
+ err = netdev_upper_dev_link(real_dev, new_dev, NULL);
if (err)
goto out_unregister_netdev;
@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
if (rawip) {
+ skb_reset_mac_header(skb);
skb->dev = dev->net; /* normally set by eth_type_trans */
skb->protocol = proto;
return 1;
@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* errors aren't fatal - we can live with the dynamic address */
- if (cdc_ether) {
+ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
@@ -1238,6 +1239,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
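
The qmi_wwan rx_fixup hunk above resets the MAC header before handing raw-IP frames up the stack, since there is no Ethernet header for eth_type_trans() to parse. A small hedged sketch of that receive fixup — an illustrative helper, not the driver code:

static void demo_rx_rawip(struct net_device *net, struct sk_buff *skb,
                          __be16 proto)
{
        skb_reset_mac_header(skb);      /* zero-length MAC header */
        skb->dev = net;                 /* normally set by eth_type_trans() */
        skb->protocol = proto;
        netif_rx(skb);
}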
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 2110ab3513f0..c43087e06696 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -189,9 +189,6 @@ struct lsi_umts_dual {
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
-/* Forward definitions */
-static void sierra_sync_timer(unsigned long syncdata);
-
/* Our own net device operations structure */
static const struct net_device_ops sierra_net_device_ops = {
.ndo_open = usbnet_open,
@@ -475,8 +472,6 @@ static void sierra_net_dosync(struct usbnet *dev)
"Send SYNC failed, status %d\n", status);
/* Now, start a timer and make sure we get the Restart Indication */
- priv->sync_timer.function = sierra_sync_timer;
- priv->sync_timer.data = (unsigned long) dev;
priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
add_timer(&priv->sync_timer);
}
@@ -593,9 +588,10 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work)
/*
* Sync Retransmit Timer Handler. On expiry, kick the work queue
*/
-static void sierra_sync_timer(unsigned long syncdata)
+static void sierra_sync_timer(struct timer_list *t)
{
- struct usbnet *dev = (struct usbnet *)syncdata;
+ struct sierra_net_data *priv = from_timer(priv, t, sync_timer);
+ struct usbnet *dev = priv->usbnet;
dev_dbg(&dev->udev->dev, "%s", __func__);
/* Kick the tasklet */
@@ -752,7 +748,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
/* Only need to do this once */
- init_timer(&priv->sync_timer);
+ timer_setup(&priv->sync_timer, sierra_sync_timer, 0);
/* verify fw attributes */
status = sierra_net_get_fw_attr(dev, &fwattr);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6510e5cc1817..80348b6a8646 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1509,9 +1509,9 @@ err:
// tasklet (work deferred from completions, in_irq) or timer
-static void usbnet_bh (unsigned long param)
+static void usbnet_bh (struct timer_list *t)
{
- struct usbnet *dev = (struct usbnet *) param;
+ struct usbnet *dev = from_timer(dev, t, delay);
struct sk_buff *skb;
struct skb_data *entry;
@@ -1694,13 +1694,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- dev->bh.func = usbnet_bh;
- dev->bh.data = (unsigned long) dev;
+ dev->bh.func = (void (*)(unsigned long))usbnet_bh;
+ dev->bh.data = (unsigned long)&dev->delay;
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
- dev->delay.function = usbnet_bh;
- dev->delay.data = (unsigned long) dev;
- init_timer (&dev->delay);
+ timer_setup(&dev->delay, usbnet_bh, 0);
mutex_init (&dev->phy_mutex);
mutex_init(&dev->interrupt_mutex);
dev->interrupt_count = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 511f8339fa96..19a985ef9104 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
+#include <linux/filter.h>
#include <net/route.h>
static int napi_weight = NAPI_POLL_WEIGHT;
@@ -372,9 +373,20 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static bool virtnet_xdp_xmit(struct virtnet_info *vi,
- struct receive_queue *rq,
- struct xdp_buff *xdp)
+static void virtnet_xdp_flush(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+ unsigned int qp;
+
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+ sq = &vi->sq[qp];
+
+ virtqueue_kick(sq->vq);
+}
+
+static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
+ struct xdp_buff *xdp)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
unsigned int len;
@@ -408,10 +420,19 @@ static bool virtnet_xdp_xmit(struct virtnet_info *vi,
return false;
}
- virtqueue_kick(sq->vq);
return true;
}
+static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ bool sent = __virtnet_xdp_xmit(vi, xdp);
+
+ if (!sent)
+ return -ENOSPC;
+ return 0;
+}
+
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
@@ -484,7 +505,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
void *buf, void *ctx,
- unsigned int len)
+ unsigned int len,
+ bool *xdp_xmit)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
@@ -494,7 +516,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
struct page *page = virt_to_head_page(buf);
- unsigned int delta = 0;
+ unsigned int delta = 0, err;
struct page *xdp_page;
len -= vi->hdr_len;
@@ -532,6 +554,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
xdp.data = xdp.data_hard_start + xdp_headroom;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -542,8 +565,16 @@ static struct sk_buff *receive_small(struct net_device *dev,
delta = orig_data - xdp.data;
break;
case XDP_TX:
- if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
trace_xdp_exception(vi->dev, xdp_prog, act);
+ else
+ *xdp_xmit = true;
+ rcu_read_unlock();
+ goto xdp_xmit;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(dev, &xdp, xdp_prog);
+ if (!err)
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -604,7 +635,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct receive_queue *rq,
void *buf,
void *ctx,
- unsigned int len)
+ unsigned int len,
+ bool *xdp_xmit)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -614,6 +646,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct bpf_prog *xdp_prog;
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
+ int err;
head_skb = NULL;
@@ -654,9 +687,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
data = page_address(xdp_page) + offset;
xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
xdp.data = data + vi->hdr_len;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + (len - vi->hdr_len);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ if (act != XDP_PASS)
+ ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+
switch (act) {
case XDP_PASS:
/* recalculate offset to account for any header
@@ -672,18 +709,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
offset, len, PAGE_SIZE);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
return head_skb;
}
break;
case XDP_TX:
- if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp)))
+ if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
trace_xdp_exception(vi->dev, xdp_prog, act);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
+ else
+ *xdp_xmit = true;
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(dev, &xdp, xdp_prog);
+ if (!err)
+ *xdp_xmit = true;
+ rcu_read_unlock();
+ goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
@@ -691,7 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_DROP:
if (unlikely(xdp_page != page))
__free_pages(xdp_page, 0);
- ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
goto err_xdp;
}
}
@@ -789,7 +831,7 @@ xdp_xmit:
}
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx)
+ void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
@@ -810,11 +852,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
}
if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len);
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
else if (vi->big_packets)
skb = receive_big(dev, vi, rq, buf, len);
else
- skb = receive_small(dev, vi, rq, buf, ctx, len);
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
if (unlikely(!skb))
return 0;
@@ -988,7 +1030,6 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
int err;
bool oom;
- gfp |= __GFP_COLD;
do {
if (vi->mergeable_rx_bufs)
err = add_recvbuf_mergeable(vi, rq, gfp);
@@ -1072,7 +1113,7 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
@@ -1084,13 +1125,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
while (received < budget &&
(buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
- bytes += receive_buf(vi, rq, buf, len, ctx);
+ bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
received++;
}
} else {
while (received < budget &&
(buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
- bytes += receive_buf(vi, rq, buf, len, NULL);
+ bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
received++;
}
}
@@ -1162,15 +1203,19 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
+ bool xdp_xmit = false;
virtnet_poll_cleantx(rq);
- received = virtnet_receive(rq, budget);
+ received = virtnet_receive(rq, budget, &xdp_xmit);
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
+ if (xdp_xmit)
+ xdp_do_flush_map();
+
return received;
}
@@ -2042,7 +2087,7 @@ static u32 virtnet_xdp_query(struct net_device *dev)
return 0;
}
-static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -2069,7 +2114,9 @@ static const struct net_device_ops virtnet_netdev = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
- .ndo_xdp = virtnet_xdp,
+ .ndo_bpf = virtnet_xdp,
+ .ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xdp_flush = virtnet_xdp_flush,
.ndo_features_check = passthru_features_check,
};
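
The virtio_net changes above add XDP_REDIRECT support: the per-buffer receive helpers run the program, call xdp_do_redirect() on a redirect verdict and report it through an xdp_xmit flag, and virtnet_poll() flushes the redirect maps once per NAPI cycle with xdp_do_flush_map() rather than per packet. A hedged sketch of that shape for a hypothetical driver (demo_* names, including the demo_rx_one() helper, are invented):

static void demo_handle_verdict(struct net_device *dev, struct bpf_prog *prog,
                                struct xdp_buff *xdp, bool *need_flush)
{
        switch (bpf_prog_run_xdp(prog, xdp)) {
        case XDP_REDIRECT:
                if (!xdp_do_redirect(dev, xdp, prog))
                        *need_flush = true;     /* flush later, in the poll loop */
                break;
        default:
                break;                          /* PASS/TX/DROP handled elsewhere */
        }
}

static int demo_poll(struct napi_struct *napi, int budget)
{
        bool need_flush = false;
        int done = 0;

        while (done < budget && demo_rx_one(napi, &need_flush))
                done++;

        if (need_flush)
                xdp_do_flush_map();             /* one flush per polling cycle */
        if (done < budget)
                napi_complete_done(napi, done);
        return done;
}

Batching the flush is the point of threading the flag upward: xdp_do_flush_map() commits any queued redirect destinations, and doing it once per poll keeps the per-packet fast path cheap.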
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9b243e6f3008..feb1b2e15c2e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -132,7 +132,6 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb_orphan(skb);
skb_dst_set(skb, dst);
- skb_dst_force(skb);
/* set pkt_type to avoid skb hitting packet taps twice -
* once on Tx and again in Rx processing
@@ -765,18 +764,22 @@ static void cycle_netdev(struct net_device *dev)
}
}
-static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
+ struct netlink_ext_ack *extack)
{
int ret;
/* do not allow loopback device to be enslaved to a VRF.
* The vrf device acts as the loopback for the vrf.
*/
- if (port_dev == dev_net(dev)->loopback_dev)
+ if (port_dev == dev_net(dev)->loopback_dev) {
+ NL_SET_ERR_MSG(extack,
+ "Can not enslave loopback device to a VRF");
return -EOPNOTSUPP;
+ }
port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
- ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
+ ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
if (ret < 0)
goto err;
@@ -789,12 +792,19 @@ err:
return ret;
}
-static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
+ struct netlink_ext_ack *extack)
{
- if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
+ if (netif_is_l3_master(port_dev)) {
+ NL_SET_ERR_MSG(extack,
+ "Can not enslave an L3 master device to a VRF");
+ return -EINVAL;
+ }
+
+ if (netif_is_l3_slave(port_dev))
return -EINVAL;
- return do_vrf_add_slave(dev, port_dev);
+ return do_vrf_add_slave(dev, port_dev, extack);
}
/* inverse of do_vrf_add_slave */
@@ -1165,7 +1175,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
frh->family = family;
frh->action = FR_ACT_TO_TBL;
- if (nla_put_u32(skb, FRA_L3MDEV, 1))
+ if (nla_put_u8(skb, FRA_L3MDEV, 1))
goto nla_put_failure;
if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
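
The vrf hunks above adopt the new extack plumbing: ndo_add_slave and netdev_master_upper_dev_link() now take a struct netlink_ext_ack so the rejection reason reaches userspace as text rather than only as an errno. A hedged sketch of the handler shape (hypothetical device type; names invented):

static int demo_add_slave(struct net_device *dev, struct net_device *port_dev,
                          struct netlink_ext_ack *extack)
{
        if (netif_is_l3_master(port_dev)) {
                NL_SET_ERR_MSG(extack, "Can not enslave an L3 master device");
                return -EINVAL;
        }

        return netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
}

Recent iproute2 prints the extack message with the failing command, which is considerably friendlier than decoding a bare -EINVAL.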
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d7c49cf1d5e9..7ac487031b4b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1623,26 +1623,19 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct nd_msg *msg;
- const struct ipv6hdr *iphdr;
const struct in6_addr *daddr;
- struct neighbour *n;
+ const struct ipv6hdr *iphdr;
struct inet6_dev *in6_dev;
+ struct neighbour *n;
+ struct nd_msg *msg;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
goto out;
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
- goto out;
-
iphdr = ipv6_hdr(skb);
daddr = &iphdr->daddr;
-
msg = (struct nd_msg *)(iphdr + 1);
- if (msg->icmph.icmp6_code != 0 ||
- msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
- goto out;
if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(&msg->target))
@@ -2240,11 +2233,11 @@ tx_error:
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_rdst *rdst, *fdst = NULL;
const struct ip_tunnel_info *info;
- struct ethhdr *eth;
bool did_rsc = false;
- struct vxlan_rdst *rdst, *fdst = NULL;
struct vxlan_fdb *f;
+ struct ethhdr *eth;
__be32 vni = 0;
info = skb_tunnel_info(skb);
@@ -2269,12 +2262,14 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
- else if (ntohs(eth->h_proto) == ETH_P_IPV6) {
- struct ipv6hdr *hdr, _hdr;
- if ((hdr = skb_header_pointer(skb,
- skb_network_offset(skb),
- sizeof(_hdr), &_hdr)) &&
- hdr->nexthdr == IPPROTO_ICMPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+ sizeof(struct nd_msg)) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);
+
+ if (m->icmph.icmp6_code == 0 &&
+ m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb, vni);
}
#endif
@@ -2325,9 +2320,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Walk the forwarding table and purge stale entries */
-static void vxlan_cleanup(unsigned long arg)
+static void vxlan_cleanup(struct timer_list *t)
{
- struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
+ struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
unsigned int h;
@@ -2647,9 +2642,7 @@ static void vxlan_setup(struct net_device *dev)
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
- init_timer_deferrable(&vxlan->age_timer);
- vxlan->age_timer.function = vxlan_cleanup;
- vxlan->age_timer.data = (unsigned long) vxlan;
+ timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
vxlan->dev = dev;
@@ -3704,6 +3697,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
struct net_device *dev, *aux;
+ unsigned int h;
LIST_HEAD(list);
rtnl_lock();
@@ -3723,6 +3717,9 @@ static void __net_exit vxlan_exit_net(struct net *net)
unregister_netdevice_many(&list);
rtnl_unlock();
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
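
The vxlan_xmit() hunks above move the IPv6 neighbour-solicitation checks out of neigh_reduce() and behind an explicit pskb_may_pull(), so the ICMPv6 type/code bytes are only read once they are guaranteed to sit in linear data. A hedged sketch mirroring the check the patch adds (illustrative helper, not the driver code; it assumes the IPv6 header sits at the start of the pullable region, as in the hunk above):

static bool demo_is_ipv6_ns(struct sk_buff *skb)
{
        struct nd_msg *msg;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
                return false;
        if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
                return false;

        msg = (struct nd_msg *)(ipv6_hdr(skb) + 1);
        return msg->icmph.icmp6_code == 0 &&
               msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
}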
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 73c2326603fc..9532e69fda87 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux network (wan) device drivers.
#
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a043fb1367bd..c0b0f525c87c 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -223,8 +223,6 @@ struct dscc4_dev_priv {
u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */
- struct timer_list timer;
-
struct dscc4_pci_priv *pci_priv;
spinlock_t lock;
@@ -369,7 +367,6 @@ static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
-static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static irqreturn_t dscc4_irq(int irq, void *dev_id);
static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
@@ -983,19 +980,6 @@ err_out:
return ret;
};
-/* FIXME: get rid of the unneeded code */
-static void dscc4_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
-// struct dscc4_pci_priv *ppriv;
-
- goto done;
-done:
- dpriv->timer.expires = jiffies + TX_TIMEOUT;
- add_timer(&dpriv->timer);
-}
-
static void dscc4_tx_timeout(struct net_device *dev)
{
/* FIXME: something is missing there */
@@ -1127,11 +1111,6 @@ static int dscc4_open(struct net_device *dev)
done:
netif_start_queue(dev);
- init_timer(&dpriv->timer);
- dpriv->timer.expires = jiffies + 10*HZ;
- dpriv->timer.data = (unsigned long)dev;
- dpriv->timer.function = dscc4_timer;
- add_timer(&dpriv->timer);
netif_carrier_on(dev);
return 0;
@@ -1199,7 +1178,6 @@ static int dscc4_close(struct net_device *dev)
{
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
- del_timer_sync(&dpriv->timer);
netif_stop_queue(dev);
scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
index 10963e8f4b39..24529996c872 100644
--- a/drivers/net/wan/hd64570.h
+++ b/drivers/net/wan/hd64570.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __HD64570_H
#define __HD64570_H
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index a408abc25512..320039d329c7 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -54,6 +54,7 @@ struct cisco_state {
cisco_proto settings;
struct timer_list timer;
+ struct net_device *dev;
spinlock_t lock;
unsigned long last_poll;
int up;
@@ -257,11 +258,10 @@ rx_error:
-static void cisco_timer(unsigned long arg)
+static void cisco_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)arg;
- hdlc_device *hdlc = dev_to_hdlc(dev);
- struct cisco_state *st = state(hdlc);
+ struct cisco_state *st = from_timer(st, t, timer);
+ struct net_device *dev = st->dev;
spin_lock(&st->lock);
if (st->up &&
@@ -276,8 +276,6 @@ static void cisco_timer(unsigned long arg)
spin_unlock(&st->lock);
st->timer.expires = jiffies + st->settings.interval * HZ;
- st->timer.function = cisco_timer;
- st->timer.data = arg;
add_timer(&st->timer);
}
@@ -293,10 +291,9 @@ static void cisco_start(struct net_device *dev)
st->up = st->txseq = st->rxseq = 0;
spin_unlock_irqrestore(&st->lock, flags);
- init_timer(&st->timer);
+ st->dev = dev;
+ timer_setup(&st->timer, cisco_timer, 0);
st->timer.expires = jiffies + HZ; /* First poll after 1 s */
- st->timer.function = cisco_timer;
- st->timer.data = (unsigned long)dev;
add_timer(&st->timer);
}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 78596e42a3f3..038236a9c60e 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -140,6 +140,7 @@ struct frad_state {
int dce_pvc_count;
struct timer_list timer;
+ struct net_device *dev;
unsigned long last_poll;
int reliable;
int dce_changed;
@@ -597,9 +598,10 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
-static void fr_timer(unsigned long arg)
+static void fr_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)arg;
+ struct frad_state *st = from_timer(st, t, timer);
+ struct net_device *dev = st->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
int i, cnt = 0, reliable;
u32 list;
@@ -644,8 +646,6 @@ static void fr_timer(unsigned long arg)
state(hdlc)->settings.t391 * HZ;
}
- state(hdlc)->timer.function = fr_timer;
- state(hdlc)->timer.data = arg;
add_timer(&state(hdlc)->timer);
}
@@ -1003,11 +1003,10 @@ static void fr_start(struct net_device *dev)
state(hdlc)->n391cnt = 0;
state(hdlc)->txseq = state(hdlc)->rxseq = 0;
- init_timer(&state(hdlc)->timer);
+ state(hdlc)->dev = dev;
+ timer_setup(&state(hdlc)->timer, fr_timer, 0);
/* First poll after 1 s */
state(hdlc)->timer.expires = jiffies + HZ;
- state(hdlc)->timer.function = fr_timer;
- state(hdlc)->timer.data = (unsigned long)dev;
add_timer(&state(hdlc)->timer);
} else
fr_set_link_state(1, dev);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 0d2e00ece804..afeca6bcdade 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -558,9 +558,9 @@ out:
return NET_RX_DROP;
}
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = (struct proto *)arg;
+ struct proto *proto = from_timer(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
@@ -610,9 +610,7 @@ static void ppp_start(struct net_device *dev)
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
proto->dev = dev;
- init_timer(&proto->timer);
- proto->timer.function = ppp_timer;
- proto->timer.data = (unsigned long)proto;
+ timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
}
ppp->protos[IDX_LCP].pid = PID_LCP;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 63f749078a1f..0e3f8ed84660 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -346,7 +346,6 @@ out:
fail:
dev_put(dev);
free_netdev(ndev);
- kfree(lapbeth);
goto out;
}
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
index 4ced7ac16c2c..38961793adad 100644
--- a/drivers/net/wan/lmc/lmc.h
+++ b/drivers/net/wan/lmc/lmc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LMC_H_
#define _LMC_H_
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 15049d711f47..f999db788506 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index 2d46f121549f..820adcae5d67 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LMC_DEBUG_H_
#define _LMC_DEBUG_H_
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4698450c77d1..37b1e0d03e31 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -99,7 +99,7 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t c
static void lmc_softreset(lmc_softc_t * const);
static void lmc_running_reset(struct net_device *dev);
static int lmc_ifdown(struct net_device * const);
-static void lmc_watchdog(unsigned long data);
+static void lmc_watchdog(struct timer_list *t);
static void lmc_reset(lmc_softc_t * const sc);
static void lmc_dec_reset(lmc_softc_t * const sc);
static void lmc_driver_timeout(struct net_device *dev);
@@ -636,10 +636,10 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
/* the watchdog process that cruises around */
-static void lmc_watchdog (unsigned long data) /*fold00*/
+static void lmc_watchdog(struct timer_list *t) /*fold00*/
{
- struct net_device *dev = (struct net_device *)data;
- lmc_softc_t *sc = dev_to_sc(dev);
+ lmc_softc_t *sc = from_timer(sc, t, timer);
+ struct net_device *dev = sc->lmc_device;
int link_status;
u32 ticks;
unsigned long flags;
@@ -1084,10 +1084,8 @@ static int lmc_open(struct net_device *dev)
* Setup a timer for the watchdog on probe, and start it running.
* Since lmc_ok == 0, it will be a NOP for now.
*/
- init_timer (&sc->timer);
+ timer_setup(&sc->timer, lmc_watchdog, 0);
sc->timer.expires = jiffies + HZ;
- sc->timer.data = (unsigned long) dev;
- sc->timer.function = lmc_watchdog;
add_timer (&sc->timer);
lmc_trace(dev, "lmc_open out");
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index 662148c54644..bb098e443776 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LMC_PROTO_H_
#define _LMC_PROTO_H_
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index bde8c0339831..8e8c4c0e1b64 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -71,6 +71,7 @@
struct net_local {
struct timer_list watchdog;
+ struct net_device *watchdog_dev;
spinlock_t lock;
struct sk_buff *rx_buf_p; /* receive buffer ptr */
@@ -128,7 +129,7 @@ static void send_frame( struct net_device * );
static int upload_data( struct net_device *,
unsigned, unsigned, unsigned, u32 );
static void download_data( struct net_device *, u32 * );
-static void sbni_watchdog( unsigned long );
+static void sbni_watchdog(struct timer_list *);
static void interpret_ack( struct net_device *, unsigned );
static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
static void indicate_pkt( struct net_device * );
@@ -1029,11 +1030,10 @@ indicate_pkt( struct net_device *dev )
*/
static void
-sbni_watchdog( unsigned long arg )
+sbni_watchdog(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct net_local *nl = netdev_priv(dev);
- struct timer_list *w = &nl->watchdog;
+ struct net_local *nl = from_timer(nl, t, watchdog);
+ struct net_device *dev = nl->watchdog_dev;
unsigned long flags;
unsigned char csr0;
@@ -1060,11 +1060,7 @@ sbni_watchdog( unsigned long arg )
outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
- init_timer( w );
- w->expires = jiffies + SBNI_TIMEOUT;
- w->data = arg;
- w->function = sbni_watchdog;
- add_timer( w );
+ mod_timer(t, jiffies + SBNI_TIMEOUT);
spin_unlock_irqrestore( &nl->lock, flags );
}
@@ -1195,10 +1191,9 @@ handler_attached:
netif_start_queue( dev );
/* set timer watchdog */
- init_timer( w );
+ nl->watchdog_dev = dev;
+ timer_setup(w, sbni_watchdog, 0);
w->expires = jiffies + SBNI_TIMEOUT;
- w->data = (unsigned long) dev;
- w->function = sbni_watchdog;
add_timer( w );
spin_unlock( &nl->lock );
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 236c62538036..57ed259c8208 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -927,13 +927,10 @@ static irqreturn_t sdla_isr(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static void sdla_poll(unsigned long device)
+static void sdla_poll(struct timer_list *t)
{
- struct net_device *dev;
- struct frad_local *flp;
-
- dev = (struct net_device *) device;
- flp = netdev_priv(dev);
+ struct frad_local *flp = from_timer(flp, t, timer);
+ struct net_device *dev = flp->dev;
if (sdla_byte(dev, SDLA_502_RCV_BUF))
sdla_receive(dev);
@@ -1616,11 +1613,10 @@ static void setup_sdla(struct net_device *dev)
flp->assoc = sdla_assoc;
flp->deassoc = sdla_deassoc;
flp->dlci_conf = sdla_dlci_conf;
+ flp->dev = dev;
- init_timer(&flp->timer);
+ timer_setup(&flp->timer, sdla_poll, 0);
flp->timer.expires = 1;
- flp->timer.data = (unsigned long) dev;
- flp->timer.function = sdla_poll;
}
static struct net_device *sdla;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 0c7317520ed3..d573a57bc301 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -734,7 +734,6 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- stat = 0;
timeout = jiffies + 5 * HZ;
do {
if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 40ee80c03c94..74c06a5f586f 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -324,6 +324,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
if (err != LAPB_OK)
netdev_err(dev, "lapb_disconnect_request error: %d\n",
err);
+ /* fall through */
default:
kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index f57ee67836ae..eb4a4216ee94 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_X25_ASY_H
#define _LINUX_X25_ASY_H
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 2416a9d60bd6..32ae710d4f40 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Description of Z8530 Z85C30 and Z85230 communications chips
*
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
index f6d19c348082..b1db1eff0648 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 54b41ac5f9c8..7fc96306712a 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Wireless network device drivers.
#
diff --git a/drivers/net/wireless/admtek/adm8211.h b/drivers/net/wireless/admtek/adm8211.h
index bbc10b1cde87..2c55c629de28 100644
--- a/drivers/net/wireless/admtek/adm8211.h
+++ b/drivers/net/wireless/admtek/adm8211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ADM8211_H
#define ADM8211_H
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 4cdebc7659dd..e4e460b5498e 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ATH5K) += ath5k/
obj-$(CONFIG_ATH9K_HW) += ath9k/
obj-$(CONFIG_CARL9170) += carl9170/
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 68f0463ed8df..b94759daeacc 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -889,9 +889,9 @@ static void ar5523_tx_work(struct work_struct *work)
mutex_unlock(&ar->mutex);
}
-static void ar5523_tx_wd_timer(unsigned long arg)
+static void ar5523_tx_wd_timer(struct timer_list *t)
{
- struct ar5523 *ar = (struct ar5523 *) arg;
+ struct ar5523 *ar = from_timer(ar, t, tx_wd_timer);
ar5523_dbg(ar, "TX watchdog timer triggered\n");
ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
@@ -1599,8 +1599,7 @@ static int ar5523_probe(struct usb_interface *intf,
mutex_init(&ar->mutex);
INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
- init_timer(&ar->tx_wd_timer);
- setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
+ timer_setup(&ar->tx_wd_timer, ar5523_tx_wd_timer, 0);
INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
INIT_WORK(&ar->tx_work, ar5523_tx_work);
INIT_LIST_HEAD(&ar->tx_queue_pending);
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 899b9b79f4ce..9492177e9063 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ATH10K) += ath10k_core.o
ath10k_core-y += mac.o \
debug.o \
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index a4f635820f35..b29fdbd21ead 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -74,6 +74,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -97,6 +98,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -119,6 +121,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -141,6 +144,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -163,6 +167,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -188,6 +193,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -216,6 +222,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 4,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -249,6 +256,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
*/
.vht160_mcs_rx_highest = 1560,
.vht160_mcs_tx_highest = 1560,
+ .n_cipher_suites = 11,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -281,6 +289,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
*/
.vht160_mcs_rx_highest = 780,
.vht160_mcs_tx_highest = 780,
+ .n_cipher_suites = 11,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -303,6 +312,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -327,6 +337,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 0,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -356,6 +367,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.spectral_bin_discard = 4,
.vht160_mcs_rx_highest = 0,
.vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
},
};
@@ -377,6 +389,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
+ [ATH10K_FW_FEATURE_NO_PS] = "no-ps",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 949ebb3e967b..643041ef3271 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -612,6 +612,9 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16,
+ /* Firmware does not support power save in station mode. */
+ ATH10K_FW_FEATURE_NO_PS = 17,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a3f5dc78353f..7d295ee71534 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -200,9 +200,9 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
spin_unlock_bh(&htt->rx_ring.lock);
}
-static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
- struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+ struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
ath10k_htt_rx_msdu_buff_replenish(htt);
}
@@ -507,7 +507,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
*htt->rx_ring.alloc_idx.vaddr = 0;
/* Initialize the Rx refill retry timer */
- setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+ timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
spin_lock_init(&htt->rx_ring.lock);
@@ -550,6 +550,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -561,11 +566,41 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
#define MICHAEL_MIC_LEN 8
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
- enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
@@ -573,8 +608,6 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return IEEE80211_TKIP_ICV_LEN;
- case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
- return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -1051,15 +1084,29 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- if (status->flag & RX_FLAG_IV_STRIPPED)
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ }
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- skb_trim(msdu, msdu->len - 8);
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
@@ -1075,7 +1122,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
@@ -1083,6 +1131,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1111,6 +1160,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1171,6 +1228,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
u8 sa[ETH_ALEN];
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1199,6 +1257,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1212,12 +1278,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1233,6 +1301,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
@@ -1267,13 +1343,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
is_decrypted);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+ enctype);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
- ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+ enctype);
break;
}
}
@@ -1316,7 +1394,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ bool fill_crypt_header)
{
struct sk_buff *first;
struct sk_buff *last;
@@ -1326,7 +1405,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
u8 *qos;
- size_t hdr_len;
bool has_fcs_err;
bool has_crypto_err;
bool has_tkip_err;
@@ -1351,15 +1429,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
* decapped header. It'll be used for undecapping of each MSDU.
*/
hdr = (void *)rxd->rx_hdr_status;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(first_hdr, hdr, hdr_len);
+ memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
/* Each A-MSDU subframe will use the original header as the base and be
* reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
*/
hdr = (void *)first_hdr;
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
@@ -1406,9 +1486,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
status->flag |= RX_FLAG_DECRYPTED;
if (likely(!is_mgmt))
- status->flag |= RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
-}
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypt_header)
+ status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1424,6 +1509,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (is_mgmt)
continue;
+ if (fill_crypt_header)
+ continue;
+
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
@@ -1434,6 +1522,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+
+ first_subframe = skb_peek(amsdu);
while ((msdu = __skb_dequeue(amsdu))) {
/* Setup per-MSDU flags */
@@ -1442,6 +1533,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
else
status->flag |= RX_FLAG_AMSDU_MORE;
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ status->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1584,7 +1682,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return num_msdus;
@@ -1745,8 +1843,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
}
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
- struct sk_buff_head *amsdu,
- int budget_left)
+ struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
@@ -1757,9 +1854,8 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
if (WARN_ON(!skb_queue_empty(amsdu)))
return -EINVAL;
- while ((msdu = __skb_dequeue(list)) && budget_left) {
+ while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
- budget_left--;
rxd = (void *)msdu->data - sizeof(*rxd);
if (rxd->msdu_end.common.info0 &
@@ -1850,8 +1946,7 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
return num_msdu;
}
-static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
- int budget_left)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
@@ -1908,9 +2003,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
if (offload)
num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
- while (!skb_queue_empty(&list) && budget_left) {
+ while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left);
+ ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
@@ -1920,10 +2015,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
* should still give an idea about rx rate to the user.
*/
num_msdus += skb_queue_len(&amsdu);
- budget_left -= skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
break;
case -EAGAIN:
@@ -2563,8 +2657,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
}
spin_lock_bh(&htt->rx_ring.lock);
- num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb,
- (budget - quota));
+ num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
if (num_rx_msdus < 0) {
resched_napi = true;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index a860691d635d..88955bbe20bd 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -310,7 +310,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
.wm_high = &wcn3990_dst_wm_high,
};
-struct ath10k_hw_ce_regs wcn3990_ce_regs = {
+const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
.sr_base_addr = 0x00000000,
.sr_size_addr = 0x00000008,
.dr_base_addr = 0x0000000c,
@@ -457,7 +457,7 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
.wm_high = &qcax_dst_wm_high,
};
-struct ath10k_hw_ce_regs qcax_ce_regs = {
+const struct ath10k_hw_ce_regs qcax_ce_regs = {
.sr_base_addr = 0x00000000,
.sr_size_addr = 0x00000004,
.dr_base_addr = 0x00000008,
@@ -604,8 +604,13 @@ static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
/* Only modify registers if the core is started. */
if ((ar->state != ATH10K_STATE_ON) &&
- (ar->state != ATH10K_STATE_RESTARTED))
+ (ar->state != ATH10K_STATE_RESTARTED)) {
+ spin_lock_bh(&ar->data_lock);
+ /* Store config value for when radio boots up */
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
goto unlock;
+ }
/* Retrieve the current values of the two registers that need to be
* adjusted.
@@ -637,7 +642,7 @@ static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
ar->fw_coverage.reg_phyclk = phyclk_reg;
- /* Calculat new value based on the (original) firmware calculation. */
+ /* Calculate new value based on the (original) firmware calculation. */
slottime_reg = ar->fw_coverage.reg_slottime_orig;
timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 0c089f6dd3d9..05f26e5858ad 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -369,8 +369,8 @@ extern const struct ath10k_hw_values qca99x0_values;
extern const struct ath10k_hw_values qca9888_values;
extern const struct ath10k_hw_values qca4019_values;
extern const struct ath10k_hw_values wcn3990_values;
-extern struct ath10k_hw_ce_regs wcn3990_ce_regs;
-extern struct ath10k_hw_ce_regs qcax_ce_regs;
+extern const struct ath10k_hw_ce_regs wcn3990_ce_regs;
+extern const struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
@@ -550,6 +550,9 @@ struct ath10k_hw_params {
*/
int vht160_mcs_rx_highest;
int vht160_mcs_tx_highest;
+
+ /* Number of ciphers supported (i.e. the first N) in the cipher_suites array */
+ int n_cipher_suites;
};
struct htt_rx_desc;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 5683f1a5330e..0a947eef348d 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -242,6 +242,16 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
case WLAN_CIPHER_SUITE_WEP104:
arg.key_cipher = WMI_CIPHER_WEP;
break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
WARN_ON(1);
return -EINVAL;
@@ -5575,6 +5585,59 @@ static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
ar->hw_params.hw_ops->set_coverage_class(ar, value);
}
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int *num_tdls_vifs = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+ (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+ int num_tdls_vifs = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_tdls_vifs_count_iter,
+ &num_tdls_vifs);
+ return num_tdls_vifs;
+}
+
static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
@@ -5588,6 +5651,11 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
@@ -5723,7 +5791,10 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
u32 flags2;
/* this one needs to be done in software */
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
if (arvif->nohwcrypt)
@@ -6000,59 +6071,6 @@ static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
ar->num_stations--;
}
-struct ath10k_mac_tdls_iter_data {
- u32 num_tdls_stations;
- struct ieee80211_vif *curr_vif;
-};
-
-static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
- struct ieee80211_sta *sta)
-{
- struct ath10k_mac_tdls_iter_data *iter_data = data;
- struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
- struct ieee80211_vif *sta_vif = arsta->arvif->vif;
-
- if (sta->tdls && sta_vif == iter_data->curr_vif)
- iter_data->num_tdls_stations++;
-}
-
-static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct ath10k_mac_tdls_iter_data data = {};
-
- data.curr_vif = vif;
-
- ieee80211_iterate_stations_atomic(hw,
- ath10k_mac_tdls_vif_stations_count_iter,
- &data);
- return data.num_tdls_stations;
-}
-
-static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct ath10k_vif *arvif = (void *)vif->drv_priv;
- int *num_tdls_vifs = data;
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
- (*num_tdls_vifs)++;
-}
-
-static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
-{
- int num_tdls_vifs = 0;
-
- ieee80211_iterate_active_interfaces_atomic(hw,
- IEEE80211_IFACE_ITER_NORMAL,
- ath10k_mac_tdls_vifs_count_iter,
- &num_tdls_vifs);
- return num_tdls_vifs;
-}
-
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -6477,6 +6495,11 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
spin_lock_bh(&ar->data_lock);
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
@@ -8074,7 +8097,22 @@ int ath10k_mac_register(struct ath10k *ar)
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+
+ /* Do not add hardware supported ciphers before this line.
+ * Allow software encryption for all chips. Don't forget to
+ * update n_cipher_suites below.
+ */
WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+
+ /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
+ * and CCMP-256 in hardware.
+ */
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
};
struct ieee80211_supported_band *band;
void *channels;
@@ -8146,8 +8184,13 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_P2P_GO);
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
- ieee80211_hw_set(ar->hw, SUPPORTS_PS);
- ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
+ ar->running_fw->fw_file.fw_features)) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ }
+
ieee80211_hw_set(ar->hw, MFP_CAPABLE);
ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
@@ -8313,7 +8356,18 @@ int ath10k_mac_register(struct ath10k *ar)
}
ar->hw->wiphy->cipher_suites = cipher_suites;
- ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
+ * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
+ * from chip specific hw_param table.
+ */
+ if (!ar->hw_params.n_cipher_suites ||
+ ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
+ ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
+ ar->hw_params.n_cipher_suites);
+ ar->hw_params.n_cipher_suites = 8;
+ }
+ ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
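What n_cipher_suites buys here is a single, ordered cipher_suites[] shared by all chips: cfg80211 only consults the first n_cipher_suites entries, so listing the GCMP/CCMP-256 suites last and lowering the count for QCA988x/QCA6174-class hardware hides them without maintaining per-chip arrays. A rough illustration of that lookup (a hypothetical helper, not the exact cfg80211 code):

#include <net/cfg80211.h>

static bool suite_advertised(const struct wiphy *wiphy, u32 cipher)
{
	int i;

	/* entries at index >= n_cipher_suites are effectively invisible */
	for (i = 0; i < wiphy->n_cipher_suites; i++)
		if (wiphy->cipher_suites[i] == cipher)
			return true;
	return false;
}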
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 195dafb98131..ffea348b2190 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -585,10 +585,10 @@ skip:
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
-static void ath10k_pci_ps_timer(unsigned long ptr)
+static void ath10k_pci_ps_timer(struct timer_list *t)
{
- struct ath10k *ar = (void *)ptr;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
+ struct ath10k *ar = ar_pci->ar;
unsigned long flags;
spin_lock_irqsave(&ar_pci->ps_lock, flags);
@@ -838,9 +838,10 @@ void ath10k_pci_rx_post(struct ath10k *ar)
ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}
-void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
- struct ath10k *ar = (void *)ptr;
+ struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
+ struct ath10k *ar = ar_pci->ar;
ath10k_pci_rx_post(ar);
}
@@ -2577,10 +2578,14 @@ void ath10k_pci_hif_power_down(struct ath10k *ar)
*/
}
-#ifdef CONFIG_PM
-
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
+ /* Nothing to do; the important stuff is in the driver suspend. */
+ return 0;
+}
+
+static int ath10k_pci_suspend(struct ath10k *ar)
+{
/* The grace timer can still be counting down and ar->ps_awake be true.
* It is known that the device may be asleep after resuming regardless
* of the SoC powersave state before suspending. Hence make sure the
@@ -2593,6 +2598,12 @@ static int ath10k_pci_hif_suspend(struct ath10k *ar)
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
+ /* Nothing to do; the important stuff is in the driver resume. */
+ return 0;
+}
+
+static int ath10k_pci_resume(struct ath10k *ar)
+{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct pci_dev *pdev = ar_pci->pdev;
u32 val;
@@ -2615,7 +2626,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
return ret;
}
-#endif
static bool ath10k_pci_validate_cal(void *data, size_t size)
{
@@ -2770,10 +2780,8 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.power_down = ath10k_pci_hif_power_down,
.read32 = ath10k_pci_read32,
.write32 = ath10k_pci_write32,
-#ifdef CONFIG_PM
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
-#endif
.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};
@@ -3157,8 +3165,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
spin_lock_init(&ce->ce_lock);
spin_lock_init(&ar_pci->ps_lock);
- setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
- (unsigned long)ar);
+ timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
@@ -3284,8 +3291,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar->id.subsystem_vendor = pdev->subsystem_vendor;
ar->id.subsystem_device = pdev->subsystem_device;
- setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
- (unsigned long)ar);
+ timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
ret = ath10k_pci_setup_resource(ar);
if (ret) {
@@ -3401,11 +3407,7 @@ static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
- if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->running_fw->fw_file.fw_features))
- return 0;
-
- ret = ath10k_hif_suspend(ar);
+ ret = ath10k_pci_suspend(ar);
if (ret)
ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
@@ -3417,11 +3419,7 @@ static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
struct ath10k *ar = dev_get_drvdata(dev);
int ret;
- if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->running_fw->fw_file.fw_features))
- return 0;
-
- ret = ath10k_hif_resume(ar);
+ ret = ath10k_pci_resume(ar);
if (ret)
ath10k_warn(ar, "failed to resume hif: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 424ff323b2dc..08704fbc11e3 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -278,7 +278,7 @@ void ath10k_pci_hif_power_down(struct ath10k *ar);
int ath10k_pci_alloc_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
-void ath10k_pci_rx_replenish_retry(unsigned long ptr);
+void ath10k_pci_rx_replenish_retry(struct timer_list *t);
void ath10k_pci_ce_deinit(struct ath10k *ar);
void ath10k_pci_init_napi(struct ath10k *ar);
int ath10k_pci_init_pipes(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index c1022a1cf855..28da14398951 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -239,6 +239,9 @@ enum htt_rx_mpdu_encrypt_type {
HTT_RX_MPDU_ENCRYPT_WAPI = 5,
HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
HTT_RX_MPDU_ENCRYPT_NONE = 7,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
};
#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index dd9cc0939ea8..2048b1e5262b 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -406,7 +406,7 @@ static ssize_t write_file_spectral_count(struct file *file,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val < 0 || val > 255)
+ if (val > 255)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
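The half of the bounds check that was dropped above was dead code: kstrtoul() stores into an unsigned long, so val < 0 can never be true for an unsigned type. A minimal sketch of the surviving logic (a hypothetical helper with the same 0..255 range as write_file_spectral_count()):

#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_count(const char *buf, u8 *out)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val))	/* non-zero return: not a valid number */
		return -EINVAL;

	if (val > 255)			/* lower bound is implicit for an unsigned type */
		return -EINVAL;

	*out = val;
	return 0;
}

The identical reasoning applies to the write_file_tpc() change in ath9k/debug.c further down, where the range is 0..1.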
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 38a97086708b..cad2e42dcef6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -7870,7 +7870,8 @@ ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
if (!skb)
return ERR_PTR(-ENOMEM);
- if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map))
+ if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
+ state == WMI_TDLS_ENABLE_ACTIVE)
state = WMI_TDLS_ENABLE_PASSIVE;
if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 7a3606dde227..c02b21cff38d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4751,6 +4751,7 @@ struct wmi_key_seq_counter {
#define WMI_CIPHER_WAPI 0x5
#define WMI_CIPHER_CKIP 0x6
#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_AES_GCM 0x8
struct wmi_vdev_install_key_cmd {
__le32 vdev_id;
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 1b3a34f7f224..a8724eee21f8 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ath5k-y += caps.o
ath5k-y += initvals.o
ath5k-y += eeprom.o
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index bd8d4392d68b..80f75139495f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
tx_status = &desc->ud.ds_tx5212.tx_stat;
- txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+ txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been sent or error */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
- txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+ txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
- rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+ rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
- rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+ rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 04cf0ca72610..25978c732fe1 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index c6eef519bb61..a41e3bf42dfc 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
#define __TRACE_ATH5K_H
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 414b5b596efc..b53eb2b85f02 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3589,10 +3589,8 @@ static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
return -ENOMEM;
}
- setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
- (unsigned long) vif->ndev);
- setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
- (unsigned long) vif);
+ timer_setup(&vif->disconnect_timer, disconnect_timer_handler, 0);
+ timer_setup(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer, 0);
set_bit(WMM_ENABLED, &vif->flags);
spin_lock_init(&vif->if_lock);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 87e99c12d4ba..e23d450babd2 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -893,7 +893,7 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
-void disconnect_timer_handler(unsigned long ptr);
+void disconnect_timer_handler(struct timer_list *t);
void init_netdev(struct net_device *dev);
void ath6kl_cookie_init(struct ath6kl *ar);
void ath6kl_cookie_cleanup(struct ath6kl *ar);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index b90c77ef792e..db95f85751e3 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -494,10 +494,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
netif_wake_queue(vif->ndev);
}
-void disconnect_timer_handler(unsigned long ptr)
+void disconnect_timer_handler(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)ptr;
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = from_timer(vif, t, disconnect_timer);
ath6kl_init_profile_info(vif);
ath6kl_disconnect(vif);
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
index 3a8d5e97dc8e..c09e40c9010f 100644
--- a/drivers/net/wireless/ath/ath6kl/recovery.c
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -60,9 +60,9 @@ void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
ar->fw_recovery.hb_pending = false;
}
-static void ath6kl_recovery_hb_timer(unsigned long data)
+static void ath6kl_recovery_hb_timer(struct timer_list *t)
{
- struct ath6kl *ar = (struct ath6kl *) data;
+ struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer);
int err;
if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
@@ -104,9 +104,8 @@ void ath6kl_recovery_init(struct ath6kl *ar)
recovery->seq_num = 0;
recovery->hb_misscnt = 0;
ar->fw_recovery.hb_pending = false;
- ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer;
- ar->fw_recovery.hb_timer.data = (unsigned long) ar;
- init_timer_deferrable(&ar->fw_recovery.hb_timer);
+ timer_setup(&ar->fw_recovery.hb_timer, ath6kl_recovery_hb_timer,
+ TIMER_DEFERRABLE);
if (ar->fw_recovery.hb_poll)
mod_timer(&ar->fw_recovery.hb_timer, jiffies +
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index 1a1ea7881b4d..91e735cfdef7 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#include <net/cfg80211.h>
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index e6b2517e6334..1379906bf849 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1620,10 +1620,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
-static void aggr_timeout(unsigned long arg)
+static void aggr_timeout(struct timer_list *t)
{
u8 i, j;
- struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
+ struct aggr_info_conn *aggr_conn = from_timer(aggr_conn, t, timer);
struct rxtid *rxtid;
struct rxtid_stats *stats;
@@ -1753,9 +1753,7 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
aggr_conn->dev = vif->ndev;
- init_timer(&aggr_conn->timer);
- aggr_conn->timer.function = aggr_timeout;
- aggr_conn->timer.data = (unsigned long) aggr_conn;
+ timer_setup(&aggr_conn->timer, aggr_timeout, 0);
aggr_conn->aggr_info = aggr_info;
aggr_conn->timer_scheduled = false;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index bfc20b45b806..777acc564ac9 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1078,9 +1078,9 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
return 0;
}
-void ath6kl_wmi_sscan_timer(unsigned long ptr)
+void ath6kl_wmi_sscan_timer(struct timer_list *t)
{
- struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+ struct ath6kl_vif *vif = from_timer(vif, t, sched_scan_timer);
cfg80211_sched_scan_results(vif->ar->wiphy, 0);
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 3af464a73b58..a60bb49fe920 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2719,7 +2719,7 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
-void ath6kl_wmi_sscan_timer(unsigned long ptr);
+void ath6kl_wmi_sscan_timer(struct timer_list *t);
int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 36a40ffdce15..d804ce7391a0 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ath9k-y += beacon.o \
gpio.o \
init.o \
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 3dbfd86ebe36..c2e210c0a770 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -15,6 +15,7 @@
*/
#include <asm/unaligned.h>
+#include <linux/kernel.h>
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
@@ -2946,14 +2947,12 @@ static const struct ar9300_eeprom *ar9300_eep_templates[] = {
static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
{
-#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
int it;
- for (it = 0; it < N_LOOP; it++)
+ for (it = 0; it < ARRAY_SIZE(ar9300_eep_templates); it++)
if (ar9300_eep_templates[it]->templateVersion == id)
return ar9300_eep_templates[it];
return NULL;
-#undef N_LOOP
}
static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index b3f20b3c0210..e1fe7a7c3ad8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
void *buf_addr)
{
- struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
+ struct ar9003_rxs *rxsp = buf_addr;
unsigned int phyerr;
if ((rxsp->status11 & AR_RxDone) == 0)
@@ -610,7 +610,7 @@ void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
ah->ts_paddr_start = ts_paddr_start;
ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
ah->ts_size = size;
- ah->ts_ring = (struct ar9003_txs *) ts_start;
+ ah->ts_ring = ts_start;
ath9k_hw_reset_txstatus_ring(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index cf076719c27e..ef0de4f1312c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -750,14 +750,14 @@ void ath_reset_work(struct work_struct *work);
bool ath_hw_check(struct ath_softc *sc);
void ath_hw_pll_work(struct work_struct *work);
void ath_paprd_calibrate(struct work_struct *work);
-void ath_ani_calibrate(unsigned long data);
+void ath_ani_calibrate(struct timer_list *t);
void ath_start_ani(struct ath_softc *sc);
void ath_stop_ani(struct ath_softc *sc);
void ath_check_ani(struct ath_softc *sc);
int ath_update_survey_stats(struct ath_softc *sc);
void ath_update_survey_nf(struct ath_softc *sc, int channel);
void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
-void ath_ps_full_sleep(unsigned long data);
+void ath_ps_full_sleep(struct timer_list *t);
void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
bool sw_pending, bool timeout_override);
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index f0439f2d566b..dfb26f03c1a2 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -29,6 +29,7 @@ static int ath_set_channel(struct ath_softc *sc)
struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
struct ieee80211_channel *chan = chandef->chan;
int pos = chan->hw_value;
+ unsigned long flags;
int old_pos = -1;
int r;
@@ -42,9 +43,9 @@ static int ath_set_channel(struct ath_softc *sc)
chan->center_freq, chandef->width);
/* update survey stats for the old channel before switching */
- spin_lock_bh(&common->cc_lock);
+ spin_lock_irqsave(&common->cc_lock, flags);
ath_update_survey_stats(sc);
- spin_unlock_bh(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
ath9k_cmn_get_channel(hw, ah, chandef);
@@ -1042,9 +1043,9 @@ static void ath_scan_channel_start(struct ath_softc *sc)
mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration);
}
-static void ath_chanctx_timer(unsigned long data)
+static void ath_chanctx_timer(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_softc *sc = from_timer(sc, t, sched.timer);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
ath_dbg(common, CHAN_CTX,
@@ -1053,9 +1054,9 @@ static void ath_chanctx_timer(unsigned long data)
ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
}
-static void ath_offchannel_timer(unsigned long data)
+static void ath_offchannel_timer(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_softc *sc = from_timer(sc, t, offchannel.timer);
struct ath_chanctx *ctx;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1361,10 +1362,8 @@ void ath9k_init_channel_context(struct ath_softc *sc)
{
INIT_WORK(&sc->chanctx_work, ath_chanctx_work);
- setup_timer(&sc->offchannel.timer, ath_offchannel_timer,
- (unsigned long)sc);
- setup_timer(&sc->sched.timer, ath_chanctx_timer,
- (unsigned long)sc);
+ timer_setup(&sc->offchannel.timer, ath_offchannel_timer, 0);
+ timer_setup(&sc->sched.timer, ath_chanctx_timer, 0);
init_completion(&sc->go_beacon);
}
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 01fa30117288..9e8aed5c478c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -916,7 +916,7 @@ static int open_file_regdump(struct inode *inode, struct file *file)
u8 *buf;
int i, j = 0;
unsigned long num_regs, regdump_len, max_reg_offset;
- const struct reg_hole {
+ static const struct reg_hole {
u32 start;
u32 end;
} reg_hole_list[] = {
@@ -1167,7 +1167,7 @@ static ssize_t write_file_tpc(struct file *file, const char __user *user_buf,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val < 0 || val > 1)
+ if (val > 1)
return -EINVAL;
tpc_enabled = !!val;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 1ece42c2443d..40a397fd0e0e 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -326,7 +326,7 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
if (ard.ext_rssi & 0x80)
ard.ext_rssi = 0;
- vdata_end = (char *)data + datalen;
+ vdata_end = data + datalen;
ard.pulse_bw_info = vdata_end[-1];
ard.pulse_length_ext = vdata_end[-2];
ard.pulse_length_pri = vdata_end[-3];
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index ddb28861e7fe..b457e52dd365 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -191,9 +191,9 @@ static void ath_mci_ftp_adjust(struct ath_softc *sc)
* 45ms, bt traffic will be given priority during 55% of this
* period while wlan gets remaining 45%
*/
-static void ath_btcoex_period_timer(unsigned long data)
+static void ath_btcoex_period_timer(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_softc *sc = from_timer(sc, t, btcoex.period_timer);
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
enum ath_stomp_type stomp_type;
@@ -252,9 +252,9 @@ skip_hw_wakeup:
* Generic tsf based hw timer which configures weight
* registers to time slice between wlan and bt traffic
*/
-static void ath_btcoex_no_stomp_timer(unsigned long arg)
+static void ath_btcoex_no_stomp_timer(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_softc *sc = from_timer(sc, t, btcoex.no_stomp_timer);
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -284,10 +284,8 @@ static void ath_init_btcoex_timer(struct ath_softc *sc)
btcoex->btcoex_period / 100;
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
- setup_timer(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer,
- (unsigned long) sc);
+ timer_setup(&btcoex->period_timer, ath_btcoex_period_timer, 0);
+ timer_setup(&btcoex->no_stomp_timer, ath_btcoex_no_stomp_timer, 0);
spin_lock_init(&btcoex->btcoex_lock);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index c5f4dd808745..56676eaff24c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -424,7 +424,7 @@ static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
static void hif_usb_start(void *hif_handle)
{
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct hif_device_usb *hif_dev = hif_handle;
unsigned long flags;
hif_dev->flags |= HIF_USB_START;
@@ -436,7 +436,7 @@ static void hif_usb_start(void *hif_handle)
static void hif_usb_stop(void *hif_handle)
{
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct hif_device_usb *hif_dev = hif_handle;
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
@@ -457,7 +457,7 @@ static void hif_usb_stop(void *hif_handle)
static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
{
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct hif_device_usb *hif_dev = hif_handle;
int ret = 0;
switch (pipe_id) {
@@ -492,7 +492,7 @@ static inline bool check_index(struct sk_buff *skb, u8 idx)
static void hif_usb_sta_drain(void *hif_handle, u8 idx)
{
- struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
+ struct hif_device_usb *hif_dev = hif_handle;
struct sk_buff *skb, *tmp;
unsigned long flags;
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 16dff4b89a86..9f64e32381f9 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -584,7 +584,7 @@ void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
void ath9k_tx_failed_tasklet(unsigned long data);
-void ath9k_htc_tx_cleanup_timer(unsigned long data);
+void ath9k_htc_tx_cleanup_timer(struct timer_list *t);
bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv);
int ath9k_rx_init(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 2c0e4d26e8f9..f20c839aeda2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -384,7 +384,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- bool *beacon_configured = (bool *)data;
+ bool *beacon_configured = data;
struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
if (vif->type == NL80211_IFTYPE_STATION &&
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index da2164b0cccc..e89e5ef2c2a4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -233,7 +233,7 @@ static void ath9k_reg_notifier(struct wiphy *wiphy,
static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
__be32 val, reg = cpu_to_be32(reg_offset);
@@ -255,7 +255,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
static void ath9k_multi_regread(void *hw_priv, u32 *addr,
u32 *val, u16 count)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
__be32 tmpaddr[8];
@@ -301,7 +301,7 @@ static void ath9k_regwrite_multi(struct ath_common *common)
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
const __be32 buf[2] = {
@@ -322,7 +322,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -345,7 +345,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -357,7 +357,7 @@ static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
static void ath9k_enable_regwrite_buffer(void *hw_priv)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -366,7 +366,7 @@ static void ath9k_enable_regwrite_buffer(void *hw_priv)
static void ath9k_regwrite_flush(void *hw_priv)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -383,7 +383,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
static void ath9k_reg_rmw_buffer(void *hw_priv,
u32 reg_offset, u32 set, u32 clr)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
u32 rsp_status;
@@ -421,7 +421,7 @@ static void ath9k_reg_rmw_buffer(void *hw_priv,
static void ath9k_reg_rmw_flush(void *hw_priv)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
u32 rsp_status;
@@ -453,7 +453,7 @@ static void ath9k_reg_rmw_flush(void *hw_priv)
static void ath9k_enable_rmw_buffer(void *hw_priv)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -466,7 +466,7 @@ static void ath9k_enable_rmw_buffer(void *hw_priv)
static u32 ath9k_reg_rmw_single(void *hw_priv,
u32 reg_offset, u32 set, u32 clr)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
struct register_rmw buf, buf_ret;
@@ -490,7 +490,7 @@ static u32 ath9k_reg_rmw_single(void *hw_priv,
static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
@@ -654,8 +654,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
- setup_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer,
- (unsigned long)priv);
+ timer_setup(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer, 0);
/*
* Cache line size is used to size and align various
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index a553c91d41a1..f808e5833d7e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1483,7 +1483,7 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_priv *priv = data;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b38a586ea59a..585736a837ed 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -641,7 +641,7 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
{
- struct wmi_event_txstatus *txs = (struct wmi_event_txstatus *)wmi_event;
+ struct wmi_event_txstatus *txs = wmi_event;
struct __wmi_event_txstatus *__txs;
struct sk_buff *skb;
struct ath9k_htc_tx_event *tx_pend;
@@ -684,7 +684,7 @@ void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event)
void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
enum htc_endpoint_id ep_id, bool txok)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
+ struct ath9k_htc_priv *priv = drv_priv;
struct ath9k_htc_tx_ctl *tx_ctl;
struct sk_buff_head *epid_queue;
@@ -752,9 +752,9 @@ static void ath9k_htc_tx_cleanup_queue(struct ath9k_htc_priv *priv,
}
}
-void ath9k_htc_tx_cleanup_timer(unsigned long data)
+void ath9k_htc_tx_cleanup_timer(struct timer_list *t)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) data;
+ struct ath9k_htc_priv *priv = from_timer(priv, t, tx.cleanup_timer);
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_htc_tx_event *event, *tmp;
struct sk_buff *skb;
@@ -1103,7 +1103,7 @@ requeue:
void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
enum htc_endpoint_id ep_id)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
+ struct ath9k_htc_priv *priv = drv_priv;
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
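The hunks above follow the kernel's timer-API conversion: setup_timer() smuggled the owning object through an unsigned long .data field, whereas timer_setup() hands the callback the struct timer_list itself and the owner is recovered with from_timer() (a container_of() keyed on the field name). A minimal sketch of the converted shape, using a hypothetical my_priv structure rather than the driver's own types:

```c
#include <linux/timer.h>

struct my_priv {
	struct timer_list cleanup_timer;
	/* ... other driver state ... */
};

/* New-style callback: receives the timer, not an opaque data word. */
static void my_cleanup_timer_fn(struct timer_list *t)
{
	/* from_timer() recovers the enclosing structure from the field name. */
	struct my_priv *priv = from_timer(priv, t, cleanup_timer);

	/* ... use priv, possibly re-arm the timer ... */
}

static void my_init(struct my_priv *priv)
{
	/* Replaces setup_timer(&priv->cleanup_timer, fn, (unsigned long)priv). */
	timer_setup(&priv->cleanup_timer, my_cleanup_timer_fn, 0);
}
```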
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index bb7936090b91..fa58a32227f5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -117,7 +117,7 @@ static const struct ath_ps_ops ath9k_ps_ops = {
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
@@ -132,7 +132,7 @@ static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
u32 val;
@@ -172,7 +172,7 @@ static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_hw *ah = hw_priv;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
unsigned long uninitialized_var(flags);
@@ -275,7 +275,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
if (!dd->dd_desc)
return -ENOMEM;
- ds = (u8 *) dd->dd_desc;
+ ds = dd->dd_desc;
ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -369,7 +369,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+ timer_setup(&common->ani.timer, ath_ani_calibrate, 0);
common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
@@ -678,7 +678,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
- setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
+ timer_setup(&sc->sleep_timer, ath_ps_full_sleep, 0);
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 27c50562dc47..9d84003db800 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -301,11 +301,11 @@ fail_paprd:
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_common *common = from_timer(common, t, ani.timer);
+ struct ath_softc *sc = (struct ath_softc *)common->priv;
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
bool longcal = false;
bool shortcal = false;
bool aniflag = false;
@@ -367,10 +367,10 @@ void ath_ani_calibrate(unsigned long data)
/* Call ANI routine if necessary */
if (aniflag) {
- spin_lock(&common->cc_lock);
+ spin_lock_irqsave(&common->cc_lock, flags);
ath9k_hw_ani_monitor(ah, ah->curchan);
ath_update_survey_stats(sc);
- spin_unlock(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
}
/* Perform calibration if necessary */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 8b4ac7f0a09b..a3be8add56e1 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -93,15 +93,16 @@ static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
return ret;
}
-void ath_ps_full_sleep(unsigned long data)
+void ath_ps_full_sleep(struct timer_list *t)
{
- struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_softc *sc = from_timer(sc, t, sleep_timer);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
bool reset;
- spin_lock(&common->cc_lock);
+ spin_lock_irqsave(&common->cc_lock, flags);
ath_hw_cycle_counters_update(common);
- spin_unlock(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
ath9k_hw_setrxabort(sc->sc_ah, 1);
ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
@@ -394,10 +395,10 @@ void ath9k_tasklet(unsigned long data)
if ((ah->config.hw_hang_checks & HW_BB_WATCHDOG) &&
(status & ATH9K_INT_BB_WATCHDOG)) {
- spin_lock(&common->cc_lock);
+ spin_lock_irqsave(&common->cc_lock, flags);
ath_hw_cycle_counters_update(common);
ar9003_hw_bb_watchdog_dbg_info(ah);
- spin_unlock(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
if (ar9003_hw_bb_watchdog_check(ah)) {
type = RESET_TYPE_BB_WATCHDOG;
@@ -1193,7 +1194,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
static void ath9k_tpc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- int *power = (int *)data;
+ int *power = data;
if (*power < vif->bss_conf.txpower)
*power = vif->bss_conf.txpower;
@@ -1955,12 +1956,13 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
+ unsigned long flags;
int pos;
if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
- spin_lock_bh(&common->cc_lock);
+ spin_lock_irqsave(&common->cc_lock, flags);
if (idx == 0)
ath_update_survey_stats(sc);
@@ -1974,7 +1976,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
- spin_unlock_bh(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
return -ENOENT;
}
@@ -1982,7 +1984,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
pos = chan->hw_value;
memcpy(survey, &sc->survey[pos], sizeof(*survey));
survey->channel = chan;
- spin_unlock_bh(&common->cc_lock);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
return 0;
}
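The cc_lock changes in this file (and in link.c above) switch plain spin_lock()/spin_lock_bh() calls to the _irqsave/_irqrestore variants, which save the current interrupt state into a local flags word and disable local interrupts while the lock is held, so the same lock can be taken from process, softirq and hard-irq context without risk of deadlock. A generic sketch of the pattern, with a placeholder lock and counter rather than the ath9k fields:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static unsigned long cycle_count;

/* Safe from any context: local interrupts are disabled while the lock
 * is held and restored to their previous state afterwards. */
static void stats_update(unsigned long delta)
{
	unsigned long flags;

	spin_lock_irqsave(&stats_lock, flags);
	cycle_count += delta;
	spin_unlock_irqrestore(&stats_lock, flags);
}
```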
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index cf23fd815211..39d46c203f6b 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -453,7 +453,7 @@ int ath_mci_setup(struct ath_softc *sc)
mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
- mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_addr = mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index 49ed1afb913c..fe3a8263b224 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -179,6 +179,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
ssize_t len;
int r;
+ if (count < 1)
+ return -EINVAL;
+
if (sc->cur_chan->nvifs > 1)
return -EOPNOTSUPP;
@@ -186,6 +189,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
+ buf[len] = '\0';
+
if (strtobool(buf, &start))
return -EINVAL;
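The tx99 change guards against two classic debugfs-write pitfalls: a zero-length write, and a user buffer copied without a terminating NUL before being handed to a string parser such as strtobool(). A hedged sketch of the hardened handler shape (names and buffer size are illustrative, not the driver's):

```c
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t my_write_bool(struct file *file, const char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	bool start;

	if (count < 1)
		return -EINVAL;

	/* Leave room for the terminating NUL that strtobool() expects. */
	len = min_t(size_t, count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	if (strtobool(buf, &start))
		return -EINVAL;

	/* ... act on 'start' ... */
	return count;
}
```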
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 64a354fa78ab..b0b5579b7560 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -159,7 +159,7 @@ void ath9k_wmi_event_tasklet(unsigned long data)
switch (cmd_id) {
case WMI_SWBA_EVENTID:
- swba = (struct wmi_event_swba *) wmi_event;
+ swba = wmi_event;
ath9k_htc_swba(priv, swba);
break;
case WMI_FATAL_EVENTID:
@@ -207,7 +207,7 @@ static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
enum htc_endpoint_id epid)
{
- struct wmi *wmi = (struct wmi *) priv;
+ struct wmi *wmi = priv;
struct wmi_cmd_hdr *hdr;
u16 cmd_id;
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index a0410fe8c03a..9a44d004c206 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 16
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 50c43b4382ba..3b09435104eb 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_WCN36XX) := wcn36xx.o
wcn36xx-y += main.o \
dxe.o \
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 35bd50bcbbd5..f7d228b5ba93 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -812,7 +812,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
if (!sta) {
wcn36xx_err("sta %pM is not found\n",
bss_conf->bssid);
- rcu_read_unlock();
goto out;
}
sta_priv = wcn36xx_sta_to_priv(sta);
@@ -1136,7 +1135,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
BIT(NL80211_IFTYPE_MESH_POINT);
wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
- wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
+ if (wcn->rf_id != RF_IRIS_WCN3620)
+ wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
@@ -1169,6 +1169,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
struct platform_device *pdev)
{
struct device_node *mmio_node;
+ struct device_node *iris_node;
struct resource *res;
int index;
int ret;
@@ -1231,6 +1232,14 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
goto unmap_ccu;
}
+ /* External RF module */
+ iris_node = of_get_child_by_name(mmio_node, "iris");
+ if (iris_node) {
+ if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
+ wcn->rf_id = RF_IRIS_WCN3620;
+ of_node_put(iris_node);
+ }
+
of_node_put(mmio_node);
return 0;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 6aefba4c0cda..81017e6703b4 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -94,6 +94,9 @@ enum wcn36xx_ampdu_state {
#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+#define RF_UNKNOWN 0x0000
+#define RF_IRIS_WCN3620 0x3620
+
static inline void buff_to_be(u32 *buf, size_t len)
{
int i;
@@ -241,6 +244,9 @@ struct wcn36xx {
struct sk_buff *tx_ack_skb;
+ /* RF module */
+ unsigned rf_id;
+
#ifdef CONFIG_WCN36XX_DEBUGFS
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
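Together these wcn36xx hunks detect the external RF module from an optional "iris" child node in the device tree, record it in the new rf_id field, and only register the 5 GHz band when the RF is not a WCN3620. The of_* idiom involved, reduced to a stand-alone helper with the compatible string taken from the hunk:

```c
#include <linux/of.h>

#define RF_UNKNOWN	0x0000
#define RF_IRIS_WCN3620	0x3620

static unsigned int detect_rf_id(struct device_node *parent)
{
	struct device_node *iris;
	unsigned int rf_id = RF_UNKNOWN;

	iris = of_get_child_by_name(parent, "iris");
	if (iris) {
		if (of_device_is_compatible(iris, "qcom,wcn3620"))
			rf_id = RF_IRIS_WCN3620;
		/* of_get_child_by_name() takes a reference; drop it. */
		of_node_put(iris);
	}

	return rf_id;
}
```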
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index d27efe83748b..398edd2a7f2b 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_WIL6210) += wil6210.o
wil6210-y := main.o
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 6db00c167d2e..e58dc6dc1f9c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1048,50 +1048,6 @@ static const struct file_operations fops_bf = {
.llseek = seq_lseek,
};
-/*---------SSID------------*/
-static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct wil6210_priv *wil = file->private_data;
- struct wireless_dev *wdev = wil_to_wdev(wil);
-
- return simple_read_from_buffer(user_buf, count, ppos,
- wdev->ssid, wdev->ssid_len);
-}
-
-static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct wil6210_priv *wil = file->private_data;
- struct wireless_dev *wdev = wil_to_wdev(wil);
- struct net_device *ndev = wil_to_ndev(wil);
-
- if (*ppos != 0) {
- wil_err(wil, "Unable to set SSID substring from [%d]\n",
- (int)*ppos);
- return -EINVAL;
- }
-
- if (count > sizeof(wdev->ssid)) {
- wil_err(wil, "SSID too long, len = %d\n", (int)count);
- return -EINVAL;
- }
- if (netif_running(ndev)) {
- wil_err(wil, "Unable to change SSID on running interface\n");
- return -EINVAL;
- }
-
- wdev->ssid_len = count;
- return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
- buf, count);
-}
-
-static const struct file_operations fops_ssid = {
- .read = wil_read_file_ssid,
- .write = wil_write_file_ssid,
- .open = simple_open,
-};
-
/*---------temp------------*/
static void print_temp(struct seq_file *s, const char *prefix, u32 t)
{
@@ -1695,7 +1651,6 @@ static const struct {
{"stations", 0444, &fops_sta},
{"desc", 0444, &fops_txdesc},
{"bf", 0444, &fops_bf},
- {"ssid", 0644, &fops_ssid},
{"mem_val", 0644, &fops_memread},
{"reset", 0244, &fops_reset},
{"rxon", 0244, &fops_rxon},
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index bac829aa950d..885924abf61c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -336,9 +336,9 @@ static void wil_disconnect_worker(struct work_struct *work)
clear_bit(wil_status_fwconnecting, wil->status);
}
-static void wil_connect_timer_fn(ulong x)
+static void wil_connect_timer_fn(struct timer_list *t)
{
- struct wil6210_priv *wil = (void *)x;
+ struct wil6210_priv *wil = from_timer(wil, t, connect_timer);
bool q;
wil_err(wil, "Connect timeout detected, disconnect station\n");
@@ -351,9 +351,9 @@ static void wil_connect_timer_fn(ulong x)
wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
}
-static void wil_scan_timer_fn(ulong x)
+static void wil_scan_timer_fn(struct timer_list *t)
{
- struct wil6210_priv *wil = (void *)x;
+ struct wil6210_priv *wil = from_timer(wil, t, scan_timer);
clear_bit(wil_status_fwready, wil->status);
wil_err(wil, "Scan timeout detected, start fw error recovery\n");
@@ -540,10 +540,9 @@ int wil_priv_init(struct wil6210_priv *wil)
init_completion(&wil->halp.comp);
wil->bcast_vring = -1;
- setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
- setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
- setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
- (ulong)wil);
+ timer_setup(&wil->connect_timer, wil_connect_timer_fn, 0);
+ timer_setup(&wil->scan_timer, wil_scan_timer_fn, 0);
+ timer_setup(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn, 0);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 792484756654..7dbee2c3e482 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -65,9 +65,9 @@ bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
(request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
}
-void wil_p2p_discovery_timer_fn(ulong x)
+void wil_p2p_discovery_timer_fn(struct timer_list *t)
{
- struct wil6210_priv *wil = (void *)x;
+ struct wil6210_priv *wil = from_timer(wil, t, p2p.discovery_timer);
wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 315ec8b59662..1e340d04bd70 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -918,7 +918,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
-void wil_p2p_discovery_timer_fn(ulong x);
+void wil_p2p_discovery_timer_fn(struct timer_list *t);
int wil_p2p_search(struct wil6210_priv *wil,
struct cfg80211_scan_request *request);
int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 94bf01f8b2a8..e99e766a3028 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -518,11 +518,11 @@ exit:
/* LED trigger */
static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
-static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc, 0, 0);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
+static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
DEFINE_LED_TRIGGER(ledtrig_tx);
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
{
static int tx_lastactivity;
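This hunk also shows the two-argument DEFINE_TIMER(): a statically defined timer is now declared with just a name and a callback taking struct timer_list *, with no data or flags arguments. A compact sketch of a static timer declared and re-armed this way (the LED-blink body and interval are placeholders):

```c
#include <linux/jiffies.h>
#include <linux/timer.h>

static void blink_timerfunc(struct timer_list *unused);
/* Old form was DEFINE_TIMER(blink_timer, blink_timerfunc, 0, 0). */
static DEFINE_TIMER(blink_timer, blink_timerfunc);

static void blink_timerfunc(struct timer_list *unused)
{
	/* ... toggle an LED ... */
	mod_timer(&blink_timer, jiffies + HZ / 4);
}
```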
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index b68436b23a63..c9dd5e44c9c6 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -586,7 +586,7 @@ static int atmel_validate_channel(struct atmel_private *priv, int channel);
static void atmel_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 frame_len, u8 rssi);
-static void atmel_management_timer(u_long a);
+static void atmel_management_timer(struct timer_list *t);
static void atmel_send_command(struct atmel_private *priv, int command,
void *cmd, int cmd_size);
static int atmel_send_command_wait(struct atmel_private *priv, int command,
@@ -1579,11 +1579,9 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
priv->default_beacon_period = priv->beacon_period = 100;
priv->listen_interval = 1;
- init_timer(&priv->management_timer);
+ timer_setup(&priv->management_timer, atmel_management_timer, 0);
spin_lock_init(&priv->irqlock);
spin_lock_init(&priv->timerlock);
- priv->management_timer.function = atmel_management_timer;
- priv->management_timer.data = (unsigned long) dev;
dev->netdev_ops = &atmel_netdev_ops;
dev->wireless_handlers = &atmel_handler_def;
@@ -3435,10 +3433,9 @@ static void atmel_management_frame(struct atmel_private *priv,
}
/* run when timer expires */
-static void atmel_management_timer(u_long a)
+static void atmel_management_timer(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *) a;
- struct atmel_private *priv = netdev_priv(dev);
+ struct atmel_private *priv = from_timer(priv, t, management_timer);
unsigned long flags;
/* Check if the card has been yanked. */
diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile
index 27fab958e3d5..54f92ce49bb8 100644
--- a/drivers/net/wireless/broadcom/b43/Makefile
+++ b/drivers/net/wireless/broadcom/b43/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
b43-y += main.o
b43-y += bus.o
b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index d7d42f0b80c3..b77d1a904f7e 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_H_
#define B43_H_
diff --git a/drivers/net/wireless/broadcom/b43/bus.h b/drivers/net/wireless/broadcom/b43/bus.h
index 256c2c17939a..2a695f9e528b 100644
--- a/drivers/net/wireless/broadcom/b43/bus.h
+++ b/drivers/net/wireless/broadcom/b43/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_BUS_H_
#define B43_BUS_H_
diff --git a/drivers/net/wireless/broadcom/b43/debugfs.h b/drivers/net/wireless/broadcom/b43/debugfs.h
index d05377745011..0bf437c86c67 100644
--- a/drivers/net/wireless/broadcom/b43/debugfs.h
+++ b/drivers/net/wireless/broadcom/b43/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_DEBUGFS_H_
#define B43_DEBUGFS_H_
diff --git a/drivers/net/wireless/broadcom/b43/dma.h b/drivers/net/wireless/broadcom/b43/dma.h
index df8c8cdcbdb5..c2a357219d4b 100644
--- a/drivers/net/wireless/broadcom/b43/dma.h
+++ b/drivers/net/wireless/broadcom/b43/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_DMA_H_
#define B43_DMA_H_
diff --git a/drivers/net/wireless/broadcom/b43/leds.h b/drivers/net/wireless/broadcom/b43/leds.h
index 32b66d53cdac..5ebd1b2b1749 100644
--- a/drivers/net/wireless/broadcom/b43/leds.h
+++ b/drivers/net/wireless/broadcom/b43/leds.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_LEDS_H_
#define B43_LEDS_H_
diff --git a/drivers/net/wireless/broadcom/b43/lo.h b/drivers/net/wireless/broadcom/b43/lo.h
index 7b4df3883bc2..66e07ab79c51 100644
--- a/drivers/net/wireless/broadcom/b43/lo.h
+++ b/drivers/net/wireless/broadcom/b43/lo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_LO_H_
#define B43_LO_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h
index 0a92d01c21f9..b1cbfc02beb6 100644
--- a/drivers/net/wireless/broadcom/b43/phy_a.h
+++ b/drivers/net/wireless/broadcom/b43/phy_a.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_B43_PHY_A_H_
#define LINUX_B43_PHY_A_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.h b/drivers/net/wireless/broadcom/b43/phy_ac.h
index d1ca79e0eb24..02eb6c08670d 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ac.h
+++ b/drivers/net/wireless/broadcom/b43/phy_ac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_PHY_AC_H_
#define B43_PHY_AC_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index ced054a9850c..57a1ad8afa08 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_B43_PHY_COMMON_H_
#define LINUX_B43_PHY_COMMON_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c
index 822dcaa8ace6..f59c02166462 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.c
+++ b/drivers/net/wireless/broadcom/b43/phy_g.c
@@ -2297,7 +2297,7 @@ static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel)
static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- u8 ret[13];
+ u8 ret[13] = { 0 };
unsigned int channel = phy->channel;
unsigned int i, j, start, end;
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.h b/drivers/net/wireless/broadcom/b43/phy_g.h
index 5413c906a3e7..a27b8603c75f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.h
+++ b/drivers/net/wireless/broadcom/b43/phy_g.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_B43_PHY_G_H_
#define LINUX_B43_PHY_G_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index 718c90e81696..c3158d085c2b 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -119,7 +119,7 @@ static void b43_radio_2059_rcal(struct b43_wldev *dev)
/* Calibrate the internal RC oscillator? */
static void b43_radio_2057_rccal(struct b43_wldev *dev)
{
- const u16 radio_values[3][2] = {
+ static const u16 radio_values[3][2] = {
{ 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
};
int i;
@@ -154,7 +154,7 @@ static void b43_radio_2059_init_pre(struct b43_wldev *dev)
static void b43_radio_2059_init(struct b43_wldev *dev)
{
- const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3 };
+ static const u16 routing[] = { R2059_C1, R2059_C2, R2059_C3 };
int i;
/* Prepare (reset?) radio */
@@ -263,7 +263,7 @@ static void b43_phy_ht_reset_cca(struct b43_wldev *dev)
static void b43_phy_ht_zero_extg(struct b43_wldev *dev)
{
u8 i, j;
- u16 base[] = { 0x40, 0x60, 0x80 };
+ static const u16 base[] = { 0x40, 0x60, 0x80 };
for (i = 0; i < ARRAY_SIZE(base); i++) {
for (j = 0; j < 4; j++)
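The phy_ht.c hunks promote per-call lookup tables to static const: without static the array contents are rebuilt on the kernel stack on every call, and without const they sit in writable memory, whereas static const places the table once in rodata. Illustrated with the radio_values table from the first hunk (the accessor around it is hypothetical):

```c
#include <linux/types.h>

static u16 pick_radio_value(int idx)
{
	/* Emitted once into .rodata; no per-call stack copy. */
	static const u16 radio_values[3][2] = {
		{ 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
	};

	if (idx < 0 || idx >= 3)
		return 0;
	return radio_values[idx][1];
}
```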
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.h b/drivers/net/wireless/broadcom/b43/phy_ht.h
index c086f56ce478..046753857493 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.h
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_PHY_HT_H_
#define B43_PHY_HT_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.h b/drivers/net/wireless/broadcom/b43/phy_lcn.h
index 6a7092e13fff..0b1cfaf6394c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lcn.h
+++ b/drivers/net/wireless/broadcom/b43/phy_lcn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_PHY_LCN_H_
#define B43_PHY_LCN_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.h b/drivers/net/wireless/broadcom/b43/phy_lp.h
index 62737f700cbc..50bef2ea6e52 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.h
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_B43_PHY_LP_H_
#define LINUX_B43_PHY_LP_H_
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.h b/drivers/net/wireless/broadcom/b43/phy_n.h
index a6da2c31a99c..b96ded0ef539 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.h
+++ b/drivers/net/wireless/broadcom/b43/phy_n.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_NPHY_H_
#define B43_NPHY_H_
diff --git a/drivers/net/wireless/broadcom/b43/pio.h b/drivers/net/wireless/broadcom/b43/pio.h
index 1e516147424f..ffbfec622f82 100644
--- a/drivers/net/wireless/broadcom/b43/pio.h
+++ b/drivers/net/wireless/broadcom/b43/pio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_PIO_H_
#define B43_PIO_H_
diff --git a/drivers/net/wireless/broadcom/b43/ppr.h b/drivers/net/wireless/broadcom/b43/ppr.h
index 24d7447e9f01..094389f9f477 100644
--- a/drivers/net/wireless/broadcom/b43/ppr.h
+++ b/drivers/net/wireless/broadcom/b43/ppr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_B43_PPR_H_
#define LINUX_B43_PPR_H_
diff --git a/drivers/net/wireless/broadcom/b43/radio_2055.h b/drivers/net/wireless/broadcom/b43/radio_2055.h
index 67f96122f8d8..ad7271cde001 100644
--- a/drivers/net/wireless/broadcom/b43/radio_2055.h
+++ b/drivers/net/wireless/broadcom/b43/radio_2055.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_RADIO_2055_H_
#define B43_RADIO_2055_H_
diff --git a/drivers/net/wireless/broadcom/b43/radio_2056.h b/drivers/net/wireless/broadcom/b43/radio_2056.h
index 5b86673459fa..59297fdce5e3 100644
--- a/drivers/net/wireless/broadcom/b43/radio_2056.h
+++ b/drivers/net/wireless/broadcom/b43/radio_2056.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_RADIO_2056_H_
#define B43_RADIO_2056_H_
diff --git a/drivers/net/wireless/broadcom/b43/radio_2057.h b/drivers/net/wireless/broadcom/b43/radio_2057.h
index 220d080238ff..d7959da77b51 100644
--- a/drivers/net/wireless/broadcom/b43/radio_2057.h
+++ b/drivers/net/wireless/broadcom/b43/radio_2057.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_RADIO_2057_H_
#define B43_RADIO_2057_H_
diff --git a/drivers/net/wireless/broadcom/b43/radio_2059.h b/drivers/net/wireless/broadcom/b43/radio_2059.h
index 9e22fb60588b..32c0025bce9d 100644
--- a/drivers/net/wireless/broadcom/b43/radio_2059.h
+++ b/drivers/net/wireless/broadcom/b43/radio_2059.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_RADIO_2059_H_
#define B43_RADIO_2059_H_
diff --git a/drivers/net/wireless/broadcom/b43/rfkill.h b/drivers/net/wireless/broadcom/b43/rfkill.h
index f046c3ca0519..8682ac5b828a 100644
--- a/drivers/net/wireless/broadcom/b43/rfkill.h
+++ b/drivers/net/wireless/broadcom/b43/rfkill.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_RFKILL_H_
#define B43_RFKILL_H_
diff --git a/drivers/net/wireless/broadcom/b43/sdio.h b/drivers/net/wireless/broadcom/b43/sdio.h
index 1e93926f388f..aa5693b123ad 100644
--- a/drivers/net/wireless/broadcom/b43/sdio.h
+++ b/drivers/net/wireless/broadcom/b43/sdio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_SDIO_H_
#define B43_SDIO_H_
diff --git a/drivers/net/wireless/broadcom/b43/sysfs.h b/drivers/net/wireless/broadcom/b43/sysfs.h
index 12bda9ef1a85..e70e6cff3c53 100644
--- a/drivers/net/wireless/broadcom/b43/sysfs.h
+++ b/drivers/net/wireless/broadcom/b43/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_SYSFS_H_
#define B43_SYSFS_H_
diff --git a/drivers/net/wireless/broadcom/b43/tables.h b/drivers/net/wireless/broadcom/b43/tables.h
index 80e73c7cbac5..3b0777f15f3e 100644
--- a/drivers/net/wireless/broadcom/b43/tables.h
+++ b/drivers/net/wireless/broadcom/b43/tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_TABLES_H_
#define B43_TABLES_H_
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.h b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
index 84f1d265f657..62002098bbda 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.h
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_TABLES_LPPHY_H_
#define B43_TABLES_LPPHY_H_
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.h b/drivers/net/wireless/broadcom/b43/tables_nphy.h
index b51f386db02f..3876786d2692 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.h
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_TABLES_NPHY_H_
#define B43_TABLES_NPHY_H_
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_ht.h b/drivers/net/wireless/broadcom/b43/tables_phy_ht.h
index 1b5ef2bc770c..7ed057118ae3 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_ht.h
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_ht.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_TABLES_PHY_HT_H_
#define B43_TABLES_PHY_HT_H_
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
index caff9db6831f..5ea6c15e851e 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_TABLES_PHY_LCN_H_
#define B43_TABLES_PHY_LCN_H_
diff --git a/drivers/net/wireless/broadcom/b43/wa.h b/drivers/net/wireless/broadcom/b43/wa.h
index e163c5e56e81..f3459b99d83b 100644
--- a/drivers/net/wireless/broadcom/b43/wa.h
+++ b/drivers/net/wireless/broadcom/b43/wa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_WA_H_
#define B43_WA_H_
diff --git a/drivers/net/wireless/broadcom/b43/xmit.h b/drivers/net/wireless/broadcom/b43/xmit.h
index ba6115308068..6524a75bb73b 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.h
+++ b/drivers/net/wireless/broadcom/b43/xmit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43_XMIT_H_
#define B43_XMIT_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/Makefile b/drivers/net/wireless/broadcom/b43legacy/Makefile
index 227a77e84362..f8b392f09009 100644
--- a/drivers/net/wireless/broadcom/b43legacy/Makefile
+++ b/drivers/net/wireless/broadcom/b43legacy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# b43legacy core
b43legacy-y += main.o
b43legacy-y += ilt.o
diff --git a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
index 482476fdb1f3..6b0cec467938 100644
--- a/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/broadcom/b43legacy/b43legacy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_H_
#define B43legacy_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.h b/drivers/net/wireless/broadcom/b43legacy/debugfs.h
index 9ee32158b947..7a37764406b1 100644
--- a/drivers/net/wireless/broadcom/b43legacy/debugfs.h
+++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_DEBUGFS_H_
#define B43legacy_DEBUGFS_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.h b/drivers/net/wireless/broadcom/b43legacy/dma.h
index c3282f906bc7..b5c1a51db2a4 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.h
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_DMA_H_
#define B43legacy_DMA_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/ilt.h b/drivers/net/wireless/broadcom/b43legacy/ilt.h
index 48bcf37eccb8..ce7a61e2efb1 100644
--- a/drivers/net/wireless/broadcom/b43legacy/ilt.h
+++ b/drivers/net/wireless/broadcom/b43legacy/ilt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_ILT_H_
#define B43legacy_ILT_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/leds.h b/drivers/net/wireless/broadcom/b43legacy/leds.h
index 9ff6750dc57f..389ae06a2d10 100644
--- a/drivers/net/wireless/broadcom/b43legacy/leds.h
+++ b/drivers/net/wireless/broadcom/b43legacy/leds.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_LEDS_H_
#define B43legacy_LEDS_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.h b/drivers/net/wireless/broadcom/b43legacy/pio.h
index 8e6773ea6e75..1cd1b9ca5e9c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/pio.h
+++ b/drivers/net/wireless/broadcom/b43legacy/pio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_PIO_H_
#define B43legacy_PIO_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c
index 9501420340a9..eab1c9387846 100644
--- a/drivers/net/wireless/broadcom/b43legacy/radio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/radio.c
@@ -280,7 +280,7 @@ u8 b43legacy_radio_aci_detect(struct b43legacy_wldev *dev, u8 channel)
u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
{
struct b43legacy_phy *phy = &dev->phy;
- u8 ret[13];
+ u8 ret[13] = { 0 };
unsigned int channel = phy->channel;
unsigned int i;
unsigned int j;
diff --git a/drivers/net/wireless/broadcom/b43legacy/rfkill.h b/drivers/net/wireless/broadcom/b43legacy/rfkill.h
index 75585571c544..7f314eb815d4 100644
--- a/drivers/net/wireless/broadcom/b43legacy/rfkill.h
+++ b/drivers/net/wireless/broadcom/b43legacy/rfkill.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_RFKILL_H_
#define B43legacy_RFKILL_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.h b/drivers/net/wireless/broadcom/b43legacy/sysfs.h
index 417d509803c7..ea9d783e6796 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.h
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_SYSFS_H_
#define B43legacy_SYSFS_H_
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.h b/drivers/net/wireless/broadcom/b43legacy/xmit.h
index 289db00a4a7b..e4ef869f0b8c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.h
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef B43legacy_XMIT_H_
#define B43legacy_XMIT_H_
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 3559fb5b8fb0..03aae6bc1838 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -280,9 +280,9 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
/**
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
{
- struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+ struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
brcmf_dbg(TRACE, "enter\n");
bt_local->timer_on = false;
@@ -380,7 +380,7 @@ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
/* Set up timer for BT */
btci->timer_on = false;
btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
- setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+ timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
btci->cfg = cfg;
btci->saved_regs_part1 = false;
btci->saved_regs_part2 = false;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 163ddc49f951..0b76a615708e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -71,6 +71,7 @@ struct brcmf_bus_dcmd {
* @wowl_config: specify if dongle is configured for wowl when going to suspend
* @get_ramsize: obtain size of device memory.
* @get_memdump: obtain device memory dump in provided buffer.
+ * @get_fwname: obtain firmware name.
*
* This structure provides an abstract interface towards the
* bus specific driver. For control messages to common driver
@@ -87,6 +88,8 @@ struct brcmf_bus_ops {
void (*wowl_config)(struct device *dev, bool enabled);
size_t (*get_ramsize)(struct device *dev);
int (*get_memdump)(struct device *dev, void *data, size_t len);
+ int (*get_fwname)(struct device *dev, uint chip, uint chiprev,
+ unsigned char *fw_name);
};
@@ -224,6 +227,13 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
return bus->ops->get_memdump(bus->dev, data, len);
}
+static inline
+int brcmf_bus_get_fwname(struct brcmf_bus *bus, uint chip, uint chiprev,
+ unsigned char *fw_name)
+{
+ return bus->ops->get_fwname(bus->dev, chip, chiprev, fw_name);
+}
+
/*
* interface functions from common layer
*/
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 4157c90ad973..15fa00d79fc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -472,47 +472,6 @@ send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
return err;
}
-static s32
-brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
-{
- s32 err;
- u32 mode;
-
- if (enable)
- mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
- else
- mode = 0;
-
- /* Try to set and enable ARP offload feature, this may fail, then it */
- /* is simply not supported and err 0 will be returned */
- err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
- if (err) {
- brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
- mode, err);
- err = 0;
- } else {
- err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
- if (err) {
- brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
- enable, err);
- err = 0;
- } else
- brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
- enable, mode);
- }
-
- err = brcmf_fil_iovar_int_set(ifp, "ndoe", enable);
- if (err) {
- brcmf_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n",
- enable, err);
- err = 0;
- } else
- brcmf_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n",
- enable, mode);
-
- return err;
-}
-
static void
brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
{
@@ -1084,7 +1043,6 @@ brcmf_do_escan(struct brcmf_if *ifp, struct cfg80211_scan_request *request)
{
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 err;
- u32 passive_scan;
struct brcmf_scan_results *results;
struct escan_info *escan = &cfg->escan_info;
@@ -1092,13 +1050,7 @@ brcmf_do_escan(struct brcmf_if *ifp, struct cfg80211_scan_request *request)
escan->ifp = ifp;
escan->wiphy = cfg->wiphy;
escan->escan_state = WL_ESCAN_STATE_SCANNING;
- passive_scan = cfg->active_scan ? 0 : 1;
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
- passive_scan);
- if (err) {
- brcmf_err("error (%d)\n", err);
- return err;
- }
+
brcmf_scan_config_mpc(ifp, 0);
results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
results->version = 0;
@@ -1112,21 +1064,16 @@ brcmf_do_escan(struct brcmf_if *ifp, struct cfg80211_scan_request *request)
}
static s32
-brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
{
- struct brcmf_if *ifp = vif->ifp;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- struct cfg80211_ssid *ssids;
- u32 passive_scan;
- bool escan_req;
- bool spec_scan;
- s32 err;
- struct brcmf_ssid_le ssid_le;
- u32 SSID_len;
+ struct brcmf_cfg80211_vif *vif;
+ s32 err = 0;
- brcmf_dbg(SCAN, "START ESCAN\n");
+ brcmf_dbg(TRACE, "Enter\n");
+ vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev);
+ if (!check_vif_up(vif))
+ return -EIO;
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
@@ -1142,8 +1089,8 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
cfg->scan_status);
return -EAGAIN;
}
- if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
- brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state);
+ if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state)) {
+ brcmf_err("Connecting: status (%lu)\n", vif->sme_state);
return -EAGAIN;
}
@@ -1151,96 +1098,38 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
- escan_req = false;
- if (request) {
- /* scan bss */
- ssids = request->ssids;
- escan_req = true;
- } else {
- /* scan in ibss */
- /* we don't do escan in ibss */
- ssids = this_ssid;
- }
+ brcmf_dbg(SCAN, "START ESCAN\n");
cfg->scan_request = request;
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
- if (escan_req) {
- cfg->escan_info.run = brcmf_run_escan;
- err = brcmf_p2p_scan_prep(wiphy, request, vif);
- if (err)
- goto scan_out;
- err = brcmf_do_escan(vif->ifp, request);
- if (err)
- goto scan_out;
- } else {
- brcmf_dbg(SCAN, "ssid \"%s\", ssid_len (%d)\n",
- ssids->ssid, ssids->ssid_len);
- memset(&ssid_le, 0, sizeof(ssid_le));
- SSID_len = min_t(u8, sizeof(ssid_le.SSID), ssids->ssid_len);
- ssid_le.SSID_len = cpu_to_le32(0);
- spec_scan = false;
- if (SSID_len) {
- memcpy(ssid_le.SSID, ssids->ssid, SSID_len);
- ssid_le.SSID_len = cpu_to_le32(SSID_len);
- spec_scan = true;
- } else
- brcmf_dbg(SCAN, "Broadcast scan\n");
+ cfg->escan_info.run = brcmf_run_escan;
+ err = brcmf_p2p_scan_prep(wiphy, request, vif);
+ if (err)
+ goto scan_out;
- passive_scan = cfg->active_scan ? 0 : 1;
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
- passive_scan);
- if (err) {
- brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
- goto scan_out;
- }
- brcmf_scan_config_mpc(ifp, 0);
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, &ssid_le,
- sizeof(ssid_le));
- if (err) {
- if (err == -EBUSY)
- brcmf_dbg(INFO, "BUSY: scan for \"%s\" canceled\n",
- ssid_le.SSID);
- else
- brcmf_err("WLC_SCAN error (%d)\n", err);
+ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
+ request->ie, request->ie_len);
+ if (err)
+ goto scan_out;
- brcmf_scan_config_mpc(ifp, 1);
- goto scan_out;
- }
- }
+ err = brcmf_do_escan(vif->ifp, request);
+ if (err)
+ goto scan_out;
/* Arm scan timeout timer */
- mod_timer(&cfg->escan_timeout, jiffies +
- BRCMF_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+ mod_timer(&cfg->escan_timeout,
+ jiffies + msecs_to_jiffies(BRCMF_ESCAN_TIMER_INTERVAL_MS));
return 0;
scan_out:
+ brcmf_err("scan error (%d)\n", err);
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
cfg->scan_request = NULL;
return err;
}
-static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
-{
- struct brcmf_cfg80211_vif *vif;
- s32 err = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
- vif = container_of(request->wdev, struct brcmf_cfg80211_vif, wdev);
- if (!check_vif_up(vif))
- return -EIO;
-
- err = brcmf_cfg80211_escan(wiphy, vif, request, NULL);
-
- if (err)
- brcmf_err("scan error (%d)\n", err);
-
- brcmf_dbg(TRACE, "Exit\n");
- return err;
-}
-
static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
{
s32 err = 0;
@@ -3094,10 +2983,10 @@ static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
}
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
- (struct brcmf_cfg80211_info *)data;
+ from_timer(cfg, t, escan_timeout);
if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
@@ -3261,9 +3150,7 @@ static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
brcmf_cfg80211_escan_handler);
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
/* Init scan_timeout timer */
- init_timer(&cfg->escan_timeout);
- cfg->escan_timeout.data = (unsigned long) cfg;
- cfg->escan_timeout.function = brcmf_escan_timeout;
+ timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
INIT_WORK(&cfg->escan_timeout_work,
brcmf_cfg80211_escan_timeout_worker);
}
@@ -5877,7 +5764,6 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
cfg->scan_request = NULL;
cfg->pwr_save = true;
- cfg->active_scan = true; /* we do active scan per default */
cfg->dongle_up = false; /* dongle is not up yet */
err = brcmf_init_priv_mem(cfg);
if (err)
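Besides folding the old brcmf_cfg80211_escan() helper into brcmf_cfg80211_scan() and dropping the legacy non-escan path, the hunk re-arms the scan timeout with msecs_to_jiffies() instead of the open-coded `ms * HZ / 1000`, which handles HZ scaling correctly and rounds up rather than truncating. A small sketch of the arming pattern:

```c
#include <linux/jiffies.h>
#include <linux/timer.h>

static void arm_scan_timeout(struct timer_list *timer, unsigned int timeout_ms)
{
	/* msecs_to_jiffies() does the HZ conversion and rounds up, so a
	 * short timeout never collapses to an immediate expiry. */
	mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
}
```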
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 7b2835e5e434..b5b5f0f10b63 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -283,7 +283,6 @@ struct brcmf_cfg80211_wowl {
* @scan_status: scan activity on the dongle.
* @pub: common driver information.
* @channel: current channel.
- * @active_scan: current scan mode.
* @int_escan_map: bucket map for which internal e-scan is done.
* @ibss_starter: indicates this sta is ibss starter.
* @pwr_save: indicate whether dongle to support power save mode.
@@ -316,7 +315,6 @@ struct brcmf_cfg80211_info {
unsigned long scan_status;
struct brcmf_pub *pub;
u32 channel;
- bool active_scan;
u32 int_escan_map;
bool ibss_starter;
bool pwr_save;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 7a2b49587b4d..6a59d0609d30 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/module.h>
+#include <linux/firmware.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "core.h"
@@ -28,6 +29,7 @@
#include "tracepoint.h"
#include "common.h"
#include "of.h"
+#include "firmware.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -104,12 +106,140 @@ void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
brcmf_err("Set join_pref error (%d)\n", err);
}
+static int brcmf_c_download(struct brcmf_if *ifp, u16 flag,
+ struct brcmf_dload_data_le *dload_buf,
+ u32 len)
+{
+ s32 err;
+
+ flag |= (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT);
+ dload_buf->flag = cpu_to_le16(flag);
+ dload_buf->dload_type = cpu_to_le16(DL_TYPE_CLM);
+ dload_buf->len = cpu_to_le32(len);
+ dload_buf->crc = cpu_to_le32(0);
+ len = sizeof(*dload_buf) + len - 1;
+
+ err = brcmf_fil_iovar_data_set(ifp, "clmload", dload_buf, len);
+
+ return err;
+}
+
+static int brcmf_c_get_clm_name(struct brcmf_if *ifp, u8 *clm_name)
+{
+ struct brcmf_bus *bus = ifp->drvr->bus_if;
+ struct brcmf_rev_info *ri = &ifp->drvr->revinfo;
+ u8 fw_name[BRCMF_FW_NAME_LEN];
+ u8 *ptr;
+ size_t len;
+ s32 err;
+
+ memset(fw_name, 0, BRCMF_FW_NAME_LEN);
+ err = brcmf_bus_get_fwname(bus, ri->chipnum, ri->chiprev, fw_name);
+ if (err) {
+ brcmf_err("get firmware name failed (%d)\n", err);
+ goto done;
+ }
+
+ /* generate CLM blob file name */
+ ptr = strrchr(fw_name, '.');
+ if (!ptr) {
+ err = -ENOENT;
+ goto done;
+ }
+
+ len = ptr - fw_name + 1;
+ if (len + strlen(".clm_blob") > BRCMF_FW_NAME_LEN) {
+ err = -E2BIG;
+ } else {
+ strlcpy(clm_name, fw_name, len);
+ strlcat(clm_name, ".clm_blob", BRCMF_FW_NAME_LEN);
+ }
+done:
+ return err;
+}
+
+static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
+{
+ struct device *dev = ifp->drvr->bus_if->dev;
+ struct brcmf_dload_data_le *chunk_buf;
+ const struct firmware *clm = NULL;
+ u8 clm_name[BRCMF_FW_NAME_LEN];
+ u32 chunk_len;
+ u32 datalen;
+ u32 cumulative_len;
+ u16 dl_flag = DL_BEGIN;
+ u32 status;
+ s32 err;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ memset(clm_name, 0, BRCMF_FW_NAME_LEN);
+ err = brcmf_c_get_clm_name(ifp, clm_name);
+ if (err) {
+ brcmf_err("get CLM blob file name failed (%d)\n", err);
+ return err;
+ }
+
+ err = request_firmware(&clm, clm_name, dev);
+ if (err) {
+ if (err == -ENOENT) {
+ brcmf_dbg(INFO, "continue with CLM data currently present in firmware\n");
+ return 0;
+ }
+ brcmf_err("request CLM blob file failed (%d)\n", err);
+ return err;
+ }
+
+ chunk_buf = kzalloc(sizeof(*chunk_buf) + MAX_CHUNK_LEN - 1, GFP_KERNEL);
+ if (!chunk_buf) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ datalen = clm->size;
+ cumulative_len = 0;
+ do {
+ if (datalen > MAX_CHUNK_LEN) {
+ chunk_len = MAX_CHUNK_LEN;
+ } else {
+ chunk_len = datalen;
+ dl_flag |= DL_END;
+ }
+ memcpy(chunk_buf->data, clm->data + cumulative_len, chunk_len);
+
+ err = brcmf_c_download(ifp, dl_flag, chunk_buf, chunk_len);
+
+ dl_flag &= ~DL_BEGIN;
+
+ cumulative_len += chunk_len;
+ datalen -= chunk_len;
+ } while ((datalen > 0) && (err == 0));
+
+ if (err) {
+ brcmf_err("clmload (%zu byte file) failed (%d); ",
+ clm->size, err);
+ /* Retrieve clmload_status and print */
+ err = brcmf_fil_iovar_int_get(ifp, "clmload_status", &status);
+ if (err)
+ brcmf_err("get clmload_status failed (%d)\n", err);
+ else
+ brcmf_dbg(INFO, "clmload_status=%d\n", status);
+ err = -EIO;
+ }
+
+ kfree(chunk_buf);
+done:
+ release_firmware(clm);
+ return err;
+}
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_rev_info_le revinfo;
struct brcmf_rev_info *ri;
+ char *clmver;
char *ptr;
s32 err;
@@ -148,6 +278,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
}
ri->result = err;
+ /* Do any CLM downloading */
+ err = brcmf_c_process_clm_blob(ifp);
+ if (err < 0) {
+ brcmf_err("download CLM blob file failed, %d\n", err);
+ goto done;
+ }
+
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
strcpy(buf, "ver");
@@ -167,6 +304,26 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
ptr = strrchr(buf, ' ') + 1;
strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
+ /* Query for 'clmver' to get CLM version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ err = brcmf_fil_iovar_data_get(ifp, "clmver", buf, sizeof(buf));
+ if (err) {
+ brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
+ } else {
+ clmver = (char *)buf;
+ /* store CLM version for adding it to revinfo debugfs file */
+ memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+
+ /* Replace all newline/linefeed characters with space
+ * character
+ */
+ ptr = clmver;
+ while ((ptr = strnchr(ptr, '\n', sizeof(buf))) != NULL)
+ *ptr = ' ';
+
+ brcmf_dbg(INFO, "CLM version = %s\n", clmver);
+ }
+
/* set mpc */
err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
if (err) {
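brcmf_c_process_clm_blob() streams the CLM blob to the dongle in MAX_CHUNK_LEN pieces, marking the first chunk with DL_BEGIN and the last with DL_END. The loop structure, reduced to its essentials with a send() callback standing in for the clmload iovar write:

```c
#include <linux/kernel.h>
#include <linux/types.h>

#define MAX_CHUNK_LEN	1400
#define DL_BEGIN	0x0002
#define DL_END		0x0004

/* send() is a placeholder for the per-chunk firmware download call. */
static int download_blob(const u8 *data, u32 datalen,
			 int (*send)(u16 flag, const u8 *buf, u32 len))
{
	u32 sent = 0;
	u16 flag = DL_BEGIN;
	int err = 0;

	do {
		u32 chunk = min_t(u32, datalen, MAX_CHUNK_LEN);

		if (chunk == datalen)
			flag |= DL_END;		/* final piece */

		err = send(flag, data + sent, chunk);

		flag &= ~DL_BEGIN;	/* only the first chunk carries DL_BEGIN */
		sent += chunk;
		datalen -= chunk;
	} while (datalen > 0 && err == 0);

	return err;
}
```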
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 5cc3a07dda9e..930e423f83a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -71,6 +71,43 @@ struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
return ifp;
}
+void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
+{
+ s32 err;
+ u32 mode;
+
+ if (enable)
+ mode = BRCMF_ARP_OL_AGENT | BRCMF_ARP_OL_PEER_AUTO_REPLY;
+ else
+ mode = 0;
+
+ /* Try to set and enable ARP offload feature, this may fail, then it */
+ /* is simply not supported and err 0 will be returned */
+ err = brcmf_fil_iovar_int_set(ifp, "arp_ol", mode);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
+ mode, err);
+ } else {
+ err = brcmf_fil_iovar_int_set(ifp, "arpoe", enable);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n",
+ enable, err);
+ } else {
+ brcmf_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n",
+ enable, mode);
+ }
+ }
+
+ err = brcmf_fil_iovar_int_set(ifp, "ndoe", enable);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n",
+ enable, err);
+ } else {
+ brcmf_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n",
+ enable, mode);
+ }
+}
+
static void _brcmf_set_multicast_list(struct work_struct *work)
{
struct brcmf_if *ifp;
@@ -134,6 +171,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
if (err < 0)
brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
err);
+ brcmf_configure_arp_nd_offload(ifp, !cmd_value);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -950,6 +988,8 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
seq_printf(s, "anarev: %u\n", ri->anarev);
seq_printf(s, "nvramrev: %08x\n", ri->nvramrev);
+ seq_printf(s, "clmver: %s\n", bus_if->drvr->clmver);
+
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index a4dd313140f3..df8a1ecb9924 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -141,6 +141,8 @@ struct brcmf_pub {
struct notifier_block inetaddr_notifier;
struct notifier_block inet6addr_notifier;
struct brcmf_mp_device *settings;
+
+ u8 clmver[BRCMF_DCMD_SMLEN];
};
/* forward declarations */
@@ -203,6 +205,7 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
/* Return pointer to interface name */
char *brcmf_ifname(struct brcmf_if *ifp);
struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
+void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable);
int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx,
bool is_p2pdev, const char *name, u8 *mac_addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index ef72baf6dd96..e7eaa57d11d9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -257,11 +257,6 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
min_t(u32, emsg.datalen, 64),
"event payload, len=%d\n", emsg.datalen);
- if (emsg.datalen > event->datalen) {
- brcmf_err("event invalid length header=%d, msg=%d\n",
- event->datalen, emsg.datalen);
- goto event_free;
- }
/* special handling of interface event */
if (event->code == BRCMF_E_IF) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index e0d22fedb2b4..4b290705e3e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -155,6 +155,21 @@
#define BRCMF_MFP_CAPABLE 1
#define BRCMF_MFP_REQUIRED 2
+/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
+ * ioctl. It is relatively small because the firmware has a small maximum
+ * input payload size restriction for ioctls.
+ */
+#define MAX_CHUNK_LEN 1400
+
+#define DLOAD_HANDLER_VER 1 /* Downloader version */
+#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */
+#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */
+
+#define DL_BEGIN 0x0002
+#define DL_END 0x0004
+
+#define DL_TYPE_CLM 2
+
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
BRCMF_JOIN_PREF_RSSI = 1,
@@ -827,6 +842,22 @@ struct brcmf_pno_macaddr_le {
};
/**
+ * struct brcmf_dload_data_le - data passing to firmware for downloading
+ * @flag: flags related to download data.
+ * @dload_type: type of download data.
+ * @len: length in bytes of download data.
+ * @crc: crc of download data.
+ * @data: download data.
+ */
+struct brcmf_dload_data_le {
+ __le16 flag;
+ __le16 dload_type;
+ __le32 len;
+ __le32 crc;
+ u8 data[1];
+};
+
+/**
* struct brcmf_pno_bssid_le - bssid configuration for PNO scan.
*
* @bssid: BSS network identifier.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 2ce675ab40ef..2ee54133efa1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -692,10 +692,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
/* determine the scan engine parameters */
sparams->bss_type = DOT11_BSSTYPE_ANY;
- if (p2p->cfg->active_scan)
- sparams->scan_type = 0;
- else
- sparams->scan_type = 1;
+ sparams->scan_type = BRCMF_SCANTYPE_ACTIVE;
eth_broadcast_addr(sparams->bssid);
sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS);
@@ -884,7 +881,7 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_p2p_info *p2p = &cfg->p2p;
- int err = 0;
+ int err;
if (brcmf_p2p_scan_is_p2p_request(request)) {
/* find my listen channel */
@@ -907,9 +904,7 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
/* override .run_escan() callback. */
cfg->escan_info.run = brcmf_p2p_run_escan;
}
- err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_PRBREQ_FLAG,
- request->ie, request->ie_len);
- return err;
+ return 0;
}
@@ -1853,7 +1848,6 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct brcmf_cfg80211_vif *vif = ifp->vif;
struct brcmf_rx_mgmt_data *rxframe = (struct brcmf_rx_mgmt_data *)data;
- u16 chanspec = be16_to_cpu(rxframe->chanspec);
struct brcmu_chan ch;
u8 *mgmt_frame;
u32 mgmt_frame_len;
@@ -1906,7 +1900,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
- mgmt_frame_len, e->datalen, chanspec, freq);
+ mgmt_frame_len, e->datalen, ch.chspec, freq);
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index e6e9b00b79d7..3c87157f5b85 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1350,6 +1350,24 @@ static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
return 0;
}
+static int brcmf_pcie_get_fwname(struct device *dev, u32 chip, u32 chiprev,
+ u8 *fw_name)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+ int ret = 0;
+
+ if (devinfo->fw_name[0] != '\0')
+ strlcpy(fw_name, devinfo->fw_name, BRCMF_FW_NAME_LEN);
+ else
+ ret = brcmf_fw_map_chip_to_name(chip, chiprev,
+ brcmf_pcie_fwnames,
+ ARRAY_SIZE(brcmf_pcie_fwnames),
+ fw_name, NULL);
+
+ return ret;
+}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.txdata = brcmf_pcie_tx,
@@ -1359,6 +1377,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.wowl_config = brcmf_pcie_wowl_config,
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
+ .get_fwname = brcmf_pcie_get_fwname,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 613caca7dc02..310c4e2746aa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -260,10 +260,11 @@ struct rte_console {
#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
/* tohostmailboxdata */
-#define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
-#define HMB_DATA_DEVREADY 2 /* talk to host after enable */
-#define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
-#define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
+#define HMB_DATA_NAKHANDLED 0x0001 /* retransmit NAK'd frame */
+#define HMB_DATA_DEVREADY 0x0002 /* talk to host after enable */
+#define HMB_DATA_FC 0x0004 /* per prio flowcontrol update flag */
+#define HMB_DATA_FWREADY 0x0008 /* fw ready for protocol activity */
+#define HMB_DATA_FWHALT 0x0010 /* firmware halted */
#define HMB_DATA_FCDATA_MASK 0xff000000
#define HMB_DATA_FCDATA_SHIFT 24
@@ -1094,6 +1095,10 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
offsetof(struct sdpcmd_regs, tosbmailbox));
bus->sdcnt.f1regdata += 2;
+ /* dongle indicates the firmware has halted/crashed */
+ if (hmb_data & HMB_DATA_FWHALT)
+ brcmf_err("mailbox indicates firmware halted\n");
+
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
@@ -1151,6 +1156,7 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
HMB_DATA_NAKHANDLED |
HMB_DATA_FC |
HMB_DATA_FWREADY |
+ HMB_DATA_FWHALT |
HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
brcmf_err("Unknown mailbox data content: 0x%02x\n",
hmb_data);
@@ -3628,7 +3634,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
bus->dpc_running = true;
wmb();
- while (ACCESS_ONCE(bus->dpc_triggered)) {
+ while (READ_ONCE(bus->dpc_triggered)) {
bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
bus->idlecount = 0;
@@ -3966,9 +3972,9 @@ brcmf_sdio_watchdog_thread(void *data)
}
static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+ struct brcmf_sdio *bus = from_timer(bus, t, timer);
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -3979,6 +3985,24 @@ brcmf_sdio_watchdog(unsigned long data)
}
}
+static int brcmf_sdio_get_fwname(struct device *dev, u32 chip, u32 chiprev,
+ u8 *fw_name)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+ int ret = 0;
+
+ if (sdiodev->fw_name[0] != '\0')
+ strlcpy(fw_name, sdiodev->fw_name, BRCMF_FW_NAME_LEN);
+ else
+ ret = brcmf_fw_map_chip_to_name(chip, chiprev,
+ brcmf_sdio_fwnames,
+ ARRAY_SIZE(brcmf_sdio_fwnames),
+ fw_name, NULL);
+
+ return ret;
+}
+
static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.stop = brcmf_sdio_bus_stop,
.preinit = brcmf_sdio_bus_preinit,
@@ -3989,6 +4013,7 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
.wowl_config = brcmf_sdio_wowl_config,
.get_ramsize = brcmf_sdio_bus_get_ramsize,
.get_memdump = brcmf_sdio_bus_get_memdump,
+ .get_fwname = brcmf_sdio_get_fwname,
};
static void brcmf_sdio_firmware_callback(struct device *dev, int err,
@@ -4144,10 +4169,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_waitqueue_head(&bus->dcmd_resp_wait);
/* Set up the watchdog timer */
- init_timer(&bus->timer);
- bus->timer.data = (unsigned long)bus;
- bus->timer.function = brcmf_sdio_watchdog;
-
+ timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 11ffaa01599e..b27170c12482 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1128,12 +1128,30 @@ static void brcmf_usb_wowl_config(struct device *dev, bool enabled)
device_set_wakeup_enable(devinfo->dev, false);
}
+static int brcmf_usb_get_fwname(struct device *dev, u32 chip, u32 chiprev,
+ u8 *fw_name)
+{
+ struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
+ int ret = 0;
+
+ if (devinfo->fw_name[0] != '\0')
+ strlcpy(fw_name, devinfo->fw_name, BRCMF_FW_NAME_LEN);
+ else
+ ret = brcmf_fw_map_chip_to_name(chip, chiprev,
+ brcmf_usb_fwnames,
+ ARRAY_SIZE(brcmf_usb_fwnames),
+ fw_name, NULL);
+
+ return ret;
+}
+
static const struct brcmf_bus_ops brcmf_usb_bus_ops = {
.txdata = brcmf_usb_tx,
.stop = brcmf_usb_down,
.txctl = brcmf_usb_tx_ctlpkt,
.rxctl = brcmf_usb_rx_ctlpkt,
.wowl_config = brcmf_usb_wowl_config,
+ .get_fwname = brcmf_usb_get_fwname,
};
static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index 74b17cecb189..c0a5449ed72c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/gpio.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 1c4e9dd57960..3a13d176b221 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -1916,7 +1916,7 @@ void wlc_phy_txpower_update_shm(struct brcms_phy *pi)
pi->hwpwr_txcur);
for (j = TXP_FIRST_OFDM; j <= TXP_LAST_OFDM; j++) {
- const u8 ucode_ofdm_rates[] = {
+ static const u8 ucode_ofdm_rates[] = {
0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c
};
offset = wlapi_bmac_rate_shm_offset(
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index ef685465f80a..763e8ba6b178 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16061,52 +16061,8 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
}
}
-static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+static void wlc_phy_workarounds_nphy_rev7(struct brcms_phy *pi)
{
- static const u8 rfseq_rx2tx_events[] = {
- NPHY_RFSEQ_CMD_NOP,
- NPHY_RFSEQ_CMD_RXG_FBW,
- NPHY_RFSEQ_CMD_TR_SWITCH,
- NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
- NPHY_RFSEQ_CMD_RXPD_TXPD,
- NPHY_RFSEQ_CMD_TX_GAIN,
- NPHY_RFSEQ_CMD_EXT_PA
- };
- u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
- static const u8 rfseq_tx2rx_events[] = {
- NPHY_RFSEQ_CMD_NOP,
- NPHY_RFSEQ_CMD_EXT_PA,
- NPHY_RFSEQ_CMD_TX_GAIN,
- NPHY_RFSEQ_CMD_RXPD_TXPD,
- NPHY_RFSEQ_CMD_TR_SWITCH,
- NPHY_RFSEQ_CMD_RXG_FBW,
- NPHY_RFSEQ_CMD_CLR_HIQ_DIS
- };
- static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
- static const u8 rfseq_tx2rx_events_rev3[] = {
- NPHY_REV3_RFSEQ_CMD_EXT_PA,
- NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
- NPHY_REV3_RFSEQ_CMD_TX_GAIN,
- NPHY_REV3_RFSEQ_CMD_RXPD_TXPD,
- NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
- NPHY_REV3_RFSEQ_CMD_RXG_FBW,
- NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
- NPHY_REV3_RFSEQ_CMD_END
- };
- static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
- u8 rfseq_rx2tx_events_rev3[] = {
- NPHY_REV3_RFSEQ_CMD_NOP,
- NPHY_REV3_RFSEQ_CMD_RXG_FBW,
- NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
- NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
- NPHY_REV3_RFSEQ_CMD_RXPD_TXPD,
- NPHY_REV3_RFSEQ_CMD_TX_GAIN,
- NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
- NPHY_REV3_RFSEQ_CMD_EXT_PA,
- NPHY_REV3_RFSEQ_CMD_END
- };
- u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
-
static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16118,31 +16074,18 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_END
};
- static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] =
+ { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
-
- s16 alpha0, alpha1, alpha2;
- s16 beta0, beta1, beta2;
- u32 leg_data_weights, ht_data_weights, nss1_data_weights,
- stbc_data_weights;
+ u32 leg_data_weights;
u8 chan_freq_range = 0;
static const u16 dac_control = 0x0002;
u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
- u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
- u16 aux_adc_vmid_rev3[] = { 0xa2, 0xb4, 0xb4, 0x89 };
- u16 *aux_adc_vmid;
u16 aux_adc_gain_rev7[] = { 0x02, 0x02, 0x02, 0x02 };
- u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
- u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
- u16 *aux_adc_gain;
- static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
- static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
s32 min_nvar_val = 0x18d;
s32 min_nvar_offset_6mbps = 20;
u8 pdetrange;
- u8 triso;
- u16 regval;
u16 afectrl_adc_ctrl1_rev7 = 0x20;
u16 afectrl_adc_ctrl2_rev7 = 0x0;
u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
@@ -16171,965 +16114,939 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 freq;
int coreNum;
- if (CHSPEC_IS5G(pi->radio_chanspec))
- wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 0);
- else
- wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 1);
- if (pi->phyhang_avoid)
- wlc_phy_stay_in_carriersearch_nphy(pi, true);
+ if (NREV_IS(pi->pubpi.phy_rev, 7)) {
+ mod_phy_reg(pi, 0x221, (0x1 << 4), (1 << 4));
+
+ mod_phy_reg(pi, 0x160, (0x7f << 0), (32 << 0));
+ mod_phy_reg(pi, 0x160, (0x7f << 8), (39 << 8));
+ mod_phy_reg(pi, 0x161, (0x7f << 0), (46 << 0));
+ mod_phy_reg(pi, 0x161, (0x7f << 8), (51 << 8));
+ mod_phy_reg(pi, 0x162, (0x7f << 0), (55 << 0));
+ mod_phy_reg(pi, 0x162, (0x7f << 8), (58 << 8));
+ mod_phy_reg(pi, 0x163, (0x7f << 0), (60 << 0));
+ mod_phy_reg(pi, 0x163, (0x7f << 8), (62 << 8));
+ mod_phy_reg(pi, 0x164, (0x7f << 0), (62 << 0));
+ mod_phy_reg(pi, 0x164, (0x7f << 8), (63 << 8));
+ mod_phy_reg(pi, 0x165, (0x7f << 0), (63 << 0));
+ mod_phy_reg(pi, 0x165, (0x7f << 8), (64 << 8));
+ mod_phy_reg(pi, 0x166, (0x7f << 0), (64 << 0));
+ mod_phy_reg(pi, 0x166, (0x7f << 8), (64 << 8));
+ mod_phy_reg(pi, 0x167, (0x7f << 0), (64 << 0));
+ mod_phy_reg(pi, 0x167, (0x7f << 8), (64 << 8));
+ }
- or_phy_reg(pi, 0xb1, NPHY_IQFlip_ADC1 | NPHY_IQFlip_ADC2);
+ if (NREV_LE(pi->pubpi.phy_rev, 8)) {
+ write_phy_reg(pi, 0x23f, 0x1b0);
+ write_phy_reg(pi, 0x240, 0x1b0);
+ }
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 8))
+ mod_phy_reg(pi, 0xbd, (0xff << 0), (114 << 0));
- if (NREV_IS(pi->pubpi.phy_rev, 7)) {
- mod_phy_reg(pi, 0x221, (0x1 << 4), (1 << 4));
-
- mod_phy_reg(pi, 0x160, (0x7f << 0), (32 << 0));
- mod_phy_reg(pi, 0x160, (0x7f << 8), (39 << 8));
- mod_phy_reg(pi, 0x161, (0x7f << 0), (46 << 0));
- mod_phy_reg(pi, 0x161, (0x7f << 8), (51 << 8));
- mod_phy_reg(pi, 0x162, (0x7f << 0), (55 << 0));
- mod_phy_reg(pi, 0x162, (0x7f << 8), (58 << 8));
- mod_phy_reg(pi, 0x163, (0x7f << 0), (60 << 0));
- mod_phy_reg(pi, 0x163, (0x7f << 8), (62 << 8));
- mod_phy_reg(pi, 0x164, (0x7f << 0), (62 << 0));
- mod_phy_reg(pi, 0x164, (0x7f << 8), (63 << 8));
- mod_phy_reg(pi, 0x165, (0x7f << 0), (63 << 0));
- mod_phy_reg(pi, 0x165, (0x7f << 8), (64 << 8));
- mod_phy_reg(pi, 0x166, (0x7f << 0), (64 << 0));
- mod_phy_reg(pi, 0x166, (0x7f << 8), (64 << 8));
- mod_phy_reg(pi, 0x167, (0x7f << 0), (64 << 0));
- mod_phy_reg(pi, 0x167, (0x7f << 8), (64 << 8));
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x00, 16,
+ &dac_control);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x10, 16,
+ &dac_control);
- if (NREV_LE(pi->pubpi.phy_rev, 8)) {
- write_phy_reg(pi, 0x23f, 0x1b0);
- write_phy_reg(pi, 0x240, 0x1b0);
- }
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 0, 32, &leg_data_weights);
+ leg_data_weights = leg_data_weights & 0xffffff;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 0, 32, &leg_data_weights);
- if (NREV_GE(pi->pubpi.phy_rev, 8))
- mod_phy_reg(pi, 0xbd, (0xff << 0), (114 << 0));
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
+ 2, 0x15e, 16, rfseq_rx2tx_dacbufpu_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x16e, 16,
+ rfseq_rx2tx_dacbufpu_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x00, 16,
- &dac_control);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x10, 16,
- &dac_control);
+ if (PHY_IPA(pi))
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
+ rfseq_rx2tx_events_rev3_ipa,
+ rfseq_rx2tx_dlys_rev3_ipa,
+ ARRAY_SIZE
+ (rfseq_rx2tx_events_rev3_ipa));
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 0, 32, &leg_data_weights);
- leg_data_weights = leg_data_weights & 0xffffff;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 0, 32, &leg_data_weights);
+ mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
+ mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ,
- 2, 0x15e, 16,
- rfseq_rx2tx_dacbufpu_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x16e, 16,
- rfseq_rx2tx_dacbufpu_rev7);
+ tx_lpf_bw_ofdm_20mhz = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x154);
+ tx_lpf_bw_ofdm_40mhz = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x159);
+ tx_lpf_bw_11b = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x152);
- if (PHY_IPA(pi))
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
- rfseq_rx2tx_events_rev3_ipa,
- rfseq_rx2tx_dlys_rev3_ipa,
- ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
+ if (PHY_IPA(pi)) {
- mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
- mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
+ if (((pi->pubpi.radiorev == 5)
+ && (CHSPEC_IS40(pi->radio_chanspec) == 1))
+ || (pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
- tx_lpf_bw_ofdm_20mhz = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x154);
- tx_lpf_bw_ofdm_40mhz = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x159);
- tx_lpf_bw_11b = wlc_phy_read_lpf_bw_ctl_nphy(pi, 0x152);
+ rccal_bcap_val =
+ read_radio_reg(pi, RADIO_2057_RCCAL_BCAP_VAL);
+ rccal_scap_val =
+ read_radio_reg(pi, RADIO_2057_RCCAL_SCAP_VAL);
- if (PHY_IPA(pi)) {
+ rccal_tx20_11b_bcap = rccal_bcap_val;
+ rccal_tx20_11b_scap = rccal_scap_val;
- if (((pi->pubpi.radiorev == 5)
- && (CHSPEC_IS40(pi->radio_chanspec) == 1))
- || (pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
+ if ((pi->pubpi.radiorev == 5) &&
+ (CHSPEC_IS40(pi->radio_chanspec) == 1)) {
- rccal_bcap_val =
- read_radio_reg(
- pi,
- RADIO_2057_RCCAL_BCAP_VAL);
- rccal_scap_val =
- read_radio_reg(
- pi,
- RADIO_2057_RCCAL_SCAP_VAL);
+ rccal_tx20_11n_bcap = rccal_bcap_val;
+ rccal_tx20_11n_scap = rccal_scap_val;
+ rccal_tx40_11n_bcap = 0xc;
+ rccal_tx40_11n_scap = 0xc;
- rccal_tx20_11b_bcap = rccal_bcap_val;
- rccal_tx20_11b_scap = rccal_scap_val;
+ rccal_ovrd = true;
- if ((pi->pubpi.radiorev == 5) &&
- (CHSPEC_IS40(pi->radio_chanspec) == 1)) {
+ } else if ((pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
- rccal_tx20_11n_bcap = rccal_bcap_val;
- rccal_tx20_11n_scap = rccal_scap_val;
- rccal_tx40_11n_bcap = 0xc;
- rccal_tx40_11n_scap = 0xc;
+ tx_lpf_bw_ofdm_20mhz = 4;
+ tx_lpf_bw_11b = 1;
- rccal_ovrd = true;
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ rccal_tx20_11n_bcap = 0xc;
+ rccal_tx20_11n_scap = 0xc;
+ rccal_tx40_11n_bcap = 0xa;
+ rccal_tx40_11n_scap = 0xa;
+ } else {
+ rccal_tx20_11n_bcap = 0x14;
+ rccal_tx20_11n_scap = 0x14;
+ rccal_tx40_11n_bcap = 0xf;
+ rccal_tx40_11n_scap = 0xf;
+ }
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
+ rccal_ovrd = true;
+ }
+ }
- tx_lpf_bw_ofdm_20mhz = 4;
- tx_lpf_bw_11b = 1;
+ } else {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- rccal_tx20_11n_bcap = 0xc;
- rccal_tx20_11n_scap = 0xc;
- rccal_tx40_11n_bcap = 0xa;
- rccal_tx40_11n_scap = 0xa;
- } else {
- rccal_tx20_11n_bcap = 0x14;
- rccal_tx20_11n_scap = 0x14;
- rccal_tx40_11n_bcap = 0xf;
- rccal_tx40_11n_scap = 0xf;
- }
+ if (pi->pubpi.radiorev == 5) {
- rccal_ovrd = true;
- }
- }
+ tx_lpf_bw_ofdm_20mhz = 1;
+ tx_lpf_bw_ofdm_40mhz = 3;
- } else {
+ rccal_bcap_val =
+ read_radio_reg(pi, RADIO_2057_RCCAL_BCAP_VAL);
+ rccal_scap_val =
+ read_radio_reg(pi, RADIO_2057_RCCAL_SCAP_VAL);
- if (pi->pubpi.radiorev == 5) {
+ rccal_tx20_11b_bcap = rccal_bcap_val;
+ rccal_tx20_11b_scap = rccal_scap_val;
- tx_lpf_bw_ofdm_20mhz = 1;
- tx_lpf_bw_ofdm_40mhz = 3;
+ rccal_tx20_11n_bcap = 0x13;
+ rccal_tx20_11n_scap = 0x11;
+ rccal_tx40_11n_bcap = 0x13;
+ rccal_tx40_11n_scap = 0x11;
- rccal_bcap_val =
- read_radio_reg(
- pi,
- RADIO_2057_RCCAL_BCAP_VAL);
- rccal_scap_val =
- read_radio_reg(
- pi,
- RADIO_2057_RCCAL_SCAP_VAL);
+ rccal_ovrd = true;
+ }
+ }
- rccal_tx20_11b_bcap = rccal_bcap_val;
- rccal_tx20_11b_scap = rccal_scap_val;
+ if (rccal_ovrd) {
- rccal_tx20_11n_bcap = 0x13;
- rccal_tx20_11n_scap = 0x11;
- rccal_tx40_11n_bcap = 0x13;
- rccal_tx40_11n_scap = 0x11;
+ rx2tx_lpf_rc_lut_tx20_11b =
+ (rccal_tx20_11b_bcap << 8) |
+ (rccal_tx20_11b_scap << 3) | tx_lpf_bw_11b;
+ rx2tx_lpf_rc_lut_tx20_11n =
+ (rccal_tx20_11n_bcap << 8) |
+ (rccal_tx20_11n_scap << 3) | tx_lpf_bw_ofdm_20mhz;
+ rx2tx_lpf_rc_lut_tx40_11n =
+ (rccal_tx40_11n_bcap << 8) |
+ (rccal_tx40_11n_scap << 3) | tx_lpf_bw_ofdm_40mhz;
- rccal_ovrd = true;
- }
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x152 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx20_11b);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x153 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx20_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x154 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx20_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x155 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x156 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x157 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x158 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1,
+ 0x159 + coreNum * 0x10, 16,
+ &rx2tx_lpf_rc_lut_tx40_11n);
}
- if (rccal_ovrd) {
-
- rx2tx_lpf_rc_lut_tx20_11b =
- (rccal_tx20_11b_bcap << 8) |
- (rccal_tx20_11b_scap << 3) |
- tx_lpf_bw_11b;
- rx2tx_lpf_rc_lut_tx20_11n =
- (rccal_tx20_11n_bcap << 8) |
- (rccal_tx20_11n_scap << 3) |
- tx_lpf_bw_ofdm_20mhz;
- rx2tx_lpf_rc_lut_tx40_11n =
- (rccal_tx40_11n_bcap << 8) |
- (rccal_tx40_11n_scap << 3) |
- tx_lpf_bw_ofdm_40mhz;
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 4), 1, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID2);
+ }
- for (coreNum = 0; coreNum <= 1; coreNum++) {
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x152 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11b);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x153 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x154 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx20_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x155 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x156 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x157 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x158 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- wlc_phy_table_write_nphy(
- pi, NPHY_TBL_ID_RFSEQ,
- 1,
- 0x159 + coreNum * 0x10,
- 16,
- &rx2tx_lpf_rc_lut_tx40_11n);
- }
+ write_phy_reg(pi, 0x32f, 0x3);
- wlc_phy_rfctrl_override_nphy_rev7(
- pi, (0x1 << 4),
- 1, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID2);
- }
+ if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6))
+ wlc_phy_rfctrl_override_nphy_rev7(pi, (0x1 << 2), 1, 0x3, 0,
+ NPHY_REV7_RFCTRLOVERRIDE_ID0);
- write_phy_reg(pi, 0x32f, 0x3);
+ if ((pi->pubpi.radiorev == 3) || (pi->pubpi.radiorev == 4) ||
+ (pi->pubpi.radiorev == 6)) {
+ if ((pi->sh->sromrev >= 8)
+ && (pi->sh->boardflags2 & BFL2_IPALVLSHIFT_3P3))
+ ipalvlshift_3p3_war_en = 1;
- if ((pi->pubpi.radiorev == 4) || (pi->pubpi.radiorev == 6))
- wlc_phy_rfctrl_override_nphy_rev7(
- pi, (0x1 << 2),
- 1, 0x3, 0,
- NPHY_REV7_RFCTRLOVERRIDE_ID0);
+ if (ipalvlshift_3p3_war_en) {
+ write_radio_reg(pi, RADIO_2057_GPAIO_CONFIG, 0x5);
+ write_radio_reg(pi, RADIO_2057_GPAIO_SEL1, 0x30);
+ write_radio_reg(pi, RADIO_2057_GPAIO_SEL0, 0x0);
+ or_radio_reg(pi, RADIO_2057_RXTXBIAS_CONFIG_CORE0, 0x1);
+ or_radio_reg(pi, RADIO_2057_RXTXBIAS_CONFIG_CORE1, 0x1);
- if ((pi->pubpi.radiorev == 3) || (pi->pubpi.radiorev == 4) ||
- (pi->pubpi.radiorev == 6)) {
- if ((pi->sh->sromrev >= 8)
- && (pi->sh->boardflags2 & BFL2_IPALVLSHIFT_3P3))
- ipalvlshift_3p3_war_en = 1;
-
- if (ipalvlshift_3p3_war_en) {
- write_radio_reg(pi, RADIO_2057_GPAIO_CONFIG,
- 0x5);
- write_radio_reg(pi, RADIO_2057_GPAIO_SEL1,
- 0x30);
- write_radio_reg(pi, RADIO_2057_GPAIO_SEL0, 0x0);
- or_radio_reg(pi,
- RADIO_2057_RXTXBIAS_CONFIG_CORE0,
- 0x1);
- or_radio_reg(pi,
- RADIO_2057_RXTXBIAS_CONFIG_CORE1,
- 0x1);
-
- ipa2g_mainbias = 0x1f;
-
- ipa2g_casconv = 0x6f;
-
- ipa2g_biasfilt = 0xaa;
- } else {
+ ipa2g_mainbias = 0x1f;
- ipa2g_mainbias = 0x2b;
+ ipa2g_casconv = 0x6f;
- ipa2g_casconv = 0x7f;
+ ipa2g_biasfilt = 0xaa;
+ } else {
- ipa2g_biasfilt = 0xee;
- }
+ ipa2g_mainbias = 0x2b;
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- for (coreNum = 0; coreNum <= 1; coreNum++) {
- WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- coreNum, IPA2G_IMAIN,
- ipa2g_mainbias);
- WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- coreNum, IPA2G_CASCONV,
- ipa2g_casconv);
- WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- coreNum,
- IPA2G_BIAS_FILTER,
- ipa2g_biasfilt);
- }
- }
- }
+ ipa2g_casconv = 0x7f;
- if (PHY_IPA(pi)) {
- if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if ((pi->pubpi.radiorev == 3)
- || (pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6))
- txgm_idac_bleed = 0x7f;
+ ipa2g_biasfilt = 0xee;
+ }
- for (coreNum = 0; coreNum <= 1; coreNum++) {
- if (txgm_idac_bleed != 0)
- WRITE_RADIO_REG4(
- pi, RADIO_2057,
- CORE, coreNum,
- TXGM_IDAC_BLEED,
- txgm_idac_bleed);
- }
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
+ coreNum, IPA2G_IMAIN,
+ ipa2g_mainbias);
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
+ coreNum, IPA2G_CASCONV,
+ ipa2g_casconv);
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
+ coreNum,
+ IPA2G_BIAS_FILTER,
+ ipa2g_biasfilt);
+ }
+ }
+ }
- if (pi->pubpi.radiorev == 5) {
-
- for (coreNum = 0; coreNum <= 1;
- coreNum++) {
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- IPA2G_CASCONV,
- 0x13);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- IPA2G_IMAIN,
- 0x1f);
- WRITE_RADIO_REG4(
- pi, RADIO_2057,
- CORE, coreNum,
- IPA2G_BIAS_FILTER,
- 0xee);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, coreNum,
- PAD2G_IDACS,
- 0x8a);
- WRITE_RADIO_REG4(
- pi, RADIO_2057,
- CORE, coreNum,
- PAD_BIAS_FILTER_BWS,
- 0x3e);
- }
+ if (PHY_IPA(pi)) {
+ if (CHSPEC_IS2G(pi->radio_chanspec)) {
+ if ((pi->pubpi.radiorev == 3)
+ || (pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6))
+ txgm_idac_bleed = 0x7f;
- } else if ((pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+ if (txgm_idac_bleed != 0)
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ TXGM_IDAC_BLEED,
+ txgm_idac_bleed);
+ }
- if (CHSPEC_IS40(pi->radio_chanspec) ==
- 0) {
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, 0,
- IPA2G_IMAIN,
- 0x14);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, 1,
- IPA2G_IMAIN,
- 0x12);
- } else {
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, 0,
- IPA2G_IMAIN,
- 0x16);
- WRITE_RADIO_REG4(pi, RADIO_2057,
- CORE, 1,
- IPA2G_IMAIN,
- 0x16);
- }
+ if (pi->pubpi.radiorev == 5) {
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ IPA2G_CASCONV,
+ 0x13);
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ IPA2G_IMAIN,
+ 0x1f);
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ IPA2G_BIAS_FILTER,
+ 0xee);
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ PAD2G_IDACS,
+ 0x8a);
+ WRITE_RADIO_REG4(pi, RADIO_2057,
+ CORE, coreNum,
+ PAD_BIAS_FILTER_BWS,
+ 0x3e);
}
+ } else if ((pi->pubpi.radiorev == 7) ||
+ (pi->pubpi.radiorev == 8)) {
- } else {
- freq = CHAN5G_FREQ(CHSPEC_CHANNEL(
- pi->radio_chanspec));
- if (((freq >= 5180) && (freq <= 5230))
- || ((freq >= 5745) && (freq <= 5805))) {
+ if (CHSPEC_IS40(pi->radio_chanspec) == 0) {
WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- 0, IPA5G_BIAS_FILTER,
- 0xff);
+ 0, IPA2G_IMAIN, 0x14);
WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- 1, IPA5G_BIAS_FILTER,
- 0xff);
- }
- }
- } else {
-
- if (pi->pubpi.radiorev != 5) {
- for (coreNum = 0; coreNum <= 1; coreNum++) {
+ 1, IPA2G_IMAIN, 0x12);
+ } else {
WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- coreNum,
- TXMIX2G_TUNE_BOOST_PU,
- 0x61);
+ 0, IPA2G_IMAIN, 0x16);
WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
- coreNum,
- TXGM_IDAC_BLEED, 0x70);
+ 1, IPA2G_IMAIN, 0x16);
}
}
- }
- if (pi->pubpi.radiorev == 4) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1,
- 0x05, 16,
- &afectrl_adc_ctrl1_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1,
- 0x15, 16,
- &afectrl_adc_ctrl1_rev7);
+ } else {
+ freq =
+ CHAN5G_FREQ(CHSPEC_CHANNEL
+ (pi->radio_chanspec));
+ if (((freq >= 5180) && (freq <= 5230))
+ || ((freq >= 5745) && (freq <= 5805))) {
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
+ 0, IPA5G_BIAS_FILTER, 0xff);
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE,
+ 1, IPA5G_BIAS_FILTER, 0xff);
+ }
+ }
+ } else {
+ if (pi->pubpi.radiorev != 5) {
for (coreNum = 0; coreNum <= 1; coreNum++) {
WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
- AFE_VCM_CAL_MASTER, 0x0);
- WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
- AFE_SET_VCM_I, 0x3f);
+ TXMIX2G_TUNE_BOOST_PU, 0x61);
WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
- AFE_SET_VCM_Q, 0x3f);
+ TXGM_IDAC_BLEED, 0x70);
}
- } else {
- mod_phy_reg(pi, 0xa6, (0x1 << 2), (0x1 << 2));
- mod_phy_reg(pi, 0x8f, (0x1 << 2), (0x1 << 2));
- mod_phy_reg(pi, 0xa7, (0x1 << 2), (0x1 << 2));
- mod_phy_reg(pi, 0xa5, (0x1 << 2), (0x1 << 2));
-
- mod_phy_reg(pi, 0xa6, (0x1 << 0), 0);
- mod_phy_reg(pi, 0x8f, (0x1 << 0), (0x1 << 0));
- mod_phy_reg(pi, 0xa7, (0x1 << 0), 0);
- mod_phy_reg(pi, 0xa5, (0x1 << 0), (0x1 << 0));
-
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1,
- 0x05, 16,
- &afectrl_adc_ctrl2_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1,
- 0x15, 16,
- &afectrl_adc_ctrl2_rev7);
-
- mod_phy_reg(pi, 0xa6, (0x1 << 2), 0);
- mod_phy_reg(pi, 0x8f, (0x1 << 2), 0);
- mod_phy_reg(pi, 0xa7, (0x1 << 2), 0);
- mod_phy_reg(pi, 0xa5, (0x1 << 2), 0);
}
+ }
- write_phy_reg(pi, 0x6a, 0x2);
+ if (pi->pubpi.radiorev == 4) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x05, 16,
+ &afectrl_adc_ctrl1_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x15, 16,
+ &afectrl_adc_ctrl1_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 256, 32,
- &min_nvar_offset_6mbps);
+ for (coreNum = 0; coreNum <= 1; coreNum++) {
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
+ AFE_VCM_CAL_MASTER, 0x0);
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
+ AFE_SET_VCM_I, 0x3f);
+ WRITE_RADIO_REG4(pi, RADIO_2057, CORE, coreNum,
+ AFE_SET_VCM_Q, 0x3f);
+ }
+ } else {
+ mod_phy_reg(pi, 0xa6, (0x1 << 2), (0x1 << 2));
+ mod_phy_reg(pi, 0x8f, (0x1 << 2), (0x1 << 2));
+ mod_phy_reg(pi, 0xa7, (0x1 << 2), (0x1 << 2));
+ mod_phy_reg(pi, 0xa5, (0x1 << 2), (0x1 << 2));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x138, 16,
- &rfseq_pktgn_lpf_hpc_rev7);
+ mod_phy_reg(pi, 0xa6, (0x1 << 0), 0);
+ mod_phy_reg(pi, 0x8f, (0x1 << 0), (0x1 << 0));
+ mod_phy_reg(pi, 0xa7, (0x1 << 0), 0);
+ mod_phy_reg(pi, 0xa5, (0x1 << 0), (0x1 << 0));
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x141, 16,
- &rfseq_pktgn_lpf_h_hpc_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x05, 16,
+ &afectrl_adc_ctrl2_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x15, 16,
+ &afectrl_adc_ctrl2_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 3, 0x133, 16,
- &rfseq_htpktgn_lpf_hpc_rev7);
+ mod_phy_reg(pi, 0xa6, (0x1 << 2), 0);
+ mod_phy_reg(pi, 0x8f, (0x1 << 2), 0);
+ mod_phy_reg(pi, 0xa7, (0x1 << 2), 0);
+ mod_phy_reg(pi, 0xa5, (0x1 << 2), 0);
+ }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x146, 16,
- &rfseq_cckpktgn_lpf_hpc_rev7);
+ write_phy_reg(pi, 0x6a, 0x2);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x123, 16,
- &rfseq_tx2rx_lpf_h_hpc_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 256, 32,
+ &min_nvar_offset_6mbps);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x12A, 16,
- &rfseq_rx2tx_lpf_h_hpc_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x138, 16,
+ &rfseq_pktgn_lpf_hpc_rev7);
- if (CHSPEC_IS40(pi->radio_chanspec) == 0) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
- 32, &min_nvar_val);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- 127, 32, &min_nvar_val);
- } else {
- min_nvar_val = noise_var_tbl_rev7[3];
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
- 32, &min_nvar_val);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x141, 16,
+ &rfseq_pktgn_lpf_h_hpc_rev7);
- min_nvar_val = noise_var_tbl_rev7[127];
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- 127, 32, &min_nvar_val);
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 3, 0x133, 16,
+ &rfseq_htpktgn_lpf_hpc_rev7);
- wlc_phy_workarounds_nphy_gainctrl(pi);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x146, 16,
+ &rfseq_cckpktgn_lpf_hpc_rev7);
- pdetrange =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- pdetrange : pi->srom_fem2g.pdetrange;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x123, 16,
+ &rfseq_tx2rx_lpf_h_hpc_rev7);
- if (pdetrange == 0) {
- chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
- if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- aux_adc_vmid_rev7_core0[3] = 0x70;
- aux_adc_vmid_rev7_core1[3] = 0x70;
- aux_adc_gain_rev7[3] = 2;
- } else {
- aux_adc_vmid_rev7_core0[3] = 0x80;
- aux_adc_vmid_rev7_core1[3] = 0x80;
- aux_adc_gain_rev7[3] = 3;
- }
- } else if (pdetrange == 1) {
- if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- aux_adc_vmid_rev7_core0[3] = 0x7c;
- aux_adc_vmid_rev7_core1[3] = 0x7c;
- aux_adc_gain_rev7[3] = 2;
- } else {
- aux_adc_vmid_rev7_core0[3] = 0x8c;
- aux_adc_vmid_rev7_core1[3] = 0x8c;
- aux_adc_gain_rev7[3] = 1;
- }
- } else if (pdetrange == 2) {
- if (pi->pubpi.radioid == BCM2057_ID) {
- if ((pi->pubpi.radiorev == 5)
- || (pi->pubpi.radiorev == 7)
- || (pi->pubpi.radiorev == 8)) {
- if (chan_freq_range ==
- WL_CHAN_FREQ_RANGE_2G) {
- aux_adc_vmid_rev7_core0[3] =
- 0x8c;
- aux_adc_vmid_rev7_core1[3] =
- 0x8c;
- aux_adc_gain_rev7[3] = 0;
- } else {
- aux_adc_vmid_rev7_core0[3] =
- 0x96;
- aux_adc_vmid_rev7_core1[3] =
- 0x96;
- aux_adc_gain_rev7[3] = 0;
- }
- }
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 1, 0x12A, 16,
+ &rfseq_rx2tx_lpf_h_hpc_rev7);
- } else if (pdetrange == 3) {
- if (chan_freq_range == WL_CHAN_FREQ_RANGE_2G) {
- aux_adc_vmid_rev7_core0[3] = 0x89;
- aux_adc_vmid_rev7_core1[3] = 0x89;
- aux_adc_gain_rev7[3] = 0;
- }
+ if (CHSPEC_IS40(pi->radio_chanspec) == 0) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
+ 32, &min_nvar_val);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ 127, 32, &min_nvar_val);
+ } else {
+ min_nvar_val = noise_var_tbl_rev7[3];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
+ 32, &min_nvar_val);
- } else if (pdetrange == 5) {
+ min_nvar_val = noise_var_tbl_rev7[127];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ 127, 32, &min_nvar_val);
+ }
- if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- aux_adc_vmid_rev7_core0[3] = 0x80;
- aux_adc_vmid_rev7_core1[3] = 0x80;
- aux_adc_gain_rev7[3] = 3;
- } else {
- aux_adc_vmid_rev7_core0[3] = 0x70;
- aux_adc_vmid_rev7_core1[3] = 0x70;
- aux_adc_gain_rev7[3] = 2;
+ wlc_phy_workarounds_nphy_gainctrl(pi);
+
+ pdetrange = (CHSPEC_IS5G(pi->radio_chanspec)) ?
+ pi->srom_fem5g.pdetrange : pi->srom_fem2g.pdetrange;
+
+ if (pdetrange == 0) {
+ chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
+ aux_adc_vmid_rev7_core0[3] = 0x70;
+ aux_adc_vmid_rev7_core1[3] = 0x70;
+ aux_adc_gain_rev7[3] = 2;
+ } else {
+ aux_adc_vmid_rev7_core0[3] = 0x80;
+ aux_adc_vmid_rev7_core1[3] = 0x80;
+ aux_adc_gain_rev7[3] = 3;
+ }
+ } else if (pdetrange == 1) {
+ if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
+ aux_adc_vmid_rev7_core0[3] = 0x7c;
+ aux_adc_vmid_rev7_core1[3] = 0x7c;
+ aux_adc_gain_rev7[3] = 2;
+ } else {
+ aux_adc_vmid_rev7_core0[3] = 0x8c;
+ aux_adc_vmid_rev7_core1[3] = 0x8c;
+ aux_adc_gain_rev7[3] = 1;
+ }
+ } else if (pdetrange == 2) {
+ if (pi->pubpi.radioid == BCM2057_ID) {
+ if ((pi->pubpi.radiorev == 5)
+ || (pi->pubpi.radiorev == 7)
+ || (pi->pubpi.radiorev == 8)) {
+ if (chan_freq_range ==
+ WL_CHAN_FREQ_RANGE_2G) {
+ aux_adc_vmid_rev7_core0[3] = 0x8c;
+ aux_adc_vmid_rev7_core1[3] = 0x8c;
+ aux_adc_gain_rev7[3] = 0;
+ } else {
+ aux_adc_vmid_rev7_core0[3] = 0x96;
+ aux_adc_vmid_rev7_core1[3] = 0x96;
+ aux_adc_gain_rev7[3] = 0;
+ }
}
}
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x08, 16,
- &aux_adc_vmid_rev7_core0);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x18, 16,
- &aux_adc_vmid_rev7_core1);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x0c, 16,
- &aux_adc_gain_rev7);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x1c, 16,
- &aux_adc_gain_rev7);
+ } else if (pdetrange == 3) {
+ if (chan_freq_range == WL_CHAN_FREQ_RANGE_2G) {
+ aux_adc_vmid_rev7_core0[3] = 0x89;
+ aux_adc_vmid_rev7_core1[3] = 0x89;
+ aux_adc_gain_rev7[3] = 0;
+ }
- } else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ } else if (pdetrange == 5) {
- write_phy_reg(pi, 0x23f, 0x1f8);
- write_phy_reg(pi, 0x240, 0x1f8);
-
- wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 0, 32, &leg_data_weights);
- leg_data_weights = leg_data_weights & 0xffffff;
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 0, 32, &leg_data_weights);
-
- alpha0 = 293;
- alpha1 = 435;
- alpha2 = 261;
- beta0 = 366;
- beta1 = 205;
- beta2 = 32;
- write_phy_reg(pi, 0x145, alpha0);
- write_phy_reg(pi, 0x146, alpha1);
- write_phy_reg(pi, 0x147, alpha2);
- write_phy_reg(pi, 0x148, beta0);
- write_phy_reg(pi, 0x149, beta1);
- write_phy_reg(pi, 0x14a, beta2);
-
- write_phy_reg(pi, 0x38, 0xC);
- write_phy_reg(pi, 0x2ae, 0xC);
-
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX,
- rfseq_tx2rx_events_rev3,
- rfseq_tx2rx_dlys_rev3,
- ARRAY_SIZE(rfseq_tx2rx_events_rev3));
-
- if (PHY_IPA(pi))
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
- rfseq_rx2tx_events_rev3_ipa,
- rfseq_rx2tx_dlys_rev3_ipa,
- ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
-
- if ((pi->sh->hw_phyrxchain != 0x3) &&
- (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
+ if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
+ aux_adc_vmid_rev7_core0[3] = 0x80;
+ aux_adc_vmid_rev7_core1[3] = 0x80;
+ aux_adc_gain_rev7[3] = 3;
+ } else {
+ aux_adc_vmid_rev7_core0[3] = 0x70;
+ aux_adc_vmid_rev7_core1[3] = 0x70;
+ aux_adc_gain_rev7[3] = 2;
+ }
+ }
- if (PHY_IPA(pi)) {
- rfseq_rx2tx_dlys_rev3[5] = 59;
- rfseq_rx2tx_dlys_rev3[6] = 1;
- rfseq_rx2tx_events_rev3[7] =
- NPHY_REV3_RFSEQ_CMD_END;
- }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x08, 16,
+ &aux_adc_vmid_rev7_core0);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x18, 16,
+ &aux_adc_vmid_rev7_core1);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x0c, 16,
+ &aux_adc_gain_rev7);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4, 0x1c, 16,
+ &aux_adc_gain_rev7);
+}
- wlc_phy_set_rfseq_nphy(
- pi, NPHY_RFSEQ_RX2TX,
- rfseq_rx2tx_events_rev3,
- rfseq_rx2tx_dlys_rev3,
- ARRAY_SIZE(rfseq_rx2tx_events_rev3));
- }
+static void wlc_phy_workarounds_nphy_rev3(struct brcms_phy *pi)
+{
+ static const u8 rfseq_tx2rx_events_rev3[] = {
+ NPHY_REV3_RFSEQ_CMD_EXT_PA,
+ NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ NPHY_REV3_RFSEQ_CMD_TX_GAIN,
+ NPHY_REV3_RFSEQ_CMD_RXPD_TXPD,
+ NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
+ NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+ NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
+ NPHY_REV3_RFSEQ_CMD_END
+ };
+ static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ u8 rfseq_rx2tx_events_rev3[] = {
+ NPHY_REV3_RFSEQ_CMD_NOP,
+ NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+ NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
+ NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
+ NPHY_REV3_RFSEQ_CMD_RXPD_TXPD,
+ NPHY_REV3_RFSEQ_CMD_TX_GAIN,
+ NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ NPHY_REV3_RFSEQ_CMD_EXT_PA,
+ NPHY_REV3_RFSEQ_CMD_END
+ };
+ u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ NPHY_REV3_RFSEQ_CMD_NOP,
+ NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+ NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
+ NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
+ NPHY_REV3_RFSEQ_CMD_RXPD_TXPD,
+ NPHY_REV3_RFSEQ_CMD_TX_GAIN,
+ NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS,
+ NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ NPHY_REV3_RFSEQ_CMD_END
+ };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] =
+ { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ s16 alpha0, alpha1, alpha2;
+ s16 beta0, beta1, beta2;
+ u32 leg_data_weights, ht_data_weights, nss1_data_weights,
+ stbc_data_weights;
+ u8 chan_freq_range = 0;
+ static const u16 dac_control = 0x0002;
+ u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
+ u16 aux_adc_vmid_rev3[] = { 0xa2, 0xb4, 0xb4, 0x89 };
+ u16 *aux_adc_vmid;
+ u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
+ u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
+ u16 *aux_adc_gain;
+ static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+ static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ s32 min_nvar_val = 0x18d;
+ u8 pdetrange;
+ u8 triso;
- if (CHSPEC_IS2G(pi->radio_chanspec))
- write_phy_reg(pi, 0x6a, 0x2);
- else
- write_phy_reg(pi, 0x6a, 0x9c40);
+ write_phy_reg(pi, 0x23f, 0x1f8);
+ write_phy_reg(pi, 0x240, 0x1f8);
+
+ wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 0, 32, &leg_data_weights);
+ leg_data_weights = leg_data_weights & 0xffffff;
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 0, 32, &leg_data_weights);
+
+ alpha0 = 293;
+ alpha1 = 435;
+ alpha2 = 261;
+ beta0 = 366;
+ beta1 = 205;
+ beta2 = 32;
+ write_phy_reg(pi, 0x145, alpha0);
+ write_phy_reg(pi, 0x146, alpha1);
+ write_phy_reg(pi, 0x147, alpha2);
+ write_phy_reg(pi, 0x148, beta0);
+ write_phy_reg(pi, 0x149, beta1);
+ write_phy_reg(pi, 0x14a, beta2);
+
+ write_phy_reg(pi, 0x38, 0xC);
+ write_phy_reg(pi, 0x2ae, 0xC);
+
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX,
+ rfseq_tx2rx_events_rev3,
+ rfseq_tx2rx_dlys_rev3,
+ ARRAY_SIZE(rfseq_tx2rx_events_rev3));
- mod_phy_reg(pi, 0x294, (0xf << 8), (7 << 8));
+ if (PHY_IPA(pi))
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
+ rfseq_rx2tx_events_rev3_ipa,
+ rfseq_rx2tx_dlys_rev3_ipa,
+ ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
- if (CHSPEC_IS40(pi->radio_chanspec) == 0) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
- 32, &min_nvar_val);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- 127, 32, &min_nvar_val);
- } else {
- min_nvar_val = noise_var_tbl_rev3[3];
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
- 32, &min_nvar_val);
+ if ((pi->sh->hw_phyrxchain != 0x3) &&
+ (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
- min_nvar_val = noise_var_tbl_rev3[127];
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
- 127, 32, &min_nvar_val);
+ if (PHY_IPA(pi)) {
+ rfseq_rx2tx_dlys_rev3[5] = 59;
+ rfseq_rx2tx_dlys_rev3[6] = 1;
+ rfseq_rx2tx_events_rev3[7] = NPHY_REV3_RFSEQ_CMD_END;
}
- wlc_phy_workarounds_nphy_gainctrl(pi);
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
+ rfseq_rx2tx_events_rev3,
+ rfseq_rx2tx_dlys_rev3,
+ ARRAY_SIZE(rfseq_rx2tx_events_rev3));
+ }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x00, 16,
- &dac_control);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x10, 16,
- &dac_control);
+ if (CHSPEC_IS2G(pi->radio_chanspec))
+ write_phy_reg(pi, 0x6a, 0x2);
+ else
+ write_phy_reg(pi, 0x6a, 0x9c40);
- pdetrange =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- pdetrange : pi->srom_fem2g.pdetrange;
+ mod_phy_reg(pi, 0x294, (0xf << 8), (7 << 8));
- if (pdetrange == 0) {
- if (NREV_GE(pi->pubpi.phy_rev, 4)) {
- aux_adc_vmid = aux_adc_vmid_rev4;
- aux_adc_gain = aux_adc_gain_rev4;
- } else {
- aux_adc_vmid = aux_adc_vmid_rev3;
- aux_adc_gain = aux_adc_gain_rev3;
- }
- chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
- if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- switch (chan_freq_range) {
- case WL_CHAN_FREQ_RANGE_5GL:
- aux_adc_vmid[3] = 0x89;
- aux_adc_gain[3] = 0;
- break;
- case WL_CHAN_FREQ_RANGE_5GM:
- aux_adc_vmid[3] = 0x89;
- aux_adc_gain[3] = 0;
- break;
- case WL_CHAN_FREQ_RANGE_5GH:
- aux_adc_vmid[3] = 0x89;
- aux_adc_gain[3] = 0;
- break;
- default:
- break;
- }
- }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x08, 16, aux_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x18, 16, aux_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x0c, 16, aux_adc_gain);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x1c, 16, aux_adc_gain);
- } else if (pdetrange == 1) {
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x08, 16, sk_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x18, 16, sk_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x0c, 16, sk_adc_gain);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x1c, 16, sk_adc_gain);
- } else if (pdetrange == 2) {
+ if (CHSPEC_IS40(pi->radio_chanspec) == 0) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
+ 32, &min_nvar_val);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ 127, 32, &min_nvar_val);
+ } else {
+ min_nvar_val = noise_var_tbl_rev3[3];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1, 3,
+ 32, &min_nvar_val);
- u16 bcm_adc_vmid[] = { 0xa2, 0xb4, 0xb4, 0x74 };
- u16 bcm_adc_gain[] = { 0x02, 0x02, 0x02, 0x04 };
+ min_nvar_val = noise_var_tbl_rev3[127];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_NOISEVAR, 1,
+ 127, 32, &min_nvar_val);
+ }
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
- if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- bcm_adc_vmid[3] = 0x8e;
- bcm_adc_gain[3] = 0x03;
- } else {
- bcm_adc_vmid[3] = 0x94;
- bcm_adc_gain[3] = 0x03;
- }
- } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
- bcm_adc_vmid[3] = 0x84;
- bcm_adc_gain[3] = 0x02;
- }
+ wlc_phy_workarounds_nphy_gainctrl(pi);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x08, 16, bcm_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x18, 16, bcm_adc_vmid);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x0c, 16, bcm_adc_gain);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x1c, 16, bcm_adc_gain);
- } else if (pdetrange == 3) {
- chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
- if ((NREV_GE(pi->pubpi.phy_rev, 4))
- && (chan_freq_range == WL_CHAN_FREQ_RANGE_2G)) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x00, 16,
+ &dac_control);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x10, 16,
+ &dac_control);
- u16 auxadc_vmid[] = {
- 0xa2, 0xb4, 0xb4, 0x270
- };
- u16 auxadc_gain[] = {
- 0x02, 0x02, 0x02, 0x00
- };
+ pdetrange = (CHSPEC_IS5G(pi->radio_chanspec)) ?
+ pi->srom_fem5g.pdetrange : pi->srom_fem2g.pdetrange;
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_AFECTRL, 4,
- 0x08, 16, auxadc_vmid);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_AFECTRL, 4,
- 0x18, 16, auxadc_vmid);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_AFECTRL, 4,
- 0x0c, 16, auxadc_gain);
- wlc_phy_table_write_nphy(pi,
- NPHY_TBL_ID_AFECTRL, 4,
- 0x1c, 16, auxadc_gain);
+ if (pdetrange == 0) {
+ if (NREV_GE(pi->pubpi.phy_rev, 4)) {
+ aux_adc_vmid = aux_adc_vmid_rev4;
+ aux_adc_gain = aux_adc_gain_rev4;
+ } else {
+ aux_adc_vmid = aux_adc_vmid_rev3;
+ aux_adc_gain = aux_adc_gain_rev3;
+ }
+ chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
+ switch (chan_freq_range) {
+ case WL_CHAN_FREQ_RANGE_5GL:
+ aux_adc_vmid[3] = 0x89;
+ aux_adc_gain[3] = 0;
+ break;
+ case WL_CHAN_FREQ_RANGE_5GM:
+ aux_adc_vmid[3] = 0x89;
+ aux_adc_gain[3] = 0;
+ break;
+ case WL_CHAN_FREQ_RANGE_5GH:
+ aux_adc_vmid[3] = 0x89;
+ aux_adc_gain[3] = 0;
+ break;
+ default:
+ break;
}
- } else if ((pdetrange == 4) || (pdetrange == 5)) {
- u16 bcm_adc_vmid[] = { 0xa2, 0xb4, 0xb4, 0x0 };
- u16 bcm_adc_gain[] = { 0x02, 0x02, 0x02, 0x0 };
- u16 Vmid[2], Av[2];
+ }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x08, 16, aux_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x18, 16, aux_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x0c, 16, aux_adc_gain);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x1c, 16, aux_adc_gain);
+ } else if (pdetrange == 1) {
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x08, 16, sk_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x18, 16, sk_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x0c, 16, sk_adc_gain);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x1c, 16, sk_adc_gain);
+ } else if (pdetrange == 2) {
+
+ u16 bcm_adc_vmid[] = { 0xa2, 0xb4, 0xb4, 0x74 };
+ u16 bcm_adc_gain[] = { 0x02, 0x02, 0x02, 0x04 };
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
chan_freq_range =
- wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ wlc_phy_get_chan_freq_range_nphy(pi, 0);
if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
- Vmid[0] = (pdetrange == 4) ? 0x8e : 0x89;
- Vmid[1] = (pdetrange == 4) ? 0x96 : 0x89;
- Av[0] = (pdetrange == 4) ? 2 : 0;
- Av[1] = (pdetrange == 4) ? 2 : 0;
+ bcm_adc_vmid[3] = 0x8e;
+ bcm_adc_gain[3] = 0x03;
} else {
- Vmid[0] = (pdetrange == 4) ? 0x89 : 0x74;
- Vmid[1] = (pdetrange == 4) ? 0x8b : 0x70;
- Av[0] = (pdetrange == 4) ? 2 : 0;
- Av[1] = (pdetrange == 4) ? 2 : 0;
+ bcm_adc_vmid[3] = 0x94;
+ bcm_adc_gain[3] = 0x03;
}
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
+ bcm_adc_vmid[3] = 0x84;
+ bcm_adc_gain[3] = 0x02;
+ }
+
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x08, 16, bcm_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x18, 16, bcm_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x0c, 16, bcm_adc_gain);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x1c, 16, bcm_adc_gain);
+ } else if (pdetrange == 3) {
+ chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ if ((NREV_GE(pi->pubpi.phy_rev, 4)) &&
+ (chan_freq_range == WL_CHAN_FREQ_RANGE_2G)) {
+ u16 auxadc_vmid[] = { 0xa2, 0xb4, 0xb4, 0x270 };
+ u16 auxadc_gain[] = { 0x02, 0x02, 0x02, 0x00 };
- bcm_adc_vmid[3] = Vmid[0];
- bcm_adc_gain[3] = Av[0];
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x08, 16, bcm_adc_vmid);
+ 0x08, 16, auxadc_vmid);
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x0c, 16, bcm_adc_gain);
-
- bcm_adc_vmid[3] = Vmid[1];
- bcm_adc_gain[3] = Av[1];
+ 0x18, 16, auxadc_vmid);
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x18, 16, bcm_adc_vmid);
+ 0x0c, 16, auxadc_gain);
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
- 0x1c, 16, bcm_adc_gain);
+ 0x1c, 16, auxadc_gain);
}
+ } else if ((pdetrange == 4) || (pdetrange == 5)) {
+ u16 bcm_adc_vmid[] = { 0xa2, 0xb4, 0xb4, 0x0 };
+ u16 bcm_adc_gain[] = { 0x02, 0x02, 0x02, 0x0 };
+ u16 Vmid[2], Av[2];
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_MAST_BIAS | RADIO_2056_RX0),
- 0x0);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_MAST_BIAS | RADIO_2056_RX1),
- 0x0);
+ chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
+ if (chan_freq_range != WL_CHAN_FREQ_RANGE_2G) {
+ Vmid[0] = (pdetrange == 4) ? 0x8e : 0x89;
+ Vmid[1] = (pdetrange == 4) ? 0x96 : 0x89;
+ Av[0] = (pdetrange == 4) ? 2 : 0;
+ Av[1] = (pdetrange == 4) ? 2 : 0;
+ } else {
+ Vmid[0] = (pdetrange == 4) ? 0x89 : 0x74;
+ Vmid[1] = (pdetrange == 4) ? 0x8b : 0x70;
+ Av[0] = (pdetrange == 4) ? 2 : 0;
+ Av[1] = (pdetrange == 4) ? 2 : 0;
+ }
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_BIAS_MAIN | RADIO_2056_RX0),
- 0x6);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_BIAS_MAIN | RADIO_2056_RX1),
- 0x6);
+ bcm_adc_vmid[3] = Vmid[0];
+ bcm_adc_gain[3] = Av[0];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x08, 16, bcm_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x0c, 16, bcm_adc_gain);
+
+ bcm_adc_vmid[3] = Vmid[1];
+ bcm_adc_gain[3] = Av[1];
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x18, 16, bcm_adc_vmid);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 4,
+ 0x1c, 16, bcm_adc_gain);
+ }
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_BIAS_AUX | RADIO_2056_RX0),
- 0x7);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_BIAS_AUX | RADIO_2056_RX1),
- 0x7);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_MAST_BIAS | RADIO_2056_RX0), 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_MAST_BIAS | RADIO_2056_RX1), 0x0);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_LOB_BIAS | RADIO_2056_RX0),
- 0x88);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_LOB_BIAS | RADIO_2056_RX1),
- 0x88);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_BIAS_MAIN | RADIO_2056_RX0), 0x6);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_BIAS_MAIN | RADIO_2056_RX1), 0x6);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_CMFB_IDAC | RADIO_2056_RX0),
- 0x0);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXA_CMFB_IDAC | RADIO_2056_RX1),
- 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_BIAS_AUX | RADIO_2056_RX0), 0x7);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_BIAS_AUX | RADIO_2056_RX1), 0x7);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXG_CMFB_IDAC | RADIO_2056_RX0),
- 0x0);
- write_radio_reg(pi,
- (RADIO_2056_RX_MIXG_CMFB_IDAC | RADIO_2056_RX1),
- 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_LOB_BIAS | RADIO_2056_RX0), 0x88);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_LOB_BIAS | RADIO_2056_RX1), 0x88);
- triso =
- (CHSPEC_IS5G(pi->radio_chanspec)) ? pi->srom_fem5g.
- triso : pi->srom_fem2g.triso;
- if (triso == 7) {
- wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_0);
- wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_1);
- }
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_CMFB_IDAC | RADIO_2056_RX0), 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXA_CMFB_IDAC | RADIO_2056_RX1), 0x0);
- wlc_phy_war_txchain_upd_nphy(pi, pi->sh->hw_phytxchain);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXG_CMFB_IDAC | RADIO_2056_RX0), 0x0);
+ write_radio_reg(pi, (RADIO_2056_RX_MIXG_CMFB_IDAC | RADIO_2056_RX1), 0x0);
- if (((pi->sh->boardflags2 & BFL2_APLL_WAR) &&
- (CHSPEC_IS5G(pi->radio_chanspec))) ||
- (((pi->sh->boardflags2 & BFL2_GPLL_WAR) ||
- (pi->sh->boardflags2 & BFL2_GPLL_WAR2)) &&
- (CHSPEC_IS2G(pi->radio_chanspec)))) {
- nss1_data_weights = 0x00088888;
- ht_data_weights = 0x00088888;
- stbc_data_weights = 0x00088888;
- } else {
- nss1_data_weights = 0x88888888;
- ht_data_weights = 0x88888888;
- stbc_data_weights = 0x88888888;
- }
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 1, 32, &nss1_data_weights);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 2, 32, &ht_data_weights);
- wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
- 1, 3, 32, &stbc_data_weights);
-
- if (NREV_IS(pi->pubpi.phy_rev, 4)) {
- if (CHSPEC_IS5G(pi->radio_chanspec)) {
- write_radio_reg(pi,
- RADIO_2056_TX_GMBB_IDAC |
- RADIO_2056_TX0, 0x70);
- write_radio_reg(pi,
- RADIO_2056_TX_GMBB_IDAC |
- RADIO_2056_TX1, 0x70);
- }
- }
+ triso = (CHSPEC_IS5G(pi->radio_chanspec)) ?
+ pi->srom_fem5g.triso : pi->srom_fem2g.triso;
+ if (triso == 7) {
+ wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_0);
+ wlc_phy_war_force_trsw_to_R_cliplo_nphy(pi, PHY_CORE_1);
+ }
+
+ wlc_phy_war_txchain_upd_nphy(pi, pi->sh->hw_phytxchain);
- if (!pi->edcrs_threshold_lock) {
- write_phy_reg(pi, 0x224, 0x3eb);
- write_phy_reg(pi, 0x225, 0x3eb);
- write_phy_reg(pi, 0x226, 0x341);
- write_phy_reg(pi, 0x227, 0x341);
- write_phy_reg(pi, 0x228, 0x42b);
- write_phy_reg(pi, 0x229, 0x42b);
- write_phy_reg(pi, 0x22a, 0x381);
- write_phy_reg(pi, 0x22b, 0x381);
- write_phy_reg(pi, 0x22c, 0x42b);
- write_phy_reg(pi, 0x22d, 0x42b);
- write_phy_reg(pi, 0x22e, 0x381);
- write_phy_reg(pi, 0x22f, 0x381);
+ if (((pi->sh->boardflags2 & BFL2_APLL_WAR) &&
+ (CHSPEC_IS5G(pi->radio_chanspec))) ||
+ (((pi->sh->boardflags2 & BFL2_GPLL_WAR) ||
+ (pi->sh->boardflags2 & BFL2_GPLL_WAR2)) &&
+ (CHSPEC_IS2G(pi->radio_chanspec)))) {
+ nss1_data_weights = 0x00088888;
+ ht_data_weights = 0x00088888;
+ stbc_data_weights = 0x00088888;
+ } else {
+ nss1_data_weights = 0x88888888;
+ ht_data_weights = 0x88888888;
+ stbc_data_weights = 0x88888888;
+ }
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 1, 32, &nss1_data_weights);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 2, 32, &ht_data_weights);
+ wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_CMPMETRICDATAWEIGHTTBL,
+ 1, 3, 32, &stbc_data_weights);
+
+ if (NREV_IS(pi->pubpi.phy_rev, 4)) {
+ if (CHSPEC_IS5G(pi->radio_chanspec)) {
+ write_radio_reg(pi,
+ RADIO_2056_TX_GMBB_IDAC |
+ RADIO_2056_TX0, 0x70);
+ write_radio_reg(pi,
+ RADIO_2056_TX_GMBB_IDAC |
+ RADIO_2056_TX1, 0x70);
}
+ }
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ if (!pi->edcrs_threshold_lock) {
+ write_phy_reg(pi, 0x224, 0x3eb);
+ write_phy_reg(pi, 0x225, 0x3eb);
+ write_phy_reg(pi, 0x226, 0x341);
+ write_phy_reg(pi, 0x227, 0x341);
+ write_phy_reg(pi, 0x228, 0x42b);
+ write_phy_reg(pi, 0x229, 0x42b);
+ write_phy_reg(pi, 0x22a, 0x381);
+ write_phy_reg(pi, 0x22b, 0x381);
+ write_phy_reg(pi, 0x22c, 0x42b);
+ write_phy_reg(pi, 0x22d, 0x42b);
+ write_phy_reg(pi, 0x22e, 0x381);
+ write_phy_reg(pi, 0x22f, 0x381);
+ }
- if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK)
- wlapi_bmac_mhf(pi->sh->physhim, MHF4,
- MHF4_BPHY_TXCORE0,
- MHF4_BPHY_TXCORE0, BRCM_BAND_ALL);
- }
- } else {
+ if (NREV_GE(pi->pubpi.phy_rev, 6)) {
- if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD ||
- (pi->sh->boardtype == 0x8b)) {
- uint i;
- u8 war_dlys[] = { 1, 6, 6, 2, 4, 20, 1 };
- for (i = 0; i < ARRAY_SIZE(rfseq_rx2tx_dlys); i++)
- rfseq_rx2tx_dlys[i] = war_dlys[i];
- }
+ if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK)
+ wlapi_bmac_mhf(pi->sh->physhim, MHF4,
+ MHF4_BPHY_TXCORE0,
+ MHF4_BPHY_TXCORE0, BRCM_BAND_ALL);
+ }
+}
- if (CHSPEC_IS5G(pi->radio_chanspec) && pi->phy_5g_pwrgain) {
- and_radio_reg(pi, RADIO_2055_CORE1_TX_RF_SPARE, 0xf7);
- and_radio_reg(pi, RADIO_2055_CORE2_TX_RF_SPARE, 0xf7);
- } else {
- or_radio_reg(pi, RADIO_2055_CORE1_TX_RF_SPARE, 0x8);
- or_radio_reg(pi, RADIO_2055_CORE2_TX_RF_SPARE, 0x8);
- }
+void wlc_phy_workarounds_nphy_rev1(struct brcms_phy *pi)
+{
+ static const u8 rfseq_rx2tx_events[] = {
+ NPHY_RFSEQ_CMD_NOP,
+ NPHY_RFSEQ_CMD_RXG_FBW,
+ NPHY_RFSEQ_CMD_TR_SWITCH,
+ NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
+ NPHY_RFSEQ_CMD_RXPD_TXPD,
+ NPHY_RFSEQ_CMD_TX_GAIN,
+ NPHY_RFSEQ_CMD_EXT_PA
+ };
+ u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
+ static const u8 rfseq_tx2rx_events[] = {
+ NPHY_RFSEQ_CMD_NOP,
+ NPHY_RFSEQ_CMD_EXT_PA,
+ NPHY_RFSEQ_CMD_TX_GAIN,
+ NPHY_RFSEQ_CMD_RXPD_TXPD,
+ NPHY_RFSEQ_CMD_TR_SWITCH,
+ NPHY_RFSEQ_CMD_RXG_FBW,
+ NPHY_RFSEQ_CMD_CLR_HIQ_DIS
+ };
+ static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+ s16 alpha0, alpha1, alpha2;
+ s16 beta0, beta1, beta2;
+ u16 regval;
- regval = 0x000a;
- wlc_phy_table_write_nphy(pi, 8, 1, 0, 16, &regval);
- wlc_phy_table_write_nphy(pi, 8, 1, 0x10, 16, &regval);
+ if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD ||
+ (pi->sh->boardtype == 0x8b)) {
+ uint i;
+ u8 war_dlys[] = { 1, 6, 6, 2, 4, 20, 1 };
+ for (i = 0; i < ARRAY_SIZE(rfseq_rx2tx_dlys); i++)
+ rfseq_rx2tx_dlys[i] = war_dlys[i];
+ }
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
- regval = 0xcdaa;
- wlc_phy_table_write_nphy(pi, 8, 1, 0x02, 16, &regval);
- wlc_phy_table_write_nphy(pi, 8, 1, 0x12, 16, &regval);
- }
+ if (CHSPEC_IS5G(pi->radio_chanspec) && pi->phy_5g_pwrgain) {
+ and_radio_reg(pi, RADIO_2055_CORE1_TX_RF_SPARE, 0xf7);
+ and_radio_reg(pi, RADIO_2055_CORE2_TX_RF_SPARE, 0xf7);
+ } else {
+ or_radio_reg(pi, RADIO_2055_CORE1_TX_RF_SPARE, 0x8);
+ or_radio_reg(pi, RADIO_2055_CORE2_TX_RF_SPARE, 0x8);
+ }
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
- regval = 0x0000;
- wlc_phy_table_write_nphy(pi, 8, 1, 0x08, 16, &regval);
- wlc_phy_table_write_nphy(pi, 8, 1, 0x18, 16, &regval);
+ regval = 0x000a;
+ wlc_phy_table_write_nphy(pi, 8, 1, 0, 16, &regval);
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x10, 16, &regval);
- regval = 0x7aab;
- wlc_phy_table_write_nphy(pi, 8, 1, 0x07, 16, &regval);
- wlc_phy_table_write_nphy(pi, 8, 1, 0x17, 16, &regval);
+ if (NREV_LT(pi->pubpi.phy_rev, 3)) {
+ regval = 0xcdaa;
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x02, 16, &regval);
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x12, 16, &regval);
+ }
- regval = 0x0800;
- wlc_phy_table_write_nphy(pi, 8, 1, 0x06, 16, &regval);
- wlc_phy_table_write_nphy(pi, 8, 1, 0x16, 16, &regval);
- }
+ if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ regval = 0x0000;
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x08, 16, &regval);
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x18, 16, &regval);
- write_phy_reg(pi, 0xf8, 0x02d8);
- write_phy_reg(pi, 0xf9, 0x0301);
- write_phy_reg(pi, 0xfa, 0x02d8);
- write_phy_reg(pi, 0xfb, 0x0301);
+ regval = 0x7aab;
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x07, 16, &regval);
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x17, 16, &regval);
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events,
- rfseq_rx2tx_dlys,
- ARRAY_SIZE(rfseq_rx2tx_events));
+ regval = 0x0800;
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x06, 16, &regval);
+ wlc_phy_table_write_nphy(pi, 8, 1, 0x16, 16, &regval);
+ }
- wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events,
- rfseq_tx2rx_dlys,
- ARRAY_SIZE(rfseq_tx2rx_events));
+ write_phy_reg(pi, 0xf8, 0x02d8);
+ write_phy_reg(pi, 0xf9, 0x0301);
+ write_phy_reg(pi, 0xfa, 0x02d8);
+ write_phy_reg(pi, 0xfb, 0x0301);
- wlc_phy_workarounds_nphy_gainctrl(pi);
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events,
+ rfseq_rx2tx_dlys,
+ ARRAY_SIZE(rfseq_rx2tx_events));
- if (NREV_LT(pi->pubpi.phy_rev, 2)) {
+ wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events,
+ rfseq_tx2rx_dlys,
+ ARRAY_SIZE(rfseq_tx2rx_events));
- if (read_phy_reg(pi, 0xa0) & NPHY_MLenable)
- wlapi_bmac_mhf(pi->sh->physhim, MHF3,
- MHF3_NPHY_MLADV_WAR,
- MHF3_NPHY_MLADV_WAR,
- BRCM_BAND_ALL);
+ wlc_phy_workarounds_nphy_gainctrl(pi);
- } else if (NREV_IS(pi->pubpi.phy_rev, 2)) {
- write_phy_reg(pi, 0x1e3, 0x0);
- write_phy_reg(pi, 0x1e4, 0x0);
- }
+ if (NREV_LT(pi->pubpi.phy_rev, 2)) {
- if (NREV_LT(pi->pubpi.phy_rev, 2))
- mod_phy_reg(pi, 0x90, (0x1 << 7), 0);
-
- alpha0 = 293;
- alpha1 = 435;
- alpha2 = 261;
- beta0 = 366;
- beta1 = 205;
- beta2 = 32;
- write_phy_reg(pi, 0x145, alpha0);
- write_phy_reg(pi, 0x146, alpha1);
- write_phy_reg(pi, 0x147, alpha2);
- write_phy_reg(pi, 0x148, beta0);
- write_phy_reg(pi, 0x149, beta1);
- write_phy_reg(pi, 0x14a, beta2);
-
- if (NREV_LT(pi->pubpi.phy_rev, 3)) {
- mod_phy_reg(pi, 0x142, (0xf << 12), 0);
-
- write_phy_reg(pi, 0x192, 0xb5);
- write_phy_reg(pi, 0x193, 0xa4);
- write_phy_reg(pi, 0x194, 0x0);
- }
+ if (read_phy_reg(pi, 0xa0) & NPHY_MLenable)
+ wlapi_bmac_mhf(pi->sh->physhim, MHF3,
+ MHF3_NPHY_MLADV_WAR,
+ MHF3_NPHY_MLADV_WAR, BRCM_BAND_ALL);
- if (NREV_IS(pi->pubpi.phy_rev, 2))
- mod_phy_reg(pi, 0x221,
- NPHY_FORCESIG_DECODEGATEDCLKS,
- NPHY_FORCESIG_DECODEGATEDCLKS);
+ } else if (NREV_IS(pi->pubpi.phy_rev, 2)) {
+ write_phy_reg(pi, 0x1e3, 0x0);
+ write_phy_reg(pi, 0x1e4, 0x0);
+ }
+
+ if (NREV_LT(pi->pubpi.phy_rev, 2))
+ mod_phy_reg(pi, 0x90, (0x1 << 7), 0);
+
+ alpha0 = 293;
+ alpha1 = 435;
+ alpha2 = 261;
+ beta0 = 366;
+ beta1 = 205;
+ beta2 = 32;
+ write_phy_reg(pi, 0x145, alpha0);
+ write_phy_reg(pi, 0x146, alpha1);
+ write_phy_reg(pi, 0x147, alpha2);
+ write_phy_reg(pi, 0x148, beta0);
+ write_phy_reg(pi, 0x149, beta1);
+ write_phy_reg(pi, 0x14a, beta2);
+
+ if (NREV_LT(pi->pubpi.phy_rev, 3)) {
+ mod_phy_reg(pi, 0x142, (0xf << 12), 0);
+
+ write_phy_reg(pi, 0x192, 0xb5);
+ write_phy_reg(pi, 0x193, 0xa4);
+ write_phy_reg(pi, 0x194, 0x0);
}
+ if (NREV_IS(pi->pubpi.phy_rev, 2))
+ mod_phy_reg(pi, 0x221,
+ NPHY_FORCESIG_DECODEGATEDCLKS,
+ NPHY_FORCESIG_DECODEGATEDCLKS);
+}
+
+static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+{
+ if (CHSPEC_IS5G(pi->radio_chanspec))
+ wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 0);
+ else
+ wlc_phy_classifier_nphy(pi, NPHY_ClassifierCtrl_cck_en, 1);
+
+ if (pi->phyhang_avoid)
+ wlc_phy_stay_in_carriersearch_nphy(pi, true);
+
+ or_phy_reg(pi, 0xb1, NPHY_IQFlip_ADC1 | NPHY_IQFlip_ADC2);
+
+ if (NREV_GE(pi->pubpi.phy_rev, 7))
+ wlc_phy_workarounds_nphy_rev7(pi);
+ else if (NREV_GE(pi->pubpi.phy_rev, 3))
+ wlc_phy_workarounds_nphy_rev3(pi);
+ else
+ wlc_phy_workarounds_nphy_rev1(pi);
+
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
diff --git a/drivers/net/wireless/cisco/airo.h b/drivers/net/wireless/cisco/airo.h
index e480adf86be6..8a02977a2e2b 100644
--- a/drivers/net/wireless/cisco/airo.h
+++ b/drivers/net/wireless/cisco/airo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AIRO_H_
#define _AIRO_H_
diff --git a/drivers/net/wireless/intel/ipw2x00/Makefile b/drivers/net/wireless/intel/ipw2x00/Makefile
index aecd2cff462b..e1ec50359dff 100644
--- a/drivers/net/wireless/intel/ipw2x00/Makefile
+++ b/drivers/net/wireless/intel/ipw2x00/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Intel Centrino wireless drivers
#
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 329f3a63dadd..4b53ebf00c7f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3429,7 +3429,7 @@ il3945_setup_deferred_work(struct il_priv *il)
il3945_hw_setup_deferred_work(il);
- setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
+ timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
(void (*)(unsigned long))il3945_irq_tasklet,
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index b2f35dfbc01b..e8983c6a2b7b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -181,9 +181,9 @@ il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
#define IL_AVERAGE_PACKETS 1500
static void
-il3945_bg_rate_scale_flush(unsigned long data)
+il3945_bg_rate_scale_flush(struct timer_list *t)
{
- struct il3945_rs_sta *rs_sta = (void *)data;
+ struct il3945_rs_sta *rs_sta = from_timer(rs_sta, t, rate_scale_flush);
struct il_priv *il __maybe_unused = rs_sta->il;
int unflushed = 0;
unsigned long flags;
@@ -360,9 +360,6 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
rs_sta->flush_time = RATE_FLUSH;
rs_sta->last_tx_packets = 0;
- rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
- rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
-
for (i = 0; i < RATE_COUNT_3945; i++)
il3945_clear_win(&rs_sta->win[i]);
@@ -415,8 +412,7 @@ il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
rs_sta = &psta->rs_sta;
spin_lock_init(&rs_sta->lock);
- init_timer(&rs_sta->rate_scale_flush);
-
+ timer_setup(&rs_sta->rate_scale_flush, il3945_bg_rate_scale_flush, 0);
D_RATE("leave\n");
return rs_sta;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index de9b6522c43f..de63f2518f23 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -1480,7 +1480,7 @@ il4965_get_ac_from_tid(u16 tid)
static inline int
il4965_get_fifo_from_tid(u16 tid)
{
- const u8 ac_to_fifo[] = {
+ static const u8 ac_to_fifo[] = {
IL_TX_FIFO_VO,
IL_TX_FIFO_VI,
IL_TX_FIFO_BE,
@@ -4074,9 +4074,9 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
* used for calibrating the TXPOWER.
*/
static void
-il4965_bg_stats_periodic(unsigned long data)
+il4965_bg_stats_periodic(struct timer_list *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_timer(il, t, stats_periodic);
if (test_bit(S_EXIT_PENDING, &il->status))
return;
@@ -6258,10 +6258,9 @@ il4965_setup_deferred_work(struct il_priv *il)
INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
- setup_timer(&il->stats_periodic, il4965_bg_stats_periodic,
- (unsigned long)il);
+ timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);
- setup_timer(&il->watchdog, il_bg_watchdog, (unsigned long)il);
+ timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_init(&il->irq_tasklet,
(void (*)(unsigned long))il4965_irq_tasklet,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index c055f6da11c6..365a4187fc37 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2154,13 +2154,11 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
u8 use_green;
u8 active_tbl = 0;
u8 valid_tx_ant;
- struct il_station_priv *sta_priv;
if (!sta || !lq_sta)
return;
use_green = il4965_rs_use_green(il, sta);
- sta_priv = (void *)sta->drv_priv;
i = lq_sta->last_txrate_idx;
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
index c826a6b985bb..c5ad0453b334 100644
--- a/drivers/net/wireless/intel/iwlegacy/Makefile
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IWLEGACY) += iwlegacy.o
iwlegacy-objs := common.o
iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 8d5acda92a9b..558bb16bfd46 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4844,9 +4844,9 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
* we reset the firmware. If everything is fine just rearm the timer.
*/
void
-il_bg_watchdog(unsigned long data)
+il_bg_watchdog(struct timer_list *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_timer(il, t, watchdog);
int cnt;
unsigned long timeout;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 18c60c92e3a3..dc6a74a05983 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1832,7 +1832,7 @@ int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
* PCI *
*****************************************************/
-void il_bg_watchdog(unsigned long data);
+void il_bg_watchdog(struct timer_list *t);
u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
u32 beacon_interval);
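
The iwlegacy hunks above (and the dvm/main.c and dvm/tt.c hunks further down) all apply the same mechanical conversion from setup_timer() with an (unsigned long) cookie to timer_setup()/from_timer(). A minimal sketch of the resulting pattern, with hypothetical struct and field names not taken from the patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_priv {
	struct timer_list watchdog;
};

/* the callback now receives the timer itself instead of a cookie */
static void example_watchdog(struct timer_list *t)
{
	/* recover the enclosing object from the embedded timer_list */
	struct example_priv *priv = from_timer(priv, t, watchdog);

	/* ... check state, then rearm as before ... */
	mod_timer(&priv->watchdog, jiffies + HZ);
}

static void example_setup(struct example_priv *priv)
{
	/* replaces setup_timer(&priv->watchdog, fn, (unsigned long)priv) */
	timer_setup(&priv->watchdog, example_watchdog, 0);
}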
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 35a32a3ec882..ff136f299a0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# common
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs += iwl-io.o
@@ -13,6 +14,7 @@ iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
+iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index c2a5936ccede..9bb7c19d48eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -100,14 +100,6 @@
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
-/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO 21
-#define MAX_TX_AGG_SIZE_8260_SDIO 40
-
-/* Max A-MPDU exponent for HT and VHT */
-#define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K
-#define MAX_VHT_AMPDU_EXPONENT_8260_SDIO IEEE80211_VHT_MAX_AMPDU_32K
-
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
@@ -165,7 +157,8 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true, \
.nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true
+ .dbgc_supported = true, \
+ .min_umac_error_event_table = 0x800000
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
@@ -233,48 +226,5 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
-const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
- .name = "Intel(R) Dual Band Wireless-AC 8260",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8260,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
- .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
- .disable_dummy_notification = true,
- .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
- .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
-};
-
-const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
- .name = "Intel(R) Dual Band Wireless-AC 8265",
- .fw_name_pre = IWL8265_FW_PRE,
- IWL_DEVICE_8265,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
- .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
- .disable_dummy_notification = true,
- .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
- .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
-};
-
-const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
- .name = "Intel(R) Dual Band Wireless-AC 4165",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
- .ht_params = &iwl8000_ht_params,
- .nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
- .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
- .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
- .bt_shared_single_ant = true,
- .disable_dummy_notification = true,
- .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
- .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
-};
-
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e8b5ff42f5a8..e7e75b458005 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -72,18 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+ IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+ IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -149,7 +152,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.mac_addr_from_csr = true, \
.rf_id = true, \
.nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true
+ .dbgc_supported = true, \
+ .min_umac_error_event_table = 0x800000
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
@@ -193,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
@@ -205,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
};
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index a440140ed8dd..705f83b02e13 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -80,15 +80,15 @@
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
@@ -134,77 +134,79 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.rf_id = true, \
.gen2 = true, \
.nvm_type = IWL_NVM_EXT, \
- .dbgc_supported = true
+ .dbgc_supported = true, \
+ .tx_cmd_queue_size = 32, \
+ .min_umac_error_event_table = 0x400000
const struct iwl_cfg iwla000_2ac_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_HR_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .cdb = true,
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .cdb = true,
};
const struct iwl_cfg iwla000_2ac_cfg_jf = {
- .name = "Intel(R) Dual Band Wireless AC a000",
- .fw_name_pre = IWL_A000_JF_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AC a000",
+ .fw_name_pre = IWL_A000_JF_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ax_cfg_hr = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AX a000",
+ .fw_name_pre = IWL_A000_HR_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_F0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AX a000",
+ .fw_name_pre = IWL_A000_HR_F0_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_JF_B0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AX a000",
+ .fw_name_pre = IWL_A000_JF_B0_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
- .name = "Intel(R) Dual Band Wireless AX a000",
- .fw_name_pre = IWL_A000_HR_A0_FW_PRE,
- IWL_DEVICE_A000,
- .ht_params = &iwl_a000_ht_params,
- .nvm_ver = IWL_A000_NVM_VERSION,
- .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AX a000",
+ .fw_name_pre = IWL_A000_HR_A0_FW_PRE,
+ IWL_DEVICE_A000,
+ .ht_params = &iwl_a000_ht_params,
+ .nvm_ver = IWL_A000_NVM_VERSION,
+ .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index b256a354953a..702d42b2d452 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# DVM
obj-$(CONFIG_IWLDVM) += iwldvm.o
iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 2acd94da9efe..d11d72615de2 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -399,9 +399,9 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
* was received. We need to ensure we receive the statistics in order
* to update the temperature used for calibrating the TXPOWER.
*/
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -556,9 +556,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
* this function is to perform continuous uCode event logging operation
* if enabled
*/
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@@ -1085,11 +1085,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
- setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
- (unsigned long)priv);
+ timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
- setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
- (unsigned long)priv);
+ timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 5b73492e7ff7..6524533d723c 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -164,9 +164,10 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
* without doing anything, driver should continue the 5 seconds timer
* to wake up uCode for temperature check until temperature drop below CT
*/
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
@@ -214,9 +215,10 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
}
}
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_waiting_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -612,10 +614,10 @@ void iwl_tt_initialize(struct iwl_priv *priv)
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
tt->state = IWL_TI_0;
- setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
- iwl_tt_check_exit_ct_kill, (unsigned long)priv);
- setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
- iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+ timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+ iwl_tt_check_exit_ct_kill, 0);
+ timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+ iwl_tt_ready_for_ct_kill, 0);
/* setup deferred ct kill work */
INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
new file mode 100644
index 000000000000..75cae54ea7de
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -0,0 +1,210 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "acpi.h"
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ root_handle = ACPI_HANDLE(dev);
+ if (!root_handle) {
+ IWL_DEBUG_DEV_RADIO(dev,
+ "Could not retrieve root port ACPI handle\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(root_handle, method, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Call the method with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n",
+ method, status);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return buf.pointer;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+
+union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+ int data_size)
+{
+ int i;
+ union acpi_object *wifi_pkg;
+
+ /*
+ * We need at least one entry in the wifi package that
+ * describes the domain, and one more entry, otherwise there's
+ * no point in reading it.
+ */
+ if (WARN_ON_ONCE(data_size < 2))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We need at least two packages, one for the revision and one
+ * for the data itself. Also check that the revision is valid
+ * (i.e. it is an integer set to 0).
+ */
+ if (data->type != ACPI_TYPE_PACKAGE ||
+ data->package.count < 2 ||
+ data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ data->package.elements[0].integer.value != 0) {
+ IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < data->package.count; i++) {
+ union acpi_object *domain;
+
+ wifi_pkg = &data->package.elements[i];
+
+ /* skip entries that are not a package with the right size */
+ if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+ wifi_pkg->package.count != data_size)
+ continue;
+
+ domain = &wifi_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == ACPI_WIFI_DOMAIN)
+ goto found;
+ }
+
+ return ERR_PTR(-ENOENT);
+
+found:
+ return wifi_pkg;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
+
+int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+ union acpi_object *wifi_pkg, *data;
+ u32 mcc_val;
+ int ret;
+
+ data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg)) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ mcc_val = wifi_pkg->package.elements[1].integer.value;
+
+ mcc[0] = (mcc_val >> 8) & 0xff;
+ mcc[1] = mcc_val & 0xff;
+ mcc[2] = '\0';
+
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+ union acpi_object *data, *wifi_pkg;
+ u64 dflt_pwr_limit;
+
+ data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data)) {
+ dflt_pwr_limit = 0;
+ goto out;
+ }
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ ACPI_SPLC_WIFI_DATA_SIZE);
+ if (IS_ERR(wifi_pkg) ||
+ wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ dflt_pwr_limit = 0;
+ goto out_free;
+ }
+
+ dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+out_free:
+ kfree(data);
+out:
+ return dflt_pwr_limit;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
new file mode 100644
index 000000000000..cb5f32c1d705
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_acpi__
+#define __iwl_fw_acpi__
+
+#include <linux/acpi.h>
+
+#define ACPI_WRDS_METHOD "WRDS"
+#define ACPI_EWRD_METHOD "EWRD"
+#define ACPI_WGDS_METHOD "WGDS"
+#define ACPI_WRDD_METHOD "WRDD"
+#define ACPI_SPLC_METHOD "SPLC"
+
+#define ACPI_WIFI_DOMAIN (0x07)
+
+#define ACPI_SAR_TABLE_SIZE 10
+#define ACPI_SAR_PROFILE_NUM 4
+
+#define ACPI_GEO_TABLE_SIZE 6
+#define ACPI_NUM_GEO_PROFILES 3
+#define ACPI_GEO_PER_CHAIN_SIZE 3
+
+#define ACPI_SAR_NUM_CHAIN_LIMITS 2
+#define ACPI_SAR_NUM_SUB_BANDS 5
+
+#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
+ ACPI_SAR_TABLE_SIZE + 3)
+#define ACPI_WGDS_WIFI_DATA_SIZE 18
+#define ACPI_WRDD_WIFI_DATA_SIZE 2
+#define ACPI_SPLC_WIFI_DATA_SIZE 2
+
+#define ACPI_WGDS_NUM_BANDS 2
+#define ACPI_WGDS_TABLE_SIZE 3
+
+#ifdef CONFIG_ACPI
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+ int data_size);
+
+/**
+ * iwl_acpi_get_mcc - read MCC from ACPI, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev);
+
+#else /* CONFIG_ACPI */
+
+static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+ int data_size)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */
+#endif /* __iwl_fw_acpi__ */
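
A short usage sketch, not part of the patch (the caller and the messages are hypothetical): it shows how the CONFIG_ACPI stubs above let callers stay unconditional, falling back to defaults when ACPI support is compiled out.

#include <linux/types.h>
#include <linux/device.h>
#include "fw/acpi.h"

static void example_read_acpi_hints(struct device *dev)
{
	char mcc[3];
	u64 pwr_limit;

	/* iwl_acpi_get_mcc() returns 0 on success, -ENOENT without ACPI */
	if (!iwl_acpi_get_mcc(dev, mcc))
		dev_info(dev, "ACPI WRDD MCC: %s\n", mcc);

	/* returns 0 when no SPLC limit is available */
	pwr_limit = iwl_acpi_get_pwr_limit(dev);
	if (pwr_limit)
		dev_info(dev, "ACPI SPLC power limit: %llu\n",
			 (unsigned long long)pwr_limit);
}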
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
index d2717fafdf5b..570f19026c91 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -116,14 +116,14 @@ struct iwl_binding_cmd {
#define IWL_MVM_MAX_QUOTA 128
/**
- * struct iwl_time_quota_data - configuration of time quota per binding
+ * struct iwl_time_quota_data_v1 - configuration of time quota per binding
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_ctxt_id_and_color
* @quota: absolute time quota in TU. The scheduler will try to divide the
 * remaining quota (after Time Events) according to this quota.
* @max_duration: max uninterrupted context duration in TU
*/
-struct iwl_time_quota_data {
+struct iwl_time_quota_data_v1 {
__le32 id_and_color;
__le32 quota;
__le32 max_duration;
@@ -137,8 +137,43 @@ struct iwl_time_quota_data {
* essentially zero.
* On CDB the fourth one is a regular binding.
*/
+struct iwl_time_quota_cmd_v1 {
+ struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+enum iwl_quota_low_latency {
+ IWL_QUOTA_LOW_LATENCY_NONE = 0,
+ IWL_QUOTA_LOW_LATENCY_TX = BIT(0),
+ IWL_QUOTA_LOW_LATENCY_RX = BIT(1),
+ IWL_QUOTA_LOW_LATENCY_TX_RX =
+ IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX,
+};
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding.
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ * @low_latency: low latency status, &enum iwl_quota_low_latency
+ */
+struct iwl_time_quota_data {
+ __le32 id_and_color;
+ __le32 quota;
+ __le32 max_duration;
+ __le32 low_latency;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * Note: on non-CDB the fourth one is the auxiliary mac and is essentially zero.
+ * On CDB the fourth one is a regular binding.
+ *
+ * @quotas: allocations per binding
+ */
struct iwl_time_quota_cmd {
struct iwl_time_quota_data quotas[MAX_BINDINGS];
-} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */
#endif /* __iwl_fw_api_binding_h__ */
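
As an illustration only (the caller, the chosen quota values and the id_and_color argument are assumed, not taken from this patch), filling one v2 quota entry with the new low_latency field could look like:

#include <linux/types.h>
#include "fw/api/binding.h"

static void example_fill_quota(struct iwl_time_quota_cmd *cmd,
			       u32 id_and_color)
{
	struct iwl_time_quota_data *data = &cmd->quotas[0];

	/* id_and_color normally comes from the binding context */
	data->id_and_color = cpu_to_le32(id_and_color);
	data->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA / 2);
	data->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
	data->low_latency = cpu_to_le32(IWL_QUOTA_LOW_LATENCY_TX_RX);
}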
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 074868394427..7ebbf097488b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -504,6 +504,7 @@ enum iwl_legacy_cmds {
/**
* @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
+ * with &struct iwl_mvm_marker_rsp
*/
MARKER_CMD = 0xcb,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 9f88b61536bc..0a81fb1b6ed4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -278,6 +278,15 @@ struct iwl_mvm_marker {
__le32 metadata[0];
} __packed; /* MARKER_API_S_VER_1 */
+/**
+ * struct iwl_mvm_marker_rsp - Response to marker cmd
+ *
+ * @gp2: The gp2 clock value in the FW
+ */
+struct iwl_mvm_marker_rsp {
+ __le32 gp2;
+} __packed;
+
/* Operation types for the debug mem access */
enum {
DEBUG_MEM_OP_READ = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 39c89e85fd2f..ec42c84e5df2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -68,78 +68,11 @@
*/
enum iwl_mac_conf_subcmd_ids {
/**
- * @LINK_QUALITY_MEASUREMENT_CMD: &struct iwl_link_qual_msrmnt_cmd
- */
- LINK_QUALITY_MEASUREMENT_CMD = 0x1,
-
- /**
- * @LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF:
- * &struct iwl_link_qual_msrmnt_notif
- */
- LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
-
- /**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
};
-#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
-
-enum iwl_lqm_cmd_operatrions {
- LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
- LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
-};
-
-enum iwl_lqm_status {
- LQM_STATUS_SUCCESS = 0,
- LQM_STATUS_TIMEOUT = 1,
- LQM_STATUS_ABORT = 2,
-};
-
-/**
- * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
- * @cmd_operation: command operation to be performed (start or stop)
- * as defined above.
- * @mac_id: MAC ID the measurement applies to.
- * @measurement_time: time of the total measurement to be performed, in uSec.
- * @timeout: maximum time allowed until a response is sent, in uSec.
- */
-struct iwl_link_qual_msrmnt_cmd {
- __le32 cmd_operation;
- __le32 mac_id;
- __le32 measurement_time;
- __le32 timeout;
-} __packed /* LQM_CMD_API_S_VER_1 */;
-
-/**
- * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
- *
- * @frequent_stations_air_time: an array containing the total air time
- * (in uSec) used by the most frequently transmitting stations.
- * @number_of_stations: the number of uniqe stations included in the array
- * (a number between 0 to 16)
- * @total_air_time_other_stations: the total air time (uSec) used by all the
- * stations which are not included in the above report.
- * @time_in_measurement_window: the total time in uSec in which a measurement
- * took place.
- * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
- * measurement
- * @mac_id: MAC ID the measurement applies to.
- * @status: return status. may be one of the LQM_STATUS_* defined above.
- * @reserved: reserved.
- */
-struct iwl_link_qual_msrmnt_notif {
- __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
- __le32 number_of_stations;
- __le32 total_air_time_other_stations;
- __le32 time_in_measurement_window;
- __le32 tx_frame_dropped;
- __le32 mac_id;
- __le32 status;
- u8 reserved[12];
-} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
-
/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
index e76f9cd4473d..721b9fed7201 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
@@ -81,28 +81,4 @@ struct iwl_fw_paging_cmd {
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
-/**
- * enum iwl_fw_item_id - FW item IDs
- *
- * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload
- * download
- */
-enum iwl_fw_item_id {
- IWL_FW_ITEM_ID_PAGING = 3,
-};
-
-/**
- * struct iwl_fw_get_item_cmd - get an item from the fw
- * @item_id: ID of item to obtain, see &enum iwl_fw_item_id
- */
-struct iwl_fw_get_item_cmd {
- __le32 item_id;
-} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
-
-struct iwl_fw_get_item_resp {
- __le32 item_id;
- __le32 item_byte_cnt;
- __le32 item_val;
-} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
-
#endif /* __iwl_fw_api_paging_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index a06afb5605d2..a3c77e01863b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -357,8 +357,7 @@ struct iwl_dev_tx_power_cmd {
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
-#define IWL_NUM_GEO_PROFILES 3
-#define IWL_GEO_PER_CHAIN_SIZE 3
+#define IWL_NUM_GEO_PROFILES 3
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5a40092febfb..3bfc657f6b42 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
- u8 reserved2;
+ u8 reserved;
u8 scan_start_mac_id;
- u8 extended_dwell;
- u8 active_dwell;
- u8 passive_dwell;
- u8 fragmented_dwell;
union {
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
+ sizeof(__le16))
/**
* struct iwl_umac_scan_abort
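The three IWL_SCAN_REQ_UMAC_SIZE_* macros above derive the on-the-wire command length for each API version from the single struct definition: v7 is the full struct, v6 drops the two adaptive-dwell u8 fields and the __le16 budget, and v1 additionally drops the second-LMAC __le32 entries. A minimal userspace sketch of that arithmetic (the total size is a placeholder; only the field-size math mirrors the macros):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's __le16/__le32 in this sketch. */
typedef uint16_t le16;
typedef uint32_t le32;

#define SIZE_V7(total)	(total)
/* v6 lacks adwell_default_n_aps, adwell_default_n_aps_social and
 * adwell_max_budget: 2 * u8 + le16 smaller than v7. */
#define SIZE_V6(total)	((total) - 2 * sizeof(uint8_t) - sizeof(le16))
/* v1 additionally lacks the second LMAC entry of max_out_time and
 * suspend_time: another 2 * le32 smaller. */
#define SIZE_V1(total)	(SIZE_V6(total) - 2 * sizeof(le32))

int main(void)
{
	size_t v7 = 64;	/* hypothetical sizeof(struct iwl_scan_req_umac) */

	printf("v7=%zu v6=%zu v1=%zu\n", SIZE_V7(v7), SIZE_V6(v7), SIZE_V1(v7));
	return 0;
}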
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index af369eba3795..dc40cbd52f92 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -68,9 +68,6 @@
* @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames)
* @STA_FLG_DISABLE_TX: set if TX should be disabled
* @STA_FLG_PS: set if STA is in Power Save
- * @STA_FLG_INVALID: set if STA is invalid
- * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
- * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
* @STA_FLG_DRAIN_FLOW: drain flow
* @STA_FLG_PAN: STA is for PAN interface
* @STA_FLG_CLASS_AUTH: station is authenticated
@@ -100,7 +97,6 @@
* @STA_FLG_MIMO_EN_SISO: no support for MIMO
* @STA_FLG_MIMO_EN_MIMO2: 2 streams supported
* @STA_FLG_MIMO_EN_MIMO3: 3 streams supported
- * @STA_FLG_MFP_EN: Management Frame Protection
* @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask)
* @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift)
* @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 14ad9fb895f9..f5d5ba7e37ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -710,7 +710,7 @@ enum iwl_mvm_ba_resp_flags {
* @reduced_txp: power reduced according to TPC. This is the actual value and
* not a copy from the LQ command. Thus, if not the first rate was used
* for Tx-ing then this value will be set to 0 by FW.
- * @initial_rate: TLC rate info, initial rate index, TLC table color
+ * @tlc_rate_info: TLC rate info, initial rate index, TLC table color
* @retry_cnt: retry count
* @query_byte_cnt: SCD query byte count
* @query_frame_cnt: SCD query frame count
@@ -730,7 +730,7 @@ struct iwl_mvm_compressed_ba_notif {
__le32 flags;
u8 sta_id;
u8 reduced_txp;
- u8 initial_rate;
+ u8 tlc_rate_info;
u8 retry_cnt;
__le32 query_byte_cnt;
__le16 query_frame_cnt;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f5dd7d83cd0a..8106fd4be996 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -93,6 +93,8 @@ static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
unsigned long flags;
int i;
+ IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
+
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
@@ -233,6 +235,8 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
unsigned long flags;
int i, j;
+ IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
+
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
@@ -476,6 +480,8 @@ static void iwl_dump_prph(struct iwl_trans *trans,
unsigned long flags;
u32 i;
+ IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+
if (!iwl_trans_grab_nic_access(trans, &flags))
return;
@@ -559,6 +565,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
bool monitor_dump_only = false;
int i;
+ IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+
/* there's no point in fw dump if the bus is dead */
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
@@ -816,6 +824,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_mem->type = fw_dbg_mem[i].data_type;
dump_mem->offset = cpu_to_le32(ofs);
+ IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
+ dump_mem->type);
+
switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
iwl_trans_read_mem_bytes(fwrt->trans, ofs,
@@ -841,6 +852,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (smem_len) {
+ IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -853,6 +865,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
if (sram2_len) {
+ IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -868,6 +881,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
+ IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
@@ -930,6 +944,7 @@ out:
iwl_fw_free_dump_desc(fwrt);
fwrt->dump.trig = NULL;
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+ IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
}
IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 279248cd9cfb..37a5c5b4eda6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -136,7 +136,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_N_SCAN_CHANNELS = 31,
IWL_UCODE_TLV_PAGING = 32,
IWL_UCODE_TLV_SEC_RT_USNIFFER = 34,
- IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35,
+ /* 35 is unused */
IWL_UCODE_TLV_FW_VERSION = 36,
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
@@ -248,6 +248,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
 * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlarged to
* include information about ACL time sharing.
+ * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
+ * indicating low latency direction.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -262,9 +264,11 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
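For context, a hedged sketch of how these bits are consumed: the driver tests them with fw_has_api() before relying on the corresponding firmware behaviour (the adaptive-dwell helper added to mvm/mvm.h later in this series does exactly this).

/* Hypothetical call site; mvm and the v6/v7 scan-building paths are
 * placeholders, only fw_has_api() and the TLV bit come from the patch. */
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ADAPTIVE_DWELL)) {
	/* build SCAN_REQUEST_CMD_UMAC_API_S_VER_7 with the adwell fields */
} else {
	/* fall back to the v6 layout without adaptive dwell */
}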
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index e6bc9cb60700..985496cc01d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -138,11 +138,6 @@ struct fw_img {
u32 paging_mem_size;
};
-struct iwl_sf_region {
- u32 addr;
- u32 size;
-};
-
/*
* Block paging calculations
*/
@@ -257,7 +252,6 @@ enum iwl_fw_type {
* @type: firmware type (&enum iwl_fw_type)
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
- * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
* we get the ALIVE from the uCode
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
@@ -290,8 +284,6 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
- u32 sdio_adma_addr;
-
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 1610722b8099..1fec8e3a6b35 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -87,9 +87,6 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
get_order(paging->fw_paging_size));
paging->fw_paging_block = NULL;
}
- kfree(fwrt->trans->paging_download_buf);
- fwrt->trans->paging_download_buf = NULL;
- fwrt->trans->paging_db = NULL;
memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
}
@@ -100,13 +97,11 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
{
struct page *block;
dma_addr_t phys = 0;
- int blk_idx, order, num_of_pages, size, dma_enabled;
+ int blk_idx, order, num_of_pages, size;
if (fwrt->fw_paging_db[0].fw_paging_block)
return 0;
- dma_enabled = is_device_dma_capable(fwrt->trans->dev);
-
/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
@@ -139,24 +134,18 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
- if (dma_enabled) {
- phys = dma_map_page(fwrt->trans->dev, block, 0,
- PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(fwrt->trans->dev, phys)) {
- /*
- * free the previous pages and the current one
- * since we failed to map_page.
- */
- iwl_free_fw_paging(fwrt);
- return -ENOMEM;
- }
- fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
- } else {
- fwrt->fw_paging_db[blk_idx].fw_paging_phys =
- PAGING_ADDR_SIG |
- blk_idx << BLOCK_2_EXP_SIZE;
+ phys = dma_map_page(fwrt->trans->dev, block, 0,
+ PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(fwrt->trans->dev, phys)) {
+ /*
+ * free the previous pages and the current one
+ * since we failed to map_page.
+ */
+ iwl_free_fw_paging(fwrt);
+ return -ENOMEM;
}
+ fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
if (!blk_idx)
IWL_DEBUG_FW(fwrt,
@@ -312,60 +301,6 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
-/*
- * Send paging item cmd to FW in case CPU2 has paging image
- */
-static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- struct iwl_fw_get_item_cmd fw_get_item_cmd = {
- .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
- };
- struct iwl_fw_get_item_resp *item_resp;
- struct iwl_host_cmd cmd = {
- .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
- .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
- .data = { &fw_get_item_cmd, },
- .len = { sizeof(fw_get_item_cmd), },
- };
-
- ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
- if (ret) {
- IWL_ERR(fwrt,
- "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
- ret);
- return ret;
- }
-
- item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
- if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
- IWL_ERR(fwrt,
- "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
- le32_to_cpu(item_resp->item_id));
- ret = -EIO;
- goto exit;
- }
-
- /* Add an extra page for headers */
- fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
- FW_PAGING_SIZE,
- GFP_KERNEL);
- if (!fwrt->trans->paging_download_buf) {
- ret = -ENOMEM;
- goto exit;
- }
- fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
- fwrt->trans->paging_db = fwrt->fw_paging_db;
- IWL_DEBUG_FW(fwrt,
- "Paging: got paging request address (paging_req_addr 0x%08x)\n",
- fwrt->trans->paging_req_addr);
-
-exit:
- iwl_free_resp(&cmd);
-
- return ret;
-}
-
int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
{
const struct fw_img *fw = &fwrt->fw->img[type];
@@ -382,20 +317,6 @@ int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
if (!fw->paging_mem_size)
return 0;
- /*
- * When dma is not enabled, the driver needs to copy / write
- * the downloaded / uploaded page to / from the smem.
- * This gets the location of the place were the pages are
- * stored.
- */
- if (!is_device_dma_capable(fwrt->trans->dev)) {
- ret = iwl_trans_get_paging_item(fwrt);
- if (ret) {
- IWL_ERR(fwrt, "failed to get FW paging item\n");
- return ret;
- }
- }
-
ret = iwl_save_fw_paging(fwrt, fw);
if (ret) {
IWL_ERR(fwrt, "failed to save the FW paging image\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 71cb1ecde0f7..e21e46cf6f9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -333,6 +333,8 @@ struct iwl_pwr_tx_backoff {
* @gen2: a000 and on transport operation
* @cdb: CDB support
* @nvm_type: see &enum iwl_nvm_type
+ * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
+ * the regular queues
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -364,6 +366,7 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
+ u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
@@ -383,6 +386,7 @@ struct iwl_cfg {
gen2:1,
cdb:1,
dbgc_supported:1;
+ u16 tx_cmd_queue_size;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -393,6 +397,7 @@ struct iwl_cfg {
u8 max_vht_ampdu_exponent;
u8 ucode_api_max;
u8 ucode_api_min;
+ u32 min_umac_error_event_table;
};
/*
@@ -463,14 +468,15 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
-extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
-extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
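A hypothetical (not from the patch) initializer showing where the new soc_latency, tx_cmd_queue_size and min_umac_error_event_table fields would be filled in for one of the *_cfg_soc entries declared above; all values are placeholders and unset fields keep their defaults:

static const struct iwl_cfg iwl_example_2ac_cfg_soc = {
	.name = "Example 9000-series SoC device",	/* placeholder */
	.soc_latency = 5000,			/* placeholder latency value */
	.tx_cmd_queue_size = 32,		/* 0 means "same size as data queues" */
	.min_umac_error_event_table = 0x800000,	/* placeholder lower bound */
};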
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index b03e0f975b5a..4f0d070eda54 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -218,7 +218,6 @@
#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
-#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */
#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
@@ -229,7 +228,6 @@
CSR_INT_BIT_HW_ERR | \
CSR_INT_BIT_FH_TX | \
CSR_INT_BIT_SW_ERR | \
- CSR_INT_BIT_PAGING | \
CSR_INT_BIT_RF_KILL | \
CSR_INT_BIT_SW_RX | \
CSR_INT_BIT_WAKEUP | \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index cd77c6971753..c023fcf5d452 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -216,6 +216,7 @@ do { \
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 99676d6c4713..4b224d7d967c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -832,7 +832,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
capa->standard_phy_calibration_size =
le32_to_cpup((__le32 *)tlv_data);
break;
- case IWL_UCODE_TLV_SEC_RT:
+ case IWL_UCODE_TLV_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
drv->fw.type = IWL_FW_MVM;
@@ -864,7 +864,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
FW_PHY_CFG_RX_CHAIN) >>
FW_PHY_CFG_RX_CHAIN_POS;
break;
- case IWL_UCODE_TLV_SECURE_SEC_RT:
+ case IWL_UCODE_TLV_SECURE_SEC_RT:
iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
tlv_len);
drv->fw.type = IWL_FW_MVM;
@@ -1039,12 +1039,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
drv->fw.img[usniffer_img].paging_mem_size =
paging_mem_size;
break;
- case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
- if (tlv_len != sizeof(u32))
- goto invalid_tlv_len;
- drv->fw.sdio_adma_addr =
- le32_to_cpup((__le32 *)tlv_data);
- break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
/*
* Don't return an error in case of a shorter tlv_len
@@ -1335,7 +1329,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
+ * 2) backup cache for save/restore during power-downs
+ */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index c3a5d8ccc95e..8928613e033e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -68,13 +68,14 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
-#include <linux/acpi.h>
+
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-csr.h"
+#include "fw/acpi.h"
/* NVM offsets (in words) definitions */
enum nvm_offsets {
@@ -937,91 +938,3 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
return regd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
-
-#ifdef CONFIG_ACPI
-#define WRDD_METHOD "WRDD"
-#define WRDD_WIFI (0x07)
-#define WRDD_WIGIG (0x10)
-
-static u32 iwl_wrdd_get_mcc(struct device *dev, union acpi_object *wrdd)
-{
- union acpi_object *mcc_pkg, *domain_type, *mcc_value;
- u32 i;
-
- if (wrdd->type != ACPI_TYPE_PACKAGE ||
- wrdd->package.count < 2 ||
- wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
- wrdd->package.elements[0].integer.value != 0) {
- IWL_DEBUG_EEPROM(dev, "Unsupported wrdd structure\n");
- return 0;
- }
-
- for (i = 1 ; i < wrdd->package.count ; ++i) {
- mcc_pkg = &wrdd->package.elements[i];
-
- if (mcc_pkg->type != ACPI_TYPE_PACKAGE ||
- mcc_pkg->package.count < 2 ||
- mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
- mcc_pkg = NULL;
- continue;
- }
-
- domain_type = &mcc_pkg->package.elements[0];
- if (domain_type->integer.value == WRDD_WIFI)
- break;
-
- mcc_pkg = NULL;
- }
-
- if (mcc_pkg) {
- mcc_value = &mcc_pkg->package.elements[1];
- return mcc_value->integer.value;
- }
-
- return 0;
-}
-
-int iwl_get_bios_mcc(struct device *dev, char *mcc)
-{
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
- u32 mcc_val;
-
- root_handle = ACPI_HANDLE(dev);
- if (!root_handle) {
- IWL_DEBUG_EEPROM(dev,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)WRDD_METHOD,
- &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_EEPROM(dev, "WRD method not found\n");
- return -ENOENT;
- }
-
- /* Call WRDD with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_EEPROM(dev, "WRDC invocation failed (0x%x)\n",
- status);
- return -ENOENT;
- }
-
- mcc_val = iwl_wrdd_get_mcc(dev, wrdd.pointer);
- kfree(wrdd.pointer);
- if (!mcc_val)
- return -ENOENT;
-
- mcc[0] = (mcc_val >> 8) & 0xff;
- mcc[1] = mcc_val & 0xff;
- mcc[2] = '\0';
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_get_bios_mcc);
-#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 2d1a24dd8410..306736c7a042 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -109,21 +109,4 @@ struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc);
-#ifdef CONFIG_ACPI
-/**
- * iwl_get_bios_mcc - read MCC from BIOS, if available
- *
- * @dev: the struct device
- * @mcc: output buffer (3 bytes) that will get the MCC
- *
- * This function tries to read the current MCC from ACPI if available.
- */
-int iwl_get_bios_mcc(struct device *dev, char *mcc);
-#else
-static inline int iwl_get_bios_mcc(struct device *dev, char *mcc)
-{
- return -ENOENT;
-}
-#endif
-
#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 784bdd0ed233..7e9c924e1220 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -205,3 +207,17 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+
+void iwl_trans_ref(struct iwl_trans *trans)
+{
+ if (trans->ops->ref)
+ trans->ops->ref(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_ref);
+
+void iwl_trans_unref(struct iwl_trans *trans)
+{
+ if (trans->ops->unref)
+ trans->ops->unref(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_unref);
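With iwl_trans_ref()/iwl_trans_unref() now out-of-line and exported here (they were previously static inlines in iwl-trans.h, removed below), callers outside the transport core can still take a runtime-PM style reference. A hedged caller sketch; the helper name is hypothetical:

static void example_with_device_awake(struct iwl_trans *trans)
{
	iwl_trans_ref(trans);		/* no-op if the transport has no ->ref op */

	/* ... issue host commands or touch registers here ... */

	iwl_trans_unref(trans);		/* balance the reference */
}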
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index e90abbfba718..ca0b5536a8a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -398,8 +398,6 @@ struct iwl_hcmd_arr {
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
* @command_groups_size: number of command groups, to avoid illegal access
- * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
- * we get the ALIVE from the uCode
* @cb_data_offs: offset inside skb->cb to store transport data at, must have
* space for at least two pointers
*/
@@ -419,8 +417,6 @@ struct iwl_trans_config {
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
- u32 sdio_adma_addr;
-
u8 cb_data_offs;
};
@@ -524,6 +520,9 @@ struct iwl_trans_txq_scd_cfg {
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
+ * @dump_regs: dump using IWL_ERR configuration space and memory mapped
+ * registers of the device to diagnose failure, e.g., when HW becomes
+ * inaccessible.
*/
struct iwl_trans_ops {
@@ -531,8 +530,6 @@ struct iwl_trans_ops {
void (*op_mode_leave)(struct iwl_trans *iwl_trans);
int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
bool run_in_rfkill);
- int (*update_sf)(struct iwl_trans *trans,
- struct iwl_sf_region *st_fwrd_space);
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans, bool low_power);
@@ -593,6 +590,8 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv
*trigger);
+
+ void (*dump_regs)(struct iwl_trans *trans);
};
/**
@@ -700,12 +699,6 @@ enum iwl_plat_pm_mode {
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
- * @paging_req_addr: The location were the FW will upload / download the pages
- * from. The address is set by the opmode
- * @paging_db: Pointer to the opmode paging data base, the pointer is set by
- * the opmode.
- * @paging_download_buf: Buffer used for copying all of the pages before
- * downloading them to the FW. The buffer is allocated in the opmode
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
@@ -749,21 +742,11 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
- u64 dflt_pwr_limit;
-
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
- /*
- * Paging parameters - All of the parameters should be set by the
- * opmode when paging is enabled
- */
- u32 paging_req_addr;
- struct iwl_fw_paging *paging_db;
- void *paging_download_buf;
-
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
@@ -830,17 +813,6 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
-static inline int iwl_trans_update_sf(struct iwl_trans *trans,
- struct iwl_sf_region *st_fwrd_space)
-{
- might_sleep();
-
- if (trans->ops->update_sf)
- return trans->ops->update_sf(trans, st_fwrd_space);
-
- return 0;
-}
-
static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
bool low_power)
{
@@ -875,18 +847,6 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test, reset);
}
-static inline void iwl_trans_ref(struct iwl_trans *trans)
-{
- if (trans->ops->ref)
- trans->ops->ref(trans);
-}
-
-static inline void iwl_trans_unref(struct iwl_trans *trans)
-{
- if (trans->ops->unref)
- trans->ops->unref(trans);
-}
-
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
if (!trans->ops->suspend)
@@ -910,6 +870,12 @@ iwl_trans_dump_data(struct iwl_trans *trans,
return trans->ops->dump_data(trans, trigger);
}
+static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
+{
+ if (trans->ops->dump_regs)
+ trans->ops->dump_regs(trans);
+}
+
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
@@ -1191,6 +1157,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
const struct iwl_cfg *cfg,
const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);
+void iwl_trans_ref(struct iwl_trans *trans);
+void iwl_trans_unref(struct iwl_trans *trans);
/*****************************************************
* driver (transport) register/unregister functions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 00e6737dda72..a47635c32c11 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b205a7bfb828..b1f73dcabd31 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -664,6 +664,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int ret, i;
struct iwl_binding_cmd binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
+ struct iwl_time_quota_data *quota;
u32 status;
int size;
@@ -745,17 +746,20 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
/* and some quota */
- quota_cmd.quotas[0].id_and_color =
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+ quota->id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
mvmvif->phy_ctxt->color));
- quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
- quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+ quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
- for (i = 1; i < MAX_BINDINGS; i++)
- quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ for (i = 1; i < MAX_BINDINGS; i++) {
+ quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+ quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ }
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
- sizeof(quota_cmd), &quota_cmd);
+ iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 71a01df96f8b..4228fac77f41 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1455,80 +1455,6 @@ static const char * const chanwidths[] = {
[NL80211_CHAN_WIDTH_160] = "vht160",
};
-static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
- struct iwl_rx_packet *pkt, void *data)
-{
- struct ieee80211_vif *vif = data;
- struct iwl_mvm *mvm =
- container_of(notif_wait, struct iwl_mvm, notif_wait);
- struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
- u32 num_of_stations = le32_to_cpu(report->number_of_stations);
- int i;
-
- IWL_INFO(mvm, "LQM report:\n");
- IWL_INFO(mvm, "\tstatus: %d\n", report->status);
- IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
- IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
- le32_to_cpu(report->tx_frame_dropped));
- IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
- le32_to_cpu(report->time_in_measurement_window));
- IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
- le32_to_cpu(report->total_air_time_other_stations));
- IWL_INFO(mvm, "\tchannel_freq: %d\n",
- vif->bss_conf.chandef.center_freq1);
- IWL_INFO(mvm, "\tchannel_width: %s\n",
- chanwidths[vif->bss_conf.chandef.width]);
- IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
- for (i = 0; i < num_of_stations; i++)
- IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
- report->frequent_stations_air_time[i]);
-
- return true;
-}
-
-static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_notification_wait wait_lqm_notif;
- static u16 lqm_notif[] = {
- WIDE_ID(MAC_CONF_GROUP,
- LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
- };
- int err;
- u32 duration;
- u32 timeout;
-
- if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
- return -EINVAL;
-
- iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
- lqm_notif, ARRAY_SIZE(lqm_notif),
- iwl_mvm_lqm_notif_wait, vif);
- mutex_lock(&mvm->mutex);
- err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
- duration, timeout);
- mutex_unlock(&mvm->mutex);
-
- if (err) {
- IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
- iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
- return err;
- }
-
- /* wait for 2 * timeout (safety guard) and convert to jiffies*/
- timeout = msecs_to_jiffies((timeout * 2) / 1000);
-
- err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
- timeout);
- if (err)
- IWL_ERR(mvm, "Getting lqm notif timed out\n");
-
- return count;
-}
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1553,7 +1479,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
-MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
@@ -1594,7 +1519,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff,
mvmvif->dbgfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index e97904c2c4d4..2ff594f11259 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -660,6 +660,36 @@ out:
return ret ?: count;
}
+static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char *buff, *pos, *endpos;
+ static const size_t bufsz = 1024;
+ int ret;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
+ mvm->trans->cfg->fw_name_pre);
+ pos += scnprintf(pos, endpos - pos, "FW: %s\n",
+ mvm->fwrt.fw->human_readable);
+ pos += scnprintf(pos, endpos - pos, "Device: %s\n",
+ mvm->fwrt.trans->cfg->name);
+ pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
+ mvm->fwrt.dev->bus->name);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
#define PRINT_STATS_LE32(_struct, _memb) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, #_memb, \
@@ -1662,6 +1692,7 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
@@ -1843,6 +1874,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 83485493a79a..c0de7bb86cf7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -66,7 +66,6 @@
*****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
-#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@@ -75,7 +74,7 @@
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
-#include "iwl-eeprom-parse.h"
+#include "fw/acpi.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -177,6 +176,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
+ u32 umac_error_event_table;
if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
@@ -196,15 +196,26 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->error_event_table[1] =
le32_to_cpu(lmac2->error_event_table_ptr);
mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
- mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
- mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
- mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+ umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+
+ if (!umac_error_event_table) {
+ mvm->support_umac_log = false;
+ } else if (umac_error_event_table >=
+ mvm->trans->cfg->min_umac_error_event_table) {
+ mvm->support_umac_log = true;
+ mvm->umac_error_event_table = umac_error_event_table;
+ } else {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ mvm->umac_error_event_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ mvm->support_umac_log = false;
+ }
alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
- if (mvm->umac_error_event_table)
- mvm->support_umac_log = true;
IWL_DEBUG_FW(mvm,
"Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
@@ -253,7 +264,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
int ret, i;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
- struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
@@ -307,18 +317,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
- /*
- * update the sdio allocation according to the pointer we get in the
- * alive notification.
- */
- st_fwrd_space.addr = mvm->sf_space.addr;
- st_fwrd_space.size = mvm->sf_space.size;
- ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
- if (ret) {
- IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
- return ret;
- }
-
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -579,17 +577,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
}
#ifdef CONFIG_ACPI
-#define ACPI_WRDS_METHOD "WRDS"
-#define ACPI_EWRD_METHOD "EWRD"
-#define ACPI_WGDS_METHOD "WGDS"
-#define ACPI_WIFI_DOMAIN (0x07)
-#define ACPI_WRDS_WIFI_DATA_SIZE (IWL_MVM_SAR_TABLE_SIZE + 2)
-#define ACPI_EWRD_WIFI_DATA_SIZE ((IWL_MVM_SAR_PROFILE_NUM - 1) * \
- IWL_MVM_SAR_TABLE_SIZE + 3)
-#define ACPI_WGDS_WIFI_DATA_SIZE 18
-#define ACPI_WGDS_NUM_BANDS 2
-#define ACPI_WGDS_TABLE_SIZE 3
-
static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
union acpi_object *table,
struct iwl_mvm_sar_profile *profile,
@@ -599,7 +586,7 @@ static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
profile->enabled = enabled;
- for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
+ for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
if ((table[i].type != ACPI_TYPE_INTEGER) ||
(table[i].integer.value > U8_MAX))
return -EINVAL;
@@ -610,88 +597,18 @@ static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
return 0;
}
-static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
- union acpi_object *data,
- int data_size)
-{
- union acpi_object *wifi_pkg = NULL;
- int i;
-
- /*
- * We need at least two packages, one for the revision and one
- * for the data itself. Also check that the revision is valid
- * (i.e. it is an integer set to 0).
- */
- if (data->type != ACPI_TYPE_PACKAGE ||
- data->package.count < 2 ||
- data->package.elements[0].type != ACPI_TYPE_INTEGER ||
- data->package.elements[0].integer.value != 0) {
- IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
- return ERR_PTR(-EINVAL);
- }
-
- /* loop through all the packages to find the one for WiFi */
- for (i = 1; i < data->package.count; i++) {
- union acpi_object *domain;
-
- wifi_pkg = &data->package.elements[i];
-
- /* Skip anything that is not a package with the right
- * amount of elements (i.e. domain_type,
- * enabled/disabled plus the actual data size.
- */
- if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
- wifi_pkg->package.count != data_size)
- continue;
-
- domain = &wifi_pkg->package.elements[0];
- if (domain->type == ACPI_TYPE_INTEGER &&
- domain->integer.value == ACPI_WIFI_DOMAIN)
- break;
-
- wifi_pkg = NULL;
- }
-
- if (!wifi_pkg)
- return ERR_PTR(-ENOENT);
-
- return wifi_pkg;
-}
-
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
- union acpi_object *wifi_pkg, *table;
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
+ union acpi_object *wifi_pkg, *table, *data;
bool enabled;
int ret;
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_RADIO(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
- &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
- return -ENOENT;
- }
-
- /* Call WRDS with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
-
- wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
- ACPI_WRDS_WIFI_DATA_SIZE);
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_WRDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
@@ -712,46 +629,23 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
*/
ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
enabled);
-
out_free:
- kfree(wrds.pointer);
+ kfree(data);
return ret;
}
static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
- union acpi_object *wifi_pkg;
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
+ union acpi_object *wifi_pkg, *data;
bool enabled;
int i, n_profiles, ret;
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_RADIO(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
- &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
- return -ENOENT;
- }
+ data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
- /* Call EWRD with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
-
- wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
- ACPI_EWRD_WIFI_DATA_SIZE);
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_EWRD_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
@@ -788,55 +682,33 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
break;
/* go to the next table */
- pos += IWL_MVM_SAR_TABLE_SIZE;
+ pos += ACPI_SAR_TABLE_SIZE;
}
out_free:
- kfree(ewrd.pointer);
+ kfree(data);
return ret;
}
static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
- union acpi_object *wifi_pkg;
- acpi_handle root_handle;
- acpi_handle handle;
- struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
+ union acpi_object *wifi_pkg, *data;
int i, j, ret;
int idx = 1;
- root_handle = ACPI_HANDLE(mvm->dev);
- if (!root_handle) {
- IWL_DEBUG_RADIO(mvm,
- "Could not retrieve root port ACPI handle\n");
- return -ENOENT;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
- &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
- return -ENOENT;
- }
-
- /* Call WGDS with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
- return -ENOENT;
- }
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
- wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
- ACPI_WGDS_WIFI_DATA_SIZE);
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_WGDS_WIFI_DATA_SIZE);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
goto out_free;
}
- for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
- for (j = 0; j < IWL_MVM_GEO_TABLE_SIZE; j++) {
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
union acpi_object *entry;
entry = &wifi_pkg->package.elements[idx++];
@@ -851,7 +723,7 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
}
ret = 0;
out_free:
- kfree(wgds.pointer);
+ kfree(data);
return ret;
}
@@ -861,25 +733,25 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
int i, j, idx;
- int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+ int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
int len = sizeof(cmd);
- BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
- IWL_MVM_SAR_TABLE_SIZE);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+ BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+ ACPI_SAR_TABLE_SIZE);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v3);
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
struct iwl_mvm_sar_profile *prof;
/* don't allow SAR to be disabled (profile 0 means disable) */
if (profs[i] == 0)
return -EPERM;
- /* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
- if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
+ /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+ if (profs[i] > ACPI_SAR_PROFILE_NUM)
return -EINVAL;
/* profiles go from 1 to 4, so decrement to access the array */
@@ -894,8 +766,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
}
IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
- for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
- idx = (i * IWL_NUM_SUB_BANDS) + j;
+ for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+ idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
cmd.v3.per_chain_restriction[i][j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
@@ -931,7 +803,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > IWL_NUM_GEO_PROFILES)) {
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
ret = -EIO;
IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
}
@@ -959,10 +831,12 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
- BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);
- for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
struct iwl_per_chain_offset *chain =
(struct iwl_per_chain_offset *)&cmd.table[i];
@@ -970,7 +844,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u8 *value;
value = &mvm->geo_profiles[i].values[j *
- IWL_GEO_PER_CHAIN_SIZE];
+ ACPI_GEO_PER_CHAIN_SIZE];
chain[j].max_tx_power = cpu_to_le16(value[0]);
chain[j].chain_a = value[1];
chain[j].chain_b = value[2];
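All three table readers (WRDS, EWRD, WGDS) now converge on the same retrieval pattern provided by fw/acpi.h: fetch the ACPI object, locate the WiFi package inside it, consume the elements, and free the buffer. A hedged outline of that shared shape (hypothetical function name, abbreviated error handling):

static int example_read_wifi_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	int ret = 0;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WRDS_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	/* ... parse wifi_pkg->package.elements[] into driver state ... */

out_free:
	kfree(data);	/* iwl_acpi_get_object() allocates; the caller frees */
	return ret;
}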
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a9ac872226fd..3e92a117c0b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1899,11 +1899,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
- if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
- mvmvif->lqm_active)
- iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
- 0, 0);
-
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@@ -3432,10 +3427,24 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
return;
mutex_lock(&mvm->mutex);
+
+ /* we are only changing the min_width, may be a noop */
+ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
+ if (phy_ctxt->width == ctx->min_def.width)
+ goto out_unlock;
+
+ /* we are just toggling between 20_NOHT and 20 */
+ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
+ ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+ goto out_unlock;
+ }
+
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
+
+out_unlock:
mutex_unlock(&mvm->mutex);
}
@@ -3900,11 +3909,6 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
- if (mvmvif->lqm_active)
- iwl_mvm_send_lqm_cmd(vif,
- LQM_CMD_OPERATION_STOP_MEASUREMENT,
- 0, 0);
-
/* Schedule the time event to a bit before beacon 1,
* to make sure we're in the new channel when the
* GO/AP arrives. In case count <= 1 immediately schedule the
@@ -3998,39 +4002,36 @@ out_unlock:
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
{
- if (drop) {
- if (iwl_mvm_has_new_tx_api(mvm))
- /* TODO new tx api */
- WARN_ONCE(1,
- "Need to implement flush TX queue\n");
- else
- iwl_mvm_flush_tx_path(mvm,
- iwl_mvm_flushable_queues(mvm) & queues,
- 0);
- } else {
- if (iwl_mvm_has_new_tx_api(mvm)) {
- struct ieee80211_sta *sta;
- int i;
+ int i;
+ if (!iwl_mvm_has_new_tx_api(mvm)) {
+ if (drop) {
mutex_lock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
- sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[i],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
- continue;
-
- iwl_mvm_wait_sta_queues_empty(mvm,
- iwl_mvm_sta_from_mac80211(sta));
- }
-
+ iwl_mvm_flush_tx_path(mvm,
+ iwl_mvm_flushable_queues(mvm) & queues, 0);
mutex_unlock(&mvm->mutex);
} else {
- iwl_trans_wait_tx_queues_empty(mvm->trans,
- queues);
+ iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
}
+ return;
+ }
+
+ mutex_lock(&mvm->mutex);
+ for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ if (drop)
+ iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+ else
+ iwl_mvm_wait_sta_queues_empty(mvm,
+ iwl_mvm_sta_from_mac80211(sta));
}
+ mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
@@ -4259,31 +4260,6 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
event->u.ba.ssn);
}
-static void
-iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- const struct ieee80211_event *event)
-{
- struct iwl_fw_dbg_trigger_tlv *trig;
- struct iwl_fw_dbg_trigger_ba *ba_trig;
-
- if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
- return;
-
- trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
- ba_trig = (void *)trig->data;
- if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
- ieee80211_vif_to_wdev(vif), trig))
- return;
-
- if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
- return;
-
- iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
- "Frame from %pM timed out, tid %d",
- event->u.ba.sta->addr, event->u.ba.tid);
-}
-
static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct ieee80211_event *event)
@@ -4298,7 +4274,8 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
iwl_mvm_event_bar_rx_callback(mvm, vif, event);
break;
case BA_FRAME_TIMEOUT:
- iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+ iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
+ event->u.ba.tid);
break;
default:
break;
@@ -4314,9 +4291,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- /* TODO - remove a000 disablement when we have RXQ config API */
- if (!iwl_mvm_has_new_rx_api(mvm) ||
- mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+ if (!iwl_mvm_has_new_rx_api(mvm))
return;
notif->cookie = mvm->queue_sync_cookie;
@@ -4325,6 +4300,13 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
+ /* TODO - remove this when we have RXQ config API */
+ if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+ qmask = BIT(0);
+ if (notif->sync)
+ atomic_set(&mvm->queue_sync_counter, 1);
+ }
+
ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 949e63418299..4575595ab022 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -89,6 +89,7 @@
#include "tof.h"
#include "fw/runtime.h"
#include "fw/dbg.h"
+#include "fw/acpi.h"
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
@@ -147,6 +148,8 @@ struct iwl_mvm_phy_ctxt {
u16 color;
u32 ref;
+ enum nl80211_chan_width width;
+
/*
* TODO: This should probably be removed. Currently here only for rate
* scaling algorithm
@@ -436,12 +439,6 @@ struct iwl_mvm_vif {
/* TCP Checksum Offload */
netdev_features_t features;
-
- /*
- * link quality measurement - used to check whether this interface
- * is in the middle of a link quality measurement
- */
- bool lqm_active;
};
static inline struct iwl_mvm_vif *
@@ -588,12 +585,9 @@ enum iwl_mvm_tdls_cs_state {
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
* @buf_size: the reorder buffer size as set by the last addba request
- * @sta_id: sta id of this reorder buffer
* @queue: queue of this reorder buffer
* @last_amsdu: track last ASMDU SN for duplication detection
* @last_sub_index: track ASMDU sub frame index for duplication detection
- * @entries: list of skbs stored
- * @reorder_time: time the packet was stored in the reorder buffer
* @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
* it is the time of last received sub-frame
* @removed: prevent timer re-arming
@@ -605,12 +599,9 @@ struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
u8 buf_size;
- u8 sta_id;
int queue;
u16 last_amsdu;
u8 last_sub_index;
- struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
- unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
struct timer_list reorder_timer;
bool removed;
bool valid;
@@ -619,15 +610,38 @@ struct iwl_mvm_reorder_buffer {
} ____cacheline_aligned_in_smp;
/**
+ * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct _iwl_mvm_reorder_buf_entry {
+ struct sk_buff_head frames;
+ unsigned long reorder_time;
+};
+
+/* use a wrapper struct so the alignment attribute can be applied to it */
+struct iwl_mvm_reorder_buf_entry {
+ struct _iwl_mvm_reorder_buf_entry e;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
+#endif
+;
+
+/**
* struct iwl_mvm_baid_data - BA session data
* @sta_id: station id
* @tid: tid of the session
* @baid: baid of the session
* @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue; this is rounded up to avoid
+ * cache line sharing between queues
* @last_rx: last rx jiffies, updated only if timeout passed from last update
* @session_timer: timer to check if BA session expired, runs at 2 * timeout
* @mvm: mvm pointer, needed for timer context
* @reorder_buf: reorder buffer, allocated per queue
+ * @entries: per-queue reorder buffer entries (cache-line aligned per queue)
*/
struct iwl_mvm_baid_data {
struct rcu_head rcu_head;
@@ -635,12 +649,23 @@ struct iwl_mvm_baid_data {
u8 tid;
u8 baid;
u16 timeout;
+ u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
+ struct iwl_mvm_baid_data __rcu **rcu_ptr;
struct iwl_mvm *mvm;
- struct iwl_mvm_reorder_buffer reorder_buf[];
+ struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mvm_reorder_buf_entry entries[];
};
+static inline struct iwl_mvm_baid_data *
+iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
+{
+ return (void *)((u8 *)buf -
+ offsetof(struct iwl_mvm_baid_data, reorder_buf) -
+ sizeof(*buf) * buf->queue);
+}
+
/*
* enum iwl_mvm_queue_status - queue status
* @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
@@ -685,20 +710,14 @@ enum iwl_mvm_queue_status {
#define IWL_MVM_NUM_CIPHERS 10
-#ifdef CONFIG_ACPI
-#define IWL_MVM_SAR_TABLE_SIZE 10
-#define IWL_MVM_SAR_PROFILE_NUM 4
-#define IWL_MVM_GEO_TABLE_SIZE 6
-
struct iwl_mvm_sar_profile {
bool enabled;
- u8 table[IWL_MVM_SAR_TABLE_SIZE];
+ u8 table[ACPI_SAR_TABLE_SIZE];
};
struct iwl_mvm_geo_profile {
- u8 values[IWL_MVM_GEO_TABLE_SIZE];
+ u8 values[ACPI_GEO_TABLE_SIZE];
};
-#endif
struct iwl_mvm {
/* for logger access */
@@ -736,7 +755,6 @@ struct iwl_mvm {
u32 log_event_table;
u32 umac_error_event_table;
bool support_umac_log;
- struct iwl_sf_region sf_space;
u32 ampdu_ref;
bool ampdu_toggle;
@@ -1019,8 +1037,8 @@ struct iwl_mvm {
/* does a monitor vif exist (only one can exist hence bool) */
bool monitor_on;
#ifdef CONFIG_ACPI
- struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
- struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
+ struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
#endif
};
@@ -1124,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
@@ -1251,6 +1275,12 @@ static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
}
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+}
+
static inline struct agg_tx_status *
iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
{
@@ -1489,6 +1519,27 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/* Quota management */
+static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_quota_low_latency(mvm) ?
+ sizeof(struct iwl_time_quota_cmd) :
+ sizeof(struct iwl_time_quota_cmd_v1);
+}
+
+static inline struct iwl_time_quota_data
+*iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm,
+ struct iwl_time_quota_cmd *cmd,
+ int i)
+{
+ struct iwl_time_quota_data_v1 *quotas;
+
+ if (iwl_mvm_has_quota_low_latency(mvm))
+ return &cmd->quotas[i];
+
+ quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas;
+ return (struct iwl_time_quota_data *)&quotas[i];
+}
+
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
struct ieee80211_vif *disabled_vif);
@@ -1809,7 +1860,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
struct iwl_mvm_internal_rxq_notif *notif,
u32 size);
-void iwl_mvm_reorder_timer_expired(unsigned long data);
+void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
@@ -1821,12 +1872,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
bool tdls, bool cmd_q);
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const char *errmsg);
-
-/* Link Quality Measurement */
-int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
- enum iwl_lqm_cmd_operatrions operation,
- u32 duration, u32 timeout);
-bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
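Editorial note: the new iwl_mvm_baid_data_from_reorder_buf() helper above relies on reorder_buf[] now being a fixed-size member of struct iwl_mvm_baid_data, so the parent structure can be recovered from a pointer to one of the per-queue buffers by subtracting the member offset plus the buffer's position in the array. The following is a minimal, self-contained userspace sketch of the same offsetof() arithmetic; the container/slot names are illustrative only, not driver identifiers.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct slot {
	int queue;		/* index of this slot inside the parent */
	unsigned long payload;
};

struct container {
	unsigned long other_state;
	struct slot slots[NUM_QUEUES];	/* fixed-size member array */
};

/*
 * Recover the parent from a pointer to one of its slots, as the driver
 * does for reorder_buf[]: subtract the member offset plus the distance
 * of this slot from the start of the array.
 */
static struct container *container_from_slot(struct slot *s)
{
	return (void *)((char *)s -
			offsetof(struct container, slots) -
			sizeof(*s) * s->queue);
}

int main(void)
{
	struct container c;
	int i;

	for (i = 0; i < NUM_QUEUES; i++)
		c.slots[i].queue = i;

	for (i = 0; i < NUM_QUEUES; i++)
		assert(container_from_slot(&c.slots[i]) == &c);

	printf("parent recovered for all %d slots\n", NUM_QUEUES);
	return 0;
}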
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index fb25b6f29323..5bfe5306524c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -73,6 +73,7 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
+#include "fw/acpi.h"
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
@@ -784,7 +785,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_get_bios_mcc(mvm->dev, mcc)) {
+ !iwl_acpi_get_mcc(mvm->dev, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 231878969332..7078b7e458be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -86,6 +86,7 @@
#include "time-event.h"
#include "fw-api.h"
#include "fw/api/scan.h"
+#include "fw/acpi.h"
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -423,8 +424,6 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
- HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
- HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@@ -490,18 +489,21 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
-static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
- const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+ const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
+ u64 dflt_pwr_limit;
- if (!pwr_tx_backoff)
+ if (!backoff)
return 0;
- while (pwr_tx_backoff->pwr) {
- if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
- return pwr_tx_backoff->backoff;
+ dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
- pwr_tx_backoff++;
+ while (backoff->pwr) {
+ if (dflt_pwr_limit >= backoff->pwr)
+ return backoff->backoff;
+
+ backoff++;
}
return 0;
@@ -701,7 +703,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
driver_data[2]);
- trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
/* Set a short watchdog for the command queue */
@@ -771,7 +772,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
goto out_free;
mvm->hw_registered = true;
- min_backoff = calc_min_backoff(trans, cfg);
+ min_backoff = iwl_mvm_min_backoff(mvm);
iwl_mvm_thermal_initialize(mvm, min_backoff);
err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
@@ -1118,7 +1119,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- bool calibrating = ACCESS_ONCE(mvm->calibrating);
+ bool calibrating = READ_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
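Editorial note: with the SPLC parsing moved out of the PCIe code, iwl_mvm_min_backoff() above queries iwl_acpi_get_pwr_limit() for the platform power limit at the time the backoff is computed and walks the zero-terminated per-device backoff table for the first threshold the limit satisfies. Below is a small, runnable userspace sketch of that table walk; the table values are made up for illustration and do not come from any real device configuration.

#include <stdio.h>

struct pwr_tx_backoff {
	unsigned int pwr;	/* power limit threshold, 0 terminates */
	unsigned int backoff;	/* TX backoff to apply */
};

/* Illustrative values only -- not taken from any real device config. */
static const struct pwr_tx_backoff example_backoffs[] = {
	{ .pwr = 1600, .backoff = 0 },
	{ .pwr = 1300, .backoff = 1 },
	{ .pwr = 900,  .backoff = 3 },
	{ /* zero terminator */ },
};

/*
 * Return the backoff of the first entry whose threshold the platform
 * power limit meets; 0 if the table is empty or nothing matches.
 */
static unsigned int min_backoff(const struct pwr_tx_backoff *backoff,
				unsigned long long dflt_pwr_limit)
{
	if (!backoff)
		return 0;

	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;
		backoff++;
	}

	return 0;
}

int main(void)
{
	printf("%u\n", min_backoff(example_backoffs, 1000)); /* prints 3 */
	return 0;
}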
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 7ee8e9077baf..305cd56bf746 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -272,6 +272,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
}
ctxt->channel = chandef->chan;
+ ctxt->width = chandef->width;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
action, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 2141db5bff82..b4a0264329b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -164,9 +164,12 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
beacon_int = mvm->noa_vif->bss_conf.beacon_int;
for (i = 0; i < MAX_BINDINGS; i++) {
- u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+ struct iwl_time_quota_data *data =
+ iwl_mvm_quota_cmd_get_quota(mvm, cmd,
+ i);
+ u32 id_n_c = le32_to_cpu(data->id_and_color);
u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
- u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+ u32 quota = le32_to_cpu(data->quota);
if (id != phy_id)
continue;
@@ -175,9 +178,9 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
quota /= beacon_int;
IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
- le32_to_cpu(cmd->quotas[i].quota), quota);
+ le32_to_cpu(data->quota), quota);
- cmd->quotas[i].quota = cpu_to_le32(quota);
+ data->quota = cpu_to_le32(quota);
}
#endif
}
@@ -194,6 +197,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
.disabled_vif = disabled_vif,
};
struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
+ struct iwl_time_quota_data *qdata, *last_data;
bool send = false;
lockdep_assert_held(&mvm->mutex);
@@ -216,7 +220,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
*/
num_active_macs = 0;
for (i = 0; i < MAX_BINDINGS; i++) {
- cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
num_active_macs += data.n_interfaces[i];
}
@@ -265,14 +270,16 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
if (data.colors[i] < 0)
continue;
- cmd.quotas[idx].id_and_color =
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
+
+ qdata->id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
if (data.n_interfaces[i] <= 0)
- cmd.quotas[idx].quota = cpu_to_le32(0);
+ qdata->quota = cpu_to_le32(0);
#ifdef CONFIG_IWLWIFI_DEBUGFS
else if (data.dbgfs_min[i])
- cmd.quotas[idx].quota =
+ qdata->quota =
cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
#endif
else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
@@ -283,24 +290,25 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
* the minimal required quota for the low latency
* binding.
*/
- cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+ qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
else
- cmd.quotas[idx].quota =
+ qdata->quota =
cpu_to_le32(quota * data.n_interfaces[i]);
- WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+ WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100,
"Binding=%d, quota=%u > max=%u\n",
- idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+ idx, le32_to_cpu(qdata->quota), QUOTA_100);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
+ qdata->max_duration = cpu_to_le32(0);
idx++;
}
/* Give the remainder of the session to the first data binding */
for (i = 0; i < MAX_BINDINGS; i++) {
- if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
- le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ if (le32_to_cpu(qdata->quota) != 0) {
+ le32_add_cpu(&qdata->quota, quota_rem);
IWL_DEBUG_QUOTA(mvm,
"quota: giving remainder of %d to binding %d\n",
quota_rem, i);
@@ -312,17 +320,19 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
/* check that we have non-zero quota for all valid bindings */
for (i = 0; i < MAX_BINDINGS; i++) {
- if (cmd.quotas[i].id_and_color != last->quotas[i].id_and_color)
+ qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
+ if (qdata->id_and_color != last_data->id_and_color)
send = true;
- if (cmd.quotas[i].max_duration != last->quotas[i].max_duration)
+ if (qdata->max_duration != last_data->max_duration)
send = true;
- if (abs((int)le32_to_cpu(cmd.quotas[i].quota) -
- (int)le32_to_cpu(last->quotas[i].quota))
+ if (abs((int)le32_to_cpu(qdata->quota) -
+ (int)le32_to_cpu(last_data->quota))
> IWL_MVM_QUOTA_THRESHOLD)
send = true;
- if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID))
+ if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID))
continue;
- WARN_ONCE(cmd.quotas[i].quota == 0,
+ WARN_ONCE(qdata->quota == 0,
"zero quota on binding %d\n", i);
}
@@ -334,7 +344,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
return 0;
}
- err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
+ err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+ iwl_mvm_quota_cmd_size(mvm), &cmd);
if (err)
IWL_ERR(mvm, "Failed to send quota: %d\n", err);
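Editorial note: the quota changes above make the TIME_QUOTA_CMD layout depend on firmware capability: without IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY only the v1-sized entries are sent, and iwl_mvm_quota_cmd_get_quota() hides the different strides from the callers. A minimal userspace sketch of the same pattern follows, using toy structure names (old_entry/new_entry/quota_cmd are illustrative, not driver types).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BINDINGS 4

struct old_entry {		/* v1 layout: what old firmware expects */
	uint32_t id_and_color;
	uint32_t quota;
	uint32_t max_duration;
};

struct new_entry {		/* new layout: adds a trailing field */
	uint32_t id_and_color;
	uint32_t quota;
	uint32_t max_duration;
	uint32_t low_latency;
};

struct quota_cmd {
	struct new_entry quotas[MAX_BINDINGS];
};

/* Pick the i-th entry with the stride the firmware actually expects. */
static struct new_entry *cmd_get_quota(struct quota_cmd *cmd, int i,
				       bool has_low_latency)
{
	if (has_low_latency)
		return &cmd->quotas[i];

	return (struct new_entry *)((struct old_entry *)cmd->quotas + i);
}

static size_t cmd_size(bool has_low_latency)
{
	return has_low_latency ?
		sizeof(struct quota_cmd) :
		MAX_BINDINGS * sizeof(struct old_entry);
}

int main(void)
{
	struct quota_cmd cmd;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	for (i = 0; i < MAX_BINDINGS; i++)
		cmd_get_quota(&cmd, i, false)->quota = 100 * (i + 1);

	/* Only cmd_size(false) bytes would be sent to old firmware. */
	printf("old-firmware command size: %zu bytes\n", cmd_size(false));
	return 0;
}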
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 0fe723ca844e..c69515ed72df 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -67,12 +67,8 @@ static u8 rs_ht_to_legacy[] = {
static const u8 ant_toggle_lookup[] = {
[ANT_NONE] = ANT_NONE,
[ANT_A] = ANT_B,
- [ANT_B] = ANT_C,
- [ANT_AB] = ANT_BC,
- [ANT_C] = ANT_A,
- [ANT_AC] = ANT_AB,
- [ANT_BC] = ANT_AC,
- [ANT_ABC] = ANT_ABC,
+ [ANT_B] = ANT_A,
+ [ANT_AB] = ANT_AB,
};
#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
@@ -975,7 +971,7 @@ static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
{
u8 new_ant_type;
- if (!rate->ant || rate->ant > ANT_ABC)
+ if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C))
return 0;
if (!rs_is_valid_ant(valid_ant, rate->ant))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 2d14a58cbdd7..d1a40688d5e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -663,11 +663,10 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
expected_size = sizeof(struct iwl_notif_statistics_cdb);
}
- if (iwl_rx_packet_payload_len(pkt) != expected_size) {
- IWL_ERR(mvm, "received invalid statistics size (%d)!\n",
- iwl_rx_packet_payload_len(pkt));
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
+ "received invalid statistics size (%d)!\n",
+ iwl_rx_packet_payload_len(pkt)))
return;
- }
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 248699c2c4bf..76dc58381e1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -409,9 +409,13 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
+ struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
u16 nssn)
{
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
u16 ssn = reorder_buf->head_sn;
lockdep_assert_held(&reorder_buf->lock);
@@ -422,7 +426,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
int index = ssn % reorder_buf->buf_size;
- struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+ struct sk_buff_head *skb_list = &entries[index].e.frames;
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
@@ -445,20 +449,24 @@ set_timer:
if (reorder_buf->num_stored && !reorder_buf->removed) {
u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
- while (skb_queue_empty(&reorder_buf->entries[index]))
+ while (skb_queue_empty(&entries[index].e.frames))
index = (index + 1) % reorder_buf->buf_size;
/* modify timer to match next frame's expiration time */
mod_timer(&reorder_buf->reorder_timer,
- reorder_buf->reorder_time[index] + 1 +
+ entries[index].e.reorder_time + 1 +
RX_REORDER_BUF_TIMEOUT_MQ);
} else {
del_timer(&reorder_buf->reorder_timer);
}
}
-void iwl_mvm_reorder_timer_expired(unsigned long data)
+void iwl_mvm_reorder_timer_expired(struct timer_list *t)
{
- struct iwl_mvm_reorder_buffer *buf = (void *)data;
+ struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
+ struct iwl_mvm_baid_data *baid_data =
+ iwl_mvm_baid_data_from_reorder_buf(buf);
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &baid_data->entries[buf->queue * baid_data->entries_per_queue];
int i;
u16 sn = 0, index = 0;
bool expired = false;
@@ -474,7 +482,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
for (i = 0; i < buf->buf_size ; i++) {
index = (buf->head_sn + i) % buf->buf_size;
- if (skb_queue_empty(&buf->entries[index])) {
+ if (skb_queue_empty(&entries[index].e.frames)) {
/*
* If there is a hole and the next frame didn't expire
* we want to break and not advance SN
@@ -482,7 +490,8 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
cont = false;
continue;
}
- if (!cont && !time_after(jiffies, buf->reorder_time[index] +
+ if (!cont &&
+ !time_after(jiffies, entries[index].e.reorder_time +
RX_REORDER_BUF_TIMEOUT_MQ))
break;
@@ -494,14 +503,20 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
if (expired) {
struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ u8 sta_id = baid_data->sta_id;
rcu_read_lock();
- sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
/* SN is set to the last expired frame + 1 */
IWL_DEBUG_HT(buf->mvm,
"Releasing expired frames for sta %u, sn %d\n",
- buf->sta_id, sn);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+ sta_id, sn);
+ iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
+ sta, baid_data->tid);
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
rcu_read_unlock();
} else {
/*
@@ -510,7 +525,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
* accordingly to this frame.
*/
mod_timer(&buf->reorder_timer,
- buf->reorder_time[index] +
+ entries[index].e.reorder_time +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
spin_unlock(&buf->lock);
@@ -541,7 +556,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
/* release all frames that are in the reorder buffer to the stack */
spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+ iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
reorder_buf->buf_size));
spin_unlock_bh(&reorder_buf->lock);
@@ -605,6 +620,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
u8 sub_frame_idx = desc->amsdu_info &
IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ struct iwl_mvm_reorder_buf_entry *entries;
int index;
u16 nssn, sn;
u8 baid;
@@ -621,7 +637,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
/* no sta yet */
- if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ if (WARN_ONCE(IS_ERR_OR_NULL(sta),
+ "Got valid BAID without a valid station assigned\n"))
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -654,6 +671,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
IWL_RX_MPDU_REORDER_SN_SHIFT;
buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
spin_lock_bh(&buffer->lock);
@@ -666,7 +684,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
goto drop;
}
@@ -682,7 +700,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
!ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
- iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
+ min_sn);
}
/* drop any outdated packets */
@@ -700,6 +719,22 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
return false;
}
+ /*
+ * release immediately if there are no stored frames, and the sn is
+ * equal to the head.
+ * This can happen due to the reorder timer, where the NSSN is behind
+ * head_sn. Once everything was released and the next in-sequence frame
+ * arrives, the NSSN alone would not let us release it immediately,
+ * even though there is no hole and we can move forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
index = sn % buffer->buf_size;
/*
@@ -710,7 +745,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If it is the same SN then if the subframe index is incrementing it
* is the same AMSDU - otherwise it is a retransmission.
*/
- tail = skb_peek_tail(&buffer->entries[index]);
+ tail = skb_peek_tail(&entries[index].e.frames);
if (tail && !amsdu)
goto drop;
else if (tail && (sn != buffer->last_amsdu ||
@@ -718,9 +753,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
goto drop;
/* put in reorder buffer */
- __skb_queue_tail(&buffer->entries[index], skb);
+ __skb_queue_tail(&entries[index].e.frames, skb);
buffer->num_stored++;
- buffer->reorder_time[index] = jiffies;
+ entries[index].e.reorder_time = jiffies;
if (amsdu) {
buffer->last_amsdu = sn;
@@ -739,7 +774,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* release notification with up to date NSSN.
*/
if (!amsdu || last_subframe)
- iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
spin_unlock_bh(&buffer->lock);
return true;
@@ -799,6 +834,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct sk_buff *skb;
u8 crypt_len = 0;
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return;
+
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -1059,7 +1097,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
reorder_buf = &ba_data->reorder_buf[queue];
spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
le16_to_cpu(release->nssn));
spin_unlock_bh(&reorder_buf->lock);
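Editorial note: iwl_mvm_reorder_timer_expired() above is converted to the timer_setup()/from_timer() interface: the callback now receives the struct timer_list pointer and derives its containing object from it, instead of being handed an unsigned long cookie. A stripped-down kernel-context sketch of that pattern is below; my_state/my_timeout are illustrative names, not driver code.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct my_state {
	struct timer_list timer;
	unsigned long deadline;
};

/* New-style callback: recover the containing object from the timer. */
static void my_timeout(struct timer_list *t)
{
	struct my_state *st = from_timer(st, t, timer);

	pr_debug("timer for %p fired, deadline was %lu\n", st, st->deadline);
}

static void my_state_init(struct my_state *st)
{
	/* Replaces init_timer() plus manual .function/.data assignment. */
	timer_setup(&st->timer, my_timeout, 0);
	st->deadline = jiffies + HZ;
	mod_timer(&st->timer, st->deadline);
}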
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 774122fed454..e4fd476e9ccb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
u32 measurement_dwell;
};
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ if (params->measurement_dwell) {
+ cmd->v7.active_dwell = params->measurement_dwell;
+ cmd->v7.passive_dwell = params->measurement_dwell;
+ } else {
+ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ }
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ }
+
+ return;
+ }
+
if (params->measurement_dwell) {
- cmd->active_dwell = params->measurement_dwell;
- cmd->passive_dwell = params->measurement_dwell;
- cmd->extended_dwell = params->measurement_dwell;
+ cmd->v1.active_dwell = params->measurement_dwell;
+ cmd->v1.passive_dwell = params->measurement_dwell;
+ cmd->v1.extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->v6.max_out_time[1] =
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[1] =
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
-
- if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- else
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.channel_flags = channel_flags;
+ cmd->v7.n_channels = params->n_channels;
+ } else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_has_new_tx_api(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
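Editorial note: iwl_mvm_scan_size() and iwl_mvm_get_scan_req_umac_data() above now select between three UMAC scan command layouts (v1, v6 and the adaptive-dwell v7), with the per-channel configuration and the tail appended after whichever fixed header applies. The sketch below only illustrates that size computation for the UMAC path shown in the hunk; the *_SIZE constants are placeholders, not the real firmware structure sizes.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Placeholder sizes -- for illustration only. */
#define UMAC_HDR_SIZE_V1	48
#define UMAC_HDR_SIZE_V6	56
#define UMAC_HDR_SIZE_V7	64
#define CHANNEL_CFG_SIZE	8
#define TAIL_SIZE		32

struct fw_caps {
	bool adaptive_dwell;
	bool new_tx_api;
	unsigned int n_scan_channels;
};

/*
 * Mirrors the selection order in iwl_mvm_scan_size(): the most capable
 * layout wins, and the channel config plus tail are appended after it.
 */
static size_t scan_cmd_size(const struct fw_caps *caps)
{
	size_t base = UMAC_HDR_SIZE_V1;

	if (caps->adaptive_dwell)
		base = UMAC_HDR_SIZE_V7;
	else if (caps->new_tx_api)
		base = UMAC_HDR_SIZE_V6;

	return base + caps->n_scan_channels * CHANNEL_CFG_SIZE + TAIL_SIZE;
}

int main(void)
{
	struct fw_caps caps = {
		.adaptive_dwell = true,
		.new_tx_api = true,
		.n_scan_channels = 50,
	};

	printf("scan command size: %zu bytes\n", scan_cmd_size(&caps));
	return 0;
}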
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index c4a343534c5e..c19f98489d4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -252,9 +252,11 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static void iwl_mvm_rx_agg_session_expired(unsigned long data)
+static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
- struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
+ struct iwl_mvm_baid_data *data =
+ from_timer(data, t, session_timer);
+ struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
struct iwl_mvm_baid_data *ba_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvm_sta;
@@ -644,8 +646,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
/* Redirect to lower AC */
iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
- cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
- ssn);
+ cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
/* Update AC marking of the queue */
spin_lock_bh(&mvm->queue_info_lock);
@@ -1258,6 +1259,14 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
mvm_sta->sta_id,
i, wdg_timeout);
tid_data->txq_id = txq_id;
+
+ /*
+ * Since we don't set the seq number after a reset and the HW
+ * sets it now, an FW reset causes the seq number to start at 0
+ * again, so the driver needs to update it internally as well to
+ * keep it in sync with the real value
+ */
+ tid_data->seq_number = 0;
} else {
u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -2104,6 +2113,8 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
int j;
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
spin_lock_bh(&reorder_buf->lock);
if (likely(!reorder_buf->num_stored)) {
@@ -2119,7 +2130,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
WARN_ON(1);
for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_purge(&reorder_buf->entries[j]);
+ __skb_queue_purge(&entries[j].e.frames);
/*
* Prevent timer re-arm. This prevents a very far fetched case
* where we timed out on the notification. There may be prior
@@ -2135,7 +2146,6 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
- u32 sta_id,
struct iwl_mvm_baid_data *data,
u16 ssn, u8 buf_size)
{
@@ -2144,23 +2154,22 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
for (i = 0; i < mvm->trans->num_rx_queues; i++) {
struct iwl_mvm_reorder_buffer *reorder_buf =
&data->reorder_buf[i];
+ struct iwl_mvm_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
int j;
reorder_buf->num_stored = 0;
reorder_buf->head_sn = ssn;
reorder_buf->buf_size = buf_size;
/* rx reorder timer */
- reorder_buf->reorder_timer.function =
- iwl_mvm_reorder_timer_expired;
- reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
- init_timer(&reorder_buf->reorder_timer);
+ timer_setup(&reorder_buf->reorder_timer,
+ iwl_mvm_reorder_timer_expired, 0);
spin_lock_init(&reorder_buf->lock);
reorder_buf->mvm = mvm;
reorder_buf->queue = i;
- reorder_buf->sta_id = sta_id;
reorder_buf->valid = false;
for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_head_init(&reorder_buf->entries[j]);
+ __skb_queue_head_init(&entries[j].e.frames);
}
}
@@ -2181,16 +2190,44 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /*
+ * The division below will be OK if either the cache line size
+ * can be divided by the entry size (ALIGN will round up) or if
+ * if the entry size can be divided by the cache line size, in
+ * which case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /*
+ * Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
/*
* Allocate here so if allocation fails we can bail out early
* before starting the BA session in the firmware
*/
baid_data = kzalloc(sizeof(*baid_data) +
mvm->trans->num_rx_queues *
- sizeof(baid_data->reorder_buf[0]),
+ reorder_buf_size,
GFP_KERNEL);
if (!baid_data)
return -ENOMEM;
+
+ /*
+ * This division is why we need the above BUILD_BUG_ON();
+ * if that doesn't hold, then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
}
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
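Editorial note: the hunk above packs all per-queue reorder entries into one flat array behind struct iwl_mvm_baid_data: each queue's chunk is rounded up to whole cache lines and entries_per_queue records how many slots that chunk holds, so queue q's slot i lives at entries[q * entries_per_queue + i]. The following runnable sketch works through that arithmetic with assumed sizes (a 64-byte cache line and a 32-byte entry); the real values depend on the architecture and on struct sk_buff_head.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/*
	 * Assumed values for illustration: a 64-byte cache line and a
	 * 32-byte reorder entry (a power of two, as enforced by the
	 * __aligned(roundup_pow_of_two(...)) on the entry struct).
	 */
	unsigned int cache_line = 64;
	unsigned int entry_size = 32;
	unsigned int buf_size = 7;	/* entries actually used per queue */
	unsigned int num_queues = 4;

	unsigned int per_queue_bytes =
		ALIGN_UP(buf_size * entry_size, cache_line);
	unsigned int entries_per_queue = per_queue_bytes / entry_size;
	unsigned int queue = 2, index = 5;

	printf("per-queue chunk: %u bytes (%u slots, %u used)\n",
	       per_queue_bytes, entries_per_queue, buf_size);

	/* Queue q, sequence slot i maps to one flat array index. */
	printf("entries[%u] holds queue %u, slot %u\n",
	       queue * entries_per_queue + index, queue, index);

	printf("total entries allocation: %u bytes after the header\n",
	       num_queues * per_queue_bytes);
	return 0;
}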
@@ -2249,9 +2286,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
- setup_timer(&baid_data->session_timer,
- iwl_mvm_rx_agg_session_expired,
- (unsigned long)&mvm->baid_map[baid]);
+ baid_data->rcu_ptr = &mvm->baid_map[baid];
+ timer_setup(&baid_data->session_timer,
+ iwl_mvm_rx_agg_session_expired, 0);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
@@ -2261,8 +2298,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
- iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
- baid_data, ssn, buf_size);
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism will timeout (not that it's
@@ -2515,12 +2551,6 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
- if (!mvm->trans->cfg->gen2)
- buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- else
- buf_size = min_t(int, buf_size,
- LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);
-
spin_lock_bh(&mvmsta->lock);
ssn = tid_data->ssn;
queue = tid_data->txq_id;
@@ -2532,10 +2562,17 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (iwl_mvm_has_new_tx_api(mvm)) {
/*
- * If no queue iwl_mvm_sta_tx_agg_start() would have failed so
- * no need to check queue's status
+ * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
+ * would have failed, so if we are here there is no need to
+ * allocate a queue.
+ * However, if the aggregation size is different from the default
+ * size, the scheduler should be reconfigured.
+ * We cannot do this with the new TX API, so return unsupported
+ * for now, until it is offloaded to firmware.
+ * Note that if the SCD default value changes, this condition
+ * should be updated as well.
*/
- if (buf_size < mvmsta->max_agg_bufsize)
+ if (buf_size < IWL_FRAME_LIMIT)
return -ENOTSUPP;
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
@@ -2558,7 +2595,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Only reconfig the SCD for the queue if the window size has
* changed from current (become smaller)
*/
- if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+ if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
/*
* If reconfiguring an existing queue, it first must be
* drained
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6f2e2af23219..593b7f97b29c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1594,8 +1594,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time);
mvmsta->tid_data[tid].lq_color =
- (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
- TX_RES_RATE_TABLE_COLOR_POS;
+ TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
}
rcu_read_unlock();
@@ -1746,6 +1745,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
+ u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
int i;
sta_id = ba_res->sta_id;
@@ -1759,11 +1759,18 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (!le16_to_cpu(ba_res->tfd_cnt))
goto out;
+ rcu_read_lock();
+
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+ if (!mvmsta)
+ goto out_unlock;
+
/* Free per TID */
for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
struct iwl_mvm_compressed_ba_tfd *ba_tfd =
&ba_res->tfd[i];
+ mvmsta->tid_data[i].lq_color = lq_color;
iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
@@ -1771,6 +1778,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
+out_unlock:
+ rcu_read_unlock();
out:
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 2ea74abad73d..d46115e2d69e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -455,20 +455,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_umac_error_event_table table;
- u32 base;
- base = mvm->umac_error_event_table;
-
- if (base < 0x800000) {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
- ? "Init" : "RT");
+ if (!mvm->support_umac_log)
return;
- }
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+ iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
+ sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -608,8 +600,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
if (mvm->error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
- if (mvm->support_umac_log)
- iwl_mvm_dump_umac_error_log(mvm);
+ iwl_mvm_dump_umac_error_log(mvm);
}
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
@@ -1368,6 +1359,31 @@ void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
rcu_read_unlock();
}
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct ieee80211_sta *sta,
+ u16 tid)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+ ba_trig = (void *)trig->data;
+ if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+ ieee80211_vif_to_wdev(vif), trig))
+ return;
+
+ if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
+ return;
+
+ iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+ "Frame from %pM timed out, tid %d",
+ sta->addr, tid);
+}
+
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
bool ps_disabled;
@@ -1389,74 +1405,3 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
iwl_mvm_power_update_device(mvm);
}
}
-
-int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
- enum iwl_lqm_cmd_operatrions operation,
- u32 duration, u32 timeout)
-{
- struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_link_qual_msrmnt_cmd cmd = {
- .cmd_operation = cpu_to_le32(operation),
- .mac_id = cpu_to_le32(mvm_vif->id),
- .measurement_time = cpu_to_le32(duration),
- .timeout = cpu_to_le32(timeout),
- };
- u32 cmdid =
- iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
- int ret;
-
- if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
- return -EOPNOTSUPP;
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return -EINVAL;
-
- switch (operation) {
- case LQM_CMD_OPERATION_START_MEASUREMENT:
- if (iwl_mvm_lqm_active(mvm_vif->mvm))
- return -EBUSY;
- if (!vif->bss_conf.assoc)
- return -EINVAL;
- mvm_vif->lqm_active = true;
- break;
- case LQM_CMD_OPERATION_STOP_MEASUREMENT:
- if (!iwl_mvm_lqm_active(mvm_vif->mvm))
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
- &cmd);
-
- /* command failed - roll back lqm_active state */
- if (ret) {
- mvm_vif->lqm_active =
- operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
- }
-
- return ret;
-}
-
-static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
- bool *lqm_active = _data;
-
- *lqm_active = *lqm_active || mvm_vif->lqm_active;
-}
-
-bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
-{
- bool ret = false;
-
- lockdep_assert_held(&mvm->mutex);
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_lqm_active_iterator, &ret);
-
- return ret;
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 3fc4343581ee..5ef216f3a60b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+ TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 858765fed8f8..f21fe59faccf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -73,6 +73,8 @@
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
+#include "fw/acpi.h"
+
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "internal.h"
@@ -465,6 +467,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
@@ -483,6 +487,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
@@ -508,67 +513,143 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
@@ -576,107 +657,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
+ {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
+
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
-#ifdef CONFIG_ACPI
-#define ACPI_SPLC_METHOD "SPLC"
-#define ACPI_SPLC_DOMAIN_WIFI (0x07)
-
-static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
-{
- union acpi_object *data_pkg, *dflt_pwr_limit;
- int i;
-
- /* We need at least two elements, one for the revision and one
- * for the data itself. Also check that the revision is
- * supported (currently only revision 0).
- */
- if (splc->type != ACPI_TYPE_PACKAGE ||
- splc->package.count < 2 ||
- splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
- splc->package.elements[0].integer.value != 0) {
- IWL_DEBUG_INFO(trans,
- "Unsupported structure returned by the SPLC method. Ignoring.\n");
- return 0;
- }
-
- /* loop through all the packages to find the one for WiFi */
- for (i = 1; i < splc->package.count; i++) {
- union acpi_object *domain;
-
- data_pkg = &splc->package.elements[i];
-
- /* Skip anything that is not a package with the right
- * amount of elements (i.e. at least 2 integers).
- */
- if (data_pkg->type != ACPI_TYPE_PACKAGE ||
- data_pkg->package.count < 2 ||
- data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
- data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
- continue;
-
- domain = &data_pkg->package.elements[0];
- if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
- break;
-
- data_pkg = NULL;
- }
-
- if (!data_pkg) {
- IWL_DEBUG_INFO(trans,
- "No element for the WiFi domain returned by the SPLC method.\n");
- return 0;
- }
-
- dflt_pwr_limit = &data_pkg->package.elements[1];
- return dflt_pwr_limit->integer.value;
-}
-
-static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
-{
- acpi_handle pxsx_handle;
- acpi_handle handle;
- struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
-
- pxsx_handle = ACPI_HANDLE(&pdev->dev);
- if (!pxsx_handle) {
- IWL_DEBUG_INFO(trans,
- "Could not retrieve root port ACPI handle\n");
- return;
- }
-
- /* Get the method's handle */
- status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
- &handle);
- if (ACPI_FAILURE(status)) {
- IWL_DEBUG_INFO(trans, "SPLC method not found\n");
- return;
- }
-
- /* Call SPLC with no arguments */
- status = acpi_evaluate_object(handle, NULL, NULL, &splc);
- if (ACPI_FAILURE(status)) {
- IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
- return;
- }
-
- trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
- IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
- trans->dflt_pwr_limit);
- kfree(splc.pointer);
-}
-
-#else /* CONFIG_ACPI */
-static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
-#endif
-
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -740,8 +734,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
- set_dflt_pwr_limit(iwl_trans, pdev);
-
/* register transport layer debugfs here */
ret = iwl_trans_pcie_dbgfs_register(iwl_trans);
if (ret)
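
Note: the drv.c hunks above delete the driver-local SPLC lookup (the ACPI method evaluation and the package walk for the WiFi domain) from the PCIe probe path. For reference only, a minimal sketch of the generic pattern the removed helper was built on — evaluating an ACPI method below a device's companion node and releasing the returned buffer — is shown here; the method name and the way the result is consumed are placeholders, not the iwlwifi code.

#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Minimal sketch: evaluate an ACPI method ("SPLC" used only as an example)
 * below the device's ACPI companion and free the returned object.
 */
static u64 example_eval_acpi_u64(struct pci_dev *pdev, const char *method)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
	union acpi_object *obj;
	u64 val = 0;

	if (!handle)
		return 0;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, (acpi_string)method,
					      NULL, &out)))
		return 0;

	obj = out.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		val = obj->integer.value;

	kfree(out.pointer);	/* ACPI_ALLOCATE_BUFFER memory is kfree()d */
	return val;
}
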
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 4fb7647995c3..d749abeca3ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -383,6 +383,7 @@ struct iwl_self_init_dram {
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
* @hw_mask: current unmasked hw causes
+ * @tx_cmd_queue_size: the size of the tx command queue
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -442,6 +443,7 @@ struct iwl_trans_pcie {
bool bc_table_dword;
bool scd_set_active;
bool sw_csum_tx;
+ bool pcie_dbg_dumped_once;
u32 rx_page_order;
/*protect hw register */
@@ -463,6 +465,7 @@ struct iwl_trans_pcie {
u32 fh_mask;
u32 hw_mask;
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
+ u16 tx_cmd_queue_size;
};
static inline struct iwl_trans_pcie *
@@ -534,6 +537,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
u8 idx)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a06b6612b658..f25ce3a1ea50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1247,7 +1247,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
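
Note: the ACCESS_ONCE() sites here and further down in this series are mechanically converted to READ_ONCE(), the current way to request a single, non-fused load of a location that firmware or another CPU may update concurrently. A minimal sketch of the pattern, with hypothetical structure and field names:

#include <linux/compiler.h>
#include <linux/types.h>

struct example_shared {
	u16 write_ptr;			/* updated by firmware in shared memory */
};

struct example_ring {
	struct example_shared *shared;
	u16 read_ptr;
};

/* READ_ONCE() forbids the compiler from caching or re-reading the value
 * within one evaluation; it is not a memory barrier by itself.
 */
static bool example_ring_has_work(struct example_ring *ring)
{
	u16 wr = READ_ONCE(ring->shared->write_ptr);

	return ring->read_ptr != wr;
}
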
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 2e3e013ec95a..b7a51603465b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -88,6 +88,93 @@
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
+static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
+{
+#define PCI_DUMP_SIZE 64
+#define PREFIX_LEN 32
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct pci_dev *pdev = trans_pcie->pci_dev;
+ u32 i, pos, alloc_size, *ptr, *buf;
+ char *prefix;
+
+ if (trans_pcie->pcie_dbg_dumped_once)
+ return;
+
+ /* Should be a multiple of 4 */
+ BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+ /* Alloc a max size buffer */
+ if (PCI_ERR_ROOT_ERR_SRC + 4 > PCI_DUMP_SIZE)
+ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
+ else
+ alloc_size = PCI_DUMP_SIZE + PREFIX_LEN;
+ buf = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!buf)
+ return;
+ prefix = (char *)buf + alloc_size - PREFIX_LEN;
+
+ IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
+
+ /* Print wifi device registers */
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ IWL_ERR(trans, "iwlwifi device config registers:\n");
+ for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
+ for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ *ptr = iwl_read32(trans, i);
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
+ for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+ 32, 4, buf, i, 0);
+ }
+
+ /* Print parent device registers next */
+ if (!pdev->bus->self)
+ goto out;
+
+ pdev = pdev->bus->self;
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+
+ IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
+ pci_name(pdev));
+ for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+ /* Print root port AER registers */
+ pos = 0;
+ pdev = pcie_find_root_port(pdev);
+ if (pdev)
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
+ pci_name(pdev));
+ sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+ for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
+ if (pci_read_config_dword(pdev, pos + i, ptr))
+ goto err_read;
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
+ 4, buf, i, 0);
+ }
+ goto out;
+
+err_read:
+ print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+ IWL_ERR(trans, "Read failed at 0x%X\n", i);
+out:
+ trans_pcie->pcie_dbg_dumped_once = 1;
+ kfree(buf);
+}
+
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -649,6 +736,7 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
trans_pcie->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(trans, "Failed to load firmware chunk!\n");
+ iwl_trans_pcie_dump_regs(trans);
return -ETIMEDOUT;
}
@@ -1868,6 +1956,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
+ iwl_trans_pcie_dump_regs(trans);
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
WARN_ONCE(1,
"Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
@@ -2076,12 +2165,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
+ wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2553,7 +2642,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2814,7 +2903,7 @@ static struct iwl_trans_dump_data
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
@@ -2932,6 +3021,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.ref = iwl_trans_pcie_ref, \
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
+ .dump_regs = iwl_trans_pcie_dump_regs, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
.d3_resume = iwl_trans_pcie_d3_resume
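
Note: iwl_trans_pcie_dump_regs() above walks the device's (and its parent bridge's and root port's) PCI configuration space with pci_read_config_dword() and emits it with print_hex_dump(), bailing out to a shared error label if any config read fails. A stripped-down sketch of that read-and-hex-dump loop, assuming a 64-byte dump window and placeholder names:

#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#define EXAMPLE_DUMP_SIZE 64	/* must be a multiple of 4 */

static void example_dump_pci_cfg(struct pci_dev *pdev)
{
	u32 *buf, *ptr;
	int i;

	buf = kmalloc(EXAMPLE_DUMP_SIZE, GFP_ATOMIC);
	if (!buf)
		return;

	for (i = 0, ptr = buf; i < EXAMPLE_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			break;	/* dump whatever was read so far */

	print_hex_dump(KERN_ERR, "example pci cfg: ", DUMP_PREFIX_OFFSET,
		       32, 4, buf, i, false);
	kfree(buf);
}
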
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index d74613fcb756..16b345f54ff0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -289,8 +289,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
struct sk_buff *csum_skb = NULL;
unsigned int tb_len;
dma_addr_t tb_phys;
- struct tcphdr *tcph;
- u8 *iph, *subf_hdrs_start = hdr_page->pos;
+ u8 *subf_hdrs_start = hdr_page->pos;
total_len -= data_left;
@@ -312,8 +311,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
* as MAC header.
*/
tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
- iph = hdr_page->pos + 8;
- tcph = (void *)(iph + ip_hdrlen);
hdr_page->pos += snap_ip_tcp_hdrlen;
@@ -1160,6 +1157,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
struct iwl_txq *cmd_queue;
int txq_id = trans_pcie->cmd_queue, ret;
+ iwl_pcie_set_tx_cmd_queue_size(trans);
+
/* alloc and init the command queue */
if (!trans_pcie->txq[txq_id]) {
cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
@@ -1168,7 +1167,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
return -ENOMEM;
}
trans_pcie->txq[txq_id] = cmd_queue;
- ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ ret = iwl_pcie_txq_alloc(trans, cmd_queue,
+ trans_pcie->tx_cmd_queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -1177,7 +1177,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
cmd_queue = trans_pcie->txq[txq_id];
}
- ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+ ret = iwl_pcie_txq_init(trans, cmd_queue,
+ trans_pcie->tx_cmd_queue_size, true);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index c645d10d3707..fed6d842a5e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -147,9 +147,9 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
- struct iwl_txq *txq = (void *)data;
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
@@ -495,8 +495,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
- (unsigned long)txq);
+ timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans_pcie = trans_pcie;
txq->n_window = slots_num;
@@ -951,7 +950,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
+ TFD_TX_CMD_SLOTS;
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -970,6 +970,21 @@ error:
return ret;
}
+void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue_size = TFD_CMD_SLOTS;
+
+ if (trans->cfg->tx_cmd_queue_size)
+ queue_size = trans->cfg->tx_cmd_queue_size;
+
+ if (WARN_ON(!(is_power_of_2(queue_size) &&
+ TFD_QUEUE_CB_SIZE(queue_size) > 0)))
+ trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS;
+ else
+ trans_pcie->tx_cmd_queue_size = queue_size;
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -977,6 +992,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
int txq_id, slots_num;
bool alloc = false;
+ iwl_pcie_set_tx_cmd_queue_size(trans);
+
if (!trans_pcie->txq_memory) {
ret = iwl_pcie_tx_alloc(trans);
if (ret)
@@ -1000,7 +1017,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
txq_id++) {
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size :
+ TFD_TX_CMD_SLOTS;
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {
@@ -1890,6 +1908,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ iwl_trans_dump_regs(trans);
IWL_ERR(trans, "FW error in SYNC CMD %s\n",
iwl_get_cmd_string(trans, cmd->id));
dump_stack();
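
Note: iwl_pcie_set_tx_cmd_queue_size() above lets a per-device config override the default command queue depth, but only accepts a power-of-two value that still fits the TFD circular-buffer encoding, falling back to TFD_CMD_SLOTS otherwise. A hedged sketch of that validate-or-fall-back shape, with made-up limits rather than the iwlwifi constants:

#include <linux/kernel.h>
#include <linux/log2.h>

#define EXAMPLE_DEFAULT_SLOTS 32
#define EXAMPLE_MAX_SLOTS     256	/* placeholder hardware limit */

/* Accept an override only if it is a power of two within the ring's range;
 * otherwise keep the known-good default.
 */
static u16 example_pick_queue_size(u16 requested)
{
	if (!requested)
		return EXAMPLE_DEFAULT_SLOTS;

	if (WARN_ON(!is_power_of_2(requested) ||
		    requested > EXAMPLE_MAX_SLOTS))
		return EXAMPLE_DEFAULT_SLOTS;

	return requested;
}
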
diff --git a/drivers/net/wireless/intersil/hostap/Makefile b/drivers/net/wireless/intersil/hostap/Makefile
index b8e41a702c00..ae3bb73b2d99 100644
--- a/drivers/net/wireless/intersil/hostap/Makefile
+++ b/drivers/net/wireless/intersil/hostap/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
hostap_ioctl.o hostap_main.o hostap_proc.o
obj-$(CONFIG_HOSTAP) += hostap.o
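
Note: this and the following hostap/orinoco/p54/libertas hunks only prepend SPDX-License-Identifier tags. The tag goes on the first line of each file and uses that file type's native comment syntax, which is why the spelling differs across the hunks:

# SPDX-License-Identifier: GPL-2.0      (Makefile / Kconfig)
// SPDX-License-Identifier: GPL-2.0     (.c source)
/* SPDX-License-Identifier: GPL-2.0 */  (.h header, kept as a C89 comment)
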
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
index ce8721fbc10e..8130d29c7989 100644
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_H
#define HOSTAP_H
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211.h b/drivers/net/wireless/intersil/hostap/hostap_80211.h
index ed98ce7c8f65..1452cf6ecb07 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_80211_H
#define HOSTAP_80211_H
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
index 6d8b64ca1a63..61be822f90b5 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
index c1b10d5117ad..c47da06945c2 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index eb9cd6fa9c4d..b4dfe1893d18 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intersil Prism2 driver with Host AP (software access point) support
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
@@ -184,9 +185,9 @@ static void hostap_event_expired_sta(struct net_device *dev,
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
{
- struct sta_info *sta = (struct sta_info *) data;
+ struct sta_info *sta = from_timer(sta, t, timer);
local_info_t *local;
struct ap_data *ap;
unsigned long next_time = 0;
@@ -1188,10 +1189,8 @@ static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- init_timer(&sta->timer);
+ timer_setup(&sta->timer, ap_handle_timer, 0);
sta->timer.expires = jiffies + ap->max_inactivity;
- sta->timer.data = (unsigned long) sta;
- sta->timer.function = ap_handle_timer;
if (!ap->local->hostapd)
add_timer(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
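
Note: ap_handle_timer() above — like the iwlwifi, orinoco, libertas, libertas_tf and mwifiex callbacks converted later in this series — switches from the old setup_timer()/unsigned-long-data style to timer_setup() plus from_timer(), which recovers the containing structure from the struct timer_list pointer via container_of(). A minimal sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_sta {
	struct timer_list timer;	/* embedded, so from_timer() can find us */
	unsigned long max_inactivity;
};

static void example_timer_fn(struct timer_list *t)
{
	struct example_sta *sta = from_timer(sta, t, timer);

	/* ... handle expiry, then (optionally) re-arm ... */
	mod_timer(&sta->timer, jiffies + sta->max_inactivity);
}

static void example_arm(struct example_sta *sta)
{
	timer_setup(&sta->timer, example_timer_fn, 0);
	mod_timer(&sta->timer, jiffies + sta->max_inactivity);
}
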
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.h b/drivers/net/wireless/intersil/hostap/hostap_ap.h
index 334e2d0b8e11..b7ac9e2f1a39 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_AP_H
#define HOSTAP_AP_H
diff --git a/drivers/net/wireless/intersil/hostap/hostap_common.h b/drivers/net/wireless/intersil/hostap/hostap_common.h
index 4230102ac9e4..22543538239b 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_common.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_COMMON_H
#define HOSTAP_COMMON_H
diff --git a/drivers/net/wireless/intersil/hostap/hostap_config.h b/drivers/net/wireless/intersil/hostap/hostap_config.h
index 2c8f71f0ed45..3ebd55847fad 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_config.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_config.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_CONFIG_H
#define HOSTAP_CONFIG_H
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 705fe668b969..4507614a7c5a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
static int prism2_enable_aux_port(struct net_device *dev, int enable)
{
u16 val, reg;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 72b46eaf3de2..5c4a17a18968 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -2794,9 +2794,9 @@ static void prism2_check_sta_fw_version(local_info_t *local)
}
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, passive_scan_timer);
struct net_device *dev = local->dev;
u16 chan;
@@ -2869,10 +2869,10 @@ static void handle_comms_qual_update(struct work_struct *work)
* used to monitor that local->last_tick_timer is being updated. If not,
* interrupt busy-loop is assumed and driver tries to recover by masking out
* some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
{
static unsigned long last_inquire = 0;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, tick_timer);
local->last_tick_timer = jiffies;
/* Inquire CommTallies every 10 seconds to keep the statistics updated
@@ -3225,13 +3225,8 @@ while (0)
lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
- init_timer(&local->passive_scan_timer);
- local->passive_scan_timer.data = (unsigned long) local;
- local->passive_scan_timer.function = hostap_passive_scan;
-
- init_timer(&local->tick_timer);
- local->tick_timer.data = (unsigned long) local;
- local->tick_timer.function = hostap_tick_timer;
+ timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+ timer_setup(&local->tick_timer, hostap_tick_timer, 0);
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_info.c b/drivers/net/wireless/intersil/hostap/hostap_info.c
index 7635ac4f6679..de8a099a9386 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_info.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_info.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Host AP driver Info Frame processing (part of hostap.o module) */
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index ff153ce29539..c1bc0a6ef300 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
#include <linux/slab.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index dd84557cf957..d234231bf532 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* /proc routines for Host AP driver */
#include <linux/types.h>
diff --git a/drivers/net/wireless/intersil/hostap/hostap_wlan.h b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
index 5352adb94d50..a8c4c1a8b29d 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/intersil/hostap/hostap_wlan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOSTAP_WLAN_H
#define HOSTAP_WLAN_H
diff --git a/drivers/net/wireless/intersil/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile
index b7ecef820f76..0c29c56c88d6 100644
--- a/drivers/net/wireless/intersil/orinoco/Makefile
+++ b/drivers/net/wireless/intersil/orinoco/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the orinoco wireless device drivers.
#
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 56f6e3b71f48..94ad6fe29e69 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -319,9 +319,9 @@ static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
mod_timer(timer, expire);
}
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
{
- struct request_context *ctx = (void *) _ctx;
+ struct request_context *ctx = from_timer(ctx, t, timer);
ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
@@ -365,7 +365,7 @@ static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
refcount_set(&ctx->refcount, 1);
init_completion(&ctx->done);
- setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+ timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
return ctx;
}
@@ -1457,7 +1457,6 @@ static void ezusb_bulk_in_callback(struct urb *urb)
static inline void ezusb_delete(struct ezusb_priv *upriv)
{
- struct net_device *dev;
struct list_head *item;
struct list_head *tmp_item;
unsigned long flags;
@@ -1465,7 +1464,6 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
BUG_ON(in_interrupt());
BUG_ON(!upriv);
- dev = upriv->dev;
mutex_lock(&upriv->mtx);
upriv->udev = NULL; /* No timer will be rearmed from here */
diff --git a/drivers/net/wireless/intersil/p54/Makefile b/drivers/net/wireless/intersil/p54/Makefile
index b542e68f1781..d71651ff904e 100644
--- a/drivers/net/wireless/intersil/p54/Makefile
+++ b/drivers/net/wireless/intersil/p54/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
p54common-objs := eeprom.o fwio.o txrx.o main.o
p54common-$(CONFIG_P54_LEDS) += led.o
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index d5a3bf91a03e..ab6d39e12069 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
-#ifdef CONFIG_P54_LEDS
- p54_unregister_leds(priv);
-#endif /* CONFIG_P54_LEDS */
-
if (priv->registered) {
priv->registered = false;
+#ifdef CONFIG_P54_LEDS
+ p54_unregister_leds(priv);
+#endif /* CONFIG_P54_LEDS */
ieee80211_unregister_hw(dev);
}
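
Note: the p54 hunk moves p54_unregister_leds() inside the priv->registered check, so LED cleanup only runs for devices that actually completed registration, and runs before the hardware is unregistered. A small sketch of that guard-teardown-by-registration-state pattern, using placeholder helpers rather than the p54 API:

struct example_dev {
	bool registered;
};

static void example_unregister_leds(struct example_dev *dev) { }
static void example_unregister_core(struct example_dev *dev) { }

/* Only undo the optional sub-feature if the device reached the registered
 * state; unwind in reverse order of setup.
 */
static void example_unregister(struct example_dev *dev)
{
	if (!dev->registered)
		return;

	dev->registered = false;
	example_unregister_leds(dev);	/* optional feature first */
	example_unregister_core(dev);	/* then the core device */
}
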
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6467ffac9811..10b075a46b26 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -396,7 +396,7 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
return -EINVAL;
val = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]);
- wiphy_debug(wiphy, "%s: test=%u\n", __func__, val);
+ wiphy_dbg(wiphy, "%s: test=%u\n", __func__, val);
/* Send a vendor event as a test. Note that this would not normally be
* done within a command handler, but rather, based on some other
@@ -643,9 +643,9 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
if (!vp->assoc)
return;
- wiphy_debug(data->hw->wiphy,
- "%s: send PS-Poll to %pM for aid %d\n",
- __func__, vp->bssid, vp->aid);
+ wiphy_dbg(data->hw->wiphy,
+ "%s: send PS-Poll to %pM for aid %d\n",
+ __func__, vp->bssid, vp->aid);
skb = dev_alloc_skb(sizeof(*pspoll));
if (!skb)
@@ -674,9 +674,9 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
if (!vp->assoc)
return;
- wiphy_debug(data->hw->wiphy,
- "%s: send data::nullfunc to %pM ps=%d\n",
- __func__, vp->bssid, ps);
+ wiphy_dbg(data->hw->wiphy,
+ "%s: send data::nullfunc to %pM ps=%d\n",
+ __func__, vp->bssid, ps);
skb = dev_alloc_skb(sizeof(*hdr));
if (!skb)
@@ -1034,7 +1034,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
HWSIM_CMD_FRAME);
if (msg_head == NULL) {
- printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n");
+ pr_debug("mac80211_hwsim: problem with msg_head\n");
goto nla_put_failure;
}
@@ -1093,7 +1093,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
nla_put_failure:
nlmsg_free(skb);
err_free_txskb:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+ pr_debug("mac80211_hwsim: error occurred in %s\n", __func__);
ieee80211_free_txskb(hw, my_skb);
data->tx_failed++;
}
@@ -1347,7 +1347,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
}
if (data->idle && !data->tmp_chan) {
- wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+ wiphy_dbg(hw->wiphy, "Trying to TX when idle - reject\n");
ieee80211_free_txskb(hw, skb);
return;
}
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(data->wmediumd);
+ _portid = READ_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1408,7 +1408,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
- wiphy_debug(hw->wiphy, "%s\n", __func__);
+ wiphy_dbg(hw->wiphy, "%s\n", __func__);
data->started = true;
return 0;
}
@@ -1419,16 +1419,16 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
struct mac80211_hwsim_data *data = hw->priv;
data->started = false;
tasklet_hrtimer_cancel(&data->beacon_timer);
- wiphy_debug(hw->wiphy, "%s\n", __func__);
+ wiphy_dbg(hw->wiphy, "%s\n", __func__);
}
static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
- __func__, ieee80211_vif_type_p2p(vif),
- vif->addr);
+ wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
+ __func__, ieee80211_vif_type_p2p(vif),
+ vif->addr);
hwsim_set_magic(vif);
vif->cab_queue = 0;
@@ -1447,9 +1447,9 @@ static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
bool newp2p)
{
newtype = ieee80211_iftype_p2p(newtype, newp2p);
- wiphy_debug(hw->wiphy,
- "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
- __func__, ieee80211_vif_type_p2p(vif),
+ wiphy_dbg(hw->wiphy,
+ "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
+ __func__, ieee80211_vif_type_p2p(vif),
newtype, vif->addr);
hwsim_check_magic(vif);
@@ -1465,9 +1465,9 @@ static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_interface(
struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
- __func__, ieee80211_vif_type_p2p(vif),
- vif->addr);
+ wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
+ __func__, ieee80211_vif_type_p2p(vif),
+ vif->addr);
hwsim_check_magic(vif);
hwsim_clear_magic(vif);
}
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = ACCESS_ONCE(data->wmediumd);
+ u32 _pid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
@@ -1589,23 +1589,23 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
int idx;
if (conf->chandef.chan)
- wiphy_debug(hw->wiphy,
- "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n",
- __func__,
- conf->chandef.chan->center_freq,
- conf->chandef.center_freq1,
- conf->chandef.center_freq2,
- hwsim_chanwidths[conf->chandef.width],
- !!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS),
- smps_modes[conf->smps_mode]);
+ wiphy_dbg(hw->wiphy,
+ "%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n",
+ __func__,
+ conf->chandef.chan->center_freq,
+ conf->chandef.center_freq1,
+ conf->chandef.center_freq2,
+ hwsim_chanwidths[conf->chandef.width],
+ !!(conf->flags & IEEE80211_CONF_IDLE),
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
else
- wiphy_debug(hw->wiphy,
- "%s (freq=0 idle=%d ps=%d smps=%s)\n",
- __func__,
- !!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS),
- smps_modes[conf->smps_mode]);
+ wiphy_dbg(hw->wiphy,
+ "%s (freq=0 idle=%d ps=%d smps=%s)\n",
+ __func__,
+ !!(conf->flags & IEEE80211_CONF_IDLE),
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
@@ -1659,7 +1659,7 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
{
struct mac80211_hwsim_data *data = hw->priv;
- wiphy_debug(hw->wiphy, "%s\n", __func__);
+ wiphy_dbg(hw->wiphy, "%s\n", __func__);
data->rx_filter = 0;
if (*total_flags & FIF_ALLMULTI)
@@ -1688,25 +1688,25 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
hwsim_check_magic(vif);
- wiphy_debug(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
- __func__, changed, vif->addr);
+ wiphy_dbg(hw->wiphy, "%s(changed=0x%x vif->addr=%pM)\n",
+ __func__, changed, vif->addr);
if (changed & BSS_CHANGED_BSSID) {
- wiphy_debug(hw->wiphy, "%s: BSSID changed: %pM\n",
- __func__, info->bssid);
+ wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n",
+ __func__, info->bssid);
memcpy(vp->bssid, info->bssid, ETH_ALEN);
}
if (changed & BSS_CHANGED_ASSOC) {
- wiphy_debug(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
- info->assoc, info->aid);
+ wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n",
+ info->assoc, info->aid);
vp->assoc = info->assoc;
vp->aid = info->aid;
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- wiphy_debug(hw->wiphy, " BCN EN: %d (BI=%u)\n",
- info->enable_beacon, info->beacon_int);
+ wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n",
+ info->enable_beacon, info->beacon_int);
vp->bcn_en = info->enable_beacon;
if (data->started &&
!hrtimer_is_queued(&data->beacon_timer.timer) &&
@@ -1725,8 +1725,8 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
mac80211_hwsim_bcn_en_iter, &count);
- wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
- count);
+ wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u",
+ count);
if (count == 0) {
tasklet_hrtimer_cancel(&data->beacon_timer);
data->beacon_int = 0;
@@ -1735,31 +1735,31 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- wiphy_debug(hw->wiphy, " ERP_CTS_PROT: %d\n",
- info->use_cts_prot);
+ wiphy_dbg(hw->wiphy, " ERP_CTS_PROT: %d\n",
+ info->use_cts_prot);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- wiphy_debug(hw->wiphy, " ERP_PREAMBLE: %d\n",
- info->use_short_preamble);
+ wiphy_dbg(hw->wiphy, " ERP_PREAMBLE: %d\n",
+ info->use_short_preamble);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- wiphy_debug(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot);
+ wiphy_dbg(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot);
}
if (changed & BSS_CHANGED_HT) {
- wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n",
- info->ht_operation_mode);
+ wiphy_dbg(hw->wiphy, " HT: op_mode=0x%x\n",
+ info->ht_operation_mode);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n",
- (unsigned long long) info->basic_rates);
+ wiphy_dbg(hw->wiphy, " BASIC_RATES: 0x%llx\n",
+ (unsigned long long) info->basic_rates);
}
if (changed & BSS_CHANGED_TXPOWER)
- wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
+ wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
}
static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
@@ -1813,11 +1813,11 @@ static int mac80211_hwsim_conf_tx(
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- wiphy_debug(hw->wiphy,
- "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
- __func__, queue,
- params->txop, params->cw_min,
- params->cw_max, params->aifs);
+ wiphy_dbg(hw->wiphy,
+ "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n",
+ __func__, queue,
+ params->txop, params->cw_min,
+ params->cw_max, params->aifs);
return 0;
}
@@ -1981,7 +1981,7 @@ static void hw_scan_work(struct work_struct *work)
.aborted = false,
};
- wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
+ wiphy_dbg(hwsim->hw->wiphy, "hw scan complete\n");
ieee80211_scan_completed(hwsim->hw, &info);
hwsim->hw_scan_request = NULL;
hwsim->hw_scan_vif = NULL;
@@ -1990,8 +1990,8 @@ static void hw_scan_work(struct work_struct *work)
return;
}
- wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n",
- req->channels[hwsim->scan_chan_idx]->center_freq);
+ wiphy_dbg(hwsim->hw->wiphy, "hw scan %d MHz\n",
+ req->channels[hwsim->scan_chan_idx]->center_freq);
hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR |
@@ -2060,7 +2060,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
mutex_unlock(&hwsim->mutex);
- wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
+ wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n");
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
@@ -2075,7 +2075,7 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
.aborted = true,
};
- wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
+ wiphy_dbg(hw->wiphy, "hwsim cancel_hw_scan\n");
cancel_delayed_work_sync(&hwsim->hw_scan);
@@ -2096,11 +2096,11 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
mutex_lock(&hwsim->mutex);
if (hwsim->scanning) {
- printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
+ pr_debug("two hwsim sw_scans detected!\n");
goto out;
}
- printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
+ pr_debug("hwsim sw_scan request, prepping stuff\n");
memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
hwsim->scanning = true;
@@ -2117,7 +2117,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
mutex_lock(&hwsim->mutex);
- printk(KERN_DEBUG "hwsim sw_scan_complete\n");
+ pr_debug("hwsim sw_scan_complete\n");
hwsim->scanning = false;
eth_zero_addr(hwsim->scan_addr);
@@ -2131,7 +2131,7 @@ static void hw_roc_start(struct work_struct *work)
mutex_lock(&hwsim->mutex);
- wiphy_debug(hwsim->hw->wiphy, "hwsim ROC begins\n");
+ wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC begins\n");
hwsim->tmp_chan = hwsim->roc_chan;
ieee80211_ready_on_channel(hwsim->hw);
@@ -2151,7 +2151,7 @@ static void hw_roc_done(struct work_struct *work)
hwsim->tmp_chan = NULL;
mutex_unlock(&hwsim->mutex);
- wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n");
+ wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC expired\n");
}
static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
@@ -2172,8 +2172,8 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
hwsim->roc_duration = duration;
mutex_unlock(&hwsim->mutex);
- wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
- chan->center_freq, duration);
+ wiphy_dbg(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
+ chan->center_freq, duration);
ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50);
return 0;
@@ -2190,7 +2190,7 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
hwsim->tmp_chan = NULL;
mutex_unlock(&hwsim->mutex);
- wiphy_debug(hw->wiphy, "hwsim ROC canceled\n");
+ wiphy_dbg(hw->wiphy, "hwsim ROC canceled\n");
return 0;
}
@@ -2199,20 +2199,20 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_set_chanctx_magic(ctx);
- wiphy_debug(hw->wiphy,
- "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
- ctx->def.chan->center_freq, ctx->def.width,
- ctx->def.center_freq1, ctx->def.center_freq2);
+ wiphy_dbg(hw->wiphy,
+ "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
return 0;
}
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- wiphy_debug(hw->wiphy,
- "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
- ctx->def.chan->center_freq, ctx->def.width,
- ctx->def.center_freq1, ctx->def.center_freq2);
+ wiphy_dbg(hw->wiphy,
+ "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
hwsim_check_chanctx_magic(ctx);
hwsim_clear_chanctx_magic(ctx);
}
@@ -2222,10 +2222,10 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
u32 changed)
{
hwsim_check_chanctx_magic(ctx);
- wiphy_debug(hw->wiphy,
- "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
- ctx->def.chan->center_freq, ctx->def.width,
- ctx->def.center_freq1, ctx->def.center_freq2);
+ wiphy_dbg(hw->wiphy,
+ "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ ctx->def.chan->center_freq, ctx->def.width,
+ ctx->def.center_freq1, ctx->def.center_freq2);
}
static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -2479,7 +2479,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ops = &mac80211_hwsim_mchan_ops;
hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname);
if (!hw) {
- printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw failed\n");
+ pr_debug("mac80211_hwsim: ieee80211_alloc_hw failed\n");
err = -ENOMEM;
goto failed;
}
@@ -2507,7 +2507,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->dev->driver = &mac80211_hwsim_driver.driver;
err = device_bind_driver(data->dev);
if (err != 0) {
- printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
+ pr_debug("mac80211_hwsim: device_bind_driver failed (%d)\n",
err);
goto failed_bind;
}
@@ -2698,12 +2698,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
err = ieee80211_register_hw(hw);
if (err < 0) {
- printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
+ pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
err);
goto failed_hw;
}
- wiphy_debug(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
+ wiphy_dbg(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr);
if (param->reg_alpha2) {
data->alpha2[0] = param->reg_alpha2[0];
@@ -2805,7 +2805,7 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
return -EMSGSIZE;
if (cb)
- genl_dump_check_consistent(cb, hdr, &hwsim_genl_family);
+ genl_dump_check_consistent(cb, hdr);
if (data->alpha2[0] && data->alpha2[1])
param.reg_alpha2 = data->alpha2;
@@ -3067,7 +3067,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
return 0;
err:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+ pr_debug("mac80211_hwsim: error occurred in %s\n", __func__);
out:
dev_kfree_skb(skb);
return -EINVAL;
@@ -3098,7 +3098,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
hwsim_register_wmediumd(net, info->snd_portid);
- printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
+ pr_debug("mac80211_hwsim: received a REGISTER, "
"switching to wmediumd mode with pid %d\n", info->snd_portid);
return 0;
@@ -3108,6 +3108,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
+ int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -3147,7 +3148,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_new_radio(info, &param);
+ ret = mac80211_hwsim_new_radio(info, &param);
+ kfree(hwname);
+ return ret;
}
static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -3387,7 +3390,7 @@ static int __init hwsim_init_netlink(void)
return 0;
failure:
- printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
+ pr_debug("mac80211_hwsim: error occurred in %s\n", __func__);
return -EINVAL;
}
@@ -3578,7 +3581,7 @@ module_init(init_mac80211_hwsim);
static void __exit exit_mac80211_hwsim(void)
{
- printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
+ pr_debug("mac80211_hwsim: unregister radios\n");
hwsim_exit_netlink();
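
Note: besides the mechanical wiphy_debug()->wiphy_dbg() and printk(KERN_DEBUG)->pr_debug() conversions, the hwsim hunk plugs a leak in hwsim_new_radio_nl(): the duplicated user-supplied radio name was previously never freed on return, and the fix captures the return value so the copy is kfree()d on every path. A hedged sketch of that duplicate-use-free shape; the attribute handling and the worker are placeholders, not the hwsim code:

#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

static int example_create_radio(const char *name)
{
	return 0;	/* placeholder worker */
}

/* Copy a (possibly non-NUL-terminated) netlink string attribute, use it,
 * and always release the copy regardless of how the worker returns.
 */
static int example_new_radio(struct nlattr *name_attr)
{
	char *name;
	int ret;

	name = kasprintf(GFP_KERNEL, "%.*s", nla_len(name_attr),
			 (char *)nla_data(name_attr));
	if (!name)
		return -ENOMEM;

	ret = example_create_radio(name);

	kfree(name);
	return ret;
}
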
diff --git a/drivers/net/wireless/marvell/libertas/Makefile b/drivers/net/wireless/marvell/libertas/Makefile
index eac72f7bd341..41b9b440a542 100644
--- a/drivers/net/wireless/marvell/libertas/Makefile
+++ b/drivers/net/wireless/marvell/libertas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
libertas-y += cfg.o
libertas-y += cmd.o
libertas-y += cmdresp.o
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 71ba2c8d09b5..f99031cfdf86 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Implement cfg80211 ("iw") support.
*
@@ -1698,9 +1699,6 @@ static void lbs_join_post(struct lbs_private *priv,
0, GFP_KERNEL);
cfg80211_put_bss(priv->wdev->wiphy, bss);
- memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
- priv->wdev->ssid_len = params->ssid_len;
-
cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
GFP_KERNEL);
diff --git a/drivers/net/wireless/marvell/libertas/cfg.h b/drivers/net/wireless/marvell/libertas/cfg.h
index acccc2922401..0e48dc6d81d1 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.h
+++ b/drivers/net/wireless/marvell/libertas/cfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LBS_CFG80211_H__
#define __LBS_CFG80211_H__
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index 0c5444b02c64..80878561cb90 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007, Red Hat, Inc. */
#ifndef _LBS_CMD_H_
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index aaf01619de59..b73d08381398 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file contains the handling of command
* responses as well as events generated by firmware.
diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c
index faed1823c58e..c83f44f9ddf1 100644
--- a/drivers/net/wireless/marvell/libertas/debugfs.c
+++ b/drivers/net/wireless/marvell/libertas/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/marvell/libertas/debugfs.h b/drivers/net/wireless/marvell/libertas/debugfs.h
index f2b9c7ffe0fd..6efd1a66dad7 100644
--- a/drivers/net/wireless/marvell/libertas/debugfs.h
+++ b/drivers/net/wireless/marvell/libertas/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LBS_DEBUGFS_H_
#define _LBS_DEBUGFS_H_
diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h
index 84a3aa7ac570..5d1e30e0c5db 100644
--- a/drivers/net/wireless/marvell/libertas/decl.h
+++ b/drivers/net/wireless/marvell/libertas/decl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains declaration referring to
diff --git a/drivers/net/wireless/marvell/libertas/defs.h b/drivers/net/wireless/marvell/libertas/defs.h
index d3221444e51c..58e2ead7b0cc 100644
--- a/drivers/net/wireless/marvell/libertas/defs.h
+++ b/drivers/net/wireless/marvell/libertas/defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header file contains global constant/enum definitions,
* global variable declaration.
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index edf710bc5e77..dd1ee1f0af48 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains definitions and data structures specific
* to Marvell 802.11 NIC. It contains the Device Information
diff --git a/drivers/net/wireless/marvell/libertas/ethtool.c b/drivers/net/wireless/marvell/libertas/ethtool.c
index 693868f16921..1bb8746a0b23 100644
--- a/drivers/net/wireless/marvell/libertas/ethtool.c
+++ b/drivers/net/wireless/marvell/libertas/ethtool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/wireless/marvell/libertas/host.h b/drivers/net/wireless/marvell/libertas/host.h
index 96726f79a1dd..a4fc3f79bb17 100644
--- a/drivers/net/wireless/marvell/libertas/host.h
+++ b/drivers/net/wireless/marvell/libertas/host.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file function prototypes, data structure
* and definitions for all the host/station commands
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 16e54c757dd0..ffea610f67e2 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -161,9 +161,9 @@ static void if_usb_setup_firmware(struct lbs_private *priv)
}
}
-static void if_usb_fw_timeo(unsigned long priv)
+static void if_usb_fw_timeo(struct timer_list *t)
{
- struct if_usb_card *cardp = (void *)priv;
+ struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
if (cardp->fwdnldover) {
lbs_deb_usb("Download complete, no event. Assuming success\n");
@@ -205,7 +205,7 @@ static int if_usb_probe(struct usb_interface *intf,
if (!cardp)
goto error;
- setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
+ timer_setup(&cardp->fw_timeout, if_usb_fw_timeo, 0);
init_waitqueue_head(&cardp->fw_wq);
cardp->udev = udev;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.h b/drivers/net/wireless/marvell/libertas/if_usb.h
index 6e42eac331de..8dc14bec3e16 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.h
+++ b/drivers/net/wireless/marvell/libertas/if_usb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LBS_IF_USB_H
#define _LBS_IF_USB_H
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index aefa88f4f29c..f22e1c220cba 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -722,9 +722,9 @@ EXPORT_SYMBOL_GPL(lbs_resume);
*
* @data: &struct lbs_private pointer
*/
-static void lbs_cmd_timeout_handler(unsigned long data)
+static void lbs_cmd_timeout_handler(struct timer_list *t)
{
- struct lbs_private *priv = (struct lbs_private *)data;
+ struct lbs_private *priv = from_timer(priv, t, command_timer);
unsigned long flags;
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -756,9 +756,9 @@ out:
*
* @data: &struct lbs_private pointer
*/
-static void lbs_tx_lockup_handler(unsigned long data)
+static void lbs_tx_lockup_handler(struct timer_list *t)
{
- struct lbs_private *priv = (struct lbs_private *)data;
+ struct lbs_private *priv = from_timer(priv, t, tx_lockup_timer);
unsigned long flags;
spin_lock_irqsave(&priv->driver_lock, flags);
@@ -779,9 +779,9 @@ static void lbs_tx_lockup_handler(unsigned long data)
* @data: &struct lbs_private pointer
* returns: N/A
*/
-static void auto_deepsleep_timer_fn(unsigned long data)
+static void auto_deepsleep_timer_fn(struct timer_list *t)
{
- struct lbs_private *priv = (struct lbs_private *)data;
+ struct lbs_private *priv = from_timer(priv, t, auto_deepsleep_timer);
if (priv->is_activity_detected) {
priv->is_activity_detected = 0;
@@ -847,12 +847,9 @@ static int lbs_init_adapter(struct lbs_private *priv)
init_waitqueue_head(&priv->fw_waitq);
mutex_init(&priv->lock);
- setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
- (unsigned long)priv);
- setup_timer(&priv->tx_lockup_timer, lbs_tx_lockup_handler,
- (unsigned long)priv);
- setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
- (unsigned long)priv);
+ timer_setup(&priv->command_timer, lbs_cmd_timeout_handler, 0);
+ timer_setup(&priv->tx_lockup_timer, lbs_tx_lockup_handler, 0);
+ timer_setup(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, 0);
INIT_LIST_HEAD(&priv->cmdfreeq);
INIT_LIST_HEAD(&priv->cmdpendingq);
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 37ace5cb309d..b0cb16ef8d1d 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
diff --git a/drivers/net/wireless/marvell/libertas/mesh.h b/drivers/net/wireless/marvell/libertas/mesh.h
index 6603f341c874..dfe22c91aade 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.h
+++ b/drivers/net/wireless/marvell/libertas/mesh.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Contains all definitions needed for the Libertas' MESH implementation.
*/
diff --git a/drivers/net/wireless/marvell/libertas/radiotap.h b/drivers/net/wireless/marvell/libertas/radiotap.h
index b3c8ea6d610e..1ed5608d353f 100644
--- a/drivers/net/wireless/marvell/libertas/radiotap.h
+++ b/drivers/net/wireless/marvell/libertas/radiotap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <net/ieee80211_radiotap.h>
struct tx_radiotap_hdr {
diff --git a/drivers/net/wireless/marvell/libertas/types.h b/drivers/net/wireless/marvell/libertas/types.h
index cf1d9b047ee6..cd4ceb6f885d 100644
--- a/drivers/net/wireless/marvell/libertas/types.h
+++ b/drivers/net/wireless/marvell/libertas/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header file contains definition for global types
*/
diff --git a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
index 4bd3dc5adf7c..37a98e228b46 100644
--- a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
+++ b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/**
* This header file contains global constant/enum definitions,
* global variable declaration.
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index e9104eca327b..5153922e7ce1 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -115,9 +115,9 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
lbtf_deb_leave(LBTF_DEB_USB);
}
-static void if_usb_fw_timeo(unsigned long priv)
+static void if_usb_fw_timeo(struct timer_list *t)
{
- struct if_usb_card *cardp = (void *)priv;
+ struct if_usb_card *cardp = from_timer(cardp, t, fw_timeout);
lbtf_deb_enter(LBTF_DEB_USB);
if (!cardp->fwdnldover) {
@@ -156,7 +156,7 @@ static int if_usb_probe(struct usb_interface *intf,
if (!cardp)
goto error;
- setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
+ timer_setup(&cardp->fw_timeout, if_usb_fw_timeo, 0);
init_waitqueue_head(&cardp->fw_wq);
cardp->udev = udev;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 81228bf73043..1d45da187b9b 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -165,9 +165,9 @@ done:
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
-static void command_timer_fn(unsigned long data)
+static void command_timer_fn(struct timer_list *t)
{
- struct lbtf_private *priv = (struct lbtf_private *)data;
+ struct lbtf_private *priv = from_timer(priv, t, command_timer);
unsigned long flags;
lbtf_deb_enter(LBTF_DEB_CMD);
@@ -196,8 +196,7 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
mutex_init(&priv->lock);
priv->vif = NULL;
- setup_timer(&priv->command_timer, command_timer_fn,
- (unsigned long)priv);
+ timer_setup(&priv->command_timer, command_timer_fn, 0);
INIT_LIST_HEAD(&priv->cmdfreeq);
INIT_LIST_HEAD(&priv->cmdpendingq);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 725206914911..8772e3949327 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -658,12 +658,6 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
unsigned long flags;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- if (list_empty(&priv->rx_reorder_tbl_ptr)) {
- dev_dbg(priv->adapter->dev,
- "mwifiex_11n_delba: rx_reorder_tbl_ptr empty\n");
- goto exit;
- }
-
list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
if (rx_reor_tbl_ptr->tid == tid) {
dev_dbg(priv->adapter->dev,
@@ -854,9 +848,6 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_tx_ba_stream_tbl *tx_ba_stream_tbl_ptr;
- if (list_empty(&priv->tx_ba_stream_tbl_ptr))
- return;
-
list_for_each_entry(tx_ba_stream_tbl_ptr,
&priv->tx_ba_stream_tbl_ptr, list) {
if (tx_ba_stream_tbl_ptr->ba_status == BA_SETUP_COMPLETE) {
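The list_empty() checks removed above are redundant: list_for_each_entry() simply performs zero iterations over an empty list, so the early exit adds no safety and only costs an extra branch while the lock is held. A minimal sketch with illustrative types:

#include <linux/list.h>
#include <linux/types.h>

struct reorder_entry {
	struct list_head list;
	int tid;
};

static struct reorder_entry *find_tid(struct list_head *head, int tid)
{
	struct reorder_entry *entry;

	/* an empty list is handled naturally: the loop body never runs */
	list_for_each_entry(entry, head, list)
		if (entry->tid == tid)
			return entry;

	return NULL;
}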
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 274dd5a1574a..1edcddaf7b4b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -312,10 +312,10 @@ mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
* them and then dumps the Rx reordering table.
*/
static void
-mwifiex_flush_data(unsigned long context)
+mwifiex_flush_data(struct timer_list *t)
{
struct reorder_tmr_cnxt *ctx =
- (struct reorder_tmr_cnxt *) context;
+ from_timer(ctx, t, timer);
int start_win, seq_num;
ctx->timer_is_set = false;
@@ -412,8 +412,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
new_node->timer_context.priv = priv;
new_node->timer_context.timer_is_set = false;
- setup_timer(&new_node->timer_context.timer, mwifiex_flush_data,
- (unsigned long)&new_node->timer_context);
+ timer_setup(&new_node->timer_context.timer, mwifiex_flush_data, 0);
for (i = 0; i < win_size; ++i)
new_node->rx_reorder_ptr[i] = NULL;
@@ -835,12 +834,6 @@ void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
continue;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
- if (list_empty(&priv->rx_reorder_tbl_ptr)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- lock_flags);
- continue;
- }
-
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
tbl->flags = flags;
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 32c5074da84c..6e0d9a9c5cfb 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -142,7 +142,7 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
- const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
@@ -454,7 +454,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
struct mwifiex_wep_key *wep_key;
- const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
@@ -2503,6 +2503,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
struct ieee80211_channel *chan;
struct ieee_types_header *ie;
struct mwifiex_user_scan_cfg *user_scan_cfg;
+ u8 mac_addr[ETH_ALEN];
mwifiex_dbg(priv->adapter, CMD,
"info: received scan request on %s\n", dev->name);
@@ -2529,15 +2530,10 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_request = request;
if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- ether_addr_copy(priv->random_mac, request->mac_addr);
- for (i = 0; i < ETH_ALEN; i++) {
- priv->random_mac[i] &= request->mac_addr_mask[i];
- priv->random_mac[i] |= get_random_int() &
- ~(request->mac_addr_mask[i]);
- }
- ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
- } else {
- eth_zero_addr(priv->random_mac);
+ get_random_mask_addr(mac_addr, request->mac_addr,
+ request->mac_addr_mask);
+ ether_addr_copy(request->mac_addr, mac_addr);
+ ether_addr_copy(user_scan_cfg->random_mac, mac_addr);
}
user_scan_cfg->num_ssids = request->n_ssids;
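The open-coded loop removed above is replaced by the get_random_mask_addr() helper: address bits selected by the mask are kept, the remaining bits are randomized, and the result is written back into the scan request rather than cached in priv->random_mac. A sketch of the equivalent byte-wise logic, mirroring the removed code:

#include <linux/etherdevice.h>
#include <linux/random.h>

static void scan_random_addr(u8 *out, const u8 *addr, const u8 *mask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		/* bits set in mask[] come from addr[], the rest are random */
		out[i] = (addr[i] & mask[i]) |
			 (get_random_int() & ~mask[i]);
	}
}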
@@ -2959,18 +2955,21 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
}
mwifiex_init_priv_params(priv, dev);
- mwifiex_set_mac_address(priv, dev);
priv->netdev = dev;
- ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
- HostCmd_ACT_GEN_SET, 0, NULL, true);
- if (ret)
- goto err_set_bss_mode;
+ if (!adapter->mfg_mode) {
+ mwifiex_set_mac_address(priv, dev);
- ret = mwifiex_sta_init_cmd(priv, false, false);
- if (ret)
- goto err_sta_init;
+ ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+ HostCmd_ACT_GEN_SET, 0, NULL, true);
+ if (ret)
+ goto err_set_bss_mode;
+
+ ret = mwifiex_sta_init_cmd(priv, false, false);
+ if (ret)
+ goto err_sta_init;
+ }
mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
if (adapter->is_hw_11ac_capable)
@@ -3250,8 +3249,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
int i, filt_num = 0, ret = 0;
bool first_pat = true;
u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
- const u8 ipv4_mc_mac[] = {0x33, 0x33};
- const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+ static const u8 ipv4_mc_mac[] = {0x33, 0x33};
+ static const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
mef_entry->mode = MEF_MODE_HOST_SLEEP;
mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
@@ -3544,9 +3543,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
{
- const u8 ipv4_mc_mac[] = {0x33, 0x33};
- const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
- const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
+ static const u8 ipv4_mc_mac[] = {0x33, 0x33};
+ static const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+ static const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
if ((byte_seq[0] & 0x01) &&
(byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1))
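Marking these lookup arrays static const, as the hunks above do, moves them into read-only data so they are emitted once instead of being rebuilt on the stack at every call. A minimal sketch of the difference, with illustrative helpers:

#include <linux/string.h>
#include <linux/types.h>

/* on-stack copy: the array is materialized again on every call */
static bool is_ipv4_mcast_stack(const u8 *seq)
{
	const u8 ipv4_mc_mac[] = { 0x33, 0x33 };

	return !memcmp(seq, ipv4_mc_mac, sizeof(ipv4_mc_mac));
}

/* static const: the table lives once in .rodata, no per-call copy */
static bool is_ipv4_mcast_rodata(const u8 *seq)
{
	static const u8 ipv4_mc_mac[] = { 0x33, 0x33 };

	return !memcmp(seq, ipv4_mc_mac, sizeof(ipv4_mc_mac));
}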
@@ -3795,9 +3794,8 @@ mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-
if (!sta_ptr) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
return -ENOENT;
@@ -3805,15 +3803,18 @@ mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
return -ENOENT;
}
if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
wiphy_err(wiphy, "channel switch is running, abort request\n");
return -EALREADY;
}
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
chan = chandef->chan->hw_value;
second_chan_offset = mwifiex_get_sec_chan_offset(chan);
@@ -3834,18 +3835,20 @@ mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-
if (!sta_ptr) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
addr);
- } else
+ } else {
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
mwifiex_stop_tdls_cs(priv, addr);
+ }
}
static int
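The TDLS hunks above widen the sta_list_spinlock critical section so that sta_ptr is only dereferenced while the lock protecting the station list is still held; previously the lock was dropped right after the lookup, leaving a window in which the entry could be removed. A minimal sketch of the corrected lookup-and-use pattern, with a hypothetical table and lookup helper:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct peer_table {
	spinlock_t lock;
	struct list_head peers;
};

struct peer {
	struct list_head list;
	u8 addr[6];
	u8 status;
};

/* my_find_peer() is a hypothetical lookup over tbl->peers; callers must
 * hold tbl->lock, and the returned pointer is only valid while that
 * lock is still held.
 */
struct peer *my_find_peer(struct peer_table *tbl, const u8 *addr);

static int peer_set_status(struct peer_table *tbl, const u8 *addr, u8 status)
{
	struct peer *p;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&tbl->lock, flags);
	p = my_find_peer(tbl, addr);
	if (p) {
		p->status = status;	/* dereference while still locked */
		ret = 0;
	}
	spin_unlock_irqrestore(&tbl->lock, flags);

	return ret;
}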
@@ -4202,7 +4205,10 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
+ /* allocate twice the total number of channels, since the driver issues an
+ * additional active scan request for hidden SSIDs on passive channels.
+ */
+ adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a);
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
@@ -4306,10 +4312,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
- NL80211_FEATURE_NEED_OBSS_SCAN |
- NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
- NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
- NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ NL80211_FEATURE_NEED_OBSS_SCAN;
+
+ if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
+ wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 0edc5d621304..dcc529e9c0ef 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -17,6 +17,7 @@
* this warranty disclaimer.
*/
+#include <asm/unaligned.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
@@ -183,7 +184,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
uint16_t cmd_code;
uint16_t cmd_size;
unsigned long flags;
- __le32 tmp;
if (!adapter || !cmd_node)
return -1;
@@ -249,9 +249,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
if (adapter->iface_type == MWIFIEX_USB) {
- tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
- memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN);
+ put_unaligned_le32(MWIFIEX_USB_TYPE_CMD,
+ cmd_node->cmd_skb->data);
adapter->cmd_sent = true;
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_USB_EP_CMD_EVENT,
@@ -317,7 +317,6 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
(struct mwifiex_opt_sleep_confirm *)
adapter->sleep_cfm->data;
struct sk_buff *sleep_cfm_tmp;
- __le32 tmp;
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -342,8 +341,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
+ MWIFIEX_TYPE_LEN);
skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
- tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
- memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN);
+ put_unaligned_le32(MWIFIEX_USB_TYPE_CMD, sleep_cfm_tmp->data);
memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN,
adapter->sleep_cfm->data,
sizeof(struct mwifiex_opt_sleep_confirm));
@@ -922,10 +920,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
* It will re-send the same command again.
*/
void
-mwifiex_cmd_timeout_func(unsigned long function_context)
+mwifiex_cmd_timeout_func(struct timer_list *t)
{
- struct mwifiex_adapter *adapter =
- (struct mwifiex_adapter *) function_context;
+ struct mwifiex_adapter *adapter = from_timer(adapter, t, cmd_timer);
struct cmd_ctrl_node *cmd_node;
adapter->is_cmd_timedout = 1;
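The cmdevt.c changes above drop the intermediate __le32 temporary plus memcpy() in favour of put_unaligned_le32(), which byte-swaps a CPU-order value and stores it little-endian at a possibly unaligned address in one step. A minimal sketch of the two forms:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* old style: build a __le32 temporary, then copy its bytes out */
static void write_type_old(u8 *buf, u32 type)
{
	__le32 tmp = cpu_to_le32(type);

	memcpy(buf, &tmp, sizeof(tmp));
}

/* new style: one helper handles the byte swap and the unaligned store */
static void write_type_new(u8 *buf, u32 type)
{
	put_unaligned_le32(type, buf);
}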
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 9e75522d248a..13cd58e963b3 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -225,7 +225,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define IS_11N_ENABLED(priv) ((priv->adapter->config_bands & BAND_GN || \
priv->adapter->config_bands & BAND_AN) && \
- priv->curr_bss_params.bss_descriptor.bcn_ht_cap)
+ priv->curr_bss_params.bss_descriptor.bcn_ht_cap && \
+ !priv->curr_bss_params.bss_descriptor.disable_11n)
#define INITIATOR_BIT(DelBAParamSet) (((DelBAParamSet) &\
BIT(DELBA_INITIATOR_POS)) >> DELBA_INITIATOR_POS)
@@ -238,6 +239,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
#define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
#define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25))
+#define ISSUPP_RANDOM_MAC(FwCapInfo) (FwCapInfo & BIT(27))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index e11919db7818..e1aa86042469 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -52,9 +52,9 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
return 0;
}
-static void wakeup_timer_fn(unsigned long data)
+static void wakeup_timer_fn(struct timer_list *t)
{
- struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
+ struct mwifiex_adapter *adapter = from_timer(adapter, t, wakeup_timer);
mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
@@ -313,8 +313,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
adapter->active_scan_triggered = false;
- setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
- (unsigned long)adapter);
+ timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
}
/*
@@ -579,10 +578,6 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
{
spin_lock_irqsave(lock, flags);
- if (list_empty(head)) {
- spin_unlock_irqrestore(lock, flags);
- continue;
- }
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index ee40b739b289..a96bd7e653bf 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -100,8 +100,7 @@ static int mwifiex_register(void *card, struct device *dev,
}
mwifiex_init_lock_list(adapter);
- setup_timer(&adapter->cmd_timer, mwifiex_cmd_timeout_func,
- (unsigned long)adapter);
+ timer_setup(&adapter->cmd_timer, mwifiex_cmd_timeout_func, 0);
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index a76bd797e454..154c0796c0c5 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -680,7 +680,6 @@ struct mwifiex_private {
struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
u8 assoc_resp_ht_param;
bool ht_param_present;
- u8 random_mac[ETH_ALEN];
};
@@ -1073,7 +1072,7 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync);
-void mwifiex_cmd_timeout_func(unsigned long function_context);
+void mwifiex_cmd_timeout_func(struct timer_list *t);
int mwifiex_get_debug_info(struct mwifiex_private *,
struct mwifiex_debug_info *);
@@ -1618,7 +1617,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
const u8 *mac, u8 link_status);
void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
u8 *mac, s8 snr, s8 nflr);
-void mwifiex_check_auto_tdls(unsigned long context);
+void mwifiex_check_auto_tdls(struct timer_list *t);
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index c9d41ed77fc7..d7ce7f75ae38 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1936,8 +1936,6 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
if (!user_scan_cfg)
return -ENOMEM;
- memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
-
for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
if (!priv->hidden_chan[id].chan_number)
break;
@@ -1948,7 +1946,8 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
adapter->active_scan_triggered = true;
if (priv->scan_request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
- ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+ ether_addr_copy(user_scan_cfg->random_mac,
+ priv->scan_request->mac_addr);
user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
user_scan_cfg->ssid_list = priv->scan_request->ssids;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 0fba5b10ef2d..1bd4e13b8449 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -70,11 +70,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
case HostCmd_CMD_802_11_SCAN_EXT:
- mwifiex_cancel_pending_scan_cmd(adapter);
-
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ mwifiex_cancel_scan(adapter);
break;
case HostCmd_CMD_MAC_CONTROL:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 839df8a9634e..d8db412b76c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -359,13 +359,12 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
} else {
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
mwifiex_update_ralist_tx_pause(priv, tp->peermac,
tp->tx_pause);
}
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
}
@@ -396,14 +395,13 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
if (mwifiex_is_tdls_link_setup(status)) {
spin_lock_irqsave(&priv->sta_list_spinlock, flags);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
mwifiex_update_ralist_tx_pause(priv,
tp->peermac,
tp->tx_pause);
}
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index e76af2866a19..27779d7317fd 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -1389,9 +1389,9 @@ void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
}
-void mwifiex_check_auto_tdls(unsigned long context)
+void mwifiex_check_auto_tdls(struct timer_list *t)
{
- struct mwifiex_private *priv = (struct mwifiex_private *)context;
+ struct mwifiex_private *priv = from_timer(priv, t, auto_tdls_timer);
struct mwifiex_auto_tdls_peer *tdls_peer;
unsigned long flags;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
@@ -1413,13 +1413,6 @@ void mwifiex_check_auto_tdls(unsigned long context)
priv->check_tdls_tx = false;
- if (list_empty(&priv->auto_tdls_list)) {
- mod_timer(&priv->auto_tdls_timer,
- jiffies +
- msecs_to_jiffies(MWIFIEX_TIMER_10S));
- return;
- }
-
spin_lock_irqsave(&priv->auto_tdls_lock, flags);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
if ((jiffies - tdls_peer->rssi_jiffies) >
@@ -1463,8 +1456,7 @@ void mwifiex_check_auto_tdls(unsigned long context)
void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv)
{
- setup_timer(&priv->auto_tdls_timer, mwifiex_check_auto_tdls,
- (unsigned long)priv);
+ timer_setup(&priv->auto_tdls_timer, mwifiex_check_auto_tdls, 0);
priv->auto_tdls_timer_active = true;
mod_timer(&priv->auto_tdls_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index f4f2b9b27e32..4bc244801636 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1096,12 +1096,12 @@ postcopy_cur_buf:
return -EINPROGRESS;
}
-static void mwifiex_usb_tx_aggr_tmo(unsigned long context)
+static void mwifiex_usb_tx_aggr_tmo(struct timer_list *t)
{
struct urb_context *urb_cnxt = NULL;
struct sk_buff *skb_send = NULL;
struct tx_aggr_tmr_cnxt *timer_context =
- (struct tx_aggr_tmr_cnxt *)context;
+ from_timer(timer_context, t, hold_timer);
struct mwifiex_adapter *adapter = timer_context->adapter;
struct usb_tx_data_port *port = timer_context->port;
unsigned long flags;
@@ -1236,9 +1236,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
port->tx_aggr.timer_cnxt.port = port;
port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
- setup_timer(&port->tx_aggr.timer_cnxt.hold_timer,
- mwifiex_usb_tx_aggr_tmo,
- (unsigned long)&port->tx_aggr.timer_cnxt);
+ timer_setup(&port->tx_aggr.timer_cnxt.hold_timer,
+ mwifiex_usb_tx_aggr_tmo, 0);
}
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 0edd26881321..936a0a841af8 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -359,7 +359,8 @@ static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
/* Map of TOS UP values to WMM AC */
- const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE,
+ static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
+ WMM_AC_BE,
WMM_AC_BK,
WMM_AC_BK,
WMM_AC_BE,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile
index f236b7dc2be3..97f760a3d599 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Makefile
+++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2015-2016 Quantenna Communications, Inc.
# All rights reserved.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index a450bc6bc774..6711e7fb6926 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -73,7 +73,10 @@ qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_AP] = {
.tx = BIT(IEEE80211_STYPE_ACTION >> 4),
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
- BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4),
},
};
@@ -133,6 +136,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
vif->netdev = NULL;
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
eth_zero_addr(vif->mac_addr);
+ eth_zero_addr(vif->bssid);
return 0;
}
@@ -201,6 +205,8 @@ err_mac:
qtnf_cmd_send_del_intf(vif);
err_cmd:
vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
+ eth_zero_addr(vif->mac_addr);
+ eth_zero_addr(vif->bssid);
return ERR_PTR(-EFAULT);
}
@@ -211,10 +217,10 @@ static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
int ret = 0;
if (!info->beacon_ies || !info->beacon_ies_len) {
- ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_MGMT_FRAME_BEACON,
+ ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_IE_SET_BEACON_IES,
NULL, 0);
} else {
- ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_MGMT_FRAME_BEACON,
+ ret = qtnf_cmd_send_mgmt_set_appie(vif, QLINK_IE_SET_BEACON_IES,
info->beacon_ies,
info->beacon_ies_len);
}
@@ -224,11 +230,11 @@ static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
if (!info->proberesp_ies || !info->proberesp_ies_len) {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
- QLINK_MGMT_FRAME_PROBE_RESP,
+ QLINK_IE_SET_PROBE_RESP_IES,
NULL, 0);
} else {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
- QLINK_MGMT_FRAME_PROBE_RESP,
+ QLINK_IE_SET_PROBE_RESP_IES,
info->proberesp_ies,
info->proberesp_ies_len);
}
@@ -238,11 +244,11 @@ static int qtnf_mgmt_set_appie(struct qtnf_vif *vif,
if (!info->assocresp_ies || !info->assocresp_ies_len) {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
- QLINK_MGMT_FRAME_ASSOC_RESP,
+ QLINK_IE_SET_ASSOC_RESP,
NULL, 0);
} else {
ret = qtnf_cmd_send_mgmt_set_appie(vif,
- QLINK_MGMT_FRAME_ASSOC_RESP,
+ QLINK_IE_SET_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
}
@@ -256,11 +262,6 @@ static int qtnf_change_beacon(struct wiphy *wiphy, struct net_device *dev,
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_err("VIF%u.%u: not started\n", vif->mac->macid, vif->vifid);
- return -EFAULT;
- }
-
return qtnf_mgmt_set_appie(vif, info);
}
@@ -268,67 +269,13 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
- struct qtnf_wmac *mac = wiphy_priv(wiphy);
- struct qtnf_bss_config *bss_cfg;
int ret;
- if (!cfg80211_chandef_identical(&mac->chandef, &settings->chandef)) {
- memcpy(&mac->chandef, &settings->chandef, sizeof(mac->chandef));
- if (vif->vifid != 0)
- pr_warn("%s: unexpected chan %u (%u MHz)\n", dev->name,
- settings->chandef.chan->hw_value,
- settings->chandef.chan->center_freq);
- }
-
- bss_cfg = &vif->bss_cfg;
- memset(bss_cfg, 0, sizeof(*bss_cfg));
-
- bss_cfg->bcn_period = settings->beacon_interval;
- bss_cfg->dtim = settings->dtim_period;
- bss_cfg->auth_type = settings->auth_type;
- bss_cfg->privacy = settings->privacy;
-
- bss_cfg->ssid_len = settings->ssid_len;
- memcpy(&bss_cfg->ssid, settings->ssid, bss_cfg->ssid_len);
-
- memcpy(&bss_cfg->crypto, &settings->crypto,
- sizeof(struct cfg80211_crypto_settings));
-
- ret = qtnf_cmd_send_config_ap(vif);
- if (ret) {
- pr_err("VIF%u.%u: failed to push config to FW\n",
- vif->mac->macid, vif->vifid);
- goto out;
- }
-
- if (!(vif->bss_status & QTNF_STATE_AP_CONFIG)) {
- pr_err("VIF%u.%u: AP config failed in FW\n", vif->mac->macid,
- vif->vifid);
- ret = -EFAULT;
- goto out;
- }
-
- ret = qtnf_mgmt_set_appie(vif, &settings->beacon);
- if (ret) {
- pr_err("VIF%u.%u: failed to add IEs to beacon\n",
- vif->mac->macid, vif->vifid);
- goto out;
- }
-
- ret = qtnf_cmd_send_start_ap(vif);
- if (ret) {
+ ret = qtnf_cmd_send_start_ap(vif, settings);
+ if (ret)
pr_err("VIF%u.%u: failed to start AP\n", vif->mac->macid,
vif->vifid);
- goto out;
- }
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_err("VIF%u.%u: FW failed to start AP operation\n",
- vif->mac->macid, vif->vifid);
- ret = -EFAULT;
- }
-
-out:
return ret;
}
@@ -343,8 +290,6 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (ret) {
pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
vif->mac->macid, vif->vifid);
- vif->bss_status &= ~QTNF_STATE_AP_START;
- vif->bss_status &= ~QTNF_STATE_AP_CONFIG;
netif_carrier_off(vif->netdev);
}
@@ -396,6 +341,13 @@ qtnf_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
return;
switch (frame_type & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_REASSOC_REQ:
+ case IEEE80211_STYPE_ASSOC_REQ:
+ qlink_frame_type = QLINK_MGMT_FRAME_ASSOC_REQ;
+ break;
+ case IEEE80211_STYPE_AUTH:
+ qlink_frame_type = QLINK_MGMT_FRAME_AUTH;
+ break;
case IEEE80211_STYPE_PROBE_REQ:
qlink_frame_type = QLINK_MGMT_FRAME_PROBE_REQ;
break;
@@ -581,9 +533,9 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static void qtnf_scan_timeout(unsigned long data)
+static void qtnf_scan_timeout(struct timer_list *t)
{
- struct qtnf_wmac *mac = (struct qtnf_wmac *)data;
+ struct qtnf_wmac *mac = from_timer(mac, t, scan_timeout);
pr_warn("mac%d scan timed out\n", mac->macid);
qtnf_scan_done(mac, true);
@@ -602,7 +554,6 @@ qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
return -EFAULT;
}
- mac->scan_timeout.data = (unsigned long)mac;
mac->scan_timeout.function = qtnf_scan_timeout;
mod_timer(&mac->scan_timeout,
jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
@@ -615,9 +566,6 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
- struct qtnf_wmac *mac = wiphy_priv(wiphy);
- struct cfg80211_chan_def chandef;
- struct qtnf_bss_config *bss_cfg;
int ret;
if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
@@ -626,49 +574,10 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
if (vif->sta_state != QTNF_STA_DISCONNECTED)
return -EBUSY;
- bss_cfg = &vif->bss_cfg;
- memset(bss_cfg, 0, sizeof(*bss_cfg));
-
- if (sme->channel) {
- /* FIXME: need to set proper nl80211_channel_type value */
- cfg80211_chandef_create(&chandef, sme->channel,
- NL80211_CHAN_HT20);
- /* fall-back to minimal safe chandef description */
- if (!cfg80211_chandef_valid(&chandef))
- cfg80211_chandef_create(&chandef, sme->channel,
- NL80211_CHAN_HT20);
-
- memcpy(&mac->chandef, &chandef, sizeof(mac->chandef));
- }
-
- bss_cfg->ssid_len = sme->ssid_len;
- memcpy(&bss_cfg->ssid, sme->ssid, bss_cfg->ssid_len);
- bss_cfg->auth_type = sme->auth_type;
- bss_cfg->privacy = sme->privacy;
- bss_cfg->mfp = sme->mfp;
-
- if ((sme->bg_scan_period > 0) &&
- (sme->bg_scan_period <= QTNF_MAX_BG_SCAN_PERIOD))
- bss_cfg->bg_scan_period = sme->bg_scan_period;
- else if (sme->bg_scan_period == -1)
- bss_cfg->bg_scan_period = QTNF_DEFAULT_BG_SCAN_PERIOD;
- else
- bss_cfg->bg_scan_period = 0; /* disabled */
-
- bss_cfg->connect_flags = 0;
-
- if (sme->flags & ASSOC_REQ_DISABLE_HT)
- bss_cfg->connect_flags |= QLINK_STA_CONNECT_DISABLE_HT;
- if (sme->flags & ASSOC_REQ_DISABLE_VHT)
- bss_cfg->connect_flags |= QLINK_STA_CONNECT_DISABLE_VHT;
- if (sme->flags & ASSOC_REQ_USE_RRM)
- bss_cfg->connect_flags |= QLINK_STA_CONNECT_USE_RRM;
-
- memcpy(&bss_cfg->crypto, &sme->crypto, sizeof(bss_cfg->crypto));
if (sme->bssid)
- ether_addr_copy(bss_cfg->bssid, sme->bssid);
+ ether_addr_copy(vif->bssid, sme->bssid);
else
- eth_zero_addr(bss_cfg->bssid);
+ eth_zero_addr(vif->bssid);
ret = qtnf_cmd_send_connect(vif, sme);
if (ret) {
@@ -717,15 +626,15 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev,
int idx, struct survey_info *survey)
{
struct qtnf_wmac *mac = wiphy_priv(wiphy);
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct ieee80211_supported_band *sband;
- struct cfg80211_chan_def *chandef;
+ const struct cfg80211_chan_def *chandef = &wdev->chandef;
struct ieee80211_channel *chan;
struct qtnf_chan_stats stats;
struct qtnf_vif *vif;
int ret;
vif = qtnf_netdev_get_priv(dev);
- chandef = &mac->chandef;
sband = wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
@@ -792,46 +701,35 @@ static int
qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_chan_def *chandef)
{
- struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct net_device *ndev = wdev->netdev;
struct qtnf_vif *vif;
+ int ret;
if (!ndev)
return -ENODEV;
vif = qtnf_netdev_get_priv(wdev->netdev);
- switch (vif->wdev.iftype) {
- case NL80211_IFTYPE_STATION:
- if (vif->sta_state == QTNF_STA_DISCONNECTED) {
- pr_warn("%s: STA disconnected\n", ndev->name);
- return -ENODATA;
- }
- break;
- case NL80211_IFTYPE_AP:
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_warn("%s: AP not started\n", ndev->name);
- return -ENODATA;
- }
- break;
- default:
- pr_err("unsupported vif type (%d)\n", vif->wdev.iftype);
- return -ENODATA;
+ ret = qtnf_cmd_get_channel(vif, chandef);
+ if (ret) {
+ pr_err("%s: failed to get channel: %d\n", ndev->name, ret);
+ goto out;
}
- if (!cfg80211_chandef_valid(&mac->chandef)) {
- pr_err("invalid channel settings on %s\n", ndev->name);
- return -ENODATA;
+ if (!cfg80211_chandef_valid(chandef)) {
+ pr_err("%s: bad chan freq1=%u freq2=%u bw=%u\n", ndev->name,
+ chandef->center_freq1, chandef->center_freq2,
+ chandef->width);
+ ret = -ENODATA;
}
- memcpy(chandef, &mac->chandef, sizeof(*chandef));
- return 0;
+out:
+ return ret;
}
static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_csa_settings *params)
{
- struct qtnf_wmac *mac = wiphy_priv(wiphy);
struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
int ret;
@@ -839,41 +737,12 @@ static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->hw_value, params->count,
params->radar_required, params->block_tx);
- switch (vif->wdev.iftype) {
- case NL80211_IFTYPE_AP:
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_warn("AP not started on %s\n", dev->name);
- return -ENOTCONN;
- }
- break;
- default:
- pr_err("unsupported vif type (%d) on %s\n",
- vif->wdev.iftype, dev->name);
- return -EOPNOTSUPP;
- }
-
- if (vif->vifid != 0) {
- if (!(mac->status & QTNF_MAC_CSA_ACTIVE))
- return -EOPNOTSUPP;
-
- if (!cfg80211_chandef_identical(&params->chandef,
- &mac->csa_chandef))
- return -EINVAL;
-
- return 0;
- }
-
if (!cfg80211_chandef_valid(&params->chandef)) {
pr_err("%s: invalid channel\n", dev->name);
return -EINVAL;
}
- if (cfg80211_chandef_identical(&params->chandef, &mac->chandef)) {
- pr_err("%s: switch request to the same channel\n", dev->name);
- return -EALREADY;
- }
-
- ret = qtnf_cmd_send_chan_switch(mac, params);
+ ret = qtnf_cmd_send_chan_switch(vif, params);
if (ret)
pr_warn("%s: failed to switch to channel (%u)\n",
dev->name, params->chandef.chan->hw_value);
@@ -939,8 +808,7 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
if (!wiphy->bands[band])
continue;
- ret = qtnf_cmd_get_mac_chan_info(mac,
- wiphy->bands[band]);
+ ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
if (ret)
pr_err("failed to get chan info for mac %u band %u\n",
mac_idx, band);
@@ -948,33 +816,6 @@ static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
}
}
-void qtnf_band_setup_htvht_caps(struct qtnf_mac_info *macinfo,
- struct ieee80211_supported_band *band)
-{
- struct ieee80211_sta_ht_cap *ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap;
-
- ht_cap = &band->ht_cap;
- ht_cap->ht_supported = true;
- memcpy(&ht_cap->cap, &macinfo->ht_cap.cap_info,
- sizeof(u16));
- ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- memcpy(&ht_cap->mcs, &macinfo->ht_cap.mcs,
- sizeof(ht_cap->mcs));
-
- if (macinfo->phymode_cap & QLINK_PHYMODE_AC) {
- vht_cap = &band->vht_cap;
- vht_cap->vht_supported = true;
- memcpy(&vht_cap->cap,
- &macinfo->vht_cap.vht_cap_info, sizeof(u32));
- /* Update MCS support for VHT */
- memcpy(&vht_cap->vht_mcs,
- &macinfo->vht_cap.supp_mcs,
- sizeof(struct ieee80211_vht_mcs_info));
- }
-}
-
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus)
{
struct wiphy *wiphy;
@@ -1035,9 +876,6 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
if (ret)
goto out;
- pr_info("MAC%u: phymode=%#x radar=%#x\n", mac->macid,
- mac->macinfo.phymode_cap, mac->macinfo.radar_detect_widths);
-
wiphy->frag_threshold = mac->macinfo.frag_thr;
wiphy->rts_threshold = mac->macinfo.rts_thr;
wiphy->retry_short = mac->macinfo.sretry_limit;
@@ -1069,10 +907,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->available_antennas_rx = mac->macinfo.num_rx_chain;
wiphy->max_ap_assoc_sta = mac->macinfo.max_ap_assoc_sta;
+ wiphy->ht_capa_mod_mask = &mac->macinfo.ht_cap_mod_mask;
+ wiphy->vht_capa_mod_mask = &mac->macinfo.vht_cap_mod_mask;
ether_addr_copy(wiphy->perm_addr, mac->macaddr);
- if (hw_info->hw_capab & QLINK_HW_SUPPORTS_REG_UPDATE) {
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_STA_INACT_TIMEOUT)
+ wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
+
+ if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
REGULATORY_CUSTOM_REG;
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
@@ -1119,7 +962,7 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
break;
case QTNF_STA_CONNECTING:
cfg80211_connect_result(vif->netdev,
- vif->bss_cfg.bssid, NULL, 0,
+ vif->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
@@ -1147,7 +990,7 @@ void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
switch (vif->sta_state) {
case QTNF_STA_CONNECTING:
cfg80211_connect_result(vif->netdev,
- vif->bss_cfg.bssid, NULL, 0,
+ vif->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 4206886b110c..8bc8dd637315 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -147,96 +147,143 @@ static struct sk_buff *qtnf_cmd_alloc_new_cmdskb(u8 macid, u8 vifid, u16 cmd_no,
return cmd_skb;
}
-int qtnf_cmd_send_start_ap(struct qtnf_vif *vif)
+static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
+ const u8 *buf, size_t len)
{
- struct sk_buff *cmd_skb;
- u16 res_code = QLINK_CMD_RESULT_OK;
- int ret;
+ struct qlink_tlv_ie_set *tlv;
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
- QLINK_CMD_START_AP,
- sizeof(struct qlink_cmd));
- if (unlikely(!cmd_skb))
- return -ENOMEM;
+ tlv = (struct qlink_tlv_ie_set *)skb_put(cmd_skb, sizeof(*tlv) + len);
+ tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_IE_SET);
+ tlv->hdr.len = cpu_to_le16(len + sizeof(*tlv) - sizeof(tlv->hdr));
+ tlv->type = frame_type;
+ tlv->flags = 0;
- qtnf_bus_lock(vif->mac->bus);
+ if (len && buf)
+ memcpy(tlv->ie_data, buf, len);
+}
- ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
+static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
+ const struct cfg80211_ap_settings *s)
+{
+ unsigned int len = sizeof(struct qlink_cmd_start_ap);
- if (unlikely(ret))
- goto out;
+ len += s->ssid_len;
+ len += s->beacon.head_len;
+ len += s->beacon.tail_len;
+ len += s->beacon.beacon_ies_len;
+ len += s->beacon.proberesp_ies_len;
+ len += s->beacon.assocresp_ies_len;
+ len += s->beacon.probe_resp_len;
- if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
- pr_err("VIF%u.%u: CMD failed: %u\n", vif->mac->macid,
- vif->vifid, res_code);
- ret = -EFAULT;
- goto out;
- }
+ if (cfg80211_chandef_valid(&s->chandef))
+ len += sizeof(struct qlink_tlv_chandef);
- vif->bss_status |= QTNF_STATE_AP_START;
- netif_carrier_on(vif->netdev);
+ if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
+ pr_err("VIF%u.%u: can not fit AP settings: %u\n",
+ vif->mac->macid, vif->vifid, len);
+ return false;
+ }
-out:
- qtnf_bus_unlock(vif->mac->bus);
- return ret;
+ return true;
}
-int qtnf_cmd_send_config_ap(struct qtnf_vif *vif)
+int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
+ const struct cfg80211_ap_settings *s)
{
struct sk_buff *cmd_skb;
- struct qtnf_bss_config *bss_cfg = &vif->bss_cfg;
- struct cfg80211_chan_def *chandef = &vif->mac->chandef;
- struct qlink_tlv_channel *qchan;
- struct qlink_auth_encr aen;
+ struct qlink_cmd_start_ap *cmd;
+ struct qlink_auth_encr *aen;
u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
+ if (!qtnf_cmd_start_ap_can_fit(vif, s))
+ return -E2BIG;
+
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
- QLINK_CMD_CONFIG_AP,
- sizeof(struct qlink_cmd));
+ QLINK_CMD_START_AP,
+ sizeof(*cmd));
if (unlikely(!cmd_skb))
return -ENOMEM;
- qtnf_bus_lock(vif->mac->bus);
-
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, bss_cfg->ssid,
- bss_cfg->ssid_len);
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_BCN_PERIOD,
- bss_cfg->bcn_period);
- qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_DTIM, bss_cfg->dtim);
-
- qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
- qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
- qchan->hdr.len = cpu_to_le16(sizeof(*qchan) -
- sizeof(struct qlink_tlv_hdr));
- qchan->hw_value = cpu_to_le16(
- ieee80211_frequency_to_channel(chandef->chan->center_freq));
-
- memset(&aen, 0, sizeof(aen));
- aen.auth_type = bss_cfg->auth_type;
- aen.privacy = !!bss_cfg->privacy;
- aen.mfp = bss_cfg->mfp;
- aen.wpa_versions = cpu_to_le32(bss_cfg->crypto.wpa_versions);
- aen.cipher_group = cpu_to_le32(bss_cfg->crypto.cipher_group);
- aen.n_ciphers_pairwise = cpu_to_le32(
- bss_cfg->crypto.n_ciphers_pairwise);
+ cmd = (struct qlink_cmd_start_ap *)cmd_skb->data;
+ cmd->dtim_period = s->dtim_period;
+ cmd->beacon_interval = cpu_to_le16(s->beacon_interval);
+ cmd->hidden_ssid = qlink_hidden_ssid_nl2q(s->hidden_ssid);
+ cmd->inactivity_timeout = cpu_to_le16(s->inactivity_timeout);
+ cmd->smps_mode = s->smps_mode;
+ cmd->p2p_ctwindow = s->p2p_ctwindow;
+ cmd->p2p_opp_ps = s->p2p_opp_ps;
+ cmd->pbss = s->pbss;
+ cmd->ht_required = s->ht_required;
+ cmd->vht_required = s->vht_required;
+
+ aen = &cmd->aen;
+ aen->auth_type = s->auth_type;
+ aen->privacy = !!s->privacy;
+ aen->wpa_versions = cpu_to_le32(s->crypto.wpa_versions);
+ aen->cipher_group = cpu_to_le32(s->crypto.cipher_group);
+ aen->n_ciphers_pairwise = cpu_to_le32(s->crypto.n_ciphers_pairwise);
for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
- aen.ciphers_pairwise[i] = cpu_to_le32(
- bss_cfg->crypto.ciphers_pairwise[i]);
- aen.n_akm_suites = cpu_to_le32(
- bss_cfg->crypto.n_akm_suites);
+ aen->ciphers_pairwise[i] =
+ cpu_to_le32(s->crypto.ciphers_pairwise[i]);
+ aen->n_akm_suites = cpu_to_le32(s->crypto.n_akm_suites);
for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
- aen.akm_suites[i] = cpu_to_le32(
- bss_cfg->crypto.akm_suites[i]);
- aen.control_port = bss_cfg->crypto.control_port;
- aen.control_port_no_encrypt =
- bss_cfg->crypto.control_port_no_encrypt;
- aen.control_port_ethertype = cpu_to_le16(be16_to_cpu(
- bss_cfg->crypto.control_port_ethertype));
+ aen->akm_suites[i] = cpu_to_le32(s->crypto.akm_suites[i]);
+ aen->control_port = s->crypto.control_port;
+ aen->control_port_no_encrypt = s->crypto.control_port_no_encrypt;
+ aen->control_port_ethertype =
+ cpu_to_le16(be16_to_cpu(s->crypto.control_port_ethertype));
+
+ if (s->ssid && s->ssid_len > 0 && s->ssid_len <= IEEE80211_MAX_SSID_LEN)
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, s->ssid,
+ s->ssid_len);
+
+ if (cfg80211_chandef_valid(&s->chandef)) {
+ struct qlink_tlv_chandef *chtlv =
+ (struct qlink_tlv_chandef *)skb_put(cmd_skb,
+ sizeof(*chtlv));
+
+ chtlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANDEF);
+ chtlv->hdr.len = cpu_to_le16(sizeof(*chtlv) -
+ sizeof(chtlv->hdr));
+ qlink_chandef_cfg2q(&s->chandef, &chtlv->chan);
+ }
+
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_HEAD,
+ s->beacon.head, s->beacon.head_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_TAIL,
+ s->beacon.tail, s->beacon.tail_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_BEACON_IES,
+ s->beacon.beacon_ies, s->beacon.beacon_ies_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_RESP,
+ s->beacon.probe_resp, s->beacon.probe_resp_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_RESP_IES,
+ s->beacon.proberesp_ies,
+ s->beacon.proberesp_ies_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_ASSOC_RESP,
+ s->beacon.assocresp_ies,
+ s->beacon.assocresp_ies_len);
+
+ if (s->ht_cap) {
+ struct qlink_tlv_hdr *tlv = (struct qlink_tlv_hdr *)
+ skb_put(cmd_skb, sizeof(*tlv) + sizeof(*s->ht_cap));
+
+ tlv->type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ tlv->len = cpu_to_le16(sizeof(*s->ht_cap));
+ memcpy(tlv->val, s->ht_cap, sizeof(*s->ht_cap));
+ }
+
+ if (s->vht_cap) {
+ struct qlink_tlv_hdr *tlv = (struct qlink_tlv_hdr *)
+ skb_put(cmd_skb, sizeof(*tlv) + sizeof(*s->vht_cap));
+
+ tlv->type = cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ tlv->len = cpu_to_le16(sizeof(*s->vht_cap));
+ memcpy(tlv->val, s->vht_cap, sizeof(*s->vht_cap));
+ }
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_CRYPTO, (u8 *)&aen,
- sizeof(aen));
+ qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -250,7 +297,7 @@ int qtnf_cmd_send_config_ap(struct qtnf_vif *vif)
goto out;
}
- vif->bss_status |= QTNF_STATE_AP_CONFIG;
+ netif_carrier_on(vif->netdev);
out:
qtnf_bus_unlock(vif->mac->bus);
@@ -283,9 +330,6 @@ int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif)
goto out;
}
- vif->bss_status &= ~QTNF_STATE_AP_START;
- vif->bss_status &= ~QTNF_STATE_AP_CONFIG;
-
netif_carrier_off(vif->netdev);
out:
@@ -380,11 +424,10 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
const u8 *buf, size_t len)
{
struct sk_buff *cmd_skb;
- struct qlink_cmd_mgmt_append_ie *cmd;
u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
- if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
+ if (len > QTNF_MAX_CMD_BUF_SIZE) {
pr_warn("VIF%u.%u: %u frame is too big: %zu\n", vif->mac->macid,
vif->vifid, frame_type, len);
return -E2BIG;
@@ -392,21 +435,13 @@ int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_MGMT_SET_APPIE,
- sizeof(*cmd));
+ sizeof(struct qlink_cmd));
if (unlikely(!cmd_skb))
return -ENOMEM;
- qtnf_bus_lock(vif->mac->bus);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, frame_type, buf, len);
- cmd = (struct qlink_cmd_mgmt_append_ie *)cmd_skb->data;
- cmd->type = frame_type;
- cmd->flags = 0;
-
- /* If len == 0 then IE buf for specified frame type
- * should be cleared on EP.
- */
- if (len && buf)
- qtnf_cmd_skb_put_buffer(cmd_skb, buf, len);
+ qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -975,10 +1010,11 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
return -EINVAL;
}
- pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u\n",
+ pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u, capab=0x%x\n",
hwinfo->fw_ver, hwinfo->mac_bitmap,
hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1],
- hwinfo->total_tx_chain, hwinfo->total_rx_chain);
+ hwinfo->total_tx_chain, hwinfo->total_rx_chain,
+ hwinfo->hw_capab);
return 0;
}
@@ -1089,7 +1125,6 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
mac_info = &mac->macinfo;
mac_info->bands_cap = resp_info->bands_cap;
- mac_info->phymode_cap = resp_info->phymode_cap;
memcpy(&mac_info->dev_mac, &resp_info->dev_mac,
sizeof(mac_info->dev_mac));
@@ -1109,24 +1144,56 @@ qtnf_cmd_resp_proc_mac_info(struct qtnf_wmac *mac,
qlink_chan_width_mask_to_nl(le16_to_cpu(
resp_info->radar_detect_widths));
- memcpy(&mac_info->ht_cap, &resp_info->ht_cap, sizeof(mac_info->ht_cap));
- memcpy(&mac_info->vht_cap, &resp_info->vht_cap,
- sizeof(mac_info->vht_cap));
+ memcpy(&mac_info->ht_cap_mod_mask, &resp_info->ht_cap_mod_mask,
+ sizeof(mac_info->ht_cap_mod_mask));
+ memcpy(&mac_info->vht_cap_mod_mask, &resp_info->vht_cap_mod_mask,
+ sizeof(mac_info->vht_cap_mod_mask));
+}
+
+static void qtnf_cmd_resp_band_fill_htcap(const u8 *info,
+ struct ieee80211_sta_ht_cap *bcap)
+{
+ const struct ieee80211_ht_cap *ht_cap =
+ (const struct ieee80211_ht_cap *)info;
+
+ bcap->ht_supported = true;
+ bcap->cap = le16_to_cpu(ht_cap->cap_info);
+ bcap->ampdu_factor =
+ ht_cap->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_FACTOR;
+ bcap->ampdu_density =
+ (ht_cap->ampdu_params_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >>
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
+ memcpy(&bcap->mcs, &ht_cap->mcs, sizeof(bcap->mcs));
+}
+
+static void qtnf_cmd_resp_band_fill_vhtcap(const u8 *info,
+ struct ieee80211_sta_vht_cap *bcap)
+{
+ const struct ieee80211_vht_cap *vht_cap =
+ (const struct ieee80211_vht_cap *)info;
+
+ bcap->vht_supported = true;
+ bcap->cap = le32_to_cpu(vht_cap->vht_cap_info);
+ memcpy(&bcap->vht_mcs, &vht_cap->supp_mcs, sizeof(bcap->vht_mcs));
}
static int
-qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band,
- struct qlink_resp_get_chan_info *resp,
- size_t payload_len)
+qtnf_cmd_resp_fill_band_info(struct ieee80211_supported_band *band,
+ struct qlink_resp_band_info_get *resp,
+ size_t payload_len)
{
u16 tlv_type;
size_t tlv_len;
+ size_t tlv_dlen;
const struct qlink_tlv_hdr *tlv;
const struct qlink_tlv_channel *qchan;
struct ieee80211_channel *chan;
unsigned int chidx = 0;
u32 qflags;
+ memset(&band->ht_cap, 0, sizeof(band->ht_cap));
+ memset(&band->vht_cap, 0, sizeof(band->vht_cap));
+
if (band->channels) {
if (band->n_channels == resp->num_chans) {
memset(band->channels, 0,
@@ -1154,7 +1221,8 @@ qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band,
while (payload_len >= sizeof(*tlv)) {
tlv_type = le16_to_cpu(tlv->type);
- tlv_len = le16_to_cpu(tlv->len) + sizeof(*tlv);
+ tlv_dlen = le16_to_cpu(tlv->len);
+ tlv_len = tlv_dlen + sizeof(*tlv);
if (tlv_len > payload_len) {
pr_warn("malformed TLV 0x%.2X; LEN: %zu\n",
@@ -1240,13 +1308,32 @@ qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band,
chan->hw_value, chan->flags, chan->max_power,
chan->max_reg_power);
break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (unlikely(tlv_dlen !=
+ sizeof(struct ieee80211_ht_cap))) {
+ pr_err("bad HTCAP TLV len %zu\n", tlv_dlen);
+ goto error_ret;
+ }
+
+ qtnf_cmd_resp_band_fill_htcap(tlv->val, &band->ht_cap);
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (unlikely(tlv_dlen !=
+ sizeof(struct ieee80211_vht_cap))) {
+ pr_err("bad VHTCAP TLV len %zu\n", tlv_dlen);
+ goto error_ret;
+ }
+
+ qtnf_cmd_resp_band_fill_vhtcap(tlv->val,
+ &band->vht_cap);
+ break;
default:
pr_warn("unknown TLV type: %#x\n", tlv_type);
break;
}
payload_len -= tlv_len;
- tlv = (struct qlink_tlv_hdr *)((u8 *)tlv + tlv_len);
+ tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_dlen);
}
if (payload_len) {
@@ -1468,13 +1555,13 @@ out:
return ret;
}
-int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
- struct ieee80211_supported_band *band)
+int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
+ struct ieee80211_supported_band *band)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
size_t info_len;
- struct qlink_cmd_chans_info_get *cmd;
- struct qlink_resp_get_chan_info *resp;
+ struct qlink_cmd_band_info_get *cmd;
+ struct qlink_resp_band_info_get *resp;
u16 res_code = QLINK_CMD_RESULT_OK;
int ret = 0;
u8 qband;
@@ -1494,12 +1581,12 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
}
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
- QLINK_CMD_CHANS_INFO_GET,
+ QLINK_CMD_BAND_INFO_GET,
sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
- cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data;
+ cmd = (struct qlink_cmd_band_info_get *)cmd_skb->data;
cmd->band = qband;
qtnf_bus_lock(mac->bus);
@@ -1516,7 +1603,7 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
goto out;
}
- resp = (struct qlink_resp_get_chan_info *)resp_skb->data;
+ resp = (struct qlink_resp_band_info_get *)resp_skb->data;
if (resp->band != qband) {
pr_err("MAC%u: reply band %u != cmd band %u\n", mac->macid,
resp->band, qband);
@@ -1524,7 +1611,7 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
goto out;
}
- ret = qtnf_cmd_resp_fill_channels_info(band, resp, info_len);
+ ret = qtnf_cmd_resp_fill_band_info(band, resp, info_len);
out:
qtnf_bus_unlock(mac->bus);
@@ -1941,17 +2028,36 @@ out:
return ret;
}
+static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
+ const struct ieee80211_channel *sc)
+{
+ struct qlink_tlv_channel *qchan;
+ u32 flags = 0;
+
+ qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
+ qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
+ qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
+ qchan->center_freq = cpu_to_le16(sc->center_freq);
+ qchan->hw_value = cpu_to_le16(sc->hw_value);
+
+ if (sc->flags & IEEE80211_CHAN_NO_IR)
+ flags |= QLINK_CHAN_NO_IR;
+
+ if (sc->flags & IEEE80211_CHAN_RADAR)
+ flags |= QLINK_CHAN_RADAR;
+
+ qchan->flags = cpu_to_le32(flags);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
u16 res_code = QLINK_CMD_RESULT_OK;
struct ieee80211_channel *sc;
struct cfg80211_scan_request *scan_req = mac->scan_req;
- struct qlink_tlv_channel *qchan;
int n_channels;
int count = 0;
int ret;
- u32 flags;
if (scan_req->n_ssids > QTNF_MAX_SSID_LIST_LENGTH) {
pr_err("MAC%u: too many SSIDs in scan request\n", mac->macid);
@@ -1976,9 +2082,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
if (scan_req->ie_len != 0)
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_IE_SET,
- scan_req->ie,
- scan_req->ie_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_PROBE_REQ,
+ scan_req->ie, scan_req->ie_len);
if (scan_req->n_channels) {
n_channels = scan_req->n_channels;
@@ -1994,22 +2099,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
pr_debug("MAC%u: scan chan=%d, freq=%d, flags=%#x\n",
mac->macid, sc->hw_value, sc->center_freq,
sc->flags);
- qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
- flags = 0;
-
- qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
- qchan->hdr.len = cpu_to_le16(sizeof(*qchan) -
- sizeof(struct qlink_tlv_hdr));
- qchan->center_freq = cpu_to_le16(sc->center_freq);
- qchan->hw_value = cpu_to_le16(sc->hw_value);
- if (sc->flags & IEEE80211_CHAN_NO_IR)
- flags |= QLINK_CHAN_NO_IR;
-
- if (sc->flags & IEEE80211_CHAN_RADAR)
- flags |= QLINK_CHAN_RADAR;
-
- qchan->flags = cpu_to_le32(flags);
+ qtnf_cmd_channel_tlv_add(cmd_skb, sc);
n_channels--;
count++;
}
@@ -2037,11 +2128,11 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
{
struct sk_buff *cmd_skb;
struct qlink_cmd_connect *cmd;
- struct qtnf_bss_config *bss_cfg = &vif->bss_cfg;
- struct qlink_auth_encr aen;
+ struct qlink_auth_encr *aen;
u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
int i;
+ u32 connect_flags = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
QLINK_CMD_CONNECT,
@@ -2049,51 +2140,78 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif,
if (unlikely(!cmd_skb))
return -ENOMEM;
- qtnf_bus_lock(vif->mac->bus);
-
cmd = (struct qlink_cmd_connect *)cmd_skb->data;
- ether_addr_copy(cmd->bssid, bss_cfg->bssid);
+ ether_addr_copy(cmd->bssid, vif->bssid);
- if (vif->mac->chandef.chan)
- cmd->channel = cpu_to_le16(vif->mac->chandef.chan->hw_value);
+ if (sme->bssid_hint)
+ ether_addr_copy(cmd->bssid_hint, sme->bssid_hint);
+ else
+ eth_zero_addr(cmd->bssid_hint);
- cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period);
+ if (sme->prev_bssid)
+ ether_addr_copy(cmd->prev_bssid, sme->prev_bssid);
+ else
+ eth_zero_addr(cmd->prev_bssid);
- memset(&aen, 0, sizeof(aen));
- aen.auth_type = bss_cfg->auth_type;
- aen.privacy = !!bss_cfg->privacy;
- aen.mfp = bss_cfg->mfp;
- aen.wpa_versions = cpu_to_le32(bss_cfg->crypto.wpa_versions);
- aen.cipher_group = cpu_to_le32(bss_cfg->crypto.cipher_group);
- aen.n_ciphers_pairwise = cpu_to_le32(
- bss_cfg->crypto.n_ciphers_pairwise);
+ if ((sme->bg_scan_period > 0) &&
+ (sme->bg_scan_period <= QTNF_MAX_BG_SCAN_PERIOD))
+ cmd->bg_scan_period = cpu_to_le16(sme->bg_scan_period);
+ else if (sme->bg_scan_period == -1)
+ cmd->bg_scan_period = cpu_to_le16(QTNF_DEFAULT_BG_SCAN_PERIOD);
+ else
+ cmd->bg_scan_period = 0; /* disabled */
+
+ if (sme->flags & ASSOC_REQ_DISABLE_HT)
+ connect_flags |= QLINK_STA_CONNECT_DISABLE_HT;
+ if (sme->flags & ASSOC_REQ_DISABLE_VHT)
+ connect_flags |= QLINK_STA_CONNECT_DISABLE_VHT;
+ if (sme->flags & ASSOC_REQ_USE_RRM)
+ connect_flags |= QLINK_STA_CONNECT_USE_RRM;
+
+ cmd->flags = cpu_to_le32(connect_flags);
+ memcpy(&cmd->ht_capa, &sme->ht_capa, sizeof(cmd->ht_capa));
+ memcpy(&cmd->ht_capa_mask, &sme->ht_capa_mask,
+ sizeof(cmd->ht_capa_mask));
+ memcpy(&cmd->vht_capa, &sme->vht_capa, sizeof(cmd->vht_capa));
+ memcpy(&cmd->vht_capa_mask, &sme->vht_capa_mask,
+ sizeof(cmd->vht_capa_mask));
+ cmd->pbss = sme->pbss;
+
+ aen = &cmd->aen;
+ aen->auth_type = sme->auth_type;
+ aen->privacy = !!sme->privacy;
+ cmd->mfp = sme->mfp;
+ aen->wpa_versions = cpu_to_le32(sme->crypto.wpa_versions);
+ aen->cipher_group = cpu_to_le32(sme->crypto.cipher_group);
+ aen->n_ciphers_pairwise = cpu_to_le32(sme->crypto.n_ciphers_pairwise);
for (i = 0; i < QLINK_MAX_NR_CIPHER_SUITES; i++)
- aen.ciphers_pairwise[i] = cpu_to_le32(
- bss_cfg->crypto.ciphers_pairwise[i]);
+ aen->ciphers_pairwise[i] =
+ cpu_to_le32(sme->crypto.ciphers_pairwise[i]);
- aen.n_akm_suites = cpu_to_le32(bss_cfg->crypto.n_akm_suites);
+ aen->n_akm_suites = cpu_to_le32(sme->crypto.n_akm_suites);
for (i = 0; i < QLINK_MAX_NR_AKM_SUITES; i++)
- aen.akm_suites[i] = cpu_to_le32(
- bss_cfg->crypto.akm_suites[i]);
+ aen->akm_suites[i] = cpu_to_le32(sme->crypto.akm_suites[i]);
- aen.control_port = bss_cfg->crypto.control_port;
- aen.control_port_no_encrypt =
- bss_cfg->crypto.control_port_no_encrypt;
- aen.control_port_ethertype = cpu_to_le16(be16_to_cpu(
- bss_cfg->crypto.control_port_ethertype));
+ aen->control_port = sme->crypto.control_port;
+ aen->control_port_no_encrypt =
+ sme->crypto.control_port_no_encrypt;
+ aen->control_port_ethertype =
+ cpu_to_le16(be16_to_cpu(sme->crypto.control_port_ethertype));
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, bss_cfg->ssid,
- bss_cfg->ssid_len);
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_CRYPTO, (u8 *)&aen,
- sizeof(aen));
+ qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_SSID, sme->ssid,
+ sme->ssid_len);
if (sme->ie_len != 0)
- qtnf_cmd_skb_put_tlv_arr(cmd_skb, QTN_TLV_ID_IE_SET,
- sme->ie,
- sme->ie_len);
+ qtnf_cmd_tlv_ie_set_add(cmd_skb, QLINK_IE_SET_ASSOC_REQ,
+ sme->ie, sme->ie_len);
+
+ if (sme->channel)
+ qtnf_cmd_channel_tlv_add(cmd_skb, sme->channel);
+
+ qtnf_bus_lock(vif->mac->bus);
ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code);
@@ -2304,15 +2422,16 @@ out:
return ret;
}
-int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac,
+int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params)
{
+ struct qtnf_wmac *mac = vif->mac;
struct qlink_cmd_chan_switch *cmd;
struct sk_buff *cmd_skb;
u16 res_code = QLINK_CMD_RESULT_OK;
int ret;
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0x0,
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, vif->vifid,
QLINK_CMD_CHAN_SWITCH,
sizeof(*cmd));
@@ -2334,9 +2453,6 @@ int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac,
switch (res_code) {
case QLINK_CMD_RESULT_OK:
- memcpy(&mac->csa_chandef, &params->chandef,
- sizeof(mac->csa_chandef));
- mac->status |= QTNF_MAC_CSA_ACTIVE;
ret = 0;
break;
case QLINK_CMD_RESULT_ENOTFOUND:
@@ -2358,3 +2474,41 @@ out:
qtnf_bus_unlock(mac->bus);
return ret;
}
+
+int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef)
+{
+ struct qtnf_bus *bus = vif->mac->bus;
+ const struct qlink_resp_channel_get *resp;
+ struct sk_buff *cmd_skb;
+ struct sk_buff *resp_skb = NULL;
+ u16 res_code = QLINK_CMD_RESULT_OK;
+ int ret;
+
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
+ QLINK_CMD_CHAN_GET,
+ sizeof(struct qlink_cmd));
+ if (unlikely(!cmd_skb))
+ return -ENOMEM;
+
+ qtnf_bus_lock(bus);
+
+ ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code,
+ sizeof(*resp), NULL);
+
+ qtnf_bus_unlock(bus);
+
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(res_code != QLINK_CMD_RESULT_OK)) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ resp = (const struct qlink_resp_channel_get *)resp_skb->data;
+ qlink_chandef_q2cfg(priv_to_wiphy(vif->mac), &resp->chan, chdef);
+
+out:
+ consume_skb(resp_skb);
+ return ret;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 783b20364296..d981a76e5835 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -30,11 +30,11 @@ int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype,
int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
enum nl80211_iftype iftype, u8 *mac_addr);
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif);
-int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
- struct ieee80211_supported_band *band);
+int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
+ struct ieee80211_supported_band *band);
int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2);
-int qtnf_cmd_send_config_ap(struct qtnf_vif *vif);
-int qtnf_cmd_send_start_ap(struct qtnf_vif *vif);
+int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
+ const struct cfg80211_ap_settings *s);
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif);
int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg);
int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
@@ -73,7 +73,8 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
-int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac,
+int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
struct cfg80211_csa_settings *params);
+int qtnf_cmd_get_channel(struct qtnf_vif *vif, struct cfg80211_chan_def *chdef);
#endif /* QLINK_COMMANDS_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 5e60180482d1..3423dc51198b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -171,7 +171,7 @@ static int qtnf_mac_init_single_band(struct wiphy *wiphy,
wiphy->bands[band]->band = band;
- ret = qtnf_cmd_get_mac_chan_info(mac, wiphy->bands[band]);
+ ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
if (ret) {
pr_err("MAC%u: band %u: failed to get chans info: %d\n",
mac->macid, band, ret);
@@ -179,7 +179,6 @@ static int qtnf_mac_init_single_band(struct wiphy *wiphy,
}
qtnf_band_init_rates(wiphy->bands[band]);
- qtnf_band_setup_htvht_caps(&mac->macinfo, wiphy->bands[band]);
return 0;
}
@@ -289,7 +288,7 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- init_timer(&mac->scan_timeout);
+ timer_setup(&mac->scan_timeout, NULL, 0);
}
qtnf_mac_init_primary_intf(mac);
@@ -618,6 +617,33 @@ out:
}
EXPORT_SYMBOL_GPL(qtnf_classify_skb);
+void qtnf_wake_all_queues(struct net_device *ndev)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
+ struct qtnf_wmac *mac;
+ struct qtnf_bus *bus;
+ int macid;
+ int i;
+
+ if (unlikely(!vif || !vif->mac || !vif->mac->bus))
+ return;
+
+ bus = vif->mac->bus;
+
+ for (macid = 0; macid < QTNF_MAX_MAC; macid++) {
+ if (!(bus->hw_info.mac_bitmap & BIT(macid)))
+ continue;
+
+ mac = bus->mac[macid];
+ for (i = 0; i < QTNF_MAX_INTF; i++) {
+ vif = &mac->iflist[i];
+ if (vif->netdev && netif_queue_stopped(vif->netdev))
+ netif_tx_wake_all_queues(vif->netdev);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
+
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 066fcd1095a0..1b7bc0318f3e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -52,27 +52,11 @@
#define QTNF_DEF_WDOG_TIMEOUT 5
#define QTNF_TX_TIMEOUT_TRSHLD 100
-#define QTNF_STATE_AP_CONFIG BIT(2)
-#define QTNF_STATE_AP_START BIT(1)
-
extern const struct net_device_ops qtnf_netdev_ops;
+
struct qtnf_bus;
struct qtnf_vif;
-struct qtnf_bss_config {
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 bssid[ETH_ALEN];
- size_t ssid_len;
- u8 dtim;
- u16 bcn_period;
- u16 auth_type;
- bool privacy;
- enum nl80211_mfp mfp;
- struct cfg80211_crypto_settings crypto;
- u16 bg_scan_period;
- u32 connect_flags;
-};
-
struct qtnf_sta_node {
struct list_head list;
u8 mac_addr[ETH_ALEN];
@@ -89,12 +73,10 @@ enum qtnf_sta_state {
QTNF_STA_CONNECTED
};
-enum qtnf_mac_status {
- QTNF_MAC_CSA_ACTIVE = BIT(0)
-};
-
struct qtnf_vif {
struct wireless_dev wdev;
+ u8 bssid[ETH_ALEN];
+ u8 mac_addr[ETH_ALEN];
u8 vifid;
u8 bss_priority;
u8 bss_status;
@@ -102,16 +84,14 @@ struct qtnf_vif {
u16 mgmt_frames_bitmask;
struct net_device *netdev;
struct qtnf_wmac *mac;
- u8 mac_addr[ETH_ALEN];
+
struct work_struct reset_work;
- struct qtnf_bss_config bss_cfg;
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
};
struct qtnf_mac_info {
u8 bands_cap;
- u8 phymode_cap;
u8 dev_mac[ETH_ALEN];
u8 num_tx_chain;
u8 num_rx_chain;
@@ -122,8 +102,8 @@ struct qtnf_mac_info {
u8 sretry_limit;
u8 coverage_class;
u8 radar_detect_widths;
- struct ieee80211_ht_cap ht_cap;
- struct ieee80211_vht_cap vht_cap;
+ struct ieee80211_ht_cap ht_cap_mod_mask;
+ struct ieee80211_vht_cap vht_cap_mod_mask;
struct ieee80211_iface_limit *limits;
size_t n_limits;
};
@@ -141,13 +121,10 @@ struct qtnf_wmac {
u8 macid;
u8 wiphy_registered;
u8 macaddr[ETH_ALEN];
- u32 status;
struct qtnf_bus *bus;
struct qtnf_mac_info macinfo;
struct qtnf_vif iflist[QTNF_MAX_INTF];
struct cfg80211_scan_request *scan_req;
- struct cfg80211_chan_def chandef;
- struct cfg80211_chan_def csa_chandef;
struct mutex mac_lock; /* lock during wmac specific ops */
struct timer_list scan_timeout;
};
@@ -175,9 +152,7 @@ int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
-struct net_device *qtnf_classify_skb_no_mbss(struct qtnf_bus *bus,
- struct sk_buff *skb);
-
+void qtnf_wake_all_queues(struct net_device *ndev);
void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 43d2e7fd6e02..4abc6d9ed560 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -25,6 +25,7 @@
#include "trans.h"
#include "util.h"
#include "event.h"
+#include "qlink_util.h"
static int
qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
@@ -52,12 +53,6 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
return -EPROTO;
}
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_err("VIF%u.%u: STA_ASSOC event when AP is not started\n",
- mac->macid, vif->vifid);
- return -EPROTO;
- }
-
sta_addr = sta_assoc->sta_addr;
frame_control = le16_to_cpu(sta_assoc->frame_control);
@@ -70,34 +65,39 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif,
sinfo.assoc_req_ies_len = 0;
payload_len = len - sizeof(*sta_assoc);
- tlv = (struct qlink_tlv_hdr *)sta_assoc->ies;
+ tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies;
- while (payload_len >= sizeof(struct qlink_tlv_hdr)) {
+ while (payload_len >= sizeof(*tlv)) {
tlv_type = le16_to_cpu(tlv->type);
tlv_value_len = le16_to_cpu(tlv->len);
tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len) {
- pr_warn("VIF%u.%u: malformed TLV 0x%.2X; LEN: %u\n",
- mac->macid, vif->vifid, tlv_type,
- tlv_value_len);
+ if (tlv_full_len > payload_len)
return -EINVAL;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
- sinfo.assoc_req_ies = tlv->val;
- sinfo.assoc_req_ies_len = tlv_value_len;
+ const struct qlink_tlv_ie_set *ie_set;
+ unsigned int ie_len;
+
+ if (payload_len < sizeof(*ie_set))
+ return -EINVAL;
+
+ ie_set = (const struct qlink_tlv_ie_set *)tlv;
+ ie_len = tlv_value_len -
+ (sizeof(*ie_set) - sizeof(ie_set->hdr));
+
+ if (ie_set->type == QLINK_IE_SET_ASSOC_REQ && ie_len) {
+ sinfo.assoc_req_ies = ie_set->ie_data;
+ sinfo.assoc_req_ies_len = ie_len;
+ }
}
payload_len -= tlv_full_len;
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len) {
- pr_warn("VIF%u.%u: malformed TLV buf; bytes left: %zu\n",
- mac->macid, vif->vifid, payload_len);
+ if (payload_len)
return -EINVAL;
- }
cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, &sinfo,
GFP_KERNEL);
@@ -126,12 +126,6 @@ qtnf_event_handle_sta_deauth(struct qtnf_wmac *mac, struct qtnf_vif *vif,
return -EPROTO;
}
- if (!(vif->bss_status & QTNF_STATE_AP_START)) {
- pr_err("VIF%u.%u: STA_DEAUTH event when AP is not started\n",
- mac->macid, vif->vifid);
- return -EPROTO;
- }
-
sta_addr = sta_deauth->sta_addr;
reason = le16_to_cpu(sta_deauth->reason);
@@ -258,13 +252,12 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
struct cfg80211_bss *bss;
struct ieee80211_channel *channel;
struct wiphy *wiphy = priv_to_wiphy(vif->mac);
- enum cfg80211_bss_frame_type frame_type;
+ enum cfg80211_bss_frame_type frame_type = CFG80211_BSS_FTYPE_UNKNOWN;
size_t payload_len;
u16 tlv_type;
u16 tlv_value_len;
size_t tlv_full_len;
const struct qlink_tlv_hdr *tlv;
-
const u8 *ies = NULL;
size_t ies_len = 0;
@@ -281,17 +274,6 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
return -EINVAL;
}
- switch (sr->frame_type) {
- case QLINK_BSS_FTYPE_BEACON:
- frame_type = CFG80211_BSS_FTYPE_BEACON;
- break;
- case QLINK_BSS_FTYPE_PRESP:
- frame_type = CFG80211_BSS_FTYPE_PRESP;
- break;
- default:
- frame_type = CFG80211_BSS_FTYPE_UNKNOWN;
- }
-
payload_len = len - sizeof(*sr);
tlv = (struct qlink_tlv_hdr *)sr->payload;
@@ -300,27 +282,43 @@ qtnf_event_handle_scan_results(struct qtnf_vif *vif,
tlv_value_len = le16_to_cpu(tlv->len);
tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr);
- if (tlv_full_len > payload_len) {
- pr_warn("VIF%u.%u: malformed TLV 0x%.2X; LEN: %u\n",
- vif->mac->macid, vif->vifid, tlv_type,
- tlv_value_len);
+ if (tlv_full_len > payload_len)
return -EINVAL;
- }
if (tlv_type == QTN_TLV_ID_IE_SET) {
- ies = tlv->val;
- ies_len = tlv_value_len;
+ const struct qlink_tlv_ie_set *ie_set;
+ unsigned int ie_len;
+
+ if (payload_len < sizeof(*ie_set))
+ return -EINVAL;
+
+ ie_set = (const struct qlink_tlv_ie_set *)tlv;
+ ie_len = tlv_value_len -
+ (sizeof(*ie_set) - sizeof(ie_set->hdr));
+
+ switch (ie_set->type) {
+ case QLINK_IE_SET_BEACON_IES:
+ frame_type = CFG80211_BSS_FTYPE_BEACON;
+ break;
+ case QLINK_IE_SET_PROBE_RESP_IES:
+ frame_type = CFG80211_BSS_FTYPE_PRESP;
+ break;
+ default:
+ frame_type = CFG80211_BSS_FTYPE_UNKNOWN;
+ }
+
+ if (ie_len) {
+ ies = ie_set->ie_data;
+ ies_len = ie_len;
+ }
}
payload_len -= tlv_full_len;
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (payload_len) {
- pr_warn("VIF%u.%u: malformed TLV buf; bytes left: %zu\n",
- vif->mac->macid, vif->vifid, payload_len);
+ if (payload_len)
return -EINVAL;
- }
bss = cfg80211_inform_bss(wiphy, channel, frame_type,
sr->bssid, get_unaligned_le64(&sr->tsf),
@@ -357,40 +355,29 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
{
struct wiphy *wiphy = priv_to_wiphy(mac);
struct cfg80211_chan_def chandef;
- struct ieee80211_channel *chan;
struct qtnf_vif *vif;
- int freq;
int i;
if (len < sizeof(*data)) {
- pr_err("payload is too short\n");
+ pr_err("MAC%u: payload is too short\n", mac->macid);
return -EINVAL;
}
- freq = le32_to_cpu(data->freq);
- chan = ieee80211_get_channel(wiphy, freq);
- if (!chan) {
- pr_err("channel at %d MHz not found\n", freq);
- return -EINVAL;
- }
+ if (!wiphy->registered)
+ return 0;
- pr_debug("MAC%d switch to new channel %u MHz\n", mac->macid, freq);
+ qlink_chandef_q2cfg(wiphy, &data->chan, &chandef);
- if (mac->status & QTNF_MAC_CSA_ACTIVE) {
- mac->status &= ~QTNF_MAC_CSA_ACTIVE;
- if (chan->hw_value != mac->csa_chandef.chan->hw_value)
- pr_warn("unexpected switch to %u during CSA to %u\n",
- chan->hw_value,
- mac->csa_chandef.chan->hw_value);
+ if (!cfg80211_chandef_valid(&chandef)) {
+ pr_err("MAC%u: bad channel f1=%u f2=%u bw=%u\n", mac->macid,
+ chandef.center_freq1, chandef.center_freq2,
+ chandef.width);
+ return -EINVAL;
}
- /* FIXME: need to figure out proper nl80211_channel_type value */
- cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
- /* fall-back to minimal safe chandef description */
- if (!cfg80211_chandef_valid(&chandef))
- cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
-
- memcpy(&mac->chandef, &chandef, sizeof(mac->chandef));
+ pr_debug("MAC%d: new channel ieee=%u freq1=%u freq2=%u bw=%u\n",
+ mac->macid, chandef.chan->hw_value, chandef.center_freq1,
+ chandef.center_freq2, chandef.width);
for (i = 0; i < QTNF_MAX_INTF; i++) {
vif = &mac->iflist[i];
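The two parsing hunks above derive the IE payload length of a QTN_TLV_ID_IE_SET TLV by subtracting the per-IE-set header bytes from the TLV value length. A rough worked example, assuming the usual 4-byte qlink_tlv_hdr (16-bit type plus 16-bit length); the sizes are illustrative, not taken from the patch:

	/* One QTN_TLV_ID_IE_SET TLV carrying 7 bytes of IE data:
	 *
	 *   hdr.type      = QTN_TLV_ID_IE_SET
	 *   hdr.len       = 9                  (type + flags + 7 IE bytes)
	 *   type          = QLINK_IE_SET_ASSOC_REQ
	 *   flags         = 0
	 *   ie_data[0..6] = the IEs themselves
	 *
	 * ie_len = tlv_value_len - (sizeof(struct qlink_tlv_ie_set) -
	 *                           sizeof(struct qlink_tlv_hdr))
	 *        = 9 - (6 - 4) = 7
	 */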
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 69131965a298..7e487622d87d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -617,9 +617,10 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
if (skb->dev) {
skb->dev->stats.tx_packets++;
skb->dev->stats.tx_bytes += skb->len;
-
- if (netif_queue_stopped(skb->dev))
- netif_wake_queue(skb->dev);
+ if (unlikely(priv->tx_stopped)) {
+ qtnf_wake_all_queues(skb->dev);
+ priv->tx_stopped = 0;
+ }
}
dev_kfree_skb_any(skb);
@@ -643,11 +644,11 @@ static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv)
{
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
priv->tx_bd_num)) {
- pr_err_ratelimited("reclaim full Tx queue\n");
qtnf_pcie_data_tx_reclaim(priv);
if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
priv->tx_bd_num)) {
+ pr_warn_ratelimited("reclaim full Tx queue\n");
priv->tx_full_count++;
return 0;
}
@@ -669,8 +670,10 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
spin_lock_irqsave(&priv->tx0_lock, flags);
if (!qtnf_tx_queue_ready(priv)) {
- if (skb->dev)
- netif_stop_queue(skb->dev);
+ if (skb->dev) {
+ netif_tx_stop_all_queues(skb->dev);
+ priv->tx_stopped = 1;
+ }
spin_unlock_irqrestore(&priv->tx0_lock, flags);
return NETDEV_TX_BUSY;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
index 86ac1ccedb52..397875a50fc2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -37,6 +37,7 @@ struct qtnf_pcie_bus_priv {
/* lock for tx0 operations */
spinlock_t tx0_lock;
u8 msi_enabled;
+ u8 tx_stopped;
int mps;
struct workqueue_struct *workqueue;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index a8242f678496..a432fb001c41 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -19,7 +19,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 5
+#define QLINK_PROTO_VER 6
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -61,14 +61,17 @@ struct qlink_msg_header {
/* Generic definitions of data and information carried in QLINK messages
*/
+/**
+ * enum qlink_hw_capab - device capabilities.
+ *
+ * @QLINK_HW_CAPAB_REG_UPDATE: device can update its regulatory region.
+ * @QLINK_HW_CAPAB_STA_INACT_TIMEOUT: device implements logic to kick out
+ * associated STAs due to inactivity. Inactivity timeout period is taken
+ * from QLINK_CMD_START_AP parameters.
+ */
enum qlink_hw_capab {
- QLINK_HW_SUPPORTS_REG_UPDATE = BIT(0),
-};
-
-enum qlink_phy_mode {
- QLINK_PHYMODE_BGN = BIT(0),
- QLINK_PHYMODE_AN = BIT(1),
- QLINK_PHYMODE_AC = BIT(2),
+ QLINK_HW_CAPAB_REG_UPDATE = BIT(0),
+ QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1),
};
enum qlink_iface_type {
@@ -108,16 +111,48 @@ enum qlink_sta_flags {
};
enum qlink_channel_width {
- QLINK_CHAN_WIDTH_5 = BIT(0),
- QLINK_CHAN_WIDTH_10 = BIT(1),
- QLINK_CHAN_WIDTH_20_NOHT = BIT(2),
- QLINK_CHAN_WIDTH_20 = BIT(3),
- QLINK_CHAN_WIDTH_40 = BIT(4),
- QLINK_CHAN_WIDTH_80 = BIT(5),
- QLINK_CHAN_WIDTH_80P80 = BIT(6),
- QLINK_CHAN_WIDTH_160 = BIT(7),
+ QLINK_CHAN_WIDTH_5 = 0,
+ QLINK_CHAN_WIDTH_10,
+ QLINK_CHAN_WIDTH_20_NOHT,
+ QLINK_CHAN_WIDTH_20,
+ QLINK_CHAN_WIDTH_40,
+ QLINK_CHAN_WIDTH_80,
+ QLINK_CHAN_WIDTH_80P80,
+ QLINK_CHAN_WIDTH_160,
};
+/**
+ * struct qlink_chandef - qlink channel definition
+ *
+ * @center_freq1: center frequency of first segment
+ * @center_freq2: center frequency of second segment (80+80 only)
+ * @width: channel width, one of &enum qlink_channel_width
+ */
+struct qlink_chandef {
+ __le16 center_freq1;
+ __le16 center_freq2;
+ u8 width;
+ u8 rsvd[3];
+} __packed;
+
+#define QLINK_MAX_NR_CIPHER_SUITES 5
+#define QLINK_MAX_NR_AKM_SUITES 2
+
+struct qlink_auth_encr {
+ __le32 wpa_versions;
+ __le32 cipher_group;
+ __le32 n_ciphers_pairwise;
+ __le32 ciphers_pairwise[QLINK_MAX_NR_CIPHER_SUITES];
+ __le32 n_akm_suites;
+ __le32 akm_suites[QLINK_MAX_NR_AKM_SUITES];
+ __le16 control_port_ethertype;
+ u8 auth_type;
+ u8 privacy;
+ u8 control_port;
+ u8 control_port_no_encrypt;
+ u8 rsvd[2];
+} __packed;
+
/* QLINK Command messages related definitions
*/
@@ -127,11 +162,12 @@ enum qlink_channel_width {
* Commands are QLINK messages of type @QLINK_MSG_TYPE_CMD, sent by driver to
* wireless network device for processing. Device is expected to send back a
* reply message of type &QLINK_MSG_TYPE_CMDRSP, containing at least command
- * execution status (one of &enum qlink_cmd_result) at least. Reply message
+ * execution status (one of &enum qlink_cmd_result). Reply message
* may also contain data payload specific to the command type.
*
- * @QLINK_CMD_CHANS_INFO_GET: for the specified MAC and specified band, get
- * number of operational channels and information on each of the channel.
+ * @QLINK_CMD_BAND_INFO_GET: for the specified MAC and specified band, get
+ * the band's description, including the number of operational channels and
+ * info on each channel, HT/VHT capabilities, supported rates, etc.
* This command is generic to a specified MAC, interface index must be set
* to QLINK_VIFID_RSVD in command header.
* @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This
@@ -153,9 +189,9 @@ enum qlink_cmd_type {
QLINK_CMD_CHANGE_INTF = 0x0017,
QLINK_CMD_UPDOWN_INTF = 0x0018,
QLINK_CMD_REG_NOTIFY = 0x0019,
- QLINK_CMD_CHANS_INFO_GET = 0x001A,
+ QLINK_CMD_BAND_INFO_GET = 0x001A,
QLINK_CMD_CHAN_SWITCH = 0x001B,
- QLINK_CMD_CONFIG_AP = 0x0020,
+ QLINK_CMD_CHAN_GET = 0x001C,
QLINK_CMD_START_AP = 0x0021,
QLINK_CMD_STOP_AP = 0x0022,
QLINK_CMD_GET_STA_INFO = 0x0030,
@@ -262,21 +298,6 @@ struct qlink_cmd_mgmt_frame_tx {
} __packed;
/**
- * struct qlink_cmd_mgmt_append_ie - data for QLINK_CMD_MGMT_SET_APPIE command
- *
- * @type: type of MGMT frame to appent requested IEs to, one of
- * &enum qlink_mgmt_frame_type.
- * @flags: for future use.
- * @ie_data: IEs data to append.
- */
-struct qlink_cmd_mgmt_append_ie {
- struct qlink_cmd chdr;
- u8 type;
- u8 flags;
- u8 ie_data[0];
-} __packed;
-
-/**
* struct qlink_cmd_get_sta_info - data for QLINK_CMD_GET_STA_INFO command
*
* @sta_addr: MAC address of the STA statistics is requested for.
@@ -383,18 +404,36 @@ enum qlink_sta_connect_flags {
/**
* struct qlink_cmd_connect - data for QLINK_CMD_CONNECT command
*
- * @flags: for future use.
- * @freq: center frequence of a channel which should be used to connect.
- * @bg_scan_period: period of background scan.
* @bssid: BSSID of the BSS to connect to.
+ * @bssid_hint: recommended AP BSSID for initial connection to the BSS, or
+ * 00:00:00:00:00:00 if not specified.
+ * @prev_bssid: previous BSSID; if specified (not 00:00:00:00:00:00), indicates
+ * a request to reassociate.
+ * @bg_scan_period: period of background scan.
+ * @flags: bitmap of &enum qlink_sta_connect_flags values.
+ * @ht_capa: HT capability overrides.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
+ * @vht_capa: VHT capability overrides.
+ * @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @aen: authentication information.
+ * @mfp: whether to use management frame protection.
* @payload: variable portion of connection request.
*/
struct qlink_cmd_connect {
struct qlink_cmd chdr;
- __le32 flags;
- __le16 channel;
- __le16 bg_scan_period;
u8 bssid[ETH_ALEN];
+ u8 bssid_hint[ETH_ALEN];
+ u8 prev_bssid[ETH_ALEN];
+ __le16 bg_scan_period;
+ __le32 flags;
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
+ struct ieee80211_vht_cap vht_capa;
+ struct ieee80211_vht_cap vht_capa_mask;
+ struct qlink_auth_encr aen;
+ u8 mfp;
+ u8 pbss;
+ u8 rsvd[2];
u8 payload[0];
} __packed;
@@ -433,11 +472,11 @@ enum qlink_band {
};
/**
- * struct qlink_cmd_chans_info_get - data for QLINK_CMD_CHANS_INFO_GET command
+ * struct qlink_cmd_band_info_get - data for QLINK_CMD_BAND_INFO_GET command
*
- * @band: a PHY band for which channels info is needed, one of @enum qlink_band
+ * @band: a PHY band for which information is queried, one of &enum qlink_band
*/
-struct qlink_cmd_chans_info_get {
+struct qlink_cmd_band_info_get {
struct qlink_cmd chdr;
u8 band;
} __packed;
@@ -506,6 +545,46 @@ struct qlink_cmd_chan_switch {
u8 beacon_count;
} __packed;
+/**
+ * enum qlink_hidden_ssid - values for %NL80211_ATTR_HIDDEN_SSID
+ *
+ * Refer to &enum nl80211_hidden_ssid
+ */
+enum qlink_hidden_ssid {
+ QLINK_HIDDEN_SSID_NOT_IN_USE,
+ QLINK_HIDDEN_SSID_ZERO_LEN,
+ QLINK_HIDDEN_SSID_ZERO_CONTENTS
+};
+
+/**
+ * struct qlink_cmd_start_ap - data for QLINK_CMD_START_AP command
+ *
+ * @beacon_interval: beacon interval
+ * @inactivity_timeout: station's inactivity period in seconds
+ * @dtim_period: DTIM period
+ * @hidden_ssid: whether to hide the SSID, one of &enum qlink_hidden_ssid
+ * @smps_mode: SMPS mode
+ * @ht_required: stations must support HT
+ * @vht_required: stations must support VHT
+ * @aen: encryption info
+ * @info: variable portion of AP configuration
+ */
+struct qlink_cmd_start_ap {
+ struct qlink_cmd chdr;
+ __le16 beacon_interval;
+ __le16 inactivity_timeout;
+ u8 dtim_period;
+ u8 hidden_ssid;
+ u8 smps_mode;
+ u8 p2p_ctwindow;
+ u8 p2p_opp_ps;
+ u8 pbss;
+ u8 ht_required;
+ u8 vht_required;
+ struct qlink_auth_encr aen;
+ u8 info[0];
+} __packed;
+
/* QLINK Command Responses messages related definitions
*/
@@ -551,10 +630,9 @@ struct qlink_resp {
* specified WMAC).
* @num_tx_chain: Number of transmit chains used by WMAC.
* @num_rx_chain: Number of receive chains used by WMAC.
- * @vht_cap: VHT capabilities.
- * @ht_cap: HT capabilities.
+ * @vht_cap_mod_mask: mask specifying which VHT capabilities can be altered.
+ * @ht_cap_mod_mask: mask specifying which HT capabilities can be altered.
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
- * @phymode_cap: PHY modes WMAC can operate in, bitmap of &enum qlink_phy_mode.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
* @var_info: variable-length WMAC info data.
@@ -564,12 +642,12 @@ struct qlink_resp_get_mac_info {
u8 dev_mac[ETH_ALEN];
u8 num_tx_chain;
u8 num_rx_chain;
- struct ieee80211_vht_cap vht_cap;
- struct ieee80211_ht_cap ht_cap;
- u8 bands_cap;
- u8 phymode_cap;
+ struct ieee80211_vht_cap vht_cap_mod_mask;
+ struct ieee80211_ht_cap ht_cap_mod_mask;
__le16 max_ap_assoc_sta;
__le16 radar_detect_widths;
+ u8 bands_cap;
+ u8 rsvd[1];
u8 var_info[0];
} __packed;
@@ -646,17 +724,19 @@ struct qlink_resp_get_sta_info {
} __packed;
/**
- * struct qlink_resp_get_chan_info - response for QLINK_CMD_CHANS_INFO_GET cmd
+ * struct qlink_resp_band_info_get - response for QLINK_CMD_BAND_INFO_GET cmd
*
- * @band: frequency band to which channels belong to, one of @enum qlink_band.
- * @num_chans: total number of channels info data contained in reply data.
- * @info: variable-length channels info.
+ * @band: frequency band that the response describes, one of &enum qlink_band.
+ * @num_chans: total number of channel info TLVs contained in reply.
+ * @num_bitrates: total number of bitrate TLVs contained in reply.
+ * @info: variable-length info portion.
*/
-struct qlink_resp_get_chan_info {
+struct qlink_resp_band_info_get {
struct qlink_resp rhdr;
u8 band;
u8 num_chans;
- u8 rsvd[2];
+ u8 num_bitrates;
+ u8 rsvd[1];
u8 info[0];
} __packed;
@@ -680,6 +760,16 @@ struct qlink_resp_get_chan_stats {
u8 info[0];
} __packed;
+/**
+ * struct qlink_resp_channel_get - response for QLINK_CMD_CHAN_GET command
+ *
+ * @chan: definition of current operating channel.
+ */
+struct qlink_resp_channel_get {
+ struct qlink_resp rhdr;
+ struct qlink_chandef chan;
+} __packed;
+
/* QLINK Events messages related definitions
*/
@@ -764,11 +854,11 @@ struct qlink_event_bss_leave {
/**
* struct qlink_event_freq_change - data for QLINK_EVENT_FREQ_CHANGE event
*
- * @freq: new operating frequency in MHz
+ * @chan: new operating channel definition
*/
struct qlink_event_freq_change {
struct qlink_event ehdr;
- __le32 freq;
+ struct qlink_chandef chan;
} __packed;
enum qlink_rxmgmt_flags {
@@ -791,12 +881,6 @@ struct qlink_event_rxmgmt {
u8 frame_data[0];
} __packed;
-enum qlink_frame_type {
- QLINK_BSS_FTYPE_UNKNOWN,
- QLINK_BSS_FTYPE_BEACON,
- QLINK_BSS_FTYPE_PRESP,
-};
-
/**
* struct qlink_event_scan_result - data for QLINK_EVENT_SCAN_RESULTS event
*
@@ -806,7 +890,6 @@ enum qlink_frame_type {
* @capab: capabilities field.
* @bintval: beacon interval announced by discovered BSS.
* @signal: signal strength.
- * @frame_type: frame type used to get scan result, see &enum qlink_frame_type.
* @bssid: BSSID announced by discovered BSS.
* @ssid_len: length of SSID announced by BSS.
* @ssid: SSID announced by discovered BSS.
@@ -819,10 +902,10 @@ struct qlink_event_scan_result {
__le16 capab;
__le16 bintval;
s8 signal;
- u8 frame_type;
- u8 bssid[ETH_ALEN];
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 bssid[ETH_ALEN];
+ u8 rsvd[2];
u8 payload[0];
} __packed;
@@ -856,10 +939,9 @@ enum qlink_tlv_id {
QTN_TLV_ID_RTS_THRESH = 0x0202,
QTN_TLV_ID_SRETRY_LIMIT = 0x0203,
QTN_TLV_ID_LRETRY_LIMIT = 0x0204,
- QTN_TLV_ID_BCN_PERIOD = 0x0205,
- QTN_TLV_ID_DTIM = 0x0206,
QTN_TLV_ID_REG_RULE = 0x0207,
QTN_TLV_ID_CHANNEL = 0x020F,
+ QTN_TLV_ID_CHANDEF = 0x0210,
QTN_TLV_ID_COVERAGE_CLASS = 0x0213,
QTN_TLV_ID_IFACE_LIMIT = 0x0214,
QTN_TLV_ID_NUM_IFACE_COMB = 0x0215,
@@ -868,7 +950,6 @@ enum qlink_tlv_id {
QTN_TLV_ID_STA_GENERIC_INFO = 0x0301,
QTN_TLV_ID_KEY = 0x0302,
QTN_TLV_ID_SEQ = 0x0303,
- QTN_TLV_ID_CRYPTO = 0x0304,
QTN_TLV_ID_IE_SET = 0x0305,
};
@@ -1047,22 +1128,43 @@ struct qlink_tlv_channel {
u8 rsvd[2];
} __packed;
-#define QLINK_MAX_NR_CIPHER_SUITES 5
-#define QLINK_MAX_NR_AKM_SUITES 2
+/**
+ * struct qlink_tlv_chandef - data for QTN_TLV_ID_CHANDEF TLV
+ *
+ * Channel definition.
+ *
+ * @chan: channel definition data.
+ */
+struct qlink_tlv_chandef {
+ struct qlink_tlv_hdr hdr;
+ struct qlink_chandef chan;
+} __packed;
-struct qlink_auth_encr {
- __le32 wpa_versions;
- __le32 cipher_group;
- __le32 n_ciphers_pairwise;
- __le32 ciphers_pairwise[QLINK_MAX_NR_CIPHER_SUITES];
- __le32 n_akm_suites;
- __le32 akm_suites[QLINK_MAX_NR_AKM_SUITES];
- __le16 control_port_ethertype;
- u8 auth_type;
- u8 privacy;
- u8 mfp;
- u8 control_port;
- u8 control_port_no_encrypt;
+enum qlink_ie_set_type {
+ QLINK_IE_SET_UNKNOWN,
+ QLINK_IE_SET_ASSOC_REQ,
+ QLINK_IE_SET_ASSOC_RESP,
+ QLINK_IE_SET_PROBE_REQ,
+ QLINK_IE_SET_SCAN,
+ QLINK_IE_SET_BEACON_HEAD,
+ QLINK_IE_SET_BEACON_TAIL,
+ QLINK_IE_SET_BEACON_IES,
+ QLINK_IE_SET_PROBE_RESP,
+ QLINK_IE_SET_PROBE_RESP_IES,
+};
+
+/**
+ * struct qlink_tlv_ie_set - data for QTN_TLV_ID_IE_SET
+ *
+ * @type: type of MGMT frame IEs belong to, one of &enum qlink_ie_set_type.
+ * @flags: for future use.
+ * @ie_data: IEs data.
+ */
+struct qlink_tlv_ie_set {
+ struct qlink_tlv_hdr hdr;
+ u8 type;
+ u8 flags;
+ u8 ie_data[0];
} __packed;
struct qlink_chan_stats {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index cf024c995fd6..61d999affb09 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -49,29 +49,126 @@ u8 qlink_chan_width_mask_to_nl(u16 qlink_mask)
{
u8 result = 0;
- if (qlink_mask & QLINK_CHAN_WIDTH_5)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_5))
result |= BIT(NL80211_CHAN_WIDTH_5);
- if (qlink_mask & QLINK_CHAN_WIDTH_10)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_10))
result |= BIT(NL80211_CHAN_WIDTH_10);
- if (qlink_mask & QLINK_CHAN_WIDTH_20_NOHT)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_20_NOHT))
result |= BIT(NL80211_CHAN_WIDTH_20_NOHT);
- if (qlink_mask & QLINK_CHAN_WIDTH_20)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_20))
result |= BIT(NL80211_CHAN_WIDTH_20);
- if (qlink_mask & QLINK_CHAN_WIDTH_40)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_40))
result |= BIT(NL80211_CHAN_WIDTH_40);
- if (qlink_mask & QLINK_CHAN_WIDTH_80)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_80))
result |= BIT(NL80211_CHAN_WIDTH_80);
- if (qlink_mask & QLINK_CHAN_WIDTH_80P80)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_80P80))
result |= BIT(NL80211_CHAN_WIDTH_80P80);
- if (qlink_mask & QLINK_CHAN_WIDTH_160)
+ if (qlink_mask & BIT(QLINK_CHAN_WIDTH_160))
result |= BIT(NL80211_CHAN_WIDTH_160);
return result;
}
+
+static enum nl80211_chan_width qlink_chanwidth_to_nl(u8 qlw)
+{
+ switch (qlw) {
+ case QLINK_CHAN_WIDTH_20_NOHT:
+ return NL80211_CHAN_WIDTH_20_NOHT;
+ case QLINK_CHAN_WIDTH_20:
+ return NL80211_CHAN_WIDTH_20;
+ case QLINK_CHAN_WIDTH_40:
+ return NL80211_CHAN_WIDTH_40;
+ case QLINK_CHAN_WIDTH_80:
+ return NL80211_CHAN_WIDTH_80;
+ case QLINK_CHAN_WIDTH_80P80:
+ return NL80211_CHAN_WIDTH_80P80;
+ case QLINK_CHAN_WIDTH_160:
+ return NL80211_CHAN_WIDTH_160;
+ case QLINK_CHAN_WIDTH_5:
+ return NL80211_CHAN_WIDTH_5;
+ case QLINK_CHAN_WIDTH_10:
+ return NL80211_CHAN_WIDTH_10;
+ default:
+ return -1;
+ }
+}
+
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+ const struct qlink_chandef *qch,
+ struct cfg80211_chan_def *chdef)
+{
+ chdef->center_freq1 = le16_to_cpu(qch->center_freq1);
+ chdef->center_freq2 = le16_to_cpu(qch->center_freq2);
+ chdef->width = qlink_chanwidth_to_nl(qch->width);
+
+ switch (chdef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ chdef->chan = ieee80211_get_channel(wiphy, chdef->center_freq1);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ chdef->chan = ieee80211_get_channel(wiphy,
+ chdef->center_freq1 - 10);
+ break;
+ default:
+ chdef->chan = NULL;
+ break;
+ }
+}
+
+static u8 qlink_chanwidth_nl_to_qlink(enum nl80211_chan_width nlwidth)
+{
+ switch (nlwidth) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ return QLINK_CHAN_WIDTH_20_NOHT;
+ case NL80211_CHAN_WIDTH_20:
+ return QLINK_CHAN_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return QLINK_CHAN_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return QLINK_CHAN_WIDTH_80;
+ case NL80211_CHAN_WIDTH_80P80:
+ return QLINK_CHAN_WIDTH_80P80;
+ case NL80211_CHAN_WIDTH_160:
+ return QLINK_CHAN_WIDTH_160;
+ case NL80211_CHAN_WIDTH_5:
+ return QLINK_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return QLINK_CHAN_WIDTH_10;
+ default:
+ return -1;
+ }
+}
+
+void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
+ struct qlink_chandef *qch)
+{
+ qch->center_freq1 = cpu_to_le16(chdef->center_freq1);
+ qch->center_freq2 = cpu_to_le16(chdef->center_freq2);
+ qch->width = qlink_chanwidth_nl_to_qlink(chdef->width);
+}
+
+enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val)
+{
+ switch (nl_val) {
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ return QLINK_HIDDEN_SSID_ZERO_LEN;
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ return QLINK_HIDDEN_SSID_ZERO_CONTENTS;
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ default:
+ return QLINK_HIDDEN_SSID_NOT_IN_USE;
+ }
+}
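The q2cfg conversion above resolves the primary channel from center_freq1, subtracting 10 MHz for the 40 MHz and wider widths. A small sketch using the helpers added in this file; the frequencies and the wiphy pointer are assumed for illustration and are not part of the patch:

	struct qlink_chandef qch = {
		.center_freq1 = cpu_to_le16(5190),	/* HT40+: primary 5180 MHz + 10 */
		.center_freq2 = 0,
		.width = QLINK_CHAN_WIDTH_40,
	};
	struct cfg80211_chan_def chdef;

	qlink_chandef_q2cfg(wiphy, &qch, &chdef);
	/* chdef.width == NL80211_CHAN_WIDTH_40, chdef.center_freq1 == 5190,
	 * and chdef.chan points at the wiphy's 5180 MHz (channel 36) entry.
	 */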
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index de06c1e20b5b..260383d6d109 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
+#include <net/cfg80211.h>
#include "qlink.h"
@@ -62,5 +63,11 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb,
u16 qlink_iface_type_to_nl_mask(u16 qlink_type);
u8 qlink_chan_width_mask_to_nl(u16 qlink_mask);
+void qlink_chandef_q2cfg(struct wiphy *wiphy,
+ const struct qlink_chandef *qch,
+ struct cfg80211_chan_def *chdef);
+void qlink_chandef_cfg2q(const struct cfg80211_chan_def *chdef,
+ struct qlink_chandef *qch);
+enum qlink_hidden_ssid qlink_hidden_ssid_nl2q(enum nl80211_hidden_ssid nl_val);
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/Makefile b/drivers/net/wireless/ralink/rt2x00/Makefile
index 24a66015a495..de030ebcdf6e 100644
--- a/drivers/net/wireless/ralink/rt2x00/Makefile
+++ b/drivers/net/wireless/ralink/rt2x00/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rt2x00lib-y += rt2x00dev.o
rt2x00lib-y += rt2x00mac.o
rt2x00lib-y += rt2x00config.o
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 51520a0e2138..f4fdad2ed319 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -164,13 +164,13 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct sk_buff *skbcopy;
struct rt2x00dump_hdr *dump_hdr;
- struct timeval timestamp;
+ struct timespec64 timestamp;
u32 data_len;
if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)))
return;
- do_gettimeofday(&timestamp);
+ ktime_get_ts64(&timestamp);
if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
rt2x00_dbg(rt2x00dev, "txrx dump queue length exceeded\n");
@@ -200,7 +200,8 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr->queue_index = entry->queue->qid;
dump_hdr->entry_index = entry->entry_idx;
dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
- dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
+ dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_nsec /
+ NSEC_PER_USEC);
if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB))
skb_put_data(skbcopy, skbdesc->desc, skbdesc->desc_len);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h b/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h
index 4c0e01b5d515..3b14eef0b646 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dump.h
@@ -106,7 +106,7 @@ enum rt2x00_dump_type {
*/
struct rt2x00dump_hdr {
__le32 version;
-#define DUMP_HEADER_VERSION 2
+#define DUMP_HEADER_VERSION 3
__le32 header_length;
__le32 desc_length;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index e2f4f5778267..086aad22743d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
if (status >= 0)
return 0;
- if (status == -ENODEV) {
+ if (status == -ENODEV || status == -ENOENT) {
/* Device has disappeared. */
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
break;
@@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV)
+ if (status == -ENODEV || status == -ENOENT)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
@@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV)
+ if (status == -ENODEV || status == -ENOENT)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 170cd504e8ff..0133fcd4601b 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -92,7 +92,7 @@ static const struct iw_handler_def ray_handler_def;
/***** Prototypes for raylink functions **************************************/
static void authenticate(ray_dev_t *local);
static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
-static void authenticate_timeout(u_long);
+static void authenticate_timeout(struct timer_list *t);
static int get_free_ccs(ray_dev_t *local);
static int get_free_tx_ccs(ray_dev_t *local);
static void init_startup_params(ray_dev_t *local);
@@ -102,7 +102,7 @@ static int ray_init(struct net_device *dev);
static int interrupt_ecf(ray_dev_t *local, int ccs);
static void ray_reset(struct net_device *dev);
static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len);
-static void verify_dl_startup(u_long);
+static void verify_dl_startup(struct timer_list *t);
/* Prototypes for interrpt time functions **********************************/
static irqreturn_t ray_interrupt(int reg, void *dev_id);
@@ -120,9 +120,8 @@ static void associate(ray_dev_t *local);
/* Card command functions */
static int dl_startup_params(struct net_device *dev);
-static void join_net(u_long local);
-static void start_net(u_long local);
-/* void start_net(ray_dev_t *local); */
+static void join_net(struct timer_list *t);
+static void start_net(struct timer_list *t);
/*===========================================================================*/
/* Parameters that can be set with 'insmod' */
@@ -323,7 +322,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.)\n");
netif_stop_queue(dev);
- init_timer(&local->timer);
+ timer_setup(&local->timer, NULL, 0);
this_device = p_dev;
return ray_config(p_dev);
@@ -570,7 +569,6 @@ static int dl_startup_params(struct net_device *dev)
local->card_status = CARD_DL_PARAM;
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
- local->timer.data = (long)local;
local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
@@ -641,9 +639,9 @@ static void init_startup_params(ray_dev_t *local)
} /* init_startup_params */
/*===========================================================================*/
-static void verify_dl_startup(u_long data)
+static void verify_dl_startup(struct timer_list *t)
{
- ray_dev_t *local = (ray_dev_t *) data;
+ ray_dev_t *local = from_timer(local, t, timer);
struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
UCHAR status;
struct pcmcia_device *link = local->finder;
@@ -676,16 +674,16 @@ static void verify_dl_startup(u_long data)
return;
}
if (local->sparm.b4.a_network_type == ADHOC)
- start_net((u_long) local);
+ start_net(&local->timer);
else
- join_net((u_long) local);
+ join_net(&local->timer);
} /* end verify_dl_startup */
/*===========================================================================*/
/* Command card to start a network */
-static void start_net(u_long data)
+static void start_net(struct timer_list *t)
{
- ray_dev_t *local = (ray_dev_t *) data;
+ ray_dev_t *local = from_timer(local, t, timer);
struct ccs __iomem *pccs;
int ccsindex;
struct pcmcia_device *link = local->finder;
@@ -710,9 +708,9 @@ static void start_net(u_long data)
/*===========================================================================*/
/* Command card to join a network */
-static void join_net(u_long data)
+static void join_net(struct timer_list *t)
{
- ray_dev_t *local = (ray_dev_t *) data;
+ ray_dev_t *local = from_timer(local, t, timer);
struct ccs __iomem *pccs;
int ccsindex;
@@ -1639,13 +1637,13 @@ static int get_free_ccs(ray_dev_t *local)
} /* get_free_ccs */
/*===========================================================================*/
-static void authenticate_timeout(u_long data)
+static void authenticate_timeout(struct timer_list *t)
{
- ray_dev_t *local = (ray_dev_t *) data;
+ ray_dev_t *local = from_timer(local, t, timer);
del_timer(&local->timer);
printk(KERN_INFO "ray_cs Authentication with access point failed"
" - timeout\n");
- join_net((u_long) local);
+ join_net(&local->timer);
}
/*===========================================================================*/
@@ -1945,7 +1943,6 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 5;
- local->timer.data = (long)local;
if (status == CCS_START_NETWORK) {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
@@ -1967,7 +1964,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
} else {
dev_dbg(&link->dev, "ray_cs association failed,\n");
local->card_status = CARD_ASSOC_FAILED;
- join_net((u_long) local);
+ join_net(&local->timer);
}
break;
case CCS_TX_REQUEST:
@@ -2425,7 +2422,6 @@ static void authenticate(ray_dev_t *local)
local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
- local->timer.data = (long)local;
add_timer(&local->timer);
local->authentication_state = AWAITING_RESPONSE;
} /* end authenticate */
@@ -2468,7 +2464,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
} else {
pr_debug("Authentication refused\n");
local->card_status = CARD_AUTH_REFUSED;
- join_net((u_long) local);
+ join_net(&local->timer);
local->authentication_state =
UNAUTHENTICATED;
}
@@ -2506,7 +2502,6 @@ static void associate(ray_dev_t *local)
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
- local->timer.data = (long)local;
local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
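All of the ray_cs timer conversions above (and the rtlwifi ones further down) follow the same timer_setup()/from_timer() pattern. A minimal sketch with made-up names (struct foo, foo_timeout), for orientation only:

	struct foo {
		struct timer_list timer;
	};

	static void foo_timeout(struct timer_list *t)
	{
		/* recover the containing object; no more (ray_dev_t *)data casts */
		struct foo *f = from_timer(f, t, timer);

		/* handle the expired timer for f */
	}

	/* at init time (f is an allocated struct foo), replacing init_timer()
	 * plus the old .function/.data assignments:
	 */
	timer_setup(&f->timer, foo_timeout, 0);
	mod_timer(&f->timer, jiffies + HZ);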
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 524c2f02dd82..0609d8625019 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Raytheon wireless LAN PCMCIA card driver for Linux
A PCMCIA client driver for the Raylink wireless network card
Written by Corey Thomas
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index b21ed64e15df..668444f6bf07 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAYCTL_H_
#define _RAYCTL_H_
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
index e8243a44d6b6..7948a2da195a 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RTL8180_H
#define RTL8180_H
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
index 310013a2d726..de7727b74631 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RTL8180_RTL8225_H
#define RTL8180_RTL8225_H
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
index e12575e96d11..c493e59eede9 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RTL8187_RFKILL_H
#define RTL8187_RFKILL_H
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 80fee699f58a..38b2ba1ac6f8 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -614,7 +614,10 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
- dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+ if (memchr_inv(efuse->serial, 0xff, 11))
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+ else
+ dev_info(&priv->udev->dev, "Serial not available.\n");
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
unsigned char *raw = priv->efuse_wifi.raw;
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index 84c2e826fa1d..09c30e428375 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RTLWIFI) += rtlwifi.o
rtlwifi-objs := \
base.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index ea18aa7afecb..cad2272ae21b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -249,8 +249,6 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
vht_cap->vht_supported = true;
vht_cap->cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_TXSTBC |
@@ -283,8 +281,6 @@ static void _rtl_init_hw_vht_capab(struct ieee80211_hw *hw,
vht_cap->vht_supported = true;
vht_cap->cap =
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_TXSTBC |
@@ -461,10 +457,10 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* <1> timer */
- setup_timer(&rtlpriv->works.watchdog_timer,
- rtl_watch_dog_timer_callback, (unsigned long)hw);
- setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
- rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+ timer_setup(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, 0);
+ timer_setup(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+ rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
@@ -835,7 +831,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x000c) >> 2 ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
@@ -847,7 +843,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x0003) ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
@@ -1103,6 +1099,42 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
}
EXPORT_SYMBOL(rtlwifi_rate_mapping);
+static u8 _rtl_get_tx_hw_rate(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_tx_rate *r = &info->status.rates[0];
+ struct ieee80211_rate *txrate;
+ u8 hw_value = 0x0;
+
+ if (r->flags & IEEE80211_TX_RC_MCS) {
+ /* HT MCS0-15 */
+ hw_value = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15] - 15 +
+ r->idx;
+ } else if (r->flags & IEEE80211_TX_RC_VHT_MCS) {
+ /* VHT MCS0-9, NSS */
+ if (ieee80211_rate_get_vht_nss(r) == 2)
+ hw_value = rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
+ else
+ hw_value = rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
+
+ hw_value = hw_value - 9 + ieee80211_rate_get_vht_mcs(r);
+ } else {
+ /* legacy */
+ txrate = ieee80211_get_tx_rate(hw, info);
+
+ if (txrate)
+ hw_value = txrate->hw_value;
+ }
+
+ /* check 5G band */
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G &&
+ hw_value < rtlpriv->cfg->maps[RTL_RC_OFDM_RATE6M])
+ hw_value = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE6M];
+
+ return hw_value;
+}
+
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
@@ -1111,12 +1143,10 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
- struct ieee80211_rate *txrate;
+
__le16 fc = rtl_get_fc(skb);
- txrate = ieee80211_get_tx_rate(hw, info);
- if (txrate)
- tcb_desc->hw_rate = txrate->hw_value;
+ tcb_desc->hw_rate = _rtl_get_tx_hw_rate(hw, info);
if (rtl_is_tx_report_skb(hw, skb))
tcb_desc->use_spe_rpt = 1;
@@ -1527,6 +1557,42 @@ void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
"Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
}
}
+
+u32 rtl_get_hal_edca_param(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum wireless_mode wirelessmode,
+ struct ieee80211_tx_queue_params *param)
+{
+ u32 reg = 0;
+ u8 sifstime = 10;
+ u8 slottime = 20;
+
+ /* AIFS = AIFSN * slot time + SIFS */
+ switch (wirelessmode) {
+ case WIRELESS_MODE_A:
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ case WIRELESS_MODE_AC_5G:
+ case WIRELESS_MODE_AC_24G:
+ sifstime = 16;
+ slottime = 9;
+ break;
+ case WIRELESS_MODE_G:
+ slottime = (vif->bss_conf.use_short_slot ? 9 : 20);
+ break;
+ default:
+ break;
+ }
+
+ reg |= (param->txop & 0x7FF) << 16;
+ reg |= (fls(param->cw_max) & 0xF) << 12;
+ reg |= (fls(param->cw_min) & 0xF) << 8;
+ reg |= (param->aifs & 0x0F) * slottime + sifstime;
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(rtl_get_hal_edca_param);
+
/*********************************************************
*
* functions called by core.c
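For reference, a worked example of the register value rtl_get_hal_edca_param() above produces for typical 802.11n best-effort parameters; the numbers are illustrative, not taken from the patch:

	/* wirelessmode = WIRELESS_MODE_N_5G: slottime = 9 us, sifstime = 16 us
	 * param->aifs = 3, cw_min = 15, cw_max = 1023, txop = 0
	 *
	 *   AIFS   = 3 * 9 + 16 = 43 (0x2B)
	 *   ECWmin = fls(15)    = 4
	 *   ECWmax = fls(1023)  = 10
	 *
	 *   reg = (0 << 16) | (10 << 12) | (4 << 8) | 0x2B = 0xA42B
	 */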
@@ -1552,9 +1618,8 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
"on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- tid_data->seq_number);
+ *ssn);
- *ssn = tid_data->seq_number;
tid_data->agg.agg_state = RTL_AGG_START;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1565,7 +1630,6 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tid_data *tid_data;
struct rtl_sta_info *sta_entry = NULL;
if (sta == NULL)
@@ -1578,7 +1642,6 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -EINVAL;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- tid_data = &sta_entry->tids[tid];
sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1613,8 +1676,7 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
tid_data = &sta_entry->tids[tid];
RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- tid_data->seq_number);
+ "on ra = %pM tid = %d\n", sta->addr, tid);
tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
return 0;
@@ -1932,6 +1994,22 @@ label_lps_done:
rtlpriv->link_info.tx_busy_traffic = tx_busy_traffic;
rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic;
+ rtlpriv->stats.txbytesunicast_inperiod =
+ rtlpriv->stats.txbytesunicast -
+ rtlpriv->stats.txbytesunicast_last;
+ rtlpriv->stats.rxbytesunicast_inperiod =
+ rtlpriv->stats.rxbytesunicast -
+ rtlpriv->stats.rxbytesunicast_last;
+ rtlpriv->stats.txbytesunicast_last = rtlpriv->stats.txbytesunicast;
+ rtlpriv->stats.rxbytesunicast_last = rtlpriv->stats.rxbytesunicast;
+
+ rtlpriv->stats.txbytesunicast_inperiod_tp =
+ (u32)(rtlpriv->stats.txbytesunicast_inperiod * 8 / 2 /
+ 1024 / 1024);
+ rtlpriv->stats.rxbytesunicast_inperiod_tp =
+ (u32)(rtlpriv->stats.rxbytesunicast_inperiod * 8 / 2 /
+ 1024 / 1024);
+
/* <3> DM */
if (!rtlpriv->cfg->mod_params->disable_watchdog)
rtlpriv->cfg->ops->dm_watchdog(hw);
@@ -1975,10 +2053,9 @@ label_lps_done:
rtl_scan_list_expire(hw);
}
-void rtl_watch_dog_timer_callback(unsigned long data)
+void rtl_watch_dog_timer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.watchdog_wq, 0);
@@ -2084,10 +2161,11 @@ void rtl_c2hcmd_wq_callback(void *data)
rtl_c2hcmd_launcher(hw, 1);
}
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv =
+ from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer);
+ struct ieee80211_hw *hw = rtlpriv->hw;
struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
if (buddy_priv == NULL)
@@ -2204,7 +2282,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry =
(struct rtl_sta_info *) sta->drv_priv;
sta_entry->mimo_ps = smps;
- /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */
+ /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true); */
info->control.rates[0].idx = 0;
info->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index b56d1b7f5567..26735319b38f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -120,7 +120,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw);
void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
-void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_watch_dog_timer_callback(struct timer_list *t);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -137,6 +137,10 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf,
u8 c2h_cmd_len);
bool rtl_check_tx_report_acked(struct ieee80211_hw *hw);
void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms);
+u32 rtl_get_hal_edca_param(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum wireless_mode wirelessmode,
+ struct ieee80211_tx_queue_params *param);
void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -169,7 +173,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t);
extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
index 20582df0465c..d15c58737388 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
btcoexist-objs := halbtc8192e2ant.o \
halbtc8723b1ant.o \
halbtc8723b2ant.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index c04425236ce4..5f726f6d3567 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -2260,14 +2260,11 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (iot_peer != BTC_IOT_PEER_CISCO &&
iot_peer != BTC_IOT_PEER_BROADCOM) {
- if (bt_link_info->sco_exist)
- halbtc8723b1ant_limited_rx(btcoexist,
- NORMAL_EXEC, false,
- false, 0x5);
- else
- halbtc8723b1ant_limited_rx(btcoexist,
- NORMAL_EXEC, false,
- false, 0x5);
+ bool sco_exist = bt_link_info->sco_exist;
+
+ halbtc8723b1ant_limited_rx(btcoexist,
+ NORMAL_EXEC, sco_exist,
+ false, 0x5);
} else {
if (bt_link_info->sco_exist) {
halbtc8723b1ant_limited_rx(btcoexist,
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index c53cbf3d52bd..3cb88825473e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -160,7 +160,7 @@ static int rtl_op_start(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
err = rtlpriv->intf_ops->adapter_start(hw);
if (!err)
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
mutex_unlock(&rtlpriv->locks.conf_mutex);
return err;
}
@@ -453,7 +453,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
for (i = 0; i < wow->n_patterns; i++) {
memset(&rtl_pattern, 0, sizeof(struct rtl_wow_pattern));
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
- if (patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
+ if (patterns[i].pattern_len < 0 ||
+ patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
"Pattern[%d] is too long\n", i);
continue;
@@ -549,15 +550,13 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct timeval ts;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
/* to resolve s4 can not wake up*/
- do_gettimeofday(&ts);
- rtlhal->last_suspend_sec = ts.tv_sec;
+ rtlhal->last_suspend_sec = ktime_get_real_seconds();
if ((ppsc->wo_wlan_mode & WAKE_ON_PATTERN_MATCH) && wow->n_patterns)
_rtl_add_wowlan_patterns(hw, wow);
@@ -576,7 +575,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct timeval ts;
+ time64_t now;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
@@ -584,8 +583,8 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
rtlhal->wake_from_pnp_sleep = true;
 /* to resolve s4 can not wake up*/
- do_gettimeofday(&ts);
- if (ts.tv_sec - rtlhal->last_suspend_sec < 5)
+ now = ktime_get_real_seconds();
+ if (now - rtlhal->last_suspend_sec < 5)
return -1;
rtl_op_start(hw);
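The suspend/resume pair above drops struct timeval and do_gettimeofday() in favour of ktime_get_real_seconds(), which returns a y2038-safe time64_t; the five-second comparison is unchanged. A minimal sketch of the replacement, with hypothetical names:

        #include <linux/timekeeping.h>

        static time64_t example_last_suspend_sec;

        static void example_stamp_suspend(void)
        {
                /* old: struct timeval tv; do_gettimeofday(&tv); ... = tv.tv_sec; */
                example_last_suspend_sec = ktime_get_real_seconds();
        }

        static bool example_resumed_too_soon(void)
        {
                /* same comparison as before, now on 64-bit seconds */
                return ktime_get_real_seconds() - example_last_suspend_sec < 5;
        }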
@@ -945,7 +944,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Add sta addr is %pM\n", sta->addr);
- rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
}
return 0;
@@ -1151,7 +1150,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION)
- rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
+ rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0,
+ true);
rcu_read_unlock();
/* to avoid AP Disassociation caused by inactivity */
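Every update_rate_tbl caller in this series gains a trailing bool; the hunks here always pass true. The flag's exact meaning lives in the HAL implementations changed further down; from the name it appears to control whether bandwidth-related fields are refreshed along with the rate mask, which is an assumption on my part. A sketch of the resulting call shape (not a quote from the header):

        /* assumed op prototype after this series */
        void (*update_rate_tbl)(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta,
                                u8 rssi_level, bool update_bw);

        /* typical call, as in the hunks above */
        rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);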
@@ -1746,7 +1746,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
u8 faversion, u8 interface_type,
struct wlan_pwr_cfg pwrcfgcmd[])
{
- struct wlan_pwr_cfg cfg_cmd = {0};
+ struct wlan_pwr_cfg cfg_cmd;
bool polling_bit = false;
u32 ary_idx = 0;
u8 value = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 08dc8919ef60..c2575b0b9440 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -54,8 +54,7 @@ static const u8 ac_to_hwq[] = {
BK_QUEUE
};
-static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
__le16 fc = rtl_get_fc(skb);
@@ -104,20 +103,18 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
break;
case 3:
- /*
- * Always enable ASPM and Clock Req
+ /* Always enable ASPM and Clock Req
* from initialization to halt.
- * */
+ */
ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
RT_RF_OFF_LEVL_CLK_REQ);
break;
case 4:
- /*
- * Always enable ASPM without Clock Req
+ /* Always enable ASPM without Clock Req
* from initialization to halt.
- * */
+ */
ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
RT_RF_OFF_LEVL_CLK_REQ);
ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
@@ -146,32 +143,19 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
/*Set HW definition to determine if it supports ASPM. */
switch (rtlpci->const_support_pciaspm) {
- case 0:{
- /*Not support ASPM. */
- bool support_aspm = false;
- ppsc->support_aspm = support_aspm;
- break;
- }
- case 1:{
- /*Support ASPM. */
- bool support_aspm = true;
- bool support_backdoor = true;
- ppsc->support_aspm = support_aspm;
-
- /*if (priv->oem_id == RT_CID_TOSHIBA &&
- !priv->ndis_adapter.amd_l1_patch)
- support_backdoor = false; */
-
- ppsc->support_backdoor = support_backdoor;
-
- break;
- }
+ case 0:
+ /*Not support ASPM. */
+ ppsc->support_aspm = false;
+ break;
+ case 1:
+ /*Support ASPM. */
+ ppsc->support_aspm = true;
+ ppsc->support_backdoor = true;
+ break;
case 2:
/*ASPM value set by chipset. */
- if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
- bool support_aspm = true;
- ppsc->support_aspm = support_aspm;
- }
+ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
+ ppsc->support_aspm = true;
break;
default:
pr_err("switch case %#x not processed\n",
@@ -180,10 +164,11 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
}
 /* toshiba aspm issue, toshiba will set aspm by itself
- * so we should not set aspm in driver */
+ * so we should not set aspm in driver
+ */
pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
- init_aspm == 0x43)
+ init_aspm == 0x43)
ppsc->support_aspm = false;
}
@@ -263,8 +248,7 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
udelay(50);
}
-/*
- *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
+/*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
*power saving We should follow the sequence to enable
*RTL8192SE first then enable Pci Bridge ASPM
*or the system will show bluescreen.
@@ -334,7 +318,7 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
bool status = false;
u8 offset_e0;
- unsigned offset_e4;
+ unsigned int offset_e4;
pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
@@ -369,12 +353,12 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
"tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
- if ((pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber) &&
- (pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber) &&
- (pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber)) {
+ if (pcipriv->ndis_adapter.busnumber ==
+ tpcipriv->ndis_adapter.busnumber &&
+ pcipriv->ndis_adapter.devnumber ==
+ tpcipriv->ndis_adapter.devnumber &&
+ pcipriv->ndis_adapter.funcnumber !=
+ tpcipriv->ndis_adapter.funcnumber) {
find_buddy_priv = true;
break;
}
@@ -407,7 +391,7 @@ static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
}
static void rtl_pci_parse_configuration(struct pci_dev *pdev,
- struct ieee80211_hw *hw)
+ struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
@@ -441,7 +425,6 @@ static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
rtl_pci_enable_aspm(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
}
-
}
static void _rtl_pci_io_handler_init(struct device *dev,
@@ -458,11 +441,11 @@ static void _rtl_pci_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = pci_read8_sync;
rtlpriv->io.read16_sync = pci_read16_sync;
rtlpriv->io.read32_sync = pci_read32_sync;
-
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
- struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
+ struct sk_buff *skb,
+ struct rtl_tcb_desc *tcb_desc, u8 tid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -520,13 +503,15 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
(rtlpriv->buddy_priv &&
rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
return;
- /* we juse use em for BE/BK/VI/VO */
+ /* we just use em for BE/BK/VI/VO */
for (tid = 7; tid >= 0; tid--) {
u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+
while (!mac->act_scanning &&
rtlpriv->psc.rfpwr_state == ERFON) {
struct rtl_tcb_desc tcb_desc;
+
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
spin_lock_bh(&rtlpriv->locks.waitq_lock);
@@ -541,7 +526,8 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
spin_unlock_bh(&rtlpriv->locks.waitq_lock);
/* Some macaddr can't do early mode. like
- * multicast/broadcast/no_qos data */
+ * multicast/broadcast/no_qos data
+ */
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_AMPDU)
_rtl_update_earlymode_info(hw, skb,
@@ -552,7 +538,6 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
}
}
-
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -586,7 +571,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
- get_desc((u8 *)entry, true,
+ get_desc(hw, (u8 *)entry, true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
@@ -603,7 +588,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
goto tx_status_ok;
-
}
/* for sw LPS, just after NULL skb send out, we can
@@ -643,15 +627,12 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
ieee80211_tx_status_irqsafe(hw, skb);
if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
-
RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
"more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
prio, ring->idx,
skb_queue_len(&ring->queue));
- ieee80211_wake_queue(hw,
- skb_get_queue_mapping
- (skb));
+ ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
tx_status_ok:
skb = NULL;
@@ -659,7 +640,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
}
@@ -691,9 +672,10 @@ remap:
return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
+ /* skb->cb may hold a 64-bit address */
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE,
- (u8 *)&bufferaddress);
+ (u8 *)(dma_addr_t *)skb->cb);
} else {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RXBUFF_ADDR,
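With 64-bit DMA possible, the RX buffer address can no longer be squeezed through the old 32-bit bufferaddress temporary, so for the new TRX flow the descriptor is programmed straight from the dma_addr_t that the mapping code stores in skb->cb. A sketch of that idiom (rtlpci/rtlpriv/entry are taken from the surrounding code; the error label is hypothetical):

        /* map the receive buffer and keep the full-width handle in skb->cb */
        dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
                                            rtlpci->rxbuffersize,
                                            PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(rtlpci->pdev, mapping))
                goto err_free_skb;              /* hypothetical error path */

        *((dma_addr_t *)skb->cb) = mapping;

        /* later, when preparing the RX buffer descriptor, hand the stored
         * address to the HAL without truncating it to 32 bits */
        rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RX_PREPARE,
                                    (u8 *)(dma_addr_t *)skb->cb);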
@@ -798,7 +780,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pdesc = &rtlpci->rx_ring[rxring_idx].desc[
rtlpci->rx_ring[rxring_idx].idx];
- own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
+ own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
false,
HW_DESC_OWN);
if (own) /* wait data to be filled by hardware */
@@ -816,7 +798,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb))
goto no_new;
- memset(&rx_status , 0 , sizeof(rx_status));
+ memset(&rx_status, 0, sizeof(rx_status));
rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
&rx_status, (u8 *)pdesc, skb);
@@ -825,7 +807,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
(u8 *)buffer_desc,
hw_queue);
- len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
+ len = rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, false,
HW_DESC_RXPKT_LEN);
if (skb->end - skb->tail > len) {
@@ -846,12 +828,11 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/* handle command packet here */
if (rtlpriv->cfg->ops->rx_command_packet &&
rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) {
- dev_kfree_skb_any(skb);
- goto new_trx_end;
+ dev_kfree_skb_any(skb);
+ goto new_trx_end;
}
- /*
- * NOTICE This can not be use for mac80211,
+ /* NOTICE This can not be used for mac80211,
* this is done in mac80211 code,
* if done here sec DHCP will fail
* skb_trim(skb, skb->len - 4);
@@ -888,9 +869,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/* for sw lps */
rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
rtl_recognize_peer(hw, (void *)skb->data, skb->len);
- if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
- (rtlpriv->rtlhal.current_bandtype ==
- BAND_ON_2_4G) &&
+ if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP &&
+ rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G &&
(ieee80211_is_beacon(fc) ||
ieee80211_is_probe_resp(fc))) {
dev_kfree_skb_any(skb);
@@ -912,7 +892,7 @@ new_trx_end:
}
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
skb = new_skb;
no_new:
@@ -946,35 +926,34 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
+ u32 intc = 0;
+ u32 intd = 0;
irqreturn_t ret = IRQ_HANDLED;
if (rtlpci->irq_enabled == 0)
return ret;
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock , flags);
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
rtlpriv->cfg->ops->disable_interrupt(hw);
/*read ISR: 4/8bytes */
- rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
+ rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
- /*Shared IRQ or HW disappared */
+ /*Shared IRQ or HW disappeared */
if (!inta || inta == 0xffff)
goto done;
/*<1> beacon related */
- if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon ok interrupt!\n");
- }
- if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
+ if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
"beacon err interrupt!\n");
- }
- if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
+ if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
- }
if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,6 +1009,16 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+ if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+ rtlpriv->link_info.num_tx_inperiod++;
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
+ "H2C TX OK interrupt!\n");
+ _rtl_pci_tx_isr(hw, H2C_QUEUE);
+ }
+ }
+
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
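interrupt_recognized() now hands back four ISR words so chips like the 8822BE can report, for example, the H2C TX-OK interrupt through a separate register; HALs for older chips merely gain the extra pointers. A sketch of how an implementation might fill the widened signature (the REG_EXAMPLE_* names are placeholders, not registers from this patch):

        static void example_interrupt_recognized(struct ieee80211_hw *hw,
                                                 u32 *p_inta, u32 *p_intb,
                                                 u32 *p_intc, u32 *p_intd)
        {
                struct rtl_priv *rtlpriv = rtl_priv(hw);
                struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

                /* read and acknowledge the primary ISR words as before */
                *p_inta = rtl_read_dword(rtlpriv, REG_EXAMPLE_ISR0) &
                          rtlpci->irq_mask[0];
                rtl_write_dword(rtlpriv, REG_EXAMPLE_ISR0, *p_inta);

                *p_intb = rtl_read_dword(rtlpriv, REG_EXAMPLE_ISR1) &
                          rtlpci->irq_mask[1];
                rtl_write_dword(rtlpriv, REG_EXAMPLE_ISR1, *p_intb);

                /* chips without the extra registers simply report zero here */
                *p_intc = 0;
                *p_intd = 0;
        }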
@@ -1122,14 +1111,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
if (pskb) {
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->get_desc(
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+ hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
pskb->len, PCI_DMA_TODEVICE);
kfree_skb(pskb);
}
/*NB: the beacon data buffer must be 32-bit aligned. */
pskb = ieee80211_beacon_get(hw, mac->vif);
- if (pskb == NULL)
+ if (!pskb)
return;
hdr = rtl_get_hdr(pskb);
info = IEEE80211_SKB_CB(pskb);
@@ -1151,7 +1140,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
&temp_one);
}
- return;
}
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
@@ -1164,14 +1152,15 @@ static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
desc_num = TX_DESC_NUM_92E;
+ else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE)
+ desc_num = TX_DESC_NUM_8822B;
else
desc_num = RT_TXDESC_NUM;
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
rtlpci->txringcount[i] = desc_num;
- /*
- *we just alloc 2 desc for beacon queue,
+ /*we just alloc 2 desc for beacon queue,
*because we just need first desc in hw beacon.
*/
rtlpci->txringcount[BEACON_QUEUE] = 2;
@@ -1188,7 +1177,7 @@ static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
}
static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
- struct pci_dev *pdev)
+ struct pci_dev *pdev)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -1360,7 +1349,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
}
static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
- unsigned int prio)
+ unsigned int prio)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1377,8 +1366,8 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
entry = (u8 *)(&ring->desc[ring->idx]);
pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->
- ops->get_desc((u8 *)entry, true,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -1449,8 +1438,7 @@ static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
}
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
- ret = _rtl_pci_init_tx_ring(hw, i,
- rtlpci->txringcount[i]);
+ ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
if (ret)
goto err_free_rings;
}
@@ -1498,7 +1486,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
/* force the rx_ring[RX_MPDU_QUEUE/
* RX_CMD_QUEUE].idx to the first one
*new trx flow, do nothing
- */
+ */
if (!rtlpriv->use_new_trx_flow &&
rtlpci->rx_ring[rxring_idx].desc) {
struct rtl_rx_desc *entry = NULL;
@@ -1507,9 +1495,9 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
bufferaddress =
- rtlpriv->cfg->ops->get_desc((u8 *)entry,
- false , HW_DESC_RXBUFF_ADDR);
- memset((u8 *)entry , 0 ,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ false, HW_DESC_RXBUFF_ADDR);
+ memset((u8 *)entry, 0,
sizeof(*rtlpci->rx_ring
[rxring_idx].desc));/*clear one entry*/
if (rtlpriv->use_new_trx_flow) {
@@ -1538,8 +1526,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
rtlpci->rx_ring[rxring_idx].idx = 0;
}
- /*
- *after reset, release previous pending packet,
+ /*after reset, release previous pending packet,
*and force the tx idx to the first one
*/
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@@ -1560,7 +1547,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
- get_desc((u8 *)
+ get_desc(hw, (u8 *)
entry,
true,
HW_DESC_TXBUFF_ADDR),
@@ -1621,7 +1608,6 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
struct rtl_tcb_desc *ptcb_desc)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_sta_info *sta_entry = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
@@ -1633,9 +1619,6 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
__le16 fc = rtl_get_fc(skb);
u8 *pda_addr = hdr->addr1;
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- /*ssn */
- u8 tid = 0;
- u16 seq_number = 0;
u8 own;
u8 temp_one = 1;
@@ -1644,7 +1627,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->psc.sw_ps_enabled) {
if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
- !ieee80211_has_pm(fc))
+ !ieee80211_has_pm(fc))
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
@@ -1673,10 +1656,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->use_new_trx_flow) {
ptx_bd_desc = &ring->buffer_desc[idx];
} else {
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
+ own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc,
true, HW_DESC_OWN);
- if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+ if (own == 1 && hw_queue != BEACON_QUEUE) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
hw_queue, ring->idx, idx,
@@ -1690,24 +1673,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_available_desc &&
rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
- flags);
- return skb->len;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- tid = rtl_get_tid(skb);
- if (sta) {
- sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- seq_number = (le16_to_cpu(hdr->seq_ctrl) &
- IEEE80211_SCTL_SEQ) >> 4;
- seq_number += 1;
-
- if (!ieee80211_has_morefrags(hdr->frame_control))
- sta_entry->tids[tid].seq_number = seq_number;
- }
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ return skb->len;
}
if (ieee80211_is_data(fc))
@@ -1766,7 +1735,7 @@ static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
ring = &pcipriv->dev.tx_ring[queue_id];
queue_len = skb_queue_len(&ring->queue);
if (queue_len == 0 || queue_id == BEACON_QUEUE ||
- queue_id == TXCMD_QUEUE) {
+ queue_id == TXCMD_QUEUE) {
queue_id--;
continue;
} else {
@@ -1776,7 +1745,7 @@ static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
/* we just wait 1s for all queues */
if (rtlpriv->psc.rfpwr_state == ERFOFF ||
- is_hal_stop(rtlhal) || i >= 200)
+ is_hal_stop(rtlhal) || i >= 200)
return;
}
}
@@ -1794,7 +1763,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
-
}
static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
@@ -1852,7 +1820,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "rtl_pci_start OK\n");
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
return 0;
}
@@ -1863,13 +1831,12 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
- u8 RFInProgressTimeOut = 0;
+ u8 rf_timeout = 0;
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_halt_notify();
- /*
- *should be before disable interrupt&adapter
+ /*should be before disable interrupt&adapter
*and will do it immediately.
*/
set_hal_stop(rtlhal);
@@ -1881,12 +1848,12 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
- if (RFInProgressTimeOut > 100) {
+ if (rf_timeout > 100) {
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
break;
}
mdelay(1);
- RFInProgressTimeOut++;
+ rf_timeout++;
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
}
ppsc->rfchange_inprogress = true;
@@ -1906,7 +1873,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
}
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
- struct ieee80211_hw *hw)
+ struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
@@ -1961,13 +1928,12 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
-
}
} else if (deviceid == RTL_PCI_8723AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - "
- "vid/did=%x/%x\n", venderid, deviceid);
+ "8723AE PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
@@ -1987,21 +1953,26 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"Find adapter, Hardware type is 8188EE\n");
} else if (deviceid == RTL_PCI_8723BE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8723BE\n");
} else if (deviceid == RTL_PCI_8192EE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8192EE\n");
} else if (deviceid == RTL_PCI_8821AE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8821AE\n");
} else if (deviceid == RTL_PCI_8812AE_DID) {
- rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8812AE\n");
+ } else if (deviceid == RTL_PCI_8822BE_DID) {
+ rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
+ rtlhal->bandset = BAND_ON_BOTH;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8822BE\n");
} else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Err: Unknown device - vid/did=%x/%x\n",
@@ -2029,11 +2000,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
}
- /* 92ee use new trx flow */
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
+ switch (rtlhal->hw_type) {
+ case HARDWARE_TYPE_RTL8192EE:
+ case HARDWARE_TYPE_RTL8822BE:
+ /* use new trx flow */
rtlpriv->use_new_trx_flow = true;
- else
+ break;
+
+ default:
rtlpriv->use_new_trx_flow = false;
+ break;
+ }
/*find bus info */
pcipriv->ndis_adapter.busnumber = pdev->bus->number;
@@ -2124,7 +2101,7 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
rtlpci->using_msi = true;
- RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
"MSI Interrupt Mode!\n");
return 0;
}
@@ -2142,7 +2119,7 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
return ret;
rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+ RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
"Pin-based Interrupt Mode!\n");
return 0;
}
@@ -2163,8 +2140,23 @@ static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
return ret;
}
+static void platform_enable_dma64(struct pci_dev *pdev, bool dma64)
+{
+ u8 value;
+
+ pci_read_config_byte(pdev, 0x719, &value);
+
+ /* 0x719 Bit5 is DMA64 bit fetch. */
+ if (dma64)
+ value |= BIT(5);
+ else
+ value &= ~BIT(5);
+
+ pci_write_config_byte(pdev, 0x719, value);
+}
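platform_enable_dma64() is a plain read-modify-write of a vendor-specific config byte. The same pci_read_config_byte()/pci_write_config_byte() pattern, generalised, looks like this (a sketch with hypothetical names; 0x719 bit 5 is the only offset this patch actually touches):

        #include <linux/pci.h>

        /* set or clear one bit in a vendor-specific config-space byte */
        static void example_set_cfg_bit(struct pci_dev *pdev, int where,
                                        unsigned int bit, bool set)
        {
                u8 value;

                pci_read_config_byte(pdev, where, &value);
                if (set)
                        value |= (u8)(1U << bit);
                else
                        value &= (u8)~(1U << bit);
                pci_write_config_byte(pdev, where, value);
        }

        /* equivalent to the helper above: example_set_cfg_bit(pdev, 0x719, 5, dma64); */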
+
int rtl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
struct ieee80211_hw *hw = NULL;
@@ -2181,13 +2173,25 @@ int rtl_pci_probe(struct pci_dev *pdev,
return err;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (((struct rtl_hal_cfg *)id->driver_data)->mod_params->dma64 &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ WARN_ONCE(true,
+ "Unable to obtain 64bit DMA for consistent allocations\n");
+ err = -ENOMEM;
+ goto fail1;
+ }
+
+ platform_enable_dma64(pdev, true);
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
WARN_ONCE(true,
"rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
err = -ENOMEM;
goto fail1;
}
+
+ platform_enable_dma64(pdev, false);
}
pci_set_master(pdev);
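The probe path now tries a 64-bit streaming/consistent DMA mask first when the HAL's mod_params ask for it (the new dma64 flag), falls back to the 32-bit mask otherwise, and flips the 0x719 bit to match. For reference, roughly the same negotiation expressed with the generic DMA API — a sketch, not what this patch itself uses:

        /* dma64 comes from the HAL's mod_params, as in the hunk above */
        if (dma64 && !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                platform_enable_dma64(pdev, true);
        } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                platform_enable_dma64(pdev, false);
        } else {
                err = -ENOMEM;          /* no usable DMA mask */
                goto fail1;
        }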
@@ -2331,7 +2335,6 @@ fail1:
pci_disable_device(pdev);
return err;
-
}
EXPORT_SYMBOL(rtl_pci_probe);
@@ -2390,20 +2393,20 @@ EXPORT_SYMBOL(rtl_pci_disconnect);
#ifdef CONFIG_PM_SLEEP
/***************************************
-kernel pci power state define:
-PCI_D0 ((pci_power_t __force) 0)
-PCI_D1 ((pci_power_t __force) 1)
-PCI_D2 ((pci_power_t __force) 2)
-PCI_D3hot ((pci_power_t __force) 3)
-PCI_D3cold ((pci_power_t __force) 4)
-PCI_UNKNOWN ((pci_power_t __force) 5)
-
-This function is called when system
-goes into suspend state mac80211 will
-call rtl_mac_stop() from the mac80211
-suspend function first, So there is
-no need to call hw_disable here.
-****************************************/
+ * kernel pci power state define:
+ * PCI_D0 ((pci_power_t __force) 0)
+ * PCI_D1 ((pci_power_t __force) 1)
+ * PCI_D2 ((pci_power_t __force) 2)
+ * PCI_D3hot ((pci_power_t __force) 3)
+ * PCI_D3cold ((pci_power_t __force) 4)
+ * PCI_UNKNOWN ((pci_power_t __force) 5)
+ *
+ * This function is called when the system
+ * goes into suspend state. mac80211 will
+ * call rtl_mac_stop() from the mac80211
+ * suspend function first, so there is
+ * no need to call hw_disable here.
+ ****************************************/
int rtl_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index d9039ea10ba4..e7d070e8da2d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -27,10 +27,9 @@
#define __RTL_PCI_H__
#include <linux/pci.h>
-/*
-1: MSDU packet queue,
-2: Rx Command Queue
-*/
+/* 1: MSDU packet queue,
+ * 2: Rx Command Queue
+ */
#define RTL_PCI_RX_MPDU_QUEUE 0
#define RTL_PCI_RX_CMD_QUEUE 1
#define RTL_PCI_MAX_RX_QUEUE 2
@@ -40,6 +39,7 @@
#define RT_TXDESC_NUM 128
#define TX_DESC_NUM_92E 512
+#define TX_DESC_NUM_8822B 512
#define RT_TXDESC_NUM_BE_QUEUE 256
#define BK_QUEUE 0
@@ -51,6 +51,7 @@
#define MGNT_QUEUE 6
#define HIGH_QUEUE 7
#define HCCA_QUEUE 8
+#define H2C_QUEUE TXCMD_QUEUE /* In 8822B */
#define RTL_PCI_DEVICE(vend, dev, cfg) \
.vendor = (vend), \
@@ -108,6 +109,7 @@
#define RTL_PCI_8192EE_DID 0x818B /*8192ee*/
#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/
#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/
+#define RTL_PCI_8822BE_DID 0xB822 /*8822be*/
/*8192 support 16 pages of IO registers*/
#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
@@ -143,13 +145,7 @@ struct rtl_pci_capabilities_header {
* RX wifi info == RX descriptor in old flow
*/
struct rtl_tx_buffer_desc {
-#if (RTL8192EE_SEG_NUM == 2)
- u32 dword[2*(DMA_IS_64BIT + 1)*8]; /*seg = 8*/
-#elif (RTL8192EE_SEG_NUM == 1)
- u32 dword[2*(DMA_IS_64BIT + 1)*4]; /*seg = 4*/
-#elif (RTL8192EE_SEG_NUM == 0)
- u32 dword[2*(DMA_IS_64BIT + 1)*2]; /*seg = 2*/
-#endif
+ u32 dword[4 * (1 << (BUFDESC_SEG_NUM + 1))];
} __packed;
struct rtl_tx_desc {
@@ -157,7 +153,7 @@ struct rtl_tx_desc {
} __packed;
struct rtl_rx_buffer_desc { /*rx buffer desc*/
- u32 dword[2];
+ u32 dword[4];
} __packed;
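Two descriptor layouts change size above: the RX buffer descriptor doubles from two to four dwords, and the TX buffer descriptor array is now sized from BUFDESC_SEG_NUM rather than the old RTL8192EE_SEG_NUM/DMA_IS_64BIT pair. Assuming BUFDESC_SEG_NUM keeps the old 0/1/2 encoding for 2/4/8 segments (an assumption; the define itself is not part of this hunk), the new expression always reserves four dwords per segment:

        /* size check, assuming BUFDESC_SEG_NUM in {0, 1, 2}:
         *      4 * (1 << (0 + 1)) =  8 dwords -> 2 segments
         *      4 * (1 << (1 + 1)) = 16 dwords -> 4 segments
         *      4 * (1 << (2 + 1)) = 32 dwords -> 8 segments
         * i.e. the 64-bit layout (4 dwords of address plus length/flags per
         * segment) is reserved unconditionally, so the struct size no longer
         * depends on the DMA configuration at build time.
         */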
struct rtl_rx_desc { /*old: rx desc new: rx wifi info*/
@@ -215,7 +211,7 @@ struct rtl_pci {
/*irq */
u8 irq_alloc;
- u32 irq_mask[2];
+ u32 irq_mask[4]; /* 0-1: normal, 2: unused, 3: h2c */
u32 sys_irq_mask;
/*Bcn control register setting */
@@ -229,8 +225,9 @@ struct rtl_pci {
u8 const_hostpci_aspm_setting;
/*pci-e device */
u8 const_devicepci_aspm_setting;
- /*If it supports ASPM, Offset[560h] = 0x40,
- otherwise Offset[560h] = 0x00. */
+ /* If it supports ASPM, Offset[560h] = 0x40,
+ * otherwise Offset[560h] = 0x00.
+ */
bool support_aspm;
bool support_backdoor;
@@ -285,7 +282,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
extern const struct rtl_intf_ops rtl_pci_ops;
int rtl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
+ const struct pci_device_id *id);
void rtl_pci_disconnect(struct pci_dev *pdev);
#ifdef CONFIG_PM_SLEEP
int rtl_pci_suspend(struct device *dev);
@@ -293,34 +290,34 @@ int rtl_pci_resume(struct device *dev);
#endif /* CONFIG_PM_SLEEP */
static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ return readb((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ return readw((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
{
- return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ return readl((u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ writeb(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write16_async(struct rtl_priv *rtlpriv,
u32 addr, u16 val)
{
- writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ writew(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline void pci_write32_async(struct rtl_priv *rtlpriv,
u32 addr, u32 val)
{
- writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
+ writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
}
static inline u16 calc_fifo_space(u16 rp, u16 wp)
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 07ee3096f50e..24c87fae5382 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -55,7 +55,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->enable_interrupt(hw);
/*<enable timer> */
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
return true;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
index dae4f0f19cd3..5ea368e8e64b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8188ee-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index f936a491371b..e05af7d60830 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1221,7 +1221,8 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
sta = rtl_find_sta(hw, mac->bssid);
if (sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
- p_ra->ratr_state);
+ p_ra->ratr_state,
+ true);
rcu_read_unlock();
p_ra->pre_ratr_state = p_ra->ratr_state;
@@ -1707,9 +1708,11 @@ static void rtl88e_dm_fast_ant_training(struct ieee80211_hw *hw)
}
}
-void rtl88e_dm_fast_antenna_training_callback(unsigned long data)
+void rtl88e_dm_fast_antenna_training_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv =
+ from_timer(rtlpriv, t, works.fast_antenna_training_timer);
+ struct ieee80211_hw *hw = rtlpriv->hw;
rtl88e_dm_fast_ant_training(hw);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
index 0fd2bac14db6..50f26a9a97db 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
@@ -270,7 +270,7 @@ void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
void rtl88e_dm_ant_sel_statistics(struct ieee80211_hw *hw,
u8 antsel_tr_mux, u32 mac_id,
u32 rx_pwdb_all);
-void rtl88e_dm_fast_antenna_training_callback(unsigned long data);
+void rtl88e_dm_fast_antenna_training_callback(struct timer_list *t);
void rtl88e_dm_init(struct ieee80211_hw *hw);
void rtl88e_dm_watchdog(struct ieee80211_hw *hw);
void rtl88e_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 0ba26d27d11c..e30a18e64ff5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -99,6 +99,7 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->get_desc(
+ hw,
(u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -252,9 +253,12 @@ static void _rtl88ee_set_fw_ps_rf_off_low_power(struct ieee80211_hw *hw)
rpwm_val |= FW_PS_STATE_RF_OFF_LOW_PWR_88E;
_rtl88ee_set_fw_clock_off(hw, rpwm_val);
}
-void rtl88ee_fw_clk_off_timer_callback(unsigned long data)
+
+void rtl88ee_fw_clk_off_timer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = from_timer(rtlpriv, t,
+ works.fw_clockoff_timer);
+ struct ieee80211_hw *hw = rtlpriv->hw;
_rtl88ee_set_fw_ps_rf_off_low_power(hw);
}
@@ -1468,7 +1472,8 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
}
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -2076,7 +2081,7 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
}
static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2207,12 +2212,12 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl88ee_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl88ee_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl88ee_update_hal_rate_table(hw, sta);
}
@@ -2235,7 +2240,7 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u32 u4tmp;
bool b_actuallyset = false;
@@ -2254,8 +2259,6 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
u4tmp = rtl_read_dword(rtlpriv, REG_GPIO_OUTPUT);
e_rfpowerstate_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index d38dbca3c19e..cdf49de1e6ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -29,7 +29,8 @@
void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl88ee_hw_init(struct ieee80211_hw *hw);
void rtl88ee_card_disable(struct ieee80211_hw *hw);
void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
@@ -43,7 +44,8 @@ void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl88ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw);
@@ -57,6 +59,6 @@ void rtl8188ee_bt_reg_init(struct ieee80211_hw *hw);
void rtl8188ee_bt_hw_init(struct ieee80211_hw *hw);
void rtl88ee_suspend(struct ieee80211_hw *hw);
void rtl88ee_resume(struct ieee80211_hw *hw);
-void rtl88ee_fw_clk_off_timer_callback(unsigned long data);
+void rtl88ee_fw_clk_off_timer_callback(struct timer_list *t);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 57e5d5c1d24b..82681b96ef93 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -41,6 +41,7 @@
static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -77,7 +78,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
@@ -189,16 +190,12 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
/*low power */
rtlpriv->psc.low_power_enable = false;
if (rtlpriv->psc.low_power_enable) {
- init_timer(&rtlpriv->works.fw_clockoff_timer);
- setup_timer(&rtlpriv->works.fw_clockoff_timer,
- rtl88ee_fw_clk_off_timer_callback,
- (unsigned long)hw);
+ timer_setup(&rtlpriv->works.fw_clockoff_timer,
+ rtl88ee_fw_clk_off_timer_callback, 0);
}
- init_timer(&rtlpriv->works.fast_antenna_training_timer);
- setup_timer(&rtlpriv->works.fast_antenna_training_timer,
- rtl88e_dm_fast_antenna_training_callback,
- (unsigned long)hw);
+ timer_setup(&rtlpriv->works.fast_antenna_training_timer,
+ rtl88e_dm_fast_antenna_training_callback, 0);
return err;
}
@@ -276,6 +273,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = false,
.msi_support = true,
+ .aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
};
@@ -399,6 +397,7 @@ module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444);
+module_param_named(aspm, rtl88ee_mod_params.aspm_support, int, 0444);
module_param_named(disable_watchdog, rtl88ee_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
@@ -406,6 +405,7 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
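The new aspm parameter mirrors the existing ips/swlps/fwlps knobs: per the comment in rtl88e_init_aspm_vars(), 0 disables ASPM, 1 enables it (the default, preserving today's behaviour) and 2 defers to the chipset, and the 0444 permission makes it read-only once loaded. A minimal sketch of the declaration pattern plus an example load-time override (parameter value chosen for illustration):

        /* sketch of the module-parameter pattern used above */
        static int example_aspm_support = 1;
        module_param_named(aspm, example_aspm_support, int, 0444);
        MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");

        /* at load time:           modprobe rtl8188ee aspm=0
         * or persistently:        options rtl8188ee aspm=0   (modprobe.d)
         */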
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index dd3e12b74447..9670732b2bc6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -786,7 +786,8 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name)
{
u32 ret = 0;
@@ -828,7 +829,7 @@ bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl88ee_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl88ee_get_desc(hw, entry, true, HW_DESC_OWN);
/*beacon packet will only use the first
 *descriptor by default, and the own may not
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index 9a1c2087adee..f902d6769aa8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -782,7 +782,8 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
bool istx, u8 desc_name, u8 *val);
-u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name);
bool rtl88ee_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
index 0546b7556259..40f075527fc8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192c-common-objs := \
main.o \
dm_common.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
index 577c7adbc322..bc307ccc5e83 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192ce-objs := \
dm.o \
hw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 9956026bae0a..0f4c86a28716 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1375,7 +1375,8 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
}
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1865,7 +1866,7 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
}
static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -1995,12 +1996,12 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92ce_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl92ce_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl92ce_update_hal_rate_table(hw, sta);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index 877f138a0cb9..b5c8e2fc1ba2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -42,7 +42,8 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl92ce_hw_init(struct ieee80211_hw *hw);
void rtl92ce_card_disable(struct ieee80211_hw *hw);
void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
@@ -56,9 +57,8 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
-void rtl92ce_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 38f85bfdf0c7..71a6761d3648 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -44,6 +44,7 @@
static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -83,7 +84,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
@@ -252,6 +253,7 @@ static struct rtl_mod_params rtl92ce_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
};
@@ -375,10 +377,12 @@ module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(aspm, rtl92ce_mod_params.aspm_support, int, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 94a4b39437cd..d36e0060cc7a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -697,7 +697,8 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
-u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
+u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
+ bool istx, u8 desc_name)
{
u32 ret = 0;
@@ -740,7 +741,7 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl92ce_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl92ce_get_desc(hw, entry, true, HW_DESC_OWN);
/*beacon packet will only use the first
 *descriptor by default, and the own may not
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index 66291fc341e7..91f0bd6b752f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -718,7 +718,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
-u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
+ bool istx, u8 desc_name);
bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
index 97437dadc287..8b3921fd2cb6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192cu-objs := \
dm.o \
hw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 530e80f0ef0b..1e60f70481f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -2006,7 +2006,7 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2153,12 +2153,12 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl92cu_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl92cu_update_hal_rate_table(hw, sta);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
index 932f056f7ef8..ebd168400d45 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
@@ -104,6 +104,6 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level);
+ u8 rssi_level, bool update_bw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
index d0703f20d30c..6482d823a57a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192de-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index f4129cf96e7c..85cedd083d2b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -490,7 +490,7 @@ static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[idx];
/* discard output from call below */
- rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN);
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
__skb_queue_tail(&ring->queue, skb);
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index cf28d25c551f..0da6c0136857 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1356,7 +1356,8 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
}
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1897,7 +1898,7 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
}
static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2033,12 +2034,12 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92de_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl92de_update_hal_rate_table(hw, sta);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 24b03b9999be..9236aa91273d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -29,7 +29,8 @@
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl92de_hw_init(struct ieee80211_hw *hw);
void rtl92de_card_disable(struct ieee80211_hw *hw);
void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
@@ -43,7 +44,8 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index a6549f5f6c59..d5ba2bace79b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -40,6 +40,7 @@
static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -79,7 +80,7 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
@@ -254,6 +255,7 @@ static struct rtl_mod_params rtl92de_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
+ .aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
};
@@ -369,11 +371,13 @@ module_param_named(debug_level, rtl92de_mod_params.debug_level, int, 0644);
module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(aspm, rtl92de_mod_params.aspm_support, int, 0444);
module_param_named(debug_mask, rtl92de_mod_params.debug_mask, ullong, 0644);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
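
The ASPM policy that each HAL previously hard-coded in its init_aspm_vars() helper is now seeded from a per-driver aspm module parameter (0 = disabled, 1 = enabled, 2 = follow the chipset). The stand-alone sketch below mirrors that plumbing with simplified placeholder structures (mod_params, pci_vars); it is illustrative only, not the driver's real types.

/*
 * Sketch: const_support_pciaspm is no longer a per-chip constant, it is
 * copied from the module parameter at init time.
 */
#include <stdio.h>

struct mod_params {
        int aspm_support;               /* 0, 1 or 2; default 1 for rtl8192de */
};

struct pci_vars {
        int const_support_pciaspm;
};

static void init_aspm_vars(struct pci_vars *pci, const struct mod_params *mp)
{
        /* previously: pci->const_support_pciaspm = 1; */
        pci->const_support_pciaspm = mp->aspm_support;
}

int main(void)
{
        struct mod_params mp = { .aspm_support = 1 };
        struct pci_vars pci;

        init_aspm_vars(&pci, &mp);
        printf("ASPM policy: %d\n", pci.const_support_pciaspm);
        return 0;
}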
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 86019f654428..d7b023cf7400 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -821,7 +821,8 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
-u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
+u64 rtl92de_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc, bool istx, u8 desc_name)
{
u32 ret = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 9bb6cc648590..f7f776539438 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -735,7 +735,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
-u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl92de_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc, bool istx, u8 desc_name);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool b_firstseg, bool b_lastseg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
index f254b9f64326..12dfbeb1f3ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192ee-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index e6b5786c7d4a..faed6e2dedf6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -1039,7 +1039,8 @@ static void rtl92ee_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
sta = rtl_find_sta(hw, mac->bssid);
if (sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
- p_ra->ratr_state);
+ p_ra->ratr_state,
+ true);
rcu_read_unlock();
p_ra->pre_ratr_state = p_ra->ratr_state;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 7eae27f8e173..f9563ae301ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -682,7 +682,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct sk_buff *skb = NULL;
-
+ bool rtstatus;
u32 totalpacketlen;
u8 u1rsvdpageloc[5] = { 0 };
bool b_dlok = false;
@@ -768,7 +768,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
skb = dev_alloc_skb(totalpacketlen);
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
- b_dlok = true;
+ rtstatus = rtl_cmd_send_packet(hw, skb);
+ if (rtstatus)
+ b_dlok = true;
if (b_dlok) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index ef9394be7016..fe5da637e77a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -840,6 +840,31 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
/* Set TCR register */
rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+ /* Set TX/RX descriptor physical address -- HI part */
+ if (!rtlpriv->cfg->mod_params->dma64)
+ goto dma64_end;
+
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA + 4,
+ ((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) >>
+ 32);
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA + 4,
+ (u64)rtlpci->tx_ring[MGNT_QUEUE].buffer_desc_dma >> 32);
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA + 4,
+ (u64)rtlpci->tx_ring[VO_QUEUE].buffer_desc_dma >> 32);
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA + 4,
+ (u64)rtlpci->tx_ring[VI_QUEUE].buffer_desc_dma >> 32);
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA + 4,
+ (u64)rtlpci->tx_ring[BE_QUEUE].buffer_desc_dma >> 32);
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA + 4,
+ (u64)rtlpci->tx_ring[BK_QUEUE].buffer_desc_dma >> 32);
+ rtl_write_dword(rtlpriv, REG_HQ0_DESA + 4,
+ (u64)rtlpci->tx_ring[HIGH_QUEUE].buffer_desc_dma >> 32);
+
+ rtl_write_dword(rtlpriv, REG_RX_DESA + 4,
+ (u64)rtlpci->rx_ring[RX_MPDU_QUEUE].dma >> 32);
+
+dma64_end:
+
/* Set TX/RX descriptor physical address(from OS API). */
rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
((u64)rtlpci->tx_ring[BEACON_QUEUE].buffer_desc_dma) &
@@ -913,15 +938,9 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_HI7Q_TXBD_NUM,
TX_DESC_NUM_92E | ((RTL8192EE_SEG_NUM << 12) & 0x3000));
/*Rx*/
-#if (DMA_IS_64BIT == 1)
rtl_write_word(rtlpriv, REG_RX_RXBD_NUM,
RX_DESC_NUM_92E |
((RTL8192EE_SEG_NUM << 13) & 0x6000) | 0x8000);
-#else
- rtl_write_word(rtlpriv, REG_RX_RXBD_NUM,
- RX_DESC_NUM_92E |
- ((RTL8192EE_SEG_NUM << 13) & 0x6000) | 0x0000);
-#endif
rtl_write_dword(rtlpriv, REG_TSFTIMER_HCI, 0XFFFFFFFF);
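
With the new dma64 module parameter enabled, _rtl92ee_init_mac() now also programs the upper 32 bits of each ring's descriptor base address into the register at offset REG_*_DESA + 4 before the existing low-dword writes. The snippet below demonstrates only the address split; the register offset and write helper are placeholders, not the driver's real REG_BCNQ_DESA or rtl_write_dword().

/*
 * Sketch of programming a 64-bit ring base address as two dwords,
 * writing the HI part only when 64-bit DMA is in use.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void write_dword(unsigned int reg, uint32_t val)
{
        printf("reg 0x%04x <- 0x%08" PRIx32 "\n", reg, val);
}

static void program_ring_base(unsigned int reg_desa, uint64_t dma, bool dma64)
{
        if (dma64)
                write_dword(reg_desa + 4, (uint32_t)(dma >> 32));   /* HI part */
        write_dword(reg_desa, (uint32_t)(dma & 0xffffffff));        /* LO part */
}

int main(void)
{
        /* 0x0338 is a placeholder offset, not the real REG_BCNQ_DESA */
        program_ring_base(0x0338, 0x0000000123456780ULL, true);
        return 0;
}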
@@ -1675,7 +1694,8 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
}
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -2251,7 +2271,7 @@ static u8 _rtl92ee_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index)
static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -2370,7 +2390,7 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
(ratr_index << 28);
rate_mask[0] = macid;
rate_mask[1] = ratr_index | (b_shortgi ? 0x80 : 0x00);
- rate_mask[2] = curtxbw_40mhz;
+ rate_mask[2] = curtxbw_40mhz | ((!update_bw) << 3);
rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
@@ -2385,12 +2405,13 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl92ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92ee_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl92ee_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
}
void rtl92ee_update_channel_access_setting(struct ieee80211_hw *hw)
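
The rate_mask[2] byte of the H2C rate-mask command, filled a few hunks above, now also carries a "keep the current bandwidth" hint: bit 3 is set whenever update_bw is false, alongside the existing 40 MHz flag in the low bits. A small sketch of that encoding, using an illustrative helper rather than a driver function:

/*
 * Sketch: byte 2 of the rate-mask command = current 40 MHz flag,
 * plus bit 3 as a "do not update bandwidth" hint.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t build_bw_byte(uint8_t curtxbw_40mhz, bool update_bw)
{
        return curtxbw_40mhz | ((uint8_t)(!update_bw) << 3);
}

int main(void)
{
        printf("40MHz, update bw     : 0x%02x\n", build_bw_byte(1, true));
        printf("40MHz, keep bw as-is : 0x%02x\n", build_bw_byte(1, false));
        return 0;
}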
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index 05413f189685..cd6d3322f033 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -29,7 +29,8 @@
void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl92ee_hw_init(struct ieee80211_hw *hw);
void rtl92ee_card_disable(struct ieee80211_hw *hw);
void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
@@ -43,7 +44,8 @@ void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92ee_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void rtl92ee_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index a3490080d066..ef92a789871d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -44,6 +44,7 @@
static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -83,7 +84,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
@@ -259,6 +260,8 @@ static struct rtl_mod_params rtl92ee_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = true,
.msi_support = true,
+ .dma64 = false,
+ .aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
};
@@ -376,6 +379,8 @@ module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444);
module_param_named(msi, rtl92ee_mod_params.msi_support, bool, 0444);
+module_param_named(dma64, rtl92ee_mod_params.dma64, bool, 0444);
+module_param_named(aspm, rtl92ee_mod_params.aspm_support, int, 0444);
module_param_named(disable_watchdog, rtl92ee_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
@@ -383,6 +388,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
+MODULE_PARM_DESC(dma64, "Set to 1 to use DMA 64 (default 0)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index c58393eab6a1..12255682e890 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -581,13 +581,9 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
u8 i = 0;
u16 real_desc_size = 0x28;
u16 append_early_mode_size = 0;
-#if (RTL8192EE_SEG_NUM == 0)
- u8 segmentnum = 2;
-#elif (RTL8192EE_SEG_NUM == 1)
- u8 segmentnum = 4;
-#elif (RTL8192EE_SEG_NUM == 2)
- u8 segmentnum = 8;
-#endif
+ u8 segmentnum = 1 << (RTL8192EE_SEG_NUM + 1);
+ dma_addr_t desc_dma_addr;
+ bool dma64 = rtlpriv->cfg->mod_params->dma64;
tx_page_size = 2;
current_bd_desc = rtlpci->tx_ring[queue_index].cur_tx_wp;
@@ -609,6 +605,10 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
psblen += 1;
}
+ /* tx desc addr */
+ desc_dma_addr = rtlpci->tx_ring[queue_index].dma +
+ (current_bd_desc * TX_DESC_SIZE);
+
/* Reset */
SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, 0);
SET_TX_BUFF_DESC_PSB(tx_bd_desc, 0);
@@ -618,17 +618,9 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, i, 0);
SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, i, 0);
SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, i, 0);
-#if (DMA_IS_64BIT == 1)
- SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(tx_bd_desc, i, 0);
-#endif
+ SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, i, 0, dma64);
}
- SET_TX_BUFF_DESC_LEN_1(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_AMSDU_1(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_LEN_2(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_AMSDU_2(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_LEN_3(tx_bd_desc, 0);
- SET_TX_BUFF_DESC_AMSDU_3(tx_bd_desc, 0);
/* Clear all status */
CLEAR_PCI_TX_DESC_CONTENT(desc, TX_DESC_SIZE);
@@ -643,14 +635,16 @@ void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
SET_TX_BUFF_DESC_LEN_0(tx_bd_desc, desc_size);
}
SET_TX_BUFF_DESC_PSB(tx_bd_desc, psblen);
- SET_TX_BUFF_DESC_ADDR_LOW_0(tx_bd_desc,
- rtlpci->tx_ring[queue_index].dma +
- (current_bd_desc * TX_DESC_SIZE));
+ SET_TX_BUFF_DESC_ADDR_LOW_0(tx_bd_desc, desc_dma_addr);
+ SET_TX_BUFF_DESC_ADDR_HIGH_0(tx_bd_desc, ((u64)desc_dma_addr >> 32),
+ dma64);
SET_TXBUFFER_DESC_LEN_WITH_OFFSET(tx_bd_desc, 1, pkt_len);
/* don't using extendsion mode. */
SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(tx_bd_desc, 1, 0);
SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(tx_bd_desc, 1, addr);
+ SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(tx_bd_desc, 1,
+ ((u64)addr >> 32), dma64);
SET_TX_DESC_PKT_SIZE(desc, (u16)(pkt_len));
SET_TX_DESC_TX_BUFFER_SIZE(desc, (u16)(pkt_len));
@@ -918,6 +912,7 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
static bool over_run;
u32 tmp = 0;
u8 q_idx = *val;
+ bool dma64 = rtlpriv->cfg->mod_params->dma64;
if (istx) {
switch (desc_name) {
@@ -982,7 +977,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
MAX_RECEIVE_BUFFER_SIZE +
RX_DESC_SIZE);
- SET_RX_BUFFER_PHYSICAL_LOW(pdesc, *(u32 *)val);
+ SET_RX_BUFFER_PHYSICAL_LOW(pdesc, (*(dma_addr_t *)val) &
+ DMA_BIT_MASK(32));
+ SET_RX_BUFFER_PHYSICAL_HIGH(pdesc,
+ ((u64)(*(dma_addr_t *)val)
+ >> 32),
+ dma64);
break;
case HW_DESC_RXERO:
SET_RX_DESC_EOR(pdesc, 1);
@@ -996,9 +996,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
-u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+u64 rtl92ee_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name)
{
- u32 ret = 0;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u64 ret = 0;
+ bool dma64 = rtlpriv->cfg->mod_params->dma64;
if (istx) {
switch (desc_name) {
@@ -1007,6 +1010,8 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
break;
case HW_DESC_TXBUFF_ADDR:
ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1);
+ ret |= (u64)GET_TXBUFFER_DESC_ADDR_HIGH(pdesc, 1,
+ dma64) << 32;
break;
default:
WARN_ONCE(true,
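
rtl92ee_get_desc() now takes the hw pointer (so it can reach the dma64 module parameter) and returns a u64, composed from the buffer descriptor's low address dword plus, when dma64 is set, the high dword shifted into the upper half. A user-space sketch of that composition follows, with simplified little-endian accessors standing in for LE_BITS_TO_4BYTE; the descriptor layout is a minimal stand-in.

/*
 * Sketch: rebuild a 64-bit TX buffer address from the low dword at
 * offset 4 and the (dma64-only) high dword at offset 8.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint64_t get_txbuff_addr(const uint8_t *bd, bool dma64)
{
        uint64_t addr = get_le32(bd + 4);                 /* ADDR_LOW  */

        if (dma64)
                addr |= (uint64_t)get_le32(bd + 8) << 32; /* ADDR_HIGH */
        return addr;
}

int main(void)
{
        uint8_t bd[16] = { 0 };
        uint64_t dma = 0x0000000123456780ULL;

        put_le32(bd + 4, (uint32_t)dma);                  /* low dword  */
        put_le32(bd + 8, (uint32_t)(dma >> 32));          /* high dword */

        printf("addr = 0x%016" PRIx64 "\n", get_txbuff_addr(bd, true));
        return 0;
}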
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index b0105c529010..48c16fff20c6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -26,24 +26,6 @@
#ifndef __RTL92E_TRX_H__
#define __RTL92E_TRX_H__
-#if (DMA_IS_64BIT == 1)
-#if (RTL8192EE_SEG_NUM == 2)
-#define TX_BD_DESC_SIZE 128
-#elif (RTL8192EE_SEG_NUM == 1)
-#define TX_BD_DESC_SIZE 64
-#elif (RTL8192EE_SEG_NUM == 0)
-#define TX_BD_DESC_SIZE 32
-#endif
-#else
-#if (RTL8192EE_SEG_NUM == 2)
-#define TX_BD_DESC_SIZE 64
-#elif (RTL8192EE_SEG_NUM == 1)
-#define TX_BD_DESC_SIZE 32
-#elif (RTL8192EE_SEG_NUM == 0)
-#define TX_BD_DESC_SIZE 16
-#endif
-#endif
-
#define TX_DESC_SIZE 64
#define RX_DRV_INFO_SIZE_UNIT 8
@@ -331,111 +313,34 @@
SET_BITS_TO_LE_4BYTE(__pdesc+(__set*16)+8, 0, 32, __val)
/* for Txfilldescroptor92ee, fill the desc content. */
-#if (DMA_IS_64BIT == 1)
-#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16), 0, 16, __val)
-#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16), 31, 1, __val)
-#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16)+4, 0, 32, __val)
-#define SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(__pdesc, __offset, __val)\
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*16)+8, 0, 32, __val)
-#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \
- LE_BITS_TO_4BYTE(__pdesc+(__offset*16)+4, 0, 32)
-#else
-#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8), 0, 16, __val)
-#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8), 31, 1, __val)
-#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+(__offset*8)+4, 0, 32, __val)
-#define SET_TXBUFFER_DESC_ADD_HIGT_WITH_OFFSET(__pdesc, __offset, __val)
-#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \
- LE_BITS_TO_4BYTE(__pdesc+(__offset*8)+4, 0, 32)
-#endif
+#define SET_TXBUFFER_DESC_LEN_WITH_OFFSET(__pdesc, __offset, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 0, 16, __val)
+#define SET_TXBUFFER_DESC_AMSDU_WITH_OFFSET(__pdesc, __offset, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16), 31, 1, __val)
+#define SET_TXBUFFER_DESC_ADD_LOW_WITH_OFFSET(__pdesc, __offset, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32, __val)
+#define SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(pbd, off, val, dma64) \
+ (dma64 ? SET_BITS_TO_LE_4BYTE((pbd) + ((off) * 16) + 8, 0, 32, val) : 0)
+#define GET_TXBUFFER_DESC_ADDR_LOW(__pdesc, __offset) \
+ LE_BITS_TO_4BYTE((__pdesc) + ((__offset) * 16) + 4, 0, 32)
+#define GET_TXBUFFER_DESC_ADDR_HIGH(pbd, off, dma64) \
+ (dma64 ? LE_BITS_TO_4BYTE((pbd) + ((off) * 16) + 8, 0, 32) : 0)
/* Dword 0 */
-#define SET_TX_BUFF_DESC_LEN_0(__pdesc, __val) \
+#define SET_TX_BUFF_DESC_LEN_0(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_TX_BUFF_DESC_PSB(__pdesc, __val) \
+#define SET_TX_BUFF_DESC_PSB(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc, 16, 15, __val)
-#define SET_TX_BUFF_DESC_OWN(__pdesc, __val) \
+#define SET_TX_BUFF_DESC_OWN(__pdesc, __val) \
SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
/* Dword 1 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 32, __val)
-#if (DMA_IS_64BIT == 1)
+#define SET_TX_BUFF_DESC_ADDR_LOW_0(__pdesc, __val) \
+ SET_BITS_TO_LE_4BYTE((__pdesc) + 4, 0, 32, __val)
/* Dword 2 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 32, __val)
+#define SET_TX_BUFF_DESC_ADDR_HIGH_0(bdesc, val, dma64) \
+ SET_TXBUFFER_DESC_ADD_HIGH_WITH_OFFSET(bdesc, 0, val, dma64)
/* Dword 3 / RESERVED 0 */
-/* Dword 4 */
-#define SET_TX_BUFF_DESC_LEN_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 31, 1, __val)
-/* Dword 5 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 32, __val)
-/* Dword 6 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
-/* Dword 7 / RESERVED 0 */
-/* Dword 8 */
-#define SET_TX_BUFF_DESC_LEN_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 31, 1, __val)
-/* Dword 9 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
-/* Dword 10 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
-/* Dword 11 / RESERVED 0 */
-/* Dword 12 */
-#define SET_TX_BUFF_DESC_LEN_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+48, 31, 1, __val)
-/* Dword 13 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+52, 0, 32, __val)
-/* Dword 14 */
-#define SET_TX_BUFF_DESC_ADDR_HIGH_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+56, 0, 32, __val)
-/* Dword 15 / RESERVED 0 */
-#else
-#define SET_TX_BUFF_DESC_ADDR_HIGH_0(__pdesc, __val)
-/* Dword 2 */
-#define SET_TX_BUFF_DESC_LEN_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 31, 1, __val)
-/* Dword 3 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 32, __val)
-#define SET_TX_BUFF_DESC_ADDR_HIGH_1(__pdesc, __val)
-/* Dword 4 */
-#define SET_TX_BUFF_DESC_LEN_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 31, 1, __val)
-/* Dword 5 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_2(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 32, __val)
-#define SET_TX_BUFF_DESC_ADDR_HIGH_2(__pdesc, __val)
-/* Dword 6 */
-#define SET_TX_BUFF_DESC_LEN_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 16, __val)
-#define SET_TX_BUFF_DESC_AMSDU_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 31, 1, __val)
-/* Dword 7 */
-#define SET_TX_BUFF_DESC_ADDR_LOW_3(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
-#define SET_TX_BUFF_DESC_ADDR_HIGH_3(__pdesc, __val)
-#endif
/* RX buffer */
@@ -463,8 +368,8 @@
SET_BITS_TO_LE_4BYTE(__status+4, 0, 32, __val)
/* DWORD 2 */
-#define SET_RX_BUFFER_PHYSICAL_HIGH(__status, __val) \
- SET_BITS_TO_LE_4BYTE(__status+8, 0, 32, __val)
+#define SET_RX_BUFFER_PHYSICAL_HIGH(__rx_status_desc, __val, dma64) \
+ (dma64 ? SET_BITS_TO_LE_4BYTE((__rx_status_desc) + 8, 0, 32, __val) : 0)
#define GET_RX_DESC_PKT_LEN(__pdesc) \
LE_BITS_TO_4BYTE(__pdesc, 0, 14)
@@ -850,7 +755,8 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
-u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl92ee_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name);
bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index);
void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
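
In trx.h the compile-time DMA_IS_64BIT/RTL8192EE_SEG_NUM selection collapses into a single 16-byte-per-segment layout, and the high-dword accessors become conditional expressions that only touch dword 2 when a runtime dma64 flag is passed in. The sketch below reproduces that conditional-macro pattern; SET_LE32/GET_LE32 are simplified placeholders for SET_BITS_TO_LE_4BYTE/LE_BITS_TO_4BYTE, not the real macros.

/*
 * Sketch: runtime-gated high-dword accessors replacing the old
 * #if (DMA_IS_64BIT == 1) variants.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t le_buf[4];      /* one 16-byte buffer descriptor segment */

#define SET_LE32(idx, val)      (le_buf[(idx)] = (uint32_t)(val))
#define GET_LE32(idx)           (le_buf[(idx)])

#define SET_ADDR_HIGH(val, dma64) \
        ((dma64) ? SET_LE32(2, (val)) : 0)
#define GET_ADDR_HIGH(dma64) \
        ((dma64) ? GET_LE32(2) : 0)

int main(void)
{
        bool dma64 = true;

        SET_LE32(1, 0x23456780u);       /* low dword */
        SET_ADDR_HIGH(0x1u, dma64);     /* high dword, written only if dma64 */
        printf("high dword = 0x%08" PRIx32 "\n", GET_ADDR_HIGH(dma64));
        return 0;
}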
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
index dfa9dbbe2cdf..e577235f5286 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8192se-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
index 2c073a77b194..44f510a94b09 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
@@ -295,7 +295,8 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
sta = rtl_find_sta(hw, mac->bssid);
if (sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
- ra->ratr_state);
+ ra->ratr_state,
+ true);
rcu_read_unlock();
ra->pre_ratr_state = ra->ratr_state;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index ba1bd782238b..76bf089cced4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1559,7 +1559,7 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
}
void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
- u32 *p_intb)
+ u32 *p_intb, u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -2129,7 +2129,7 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2288,12 +2288,12 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl92se_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl92se_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl92se_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl92se_update_hal_rate_table(hw, sta);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 86bce1be83ce..607056010974 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -42,7 +42,8 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
u8 variable, u8 *val);
void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *inta, u32 *intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl92se_hw_init(struct ieee80211_hw *hw);
void rtl92se_card_disable(struct ieee80211_hw *hw);
void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
@@ -59,7 +60,7 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable,
u8 *val);
void rtl92se_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw);
void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw,
u8 *valid);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index d7945b9db493..d55554b7fa9a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -41,6 +41,7 @@
static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -77,7 +78,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 2;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
@@ -240,7 +241,7 @@ static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl92se_get_desc(hw, entry, true, HW_DESC_OWN);
if (own)
return false;
@@ -297,6 +298,7 @@ static struct rtl_mod_params rtl92se_mod_params = {
.inactiveps = true,
.swctrl_lps = true,
.fwctrl_lps = false,
+ .aspm_support = 2,
.debug_level = 0,
.debug_mask = 0,
};
@@ -422,10 +424,12 @@ module_param_named(debug_mask, rtl92se_mod_params.debug_mask, ullong, 0644);
module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(aspm, rtl92se_mod_params.aspm_support, int, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index a01dbd31d1b4..e1904c39f147 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -610,7 +610,8 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
-u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
+u64 rtl92se_get_desc(struct ieee80211_hw *hw,
+ u8 *desc, bool istx, u8 desc_name)
{
u32 ret = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
index 728589138072..81a5445c04a3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
@@ -38,7 +38,8 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
struct sk_buff *skb);
void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
-u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl92se_get_desc(struct ieee80211_hw *hw,
+ u8 *desc, bool istx, u8 desc_name);
void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
index e7607d2cb2ef..d1238707291f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8723ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 5ac7b815648a..c3f98d58124c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1340,7 +1340,8 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
}
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1943,7 +1944,7 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw,
static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2074,12 +2075,13 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl8723e_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl8723e_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl8723e_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
rtl8723e_update_hal_rate_table(hw, sta);
}
@@ -2103,7 +2105,7 @@ bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp;
bool b_actuallyset = false;
@@ -2122,8 +2124,6 @@ bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index 32c1ace97c3f..19e467a37c72 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -34,7 +34,8 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl8723e_hw_init(struct ieee80211_hw *hw);
void rtl8723e_card_disable(struct ieee80211_hw *hw);
void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
@@ -49,7 +50,8 @@ void rtl8723e_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723e_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void rtl8723e_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 97b8bd294aa8..a545ea317323 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -46,6 +46,7 @@
static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -85,7 +86,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
@@ -268,6 +269,7 @@ static struct rtl_mod_params rtl8723e_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .aspm_support = 1,
.debug_level = 0,
.debug_mask = 0,
.msi_support = false,
@@ -389,6 +391,7 @@ module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
+module_param_named(aspm, rtl8723e_mod_params.aspm_support, int, 0444);
module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
@@ -396,6 +399,7 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index f713c7249fed..23485602a9a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -643,7 +643,8 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+u64 rtl8723e_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name)
{
u32 ret = 0;
@@ -686,7 +687,7 @@ bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl8723e_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl8723e_get_desc(hw, entry, true, HW_DESC_OWN);
/**
*beacon packet will only use the first
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
index 43d4c791d563..985ce0b77ea5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
@@ -708,7 +708,8 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl8723e_set_desc(struct ieee80211_hw *hw,
u8 *pdesc, bool istx, u8 desc_name, u8 *val);
-u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl8723e_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name);
bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
index a841cbd55d8e..d3ed44d80011 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8723be-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index 15c117e95a99..47e87a21ae27 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -984,7 +984,8 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
sta = rtl_find_sta(hw, mac->bssid);
if (sta)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
- p_ra->ratr_state);
+ p_ra->ratr_state,
+ true);
rcu_read_unlock();
p_ra->pre_ratr_state = p_ra->ratr_state;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 4d47b97adfed..7cd1ffa7d4a7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -60,6 +60,7 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->get_desc(
+ hw,
(u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -1681,7 +1682,8 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
}
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -2324,7 +2326,7 @@ static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2440,7 +2442,7 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[0] = macid;
rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) |
(shortgi ? 0x80 : 0x00);
- rate_mask[2] = curtxbw_40mhz;
+ rate_mask[2] = curtxbw_40mhz | ((!update_bw) << 3);
rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
@@ -2460,11 +2462,11 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level)
+ u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl8723be_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl8723be_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
}
void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw)
@@ -2486,7 +2488,7 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp;
bool b_actuallyset = false;
@@ -2505,8 +2507,6 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index eae863d08de8..2215a792f6bf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -30,7 +30,8 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl8723be_hw_init(struct ieee80211_hw *hw);
void rtl8723be_card_disable(struct ieee80211_hw *hw);
void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
@@ -46,7 +47,7 @@ void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level);
+ u8 rssi_level, bool update_bw);
void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 2b16a1467e78..6a42988aad65 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -46,6 +46,7 @@
static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -82,7 +83,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
@@ -271,6 +272,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = true,
.msi_support = false,
+ .aspm_support = 1,
.disable_watchdog = false,
.debug_level = 0,
.debug_mask = 0,
@@ -396,6 +398,7 @@ module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+module_param_named(aspm, rtl8723be_mod_params.aspm_support, int, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
@@ -404,6 +407,7 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 0e8944119652..fd9b38aa08a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -695,7 +695,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+u64 rtl8723be_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name)
{
u32 ret = 0;
@@ -738,7 +739,7 @@ bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl8723be_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl8723be_get_desc(hw, entry, true, HW_DESC_OWN);
/*beacon packet will only use the first
*descriptor defautly,and the own may not
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
index 0274659f48ed..988bf0586674 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
@@ -624,7 +624,8 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
bool istx, u8 desc_name, u8 *val);
-u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl8723be_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name);
bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
index 73da75526e2a..3451198b670c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8723-common-objs := \
main.o \
dm_common.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index ac573d69f6d6..efa7e1262461 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -253,7 +253,8 @@ bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+ own = (u8)rtlpriv->cfg->ops->get_desc(hw, (u8 *)pdesc, true,
+ HW_DESC_OWN);
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
index 8ca406b95f02..81dbf4ca6bc6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtl8821ae-objs := \
dm.o \
fw.o \
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 32900c51f024..b11365a5ee1f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2592,7 +2592,7 @@ static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
sta = rtl_find_sta(hw, mac->bssid);
if (sta)
rtlpriv->cfg->ops->update_rate_tbl(hw,
- sta, p_ra->ratr_state);
+ sta, p_ra->ratr_state, true);
rcu_read_unlock();
p_ra->pre_ratr_state = p_ra->ratr_state;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 1d431d4bf6d2..43e18c4c1e68 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -57,6 +57,7 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->get_desc(
+ hw,
(u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
kfree_skb(skb);
@@ -1130,13 +1131,13 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
static void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data)
{
u8 tmp = 0, count = 0;
- u16 wrtie_addr, remainder = addr % 4;
+ u16 write_addr, remainder = addr % 4;
- wrtie_addr = REG_DBI_WDATA + remainder;
- rtl_write_byte(rtlpriv, wrtie_addr, data);
+ write_addr = REG_DBI_WDATA + remainder;
+ rtl_write_byte(rtlpriv, write_addr, data);
- wrtie_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12));
- rtl_write_word(rtlpriv, REG_DBI_ADDR, wrtie_addr);
+ write_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12));
+ rtl_write_word(rtlpriv, REG_DBI_ADDR, write_addr);
rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1);
@@ -1363,7 +1364,6 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
u8 fw_reason = 0;
- struct timeval ts;
fw_reason = rtl_read_byte(rtlpriv, REG_MCUTST_WOWLAN);
@@ -1372,20 +1372,16 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
ppsc->wakeup_reason = 0;
- rtlhal->last_suspend_sec = ts.tv_sec;
+ rtlhal->last_suspend_sec = ktime_get_real_seconds();
switch (fw_reason) {
case FW_WOW_V2_PTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_PTK_UPDATE;
- do_gettimeofday(&ts);
- ppsc->last_wakeup_time = ts.tv_sec*1000 + ts.tv_usec/1000;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
"It's a WOL PTK Key update event!\n");
break;
case FW_WOW_V2_GTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_GTK_UPDATE;
- do_gettimeofday(&ts);
- ppsc->last_wakeup_time = ts.tv_sec*1000 + ts.tv_usec/1000;
RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
"It's a WOL GTK Key update event!\n");
break;
@@ -2487,7 +2483,8 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
}
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb)
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -3598,7 +3595,7 @@ static bool _rtl8821ae_get_ra_shortgi(struct ieee80211_hw *hw, struct ieee80211_
}
static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -3777,7 +3774,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[0] = macid;
rate_mask[1] = ratr_index | (b_shortgi ? 0x80 : 0x00);
- rate_mask[2] = rtlphy->current_chan_bw
+ rate_mask[2] = rtlphy->current_chan_bw | ((!update_bw) << 3)
| _rtl8821ae_get_vht_eni(wirelessmode, ratr_bitmap)
| _rtl8821ae_get_ra_ldpc(hw, macid, sta_entry, wirelessmode);
@@ -3798,11 +3795,11 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
}
void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level)
+ struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->dm.useramask)
- rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level);
+ rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
/*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD,
"rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only\n");*/
@@ -3845,7 +3842,7 @@ bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_phy *rtlphy = &rtlpriv->phy;
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp = 0;
bool b_actuallyset = false;
@@ -3864,8 +3861,6 @@ bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
rtl_read_byte(rtlpriv,
REG_GPIO_IO_SEL_2) & ~(BIT(1)));
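
The wakeup-reason path above drops struct timeval and do_gettimeofday() in favour of ktime_get_real_seconds(), which returns a 64-bit, y2038-safe seconds count, and the per-event last_wakeup_time bookkeeping is removed. A user-space analogue, using time() purely for illustration of the 64-bit seconds value:

/*
 * Sketch: keep the suspend timestamp as a 64-bit seconds count rather
 * than a 32-bit timeval.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        /* 64-bit seconds value; safe past year 2038 */
        int64_t last_suspend_sec = (int64_t)time(NULL);

        printf("last_suspend_sec = %lld\n", (long long)last_suspend_sec);
        return 0;
}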
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index a3553e3abaa1..284d259fe557 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -30,7 +30,8 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int rtl8821ae_hw_init(struct ieee80211_hw *hw);
void rtl8821ae_card_disable(struct ieee80211_hw *hw);
void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
@@ -46,7 +47,7 @@ void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
- u8 rssi_level);
+ u8 rssi_level, bool update_bw);
void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 0894ef48ab87..ab5d462b1a3a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -43,6 +43,7 @@
static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
/*close ASPM for AMD defaultly */
@@ -82,7 +83,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
* 1 - Support ASPM,
* 2 - According to chipset.
*/
- rtlpci->const_support_pciaspm = 1;
+ rtlpci->const_support_pciaspm = rtlpriv->cfg->mod_params->aspm_support;
}
/*InitializeVariables8812E*/
@@ -313,6 +314,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
.swctrl_lps = false,
.fwctrl_lps = true,
.msi_support = true,
+ .aspm_support = 1,
.int_clear = true,
.debug_level = 0,
.debug_mask = 0,
@@ -444,6 +446,7 @@ module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
+module_param_named(aspm, rtl8821ae_mod_params.aspm_support, int, 0444);
module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
bool, 0444);
module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
@@ -452,6 +455,7 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
+MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n");
MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 749818929e8f..1e1bacf562f3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -935,7 +935,8 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
}
-u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+u64 rtl8821ae_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name)
{
u32 ret = 0;
@@ -980,7 +981,7 @@ bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl8821ae_get_desc(entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl8821ae_get_desc(hw, entry, true, HW_DESC_OWN);
/**
*beacon packet will only use the first
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index 9843a616dcec..221dd2b29d3b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -620,7 +620,8 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
u8 *pdesc, struct sk_buff *skb);
void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
bool istx, u8 desc_name, u8 *val);
-u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+u64 rtl8821ae_get_desc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool istx, u8 desc_name);
bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
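Note on the prototype change above: get_desc now takes the ieee80211_hw pointer and returns u64, so a single op can hand back descriptor fields wider than 32 bits (buffer addresses once 64-bit DMA is in use) as well as the old one-byte flags. A minimal caller sketch, assuming the existing HW_DESC_OWN and HW_DESC_TXBUFF_ADDR descriptor names and a populated rtlpriv:

	/* Sketch only: reading fields through the widened get_desc op. */
	u8  own  = (u8)rtlpriv->cfg->ops->get_desc(hw, entry, true, HW_DESC_OWN);
	u64 addr = rtlpriv->cfg->ops->get_desc(hw, entry, true, HW_DESC_TXBUFF_ADDR);

Narrow fields such as OWN are still truncated at the call site, exactly as the trx.c hunk does.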
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 5590d07d0918..39b033b3b53a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -952,17 +952,12 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
u16 hw_queue)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rtl_tx_desc *pdesc = NULL;
struct rtl_tcb_desc tcb_desc;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
u8 *pda_addr = hdr->addr1;
- /* ssn */
- u8 *qc = NULL;
- u8 tid = 0;
- u16 seq_number = 0;
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
@@ -983,20 +978,8 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
rtlpriv->stats.txbytesbroadcast += skb->len;
else
rtlpriv->stats.txbytesunicast += skb->len;
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- seq_number = (le16_to_cpu(hdr->seq_ctrl) &
- IEEE80211_SCTL_SEQ) >> 4;
- seq_number += 1;
- seq_number <<= 4;
- }
rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
hw_queue, &tcb_desc);
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- if (qc)
- mac->tids[tid].seq_number = seq_number;
- }
if (ieee80211_is_data(fc))
rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 1ab1024330fb..92d4859ec906 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -169,16 +169,14 @@ enum rtl8192c_h2c_cmd {
#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6
#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5
-#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+#define BUFDESC_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
#define DEL_SW_IDX_SZ 30
-#define BAND_NUM 3
/* For now, it's just for 8192ee
* but not OK yet, keep it 0
*/
-#define DMA_IS_64BIT 0
-#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+#define RTL8192EE_SEG_NUM BUFDESC_SEG_NUM
enum rf_tx_num {
RF_1TX = 0,
@@ -561,6 +559,11 @@ enum rf_type {
RF_1T2R = 1,
RF_2T2R = 2,
RF_2T2R_GREEN = 3,
+ RF_2T3R = 4,
+ RF_2T4R = 5,
+ RF_3T3R = 6,
+ RF_3T4R = 7,
+ RF_4T4R = 8,
};
enum ht_channel_width {
@@ -706,6 +709,7 @@ enum rtl_var_map {
RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
RTL_IMR_RDU, /*Receive Descriptor Unavailable */
RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
+ RTL_IMR_H2CDOK, /*H2C Queue DMA OK Interrupt */
RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */
RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/
@@ -1014,10 +1018,17 @@ struct init_gain {
};
struct wireless_stats {
- unsigned long txbytesunicast;
- unsigned long txbytesmulticast;
- unsigned long txbytesbroadcast;
- unsigned long rxbytesunicast;
+ u64 txbytesunicast;
+ u64 txbytesmulticast;
+ u64 txbytesbroadcast;
+ u64 rxbytesunicast;
+
+ u64 txbytesunicast_inperiod;
+ u64 rxbytesunicast_inperiod;
+ u32 txbytesunicast_inperiod_tp;
+ u32 rxbytesunicast_inperiod_tp;
+ u64 txbytesunicast_last;
+ u64 rxbytesunicast_last;
long rx_snr_db[4];
/*Correct smoothed ss in Dbm, only used
@@ -1314,7 +1325,6 @@ struct rssi_sta {
};
struct rtl_tid_data {
- u16 seq_number;
struct rtl_ht_agg agg;
};
@@ -1323,6 +1333,7 @@ struct rtl_sta_info {
struct rtl_tid_data tids[MAX_TID_COUNT];
/* just used for ap adhoc or mesh*/
struct rssi_sta rssi_stat;
+ u8 rssi_level;
u16 wireless_mode;
u8 ratr_index;
u8 mimo_ps;
@@ -1589,7 +1600,7 @@ struct rtl_hal {
bool enter_pnp_sleep;
bool wake_from_pnp_sleep;
bool wow_enabled;
- __kernel_time_t last_suspend_sec;
+ time64_t last_suspend_sec;
u32 wowlan_fwsize;
u8 *wowlan_firmware;
@@ -1736,21 +1747,6 @@ struct rtl_dm {
s8 swing_diff_2g;
s8 swing_diff_5g;
- u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24gcckb_p[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24gcckb_n[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24ga_p[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24ga_n[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24gb_p[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24gb_n[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_5ga_p[BAND_NUM][DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_5ga_n[BAND_NUM][DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_5gb_p[BAND_NUM][DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_5gb_n[BAND_NUM][DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24ga_p_8188e[DEL_SW_IDX_SZ];
- u8 delta_swing_table_idx_24ga_n_8188e[DEL_SW_IDX_SZ];
-
/* DMSP */
bool supp_phymode_switch;
@@ -1958,8 +1954,6 @@ struct rtl_ps_ctl {
u8 gtk_offload_enable;
/* Used for WOL, indicates the reason for waking event.*/
u32 wakeup_reason;
- /* Record the last waking time for comparison with setting key. */
- u64 last_wakeup_time;
};
struct rtl_stats {
@@ -2105,7 +2099,8 @@ struct rtl_hal_ops {
void (*read_chip_version)(struct ieee80211_hw *hw);
void (*read_eeprom_info) (struct ieee80211_hw *hw);
void (*interrupt_recognized) (struct ieee80211_hw *hw,
- u32 *p_inta, u32 *p_intb);
+ u32 *p_inta, u32 *p_intb,
+ u32 *p_intc, u32 *p_intd);
int (*hw_init) (struct ieee80211_hw *hw);
void (*hw_disable) (struct ieee80211_hw *hw);
void (*hw_suspend) (struct ieee80211_hw *hw);
@@ -2127,7 +2122,8 @@ struct rtl_hal_ops {
void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
void (*update_rate_tbl) (struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level);
+ struct ieee80211_sta *sta, u8 rssi_level,
+ bool update_bw);
void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
u8 *desc, u8 queue_index,
struct sk_buff *skb, dma_addr_t addr);
@@ -2148,6 +2144,9 @@ struct rtl_hal_ops {
void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
+ void (*fill_tx_special_desc)(struct ieee80211_hw *hw,
+ u8 *pdesc, u8 *pbd_desc,
+ struct sk_buff *skb, u8 hw_queue);
bool (*query_rx_desc) (struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
@@ -2162,7 +2161,8 @@ struct rtl_hal_ops {
enum led_ctl_mode ledaction);
void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
u8 desc_name, u8 *val);
- u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+ u64 (*get_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name);
bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
@@ -2261,6 +2261,12 @@ struct rtl_mod_params {
*/
bool msi_support;
+ /* default: 0 = dma 32 */
+ bool dma64;
+
+ /* default: 1 = enable aspm */
+ int aspm_support;
+
/* default 0: 1 means disable */
bool disable_watchdog;
@@ -2857,19 +2863,19 @@ value to host byte ordering.*/
cpu_to_le32( \
LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
- );
+ )
#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
*((__le16 *)(__pstart)) = \
cpu_to_le16( \
LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
- );
+ )
#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
*((u8 *)(__pstart)) = EF1BYTE \
( \
LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
- );
+ )
#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
(__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
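Note on the SET_BITS_TO_LE_* hunk above: the trailing semicolons are dropped from the macro bodies so that each invocation is terminated by the caller, like any other statement. A self-contained, generic illustration of why a baked-in semicolon is harmful (example code, not rtlwifi):

	#include <stdio.h>

	/* Statement-like macro with a trailing semicolon baked in. */
	#define SET_FLAG_BAD(x)  ((x) |= 0x1);
	/* Same operation without it; the caller supplies the semicolon. */
	#define SET_FLAG_GOOD(x) ((x) |= 0x1)

	int main(void)
	{
		unsigned int flags = 0;

		if (flags == 0)
			SET_FLAG_GOOD(flags);	/* expands to one clean statement */
		else
			flags = 0;

		/*
		 * The same construct with SET_FLAG_BAD would expand to
		 *
		 *	if (flags == 0)
		 *		((flags) |= 0x1);;
		 *	else
		 *		flags = 0;
		 *
		 * The stray ';' ends the if early, so the 'else' no longer
		 * pairs with it and the compile fails.
		 */
		printf("flags = %#x\n", flags);
		return 0;
	}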
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
index ebb89965997a..47c45908d894 100644
--- a/drivers/net/wireless/rsi/Makefile
+++ b/drivers/net/wireless/rsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rsi_91x-y += rsi_91x_main.o
rsi_91x-y += rsi_91x_core.o
rsi_91x-y += rsi_91x_mac80211.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 2b0516d2f63d..d0d2201830e8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -95,6 +95,8 @@ static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
s16 txop = common->tx_qinfo[q_num].txop * 32;
__le16 r_txop;
struct ieee80211_rate rate;
+ struct ieee80211_hdr *wh;
+ struct ieee80211_vif *vif;
rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
if (q_num == VI_Q)
@@ -106,8 +108,10 @@ static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
return 0;
do {
+ wh = (struct ieee80211_hdr *)skb->data;
+ vif = rsi_get_vif(adapter, wh->addr2);
r_txop = ieee80211_generic_frame_duration(adapter->hw,
- adapter->vifs[0],
+ vif,
common->band,
skb->len, &rate);
txop -= le16_to_cpu(r_txop);
@@ -272,6 +276,8 @@ void rsi_core_qos_processor(struct rsi_common *common)
rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
break;
}
+ if (common->hibernate_resume)
+ break;
mutex_lock(&common->tx_lock);
@@ -334,6 +340,21 @@ struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
return NULL;
}
+struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac)
+{
+ struct ieee80211_vif *vif;
+ int i;
+
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ vif = adapter->vifs[i];
+ if (!vif)
+ continue;
+ if (!memcmp(vif->addr, mac, ETH_ALEN))
+ return vif;
+ }
+ return NULL;
+}
+
/**
* rsi_core_xmit() - This function transmits the packets received from mac80211
* @common: Pointer to the driver private structure.
@@ -346,8 +367,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
struct rsi_hw *adapter = common->priv;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_hdr *wh;
- struct ieee80211_vif *vif = adapter->vifs[0];
+ struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_vif *vif;
u8 q_num, tid = 0;
struct rsi_sta *rsta = NULL;
@@ -360,12 +381,23 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
goto xmit_fail;
}
+ if (common->wow_flags & RSI_WOW_ENABLED) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Blocking Tx_packets when WOWLAN is enabled\n",
+ __func__);
+ goto xmit_fail;
+ }
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
wh = (struct ieee80211_hdr *)&skb->data[0];
tx_params->sta_id = 0;
+ vif = rsi_get_vif(adapter, wh->addr2);
+ if (!vif)
+ goto xmit_fail;
+ tx_params->vif = vif;
+ tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
if ((ieee80211_is_mgmt(wh->frame_control)) ||
(ieee80211_is_ctl(wh->frame_control)) ||
(ieee80211_is_qos_nullfunc(wh->frame_control))) {
@@ -383,7 +415,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
q_num = skb->priority;
tx_params->tid = tid;
- if ((vif->type == NL80211_IFTYPE_AP) &&
+ if (((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) &&
(!is_broadcast_ether_addr(wh->addr1)) &&
(!is_multicast_ether_addr(wh->addr1))) {
rsta = rsi_find_sta(common, wh->addr1);
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index e98eb55c26cc..8c6ca8e689e4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -83,19 +83,12 @@ static int rsi_version_read(struct seq_file *seq, void *data)
{
struct rsi_common *common = seq->private;
- common->driver_ver.major = 0;
- common->driver_ver.minor = 1;
- common->driver_ver.release_num = 0;
- common->driver_ver.patch_num = 0;
- seq_printf(seq, "Driver : %x.%d.%d.%d\nLMAC : %d.%d.%d.%d\n",
- common->driver_ver.major,
- common->driver_ver.minor,
- common->driver_ver.release_num,
- common->driver_ver.patch_num,
- common->fw_ver.major,
- common->fw_ver.minor,
- common->fw_ver.release_num,
- common->fw_ver.patch_num);
+ seq_printf(seq, "LMAC : %d.%d.%d.%d\n",
+ common->lmac_ver.major,
+ common->lmac_ver.minor,
+ common->lmac_ver.release_num,
+ common->lmac_ver.patch_num);
+
return 0;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 070dfd68bb83..1176de646942 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -42,7 +42,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
struct ieee80211_hdr *wh = NULL;
struct ieee80211_tx_info *info;
struct ieee80211_conf *conf = &adapter->hw->conf;
- struct ieee80211_vif *vif = adapter->vifs[0];
+ struct ieee80211_vif *vif;
struct rsi_mgmt_desc *mgmt_desc;
struct skb_info *tx_params;
struct ieee80211_bss_conf *bss = NULL;
@@ -57,6 +57,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
+ vif = tx_params->vif;
/* Update header size */
header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc);
@@ -78,7 +79,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
tx_params->internal_hdr_size = header_size;
memset(&skb->data[0], 0, header_size);
- bss = &info->control.vif->bss_conf;
+ bss = &vif->bss_conf;
wh = (struct ieee80211_hdr *)&skb->data[header_size];
mgmt_desc = (struct rsi_mgmt_desc *)skb->data;
@@ -95,10 +96,10 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
mgmt_desc->seq_ctrl =
cpu_to_le16(IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl)));
- if (common->band == NL80211_BAND_2GHZ)
- mgmt_desc->rate_info = RSI_RATE_1;
+ if ((common->band == NL80211_BAND_2GHZ) && !common->p2p_enabled)
+ mgmt_desc->rate_info = cpu_to_le16(RSI_RATE_1);
else
- mgmt_desc->rate_info = RSI_RATE_6;
+ mgmt_desc->rate_info = cpu_to_le16(RSI_RATE_6);
if (conf_is_ht40(conf))
mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE);
@@ -121,7 +122,8 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
xtend_desc->retry_cnt = PROBE_RESP_RETRY_CNT;
}
- if ((vif->type == NL80211_IFTYPE_AP) &&
+ if (((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) &&
(ieee80211_is_action(wh->frame_control))) {
struct rsi_sta *rsta = rsi_find_sta(common, wh->addr1);
@@ -130,6 +132,10 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb)
else
return -EINVAL;
}
+ mgmt_desc->rate_info |=
+ cpu_to_le16((tx_params->vap_id << RSI_DESC_VAP_ID_OFST) &
+ RSI_DESC_VAP_ID_MASK);
+
return 0;
}
@@ -151,7 +157,8 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
u16 seq_num;
info = IEEE80211_SKB_CB(skb);
- bss = &info->control.vif->bss_conf;
+ vif = info->control.vif;
+ bss = &vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc);
@@ -175,7 +182,6 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ];
wh = (struct ieee80211_hdr *)&skb->data[header_size];
seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl));
- vif = adapter->vifs[0];
data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ;
@@ -184,7 +190,8 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE);
}
- if ((vif->type == NL80211_IFTYPE_STATION) &&
+ if (((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
(adapter->ps_state == PS_ENABLED))
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
@@ -240,17 +247,23 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
data_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT);
data_desc->sta_id = vap_id;
- if (vif->type == NL80211_IFTYPE_AP) {
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
if (common->band == NL80211_BAND_5GHZ)
data_desc->rate_info = cpu_to_le16(RSI_RATE_6);
else
data_desc->rate_info = cpu_to_le16(RSI_RATE_1);
}
}
- if ((vif->type == NL80211_IFTYPE_AP) &&
+ if (((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) &&
(ieee80211_has_moredata(wh->frame_control)))
data_desc->frame_info |= cpu_to_le16(MORE_DATA_PRESENT);
+ data_desc->rate_info |=
+ cpu_to_le16((tx_params->vap_id << RSI_DESC_VAP_ID_OFST) &
+ RSI_DESC_VAP_ID_MASK);
+
return 0;
}
@@ -258,7 +271,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_vif *vif = adapter->vifs[0];
+ struct ieee80211_vif *vif;
struct ieee80211_tx_info *info;
struct ieee80211_bss_conf *bss;
int status = -EINVAL;
@@ -271,9 +284,12 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
if (!info->control.vif)
goto err;
- bss = &info->control.vif->bss_conf;
+ vif = info->control.vif;
+ bss = &vif->bss_conf;
- if ((vif->type == NL80211_IFTYPE_STATION) && (!bss->assoc))
+ if (((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
+ (!bss->assoc))
goto err;
status = rsi_prepare_data_desc(common, skb);
@@ -306,21 +322,11 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
int status = -E2BIG;
- u8 extnd_size;
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
- extnd_size = ((uintptr_t)skb->data & 0x3);
if (tx_params->flags & INTERNAL_MGMT_PKT) {
- skb->data[1] |= BIT(7); /* Immediate Wakeup bit*/
- if ((extnd_size) > skb_headroom(skb)) {
- rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
- dev_kfree_skb(skb);
- return -ENOSPC;
- }
- skb_push(skb, extnd_size);
- skb->data[extnd_size + 4] = extnd_size;
status = adapter->host_intf_ops->write_pkt(common->priv,
(u8 *)skb->data,
skb->len);
@@ -352,12 +358,23 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
struct rsi_data_desc *bcn_frm;
struct ieee80211_hw *hw = common->priv->hw;
struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_vif *vif;
struct sk_buff *mac_bcn;
- u8 vap_id = 0;
- u16 tim_offset;
-
+ u8 vap_id = 0, i;
+ u16 tim_offset = 0;
+
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ vif = adapter->vifs[i];
+ if (!vif)
+ continue;
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO))
+ break;
+ }
+ if (!vif)
+ return -EINVAL;
mac_bcn = ieee80211_beacon_get_tim(adapter->hw,
- adapter->vifs[adapter->sc_nvifs - 1],
+ vif,
&tim_offset, NULL);
if (!mac_bcn) {
rsi_dbg(ERR_ZONE, "Failed to get beacon from mac80211\n");
@@ -401,9 +418,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
return 0;
}
-static void bl_cmd_timeout(unsigned long priv)
+static void bl_cmd_timeout(struct timer_list *t)
{
- struct rsi_hw *adapter = (struct rsi_hw *)priv;
+ struct rsi_hw *adapter = from_timer(adapter, t, bl_cmd_timer);
adapter->blcmd_timer_expired = true;
del_timer(&adapter->bl_cmd_timer);
@@ -411,9 +428,7 @@ static void bl_cmd_timeout(unsigned long priv)
static int bl_start_cmd_timer(struct rsi_hw *adapter, u32 timeout)
{
- init_timer(&adapter->bl_cmd_timer);
- adapter->bl_cmd_timer.data = (unsigned long)adapter;
- adapter->bl_cmd_timer.function = (void *)&bl_cmd_timeout;
+ timer_setup(&adapter->bl_cmd_timer, bl_cmd_timeout, 0);
adapter->bl_cmd_timer.expires = (msecs_to_jiffies(timeout) + jiffies);
adapter->blcmd_timer_expired = false;
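Note on the two hal.c hunks above: the boot-loader command timer is moved to the timer_setup()/from_timer() interface, where the callback receives the struct timer_list pointer and derives its container, instead of an unsigned long cookie. A minimal sketch of the same pattern with a hypothetical my_dev structure (not part of this driver):

	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <linux/types.h>

	struct my_dev {
		struct timer_list cmd_timer;
		bool expired;
	};

	static void my_cmd_timeout(struct timer_list *t)
	{
		/* Recover the enclosing structure from the timer_list member. */
		struct my_dev *dev = from_timer(dev, t, cmd_timer);

		dev->expired = true;
	}

	static void my_start_cmd_timer(struct my_dev *dev, u32 timeout_ms)
	{
		timer_setup(&dev->cmd_timer, my_cmd_timeout, 0);
		dev->expired = false;
		mod_timer(&dev->cmd_timer, jiffies + msecs_to_jiffies(timeout_ms));
	}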
@@ -752,6 +767,7 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
static int rsi_load_firmware(struct rsi_hw *adapter)
{
+ struct rsi_common *common = adapter->priv;
struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
const struct firmware *fw_entry = NULL;
u32 regout_val = 0, content_size;
@@ -827,6 +843,18 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
content_size = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", content_size);
+ /* Get the firmware version */
+ common->lmac_ver.ver.info.fw_ver[0] =
+ flash_content[LMAC_VER_OFFSET] & 0xFF;
+ common->lmac_ver.ver.info.fw_ver[1] =
+ flash_content[LMAC_VER_OFFSET + 1] & 0xFF;
+ common->lmac_ver.major = flash_content[LMAC_VER_OFFSET + 2] & 0xFF;
+ common->lmac_ver.release_num =
+ flash_content[LMAC_VER_OFFSET + 3] & 0xFF;
+ common->lmac_ver.minor = flash_content[LMAC_VER_OFFSET + 4] & 0xFF;
+ common->lmac_ver.patch_num = 0;
+ rsi_print_version(common);
+
status = bl_write_header(adapter, flash_content, content_size);
if (status) {
rsi_dbg(ERR_ZONE,
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index fa12c05d9e23..32f5cb46fd4f 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include "rsi_debugfs.h"
#include "rsi_mgmt.h"
+#include "rsi_sdio.h"
#include "rsi_common.h"
#include "rsi_ps.h"
@@ -139,6 +140,32 @@ static const u32 rsi_max_ap_stas[16] = {
4, /* 14 - AP + BT Dual */
};
+static const struct ieee80211_iface_limit rsi_iface_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_combination rsi_iface_combinations[] = {
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = rsi_iface_limits,
+ .n_limits = ARRAY_SIZE(rsi_iface_limits),
+ },
+};
+
/**
* rsi_is_cipher_wep() - This function determines if the cipher is WEP or not.
* @common: Pointer to the driver private structure.
@@ -299,6 +326,11 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw)
rsi_dbg(ERR_ZONE, "===> Interface UP <===\n");
mutex_lock(&common->mutex);
+ if (common->hibernate_resume) {
+ common->reinit_hw = true;
+ adapter->host_intf_ops->reinit_device(adapter);
+ wait_for_completion(&adapter->priv->wlan_init_completion);
+ }
common->iface_down = false;
wiphy_rfkill_start_polling(hw->wiphy);
rsi_send_rx_filter_frame(common, 0);
@@ -329,6 +361,24 @@ static void rsi_mac80211_stop(struct ieee80211_hw *hw)
mutex_unlock(&common->mutex);
}
+static int rsi_map_intf_mode(enum nl80211_iftype vif_type)
+{
+ switch (vif_type) {
+ case NL80211_IFTYPE_STATION:
+ return RSI_OPMODE_STA;
+ case NL80211_IFTYPE_AP:
+ return RSI_OPMODE_AP;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return RSI_OPMODE_P2P_CLIENT;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return RSI_OPMODE_P2P_CLIENT;
+ case NL80211_IFTYPE_P2P_GO:
+ return RSI_OPMODE_P2P_GO;
+ default:
+ return RSI_OPMODE_UNSUPPORTED;
+ }
+}
+
/**
* rsi_mac80211_add_interface() - This function is called when a netdevice
* attached to the hardware is enabled.
@@ -342,54 +392,62 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
+ struct vif_priv *vif_info = (struct vif_priv *)vif->drv_priv;
enum opmode intf_mode;
- int ret = -EOPNOTSUPP;
+ enum vap_status vap_status;
+ int vap_idx = -1, i;
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
mutex_lock(&common->mutex);
- if (adapter->sc_nvifs > 1) {
- mutex_unlock(&common->mutex);
- return -EOPNOTSUPP;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rsi_dbg(INFO_ZONE, "Station Mode");
- intf_mode = STA_OPMODE;
- break;
- case NL80211_IFTYPE_AP:
- rsi_dbg(INFO_ZONE, "AP Mode");
- intf_mode = AP_OPMODE;
- break;
- default:
+ intf_mode = rsi_map_intf_mode(vif->type);
+ if (intf_mode == RSI_OPMODE_UNSUPPORTED) {
rsi_dbg(ERR_ZONE,
"%s: Interface type %d not supported\n", __func__,
vif->type);
- goto out;
+ mutex_unlock(&common->mutex);
+ return -EOPNOTSUPP;
+ }
+ if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO))
+ common->p2p_enabled = true;
+
+ /* Get free vap index */
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ if (!adapter->vifs[i]) {
+ vap_idx = i;
+ break;
+ }
}
+ if (vap_idx < 0) {
+ rsi_dbg(ERR_ZONE, "Reject: Max VAPs reached\n");
+ mutex_unlock(&common->mutex);
+ return -EOPNOTSUPP;
+ }
+ vif_info->vap_id = vap_idx;
+ adapter->vifs[vap_idx] = vif;
+ adapter->sc_nvifs++;
+ vap_status = VAP_ADD;
- adapter->vifs[adapter->sc_nvifs++] = vif;
- ret = rsi_set_vap_capabilities(common, intf_mode, common->mac_addr,
- 0, VAP_ADD);
- if (ret) {
+ if (rsi_set_vap_capabilities(common, intf_mode, vif->addr,
+ vif_info->vap_id, vap_status)) {
rsi_dbg(ERR_ZONE, "Failed to set VAP capabilities\n");
- goto out;
+ mutex_unlock(&common->mutex);
+ return -EINVAL;
}
- if (vif->type == NL80211_IFTYPE_AP) {
- int i;
-
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
rsi_send_rx_filter_frame(common, DISALLOW_BEACONS);
common->min_rate = RSI_RATE_AUTO;
for (i = 0; i < common->max_stations; i++)
common->stations[i].sta = NULL;
}
-out:
mutex_unlock(&common->mutex);
- return ret;
+ return 0;
}
/**
@@ -406,6 +464,7 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
enum opmode opmode;
+ int i;
rsi_dbg(INFO_ZONE, "Remove Interface Called\n");
@@ -416,23 +475,22 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
return;
}
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- opmode = STA_OPMODE;
- break;
- case NL80211_IFTYPE_AP:
- opmode = AP_OPMODE;
- break;
- default:
+ opmode = rsi_map_intf_mode(vif->type);
+ if (opmode == RSI_OPMODE_UNSUPPORTED) {
+ rsi_dbg(ERR_ZONE, "Opmode error : %d\n", opmode);
mutex_unlock(&common->mutex);
return;
}
- rsi_set_vap_capabilities(common, opmode, vif->addr,
- 0, VAP_DELETE);
- adapter->sc_nvifs--;
-
- if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
- adapter->vifs[0] = NULL;
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ if (!adapter->vifs[i])
+ continue;
+ if (vif == adapter->vifs[i]) {
+ rsi_set_vap_capabilities(common, opmode, vif->addr,
+ i, VAP_DELETE);
+ adapter->sc_nvifs--;
+ adapter->vifs[i] = NULL;
+ }
+ }
mutex_unlock(&common->mutex);
}
@@ -451,35 +509,44 @@ static int rsi_channel_change(struct ieee80211_hw *hw)
int status = -EOPNOTSUPP;
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
u16 channel = curchan->hw_value;
- struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *bss;
+ bool assoc = false;
+ int i;
rsi_dbg(INFO_ZONE,
"%s: Set channel: %d MHz type: %d channel_no %d\n",
__func__, curchan->center_freq,
curchan->flags, channel);
- if (bss->assoc) {
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ vif = adapter->vifs[i];
+ if (!vif)
+ continue;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss = &vif->bss_conf;
+ if (bss->assoc) {
+ assoc = true;
+ break;
+ }
+ }
+ }
+ if (assoc) {
if (!common->hw_data_qs_blocked &&
- (rsi_get_connected_channel(adapter) != channel)) {
+ (rsi_get_connected_channel(vif) != channel)) {
rsi_dbg(INFO_ZONE, "blk data q %d\n", channel);
if (!rsi_send_block_unblock_frame(common, true))
common->hw_data_qs_blocked = true;
}
}
- status = rsi_band_check(common);
+ status = rsi_band_check(common, curchan);
if (!status)
status = rsi_set_channel(adapter->priv, curchan);
- if (bss->assoc) {
+ if (assoc) {
if (common->hw_data_qs_blocked &&
- (rsi_get_connected_channel(adapter) == channel)) {
- rsi_dbg(INFO_ZONE, "unblk data q %d\n", channel);
- if (!rsi_send_block_unblock_frame(common, false))
- common->hw_data_qs_blocked = false;
- }
- } else {
- if (common->hw_data_qs_blocked) {
+ (rsi_get_connected_channel(vif) == channel)) {
rsi_dbg(INFO_ZONE, "unblk data q %d\n", channel);
if (!rsi_send_block_unblock_frame(common, false))
common->hw_data_qs_blocked = false;
@@ -531,7 +598,6 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- struct ieee80211_vif *vif = adapter->vifs[0];
struct ieee80211_conf *conf = &hw->conf;
int status = -EOPNOTSUPP;
@@ -547,16 +613,30 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
}
/* Power save parameters */
- if ((changed & IEEE80211_CONF_CHANGE_PS) &&
- (vif->type == NL80211_IFTYPE_STATION)) {
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ struct ieee80211_vif *vif;
unsigned long flags;
+ int i, set_ps = 1;
- spin_lock_irqsave(&adapter->ps_lock, flags);
- if (conf->flags & IEEE80211_CONF_PS)
- rsi_enable_ps(adapter);
- else
- rsi_disable_ps(adapter);
- spin_unlock_irqrestore(&adapter->ps_lock, flags);
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ vif = adapter->vifs[i];
+ if (!vif)
+ continue;
+ /* Don't go to power save if AP vap exists */
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
+ set_ps = 0;
+ break;
+ }
+ }
+ if (set_ps) {
+ spin_lock_irqsave(&adapter->ps_lock, flags);
+ if (conf->flags & IEEE80211_CONF_PS)
+ rsi_enable_ps(adapter, vif);
+ else
+ rsi_disable_ps(adapter, vif);
+ spin_unlock_irqrestore(&adapter->ps_lock, flags);
+ }
}
/* RTS threshold */
@@ -580,16 +660,42 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
*
* Return: Current connected AP's channel number is returned.
*/
-u16 rsi_get_connected_channel(struct rsi_hw *adapter)
+u16 rsi_get_connected_channel(struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif = adapter->vifs[0];
- if (vif) {
- struct ieee80211_bss_conf *bss = &vif->bss_conf;
- struct ieee80211_channel *channel = bss->chandef.chan;
- return channel->hw_value;
- }
+ struct ieee80211_bss_conf *bss;
+ struct ieee80211_channel *channel;
- return 0;
+ if (!vif)
+ return 0;
+
+ bss = &vif->bss_conf;
+ channel = bss->chandef.chan;
+
+ if (!channel)
+ return 0;
+
+ return channel->hw_value;
+}
+
+static void rsi_switch_channel(struct rsi_hw *adapter,
+ struct ieee80211_vif *vif)
+{
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_channel *channel;
+
+ if (common->iface_down)
+ return;
+ if (!vif)
+ return;
+
+ channel = vif->bss_conf.chandef.chan;
+
+ if (!channel)
+ return;
+
+ rsi_band_check(common, channel);
+ rsi_set_channel(common, channel);
+ rsi_dbg(INFO_ZONE, "Switched to channel - %d\n", channel->hw_value);
}
/**
@@ -626,12 +732,12 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
rsi_send_rx_filter_frame(common, rx_filter_word);
}
rsi_inform_bss_status(common,
- STA_OPMODE,
+ RSI_OPMODE_STA,
bss_conf->assoc,
bss_conf->bssid,
bss_conf->qos,
bss_conf->aid,
- NULL, 0);
+ NULL, 0, vif);
adapter->ps_info.dtim_interval_duration = bss->dtim_period;
adapter->ps_info.listen_interval = conf->listen_interval;
@@ -639,7 +745,7 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
if (bss->assoc) {
if (common->uapsd_bitmap) {
rsi_dbg(INFO_ZONE, "Configuring UAPSD\n");
- rsi_conf_uapsd(adapter);
+ rsi_conf_uapsd(adapter, vif);
}
} else {
common->uapsd_bitmap = 0;
@@ -656,7 +762,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- (vif->type == NL80211_IFTYPE_AP)) {
+ ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO))) {
if (bss->enable_beacon) {
rsi_dbg(INFO_ZONE, "===> BEACON ENABLED <===\n");
common->beacon_enabled = 1;
@@ -775,7 +882,8 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n",
__func__, key->cipher, key_type, key->keylen);
- if (vif->type == NL80211_IFTYPE_AP) {
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
if (sta) {
rsta = rsi_find_sta(adapter->priv, sta->addr);
if (rsta)
@@ -791,7 +899,8 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
RSI_PAIRWISE_KEY,
key->keyidx,
key->cipher,
- sta_id);
+ sta_id,
+ vif);
if (status)
return status;
}
@@ -803,7 +912,8 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw,
key_type,
key->keyidx,
key->cipher,
- sta_id);
+ sta_id,
+ vif);
}
/**
@@ -902,7 +1012,8 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
if (ssn != NULL)
seq_no = *ssn;
- if (vif->type == NL80211_IFTYPE_AP) {
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
rsta = rsi_find_sta(common, sta->addr);
if (!rsta) {
rsi_dbg(ERR_ZONE, "No station mapped\n");
@@ -936,9 +1047,11 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
break;
case IEEE80211_AMPDU_TX_START:
- if (vif->type == NL80211_IFTYPE_STATION)
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT))
common->vif_info[ii].seq_start = seq_no;
- else if (vif->type == NL80211_IFTYPE_AP)
+ else if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO))
rsta->seq_start[tid] = seq_no;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
status = 0;
@@ -958,9 +1071,11 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- if (vif->type == NL80211_IFTYPE_STATION)
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT))
seq_start = common->vif_info[ii].seq_start;
- else if (vif->type == NL80211_IFTYPE_AP)
+ else if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO))
seq_start = rsta->seq_start[tid];
status = rsi_send_aggregation_params_frame(common,
tid,
@@ -1039,9 +1154,9 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
*/
static void rsi_perform_cqm(struct rsi_common *common,
u8 *bssid,
- s8 rssi)
+ s8 rssi,
+ struct ieee80211_vif *vif)
{
- struct rsi_hw *adapter = common->priv;
s8 last_event = common->cqm_info.last_cqm_event_rssi;
int thold = common->cqm_info.rssi_thold;
u32 hyst = common->cqm_info.rssi_hyst;
@@ -1057,7 +1172,7 @@ static void rsi_perform_cqm(struct rsi_common *common,
common->cqm_info.last_cqm_event_rssi = rssi;
rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event);
- ieee80211_cqm_rssi_notify(adapter->vifs[0], event, rssi, GFP_KERNEL);
+ ieee80211_cqm_rssi_notify(vif, event, rssi, GFP_KERNEL);
return;
}
@@ -1077,7 +1192,9 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
struct rsi_common *common,
struct ieee80211_rx_status *rxs)
{
- struct ieee80211_bss_conf *bss = &common->priv->vifs[0]->bss_conf;
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *bss = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct skb_info *rx_params = (struct skb_info *)info->driver_data;
struct ieee80211_hdr *hdr;
@@ -1085,6 +1202,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
u8 hdrlen = 0;
u8 channel = rx_params->channel;
s32 freq;
+ int i;
hdr = ((struct ieee80211_hdr *)(skb->data));
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -1113,10 +1231,21 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
rxs->flag |= RX_FLAG_IV_STRIPPED;
}
+ for (i = 0; i < RSI_MAX_VIFS; i++) {
+ vif = adapter->vifs[i];
+ if (!vif)
+ continue;
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ bss = &vif->bss_conf;
+ break;
+ }
+ }
+ if (!bss)
+ return;
/* CQM only for connected AP beacons, the RSSI is a weighted avg */
if (bss->assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
if (ieee80211_is_beacon(hdr->frame_control))
- rsi_perform_cqm(common, hdr->addr2, rxs->signal);
+ rsi_perform_cqm(common, hdr->addr2, rxs->signal, vif);
}
return;
@@ -1210,7 +1339,8 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
- if (vif->type == NL80211_IFTYPE_AP) {
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
u8 cnt;
int sta_idx = -1;
int free_index = -1;
@@ -1259,8 +1389,9 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
/* Send peer notify to device */
rsi_dbg(INFO_ZONE, "Indicate bss status to device\n");
- rsi_inform_bss_status(common, AP_OPMODE, 1, sta->addr,
- sta->wme, sta->aid, sta, sta_idx);
+ rsi_inform_bss_status(common, RSI_OPMODE_AP, 1,
+ sta->addr, sta->wme, sta->aid,
+ sta, sta_idx, vif);
if (common->key) {
struct ieee80211_key_conf *key = common->key;
@@ -1273,14 +1404,16 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
RSI_PAIRWISE_KEY,
key->keyidx,
key->cipher,
- sta_idx);
+ sta_idx,
+ vif);
}
common->num_stations++;
}
}
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
rsi_set_min_rate(hw, sta, common);
if (sta->ht_cap.ht_supported) {
common->vif_info[0].is_ht = true;
@@ -1321,7 +1454,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
- if (vif->type == NL80211_IFTYPE_AP) {
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
u8 sta_idx, cnt;
/* Send peer notify to device */
@@ -1332,9 +1466,10 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
if (!rsta->sta)
continue;
if (!memcmp(rsta->sta->addr, sta->addr, ETH_ALEN)) {
- rsi_inform_bss_status(common, AP_OPMODE, 0,
+ rsi_inform_bss_status(common, RSI_OPMODE_AP, 0,
sta->addr, sta->wme,
- sta->aid, sta, sta_idx);
+ sta->aid, sta, sta_idx,
+ vif);
rsta->sta = NULL;
rsta->sta_id = -1;
for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++)
@@ -1348,7 +1483,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
rsi_dbg(ERR_ZONE, "%s: No station found\n", __func__);
}
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
/* Resetting all the fields to default values */
memcpy((u8 *)bss->bssid, (u8 *)sta->addr, ETH_ALEN);
bss->qos = sta->wme;
@@ -1508,6 +1644,231 @@ static void rsi_mac80211_rfkill_poll(struct ieee80211_hw *hw)
mutex_unlock(&common->mutex);
}
+static void rsi_resume_conn_channel(struct rsi_common *common)
+{
+ struct rsi_hw *adapter = common->priv;
+ struct ieee80211_vif *vif;
+ int cnt;
+
+ for (cnt = 0; cnt < RSI_MAX_VIFS; cnt++) {
+ vif = adapter->vifs[cnt];
+ if (!vif)
+ continue;
+
+ if ((vif->type == NL80211_IFTYPE_AP) ||
+ (vif->type == NL80211_IFTYPE_P2P_GO)) {
+ rsi_switch_channel(adapter, vif);
+ break;
+ }
+ if (((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_P2P_CLIENT)) &&
+ vif->bss_conf.assoc) {
+ rsi_switch_channel(adapter, vif);
+ break;
+ }
+ }
+}
+
+void rsi_roc_timeout(struct timer_list *t)
+{
+ struct rsi_common *common = from_timer(common, t, roc_timer);
+
+ rsi_dbg(INFO_ZONE, "Remain on channel expired\n");
+
+ mutex_lock(&common->mutex);
+ ieee80211_remain_on_channel_expired(common->priv->hw);
+
+ if (timer_pending(&common->roc_timer))
+ del_timer(&common->roc_timer);
+
+ rsi_resume_conn_channel(common);
+ mutex_unlock(&common->mutex);
+}
+
+static int rsi_mac80211_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct rsi_hw *adapter = (struct rsi_hw *)hw->priv;
+ struct rsi_common *common = (struct rsi_common *)adapter->priv;
+ int status = 0;
+
+ rsi_dbg(INFO_ZONE, "***** Remain on channel *****\n");
+
+ mutex_lock(&common->mutex);
+ rsi_dbg(INFO_ZONE, "%s: channel: %d duration: %dms\n",
+ __func__, chan->hw_value, duration);
+
+ if (timer_pending(&common->roc_timer)) {
+ rsi_dbg(INFO_ZONE, "Stop on-going ROC\n");
+ del_timer(&common->roc_timer);
+ }
+ common->roc_timer.expires = msecs_to_jiffies(duration) + jiffies;
+ add_timer(&common->roc_timer);
+
+ /* Configure band */
+ if (rsi_band_check(common, chan)) {
+ rsi_dbg(ERR_ZONE, "Failed to set band\n");
+ status = -EINVAL;
+ goto out;
+ }
+
+ /* Configure channel */
+ if (rsi_set_channel(common, chan)) {
+ rsi_dbg(ERR_ZONE, "Failed to set the channel\n");
+ status = -EINVAL;
+ goto out;
+ }
+
+ common->roc_vif = vif;
+ ieee80211_ready_on_channel(hw);
+ rsi_dbg(INFO_ZONE, "%s: Ready on channel :%d\n",
+ __func__, chan->hw_value);
+
+out:
+ mutex_unlock(&common->mutex);
+
+ return status;
+}
+
+static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ rsi_dbg(INFO_ZONE, "Cancel remain on channel\n");
+
+ mutex_lock(&common->mutex);
+ if (!timer_pending(&common->roc_timer)) {
+ mutex_unlock(&common->mutex);
+ return 0;
+ }
+
+ del_timer(&common->roc_timer);
+
+ rsi_resume_conn_channel(common);
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rsi_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE,
+};
+
+static u16 rsi_wow_map_triggers(struct rsi_common *common,
+ struct cfg80211_wowlan *wowlan)
+{
+ u16 wow_triggers = 0;
+
+ rsi_dbg(INFO_ZONE, "Mapping wowlan triggers\n");
+
+ if (wowlan->any)
+ wow_triggers |= RSI_WOW_ANY;
+ if (wowlan->magic_pkt)
+ wow_triggers |= RSI_WOW_MAGIC_PKT;
+ if (wowlan->disconnect)
+ wow_triggers |= RSI_WOW_DISCONNECT;
+ if (wowlan->gtk_rekey_failure || wowlan->eap_identity_req ||
+ wowlan->four_way_handshake)
+ wow_triggers |= RSI_WOW_GTK_REKEY;
+
+ return wow_triggers;
+}
+
+int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan)
+{
+ struct rsi_common *common = adapter->priv;
+ u16 triggers = 0;
+ u16 rx_filter_word = 0;
+ struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf;
+
+ rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n");
+
+ if (WARN_ON(!wowlan)) {
+ rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n");
+ return -EINVAL;
+ }
+
+ triggers = rsi_wow_map_triggers(common, wowlan);
+ if (!triggers) {
+ rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__);
+ return -EINVAL;
+ }
+ if (!bss->assoc) {
+ rsi_dbg(ERR_ZONE,
+ "Cannot configure WoWLAN (Station not connected)\n");
+ common->wow_flags |= RSI_WOW_NO_CONNECTION;
+ return 0;
+ }
+ rsi_dbg(INFO_ZONE, "TRIGGERS %x\n", triggers);
+ rsi_send_wowlan_request(common, triggers, 1);
+
+ /**
+ * Increase the beacon_miss threshold & keep-alive timers in
+ * vap_update frame
+ */
+ rsi_send_vap_dynamic_update(common);
+
+ rx_filter_word = (ALLOW_DATA_ASSOC_PEER | DISALLOW_BEACONS);
+ rsi_send_rx_filter_frame(common, rx_filter_word);
+ common->wow_flags |= RSI_WOW_ENABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL(rsi_config_wowlan);
+
+static int rsi_mac80211_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ rsi_dbg(INFO_ZONE, "%s: mac80211 suspend\n", __func__);
+ mutex_lock(&common->mutex);
+ if (rsi_config_wowlan(adapter, wowlan)) {
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+ mutex_unlock(&common->mutex);
+ return 1;
+ }
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+static int rsi_mac80211_resume(struct ieee80211_hw *hw)
+{
+ u16 rx_filter_word = 0;
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+
+ common->wow_flags = 0;
+
+ rsi_dbg(INFO_ZONE, "%s: mac80211 resume\n", __func__);
+
+ if (common->hibernate_resume)
+ return 0;
+
+ mutex_lock(&common->mutex);
+ rsi_send_wowlan_request(common, 0, 0);
+
+ rx_filter_word = (ALLOW_DATA_ASSOC_PEER | ALLOW_CTRL_ASSOC_PEER |
+ ALLOW_MGMT_ASSOC_PEER);
+ rsi_send_rx_filter_frame(common, rx_filter_word);
+ mutex_unlock(&common->mutex);
+
+ return 0;
+}
+
+#endif
+
static const struct ieee80211_ops mac80211_ops = {
.tx = rsi_mac80211_tx,
.start = rsi_mac80211_start,
@@ -1527,6 +1888,12 @@ static const struct ieee80211_ops mac80211_ops = {
.set_antenna = rsi_mac80211_set_antenna,
.get_antenna = rsi_mac80211_get_antenna,
.rfkill_poll = rsi_mac80211_rfkill_poll,
+ .remain_on_channel = rsi_mac80211_roc,
+ .cancel_remain_on_channel = rsi_mac80211_cancel_roc,
+#ifdef CONFIG_PM
+ .suspend = rsi_mac80211_suspend,
+ .resume = rsi_mac80211_resume,
+#endif
};
/**
@@ -1581,7 +1948,11 @@ int rsi_mac80211_attach(struct rsi_common *common)
ether_addr_copy(hw->wiphy->addr_mask, addr_mask);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP);
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->retry_short = RETRY_SHORT;
wiphy->retry_long = RETRY_LONG;
@@ -1606,8 +1977,20 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
wiphy->reg_notifier = rsi_reg_notify;
+#ifdef CONFIG_PM
+ wiphy->wowlan = &rsi_wowlan_support;
+#endif
+
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ /* Wi-Fi direct parameters */
+ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX;
+ wiphy->max_remain_on_channel_duration = 10000;
+ hw->max_listen_interval = 10;
+ wiphy->iface_combinations = rsi_iface_combinations;
+ wiphy->n_iface_combinations = ARRAY_SIZE(rsi_iface_combinations);
+
status = ieee80211_register_hw(hw);
if (status)
return status;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 3e1e80888d98..0cb8e68bab58 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -20,6 +20,7 @@
#include <linux/firmware.h>
#include "rsi_mgmt.h"
#include "rsi_common.h"
+#include "rsi_hal.h"
u32 rsi_zone_enabled = /* INFO_ZONE |
INIT_ZONE |
@@ -56,6 +57,30 @@ void rsi_dbg(u32 zone, const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(rsi_dbg);
+static char *opmode_str(int oper_mode)
+{
+ switch (oper_mode) {
+ case RSI_DEV_OPMODE_WIFI_ALONE:
+ return "Wi-Fi alone";
+ }
+
+ return "Unknown";
+}
+
+void rsi_print_version(struct rsi_common *common)
+{
+ rsi_dbg(ERR_ZONE, "================================================\n");
+ rsi_dbg(ERR_ZONE, "================ RSI Version Info ==============\n");
+ rsi_dbg(ERR_ZONE, "================================================\n");
+ rsi_dbg(ERR_ZONE, "FW Version\t: %d.%d.%d\n",
+ common->lmac_ver.major, common->lmac_ver.minor,
+ common->lmac_ver.release_num);
+ rsi_dbg(ERR_ZONE, "Operating mode\t: %d [%s]",
+ common->oper_mode, opmode_str(common->oper_mode));
+ rsi_dbg(ERR_ZONE, "Firmware file\t: %s", common->priv->fw_file_name);
+ rsi_dbg(ERR_ZONE, "================================================\n");
+}
+
/**
* rsi_prepare_skb() - This function prepares the skb.
* @common: Pointer to the driver private structure.
@@ -74,6 +99,8 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
struct skb_info *rx_params;
struct sk_buff *skb = NULL;
u8 payload_offset;
+ struct ieee80211_vif *vif;
+ struct ieee80211_hdr *wh;
if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
return NULL;
@@ -92,11 +119,13 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
payload_offset = (extended_desc + FRAME_DESC_SZ);
skb_put(skb, pkt_len);
memcpy((skb->data), (buffer + payload_offset), skb->len);
+ wh = (struct ieee80211_hdr *)skb->data;
+ vif = rsi_get_vif(common->priv, wh->addr1);
info = IEEE80211_SKB_CB(skb);
rx_params = (struct skb_info *)info->driver_data;
rx_params->rssi = rsi_get_rssi(buffer);
- rx_params->channel = rsi_get_connected_channel(common->priv);
+ rx_params->channel = rsi_get_connected_channel(vif);
return skb;
}
@@ -233,6 +262,8 @@ struct rsi_hw *rsi_91x_init(void)
rsi_default_ps_params(adapter);
spin_lock_init(&adapter->ps_lock);
+ timer_setup(&common->roc_timer, rsi_roc_timeout, 0);
+ init_completion(&common->wlan_init_completion);
common->init_done = true;
return adapter;
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index f7b550f900c4..46c9d5470dfb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -460,12 +460,12 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
const unsigned char *bssid,
u8 qos_enable,
u16 aid,
- u16 sta_id)
+ u16 sta_id,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif = common->priv->vifs[0];
struct sk_buff *skb = NULL;
struct rsi_peer_notify *peer_notify;
- u16 vap_id = 0;
+ u16 vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
int status;
u16 frame_len = sizeof(struct rsi_peer_notify);
@@ -482,9 +482,9 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
memset(skb->data, 0, frame_len);
peer_notify = (struct rsi_peer_notify *)skb->data;
- if (opmode == STA_OPMODE)
+ if (opmode == RSI_OPMODE_STA)
peer_notify->command = cpu_to_le16(PEER_TYPE_AP << 1);
- else if (opmode == AP_OPMODE)
+ else if (opmode == RSI_OPMODE_AP)
peer_notify->command = cpu_to_le16(PEER_TYPE_STA << 1);
switch (notify_event) {
@@ -716,9 +716,9 @@ int rsi_hal_load_key(struct rsi_common *common,
u8 key_type,
u8 key_id,
u32 cipher,
- s16 sta_id)
+ s16 sta_id,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif = common->priv->vifs[0];
struct sk_buff *skb = NULL;
struct rsi_set_key *set_key;
u16 key_descriptor = 0;
@@ -926,13 +926,13 @@ static int rsi_send_reset_mac(struct rsi_common *common)
*
* Return: 0 on success, corresponding error code on failure.
*/
-int rsi_band_check(struct rsi_common *common)
+int rsi_band_check(struct rsi_common *common,
+ struct ieee80211_channel *curchan)
{
struct rsi_hw *adapter = common->priv;
struct ieee80211_hw *hw = adapter->hw;
u8 prev_bw = common->channel_width;
u8 prev_ep = common->endpoint;
- struct ieee80211_channel *curchan = hw->conf.chandef.chan;
int status = 0;
if (common->band != curchan->band) {
@@ -1094,9 +1094,18 @@ int rsi_send_vap_dynamic_update(struct rsi_common *common)
dynamic_frame->desc_dword0.frame_type = VAP_DYNAMIC_UPDATE;
dynamic_frame->desc_dword2.pkt_info =
cpu_to_le32(common->rts_threshold);
- /* Beacon miss threshold */
- dynamic_frame->frame_body.keep_alive_period =
+
+ if (common->wow_flags & RSI_WOW_ENABLED) {
+ /* Beacon miss threshold */
+ dynamic_frame->desc_dword3.token =
+ cpu_to_le16(RSI_BCN_MISS_THRESHOLD);
+ dynamic_frame->frame_body.keep_alive_period =
+ cpu_to_le16(RSI_WOW_KEEPALIVE);
+ } else {
+ dynamic_frame->frame_body.keep_alive_period =
cpu_to_le16(RSI_DEF_KEEPALIVE);
+ }
+
dynamic_frame->desc_dword3.sta_id = 0; /* vap id */
skb_put(skb, sizeof(struct rsi_dynamic_s));
@@ -1160,9 +1169,9 @@ static bool rsi_map_rates(u16 rate, int *offset)
*/
static int rsi_send_auto_rate_request(struct rsi_common *common,
struct ieee80211_sta *sta,
- u16 sta_id)
+ u16 sta_id,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif = common->priv->vifs[0];
struct sk_buff *skb;
struct rsi_auto_rate *auto_rate;
int ii = 0, jj = 0, kk = 0;
@@ -1318,33 +1327,35 @@ void rsi_inform_bss_status(struct rsi_common *common,
u8 qos_enable,
u16 aid,
struct ieee80211_sta *sta,
- u16 sta_id)
+ u16 sta_id,
+ struct ieee80211_vif *vif)
{
if (status) {
- if (opmode == STA_OPMODE)
+ if (opmode == RSI_OPMODE_STA)
common->hw_data_qs_blocked = true;
rsi_hal_send_sta_notify_frame(common,
opmode,
STA_CONNECTED,
addr,
qos_enable,
- aid, sta_id);
+ aid, sta_id,
+ vif);
if (common->min_rate == 0xffff)
- rsi_send_auto_rate_request(common, sta, sta_id);
- if (opmode == STA_OPMODE) {
+ rsi_send_auto_rate_request(common, sta, sta_id, vif);
+ if (opmode == RSI_OPMODE_STA) {
if (!rsi_send_block_unblock_frame(common, false))
common->hw_data_qs_blocked = false;
}
} else {
- if (opmode == STA_OPMODE)
+ if (opmode == RSI_OPMODE_STA)
common->hw_data_qs_blocked = true;
- rsi_hal_send_sta_notify_frame(common,
- opmode,
- STA_DISCONNECTED,
- addr,
- qos_enable,
- aid, sta_id);
- if (opmode == STA_OPMODE)
+
+ if (!(common->wow_flags & RSI_WOW_ENABLED))
+ rsi_hal_send_sta_notify_frame(common, opmode,
+ STA_DISCONNECTED, addr,
+ qos_enable, aid, sta_id,
+ vif);
+ if (opmode == RSI_OPMODE_STA)
rsi_send_block_unblock_frame(common, true);
}
}
@@ -1471,10 +1482,11 @@ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word)
return rsi_send_internal_mgmt_frame(common, skb);
}
-int rsi_send_ps_request(struct rsi_hw *adapter, bool enable)
+int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
+ struct ieee80211_vif *vif)
{
struct rsi_common *common = adapter->priv;
- struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf;
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
struct rsi_request_ps *ps;
struct rsi_ps_info *ps_info;
struct sk_buff *skb;
@@ -1585,6 +1597,42 @@ static int rsi_send_beacon(struct rsi_common *common)
return 0;
}
+#ifdef CONFIG_PM
+int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
+ u16 sleep_status)
+{
+ struct rsi_wowlan_req *cmd_frame;
+ struct sk_buff *skb;
+ u8 length;
+
+ rsi_dbg(ERR_ZONE, "%s: Sending wowlan request frame\n", __func__);
+
+ length = sizeof(*cmd_frame);
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, length);
+ cmd_frame = (struct rsi_wowlan_req *)skb->data;
+
+ rsi_set_len_qno(&cmd_frame->desc.desc_dword0.len_qno,
+ (length - FRAME_DESC_SZ),
+ RSI_WIFI_MGMT_Q);
+ cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
+ cmd_frame->host_sleep_status = sleep_status;
+ if (common->secinfo.security_enable &&
+ common->secinfo.gtk_cipher)
+ flags |= RSI_WOW_GTK_REKEY;
+ if (sleep_status)
+ cmd_frame->wow_flags = flags;
+ rsi_dbg(INFO_ZONE, "Host_Sleep_Status : %d Flags : %d\n",
+ cmd_frame->host_sleep_status, cmd_frame->wow_flags);
+
+ skb_put(skb, length);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+#endif
+
/**
* rsi_handle_ta_confirm_type() - This function handles the confirm frames.
* @common: Pointer to the driver private structure.
@@ -1715,7 +1763,11 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
common->bb_rf_prog_count--;
if (!common->bb_rf_prog_count) {
common->fsm_state = FSM_MAC_INIT_DONE;
- return rsi_mac80211_attach(common);
+ if (common->reinit_hw) {
+ complete(&common->wlan_init_completion);
+ } else {
+ return rsi_mac80211_attach(common);
+ }
}
} else {
rsi_dbg(INFO_ZONE,
@@ -1793,6 +1845,7 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
case TA_CONFIRM_TYPE:
return rsi_handle_ta_confirm_type(common, msg);
case CARD_READY_IND:
+ common->hibernate_resume = false;
rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
__func__);
return rsi_handle_card_ready(common, msg);
diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c
index 48c79f035c59..01472fac8b9a 100644
--- a/drivers/net/wireless/rsi/rsi_91x_ps.c
+++ b/drivers/net/wireless/rsi/rsi_91x_ps.c
@@ -36,7 +36,6 @@ char *str_psstate(enum ps_state state)
default:
return "INVALID_STATE";
}
- return "INVALID_STATE";
}
static inline void rsi_modify_ps_state(struct rsi_hw *adapter,
@@ -67,7 +66,7 @@ void rsi_default_ps_params(struct rsi_hw *adapter)
ps_info->deep_sleep_wakeup_period = RSI_DEF_DS_WAKEUP_PERIOD;
}
-void rsi_enable_ps(struct rsi_hw *adapter)
+void rsi_enable_ps(struct rsi_hw *adapter, struct ieee80211_vif *vif)
{
if (adapter->ps_state != PS_NONE) {
rsi_dbg(ERR_ZONE,
@@ -76,7 +75,7 @@ void rsi_enable_ps(struct rsi_hw *adapter)
return;
}
- if (rsi_send_ps_request(adapter, true)) {
+ if (rsi_send_ps_request(adapter, true, vif)) {
rsi_dbg(ERR_ZONE,
"%s: Failed to send PS request to device\n",
__func__);
@@ -86,7 +85,8 @@ void rsi_enable_ps(struct rsi_hw *adapter)
rsi_modify_ps_state(adapter, PS_ENABLE_REQ_SENT);
}
-void rsi_disable_ps(struct rsi_hw *adapter)
+/* This function is used to disable power save */
+void rsi_disable_ps(struct rsi_hw *adapter, struct ieee80211_vif *vif)
{
if (adapter->ps_state != PS_ENABLED) {
rsi_dbg(ERR_ZONE,
@@ -95,7 +95,7 @@ void rsi_disable_ps(struct rsi_hw *adapter)
return;
}
- if (rsi_send_ps_request(adapter, false)) {
+ if (rsi_send_ps_request(adapter, false, vif)) {
rsi_dbg(ERR_ZONE,
"%s: Failed to send PS request to device\n",
__func__);
@@ -105,16 +105,16 @@ void rsi_disable_ps(struct rsi_hw *adapter)
rsi_modify_ps_state(adapter, PS_DISABLE_REQ_SENT);
}
-void rsi_conf_uapsd(struct rsi_hw *adapter)
+void rsi_conf_uapsd(struct rsi_hw *adapter, struct ieee80211_vif *vif)
{
int ret;
if (adapter->ps_state != PS_ENABLED)
return;
- ret = rsi_send_ps_request(adapter, false);
+ ret = rsi_send_ps_request(adapter, false, vif);
if (!ret)
- ret = rsi_send_ps_request(adapter, true);
+ ret = rsi_send_ps_request(adapter, true, vif);
if (ret)
rsi_dbg(ERR_ZONE,
"%s: Failed to send PS request to device\n",
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8d3a4839b6ef..b0cf41195051 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -871,6 +871,32 @@ fail:
return status;
}
+static int rsi_sdio_reinit_device(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *sdev = adapter->rsi_dev;
+ struct sdio_func *pfunction = sdev->pfunction;
+ int ii;
+
+ for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+ skb_queue_purge(&adapter->priv->tx_queue[ii]);
+
+ /* Initialize device again */
+ sdio_claim_host(pfunction);
+
+ sdio_release_irq(pfunction);
+ rsi_reset_card(pfunction);
+
+ sdio_enable_func(pfunction);
+ rsi_setupcard(adapter);
+ rsi_init_sdio_slave_regs(adapter);
+ sdio_claim_irq(pfunction, rsi_handle_interrupt);
+ rsi_hal_device_init(adapter);
+
+ sdio_release_host(pfunction);
+
+ return 0;
+}
+
static struct rsi_host_intf_ops sdio_host_intf_ops = {
.write_pkt = rsi_sdio_host_intf_write_pkt,
.read_pkt = rsi_sdio_host_intf_read_pkt,
@@ -880,6 +906,7 @@ static struct rsi_host_intf_ops sdio_host_intf_ops = {
.master_reg_read = rsi_sdio_master_reg_read,
.master_reg_write = rsi_sdio_master_reg_write,
.load_data_master_write = rsi_sdio_load_data_master_write,
+ .reinit_device = rsi_sdio_reinit_device,
};
/**
@@ -936,6 +963,8 @@ static int rsi_probe(struct sdio_func *pfunction,
return -EIO;
}
+ adapter->priv->hibernate_resume = false;
+ adapter->priv->reinit_hw = false;
return 0;
fail:
rsi_91x_deinit(adapter);
@@ -1059,21 +1088,252 @@ static void rsi_disconnect(struct sdio_func *pfunction)
}
#ifdef CONFIG_PM
+static int rsi_set_sdio_pm_caps(struct rsi_hw *adapter)
+{
+ struct rsi_91x_sdiodev *dev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ struct sdio_func *func = dev->pfunction;
+ int ret;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (ret)
+ rsi_dbg(ERR_ZONE, "Set sdio keep pwr flag failed: %d\n", ret);
+
+ return ret;
+}
+
+static int rsi_sdio_disable_interrupts(struct sdio_func *pfunc)
+{
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunc);
+ u8 isr_status = 0, data = 0;
+ int ret;
+ unsigned long t1;
+
+ rsi_dbg(INFO_ZONE, "Waiting for interrupts to be cleared..");
+ t1 = jiffies;
+ do {
+ rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER,
+ &isr_status);
+ rsi_dbg(INFO_ZONE, ".");
+ } while ((isr_status) && (jiffies_to_msecs(jiffies - t1) < 20));
+ rsi_dbg(INFO_ZONE, "Interrupts cleared\n");
+
+ sdio_claim_host(pfunc);
+ ret = rsi_cmd52readbyte(pfunc->card, RSI_INT_ENABLE_REGISTER, &data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read int enable register\n",
+ __func__);
+ goto done;
+ }
+
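+ /* Clear the interrupt-enable bits (those outside RSI_INT_ENABLE_MASK) to mask SDIO interrupts */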
+ data &= RSI_INT_ENABLE_MASK;
+ ret = rsi_cmd52writebyte(pfunc->card, RSI_INT_ENABLE_REGISTER, data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write to int enable register\n",
+ __func__);
+ goto done;
+ }
+ ret = rsi_cmd52readbyte(pfunc->card, RSI_INT_ENABLE_REGISTER, &data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read int enable register\n",
+ __func__);
+ goto done;
+ }
+ rsi_dbg(INFO_ZONE, "int enable reg content = %x\n", data);
+
+done:
+ sdio_release_host(pfunc);
+ return ret;
+}
+
+static int rsi_sdio_enable_interrupts(struct sdio_func *pfunc)
+{
+ u8 data;
+ int ret;
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunc);
+ struct rsi_common *common = adapter->priv;
+
+ sdio_claim_host(pfunc);
+ ret = rsi_cmd52readbyte(pfunc->card, RSI_INT_ENABLE_REGISTER, &data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read int enable register\n", __func__);
+ goto done;
+ }
+
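+ /* Set the interrupt-enable bits (those outside RSI_INT_ENABLE_MASK) to re-enable SDIO interrupts */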
+ data |= ~RSI_INT_ENABLE_MASK & 0xff;
+
+ ret = rsi_cmd52writebyte(pfunc->card, RSI_INT_ENABLE_REGISTER, data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to write to int enable register\n",
+ __func__);
+ goto done;
+ }
+
+ if ((common->wow_flags & RSI_WOW_ENABLED) &&
+ (common->wow_flags & RSI_WOW_NO_CONNECTION))
+ rsi_dbg(ERR_ZONE,
+ "##### Device can not wake up through WLAN\n");
+
+ ret = rsi_cmd52readbyte(pfunc->card, RSI_INT_ENABLE_REGISTER, &data);
+ if (ret < 0) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to read int enable register\n", __func__);
+ goto done;
+ }
+ rsi_dbg(INFO_ZONE, "int enable reg content = %x\n", data);
+
+done:
+ sdio_release_host(pfunc);
+ return ret;
+}
+
static int rsi_suspend(struct device *dev)
{
- /* Not yet implemented */
- return -ENOSYS;
+ int ret;
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_common *common;
+
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "Device is not ready\n");
+ return -ENODEV;
+ }
+ common = adapter->priv;
+ rsi_sdio_disable_interrupts(pfunction);
+
+ ret = rsi_set_sdio_pm_caps(adapter);
+ if (ret)
+ rsi_dbg(INFO_ZONE,
+ "Setting power management caps failed\n");
+ common->fsm_state = FSM_CARD_NOT_READY;
+
+ return 0;
}
static int rsi_resume(struct device *dev)
{
- /* Not yet implemented */
- return -ENOSYS;
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_common *common = adapter->priv;
+
+ common->fsm_state = FSM_MAC_INIT_DONE;
+ rsi_sdio_enable_interrupts(pfunction);
+
+ return 0;
+}
+
+static int rsi_freeze(struct device *dev)
+{
+ int ret;
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_common *common;
+ struct rsi_91x_sdiodev *sdev;
+
+ rsi_dbg(INFO_ZONE, "SDIO Bus freeze ===>\n");
+
+ if (!adapter) {
+ rsi_dbg(ERR_ZONE, "Device is not ready\n");
+ return -ENODEV;
+ }
+ common = adapter->priv;
+ sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+ if ((common->wow_flags & RSI_WOW_ENABLED) &&
+ (common->wow_flags & RSI_WOW_NO_CONNECTION))
+ rsi_dbg(ERR_ZONE,
+ "##### Device can not wake up through WLAN\n");
+
+ ret = rsi_sdio_disable_interrupts(pfunction);
+
+ if (sdev->write_fail)
+ rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n");
+
+ ret = rsi_set_sdio_pm_caps(adapter);
+ if (ret)
+ rsi_dbg(INFO_ZONE, "Setting power management caps failed\n");
+
+ rsi_dbg(INFO_ZONE, "***** RSI module freezed *****\n");
+
+ return 0;
+}
+
+static int rsi_thaw(struct device *dev)
+{
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_common *common = adapter->priv;
+
+ rsi_dbg(ERR_ZONE, "SDIO Bus thaw =====>\n");
+
+ common->hibernate_resume = true;
+ common->fsm_state = FSM_CARD_NOT_READY;
+ common->iface_down = true;
+
+ rsi_sdio_enable_interrupts(pfunction);
+
+ rsi_dbg(INFO_ZONE, "***** RSI module thaw done *****\n");
+
+ return 0;
}
+static void rsi_shutdown(struct device *dev)
+{
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_91x_sdiodev *sdev =
+ (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ struct ieee80211_hw *hw = adapter->hw;
+ struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
+
+ rsi_dbg(ERR_ZONE, "SDIO Bus shutdown =====>\n");
+
+ if (rsi_config_wowlan(adapter, wowlan))
+ rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n");
+
+ rsi_sdio_disable_interrupts(sdev->pfunction);
+
+ if (sdev->write_fail)
+ rsi_dbg(INFO_ZONE, "###### Device is not ready #######\n");
+
+ if (rsi_set_sdio_pm_caps(adapter))
+ rsi_dbg(INFO_ZONE, "Setting power management caps failed\n");
+
+ rsi_dbg(INFO_ZONE, "***** RSI module shut down *****\n");
+}
+
+static int rsi_restore(struct device *dev)
+{
+ struct sdio_func *pfunction = dev_to_sdio_func(dev);
+ struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+ struct rsi_common *common = adapter->priv;
+
+ rsi_dbg(INFO_ZONE, "SDIO Bus restore ======>\n");
+ common->hibernate_resume = true;
+ common->fsm_state = FSM_FW_NOT_LOADED;
+ common->iface_down = true;
+
+ adapter->sc_nvifs = 0;
+ ieee80211_restart_hw(adapter->hw);
+
+ common->wow_flags = 0;
+ common->iface_down = false;
+
+ rsi_dbg(INFO_ZONE, "RSI module restored\n");
+
+ return 0;
+}
static const struct dev_pm_ops rsi_pm_ops = {
.suspend = rsi_suspend,
.resume = rsi_resume,
+ .freeze = rsi_freeze,
+ .thaw = rsi_thaw,
+ .restore = rsi_restore,
};
#endif
@@ -1093,6 +1353,7 @@ static struct sdio_driver rsi_driver = {
#ifdef CONFIG_PM
.drv = {
.pm = &rsi_pm_ops,
+ .shutdown = rsi_shutdown,
}
#endif
};
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 81df09dd2636..8f8443833348 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -73,8 +73,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter,
u8 *data,
u32 count)
{
- struct rsi_91x_usbdev *dev =
- (struct rsi_91x_usbdev *)adapter->rsi_dev;
+ struct rsi_91x_usbdev *dev;
if (!adapter)
return -ENODEV;
@@ -82,6 +81,7 @@ static int rsi_write_multiple(struct rsi_hw *adapter,
if (endpoint == 0)
return -EINVAL;
+ dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
if (dev->write_fail)
return -ENETDOWN;
@@ -162,13 +162,13 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
u8 *buf;
int status = -ENOMEM;
+ if (len > RSI_USB_CTRL_BUF_SIZE)
+ return -EINVAL;
+
buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!buf)
return status;
- if (len > RSI_USB_CTRL_BUF_SIZE)
- return -EINVAL;
-
status = usb_control_msg(usbdev,
usb_rcvctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_READ,
@@ -207,13 +207,13 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
u8 *usb_reg_buf;
int status = -ENOMEM;
+ if (len > RSI_USB_CTRL_BUF_SIZE)
+ return -EINVAL;
+
usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL);
if (!usb_reg_buf)
return status;
- if (len > RSI_USB_CTRL_BUF_SIZE)
- return -EINVAL;
-
usb_reg_buf[0] = (value & 0x00ff);
usb_reg_buf[1] = (value & 0xff00) >> 8;
usb_reg_buf[2] = 0x0;
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
index e579d694d13c..d07dbba61727 100644
--- a/drivers/net/wireless/rsi/rsi_common.h
+++ b/drivers/net/wireless/rsi/rsi_common.h
@@ -79,9 +79,14 @@ static inline int rsi_kill_thread(struct rsi_thread *handle)
}
void rsi_mac80211_detach(struct rsi_hw *hw);
-u16 rsi_get_connected_channel(struct rsi_hw *adapter);
+u16 rsi_get_connected_channel(struct ieee80211_vif *vif);
struct rsi_hw *rsi_91x_init(void);
void rsi_91x_deinit(struct rsi_hw *adapter);
int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len);
+#ifdef CONFIG_PM
+int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan);
+#endif
struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr);
+struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac);
+void rsi_roc_timeout(struct timer_list *t);
#endif
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 7c145053da6d..a09d36b6b765 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -101,6 +101,9 @@
#define BBP_INFO_40MHZ 0x6
+#define FW_FLASH_OFFSET 0x820
+#define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200)
+
struct bl_header {
__le32 flags;
__le32 image_no;
@@ -121,8 +124,7 @@ struct rsi_mgmt_desc {
u8 xtend_desc_size;
u8 header_len;
__le16 frame_info;
- u8 rate_info;
- u8 reserved1;
+ __le16 rate_info;
__le16 bbp_info;
__le16 seq_ctrl;
u8 reserved2;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 2c18dde633ea..8cab630af4a5 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -60,12 +60,14 @@ enum RSI_FSM_STATES {
extern u32 rsi_zone_enabled;
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
-#define RSI_MAX_VIFS 1
+#define RSI_MAX_VIFS 3
#define NUM_EDCA_QUEUES 4
#define IEEE80211_ADDR_LEN 6
#define FRAME_DESC_SZ 16
#define MIN_802_11_HDR_LEN 24
#define RSI_DEF_KEEPALIVE 90
+#define RSI_WOW_KEEPALIVE 5
+#define RSI_BCN_MISS_THRESHOLD 24
#define DATA_QUEUE_WATER_MARK 400
#define MIN_DATA_QUEUE_WATER_MARK 300
@@ -108,13 +110,22 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
((_q) == VI_Q) ? IEEE80211_AC_VI : \
IEEE80211_AC_VO)
+/* WoWLAN flags */
+#define RSI_WOW_ENABLED BIT(0)
+#define RSI_WOW_NO_CONNECTION BIT(1)
+
#define RSI_DEV_9113 1
struct version_info {
u16 major;
u16 minor;
- u16 release_num;
- u16 patch_num;
+ u8 release_num;
+ u8 patch_num;
+ union {
+ struct {
+ u8 fw_ver[8];
+ } info;
+ } ver;
} __packed;
struct skb_info {
@@ -124,6 +135,8 @@ struct skb_info {
s8 tid;
s8 sta_id;
u8 internal_hdr_size;
+ struct ieee80211_vif *vif;
+ u8 vap_id;
};
enum edca_queue {
@@ -157,6 +170,7 @@ struct vif_priv {
bool is_ht;
bool sgi;
u16 seq_start;
+ int vap_id;
};
struct rsi_event {
@@ -196,11 +210,11 @@ struct rsi_common {
struct vif_priv vif_info[RSI_MAX_VIFS];
bool mgmt_q_block;
- struct version_info driver_ver;
- struct version_info fw_ver;
+ struct version_info lmac_ver;
struct rsi_thread tx_thread;
struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 2];
+ struct completion wlan_init_completion;
/* Mutex declaration */
struct mutex mutex;
/* Mutex used for tx thread */
@@ -259,7 +273,9 @@ struct rsi_common {
u8 obm_ant_sel_val;
int tx_power;
u8 ant_in_use;
-
+ bool hibernate_resume;
+ bool reinit_hw;
+ u8 wow_flags;
u16 beacon_interval;
u8 dtim_cnt;
@@ -270,6 +286,11 @@ struct rsi_common {
int num_stations;
int max_stations;
struct ieee80211_key_conf *key;
+
+ /* Wi-Fi direct mode related */
+ bool p2p_enabled;
+ struct timer_list roc_timer;
+ struct ieee80211_vif *roc_vif;
};
enum host_intf {
@@ -326,6 +347,8 @@ struct rsi_hw {
int (*determine_event_timeout)(struct rsi_hw *adapter);
};
+void rsi_print_version(struct rsi_common *common);
+
struct rsi_host_intf_ops {
int (*read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
int (*write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
@@ -342,5 +365,6 @@ struct rsi_host_intf_ops {
int (*load_data_master_write)(struct rsi_hw *adapter, u32 addr,
u32 instructions_size, u16 block_size,
u8 *fw);
+ int (*reinit_device)(struct rsi_hw *adapter);
};
#endif
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index c6e1fa669a27..389094a3f91c 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -45,6 +45,17 @@
#define MAGIC_WORD 0x5A
#define WLAN_EEPROM_RFTYPE_ADDR 424
+/*WOWLAN RESUME WAKEUP TYPES*/
+#define RSI_UNICAST_MAGIC_PKT BIT(0)
+#define RSI_BROADCAST_MAGICPKT BIT(1)
+#define RSI_EAPOL_PKT BIT(2)
+#define RSI_DISCONNECT_PKT BIT(3)
+#define RSI_HW_BMISS_PKT BIT(4)
+#define RSI_INSERT_SEQ_IN_FW BIT(2)
+
+#define WOW_MAX_FILTERS_PER_LIST 16
+#define WOW_PATTERN_SIZE 256
+
/* Receive Frame Types */
#define TA_CONFIRM_TYPE 0x01
#define RX_DOT11_MGMT 0x02
@@ -189,6 +200,8 @@
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+#define RSI_DESC_VAP_ID_MASK 0xC000u
+#define RSI_DESC_VAP_ID_OFST 14
#define RSI_DATA_DESC_MAC_BBP_INFO BIT(0)
#define RSI_DATA_DESC_NO_ACK_IND BIT(9)
#define RSI_DATA_DESC_QOS_EN BIT(12)
@@ -199,9 +212,19 @@
#define RSI_DATA_DESC_INSERT_TSF BIT(15)
#define RSI_DATA_DESC_INSERT_SEQ_NO BIT(2)
+#ifdef CONFIG_PM
+#define RSI_WOW_ANY BIT(1)
+#define RSI_WOW_GTK_REKEY BIT(3)
+#define RSI_WOW_MAGIC_PKT BIT(4)
+#define RSI_WOW_DISCONNECT BIT(5)
+#endif
+
enum opmode {
- AP_OPMODE = 0,
- STA_OPMODE,
+ RSI_OPMODE_UNSUPPORTED = -1,
+ RSI_OPMODE_AP = 0,
+ RSI_OPMODE_STA,
+ RSI_OPMODE_P2P_GO,
+ RSI_OPMODE_P2P_CLIENT
};
enum vap_status {
@@ -257,7 +280,9 @@ enum cmd_frame_type {
ANT_SEL_FRAME = 0x20,
VAP_DYNAMIC_UPDATE = 0x27,
COMMON_DEV_CONFIG = 0x28,
- RADIO_PARAMS_UPDATE = 0x29
+ RADIO_PARAMS_UPDATE = 0x29,
+ WOWLAN_CONFIG_PARAMS = 0x2B,
+ WOWLAN_WAKEUP_REASON = 0xc5
};
struct rsi_mac_frame {
@@ -363,9 +388,9 @@ struct rsi_vap_caps {
u8 vif_type;
u8 channel_bw;
__le16 antenna_info;
+ __le16 token;
u8 radioid_macid;
u8 vap_id;
- __le16 reserved3;
u8 mac_addr[6];
__le16 keep_alive_period;
u8 bssid[6];
@@ -576,6 +601,13 @@ struct rsi_request_ps {
__le16 ps_num_dtim_intervals;
} __packed;
+struct rsi_wowlan_req {
+ struct rsi_cmd_desc desc;
+ u8 sourceid[ETH_ALEN];
+ u16 wow_flags;
+ u16 host_sleep_status;
+} __packed;
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
@@ -613,14 +645,16 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
u16 ssn, u8 buf_size, u8 event,
u8 sta_id);
int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
- u8 key_type, u8 key_id, u32 cipher, s16 sta_id);
+ u8 key_type, u8 key_id, u32 cipher, s16 sta_id,
+ struct ieee80211_vif *vif);
int rsi_set_channel(struct rsi_common *common,
struct ieee80211_channel *channel);
int rsi_send_vap_dynamic_update(struct rsi_common *common);
int rsi_send_block_unblock_frame(struct rsi_common *common, bool event);
void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode,
u8 status, const u8 *addr, u8 qos_enable, u16 aid,
- struct ieee80211_sta *sta, u16 sta_id);
+ struct ieee80211_sta *sta, u16 sta_id,
+ struct ieee80211_vif *vif);
void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
int rsi_mac80211_attach(struct rsi_common *common);
void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
@@ -630,8 +664,14 @@ void rsi_core_qos_processor(struct rsi_common *common);
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
-int rsi_band_check(struct rsi_common *common);
+int rsi_band_check(struct rsi_common *common, struct ieee80211_channel *chan);
int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word);
int rsi_send_radio_params_update(struct rsi_common *common);
int rsi_set_antenna(struct rsi_common *common, u8 antenna);
+#ifdef CONFIG_PM
+int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
+ u16 sleep_status);
+#endif
+int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
+ struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h
index d8475873df36..98ff6a4ced57 100644
--- a/drivers/net/wireless/rsi/rsi_ps.h
+++ b/drivers/net/wireless/rsi/rsi_ps.h
@@ -55,10 +55,9 @@ struct rsi_ps_info {
} __packed;
char *str_psstate(enum ps_state state);
-void rsi_enable_ps(struct rsi_hw *adapter);
-void rsi_disable_ps(struct rsi_hw *adapter);
+void rsi_enable_ps(struct rsi_hw *adapter, struct ieee80211_vif *vif);
+void rsi_disable_ps(struct rsi_hw *adapter, struct ieee80211_vif *vif);
int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg);
void rsi_default_ps_params(struct rsi_hw *hw);
-int rsi_send_ps_request(struct rsi_hw *adapter, bool enable);
-void rsi_conf_uapsd(struct rsi_hw *adapter);
+void rsi_conf_uapsd(struct rsi_hw *adapter, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 95e4bed57baf..49c549ba6682 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -48,6 +48,8 @@ enum sdio_interrupt_type {
#define RSI_DEVICE_BUFFER_STATUS_REGISTER 0xf3
#define RSI_FN1_INT_REGISTER 0xf9
+#define RSI_INT_ENABLE_REGISTER 0x04
+#define RSI_INT_ENABLE_MASK 0xfc
#define RSI_SD_REQUEST_MASTER 0x10000
/* FOR SD CARD ONLY */
diff --git a/drivers/net/wireless/st/cw1200/Makefile b/drivers/net/wireless/st/cw1200/Makefile
index b086aac6547a..386a484e0707 100644
--- a/drivers/net/wireless/st/cw1200/Makefile
+++ b/drivers/net/wireless/st/cw1200/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
cw1200_core-y := \
fwio.o \
txrx.o \
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index dc478cedbde0..a186d1df1f29 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -373,8 +373,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
INIT_WORK(&priv->set_beacon_wakeup_period_work,
cw1200_set_beacon_wakeup_period_work);
- setup_timer(&priv->mcast_timeout, cw1200_mcast_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->mcast_timeout, cw1200_mcast_timeout, 0);
if (cw1200_queue_stats_init(&priv->tx_queue_stats,
CW1200_LINK_ID_MAX,
diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
index d2202ae92bdd..ded23df1ac1d 100644
--- a/drivers/net/wireless/st/cw1200/pm.c
+++ b/drivers/net/wireless/st/cw1200/pm.c
@@ -91,7 +91,7 @@ struct cw1200_suspend_state {
u8 prev_ps_mode;
};
-static void cw1200_pm_stay_awake_tmo(unsigned long arg)
+static void cw1200_pm_stay_awake_tmo(struct timer_list *unused)
{
/* XXX what's the point of this ? */
}
@@ -101,8 +101,7 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
{
spin_lock_init(&pm->lock);
- setup_timer(&pm->stay_awake, cw1200_pm_stay_awake_tmo,
- (unsigned long)pm);
+ timer_setup(&pm->stay_awake, cw1200_pm_stay_awake_tmo, 0);
return 0;
}
diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c
index 0ba5ef9b3e7b..5153d2cfd991 100644
--- a/drivers/net/wireless/st/cw1200/queue.c
+++ b/drivers/net/wireless/st/cw1200/queue.c
@@ -130,11 +130,11 @@ static void __cw1200_queue_gc(struct cw1200_queue *queue,
}
}
-static void cw1200_queue_gc(unsigned long arg)
+static void cw1200_queue_gc(struct timer_list *t)
{
LIST_HEAD(list);
struct cw1200_queue *queue =
- (struct cw1200_queue *)arg;
+ from_timer(queue, t, gc);
spin_lock_bh(&queue->lock);
__cw1200_queue_gc(queue, &list, true);
@@ -179,7 +179,7 @@ int cw1200_queue_init(struct cw1200_queue *queue,
INIT_LIST_HEAD(&queue->pending);
INIT_LIST_HEAD(&queue->free_pool);
spin_lock_init(&queue->lock);
- setup_timer(&queue->gc, cw1200_queue_gc, (unsigned long)queue);
+ timer_setup(&queue->gc, cw1200_queue_gc, 0);
queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
GFP_KERNEL);
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index a52224836a2b..03687a80d6e9 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -2112,10 +2112,9 @@ void cw1200_multicast_stop_work(struct work_struct *work)
}
}
-void cw1200_mcast_timeout(unsigned long arg)
+void cw1200_mcast_timeout(struct timer_list *t)
{
- struct cw1200_common *priv =
- (struct cw1200_common *)arg;
+ struct cw1200_common *priv = from_timer(priv, t, mcast_timeout);
wiphy_warn(priv->hw->wiphy,
"Multicast delivery timeout.\n");
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index a0bacaa39b31..719de34dcbfe 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -117,6 +117,6 @@ void cw1200_set_tim_work(struct work_struct *work);
void cw1200_set_cts_work(struct work_struct *work);
void cw1200_multicast_start_work(struct work_struct *work);
void cw1200_multicast_stop_work(struct work_struct *work);
-void cw1200_mcast_timeout(unsigned long arg);
+void cw1200_mcast_timeout(struct timer_list *t);
#endif
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index af14231aeede..0530dd744275 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
obj-$(CONFIG_WL1251) += wl1251/
diff --git a/drivers/net/wireless/ti/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index 58b4f935a3f6..38da9f8e5f82 100644
--- a/drivers/net/wireless/ti/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
acx.o boot.o init.o debugfs.o io.o
wl1251_spi-objs += spi.o
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index d6fbdda2cba3..f78fc3880423 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "acx.h"
#include <linux/module.h>
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index ede31f048ef9..9547aea01b0f 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "cmd.h"
#include <linux/module.h>
diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 04ed51495772..7fabe702c4cc 100644
--- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __WL12XX_80211_H__
#define __WL12XX_80211_H__
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index e286713b3c18..7d418c57bcb5 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
boot.o init.o debugfs.o scan.o sysfs.o vendor_cmd.o
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index c346c021b999..d47921a84509 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -196,9 +196,9 @@ out:
mutex_unlock(&wl->mutex);
}
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
{
- struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
@@ -2279,8 +2279,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
- setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wlvif);
+ timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 22b0bc98d7b5..181be725eff8 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __WL12XX_80211_H__
#define __WL12XX_80211_H__
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 3fbfd19818f1..efdce9ae36ea 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __WL3501_H__
#define __WL3501_H__
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 581e8577a221..253403899fe9 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -230,8 +230,7 @@ static void zd1201_usbrx(struct urb *urb)
/* Info frame */
if (type == ZD1201_PACKET_INQUIRE) {
int i = 0;
- unsigned short infotype, framelen, copylen;
- framelen = le16_to_cpu(*(__le16*)&data[4]);
+ unsigned short infotype, copylen;
infotype = le16_to_cpu(*(__le16*)&data[6]);
if (infotype == ZD1201_INF_LINKSTATUS) {
diff --git a/drivers/net/wireless/zydas/zd1211rw/Makefile b/drivers/net/wireless/zydas/zd1211rw/Makefile
index 5728a918e508..1647a449ce1a 100644
--- a/drivers/net/wireless/zydas/zd1211rw/Makefile
+++ b/drivers/net/wireless/zydas/zd1211rw/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZD1211RW) += zd1211rw.o
zd1211rw-objs := zd_chip.o zd_mac.o \
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5b1d2e8402d9..a46a1e94505d 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -307,7 +307,7 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
return to_xenbus_device(vif->dev->dev.parent);
}
-void xenvif_tx_credit_callback(unsigned long data);
+void xenvif_tx_credit_callback(struct timer_list *t);
struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 4491ca5aee90..d6dff347f896 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -520,8 +520,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
queue->credit_bytes = queue->remaining_credit = ~0UL;
queue->credit_usec = 0UL;
- init_timer(&queue->credit_timeout);
- queue->credit_timeout.function = xenvif_tx_credit_callback;
+ timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
queue->credit_window_start = get_jiffies_64();
queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 5042ff8d449a..a27daa23c9dc 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -183,9 +183,9 @@ static void tx_add_credit(struct xenvif_queue *queue)
queue->rate_limited = false;
}
-void xenvif_tx_credit_callback(unsigned long data)
+void xenvif_tx_credit_callback(struct timer_list *t)
{
- struct xenvif_queue *queue = (struct xenvif_queue *)data;
+ struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
tx_add_credit(queue);
xenvif_napi_schedule_or_enable_events(queue);
}
@@ -700,8 +700,6 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
/* Still too big to send right now? Set a callback. */
if (size > queue->remaining_credit) {
- queue->credit_timeout.data =
- (unsigned long)queue;
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b8689c6d887..18c85e55e76a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -228,9 +228,9 @@ static bool xennet_can_sg(struct net_device *dev)
}
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = (struct netfront_queue *)data;
+ struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
napi_schedule(&queue->napi);
}
@@ -1605,8 +1605,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
- (unsigned long)queue);
+ timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 640b7274371c..5393ba59b17d 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for nfc devices
#
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index ec50027b0d8b..d5784a47fc13 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -726,7 +726,7 @@ static struct nci_driver_ops fdp_prop_ops[] = {
},
};
-struct nci_ops nci_ops = {
+static struct nci_ops nci_ops = {
.open = fdp_nci_open,
.close = fdp_nci_close,
.send = fdp_nci_send,
diff --git a/drivers/nfc/mei_phy.h b/drivers/nfc/mei_phy.h
index acd3a1fc69e6..51bd44f5f3b8 100644
--- a/drivers/nfc/mei_phy.h
+++ b/drivers/nfc/mei_phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LOCAL_MEI_PHY_H_
#define __LOCAL_MEI_PHY_H_
diff --git a/drivers/nfc/microread/Makefile b/drivers/nfc/microread/Makefile
index 755c24cba253..2f7dda265f8f 100644
--- a/drivers/nfc/microread/Makefile
+++ b/drivers/nfc/microread/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Microread HCI based NFC driver
#
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index b668b7b9a61e..1806d20a5e29 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -294,7 +294,7 @@ static int microread_i2c_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id microread_i2c_id[] = {
+static const struct i2c_device_id microread_i2c_id[] = {
{ MICROREAD_I2C_DRIVER_NAME, 0},
{ }
};
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
index fa07c7806492..e74de0cb3bb0 100644
--- a/drivers/nfc/nfcmrvl/Makefile
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for NFCMRVL NCI based NFC driver
#
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 7f8960a46aab..52c8ae504e32 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -130,9 +130,9 @@ static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
}
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
{
- struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+ struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
nfc_err(priv->dev, "FW loading timeout");
priv->fw_dnld.state = STATE_RESET;
@@ -538,8 +538,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
}
/* Configure a timer for timeout */
- setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
- (unsigned long) priv);
+ timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
mod_timer(&priv->fw_dnld.timer,
jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index ffec103702f1..0f22379887ca 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -266,7 +266,7 @@ static const struct of_device_id of_nfcmrvl_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, of_nfcmrvl_i2c_match);
-static struct i2c_device_id nfcmrvl_i2c_id_table[] = {
+static const struct i2c_device_id nfcmrvl_i2c_id_table[] = {
{ "nfcmrvl_i2c", 0 },
{}
};
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 198585bbc771..ba695e392c3b 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -393,7 +393,7 @@ static int nxp_nci_i2c_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id nxp_nci_i2c_id_table[] = {
+static const struct i2c_device_id nxp_nci_i2c_id_table[] = {
{"nxp-nci_i2c", 0},
{}
};
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 8f60ce039b0d..4389eb4c8d0b 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -264,7 +264,7 @@ static const struct of_device_id of_pn533_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, of_pn533_i2c_match);
-static struct i2c_device_id pn533_i2c_id_table[] = {
+static const struct i2c_device_id pn533_i2c_id_table[] = {
{ PN533_I2C_DRIVER_NAME, 0 },
{}
};
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index c05cb637ba92..a0cc1cc45292 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1232,9 +1232,9 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
return 0;
}
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
{
- struct pn533 *dev = (struct pn533 *)data;
+ struct pn533 *dev = from_timer(dev, t, listen_timer);
dev_dbg(dev->dev, "Listen mode timeout\n");
@@ -2632,9 +2632,7 @@ struct pn533 *pn533_register_device(u32 device_type,
if (priv->wq == NULL)
goto error;
- init_timer(&priv->listen_timer);
- priv->listen_timer.data = (unsigned long) priv;
- priv->listen_timer.function = pn533_listen_mode_timer;
+ timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
skb_queue_head_init(&priv->resp_q);
skb_queue_head_init(&priv->fragment_skb);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 4b14740edb67..d0207f8e68b7 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -54,7 +54,7 @@
#define PN544_HCI_I2C_LLC_MAX_SIZE (PN544_HCI_I2C_LLC_LEN_CRC + 1 + \
PN544_HCI_I2C_LLC_MAX_PAYLOAD)
-static struct i2c_device_id pn544_hci_i2c_id_table[] = {
+static const struct i2c_device_id pn544_hci_i2c_id_table[] = {
{"pn544", 0},
{}
};
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 38548bd970cd..b7828fb252f2 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -336,7 +336,7 @@ static int s3fwrn5_fw_get_base_addr(
struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr)
{
int i;
- struct {
+ static const struct {
u8 version[4];
u32 base_addr;
} match[] = {
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 3f09d7fd2285..4da409e77a72 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -276,7 +276,7 @@ static int s3fwrn5_i2c_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id s3fwrn5_i2c_id_table[] = {
+static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
{S3FWRN5_I2C_DRIVER_NAME, 0},
{}
};
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
index dc9b777d78f6..5c6e21ccb19c 100644
--- a/drivers/nfc/st-nci/Kconfig
+++ b/drivers/nfc/st-nci/Kconfig
@@ -11,7 +11,7 @@ config NFC_ST_NCI_I2C
select NFC_ST_NCI
---help---
This module adds support for an I2C interface to the
- STMicroelectronics NFC NCI chips familly.
+ STMicroelectronics NFC NCI chips family.
Select this if your platform is using the i2c bus.
If you choose to build a module, it'll be called st-nci_i2c.
@@ -23,7 +23,7 @@ config NFC_ST_NCI_SPI
select NFC_ST_NCI
---help---
This module adds support for an SPI interface to the
- STMicroelectronics NFC NCI chips familly.
+ STMicroelectronics NFC NCI chips family.
Select this if your platform is using the spi bus.
If you choose to build a module, it'll be called st-nci_spi.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
index 439b2fa8654a..e0310743fed1 100644
--- a/drivers/nfc/st-nci/Makefile
+++ b/drivers/nfc/st-nci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ST_NCI NCI based NFC driver
#
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 515f08d037fb..f9525ef87d57 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -279,7 +279,7 @@ static int st_nci_i2c_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id st_nci_i2c_id_table[] = {
+static const struct i2c_device_id st_nci_i2c_id_table[] = {
{ST_NCI_DRIVER_NAME, 0},
{}
};
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 9477994cf975..f26d938d240f 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -246,18 +246,18 @@ void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb)
}
EXPORT_SYMBOL(ndlc_recv);
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
pr_debug("\n");
schedule_work(&ndlc->sm_work);
}
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
pr_debug("\n");
@@ -282,13 +282,8 @@ int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
*ndlc_id = ndlc;
/* initialize timers */
- init_timer(&ndlc->t1_timer);
- ndlc->t1_timer.data = (unsigned long)ndlc;
- ndlc->t1_timer.function = ndlc_t1_timeout;
-
- init_timer(&ndlc->t2_timer);
- ndlc->t2_timer.data = (unsigned long)ndlc;
- ndlc->t2_timer.function = ndlc_t2_timeout;
+ timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+ timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
skb_queue_head_init(&ndlc->rcv_q);
skb_queue_head_init(&ndlc->send_q);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 56f2112e0cd8..f55d082ace71 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -677,7 +677,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
}
EXPORT_SYMBOL(st_nci_se_io);
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -690,7 +690,7 @@ static void st_nci_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
pr_debug("\n");
@@ -708,9 +708,10 @@ static void st_nci_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
{
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -725,15 +726,11 @@ int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function =
- st_nci_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st_nci_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.xch_error = false;
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index cd1f7bfa75eb..1b347096422f 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -589,7 +589,7 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
+static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
{ST21NFCA_HCI_DRIVER_NAME, 0},
{}
};
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 3a98563d4a12..4bed9e842db3 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -252,7 +252,7 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
@@ -265,7 +265,8 @@ static void st21nfca_se_wt_timeout(unsigned long data)
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.bwi_timer);
pr_debug("\n");
@@ -283,9 +284,10 @@ static void st21nfca_se_wt_timeout(unsigned long data)
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
@@ -392,14 +394,11 @@ void st21nfca_se_init(struct nfc_hci_dev *hdev)
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st21nfca_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.count_pipes = 0;
diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig
index a89243c9fdd3..e51b581fd102 100644
--- a/drivers/ntb/hw/Kconfig
+++ b/drivers/ntb/hw/Kconfig
@@ -1,3 +1,4 @@
source "drivers/ntb/hw/amd/Kconfig"
source "drivers/ntb/hw/idt/Kconfig"
source "drivers/ntb/hw/intel/Kconfig"
+source "drivers/ntb/hw/mscc/Kconfig"
diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile
index 87332c3905f0..923c442db750 100644
--- a/drivers/ntb/hw/Makefile
+++ b/drivers/ntb/hw/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_NTB_AMD) += amd/
obj-$(CONFIG_NTB_IDT) += idt/
obj-$(CONFIG_NTB_INTEL) += intel/
+obj-$(CONFIG_NTB_SWITCHTEC) += mscc/
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index d44d7ef38fe8..0cd79f367f7c 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev)
/*
* IDT PCIe-switch models ports configuration structures
*/
-static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
.name = "89HPES24NT6AG2",
.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
.name = "89HPES32NT8AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
.name = "89HPES32NT8BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
.name = "89HPES12NT12G2",
.port_cnt = 3, .ports = {0, 8, 16}
};
-static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
.name = "89HPES16NT16G2",
.port_cnt = 4, .ports = {0, 8, 12, 16}
};
-static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
.name = "89HPES24NT24G2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
.name = "89HPES32NT24AG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
-static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
.name = "89HPES32NT24BG2",
.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
};
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 2557e2c05b90..4de074a86073 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev,
{
struct pci_dev *pdev;
void __iomem *mmio;
- resource_size_t bar_size;
phys_addr_t bar_addr;
- int b2b_bar;
- u8 bar_sz;
pdev = ndev->ntb.pdev;
mmio = ndev->self_mmio;
- if (ndev->b2b_idx == UINT_MAX) {
- dev_dbg(&pdev->dev, "not using b2b mw\n");
- b2b_bar = 0;
- ndev->b2b_off = 0;
- } else {
- b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
- if (b2b_bar < 0)
- return -EIO;
-
- dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
-
- bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
-
- dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
-
- if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) {
- dev_dbg(&pdev->dev, "b2b using first half of bar\n");
- ndev->b2b_off = bar_size >> 1;
- } else if (bar_size >= XEON_B2B_MIN_SIZE) {
- dev_dbg(&pdev->dev, "b2b using whole bar\n");
- ndev->b2b_off = 0;
- --ndev->mw_count;
- } else {
- dev_dbg(&pdev->dev, "b2b bar size is too small\n");
- return -EIO;
- }
- }
-
- /*
- * Reset the secondary bar sizes to match the primary bar sizes,
- * except disable or halve the size of the b2b secondary bar.
- */
- pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz);
- if (b2b_bar == 1) {
- if (ndev->b2b_off)
- bar_sz -= 1;
- else
- bar_sz = 0;
- }
-
- pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz);
- pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz);
-
- pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz);
- if (b2b_bar == 2) {
- if (ndev->b2b_off)
- bar_sz -= 1;
- else
- bar_sz = 0;
- }
-
- pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz);
- pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz);
- dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz);
-
- /* SBAR01 hit by first part of the b2b bar */
- if (b2b_bar == 0)
- bar_addr = addr->bar0_addr;
- else if (b2b_bar == 1)
- bar_addr = addr->bar2_addr64;
- else if (b2b_bar == 2)
- bar_addr = addr->bar4_addr64;
- else
- return -EIO;
-
/* setup incoming bar limits == base addrs (zero length windows) */
- bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0);
+ bar_addr = addr->bar2_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr);
- bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
+ bar_addr = addr->bar4_addr64;
iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET);
bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET);
dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr);
diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig
new file mode 100644
index 000000000000..013ed6716438
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Kconfig
@@ -0,0 +1,9 @@
+config NTB_SWITCHTEC
+ tristate "MicroSemi Switchtec Non-Transparent Bridge Support"
+ select PCI_SW_SWITCHTEC
+ help
+ Enables NTB support for Switchtec PCI switches. This also
+ selects the Switchtec management driver as they share the same
+ hardware interface.
+
+ If unsure, say N.
diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile
new file mode 100644
index 000000000000..064686ead1ba
--- /dev/null
+++ b/drivers/ntb/hw/mscc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
new file mode 100644
index 000000000000..afe8ed6f3b23
--- /dev/null
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -0,0 +1,1216 @@
+/*
+ * Microsemi Switchtec(tm) PCIe Management Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/switchtec.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ntb.h>
+
+MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Microsemi Corporation");
+
+static ulong max_mw_size = SZ_2M;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size,
+ "Max memory window size reported to the upper layer");
+
+static bool use_lut_mws;
+module_param(use_lut_mws, bool, 0644);
+MODULE_PARM_DESC(use_lut_mws,
+ "Enable the use of the LUT based memory windows");
+
+#ifndef ioread64
+#ifdef readq
+#define ioread64 readq
+#else
+#define ioread64 _ioread64
+static inline u64 _ioread64(void __iomem *mmio)
+{
+ u64 low, high;
+
+ low = ioread32(mmio);
+ high = ioread32(mmio + sizeof(u32));
+ return low | (high << 32);
+}
+#endif
+#endif
+
+#ifndef iowrite64
+#ifdef writeq
+#define iowrite64 writeq
+#else
+#define iowrite64 _iowrite64
+static inline void _iowrite64(u64 val, void __iomem *mmio)
+{
+ iowrite32(val, mmio);
+ iowrite32(val >> 32, mmio + sizeof(u32));
+}
+#endif
+#endif
+
+#define SWITCHTEC_NTB_MAGIC 0x45CC0001
+#define MAX_MWS 128
+
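+/*
+ * Layout of the region shared with the peer partition: link status,
+ * memory window sizes and the scratchpad registers.
+ */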
+struct shared_mw {
+ u32 magic;
+ u32 link_sta;
+ u32 partition_id;
+ u64 mw_sizes[MAX_MWS];
+ u32 spad[128];
+};
+
+#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
+#define LUT_SIZE SZ_64K
+
+struct switchtec_ntb {
+ struct ntb_dev ntb;
+ struct switchtec_dev *stdev;
+
+ int self_partition;
+ int peer_partition;
+
+ int doorbell_irq;
+ int message_irq;
+
+ struct ntb_info_regs __iomem *mmio_ntb;
+ struct ntb_ctrl_regs __iomem *mmio_ctrl;
+ struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
+ struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
+ struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
+ struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
+
+ struct shared_mw *self_shared;
+ struct shared_mw __iomem *peer_shared;
+ dma_addr_t self_shared_dma;
+
+ u64 db_mask;
+ u64 db_valid_mask;
+ int db_shift;
+ int db_peer_shift;
+
+ /* synchronize rmw access of db_mask and hw reg */
+ spinlock_t db_mask_lock;
+
+ int nr_direct_mw;
+ int nr_lut_mw;
+ int direct_mw_to_bar[MAX_DIRECT_MW];
+
+ int peer_nr_direct_mw;
+ int peer_nr_lut_mw;
+ int peer_direct_mw_to_bar[MAX_DIRECT_MW];
+
+ bool link_is_up;
+ enum ntb_speed link_speed;
+ enum ntb_width link_width;
+};
+
+static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
+{
+ return container_of(ntb, struct switchtec_ntb, ntb);
+}
+
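+/*
+ * Issue a partition controller operation (lock/configure/reset) and poll
+ * in 50ms steps (1000 iterations, so up to roughly 50 seconds) until the
+ * controller leaves the corresponding transient state. Returns 0 once
+ * wait_status is reached, -ETIMEDOUT if the operation is still in flight,
+ * or -EIO if the hardware reports an unexpected status.
+ */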
+static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
+ struct ntb_ctrl_regs __iomem *ctl,
+ u32 op, int wait_status)
+{
+ static const char * const op_text[] = {
+ [NTB_CTRL_PART_OP_LOCK] = "lock",
+ [NTB_CTRL_PART_OP_CFG] = "configure",
+ [NTB_CTRL_PART_OP_RESET] = "reset",
+ };
+
+ int i;
+ u32 ps;
+ int status;
+
+ switch (op) {
+ case NTB_CTRL_PART_OP_LOCK:
+ status = NTB_CTRL_PART_STATUS_LOCKING;
+ break;
+ case NTB_CTRL_PART_OP_CFG:
+ status = NTB_CTRL_PART_STATUS_CONFIGURING;
+ break;
+ case NTB_CTRL_PART_OP_RESET:
+ status = NTB_CTRL_PART_STATUS_RESETTING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iowrite32(op, &ctl->partition_op);
+
+ for (i = 0; i < 1000; i++) {
+ if (msleep_interruptible(50) != 0) {
+ iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
+ return -EINTR;
+ }
+
+ ps = ioread32(&ctl->partition_status) & 0xFFFF;
+
+ if (ps != status)
+ break;
+ }
+
+ if (ps == wait_status)
+ return 0;
+
+ if (ps == status) {
+ dev_err(&sndev->stdev->dev,
+ "Timed out while peforming %s (%d). (%08x)",
+ op_text[op], op,
+ ioread32(&ctl->partition_status));
+
+ return -ETIMEDOUT;
+ }
+
+ return -EIO;
+}
+
+static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
+ u32 val)
+{
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
+ return -EINVAL;
+
+ iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
+
+ return 0;
+}
+
+static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ int nr_direct_mw = sndev->peer_nr_direct_mw;
+ int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (!use_lut_mws)
+ nr_lut_mw = 0;
+
+ return nr_direct_mw + nr_lut_mw;
+}
+
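+/*
+ * Memory windows beyond the direct windows are backed by LUT entries.
+ * LUT entry 0 is reserved for the shared-memory window, so the window
+ * index to LUT index translation carries a +1 offset.
+ */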
+static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+ return mw_idx - sndev->nr_direct_mw + 1;
+}
+
+static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
+{
+ return mw_idx - sndev->peer_nr_direct_mw + 1;
+}
+
+static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
+ int widx, resource_size_t *addr_align,
+ resource_size_t *size_align,
+ resource_size_t *size_max)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ int lut;
+ resource_size_t size;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ lut = widx >= sndev->peer_nr_direct_mw;
+ size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
+
+ if (size == 0)
+ return -EINVAL;
+
+ if (addr_align)
+ *addr_align = lut ? size : SZ_4K;
+
+ if (size_align)
+ *size_align = lut ? size : SZ_4K;
+
+ if (size_max)
+ *size_max = size;
+
+ return 0;
+}
+
+static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int bar = sndev->peer_direct_mw_to_bar[idx];
+ u32 ctl_val;
+
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+ iowrite32(0, &ctl->bar_entry[bar].win_size);
+ iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+ iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ int xlate_pos = ilog2(size);
+ int bar = sndev->peer_direct_mw_to_bar[idx];
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ u32 ctl_val;
+
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
+
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+ iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
+ iowrite64(sndev->self_partition | addr,
+ &ctl->bar_entry[bar].xlate_addr);
+}
+
+static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+
+ iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
+ &ctl->lut_entry[peer_lut_index(sndev, idx)]);
+}
+
+static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+ dma_addr_t addr, resource_size_t size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int xlate_pos = ilog2(size);
+ int nr_direct_mw = sndev->peer_nr_direct_mw;
+ int rc;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
+ widx, pidx, &addr, &size);
+
+ if (widx >= switchtec_ntb_mw_count(ntb, pidx))
+ return -EINVAL;
+
+ if (xlate_pos < 12)
+ return -EINVAL;
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ return rc;
+
+ if (addr == 0 || size == 0) {
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_clr_direct(sndev, widx);
+ else
+ switchtec_ntb_mw_clr_lut(sndev, widx);
+ } else {
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
+ else
+ switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
+ }
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+
+ if (rc == -EIO) {
+ dev_err(&sndev->stdev->dev,
+ "Hardware reported an error configuring mw %d: %08x",
+ widx, ioread32(&ctl->bar_error));
+
+ if (widx < nr_direct_mw)
+ switchtec_ntb_mw_clr_direct(sndev, widx);
+ else
+ switchtec_ntb_mw_clr_lut(sndev, widx);
+
+ switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ }
+
+ return rc;
+}
+
+static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
+}
+
+static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
+ int idx, phys_addr_t *base,
+ resource_size_t *size)
+{
+ int bar = sndev->direct_mw_to_bar[idx];
+ size_t offset = 0;
+
+ if (bar < 0)
+ return -EINVAL;
+
+ if (idx == 0) {
+ /*
+ * This is the direct BAR shared with the LUTs
+ * which means the actual window will be offset
+ * by the size of all the LUT entries.
+ */
+
+ offset = LUT_SIZE * sndev->nr_lut_mw;
+ }
+
+ if (base)
+ *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+ if (size) {
+ *size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
+ if (offset && *size > offset)
+ *size = offset;
+
+ if (*size > max_mw_size)
+ *size = max_mw_size;
+ }
+
+ return 0;
+}
+
+static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
+ int idx, phys_addr_t *base,
+ resource_size_t *size)
+{
+ int bar = sndev->direct_mw_to_bar[0];
+ int offset;
+
+ offset = LUT_SIZE * lut_index(sndev, idx);
+
+ if (base)
+ *base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
+
+ if (size)
+ *size = LUT_SIZE;
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
+ phys_addr_t *base,
+ resource_size_t *size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < sndev->nr_direct_mw)
+ return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
+ else if (idx < switchtec_ntb_peer_mw_count(ntb))
+ return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
+ else
+ return -EINVAL;
+}
+
+static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
+ int partition,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct switchtec_dev *stdev = sndev->stdev;
+
+ u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
+ u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
+
+ if (speed)
+ *speed = (linksta >> 16) & 0xF;
+
+ if (width)
+ *width = (linksta >> 20) & 0x3F;
+}
+
+static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
+{
+ enum ntb_speed self_speed, peer_speed;
+ enum ntb_width self_width, peer_width;
+
+ if (!sndev->link_is_up) {
+ sndev->link_speed = NTB_SPEED_NONE;
+ sndev->link_width = NTB_WIDTH_NONE;
+ return;
+ }
+
+ switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
+ &self_speed, &self_width);
+ switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
+ &peer_speed, &peer_width);
+
+ sndev->link_speed = min(self_speed, peer_speed);
+ sndev->link_width = min(self_width, peer_width);
+}
+
+enum {
+ LINK_MESSAGE = 0,
+ MSG_LINK_UP = 1,
+ MSG_LINK_DOWN = 2,
+ MSG_CHECK_LINK = 3,
+};
+
+static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
+{
+ int link_sta;
+ int old = sndev->link_is_up;
+
+ link_sta = sndev->self_shared->link_sta;
+ if (link_sta) {
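+ /*
+ * magic and link_sta are adjacent u32s in struct shared_mw, so a
+ * single 64-bit read returns the peer's magic in the low half and
+ * its link state in the high half.
+ */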
+ u64 peer = ioread64(&sndev->peer_shared->magic);
+
+ if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
+ link_sta = peer >> 32;
+ else
+ link_sta = 0;
+ }
+
+ sndev->link_is_up = link_sta;
+ switchtec_ntb_set_link_speed(sndev);
+
+ if (link_sta != old) {
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
+ ntb_link_event(&sndev->ntb);
+ dev_info(&sndev->stdev->dev, "ntb link %s",
+ link_sta ? "up" : "down");
+ }
+}
+
+static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
+{
+ struct switchtec_ntb *sndev = stdev->sndev;
+
+ switchtec_ntb_check_link(sndev);
+}
+
+static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
+ enum ntb_speed *speed,
+ enum ntb_width *width)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (speed)
+ *speed = sndev->link_speed;
+ if (width)
+ *width = sndev->link_width;
+
+ return sndev->link_is_up;
+}
+
+static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
+ enum ntb_speed max_speed,
+ enum ntb_width max_width)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ dev_dbg(&sndev->stdev->dev, "enabling link");
+
+ sndev->self_shared->link_sta = 1;
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
+
+ switchtec_ntb_check_link(sndev);
+
+ return 0;
+}
+
+static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ dev_dbg(&sndev->stdev->dev, "disabling link");
+
+ sndev->self_shared->link_sta = 0;
+ switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
+
+ switchtec_ntb_check_link(sndev);
+
+ return 0;
+}
+
+static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
+{
+ return 1;
+}
+
+static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_vector < 0 || db_vector > 1)
+ return 0;
+
+ return sndev->db_valid_mask;
+}
+
+static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
+{
+ u64 ret;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
+
+ return ret & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
+
+ return 0;
+}
+
+static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ unsigned long irqflags;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_bits & ~sndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+ sndev->db_mask |= db_bits << sndev->db_shift;
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+ spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+ unsigned long irqflags;
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (db_bits & ~sndev->db_valid_mask)
+ return -EINVAL;
+
+ spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
+
+ sndev->db_mask &= ~(db_bits << sndev->db_shift);
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+
+ spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
+
+ return 0;
+}
+
+static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
+}
+
+static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
+ phys_addr_t *db_addr,
+ resource_size_t *db_size)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ unsigned long offset;
+
+ offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
+ (unsigned long)sndev->stdev->mmio;
+
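+ /* db_shift is a bit offset; convert it to a byte offset into odb */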
+ offset += sndev->db_shift / 8;
+
+ if (db_addr)
+ *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
+ if (db_size)
+ *db_size = sizeof(u32);
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ iowrite64(db_bits << sndev->db_peer_shift,
+ &sndev->mmio_self_dbmsg->odb);
+
+ return 0;
+}
+
+static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ return ARRAY_SIZE(sndev->self_shared->spad);
+}
+
+static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+ return 0;
+
+ if (!sndev->self_shared)
+ return 0;
+
+ return sndev->self_shared->spad[idx];
+}
+
+static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
+ return -EINVAL;
+
+ if (!sndev->self_shared)
+ return -EIO;
+
+ sndev->self_shared->spad[idx] = val;
+
+ return 0;
+}
+
+static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
+ int sidx)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+ return 0;
+
+ if (!sndev->peer_shared)
+ return 0;
+
+ return ioread32(&sndev->peer_shared->spad[sidx]);
+}
+
+static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
+ int sidx, u32 val)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
+ return -EINVAL;
+
+ if (!sndev->peer_shared)
+ return -EIO;
+
+ iowrite32(val, &sndev->peer_shared->spad[sidx]);
+
+ return 0;
+}
+
+static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
+ int sidx, phys_addr_t *spad_addr)
+{
+ struct switchtec_ntb *sndev = ntb_sndev(ntb);
+ unsigned long offset;
+
+ if (pidx != NTB_DEF_PEER_IDX)
+ return -EINVAL;
+
+ offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
+ (unsigned long)sndev->stdev->mmio;
+
+ if (spad_addr)
+ *spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
+
+ return 0;
+}
+
+static const struct ntb_dev_ops switchtec_ntb_ops = {
+ .mw_count = switchtec_ntb_mw_count,
+ .mw_get_align = switchtec_ntb_mw_get_align,
+ .mw_set_trans = switchtec_ntb_mw_set_trans,
+ .peer_mw_count = switchtec_ntb_peer_mw_count,
+ .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr,
+ .link_is_up = switchtec_ntb_link_is_up,
+ .link_enable = switchtec_ntb_link_enable,
+ .link_disable = switchtec_ntb_link_disable,
+ .db_valid_mask = switchtec_ntb_db_valid_mask,
+ .db_vector_count = switchtec_ntb_db_vector_count,
+ .db_vector_mask = switchtec_ntb_db_vector_mask,
+ .db_read = switchtec_ntb_db_read,
+ .db_clear = switchtec_ntb_db_clear,
+ .db_set_mask = switchtec_ntb_db_set_mask,
+ .db_clear_mask = switchtec_ntb_db_clear_mask,
+ .db_read_mask = switchtec_ntb_db_read_mask,
+ .peer_db_addr = switchtec_ntb_peer_db_addr,
+ .peer_db_set = switchtec_ntb_peer_db_set,
+ .spad_count = switchtec_ntb_spad_count,
+ .spad_read = switchtec_ntb_spad_read,
+ .spad_write = switchtec_ntb_spad_write,
+ .peer_spad_read = switchtec_ntb_peer_spad_read,
+ .peer_spad_write = switchtec_ntb_peer_spad_write,
+ .peer_spad_addr = switchtec_ntb_peer_spad_addr,
+};
+
+static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
+{
+ u64 part_map;
+
+ sndev->ntb.pdev = sndev->stdev->pdev;
+ sndev->ntb.topo = NTB_TOPO_SWITCH;
+ sndev->ntb.ops = &switchtec_ntb_ops;
+
+ sndev->self_partition = sndev->stdev->partition;
+
+ sndev->mmio_ntb = sndev->stdev->mmio_ntb;
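+ /*
+ * The endpoint map has one bit per partition with an NT endpoint;
+ * clear our own bit and take the lowest remaining bit as the peer.
+ */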
+ part_map = ioread64(&sndev->mmio_ntb->ep_map);
+ part_map &= ~(1 << sndev->self_partition);
+ sndev->peer_partition = ffs(part_map) - 1;
+
+ dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
+ sndev->self_partition, sndev->stdev->partition_count,
+ part_map);
+
+ sndev->mmio_ctrl = (void __iomem *)sndev->mmio_ntb +
+ SWITCHTEC_NTB_REG_CTRL_OFFSET;
+ sndev->mmio_dbmsg = (void __iomem *)sndev->mmio_ntb +
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET;
+
+ sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
+ sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
+ sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
+}
+
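+/*
+ * Collect the BARs the control registers flag as valid windows; the
+ * returned count is the number of direct memory windows.
+ */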
+static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
+{
+ int i;
+ int cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
+ u32 r = ioread32(&ctrl->bar_entry[i].ctl);
+
+ if (r & NTB_CTRL_BAR_VALID)
+ map[cnt++] = i;
+ }
+
+ return cnt;
+}
+
+static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
+{
+ sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
+ sndev->mmio_self_ctrl);
+
+ sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
+ sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
+
+ dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
+ sndev->nr_direct_mw, sndev->nr_lut_mw);
+
+ sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
+ sndev->mmio_peer_ctrl);
+
+ sndev->peer_nr_lut_mw =
+ ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
+ sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
+
+ dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
+ sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
+}
+
+/*
+ * There are 64 doorbells in the switch hardware, but they are
+ * shared among all partitions, so we split them in half
+ * (32 for each partition). The message interrupts also share the
+ * top 4 doorbells, so we limit this to 28 doorbells per
+ * partition.
+ */
+static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
+{
+ sndev->db_valid_mask = 0x0FFFFFFF;
+
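+ /* the lower-numbered partition owns the low 32 doorbell bits */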
+ if (sndev->self_partition < sndev->peer_partition) {
+ sndev->db_shift = 0;
+ sndev->db_peer_shift = 32;
+ } else {
+ sndev->db_shift = 32;
+ sndev->db_peer_shift = 0;
+ }
+
+ sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
+ iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
+ iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
+ &sndev->mmio_self_dbmsg->odb_mask);
+}
+
+static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
+{
+ int i;
+ u32 msg_map = 0;
+
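+ /*
+ * Build msg_map so that each byte i routes incoming message i:
+ * the message index goes in the low two bits and the peer
+ * partition number sits above it.
+ */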
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+ int m = i | sndev->peer_partition << 2;
+
+ msg_map |= m << i * 8;
+ }
+
+ iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
+
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
+ iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
+ &sndev->mmio_self_dbmsg->imsg[i]);
+}
+
+static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
+{
+ int rc = 0;
+ u16 req_id;
+ u32 error;
+
+ req_id = ioread16(&sndev->mmio_ntb->requester_id);
+
+ if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
+ dev_err(&sndev->stdev->dev,
+ "Not enough requester IDs available.");
+ return -EFAULT;
+ }
+
+ rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+ NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ return rc;
+
+ iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
+ &sndev->mmio_self_ctrl->partition_ctrl);
+
+ /*
+ * Root Complex Requester ID (which is 0:00.0)
+ */
+ iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
+ &sndev->mmio_self_ctrl->req_id_table[0]);
+
+ /*
+ * Host Bridge Requester ID (as read from the mmap address)
+ */
+ iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
+ &sndev->mmio_self_ctrl->req_id_table[1]);
+
+ rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
+ NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ if (rc == -EIO) {
+ error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
+ dev_err(&sndev->stdev->dev,
+ "Error setting up the requester ID table: %08x",
+ error);
+ }
+
+ return rc;
+}
+
+static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
+{
+ int i;
+
+ memset(sndev->self_shared, 0, LUT_SIZE);
+ sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
+ sndev->self_shared->partition_id = sndev->stdev->partition;
+
+ for (i = 0; i < sndev->nr_direct_mw; i++) {
+ int bar = sndev->direct_mw_to_bar[i];
+ resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
+
+ if (i == 0)
+ sz = min_t(resource_size_t, sz,
+ LUT_SIZE * sndev->nr_lut_mw);
+
+ sndev->self_shared->mw_sizes[i] = sz;
+ }
+
+ for (i = 0; i < sndev->nr_lut_mw; i++) {
+ int idx = sndev->nr_direct_mw + i;
+
+ sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
+ }
+}
+
+static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
+{
+ struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
+ int bar = sndev->direct_mw_to_bar[0];
+ u32 ctl_val;
+ int rc;
+
+ sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
+ LUT_SIZE,
+ &sndev->self_shared_dma,
+ GFP_KERNEL);
+ if (!sndev->self_shared) {
+ dev_err(&sndev->stdev->dev,
+ "unable to allocate memory for shared mw");
+ return -ENOMEM;
+ }
+
+ switchtec_ntb_init_shared(sndev);
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
+ NTB_CTRL_PART_STATUS_LOCKED);
+ if (rc)
+ goto unalloc_and_exit;
+
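+ /*
+ * Enable LUT translation on the shared BAR, encoding the per-entry
+ * size (as a power of two) and the number of LUT entries in the
+ * BAR control register.
+ */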
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
+ ctl_val &= 0xFF;
+ ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
+ ctl_val |= ilog2(LUT_SIZE) << 8;
+ ctl_val |= (sndev->nr_lut_mw - 1) << 14;
+ iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
+
+ iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
+ sndev->self_shared_dma),
+ &ctl->lut_entry[0]);
+
+ rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
+ NTB_CTRL_PART_STATUS_NORMAL);
+ if (rc) {
+ u32 bar_error, lut_error;
+
+ bar_error = ioread32(&ctl->bar_error);
+ lut_error = ioread32(&ctl->lut_error);
+ dev_err(&sndev->stdev->dev,
+ "Error setting up shared MW: %08x / %08x",
+ bar_error, lut_error);
+ goto unalloc_and_exit;
+ }
+
+ sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
+ if (!sndev->peer_shared) {
+ rc = -ENOMEM;
+ goto unalloc_and_exit;
+ }
+
+ dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
+ return 0;
+
+unalloc_and_exit:
+ dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+ sndev->self_shared, sndev->self_shared_dma);
+
+ return rc;
+}
+
+static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
+{
+ if (sndev->peer_shared)
+ pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
+
+ if (sndev->self_shared)
+ dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
+ sndev->self_shared,
+ sndev->self_shared_dma);
+}
+
+static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
+{
+ struct switchtec_ntb *sndev = dev;
+
+ dev_dbg(&sndev->stdev->dev, "doorbell\n");
+
+ ntb_db_event(&sndev->ntb, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
+{
+ int i;
+ struct switchtec_ntb *sndev = dev;
+
+ for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
+ u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
+
+ if (msg & NTB_DBMSG_IMSG_STATUS) {
+ dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
+ (u32)msg);
+ iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
+
+ if (i == LINK_MESSAGE)
+ switchtec_ntb_check_link(sndev);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
+{
+ int i;
+ int rc;
+ int doorbell_irq = 0;
+ int message_irq = 0;
+ int event_irq;
+ int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
+
+ event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
+
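+ /*
+ * Pick the lowest interrupt vectors that do not collide with the
+ * event vector: the first free one for doorbells, the next free
+ * one for messages.
+ */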
+ while (doorbell_irq == event_irq)
+ doorbell_irq++;
+ while (message_irq == doorbell_irq ||
+ message_irq == event_irq)
+ message_irq++;
+
+ dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
+ event_irq, doorbell_irq, message_irq);
+
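+ /*
+ * Route all but the last four incoming doorbell bits to the
+ * doorbell vector; the last four slots are the message interrupts.
+ */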
+ for (i = 0; i < idb_vecs - 4; i++)
+ iowrite8(doorbell_irq,
+ &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+ for (; i < idb_vecs; i++)
+ iowrite8(message_irq,
+ &sndev->mmio_self_dbmsg->idb_vec_map[i]);
+
+ sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
+ sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
+
+ rc = request_irq(sndev->doorbell_irq,
+ switchtec_ntb_doorbell_isr, 0,
+ "switchtec_ntb_doorbell", sndev);
+ if (rc)
+ return rc;
+
+ rc = request_irq(sndev->message_irq,
+ switchtec_ntb_message_isr, 0,
+ "switchtec_ntb_message", sndev);
+ if (rc) {
+ free_irq(sndev->doorbell_irq, sndev);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
+{
+ free_irq(sndev->doorbell_irq, sndev);
+ free_irq(sndev->message_irq, sndev);
+}
+
+static int switchtec_ntb_add(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ struct switchtec_ntb *sndev;
+ int rc;
+
+ stdev->sndev = NULL;
+
+ if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
+ return -ENODEV;
+
+ if (stdev->partition_count != 2)
+ dev_warn(dev, "ntb driver only supports 2 partitions");
+
+ sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
+ if (!sndev)
+ return -ENOMEM;
+
+ sndev->stdev = stdev;
+ switchtec_ntb_init_sndev(sndev);
+ switchtec_ntb_init_mw(sndev);
+ switchtec_ntb_init_db(sndev);
+ switchtec_ntb_init_msgs(sndev);
+
+ rc = switchtec_ntb_init_req_id_table(sndev);
+ if (rc)
+ goto free_and_exit;
+
+ rc = switchtec_ntb_init_shared_mw(sndev);
+ if (rc)
+ goto free_and_exit;
+
+ rc = switchtec_ntb_init_db_msg_irq(sndev);
+ if (rc)
+ goto deinit_shared_and_exit;
+
+ rc = ntb_register_device(&sndev->ntb);
+ if (rc)
+ goto deinit_and_exit;
+
+ stdev->sndev = sndev;
+ stdev->link_notifier = switchtec_ntb_link_notification;
+ dev_info(dev, "NTB device registered");
+
+ return 0;
+
+deinit_and_exit:
+ switchtec_ntb_deinit_db_msg_irq(sndev);
+deinit_shared_and_exit:
+ switchtec_ntb_deinit_shared_mw(sndev);
+free_and_exit:
+ kfree(sndev);
+ dev_err(dev, "failed to register ntb device: %d", rc);
+ return rc;
+}
+
+static void switchtec_ntb_remove(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct switchtec_dev *stdev = to_stdev(dev);
+ struct switchtec_ntb *sndev = stdev->sndev;
+
+ if (!sndev)
+ return;
+
+ stdev->link_notifier = NULL;
+ stdev->sndev = NULL;
+ ntb_unregister_device(&sndev->ntb);
+ switchtec_ntb_deinit_db_msg_irq(sndev);
+ switchtec_ntb_deinit_shared_mw(sndev);
+ kfree(sndev);
+ dev_info(dev, "ntb device unregistered");
+}
+
+static struct class_interface switchtec_interface = {
+ .add_dev = switchtec_ntb_add,
+ .remove_dev = switchtec_ntb_remove,
+};
+
+static int __init switchtec_ntb_init(void)
+{
+ switchtec_interface.class = switchtec_class;
+ return class_interface_register(&switchtec_interface);
+}
+module_init(switchtec_ntb_init);
+
+static void __exit switchtec_ntb_exit(void)
+{
+ class_interface_unregister(&switchtec_interface);
+}
+module_exit(switchtec_ntb_exit);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f58d8e305323..045e3dd4750e 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -191,8 +191,6 @@ struct ntb_transport_qp {
struct ntb_transport_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
- resource_size_t xlat_align;
- resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buff_size;
@@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
size_t xlat_size, buff_size;
+ resource_size_t xlat_align;
+ resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
- xlat_size = round_up(size, mw->xlat_align_size);
- buff_size = round_up(size, mw->xlat_align);
+ rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align,
+ &xlat_align_size, NULL);
+ if (rc)
+ return rc;
+
+ xlat_size = round_up(size, xlat_align_size);
+ buff_size = round_up(size, xlat_align);
/* No need to re-setup */
if (mw->xlat_size == xlat_size)
@@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
* is a requirement of the hardware. It is recommended to setup CMA
* for BAR sizes equal or greater than 4MB.
*/
- if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
+ if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
&mw->dma_addr);
ntb_free_mw(nt, num_mw);
@@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
for (i = 0; i < mw_count; i++) {
mw = &nt->mw_vec[i];
- rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align,
- &mw->xlat_align_size, NULL);
- if (rc)
- goto err1;
-
rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr,
&mw->phys_size);
if (rc)
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 759f772fa00c..427112cf101a 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)")
struct perf_mw {
phys_addr_t phys_addr;
resource_size_t phys_size;
- resource_size_t xlat_align;
- resource_size_t xlat_align_size;
void __iomem *vbase;
size_t xlat_size;
size_t buf_size;
@@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
struct perf_mw *mw = &perf->mw;
size_t xlat_size, buf_size;
+ resource_size_t xlat_align;
+ resource_size_t xlat_align_size;
int rc;
if (!size)
return -EINVAL;
- xlat_size = round_up(size, mw->xlat_align_size);
- buf_size = round_up(size, mw->xlat_align);
+ rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align,
+ &xlat_align_size, NULL);
+ if (rc)
+ return rc;
+
+ xlat_size = round_up(size, xlat_align_size);
+ buf_size = round_up(size, xlat_align);
if (mw->xlat_size == xlat_size)
return 0;
@@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
mw = &perf->mw;
- rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
- &mw->xlat_align_size, NULL);
- if (rc)
- return rc;
-
rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
if (rc)
return rc;
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 938a18bcfc3f..3f5a92bae6f8 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -107,9 +107,9 @@ struct pp_ctx {
static struct dentry *pp_debugfs_dir;
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
{
- struct pp_ctx *pp = (void *)ctx;
+ struct pp_ctx *pp = from_timer(pp, t, db_timer);
unsigned long irqflags;
u64 db_bits, db_mask;
u32 spad_rd, spad_wr;
@@ -153,7 +153,7 @@ static void pp_link_event(void *ctx)
if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
dev_dbg(&pp->ntb->dev, "link is up\n");
- pp_ping((unsigned long)pp);
+ pp_ping(&pp->db_timer);
} else {
dev_dbg(&pp->ntb->dev, "link is down\n");
del_timer(&pp->db_timer);
@@ -252,7 +252,7 @@ static int pp_probe(struct ntb_client *client,
pp->db_bits = 0;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
- setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+ timer_setup(&pp->db_timer, pp_ping, 0);
pp->db_delay = msecs_to_jiffies(delay_ms);
rc = ntb_set_ctx(ntb, pp, &pp_ops);
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index a69815c45ce6..91526a986caa 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep,
phys_addr_t base;
resource_size_t mw_size;
- resource_size_t align_addr;
- resource_size_t align_size;
- resource_size_t max_size;
+ resource_size_t align_addr = 0;
+ resource_size_t align_size = 0;
+ resource_size_t max_size = 0;
buf_size = min_t(size_t, size, 512);
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index df431e8a0631..b793727cd4f7 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Macintosh Nubus Interface Code
*
@@ -18,11 +19,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/hwtest.h>
-#include <asm/mac_via.h>
-#include <asm/mac_oss.h>
-
-extern void via_nubus_init(void);
-extern void oss_nubus_init(void);
/* Constants */
@@ -840,14 +836,6 @@ static int __init nubus_init(void)
if (!MACH_IS_MAC)
return 0;
- /* Initialize the NuBus interrupts */
- if (oss_present) {
- oss_nubus_init();
- } else {
- via_nubus_init();
- }
-
- /* And probe */
pr_info("NuBus: Scanning NuBus slots.\n");
nubus_devices = NULL;
nubus_boards = NULL;
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index e8f68f5732f1..004a122ac0ff 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* drivers/nubus/proc.c: Proc FS interface for NuBus.
By David Huggins-Daines <dhd@debian.org>
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 5bdd499b5f4f..a65f2e1d9f53 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -96,7 +96,7 @@ config NVDIMM_DAX
help
Support raw device dax access to a persistent memory
namespace. For environments that want to hard partition
- peristent memory, this capability provides a mechanism to
+ persistent memory, this capability provides a mechanism to
sub-divide a namespace into character devices that can only be
accessed via DAX (mmap(2)).
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 909554c3f955..70d5f3ad9909 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
@@ -20,6 +21,7 @@ libnvdimm-y += region_devs.o
libnvdimm-y += region.o
libnvdimm-y += namespace_devs.o
libnvdimm-y += label.o
+libnvdimm-y += badrange.o
libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
libnvdimm-$(CONFIG_BTT) += btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
new file mode 100644
index 000000000000..e068d72b4357
--- /dev/null
+++ b/drivers/nvdimm/badrange.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/ndctl.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "nd-core.h"
+#include "nd.h"
+
+void badrange_init(struct badrange *badrange)
+{
+ INIT_LIST_HEAD(&badrange->list);
+ spin_lock_init(&badrange->lock);
+}
+EXPORT_SYMBOL_GPL(badrange_init);
+
+static void append_badrange_entry(struct badrange *badrange,
+ struct badrange_entry *bre, u64 addr, u64 length)
+{
+ lockdep_assert_held(&badrange->lock);
+ bre->start = addr;
+ bre->length = length;
+ list_add_tail(&bre->list, &badrange->list);
+}
+
+static int alloc_and_append_badrange_entry(struct badrange *badrange,
+ u64 addr, u64 length, gfp_t flags)
+{
+ struct badrange_entry *bre;
+
+ bre = kzalloc(sizeof(*bre), flags);
+ if (!bre)
+ return -ENOMEM;
+
+ append_badrange_entry(badrange, bre, addr, length);
+ return 0;
+}
+
+static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
+{
+ struct badrange_entry *bre, *bre_new;
+
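+ /*
+ * Drop the lock so the allocation may sleep (GFP_KERNEL); the list
+ * is re-examined once the lock is retaken.
+ */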
+ spin_unlock(&badrange->lock);
+ bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
+ spin_lock(&badrange->lock);
+
+ if (list_empty(&badrange->list)) {
+ if (!bre_new)
+ return -ENOMEM;
+ append_badrange_entry(badrange, bre_new, addr, length);
+ return 0;
+ }
+
+ /*
+ * There is a chance this is a duplicate, check for those first.
+ * This will be the common case as ARS_STATUS returns all known
+ * errors in the SPA space, and we can't query it per region
+ */
+ list_for_each_entry(bre, &badrange->list, list)
+ if (bre->start == addr) {
+ /* If length has changed, update this list entry */
+ if (bre->length != length)
+ bre->length = length;
+ kfree(bre_new);
+ return 0;
+ }
+
+ /*
+ * If not a duplicate or a simple length update, add the entry as is,
+ * as any overlapping ranges will get resolved when the list is consumed
+ * and converted to badblocks
+ */
+ if (!bre_new)
+ return -ENOMEM;
+ append_badrange_entry(badrange, bre_new, addr, length);
+
+ return 0;
+}
+
+int badrange_add(struct badrange *badrange, u64 addr, u64 length)
+{
+ int rc;
+
+ spin_lock(&badrange->lock);
+ rc = add_badrange(badrange, addr, length);
+ spin_unlock(&badrange->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(badrange_add);
+
+void badrange_forget(struct badrange *badrange, phys_addr_t start,
+ unsigned int len)
+{
+ struct list_head *badrange_list = &badrange->list;
+ u64 clr_end = start + len - 1;
+ struct badrange_entry *bre, *next;
+
+ spin_lock(&badrange->lock);
+
+ /*
+ * [start, clr_end] is the badrange interval being cleared.
+ * [bre->start, bre_end] is the badrange_list entry we're comparing
+ * the above interval against. The badrange list entry may need
+ * to be modified (update either start or length), deleted, or
+ * split into two based on the overlap characteristics
+ */
+
+ list_for_each_entry_safe(bre, next, badrange_list, list) {
+ u64 bre_end = bre->start + bre->length - 1;
+
+ /* Skip intervals with no intersection */
+ if (bre_end < start)
+ continue;
+ if (bre->start > clr_end)
+ continue;
+ /* Delete completely overlapped badrange entries */
+ if ((bre->start >= start) && (bre_end <= clr_end)) {
+ list_del(&bre->list);
+ kfree(bre);
+ continue;
+ }
+ /* Adjust start point of partially cleared entries */
+ if ((start <= bre->start) && (clr_end > bre->start)) {
+ bre->length -= clr_end - bre->start + 1;
+ bre->start = clr_end + 1;
+ continue;
+ }
+ /* Adjust bre->length for partial clearing at the tail end */
+ if ((bre->start < start) && (bre_end <= clr_end)) {
+ /* bre->start remains the same */
+ bre->length = start - bre->start;
+ continue;
+ }
+ /*
+ * If clearing in the middle of an entry, we split it into
+ * two by modifying the current entry to represent one half of
+ * the split, and adding a new entry for the second half.
+ */
+ if ((bre->start < start) && (bre_end > clr_end)) {
+ u64 new_start = clr_end + 1;
+ u64 new_len = bre_end - new_start + 1;
+
+ /* Add new entry covering the right half */
+ alloc_and_append_badrange_entry(badrange, new_start,
+ new_len, GFP_NOWAIT);
+ /* Adjust this entry to cover the left half */
+ bre->length = start - bre->start;
+ continue;
+ }
+ }
+ spin_unlock(&badrange->lock);
+}
+EXPORT_SYMBOL_GPL(badrange_forget);
+
+static void set_badblock(struct badblocks *bb, sector_t s, int num)
+{
+ dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
+ (u64) s * 512, (u64) num * 512);
+ /* this isn't an error as the hardware will still throw an exception */
+ if (badblocks_set(bb, s, num, 1))
+ dev_info_once(bb->dev, "%s: failed for sector %llx\n",
+ __func__, (u64) s);
+}
+
+/**
+ * __add_badblock_range() - Convert a physical address range to bad sectors
+ * @bb: badblocks instance to populate
+ * @ns_offset: namespace offset where the error range begins (in bytes)
+ * @len: number of bytes of badrange to be added
+ *
+ * This assumes that the range provided with (ns_offset, len) is within
+ * the bounds of physical addresses for this namespace, i.e. lies in the
+ * interval [ns_start, ns_start + ns_size)
+ */
+static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
+{
+ const unsigned int sector_size = 512;
+ sector_t start_sector, end_sector;
+ u64 num_sectors;
+ u32 rem;
+
+ start_sector = div_u64(ns_offset, sector_size);
+ end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
+ if (rem)
+ end_sector++;
+ num_sectors = end_sector - start_sector;
+
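+ /* badblocks_set() takes an int count, so add huge ranges in chunks */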
+ if (unlikely(num_sectors > (u64)INT_MAX)) {
+ u64 remaining = num_sectors;
+ sector_t s = start_sector;
+
+ while (remaining) {
+ int done = min_t(u64, remaining, INT_MAX);
+
+ set_badblock(bb, s, done);
+ remaining -= done;
+ s += done;
+ }
+ } else
+ set_badblock(bb, start_sector, num_sectors);
+}
+
+static void badblocks_populate(struct badrange *badrange,
+ struct badblocks *bb, const struct resource *res)
+{
+ struct badrange_entry *bre;
+
+ if (list_empty(&badrange->list))
+ return;
+
+ list_for_each_entry(bre, &badrange->list, list) {
+ u64 bre_end = bre->start + bre->length - 1;
+
+ /* Discard intervals with no intersection */
+ if (bre_end < res->start)
+ continue;
+ if (bre->start > res->end)
+ continue;
+ /* Deal with any overlap after start of the namespace */
+ if (bre->start >= res->start) {
+ u64 start = bre->start;
+ u64 len;
+
+ if (bre_end <= res->end)
+ len = bre->length;
+ else
+ len = res->start + resource_size(res)
+ - bre->start;
+ __add_badblock_range(bb, start - res->start, len);
+ continue;
+ }
+ /*
+ * Deal with overlap for badrange starting before
+ * the namespace.
+ */
+ if (bre->start < res->start) {
+ u64 len;
+
+ if (bre_end < res->end)
+ len = bre->start + bre->length - res->start;
+ else
+ len = resource_size(res);
+ __add_badblock_range(bb, 0, len);
+ }
+ }
+}
+
+/**
+ * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
+ * @nd_region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
+ *
+ * The badrange list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges. Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
+ */
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res)
+{
+ struct nvdimm_bus *nvdimm_bus;
+
+ if (!is_memory(&nd_region->dev)) {
+ dev_WARN_ONCE(&nd_region->dev, 1,
+ "%s only valid for pmem regions\n", __func__);
+ return;
+ }
+ nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ badblocks_populate(&nvdimm_bus->badrange, bb, res);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index d5612bd1cc81..e949e3302af4 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -23,6 +23,7 @@
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"
@@ -1402,6 +1403,8 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->private_data = btt;
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
+ btt->btt_disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_SYNCHRONOUS_IO;
blk_queue_make_request(btt->btt_queue, btt_make_request);
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index baf283986a7e..0a5e6cd758fe 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -11,6 +11,7 @@
* General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -221,7 +222,7 @@ static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t phys, u64 cleared)
{
if (cleared > 0)
- nvdimm_forget_poison(nvdimm_bus, phys, cleared);
+ badrange_forget(&nvdimm_bus->badrange, phys, cleared);
if (cleared > 0 && cleared / 512)
nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
@@ -344,11 +345,10 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- INIT_LIST_HEAD(&nvdimm_bus->poison_list);
init_waitqueue_head(&nvdimm_bus->probe_wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
mutex_init(&nvdimm_bus->reconfig_mutex);
- spin_lock_init(&nvdimm_bus->poison_lock);
+ badrange_init(&nvdimm_bus->badrange);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
@@ -395,15 +395,15 @@ static int child_unregister(struct device *dev, void *data)
return 0;
}
-static void free_poison_list(struct list_head *poison_list)
+static void free_badrange_list(struct list_head *badrange_list)
{
- struct nd_poison *pl, *next;
+ struct badrange_entry *bre, *next;
- list_for_each_entry_safe(pl, next, poison_list, list) {
- list_del(&pl->list);
- kfree(pl);
+ list_for_each_entry_safe(bre, next, badrange_list, list) {
+ list_del(&bre->list);
+ kfree(bre);
}
- list_del_init(poison_list);
+ list_del_init(badrange_list);
}
static int nd_bus_remove(struct device *dev)
@@ -417,9 +417,9 @@ static int nd_bus_remove(struct device *dev)
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
- spin_lock(&nvdimm_bus->poison_lock);
- free_poison_list(&nvdimm_bus->poison_list);
- spin_unlock(&nvdimm_bus->poison_lock);
+ spin_lock(&nvdimm_bus->badrange.lock);
+ free_badrange_list(&nvdimm_bus->badrange.list);
+ spin_unlock(&nvdimm_bus->badrange.lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index bb71f0cf8f5d..1dc527660637 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -398,265 +398,11 @@ struct attribute_group nvdimm_bus_attribute_group = {
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);
-static void set_badblock(struct badblocks *bb, sector_t s, int num)
+int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
- dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
- (u64) s * 512, (u64) num * 512);
- /* this isn't an error as the hardware will still throw an exception */
- if (badblocks_set(bb, s, num, 1))
- dev_info_once(bb->dev, "%s: failed for sector %llx\n",
- __func__, (u64) s);
+ return badrange_add(&nvdimm_bus->badrange, addr, length);
}
-
-/**
- * __add_badblock_range() - Convert a physical address range to bad sectors
- * @bb: badblocks instance to populate
- * @ns_offset: namespace offset where the error range begins (in bytes)
- * @len: number of bytes of poison to be added
- *
- * This assumes that the range provided with (ns_offset, len) is within
- * the bounds of physical addresses for this namespace, i.e. lies in the
- * interval [ns_start, ns_start + ns_size)
- */
-static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
-{
- const unsigned int sector_size = 512;
- sector_t start_sector, end_sector;
- u64 num_sectors;
- u32 rem;
-
- start_sector = div_u64(ns_offset, sector_size);
- end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
- if (rem)
- end_sector++;
- num_sectors = end_sector - start_sector;
-
- if (unlikely(num_sectors > (u64)INT_MAX)) {
- u64 remaining = num_sectors;
- sector_t s = start_sector;
-
- while (remaining) {
- int done = min_t(u64, remaining, INT_MAX);
-
- set_badblock(bb, s, done);
- remaining -= done;
- s += done;
- }
- } else
- set_badblock(bb, start_sector, num_sectors);
-}
-
-static void badblocks_populate(struct list_head *poison_list,
- struct badblocks *bb, const struct resource *res)
-{
- struct nd_poison *pl;
-
- if (list_empty(poison_list))
- return;
-
- list_for_each_entry(pl, poison_list, list) {
- u64 pl_end = pl->start + pl->length - 1;
-
- /* Discard intervals with no intersection */
- if (pl_end < res->start)
- continue;
- if (pl->start > res->end)
- continue;
- /* Deal with any overlap after start of the namespace */
- if (pl->start >= res->start) {
- u64 start = pl->start;
- u64 len;
-
- if (pl_end <= res->end)
- len = pl->length;
- else
- len = res->start + resource_size(res)
- - pl->start;
- __add_badblock_range(bb, start - res->start, len);
- continue;
- }
- /* Deal with overlap for poison starting before the namespace */
- if (pl->start < res->start) {
- u64 len;
-
- if (pl_end < res->end)
- len = pl->start + pl->length - res->start;
- else
- len = resource_size(res);
- __add_badblock_range(bb, 0, len);
- }
- }
-}
-
-/**
- * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
- * @region: parent region of the range to interrogate
- * @bb: badblocks instance to populate
- * @res: resource range to consider
- *
- * The poison list generated during bus initialization may contain
- * multiple, possibly overlapping physical address ranges. Compare each
- * of these ranges to the resource range currently being initialized,
- * and add badblocks entries for all matching sub-ranges
- */
-void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res)
-{
- struct nvdimm_bus *nvdimm_bus;
- struct list_head *poison_list;
-
- if (!is_memory(&nd_region->dev)) {
- dev_WARN_ONCE(&nd_region->dev, 1,
- "%s only valid for pmem regions\n", __func__);
- return;
- }
- nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- poison_list = &nvdimm_bus->poison_list;
-
- nvdimm_bus_lock(&nvdimm_bus->dev);
- badblocks_populate(poison_list, bb, res);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
-}
-EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
-
-static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
- struct nd_poison *pl, u64 addr, u64 length)
-{
- lockdep_assert_held(&nvdimm_bus->poison_lock);
- pl->start = addr;
- pl->length = length;
- list_add_tail(&pl->list, &nvdimm_bus->poison_list);
-}
-
-static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
- gfp_t flags)
-{
- struct nd_poison *pl;
-
- pl = kzalloc(sizeof(*pl), flags);
- if (!pl)
- return -ENOMEM;
-
- append_poison_entry(nvdimm_bus, pl, addr, length);
- return 0;
-}
-
-static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
-{
- struct nd_poison *pl, *pl_new;
-
- spin_unlock(&nvdimm_bus->poison_lock);
- pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
- spin_lock(&nvdimm_bus->poison_lock);
-
- if (list_empty(&nvdimm_bus->poison_list)) {
- if (!pl_new)
- return -ENOMEM;
- append_poison_entry(nvdimm_bus, pl_new, addr, length);
- return 0;
- }
-
- /*
- * There is a chance this is a duplicate, check for those first.
- * This will be the common case as ARS_STATUS returns all known
- * errors in the SPA space, and we can't query it per region
- */
- list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
- if (pl->start == addr) {
- /* If length has changed, update this list entry */
- if (pl->length != length)
- pl->length = length;
- kfree(pl_new);
- return 0;
- }
-
- /*
- * If not a duplicate or a simple length update, add the entry as is,
- * as any overlapping ranges will get resolved when the list is consumed
- * and converted to badblocks
- */
- if (!pl_new)
- return -ENOMEM;
- append_poison_entry(nvdimm_bus, pl_new, addr, length);
-
- return 0;
-}
-
-int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
-{
- int rc;
-
- spin_lock(&nvdimm_bus->poison_lock);
- rc = bus_add_poison(nvdimm_bus, addr, length);
- spin_unlock(&nvdimm_bus->poison_lock);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
-
-void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
- unsigned int len)
-{
- struct list_head *poison_list = &nvdimm_bus->poison_list;
- u64 clr_end = start + len - 1;
- struct nd_poison *pl, *next;
-
- spin_lock(&nvdimm_bus->poison_lock);
- WARN_ON_ONCE(list_empty(poison_list));
-
- /*
- * [start, clr_end] is the poison interval being cleared.
- * [pl->start, pl_end] is the poison_list entry we're comparing
- * the above interval against. The poison list entry may need
- * to be modified (update either start or length), deleted, or
- * split into two based on the overlap characteristics
- */
-
- list_for_each_entry_safe(pl, next, poison_list, list) {
- u64 pl_end = pl->start + pl->length - 1;
-
- /* Skip intervals with no intersection */
- if (pl_end < start)
- continue;
- if (pl->start > clr_end)
- continue;
- /* Delete completely overlapped poison entries */
- if ((pl->start >= start) && (pl_end <= clr_end)) {
- list_del(&pl->list);
- kfree(pl);
- continue;
- }
- /* Adjust start point of partially cleared entries */
- if ((start <= pl->start) && (clr_end > pl->start)) {
- pl->length -= clr_end - pl->start + 1;
- pl->start = clr_end + 1;
- continue;
- }
- /* Adjust pl->length for partial clearing at the tail end */
- if ((pl->start < start) && (pl_end <= clr_end)) {
- /* pl->start remains the same */
- pl->length = start - pl->start;
- continue;
- }
- /*
- * If clearing in the middle of an entry, we split it into
- * two by modifying the current entry to represent one half of
- * the split, and adding a new entry for the second half.
- */
- if ((pl->start < start) && (pl_end > clr_end)) {
- u64 new_start = clr_end + 1;
- u64 new_len = pl_end - new_start + 1;
-
- /* Add new entry covering the right half */
- add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
- /* Adjust this entry to cover the left half */
- pl->length = start - pl->start;
- continue;
- }
- }
- spin_unlock(&nvdimm_bus->poison_lock);
-}
-EXPORT_SYMBOL_GPL(nvdimm_forget_poison);
+EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index e0f0e3ce1a32..f8913b8124b6 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -55,6 +55,8 @@ static int nvdimm_probe(struct device *dev)
goto err;
rc = nvdimm_init_config_data(ndd);
+ if (rc == -EACCES)
+ nvdimm_set_locked(dev);
if (rc)
goto err;
@@ -68,6 +70,7 @@ static int nvdimm_probe(struct device *dev)
rc = nd_label_reserve_dpa(ndd);
if (ndd->ns_current >= 0)
nvdimm_set_aliasing(dev);
+ nvdimm_clear_locked(dev);
nvdimm_bus_unlock(dev);
if (rc)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index f0d1b7e5de01..097794d9f786 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -200,6 +200,13 @@ void nvdimm_set_locked(struct device *dev)
set_bit(NDD_LOCKED, &nvdimm->flags);
}
+void nvdimm_clear_locked(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ clear_bit(NDD_LOCKED, &nvdimm->flags);
+}
+
static void nvdimm_release(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
@@ -324,6 +331,17 @@ static ssize_t commands_show(struct device *dev,
}
static DEVICE_ATTR_RO(commands);
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ return sprintf(buf, "%s%s\n",
+ test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+ test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
+}
+static DEVICE_ATTR_RO(flags);
+
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -365,6 +383,7 @@ static DEVICE_ATTR_RO(available_slots);
static struct attribute *nvdimm_attributes[] = {
&dev_attr_state.attr,
+ &dev_attr_flags.attr,
&dev_attr_commands.attr,
&dev_attr_available_slots.attr,
NULL,
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 9c5f108910e3..de66c02f6140 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -1050,7 +1050,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
nsindex = to_namespace_index(ndd, 0);
memset(nsindex, 0, ndd->nsarea.config_size);
for (i = 0; i < 2; i++) {
- int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
+ int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
if (rc)
return rc;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3e4d1e7998da..bb3ba8cf24d4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1620,7 +1620,7 @@ static umode_t namespace_visible(struct kobject *kobj,
if (a == &dev_attr_resource.attr) {
if (is_namespace_blk(dev))
return 0;
- return a->mode;
+ return 0400;
}
if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
@@ -1875,7 +1875,7 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nspm: target namespace to create
* @nd_label: target pmem namespace label to evaluate
*/
-struct device *create_namespace_pmem(struct nd_region *nd_region,
+static struct device *create_namespace_pmem(struct nd_region *nd_region,
struct nd_namespace_index *nsindex,
struct nd_namespace_label *nd_label)
{
@@ -2186,7 +2186,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
return i;
}
-struct device *create_namespace_blk(struct nd_region *nd_region,
+static struct device *create_namespace_blk(struct nd_region *nd_region,
struct nd_namespace_label *nd_label, int count)
{
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 86bc19ae30da..79274ead54fb 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -29,10 +29,9 @@ struct nvdimm_bus {
struct list_head list;
struct device dev;
int id, probe_active;
- struct list_head poison_list;
struct list_head mapping_list;
struct mutex reconfig_mutex;
- spinlock_t poison_lock;
+ struct badrange badrange;
};
struct nvdimm {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 9c758a91372b..e958f3724c41 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -34,12 +34,6 @@ enum {
NVDIMM_IO_ATOMIC = 1,
};
-struct nd_poison {
- u64 start;
- u64 length;
- struct list_head list;
-};
-
struct nvdimm_drvdata {
struct device *dev;
int nslabel_size;
@@ -254,6 +248,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
+void nvdimm_clear_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 9576c444f0ab..65cc171c721d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -282,8 +282,16 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
+static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ if (a == &dev_attr_resource.attr)
+ return 0400;
+ return a->mode;
+}
+
struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
+ .is_visible = pfn_visible,
};
static const struct attribute_group *nd_pfn_attribute_groups[] = {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 39dfd7affa31..7fbc5c5dc8e1 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -31,6 +31,7 @@
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
+#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
@@ -394,6 +395,7 @@ static int pmem_attach_disk(struct device *dev,
disk->fops = &pmem_fops;
disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index c5917f040fa7..6a3cd2a10db6 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVDIMM_PMEM_H__
#define __NVDIMM_PMEM_H__
#include <linux/badblocks.h>
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 829d760f651c..abaf38c61220 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -562,8 +562,12 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
return 0;
- if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
- return 0;
+ if (a == &dev_attr_resource.attr) {
+ if (is_nd_pmem(dev))
+ return 0400;
+ else
+ return 0;
+ }
if (a == &dev_attr_deep_flush.attr) {
int has_flush = nvdimm_has_flush(nd_region);
diff --git a/drivers/nvme/Kconfig b/drivers/nvme/Kconfig
index b7c78a5b1f7a..04008e0bbe81 100644
--- a/drivers/nvme/Kconfig
+++ b/drivers/nvme/Kconfig
@@ -1,2 +1,6 @@
+menu "NVME Support"
+
source "drivers/nvme/host/Kconfig"
source "drivers/nvme/target/Kconfig"
+
+endmenu
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 46d6cb1e03bd..b979cf3bce65 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -13,6 +13,15 @@ config BLK_DEV_NVME
To compile this driver as a module, choose M here: the
module will be called nvme.
+config NVME_MULTIPATH
+ bool "NVMe multipath support"
+ depends on NVME_CORE
+ ---help---
+	  This option enables support for multipath access to NVMe
+	  subsystems. If this option is enabled, only a single
+	  /dev/nvmeXnY device will show up for each NVMe namespace,
+	  even if it is accessible through multiple controllers.
+
config NVME_FABRICS
tristate
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index cc0aacb4c8b4..a25fd43650ad 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -5,6 +6,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5a14cc7f28ee..25da74d310d1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -34,13 +34,13 @@
#define NVME_MINORS (1U << MINORBITS)
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
+unsigned int admin_timeout = 60;
+module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+unsigned int nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);
@@ -52,9 +52,6 @@ static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
-static int nvme_char_major;
-module_param(nvme_char_major, int, 0);
-
static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
@@ -71,10 +68,17 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
-static LIST_HEAD(nvme_ctrl_list);
-static DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_IDA(nvme_subsystems_ida);
+static LIST_HEAD(nvme_subsystems);
+static DEFINE_MUTEX(nvme_subsystems_lock);
+static DEFINE_IDA(nvme_instance_ida);
+static dev_t nvme_chr_devt;
static struct class *nvme_class;
+static struct class *nvme_subsys_class;
+
+static void nvme_ns_remove(struct nvme_ns *ns);
+static int nvme_revalidate_disk(struct gendisk *disk);
static __le32 nvme_get_log_dw10(u8 lid, size_t size)
{
@@ -101,6 +105,51 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
+static void nvme_delete_ctrl_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, delete_work);
+
+ flush_work(&ctrl->reset_work);
+ nvme_stop_ctrl(ctrl);
+ nvme_remove_namespaces(ctrl);
+ ctrl->ops->delete_ctrl(ctrl);
+ nvme_uninit_ctrl(ctrl);
+ nvme_put_ctrl(ctrl);
+}
+
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
+ return -EBUSY;
+ if (!queue_work(nvme_wq, &ctrl->delete_work))
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
+
+int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+ int ret = 0;
+
+ /*
+ * Keep a reference until the work is flushed since ->delete_ctrl
+ * can free the controller.
+ */
+ nvme_get_ctrl(ctrl);
+ ret = nvme_delete_ctrl(ctrl);
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync);
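A minimal caller sketch (editor's addition; example_remove_ctrl is a hypothetical name) showing the reference-holding pattern the comment above describes, so the get/queue/flush/put ordering is explicit:

/* sketch only: assumes the nvme_ctrl and helpers introduced in this patch */
static void example_remove_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_get_ctrl(ctrl);			/* pin ctrl across the flush */
	if (!nvme_delete_ctrl(ctrl))		/* state change + queue delete_work */
		flush_work(&ctrl->delete_work);	/* wait; ->delete_ctrl may free ctrl */
	nvme_put_ctrl(ctrl);			/* drop the pin taken above */
}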
+
+static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
+{
+ return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
+}
+
static blk_status_t nvme_error_status(struct request *req)
{
switch (nvme_req(req)->status & 0x7ff) {
@@ -142,9 +191,16 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- nvme_req(req)->retries++;
- blk_mq_requeue_request(req, true);
- return;
+ if (nvme_req_needs_failover(req)) {
+ nvme_failover_req(req);
+ return;
+ }
+
+ if (!blk_queue_dying(req->q)) {
+ nvme_req(req)->retries++;
+ blk_mq_requeue_request(req, true);
+ return;
+ }
}
blk_mq_end_request(req, nvme_error_status(req));
@@ -153,18 +209,13 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_cancel_request(struct request *req, void *data, bool reserved)
{
- int status;
-
if (!blk_mq_request_started(req))
return;
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
- status = NVME_SC_ABORT_REQ;
- if (blk_queue_dying(req->q))
- status |= NVME_SC_DNR;
- nvme_req(req)->status = status;
+ nvme_req(req)->status = NVME_SC_ABORT_REQ;
blk_mq_complete_request(req);
}
@@ -205,6 +256,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_RECONNECTING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
changed = true;
/* FALLTHRU */
default:
@@ -239,11 +291,29 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
ctrl->state = new_state;
spin_unlock_irqrestore(&ctrl->lock, flags);
-
+ if (changed && ctrl->state == NVME_CTRL_LIVE)
+ nvme_kick_requeue_lists(ctrl);
return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+static void nvme_free_ns_head(struct kref *ref)
+{
+ struct nvme_ns_head *head =
+ container_of(ref, struct nvme_ns_head, ref);
+
+ nvme_mpath_remove_disk(head);
+ ida_simple_remove(&head->subsys->ns_ida, head->instance);
+ list_del_init(&head->entry);
+ cleanup_srcu_struct(&head->srcu);
+ kfree(head);
+}
+
+static void nvme_put_ns_head(struct nvme_ns_head *head)
+{
+ kref_put(&head->ref, nvme_free_ns_head);
+}
+
static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
@@ -251,14 +321,8 @@ static void nvme_free_ns(struct kref *kref)
if (ns->ndev)
nvme_nvm_unregister(ns);
- if (ns->disk) {
- spin_lock(&dev_list_lock);
- ns->disk->private_data = NULL;
- spin_unlock(&dev_list_lock);
- }
-
put_disk(ns->disk);
- ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
+ nvme_put_ns_head(ns->head);
nvme_put_ctrl(ns->ctrl);
kfree(ns);
}
@@ -268,31 +332,8 @@ static void nvme_put_ns(struct nvme_ns *ns)
kref_put(&ns->kref, nvme_free_ns);
}
-static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
-{
- struct nvme_ns *ns;
-
- spin_lock(&dev_list_lock);
- ns = disk->private_data;
- if (ns) {
- if (!kref_get_unless_zero(&ns->kref))
- goto fail;
- if (!try_module_get(ns->ctrl->ops->module))
- goto fail_put_ns;
- }
- spin_unlock(&dev_list_lock);
-
- return ns;
-
-fail_put_ns:
- kref_put(&ns->kref, nvme_free_ns);
-fail:
- spin_unlock(&dev_list_lock);
- return NULL;
-}
-
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags, int qid)
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
struct request *req;
@@ -417,7 +458,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
{
memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
@@ -448,7 +489,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
@@ -467,16 +508,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
u16 control = 0;
u32 dsmgmt = 0;
- /*
- * If formated with metadata, require the block layer provide a buffer
- * unless this namespace is formated such that the metadata can be
- * stripped/generated by the controller with PRACT=1.
- */
- if (ns && ns->ms &&
- (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
- !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
- return BLK_STS_NOTSUPP;
-
if (req->cmd_flags & REQ_FUA)
control |= NVME_RW_FUA;
if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -487,7 +518,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -495,6 +526,18 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
if (ns->ms) {
+ /*
+	 * If formatted with metadata, the block layer always provides a
+ * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
+ * we enable the PRACT bit for protection information or set the
+ * namespace capacity to zero to prevent any I/O.
+ */
+ if (!blk_integrity_rq(req)) {
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
+ return BLK_STS_NOTSUPP;
+ control |= NVME_RW_PRINFO_PRACT;
+ }
+
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -507,8 +550,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
nvme_block_nr(ns, blk_rq_pos(req)));
break;
}
- if (!blk_integrity_rq(req))
- control |= NVME_RW_PRINFO_PRACT;
}
cmnd->rw.control = cpu_to_le16(control);
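A small sketch (editor's addition; example_meta_policy is a hypothetical helper, not from the patch) summarizing the metadata decision above for a read/write request that carries no integrity payload:

/* sketch only: assumes the nvme_ns definition and flags from this patch */
static blk_status_t example_meta_policy(struct nvme_ns *ns, u16 *control)
{
	if (!ns->ms)
		return BLK_STS_OK;		/* namespace has no metadata */
	if (nvme_ns_has_pi(ns)) {
		*control |= NVME_RW_PRINFO_PRACT;	/* controller strips/generates PI */
		return BLK_STS_OK;
	}
	return BLK_STS_NOTSUPP;	/* should be unreachable: capacity is forced to 0 */
}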
@@ -560,7 +601,8 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head, int flags)
+ unsigned timeout, int qid, int at_head,
+ blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -778,7 +820,7 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
}
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
- u8 *eui64, u8 *nguid, uuid_t *uuid)
+ struct nvme_ns_ids *ids)
{
struct nvme_command c = { };
int status;
@@ -814,7 +856,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_EUI64_LEN;
- memcpy(eui64, data + pos + sizeof(*cur), len);
+ memcpy(ids->eui64, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_NGUID:
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
@@ -824,7 +866,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_NGUID_LEN;
- memcpy(nguid, data + pos + sizeof(*cur), len);
+ memcpy(ids->nguid, data + pos + sizeof(*cur), len);
break;
case NVME_NIDT_UUID:
if (cur->nidl != NVME_NIDT_UUID_LEN) {
@@ -834,7 +876,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
goto free_data;
}
len = NVME_NIDT_UUID_LEN;
- uuid_copy(uuid, data + pos + sizeof(*cur));
+ uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
break;
default:
			/* Skip unknown types */
@@ -968,7 +1010,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
memset(&c, 0, sizeof(c));
c.rw.opcode = io.opcode;
c.rw.flags = io.flags;
- c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.nsid = cpu_to_le32(ns->head->ns_id);
c.rw.slba = cpu_to_le64(io.slba);
c.rw.length = cpu_to_le16(io.nblocks);
c.rw.control = cpu_to_le16(io.control);
@@ -982,12 +1024,87 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
metadata, meta_len, io.slba, NULL, 0);
}
+static u32 nvme_known_admin_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_admin_format_nvm:
+ return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_CSE_MASK;
+ case nvme_admin_sanitize_nvm:
+ return NVME_CMD_EFFECTS_CSE_MASK;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ u8 opcode)
+{
+ u32 effects = 0;
+
+ if (ns) {
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ if (effects & ~NVME_CMD_EFFECTS_CSUPP)
+ dev_warn(ctrl->device,
+ "IO command:%02x has unhandled effects:%08x\n",
+ opcode, effects);
+ return 0;
+ }
+
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
+ else
+ effects = nvme_known_admin_effects(opcode);
+
+ /*
+ * For simplicity, IO to all namespaces is quiesced even if the command
+ * effects say only one namespace is affected.
+ */
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+ return effects;
+}
+
+static void nvme_update_formats(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->disk && nvme_revalidate_disk(ns->disk))
+ nvme_ns_remove(ns);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+{
+ /*
+ * Revalidate LBA changes prior to unfreezing. This is necessary to
+ * prevent memory corruption if a logical block size was changed by
+ * this command.
+ */
+ if (effects & NVME_CMD_EFFECTS_LBCC)
+ nvme_update_formats(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+ nvme_unfreeze(ctrl);
+ if (effects & NVME_CMD_EFFECTS_CCC)
+ nvme_init_identify(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
+ nvme_queue_scan(ctrl);
+}
+
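An illustrative bracketing (editor's sketch; example_passthru is a hypothetical wrapper) of how a passthrough submission is expected to use the helpers above, so LBCC/CSE effects freeze I/O for the duration and trigger revalidation afterwards:

/* sketch only: mirrors how nvme_user_cmd() below uses the helpers */
static int example_passthru(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			    struct nvme_command *cmd, void *buf, unsigned int len)
{
	u32 effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	int status = nvme_submit_sync_cmd(ns ? ns->queue : ctrl->admin_q,
					  cmd, buf, len);

	nvme_passthru_end(ctrl, effects);	/* unfreeze, revalidate, rescan */
	return status;
}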
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd)
{
struct nvme_passthru_cmd cmd;
struct nvme_command c;
unsigned timeout = 0;
+ u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1013,10 +1130,13 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
+ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
0, &cmd.result, timeout);
+ nvme_passthru_end(ctrl, effects);
+
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
return -EFAULT;
@@ -1025,15 +1145,37 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return status;
}
-static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+/*
+ * Issue ioctl requests on the first available path. Note that unlike normal
+ * block layer requests we will not retry a failed request on another controller.
+ */
+static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
+ struct nvme_ns_head **head, int *srcu_idx)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
+#ifdef CONFIG_NVME_MULTIPATH
+ if (disk->fops == &nvme_ns_head_ops) {
+ *head = disk->private_data;
+ *srcu_idx = srcu_read_lock(&(*head)->srcu);
+ return nvme_find_path(*head);
+ }
+#endif
+ *head = NULL;
+ *srcu_idx = -1;
+ return disk->private_data;
+}
+static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
+{
+ if (head)
+ srcu_read_unlock(&head->srcu, idx);
+}
+
+static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned cmd, unsigned long arg)
+{
switch (cmd) {
case NVME_IOCTL_ID:
force_successful_syscall_return();
- return ns->ns_id;
+ return ns->head->ns_id;
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ns->ctrl, NULL, (void __user *)arg);
case NVME_IOCTL_IO_CMD:
@@ -1052,27 +1194,39 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
}
}
-#ifdef CONFIG_COMPAT
-static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
{
- return nvme_ioctl(bdev, mode, cmd, arg);
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
+ int srcu_idx, ret;
+
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ ret = -EWOULDBLOCK;
+ else
+ ret = nvme_ns_ioctl(ns, cmd, arg);
+ nvme_put_ns_from_disk(head, srcu_idx);
+ return ret;
}
-#else
-#define nvme_compat_ioctl NULL
-#endif
static int nvme_open(struct block_device *bdev, fmode_t mode)
{
- return nvme_get_ns_from_disk(bdev->bd_disk) ? 0 : -ENXIO;
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+#ifdef CONFIG_NVME_MULTIPATH
+ /* should never be called due to GENHD_FL_HIDDEN */
+ if (WARN_ON_ONCE(ns->head->disk))
+ return -ENXIO;
+#endif
+ if (!kref_get_unless_zero(&ns->kref))
+ return -ENXIO;
+ return 0;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- struct nvme_ns *ns = disk->private_data;
-
- module_put(ns->ctrl->ops->module);
- nvme_put_ns(ns);
+ nvme_put_ns(disk->private_data);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1085,35 +1239,12 @@ static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
- u16 bs)
-{
- struct nvme_ns *ns = disk->private_data;
- u16 old_ms = ns->ms;
- u8 pi_type = 0;
-
- ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
- ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
-
- /* PI implementation requires metadata equal t10 pi tuple size */
- if (ns->ms == sizeof(struct t10_pi_tuple))
- pi_type = id->dps & NVME_NS_DPS_PI_MASK;
-
- if (blk_get_integrity(disk) &&
- (ns->pi_type != pi_type || ns->ms != old_ms ||
- bs != queue_logical_block_size(disk->queue) ||
- (ns->ms && ns->ext)))
- blk_integrity_unregister(disk);
-
- ns->pi_type = pi_type;
-}
-
-static void nvme_init_integrity(struct nvme_ns *ns)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
struct blk_integrity integrity;
memset(&integrity, 0, sizeof(integrity));
- switch (ns->pi_type) {
+ switch (pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
integrity.tag_size = sizeof(u16) + sizeof(u32);
@@ -1129,16 +1260,12 @@ static void nvme_init_integrity(struct nvme_ns *ns)
integrity.profile = NULL;
break;
}
- integrity.tuple_size = ns->ms;
- blk_integrity_register(ns->disk, &integrity);
- blk_queue_max_integrity_segments(ns->queue, 1);
+ integrity.tuple_size = ms;
+ blk_integrity_register(disk, &integrity);
+ blk_queue_max_integrity_segments(disk->queue, 1);
}
#else
-static void nvme_prep_integrity(struct gendisk *disk, struct nvme_id_ns *id,
- u16 bs)
-{
-}
-static void nvme_init_integrity(struct nvme_ns *ns)
+static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1149,53 +1276,89 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
}
-static void nvme_config_discard(struct nvme_ns *ns)
+static void nvme_config_discard(struct nvme_ctrl *ctrl,
+ unsigned stream_alignment, struct request_queue *queue)
{
- struct nvme_ctrl *ctrl = ns->ctrl;
- u32 logical_block_size = queue_logical_block_size(ns->queue);
+ u32 size = queue_logical_block_size(queue);
+
+ if (stream_alignment)
+ size *= stream_alignment;
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- if (ctrl->nr_streams && ns->sws && ns->sgs) {
- unsigned int sz = logical_block_size * ns->sws * ns->sgs;
+ queue->limits.discard_alignment = size;
+ queue->limits.discard_granularity = size;
- ns->queue->limits.discard_alignment = sz;
- ns->queue->limits.discard_granularity = sz;
- } else {
- ns->queue->limits.discard_alignment = logical_block_size;
- ns->queue->limits.discard_granularity = logical_block_size;
- }
- blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
- blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+ blk_queue_max_discard_sectors(queue, UINT_MAX);
+ blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue);
if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
- blk_queue_max_write_zeroes_sectors(ns->queue, UINT_MAX);
+ blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}
static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
- struct nvme_id_ns *id, u8 *eui64, u8 *nguid, uuid_t *uuid)
+ struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
+ memset(ids, 0, sizeof(*ids));
+
if (ctrl->vs >= NVME_VS(1, 1, 0))
- memcpy(eui64, id->eui64, sizeof(id->eui64));
+ memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0))
- memcpy(nguid, id->nguid, sizeof(id->nguid));
+ memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
if (ctrl->vs >= NVME_VS(1, 3, 0)) {
		/* Don't treat error as fatal; we potentially
* already have a NGUID or EUI-64
*/
- if (nvme_identify_ns_descs(ctrl, nsid, eui64, nguid, uuid))
+ if (nvme_identify_ns_descs(ctrl, nsid, ids))
dev_warn(ctrl->device,
"%s: Identify Descriptors failed\n", __func__);
}
}
+static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
+{
+ return !uuid_is_null(&ids->uuid) ||
+ memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
+ memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
+}
+
+static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
+{
+ return uuid_equal(&a->uuid, &b->uuid) &&
+ memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
+ memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0;
+}
+
+static void nvme_update_disk_info(struct gendisk *disk,
+ struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ unsigned stream_alignment = 0;
+
+ if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
+ stream_alignment = ns->sws * ns->sgs;
+
+ blk_mq_freeze_queue(disk->queue);
+ blk_integrity_unregister(disk);
+
+ blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+ if (ns->ms && !ns->ext &&
+ (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+ nvme_init_integrity(disk, ns->ms, ns->pi_type);
+ if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
+ capacity = 0;
+ set_capacity(disk, capacity);
+
+ if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ nvme_config_discard(ns->ctrl, stream_alignment, disk->queue);
+ blk_mq_unfreeze_queue(disk->queue);
+}
+
static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
struct nvme_ns *ns = disk->private_data;
- struct nvme_ctrl *ctrl = ns->ctrl;
- u16 bs;
/*
* If identify namespace failed, use default 512 byte block size so
@@ -1204,26 +1367,22 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds;
if (ns->lba_shift == 0)
ns->lba_shift = 9;
- bs = 1 << ns->lba_shift;
ns->noiob = le16_to_cpu(id->noiob);
+ ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+ ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
+	/* the PI implementation requires metadata equal to the t10 pi tuple size */
+ if (ns->ms == sizeof(struct t10_pi_tuple))
+ ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ else
+ ns->pi_type = 0;
- blk_mq_freeze_queue(disk->queue);
-
- if (ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
- nvme_prep_integrity(disk, id, bs);
- blk_queue_logical_block_size(ns->queue, bs);
if (ns->noiob)
nvme_set_chunk_size(ns);
- if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
- nvme_init_integrity(ns);
- if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
- set_capacity(disk, 0);
- else
- set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
- if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
- nvme_config_discard(ns);
- blk_mq_unfreeze_queue(disk->queue);
+ nvme_update_disk_info(disk, ns, id);
+#ifdef CONFIG_NVME_MULTIPATH
+ if (ns->head->disk)
+ nvme_update_disk_info(ns->head->disk, ns, id);
+#endif
}
static int nvme_revalidate_disk(struct gendisk *disk)
@@ -1231,8 +1390,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_id_ns *id;
- u8 eui64[8] = { 0 }, nguid[16] = { 0 };
- uuid_t uuid = uuid_null;
+ struct nvme_ns_ids ids;
int ret = 0;
if (test_bit(NVME_NS_DEAD, &ns->flags)) {
@@ -1240,7 +1398,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
- id = nvme_identify_ns(ctrl, ns->ns_id);
+ id = nvme_identify_ns(ctrl, ns->head->ns_id);
if (!id)
return -ENODEV;
@@ -1249,12 +1407,11 @@ static int nvme_revalidate_disk(struct gendisk *disk)
goto out;
}
- nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
- if (!uuid_equal(&ns->uuid, &uuid) ||
- memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
- memcmp(&ns->eui, &eui64, sizeof(ns->eui))) {
+ __nvme_revalidate_disk(disk, id);
+ nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids);
+ if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) {
dev_err(ctrl->device,
- "identifiers changed for nsid %d\n", ns->ns_id);
+ "identifiers changed for nsid %d\n", ns->head->ns_id);
ret = -ENODEV;
}
@@ -1286,8 +1443,10 @@ static char nvme_pr_type(enum pr_type type)
static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
u64 key, u64 sa_key, u8 op)
{
- struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_ns_head *head = NULL;
+ struct nvme_ns *ns;
struct nvme_command c;
+ int srcu_idx, ret;
u8 data[16] = { 0, };
put_unaligned_le64(key, &data[0]);
@@ -1295,10 +1454,16 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(ns->ns_id);
+ c.common.nsid = cpu_to_le32(head->ns_id);
c.common.cdw10[0] = cpu_to_le32(cdw10);
- return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ ret = -EWOULDBLOCK;
+ else
+ ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ nvme_put_ns_from_disk(head, srcu_idx);
+ return ret;
}
static int nvme_pr_register(struct block_device *bdev, u64 old,
@@ -1380,7 +1545,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit);
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_compat_ioctl,
+ .compat_ioctl = nvme_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -1388,6 +1553,32 @@ static const struct block_device_operations nvme_fops = {
.pr_ops = &nvme_pr_ops,
};
+#ifdef CONFIG_NVME_MULTIPATH
+static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns_head *head = bdev->bd_disk->private_data;
+
+ if (!kref_get_unless_zero(&head->ref))
+ return -ENXIO;
+ return 0;
+}
+
+static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
+{
+ nvme_put_ns_head(disk->private_data);
+}
+
+const struct block_device_operations nvme_ns_head_ops = {
+ .owner = THIS_MODULE,
+ .open = nvme_ns_head_open,
+ .release = nvme_ns_head_release,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+ .getgeo = nvme_getgeo,
+ .pr_ops = &nvme_pr_ops,
+};
+#endif /* CONFIG_NVME_MULTIPATH */
+
static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
{
unsigned long timeout =
@@ -1736,14 +1927,15 @@ static bool quirk_matches(const struct nvme_id_ctrl *id,
string_matches(id->fr, q->fr, sizeof(id->fr));
}
-static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl,
+ struct nvme_id_ctrl *id)
{
size_t nqnlen;
int off;
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strcpy(ctrl->subnqn, id->subnqn);
+ strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -1751,14 +1943,222 @@ static void nvme_init_subnqn(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
- off = snprintf(ctrl->subnqn, NVMF_NQN_SIZE,
+ off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
"nqn.2014.08.org.nvmexpress:%4x%4x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
- memcpy(ctrl->subnqn + off, id->sn, sizeof(id->sn));
+ memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
off += sizeof(id->sn);
- memcpy(ctrl->subnqn + off, id->mn, sizeof(id->mn));
+ memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn));
off += sizeof(id->mn);
- memset(ctrl->subnqn + off, 0, sizeof(ctrl->subnqn) - off);
+ memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
+}
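For illustration (editor's sketch with hypothetical identify data; NVMF_NQN_SIZE is hardcoded here), the fallback above produces strings of the form nqn.2014.08.org.nvmexpress:<vid><ssvid><sn><mn>; a user-space reproduction of the formatting:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char subnqn[223] = { 0 };	/* NVMF_NQN_SIZE in the NVMe headers */
	char sn[20], mn[40];		/* sn[]/mn[] sizes from struct nvme_id_ctrl */
	int off;

	/* hypothetical identify data: space padded, not NUL terminated */
	memset(sn, ' ', sizeof(sn));
	memcpy(sn, "S3X9NX0JC00000", 14);
	memset(mn, ' ', sizeof(mn));
	memcpy(mn, "EXAMPLE MODEL", 13);

	off = snprintf(subnqn, sizeof(subnqn),
		       "nqn.2014.08.org.nvmexpress:%4x%4x", 0x8086, 0x8086);
	memcpy(subnqn + off, sn, sizeof(sn));
	off += sizeof(sn);
	memcpy(subnqn + off, mn, sizeof(mn));
	off += sizeof(mn);
	memset(subnqn + off, 0, sizeof(subnqn) - off);

	printf("%s\n", subnqn);
	return 0;
}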
+
+static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
+{
+ ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
+ kfree(subsys);
+}
+
+static void nvme_release_subsystem(struct device *dev)
+{
+ __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
+}
+
+static void nvme_destroy_subsystem(struct kref *ref)
+{
+ struct nvme_subsystem *subsys =
+ container_of(ref, struct nvme_subsystem, ref);
+
+ mutex_lock(&nvme_subsystems_lock);
+ list_del(&subsys->entry);
+ mutex_unlock(&nvme_subsystems_lock);
+
+ ida_destroy(&subsys->ns_ida);
+ device_del(&subsys->dev);
+ put_device(&subsys->dev);
+}
+
+static void nvme_put_subsystem(struct nvme_subsystem *subsys)
+{
+ kref_put(&subsys->ref, nvme_destroy_subsystem);
+}
+
+static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn)
+{
+ struct nvme_subsystem *subsys;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(subsys, &nvme_subsystems, entry) {
+ if (strcmp(subsys->subnqn, subsysnqn))
+ continue;
+ if (!kref_get_unless_zero(&subsys->ref))
+ continue;
+ return subsys;
+ }
+
+ return NULL;
+}
+
+#define SUBSYS_ATTR_RO(_name, _mode, _show) \
+ struct device_attribute subsys_attr_##_name = \
+ __ATTR(_name, _mode, _show, NULL)
+
+static ssize_t nvme_subsys_show_nqn(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+}
+static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+
+#define nvme_subsys_show_str_function(field) \
+static ssize_t subsys_##field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_subsystem *subsys = \
+ container_of(dev, struct nvme_subsystem, dev); \
+ return sprintf(buf, "%.*s\n", \
+ (int)sizeof(subsys->field), subsys->field); \
+} \
+static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show);
+
+nvme_subsys_show_str_function(model);
+nvme_subsys_show_str_function(serial);
+nvme_subsys_show_str_function(firmware_rev);
+
+static struct attribute *nvme_subsys_attrs[] = {
+ &subsys_attr_model.attr,
+ &subsys_attr_serial.attr,
+ &subsys_attr_firmware_rev.attr,
+ &subsys_attr_subsysnqn.attr,
+ NULL,
+};
+
+static struct attribute_group nvme_subsys_attrs_group = {
+ .attrs = nvme_subsys_attrs,
+};
+
+static const struct attribute_group *nvme_subsys_attrs_groups[] = {
+ &nvme_subsys_attrs_group,
+ NULL,
+};
+
+static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ struct nvme_subsystem *subsys, *found;
+ int ret;
+
+ subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+ if (!subsys)
+ return -ENOMEM;
+ ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(subsys);
+ return ret;
+ }
+ subsys->instance = ret;
+ mutex_init(&subsys->lock);
+ kref_init(&subsys->ref);
+ INIT_LIST_HEAD(&subsys->ctrls);
+ INIT_LIST_HEAD(&subsys->nsheads);
+ nvme_init_subnqn(subsys, ctrl, id);
+ memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
+ memcpy(subsys->model, id->mn, sizeof(subsys->model));
+ memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
+ subsys->vendor_id = le16_to_cpu(id->vid);
+ subsys->cmic = id->cmic;
+
+ subsys->dev.class = nvme_subsys_class;
+ subsys->dev.release = nvme_release_subsystem;
+ subsys->dev.groups = nvme_subsys_attrs_groups;
+ dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance);
+ device_initialize(&subsys->dev);
+
+ mutex_lock(&nvme_subsystems_lock);
+ found = __nvme_find_get_subsystem(subsys->subnqn);
+ if (found) {
+ /*
+ * Verify that the subsystem actually supports multiple
+ * controllers, else bail out.
+ */
+ if (!(id->cmic & (1 << 1))) {
+ dev_err(ctrl->device,
+ "ignoring ctrl due to duplicate subnqn (%s).\n",
+ found->subnqn);
+ nvme_put_subsystem(found);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ __nvme_release_subsystem(subsys);
+ subsys = found;
+ } else {
+ ret = device_add(&subsys->dev);
+ if (ret) {
+ dev_err(ctrl->device,
+ "failed to register subsystem device.\n");
+ goto out_unlock;
+ }
+ ida_init(&subsys->ns_ida);
+ list_add_tail(&subsys->entry, &nvme_subsystems);
+ }
+
+ ctrl->subsys = subsys;
+ mutex_unlock(&nvme_subsystems_lock);
+
+ if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+ dev_name(ctrl->device))) {
+ dev_err(ctrl->device,
+ "failed to create sysfs link from subsystem.\n");
+ /* the transport driver will eventually put the subsystem */
+ return -EINVAL;
+ }
+
+ mutex_lock(&subsys->lock);
+ list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ mutex_unlock(&subsys->lock);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&nvme_subsystems_lock);
+ put_device(&subsys->dev);
+ return ret;
+}
+
+static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log,
+ size_t size)
+{
+ struct nvme_command c = { };
+
+ c.common.opcode = nvme_admin_get_log_page;
+ c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
+ c.common.cdw10[0] = nvme_get_log_dw10(log_page, size);
+
+ return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size);
+}
+
+static int nvme_get_effects_log(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ if (!ctrl->effects)
+ ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+
+ if (!ctrl->effects)
+ return 0;
+
+ ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects,
+ sizeof(*ctrl->effects));
+ if (ret) {
+ kfree(ctrl->effects);
+ ctrl->effects = NULL;
+ }
+ return ret;
}
/*
@@ -1796,9 +2196,19 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
return -EIO;
}
- nvme_init_subnqn(ctrl, id);
+ if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
+ ret = nvme_get_effects_log(ctrl);
+ if (ret < 0)
+ return ret;
+ }
if (!ctrl->identified) {
+ int i;
+
+ ret = nvme_init_subsystem(ctrl, id);
+ if (ret)
+ goto out_free;
+
/*
* Check for quirks. Quirk can depend on firmware version,
* so, in principle, the set of quirks present can change
@@ -1807,9 +2217,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
* the device, but we'd have to make sure that the driver
* behaves intelligently if the quirks change.
*/
-
- int i;
-
for (i = 0; i < ARRAY_SIZE(core_quirks); i++) {
if (quirk_matches(id, &core_quirks[i]))
ctrl->quirks |= core_quirks[i].quirks;
@@ -1822,14 +2229,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
ctrl->oacs = le16_to_cpu(id->oacs);
- ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
ctrl->cntlid = le16_to_cpup(&id->cntlid);
- memcpy(ctrl->serial, id->sn, sizeof(id->sn));
- memcpy(ctrl->model, id->mn, sizeof(id->mn));
- memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
if (id->mdts)
max_hw_sectors = 1 << (id->mdts + page_shift - 9);
else
@@ -1930,33 +2333,12 @@ EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
- struct nvme_ctrl *ctrl;
- int instance = iminor(inode);
- int ret = -ENODEV;
-
- spin_lock(&dev_list_lock);
- list_for_each_entry(ctrl, &nvme_ctrl_list, node) {
- if (ctrl->instance != instance)
- continue;
-
- if (!ctrl->admin_q) {
- ret = -EWOULDBLOCK;
- break;
- }
- if (!kref_get_unless_zero(&ctrl->kref))
- break;
- file->private_data = ctrl;
- ret = 0;
- break;
- }
- spin_unlock(&dev_list_lock);
-
- return ret;
-}
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
-static int nvme_dev_release(struct inode *inode, struct file *file)
-{
- nvme_put_ctrl(file->private_data);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return -EWOULDBLOCK;
+ file->private_data = ctrl;
return 0;
}
@@ -2020,7 +2402,6 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
- .release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = nvme_dev_ioctl,
};
@@ -2050,77 +2431,86 @@ static ssize_t nvme_sysfs_rescan(struct device *dev,
}
static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (disk->fops == &nvme_fops)
+ return nvme_get_ns_from_dev(dev)->head;
+ else
+ return disk->private_data;
+}
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- struct nvme_ctrl *ctrl = ns->ctrl;
- int serial_len = sizeof(ctrl->serial);
- int model_len = sizeof(ctrl->model);
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
+ struct nvme_ns_ids *ids = &head->ids;
+ struct nvme_subsystem *subsys = head->subsys;
+ int serial_len = sizeof(subsys->serial);
+ int model_len = sizeof(subsys->model);
- if (!uuid_is_null(&ns->uuid))
- return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+ if (!uuid_is_null(&ids->uuid))
+ return sprintf(buf, "uuid.%pU\n", &ids->uuid);
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
- return sprintf(buf, "eui.%16phN\n", ns->nguid);
+ if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ return sprintf(buf, "eui.%16phN\n", ids->nguid);
- if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
- return sprintf(buf, "eui.%8phN\n", ns->eui);
+ if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ return sprintf(buf, "eui.%8phN\n", ids->eui64);
- while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
- ctrl->serial[serial_len - 1] == '\0'))
+ while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' ||
+ subsys->serial[serial_len - 1] == '\0'))
serial_len--;
- while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
- ctrl->model[model_len - 1] == '\0'))
+ while (model_len > 0 && (subsys->model[model_len - 1] == ' ' ||
+ subsys->model[model_len - 1] == '\0'))
model_len--;
- return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
- serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+ return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id,
+ serial_len, subsys->serial, model_len, subsys->model,
+ head->ns_id);
}
static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%pU\n", ns->nguid);
+ return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid);
}
static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
/* For backward compatibility expose the NGUID to userspace if
* we have no UUID set
*/
- if (uuid_is_null(&ns->uuid)) {
+ if (uuid_is_null(&ids->uuid)) {
printk_ratelimited(KERN_WARNING
"No UUID available providing old NGUID\n");
- return sprintf(buf, "%pU\n", ns->nguid);
+ return sprintf(buf, "%pU\n", ids->nguid);
}
- return sprintf(buf, "%pU\n", &ns->uuid);
+ return sprintf(buf, "%pU\n", &ids->uuid);
}
static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
static ssize_t eui_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%8phd\n", ns->eui);
+ return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64);
}
static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL);
static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
- return sprintf(buf, "%d\n", ns->ns_id);
+ return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id);
}
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
-static struct attribute *nvme_ns_attrs[] = {
+static struct attribute *nvme_ns_id_attrs[] = {
&dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_nguid.attr,
@@ -2129,31 +2519,31 @@ static struct attribute *nvme_ns_attrs[] = {
NULL,
};
-static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
+static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids;
if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ns->uuid) &&
- !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+ if (uuid_is_null(&ids->uuid) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
return 0;
}
if (a == &dev_attr_nguid.attr) {
- if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+ if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
return 0;
}
if (a == &dev_attr_eui.attr) {
- if (!memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+ if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
return 0;
}
return a->mode;
}
-static const struct attribute_group nvme_ns_attr_group = {
- .attrs = nvme_ns_attrs,
- .is_visible = nvme_ns_attrs_are_visible,
+const struct attribute_group nvme_ns_id_attr_group = {
+ .attrs = nvme_ns_id_attrs,
+ .is_visible = nvme_ns_id_attrs_are_visible,
};
#define nvme_show_str_function(field) \
@@ -2161,10 +2551,15 @@ static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
- return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
+ return sprintf(buf, "%.*s\n", \
+ (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+
#define nvme_show_int_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2174,9 +2569,6 @@ static ssize_t field##_show(struct device *dev, \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-nvme_show_str_function(model);
-nvme_show_str_function(serial);
-nvme_show_str_function(firmware_rev);
nvme_show_int_function(cntlid);
static ssize_t nvme_sysfs_delete(struct device *dev,
@@ -2186,7 +2578,7 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
if (device_remove_file_self(dev, attr))
- ctrl->ops->delete_ctrl(ctrl);
+ nvme_delete_ctrl_sync(ctrl);
return count;
}
static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete);
@@ -2230,7 +2622,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subnqn);
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
@@ -2283,12 +2675,128 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};
+static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
+ unsigned nsid)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ if (h->ns_id == nsid && kref_get_unless_zero(&h->ref))
+ return h;
+ }
+
+ return NULL;
+}
+
+static int __nvme_check_ids(struct nvme_subsystem *subsys,
+ struct nvme_ns_head *new)
+{
+ struct nvme_ns_head *h;
+
+ lockdep_assert_held(&subsys->lock);
+
+ list_for_each_entry(h, &subsys->nsheads, entry) {
+ if (nvme_ns_ids_valid(&new->ids) &&
+ nvme_ns_ids_equal(&new->ids, &h->ids))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
+ unsigned nsid, struct nvme_id_ns *id)
+{
+ struct nvme_ns_head *head;
+ int ret = -ENOMEM;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ goto out;
+ ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free_head;
+ head->instance = ret;
+ INIT_LIST_HEAD(&head->list);
+ init_srcu_struct(&head->srcu);
+ head->subsys = ctrl->subsys;
+ head->ns_id = nsid;
+ kref_init(&head->ref);
+
+ nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
+
+ ret = __nvme_check_ids(ctrl->subsys, head);
+ if (ret) {
+ dev_err(ctrl->device,
+ "duplicate IDs for nsid %d\n", nsid);
+ goto out_cleanup_srcu;
+ }
+
+ ret = nvme_mpath_alloc_disk(ctrl, head);
+ if (ret)
+ goto out_cleanup_srcu;
+
+ list_add_tail(&head->entry, &ctrl->subsys->nsheads);
+ return head;
+out_cleanup_srcu:
+ cleanup_srcu_struct(&head->srcu);
+ ida_simple_remove(&ctrl->subsys->ns_ida, head->instance);
+out_free_head:
+ kfree(head);
+out:
+ return ERR_PTR(ret);
+}
+
+static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
+ struct nvme_id_ns *id, bool *new)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ bool is_shared = id->nmic & (1 << 0);
+ struct nvme_ns_head *head = NULL;
+ int ret = 0;
+
+ mutex_lock(&ctrl->subsys->lock);
+ if (is_shared)
+ head = __nvme_find_ns_head(ctrl->subsys, nsid);
+ if (!head) {
+ head = nvme_alloc_ns_head(ctrl, nsid, id);
+ if (IS_ERR(head)) {
+ ret = PTR_ERR(head);
+ goto out_unlock;
+ }
+
+ *new = true;
+ } else {
+ struct nvme_ns_ids ids;
+
+ nvme_report_ns_ids(ctrl, nsid, id, &ids);
+ if (!nvme_ns_ids_equal(&head->ids, &ids)) {
+ dev_err(ctrl->device,
+ "IDs don't match for shared namespace %d\n",
+ nsid);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ *new = false;
+ }
+
+ list_add_tail(&ns->siblings, &head->list);
+ ns->head = head;
+
+out_unlock:
+ mutex_unlock(&ctrl->subsys->lock);
+ return ret;
+}
+
static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
- return nsa->ns_id - nsb->ns_id;
+ return nsa->head->ns_id - nsb->head->ns_id;
}
static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -2297,12 +2805,13 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid) {
- kref_get(&ns->kref);
+ if (ns->head->ns_id == nsid) {
+ if (!kref_get_unless_zero(&ns->kref))
+ continue;
ret = ns;
break;
}
- if (ns->ns_id > nsid)
+ if (ns->head->ns_id > nsid)
break;
}
mutex_unlock(&ctrl->namespaces_mutex);
@@ -2317,7 +2826,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
if (!ctrl->nr_streams)
return 0;
- ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
+ ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
if (ret)
return ret;
@@ -2341,33 +2850,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
struct nvme_id_ns *id;
char disk_name[DISK_NAME_LEN];
- int node = dev_to_node(ctrl->dev);
+ int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
+ bool new = true;
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
- ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
- if (ns->instance < 0)
- goto out_free_ns;
-
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
- goto out_release_instance;
+ goto out_free_ns;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
kref_init(&ns->kref);
- ns->ns_id = nsid;
ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
nvme_set_queue_limits(ctrl, ns->queue);
nvme_setup_streams_ns(ctrl, ns);
- sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
-
id = nvme_identify_ns(ctrl, nsid);
if (!id)
goto out_free_queue;
@@ -2375,23 +2878,49 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (id->ncap == 0)
goto out_free_id;
- nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);
+ if (nvme_init_ns_head(ns, nsid, id, &new))
+ goto out_free_id;
+
+#ifdef CONFIG_NVME_MULTIPATH
+ /*
+ * If multipathing is enabled we need to always use the subsystem
+ * instance number for numbering our devices to avoid conflicts
+ * between subsystems that have multiple controllers and thus use
+ * the multipath-aware subsystem node and those that have a single
+ * controller and use the controller node directly.
+ */
+ if (ns->head->disk) {
+ sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+ ctrl->cntlid, ns->head->instance);
+ flags = GENHD_FL_HIDDEN;
+ } else {
+ sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+ ns->head->instance);
+ }
+#else
+ /*
+	 * But without the multipath code enabled, multiple controllers per
+	 * subsystem are visible as devices and thus we cannot use the
+	 * subsystem instance.
+ */
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+#endif
if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
if (nvme_nvm_register(ns, disk_name, node)) {
dev_warn(ctrl->device, "LightNVM init failure\n");
- goto out_free_id;
+ goto out_unlink_ns;
}
}
disk = alloc_disk_node(0, node);
if (!disk)
- goto out_free_id;
+ goto out_unlink_ns;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
- disk->flags = GENHD_FL_EXT_DEVT;
+ disk->flags = flags;
memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
ns->disk = disk;
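To make the naming scheme above concrete (editor's sketch; the instance numbers are hypothetical): with multipath enabled, a shared namespace gets a hidden per-controller node plus one visible multipath node, e.g.:

#include <stdio.h>

int main(void)
{
	int subsys_instance = 0, cntlid = 1, head_instance = 3;	/* hypothetical */
	char per_path[32], mpath[32];

	/* CONFIG_NVME_MULTIPATH=y: hidden per-controller node + shared node */
	snprintf(per_path, sizeof(per_path), "nvme%dc%dn%d",
		 subsys_instance, cntlid, head_instance);
	snprintf(mpath, sizeof(mpath), "nvme%dn%d",
		 subsys_instance, head_instance);
	printf("%s (GENHD_FL_HIDDEN) -> %s\n", per_path, mpath);
	return 0;
}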
@@ -2401,49 +2930,65 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
list_add_tail(&ns->list, &ctrl->namespaces);
mutex_unlock(&ctrl->namespaces_mutex);
- kref_get(&ctrl->kref);
+ nvme_get_ctrl(ctrl);
kfree(id);
device_add_disk(ctrl->device, ns->disk);
if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_attr_group))
+ &nvme_ns_id_attr_group))
pr_warn("%s: failed to create sysfs group for identification\n",
ns->disk->disk_name);
if (ns->ndev && nvme_nvm_register_sysfs(ns))
pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
ns->disk->disk_name);
+
+ if (new)
+ nvme_mpath_add_disk(ns->head);
+ nvme_mpath_add_disk_links(ns);
return;
+ out_unlink_ns:
+ mutex_lock(&ctrl->subsys->lock);
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ctrl->subsys->lock);
out_free_id:
kfree(id);
out_free_queue:
blk_cleanup_queue(ns->queue);
- out_release_instance:
- ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
kfree(ns);
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
+ struct nvme_ns_head *head = ns->head;
+
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
if (blk_get_integrity(ns->disk))
blk_integrity_unregister(ns->disk);
+ nvme_mpath_remove_disk_links(ns);
sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_attr_group);
+ &nvme_ns_id_attr_group);
if (ns->ndev)
nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
}
+ mutex_lock(&ns->ctrl->subsys->lock);
+ nvme_mpath_clear_current_path(ns);
+ if (head)
+ list_del_rcu(&ns->siblings);
+ mutex_unlock(&ns->ctrl->subsys->lock);
+
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
+ synchronize_srcu(&head->srcu);
nvme_put_ns(ns);
}
@@ -2466,7 +3011,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
struct nvme_ns *ns, *next;
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->ns_id > nsid)
+ if (ns->head->ns_id > nsid)
nvme_ns_remove(ns);
}
}
@@ -2582,20 +3127,29 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
+static void nvme_aen_uevent(struct nvme_ctrl *ctrl)
+{
+ char *envp[2] = { NULL, NULL };
+ u32 aen_result = ctrl->aen_result;
+
+ ctrl->aen_result = 0;
+ if (!aen_result)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result);
+ if (!envp[0])
+ return;
+ kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp);
+ kfree(envp[0]);
+}
+
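As a hedged illustration (not part of this patch), the NVME_AEN property emitted above could be consumed from userspace roughly as follows with libudev; the "nvme" subsystem match and the NVME_AEN property name mirror the kernel code, everything else is illustrative:

	/*
	 * Hypothetical userspace consumer: wait for uevents on the "nvme"
	 * class and print the NVME_AEN property that nvme_aen_uevent()
	 * attaches to the KOBJ_CHANGE event.
	 */
	#include <libudev.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct udev *udev = udev_new();
		struct udev_monitor *mon =
			udev_monitor_new_from_netlink(udev, "udev");
		struct pollfd pfd;

		udev_monitor_filter_add_match_subsystem_devtype(mon, "nvme", NULL);
		udev_monitor_enable_receiving(mon);
		pfd.fd = udev_monitor_get_fd(mon);
		pfd.events = POLLIN;

		while (poll(&pfd, 1, -1) > 0) {
			struct udev_device *dev = udev_monitor_receive_device(mon);
			const char *aen;

			if (!dev)
				continue;
			aen = udev_device_get_property_value(dev, "NVME_AEN");
			if (aen)
				printf("async event result: %s\n", aen);
			udev_device_unref(dev);
		}

		udev_unref(udev);
		return 0;
	}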
static void nvme_async_event_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl =
container_of(work, struct nvme_ctrl, async_event_work);
- spin_lock_irq(&ctrl->lock);
- while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
- int aer_idx = --ctrl->event_limit;
-
- spin_unlock_irq(&ctrl->lock);
- ctrl->ops->submit_async_event(ctrl, aer_idx);
- spin_lock_irq(&ctrl->lock);
- }
- spin_unlock_irq(&ctrl->lock);
+ nvme_aen_uevent(ctrl);
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -2614,18 +3168,13 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
- struct nvme_command c = { };
struct nvme_fw_slot_info_log *log;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
return;
- c.common.opcode = nvme_admin_get_log_page;
- c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
- c.common.cdw10[0] = nvme_get_log_dw10(NVME_LOG_FW_SLOT, sizeof(*log));
-
- if (!nvme_submit_sync_cmd(ctrl->admin_q, &c, log, sizeof(*log)))
+ if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log)))
dev_warn(ctrl->device,
"Get FW SLOT INFO log error\n");
kfree(log);
@@ -2659,7 +3208,7 @@ static void nvme_fw_act_work(struct work_struct *work)
return;
nvme_start_queues(ctrl);
- /* read FW slot informationi to clear the AER*/
+ /* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
}
@@ -2667,24 +3216,21 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
- bool done = true;
- switch (le16_to_cpu(status) >> 1) {
- case NVME_SC_SUCCESS:
- done = false;
- /*FALLTHRU*/
- case NVME_SC_ABORT_REQ:
- ++ctrl->event_limit;
- if (ctrl->state == NVME_CTRL_LIVE)
- queue_work(nvme_wq, &ctrl->async_event_work);
+ if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
+ return;
+
+ switch (result & 0x7) {
+ case NVME_AER_ERROR:
+ case NVME_AER_SMART:
+ case NVME_AER_CSS:
+ case NVME_AER_VS:
+ ctrl->aen_result = result;
break;
default:
break;
}
- if (done)
- return;
-
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
dev_info(ctrl->device, "rescanning\n");
@@ -2696,44 +3242,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
-}
-EXPORT_SYMBOL_GPL(nvme_complete_async_event);
-
-void nvme_queue_async_events(struct nvme_ctrl *ctrl)
-{
- ctrl->event_limit = NVME_NR_AERS;
queue_work(nvme_wq, &ctrl->async_event_work);
}
-EXPORT_SYMBOL_GPL(nvme_queue_async_events);
-
-static DEFINE_IDA(nvme_instance_ida);
-
-static int nvme_set_instance(struct nvme_ctrl *ctrl)
-{
- int instance, error;
-
- do {
- if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
- return -ENODEV;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_instance_ida, &instance);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- return -ENODEV;
-
- ctrl->instance = instance;
- return 0;
-}
-
-static void nvme_release_instance(struct nvme_ctrl *ctrl)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_instance_ida, ctrl->instance);
- spin_unlock(&dev_list_lock);
-}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
@@ -2751,7 +3262,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
- nvme_queue_async_events(ctrl);
+ queue_work(nvme_wq, &ctrl->async_event_work);
nvme_start_queues(ctrl);
}
}
@@ -2759,30 +3270,31 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
- device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
-
- spin_lock(&dev_list_lock);
- list_del(&ctrl->node);
- spin_unlock(&dev_list_lock);
+ cdev_device_del(&ctrl->cdev, ctrl->device);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
-static void nvme_free_ctrl(struct kref *kref)
+static void nvme_free_ctrl(struct device *dev)
{
- struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);
+ struct nvme_ctrl *ctrl =
+ container_of(dev, struct nvme_ctrl, ctrl_device);
+ struct nvme_subsystem *subsys = ctrl->subsys;
- put_device(ctrl->device);
- nvme_release_instance(ctrl);
- ida_destroy(&ctrl->ns_ida);
+ ida_simple_remove(&nvme_instance_ida, ctrl->instance);
+ kfree(ctrl->effects);
+
+ if (subsys) {
+ mutex_lock(&subsys->lock);
+ list_del(&ctrl->subsys_entry);
+ mutex_unlock(&subsys->lock);
+ sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ }
ctrl->ops->free_ctrl(ctrl);
-}
-void nvme_put_ctrl(struct nvme_ctrl *ctrl)
-{
- kref_put(&ctrl->kref, nvme_free_ctrl);
+ if (subsys)
+ nvme_put_subsystem(subsys);
}
-EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
* Initialize a NVMe controller structures. This needs to be called during
@@ -2798,32 +3310,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
spin_lock_init(&ctrl->lock);
INIT_LIST_HEAD(&ctrl->namespaces);
mutex_init(&ctrl->namespaces_mutex);
- kref_init(&ctrl->kref);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
+ INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
- ret = nvme_set_instance(ctrl);
- if (ret)
+ ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
goto out;
-
- ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
- MKDEV(nvme_char_major, ctrl->instance),
- ctrl, nvme_dev_attr_groups,
- "nvme%d", ctrl->instance);
- if (IS_ERR(ctrl->device)) {
- ret = PTR_ERR(ctrl->device);
+ ctrl->instance = ret;
+
+ device_initialize(&ctrl->ctrl_device);
+ ctrl->device = &ctrl->ctrl_device;
+ ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
+ ctrl->device->class = nvme_class;
+ ctrl->device->parent = ctrl->dev;
+ ctrl->device->groups = nvme_dev_attr_groups;
+ ctrl->device->release = nvme_free_ctrl;
+ dev_set_drvdata(ctrl->device, ctrl);
+ ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
+ if (ret)
goto out_release_instance;
- }
- get_device(ctrl->device);
- ida_init(&ctrl->ns_ida);
- spin_lock(&dev_list_lock);
- list_add_tail(&ctrl->node, &nvme_ctrl_list);
- spin_unlock(&dev_list_lock);
+ cdev_init(&ctrl->cdev, &nvme_dev_fops);
+ ctrl->cdev.owner = ops->module;
+ ret = cdev_device_add(&ctrl->cdev, ctrl->device);
+ if (ret)
+ goto out_free_name;
/*
* Initialize latency tolerance controls. The sysfs files won't
@@ -2834,8 +3350,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
return 0;
+out_free_name:
+	kfree_const(ctrl->device->kobj.name);
out_release_instance:
- nvme_release_instance(ctrl);
+ ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
return ret;
}
@@ -2944,6 +3462,16 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
+int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
+{
+ if (!ctrl->ops->reinit_request)
+ return 0;
+
+ return blk_mq_tagset_iter(set, set->driver_data,
+ ctrl->ops->reinit_request);
+}
+EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
+
int __init nvme_core_init(void)
{
int result;
@@ -2953,12 +3481,9 @@ int __init nvme_core_init(void)
if (!nvme_wq)
return -ENOMEM;
- result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
- &nvme_dev_fops);
+ result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
goto destroy_wq;
- else if (result > 0)
- nvme_char_major = result;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -2966,10 +3491,17 @@ int __init nvme_core_init(void)
goto unregister_chrdev;
}
+ nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
+ if (IS_ERR(nvme_subsys_class)) {
+ result = PTR_ERR(nvme_subsys_class);
+ goto destroy_class;
+ }
return 0;
+destroy_class:
+ class_destroy(nvme_class);
unregister_chrdev:
- __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_wq:
destroy_workqueue(nvme_wq);
return result;
@@ -2977,8 +3509,10 @@ destroy_wq:
void nvme_core_exit(void)
{
+ ida_destroy(&nvme_subsystems_ida);
+ class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
- __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 555c976cc2ee..76b4fe6816a0 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_HOSTNQN, "hostnqn=%s" },
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
+ { NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
{ NVMF_OPT_ERR, NULL }
};
@@ -566,6 +567,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->nr_io_queues = num_online_cpus();
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
opts->kato = NVME_DEFAULT_KATO;
+ opts->duplicate_connect = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -742,6 +744,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
goto out;
}
break;
+ case NVMF_OPT_DUP_CONNECT:
+ opts->duplicate_connect = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -823,7 +828,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
- NVMF_OPT_HOST_ID)
+ NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
@@ -841,6 +846,9 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
if (ret)
goto out_free_opts;
+
+ request_module("nvme-%s", opts->transport);
+
/*
* Check the generic options first as we need a valid transport for
* the lookup below. Then clear the generic flags so that transport
@@ -874,12 +882,12 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
- if (strcmp(ctrl->subnqn, opts->subsysnqn)) {
+ if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
- ctrl->subnqn);
+ ctrl->subsys->subnqn);
up_read(&nvmf_transports_rwsem);
- ctrl->ops->delete_ctrl(ctrl);
+ nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index bf33663218cd..42232e731f19 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -57,6 +57,7 @@ enum {
NVMF_OPT_HOST_TRADDR = 1 << 10,
NVMF_OPT_CTRL_LOSS_TMO = 1 << 11,
NVMF_OPT_HOST_ID = 1 << 12,
+ NVMF_OPT_DUP_CONNECT = 1 << 13,
};
/**
@@ -96,6 +97,7 @@ struct nvmf_ctrl_options {
unsigned int nr_io_queues;
unsigned int reconnect_delay;
bool discovery_nqn;
+ bool duplicate_connect;
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
@@ -131,6 +133,18 @@ struct nvmf_transport_ops {
struct nvmf_ctrl_options *opts);
};
+static inline bool
+nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
+ strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
+ memcmp(&opts->host->id, &ctrl->opts->host->id, sizeof(uuid_t)))
+ return false;
+
+ return true;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
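A hedged sketch of how a fabrics transport could combine this helper with the new duplicate_connect option when vetting a connect request; the FC transport below does essentially this in nvme_fc_existing_controller(). The function name and the single-controller signature here are illustrative only; nvmf_ctlr_matches_baseopts() and opts->duplicate_connect come from this patch.

	/*
	 * Illustrative only: reject a connect request that duplicates an
	 * existing association unless the user asked for duplicate_connect.
	 */
	static bool xport_reject_duplicate(struct nvme_ctrl *existing,
			struct nvmf_ctrl_options *opts)
	{
		if (opts->duplicate_connect)
			return false;	/* user explicitly allowed duplicates */

		/* same <subsysnqn, host nqn, host id> tuple => same association */
		return nvmf_ctlr_matches_baseopts(existing, opts);
	}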
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index be49d0f79381..7ab0be55c7d0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -30,27 +30,19 @@
/* *************************** Data Structures/Defines ****************** */
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_FC_NR_AEN_COMMANDS 1
-#define NVME_FC_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
-#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
-
enum nvme_fc_queue_flags {
NVME_FC_Q_CONNECTED = (1 << 0),
};
#define NVMEFC_QUEUE_DELAY 3 /* ms units */
+#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
+
struct nvme_fc_queue {
struct nvme_fc_ctrl *ctrl;
struct device *dev;
struct blk_mq_hw_ctx *hctx;
void *lldd_handle;
- int queue_size;
size_t cmnd_capsule_len;
u32 qnum;
u32 rqcnt;
@@ -124,6 +116,7 @@ struct nvme_fc_lport {
struct device *dev; /* physical device for dma */
struct nvme_fc_port_template *ops;
struct kref ref;
+ atomic_t act_rport_cnt;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
struct nvme_fc_rport {
@@ -136,6 +129,8 @@ struct nvme_fc_rport {
struct nvme_fc_lport *lport;
spinlock_t lock;
struct kref ref;
+ atomic_t act_ctrl_cnt;
+ unsigned long dev_loss_end;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
enum nvme_fcctrl_flags {
@@ -150,6 +145,7 @@ struct nvme_fc_ctrl {
struct nvme_fc_rport *rport;
u32 cnum;
+ bool assoc_active;
u64 association_id;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -157,7 +153,6 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
- struct work_struct delete_work;
struct delayed_work connect_work;
struct kref ref;
@@ -165,7 +160,7 @@ struct nvme_fc_ctrl {
u32 iocnt;
wait_queue_head_t ioabort_wait;
- struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
+ struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS];
struct nvme_ctrl ctrl;
};
@@ -213,10 +208,16 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
+/*
+ * These items are short-term. They will eventually be moved into
+ * a generic FC class. See comments in module init.
+ */
+static struct class *fc_class;
+static struct device *fc_udev_device;
+
/* *********************** FC-NVME Port Management ************************ */
-static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
struct nvme_fc_queue *, unsigned int);
@@ -235,9 +236,6 @@ nvme_fc_free_lport(struct kref *ref)
list_del(&lport->port_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
- /* let the LLDD know we've finished tearing it down */
- lport->ops->localport_delete(&lport->localport);
-
ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
ida_destroy(&lport->endp_cnt);
@@ -260,7 +258,9 @@ nvme_fc_lport_get(struct nvme_fc_lport *lport)
static struct nvme_fc_lport *
-nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
+nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
+ struct nvme_fc_port_template *ops,
+ struct device *dev)
{
struct nvme_fc_lport *lport;
unsigned long flags;
@@ -272,6 +272,11 @@ nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
lport->localport.port_name != pinfo->port_name)
continue;
+ if (lport->dev != dev) {
+ lport = ERR_PTR(-EXDEV);
+ goto out_done;
+ }
+
if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
lport = ERR_PTR(-EEXIST);
goto out_done;
@@ -288,6 +293,7 @@ nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo)
/* resume the lport */
+ lport->ops = ops;
lport->localport.port_role = pinfo->port_role;
lport->localport.port_id = pinfo->port_id;
lport->localport.port_state = FC_OBJSTATE_ONLINE;
@@ -348,7 +354,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
* expired, we can simply re-enable the localport. Remoteports
* and controller reconnections should resume naturally.
*/
- newrec = nvme_fc_attach_to_unreg_lport(pinfo);
+ newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);
/* found an lport, but something about its state is bad */
if (IS_ERR(newrec)) {
@@ -384,6 +390,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->port_list);
INIT_LIST_HEAD(&newrec->endp_list);
kref_init(&newrec->ref);
+ atomic_set(&newrec->act_rport_cnt, 0);
newrec->ops = template;
newrec->dev = dev;
ida_init(&newrec->endp_cnt);
@@ -446,12 +453,177 @@ nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ if (atomic_read(&lport->act_rport_cnt) == 0)
+ lport->ops->localport_delete(&lport->localport);
+
nvme_fc_lport_put(lport);
return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+/*
+ * TRADDR strings, per FC-NVME, are a fixed format:
+ * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
+ * The udev event strings will only differ by the prefix identifying
+ * which field is being specified:
+ * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
+ * 19 + 43 + null_fudge = 64 characters
+ */
+#define FCNVME_TRADDR_LENGTH 64
+
+static void
+nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
+ struct nvme_fc_rport *rport)
+{
+ char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/
+ char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/
+ char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };
+
+ if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
+ return;
+
+ snprintf(hostaddr, sizeof(hostaddr),
+ "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ lport->localport.node_name, lport->localport.port_name);
+ snprintf(tgtaddr, sizeof(tgtaddr),
+ "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
+ rport->remoteport.node_name, rport->remoteport.port_name);
+ kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+ struct nvme_fc_rport *rport =
+ container_of(ref, struct nvme_fc_rport, ref);
+ struct nvme_fc_lport *lport =
+ localport_to_lport(rport->remoteport.localport);
+ unsigned long flags;
+
+ WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+ WARN_ON(!list_empty(&rport->ctrl_list));
+
+ /* remove from lport list */
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ list_del(&rport->endp_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+
+ kfree(rport);
+
+ nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+ kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+ return kref_get_unless_zero(&rport->ref);
+}
+
+static void
+nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
+{
+ switch (ctrl->ctrl.state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_RECONNECTING:
+ /*
+ * As all reconnects were suppressed, schedule a
+ * connect.
+ */
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: connectivity re-established. "
+ "Attempting reconnect\n", ctrl->cnum);
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will naturally occur after the reset completes.
+ */
+ break;
+
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
+static struct nvme_fc_rport *
+nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
+ struct nvme_fc_port_info *pinfo)
+{
+ struct nvme_fc_rport *rport;
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (rport->remoteport.node_name != pinfo->node_name ||
+ rport->remoteport.port_name != pinfo->port_name)
+ continue;
+
+ if (!nvme_fc_rport_get(rport)) {
+ rport = ERR_PTR(-ENOLCK);
+ goto out_done;
+ }
+
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ /* has it been unregistered */
+ if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
+ /* means lldd called us twice */
+ spin_unlock_irqrestore(&rport->lock, flags);
+ nvme_fc_rport_put(rport);
+ return ERR_PTR(-ESTALE);
+ }
+
+ rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
+ rport->dev_loss_end = 0;
+
+ /*
+ * kick off a reconnect attempt on all associations to the
+	 * remote port. A successful reconnect will resume I/O.
+ */
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+ nvme_fc_resume_controller(ctrl);
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return rport;
+ }
+
+ rport = NULL;
+
+out_done:
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return rport;
+}
+
+static inline void
+__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
+ struct nvme_fc_port_info *pinfo)
+{
+ if (pinfo->dev_loss_tmo)
+ rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
+ else
+ rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
+}
+
/**
* nvme_fc_register_remoteport - transport entry point called by an
* LLDD to register the existence of a NVME
@@ -478,28 +650,52 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
unsigned long flags;
int ret, idx;
+ if (!nvme_fc_lport_get(lport)) {
+ ret = -ESHUTDOWN;
+ goto out_reghost_failed;
+ }
+
+ /*
+ * look to see if there is already a remoteport that is waiting
+	 * for a reconnect (within dev_loss_tmo) with the same WWNs.
+ * If so, transition to it and reconnect.
+ */
+ newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
+
+ /* found an rport, but something about its state is bad */
+ if (IS_ERR(newrec)) {
+ ret = PTR_ERR(newrec);
+ goto out_lport_put;
+
+ /* found existing rport, which was resumed */
+ } else if (newrec) {
+ nvme_fc_lport_put(lport);
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
+ nvme_fc_signal_discovery_scan(lport, newrec);
+ *portptr = &newrec->remoteport;
+ return 0;
+ }
+
+ /* nothing found - allocate a new remoteport struct */
+
newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
GFP_KERNEL);
if (!newrec) {
ret = -ENOMEM;
- goto out_reghost_failed;
- }
-
- if (!nvme_fc_lport_get(lport)) {
- ret = -ESHUTDOWN;
- goto out_kfree_rport;
+ goto out_lport_put;
}
idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_lport_put;
+ goto out_kfree_rport;
}
INIT_LIST_HEAD(&newrec->endp_list);
INIT_LIST_HEAD(&newrec->ctrl_list);
INIT_LIST_HEAD(&newrec->ls_req_list);
kref_init(&newrec->ref);
+ atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
newrec->remoteport.localport = &lport->localport;
newrec->dev = lport->dev;
@@ -511,63 +707,27 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
newrec->remoteport.port_id = pinfo->port_id;
newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
newrec->remoteport.port_num = idx;
+ __nvme_fc_set_dev_loss_tmo(newrec, pinfo);
spin_lock_irqsave(&nvme_fc_lock, flags);
list_add_tail(&newrec->endp_list, &lport->endp_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ nvme_fc_signal_discovery_scan(lport, newrec);
+
*portptr = &newrec->remoteport;
return 0;
-out_lport_put:
- nvme_fc_lport_put(lport);
out_kfree_rport:
kfree(newrec);
+out_lport_put:
+ nvme_fc_lport_put(lport);
out_reghost_failed:
*portptr = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
-static void
-nvme_fc_free_rport(struct kref *ref)
-{
- struct nvme_fc_rport *rport =
- container_of(ref, struct nvme_fc_rport, ref);
- struct nvme_fc_lport *lport =
- localport_to_lport(rport->remoteport.localport);
- unsigned long flags;
-
- WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
- WARN_ON(!list_empty(&rport->ctrl_list));
-
- /* remove from lport list */
- spin_lock_irqsave(&nvme_fc_lock, flags);
- list_del(&rport->endp_list);
- spin_unlock_irqrestore(&nvme_fc_lock, flags);
-
- /* let the LLDD know we've finished tearing it down */
- lport->ops->remoteport_delete(&rport->remoteport);
-
- ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
-
- kfree(rport);
-
- nvme_fc_lport_put(lport);
-}
-
-static void
-nvme_fc_rport_put(struct nvme_fc_rport *rport)
-{
- kref_put(&rport->ref, nvme_fc_free_rport);
-}
-
-static int
-nvme_fc_rport_get(struct nvme_fc_rport *rport)
-{
- return kref_get_unless_zero(&rport->ref);
-}
-
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
@@ -592,6 +752,58 @@ restart:
return 0;
}
+static void
+nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
+{
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost. Awaiting "
+ "Reconnect", ctrl->cnum);
+
+ switch (ctrl->ctrl.state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ /*
+ * Schedule a controller reset. The reset will terminate the
+ * association and schedule the reconnect timer. Reconnects
+ * will be attempted until either the ctlr_loss_tmo
+ * (max_retries * connect_delay) expires or the remoteport's
+ * dev_loss_tmo expires.
+ */
+ if (nvme_reset_ctrl(&ctrl->ctrl)) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: Couldn't schedule reset. "
+ "Deleting controller.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ }
+ break;
+
+ case NVME_CTRL_RECONNECTING:
+ /*
+ * The association has already been terminated and the
+ * controller is attempting reconnects. No need to do anything
+		 * further. Reconnects will be attempted until either the
+ * ctlr_loss_tmo (max_retries * connect_delay) expires or the
+ * remoteport's dev_loss_tmo expires.
+ */
+ break;
+
+ case NVME_CTRL_RESETTING:
+ /*
+ * Controller is already in the process of terminating the
+ * association. No need to do anything further. The reconnect
+ * step will kick in naturally after the association is
+ * terminated.
+ */
+ break;
+
+ case NVME_CTRL_DELETING:
+ default:
+ /* no action to take - let it delete */
+ break;
+ }
+}
+
/**
* nvme_fc_unregister_remoteport - transport entry point called by an
* LLDD to deregister/remove a previously
@@ -621,19 +833,78 @@ nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
}
portptr->port_state = FC_OBJSTATE_DELETED;
- /* tear down all associations to the remote port */
- list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
- __nvme_fc_del_ctrl(ctrl);
+ rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);
+
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ /* if dev_loss_tmo==0, dev loss is immediate */
+ if (!portptr->dev_loss_tmo) {
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: controller connectivity lost. "
+ "Deleting controller.\n",
+ ctrl->cnum);
+ nvme_delete_ctrl(&ctrl->ctrl);
+ } else
+ nvme_fc_ctrl_connectivity_loss(ctrl);
+ }
spin_unlock_irqrestore(&rport->lock, flags);
nvme_fc_abort_lsops(rport);
+ if (atomic_read(&rport->act_ctrl_cnt) == 0)
+ rport->lport->ops->remoteport_delete(portptr);
+
+ /*
+	 * Release the reference. Once all controllers go away, which
+	 * should only occur after dev_loss_tmo expires, the rport can
+	 * be torn down.
+ */
nvme_fc_rport_put(rport);
+
return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+/**
+ * nvme_fc_rescan_remoteport - transport entry point called by an
+ * LLDD to request a nvme device rescan.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ * rescanned.
+ *
+ * Returns: N/A
+ */
+void
+nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);
+
+ nvme_fc_signal_discovery_scan(rport->lport, rport);
+}
+EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
+
+int
+nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
+ u32 dev_loss_tmo)
+{
+ struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rport->lock, flags);
+
+ if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&rport->lock, flags);
+ return -EINVAL;
+ }
+
+ /* a dev_loss_tmo of 0 (immediate) is allowed to be set */
+ rport->remoteport.dev_loss_tmo = dev_loss_tmo;
+
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
+
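For LLDD authors, a hedged, hypothetical sketch of driving the two remoteport entry points added above; only the nvme_fc_set_remoteport_devloss() and nvme_fc_rescan_remoteport() calls come from this patch, while the helper name and the 30-second value are made up:

	/*
	 * Hypothetical LLDD-side helper: tighten dev_loss_tmo on a registered
	 * remoteport and ask the transport to signal a discovery rescan.
	 */
	static void lldd_tune_and_rescan(struct nvme_fc_remote_port *remoteport)
	{
		/* only succeeds while the remoteport is ONLINE (-EINVAL otherwise) */
		if (nvme_fc_set_remoteport_devloss(remoteport, 30))
			pr_warn("failed to update dev_loss_tmo\n");

		/* emits the FC_EVENT=nvmediscovery udev event for this remoteport */
		nvme_fc_rescan_remoteport(remoteport);
	}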
/* *********************** FC-NVME DMA Handling **************************** */
@@ -723,7 +994,6 @@ fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_unmap_sg(dev, sg, nents, dir);
}
-
/* *********************** FC-NVME LS Handling **************************** */
static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
@@ -1266,7 +1536,7 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
unsigned long flags;
int i, ret;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
continue;
@@ -1331,7 +1601,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
struct nvme_command *sqe = &op->cmd_iu.sqe;
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
- bool complete_rq, terminate_assoc = true;
+ bool terminate_assoc = true;
/*
* WARNING:
@@ -1373,8 +1643,9 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
- if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
- status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
+ op->flags & FCOP_FLAGS_TERMIO)
+ status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
else if (freq->status)
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1438,23 +1709,27 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
done:
if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
- complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op);
atomic_set(&op->state, FCPOP_STATE_IDLE);
op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
goto check_error;
}
- complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
- if (!complete_rq) {
- if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
- status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
- if (blk_queue_dying(rq->q))
- status |= cpu_to_le16(NVME_SC_DNR << 1);
- }
- nvme_end_request(rq, status, result);
- } else
+ /*
+ * Force failures of commands if we're killing the controller
+	 * or have an error on a command used to create a new association
+ */
+ if (status &&
+ (blk_queue_dying(rq->q) ||
+ ctrl->ctrl.state == NVME_CTRL_NEW ||
+ ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+ status |= cpu_to_le16(NVME_SC_DNR << 1);
+
+ if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
__nvme_fc_final_op_cleanup(rq);
+ else
+ nvme_end_request(rq, status, result);
check_error:
if (terminate_assoc)
@@ -1531,7 +1806,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
int i, ret;
aen_op = ctrl->aen_ops;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
GFP_KERNEL);
if (!private)
@@ -1541,7 +1816,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
sqe = &cmdiu->sqe;
ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
aen_op, (struct request *)NULL,
- (AEN_CMDID_BASE + i));
+ (NVME_AQ_BLK_MQ_DEPTH + i));
if (ret) {
kfree(private);
return ret;
@@ -1554,7 +1829,7 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
memset(sqe, 0, sizeof(*sqe));
sqe->common.opcode = nvme_admin_async_event;
/* Note: core layer may overwrite the sqe.command_id value */
- sqe->common.command_id = AEN_CMDID_BASE + i;
+ sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
}
return 0;
}
@@ -1566,7 +1841,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
int i;
aen_op = ctrl->aen_ops;
- for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
if (!aen_op->fcp_req.private)
continue;
@@ -1610,7 +1885,7 @@ nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
}
static void
-nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
struct nvme_fc_queue *queue;
@@ -1626,8 +1901,6 @@ nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
else
queue->cmnd_capsule_len = sizeof(struct nvme_command);
- queue->queue_size = queue_size;
-
/*
* Considered whether we should allocate buffers for all SQEs
* and CQEs and dma map them - mapping their respective entries
@@ -1751,7 +2024,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
int i;
for (i = 1; i < ctrl->ctrl.queue_count; i++)
- nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+ nvme_fc_init_queue(ctrl, i);
}
static void
@@ -1825,13 +2098,6 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: resetting controller\n", ctrl->cnum);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
- dev_err(ctrl->ctrl.device,
- "NVME-FC{%d}: error_recovery: Couldn't change state "
- "to RECONNECTING\n", ctrl->cnum);
- return;
- }
-
nvme_reset_ctrl(&ctrl->ctrl);
}
@@ -1842,13 +2108,14 @@ nvme_fc_timeout(struct request *rq, bool reserved)
struct nvme_fc_ctrl *ctrl = op->ctrl;
int ret;
- if (reserved)
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
+ atomic_read(&op->state) == FCPOP_STATE_ABORTED)
return BLK_EH_RESET_TIMER;
ret = __nvme_fc_abort_op(ctrl, op);
if (ret)
- /* io wasn't active to abort consider it done */
- return BLK_EH_HANDLED;
+ /* io wasn't active to abort */
+ return BLK_EH_NOT_HANDLED;
/*
* we can't individually ABTS an io without affecting the queue,
@@ -1859,7 +2126,12 @@ nvme_fc_timeout(struct request *rq, bool reserved)
*/
nvme_fc_error_recovery(ctrl, "io timeout error");
- return BLK_EH_HANDLED;
+ /*
+	 * the io abort has been initiated. Restart the reset timer; the
+	 * abort completion will complete the io shortly. This avoids a
+	 * synchronous wait while the abort finishes.
+ */
+ return BLK_EH_RESET_TIMER;
}
static int
@@ -2110,7 +2382,7 @@ nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
}
static void
-nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
struct nvme_fc_fcp_op *aen_op;
@@ -2118,9 +2390,6 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
bool terminating = false;
blk_status_t ret;
- if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
- return;
-
spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->flags & FCCTRL_TERMIO)
terminating = true;
@@ -2129,13 +2398,13 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
if (terminating)
return;
- aen_op = &ctrl->aen_ops[aer_idx];
+ aen_op = &ctrl->aen_ops[0];
ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
NVMEFC_FCP_NODATA);
if (ret)
dev_err(ctrl->ctrl.device,
- "failed async event work [%d]\n", aer_idx);
+ "failed async event work\n");
}
static void
@@ -2337,7 +2606,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
nvme_fc_init_io_queues(ctrl);
- ret = blk_mq_reinit_tagset(&ctrl->tag_set, nvme_fc_reinit_request);
+ ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
if (ret)
goto out_free_io_queues;
@@ -2360,6 +2629,61 @@ out_free_io_queues:
return ret;
}
+static void
+nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+
+ atomic_inc(&lport->act_rport_cnt);
+}
+
+static void
+nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
+{
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ cnt = atomic_dec_return(&lport->act_rport_cnt);
+ if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->localport_delete(&lport->localport);
+}
+
+static int
+nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ u32 cnt;
+
+ if (ctrl->assoc_active)
+ return 1;
+
+ ctrl->assoc_active = true;
+ cnt = atomic_inc_return(&rport->act_ctrl_cnt);
+ if (cnt == 1)
+ nvme_fc_rport_active_on_lport(rport);
+
+ return 0;
+}
+
+static int
+nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
+{
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_lport *lport = rport->lport;
+ u32 cnt;
+
+ /* ctrl->assoc_active=false will be set independently */
+
+ cnt = atomic_dec_return(&rport->act_ctrl_cnt);
+ if (cnt == 0) {
+ if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
+ lport->ops->remoteport_delete(&rport->remoteport);
+ nvme_fc_rport_inactive_on_lport(rport);
+ }
+
+ return 0;
+}
+
/*
* This routine restarts the controller on the host side, and
* on the link side, recreates the controller association.
@@ -2368,26 +2692,31 @@ static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- u32 segs;
int ret;
bool changed;
++ctrl->ctrl.nr_reconnects;
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ return -ENODEV;
+
+ if (nvme_fc_ctlr_active_on_rport(ctrl))
+ return -ENOTUNIQ;
+
/*
* Create the admin queue
*/
- nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+ nvme_fc_init_queue(ctrl, 0);
ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
- NVME_FC_AQ_BLKMQ_DEPTH);
+ NVME_AQ_BLK_MQ_DEPTH);
if (ret)
goto out_free_queue;
ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
- NVME_FC_AQ_BLKMQ_DEPTH,
- (NVME_FC_AQ_BLKMQ_DEPTH / 4));
+ NVME_AQ_BLK_MQ_DEPTH,
+ (NVME_AQ_BLK_MQ_DEPTH / 4));
if (ret)
goto out_delete_hw_queue;
@@ -2419,9 +2748,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_disconnect_admin_queue;
- segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
- ctrl->lport->ops->max_sgl_segments);
- ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
+ ctrl->ctrl.max_hw_sectors =
+ (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);
ret = nvme_init_identify(&ctrl->ctrl);
if (ret)
@@ -2465,11 +2793,11 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
ctrl->ctrl.nr_reconnects = 0;
- nvme_start_ctrl(&ctrl->ctrl);
+ if (changed)
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
@@ -2482,6 +2810,8 @@ out_delete_hw_queue:
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
nvme_fc_free_queue(&ctrl->queues[0]);
+ ctrl->assoc_active = false;
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
return ret;
}
@@ -2497,6 +2827,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
unsigned long flags;
+ if (!ctrl->assoc_active)
+ return;
+ ctrl->assoc_active = false;
+
spin_lock_irqsave(&ctrl->lock, flags);
ctrl->flags |= FCCTRL_TERMIO;
ctrl->iocnt = 0;
@@ -2537,7 +2871,8 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
	 * use blk_mq_tagset_busy_iter() and the transport routine to
* terminate the exchanges.
*/
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ if (ctrl->ctrl.state != NVME_CTRL_NEW)
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2568,102 +2903,64 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+
+ nvme_fc_ctlr_inactive_on_rport(ctrl);
}
static void
-nvme_fc_delete_ctrl_work(struct work_struct *work)
+nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, delete_work);
+ struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_remove_namespaces(&ctrl->ctrl);
/*
* kill the association on the link side. this will block
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
-
- /*
- * tear down the controller
- * After the last reference on the nvme ctrl is removed,
- * the transport nvme_fc_nvme_ctrl_freed() callback will be
- * invoked. From there, the transport will tear down it's
- * logical queues and association.
- */
- nvme_uninit_ctrl(&ctrl->ctrl);
-
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static bool
-__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return true;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return true;
-
- return false;
-}
-
-static int
-__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
-{
- return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
-}
-
-/*
- * Request from nvme core layer to delete the controller
- */
-static int
-nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
- int ret;
-
- if (!kref_get_unless_zero(&ctrl->ctrl.kref))
- return -EBUSY;
-
- ret = __nvme_fc_del_ctrl(ctrl);
-
- if (!ret)
- flush_workqueue(nvme_wq);
-
- nvme_put_ctrl(&ctrl->ctrl);
-
- return ret;
}
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
- /* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
- WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_LIVE);
- return;
- }
+ struct nvme_fc_rport *rport = ctrl->rport;
+ struct nvme_fc_remote_port *portptr = &rport->remoteport;
+ unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
+ bool recon = true;
- dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
- ctrl->cnum, status);
+ if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+ return;
- if (nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
- ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
- queue_delayed_work(nvme_wq, &ctrl->connect_work,
- ctrl->ctrl.opts->reconnect_delay * HZ);
+ "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
+ ctrl->cnum, status);
+ else if (time_after_eq(jiffies, rport->dev_loss_end))
+ recon = false;
+
+ if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ dev_info(ctrl->ctrl.device,
+ "NVME-FC{%d}: Reconnect attempt in %ld "
+ "seconds\n",
+ ctrl->cnum, recon_delay / HZ);
+ else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
+ recon_delay = rport->dev_loss_end - jiffies;
+
+ queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
} else {
- dev_warn(ctrl->ctrl.device,
+ if (portptr->port_state == FC_OBJSTATE_ONLINE)
+ dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: Max reconnect attempts (%d) "
"reached. Removing controller\n",
ctrl->cnum, ctrl->ctrl.nr_reconnects);
- WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
+ else
+ dev_warn(ctrl->ctrl.device,
+ "NVME-FC{%d}: dev_loss_tmo (%d) expired "
+ "while waiting for remoteport connectivity. "
+ "Removing controller\n", ctrl->cnum,
+ portptr->dev_loss_tmo);
+ WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
}
}
@@ -2675,15 +2972,28 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
int ret;
nvme_stop_ctrl(&ctrl->ctrl);
+
	/* will block waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- ret = nvme_fc_create_association(ctrl);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to RECONNECTING\n", ctrl->cnum);
+ return;
+ }
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
+ ret = nvme_fc_create_association(ctrl);
+ else
+ ret = -ENOTCONN;
+
if (ret)
nvme_fc_reconnect_or_delete(ctrl, ret);
else
dev_info(ctrl->ctrl.device,
- "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
+ "NVME-FC{%d}: controller reset complete\n",
+ ctrl->cnum);
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
@@ -2695,8 +3005,9 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_fc_nvme_ctrl_freed,
.submit_async_event = nvme_fc_submit_async_event,
- .delete_ctrl = nvme_fc_del_nvme_ctrl,
+ .delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
+ .reinit_request = nvme_fc_reinit_request,
};
static void
@@ -2728,6 +3039,33 @@ static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
};
+/*
+ * Fails a controller request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_fc_existing_controller(struct nvme_fc_rport *rport,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_fc_ctrl *ctrl;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&rport->lock, flags);
+ list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
+ found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ spin_unlock_irqrestore(&rport->lock, flags);
+
+ return found;
+}
+
static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
@@ -2742,6 +3080,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
+ if (!opts->duplicate_connect &&
+ nvme_fc_existing_controller(rport, opts)) {
+ ret = -EALREADY;
+ goto out_fail;
+ }
+
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl) {
ret = -ENOMEM;
@@ -2760,12 +3104,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
+ ctrl->assoc_active = false;
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
- INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
spin_lock_init(&ctrl->lock);
@@ -2787,7 +3131,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
@@ -2797,6 +3141,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
if (ret)
@@ -2878,7 +3223,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
return ERR_PTR(ret);
}
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
@@ -3026,7 +3371,50 @@ static struct nvmf_transport_ops nvme_fc_transport = {
static int __init nvme_fc_init_module(void)
{
- return nvmf_register_transport(&nvme_fc_transport);
+ int ret;
+
+ /*
+ * NOTE:
+ * It is expected that in the future the kernel will combine
+ * the FC-isms that are currently under scsi and now being
+ * added to by NVME into a new standalone FC class. The SCSI
+ * and NVME protocols and their devices would be under this
+ * new FC class.
+ *
+ * As we need something to post FC-specific udev events to,
+ * specifically for nvme probe events, start by creating the
+ * new device class. When the new standalone FC class is
+ * put in place, this code will move to a more generic
+ * location for the class.
+ */
+ fc_class = class_create(THIS_MODULE, "fc");
+ if (IS_ERR(fc_class)) {
+ pr_err("couldn't register class fc\n");
+ return PTR_ERR(fc_class);
+ }
+
+ /*
+ * Create a device for the FC-centric udev events
+ */
+ fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+ "fc_udev_device");
+ if (IS_ERR(fc_udev_device)) {
+ pr_err("couldn't create fc_udev device!\n");
+ ret = PTR_ERR(fc_udev_device);
+ goto out_destroy_class;
+ }
+
+ ret = nvmf_register_transport(&nvme_fc_transport);
+ if (ret)
+ goto out_destroy_device;
+
+ return 0;
+
+out_destroy_device:
+ device_destroy(fc_class, MKDEV(0, 0));
+out_destroy_class:
+ class_destroy(fc_class);
+ return ret;
}
static void __exit nvme_fc_exit_module(void)
@@ -3039,6 +3427,9 @@ static void __exit nvme_fc_exit_module(void)
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
+
+ device_destroy(fc_class, MKDEV(0, 0));
+ class_destroy(fc_class);
}
module_init(nvme_fc_init_module);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 1f79e3f141e6..ba3d7f3349e5 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -305,7 +305,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
int ret;
c.identity.opcode = nvme_nvm_admin_identity;
- c.identity.nsid = cpu_to_le32(ns->ns_id);
+ c.identity.nsid = cpu_to_le32(ns->head->ns_id);
c.identity.chnl_off = 0;
nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
@@ -344,7 +344,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
int ret = 0;
c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->ns_id);
+ c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
entries = kmalloc(len, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -402,7 +402,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
- c.get_bb.nsid = cpu_to_le32(ns->ns_id);
+ c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
c.get_bb.spba = cpu_to_le64(ppa.ppa);
bb_tbl = kzalloc(tblsz, GFP_KERNEL);
@@ -452,7 +452,7 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
int ret = 0;
c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
- c.set_bb.nsid = cpu_to_le32(ns->ns_id);
+ c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
c.set_bb.spba = cpu_to_le64(ppas->ppa);
c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
c.set_bb.value = type;
@@ -469,7 +469,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
struct nvme_nvm_command *c)
{
c->ph_rw.opcode = rqd->opcode;
- c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
+ c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
@@ -492,34 +492,47 @@ static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
blk_mq_free_request(rq);
}
-static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static struct request *nvme_nvm_alloc_request(struct request_queue *q,
+ struct nvm_rq *rqd,
+ struct nvme_nvm_command *cmd)
{
- struct request_queue *q = dev->q;
struct nvme_ns *ns = q->queuedata;
struct request *rq;
- struct bio *bio = rqd->bio;
- struct nvme_nvm_command *cmd;
-
- cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
nvme_nvm_rqtocmd(rqd, ns, cmd);
rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
- if (IS_ERR(rq)) {
- kfree(cmd);
- return PTR_ERR(rq);
- }
+ if (IS_ERR(rq))
+ return rq;
+
rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
- if (bio) {
- blk_init_request_from_bio(rq, bio);
+ if (rqd->bio) {
+ blk_init_request_from_bio(rq, rqd->bio);
} else {
rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
rq->__data_len = 0;
}
+ return rq;
+}
+
+static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ struct request_queue *q = dev->q;
+ struct nvme_nvm_command *cmd;
+ struct request *rq;
+
+ cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ rq = nvme_nvm_alloc_request(q, rqd, cmd);
+ if (IS_ERR(rq)) {
+ kfree(cmd);
+ return PTR_ERR(rq);
+ }
+
rq->end_io_data = rqd;
blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -527,6 +540,34 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
return 0;
}
+static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ struct request_queue *q = dev->q;
+ struct request *rq;
+ struct nvme_nvm_command cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(struct nvme_nvm_command));
+
+ rq = nvme_nvm_alloc_request(q, rqd, &cmd);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* I/Os can fail and the error is signaled through rqd. Callers must
+ * handle the error accordingly.
+ */
+ blk_execute_rq(q, NULL, rq, 0);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ ret = -EINTR;
+
+ rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
+ rqd->error = nvme_req(rq)->status;
+
+ blk_mq_free_request(rq);
+
+ return ret;
+}
+
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
@@ -562,6 +603,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
.set_bb_tbl = nvme_nvm_set_bb_tbl,
.submit_io = nvme_nvm_submit_io,
+ .submit_io_sync = nvme_nvm_submit_io_sync,
.create_dma_pool = nvme_nvm_create_dma_pool,
.destroy_dma_pool = nvme_nvm_destroy_dma_pool,
@@ -600,8 +642,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-
if (ppa_buf && ppa_len) {
ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
if (!ppa_list) {
@@ -691,7 +731,7 @@ static int nvme_nvm_submit_vio(struct nvme_ns *ns,
memset(&c, 0, sizeof(c));
c.ph_rw.opcode = vio.opcode;
- c.ph_rw.nsid = cpu_to_le32(ns->ns_id);
+ c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
c.ph_rw.control = cpu_to_le16(vio.control);
c.ph_rw.length = cpu_to_le16(vio.nppas);
@@ -728,7 +768,7 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
memset(&c, 0, sizeof(c));
c.common.opcode = vcmd.opcode;
- c.common.nsid = cpu_to_le32(ns->ns_id);
+ c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
/* cdw11-12 */
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
new file mode 100644
index 000000000000..78d92151a904
--- /dev/null
+++ b/drivers/nvme/host/multipath.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2017 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/moduleparam.h>
+#include "nvme.h"
+
+static bool multipath = true;
+module_param(multipath, bool, 0644);
+MODULE_PARM_DESC(multipath,
+ "turn on native support for multiple controllers per subsystem");
+
+void nvme_failover_req(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ns->head->requeue_lock, flags);
+ blk_steal_bios(&ns->head->requeue_list, req);
+ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+ blk_mq_end_request(req, 0);
+
+ nvme_reset_ctrl(ns->ctrl);
+ kblockd_schedule_work(&ns->head->requeue_work);
+}
+
+bool nvme_req_needs_failover(struct request *req)
+{
+ if (!(req->cmd_flags & REQ_NVME_MPATH))
+ return false;
+
+ switch (nvme_req(req)->status & 0x7ff) {
+ /*
+ * Generic command status:
+ */
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
+ case NVME_SC_LBA_RANGE:
+ case NVME_SC_CAP_EXCEEDED:
+ case NVME_SC_RESERVATION_CONFLICT:
+ return false;
+
+ /*
+ * I/O command set specific error. Unfortunately these values are
+ * reused for fabrics commands, but those should never get here.
+ */
+ case NVME_SC_BAD_ATTRIBUTES:
+ case NVME_SC_INVALID_PI:
+ case NVME_SC_READ_ONLY:
+ case NVME_SC_ONCS_NOT_SUPPORTED:
+ WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
+ nvme_fabrics_command);
+ return false;
+
+ /*
+ * Media and Data Integrity Errors:
+ */
+ case NVME_SC_WRITE_FAULT:
+ case NVME_SC_READ_ERROR:
+ case NVME_SC_GUARD_CHECK:
+ case NVME_SC_APPTAG_CHECK:
+ case NVME_SC_REFTAG_CHECK:
+ case NVME_SC_COMPARE_FAILED:
+ case NVME_SC_ACCESS_DENIED:
+ case NVME_SC_UNWRITTEN_BLOCK:
+ return false;
+ }
+
+ /* Everything else could be a path failure, so should be retried */
+ return true;
+}
+
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+ struct nvme_ns *ns;
+
+ mutex_lock(&ctrl->namespaces_mutex);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ if (ns->head->disk)
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+ mutex_unlock(&ctrl->namespaces_mutex);
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ if (ns->ctrl->state == NVME_CTRL_LIVE) {
+ rcu_assign_pointer(head->current_path, ns);
+ return ns;
+ }
+ }
+
+ return NULL;
+}
+
+inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+{
+ struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
+
+ if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE))
+ ns = __nvme_find_path(head);
+ return ns;
+}
+
+static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
+ struct bio *bio)
+{
+ struct nvme_ns_head *head = q->queuedata;
+ struct device *dev = disk_to_dev(head->disk);
+ struct nvme_ns *ns;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = nvme_find_path(head);
+ if (likely(ns)) {
+ bio->bi_disk = ns->disk;
+ bio->bi_opf |= REQ_NVME_MPATH;
+ ret = direct_make_request(bio);
+ } else if (!list_empty_careful(&head->list)) {
+ dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+
+ spin_lock_irq(&head->requeue_lock);
+ bio_list_add(&head->requeue_list, bio);
+ spin_unlock_irq(&head->requeue_lock);
+ } else {
+ dev_warn_ratelimited(dev, "no path - failing I/O\n");
+
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
+
+static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
+{
+ struct nvme_ns_head *head = q->queuedata;
+ struct nvme_ns *ns;
+ bool found = false;
+ int srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ ns = srcu_dereference(head->current_path, &head->srcu);
+ if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE))
+ found = ns->queue->poll_fn(q, qc);
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return found;
+}
+
+static void nvme_requeue_work(struct work_struct *work)
+{
+ struct nvme_ns_head *head =
+ container_of(work, struct nvme_ns_head, requeue_work);
+ struct bio *bio, *next;
+
+ spin_lock_irq(&head->requeue_lock);
+ next = bio_list_get(&head->requeue_list);
+ spin_unlock_irq(&head->requeue_lock);
+
+ while ((bio = next) != NULL) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+
+ /*
+ * Reset disk to the mpath node and resubmit to select a new
+ * path.
+ */
+ bio->bi_disk = head->disk;
+ generic_make_request(bio);
+ }
+}
+
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
+{
+ struct request_queue *q;
+ bool vwc = false;
+
+ bio_list_init(&head->requeue_list);
+ spin_lock_init(&head->requeue_lock);
+ INIT_WORK(&head->requeue_work, nvme_requeue_work);
+
+ /*
+ * Add a multipath node if the subsystem supports multiple controllers.
+ * We also do this for private namespaces as the namespace sharing data could
+ * change after a rescan.
+ */
+ if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
+ return 0;
+
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+ if (!q)
+ goto out;
+ q->queuedata = head;
+ blk_queue_make_request(q, nvme_ns_head_make_request);
+ q->poll_fn = nvme_ns_head_poll;
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ /* set to a default value for 512 until disk is validated */
+ blk_queue_logical_block_size(q, 512);
+
+ /* we need to propagate up the VWC settings */
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(q, vwc, vwc);
+
+ head->disk = alloc_disk(0);
+ if (!head->disk)
+ goto out_cleanup_queue;
+ head->disk->fops = &nvme_ns_head_ops;
+ head->disk->private_data = head;
+ head->disk->queue = q;
+ head->disk->flags = GENHD_FL_EXT_DEVT;
+ sprintf(head->disk->disk_name, "nvme%dn%d",
+ ctrl->subsys->instance, head->instance);
+ return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(q);
+out:
+ return -ENOMEM;
+}
+
+void nvme_mpath_add_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ device_add_disk(&head->subsys->dev, head->disk);
+ if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group))
+ pr_warn("%s: failed to create sysfs group for identification\n",
+ head->disk->disk_name);
+}
+
+void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+{
+ struct kobject *slave_disk_kobj, *holder_disk_kobj;
+
+ if (!ns->head->disk)
+ return;
+
+ slave_disk_kobj = &disk_to_dev(ns->disk)->kobj;
+ if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj,
+ kobject_name(slave_disk_kobj)))
+ return;
+
+ holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj;
+ if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj,
+ kobject_name(holder_disk_kobj)))
+ sysfs_remove_link(ns->head->disk->slave_dir,
+ kobject_name(slave_disk_kobj));
+}
+
+void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+ if (!head->disk)
+ return;
+ sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
+ &nvme_ns_id_attr_group);
+ del_gendisk(head->disk);
+ blk_set_queue_dying(head->disk->queue);
+ /* make sure all pending bios are cleaned up */
+ kblockd_schedule_work(&head->requeue_work);
+ flush_work(&head->requeue_work);
+ blk_cleanup_queue(head->disk->queue);
+ put_disk(head->disk);
+}
+
+void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+{
+ if (!ns->head->disk)
+ return;
+
+ sysfs_remove_link(ns->disk->part0.holder_dir,
+ kobject_name(&disk_to_dev(ns->head->disk)->kobj));
+ sysfs_remove_link(ns->head->disk->slave_dir,
+ kobject_name(&disk_to_dev(ns->disk)->kobj));
+}
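
The failover helpers above are only half of the picture: the core completion path has to spot a retryable status and hand the request to nvme_failover_req() instead of completing it. A rough sketch of that wiring (the corresponding core.c hook is part of the same series but not shown in this hunk; nvme_error_status() is the existing status translation helper):

    static void nvme_complete_rq_sketch(struct request *req)
    {
    	blk_status_t status = nvme_error_status(req);

    	/* Bios tagged REQ_NVME_MPATH with a path-related status are stolen
    	 * and requeued on the ns_head; everything else completes as usual. */
    	if (unlikely(status != BLK_STS_OK && nvme_req_needs_failover(req))) {
    		nvme_failover_req(req);
    		return;
    	}
    	blk_mq_end_request(req, status);
    }
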
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d3f3c4447515..c0873a68872f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -15,16 +15,17 @@
#define _NVME_H
#include <linux/nvme.h>
+#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
-extern unsigned char nvme_io_timeout;
+extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
-extern unsigned char admin_timeout;
+extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
@@ -94,6 +95,11 @@ struct nvme_request {
u16 status;
};
+/*
+ * Mark a bio as coming in through the mpath node.
+ */
+#define REQ_NVME_MPATH REQ_DRV
+
enum {
NVME_REQ_CANCELLED = (1 << 0),
};
@@ -127,24 +133,23 @@ struct nvme_ctrl {
struct request_queue *admin_q;
struct request_queue *connect_q;
struct device *dev;
- struct kref kref;
int instance;
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
struct mutex namespaces_mutex;
+ struct device ctrl_device;
struct device *device; /* char device */
- struct list_head node;
- struct ida ns_ida;
+ struct cdev cdev;
struct work_struct reset_work;
+ struct work_struct delete_work;
+
+ struct nvme_subsystem *subsys;
+ struct list_head subsys_entry;
struct opal_dev *opal_dev;
char name[12];
- char serial[20];
- char model[40];
- char firmware_rev[8];
- char subnqn[NVMF_NQN_SIZE];
u16 cntlid;
u32 ctrl_config;
@@ -155,23 +160,23 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
u16 oncs;
- u16 vid;
u16 oacs;
u16 nssa;
u16 nr_streams;
atomic_t abort_limit;
- u8 event_limit;
u8 vwc;
u32 vs;
u32 sgls;
u16 kas;
u8 npss;
u8 apsta;
+ u32 aen_result;
unsigned int shutdown_timeout;
unsigned int kato;
bool subsystem;
unsigned long quirks;
struct nvme_id_power_state psd[32];
+ struct nvme_effects_log *effects;
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
@@ -197,21 +202,72 @@ struct nvme_ctrl {
struct nvmf_ctrl_options *opts;
};
+struct nvme_subsystem {
+ int instance;
+ struct device dev;
+ /*
+ * Because we unregister the device on the last put we need
+ * a separate refcount.
+ */
+ struct kref ref;
+ struct list_head entry;
+ struct mutex lock;
+ struct list_head ctrls;
+ struct list_head nsheads;
+ char subnqn[NVMF_NQN_SIZE];
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+ u8 cmic;
+ u16 vendor_id;
+ struct ida ns_ida;
+};
+
+/*
+ * Container structure for unique namespace identifiers.
+ */
+struct nvme_ns_ids {
+ u8 eui64[8];
+ u8 nguid[16];
+ uuid_t uuid;
+};
+
+/*
+ * Anchor structure for namespaces. There is one for each namespace in an
+ * NVMe subsystem that any of our controllers can see, and the namespace
+ * structure for each controller is chained off of it. For private namespaces
+ * there is a 1:1 relation to our namespace structures, that is ->list
+ * only ever has a single entry for private namespaces.
+ */
+struct nvme_ns_head {
+#ifdef CONFIG_NVME_MULTIPATH
+ struct gendisk *disk;
+ struct nvme_ns __rcu *current_path;
+ struct bio_list requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+#endif
+ struct list_head list;
+ struct srcu_struct srcu;
+ struct nvme_subsystem *subsys;
+ unsigned ns_id;
+ struct nvme_ns_ids ids;
+ struct list_head entry;
+ struct kref ref;
+ int instance;
+};
+
struct nvme_ns {
struct list_head list;
struct nvme_ctrl *ctrl;
struct request_queue *queue;
struct gendisk *disk;
+ struct list_head siblings;
struct nvm_dev *ndev;
struct kref kref;
- int instance;
+ struct nvme_ns_head *head;
- u8 eui[8];
- u8 nguid[16];
- uuid_t uuid;
-
- unsigned ns_id;
int lba_shift;
u16 ms;
u16 sgs;
@@ -234,9 +290,10 @@ struct nvme_ctrl_ops {
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
- void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
- int (*delete_ctrl)(struct nvme_ctrl *ctrl);
+ void (*submit_async_event)(struct nvme_ctrl *ctrl);
+ void (*delete_ctrl)(struct nvme_ctrl *ctrl);
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+ int (*reinit_request)(void *data, struct request *rq);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -278,6 +335,16 @@ static inline void nvme_end_request(struct request *req, __le16 status,
blk_mq_complete_request(req);
}
+static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
+{
+ get_device(ctrl->device);
+}
+
+static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
+{
+ put_device(ctrl->device);
+}
+
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
@@ -299,10 +366,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
bool send);
-#define NVME_NR_AERS 1
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
union nvme_result *res);
-void nvme_queue_async_events(struct nvme_ctrl *ctrl);
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
@@ -311,21 +376,79 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
- struct nvme_command *cmd, unsigned int flags, int qid);
+ struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
- unsigned timeout, int qid, int at_head, int flags);
+ unsigned timeout, int qid, int at_head,
+ blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
+int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
+
+extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct block_device_operations nvme_ns_head_ops;
+
+#ifdef CONFIG_NVME_MULTIPATH
+void nvme_failover_req(struct request *req);
+bool nvme_req_needs_failover(struct request *req);
+void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+void nvme_mpath_add_disk(struct nvme_ns_head *head);
+void nvme_mpath_add_disk_links(struct nvme_ns *ns);
+void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+void nvme_mpath_remove_disk_links(struct nvme_ns *ns);
+
+static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+
+ if (head && ns == srcu_dereference(head->current_path, &head->srcu))
+ rcu_assign_pointer(head->current_path, NULL);
+}
+struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+#else
+static inline void nvme_failover_req(struct request *req)
+{
+}
+static inline bool nvme_req_needs_failover(struct request *req)
+{
+ return false;
+}
+static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head)
+{
+ return 0;
+}
+static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+{
+}
+static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+}
+#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
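
To make the new data model concrete: one nvme_ns_head exists per namespace of a subsystem, and every controller that can reach that namespace contributes an nvme_ns chained into head->list via ->siblings (exactly one entry for private namespaces). A hypothetical debugging helper, not part of this patch, that walks the paths of a head under its SRCU read lock could look like:

    /* Illustrative only: list the controllers that provide a path to @head. */
    static void nvme_dump_paths(struct nvme_ns_head *head)
    {
    	struct nvme_ns *ns;
    	int srcu_idx;

    	srcu_idx = srcu_read_lock(&head->srcu);
    	list_for_each_entry_rcu(ns, &head->list, siblings)
    		pr_info("nsid %u: path via %s (ctrl state %d)\n",
    			head->ns_id, dev_name(ns->ctrl->device), ns->ctrl->state);
    	srcu_read_unlock(&head->srcu, srcu_idx);
    }
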
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3f5a04c586ce..a11cfd470089 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -13,7 +13,6 @@
*/
#include <linux/aer.h>
-#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
@@ -26,12 +25,9 @@
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
-#include <linux/poison.h>
#include <linux/t10-pi.h>
-#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <asm/unaligned.h>
#include <linux/sed-opal.h>
#include "nvme.h"
@@ -39,11 +35,7 @@
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS)
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -57,6 +49,12 @@ module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
+static unsigned int sgl_threshold = SZ_32K;
+module_param(sgl_threshold, uint, 0644);
+MODULE_PARM_DESC(sgl_threshold,
+ "Use SGLs when average request segment size is larger or equal to "
+ "this size. Use 0 to disable SGLs.");
+
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
.set = io_queue_depth_set,
@@ -178,6 +176,7 @@ struct nvme_queue {
struct nvme_iod {
struct nvme_request req;
struct nvme_queue *nvmeq;
+ bool use_sgl;
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
int nents; /* Used in scatterlist */
@@ -331,17 +330,35 @@ static int nvme_npages(unsigned size, struct nvme_dev *dev)
return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}
-static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
- unsigned int size, unsigned int nseg)
+/*
+ * Calculates the number of pages needed for the SGL segments. For example, a 4k
+ * page can accommodate 256 SGL descriptors.
+ */
+static int nvme_pci_npages_sgl(unsigned int num_seg)
{
- return sizeof(__le64 *) * nvme_npages(size, dev) +
- sizeof(struct scatterlist) * nseg;
+ return DIV_ROUND_UP(num_seg * sizeof(struct nvme_sgl_desc), PAGE_SIZE);
}
-static unsigned int nvme_cmd_size(struct nvme_dev *dev)
+static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
+ unsigned int size, unsigned int nseg, bool use_sgl)
{
- return sizeof(struct nvme_iod) +
- nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
+ size_t alloc_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * nvme_pci_npages_sgl(nseg);
+ else
+ alloc_size = sizeof(__le64 *) * nvme_npages(size, dev);
+
+ return alloc_size + sizeof(struct scatterlist) * nseg;
+}
+
+static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
+{
+ unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
+ NVME_INT_BYTES(dev), NVME_INT_PAGES,
+ use_sgl);
+
+ return sizeof(struct nvme_iod) + alloc_size;
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -425,10 +442,10 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
nvmeq->sq_tail = tail;
}
-static __le64 **iod_list(struct request *req)
+static void **nvme_pci_iod_list(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
+ return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
@@ -438,7 +455,10 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
+ size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
+ iod->use_sgl);
+
+ iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
if (!iod->sg)
return BLK_STS_RESOURCE;
} else {
@@ -456,18 +476,31 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / 8 - 1;
+ const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+
int i;
- __le64 **list = iod_list(req);
- dma_addr_t prp_dma = iod->first_dma;
if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ dma_addr);
+
for (i = 0; i < iod->npages; i++) {
- __le64 *prp_list = list[i];
- dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
- dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
- prp_dma = next_prp_dma;
+ void *addr = nvme_pci_iod_list(req)[i];
+
+ if (iod->use_sgl) {
+ struct nvme_sgl_desc *sg_list = addr;
+
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ __le64 *prp_list = addr;
+
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(dev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
}
if (iod->sg != iod->inline_sg)
@@ -555,7 +588,8 @@ static void nvme_print_sgl(struct scatterlist *sgl, int nents)
}
}
-static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
@@ -566,14 +600,16 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
u32 page_size = dev->ctrl.page_size;
int offset = dma_addr & (page_size - 1);
__le64 *prp_list;
- __le64 **list = iod_list(req);
+ void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
+ iod->use_sgl = false;
+
length -= (page_size - offset);
if (length <= 0) {
iod->first_dma = 0;
- return BLK_STS_OK;
+ goto done;
}
dma_len -= (page_size - offset);
@@ -587,7 +623,7 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
if (length <= page_size) {
iod->first_dma = dma_addr;
- return BLK_STS_OK;
+ goto done;
}
nprps = DIV_ROUND_UP(length, page_size);
@@ -634,6 +670,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
dma_len = sg_dma_len(sg);
}
+done:
+ cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+
return BLK_STS_OK;
bad_sgl:
@@ -643,6 +683,110 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
return BLK_STS_IOERR;
}
+static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = NVME_SGL_FMT_DATA_DESC << 4;
+}
+
+static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries < SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = NVME_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmd)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ int length = blk_rq_payload_bytes(req);
+ struct dma_pool *pool;
+ struct nvme_sgl_desc *sg_list;
+ struct scatterlist *sg = iod->sg;
+ int entries = iod->nents, i = 0;
+ dma_addr_t sgl_dma;
+
+ iod->use_sgl = true;
+
+ /* setting the transfer type as SGL */
+ cmd->flags = NVME_CMD_SGL_METABUF;
+
+ if (length == sg_dma_len(sg)) {
+ nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+ return BLK_STS_OK;
+ }
+
+ if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ iod->npages = -1;
+ return BLK_STS_RESOURCE;
+ }
+
+ nvme_pci_iod_list(req)[0] = sg_list;
+ iod->first_dma = sgl_dma;
+
+ nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
+
+ do {
+ if (i == SGES_PER_PAGE) {
+ struct nvme_sgl_desc *old_sg_desc = sg_list;
+ struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list)
+ return BLK_STS_RESOURCE;
+
+ i = 0;
+ nvme_pci_iod_list(req)[iod->npages++] = sg_list;
+ sg_list[i++] = *link;
+ nvme_pci_sgl_set_seg(link, sgl_dma, entries);
+ }
+
+ nvme_pci_sgl_set_data(&sg_list[i++], sg);
+
+ length -= sg_dma_len(sg);
+ sg = sg_next(sg);
+ entries--;
+ } while (length > 0);
+
+ WARN_ON(entries > 0);
+ return BLK_STS_OK;
+}
+
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int avg_seg_size;
+
+ avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+ blk_rq_nr_phys_segments(req));
+
+ if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+ return false;
+ if (!iod->nvmeq->qid)
+ return false;
+ if (!sgl_threshold || avg_seg_size < sgl_threshold)
+ return false;
+ return true;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
@@ -662,7 +806,11 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- ret = nvme_setup_prps(dev, req);
+ if (nvme_pci_use_sgls(dev, req))
+ ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
+ else
+ ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
+
if (ret != BLK_STS_OK)
goto out_unmap;
@@ -682,8 +830,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_unmap;
}
- cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
- cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
return BLK_STS_OK;
@@ -804,7 +950,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
* for them but rather special case them here.
*/
if (unlikely(nvmeq->qid == 0 &&
- cqe->command_id >= NVME_AQ_BLKMQ_DEPTH)) {
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
@@ -897,7 +1043,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return __nvme_poll(nvmeq, tag);
}
-static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = dev->queues[0];
@@ -905,7 +1051,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
+ c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
spin_lock_irq(&nvmeq->q_lock);
__nvme_submit_cmd(nvmeq, &c);
@@ -930,7 +1076,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
/*
- * Note: we (ab)use the fact the the prp fields survive if no data
+ * Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
memset(&c, 0, sizeof(c));
@@ -951,7 +1097,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
int flags = NVME_QUEUE_PHYS_CONTIG;
/*
- * Note: we (ab)use the fact the the prp fields survive if no data
+ * Note: we (ab)use the fact that the prp fields survive if no data
* is attached to the request.
*/
memset(&c, 0, sizeof(c));
@@ -1372,14 +1518,10 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.ops = &nvme_mq_admin_ops;
dev->admin_tagset.nr_hw_queues = 1;
- /*
- * Subtract one to leave an empty queue entry for 'Full Queue'
- * condition. See NVM-Express 1.2 specification, section 4.1.2.
- */
- dev->admin_tagset.queue_depth = NVME_AQ_BLKMQ_DEPTH - 1;
+ dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = nvme_cmd_size(dev);
+ dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1906,7 +2048,11 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = nvme_cmd_size(dev);
+ dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+ if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
+ dev->tagset.cmd_size = max(dev->tagset.cmd_size,
+ nvme_pci_cmd_size(dev, true));
+ }
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
@@ -2132,9 +2278,9 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
- kref_get(&dev->ctrl.kref);
+ nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
- if (!schedule_work(&dev->remove_work))
+ if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl);
}
@@ -2557,6 +2703,7 @@ static int __init nvme_init(void)
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
+ flush_workqueue(nvme_wq);
_nvme_check_size();
}
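
The choice between PRPs and the new SGL path is made per request in nvme_pci_use_sgls(): the controller must advertise SGL support, the request must target an I/O queue, and the average segment size must reach sgl_threshold (32 KiB by default). A worked example with hypothetical numbers:

    /* 128 KiB spread over 32 physical segments: 4 KiB average, stays on PRPs. */
    unsigned int payload = 128 * 1024;			/* blk_rq_payload_bytes() */
    unsigned int nsegs = 32;				/* blk_rq_nr_phys_segments() */
    unsigned int avg_seg_size = DIV_ROUND_UP(payload, nsegs);	/* 4096 */
    bool use_sgl = sgl_threshold && avg_seg_size >= sgl_threshold;	/* false */

    /* The same 128 KiB as a single segment averages 131072 >= SZ_32K, so SGLs
     * are used (provided ctrl.sgls advertises support and qid != 0). */
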
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 87bac27ec64b..4f9bf2f815c3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -41,17 +41,9 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_RDMA_NR_AEN_COMMANDS 1
-#define NVME_RDMA_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_RDMA_NR_AEN_COMMANDS)
-
struct nvme_rdma_device {
- struct ib_device *dev;
- struct ib_pd *pd;
+ struct ib_device *dev;
+ struct ib_pd *pd;
struct kref ref;
struct list_head entry;
};
@@ -79,8 +71,8 @@ struct nvme_rdma_request {
};
enum nvme_rdma_queue_flags {
- NVME_RDMA_Q_LIVE = 0,
- NVME_RDMA_Q_DELETING = 1,
+ NVME_RDMA_Q_ALLOCATED = 0,
+ NVME_RDMA_Q_LIVE = 1,
};
struct nvme_rdma_queue {
@@ -105,7 +97,6 @@ struct nvme_rdma_ctrl {
/* other member variables */
struct blk_mq_tag_set tag_set;
- struct work_struct delete_work;
struct work_struct err_work;
struct nvme_rdma_qe async_event_sqe;
@@ -274,6 +265,9 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int ret = 0;
+ if (WARN_ON_ONCE(!req->mr))
+ return 0;
+
ib_dereg_mr(req->mr);
req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
@@ -434,11 +428,9 @@ out_err:
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev;
- struct ib_device *ibdev;
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
- dev = queue->device;
- ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->ib_cq);
@@ -493,7 +485,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
return 0;
out_destroy_qp:
- ib_destroy_qp(queue->qp);
+ rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
ib_free_cq(queue->ib_cq);
out_put_dev:
@@ -544,11 +536,11 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
ret = nvme_rdma_wait_for_cm(queue);
if (ret) {
dev_info(ctrl->ctrl.device,
- "rdma_resolve_addr wait failed (%d).\n", ret);
+ "rdma connection establishment failed (%d)\n", ret);
goto out_destroy_cm_id;
}
- clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
+ set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
return 0;
@@ -568,7 +560,7 @@ static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
{
- if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+ if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
if (nvme_rdma_queue_idx(queue) == 0) {
@@ -676,11 +668,10 @@ out_free_queues:
return ret;
}
-static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl, bool admin)
+static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
+ struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- struct blk_mq_tag_set *set = admin ?
- &ctrl->admin_tag_set : &ctrl->tag_set;
blk_mq_free_tag_set(set);
nvme_rdma_dev_put(ctrl->device);
@@ -697,7 +688,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set = &ctrl->admin_tag_set;
memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_admin_mq_ops;
- set->queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
set->numa_node = NUMA_NO_NODE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
@@ -705,6 +696,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->driver_data = ctrl;
set->nr_hw_queues = 1;
set->timeout = ADMIN_TIMEOUT;
+ set->flags = BLK_MQ_F_NO_SCHED;
} else {
set = &ctrl->tag_set;
memset(set, 0, sizeof(*set));
@@ -748,7 +740,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -780,8 +772,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_tagset;
}
} else {
- error = blk_mq_reinit_tagset(&ctrl->admin_tag_set,
- nvme_rdma_reinit_request);
+ error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
if (error)
goto out_free_queue;
}
@@ -825,7 +816,7 @@ out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
@@ -837,7 +828,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_stop_io_queues(ctrl);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
}
@@ -863,8 +854,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_tag_set;
}
} else {
- ret = blk_mq_reinit_tagset(&ctrl->tag_set,
- nvme_rdma_reinit_request);
+ ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
if (ret)
goto out_free_io_queues;
@@ -883,7 +873,7 @@ out_cleanup_connect_q:
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
@@ -922,7 +912,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
ctrl->ctrl.opts->reconnect_delay * HZ);
} else {
dev_info(ctrl->ctrl.device, "Removing controller...\n");
- queue_work(nvme_wq, &ctrl->delete_work);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
}
@@ -935,10 +925,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
++ctrl->ctrl.nr_reconnects;
- if (ctrl->ctrl.queue_count > 1)
- nvme_rdma_destroy_io_queues(ctrl, false);
-
- nvme_rdma_destroy_admin_queue(ctrl, false);
ret = nvme_rdma_configure_admin_queue(ctrl, false);
if (ret)
goto requeue;
@@ -946,7 +932,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
if (ctrl->ctrl.queue_count > 1) {
ret = nvme_rdma_configure_io_queues(ctrl, false);
if (ret)
- goto requeue;
+ goto destroy_admin;
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -956,14 +942,17 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
}
- ctrl->ctrl.nr_reconnects = 0;
-
nvme_start_ctrl(&ctrl->ctrl);
- dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
+ dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
+ ctrl->ctrl.nr_reconnects);
+
+ ctrl->ctrl.nr_reconnects = 0;
return;
+destroy_admin:
+ nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
ctrl->ctrl.nr_reconnects);
@@ -979,17 +968,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
- nvme_rdma_stop_io_queues(ctrl);
- }
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
- nvme_rdma_stop_queue(&ctrl->queues[0]);
-
- /* We must take care of fastfail/requeue all our inflight requests */
- if (ctrl->ctrl.queue_count > 1)
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_io_queues(ctrl, false);
+ }
+
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
+ nvme_rdma_destroy_admin_queue(ctrl, false);
/*
 * queues are not live anymore, so restart the queues to fail fast
@@ -1065,7 +1052,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_bytes(rq))
return;
- if (req->mr->need_inval) {
+ if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
res = nvme_rdma_inv_rkey(queue, req);
if (unlikely(res < 0)) {
dev_err(ctrl->ctrl.device,
@@ -1314,7 +1301,7 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
return queue->ctrl->tag_set.tags[queue_idx - 1];
}
-static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
struct nvme_rdma_queue *queue = &ctrl->queues[0];
@@ -1324,14 +1311,11 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
struct ib_sge sge;
int ret;
- if (WARN_ON_ONCE(aer_idx != 0))
- return;
-
ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
memset(cmd, 0, sizeof(*cmd));
cmd->common.opcode = nvme_admin_async_event;
- cmd->common.command_id = NVME_RDMA_AQ_BLKMQ_DEPTH;
+ cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd);
@@ -1393,7 +1377,7 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
* for them but rather special case them here.
*/
if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
@@ -1590,6 +1574,10 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ dev_warn(req->queue->ctrl->ctrl.device,
+ "I/O %d QID %d timeout, reset controller\n",
+ rq->tag, nvme_rdma_queue_idx(req->queue));
+
/* queue error recovery */
nvme_rdma_error_recovery(req->queue->ctrl);
@@ -1614,12 +1602,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
/*
* reconnecting state means transport disruption, which
* can take a long time and even might fail permanently,
- * so we can't let incoming I/O be requeued forever.
- * fail it fast to allow upper layers a chance to
- * failover.
+ * fail fast to give upper layers a chance to failover.
+ * deleting state means that the ctrl will never accept
+ * commands again, fail it permanently.
*/
- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+ if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+ queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
+ }
return BLK_STS_RESOURCE; /* try again later */
}
}
@@ -1764,50 +1755,9 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
nvme_rdma_destroy_admin_queue(ctrl, shutdown);
}
-static void nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl)
+static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
{
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_rdma_shutdown_ctrl(ctrl, true);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static void nvme_rdma_del_ctrl_work(struct work_struct *work)
-{
- struct nvme_rdma_ctrl *ctrl = container_of(work,
- struct nvme_rdma_ctrl, delete_work);
-
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_rdma_remove_ctrl(ctrl);
-}
-
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return -EBUSY;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return -EBUSY;
-
- return 0;
-}
-
-static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- int ret = 0;
-
- /*
- * Keep a reference until all work is flushed since
- * __nvme_rdma_del_ctrl can free the ctrl mem
- */
- if (!kref_get_unless_zero(&ctrl->ctrl.kref))
- return -EBUSY;
- ret = __nvme_rdma_del_ctrl(ctrl);
- if (!ret)
- flush_work(&ctrl->delete_work);
- nvme_put_ctrl(&ctrl->ctrl);
- return ret;
+ nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
}
static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
@@ -1831,7 +1781,11 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
}
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
- WARN_ON_ONCE(!changed);
+ if (!changed) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
+ return;
+ }
nvme_start_ctrl(&ctrl->ctrl);
@@ -1839,7 +1793,10 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
out_fail:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
- nvme_rdma_remove_ctrl(ctrl);
+ nvme_remove_namespaces(&ctrl->ctrl);
+ nvme_rdma_shutdown_ctrl(ctrl, true);
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1851,10 +1808,88 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_rdma_free_ctrl,
.submit_async_event = nvme_rdma_submit_async_event,
- .delete_ctrl = nvme_rdma_del_ctrl,
+ .delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
+ .reinit_request = nvme_rdma_reinit_request,
};
+static inline bool
+__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ char *stdport = __stringify(NVME_RDMA_IP_PORT);
+
+
+ if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
+ strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
+ return false;
+
+ if (opts->mask & NVMF_OPT_TRSVCID &&
+ ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
+ return false;
+ } else if (opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(opts->trsvcid, stdport))
+ return false;
+ } else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
+ if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
+ return false;
+ }
+ /* else, it's a match as both have stdport. Fall to next checks */
+
+ /*
+ * checking the local address is rough. In most cases, one
+ * is not specified and the host port is selected by the stack.
+ *
+ * Assume no match if:
+ * local address is specified and address is not the same
+ * local address is not specified but remote is, or vice versa
+ * (admin using specific host_traddr when it matters).
+ */
+ if (opts->mask & NVMF_OPT_HOST_TRADDR &&
+ ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
+ if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
+ return false;
+ } else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
+ ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
+ return false;
+ /*
+ * if neither controller had a host port specified, assume it's
+ * a match as everything else matched.
+ */
+
+ return true;
+}
+
+/*
+ * Fails a connection request if it matches an existing controller
+ * (association) with the same tuple:
+ * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
+ *
+ * if local address is not specified in the request, it will match an
+ * existing controller with all the other parameters the same and no
+ * local port address specified as well.
+ *
+ * The ports don't need to be compared as they are intrinsically
+ * already matched by the port pointers supplied.
+ */
+static bool
+nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
+{
+ struct nvme_rdma_ctrl *ctrl;
+ bool found = false;
+
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ found = __nvme_rdma_options_match(ctrl, opts);
+ if (found)
+ break;
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ return found;
+}
+
static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
struct nvmf_ctrl_options *opts)
{
@@ -1891,6 +1926,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
}
}
+ if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
+ ret = -EALREADY;
+ goto out_free_ctrl;
+ }
+
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
0 /* no quirks, we're perfect! */);
if (ret)
@@ -1899,7 +1939,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
INIT_DELAYED_WORK(&ctrl->reconnect_work,
nvme_rdma_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
- INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
@@ -1958,7 +1997,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
@@ -2003,7 +2042,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
dev_info(ctrl->ctrl.device,
"Removing ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
- __nvme_rdma_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index fecc14f535b2..488250189c99 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index c4a0bf36e752..90dcdc40ac71 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -35,17 +35,14 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- u16 status;
struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
- status = NVME_SC_SUCCESS;
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
if (!ns) {
- status = NVME_SC_INVALID_NS;
pr_err("nvmet : Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
- goto out;
+ return NVME_SC_INVALID_NS;
}
host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
@@ -58,20 +55,18 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
nvmet_put_namespace(ns);
-out:
- return status;
+
+ return NVME_SC_SUCCESS;
}
static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- u16 status;
u64 host_reads = 0, host_writes = 0;
u64 data_units_read = 0, data_units_written = 0;
struct nvmet_ns *ns;
struct nvmet_ctrl *ctrl;
- status = NVME_SC_SUCCESS;
ctrl = req->sq->ctrl;
rcu_read_lock();
@@ -91,7 +86,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
- return status;
+ return NVME_SC_SUCCESS;
}
static u16 nvmet_get_smart_log(struct nvmet_req *req,
@@ -144,10 +139,8 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
}
smart_log = buf;
status = nvmet_get_smart_log(req, smart_log);
- if (status) {
- memset(buf, '\0', data_len);
+ if (status)
goto err;
- }
break;
case NVME_LOG_FW_SLOT:
/*
@@ -300,7 +293,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/*
- * nuse = ncap = nsze isn't aways true, but we have no way to find
+ * nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
id->ncap = id->nuse = id->nsze =
@@ -424,7 +417,7 @@ out:
}
/*
- * A "mimimum viable" abort implementation: the command is mandatory in the
+ * A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
* do a useful abort, so don't bother even with waiting for the command
* to be exectuted and return immediately telling the command to abort
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b6aeb1d70951..e6b2d2af81b6 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -20,8 +20,8 @@
#include "nvmet.h"
-static struct config_item_type nvmet_host_type;
-static struct config_item_type nvmet_subsys_type;
+static const struct config_item_type nvmet_host_type;
+static const struct config_item_type nvmet_subsys_type;
/*
* nvmet_port Generic ConfigFS definitions.
@@ -425,7 +425,7 @@ static struct configfs_item_operations nvmet_ns_item_ops = {
.release = nvmet_ns_release,
};
-static struct config_item_type nvmet_ns_type = {
+static const struct config_item_type nvmet_ns_type = {
.ct_item_ops = &nvmet_ns_item_ops,
.ct_attrs = nvmet_ns_attrs,
.ct_owner = THIS_MODULE,
@@ -464,7 +464,7 @@ static struct configfs_group_operations nvmet_namespaces_group_ops = {
.make_group = nvmet_ns_make,
};
-static struct config_item_type nvmet_namespaces_type = {
+static const struct config_item_type nvmet_namespaces_type = {
.ct_group_ops = &nvmet_namespaces_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -540,7 +540,7 @@ static struct configfs_item_operations nvmet_port_subsys_item_ops = {
.drop_link = nvmet_port_subsys_drop_link,
};
-static struct config_item_type nvmet_port_subsys_type = {
+static const struct config_item_type nvmet_port_subsys_type = {
.ct_item_ops = &nvmet_port_subsys_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -613,7 +613,7 @@ static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
.drop_link = nvmet_allowed_hosts_drop_link,
};
-static struct config_item_type nvmet_allowed_hosts_type = {
+static const struct config_item_type nvmet_allowed_hosts_type = {
.ct_item_ops = &nvmet_allowed_hosts_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -729,7 +729,7 @@ static struct configfs_item_operations nvmet_subsys_item_ops = {
.release = nvmet_subsys_release,
};
-static struct config_item_type nvmet_subsys_type = {
+static const struct config_item_type nvmet_subsys_type = {
.ct_item_ops = &nvmet_subsys_item_ops,
.ct_attrs = nvmet_subsys_attrs,
.ct_owner = THIS_MODULE,
@@ -767,7 +767,7 @@ static struct configfs_group_operations nvmet_subsystems_group_ops = {
.make_group = nvmet_subsys_make,
};
-static struct config_item_type nvmet_subsystems_type = {
+static const struct config_item_type nvmet_subsystems_type = {
.ct_group_ops = &nvmet_subsystems_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -827,7 +827,7 @@ static struct configfs_item_operations nvmet_referral_item_ops = {
.release = nvmet_referral_release,
};
-static struct config_item_type nvmet_referral_type = {
+static const struct config_item_type nvmet_referral_type = {
.ct_owner = THIS_MODULE,
.ct_attrs = nvmet_referral_attrs,
.ct_item_ops = &nvmet_referral_item_ops,
@@ -852,7 +852,7 @@ static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
};
-static struct config_item_type nvmet_referrals_type = {
+static const struct config_item_type nvmet_referrals_type = {
.ct_owner = THIS_MODULE,
.ct_group_ops = &nvmet_referral_group_ops,
};
@@ -880,7 +880,7 @@ static struct configfs_item_operations nvmet_port_item_ops = {
.release = nvmet_port_release,
};
-static struct config_item_type nvmet_port_type = {
+static const struct config_item_type nvmet_port_type = {
.ct_attrs = nvmet_port_attrs,
.ct_item_ops = &nvmet_port_item_ops,
.ct_owner = THIS_MODULE,
@@ -921,7 +921,7 @@ static struct configfs_group_operations nvmet_ports_group_ops = {
.make_group = nvmet_ports_make,
};
-static struct config_item_type nvmet_ports_type = {
+static const struct config_item_type nvmet_ports_type = {
.ct_group_ops = &nvmet_ports_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -940,7 +940,7 @@ static struct configfs_item_operations nvmet_host_item_ops = {
.release = nvmet_host_release,
};
-static struct config_item_type nvmet_host_type = {
+static const struct config_item_type nvmet_host_type = {
.ct_item_ops = &nvmet_host_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -963,14 +963,14 @@ static struct configfs_group_operations nvmet_hosts_group_ops = {
.make_group = nvmet_hosts_make_group,
};
-static struct config_item_type nvmet_hosts_type = {
+static const struct config_item_type nvmet_hosts_type = {
.ct_group_ops = &nvmet_hosts_group_ops,
.ct_owner = THIS_MODULE,
};
static struct config_group nvmet_hosts_group;
-static struct config_item_type nvmet_root_type = {
+static const struct config_item_type nvmet_root_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 645ba7eee35d..b54748ad5f48 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -57,6 +57,17 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
return 0;
}
+static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ns *ns;
+
+ if (list_empty(&subsys->namespaces))
+ return 0;
+
+ ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
+ return ns->nsid;
+}
+
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
@@ -334,6 +345,8 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
ns->enabled = false;
list_del_rcu(&ns->dev_link);
+ if (ns->nsid == subsys->max_nsid)
+ subsys->max_nsid = nvmet_max_nsid(subsys);
mutex_unlock(&subsys->lock);
/*
@@ -497,6 +510,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->ops = ops;
req->sg = NULL;
req->sg_cnt = 0;
+ req->transfer_len = 0;
req->rsp->status = 0;
/* no support for fused commands yet */
@@ -546,6 +560,15 @@ void nvmet_req_uninit(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
+void nvmet_req_execute(struct nvmet_req *req)
+{
+ if (unlikely(req->data_len != req->transfer_len))
+ nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
+ else
+ req->execute(req);
+}
+EXPORT_SYMBOL_GPL(nvmet_req_execute);
+
static inline bool nvmet_cc_en(u32 cc)
{
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
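
nvmet_req_execute() moves the transfer-length sanity check into the core: a transport records the length promised on the wire in req->transfer_len and calls nvmet_req_execute() instead of invoking req->execute() directly; if that length disagrees with the data_len derived from the command, the core completes the request with SGL_INVALID_DATA | DNR. A minimal sketch of the calling convention (wire_data_len is a placeholder for whatever the transport's command capsule carries; the FC conversion below follows this exact pattern):

    /* Hypothetical transport receive path, after nvmet_req_init() succeeded. */
    req->transfer_len = wire_data_len;
    if (req->transfer_len) {
    	/* allocate/map the data SGL for the request here */
    }
    nvmet_req_execute(req);	/* fails the request itself on a length mismatch */
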
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 58e010bdda3e..664d3013f68f 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -76,7 +76,6 @@ struct nvmet_fc_fcp_iod {
dma_addr_t rspdma;
struct scatterlist *data_sg;
int data_sg_cnt;
- u32 total_length;
u32 offset;
enum nvmet_fcp_datadir io_dir;
bool active;
@@ -150,6 +149,7 @@ struct nvmet_fc_tgt_assoc {
struct list_head a_list;
struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
+ struct work_struct del_work;
};
@@ -232,6 +232,7 @@ static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod);
+static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
/* *********************** FC-NVME DMA Handling **************************** */
@@ -802,6 +803,16 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
return NULL;
}
+static void
+nvmet_fc_delete_assoc(struct work_struct *work)
+{
+ struct nvmet_fc_tgt_assoc *assoc =
+ container_of(work, struct nvmet_fc_tgt_assoc, del_work);
+
+ nvmet_fc_delete_target_assoc(assoc);
+ nvmet_fc_tgt_a_put(assoc);
+}
+
static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
@@ -826,6 +837,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
assoc->a_id = idx;
INIT_LIST_HEAD(&assoc->a_list);
kref_init(&assoc->ref);
+ INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
while (needrandom) {
get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
@@ -1118,8 +1130,7 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
nvmet_fc_tgtport_put(tgtport);
if (found_ctrl) {
- nvmet_fc_delete_target_assoc(assoc);
- nvmet_fc_tgt_a_put(assoc);
+ schedule_work(&assoc->del_work);
return;
}
@@ -1688,7 +1699,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
u32 page_len, length;
int i = 0;
- length = fod->total_length;
+ length = fod->req.transfer_len;
nent = DIV_ROUND_UP(length, PAGE_SIZE);
sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
@@ -1777,7 +1788,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
u32 rsn, rspcnt, xfr_length;
if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
- xfr_length = fod->total_length;
+ xfr_length = fod->req.transfer_len;
else
xfr_length = fod->offset;
@@ -1803,7 +1814,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
if (!(rspcnt % fod->queue->ersp_ratio) ||
sqe->opcode == nvme_fabrics_command ||
- xfr_length != fod->total_length ||
+ xfr_length != fod->req.transfer_len ||
(le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
(sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
@@ -1880,7 +1891,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
- (fod->total_length - fod->offset));
+ (fod->req.transfer_len - fod->offset));
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
@@ -1894,7 +1905,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
* combined xfr with response.
*/
if ((op == NVMET_FCOP_READDATA) &&
- ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+ ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
(tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
fcpreq->op = NVMET_FCOP_READDATA_RSP;
nvmet_fc_prep_fcp_rsp(tgtport, fod);
@@ -1974,7 +1985,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
fod->offset += fcpreq->transferred_length;
- if (fod->offset != fod->total_length) {
+ if (fod->offset != fod->req.transfer_len) {
spin_lock_irqsave(&fod->flock, flags);
fod->writedataactive = true;
spin_unlock_irqrestore(&fod->flock, flags);
@@ -1986,9 +1997,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
/* data transfer complete, resume with nvmet layer */
-
- fod->req.execute(&fod->req);
-
+ nvmet_req_execute(&fod->req);
break;
case NVMET_FCOP_READDATA:
@@ -2011,7 +2020,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
fod->offset += fcpreq->transferred_length;
- if (fod->offset != fod->total_length) {
+ if (fod->offset != fod->req.transfer_len) {
/* transfer the next chunk */
nvmet_fc_transfer_fcp_data(tgtport, fod,
NVMET_FCOP_READDATA);
@@ -2135,6 +2144,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod)
{
struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+ u32 xfrlen = be32_to_cpu(cmdiu->data_len);
int ret;
/*
@@ -2148,7 +2158,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
- fod->total_length = be32_to_cpu(cmdiu->data_len);
if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
fod->io_dir = NVMET_FCP_WRITE;
if (!nvme_is_write(&cmdiu->sqe))
@@ -2159,7 +2168,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
goto transport_error;
} else {
fod->io_dir = NVMET_FCP_NODATA;
- if (fod->total_length)
+ if (xfrlen)
goto transport_error;
}
@@ -2167,9 +2176,6 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.rsp = &fod->rspiubuf.cqe;
fod->req.port = fod->queue->port;
- /* ensure nvmet handlers will set cmd handler callback */
- fod->req.execute = NULL;
-
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2186,10 +2192,12 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
return;
}
+ fod->req.transfer_len = xfrlen;
+
/* keep a running counter of tail position */
atomic_inc(&fod->queue->sqtail);
- if (fod->total_length) {
+ if (fod->req.transfer_len) {
ret = nvmet_fc_alloc_tgt_pgs(fod);
if (ret) {
nvmet_req_complete(&fod->req, ret);
@@ -2212,9 +2220,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
* can invoke the nvmet_layer now. If read data, cmd completion will
* push the data
*/
-
- fod->req.execute(&fod->req);
-
+ nvmet_req_execute(&fod->req);
return;
transport_error:
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0d4c23dc4532..0a4372a016f2 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -33,18 +33,11 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
req->ns->blksize_shift;
}
-static void nvmet_inline_bio_init(struct nvmet_req *req)
-{
- struct bio *bio = &req->inline_bio;
-
- bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
-}
-
static void nvmet_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
+ struct bio *bio = &req->inline_bio;
struct scatterlist *sg;
- struct bio *bio;
sector_t sector;
blk_qc_t cookie;
int op, op_flags = 0, i;
@@ -66,8 +59,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- nvmet_inline_bio_init(req);
- bio = &req->inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
@@ -94,16 +86,14 @@ static void nvmet_execute_rw(struct nvmet_req *req)
cookie = submit_bio(bio);
- blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
+ blk_poll(bdev_get_queue(req->ns->bdev), cookie);
}
static void nvmet_execute_flush(struct nvmet_req *req)
{
- struct bio *bio;
-
- nvmet_inline_bio_init(req);
- bio = &req->inline_bio;
+ struct bio *bio = &req->inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 92628c432926..96d390416789 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -23,14 +23,6 @@
#define NVME_LOOP_MAX_SEGMENTS 256
-/*
- * We handle AEN commands ourselves and don't even let the
- * block layer know about them.
- */
-#define NVME_LOOP_NR_AEN_COMMANDS 1
-#define NVME_LOOP_AQ_BLKMQ_DEPTH \
- (NVME_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
-
struct nvme_loop_iod {
struct nvme_request nvme_req;
struct nvme_command cmd;
@@ -53,7 +45,6 @@ struct nvme_loop_ctrl {
struct nvme_ctrl ctrl;
struct nvmet_ctrl *target_ctrl;
- struct work_struct delete_work;
};
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -113,7 +104,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
* for them but rather special case them here.
*/
if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
- cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
} else {
@@ -136,7 +127,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
struct nvme_loop_iod *iod =
container_of(work, struct nvme_loop_iod, work);
- iod->req.execute(&iod->req);
+ nvmet_req_execute(&iod->req);
}
static enum blk_eh_timer_return
@@ -185,6 +176,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ iod->req.transfer_len = blk_rq_bytes(req);
}
blk_mq_start_request(req);
@@ -193,7 +185,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
struct nvme_loop_queue *queue = &ctrl->queues[0];
@@ -201,7 +193,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
memset(&iod->cmd, 0, sizeof(iod->cmd));
iod->cmd.common.opcode = nvme_admin_async_event;
- iod->cmd.common.command_id = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
@@ -357,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH;
+ ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
@@ -365,6 +357,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+ ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
@@ -438,41 +431,9 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_loop_destroy_admin_queue(ctrl);
}
-static void nvme_loop_del_ctrl_work(struct work_struct *work)
+static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
- struct nvme_loop_ctrl *ctrl = container_of(work,
- struct nvme_loop_ctrl, delete_work);
-
- nvme_stop_ctrl(&ctrl->ctrl);
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_loop_shutdown_ctrl(ctrl);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
-}
-
-static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
-{
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return -EBUSY;
-
- if (!queue_work(nvme_wq, &ctrl->delete_work))
- return -EBUSY;
-
- return 0;
-}
-
-static int nvme_loop_del_ctrl(struct nvme_ctrl *nctrl)
-{
- struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
- int ret;
-
- ret = __nvme_loop_del_ctrl(ctrl);
- if (ret)
- return ret;
-
- flush_work(&ctrl->delete_work);
-
- return 0;
+ nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
@@ -482,7 +443,7 @@ static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
mutex_lock(&nvme_loop_ctrl_mutex);
list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
if (ctrl->ctrl.cntlid == nctrl->cntlid)
- __nvme_loop_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
}
mutex_unlock(&nvme_loop_ctrl_mutex);
}
@@ -538,7 +499,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
.reg_write32 = nvmf_reg_write32,
.free_ctrl = nvme_loop_free_ctrl,
.submit_async_event = nvme_loop_submit_async_event,
- .delete_ctrl = nvme_loop_del_ctrl,
+ .delete_ctrl = nvme_loop_delete_ctrl_host,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -600,7 +561,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
@@ -641,7 +601,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
- kref_get(&ctrl->ctrl.kref);
+ nvme_get_ctrl(&ctrl->ctrl);
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);
@@ -730,7 +690,7 @@ static void __exit nvme_loop_cleanup_module(void)
mutex_lock(&nvme_loop_ctrl_mutex);
list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
- __nvme_loop_del_ctrl(ctrl);
+ nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
flush_workqueue(nvme_wq);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 87e429bfcd8a..417f6c0331cc 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -223,7 +223,10 @@ struct nvmet_req {
struct bio inline_bio;
struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
int sg_cnt;
+ /* data length as parsed from the command: */
size_t data_len;
+ /* data length as parsed from the SGL descriptor: */
+ size_t transfer_len;
struct nvmet_port *port;
@@ -266,6 +269,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
+void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
@@ -314,7 +318,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
#define NVMET_QUEUE_SIZE 1024
-#define NVMET_NR_QUEUES 64
+#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
#define NVMET_KAS 10
#define NVMET_DISC_KATO 120
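Taken together, the nvmet.h hunks above and the fc/loop/rdma changes in this series establish a single calling convention for fabrics transports: initialize the request, let the transport (not the ->execute handler) fill in req->transfer_len, then hand the request to the core with nvmet_req_execute(). A minimal sketch of that flow, assuming a hypothetical transport (my_xport_ops, my_queue and data_len are illustrative names, not part of the patch):

/*
 * Illustrative sketch only -- not part of this patch.  Shows the calling
 * convention the series introduces for a fabrics transport command path.
 */
static void my_xport_handle_cmd(struct my_queue *q, struct nvmet_req *req,
				size_t data_len)
{
	/* validate the SQE and bind the request to its queues */
	if (!nvmet_req_init(req, &q->nvme_cq, &q->nvme_sq, &my_xport_ops))
		return;	/* rejected; transport-specific cleanup goes here */

	/* the transport, not ->execute(), now supplies the data length */
	req->transfer_len = data_len;

	/* receive or map data as needed, then hand off to the nvmet core */
	nvmet_req_execute(req);
}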
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 76d2bb793afe..49912909c298 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -148,14 +148,14 @@ static inline u32 get_unaligned_le24(const u8 *p)
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
return nvme_is_write(rsp->req.cmd) &&
- rsp->req.data_len &&
+ rsp->req.transfer_len &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
return !nvme_is_write(rsp->req.cmd) &&
- rsp->req.data_len &&
+ rsp->req.transfer_len &&
!rsp->req.rsp->status &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
@@ -577,7 +577,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- rsp->req.execute(&rsp->req);
+ nvmet_req_execute(&rsp->req);
}
static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -609,6 +609,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
nvmet_rdma_use_inline_sg(rsp, len, off);
rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
+ rsp->req.transfer_len += len;
return 0;
}
@@ -636,6 +637,7 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
nvmet_data_dir(&rsp->req));
if (ret < 0)
return NVME_SC_INTERNAL;
+ rsp->req.transfer_len += len;
rsp->n_rdma += ret;
if (invalidate) {
@@ -693,7 +695,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
- rsp->req.execute(&rsp->req);
+ nvmet_req_execute(&rsp->req);
}
return true;
@@ -1512,15 +1514,17 @@ static struct nvmet_fabrics_ops nvmet_rdma_ops = {
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
{
- struct nvmet_rdma_queue *queue;
+ struct nvmet_rdma_queue *queue, *tmp;
/* Device is being removed, delete all queues using this device */
mutex_lock(&nvmet_rdma_queue_mutex);
- list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
if (queue->dev->device != ib_device)
continue;
pr_info("Removing queue %d\n", queue->idx);
+ list_del_init(&queue->queue_list);
__nvmet_rdma_queue_disconnect(queue);
}
mutex_unlock(&nvmet_rdma_queue_mutex);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 101ced4c84be..ff505af064ba 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -123,6 +123,17 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
+config UNIPHIER_EFUSE
+ tristate "UniPhier SoCs eFuse support"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+	  This is a simple driver to dump specified values of UniPhier SoCs
+	  from the eFuse.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-uniphier-efuse.
+
config NVMEM_VF610_OCOTP
tristate "VF610 SoC OCOTP support"
depends on SOC_VF610 || COMPILE_TEST
@@ -135,13 +146,33 @@ config NVMEM_VF610_OCOTP
be called nvmem-vf610-ocotp.
config MESON_EFUSE
- tristate "Amlogic eFuse Support"
+ tristate "Amlogic Meson GX eFuse Support"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
This is a driver to retrieve specific values from the eFuse found on
- the Amlogic Meson SoCs.
+ the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem_meson_efuse.
+config MESON_MX_EFUSE
+ tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_mx_efuse.
+
+config NVMEM_SNVS_LPGPR
+ tristate "Support for Low Power General Purpose Register"
+ depends on SOC_IMX6 || COMPILE_TEST
+ help
+	  This is a driver for the Low Power General Purpose Register (LPGPR)
+	  available in the Secure Non-Volatile Storage (SNVS) block of i.MX6 SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem-snvs-lpgpr.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 173140658693..e54dcfa6565a 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for nvmem drivers.
#
@@ -26,7 +27,13 @@ obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
+nvmem-uniphier-efuse-y := uniphier-efuse.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
nvmem_meson_efuse-y := meson-efuse.o
+obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
+nvmem_meson_mx_efuse-y := meson-mx-efuse.o
+obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
+nvmem_snvs_lpgpr-y := snvs_lpgpr.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 3c56e3b2bd65..5e9e324427f9 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -232,7 +232,6 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = bcm_otpc_read,
.reg_write = bcm_otpc_write,
};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d12e5de78e70..5a5cefd12153 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -462,6 +462,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->id = rval;
nvmem->owner = config->owner;
+ if (!nvmem->owner && config->dev->driver)
+ nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride;
nvmem->word_size = config->word_size;
nvmem->size = config->size;
@@ -615,7 +617,7 @@ static struct nvmem_device *nvmem_find(const char *name)
return to_nvmem_device(d);
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
@@ -753,7 +755,7 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
return cell;
}
-#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
@@ -946,8 +948,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
-static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
- void *buf)
+static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
u8 *p, *b;
int i, bit_offset = cell->bit_offset;
@@ -1028,8 +1029,8 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
-static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
- u8 *_buf, int len)
+static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+ u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 52ff65e0673f..52cfe91d9762 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -34,7 +34,6 @@ struct imx_iim_drvdata {
struct iim_priv {
void __iomem *base;
struct clk *clk;
- struct nvmem_config nvmem;
};
static int imx_iim_read(void *context, unsigned int offset,
@@ -108,7 +107,7 @@ static int imx_iim_probe(struct platform_device *pdev)
struct resource *res;
struct iim_priv *iim;
struct nvmem_device *nvmem;
- struct nvmem_config *cfg;
+ struct nvmem_config cfg = {};
const struct imx_iim_drvdata *drvdata = NULL;
iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
@@ -130,19 +129,16 @@ static int imx_iim_probe(struct platform_device *pdev)
if (IS_ERR(iim->clk))
return PTR_ERR(iim->clk);
- cfg = &iim->nvmem;
+ cfg.name = "imx-iim",
+ cfg.read_only = true,
+ cfg.word_size = 1,
+ cfg.stride = 1,
+ cfg.reg_read = imx_iim_read,
+ cfg.dev = dev;
+ cfg.size = drvdata->nregs;
+ cfg.priv = iim;
- cfg->name = "imx-iim",
- cfg->read_only = true,
- cfg->word_size = 1,
- cfg->stride = 1,
- cfg->owner = THIS_MODULE,
- cfg->reg_read = imx_iim_read,
- cfg->dev = dev;
- cfg->size = drvdata->nregs;
- cfg->priv = iim;
-
- nvmem = nvmem_register(cfg);
+ nvmem = nvmem_register(&cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 193ca8fd350a..d7ba351a70c9 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -40,14 +40,19 @@
#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
#define IMX_OCOTP_ADDR_TIMING 0x0010
-#define IMX_OCOTP_ADDR_DATA 0x0020
+#define IMX_OCOTP_ADDR_DATA0 0x0020
+#define IMX_OCOTP_ADDR_DATA1 0x0030
+#define IMX_OCOTP_ADDR_DATA2 0x0040
+#define IMX_OCOTP_ADDR_DATA3 0x0050
#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
-#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_RELAX 20 /* > 16.5ns */
+#define DEF_FSOURCE 1001 /* > 1000 ns */
+#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
@@ -57,10 +62,16 @@ struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
- unsigned int nregs;
+ const struct ocotp_params *params;
struct nvmem_config *config;
};
+struct ocotp_params {
+ unsigned int nregs;
+ unsigned int bank_address_words;
+ void (*set_timing)(struct ocotp_priv *priv);
+};
+
static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
{
int count;
@@ -121,8 +132,8 @@ static int imx_ocotp_read(void *context, unsigned int offset,
index = offset >> 2;
count = bytes >> 2;
- if (count > (priv->nregs - index))
- count = priv->nregs - index;
+ if (count > (priv->params->nregs - index))
+ count = priv->params->nregs - index;
mutex_lock(&ocotp_mutex);
@@ -160,6 +171,52 @@ read_end:
return ret;
}
+static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ unsigned long strobe_read, relax, strobe_prog;
+ u32 timing = 0;
+
+ /* 47.3.1.3.1
+ * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+
+ relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+ strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+ strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
+static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
+{
+ unsigned long clk_rate = 0;
+ u64 fsource, strobe_prog;
+ u32 timing = 0;
+
+ /* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
+ * 6.4.3.3
+ */
+ clk_rate = clk_get_rate(priv->clk);
+ fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
+ NSEC_PER_SEC) + 1;
+ strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
+ NSEC_PER_SEC) + 1;
+
+ timing = strobe_prog & 0x00000FFF;
+ timing |= (fsource << 12) & 0x000FF000;
+
+ writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+}
+
static int imx_ocotp_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
@@ -167,11 +224,9 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
u32 *buf = val;
int ret;
- unsigned long clk_rate = 0;
- unsigned long strobe_read, relax, strobe_prog;
- u32 timing = 0;
u32 ctrl;
u8 waddr;
+ u8 word = 0;
/* allow only writing one complete OTP word at a time */
if ((bytes != priv->config->word_size) ||
@@ -187,23 +242,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
return ret;
}
- /* 47.3.1.3.1
- * Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
- * fields with timing values to match the current frequency of the
- * ipg_clk. OTP writes will work at maximum bus frequencies as long
- * as the HW_OCOTP_TIMING parameters are set correctly.
- */
- clk_rate = clk_get_rate(priv->clk);
-
- relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
- strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
- strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
-
- timing = strobe_prog & 0x00000FFF;
- timing |= (relax << 12) & 0x0000F000;
- timing |= (strobe_read << 16) & 0x003F0000;
-
- writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
+ /* Setup the write timing values */
+ priv->params->set_timing(priv);
/* 47.3.1.3.2
* Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
@@ -224,8 +264,23 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* description. Both the unlock code and address can be written in the
* same operation.
*/
- /* OTP write/read address specifies one of 128 word address locations */
- waddr = offset / 4;
+ if (priv->params->bank_address_words != 0) {
+ /*
+ * In banked/i.MX7 mode the OTP register bank goes into waddr
+ * see i.MX 7Solo Applications Processor Reference Manual, Rev.
+ * 0.1 section 6.4.3.1
+ */
+ offset = offset / priv->config->word_size;
+ waddr = offset / priv->params->bank_address_words;
+ word = offset & (priv->params->bank_address_words - 1);
+ } else {
+ /*
+ * Non-banked i.MX6 mode.
+ * OTP write/read address specifies one of 128 word address
+ * locations
+ */
+ waddr = offset / 4;
+ }
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
@@ -251,8 +306,43 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* shift right (with zero fill). This shifting is required to program
* the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
* modified.
+ * Note: on i.MX7 there are four data fields to write for banked write
+ * with the fuse blowing operation only taking place after data0
+ * has been written. This is why data0 must always be the last
+ * register written.
*/
- writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA);
+ if (priv->params->bank_address_words != 0) {
+ /* Banked/i.MX7 mode */
+ switch (word) {
+ case 0:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 1:
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 2:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ case 3:
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
+ writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
+ break;
+ }
+ } else {
+ /* Non-banked i.MX6 mode */
+ writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
+ }
/* 47.4.1.4.5
* Once complete, the controller will clear BUSY. A write request to a
@@ -303,17 +393,46 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
- .owner = THIS_MODULE,
.reg_read = imx_ocotp_read,
.reg_write = imx_ocotp_write,
};
+static const struct ocotp_params imx6q_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sl_params = {
+ .nregs = 64,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6sx_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx6ul_params = {
+ .nregs = 128,
+ .bank_address_words = 0,
+ .set_timing = imx_ocotp_set_imx6_timing,
+};
+
+static const struct ocotp_params imx7d_params = {
+ .nregs = 64,
+ .bank_address_words = 4,
+ .set_timing = imx_ocotp_set_imx7_timing,
+};
+
static const struct of_device_id imx_ocotp_dt_ids[] = {
- { .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
- { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
- { .compatible = "fsl,imx6ul-ocotp", (void *)128 },
- { .compatible = "fsl,imx7d-ocotp", (void *)64 },
+ { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
+ { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
+ { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
+ { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
+ { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -342,8 +461,8 @@ static int imx_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
of_id = of_match_device(imx_ocotp_dt_ids, dev);
- priv->nregs = (unsigned long)of_id->data;
- imx_ocotp_nvmem_config.size = 4 * priv->nregs;
+ priv->params = of_device_get_match_data(&pdev->dev);
+ imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
@@ -375,5 +494,5 @@ static struct platform_driver imx_ocotp_driver = {
module_platform_driver(imx_ocotp_driver);
MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
-MODULE_DESCRIPTION("i.MX6 OCOTP fuse box driver");
+MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
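For reference, the i.MX6 timing programming factored out above is plain integer arithmetic on the ipg_clk rate; a stand-alone sketch of the same calculation (user-space C; the 66 MHz example rate is an assumption):

/* Stand-alone sketch of the HW_OCOTP_TIMING math used above; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define DEF_RELAX	20	/* > 16.5 ns */

static uint32_t imx6_ocotp_timing(unsigned long ipg_clk_rate)
{
	unsigned long relax, strobe_prog, strobe_read;
	uint32_t timing;

	relax       = ipg_clk_rate / (1000000000 / DEF_RELAX) - 1;
	strobe_prog = ipg_clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
	strobe_read = ipg_clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;

	timing  = strobe_prog & 0x00000FFF;		/* STROBE_PROG, bits 11:0  */
	timing |= (relax << 12) & 0x0000F000;		/* RELAX,       bits 15:12 */
	timing |= (strobe_read << 16) & 0x003F0000;	/* STROBE_READ, bits 21:16 */

	return timing;
}

int main(void)
{
	/* 66 MHz is a typical i.MX6 ipg_clk rate (assumption) */
	printf("HW_OCOTP_TIMING = 0x%08x\n",
	       (unsigned int)imx6_ocotp_timing(66000000));
	return 0;
}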
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index 6c7e2c424a4e..b1af966206a6 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -159,7 +159,6 @@ static struct nvmem_config lpc18xx_nvmem_config = {
.word_size = 4,
.reg_read = lpc18xx_eeprom_read,
.reg_write = lpc18xx_eeprom_gather_write,
- .owner = THIS_MODULE,
};
static int lpc18xx_eeprom_probe(struct platform_device *pdev)
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
index be8d07403ffc..95268db155e9 100644
--- a/drivers/nvmem/lpc18xx_otp.c
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -64,7 +64,6 @@ static struct nvmem_config lpc18xx_otp_nvmem_config = {
.read_only = true,
.word_size = LPC18XX_OTP_WORD_SIZE,
.stride = LPC18XX_OTP_WORD_SIZE,
- .owner = THIS_MODULE,
.reg_read = lpc18xx_otp_read,
};
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index 70bfc9839bb2..a43c68f90937 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -1,5 +1,5 @@
/*
- * Amlogic eFuse Driver
+ * Amlogic Meson GX eFuse Driver
*
* Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
@@ -37,7 +37,6 @@ static int meson_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "meson-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -89,5 +88,5 @@ static struct platform_driver meson_efuse_driver = {
module_platform_driver(meson_efuse_driver);
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
-MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
new file mode 100644
index 000000000000..a346b4923550
--- /dev/null
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -0,0 +1,265 @@
+/*
+ * Amlogic Meson6, Meson8 and Meson8b eFuse Driver
+ *
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define MESON_MX_EFUSE_CNTL1 0x04
+#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
+#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
+#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
+#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
+#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
+
+#define MESON_MX_EFUSE_CNTL2 0x08
+
+#define MESON_MX_EFUSE_CNTL4 0x10
+#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
+
+struct meson_mx_efuse_platform_data {
+ const char *name;
+ unsigned int word_size;
+};
+
+struct meson_mx_efuse {
+ void __iomem *base;
+ struct clk *core_clk;
+ struct nvmem_device *nvmem;
+ struct nvmem_config config;
+};
+
+static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
+ u32 mask, u32 set)
+{
+ u32 data;
+
+ data = readl(efuse->base + reg);
+ data &= ~mask;
+ data |= (set & mask);
+
+ writel(data, efuse->base + reg);
+}
+
+static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
+{
+ int err;
+
+ err = clk_prepare_enable(efuse->core_clk);
+ if (err)
+ return err;
+
+ /* power up the efuse */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
+ MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
+
+ return 0;
+}
+
+static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
+{
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_PD_ENABLE);
+
+ clk_disable_unprepare(efuse->core_clk);
+}
+
+static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
+ unsigned int addr, u32 *value)
+{
+ int err;
+ u32 regval;
+
+ /* write the address to read */
+ regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
+
+ /* inform the hardware that we changed the address */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
+
+ /* start the read process */
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
+
+ /*
+ * perform a dummy read to ensure that the HW has the RD_BUSY bit set
+ * when polling for the status below.
+ */
+ readl(efuse->base + MESON_MX_EFUSE_CNTL1);
+
+ err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
+ regval,
+ (!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
+ 1, 1000);
+ if (err) {
+ dev_err(efuse->config.dev,
+ "Timeout while reading efuse address %u\n", addr);
+ return err;
+ }
+
+ *value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
+
+ return 0;
+}
+
+static int meson_mx_efuse_read(void *context, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct meson_mx_efuse *efuse = context;
+ u32 tmp;
+ int err, i, addr;
+
+ err = meson_mx_efuse_hw_enable(efuse);
+ if (err)
+ return err;
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
+
+ for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
+ addr = i / efuse->config.word_size;
+
+ err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
+ if (err)
+ break;
+
+		memcpy(buf + (i - offset), &tmp, efuse->config.word_size);
+ }
+
+ meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
+ MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
+
+ meson_mx_efuse_hw_disable(efuse);
+
+ return err;
+}
+
+static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
+ .name = "meson6-efuse",
+ .word_size = 1,
+};
+
+static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
+ .name = "meson8-efuse",
+ .word_size = 4,
+};
+
+static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
+ .name = "meson8b-efuse",
+ .word_size = 4,
+};
+
+static const struct of_device_id meson_mx_efuse_match[] = {
+ { .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
+ { .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
+ { .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
+
+static int meson_mx_efuse_probe(struct platform_device *pdev)
+{
+ const struct meson_mx_efuse_platform_data *drvdata;
+ struct meson_mx_efuse *efuse;
+ struct resource *res;
+
+ drvdata = of_device_get_match_data(&pdev->dev);
+ if (!drvdata)
+ return -EINVAL;
+
+ efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ efuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
+
+ efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name,
+ GFP_KERNEL);
+ efuse->config.owner = THIS_MODULE;
+ efuse->config.dev = &pdev->dev;
+ efuse->config.priv = efuse;
+ efuse->config.stride = drvdata->word_size;
+ efuse->config.word_size = drvdata->word_size;
+ efuse->config.size = SZ_512;
+ efuse->config.read_only = true;
+ efuse->config.reg_read = meson_mx_efuse_read;
+
+ efuse->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(efuse->core_clk)) {
+ dev_err(&pdev->dev, "Failed to get core clock\n");
+ return PTR_ERR(efuse->core_clk);
+ }
+
+ efuse->nvmem = nvmem_register(&efuse->config);
+ if (IS_ERR(efuse->nvmem))
+ return PTR_ERR(efuse->nvmem);
+
+ platform_set_drvdata(pdev, efuse);
+
+ return 0;
+}
+
+static int meson_mx_efuse_remove(struct platform_device *pdev)
+{
+ struct meson_mx_efuse *efuse = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(efuse->nvmem);
+}
+
+static struct platform_driver meson_mx_efuse_driver = {
+ .probe = meson_mx_efuse_probe,
+ .remove = meson_mx_efuse_remove,
+ .driver = {
+ .name = "meson-mx-efuse",
+ .of_match_table = meson_mx_efuse_match,
+ },
+};
+
+module_platform_driver(meson_mx_efuse_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
+MODULE_LICENSE("GPL v2");
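Providers like the driver above only expose the raw eFuse contents; other kernel code is expected to read individual cells through the generic nvmem consumer API. A minimal consumer-side sketch (the cell name "temperature-calib" and the calling driver are assumptions, not defined by this patch):

/* Sketch of an nvmem consumer; illustrative only, cell name is an assumption. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int example_read_calib(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *value;

	/* look up a cell described in the device tree for this consumer */
	cell = nvmem_cell_get(dev, "temperature-calib");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	/* returns a kmalloc'ed copy of the cell contents */
	value = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(value))
		return PTR_ERR(value);

	dev_info(dev, "read %zu byte(s) of calibration data\n", len);
	kfree(value);
	return 0;
}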
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 32fd572e18c5..9ee3479cfc7b 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -18,15 +18,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct mtk_efuse_priv {
+ void __iomem *base;
+};
+
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- *val++ = readl(base + reg + (i++ * 4));
+ *val++ = readl(priv->base + reg + (i++ * 4));
return 0;
}
@@ -34,12 +38,12 @@ static int mtk_reg_read(void *context,
static int mtk_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
- writel(*val++, base + reg + (i++ * 4));
+ writel(*val++, priv->base + reg + (i++ * 4));
return 0;
}
@@ -49,27 +53,26 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct nvmem_config *econfig;
- void __iomem *base;
+ struct nvmem_config econfig = {};
+ struct mtk_efuse_priv *priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
- if (!econfig)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- econfig->stride = 4;
- econfig->word_size = 4;
- econfig->reg_read = mtk_reg_read;
- econfig->reg_write = mtk_reg_write;
- econfig->size = resource_size(res);
- econfig->priv = base;
- econfig->dev = dev;
- econfig->owner = THIS_MODULE;
- nvmem = nvmem_register(econfig);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.reg_read = mtk_reg_read;
+ econfig.reg_write = mtk_reg_write;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index d26dd03cec80..7018e2ef5714 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -118,7 +118,6 @@ static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
- .owner = THIS_MODULE,
.reg_read = mxs_ocotp_read,
};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 2bdb6c389328..cb3b48b47d64 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -17,15 +17,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
+struct qfprom_priv {
+ void __iomem *base;
+};
+
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- *val++ = readb(base + reg + i++);
+ *val++ = readb(priv->base + reg + i++);
return 0;
}
@@ -33,12 +37,12 @@ static int qfprom_reg_read(void *context,
static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
- void __iomem *base = context;
+ struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
- writeb(*val++, base + reg + i++);
+ writeb(*val++, priv->base + reg + i++);
return 0;
}
@@ -52,7 +56,6 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
@@ -64,16 +67,20 @@ static int qfprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- void __iomem *base;
+ struct qfprom_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
econfig.size = resource_size(res);
econfig.dev = dev;
- econfig.priv = base;
+ econfig.priv = priv;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index 63e3eb55f3ac..123de77ca5d6 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -149,7 +149,6 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
- .owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
@@ -178,6 +177,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
+ .compatible = "rockchip,rk3368-efuse",
+ .data = (void *)&rockchip_rk3288_efuse_read,
+ },
+ {
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
},
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
new file mode 100644
index 000000000000..e5c2a4a17f03
--- /dev/null
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define IMX6Q_SNVS_HPLR 0x00
+#define IMX6Q_GPR_SL BIT(5)
+#define IMX6Q_SNVS_LPLR 0x34
+#define IMX6Q_GPR_HL BIT(5)
+#define IMX6Q_SNVS_LPGPR 0x68
+
+struct snvs_lpgpr_cfg {
+ int offset;
+ int offset_hplr;
+ int offset_lplr;
+};
+
+struct snvs_lpgpr_priv {
+	struct device *dev;
+ struct regmap *regmap;
+ struct nvmem_config cfg;
+ const struct snvs_lpgpr_cfg *dcfg;
+};
+
+static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
+ .offset = IMX6Q_SNVS_LPGPR,
+ .offset_hplr = IMX6Q_SNVS_HPLR,
+ .offset_lplr = IMX6Q_SNVS_LPLR,
+};
+
+static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+ unsigned int lock_reg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_SL)
+ return -EPERM;
+
+ ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
+ if (ret < 0)
+ return ret;
+
+ if (lock_reg & IMX6Q_GPR_HL)
+ return -EPERM;
+
+ return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
+ bytes / 4);
+}
+
+static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct snvs_lpgpr_priv *priv = context;
+ const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
+
+ return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
+ val, bytes / 4);
+}
+
+static int snvs_lpgpr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *syscon_node;
+ struct snvs_lpgpr_priv *priv;
+ struct nvmem_config *cfg;
+ struct nvmem_device *nvmem;
+ const struct snvs_lpgpr_cfg *dcfg;
+
+ if (!node)
+ return -ENOENT;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dcfg = of_device_get_match_data(dev);
+ if (!dcfg)
+ return -EINVAL;
+
+ syscon_node = of_get_parent(node);
+ if (!syscon_node)
+ return -ENODEV;
+
+ priv->regmap = syscon_node_to_regmap(syscon_node);
+ of_node_put(syscon_node);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->dcfg = dcfg;
+
+ cfg = &priv->cfg;
+ cfg->priv = priv;
+ cfg->name = dev_name(dev);
+ cfg->dev = dev;
+	cfg->stride = 4;
+	cfg->word_size = 4;
+	cfg->size = 4;
+	cfg->owner = THIS_MODULE;
+	cfg->reg_read = snvs_lpgpr_read;
+	cfg->reg_write = snvs_lpgpr_write;
+
+ nvmem = nvmem_register(cfg);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int snvs_lpgpr_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id snvs_lpgpr_dt_ids[] = {
+ { .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
+ { .compatible = "fsl,imx6ul-snvs-lpgpr",
+ .data = &snvs_lpgpr_cfg_imx6q },
+ { },
+};
+MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
+
+static struct platform_driver snvs_lpgpr_driver = {
+ .probe = snvs_lpgpr_probe,
+ .remove = snvs_lpgpr_remove,
+ .driver = {
+ .name = "snvs_lpgpr",
+ .of_match_table = snvs_lpgpr_dt_ids,
+ },
+};
+module_platform_driver(snvs_lpgpr_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
+MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 Secure Non-Volatile Storage");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 0d6648be93b8..99bd54d85fcb 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -40,7 +40,6 @@ static struct nvmem_config econfig = {
.read_only = true,
.stride = 4,
.word_size = 1,
- .owner = THIS_MODULE,
};
struct sunxi_sid_cfg {
@@ -199,10 +198,16 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
+static const struct sunxi_sid_cfg sun50i_a64_cfg = {
+ .value_offset = 0x200,
+ .size = 0x100,
+};
+
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
+ { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
new file mode 100644
index 000000000000..9d278b4e1dc7
--- /dev/null
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -0,0 +1,97 @@
+/*
+ * UniPhier eFuse driver
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct uniphier_efuse_priv {
+ void __iomem *base;
+};
+
+static int uniphier_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ struct uniphier_efuse_priv *priv = context;
+ u32 *val = _val;
+ int offs;
+
+ for (offs = 0; offs < bytes; offs += sizeof(u32))
+ *val++ = readl(priv->base + reg + offs);
+
+ return 0;
+}
+
+static int uniphier_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config econfig = {};
+ struct uniphier_efuse_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ econfig.stride = 4;
+ econfig.word_size = 4;
+ econfig.read_only = true;
+ econfig.reg_read = uniphier_reg_read;
+ econfig.size = resource_size(res);
+ econfig.priv = priv;
+ econfig.dev = dev;
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int uniphier_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id uniphier_efuse_of_match[] = {
+ { .compatible = "socionext,uniphier-efuse",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
+
+static struct platform_driver uniphier_efuse_driver = {
+ .probe = uniphier_efuse_probe,
+ .remove = uniphier_efuse_remove,
+ .driver = {
+ .name = "uniphier-efuse",
+ .of_match_table = uniphier_efuse_of_match,
+ },
+};
+module_platform_driver(uniphier_efuse_driver);
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier eFuse driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
index 72e4faabce29..5ae9e002f195 100644
--- a/drivers/nvmem/vf610-ocotp.c
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -206,7 +206,6 @@ static int vf610_ocotp_read(void *context, unsigned int offset,
static struct nvmem_config ocotp_config = {
.name = "ocotp",
- .owner = THIS_MODULE,
.stride = 4,
.word_size = 4,
.reg_read = vf610_ocotp_read,
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ba7b034b2b91..ad9a9578f9c4 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -46,10 +46,14 @@ config OF_EARLY_FLATTREE
config OF_PROMTREE
bool
+config OF_KOBJ
+ def_bool SYSFS
+
# Hardly any platforms need this. It is safe to select, but only do so if you
# need it.
config OF_DYNAMIC
bool "Support for dynamic device trees" if OF_UNITTEST
+ select OF_KOBJ
help
On some platforms, the device tree can be manipulated at runtime.
While this option is selected automatically on such platforms, you
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 97dc01c81438..63a4be62ce19 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y = base.o device.o platform.o property.o
+obj-$(CONFIG_OF_KOBJ) += kobj.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 792722e7d458..fa6cabfc3cb9 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -232,8 +232,8 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-int of_pci_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
+static int parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node, const char *name)
{
const int na = 3, ns = 2;
int rlen;
@@ -242,7 +242,7 @@ int of_pci_range_parser_init(struct of_pci_range_parser *parser,
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
- parser->range = of_get_property(node, "ranges", &rlen);
+ parser->range = of_get_property(node, name, &rlen);
if (parser->range == NULL)
return -ENOENT;
@@ -250,8 +250,21 @@ int of_pci_range_parser_init(struct of_pci_range_parser *parser,
return 0;
}
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "ranges");
+}
EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return parser_init(parser, node, "dma-ranges");
+}
+EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
+
struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
struct of_pci_range *range)
{
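The new of_pci_dma_range_parser_init() is a drop-in counterpart to of_pci_range_parser_init() that walks the "dma-ranges" property instead of "ranges"; a hedged usage sketch (the host-bridge node np and the logging are illustrative):

/* Sketch only: iterating "dma-ranges" with the helper added above. */
#include <linux/kernel.h>
#include <linux/of_address.h>

static void example_dump_dma_ranges(struct device_node *np)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;

	if (of_pci_dma_range_parser_init(&parser, np))
		return;		/* node has no "dma-ranges" property */

	for_each_of_pci_range(&parser, &range)
		pr_info("dma-range: cpu %pa -> bus %#llx, size %#llx\n",
			&range.cpu_addr,
			(unsigned long long)range.pci_addr,
			(unsigned long long)range.size);
}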
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 63897531cd75..26618ba8f92a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -95,108 +95,6 @@ int __weak of_node_to_nid(struct device_node *np)
}
#endif
-#ifndef CONFIG_OF_DYNAMIC
-static void of_node_release(struct kobject *kobj)
-{
- /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */
-}
-#endif /* CONFIG_OF_DYNAMIC */
-
-struct kobj_type of_node_ktype = {
- .release = of_node_release,
-};
-
-static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t offset, size_t count)
-{
- struct property *pp = container_of(bin_attr, struct property, attr);
- return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
-}
-
-/* always return newly allocated name, caller must free after use */
-static const char *safe_name(struct kobject *kobj, const char *orig_name)
-{
- const char *name = orig_name;
- struct kernfs_node *kn;
- int i = 0;
-
- /* don't be a hero. After 16 tries give up */
- while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
- sysfs_put(kn);
- if (name != orig_name)
- kfree(name);
- name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
- }
-
- if (name == orig_name) {
- name = kstrdup(orig_name, GFP_KERNEL);
- } else {
- pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
- kobject_name(kobj), name);
- }
- return name;
-}
-
-int __of_add_property_sysfs(struct device_node *np, struct property *pp)
-{
- int rc;
-
- /* Important: Don't leak passwords */
- bool secure = strncmp(pp->name, "security-", 9) == 0;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset || !of_node_is_attached(np))
- return 0;
-
- sysfs_bin_attr_init(&pp->attr);
- pp->attr.attr.name = safe_name(&np->kobj, pp->name);
- pp->attr.attr.mode = secure ? 0400 : 0444;
- pp->attr.size = secure ? 0 : pp->length;
- pp->attr.read = of_node_property_read;
-
- rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
- WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
- return rc;
-}
-
-int __of_attach_node_sysfs(struct device_node *np)
-{
- const char *name;
- struct kobject *parent;
- struct property *pp;
- int rc;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return 0;
-
- if (!of_kset)
- return 0;
-
- np->kobj.kset = of_kset;
- if (!np->parent) {
- /* Nodes without parents are new top level trees */
- name = safe_name(&of_kset->kobj, "base");
- parent = NULL;
- } else {
- name = safe_name(&np->parent->kobj, kbasename(np->full_name));
- parent = &np->parent->kobj;
- }
- if (!name)
- return -ENOMEM;
- rc = kobject_add(&np->kobj, parent, "%s", name);
- kfree(name);
- if (rc)
- return rc;
-
- for_each_property_of_node(np, pp)
- __of_add_property_sysfs(np, pp);
-
- return 0;
-}
-
void __init of_core_init(void)
{
struct device_node *np;
@@ -760,7 +658,7 @@ struct device_node *of_get_child_by_name(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_child_by_name);
-static struct device_node *__of_find_node_by_path(struct device_node *parent,
+struct device_node *__of_find_node_by_path(struct device_node *parent,
const char *path)
{
struct device_node *child;
@@ -863,10 +761,10 @@ EXPORT_SYMBOL(of_find_node_opts_by_path);
/**
* of_find_node_by_name - Find a node by its "name" property
- * @from: The node to start searching from or NULL, the node
+ * @from: The node to start searching from or NULL; the node
* you pass will not be searched, only the next one
- * will; typically, you pass what the previous call
- * returned. of_node_put() will be called on it
+ * will. Typically, you pass what the previous call
+ * returned. of_node_put() will be called on @from.
* @name: The name string to match against
*
* Returns a node pointer with refcount incremented, use
@@ -1504,22 +1402,6 @@ int __of_remove_property(struct device_node *np, struct property *prop)
return 0;
}
-void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
-{
- sysfs_remove_bin_file(&np->kobj, &prop->attr);
- kfree(prop->attr.attr.name);
-}
-
-void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* at early boot, bail here and defer setup to of_init() */
- if (of_kset && of_node_is_attached(np))
- __of_sysfs_remove_bin_file(np, prop);
-}
-
/**
* of_remove_property - Remove a property from a node.
*
@@ -1579,21 +1461,6 @@ int __of_update_property(struct device_node *np, struct property *newprop,
return 0;
}
-void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
- struct property *oldprop)
-{
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- /* At early boot, bail out and defer setup to of_init() */
- if (!of_kset)
- return;
-
- if (oldprop)
- __of_sysfs_remove_bin_file(np, oldprop);
- __of_add_property_sysfs(np, newprop);
-}
-
/*
* of_update_property - Update a property in a node, if the property does
* not exist, add it.
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 64b710265d39..25bddf9c9fe1 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -9,9 +9,7 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
#include <asm/errno.h>
#include "of_private.h"
@@ -101,11 +99,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
* DMA configuration regardless of whether "dma-ranges" is
* correctly specified or not.
*/
- if (!dev_is_pci(dev) &&
-#ifdef CONFIG_ARM_AMBA
- dev->bus != &amba_bustype &&
-#endif
- dev->bus != &platform_bus_type)
+ if (!dev->bus->force_dma)
return ret == -ENODEV ? 0 : ret;
dma_addr = offset = 0;
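With the open-coded bus checks replaced by the force_dma flag on struct bus_type (the flag is assumed to be added alongside this change), a bus that wants default DMA configuration for its devices opts in when it is registered. A minimal sketch (the bus name is illustrative):

    #include <linux/device.h>

    struct bus_type example_bus_type = {
    	.name      = "example",
    	/* request of_dma_configure() even without a "dma-ranges" property */
    	.force_dma = true,
    };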
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 301b6db2b48d..c454941b34ec 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -16,6 +16,11 @@
#include "of_private.h"
+static struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+ return container_of(kobj, struct device_node, kobj);
+}
+
/**
* of_node_get() - Increment refcount of a node
* @node: Node to inc refcount, NULL is supported to simplify writing of
@@ -43,28 +48,6 @@ void of_node_put(struct device_node *node)
}
EXPORT_SYMBOL(of_node_put);
-void __of_detach_node_sysfs(struct device_node *np)
-{
- struct property *pp;
-
- if (!IS_ENABLED(CONFIG_SYSFS))
- return;
-
- BUG_ON(!of_node_is_initialized(np));
- if (!of_kset)
- return;
-
- /* only remove properties if on sysfs */
- if (of_node_is_attached(np)) {
- for_each_property_of_node(np, pp)
- __of_sysfs_remove_bin_file(np, pp);
- kobject_del(&np->kobj);
- }
-
- /* finally remove the kobj_init ref */
- of_node_put(np);
-}
-
static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
int of_reconfig_notifier_register(struct notifier_block *nb)
@@ -315,6 +298,18 @@ int of_detach_node(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_detach_node);
+static void property_list_free(struct property *prop_list)
+{
+ struct property *prop, *next;
+
+ for (prop = prop_list; prop != NULL; prop = next) {
+ next = prop->next;
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+ }
+}
+
/**
* of_node_release() - release a dynamically allocated node
* @kref: kref element of the node to be released
@@ -324,7 +319,6 @@ EXPORT_SYMBOL_GPL(of_detach_node);
void of_node_release(struct kobject *kobj)
{
struct device_node *node = kobj_to_device_node(kobj);
- struct property *prop = node->properties;
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
@@ -335,18 +329,9 @@ void of_node_release(struct kobject *kobj)
if (!of_node_check_flag(node, OF_DYNAMIC))
return;
- while (prop) {
- struct property *next = prop->next;
- kfree(prop->name);
- kfree(prop->value);
- kfree(prop);
- prop = next;
+ property_list_free(node->properties);
+ property_list_free(node->deadprops);
- if (!prop) {
- prop = node->deadprops;
- node->deadprops = NULL;
- }
- }
kfree(node->full_name);
kfree(node->data);
kfree(node);
@@ -508,11 +493,12 @@ static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
}
}
-static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool revert)
+static int __of_changeset_entry_notify(struct of_changeset_entry *ce,
+ bool revert)
{
struct of_reconfig_data rd;
struct of_changeset_entry ce_inverted;
- int ret;
+ int ret = 0;
if (revert) {
__of_changeset_entry_invert(ce, &ce_inverted);
@@ -534,11 +520,12 @@ static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool reve
default:
pr_err("invalid devicetree changeset action: %i\n",
(int)ce->action);
- return;
+ ret = -EINVAL;
}
if (ret)
pr_err("changeset notifier error @%pOF\n", ce->np);
+ return ret;
}
static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
@@ -672,32 +659,82 @@ void of_changeset_destroy(struct of_changeset *ocs)
}
EXPORT_SYMBOL_GPL(of_changeset_destroy);
-int __of_changeset_apply(struct of_changeset *ocs)
+/*
+ * Apply the changeset entries in @ocs.
+ * If apply fails, an attempt is made to revert the entries that were
+ * successfully applied.
+ *
+ * If multiple revert errors occur then only the final revert error is reported.
+ *
+ * Returns 0 on success, a negative error value in case of an error.
+ * If a revert error occurs, it is returned in *ret_revert.
+ */
+int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert)
{
struct of_changeset_entry *ce;
- int ret;
+ int ret, ret_tmp;
- /* perform the rest of the work */
pr_debug("changeset: applying...\n");
list_for_each_entry(ce, &ocs->entries, node) {
ret = __of_changeset_entry_apply(ce);
if (ret) {
pr_err("Error applying changeset (%d)\n", ret);
- list_for_each_entry_continue_reverse(ce, &ocs->entries, node)
- __of_changeset_entry_revert(ce);
+ list_for_each_entry_continue_reverse(ce, &ocs->entries,
+ node) {
+ ret_tmp = __of_changeset_entry_revert(ce);
+ if (ret_tmp)
+ *ret_revert = ret_tmp;
+ }
return ret;
}
}
- pr_debug("changeset: applied, emitting notifiers.\n");
+
+ return 0;
+}
+
+/*
+ * Returns 0 on success, a negative error value in case of an error.
+ *
+ * If multiple changeset entry notification errors occur then only the
+ * final notification error is reported.
+ */
+int __of_changeset_apply_notify(struct of_changeset *ocs)
+{
+ struct of_changeset_entry *ce;
+ int ret = 0, ret_tmp;
+
+ pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
- list_for_each_entry(ce, &ocs->entries, node)
- __of_changeset_entry_notify(ce, 0);
+ list_for_each_entry(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_notify(ce, 0);
+ if (ret_tmp)
+ ret = ret_tmp;
+ }
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
- return 0;
+ return ret;
+}
+
+/*
+ * Returns 0 on success, a negative error value in case of an error.
+ *
+ * If a changeset entry apply fails, an attempt is made to revert any
+ * previous entries in the changeset. If any of the reverts fails,
+ * that failure is not reported. Thus the state of the device tree
+ * is unknown if an apply error occurs.
+ */
+static int __of_changeset_apply(struct of_changeset *ocs)
+{
+ int ret, ret_revert = 0;
+
+ ret = __of_changeset_apply_entries(ocs, &ret_revert);
+ if (!ret)
+ ret = __of_changeset_apply_notify(ocs);
+
+ return ret;
}
/**
@@ -724,31 +761,74 @@ int of_changeset_apply(struct of_changeset *ocs)
}
EXPORT_SYMBOL_GPL(of_changeset_apply);
-int __of_changeset_revert(struct of_changeset *ocs)
+/*
+ * Revert the changeset entries in @ocs.
+ * If revert fails, an attempt is made to re-apply the entries that were
+ * successfully removed.
+ *
+ * If multiple re-apply errors occur then only the final apply error is
+ * reported.
+ *
+ * Returns 0 on success, a negative error value in case of an error.
+ * If an apply error occurs, it is returned in *ret_apply.
+ */
+int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply)
{
struct of_changeset_entry *ce;
- int ret;
+ int ret, ret_tmp;
pr_debug("changeset: reverting...\n");
list_for_each_entry_reverse(ce, &ocs->entries, node) {
ret = __of_changeset_entry_revert(ce);
if (ret) {
pr_err("Error reverting changeset (%d)\n", ret);
- list_for_each_entry_continue(ce, &ocs->entries, node)
- __of_changeset_entry_apply(ce);
+ list_for_each_entry_continue(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_apply(ce);
+ if (ret_tmp)
+ *ret_apply = ret_tmp;
+ }
return ret;
}
}
- pr_debug("changeset: reverted, emitting notifiers.\n");
+
+ return 0;
+}
+
+/*
+ * If multiple changeset entry notification errors occur then only the
+ * final notification error is reported.
+ */
+int __of_changeset_revert_notify(struct of_changeset *ocs)
+{
+ struct of_changeset_entry *ce;
+ int ret = 0, ret_tmp;
+
+ pr_debug("changeset: emitting notifiers.\n");
/* drop the global lock while emitting notifiers */
mutex_unlock(&of_mutex);
- list_for_each_entry_reverse(ce, &ocs->entries, node)
- __of_changeset_entry_notify(ce, 1);
+ list_for_each_entry_reverse(ce, &ocs->entries, node) {
+ ret_tmp = __of_changeset_entry_notify(ce, 1);
+ if (ret_tmp)
+ ret = ret_tmp;
+ }
mutex_lock(&of_mutex);
pr_debug("changeset: notifiers sent.\n");
- return 0;
+ return ret;
+}
+
+static int __of_changeset_revert(struct of_changeset *ocs)
+{
+ int ret, ret_reply;
+
+ ret_reply = 0;
+ ret = __of_changeset_revert_entries(ocs, &ret_reply);
+
+ if (!ret)
+ ret = __of_changeset_revert_notify(ocs);
+
+ return ret;
}
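The public entry points keep their single-call behaviour, while the internal *_entries()/*_notify() halves let callers such as the overlay code interleave their own locking. A minimal external user of the unchanged API looks like this sketch (names and error handling are illustrative):

    static int example_add_prop(struct device_node *np, struct property *prop)
    {
    	struct of_changeset cset;
    	int err;

    	of_changeset_init(&cset);
    	err = of_changeset_add_property(&cset, np, prop);
    	if (!err)
    		err = of_changeset_apply(&cset); /* handles the of_mutex locking */
    	if (err)
    		of_changeset_destroy(&cset);
    	return err;
    }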
/**
@@ -775,7 +855,7 @@ int of_changeset_revert(struct of_changeset *ocs)
EXPORT_SYMBOL_GPL(of_changeset_revert);
/**
- * of_changeset_action - Perform a changeset action
+ * of_changeset_action - Add an action to the tail of the changeset list
*
* @ocs: changeset pointer
* @action: action to perform
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ce30c9a588a4..4675e5ac4d11 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -132,6 +132,19 @@ bool of_fdt_is_big_endian(const void *blob, unsigned long node)
return false;
}
+static bool of_fdt_device_is_available(const void *blob, unsigned long node)
+{
+ const char *status = fdt_getprop(blob, node, "status", NULL);
+
+ if (!status)
+ return true;
+
+ if (!strcmp(status, "ok") || !strcmp(status, "okay"))
+ return true;
+
+ return false;
+}
+
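So a flattened node carrying, for example, status = "disabled" is treated as unavailable, mirroring what of_device_is_available() reports for nodes in the unflattened tree.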
/**
* of_fdt_match - Return true if node matches a list of compatible values
*/
@@ -266,74 +279,32 @@ static void populate_properties(const void *blob,
*pprev = NULL;
}
-static unsigned int populate_node(const void *blob,
- int offset,
- void **mem,
- struct device_node *dad,
- unsigned int fpsize,
- struct device_node **pnp,
- bool dryrun)
+static bool populate_node(const void *blob,
+ int offset,
+ void **mem,
+ struct device_node *dad,
+ struct device_node **pnp,
+ bool dryrun)
{
struct device_node *np;
const char *pathp;
unsigned int l, allocl;
- int new_format = 0;
pathp = fdt_get_name(blob, offset, &l);
if (!pathp) {
*pnp = NULL;
- return 0;
+ return false;
}
allocl = ++l;
- /* version 0x10 has a more compact unit name here instead of the full
- * path. we accumulate the full path size using "fpsize", we'll rebuild
- * it later. We detect this because the first character of the name is
- * not '/'.
- */
- if ((*pathp) != '/') {
- new_format = 1;
- if (fpsize == 0) {
- /* root node: special case. fpsize accounts for path
- * plus terminating zero. root node only has '/', so
- * fpsize should be 2, but we want to avoid the first
- * level nodes to have two '/' so we use fpsize 1 here
- */
- fpsize = 1;
- allocl = 2;
- l = 1;
- pathp = "";
- } else {
- /* account for '/' and path size minus terminal 0
- * already in 'l'
- */
- fpsize += l;
- allocl = fpsize;
- }
- }
-
np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
of_node_init(np);
np->full_name = fn = ((char *)np) + sizeof(*np);
- if (new_format) {
- /* rebuild full path for new format */
- if (dad && dad->parent) {
- strcpy(fn, dad->full_name);
-#ifdef DEBUG
- if ((strlen(fn) + l + 1) != allocl) {
- pr_debug("%s: p: %d, l: %d, a: %d\n",
- pathp, (int)strlen(fn),
- l, allocl);
- }
-#endif
- fn += strlen(fn);
- }
- *(fn++) = '/';
- }
+
memcpy(fn, pathp, l);
if (dad != NULL) {
@@ -355,7 +326,7 @@ static unsigned int populate_node(const void *blob,
}
*pnp = np;
- return fpsize;
+ return true;
}
static void reverse_nodes(struct device_node *parent)
@@ -399,7 +370,6 @@ static int unflatten_dt_nodes(const void *blob,
struct device_node *root;
int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH 64
- unsigned int fpsizes[FDT_MAX_DEPTH];
struct device_node *nps[FDT_MAX_DEPTH];
void *base = mem;
bool dryrun = !base;
@@ -418,7 +388,6 @@ static int unflatten_dt_nodes(const void *blob,
depth = initial_depth = 1;
root = dad;
- fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
nps[depth] = dad;
for (offset = 0;
@@ -427,11 +396,12 @@ static int unflatten_dt_nodes(const void *blob,
if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
continue;
- fpsizes[depth+1] = populate_node(blob, offset, &mem,
- nps[depth],
- fpsizes[depth],
- &nps[depth+1], dryrun);
- if (!fpsizes[depth+1])
+ if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
+ !of_fdt_device_is_available(blob, offset))
+ continue;
+
+ if (!populate_node(blob, offset, &mem, nps[depth],
+ &nps[depth+1], dryrun))
return mem - base;
if (!dryrun && nodepp && !*nodepp)
@@ -467,6 +437,7 @@ static int unflatten_dt_nodes(const void *blob,
* @mynodes: The device_node tree created by the call
* @dt_alloc: An allocator that provides a virtual address to memory
* for the resulting tree
+ * @detached: if true set OF_DETACHED on @mynodes
*
* Returns NULL on failure or the memory chunk containing the unflattened
* device tree on success.
@@ -652,7 +623,6 @@ static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
int depth, void *data)
{
static int found;
- const char *status;
int err;
if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
@@ -672,8 +642,7 @@ static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
return 1;
}
- status = of_get_flat_dt_prop(node, "status", NULL);
- if (status && strcmp(status, "okay") != 0 && strcmp(status, "ok") != 0)
+ if (!of_fdt_device_is_available(initial_boot_params, node))
return 0;
err = __reserved_mem_reserve_reg(node, uname);
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
new file mode 100644
index 000000000000..250fc7bb550f
--- /dev/null
+++ b/drivers/of/kobj.c
@@ -0,0 +1,164 @@
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "of_private.h"
+
+/* true when node is initialized */
+static int of_node_is_initialized(struct device_node *node)
+{
+ return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+int of_node_is_attached(struct device_node *node)
+{
+ return node && node->kobj.state_in_sysfs;
+}
+
+
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
+{
+ /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+ .release = of_node_release,
+};
+
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct property *pp = container_of(bin_attr, struct property, attr);
+ return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+}
+
+/* always return newly allocated name, caller must free after use */
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
+{
+ const char *name = orig_name;
+ struct kernfs_node *kn;
+ int i = 0;
+
+ /* don't be a hero. After 16 tries give up */
+ while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) {
+ sysfs_put(kn);
+ if (name != orig_name)
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ }
+
+ if (name == orig_name) {
+ name = kstrdup(orig_name, GFP_KERNEL);
+ } else {
+ pr_warn("Duplicate name in %s, renamed to \"%s\"\n",
+ kobject_name(kobj), name);
+ }
+ return name;
+}
+
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ int rc;
+
+ /* Important: Don't leak passwords */
+ bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return 0;
+
+ if (!of_kset || !of_node_is_attached(np))
+ return 0;
+
+ sysfs_bin_attr_init(&pp->attr);
+ pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+ pp->attr.attr.mode = secure ? 0400 : 0444;
+ pp->attr.size = secure ? 0 : pp->length;
+ pp->attr.read = of_node_property_read;
+
+ rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+ WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
+ return rc;
+}
+
+void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
+{
+ if (!IS_ENABLED(CONFIG_SYSFS))
+ return;
+
+ sysfs_remove_bin_file(&np->kobj, &prop->attr);
+ kfree(prop->attr.attr.name);
+}
+
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+{
+ /* at early boot, bail here and defer setup to of_init() */
+ if (of_kset && of_node_is_attached(np))
+ __of_sysfs_remove_bin_file(np, prop);
+}
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop)
+{
+ /* At early boot, bail out and defer setup to of_init() */
+ if (!of_kset)
+ return;
+
+ if (oldprop)
+ __of_sysfs_remove_bin_file(np, oldprop);
+ __of_add_property_sysfs(np, newprop);
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+ const char *name;
+ struct kobject *parent;
+ struct property *pp;
+ int rc;
+
+ if (!of_kset)
+ return 0;
+
+ np->kobj.kset = of_kset;
+ if (!np->parent) {
+ /* Nodes without parents are new top level trees */
+ name = safe_name(&of_kset->kobj, "base");
+ parent = NULL;
+ } else {
+ name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+ parent = &np->parent->kobj;
+ }
+ if (!name)
+ return -ENOMEM;
+ rc = kobject_add(&np->kobj, parent, "%s", name);
+ kfree(name);
+ if (rc)
+ return rc;
+
+ for_each_property_of_node(np, pp)
+ __of_add_property_sysfs(np, pp);
+
+ return 0;
+}
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+ struct property *pp;
+
+ BUG_ON(!of_node_is_initialized(np));
+ if (!of_kset)
+ return;
+
+ /* only remove properties if on sysfs */
+ if (of_node_is_attached(np)) {
+ for_each_property_of_node(np, pp)
+ __of_sysfs_remove_bin_file(np, pp);
+ kobject_del(&np->kobj);
+ }
+
+ /* finally remove the kobj_init ref */
+ of_node_put(np);
+}
+
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index e9ec931f5b9a..a7b1cb6c2f65 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -374,7 +374,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid,
pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, *id_out);
+ rid_len, rid, masked_rid - rid_base + out_base);
return 0;
}
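As a worked example with hypothetical map values, mask 0xffff, rid-base 0x0000 and out-base 0x0100 translate an input RID of 0x0010 to 0x0110, which is the value the corrected message now prints.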
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 3ae12ffbf547..92a9a3687446 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -35,18 +35,16 @@ extern struct mutex of_mutex;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
-
-static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
-{
- return container_of(kobj, struct device_node, kobj);
-}
-
#if defined(CONFIG_OF_DYNAMIC)
extern int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop);
extern void of_node_release(struct kobject *kobj);
-extern int __of_changeset_apply(struct of_changeset *ocs);
-extern int __of_changeset_revert(struct of_changeset *ocs);
+extern int __of_changeset_apply_entries(struct of_changeset *ocs,
+ int *ret_revert);
+extern int __of_changeset_apply_notify(struct of_changeset *ocs);
+extern int __of_changeset_revert_entries(struct of_changeset *ocs,
+ int *ret_apply);
+extern int __of_changeset_revert_notify(struct of_changeset *ocs);
#else /* CONFIG_OF_DYNAMIC */
static inline int of_property_notify(int action, struct device_node *np,
struct property *prop, struct property *old_prop)
@@ -55,6 +53,41 @@ static inline int of_property_notify(int action, struct device_node *np,
}
#endif /* CONFIG_OF_DYNAMIC */
+#if defined(CONFIG_OF_KOBJ)
+int of_node_is_attached(struct device_node *node);
+int __of_add_property_sysfs(struct device_node *np, struct property *pp);
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+ struct property *oldprop);
+int __of_attach_node_sysfs(struct device_node *np);
+void __of_detach_node_sysfs(struct device_node *np);
+#else
+static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+ return 0;
+}
+static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {}
+static inline void __of_update_property_sysfs(struct device_node *np,
+ struct property *newprop, struct property *oldprop) {}
+static inline int __of_attach_node_sysfs(struct device_node *np)
+{
+ return 0;
+}
+static inline void __of_detach_node_sysfs(struct device_node *np) {}
+#endif
+
+#if defined(CONFIG_OF_RESOLVE)
+int of_resolve_phandles(struct device_node *tree);
+#endif
+
+#if defined(CONFIG_OF_OVERLAY)
+void of_overlay_mutex_lock(void);
+void of_overlay_mutex_unlock(void);
+#else
+static inline void of_overlay_mutex_lock(void) {};
+static inline void of_overlay_mutex_unlock(void) {};
+#endif
+
#if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY)
extern void __init unittest_unflatten_overlay_base(void);
#else
@@ -77,6 +110,8 @@ extern void *__unflatten_device_tree(const void *blob,
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
__printf(2, 3) struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...);
+struct device_node *__of_find_node_by_path(struct device_node *parent,
+ const char *path);
struct device_node *__of_find_node_by_full_path(struct device_node *node,
const char *path);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 32771c2ced7b..22b75c82e377 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -397,3 +397,29 @@ void of_reserved_mem_device_release(struct device *dev)
rmem->ops->device_release(rmem, dev);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
+
+/**
+ * of_reserved_mem_lookup() - acquire reserved_mem from a device node
+ * @np: node pointer of the desired reserved-memory region
+ *
+ * This function allows drivers to acquire a reference to the reserved_mem
+ * struct based on a device node handle.
+ *
+ * Returns a reserved_mem reference, or NULL on error.
+ */
+struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
+{
+ const char *name;
+ int i;
+
+ if (!np->full_name)
+ return NULL;
+
+ name = kbasename(np->full_name);
+ for (i = 0; i < reserved_mem_count; i++)
+ if (!strcmp(reserved_mem[i].name, name))
+ return &reserved_mem[i];
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
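A typical consumer resolves its "memory-region" phandle and then looks up the descriptor; the sketch below is illustrative and not taken from this patch:

    static int example_get_rmem(struct device *dev)
    {
    	struct device_node *np;
    	struct reserved_mem *rmem;

    	np = of_parse_phandle(dev->of_node, "memory-region", 0);
    	if (!np)
    		return -ENODEV;

    	rmem = of_reserved_mem_lookup(np);
    	of_node_put(np);
    	if (!rmem)
    		return -ENODEV;

    	dev_info(dev, "reserved region base %pa size %pa\n",
    		 &rmem->base, &rmem->size);
    	return 0;
    }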
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 8ecfee31ab6d..c150abb9049d 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -25,252 +25,378 @@
#include "of_private.h"
/**
- * struct of_overlay_info - Holds a single overlay info
+ * struct fragment - info about fragment nodes in overlay expanded device tree
* @target: target of the overlay operation
- * @overlay: pointer to the overlay contents node
- *
- * Holds a single overlay state, including all the overlay logs &
- * records.
+ * @overlay: pointer to the __overlay__ node
*/
-struct of_overlay_info {
+struct fragment {
struct device_node *target;
struct device_node *overlay;
- bool is_symbols_node;
};
/**
- * struct of_overlay - Holds a complete overlay transaction
- * @node: List on which we are located
- * @count: Count of ovinfo structures
- * @ovinfo_tab: Overlay info table (count sized)
- * @cset: Changeset to be used
- *
- * Holds a complete overlay transaction
+ * struct overlay_changeset
+ * @ovcs_list: list on which we are located
+ * @overlay_tree: expanded device tree that contains the fragment nodes
+ * @count: count of fragment structures
+ * @fragments: fragment nodes in the overlay expanded device tree
+ * @symbols_fragment: last element of @fragments[] is the __symbols__ node
+ * @cset: changeset to apply fragments to live device tree
*/
-struct of_overlay {
+struct overlay_changeset {
int id;
- struct list_head node;
+ struct list_head ovcs_list;
+ struct device_node *overlay_tree;
int count;
- struct of_overlay_info *ovinfo_tab;
+ struct fragment *fragments;
+ bool symbols_fragment;
struct of_changeset cset;
};
-static int of_overlay_apply_one(struct of_overlay *ov,
- struct device_node *target, const struct device_node *overlay,
- bool is_symbols_node);
+/* flags are sticky - once set, do not reset */
+static int devicetree_state_flags;
+#define DTSF_APPLY_FAIL 0x01
+#define DTSF_REVERT_FAIL 0x02
+
+/*
+ * If a changeset apply or revert encounters an error, an attempt will
+ * be made to undo partial changes, but may fail. If the undo fails
+ * we do not know the state of the devicetree.
+ */
+static int devicetree_corrupt(void)
+{
+ return devicetree_state_flags &
+ (DTSF_APPLY_FAIL | DTSF_REVERT_FAIL);
+}
+
+static int build_changeset_next_level(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_node);
+
+/*
+ * of_resolve_phandles() finds the largest phandle in the live tree.
+ * of_overlay_apply() may add a larger phandle to the live tree.
+ * Do not allow race between two overlays being applied simultaneously:
+ * mutex_lock(&of_overlay_phandle_mutex)
+ * of_resolve_phandles()
+ * of_overlay_apply()
+ * mutex_unlock(&of_overlay_phandle_mutex)
+ */
+static DEFINE_MUTEX(of_overlay_phandle_mutex);
+
+void of_overlay_mutex_lock(void)
+{
+ mutex_lock(&of_overlay_phandle_mutex);
+}
+
+void of_overlay_mutex_unlock(void)
+{
+ mutex_unlock(&of_overlay_phandle_mutex);
+}
-static BLOCKING_NOTIFIER_HEAD(of_overlay_chain);
+
+static LIST_HEAD(ovcs_list);
+static DEFINE_IDR(ovcs_idr);
+
+static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
int of_overlay_notifier_register(struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&of_overlay_chain, nb);
+ return blocking_notifier_chain_register(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
int of_overlay_notifier_unregister(struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&of_overlay_chain, nb);
+ return blocking_notifier_chain_unregister(&overlay_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
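Consumers register for the pre/post apply and remove actions through these helpers; a minimal notifier sketch (the handler body is illustrative):

    static int example_overlay_notify(struct notifier_block *nb,
    				  unsigned long action, void *arg)
    {
    	struct of_overlay_notify_data *nd = arg;

    	if (action == OF_OVERLAY_PRE_APPLY)
    		pr_info("overlay fragment targets %pOF\n", nd->target);

    	return NOTIFY_OK;
    }

    static struct notifier_block example_overlay_nb = {
    	.notifier_call = example_overlay_notify,
    };

    /* registered with of_overlay_notifier_register(&example_overlay_nb); */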
-static int of_overlay_notify(struct of_overlay *ov,
- enum of_overlay_notify_action action)
+static char *of_overlay_action_name[] = {
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+};
+
+static int overlay_notify(struct overlay_changeset *ovcs,
+ enum of_overlay_notify_action action)
{
struct of_overlay_notify_data nd;
int i, ret;
- for (i = 0; i < ov->count; i++) {
- struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+ for (i = 0; i < ovcs->count; i++) {
+ struct fragment *fragment = &ovcs->fragments[i];
- nd.target = ovinfo->target;
- nd.overlay = ovinfo->overlay;
+ nd.target = fragment->target;
+ nd.overlay = fragment->overlay;
- ret = blocking_notifier_call_chain(&of_overlay_chain,
+ ret = blocking_notifier_call_chain(&overlay_notify_chain,
action, &nd);
- if (ret)
- return notifier_to_errno(ret);
+ if (ret == NOTIFY_OK || ret == NOTIFY_STOP)
+ return 0;
+ if (ret) {
+ ret = notifier_to_errno(ret);
+ pr_err("overlay changeset %s notifier error %d, target: %pOF\n",
+ of_overlay_action_name[action], ret, nd.target);
+ return ret;
+ }
}
return 0;
}
-static struct property *dup_and_fixup_symbol_prop(struct of_overlay *ov,
- const struct property *prop)
+/*
+ * The values of properties in the "/__symbols__" node are paths in
+ * the ovcs->overlay_tree. When duplicating the properties, the paths
+ * need to be adjusted to be the correct path for the live device tree.
+ *
+ * The paths refer to a node in the subtree of a fragment node's "__overlay__"
+ * node, for example "/fragment@0/__overlay__/symbol_path_tail",
+ * where symbol_path_tail can be a single node or it may be a multi-node path.
+ *
+ * The duplicated property value will be modified by replacing the
+ * "/fragment_name/__overlay/" portion of the value with the target
+ * path from the fragment node.
+ */
+static struct property *dup_and_fixup_symbol_prop(
+ struct overlay_changeset *ovcs, const struct property *prop)
{
- struct of_overlay_info *ovinfo;
- struct property *new;
- const char *overlay_name;
- char *label_path;
- char *symbol_path;
+ struct fragment *fragment;
+ struct property *new_prop;
+ struct device_node *fragment_node;
+ struct device_node *overlay_node;
+ const char *path;
+ const char *path_tail;
const char *target_path;
int k;
- int label_path_len;
int overlay_name_len;
+ int path_len;
+ int path_tail_len;
int target_path_len;
if (!prop->value)
return NULL;
- symbol_path = prop->value;
-
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
+ if (strnlen(prop->value, prop->length) >= prop->length)
return NULL;
+ path = prop->value;
+ path_len = strlen(path);
- for (k = 0; k < ov->count; k++) {
- ovinfo = &ov->ovinfo_tab[k];
- overlay_name = ovinfo->overlay->full_name;
- overlay_name_len = strlen(overlay_name);
- if (!strncasecmp(symbol_path, overlay_name, overlay_name_len))
+ if (path_len < 1)
+ return NULL;
+ fragment_node = __of_find_node_by_path(ovcs->overlay_tree, path + 1);
+ overlay_node = __of_find_node_by_path(fragment_node, "__overlay__/");
+ of_node_put(fragment_node);
+ of_node_put(overlay_node);
+
+ for (k = 0; k < ovcs->count; k++) {
+ fragment = &ovcs->fragments[k];
+ if (fragment->overlay == overlay_node)
break;
}
+ if (k >= ovcs->count)
+ return NULL;
+
+ overlay_name_len = snprintf(NULL, 0, "%pOF", fragment->overlay);
- if (k >= ov->count)
- goto err_free;
+ if (overlay_name_len > path_len)
+ return NULL;
+ path_tail = path + overlay_name_len;
+ path_tail_len = strlen(path_tail);
- target_path = ovinfo->target->full_name;
+ target_path = kasprintf(GFP_KERNEL, "%pOF", fragment->target);
+ if (!target_path)
+ return NULL;
target_path_len = strlen(target_path);
- label_path = symbol_path + overlay_name_len;
- label_path_len = strlen(label_path);
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ goto err_free_target_path;
- new->name = kstrdup(prop->name, GFP_KERNEL);
- new->length = target_path_len + label_path_len + 1;
- new->value = kzalloc(new->length, GFP_KERNEL);
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->length = target_path_len + path_tail_len + 1;
+ new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value)
+ goto err_free_new_prop;
- if (!new->name || !new->value)
- goto err_free;
+ strcpy(new_prop->value, target_path);
+ strcpy(new_prop->value + target_path_len, path_tail);
- strcpy(new->value, target_path);
- strcpy(new->value + target_path_len, label_path);
+ of_property_set_flag(new_prop, OF_DYNAMIC);
- /* mark the property as dynamic */
- of_property_set_flag(new, OF_DYNAMIC);
+ return new_prop;
- return new;
+err_free_new_prop:
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+err_free_target_path:
+ kfree(target_path);
- err_free:
- kfree(new->name);
- kfree(new->value);
- kfree(new);
return NULL;
-
-
}
-static int of_overlay_apply_single_property(struct of_overlay *ov,
- struct device_node *target, struct property *prop,
- bool is_symbols_node)
+/**
+ * add_changeset_property() - add @overlay_prop to overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @overlay_prop in live tree
+ * @overlay_prop: property to add or update, from overlay tree
+ * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__"
+ *
+ * If @overlay_prop does not already exist in @target_node, add changeset entry
+ * to add @overlay_prop in @target_node, else add changeset entry to update
+ * value of @overlay_prop.
+ *
+ * Some special properties are not updated (no error returned).
+ *
+ * Update of property in symbols node is not allowed.
+ *
+ * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * invalid @overlay_prop.
+ */
+static int add_changeset_property(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ struct property *overlay_prop,
+ bool is_symbols_prop)
{
- struct property *propn = NULL, *tprop;
+ struct property *new_prop = NULL, *prop;
+ int ret = 0;
- /* NOTE: Multiple changes of single properties not supported */
- tprop = of_find_property(target, prop->name, NULL);
+ prop = of_find_property(target_node, overlay_prop->name, NULL);
- /* special properties are not meant to be updated (silent NOP) */
- if (of_prop_cmp(prop->name, "name") == 0 ||
- of_prop_cmp(prop->name, "phandle") == 0 ||
- of_prop_cmp(prop->name, "linux,phandle") == 0)
+ if (!of_prop_cmp(overlay_prop->name, "name") ||
+ !of_prop_cmp(overlay_prop->name, "phandle") ||
+ !of_prop_cmp(overlay_prop->name, "linux,phandle"))
return 0;
- if (is_symbols_node) {
- /* changing a property in __symbols__ node not allowed */
- if (tprop)
+ if (is_symbols_prop) {
+ if (prop)
return -EINVAL;
- propn = dup_and_fixup_symbol_prop(ov, prop);
+ new_prop = dup_and_fixup_symbol_prop(ovcs, overlay_prop);
} else {
- propn = __of_prop_dup(prop, GFP_KERNEL);
+ new_prop = __of_prop_dup(overlay_prop, GFP_KERNEL);
}
- if (propn == NULL)
+ if (!new_prop)
return -ENOMEM;
- /* not found? add */
- if (tprop == NULL)
- return of_changeset_add_property(&ov->cset, target, propn);
-
- /* found? update */
- return of_changeset_update_property(&ov->cset, target, propn);
+ if (!prop)
+ ret = of_changeset_add_property(&ovcs->cset, target_node,
+ new_prop);
+ else
+ ret = of_changeset_update_property(&ovcs->cset, target_node,
+ new_prop);
+
+ if (ret) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+ }
+ return ret;
}
-static int of_overlay_apply_single_device_node(struct of_overlay *ov,
- struct device_node *target, struct device_node *child)
+/**
+ * add_changeset_node() - add @node (and children) to overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @node in live tree
+ * @node: node from within overlay device tree fragment
+ *
+ * If @node does not already exist in @target_node, add changeset entry
+ * to add @node in @target_node.
+ *
+ * If @node already exists in @target_node, and the existing node has
+ * a phandle, the overlay node is not allowed to have a phandle.
+ *
+ * If @node has child nodes, add the children recursively via
+ * build_changeset_next_level().
+ *
+ * NOTE: Multiple mods of created nodes not supported.
+ * If more than one fragment contains a node that does not already exist
+ * in the live tree, then for each fragment of_changeset_attach_node()
+ * will add a changeset entry to add the node. When the changeset is
+ * applied, __of_attach_node() will attach the node twice (once for
+ * each fragment). At this point the device tree will be corrupted.
+ *
+ * TODO: add integrity check to ensure that multiple fragments do not
+ * create the same node.
+ *
+ * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * invalid @node.
+ */
+static int add_changeset_node(struct overlay_changeset *ovcs,
+ struct device_node *target_node, struct device_node *node)
{
- const char *cname;
+ const char *node_kbasename;
struct device_node *tchild;
int ret = 0;
- cname = kbasename(child->full_name);
- if (cname == NULL)
- return -ENOMEM;
+ node_kbasename = kbasename(node->full_name);
- /* NOTE: Multiple mods of created nodes not supported */
- for_each_child_of_node(target, tchild)
- if (!of_node_cmp(cname, kbasename(tchild->full_name)))
+ for_each_child_of_node(target_node, tchild)
+ if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name)))
break;
- if (tchild != NULL) {
- /* new overlay phandle value conflicts with existing value */
- if (child->phandle)
- return -EINVAL;
-
- /* apply overlay recursively */
- ret = of_overlay_apply_one(ov, tchild, child, 0);
- of_node_put(tchild);
- } else {
- /* create empty tree as a target */
- tchild = __of_node_dup(child, "%pOF/%s", target, cname);
+ if (!tchild) {
+ tchild = __of_node_dup(node, "%pOF/%s",
+ target_node, node_kbasename);
if (!tchild)
return -ENOMEM;
- /* point to parent */
- tchild->parent = target;
+ tchild->parent = target_node;
- ret = of_changeset_attach_node(&ov->cset, tchild);
+ ret = of_changeset_attach_node(&ovcs->cset, tchild);
if (ret)
return ret;
- ret = of_overlay_apply_one(ov, tchild, child, 0);
- if (ret)
- return ret;
+ return build_changeset_next_level(ovcs, tchild, node);
}
+ if (node->phandle && tchild->phandle)
+ ret = -EINVAL;
+ else
+ ret = build_changeset_next_level(ovcs, tchild, node);
+ of_node_put(tchild);
+
return ret;
}
-/*
- * Apply a single overlay node recursively.
+/**
+ * build_changeset_next_level() - add level of overlay changeset
+ * @ovcs: overlay changeset
+ * @target_node: where to place @overlay_node in live tree
+ * @overlay_node: node from within an overlay device tree fragment
+ *
+ * Add the properties (if any) and nodes (if any) from @overlay_node to the
+ * @ovcs->cset changeset. If an added node has child nodes, they will
+ * be added recursively.
*
- * Note that the in case of an error the target node is left
- * in a inconsistent state. Error recovery should be performed
- * by using the changeset.
+ * Do not allow symbols node to have any children.
+ *
+ * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * invalid @overlay_node.
*/
-static int of_overlay_apply_one(struct of_overlay *ov,
- struct device_node *target, const struct device_node *overlay,
- bool is_symbols_node)
+static int build_changeset_next_level(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_node)
{
struct device_node *child;
struct property *prop;
int ret;
- for_each_property_of_node(overlay, prop) {
- ret = of_overlay_apply_single_property(ov, target, prop,
- is_symbols_node);
+ for_each_property_of_node(overlay_node, prop) {
+ ret = add_changeset_property(ovcs, target_node, prop, 0);
if (ret) {
- pr_err("Failed to apply prop @%pOF/%s\n",
- target, prop->name);
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+ target_node, prop->name, ret);
return ret;
}
}
- /* do not allow symbols node to have any children */
- if (is_symbols_node)
- return 0;
-
- for_each_child_of_node(overlay, child) {
- ret = of_overlay_apply_single_device_node(ov, target, child);
- if (ret != 0) {
- pr_err("Failed to apply single node @%pOF/%s\n",
- target, child->name);
+ for_each_child_of_node(overlay_node, child) {
+ ret = add_changeset_node(ovcs, target_node, child);
+ if (ret) {
+ pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
+ target_node, child->name, ret);
of_node_put(child);
return ret;
}
@@ -279,28 +405,72 @@ static int of_overlay_apply_one(struct of_overlay *ov,
return 0;
}
+/*
+ * Add the properties from __overlay__ node to the @ovcs->cset changeset.
+ */
+static int build_changeset_symbols_node(struct overlay_changeset *ovcs,
+ struct device_node *target_node,
+ const struct device_node *overlay_symbols_node)
+{
+ struct property *prop;
+ int ret;
+
+ for_each_property_of_node(overlay_symbols_node, prop) {
+ ret = add_changeset_property(ovcs, target_node, prop, 1);
+ if (ret) {
+ pr_debug("Failed to apply prop @%pOF/%s, err=%d\n",
+ target_node, prop->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/**
- * of_overlay_apply() - Apply @count overlays pointed at by @ovinfo_tab
- * @ov: Overlay to apply
+ * build_changeset() - populate overlay changeset in @ovcs from @ovcs->fragments
+ * @ovcs: Overlay changeset
*
- * Applies the overlays given, while handling all error conditions
- * appropriately. Either the operation succeeds, or if it fails the
- * live tree is reverted to the state before the attempt.
- * Returns 0, or an error if the overlay attempt failed.
+ * Create changeset @ovcs->cset to contain the nodes and properties of the
+ * overlay device tree fragments in @ovcs->fragments[]. If an error occurs,
+ * any portions of the changeset that were successfully created will remain
+ * in @ovcs->cset.
+ *
+ * Returns 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if
+ * invalid overlay in @ovcs->fragments[].
*/
-static int of_overlay_apply(struct of_overlay *ov)
+static int build_changeset(struct overlay_changeset *ovcs)
{
- int i, err;
-
- /* first we apply the overlays atomically */
- for (i = 0; i < ov->count; i++) {
- struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+ struct fragment *fragment;
+ int fragments_count, i, ret;
+
+ /*
+ * if there is a symbols fragment in ovcs->fragments[i] it is
+ * the final element in the array
+ */
+ if (ovcs->symbols_fragment)
+ fragments_count = ovcs->count - 1;
+ else
+ fragments_count = ovcs->count;
+
+ for (i = 0; i < fragments_count; i++) {
+ fragment = &ovcs->fragments[i];
+
+ ret = build_changeset_next_level(ovcs, fragment->target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+ return ret;
+ }
+ }
- err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay,
- ovinfo->is_symbols_node);
- if (err != 0) {
- pr_err("apply failed '%pOF'\n", ovinfo->target);
- return err;
+ if (ovcs->symbols_fragment) {
+ fragment = &ovcs->fragments[ovcs->count - 1];
+ ret = build_changeset_symbols_node(ovcs, fragment->target,
+ fragment->overlay);
+ if (ret) {
+ pr_debug("apply failed '%pOF'\n", fragment->target);
+ return ret;
}
}
@@ -309,10 +479,10 @@ static int of_overlay_apply(struct of_overlay *ov)
/*
* Find the target node using a number of different strategies
- * in order of preference
+ * in order of preference:
*
- * "target" property containing the phandle of the target
- * "target-path" property containing the path of the target
+ * 1) "target" property containing the phandle of the target
+ * 2) "target-path" property containing the path of the target
*/
static struct device_node *find_target_node(struct device_node *info_node)
{
@@ -320,14 +490,12 @@ static struct device_node *find_target_node(struct device_node *info_node)
u32 val;
int ret;
- /* first try to go by using the target as a phandle */
ret = of_property_read_u32(info_node, "target", &val);
- if (ret == 0)
+ if (!ret)
return of_find_node_by_phandle(val);
- /* now try to locate by path */
ret = of_property_read_string(info_node, "target-path", &path);
- if (ret == 0)
+ if (!ret)
return of_find_node_by_path(path);
pr_err("Failed to find target for node %p (%s)\n",
@@ -337,228 +505,290 @@ static struct device_node *find_target_node(struct device_node *info_node)
}
/**
- * of_fill_overlay_info() - Fill an overlay info structure
- * @ov Overlay to fill
- * @info_node: Device node containing the overlay
- * @ovinfo: Pointer to the overlay info structure to fill
- *
- * Fills an overlay info structure with the overlay information
- * from a device node. This device node must have a target property
- * which contains a phandle of the overlay target node, and an
- * __overlay__ child node which has the overlay contents.
- * Both ovinfo->target & ovinfo->overlay have their references taken.
- *
- * Returns 0 on success, or a negative error value.
+ * init_overlay_changeset() - initialize overlay changeset from overlay tree
+ * @ovcs: Overlay changeset to build
+ * @tree: Contains all the overlay fragments and overlay fixup nodes
+ *
+ * Initialize @ovcs. Populate @ovcs->fragments with node information from
+ * the top level of @tree. The relevant top level nodes are the fragment
+ * nodes and the __symbols__ node. Any other top level node will be ignored.
+ *
+ * Returns 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error
+ * detected in @tree, or -ENOSPC if idr_alloc() error.
*/
-static int of_fill_overlay_info(struct of_overlay *ov,
- struct device_node *info_node, struct of_overlay_info *ovinfo)
+static int init_overlay_changeset(struct overlay_changeset *ovcs,
+ struct device_node *tree)
{
- ovinfo->overlay = of_get_child_by_name(info_node, "__overlay__");
- if (ovinfo->overlay == NULL)
- goto err_fail;
+ struct device_node *node, *overlay_node;
+ struct fragment *fragment;
+ struct fragment *fragments;
+ int cnt, ret;
- ovinfo->target = find_target_node(info_node);
- if (ovinfo->target == NULL)
- goto err_fail;
+ /*
+ * Warn for some issues. Can not return -EINVAL for these until
+ * of_unittest_apply_overlay() is fixed to pass these checks.
+ */
+ if (!of_node_check_flag(tree, OF_DYNAMIC))
+ pr_debug("%s() tree is not dynamic\n", __func__);
- return 0;
+ if (!of_node_check_flag(tree, OF_DETACHED))
+ pr_debug("%s() tree is not detached\n", __func__);
-err_fail:
- of_node_put(ovinfo->target);
- of_node_put(ovinfo->overlay);
+ if (!of_node_is_root(tree))
+ pr_debug("%s() tree is not root\n", __func__);
- memset(ovinfo, 0, sizeof(*ovinfo));
- return -EINVAL;
-}
+ ovcs->overlay_tree = tree;
-/**
- * of_build_overlay_info() - Build an overlay info array
- * @ov Overlay to build
- * @tree: Device node containing all the overlays
- *
- * Helper function that given a tree containing overlay information,
- * allocates and builds an overlay info array containing it, ready
- * for use using of_overlay_apply.
- *
- * Returns 0 on success with the @cntp @ovinfop pointers valid,
- * while on error a negative error value is returned.
- */
-static int of_build_overlay_info(struct of_overlay *ov,
- struct device_node *tree)
-{
- struct device_node *node;
- struct of_overlay_info *ovinfo;
- int cnt, err;
+ INIT_LIST_HEAD(&ovcs->ovcs_list);
+
+ of_changeset_init(&ovcs->cset);
+
+ ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL);
+ if (ovcs->id <= 0)
+ return ovcs->id;
- /* worst case; every child is a node */
cnt = 0;
- for_each_child_of_node(tree, node)
- cnt++;
- if (of_get_child_by_name(tree, "__symbols__"))
+ /* fragment nodes */
+ for_each_child_of_node(tree, node) {
+ overlay_node = of_get_child_by_name(node, "__overlay__");
+ if (overlay_node) {
+ cnt++;
+ of_node_put(overlay_node);
+ }
+ }
+
+ node = of_get_child_by_name(tree, "__symbols__");
+ if (node) {
cnt++;
+ of_node_put(node);
+ }
- ovinfo = kcalloc(cnt, sizeof(*ovinfo), GFP_KERNEL);
- if (ovinfo == NULL)
- return -ENOMEM;
+ fragments = kcalloc(cnt, sizeof(*fragments), GFP_KERNEL);
+ if (!fragments) {
+ ret = -ENOMEM;
+ goto err_free_idr;
+ }
cnt = 0;
for_each_child_of_node(tree, node) {
- err = of_fill_overlay_info(ov, node, &ovinfo[cnt]);
- if (err == 0)
- cnt++;
+ fragment = &fragments[cnt];
+ fragment->overlay = of_get_child_by_name(node, "__overlay__");
+ if (fragment->overlay) {
+ fragment->target = find_target_node(node);
+ if (!fragment->target) {
+ of_node_put(fragment->overlay);
+ ret = -EINVAL;
+ goto err_free_fragments;
+ } else {
+ cnt++;
+ }
+ }
}
+ /*
+ * if there is a symbols fragment in ovcs->fragments[i] it is
+ * the final element in the array
+ */
node = of_get_child_by_name(tree, "__symbols__");
if (node) {
- ovinfo[cnt].overlay = node;
- ovinfo[cnt].target = of_find_node_by_path("/__symbols__");
- ovinfo[cnt].is_symbols_node = 1;
-
- if (!ovinfo[cnt].target) {
- pr_err("no symbols in root of device tree.\n");
- return -EINVAL;
+ ovcs->symbols_fragment = 1;
+ fragment = &fragments[cnt];
+ fragment->overlay = node;
+ fragment->target = of_find_node_by_path("/__symbols__");
+
+ if (!fragment->target) {
+ pr_err("symbols in overlay, but not in live tree\n");
+ ret = -EINVAL;
+ goto err_free_fragments;
}
cnt++;
}
- /* if nothing filled, return error */
- if (cnt == 0) {
- kfree(ovinfo);
- return -ENODEV;
+ if (!cnt) {
+ ret = -EINVAL;
+ goto err_free_fragments;
}
- ov->count = cnt;
- ov->ovinfo_tab = ovinfo;
+ ovcs->count = cnt;
+ ovcs->fragments = fragments;
return 0;
+
+err_free_fragments:
+ kfree(fragments);
+err_free_idr:
+ idr_remove(&ovcs_idr, ovcs->id);
+
+ pr_err("%s() failed, ret = %d\n", __func__, ret);
+
+ return ret;
}
-/**
- * of_free_overlay_info() - Free an overlay info array
- * @ov Overlay to free the overlay info from
- * @ovinfo_tab: Array of overlay_info's to free
- *
- * Releases the memory of a previously allocated ovinfo array
- * by of_build_overlay_info.
- * Returns 0, or an error if the arguments are bogus.
- */
-static int of_free_overlay_info(struct of_overlay *ov)
+static void free_overlay_changeset(struct overlay_changeset *ovcs)
{
- struct of_overlay_info *ovinfo;
int i;
- /* do it in reverse */
- for (i = ov->count - 1; i >= 0; i--) {
- ovinfo = &ov->ovinfo_tab[i];
+ if (!ovcs->cset.entries.next)
+ return;
+ of_changeset_destroy(&ovcs->cset);
+
+ if (ovcs->id)
+ idr_remove(&ovcs_idr, ovcs->id);
- of_node_put(ovinfo->target);
- of_node_put(ovinfo->overlay);
+ for (i = 0; i < ovcs->count; i++) {
+ of_node_put(ovcs->fragments[i].target);
+ of_node_put(ovcs->fragments[i].overlay);
}
- kfree(ov->ovinfo_tab);
+ kfree(ovcs->fragments);
- return 0;
+ kfree(ovcs);
}
-static LIST_HEAD(ov_list);
-static DEFINE_IDR(ov_idr);
-
/**
- * of_overlay_create() - Create and apply an overlay
- * @tree: Device node containing all the overlays
+ * of_overlay_apply() - Create and apply an overlay changeset
+ * @tree: Expanded overlay device tree
+ * @ovcs_id: Pointer to overlay changeset id
*
- * Creates and applies an overlay while also keeping track
- * of the overlay in a list. This list can be used to prevent
- * illegal overlay removals.
+ * Creates and applies an overlay changeset.
+ *
+ * If an error occurs in a pre-apply notifier, then no changes are made
+ * to the device tree.
+ *
+ * A non-zero return value will not have created the changeset if error is from:
+ * - parameter checks
+ * - building the changeset
+ * - overlay changeset pre-apply notifier
*
- * Returns the id of the created overlay, or a negative error number
+ * If an error is returned by an overlay changeset pre-apply notifier
+ * then no further overlay changeset pre-apply notifier will be called.
+ *
+ * A non-zero return value will have created the changeset if error is from:
+ * - overlay changeset entry notifier
+ * - overlay changeset post-apply notifier
+ *
+ * If an error is returned by an overlay changeset post-apply notifier
+ * then no further overlay changeset post-apply notifier will be called.
+ *
+ * If more than one notifier returns an error, then the last notifier
+ * error to occur is returned.
+ *
+ * If an error occurred while applying the overlay changeset, then an
+ * attempt is made to revert any changes that were made to the
+ * device tree. If there were any errors during the revert attempt
+ * then the state of the device tree can not be determined, and any
+ * following attempt to apply or remove an overlay changeset will be
+ * refused.
+ *
+ * Returns 0 on success, or a negative error number. Overlay changeset
+ * id is returned to *ovcs_id.
*/
-int of_overlay_create(struct device_node *tree)
+
+int of_overlay_apply(struct device_node *tree, int *ovcs_id)
{
- struct of_overlay *ov;
- int err, id;
+ struct overlay_changeset *ovcs;
+ int ret = 0, ret_revert, ret_tmp;
- /* allocate the overlay structure */
- ov = kzalloc(sizeof(*ov), GFP_KERNEL);
- if (ov == NULL)
- return -ENOMEM;
- ov->id = -1;
+ *ovcs_id = 0;
+
+ if (devicetree_corrupt()) {
+ pr_err("devicetree state suspect, refuse to apply overlay\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ovcs = kzalloc(sizeof(*ovcs), GFP_KERNEL);
+ if (!ovcs) {
+ ret = -ENOMEM;
+ goto out;
+ }
- INIT_LIST_HEAD(&ov->node);
+ of_overlay_mutex_lock();
- of_changeset_init(&ov->cset);
+ ret = of_resolve_phandles(tree);
+ if (ret)
+ goto err_overlay_unlock;
mutex_lock(&of_mutex);
- id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL);
- if (id < 0) {
- err = id;
- goto err_destroy_trans;
- }
- ov->id = id;
+ ret = init_overlay_changeset(ovcs, tree);
+ if (ret)
+ goto err_free_overlay_changeset;
- /* build the overlay info structures */
- err = of_build_overlay_info(ov, tree);
- if (err) {
- pr_err("of_build_overlay_info() failed for tree@%pOF\n",
- tree);
- goto err_free_idr;
+ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
+ if (ret) {
+ pr_err("overlay changeset pre-apply notify error %d\n", ret);
+ goto err_free_overlay_changeset;
}
- err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY);
- if (err < 0) {
- pr_err("%s: Pre-apply notifier failed (err=%d)\n",
- __func__, err);
- goto err_free_idr;
+ ret = build_changeset(ovcs);
+ if (ret)
+ goto err_free_overlay_changeset;
+
+ ret_revert = 0;
+ ret = __of_changeset_apply_entries(&ovcs->cset, &ret_revert);
+ if (ret) {
+ if (ret_revert) {
+ pr_debug("overlay changeset revert error %d\n",
+ ret_revert);
+ devicetree_state_flags |= DTSF_APPLY_FAIL;
+ }
+ goto err_free_overlay_changeset;
+ } else {
+ ret = __of_changeset_apply_notify(&ovcs->cset);
+ if (ret)
+ pr_err("overlay changeset entry notify error %d\n",
+ ret);
+ /* fall through */
}
- /* apply the overlay */
- err = of_overlay_apply(ov);
- if (err)
- goto err_abort_trans;
-
- /* apply the changeset */
- err = __of_changeset_apply(&ov->cset);
- if (err)
- goto err_revert_overlay;
+ list_add_tail(&ovcs->ovcs_list, &ovcs_list);
+ *ovcs_id = ovcs->id;
+ ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_APPLY);
+ if (ret_tmp) {
+ pr_err("overlay changeset post-apply notify error %d\n",
+ ret_tmp);
+ if (!ret)
+ ret = ret_tmp;
+ }
- /* add to the tail of the overlay list */
- list_add_tail(&ov->node, &ov_list);
+ mutex_unlock(&of_mutex);
+ of_overlay_mutex_unlock();
- of_overlay_notify(ov, OF_OVERLAY_POST_APPLY);
+ goto out;
- mutex_unlock(&of_mutex);
+err_overlay_unlock:
+ of_overlay_mutex_unlock();
- return id;
+err_free_overlay_changeset:
+ free_overlay_changeset(ovcs);
-err_revert_overlay:
-err_abort_trans:
- of_free_overlay_info(ov);
-err_free_idr:
- idr_remove(&ov_idr, ov->id);
-err_destroy_trans:
- of_changeset_destroy(&ov->cset);
- kfree(ov);
mutex_unlock(&of_mutex);
- return err;
+out:
+ pr_debug("%s() err=%d\n", __func__, ret);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_overlay_create);
+EXPORT_SYMBOL_GPL(of_overlay_apply);
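Callers now receive the changeset id through *ovcs_id and later hand it to of_overlay_remove(), which is converted further below. A sketch of the new calling convention, assuming the overlay tree comes from an unflattened, expanded overlay (names are illustrative):

    static int example_apply_and_remove(struct device_node *overlay_tree)
    {
    	int ovcs_id = 0;
    	int ret;

    	ret = of_overlay_apply(overlay_tree, &ovcs_id);
    	if (ret)
    		return ret;

    	/* ... live tree now contains the overlay ... */

    	/* reverts the changeset and sets ovcs_id back to 0 */
    	return of_overlay_remove(&ovcs_id);
    }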
-/* check whether the given node, lies under the given tree */
-static int overlay_subtree_check(struct device_node *tree,
- struct device_node *dn)
+/*
+ * Find @np in @tree.
+ *
+ * Returns 1 if @np is @tree or is contained in @tree, else 0
+ */
+static int find_node(struct device_node *tree, struct device_node *np)
{
struct device_node *child;
- /* match? */
- if (tree == dn)
+ if (tree == np)
return 1;
for_each_child_of_node(tree, child) {
- if (overlay_subtree_check(child, dn)) {
+ if (find_node(child, np)) {
of_node_put(child);
return 1;
}
@@ -567,29 +797,39 @@ static int overlay_subtree_check(struct device_node *tree,
return 0;
}
-/* check whether this overlay is the topmost */
-static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
+/*
+ * Is @remove_ce_node a child of, a parent of, or the same as any
+ * node in an overlay changeset more topmost than @remove_ovcs?
+ *
+ * Returns 1 if found, else 0
+ */
+static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs,
+ struct device_node *remove_ce_node)
{
- struct of_overlay *ovt;
+ struct overlay_changeset *ovcs;
struct of_changeset_entry *ce;
- list_for_each_entry_reverse(ovt, &ov_list, node) {
- /* if we hit ourselves, we're done */
- if (ovt == ov)
+ list_for_each_entry_reverse(ovcs, &ovcs_list, ovcs_list) {
+ if (ovcs == remove_ovcs)
break;
- /* check against each subtree affected by this overlay */
- list_for_each_entry(ce, &ovt->cset.entries, node) {
- if (overlay_subtree_check(ce->np, dn)) {
- pr_err("%s: #%d clashes #%d @%pOF\n",
- __func__, ov->id, ovt->id, dn);
- return 0;
+ list_for_each_entry(ce, &ovcs->cset.entries, node) {
+ if (find_node(ce->np, remove_ce_node)) {
+ pr_err("%s: #%d overlaps with #%d @%pOF\n",
+ __func__, remove_ovcs->id, ovcs->id,
+ remove_ce_node);
+ return 1;
+ }
+ if (find_node(remove_ce_node, ce->np)) {
+ pr_err("%s: #%d overlaps with #%d @%pOF\n",
+ __func__, remove_ovcs->id, ovcs->id,
+ remove_ce_node);
+ return 1;
}
}
}
- /* overlay is topmost */
- return 1;
+ return 0;
}
/*
@@ -602,13 +842,13 @@ static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
* the one closest to the tail. If another overlay has affected this
 * device node and is closest to the tail, then removal is not permitted.
*/
-static int overlay_removal_is_ok(struct of_overlay *ov)
+static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
{
- struct of_changeset_entry *ce;
+ struct of_changeset_entry *remove_ce;
- list_for_each_entry(ce, &ov->cset.entries, node) {
- if (!overlay_is_topmost(ov, ce->np)) {
- pr_err("overlay #%d is not topmost\n", ov->id);
+ list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) {
+ if (node_overlaps_later_cs(remove_ovcs, remove_ce->np)) {
+ pr_err("overlay #%d is not topmost\n", remove_ovcs->id);
return 0;
}
}
@@ -617,75 +857,130 @@ static int overlay_removal_is_ok(struct of_overlay *ov)
}
/**
- * of_overlay_destroy() - Removes an overlay
- * @id: Overlay id number returned by a previous call to of_overlay_create
+ * of_overlay_remove() - Revert and free an overlay changeset
+ * @ovcs_id: Pointer to overlay changeset id
*
- * Removes an overlay if it is permissible.
+ * Removes an overlay if it is permissible. @ovcs_id was previously returned
+ * by of_overlay_apply().
*
- * Returns 0 on success, or a negative error number
+ * If an error occurred while attempting to revert the overlay changeset,
+ * then an attempt is made to re-apply any changeset entry that was
+ * reverted. If an error occurs on re-apply then the state of the device
+ * tree cannot be determined, and any following attempt to apply or remove
+ * an overlay changeset will be refused.
+ *
+ * A non-zero return value will not revert the changeset if error is from:
+ * - parameter checks
+ * - overlay changeset pre-remove notifier
+ * - overlay changeset entry revert
+ *
+ * If an error is returned by an overlay changeset pre-remove notifier
+ * then no further overlay changeset pre-remove notifier will be called.
+ *
+ * If more than one notifier returns an error, then the last notifier
+ * error to occur is returned.
+ *
+ * A non-zero return value will revert the changeset if error is from:
+ * - overlay changeset entry notifier
+ * - overlay changeset post-remove notifier
+ *
+ * If an error is returned by an overlay changeset post-remove notifier
+ * then no further overlay changeset post-remove notifier will be called.
+ *
+ * Returns 0 on success, or a negative error number. *ovcs_id is set to
+ * zero after reverting the changeset, even if a subsequent error occurs.
*/
-int of_overlay_destroy(int id)
+int of_overlay_remove(int *ovcs_id)
{
- struct of_overlay *ov;
- int err;
+ struct overlay_changeset *ovcs;
+ int ret, ret_apply, ret_tmp;
- mutex_lock(&of_mutex);
+ ret = 0;
- ov = idr_find(&ov_idr, id);
- if (ov == NULL) {
- err = -ENODEV;
- pr_err("destroy: Could not find overlay #%d\n", id);
+ if (devicetree_corrupt()) {
+ pr_err("suspect devicetree state, refuse to remove overlay\n");
+ ret = -EBUSY;
goto out;
}
- /* check whether the overlay is safe to remove */
- if (!overlay_removal_is_ok(ov)) {
- err = -EBUSY;
- goto out;
+ mutex_lock(&of_mutex);
+
+ ovcs = idr_find(&ovcs_idr, *ovcs_id);
+ if (!ovcs) {
+ ret = -ENODEV;
+ pr_err("remove: Could not find overlay #%d\n", *ovcs_id);
+ goto out_unlock;
}
- of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE);
- list_del(&ov->node);
- __of_changeset_revert(&ov->cset);
- of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE);
- of_free_overlay_info(ov);
- idr_remove(&ov_idr, id);
- of_changeset_destroy(&ov->cset);
- kfree(ov);
+ if (!overlay_removal_is_ok(ovcs)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
- err = 0;
+ ret = overlay_notify(ovcs, OF_OVERLAY_PRE_REMOVE);
+ if (ret) {
+ pr_err("overlay changeset pre-remove notify error %d\n", ret);
+ goto out_unlock;
+ }
-out:
+ list_del(&ovcs->ovcs_list);
+
+ ret_apply = 0;
+ ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+ if (ret) {
+ if (ret_apply)
+ devicetree_state_flags |= DTSF_REVERT_FAIL;
+ goto out_unlock;
+ } else {
+ ret = __of_changeset_revert_notify(&ovcs->cset);
+ if (ret) {
+ pr_err("overlay changeset entry notify error %d\n",
+ ret);
+ /* fall through - changeset was reverted */
+ }
+ }
+
+ *ovcs_id = 0;
+
+ ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE);
+ if (ret_tmp) {
+ pr_err("overlay changeset post-remove notify error %d\n",
+ ret_tmp);
+ if (!ret)
+ ret = ret_tmp;
+ }
+
+ free_overlay_changeset(ovcs);
+
+out_unlock:
mutex_unlock(&of_mutex);
- return err;
+out:
+ pr_debug("%s() err=%d\n", __func__, ret);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(of_overlay_destroy);
+EXPORT_SYMBOL_GPL(of_overlay_remove);
/**
- * of_overlay_destroy_all() - Removes all overlays from the system
+ * of_overlay_remove_all() - Reverts and frees all overlay changesets
*
* Removes all overlays from the system in the correct order.
*
* Returns 0 on success, or a negative error number
*/
-int of_overlay_destroy_all(void)
+int of_overlay_remove_all(void)
{
- struct of_overlay *ov, *ovn;
-
- mutex_lock(&of_mutex);
+ struct overlay_changeset *ovcs, *ovcs_n;
+ int ret;
/* the tail of list is guaranteed to be safe to remove */
- list_for_each_entry_safe_reverse(ov, ovn, &ov_list, node) {
- list_del(&ov->node);
- __of_changeset_revert(&ov->cset);
- of_free_overlay_info(ov);
- idr_remove(&ov_idr, ov->id);
- kfree(ov);
+ list_for_each_entry_safe_reverse(ovcs, ovcs_n, &ovcs_list, ovcs_list) {
+ ret = of_overlay_remove(&ovcs->id);
+ if (ret)
+ return ret;
}
- mutex_unlock(&of_mutex);
-
return 0;
}
-EXPORT_SYMBOL_GPL(of_overlay_destroy_all);
+EXPORT_SYMBOL_GPL(of_overlay_remove_all);
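For reference, a minimal sketch (not part of the patch) of how a caller drives the reworked API: of_overlay_apply() now returns the changeset id through a pointer, and of_overlay_remove() takes that id by reference and zeroes it after the revert, which is exactly how the unittest conversions further below use it. The function and tree names here are hypothetical.

#include <linux/of.h>

static int example_overlay_apply_remove(struct device_node *overlay_root)
{
	int ovcs_id = 0;
	int ret;

	ret = of_overlay_apply(overlay_root, &ovcs_id);
	if (ret < 0)
		return ret;	/* the changeset was already freed on failure */

	/* ... devices created by the overlay are usable here ... */

	/* of_overlay_remove() sets ovcs_id to zero once the revert is done */
	return of_overlay_remove(&ovcs_id);
}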
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ac15d0e3d27d..b7cf84b29737 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -497,6 +497,12 @@ int of_platform_default_populate(struct device_node *root,
EXPORT_SYMBOL_GPL(of_platform_default_populate);
#ifndef CONFIG_PPC
+static const struct of_device_id reserved_mem_matches[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ { .compatible = "ramoops" },
+ {}
+};
+
static int __init of_platform_default_populate_init(void)
{
struct device_node *node;
@@ -505,15 +511,12 @@ static int __init of_platform_default_populate_init(void)
return -ENODEV;
/*
- * Handle ramoops explicitly, since it is inside /reserved-memory,
- * which lacks a "compatible" property.
+ * Handle certain compatibles explicitly, since we don't want to create
+ * platform_devices for every node in /reserved-memory with a
+ * "compatible",
*/
- node = of_find_node_by_path("/reserved-memory");
- if (node) {
- node = of_find_compatible_node(node, NULL, "ramoops");
- if (node)
- of_platform_device_create(node, NULL, NULL);
- }
+ for_each_matching_node(node, reserved_mem_matches)
+ of_platform_device_create(node, NULL, NULL);
/* Populate everything else. */
of_platform_default_populate(NULL, NULL, NULL);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 264c355ba1ff..8ad33a44a7b8 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -817,9 +817,9 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
}
EXPORT_SYMBOL(of_graph_get_remote_node);
-static void of_fwnode_get(struct fwnode_handle *fwnode)
+static struct fwnode_handle *of_fwnode_get(struct fwnode_handle *fwnode)
{
- of_node_get(to_of_node(fwnode));
+ return of_fwnode_handle(of_node_get(to_of_node(fwnode)));
}
static void of_fwnode_put(struct fwnode_handle *fwnode)
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 99309cb7d372..cfaeef5f6cb1 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -84,10 +84,9 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
int offset, len;
int err = 0;
- value = kmalloc(prop_fixup->length, GFP_KERNEL);
+ value = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL);
if (!value)
return -ENOMEM;
- memcpy(value, prop_fixup->value, prop_fixup->length);
/* prop_fixup contains a list of tuples of path:property_name:offset */
end = value + prop_fixup->length;
@@ -165,7 +164,6 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
struct property *prop_fix, *prop;
int err, i, count;
unsigned int off;
- phandle phandle;
if (!local_fixups)
return 0;
@@ -195,9 +193,7 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
if ((off + 4) > prop->length)
return -EINVAL;
- phandle = be32_to_cpu(*(__be32 *)(prop->value + off));
- phandle += phandle_delta;
- *(__be32 *)(prop->value + off) = cpu_to_be32(phandle);
+ be32_add_cpu(prop->value + off, phandle_delta);
}
}
@@ -275,11 +271,18 @@ int of_resolve_phandles(struct device_node *overlay)
err = -EINVAL;
goto out;
}
+
+#if 0
+ Temporarily disable check so that old style overlay unittests
+ do not fail when of_resolve_phandles() is moved into
+ of_overlay_apply().
+
if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
err = -EINVAL;
goto out;
}
+#endif
phandle_delta = live_tree_max_phandle() + 1;
adjust_overlay_phandles(overlay, phandle_delta);
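As an aside, the be32_add_cpu() conversion a couple of hunks above in this file replaces the removed read/convert/add/write sequence with the generic byteorder helper. A sketch of the equivalence, matching the removed lines rather than quoting any header:

#include <linux/types.h>
#include <asm/byteorder.h>

static inline void be32_add_cpu_equivalent(__be32 *var, u32 val)
{
	/* what the three removed lines did by hand */
	*var = cpu_to_be32(be32_to_cpu(*var) + val);
}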
diff --git a/drivers/of/unittest-data/.gitignore b/drivers/of/unittest-data/.gitignore
deleted file mode 100644
index 4b3cf8b16de2..000000000000
--- a/drivers/of/unittest-data/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-testcases.dtb
-testcases.dtb.S
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 2d135fba94c1..32389acfa616 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+DTC_FLAGS_testcases := -Wno-interrupts_property
obj-y += testcases.dtb.o
targets += testcases.dtb testcases.dtb.S
diff --git a/drivers/of/unittest-data/overlay.dts b/drivers/of/unittest-data/overlay.dts
index 9e791fcf05dd..ab5e89b5e27e 100644
--- a/drivers/of/unittest-data/overlay.dts
+++ b/drivers/of/unittest-data/overlay.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
/plugin/;
diff --git a/drivers/of/unittest-data/overlay_bad_phandle.dts b/drivers/of/unittest-data/overlay_bad_phandle.dts
index 270ee885a623..4d5b99723bad 100644
--- a/drivers/of/unittest-data/overlay_bad_phandle.dts
+++ b/drivers/of/unittest-data/overlay_bad_phandle.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
/plugin/;
diff --git a/drivers/of/unittest-data/overlay_base.dts b/drivers/of/unittest-data/overlay_base.dts
index 453d0bd83320..820b79ca378a 100644
--- a/drivers/of/unittest-data/overlay_base.dts
+++ b/drivers/of/unittest-data/overlay_base.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
/plugin/;
diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts
index 12f7c3d649c8..55fe0ee20109 100644
--- a/drivers/of/unittest-data/testcases.dts
+++ b/drivers/of/unittest-data/testcases.dts
@@ -1,4 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
+/plugin/;
+
/ {
testcase-data {
changeset {
@@ -14,66 +17,3 @@
#include "tests-match.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
-
-/*
- * phandle fixup data - generated by dtc patches that aren't upstream.
- * This data must be regenerated whenever phandle references are modified in
- * the testdata tree.
- *
- * The format of this data may be subject to change. For the time being consider
- * this a kernel-internal data format.
- */
-/ { __local_fixups__ {
- testcase-data {
- phandle-tests {
- consumer-a {
- phandle-list = <0x00000000 0x00000008
- 0x00000018 0x00000028
- 0x00000034 0x00000038>;
- phandle-list-bad-args = <0x00000000 0x0000000c>;
- };
- };
- interrupts {
- intmap0 {
- interrupt-map = <0x00000004 0x00000010
- 0x00000024 0x00000034>;
- };
- intmap1 {
- interrupt-map = <0x0000000c>;
- };
- interrupts0 {
- interrupt-parent = <0x00000000>;
- };
- interrupts1 {
- interrupt-parent = <0x00000000>;
- };
- interrupts-extended0 {
- interrupts-extended = <0x00000000 0x00000008
- 0x00000018 0x00000024
- 0x0000002c 0x00000034
- 0x0000003c>;
- };
- };
- testcase-device1 {
- interrupt-parent = <0x00000000>;
- };
- testcase-device2 {
- interrupt-parent = <0x00000000>;
- };
- overlay2 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay3 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- overlay4 {
- fragment@0 {
- target = <0x00000000>;
- };
- };
- };
-}; };
diff --git a/drivers/of/unittest-data/tests-interrupts.dtsi b/drivers/of/unittest-data/tests-interrupts.dtsi
index da4695f60351..ec175e800725 100644
--- a/drivers/of/unittest-data/tests-interrupts.dtsi
+++ b/drivers/of/unittest-data/tests-interrupts.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
testcase-data {
diff --git a/drivers/of/unittest-data/tests-match.dtsi b/drivers/of/unittest-data/tests-match.dtsi
index c9e541129534..1fd3b21313f5 100644
--- a/drivers/of/unittest-data/tests-match.dtsi
+++ b/drivers/of/unittest-data/tests-match.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
testcase-data {
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 02ba56c20fe1..7b8001ab9f3a 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
testcase-data {
diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi
index 5b1527e8a7fb..3c2f09e56b61 100644
--- a/drivers/of/unittest-data/tests-phandle.dtsi
+++ b/drivers/of/unittest-data/tests-phandle.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
aliases {
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index a0c93822aee3..fa39611071b3 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/ {
testcase-data {
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 29a35cb1da64..e568b1e82501 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Self tests for device tree subsystem
*/
@@ -993,10 +994,17 @@ static int __init unittest_data_add(void)
pr_warn("%s: No tree to attach; not running tests\n", __func__);
return -ENODATA;
}
- of_node_set_flag(unittest_data_node, OF_DETACHED);
+
+ /*
+ * This lock normally encloses of_overlay_apply() as well as
+ * of_resolve_phandles().
+ */
+ of_overlay_mutex_lock();
+
rc = of_resolve_phandles(unittest_data_node);
if (rc) {
pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc);
+ of_overlay_mutex_unlock();
return -EINVAL;
}
@@ -1006,6 +1014,7 @@ static int __init unittest_data_add(void)
__of_attach_node_sysfs(np);
of_aliases = of_find_node_by_path("/aliases");
of_chosen = of_find_node_by_path("/chosen");
+ of_overlay_mutex_unlock();
return 0;
}
@@ -1018,6 +1027,9 @@ static int __init unittest_data_add(void)
attach_node_and_children(np);
np = next;
}
+
+ of_overlay_mutex_unlock();
+
return 0;
}
@@ -1218,7 +1230,7 @@ static void of_unittest_untrack_overlay(int id)
static void of_unittest_destroy_tracked_overlays(void)
{
- int id, ret, defers;
+ int id, ret, defers, ovcs_id;
if (overlay_first_id < 0)
return;
@@ -1231,7 +1243,8 @@ static void of_unittest_destroy_tracked_overlays(void)
if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
continue;
- ret = of_overlay_destroy(id + overlay_first_id);
+ ovcs_id = id + overlay_first_id;
+ ret = of_overlay_remove(&ovcs_id);
if (ret == -ENODEV) {
pr_warn("%s: no overlay to destroy for #%d\n",
__func__, id + overlay_first_id);
@@ -1253,7 +1266,7 @@ static int of_unittest_apply_overlay(int overlay_nr, int unittest_nr,
int *overlay_id)
{
struct device_node *np = NULL;
- int ret, id = -1;
+ int ret;
np = of_find_node_by_path(overlay_path(overlay_nr));
if (np == NULL) {
@@ -1263,23 +1276,20 @@ static int of_unittest_apply_overlay(int overlay_nr, int unittest_nr,
goto out;
}
- ret = of_overlay_create(np);
+ *overlay_id = 0;
+ ret = of_overlay_apply(np, overlay_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr));
goto out;
}
- id = ret;
- of_unittest_track_overlay(id);
+ of_unittest_track_overlay(*overlay_id);
ret = 0;
out:
of_node_put(np);
- if (overlay_id)
- *overlay_id = id;
-
return ret;
}
@@ -1287,7 +1297,7 @@ out:
static int of_unittest_apply_overlay_check(int overlay_nr, int unittest_nr,
int before, int after, enum overlay_type ovtype)
{
- int ret;
+ int ret, ovcs_id;
/* unittest device must not be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1298,7 +1308,8 @@ static int of_unittest_apply_overlay_check(int overlay_nr, int unittest_nr,
return -EINVAL;
}
- ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, NULL);
+ ovcs_id = 0;
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ovcs_id);
if (ret != 0) {
/* of_unittest_apply_overlay already called unittest() */
return ret;
@@ -1321,7 +1332,7 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ov_id;
+ int ret, ovcs_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -1333,7 +1344,8 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
}
/* apply the overlay */
- ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ov_id);
+ ovcs_id = 0;
+ ret = of_unittest_apply_overlay(overlay_nr, unittest_nr, &ovcs_id);
if (ret != 0) {
/* of_unittest_apply_overlay already called unittest() */
return ret;
@@ -1348,7 +1360,7 @@ static int of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
- ret = of_overlay_destroy(ov_id);
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" failed to be destroyed @\"%s\"\n",
overlay_path(overlay_nr),
@@ -1450,7 +1462,7 @@ static void of_unittest_overlay_5(void)
static void of_unittest_overlay_6(void)
{
struct device_node *np;
- int ret, i, ov_id[2];
+ int ret, i, ov_id[2], ovcs_id;
int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
@@ -1477,13 +1489,14 @@ static void of_unittest_overlay_6(void)
return;
}
- ret = of_overlay_create(np);
+ ovcs_id = 0;
+ ret = of_overlay_apply(np, &ovcs_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
- ov_id[i] = ret;
+ ov_id[i] = ovcs_id;
of_unittest_track_overlay(ov_id[i]);
}
@@ -1501,7 +1514,8 @@ static void of_unittest_overlay_6(void)
}
for (i = 1; i >= 0; i--) {
- ret = of_overlay_destroy(ov_id[i]);
+ ovcs_id = ov_id[i];
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" failed destroy @\"%s\"\n",
overlay_path(overlay_nr + i),
@@ -1532,7 +1546,7 @@ static void of_unittest_overlay_6(void)
static void of_unittest_overlay_8(void)
{
struct device_node *np;
- int ret, i, ov_id[2];
+ int ret, i, ov_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
/* we don't care about device state in this test */
@@ -1547,18 +1561,20 @@ static void of_unittest_overlay_8(void)
return;
}
- ret = of_overlay_create(np);
+ ovcs_id = 0;
+ ret = of_overlay_apply(np, &ovcs_id);
if (ret < 0) {
unittest(0, "could not create overlay from \"%s\"\n",
overlay_path(overlay_nr + i));
return;
}
- ov_id[i] = ret;
+ ov_id[i] = ovcs_id;
of_unittest_track_overlay(ov_id[i]);
}
/* now try to remove first overlay (it should fail) */
- ret = of_overlay_destroy(ov_id[0]);
+ ovcs_id = ov_id[0];
+ ret = of_overlay_remove(&ovcs_id);
if (ret == 0) {
unittest(0, "overlay @\"%s\" was destroyed @\"%s\"\n",
overlay_path(overlay_nr + 0),
@@ -1569,7 +1585,8 @@ static void of_unittest_overlay_8(void)
/* removing them in order should work */
for (i = 1; i >= 0; i--) {
- ret = of_overlay_destroy(ov_id[i]);
+ ovcs_id = ov_id[i];
+ ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "overlay @\"%s\" not destroyed @\"%s\"\n",
overlay_path(overlay_nr + i),
@@ -2143,21 +2160,13 @@ static int __init overlay_data_add(int onum)
ret = 0;
goto out_free_data;
}
- of_node_set_flag(info->np_overlay, OF_DETACHED);
- ret = of_resolve_phandles(info->np_overlay);
- if (ret) {
- pr_err("resolve ot phandles (ret=%d), %d\n", ret, onum);
- goto out_free_np_overlay;
- }
-
- ret = of_overlay_create(info->np_overlay);
+ info->overlay_id = 0;
+ ret = of_overlay_apply(info->np_overlay, &info->overlay_id);
if (ret < 0) {
- pr_err("of_overlay_create() (ret=%d), %d\n", ret, onum);
+ pr_err("of_overlay_apply() (ret=%d), %d\n", ret, onum);
+ of_overlay_mutex_unlock();
goto out_free_np_overlay;
- } else {
- info->overlay_id = ret;
- ret = 0;
}
pr_debug("__dtb_overlay_begin applied, overlay id %d\n", ret);
@@ -2206,7 +2215,10 @@ static __init void of_unittest_overlay_high_level(void)
* Could not fixup phandles in unittest_unflatten_overlay_base()
* because kmalloc() was not yet available.
*/
+ of_overlay_mutex_lock();
of_resolve_phandles(overlay_base_root);
+ of_overlay_mutex_unlock();
+
/*
* do not allow overlay_base to duplicate any node already in
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
new file mode 100644
index 000000000000..a7fbb93f302c
--- /dev/null
+++ b/drivers/opp/Kconfig
@@ -0,0 +1,13 @@
+config PM_OPP
+ bool
+ select SRCU
+ ---help---
+ SoCs have a standard set of tuples consisting of frequency and
+ voltage pairs that the device will support per voltage domain. This
+ is called an Operating Performance Point or OPP. The actual
+ definition of an OPP varies over silicon within the same family of
+ devices.
+
+ The OPP layer organizes the data internally using device pointers
+ representing individual voltage domains and provides SoC
+ implementations with a ready-to-use framework to manage OPPs.
+ For more information, read <file:Documentation/power/opp.txt>.
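As a quick illustration of the API this option enables (a sketch only; the device pointer, frequencies and voltages are made up), a driver can register static OPPs and then query the table. Note that the count lookup now only logs with dev_dbg() when the table is missing, per the core.c hunk below.

#include <linux/pm_opp.h>

static int example_register_opps(struct device *dev)
{
	int ret, count;

	/* two made-up static OPPs: 1 GHz @ 975 mV and 1.1 GHz @ 1.0 V */
	ret = dev_pm_opp_add(dev, 1000000000, 975000);
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1100000000, 1000000);
	if (ret)
		return ret;

	count = dev_pm_opp_get_opp_count(dev);
	return count < 0 ? count : 0;
}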
diff --git a/drivers/base/power/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..e70ceb406fe9 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/opp/Makefile
diff --git a/drivers/base/power/opp/core.c b/drivers/opp/core.c
index a6de32530693..92fa94a6dcc1 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/opp/core.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/export.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "opp.h"
@@ -296,7 +297,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
count = PTR_ERR(opp_table);
- dev_err(dev, "%s: OPP table not found (%d)\n",
+ dev_dbg(dev, "%s: OPP table not found (%d)\n",
__func__, count);
return count;
}
@@ -535,6 +536,44 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
return ret;
}
+static inline int
+_generic_set_opp_domain(struct device *dev, struct clk *clk,
+ unsigned long old_freq, unsigned long freq,
+ unsigned int old_pstate, unsigned int new_pstate)
+{
+ int ret;
+
+ /* Scaling up? Scale domain performance state before frequency */
+ if (freq > old_freq) {
+ ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
+ if (ret)
+ return ret;
+ }
+
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ if (ret)
+ goto restore_domain_state;
+
+ /* Scaling down? Scale domain performance state after frequency */
+ if (freq < old_freq) {
+ ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_domain_state:
+ if (freq > old_freq)
+ dev_pm_genpd_set_performance_state(dev, old_pstate);
+
+ return ret;
+}
+
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
@@ -653,7 +692,16 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
/* Only frequency scaling */
if (!opp_table->regulators) {
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ /*
+ * We don't support devices with both regulator and
+ * domain performance-state for now.
+ */
+ if (opp_table->genpd_performance_state)
+ ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
+ IS_ERR(old_opp) ? 0 : old_opp->pstate,
+ opp->pstate);
+ else
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
} else if (!opp_table->set_opp) {
ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
IS_ERR(old_opp) ? NULL : old_opp->supplies,
@@ -988,6 +1036,9 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
return ret;
}
+ if (opp_table->get_pstate)
+ new_opp->pstate = opp_table->get_pstate(dev, new_opp->rate);
+
list_add(&new_opp->node, head);
mutex_unlock(&opp_table->lock);
@@ -1476,13 +1527,13 @@ err:
EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
/**
- * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ * dev_pm_opp_unregister_set_opp_helper() - Releases resources blocked for
* set_opp helper
* @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper().
*
* Release resources blocked for platform specific set_opp helper.
*/
-void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
+void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
{
if (!opp_table->set_opp) {
pr_err("%s: Doesn't have custom set_opp helper set\n",
@@ -1497,7 +1548,82 @@ void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table)
dev_pm_opp_put_opp_table(opp_table);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
+EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
+
+/**
+ * dev_pm_opp_register_get_pstate_helper() - Register get_pstate() helper.
+ * @dev: Device for which the helper is getting registered.
+ * @get_pstate: Helper.
+ *
+ * TODO: Remove this callback after the same information is available via Device
+ * Tree.
+ *
+ * This allows a platform to initialize the performance states of individual
+ * OPPs for its devices, until we get similar information directly from DT.
+ *
+ * This must be called before the OPPs are initialized for the device.
+ */
+struct opp_table *dev_pm_opp_register_get_pstate_helper(struct device *dev,
+ int (*get_pstate)(struct device *dev, unsigned long rate))
+{
+ struct opp_table *opp_table;
+ int ret;
+
+ if (!get_pstate)
+ return ERR_PTR(-EINVAL);
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have genpd_performance_state set */
+ if (WARN_ON(opp_table->genpd_performance_state)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ opp_table->genpd_performance_state = true;
+ opp_table->get_pstate = get_pstate;
+
+ return opp_table;
+
+err:
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_get_pstate_helper);
+
+/**
+ * dev_pm_opp_unregister_get_pstate_helper() - Releases resources blocked for
+ * get_pstate() helper
+ * @opp_table: OPP table returned from dev_pm_opp_register_get_pstate_helper().
+ *
+ * Release resources blocked for platform specific get_pstate() helper.
+ */
+void dev_pm_opp_unregister_get_pstate_helper(struct opp_table *opp_table)
+{
+ if (!opp_table->genpd_performance_state) {
+ pr_err("%s: Doesn't have performance states set\n",
+ __func__);
+ return;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ opp_table->genpd_performance_state = false;
+ opp_table->get_pstate = NULL;
+
+ dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_get_pstate_helper);
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
@@ -1706,6 +1832,13 @@ void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev,
if (remove_all || !opp->dynamic)
dev_pm_opp_put(opp);
}
+
+ /*
+ * The OPP table is getting removed, drop the performance state
+ * constraints.
+ */
+ if (opp_table->genpd_performance_state)
+ dev_pm_genpd_set_performance_state(dev, 0);
} else {
_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
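A hedged sketch of how a platform might use the helper pair added above; the callback, the rate threshold and the state numbers are invented for illustration. As the WARN_ON on opp_list enforces, the registration must happen before any OPPs are added for the device.

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_get_pstate(struct device *dev, unsigned long rate)
{
	/* made-up mapping: rates above 800 MHz need performance state 2 */
	return rate > 800000000 ? 2 : 1;
}

static int example_init_pstates(struct device *dev)
{
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_register_get_pstate_helper(dev, example_get_pstate);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* ... add OPPs and use the device ... */

	dev_pm_opp_unregister_get_pstate_helper(opp_table);
	return 0;
}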
diff --git a/drivers/base/power/opp/cpu.c b/drivers/opp/cpu.c
index 2d87bc1adf38..2d87bc1adf38 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/opp/cpu.c
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/opp/debugfs.c
index 81cf120fcf43..b03c03576a62 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -41,16 +41,15 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
{
struct dentry *d;
int i;
- char *name;
for (i = 0; i < opp_table->regulator_count; i++) {
- name = kasprintf(GFP_KERNEL, "supply-%d", i);
+ char name[15];
+
+ snprintf(name, sizeof(name), "supply-%d", i);
/* Create per-opp directory */
d = debugfs_create_dir(name, pdentry);
- kfree(name);
-
if (!d)
return false;
@@ -100,6 +99,9 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend))
return -ENOMEM;
+ if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate))
+ return -ENOMEM;
+
if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
return -ENOMEM;
diff --git a/drivers/base/power/opp/of.c b/drivers/opp/of.c
index 0b718886479b..cb716aa2f44b 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/opp/of.c
@@ -16,7 +16,7 @@
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -397,6 +397,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
ret);
_dev_pm_opp_remove_table(opp_table, dev, false);
+ of_node_put(np);
goto put_opp_table;
}
}
@@ -603,7 +604,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
- cpu_np = of_get_cpu_node(cpu, NULL);
+ cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np) {
dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
@@ -613,6 +614,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
/* Get OPP descriptor node */
tmp_np = _opp_of_get_opp_desc_node(cpu_np);
+ of_node_put(cpu_np);
if (!tmp_np) {
pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
diff --git a/drivers/base/power/opp/opp.h b/drivers/opp/opp.h
index 166eef990599..4d00061648a3 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -58,6 +58,7 @@ extern struct list_head opp_tables;
* @dynamic: not-created from static DT entries.
* @turbo: true if turbo (boost) OPP
* @suspend: true if suspend OPP
+ * @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
@@ -76,6 +77,7 @@ struct dev_pm_opp {
bool dynamic;
bool turbo;
bool suspend;
+ unsigned int pstate;
unsigned long rate;
struct dev_pm_opp_supply *supplies;
@@ -135,8 +137,10 @@ enum opp_table_access {
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators
+ * @genpd_performance_state: Device's power domain support performance state.
* @set_opp: Platform specific set_opp callback
* @set_opp_data: Data to be passed to set_opp callback
+ * @get_pstate: Platform specific get_pstate callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
@@ -170,9 +174,11 @@ struct opp_table {
struct clk *clk;
struct regulator **regulators;
unsigned int regulator_count;
+ bool genpd_performance_state;
int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_set_opp_data *set_opp_data;
+ int (*get_pstate)(struct device *dev, unsigned long rate);
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index e65a576e4032..f343bd96609a 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* @file nmi_timer_int.c
*
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index d5b2732b1b81..4b150a754890 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2010 ARM Ltd.
* Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
diff --git a/drivers/parisc/Makefile b/drivers/parisc/Makefile
index f95cab57133a..3cd5e6cb8478 100644
--- a/drivers/parisc/Makefile
+++ b/drivers/parisc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for most of the non-PCI devices in PA-RISC machines
#
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index e56f1569f6c3..0905be256de0 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/prefetch.h>
/**
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 696b8d4ca887..6fa41f8173b6 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel Parallel port device drivers.
#
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index 74cc6dd982d2..2d1a5c737c6e 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -44,10 +44,11 @@ static void parport_ieee1284_wakeup (struct parport *port)
up (&port->physport->ieee1284.irq);
}
-static struct parport *port_from_cookie[PARPORT_MAX];
-static void timeout_waiting_on_port (unsigned long cookie)
+static void timeout_waiting_on_port (struct timer_list *t)
{
- parport_ieee1284_wakeup (port_from_cookie[cookie % PARPORT_MAX]);
+ struct parport *port = from_timer(port, t, timer);
+
+ parport_ieee1284_wakeup (port);
}
/**
@@ -69,27 +70,19 @@ static void timeout_waiting_on_port (unsigned long cookie)
int parport_wait_event (struct parport *port, signed long timeout)
{
int ret;
- struct timer_list timer;
if (!port->physport->cad->timeout)
/* Zero timeout is special, and we can't down() the
semaphore. */
return 1;
- init_timer_on_stack(&timer);
- timer.expires = jiffies + timeout;
- timer.function = timeout_waiting_on_port;
- port_from_cookie[port->number % PARPORT_MAX] = port;
- timer.data = port->number;
-
- add_timer (&timer);
+ timer_setup(&port->timer, timeout_waiting_on_port, 0);
+ mod_timer(&port->timer, jiffies + timeout);
ret = down_interruptible (&port->physport->ieee1284.irq);
- if (!del_timer_sync(&timer) && !ret)
+ if (!del_timer_sync(&port->timer) && !ret)
/* Timed out. */
ret = 1;
- destroy_timer_on_stack(&timer);
-
return ret;
}
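The hunk above is one instance of the timer API conversion. A minimal standalone sketch of the same pattern, with hypothetical structure and function names: the callback receives the timer itself and recovers its container with from_timer(), replacing the old cookie-indexed lookup.

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_port {
	struct timer_list timer;
	int timed_out;
};

static void example_timeout(struct timer_list *t)
{
	struct example_port *port = from_timer(port, t, timer);

	port->timed_out = 1;
}

static void example_arm_timer(struct example_port *port, unsigned long timeout)
{
	timer_setup(&port->timer, example_timeout, 0);
	mod_timer(&port->timer, jiffies + timeout);
}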
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index a959224d011b..5d41dda6da4e 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* IEEE-1284 operations for parport.
*
* This file is for generic IEEE 1284 operations. The idea is that
diff --git a/drivers/parport/multiface.h b/drivers/parport/multiface.h
index 56769dd5d315..6513a44b9ca7 100644
--- a/drivers/parport/multiface.h
+++ b/drivers/parport/multiface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MULTIFACE_H_
#define _MULTIFACE_H_
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 0186db7680d4..62873070f988 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -1769,7 +1769,7 @@ stop:
/*--- Default parport operations ---------------------------------------*/
-static __initdata struct parport_operations parport_ip32_ops = {
+static const struct parport_operations parport_ip32_ops __initconst = {
.write_data = parport_ip32_write_data,
.read_data = parport_ip32_read_data,
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 4d1d6eaf333d..e035174ba205 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Parallel port device probing code
*
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index 8ee44a104ac4..48804049d697 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Sysctl interface for parport devices.
*
* Authors: David Campbell
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c32a77fc8b03..bda151788f3f 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -29,6 +29,15 @@ config PCI_MSI_IRQ_DOMAIN
depends on PCI_MSI
select GENERIC_MSI_IRQ_DOMAIN
+config PCI_QUIRKS
+ default y
+ bool "Enable PCI quirk workarounds" if EXPERT
+ depends on PCI
+ help
+ This enables workarounds for various PCI chipset bugs/quirks.
+ Disable this only if your target machine is unaffected by PCI
+ quirks.
+
config PCI_DEBUG
bool "PCI Debugging"
depends on PCI && DEBUG_KERNEL
@@ -42,13 +51,13 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
depends on PCI
+ depends on PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
re-allocation needs to be enabled. You can always use pci=realloc=on
- or pci=realloc=off to override it. Note this feature is a no-op
- unless PCI_IOV support is also enabled; in that case it will
- automatically re-allocate PCI resources if SR-IOV BARs have not
- been allocated by the BIOS.
+ or pci=realloc=off to override it. It will automatically
+ re-allocate PCI resources if SR-IOV BARs have not been allocated by
+ the BIOS.
When in doubt, say N.
@@ -71,15 +80,6 @@ config XEN_PCIDEV_FRONTEND
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config HT_IRQ
- bool "Interrupts on hypertransport devices"
- default y
- depends on PCI && X86_LOCAL_APIC
- help
- This allows native hypertransport devices to use interrupts.
-
- If unsure say Y.
-
config PCI_ATS
bool
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 66a21acad952..c7819b973df7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the PCI bus specific drivers.
#
@@ -16,16 +17,10 @@ obj-$(CONFIG_PCIEPORTBUS) += pcie/
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
-ifdef CONFIG_HOTPLUG_PCI
-obj-y += hotplug-pci.o
-endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index 22ec82fcdea2..113e09440f85 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -169,4 +169,14 @@ config PCIE_KIRIN
Say Y here if you want PCIe controller support
on HiSilicon Kirin series SoCs.
+config PCIE_HISI_STB
+ bool "HiSilicon STB SoCs PCIe controllers"
+ depends on ARCH_HISI
+ depends on PCI
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCIEPORTBUS
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs.
+
endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index c61be9738cce..41ba499c96ee 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
+obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 34427a6a15af..e77a4ceed74c 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -11,6 +11,7 @@
*/
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -594,6 +595,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
int i;
int phy_count;
struct phy **phy;
+ struct device_link **link;
void __iomem *base;
struct resource *res;
struct dw_pcie *pci;
@@ -649,11 +651,21 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!phy)
return -ENOMEM;
+ link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
if (IS_ERR(phy[i]))
return PTR_ERR(phy[i]);
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
}
dra7xx->base = base;
@@ -732,6 +744,10 @@ err_get_sync:
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
+err_link:
+ while (--i >= 0)
+ device_link_del(link[i]);
+
return ret;
}
@@ -794,6 +810,22 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
}
#endif
+void dra7xx_pcie_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+ int ret;
+
+ dra7xx_pcie_stop_link(dra7xx->pci);
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0)
+ dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+ pm_runtime_disable(dev);
+ dra7xx_pcie_disable_phy(dra7xx);
+}
+
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
@@ -807,5 +839,6 @@ static struct platform_driver dra7xx_pcie_driver = {
.suppress_bind_attrs = true,
.pm = &dra7xx_pcie_pm_ops,
},
+ .shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
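For context, a sketch of the consumer/supplier link pattern the dra7xx change introduces (names hypothetical): a stateless device link only ties PM and shutdown ordering between the two devices, so the caller must delete it on the error and removal paths, which is what the new err_link label does above.

#include <linux/device.h>

static struct device_link *example_link_to_phy(struct device *consumer,
					       struct device *phy_dev)
{
	/* stateless: no managed unbinding of the consumer; caller cleans up */
	return device_link_add(consumer, phy_dev, DL_FLAG_STATELESS);
}

static void example_unlink_from_phy(struct device_link *link)
{
	if (link)
		device_link_del(link);
}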
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 87fa486bee2c..8f34c2fdc600 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -33,6 +33,8 @@
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
#define PCIE_IATU_NUM 6
@@ -124,6 +126,14 @@ static int ls_pcie_link_up(struct dw_pcie *pci)
return 1;
}
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+}
+
static int ls_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -135,6 +145,7 @@ static int ls_pcie_host_init(struct pcie_port *pp)
* dw_pcie_setup_rc() will reconfigure the outbound windows.
*/
ls_pcie_disable_outbound_atus(pcie);
+ ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
ls_pcie_clear_multifunction(pcie);
@@ -253,6 +264,7 @@ static struct ls_pcie_drvdata ls2088_drvdata = {
};
static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
diff --git a/drivers/pci/dwc/pcie-histb.c b/drivers/pci/dwc/pcie-histb.c
new file mode 100644
index 000000000000..33b01b734d7d
--- /dev/null
+++ b/drivers/pci/dwc/pcie-histb.c
@@ -0,0 +1,470 @@
+/*
+ * PCIe host controller driver for HiSilicon STB SoCs
+ *
+ * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com
+ *
+ * Authors: Ruqiang Ju <juruqiang@hisilicon.com>
+ * Jianguo Sun <sunjianguo1@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_histb_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_SYS_CTRL0 0x0000
+#define PCIE_SYS_CTRL1 0x0004
+#define PCIE_SYS_CTRL7 0x001C
+#define PCIE_SYS_CTRL13 0x0034
+#define PCIE_SYS_CTRL15 0x003C
+#define PCIE_SYS_CTRL16 0x0040
+#define PCIE_SYS_CTRL17 0x0044
+
+#define PCIE_SYS_STAT0 0x0100
+#define PCIE_SYS_STAT4 0x0110
+
+#define PCIE_RDLH_LINK_UP BIT(5)
+#define PCIE_XMLH_LINK_UP BIT(15)
+#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
+#define PCIE_APP_LTSSM_ENABLE BIT(11)
+
+#define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28)
+#define PCIE_WM_EP 0
+#define PCIE_WM_LEGACY BIT(1)
+#define PCIE_WM_RC BIT(30)
+
+#define PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define PCIE_LTSSM_STATE_ACTIVE 0x11
+
+struct histb_pcie {
+ struct dw_pcie *pci;
+ struct clk *aux_clk;
+ struct clk *pipe_clk;
+ struct clk *sys_clk;
+ struct clk *bus_clk;
+ struct phy *phy;
+ struct reset_control *soft_reset;
+ struct reset_control *sys_reset;
+ struct reset_control *bus_reset;
+ void __iomem *ctrl;
+ int reset_gpio;
+};
+
+static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg)
+{
+ return readl(histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
+{
+ writel(val, histb_pcie->ctrl + reg);
+}
+
+static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
+}
+
+static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 val;
+
+ val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1);
+ if (enable)
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val);
+}
+
+static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ u32 val;
+
+ histb_pcie_dbi_r_mode(&pci->pp, true);
+ dw_pcie_read(base + reg, size, &val);
+ histb_pcie_dbi_r_mode(&pci->pp, false);
+
+ return val;
+}
+
+static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ histb_pcie_dbi_w_mode(&pci->pp, true);
+ dw_pcie_write(base + reg, size, val);
+ histb_pcie_dbi_w_mode(&pci->pp, false);
+}
+
+static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_r_mode(pp, true);
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_r_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
+ int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret;
+
+ histb_pcie_dbi_w_mode(pp, true);
+ ret = dw_pcie_write(pci->dbi_base + where, size, val);
+ histb_pcie_dbi_w_mode(pp, false);
+
+ return ret;
+}
+
+static int histb_pcie_link_up(struct dw_pcie *pci)
+{
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+ u32 status;
+
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
+ status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
+ status &= PCIE_LTSSM_STATE_MASK;
+ if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE))
+ return 1;
+
+ return 0;
+}
+
+static int histb_pcie_establish_link(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
+
+ if (dw_pcie_link_up(pci)) {
+ dev_info(pci->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
+ regval |= PCIE_APP_LTSSM_ENABLE;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static int histb_pcie_host_init(struct pcie_port *pp)
+{
+ histb_pcie_establish_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+
+ return 0;
+}
+
+static struct dw_pcie_host_ops histb_pcie_host_ops = {
+ .rd_own_conf = histb_pcie_rd_own_conf,
+ .wr_own_conf = histb_pcie_wr_own_conf,
+ .host_init = histb_pcie_host_init,
+};
+
+static irqreturn_t histb_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void histb_pcie_host_disable(struct histb_pcie *hipcie)
+{
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_assert(hipcie->bus_reset);
+
+ clk_disable_unprepare(hipcie->aux_clk);
+ clk_disable_unprepare(hipcie->pipe_clk);
+ clk_disable_unprepare(hipcie->sys_clk);
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+}
+
+static int histb_pcie_host_enable(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* power on the PCIe device, if one is present */
+ if (gpio_is_valid(hipcie->reset_gpio))
+ gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+
+ ret = clk_prepare_enable(hipcie->bus_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable bus clk\n");
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->sys_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable sys clk\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->pipe_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable pipe clk\n");
+ goto err_pipe_clk;
+ }
+
+ ret = clk_prepare_enable(hipcie->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable aux clk\n");
+ goto err_aux_clk;
+ }
+
+ reset_control_assert(hipcie->soft_reset);
+ reset_control_deassert(hipcie->soft_reset);
+
+ reset_control_assert(hipcie->sys_reset);
+ reset_control_deassert(hipcie->sys_reset);
+
+ reset_control_assert(hipcie->bus_reset);
+ reset_control_deassert(hipcie->bus_reset);
+
+ return 0;
+
+err_aux_clk:
+ clk_disable_unprepare(hipcie->aux_clk);
+err_pipe_clk:
+ clk_disable_unprepare(hipcie->pipe_clk);
+err_sys_clk:
+ clk_disable_unprepare(hipcie->sys_clk);
+err_bus_clk:
+ clk_disable_unprepare(hipcie->bus_clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .read_dbi = histb_pcie_read_dbi,
+ .write_dbi = histb_pcie_write_dbi,
+ .link_up = histb_pcie_link_up,
+};
+
+static int histb_pcie_probe(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie;
+ struct dw_pcie *pci;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ enum of_gpio_flags of_flags;
+ unsigned long flag = GPIOF_DIR_OUT;
+ int ret;
+
+ hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
+ if (!hipcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ hipcie->pci = pci;
+ pp = &pci->pp;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
+ hipcie->ctrl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hipcie->ctrl)) {
+ dev_err(dev, "cannot get control reg base\n");
+ return PTR_ERR(hipcie->ctrl);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
+ pci->dbi_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pci->dbi_base)) {
+ dev_err(dev, "cannot get rc-dbi base\n");
+ return PTR_ERR(pci->dbi_base);
+ }
+
+ hipcie->reset_gpio = of_get_named_gpio_flags(np,
+ "reset-gpios", 0, &of_flags);
+ if (of_flags & OF_GPIO_ACTIVE_LOW)
+ flag |= GPIOF_ACTIVE_LOW;
+ if (gpio_is_valid(hipcie->reset_gpio)) {
+ ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
+ flag, "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to request gpio\n");
+ return ret;
+ }
+ }
+
+ hipcie->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(hipcie->aux_clk)) {
+ dev_err(dev, "Failed to get PCIe aux clk\n");
+ return PTR_ERR(hipcie->aux_clk);
+ }
+
+ hipcie->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(hipcie->pipe_clk)) {
+ dev_err(dev, "Failed to get PCIe pipe clk\n");
+ return PTR_ERR(hipcie->pipe_clk);
+ }
+
+ hipcie->sys_clk = devm_clk_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_clk)) {
+ dev_err(dev, "Failed to get PCIEe sys clk\n");
+ return PTR_ERR(hipcie->sys_clk);
+ }
+
+ hipcie->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_clk)) {
+ dev_err(dev, "Failed to get PCIe bus clk\n");
+ return PTR_ERR(hipcie->bus_clk);
+ }
+
+ hipcie->soft_reset = devm_reset_control_get(dev, "soft");
+ if (IS_ERR(hipcie->soft_reset)) {
+ dev_err(dev, "couldn't get soft reset\n");
+ return PTR_ERR(hipcie->soft_reset);
+ }
+
+ hipcie->sys_reset = devm_reset_control_get(dev, "sys");
+ if (IS_ERR(hipcie->sys_reset)) {
+ dev_err(dev, "couldn't get sys reset\n");
+ return PTR_ERR(hipcie->sys_reset);
+ }
+
+ hipcie->bus_reset = devm_reset_control_get(dev, "bus");
+ if (IS_ERR(hipcie->bus_reset)) {
+ dev_err(dev, "couldn't get bus reset\n");
+ return PTR_ERR(hipcie->bus_reset);
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq < 0) {
+ dev_err(dev, "Failed to get MSI IRQ\n");
+ return pp->msi_irq;
+ }
+
+ ret = devm_request_irq(dev, pp->msi_irq,
+ histb_pcie_msi_irq_handler,
+ IRQF_SHARED, "histb-pcie-msi", pp);
+ if (ret) {
+ dev_err(dev, "cannot request MSI IRQ\n");
+ return ret;
+ }
+ }
+
+ hipcie->phy = devm_phy_get(dev, "phy");
+ if (IS_ERR(hipcie->phy)) {
+ dev_info(dev, "no pcie-phy found\n");
+ hipcie->phy = NULL;
+ /*
+ * Fall through: if no pcie-phy is found, PHY initialization
+ * must already have been done by the bootloader.
+ */
+ } else {
+ phy_init(hipcie->phy);
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &histb_pcie_host_ops;
+
+ platform_set_drvdata(pdev, hipcie);
+
+ ret = histb_pcie_host_enable(pp);
+ if (ret) {
+ dev_err(dev, "failed to enable host\n");
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int histb_pcie_remove(struct platform_device *pdev)
+{
+ struct histb_pcie *hipcie = platform_get_drvdata(pdev);
+
+ histb_pcie_host_disable(hipcie);
+
+ if (hipcie->phy)
+ phy_exit(hipcie->phy);
+
+ return 0;
+}
+
+static const struct of_device_id histb_pcie_of_match[] = {
+ { .compatible = "hisilicon,hi3798cv200-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
+
+static struct platform_driver histb_pcie_platform_driver = {
+ .probe = histb_pcie_probe,
+ .remove = histb_pcie_remove,
+ .driver = {
+ .name = "histb-pcie",
+ .of_match_table = histb_pcie_of_match,
+ },
+};
+module_platform_driver(histb_pcie_platform_driver);
+
+MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 424fdd6ed1ca..4f74386c1ced 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -150,7 +150,7 @@ static struct configfs_item_operations pci_epc_item_ops = {
.drop_link = pci_epc_epf_unlink,
};
-static struct config_item_type pci_epc_type = {
+static const struct config_item_type pci_epc_type = {
.ct_item_ops = &pci_epc_item_ops,
.ct_attrs = pci_epc_attrs,
.ct_owner = THIS_MODULE,
@@ -361,7 +361,7 @@ static struct configfs_item_operations pci_epf_ops = {
.release = pci_epf_release,
};
-static struct config_item_type pci_epf_type = {
+static const struct config_item_type pci_epf_type = {
.ct_item_ops = &pci_epf_ops,
.ct_attrs = pci_epf_attrs,
.ct_owner = THIS_MODULE,
@@ -400,7 +400,7 @@ static struct configfs_group_operations pci_epf_group_ops = {
.drop_item = &pci_epf_drop,
};
-static struct config_item_type pci_epf_group_type = {
+static const struct config_item_type pci_epf_group_type = {
.ct_group_ops = &pci_epf_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -428,15 +428,15 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
}
EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
-static struct config_item_type pci_functions_type = {
+static const struct config_item_type pci_functions_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_item_type pci_controllers_type = {
+static const struct config_item_type pci_controllers_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_item_type pci_ep_type = {
+static const struct config_item_type pci_ep_type = {
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index b868803792d8..38d12980db0f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -95,6 +95,12 @@ config PCI_XGENE_MSI
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC.
+config PCI_V3_SEMI
+ bool "V3 Semiconductor PCI controller"
+ depends on OF
+ depends on ARM
+ default ARCH_INTEGRATOR_AP
+
config PCI_VERSATILE
bool "ARM Versatile PB PCI controller"
depends on ARCH_VERSATILE
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 12382785e02a..34ec1d88f961 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
@@ -9,6 +10,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
+obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index 96028f01bc90..b9617d1c1d48 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Support for Faraday Technology FTPC100 PCI Controller
*
@@ -370,24 +371,6 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
struct device_node *np)
{
@@ -402,7 +385,7 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
int i = 0;
u32 val;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -481,7 +464,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
}
p->bus_clk = devm_clk_get(dev, "PCICLK");
if (IS_ERR(p->bus_clk))
- return PTR_ERR(clk);
+ return PTR_ERR(p->bus_clk);
ret = clk_prepare_enable(p->bus_clk);
if (ret) {
dev_err(dev, "could not prepare PCICLK\n");
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 7d709a7e0aa8..2f05511ce718 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -35,6 +35,40 @@ static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
}
};
+static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+
+ /*
+ * The Synopsys DesignWare PCIe controller in ECAM mode will not filter
+ * type 0 config TLPs sent to devices 1 and up on its downstream port,
+ * resulting in devices appearing multiple times on bus 0 unless we
+ * filter out those accesses here.
+ */
+ if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0)
+ return false;
+
+ return true;
+}
+
+static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ if (!pci_dw_valid_device(bus, devfn))
+ return NULL;
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static struct pci_ecam_ops pci_dw_ecam_bus_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_dw_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-cam-generic",
.data = &gen_pci_cfg_cam_bus_ops },
@@ -42,6 +76,15 @@ static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-ecam-generic",
.data = &pci_generic_ecam_ops },
+ { .compatible = "marvell,armada8k-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "socionext,synquacer-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
+ { .compatible = "snps,dw-pcie-ecam",
+ .data = &pci_dw_ecam_bus_ops },
+
{ },
};
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 0fe3ea164ee5..04dac6a42c9f 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -879,7 +879,7 @@ static void hv_irq_unmask(struct irq_data *data)
int cpu;
u64 res;
- dest = irq_data_get_affinity_mask(data);
+ dest = irq_data_get_effective_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
@@ -1042,6 +1042,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
+ struct cpumask *dest;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1056,6 +1057,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
int ret;
pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ dest = irq_data_get_effective_affinity_mask(data);
pbus = pdev->bus;
hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
@@ -1081,14 +1083,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
switch (pci_protocol_version) {
case PCI_PROTOCOL_VERSION_1_1:
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
case PCI_PROTOCOL_VERSION_1_2:
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
- irq_data_get_affinity_mask(data),
+ dest,
hpdev->desc.win_slot.slot,
cfg->vector);
break;
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 6f879685fedd..e46de69f0380 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -293,24 +293,6 @@ static struct pci_ops rcar_pci_ops = {
.write = pci_generic_config_write,
};
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
struct device_node *np)
{
@@ -320,7 +302,7 @@ static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci,
int index = 0;
/* Failure to parse is ok as we fall back to defaults */
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return 0;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 1987fec1f126..f9d3960dc39f 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -159,10 +159,13 @@
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
@@ -253,6 +256,7 @@ struct tegra_pcie_soc {
bool has_cml_clk;
bool has_gen2;
bool force_pca_enable;
+ bool program_uphy;
};
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -492,12 +496,32 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
return addr;
}
+static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_read32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (bus->number == 0)
+ return pci_generic_config_write32(bus, devfn, where, size,
+ value);
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
static struct pci_ops tegra_pcie_ops = {
.add_bus = tegra_pcie_add_bus,
.remove_bus = tegra_pcie_remove_bus,
.map_bus = tegra_pcie_map_bus,
- .read = pci_generic_config_read32,
- .write = pci_generic_config_write32,
+ .read = tegra_pcie_config_read,
+ .write = tegra_pcie_config_write,
};
static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -1013,10 +1037,12 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- err = tegra_pcie_phy_power_on(pcie);
- if (err < 0) {
- dev_err(dev, "failed to power on PHY(s): %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_on(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY(s): %d\n", err);
+ return err;
+ }
}
/* take the PCIe interface module out of reset */
@@ -1049,19 +1075,23 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
/* TODO: disable and unprepare clocks? */
- err = tegra_pcie_phy_power_off(pcie);
- if (err < 0)
- dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ if (soc->program_uphy) {
+ err = tegra_pcie_phy_power_off(pcie);
+ if (err < 0)
+ dev_err(dev, "failed to power off PHY(s): %d\n", err);
+ }
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
@@ -1078,19 +1108,29 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
reset_control_assert(pcie->afi_rst);
reset_control_assert(pcie->pex_rst);
- tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+ if (!dev->pm_domain)
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
/* enable regulators */
err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
if (err < 0)
dev_err(dev, "failed to enable regulators: %d\n", err);
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
- pcie->pex_clk,
- pcie->pex_rst);
- if (err) {
- dev_err(dev, "powerup sequence failed: %d\n", err);
- return err;
+ if (dev->pm_domain) {
+ err = clk_prepare_enable(pcie->pex_clk);
+ if (err) {
+ dev_err(dev, "failed to enable PEX clock: %d\n", err);
+ return err;
+ }
+ reset_control_deassert(pcie->pex_rst);
+ } else {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ pcie->pex_clk,
+ pcie->pex_rst);
+ if (err) {
+ dev_err(dev, "powerup sequence failed: %d\n", err);
+ return err;
+ }
}
reset_control_deassert(pcie->afi_rst);
@@ -1263,6 +1303,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *pads, *afi, *res;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
err = tegra_pcie_clocks_get(pcie);
@@ -1277,10 +1318,12 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
- err = tegra_pcie_phys_get(pcie);
- if (err < 0) {
- dev_err(dev, "failed to get PHYs: %d\n", err);
- return err;
+ if (soc->program_uphy) {
+ err = tegra_pcie_phys_get(pcie);
+ if (err < 0) {
+ dev_err(dev, "failed to get PHYs: %d\n", err);
+ return err;
+ }
}
err = tegra_pcie_power_on(pcie);
@@ -1342,6 +1385,7 @@ poweroff:
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
+ const struct tegra_pcie_soc *soc = pcie->soc;
int err;
if (pcie->irq > 0)
@@ -1349,9 +1393,11 @@ static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
tegra_pcie_power_off(pcie);
- err = phy_exit(pcie->phy);
- if (err < 0)
- dev_err(dev, "failed to teardown PHY: %d\n", err);
+ if (soc->program_uphy) {
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(dev, "failed to teardown PHY: %d\n", err);
+ }
return 0;
}
@@ -1606,8 +1652,32 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
struct device *dev = pcie->dev;
struct device_node *np = dev->of_node;
- if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
- of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ switch (lanes) {
+ case 0x010004:
+ dev_info(dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
+ return 0;
+
+ case 0x010102:
+ dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+
+ case 0x010101:
+ dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
+ return 0;
+
+ default:
+ dev_info(dev, "invalid lane configuration in DT, using default 2x1, 1x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+ of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
switch (lanes) {
case 0x0000104:
dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1727,7 +1797,20 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
struct device_node *np = dev->of_node;
unsigned int i = 0;
- if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+ if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
+ pcie->num_supplies = 4;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "dvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pexctl-aud";
+ } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
pcie->num_supplies = 6;
pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
@@ -2066,6 +2149,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
.has_cml_clk = false,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2081,6 +2165,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
.has_cml_clk = true,
.has_gen2 = false,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2095,6 +2180,7 @@ static const struct tegra_pcie_soc tegra124_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = false,
+ .program_uphy = true,
};
static const struct tegra_pcie_soc tegra210_pcie = {
@@ -2109,9 +2195,27 @@ static const struct tegra_pcie_soc tegra210_pcie = {
.has_cml_clk = true,
.has_gen2 = true,
.force_pca_enable = true,
+ .program_uphy = true,
+};
+
+static const struct tegra_pcie_soc tegra186_pcie = {
+ .num_ports = 3,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .pads_refclk_cfg0 = 0x80b880b8,
+ .pads_refclk_cfg1 = 0x000480b8,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = false,
+ .has_gen2 = true,
+ .force_pca_enable = false,
+ .program_uphy = false,
};
static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
diff --git a/drivers/pci/host/pci-v3-semi.c b/drivers/pci/host/pci-v3-semi.c
new file mode 100644
index 000000000000..02f6e1e3a421
--- /dev/null
+++ b/drivers/pci/host/pci-v3-semi.c
@@ -0,0 +1,959 @@
+/*
+ * Support for V3 Semiconductor PCI Local Bus to PCI Bridge
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the code from arch/arm/mach-integrator/pci_v3.c
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000-2001 Deep Blue Solutions Ltd
+ *
+ * Contributors to the old driver include:
+ * Russell King <linux@armlinux.org.uk>
+ * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite)
+ * Rob Herring <robh@kernel.org>
+ * Liviu Dudau <Liviu.Dudau@arm.com>
+ * Grant Likely <grant.likely@secretlab.ca>
+ * Arnd Bergmann <arnd@arndb.de>
+ * Bjorn Helgaas <bhelgaas@google.com>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#define V3_PCI_VENDOR 0x00000000
+#define V3_PCI_DEVICE 0x00000002
+#define V3_PCI_CMD 0x00000004
+#define V3_PCI_STAT 0x00000006
+#define V3_PCI_CC_REV 0x00000008
+#define V3_PCI_HDR_CFG 0x0000000C
+#define V3_PCI_IO_BASE 0x00000010
+#define V3_PCI_BASE0 0x00000014
+#define V3_PCI_BASE1 0x00000018
+#define V3_PCI_SUB_VENDOR 0x0000002C
+#define V3_PCI_SUB_ID 0x0000002E
+#define V3_PCI_ROM 0x00000030
+#define V3_PCI_BPARAM 0x0000003C
+#define V3_PCI_MAP0 0x00000040
+#define V3_PCI_MAP1 0x00000044
+#define V3_PCI_INT_STAT 0x00000048
+#define V3_PCI_INT_CFG 0x0000004C
+#define V3_LB_BASE0 0x00000054
+#define V3_LB_BASE1 0x00000058
+#define V3_LB_MAP0 0x0000005E
+#define V3_LB_MAP1 0x00000062
+#define V3_LB_BASE2 0x00000064
+#define V3_LB_MAP2 0x00000066
+#define V3_LB_SIZE 0x00000068
+#define V3_LB_IO_BASE 0x0000006E
+#define V3_FIFO_CFG 0x00000070
+#define V3_FIFO_PRIORITY 0x00000072
+#define V3_FIFO_STAT 0x00000074
+#define V3_LB_ISTAT 0x00000076
+#define V3_LB_IMASK 0x00000077
+#define V3_SYSTEM 0x00000078
+#define V3_LB_CFG 0x0000007A
+#define V3_PCI_CFG 0x0000007C
+#define V3_DMA_PCI_ADR0 0x00000080
+#define V3_DMA_PCI_ADR1 0x00000090
+#define V3_DMA_LOCAL_ADR0 0x00000084
+#define V3_DMA_LOCAL_ADR1 0x00000094
+#define V3_DMA_LENGTH0 0x00000088
+#define V3_DMA_LENGTH1 0x00000098
+#define V3_DMA_CSR0 0x0000008B
+#define V3_DMA_CSR1 0x0000009B
+#define V3_DMA_CTLB_ADR0 0x0000008C
+#define V3_DMA_CTLB_ADR1 0x0000009C
+#define V3_DMA_DELAY 0x000000E0
+#define V3_MAIL_DATA 0x000000C0
+#define V3_PCI_MAIL_IEWR 0x000000D0
+#define V3_PCI_MAIL_IERD 0x000000D2
+#define V3_LB_MAIL_IEWR 0x000000D4
+#define V3_LB_MAIL_IERD 0x000000D6
+#define V3_MAIL_WR_STAT 0x000000D8
+#define V3_MAIL_RD_STAT 0x000000DA
+#define V3_QBA_MAP 0x000000DC
+
+/* PCI STATUS bits */
+#define V3_PCI_STAT_PAR_ERR BIT(15)
+#define V3_PCI_STAT_SYS_ERR BIT(14)
+#define V3_PCI_STAT_M_ABORT_ERR BIT(13)
+#define V3_PCI_STAT_T_ABORT_ERR BIT(12)
+
+/* LB ISTAT bits */
+#define V3_LB_ISTAT_MAILBOX BIT(7)
+#define V3_LB_ISTAT_PCI_RD BIT(6)
+#define V3_LB_ISTAT_PCI_WR BIT(5)
+#define V3_LB_ISTAT_PCI_INT BIT(4)
+#define V3_LB_ISTAT_PCI_PERR BIT(3)
+#define V3_LB_ISTAT_I2O_QWR BIT(2)
+#define V3_LB_ISTAT_DMA1 BIT(1)
+#define V3_LB_ISTAT_DMA0 BIT(0)
+
+/* PCI COMMAND bits */
+#define V3_COMMAND_M_FBB_EN BIT(9)
+#define V3_COMMAND_M_SERR_EN BIT(8)
+#define V3_COMMAND_M_PAR_EN BIT(6)
+#define V3_COMMAND_M_MASTER_EN BIT(2)
+#define V3_COMMAND_M_MEM_EN BIT(1)
+#define V3_COMMAND_M_IO_EN BIT(0)
+
+/* SYSTEM bits */
+#define V3_SYSTEM_M_RST_OUT BIT(15)
+#define V3_SYSTEM_M_LOCK BIT(14)
+#define V3_SYSTEM_UNLOCK 0xa05f
+
+/* PCI CFG bits */
+#define V3_PCI_CFG_M_I2O_EN BIT(15)
+#define V3_PCI_CFG_M_IO_REG_DIS BIT(14)
+#define V3_PCI_CFG_M_IO_DIS BIT(13)
+#define V3_PCI_CFG_M_EN3V BIT(12)
+#define V3_PCI_CFG_M_RETRY_EN BIT(10)
+#define V3_PCI_CFG_M_AD_LOW1 BIT(9)
+#define V3_PCI_CFG_M_AD_LOW0 BIT(8)
+/*
+ * This is the value applied to C/BE[3:1], with bit 0 always held 0
+ * during DMA access.
+ */
+#define V3_PCI_CFG_M_RTYPE_SHIFT 5
+#define V3_PCI_CFG_M_WTYPE_SHIFT 1
+#define V3_PCI_CFG_TYPE_DEFAULT 0x3
+
+/* PCI BASE bits (PCI -> Local Bus) */
+#define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U
+#define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U
+#define V3_PCI_BASE_M_PREFETCH BIT(3)
+#define V3_PCI_BASE_M_TYPE (3 << 1)
+#define V3_PCI_BASE_M_IO BIT(0)
+
+/* PCI MAP bits (PCI -> Local bus) */
+#define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U
+#define V3_PCI_MAP_M_RD_POST_INH BIT(15)
+#define V3_PCI_MAP_M_ROM_SIZE (3 << 10)
+#define V3_PCI_MAP_M_SWAP (3 << 8)
+#define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U
+#define V3_PCI_MAP_M_REG_EN BIT(1)
+#define V3_PCI_MAP_M_ENABLE BIT(0)
+
+/* LB_BASE0,1 bits (Local bus -> PCI) */
+#define V3_LB_BASE_ADR_BASE 0xfff00000U
+#define V3_LB_BASE_SWAP (3 << 8)
+#define V3_LB_BASE_ADR_SIZE (15 << 4)
+#define V3_LB_BASE_PREFETCH BIT(3)
+#define V3_LB_BASE_ENABLE BIT(0)
+
+#define V3_LB_BASE_ADR_SIZE_1MB (0 << 4)
+#define V3_LB_BASE_ADR_SIZE_2MB (1 << 4)
+#define V3_LB_BASE_ADR_SIZE_4MB (2 << 4)
+#define V3_LB_BASE_ADR_SIZE_8MB (3 << 4)
+#define V3_LB_BASE_ADR_SIZE_16MB (4 << 4)
+#define V3_LB_BASE_ADR_SIZE_32MB (5 << 4)
+#define V3_LB_BASE_ADR_SIZE_64MB (6 << 4)
+#define V3_LB_BASE_ADR_SIZE_128MB (7 << 4)
+#define V3_LB_BASE_ADR_SIZE_256MB (8 << 4)
+#define V3_LB_BASE_ADR_SIZE_512MB (9 << 4)
+#define V3_LB_BASE_ADR_SIZE_1GB (10 << 4)
+#define V3_LB_BASE_ADR_SIZE_2GB (11 << 4)
+
+#define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE)
+
+/* LB_MAP0,1 bits (Local bus -> PCI) */
+#define V3_LB_MAP_MAP_ADR 0xfff0U
+#define V3_LB_MAP_TYPE (7 << 1)
+#define V3_LB_MAP_AD_LOW_EN BIT(0)
+
+#define V3_LB_MAP_TYPE_IACK (0 << 1)
+#define V3_LB_MAP_TYPE_IO (1 << 1)
+#define V3_LB_MAP_TYPE_MEM (3 << 1)
+#define V3_LB_MAP_TYPE_CONFIG (5 << 1)
+#define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1)
+
+#define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR)
+
+/* LB_BASE2 bits (Local bus -> PCI IO) */
+#define V3_LB_BASE2_ADR_BASE 0xff00U
+#define V3_LB_BASE2_SWAP_AUTO (3 << 6)
+#define V3_LB_BASE2_ENABLE BIT(0)
+
+#define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE)
+
+/* LB_MAP2 bits (Local bus -> PCI IO) */
+#define V3_LB_MAP2_MAP_ADR 0xff00U
+
+#define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR)
+
+/* FIFO priority bits */
+#define V3_FIFO_PRIO_LOCAL BIT(12)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11)
+#define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11))
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9)
+#define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9))
+#define V3_FIFO_PRIO_PCI BIT(4)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3)
+#define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3))
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1)
+#define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1))
+
+/* Local bus configuration bits */
+#define V3_LB_CFG_LB_TO_64_CYCLES 0x0000
+#define V3_LB_CFG_LB_TO_256_CYCLES BIT(13)
+#define V3_LB_CFG_LB_TO_512_CYCLES BIT(14)
+#define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14))
+#define V3_LB_CFG_LB_RST BIT(12)
+#define V3_LB_CFG_LB_PPC_RDY BIT(11)
+#define V3_LB_CFG_LB_LB_INT BIT(10)
+#define V3_LB_CFG_LB_ERR_EN BIT(9)
+#define V3_LB_CFG_LB_RDY_EN BIT(8)
+#define V3_LB_CFG_LB_BE_IMODE BIT(7)
+#define V3_LB_CFG_LB_BE_OMODE BIT(6)
+#define V3_LB_CFG_LB_ENDIAN BIT(5)
+#define V3_LB_CFG_LB_PARK_EN BIT(4)
+#define V3_LB_CFG_LB_FBB_DIS BIT(2)
+
+/* ARM Integrator-specific extended control registers */
+#define INTEGRATOR_SC_PCI_OFFSET 0x18
+#define INTEGRATOR_SC_PCI_ENABLE BIT(0)
+#define INTEGRATOR_SC_PCI_INTCLR BIT(1)
+#define INTEGRATOR_SC_LBFADDR_OFFSET 0x20
+#define INTEGRATOR_SC_LBFCODE_OFFSET 0x24
+
+struct v3_pci {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *config_base;
+ struct pci_bus *bus;
+ u32 config_mem;
+ u32 io_mem;
+ u32 non_pre_mem;
+ u32 pre_mem;
+ phys_addr_t io_bus_addr;
+ phys_addr_t non_pre_bus_addr;
+ phys_addr_t pre_bus_addr;
+ struct regmap *map;
+};
+
+/*
+ * The V3 PCI interface chip in Integrator provides several windows from
+ * local bus memory into the PCI memory areas. Unfortunately, there
+ * are not really enough windows for our usage, therefore we reuse
+ * one of the windows for access to PCI configuration space. On the
+ * Integrator/AP, the memory map is as follows:
+ *
+ * Local Bus Memory Usage
+ *
+ * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable
+ * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable
+ * 60000000 - 60FFFFFF PCI IO. 16M
+ * 61000000 - 61FFFFFF PCI Configuration. 16M
+ *
+ * There are three V3 windows, each described by a pair of V3 registers.
+ * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2.
+ * Base0 and Base1 can be used for any type of PCI memory access. Base2
+ * can be used either for PCI I/O or for I2O accesses. By default, uHAL
+ * uses this only for PCI IO space.
+ *
+ * Normally these spaces are mapped using the following base registers:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF
+ *
+ * This means that I2O and PCI configuration space accesses will fail.
+ * When PCI configuration accesses are needed (via the uHAL PCI
+ * configuration space primitives) we must remap the spaces as follows:
+ *
+ * Usage Local Bus Memory Base/Map registers used
+ *
+ * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0
+ * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0
+ * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2
+ * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1
+ *
+ * To make this work, the code depends on overlapping windows working.
+ * The V3 chip translates an address by checking its range within
+ * each of the BASE/MAP pairs in turn (in ascending register number
+ * order). It will use the first matching pair. So, for example,
+ * if the same address is mapped by both LB_BASE0/LB_MAP0 and
+ * LB_BASE1/LB_MAP1, the V3 will use the translation from
+ * LB_BASE0/LB_MAP0.
+ *
+ * To allow PCI Configuration space access, the code enlarges the
+ * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes
+ * the window currently mapped by LB_BASE1/LB_MAP1 so that it can
+ * be remapped for use by configuration cycles.
+ *
+ * At the end of the PCI Configuration space accesses,
+ * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window
+ * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to
+ * reveal the now restored LB_BASE1/LB_MAP1 window.
+ *
+ * NOTE: We do not set up I2O mapping. I suspect that this is only
+ * for an intelligent (target) device. Using I2O disables most of
+ * the mappings into PCI memory.
+ */
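+/*
+ * Illustrative example (not part of the original code), assuming the
+ * Integrator/AP map above with a 1:1 bus mapping: non_pre_mem =
+ * 0x40000000 gives v3_addr_to_lb_base(0x40000000) = 0x40000000 for
+ * LB_BASE0, and a prefetchable bus address of 0x50000000 gives
+ * v3_addr_to_lb_map(0x50000000) = 0x5000 for LB_MAP1, matching the
+ * window programming done in v3_map_bus()/v3_unmap_bus() below.
+ */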
+static void __iomem *v3_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ unsigned int address, mapaddress, busnr;
+
+ busnr = bus->number;
+ if (busnr == 0) {
+ int slot = PCI_SLOT(devfn);
+
+ /*
+ * local bus segment so need a type 0 config cycle
+ *
+ * build the PCI configuration "address" with one-hot in
+ * A31-A11
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 are 0 (0)
+ */
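+ /*
+ * Worked example (illustrative): for slot 3, function 1 on
+ * bus 0, the code below yields address = (1 << 8) | BIT(14)
+ * = 0x4100, i.e. IDSEL driven one-hot on AD14; for slot 13
+ * the one-hot bit moves into mapaddress as BIT(8) instead.
+ */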
+ address = PCI_FUNC(devfn) << 8;
+ mapaddress = V3_LB_MAP_TYPE_CONFIG;
+
+ if (slot > 12)
+ /*
+ * high order bits are handled by the MAP register
+ */
+ mapaddress |= BIT(slot - 5);
+ else
+ /*
+ * low order bits handled directly in the address
+ */
+ address |= BIT(slot + 11);
+ } else {
+ /*
+ * not the local bus segment so need a type 1 config cycle
+ *
+ * address:
+ * 23:16 = bus number
+ * 15:11 = slot number (7:3 of devfn)
+ * 10:8 = func number (2:0 of devfn)
+ *
+ * mapaddress:
+ * 3:1 = config cycle (101)
+ * 0 = PCI A1 & A0 from host bus (1)
+ */
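+ /*
+ * Worked example (illustrative): bus 1, slot 2, function 0
+ * has devfn = 0x10, so address = (1 << 16) | (0x10 << 8)
+ * = 0x11000 for the type 1 cycle built below.
+ */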
+ mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN;
+ address = (busnr << 16) | (devfn << 8);
+ }
+
+ /*
+ * Set up base0 to see all 512Mbytes of memory space (not
+ * prefetchable), this frees up base1 for re-use by
+ * configuration memory
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+
+ /*
+ * Set up base1/map1 to point into configuration space.
+ * The config mem is always 16MB.
+ */
+ writel(v3_addr_to_lb_base(v3->config_mem) |
+ V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(mapaddress, v3->base + V3_LB_MAP1);
+
+ return v3->config_base + address + offset;
+}
+
+static void v3_unmap_bus(struct v3_pci *v3)
+{
+ /*
+ * Reassign base1 for use by prefetchable PCI memory
+ */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+
+ /*
+ * And shrink base0 back to a 256M window (NOTE: MAP0 already correct)
+ */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+}
+
+static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
+ ret = pci_generic_config_read(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct v3_pci *v3 = bus->sysdata;
+ int ret;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+ ret = pci_generic_config_write(bus, fn, config, size, value);
+ v3_unmap_bus(v3);
+ return ret;
+}
+
+static struct pci_ops v3_pci_ops = {
+ .map_bus = v3_map_bus,
+ .read = v3_pci_read_config,
+ .write = v3_pci_write_config,
+};
+
+static irqreturn_t v3_irq(int irq, void *data)
+{
+ struct v3_pci *v3 = data;
+ struct device *dev = v3->dev;
+ u32 status;
+
+ status = readw(v3->base + V3_PCI_STAT);
+ if (status & V3_PCI_STAT_PAR_ERR)
+ dev_err(dev, "parity error interrupt\n");
+ if (status & V3_PCI_STAT_SYS_ERR)
+ dev_err(dev, "system error interrupt\n");
+ if (status & V3_PCI_STAT_M_ABORT_ERR)
+ dev_err(dev, "master abort error interrupt\n");
+ if (status & V3_PCI_STAT_T_ABORT_ERR)
+ dev_err(dev, "target abort error interrupt\n");
+ writew(status, v3->base + V3_PCI_STAT);
+
+ status = readb(v3->base + V3_LB_ISTAT);
+ if (status & V3_LB_ISTAT_MAILBOX)
+ dev_info(dev, "PCI mailbox interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_RD)
+ dev_err(dev, "PCI target LB->PCI READ abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_WR)
+ dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_INT)
+ dev_info(dev, "PCI pin interrupt\n");
+ if (status & V3_LB_ISTAT_PCI_PERR)
+ dev_err(dev, "PCI parity error interrupt\n");
+ if (status & V3_LB_ISTAT_I2O_QWR)
+ dev_info(dev, "I2O inbound post queue interrupt\n");
+ if (status & V3_LB_ISTAT_DMA1)
+ dev_info(dev, "DMA channel 1 interrupt\n");
+ if (status & V3_LB_ISTAT_DMA0)
+ dev_info(dev, "DMA channel 0 interrupt\n");
+ /* Clear all possible interrupts on the local bus */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ if (v3->map)
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ return IRQ_HANDLED;
+}
+
+static int v3_integrator_init(struct v3_pci *v3)
+{
+ unsigned int val;
+
+ v3->map =
+ syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon");
+ if (IS_ERR(v3->map)) {
+ dev_err(v3->dev, "no syscon\n");
+ return -ENODEV;
+ }
+
+ regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val);
+ /* Take the PCI bridge out of reset, clear IRQs */
+ regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET,
+ INTEGRATOR_SC_PCI_ENABLE |
+ INTEGRATOR_SC_PCI_INTCLR);
+
+ if (!(val & INTEGRATOR_SC_PCI_ENABLE)) {
+ /* If we were in reset we need to sleep a bit */
+ msleep(230);
+
+ /* Set the physical base for the controller itself */
+ writel(0x6200, v3->base + V3_LB_IO_BASE);
+
+ /* Wait for the mailbox to settle after reset */
+ do {
+ writeb(0xaa, v3->base + V3_MAIL_DATA);
+ writeb(0x55, v3->base + V3_MAIL_DATA + 4);
+ } while (readb(v3->base + V3_MAIL_DATA) != 0xaa &&
+ readb(v3->base + V3_MAIL_DATA) != 0x55);
+ }
+
+ dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n");
+
+ return 0;
+}
+
+static int v3_pci_setup_resource(struct v3_pci *v3,
+ resource_size_t io_base,
+ struct pci_host_bridge *host,
+ struct resource_entry *win)
+{
+ struct device *dev = v3->dev;
+ struct resource *mem;
+ struct resource *io;
+ int ret;
+
+ switch (resource_type(win->res)) {
+ case IORESOURCE_IO:
+ io = win->res;
+ io->name = "V3 PCI I/O";
+ v3->io_mem = io_base;
+ v3->io_bus_addr = io->start - win->offset;
+ dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
+ io, &v3->io_bus_addr);
+ ret = pci_remap_iospace(io, io_base);
+ if (ret) {
+ dev_warn(dev,
+ "error %d: failed to map resource %pR\n",
+ ret, io);
+ return ret;
+ }
+ /* Setup window 2 - PCI I/O */
+ writel(v3_addr_to_lb_base2(v3->io_mem) |
+ V3_LB_BASE2_ENABLE,
+ v3->base + V3_LB_BASE2);
+ writew(v3_addr_to_lb_map2(v3->io_bus_addr),
+ v3->base + V3_LB_MAP2);
+ break;
+ case IORESOURCE_MEM:
+ mem = win->res;
+ if (mem->flags & IORESOURCE_PREFETCH) {
+ mem->name = "V3 PCI PRE-MEM";
+ v3->pre_mem = mem->start;
+ v3->pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev, "prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ if (v3->non_pre_mem &&
+ (mem->start != v3->non_pre_mem + SZ_256M)) {
+ dev_err(dev,
+ "prefetchable memory is not adjacent to non-prefetchable memory\n");
+ return -EINVAL;
+ }
+ /* Setup window 1 - PCI prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_PREFETCH |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE1);
+ writew(v3_addr_to_lb_map(v3->pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */
+ v3->base + V3_LB_MAP1);
+ } else {
+ mem->name = "V3 PCI NON-PRE-MEM";
+ v3->non_pre_mem = mem->start;
+ v3->non_pre_bus_addr = mem->start - win->offset;
+ dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n",
+ mem, &v3->non_pre_bus_addr);
+ if (resource_size(mem) != SZ_256M) {
+ dev_err(dev,
+ "non-prefetchable memory range is not 256MB\n");
+ return -EINVAL;
+ }
+ /* Setup window 0 - PCI non-prefetchable memory */
+ writel(v3_addr_to_lb_base(v3->non_pre_mem) |
+ V3_LB_BASE_ADR_SIZE_256MB |
+ V3_LB_BASE_ENABLE,
+ v3->base + V3_LB_BASE0);
+ writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) |
+ V3_LB_MAP_TYPE_MEM,
+ v3->base + V3_LB_MAP0);
+ }
+ break;
+ case IORESOURCE_BUS:
+ dev_dbg(dev, "BUS %pR\n", win->res);
+ host->busnr = win->res->start;
+ break;
+ default:
+ dev_info(dev, "Unknown resource type %lu\n",
+ resource_type(win->res));
+ break;
+ }
+
+ return 0;
+}
+
+static int v3_get_dma_range_config(struct v3_pci *v3,
+ struct of_pci_range *range,
+ u32 *pci_base, u32 *pci_map)
+{
+ struct device *dev = v3->dev;
+ u64 cpu_end = range->cpu_addr + range->size - 1;
+ u64 pci_end = range->pci_addr + range->size - 1;
+ u32 val;
+
+ if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) {
+ dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE;
+ *pci_base = val;
+
+ if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) {
+ dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n");
+ return -EINVAL;
+ }
+ val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR;
+
+ switch (range->size) {
+ case SZ_1M:
+ val |= V3_LB_BASE_ADR_SIZE_1MB;
+ break;
+ case SZ_2M:
+ val |= V3_LB_BASE_ADR_SIZE_2MB;
+ break;
+ case SZ_4M:
+ val |= V3_LB_BASE_ADR_SIZE_4MB;
+ break;
+ case SZ_8M:
+ val |= V3_LB_BASE_ADR_SIZE_8MB;
+ break;
+ case SZ_16M:
+ val |= V3_LB_BASE_ADR_SIZE_16MB;
+ break;
+ case SZ_32M:
+ val |= V3_LB_BASE_ADR_SIZE_32MB;
+ break;
+ case SZ_64M:
+ val |= V3_LB_BASE_ADR_SIZE_64MB;
+ break;
+ case SZ_128M:
+ val |= V3_LB_BASE_ADR_SIZE_128MB;
+ break;
+ case SZ_256M:
+ val |= V3_LB_BASE_ADR_SIZE_256MB;
+ break;
+ case SZ_512M:
+ val |= V3_LB_BASE_ADR_SIZE_512MB;
+ break;
+ case SZ_1G:
+ val |= V3_LB_BASE_ADR_SIZE_1GB;
+ break;
+ case SZ_2G:
+ val |= V3_LB_BASE_ADR_SIZE_2GB;
+ break;
+ default:
+ dev_err(v3->dev, "illegal dma memory chunk size\n");
+ return -EINVAL;
+ }
+ val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
+ *pci_map = val;
+
+ dev_dbg(dev,
+ "DMA MEM CPU: 0x%016llx -> 0x%016llx => "
+ "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n",
+ range->cpu_addr, cpu_end,
+ range->pci_addr, pci_end,
+ *pci_base, *pci_map);
+
+ return 0;
+}
+
+static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = v3->dev;
+ int i = 0;
+
+ if (of_pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the dma-ranges from the device tree
+ */
+ for_each_of_pci_range(&parser, &range) {
+ int ret;
+ u32 pci_base, pci_map;
+
+ ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map);
+ if (ret)
+ return ret;
+
+ if (i == 0) {
+ writel(pci_base, v3->base + V3_PCI_BASE0);
+ writel(pci_map, v3->base + V3_PCI_MAP0);
+ } else if (i == 1) {
+ writel(pci_base, v3->base + V3_PCI_BASE1);
+ writel(pci_map, v3->base + V3_PCI_MAP1);
+ } else {
+ dev_err(dev, "too many ranges, only two supported\n");
+ dev_err(dev, "range %d ignored\n", i);
+ }
+ i++;
+ }
+ return 0;
+}
+
+static int v3_pci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ resource_size_t io_base;
+ struct resource *regs;
+ struct resource_entry *win;
+ struct v3_pci *v3;
+ struct pci_host_bridge *host;
+ struct clk *clk;
+ u16 val;
+ int irq;
+ int ret;
+ LIST_HEAD(res);
+
+ host = pci_alloc_host_bridge(sizeof(*v3));
+ if (!host)
+ return -ENOMEM;
+
+ host->dev.parent = dev;
+ host->ops = &v3_pci_ops;
+ host->busnr = 0;
+ host->msi = NULL;
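+/*
+ * Config accesses to the root port (bus 0) are funneled through the
+ * 32-bit generic accessors; everything downstream uses the plain
+ * generic accessors.
+ */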
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ v3 = pci_host_bridge_priv(host);
+ host->sysdata = v3;
+ v3->dev = dev;
+
+ /* Get and enable host clock */
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "clock not found\n");
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "unable to enable clock\n");
+ return ret;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ v3->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->base))
+ return PTR_ERR(v3->base);
+ /*
+ * The hardware has a register with the physical base address
+ * of the V3 controller itself, verify that this is the same
+ * as the physical memory we've remapped it from.
+ */
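+ /*
+ * Example (illustrative): a controller whose registers sit at
+ * physical address 0x62000000 should read back 0x6200 here.
+ */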
+ if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16))
+ dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n",
+ readl(v3->base + V3_LB_IO_BASE), regs);
+
+ /* Configuration space is 16MB directly mapped */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (resource_size(regs) != SZ_16M) {
+ dev_err(dev, "config mem is not 16MB!\n");
+ return -EINVAL;
+ }
+ v3->config_mem = regs->start;
+ v3->config_base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(v3->config_base))
+ return PTR_ERR(v3->config_base);
+
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &io_base);
+ if (ret)
+ return ret;
+
+ ret = devm_request_pci_bus_resources(dev, &res);
+ if (ret)
+ return ret;
+
+ /* Get and request error IRQ resource */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "unable to obtain PCIv3 error IRQ\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, irq, v3_irq, 0,
+ "PCIv3 error", v3);
+ if (ret < 0) {
+ dev_err(dev,
+ "unable to request PCIv3 error IRQ %d (%d)\n",
+ irq, ret);
+ return ret;
+ }
+
+ /*
+ * Unlock V3 registers, but only if they were previously locked.
+ */
+ if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK)
+ writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM);
+
+ /* Disable all slave access while we set up the windows */
+ val = readw(v3->base + V3_PCI_CMD);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Put the PCI bus into reset */
+ val = readw(v3->base + V3_SYSTEM);
+ val &= ~V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /* Retry until we're ready */
+ val = readw(v3->base + V3_PCI_CFG);
+ val |= V3_PCI_CFG_M_RETRY_EN;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /* Set up the local bus protocol */
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */
+ val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */
+ val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */
+ val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */
+ writew(val, v3->base + V3_LB_CFG);
+
+ /* Enable the PCI bus master */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MASTER;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry(win, &res) {
+ ret = v3_pci_setup_resource(v3, io_base, host, win);
+ if (ret) {
+ dev_err(dev, "error setting up resources\n");
+ return ret;
+ }
+ }
+ ret = v3_pci_parse_map_dma_ranges(v3, np);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable PCI to host IO cycles, enable I/O buffers @3.3V,
+ * set AD_LOW0 to 1 if one of the LB_MAP registers chooses
+ * to use this (should be unused).
+ */
+ writel(0x00000000, v3->base + V3_PCI_IO_BASE);
+ val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS |
+ V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0;
+ /*
+ * DMA read and write from PCI bus commands types
+ */
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT;
+ val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT;
+ writew(val, v3->base + V3_PCI_CFG);
+
+ /*
+ * Set the V3 FIFO such that writes have higher priority than
+ * reads, and a local bus write flushes the local bus read FIFO
+ * on aperture 1. Same for PCI.
+ */
+ writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 |
+ V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1,
+ v3->base + V3_FIFO_PRIORITY);
+
+ /*
+ * Clear any error interrupts, and enable parity and write error
+ * interrupts
+ */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ val = readw(v3->base + V3_LB_CFG);
+ val |= V3_LB_CFG_LB_LB_INT;
+ writew(val, v3->base + V3_LB_CFG);
+ writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Special Integrator initialization */
+ if (of_device_is_compatible(np, "arm,integrator-ap-pci")) {
+ ret = v3_integrator_init(v3);
+ if (ret)
+ return ret;
+ }
+
+ /* Post-init: enable PCI memory and invalidate (master already on) */
+ val = readw(v3->base + V3_PCI_CMD);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE;
+ writew(val, v3->base + V3_PCI_CMD);
+
+ /* Clear pending interrupts */
+ writeb(0, v3->base + V3_LB_ISTAT);
+ /* Read or write errors and parity errors cause interrupts */
+ writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR,
+ v3->base + V3_LB_IMASK);
+
+ /* Take the PCI bus out of reset so devices can initialize */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_RST_OUT;
+ writew(val, v3->base + V3_SYSTEM);
+
+ /*
+ * Re-lock the system register.
+ */
+ val = readw(v3->base + V3_SYSTEM);
+ val |= V3_SYSTEM_M_LOCK;
+ writew(val, v3->base + V3_SYSTEM);
+
+ list_splice_init(&res, &host->windows);
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to register host: %d\n", ret);
+ return ret;
+ }
+ v3->bus = host->bus;
+
+ pci_bus_assign_resources(v3->bus);
+ pci_bus_add_devices(v3->bus);
+
+ return 0;
+}
+
+static const struct of_device_id v3_pci_of_match[] = {
+ {
+ .compatible = "v3,v360epc-pci",
+ },
+ {},
+};
+
+static struct platform_driver v3_pci_driver = {
+ .driver = {
+ .name = "pci-v3-semi",
+ .of_match_table = of_match_ptr(v3_pci_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = v3_pci_probe,
+};
+builtin_platform_driver(v3_pci_driver);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 087645116ecb..465aa2a1b38d 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -542,24 +542,6 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
- parser->end = parser->range + rlen / sizeof(__be32);
-
- return 0;
-}
-
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
struct device_node *np = port->node;
@@ -568,7 +550,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
struct device *dev = port->dev;
u8 ib_reg_mask = 0;
- if (pci_dma_range_parser_init(&parser, np)) {
+ if (of_pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
@@ -628,7 +610,7 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
-static int xgene_pcie_probe_bridge(struct platform_device *pdev)
+static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
@@ -709,7 +691,7 @@ static struct platform_driver xgene_pcie_driver = {
.of_match_table = of_match_ptr(xgene_pcie_match_table),
.suppress_bind_attrs = true,
},
- .probe = xgene_pcie_probe_bridge,
+ .probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b468b8cccf8d..5cc4f594d79a 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -105,7 +105,7 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
@@ -142,7 +142,7 @@ static bool altera_pcie_valid_device(struct altera_pcie *pcie,
{
/* If there is no link, then there is no device */
if (bus->number != pcie->root_bus_nr) {
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return false;
}
@@ -412,7 +412,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie)
/* Wait for link is up */
start_jiffies = jiffies;
for (;;) {
- if (altera_pcie_link_is_up(pcie))
+ if (altera_pcie_link_up(pcie))
break;
if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
@@ -427,7 +427,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie)
{
u16 linkcap, linkstat, linkctl;
- if (!altera_pcie_link_is_up(pcie))
+ if (!altera_pcie_link_up(pcie))
return;
/*
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 2d0f535a2f69..990fc906d73d 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -179,7 +179,7 @@ static struct irq_chip iproc_msi_irq_chip = {
static struct msi_domain_info iproc_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
.chip = &iproc_msi_irq_chip,
};
@@ -237,7 +237,7 @@ static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
+ msg->data = data->hwirq << 5;
}
static struct irq_chip iproc_msi_bottom_irq_chip = {
@@ -251,7 +251,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
void *args)
{
struct iproc_msi *msi = domain->host_data;
- int hwirq;
+ int hwirq, i;
mutex_lock(&msi->bitmap_lock);
@@ -267,10 +267,14 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
mutex_unlock(&msi->bitmap_lock);
- irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
- domain->host_data, handle_simple_irq, NULL, NULL);
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &iproc_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
- return 0;
+ return hwirq;
}
static void iproc_msi_irq_domain_free(struct irq_domain *domain,
@@ -302,7 +306,8 @@ static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
msg = (u32 *)(msi->eq_cpu + offs);
- hwirq = *msg & IPROC_MSI_EQ_MASK;
+ hwirq = readl(msg);
+ hwirq = (hwirq >> 5) + (hwirq & 0x1f);
/*
* Since we have multiple hwirq mapped to a single MSI vector,
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 3a8b9d20ee57..935909bbe5c4 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -1097,24 +1097,6 @@ err_ib:
return ret;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
struct of_pci_range range;
@@ -1122,7 +1104,7 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
int ret;
/* Get the dma-ranges from DT */
- ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+ ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node);
if (ret)
return ret;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4e0b25d09b0c..12796eccb2be 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1027,24 +1027,6 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
return 0;
}
-static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
- struct device_node *node)
-{
- const int na = 3, ns = 2;
- int rlen;
-
- parser->node = node;
- parser->pna = of_n_addr_cells(node);
- parser->np = parser->pna + na + ns;
-
- parser->range = of_get_property(node, "dma-ranges", &rlen);
- if (!parser->range)
- return -ENOENT;
-
- parser->end = parser->range + rlen / sizeof(__be32);
- return 0;
-}
-
static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
struct device_node *np)
{
@@ -1053,7 +1035,7 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
int index = 0;
int err;
- if (pci_dma_range_parser_init(&parser, np))
+ if (of_pci_dma_range_parser_init(&parser, np))
return -EINVAL;
/* Get the dma-ranges from DT */
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c
index 6bbb81f06a53..21a208da3f59 100644
--- a/drivers/pci/host/pcie-tango.c
+++ b/drivers/pci/host/pcie-tango.c
@@ -1,12 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+
+#define MSI_MAX 256
#define SMP8759_MUX 0x48
#define SMP8759_TEST_OUT 0x74
+#define SMP8759_DOORBELL 0x7c
+#define SMP8759_STATUS 0x80
+#define SMP8759_ENABLE 0xa0
struct tango_pcie {
- void __iomem *base;
+ DECLARE_BITMAP(used_msi, MSI_MAX);
+ u64 msi_doorbell;
+ spinlock_t used_msi_lock;
+ void __iomem *base;
+ struct irq_domain *dom;
+};
+
+static void tango_msi_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
+ unsigned long status, base, virq, idx, pos = 0;
+
+ chained_irq_enter(chip, desc);
+ spin_lock(&pcie->used_msi_lock);
+
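+ /*
+ * Each 32-bit STATUS register covers MSIs [base, base + 31],
+ * so scan the used_msi bitmap one 32-MSI bank at a time and
+ * handle every bit set in that bank's status word.
+ */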
+ while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
+ base = round_down(pos, 32);
+ status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
+ for_each_set_bit(idx, &status, 32) {
+ virq = irq_find_mapping(pcie->dom, base + idx);
+ generic_handle_irq(virq);
+ }
+ pos = base + 32;
+ }
+
+ spin_unlock(&pcie->used_msi_lock);
+ chained_irq_exit(chip, desc);
+}
+
+static void tango_ack(struct irq_data *d)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+
+ writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
+}
+
+static void update_msi_enable(struct irq_data *d, bool unmask)
+{
+ unsigned long flags;
+ struct tango_pcie *pcie = d->chip_data;
+ u32 offset = (d->hwirq / 32) * 4;
+ u32 bit = BIT(d->hwirq % 32);
+ u32 val;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset);
+ val = unmask ? val | bit : val & ~bit;
+ writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static void tango_mask(struct irq_data *d)
+{
+ update_msi_enable(d, false);
+}
+
+static void tango_unmask(struct irq_data *d)
+{
+ update_msi_enable(d, true);
+}
+
+static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct tango_pcie *pcie = d->chip_data;
+ msg->address_lo = lower_32_bits(pcie->msi_doorbell);
+ msg->address_hi = upper_32_bits(pcie->msi_doorbell);
+ msg->data = d->hwirq;
+}
+
+static struct irq_chip tango_chip = {
+ .irq_ack = tango_ack,
+ .irq_mask = tango_mask,
+ .irq_unmask = tango_unmask,
+ .irq_set_affinity = tango_set_affinity,
+ .irq_compose_msi_msg = tango_compose_msi_msg,
+};
+
+static void msi_ack(struct irq_data *d)
+{
+ irq_chip_ack_parent(d);
+}
+
+static void msi_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void msi_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip msi_chip = {
+ .name = "MSI",
+ .irq_ack = msi_ack,
+ .irq_mask = msi_mask,
+ .irq_unmask = msi_unmask,
+};
+
+static struct msi_domain_info msi_dom_info = {
+ .flags = MSI_FLAG_PCI_MSIX
+ | MSI_FLAG_USE_DEF_DOM_OPS
+ | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .chip = &msi_chip,
+};
+
+static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct tango_pcie *pcie = dom->host_data;
+ unsigned long flags;
+ int pos;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
+ if (pos >= MSI_MAX) {
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ return -ENOSPC;
+ }
+ __set_bit(pos, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+ irq_domain_set_info(dom, virq, pos, &tango_chip,
+ pcie, handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ unsigned long flags;
+ struct irq_data *d = irq_domain_get_irq_data(dom, virq);
+ struct tango_pcie *pcie = d->chip_data;
+
+ spin_lock_irqsave(&pcie->used_msi_lock, flags);
+ __clear_bit(d->hwirq, pcie->used_msi);
+ spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
+}
+
+static const struct irq_domain_ops dom_ops = {
+ .alloc = tango_irq_domain_alloc,
+ .free = tango_irq_domain_free,
};
static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -76,7 +237,11 @@ static int tango_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct tango_pcie *pcie;
struct resource *res;
- int ret;
+ struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct irq_domain *msi_dom, *irq_dom;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ int virq, offset;
dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -95,6 +260,41 @@ static int tango_pcie_probe(struct platform_device *pdev)
if (!tango_pcie_link_up(pcie))
return -ENODEV;
+ if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
+ return -ENOENT;
+
+ if (of_pci_range_parser_one(&parser, &range) == NULL)
+ return -ENOENT;
+
+ range.pci_addr += range.size;
+ pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;
+
+ for (offset = 0; offset < MSI_MAX / 8; offset += 4)
+ writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);
+
+ virq = platform_get_irq(pdev, 1);
+ if (virq <= 0) {
+ dev_err(dev, "Failed to map IRQ\n");
+ return -ENXIO;
+ }
+
+ irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
+ if (!irq_dom) {
+ dev_err(dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
+ if (!msi_dom) {
+ dev_err(dev, "Failed to create MSI domain\n");
+ irq_domain_remove(irq_dom);
+ return -ENOMEM;
+ }
+
+ pcie->dom = irq_dom;
+ spin_lock_init(&pcie->used_msi_lock);
+ irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);
+
return pci_host_common_probe(pdev, &smp8759_ecam_ops);
}
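
A minimal sketch of the register arithmetic the SMP8759 MSI handlers above rely on, assuming the same layout of one 32-bit STATUS/ENABLE word per 32 MSIs; the helper names are illustrative, not part of the driver:

#include <linux/bitops.h>
#include <linux/io.h>

#define MSI_MAX			256
#define SMP8759_STATUS		0x80
#define SMP8759_ENABLE		0xa0

/* One 32-bit word covers 32 MSIs: word offset = (hwirq / 32) * 4, bit = hwirq % 32. */
static inline u32 example_msi_word(unsigned int hwirq)
{
	return (hwirq / 32) * 4;
}

static inline u32 example_msi_bit(unsigned int hwirq)
{
	return BIT(hwirq % 32);
}

/* Acknowledge one MSI by writing its bit into the matching STATUS word. */
static void example_ack_msi(void __iomem *base, unsigned int hwirq)
{
	writel_relaxed(example_msi_bit(hwirq),
		       base + SMP8759_STATUS + example_msi_word(hwirq));
}
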
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 94e13cb8608f..7b5325990f5e 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -129,7 +129,7 @@ static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
writel(val, port->reg_base + reg);
}
-static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
{
return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
@@ -165,7 +165,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
/* Check if link is up when trying to access downstream ports */
if (bus->number != port->root_busno)
- if (!xilinx_pcie_link_is_up(port))
+ if (!xilinx_pcie_link_up(port))
return false;
/* Only one device down on each root port */
@@ -541,7 +541,7 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
- if (xilinx_pcie_link_is_up(port))
+ if (xilinx_pcie_link_up(port))
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(dev, "PCIe Link is DOWN\n");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
deleted file mode 100644
index c68366cee6b7..000000000000
--- a/drivers/pci/hotplug-pci.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Core PCI functionality used only by PCI hotplug */
-
-#include <linux/pci.h>
-#include <linux/export.h>
-#include "pci.h"
-
-int pci_hp_add_bridge(struct pci_dev *dev)
-{
- struct pci_bus *parent = dev->bus;
- int pass, busnr, start = parent->busn_res.start;
- int end = parent->busn_res.end;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent), busnr))
- break;
- }
- if (busnr-- > end) {
- printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
- pci_name(dev));
- return -1;
- }
- for (pass = 0; pass < 2; pass++)
- busnr = pci_scan_bridge(parent, dev, busnr, pass);
- if (!dev->subordinate)
- return -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index e33cdda45a4d..7e3331603714 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux kernel pci hotplug controller drivers.
#
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 5ed2dcaa8e27..5db6f1839dad 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -462,18 +462,15 @@ static void enable_slot(struct acpiphp_slot *slot)
acpiphp_rescan_slot(slot);
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
- list_for_each_entry(dev, &bus->devices, bus_list) {
+ for_each_pci_bridge(dev, bus) {
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
- if (pci_is_bridge(dev)) {
- max = pci_scan_bridge(bus, dev, max, pass);
- if (pass && dev->subordinate) {
- check_hotplug_bridge(slot, dev);
- pcibios_resource_survey_bus(dev->subordinate);
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
- }
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
}
}
}
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 80c80017197d..f616358fa938 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -286,14 +286,11 @@ int cpci_configure_slot(struct slot *slot)
}
parent = slot->dev->bus;
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == PCI_SLOT(slot->devfn))
pci_hp_add_bridge(dev);
}
-
pci_assign_unassigned_bridge_resources(parent->self);
pci_bus_add_devices(parent);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 48c8a066a6b7..c2bbe6b65d06 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -410,7 +410,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl);
void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-void cpqhp_pushbutton_thread(unsigned long event_pointer);
+void cpqhp_pushbutton_thread(struct timer_list *t);
irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
int cpqhp_find_available_resources(struct controller *ctrl,
void __iomem *rom_start);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4d06b8461255..70967ac75265 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -661,9 +661,8 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->p_sm_slot = slot_entry;
- init_timer(&slot->task_event);
+ timer_setup(&slot->task_event, cpqhp_pushbutton_thread, 0);
slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
/*FIXME: these capabilities aren't used but if they are
* they need to be correctly implemented
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index a55653b54eed..a93069e739cb 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -47,7 +47,7 @@ static void interrupt_event_handler(struct controller *ctrl);
static struct task_struct *cpqhp_event_thread;
-static unsigned long pushbutton_pending; /* = 0 */
+static struct timer_list *pushbutton_pending; /* = NULL */
/* delay is in jiffies to wait for */
static void long_delay(int delay)
@@ -1732,9 +1732,10 @@ static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controll
return 0;
}
-static void pushbutton_helper_thread(unsigned long data)
+static void pushbutton_helper_thread(struct timer_list *t)
{
- pushbutton_pending = data;
+ pushbutton_pending = t;
+
wake_up_process(cpqhp_event_thread);
}
@@ -1883,13 +1884,13 @@ static void interrupt_event_handler(struct controller *ctrl)
wait_for_ctrl_irq(ctrl);
mutex_unlock(&ctrl->crit_sect);
- init_timer(&p_slot->task_event);
+ timer_setup(&p_slot->task_event,
+ pushbutton_helper_thread,
+ 0);
p_slot->hp_slot = hp_slot;
p_slot->ctrl = ctrl;
/* p_slot->physical_slot = physical_slot; */
p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
- p_slot->task_event.function = pushbutton_helper_thread;
- p_slot->task_event.data = (u32) p_slot;
dbg("add_timer p_slot = %p\n", p_slot);
add_timer(&p_slot->task_event);
@@ -1920,15 +1921,15 @@ static void interrupt_event_handler(struct controller *ctrl)
* Scheduled procedure to handle blocking stuff for the pushbuttons.
* Handles all pending events and exits.
*/
-void cpqhp_pushbutton_thread(unsigned long slot)
+void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
u8 device;
struct pci_func *func;
- struct slot *p_slot = (struct slot *) slot;
+ struct slot *p_slot = from_timer(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
- pushbutton_pending = 0;
+ pushbutton_pending = NULL;
hp_slot = p_slot->hp_slot;
device = p_slot->device;
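
The timer conversions in this and the following hunks all follow the same pattern: the timer_list is embedded in its owning structure, timer_setup() registers the callback, and from_timer() recovers the container inside it. A self-contained sketch of that pattern, with illustrative structure and function names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct example_slot {
	int hp_slot;
	struct timer_list task_event;	/* timer embedded in its owner */
};

static void example_timeout(struct timer_list *t)
{
	/* Recover the owning structure from the embedded timer_list. */
	struct example_slot *slot = from_timer(slot, t, task_event);

	pr_info("pushbutton timeout for slot %d\n", slot->hp_slot);
}

static void example_arm(struct example_slot *slot)
{
	timer_setup(&slot->task_event, example_timeout, 0);
	slot->task_event.expires = jiffies + 5 * HZ;	/* 5 second delay */
	add_timer(&slot->task_event);
}
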
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index dc1876feb06f..25edd0b18b75 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -1267,20 +1267,19 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
size = size & 0xFFFFFFFC;
size = ~size + 1;
end_address = start_address + size - 1;
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address, &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
ibmphp_remove_resource(io);
/* This is needed b/c of the old I/O restrictions in the BIOS */
while (temp_end < end_address) {
- if (ibmphp_find_resource(bus, start_address, &io, IO) < 0) {
- err("cannot find corresponding IO resource to remove\n");
- return -EIO;
- }
+ if (ibmphp_find_resource(bus, start_address,
+ &io, IO))
+ goto report_search_failure;
+
debug("io->start = %x\n", io->start);
temp_end = io->end;
start_address = io->end + 1;
@@ -1327,6 +1326,10 @@ static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
} /* end of for */
return 0;
+
+report_search_failure:
+ err("cannot find corresponding IO resource to remove\n");
+ return -EIO;
}
static int unconfigure_boot_bridge(u8 busno, u8 device, u8 function)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index ec0b4c11ccd9..83f3d4af3677 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -113,10 +113,11 @@ static int board_added(struct slot *p_slot)
retval = pciehp_configure_device(p_slot);
if (retval) {
- ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
- pci_domain_nr(parent), parent->number);
- if (retval != -EEXIST)
+ if (retval != -EEXIST) {
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
goto err_exit;
+ }
}
pciehp_green_led_on(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index e5d5ce9e3010..7bab0606f1a9 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -50,14 +50,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
/* This is the interrupt polling timeout function. */
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
pcie_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!pciehp_poll_time)
pciehp_poll_time = 2; /* default polling interval is 2 sec */
@@ -71,8 +70,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -83,7 +80,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
/* Install interrupt polling timer. Start with 10 sec delay */
if (pciehp_poll_mode) {
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
return 0;
}
@@ -764,8 +761,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
if (pciehp_poll_mode)
- int_poll_timeout(ctrl->poll_timer.data);
-
+ int_poll_timeout(&ctrl->poll_timer);
return 0;
}
@@ -795,7 +791,7 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
- slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ slot->wq = alloc_ordered_workqueue("pciehp-%u", 0, PSN(ctrl));
if (!slot->wq)
goto abort;
@@ -862,11 +858,16 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
- /* Clear all remaining event bits in Slot Status register */
+ /*
+ * Clear all remaining event bits in Slot Status register except
+ * Presence Detect Changed. We want to make sure a possible
+ * hotplug event is triggered when the interrupt is unmasked so
+ * that we don't lose that event.
+ */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
- PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
- PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 19f30a9f461d..2a1ca020cf5a 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -46,7 +46,11 @@ int pciehp_configure_device(struct slot *p_slot)
dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
- ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ /*
+ * The device is already there, either configured by the
+ * boot firmware or left over from a previous hotplug event.
+ */
+ ctrl_dbg(ctrl, "Device %s already exists at %04x:%02x:00, skipping hot-add\n",
pci_name(dev), pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
ret = -EEXIST;
@@ -60,9 +64,8 @@ int pciehp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list)
- if (pci_is_bridge(dev))
- pci_hp_add_bridge(dev);
+ for_each_pci_bridge(dev, parent)
+ pci_hp_add_bridge(dev);
pci_assign_unassigned_bridge_resources(bridge);
pcie_bus_configure_settings(parent);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index e5824c7b7b6b..4810e9626d9f 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -229,14 +229,13 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
/*
* This is the interrupt polling timeout function.
*/
-static void int_poll_timeout(unsigned long data)
+static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = (struct controller *)data;
+ struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
- init_timer(&ctrl->poll_timer);
if (!shpchp_poll_time)
shpchp_poll_time = 2; /* default polling interval is 2 sec */
@@ -252,8 +251,6 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
if ((sec <= 0) || (sec > 60))
sec = 2;
- ctrl->poll_timer.function = &int_poll_timeout;
- ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
@@ -1054,7 +1051,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (shpchp_poll_mode) {
/* Install interrupt polling timer. Start with 10 sec delay */
- init_timer(&ctrl->poll_timer);
+ timer_setup(&ctrl->poll_timer, int_poll_timeout, 0);
start_int_poll_timer(ctrl, 10);
} else {
/* Installs the interrupt handler */
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index f8cd3a27e351..ea63db58b4b1 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -61,10 +61,8 @@ int shpchp_configure_device(struct slot *p_slot)
goto out;
}
- list_for_each_entry(dev, &parent->devices, bus_list) {
- if (PCI_SLOT(dev->devfn) != p_slot->device)
- continue;
- if (pci_is_bridge(dev))
+ for_each_pci_bridge(dev, parent) {
+ if (PCI_SLOT(dev->devfn) == p_slot->device)
pci_hp_add_bridge(dev);
}
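
The hotplug conversions above replace open-coded bus walks with for_each_pci_bridge(). Assuming the iterator is simply the usual device-list walk with a bridge filter, the old and new forms are equivalent to roughly the following (function names are illustrative):

#include <linux/pci.h>

/* Open-coded form, as the old hotplug code did it: */
static void example_add_bridges_old(struct pci_bus *parent)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &parent->devices, bus_list) {
		if (!pci_is_bridge(dev))
			continue;
		pci_hp_add_bridge(dev);
	}
}

/* Equivalent form with the iterator used by the hunks above: */
static void example_add_bridges_new(struct pci_bus *parent)
{
	struct pci_dev *dev;

	for_each_pci_bridge(dev, parent)
		pci_hp_add_bridge(dev);
}
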
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
deleted file mode 100644
index 7eb4109a3df4..000000000000
--- a/drivers/pci/htirq.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * File: htirq.c
- * Purpose: Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained. But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
- unsigned long flags;
-
- spin_lock_irqsave(&ht_irq_lock, flags);
- if (cfg->msg.address_lo != msg->address_lo) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
- }
- if (cfg->msg.address_hi != msg->address_hi) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
- }
- if (cfg->update)
- cfg->update(cfg->dev, irq, msg);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
- cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
- *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo |= 1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo &= ~1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
- int max_irq, pos, irq;
- unsigned long flags;
- u32 data;
-
- pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
- if (!pos)
- return -EINVAL;
-
- /* Verify the idx I want to use is in range */
- spin_lock_irqsave(&ht_irq_lock, flags);
- pci_write_config_byte(dev, pos + 2, 1);
- pci_read_config_dword(dev, pos + 4, &data);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
-
- max_irq = (data >> 16) & 0xff;
- if (idx > max_irq)
- return -EINVAL;
-
- irq = arch_setup_ht_irq(idx, pos, dev, update);
- if (irq > 0)
- dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
- return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
- return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence. The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
- arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ac41c8be9200..6bacb8995e96 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -113,7 +113,7 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
-int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
int i;
int rc = -ENOMEM;
@@ -134,7 +134,7 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
- pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
+ virtfn->device = iov->vf_device;
rc = pci_setup_device(virtfn);
if (rc)
goto failed0;
@@ -157,12 +157,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
BUG_ON(rc);
}
- if (reset)
- __pci_reset_function(virtfn);
-
pci_device_add(virtfn, virtfn->bus);
- pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
if (rc)
@@ -173,6 +169,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+ pci_bus_add_device(virtfn);
+
return 0;
failed2:
@@ -187,7 +185,7 @@ failed:
return rc;
}
-void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
@@ -198,11 +196,6 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
if (!virtfn)
return;
- if (reset) {
- device_release_driver(&virtfn->dev);
- __pci_reset_function(virtfn);
- }
-
sprintf(buf, "virtfn%u", id);
sysfs_remove_link(&dev->dev.kobj, buf);
/*
@@ -317,7 +310,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
pci_cfg_access_unlock(dev);
for (i = 0; i < initial; i++) {
- rc = pci_iov_add_virtfn(dev, i, 0);
+ rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
@@ -329,7 +322,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
failed:
while (i--)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
err_pcibios:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
@@ -356,7 +349,7 @@ static void sriov_disable(struct pci_dev *dev)
return;
for (i = 0; i < iov->num_VFs; i++)
- pci_iov_remove_virtfn(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
@@ -449,6 +442,7 @@ found:
iov->nres = nres;
iov->ctrl = ctrl;
iov->total_VFs = total;
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device);
iov->pgsz = pgsz;
iov->self = dev;
iov->drivers_autoprobe = true;
@@ -504,6 +498,14 @@ static void sriov_restore_state(struct pci_dev *dev)
if (ctrl & PCI_SRIOV_CTRL_VFE)
return;
+ /*
+ * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because
+ * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI.
+ */
+ ctrl &= ~PCI_SRIOV_CTRL_ARI;
+ ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI;
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
+
for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
pci_update_resource(dev, i);
@@ -724,7 +726,7 @@ int pci_vfs_assigned(struct pci_dev *dev)
* determine the device ID for the VFs, the vendor ID will be the
* same as the PF so there is no need to check for that one
*/
- pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
+ dev_id = dev->sriov->vf_device;
/* loop through all the VFs to see if we own any that are assigned */
vfdev = pci_get_device(dev->vendor, dev_id, NULL);
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 83d30953ce19..10929cd43d5d 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI IRQ handling code
*
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 496ed9130600..e06607167858 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1441,6 +1441,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
pci_msi_domain_update_chip_ops(info);
info->flags |= MSI_FLAG_ACTIVATE_EARLY;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ info->flags |= MSI_FLAG_MUST_REACTIVATE;
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a8da543b3814..4708eb9df71b 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
union acpi_object *obj;
struct pci_host_bridge *bridge;
- if (acpi_pci_disabled || !bus->bridge)
+ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
return;
acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 11bd267fc137..7f47bb72bf30 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -680,17 +680,13 @@ static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
- /*
- * Devices having power.ignore_children set may still be necessary for
- * suspending their children in the next phase of device suspend.
- */
- if (dev->power.ignore_children)
- pm_runtime_resume(dev);
-
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
- if (error)
+ if (error < 0)
return error;
+
+ if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+ return 0;
}
return pci_dev_keep_suspended(to_pci_dev(dev));
}
@@ -731,18 +727,25 @@ static int pci_pm_suspend(struct device *dev)
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/*
- * PCI devices suspended at run time need to be resumed at this point,
- * because in general it is necessary to reconfigure them for system
- * suspend. Namely, if the device is supposed to wake up the system
- * from the sleep state, we may need to reconfigure it for this purpose.
- * In turn, if the device is not supposed to wake up the system from the
- * sleep state, we'll have to prevent it from signaling wake-up.
+ * PCI devices suspended at run time may need to be resumed at this
+ * point, because in general it may be necessary to reconfigure them for
+ * system suspend. Namely, if the device is expected to wake up the
+ * system from the sleep state, it may have to be reconfigured for this
+ * purpose, or if the device is not expected to wake up the system from
+ * the sleep state, it should be prevented from signaling wakeup events
+ * going forward.
+ *
+ * Also if the driver of the device does not indicate that its system
+ * suspend callbacks can cope with runtime-suspended devices, it is
+ * better to resume the device from runtime suspend here.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->suspend) {
@@ -762,17 +765,27 @@ static int pci_pm_suspend(struct device *dev)
}
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
-
return 0;
}
+static int pci_pm_suspend_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_suspend_late(dev);
+}
+
static int pci_pm_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -805,6 +818,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_prepare_to_sleep(pci_dev);
}
+ dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_power_name(pci_dev->current_state));
+
pci_pm_set_unknown_state(pci_dev);
/*
@@ -831,6 +847,14 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+ * during system suspend, so update their runtime PM status to "active"
+ * as they are going to be put into D0 shortly.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -873,6 +897,7 @@ static int pci_pm_resume(struct device *dev)
#else /* !CONFIG_SUSPEND */
#define pci_pm_suspend NULL
+#define pci_pm_suspend_late NULL
#define pci_pm_suspend_noirq NULL
#define pci_pm_resume NULL
#define pci_pm_resume_noirq NULL
@@ -907,7 +932,8 @@ static int pci_pm_freeze(struct device *dev)
* devices should not be touched during freeze/thaw transitions,
* however.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->freeze) {
@@ -919,17 +945,25 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
- if (pcibios_pm_ops.freeze)
- return pcibios_pm_ops.freeze(dev);
-
return 0;
}
+static int pci_pm_freeze_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_late(dev);
+}
+
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
@@ -959,6 +993,16 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * If the device is in runtime suspend, the code below may not work
+ * correctly with it, so skip that code and make the PM core skip all of
+ * the subsequent "thaw" callbacks for the device.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.direct_complete = true;
+ return 0;
+ }
+
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
if (error)
@@ -983,12 +1027,6 @@ static int pci_pm_thaw(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.thaw) {
- error = pcibios_pm_ops.thaw(dev);
- if (error)
- return error;
- }
-
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -1014,11 +1052,13 @@ static int pci_pm_poweroff(struct device *dev)
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->poweroff) {
@@ -1030,13 +1070,17 @@ static int pci_pm_poweroff(struct device *dev)
return error;
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
+ return 0;
+}
- if (pcibios_pm_ops.poweroff)
- return pcibios_pm_ops.poweroff(dev);
+static int pci_pm_poweroff_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
- return 0;
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_poweroff_late(dev);
}
static int pci_pm_poweroff_noirq(struct device *dev)
@@ -1044,6 +1088,9 @@ static int pci_pm_poweroff_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(to_pci_dev(dev)))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
@@ -1085,6 +1132,10 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ /* This is analogous to the pci_pm_resume_noirq() case. */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
if (error)
@@ -1108,12 +1159,6 @@ static int pci_pm_restore(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.restore) {
- error = pcibios_pm_ops.restore(dev);
- if (error)
- return error;
- }
-
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
@@ -1139,10 +1184,12 @@ static int pci_pm_restore(struct device *dev)
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
+#define pci_pm_freeze_late NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
#define pci_pm_poweroff NULL
+#define pci_pm_poweroff_late NULL
#define pci_pm_poweroff_noirq NULL
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
@@ -1258,10 +1305,13 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
.complete = pci_pm_complete,
.suspend = pci_pm_suspend,
+ .suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
+ .freeze_late = pci_pm_freeze_late,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
+ .poweroff_late = pci_pm_poweroff_late,
.restore = pci_pm_restore,
.suspend_noirq = pci_pm_suspend_noirq,
.resume_noirq = pci_pm_resume_noirq,
@@ -1466,6 +1516,7 @@ struct bus_type pci_bus_type = {
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
.num_vf = pci_bus_num_vf,
+ .force_dma = true,
};
EXPORT_SYMBOL(pci_bus_type);
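
The pci_pm_* changes above only skip the late and noirq phases for devices whose drivers opt in through the new driver PM flags. A sketch of how a driver might opt in, assuming the dev_pm_set_driver_flags() helper from the same PM series:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/*
 * The driver promises that its ->prepare() return value may be used to
 * leave the device suspended (SMART_PREPARE) and that its suspend
 * callbacks cope with a runtime-suspended device (SMART_SUSPEND).
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
	return 0;
}
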
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 7e9e79575d93..a961a71d950f 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Purpose: Export the firmware instance and label associated with
* a pci device to sysfs
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8e075ea2743e..06c7f0b85cd2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pci/pci-sysfs.c
*
@@ -648,6 +649,33 @@ exit:
return count;
}
+static ssize_t sriov_offset_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->offset);
+}
+
+static ssize_t sriov_stride_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->stride);
+}
+
+static ssize_t sriov_vf_device_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%x\n", pdev->sriov->vf_device);
+}
+
static ssize_t sriov_drivers_autoprobe_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -676,6 +704,9 @@ static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
static struct device_attribute sriov_numvfs_attr =
__ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_numvfs_show, sriov_numvfs_store);
+static struct device_attribute sriov_offset_attr = __ATTR_RO(sriov_offset);
+static struct device_attribute sriov_stride_attr = __ATTR_RO(sriov_stride);
+static struct device_attribute sriov_vf_device_attr = __ATTR_RO(sriov_vf_device);
static struct device_attribute sriov_drivers_autoprobe_attr =
__ATTR(sriov_drivers_autoprobe, (S_IRUGO|S_IWUSR|S_IWGRP),
sriov_drivers_autoprobe_show, sriov_drivers_autoprobe_store);
@@ -1748,6 +1779,9 @@ static const struct attribute_group pci_dev_hp_attr_group = {
static struct attribute *sriov_dev_attrs[] = {
&sriov_totalvfs_attr.attr,
&sriov_numvfs_attr.attr,
+ &sriov_offset_attr.attr,
+ &sriov_stride_attr.attr,
+ &sriov_vf_device_attr.attr,
&sriov_drivers_autoprobe_attr.attr,
NULL,
};
@@ -1795,6 +1829,6 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
NULL,
};
-struct device_type pci_dev_type = {
+const struct device_type pci_dev_type = {
.groups = pci_dev_attr_groups,
};
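
The new sriov_offset, sriov_stride and sriov_vf_device attributes are plain read-only files under the PF's sysfs directory; sriov_vf_device prints in hex, per the show function above. A small userspace sketch reading it (the device address is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int vf_device;
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/sriov_vf_device", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%x", &vf_device) == 1)
		printf("VF device ID: 0x%04x\n", vf_device);
	fclose(f);
	return 0;
}
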
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6078dfc11b11..4a7c6864fdf4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2166,8 +2166,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev, wakeup) != pci_dev->current_state
- || platform_pci_need_resume(pci_dev)
- || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
+ || platform_pci_need_resume(pci_dev))
return false;
/*
@@ -2966,6 +2965,107 @@ bool pci_acs_path_enabled(struct pci_dev *start,
}
/**
+ * pci_rebar_find_pos - find position of resize ctrl reg for BAR
+ * @pdev: PCI device
+ * @bar: BAR to find
+ *
+ * Helper to find the position of the ctrl register for a BAR.
+ * Returns -ENOTSUPP if resizable BARs are not supported at all.
+ * Returns -ENOENT if no ctrl register for the BAR could be found.
+ */
+static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ if (!pos)
+ return -ENOTSUPP;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_idx == bar)
+ return pos;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pci_rebar_get_possible_sizes - get possible sizes for BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Get the possible sizes of a resizable BAR as bitmask defined in the spec
+ * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ */
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 cap;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+ return (cap & PCI_REBAR_CAP_SIZES) >> 4;
+}
+
+/**
+ * pci_rebar_get_current_size - get the current size of a BAR
+ * @pdev: PCI device
+ * @bar: BAR to set size to
+ *
+ * Read the size of a BAR from the resizable BAR config.
+ * Returns size if found or negative error code.
+ */
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
+}
+
+/**
+ * pci_rebar_set_size - set a new size for a BAR
+ * @pdev: PCI device
+ * @bar: BAR to set size to
+ * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ *
+ * Set the new size of a BAR as defined in the spec.
+ * Returns zero if resizing was successful, error code otherwise.
+ */
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= size << 8;
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ return 0;
+}
+
+/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
@@ -3471,7 +3571,7 @@ EXPORT_SYMBOL(devm_pci_remap_cfgspace);
* All operations are managed and will be undone on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example:
+ * on failure. Usage example::
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_pci_remap_cfg_resource(&pdev->dev, res);
@@ -4146,35 +4246,6 @@ static void pci_dev_restore(struct pci_dev *dev)
}
/**
- * __pci_reset_function - reset a PCI device function
- * @dev: PCI device to reset
- *
- * Some devices allow an individual function to be reset without affecting
- * other functions in the same device. The PCI device must be responsive
- * to PCI config space in order to use this function.
- *
- * The device function is presumed to be unused when this function is called.
- * Resetting the device will make the contents of PCI configuration space
- * random, so any caller of this must be prepared to reinitialise the
- * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
- * etc.
- *
- * Returns 0 if the device function was successfully reset or negative if the
- * device doesn't support resetting a single function.
- */
-int __pci_reset_function(struct pci_dev *dev)
-{
- int ret;
-
- pci_dev_lock(dev);
- ret = __pci_reset_function_locked(dev);
- pci_dev_unlock(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_reset_function);
-
-/**
* __pci_reset_function_locked - reset a PCI device function while holding
* the @dev mutex lock.
* @dev: PCI device to reset
@@ -4199,6 +4270,14 @@ int __pci_reset_function_locked(struct pci_dev *dev)
might_sleep();
+ /*
+ * A reset method returns -ENOTTY if it doesn't support this device
+ * and we should try the next method.
+ *
+ * If it returns 0 (success), we're finished. If it returns any
+ * other error, we're also finished: this indicates that further
+ * reset mechanisms might be broken on the device.
+ */
rc = pci_dev_specific_reset(dev, 0);
if (rc != -ENOTTY)
return rc;
@@ -4264,8 +4343,8 @@ int pci_probe_reset_function(struct pci_dev *dev)
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function in that it saves and restores device state
- * over the reset.
+ * from __pci_reset_function_locked() in that it saves and restores device state
+ * over the reset and takes the PCI device lock.
*
* Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
@@ -4300,7 +4379,7 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from __pci_reset_function() in that it saves and restores device state
+ * from __pci_reset_function_locked() in that it saves and restores device state
* over the reset. It also differs from pci_reset_function() in that it
* requires the PCI device lock to be held.
*
@@ -4356,6 +4435,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
{
struct pci_dev *dev;
+
+ if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resetable(dev->subordinate)))
@@ -4420,6 +4503,10 @@ static bool pci_slot_resetable(struct pci_slot *slot)
{
struct pci_dev *dev;
+ if (slot->bus->self &&
+ (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a6560c9baa52..f6b58b32a67c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
@@ -192,7 +193,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern struct device_type pci_dev_type;
+extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
@@ -263,6 +264,7 @@ struct pci_sriov {
u16 num_VFs; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
+ u16 vf_device; /* VF device ID */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u8 max_VF_buses; /* max buses consumed by VFs */
@@ -366,4 +368,12 @@ int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
struct resource *res);
#endif
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
+static inline u64 pci_rebar_size_to_bytes(int size)
+{
+ return 1ULL << (size + 20);
+}
+
#endif /* DRIVERS_PCI_H */
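
The resizable-BAR helpers added above use the size encoding documented in pci.c: size code n means 2^(n + 20) bytes, so code 0 is 1 MB and code 19 is 512 GB. A sketch of a caller inside drivers/pci picking the largest supported size (the function name is illustrative):

#include <linux/bitops.h>
#include <linux/pci.h>
#include "pci.h"

static int example_grow_bar(struct pci_dev *pdev, int bar)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
	int size;

	if (!sizes)
		return -ENOTSUPP;		/* BAR is not resizable */

	size = __fls(sizes);			/* largest supported size code */
	dev_info(&pdev->dev, "BAR %d can grow to %llu bytes\n",
		 bar, (unsigned long long)pci_rebar_size_to_bytes(size));

	return pci_rebar_set_size(pdev, bar, size);
}
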
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 36e35ea8fde7..223e4c34c29a 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PCI-Express PORT Driver
#
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 2cba67510dc8..09bd890875a3 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PCI-Express Root Port Advanced Error Reporting Driver
#
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index d51e4a57b190..5449e5ce139d 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 01906576ab91..b2019440e882 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Access ACPI _OSC method
*
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 890efcc574cb..744805232155 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -390,7 +390,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
*/
- pci_walk_bus(dev->bus, cb, &result_data);
+ if (state == pci_channel_io_normal)
+ /*
+ * the error is non fatal so the bus is ok, just invoke
+ * the callback for the function that logged the error.
+ */
+ cb(dev, &result_data);
+ else
+ pci_walk_bus(dev->bus, cb, &result_data);
}
return result_data.result;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 1dfa10cc566b..9783e10da3a9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* File: drivers/pci/pcie/aspm.c
* Enabling PCIe link L0s/L1 state and Clock Power Management
@@ -450,24 +451,25 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
- /* Choose the greater of the two T_cmn_mode_rstr_time */
- val1 = (upreg->l1ss_cap >> 8) & 0xFF;
- val2 = (upreg->l1ss_cap >> 8) & 0xFF;
+ /* Choose the greater of the two Port Common_Mode_Restore_Times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
if (val1 > val2)
link->l1ss.ctl1 |= val1 << 8;
else
link->l1ss.ctl1 |= val2 << 8;
+
/*
* We currently use LTR L1.2 threshold to be fixed constant picked from
* Intel's coreboot.
*/
link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
- /* Choose the greater of the two T_pwr_on */
- val1 = (upreg->l1ss_cap >> 19) & 0x1F;
- scale1 = (upreg->l1ss_cap >> 16) & 0x03;
- val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
- scale2 = (dwreg->l1ss_cap >> 16) & 0x03;
+ /* Choose the greater of the two Port T_POWER_ON times */
+ val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
if (calc_l1ss_pwron(link->pdev, scale1, val1) >
calc_l1ss_pwron(link->downstream, scale2, val2))
@@ -646,21 +648,26 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
if (enable_req & ASPM_STATE_L1_2_MASK) {
- /* Program T_pwr_on in both ports */
+ /* Program T_POWER_ON times in both ports */
pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
link->l1ss.ctl2);
- /* Program T_cmn_mode in parent */
+ /* Program Common_Mode_Restore_Time in upstream device */
pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- 0xFF00, link->l1ss.ctl1);
-
- /* Program LTR L1.2 threshold in both ports */
- pci_clear_and_set_dword(parent, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_CM_RESTORE_TIME,
+ link->l1ss.ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- 0xE3FF0000, link->l1ss.ctl1);
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ link->l1ss.ctl1);
}
val = 0;
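
The T_POWER_ON hunk above compares calc_l1ss_pwron(scale, value) for the two ends of the link. Assuming the usual L1 Substates encoding, where the 5-bit value is multiplied by a unit chosen by the 2-bit scale (0 = 2 us, 1 = 10 us, 2 = 100 us, others reserved), that computation looks roughly like:

#include <linux/pci.h>

static u32 example_t_power_on_us(u32 l1ss_cap)
{
	u32 val = (l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	u32 scale = (l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	switch (scale) {
	case 0:
		return val * 2;
	case 1:
		return val * 10;
	case 2:
		return val * 100;
	default:
		return 0;	/* reserved scale encoding */
	}
}
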
@@ -802,10 +809,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
- * hierarchies.
+ * hierarchies. Note that some PCIe host implementations omit
+ * the root ports entirely, in which case a downstream port on
+ * a switch may become the root of the link state chain for all
+ * its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+ !pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;
@@ -1060,7 +1071,8 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_disable_link_state);
-static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+static int pcie_aspm_set_policy(const char *val,
+ const struct kernel_param *kp)
{
int i;
struct pcie_link_state *link;
@@ -1087,7 +1099,7 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
return 0;
}
-static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
+static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
{
int i, cnt = 0;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index fafdb165dd2e..df290aa58dce 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -226,6 +226,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+ if (rtsta == (u32) ~0)
+ break;
+
if (rtsta & PCI_EXP_RTSTA_PME) {
/*
* Clear PME status of the port. If there are other
@@ -273,7 +276,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+ if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 4334fd5d7de9..a854bc569117 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* File: portdrv.h
* Purpose: PCI Express Port Bus Driver's Internal Data Structures
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 313a21df1692..a59210350c44 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* File: portdrv_core.c
* Purpose: PCI Express Port Bus Driver's Core Functions
@@ -43,134 +44,113 @@ static void release_pcie_device(struct device *dev)
kfree(to_pcie_device(dev));
}
-/**
- * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
- * for given port
- * @dev: PCI Express port to handle
- * @irqs: Array of interrupt vectors to populate
- * @mask: Bitmask of port capabilities returned by get_port_device_capability()
- *
- * Return value: 0 on success, error code on failure
+/*
+ * Fill in *pme, *aer, *dpc with the relevant Interrupt Message Numbers if
+ * services are enabled in "mask". Return the number of MSI/MSI-X vectors
+ * required to accommodate the largest Message Number.
*/
-static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_message_numbers(struct pci_dev *dev, int mask,
+ u32 *pme, u32 *aer, u32 *dpc)
{
- int nr_entries, entry, nvec = 0;
+ u32 nvec = 0, pos, reg32;
+ u16 reg16;
/*
- * Allocate as many entries as the port wants, so that we can check
- * which of them will be useful. Moreover, if nr_entries is correctly
- * equal to the number of entries this port actually uses, we'll happily
- * go through without any tricks.
+ * The Interrupt Message Number indicates which vector is used, i.e.,
+ * the MSI-X table entry or the MSI offset between the base Message
+ * Data and the generated interrupt message. See PCIe r3.1, sec
+ * 7.8.2, 7.10.10, 7.31.2.
*/
- nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
- PCI_IRQ_MSIX | PCI_IRQ_MSI);
- if (nr_entries < 0)
- return nr_entries;
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
- u16 reg16;
-
- /*
- * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
- * interrupts (when both are implemented) always share the
- * same MSI or MSI-X vector, as indicated by the Interrupt
- * Message Number field in the PCI Express Capabilities
- * register".
- *
- * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
- entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
- irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ nvec = *pme + 1;
}
if (mask & PCIE_PORT_SERVICE_AER) {
- u32 reg32, pos;
-
- /*
- * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
- * Message Number in the Root Error Status register
- * indicates which MSI/MSI-X vector is used for AER.
- *
- * "For MSI, the [Advanced Error Interrupt Message Number]
- * indicates the offset between the base Message Data and
- * the interrupt message that is generated."
- *
- * "For MSI-X, the [Advanced Error Interrupt Message
- * Number] indicates which MSI-X Table entry is used to
- * generate the interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
- entry = reg32 >> 27;
- if (entry >= nr_entries)
- goto out_free_irqs;
-
- irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
-
- nvec = max(nvec, entry + 1);
+ if (pos) {
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
+ &reg32);
+ *aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
+ nvec = max(nvec, *aer + 1);
+ }
}
if (mask & PCIE_PORT_SERVICE_DPC) {
- u16 reg16, pos;
-
- /*
- * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
- * Message Number in the DPC Capability register indicates
- * which MSI/MSI-X vector is used for DPC.
- *
- * "For MSI, the [DPC Interrupt Message Number] indicates
- * the offset between the base Message Data and the
- * interrupt message that is generated."
- *
- * "For MSI-X, the [DPC Interrupt Message Number] indicates
- * which MSI-X Table entry is used to generate the
- * interrupt message."
- */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
- pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
- entry = reg16 & 0x1f;
- if (entry >= nr_entries)
- goto out_free_irqs;
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
+ &reg16);
+ *dpc = reg16 & PCI_EXP_DPC_IRQ;
+ nvec = max(nvec, *dpc + 1);
+ }
+ }
+
+ return nvec;
+}
- irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+/**
+ * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+ * for given port
+ * @dev: PCI Express port to handle
+ * @irqs: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
+{
+ int nr_entries, nvec;
+ u32 pme = 0, aer = 0, dpc = 0;
- nvec = max(nvec, entry + 1);
+ /* Allocate the maximum possible number of MSI/MSI-X vectors */
+ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nr_entries < 0)
+ return nr_entries;
+
+ /* See how many and which Interrupt Message Numbers we actually use */
+ nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
+ if (nvec > nr_entries) {
+ pci_free_irq_vectors(dev);
+ return -EIO;
}
/*
- * If nvec is equal to the allocated number of entries, we can just use
- * what we have. Otherwise, the port has some extra entries not for the
- * services we know and we need to work around that.
+ * If we allocated more than we need, free them and reallocate fewer.
+ *
+ * Reallocating may change the specific vectors we get, so
+ * pci_irq_vector() must be done *after* the reallocation.
+ *
+ * If we're using MSI, hardware is *allowed* to change the Interrupt
+ * Message Numbers when we free and reallocate the vectors, but we
+ * assume it won't because we allocate enough vectors for the
+ * biggest Message Number we found.
*/
if (nvec != nr_entries) {
- /* Drop the temporary MSI-X setup */
pci_free_irq_vectors(dev);
- /* Now allocate the MSI-X vectors for real */
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
- return 0;
+ /* PME and hotplug share an MSI/MSI-X vector */
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme);
+ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme);
+ }
-out_free_irqs:
- pci_free_irq_vectors(dev);
- return -EIO;
+ if (mask & PCIE_PORT_SERVICE_AER)
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, aer);
+
+ if (mask & PCIE_PORT_SERVICE_DPC)
+ irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, dpc);
+
+ return 0;
}
/**
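
Editor's note: the rewritten portdrv_core.c logic above allocates the maximum number of MSI/MSI-X vectors, reads the Interrupt Message Numbers of the enabled services, then reallocates exactly as many vectors as the largest number requires. A standalone model of that two-phase strategy is sketched below; the helper name and the example message numbers are illustrative, not kernel API:

#include <stdio.h>

static unsigned int largest_message_number(unsigned int pme, unsigned int aer,
					   unsigned int dpc)
{
	unsigned int nvec = pme + 1;

	if (aer + 1 > nvec)
		nvec = aer + 1;
	if (dpc + 1 > nvec)
		nvec = dpc + 1;
	return nvec;
}

int main(void)
{
	unsigned int nr_entries = 32;	/* PCIE_PORT_MAX_MSI_ENTRIES */
	unsigned int nvec = largest_message_number(0, 4, 1);

	if (nvec > nr_entries)
		return 1;		/* the driver returns -EIO here */
	printf("reallocate %u of %u vectors\n", nvec, nr_entries);
	return 0;
}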
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 083276e03c38..ffbf4e723527 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* File: portdrv_pci.c
* Purpose: PCI Express Port Bus Driver
@@ -246,6 +247,7 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
+ .shutdown = pcie_portdrv_remove,
.err_handler = &pcie_portdrv_err_handler,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ff94b69738a8..14e0ea1ff38b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -959,7 +959,21 @@ static void pci_enable_crs(struct pci_dev *pdev)
PCI_EXP_RTCTL_CRSSVE);
}
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses);
+
/*
+ * pci_scan_bridge_extend() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @available_buses: Total number of buses available for this bridge and
+ * the devices below. After the minimal bus space has
+ * been allocated the remaining buses will be
+ * distributed equally between hotplug-capable bridges.
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
* If it's a bridge, configure it and scan the bus behind it.
* For CardBus bridges, we don't scan behind as the devices will
* be handled by the bridge driver itself.
@@ -969,7 +983,9 @@ static void pci_enable_crs(struct pci_dev *pdev)
* them, we proceed to assigning numbers to the remaining buses in
* order to avoid overlaps between old and new bus numbers.
*/
-int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
+ int max, unsigned int available_buses,
+ int pass)
{
struct pci_bus *child;
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -1076,9 +1092,13 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
child = pci_add_new_bus(bus, dev, max+1);
if (!child)
goto out;
- pci_bus_insert_busn_res(child, max+1, 0xff);
+ pci_bus_insert_busn_res(child, max+1,
+ bus->busn_res.end);
}
max++;
+ if (available_buses)
+ available_buses--;
+
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->busn_res.start) << 8)
@@ -1100,7 +1120,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
- max = pci_scan_child_bus(child);
+ max = pci_scan_child_bus_extend(child, available_buses);
} else {
/*
* For CardBus bridges, we leave 4 bus numbers
@@ -1168,6 +1188,28 @@ out:
return max;
}
+
+/*
+ * pci_scan_bridge() - Scan buses behind a bridge
+ * @bus: Parent bus the bridge is on
+ * @dev: Bridge itself
+ * @max: Starting subordinate number of buses behind this bridge
+ * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
+ * that need to be reconfigured).
+ *
+ * If it's a bridge, configure it and scan the bus behind it.
+ * For CardBus bridges, we don't scan behind as the devices will
+ * be handled by the bridge driver itself.
+ *
+ * We need to process bridges in two passes -- first we scan those
+ * already configured by the BIOS and after we are done with all of
+ * them, we proceed to assigning numbers to the remaining buses in
+ * order to avoid overlaps between old and new bus numbers.
+ */
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+{
+ return pci_scan_bridge_extend(bus, dev, max, 0, pass);
+}
EXPORT_SYMBOL(pci_scan_bridge);
/*
@@ -2396,9 +2438,24 @@ void __weak pcibios_fixup_bus(struct pci_bus *bus)
/* nothing to do, expected to be removed in the future */
}
-unsigned int pci_scan_child_bus(struct pci_bus *bus)
+/**
+ * pci_scan_child_bus_extend() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ * @available_buses: Total number of buses available (%0 does not try to
+ * extend beyond the minimal)
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices. Passing
+ * @available_buses causes the remaining bus space to be distributed
+ * equally between hotplug-capable bridges to allow future extension of the
+ * hierarchy.
+ */
+static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ unsigned int available_buses)
{
- unsigned int devfn, pass, max = bus->busn_res.start;
+ unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
+ unsigned int start = bus->busn_res.start;
+ unsigned int devfn, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
@@ -2408,7 +2465,8 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
pci_scan_slot(bus, devfn);
/* Reserve buses for SR-IOV capability. */
- max += pci_iov_bus_range(bus);
+ used_buses = pci_iov_bus_range(bus);
+ max += used_buses;
/*
* After performing arch-dependent fixup of the bus, look behind
@@ -2420,19 +2478,73 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
bus->is_added = 1;
}
- for (pass = 0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (pci_is_bridge(dev))
- max = pci_scan_bridge(bus, dev, max, pass);
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * buses between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ /*
+ * Scan bridges that are already configured. We don't touch them
+ * unless they are misconfigured (which will be done in the second
+ * scan below).
+ */
+ for_each_pci_bridge(dev, bus) {
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
+ used_buses += cmax - max;
+ }
+
+ /* Scan bridges that need to be reconfigured */
+ for_each_pci_bridge(dev, bus) {
+ unsigned int buses = 0;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available buses which it
+ * can then distribute to the possible hotplug
+ * bridges below.
+ */
+ buses = available_buses;
+ } else if (dev->is_hotplug_bridge) {
+ /*
+ * Distribute the extra buses between hotplug
+ * bridges if any.
+ */
+ buses = available_buses / hotplug_bridges;
+ buses = min(buses, available_buses - used_buses);
}
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
+ used_buses += max - cmax;
+ }
+
/*
* Make sure a hotplug bridge has at least the minimum requested
- * number of buses.
+ * number of buses but allow it to grow up to the maximum available
+ * bus number if there is room.
*/
- if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
- if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
- max = bus->busn_res.start + pci_hotplug_bus_size - 1;
+ if (bus->self && bus->self->is_hotplug_bridge) {
+ used_buses = max_t(unsigned int, available_buses,
+ pci_hotplug_bus_size - 1);
+ if (max - start < used_buses) {
+ max = start + used_buses;
+
+ /* Do not allocate more buses than we have room for */
+ if (max > bus->busn_res.end)
+ max = bus->busn_res.end;
+
+ dev_dbg(&bus->dev, "%pR extended by %#02x\n",
+ &bus->busn_res, max - start);
+ }
}
/*
@@ -2445,6 +2557,18 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
return max;
}
+
+/**
+ * pci_scan_child_bus() - Scan devices below a bus
+ * @bus: Bus to scan for devices
+ *
+ * Scans devices below @bus including subordinate buses. Returns new
+ * subordinate number including all the found devices.
+ */
+unsigned int pci_scan_child_bus(struct pci_bus *bus)
+{
+ return pci_scan_child_bus_extend(bus, 0);
+}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
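
Editor's note: the distribution rule implemented by pci_scan_child_bus_extend() above is easy to model in isolation: a lone upstream port receives the whole spare range, otherwise each hotplug bridge gets an equal share capped by whatever is still unused. A standalone sketch with illustrative names:

#include <stdio.h>

static unsigned int share_for_bridge(unsigned int available, unsigned int used,
				     unsigned int hotplug_bridges,
				     unsigned int normal_bridges,
				     int is_hotplug)
{
	unsigned int buses = 0;

	if (!hotplug_bridges && normal_bridges == 1) {
		buses = available;		/* single upstream port */
	} else if (is_hotplug) {
		buses = available / hotplug_bridges;
		if (buses > available - used)
			buses = available - used;
	}
	return buses;
}

int main(void)
{
	/* e.g. 56 spare bus numbers, 4 hotplug bridges, none used yet */
	printf("%u\n", share_for_bridge(56, 0, 4, 0, 1));	/* prints 14 */
	return 0;
}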
/**
@@ -2737,3 +2861,38 @@ void __init pci_sort_breadthfirst(void)
{
bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
+
+int pci_hp_add_bridge(struct pci_dev *dev)
+{
+ struct pci_bus *parent = dev->bus;
+ int busnr, start = parent->busn_res.start;
+ unsigned int available_buses = 0;
+ int end = parent->busn_res.end;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
+ if (busnr-- > end) {
+ dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
+ return -1;
+ }
+
+ /* Scan bridges that are already configured */
+ busnr = pci_scan_bridge(parent, dev, busnr, 0);
+
+ /*
+ * Distribute the available bus numbers between hotplug-capable
+ * bridges to make extending the chain later possible.
+ */
+ available_buses = end - busnr;
+
+ /* Scan bridges that need to be reconfigured */
+ pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
+
+ if (!dev->subordinate)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
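
Editor's note: roughly how a hotplug driver is expected to call the newly exported pci_hp_add_bridge() after rescanning a slot. This is a kernel-context sketch, not a standalone program; the surrounding function and the slot address are hypothetical, the PCI calls themselves are existing API:

#include <linux/pci.h>

static void example_slot_rescan(struct pci_bus *parent)
{
	struct pci_dev *dev;

	pci_scan_slot(parent, PCI_DEVFN(0, 0));

	/* Give any new bridges bus numbers, spreading the spare ones */
	for_each_pci_bridge(dev, parent)
		pci_hp_add_bridge(dev);

	pci_assign_unassigned_bridge_resources(parent->self);
	pci_bus_add_devices(parent);
}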
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 098360d7ff81..58a662e3c4a6 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Procfs interface for the PCI bus.
*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a4d33619a7bb..10684b17d0bd 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
@@ -3365,6 +3366,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+/*
+ * Root Ports on some Cavium CN8xxx chips do not successfully complete a bus
+ * reset when used with certain child devices. After the reset, config
+ * accesses to the child may fail.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
+
static void quirk_no_pm_reset(struct pci_dev *dev)
{
/*
@@ -4211,17 +4219,32 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
+{
+ /*
+ * Effectively selects all downstream ports for whole ThunderX 1
+ * family by 0xf800 mask (which represents 8 SoCs), while the lower
+ * bits of device ID are used to indicate which subdevice is used
+ * within the SoC.
+ */
+ return (pci_is_pcie(dev) &&
+ (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
+ ((dev->device & 0xf800) == 0xa000));
+}
+
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
- * Cavium devices matching this quirk do not perform peer-to-peer
- * with other functions, allowing masking out these bits as if they
- * were unimplemented in the ACS capability.
+ * Cavium root ports don't advertise an ACS capability. However,
+ * the RTL internally implements similar protection as if ACS had
+ * Request Redirection, Completion Redirection, Source Validation,
+ * and Upstream Forwarding features enabled. Assert that the
+ * hardware implements and enables equivalent ACS functionality for
+ * these flags.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+ acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
- if (!((dev->device >= 0xa000) && (dev->device <= 0xa0ff)))
+ if (!pci_quirk_cavium_acs_match(dev))
return -ENOTTY;
return acs_flags ? 0 : 1;
@@ -4799,3 +4822,11 @@ static void quirk_no_ats(struct pci_dev *pdev)
/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
+
+/* Freescale PCIe doesn't support MSI in RC mode */
+static void quirk_fsl_no_msi(struct pci_dev *pdev)
+{
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
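
Editor's note: the new ThunderX match above replaces the explicit 0xa000-0xa0ff range check with a 0xf800 mask, which accepts device IDs 0xa000-0xa7ff. A tiny standalone illustration of that widening:

#include <stdio.h>
#include <stdbool.h>

static bool cavium_id_matches(unsigned int device)
{
	return (device & 0xf800) == 0xa000;
}

int main(void)
{
	printf("%d %d %d\n",
	       cavium_id_matches(0xa0ff),	/* 1: inside the old range */
	       cavium_id_matches(0xa700),	/* 1: only the new, wider range */
	       cavium_id_matches(0xa800));	/* 0: outside both */
	return 0;
}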
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 73a03d382590..2fa0dbde36b7 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,9 +19,9 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_pme_active(dev, false);
if (dev->is_added) {
+ device_release_driver(&dev->dev);
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_release_driver(&dev->dev);
dev->is_added = 0;
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index b6edb187d160..1f5e6af96c83 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -147,12 +147,8 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
return NULL;
rom = ioremap(start, *size);
- if (!rom) {
- /* restore enable if ioremap fails */
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
- pci_disable_rom(pdev);
- return NULL;
- }
+ if (!rom)
+ goto err_ioremap;
/*
* Try to find the true size of the ROM since sometimes the PCI window
@@ -160,7 +156,18 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
+ if (!*size)
+ goto invalid_rom;
+
return rom;
+
+invalid_rom:
+ iounmap(rom);
+err_ioremap:
+ /* restore enable if the ROM could not be mapped or is invalid */
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ pci_disable_rom(pdev);
+ return NULL;
}
EXPORT_SYMBOL(pci_map_rom);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 958da7db9033..b1ad466199ad 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1518,13 +1518,16 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
break;
}
}
+
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
static void pci_bridge_release_resources(struct pci_bus *bus,
unsigned long type)
{
struct pci_dev *dev = bus->self;
struct resource *r;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
unsigned old_flags = 0;
struct resource *b_res;
int idx = 1;
@@ -1567,7 +1570,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
*/
release_child_resources(r);
if (!release_resource(r)) {
- type = old_flags = r->flags & type_mask;
+ type = old_flags = r->flags & PCI_RES_TYPE_MASK;
dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* keep the old size */
@@ -1758,8 +1761,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -1818,7 +1819,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
rel_type);
/* restore size and flags */
@@ -1853,6 +1854,175 @@ void __init pci_assign_unassigned_resources(void)
}
}
+static void extend_bridge_window(struct pci_dev *bridge, struct resource *res,
+ struct list_head *add_list, resource_size_t available)
+{
+ struct pci_dev_resource *dev_res;
+
+ if (res->parent)
+ return;
+
+ if (resource_size(res) >= available)
+ return;
+
+ dev_res = res_to_dev_res(add_list, res);
+ if (!dev_res)
+ return;
+
+ /* Is there room to extend the window? */
+ if (available - resource_size(res) <= dev_res->add_size)
+ return;
+
+ dev_res->add_size = available - resource_size(res);
+ dev_dbg(&bridge->dev, "bridge window %pR extended by %pa\n", res,
+ &dev_res->add_size);
+}
+
+static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list, resource_size_t available_io,
+ resource_size_t available_mmio, resource_size_t available_mmio_pref)
+{
+ resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref;
+ unsigned int normal_bridges = 0, hotplug_bridges = 0;
+ struct resource *io_res, *mmio_res, *mmio_pref_res;
+ struct pci_dev *dev, *bridge = bus->self;
+
+ io_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ mmio_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ mmio_pref_res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+
+ /*
+ * Update additional resource list (add_list) to fill all the
+ * extra resource space available for this port except the space
+ * calculated in __pci_bus_size_bridges() which covers all the
+ * devices currently connected to the port and below.
+ */
+ extend_bridge_window(bridge, io_res, add_list, available_io);
+ extend_bridge_window(bridge, mmio_res, add_list, available_mmio);
+ extend_bridge_window(bridge, mmio_pref_res, add_list,
+ available_mmio_pref);
+
+ /*
+ * Calculate the total amount of extra resource space we can
+ * pass to bridges below this one. This is basically the
+ * extra space reduced by the minimal required space for the
+ * non-hotplug bridges.
+ */
+ remaining_io = available_io;
+ remaining_mmio = available_mmio;
+ remaining_mmio_pref = available_mmio_pref;
+
+ /*
+ * Calculate how many hotplug bridges and normal bridges there
+ * are on this bus. We will distribute the additional available
+ * resources between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ if (dev->is_hotplug_bridge)
+ hotplug_bridges++;
+ else
+ normal_bridges++;
+ }
+
+ for_each_pci_bridge(dev, bus) {
+ const struct resource *res;
+
+ if (dev->is_hotplug_bridge)
+ continue;
+
+ /*
+ * Reduce the available resource space by what the
+ * bridge and devices below it occupy.
+ */
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 0];
+ if (!res->parent && available_io > resource_size(res))
+ remaining_io -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 1];
+ if (!res->parent && available_mmio > resource_size(res))
+ remaining_mmio -= resource_size(res);
+
+ res = &dev->resource[PCI_BRIDGE_RESOURCES + 2];
+ if (!res->parent && available_mmio_pref > resource_size(res))
+ remaining_mmio_pref -= resource_size(res);
+ }
+
+ /*
+ * Go over devices on this bus and distribute the remaining
+ * resource space between hotplug bridges.
+ */
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ if (!hotplug_bridges && normal_bridges == 1) {
+ /*
+ * There is only one bridge on the bus (upstream
+ * port) so it gets all available resources
+ * which it can then distribute to the possible
+ * hotplug bridges below.
+ */
+ pci_bus_distribute_available_resources(b, add_list,
+ available_io, available_mmio,
+ available_mmio_pref);
+ } else if (dev->is_hotplug_bridge) {
+ resource_size_t align, io, mmio, mmio_pref;
+
+ /*
+ * Distribute available extra resources equally
+ * between hotplug-capable downstream ports
+ * taking alignment into account.
+ *
+ * Here hotplug_bridges is always != 0.
+ */
+ align = pci_resource_alignment(bridge, io_res);
+ io = div64_ul(available_io, hotplug_bridges);
+ io = min(ALIGN(io, align), remaining_io);
+ remaining_io -= io;
+
+ align = pci_resource_alignment(bridge, mmio_res);
+ mmio = div64_ul(available_mmio, hotplug_bridges);
+ mmio = min(ALIGN(mmio, align), remaining_mmio);
+ remaining_mmio -= mmio;
+
+ align = pci_resource_alignment(bridge, mmio_pref_res);
+ mmio_pref = div64_ul(available_mmio_pref,
+ hotplug_bridges);
+ mmio_pref = min(ALIGN(mmio_pref, align),
+ remaining_mmio_pref);
+ remaining_mmio_pref -= mmio_pref;
+
+ pci_bus_distribute_available_resources(b, add_list, io,
+ mmio, mmio_pref);
+ }
+ }
+}
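
Editor's note: the per-window arithmetic above gives each hotplug bridge an equal, alignment-rounded share of the spare window space, clamped to what is still left. A standalone model of that calculation, with illustrative names and example sizes:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static uint64_t hotplug_share(uint64_t available, uint64_t *remaining,
			      unsigned int hotplug_bridges, uint64_t align)
{
	uint64_t share = ALIGN_UP(available / hotplug_bridges, align);

	if (share > *remaining)
		share = *remaining;
	*remaining -= share;
	return share;
}

int main(void)
{
	uint64_t remaining = 3 << 20;	/* 3 MB of spare MMIO */
	unsigned int i;

	for (i = 0; i < 3; i++)		/* three hotplug bridges, 1 MB each */
		printf("bridge %u gets %llu bytes\n", i,
		       (unsigned long long)hotplug_share(3 << 20, &remaining,
							 3, 1 << 20));
	return 0;
}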
+
+static void
+pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ struct list_head *add_list)
+{
+ resource_size_t available_io, available_mmio, available_mmio_pref;
+ const struct resource *res;
+
+ if (!bridge->is_hotplug_bridge)
+ return;
+
+ /* Take the initial extra resources from the hotplug port */
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ available_io = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ available_mmio = resource_size(res);
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ available_mmio_pref = resource_size(res);
+
+ pci_bus_distribute_available_resources(bridge->subordinate,
+ add_list, available_io, available_mmio, available_mmio_pref);
+}
+
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
{
struct pci_bus *parent = bridge->subordinate;
@@ -1862,11 +2032,17 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
LIST_HEAD(fail_head);
struct pci_dev_resource *fail_res;
int retval;
- unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
- IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
again:
__pci_bus_size_bridges(parent, &add_list);
+
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend the
+ * hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
+
__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
BUG_ON(!list_empty(&add_list));
tried_times++;
@@ -1889,7 +2065,7 @@ again:
*/
list_for_each_entry(fail_res, &fail_head, list)
pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & type_mask,
+ fail_res->flags & PCI_RES_TYPE_MASK,
whole_subtree);
/* restore size and flags */
@@ -1914,6 +2090,104 @@ enable_all:
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+{
+ struct pci_dev_resource *dev_res;
+ struct pci_dev *next;
+ LIST_HEAD(saved);
+ LIST_HEAD(added);
+ LIST_HEAD(failed);
+ unsigned int i;
+ int ret;
+
+ /* Walk up towards the root bus, releasing bridge BARs when possible */
+ next = bridge;
+ do {
+ bridge = next;
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
+ i++) {
+ struct resource *res = &bridge->resource[i];
+
+ if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
+ continue;
+
+ /* Ignore BARs which are still in use */
+ if (res->child)
+ continue;
+
+ ret = add_to_list(&saved, bridge, res, 0, 0);
+ if (ret)
+ goto cleanup;
+
+ dev_info(&bridge->dev, "BAR %d: releasing %pR\n",
+ i, res);
+
+ if (res->parent)
+ release_resource(res);
+ res->start = 0;
+ res->end = 0;
+ break;
+ }
+ if (i == PCI_BRIDGE_RESOURCE_END)
+ break;
+
+ next = bridge->bus ? bridge->bus->self : NULL;
+ } while (next);
+
+ if (list_empty(&saved))
+ return -ENOENT;
+
+ __pci_bus_size_bridges(bridge->subordinate, &added);
+ __pci_bridge_assign_resources(bridge, &added, &failed);
+ BUG_ON(!list_empty(&added));
+
+ if (!list_empty(&failed)) {
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+
+ list_for_each_entry(dev_res, &saved, list) {
+ /* Skip the bridge we just assigned resources for. */
+ if (bridge == dev_res->dev)
+ continue;
+
+ bridge = dev_res->dev;
+ pci_setup_bridge(bridge->subordinate);
+ }
+
+ free_list(&saved);
+ return 0;
+
+cleanup:
+ /* restore size and flags */
+ list_for_each_entry(dev_res, &failed, list) {
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+ }
+ free_list(&failed);
+
+ /* Revert to the old configuration */
+ list_for_each_entry(dev_res, &saved, list) {
+ struct resource *res = dev_res->res;
+
+ bridge = dev_res->dev;
+ i = res - bridge->resource;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+
+ pci_claim_resource(bridge, i);
+ pci_setup_bridge(bridge->subordinate);
+ }
+ free_list(&saved);
+
+ return ret;
+}
+
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1921,10 +2195,9 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
want additional resources */
down_read(&pci_bus_sem);
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_bridge(dev) && pci_has_subordinate(dev))
- __pci_bus_size_bridges(dev->subordinate,
- &add_list);
+ for_each_pci_bridge(dev, bus)
+ if (pci_has_subordinate(dev))
+ __pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
BUG_ON(!list_empty(&add_list));
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index e576e1a8d978..e815111f3f81 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pci/setup-res.c
*
@@ -396,6 +397,64 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
return 0;
}
+void pci_release_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+
+ dev_info(&dev->dev, "BAR %d: releasing %pR\n", resno, res);
+ release_resource(res);
+ res->end = resource_size(res) - 1;
+ res->start = 0;
+ res->flags |= IORESOURCE_UNSET;
+}
+EXPORT_SYMBOL(pci_release_resource);
+
+int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+{
+ struct resource *res = dev->resource + resno;
+ int old, ret;
+ u32 sizes;
+ u16 cmd;
+
+ /* Make sure the resource isn't assigned before resizing it. */
+ if (!(res->flags & IORESOURCE_UNSET))
+ return -EBUSY;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_MEMORY)
+ return -EBUSY;
+
+ sizes = pci_rebar_get_possible_sizes(dev, resno);
+ if (!sizes)
+ return -ENOTSUPP;
+
+ if (!(sizes & BIT(size)))
+ return -EINVAL;
+
+ old = pci_rebar_get_current_size(dev, resno);
+ if (old < 0)
+ return old;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
+
+ /* Check if the new config works by trying to assign everything. */
+ ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ if (ret)
+ goto error_resize;
+
+ return 0;
+
+error_resize:
+ pci_rebar_set_size(dev, resno, old);
+ res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
+ return ret;
+}
+EXPORT_SYMBOL(pci_resize_resource);
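
Editor's note: a kernel-context sketch of how an endpoint driver might use the new resizable-BAR helpers. The driver function, the BAR index and the size are hypothetical; the size argument is the encoded ReBAR size that pci_rebar_size_to_bytes() expands (8 corresponds to 256 MB), and memory decode must be off while the BAR is resized, as enforced above:

#include <linux/pci.h>

static int example_resize_bar0(struct pci_dev *pdev)
{
	u16 cmd;
	int ret;

	/* Memory decode must be disabled while the BAR is being resized */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);

	pci_release_resource(pdev, 0);
	ret = pci_resize_resource(pdev, 0, 8);
	if (!ret)
		pci_assign_unassigned_bus_resources(pdev->bus);

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return ret;
}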
+
int pci_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index af81b2dec42e..730cc897b94d 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>
#include <linux/interrupt.h>
@@ -20,8 +21,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
-#include <linux/pci.h>
-#include <linux/cdev.h>
#include <linux/wait.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -34,265 +33,10 @@ module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");
static dev_t switchtec_devt;
-static struct class *switchtec_class;
static DEFINE_IDA(switchtec_minor_ida);
-#define MICROSEMI_VENDOR_ID 0x11f8
-#define MICROSEMI_NTB_CLASSCODE 0x068000
-#define MICROSEMI_MGMT_CLASSCODE 0x058000
-
-#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
-#define SWITCHTEC_MAX_PFF_CSR 48
-
-#define SWITCHTEC_EVENT_OCCURRED BIT(0)
-#define SWITCHTEC_EVENT_CLEAR BIT(0)
-#define SWITCHTEC_EVENT_EN_LOG BIT(1)
-#define SWITCHTEC_EVENT_EN_CLI BIT(2)
-#define SWITCHTEC_EVENT_EN_IRQ BIT(3)
-#define SWITCHTEC_EVENT_FATAL BIT(4)
-
-enum {
- SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
- SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
- SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800,
- SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000,
- SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
- SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000,
- SWITCHTEC_GAS_NTB_OFFSET = 0x10000,
- SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
-};
-
-struct mrpc_regs {
- u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
- u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
- u32 cmd;
- u32 status;
- u32 ret_value;
-} __packed;
-
-enum mrpc_status {
- SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
- SWITCHTEC_MRPC_STATUS_DONE = 2,
- SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
- SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
-};
-
-struct sw_event_regs {
- u64 event_report_ctrl;
- u64 reserved1;
- u64 part_event_bitmap;
- u64 reserved2;
- u32 global_summary;
- u32 reserved3[3];
- u32 stack_error_event_hdr;
- u32 stack_error_event_data;
- u32 reserved4[4];
- u32 ppu_error_event_hdr;
- u32 ppu_error_event_data;
- u32 reserved5[4];
- u32 isp_error_event_hdr;
- u32 isp_error_event_data;
- u32 reserved6[4];
- u32 sys_reset_event_hdr;
- u32 reserved7[5];
- u32 fw_exception_hdr;
- u32 reserved8[5];
- u32 fw_nmi_hdr;
- u32 reserved9[5];
- u32 fw_non_fatal_hdr;
- u32 reserved10[5];
- u32 fw_fatal_hdr;
- u32 reserved11[5];
- u32 twi_mrpc_comp_hdr;
- u32 twi_mrpc_comp_data;
- u32 reserved12[4];
- u32 twi_mrpc_comp_async_hdr;
- u32 twi_mrpc_comp_async_data;
- u32 reserved13[4];
- u32 cli_mrpc_comp_hdr;
- u32 cli_mrpc_comp_data;
- u32 reserved14[4];
- u32 cli_mrpc_comp_async_hdr;
- u32 cli_mrpc_comp_async_data;
- u32 reserved15[4];
- u32 gpio_interrupt_hdr;
- u32 gpio_interrupt_data;
- u32 reserved16[4];
-} __packed;
-
-enum {
- SWITCHTEC_CFG0_RUNNING = 0x04,
- SWITCHTEC_CFG1_RUNNING = 0x05,
- SWITCHTEC_IMG0_RUNNING = 0x03,
- SWITCHTEC_IMG1_RUNNING = 0x07,
-};
-
-struct sys_info_regs {
- u32 device_id;
- u32 device_version;
- u32 firmware_version;
- u32 reserved1;
- u32 vendor_table_revision;
- u32 table_format_version;
- u32 partition_id;
- u32 cfg_file_fmt_version;
- u16 cfg_running;
- u16 img_running;
- u32 reserved2[57];
- char vendor_id[8];
- char product_id[16];
- char product_revision[4];
- char component_vendor[8];
- u16 component_id;
- u8 component_revision;
-} __packed;
-
-struct flash_info_regs {
- u32 flash_part_map_upd_idx;
-
- struct active_partition_info {
- u32 address;
- u32 build_version;
- u32 build_string;
- } active_img;
-
- struct active_partition_info active_cfg;
- struct active_partition_info inactive_img;
- struct active_partition_info inactive_cfg;
-
- u32 flash_length;
-
- struct partition_info {
- u32 address;
- u32 length;
- } cfg0;
-
- struct partition_info cfg1;
- struct partition_info img0;
- struct partition_info img1;
- struct partition_info nvlog;
- struct partition_info vendor[8];
-};
-
-struct ntb_info_regs {
- u8 partition_count;
- u8 partition_id;
- u16 reserved1;
- u64 ep_map;
- u16 requester_id;
-} __packed;
-
-struct part_cfg_regs {
- u32 status;
- u32 state;
- u32 port_cnt;
- u32 usp_port_mode;
- u32 usp_pff_inst_id;
- u32 vep_pff_inst_id;
- u32 dsp_pff_inst_id[47];
- u32 reserved1[11];
- u16 vep_vector_number;
- u16 usp_vector_number;
- u32 port_event_bitmap;
- u32 reserved2[3];
- u32 part_event_summary;
- u32 reserved3[3];
- u32 part_reset_hdr;
- u32 part_reset_data[5];
- u32 mrpc_comp_hdr;
- u32 mrpc_comp_data[5];
- u32 mrpc_comp_async_hdr;
- u32 mrpc_comp_async_data[5];
- u32 dyn_binding_hdr;
- u32 dyn_binding_data[5];
- u32 reserved4[159];
-} __packed;
-
-enum {
- SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
- SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
- SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
- SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
-};
-
-struct pff_csr_regs {
- u16 vendor_id;
- u16 device_id;
- u32 pci_cfg_header[15];
- u32 pci_cap_region[48];
- u32 pcie_cap_region[448];
- u32 indirect_gas_window[128];
- u32 indirect_gas_window_off;
- u32 reserved[127];
- u32 pff_event_summary;
- u32 reserved2[3];
- u32 aer_in_p2p_hdr;
- u32 aer_in_p2p_data[5];
- u32 aer_in_vep_hdr;
- u32 aer_in_vep_data[5];
- u32 dpc_hdr;
- u32 dpc_data[5];
- u32 cts_hdr;
- u32 cts_data[5];
- u32 reserved3[6];
- u32 hotplug_hdr;
- u32 hotplug_data[5];
- u32 ier_hdr;
- u32 ier_data[5];
- u32 threshold_hdr;
- u32 threshold_data[5];
- u32 power_mgmt_hdr;
- u32 power_mgmt_data[5];
- u32 tlp_throttling_hdr;
- u32 tlp_throttling_data[5];
- u32 force_speed_hdr;
- u32 force_speed_data[5];
- u32 credit_timeout_hdr;
- u32 credit_timeout_data[5];
- u32 link_state_hdr;
- u32 link_state_data[5];
- u32 reserved4[174];
-} __packed;
-
-struct switchtec_dev {
- struct pci_dev *pdev;
- struct device dev;
- struct cdev cdev;
-
- int partition;
- int partition_count;
- int pff_csr_count;
- char pff_local[SWITCHTEC_MAX_PFF_CSR];
-
- void __iomem *mmio;
- struct mrpc_regs __iomem *mmio_mrpc;
- struct sw_event_regs __iomem *mmio_sw_event;
- struct sys_info_regs __iomem *mmio_sys_info;
- struct flash_info_regs __iomem *mmio_flash_info;
- struct ntb_info_regs __iomem *mmio_ntb;
- struct part_cfg_regs __iomem *mmio_part_cfg;
- struct part_cfg_regs __iomem *mmio_part_cfg_all;
- struct pff_csr_regs __iomem *mmio_pff_csr;
-
- /*
- * The mrpc mutex must be held when accessing the other
- * mrpc_ fields, alive flag and stuser->state field
- */
- struct mutex mrpc_mutex;
- struct list_head mrpc_queue;
- int mrpc_busy;
- struct work_struct mrpc_work;
- struct delayed_work mrpc_timeout;
- bool alive;
-
- wait_queue_head_t event_wq;
- atomic_t event_cnt;
-};
-
-static struct switchtec_dev *to_stdev(struct device *dev)
-{
- return container_of(dev, struct switchtec_dev, dev);
-}
+struct class *switchtec_class;
+EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
MRPC_IDLE = 0,
@@ -943,7 +687,7 @@ static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
-const struct event_reg {
+static const struct event_reg {
size_t offset;
u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
size_t offset, int index);
@@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = {
.compat_ioctl = switchtec_dev_ioctl,
};
+static void link_event_work(struct work_struct *work)
+{
+ struct switchtec_dev *stdev;
+
+ stdev = container_of(work, struct switchtec_dev, link_event_work);
+
+ if (stdev->link_notifier)
+ stdev->link_notifier(stdev);
+}
+
+static void check_link_state_events(struct switchtec_dev *stdev)
+{
+ int idx;
+ u32 reg;
+ int count;
+ int occurred = 0;
+
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
+ dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
+ count = (reg >> 5) & 0xFF;
+
+ if (count != stdev->link_event_count[idx]) {
+ occurred = 1;
+ stdev->link_event_count[idx] = count;
+ }
+ }
+
+ if (occurred)
+ schedule_work(&stdev->link_event_work);
+}
+
+static void enable_link_state_events(struct switchtec_dev *stdev)
+{
+ int idx;
+
+ for (idx = 0; idx < stdev->pff_csr_count; idx++) {
+ iowrite32(SWITCHTEC_EVENT_CLEAR |
+ SWITCHTEC_EVENT_EN_IRQ,
+ &stdev->mmio_pff_csr[idx].link_state_hdr);
+ }
+}
+
static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
@@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
stdev->mrpc_busy = 0;
INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
+ INIT_WORK(&stdev->link_event_work, link_event_work);
init_waitqueue_head(&stdev->event_wq);
atomic_set(&stdev->event_cnt, 0);
@@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
+ if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE)
+ return 0;
+
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
@@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid)
for (idx = 0; idx < stdev->pff_csr_count; idx++) {
if (!stdev->pff_local[idx])
continue;
+
count += mask_event(stdev, eid, idx);
}
} else {
@@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
}
+ check_link_state_events(stdev);
+
for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
event_count += mask_all_events(stdev, eid);
@@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
+ if (pdev->class == MICROSEMI_NTB_CLASSCODE)
+ request_module_nowait("ntb_hw_switchtec");
+
stdev = stdev_create(pdev);
if (IS_ERR(stdev))
return PTR_ERR(stdev);
@@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
+ enable_link_state_events(stdev);
rc = cdev_device_add(&stdev->cdev, &stdev->dev);
if (rc)
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 9bf993e1f71e..83efa001c2e7 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* pci_syscall.c
*
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 27e94b30cf96..e7dae16b9a43 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
#
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 0802e0bc7d0c..16f573173471 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -263,12 +263,12 @@ static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
/*
* socket polling timer callback
*/
-static void bcm63xx_pcmcia_poll(unsigned long data)
+static void bcm63xx_pcmcia_poll(struct timer_list *t)
{
struct bcm63xx_pcmcia_socket *skt;
unsigned int stat, events;
- skt = (struct bcm63xx_pcmcia_socket *)data;
+ skt = from_timer(skt, t, timer);
spin_lock_bh(&skt->lock);
@@ -392,7 +392,7 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
sock->map_size = resource_size(skt->common_res);
/* initialize polling timer */
- setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);
+ timer_setup(&skt->timer, bcm63xx_pcmcia_poll, 0);
/* initialize pcmcia control register, drive VS[12] to 0,
* leave CB IDSEL to the old value since it is set by the PCI
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.h b/drivers/pcmcia/bcm63xx_pcmcia.h
index ed957399d863..2122c59a1c4a 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.h
+++ b/drivers/pcmcia/bcm63xx_pcmcia.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BCM63XX_PCMCIA_H_
#define BCM63XX_PCMCIA_H_
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 8b0923fd76c6..00a296d431ba 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -86,9 +86,9 @@ static int bfin_cf_ss_init(struct pcmcia_socket *s)
}
/* the timer is primarily to kick this socket's pccardd */
-static void bfin_cf_timer(unsigned long _cf)
+static void bfin_cf_timer(struct timer_list *t)
{
- struct bfin_cf_socket *cf = (void *)_cf;
+ struct bfin_cf_socket *cf = from_timer(cf, t, timer);
unsigned short present = bfin_cf_present(cf->cd_pfx);
if (present != cf->present) {
@@ -227,7 +227,7 @@ static int bfin_cf_probe(struct platform_device *pdev)
cf->cd_pfx = cd_pfx;
- setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf);
+ timer_setup(&cf->timer, bfin_cf_timer, 0);
cf->pdev = pdev;
platform_set_drvdata(pdev, cf);
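
Editor's note: the same timer API conversion recurs throughout the pcmcia drivers above and below: the (unsigned long) cookie and setup_timer() give way to timer_setup() and from_timer(). The general pattern, shown with a hypothetical driver structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_socket {
	struct timer_list timer;
	/* ... */
};

static void example_poll(struct timer_list *t)
{
	/* from_timer() replaces casting the old unsigned long cookie */
	struct example_socket *skt = from_timer(skt, t, timer);

	/* poll the hardware here, then rearm */
	mod_timer(&skt->timer, jiffies + HZ);
}

static void example_init(struct example_socket *skt)
{
	/* replaces setup_timer(&skt->timer, fn, (unsigned long)skt) */
	timer_setup(&skt->timer, example_poll, 0);
	mod_timer(&skt->timer, jiffies + HZ);
}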
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 4fe4cc4ae19a..5c0170597037 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -77,9 +77,8 @@ int __ref cb_alloc(struct pcmcia_socket *s)
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (pci_is_bridge(dev))
- max = pci_scan_bridge(bus, dev, max, pass);
+ for_each_pci_bridge(dev, bus)
+ max = pci_scan_bridge(bus, dev, max, pass);
/*
* Size all resources below the CardBus controller.
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 55ef7d1fd8da..102646fedb56 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -1599,7 +1599,7 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
}
-struct bin_attribute pccard_cis_attr = {
+const struct bin_attribute pccard_cis_attr = {
.attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
.read = pccard_show_cis,
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index e86cd6b31773..6765beadea95 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -152,7 +152,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s);
int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
-extern struct bin_attribute pccard_cis_attr;
+extern const struct bin_attribute pccard_cis_attr;
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index c6fe2a4a7a6a..9671ded549f0 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -79,9 +79,9 @@ static int electra_cf_ss_init(struct pcmcia_socket *s)
}
/* the timer is primarily to kick this socket's pccardd */
-static void electra_cf_timer(unsigned long _cf)
+static void electra_cf_timer(struct timer_list *t)
{
- struct electra_cf_socket *cf = (void *) _cf;
+ struct electra_cf_socket *cf = from_timer(cf, t, timer);
int present = electra_cf_present(cf);
if (present != cf->present) {
@@ -95,7 +95,9 @@ static void electra_cf_timer(unsigned long _cf)
static irqreturn_t electra_cf_irq(int irq, void *_cf)
{
- electra_cf_timer((unsigned long)_cf);
+ struct electra_cf_socket *cf = _cf;
+
+ electra_cf_timer(&cf->timer);
return IRQ_HANDLED;
}
@@ -206,7 +208,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
if (!cf)
return -ENOMEM;
- setup_timer(&cf->timer, electra_cf_timer, (unsigned long)cf);
+ timer_setup(&cf->timer, electra_cf_timer, 0);
cf->irq = 0;
cf->ofdev = ofdev;
@@ -305,7 +307,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
cf->mem_phys, io.start, cf->irq);
cf->active = 1;
- electra_cf_timer((unsigned long)cf);
+ electra_cf_timer(&cf->timer);
return 0;
fail3:
diff --git a/drivers/pcmcia/i82092aa.h b/drivers/pcmcia/i82092aa.h
index 8836d393ad02..fabe08c3e33d 100644
--- a/drivers/pcmcia/i82092aa.h
+++ b/drivers/pcmcia/i82092aa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INCLUDE_GUARD_i82092aa_H_
#define _INCLUDE_GUARD_i82092aa_H_
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index fb38cc01859f..891ccea2cccb 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -875,7 +875,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcic_interrupt */
-static void pcic_interrupt_wrapper(u_long data)
+static void pcic_interrupt_wrapper(struct timer_list *unused)
{
pcic_interrupt(0, NULL);
poll_timer.expires = jiffies + poll_interval;
@@ -1289,9 +1289,7 @@ static int __init init_i82365(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcic_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcic_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 70b089430fcc..9a4940e56e2f 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -380,11 +380,10 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -758,9 +757,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h
index f558e1adf954..05fec98617d0 100644
--- a/drivers/pcmcia/m32r_cfc.h
+++ b/drivers/pcmcia/m32r_cfc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2001 by Hiroyuki Kondo
*/
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index e50bbf826188..c2239a7e383a 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -386,10 +386,9 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
-static void pcc_interrupt_wrapper(u_long data)
+static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pcc_interrupt(0, NULL);
- init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
@@ -729,9 +728,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
- poll_timer.function = pcc_interrupt_wrapper;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h
index f95c58563bc8..d99ad3864ff3 100644
--- a/drivers/pcmcia/m32r_pcc.h
+++ b/drivers/pcmcia/m32r_pcc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2001 by Hiroyuki Kondo
*/
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 4e2f501e5548..c2a17a79f0b2 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -80,9 +80,9 @@ static int omap_cf_ss_init(struct pcmcia_socket *s)
}
/* the timer is primarily to kick this socket's pccardd */
-static void omap_cf_timer(unsigned long _cf)
+static void omap_cf_timer(struct timer_list *t)
{
- struct omap_cf_socket *cf = (void *) _cf;
+ struct omap_cf_socket *cf = from_timer(cf, t, timer);
unsigned present = omap_cf_present();
if (present != cf->present) {
@@ -102,7 +102,9 @@ static void omap_cf_timer(unsigned long _cf)
*/
static irqreturn_t omap_cf_irq(int irq, void *_cf)
{
- omap_cf_timer((unsigned long)_cf);
+ struct omap_cf_socket *cf = (struct omap_cf_socket *)_cf;
+
+ omap_cf_timer(&cf->timer);
return IRQ_HANDLED;
}
@@ -220,7 +222,7 @@ static int __init omap_cf_probe(struct platform_device *pdev)
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
return -ENOMEM;
- setup_timer(&cf->timer, omap_cf_timer, (unsigned long)cf);
+ timer_setup(&cf->timer, omap_cf_timer, 0);
cf->pdev = pdev;
platform_set_drvdata(pdev, cf);
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 0f70b4d58f9e..959ae3e65ef8 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -234,9 +234,9 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev)
/* socket functions */
-static void pd6729_interrupt_wrapper(unsigned long data)
+static void pd6729_interrupt_wrapper(struct timer_list *t)
{
- struct pd6729_socket *socket = (struct pd6729_socket *) data;
+ struct pd6729_socket *socket = from_timer(socket, t, poll_timer);
pd6729_interrupt(0, (void *)socket);
mod_timer(&socket->poll_timer, jiffies + HZ);
@@ -707,8 +707,7 @@ static int pd6729_pci_probe(struct pci_dev *dev,
}
} else {
/* poll Card status change */
- setup_timer(&socket->poll_timer, pd6729_interrupt_wrapper,
- (unsigned long)socket);
+ timer_setup(&socket->poll_timer, pd6729_interrupt_wrapper, 0);
mod_timer(&socket->poll_timer, jiffies + HZ);
}
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h
index c8e84bdece38..605cc2ccf9c5 100644
--- a/drivers/pcmcia/pd6729.h
+++ b/drivers/pcmcia/pd6729.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INCLUDE_GUARD_PD6729_H_
#define _INCLUDE_GUARD_PD6729_H_
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index 71ace6910d7e..78ad2bba76db 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_assabet.c
*
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index c3f67363f6a1..2a54081d161d 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_cerf.c
*
diff --git a/drivers/pcmcia/sa1100_generic.h b/drivers/pcmcia/sa1100_generic.h
index adb08dbc723f..a5f1f1dd63cb 100644
--- a/drivers/pcmcia/sa1100_generic.h
+++ b/drivers/pcmcia/sa1100_generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "soc_common.h"
#include "sa11xx_base.h"
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index 431d8b07cbaf..aebf9a66fdde 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_h3600.c
*
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index b07a2dc3296e..0e52a575986e 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_shannon.c
*
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 73fd37968b6a..7ce65bb23a8e 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_simpad.c
*
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 2f490930430d..93a5c7423d80 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -144,6 +144,7 @@ int pcmcia_badge4_init(struct sa1111_dev *dev)
sa11xx_drv_pcmcia_add_one);
}
+#ifndef MODULE
static int __init pcmv_setup(char *s)
{
int v[4];
@@ -158,3 +159,4 @@ static int __init pcmv_setup(char *s)
}
__setup("pcmv=", pcmv_setup);
+#endif
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 3d95dffcff7a..5ef351f87bfe 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -63,11 +63,12 @@
#define IDX_IRQ_S1_READY_NINT (3)
#define IDX_IRQ_S1_CD_VALID (4)
#define IDX_IRQ_S1_BVD1_STSCHG (5)
+#define NUM_IRQS (6)
void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
- unsigned long status = sa1111_readl(s->dev->mapbase + PCSR);
+ u32 status = readl_relaxed(s->dev->mapbase + PCSR);
switch (skt->nr) {
case 0:
@@ -95,7 +96,7 @@ void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_sta
int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
{
struct sa1111_pcmcia_socket *s = to_skt(skt);
- unsigned int pccr_skt_mask, pccr_set_mask, val;
+ u32 pccr_skt_mask, pccr_set_mask, val;
unsigned long flags;
switch (skt->nr) {
@@ -123,10 +124,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
local_irq_save(flags);
- val = sa1111_readl(s->dev->mapbase + PCCR);
+ val = readl_relaxed(s->dev->mapbase + PCCR);
val &= ~pccr_skt_mask;
val |= pccr_set_mask & pccr_skt_mask;
- sa1111_writel(val, s->dev->mapbase + PCCR);
+ writel_relaxed(val, s->dev->mapbase + PCCR);
local_irq_restore(flags);
return 0;
@@ -137,12 +138,18 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
{
struct sa1111_pcmcia_socket *s;
struct clk *clk;
- int i, ret = 0;
+ int i, ret = 0, irqs[NUM_IRQS];
clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
+ for (i = 0; i < NUM_IRQS; i++) {
+ irqs[i] = sa1111_get_irq(dev, i);
+ if (irqs[i] <= 0)
+ return irqs[i] ? : -ENXIO;
+ }
+
ops->socket_state = sa1111_pcmcia_socket_state;
for (i = 0; i < ops->nr; i++) {
@@ -156,16 +163,16 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
s->dev = dev;
if (s->soc.nr) {
- s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S1_READY_NINT];
- s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S1_CD_VALID];
+ s->soc.socket.pci_irq = irqs[IDX_IRQ_S1_READY_NINT];
+ s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S1_CD_VALID];
s->soc.stat[SOC_STAT_CD].name = "SA1111 CF card detect";
- s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S1_BVD1_STSCHG];
+ s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S1_BVD1_STSCHG];
s->soc.stat[SOC_STAT_BVD1].name = "SA1111 CF BVD1";
} else {
- s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S0_READY_NINT];
- s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S0_CD_VALID];
+ s->soc.socket.pci_irq = irqs[IDX_IRQ_S0_READY_NINT];
+ s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S0_CD_VALID];
s->soc.stat[SOC_STAT_CD].name = "SA1111 PCMCIA card detect";
- s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S0_BVD1_STSCHG];
+ s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S0_BVD1_STSCHG];
s->soc.stat[SOC_STAT_BVD1].name = "SA1111 PCMCIA BVD1";
}
@@ -201,8 +208,8 @@ static int pcmcia_probe(struct sa1111_dev *dev)
/*
* Initialise the suspend state.
*/
- sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
- sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
+ writel_relaxed(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
+ writel_relaxed(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
ret = -ENODEV;
#ifdef CONFIG_SA1100_BADGE4
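
The IRQ rework above fetches each interrupt through sa1111_get_irq() and validates it before use. The expression irqs[i] ? : -ENXIO is GCC's conditional-with-omitted-middle ("Elvis") operator: it yields irqs[i] when that value is non-zero (a negative errno from the lookup) and -ENXIO when the lookup returned zero. The same check written out longhand, as a minimal sketch with a hypothetical helper name:

	static int fetch_one_irq(struct sa1111_dev *dev, int idx)
	{
		int irq = sa1111_get_irq(dev, idx);	/* <0 errno, 0 none, >0 ok */

		if (irq <= 0)
			return irq ? irq : -ENXIO;	/* same as "irq ? : -ENXIO" */

		return irq;
	}
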
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h
index e74ecfdc1b26..c01571d46982 100644
--- a/drivers/pcmcia/sa1111_generic.h
+++ b/drivers/pcmcia/sa1111_generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "soc_common.h"
#include "sa11xx_base.h"
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index 480a3ede27c8..3d4ca87ca76c 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/pcmcia/sa1100_jornada720.c
*
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 019c395eb4bf..0ccf05a28a4b 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/pcmcia/sa1100_neponset.c
*
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index b6b316de055c..764650eb8897 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -456,9 +456,9 @@ static void soc_common_check_status(struct soc_pcmcia_socket *skt)
}
/* Let's poll for events in addition to IRQs since IRQ only is unreliable... */
-static void soc_common_pcmcia_poll_event(unsigned long dummy)
+static void soc_common_pcmcia_poll_event(struct timer_list *t)
{
- struct soc_pcmcia_socket *skt = (struct soc_pcmcia_socket *)dummy;
+ struct soc_pcmcia_socket *skt = from_timer(skt, t, poll_timer);
debug(skt, 4, "polling for events\n");
mod_timer(&skt->poll_timer, jiffies + SOC_PCMCIA_POLL_PERIOD);
@@ -794,8 +794,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
skt->cs_state = dead_socket;
- setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event,
- (unsigned long)skt);
+ timer_setup(&skt->poll_timer, soc_common_pcmcia_poll_event, 0);
skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
ret = request_resource(&iomem_resource, &skt->res_skt);
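
The soc_common.c hunks above (and the tcic and yenta conversions that follow) move from the old setup_timer()/unsigned-long-data callbacks to the timer_setup()/from_timer() API, in which the callback receives the struct timer_list pointer and recovers its containing object via container_of(). A minimal sketch of the pattern, using hypothetical names:

	#include <linux/timer.h>

	struct my_socket {
		struct timer_list poll_timer;
		/* ... other driver state ... */
	};

	static void my_poll(struct timer_list *t)
	{
		/* recover the object that embeds the timer */
		struct my_socket *sock = from_timer(sock, t, poll_timer);

		/* ... poll the hardware ... */
		mod_timer(&sock->poll_timer, jiffies + HZ);
	}

	static void my_start_polling(struct my_socket *sock)
	{
		timer_setup(&sock->poll_timer, my_poll, 0);
		mod_timer(&sock->poll_timer, jiffies + HZ);
	}
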
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 3f3625805353..b7f993f1bbd0 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* linux/drivers/pcmcia/soc_common.h
*
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index a1ac72d51d70..1a0e3f098759 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -98,7 +98,7 @@ module_param(cycle_time, int, 0444);
/*====================================================================*/
static irqreturn_t tcic_interrupt(int irq, void *dev);
-static void tcic_timer(u_long data);
+static void tcic_timer(struct timer_list *unused);
static struct pccard_operations tcic_operations;
struct tcic_socket {
@@ -435,9 +435,7 @@ static int __init init_tcic(void)
}
/* Set up polling */
- poll_timer.function = &tcic_timer;
- poll_timer.data = 0;
- init_timer(&poll_timer);
+ timer_setup(&poll_timer, &tcic_timer, 0);
/* Build interrupt mask */
printk(KERN_CONT ", %d sockets\n", sockets);
@@ -583,7 +581,7 @@ static irqreturn_t tcic_interrupt(int irq, void *dev)
return IRQ_HANDLED;
} /* tcic_interrupt */
-static void tcic_timer(u_long data)
+static void tcic_timer(struct timer_list *unused)
{
pr_debug("tcic_timer()\n");
tcic_timer_pending = 0;
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 5d6d9b1549bc..ab3da2262f0f 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -534,9 +534,9 @@ static irqreturn_t yenta_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void yenta_interrupt_wrapper(unsigned long data)
+static void yenta_interrupt_wrapper(struct timer_list *t)
{
- struct yenta_socket *socket = (struct yenta_socket *) data;
+ struct yenta_socket *socket = from_timer(socket, t, poll_timer);
yenta_interrupt(0, (void *)socket);
socket->poll_timer.expires = jiffies + HZ;
@@ -1233,8 +1233,7 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) {
/* No IRQ or request_irq failed. Poll */
socket->cb_irq = 0; /* But zero is a valid IRQ number. */
- setup_timer(&socket->poll_timer, yenta_interrupt_wrapper,
- (unsigned long)socket);
+ timer_setup(&socket->poll_timer, yenta_interrupt_wrapper, 0);
mod_timer(&socket->poll_timer, jiffies + HZ);
dev_info(&dev->dev,
"no PCI IRQ, CardBus support disabled for this socket.\n");
diff --git a/drivers/pcmcia/yenta_socket.h b/drivers/pcmcia/yenta_socket.h
index 4e75e9e258cd..efeed19e28c7 100644
--- a/drivers/pcmcia/yenta_socket.h
+++ b/drivers/pcmcia/yenta_socket.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __YENTA_H
#define __YENTA_H
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index e5197ffb7422..b8f44b068fc6 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,13 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config HISI_PMU
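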
+ bool "HiSilicon SoC PMU"
+ depends on ARM64 && ACPI
+ help
+ Support for HiSilicon SoC uncore performance monitoring
+	  units (PMUs), such as L3C, HHA and DDRC.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
@@ -43,4 +50,12 @@ config XGENE_PMU
help
Say y if you want to use APM X-Gene SoC performance monitors.
+config ARM_SPE_PMU
+ tristate "Enable support for the ARMv8.2 Statistical Profiling Extension"
+ depends on PERF_EVENTS && ARM64
+ help
+ Enable perf support for the ARMv8.2 Statistical Profiling
+ Extension, which provides periodic sampling of operations in
+ the CPU pipeline and reports this via the perf AUX interface.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 6420bd4394d5..710a0135bd61 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,5 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
+obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d14fc2e67f93..7bc5eee96b31 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -539,7 +539,7 @@ void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
return;
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
free_percpu_irq(irq, &hw_events->percpu_pmu);
cpumask_clear(&armpmu->active_irqs);
return;
@@ -565,10 +565,10 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
if (!irq)
return 0;
- if (irq_is_percpu(irq) && cpumask_empty(&armpmu->active_irqs)) {
+ if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
- } else if (irq_is_percpu(irq)) {
+ } else if (irq_is_percpu_devid(irq)) {
int other_cpu = cpumask_first(&armpmu->active_irqs);
int other_irq = per_cpu(hw_events->irq, other_cpu);
@@ -649,7 +649,7 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq) {
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
enable_percpu_irq(irq, IRQ_TYPE_NONE);
return 0;
}
@@ -667,7 +667,7 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq && irq_is_percpu(irq))
+ if (irq && irq_is_percpu_devid(irq))
disable_percpu_irq(irq);
return 0;
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 3303dd8d8eb5..705f1a390e31 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -193,9 +193,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
int pmu_idx = 0;
int cpu, ret;
- if (acpi_disabled)
- return 0;
-
/*
* Initialise and register the set of PMUs which we know about right
* now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 4eafa7a42e52..91b224eced18 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* platform_device probing code for ARM performance counters.
*
@@ -126,7 +127,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu(irq))
+ if (irq && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
@@ -149,7 +150,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (WARN_ON(irq <= 0))
continue;
- if (irq_is_percpu(irq)) {
+ if (irq_is_percpu_devid(irq)) {
pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
return -EINVAL;
}
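
The arm_pmu changes above consistently replace irq_is_percpu() with irq_is_percpu_devid(): the per-CPU request/free/enable helpers (request_percpu_irq() and friends) are only valid for interrupts configured with a per-CPU device id, not merely ones flagged as per-CPU. A minimal sketch of the request-side split this implies, with hypothetical names for the handler and cookies:

	#include <linux/interrupt.h>
	#include <linux/irq.h>

	static int request_pmu_irq(int irq, irq_handler_t handler,
				   void __percpu *percpu_cookie, void *cookie)
	{
		/* per-CPU devid interrupts must go through the per-CPU API */
		if (irq_is_percpu_devid(irq))
			return request_percpu_irq(irq, handler, "pmu-example",
						  percpu_cookie);

		/* everything else uses the normal request path */
		return request_irq(irq, handler,
				   IRQF_NOBALANCING | IRQF_NO_THREAD,
				   "pmu-example", cookie);
	}
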
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
new file mode 100644
index 000000000000..8ce262fc2561
--- /dev/null
+++ b/drivers/perf/arm_spe_pmu.c
@@ -0,0 +1,1249 @@
+/*
+ * Perf support for the Statistical Profiling Extension, introduced as
+ * part of ARMv8.2.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2016 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define PMUNAME "arm_spe"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/sysreg.h>
+
+#define ARM_SPE_BUF_PAD_BYTE 0
+
+struct arm_spe_pmu_buf {
+ int nr_pages;
+ bool snapshot;
+ void *base;
+};
+
+struct arm_spe_pmu {
+ struct pmu pmu;
+ struct platform_device *pdev;
+ cpumask_t supported_cpus;
+ struct hlist_node hotplug_node;
+
+ int irq; /* PPI */
+
+ u16 min_period;
+ u16 counter_sz;
+
+#define SPE_PMU_FEAT_FILT_EVT (1UL << 0)
+#define SPE_PMU_FEAT_FILT_TYP (1UL << 1)
+#define SPE_PMU_FEAT_FILT_LAT (1UL << 2)
+#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
+#define SPE_PMU_FEAT_LDS (1UL << 4)
+#define SPE_PMU_FEAT_ERND (1UL << 5)
+#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
+ u64 features;
+
+ u16 max_record_sz;
+ u16 align;
+ struct perf_output_handle __percpu *handle;
+};
+
+#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
+
+/* Convert a free-running index from perf into an SPE buffer offset */
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+
+/* Keep track of our dynamic hotplug state */
+static enum cpuhp_state arm_spe_pmu_online;
+
+enum arm_spe_pmu_buf_fault_action {
+ SPE_PMU_BUF_FAULT_ACT_SPURIOUS,
+ SPE_PMU_BUF_FAULT_ACT_FATAL,
+ SPE_PMU_BUF_FAULT_ACT_OK,
+};
+
+/* This sysfs gunk was really good fun to write. */
+enum arm_spe_pmu_capabilities {
+ SPE_PMU_CAP_ARCH_INST = 0,
+ SPE_PMU_CAP_ERND,
+ SPE_PMU_CAP_FEAT_MAX,
+ SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
+ SPE_PMU_CAP_MIN_IVAL,
+};
+
+static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
+ [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST,
+ [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
+};
+
+static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+{
+ if (cap < SPE_PMU_CAP_FEAT_MAX)
+ return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
+
+ switch (cap) {
+ case SPE_PMU_CAP_CNT_SZ:
+ return spe_pmu->counter_sz;
+ case SPE_PMU_CAP_MIN_IVAL:
+ return spe_pmu->min_period;
+ default:
+ WARN(1, "unknown cap %d\n", cap);
+ }
+
+ return 0;
+}
+
+static ssize_t arm_spe_pmu_cap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
+ &((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \
+ })[0].attr.attr
+
+#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
+ SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+
+static struct attribute *arm_spe_pmu_cap_attr[] = {
+ SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
+ SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
+ SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
+ SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_cap_group = {
+ .name = "caps",
+ .attrs = arm_spe_pmu_cap_attr,
+};
+
+/* User ABI */
+#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */
+#define ATTR_CFG_FLD_ts_enable_LO 0
+#define ATTR_CFG_FLD_ts_enable_HI 0
+#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */
+#define ATTR_CFG_FLD_pa_enable_LO 1
+#define ATTR_CFG_FLD_pa_enable_HI 1
+#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */
+#define ATTR_CFG_FLD_pct_enable_LO 2
+#define ATTR_CFG_FLD_pct_enable_HI 2
+#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */
+#define ATTR_CFG_FLD_jitter_LO 16
+#define ATTR_CFG_FLD_jitter_HI 16
+#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */
+#define ATTR_CFG_FLD_branch_filter_LO 32
+#define ATTR_CFG_FLD_branch_filter_HI 32
+#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */
+#define ATTR_CFG_FLD_load_filter_LO 33
+#define ATTR_CFG_FLD_load_filter_HI 33
+#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
+#define ATTR_CFG_FLD_store_filter_LO 34
+#define ATTR_CFG_FLD_store_filter_HI 34
+
+#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
+#define ATTR_CFG_FLD_event_filter_LO 0
+#define ATTR_CFG_FLD_event_filter_HI 63
+
+#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */
+#define ATTR_CFG_FLD_min_latency_LO 0
+#define ATTR_CFG_FLD_min_latency_HI 11
+
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
+
+GEN_PMU_FORMAT_ATTR(ts_enable);
+GEN_PMU_FORMAT_ATTR(pa_enable);
+GEN_PMU_FORMAT_ATTR(pct_enable);
+GEN_PMU_FORMAT_ATTR(jitter);
+GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(event_filter);
+GEN_PMU_FORMAT_ATTR(min_latency);
+
+static struct attribute *arm_spe_pmu_formats_attr[] = {
+ &format_attr_ts_enable.attr,
+ &format_attr_pa_enable.attr,
+ &format_attr_pct_enable.attr,
+ &format_attr_jitter.attr,
+ &format_attr_branch_filter.attr,
+ &format_attr_load_filter.attr,
+ &format_attr_store_filter.attr,
+ &format_attr_event_filter.attr,
+ &format_attr_min_latency.attr,
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_format_group = {
+ .name = "format",
+ .attrs = arm_spe_pmu_formats_attr,
+};
+
+static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+ return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus);
+}
+static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *arm_spe_pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group arm_spe_pmu_group = {
+ .attrs = arm_spe_pmu_attrs,
+};
+
+static const struct attribute_group *arm_spe_pmu_attr_groups[] = {
+ &arm_spe_pmu_group,
+ &arm_spe_pmu_cap_group,
+ &arm_spe_pmu_format_group,
+ NULL,
+};
+
+/* Convert between user ABI and register values */
+static u64 arm_spe_event_to_pmscr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
+
+ if (!attr->exclude_user)
+ reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
+
+ if (!attr->exclude_kernel)
+ reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
+
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && capable(CAP_SYS_ADMIN))
+ reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
+
+ return reg;
+}
+
+static void arm_spe_event_sanitise_period(struct perf_event *event)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ u64 period = event->hw.sample_period;
+ u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
+ << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
+
+ if (period < spe_pmu->min_period)
+ period = spe_pmu->min_period;
+ else if (period > max_period)
+ period = max_period;
+ else
+ period &= max_period;
+
+ event->hw.sample_period = period;
+}
+
+static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ arm_spe_event_sanitise_period(event);
+
+ reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
+ reg |= event->hw.sample_period;
+
+ return reg;
+}
+
+static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 reg = 0;
+
+ reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
+ reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
+
+ if (reg)
+ reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
+
+ if (ATTR_CFG_GET_FLD(attr, event_filter))
+ reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
+
+ if (ATTR_CFG_GET_FLD(attr, min_latency))
+ reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
+
+ return reg;
+}
+
+static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, event_filter);
+}
+
+static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, min_latency)
+ << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
+}
+
+static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len);
+ if (!buf->snapshot)
+ perf_aux_output_skip(handle, len);
+}
+
+static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+ u64 limit = buf->nr_pages * PAGE_SIZE;
+
+ /*
+ * The trace format isn't parseable in reverse, so clamp
+ * the limit to half of the buffer size in snapshot mode
+ * so that the worst case is half a buffer of records, as
+ * opposed to a single record.
+ */
+ if (head < limit >> 1)
+ limit >>= 1;
+
+ /*
+ * If we're within max_record_sz of the limit, we must
+ * pad, move the head index and recompute the limit.
+ */
+ if (limit - head < spe_pmu->max_record_sz) {
+ arm_spe_pmu_pad_buf(handle, limit - head);
+ handle->head = PERF_IDX2OFF(limit, buf);
+ limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head;
+ }
+
+ return limit;
+}
+
+static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ const u64 bufsize = buf->nr_pages * PAGE_SIZE;
+ u64 limit = bufsize;
+ u64 head, tail, wakeup;
+
+ /*
+ * The head can be misaligned for two reasons:
+ *
+ * 1. The hardware left PMBPTR pointing to the first byte after
+ * a record when generating a buffer management event.
+ *
+ * 2. We used perf_aux_output_skip to consume handle->size bytes
+ * and CIRC_SPACE was used to compute the size, which always
+ * leaves one entry free.
+ *
+ * Deal with this by padding to the next alignment boundary and
+ * moving the head index. If we run out of buffer space, we'll
+ * reduce handle->size to zero and end up reporting truncation.
+ */
+ head = PERF_IDX2OFF(handle->head, buf);
+ if (!IS_ALIGNED(head, spe_pmu->align)) {
+ unsigned long delta = roundup(head, spe_pmu->align) - head;
+
+ delta = min(delta, handle->size);
+ arm_spe_pmu_pad_buf(handle, delta);
+ head = PERF_IDX2OFF(handle->head, buf);
+ }
+
+ /* If we've run out of free space, then nothing more to do */
+ if (!handle->size)
+ goto no_space;
+
+ /* Compute the tail and wakeup indices now that we've aligned head */
+ tail = PERF_IDX2OFF(handle->head + handle->size, buf);
+ wakeup = PERF_IDX2OFF(handle->wakeup, buf);
+
+ /*
+ * Avoid clobbering unconsumed data. We know we have space, so
+ * if we see head == tail we know that the buffer is empty. If
+ * head > tail, then there's nothing to clobber prior to
+ * wrapping.
+ */
+ if (head < tail)
+ limit = round_down(tail, PAGE_SIZE);
+
+ /*
+ * Wakeup may be arbitrarily far into the future. If it's not in
+ * the current generation, either we'll wrap before hitting it,
+ * or it's in the past and has been handled already.
+ *
+ * If there's a wakeup before we wrap, arrange to be woken up by
+ * the page boundary following it. Keep the tail boundary if
+ * that's lower.
+ */
+ if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
+ limit = min(limit, round_up(wakeup, PAGE_SIZE));
+
+ if (limit > head)
+ return limit;
+
+ arm_spe_pmu_pad_buf(handle, handle->size);
+no_space:
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ perf_aux_output_end(handle, 0);
+ return 0;
+}
+
+static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
+ u64 limit = __arm_spe_pmu_next_off(handle);
+ u64 head = PERF_IDX2OFF(handle->head, buf);
+
+ /*
+ * If the head has come too close to the end of the buffer,
+ * then pad to the end and recompute the limit.
+ */
+ if (limit && (limit - head < spe_pmu->max_record_sz)) {
+ arm_spe_pmu_pad_buf(handle, limit - head);
+ limit = __arm_spe_pmu_next_off(handle);
+ }
+
+ return limit;
+}
+
+static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event)
+{
+ u64 base, limit;
+ struct arm_spe_pmu_buf *buf;
+
+ /* Start a new aux session */
+ buf = perf_aux_output_begin(handle, event);
+ if (!buf) {
+ event->hw.state |= PERF_HES_STOPPED;
+ /*
+ * We still need to clear the limit pointer, since the
+ * profiler might only be disabled by virtue of a fault.
+ */
+ limit = 0;
+ goto out_write_limit;
+ }
+
+ limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
+ : arm_spe_pmu_next_off(handle);
+ if (limit)
+ limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
+
+ limit += (u64)buf->base;
+ base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
+ write_sysreg_s(base, SYS_PMBPTR_EL1);
+
+out_write_limit:
+ write_sysreg_s(limit, SYS_PMBLIMITR_EL1);
+}
+
+static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle)
+{
+ struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
+ u64 offset, size;
+
+ offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base;
+ size = offset - PERF_IDX2OFF(handle->head, buf);
+
+ if (buf->snapshot)
+ handle->head = offset;
+
+ perf_aux_output_end(handle, size);
+}
+
+static void arm_spe_pmu_disable_and_drain_local(void)
+{
+ /* Disable profiling at EL0 and EL1 */
+ write_sysreg_s(0, SYS_PMSCR_EL1);
+ isb();
+
+ /* Drain any buffered data */
+ psb_csync();
+ dsb(nsh);
+
+ /* Disable the profiling buffer */
+ write_sysreg_s(0, SYS_PMBLIMITR_EL1);
+ isb();
+}
+
+/* IRQ handling */
+static enum arm_spe_pmu_buf_fault_action
+arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
+{
+ const char *err_str;
+ u64 pmbsr;
+ enum arm_spe_pmu_buf_fault_action ret;
+
+ /*
+ * Ensure new profiling data is visible to the CPU and any external
+ * aborts have been resolved.
+ */
+ psb_csync();
+ dsb(nsh);
+
+ /* Ensure hardware updates to PMBPTR_EL1 are visible */
+ isb();
+
+ /* Service required? */
+ pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
+ if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
+ return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
+
+ /*
+ * If we've lost data, disable profiling and also set the PARTIAL
+ * flag to indicate that the last record is corrupted.
+ */
+ if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
+ PERF_AUX_FLAG_PARTIAL);
+
+ /* Report collisions to userspace so that it can up the period */
+ if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
+
+ /* We only expect buffer management events */
+ switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
+ case SYS_PMBSR_EL1_EC_BUF:
+ /* Handled below */
+ break;
+ case SYS_PMBSR_EL1_EC_FAULT_S1:
+ case SYS_PMBSR_EL1_EC_FAULT_S2:
+ err_str = "Unexpected buffer fault";
+ goto out_err;
+ default:
+ err_str = "Unknown error code";
+ goto out_err;
+ }
+
+ /* Buffer management event */
+ switch (pmbsr &
+ (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
+ case SYS_PMBSR_EL1_BUF_BSC_FULL:
+ ret = SPE_PMU_BUF_FAULT_ACT_OK;
+ goto out_stop;
+ default:
+ err_str = "Unknown buffer status code";
+ }
+
+out_err:
+ pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n",
+ err_str, smp_processor_id(), pmbsr,
+ read_sysreg_s(SYS_PMBPTR_EL1),
+ read_sysreg_s(SYS_PMBLIMITR_EL1));
+ ret = SPE_PMU_BUF_FAULT_ACT_FATAL;
+
+out_stop:
+ arm_spe_perf_aux_output_end(handle);
+ return ret;
+}
+
+static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
+{
+ struct perf_output_handle *handle = dev;
+ struct perf_event *event = handle->event;
+ enum arm_spe_pmu_buf_fault_action act;
+
+ if (!perf_get_aux(handle))
+ return IRQ_NONE;
+
+ act = arm_spe_pmu_buf_get_fault_act(handle);
+ if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+ return IRQ_NONE;
+
+ /*
+ * Ensure perf callbacks have completed, which may disable the
+ * profiling buffer in response to a TRUNCATION flag.
+ */
+ irq_work_run();
+
+ switch (act) {
+ case SPE_PMU_BUF_FAULT_ACT_FATAL:
+ /*
+ * If a fatal exception occurred then leaving the profiling
+ * buffer enabled is a recipe waiting to happen. Since
+ * fatal faults don't always imply truncation, make sure
+ * that the profiling buffer is disabled explicitly before
+ * clearing the syndrome register.
+ */
+ arm_spe_pmu_disable_and_drain_local();
+ break;
+ case SPE_PMU_BUF_FAULT_ACT_OK:
+ /*
+ * We handled the fault (the buffer was full), so resume
+ * profiling as long as we didn't detect truncation.
+ * PMBPTR might be misaligned, but we'll burn that bridge
+ * when we get to it.
+ */
+ if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) {
+ arm_spe_perf_aux_output_begin(handle, event);
+ isb();
+ }
+ break;
+ case SPE_PMU_BUF_FAULT_ACT_SPURIOUS:
+ /* We've seen you before, but GCC has the memory of a sieve. */
+ break;
+ }
+
+ /* The buffer pointers are now sane, so resume profiling. */
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ return IRQ_HANDLED;
+}
+
+/* Perf callbacks */
+static int arm_spe_pmu_event_init(struct perf_event *event)
+{
+ u64 reg;
+ struct perf_event_attr *attr = &event->attr;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+
+ /* This is, of course, deeply driver-specific */
+ if (attr->type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->cpu >= 0 &&
+ !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
+ return -ENOENT;
+
+ if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0)
+ return -EOPNOTSUPP;
+
+ if (attr->exclude_idle)
+ return -EOPNOTSUPP;
+
+ /*
+ * Feedback-directed frequency throttling doesn't work when we
+ * have a buffer of samples. We'd need to manually count the
+ * samples in the buffer when it fills up and adjust the event
+ * count to reflect that. Instead, just force the user to specify
+ * a sample period.
+ */
+ if (attr->freq)
+ return -EINVAL;
+
+ reg = arm_spe_event_to_pmsfcr(event);
+ if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
+ return -EOPNOTSUPP;
+
+ if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
+ return -EOPNOTSUPP;
+
+ if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
+ return -EOPNOTSUPP;
+
+ reg = arm_spe_event_to_pmscr(event);
+ if (!capable(CAP_SYS_ADMIN) &&
+ (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
+ BIT(SYS_PMSCR_EL1_CX_SHIFT) |
+ BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
+ return -EACCES;
+
+ return 0;
+}
+
+static void arm_spe_pmu_start(struct perf_event *event, int flags)
+{
+ u64 reg;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+ hwc->state = 0;
+ arm_spe_perf_aux_output_begin(handle, event);
+ if (hwc->state)
+ return;
+
+ reg = arm_spe_event_to_pmsfcr(event);
+ write_sysreg_s(reg, SYS_PMSFCR_EL1);
+
+ reg = arm_spe_event_to_pmsevfr(event);
+ write_sysreg_s(reg, SYS_PMSEVFR_EL1);
+
+ reg = arm_spe_event_to_pmslatfr(event);
+ write_sysreg_s(reg, SYS_PMSLATFR_EL1);
+
+ if (flags & PERF_EF_RELOAD) {
+ reg = arm_spe_event_to_pmsirr(event);
+ write_sysreg_s(reg, SYS_PMSIRR_EL1);
+ isb();
+ reg = local64_read(&hwc->period_left);
+ write_sysreg_s(reg, SYS_PMSICR_EL1);
+ }
+
+ reg = arm_spe_event_to_pmscr(event);
+ isb();
+ write_sysreg_s(reg, SYS_PMSCR_EL1);
+}
+
+static void arm_spe_pmu_stop(struct perf_event *event, int flags)
+{
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle);
+
+ /* If we're already stopped, then nothing to do */
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ /* Stop all trace generation */
+ arm_spe_pmu_disable_and_drain_local();
+
+ if (flags & PERF_EF_UPDATE) {
+ /*
+ * If there's a fault pending then ensure we contain it
+ * to this buffer, since we might be on the context-switch
+ * path.
+ */
+ if (perf_get_aux(handle)) {
+ enum arm_spe_pmu_buf_fault_action act;
+
+ act = arm_spe_pmu_buf_get_fault_act(handle);
+ if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS)
+ arm_spe_perf_aux_output_end(handle);
+ else
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ }
+
+ /*
+ * This may also contain ECOUNT, but nobody else should
+ * be looking at period_left, since we forbid frequency
+ * based sampling.
+ */
+ local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1));
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static int arm_spe_pmu_add(struct perf_event *event, int flags)
+{
+ int ret = 0;
+ struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu;
+
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return -ENOENT;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START) {
+ arm_spe_pmu_start(event, PERF_EF_RELOAD);
+ if (hwc->state & PERF_HES_STOPPED)
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void arm_spe_pmu_del(struct perf_event *event, int flags)
+{
+ arm_spe_pmu_stop(event, PERF_EF_UPDATE);
+}
+
+static void arm_spe_pmu_read(struct perf_event *event)
+{
+}
+
+static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ int i;
+ struct page **pglist;
+ struct arm_spe_pmu_buf *buf;
+
+ /* We need at least two pages for this to work. */
+ if (nr_pages < 2)
+ return NULL;
+
+ /*
+ * We require an even number of pages for snapshot mode, so that
+ * we can effectively treat the buffer as consisting of two equal
+ * parts and give userspace a fighting chance of getting some
+ * useful data out of it.
+ */
+ if (!nr_pages || (snapshot && (nr_pages & 1)))
+ return NULL;
+
+ if (cpu == -1)
+ cpu = raw_smp_processor_id();
+
+ buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu));
+ if (!buf)
+ return NULL;
+
+ pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
+ if (!pglist)
+ goto out_free_buf;
+
+ for (i = 0; i < nr_pages; ++i) {
+ struct page *page = virt_to_page(pages[i]);
+
+ if (PagePrivate(page)) {
+ pr_warn("unexpected high-order page for auxbuf!");
+ goto out_free_pglist;
+ }
+
+ pglist[i] = virt_to_page(pages[i]);
+ }
+
+ buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!buf->base)
+ goto out_free_pglist;
+
+ buf->nr_pages = nr_pages;
+ buf->snapshot = snapshot;
+
+ kfree(pglist);
+ return buf;
+
+out_free_pglist:
+ kfree(pglist);
+out_free_buf:
+ kfree(buf);
+ return NULL;
+}
+
+static void arm_spe_pmu_free_aux(void *aux)
+{
+ struct arm_spe_pmu_buf *buf = aux;
+
+ vunmap(buf->base);
+ kfree(buf);
+}
+
+/* Initialisation and teardown functions */
+static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
+{
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ int idx;
+ char *name;
+ struct device *dev = &spe_pmu->pdev->dev;
+
+ spe_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
+ .attr_groups = arm_spe_pmu_attr_groups,
+ /*
+ * We hitch a ride on the software context here, so that
+ * we can support per-task profiling (which is not possible
+ * with the invalid context as it doesn't get sched callbacks).
+ * This requires that userspace either uses a dummy event for
+ * perf_event_open, since the aux buffer is not setup until
+ * a subsequent mmap, or creates the profiling event in a
+ * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it
+ * once the buffer has been created.
+ */
+ .task_ctx_nr = perf_sw_context,
+ .event_init = arm_spe_pmu_event_init,
+ .add = arm_spe_pmu_add,
+ .del = arm_spe_pmu_del,
+ .start = arm_spe_pmu_start,
+ .stop = arm_spe_pmu_stop,
+ .read = arm_spe_pmu_read,
+ .setup_aux = arm_spe_pmu_setup_aux,
+ .free_aux = arm_spe_pmu_free_aux,
+ };
+
+ idx = atomic_inc_return(&pmu_idx);
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+ return perf_pmu_register(&spe_pmu->pmu, name, -1);
+}
+
+static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu)
+{
+ perf_pmu_unregister(&spe_pmu->pmu);
+}
+
+static void __arm_spe_pmu_dev_probe(void *info)
+{
+ int fld;
+ u64 reg;
+ struct arm_spe_pmu *spe_pmu = info;
+ struct device *dev = &spe_pmu->pdev->dev;
+
+ fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1),
+ ID_AA64DFR0_PMSVER_SHIFT);
+ if (!fld) {
+ dev_err(dev,
+ "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ /* Read PMBIDR first to determine whether or not we have access */
+ reg = read_sysreg_s(SYS_PMBIDR_EL1);
+ if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
+ dev_err(dev,
+ "profiling buffer owned by higher exception level\n");
+ return;
+ }
+
+ /* Minimum alignment. If it's out-of-range, then fail the probe */
+ fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
+ spe_pmu->align = 1 << fld;
+ if (spe_pmu->align > SZ_2K) {
+ dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ /* It's now safe to read PMSIDR and figure out what we've got */
+ reg = read_sysreg_s(SYS_PMSIDR_EL1);
+ if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_LDS;
+
+ if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
+ spe_pmu->features |= SPE_PMU_FEAT_ERND;
+
+ /* This field has a spaced out encoding, so just use a look-up */
+ fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
+ switch (fld) {
+ case 0:
+ spe_pmu->min_period = 256;
+ break;
+ case 2:
+ spe_pmu->min_period = 512;
+ break;
+ case 3:
+ spe_pmu->min_period = 768;
+ break;
+ case 4:
+ spe_pmu->min_period = 1024;
+ break;
+ case 5:
+ spe_pmu->min_period = 1536;
+ break;
+ case 6:
+ spe_pmu->min_period = 2048;
+ break;
+ case 7:
+ spe_pmu->min_period = 3072;
+ break;
+ default:
+ dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
+ fld);
+ /* Fallthrough */
+ case 8:
+ spe_pmu->min_period = 4096;
+ }
+
+ /* Maximum record size. If it's out-of-range, then fail the probe */
+ fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
+ spe_pmu->max_record_sz = 1 << fld;
+ if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
+ dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
+ fld, smp_processor_id());
+ return;
+ }
+
+ fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
+ switch (fld) {
+ default:
+ dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
+ fld);
+ /* Fallthrough */
+ case 2:
+ spe_pmu->counter_sz = 12;
+ }
+
+ dev_info(dev,
+ "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
+ cpumask_pr_args(&spe_pmu->supported_cpus),
+ spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
+
+ spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
+ return;
+}
+
+static void __arm_spe_pmu_reset_local(void)
+{
+ /*
+ * This is probably overkill, as we have no idea where we're
+ * draining any buffered data to...
+ */
+ arm_spe_pmu_disable_and_drain_local();
+
+ /* Reset the buffer base pointer */
+ write_sysreg_s(0, SYS_PMBPTR_EL1);
+ isb();
+
+ /* Clear any pending management interrupts */
+ write_sysreg_s(0, SYS_PMBSR_EL1);
+ isb();
+}
+
+static void __arm_spe_pmu_setup_one(void *info)
+{
+ struct arm_spe_pmu *spe_pmu = info;
+
+ __arm_spe_pmu_reset_local();
+ enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE);
+}
+
+static void __arm_spe_pmu_stop_one(void *info)
+{
+ struct arm_spe_pmu *spe_pmu = info;
+
+ disable_percpu_irq(spe_pmu->irq);
+ __arm_spe_pmu_reset_local();
+}
+
+static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_spe_pmu *spe_pmu;
+
+ spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return 0;
+
+ __arm_spe_pmu_setup_one(spe_pmu);
+ return 0;
+}
+
+static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ struct arm_spe_pmu *spe_pmu;
+
+ spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node);
+ if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus))
+ return 0;
+
+ __arm_spe_pmu_stop_one(spe_pmu);
+ return 0;
+}
+
+static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
+{
+ int ret;
+ cpumask_t *mask = &spe_pmu->supported_cpus;
+
+ /* Make sure we probe the hardware on a relevant CPU */
+ ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1);
+ if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED))
+ return -ENXIO;
+
+ /* Request our PPIs (note that the IRQ is still disabled) */
+ ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
+ spe_pmu->handle);
+ if (ret)
+ return ret;
+
+ /*
+ * Register our hotplug notifier now so we don't miss any events.
+ * This will enable the IRQ for any supported CPUs that are already
+ * up.
+ */
+ ret = cpuhp_state_add_instance(arm_spe_pmu_online,
+ &spe_pmu->hotplug_node);
+ if (ret)
+ free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+
+ return ret;
+}
+
+static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
+{
+ cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node);
+ free_percpu_irq(spe_pmu->irq, spe_pmu->handle);
+}
+
+/* Driver and device probing */
+static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
+{
+ struct platform_device *pdev = spe_pmu->pdev;
+ int irq = platform_get_irq(pdev, 0);
+
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ (%d)\n", irq);
+ return -ENXIO;
+ }
+
+ if (!irq_is_percpu(irq)) {
+ dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq);
+ return -EINVAL;
+ }
+
+ if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
+ dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
+ return -EINVAL;
+ }
+
+ spe_pmu->irq = irq;
+ return 0;
+}
+
+static const struct of_device_id arm_spe_pmu_of_match[] = {
+ { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
+ { /* Sentinel */ },
+};
+
+static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct arm_spe_pmu *spe_pmu;
+ struct device *dev = &pdev->dev;
+
+ spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
+ if (!spe_pmu) {
+ dev_err(dev, "failed to allocate spe_pmu\n");
+ return -ENOMEM;
+ }
+
+ spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle));
+ if (!spe_pmu->handle)
+ return -ENOMEM;
+
+ spe_pmu->pdev = pdev;
+ platform_set_drvdata(pdev, spe_pmu);
+
+ ret = arm_spe_pmu_irq_probe(spe_pmu);
+ if (ret)
+ goto out_free_handle;
+
+ ret = arm_spe_pmu_dev_init(spe_pmu);
+ if (ret)
+ goto out_free_handle;
+
+ ret = arm_spe_pmu_perf_init(spe_pmu);
+ if (ret)
+ goto out_teardown_dev;
+
+ return 0;
+
+out_teardown_dev:
+ arm_spe_pmu_dev_teardown(spe_pmu);
+out_free_handle:
+ free_percpu(spe_pmu->handle);
+ return ret;
+}
+
+static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+{
+ struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
+
+ arm_spe_pmu_perf_destroy(spe_pmu);
+ arm_spe_pmu_dev_teardown(spe_pmu);
+ free_percpu(spe_pmu->handle);
+ return 0;
+}
+
+static struct platform_driver arm_spe_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(arm_spe_pmu_of_match),
+ },
+ .probe = arm_spe_pmu_device_dt_probe,
+ .remove = arm_spe_pmu_device_remove,
+};
+
+static int __init arm_spe_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
+ arm_spe_pmu_cpu_startup,
+ arm_spe_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ arm_spe_pmu_online = ret;
+
+ ret = platform_driver_register(&arm_spe_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(arm_spe_pmu_online);
+
+ return ret;
+}
+
+static void __exit arm_spe_pmu_exit(void)
+{
+ platform_driver_unregister(&arm_spe_pmu_driver);
+ cpuhp_remove_multi_state(arm_spe_pmu_online);
+}
+
+module_init(arm_spe_pmu_init);
+module_exit(arm_spe_pmu_exit);
+
+MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
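
As the task_ctx_nr comment in arm_spe_pmu_perf_init() notes, userspace must either open a dummy event or open the SPE event disabled and only enable it once the AUX buffer exists. A userspace sketch of the second approach (not part of the patch; the PMU type value, period and buffer sizes are illustrative, and the real type must be read from /sys/bus/event_source/devices/arm_spe_0/type):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_spe_event(int spe_pmu_type, size_t data_sz, size_t aux_sz)
	{
		long page = sysconf(_SC_PAGESIZE);
		struct perf_event_attr attr = {
			.size		= sizeof(attr),
			.type		= spe_pmu_type,	/* from sysfs, e.g. arm_spe_0 */
			.sample_period	= 1024,		/* >= caps/min_interval */
			.disabled	= 1,		/* enable only after AUX mmap */
		};
		struct perf_event_mmap_page *pg;
		void *aux;
		int fd;

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return -1;

		/* map the user page plus the normal data area first */
		pg = mmap(NULL, page + data_sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
		if (pg == MAP_FAILED)
			return -1;

		/* advertise where the AUX area will live, then map it */
		pg->aux_offset = page + data_sz;
		pg->aux_size = aux_sz;
		aux = mmap(NULL, aux_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, pg->aux_offset);
		if (aux == MAP_FAILED)
			return -1;

		/* the buffer exists now, so the event may be enabled */
		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		return fd;
	}

The data and AUX sizes must be power-of-two multiples of the page size, and arm_spe_pmu_setup_aux() above additionally requires at least two AUX pages (an even number in snapshot mode).
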
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
new file mode 100644
index 000000000000..2621d51ae87a
--- /dev/null
+++ b/drivers/perf/hisilicon/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
new file mode 100644
index 000000000000..1b10ea05a914
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC DDRC uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ * Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* DDRC register definition */
+#define DDRC_PERF_CTRL 0x010
+#define DDRC_FLUX_WR 0x380
+#define DDRC_FLUX_RD 0x384
+#define DDRC_FLUX_WCMD 0x388
+#define DDRC_FLUX_RCMD 0x38c
+#define DDRC_PRE_CMD 0x3c0
+#define DDRC_ACT_CMD 0x3c4
+#define DDRC_BNK_CHG 0x3c8
+#define DDRC_RNK_CHG 0x3cc
+#define DDRC_EVENT_CTRL 0x6C0
+#define DDRC_INT_MASK 0x6c8
+#define DDRC_INT_STATUS 0x6cc
+#define DDRC_INT_CLEAR 0x6d0
+
+/* DDRC has 8-counters */
+#define DDRC_NR_COUNTERS 0x8
+#define DDRC_PERF_CTRL_EN 0x2
+
+/*
+ * The DDRC PMU has eight events, each mapped by hardware to a
+ * fixed-purpose counter whose register offset is not contiguous.
+ * There is therefore no event type to write, and the driver assumes
+ * that the event code (0 to 7) equals the counter index.
+ */
+#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7)
+
+static const u32 ddrc_reg_off[] = {
+ DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
+ DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_BNK_CHG, DDRC_RNK_CHG
+};
+
+/*
+ * Select the counter register offset using the counter index.
+ * The DDRC has no programmable counters; the count is read
+ * directly from the statistics counter register itself.
+ */
+static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx)
+{
+ return ddrc_reg_off[cntr_idx];
+}
+
+static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ /* Use event code as counter index */
+ u32 idx = GET_DDRC_EVENTID(hwc);
+
+ if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+ dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+ return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = GET_DDRC_EVENTID(hwc);
+
+ if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) {
+ dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ writel((u32)val,
+ ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx));
+}
+
+/*
+ * For the DDRC PMU, events are mapped to fixed-purpose counters by the
+ * hardware, so there is no event type to write.
+ */
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
+ u32 type)
+{
+}
+
+static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
+ val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+ val |= DDRC_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
+{
+ u32 val;
+
+ /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
+ val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
+ val &= ~DDRC_PERF_CTRL_EN;
+ writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
+}
+
+static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Set counter index(event code) in DDRC_EVENT_CTRL register */
+ val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+ val |= (1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index(event code) in DDRC_EVENT_CTRL register */
+ val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
+ val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+}
+
+static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
+ unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
+ struct hw_perf_event *hwc = &event->hw;
+ /* For DDRC PMU, we use event code as counter index */
+ int idx = GET_DDRC_EVENTID(hwc);
+
+ if (test_bit(idx, used_mask))
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
+
+static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+ val &= ~(1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(ddrc_pmu->base + DDRC_INT_MASK);
+ val |= (1 << GET_DDRC_EVENTID(hwc));
+ writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+}
+
+static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *ddrc_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read the DDRC_INT_STATUS register */
+ overflown = readl(ddrc_pmu->base + DDRC_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+	 * For each overflow bit that is set, find the corresponding
+	 * counter and handle it.
+ */
+ for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = ddrc_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), ddrc_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ ddrc_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
+ { "HISI0233", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
+
+static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *ddrc_pmu)
+{
+ struct resource *res;
+
+ /*
+	 * Use the SCCL_ID and the DDRC channel ID to identify the
+	 * DDRC PMU; the SCCL_ID is taken from MPIDR[aff2].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
+ &ddrc_pmu->index_id)) {
+ dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &ddrc_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
+ return -EINVAL;
+ }
+	/* DDRC PMUs are only associated with an SCCL, not with a CCL */
+ ddrc_pmu->ccl_id = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrc_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddrc_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
+ return PTR_ERR(ddrc_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_ddrc_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_ddrc_pmu_format_attr,
+};
+
+static struct attribute *hisi_ddrc_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
+ HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
+ HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
+ HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
+ HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
+ HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
+ HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
+ HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_ddrc_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
+ .attrs = hisi_ddrc_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = {
+ &hisi_ddrc_pmu_format_group,
+ &hisi_ddrc_pmu_events_group,
+ &hisi_ddrc_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
+ .write_evtype = hisi_ddrc_pmu_write_evtype,
+ .get_event_idx = hisi_ddrc_pmu_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_start_counters,
+ .stop_counters = hisi_ddrc_pmu_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_write_counter,
+ .read_counter = hisi_ddrc_pmu_read_counter,
+};
+
+static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *ddrc_pmu)
+{
+ int ret;
+
+ ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev);
+ if (ret)
+ return ret;
+
+ ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
+ ddrc_pmu->counter_bits = 32;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
+ ddrc_pmu->dev = &pdev->dev;
+ ddrc_pmu->on_cpu = -1;
+ ddrc_pmu->check_event = 7;
+
+ return 0;
+}
+
+static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *ddrc_pmu;
+ char *name;
+ int ret;
+
+ ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
+ if (!ddrc_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddrc_pmu);
+
+ ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u",
+ ddrc_pmu->sccl_id, ddrc_pmu->index_id);
+ ddrc_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_ddrc_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&ddrc_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ &ddrc_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_ddrc_pmu_driver = {
+ .driver = {
+ .name = "hisi_ddrc_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+ },
+ .probe = hisi_ddrc_pmu_probe,
+ .remove = hisi_ddrc_pmu_remove,
+};
+
+static int __init hisi_ddrc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
+ "AP_PERF_ARM_HISI_DDRC_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_ddrc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+
+ return ret;
+}
+module_init(hisi_ddrc_pmu_module_init);
+
+static void __exit hisi_ddrc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_ddrc_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
+}
+module_exit(hisi_ddrc_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
new file mode 100644
index 000000000000..443906e0aff3
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -0,0 +1,473 @@
+/*
+ * HiSilicon SoC HHA uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
+ * Anurup M <anurup.m@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* HHA register definition */
+#define HHA_INT_MASK 0x0804
+#define HHA_INT_STATUS 0x0808
+#define HHA_INT_CLEAR 0x080C
+#define HHA_PERF_CTRL 0x1E00
+#define HHA_EVENT_CTRL 0x1E04
+#define HHA_EVENT_TYPE0 0x1E80
+/*
+ * Each counter is 48 bits wide; bits [48:63] are reserved and are
+ * Read-As-Zero and Writes-Ignored.
+ */
+#define HHA_CNT0_LOWER 0x1F00
+
+/* HHA has 16 counters */
+#define HHA_NR_COUNTERS 0x10
+
+#define HHA_PERF_CTRL_EN 0x1
+#define HHA_EVTYPE_NONE 0xff
+
+/*
+ * Select the counter register offset using the counter index;
+ * each counter is 48 bits wide.
+ */
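+/*
+ * Worked example of the mapping implemented below: counter index 3 is
+ * read at offset 0x1F00 + 3 * 8 = 0x1F18.
+ */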
+static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
+{
+ return (HHA_CNT0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+ dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+ /* Read 64 bits and like L3C, top 16 bits are RAZ */
+ return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) {
+ dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ /* Write 64 bits and like L3C, top 16 bits are WI */
+ writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx));
+}
+
+static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register (HHA_EVENT_TYPEx).
+ * There are 4 event select registers for the 16 hardware counters.
+ * The event code is 8 bits wide: the first 4 hardware counters use
+ * HHA_EVENT_TYPE0, the next 4 use HHA_EVENT_TYPE1, and so on.
+ */
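+ /*
+ * Worked example: idx = 9 gives reg = 0x1E80 + 4 * (9 / 4) = 0x1E88
+ * (the third event select register) and shift = 8 * (9 % 4) = 8, so
+ * the event code lands in bits [15:8] of that register.
+ */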
+ reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write event code to HHA_EVENT_TYPEx register */
+ val = readl(hha_pmu->base + reg);
+ val &= ~(HHA_EVTYPE_NONE << shift);
+ val |= (type << shift);
+ writel(val, hha_pmu->base + reg);
+}
+
+static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
+{
+ u32 val;
+
+ /*
+ * Set perf_enable bit in HHA_PERF_CTRL to start event
+ * counting for all enabled counters.
+ */
+ val = readl(hha_pmu->base + HHA_PERF_CTRL);
+ val |= HHA_PERF_CTRL_EN;
+ writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
+{
+ u32 val;
+
+ /*
+ * Clear perf_enable bit in HHA_PERF_CTRL to stop event
+ * counting for all enabled counters.
+ */
+ val = readl(hha_pmu->base + HHA_PERF_CTRL);
+ val &= ~(HHA_PERF_CTRL_EN);
+ writel(val, hha_pmu->base + HHA_PERF_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in HHA_EVENT_CTRL register */
+ val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+ val |= (1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in HHA_EVENT_CTRL register */
+ val = readl(hha_pmu->base + HHA_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_EVENT_CTRL);
+}
+
+static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(hha_pmu->base + HHA_INT_MASK);
+ val &= ~(1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(hha_pmu->base + HHA_INT_MASK);
+ val |= (1 << hwc->idx);
+ writel(val, hha_pmu->base + HHA_INT_MASK);
+}
+
+static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *hha_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read HHA_INT_STATUS register */
+ overflown = readl(hha_pmu->base + HHA_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+ * Find the counter index which overflowed if the bit was set
+ * and handle it
+ */
+ for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = hha_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), hha_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ hha_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
+ { "HISI0243", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);
+
+static int hisi_hha_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *hha_pmu)
+{
+ unsigned long long id;
+ struct resource *res;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ hha_pmu->index_id = id;
+
+ /*
+ * Use SCCL_ID and UID to identify the HHA PMU, while
+ * SCCL_ID is in MPIDR[aff2].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &hha_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
+ return -EINVAL;
+ }
+ /* The HHA PMU is shared by all CPUs in one SCCL, so it has no CCL_ID */
+ hha_pmu->ccl_id = -1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hha_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hha_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
+ return PTR_ERR(hha_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_hha_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_hha_pmu_format_attr,
+};
+
+static struct attribute *hisi_hha_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
+ HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
+ HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
+ HISI_PMU_EVENT_ATTR(rx_ccix, 0x03),
+ HISI_PMU_EVENT_ATTR(rx_wbi, 0x04),
+ HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
+ HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
+ HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
+ HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
+ HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
+ HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
+ HISI_PMU_EVENT_ATTR(spill_num, 0x20),
+ HISI_PMU_EVENT_ATTR(spill_success, 0x21),
+ HISI_PMU_EVENT_ATTR(bi_num, 0x23),
+ HISI_PMU_EVENT_ATTR(mediated_num, 0x32),
+ HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33),
+ HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34),
+ HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35),
+ HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38),
+ HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c),
+ HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40),
+ HISI_PMU_EVENT_ATTR(edir-lookup, 0x41),
+ HISI_PMU_EVENT_ATTR(sdir-hit, 0x42),
+ HISI_PMU_EVENT_ATTR(edir-hit, 0x43),
+ HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
+ HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_hha_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
+ .attrs = hisi_hha_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_hha_pmu_attr_groups[] = {
+ &hisi_hha_pmu_format_group,
+ &hisi_hha_pmu_events_group,
+ &hisi_hha_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
+ .write_evtype = hisi_hha_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_hha_pmu_start_counters,
+ .stop_counters = hisi_hha_pmu_stop_counters,
+ .enable_counter = hisi_hha_pmu_enable_counter,
+ .disable_counter = hisi_hha_pmu_disable_counter,
+ .enable_counter_int = hisi_hha_pmu_enable_counter_int,
+ .disable_counter_int = hisi_hha_pmu_disable_counter_int,
+ .write_counter = hisi_hha_pmu_write_counter,
+ .read_counter = hisi_hha_pmu_read_counter,
+};
+
+static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *hha_pmu)
+{
+ int ret;
+
+ ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_hha_pmu_init_irq(hha_pmu, pdev);
+ if (ret)
+ return ret;
+
+ hha_pmu->num_counters = HHA_NR_COUNTERS;
+ hha_pmu->counter_bits = 48;
+ hha_pmu->ops = &hisi_uncore_hha_ops;
+ hha_pmu->dev = &pdev->dev;
+ hha_pmu->on_cpu = -1;
+ hha_pmu->check_event = 0x65;
+
+ return 0;
+}
+
+static int hisi_hha_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *hha_pmu;
+ char *name;
+ int ret;
+
+ hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
+ if (!hha_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hha_pmu);
+
+ ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
+ hha_pmu->sccl_id, hha_pmu->index_id);
+ hha_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_hha_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_hha_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&hha_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ &hha_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_hha_pmu_driver = {
+ .driver = {
+ .name = "hisi_hha_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+ },
+ .probe = hisi_hha_pmu_probe,
+ .remove = hisi_hha_pmu_remove,
+};
+
+static int __init hisi_hha_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
+ "AP_PERF_ARM_HISI_HHA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_hha_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_hha_pmu_module_init);
+
+static void __exit hisi_hha_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_hha_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
+}
+module_exit(hisi_hha_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
new file mode 100644
index 000000000000..0bde5d919b2e
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -0,0 +1,463 @@
+/*
+ * HiSilicon SoC L3C uncore Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* L3C register definition */
+#define L3C_PERF_CTRL 0x0408
+#define L3C_INT_MASK 0x0800
+#define L3C_INT_STATUS 0x0808
+#define L3C_INT_CLEAR 0x080c
+#define L3C_EVENT_CTRL 0x1c00
+#define L3C_EVENT_TYPE0 0x1d00
+/*
+ * Each counter is 48 bits wide; bits [48:63] are reserved and are
+ * Read-As-Zero and Writes-Ignored.
+ */
+#define L3C_CNTR0_LOWER 0x1e00
+
+/* L3C has 8 counters */
+#define L3C_NR_COUNTERS 0x8
+
+#define L3C_PERF_CTRL_EN 0x20000
+#define L3C_EVTYPE_NONE 0xff
+
+/*
+ * Select the counter register offset using the counter index
+ */
+static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
+{
+ return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+}
+
+static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+ dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return 0;
+ }
+
+ /* Read 64-bits and the upper 16 bits are RAZ */
+ return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ u32 idx = hwc->idx;
+
+ if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) {
+ dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ /* Write 64-bits and the upper 16 bits are WI */
+ writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx));
+}
+
+static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+ * Select the appropriate event select register (L3C_EVENT_TYPE0/1).
+ * There are 2 event select registers for the 8 hardware counters.
+ * The event code is 8 bits wide: the first 4 hardware counters use
+ * L3C_EVENT_TYPE0 and the last 4 use L3C_EVENT_TYPE1.
+ */
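+ /*
+ * Worked example: idx = 6 gives reg = 0x1d00 + 4 * (6 / 4) = 0x1d04
+ * (L3C_EVENT_TYPE1) and shift = 8 * (6 % 4) = 16, so the event code
+ * lands in bits [23:16] of that register.
+ */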
+ reg = L3C_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = 8 * reg_idx;
+
+ /* Write event code to L3C_EVENT_TYPEx Register */
+ val = readl(l3c_pmu->base + reg);
+ val &= ~(L3C_EVTYPE_NONE << shift);
+ val |= (type << shift);
+ writel(val, l3c_pmu->base + reg);
+}
+
+static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
+{
+ u32 val;
+
+ /*
+ * Set perf_enable bit in L3C_PERF_CTRL register to start counting
+ * for all enabled counters.
+ */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
+{
+ u32 val;
+
+ /*
+ * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
+ * for all enabled counters.
+ */
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~(L3C_PERF_CTRL_EN);
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in L3C_EVENT_CTRL register */
+ val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+ val |= (1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in L3C_EVENT_CTRL register */
+ val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
+ val &= ~(1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+}
+
+static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_INT_MASK);
+ /* Write 0 to enable interrupt */
+ val &= ~(1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ val = readl(l3c_pmu->base + L3C_INT_MASK);
+ /* Write 1 to mask interrupt */
+ val |= (1 << hwc->idx);
+ writel(val, l3c_pmu->base + L3C_INT_MASK);
+}
+
+static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id)
+{
+ struct hisi_pmu *l3c_pmu = dev_id;
+ struct perf_event *event;
+ unsigned long overflown;
+ int idx;
+
+ /* Read L3C_INT_STATUS register */
+ overflown = readl(l3c_pmu->base + L3C_INT_STATUS);
+ if (!overflown)
+ return IRQ_NONE;
+
+ /*
+ * Find the counter index which overflowed if the bit was set
+ * and handle it.
+ */
+ for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) {
+ /* Write 1 to clear the IRQ status flag */
+ writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR);
+
+ /* Get the corresponding event struct */
+ event = l3c_pmu->pmu_events.hw_events[idx];
+ if (!event)
+ continue;
+
+ hisi_uncore_pmu_event_update(event);
+ hisi_uncore_pmu_set_event_period(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
+ struct platform_device *pdev)
+{
+ int irq, ret;
+
+ /* Read and init IRQ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), l3c_pmu);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Fail to request IRQ:%d ret:%d\n", irq, ret);
+ return ret;
+ }
+
+ l3c_pmu->irq = irq;
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+ { "HISI0213", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
+static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *l3c_pmu)
+{
+ unsigned long long id;
+ struct resource *res;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ "_UID", NULL, &id);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ l3c_pmu->index_id = id;
+
+ /*
+ * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
+ * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
+ */
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &l3c_pmu->sccl_id)) {
+ dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
+ &l3c_pmu->ccl_id)) {
+ dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ l3c_pmu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(l3c_pmu->base)) {
+ dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
+ return PTR_ERR(l3c_pmu->base);
+ }
+
+ return 0;
+}
+
+static struct attribute *hisi_l3c_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_format_attr,
+};
+
+static struct attribute *hisi_l3c_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
+ HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
+ HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02),
+ HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03),
+ HISI_PMU_EVENT_ATTR(victim_num, 0x04),
+ HISI_PMU_EVENT_ATTR(rd_spipe, 0x20),
+ HISI_PMU_EVENT_ATTR(wr_spipe, 0x21),
+ HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22),
+ HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23),
+ HISI_PMU_EVENT_ATTR(back_invalid, 0x29),
+ HISI_PMU_EVENT_ATTR(retry_cpu, 0x40),
+ HISI_PMU_EVENT_ATTR(retry_ring, 0x41),
+ HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42),
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
+ .attrs = hisi_l3c_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = {
+ &hisi_l3c_pmu_format_group,
+ &hisi_l3c_pmu_events_group,
+ &hisi_l3c_pmu_cpumask_attr_group,
+ NULL,
+};
+
+static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
+ .write_evtype = hisi_l3c_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_l3c_pmu_start_counters,
+ .stop_counters = hisi_l3c_pmu_stop_counters,
+ .enable_counter = hisi_l3c_pmu_enable_counter,
+ .disable_counter = hisi_l3c_pmu_disable_counter,
+ .enable_counter_int = hisi_l3c_pmu_enable_counter_int,
+ .disable_counter_int = hisi_l3c_pmu_disable_counter_int,
+ .write_counter = hisi_l3c_pmu_write_counter,
+ .read_counter = hisi_l3c_pmu_read_counter,
+};
+
+static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *l3c_pmu)
+{
+ int ret;
+
+ ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev);
+ if (ret)
+ return ret;
+
+ l3c_pmu->num_counters = L3C_NR_COUNTERS;
+ l3c_pmu->counter_bits = 48;
+ l3c_pmu->ops = &hisi_uncore_l3c_ops;
+ l3c_pmu->dev = &pdev->dev;
+ l3c_pmu->on_cpu = -1;
+ l3c_pmu->check_event = 0x59;
+
+ return 0;
+}
+
+static int hisi_l3c_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *l3c_pmu;
+ char *name;
+ int ret;
+
+ l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
+ if (!l3c_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, l3c_pmu);
+
+ ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ return ret;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
+ l3c_pmu->sccl_id, l3c_pmu->index_id);
+ l3c_pmu->pmu = (struct pmu) {
+ .name = name,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = hisi_l3c_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(l3c_pmu->dev, "L3C PMU register failed!\n");
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+ }
+
+ return ret;
+}
+
+static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&l3c_pmu->pmu);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ &l3c_pmu->node);
+
+ return 0;
+}
+
+static struct platform_driver hisi_l3c_pmu_driver = {
+ .driver = {
+ .name = "hisi_l3c_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+ },
+ .probe = hisi_l3c_pmu_probe,
+ .remove = hisi_l3c_pmu_remove,
+};
+
+static int __init hisi_l3c_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
+ "AP_PERF_ARM_HISI_L3_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_l3c_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+
+ return ret;
+}
+module_init(hisi_l3c_pmu_module_init);
+
+static void __exit hisi_l3c_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_l3c_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE);
+}
+module_exit(hisi_l3c_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
+MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
new file mode 100644
index 000000000000..7ed24b954422
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -0,0 +1,447 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include <asm/local64.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1)
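+/*
+ * e.g. HISI_MAX_PERIOD(32) = 0xffffffff and
+ * HISI_MAX_PERIOD(48) = 0xffffffffffff.
+ */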
+
+/*
+ * PMU format attributes
+ */
+ssize_t hisi_format_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+/*
+ * PMU event attributes
+ */
+ssize_t hisi_event_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+
+ return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+/*
+ * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
+ */
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sprintf(buf, "%d\n", hisi_pmu->on_cpu);
+}
+
+static bool hisi_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ /* Include count for the event */
+ int counters = 1;
+
+ if (!is_software_event(leader)) {
+ /*
+ * We must NOT create groups containing mixed PMUs, although
+ * software events are acceptable
+ */
+ if (leader->pmu != event->pmu)
+ return false;
+
+ /* Increment counter for the leader */
+ if (leader != event)
+ counters++;
+ }
+
+ list_for_each_entry(sibling, &event->group_leader->sibling_list,
+ group_entry) {
+ if (is_software_event(sibling))
+ continue;
+ if (sibling->pmu != event->pmu)
+ return false;
+ /* Increment counter for each sibling */
+ counters++;
+ }
+
+ /* The group cannot use more events than there are hardware counters */
+ return counters <= hisi_pmu->num_counters;
+}
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx)
+{
+ return idx >= 0 && idx < hisi_pmu->num_counters;
+}
+
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ unsigned long *used_mask = hisi_pmu->pmu_events.used_mask;
+ u32 num_counters = hisi_pmu->num_counters;
+ int idx;
+
+ idx = find_first_zero_bit(used_mask, num_counters);
+ if (idx == num_counters)
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
+
+static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
+{
+ if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) {
+ dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx);
+ return;
+ }
+
+ clear_bit(idx, hisi_pmu->pmu_events.used_mask);
+}
+
+int hisi_uncore_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hisi_pmu *hisi_pmu;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * We do not support sampling, as the counters are shared by
+ * all CPU cores in a CPU die (SCCL). We also do not support
+ * attaching to a task (per-process mode).
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ /* counters do not have these bits */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle)
+ return -EINVAL;
+
+ /*
+ * The uncore counters are not specific to any CPU, so per-task
+ * counting cannot be supported.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /*
+ * Validate that the events in the group do not exceed the
+ * number of available hardware counters.
+ */
+ if (!hisi_validate_event_group(event))
+ return -EINVAL;
+
+ hisi_pmu = to_hisi_pmu(event->pmu);
+ if (event->attr.config > hisi_pmu->check_event)
+ return -EINVAL;
+
+ if (hisi_pmu->on_cpu == -1)
+ return -EINVAL;
+ /*
+ * We don't assign an index until we actually place the event onto
+ * hardware. Use -1 to signify that we haven't decided where to put it
+ * yet.
+ */
+ hwc->idx = -1;
+ hwc->config_base = event->attr.config;
+
+ /* Enforce to use the same CPU for all events in this PMU */
+ event->cpu = hisi_pmu->on_cpu;
+
+ return 0;
+}
+
+/*
+ * Set the counter to count the event that we're interested in,
+ * and enable interrupt and counter.
+ */
+static void hisi_uncore_pmu_enable_event(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx,
+ HISI_GET_EVENTID(event));
+
+ hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc);
+ hisi_pmu->ops->enable_counter(hisi_pmu, hwc);
+}
+
+/*
+ * Disable counter and interrupt.
+ */
+static void hisi_uncore_pmu_disable_event(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_pmu->ops->disable_counter(hisi_pmu, hwc);
+ hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc);
+}
+
+void hisi_uncore_pmu_set_event_period(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * The HiSilicon PMU counters are 32 or 48 bits wide, depending on the
+ * PMU. The initial value is set to 2^(counter_bits - 1) to allow for
+ * extreme interrupt latency: the overflow interrupt can hopefully be
+ * handled before another 2^(counter_bits - 1) events occur and the
+ * counter overtakes its previous value.
+ */
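+ /*
+ * Example with the values used by these drivers: a 32-bit counter
+ * starts at 0x80000000 and a 48-bit counter at 0x800000000000,
+ * leaving 2^31 and 2^47 events respectively before the next overflow.
+ */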
+ u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);
+
+ local64_set(&hwc->prev_count, val);
+ /* Write start value to the hardware event counter */
+ hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
+}
+
+void hisi_uncore_pmu_event_update(struct perf_event *event)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
+ do {
+ /* Read the count from the counter register */
+ new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc);
+ prev_raw_count = local64_read(&hwc->prev_count);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count);
+ /*
+ * compute the delta
+ */
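+ /*
+ * The mask below makes the subtraction wrap correctly, e.g. for a
+ * 32-bit counter prev = 0xfffffff0 and new = 0x10 give delta = 0x20.
+ */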
+ delta = (new_raw_count - prev_raw_count) &
+ HISI_MAX_PERIOD(hisi_pmu->counter_bits);
+ local64_add(delta, &event->count);
+}
+
+void hisi_uncore_pmu_start(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+ hisi_uncore_pmu_set_event_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ u64 prev_raw_count = local64_read(&hwc->prev_count);
+
+ hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count);
+ }
+
+ hisi_uncore_pmu_enable_event(event);
+ perf_event_update_userpage(event);
+}
+
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_uncore_pmu_disable_event(event);
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ /* Read hardware counter and update the perf counter statistics */
+ hisi_uncore_pmu_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+int hisi_uncore_pmu_add(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ /* Get an available counter index for counting */
+ idx = hisi_pmu->ops->get_event_idx(event);
+ if (idx < 0)
+ return idx;
+
+ event->hw.idx = idx;
+ hisi_pmu->pmu_events.hw_events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ hisi_uncore_pmu_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+void hisi_uncore_pmu_del(struct perf_event *event, int flags)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ hisi_uncore_pmu_stop(event, PERF_EF_UPDATE);
+ hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx);
+ perf_event_update_userpage(event);
+ hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
+}
+
+void hisi_uncore_pmu_read(struct perf_event *event)
+{
+ /* Read hardware counter and update the perf counter statistics */
+ hisi_uncore_pmu_event_update(event);
+}
+
+void hisi_uncore_pmu_enable(struct pmu *pmu)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+ int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask,
+ hisi_pmu->num_counters);
+
+ if (!enabled)
+ return;
+
+ hisi_pmu->ops->start_counters(hisi_pmu);
+}
+
+void hisi_uncore_pmu_disable(struct pmu *pmu)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
+
+ hisi_pmu->ops->stop_counters(hisi_pmu);
+}
+
+/*
+ * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
+ * If multi-threading is supported, SCCL_ID is in MPIDR[aff3] and CCL_ID
+ * is in MPIDR[aff2]; if not, SCCL_ID is in MPIDR[aff2] and CCL_ID is
+ * in MPIDR[aff1]. If this changes in future, this shall be updated.
+ */
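+/*
+ * For reference, the affinity fields extracted below sit at fixed
+ * positions in MPIDR_EL1: aff3 is bits [39:32], aff2 bits [23:16] and
+ * aff1 bits [15:8].
+ */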
+static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+{
+ u64 mpidr = read_cpuid_mpidr();
+
+ if (mpidr & MPIDR_MT_BITMASK) {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ } else {
+ if (sccl_id)
+ *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ if (ccl_id)
+ *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ }
+}
+
+/*
+ * Check whether the CPU is associated with this uncore PMU
+ */
+static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
+{
+ int sccl_id, ccl_id;
+
+ if (hisi_pmu->ccl_id == -1) {
+ /* If CCL_ID is -1, the PMU is shared by the whole SCCL */
+ hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
+
+ return sccl_id == hisi_pmu->sccl_id;
+ }
+
+ hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
+
+ return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+}
+
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+ node);
+
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ return 0;
+
+ cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
+
+ /* If another CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ /* Use this CPU in cpumask for event counting */
+ hisi_pmu->on_cpu = cpu;
+
+ /* Overflow interrupt also should use the same CPU */
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
+ node);
+ cpumask_t pmu_online_cpus;
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
+ return 0;
+
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (hisi_pmu->on_cpu != cpu)
+ return 0;
+
+ /* Give up ownership of the PMU */
+ hisi_pmu->on_cpu = -1;
+
+ /* Choose a new CPU to migrate ownership of the PMU to */
+ cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
+ cpu_online_mask);
+ target = cpumask_any_but(&pmu_online_cpus, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
+ /* Use this CPU for event counting */
+ hisi_pmu->on_cpu = target;
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
new file mode 100644
index 000000000000..f21226a0e9c6
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -0,0 +1,102 @@
+/*
+ * HiSilicon SoC Hardware event counters support
+ *
+ * Copyright (C) 2017 Hisilicon Limited
+ * Author: Anurup M <anurup.m@huawei.com>
+ * Shaokun Zhang <zhangshaokun@hisilicon.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __HISI_UNCORE_PMU_H__
+#define __HISI_UNCORE_PMU_H__
+
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "hisi_pmu: " fmt
+
+#define HISI_MAX_COUNTERS 0x10
+#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
+
+#define HISI_PMU_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, 0444, _func, NULL), (void *)_config } \
+ })[0].attr.attr)
+
+#define HISI_PMU_FORMAT_ATTR(_name, _config) \
+ HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config)
+#define HISI_PMU_EVENT_ATTR(_name, _config) \
+ HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
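+/*
+ * The helpers above wrap an anonymous dev_ext_attribute in a compound
+ * literal and return its embedded struct attribute; e.g.
+ * HISI_PMU_FORMAT_ATTR(event, "config:0-7") creates a read-only sysfs
+ * "event" file whose show callback prints "config:0-7".
+ */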
+
+struct hisi_pmu;
+
+struct hisi_uncore_ops {
+ void (*write_evtype)(struct hisi_pmu *, int, u32);
+ int (*get_event_idx)(struct perf_event *);
+ u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64);
+ void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *);
+ void (*start_counters)(struct hisi_pmu *);
+ void (*stop_counters)(struct hisi_pmu *);
+};
+
+struct hisi_pmu_hwevents {
+ struct perf_event *hw_events[HISI_MAX_COUNTERS];
+ DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS);
+};
+
+/* Generic pmu struct for different pmu types */
+struct hisi_pmu {
+ struct pmu pmu;
+ const struct hisi_uncore_ops *ops;
+ struct hisi_pmu_hwevents pmu_events;
+ /* associated_cpus: All CPUs associated with the PMU */
+ cpumask_t associated_cpus;
+ /* CPU used for counting */
+ int on_cpu;
+ int irq;
+ struct device *dev;
+ struct hlist_node node;
+ int sccl_id;
+ int ccl_id;
+ void __iomem *base;
+ /* the ID of the PMU modules */
+ u32 index_id;
+ int num_counters;
+ int counter_bits;
+ /* maximum valid event code accepted by this PMU */
+ int check_event;
+};
+
+int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx);
+int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
+void hisi_uncore_pmu_read(struct perf_event *event);
+int hisi_uncore_pmu_add(struct perf_event *event, int flags);
+void hisi_uncore_pmu_del(struct perf_event *event, int flags);
+void hisi_uncore_pmu_start(struct perf_event *event, int flags);
+void hisi_uncore_pmu_stop(struct perf_event *event, int flags);
+void hisi_uncore_pmu_set_event_period(struct perf_event *event);
+void hisi_uncore_pmu_event_update(struct perf_event *event);
+int hisi_uncore_pmu_event_init(struct perf_event *event);
+void hisi_uncore_pmu_enable(struct pmu *pmu);
+void hisi_uncore_pmu_disable(struct pmu *pmu);
+ssize_t hisi_event_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hisi_format_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+ssize_t hisi_cpumask_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
+int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
+#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index b242cce10468..4fdc8486a8e4 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -92,6 +92,21 @@
#define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
+/*
+ * Events
+ */
+#define L2_EVENT_CYCLES 0xfe
+#define L2_EVENT_DCACHE_OPS 0x400
+#define L2_EVENT_ICACHE_OPS 0x401
+#define L2_EVENT_TLBI 0x402
+#define L2_EVENT_BARRIERS 0x403
+#define L2_EVENT_TOTAL_READS 0x405
+#define L2_EVENT_TOTAL_WRITES 0x406
+#define L2_EVENT_TOTAL_REQUESTS 0x407
+#define L2_EVENT_LDREX 0x420
+#define L2_EVENT_STREX 0x421
+#define L2_EVENT_CLREX 0x422
+
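+/*
+ * These encodings combine the existing l2_code (config:4-11) and
+ * l2_group (config:0-3) fields, e.g. L2_EVENT_DCACHE_OPS (0x400) is
+ * code 0x40 in group 0.
+ */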
static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
@@ -700,9 +715,12 @@ static struct attribute_group l2_cache_pmu_cpumask_group = {
/* CCG format for perf RAW codes. */
PMU_FORMAT_ATTR(l2_code, "config:4-11");
PMU_FORMAT_ATTR(l2_group, "config:0-3");
+PMU_FORMAT_ATTR(event, "config:0-11");
+
static struct attribute *l2_cache_pmu_formats[] = {
&format_attr_l2_code.attr,
&format_attr_l2_group.attr,
+ &format_attr_event.attr,
NULL,
};
@@ -711,9 +729,45 @@ static struct attribute_group l2_cache_pmu_format_group = {
.attrs = l2_cache_pmu_formats,
};
+static ssize_t l2cache_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define L2CACHE_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *l2_cache_pmu_events[] = {
+ L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
+ L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
+ L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
+ L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
+ L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
+ L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
+ L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
+ L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
+ L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
+ L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
+ L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
+ NULL
+};
+
+static struct attribute_group l2_cache_pmu_events_group = {
+ .name = "events",
+ .attrs = l2_cache_pmu_events,
+};
+
static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
&l2_cache_pmu_format_group,
&l2_cache_pmu_cpumask_group,
+ &l2_cache_pmu_events_group,
NULL,
};
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 3a52dcb09566..84e3bd9c5665 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the phy drivers.
#
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 1161e11fb3cf..aa857be692cf 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -24,7 +24,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -926,6 +926,7 @@ static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.enable_pmu_unk1 = true,
+ .phy0_dual_route = true,
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 64fc59c3ae6d..97d27b0d5cc7 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -67,3 +67,16 @@ config PHY_BRCM_SATA
help
Enable this to support the Broadcom SATA PHY.
If unsure, say N.
+
+config PHY_BRCM_USB
+ tristate "Broadcom STB USB PHY driver"
+ depends on ARCH_BRCMSTB
+ depends on OF
+ select GENERIC_PHY
+ select SOC_BRCMSTB
+ default ARCH_BRCMSTB
+ help
+ Enable this to support the Broadcom STB USB PHY.
+ This driver is required by the USB XHCI, EHCI and OHCI
+ drivers.
+ If unsure, say N.
diff --git a/drivers/phy/broadcom/Makefile b/drivers/phy/broadcom/Makefile
index 4eb82ec8d491..13e000c1a43a 100644
--- a/drivers/phy/broadcom/Makefile
+++ b/drivers/phy/broadcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
obj-$(CONFIG_BCM_KONA_USB2_PHY) += phy-bcm-kona-usb2.o
obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
@@ -5,3 +6,6 @@ obj-$(CONFIG_PHY_BCM_NS_USB3) += phy-bcm-ns-usb3.o
obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o
obj-$(CONFIG_PHY_NS2_USB_DRD) += phy-bcm-ns2-usbdrd.o
obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
+obj-$(CONFIG_PHY_BRCM_USB) += phy-brcm-usb-dvr.o
+
+phy-brcm-usb-dvr-objs := phy-brcm-usb.o phy-brcm-usb-init.o
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
index d099a0c8cee5..7ceea5ae2704 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c
@@ -12,7 +12,7 @@
*/
#include <linux/delay.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 9d7f74fe3d7c..3f953db70288 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -49,11 +49,29 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_IPROC_SR,
};
+enum brcm_sata_phy_rxaeq_mode {
+ RXAEQ_MODE_OFF = 0,
+ RXAEQ_MODE_AUTO,
+ RXAEQ_MODE_MANUAL,
+};
+
+static enum brcm_sata_phy_rxaeq_mode rxaeq_to_val(const char *m)
+{
+ if (!strcmp(m, "auto"))
+ return RXAEQ_MODE_AUTO;
+ else if (!strcmp(m, "manual"))
+ return RXAEQ_MODE_MANUAL;
+ else
+ return RXAEQ_MODE_OFF;
+}
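+
+/*
+ * Illustrative (not part of this patch) device-tree fragment using the
+ * properties parsed in brcm_sata_phy_probe() below; the node name and
+ * value are placeholders:
+ *
+ *	sata-phy@1 {
+ *		brcm,rxaeq-mode = "manual";
+ *		brcm,rxaeq-value = <51>;
+ *	};
+ */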
+
struct brcm_sata_port {
int portnum;
struct phy *phy;
struct brcm_sata_phy *phy_priv;
bool ssc_en;
+ enum brcm_sata_phy_rxaeq_mode rxaeq_mode;
+ u32 rxaeq_val;
};
struct brcm_sata_phy {
@@ -93,6 +111,15 @@ enum sata_phy_regs {
TX_ACTRL0 = 0x80,
TX_ACTRL0_TXPOL_FLIP = BIT(6),
+ AEQRX_REG_BANK_0 = 0xd0,
+ AEQ_CONTROL1 = 0x81,
+ AEQ_CONTROL1_ENABLE = BIT(2),
+ AEQ_CONTROL1_FREEZE = BIT(3),
+ AEQ_FRC_EQ = 0x83,
+ AEQ_FRC_EQ_FORCE = BIT(0),
+ AEQ_FRC_EQ_FORCE_VAL = BIT(1),
+ AEQRX_REG_BANK_1 = 0xe0,
+
OOB_REG_BANK = 0x150,
OOB1_REG_BANK = 0x160,
OOB_CTRL1 = 0x80,
@@ -190,7 +217,7 @@ static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
#define STB_FMAX_VAL_DEFAULT 0x3df
#define STB_FMAX_VAL_SSC 0x83
-static int brcm_stb_sata_init(struct brcm_sata_port *port)
+static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
{
void __iomem *base = brcm_sata_pcb_base(port);
struct brcm_sata_phy *priv = port->phy_priv;
@@ -215,10 +242,47 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
+}
+
+#define AEQ_FRC_EQ_VAL_SHIFT 2
+#define AEQ_FRC_EQ_VAL_MASK 0x3f
+
+static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = 0, reg = 0;
+
+ switch (port->rxaeq_mode) {
+ case RXAEQ_MODE_OFF:
+ return 0;
+
+ case RXAEQ_MODE_AUTO:
+ reg = AEQ_CONTROL1;
+ tmp = AEQ_CONTROL1_ENABLE | AEQ_CONTROL1_FREEZE;
+ break;
+
+ case RXAEQ_MODE_MANUAL:
+ reg = AEQ_FRC_EQ;
+ tmp = AEQ_FRC_EQ_FORCE | AEQ_FRC_EQ_FORCE_VAL;
+ if (port->rxaeq_val > AEQ_FRC_EQ_VAL_MASK)
+ return -EINVAL;
+ tmp |= port->rxaeq_val << AEQ_FRC_EQ_VAL_SHIFT;
+ break;
+ }
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
return 0;
}
+static int brcm_stb_sata_init(struct brcm_sata_port *port)
+{
+ brcm_stb_sata_ssc_init(port);
+
+ return brcm_stb_sata_rxaeq_init(port);
+}
+
/* NS2 SATA PLL1 defaults were characterized by H/W group */
#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
@@ -463,6 +527,7 @@ MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
static int brcm_sata_phy_probe(struct platform_device *pdev)
{
+ const char *rxaeq_mode;
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node, *child;
const struct of_device_id *of_id;
@@ -525,6 +590,13 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
port->portnum = id;
port->phy_priv = priv;
port->phy = devm_phy_create(dev, child, &phy_ops);
+ port->rxaeq_mode = RXAEQ_MODE_OFF;
+ if (!of_property_read_string(child, "brcm,rxaeq-mode",
+ &rxaeq_mode))
+ port->rxaeq_mode = rxaeq_to_val(rxaeq_mode);
+ if (port->rxaeq_mode == RXAEQ_MODE_MANUAL)
+ of_property_read_u32(child, "brcm,rxaeq-value",
+ &port->rxaeq_val);
port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
if (IS_ERR(port->phy)) {
dev_err(dev, "failed to create PHY\n");
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
new file mode 100644
index 000000000000..1e7ce0b6f299
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -0,0 +1,1017 @@
+/*
+ * phy-brcm-usb-init.c - Broadcom USB Phy chip specific init functions
+ *
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This module contains USB PHY initialization for power up and S3 resume
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/soc/brcmstb/brcmstb.h>
+#include "phy-brcm-usb-init.h"
+
+#define PHY_PORTS 2
+#define PHY_PORT_SELECT_0 0
+#define PHY_PORT_SELECT_1 0x1000
+
+/* Register definitions for the USB CTRL block */
+#define USB_CTRL_SETUP 0x00
+#define USB_CTRL_SETUP_IOC_MASK 0x00000010
+#define USB_CTRL_SETUP_IPP_MASK 0x00000020
+#define USB_CTRL_SETUP_BABO_MASK 0x00000001
+#define USB_CTRL_SETUP_FNHW_MASK 0x00000002
+#define USB_CTRL_SETUP_FNBO_MASK 0x00000004
+#define USB_CTRL_SETUP_WABO_MASK 0x00000008
+#define USB_CTRL_SETUP_SCB_CLIENT_SWAP_MASK 0x00002000 /* option */
+#define USB_CTRL_SETUP_SCB1_EN_MASK 0x00004000 /* option */
+#define USB_CTRL_SETUP_SCB2_EN_MASK 0x00008000 /* option */
+#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK 0x00020000 /* option */
+#define USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK 0x00010000 /* option */
+#define USB_CTRL_SETUP_STRAP_IPP_SEL_MASK 0x02000000 /* option */
+#define USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK 0x04000000 /* option */
+#define USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK 0x08000000 /* opt */
+#define USB_CTRL_SETUP_OC3_DISABLE_MASK 0xc0000000 /* option */
+#define USB_CTRL_PLL_CTL 0x04
+#define USB_CTRL_PLL_CTL_PLL_SUSPEND_EN_MASK 0x08000000
+#define USB_CTRL_PLL_CTL_PLL_RESETB_MASK 0x40000000
+#define USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK 0x80000000 /* option */
+#define USB_CTRL_EBRIDGE 0x0c
+#define USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK 0x00020000 /* option */
+#define USB_CTRL_MDIO 0x14
+#define USB_CTRL_MDIO2 0x18
+#define USB_CTRL_UTMI_CTL_1 0x2c
+#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_MASK 0x00000800
+#define USB_CTRL_UTMI_CTL_1_POWER_UP_FSM_EN_P1_MASK 0x08000000
+#define USB_CTRL_USB_PM 0x34
+#define USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK 0x00800000 /* option */
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK 0x00400000 /* option */
+#define USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK 0x40000000 /* option */
+#define USB_CTRL_USB_PM_USB_PWRDN_MASK 0x80000000 /* option */
+#define USB_CTRL_USB_PM_SOFT_RESET_MASK 0x40000000 /* option */
+#define USB_CTRL_USB_PM_USB20_HC_RESETB_MASK 0x30000000 /* option */
+#define USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK 0x00300000 /* option */
+#define USB_CTRL_USB30_CTL1 0x60
+#define USB_CTRL_USB30_CTL1_PHY3_PLL_SEQ_START_MASK 0x00000010
+#define USB_CTRL_USB30_CTL1_PHY3_RESETB_MASK 0x00010000
+#define USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK 0x00020000 /* option */
+#define USB_CTRL_USB30_CTL1_USB3_IOC_MASK 0x10000000 /* option */
+#define USB_CTRL_USB30_CTL1_USB3_IPP_MASK 0x20000000 /* option */
+#define USB_CTRL_USB30_PCTL 0x70
+#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_MASK 0x00000002
+#define USB_CTRL_USB30_PCTL_PHY3_SOFT_RESETB_P1_MASK 0x00020000
+#define USB_CTRL_USB_DEVICE_CTL1 0x90
+#define USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK 0x00000003 /* option */
+
+/* Register definitions for the XHCI EC block */
+#define USB_XHCI_EC_IRAADR 0x658
+#define USB_XHCI_EC_IRADAT 0x65c
+
+enum brcm_family_type {
+ BRCM_FAMILY_3390A0,
+ BRCM_FAMILY_7250B0,
+ BRCM_FAMILY_7271A0,
+ BRCM_FAMILY_7364A0,
+ BRCM_FAMILY_7366C0,
+ BRCM_FAMILY_74371A0,
+ BRCM_FAMILY_7439B0,
+ BRCM_FAMILY_7445D0,
+ BRCM_FAMILY_7260A0,
+ BRCM_FAMILY_7278A0,
+ BRCM_FAMILY_COUNT,
+};
+
+#define USB_BRCM_FAMILY(chip) \
+ [BRCM_FAMILY_##chip] = __stringify(chip)
+
+static const char *family_names[BRCM_FAMILY_COUNT] = {
+ USB_BRCM_FAMILY(3390A0),
+ USB_BRCM_FAMILY(7250B0),
+ USB_BRCM_FAMILY(7271A0),
+ USB_BRCM_FAMILY(7364A0),
+ USB_BRCM_FAMILY(7366C0),
+ USB_BRCM_FAMILY(74371A0),
+ USB_BRCM_FAMILY(7439B0),
+ USB_BRCM_FAMILY(7445D0),
+ USB_BRCM_FAMILY(7260A0),
+ USB_BRCM_FAMILY(7278A0),
+};
+
+enum {
+ USB_CTRL_SETUP_SCB1_EN_SELECTOR,
+ USB_CTRL_SETUP_SCB2_EN_SELECTOR,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_SELECTOR,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_SELECTOR,
+ USB_CTRL_SETUP_OC3_DISABLE_SELECTOR,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_SELECTOR,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_SELECTOR,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB_PM_USB_PWRDN_SELECTOR,
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_SELECTOR,
+ USB_CTRL_USB30_CTL1_USB3_IOC_SELECTOR,
+ USB_CTRL_USB30_CTL1_USB3_IPP_SELECTOR,
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_SELECTOR,
+ USB_CTRL_USB_PM_SOFT_RESET_SELECTOR,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_SELECTOR,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_SELECTOR,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_SELECTOR,
+ USB_CTRL_SETUP_ENDIAN_SELECTOR,
+ USB_CTRL_SELECTOR_COUNT,
+};
+
+#define USB_CTRL_REG(base, reg) ((void *)base + USB_CTRL_##reg)
+#define USB_XHCI_EC_REG(base, reg) ((void *)base + USB_XHCI_EC_##reg)
+#define USB_CTRL_MASK(reg, field) \
+ USB_CTRL_##reg##_##field##_MASK
+#define USB_CTRL_MASK_FAMILY(params, reg, field) \
+ (params->usb_reg_bits_map[USB_CTRL_##reg##_##field##_SELECTOR])
+
+#define USB_CTRL_SET_FAMILY(params, reg, field) \
+ usb_ctrl_set_family(params, USB_CTRL_##reg, \
+ USB_CTRL_##reg##_##field##_SELECTOR)
+#define USB_CTRL_UNSET_FAMILY(params, reg, field) \
+ usb_ctrl_unset_family(params, USB_CTRL_##reg, \
+ USB_CTRL_##reg##_##field##_SELECTOR)
+
+#define USB_CTRL_SET(base, reg, field) \
+ usb_ctrl_set(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+#define USB_CTRL_UNSET(base, reg, field) \
+ usb_ctrl_unset(USB_CTRL_REG(base, reg), \
+ USB_CTRL_##reg##_##field##_MASK)
+
+#define MDIO_USB2 0
+#define MDIO_USB3 BIT(31)
+
+#define USB_CTRL_SETUP_ENDIAN_BITS ( \
+ USB_CTRL_MASK(SETUP, BABO) | \
+ USB_CTRL_MASK(SETUP, FNHW) | \
+ USB_CTRL_MASK(SETUP, FNBO) | \
+ USB_CTRL_MASK(SETUP, WABO))
+
+#ifdef __LITTLE_ENDIAN
+#define ENDIAN_SETTINGS ( \
+ USB_CTRL_MASK(SETUP, BABO) | \
+ USB_CTRL_MASK(SETUP, FNHW))
+#else
+#define ENDIAN_SETTINGS ( \
+ USB_CTRL_MASK(SETUP, FNHW) | \
+ USB_CTRL_MASK(SETUP, FNBO) | \
+ USB_CTRL_MASK(SETUP, WABO))
+#endif
+
+struct id_to_type {
+ u32 id;
+ int type;
+};
+
+static const struct id_to_type id_to_type_table[] = {
+ { 0x33900000, BRCM_FAMILY_3390A0 },
+ { 0x72500010, BRCM_FAMILY_7250B0 },
+ { 0x72600000, BRCM_FAMILY_7260A0 },
+ { 0x72680000, BRCM_FAMILY_7271A0 },
+ { 0x72710000, BRCM_FAMILY_7271A0 },
+ { 0x73640000, BRCM_FAMILY_7364A0 },
+ { 0x73660020, BRCM_FAMILY_7366C0 },
+ { 0x07437100, BRCM_FAMILY_74371A0 },
+ { 0x74390010, BRCM_FAMILY_7439B0 },
+ { 0x74450030, BRCM_FAMILY_7445D0 },
+ { 0x72780000, BRCM_FAMILY_7278A0 },
+ { 0, BRCM_FAMILY_7271A0 }, /* default */
+};
+
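+/*
+ * Per-family register bit map: rows are indexed by BRCM_FAMILY_*,
+ * columns by the *_SELECTOR enum above. A zero entry means the field
+ * does not exist (or is unused) on that family, which makes the
+ * *_FAMILY register accessors no-ops for it.
+ */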
+static const u32
+usb_reg_bits_map_table[BRCM_FAMILY_COUNT][USB_CTRL_SELECTOR_COUNT] = {
+ /* 3390A0 */
+ [BRCM_FAMILY_3390A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7250b0 */
+ [BRCM_FAMILY_7250B0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7271a0 */
+ [BRCM_FAMILY_7271A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7364a0 */
+ [BRCM_FAMILY_7364A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7366c0 */
+ [BRCM_FAMILY_7366C0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_VAR_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 74371A0 */
+ [BRCM_FAMILY_74371A0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ 0, /* USB_CTRL_SETUP_OC3_DISABLE_MASK */
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB30_CTL1_USB3_IOC_MASK,
+ USB_CTRL_USB30_CTL1_USB3_IPP_MASK,
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7439B0 */
+ [BRCM_FAMILY_7439B0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7445d0 */
+ [BRCM_FAMILY_7445D0] = {
+ USB_CTRL_SETUP_SCB1_EN_MASK,
+ USB_CTRL_SETUP_SCB2_EN_MASK,
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_VAR_MASK,
+ 0, /* USB_CTRL_SETUP_STRAP_IPP_SEL_MASK */
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK,
+ 0, /* USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK */
+ 0, /* USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB_PM_USB_PWRDN_MASK */
+ USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ 0, /* USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK */
+ 0, /* USB_CTRL_USB_PM_SOFT_RESET_MASK */
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7260a0 */
+ [BRCM_FAMILY_7260A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK,
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK,
+ USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK,
+ USB_CTRL_USB_PM_USB20_HC_RESETB_VAR_MASK,
+ ENDIAN_SETTINGS, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+ /* 7278a0 */
+ [BRCM_FAMILY_7278A0] = {
+ 0, /* USB_CTRL_SETUP_SCB1_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SCB2_EN_MASK */
+ 0, /* USB_CTRL_SETUP_SS_EHCI64BIT_EN_MASK */
+ USB_CTRL_SETUP_STRAP_IPP_SEL_MASK,
+ USB_CTRL_SETUP_OC3_DISABLE_MASK,
+ 0, /* USB_CTRL_PLL_CTL_PLL_IDDQ_PWRDN_MASK */
+ USB_CTRL_EBRIDGE_ESTOP_SCB_REQ_MASK,
+ USB_CTRL_USB_PM_BDC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_XHC_SOFT_RESETB_MASK,
+ USB_CTRL_USB_PM_USB_PWRDN_MASK,
+ 0, /* USB_CTRL_USB30_CTL1_XHC_SOFT_RESETB_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IOC_MASK */
+ 0, /* USB_CTRL_USB30_CTL1_USB3_IPP_MASK */
+ USB_CTRL_USB_DEVICE_CTL1_PORT_MODE_MASK,
+ USB_CTRL_USB_PM_SOFT_RESET_MASK,
+ 0, /* USB_CTRL_SETUP_CC_DRD_MODE_ENABLE_MASK */
+ 0, /* USB_CTRL_SETUP_STRAP_CC_DRD_MODE_ENABLE_SEL_MASK */
+ 0, /* USB_CTRL_USB_PM_USB20_HC_RESETB_MASK */
+ 0, /* USB_CTRL_SETUP ENDIAN bits */
+ },
+};
+
+static inline u32 brcmusb_readl(void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline void brcmusb_writel(u32 val, void __iomem *addr)
+{
+ writel(val, addr);
+}
+
+static inline
+void usb_ctrl_unset_family(struct brcm_usb_init_params *params,
+ u32 reg_offset, u32 field)
+{
+ u32 mask;
+ void __iomem *reg;
+
+ mask = params->usb_reg_bits_map[field];
+ reg = params->ctrl_regs + reg_offset;
+ brcmusb_writel(brcmusb_readl(reg) & ~mask, reg);
+}
+
+static inline
+void usb_ctrl_set_family(struct brcm_usb_init_params *params,
+ u32 reg_offset, u32 field)
+{
+ u32 mask;
+ void __iomem *reg;
+
+ mask = params->usb_reg_bits_map[field];
+ reg = params->ctrl_regs + reg_offset;
+ brcmusb_writel(brcmusb_readl(reg) | mask, reg);
+}
+
+static inline void usb_ctrl_set(void __iomem *reg, u32 field)
+{
+ u32 value;
+
+ value = brcmusb_readl(reg);
+ brcmusb_writel(value | field, reg);
+}
+
+static inline void usb_ctrl_unset(void __iomem *reg, u32 field)
+{
+ u32 value;
+
+ value = brcmusb_readl(reg);
+ brcmusb_writel(value & ~field, reg);
+}
+
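+/*
+ * Indirect access to the USB PHY registers over the internal MDIO
+ * interface: the PHY register number goes in the upper half of the
+ * MDIO register, bit 24 strobes a read, bit 25 strobes a write, and
+ * read data comes back in the low 16 bits of MDIO2. The mode
+ * argument (MDIO_USB2/MDIO_USB3) selects which PHY is addressed.
+ */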
+static u32 brcmusb_usb_mdio_read(void __iomem *ctrl_base, u32 reg, int mode)
+{
+ u32 data;
+
+ data = (reg << 16) | mode;
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data |= (1 << 24);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data &= ~(1 << 24);
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+
+ return brcmusb_readl(USB_CTRL_REG(ctrl_base, MDIO2)) & 0xffff;
+}
+
+static void brcmusb_usb_mdio_write(void __iomem *ctrl_base, u32 reg,
+ u32 val, int mode)
+{
+ u32 data;
+
+ data = (reg << 16) | val | mode;
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data |= (1 << 25);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ data &= ~(1 << 25);
+
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+ brcmusb_writel(data, USB_CTRL_REG(ctrl_base, MDIO));
+ /* wait for the 60MHz parallel to serial shifter */
+ usleep_range(10, 20);
+}
+
+static void brcmusb_usb_phy_ldo_fix(void __iomem *ctrl_base)
+{
+ /*
+ * First disable the power-up FSM, but also leave it disabled
+ * to allow normal suspend/resume.
+ */
+ USB_CTRL_UNSET(ctrl_base, UTMI_CTL_1, POWER_UP_FSM_EN);
+ USB_CTRL_UNSET(ctrl_base, UTMI_CTL_1, POWER_UP_FSM_EN_P1);
+
+ /* reset USB 2.0 PLL */
+ USB_CTRL_UNSET(ctrl_base, PLL_CTL, PLL_RESETB);
+ /* PLL reset period */
+ udelay(1);
+ USB_CTRL_SET(ctrl_base, PLL_CTL, PLL_RESETB);
+ /* Give PLL enough time to lock */
+ usleep_range(1000, 2000);
+}
+
+static void brcmusb_usb2_eye_fix(void __iomem *ctrl_base)
+{
+ /* Increase USB 2.0 TX level to meet spec requirement */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x80a0, MDIO_USB2);
+ brcmusb_usb_mdio_write(ctrl_base, 0x0a, 0xc6a0, MDIO_USB2);
+}
+
+static void brcmusb_usb3_pll_fix(void __iomem *ctrl_base)
+{
+ /* Set correct window for PLL lock detect */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x07, 0x1503, MDIO_USB3);
+}
+
+static void brcmusb_usb3_enable_pipe_reset(void __iomem *ctrl_base)
+{
+ u32 val;
+
+ /* Re-enable USB 3.0 pipe reset */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x0f, MDIO_USB3) | 0x200;
+ brcmusb_usb_mdio_write(ctrl_base, 0x0f, val, MDIO_USB3);
+}
+
+static void brcmusb_usb3_enable_sigdet(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Set correct default for sigdet */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8080 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x05, MDIO_USB3);
+ val = (val & ~0x800f) | 0x800d;
+ brcmusb_usb_mdio_write(ctrl_base, 0x05, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_enable_skip_align(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Set correct default for SKIP align */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8060 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0x200;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_unfreeze_aeq(void __iomem *ctrl_base)
+{
+ u32 val, ofs;
+ int ii;
+
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ /* Let EQ freeze after TSEQ */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x80e0 + ofs),
+ MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3);
+ val &= ~0x0008;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+}
+
+static void brcmusb_usb3_pll_54mhz(struct brcm_usb_init_params *params)
+{
+ u32 ofs;
+ int ii;
+ void __iomem *ctrl_base = params->ctrl_regs;
+
+ /*
+ * On newer B53-based SoCs, the reference clock for the
+ * 3.0 PLL has been changed from 50MHz to 54MHz, so the
+ * PLL needs to be reprogrammed.
+ * See SWLINUX-4006.
+ *
+ * On the 7364C0, the reference clock for the
+ * 3.0 PLL has been changed from 50MHz to 54MHz to
+ * work around a MOCA issue.
+ * See SWLINUX-4169.
+ */
+ switch (params->selected_family) {
+ case BRCM_FAMILY_3390A0:
+ case BRCM_FAMILY_7250B0:
+ case BRCM_FAMILY_7366C0:
+ case BRCM_FAMILY_74371A0:
+ case BRCM_FAMILY_7439B0:
+ case BRCM_FAMILY_7445D0:
+ case BRCM_FAMILY_7260A0:
+ return;
+ case BRCM_FAMILY_7364A0:
+ if (BRCM_REV(params->family_id) < 0x20)
+ return;
+ break;
+ }
+
+ /* set USB 3.0 PLL to accept 54MHz reference clock */
+ USB_CTRL_UNSET(ctrl_base, USB30_CTL1, PHY3_PLL_SEQ_START);
+
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x10, 0x5784, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x11, 0x01d0, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x12, 0x1DE8, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x13, 0xAA80, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x14, 0x8826, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x15, 0x0044, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x16, 0x8000, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x17, 0x0851, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x18, 0x0000, MDIO_USB3);
+
+ /* both ports */
+ ofs = 0;
+ for (ii = 0; ii < PHY_PORTS; ++ii) {
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8040 + ofs),
+ MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x03, 0x0090, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x04, 0x0134, MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, (0x8020 + ofs),
+ MDIO_USB3);
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, 0x00e2, MDIO_USB3);
+ ofs = PHY_PORT_SELECT_1;
+ }
+
+ /* restart PLL sequence */
+ USB_CTRL_SET(ctrl_base, USB30_CTL1, PHY3_PLL_SEQ_START);
+ /* Give PLL enough time to lock */
+ usleep_range(1000, 2000);
+}
+
+static void brcmusb_usb3_ssc_enable(void __iomem *ctrl_base)
+{
+ u32 val;
+
+ /* Enable USB 3.0 TX spread spectrum */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x8040, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0xf;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+
+ /*
+ * Currently, USB 3.0 SSC is enabled via port 0 MDIO registers,
+ * which should have been adequate. However, due to a bug in the
+ * USB 3.0 PHY, it must be enabled via both ports (HWUSB3DVT-26).
+ */
+ brcmusb_usb_mdio_write(ctrl_base, 0x1f, 0x9040, MDIO_USB3);
+ val = brcmusb_usb_mdio_read(ctrl_base, 0x01, MDIO_USB3) | 0xf;
+ brcmusb_usb_mdio_write(ctrl_base, 0x01, val, MDIO_USB3);
+}
+
+static void brcmusb_usb3_phy_workarounds(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl_base = params->ctrl_regs;
+
+ brcmusb_usb3_pll_fix(ctrl_base);
+ brcmusb_usb3_pll_54mhz(params);
+ brcmusb_usb3_ssc_enable(ctrl_base);
+ brcmusb_usb3_enable_pipe_reset(ctrl_base);
+ brcmusb_usb3_enable_sigdet(ctrl_base);
+ brcmusb_usb3_enable_skip_align(ctrl_base);
+ brcmusb_usb3_unfreeze_aeq(ctrl_base);
+}
+
+static void brcmusb_memc_fix(struct brcm_usb_init_params *params)
+{
+ u32 prid;
+
+ if (params->selected_family != BRCM_FAMILY_7445D0)
+ return;
+ /*
+ * This is a workaround for HW7445-1869 where a DMA write ends up
+ * doing a read pre-fetch after the end of the DMA buffer. This
+ * causes a problem when the DMA buffer is at the end of physical
+ * memory, causing the pre-fetch read to access non-existent memory,
+ * and the chip bondout has MEMC2 disabled. When the pre-fetch read
+ * tries to use the disabled MEMC2, it hangs the bus. The workaround
+ * is to disable MEMC2 access in the usb controller which avoids
+ * the hang.
+ */
+
+ prid = params->product_id & 0xfffff000;
+ switch (prid) {
+ case 0x72520000:
+ case 0x74480000:
+ case 0x74490000:
+ case 0x07252000:
+ case 0x07448000:
+ case 0x07449000:
+ USB_CTRL_UNSET_FAMILY(params, SETUP, SCB2_EN);
+ }
+}
+
+static void brcmusb_usb3_otp_fix(struct brcm_usb_init_params *params)
+{
+ void __iomem *xhci_ec_base = params->xhci_ec_regs;
+ u32 val;
+
+ if (params->family_id != 0x74371000 || !xhci_ec_base)
+ return;
+ brcmusb_writel(0xa20c, USB_XHCI_EC_REG(xhci_ec_base, IRAADR));
+ val = brcmusb_readl(USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+
+ /* set cfg_pick_ss_lock */
+ val |= (1 << 27);
+ brcmusb_writel(val, USB_XHCI_EC_REG(xhci_ec_base, IRADAT));
+
+ /* Reset USB 3.0 PHY for workaround to take effect */
+ USB_CTRL_UNSET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+ USB_CTRL_SET(params->ctrl_regs, USB30_CTL1, PHY3_RESETB);
+}
+
+static void brcmusb_xhci_soft_reset(struct brcm_usb_init_params *params,
+ int on_off)
+{
+ /* Assert reset */
+ if (on_off) {
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, XHC_SOFT_RESETB))
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, XHC_SOFT_RESETB);
+ else
+ USB_CTRL_UNSET_FAMILY(params,
+ USB30_CTL1, XHC_SOFT_RESETB);
+ } else { /* De-assert reset */
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, XHC_SOFT_RESETB))
+ USB_CTRL_SET_FAMILY(params, USB_PM, XHC_SOFT_RESETB);
+ else
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1,
+ XHC_SOFT_RESETB);
+ }
+}
+
+/*
+ * Return the best map table family. The order is:
+ * - exact match of chip and major rev
+ * - exact match of chip and closest older major rev
+ * - default chip/rev.
+ * NOTE: The minor rev is always ignored.
+ */
+static enum brcm_family_type brcmusb_get_family_type(
+ struct brcm_usb_init_params *params)
+{
+ int last_type = -1;
+ u32 last_family = 0;
+ u32 family_no_major;
+ unsigned int x;
+ u32 family;
+
+ family = params->family_id & 0xfffffff0;
+ family_no_major = params->family_id & 0xffffff00;
+ for (x = 0; id_to_type_table[x].id; x++) {
+ if (family == id_to_type_table[x].id)
+ return id_to_type_table[x].type;
+ if (family_no_major == (id_to_type_table[x].id & 0xffffff00))
+ if (family > id_to_type_table[x].id &&
+ last_family < id_to_type_table[x].id) {
+ last_family = id_to_type_table[x].id;
+ last_type = id_to_type_table[x].type;
+ }
+ }
+
+ /* If no match, return the default family */
+ if (last_type == -1)
+ return id_to_type_table[x].type;
+ return last_type;
+}
+
+void brcm_usb_init_ipp(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg;
+ u32 orig_reg;
+
+ /*
+ * Starting with the 7445d0, there are no longer separate 3.0
+ * versions of IOC and IPP.
+ */
+ if (USB_CTRL_MASK_FAMILY(params, USB30_CTL1, USB3_IOC)) {
+ if (params->ioc)
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IOC);
+ if (params->ipp == 1)
+ USB_CTRL_SET_FAMILY(params, USB30_CTL1, USB3_IPP);
+ }
+
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ orig_reg = reg;
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_CC_DRD_MODE_ENABLE_SEL))
+ /* Never use the strap, it's going away. */
+ reg &= ~(USB_CTRL_MASK_FAMILY(params,
+ SETUP,
+ STRAP_CC_DRD_MODE_ENABLE_SEL));
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, STRAP_IPP_SEL))
+ if (params->ipp != 2)
+ /* override ipp strap pin (if it exists) */
+ reg &= ~(USB_CTRL_MASK_FAMILY(params, SETUP,
+ STRAP_IPP_SEL));
+
+ /* Override the default OC and PP polarity */
+ reg &= ~(USB_CTRL_MASK(SETUP, IPP) | USB_CTRL_MASK(SETUP, IOC));
+ if (params->ioc)
+ reg |= USB_CTRL_MASK(SETUP, IOC);
+ if (params->ipp == 1 && ((reg & USB_CTRL_MASK(SETUP, IPP)) == 0))
+ reg |= USB_CTRL_MASK(SETUP, IPP);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ /*
+ * If we're changing IPP, make sure power is off long enough
+ * to turn off any connected devices.
+ */
+ if (reg != orig_reg)
+ msleep(50);
+}
+
+int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg = 0;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ }
+ return reg;
+}
+
+void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
+ int mode)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+ u32 reg;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= mode;
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+}
+
+void brcm_usb_init_common(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->ctrl_regs;
+
+ /* Take USB out of power down */
+ if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN)) {
+ USB_CTRL_UNSET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+ }
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN)) {
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, USB_PWRDN);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+ }
+
+ if (params->selected_family != BRCM_FAMILY_74371A0 &&
+ (BRCM_ID(params->family_id) != 0x7364))
+ /*
+ * HW7439-637: 7439a0 and its derivatives do not have large
+ * enough descriptor storage for this.
+ */
+ USB_CTRL_SET_FAMILY(params, SETUP, SS_EHCI64BIT_EN);
+
+ /* Block auto PLL suspend by USB2 PHY (Sasi) */
+ USB_CTRL_SET(ctrl, PLL_CTL, PLL_SUSPEND_EN);
+
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ if (params->selected_family == BRCM_FAMILY_7364A0)
+ /* Suppress overcurrent indication from USB30 ports for A0 */
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, OC3_DISABLE);
+
+ brcmusb_usb_phy_ldo_fix(ctrl);
+ brcmusb_usb2_eye_fix(ctrl);
+
+ /*
+ * Make sure the second and third memory controller
+ * interfaces are enabled if they exist.
+ */
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN))
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB1_EN);
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN))
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, SCB2_EN);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+
+ brcmusb_memc_fix(params);
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1, PORT_MODE)) {
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ reg &= ~USB_CTRL_MASK_FAMILY(params, USB_DEVICE_CTL1,
+ PORT_MODE);
+ reg |= params->mode;
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, USB_DEVICE_CTL1));
+ }
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, BDC_SOFT_RESETB)) {
+ switch (params->mode) {
+ case USB_CTLR_MODE_HOST:
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ default:
+ USB_CTRL_SET_FAMILY(params, USB_PM, BDC_SOFT_RESETB);
+ break;
+ }
+ }
+ if (USB_CTRL_MASK_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE)) {
+ if (params->mode == USB_CTLR_MODE_TYPEC_PD)
+ USB_CTRL_SET_FAMILY(params, SETUP, CC_DRD_MODE_ENABLE);
+ else
+ USB_CTRL_UNSET_FAMILY(params, SETUP,
+ CC_DRD_MODE_ENABLE);
+ }
+}
+
+void brcm_usb_init_eohci(struct brcm_usb_init_params *params)
+{
+ u32 reg;
+ void __iomem *ctrl = params->ctrl_regs;
+
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
+ USB_CTRL_SET_FAMILY(params, USB_PM, USB20_HC_RESETB);
+
+ if (params->selected_family == BRCM_FAMILY_7366C0)
+ /*
+ * Don't enable this so the memory controller doesn't read
+ * into memory holes. NOTE: This bit is low true on 7366C0.
+ */
+ USB_CTRL_SET_FAMILY(params, EBRIDGE, ESTOP_SCB_REQ);
+
+ /* Set up the endian bits */
+ reg = brcmusb_readl(USB_CTRL_REG(ctrl, SETUP));
+ reg &= ~USB_CTRL_SETUP_ENDIAN_BITS;
+ reg |= USB_CTRL_MASK_FAMILY(params, SETUP, ENDIAN);
+ brcmusb_writel(reg, USB_CTRL_REG(ctrl, SETUP));
+}
+
+void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
+{
+ void __iomem *ctrl = params->ctrl_regs;
+
+ if (BRCM_ID(params->family_id) == 0x7366) {
+ /*
+ * The PHY3_SOFT_RESETB bits default to the wrong state.
+ */
+ USB_CTRL_SET(ctrl, USB30_PCTL, PHY3_SOFT_RESETB);
+ USB_CTRL_SET(ctrl, USB30_PCTL, PHY3_SOFT_RESETB_P1);
+ }
+
+ /*
+ * Kick-start the USB3 PHY.
+ * Make sure it's low first to ensure a rising edge.
+ */
+ USB_CTRL_UNSET(ctrl, USB30_CTL1, PHY3_PLL_SEQ_START);
+ USB_CTRL_SET(ctrl, USB30_CTL1, PHY3_PLL_SEQ_START);
+
+ brcmusb_usb3_phy_workarounds(params);
+ brcmusb_xhci_soft_reset(params, 0);
+ brcmusb_usb3_otp_fix(params);
+}
+
+void brcm_usb_uninit_common(struct brcm_usb_init_params *params)
+{
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB_PWRDN))
+ USB_CTRL_SET_FAMILY(params, USB_PM, USB_PWRDN);
+
+ if (USB_CTRL_MASK_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN))
+ USB_CTRL_SET_FAMILY(params, PLL_CTL, PLL_IDDQ_PWRDN);
+}
+
+void brcm_usb_uninit_eohci(struct brcm_usb_init_params *params)
+{
+ if (USB_CTRL_MASK_FAMILY(params, USB_PM, USB20_HC_RESETB))
+ USB_CTRL_UNSET_FAMILY(params, USB_PM, USB20_HC_RESETB);
+}
+
+void brcm_usb_uninit_xhci(struct brcm_usb_init_params *params)
+{
+ brcmusb_xhci_soft_reset(params, 1);
+}
+
+void brcm_usb_set_family_map(struct brcm_usb_init_params *params)
+{
+ int fam;
+
+ fam = brcmusb_get_family_type(params);
+ params->selected_family = fam;
+ params->usb_reg_bits_map =
+ &usb_reg_bits_map_table[fam][0];
+ params->family_name = family_names[fam];
+}
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.h b/drivers/phy/broadcom/phy-brcm-usb-init.h
new file mode 100644
index 000000000000..bb77b863885e
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _USB_BRCM_COMMON_INIT_H
+#define _USB_BRCM_COMMON_INIT_H
+
+#define USB_CTLR_MODE_HOST 0
+#define USB_CTLR_MODE_DEVICE 1
+#define USB_CTLR_MODE_DRD 2
+#define USB_CTLR_MODE_TYPEC_PD 3
+
+struct brcm_usb_init_params;
+
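+/*
+ * Parameters shared between the common init code and the PHY driver:
+ * ctrl_regs/xhci_ec_regs are the mapped USB_CTRL and (optional) XHCI
+ * EC register blocks, ioc/ipp override the overcurrent and port power
+ * polarity, mode is one of the USB_CTLR_MODE_* values above, and
+ * family_id/product_id (from the brcmstb SoC code) select the
+ * per-family usb_reg_bits_map and family_name.
+ */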
+struct brcm_usb_init_params {
+ void __iomem *ctrl_regs;
+ void __iomem *xhci_ec_regs;
+ int ioc;
+ int ipp;
+ int mode;
+ u32 family_id;
+ u32 product_id;
+ int selected_family;
+ const char *family_name;
+ const u32 *usb_reg_bits_map;
+};
+
+void brcm_usb_set_family_map(struct brcm_usb_init_params *params);
+int brcm_usb_init_get_dual_select(struct brcm_usb_init_params *params);
+void brcm_usb_init_set_dual_select(struct brcm_usb_init_params *params,
+ int mode);
+
+void brcm_usb_init_ipp(struct brcm_usb_init_params *ini);
+void brcm_usb_init_common(struct brcm_usb_init_params *ini);
+void brcm_usb_init_eohci(struct brcm_usb_init_params *ini);
+void brcm_usb_init_xhci(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_common(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_eohci(struct brcm_usb_init_params *ini);
+void brcm_usb_uninit_xhci(struct brcm_usb_init_params *ini);
+
+#endif /* _USB_BRCM_COMMON_INIT_H */
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
new file mode 100644
index 000000000000..195b98139e5f
--- /dev/null
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -0,0 +1,459 @@
+/*
+ * phy-brcm-usb.c - Broadcom USB Phy Driver
+ *
+ * Copyright (C) 2015-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-brcm-usb-init.h"
+
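+/* Serializes the dual_select sysfs show/store handlers below. */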
+static DEFINE_MUTEX(sysfs_lock);
+
+enum brcm_usb_phy_id {
+ BRCM_USB_PHY_2_0 = 0,
+ BRCM_USB_PHY_3_0,
+ BRCM_USB_PHY_ID_MAX
+};
+
+struct value_to_name_map {
+ int value;
+ const char *name;
+};
+
+static struct value_to_name_map brcm_dr_mode_to_name[] = {
+ { USB_CTLR_MODE_HOST, "host" },
+ { USB_CTLR_MODE_DEVICE, "peripheral" },
+ { USB_CTLR_MODE_DRD, "drd" },
+ { USB_CTLR_MODE_TYPEC_PD, "typec-pd" }
+};
+
+static struct value_to_name_map brcm_dual_mode_to_name[] = {
+ { 0, "host" },
+ { 1, "device" },
+ { 2, "auto" },
+};
+
+struct brcm_usb_phy {
+ struct phy *phy;
+ unsigned int id;
+ bool inited;
+};
+
+struct brcm_usb_phy_data {
+ struct brcm_usb_init_params ini;
+ bool has_eohci;
+ bool has_xhci;
+ struct clk *usb_20_clk;
+ struct clk *usb_30_clk;
+ struct mutex mutex; /* serialize phy init */
+ int init_count;
+ struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+};
+
+static int brcm_usb_phy_init(struct phy *gphy)
+{
+ struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
+ struct brcm_usb_phy_data *priv =
+ container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+
+ /*
+ * Use a lock to make sure a second caller waits until
+ * the base phy is initialized before using it.
+ */
+ mutex_lock(&priv->mutex);
+ if (priv->init_count++ == 0) {
+ clk_enable(priv->usb_20_clk);
+ clk_enable(priv->usb_30_clk);
+ brcm_usb_init_common(&priv->ini);
+ }
+ mutex_unlock(&priv->mutex);
+ if (phy->id == BRCM_USB_PHY_2_0)
+ brcm_usb_init_eohci(&priv->ini);
+ else if (phy->id == BRCM_USB_PHY_3_0)
+ brcm_usb_init_xhci(&priv->ini);
+ phy->inited = true;
+ dev_dbg(&gphy->dev, "INIT, id: %d, total: %d\n", phy->id,
+ priv->init_count);
+
+ return 0;
+}
+
+static int brcm_usb_phy_exit(struct phy *gphy)
+{
+ struct brcm_usb_phy *phy = phy_get_drvdata(gphy);
+ struct brcm_usb_phy_data *priv =
+ container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+
+ dev_dbg(&gphy->dev, "EXIT\n");
+ if (phy->id == BRCM_USB_PHY_2_0)
+ brcm_usb_uninit_eohci(&priv->ini);
+ if (phy->id == BRCM_USB_PHY_3_0)
+ brcm_usb_uninit_xhci(&priv->ini);
+
+ /* If both xhci and eohci are gone, reset everything else */
+ mutex_lock(&priv->mutex);
+ if (--priv->init_count == 0) {
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+ mutex_unlock(&priv->mutex);
+ phy->inited = false;
+ return 0;
+}
+
+static struct phy_ops brcm_usb_phy_ops = {
+ .init = brcm_usb_phy_init,
+ .exit = brcm_usb_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *brcm_usb_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct brcm_usb_phy_data *data = dev_get_drvdata(dev);
+
+ /*
+ * values 0 and 1 are for backward compatibility with
+ * device tree nodes from older bootloaders.
+ */
+ switch (args->args[0]) {
+ case 0:
+ case PHY_TYPE_USB2:
+ if (data->phys[BRCM_USB_PHY_2_0].phy)
+ return data->phys[BRCM_USB_PHY_2_0].phy;
+ dev_warn(dev, "Error, 2.0 Phy not found\n");
+ break;
+ case 1:
+ case PHY_TYPE_USB3:
+ if (data->phys[BRCM_USB_PHY_3_0].phy)
+ return data->phys[BRCM_USB_PHY_3_0].phy;
+ dev_warn(dev, "Error, 3.0 Phy not found\n");
+ break;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static int name_to_value(struct value_to_name_map *table, int count,
+ const char *name, int *value)
+{
+ int x;
+
+ *value = 0;
+ for (x = 0; x < count; x++) {
+ if (sysfs_streq(name, table[x].name)) {
+ *value = x;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static const char *value_to_name(struct value_to_name_map *table, int count,
+ int value)
+{
+ if (value >= count)
+ return "unknown";
+ return table[value].name;
+}
+
+static ssize_t dr_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n",
+ value_to_name(&brcm_dr_mode_to_name[0],
+ ARRAY_SIZE(brcm_dr_mode_to_name),
+ priv->ini.mode));
+}
+static DEVICE_ATTR_RO(dr_mode);
+
+static ssize_t dual_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+ int value;
+ int res;
+
+ mutex_lock(&sysfs_lock);
+ res = name_to_value(&brcm_dual_mode_to_name[0],
+ ARRAY_SIZE(brcm_dual_mode_to_name), buf, &value);
+ if (!res) {
+ brcm_usb_init_set_dual_select(&priv->ini, value);
+ res = len;
+ }
+ mutex_unlock(&sysfs_lock);
+ return res;
+}
+
+static ssize_t dual_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+ int value;
+
+ mutex_lock(&sysfs_lock);
+ value = brcm_usb_init_get_dual_select(&priv->ini);
+ mutex_unlock(&sysfs_lock);
+ return sprintf(buf, "%s\n",
+ value_to_name(&brcm_dual_mode_to_name[0],
+ ARRAY_SIZE(brcm_dual_mode_to_name),
+ value));
+}
+static DEVICE_ATTR_RW(dual_select);
+
+static struct attribute *brcm_usb_phy_attrs[] = {
+ &dev_attr_dr_mode.attr,
+ &dev_attr_dual_select.attr,
+ NULL
+};
+
+static const struct attribute_group brcm_usb_phy_group = {
+ .attrs = brcm_usb_phy_attrs,
+};
+
+static int brcm_usb_phy_dvr_init(struct device *dev,
+ struct brcm_usb_phy_data *priv,
+ struct device_node *dn)
+{
+ struct phy *gphy;
+ int err;
+
+ priv->usb_20_clk = of_clk_get_by_name(dn, "sw_usb");
+ if (IS_ERR(priv->usb_20_clk)) {
+ dev_info(dev, "Clock not found in Device Tree\n");
+ priv->usb_20_clk = NULL;
+ }
+ err = clk_prepare_enable(priv->usb_20_clk);
+ if (err)
+ return err;
+
+ if (priv->has_eohci) {
+ gphy = devm_phy_create(dev, NULL, &brcm_usb_phy_ops);
+ if (IS_ERR(gphy)) {
+ dev_err(dev, "failed to create EHCI/OHCI PHY\n");
+ return PTR_ERR(gphy);
+ }
+ priv->phys[BRCM_USB_PHY_2_0].phy = gphy;
+ priv->phys[BRCM_USB_PHY_2_0].id = BRCM_USB_PHY_2_0;
+ phy_set_drvdata(gphy, &priv->phys[BRCM_USB_PHY_2_0]);
+ }
+
+ if (priv->has_xhci) {
+ gphy = devm_phy_create(dev, NULL, &brcm_usb_phy_ops);
+ if (IS_ERR(gphy)) {
+ dev_err(dev, "failed to create XHCI PHY\n");
+ return PTR_ERR(gphy);
+ }
+ priv->phys[BRCM_USB_PHY_3_0].phy = gphy;
+ priv->phys[BRCM_USB_PHY_3_0].id = BRCM_USB_PHY_3_0;
+ phy_set_drvdata(gphy, &priv->phys[BRCM_USB_PHY_3_0]);
+
+ priv->usb_30_clk = of_clk_get_by_name(dn, "sw_usb3");
+ if (IS_ERR(priv->usb_30_clk)) {
+ dev_info(dev,
+ "USB3.0 clock not found in Device Tree\n");
+ priv->usb_30_clk = NULL;
+ }
+ err = clk_prepare_enable(priv->usb_30_clk);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct brcm_usb_phy_data *priv;
+ struct phy_provider *phy_provider;
+ struct device_node *dn = pdev->dev.of_node;
+ int err;
+ const char *mode;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->ini.family_id = brcmstb_get_family_id();
+ priv->ini.product_id = brcmstb_get_product_id();
+ brcm_usb_set_family_map(&priv->ini);
+ dev_dbg(dev, "Best mapping table is for %s\n",
+ priv->ini.family_name);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "can't get USB_CTRL base address\n");
+ return -EINVAL;
+ }
+ priv->ini.ctrl_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ini.ctrl_regs)) {
+ dev_err(dev, "can't map CTRL register space\n");
+ return -EINVAL;
+ }
+
+ /* The XHCI EC registers are optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ priv->ini.xhci_ec_regs =
+ devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ini.xhci_ec_regs)) {
+ dev_err(dev, "can't map XHCI EC register space\n");
+ return -EINVAL;
+ }
+ }
+
+ of_property_read_u32(dn, "brcm,ipp", &priv->ini.ipp);
+ of_property_read_u32(dn, "brcm,ioc", &priv->ini.ioc);
+
+ priv->ini.mode = USB_CTLR_MODE_HOST;
+ err = of_property_read_string(dn, "dr_mode", &mode);
+ if (err == 0) {
+ name_to_value(&brcm_dr_mode_to_name[0],
+ ARRAY_SIZE(brcm_dr_mode_to_name),
+ mode, &priv->ini.mode);
+ }
+ if (of_property_read_bool(dn, "brcm,has_xhci"))
+ priv->has_xhci = true;
+ if (of_property_read_bool(dn, "brcm,has_eohci"))
+ priv->has_eohci = true;
+
+ err = brcm_usb_phy_dvr_init(dev, priv, dn);
+ if (err)
+ return err;
+
+ mutex_init(&priv->mutex);
+
+ /* make sure invert settings are correct */
+ brcm_usb_init_ipp(&priv->ini);
+
+ /*
+ * Create sysfs entries for mode.
+ * Remove "dual_select" attribute if not in dual mode
+ */
+ if (priv->ini.mode != USB_CTLR_MODE_DRD)
+ brcm_usb_phy_attrs[1] = NULL;
+ err = sysfs_create_group(&dev->kobj, &brcm_usb_phy_group);
+ if (err)
+ dev_warn(dev, "Error creating sysfs attributes\n");
+
+ /* start with everything off */
+ if (priv->has_xhci)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->has_eohci)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+
+ phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int brcm_usb_phy_suspend(struct device *dev)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ if (priv->init_count) {
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+ return 0;
+}
+
+static int brcm_usb_phy_resume(struct device *dev)
+{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
+
+ clk_enable(priv->usb_20_clk);
+ clk_enable(priv->usb_30_clk);
+ brcm_usb_init_ipp(&priv->ini);
+
+ /*
+ * Re-initialize anything that was initialized before suspend;
+ * explicitly uninitialize anything that was not.
+ */
+ if (priv->init_count) {
+ brcm_usb_init_common(&priv->ini);
+ if (priv->phys[BRCM_USB_PHY_2_0].inited) {
+ brcm_usb_init_eohci(&priv->ini);
+ } else if (priv->has_eohci) {
+ brcm_usb_uninit_eohci(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ }
+ if (priv->phys[BRCM_USB_PHY_3_0].inited) {
+ brcm_usb_init_xhci(&priv->ini);
+ } else if (priv->has_xhci) {
+ brcm_usb_uninit_xhci(&priv->ini);
+ clk_disable(priv->usb_30_clk);
+ }
+ } else {
+ if (priv->has_xhci)
+ brcm_usb_uninit_xhci(&priv->ini);
+ if (priv->has_eohci)
+ brcm_usb_uninit_eohci(&priv->ini);
+ brcm_usb_uninit_common(&priv->ini);
+ clk_disable(priv->usb_20_clk);
+ clk_disable(priv->usb_30_clk);
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops brcm_usb_phy_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(brcm_usb_phy_suspend, brcm_usb_phy_resume)
+};
+
+static const struct of_device_id brcm_usb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-usb-phy" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
+
+static struct platform_driver brcm_usb_driver = {
+ .probe = brcm_usb_phy_probe,
+ .driver = {
+ .name = "brcmstb-usb-phy",
+ .owner = THIS_MODULE,
+ .pm = &brcm_usb_phy_pm_ops,
+ .of_match_table = brcm_usb_dt_ids,
+ },
+};
+
+module_platform_driver(brcm_usb_driver);
+
+MODULE_ALIAS("platform:brcmstb-usb-phy");
+MODULE_AUTHOR("Al Cooper <acooper@broadcom.com>");
+MODULE_DESCRIPTION("BRCM USB PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 0cf6a7cbaf9f..5c3ec5d10e0d 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 89c887ea5557..a0d522154cdf 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -154,7 +154,6 @@ struct mvebu_comphy_priv {
void __iomem *base;
struct regmap *regmap;
struct device *dev;
- int modes[MVEBU_COMPHY_LANES];
};
struct mvebu_comphy_lane {
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 721a2a1c97ef..402385f2562a 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -96,9 +96,11 @@
#define U3P_U2PHYDTM1 0x06C
#define P2C_RG_UART_EN BIT(16)
+#define P2C_FORCE_IDDIG BIT(9)
#define P2C_RG_VBUSVALID BIT(5)
#define P2C_RG_SESSEND BIT(4)
#define P2C_RG_AVALID BIT(2)
+#define P2C_RG_IDDIG BIT(1)
#define U3P_U3_CHIP_GPIO_CTLD 0x0c
#define P3C_REG_IP_SW_RST BIT(31)
@@ -585,6 +587,31 @@ static void u2_phy_instance_exit(struct mtk_tphy *tphy,
}
}
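+/*
+ * Force the IDDIG (ID pin) state seen by the controller: ID high
+ * selects device mode, ID low selects host mode, and clearing the
+ * force bit hands ID detection back to the hardware for OTG.
+ */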
+static void u2_phy_instance_set_mode(struct mtk_tphy *tphy,
+ struct mtk_phy_instance *instance,
+ enum phy_mode mode)
+{
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ u32 tmp;
+
+ tmp = readl(u2_banks->com + U3P_U2PHYDTM1);
+ switch (mode) {
+ case PHY_MODE_USB_DEVICE:
+ tmp |= P2C_FORCE_IDDIG | P2C_RG_IDDIG;
+ break;
+ case PHY_MODE_USB_HOST:
+ tmp |= P2C_FORCE_IDDIG;
+ tmp &= ~P2C_RG_IDDIG;
+ break;
+ case PHY_MODE_USB_OTG:
+ tmp &= ~(P2C_FORCE_IDDIG | P2C_RG_IDDIG);
+ break;
+ default:
+ return;
+ }
+ writel(tmp, u2_banks->com + U3P_U2PHYDTM1);
+}
+
static void pcie_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
@@ -881,6 +908,17 @@ static int mtk_phy_exit(struct phy *phy)
return 0;
}
+static int mtk_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+ struct mtk_phy_instance *instance = phy_get_drvdata(phy);
+ struct mtk_tphy *tphy = dev_get_drvdata(phy->dev.parent);
+
+ if (instance->type == PHY_TYPE_USB2)
+ u2_phy_instance_set_mode(tphy, instance, mode);
+
+ return 0;
+}
+
static struct phy *mtk_phy_xlate(struct device *dev,
struct of_phandle_args *args)
{
@@ -931,6 +969,7 @@ static const struct phy_ops mtk_tphy_ops = {
.exit = mtk_phy_exit,
.power_on = mtk_phy_power_on,
.power_off = mtk_phy_power_off,
+ .set_mode = mtk_phy_set_mode,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index a268f4d6f3e9..b4964b067aec 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -372,6 +372,21 @@ int phy_reset(struct phy *phy)
}
EXPORT_SYMBOL_GPL(phy_reset);
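+/**
+ * phy_calibrate() - calibrate the phy for the current configuration
+ * @phy: the phy to calibrate
+ *
+ * Runs the provider's ->calibrate() callback, if implemented, under the
+ * phy mutex. Returns 0 on success (or when @phy is NULL or has no
+ * calibrate callback), otherwise a negative error code.
+ */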
+int phy_calibrate(struct phy *phy)
+{
+ int ret;
+
+ if (!phy || !phy->ops->calibrate)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->calibrate(phy);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_calibrate);
+
/**
* _of_phy_get() - lookup and obtain a reference to a phy by phandle
* @np: device_node for which to get the phy
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 2e183d7695fd..9abb7899762a 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 13b02b7de30b..822c83b8efcd 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -114,14 +114,16 @@ struct ufs_qcom_phy {
struct ufs_qcom_phy_calibration *cached_regs;
int cached_regs_table_size;
bool is_powered_on;
+ bool is_started;
struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+
+ enum phy_mode mode;
};
/**
* struct ufs_qcom_phy_specific_ops - set of pointers to functions which have a
* specific implementation per phy. Each UFS phy, should implement
* those functions according to its spec and requirements
- * @calibrate_phy: pointer to a function that calibrate the phy
* @start_serdes: pointer to a function that starts the serdes
* @is_physical_coding_sublayer_ready: pointer to a function that
* checks pcs readiness. returns 0 for success and non-zero for error.
@@ -130,7 +132,6 @@ struct ufs_qcom_phy {
* and writes to QSERDES_RX_SIGDET_CNTRL attribute
*/
struct ufs_qcom_phy_specific_ops {
- int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
void (*start_serdes)(struct ufs_qcom_phy *phy);
int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
index 12a1b498dc4b..ba1895b76a5d 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c
@@ -44,7 +44,19 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_14nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
@@ -53,6 +65,19 @@ static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_14nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
@@ -102,11 +127,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_14nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_14nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_14nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_14nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_14nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
index 4f68acb58b73..49f435c71147 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c
@@ -63,7 +63,19 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
{
- return 0;
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+ bool is_rate_B = false;
+ int ret;
+
+ if (phy_common->mode == PHY_MODE_UFS_HS_B)
+ is_rate_B = true;
+
+ ret = ufs_qcom_phy_qmp_20nm_phy_calibrate(phy_common, is_rate_B);
+ if (!ret)
+ /* phy calibrated, but yet to be started */
+ phy_common->is_started = false;
+
+ return ret;
}
static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
@@ -72,6 +84,19 @@ static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
}
static
+int ufs_qcom_phy_qmp_20nm_set_mode(struct phy *generic_phy, enum phy_mode mode)
+{
+ struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
+
+ phy_common->mode = PHY_MODE_INVALID;
+
+ if (mode > 0)
+ phy_common->mode = mode;
+
+ return 0;
+}
+
+static
void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
{
bool hibern8_exit_after_pwr_collapse = phy->quirks &
@@ -160,11 +185,11 @@ static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
.exit = ufs_qcom_phy_qmp_20nm_exit,
.power_on = ufs_qcom_phy_power_on,
.power_off = ufs_qcom_phy_power_off,
+ .set_mode = ufs_qcom_phy_qmp_20nm_set_mode,
.owner = THIS_MODULE,
};
static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
- .calibrate_phy = ufs_qcom_phy_qmp_20nm_phy_calibrate,
.start_serdes = ufs_qcom_phy_qmp_20nm_start_serdes,
.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
.set_tx_lane_enable = ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index 43865ef340e2..c5ff4525edef 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -518,9 +518,8 @@ void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
}
}
-int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
+static int ufs_qcom_phy_start_serdes(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
int ret = 0;
if (!ufs_qcom_phy->phy_spec_ops->start_serdes) {
@@ -533,7 +532,6 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
return ret;
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
@@ -564,31 +562,8 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
}
EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
-int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
-{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
- int ret = 0;
-
- if (!ufs_qcom_phy->phy_spec_ops->calibrate_phy) {
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() callback is not supported\n",
- __func__);
- ret = -ENOTSUPP;
- } else {
- ret = ufs_qcom_phy->phy_spec_ops->
- calibrate_phy(ufs_qcom_phy, is_rate_B);
- if (ret)
- dev_err(ufs_qcom_phy->dev, "%s: calibrate_phy() failed %d\n",
- __func__, ret);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
-
-int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
+static int ufs_qcom_phy_is_pcs_ready(struct ufs_qcom_phy *ufs_qcom_phy)
{
- struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
if (!ufs_qcom_phy->phy_spec_ops->is_physical_coding_sublayer_ready) {
dev_err(ufs_qcom_phy->dev, "%s: is_physical_coding_sublayer_ready() callback is not supported\n",
__func__);
@@ -598,7 +573,6 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
return ufs_qcom_phy->phy_spec_ops->
is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
@@ -609,6 +583,18 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
if (phy_common->is_powered_on)
return 0;
+ if (!phy_common->is_started) {
+ err = ufs_qcom_phy_start_serdes(phy_common);
+ if (err)
+ return err;
+
+ err = ufs_qcom_phy_is_pcs_ready(phy_common);
+ if (err)
+ return err;
+
+ phy_common->is_started = true;
+ }
+
err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
if (err) {
dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 54c34298a000..9c90e7d67e0a 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -1,7 +1,7 @@
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
- * Copyright (C) 2015 Renesas Electronics Corporation
+ * Copyright (C) 2015-2017 Renesas Electronics Corporation
*
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
@@ -12,16 +12,18 @@
* published by the Free Software Foundation.
*/
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/usb/of.h>
#include <linux/workqueue.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
@@ -79,6 +81,8 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
+#define RCAR_GEN3_PHY_HAS_DEDICATED_PINS 1
+
struct rcar_gen3_chan {
void __iomem *base;
struct extcon_dev *extcon;
@@ -86,7 +90,7 @@ struct rcar_gen3_chan {
struct regulator *vbus;
struct work_struct work;
bool extcon_host;
- bool has_otg;
+ bool has_otg_pins;
};
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
@@ -218,33 +222,40 @@ static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
}
+static enum phy_mode rcar_gen3_get_phy_mode(struct rcar_gen3_chan *ch)
+{
+ if (rcar_gen3_is_host(ch))
+ return PHY_MODE_USB_HOST;
+
+ return PHY_MODE_USB_DEVICE;
+}
+
static ssize_t role_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- bool is_b_device, is_host, new_mode_is_host;
+ bool is_b_device;
+ enum phy_mode cur_mode, new_mode;
- if (!ch->has_otg || !ch->phy->init_count)
+ if (!ch->has_otg_pins || !ch->phy->init_count)
return -EIO;
- /*
- * is_b_device: true is B-Device. false is A-Device.
- * If {new_mode_}is_host: true is Host mode. false is Peripheral mode.
- */
- is_b_device = rcar_gen3_check_id(ch);
- is_host = rcar_gen3_is_host(ch);
if (!strncmp(buf, "host", strlen("host")))
- new_mode_is_host = true;
+ new_mode = PHY_MODE_USB_HOST;
else if (!strncmp(buf, "peripheral", strlen("peripheral")))
- new_mode_is_host = false;
+ new_mode = PHY_MODE_USB_DEVICE;
else
return -EINVAL;
+ /* is_b_device: true is B-Device. false is A-Device. */
+ is_b_device = rcar_gen3_check_id(ch);
+ cur_mode = rcar_gen3_get_phy_mode(ch);
+
/* If current and new mode is the same, this returns the error */
- if (is_host == new_mode_is_host)
+ if (cur_mode == new_mode)
return -EINVAL;
- if (new_mode_is_host) { /* And is_host must be false */
+ if (new_mode == PHY_MODE_USB_HOST) { /* so cur_mode must be USB_DEVICE */
if (!is_b_device) /* A-Peripheral */
rcar_gen3_init_from_a_peri_to_a_host(ch);
else /* B-Peripheral */
@@ -264,7 +275,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->has_otg || !ch->phy->init_count)
+ if (!ch->has_otg_pins || !ch->phy->init_count)
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -303,7 +314,7 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->has_otg)
+ if (channel->has_otg_pins)
rcar_gen3_init_otg(channel);
return 0;
@@ -377,9 +388,17 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- { .compatible = "renesas,usb2-phy-r8a7795" },
- { .compatible = "renesas,usb2-phy-r8a7796" },
- { .compatible = "renesas,rcar-gen3-usb2-phy" },
+ {
+ .compatible = "renesas,usb2-phy-r8a7795",
+ .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
+ },
+ {
+ .compatible = "renesas,usb2-phy-r8a7796",
+ .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
+ },
+ {
+ .compatible = "renesas,rcar-gen3-usb2-phy",
+ },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -415,14 +434,17 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
/* call request_irq for OTG */
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
- int ret;
-
INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
IRQF_SHARED, dev_name(dev), channel);
if (irq < 0)
dev_err(dev, "No irq handler (%d)\n", irq);
- channel->has_otg = true;
+ }
+
+ if (of_usb_get_dr_mode_by_phy(dev->of_node, 0) == USB_DR_MODE_OTG) {
+ int ret;
+
+ channel->has_otg_pins = (uintptr_t)of_device_get_match_data(dev);
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
@@ -464,7 +486,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
- } else if (channel->has_otg) {
+ } else if (channel->has_otg_pins) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
@@ -484,7 +506,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
- if (channel->has_otg)
+ if (channel->has_otg_pins)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
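[Editor's note] The probe changes above decide has_otg_pins from two inputs: whether the controller bound to this PHY has dr_mode "otg" (of_usb_get_dr_mode_by_phy()) and per-compatible match data that records whether the SoC has dedicated ID/VBUS pins. A small sketch of recovering an integer-style flag through of_device_get_match_data(); the compatible strings and flag name are invented for illustration.

#include <linux/of_device.h>
#include <linux/platform_device.h>

#define MY_QUIRK_HAS_DEDICATED_PINS	1	/* illustrative flag */

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,soc-a",
	  .data = (void *)MY_QUIRK_HAS_DEDICATED_PINS },
	{ .compatible = "vendor,soc-b" },	/* no quirk: .data stays NULL */
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	/*
	 * The flag round-trips through a uintptr_t cast, the same way the
	 * hunk above recovers has_otg_pins from the matched entry.
	 */
	bool has_pins = (uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "dedicated OTG pins: %d\n", has_pins);
	return 0;
}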
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index bd0acdf38e0f..7f149d989046 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index ee7ce5ee53f9..5049dac79bd0 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index a958c9bced01..ee85fa0ca4b0 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -102,9 +102,40 @@
#define CMN_PLL1_SS_CTRL1 (0xb8 << 2)
#define CMN_PLL1_SS_CTRL2 (0xb9 << 2)
#define CMN_RXCAL_OVRD (0xd1 << 2)
+
#define CMN_TXPUCAL_CTRL (0xe0 << 2)
#define CMN_TXPUCAL_OVRD (0xe1 << 2)
+#define CMN_TXPDCAL_CTRL (0xf0 << 2)
#define CMN_TXPDCAL_OVRD (0xf1 << 2)
+
+/* For CMN_TXPUCAL_CTRL, CMN_TXPDCAL_CTRL */
+#define CMN_TXPXCAL_START BIT(15)
+#define CMN_TXPXCAL_DONE BIT(14)
+#define CMN_TXPXCAL_NO_RESPONSE BIT(13)
+#define CMN_TXPXCAL_CURRENT_RESPONSE BIT(12)
+
+#define CMN_TXPU_ADJ_CTRL (0x108 << 2)
+#define CMN_TXPD_ADJ_CTRL (0x10c << 2)
+
+/*
+ * For CMN_TXPUCAL_CTRL, CMN_TXPDCAL_CTRL,
+ * CMN_TXPU_ADJ_CTRL, CMN_TXPD_ADJ_CTRL
+ *
+ * NOTE: some of these registers are documented to be 2's complement
+ * signed numbers, but then documented to be always positive. Weird.
+ * In such a case, using CMN_CALIB_CODE_POS() avoids the unnecessary
+ * sign extension.
+ */
+#define CMN_CALIB_CODE_WIDTH 7
+#define CMN_CALIB_CODE_OFFSET 0
+#define CMN_CALIB_CODE_MASK GENMASK(CMN_CALIB_CODE_WIDTH, 0)
+#define CMN_CALIB_CODE(x) \
+ sign_extend32((x) >> CMN_CALIB_CODE_OFFSET, CMN_CALIB_CODE_WIDTH)
+
+#define CMN_CALIB_CODE_POS_MASK GENMASK(CMN_CALIB_CODE_WIDTH - 1, 0)
+#define CMN_CALIB_CODE_POS(x) \
+ (((x) >> CMN_CALIB_CODE_OFFSET) & CMN_CALIB_CODE_POS_MASK)
+
#define CMN_DIAG_PLL0_FBH_OVRD (0x1c0 << 2)
#define CMN_DIAG_PLL0_FBL_OVRD (0x1c1 << 2)
#define CMN_DIAG_PLL0_OVRD (0x1c2 << 2)
@@ -138,6 +169,15 @@
#define TX_TXCC_MGNFS_MULT_101(n) ((0x4055 | ((n) << 9)) << 2)
#define TX_TXCC_MGNFS_MULT_110(n) ((0x4056 | ((n) << 9)) << 2)
#define TX_TXCC_MGNFS_MULT_111(n) ((0x4057 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_000(n) ((0x4058 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_001(n) ((0x4059 | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_010(n) ((0x405a | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_011(n) ((0x405b | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_100(n) ((0x405c | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_101(n) ((0x405d | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_110(n) ((0x405e | ((n) << 9)) << 2)
+#define TX_TXCC_MGNLS_MULT_111(n) ((0x405f | ((n) << 9)) << 2)
+
#define XCVR_DIAG_PLLDRC_CTRL(n) ((0x40e0 | ((n) << 9)) << 2)
#define XCVR_DIAG_BIDI_CTRL(n) ((0x40e8 | ((n) << 9)) << 2)
#define XCVR_DIAG_LANE_FCM_EN_MGN(n) ((0x40f2 | ((n) << 9)) << 2)
@@ -150,10 +190,63 @@
#define TX_RCVDET_ST_TMR(n) ((0x4123 | ((n) << 9)) << 2)
#define TX_DIAG_TX_DRV(n) ((0x41e1 | ((n) << 9)) << 2)
#define TX_DIAG_BGREF_PREDRV_DELAY (0x41e7 << 2)
+
+/* Use this for "n" in macros like "_MULT_XXX" to target the aux channel */
+#define AUX_CH_LANE 8
+
#define TX_ANA_CTRL_REG_1 (0x5020 << 2)
+
+#define TXDA_DP_AUX_EN BIT(15)
+#define AUXDA_SE_EN BIT(14)
+#define TXDA_CAL_LATCH_EN BIT(13)
+#define AUXDA_POLARITY BIT(12)
+#define TXDA_DRV_POWER_ISOLATION_EN BIT(11)
+#define TXDA_DRV_POWER_EN_PH_2_N BIT(10)
+#define TXDA_DRV_POWER_EN_PH_1_N BIT(9)
+#define TXDA_BGREF_EN BIT(8)
+#define TXDA_DRV_LDO_EN BIT(7)
+#define TXDA_DECAP_EN_DEL BIT(6)
+#define TXDA_DECAP_EN BIT(5)
+#define TXDA_UPHY_SUPPLY_EN_DEL BIT(4)
+#define TXDA_UPHY_SUPPLY_EN BIT(3)
+#define TXDA_LOW_LEAKAGE_EN BIT(2)
+#define TXDA_DRV_IDLE_LOWI_EN BIT(1)
+#define TXDA_DRV_CMN_MODE_EN BIT(0)
+
#define TX_ANA_CTRL_REG_2 (0x5021 << 2)
+
+#define AUXDA_DEBOUNCING_CLK BIT(15)
+#define TXDA_LPBK_RECOVERED_CLK_EN BIT(14)
+#define TXDA_LPBK_ISI_GEN_EN BIT(13)
+#define TXDA_LPBK_SERIAL_EN BIT(12)
+#define TXDA_LPBK_LINE_EN BIT(11)
+#define TXDA_DRV_LDO_REDC_SINKIQ BIT(10)
+#define XCVR_DECAP_EN_DEL BIT(9)
+#define XCVR_DECAP_EN BIT(8)
+#define TXDA_MPHY_ENABLE_HS_NT BIT(7)
+#define TXDA_MPHY_SA_MODE BIT(6)
+#define TXDA_DRV_LDO_RBYR_FB_EN BIT(5)
+#define TXDA_DRV_RST_PULL_DOWN BIT(4)
+#define TXDA_DRV_LDO_BG_FB_EN BIT(3)
+#define TXDA_DRV_LDO_BG_REF_EN BIT(2)
+#define TXDA_DRV_PREDRV_EN_DEL BIT(1)
+#define TXDA_DRV_PREDRV_EN BIT(0)
+
#define TXDA_COEFF_CALC_CTRL (0x5022 << 2)
+
+#define TX_HIGH_Z BIT(6)
+#define TX_VMARGIN_OFFSET 3
+#define TX_VMARGIN_MASK 0x7
+#define LOW_POWER_SWING_EN BIT(2)
+#define TX_FCM_DRV_MAIN_EN BIT(1)
+#define TX_FCM_FULL_MARGIN BIT(0)
+
#define TX_DIG_CTRL_REG_2 (0x5024 << 2)
+
+#define TX_HIGH_Z_TM_EN BIT(15)
+#define TX_RESCAL_CODE_OFFSET 0
+#define TX_RESCAL_CODE_MASK 0x3f
+
#define TXDA_CYA_AUXDA_CYA (0x5025 << 2)
#define TX_ANA_CTRL_REG_3 (0x5026 << 2)
#define TX_ANA_CTRL_REG_4 (0x5027 << 2)
@@ -456,54 +549,72 @@ static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
*/
tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
if (!tcphy->flip)
- tx_ana_ctrl_reg_1 |= BIT(12);
+ tx_ana_ctrl_reg_1 |= AUXDA_POLARITY;
else
- tx_ana_ctrl_reg_1 &= ~BIT(12);
+ tx_ana_ctrl_reg_1 &= ~AUXDA_POLARITY;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
}
static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
{
+ u16 val;
u16 tx_ana_ctrl_reg_1;
- u16 rdata, rdata2, val;
-
- /* disable txda_cal_latch_en for rewrite the calibration values */
- tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 &= ~BIT(13);
- writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ u16 tx_ana_ctrl_reg_2;
+ s32 pu_calib_code, pd_calib_code;
+ s32 pu_adj, pd_adj;
+ u16 calib;
/*
- * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
- * write it to TX_DIG_CTRL_REG_2[6:0], and delay 1ms to make sure it
- * works.
+ * Calculate calibration code as per docs: use an average of the
+ * pull down and pull up. Then add in adjustments.
*/
- rdata = readl(tcphy->base + TX_DIG_CTRL_REG_2);
- rdata = rdata & 0xffc0;
+ val = readl(tcphy->base + CMN_TXPUCAL_CTRL);
+ pu_calib_code = CMN_CALIB_CODE_POS(val);
+ val = readl(tcphy->base + CMN_TXPDCAL_CTRL);
+ pd_calib_code = CMN_CALIB_CODE_POS(val);
+ val = readl(tcphy->base + CMN_TXPU_ADJ_CTRL);
+ pu_adj = CMN_CALIB_CODE(val);
+ val = readl(tcphy->base + CMN_TXPD_ADJ_CTRL);
+ pd_adj = CMN_CALIB_CODE(val);
+ calib = (pu_calib_code + pd_calib_code) / 2 + pu_adj + pd_adj;
- rdata2 = readl(tcphy->base + CMN_TXPUCAL_CTRL);
- rdata2 = rdata2 & 0x3f;
+ /* disable txda_cal_latch_en for rewrite the calibration values */
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 &= ~TXDA_CAL_LATCH_EN;
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata | rdata2;
+ /* write the calibration, then delay 10 ms as sample in docs */
+ val = readl(tcphy->base + TX_DIG_CTRL_REG_2);
+ val &= ~(TX_RESCAL_CODE_MASK << TX_RESCAL_CODE_OFFSET);
+ val |= calib << TX_RESCAL_CODE_OFFSET;
writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
- usleep_range(1000, 1050);
+ usleep_range(10000, 10050);
/*
* Enable signal for latch that sample and holds calibration values.
* Activate this signal for 1 clock cycle to sample new calibration
* values.
*/
- tx_ana_ctrl_reg_1 |= BIT(13);
+ tx_ana_ctrl_reg_1 |= TXDA_CAL_LATCH_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
usleep_range(150, 200);
/* set TX Voltage Level and TX Deemphasis to 0 */
writel(0, tcphy->base + PHY_DP_TX_CTL);
+
/* re-enable decap */
- writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
- tx_ana_ctrl_reg_1 |= BIT(3);
+ tx_ana_ctrl_reg_2 = XCVR_DECAP_EN;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+ udelay(1);
+ tx_ana_ctrl_reg_2 |= XCVR_DECAP_EN_DEL;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
+
+ tx_ana_ctrl_reg_1 |= TXDA_UPHY_SUPPLY_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 |= BIT(4);
+ udelay(1);
+ tx_ana_ctrl_reg_1 |= TXDA_UPHY_SUPPLY_EN_DEL;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
@@ -515,44 +626,66 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
/* re-enables Bandgap reference for LDO */
- tx_ana_ctrl_reg_1 |= BIT(7);
+ tx_ana_ctrl_reg_1 |= TXDA_DRV_LDO_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
- tx_ana_ctrl_reg_1 |= BIT(8);
+ udelay(5);
+ tx_ana_ctrl_reg_1 |= TXDA_BGREF_EN;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* re-enables the transmitter pre-driver, driver data selection MUX,
* and receiver detect circuits.
*/
- writel(0x301, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
+ tx_ana_ctrl_reg_2 |= TXDA_DRV_PREDRV_EN;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
+ udelay(1);
+ tx_ana_ctrl_reg_2 |= TXDA_DRV_PREDRV_EN_DEL;
+ writel(tx_ana_ctrl_reg_2, tcphy->base + TX_ANA_CTRL_REG_2);
/*
- * Do some magic undocumented stuff, some of which appears to
- * undo the "re-enables Bandgap reference for LDO" above.
+ * Do all the undocumented magic:
+ * - Turn on TXDA_DP_AUX_EN, whatever that is, even though sample
+ * never shows this going on.
+ * - Turn on TXDA_DECAP_EN (and TXDA_DECAP_EN_DEL) even though
+ * docs say for aux it's always 0.
+ * - Turn off the LDO and BGREF, which we just spent time turning
+ * on above (???).
+ *
+ * Without this magic, things seem worse.
*/
- tx_ana_ctrl_reg_1 |= BIT(15);
- tx_ana_ctrl_reg_1 &= ~BIT(8);
- tx_ana_ctrl_reg_1 &= ~BIT(7);
- tx_ana_ctrl_reg_1 |= BIT(6);
- tx_ana_ctrl_reg_1 |= BIT(5);
+ tx_ana_ctrl_reg_1 |= TXDA_DP_AUX_EN;
+ tx_ana_ctrl_reg_1 |= TXDA_DECAP_EN;
+ tx_ana_ctrl_reg_1 &= ~TXDA_DRV_LDO_EN;
+ tx_ana_ctrl_reg_1 &= ~TXDA_BGREF_EN;
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ udelay(1);
+ tx_ana_ctrl_reg_1 |= TXDA_DECAP_EN_DEL;
writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
-
- writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
- writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
- writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
/*
- * Controls low_power_swing_en, don't set the voltage swing of the
- * driver to 400mv. The values below are peak to peak (differential)
- * values.
+ * Undo the work we did to set the LDO voltage.
+ * This doesn't seem to help nor hurt, but it kinda goes with the
+ * undocumented magic above.
*/
+ writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
+
+ /* Don't set voltage swing to 400 mV peak to peak (differential) */
writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
+
+ /* Init TXDA_CYA_AUXDA_CYA for unknown magic reasons */
writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
- /* Controls tx_high_z_tm_en */
+ /*
+ * More undocumented magic, presumably the goal of which is to
+ * make the "auxda_source_aux_oen" be ignored and instead to decide
+ * about "high impedance state" based on what software puts in the
+ * register TXDA_COEFF_CALC_CTRL (see TX_HIGH_Z). Since we only
+ * program that register once and we don't set the bit TX_HIGH_Z,
+ * presumably the goal here is that we should never put the analog
+ * driver in high impedance state.
+ */
val = readl(tcphy->base + TX_DIG_CTRL_REG_2);
- val |= BIT(15);
+ val |= TX_HIGH_Z_TM_EN;
writel(val, tcphy->base + TX_DIG_CTRL_REG_2);
}
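[Editor's note] The reworked calibration averages the pull-up and pull-down calibration codes and then adds two adjustment values that may be negative: CMN_CALIB_CODE() sign-extends an 8-bit two's-complement field via sign_extend32(), while CMN_CALIB_CODE_POS() only masks, for fields documented as always positive. A small worked sketch of that arithmetic; the register values below are made up purely to show the sign handling.

#include <linux/bitops.h>	/* sign_extend32() */
#include <linux/bits.h>		/* GENMASK() */
#include <linux/types.h>

#define CODE_WIDTH	7
#define CODE(x)		sign_extend32((x), CODE_WIDTH)		/* signed field */
#define CODE_POS(x)	((x) & GENMASK(CODE_WIDTH - 1, 0))	/* always-positive field */

static s32 example_calib(void)
{
	s32 pu = CODE_POS(0x32);	/* 50 */
	s32 pd = CODE_POS(0x2e);	/* 46 */
	s32 pu_adj = CODE(0xfe);	/* -2: bit 7 acts as the sign bit */
	s32 pd_adj = CODE(0x01);	/*  1 */

	/* average of the codes plus both adjustments: 48 - 2 + 1 = 47 */
	return (pu + pd) / 2 + pu_adj + pd_adj;
}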
diff --git a/drivers/phy/samsung/Makefile b/drivers/phy/samsung/Makefile
index 20d7f2424772..db9b1aa0de6e 100644
--- a/drivers/phy/samsung/Makefile
+++ b/drivers/phy/samsung/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO) += phy-exynos-dp-video.o
obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o
obj-$(CONFIG_PHY_EXYNOS_PCIE) += phy-exynos-pcie.o
diff --git a/drivers/phy/ti/Makefile b/drivers/phy/ti/Makefile
index 0cc3a1a557a3..9f361756eaf2 100644
--- a/drivers/phy/ti/Makefile
+++ b/drivers/phy/ti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_DA8XX_USB) += phy-da8xx-usb.o
obj-$(CONFIG_PHY_DM816X_USB) += phy-dm816x-usb.o
obj-$(CONFIG_OMAP_CONTROL_PHY) += phy-omap-control.o
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 0e564f32749f..68ce4a082b9b 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -68,6 +68,40 @@
#define PCIE_PCS_MASK 0xFF0000
#define PCIE_PCS_DELAY_COUNT_SHIFT 0x10
+#define PCIEPHYRX_ANA_PROGRAMMABILITY 0x0000000C
+#define INTERFACE_MASK GENMASK(31, 27)
+#define INTERFACE_SHIFT 27
+#define LOSD_MASK GENMASK(17, 14)
+#define LOSD_SHIFT 14
+#define MEM_PLLDIV GENMASK(6, 5)
+
+#define PCIEPHYRX_TRIM 0x0000001C
+#define MEM_DLL_TRIM_SEL GENMASK(31, 30)
+#define MEM_DLL_TRIM_SHIFT 30
+
+#define PCIEPHYRX_DLL 0x00000024
+#define MEM_DLL_PHINT_RATE GENMASK(31, 30)
+
+#define PCIEPHYRX_DIGITAL_MODES 0x00000028
+#define MEM_CDR_FASTLOCK BIT(23)
+#define MEM_CDR_LBW GENMASK(22, 21)
+#define MEM_CDR_STEPCNT GENMASK(20, 19)
+#define MEM_CDR_STL_MASK GENMASK(18, 16)
+#define MEM_CDR_STL_SHIFT 16
+#define MEM_CDR_THR_MASK GENMASK(15, 13)
+#define MEM_CDR_THR_SHIFT 13
+#define MEM_CDR_THR_MODE BIT(12)
+#define MEM_CDR_CDR_2NDO_SDM_MODE BIT(11)
+#define MEM_OVRD_HS_RATE BIT(26)
+
+#define PCIEPHYRX_EQUALIZER 0x00000038
+#define MEM_EQLEV GENMASK(31, 16)
+#define MEM_EQFTC GENMASK(15, 11)
+#define MEM_EQCTL GENMASK(10, 7)
+#define MEM_EQCTL_SHIFT 7
+#define MEM_OVRD_EQLEV BIT(2)
+#define MEM_OVRD_EQFTC BIT(1)
+
/*
* This is an Empirical value that works, need to confirm the actual
* value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -91,6 +125,8 @@ struct pipe3_dpll_map {
struct ti_pipe3 {
void __iomem *pll_ctrl_base;
+ void __iomem *phy_rx;
+ void __iomem *phy_tx;
struct device *dev;
struct device *control_dev;
struct clk *wkupclk;
@@ -261,6 +297,37 @@ static int ti_pipe3_dpll_program(struct ti_pipe3 *phy)
return ti_pipe3_dpll_wait_lock(phy);
}
+static void ti_pipe3_calibrate(struct ti_pipe3 *phy)
+{
+ u32 val;
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY);
+ val &= ~(INTERFACE_MASK | LOSD_MASK | MEM_PLLDIV);
+ val |= (0x1 << INTERFACE_SHIFT | 0xA << LOSD_SHIFT);
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_ANA_PROGRAMMABILITY, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES);
+ val &= ~(MEM_CDR_STEPCNT | MEM_CDR_STL_MASK | MEM_CDR_THR_MASK |
+ MEM_CDR_CDR_2NDO_SDM_MODE | MEM_OVRD_HS_RATE);
+ val |= (MEM_CDR_FASTLOCK | MEM_CDR_LBW | 0x3 << MEM_CDR_STL_SHIFT |
+ 0x1 << MEM_CDR_THR_SHIFT | MEM_CDR_THR_MODE);
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DIGITAL_MODES, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_TRIM);
+ val &= ~MEM_DLL_TRIM_SEL;
+ val |= 0x2 << MEM_DLL_TRIM_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_TRIM, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_DLL);
+ val |= MEM_DLL_PHINT_RATE;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_DLL, val);
+
+ val = ti_pipe3_readl(phy->phy_rx, PCIEPHYRX_EQUALIZER);
+ val &= ~(MEM_EQLEV | MEM_EQCTL | MEM_OVRD_EQLEV | MEM_OVRD_EQFTC);
+ val |= MEM_EQFTC | 0x1 << MEM_EQCTL_SHIFT;
+ ti_pipe3_writel(phy->phy_rx, PCIEPHYRX_EQUALIZER, val);
+}
+
static int ti_pipe3_init(struct phy *x)
{
struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -282,7 +349,12 @@ static int ti_pipe3_init(struct phy *x)
val = 0x96 << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT;
ret = regmap_update_bits(phy->pcs_syscon, phy->pcie_pcs_reg,
PCIE_PCS_MASK, val);
- return ret;
+ if (ret)
+ return ret;
+
+ ti_pipe3_calibrate(phy);
+
+ return 0;
}
/* Bring it out of IDLE if it is IDLE */
@@ -513,6 +585,29 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
return 0;
}
+static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy)
+{
+ struct resource *res;
+ struct device *dev = phy->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie"))
+ return 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy_rx");
+ phy->phy_rx = devm_ioremap_resource(dev, res);
+ if (IS_ERR(phy->phy_rx))
+ return PTR_ERR(phy->phy_rx);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy_tx");
+ phy->phy_tx = devm_ioremap_resource(dev, res);
+
+ return PTR_ERR_OR_ZERO(phy->phy_tx);
+}
+
static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
{
struct resource *res;
@@ -559,6 +654,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = ti_pipe3_get_tx_rx_base(phy);
+ if (ret)
+ return ret;
+
ret = ti_pipe3_get_sysctrl(phy);
if (ret)
return ret;
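[Editor's note] ti_pipe3_calibrate() above is a chain of read-modify-write updates on the newly mapped phy_rx window: clear the field masks declared with GENMASK()/BIT(), then OR in the desired values. A generic sketch of that pattern on a memory-mapped register; the offset, field, and names are invented, and plain readl()/writel() stand in for the driver's offset-based helpers.

#include <linux/bits.h>		/* GENMASK(), BIT() */
#include <linux/io.h>		/* readl(), writel() */

#define MY_REG_OFFSET	0x0c			/* illustrative register */
#define MY_FIELD_MASK	GENMASK(17, 14)		/* illustrative field */
#define MY_FIELD_SHIFT	14
#define MY_ENABLE	BIT(23)

/* Update one field and one flag bit without disturbing the rest. */
static void my_rmw(void __iomem *base, u32 field_val)
{
	u32 val = readl(base + MY_REG_OFFSET);

	val &= ~MY_FIELD_MASK;			/* clear the old field */
	val |= field_val << MY_FIELD_SHIFT;	/* insert the new value */
	val |= MY_ENABLE;
	writel(val, base + MY_REG_OFFSET);
}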
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 82cd8b08d71f..4571cc098b76 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -2,11 +2,10 @@
# PINCTRL infrastructure and drivers
#
-config PINCTRL
- bool
+menuconfig PINCTRL
+ bool "Pin controllers"
-menu "Pin controllers"
- depends on PINCTRL
+if PINCTRL
config GENERIC_PINCTRL_GROUPS
bool
@@ -33,7 +32,8 @@ config DEBUG_PINCTRL
config PINCTRL_ADI2
bool "ADI pin controller driver"
- depends on BLACKFIN
+ depends on (BF54x || BF60x)
+ depends on !GPIO_ADI
select PINMUX
select IRQ_DOMAIN
help
@@ -98,7 +98,8 @@ config PINCTRL_AT91PIO4
config PINCTRL_AMD
tristate "AMD GPIO pin control"
- depends on GPIOLIB
+ depends on HAS_IOMEM
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -152,12 +153,14 @@ config PINCTRL_GEMINI
depends on ARCH_GEMINI
default ARCH_GEMINI
select PINMUX
+ select GENERIC_PINCONF
select MFD_SYSCON
config PINCTRL_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
depends on SPI_MASTER || I2C
depends on I2C || I2C=n
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP_I2C if I2C
select REGMAP_SPI if SPI_MASTER
@@ -168,16 +171,6 @@ config PINCTRL_MCP23S08
This provides a GPIO interface supporting inputs and outputs.
The I2C versions of the chips can be used as interrupt-controller.
-config PINCTRL_MESON
- bool
- depends on OF
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB
- select OF_GPIO
- select REGMAP_MMIO
-
config PINCTRL_OXNAS
bool
depends on OF
@@ -210,6 +203,7 @@ config PINCTRL_RZA1
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
+ depends on HAS_IOMEM
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
@@ -226,10 +220,11 @@ config PINCTRL_SIRF
config PINCTRL_SX150X
bool "Semtech SX150x I2C GPIO expander pinctrl driver"
- depends on GPIOLIB && I2C=y
+ depends on I2C=y
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select REGMAP
help
@@ -369,6 +364,7 @@ source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
source "drivers/pinctrl/zte/Kconfig"
+source "drivers/pinctrl/meson/Kconfig"
config PINCTRL_XWAY
bool
@@ -380,4 +376,4 @@ config PINCTRL_TB10X
depends on OF && ARC_PLAT_TB10X
select GPIOLIB
-endmenu
+endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index c16e27900dbb..d0d4844f8022 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# generic pinmux support
subdir-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 2a65111f3c70..80ceb9dae944 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Broadcom pinctrl support
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index ff782445dfb7..785c366fd6d6 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -379,7 +379,7 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
events &= pc->enabled_irq_map[bank];
for_each_set_bit(offset, &events, 32) {
gpio = (32 * bank) + offset;
- generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
+ generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irq.domain,
gpio));
}
}
@@ -661,7 +661,7 @@ static void bcm2835_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
const char *fname = bcm2835_functions[fsel];
int value = bcm2835_gpio_get_bit(pc, GPLEV0, offset);
- int irq = irq_find_mapping(chip->irqdomain, offset);
+ int irq = irq_find_mapping(chip->irq.domain, offset);
seq_printf(s, "function %s in %s; irq %d (%s)",
fname, value ? "hi" : "lo",
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 85a8c97d9dfe..b70058caee50 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -172,7 +172,7 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(bit, &val, NGPIOS_PER_BANK) {
unsigned pin = NGPIOS_PER_BANK * i + bit;
- int child_irq = irq_find_mapping(gc->irqdomain, pin);
+ int child_irq = irq_find_mapping(gc->irq.domain, pin);
/*
* Clear the interrupt before invoking the
@@ -311,7 +311,7 @@ static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return 0;
- return pinctrl_request_gpio(gpio);
+ return pinctrl_gpio_request(gpio);
}
static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
@@ -322,7 +322,7 @@ static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
if (!chip->pinmux_is_supported)
return;
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 1cfe45fd391f..e67ae52023ad 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -275,23 +275,6 @@ static struct irq_chip nsp_gpio_irq_chip = {
.irq_set_type = nsp_gpio_irq_set_type,
};
-/*
- * Request the nsp IOMUX pinmux controller to mux individual pins to GPIO
- */
-static int nsp_gpio_request(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- return pinctrl_request_gpio(gpio);
-}
-
-static void nsp_gpio_free(struct gpio_chip *gc, unsigned offset)
-{
- unsigned gpio = gc->base + offset;
-
- pinctrl_free_gpio(gpio);
-}
-
static int nsp_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
struct nsp_gpio *chip = gpiochip_get_data(gc);
@@ -670,8 +653,8 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->label = dev_name(dev);
gc->parent = dev;
gc->of_node = dev->of_node;
- gc->request = nsp_gpio_request;
- gc->free = nsp_gpio_free;
+ gc->request = gpiochip_generic_request;
+ gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->set = nsp_gpio_set;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 56fbe4c3e800..4c8d5b23e4d0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -733,14 +733,14 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
}
/**
- * pinctrl_request_gpio() - request a single pin to be used as GPIO
+ * pinctrl_gpio_request() - request a single pin to be used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_request() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed in.
*/
-int pinctrl_request_gpio(unsigned gpio)
+int pinctrl_gpio_request(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -765,17 +765,17 @@ int pinctrl_request_gpio(unsigned gpio)
return ret;
}
-EXPORT_SYMBOL_GPL(pinctrl_request_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_request);
/**
- * pinctrl_free_gpio() - free control on a single pin, currently used as GPIO
+ * pinctrl_gpio_free() - free control on a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
*
* This function should *ONLY* be used from gpiolib-based GPIO drivers,
* as part of their gpio_free() semantics, platforms and individual drivers
* shall *NOT* request GPIO pins to be muxed out.
*/
-void pinctrl_free_gpio(unsigned gpio)
+void pinctrl_gpio_free(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range;
@@ -795,7 +795,7 @@ void pinctrl_free_gpio(unsigned gpio)
mutex_unlock(&pctldev->mutex);
}
-EXPORT_SYMBOL_GPL(pinctrl_free_gpio);
+EXPORT_SYMBOL_GPL(pinctrl_gpio_free);
static int pinctrl_gpio_direction(unsigned gpio, bool input)
{
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 7880c3adc450..8cf2eba17c8c 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -154,7 +154,7 @@ struct pinctrl_setting {
* or pin, and each of these will increment the @usecount.
* @mux_owner: The name of device that called pinctrl_get().
* @mux_setting: The most recent selected mux setting for this pin, if any.
- * @gpio_owner: If pinctrl_request_gpio() was called for this pin, this is
+ * @gpio_owner: If pinctrl_gpio_request() was called for this pin, this is
* the name of the GPIO that "owns" this pin.
*/
struct pin_desc {
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 525a5ff5dcb4..19bb9a55a567 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Freescale pin control drivers
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index f30720a752f3..4aea1b8504f7 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -5,7 +5,8 @@ if (X86 || COMPILE_TEST)
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
- depends on GPIOLIB && ACPI
+ depends on ACPI
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select PINMUX
select PINCONF
@@ -65,6 +66,14 @@ config PINCTRL_CANNONLAKE
This pinctrl driver provides an interface that allows configuring
of Intel Cannon Lake PCH pins and using them as GPIOs.
+config PINCTRL_CEDARFORK
+ tristate "Intel Cedar Fork pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Cedar Fork PCH pins and using them as GPIOs.
+
config PINCTRL_DENVERTON
tristate "Intel Denverton pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index c12874da5992..fadfe3ea2b04 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Intel pin control drivers
obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
@@ -6,6 +7,7 @@ obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
+obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o
obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_LEWISBURG) += pinctrl-lewisburg.o
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 0f3a02495aeb..9c1ca29c60b7 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1627,7 +1627,7 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
pending = readl(reg);
raw_spin_unlock(&vg->lock);
for_each_set_bit(pin, &pending, 32) {
- virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ virq = irq_find_mapping(vg->chip.irq.domain, base + pin);
generic_handle_irq(virq);
}
}
@@ -1660,7 +1660,7 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
value = readl(reg);
if (value & BYT_DIRECT_IRQ_EN) {
- clear_bit(i, gc->irq_valid_mask);
+ clear_bit(i, gc->irq.valid_mask);
dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
} else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
@@ -1703,7 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
- gc->irq_need_valid_mask = true;
+ gc->irq.need_valid_mask = true;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
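[Editor's note] Several hunks in this series switch from gc->irqdomain and gc->irq_valid_mask to the consolidated gc->irq.domain and gc->irq.valid_mask members. The dispatch pattern itself is unchanged: map the hardware offset to a Linux virq through the chip's irqdomain, then hand it to the generic flow handler. A compact sketch, with the pending-bit handling simplified for illustration.

#include <linux/bitops.h>	/* for_each_set_bit() */
#include <linux/gpio/driver.h>
#include <linux/irqdesc.h>	/* generic_handle_irq() */
#include <linux/irqdomain.h>	/* irq_find_mapping() */

static void my_dispatch(struct gpio_chip *gc, unsigned long pending)
{
	unsigned int offset;

	for_each_set_bit(offset, &pending, gc->ngpio) {
		/* gc->irq.domain replaces the old gc->irqdomain field. */
		unsigned int virq = irq_find_mapping(gc->irq.domain, offset);

		generic_handle_irq(virq);
	}
}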
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
new file mode 100644
index 000000000000..59216b0533d9
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -0,0 +1,375 @@
+/*
+ * Intel Cedar Fork PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define CDF_PAD_OWN 0x020
+#define CDF_PADCFGLOCK 0x0c0
+#define CDF_HOSTSW_OWN 0x120
+#define CDF_GPI_IS 0x200
+#define CDF_GPI_IE 0x230
+
+#define CDF_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define CDF_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = CDF_PAD_OWN, \
+ .padcfglock_offset = CDF_PADCFGLOCK, \
+ .hostown_offset = CDF_HOSTSW_OWN, \
+ .is_offset = CDF_GPI_IS, \
+ .ie_offset = CDF_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Cedar Fork PCH */
+static const struct pinctrl_pin_desc cdf_pins[] = {
+ /* WEST2 */
+ PINCTRL_PIN(0, "GBE_SDP_TIMESYNC0_S2N"),
+ PINCTRL_PIN(1, "GBE_SDP_TIMESYNC1_S2N"),
+ PINCTRL_PIN(2, "GBE_SDP_TIMESYNC2_S2N"),
+ PINCTRL_PIN(3, "GBE_SDP_TIMESYNC3_S2N"),
+ PINCTRL_PIN(4, "GBE0_I2C_CLK"),
+ PINCTRL_PIN(5, "GBE0_I2C_DATA"),
+ PINCTRL_PIN(6, "GBE1_I2C_CLK"),
+ PINCTRL_PIN(7, "GBE1_I2C_DATA"),
+ PINCTRL_PIN(8, "GBE2_I2C_CLK"),
+ PINCTRL_PIN(9, "GBE2_I2C_DATA"),
+ PINCTRL_PIN(10, "GBE3_I2C_CLK"),
+ PINCTRL_PIN(11, "GBE3_I2C_DATA"),
+ PINCTRL_PIN(12, "GBE0_LED0"),
+ PINCTRL_PIN(13, "GBE0_LED1"),
+ PINCTRL_PIN(14, "GBE0_LED2"),
+ PINCTRL_PIN(15, "GBE1_LED0"),
+ PINCTRL_PIN(16, "GBE1_LED1"),
+ PINCTRL_PIN(17, "GBE1_LED2"),
+ PINCTRL_PIN(18, "GBE2_LED0"),
+ PINCTRL_PIN(19, "GBE2_LED1"),
+ PINCTRL_PIN(20, "GBE2_LED2"),
+ PINCTRL_PIN(21, "GBE3_LED0"),
+ PINCTRL_PIN(22, "GBE3_LED1"),
+ PINCTRL_PIN(23, "GBE3_LED2"),
+ /* WEST3 */
+ PINCTRL_PIN(24, "NCSI_RXD0"),
+ PINCTRL_PIN(25, "NCSI_CLK_IN"),
+ PINCTRL_PIN(26, "NCSI_RXD1"),
+ PINCTRL_PIN(27, "NCSI_CRS_DV"),
+ PINCTRL_PIN(28, "NCSI_ARB_IN"),
+ PINCTRL_PIN(29, "NCSI_TX_EN"),
+ PINCTRL_PIN(30, "NCSI_TXD0"),
+ PINCTRL_PIN(31, "NCSI_TXD1"),
+ PINCTRL_PIN(32, "NCSI_ARB_OUT"),
+ PINCTRL_PIN(33, "GBE_SMB_CLK"),
+ PINCTRL_PIN(34, "GBE_SMB_DATA"),
+ PINCTRL_PIN(35, "GBE_SMB_ALRT_N"),
+ PINCTRL_PIN(36, "THERMTRIP_N"),
+ PINCTRL_PIN(37, "PCHHOT_N"),
+ PINCTRL_PIN(38, "ERROR0_N"),
+ PINCTRL_PIN(39, "ERROR1_N"),
+ PINCTRL_PIN(40, "ERROR2_N"),
+ PINCTRL_PIN(41, "MSMI_N"),
+ PINCTRL_PIN(42, "CATERR_N"),
+ PINCTRL_PIN(43, "MEMTRIP_N"),
+ PINCTRL_PIN(44, "UART0_RXD"),
+ PINCTRL_PIN(45, "UART0_TXD"),
+ PINCTRL_PIN(46, "UART1_RXD"),
+ PINCTRL_PIN(47, "UART1_TXD"),
+ /* WEST01 */
+ PINCTRL_PIN(48, "GBE_GPIO13"),
+ PINCTRL_PIN(49, "AUX_PWR"),
+ PINCTRL_PIN(50, "CPU_GP_2"),
+ PINCTRL_PIN(51, "CPU_GP_3"),
+ PINCTRL_PIN(52, "FAN_PWM_0"),
+ PINCTRL_PIN(53, "FAN_PWM_1"),
+ PINCTRL_PIN(54, "FAN_PWM_2"),
+ PINCTRL_PIN(55, "FAN_PWM_3"),
+ PINCTRL_PIN(56, "FAN_TACH_0"),
+ PINCTRL_PIN(57, "FAN_TACH_1"),
+ PINCTRL_PIN(58, "FAN_TACH_2"),
+ PINCTRL_PIN(59, "FAN_TACH_3"),
+ PINCTRL_PIN(60, "ME_SMB0_CLK"),
+ PINCTRL_PIN(61, "ME_SMB0_DATA"),
+ PINCTRL_PIN(62, "ME_SMB0_ALRT_N"),
+ PINCTRL_PIN(63, "ME_SMB1_CLK"),
+ PINCTRL_PIN(64, "ME_SMB1_DATA"),
+ PINCTRL_PIN(65, "ME_SMB1_ALRT_N"),
+ PINCTRL_PIN(66, "ME_SMB2_CLK"),
+ PINCTRL_PIN(67, "ME_SMB2_DATA"),
+ PINCTRL_PIN(68, "ME_SMB2_ALRT_N"),
+ PINCTRL_PIN(69, "GBE_MNG_I2C_CLK"),
+ PINCTRL_PIN(70, "GBE_MNG_I2C_DATA"),
+ /* WEST5 */
+ PINCTRL_PIN(71, "IE_UART_RXD"),
+ PINCTRL_PIN(72, "IE_UART_TXD"),
+ PINCTRL_PIN(73, "VPP_SMB_CLK"),
+ PINCTRL_PIN(74, "VPP_SMB_DATA"),
+ PINCTRL_PIN(75, "VPP_SMB_ALRT_N"),
+ PINCTRL_PIN(76, "PCIE_CLKREQ0_N"),
+ PINCTRL_PIN(77, "PCIE_CLKREQ1_N"),
+ PINCTRL_PIN(78, "PCIE_CLKREQ2_N"),
+ PINCTRL_PIN(79, "PCIE_CLKREQ3_N"),
+ PINCTRL_PIN(80, "PCIE_CLKREQ4_N"),
+ PINCTRL_PIN(81, "PCIE_CLKREQ5_N"),
+ PINCTRL_PIN(82, "PCIE_CLKREQ6_N"),
+ PINCTRL_PIN(83, "PCIE_CLKREQ7_N"),
+ PINCTRL_PIN(84, "PCIE_CLKREQ8_N"),
+ PINCTRL_PIN(85, "PCIE_CLKREQ9_N"),
+ PINCTRL_PIN(86, "FLEX_CLK_SE0"),
+ PINCTRL_PIN(87, "FLEX_CLK_SE1"),
+ PINCTRL_PIN(88, "FLEX_CLK1_50"),
+ PINCTRL_PIN(89, "FLEX_CLK2_50"),
+ PINCTRL_PIN(90, "FLEX_CLK_125"),
+ /* WESTC */
+ PINCTRL_PIN(91, "TCK_PCH"),
+ PINCTRL_PIN(92, "JTAGX_PCH"),
+ PINCTRL_PIN(93, "TRST_N_PCH"),
+ PINCTRL_PIN(94, "TMS_PCH"),
+ PINCTRL_PIN(95, "TDI_PCH"),
+ PINCTRL_PIN(96, "TDO_PCH"),
+ /* WESTC_DFX */
+ PINCTRL_PIN(97, "CX_PRDY_N"),
+ PINCTRL_PIN(98, "CX_PREQ_N"),
+ PINCTRL_PIN(99, "CPU_FBREAK_OUT_N"),
+ PINCTRL_PIN(100, "TRIGGER0_N"),
+ PINCTRL_PIN(101, "TRIGGER1_N"),
+ /* WESTA */
+ PINCTRL_PIN(102, "DBG_PTI_CLK0"),
+ PINCTRL_PIN(103, "DBG_PTI_CLK3"),
+ PINCTRL_PIN(104, "DBG_PTI_DATA0"),
+ PINCTRL_PIN(105, "DBG_PTI_DATA1"),
+ PINCTRL_PIN(106, "DBG_PTI_DATA2"),
+ PINCTRL_PIN(107, "DBG_PTI_DATA3"),
+ PINCTRL_PIN(108, "DBG_PTI_DATA4"),
+ PINCTRL_PIN(109, "DBG_PTI_DATA5"),
+ PINCTRL_PIN(110, "DBG_PTI_DATA6"),
+ PINCTRL_PIN(111, "DBG_PTI_DATA7"),
+ /* WESTB */
+ PINCTRL_PIN(112, "DBG_PTI_DATA8"),
+ PINCTRL_PIN(113, "DBG_PTI_DATA9"),
+ PINCTRL_PIN(114, "DBG_PTI_DATA10"),
+ PINCTRL_PIN(115, "DBG_PTI_DATA11"),
+ PINCTRL_PIN(116, "DBG_PTI_DATA12"),
+ PINCTRL_PIN(117, "DBG_PTI_DATA13"),
+ PINCTRL_PIN(118, "DBG_PTI_DATA14"),
+ PINCTRL_PIN(119, "DBG_PTI_DATA15"),
+ PINCTRL_PIN(120, "DBG_SPARE0"),
+ PINCTRL_PIN(121, "DBG_SPARE1"),
+ PINCTRL_PIN(122, "DBG_SPARE2"),
+ PINCTRL_PIN(123, "DBG_SPARE3"),
+ /* WESTD */
+ PINCTRL_PIN(124, "CPU_PWR_GOOD"),
+ PINCTRL_PIN(125, "PLTRST_CPU_N"),
+ PINCTRL_PIN(126, "NAC_RESET_NAC_N"),
+ PINCTRL_PIN(127, "PCH_SBLINK_RX"),
+ PINCTRL_PIN(128, "PCH_SBLINK_TX"),
+ PINCTRL_PIN(129, "PMSYNC_CLK"),
+ PINCTRL_PIN(130, "CPU_ERR0_N"),
+ PINCTRL_PIN(131, "CPU_ERR1_N"),
+ PINCTRL_PIN(132, "CPU_ERR2_N"),
+ PINCTRL_PIN(133, "CPU_THERMTRIP_N"),
+ PINCTRL_PIN(134, "CPU_MSMI_N"),
+ PINCTRL_PIN(135, "CPU_CATERR_N"),
+ PINCTRL_PIN(136, "CPU_MEMTRIP_N"),
+ PINCTRL_PIN(137, "NAC_GR_N"),
+ PINCTRL_PIN(138, "NAC_XTAL_VALID"),
+ PINCTRL_PIN(139, "NAC_WAKE_N"),
+ PINCTRL_PIN(140, "NAC_SBLINK_CLK_S2N"),
+ PINCTRL_PIN(141, "NAC_SBLINK_N2S"),
+ PINCTRL_PIN(142, "NAC_SBLINK_S2N"),
+ PINCTRL_PIN(143, "NAC_SBLINK_CLK_N2S"),
+ /* WESTD_PECI */
+ PINCTRL_PIN(144, "ME_PECI"),
+ /* WESTF */
+ PINCTRL_PIN(145, "NAC_RMII_CLK"),
+ PINCTRL_PIN(146, "NAC_RGMII_CLK"),
+ PINCTRL_PIN(147, "NAC_SPARE0"),
+ PINCTRL_PIN(148, "NAC_SPARE1"),
+ PINCTRL_PIN(149, "NAC_SPARE2"),
+ PINCTRL_PIN(150, "NAC_INIT_SX_WAKE_N"),
+ PINCTRL_PIN(151, "NAC_GBE_GPIO0_S2N"),
+ PINCTRL_PIN(152, "NAC_GBE_GPIO1_S2N"),
+ PINCTRL_PIN(153, "NAC_GBE_GPIO2_S2N"),
+ PINCTRL_PIN(154, "NAC_GBE_GPIO3_S2N"),
+ PINCTRL_PIN(155, "NAC_NCSI_RXD0"),
+ PINCTRL_PIN(156, "NAC_NCSI_CLK_IN"),
+ PINCTRL_PIN(157, "NAC_NCSI_RXD1"),
+ PINCTRL_PIN(158, "NAC_NCSI_CRS_DV"),
+ PINCTRL_PIN(159, "NAC_NCSI_ARB_IN"),
+ PINCTRL_PIN(160, "NAC_NCSI_TX_EN"),
+ PINCTRL_PIN(161, "NAC_NCSI_TXD0"),
+ PINCTRL_PIN(162, "NAC_NCSI_TXD1"),
+ PINCTRL_PIN(163, "NAC_NCSI_ARB_OUT"),
+ PINCTRL_PIN(164, "NAC_NCSI_OE_N"),
+ PINCTRL_PIN(165, "NAC_GBE_SMB_CLK"),
+ PINCTRL_PIN(166, "NAC_GBE_SMB_DATA"),
+ PINCTRL_PIN(167, "NAC_GBE_SMB_ALRT_N"),
+ /* EAST2 */
+ PINCTRL_PIN(168, "USB_OC0_N"),
+ PINCTRL_PIN(169, "GBE_GPIO0"),
+ PINCTRL_PIN(170, "GBE_GPIO1"),
+ PINCTRL_PIN(171, "GBE_GPIO2"),
+ PINCTRL_PIN(172, "GBE_GPIO3"),
+ PINCTRL_PIN(173, "GBE_GPIO4"),
+ PINCTRL_PIN(174, "GBE_GPIO5"),
+ PINCTRL_PIN(175, "GBE_GPIO6"),
+ PINCTRL_PIN(176, "GBE_GPIO7"),
+ PINCTRL_PIN(177, "GBE_GPIO8"),
+ PINCTRL_PIN(178, "GBE_GPIO9"),
+ PINCTRL_PIN(179, "GBE_GPIO10"),
+ PINCTRL_PIN(180, "GBE_GPIO11"),
+ PINCTRL_PIN(181, "GBE_GPIO12"),
+ PINCTRL_PIN(182, "SATA0_LED_N"),
+ PINCTRL_PIN(183, "SATA1_LED_N"),
+ PINCTRL_PIN(184, "SATA_PDETECT0"),
+ PINCTRL_PIN(185, "SATA_PDETECT1"),
+ PINCTRL_PIN(186, "SATA0_SDOUT"),
+ PINCTRL_PIN(187, "SATA1_SDOUT"),
+ PINCTRL_PIN(188, "SATA2_LED_N"),
+ PINCTRL_PIN(189, "SATA_PDETECT2"),
+ PINCTRL_PIN(190, "SATA2_SDOUT"),
+ /* EAST3 */
+ PINCTRL_PIN(191, "ESPI_IO0"),
+ PINCTRL_PIN(192, "ESPI_IO1"),
+ PINCTRL_PIN(193, "ESPI_IO2"),
+ PINCTRL_PIN(194, "ESPI_IO3"),
+ PINCTRL_PIN(195, "ESPI_CLK"),
+ PINCTRL_PIN(196, "ESPI_RST_N"),
+ PINCTRL_PIN(197, "ESPI_CS0_N"),
+ PINCTRL_PIN(198, "ESPI_ALRT0_N"),
+ PINCTRL_PIN(199, "ESPI_CS1_N"),
+ PINCTRL_PIN(200, "ESPI_ALRT1_N"),
+ PINCTRL_PIN(201, "ESPI_CLK_LOOPBK"),
+ /* EAST0 */
+ PINCTRL_PIN(202, "SPI_CS0_N"),
+ PINCTRL_PIN(203, "SPI_CS1_N"),
+ PINCTRL_PIN(204, "SPI_MOSI_IO0"),
+ PINCTRL_PIN(205, "SPI_MISO_IO1"),
+ PINCTRL_PIN(206, "SPI_IO2"),
+ PINCTRL_PIN(207, "SPI_IO3"),
+ PINCTRL_PIN(208, "SPI_CLK"),
+ PINCTRL_PIN(209, "SPI_CLK_LOOPBK"),
+ PINCTRL_PIN(210, "SUSPWRDNACK"),
+ PINCTRL_PIN(211, "PMU_SUSCLK"),
+ PINCTRL_PIN(212, "ADR_COMPLETE"),
+ PINCTRL_PIN(213, "ADR_TRIGGER_N"),
+ PINCTRL_PIN(214, "PMU_SLP_S45_N"),
+ PINCTRL_PIN(215, "PMU_SLP_S3_N"),
+ PINCTRL_PIN(216, "PMU_WAKE_N"),
+ PINCTRL_PIN(217, "PMU_PWRBTN_N"),
+ PINCTRL_PIN(218, "PMU_RESETBUTTON_N"),
+ PINCTRL_PIN(219, "PMU_PLTRST_N"),
+ PINCTRL_PIN(220, "SUS_STAT_N"),
+ PINCTRL_PIN(221, "PMU_I2C_CLK"),
+ PINCTRL_PIN(222, "PMU_I2C_DATA"),
+ PINCTRL_PIN(223, "PECI_SMB_CLK"),
+ PINCTRL_PIN(224, "PECI_SMB_DATA"),
+ PINCTRL_PIN(225, "PECI_SMB_ALRT_N"),
+ /* EMMC */
+ PINCTRL_PIN(226, "EMMC_CMD"),
+ PINCTRL_PIN(227, "EMMC_STROBE"),
+ PINCTRL_PIN(228, "EMMC_CLK"),
+ PINCTRL_PIN(229, "EMMC_D0"),
+ PINCTRL_PIN(230, "EMMC_D1"),
+ PINCTRL_PIN(231, "EMMC_D2"),
+ PINCTRL_PIN(232, "EMMC_D3"),
+ PINCTRL_PIN(233, "EMMC_D4"),
+ PINCTRL_PIN(234, "EMMC_D5"),
+ PINCTRL_PIN(235, "EMMC_D6"),
+ PINCTRL_PIN(236, "EMMC_D7"),
+};
+
+static const struct intel_padgroup cdf_community0_gpps[] = {
+ CDF_GPP(0, 0, 23), /* WEST2 */
+ CDF_GPP(1, 24, 47), /* WEST3 */
+ CDF_GPP(2, 48, 70), /* WEST01 */
+ CDF_GPP(3, 71, 90), /* WEST5 */
+ CDF_GPP(4, 91, 96), /* WESTC */
+ CDF_GPP(5, 97, 101), /* WESTC_DFX */
+ CDF_GPP(6, 102, 111), /* WESTA */
+ CDF_GPP(7, 112, 123), /* WESTB */
+ CDF_GPP(8, 124, 143), /* WESTD */
+ CDF_GPP(9, 144, 144), /* WESTD_PECI */
+ CDF_GPP(10, 145, 167), /* WESTF */
+};
+
+static const struct intel_padgroup cdf_community1_gpps[] = {
+ CDF_GPP(0, 168, 190), /* EAST2 */
+ CDF_GPP(1, 191, 201), /* EAST3 */
+ CDF_GPP(2, 202, 225), /* EAST0 */
+ CDF_GPP(3, 226, 236), /* EMMC */
+};
+
+static const struct intel_community cdf_communities[] = {
+ CDF_COMMUNITY(0, 0, 167, cdf_community0_gpps), /* West */
+ CDF_COMMUNITY(1, 168, 236, cdf_community1_gpps), /* East */
+};
+
+static const struct intel_pinctrl_soc_data cdf_soc_data = {
+ .pins = cdf_pins,
+ .npins = ARRAY_SIZE(cdf_pins),
+ .communities = cdf_communities,
+ .ncommunities = ARRAY_SIZE(cdf_communities),
+};
+
+static int cdf_pinctrl_probe(struct platform_device *pdev)
+{
+ return intel_pinctrl_probe(pdev, &cdf_soc_data);
+}
+
+static const struct dev_pm_ops cdf_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static const struct acpi_device_id cdf_pinctrl_acpi_match[] = {
+ { "INTC3001" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cdf_pinctrl_acpi_match);
+
+static struct platform_driver cdf_pinctrl_driver = {
+ .probe = cdf_pinctrl_probe,
+ .driver = {
+ .name = "cedarfork-pinctrl",
+ .acpi_match_table = cdf_pinctrl_acpi_match,
+ .pm = &cdf_pinctrl_pm_ops,
+ },
+};
+
+static int __init cdf_pinctrl_init(void)
+{
+ return platform_driver_register(&cdf_pinctrl_driver);
+}
+subsys_initcall(cdf_pinctrl_init);
+
+static void __exit cdf_pinctrl_exit(void)
+{
+ platform_driver_unregister(&cdf_pinctrl_driver);
+}
+module_exit(cdf_pinctrl_exit);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Cedar Fork PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index fadbca907c7c..bdedb6325c72 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -491,7 +491,7 @@ static const struct chv_community north_community = {
.ngpio_ranges = ARRAY_SIZE(north_gpio_ranges),
.ngpios = ARRAY_SIZE(north_pins),
/*
- * North community can benerate GPIO interrupts only for the first
+ * North community can generate GPIO interrupts only for the first
* 8 interrupts. The upper half (8-15) can only be used to trigger
* GPEs.
*/
@@ -1523,7 +1523,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
unsigned irq, offset;
offset = pctrl->intr_lines[intr_line];
- irq = irq_find_mapping(gc->irqdomain, offset);
+ irq = irq_find_mapping(gc->irq.domain, offset);
generic_handle_irq(irq);
}
@@ -1585,7 +1585,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->base = -1;
- chip->irq_need_valid_mask = need_valid_mask;
+ chip->irq.need_valid_mask = need_valid_mask;
ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
if (ret) {
@@ -1617,7 +1617,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
if (need_valid_mask && intsel >= pctrl->community->nirqs)
- clear_bit(i, chip->irq_valid_mask);
+ clear_bit(i, chip->irq.valid_mask);
}
/* Clear all interrupts */
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 71df0f70b61f..12a1af45acb9 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -30,8 +30,6 @@
#define PADBAR 0x00c
#define GPI_IS 0x100
-#define GPI_GPE_STS 0x140
-#define GPI_GPE_EN 0x160
#define PADOWN_BITS 4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
@@ -818,7 +816,7 @@ static void intel_gpio_irq_ack(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
padgrp = intel_community_get_padgroup(community, pin);
if (!padgrp)
@@ -826,9 +824,10 @@ static void intel_gpio_irq_ack(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock(&pctrl->lock);
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
raw_spin_unlock(&pctrl->lock);
}
}
@@ -843,7 +842,7 @@ static void intel_gpio_irq_enable(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
const struct intel_padgroup *padgrp;
- unsigned gpp, gpp_offset;
+ unsigned gpp, gpp_offset, is_offset;
unsigned long flags;
u32 value;
@@ -853,10 +852,11 @@ static void intel_gpio_irq_enable(struct irq_data *d)
gpp = padgrp->reg_num;
gpp_offset = padgroup_offset(padgrp, pin);
+ is_offset = community->is_offset + gpp * 4;
raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Clear interrupt status first to avoid unexpected interrupt */
- writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ writel(BIT(gpp_offset), community->regs + is_offset);
value = readl(community->regs + community->ie_offset + gpp * 4);
value |= BIT(gpp_offset);
@@ -991,7 +991,8 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4);
+ pending = readl(community->regs + community->is_offset +
+ padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
padgrp->reg_num * 4);
@@ -1005,7 +1006,7 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
if (padno >= community->npins)
break;
- irq = irq_find_mapping(gc->irqdomain,
+ irq = irq_find_mapping(gc->irq.domain,
community->pin_base + padno);
generic_handle_irq(irq);
@@ -1241,6 +1242,9 @@ int intel_pinctrl_probe(struct platform_device *pdev,
community->regs = regs;
community->pad_regs = regs + padbar;
+ if (!community->is_offset)
+ community->is_offset = GPI_IS;
+
ret = intel_pinctrl_add_padgroups(pctrl, community);
if (ret)
return ret;
@@ -1356,7 +1360,7 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
for (gpp = 0; gpp < community->ngpps; gpp++) {
/* Mask and clear all interrupts */
writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + GPI_IS + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
}
}
}
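[Editor's note] The pinctrl-intel change above replaces the hard-coded GPI_IS register base with a per-community is_offset, falling back to the old 0x100 default when the SoC description leaves it zero (Cedar Fork supplies 0x200). A tiny sketch of that "zero means default" parameterization; the struct is an illustrative subset of intel_community, and the default is resolved per call here rather than once at probe as the driver does.

#include <linux/bits.h>		/* BIT() */
#include <linux/io.h>		/* writel() */

#define GPI_IS_DEFAULT	0x100	/* matches the previously hard-coded GPI_IS */

struct my_community {
	void __iomem *regs;
	unsigned int is_offset;
};

/* Ack one interrupt-status bit of pad group @gpp. */
static void my_ack(struct my_community *c, unsigned int gpp, unsigned int bit)
{
	unsigned int is_offset = c->is_offset ? c->is_offset : GPI_IS_DEFAULT;

	writel(BIT(bit), c->regs + is_offset + gpp * 4);
}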
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 7fdb07753c2d..13b0bd6eb2a2 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -73,6 +73,8 @@ struct intel_padgroup {
* @hostown_offset: Register offset of HOSTSW_OWN from @regs. If %0 then it
* is assumed that the host owns the pin (rather than
* ACPI).
+ * @is_offset: Register offset of GPI_IS from @regs. If %0 then uses the
+ * default (%0x100).
* @ie_offset: Register offset of GPI_IE from @regs.
* @pin_base: Starting pin of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
@@ -98,6 +100,7 @@ struct intel_community {
unsigned padown_offset;
unsigned padcfglock_offset;
unsigned hostown_offset;
+ unsigned is_offset;
unsigned ie_offset;
unsigned pin_base;
unsigned gpp_size;
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index e59c613d4ddd..10d90140a38a 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Core
obj-y += pinctrl-mtk-common.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6397.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6397.h
index 4eb98ddb40a4..17df4cfbde4e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6397.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6397.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PINCTRL_MTK_MT6397_H
#define __PINCTRL_MTK_MT6397_H
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8127.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8127.h
index 212559c147f8..850483d7d9be 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt8127.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt8127.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PINCTRL_MTK_MT8127_H
#define __PINCTRL_MTK_MT8127_H
diff --git a/drivers/pinctrl/meson/Kconfig b/drivers/pinctrl/meson/Kconfig
new file mode 100644
index 000000000000..1a51778759ea
--- /dev/null
+++ b/drivers/pinctrl/meson/Kconfig
@@ -0,0 +1,41 @@
+menuconfig PINCTRL_MESON
+ bool "Amlogic SoC pinctrl drivers"
+ depends on ARCH_MESON
+ depends on OF
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+
+if PINCTRL_MESON
+
+config PINCTRL_MESON8
+ bool "Meson 8 SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8B
+ bool "Meson 8b SoC pinctrl driver"
+ depends on ARM
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXBB
+ bool "Meson gxbb SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON_GXL
+ bool "Meson gxl SoC pinctrl driver"
+ depends on ARM64
+ select PINCTRL_MESON8_PMX
+ default y
+
+config PINCTRL_MESON8_PMX
+ bool
+
+endif
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index 27c5b5126008..cbd47bb74549 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,3 +1,6 @@
-obj-y += pinctrl-meson8.o pinctrl-meson8b.o
-obj-y += pinctrl-meson-gxbb.o pinctrl-meson-gxl.o
-obj-y += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON) += pinctrl-meson.o
+obj-$(CONFIG_PINCTRL_MESON8_PMX) += pinctrl-meson8-pmx.o
+obj-$(CONFIG_PINCTRL_MESON8) += pinctrl-meson8.o
+obj-$(CONFIG_PINCTRL_MESON8B) += pinctrl-meson8b.o
+obj-$(CONFIG_PINCTRL_MESON_GXBB) += pinctrl-meson-gxbb.o
+obj-$(CONFIG_PINCTRL_MESON_GXL) += pinctrl-meson-gxl.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 7bbc0d3cddcf..9079020259c5 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -14,418 +14,417 @@
#include <dt-bindings/gpio/meson-gxbb-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 14
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
- MESON_PIN(BOOT_16, EE_OFF),
- MESON_PIN(BOOT_17, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOY_0, EE_OFF),
- MESON_PIN(GPIOY_1, EE_OFF),
- MESON_PIN(GPIOY_2, EE_OFF),
- MESON_PIN(GPIOY_3, EE_OFF),
- MESON_PIN(GPIOY_4, EE_OFF),
- MESON_PIN(GPIOY_5, EE_OFF),
- MESON_PIN(GPIOY_6, EE_OFF),
- MESON_PIN(GPIOY_7, EE_OFF),
- MESON_PIN(GPIOY_8, EE_OFF),
- MESON_PIN(GPIOY_9, EE_OFF),
- MESON_PIN(GPIOY_10, EE_OFF),
- MESON_PIN(GPIOY_11, EE_OFF),
- MESON_PIN(GPIOY_12, EE_OFF),
- MESON_PIN(GPIOY_13, EE_OFF),
- MESON_PIN(GPIOY_14, EE_OFF),
- MESON_PIN(GPIOY_15, EE_OFF),
- MESON_PIN(GPIOY_16, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
- MESON_PIN(GPIOX_19, EE_OFF),
- MESON_PIN(GPIOX_20, EE_OFF),
- MESON_PIN(GPIOX_21, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
- MESON_PIN(GPIOCLK_2, EE_OFF),
- MESON_PIN(GPIOCLK_3, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOX_22),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
+ MESON_PIN(GPIOCLK_2),
+ MESON_PIN(GPIOCLK_3),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_13, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_14, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_12, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_x_pins[] = { PIN(GPIOX_6, EE_OFF) };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, EE_OFF) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_19, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-static const unsigned int pwm_f_y_pins[] = { PIN(GPIOY_15, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_out_ch23_y_pins[] = { PIN(GPIOY_8, EE_OFF) };
-static const unsigned int i2s_out_ch45_y_pins[] = { PIN(GPIOY_9, EE_OFF) };
-static const unsigned int i2s_out_ch67_y_pins[] = { PIN(GPIOY_10, EE_OFF) };
-
-static const unsigned int spdif_out_y_pins[] = { PIN(GPIOY_12, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_sclk_pins[] = { GPIOZ_6 };
+static const unsigned int spi_ss0_pins[] = { GPIOZ_7 };
+static const unsigned int spi_miso_pins[] = { GPIOZ_12 };
+static const unsigned int spi_mosi_pins[] = { GPIOZ_13 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_x_pins[] = { GPIOX_6 };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_e_pins[] = { GPIOX_19 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+static const unsigned int pwm_f_y_pins[] = { GPIOY_15 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_out_ch23_y_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch45_y_pins[] = { GPIOY_9 };
+static const unsigned int i2s_out_ch67_y_pins[] = { GPIOY_10 };
+
+static const unsigned int spdif_out_y_pins[] = { GPIOY_12 };
static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
- MESON_PIN(GPIOAO_10, 0),
- MESON_PIN(GPIOAO_11, 0),
- MESON_PIN(GPIOAO_12, 0),
- MESON_PIN(GPIOAO_13, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int pwm_ao_a_12_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOAO_10, 0) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, 0) };
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_13_pins[] = { PIN(GPIOAO_13, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_12, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_12, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = { GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_6_pins[] = { GPIOAO_6 };
+static const unsigned int pwm_ao_a_12_pins[] = { GPIOAO_12 };
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_13 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_12 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_13 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_13_pins[] = { GPIOAO_13 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_12 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
- GPIO_GROUP(BOOT_16, EE_OFF),
- GPIO_GROUP(BOOT_17, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOY_0, EE_OFF),
- GPIO_GROUP(GPIOY_1, EE_OFF),
- GPIO_GROUP(GPIOY_2, EE_OFF),
- GPIO_GROUP(GPIOY_3, EE_OFF),
- GPIO_GROUP(GPIOY_4, EE_OFF),
- GPIO_GROUP(GPIOY_5, EE_OFF),
- GPIO_GROUP(GPIOY_6, EE_OFF),
- GPIO_GROUP(GPIOY_7, EE_OFF),
- GPIO_GROUP(GPIOY_8, EE_OFF),
- GPIO_GROUP(GPIOY_9, EE_OFF),
- GPIO_GROUP(GPIOY_10, EE_OFF),
- GPIO_GROUP(GPIOY_11, EE_OFF),
- GPIO_GROUP(GPIOY_12, EE_OFF),
- GPIO_GROUP(GPIOY_13, EE_OFF),
- GPIO_GROUP(GPIOY_14, EE_OFF),
- GPIO_GROUP(GPIOY_15, EE_OFF),
- GPIO_GROUP(GPIOY_16, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
- GPIO_GROUP(GPIOX_19, EE_OFF),
- GPIO_GROUP(GPIOX_20, EE_OFF),
- GPIO_GROUP(GPIOX_21, EE_OFF),
- GPIO_GROUP(GPIOX_22, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
- GPIO_GROUP(GPIOCLK_2, EE_OFF),
- GPIO_GROUP(GPIOCLK_3, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+ GPIO_GROUP(BOOT_16),
+ GPIO_GROUP(BOOT_17),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOX_22),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+ GPIO_GROUP(GPIOCLK_2),
+ GPIO_GROUP(GPIOCLK_3),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 8, 5),
@@ -522,20 +521,20 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
- GPIO_GROUP(GPIOAO_10, 0),
- GPIO_GROUP(GPIOAO_11, 0),
- GPIO_GROUP(GPIOAO_12, 0),
- GPIO_GROUP(GPIOAO_13, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
/* bank AO */
GROUP(uart_tx_ao_b, 0, 24),
@@ -565,6 +564,9 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GROUP(spdif_out_ao_13, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -600,8 +602,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
"GPIOX_20", "GPIOX_21", "GPIOX_22",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -710,6 +710,8 @@ static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
"GPIOAO_10", "GPIOAO_11", "GPIOAO_12", "GPIOAO_13",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -751,6 +753,7 @@ static const char * const pwm_ao_b_groups[] = {
static const char * const i2s_out_ao_groups[] = {
"i2s_am_clk", "i2s_out_ao_clk", "i2s_out_lr_clk",
"i2s_out_ch01_ao", "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -806,25 +809,24 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_22, 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_3, 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_17, 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_3, 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxbb_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_13, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 14,
.pins = meson_gxbb_periphs_pins,
.groups = meson_gxbb_periphs_groups,
.funcs = meson_gxbb_periphs_functions,
@@ -833,11 +835,11 @@ struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxbb_aobus_pins,
.groups = meson_gxbb_aobus_groups,
.funcs = meson_gxbb_aobus_functions,
@@ -846,4 +848,26 @@ struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxbb_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxbb_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
+ .data = &meson_gxbb_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
+ .data = &meson_gxbb_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxbb_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxbb-pinctrl",
+ .of_match_table = meson_gxbb_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxbb_pinctrl_driver);
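
For reference while reading the two conversions in this patch: MESON_PIN(),
GPIO_GROUP() and BANK() now take the GPIO enum value directly instead of a
(pin, EE_OFF) pair, since pin numbers no longer carry a per-controller base
offset. A minimal sketch of what the simplified helpers are assumed to expand
to follows; the authoritative definitions live in pinctrl-meson.h and are not
part of these hunks, so the exact bodies below are assumptions, not the patch.

	/* Sketch only -- assumed shape of the helpers after EE_OFF removal. */
	#include <linux/pinctrl/pinctrl.h>	/* provides PINCTRL_PIN(number, name) */

	/* Pin descriptor: the pin number is now simply the GPIO enum value. */
	#define MESON_PIN(x)	PINCTRL_PIN(x, #x)
	/* e.g. MESON_PIN(GPIOZ_0) -> { .number = GPIOZ_0, .name = "GPIOZ_0" } */

	/* Single-pin GPIO group, likewise without the offset argument. */
	#define GPIO_GROUP(a)						\
		{							\
			.name = #a,					\
			.pins = (const unsigned int[]){ a },		\
			.num_pins = 1,					\
		}
	/* e.g. GPIO_GROUP(GPIOZ_0) -> { .name = "GPIOZ_0",
	 *                               .pins = (const unsigned int[]){ GPIOZ_0 },
	 *                               .num_pins = 1 } */
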
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 36c14b85fc7c..b3786cde963d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -14,408 +14,400 @@
#include <dt-bindings/gpio/meson-gxl-gpio.h>
#include "pinctrl-meson.h"
-
-#define EE_OFF 10
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
- MESON_PIN(GPIOZ_0, EE_OFF),
- MESON_PIN(GPIOZ_1, EE_OFF),
- MESON_PIN(GPIOZ_2, EE_OFF),
- MESON_PIN(GPIOZ_3, EE_OFF),
- MESON_PIN(GPIOZ_4, EE_OFF),
- MESON_PIN(GPIOZ_5, EE_OFF),
- MESON_PIN(GPIOZ_6, EE_OFF),
- MESON_PIN(GPIOZ_7, EE_OFF),
- MESON_PIN(GPIOZ_8, EE_OFF),
- MESON_PIN(GPIOZ_9, EE_OFF),
- MESON_PIN(GPIOZ_10, EE_OFF),
- MESON_PIN(GPIOZ_11, EE_OFF),
- MESON_PIN(GPIOZ_12, EE_OFF),
- MESON_PIN(GPIOZ_13, EE_OFF),
- MESON_PIN(GPIOZ_14, EE_OFF),
- MESON_PIN(GPIOZ_15, EE_OFF),
-
- MESON_PIN(GPIOH_0, EE_OFF),
- MESON_PIN(GPIOH_1, EE_OFF),
- MESON_PIN(GPIOH_2, EE_OFF),
- MESON_PIN(GPIOH_3, EE_OFF),
- MESON_PIN(GPIOH_4, EE_OFF),
- MESON_PIN(GPIOH_5, EE_OFF),
- MESON_PIN(GPIOH_6, EE_OFF),
- MESON_PIN(GPIOH_7, EE_OFF),
- MESON_PIN(GPIOH_8, EE_OFF),
- MESON_PIN(GPIOH_9, EE_OFF),
-
- MESON_PIN(BOOT_0, EE_OFF),
- MESON_PIN(BOOT_1, EE_OFF),
- MESON_PIN(BOOT_2, EE_OFF),
- MESON_PIN(BOOT_3, EE_OFF),
- MESON_PIN(BOOT_4, EE_OFF),
- MESON_PIN(BOOT_5, EE_OFF),
- MESON_PIN(BOOT_6, EE_OFF),
- MESON_PIN(BOOT_7, EE_OFF),
- MESON_PIN(BOOT_8, EE_OFF),
- MESON_PIN(BOOT_9, EE_OFF),
- MESON_PIN(BOOT_10, EE_OFF),
- MESON_PIN(BOOT_11, EE_OFF),
- MESON_PIN(BOOT_12, EE_OFF),
- MESON_PIN(BOOT_13, EE_OFF),
- MESON_PIN(BOOT_14, EE_OFF),
- MESON_PIN(BOOT_15, EE_OFF),
-
- MESON_PIN(CARD_0, EE_OFF),
- MESON_PIN(CARD_1, EE_OFF),
- MESON_PIN(CARD_2, EE_OFF),
- MESON_PIN(CARD_3, EE_OFF),
- MESON_PIN(CARD_4, EE_OFF),
- MESON_PIN(CARD_5, EE_OFF),
- MESON_PIN(CARD_6, EE_OFF),
-
- MESON_PIN(GPIODV_0, EE_OFF),
- MESON_PIN(GPIODV_1, EE_OFF),
- MESON_PIN(GPIODV_2, EE_OFF),
- MESON_PIN(GPIODV_3, EE_OFF),
- MESON_PIN(GPIODV_4, EE_OFF),
- MESON_PIN(GPIODV_5, EE_OFF),
- MESON_PIN(GPIODV_6, EE_OFF),
- MESON_PIN(GPIODV_7, EE_OFF),
- MESON_PIN(GPIODV_8, EE_OFF),
- MESON_PIN(GPIODV_9, EE_OFF),
- MESON_PIN(GPIODV_10, EE_OFF),
- MESON_PIN(GPIODV_11, EE_OFF),
- MESON_PIN(GPIODV_12, EE_OFF),
- MESON_PIN(GPIODV_13, EE_OFF),
- MESON_PIN(GPIODV_14, EE_OFF),
- MESON_PIN(GPIODV_15, EE_OFF),
- MESON_PIN(GPIODV_16, EE_OFF),
- MESON_PIN(GPIODV_17, EE_OFF),
- MESON_PIN(GPIODV_18, EE_OFF),
- MESON_PIN(GPIODV_19, EE_OFF),
- MESON_PIN(GPIODV_20, EE_OFF),
- MESON_PIN(GPIODV_21, EE_OFF),
- MESON_PIN(GPIODV_22, EE_OFF),
- MESON_PIN(GPIODV_23, EE_OFF),
- MESON_PIN(GPIODV_24, EE_OFF),
- MESON_PIN(GPIODV_25, EE_OFF),
- MESON_PIN(GPIODV_26, EE_OFF),
- MESON_PIN(GPIODV_27, EE_OFF),
- MESON_PIN(GPIODV_28, EE_OFF),
- MESON_PIN(GPIODV_29, EE_OFF),
-
- MESON_PIN(GPIOX_0, EE_OFF),
- MESON_PIN(GPIOX_1, EE_OFF),
- MESON_PIN(GPIOX_2, EE_OFF),
- MESON_PIN(GPIOX_3, EE_OFF),
- MESON_PIN(GPIOX_4, EE_OFF),
- MESON_PIN(GPIOX_5, EE_OFF),
- MESON_PIN(GPIOX_6, EE_OFF),
- MESON_PIN(GPIOX_7, EE_OFF),
- MESON_PIN(GPIOX_8, EE_OFF),
- MESON_PIN(GPIOX_9, EE_OFF),
- MESON_PIN(GPIOX_10, EE_OFF),
- MESON_PIN(GPIOX_11, EE_OFF),
- MESON_PIN(GPIOX_12, EE_OFF),
- MESON_PIN(GPIOX_13, EE_OFF),
- MESON_PIN(GPIOX_14, EE_OFF),
- MESON_PIN(GPIOX_15, EE_OFF),
- MESON_PIN(GPIOX_16, EE_OFF),
- MESON_PIN(GPIOX_17, EE_OFF),
- MESON_PIN(GPIOX_18, EE_OFF),
-
- MESON_PIN(GPIOCLK_0, EE_OFF),
- MESON_PIN(GPIOCLK_1, EE_OFF),
-
- MESON_PIN(GPIO_TEST_N, EE_OFF),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(GPIOZ_15),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+
+ MESON_PIN(GPIOCLK_0),
+ MESON_PIN(GPIOCLK_1),
};
static const unsigned int emmc_nand_d07_pins[] = {
- PIN(BOOT_0, EE_OFF), PIN(BOOT_1, EE_OFF), PIN(BOOT_2, EE_OFF),
- PIN(BOOT_3, EE_OFF), PIN(BOOT_4, EE_OFF), PIN(BOOT_5, EE_OFF),
- PIN(BOOT_6, EE_OFF), PIN(BOOT_7, EE_OFF),
-};
-static const unsigned int emmc_clk_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int emmc_cmd_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int emmc_ds_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int spi_mosi_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int spi_miso_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int spi_ss0_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int spi_sclk_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
-static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
-static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
-static const unsigned int sdcard_d3_pins[] = { PIN(CARD_4, EE_OFF) };
-static const unsigned int sdcard_cmd_pins[] = { PIN(CARD_3, EE_OFF) };
-static const unsigned int sdcard_clk_pins[] = { PIN(CARD_2, EE_OFF) };
-
-static const unsigned int sdio_d0_pins[] = { PIN(GPIOX_0, EE_OFF) };
-static const unsigned int sdio_d1_pins[] = { PIN(GPIOX_1, EE_OFF) };
-static const unsigned int sdio_d2_pins[] = { PIN(GPIOX_2, EE_OFF) };
-static const unsigned int sdio_d3_pins[] = { PIN(GPIOX_3, EE_OFF) };
-static const unsigned int sdio_cmd_pins[] = { PIN(GPIOX_4, EE_OFF) };
-static const unsigned int sdio_clk_pins[] = { PIN(GPIOX_5, EE_OFF) };
-static const unsigned int sdio_irq_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int nand_ce0_pins[] = { PIN(BOOT_8, EE_OFF) };
-static const unsigned int nand_ce1_pins[] = { PIN(BOOT_9, EE_OFF) };
-static const unsigned int nand_rb0_pins[] = { PIN(BOOT_10, EE_OFF) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, EE_OFF) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, EE_OFF) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, EE_OFF) };
-static const unsigned int nand_ren_wr_pins[] = { PIN(BOOT_14, EE_OFF) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, EE_OFF) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_12, EE_OFF) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_13, EE_OFF) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_14, EE_OFF) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_15, EE_OFF) };
-
-static const unsigned int uart_tx_b_pins[] = { PIN(GPIODV_24, EE_OFF) };
-static const unsigned int uart_rx_b_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int uart_cts_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-static const unsigned int uart_rts_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOX_11, EE_OFF) };
-
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, EE_OFF) };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, EE_OFF) };
-
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIODV_27, EE_OFF) };
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIODV_26, EE_OFF) };
-
-static const unsigned int i2c_sck_c_pins[] = { PIN(GPIODV_29, EE_OFF) };
-static const unsigned int i2c_sda_c_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int i2c_sck_c_dv19_pins[] = { PIN(GPIODV_19, EE_OFF) };
-static const unsigned int i2c_sda_c_dv18_pins[] = { PIN(GPIODV_18, EE_OFF) };
-
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_0, EE_OFF) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_1, EE_OFF) };
-static const unsigned int eth_clk_rx_clk_pins[] = { PIN(GPIOZ_2, EE_OFF) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_3, EE_OFF) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_4, EE_OFF) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int eth_rxd2_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int eth_rxd3_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-static const unsigned int eth_rgmii_tx_clk_pins[] = { PIN(GPIOZ_8, EE_OFF) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_9, EE_OFF) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_10, EE_OFF) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_11, EE_OFF) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOZ_12, EE_OFF) };
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOZ_13, EE_OFF) };
-
-static const unsigned int pwm_a_pins[] = { PIN(GPIOX_6, EE_OFF) };
-
-static const unsigned int pwm_b_pins[] = { PIN(GPIODV_29, EE_OFF) };
-
-static const unsigned int pwm_c_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, EE_OFF) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_16, EE_OFF) };
-
-static const unsigned int pwm_f_clk_pins[] = { PIN(GPIOCLK_1, EE_OFF) };
-static const unsigned int pwm_f_x_pins[] = { PIN(GPIOX_7, EE_OFF) };
-
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, EE_OFF) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, EE_OFF) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, EE_OFF) };
-
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOH_6, EE_OFF) };
-static const unsigned int i2s_out_ao_clk_pins[] = { PIN(GPIOH_7, EE_OFF) };
-static const unsigned int i2s_out_lr_clk_pins[] = { PIN(GPIOH_8, EE_OFF) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOH_9, EE_OFF) };
-static const unsigned int i2s_out_ch23_z_pins[] = { PIN(GPIOZ_5, EE_OFF) };
-static const unsigned int i2s_out_ch45_z_pins[] = { PIN(GPIOZ_6, EE_OFF) };
-static const unsigned int i2s_out_ch67_z_pins[] = { PIN(GPIOZ_7, EE_OFF) };
-
-static const unsigned int spdif_out_h_pins[] = { PIN(GPIOH_4, EE_OFF) };
-
-static const unsigned int eth_link_led_pins[] = { PIN(GPIOZ_14, EE_OFF) };
-static const unsigned int eth_act_led_pins[] = { PIN(GPIOZ_15, EE_OFF) };
-
-static const unsigned int tsin_a_d0_pins[] = { PIN(GPIODV_0, EE_OFF) };
-static const unsigned int tsin_a_d0_x_pins[] = { PIN(GPIOX_10, EE_OFF) };
-static const unsigned int tsin_a_clk_pins[] = { PIN(GPIODV_8, EE_OFF) };
-static const unsigned int tsin_a_clk_x_pins[] = { PIN(GPIOX_11, EE_OFF) };
-static const unsigned int tsin_a_sop_pins[] = { PIN(GPIODV_9, EE_OFF) };
-static const unsigned int tsin_a_sop_x_pins[] = { PIN(GPIOX_8, EE_OFF) };
-static const unsigned int tsin_a_d_valid_pins[] = { PIN(GPIODV_10, EE_OFF) };
-static const unsigned int tsin_a_d_valid_x_pins[] = { PIN(GPIOX_9, EE_OFF) };
-static const unsigned int tsin_a_fail_pins[] = { PIN(GPIODV_11, EE_OFF) };
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7,
+};
+static const unsigned int emmc_clk_pins[] = { BOOT_8 };
+static const unsigned int emmc_cmd_pins[] = { BOOT_10 };
+static const unsigned int emmc_ds_pins[] = { BOOT_15 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_15 };
+
+static const unsigned int spi_mosi_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_pins[] = { GPIOX_9 };
+static const unsigned int spi_ss0_pins[] = { GPIOX_10 };
+static const unsigned int spi_sclk_pins[] = { GPIOX_11 };
+
+static const unsigned int sdcard_d0_pins[] = { CARD_1 };
+static const unsigned int sdcard_d1_pins[] = { CARD_0 };
+static const unsigned int sdcard_d2_pins[] = { CARD_5 };
+static const unsigned int sdcard_d3_pins[] = { CARD_4 };
+static const unsigned int sdcard_cmd_pins[] = { CARD_3 };
+static const unsigned int sdcard_clk_pins[] = { CARD_2 };
+
+static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
+static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
+static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
+static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
+static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
+static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
+static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
+
+static const unsigned int nand_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_wr_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+static const unsigned int uart_tx_c_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_c_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_c_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_c_pins[] = { GPIOX_11 };
+
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+
+static const unsigned int i2c_sck_b_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_b_pins[] = { GPIODV_26 };
+
+static const unsigned int i2c_sck_c_pins[] = { GPIODV_29 };
+static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
+
+static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
+static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+
+static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
+static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_3 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_4 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_5 };
+static const unsigned int eth_rxd2_pins[] = { GPIOZ_6 };
+static const unsigned int eth_rxd3_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rgmii_tx_clk_pins[] = { GPIOZ_8 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_9 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_10 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_11 };
+static const unsigned int eth_txd2_pins[] = { GPIOZ_12 };
+static const unsigned int eth_txd3_pins[] = { GPIOZ_13 };
+
+static const unsigned int pwm_a_pins[] = { GPIOX_6 };
+
+static const unsigned int pwm_b_pins[] = { GPIODV_29 };
+
+static const unsigned int pwm_c_pins[] = { GPIOZ_15 };
+
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_16 };
+
+static const unsigned int pwm_f_clk_pins[] = { GPIOCLK_1 };
+static const unsigned int pwm_f_x_pins[] = { GPIOX_7 };
+
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+
+static const unsigned int i2s_am_clk_pins[] = { GPIOH_6 };
+static const unsigned int i2s_out_ao_clk_pins[] = { GPIOH_7 };
+static const unsigned int i2s_out_lr_clk_pins[] = { GPIOH_8 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOH_9 };
+static const unsigned int i2s_out_ch23_z_pins[] = { GPIOZ_5 };
+static const unsigned int i2s_out_ch45_z_pins[] = { GPIOZ_6 };
+static const unsigned int i2s_out_ch67_z_pins[] = { GPIOZ_7 };
+
+static const unsigned int spdif_out_h_pins[] = { GPIOH_4 };
+
+static const unsigned int eth_link_led_pins[] = { GPIOZ_14 };
+static const unsigned int eth_act_led_pins[] = { GPIOZ_15 };
+
+static const unsigned int tsin_a_d0_pins[] = { GPIODV_0 };
+static const unsigned int tsin_a_d0_x_pins[] = { GPIOX_10 };
+static const unsigned int tsin_a_clk_pins[] = { GPIODV_8 };
+static const unsigned int tsin_a_clk_x_pins[] = { GPIOX_11 };
+static const unsigned int tsin_a_sop_pins[] = { GPIODV_9 };
+static const unsigned int tsin_a_sop_x_pins[] = { GPIOX_8 };
+static const unsigned int tsin_a_d_valid_pins[] = { GPIODV_10 };
+static const unsigned int tsin_a_d_valid_x_pins[] = { GPIOX_9 };
+static const unsigned int tsin_a_fail_pins[] = { GPIODV_11 };
static const unsigned int tsin_a_dp_pins[] = {
- PIN(GPIODV_1, EE_OFF),
- PIN(GPIODV_2, EE_OFF),
- PIN(GPIODV_3, EE_OFF),
- PIN(GPIODV_4, EE_OFF),
- PIN(GPIODV_5, EE_OFF),
- PIN(GPIODV_6, EE_OFF),
- PIN(GPIODV_7, EE_OFF),
+ GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
};
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, 0),
- MESON_PIN(GPIOAO_1, 0),
- MESON_PIN(GPIOAO_2, 0),
- MESON_PIN(GPIOAO_3, 0),
- MESON_PIN(GPIOAO_4, 0),
- MESON_PIN(GPIOAO_5, 0),
- MESON_PIN(GPIOAO_6, 0),
- MESON_PIN(GPIOAO_7, 0),
- MESON_PIN(GPIOAO_8, 0),
- MESON_PIN(GPIOAO_9, 0),
-};
-
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_tx_ao_b_0_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_1_pins[] = { PIN(GPIOAO_1, 0) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
-
-static const unsigned int i2c_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-static const unsigned int i2c_slave_sck_ao_pins[] = {PIN(GPIOAO_4, 0) };
-static const unsigned int i2c_slave_sda_ao_pins[] = {PIN(GPIOAO_5, 0) };
-
-static const unsigned int remote_input_ao_pins[] = {PIN(GPIOAO_7, 0) };
-
-static const unsigned int pwm_ao_a_3_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int pwm_ao_a_8_pins[] = { PIN(GPIOAO_8, 0) };
-
-static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int pwm_ao_b_6_pins[] = { PIN(GPIOAO_6, 0) };
-
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int spdif_out_ao_9_pins[] = { PIN(GPIOAO_9, 0) };
-
-static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_8, 0) };
-static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_8, 0) };
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+
+ MESON_PIN(GPIO_TEST_N),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_tx_ao_b_0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b_1_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b_pins[] = { GPIOAO_5 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+
+static const unsigned int i2c_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = {GPIOAO_5 };
+static const unsigned int i2c_slave_sck_ao_pins[] = {GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = {GPIOAO_5 };
+
+static const unsigned int remote_input_ao_pins[] = {GPIOAO_7 };
+
+static const unsigned int pwm_ao_a_3_pins[] = { GPIOAO_3 };
+static const unsigned int pwm_ao_a_8_pins[] = { GPIOAO_8 };
+
+static const unsigned int pwm_ao_b_pins[] = { GPIOAO_9 };
+static const unsigned int pwm_ao_b_6_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_out_ch23_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_out_ch45_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_out_ch67_ao_pins[] = { GPIO_TEST_N };
+
+static const unsigned int spdif_out_ao_6_pins[] = { GPIOAO_6 };
+static const unsigned int spdif_out_ao_9_pins[] = { GPIOAO_9 };
+
+static const unsigned int ao_cec_pins[] = { GPIOAO_8 };
+static const unsigned int ee_cec_pins[] = { GPIOAO_8 };
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
- GPIO_GROUP(GPIOZ_0, EE_OFF),
- GPIO_GROUP(GPIOZ_1, EE_OFF),
- GPIO_GROUP(GPIOZ_2, EE_OFF),
- GPIO_GROUP(GPIOZ_3, EE_OFF),
- GPIO_GROUP(GPIOZ_4, EE_OFF),
- GPIO_GROUP(GPIOZ_5, EE_OFF),
- GPIO_GROUP(GPIOZ_6, EE_OFF),
- GPIO_GROUP(GPIOZ_7, EE_OFF),
- GPIO_GROUP(GPIOZ_8, EE_OFF),
- GPIO_GROUP(GPIOZ_9, EE_OFF),
- GPIO_GROUP(GPIOZ_10, EE_OFF),
- GPIO_GROUP(GPIOZ_11, EE_OFF),
- GPIO_GROUP(GPIOZ_12, EE_OFF),
- GPIO_GROUP(GPIOZ_13, EE_OFF),
- GPIO_GROUP(GPIOZ_14, EE_OFF),
- GPIO_GROUP(GPIOZ_15, EE_OFF),
-
- GPIO_GROUP(GPIOH_0, EE_OFF),
- GPIO_GROUP(GPIOH_1, EE_OFF),
- GPIO_GROUP(GPIOH_2, EE_OFF),
- GPIO_GROUP(GPIOH_3, EE_OFF),
- GPIO_GROUP(GPIOH_4, EE_OFF),
- GPIO_GROUP(GPIOH_5, EE_OFF),
- GPIO_GROUP(GPIOH_6, EE_OFF),
- GPIO_GROUP(GPIOH_7, EE_OFF),
- GPIO_GROUP(GPIOH_8, EE_OFF),
- GPIO_GROUP(GPIOH_9, EE_OFF),
-
- GPIO_GROUP(BOOT_0, EE_OFF),
- GPIO_GROUP(BOOT_1, EE_OFF),
- GPIO_GROUP(BOOT_2, EE_OFF),
- GPIO_GROUP(BOOT_3, EE_OFF),
- GPIO_GROUP(BOOT_4, EE_OFF),
- GPIO_GROUP(BOOT_5, EE_OFF),
- GPIO_GROUP(BOOT_6, EE_OFF),
- GPIO_GROUP(BOOT_7, EE_OFF),
- GPIO_GROUP(BOOT_8, EE_OFF),
- GPIO_GROUP(BOOT_9, EE_OFF),
- GPIO_GROUP(BOOT_10, EE_OFF),
- GPIO_GROUP(BOOT_11, EE_OFF),
- GPIO_GROUP(BOOT_12, EE_OFF),
- GPIO_GROUP(BOOT_13, EE_OFF),
- GPIO_GROUP(BOOT_14, EE_OFF),
- GPIO_GROUP(BOOT_15, EE_OFF),
-
- GPIO_GROUP(CARD_0, EE_OFF),
- GPIO_GROUP(CARD_1, EE_OFF),
- GPIO_GROUP(CARD_2, EE_OFF),
- GPIO_GROUP(CARD_3, EE_OFF),
- GPIO_GROUP(CARD_4, EE_OFF),
- GPIO_GROUP(CARD_5, EE_OFF),
- GPIO_GROUP(CARD_6, EE_OFF),
-
- GPIO_GROUP(GPIODV_0, EE_OFF),
- GPIO_GROUP(GPIODV_1, EE_OFF),
- GPIO_GROUP(GPIODV_2, EE_OFF),
- GPIO_GROUP(GPIODV_3, EE_OFF),
- GPIO_GROUP(GPIODV_4, EE_OFF),
- GPIO_GROUP(GPIODV_5, EE_OFF),
- GPIO_GROUP(GPIODV_6, EE_OFF),
- GPIO_GROUP(GPIODV_7, EE_OFF),
- GPIO_GROUP(GPIODV_8, EE_OFF),
- GPIO_GROUP(GPIODV_9, EE_OFF),
- GPIO_GROUP(GPIODV_10, EE_OFF),
- GPIO_GROUP(GPIODV_11, EE_OFF),
- GPIO_GROUP(GPIODV_12, EE_OFF),
- GPIO_GROUP(GPIODV_13, EE_OFF),
- GPIO_GROUP(GPIODV_14, EE_OFF),
- GPIO_GROUP(GPIODV_15, EE_OFF),
- GPIO_GROUP(GPIODV_16, EE_OFF),
- GPIO_GROUP(GPIODV_17, EE_OFF),
- GPIO_GROUP(GPIODV_19, EE_OFF),
- GPIO_GROUP(GPIODV_20, EE_OFF),
- GPIO_GROUP(GPIODV_21, EE_OFF),
- GPIO_GROUP(GPIODV_22, EE_OFF),
- GPIO_GROUP(GPIODV_23, EE_OFF),
- GPIO_GROUP(GPIODV_24, EE_OFF),
- GPIO_GROUP(GPIODV_25, EE_OFF),
- GPIO_GROUP(GPIODV_26, EE_OFF),
- GPIO_GROUP(GPIODV_27, EE_OFF),
- GPIO_GROUP(GPIODV_28, EE_OFF),
- GPIO_GROUP(GPIODV_29, EE_OFF),
-
- GPIO_GROUP(GPIOX_0, EE_OFF),
- GPIO_GROUP(GPIOX_1, EE_OFF),
- GPIO_GROUP(GPIOX_2, EE_OFF),
- GPIO_GROUP(GPIOX_3, EE_OFF),
- GPIO_GROUP(GPIOX_4, EE_OFF),
- GPIO_GROUP(GPIOX_5, EE_OFF),
- GPIO_GROUP(GPIOX_6, EE_OFF),
- GPIO_GROUP(GPIOX_7, EE_OFF),
- GPIO_GROUP(GPIOX_8, EE_OFF),
- GPIO_GROUP(GPIOX_9, EE_OFF),
- GPIO_GROUP(GPIOX_10, EE_OFF),
- GPIO_GROUP(GPIOX_11, EE_OFF),
- GPIO_GROUP(GPIOX_12, EE_OFF),
- GPIO_GROUP(GPIOX_13, EE_OFF),
- GPIO_GROUP(GPIOX_14, EE_OFF),
- GPIO_GROUP(GPIOX_15, EE_OFF),
- GPIO_GROUP(GPIOX_16, EE_OFF),
- GPIO_GROUP(GPIOX_17, EE_OFF),
- GPIO_GROUP(GPIOX_18, EE_OFF),
-
- GPIO_GROUP(GPIOCLK_0, EE_OFF),
- GPIO_GROUP(GPIOCLK_1, EE_OFF),
-
- GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
+ GPIO_GROUP(GPIOZ_15),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(BOOT_0),
+ GPIO_GROUP(BOOT_1),
+ GPIO_GROUP(BOOT_2),
+ GPIO_GROUP(BOOT_3),
+ GPIO_GROUP(BOOT_4),
+ GPIO_GROUP(BOOT_5),
+ GPIO_GROUP(BOOT_6),
+ GPIO_GROUP(BOOT_7),
+ GPIO_GROUP(BOOT_8),
+ GPIO_GROUP(BOOT_9),
+ GPIO_GROUP(BOOT_10),
+ GPIO_GROUP(BOOT_11),
+ GPIO_GROUP(BOOT_12),
+ GPIO_GROUP(BOOT_13),
+ GPIO_GROUP(BOOT_14),
+ GPIO_GROUP(BOOT_15),
+
+ GPIO_GROUP(CARD_0),
+ GPIO_GROUP(CARD_1),
+ GPIO_GROUP(CARD_2),
+ GPIO_GROUP(CARD_3),
+ GPIO_GROUP(CARD_4),
+ GPIO_GROUP(CARD_5),
+ GPIO_GROUP(CARD_6),
+
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+
+ GPIO_GROUP(GPIOCLK_0),
+ GPIO_GROUP(GPIOCLK_1),
+
+ GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
GROUP(sdio_d0, 5, 31),
@@ -530,16 +522,16 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
};
static struct meson_pmx_group meson_gxl_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, 0),
- GPIO_GROUP(GPIOAO_1, 0),
- GPIO_GROUP(GPIOAO_2, 0),
- GPIO_GROUP(GPIOAO_3, 0),
- GPIO_GROUP(GPIOAO_4, 0),
- GPIO_GROUP(GPIOAO_5, 0),
- GPIO_GROUP(GPIOAO_6, 0),
- GPIO_GROUP(GPIOAO_7, 0),
- GPIO_GROUP(GPIOAO_8, 0),
- GPIO_GROUP(GPIOAO_9, 0),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
/* bank AO */
GROUP(uart_tx_ao_b_0, 0, 26),
@@ -567,6 +559,9 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(spdif_out_ao_9, 0, 4),
GROUP(ao_cec, 0, 15),
GROUP(ee_cec, 0, 14),
+
+ /* test n pin */
+ GROUP(i2s_out_ch67_ao, 1, 2),
};
static const char * const gpio_periphs_groups[] = {
@@ -597,8 +592,6 @@ static const char * const gpio_periphs_groups[] = {
"GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
"GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
"GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18",
-
- "GPIO_TEST_N",
};
static const char * const emmc_groups[] = {
@@ -713,6 +706,8 @@ static const char * const tsin_a_groups[] = {
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+
+ "GPIO_TEST_N",
};
static const char * const uart_ao_groups[] = {
@@ -745,7 +740,7 @@ static const char * const pwm_ao_b_groups[] = {
};
static const char * const i2s_out_ao_groups[] = {
- "i2s_out_ch23_ao", "i2s_out_ch45_ao",
+ "i2s_out_ch23_ao", "i2s_out_ch45_ao", "i2s_out_ch67_ao",
};
static const char * const spdif_out_ao_groups[] = {
@@ -800,24 +795,23 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
};
static struct meson_bank meson_gxl_periphs_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_18, 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", GPIOZ_0, GPIOZ_15, 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", CARD_0, CARD_6, 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", BOOT_0, BOOT_15, 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", GPIOCLK_0, GPIOCLK_1, 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxl_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIOAO_9, 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.name = "periphs-banks",
- .pin_base = 10,
.pins = meson_gxl_periphs_pins,
.groups = meson_gxl_periphs_groups,
.funcs = meson_gxl_periphs_functions,
@@ -826,11 +820,11 @@ struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_periphs_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_periphs_functions),
.num_banks = ARRAY_SIZE(meson_gxl_periphs_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 0,
.pins = meson_gxl_aobus_pins,
.groups = meson_gxl_aobus_groups,
.funcs = meson_gxl_aobus_functions,
@@ -839,4 +833,26 @@ struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson_gxl_aobus_groups),
.num_funcs = ARRAY_SIZE(meson_gxl_aobus_functions),
.num_banks = ARRAY_SIZE(meson_gxl_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson_gxl_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson-gxl-periphs-pinctrl",
+ .data = &meson_gxl_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxl-aobus-pinctrl",
+ .data = &meson_gxl_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson_gxl_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson-gxl-pinctrl",
+ .of_match_table = meson_gxl_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson_gxl_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 66ed70c12733..29a458da78db 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -31,10 +31,6 @@
* In some cases the register ranges for pull enable and pull
* direction are the same and thus there are only 3 register ranges.
*
- * Every pinmux group can be enabled by a specific bit in the first
- * register range; when all groups for a given pin are disabled the
- * pin acts as a GPIO.
- *
* For the pull and GPIO configuration every bank uses a contiguous
* set of bits in the register sets described above; the same register
* can be shared by more banks with different offsets.
@@ -50,6 +46,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -147,94 +144,24 @@ static const struct pinctrl_ops meson_pctrl_ops = {
.pin_dbg_show = meson_pin_dbg_show,
};
-/**
- * meson_pmx_disable_other_groups() - disable other groups using a given pin
- *
- * @pc: meson pin controller device
- * @pin: number of the pin
- * @sel_group: index of the selected group, or -1 if none
- *
- * The function disables all pinmux groups using a pin except the
- * selected one. If @sel_group is -1 all groups are disabled, leaving
- * the pin in GPIO mode.
- */
-static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
- unsigned int pin, int sel_group)
-{
- struct meson_pmx_group *group;
- int i, j;
-
- for (i = 0; i < pc->data->num_groups; i++) {
- group = &pc->data->groups[i];
- if (group->is_gpio || i == sel_group)
- continue;
-
- for (j = 0; j < group->num_pins; j++) {
- if (group->pins[j] == pin) {
- /* We have found a group using the pin */
- regmap_update_bits(pc->reg_mux,
- group->reg * 4,
- BIT(group->bit), 0);
- }
- }
- }
-}
-
-static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
- unsigned group_num)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
- struct meson_pmx_func *func = &pc->data->funcs[func_num];
- struct meson_pmx_group *group = &pc->data->groups[group_num];
- int i, ret = 0;
-
- dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
- group->name);
-
- /*
- * Disable groups using the same pin.
- * The selected group is not disabled to avoid glitches.
- */
- for (i = 0; i < group->num_pins; i++)
- meson_pmx_disable_other_groups(pc, group->pins[i], group_num);
-
- /* Function 0 (GPIO) doesn't need any additional setting */
- if (func_num)
- ret = regmap_update_bits(pc->reg_mux, group->reg * 4,
- BIT(group->bit), BIT(group->bit));
-
- return ret;
-}
-
-static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
- struct pinctrl_gpio_range *range,
- unsigned offset)
-{
- struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
-
- meson_pmx_disable_other_groups(pc, offset, -1);
-
- return 0;
-}
-
-static int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->num_funcs;
}
-static const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
- unsigned selector)
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
return pc->data->funcs[selector].name;
}
-static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
{
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
@@ -244,14 +171,6 @@ static int meson_pmx_get_groups(struct pinctrl_dev *pcdev, unsigned selector,
return 0;
}
-static const struct pinmux_ops meson_pmx_ops = {
- .set_mux = meson_pmx_set_mux,
- .get_functions_count = meson_pmx_get_funcs_count,
- .get_function_name = meson_pmx_get_func_name,
- .get_function_groups = meson_pmx_get_groups,
- .gpio_request_enable = meson_pmx_request_gpio,
-};
-
static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
{
@@ -399,7 +318,7 @@ static int meson_pinconf_group_set(struct pinctrl_dev *pcdev,
static int meson_pinconf_group_get(struct pinctrl_dev *pcdev,
unsigned int group, unsigned long *config)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
static const struct pinconf_ops meson_pinconf_ops = {
@@ -410,31 +329,18 @@ static const struct pinconf_ops meson_pinconf_ops = {
.is_generic = true,
};
-static int meson_gpio_request(struct gpio_chip *chip, unsigned gpio)
-{
- return pinctrl_request_gpio(chip->base + gpio);
-}
-
-static void meson_gpio_free(struct gpio_chip *chip, unsigned gpio)
-{
- struct meson_pinctrl *pc = gpiochip_get_data(chip);
-
- pinctrl_free_gpio(pc->data->pin_base + gpio);
-}
-
static int meson_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit), BIT(bit));
}
@@ -443,21 +349,20 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_DIR, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_DIR, &reg, &bit);
ret = regmap_update_bits(pc->reg_gpio, reg, BIT(bit), 0);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
return regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -465,16 +370,15 @@ static int meson_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, pin;
+ unsigned int reg, bit;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return;
- meson_calc_reg_and_bit(bank, pin, REG_OUT, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_OUT, &reg, &bit);
regmap_update_bits(pc->reg_gpio, reg, BIT(bit),
value ? BIT(bit) : 0);
}
@@ -482,70 +386,33 @@ static void meson_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
struct meson_pinctrl *pc = gpiochip_get_data(chip);
- unsigned int reg, bit, val, pin;
+ unsigned int reg, bit, val;
struct meson_bank *bank;
int ret;
- pin = pc->data->pin_base + gpio;
- ret = meson_get_bank(pc, pin, &bank);
+ ret = meson_get_bank(pc, gpio, &bank);
if (ret)
return ret;
- meson_calc_reg_and_bit(bank, pin, REG_IN, &reg, &bit);
+ meson_calc_reg_and_bit(bank, gpio, REG_IN, &reg, &bit);
regmap_read(pc->reg_gpio, reg, &val);
return !!(val & BIT(bit));
}
-static const struct of_device_id meson_pinctrl_dt_match[] = {
- {
- .compatible = "amlogic,meson8-cbus-pinctrl",
- .data = &meson8_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-cbus-pinctrl",
- .data = &meson8b_cbus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8-aobus-pinctrl",
- .data = &meson8_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson8b-aobus-pinctrl",
- .data = &meson8b_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
- .data = &meson_gxbb_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
- .data = &meson_gxbb_aobus_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-periphs-pinctrl",
- .data = &meson_gxl_periphs_pinctrl_data,
- },
- {
- .compatible = "amlogic,meson-gxl-aobus-pinctrl",
- .data = &meson_gxl_aobus_pinctrl_data,
- },
- { },
-};
-
static int meson_gpiolib_register(struct meson_pinctrl *pc)
{
int ret;
pc->chip.label = pc->data->name;
pc->chip.parent = pc->dev;
- pc->chip.request = meson_gpio_request;
- pc->chip.free = meson_gpio_free;
+ pc->chip.request = gpiochip_generic_request;
+ pc->chip.free = gpiochip_generic_free;
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
pc->chip.set = meson_gpio_set;
- pc->chip.base = pc->data->pin_base;
+ pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
pc->chip.of_node = pc->of_node;
@@ -640,9 +507,8 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
return 0;
}
-static int meson_pinctrl_probe(struct platform_device *pdev)
+int meson_pinctrl_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct meson_pinctrl *pc;
int ret;
@@ -652,17 +518,16 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
pc->dev = dev;
- match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
- pc->data = (struct meson_pinctrl_data *) match->data;
+ pc->data = (struct meson_pinctrl_data *) of_device_get_match_data(dev);
- ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
+ ret = meson_pinctrl_parse_dt(pc, dev->of_node);
if (ret)
return ret;
pc->desc.name = "pinctrl-meson";
pc->desc.owner = THIS_MODULE;
pc->desc.pctlops = &meson_pctrl_ops;
- pc->desc.pmxops = &meson_pmx_ops;
+ pc->desc.pmxops = pc->data->pmx_ops;
pc->desc.confops = &meson_pinconf_ops;
pc->desc.pins = pc->data->pins;
pc->desc.npins = pc->data->num_pins;
@@ -675,12 +540,3 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
return meson_gpiolib_register(pc);
}
-
-static struct platform_driver meson_pinctrl_driver = {
- .probe = meson_pinctrl_probe,
- .driver = {
- .name = "meson-pinctrl",
- .of_match_table = meson_pinctrl_dt_match,
- },
-};
-builtin_platform_driver(meson_pinctrl_driver);
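
A note on the request/free change in the file above: the driver-private wrappers are dropped in favour of gpiolib's generic callbacks. As a hedged sketch (the function names below are hypothetical and mirror the removed meson_gpio_request()/meson_gpio_free(), not the exact gpiolib source), the generic callbacks simply forward to the same pinctrl calls, only against the dynamically assigned GPIO base now that chip.base is set to -1:

	/* Illustrative only: a minimal equivalent of what the generic
	 * gpiochip_generic_request()/gpiochip_generic_free() callbacks
	 * provide, mirroring the wrappers removed above but without the
	 * pin_base offset.
	 */
	static int example_gpio_request(struct gpio_chip *chip, unsigned offset)
	{
		return pinctrl_request_gpio(chip->base + offset);
	}

	static void example_gpio_free(struct gpio_chip *chip, unsigned offset)
	{
		pinctrl_free_gpio(chip->base + offset);
	}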
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 890f296f5840..183b6e471635 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
@@ -31,9 +32,7 @@ struct meson_pmx_group {
const char *name;
const unsigned int *pins;
unsigned int num_pins;
- bool is_gpio;
- unsigned int reg;
- unsigned int bit;
+ const void *data;
};
/**
@@ -103,12 +102,12 @@ struct meson_pinctrl_data {
const struct pinctrl_pin_desc *pins;
struct meson_pmx_group *groups;
struct meson_pmx_func *funcs;
- unsigned int pin_base;
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
struct meson_bank *banks;
unsigned int num_banks;
+ const struct pinmux_ops *pmx_ops;
};
struct meson_pinctrl {
@@ -124,25 +123,6 @@ struct meson_pinctrl {
struct device_node *of_node;
};
-#define PIN(x, b) (b + x)
-
-#define GROUP(grp, r, b) \
- { \
- .name = #grp, \
- .pins = grp ## _pins, \
- .num_pins = ARRAY_SIZE(grp ## _pins), \
- .reg = r, \
- .bit = b, \
- }
-
-#define GPIO_GROUP(gpio, b) \
- { \
- .name = #gpio, \
- .pins = (const unsigned int[]){ PIN(gpio, b) }, \
- .num_pins = 1, \
- .is_gpio = true, \
- }
-
#define FUNCTION(fn) \
{ \
.name = #fn, \
@@ -166,13 +146,16 @@ struct meson_pinctrl {
}, \
}
-#define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
+#define MESON_PIN(x) PINCTRL_PIN(x, #x)
+
+/* Common pmx functions */
+int meson_pmx_get_funcs_count(struct pinctrl_dev *pcdev);
+const char *meson_pmx_get_func_name(struct pinctrl_dev *pcdev,
+ unsigned selector);
+int meson_pmx_get_groups(struct pinctrl_dev *pcdev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
-extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data;
-extern struct meson_pinctrl_data meson_gxl_aobus_pinctrl_data;
+/* Common probe function */
+int meson_pinctrl_probe(struct platform_device *pdev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.c b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
new file mode 100644
index 000000000000..b93b058c8a07
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.c
@@ -0,0 +1,108 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For this first generation of pinctrl driver every pinmux group can be
+ * enabled by a specific bit in the first register range. When all groups for
+ * a given pin are disabled the pin acts as a GPIO.
+ */
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-meson.h"
+#include "pinctrl-meson8-pmx.h"
+
+/**
+ * meson8_pmx_disable_other_groups() - disable other groups using a given pin
+ *
+ * @pc: meson pin controller device
+ * @pin: number of the pin
+ * @sel_group: index of the selected group, or -1 if none
+ *
+ * The function disables all pinmux groups using a pin except the
+ * selected one. If @sel_group is -1 all groups are disabled, leaving
+ * the pin in GPIO mode.
+ */
+static void meson8_pmx_disable_other_groups(struct meson_pinctrl *pc,
+ unsigned int pin, int sel_group)
+{
+ struct meson_pmx_group *group;
+ struct meson8_pmx_data *pmx_data;
+ int i, j;
+
+ for (i = 0; i < pc->data->num_groups; i++) {
+ group = &pc->data->groups[i];
+ pmx_data = (struct meson8_pmx_data *)group->data;
+ if (pmx_data->is_gpio || i == sel_group)
+ continue;
+
+ for (j = 0; j < group->num_pins; j++) {
+ if (group->pins[j] == pin) {
+ /* We have found a group using the pin */
+ regmap_update_bits(pc->reg_mux,
+ pmx_data->reg * 4,
+ BIT(pmx_data->bit), 0);
+ }
+ }
+ }
+}
+
+static int meson8_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
+ unsigned group_num)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+ struct meson_pmx_func *func = &pc->data->funcs[func_num];
+ struct meson_pmx_group *group = &pc->data->groups[group_num];
+ struct meson8_pmx_data *pmx_data =
+ (struct meson8_pmx_data *)group->data;
+ int i, ret = 0;
+
+ dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
+ group->name);
+
+ /*
+ * Disable groups using the same pin.
+ * The selected group is not disabled to avoid glitches.
+ */
+ for (i = 0; i < group->num_pins; i++)
+ meson8_pmx_disable_other_groups(pc, group->pins[i], group_num);
+
+ /* Function 0 (GPIO) doesn't need any additional setting */
+ if (func_num)
+ ret = regmap_update_bits(pc->reg_mux, pmx_data->reg * 4,
+ BIT(pmx_data->bit),
+ BIT(pmx_data->bit));
+
+ return ret;
+}
+
+static int meson8_pmx_request_gpio(struct pinctrl_dev *pcdev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
+
+ meson8_pmx_disable_other_groups(pc, offset, -1);
+
+ return 0;
+}
+
+const struct pinmux_ops meson8_pmx_ops = {
+ .set_mux = meson8_pmx_set_mux,
+ .get_functions_count = meson_pmx_get_funcs_count,
+ .get_function_name = meson_pmx_get_func_name,
+ .get_function_groups = meson_pmx_get_groups,
+ .gpio_request_enable = meson8_pmx_request_gpio,
+};
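
A worked example of the path above, using the i2s_out_ch67_ao group added earlier in this diff (illustrative, derived only from the code shown here):

	/* GROUP(i2s_out_ch67_ao, 1, 2) attaches meson8_pmx_data
	 * { .reg = 1, .bit = 2 }, so selecting that group via
	 * meson8_pmx_set_mux() first clears the mux bits of any other
	 * group sharing GPIO_TEST_N and then effectively performs:
	 *
	 *	regmap_update_bits(pc->reg_mux, 1 * 4, BIT(2), BIT(2));
	 *
	 * i.e. it sets bit 2 of the second 32-bit mux register of the
	 * AO controller.
	 */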
diff --git a/drivers/pinctrl/meson/pinctrl-meson8-pmx.h b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
new file mode 100644
index 000000000000..47293c28f913
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson8-pmx.h
@@ -0,0 +1,48 @@
+/*
+ * First generation of pinmux driver for Amlogic Meson SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2017 Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+struct meson8_pmx_data {
+ bool is_gpio;
+ unsigned int reg;
+ unsigned int bit;
+};
+
+#define PMX_DATA(r, b, g) \
+ { \
+ .reg = r, \
+ .bit = b, \
+ .is_gpio = g, \
+ }
+
+#define GROUP(grp, r, b) \
+ { \
+ .name = #grp, \
+ .pins = grp ## _pins, \
+ .num_pins = ARRAY_SIZE(grp ## _pins), \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(r, b, false), \
+ }, \
+ }
+
+#define GPIO_GROUP(gpio) \
+ { \
+ .name = #gpio, \
+ .pins = (const unsigned int[]){ gpio }, \
+ .num_pins = 1, \
+ .data = (const struct meson8_pmx_data[]){ \
+ PMX_DATA(0, 0, true), \
+ }, \
+ }
+
+extern const struct pinmux_ops meson8_pmx_ops;
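
For reference, a rough expansion of the two macros above (illustrative; the meson_pmx_group fields are those declared in pinctrl-meson.h earlier in this diff):

	/* GROUP(sdio_d0, 5, 31) expands to approximately:
	 *
	 *	{
	 *		.name = "sdio_d0",
	 *		.pins = sdio_d0_pins,
	 *		.num_pins = ARRAY_SIZE(sdio_d0_pins),
	 *		.data = (const struct meson8_pmx_data[]){
	 *			{ .reg = 5, .bit = 31, .is_gpio = false },
	 *		},
	 *	}
	 *
	 * while GPIO_GROUP(GPIOX_0) carries { .reg = 0, .bit = 0,
	 * .is_gpio = true }, so the mux register/bit now live behind the
	 * generic ->data pointer instead of in struct meson_pmx_group.
	 */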
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 970f6f14502c..49c7ce03547b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -13,506 +13,495 @@
#include <dt-bindings/gpio/meson8-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 120
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_12, 0),
- MESON_PIN(GPIOX_13, 0),
- MESON_PIN(GPIOX_14, 0),
- MESON_PIN(GPIOX_15, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_2, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_4, 0),
- MESON_PIN(GPIOY_5, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
- MESON_PIN(GPIOY_15, 0),
- MESON_PIN(GPIOY_16, 0),
- MESON_PIN(GPIODV_0, 0),
- MESON_PIN(GPIODV_1, 0),
- MESON_PIN(GPIODV_2, 0),
- MESON_PIN(GPIODV_3, 0),
- MESON_PIN(GPIODV_4, 0),
- MESON_PIN(GPIODV_5, 0),
- MESON_PIN(GPIODV_6, 0),
- MESON_PIN(GPIODV_7, 0),
- MESON_PIN(GPIODV_8, 0),
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_10, 0),
- MESON_PIN(GPIODV_11, 0),
- MESON_PIN(GPIODV_12, 0),
- MESON_PIN(GPIODV_13, 0),
- MESON_PIN(GPIODV_14, 0),
- MESON_PIN(GPIODV_15, 0),
- MESON_PIN(GPIODV_16, 0),
- MESON_PIN(GPIODV_17, 0),
- MESON_PIN(GPIODV_18, 0),
- MESON_PIN(GPIODV_19, 0),
- MESON_PIN(GPIODV_20, 0),
- MESON_PIN(GPIODV_21, 0),
- MESON_PIN(GPIODV_22, 0),
- MESON_PIN(GPIODV_23, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
- MESON_PIN(GPIOZ_0, 0),
- MESON_PIN(GPIOZ_1, 0),
- MESON_PIN(GPIOZ_2, 0),
- MESON_PIN(GPIOZ_3, 0),
- MESON_PIN(GPIOZ_4, 0),
- MESON_PIN(GPIOZ_5, 0),
- MESON_PIN(GPIOZ_6, 0),
- MESON_PIN(GPIOZ_7, 0),
- MESON_PIN(GPIOZ_8, 0),
- MESON_PIN(GPIOZ_9, 0),
- MESON_PIN(GPIOZ_10, 0),
- MESON_PIN(GPIOZ_11, 0),
- MESON_PIN(GPIOZ_12, 0),
- MESON_PIN(GPIOZ_13, 0),
- MESON_PIN(GPIOZ_14, 0),
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_12),
+ MESON_PIN(GPIOX_13),
+ MESON_PIN(GPIOX_14),
+ MESON_PIN(GPIOX_15),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_2),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_4),
+ MESON_PIN(GPIOY_5),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+ MESON_PIN(GPIOY_15),
+ MESON_PIN(GPIOY_16),
+ MESON_PIN(GPIODV_0),
+ MESON_PIN(GPIODV_1),
+ MESON_PIN(GPIODV_2),
+ MESON_PIN(GPIODV_3),
+ MESON_PIN(GPIODV_4),
+ MESON_PIN(GPIODV_5),
+ MESON_PIN(GPIODV_6),
+ MESON_PIN(GPIODV_7),
+ MESON_PIN(GPIODV_8),
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_10),
+ MESON_PIN(GPIODV_11),
+ MESON_PIN(GPIODV_12),
+ MESON_PIN(GPIODV_13),
+ MESON_PIN(GPIODV_14),
+ MESON_PIN(GPIODV_15),
+ MESON_PIN(GPIODV_16),
+ MESON_PIN(GPIODV_17),
+ MESON_PIN(GPIODV_18),
+ MESON_PIN(GPIODV_19),
+ MESON_PIN(GPIODV_20),
+ MESON_PIN(GPIODV_21),
+ MESON_PIN(GPIODV_22),
+ MESON_PIN(GPIODV_23),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+ MESON_PIN(GPIOZ_0),
+ MESON_PIN(GPIOZ_1),
+ MESON_PIN(GPIOZ_2),
+ MESON_PIN(GPIOZ_3),
+ MESON_PIN(GPIOZ_4),
+ MESON_PIN(GPIOZ_5),
+ MESON_PIN(GPIOZ_6),
+ MESON_PIN(GPIOZ_7),
+ MESON_PIN(GPIOZ_8),
+ MESON_PIN(GPIOZ_9),
+ MESON_PIN(GPIOZ_10),
+ MESON_PIN(GPIOZ_11),
+ MESON_PIN(GPIOZ_12),
+ MESON_PIN(GPIOZ_13),
+ MESON_PIN(GPIOZ_14),
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
};
static const struct pinctrl_pin_desc meson8_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int sdxc_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a0_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a0_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a0_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a0_pins[] = { PIN(GPIOX_7, 0) };
-
-static const unsigned int uart_tx_a1_pins[] = { PIN(GPIOX_12, 0) };
-static const unsigned int uart_rx_a1_pins[] = { PIN(GPIOX_13, 0) };
-static const unsigned int uart_cts_a1_pins[] = { PIN(GPIOX_14, 0) };
-static const unsigned int uart_rts_a1_pins[] = { PIN(GPIOX_15, 0) };
-
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_data_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_x_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int sdxc_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_a_pins[] = { GPIOX_1, GPIOX_2, GPIOX_3 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a0_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a0_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a0_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a0_pins[] = { GPIOX_7 };
+
+static const unsigned int uart_tx_a1_pins[] = { GPIOX_12 };
+static const unsigned int uart_rx_a1_pins[] = { GPIOX_13 };
+static const unsigned int uart_cts_a1_pins[] = { GPIOX_14 };
+static const unsigned int uart_rts_a1_pins[] = { GPIOX_15 };
+
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_data_pins[] = { GPIOX_19 };
+
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_x_pins[] = { GPIOX_11 };
/* bank Y */
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIOY_0 };
+static const unsigned int uart_rx_c_pins[] = { GPIOY_1 };
+static const unsigned int uart_cts_c_pins[] = { GPIOY_2 };
+static const unsigned int uart_rts_c_pins[] = { GPIOY_3 };
-static const unsigned int pcm_out_b_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int pcm_in_b_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int pcm_fs_b_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int pcm_clk_b_pins[] = { PIN(GPIOY_7, 0) };
+static const unsigned int pcm_out_b_pins[] = { GPIOY_4 };
+static const unsigned int pcm_in_b_pins[] = { GPIOY_5 };
+static const unsigned int pcm_fs_b_pins[] = { GPIOY_6 };
+static const unsigned int pcm_clk_b_pins[] = { GPIOY_7 };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int i2c_sda_c0_pins[] = { GPIOY_0 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIOY_1 };
-static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, 0) };
+static const unsigned int pwm_a_y_pins[] = { GPIOY_16 };
-static const unsigned int i2s_out_ch45_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int i2s_out_ch23_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOY_4, 0) };
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOY_5, 0) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOY_6, 0) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOY_7, 0) };
-static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int i2s_out_ch78_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int i2s_out_ch45_pins[] = { GPIOY_0 };
+static const unsigned int i2s_out_ch23_pins[] = { GPIOY_1 };
+static const unsigned int i2s_out_ch01_pins[] = { GPIOY_4 };
+static const unsigned int i2s_in_ch01_pins[] = { GPIOY_5 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOY_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOY_7 };
+static const unsigned int i2s_am_clk_pins[] = { GPIOY_8 };
+static const unsigned int i2s_out_ch78_pins[] = { GPIOY_9 };
-static const unsigned int spdif_in_pins[] = { PIN(GPIOY_2, 0) };
-static const unsigned int spdif_out_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_in_pins[] = { GPIOY_2 };
+static const unsigned int spdif_out_pins[] = { GPIOY_3 };
/* bank DV */
-static const unsigned int dvin_rgb_pins[] = { PIN(GPIODV_0, 0), PIN(GPIODV_1, 0),
- PIN(GPIODV_2, 0), PIN(GPIODV_3, 0),
- PIN(GPIODV_4, 0), PIN(GPIODV_5, 0),
- PIN(GPIODV_6, 0), PIN(GPIODV_7, 0),
- PIN(GPIODV_8, 0), PIN(GPIODV_9, 0),
- PIN(GPIODV_10, 0), PIN(GPIODV_11, 0),
- PIN(GPIODV_12, 0), PIN(GPIODV_13, 0),
- PIN(GPIODV_14, 0), PIN(GPIODV_15, 0),
- PIN(GPIODV_16, 0), PIN(GPIODV_17, 0),
- PIN(GPIODV_18, 0), PIN(GPIODV_19, 0),
- PIN(GPIODV_20, 0), PIN(GPIODV_21, 0),
- PIN(GPIODV_22, 0), PIN(GPIODV_23, 0) };
-static const unsigned int dvin_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int dvin_hs_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int dvin_clk_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int dvin_de_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int enc_0_pins[] = { PIN(GPIODV_0, 0) };
-static const unsigned int enc_1_pins[] = { PIN(GPIODV_1, 0) };
-static const unsigned int enc_2_pins[] = { PIN(GPIODV_2, 0) };
-static const unsigned int enc_3_pins[] = { PIN(GPIODV_3, 0) };
-static const unsigned int enc_4_pins[] = { PIN(GPIODV_4, 0) };
-static const unsigned int enc_5_pins[] = { PIN(GPIODV_5, 0) };
-static const unsigned int enc_6_pins[] = { PIN(GPIODV_6, 0) };
-static const unsigned int enc_7_pins[] = { PIN(GPIODV_7, 0) };
-static const unsigned int enc_8_pins[] = { PIN(GPIODV_8, 0) };
-static const unsigned int enc_9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int enc_10_pins[] = { PIN(GPIODV_10, 0) };
-static const unsigned int enc_11_pins[] = { PIN(GPIODV_11, 0) };
-static const unsigned int enc_12_pins[] = { PIN(GPIODV_12, 0) };
-static const unsigned int enc_13_pins[] = { PIN(GPIODV_13, 0) };
-static const unsigned int enc_14_pins[] = { PIN(GPIODV_14, 0) };
-static const unsigned int enc_15_pins[] = { PIN(GPIODV_15, 0) };
-static const unsigned int enc_16_pins[] = { PIN(GPIODV_16, 0) };
-static const unsigned int enc_17_pins[] = { PIN(GPIODV_17, 0) };
-
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIODV_27, 0) };
-
-static const unsigned int vga_vs_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int vga_hs_pins[] = { PIN(GPIODV_25, 0) };
-
-static const unsigned int pwm_c_dv9_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_c_dv29_pins[] = { PIN(GPIODV_29, 0) };
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
+static const unsigned int dvin_rgb_pins[] = {
+ GPIODV_0, GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5,
+ GPIODV_6, GPIODV_7, GPIODV_8, GPIODV_9, GPIODV_10, GPIODV_11,
+ GPIODV_12, GPIODV_13, GPIODV_14, GPIODV_15, GPIODV_16, GPIODV_17,
+ GPIODV_18, GPIODV_19, GPIODV_20, GPIODV_21, GPIODV_22, GPIODV_23
+};
+static const unsigned int dvin_vs_pins[] = { GPIODV_24 };
+static const unsigned int dvin_hs_pins[] = { GPIODV_25 };
+static const unsigned int dvin_clk_pins[] = { GPIODV_26 };
+static const unsigned int dvin_de_pins[] = { GPIODV_27 };
+
+static const unsigned int enc_0_pins[] = { GPIODV_0 };
+static const unsigned int enc_1_pins[] = { GPIODV_1 };
+static const unsigned int enc_2_pins[] = { GPIODV_2 };
+static const unsigned int enc_3_pins[] = { GPIODV_3 };
+static const unsigned int enc_4_pins[] = { GPIODV_4 };
+static const unsigned int enc_5_pins[] = { GPIODV_5 };
+static const unsigned int enc_6_pins[] = { GPIODV_6 };
+static const unsigned int enc_7_pins[] = { GPIODV_7 };
+static const unsigned int enc_8_pins[] = { GPIODV_8 };
+static const unsigned int enc_9_pins[] = { GPIODV_9 };
+static const unsigned int enc_10_pins[] = { GPIODV_10 };
+static const unsigned int enc_11_pins[] = { GPIODV_11 };
+static const unsigned int enc_12_pins[] = { GPIODV_12 };
+static const unsigned int enc_13_pins[] = { GPIODV_13 };
+static const unsigned int enc_14_pins[] = { GPIODV_14 };
+static const unsigned int enc_15_pins[] = { GPIODV_15 };
+static const unsigned int enc_16_pins[] = { GPIODV_16 };
+static const unsigned int enc_17_pins[] = { GPIODV_17 };
+
+static const unsigned int uart_tx_b1_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_b1_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_b1_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_b1_pins[] = { GPIODV_27 };
+
+static const unsigned int vga_vs_pins[] = { GPIODV_24 };
+static const unsigned int vga_hs_pins[] = { GPIODV_25 };
+
+static const unsigned int pwm_c_dv9_pins[] = { GPIODV_9 };
+static const unsigned int pwm_c_dv29_pins[] = { GPIODV_29 };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_pins[] = { PIN(GPIOH_3, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_pins[] = { GPIOH_3 };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOH_6, 0) };
+static const unsigned int spi_ss0_0_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_0_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOH_6 };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank Z */
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int spi_ss1_1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOZ_13, 0) };
-static const unsigned int spi_ss2_1_pins[] = { PIN(GPIOZ_14, 0) };
-
-static const unsigned int eth_tx_clk_50m_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(GPIOZ_5, 0) };
-static const unsigned int eth_txd1_pins[] = { PIN(GPIOZ_6, 0) };
-static const unsigned int eth_txd0_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int eth_rx_clk_in_pins[] = { PIN(GPIOZ_8, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(GPIOZ_9, 0) };
-static const unsigned int eth_rxd1_pins[] = { PIN(GPIOZ_10, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(GPIOZ_11, 0) };
-static const unsigned int eth_mdio_pins[] = { PIN(GPIOZ_12, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(GPIOZ_13, 0) };
-
-static const unsigned int i2c_sda_a0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a0_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_b_pins[] = { PIN(GPIOZ_2, 0) };
-static const unsigned int i2c_sck_b_pins[] = { PIN(GPIOZ_3, 0) };
-
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOZ_4, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOZ_5, 0) };
-
-static const unsigned int i2c_sda_a1_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a1_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int i2c_sda_a2_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int i2c_sck_a2_pins[] = { PIN(GPIOZ_1, 0) };
-
-static const unsigned int pwm_a_z0_pins[] = { PIN(GPIOZ_0, 0) };
-static const unsigned int pwm_a_z7_pins[] = { PIN(GPIOZ_7, 0) };
-static const unsigned int pwm_b_z_pins[] = { PIN(GPIOZ_1, 0) };
-static const unsigned int pwm_c_z_pins[] = { PIN(GPIOZ_8, 0) };
+static const unsigned int spi_ss0_1_pins[] = { GPIOZ_9 };
+static const unsigned int spi_ss1_1_pins[] = { GPIOZ_10 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOZ_11 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOZ_12 };
+static const unsigned int spi_miso_1_pins[] = { GPIOZ_13 };
+static const unsigned int spi_ss2_1_pins[] = { GPIOZ_14 };
+
+static const unsigned int eth_tx_clk_50m_pins[] = { GPIOZ_4 };
+static const unsigned int eth_tx_en_pins[] = { GPIOZ_5 };
+static const unsigned int eth_txd1_pins[] = { GPIOZ_6 };
+static const unsigned int eth_txd0_pins[] = { GPIOZ_7 };
+static const unsigned int eth_rx_clk_in_pins[] = { GPIOZ_8 };
+static const unsigned int eth_rx_dv_pins[] = { GPIOZ_9 };
+static const unsigned int eth_rxd1_pins[] = { GPIOZ_10 };
+static const unsigned int eth_rxd0_pins[] = { GPIOZ_11 };
+static const unsigned int eth_mdio_pins[] = { GPIOZ_12 };
+static const unsigned int eth_mdc_pins[] = { GPIOZ_13 };
+
+static const unsigned int i2c_sda_a0_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a0_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_b_pins[] = { GPIOZ_2 };
+static const unsigned int i2c_sck_b_pins[] = { GPIOZ_3 };
+
+static const unsigned int i2c_sda_c1_pins[] = { GPIOZ_4 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOZ_5 };
+
+static const unsigned int i2c_sda_a1_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a1_pins[] = { GPIOZ_1 };
+
+static const unsigned int i2c_sda_a2_pins[] = { GPIOZ_0 };
+static const unsigned int i2c_sck_a2_pins[] = { GPIOZ_1 };
+
+static const unsigned int pwm_a_z0_pins[] = { GPIOZ_0 };
+static const unsigned int pwm_a_z7_pins[] = { GPIOZ_7 };
+static const unsigned int pwm_b_z_pins[] = { GPIOZ_1 };
+static const unsigned int pwm_c_z_pins[] = { GPIOZ_8 };
/* bank BOOT */
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_ce2_pins[] = { PIN(BOOT_16, 0) };
-static const unsigned int nand_ce3_pins[] = { PIN(BOOT_17, 0) };
-
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2, BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5, BOOT_6,
+ BOOT_7 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_16 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_17 };
+
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_pins[] = { BOOT_15 };
+static const unsigned int nand_ce2_pins[] = { BOOT_16 };
+static const unsigned int nand_ce3_pins[] = { BOOT_17 };
+
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4, CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int remote_output_ao_pins[] = { PIN(GPIOAO_13, AO_OFF) };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int remote_output_ao_pins[] = { GPIOAO_13 };
-static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_slave_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_slave_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
-static const unsigned int pwm_f_ao_pins[] = { PIN(GPIO_TEST_N, AO_OFF) };
+static const unsigned int pwm_f_ao_pins[] = { GPIO_TEST_N };
-static const unsigned int i2s_am_clk_out_ao_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_ao_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_ao_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, AO_OFF) };
+static const unsigned int i2s_am_clk_out_ao_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_ao_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_ao_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_ch01_ao_pins[] = { GPIOAO_11 };
-static const unsigned int hdmi_cec_ao_pins[] = { PIN(GPIOAO_12, AO_OFF) };
+static const unsigned int hdmi_cec_ao_pins[] = { GPIOAO_12 };
static struct meson_pmx_group meson8_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_12, 0),
- GPIO_GROUP(GPIOX_13, 0),
- GPIO_GROUP(GPIOX_14, 0),
- GPIO_GROUP(GPIOX_15, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_2, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_4, 0),
- GPIO_GROUP(GPIOY_5, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
- GPIO_GROUP(GPIOY_15, 0),
- GPIO_GROUP(GPIOY_16, 0),
- GPIO_GROUP(GPIODV_0, 0),
- GPIO_GROUP(GPIODV_1, 0),
- GPIO_GROUP(GPIODV_2, 0),
- GPIO_GROUP(GPIODV_3, 0),
- GPIO_GROUP(GPIODV_4, 0),
- GPIO_GROUP(GPIODV_5, 0),
- GPIO_GROUP(GPIODV_6, 0),
- GPIO_GROUP(GPIODV_7, 0),
- GPIO_GROUP(GPIODV_8, 0),
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_10, 0),
- GPIO_GROUP(GPIODV_11, 0),
- GPIO_GROUP(GPIODV_12, 0),
- GPIO_GROUP(GPIODV_13, 0),
- GPIO_GROUP(GPIODV_14, 0),
- GPIO_GROUP(GPIODV_15, 0),
- GPIO_GROUP(GPIODV_16, 0),
- GPIO_GROUP(GPIODV_17, 0),
- GPIO_GROUP(GPIODV_18, 0),
- GPIO_GROUP(GPIODV_19, 0),
- GPIO_GROUP(GPIODV_20, 0),
- GPIO_GROUP(GPIODV_21, 0),
- GPIO_GROUP(GPIODV_22, 0),
- GPIO_GROUP(GPIODV_23, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
- GPIO_GROUP(GPIOZ_0, 0),
- GPIO_GROUP(GPIOZ_1, 0),
- GPIO_GROUP(GPIOZ_2, 0),
- GPIO_GROUP(GPIOZ_3, 0),
- GPIO_GROUP(GPIOZ_4, 0),
- GPIO_GROUP(GPIOZ_5, 0),
- GPIO_GROUP(GPIOZ_6, 0),
- GPIO_GROUP(GPIOZ_7, 0),
- GPIO_GROUP(GPIOZ_8, 0),
- GPIO_GROUP(GPIOZ_9, 0),
- GPIO_GROUP(GPIOZ_10, 0),
- GPIO_GROUP(GPIOZ_11, 0),
- GPIO_GROUP(GPIOZ_12, 0),
- GPIO_GROUP(GPIOZ_13, 0),
- GPIO_GROUP(GPIOZ_14, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_12),
+ GPIO_GROUP(GPIOX_13),
+ GPIO_GROUP(GPIOX_14),
+ GPIO_GROUP(GPIOX_15),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_2),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_4),
+ GPIO_GROUP(GPIOY_5),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+ GPIO_GROUP(GPIOY_15),
+ GPIO_GROUP(GPIOY_16),
+ GPIO_GROUP(GPIODV_0),
+ GPIO_GROUP(GPIODV_1),
+ GPIO_GROUP(GPIODV_2),
+ GPIO_GROUP(GPIODV_3),
+ GPIO_GROUP(GPIODV_4),
+ GPIO_GROUP(GPIODV_5),
+ GPIO_GROUP(GPIODV_6),
+ GPIO_GROUP(GPIODV_7),
+ GPIO_GROUP(GPIODV_8),
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_10),
+ GPIO_GROUP(GPIODV_11),
+ GPIO_GROUP(GPIODV_12),
+ GPIO_GROUP(GPIODV_13),
+ GPIO_GROUP(GPIODV_14),
+ GPIO_GROUP(GPIODV_15),
+ GPIO_GROUP(GPIODV_16),
+ GPIO_GROUP(GPIODV_17),
+ GPIO_GROUP(GPIODV_18),
+ GPIO_GROUP(GPIODV_19),
+ GPIO_GROUP(GPIODV_20),
+ GPIO_GROUP(GPIODV_21),
+ GPIO_GROUP(GPIODV_22),
+ GPIO_GROUP(GPIODV_23),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+ GPIO_GROUP(GPIOZ_0),
+ GPIO_GROUP(GPIOZ_1),
+ GPIO_GROUP(GPIOZ_2),
+ GPIO_GROUP(GPIOZ_3),
+ GPIO_GROUP(GPIOZ_4),
+ GPIO_GROUP(GPIOZ_5),
+ GPIO_GROUP(GPIOZ_6),
+ GPIO_GROUP(GPIOZ_7),
+ GPIO_GROUP(GPIOZ_8),
+ GPIO_GROUP(GPIOZ_9),
+ GPIO_GROUP(GPIOZ_10),
+ GPIO_GROUP(GPIOZ_11),
+ GPIO_GROUP(GPIOZ_12),
+ GPIO_GROUP(GPIOZ_13),
+ GPIO_GROUP(GPIOZ_14),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -727,22 +716,22 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
};
static struct meson_pmx_group meson8_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -1041,24 +1030,23 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
};
static struct meson_bank meson8_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_16, 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_0, GPIODV_29, 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", GPIOZ_0, GPIOZ_14, 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", CARD_0, CARD_6, 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
static struct meson_bank meson8_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8_cbus_pins,
.groups = meson8_cbus_groups,
.funcs = meson8_cbus_functions,
@@ -1067,11 +1055,11 @@ struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8_cbus_functions),
.num_banks = ARRAY_SIZE(meson8_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.name = "ao-bank",
- .pin_base = 120,
.pins = meson8_aobus_pins,
.groups = meson8_aobus_groups,
.funcs = meson8_aobus_functions,
@@ -1080,4 +1068,26 @@ struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8_aobus_functions),
.num_banks = ARRAY_SIZE(meson8_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8-pinctrl",
+ .of_match_table = meson8_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8_pinctrl_driver);
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 71f216b5b0b9..5bd808dc81e1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -14,408 +14,405 @@
#include <dt-bindings/gpio/meson8b-gpio.h>
#include "pinctrl-meson.h"
-
-#define AO_OFF 130
+#include "pinctrl-meson8-pmx.h"
static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
- MESON_PIN(GPIOX_0, 0),
- MESON_PIN(GPIOX_1, 0),
- MESON_PIN(GPIOX_2, 0),
- MESON_PIN(GPIOX_3, 0),
- MESON_PIN(GPIOX_4, 0),
- MESON_PIN(GPIOX_5, 0),
- MESON_PIN(GPIOX_6, 0),
- MESON_PIN(GPIOX_7, 0),
- MESON_PIN(GPIOX_8, 0),
- MESON_PIN(GPIOX_9, 0),
- MESON_PIN(GPIOX_10, 0),
- MESON_PIN(GPIOX_11, 0),
- MESON_PIN(GPIOX_16, 0),
- MESON_PIN(GPIOX_17, 0),
- MESON_PIN(GPIOX_18, 0),
- MESON_PIN(GPIOX_19, 0),
- MESON_PIN(GPIOX_20, 0),
- MESON_PIN(GPIOX_21, 0),
-
- MESON_PIN(GPIOY_0, 0),
- MESON_PIN(GPIOY_1, 0),
- MESON_PIN(GPIOY_3, 0),
- MESON_PIN(GPIOY_6, 0),
- MESON_PIN(GPIOY_7, 0),
- MESON_PIN(GPIOY_8, 0),
- MESON_PIN(GPIOY_9, 0),
- MESON_PIN(GPIOY_10, 0),
- MESON_PIN(GPIOY_11, 0),
- MESON_PIN(GPIOY_12, 0),
- MESON_PIN(GPIOY_13, 0),
- MESON_PIN(GPIOY_14, 0),
-
- MESON_PIN(GPIODV_9, 0),
- MESON_PIN(GPIODV_24, 0),
- MESON_PIN(GPIODV_25, 0),
- MESON_PIN(GPIODV_26, 0),
- MESON_PIN(GPIODV_27, 0),
- MESON_PIN(GPIODV_28, 0),
- MESON_PIN(GPIODV_29, 0),
-
- MESON_PIN(GPIOH_0, 0),
- MESON_PIN(GPIOH_1, 0),
- MESON_PIN(GPIOH_2, 0),
- MESON_PIN(GPIOH_3, 0),
- MESON_PIN(GPIOH_4, 0),
- MESON_PIN(GPIOH_5, 0),
- MESON_PIN(GPIOH_6, 0),
- MESON_PIN(GPIOH_7, 0),
- MESON_PIN(GPIOH_8, 0),
- MESON_PIN(GPIOH_9, 0),
-
- MESON_PIN(CARD_0, 0),
- MESON_PIN(CARD_1, 0),
- MESON_PIN(CARD_2, 0),
- MESON_PIN(CARD_3, 0),
- MESON_PIN(CARD_4, 0),
- MESON_PIN(CARD_5, 0),
- MESON_PIN(CARD_6, 0),
-
- MESON_PIN(BOOT_0, 0),
- MESON_PIN(BOOT_1, 0),
- MESON_PIN(BOOT_2, 0),
- MESON_PIN(BOOT_3, 0),
- MESON_PIN(BOOT_4, 0),
- MESON_PIN(BOOT_5, 0),
- MESON_PIN(BOOT_6, 0),
- MESON_PIN(BOOT_7, 0),
- MESON_PIN(BOOT_8, 0),
- MESON_PIN(BOOT_9, 0),
- MESON_PIN(BOOT_10, 0),
- MESON_PIN(BOOT_11, 0),
- MESON_PIN(BOOT_12, 0),
- MESON_PIN(BOOT_13, 0),
- MESON_PIN(BOOT_14, 0),
- MESON_PIN(BOOT_15, 0),
- MESON_PIN(BOOT_16, 0),
- MESON_PIN(BOOT_17, 0),
- MESON_PIN(BOOT_18, 0),
-
- MESON_PIN(DIF_0_P, 0),
- MESON_PIN(DIF_0_N, 0),
- MESON_PIN(DIF_1_P, 0),
- MESON_PIN(DIF_1_N, 0),
- MESON_PIN(DIF_2_P, 0),
- MESON_PIN(DIF_2_N, 0),
- MESON_PIN(DIF_3_P, 0),
- MESON_PIN(DIF_3_N, 0),
- MESON_PIN(DIF_4_P, 0),
- MESON_PIN(DIF_4_N, 0),
+ MESON_PIN(GPIOX_0),
+ MESON_PIN(GPIOX_1),
+ MESON_PIN(GPIOX_2),
+ MESON_PIN(GPIOX_3),
+ MESON_PIN(GPIOX_4),
+ MESON_PIN(GPIOX_5),
+ MESON_PIN(GPIOX_6),
+ MESON_PIN(GPIOX_7),
+ MESON_PIN(GPIOX_8),
+ MESON_PIN(GPIOX_9),
+ MESON_PIN(GPIOX_10),
+ MESON_PIN(GPIOX_11),
+ MESON_PIN(GPIOX_16),
+ MESON_PIN(GPIOX_17),
+ MESON_PIN(GPIOX_18),
+ MESON_PIN(GPIOX_19),
+ MESON_PIN(GPIOX_20),
+ MESON_PIN(GPIOX_21),
+
+ MESON_PIN(GPIOY_0),
+ MESON_PIN(GPIOY_1),
+ MESON_PIN(GPIOY_3),
+ MESON_PIN(GPIOY_6),
+ MESON_PIN(GPIOY_7),
+ MESON_PIN(GPIOY_8),
+ MESON_PIN(GPIOY_9),
+ MESON_PIN(GPIOY_10),
+ MESON_PIN(GPIOY_11),
+ MESON_PIN(GPIOY_12),
+ MESON_PIN(GPIOY_13),
+ MESON_PIN(GPIOY_14),
+
+ MESON_PIN(GPIODV_9),
+ MESON_PIN(GPIODV_24),
+ MESON_PIN(GPIODV_25),
+ MESON_PIN(GPIODV_26),
+ MESON_PIN(GPIODV_27),
+ MESON_PIN(GPIODV_28),
+ MESON_PIN(GPIODV_29),
+
+ MESON_PIN(GPIOH_0),
+ MESON_PIN(GPIOH_1),
+ MESON_PIN(GPIOH_2),
+ MESON_PIN(GPIOH_3),
+ MESON_PIN(GPIOH_4),
+ MESON_PIN(GPIOH_5),
+ MESON_PIN(GPIOH_6),
+ MESON_PIN(GPIOH_7),
+ MESON_PIN(GPIOH_8),
+ MESON_PIN(GPIOH_9),
+
+ MESON_PIN(CARD_0),
+ MESON_PIN(CARD_1),
+ MESON_PIN(CARD_2),
+ MESON_PIN(CARD_3),
+ MESON_PIN(CARD_4),
+ MESON_PIN(CARD_5),
+ MESON_PIN(CARD_6),
+
+ MESON_PIN(BOOT_0),
+ MESON_PIN(BOOT_1),
+ MESON_PIN(BOOT_2),
+ MESON_PIN(BOOT_3),
+ MESON_PIN(BOOT_4),
+ MESON_PIN(BOOT_5),
+ MESON_PIN(BOOT_6),
+ MESON_PIN(BOOT_7),
+ MESON_PIN(BOOT_8),
+ MESON_PIN(BOOT_9),
+ MESON_PIN(BOOT_10),
+ MESON_PIN(BOOT_11),
+ MESON_PIN(BOOT_12),
+ MESON_PIN(BOOT_13),
+ MESON_PIN(BOOT_14),
+ MESON_PIN(BOOT_15),
+ MESON_PIN(BOOT_16),
+ MESON_PIN(BOOT_17),
+ MESON_PIN(BOOT_18),
+
+ MESON_PIN(DIF_0_P),
+ MESON_PIN(DIF_0_N),
+ MESON_PIN(DIF_1_P),
+ MESON_PIN(DIF_1_N),
+ MESON_PIN(DIF_2_P),
+ MESON_PIN(DIF_2_N),
+ MESON_PIN(DIF_3_P),
+ MESON_PIN(DIF_3_N),
+ MESON_PIN(DIF_4_P),
+ MESON_PIN(DIF_4_N),
};
static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
- MESON_PIN(GPIOAO_0, AO_OFF),
- MESON_PIN(GPIOAO_1, AO_OFF),
- MESON_PIN(GPIOAO_2, AO_OFF),
- MESON_PIN(GPIOAO_3, AO_OFF),
- MESON_PIN(GPIOAO_4, AO_OFF),
- MESON_PIN(GPIOAO_5, AO_OFF),
- MESON_PIN(GPIOAO_6, AO_OFF),
- MESON_PIN(GPIOAO_7, AO_OFF),
- MESON_PIN(GPIOAO_8, AO_OFF),
- MESON_PIN(GPIOAO_9, AO_OFF),
- MESON_PIN(GPIOAO_10, AO_OFF),
- MESON_PIN(GPIOAO_11, AO_OFF),
- MESON_PIN(GPIOAO_12, AO_OFF),
- MESON_PIN(GPIOAO_13, AO_OFF),
+ MESON_PIN(GPIOAO_0),
+ MESON_PIN(GPIOAO_1),
+ MESON_PIN(GPIOAO_2),
+ MESON_PIN(GPIOAO_3),
+ MESON_PIN(GPIOAO_4),
+ MESON_PIN(GPIOAO_5),
+ MESON_PIN(GPIOAO_6),
+ MESON_PIN(GPIOAO_7),
+ MESON_PIN(GPIOAO_8),
+ MESON_PIN(GPIOAO_9),
+ MESON_PIN(GPIOAO_10),
+ MESON_PIN(GPIOAO_11),
+ MESON_PIN(GPIOAO_12),
+ MESON_PIN(GPIOAO_13),
/*
	 * The following 2 pins are not mentioned in the public datasheet
* According to this datasheet, they can't be used with the gpio
* interrupt controller
*/
- MESON_PIN(GPIO_BSD_EN, AO_OFF),
- MESON_PIN(GPIO_TEST_N, AO_OFF),
+ MESON_PIN(GPIO_BSD_EN),
+ MESON_PIN(GPIO_TEST_N),
};
/* bank X */
-static const unsigned int sd_d0_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sd_d1_a_pins[] = { PIN(GPIOX_1, 0) };
-static const unsigned int sd_d2_a_pins[] = { PIN(GPIOX_2, 0) };
-static const unsigned int sd_d3_a_pins[] = { PIN(GPIOX_3, 0) };
-static const unsigned int sdxc_d0_0_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int sdxc_d47_a_pins[] = { PIN(GPIOX_4, 0), PIN(GPIOX_5, 0),
- PIN(GPIOX_6, 0), PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_d13_0_a_pins[] = { PIN(GPIOX_5, 0), PIN(GPIOX_6, 0),
- PIN(GPIOX_7, 0) };
-static const unsigned int sd_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sd_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int uart_tx_b0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int uart_rx_b0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int uart_cts_b0_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int uart_rts_b0_pins[] = { PIN(GPIOX_19, 0) };
-
-static const unsigned int sdxc_d0_1_a_pins[] = { PIN(GPIOX_0, 0) };
-static const unsigned int sdxc_d13_1_a_pins[] = { PIN(GPIOX_1, 0), PIN(GPIOX_2, 0),
- PIN(GPIOX_3, 0) };
-static const unsigned int pcm_out_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int pcm_in_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int pcm_fs_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int pcm_clk_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int sdxc_clk_a_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int sdxc_cmd_a_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int pwm_vs_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_vs_1_pins[] = { PIN(GPIOX_11, 0) };
-
-static const unsigned int uart_tx_a_pins[] = { PIN(GPIOX_4, 0) };
-static const unsigned int uart_rx_a_pins[] = { PIN(GPIOX_5, 0) };
-static const unsigned int uart_cts_a_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int uart_rts_a_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int uart_tx_b1_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int uart_rx_b1_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int uart_cts_b1_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int uart_rts_b1_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int iso7816_0_clk_pins[] = { PIN(GPIOX_6, 0) };
-static const unsigned int iso7816_0_data_pins[] = { PIN(GPIOX_7, 0) };
-static const unsigned int spi_sclk_0_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int spi_miso_0_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int spi_mosi_0_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int iso7816_det_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int iso7816_reset_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int iso7816_1_clk_pins[] = { PIN(GPIOX_18, 0) };
-static const unsigned int iso7816_1_data_pins[] = { PIN(GPIOX_19, 0) };
-static const unsigned int spi_ss0_0_pins[] = { PIN(GPIOX_20, 0) };
-
-static const unsigned int tsin_clk_b_pins[] = { PIN(GPIOX_8, 0) };
-static const unsigned int tsin_sop_b_pins[] = { PIN(GPIOX_9, 0) };
-static const unsigned int tsin_d0_b_pins[] = { PIN(GPIOX_10, 0) };
-static const unsigned int pwm_b_pins[] = { PIN(GPIOX_11, 0) };
-static const unsigned int i2c_sda_d0_pins[] = { PIN(GPIOX_16, 0) };
-static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
-static const unsigned int tsin_d_valid_b_pins[] = { PIN(GPIOX_20, 0) };
+static const unsigned int sd_d0_a_pins[] = { GPIOX_0 };
+static const unsigned int sd_d1_a_pins[] = { GPIOX_1 };
+static const unsigned int sd_d2_a_pins[] = { GPIOX_2 };
+static const unsigned int sd_d3_a_pins[] = { GPIOX_3 };
+static const unsigned int sdxc_d0_0_a_pins[] = { GPIOX_4 };
+static const unsigned int sdxc_d47_a_pins[] = { GPIOX_4, GPIOX_5,
+ GPIOX_6, GPIOX_7 };
+static const unsigned int sdxc_d13_0_a_pins[] = { GPIOX_5, GPIOX_6,
+ GPIOX_7 };
+static const unsigned int sd_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sd_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int xtal_32k_out_pins[] = { GPIOX_10 };
+static const unsigned int xtal_24m_out_pins[] = { GPIOX_11 };
+static const unsigned int uart_tx_b0_pins[] = { GPIOX_16 };
+static const unsigned int uart_rx_b0_pins[] = { GPIOX_17 };
+static const unsigned int uart_cts_b0_pins[] = { GPIOX_18 };
+static const unsigned int uart_rts_b0_pins[] = { GPIOX_19 };
+
+static const unsigned int sdxc_d0_1_a_pins[] = { GPIOX_0 };
+static const unsigned int sdxc_d13_1_a_pins[] = { GPIOX_1, GPIOX_2,
+ GPIOX_3 };
+static const unsigned int pcm_out_a_pins[] = { GPIOX_4 };
+static const unsigned int pcm_in_a_pins[] = { GPIOX_5 };
+static const unsigned int pcm_fs_a_pins[] = { GPIOX_6 };
+static const unsigned int pcm_clk_a_pins[] = { GPIOX_7 };
+static const unsigned int sdxc_clk_a_pins[] = { GPIOX_8 };
+static const unsigned int sdxc_cmd_a_pins[] = { GPIOX_9 };
+static const unsigned int pwm_vs_0_pins[] = { GPIOX_10 };
+static const unsigned int pwm_e_pins[] = { GPIOX_10 };
+static const unsigned int pwm_vs_1_pins[] = { GPIOX_11 };
+
+static const unsigned int uart_tx_a_pins[] = { GPIOX_4 };
+static const unsigned int uart_rx_a_pins[] = { GPIOX_5 };
+static const unsigned int uart_cts_a_pins[] = { GPIOX_6 };
+static const unsigned int uart_rts_a_pins[] = { GPIOX_7 };
+static const unsigned int uart_tx_b1_pins[] = { GPIOX_8 };
+static const unsigned int uart_rx_b1_pins[] = { GPIOX_9 };
+static const unsigned int uart_cts_b1_pins[] = { GPIOX_10 };
+static const unsigned int uart_rts_b1_pins[] = { GPIOX_20 };
+
+static const unsigned int iso7816_0_clk_pins[] = { GPIOX_6 };
+static const unsigned int iso7816_0_data_pins[] = { GPIOX_7 };
+static const unsigned int spi_sclk_0_pins[] = { GPIOX_8 };
+static const unsigned int spi_miso_0_pins[] = { GPIOX_9 };
+static const unsigned int spi_mosi_0_pins[] = { GPIOX_10 };
+static const unsigned int iso7816_det_pins[] = { GPIOX_16 };
+static const unsigned int iso7816_reset_pins[] = { GPIOX_17 };
+static const unsigned int iso7816_1_clk_pins[] = { GPIOX_18 };
+static const unsigned int iso7816_1_data_pins[] = { GPIOX_19 };
+static const unsigned int spi_ss0_0_pins[] = { GPIOX_20 };
+
+static const unsigned int tsin_clk_b_pins[] = { GPIOX_8 };
+static const unsigned int tsin_sop_b_pins[] = { GPIOX_9 };
+static const unsigned int tsin_d0_b_pins[] = { GPIOX_10 };
+static const unsigned int pwm_b_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d0_pins[] = { GPIOX_16 };
+static const unsigned int i2c_sck_d0_pins[] = { GPIOX_17 };
+static const unsigned int tsin_d_valid_b_pins[] = { GPIOX_20 };
/* bank Y */
-static const unsigned int tsin_d_valid_a_pins[] = { PIN(GPIOY_0, 0) };
-static const unsigned int tsin_sop_a_pins[] = { PIN(GPIOY_1, 0) };
-static const unsigned int tsin_d17_a_pins[] = { PIN(GPIOY_6, 0), PIN(GPIOY_7, 0),
- PIN(GPIOY_10, 0), PIN(GPIOY_11, 0),
- PIN(GPIOY_12, 0), PIN(GPIOY_13, 0),
- PIN(GPIOY_14, 0) };
-static const unsigned int tsin_clk_a_pins[] = { PIN(GPIOY_8, 0) };
-static const unsigned int tsin_d0_a_pins[] = { PIN(GPIOY_9, 0) };
+static const unsigned int tsin_d_valid_a_pins[] = { GPIOY_0 };
+static const unsigned int tsin_sop_a_pins[] = { GPIOY_1 };
+static const unsigned int tsin_d17_a_pins[] = {
+ GPIOY_6, GPIOY_7, GPIOY_10, GPIOY_11, GPIOY_12, GPIOY_13, GPIOY_14,
+};
+static const unsigned int tsin_clk_a_pins[] = { GPIOY_8 };
+static const unsigned int tsin_d0_a_pins[] = { GPIOY_9 };
-static const unsigned int spdif_out_0_pins[] = { PIN(GPIOY_3, 0) };
+static const unsigned int spdif_out_0_pins[] = { GPIOY_3 };
-static const unsigned int xtal_24m_pins[] = { PIN(GPIOY_3, 0) };
-static const unsigned int iso7816_2_clk_pins[] = { PIN(GPIOY_13, 0) };
-static const unsigned int iso7816_2_data_pins[] = { PIN(GPIOY_14, 0) };
+static const unsigned int xtal_24m_pins[] = { GPIOY_3 };
+static const unsigned int iso7816_2_clk_pins[] = { GPIOY_13 };
+static const unsigned int iso7816_2_data_pins[] = { GPIOY_14 };
/* bank DV */
-static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_d_pins[] = { GPIODV_28 };
+static const unsigned int pwm_c0_pins[] = { GPIODV_29 };
-static const unsigned int pwm_vs_2_pins[] = { PIN(GPIODV_9, 0) };
-static const unsigned int pwm_vs_3_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int pwm_vs_4_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_vs_2_pins[] = { GPIODV_9 };
+static const unsigned int pwm_vs_3_pins[] = { GPIODV_28 };
+static const unsigned int pwm_vs_4_pins[] = { GPIODV_29 };
-static const unsigned int xtal24_out_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int xtal24_out_pins[] = { GPIODV_29 };
-static const unsigned int uart_tx_c_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int uart_rx_c_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int uart_cts_c_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int uart_rts_c_pins[] = { PIN(GPIODV_27, 0) };
+static const unsigned int uart_tx_c_pins[] = { GPIODV_24 };
+static const unsigned int uart_rx_c_pins[] = { GPIODV_25 };
+static const unsigned int uart_cts_c_pins[] = { GPIODV_26 };
+static const unsigned int uart_rts_c_pins[] = { GPIODV_27 };
-static const unsigned int pwm_c1_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int pwm_c1_pins[] = { GPIODV_9 };
-static const unsigned int i2c_sda_a_pins[] = { PIN(GPIODV_24, 0) };
-static const unsigned int i2c_sck_a_pins[] = { PIN(GPIODV_25, 0) };
-static const unsigned int i2c_sda_b0_pins[] = { PIN(GPIODV_26, 0) };
-static const unsigned int i2c_sck_b0_pins[] = { PIN(GPIODV_27, 0) };
-static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIODV_28, 0) };
-static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+static const unsigned int i2c_sda_b0_pins[] = { GPIODV_26 };
+static const unsigned int i2c_sck_b0_pins[] = { GPIODV_27 };
+static const unsigned int i2c_sda_c0_pins[] = { GPIODV_28 };
+static const unsigned int i2c_sck_c0_pins[] = { GPIODV_29 };
/* bank H */
-static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int hdmi_scl_pins[] = { PIN(GPIOH_2, 0) };
-static const unsigned int hdmi_cec_0_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int eth_txd1_0_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int eth_txd0_0_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int clk_24m_out_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int spi_ss1_pins[] = { PIN(GPIOH_0, 0) };
-static const unsigned int spi_ss2_pins[] = { PIN(GPIOH_1, 0) };
-static const unsigned int spi_ss0_1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int spi_miso_1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int spi_mosi_1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int spi_sclk_1_pins[] = { PIN(GPIOH_6, 0) };
-
-static const unsigned int eth_txd3_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int eth_txd2_pins[] = { PIN(GPIOH_8, 0) };
-static const unsigned int eth_tx_clk_pins[] = { PIN(GPIOH_9, 0) };
-
-static const unsigned int i2c_sda_b1_pins[] = { PIN(GPIOH_3, 0) };
-static const unsigned int i2c_sck_b1_pins[] = { PIN(GPIOH_4, 0) };
-static const unsigned int i2c_sda_c1_pins[] = { PIN(GPIOH_5, 0) };
-static const unsigned int i2c_sck_c1_pins[] = { PIN(GPIOH_6, 0) };
-static const unsigned int i2c_sda_d1_pins[] = { PIN(GPIOH_7, 0) };
-static const unsigned int i2c_sck_d1_pins[] = { PIN(GPIOH_8, 0) };
+static const unsigned int hdmi_hpd_pins[] = { GPIOH_0 };
+static const unsigned int hdmi_sda_pins[] = { GPIOH_1 };
+static const unsigned int hdmi_scl_pins[] = { GPIOH_2 };
+static const unsigned int hdmi_cec_0_pins[] = { GPIOH_3 };
+static const unsigned int eth_txd1_0_pins[] = { GPIOH_5 };
+static const unsigned int eth_txd0_0_pins[] = { GPIOH_6 };
+static const unsigned int clk_24m_out_pins[] = { GPIOH_9 };
+
+static const unsigned int spi_ss1_pins[] = { GPIOH_0 };
+static const unsigned int spi_ss2_pins[] = { GPIOH_1 };
+static const unsigned int spi_ss0_1_pins[] = { GPIOH_3 };
+static const unsigned int spi_miso_1_pins[] = { GPIOH_4 };
+static const unsigned int spi_mosi_1_pins[] = { GPIOH_5 };
+static const unsigned int spi_sclk_1_pins[] = { GPIOH_6 };
+
+static const unsigned int eth_txd3_pins[] = { GPIOH_7 };
+static const unsigned int eth_txd2_pins[] = { GPIOH_8 };
+static const unsigned int eth_tx_clk_pins[] = { GPIOH_9 };
+
+static const unsigned int i2c_sda_b1_pins[] = { GPIOH_3 };
+static const unsigned int i2c_sck_b1_pins[] = { GPIOH_4 };
+static const unsigned int i2c_sda_c1_pins[] = { GPIOH_5 };
+static const unsigned int i2c_sck_c1_pins[] = { GPIOH_6 };
+static const unsigned int i2c_sda_d1_pins[] = { GPIOH_7 };
+static const unsigned int i2c_sck_d1_pins[] = { GPIOH_8 };
/* bank BOOT */
-static const unsigned int nand_io_pins[] = { PIN(BOOT_0, 0), PIN(BOOT_1, 0),
- PIN(BOOT_2, 0), PIN(BOOT_3, 0),
- PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int nand_io_ce0_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int nand_io_ce1_pins[] = { PIN(BOOT_9, 0) };
-static const unsigned int nand_io_rb0_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nand_ale_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nand_cle_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nand_wen_clk_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nand_ren_clk_pins[] = { PIN(BOOT_14, 0) };
-static const unsigned int nand_dqs_15_pins[] = { PIN(BOOT_15, 0) };
-static const unsigned int nand_dqs_18_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sdxc_d0_c_pins[] = { PIN(BOOT_0, 0)};
-static const unsigned int sdxc_d13_c_pins[] = { PIN(BOOT_1, 0), PIN(BOOT_2, 0),
- PIN(BOOT_3, 0) };
-static const unsigned int sdxc_d47_c_pins[] = { PIN(BOOT_4, 0), PIN(BOOT_5, 0),
- PIN(BOOT_6, 0), PIN(BOOT_7, 0) };
-static const unsigned int sdxc_clk_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sdxc_cmd_c_pins[] = { PIN(BOOT_10, 0) };
-static const unsigned int nor_d_pins[] = { PIN(BOOT_11, 0) };
-static const unsigned int nor_q_pins[] = { PIN(BOOT_12, 0) };
-static const unsigned int nor_c_pins[] = { PIN(BOOT_13, 0) };
-static const unsigned int nor_cs_pins[] = { PIN(BOOT_18, 0) };
-
-static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
-static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
-static const unsigned int sd_d2_c_pins[] = { PIN(BOOT_2, 0) };
-static const unsigned int sd_d3_c_pins[] = { PIN(BOOT_3, 0) };
-static const unsigned int sd_cmd_c_pins[] = { PIN(BOOT_8, 0) };
-static const unsigned int sd_clk_c_pins[] = { PIN(BOOT_10, 0) };
+static const unsigned int nand_io_pins[] = {
+ BOOT_0, BOOT_1, BOOT_2, BOOT_3, BOOT_4, BOOT_5, BOOT_6, BOOT_7
+};
+static const unsigned int nand_io_ce0_pins[] = { BOOT_8 };
+static const unsigned int nand_io_ce1_pins[] = { BOOT_9 };
+static const unsigned int nand_io_rb0_pins[] = { BOOT_10 };
+static const unsigned int nand_ale_pins[] = { BOOT_11 };
+static const unsigned int nand_cle_pins[] = { BOOT_12 };
+static const unsigned int nand_wen_clk_pins[] = { BOOT_13 };
+static const unsigned int nand_ren_clk_pins[] = { BOOT_14 };
+static const unsigned int nand_dqs_15_pins[] = { BOOT_15 };
+static const unsigned int nand_dqs_18_pins[] = { BOOT_18 };
+
+static const unsigned int sdxc_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sdxc_d13_c_pins[] = { BOOT_1, BOOT_2,
+ BOOT_3 };
+static const unsigned int sdxc_d47_c_pins[] = { BOOT_4, BOOT_5,
+ BOOT_6, BOOT_7 };
+static const unsigned int sdxc_clk_c_pins[] = { BOOT_8 };
+static const unsigned int sdxc_cmd_c_pins[] = { BOOT_10 };
+static const unsigned int nor_d_pins[] = { BOOT_11 };
+static const unsigned int nor_q_pins[] = { BOOT_12 };
+static const unsigned int nor_c_pins[] = { BOOT_13 };
+static const unsigned int nor_cs_pins[] = { BOOT_18 };
+
+static const unsigned int sd_d0_c_pins[] = { BOOT_0 };
+static const unsigned int sd_d1_c_pins[] = { BOOT_1 };
+static const unsigned int sd_d2_c_pins[] = { BOOT_2 };
+static const unsigned int sd_d3_c_pins[] = { BOOT_3 };
+static const unsigned int sd_cmd_c_pins[] = { BOOT_8 };
+static const unsigned int sd_clk_c_pins[] = { BOOT_10 };
/* bank CARD */
-static const unsigned int sd_d1_b_pins[] = { PIN(CARD_0, 0) };
-static const unsigned int sd_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sd_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sd_cmd_b_pins[] = { PIN(CARD_3, 0) };
-static const unsigned int sd_d3_b_pins[] = { PIN(CARD_4, 0) };
-static const unsigned int sd_d2_b_pins[] = { PIN(CARD_5, 0) };
-
-static const unsigned int sdxc_d13_b_pins[] = { PIN(CARD_0, 0), PIN(CARD_4, 0),
- PIN(CARD_5, 0) };
-static const unsigned int sdxc_d0_b_pins[] = { PIN(CARD_1, 0) };
-static const unsigned int sdxc_clk_b_pins[] = { PIN(CARD_2, 0) };
-static const unsigned int sdxc_cmd_b_pins[] = { PIN(CARD_3, 0) };
+static const unsigned int sd_d1_b_pins[] = { CARD_0 };
+static const unsigned int sd_d0_b_pins[] = { CARD_1 };
+static const unsigned int sd_clk_b_pins[] = { CARD_2 };
+static const unsigned int sd_cmd_b_pins[] = { CARD_3 };
+static const unsigned int sd_d3_b_pins[] = { CARD_4 };
+static const unsigned int sd_d2_b_pins[] = { CARD_5 };
+
+static const unsigned int sdxc_d13_b_pins[] = { CARD_0, CARD_4,
+ CARD_5 };
+static const unsigned int sdxc_d0_b_pins[] = { CARD_1 };
+static const unsigned int sdxc_clk_b_pins[] = { CARD_2 };
+static const unsigned int sdxc_cmd_b_pins[] = { CARD_3 };
/* bank AO */
-static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int clk_32k_in_out_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int hdmi_cec_1_pins[] = { PIN(GPIOAO_12, AO_OFF) };
-static const unsigned int ir_blaster_pins[] = { PIN(GPIOAO_13, AO_OFF) };
-
-static const unsigned int pwm_c2_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int i2c_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int i2c_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int ir_remote_out_pins[] = { PIN(GPIOAO_7, AO_OFF) };
-static const unsigned int i2s_am_clk_out_pins[] = { PIN(GPIOAO_8, AO_OFF) };
-static const unsigned int i2s_ao_clk_out_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_out_pins[] = { PIN(GPIOAO_10, AO_OFF) };
-static const unsigned int i2s_out_01_pins[] = { PIN(GPIOAO_11, AO_OFF) };
-
-static const unsigned int uart_tx_ao_b0_pins[] = { PIN(GPIOAO_0, AO_OFF) };
-static const unsigned int uart_rx_ao_b0_pins[] = { PIN(GPIOAO_1, AO_OFF) };
-static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, AO_OFF) };
-static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, AO_OFF) };
-static const unsigned int uart_tx_ao_b1_pins[] = { PIN(GPIOAO_4, AO_OFF) };
-static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static const unsigned int spdif_out_1_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-
-static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOAO_6, AO_OFF) };
-static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOAO_9, AO_OFF) };
-static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+static const unsigned int uart_tx_ao_a_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_a_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_a_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_a_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_mst_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_mst_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int clk_32k_in_out_pins[] = { GPIOAO_6 };
+static const unsigned int remote_input_pins[] = { GPIOAO_7 };
+static const unsigned int hdmi_cec_1_pins[] = { GPIOAO_12 };
+static const unsigned int ir_blaster_pins[] = { GPIOAO_13 };
+
+static const unsigned int pwm_c2_pins[] = { GPIOAO_3 };
+static const unsigned int i2c_sck_ao_pins[] = { GPIOAO_4 };
+static const unsigned int i2c_sda_ao_pins[] = { GPIOAO_5 };
+static const unsigned int ir_remote_out_pins[] = { GPIOAO_7 };
+static const unsigned int i2s_am_clk_out_pins[] = { GPIOAO_8 };
+static const unsigned int i2s_ao_clk_out_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_out_pins[] = { GPIOAO_10 };
+static const unsigned int i2s_out_01_pins[] = { GPIOAO_11 };
+
+static const unsigned int uart_tx_ao_b0_pins[] = { GPIOAO_0 };
+static const unsigned int uart_rx_ao_b0_pins[] = { GPIOAO_1 };
+static const unsigned int uart_cts_ao_b_pins[] = { GPIOAO_2 };
+static const unsigned int uart_rts_ao_b_pins[] = { GPIOAO_3 };
+static const unsigned int uart_tx_ao_b1_pins[] = { GPIOAO_4 };
+static const unsigned int uart_rx_ao_b1_pins[] = { GPIOAO_5 };
+static const unsigned int spdif_out_1_pins[] = { GPIOAO_6 };
+
+static const unsigned int i2s_in_ch01_pins[] = { GPIOAO_6 };
+static const unsigned int i2s_ao_clk_in_pins[] = { GPIOAO_9 };
+static const unsigned int i2s_lr_clk_in_pins[] = { GPIOAO_10 };
/* bank DIF */
-static const unsigned int eth_rxd1_pins[] = { PIN(DIF_0_P, 0) };
-static const unsigned int eth_rxd0_pins[] = { PIN(DIF_0_N, 0) };
-static const unsigned int eth_rx_dv_pins[] = { PIN(DIF_1_P, 0) };
-static const unsigned int eth_rx_clk_pins[] = { PIN(DIF_1_N, 0) };
-static const unsigned int eth_txd0_1_pins[] = { PIN(DIF_2_P, 0) };
-static const unsigned int eth_txd1_1_pins[] = { PIN(DIF_2_N, 0) };
-static const unsigned int eth_tx_en_pins[] = { PIN(DIF_3_P, 0) };
-static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
-static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
-static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
+static const unsigned int eth_rxd1_pins[] = { DIF_0_P };
+static const unsigned int eth_rxd0_pins[] = { DIF_0_N };
+static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
+static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
+static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
+static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
+static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
+static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
+static const unsigned int eth_mdc_pins[] = { DIF_4_P };
+static const unsigned int eth_mdio_en_pins[] = { DIF_4_N };
static struct meson_pmx_group meson8b_cbus_groups[] = {
- GPIO_GROUP(GPIOX_0, 0),
- GPIO_GROUP(GPIOX_1, 0),
- GPIO_GROUP(GPIOX_2, 0),
- GPIO_GROUP(GPIOX_3, 0),
- GPIO_GROUP(GPIOX_4, 0),
- GPIO_GROUP(GPIOX_5, 0),
- GPIO_GROUP(GPIOX_6, 0),
- GPIO_GROUP(GPIOX_7, 0),
- GPIO_GROUP(GPIOX_8, 0),
- GPIO_GROUP(GPIOX_9, 0),
- GPIO_GROUP(GPIOX_10, 0),
- GPIO_GROUP(GPIOX_11, 0),
- GPIO_GROUP(GPIOX_16, 0),
- GPIO_GROUP(GPIOX_17, 0),
- GPIO_GROUP(GPIOX_18, 0),
- GPIO_GROUP(GPIOX_19, 0),
- GPIO_GROUP(GPIOX_20, 0),
- GPIO_GROUP(GPIOX_21, 0),
-
- GPIO_GROUP(GPIOY_0, 0),
- GPIO_GROUP(GPIOY_1, 0),
- GPIO_GROUP(GPIOY_3, 0),
- GPIO_GROUP(GPIOY_6, 0),
- GPIO_GROUP(GPIOY_7, 0),
- GPIO_GROUP(GPIOY_8, 0),
- GPIO_GROUP(GPIOY_9, 0),
- GPIO_GROUP(GPIOY_10, 0),
- GPIO_GROUP(GPIOY_11, 0),
- GPIO_GROUP(GPIOY_12, 0),
- GPIO_GROUP(GPIOY_13, 0),
- GPIO_GROUP(GPIOY_14, 0),
-
- GPIO_GROUP(GPIODV_9, 0),
- GPIO_GROUP(GPIODV_24, 0),
- GPIO_GROUP(GPIODV_25, 0),
- GPIO_GROUP(GPIODV_26, 0),
- GPIO_GROUP(GPIODV_27, 0),
- GPIO_GROUP(GPIODV_28, 0),
- GPIO_GROUP(GPIODV_29, 0),
-
- GPIO_GROUP(GPIOH_0, 0),
- GPIO_GROUP(GPIOH_1, 0),
- GPIO_GROUP(GPIOH_2, 0),
- GPIO_GROUP(GPIOH_3, 0),
- GPIO_GROUP(GPIOH_4, 0),
- GPIO_GROUP(GPIOH_5, 0),
- GPIO_GROUP(GPIOH_6, 0),
- GPIO_GROUP(GPIOH_7, 0),
- GPIO_GROUP(GPIOH_8, 0),
- GPIO_GROUP(GPIOH_9, 0),
-
- GPIO_GROUP(DIF_0_P, 0),
- GPIO_GROUP(DIF_0_N, 0),
- GPIO_GROUP(DIF_1_P, 0),
- GPIO_GROUP(DIF_1_N, 0),
- GPIO_GROUP(DIF_2_P, 0),
- GPIO_GROUP(DIF_2_N, 0),
- GPIO_GROUP(DIF_3_P, 0),
- GPIO_GROUP(DIF_3_N, 0),
- GPIO_GROUP(DIF_4_P, 0),
- GPIO_GROUP(DIF_4_N, 0),
+ GPIO_GROUP(GPIOX_0),
+ GPIO_GROUP(GPIOX_1),
+ GPIO_GROUP(GPIOX_2),
+ GPIO_GROUP(GPIOX_3),
+ GPIO_GROUP(GPIOX_4),
+ GPIO_GROUP(GPIOX_5),
+ GPIO_GROUP(GPIOX_6),
+ GPIO_GROUP(GPIOX_7),
+ GPIO_GROUP(GPIOX_8),
+ GPIO_GROUP(GPIOX_9),
+ GPIO_GROUP(GPIOX_10),
+ GPIO_GROUP(GPIOX_11),
+ GPIO_GROUP(GPIOX_16),
+ GPIO_GROUP(GPIOX_17),
+ GPIO_GROUP(GPIOX_18),
+ GPIO_GROUP(GPIOX_19),
+ GPIO_GROUP(GPIOX_20),
+ GPIO_GROUP(GPIOX_21),
+
+ GPIO_GROUP(GPIOY_0),
+ GPIO_GROUP(GPIOY_1),
+ GPIO_GROUP(GPIOY_3),
+ GPIO_GROUP(GPIOY_6),
+ GPIO_GROUP(GPIOY_7),
+ GPIO_GROUP(GPIOY_8),
+ GPIO_GROUP(GPIOY_9),
+ GPIO_GROUP(GPIOY_10),
+ GPIO_GROUP(GPIOY_11),
+ GPIO_GROUP(GPIOY_12),
+ GPIO_GROUP(GPIOY_13),
+ GPIO_GROUP(GPIOY_14),
+
+ GPIO_GROUP(GPIODV_9),
+ GPIO_GROUP(GPIODV_24),
+ GPIO_GROUP(GPIODV_25),
+ GPIO_GROUP(GPIODV_26),
+ GPIO_GROUP(GPIODV_27),
+ GPIO_GROUP(GPIODV_28),
+ GPIO_GROUP(GPIODV_29),
+
+ GPIO_GROUP(GPIOH_0),
+ GPIO_GROUP(GPIOH_1),
+ GPIO_GROUP(GPIOH_2),
+ GPIO_GROUP(GPIOH_3),
+ GPIO_GROUP(GPIOH_4),
+ GPIO_GROUP(GPIOH_5),
+ GPIO_GROUP(GPIOH_6),
+ GPIO_GROUP(GPIOH_7),
+ GPIO_GROUP(GPIOH_8),
+ GPIO_GROUP(GPIOH_9),
+
+ GPIO_GROUP(DIF_0_P),
+ GPIO_GROUP(DIF_0_N),
+ GPIO_GROUP(DIF_1_P),
+ GPIO_GROUP(DIF_1_N),
+ GPIO_GROUP(DIF_2_P),
+ GPIO_GROUP(DIF_2_N),
+ GPIO_GROUP(DIF_3_P),
+ GPIO_GROUP(DIF_3_N),
+ GPIO_GROUP(DIF_4_P),
+ GPIO_GROUP(DIF_4_N),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -577,22 +574,22 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
};
static struct meson_pmx_group meson8b_aobus_groups[] = {
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+ GPIO_GROUP(GPIOAO_0),
+ GPIO_GROUP(GPIOAO_1),
+ GPIO_GROUP(GPIOAO_2),
+ GPIO_GROUP(GPIOAO_3),
+ GPIO_GROUP(GPIOAO_4),
+ GPIO_GROUP(GPIOAO_5),
+ GPIO_GROUP(GPIOAO_6),
+ GPIO_GROUP(GPIOAO_7),
+ GPIO_GROUP(GPIOAO_8),
+ GPIO_GROUP(GPIOAO_9),
+ GPIO_GROUP(GPIOAO_10),
+ GPIO_GROUP(GPIOAO_11),
+ GPIO_GROUP(GPIOAO_12),
+ GPIO_GROUP(GPIOAO_13),
+ GPIO_GROUP(GPIO_BSD_EN),
+ GPIO_GROUP(GPIO_TEST_N),
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
@@ -887,30 +884,29 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
};
static struct meson_bank meson8b_cbus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", GPIOX_0, GPIOX_21, 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", GPIOY_0, GPIOY_14, 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", GPIODV_9, GPIODV_29, 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", GPIOH_0, GPIOH_9, 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("CARD", CARD_0, CARD_6, 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", BOOT_0, BOOT_18, 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
/*
	 * The following bank is not mentioned in the public datasheet
	 * There is no information on whether it can be used with the gpio
* interrupt controller
*/
- BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
+ BANK("DIF", DIF_0_P, DIF_4_N, -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
static struct meson_bank meson8b_aobus_banks[] = {
- /* name first last irq pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+	/* name    first      last       irq    pullen  pull    dir     out     in  */
+ BANK("AO", GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.name = "cbus-banks",
- .pin_base = 0,
.pins = meson8b_cbus_pins,
.groups = meson8b_cbus_groups,
.funcs = meson8b_cbus_functions,
@@ -919,11 +915,11 @@ struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_cbus_groups),
.num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
.num_banks = ARRAY_SIZE(meson8b_cbus_banks),
+ .pmx_ops = &meson8_pmx_ops,
};
-struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+static struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.name = "aobus-banks",
- .pin_base = 130,
.pins = meson8b_aobus_pins,
.groups = meson8b_aobus_groups,
.funcs = meson8b_aobus_functions,
@@ -932,4 +928,26 @@ struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
.num_groups = ARRAY_SIZE(meson8b_aobus_groups),
.num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
.num_banks = ARRAY_SIZE(meson8b_aobus_banks),
+ .pmx_ops = &meson8_pmx_ops,
+};
+
+static const struct of_device_id meson8b_pinctrl_dt_match[] = {
+ {
+ .compatible = "amlogic,meson8b-cbus-pinctrl",
+ .data = &meson8b_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-aobus-pinctrl",
+ .data = &meson8b_aobus_pinctrl_data,
+ },
+ { },
+};
+
+static struct platform_driver meson8b_pinctrl_driver = {
+ .probe = meson_pinctrl_probe,
+ .driver = {
+ .name = "meson8b-pinctrl",
+ .of_match_table = meson8b_pinctrl_dt_match,
+ },
};
+builtin_platform_driver(meson8b_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index 5b03fd55e28d..cd082dca4482 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o
obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o
obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 71b944748304..d45af31b86b4 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -576,6 +576,19 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
val |= (BIT(d->hwirq % GPIO_PER_REG));
break;
+ case IRQ_TYPE_EDGE_BOTH: {
+ u32 in_val, in_reg = INPUT_VAL;
+
+ armada_37xx_irq_update_reg(&in_reg, d);
+ regmap_read(info->regmap, in_reg, &in_val);
+
+ /* Set initial polarity based on current input level. */
+ if (in_val & d->mask)
+ val |= d->mask; /* falling */
+ else
+ val &= ~d->mask; /* rising */
+ break;
+ }
default:
spin_unlock_irqrestore(&info->irq_lock, flags);
return -EINVAL;
@@ -586,13 +599,47 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
+ u32 pin_idx)
+{
+ u32 reg_idx = pin_idx / GPIO_PER_REG;
+ u32 bit_num = pin_idx % GPIO_PER_REG;
+ u32 p, l, ret;
+ unsigned long flags;
+
+	regmap_read(info->regmap, INPUT_VAL + 4 * reg_idx, &l);
+
+ spin_lock_irqsave(&info->irq_lock, flags);
+ p = readl(info->base + IRQ_POL + 4 * reg_idx);
+ if ((p ^ l) & (1 << bit_num)) {
+ /*
+		 * For the gpios which are used for both-edge irqs, when their
+		 * interrupts happen, their input levels change while their
+		 * interrupt polarities keep the old values, so we should
+		 * synchronize their interrupt polarities; for example,
+		 * at first a gpio's input level is low and its interrupt
+		 * polarity control is "Detect rising edge"; then the gpio gets
+		 * an interrupt, its level turns high, and we should change its
+		 * polarity control to "Detect falling edge" accordingly.
+ */
+ p ^= 1 << bit_num;
+ writel(p, info->base + IRQ_POL + 4 * reg_idx);
+ ret = 0;
+ } else {
+ /* Spurious irq */
+ ret = -1;
+ }
+
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ return ret;
+}
static void armada_37xx_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct armada_37xx_pinctrl *info = gpiochip_get_data(gc);
- struct irq_domain *d = gc->irqdomain;
+ struct irq_domain *d = gc->irq.domain;
int i;
chained_irq_enter(chip, desc);
@@ -609,6 +656,23 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
u32 hwirq = ffs(status) - 1;
u32 virq = irq_find_mapping(d, hwirq +
i * GPIO_PER_REG);
+ u32 t = irq_get_trigger_type(virq);
+
+ if ((t & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ /* Swap polarity (race with GPIO line) */
+ if (armada_37xx_edge_both_irq_swap_pol(info,
+ hwirq + i * GPIO_PER_REG)) {
+ /*
+				 * For a spurious irq, where the gpio
+				 * level is not as expected after the
+				 * incoming edge, just ack the gpio irq.
+ */
+ writel(1 << hwirq,
+ info->base +
+ IRQ_STATUS + 4 * i);
+ continue;
+ }
+ }
generic_handle_irq(virq);
@@ -626,15 +690,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
static unsigned int armada_37xx_irq_startup(struct irq_data *d)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- int irq = d->hwirq - chip->irq_base;
/*
* The mask field is a "precomputed bitmask for accessing the
* chip registers" which was introduced for the generic
* irqchip framework. As we don't use this framework, we can
* reuse this field for our own usage.
*/
- d->mask = BIT(irq % GPIO_PER_REG);
+ d->mask = BIT(d->hwirq % GPIO_PER_REG);
armada_37xx_irq_unmask(d);
diff --git a/drivers/pinctrl/nomadik/Makefile b/drivers/pinctrl/nomadik/Makefile
index 30b27f18cd52..bf8b7517ee4a 100644
--- a/drivers/pinctrl/nomadik/Makefile
+++ b/drivers/pinctrl/nomadik/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Nomadik family pin control drivers
obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o
obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.h b/drivers/pinctrl/nomadik/pinctrl-abx500.h
index 2beef3bfe9ca..43f9b718a8ef 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.h
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PINCTRL_PINCTRL_ABx500_H
#define PINCTRL_PINCTRL_ABx500_H
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index af4814479eb0..726c0b5501fa 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-nomadik.h"
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index 2860eafd1b42..ae3ac7b799a6 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-nomadik.h"
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index e852048c4c04..7e814764da7d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-nomadik.h"
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index a53f1a9b1ed2..f0e7a8c114b2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -413,7 +413,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
u32 falling = nmk_chip->fimsc & BIT(offset);
u32 rising = nmk_chip->rimsc & BIT(offset);
int gpio = nmk_chip->chip.base + offset;
- int irq = irq_find_mapping(nmk_chip->chip.irqdomain, offset);
+ int irq = irq_find_mapping(nmk_chip->chip.irq.domain, offset);
struct irq_data *d = irq_get_irq_data(irq);
if (!rising && !falling)
@@ -815,7 +815,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
while (status) {
int bit = __ffs(status);
- generic_handle_irq(irq_find_mapping(chip->irqdomain, bit));
+ generic_handle_irq(irq_find_mapping(chip->irq.domain, bit));
status &= ~BIT(bit);
}
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index 30bba2a75a58..ae0bac06639f 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PINCTRL_PINCTRL_NOMADIK_H
#define PINCTRL_PINCTRL_NOMADIK_H
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 8eaa25c3384f..b4f7f8a458ea 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -49,6 +49,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
+ PCONFDUMP(PIN_CONFIG_SKEW_DELAY, "skew delay", NULL, true),
};
static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
@@ -181,6 +182,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
{ "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
+ { "skew-delay", PIN_CONFIG_SKEW_DELAY, 0 },
};
/**
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 433af328d981..61d830c2bc17 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -532,7 +532,7 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
regval = readl(regs + i);
if (!(regval & PIN_IRQ_PENDING))
continue;
- irq = irq_find_mapping(gc->irqdomain, irqnr + i);
+ irq = irq_find_mapping(gc->irq.domain, irqnr + i);
generic_handle_irq(irq);
/* Clear interrupt.
@@ -753,7 +753,7 @@ static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
return false;
}
-int amd_gpio_suspend(struct device *dev)
+static int amd_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
@@ -772,7 +772,7 @@ int amd_gpio_suspend(struct device *dev)
return 0;
}
-int amd_gpio_resume(struct device *dev)
+static int amd_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 569bc28cb909..03492e3c09fa 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1603,7 +1603,7 @@ static void gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(n, &isr, BITS_PER_LONG) {
generic_handle_irq(irq_find_mapping(
- gpio_chip->irqdomain, n));
+ gpio_chip->irq.domain, n));
}
}
chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index ac155e7d3412..7939b178c6ae 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -517,7 +517,7 @@ static void u300_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
int offset = pinoffset + irqoffset;
- int pin_irq = irq_find_mapping(chip->irqdomain, offset);
+ int pin_irq = irq_find_mapping(chip->irq.domain, offset);
dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
pin_irq, offset);
diff --git a/drivers/pinctrl/pinctrl-coh901.h b/drivers/pinctrl/pinctrl-coh901.h
index 87294222583e..ba2678665168 100644
--- a/drivers/pinctrl/pinctrl-coh901.h
+++ b/drivers/pinctrl/pinctrl-coh901.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
int u300_gpio_config_get(struct gpio_chip *chip,
unsigned offset,
unsigned long *config);
diff --git a/drivers/pinctrl/pinctrl-gemini.c b/drivers/pinctrl/pinctrl-gemini.c
index 39e6221e7100..e9b83e291edf 100644
--- a/drivers/pinctrl/pinctrl-gemini.c
+++ b/drivers/pinctrl/pinctrl-gemini.c
@@ -13,6 +13,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -22,6 +24,19 @@
#define DRIVER_NAME "pinctrl-gemini"
/**
+ * struct gemini_pin_conf - information about configuring a pin
+ * @pin: the pin number
+ * @reg: config register
+ * @mask: the bits affecting the configuration of the pin
+ */
+struct gemini_pin_conf {
+ unsigned int pin;
+ u32 reg;
+ u32 mask;
+};
+
+/**
+ * struct gemini_pmx - state holder for the gemini pin controller
* @dev: a pointer back to containing device
* @virtbase: the offset to the controller in virtual memory
* @map: regmap to access registers
@@ -29,6 +44,8 @@
* @is_3516: whether the SoC/package is the 3516 variant
* @flash_pin: whether the flash pin (extended pins for parallel
* flash) is set
+ * @confs: pin config information
+ * @nconfs: number of pin config information items
*/
struct gemini_pmx {
struct device *dev;
@@ -37,6 +54,8 @@ struct gemini_pmx {
bool is_3512;
bool is_3516;
bool flash_pin;
+ const struct gemini_pin_conf *confs;
+ unsigned int nconfs;
};
/**
@@ -57,6 +76,13 @@ struct gemini_pin_group {
u32 value;
};
+/* Some straightforward control registers */
+#define GLOBAL_WORD_ID 0x00
+#define GLOBAL_STATUS 0x04
+#define GLOBAL_STATUS_FLPIN BIT(20)
+#define GLOBAL_GMAC_CTRL_SKEW 0x1c
+#define GLOBAL_GMAC0_DATA_SKEW 0x20
+#define GLOBAL_GMAC1_DATA_SKEW 0x24
/*
* Global Miscellaneous Control Register
* This register controls all Gemini pad/pin multiplexing
@@ -69,10 +95,14 @@ struct gemini_pin_group {
* DISABLED again. So you select a flash configuration once, and then
* you are stuck with it.
*/
-#define GLOBAL_WORD_ID 0x00
-#define GLOBAL_STATUS 0x04
-#define GLOBAL_STATUS_FLPIN BIT(20)
#define GLOBAL_MISC_CTRL 0x30
+#define GEMINI_GMAC_IOSEL_MASK GENMASK(28, 27)
+/* Not really used */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMII BIT(28)
+/* Activated with GMAC1 */
+#define GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII BIT(27)
+/* This will be the default */
+#define GEMINI_GMAC_IOSEL_GMAC0_RGMII_GMAC1_GPIO2 0
#define TVC_CLK_PAD_ENABLE BIT(20)
#define PCI_CLK_PAD_ENABLE BIT(17)
#define LPC_CLK_PAD_ENABLE BIT(16)
@@ -86,8 +116,8 @@ struct gemini_pin_group {
#define NAND_PADS_DISABLE BIT(2)
#define PFLASH_PADS_DISABLE BIT(1)
#define SFLASH_PADS_DISABLE BIT(0)
-#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20))
-#define PADS_MAXBIT 20
+#define PADS_MASK (GENMASK(9, 0) | BIT(16) | BIT(17) | BIT(20) | BIT(27))
+#define PADS_MAXBIT 27
/* Ordered by bit index */
static const char * const gemini_padgroups[] = {
@@ -106,6 +136,8 @@ static const char * const gemini_padgroups[] = {
"PCI CLK",
NULL, NULL,
"TVC CLK",
+ NULL, NULL, NULL, NULL, NULL,
+ "GMAC1",
};
static const struct pinctrl_pin_desc gemini_3512_pins[] = {
@@ -493,9 +525,12 @@ static const unsigned int usb_3512_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3512_pins[] = {
- 311, 240, 258, 276, 294, 312, 241, 259, 277, 295, 313, 242, 260, 278, 296,
- 315, 297, 279, 261, 243, 316, 298, 280, 262, 244, 317, 299, 281
+static const unsigned int gmii_gmac0_3512_pins[] = {
+ 240, 241, 242, 258, 259, 260, 276, 277, 278, 294, 295, 311, 312, 313
+};
+
+static const unsigned int gmii_gmac1_3512_pins[] = {
+ 243, 244, 261, 262, 279, 280, 281, 296, 297, 298, 299, 315, 316, 317
};
static const unsigned int pci_3512_pins[] = {
@@ -645,10 +680,10 @@ static const unsigned int gpio1c_3512_pins[] = {
/* The GPIO1D (28-31) pins overlap with LCD and TVC */
static const unsigned int gpio1d_3512_pins[] = { 246, 319, 301, 283 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3512_pins[] = { 315, 297, 279, 261 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3512_pins[] = { 262, 244, 317, 299 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -715,9 +750,16 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3512_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3512_pins,
- .num_pins = ARRAY_SIZE(gmii_3512_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3512_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3512_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3512_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -931,14 +973,15 @@ static const struct gemini_pin_group gemini_3512_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3512_pins,
.num_pins = ARRAY_SIZE(gpio2a_3512_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3512_pins,
.num_pins = ARRAY_SIZE(gpio2b_3512_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1418,9 +1461,12 @@ static const unsigned int usb_3516_pins[] = {
};
/* GMII, ethernet pins */
-static const unsigned int gmii_3516_pins[] = {
- 306, 307, 308, 309, 310, 325, 326, 327, 328, 329, 330, 345, 346, 347,
- 348, 349, 350, 351, 367, 368, 369, 370, 371, 386, 387, 389, 390, 391
+static const unsigned int gmii_gmac0_3516_pins[] = {
+ 306, 307, 325, 326, 327, 328, 345, 346, 347, 348, 367, 368, 386, 387
+};
+
+static const unsigned int gmii_gmac1_3516_pins[] = {
+ 308, 309, 310, 329, 330, 349, 350, 351, 369, 370, 371, 389, 390, 391
};
static const unsigned int pci_3516_pins[] = {
@@ -1562,10 +1608,10 @@ static const unsigned int gpio1c_3516_pins[] = {
/* The GPIO1D (28-31) pins overlap with TVC */
static const unsigned int gpio1d_3516_pins[] = { 353, 311, 394, 374 };
-/* The GPIO2A (0-3) pins overlap with GMII and extended parallel flash */
+/* The GPIO2A (0-3) pins overlap with GMII GMAC1 and extended parallel flash */
static const unsigned int gpio2a_3516_pins[] = { 308, 369, 389, 329 };
-/* The GPIO2B (4-7) pins overlap with GMII, extended parallel flash and LCD */
+/* The GPIO2B (4-7) pins overlap with GMII GMAC1, extended parallel flash and LCD */
static const unsigned int gpio2b_3516_pins[] = { 391, 351, 310, 371 };
/* The GPIO2C (8-31) pins overlap with PCI */
@@ -1637,9 +1683,16 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.num_pins = ARRAY_SIZE(usb_3516_pins),
},
{
- .name = "gmiigrp",
- .pins = gmii_3516_pins,
- .num_pins = ARRAY_SIZE(gmii_3516_pins),
+ .name = "gmii_gmac0_grp",
+ .pins = gmii_gmac0_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac0_3516_pins),
+ },
+ {
+ .name = "gmii_gmac1_grp",
+ .pins = gmii_gmac1_3516_pins,
+ .num_pins = ARRAY_SIZE(gmii_gmac1_3516_pins),
+ /* Bring out RGMII on the GMAC1 pins */
+ .value = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "pcigrp",
@@ -1838,14 +1891,15 @@ static const struct gemini_pin_group gemini_3516_pin_groups[] = {
.name = "gpio2agrp",
.pins = gpio2a_3516_pins,
.num_pins = ARRAY_SIZE(gpio2a_3516_pins),
- /* Conflict with GMII and extended parallel flash */
+ .mask = GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
+ /* Conflict with GMII GMAC1 and extended parallel flash */
},
{
.name = "gpio2bgrp",
.pins = gpio2b_3516_pins,
.num_pins = ARRAY_SIZE(gpio2b_3516_pins),
- /* Conflict with GMII, extended parallel flash and LCD */
- .mask = LCD_PADS_ENABLE,
+ /* Conflict with GMII GMAC1, extended parallel flash and LCD */
+ .mask = LCD_PADS_ENABLE | GEMINI_GMAC_IOSEL_GMAC0_GMAC1_RGMII,
},
{
.name = "gpio2cgrp",
@@ -1918,73 +1972,13 @@ static void gemini_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, " " DRIVER_NAME);
}
-static int gemini_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np,
- struct pinctrl_map **map,
- unsigned int *reserved_maps,
- unsigned int *num_maps)
-{
- int ret;
- const char *function = NULL;
- const char *group;
- struct property *prop;
-
- ret = of_property_read_string(np, "function", &function);
- if (ret < 0)
- return ret;
-
- ret = of_property_count_strings(np, "groups");
- if (ret < 0)
- return ret;
-
- ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
- num_maps, ret);
- if (ret < 0)
- return ret;
-
- of_property_for_each_string(np, "groups", prop, group) {
- ret = pinctrl_utils_add_map_mux(pctldev, map, reserved_maps,
- num_maps, group, function);
- if (ret < 0)
- return ret;
- pr_debug("ADDED FUNCTION %s <-> GROUP %s\n",
- function, group);
- }
-
- return 0;
-}
-
-static int gemini_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np_config,
- struct pinctrl_map **map,
- unsigned int *num_maps)
-{
- unsigned int reserved_maps = 0;
- struct device_node *np;
- int ret;
-
- *map = NULL;
- *num_maps = 0;
-
- for_each_child_of_node(np_config, np) {
- ret = gemini_pinctrl_dt_subnode_to_map(pctldev, np, map,
- &reserved_maps, num_maps);
- if (ret < 0) {
- pinctrl_utils_free_map(pctldev, *map, *num_maps);
- return ret;
- }
- }
-
- return 0;
-};
-
static const struct pinctrl_ops gemini_pctrl_ops = {
.get_groups_count = gemini_get_groups_count,
.get_group_name = gemini_get_group_name,
.get_group_pins = gemini_get_group_pins,
.pin_dbg_show = gemini_pin_dbg_show,
- .dt_node_to_map = gemini_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_free_map,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
};
/**
@@ -2008,7 +2002,7 @@ static const char * const icegrps[] = { "icegrp" };
static const char * const idegrps[] = { "idegrp" };
static const char * const satagrps[] = { "satagrp" };
static const char * const usbgrps[] = { "usbgrp" };
-static const char * const gmiigrps[] = { "gmiigrp" };
+static const char * const gmiigrps[] = { "gmii_gmac0_grp", "gmii_gmac1_grp" };
static const char * const pcigrps[] = { "pcigrp" };
static const char * const lpcgrps[] = { "lpcgrp" };
static const char * const lcdgrps[] = { "lcdgrp" };
@@ -2074,6 +2068,16 @@ static const struct gemini_pmx_func gemini_pmx_functions[] = {
.num_groups = ARRAY_SIZE(satagrps),
},
{
+ .name = "usb",
+ .groups = usbgrps,
+ .num_groups = ARRAY_SIZE(usbgrps),
+ },
+ {
+ .name = "gmii",
+ .groups = gmiigrps,
+ .num_groups = ARRAY_SIZE(gmiigrps),
+ },
+ {
.name = "pci",
.groups = pcigrps,
.num_groups = ARRAY_SIZE(pcigrps),
@@ -2251,10 +2255,155 @@ static const struct pinmux_ops gemini_pmx_ops = {
.set_mux = gemini_pmx_set_mux,
};
+#define GEMINI_CFGPIN(_n, _r, _lb, _hb) { \
+ .pin = _n, \
+ .reg = _r, \
+ .mask = GENMASK(_hb, _lb) \
+}
+
+static const struct gemini_pin_conf gemini_confs_3512[] = {
+ GEMINI_CFGPIN(259, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(277, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(241, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(312, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(298, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(280, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(316, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(243, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(295, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(313, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(242, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(260, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(294, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(276, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(258, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(240, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(262, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(244, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(317, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(299, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(261, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(279, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(297, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(315, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf gemini_confs_3516[] = {
+ GEMINI_CFGPIN(347, GLOBAL_GMAC_CTRL_SKEW, 0, 3), /* GMAC0 RXDV */
+ GEMINI_CFGPIN(386, GLOBAL_GMAC_CTRL_SKEW, 4, 7), /* GMAC0 RXC */
+ GEMINI_CFGPIN(307, GLOBAL_GMAC_CTRL_SKEW, 8, 11), /* GMAC0 TXEN */
+ GEMINI_CFGPIN(327, GLOBAL_GMAC_CTRL_SKEW, 12, 15), /* GMAC0 TXC */
+ GEMINI_CFGPIN(309, GLOBAL_GMAC_CTRL_SKEW, 16, 19), /* GMAC1 RXDV */
+ GEMINI_CFGPIN(390, GLOBAL_GMAC_CTRL_SKEW, 20, 23), /* GMAC1 RXC */
+ GEMINI_CFGPIN(370, GLOBAL_GMAC_CTRL_SKEW, 24, 27), /* GMAC1 TXEN */
+ GEMINI_CFGPIN(350, GLOBAL_GMAC_CTRL_SKEW, 28, 31), /* GMAC1 TXC */
+ GEMINI_CFGPIN(367, GLOBAL_GMAC0_DATA_SKEW, 0, 3), /* GMAC0 RXD0 */
+ GEMINI_CFGPIN(348, GLOBAL_GMAC0_DATA_SKEW, 4, 7), /* GMAC0 RXD1 */
+ GEMINI_CFGPIN(387, GLOBAL_GMAC0_DATA_SKEW, 8, 11), /* GMAC0 RXD2 */
+ GEMINI_CFGPIN(328, GLOBAL_GMAC0_DATA_SKEW, 12, 15), /* GMAC0 RXD3 */
+ GEMINI_CFGPIN(306, GLOBAL_GMAC0_DATA_SKEW, 16, 19), /* GMAC0 TXD0 */
+ GEMINI_CFGPIN(325, GLOBAL_GMAC0_DATA_SKEW, 20, 23), /* GMAC0 TXD1 */
+ GEMINI_CFGPIN(346, GLOBAL_GMAC0_DATA_SKEW, 24, 27), /* GMAC0 TXD2 */
+ GEMINI_CFGPIN(326, GLOBAL_GMAC0_DATA_SKEW, 28, 31), /* GMAC0 TXD3 */
+ GEMINI_CFGPIN(391, GLOBAL_GMAC1_DATA_SKEW, 0, 3), /* GMAC1 RXD0 */
+ GEMINI_CFGPIN(351, GLOBAL_GMAC1_DATA_SKEW, 4, 7), /* GMAC1 RXD1 */
+ GEMINI_CFGPIN(310, GLOBAL_GMAC1_DATA_SKEW, 8, 11), /* GMAC1 RXD2 */
+ GEMINI_CFGPIN(371, GLOBAL_GMAC1_DATA_SKEW, 12, 15), /* GMAC1 RXD3 */
+ GEMINI_CFGPIN(329, GLOBAL_GMAC1_DATA_SKEW, 16, 19), /* GMAC1 TXD0 */
+ GEMINI_CFGPIN(389, GLOBAL_GMAC1_DATA_SKEW, 20, 23), /* GMAC1 TXD1 */
+ GEMINI_CFGPIN(369, GLOBAL_GMAC1_DATA_SKEW, 24, 27), /* GMAC1 TXD2 */
+ GEMINI_CFGPIN(308, GLOBAL_GMAC1_DATA_SKEW, 28, 31), /* GMAC1 TXD3 */
+};
+
+static const struct gemini_pin_conf *gemini_get_pin_conf(struct gemini_pmx *pmx,
+ unsigned int pin)
+{
+ const struct gemini_pin_conf *retconf;
+ int i;
+
+ for (i = 0; i < pmx->nconfs; i++) {
+ retconf = &pmx->confs[i];
+ if (retconf->pin == pin)
+ return retconf;
+ }
+ return NULL;
+}
+
+static int gemini_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct gemini_pin_conf *conf;
+ u32 val;
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf)
+ return -ENOTSUPP;
+ regmap_read(pmx->map, conf->reg, &val);
+ val &= conf->mask;
+ val >>= (ffs(conf->mask) - 1);
+ *config = pinconf_to_config_packed(PIN_CONFIG_SKEW_DELAY, val);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int gemini_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct gemini_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
+ const struct gemini_pin_conf *conf;
+ enum pin_config_param param;
+ u32 arg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_SKEW_DELAY:
+ if (arg > 0xf)
+ return -EINVAL;
+ conf = gemini_get_pin_conf(pmx, pin);
+ if (!conf) {
+ dev_err(pmx->dev,
+ "invalid pin for skew delay %d\n", pin);
+ return -ENOTSUPP;
+ }
+ arg <<= (ffs(conf->mask) - 1);
+ dev_dbg(pmx->dev,
+ "set pin %d to skew delay mask %08x, val %08x\n",
+ pin, conf->mask, arg);
+ regmap_update_bits(pmx->map, conf->reg, conf->mask, arg);
+ break;
+ default:
+ dev_err(pmx->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops gemini_pinconf_ops = {
+ .pin_config_get = gemini_pinconf_get,
+ .pin_config_set = gemini_pinconf_set,
+ .is_generic = true,
+};
+
static struct pinctrl_desc gemini_pmx_desc = {
.name = DRIVER_NAME,
.pctlops = &gemini_pctrl_ops,
.pmxops = &gemini_pmx_ops,
+ .confops = &gemini_pinconf_ops,
.owner = THIS_MODULE,
};
@@ -2297,11 +2446,15 @@ static int gemini_pmx_probe(struct platform_device *pdev)
val &= 0xffff;
if (val == 0x3512) {
pmx->is_3512 = true;
+ pmx->confs = gemini_confs_3512;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3512);
gemini_pmx_desc.pins = gemini_3512_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3512_pins);
dev_info(dev, "detected 3512 chip variant\n");
} else if (val == 0x3516) {
pmx->is_3516 = true;
+ pmx->confs = gemini_confs_3516;
+ pmx->nconfs = ARRAY_SIZE(gemini_confs_3516);
gemini_pmx_desc.pins = gemini_3516_pins;
gemini_pmx_desc.npins = ARRAY_SIZE(gemini_3516_pins);
dev_info(dev, "detected 3516 chip variant\n");
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index d84761822243..372ddf386bdb 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -717,7 +717,7 @@ static const struct of_device_id ingenic_pinctrl_of_match[] = {
{},
};
-int ingenic_pinctrl_probe(struct platform_device *pdev)
+static int ingenic_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ingenic_pinctrl *jzpc;
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index b8d2180a2bea..a7f37063518e 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -420,11 +420,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -435,11 +433,9 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
MAX77620_REG_GPIO0 + pin,
MAX77620_CNFG_GPIO_DRV_MASK,
val);
- if (ret < 0) {
- dev_err(dev, "Reg 0x%02x update failed %d\n",
- MAX77620_REG_GPIO0 + pin, ret);
- return ret;
- }
+ if (ret)
+ goto report_update_failure;
+
mpci->pin_info[pin].drv_type = val ?
MAX77620_PIN_PP_DRV : MAX77620_PIN_OD_DRV;
break;
@@ -536,6 +532,11 @@ static int max77620_pinconf_set(struct pinctrl_dev *pctldev,
}
return 0;
+
+report_update_failure:
+ dev_err(dev, "Reg 0x%02x update failed %d\n",
+ MAX77620_REG_GPIO0 + pin, ret);
+ return ret;
}
static const struct pinconf_ops max77620_pinconf_ops = {
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 9c950bbf07ba..4a6ea159c65d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -25,6 +25,7 @@
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
#define MCP_TYPE_S18 4
+#define MCP_TYPE_018 5
#define MCP_MAX_DEV_PER_CS 8
@@ -278,8 +279,7 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
{
struct mcp23s08 *mcp = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param;
- u32 arg, mask;
- u16 val;
+ u32 arg;
int ret = 0;
int i;
@@ -289,8 +289,6 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
- val = arg ? 0xFFFF : 0x0000;
- mask = BIT(pin);
ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
break;
default:
@@ -537,7 +535,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
((gpio_bit_changed || intcap_changed) &&
(BIT(i) & mcp->irq_fall) && !gpio_set) ||
defval_changed) {
- child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
+ child_irq = irq_find_mapping(mcp->chip.irq.domain, i);
handle_nested_irq(child_irq);
}
}
@@ -837,6 +835,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23017";
break;
+
+ case MCP_TYPE_018:
+ mcp->regmap = devm_regmap_init_i2c(data, &mcp23x17_regmap);
+ mcp->reg_shift = 1;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23018";
+ break;
#endif /* CONFIG_I2C */
default:
@@ -883,7 +888,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
- if (type == MCP_TYPE_S18)
+ if (type == MCP_TYPE_S18 || type == MCP_TYPE_018)
status |= IOCON_INTCC | (IOCON_INTCC << 8);
ret = mcp_write(mcp, MCP_IOCON, status);
@@ -964,6 +969,10 @@ static const struct of_device_id mcp23s08_i2c_of_match[] = {
.compatible = "microchip,mcp23017",
.data = (void *) MCP_TYPE_017,
},
+ {
+ .compatible = "microchip,mcp23018",
+ .data = (void *) MCP_TYPE_018,
+ },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{
.compatible = "mcp,mcp23008",
@@ -1013,6 +1022,7 @@ static int mcp230xx_probe(struct i2c_client *client,
static const struct i2c_device_id mcp230xx_id[] = {
{ "mcp23008", MCP_TYPE_008 },
{ "mcp23017", MCP_TYPE_017 },
+ { "mcp23018", MCP_TYPE_018 },
{ },
};
MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index 494ec9a7573a..53ec22a51f5c 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1064,7 +1064,7 @@ static void oxnas_gpio_irq_handler(struct irq_desc *desc)
stat = readl(bank->reg_base + IRQ_PENDING);
for_each_set_bit(pin, &stat, BITS_PER_LONG)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 31ceb958b3fe..96390228d388 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2106,7 +2106,7 @@ static void pic32_gpio_irq_handler(struct irq_desc *desc)
pending = pic32_gpio_get_pending(gc, stat);
for_each_set_bit(pin, &pending, BITS_PER_LONG)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 55375b1b3cc8..302190d1558d 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1307,7 +1307,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
pending = gpio_readl(bank, GPIO_INTERRUPT_STATUS) &
gpio_readl(bank, GPIO_INTERRUPT_EN);
for_each_set_bit(pin, &pending, 16)
- generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+ generic_handle_irq(irq_linear_revmap(gc->irq.domain, pin));
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index b5cb7858ffdc..2ba17548ad5b 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -884,6 +884,24 @@ static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
},
};
+static struct rockchip_mux_route_data rk3288_mux_route_data[] = {
+ {
+ /* edphdmi_cecinoutt1 */
+ .bank_num = 7,
+ .pin = 16,
+ .func = 2,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12) | BIT(12),
+ }, {
+ /* edphdmi_cecinout */
+ .bank_num = 7,
+ .pin = 23,
+ .func = 4,
+ .route_offset = 0x264,
+ .route_val = BIT(16 + 12),
+ },
+};
+
static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
{
/* uart2dbg_rxm0 */
@@ -900,12 +918,19 @@ static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
.route_offset = 0x50,
.route_val = BIT(16) | BIT(16 + 1) | BIT(0),
}, {
- /* gmac-m1-optimized_rxd0 */
+ /* gmac-m1_rxd0 */
.bank_num = 1,
.pin = 11,
.func = 2,
.route_offset = 0x50,
- .route_val = BIT(16 + 2) | BIT(16 + 10) | BIT(2) | BIT(10),
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* gmac-m1-optimized_rxd3 */
+ .bank_num = 1,
+ .pin = 14,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 10) | BIT(10),
}, {
/* pdm_sdi0m0 */
.bank_num = 2,
@@ -3391,6 +3416,8 @@ static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
.type = RK3288,
.grf_mux_offset = 0x0,
.pmu_mux_offset = 0x84,
+ .iomux_routes = rk3288_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3288_mux_route_data),
.pull_calc_reg = rk3288_calc_pull_reg_and_bit,
.drv_calc_reg = rk3288_calc_drv_reg_and_bit,
};
@@ -3456,8 +3483,8 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_ONLY,
DRV_TYPE_IO_DEFAULT,
DRV_TYPE_IO_DEFAULT,
- 0x0,
- 0x8,
+ 0x80,
+ 0x88,
-1,
-1,
PULL_TYPE_IO_1V8_ONLY,
@@ -3473,10 +3500,10 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
- 0x20,
- 0x28,
- 0x30,
- 0x38
+ 0xa0,
+ 0xa8,
+ 0xb0,
+ 0xb8
),
PIN_BANK_DRV_FLAGS_PULL_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
DRV_TYPE_IO_1V8_OR_3V0,
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
index 04d058706b80..717c0f4449a0 100644
--- a/drivers/pinctrl/pinctrl-rza1.c
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -303,6 +303,134 @@ static const struct rza1_pinmux_conf rza1h_pmx_conf = {
};
/* ----------------------------------------------------------------------------
+ * RZ/A1L (r7s72102) pinmux flags
+ */
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p1[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p3[] = {
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 4, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 10, .func = 2 },
+ { .pin = 11, .func = 2 },
+ { .pin = 12, .func = 2 },
+ { .pin = 13, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p4[] = {
+ { .pin = 1, .func = 4 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p5[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p6[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p7[] = {
+ { .pin = 2, .func = 2 },
+ { .pin = 3, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+ { .pin = 2, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 5, .func = 3 },
+ { .pin = 6, .func = 3 },
+ { .pin = 7, .func = 3 },
+};
+
+static const struct rza1_bidir_pin rza1l_bidir_pins_p9[] = {
+ { .pin = 1, .func = 2 },
+ { .pin = 0, .func = 3 },
+ { .pin = 1, .func = 3 },
+ { .pin = 3, .func = 3 },
+ { .pin = 4, .func = 3 },
+ { .pin = 5, .func = 3 },
+};
+
+static const struct rza1_swio_pin rza1l_swio_pins[] = {
+ { .port = 2, .pin = 8, .func = 2, .input = 0 },
+ { .port = 5, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 6, .func = 3, .input = 0 },
+ { .port = 6, .pin = 10, .func = 3, .input = 0 },
+ { .port = 7, .pin = 10, .func = 2, .input = 0 },
+ { .port = 8, .pin = 2, .func = 3, .input = 0 },
+};
+
+static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = {
+ [1] = { ARRAY_SIZE(rza1l_bidir_pins_p1), rza1l_bidir_pins_p1 },
+ [3] = { ARRAY_SIZE(rza1l_bidir_pins_p3), rza1l_bidir_pins_p3 },
+ [4] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p4 },
+ [5] = { ARRAY_SIZE(rza1l_bidir_pins_p5), rza1l_bidir_pins_p5 },
+ [6] = { ARRAY_SIZE(rza1l_bidir_pins_p6), rza1l_bidir_pins_p6 },
+ [7] = { ARRAY_SIZE(rza1l_bidir_pins_p7), rza1l_bidir_pins_p7 },
+ [9] = { ARRAY_SIZE(rza1l_bidir_pins_p9), rza1l_bidir_pins_p9 },
+};
+
+static const struct rza1_swio_entry rza1l_swio_entries[] = {
+ [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins },
+};
+
+/* RZ/A1L (r7s72102) pinmux flags table */
+static const struct rza1_pinmux_conf rza1l_pmx_conf = {
+ .bidir_entries = rza1l_bidir_entries,
+ .swio_entries = rza1l_swio_entries,
+};
+
+/* ----------------------------------------------------------------------------
* RZ/A1 types
*/
/**
@@ -1283,9 +1411,15 @@ static int rza1_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id rza1_pinctrl_of_match[] = {
{
+ /* RZ/A1H, RZ/A1M */
.compatible = "renesas,r7s72100-ports",
.data = &rza1h_pmx_conf,
},
+ {
+ /* RZ/A1L */
+ .compatible = "renesas,r7s72102-ports",
+ .data = &rza1l_pmx_conf,
+ },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b8b3d932cd73..e6cd8de793e2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -873,13 +873,13 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
int i = 0, nconfs = 0;
unsigned long *settings = NULL, *s = NULL;
struct pcs_conf_vals *conf = NULL;
- struct pcs_conf_type prop2[] = {
+ static const struct pcs_conf_type prop2[] = {
{ "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
{ "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
{ "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
{ "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
};
- struct pcs_conf_type prop4[] = {
+ static const struct pcs_conf_type prop4[] = {
{ "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
{ "pinctrl-single,bias-pulldown", PIN_CONFIG_BIAS_PULL_DOWN, },
{ "pinctrl-single,input-schmitt-enable",
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index a5205b94b2e6..2081c67667a8 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1408,7 +1408,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
continue;
}
- generic_handle_irq(irq_find_mapping(bank->gpio_chip.irqdomain, n));
+ generic_handle_irq(irq_find_mapping(bank->gpio_chip.irq.domain, n));
}
}
}
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 7450f5118445..fb242c542dc9 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -561,7 +561,7 @@ static irqreturn_t sx150x_irq_thread_fn(int irq, void *dev_id)
status = val;
for_each_set_bit(n, &status, pctl->data->ngpios)
- handle_nested_irq(irq_find_mapping(pctl->gpio.irqdomain, n));
+ handle_nested_irq(irq_find_mapping(pctl->gpio.irq.domain, n));
return IRQ_HANDLED;
}
@@ -1087,7 +1087,7 @@ static bool sx150x_reg_volatile(struct device *dev, unsigned int reg)
return reg == pctl->data->reg_irq_src || reg == pctl->data->reg_data;
}
-const struct regmap_config sx150x_regmap_config = {
+static const struct regmap_config sx150x_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 06c8b2ace05f..a1db345ded1c 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Qualcomm pin control drivers
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index ff491da64dab..7a960590ecaa 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -795,7 +795,7 @@ static void msm_gpio_irq_handler(struct irq_desc *desc)
g = &pctrl->soc->groups[i];
val = readl(pctrl->regs + g->intr_status_reg);
if (val & BIT(g->intr_status_bit)) {
- irq_pin = irq_find_mapping(gc->irqdomain, i);
+ irq_pin = irq_find_mapping(gc->irq.domain, i);
generic_handle_irq(irq_pin);
handled++;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index c2c0bab04257..3e66e0d10010 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -453,6 +453,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad = pctldev->desc->pins[pin].drv_data;
+ pad->is_enabled = true;
for (i = 0; i < nconfs; i++) {
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
@@ -600,6 +601,10 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
}
+ val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
+
+ ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_EN_CTL, val);
+
return ret;
}
@@ -1032,6 +1037,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */
{ .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */
{ .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */
+ { .compatible = "qcom,pmi8994-gpio" }, /* 10 GPIO's */
{ .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */
{ .compatible = "qcom,spmi-gpio" }, /* Generic */
{ },
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 0357f9701eb9..ecfb90059eeb 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -29,7 +29,7 @@ config PINCTRL_EXYNOS5440
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX
+ depends on ARCH_S3C24XX && OF
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
index 595995851ea5..df426561d067 100644
--- a/drivers/pinctrl/samsung/Makefile
+++ b/drivers/pinctrl/samsung/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Samsung pin control drivers
obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 1d4f05a96bd4..a7903904b64e 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PINCTRL_SH_PFC) += core.o pinctrl.o
obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 0c5e952461fd..cf4ae4bc9115 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -24,6 +24,7 @@
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_device.h>
+#include <linux/psci.h>
#include <linux/slab.h>
#include "core.h"
@@ -175,19 +176,19 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
BUG();
}
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width)
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg)
{
- return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width);
+ return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32);
}
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, u32 data)
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data)
{
if (pfc->info->unlock_reg)
sh_pfc_write_raw_reg(
sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
~data);
- sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);
+ sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), 32, data);
}
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
@@ -389,15 +390,20 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin)
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit)
{
- unsigned int i;
+ unsigned int i, j;
- for (i = 0; i < num; i++)
- if (info[i].pin == pin)
- return &info[i];
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ for (j = 0; j < ARRAY_SIZE(pfc->info->bias_regs[i].pins); j++) {
+ if (pfc->info->bias_regs[i].pins[j] == pin) {
+ *bit = j;
+ return &pfc->info->bias_regs[i];
+ }
+ }
+ }
WARN_ONCE(1, "Pin %u is not in bias info list\n", pin);
@@ -567,9 +573,99 @@ static const struct of_device_id sh_pfc_of_table[] = {
};
#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
+static void sh_pfc_nop_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+}
+
+static void sh_pfc_save_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ pfc->saved_regs[idx] = sh_pfc_read(pfc, reg);
+}
+
+static void sh_pfc_restore_reg(struct sh_pfc *pfc, u32 reg, unsigned int idx)
+{
+ sh_pfc_write(pfc, reg, pfc->saved_regs[idx]);
+}
+
+static unsigned int sh_pfc_walk_regs(struct sh_pfc *pfc,
+ void (*do_reg)(struct sh_pfc *pfc, u32 reg, unsigned int idx))
+{
+ unsigned int i, n = 0;
+
+ if (pfc->info->cfg_regs)
+ for (i = 0; pfc->info->cfg_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->cfg_regs[i].reg, n++);
+
+ if (pfc->info->drive_regs)
+ for (i = 0; pfc->info->drive_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->drive_regs[i].reg, n++);
+
+ if (pfc->info->bias_regs)
+ for (i = 0; pfc->info->bias_regs[i].puen; i++) {
+ do_reg(pfc, pfc->info->bias_regs[i].puen, n++);
+ if (pfc->info->bias_regs[i].pud)
+ do_reg(pfc, pfc->info->bias_regs[i].pud, n++);
+ }
+
+ if (pfc->info->ioctrl_regs)
+ for (i = 0; pfc->info->ioctrl_regs[i].reg; i++)
+ do_reg(pfc, pfc->info->ioctrl_regs[i].reg, n++);
+
+ return n;
+}
+
+static int sh_pfc_suspend_init(struct sh_pfc *pfc)
+{
+ unsigned int n;
+
+ /* This is the best we can do to check for the presence of PSCI */
+ if (!psci_ops.cpu_suspend)
+ return 0;
+
+ n = sh_pfc_walk_regs(pfc, sh_pfc_nop_reg);
+ if (!n)
+ return 0;
+
+ pfc->saved_regs = devm_kmalloc_array(pfc->dev, n,
+ sizeof(*pfc->saved_regs),
+ GFP_KERNEL);
+ if (!pfc->saved_regs)
+ return -ENOMEM;
+
+ dev_dbg(pfc->dev, "Allocated space to save %u regs\n", n);
+ return 0;
+}
+
+static int sh_pfc_suspend_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_save_reg);
+ return 0;
+}
+
+static int sh_pfc_resume_noirq(struct device *dev)
+{
+ struct sh_pfc *pfc = dev_get_drvdata(dev);
+
+ if (pfc->saved_regs)
+ sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
+ return 0;
+}
+
+static const struct dev_pm_ops sh_pfc_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_pfc_suspend_noirq, sh_pfc_resume_noirq)
+};
+#define DEV_PM_OPS &sh_pfc_pm
+#else
+static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */
+
static int sh_pfc_probe(struct platform_device *pdev)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
#ifdef CONFIG_OF
struct device_node *np = pdev->dev.of_node;
#endif
@@ -582,10 +678,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = of_device_get_match_data(&pdev->dev);
else
#endif
- info = platid ? (const void *)platid->driver_data : NULL;
-
- if (info == NULL)
- return -ENODEV;
+ info = (const void *)platform_get_device_id(pdev)->driver_data;
pfc = devm_kzalloc(&pdev->dev, sizeof(*pfc), GFP_KERNEL);
if (pfc == NULL)
@@ -609,6 +702,10 @@ static int sh_pfc_probe(struct platform_device *pdev)
info = pfc->info;
}
+ ret = sh_pfc_suspend_init(pfc);
+ if (ret)
+ return ret;
+
/* Enable dummy states for those platforms without pinctrl support */
if (!of_have_populated_dt())
pinctrl_provide_dummies();
@@ -683,7 +780,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
#ifdef CONFIG_PINCTRL_PFC_SHX3
{ "pfc-shx3", (kernel_ulong_t)&shx3_pinmux_info },
#endif
- { "sh-pfc", 0 },
{ },
};
@@ -693,6 +789,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
+ .pm = DEV_PM_OPS,
},
};
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 6d598dd63720..5af8ee26c03e 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -26,15 +26,14 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
u32 data);
-u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width);
-void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
- u32 data);
+u32 sh_pfc_read(struct sh_pfc *pfc, u32 reg);
+void sh_pfc_write(struct sh_pfc *pfc, u32 reg, u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
-const struct sh_pfc_bias_info *
-sh_pfc_pin_to_bias_info(const struct sh_pfc_bias_info *info,
- unsigned int num, unsigned int pin);
+const struct pinmux_bias_reg *
+sh_pfc_pin_to_bias_reg(const struct sh_pfc *pfc, unsigned int pin,
+ unsigned int *bit);
#endif /* __SH_PFC_CORE_H__ */
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index 6b5422766f13..946d9be50b62 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -139,12 +139,12 @@ static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
return -EINVAL;
- return pinctrl_request_gpio(offset);
+ return pinctrl_gpio_request(offset);
}
static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_free_gpio(offset);
+ return pinctrl_gpio_free(offset);
}
static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index c3af9ebee4af..00d61d175249 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -2912,189 +2912,230 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
-#define PUPR0 0x100
-#define PUPR1 0x104
-#define PUPR2 0x108
-#define PUPR3 0x10c
-#define PUPR4 0x110
-#define PUPR5 0x114
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(0, 6), PUPR0, 0 }, /* A0 */
- { RCAR_GP_PIN(0, 7), PUPR0, 1 }, /* A1 */
- { RCAR_GP_PIN(0, 8), PUPR0, 2 }, /* A2 */
- { RCAR_GP_PIN(0, 9), PUPR0, 3 }, /* A3 */
- { RCAR_GP_PIN(0, 10), PUPR0, 4 }, /* A4 */
- { RCAR_GP_PIN(0, 11), PUPR0, 5 }, /* A5 */
- { RCAR_GP_PIN(0, 12), PUPR0, 6 }, /* A6 */
- { RCAR_GP_PIN(0, 13), PUPR0, 7 }, /* A7 */
- { RCAR_GP_PIN(0, 14), PUPR0, 8 }, /* A8 */
- { RCAR_GP_PIN(0, 15), PUPR0, 9 }, /* A9 */
- { RCAR_GP_PIN(0, 16), PUPR0, 10 }, /* A10 */
- { RCAR_GP_PIN(0, 17), PUPR0, 11 }, /* A11 */
- { RCAR_GP_PIN(0, 18), PUPR0, 12 }, /* A12 */
- { RCAR_GP_PIN(0, 19), PUPR0, 13 }, /* A13 */
- { RCAR_GP_PIN(0, 20), PUPR0, 14 }, /* A14 */
- { RCAR_GP_PIN(0, 21), PUPR0, 15 }, /* A15 */
- { RCAR_GP_PIN(0, 22), PUPR0, 16 }, /* A16 */
- { RCAR_GP_PIN(0, 23), PUPR0, 17 }, /* A17 */
- { RCAR_GP_PIN(0, 24), PUPR0, 18 }, /* A18 */
- { RCAR_GP_PIN(0, 25), PUPR0, 19 }, /* A19 */
- { RCAR_GP_PIN(0, 26), PUPR0, 20 }, /* A20 */
- { RCAR_GP_PIN(0, 27), PUPR0, 21 }, /* A21 */
- { RCAR_GP_PIN(0, 28), PUPR0, 22 }, /* A22 */
- { RCAR_GP_PIN(0, 29), PUPR0, 23 }, /* A23 */
- { RCAR_GP_PIN(0, 30), PUPR0, 24 }, /* A24 */
- { RCAR_GP_PIN(0, 31), PUPR0, 25 }, /* A25 */
- { RCAR_GP_PIN(1, 3), PUPR0, 26 }, /* /EX_CS0 */
- { RCAR_GP_PIN(1, 4), PUPR0, 27 }, /* /EX_CS1 */
- { RCAR_GP_PIN(1, 5), PUPR0, 28 }, /* /EX_CS2 */
- { RCAR_GP_PIN(1, 6), PUPR0, 29 }, /* /EX_CS3 */
- { RCAR_GP_PIN(1, 7), PUPR0, 30 }, /* /EX_CS4 */
- { RCAR_GP_PIN(1, 8), PUPR0, 31 }, /* /EX_CS5 */
-
- { RCAR_GP_PIN(0, 0), PUPR1, 0 }, /* /PRESETOUT */
- { RCAR_GP_PIN(0, 5), PUPR1, 1 }, /* /BS */
- { RCAR_GP_PIN(1, 0), PUPR1, 2 }, /* RD//WR */
- { RCAR_GP_PIN(1, 1), PUPR1, 3 }, /* /WE0 */
- { RCAR_GP_PIN(1, 2), PUPR1, 4 }, /* /WE1 */
- { RCAR_GP_PIN(1, 11), PUPR1, 5 }, /* EX_WAIT0 */
- { RCAR_GP_PIN(1, 9), PUPR1, 6 }, /* DREQ0 */
- { RCAR_GP_PIN(1, 10), PUPR1, 7 }, /* DACK0 */
- { RCAR_GP_PIN(1, 12), PUPR1, 8 }, /* IRQ0 */
- { RCAR_GP_PIN(1, 13), PUPR1, 9 }, /* IRQ1 */
-
- { RCAR_GP_PIN(1, 22), PUPR2, 0 }, /* DU0_DR0 */
- { RCAR_GP_PIN(1, 23), PUPR2, 1 }, /* DU0_DR1 */
- { RCAR_GP_PIN(1, 24), PUPR2, 2 }, /* DU0_DR2 */
- { RCAR_GP_PIN(1, 25), PUPR2, 3 }, /* DU0_DR3 */
- { RCAR_GP_PIN(1, 26), PUPR2, 4 }, /* DU0_DR4 */
- { RCAR_GP_PIN(1, 27), PUPR2, 5 }, /* DU0_DR5 */
- { RCAR_GP_PIN(1, 28), PUPR2, 6 }, /* DU0_DR6 */
- { RCAR_GP_PIN(1, 29), PUPR2, 7 }, /* DU0_DR7 */
- { RCAR_GP_PIN(1, 30), PUPR2, 8 }, /* DU0_DG0 */
- { RCAR_GP_PIN(1, 31), PUPR2, 9 }, /* DU0_DG1 */
- { RCAR_GP_PIN(2, 0), PUPR2, 10 }, /* DU0_DG2 */
- { RCAR_GP_PIN(2, 1), PUPR2, 11 }, /* DU0_DG3 */
- { RCAR_GP_PIN(2, 2), PUPR2, 12 }, /* DU0_DG4 */
- { RCAR_GP_PIN(2, 3), PUPR2, 13 }, /* DU0_DG5 */
- { RCAR_GP_PIN(2, 4), PUPR2, 14 }, /* DU0_DG6 */
- { RCAR_GP_PIN(2, 5), PUPR2, 15 }, /* DU0_DG7 */
- { RCAR_GP_PIN(2, 6), PUPR2, 16 }, /* DU0_DB0 */
- { RCAR_GP_PIN(2, 7), PUPR2, 17 }, /* DU0_DB1 */
- { RCAR_GP_PIN(2, 8), PUPR2, 18 }, /* DU0_DB2 */
- { RCAR_GP_PIN(2, 9), PUPR2, 19 }, /* DU0_DB3 */
- { RCAR_GP_PIN(2, 10), PUPR2, 20 }, /* DU0_DB4 */
- { RCAR_GP_PIN(2, 11), PUPR2, 21 }, /* DU0_DB5 */
- { RCAR_GP_PIN(2, 12), PUPR2, 22 }, /* DU0_DB6 */
- { RCAR_GP_PIN(2, 13), PUPR2, 23 }, /* DU0_DB7 */
- { RCAR_GP_PIN(2, 14), PUPR2, 24 }, /* DU0_DOTCLKIN */
- { RCAR_GP_PIN(2, 15), PUPR2, 25 }, /* DU0_DOTCLKOUT0 */
- { RCAR_GP_PIN(2, 17), PUPR2, 26 }, /* DU0_HSYNC */
- { RCAR_GP_PIN(2, 18), PUPR2, 27 }, /* DU0_VSYNC */
- { RCAR_GP_PIN(2, 19), PUPR2, 28 }, /* DU0_EXODDF */
- { RCAR_GP_PIN(2, 20), PUPR2, 29 }, /* DU0_DISP */
- { RCAR_GP_PIN(2, 21), PUPR2, 30 }, /* DU0_CDE */
- { RCAR_GP_PIN(2, 16), PUPR2, 31 }, /* DU0_DOTCLKOUT1 */
-
- { RCAR_GP_PIN(3, 24), PUPR3, 0 }, /* VI0_CLK */
- { RCAR_GP_PIN(3, 25), PUPR3, 1 }, /* VI0_CLKENB */
- { RCAR_GP_PIN(3, 26), PUPR3, 2 }, /* VI0_FIELD */
- { RCAR_GP_PIN(3, 27), PUPR3, 3 }, /* /VI0_HSYNC */
- { RCAR_GP_PIN(3, 28), PUPR3, 4 }, /* /VI0_VSYNC */
- { RCAR_GP_PIN(3, 29), PUPR3, 5 }, /* VI0_DATA0 */
- { RCAR_GP_PIN(3, 30), PUPR3, 6 }, /* VI0_DATA1 */
- { RCAR_GP_PIN(3, 31), PUPR3, 7 }, /* VI0_DATA2 */
- { RCAR_GP_PIN(4, 0), PUPR3, 8 }, /* VI0_DATA3 */
- { RCAR_GP_PIN(4, 1), PUPR3, 9 }, /* VI0_DATA4 */
- { RCAR_GP_PIN(4, 2), PUPR3, 10 }, /* VI0_DATA5 */
- { RCAR_GP_PIN(4, 3), PUPR3, 11 }, /* VI0_DATA6 */
- { RCAR_GP_PIN(4, 4), PUPR3, 12 }, /* VI0_DATA7 */
- { RCAR_GP_PIN(4, 5), PUPR3, 13 }, /* VI0_G2 */
- { RCAR_GP_PIN(4, 6), PUPR3, 14 }, /* VI0_G3 */
- { RCAR_GP_PIN(4, 7), PUPR3, 15 }, /* VI0_G4 */
- { RCAR_GP_PIN(4, 8), PUPR3, 16 }, /* VI0_G5 */
- { RCAR_GP_PIN(4, 21), PUPR3, 17 }, /* VI1_DATA12 */
- { RCAR_GP_PIN(4, 22), PUPR3, 18 }, /* VI1_DATA13 */
- { RCAR_GP_PIN(4, 23), PUPR3, 19 }, /* VI1_DATA14 */
- { RCAR_GP_PIN(4, 24), PUPR3, 20 }, /* VI1_DATA15 */
- { RCAR_GP_PIN(4, 9), PUPR3, 21 }, /* ETH_REF_CLK */
- { RCAR_GP_PIN(4, 10), PUPR3, 22 }, /* ETH_TXD0 */
- { RCAR_GP_PIN(4, 11), PUPR3, 23 }, /* ETH_TXD1 */
- { RCAR_GP_PIN(4, 12), PUPR3, 24 }, /* ETH_CRS_DV */
- { RCAR_GP_PIN(4, 13), PUPR3, 25 }, /* ETH_TX_EN */
- { RCAR_GP_PIN(4, 14), PUPR3, 26 }, /* ETH_RX_ER */
- { RCAR_GP_PIN(4, 15), PUPR3, 27 }, /* ETH_RXD0 */
- { RCAR_GP_PIN(4, 16), PUPR3, 28 }, /* ETH_RXD1 */
- { RCAR_GP_PIN(4, 17), PUPR3, 29 }, /* ETH_MDC */
- { RCAR_GP_PIN(4, 18), PUPR3, 30 }, /* ETH_MDIO */
- { RCAR_GP_PIN(4, 19), PUPR3, 31 }, /* ETH_LINK */
-
- { RCAR_GP_PIN(3, 6), PUPR4, 0 }, /* SSI_SCK012 */
- { RCAR_GP_PIN(3, 7), PUPR4, 1 }, /* SSI_WS012 */
- { RCAR_GP_PIN(3, 10), PUPR4, 2 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(3, 9), PUPR4, 3 }, /* SSI_SDATA1 */
- { RCAR_GP_PIN(3, 8), PUPR4, 4 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(3, 2), PUPR4, 5 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(3, 3), PUPR4, 6 }, /* SSI_WS34 */
- { RCAR_GP_PIN(3, 5), PUPR4, 7 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(3, 4), PUPR4, 8 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(2, 31), PUPR4, 9 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(3, 0), PUPR4, 10 }, /* SSI_WS5 */
- { RCAR_GP_PIN(3, 1), PUPR4, 11 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(2, 28), PUPR4, 12 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(2, 29), PUPR4, 13 }, /* SSI_WS6 */
- { RCAR_GP_PIN(2, 30), PUPR4, 14 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(2, 24), PUPR4, 15 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(2, 25), PUPR4, 16 }, /* SSI_WS78 */
- { RCAR_GP_PIN(2, 27), PUPR4, 17 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(2, 26), PUPR4, 18 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(3, 23), PUPR4, 19 }, /* TCLK0 */
- { RCAR_GP_PIN(3, 11), PUPR4, 20 }, /* SD0_CLK */
- { RCAR_GP_PIN(3, 12), PUPR4, 21 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 13), PUPR4, 22 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 14), PUPR4, 23 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 15), PUPR4, 24 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 16), PUPR4, 25 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 17), PUPR4, 26 }, /* SD0_CD */
- { RCAR_GP_PIN(3, 18), PUPR4, 27 }, /* SD0_WP */
- { RCAR_GP_PIN(2, 22), PUPR4, 28 }, /* AUDIO_CLKA */
- { RCAR_GP_PIN(2, 23), PUPR4, 29 }, /* AUDIO_CLKB */
- { RCAR_GP_PIN(1, 14), PUPR4, 30 }, /* IRQ2 */
- { RCAR_GP_PIN(1, 15), PUPR4, 31 }, /* IRQ3 */
-
- { RCAR_GP_PIN(0, 1), PUPR5, 0 }, /* PENC0 */
- { RCAR_GP_PIN(0, 2), PUPR5, 1 }, /* PENC1 */
- { RCAR_GP_PIN(0, 3), PUPR5, 2 }, /* USB_OVC0 */
- { RCAR_GP_PIN(0, 4), PUPR5, 3 }, /* USB_OVC1 */
- { RCAR_GP_PIN(1, 16), PUPR5, 4 }, /* SCIF_CLK */
- { RCAR_GP_PIN(1, 17), PUPR5, 5 }, /* TX0 */
- { RCAR_GP_PIN(1, 18), PUPR5, 6 }, /* RX0 */
- { RCAR_GP_PIN(1, 19), PUPR5, 7 }, /* SCK0 */
- { RCAR_GP_PIN(1, 20), PUPR5, 8 }, /* /CTS0 */
- { RCAR_GP_PIN(1, 21), PUPR5, 9 }, /* /RTS0 */
- { RCAR_GP_PIN(3, 19), PUPR5, 10 }, /* HSPI_CLK0 */
- { RCAR_GP_PIN(3, 20), PUPR5, 11 }, /* /HSPI_CS0 */
- { RCAR_GP_PIN(3, 21), PUPR5, 12 }, /* HSPI_RX0 */
- { RCAR_GP_PIN(3, 22), PUPR5, 13 }, /* HSPI_TX0 */
- { RCAR_GP_PIN(4, 20), PUPR5, 14 }, /* ETH_MAGIC */
- { RCAR_GP_PIN(4, 25), PUPR5, 15 }, /* AVS1 */
- { RCAR_GP_PIN(4, 26), PUPR5, 16 }, /* AVS2 */
+#define PIN_NONE U16_MAX
+
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUPR0", 0x100, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 6), /* A0 */
+ [ 1] = RCAR_GP_PIN(0, 7), /* A1 */
+ [ 2] = RCAR_GP_PIN(0, 8), /* A2 */
+ [ 3] = RCAR_GP_PIN(0, 9), /* A3 */
+ [ 4] = RCAR_GP_PIN(0, 10), /* A4 */
+ [ 5] = RCAR_GP_PIN(0, 11), /* A5 */
+ [ 6] = RCAR_GP_PIN(0, 12), /* A6 */
+ [ 7] = RCAR_GP_PIN(0, 13), /* A7 */
+ [ 8] = RCAR_GP_PIN(0, 14), /* A8 */
+ [ 9] = RCAR_GP_PIN(0, 15), /* A9 */
+ [10] = RCAR_GP_PIN(0, 16), /* A10 */
+ [11] = RCAR_GP_PIN(0, 17), /* A11 */
+ [12] = RCAR_GP_PIN(0, 18), /* A12 */
+ [13] = RCAR_GP_PIN(0, 19), /* A13 */
+ [14] = RCAR_GP_PIN(0, 20), /* A14 */
+ [15] = RCAR_GP_PIN(0, 21), /* A15 */
+ [16] = RCAR_GP_PIN(0, 22), /* A16 */
+ [17] = RCAR_GP_PIN(0, 23), /* A17 */
+ [18] = RCAR_GP_PIN(0, 24), /* A18 */
+ [19] = RCAR_GP_PIN(0, 25), /* A19 */
+ [20] = RCAR_GP_PIN(0, 26), /* A20 */
+ [21] = RCAR_GP_PIN(0, 27), /* A21 */
+ [22] = RCAR_GP_PIN(0, 28), /* A22 */
+ [23] = RCAR_GP_PIN(0, 29), /* A23 */
+ [24] = RCAR_GP_PIN(0, 30), /* A24 */
+ [25] = RCAR_GP_PIN(0, 31), /* A25 */
+ [26] = RCAR_GP_PIN(1, 3), /* /EX_CS0 */
+ [27] = RCAR_GP_PIN(1, 4), /* /EX_CS1 */
+ [28] = RCAR_GP_PIN(1, 5), /* /EX_CS2 */
+ [29] = RCAR_GP_PIN(1, 6), /* /EX_CS3 */
+ [30] = RCAR_GP_PIN(1, 7), /* /EX_CS4 */
+ [31] = RCAR_GP_PIN(1, 8), /* /EX_CS5 */
+ } },
+ { PINMUX_BIAS_REG("PUPR1", 0x104, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 0), /* /PRESETOUT */
+ [ 1] = RCAR_GP_PIN(0, 5), /* /BS */
+ [ 2] = RCAR_GP_PIN(1, 0), /* RD//WR */
+ [ 3] = RCAR_GP_PIN(1, 1), /* /WE0 */
+ [ 4] = RCAR_GP_PIN(1, 2), /* /WE1 */
+ [ 5] = RCAR_GP_PIN(1, 11), /* EX_WAIT0 */
+ [ 6] = RCAR_GP_PIN(1, 9), /* DREQ0 */
+ [ 7] = RCAR_GP_PIN(1, 10), /* DACK0 */
+ [ 8] = RCAR_GP_PIN(1, 12), /* IRQ0 */
+ [ 9] = RCAR_GP_PIN(1, 13), /* IRQ1 */
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { PINMUX_BIAS_REG("PUPR2", 0x108, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(1, 22), /* DU0_DR0 */
+ [ 1] = RCAR_GP_PIN(1, 23), /* DU0_DR1 */
+ [ 2] = RCAR_GP_PIN(1, 24), /* DU0_DR2 */
+ [ 3] = RCAR_GP_PIN(1, 25), /* DU0_DR3 */
+ [ 4] = RCAR_GP_PIN(1, 26), /* DU0_DR4 */
+ [ 5] = RCAR_GP_PIN(1, 27), /* DU0_DR5 */
+ [ 6] = RCAR_GP_PIN(1, 28), /* DU0_DR6 */
+ [ 7] = RCAR_GP_PIN(1, 29), /* DU0_DR7 */
+ [ 8] = RCAR_GP_PIN(1, 30), /* DU0_DG0 */
+ [ 9] = RCAR_GP_PIN(1, 31), /* DU0_DG1 */
+ [10] = RCAR_GP_PIN(2, 0), /* DU0_DG2 */
+ [11] = RCAR_GP_PIN(2, 1), /* DU0_DG3 */
+ [12] = RCAR_GP_PIN(2, 2), /* DU0_DG4 */
+ [13] = RCAR_GP_PIN(2, 3), /* DU0_DG5 */
+ [14] = RCAR_GP_PIN(2, 4), /* DU0_DG6 */
+ [15] = RCAR_GP_PIN(2, 5), /* DU0_DG7 */
+ [16] = RCAR_GP_PIN(2, 6), /* DU0_DB0 */
+ [17] = RCAR_GP_PIN(2, 7), /* DU0_DB1 */
+ [18] = RCAR_GP_PIN(2, 8), /* DU0_DB2 */
+ [19] = RCAR_GP_PIN(2, 9), /* DU0_DB3 */
+ [20] = RCAR_GP_PIN(2, 10), /* DU0_DB4 */
+ [21] = RCAR_GP_PIN(2, 11), /* DU0_DB5 */
+ [22] = RCAR_GP_PIN(2, 12), /* DU0_DB6 */
+ [23] = RCAR_GP_PIN(2, 13), /* DU0_DB7 */
+ [24] = RCAR_GP_PIN(2, 14), /* DU0_DOTCLKIN */
+ [25] = RCAR_GP_PIN(2, 15), /* DU0_DOTCLKOUT0 */
+ [26] = RCAR_GP_PIN(2, 17), /* DU0_HSYNC */
+ [27] = RCAR_GP_PIN(2, 18), /* DU0_VSYNC */
+ [28] = RCAR_GP_PIN(2, 19), /* DU0_EXODDF */
+ [29] = RCAR_GP_PIN(2, 20), /* DU0_DISP */
+ [30] = RCAR_GP_PIN(2, 21), /* DU0_CDE */
+ [31] = RCAR_GP_PIN(2, 16), /* DU0_DOTCLKOUT1 */
+ } },
+ { PINMUX_BIAS_REG("PUPR3", 0x10c, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 24), /* VI0_CLK */
+ [ 1] = RCAR_GP_PIN(3, 25), /* VI0_CLKENB */
+ [ 2] = RCAR_GP_PIN(3, 26), /* VI0_FIELD */
+ [ 3] = RCAR_GP_PIN(3, 27), /* /VI0_HSYNC */
+ [ 4] = RCAR_GP_PIN(3, 28), /* /VI0_VSYNC */
+ [ 5] = RCAR_GP_PIN(3, 29), /* VI0_DATA0 */
+ [ 6] = RCAR_GP_PIN(3, 30), /* VI0_DATA1 */
+ [ 7] = RCAR_GP_PIN(3, 31), /* VI0_DATA2 */
+ [ 8] = RCAR_GP_PIN(4, 0), /* VI0_DATA3 */
+ [ 9] = RCAR_GP_PIN(4, 1), /* VI0_DATA4 */
+ [10] = RCAR_GP_PIN(4, 2), /* VI0_DATA5 */
+ [11] = RCAR_GP_PIN(4, 3), /* VI0_DATA6 */
+ [12] = RCAR_GP_PIN(4, 4), /* VI0_DATA7 */
+ [13] = RCAR_GP_PIN(4, 5), /* VI0_G2 */
+ [14] = RCAR_GP_PIN(4, 6), /* VI0_G3 */
+ [15] = RCAR_GP_PIN(4, 7), /* VI0_G4 */
+ [16] = RCAR_GP_PIN(4, 8), /* VI0_G5 */
+ [17] = RCAR_GP_PIN(4, 21), /* VI1_DATA12 */
+ [18] = RCAR_GP_PIN(4, 22), /* VI1_DATA13 */
+ [19] = RCAR_GP_PIN(4, 23), /* VI1_DATA14 */
+ [20] = RCAR_GP_PIN(4, 24), /* VI1_DATA15 */
+ [21] = RCAR_GP_PIN(4, 9), /* ETH_REF_CLK */
+ [22] = RCAR_GP_PIN(4, 10), /* ETH_TXD0 */
+ [23] = RCAR_GP_PIN(4, 11), /* ETH_TXD1 */
+ [24] = RCAR_GP_PIN(4, 12), /* ETH_CRS_DV */
+ [25] = RCAR_GP_PIN(4, 13), /* ETH_TX_EN */
+ [26] = RCAR_GP_PIN(4, 14), /* ETH_RX_ER */
+ [27] = RCAR_GP_PIN(4, 15), /* ETH_RXD0 */
+ [28] = RCAR_GP_PIN(4, 16), /* ETH_RXD1 */
+ [29] = RCAR_GP_PIN(4, 17), /* ETH_MDC */
+ [30] = RCAR_GP_PIN(4, 18), /* ETH_MDIO */
+ [31] = RCAR_GP_PIN(4, 19), /* ETH_LINK */
+ } },
+ { PINMUX_BIAS_REG("PUPR4", 0x110, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(3, 6), /* SSI_SCK012 */
+ [ 1] = RCAR_GP_PIN(3, 7), /* SSI_WS012 */
+ [ 2] = RCAR_GP_PIN(3, 10), /* SSI_SDATA0 */
+ [ 3] = RCAR_GP_PIN(3, 9), /* SSI_SDATA1 */
+ [ 4] = RCAR_GP_PIN(3, 8), /* SSI_SDATA2 */
+ [ 5] = RCAR_GP_PIN(3, 2), /* SSI_SCK34 */
+ [ 6] = RCAR_GP_PIN(3, 3), /* SSI_WS34 */
+ [ 7] = RCAR_GP_PIN(3, 5), /* SSI_SDATA3 */
+ [ 8] = RCAR_GP_PIN(3, 4), /* SSI_SDATA4 */
+ [ 9] = RCAR_GP_PIN(2, 31), /* SSI_SCK5 */
+ [10] = RCAR_GP_PIN(3, 0), /* SSI_WS5 */
+ [11] = RCAR_GP_PIN(3, 1), /* SSI_SDATA5 */
+ [12] = RCAR_GP_PIN(2, 28), /* SSI_SCK6 */
+ [13] = RCAR_GP_PIN(2, 29), /* SSI_WS6 */
+ [14] = RCAR_GP_PIN(2, 30), /* SSI_SDATA6 */
+ [15] = RCAR_GP_PIN(2, 24), /* SSI_SCK78 */
+ [16] = RCAR_GP_PIN(2, 25), /* SSI_WS78 */
+ [17] = RCAR_GP_PIN(2, 27), /* SSI_SDATA7 */
+ [18] = RCAR_GP_PIN(2, 26), /* SSI_SDATA8 */
+ [19] = RCAR_GP_PIN(3, 23), /* TCLK0 */
+ [20] = RCAR_GP_PIN(3, 11), /* SD0_CLK */
+ [21] = RCAR_GP_PIN(3, 12), /* SD0_CMD */
+ [22] = RCAR_GP_PIN(3, 13), /* SD0_DAT0 */
+ [23] = RCAR_GP_PIN(3, 14), /* SD0_DAT1 */
+ [24] = RCAR_GP_PIN(3, 15), /* SD0_DAT2 */
+ [25] = RCAR_GP_PIN(3, 16), /* SD0_DAT3 */
+ [26] = RCAR_GP_PIN(3, 17), /* SD0_CD */
+ [27] = RCAR_GP_PIN(3, 18), /* SD0_WP */
+ [28] = RCAR_GP_PIN(2, 22), /* AUDIO_CLKA */
+ [29] = RCAR_GP_PIN(2, 23), /* AUDIO_CLKB */
+ [30] = RCAR_GP_PIN(1, 14), /* IRQ2 */
+ [31] = RCAR_GP_PIN(1, 15), /* IRQ3 */
+ } },
+ { PINMUX_BIAS_REG("PUPR5", 0x114, "N/A", 0) {
+ [ 0] = RCAR_GP_PIN(0, 1), /* PENC0 */
+ [ 1] = RCAR_GP_PIN(0, 2), /* PENC1 */
+ [ 2] = RCAR_GP_PIN(0, 3), /* USB_OVC0 */
+ [ 3] = RCAR_GP_PIN(0, 4), /* USB_OVC1 */
+ [ 4] = RCAR_GP_PIN(1, 16), /* SCIF_CLK */
+ [ 5] = RCAR_GP_PIN(1, 17), /* TX0 */
+ [ 6] = RCAR_GP_PIN(1, 18), /* RX0 */
+ [ 7] = RCAR_GP_PIN(1, 19), /* SCK0 */
+ [ 8] = RCAR_GP_PIN(1, 20), /* /CTS0 */
+ [ 9] = RCAR_GP_PIN(1, 21), /* /RTS0 */
+ [10] = RCAR_GP_PIN(3, 19), /* HSPI_CLK0 */
+ [11] = RCAR_GP_PIN(3, 20), /* /HSPI_CS0 */
+ [12] = RCAR_GP_PIN(3, 21), /* HSPI_RX0 */
+ [13] = RCAR_GP_PIN(3, 22), /* HSPI_TX0 */
+ [14] = RCAR_GP_PIN(4, 20), /* ETH_MAGIC */
+ [15] = RCAR_GP_PIN(4, 25), /* AVS1 */
+ [16] = RCAR_GP_PIN(4, 26), /* AVS2 */
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- addr = pfc->windows->virt + info->reg;
+ addr = pfc->windows->virt + reg->puen;
- if (ioread32(addr) & BIT(info->bit))
+ if (ioread32(addr) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_DISABLE;
@@ -3103,21 +3144,20 @@ static unsigned int r8a7778_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7778_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
void __iomem *addr;
+ unsigned int bit;
u32 value;
- u32 bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- addr = pfc->windows->virt + info->reg;
- bit = BIT(info->bit);
+ addr = pfc->windows->virt + reg->puen;
- value = ioread32(addr) & ~bit;
+ value = ioread32(addr) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- value |= bit;
+ value |= BIT(bit);
iowrite32(value, addr);
}
@@ -3144,6 +3184,7 @@ const struct sh_pfc_soc_info r8a7778_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .bias_regs = pinmux_bias_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index a0ed220071f5..333a3470e842 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -5097,6 +5097,7 @@ static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
#ifdef CONFIG_PINCTRL_PFC_R8A7745
const struct sh_pfc_soc_info r8a7745_pinmux_info = {
.name = "r8a77450_pfc",
+ .ops = &r8a7794_pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 95fd0994893a..1d4d84f34d60 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -1443,12 +1443,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -3774,6 +3775,23 @@ static const unsigned int usb2_mux[] = {
USB2_PWEN_MARK, USB2_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+/* - USB31 ------------------------------------------------------------------ */
+static const unsigned int usb31_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int usb31_mux[] = {
+ USB31_PWEN_MARK, USB31_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -4080,6 +4098,8 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb30),
+ SH_PFC_PIN_GROUP(usb31),
};
static const char * const audio_clk_groups[] = {
@@ -4537,6 +4557,14 @@ static const char * const usb2_groups[] = {
"usb2",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
+static const char * const usb31_groups[] = {
+ "usb31",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -4588,6 +4616,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(usb30),
+ SH_PFC_FUNCTION(usb31),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5393,12 +5423,21 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5409,242 +5448,261 @@ static int r8a7795es1_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin,
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N_A26 */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB31_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB31_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N_A26 */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR */
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB31_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB31_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5653,28 +5711,24 @@ static unsigned int r8a7795es1_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795es1_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct sh_pfc_soc_operations r8a7795es1_pinmux_ops = {
@@ -5699,6 +5753,8 @@ const struct sh_pfc_soc_info r8a7795es1_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 8b35772cda98..d1cec6d12e81 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1508,12 +1508,13 @@ static const u16 pinmux_data[] = {
};
/*
- * R8A7795 has 8 banks with 32 PGIOS in each => 256 GPIOs.
+ * R8A7795 has 8 banks with 32 GPIOs in each => 256 GPIOs.
* Physical layout rows: A - AW, cols: 1 - 39.
*/
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -1572,6 +1573,127 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+ AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+ AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+ AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+ AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+ AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+ AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+ AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+ AUDIO_CLKOUT2_B_MARK,
+};
+static const unsigned int audio_clkout3_a_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+ AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+ AUDIO_CLKOUT3_B_MARK,
+};
+
/* - EtherAVB --------------------------------------------------------------- */
static const unsigned int avb_link_pins[] = {
/* AVB_LINK */
@@ -1659,6 +1781,221 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - DRIF0 --------------------------------------------------------------- */
+static const unsigned int drif0_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif0_ctrl_a_mux[] = {
+ RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK,
+};
+static const unsigned int drif0_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif0_data0_a_mux[] = {
+ RIF0_D0_A_MARK,
+};
+static const unsigned int drif0_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif0_data1_a_mux[] = {
+ RIF0_D1_A_MARK,
+};
+static const unsigned int drif0_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int drif0_ctrl_b_mux[] = {
+ RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK,
+};
+static const unsigned int drif0_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int drif0_data0_b_mux[] = {
+ RIF0_D0_B_MARK,
+};
+static const unsigned int drif0_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int drif0_data1_b_mux[] = {
+ RIF0_D1_B_MARK,
+};
+static const unsigned int drif0_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int drif0_ctrl_c_mux[] = {
+ RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK,
+};
+static const unsigned int drif0_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int drif0_data0_c_mux[] = {
+ RIF0_D0_C_MARK,
+};
+static const unsigned int drif0_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int drif0_data1_c_mux[] = {
+ RIF0_D1_C_MARK,
+};
+/* - DRIF1 --------------------------------------------------------------- */
+static const unsigned int drif1_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif1_ctrl_a_mux[] = {
+ RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK,
+};
+static const unsigned int drif1_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif1_data0_a_mux[] = {
+ RIF1_D0_A_MARK,
+};
+static const unsigned int drif1_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif1_data1_a_mux[] = {
+ RIF1_D1_A_MARK,
+};
+static const unsigned int drif1_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int drif1_ctrl_b_mux[] = {
+ RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK,
+};
+static const unsigned int drif1_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 7),
+};
+static const unsigned int drif1_data0_b_mux[] = {
+ RIF1_D0_B_MARK,
+};
+static const unsigned int drif1_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 8),
+};
+static const unsigned int drif1_data1_b_mux[] = {
+ RIF1_D1_B_MARK,
+};
+static const unsigned int drif1_ctrl_c_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11),
+};
+static const unsigned int drif1_ctrl_c_mux[] = {
+ RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK,
+};
+static const unsigned int drif1_data0_c_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int drif1_data0_c_mux[] = {
+ RIF1_D0_C_MARK,
+};
+static const unsigned int drif1_data1_c_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int drif1_data1_c_mux[] = {
+ RIF1_D1_C_MARK,
+};
+/* - DRIF2 --------------------------------------------------------------- */
+static const unsigned int drif2_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int drif2_ctrl_a_mux[] = {
+ RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK,
+};
+static const unsigned int drif2_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int drif2_data0_a_mux[] = {
+ RIF2_D0_A_MARK,
+};
+static const unsigned int drif2_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int drif2_data1_a_mux[] = {
+ RIF2_D1_A_MARK,
+};
+static const unsigned int drif2_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int drif2_ctrl_b_mux[] = {
+ RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK,
+};
+static const unsigned int drif2_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int drif2_data0_b_mux[] = {
+ RIF2_D0_B_MARK,
+};
+static const unsigned int drif2_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int drif2_data1_b_mux[] = {
+ RIF2_D1_B_MARK,
+};
+/* - DRIF3 --------------------------------------------------------------- */
+static const unsigned int drif3_ctrl_a_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int drif3_ctrl_a_mux[] = {
+ RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK,
+};
+static const unsigned int drif3_data0_a_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int drif3_data0_a_mux[] = {
+ RIF3_D0_A_MARK,
+};
+static const unsigned int drif3_data1_a_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int drif3_data1_a_mux[] = {
+ RIF3_D1_A_MARK,
+};
+static const unsigned int drif3_ctrl_b_pins[] = {
+ /* CLK, SYNC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int drif3_ctrl_b_mux[] = {
+ RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK,
+};
+static const unsigned int drif3_data0_b_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int drif3_data0_b_mux[] = {
+ RIF3_D0_B_MARK,
+};
+static const unsigned int drif3_data1_b_pins[] = {
+ /* D1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int drif3_data1_b_mux[] = {
+ RIF3_D1_B_MARK,
+};
+
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -1740,6 +2077,308 @@ static const unsigned int du_disp_mux[] = {
DU_DISP_MARK,
};
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 16), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 6),
+};
+static const unsigned int hscif1_data_a_mux[] = {
+ HRX1_A_MARK, HTX1_A_MARK,
+};
+static const unsigned int hscif1_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif1_clk_a_mux[] = {
+ HSCK1_A_MARK,
+};
+static const unsigned int hscif1_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 7),
+};
+static const unsigned int hscif1_ctrl_a_mux[] = {
+ HRTS1_N_A_MARK, HCTS1_N_A_MARK,
+};
+
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int hscif1_clk_b_mux[] = {
+ HSCK1_B_MARK,
+};
+static const unsigned int hscif1_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 3),
+};
+static const unsigned int hscif1_ctrl_b_mux[] = {
+ HRTS1_N_B_MARK, HCTS1_N_B_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int hscif2_data_a_mux[] = {
+ HRX2_A_MARK, HTX2_A_MARK,
+};
+static const unsigned int hscif2_clk_a_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int hscif2_clk_a_mux[] = {
+ HSCK2_A_MARK,
+};
+static const unsigned int hscif2_ctrl_a_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int hscif2_ctrl_a_mux[] = {
+ HRTS2_N_A_MARK, HCTS2_N_A_MARK,
+};
+
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_clk_b_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int hscif2_clk_b_mux[] = {
+ HSCK2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 20), RCAR_GP_PIN(6, 19),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(6, 25), RCAR_GP_PIN(6, 26),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(6, 24),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+static const unsigned int hscif2_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int hscif2_ctrl_c_mux[] = {
+ HRTS2_N_C_MARK, HCTS2_N_C_MARK,
+};
+/* - HSCIF3 ----------------------------------------------------------------- */
+static const unsigned int hscif3_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int hscif3_data_a_mux[] = {
+ HRX3_A_MARK, HTX3_A_MARK,
+};
+static const unsigned int hscif3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif3_clk_mux[] = {
+ HSCK3_MARK,
+};
+static const unsigned int hscif3_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int hscif3_ctrl_mux[] = {
+ HRTS3_N_MARK, HCTS3_N_MARK,
+};
+
+static const unsigned int hscif3_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+};
+static const unsigned int hscif3_data_b_mux[] = {
+ HRX3_B_MARK, HTX3_B_MARK,
+};
+static const unsigned int hscif3_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int hscif3_data_c_mux[] = {
+ HRX3_C_MARK, HTX3_C_MARK,
+};
+static const unsigned int hscif3_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int hscif3_data_d_mux[] = {
+ HRX3_D_MARK, HTX3_D_MARK,
+};
+/* - HSCIF4 ----------------------------------------------------------------- */
+static const unsigned int hscif4_data_a_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+};
+static const unsigned int hscif4_data_a_mux[] = {
+ HRX4_A_MARK, HTX4_A_MARK,
+};
+static const unsigned int hscif4_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_clk_mux[] = {
+ HSCK4_MARK,
+};
+static const unsigned int hscif4_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14),
+};
+static const unsigned int hscif4_ctrl_mux[] = {
+ HRTS4_N_MARK, HCTS4_N_MARK,
+};
+
+static const unsigned int hscif4_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int hscif4_data_b_mux[] = {
+ HRX4_B_MARK, HTX4_B_MARK,
+};
+
+/* - I2C -------------------------------------------------------------------- */
+static const unsigned int i2c1_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10),
+};
+static const unsigned int i2c1_a_mux[] = {
+ SDA1_A_MARK, SCL1_A_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 24), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SDA1_B_MARK, SCL1_B_MARK,
+};
+static const unsigned int i2c2_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4),
+};
+static const unsigned int i2c2_a_mux[] = {
+ SDA2_A_MARK, SCL2_A_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 12),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SDA2_B_MARK, SCL2_B_MARK,
+};
+static const unsigned int i2c6_a_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 8), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c6_a_mux[] = {
+ SDA6_A_MARK, SCL6_A_MARK,
+};
+static const unsigned int i2c6_b_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c6_b_mux[] = {
+ SDA6_B_MARK, SCL6_B_MARK,
+};
+static const unsigned int i2c6_c_pins[] = {
+ /* SDA, SCL */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14),
+};
+static const unsigned int i2c6_c_mux[] = {
+ SDA6_C_MARK, SCL6_C_MARK,
+};
+
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -2750,6 +3389,390 @@ static const unsigned int scif_clk_b_mux[] = {
SCIF_CLK_B_MARK,
};
+/* - SDHI0 ------------------------------------------------------------------ */
+static const unsigned int sdhi0_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 2),
+};
+static const unsigned int sdhi0_data1_mux[] = {
+ SD0_DAT0_MARK,
+};
+static const unsigned int sdhi0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3),
+ RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5),
+};
+static const unsigned int sdhi0_data4_mux[] = {
+ SD0_DAT0_MARK, SD0_DAT1_MARK,
+ SD0_DAT2_MARK, SD0_DAT3_MARK,
+};
+static const unsigned int sdhi0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+};
+static const unsigned int sdhi0_ctrl_mux[] = {
+ SD0_CLK_MARK, SD0_CMD_MARK,
+};
+static const unsigned int sdhi0_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int sdhi0_cd_mux[] = {
+ SD0_CD_MARK,
+};
+static const unsigned int sdhi0_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 13),
+};
+static const unsigned int sdhi0_wp_mux[] = {
+ SD0_WP_MARK,
+};
+/* - SDHI1 ------------------------------------------------------------------ */
+static const unsigned int sdhi1_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(3, 8),
+};
+static const unsigned int sdhi1_data1_mux[] = {
+ SD1_DAT0_MARK,
+};
+static const unsigned int sdhi1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi1_data4_mux[] = {
+ SD1_DAT0_MARK, SD1_DAT1_MARK,
+ SD1_DAT2_MARK, SD1_DAT3_MARK,
+};
+static const unsigned int sdhi1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int sdhi1_ctrl_mux[] = {
+ SD1_CLK_MARK, SD1_CMD_MARK,
+};
+static const unsigned int sdhi1_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(3, 14),
+};
+static const unsigned int sdhi1_cd_mux[] = {
+ SD1_CD_MARK,
+};
+static const unsigned int sdhi1_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(3, 15),
+};
+static const unsigned int sdhi1_wp_mux[] = {
+ SD1_WP_MARK,
+};
+/* - SDHI2 ------------------------------------------------------------------ */
+static const unsigned int sdhi2_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 2),
+};
+static const unsigned int sdhi2_data1_mux[] = {
+ SD2_DAT0_MARK,
+};
+static const unsigned int sdhi2_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+};
+static const unsigned int sdhi2_data4_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+};
+static const unsigned int sdhi2_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 3),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int sdhi2_data8_mux[] = {
+ SD2_DAT0_MARK, SD2_DAT1_MARK,
+ SD2_DAT2_MARK, SD2_DAT3_MARK,
+ SD2_DAT4_MARK, SD2_DAT5_MARK,
+ SD2_DAT6_MARK, SD2_DAT7_MARK,
+};
+static const unsigned int sdhi2_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 0), RCAR_GP_PIN(4, 1),
+};
+static const unsigned int sdhi2_ctrl_mux[] = {
+ SD2_CLK_MARK, SD2_CMD_MARK,
+};
+static const unsigned int sdhi2_cd_a_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int sdhi2_cd_a_mux[] = {
+ SD2_CD_A_MARK,
+};
+static const unsigned int sdhi2_cd_b_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int sdhi2_cd_b_mux[] = {
+ SD2_CD_B_MARK,
+};
+static const unsigned int sdhi2_wp_a_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 14),
+};
+static const unsigned int sdhi2_wp_a_mux[] = {
+ SD2_WP_A_MARK,
+};
+static const unsigned int sdhi2_wp_b_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(5, 11),
+};
+static const unsigned int sdhi2_wp_b_mux[] = {
+ SD2_WP_B_MARK,
+};
+static const unsigned int sdhi2_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int sdhi2_ds_mux[] = {
+ SD2_DS_MARK,
+};
+/* - SDHI3 ------------------------------------------------------------------ */
+static const unsigned int sdhi3_data1_pins[] = {
+ /* D0 */
+ RCAR_GP_PIN(4, 9),
+};
+static const unsigned int sdhi3_data1_mux[] = {
+ SD3_DAT0_MARK,
+};
+static const unsigned int sdhi3_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int sdhi3_data4_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+};
+static const unsigned int sdhi3_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+ RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_data8_mux[] = {
+ SD3_DAT0_MARK, SD3_DAT1_MARK,
+ SD3_DAT2_MARK, SD3_DAT3_MARK,
+ SD3_DAT4_MARK, SD3_DAT5_MARK,
+ SD3_DAT6_MARK, SD3_DAT7_MARK,
+};
+static const unsigned int sdhi3_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 8),
+};
+static const unsigned int sdhi3_ctrl_mux[] = {
+ SD3_CLK_MARK, SD3_CMD_MARK,
+};
+static const unsigned int sdhi3_cd_pins[] = {
+ /* CD */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int sdhi3_cd_mux[] = {
+ SD3_CD_MARK,
+};
+static const unsigned int sdhi3_wp_pins[] = {
+ /* WP */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int sdhi3_wp_mux[] = {
+ SD3_WP_MARK,
+};
+static const unsigned int sdhi3_ds_pins[] = {
+ /* DS */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int sdhi3_ds_mux[] = {
+ SD3_DS_MARK,
+};
+
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+ SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+ SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+ SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+ SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+ SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+ SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
/* PWEN, OVC */
@@ -2783,7 +3806,33 @@ static const unsigned int usb2_ch3_mux[] = {
USB2_CH3_PWEN_MARK, USB2_CH3_OVC_MARK,
};
+/* - USB30 ------------------------------------------------------------------ */
+static const unsigned int usb30_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int usb30_mux[] = {
+ USB30_PWEN_MARK, USB30_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
@@ -2794,6 +3843,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_a),
+ SH_PFC_PIN_GROUP(drif0_data0_a),
+ SH_PFC_PIN_GROUP(drif0_data1_a),
+ SH_PFC_PIN_GROUP(drif0_ctrl_b),
+ SH_PFC_PIN_GROUP(drif0_data0_b),
+ SH_PFC_PIN_GROUP(drif0_data1_b),
+ SH_PFC_PIN_GROUP(drif0_ctrl_c),
+ SH_PFC_PIN_GROUP(drif0_data0_c),
+ SH_PFC_PIN_GROUP(drif0_data1_c),
+ SH_PFC_PIN_GROUP(drif1_ctrl_a),
+ SH_PFC_PIN_GROUP(drif1_data0_a),
+ SH_PFC_PIN_GROUP(drif1_data1_a),
+ SH_PFC_PIN_GROUP(drif1_ctrl_b),
+ SH_PFC_PIN_GROUP(drif1_data0_b),
+ SH_PFC_PIN_GROUP(drif1_data1_b),
+ SH_PFC_PIN_GROUP(drif1_ctrl_c),
+ SH_PFC_PIN_GROUP(drif1_data0_c),
+ SH_PFC_PIN_GROUP(drif1_data1_c),
+ SH_PFC_PIN_GROUP(drif2_ctrl_a),
+ SH_PFC_PIN_GROUP(drif2_data0_a),
+ SH_PFC_PIN_GROUP(drif2_data1_a),
+ SH_PFC_PIN_GROUP(drif2_ctrl_b),
+ SH_PFC_PIN_GROUP(drif2_data0_b),
+ SH_PFC_PIN_GROUP(drif2_data1_b),
+ SH_PFC_PIN_GROUP(drif3_ctrl_a),
+ SH_PFC_PIN_GROUP(drif3_data0_a),
+ SH_PFC_PIN_GROUP(drif3_data1_a),
+ SH_PFC_PIN_GROUP(drif3_ctrl_b),
+ SH_PFC_PIN_GROUP(drif3_data0_b),
+ SH_PFC_PIN_GROUP(drif3_data1_b),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -2802,6 +3881,47 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(du_oddf),
SH_PFC_PIN_GROUP(du_cde),
SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_a),
+ SH_PFC_PIN_GROUP(hscif1_clk_a),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_clk_b),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_a),
+ SH_PFC_PIN_GROUP(hscif2_clk_a),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_a),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_clk_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif3_data_a),
+ SH_PFC_PIN_GROUP(hscif3_clk),
+ SH_PFC_PIN_GROUP(hscif3_ctrl),
+ SH_PFC_PIN_GROUP(hscif3_data_b),
+ SH_PFC_PIN_GROUP(hscif3_data_c),
+ SH_PFC_PIN_GROUP(hscif3_data_d),
+ SH_PFC_PIN_GROUP(hscif4_data_a),
+ SH_PFC_PIN_GROUP(hscif4_clk),
+ SH_PFC_PIN_GROUP(hscif4_ctrl),
+ SH_PFC_PIN_GROUP(hscif4_data_b),
+ SH_PFC_PIN_GROUP(i2c1_a),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c2_a),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c6_a),
+ SH_PFC_PIN_GROUP(i2c6_b),
+ SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -2943,10 +4063,82 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk_a),
SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_data8),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd_a),
+ SH_PFC_PIN_GROUP(sdhi2_wp_a),
+ SH_PFC_PIN_GROUP(sdhi2_cd_b),
+ SH_PFC_PIN_GROUP(sdhi2_wp_b),
+ SH_PFC_PIN_GROUP(sdhi2_ds),
+ SH_PFC_PIN_GROUP(sdhi3_data1),
+ SH_PFC_PIN_GROUP(sdhi3_data4),
+ SH_PFC_PIN_GROUP(sdhi3_data8),
+ SH_PFC_PIN_GROUP(sdhi3_ctrl),
+ SH_PFC_PIN_GROUP(sdhi3_cd),
+ SH_PFC_PIN_GROUP(sdhi3_wp),
+ SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
SH_PFC_PIN_GROUP(usb2),
SH_PFC_PIN_GROUP(usb2_ch3),
+ SH_PFC_PIN_GROUP(usb30),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a_a",
+ "audio_clk_a_b",
+ "audio_clk_a_c",
+ "audio_clk_b_a",
+ "audio_clk_b_b",
+ "audio_clk_c_a",
+ "audio_clk_c_b",
+ "audio_clkout_a",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
+ "audio_clkout1_a",
+ "audio_clkout1_b",
+ "audio_clkout2_a",
+ "audio_clkout2_b",
+ "audio_clkout3_a",
+ "audio_clkout3_b",
};
static const char * const avb_groups[] = {
@@ -2962,6 +4154,48 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const drif0_groups[] = {
+ "drif0_ctrl_a",
+ "drif0_data0_a",
+ "drif0_data1_a",
+ "drif0_ctrl_b",
+ "drif0_data0_b",
+ "drif0_data1_b",
+ "drif0_ctrl_c",
+ "drif0_data0_c",
+ "drif0_data1_c",
+};
+
+static const char * const drif1_groups[] = {
+ "drif1_ctrl_a",
+ "drif1_data0_a",
+ "drif1_data1_a",
+ "drif1_ctrl_b",
+ "drif1_data0_b",
+ "drif1_data1_b",
+ "drif1_ctrl_c",
+ "drif1_data0_c",
+ "drif1_data1_c",
+};
+
+static const char * const drif2_groups[] = {
+ "drif2_ctrl_a",
+ "drif2_data0_a",
+ "drif2_data1_a",
+ "drif2_ctrl_b",
+ "drif2_data0_b",
+ "drif2_data1_b",
+};
+
+static const char * const drif3_groups[] = {
+ "drif3_ctrl_a",
+ "drif3_data0_a",
+ "drif3_data1_a",
+ "drif3_ctrl_b",
+ "drif3_data0_b",
+ "drif3_data1_b",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -2973,6 +4207,74 @@ static const char * const du_groups[] = {
"du_disp",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data_a",
+ "hscif1_clk_a",
+ "hscif1_ctrl_a",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data_a",
+ "hscif2_clk_a",
+ "hscif2_ctrl_a",
+ "hscif2_data_b",
+ "hscif2_clk_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_ctrl_c",
+};
+
+static const char * const hscif3_groups[] = {
+ "hscif3_data_a",
+ "hscif3_clk",
+ "hscif3_ctrl",
+ "hscif3_data_b",
+ "hscif3_data_c",
+ "hscif3_data_d",
+};
+
+static const char * const hscif4_groups[] = {
+ "hscif4_data_a",
+ "hscif4_clk",
+ "hscif4_ctrl",
+ "hscif4_data_b",
+};
+
+static const char * const i2c1_groups[] = {
+ "i2c1_a",
+ "i2c1_b",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2_a",
+ "i2c2_b",
+};
+
+static const char * const i2c6_groups[] = {
+ "i2c6_a",
+ "i2c6_b",
+ "i2c6_c",
+};
+
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -3168,6 +4470,72 @@ static const char * const scif_clk_groups[] = {
"scif_clk_b",
};
+static const char * const sdhi0_groups[] = {
+ "sdhi0_data1",
+ "sdhi0_data4",
+ "sdhi0_ctrl",
+ "sdhi0_cd",
+ "sdhi0_wp",
+};
+
+static const char * const sdhi1_groups[] = {
+ "sdhi1_data1",
+ "sdhi1_data4",
+ "sdhi1_ctrl",
+ "sdhi1_cd",
+ "sdhi1_wp",
+};
+
+static const char * const sdhi2_groups[] = {
+ "sdhi2_data1",
+ "sdhi2_data4",
+ "sdhi2_data8",
+ "sdhi2_ctrl",
+ "sdhi2_cd_a",
+ "sdhi2_wp_a",
+ "sdhi2_cd_b",
+ "sdhi2_wp_b",
+ "sdhi2_ds",
+};
+
+static const char * const sdhi3_groups[] = {
+ "sdhi3_data1",
+ "sdhi3_data4",
+ "sdhi3_data8",
+ "sdhi3_ctrl",
+ "sdhi3_cd",
+ "sdhi3_wp",
+ "sdhi3_ds",
+};
+
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi01239_ctrl",
+ "ssi1_data_a",
+ "ssi1_data_b",
+ "ssi1_ctrl_a",
+ "ssi1_ctrl_b",
+ "ssi2_data_a",
+ "ssi2_data_b",
+ "ssi2_ctrl_a",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi349_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi8_data",
+ "ssi9_data_a",
+ "ssi9_data_b",
+ "ssi9_ctrl_a",
+ "ssi9_ctrl_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -3184,9 +4552,27 @@ static const char * const usb2_ch3_groups[] = {
"usb2_ch3",
};
+static const char * const usb30_groups[] = {
+ "usb30",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(drif0),
+ SH_PFC_FUNCTION(drif1),
+ SH_PFC_FUNCTION(drif2),
+ SH_PFC_FUNCTION(drif3),
SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(hscif3),
+ SH_PFC_FUNCTION(hscif4),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -3205,10 +4591,16 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(usb2),
SH_PFC_FUNCTION(usb2_ch3),
+ SH_PFC_FUNCTION(usb30),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4021,11 +5413,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -4036,242 +5437,261 @@ static int r8a7795_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* HDMI1_CEC */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { PIN_NUMBER('F', 1), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST# */
- { PIN_A_NUMBER('R', 8), PU3, 1 }, /* DU_DOTCLKIN3 */
- { PIN_A_NUMBER('R', 7), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* USB2_CH3_OVC */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* USB2_CH3_PWEN */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = PIN_NUMBER('F', 1), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* HDMI1_CEC */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 7), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST# */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR */
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* USB2_CH3_PWEN */
+ [ 6] = RCAR_GP_PIN(6, 31), /* USB2_CH3_OVC */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -4280,28 +5700,24 @@ static unsigned int r8a7795_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7795_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct soc_device_attribute r8a7795es1[] = {
@@ -4340,6 +5756,8 @@ const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index 200e1f4f6db9..73ed9c74c137 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -495,7 +495,7 @@ FM(IP16_31_28) IP16_31_28 FM(IP17_31_28) IP17_31_28
#define MOD_SEL1_1 FM(SEL_PWM2_0) FM(SEL_PWM2_1)
#define MOD_SEL1_0 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
-/* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
+/* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
@@ -1518,6 +1518,7 @@ static const u16 pinmux_data[] = {
#define ROW_GROUP_A(r) ('Z' - 'A' + 1 + (r))
#define PIN_NUMBER(r, c) (((r) - 'A') * 39 + (c) + 300)
#define PIN_A_NUMBER(r, c) PIN_NUMBER(ROW_GROUP_A(r), c)
+#define PIN_NONE U16_MAX
static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
@@ -2392,6 +2393,50 @@ static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -3922,6 +3967,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -4286,6 +4337,15 @@ static const char * const i2c6_groups[] = {
"i2c6_c",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -4580,6 +4640,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
@@ -5416,11 +5477,20 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ },
};
+enum ioctrl_regs {
+ POCCTRL,
+};
+
+static const struct pinmux_ioctrl_reg pinmux_ioctrl_regs[] = {
+ [POCCTRL] = { 0xe6060380, },
+ { /* sentinel */ },
+};
+
static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *pocctrl)
{
int bit = -EINVAL;
- *pocctrl = 0xe6060380;
+ *pocctrl = pinmux_ioctrl_regs[POCCTRL].reg;
if (pin >= RCAR_GP_PIN(3, 0) && pin <= RCAR_GP_PIN(3, 11))
bit = pin & 0x1f;
@@ -5431,242 +5501,261 @@ static int r8a7796_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc
return bit;
}
-#define PUEN 0xe6060400
-#define PUD 0xe6060440
-
-#define PU0 0x00
-#define PU1 0x04
-#define PU2 0x08
-#define PU3 0x0c
-#define PU4 0x10
-#define PU5 0x14
-#define PU6 0x18
-
-static const struct sh_pfc_bias_info bias_info[] = {
- { RCAR_GP_PIN(2, 11), PU0, 31 }, /* AVB_PHY_INT */
- { RCAR_GP_PIN(2, 10), PU0, 30 }, /* AVB_MAGIC */
- { RCAR_GP_PIN(2, 9), PU0, 29 }, /* AVB_MDC */
- { PIN_NUMBER('A', 9), PU0, 28 }, /* AVB_MDIO */
- { PIN_NUMBER('A', 12), PU0, 27 }, /* AVB_TXCREFCLK */
- { PIN_NUMBER('B', 17), PU0, 26 }, /* AVB_TD3 */
- { PIN_NUMBER('A', 17), PU0, 25 }, /* AVB_TD2 */
- { PIN_NUMBER('B', 18), PU0, 24 }, /* AVB_TD1 */
- { PIN_NUMBER('A', 18), PU0, 23 }, /* AVB_TD0 */
- { PIN_NUMBER('A', 19), PU0, 22 }, /* AVB_TXC */
- { PIN_NUMBER('A', 8), PU0, 21 }, /* AVB_TX_CTL */
- { PIN_NUMBER('B', 14), PU0, 20 }, /* AVB_RD3 */
- { PIN_NUMBER('A', 14), PU0, 19 }, /* AVB_RD2 */
- { PIN_NUMBER('B', 13), PU0, 18 }, /* AVB_RD1 */
- { PIN_NUMBER('A', 13), PU0, 17 }, /* AVB_RD0 */
- { PIN_NUMBER('B', 19), PU0, 16 }, /* AVB_RXC */
- { PIN_NUMBER('A', 16), PU0, 15 }, /* AVB_RX_CTL */
- { PIN_NUMBER('V', 7), PU0, 14 }, /* RPC_RESET# */
- { PIN_NUMBER('V', 6), PU0, 13 }, /* RPC_WP# */
- { PIN_NUMBER('Y', 7), PU0, 12 }, /* RPC_INT# */
- { PIN_NUMBER('V', 5), PU0, 11 }, /* QSPI1_SSL */
- { PIN_A_NUMBER('C', 3), PU0, 10 }, /* QSPI1_IO3 */
- { PIN_A_NUMBER('E', 4), PU0, 9 }, /* QSPI1_IO2 */
- { PIN_A_NUMBER('E', 5), PU0, 8 }, /* QSPI1_MISO_IO1 */
- { PIN_A_NUMBER('C', 7), PU0, 7 }, /* QSPI1_MOSI_IO0 */
- { PIN_NUMBER('V', 3), PU0, 6 }, /* QSPI1_SPCLK */
- { PIN_NUMBER('Y', 3), PU0, 5 }, /* QSPI0_SSL */
- { PIN_A_NUMBER('B', 6), PU0, 4 }, /* QSPI0_IO3 */
- { PIN_NUMBER('Y', 6), PU0, 3 }, /* QSPI0_IO2 */
- { PIN_A_NUMBER('B', 4), PU0, 2 }, /* QSPI0_MISO_IO1 */
- { PIN_A_NUMBER('C', 5), PU0, 1 }, /* QSPI0_MOSI_IO0 */
- { PIN_NUMBER('W', 3), PU0, 0 }, /* QSPI0_SPCLK */
-
- { RCAR_GP_PIN(1, 19), PU1, 31 }, /* A19 */
- { RCAR_GP_PIN(1, 18), PU1, 30 }, /* A18 */
- { RCAR_GP_PIN(1, 17), PU1, 29 }, /* A17 */
- { RCAR_GP_PIN(1, 16), PU1, 28 }, /* A16 */
- { RCAR_GP_PIN(1, 15), PU1, 27 }, /* A15 */
- { RCAR_GP_PIN(1, 14), PU1, 26 }, /* A14 */
- { RCAR_GP_PIN(1, 13), PU1, 25 }, /* A13 */
- { RCAR_GP_PIN(1, 12), PU1, 24 }, /* A12 */
- { RCAR_GP_PIN(1, 11), PU1, 23 }, /* A11 */
- { RCAR_GP_PIN(1, 10), PU1, 22 }, /* A10 */
- { RCAR_GP_PIN(1, 9), PU1, 21 }, /* A9 */
- { RCAR_GP_PIN(1, 8), PU1, 20 }, /* A8 */
- { RCAR_GP_PIN(1, 7), PU1, 19 }, /* A7 */
- { RCAR_GP_PIN(1, 6), PU1, 18 }, /* A6 */
- { RCAR_GP_PIN(1, 5), PU1, 17 }, /* A5 */
- { RCAR_GP_PIN(1, 4), PU1, 16 }, /* A4 */
- { RCAR_GP_PIN(1, 3), PU1, 15 }, /* A3 */
- { RCAR_GP_PIN(1, 2), PU1, 14 }, /* A2 */
- { RCAR_GP_PIN(1, 1), PU1, 13 }, /* A1 */
- { RCAR_GP_PIN(1, 0), PU1, 12 }, /* A0 */
- { RCAR_GP_PIN(2, 8), PU1, 11 }, /* PWM2_A */
- { RCAR_GP_PIN(2, 7), PU1, 10 }, /* PWM1_A */
- { RCAR_GP_PIN(2, 6), PU1, 9 }, /* PWM0 */
- { RCAR_GP_PIN(2, 5), PU1, 8 }, /* IRQ5 */
- { RCAR_GP_PIN(2, 4), PU1, 7 }, /* IRQ4 */
- { RCAR_GP_PIN(2, 3), PU1, 6 }, /* IRQ3 */
- { RCAR_GP_PIN(2, 2), PU1, 5 }, /* IRQ2 */
- { RCAR_GP_PIN(2, 1), PU1, 4 }, /* IRQ1 */
- { RCAR_GP_PIN(2, 0), PU1, 3 }, /* IRQ0 */
- { RCAR_GP_PIN(2, 14), PU1, 2 }, /* AVB_AVTP_CAPTURE_A */
- { RCAR_GP_PIN(2, 13), PU1, 1 }, /* AVB_AVTP_MATCH_A */
- { RCAR_GP_PIN(2, 12), PU1, 0 }, /* AVB_LINK */
-
- { PIN_A_NUMBER('P', 8), PU2, 31 }, /* DU_DOTCLKIN1 */
- { PIN_A_NUMBER('P', 7), PU2, 30 }, /* DU_DOTCLKIN0 */
- { RCAR_GP_PIN(7, 3), PU2, 29 }, /* GP7_03 */
- { RCAR_GP_PIN(7, 2), PU2, 28 }, /* HDMI0_CEC */
- { RCAR_GP_PIN(7, 1), PU2, 27 }, /* AVS2 */
- { RCAR_GP_PIN(7, 0), PU2, 26 }, /* AVS1 */
- { RCAR_GP_PIN(0, 15), PU2, 25 }, /* D15 */
- { RCAR_GP_PIN(0, 14), PU2, 24 }, /* D14 */
- { RCAR_GP_PIN(0, 13), PU2, 23 }, /* D13 */
- { RCAR_GP_PIN(0, 12), PU2, 22 }, /* D12 */
- { RCAR_GP_PIN(0, 11), PU2, 21 }, /* D11 */
- { RCAR_GP_PIN(0, 10), PU2, 20 }, /* D10 */
- { RCAR_GP_PIN(0, 9), PU2, 19 }, /* D9 */
- { RCAR_GP_PIN(0, 8), PU2, 18 }, /* D8 */
- { RCAR_GP_PIN(0, 7), PU2, 17 }, /* D7 */
- { RCAR_GP_PIN(0, 6), PU2, 16 }, /* D6 */
- { RCAR_GP_PIN(0, 5), PU2, 15 }, /* D5 */
- { RCAR_GP_PIN(0, 4), PU2, 14 }, /* D4 */
- { RCAR_GP_PIN(0, 3), PU2, 13 }, /* D3 */
- { RCAR_GP_PIN(0, 2), PU2, 12 }, /* D2 */
- { RCAR_GP_PIN(0, 1), PU2, 11 }, /* D1 */
- { RCAR_GP_PIN(0, 0), PU2, 10 }, /* D0 */
- { PIN_NUMBER('C', 1), PU2, 9 }, /* PRESETOUT# */
- { RCAR_GP_PIN(1, 27), PU2, 8 }, /* EX_WAIT0_A */
- { RCAR_GP_PIN(1, 26), PU2, 7 }, /* WE1_N */
- { RCAR_GP_PIN(1, 25), PU2, 6 }, /* WE0_N */
- { RCAR_GP_PIN(1, 24), PU2, 5 }, /* RD_WR_N */
- { RCAR_GP_PIN(1, 23), PU2, 4 }, /* RD_N */
- { RCAR_GP_PIN(1, 22), PU2, 3 }, /* BS_N */
- { RCAR_GP_PIN(1, 21), PU2, 2 }, /* CS1_N */
- { RCAR_GP_PIN(1, 20), PU2, 1 }, /* CS0_N */
- { RCAR_GP_PIN(1, 28), PU2, 0 }, /* CLKOUT */
-
- { RCAR_GP_PIN(4, 9), PU3, 31 }, /* SD3_DAT0 */
- { RCAR_GP_PIN(4, 8), PU3, 30 }, /* SD3_CMD */
- { RCAR_GP_PIN(4, 7), PU3, 29 }, /* SD3_CLK */
- { RCAR_GP_PIN(4, 6), PU3, 28 }, /* SD2_DS */
- { RCAR_GP_PIN(4, 5), PU3, 27 }, /* SD2_DAT3 */
- { RCAR_GP_PIN(4, 4), PU3, 26 }, /* SD2_DAT2 */
- { RCAR_GP_PIN(4, 3), PU3, 25 }, /* SD2_DAT1 */
- { RCAR_GP_PIN(4, 2), PU3, 24 }, /* SD2_DAT0 */
- { RCAR_GP_PIN(4, 1), PU3, 23 }, /* SD2_CMD */
- { RCAR_GP_PIN(4, 0), PU3, 22 }, /* SD2_CLK */
- { RCAR_GP_PIN(3, 11), PU3, 21 }, /* SD1_DAT3 */
- { RCAR_GP_PIN(3, 10), PU3, 20 }, /* SD1_DAT2 */
- { RCAR_GP_PIN(3, 9), PU3, 19 }, /* SD1_DAT1 */
- { RCAR_GP_PIN(3, 8), PU3, 18 }, /* SD1_DAT0 */
- { RCAR_GP_PIN(3, 7), PU3, 17 }, /* SD1_CMD */
- { RCAR_GP_PIN(3, 6), PU3, 16 }, /* SD1_CLK */
- { RCAR_GP_PIN(3, 5), PU3, 15 }, /* SD0_DAT3 */
- { RCAR_GP_PIN(3, 4), PU3, 14 }, /* SD0_DAT2 */
- { RCAR_GP_PIN(3, 3), PU3, 13 }, /* SD0_DAT1 */
- { RCAR_GP_PIN(3, 2), PU3, 12 }, /* SD0_DAT0 */
- { RCAR_GP_PIN(3, 1), PU3, 11 }, /* SD0_CMD */
- { RCAR_GP_PIN(3, 0), PU3, 10 }, /* SD0_CLK */
- { PIN_A_NUMBER('T', 30), PU3, 9 }, /* ASEBRK */
- /* bit 8 n/a */
- { PIN_A_NUMBER('R', 29), PU3, 7 }, /* TDI */
- { PIN_A_NUMBER('R', 30), PU3, 6 }, /* TMS */
- { PIN_A_NUMBER('T', 27), PU3, 5 }, /* TCK */
- { PIN_A_NUMBER('R', 26), PU3, 4 }, /* TRST# */
- { PIN_A_NUMBER('D', 39), PU3, 3 }, /* EXTALR*/
- { PIN_A_NUMBER('D', 38), PU3, 2 }, /* FSCLKST */
- /* bit 1 n/a on M3*/
- { PIN_A_NUMBER('R', 8), PU3, 0 }, /* DU_DOTCLKIN2 */
-
- { RCAR_GP_PIN(5, 19), PU4, 31 }, /* MSIOF0_SS1 */
- { RCAR_GP_PIN(5, 18), PU4, 30 }, /* MSIOF0_SYNC */
- { RCAR_GP_PIN(5, 17), PU4, 29 }, /* MSIOF0_SCK */
- { RCAR_GP_PIN(5, 16), PU4, 28 }, /* HRTS0_N */
- { RCAR_GP_PIN(5, 15), PU4, 27 }, /* HCTS0_N */
- { RCAR_GP_PIN(5, 14), PU4, 26 }, /* HTX0 */
- { RCAR_GP_PIN(5, 13), PU4, 25 }, /* HRX0 */
- { RCAR_GP_PIN(5, 12), PU4, 24 }, /* HSCK0 */
- { RCAR_GP_PIN(5, 11), PU4, 23 }, /* RX2_A */
- { RCAR_GP_PIN(5, 10), PU4, 22 }, /* TX2_A */
- { RCAR_GP_PIN(5, 9), PU4, 21 }, /* SCK2 */
- { RCAR_GP_PIN(5, 8), PU4, 20 }, /* RTS1_N_TANS */
- { RCAR_GP_PIN(5, 7), PU4, 19 }, /* CTS1_N */
- { RCAR_GP_PIN(5, 6), PU4, 18 }, /* TX1_A */
- { RCAR_GP_PIN(5, 5), PU4, 17 }, /* RX1_A */
- { RCAR_GP_PIN(5, 4), PU4, 16 }, /* RTS0_N_TANS */
- { RCAR_GP_PIN(5, 3), PU4, 15 }, /* CTS0_N */
- { RCAR_GP_PIN(5, 2), PU4, 14 }, /* TX0 */
- { RCAR_GP_PIN(5, 1), PU4, 13 }, /* RX0 */
- { RCAR_GP_PIN(5, 0), PU4, 12 }, /* SCK0 */
- { RCAR_GP_PIN(3, 15), PU4, 11 }, /* SD1_WP */
- { RCAR_GP_PIN(3, 14), PU4, 10 }, /* SD1_CD */
- { RCAR_GP_PIN(3, 13), PU4, 9 }, /* SD0_WP */
- { RCAR_GP_PIN(3, 12), PU4, 8 }, /* SD0_CD */
- { RCAR_GP_PIN(4, 17), PU4, 7 }, /* SD3_DS */
- { RCAR_GP_PIN(4, 16), PU4, 6 }, /* SD3_DAT7 */
- { RCAR_GP_PIN(4, 15), PU4, 5 }, /* SD3_DAT6 */
- { RCAR_GP_PIN(4, 14), PU4, 4 }, /* SD3_DAT5 */
- { RCAR_GP_PIN(4, 13), PU4, 3 }, /* SD3_DAT4 */
- { RCAR_GP_PIN(4, 12), PU4, 2 }, /* SD3_DAT3 */
- { RCAR_GP_PIN(4, 11), PU4, 1 }, /* SD3_DAT2 */
- { RCAR_GP_PIN(4, 10), PU4, 0 }, /* SD3_DAT1 */
-
- { RCAR_GP_PIN(6, 24), PU5, 31 }, /* USB0_PWEN */
- { RCAR_GP_PIN(6, 23), PU5, 30 }, /* AUDIO_CLKB_B */
- { RCAR_GP_PIN(6, 22), PU5, 29 }, /* AUDIO_CLKA_A */
- { RCAR_GP_PIN(6, 21), PU5, 28 }, /* SSI_SDATA9_A */
- { RCAR_GP_PIN(6, 20), PU5, 27 }, /* SSI_SDATA8 */
- { RCAR_GP_PIN(6, 19), PU5, 26 }, /* SSI_SDATA7 */
- { RCAR_GP_PIN(6, 18), PU5, 25 }, /* SSI_WS78 */
- { RCAR_GP_PIN(6, 17), PU5, 24 }, /* SSI_SCK78 */
- { RCAR_GP_PIN(6, 16), PU5, 23 }, /* SSI_SDATA6 */
- { RCAR_GP_PIN(6, 15), PU5, 22 }, /* SSI_WS6 */
- { RCAR_GP_PIN(6, 14), PU5, 21 }, /* SSI_SCK6 */
- { RCAR_GP_PIN(6, 13), PU5, 20 }, /* SSI_SDATA5 */
- { RCAR_GP_PIN(6, 12), PU5, 19 }, /* SSI_WS5 */
- { RCAR_GP_PIN(6, 11), PU5, 18 }, /* SSI_SCK5 */
- { RCAR_GP_PIN(6, 10), PU5, 17 }, /* SSI_SDATA4 */
- { RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
- { RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
- { RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
- { RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
- { RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
- { RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
- { RCAR_GP_PIN(6, 1), PU5, 8 }, /* SSI_WS01239 */
- { RCAR_GP_PIN(6, 0), PU5, 7 }, /* SSI_SCK01239 */
- { PIN_NUMBER('H', 37), PU5, 6 }, /* MLB_REF */
- { RCAR_GP_PIN(5, 25), PU5, 5 }, /* MLB_DAT */
- { RCAR_GP_PIN(5, 24), PU5, 4 }, /* MLB_SIG */
- { RCAR_GP_PIN(5, 23), PU5, 3 }, /* MLB_CLK */
- { RCAR_GP_PIN(5, 22), PU5, 2 }, /* MSIOF0_RXD */
- { RCAR_GP_PIN(5, 21), PU5, 1 }, /* MSIOF0_SS2 */
- { RCAR_GP_PIN(5, 20), PU5, 0 }, /* MSIOF0_TXD */
-
- { RCAR_GP_PIN(6, 31), PU6, 6 }, /* GP6_31 */
- { RCAR_GP_PIN(6, 30), PU6, 5 }, /* GP6_30 */
- { RCAR_GP_PIN(6, 29), PU6, 4 }, /* USB30_OVC */
- { RCAR_GP_PIN(6, 28), PU6, 3 }, /* USB30_PWEN */
- { RCAR_GP_PIN(6, 27), PU6, 2 }, /* USB1_OVC */
- { RCAR_GP_PIN(6, 26), PU6, 1 }, /* USB1_PWEN */
- { RCAR_GP_PIN(6, 25), PU6, 0 }, /* USB0_OVC */
+static const struct pinmux_bias_reg pinmux_bias_regs[] = {
+ { PINMUX_BIAS_REG("PUEN0", 0xe6060400, "PUD0", 0xe6060440) {
+ [ 0] = PIN_NUMBER('W', 3), /* QSPI0_SPCLK */
+ [ 1] = PIN_A_NUMBER('C', 5), /* QSPI0_MOSI_IO0 */
+ [ 2] = PIN_A_NUMBER('B', 4), /* QSPI0_MISO_IO1 */
+ [ 3] = PIN_NUMBER('Y', 6), /* QSPI0_IO2 */
+ [ 4] = PIN_A_NUMBER('B', 6), /* QSPI0_IO3 */
+ [ 5] = PIN_NUMBER('Y', 3), /* QSPI0_SSL */
+ [ 6] = PIN_NUMBER('V', 3), /* QSPI1_SPCLK */
+ [ 7] = PIN_A_NUMBER('C', 7), /* QSPI1_MOSI_IO0 */
+ [ 8] = PIN_A_NUMBER('E', 5), /* QSPI1_MISO_IO1 */
+ [ 9] = PIN_A_NUMBER('E', 4), /* QSPI1_IO2 */
+ [10] = PIN_A_NUMBER('C', 3), /* QSPI1_IO3 */
+ [11] = PIN_NUMBER('V', 5), /* QSPI1_SSL */
+ [12] = PIN_NUMBER('Y', 7), /* RPC_INT# */
+ [13] = PIN_NUMBER('V', 6), /* RPC_WP# */
+ [14] = PIN_NUMBER('V', 7), /* RPC_RESET# */
+ [15] = PIN_NUMBER('A', 16), /* AVB_RX_CTL */
+ [16] = PIN_NUMBER('B', 19), /* AVB_RXC */
+ [17] = PIN_NUMBER('A', 13), /* AVB_RD0 */
+ [18] = PIN_NUMBER('B', 13), /* AVB_RD1 */
+ [19] = PIN_NUMBER('A', 14), /* AVB_RD2 */
+ [20] = PIN_NUMBER('B', 14), /* AVB_RD3 */
+ [21] = PIN_NUMBER('A', 8), /* AVB_TX_CTL */
+ [22] = PIN_NUMBER('A', 19), /* AVB_TXC */
+ [23] = PIN_NUMBER('A', 18), /* AVB_TD0 */
+ [24] = PIN_NUMBER('B', 18), /* AVB_TD1 */
+ [25] = PIN_NUMBER('A', 17), /* AVB_TD2 */
+ [26] = PIN_NUMBER('B', 17), /* AVB_TD3 */
+ [27] = PIN_NUMBER('A', 12), /* AVB_TXCREFCLK */
+ [28] = PIN_NUMBER('A', 9), /* AVB_MDIO */
+ [29] = RCAR_GP_PIN(2, 9), /* AVB_MDC */
+ [30] = RCAR_GP_PIN(2, 10), /* AVB_MAGIC */
+ [31] = RCAR_GP_PIN(2, 11), /* AVB_PHY_INT */
+ } },
+ { PINMUX_BIAS_REG("PUEN1", 0xe6060404, "PUD1", 0xe6060444) {
+ [ 0] = RCAR_GP_PIN(2, 12), /* AVB_LINK */
+ [ 1] = RCAR_GP_PIN(2, 13), /* AVB_AVTP_MATCH_A */
+ [ 2] = RCAR_GP_PIN(2, 14), /* AVB_AVTP_CAPTURE_A */
+ [ 3] = RCAR_GP_PIN(2, 0), /* IRQ0 */
+ [ 4] = RCAR_GP_PIN(2, 1), /* IRQ1 */
+ [ 5] = RCAR_GP_PIN(2, 2), /* IRQ2 */
+ [ 6] = RCAR_GP_PIN(2, 3), /* IRQ3 */
+ [ 7] = RCAR_GP_PIN(2, 4), /* IRQ4 */
+ [ 8] = RCAR_GP_PIN(2, 5), /* IRQ5 */
+ [ 9] = RCAR_GP_PIN(2, 6), /* PWM0 */
+ [10] = RCAR_GP_PIN(2, 7), /* PWM1_A */
+ [11] = RCAR_GP_PIN(2, 8), /* PWM2_A */
+ [12] = RCAR_GP_PIN(1, 0), /* A0 */
+ [13] = RCAR_GP_PIN(1, 1), /* A1 */
+ [14] = RCAR_GP_PIN(1, 2), /* A2 */
+ [15] = RCAR_GP_PIN(1, 3), /* A3 */
+ [16] = RCAR_GP_PIN(1, 4), /* A4 */
+ [17] = RCAR_GP_PIN(1, 5), /* A5 */
+ [18] = RCAR_GP_PIN(1, 6), /* A6 */
+ [19] = RCAR_GP_PIN(1, 7), /* A7 */
+ [20] = RCAR_GP_PIN(1, 8), /* A8 */
+ [21] = RCAR_GP_PIN(1, 9), /* A9 */
+ [22] = RCAR_GP_PIN(1, 10), /* A10 */
+ [23] = RCAR_GP_PIN(1, 11), /* A11 */
+ [24] = RCAR_GP_PIN(1, 12), /* A12 */
+ [25] = RCAR_GP_PIN(1, 13), /* A13 */
+ [26] = RCAR_GP_PIN(1, 14), /* A14 */
+ [27] = RCAR_GP_PIN(1, 15), /* A15 */
+ [28] = RCAR_GP_PIN(1, 16), /* A16 */
+ [29] = RCAR_GP_PIN(1, 17), /* A17 */
+ [30] = RCAR_GP_PIN(1, 18), /* A18 */
+ [31] = RCAR_GP_PIN(1, 19), /* A19 */
+ } },
+ { PINMUX_BIAS_REG("PUEN2", 0xe6060408, "PUD2", 0xe6060448) {
+ [ 0] = RCAR_GP_PIN(1, 28), /* CLKOUT */
+ [ 1] = RCAR_GP_PIN(1, 20), /* CS0_N */
+ [ 2] = RCAR_GP_PIN(1, 21), /* CS1_N */
+ [ 3] = RCAR_GP_PIN(1, 22), /* BS_N */
+ [ 4] = RCAR_GP_PIN(1, 23), /* RD_N */
+ [ 5] = RCAR_GP_PIN(1, 24), /* RD_WR_N */
+ [ 6] = RCAR_GP_PIN(1, 25), /* WE0_N */
+ [ 7] = RCAR_GP_PIN(1, 26), /* WE1_N */
+ [ 8] = RCAR_GP_PIN(1, 27), /* EX_WAIT0_A */
+ [ 9] = PIN_NUMBER('C', 1), /* PRESETOUT# */
+ [10] = RCAR_GP_PIN(0, 0), /* D0 */
+ [11] = RCAR_GP_PIN(0, 1), /* D1 */
+ [12] = RCAR_GP_PIN(0, 2), /* D2 */
+ [13] = RCAR_GP_PIN(0, 3), /* D3 */
+ [14] = RCAR_GP_PIN(0, 4), /* D4 */
+ [15] = RCAR_GP_PIN(0, 5), /* D5 */
+ [16] = RCAR_GP_PIN(0, 6), /* D6 */
+ [17] = RCAR_GP_PIN(0, 7), /* D7 */
+ [18] = RCAR_GP_PIN(0, 8), /* D8 */
+ [19] = RCAR_GP_PIN(0, 9), /* D9 */
+ [20] = RCAR_GP_PIN(0, 10), /* D10 */
+ [21] = RCAR_GP_PIN(0, 11), /* D11 */
+ [22] = RCAR_GP_PIN(0, 12), /* D12 */
+ [23] = RCAR_GP_PIN(0, 13), /* D13 */
+ [24] = RCAR_GP_PIN(0, 14), /* D14 */
+ [25] = RCAR_GP_PIN(0, 15), /* D15 */
+ [26] = RCAR_GP_PIN(7, 0), /* AVS1 */
+ [27] = RCAR_GP_PIN(7, 1), /* AVS2 */
+ [28] = RCAR_GP_PIN(7, 2), /* HDMI0_CEC */
+ [29] = RCAR_GP_PIN(7, 3), /* GP7_03 */
+ [30] = PIN_A_NUMBER('P', 7), /* DU_DOTCLKIN0 */
+ [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) {
+ [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN2 */
+ [ 1] = PIN_NONE,
+ [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST */
+ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR */
+ [ 4] = PIN_A_NUMBER('R', 26), /* TRST# */
+ [ 5] = PIN_A_NUMBER('T', 27), /* TCK */
+ [ 6] = PIN_A_NUMBER('R', 30), /* TMS */
+ [ 7] = PIN_A_NUMBER('R', 29), /* TDI */
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_A_NUMBER('T', 30), /* ASEBRK */
+ [10] = RCAR_GP_PIN(3, 0), /* SD0_CLK */
+ [11] = RCAR_GP_PIN(3, 1), /* SD0_CMD */
+ [12] = RCAR_GP_PIN(3, 2), /* SD0_DAT0 */
+ [13] = RCAR_GP_PIN(3, 3), /* SD0_DAT1 */
+ [14] = RCAR_GP_PIN(3, 4), /* SD0_DAT2 */
+ [15] = RCAR_GP_PIN(3, 5), /* SD0_DAT3 */
+ [16] = RCAR_GP_PIN(3, 6), /* SD1_CLK */
+ [17] = RCAR_GP_PIN(3, 7), /* SD1_CMD */
+ [18] = RCAR_GP_PIN(3, 8), /* SD1_DAT0 */
+ [19] = RCAR_GP_PIN(3, 9), /* SD1_DAT1 */
+ [20] = RCAR_GP_PIN(3, 10), /* SD1_DAT2 */
+ [21] = RCAR_GP_PIN(3, 11), /* SD1_DAT3 */
+ [22] = RCAR_GP_PIN(4, 0), /* SD2_CLK */
+ [23] = RCAR_GP_PIN(4, 1), /* SD2_CMD */
+ [24] = RCAR_GP_PIN(4, 2), /* SD2_DAT0 */
+ [25] = RCAR_GP_PIN(4, 3), /* SD2_DAT1 */
+ [26] = RCAR_GP_PIN(4, 4), /* SD2_DAT2 */
+ [27] = RCAR_GP_PIN(4, 5), /* SD2_DAT3 */
+ [28] = RCAR_GP_PIN(4, 6), /* SD2_DS */
+ [29] = RCAR_GP_PIN(4, 7), /* SD3_CLK */
+ [30] = RCAR_GP_PIN(4, 8), /* SD3_CMD */
+ [31] = RCAR_GP_PIN(4, 9), /* SD3_DAT0 */
+ } },
+ { PINMUX_BIAS_REG("PUEN4", 0xe6060410, "PUD4", 0xe6060450) {
+ [ 0] = RCAR_GP_PIN(4, 10), /* SD3_DAT1 */
+ [ 1] = RCAR_GP_PIN(4, 11), /* SD3_DAT2 */
+ [ 2] = RCAR_GP_PIN(4, 12), /* SD3_DAT3 */
+ [ 3] = RCAR_GP_PIN(4, 13), /* SD3_DAT4 */
+ [ 4] = RCAR_GP_PIN(4, 14), /* SD3_DAT5 */
+ [ 5] = RCAR_GP_PIN(4, 15), /* SD3_DAT6 */
+ [ 6] = RCAR_GP_PIN(4, 16), /* SD3_DAT7 */
+ [ 7] = RCAR_GP_PIN(4, 17), /* SD3_DS */
+ [ 8] = RCAR_GP_PIN(3, 12), /* SD0_CD */
+ [ 9] = RCAR_GP_PIN(3, 13), /* SD0_WP */
+ [10] = RCAR_GP_PIN(3, 14), /* SD1_CD */
+ [11] = RCAR_GP_PIN(3, 15), /* SD1_WP */
+ [12] = RCAR_GP_PIN(5, 0), /* SCK0 */
+ [13] = RCAR_GP_PIN(5, 1), /* RX0 */
+ [14] = RCAR_GP_PIN(5, 2), /* TX0 */
+ [15] = RCAR_GP_PIN(5, 3), /* CTS0_N */
+ [16] = RCAR_GP_PIN(5, 4), /* RTS0_N_TANS */
+ [17] = RCAR_GP_PIN(5, 5), /* RX1_A */
+ [18] = RCAR_GP_PIN(5, 6), /* TX1_A */
+ [19] = RCAR_GP_PIN(5, 7), /* CTS1_N */
+ [20] = RCAR_GP_PIN(5, 8), /* RTS1_N_TANS */
+ [21] = RCAR_GP_PIN(5, 9), /* SCK2 */
+ [22] = RCAR_GP_PIN(5, 10), /* TX2_A */
+ [23] = RCAR_GP_PIN(5, 11), /* RX2_A */
+ [24] = RCAR_GP_PIN(5, 12), /* HSCK0 */
+ [25] = RCAR_GP_PIN(5, 13), /* HRX0 */
+ [26] = RCAR_GP_PIN(5, 14), /* HTX0 */
+ [27] = RCAR_GP_PIN(5, 15), /* HCTS0_N */
+ [28] = RCAR_GP_PIN(5, 16), /* HRTS0_N */
+ [29] = RCAR_GP_PIN(5, 17), /* MSIOF0_SCK */
+ [30] = RCAR_GP_PIN(5, 18), /* MSIOF0_SYNC */
+ [31] = RCAR_GP_PIN(5, 19), /* MSIOF0_SS1 */
+ } },
+ { PINMUX_BIAS_REG("PUEN5", 0xe6060414, "PUD5", 0xe6060454) {
+ [ 0] = RCAR_GP_PIN(5, 20), /* MSIOF0_TXD */
+ [ 1] = RCAR_GP_PIN(5, 21), /* MSIOF0_SS2 */
+ [ 2] = RCAR_GP_PIN(5, 22), /* MSIOF0_RXD */
+ [ 3] = RCAR_GP_PIN(5, 23), /* MLB_CLK */
+ [ 4] = RCAR_GP_PIN(5, 24), /* MLB_SIG */
+ [ 5] = RCAR_GP_PIN(5, 25), /* MLB_DAT */
+ [ 6] = PIN_NUMBER('H', 37), /* MLB_REF */
+ [ 7] = RCAR_GP_PIN(6, 0), /* SSI_SCK01239 */
+ [ 8] = RCAR_GP_PIN(6, 1), /* SSI_WS01239 */
+ [ 9] = RCAR_GP_PIN(6, 2), /* SSI_SDATA0 */
+ [10] = RCAR_GP_PIN(6, 3), /* SSI_SDATA1_A */
+ [11] = RCAR_GP_PIN(6, 4), /* SSI_SDATA2_A */
+ [12] = RCAR_GP_PIN(6, 5), /* SSI_SCK349 */
+ [13] = RCAR_GP_PIN(6, 6), /* SSI_WS349 */
+ [14] = RCAR_GP_PIN(6, 7), /* SSI_SDATA3 */
+ [15] = RCAR_GP_PIN(6, 8), /* SSI_SCK4 */
+ [16] = RCAR_GP_PIN(6, 9), /* SSI_WS4 */
+ [17] = RCAR_GP_PIN(6, 10), /* SSI_SDATA4 */
+ [18] = RCAR_GP_PIN(6, 11), /* SSI_SCK5 */
+ [19] = RCAR_GP_PIN(6, 12), /* SSI_WS5 */
+ [20] = RCAR_GP_PIN(6, 13), /* SSI_SDATA5 */
+ [21] = RCAR_GP_PIN(6, 14), /* SSI_SCK6 */
+ [22] = RCAR_GP_PIN(6, 15), /* SSI_WS6 */
+ [23] = RCAR_GP_PIN(6, 16), /* SSI_SDATA6 */
+ [24] = RCAR_GP_PIN(6, 17), /* SSI_SCK78 */
+ [25] = RCAR_GP_PIN(6, 18), /* SSI_WS78 */
+ [26] = RCAR_GP_PIN(6, 19), /* SSI_SDATA7 */
+ [27] = RCAR_GP_PIN(6, 20), /* SSI_SDATA8 */
+ [28] = RCAR_GP_PIN(6, 21), /* SSI_SDATA9_A */
+ [29] = RCAR_GP_PIN(6, 22), /* AUDIO_CLKA_A */
+ [30] = RCAR_GP_PIN(6, 23), /* AUDIO_CLKB_B */
+ [31] = RCAR_GP_PIN(6, 24), /* USB0_PWEN */
+ } },
+ { PINMUX_BIAS_REG("PUEN6", 0xe6060418, "PUD6", 0xe6060458) {
+ [ 0] = RCAR_GP_PIN(6, 25), /* USB0_OVC */
+ [ 1] = RCAR_GP_PIN(6, 26), /* USB1_PWEN */
+ [ 2] = RCAR_GP_PIN(6, 27), /* USB1_OVC */
+ [ 3] = RCAR_GP_PIN(6, 28), /* USB30_PWEN */
+ [ 4] = RCAR_GP_PIN(6, 29), /* USB30_OVC */
+ [ 5] = RCAR_GP_PIN(6, 30), /* GP6_30 */
+ [ 6] = RCAR_GP_PIN(6, 31), /* GP6_31 */
+ [ 7] = PIN_NONE,
+ [ 8] = PIN_NONE,
+ [ 9] = PIN_NONE,
+ [10] = PIN_NONE,
+ [11] = PIN_NONE,
+ [12] = PIN_NONE,
+ [13] = PIN_NONE,
+ [14] = PIN_NONE,
+ [15] = PIN_NONE,
+ [16] = PIN_NONE,
+ [17] = PIN_NONE,
+ [18] = PIN_NONE,
+ [19] = PIN_NONE,
+ [20] = PIN_NONE,
+ [21] = PIN_NONE,
+ [22] = PIN_NONE,
+ [23] = PIN_NONE,
+ [24] = PIN_NONE,
+ [25] = PIN_NONE,
+ [26] = PIN_NONE,
+ [27] = PIN_NONE,
+ [28] = PIN_NONE,
+ [29] = PIN_NONE,
+ [30] = PIN_NONE,
+ [31] = PIN_NONE,
+ } },
+ { /* sentinel */ },
};
static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
unsigned int pin)
{
- const struct sh_pfc_bias_info *info;
- u32 reg;
- u32 bit;
+ const struct pinmux_bias_reg *reg;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return PIN_CONFIG_BIAS_DISABLE;
- reg = info->reg;
- bit = BIT(info->bit);
-
- if (!(sh_pfc_read_reg(pfc, PUEN + reg, 32) & bit))
+ if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit)))
return PIN_CONFIG_BIAS_DISABLE;
- else if (sh_pfc_read_reg(pfc, PUD + reg, 32) & bit)
+ else if (sh_pfc_read(pfc, reg->pud) & BIT(bit))
return PIN_CONFIG_BIAS_PULL_UP;
else
return PIN_CONFIG_BIAS_PULL_DOWN;
@@ -5675,28 +5764,24 @@ static unsigned int r8a7796_pinmux_get_bias(struct sh_pfc *pfc,
static void r8a7796_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin,
unsigned int bias)
{
- const struct sh_pfc_bias_info *info;
+ const struct pinmux_bias_reg *reg;
u32 enable, updown;
- u32 reg;
- u32 bit;
+ unsigned int bit;
- info = sh_pfc_pin_to_bias_info(bias_info, ARRAY_SIZE(bias_info), pin);
- if (!info)
+ reg = sh_pfc_pin_to_bias_reg(pfc, pin, &bit);
+ if (!reg)
return;
- reg = info->reg;
- bit = BIT(info->bit);
-
- enable = sh_pfc_read_reg(pfc, PUEN + reg, 32) & ~bit;
+ enable = sh_pfc_read(pfc, reg->puen) & ~BIT(bit);
if (bias != PIN_CONFIG_BIAS_DISABLE)
- enable |= bit;
+ enable |= BIT(bit);
- updown = sh_pfc_read_reg(pfc, PUD + reg, 32) & ~bit;
+ updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit);
if (bias == PIN_CONFIG_BIAS_PULL_UP)
- updown |= bit;
+ updown |= BIT(bit);
- sh_pfc_write_reg(pfc, PUD + reg, 32, updown);
- sh_pfc_write_reg(pfc, PUEN + reg, 32, enable);
+ sh_pfc_write(pfc, reg->pud, updown);
+ sh_pfc_write(pfc, reg->puen, enable);
}
static const struct sh_pfc_soc_operations r8a7796_pinmux_ops = {
@@ -5721,6 +5806,8 @@ const struct sh_pfc_soc_info r8a7796_pinmux_info = {
.cfg_regs = pinmux_config_regs,
.drive_regs = pinmux_drive_regs,
+ .bias_regs = pinmux_bias_regs,
+ .ioctrl_regs = pinmux_ioctrl_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
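/*
 * The get/set bias callbacks above now defer the table walk to a common
 * helper, sh_pfc_pin_to_bias_reg(), whose implementation lives in the shared
 * sh-pfc core rather than in this hunk.  A minimal sketch of the lookup it
 * is assumed to perform over the PINMUX_BIAS_REG() tables follows; it is
 * written against the soc_info to stay self-contained, whereas the real
 * helper takes the sh_pfc instance, as the calls above show.
 */
static const struct pinmux_bias_reg *
example_pin_to_bias_reg(const struct sh_pfc_soc_info *info, unsigned int pin,
			unsigned int *bit)
{
	const struct pinmux_bias_reg *reg;
	unsigned int i;

	/* The zero-filled sentinel entry (puen == 0) terminates the table. */
	for (reg = info->bias_regs; reg && reg->puen; reg++) {
		for (i = 0; i < ARRAY_SIZE(reg->pins); i++) {
			if (reg->pins[i] == pin) {
				*bit = i;	/* bit position within PUENn/PUDn */
				return reg;
			}
		}
	}

	return NULL;	/* pin has no pull-up/down control */
}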
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
index 4f5ee1d7317d..89b7541ab1ed 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
@@ -198,8 +198,8 @@
#define GPSR6_0 FM(QSPI0_SPCLK)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */
-#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) FM(USB0_IDIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) FM(USB0_IDPU) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_3_0 FM(IRQ0_A) FM(MSIOF2_SYNC_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0_7_4 FM(MSIOF2_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_11_8 FM(MSIOF2_TXD) FM(SCL3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_15_12 FM(MSIOF2_RXD) FM(SDA3_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0_19_16 FM(MLB_CLK) FM(MSIOF2_SYNC_A) FM(SCK5_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -522,10 +522,8 @@ static const u16 pinmux_data[] = {
/* IPSR0 */
PINMUX_IPSR_MSEL(IP0_3_0, IRQ0_A, SEL_IRQ_0_0),
PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
- PINMUX_IPSR_GPSR(IP0_3_0, USB0_IDIN),
PINMUX_IPSR_GPSR(IP0_7_4, MSIOF2_SCK),
- PINMUX_IPSR_GPSR(IP0_7_4, USB0_IDPU),
PINMUX_IPSR_GPSR(IP0_11_8, MSIOF2_TXD),
PINMUX_IPSR_MSEL(IP0_11_8, SCL3_A, SEL_I2C3_0),
@@ -936,6 +934,129 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - AUDIO CLOCK ------------------------------------------------------------- */
+static const unsigned int audio_clk_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(4, 1),
+};
+static const unsigned int audio_clk_a_mux[] = {
+ AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clk_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(2, 27),
+};
+static const unsigned int audio_clk_b_mux[] = {
+ AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(4, 5),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout1_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int audio_clkout1_mux[] = {
+ AUDIO_CLKOUT1_MARK,
+};
+
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb0_link_pins[] = {
+ /* AVB0_LINK */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int avb0_link_mux[] = {
+ AVB0_LINK_MARK,
+};
+static const unsigned int avb0_magic_pins[] = {
+ /* AVB0_MAGIC */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int avb0_magic_mux[] = {
+ AVB0_MAGIC_MARK,
+};
+static const unsigned int avb0_phy_int_pins[] = {
+ /* AVB0_PHY_INT */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int avb0_phy_int_mux[] = {
+ AVB0_PHY_INT_MARK,
+};
+static const unsigned int avb0_mdc_pins[] = {
+ /* AVB0_MDC, AVB0_MDIO */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int avb0_mdc_mux[] = {
+ AVB0_MDC_MARK, AVB0_MDIO_MARK,
+};
+static const unsigned int avb0_mii_pins[] = {
+ /*
+ * AVB0_TX_CTL, AVB0_TXC, AVB0_TD0,
+ * AVB0_TD1, AVB0_TD2, AVB0_TD3,
+ * AVB0_RX_CTL, AVB0_RXC, AVB0_RD0,
+ * AVB0_RD1, AVB0_RD2, AVB0_RD3,
+ * AVB0_TXCREFCLK
+ */
+ RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 11),
+ RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int avb0_mii_mux[] = {
+ AVB0_TX_CTL_MARK, AVB0_TXC_MARK, AVB0_TD0_MARK,
+ AVB0_TD1_MARK, AVB0_TD2_MARK, AVB0_TD3_MARK,
+ AVB0_RX_CTL_MARK, AVB0_RXC_MARK, AVB0_RD0_MARK,
+ AVB0_RD1_MARK, AVB0_RD2_MARK, AVB0_RD3_MARK,
+ AVB0_TXCREFCLK_MARK,
+};
+static const unsigned int avb0_avtp_pps_a_pins[] = {
+ /* AVB0_AVTP_PPS_A */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int avb0_avtp_pps_a_mux[] = {
+ AVB0_AVTP_PPS_A_MARK,
+};
+static const unsigned int avb0_avtp_match_a_pins[] = {
+ /* AVB0_AVTP_MATCH_A */
+ RCAR_GP_PIN(5, 1),
+};
+static const unsigned int avb0_avtp_match_a_mux[] = {
+ AVB0_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb0_avtp_capture_a_pins[] = {
+ /* AVB0_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int avb0_avtp_capture_a_mux[] = {
+ AVB0_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb0_avtp_pps_b_pins[] = {
+ /* AVB0_AVTP_PPS_B */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int avb0_avtp_pps_b_mux[] = {
+ AVB0_AVTP_PPS_B_MARK,
+};
+static const unsigned int avb0_avtp_match_b_pins[] = {
+ /* AVB0_AVTP_MATCH_B */
+ RCAR_GP_PIN(4, 18),
+};
+static const unsigned int avb0_avtp_match_b_mux[] = {
+ AVB0_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb0_avtp_capture_b_pins[] = {
+ /* AVB0_AVTP_CAPTURE_B */
+ RCAR_GP_PIN(4, 17),
+};
+static const unsigned int avb0_avtp_capture_b_mux[] = {
+ AVB0_AVTP_CAPTURE_B_MARK,
+};
+
/* - I2C -------------------------------------------------------------------- */
static const unsigned int i2c0_pins[] = {
/* SCL, SDA */
@@ -1018,6 +1139,118 @@ static const unsigned int mmc_ctrl_mux[] = {
MMC_CLK_MARK, MMC_CMD_MARK,
};
+/* - PWM0 ------------------------------------------------------------------ */
+static const unsigned int pwm0_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 1),
+};
+
+static const unsigned int pwm0_a_mux[] = {
+ PWM0_A_MARK,
+};
+
+static const unsigned int pwm0_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 18),
+};
+
+static const unsigned int pwm0_b_mux[] = {
+ PWM0_B_MARK,
+};
+
+static const unsigned int pwm0_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 29),
+};
+
+static const unsigned int pwm0_c_mux[] = {
+ PWM0_C_MARK,
+};
+
+/* - PWM1 ------------------------------------------------------------------ */
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 19),
+};
+
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+
+static const unsigned int pwm1_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 30),
+};
+
+static const unsigned int pwm1_c_mux[] = {
+ PWM1_C_MARK,
+};
+
+/* - PWM2 ------------------------------------------------------------------ */
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 22),
+};
+
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+
+static const unsigned int pwm2_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 31),
+};
+
+static const unsigned int pwm2_c_mux[] = {
+ PWM2_C_MARK,
+};
+
+/* - PWM3 ------------------------------------------------------------------ */
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 27),
+};
+
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+
+static const unsigned int pwm3_c_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(4, 0),
+};
+
+static const unsigned int pwm3_c_mux[] = {
+ PWM3_C_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_a_pins[] = {
/* RX, TX */
@@ -1202,7 +1435,75 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+/* - SSI ---------------------------------------------------------------*/
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 3),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 2), RCAR_GP_PIN(4, 4),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+ SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(4, 5), RCAR_GP_PIN(4, 7),
+};
+static const unsigned int ssi4_ctrl_a_mux[] = {
+ SSI_SCK4_A_MARK, SSI_WS4_A_MARK,
+};
+static const unsigned int ssi4_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(4, 6),
+};
+static const unsigned int ssi4_data_a_mux[] = {
+ SSI_SDATA4_A_MARK,
+};
+static const unsigned int ssi4_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 20),
+};
+static const unsigned int ssi4_ctrl_b_mux[] = {
+ SSI_SCK4_B_MARK, SSI_WS4_B_MARK,
+};
+static const unsigned int ssi4_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(2, 16),
+};
+static const unsigned int ssi4_data_b_mux[] = {
+ SSI_SDATA4_B_MARK,
+};
+
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout1),
+ SH_PFC_PIN_GROUP(avb0_link),
+ SH_PFC_PIN_GROUP(avb0_magic),
+ SH_PFC_PIN_GROUP(avb0_phy_int),
+ SH_PFC_PIN_GROUP(avb0_mdc),
+ SH_PFC_PIN_GROUP(avb0_mii),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb0_avtp_pps_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb0_avtp_capture_b),
SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c1),
SH_PFC_PIN_GROUP(i2c2_a),
@@ -1213,6 +1514,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(mmc_data4),
SH_PFC_PIN_GROUP(mmc_data8),
SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(pwm0_a),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm0_c),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm1_c),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm2_c),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm3_c),
SH_PFC_PIN_GROUP(scif0_data_a),
SH_PFC_PIN_GROUP(scif0_clk_a),
SH_PFC_PIN_GROUP(scif0_data_b),
@@ -1238,6 +1551,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif5_data_b),
SH_PFC_PIN_GROUP(scif5_clk_b),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi4_data_a),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi4_data_b),
+ SH_PFC_PIN_GROUP(usb0),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a",
+ "audio_clk_b",
+ "audio_clkout",
+ "audio_clkout1",
+};
+
+static const char * const avb0_groups[] = {
+ "avb0_link",
+ "avb0_magic",
+ "avb0_phy_int",
+ "avb0_mdc",
+ "avb0_mii",
+ "avb0_avtp_pps_a",
+ "avb0_avtp_match_a",
+ "avb0_avtp_capture_a",
+ "avb0_avtp_pps_b",
+ "avb0_avtp_match_b",
+ "avb0_avtp_capture_b",
};
static const char * const i2c0_groups[] = {
@@ -1264,6 +1605,30 @@ static const char * const mmc_groups[] = {
"mmc_ctrl",
};
+static const char * const pwm0_groups[] = {
+ "pwm0_a",
+ "pwm0_b",
+ "pwm0_c",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+ "pwm1_c",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+ "pwm2_c",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+ "pwm3_c",
+};
+
static const char * const scif0_groups[] = {
"scif0_data_a",
"scif0_clk_a",
@@ -1310,12 +1675,31 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const ssi_groups[] = {
+ "ssi3_data",
+ "ssi34_ctrl",
+ "ssi4_ctrl_a",
+ "ssi4_data_a",
+ "ssi4_ctrl_b",
+ "ssi4_data_b",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb0),
SH_PFC_FUNCTION(i2c0),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -1323,6 +1707,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif5),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(usb0),
};
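/*
 * Every group added above follows the file's usual three-part pattern: a
 * *_pins[] array of physical pins, a matching *_mux[] array of marks, and a
 * *_groups[] list of group names per function.  Simplified from the helper
 * macros in sh_pfc.h, the SH_PFC_PIN_GROUP(usb0) and SH_PFC_FUNCTION(usb0)
 * entries in the tables above reduce to roughly:
 */
static const struct sh_pfc_pin_group example_group = {
	.name = "usb0",
	.pins = usb0_pins,			/* PWEN, OVC */
	.mux = usb0_mux,
	.nr_pins = ARRAY_SIZE(usb0_pins),
};

static const struct sh_pfc_function example_function = {
	.name = "usb0",
	.groups = usb0_groups,			/* just "usb0" here */
	.nr_groups = ARRAY_SIZE(usb0_groups),
};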
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index 29c69133b0ef..0e733bffdb38 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 5c9d79981e6d..736634aee500 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -513,7 +513,7 @@ static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
spin_unlock_irqrestore(&pfc->lock, flags);
val = (val >> offset) & GENMASK(size - 1, 0);
@@ -550,11 +550,11 @@ static int sh_pfc_pinconf_set_drive_strength(struct sh_pfc *pfc,
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, reg, 32);
+ val = sh_pfc_read(pfc, reg);
val &= ~GENMASK(offset + size - 1, offset);
val |= strength << offset;
- sh_pfc_write_reg(pfc, reg, 32, val);
+ sh_pfc_write(pfc, reg, val);
spin_unlock_irqrestore(&pfc->lock, flags);
@@ -645,7 +645,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
return bit;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
spin_unlock_irqrestore(&pfc->lock, flags);
arg = (val & BIT(bit)) ? 3300 : 1800;
@@ -716,12 +716,12 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
return -EINVAL;
spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read_reg(pfc, pocctrl, 32);
+ val = sh_pfc_read(pfc, pocctrl);
if (mV == 3300)
val |= BIT(bit);
else
val &= ~BIT(bit);
- sh_pfc_write_reg(pfc, pocctrl, 32, val);
+ sh_pfc_write(pfc, pocctrl, val);
spin_unlock_irqrestore(&pfc->lock, flags);
break;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 8688b405e081..213108a058fe 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -148,6 +148,21 @@ struct pinmux_drive_reg {
.reg = r, \
.fields =
+struct pinmux_bias_reg {
+ u32 puen; /* Pull-enable or pull-up control register */
+ u32 pud; /* Pull-up/down control register (optional) */
+ const u16 pins[32];
+};
+
+#define PINMUX_BIAS_REG(name1, r1, name2, r2) \
+ .puen = r1, \
+ .pud = r2, \
+ .pins =
+
+struct pinmux_ioctrl_reg {
+ u32 reg;
+};
+
struct pinmux_data_reg {
u32 reg;
u8 reg_width;
@@ -189,12 +204,6 @@ struct sh_pfc_window {
unsigned long size;
};
-struct sh_pfc_bias_info {
- u16 pin;
- u16 reg : 11;
- u16 bit : 5;
-};
-
struct sh_pfc_pin_range;
struct sh_pfc {
@@ -213,6 +222,7 @@ struct sh_pfc {
unsigned int nr_gpio_pins;
struct sh_pfc_chip *gpio;
+ u32 *saved_regs;
};
struct sh_pfc_soc_operations {
@@ -245,6 +255,8 @@ struct sh_pfc_soc_info {
const struct pinmux_cfg_reg *cfg_regs;
const struct pinmux_drive_reg *drive_regs;
+ const struct pinmux_bias_reg *bias_regs;
+ const struct pinmux_ioctrl_reg *ioctrl_regs;
const struct pinmux_data_reg *data_regs;
const u16 *pinmux_data;
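/*
 * Like PINMUX_CFG_REG() above it, PINMUX_BIAS_REG() deliberately ends in
 * ".pins =" so that the brace-enclosed array written at the use site
 * completes the designated initializer; the name1/name2 arguments are
 * dropped by the macro and serve only as in-source documentation.  One entry
 * from the r8a7796 table earlier in this diff therefore boils down to
 * roughly:
 */
static const struct pinmux_bias_reg example_bias_regs[] = {
	{
		.puen = 0xe6060418,		/* "PUEN6" */
		.pud  = 0xe6060458,		/* "PUD6" */
		.pins = {
			[0] = RCAR_GP_PIN(6, 25),	/* USB0_OVC */
			/* ...remaining slots, unused ones set to PIN_NONE */
		},
	},
	{ /* sentinel */ },
};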
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 4db9323251e3..3abb028f6158 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5820,7 +5820,7 @@ static void atlas7_gpio_handle_irq(struct irq_desc *desc)
__func__, gc->label,
bank->gpio_offset + pin_in_bank);
generic_handle_irq(
- irq_find_mapping(gc->irqdomain,
+ irq_find_mapping(gc->irq.domain,
bank->gpio_offset + pin_in_bank));
}
@@ -5860,7 +5860,7 @@ static int atlas7_gpio_request(struct gpio_chip *chip,
if (ret < 0)
return ret;
- if (pinctrl_request_gpio(chip->base + gpio))
+ if (pinctrl_gpio_request(chip->base + gpio))
return -ENODEV;
raw_spin_lock_irqsave(&a7gc->lock, flags);
@@ -5890,7 +5890,7 @@ static void atlas7_gpio_free(struct gpio_chip *chip,
raw_spin_unlock_irqrestore(&a7gc->lock, flags);
- pinctrl_free_gpio(chip->base + gpio);
+ pinctrl_gpio_free(chip->base + gpio);
}
static int atlas7_gpio_direction_input(struct gpio_chip *chip,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index d3ef05973901..ca2347d0d579 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -587,7 +587,7 @@ static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
pr_debug("%s: gpio id %d idx %d happens\n",
__func__, bank->id, idx);
- generic_handle_irq(irq_find_mapping(gc->irqdomain, idx +
+ generic_handle_irq(irq_find_mapping(gc->irq.domain, idx +
bank->id * SIRFSOC_GPIO_BANK_SIZE));
}
@@ -614,7 +614,7 @@ static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(sgpio, offset);
unsigned long flags;
- if (pinctrl_request_gpio(chip->base + offset))
+ if (pinctrl_gpio_request(chip->base + offset))
return -ENODEV;
spin_lock_irqsave(&bank->lock, flags);
@@ -644,7 +644,7 @@ static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_unlock_irqrestore(&bank->lock, flags);
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 37b8412ac8a3..da52d17550a3 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# SPEAr pinmux support
obj-$(CONFIG_PINCTRL_SPEAR_PLGPIO) += pinctrl-plgpio.o
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index cf6d68c7345b..6a0ed8ab33b9 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -204,7 +204,7 @@ static int plgpio_request(struct gpio_chip *chip, unsigned offset)
if (offset >= chip->ngpio)
return -EINVAL;
- ret = pinctrl_request_gpio(gpio);
+ ret = pinctrl_gpio_request(gpio);
if (ret)
return ret;
@@ -242,7 +242,7 @@ err1:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
err0:
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
return ret;
}
@@ -273,7 +273,7 @@ disable_clk:
if (!IS_ERR(plgpio->clk))
clk_disable(plgpio->clk);
- pinctrl_free_gpio(gpio);
+ pinctrl_gpio_free(gpio);
}
/* PLGPIO IRQ */
@@ -401,7 +401,7 @@ static void plgpio_irq_handler(struct irq_desc *desc)
/* get correct irq line number */
pin = i * MAX_GPIO_PER_REG + pin;
generic_handle_irq(
- irq_find_mapping(gc->irqdomain, pin));
+ irq_find_mapping(gc->irq.domain, pin));
}
}
chained_irq_exit(irqchip, desc);
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
index 5f379f5153f1..d13ca3573486 100644
--- a/drivers/pinctrl/stm32/Makefile
+++ b/drivers/pinctrl/stm32/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Core
obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 50299ad96659..a276c61be217 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -150,12 +150,12 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_gpio_request(chip->base + offset);
}
static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_gpio_free(chip->base + offset);
}
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
return 0;
}
-static void stm32_gpio_domain_activate(struct irq_domain *d,
- struct irq_data *irq_data)
+static int stm32_gpio_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data, bool early)
{
struct stm32_gpio_bank *bank = d->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
+ return 0;
}
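/*
 * This follows the tree-wide change that makes the irq_domain ->activate()
 * callback return an int and take an extra "early" argument.  Writing the
 * EXTI mux selection cannot fail, so the STM32 hook simply returns 0; a
 * hook that could fail would propagate its error instead, e.g. (with
 * example_claim_mux_line() as a hypothetical, fallible helper):
 */
static int example_domain_activate(struct irq_domain *d,
				   struct irq_data *irq_data, bool early)
{
	return example_claim_mux_line(d->host_data, irq_data->hwirq);
}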
static int stm32_gpio_domain_alloc(struct irq_domain *d,
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index dc6c9619e41c..12a752e836ef 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Core
obj-y += pinctrl-sunxi.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index f763d8d62d6e..295e48fc94bc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1289,6 +1289,7 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
.npins = ARRAY_SIZE(sun4i_a10_pins),
.irq_banks = 1,
.irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i.c b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
index 47afd558b114..27ec99e81c4c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i.c
@@ -713,6 +713,7 @@ static const struct sunxi_pinctrl_desc sun5i_pinctrl_data = {
.pins = sun5i_pins,
.npins = ARRAY_SIZE(sun5i_pins),
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun5i_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 49a1deb97bb7..a00246d3dd49 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -106,6 +106,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun6i_a31_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 951a25c18815..82ffaf466892 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -965,6 +965,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
.pins = sun6i_a31_pins,
.npins = ARRAY_SIZE(sun6i_a31_pins),
.irq_banks = 4,
+ .disable_strict_mode = true,
};
static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
index 67ee6f9b3b68..8a08c4afc6a8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -93,6 +93,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a23_r_pins),
.pin_base = PL_BASE,
.irq_banks = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index 721b6935baf3..402fd7d21e7b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -563,6 +563,7 @@ static const struct sunxi_pinctrl_desc sun8i_a23_pinctrl_data = {
.pins = sun8i_a23_pins,
.npins = ARRAY_SIZE(sun8i_a23_pins),
.irq_banks = 3,
+ .disable_strict_mode = true,
};
static int sun8i_a23_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index ef1e0bef4099..da387211a75e 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -486,6 +486,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
.irq_bank_base = 1,
+ .disable_strict_mode = true,
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
index ebfd9a26628c..b795a199e240 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
@@ -82,7 +82,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun8i_h3_r_pins),
.irq_banks = 1,
.pin_base = PL_BASE,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 518a92df4418..d1719a738c20 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -491,7 +491,8 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
.pins = sun8i_h3_pins,
.npins = ARRAY_SIZE(sun8i_h3_pins),
.irq_banks = 2,
- .irq_read_needs_mux = true
+ .irq_read_needs_mux = true,
+ .disable_strict_mode = true,
};
static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 92a873f73697..c63086c98335 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -152,6 +152,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = {
.npins = ARRAY_SIZE(sun9i_a80_r_pins),
.pin_base = PL_BASE,
.irq_banks = 2,
+ .disable_strict_mode = true,
};
static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
index bc14e954d7a2..472ef0d91b99 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
@@ -721,6 +721,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = {
.pins = sun9i_a80_pins,
.npins = ARRAY_SIZE(sun9i_a80_pins),
.irq_banks = 5,
+ .disable_strict_mode = true,
};
static int sun9i_a80_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 52edf3b5988d..4b6cb25bc796 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -696,6 +696,7 @@ static const struct pinmux_ops sunxi_pmx_ops = {
.get_function_groups = sunxi_pmx_get_func_groups,
.set_mux = sunxi_pmx_set_mux,
.gpio_set_direction = sunxi_pmx_gpio_set_direction,
+ .strict = true,
};
static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
@@ -1245,6 +1246,7 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
struct pinctrl_desc *pctrl_desc;
struct pinctrl_pin_desc *pins;
struct sunxi_pinctrl *pctl;
+ struct pinmux_ops *pmxops;
struct resource *res;
int i, ret, last_pin, pin_idx;
struct clk *clk;
@@ -1305,7 +1307,16 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
pctrl_desc->npins = pctl->ngroups;
pctrl_desc->confops = &sunxi_pconf_ops;
pctrl_desc->pctlops = &sunxi_pctrl_ops;
- pctrl_desc->pmxops = &sunxi_pmx_ops;
+
+ pmxops = devm_kmemdup(&pdev->dev, &sunxi_pmx_ops, sizeof(sunxi_pmx_ops),
+ GFP_KERNEL);
+ if (!pmxops)
+ return -ENOMEM;
+
+ if (desc->disable_strict_mode)
+ pmxops->strict = false;
+
+ pctrl_desc->pmxops = pmxops;
pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
if (IS_ERR(pctl->pctl_dev)) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 1bfc0d8a55df..11b128f54ed2 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -112,6 +112,7 @@ struct sunxi_pinctrl_desc {
unsigned irq_banks;
unsigned irq_bank_base;
bool irq_read_needs_mux;
+ bool disable_strict_mode;
};
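/*
 * In strict mode the pinmux core refuses to hand a pin to the GPIO side
 * while a mux function owns it, and vice versa.  Since sunxi_pmx_ops is
 * const and shared by every variant, SoCs that must keep the old permissive
 * behaviour get a devm_kmemdup() copy with .strict cleared, as in the probe
 * change above.  A new descriptor that wants strict checking simply leaves
 * the flag unset (sketch with hypothetical names):
 */
static const struct sunxi_pinctrl_desc example_pinctrl_data = {
	.pins = example_pins,			/* hypothetical pin table */
	.npins = ARRAY_SIZE(example_pins),
	.irq_banks = 1,
	/* .disable_strict_mode not set: GPIO and pinmux claims stay exclusive */
};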
struct sunxi_pinctrl_function {
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
index d9ea2be69cc4..bbcb043c34a2 100644
--- a/drivers/pinctrl/tegra/Makefile
+++ b/drivers/pinctrl/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
index 5c1b6325d80d..a8a6510183b6 100644
--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
@@ -575,11 +575,9 @@ static int ti_iodelay_pinconf_group_get(struct pinctrl_dev *pctldev,
unsigned long *config)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
@@ -693,12 +691,10 @@ static void ti_iodelay_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
unsigned int selector)
{
struct ti_iodelay_device *iod;
- struct device *dev;
struct ti_iodelay_pingroup *group;
int i;
iod = pinctrl_dev_get_drvdata(pctldev);
- dev = iod->dev;
group = ti_iodelay_get_pingroup(iod, selector);
if (!group)
return;
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
index d592ff77d60f..ec66c86e276e 100644
--- a/drivers/pinctrl/uniphier/Makefile
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += pinctrl-uniphier-core.o
obj-$(CONFIG_PINCTRL_UNIPHIER_LD4) += pinctrl-uniphier-ld4.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index f9267fabe6b0..26fda5c53e65 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -204,9 +204,10 @@ static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8};
- const unsigned int strength_2bit[] = {8, 12, 16, 20};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16};
+ static const unsigned int strength_1bit[] = {4, 8};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12,
+ 14, 16};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
int ret;
@@ -399,9 +400,10 @@ static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
const struct pin_desc *desc = pin_desc_get(pctldev, pin);
enum uniphier_pin_drv_type type =
uniphier_pin_get_drv_type(desc->drv_data);
- const unsigned int strength_1bit[] = {4, 8, -1};
- const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
- const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14, 16, -1};
+ static const unsigned int strength_1bit[] = {4, 8, -1};
+ static const unsigned int strength_2bit[] = {8, 12, 16, 20, -1};
+ static const unsigned int strength_3bit[] = {4, 5, 7, 9, 11, 12, 14,
+ 16, -1};
const unsigned int *supported_strength;
unsigned int drvctrl, reg, shift, mask, width, val;
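/*
 * Marking these lookup tables "static const" keeps them in .rodata instead
 * of rebuilding them on the stack on every call; the values and the
 * behaviour are unchanged.
 */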
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 9c5e359a63de..8a5ecd6277d8 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -472,8 +472,8 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 83341284dc44..3be7967edae0 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -553,8 +553,8 @@ static const struct pinctrl_pin_desc uniphier_ld20_pins[] = {
static const unsigned aout_pins[] = {135, 136, 137, 138, 139, 140, 141, 142};
static const int aout_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
-static const unsigned emmc_pins[] = {18, 19, 20, 21, 22, 23, 24, 25};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {19, 20, 21, 22, 23, 24, 25};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned emmc_dat8_pins[] = {26, 27, 28, 29};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned ether_rgmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 38,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
index d9f166f0cc86..dbe94a9a0353 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c
@@ -776,8 +776,8 @@ static const struct pinctrl_pin_desc uniphier_pxs3_pins[] = {
250, UNIPHIER_PIN_PULL_DOWN),
};
-static const unsigned int emmc_pins[] = {31, 32, 33, 34, 35, 36, 37, 38};
-static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static const unsigned int emmc_pins[] = {32, 33, 34, 35, 36, 37, 38};
+static const int emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
static const unsigned int emmc_dat8_pins[] = {39, 40, 41, 42};
static const int emmc_dat8_muxvals[] = {0, 0, 0, 0};
static const unsigned int ether_rgmii_pins[] = {52, 53, 54, 55, 56, 57, 58, 59,
diff --git a/drivers/pinctrl/vt8500/Makefile b/drivers/pinctrl/vt8500/Makefile
index 24ec45dd0d80..c6a5c3b14e9e 100644
--- a/drivers/pinctrl/vt8500/Makefile
+++ b/drivers/pinctrl/vt8500/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# VIA/Wondermedia pinctrl support
obj-$(CONFIG_PINCTRL_WMT) += pinctrl-wmt.o
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index ca2692510733..d3a6630266a0 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for linux/drivers/platform
#
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 66c345ca35fc..a077b1f0211d 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 80b87954f6dd..2c745e8ccad6 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -93,12 +93,31 @@ config ASUS_LAPTOP
config DELL_SMBIOS
tristate
- select DCDBAS
+
+config DELL_SMBIOS_WMI
+ tristate "Dell SMBIOS calling interface (WMI implementation)"
+ depends on ACPI_WMI
+ select DELL_WMI_DESCRIPTOR
+ select DELL_SMBIOS
+ ---help---
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over ACPI-WMI.
+
+ If you have a Dell computer from >2007 you should say Y or M here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
+
+config DELL_SMBIOS_SMM
+ tristate "Dell SMBIOS calling interface (SMM implementation)"
+ depends on DCDBAS
+ select DELL_SMBIOS
---help---
- This module provides common functions for kernel modules using
- Dell SMBIOS.
+ This provides an implementation for the Dell SMBIOS calling interface
+ communicated over SMI/SMM.
- If you have a Dell laptop, say Y or M here.
+ If you have a Dell computer from <=2017 you should say Y or M here.
+ If you aren't sure and this module doesn't work for your computer
+ it just won't load.
config DELL_LAPTOP
tristate "Dell Laptop Extras"
@@ -116,11 +135,12 @@ config DELL_LAPTOP
laptops (except for some models covered by the Compal driver).
config DELL_WMI
- tristate "Dell WMI extras"
+ tristate "Dell WMI notifications"
depends on ACPI_WMI
depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ select DELL_WMI_DESCRIPTOR
select DELL_SMBIOS
select INPUT_SPARSEKMAP
---help---
@@ -129,6 +149,10 @@ config DELL_WMI
To compile this driver as a module, choose M here: the module will
be called dell-wmi.
+config DELL_WMI_DESCRIPTOR
+ tristate
+ depends on ACPI_WMI
+
config DELL_WMI_AIO
tristate "WMI Hotkeys for Dell All-In-One series"
depends on ACPI_WMI
@@ -426,7 +450,6 @@ config THINKPAD_ACPI_ALSA_SUPPORT
config THINKPAD_ACPI_DEBUGFACILITIES
bool "Maintainer debug facilities"
depends on THINKPAD_ACPI
- default n
---help---
Enables extra stuff in the thinkpad-acpi which is completely useless
for normal use. Read the driver source to find out what it does.
@@ -437,7 +460,6 @@ config THINKPAD_ACPI_DEBUGFACILITIES
config THINKPAD_ACPI_DEBUG
bool "Verbose debug mode"
depends on THINKPAD_ACPI
- default n
---help---
Enables extra debugging information, at the expense of a slightly
increase in driver size.
@@ -447,7 +469,6 @@ config THINKPAD_ACPI_DEBUG
config THINKPAD_ACPI_UNSAFE_LEDS
bool "Allow control of important LEDs (unsafe)"
depends on THINKPAD_ACPI
- default n
---help---
Overriding LED state on ThinkPads can mask important
firmware alerts (like critical battery condition), or misled
@@ -515,7 +536,6 @@ config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
depends on INPUT
select INPUT_POLLDEV
- default n
help
This driver provides support for the IBM Hard Drive Active Protection
System (hdaps), which provides an accelerometer and other misc. data.
@@ -658,6 +678,18 @@ config WMI_BMOF
To compile this driver as a module, choose M here: the module will
be called wmi-bmof.
+config INTEL_WMI_THUNDERBOLT
+ tristate "Intel WMI thunderbolt force power driver"
+ depends on ACPI_WMI
+ ---help---
+ Say Y here if you want to be able to use the WMI interface on select
+ systems to force the power control of Intel Thunderbolt controllers.
+ This is useful for updating the firmware when devices are not plugged
+ into the controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel-wmi-thunderbolt.
+
config MSI_WMI
tristate "MSI WMI extras"
depends on ACPI_WMI
@@ -763,7 +795,6 @@ config TOSHIBA_HAPS
config TOSHIBA_WMI
tristate "Toshiba WMI Hotkeys Driver (EXPERIMENTAL)"
- default n
depends on ACPI_WMI
depends on INPUT
select INPUT_SPARSEKMAP
@@ -774,7 +805,7 @@ config TOSHIBA_WMI
WARNING: This driver is incomplete as it lacks a proper keymap and the
*notify function only prints the ACPI event type value. Be warned that
you will need to provide some information if you have a Toshiba model
- with WMI event hotkeys and want to help with the develpment of this
+ with WMI event hotkeys and want to help with the development of this
driver.
If you have a WMI-based hotkeys Toshiba laptop, say Y or M here.
@@ -785,7 +816,6 @@ config ACPI_CMPC
depends on RFKILL || RFKILL=n
select INPUT
select BACKLIGHT_CLASS_DEVICE
- default n
help
Support for Intel Classmate PC ACPI devices, including some
keys as input device, backlight device, tablet and accelerometer
@@ -793,7 +823,7 @@ config ACPI_CMPC
config INTEL_CHT_INT33FE
tristate "Intel Cherry Trail ACPI INT33FE Driver"
- depends on X86 && ACPI && I2C
+ depends on X86 && ACPI && I2C && REGULATOR
---help---
This driver add support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
@@ -804,6 +834,10 @@ config INTEL_CHT_INT33FE
This driver instantiates i2c-clients for these, so that standard
i2c drivers for these chips can bind to the them.
+ If you enable this driver it is advised to also select
+ CONFIG_TYPEC_FUSB302=m, CONFIG_CHARGER_BQ24190=m and
+ CONFIG_BATTERY_MAX17042=m.
+
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
depends on GPIOLIB && ACPI
@@ -892,7 +926,6 @@ config INTEL_IPS
config INTEL_IMR
bool "Intel Isolated Memory Region support"
- default n
depends on X86_INTEL_QUARK && IOSF_MBI
---help---
This option provides a means to manipulate Isolated Memory Regions.
@@ -1088,7 +1121,6 @@ config INTEL_PUNIT_IPC
config INTEL_TELEMETRY
tristate "Intel SoC Telemetry Driver"
- default n
depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64
---help---
This driver provides interfaces to configure and use
@@ -1111,7 +1143,6 @@ config MLX_PLATFORM
config MLX_CPLD_PLATFORM
tristate "Mellanox platform hotplug driver support"
- default n
select HWMON
select I2C
---help---
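# The scattered "default n" removals in this file are no-op cleanups, since
# bool and tristate symbols already default to n when no default is given.
# The substantive change is splitting the monolithic DELL_SMBIOS selection
# into the new DELL_SMBIOS_WMI and DELL_SMBIOS_SMM transport modules, each of
# which selects it.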
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 91cec1751461..c32b34a72467 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for linux/drivers/platform/x86
# x86 Platform-Specific Drivers
@@ -12,8 +13,11 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
@@ -39,6 +43,7 @@ obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
+obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o
# toshiba_acpi must link after wmi to ensure that wmi devices are found
# before toshiba_acpi initializes
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 48e1541dc8d4..a32c5c00e0e7 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -119,6 +119,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
+#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
@@ -148,6 +149,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DSTS_BIOS_BIT 0x00040000
#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
@@ -222,10 +224,13 @@ struct asus_wmi {
int tpd_led_wk;
struct led_classdev kbd_led;
int kbd_led_wk;
+ struct led_classdev lightbar_led;
+ int lightbar_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct kbd_led_work;
struct work_struct wlan_led_work;
+ struct work_struct lightbar_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
@@ -567,6 +572,48 @@ static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
}
+static void lightbar_led_update(struct work_struct *work)
+{
+ struct asus_wmi *asus;
+ int ctrl_param;
+
+ asus = container_of(work, struct asus_wmi, lightbar_led_work);
+
+ ctrl_param = asus->lightbar_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL);
+}
+
+static void lightbar_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+
+ asus->lightbar_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->lightbar_led_work);
+}
+
+static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, lightbar_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+ return result & ASUS_WMI_DSTS_LIGHTBAR_MASK;
+}
+
+static int lightbar_led_presence(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result);
+
+ return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
@@ -575,6 +622,8 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
led_classdev_unregister(&asus->tpd_led);
if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
led_classdev_unregister(&asus->wlan_led);
+ if (!IS_ERR_OR_NULL(asus->lightbar_led.dev))
+ led_classdev_unregister(&asus->lightbar_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
@@ -630,6 +679,20 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
rv = led_classdev_register(&asus->platform_device->dev,
&asus->wlan_led);
+ if (rv)
+ goto error;
+ }
+
+ if (lightbar_led_presence(asus)) {
+ INIT_WORK(&asus->lightbar_led_work, lightbar_led_update);
+
+ asus->lightbar_led.name = "asus::lightbar";
+ asus->lightbar_led.brightness_set = lightbar_led_set;
+ asus->lightbar_led.brightness_get = lightbar_led_get;
+ asus->lightbar_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->lightbar_led);
}
error:
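/*
 * Once led_classdev_register() succeeds, the lightbar is exposed through the
 * standard LED class interface, so userspace can toggle it via
 * /sys/class/leds/asus::lightbar/brightness with 0 or 1 (max_brightness is
 * set to 1 above).
 */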
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f42159fd2031..bf897b1832b1 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -35,18 +35,6 @@
#include "dell-rbtn.h"
#include "dell-smbios.h"
-#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
-#define GLOBAL_MIC_MUTE_ENABLE 0x0364
-#define GLOBAL_MIC_MUTE_DISABLE 0x0365
-#define KBD_LED_AC_TOKEN 0x0451
-
struct quirk_entry {
u8 touchpad_led;
@@ -85,6 +73,7 @@ static struct platform_driver platform_driver = {
}
};
+static struct calling_interface_buffer *buffer;
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
static struct rfkill *wifi_rfkill;
@@ -283,6 +272,27 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
{ }
};
+void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->input[0] = arg0;
+ buffer->input[1] = arg1;
+ buffer->input[2] = arg2;
+ buffer->input[3] = arg3;
+}
+
+int dell_send_request(u16 class, u16 select)
+{
+ int ret;
+
+ buffer->cmd_class = class;
+ buffer->cmd_select = select;
+ ret = dell_smbios_call(buffer);
+ if (ret != 0)
+ return ret;
+ return dell_smbios_error(buffer->output[0]);
+}
+
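/*
 * These two helpers replace the dell_smbios_get_buffer() /
 * dell_smbios_send_request() / dell_smbios_release_buffer() sequence used
 * throughout this file.  Every conversion below follows the same pattern,
 * sketched here; CLASS_INFO and SELECT_RFKILL are assumed to come from
 * dell-smbios.h in this series.
 */
static int example_rfkill_status(int *status)
{
	int ret;

	dell_set_arguments(0, 0, 0, 0);		/* fills buffer->input[0..3] */
	ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
	if (ret)				/* already run through dell_smbios_error() */
		return ret;

	*status = buffer->output[1];		/* results land in the shared buffer */
	return 0;
}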
/*
* Derived from information in smbios-wireless-ctl:
*
@@ -405,7 +415,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
static int dell_rfkill_set(void *data, bool blocked)
{
- struct calling_interface_buffer *buffer;
int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
int hwswitch_bit = (unsigned long)data - 1;
@@ -413,20 +422,16 @@ static int dell_rfkill_set(void *data, bool blocked)
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
status = buffer->output[1];
- if (ret != 0)
- goto out;
-
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0x2, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
hwswitch = buffer->output[1];
/* If the hardware switch controls this radio, and the hardware
@@ -435,28 +440,19 @@ static int dell_rfkill_set(void *data, bool blocked)
(status & BIT(0)) && !(status & BIT(16)))
disable = 1;
- dell_smbios_clear_buffer();
-
- buffer->input[0] = (1 | (radio<<8) | (disable << 16));
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
-
- out:
- dell_smbios_release_buffer();
- return dell_smbios_error(ret);
+ dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ return ret;
}
-/* Must be called with the buffer held */
static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
- int status,
- struct calling_interface_buffer *buffer)
+ int status)
{
if (status & BIT(0)) {
/* Has hw-switch, sync sw_state to BIOS */
int block = rfkill_blocked(rfkill);
- dell_smbios_clear_buffer();
- buffer->input[0] = (1 | (radio << 8) | (block << 16));
- dell_smbios_send_request(17, 11);
+ dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0);
+ dell_send_request(CLASS_INFO, SELECT_RFKILL);
} else {
/* No hw-switch, sync BIOS state to sw_state */
rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
@@ -472,32 +468,23 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
- struct calling_interface_buffer *buffer;
int radio = ((unsigned long)data & 0xF);
int hwswitch;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
if (ret != 0 || !(status & BIT(0))) {
- dell_smbios_release_buffer();
return;
}
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
hwswitch = buffer->output[1];
- dell_smbios_release_buffer();
-
if (ret != 0)
return;
@@ -513,27 +500,23 @@ static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
{
- struct calling_interface_buffer *buffer;
int hwswitch_state;
int hwswitch_ret;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (ret)
+ return ret;
status = buffer->output[1];
- dell_smbios_clear_buffer();
-
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- hwswitch_ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
+ if (hwswitch_ret)
+ return hwswitch_ret;
hwswitch_state = buffer->output[1];
- dell_smbios_release_buffer();
-
seq_printf(s, "return:\t%d\n", ret);
seq_printf(s, "status:\t0x%X\n", status);
seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
@@ -613,46 +596,36 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
- struct calling_interface_buffer *buffer;
int hwswitch = 0;
int status;
int ret;
- buffer = dell_smbios_get_buffer();
-
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
if (ret != 0)
- goto out;
-
- dell_smbios_clear_buffer();
+ return;
- buffer->input[0] = 0x2;
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0x2, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
if (ret == 0 && (status & BIT(0)))
hwswitch = buffer->output[1];
if (wifi_rfkill) {
dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
- dell_rfkill_update_sw_state(wifi_rfkill, 1, status, buffer);
+ dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
}
if (bluetooth_rfkill) {
dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
hwswitch);
- dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status,
- buffer);
+ dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
}
if (wwan_rfkill) {
dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
- dell_rfkill_update_sw_state(wwan_rfkill, 3, status, buffer);
+ dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
}
-
- out:
- dell_smbios_release_buffer();
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -696,7 +669,6 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
static int __init dell_setup_rfkill(void)
{
- struct calling_interface_buffer *buffer;
int status, ret, whitelisted;
const char *product;
@@ -712,11 +684,9 @@ static int __init dell_setup_rfkill(void)
if (!force_rfkill && !whitelisted)
return 0;
- buffer = dell_smbios_get_buffer();
- dell_smbios_send_request(17, 11);
- ret = buffer->output[0];
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_INFO, SELECT_RFKILL);
status = buffer->output[1];
- dell_smbios_release_buffer();
/* dell wireless info smbios call is not supported */
if (ret != 0)
@@ -869,7 +839,6 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -877,24 +846,17 @@ static int dell_send_intensity(struct backlight_device *bd)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = bd->props.brightness;
-
+ dell_set_arguments(token->location, bd->props.brightness, 0, 0);
if (power_supply_is_system_supplied() > 0)
- dell_smbios_send_request(1, 2);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC);
else
- dell_smbios_send_request(1, 1);
-
- ret = dell_smbios_error(buffer->output[0]);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT);
- dell_smbios_release_buffer();
return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -902,20 +864,14 @@ static int dell_get_intensity(struct backlight_device *bd)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
-
+ dell_set_arguments(token->location, 0, 0, 0);
if (power_supply_is_system_supplied() > 0)
- dell_smbios_send_request(0, 2);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
else
- dell_smbios_send_request(0, 1);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT);
- if (buffer->output[0])
- ret = dell_smbios_error(buffer->output[0]);
- else
+ if (ret == 0)
ret = buffer->output[1];
-
- dell_smbios_release_buffer();
return ret;
}
@@ -1179,20 +1135,13 @@ static DEFINE_MUTEX(kbd_led_mutex);
static int kbd_get_info(struct kbd_info *info)
{
- struct calling_interface_buffer *buffer;
u8 units;
int ret;
- buffer = dell_smbios_get_buffer();
-
- buffer->input[0] = 0x0;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smbios_error(ret);
- goto out;
- }
+ dell_set_arguments(0, 0, 0, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
info->modes = buffer->output[1] & 0xFFFF;
info->type = (buffer->output[1] >> 24) & 0xFF;
@@ -1209,8 +1158,6 @@ static int kbd_get_info(struct kbd_info *info)
if (units & BIT(3))
info->days = (buffer->output[3] >> 24) & 0xFF;
- out:
- dell_smbios_release_buffer();
return ret;
}
@@ -1269,19 +1216,12 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
static int kbd_get_state(struct kbd_state *state)
{
- struct calling_interface_buffer *buffer;
int ret;
- buffer = dell_smbios_get_buffer();
-
- buffer->input[0] = 0x1;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smbios_error(ret);
- goto out;
- }
+ dell_set_arguments(0x1, 0, 0, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
+ if (ret)
+ return ret;
state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
if (state->mode_bit != 0)
@@ -1296,31 +1236,27 @@ static int kbd_get_state(struct kbd_state *state)
state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F;
state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3;
- out:
- dell_smbios_release_buffer();
return ret;
}
static int kbd_set_state(struct kbd_state *state)
{
- struct calling_interface_buffer *buffer;
int ret;
+ u32 input1;
+ u32 input2;
+
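+ /* pack mode, triggers and battery timeout into input1; ALS, level and AC timeout into input2 */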
+ input1 = BIT(state->mode_bit) & 0xFFFF;
+ input1 |= (state->triggers & 0xFF) << 16;
+ input1 |= (state->timeout_value & 0x3F) << 24;
+ input1 |= (state->timeout_unit & 0x3) << 30;
+ input2 = state->als_setting & 0xFF;
+ input2 |= (state->level & 0xFF) << 16;
+ input2 |= (state->timeout_value_ac & 0x3F) << 24;
+ input2 |= (state->timeout_unit_ac & 0x3) << 30;
+ dell_set_arguments(0x2, input1, input2, 0);
+ ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- buffer->input[2] |= (state->timeout_value_ac & 0x3F) << 24;
- buffer->input[2] |= (state->timeout_unit_ac & 0x3) << 30;
- dell_smbios_send_request(4, 11);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
-
- return dell_smbios_error(ret);
+ return ret;
}
static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
@@ -1345,7 +1281,6 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
static int kbd_set_token_bit(u8 bit)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
@@ -1356,19 +1291,14 @@ static int kbd_set_token_bit(u8 bit)
if (!token)
return -EINVAL;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = token->value;
- dell_smbios_send_request(1, 0);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
+ dell_set_arguments(token->location, token->value, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
- return dell_smbios_error(ret);
+ return ret;
}
static int kbd_get_token_bit(u8 bit)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int ret;
int val;
@@ -1380,15 +1310,12 @@ static int kbd_get_token_bit(u8 bit)
if (!token)
return -EINVAL;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- dell_smbios_send_request(0, 0);
- ret = buffer->output[0];
+ dell_set_arguments(token->location, 0, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD);
val = buffer->output[1];
- dell_smbios_release_buffer();
if (ret)
- return dell_smbios_error(ret);
+ return ret;
return (val == token->value);
}
@@ -2102,7 +2029,6 @@ static struct notifier_block dell_laptop_notifier = {
int dell_micmute_led_set(int state)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
if (state == 0)
@@ -2115,11 +2041,8 @@ int dell_micmute_led_set(int state)
if (!token)
return -ENODEV;
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- buffer->input[1] = token->value;
- dell_smbios_send_request(1, 0);
- dell_smbios_release_buffer();
+ dell_set_arguments(token->location, token->value, 0, 0);
+ dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
return state;
}
@@ -2127,7 +2050,6 @@ EXPORT_SYMBOL_GPL(dell_micmute_led_set);
static int __init dell_init(void)
{
- struct calling_interface_buffer *buffer;
struct calling_interface_token *token;
int max_intensity = 0;
int ret;
@@ -2151,6 +2073,13 @@ static int __init dell_init(void)
if (ret)
goto fail_platform_device2;
+ buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto fail_buffer;
+ }
+
ret = dell_setup_rfkill();
if (ret) {
@@ -2175,12 +2104,10 @@ static int __init dell_init(void)
token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
if (token) {
- buffer = dell_smbios_get_buffer();
- buffer->input[0] = token->location;
- dell_smbios_send_request(0, 2);
- if (buffer->output[0] == 0)
+ dell_set_arguments(token->location, 0, 0, 0);
+ ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC);
+ if (ret == 0)
max_intensity = buffer->output[3];
- dell_smbios_release_buffer();
}
if (max_intensity) {
@@ -2214,6 +2141,8 @@ static int __init dell_init(void)
fail_get_brightness:
backlight_device_unregister(dell_backlight_device);
fail_backlight:
+ kfree(buffer);
+fail_buffer:
dell_cleanup_rfkill();
fail_rfkill:
platform_device_del(platform_device);
@@ -2233,6 +2162,7 @@ static void __exit dell_exit(void)
touchpad_led_exit();
kbd_led_exit();
backlight_device_unregister(dell_backlight_device);
+ kfree(buffer);
dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
new file mode 100644
index 000000000000..89f65c4651a0
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -0,0 +1,196 @@
+/*
+ * SMI methods for use with dell-smbios
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/gfp.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include "../../firmware/dcdbas.h"
+#include "dell-smbios.h"
+
+static int da_command_address;
+static int da_command_code;
+static struct calling_interface_buffer *buffer;
+static struct platform_device *platform_device;
+static DEFINE_MUTEX(smm_mutex);
+
+static const struct dmi_system_id dell_device_table[] __initconst = {
+ {
+ .ident = "Dell laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/
+ },
+ },
+ {
+ .ident = "Dell Computer Corporation",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "8"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, dell_device_table);
+
+static void __init parse_da_table(const struct dmi_header *dm)
+{
+ struct calling_interface_structure *table =
+ container_of(dm, struct calling_interface_structure, header);
+
+ /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
+ * 6 bytes of entry
+ */
+ if (dm->length < 17)
+ return;
+
+ da_command_address = table->cmdIOAddress;
+ da_command_code = table->cmdIOCode;
+}
+
+static void __init find_cmd_address(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xda: /* Calling interface */
+ parse_da_table(dm);
+ break;
+ }
+}
+
+int dell_smbios_smm_call(struct calling_interface_buffer *input)
+{
+ struct smi_cmd command;
+ size_t size;
+
+ size = sizeof(struct calling_interface_buffer);
+ command.magic = SMI_CMD_MAGIC;
+ command.command_address = da_command_address;
+ command.command_code = da_command_code;
+ command.ebx = virt_to_phys(buffer);
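+ /* 0x42534931 is ASCII "BSI1", the signature expected by the Dell calling interface SMI */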
+ command.ecx = 0x42534931;
+
+ mutex_lock(&smm_mutex);
+ memcpy(buffer, input, size);
+ dcdbas_smi_request(&command);
+ memcpy(input, buffer, size);
+ mutex_unlock(&smm_mutex);
+ return 0;
+}
+
+/* When enabled this indicates that SMM won't work */
+static bool test_wsmt_enabled(void)
+{
+ struct calling_interface_token *wsmt;
+
+ /* if token doesn't exist, SMM will work */
+ wsmt = dell_smbios_find_token(WSMT_EN_TOKEN);
+ if (!wsmt)
+ return false;
+
+ /* If token exists, try to access over SMM but set a dummy return.
+ * - If WSMT disabled it will be overwritten by SMM
+ * - If WSMT enabled then dummy value will remain
+ */
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ buffer->cmd_class = CLASS_TOKEN_READ;
+ buffer->cmd_select = SELECT_TOKEN_STD;
+ buffer->input[0] = wsmt->location;
+ buffer->output[0] = 99;
+ dell_smbios_smm_call(buffer);
+ if (buffer->output[0] == 99)
+ return true;
+
+ return false;
+}
+
+static int __init dell_smbios_smm_init(void)
+{
+ int ret;
+ /*
+ * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ * is passed to SMI handler.
+ */
+ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ if (!buffer)
+ return -ENOMEM;
+
+ dmi_walk(find_cmd_address, NULL);
+
+ if (test_wsmt_enabled()) {
+ pr_debug("Disabling due to WSMT enabled\n");
+ ret = -ENODEV;
+ goto fail_wsmt;
+ }
+
+ platform_device = platform_device_alloc("dell-smbios", 1);
+ if (!platform_device) {
+ ret = -ENOMEM;
+ goto fail_platform_device_alloc;
+ }
+
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ ret = dell_smbios_register_device(&platform_device->dev,
+ &dell_smbios_smm_call);
+ if (ret)
+ goto fail_register;
+
+ return 0;
+
+fail_register:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_wsmt:
+fail_platform_device_alloc:
+ free_page((unsigned long)buffer);
+ return ret;
+}
+
+static void __exit dell_smbios_smm_exit(void)
+{
+ if (platform_device) {
+ dell_smbios_unregister_device(&platform_device->dev);
+ platform_device_unregister(platform_device);
+ free_page((unsigned long)buffer);
+ }
+}
+
+subsys_initcall(dell_smbios_smm_init);
+module_exit(dell_smbios_smm_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over SMI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
new file mode 100644
index 000000000000..609557aa5868
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -0,0 +1,285 @@
+/*
+ * WMI methods for use with dell-smbios
+ *
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/wmi.h>
+#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
+
+static DEFINE_MUTEX(call_mutex);
+static DEFINE_MUTEX(list_mutex);
+static int wmi_supported;
+
+struct misc_bios_flags_structure {
+ struct dmi_header header;
+ u16 flags0;
+} __packed;
+#define FLAG_HAS_ACPI_WMI 0x02
+
+#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+
+struct wmi_smbios_priv {
+ struct dell_wmi_smbios_buffer *buf;
+ struct list_head list;
+ struct wmi_device *wdev;
+ struct device *child;
+ u32 req_buf_size;
+};
+static LIST_HEAD(wmi_list);
+
+static inline struct wmi_smbios_priv *get_first_smbios_priv(void)
+{
+ return list_first_entry_or_null(&wmi_list,
+ struct wmi_smbios_priv,
+ list);
+}
+
+static int run_smbios_call(struct wmi_device *wdev)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct wmi_smbios_priv *priv;
+ struct acpi_buffer input;
+ union acpi_object *obj;
+ acpi_status status;
+
+ priv = dev_get_drvdata(&wdev->dev);
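+ /* strip the u64 length member used by the ioctl interface; only the SMBIOS buffer is passed to ACPI */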
+ input.length = priv->req_buf_size - sizeof(u64);
+ input.pointer = &priv->buf->std;
+
+ dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n",
+ priv->buf->std.cmd_class, priv->buf->std.cmd_select,
+ priv->buf->std.input[0], priv->buf->std.input[1],
+ priv->buf->std.input[2], priv->buf->std.input[3]);
+
+ status = wmidev_evaluate_method(wdev, 0, 1, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ obj = (union acpi_object *)output.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_dbg(&wdev->dev, "received type: %d\n", obj->type);
+ if (obj->type == ACPI_TYPE_INTEGER)
+ dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n",
+ obj->integer.value);
+ return -EIO;
+ }
+ memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length);
+ dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
+ priv->buf->std.output[0], priv->buf->std.output[1],
+ priv->buf->std.output[2], priv->buf->std.output[3]);
+
+ return 0;
+}
+
+int dell_smbios_wmi_call(struct calling_interface_buffer *buffer)
+{
+ struct wmi_smbios_priv *priv;
+ size_t difference;
+ size_t size;
+ int ret;
+
+ mutex_lock(&call_mutex);
+ priv = get_first_smbios_priv();
+ if (!priv) {
+ ret = -ENODEV;
+ goto out_wmi_call;
+ }
+
+ size = sizeof(struct calling_interface_buffer);
+ difference = priv->req_buf_size - sizeof(u64) - size;
+
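+ /* in-kernel callers only fill the standard buffer; clear the extended area beyond it */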
+ memset(&priv->buf->ext, 0, difference);
+ memcpy(&priv->buf->std, buffer, size);
+ ret = run_smbios_call(priv->wdev);
+ memcpy(buffer, &priv->buf->std, size);
+out_wmi_call:
+ mutex_unlock(&call_mutex);
+
+ return ret;
+}
+
+static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
+ struct wmi_ioctl_buffer *arg)
+{
+ struct wmi_smbios_priv *priv;
+ int ret = 0;
+
+ switch (cmd) {
+ case DELL_WMI_SMBIOS_CMD:
+ mutex_lock(&call_mutex);
+ priv = dev_get_drvdata(&wdev->dev);
+ if (!priv) {
+ ret = -ENODEV;
+ goto fail_smbios_cmd;
+ }
+ memcpy(priv->buf, arg, priv->req_buf_size);
+ if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) {
+ dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n",
+ priv->buf->std.cmd_class,
+ priv->buf->std.cmd_select,
+ priv->buf->std.input[0]);
+ ret = -EFAULT;
+ goto fail_smbios_cmd;
+ }
+ ret = run_smbios_call(priv->wdev);
+ if (ret)
+ goto fail_smbios_cmd;
+ memcpy(arg, priv->buf, priv->req_buf_size);
+fail_smbios_cmd:
+ mutex_unlock(&call_mutex);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+ return ret;
+}
+
+static int dell_smbios_wmi_probe(struct wmi_device *wdev)
+{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
+ struct wmi_smbios_priv *priv;
+ u32 hotfix;
+ int count;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* WMI buffer size will be either 4k or 32k depending on machine */
+ if (!dell_wmi_get_size(&priv->req_buf_size))
+ return -EPROBE_DEFER;
+
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
+ /* add in the length object we will use internally with ioctl */
+ priv->req_buf_size += sizeof(u64);
+ ret = set_required_buffer_size(wdev, priv->req_buf_size);
+ if (ret)
+ return ret;
+
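+ /* the descriptor-reported buffer (4k or 32k) may span multiple pages */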
+ count = get_order(priv->req_buf_size);
+ priv->buf = (void *)__get_free_pages(GFP_KERNEL, count);
+ if (!priv->buf)
+ return -ENOMEM;
+
+ /* ID is used by dell-smbios to set priority of drivers */
+ wdev->dev.id = 1;
+ ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+ if (ret)
+ goto fail_register;
+
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ return 0;
+
+fail_register:
+ free_pages((unsigned long)priv->buf, count);
+ return ret;
+}
+
+static int dell_smbios_wmi_remove(struct wmi_device *wdev)
+{
+ struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev);
+ int count;
+
+ mutex_lock(&call_mutex);
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+ dell_smbios_unregister_device(&wdev->dev);
+ count = get_order(priv->req_buf_size);
+ free_pages((unsigned long)priv->buf, count);
+ mutex_unlock(&call_mutex);
+ return 0;
+}
+
+static const struct wmi_device_id dell_smbios_wmi_id_table[] = {
+ { .guid_string = DELL_WMI_SMBIOS_GUID },
+ { },
+};
+
+static void __init parse_b1_table(const struct dmi_header *dm)
+{
+ struct misc_bios_flags_structure *flags =
+ container_of(dm, struct misc_bios_flags_structure, header);
+
+ /* 4 bytes header, 8 bytes flags */
+ if (dm->length < 12)
+ return;
+ if (dm->handle != 0xb100)
+ return;
+ if ((flags->flags0 & FLAG_HAS_ACPI_WMI))
+ wmi_supported = 1;
+}
+
+static void __init find_b1(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xb1: /* misc bios flags */
+ parse_b1_table(dm);
+ break;
+ }
+}
+
+static struct wmi_driver dell_smbios_wmi_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+ .probe = dell_smbios_wmi_probe,
+ .remove = dell_smbios_wmi_remove,
+ .id_table = dell_smbios_wmi_id_table,
+ .filter_callback = dell_smbios_wmi_filter,
+};
+
+static int __init init_dell_smbios_wmi(void)
+{
+ dmi_walk(find_b1, NULL);
+
+ if (!wmi_supported)
+ return -ENODEV;
+
+ return wmi_driver_register(&dell_smbios_wmi_driver);
+}
+
+static void __exit exit_dell_smbios_wmi(void)
+{
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
+}
+
+module_init(init_dell_smbios_wmi);
+module_exit(exit_dell_smbios_wmi);
+
+MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell SMBIOS communications over WMI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios.c
index 0a5723468bff..6a60db515bda 100644
--- a/drivers/platform/x86/dell-smbios.c
+++ b/drivers/platform/x86/dell-smbios.c
@@ -12,33 +12,119 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/dmi.h>
#include <linux/err.h>
-#include <linux/gfp.h>
#include <linux/mutex.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/io.h>
-#include "../../firmware/dcdbas.h"
#include "dell-smbios.h"
-struct calling_interface_structure {
- struct dmi_header header;
- u16 cmdIOAddress;
- u8 cmdIOCode;
- u32 supportedCmds;
- struct calling_interface_token tokens[];
-} __packed;
-
-static struct calling_interface_buffer *buffer;
-static DEFINE_MUTEX(buffer_mutex);
-
-static int da_command_address;
-static int da_command_code;
+static u32 da_supported_commands;
static int da_num_tokens;
+static struct platform_device *platform_device;
static struct calling_interface_token *da_tokens;
+static struct device_attribute *token_location_attrs;
+static struct device_attribute *token_value_attrs;
+static struct attribute **token_attrs;
+static DEFINE_MUTEX(smbios_mutex);
+
+struct smbios_device {
+ struct list_head list;
+ struct device *device;
+ int (*call_fn)(struct calling_interface_buffer *);
+};
+
+struct smbios_call {
+ u32 need_capability;
+ int cmd_class;
+ int cmd_select;
+};
+
+/* calls that are whitelisted for given capabilities */
+static struct smbios_call call_whitelist[] = {
+ /* generally tokens are allowed, but may be further filtered or
+ * restricted by token blacklist or whitelist
+ */
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_BAT},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_AC},
+ {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT},
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CLASS_ADMIN_PROP, SELECT_ADMIN_PROP},
+ /* used by userspace: fwupd */
+ {CAP_SYS_ADMIN, CLASS_INFO, SELECT_DOCK},
+ {CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE, SELECT_FLASH_INTERFACE},
+};
+
+/* calls that are explicitly blacklisted */
+static struct smbios_call call_blacklist[] = {
+ {0x0000, 01, 07}, /* manufacturing use */
+ {0x0000, 06, 05}, /* manufacturing use */
+ {0x0000, 11, 03}, /* write once */
+ {0x0000, 11, 07}, /* write once */
+ {0x0000, 11, 11}, /* write once */
+ {0x0000, 19, -1}, /* diagnostics */
+ /* handled by kernel: dell-laptop */
+ {0x0000, CLASS_INFO, SELECT_RFKILL},
+ {0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT},
+};
+
+struct token_range {
+ u32 need_capability;
+ u16 min;
+ u16 max;
+};
+
+/* tokens that are whitelisted for given capabilities */
+static struct token_range token_whitelist[] = {
+ /* used by userspace: fwupdate */
+ {CAP_SYS_ADMIN, CAPSULE_EN_TOKEN, CAPSULE_DIS_TOKEN},
+ /* can indicate to userspace that WMI is needed */
+ {0x0000, WSMT_EN_TOKEN, WSMT_DIS_TOKEN}
+};
+
+/* tokens that are explicitly blacklisted */
+static struct token_range token_blacklist[] = {
+ {0x0000, 0x0058, 0x0059}, /* ME use */
+ {0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */
+ {0x0000, 0x013A, 0x01FF}, /* sata shadow copy */
+ {0x0000, 0x0175, 0x0176}, /* write once */
+ {0x0000, 0x0195, 0x0197}, /* diagnostics */
+ {0x0000, 0x01DC, 0x01DD}, /* manufacturing use */
+ {0x0000, 0x027D, 0x0284}, /* diagnostics */
+ {0x0000, 0x02E3, 0x02E3}, /* manufacturing use */
+ {0x0000, 0x02FF, 0x02FF}, /* manufacturing use */
+ {0x0000, 0x0300, 0x0302}, /* manufacturing use */
+ {0x0000, 0x0325, 0x0326}, /* manufacturing use */
+ {0x0000, 0x0332, 0x0335}, /* fan control */
+ {0x0000, 0x0350, 0x0350}, /* manufacturing use */
+ {0x0000, 0x0363, 0x0363}, /* manufacturing use */
+ {0x0000, 0x0368, 0x0368}, /* manufacturing use */
+ {0x0000, 0x03F6, 0x03F7}, /* manufacturing use */
+ {0x0000, 0x049E, 0x049F}, /* manufacturing use */
+ {0x0000, 0x04A0, 0x04A3}, /* diagnostics */
+ {0x0000, 0x04E6, 0x04E7}, /* manufacturing use */
+ {0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */
+ {0x0000, 0x9000, 0x9001}, /* internal BIOS use */
+ {0x0000, 0xA000, 0xBFFF}, /* write only */
+ {0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */
+ /* handled by kernel: dell-laptop */
+ {0x0000, BRIGHTNESS_TOKEN, BRIGHTNESS_TOKEN},
+ {0x0000, KBD_LED_OFF_TOKEN, KBD_LED_AUTO_TOKEN},
+ {0x0000, KBD_LED_AC_TOKEN, KBD_LED_AC_TOKEN},
+ {0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN},
+ {0x0000, KBD_LED_AUTO_100_TOKEN, KBD_LED_AUTO_100_TOKEN},
+ {0x0000, GLOBAL_MIC_MUTE_ENABLE, GLOBAL_MIC_MUTE_DISABLE},
+};
+
+static LIST_HEAD(smbios_device_list);
int dell_smbios_error(int value)
{
@@ -55,42 +141,175 @@ int dell_smbios_error(int value)
}
EXPORT_SYMBOL_GPL(dell_smbios_error);
-struct calling_interface_buffer *dell_smbios_get_buffer(void)
+int dell_smbios_register_device(struct device *d, void *call_fn)
{
- mutex_lock(&buffer_mutex);
- dell_smbios_clear_buffer();
- return buffer;
+ struct smbios_device *priv;
+
+ priv = devm_kzalloc(d, sizeof(struct smbios_device), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ get_device(d);
+ priv->device = d;
+ priv->call_fn = call_fn;
+ mutex_lock(&smbios_mutex);
+ list_add_tail(&priv->list, &smbios_device_list);
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Added device: %s\n", d->driver->name);
+ return 0;
}
-EXPORT_SYMBOL_GPL(dell_smbios_get_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_register_device);
-void dell_smbios_clear_buffer(void)
+void dell_smbios_unregister_device(struct device *d)
{
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
+ struct smbios_device *priv;
+
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (priv->device == d) {
+ list_del(&priv->list);
+ put_device(d);
+ break;
+ }
+ }
+ mutex_unlock(&smbios_mutex);
+ dev_dbg(d, "Remove device: %s\n", d->driver->name);
}
-EXPORT_SYMBOL_GPL(dell_smbios_clear_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_unregister_device);
-void dell_smbios_release_buffer(void)
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer)
{
- mutex_unlock(&buffer_mutex);
+ u16 t = 0;
+ int i;
+
+ /* can't make calls over 30 */
+ if (buffer->cmd_class > 30) {
+ dev_dbg(d, "class too big: %u\n", buffer->cmd_class);
+ return -EINVAL;
+ }
+
+ /* supported calls on the particular system */
+ if (!(da_supported_commands & (1 << buffer->cmd_class))) {
+ dev_dbg(d, "invalid command, supported commands: 0x%8x\n",
+ da_supported_commands);
+ return -EINVAL;
+ }
+
+ /* match against call blacklist */
+ for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) {
+ if (buffer->cmd_class != call_blacklist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_blacklist[i].cmd_select &&
+ call_blacklist[i].cmd_select != -1)
+ continue;
+ dev_dbg(d, "blacklisted command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return -EINVAL;
+ }
+
+ /* if a token call, find token ID */
+ if ((buffer->cmd_class == CLASS_TOKEN_READ ||
+ buffer->cmd_class == CLASS_TOKEN_WRITE) &&
+ buffer->cmd_select < 3) {
+ /* find the matching token ID */
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].location != buffer->input[0])
+ continue;
+ t = da_tokens[i].tokenID;
+ break;
+ }
+
+ /* token call; but token didn't exist */
+ if (!t) {
+ dev_dbg(d, "token at location %04x doesn't exist\n",
+ buffer->input[0]);
+ return -EINVAL;
+ }
+
+ /* match against token blacklist */
+ for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) {
+ if (!token_blacklist[i].min || !token_blacklist[i].max)
+ continue;
+ if (t >= token_blacklist[i].min &&
+ t <= token_blacklist[i].max)
+ return -EINVAL;
+ }
+
+ /* match against token whitelist */
+ for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) {
+ if (!token_whitelist[i].min || !token_whitelist[i].max)
+ continue;
+ if (t < token_whitelist[i].min ||
+ t > token_whitelist[i].max)
+ continue;
+ if (!token_whitelist[i].need_capability ||
+ capable(token_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted token: %x\n", t);
+ return 0;
+ }
+
+ }
+ }
+ /* match against call whitelist */
+ for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) {
+ if (buffer->cmd_class != call_whitelist[i].cmd_class)
+ continue;
+ if (buffer->cmd_select != call_whitelist[i].cmd_select)
+ continue;
+ if (!call_whitelist[i].need_capability ||
+ capable(call_whitelist[i].need_capability)) {
+ dev_dbg(d, "whitelisted capable command: %u/%u\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+ dev_dbg(d, "missing capability %d for %u/%u\n",
+ call_whitelist[i].need_capability,
+ buffer->cmd_class, buffer->cmd_select);
+
+ }
+
+ /* not in a whitelist, only allow processes with capabilities */
+ if (capable(CAP_SYS_RAWIO)) {
+ dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n",
+ buffer->cmd_class, buffer->cmd_select);
+ return 0;
+ }
+
+ return -EACCES;
}
-EXPORT_SYMBOL_GPL(dell_smbios_release_buffer);
+EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
-void dell_smbios_send_request(int class, int select)
+int dell_smbios_call(struct calling_interface_buffer *buffer)
{
- struct smi_cmd command;
+ int (*call_fn)(struct calling_interface_buffer *) = NULL;
+ struct device *selected_dev = NULL;
+ struct smbios_device *priv;
+ int ret;
- command.magic = SMI_CMD_MAGIC;
- command.command_address = da_command_address;
- command.command_code = da_command_code;
- command.ebx = virt_to_phys(buffer);
- command.ecx = 0x42534931;
+ mutex_lock(&smbios_mutex);
+ list_for_each_entry(priv, &smbios_device_list, list) {
+ if (!selected_dev || priv->device->id >= selected_dev->id) {
+ dev_dbg(priv->device, "Trying device ID: %d\n",
+ priv->device->id);
+ call_fn = priv->call_fn;
+ selected_dev = priv->device;
+ }
+ }
+
+ if (!selected_dev) {
+ ret = -ENODEV;
+ pr_err("No dell-smbios drivers are loaded\n");
+ goto out_smbios_call;
+ }
- buffer->class = class;
- buffer->select = select;
+ ret = call_fn(buffer);
- dcdbas_smi_request(&command);
+out_smbios_call:
+ mutex_unlock(&smbios_mutex);
+ return ret;
}
-EXPORT_SYMBOL_GPL(dell_smbios_send_request);
+EXPORT_SYMBOL_GPL(dell_smbios_call);
struct calling_interface_token *dell_smbios_find_token(int tokenid)
{
@@ -139,8 +358,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
if (dm->length < 17)
return;
- da_command_address = table->cmdIOAddress;
- da_command_code = table->cmdIOCode;
+ da_supported_commands = table->supportedCmds;
new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
sizeof(struct calling_interface_token),
@@ -156,6 +374,27 @@ static void __init parse_da_table(const struct dmi_header *dm)
da_num_tokens += tokens;
}
+static void zero_duplicates(struct device *dev)
+{
+ int i, j;
+
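+ /* keep the first occurrence of each token ID and zero any later duplicates */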
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ for (j = i+1; j < da_num_tokens; j++) {
+ if (da_tokens[j].tokenID == 0)
+ continue;
+ if (da_tokens[i].tokenID == da_tokens[j].tokenID) {
+ dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n",
+ da_tokens[j].tokenID,
+ da_tokens[j].location,
+ da_tokens[j].value);
+ da_tokens[j].tokenID = 0;
+ }
+ }
+ }
+}
+
static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
@@ -169,10 +408,160 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
+static int match_attribute(struct device *dev,
+ struct device_attribute *attr)
+{
+ int i;
+
+ for (i = 0; i < da_num_tokens * 2; i++) {
+ if (!token_attrs[i])
+ continue;
+ if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
+ return i/2;
+ }
+ dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
+ return -EINVAL;
+}
+
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location);
+ return 0;
+}
+
+static ssize_t value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ i = match_attribute(dev, attr);
+ if (i > 0)
+ return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value);
+ return 0;
+}
+
+static struct attribute_group smbios_attribute_group = {
+ .name = "tokens"
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "dell-smbios",
+ },
+};
+
+static int build_tokens_sysfs(struct platform_device *dev)
+{
+ char *location_name;
+ char *value_name;
+ size_t size;
+ int ret;
+ int i, j;
+
+ /* number of tokens + 1 for the NULL terminator */
+ size = sizeof(struct device_attribute) * (da_num_tokens + 1);
+ token_location_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_location_attrs)
+ return -ENOMEM;
+ token_value_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_value_attrs)
+ goto out_allocate_value;
+
+ /* need to store both location and value + terminator */
+ size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
+ token_attrs = kzalloc(size, GFP_KERNEL);
+ if (!token_attrs)
+ goto out_allocate_attrs;
+
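+ /* i walks the token table, j walks the flattened attribute list (two attributes per token) */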
+ for (i = 0, j = 0; i < da_num_tokens; i++) {
+ /* skip empty */
+ if (da_tokens[i].tokenID == 0)
+ continue;
+ /* add location */
+ location_name = kasprintf(GFP_KERNEL, "%04x_location",
+ da_tokens[i].tokenID);
+ if (location_name == NULL)
+ goto out_unwind_strings;
+ sysfs_attr_init(&token_location_attrs[i].attr);
+ token_location_attrs[i].attr.name = location_name;
+ token_location_attrs[i].attr.mode = 0444;
+ token_location_attrs[i].show = location_show;
+ token_attrs[j++] = &token_location_attrs[i].attr;
+
+ /* add value */
+ value_name = kasprintf(GFP_KERNEL, "%04x_value",
+ da_tokens[i].tokenID);
+ if (value_name == NULL)
+ goto loop_fail_create_value;
+ sysfs_attr_init(&token_value_attrs[i].attr);
+ token_value_attrs[i].attr.name = value_name;
+ token_value_attrs[i].attr.mode = 0444;
+ token_value_attrs[i].show = value_show;
+ token_attrs[j++] = &token_value_attrs[i].attr;
+ continue;
+
+loop_fail_create_value:
+ kfree(value_name);
+ goto out_unwind_strings;
+ }
+ smbios_attribute_group.attrs = token_attrs;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group);
+ if (ret)
+ goto out_unwind_strings;
+ return 0;
+
+out_unwind_strings:
+ for (i = i-1; i > 0; i--) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+out_allocate_attrs:
+ kfree(token_value_attrs);
+out_allocate_value:
+ kfree(token_location_attrs);
+
+ return -ENOMEM;
+}
+
+static void free_group(struct platform_device *pdev)
+{
+ int i;
+
+ sysfs_remove_group(&pdev->dev.kobj,
+ &smbios_attribute_group);
+ for (i = 0; i < da_num_tokens; i++) {
+ kfree(token_location_attrs[i].attr.name);
+ kfree(token_value_attrs[i].attr.name);
+ }
+ kfree(token_attrs);
+ kfree(token_value_attrs);
+ kfree(token_location_attrs);
+}
+
static int __init dell_smbios_init(void)
{
+ const struct dmi_device *valid;
int ret;
+ valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL);
+ if (!valid) {
+ pr_err("Unable to run on non-Dell system\n");
+ return -ENODEV;
+ }
+
dmi_walk(find_tokens, NULL);
if (!da_tokens) {
@@ -180,27 +569,52 @@ static int __init dell_smbios_init(void)
return -ENODEV;
}
- /*
- * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
- * is passed to SMI handler.
- */
- buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
- if (!buffer) {
+ ret = platform_driver_register(&platform_driver);
+ if (ret)
+ goto fail_platform_driver;
+
+ platform_device = platform_device_alloc("dell-smbios", 0);
+ if (!platform_device) {
ret = -ENOMEM;
- goto fail_buffer;
+ goto fail_platform_device_alloc;
}
+ ret = platform_device_add(platform_device);
+ if (ret)
+ goto fail_platform_device_add;
+
+ /* duplicate tokens will cause problems building sysfs files */
+ zero_duplicates(&platform_device->dev);
+
+ ret = build_tokens_sysfs(platform_device);
+ if (ret)
+ goto fail_create_group;
return 0;
-fail_buffer:
+fail_create_group:
+ platform_device_del(platform_device);
+
+fail_platform_device_add:
+ platform_device_put(platform_device);
+
+fail_platform_device_alloc:
+ platform_driver_unregister(&platform_driver);
+
+fail_platform_driver:
kfree(da_tokens);
return ret;
}
static void __exit dell_smbios_exit(void)
{
+ mutex_lock(&smbios_mutex);
+ if (platform_device) {
+ free_group(platform_device);
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&platform_driver);
+ }
kfree(da_tokens);
- free_page((unsigned long)buffer);
+ mutex_unlock(&smbios_mutex);
}
subsys_initcall(dell_smbios_init);
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index 45cbc2292cd3..138d478d9adc 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -16,17 +16,29 @@
#ifndef _DELL_SMBIOS_H_
#define _DELL_SMBIOS_H_
-struct notifier_block;
+#include <linux/device.h>
+#include <uapi/linux/wmi.h>
-/* This structure will be modified by the firmware when we enter
- * system management mode, hence the volatiles */
+/* Classes and selects used only in kernel drivers */
+#define CLASS_KBD_BACKLIGHT 4
+#define SELECT_KBD_BACKLIGHT 11
-struct calling_interface_buffer {
- u16 class;
- u16 select;
- volatile u32 input[4];
- volatile u32 output[4];
-} __packed;
+/* Tokens used in kernel drivers, any of these
+ * should be filtered from userspace access
+ */
+#define BRIGHTNESS_TOKEN 0x007d
+#define KBD_LED_AC_TOKEN 0x0451
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
+#define GLOBAL_MIC_MUTE_ENABLE 0x0364
+#define GLOBAL_MIC_MUTE_DISABLE 0x0365
+
+struct notifier_block;
struct calling_interface_token {
u16 tokenID;
@@ -37,12 +49,21 @@ struct calling_interface_token {
};
};
-int dell_smbios_error(int value);
+struct calling_interface_structure {
+ struct dmi_header header;
+ u16 cmdIOAddress;
+ u8 cmdIOCode;
+ u32 supportedCmds;
+ struct calling_interface_token tokens[];
+} __packed;
-struct calling_interface_buffer *dell_smbios_get_buffer(void);
-void dell_smbios_clear_buffer(void);
-void dell_smbios_release_buffer(void);
-void dell_smbios_send_request(int class, int select);
+int dell_smbios_register_device(struct device *d, void *call_fn);
+void dell_smbios_unregister_device(struct device *d);
+
+int dell_smbios_error(int value);
+int dell_smbios_call_filter(struct device *d,
+ struct calling_interface_buffer *buffer);
+int dell_smbios_call(struct calling_interface_buffer *buffer);
struct calling_interface_token *dell_smbios_find_token(int tokenid);
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index 37e646034ef8..1d87237bc731 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -90,7 +90,7 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
struct smo8800_device, miscdev);
u32 data = 0;
- unsigned char byte_data = 0;
+ unsigned char byte_data;
ssize_t retval = 1;
if (count < 1)
@@ -103,7 +103,6 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
if (retval)
return retval;
- byte_data = 1;
retval = 1;
if (data < 255)
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
new file mode 100644
index 000000000000..072821aa47fc
--- /dev/null
+++ b/drivers/platform/x86/dell-wmi-descriptor.c
@@ -0,0 +1,213 @@
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+#include "dell-wmi-descriptor.h"
+
+#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
+
+struct descriptor_priv {
+ struct list_head list;
+ u32 interface_version;
+ u32 size;
+ u32 hotfix;
+};
+static int descriptor_valid = -EPROBE_DEFER;
+static LIST_HEAD(wmi_list);
+static DEFINE_MUTEX(list_mutex);
+
+int dell_wmi_get_descriptor_valid(void)
+{
+ if (!wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID))
+ return -ENODEV;
+
+ return descriptor_valid;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid);
+
+bool dell_wmi_get_interface_version(u32 *version)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *version = priv->interface_version;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version);
+
+bool dell_wmi_get_size(u32 *size)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *size = priv->size;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
+/*
+ * Descriptor buffer is 128 byte long and contains:
+ *
+ * Name Offset Length Value
+ * Vendor Signature 0 4 "DELL"
+ * Object Signature 4 4 " WMI"
+ * WMI Interface Version 8 4 <version>
+ * WMI buffer length 12 4 <length>
+ * WMI hotfix number 16 4 <hotfix>
+ */
+static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
+{
+ union acpi_object *obj = NULL;
+ struct descriptor_priv *priv;
+ u32 *buffer;
+ int ret;
+
+ obj = wmidev_block_query(wdev, 0);
+ if (!obj) {
+ dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ /* Although it's not technically a failure, this would lead to
+ * unexpected behavior
+ */
+ if (obj->buffer.length != 128) {
+ dev_err(&wdev->dev,
+ "Dell descriptor buffer has unexpected length (%d)\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+
+ buffer = (u32 *)obj->buffer.pointer;
+
+ if (strncmp(obj->string.pointer, "DELL WMI", 8) != 0) {
+ dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n",
+ buffer);
+ ret = -EINVAL;
+ descriptor_valid = ret;
+ goto out;
+ }
+ descriptor_valid = 0;
+
+ if (buffer[2] != 0 && buffer[2] != 1)
+ dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n",
+ (unsigned long) buffer[2]);
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv),
+ GFP_KERNEL);
+
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ priv->interface_version = buffer[2];
+ priv->size = buffer[3];
+ priv->hotfix = buffer[4];
+ ret = 0;
+ dev_set_drvdata(&wdev->dev, priv);
+ mutex_lock(&list_mutex);
+ list_add_tail(&priv->list, &wmi_list);
+ mutex_unlock(&list_mutex);
+
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
+ (unsigned long) priv->interface_version,
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
+
+out:
+ kfree(obj);
+ return ret;
+}
+
+static int dell_wmi_descriptor_remove(struct wmi_device *wdev)
+{
+ struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ mutex_lock(&list_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&list_mutex);
+ return 0;
+}
+
+static const struct wmi_device_id dell_wmi_descriptor_id_table[] = {
+ { .guid_string = DELL_WMI_DESCRIPTOR_GUID },
+ { },
+};
+
+static struct wmi_driver dell_wmi_descriptor_driver = {
+ .driver = {
+ .name = "dell-wmi-descriptor",
+ },
+ .probe = dell_wmi_descriptor_probe,
+ .remove = dell_wmi_descriptor_remove,
+ .id_table = dell_wmi_descriptor_id_table,
+};
+
+module_wmi_driver(dell_wmi_descriptor_driver);
+
+MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Dell WMI descriptor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h
new file mode 100644
index 000000000000..a6123a4d06a7
--- /dev/null
+++ b/drivers/platform/x86/dell-wmi-descriptor.h
@@ -0,0 +1,28 @@
+/*
+ * Dell WMI descriptor driver
+ *
+ * Copyright (c) 2017 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DELL_WMI_DESCRIPTOR_H_
+#define _DELL_WMI_DESCRIPTOR_H_
+
+#include <linux/wmi.h>
+
+/* possible return values:
+ * -ENODEV: Descriptor GUID missing from WMI bus
+ * -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run
+ * 0: valid descriptor, successfully probed
+ * < 0: invalid descriptor, don't probe dependent devices
+ */
+int dell_wmi_get_descriptor_valid(void);
+
+bool dell_wmi_get_interface_version(u32 *version);
+bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
+
+#endif
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 28d9f8696081..39d2f4518483 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -39,6 +39,7 @@
#include <linux/wmi.h>
#include <acpi/video.h>
#include "dell-smbios.h"
+#include "dell-wmi-descriptor.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -46,12 +47,10 @@ MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
#define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492"
-#define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
static bool wmi_requires_smbios_request;
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
-MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID);
struct dell_wmi_priv {
struct input_dev *input_dev;
@@ -619,78 +618,6 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
}
/*
- * Descriptor buffer is 128 byte long and contains:
- *
- * Name Offset Length Value
- * Vendor Signature 0 4 "DELL"
- * Object Signature 4 4 " WMI"
- * WMI Interface Version 8 4 <version>
- * WMI buffer length 12 4 4096
- */
-static int dell_wmi_check_descriptor_buffer(struct wmi_device *wdev)
-{
- struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
- union acpi_object *obj = NULL;
- struct wmi_device *desc_dev;
- u32 *buffer;
- int ret;
-
- desc_dev = wmidev_get_other_guid(wdev, DELL_DESCRIPTOR_GUID);
- if (!desc_dev) {
- dev_err(&wdev->dev, "Dell WMI descriptor does not exist\n");
- return -ENODEV;
- }
-
- obj = wmidev_block_query(desc_dev, 0);
- if (!obj) {
- dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n");
- ret = -EIO;
- goto out;
- }
-
- if (obj->type != ACPI_TYPE_BUFFER) {
- dev_err(&wdev->dev, "Dell descriptor has wrong type\n");
- ret = -EINVAL;
- goto out;
- }
-
- if (obj->buffer.length != 128) {
- dev_err(&wdev->dev,
- "Dell descriptor buffer has invalid length (%d)\n",
- obj->buffer.length);
- if (obj->buffer.length < 16) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- buffer = (u32 *)obj->buffer.pointer;
-
- if (buffer[0] != 0x4C4C4544 && buffer[1] != 0x494D5720)
- dev_warn(&wdev->dev, "Dell descriptor buffer has invalid signature (%*ph)\n",
- 8, buffer);
-
- if (buffer[2] != 0 && buffer[2] != 1)
- dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%d)\n",
- buffer[2]);
-
- if (buffer[3] != 4096)
- dev_warn(&wdev->dev, "Dell descriptor buffer has invalid buffer length (%d)\n",
- buffer[3]);
-
- priv->interface_version = buffer[2];
- ret = 0;
-
- dev_info(&wdev->dev, "Detected Dell WMI interface version %u\n",
- priv->interface_version);
-
-out:
- kfree(obj);
- put_device(&desc_dev->dev);
- return ret;
-}
-
-/*
* According to Dell SMBIOS documentation:
*
* 17 3 Application Program Registration
@@ -711,13 +638,16 @@ static int dell_wmi_events_set_enabled(bool enable)
struct calling_interface_buffer *buffer;
int ret;
- buffer = dell_smbios_get_buffer();
+ buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+ buffer->cmd_class = CLASS_INFO;
+ buffer->cmd_select = SELECT_APP_REGISTRATION;
buffer->input[0] = 0x10000;
buffer->input[1] = 0x51534554;
buffer->input[3] = enable;
- dell_smbios_send_request(17, 3);
- ret = buffer->output[0];
- dell_smbios_release_buffer();
+ ret = dell_smbios_call(buffer);
+ if (ret == 0)
+ ret = buffer->output[0];
+ kfree(buffer);
return dell_smbios_error(ret);
}
@@ -725,7 +655,11 @@ static int dell_wmi_events_set_enabled(bool enable)
static int dell_wmi_probe(struct wmi_device *wdev)
{
struct dell_wmi_priv *priv;
- int err;
+ int ret;
+
+ ret = dell_wmi_get_descriptor_valid();
+ if (ret)
+ return ret;
priv = devm_kzalloc(
&wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
@@ -733,9 +667,8 @@ static int dell_wmi_probe(struct wmi_device *wdev)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, priv);
- err = dell_wmi_check_descriptor_buffer(wdev);
- if (err)
- return err;
+ if (!dell_wmi_get_interface_version(&priv->interface_version))
+ return -EPROBE_DEFER;
return dell_wmi_input_setup(wdev);
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 56a8195096a2..2cfbd3fa5136 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -691,6 +691,7 @@ static enum led_brightness eco_led_get(struct led_classdev *cdev)
static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
struct led_classdev *led;
int result;
@@ -724,12 +725,15 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
}
/*
- * BTNI bit 24 seems to indicate the presence of a radio toggle
- * button in place of a slide switch, and all such machines appear
- * to also have an RF LED. Therefore use bit 24 as an indicator
- * that an RF LED is present.
+ * Some Fujitsu laptops have a radio toggle button in place of a slide
+ * switch and all such machines appear to also have an RF LED. Based on
+ * comparing DSDT tables of four Fujitsu Lifebook models (E744, E751,
+ * S7110, S8420; the first one has a radio toggle button, the other
+ * three have slide switches), bit 17 of flags_supported (the value
+ * returned by method S000 of ACPI device FUJ02E3) seems to indicate
+ * whether given model has a radio toggle button.
*/
- if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ if (priv->flags_supported & BIT(17)) {
led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index b4ed3dc983d5..b4224389febe 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask)
if (state < 0)
return state;
- return state & 0x1;
+ return !!(state & mask);
}
static int __init hp_wmi_bios_2008_later(void)
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 493d8910a74e..7b12abe86b94 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
+ AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index fe98d4ac0df3..53ab4e0f8962 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1166,6 +1166,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
},
},
+ {
+ .ident = "Lenovo YOGA 920-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index e34fd70b67af..f470279c4c10 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -226,6 +226,24 @@ wakeup:
return;
}
+ /*
+ * Needed for suspend to work on some platforms that don't expose
+ * the 5-button array, but still send notifies with power button
+ * event code to this device object on power button actions.
+ *
+ * Report the power button press; catch and ignore the button release.
+ */
+ if (!priv->array) {
+ if (event == 0xce) {
+ input_report_key(priv->input_dev, KEY_POWER, 1);
+ input_sync(priv->input_dev);
+ return;
+ }
+
+ if (event == 0xcf)
+ return;
+ }
+
/* 0xC0 is for HID events, other values are for 5 button array */
if (event != 0xc0) {
if (!priv->array ||
diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c
new file mode 100644
index 000000000000..c2257bd06f18
--- /dev/null
+++ b/drivers/platform/x86/intel-wmi-thunderbolt.c
@@ -0,0 +1,98 @@
+/*
+ * WMI Thunderbolt driver
+ *
+ * Copyright (C) 2017 Dell Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+static ssize_t force_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ u8 mode;
+
+ input.length = sizeof(u8);
+ input.pointer = &mode;
+ mode = hex_to_bin(buf[0]);
+ if (mode == 0 || mode == 1) {
+ status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1,
+ &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+ } else {
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR_WO(force_power);
+
+static struct attribute *tbt_attrs[] = {
+ &dev_attr_force_power.attr,
+ NULL
+};
+
+static const struct attribute_group tbt_attribute_group = {
+ .attrs = tbt_attrs,
+};
+
+static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group);
+ kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+ return ret;
+}
+
+static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev)
+{
+ sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group);
+ kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = {
+ { .guid_string = INTEL_WMI_THUNDERBOLT_GUID },
+ { },
+};
+
+static struct wmi_driver intel_wmi_thunderbolt_driver = {
+ .driver = {
+ .name = "intel-wmi-thunderbolt",
+ },
+ .probe = intel_wmi_thunderbolt_probe,
+ .remove = intel_wmi_thunderbolt_remove,
+ .id_table = intel_wmi_thunderbolt_id_table,
+};
+
+module_wmi_driver(intel_wmi_thunderbolt_driver);
+
+MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID);
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
+MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index da706e2c4232..380ef7ec094f 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -24,6 +24,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define EXPECTED_PTYPE 4
@@ -34,6 +35,42 @@ struct cht_int33fe_data {
struct i2c_client *pi3usb30532;
};
+/*
+ * Grrr I severely dislike buggy BIOSes. At least one BIOS enumerates
+ * the max17047 both through the INT33FE ACPI device (it is right there
+ * in the resources table) as well as through a separate MAX17047 device.
+ *
+ * These helpers are used to work around this by checking if an i2c-client
+ * for the max17047 has already been registered.
+ */
+static int cht_int33fe_check_for_max17047(struct device *dev, void *data)
+{
+ struct i2c_client **max17047 = data;
+ struct acpi_device *adev;
+ const char *hid;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return 0;
+
+ hid = acpi_device_hid(adev);
+
+ /* The MAX17047 ACPI node doesn't have a UID, so we don't check that */
+ if (strcmp(hid, "MAX17047"))
+ return 0;
+
+ *max17047 = to_i2c_client(dev);
+ return 1;
+}
+
+static struct i2c_client *cht_int33fe_find_max17047(void)
+{
+ struct i2c_client *max17047 = NULL;
+
+ i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047);
+ return max17047;
+}
+
static const char * const max17047_suppliers[] = { "bq24190-charger" };
static const struct property_entry max17047_props[] = {
@@ -41,14 +78,25 @@ static const struct property_entry max17047_props[] = {
{ }
};
+static const struct property_entry fusb302_props[] = {
+ PROPERTY_ENTRY_STRING("fcs,extcon-name", "cht_wcove_pwrsrc"),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microvolt", 12000000),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microamp", 3000000),
+ PROPERTY_ENTRY_U32("fcs,max-sink-microwatt", 36000000),
+ { }
+};
+
static int cht_int33fe_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_board_info board_info;
struct cht_int33fe_data *data;
+ struct i2c_client *max17047;
+ struct regulator *regulator;
unsigned long long ptyp;
acpi_status status;
int fusb302_irq;
+ int ret;
status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp);
if (ACPI_FAILURE(status)) {
@@ -63,6 +111,34 @@ static int cht_int33fe_probe(struct i2c_client *client)
if (ptyp != EXPECTED_PTYPE)
return -ENODEV;
+ /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */
+ if (!acpi_dev_present("INT34D3", "1", 3)) {
+ dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n",
+ EXPECTED_PTYPE);
+ return -ENODEV;
+ }
+
+ /*
+ * We expect the WC PMIC to be paired with a TI bq24292i charger-IC.
+ * We check for the bq24292i vbus regulator here; this serves two purposes:
+ * 1) The bq24292i allows charging with up to 12V; setting the fusb302's
+ * max-snk voltage to 12V with a different charger-IC would not be safe.
+ * 2) For the fusb302 driver to get the bq24292i vbus regulator, the
+ * regulator-map, which is part of the bq24292i regulator_init_data,
+ * must be registered before the fusb302 is instantiated; otherwise
+ * it will end up with a dummy-regulator.
+ * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data
+ * which is defined in i2c-cht-wc.c from where the bq24292i i2c-client
+ * gets instantiated. We use regulator_get_optional here so that we
+ * don't end up getting a dummy-regulator ourselves.
+ */
+ regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus");
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ return (ret == -ENODEV) ? -EPROBE_DEFER : ret;
+ }
+ regulator_put(regulator);
+
/* The FUSB302 uses the irq at index 1 and is the only irq user */
fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1);
if (fusb302_irq < 0) {
@@ -75,16 +151,31 @@ static int cht_int33fe_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
- board_info.properties = max17047_props;
-
- data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
- if (!data->max17047)
- return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+ /* Work around BIOS bug, see comment on cht_int33fe_find_max17047 */
+ max17047 = cht_int33fe_find_max17047();
+ if (max17047) {
+ /* Pre-existing i2c-client for the max17047, add device-props */
+ ret = device_add_properties(&max17047->dev, max17047_props);
+ if (ret)
+ return ret;
+ /* And re-probe to get the new device-props applied. */
+ ret = device_reprobe(&max17047->dev);
+ if (ret)
+ dev_warn(dev, "Reprobing max17047 error: %d\n", ret);
+ } else {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.dev_name = "max17047";
+ board_info.properties = max17047_props;
+ data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
+ if (!data->max17047)
+ return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
+ }
memset(&board_info, 0, sizeof(board_info));
- strlcpy(board_info.type, "fusb302", I2C_NAME_SIZE);
+ strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
+ board_info.dev_name = "fusb302";
+ board_info.properties = fusb302_props;
board_info.irq = fusb302_irq;
data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
@@ -92,6 +183,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
goto out_unregister_max17047;
memset(&board_info, 0, sizeof(board_info));
+ board_info.dev_name = "pi3usb30532";
strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
@@ -106,7 +198,8 @@ out_unregister_fusb302:
i2c_unregister_device(data->fusb302);
out_unregister_max17047:
- i2c_unregister_device(data->max17047);
+ if (data->max17047)
+ i2c_unregister_device(data->max17047);
return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
}
@@ -117,7 +210,8 @@ static int cht_int33fe_remove(struct i2c_client *i2c)
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
- i2c_unregister_device(data->max17047);
+ if (data->max17047)
+ i2c_unregister_device(data->max17047);
return 0;
}
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 92dc230ef5b2..f7b67e898abc 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -119,7 +119,7 @@ static irqreturn_t int0002_irq(int irq, void *data)
if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
return IRQ_NONE;
- generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
GPE0A_PME_B0_VIRT_GPIO_PIN));
pm_system_wakeup();
@@ -165,7 +165,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
- chip->irq_need_valid_mask = true;
+ chip->irq.need_valid_mask = true;
ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
if (ret) {
@@ -173,7 +173,7 @@ static int int0002_probe(struct platform_device *pdev)
return ret;
}
- bitmap_clear(chip->irq_valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
+ bitmap_clear(chip->irq.valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);
/*
* We manually request the irq here instead of passing a flow-handler
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 58dcee562d64..a0c95853fd3f 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -10,10 +10,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -259,8 +255,6 @@ static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */
/* Per-SKU limits */
struct ips_mcp_limits {
- int cpu_family;
- int cpu_model; /* includes extended model... */
int mcp_power_limit; /* mW units */
int core_power_limit;
int mch_power_limit;
@@ -295,11 +289,14 @@ static struct ips_mcp_limits ips_ulv_limits = {
};
struct ips_driver {
- struct pci_dev *dev;
- void *regmap;
+ struct device *dev;
+ void __iomem *regmap;
+ int irq;
+
struct task_struct *monitor;
struct task_struct *adjust;
struct dentry *debug_root;
+ struct timer_list timer;
/* Average CPU core temps (all averages in .01 degrees C for precision) */
u16 ctv1_avg_temp;
@@ -594,7 +591,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
return;
if (!ips->gpu_turbo_disable())
- dev_err(&ips->dev->dev, "failed to disable graphics turbo\n");
+ dev_err(ips->dev, "failed to disable graphics turbo\n");
else
ips->__gpu_turbo_on = false;
}
@@ -649,8 +646,7 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu)
spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
if (ret)
- dev_info(&ips->dev->dev,
- "CPU power or thermal limit exceeded\n");
+ dev_info(ips->dev, "CPU power or thermal limit exceeded\n");
return ret;
}
@@ -769,7 +765,7 @@ static int ips_adjust(void *data)
struct ips_driver *ips = data;
unsigned long flags;
- dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");
+ dev_dbg(ips->dev, "starting ips-adjust thread\n");
/*
* Adjust CPU and GPU clamps every 5s if needed. Doing it more
@@ -816,7 +812,7 @@ sleep:
schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
} while (!kthread_should_stop());
- dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
+ dev_dbg(ips->dev, "ips-adjust thread stopped\n");
return 0;
}
@@ -942,9 +938,10 @@ static u32 calc_avg_power(struct ips_driver *ips, u32 *array)
return avg;
}
-static void monitor_timeout(unsigned long arg)
+static void monitor_timeout(struct timer_list *t)
{
- wake_up_process((struct task_struct *)arg);
+ struct ips_driver *ips = from_timer(ips, t, timer);
+ wake_up_process(ips->monitor);
}
/**
@@ -961,7 +958,6 @@ static void monitor_timeout(unsigned long arg)
static int ips_monitor(void *data)
{
struct ips_driver *ips = data;
- struct timer_list timer;
unsigned long seqno_timestamp, expire, last_msecs, last_sample_period;
int i;
u32 *cpu_samples, *mchp_samples, old_cpu_power;
@@ -976,7 +972,7 @@ static int ips_monitor(void *data)
mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL);
if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples ||
!cpu_samples || !mchp_samples) {
- dev_err(&ips->dev->dev,
+ dev_err(ips->dev,
"failed to allocate sample array, ips disabled\n");
kfree(mcp_samples);
kfree(ctv1_samples);
@@ -1049,8 +1045,7 @@ static int ips_monitor(void *data)
schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
last_sample_period = IPS_SAMPLE_PERIOD;
- setup_deferrable_timer_on_stack(&timer, monitor_timeout,
- (unsigned long)current);
+ timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
do {
u32 cpu_val, mch_val;
u16 val;
@@ -1097,7 +1092,8 @@ static int ips_monitor(void *data)
ITV_ME_SEQNO_SHIFT;
if (cur_seqno == last_seqno &&
time_after(jiffies, seqno_timestamp + HZ)) {
- dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n");
+ dev_warn(ips->dev,
+ "ME failed to update for more than 1s, likely hung\n");
} else {
seqno_timestamp = get_jiffies_64();
last_seqno = cur_seqno;
@@ -1107,7 +1103,7 @@ static int ips_monitor(void *data)
expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD);
__set_current_state(TASK_INTERRUPTIBLE);
- mod_timer(&timer, expire);
+ mod_timer(&ips->timer, expire);
schedule();
/* Calculate actual sample period for power averaging */
@@ -1116,10 +1112,9 @@ static int ips_monitor(void *data)
last_sample_period = 1;
} while (!kthread_should_stop());
- del_timer_sync(&timer);
- destroy_timer_on_stack(&timer);
+ del_timer_sync(&ips->timer);
- dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n");
+ dev_dbg(ips->dev, "ips-monitor thread stopped\n");
return 0;
}
@@ -1128,17 +1123,17 @@ static int ips_monitor(void *data)
#define THM_DUMPW(reg) \
{ \
u16 val = thm_readw(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \
}
#define THM_DUMPL(reg) \
{ \
u32 val = thm_readl(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \
}
#define THM_DUMPQ(reg) \
{ \
u64 val = thm_readq(reg); \
- dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \
+ dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \
}
static void dump_thermal_info(struct ips_driver *ips)
@@ -1146,7 +1141,7 @@ static void dump_thermal_info(struct ips_driver *ips)
u16 ptl;
ptl = thm_readw(THM_PTL);
- dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl);
+ dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl);
THM_DUMPW(THM_CTA);
THM_DUMPW(THM_TRC);
@@ -1175,8 +1170,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
if (!tses && !tes)
return IRQ_NONE;
- dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses);
- dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes);
+ dev_info(ips->dev, "TSES: 0x%02x\n", tses);
+ dev_info(ips->dev, "TES: 0x%02x\n", tes);
/* STS update from EC? */
if (tes & 1) {
@@ -1214,8 +1209,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg)
/* Thermal trip */
if (tses) {
- dev_warn(&ips->dev->dev,
- "thermal trip occurred, tses: 0x%04x\n", tses);
+ dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n",
+ tses);
thm_writeb(THM_TSES, tses);
}
@@ -1330,8 +1325,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
ips->debug_root = debugfs_create_dir("ips", NULL);
if (!ips->debug_root) {
- dev_err(&ips->dev->dev,
- "failed to create debugfs entries: %ld\n",
+ dev_err(ips->dev, "failed to create debugfs entries: %ld\n",
PTR_ERR(ips->debug_root));
return;
}
@@ -1345,8 +1339,7 @@ static void ips_debugfs_init(struct ips_driver *ips)
ips->debug_root, node,
&ips_debugfs_ops);
if (!ent) {
- dev_err(&ips->dev->dev,
- "failed to create debug file: %ld\n",
+ dev_err(ips->dev, "failed to create debug file: %ld\n",
PTR_ERR(ent));
goto err_cleanup;
}
@@ -1373,8 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
u16 tdp;
if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) {
- dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n");
- goto out;
+ dev_info(ips->dev, "Non-IPS CPU detected.\n");
+ return NULL;
}
rdmsrl(IA32_MISC_ENABLE, misc_en);
@@ -1395,8 +1388,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
limits = &ips_ulv_limits;
else {
- dev_info(&ips->dev->dev, "No CPUID match found.\n");
- goto out;
+ dev_info(ips->dev, "No CPUID match found.\n");
+ return NULL;
}
rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
@@ -1404,12 +1397,12 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
/* Sanity check TDP against CPU */
if (limits->core_power_limit != (tdp / 8) * 1000) {
- dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n",
+ dev_info(ips->dev,
+ "CPU TDP doesn't match expected value (found %d, expected %d)\n",
tdp / 8, limits->core_power_limit / 1000);
limits->core_power_limit = (tdp / 8) * 1000;
}
-out:
return limits;
}
@@ -1459,7 +1452,7 @@ ips_gpu_turbo_enabled(struct ips_driver *ips)
{
if (!ips->gpu_busy && late_i915_load) {
if (ips_get_i915_syms(ips)) {
- dev_info(&ips->dev->dev,
+ dev_info(ips->dev,
"i915 driver attached, reenabling gpu turbo\n");
ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
}
@@ -1480,8 +1473,7 @@ ips_link_to_i915_driver(void)
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
static const struct pci_device_id ips_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
{ 0, }
};
@@ -1517,62 +1509,45 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (dmi_check_system(ips_blacklist))
return -ENODEV;
- ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
+ ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL);
if (!ips)
return -ENOMEM;
- pci_set_drvdata(dev, ips);
- ips->dev = dev;
+ spin_lock_init(&ips->turbo_status_lock);
+ ips->dev = &dev->dev;
ips->limits = ips_detect_cpu(ips);
if (!ips->limits) {
dev_info(&dev->dev, "IPS not supported on this CPU\n");
- ret = -ENXIO;
- goto error_free;
+ return -ENXIO;
}
- spin_lock_init(&ips->turbo_status_lock);
-
- ret = pci_enable_device(dev);
+ ret = pcim_enable_device(dev);
if (ret) {
dev_err(&dev->dev, "can't enable PCI device, aborting\n");
- goto error_free;
+ return ret;
}
- if (!pci_resource_start(dev, 0)) {
- dev_err(&dev->dev, "TBAR not assigned, aborting\n");
- ret = -ENXIO;
- goto error_free;
- }
-
- ret = pci_request_regions(dev, "ips thermal sensor");
+ ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev));
if (ret) {
- dev_err(&dev->dev, "thermal resource busy, aborting\n");
- goto error_free;
- }
-
-
- ips->regmap = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
- if (!ips->regmap) {
dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
- ret = -EBUSY;
- goto error_release;
+ return ret;
}
+ ips->regmap = pcim_iomap_table(dev)[0];
+
+ pci_set_drvdata(dev, ips);
tse = thm_readb(THM_TSE);
if (tse != TSE_EN) {
dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
- ret = -ENXIO;
- goto error_unmap;
+ return -ENXIO;
}
trc = thm_readw(THM_TRC);
trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
if ((trc & trc_required_mask) != trc_required_mask) {
dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
- ret = -ENXIO;
- goto error_unmap;
+ return -ENXIO;
}
if (trc & TRC_CORE2_EN)
@@ -1602,20 +1577,23 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
rdmsrl(PLATFORM_INFO, platform_info);
if (!(platform_info & PLATFORM_TDP)) {
dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
- ret = -ENODEV;
- goto error_unmap;
+ return -ENODEV;
}
/*
* IRQ handler for ME interaction
* Note: don't use MSI here as the PCH has bugs.
*/
- pci_disable_msi(dev);
- ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
- ips);
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+ if (ret < 0)
+ return ret;
+
+ ips->irq = pci_irq_vector(dev, 0);
+
+ ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips);
if (ret) {
dev_err(&dev->dev, "request irq failed, aborting\n");
- goto error_unmap;
+ return ret;
}
/* Enable aux, hot & critical interrupts */
@@ -1672,13 +1650,8 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
error_thread_cleanup:
kthread_stop(ips->adjust);
error_free_irq:
- free_irq(ips->dev->irq, ips);
-error_unmap:
- iounmap(ips->regmap);
-error_release:
- pci_release_regions(dev);
-error_free:
- kfree(ips);
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
return ret;
}
@@ -1709,27 +1682,20 @@ static void ips_remove(struct pci_dev *dev)
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);
- free_irq(ips->dev->irq, ips);
+ free_irq(ips->irq, ips);
+ pci_free_irq_vectors(dev);
if (ips->adjust)
kthread_stop(ips->adjust);
if (ips->monitor)
kthread_stop(ips->monitor);
- iounmap(ips->regmap);
- pci_release_regions(dev);
- kfree(ips);
dev_dbg(&dev->dev, "IPS driver removed\n");
}
-static void ips_shutdown(struct pci_dev *dev)
-{
-}
-
static struct pci_driver ips_pci_driver = {
.name = "intel ips",
.id_table = ips_id_table,
.probe = ips_probe,
.remove = ips_remove,
- .shutdown = ips_shutdown,
};
module_pci_driver(ips_pci_driver);
diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h
index 73299beff5b3..60f4e3ddbe9f 100644
--- a/drivers/platform/x86/intel_ips.h
+++ b/drivers/platform/x86/intel_ips.h
@@ -10,10 +10,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*/
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index a47a41fc10ad..b5b890127479 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res) {
+ if (res && resource_size(res) > 1) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index 0d4c3808a6d8..f378621b5fe9 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -15,9 +15,8 @@
* Telemetry Framework provides platform related PM and performance statistics.
* This file provides the core telemetry API implementation.
*/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <asm/intel_telemetry.h>
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index d4fc42b4cbeb..4249e8267bbc 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -21,14 +21,12 @@
* /sys/kernel/debug/telemetry/ioss_race_verbosity: Write and Change Tracing
* Verbosity via firmware
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+#include <linux/device.h>
#include <linux/io.h>
-#include <linux/uaccess.h>
+#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <asm/cpu_device_id.h>
@@ -76,8 +74,6 @@
#define TELEM_IOSS_DX_D0IX_EVTS 25
#define TELEM_IOSS_PG_EVTS 30
-#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0]))
-
#define TELEM_DEBUGFS_CPU(model, data) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data}
@@ -304,13 +300,13 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
.ioss_d0ix_data = telem_apl_ioss_d0ix_data,
.ioss_pg_data = telem_apl_ioss_pg_data,
- .pss_idle_evts = TELEM_EVT_LEN(telem_apl_pss_idle_data),
- .pcs_idle_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_idle_blkd_data),
- .pcs_s0ix_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_s0ix_blkd_data),
- .pss_ltr_evts = TELEM_EVT_LEN(telem_apl_pss_ltr_data),
- .pss_wakeup_evts = TELEM_EVT_LEN(telem_apl_pss_wakeup),
- .ioss_d0ix_evts = TELEM_EVT_LEN(telem_apl_ioss_d0ix_data),
- .ioss_pg_evts = TELEM_EVT_LEN(telem_apl_ioss_pg_data),
+ .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data),
+ .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data),
+ .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data),
+ .pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data),
+ .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup),
+ .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data),
+ .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data),
.pstates_id = TELEM_APL_PSS_PSTATES_ID,
.pss_idle_id = TELEM_APL_PSS_IDLE_ID,
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index e0424d5a795a..2f889d6c270e 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -16,15 +16,9 @@
* It used the PUNIT and PMC IPC interfaces for configuring the counters.
* The accumulated results are fetched from SRAM.
*/
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+
#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/pci.h>
-#include <linux/suspend.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
@@ -256,7 +250,7 @@ static int telemetry_check_evtid(enum telemetry_unit telem_unit,
break;
default:
- pr_err("Unknown Telemetry action Specified %d\n", action);
+ pr_err("Unknown Telemetry action specified %d\n", action);
return -EINVAL;
}
@@ -659,7 +653,7 @@ static int telemetry_setup(struct platform_device *pdev)
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_RESET);
if (ret) {
- dev_err(&pdev->dev, "TELEMTRY Setup Failed\n");
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed\n");
return ret;
}
return 0;
@@ -685,7 +679,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_UPDATE);
if (ret)
- pr_err("TELEMTRY Config Failed\n");
+ pr_err("TELEMETRY Config Failed\n");
return ret;
}
@@ -822,7 +816,7 @@ static int telemetry_plt_reset_events(void)
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_RESET);
if (ret)
- pr_err("TELEMTRY Reset Failed\n");
+ pr_err("TELEMETRY Reset Failed\n");
return ret;
}
@@ -885,7 +879,7 @@ static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts,
ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig,
TELEM_ADD);
if (ret)
- pr_err("TELEMTRY ADD Failed\n");
+ pr_err("TELEMETRY ADD Failed\n");
return ret;
}
@@ -1195,7 +1189,7 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev)
ret = telemetry_set_pltdata(&telm_pltops, telm_conf);
if (ret) {
- dev_err(&pdev->dev, "TELEMTRY Set Pltops Failed.\n");
+ dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n");
goto out;
}
@@ -1210,7 +1204,7 @@ out:
iounmap(telm_conf->pss_config.regmap);
if (telm_conf->ioss_config.regmap)
iounmap(telm_conf->ioss_config.regmap);
- dev_err(&pdev->dev, "TELEMTRY Setup Failed.\n");
+ dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n");
return ret;
}
@@ -1234,7 +1228,6 @@ static struct platform_driver telemetry_soc_driver = {
static int __init telemetry_module_init(void)
{
- pr_info(DRIVER_NAME ": version %s loaded\n", DRIVER_VERSION);
return platform_driver_register(&telemetry_soc_driver);
}
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
index 4f60d8e32a0a..d4ea01805879 100644
--- a/drivers/platform/x86/intel_turbo_max_3.c
+++ b/drivers/platform/x86/intel_turbo_max_3.c
@@ -125,6 +125,7 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_X),
+ ICPU(INTEL_FAM6_SKYLAKE_X),
{}
};
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 4f3de2a8c4df..504256c3660d 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -216,8 +216,8 @@ static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"),
};
-struct platform_device *mlxplat_dev;
-struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
+static struct platform_device *mlxplat_dev;
+static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug;
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
index bc98ef95514a..9b9e1f39bbfb 100644
--- a/drivers/platform/x86/peaq-wmi.c
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -8,6 +8,7 @@
*/
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/input-polldev.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev)
}
}
+/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */
+static const struct dmi_system_id peaq_dmi_table[] __initconst = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
+ },
+ },
+ {}
+};
+
static int __init peaq_wmi_init(void)
{
+ /* The WMI GUID is not unique; also check for a DMI match */
+ if (!dmi_check_system(peaq_dmi_table))
+ return -ENODEV;
+
if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
return -ENODEV;
@@ -86,9 +102,6 @@ static int __init peaq_wmi_init(void)
static void __exit peaq_wmi_exit(void)
{
- if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
- return;
-
input_unregister_polled_device(peaq_poll_dev);
}
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c
index 1157a7b646d6..266535c2a72f 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/silead_dmi.c
@@ -58,6 +58,7 @@ static const struct property_entry dexp_ursus_7w_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 630),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -72,6 +73,7 @@ static const struct property_entry surftab_wintron70_st70416_6_props[] = {
PROPERTY_ENTRY_STRING("firmware-name",
"gsl1686-surftab-wintron70-st70416-6.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -83,6 +85,8 @@ static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl1680-gp-electronic-t701.fw"),
{ }
@@ -114,6 +118,7 @@ static const struct property_entry pov_mobii_wintab_p800w_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name",
"gsl3692-pov-mobii-wintab-p800w.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
@@ -136,6 +141,36 @@ static const struct silead_ts_dmi_data itworks_tw891_data = {
.properties = itworks_tw891_props,
};
+static const struct property_entry chuwi_hi8_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct silead_ts_dmi_data chuwi_hi8_pro_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi8_pro_props,
+};
+
+static const struct property_entry digma_citi_e200_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1686-digma_citi_e200.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct silead_ts_dmi_data digma_citi_e200_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = digma_citi_e200_props,
+};
+
static const struct dmi_system_id silead_ts_dmi_table[] = {
{
/* CUBE iwork8 Air */
@@ -219,6 +254,23 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TW891"),
},
},
+ {
+ /* Chuwi Hi8 Pro */
+ .driver_data = (void *)&chuwi_hi8_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"),
+ },
+ },
+ {
+ /* Digma Citi E200 */
+ .driver_data = (void *)&digma_citi_e200_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Digma"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
{ },
};
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index a16cea2be9c3..935121814c97 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -363,7 +363,7 @@ static int sony_laptop_input_keycode_map[] = {
};
/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
{
struct sony_laptop_keypress kp;
unsigned long flags;
@@ -470,7 +470,7 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
goto err_dec_users;
}
- setup_timer(&sony_laptop_input.release_key_timer,
+ timer_setup(&sony_laptop_input.release_key_timer,
do_sony_laptop_release_key, 0);
/* input keys */
@@ -1627,7 +1627,7 @@ static const struct rfkill_ops sony_rfkill_ops = {
static int sony_nc_setup_rfkill(struct acpi_device *device,
enum sony_nc_rfkill nc_type)
{
- int err = 0;
+ int err;
struct rfkill *rfk;
enum rfkill_type type;
const char *name;
@@ -1660,17 +1660,19 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
if (!rfk)
return -ENOMEM;
- if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+ err = sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ if (err < 0) {
rfkill_destroy(rfk);
- return -1;
+ return err;
}
hwblock = !(result & 0x1);
- if (sony_call_snc_handle(sony_rfkill_handle,
- sony_rfkill_address[nc_type],
- &result) < 0) {
+ err = sony_call_snc_handle(sony_rfkill_handle,
+ sony_rfkill_address[nc_type],
+ &result);
+ if (err < 0) {
rfkill_destroy(rfk);
- return -1;
+ return err;
}
swblock = !(result & 0x2);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2242d6035d9e..117be48ff4de 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -310,8 +310,7 @@ static struct {
enum {
TP_HOTKEY_TABLET_NONE = 0,
TP_HOTKEY_TABLET_USES_MHKG,
- /* X1 Yoga 2016, seen on BIOS N1FET44W */
- TP_HOTKEY_TABLET_USES_CMMD,
+ TP_HOTKEY_TABLET_USES_GMMS,
} hotkey_tablet;
u32 kbdlight:1;
u32 light:1;
@@ -2044,8 +2043,28 @@ static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
-/* ThinkPad X1 Yoga (2016) */
-#define TP_EC_CMMD_TABLET_MODE 0x6
+enum {
+ TP_ACPI_MULTI_MODE_INVALID = 0,
+ TP_ACPI_MULTI_MODE_UNKNOWN = 1 << 0,
+ TP_ACPI_MULTI_MODE_LAPTOP = 1 << 1,
+ TP_ACPI_MULTI_MODE_TABLET = 1 << 2,
+ TP_ACPI_MULTI_MODE_FLAT = 1 << 3,
+ TP_ACPI_MULTI_MODE_STAND = 1 << 4,
+ TP_ACPI_MULTI_MODE_TENT = 1 << 5,
+ TP_ACPI_MULTI_MODE_STAND_TENT = 1 << 6,
+};
+
+enum {
+ /* The following modes are considered tablet mode for the purpose of
+ * reporting the status to userspace, i.e. in all of these modes it
+ * makes sense to disable the laptop input devices such as the
+ * touchpad and keyboard.
+ */
+ TP_ACPI_MULTI_MODE_TABLET_LIKE = TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT |
+ TP_ACPI_MULTI_MODE_STAND_TENT,
+};
static int hotkey_get_wlsw(void)
{
@@ -2066,6 +2085,90 @@ static int hotkey_get_wlsw(void)
return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF;
}
+static int hotkey_gmms_get_tablet_mode(int s, int *has_tablet_mode)
+{
+ int type = (s >> 16) & 0xffff;
+ int value = s & 0xffff;
+ int mode = TP_ACPI_MULTI_MODE_INVALID;
+ int valid_modes = 0;
+
+ if (has_tablet_mode)
+ *has_tablet_mode = 0;
+
+ switch (type) {
+ case 1:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND_TENT;
+ break;
+ case 2:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ case 3:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 4:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ case 5:
+ valid_modes = TP_ACPI_MULTI_MODE_LAPTOP |
+ TP_ACPI_MULTI_MODE_FLAT |
+ TP_ACPI_MULTI_MODE_TABLET |
+ TP_ACPI_MULTI_MODE_STAND |
+ TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ pr_err("Unknown multi mode status type %d with value 0x%04X, please report this to %s\n",
+ type, value, TPACPI_MAIL);
+ return 0;
+ }
+
+ if (has_tablet_mode && (valid_modes & TP_ACPI_MULTI_MODE_TABLET_LIKE))
+ *has_tablet_mode = 1;
+
+ switch (value) {
+ case 1:
+ mode = TP_ACPI_MULTI_MODE_LAPTOP;
+ break;
+ case 2:
+ mode = TP_ACPI_MULTI_MODE_FLAT;
+ break;
+ case 3:
+ mode = TP_ACPI_MULTI_MODE_TABLET;
+ break;
+ case 4:
+ if (type == 1)
+ mode = TP_ACPI_MULTI_MODE_STAND_TENT;
+ else
+ mode = TP_ACPI_MULTI_MODE_STAND;
+ break;
+ case 5:
+ mode = TP_ACPI_MULTI_MODE_TENT;
+ break;
+ default:
+ if (type == 5 && value == 0xffff) {
+ pr_warn("Multi mode status is undetected, assuming laptop\n");
+ return 0;
+ }
+ }
+
+ if (!(mode & valid_modes)) {
+ pr_err("Unknown/reserved multi mode value 0x%04X for type %d, please report this to %s\n",
+ value, type, TPACPI_MAIL);
+ return 0;
+ }
+
+ return !!(mode & TP_ACPI_MULTI_MODE_TABLET_LIKE);
+}
+
static int hotkey_get_tablet_mode(int *status)
{
int s;
@@ -2077,11 +2180,11 @@ static int hotkey_get_tablet_mode(int *status)
*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
break;
- case TP_HOTKEY_TABLET_USES_CMMD:
- if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+ case TP_HOTKEY_TABLET_USES_GMMS:
+ if (!acpi_evalf(hkey_handle, &s, "GMMS", "dd", 0))
return -EIO;
- *status = (s == TP_EC_CMMD_TABLET_MODE);
+ *status = hotkey_gmms_get_tablet_mode(s, NULL);
break;
default:
break;
@@ -3113,16 +3216,19 @@ static int hotkey_init_tablet_mode(void)
int in_tablet_mode = 0, res;
char *type = NULL;
- if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+ if (acpi_evalf(hkey_handle, &res, "GMMS", "qdd", 0)) {
+ int has_tablet_mode;
+
+ in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
+ &has_tablet_mode);
+ if (has_tablet_mode)
+ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
+ type = "GMMS";
+ } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
/* For X41t, X60t, X61t Tablets... */
tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
type = "MHKG";
- } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
- /* For X1 Yoga (2016) */
- tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
- in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
- type = "CMMD";
}
if (!tp_features.hotkey_tablet)
@@ -9543,7 +9649,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
},
};
-static int __init set_ibm_param(const char *val, struct kernel_param *kp)
+static int __init set_ibm_param(const char *val, const struct kernel_param *kp)
{
unsigned int i;
struct ibm_struct *ibm;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index bb1dcd7fbdeb..e8d058c5ef21 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -2510,7 +2510,6 @@ static const struct iio_chan_spec toshiba_iio_accel_channels[] = {
};
static const struct iio_info toshiba_iio_accel_info = {
- .driver_module = THIS_MODULE,
.read_raw = &toshiba_iio_accel_read_raw,
};
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 0765b1797d4c..791449a2370f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -33,17 +33,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
+#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
+#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/wmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uuid.h>
+#include <linux/wmi.h>
+#include <uapi/linux/wmi.h>
ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
@@ -69,9 +72,12 @@ struct wmi_block {
struct wmi_device dev;
struct list_head list;
struct guid_block gblock;
+ struct miscdevice char_dev;
+ struct mutex char_mutex;
struct acpi_device *acpi_device;
wmi_notify_handler handler;
void *handler_data;
+ u64 req_buf_size;
bool read_takes_no_args;
};
@@ -188,6 +194,25 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
/*
* Exported WMI functions
*/
+
+/**
+ * set_required_buffer_size - Sets the buffer size needed for performing IOCTLs
+ * @wdev: A wmi bus device from a driver
+ * @length: Required buffer size
+ *
+ * Stores the buffer size the WMI bus allocates for the driver's character device
+ */
+int set_required_buffer_size(struct wmi_device *wdev, u64 length)
+{
+ struct wmi_block *wblock;
+
+ wblock = container_of(wdev, struct wmi_block, dev);
+ wblock->req_buf_size = length;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_required_buffer_size);
+
/**
* wmi_evaluate_method - Evaluate a WMI method
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -201,6 +226,28 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
{
+ struct wmi_block *wblock = NULL;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+ return wmidev_evaluate_method(&wblock->dev, instance, method_id,
+ in, out);
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+
+/**
+ * wmidev_evaluate_method - Evaluate a WMI method
+ * @wdev: A wmi bus device from a driver
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * @in: Buffer containing input for the method call
+ * @out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
+ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
acpi_handle handle;
@@ -209,9 +256,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
union acpi_object params[3];
char method[5] = "WM";
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
-
+ wblock = container_of(wdev, struct wmi_block, dev);
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
@@ -246,7 +291,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
return status;
}
-EXPORT_SYMBOL_GPL(wmi_evaluate_method);
+EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
struct acpi_buffer *out)
@@ -348,23 +393,6 @@ union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
}
EXPORT_SYMBOL_GPL(wmidev_block_query);
-struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
- const char *guid_string)
-{
- struct wmi_block *this_wb = container_of(wdev, struct wmi_block, dev);
- struct wmi_block *other_wb;
-
- if (!find_guid(guid_string, &other_wb))
- return NULL;
-
- if (other_wb->acpi_device != this_wb->acpi_device)
- return NULL;
-
- get_device(&other_wb->dev.dev);
- return &other_wb->dev;
-}
-EXPORT_SYMBOL_GPL(wmidev_get_other_guid);
-
/**
* wmi_set_block - Write to a WMI block
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -761,6 +789,113 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
return 0;
}
+static int wmi_char_open(struct inode *inode, struct file *filp)
+{
+ const char *driver_name = filp->f_path.dentry->d_iname;
+ struct wmi_block *wblock = NULL;
+ struct wmi_block *next = NULL;
+
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ if (!wblock->dev.dev.driver)
+ continue;
+ if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) {
+ filp->private_data = wblock;
+ break;
+ }
+ }
+
+ if (!filp->private_data)
+ return -ENODEV;
+
+ return nonseekable_open(inode, filp);
+}
+
+static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct wmi_block *wblock = filp->private_data;
+
+ return simple_read_from_buffer(buffer, length, offset,
+ &wblock->req_buf_size,
+ sizeof(wblock->req_buf_size));
+}
+
+static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct wmi_ioctl_buffer __user *input =
+ (struct wmi_ioctl_buffer __user *) arg;
+ struct wmi_block *wblock = filp->private_data;
+ struct wmi_ioctl_buffer *buf = NULL;
+ struct wmi_driver *wdriver = NULL;
+ int ret;
+
+ if (_IOC_TYPE(cmd) != WMI_IOC)
+ return -ENOTTY;
+
+ /* make sure we're not calling a higher instance than exists */
+ if (_IOC_NR(cmd) >= wblock->gblock.instance_count)
+ return -EINVAL;
+
+ mutex_lock(&wblock->char_mutex);
+ buf = wblock->handler_data;
+ if (get_user(buf->length, &input->length)) {
+ dev_dbg(&wblock->dev.dev, "Read length from user failed\n");
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+ /* if it's too small, abort */
+ if (buf->length < wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Buffer %lld too small, need at least %lld\n",
+ buf->length, wblock->req_buf_size);
+ ret = -EINVAL;
+ goto out_ioctl;
+ }
+ /* if it's too big, warn; the driver will only use what is needed */
+ if (buf->length > wblock->req_buf_size)
+ dev_warn(&wblock->dev.dev,
+ "Buffer %lld is bigger than required %lld\n",
+ buf->length, wblock->req_buf_size);
+
+ /* copy the structure from userspace */
+ if (copy_from_user(buf, input, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ goto out_ioctl;
+ }
+
+ /* let the driver do any filtering and do the call */
+ wdriver = container_of(wblock->dev.dev.driver,
+ struct wmi_driver, driver);
+ if (!try_module_get(wdriver->driver.owner)) {
+ ret = -EBUSY;
+ goto out_ioctl;
+ }
+ ret = wdriver->filter_callback(&wblock->dev, cmd, buf);
+ module_put(wdriver->driver.owner);
+ if (ret)
+ goto out_ioctl;
+
+ /* return the result (only up to our internal buffer size) */
+ if (copy_to_user(input, buf, wblock->req_buf_size)) {
+ dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n",
+ wblock->req_buf_size);
+ ret = -EFAULT;
+ }
+
+out_ioctl:
+ mutex_unlock(&wblock->char_mutex);
+ return ret;
+}
+
+static const struct file_operations wmi_fops = {
+ .owner = THIS_MODULE,
+ .read = wmi_char_read,
+ .open = wmi_char_open,
+ .unlocked_ioctl = wmi_ioctl,
+ .compat_ioctl = wmi_ioctl,
+};
static int wmi_dev_probe(struct device *dev)
{
@@ -768,16 +903,63 @@ static int wmi_dev_probe(struct device *dev)
struct wmi_driver *wdriver =
container_of(dev->driver, struct wmi_driver, driver);
int ret = 0;
+ int count;
+ char *buf;
if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
if (wdriver->probe) {
ret = wdriver->probe(dev_to_wdev(dev));
- if (ret != 0 && ACPI_FAILURE(wmi_method_enable(wblock, 0)))
- dev_warn(dev, "failed to disable device\n");
+ if (ret != 0)
+ goto probe_failure;
+ }
+
+ /* driver wants a character device made */
+ if (wdriver->filter_callback) {
+ /* check that the required buffer size was declared by the driver or MOF */
+ if (!wblock->req_buf_size) {
+ dev_err(&wblock->dev.dev,
+ "Required buffer size not set\n");
+ ret = -EINVAL;
+ goto probe_failure;
+ }
+
+ count = get_order(wblock->req_buf_size);
+ wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL,
+ count);
+ if (!wblock->handler_data) {
+ ret = -ENOMEM;
+ goto probe_failure;
+ }
+
+ buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto probe_string_failure;
+ }
+ sprintf(buf, "wmi/%s", wdriver->driver.name);
+ wblock->char_dev.minor = MISC_DYNAMIC_MINOR;
+ wblock->char_dev.name = buf;
+ wblock->char_dev.fops = &wmi_fops;
+ wblock->char_dev.mode = 0444;
+ ret = misc_register(&wblock->char_dev);
+ if (ret) {
+ dev_warn(dev, "failed to register char dev: %d", ret);
+ ret = -ENOMEM;
+ goto probe_misc_failure;
+ }
}
+ return 0;
+
+probe_misc_failure:
+ kfree(buf);
+probe_string_failure:
+ kfree(wblock->handler_data);
+probe_failure:
+ if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ dev_warn(dev, "failed to disable device\n");
return ret;
}
@@ -788,6 +970,13 @@ static int wmi_dev_remove(struct device *dev)
container_of(dev->driver, struct wmi_driver, driver);
int ret = 0;
+ if (wdriver->filter_callback) {
+ misc_deregister(&wblock->char_dev);
+ kfree(wblock->char_dev.name);
+ free_pages((unsigned long)wblock->handler_data,
+ get_order(wblock->req_buf_size));
+ }
+
if (wdriver->remove)
ret = wdriver->remove(dev_to_wdev(dev));
@@ -844,6 +1033,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
if (gblock->flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
+ mutex_init(&wblock->char_mutex);
goto out_init;
}
@@ -1145,7 +1335,7 @@ static int acpi_wmi_remove(struct platform_device *device)
acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
wmi_free_devices(acpi_device);
- device_unregister((struct device *)dev_get_drvdata(&device->dev));
+ device_destroy(&wmi_bus_class, MKDEV(0, 0));
return 0;
}
@@ -1199,7 +1389,7 @@ static int acpi_wmi_probe(struct platform_device *device)
return 0;
err_remove_busdev:
- device_unregister(wmi_bus_dev);
+ device_destroy(&wmi_bus_class, MKDEV(0, 0));
err_remove_notify_handler:
acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
@@ -1264,8 +1454,8 @@ err_unreg_class:
static void __exit acpi_wmi_exit(void)
{
platform_driver_unregister(&acpi_wmi_driver);
- class_unregister(&wmi_bus_class);
bus_unregister(&wmi_bus_type);
+ class_unregister(&wmi_bus_class);
}
subsys_initcall(acpi_wmi_init);
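
For context, a dependent WMI driver opts into the character-device interface added above by declaring a filter_callback and reporting its required buffer size. The sketch below is a hypothetical illustration, not part of this patch: the GUID and buffer size are placeholders, and the filter_callback signature and wmi_ioctl_buffer layout are assumed from how wmi_ioctl() invokes them in the hunks above.

/* Hypothetical WMI driver sketch using the new char-dev/ioctl interface.
 * GUID, buffer size and the filtering logic are made-up placeholders.
 */
#include <linux/module.h>
#include <linux/wmi.h>
#include <uapi/linux/wmi.h>

#define EXAMPLE_GUID "ABCDEF00-0000-0000-0000-000000000000"

static int example_wmi_probe(struct wmi_device *wdev)
{
	/* Tell the WMI bus how large the ioctl buffer must be (assumed 4 KiB) */
	return set_required_buffer_size(wdev, 4096);
}

/* Invoked from wmi_ioctl(); the signature is assumed from its call site */
static long example_wmi_filter(struct wmi_device *wdev, unsigned int cmd,
			       struct wmi_ioctl_buffer *arg)
{
	/* Validate or rewrite the request here before it reaches firmware */
	return 0;
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = EXAMPLE_GUID },
	{ },
};

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.probe = example_wmi_probe,
	.filter_callback = example_wmi_filter,
	.id_table = example_wmi_id_table,
};

module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");
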
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index bfba893cb321..71d532f18219 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Plug-and-Play Support.
#
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 3151fd164614..cdcfa39cf167 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 31ad9fc3f701..c2464ee08e4a 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* card.c - contains functions for managing groups of PnP devices
*
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index b54620e53830..3bf18d718975 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core.c - contains all core device and protocol registration functions
*
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 63452f20e3e9..93a30a8f88d1 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver.c - device id matching, driver model, etc.
*
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 5c5b3d47b5f6..187e4a1175b0 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* interface.c - contains everything related to the user interface
*
diff --git a/drivers/pnp/isapnp/compat.c b/drivers/pnp/isapnp/compat.c
index 10bdcc4d4f7b..6c845b628316 100644
--- a/drivers/pnp/isapnp/compat.c
+++ b/drivers/pnp/isapnp/compat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* compat.c - A series of functions to make it easier to convert drivers that use
* the old isapnp APIs. If possible use the new APIs instead.
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 7ad3295752ef..144055593ec8 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* manager.c - Resource Management, Conflict Resolution, Activation and Disabling of Devices
*
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index 051ef9699777..4489cd6dbc84 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ACPI_PNP_H
#define ACPI_PNP_H
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index ff563db025b3..ba5cfc3dbe11 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bioscalls.c - the lowlevel layer of the PnPBIOS driver
*/
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 5ee6b2a5f8d5..7d4aca7948dd 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* /proc/bus/pnp interface for Plug and Play devices
*
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index cca2f9f9f3e3..2f31b212b1a5 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* rsparser.c - parses and encodes pnpbios resource data streams
*/
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index d28e3ab9479c..f054cdddfef8 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file contains quirk handling code for PnP devices
* Some devices do not report all their resources, and need to have extra
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index f980ff7166e9..70d4ba95735a 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* resource.c - Contains functions for registering and analyzing resource information
*
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index f5beb24d036a..e4f53d31191d 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* support.c - standard functions for the use of pnp protocol drivers
*
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 49c1720df59a..6950503741eb 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* system.c - a driver for reserving pnp system resources
*
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 974fd684bab2..89bf4d6cb486 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -355,7 +355,7 @@ int sr_configure_errgen(struct omap_sr *sr)
u8 senp_shift, senn_shift;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -422,7 +422,7 @@ int sr_disable_errgen(struct omap_sr *sr)
u32 vpboundint_en, vpboundint_st;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -477,7 +477,7 @@ int sr_configure_minmax(struct omap_sr *sr)
u8 senp_shift, senn_shift;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -562,7 +562,7 @@ int sr_enable(struct omap_sr *sr, unsigned long volt)
int ret;
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return -EINVAL;
}
@@ -614,7 +614,7 @@ int sr_enable(struct omap_sr *sr, unsigned long volt)
void sr_disable(struct omap_sr *sr)
{
if (!sr) {
- pr_warn("%s: NULL omap_sr from %pF\n",
+ pr_warn("%s: NULL omap_sr from %pS\n",
__func__, (void *)_RET_IP_);
return;
}
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 58cf5b30559f..aeb65edb17b7 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index de878fd26f27..ff75af5abbc5 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Gemini power management controller
* Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
index bacfc95783f0..20ce3ff5e039 100644
--- a/drivers/power/reset/piix4-poweroff.c
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2016 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
+ * Author: Paul Burton <paul.burton@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -109,5 +109,5 @@ static struct pci_driver piix4_poweroff_driver = {
};
module_pci_driver(piix4_poweroff_driver);
-MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_AUTHOR("Paul Burton <paul.burton@mips.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 5ab90c1f3f7c..428b426842f4 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -182,7 +182,21 @@ config CHARGER_SBS
tristate "SBS Compliant charger"
depends on I2C
help
- Say Y to include support for SBS compilant battery chargers.
+ Say Y to include support for SBS compliant battery chargers.
+
+config MANAGER_SBS
+ tristate "Smart Battery System Manager"
+ depends on I2C && I2C_MUX && GPIOLIB
+ select I2C_SMBUS
+ help
+ Say Y here to include support for Smart Battery System Manager
+ ICs. The driver reports online and charging status via sysfs.
+ It also presents itself as an I2C mux, which allows a smart battery
+ driver to be bound to each of its ports.
+ The LTC1760, for example, is supported.
+
+ This driver can also be built as a module. If so, the module will be
+ called sbs-manager.
config BATTERY_BQ27XXX
tristate "BQ27xxx battery driver"
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 621a19058fec..e83aa843bcc6 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-$(CONFIG_POWER_SUPPLY_DEBUG) := -DDEBUG
power_supply-y := power_supply_core.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
obj-$(CONFIG_CHARGER_SBS) += sbs-charger.o
+obj-$(CONFIG_MANAGER_SBS) += sbs-manager.o
obj-$(CONFIG_BATTERY_BQ27XXX) += bq27xxx_battery.o
obj-$(CONFIG_BATTERY_BQ27XXX_I2C) += bq27xxx_battery_i2c.o
obj-$(CONFIG_BATTERY_BQ27XXX_HDQ) += bq27xxx_battery_hdq.o
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index 8c49586015d0..4a7ed50d1dc5 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/power_supply.h>
#include <linux/of.h>
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index 11a07633de6c..e4905bef2663 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -484,7 +484,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
+ if (irq < 0)
return -ENODEV;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index 37e523374fe0..28dc056eaafa 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -201,14 +201,12 @@ err:
static void gab_work(struct work_struct *work)
{
struct gab *adc_bat;
- struct gab_platform_data *pdata;
struct delayed_work *delayed_work;
bool is_plugged;
int status;
delayed_work = to_delayed_work(work);
adc_bat = container_of(delayed_work, struct gab, bat_work);
- pdata = adc_bat->pdata;
status = adc_bat->status;
is_plugged = power_supply_am_i_supplied(adc_bat->psy);
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index fa861003fece..c73fb4221695 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -146,8 +146,7 @@ static int max8997_battery_probe(struct platform_device *pdev)
return ret;
}
- charger = devm_kzalloc(&pdev->dev, sizeof(struct charger_data),
- GFP_KERNEL);
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
if (!charger)
return -ENOMEM;
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index 1ad7ccce6075..1aba14046a83 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -43,7 +43,6 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev);
int ret = 0;
u8 bits;
- int charging_start = 1;
u8 mbcs2, chgmod;
unsigned int mbcc5;
@@ -58,7 +57,6 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma)
ma = 100;
} else {
bits = PCF50633_MBCC7_USB_SUSPEND;
- charging_start = 0;
ma = 0;
}
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 02c6340ae36f..82f998ab5a52 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -669,7 +669,7 @@ EXPORT_SYMBOL_GPL(power_supply_powers);
static void power_supply_dev_release(struct device *dev)
{
struct power_supply *psy = container_of(dev, struct power_supply, dev);
- pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ dev_dbg(dev, "%s\n", __func__);
kfree(psy);
}
diff --git a/drivers/power/supply/qcom_smbb.c b/drivers/power/supply/qcom_smbb.c
index f6a0d245731d..11de691b9a71 100644
--- a/drivers/power/supply/qcom_smbb.c
+++ b/drivers/power/supply/qcom_smbb.c
@@ -34,7 +34,7 @@
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/regulator/driver.h>
#define SMBB_CHG_VMAX 0x040
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index b19a73176910..83d7b4115857 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -177,10 +177,8 @@ static bool force_load;
static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
struct sbs_info *chip = i2c_get_clientdata(client);
+ int retries = chip->i2c_retry_count;
s32 ret = 0;
- int retries = 1;
-
- retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -204,7 +202,7 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address,
{
struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0, block_length = 0;
- int retries_length = 1, retries_block = 1;
+ int retries_length, retries_block;
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
retries_length = chip->i2c_retry_count;
@@ -269,10 +267,8 @@ static int sbs_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
struct sbs_info *chip = i2c_get_clientdata(client);
+ int retries = chip->i2c_retry_count;
s32 ret = 0;
- int retries = 1;
-
- retries = chip->i2c_retry_count;
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address, value);
@@ -321,16 +317,6 @@ static int sbs_get_battery_presence_and_health(
union power_supply_propval *val)
{
s32 ret;
- struct sbs_info *chip = i2c_get_clientdata(client);
-
- if (psp == POWER_SUPPLY_PROP_PRESENT && chip->gpio_detect) {
- ret = gpiod_get_value_cansleep(chip->gpio_detect);
- if (ret < 0)
- return ret;
- val->intval = ret;
- chip->is_present = val->intval;
- return ret;
- }
/*
* Write to ManufacturerAccess with ManufacturerAccess command
@@ -570,7 +556,7 @@ static int sbs_get_battery_serial_number(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = sprintf(sbs_serial, "%04x", ret);
+ sprintf(sbs_serial, "%04x", ret);
val->strval = sbs_serial;
return 0;
@@ -598,6 +584,19 @@ static int sbs_get_property(struct power_supply *psy,
struct sbs_info *chip = power_supply_get_drvdata(psy);
struct i2c_client *client = chip->client;
+ if (chip->gpio_detect) {
+ ret = gpiod_get_value_cansleep(chip->gpio_detect);
+ if (ret < 0)
+ return ret;
+ if (psp == POWER_SUPPLY_PROP_PRESENT) {
+ val->intval = ret;
+ chip->is_present = val->intval;
+ return 0;
+ }
+ if (ret == 0)
+ return -ENODATA;
+ }
+
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
new file mode 100644
index 000000000000..ccb4217b9638
--- /dev/null
+++ b/drivers/power/supply/sbs-manager.c
@@ -0,0 +1,445 @@
+/*
+ * Driver for SBS compliant Smart Battery System Managers
+ *
+ * The device communicates via i2c at address 0x0a and multiplexes access to up
+ * to four smart batteries at address 0x0b.
+ *
+ * The online state and charge type are presented via the sysfs interface.
+ *
+ * Datasheet SBSM: http://sbs-forum.org/specs/sbsm100b.pdf
+ * Datasheet LTC1760: http://cds.linear.com/docs/en/datasheet/1760fb.pdf
+ *
+ * Karl-Heinz Schneider <karl-heinz@schneider-inet.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+
+#define SBSM_MAX_BATS 4
+#define SBSM_RETRY_CNT 3
+
+/* registers addresses */
+#define SBSM_CMD_BATSYSSTATE 0x01
+#define SBSM_CMD_BATSYSSTATECONT 0x02
+#define SBSM_CMD_BATSYSINFO 0x04
+#define SBSM_CMD_LTC 0x3c
+
+#define SBSM_MASK_BAT_SUPPORTED GENMASK(3, 0)
+#define SBSM_MASK_CHARGE_BAT GENMASK(7, 4)
+#define SBSM_BIT_AC_PRESENT BIT(0)
+#define SBSM_BIT_TURBO BIT(7)
+
+#define SBSM_SMB_BAT_OFFSET 11
+struct sbsm_data {
+ struct i2c_client *client;
+ struct i2c_mux_core *muxc;
+
+ struct power_supply *psy;
+
+ u8 cur_chan; /* currently selected channel */
+ struct gpio_chip chip;
+ bool is_ltc1760; /* special capabilities */
+
+ unsigned int supported_bats;
+ unsigned int last_state;
+ unsigned int last_state_cont;
+};
+
+static enum power_supply_property sbsm_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+};
+
+static int sbsm_read_word(struct i2c_client *client, u8 address)
+{
+ int reg, retries;
+
+ for (retries = SBSM_RETRY_CNT; retries > 0; retries--) {
+ reg = i2c_smbus_read_word_data(client, address);
+ if (reg >= 0)
+ break;
+ }
+
+ if (reg < 0) {
+ dev_err(&client->dev, "failed to read register 0x%02x\n",
+ address);
+ }
+
+ return reg;
+}
+
+static int sbsm_write_word(struct i2c_client *client, u8 address, u16 word)
+{
+ int ret, retries;
+
+ for (retries = SBSM_RETRY_CNT; retries > 0; retries--) {
+ ret = i2c_smbus_write_word_data(client, address, word);
+ if (ret >= 0)
+ break;
+ }
+ if (ret < 0)
+ dev_err(&client->dev, "failed to write to register 0x%02x\n",
+ address);
+
+ return ret;
+}
+
+static int sbsm_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+ int regval = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ regval = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATECONT);
+ if (regval < 0)
+ return regval;
+ val->intval = !!(regval & SBSM_BIT_AC_PRESENT);
+ break;
+
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ regval = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATE);
+ if (regval < 0)
+ return regval;
+
+ if ((regval & SBSM_MASK_CHARGE_BAT) == 0) {
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ return 0;
+ }
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ if (data->is_ltc1760) {
+ /* charge mode fast if turbo is active */
+ regval = sbsm_read_word(data->client, SBSM_CMD_LTC);
+ if (regval < 0)
+ return regval;
+ else if (regval & SBSM_BIT_TURBO)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sbsm_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+
+ return (psp == POWER_SUPPLY_PROP_CHARGE_TYPE) && data->is_ltc1760;
+}
+
+static int sbsm_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct sbsm_data *data = power_supply_get_drvdata(psy);
+ int ret = -EINVAL;
+ u16 regval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ /* write 1 to TURBO if type fast is given */
+ if (!data->is_ltc1760)
+ break;
+ regval = val->intval ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST ? SBSM_BIT_TURBO : 0;
+ ret = sbsm_write_word(data->client, SBSM_CMD_LTC, regval);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Switch to battery
+ * The chan parameter (1..4) maps directly onto the SMB_BAT* nibble bit.
+ */
+static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
+{
+ struct sbsm_data *data = i2c_mux_priv(muxc);
+ struct device *dev = &data->client->dev;
+ int ret = 0;
+ u16 reg;
+
+ if (data->cur_chan == chan)
+ return ret;
+
+ /* chan goes from 1 ... 4 */
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
+ ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
+ if (ret)
+ dev_err(dev, "Failed to select channel %i\n", chan);
+ else
+ data->cur_chan = chan;
+
+ return ret;
+}
+
+static int sbsm_gpio_get_value(struct gpio_chip *gc, unsigned int off)
+{
+ struct sbsm_data *data = gpiochip_get_data(gc);
+ int ret;
+
+ ret = sbsm_read_word(data->client, SBSM_CMD_BATSYSSTATE);
+ if (ret < 0)
+ return ret;
+
+ return ret & BIT(off);
+}
+
+/*
+ * This needs to be defined or the GPIO lib fails to register the pin.
+ * But the 'gpio' is always an input.
+ */
+static int sbsm_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
+{
+ return 0;
+}
+
+static int sbsm_do_alert(struct device *dev, void *d)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_driver *driver;
+
+ if (!client || client->addr != 0x0b)
+ return 0;
+
+ device_lock(dev);
+ if (client->dev.driver) {
+ driver = to_i2c_driver(client->dev.driver);
+ if (driver->alert)
+ driver->alert(client, I2C_PROTOCOL_SMBUS_ALERT, 0);
+ else
+ dev_warn(&client->dev, "no driver alert()!\n");
+ } else {
+ dev_dbg(&client->dev, "alert with no driver\n");
+ }
+ device_unlock(dev);
+
+ return -EBUSY;
+}
+
+static void sbsm_alert(struct i2c_client *client, enum i2c_alert_protocol prot,
+ unsigned int d)
+{
+ struct sbsm_data *sbsm = i2c_get_clientdata(client);
+
+ int ret, i, irq_bat = 0, state = 0;
+
+ ret = sbsm_read_word(sbsm->client, SBSM_CMD_BATSYSSTATE);
+ if (ret >= 0) {
+ irq_bat = ret ^ sbsm->last_state;
+ sbsm->last_state = ret;
+ state = ret;
+ }
+
+ ret = sbsm_read_word(sbsm->client, SBSM_CMD_BATSYSSTATECONT);
+ if ((ret >= 0) &&
+ ((ret ^ sbsm->last_state_cont) & SBSM_BIT_AC_PRESENT)) {
+ irq_bat |= sbsm->supported_bats & state;
+ power_supply_changed(sbsm->psy);
+ }
+ sbsm->last_state_cont = ret;
+
+ for (i = 0; i < SBSM_MAX_BATS; i++) {
+ if (irq_bat & BIT(i)) {
+ device_for_each_child(&sbsm->muxc->adapter[i]->dev,
+ NULL, sbsm_do_alert);
+ }
+ }
+}
+
+static int sbsm_gpio_setup(struct sbsm_data *data)
+{
+ struct gpio_chip *gc = &data->chip;
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (!device_property_present(dev, "gpio-controller"))
+ return 0;
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSSTATE);
+ if (ret < 0)
+ return ret;
+ data->last_state = ret;
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSSTATECONT);
+ if (ret < 0)
+ return ret;
+ data->last_state_cont = ret;
+
+ gc->get = sbsm_gpio_get_value;
+ gc->direction_input = sbsm_gpio_direction_input;
+ gc->can_sleep = true;
+ gc->base = -1;
+ gc->ngpio = SBSM_MAX_BATS;
+ gc->label = client->name;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, gc, data);
+ if (ret) {
+ dev_err(dev, "devm_gpiochip_add_data failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct power_supply_desc sbsm_default_psy_desc = {
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sbsm_props,
+ .num_properties = ARRAY_SIZE(sbsm_props),
+ .get_property = &sbsm_get_property,
+ .set_property = &sbsm_set_property,
+ .property_is_writeable = &sbsm_prop_is_writeable,
+};
+
+static int sbsm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct sbsm_data *data;
+ struct device *dev = &client->dev;
+ struct power_supply_desc *psy_desc;
+ struct power_supply_config psy_cfg = {};
+ int ret = 0, i;
+
+ /* Device listens only at address 0x0a */
+ if (client->addr != 0x0a)
+ return -EINVAL;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EPFNOSUPPORT;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+
+ data->client = client;
+ data->is_ltc1760 = !!strstr(id->name, "ltc1760");
+
+ ret = sbsm_read_word(client, SBSM_CMD_BATSYSINFO);
+ if (ret < 0)
+ return ret;
+ data->supported_bats = ret & SBSM_MASK_BAT_SUPPORTED;
+ data->muxc = i2c_mux_alloc(adapter, dev, SBSM_MAX_BATS, 0,
+ I2C_MUX_LOCKED, &sbsm_select, NULL);
+ if (!data->muxc) {
+ dev_err(dev, "failed to alloc i2c mux\n");
+ ret = -ENOMEM;
+ goto err_mux_alloc;
+ }
+ data->muxc->priv = data;
+
+ /* register muxed i2c channels. One for each supported battery */
+ for (i = 0; i < SBSM_MAX_BATS; ++i) {
+ if (data->supported_bats & BIT(i)) {
+ ret = i2c_mux_add_adapter(data->muxc, 0, i + 1, 0);
+ if (ret)
+ break;
+ }
+ }
+ if (ret) {
+ dev_err(dev, "failed to register i2c mux channel %d\n", i + 1);
+ goto err_mux_register;
+ }
+
+ psy_desc = devm_kmemdup(dev, &sbsm_default_psy_desc,
+ sizeof(struct power_supply_desc),
+ GFP_KERNEL);
+ if (!psy_desc) {
+ ret = -ENOMEM;
+ goto err_psy;
+ }
+
+ psy_desc->name = devm_kasprintf(dev, GFP_KERNEL, "sbsm-%s",
+ dev_name(&client->dev));
+ if (!psy_desc->name) {
+ ret = -ENOMEM;
+ goto err_psy;
+ }
+ ret = sbsm_gpio_setup(data);
+ if (ret < 0)
+ goto err_psy;
+
+ psy_cfg.drv_data = data;
+ psy_cfg.of_node = dev->of_node;
+ data->psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
+ if (IS_ERR(data->psy)) {
+ ret = PTR_ERR(data->psy);
+ dev_err(dev, "failed to register power supply %s\n",
+ psy_desc->name);
+ goto err_psy;
+ }
+
+ return 0;
+
+err_psy:
+err_mux_register:
+ i2c_mux_del_adapters(data->muxc);
+
+err_mux_alloc:
+ return ret;
+}
+
+static int sbsm_remove(struct i2c_client *client)
+{
+ struct sbsm_data *data = i2c_get_clientdata(client);
+
+ i2c_mux_del_adapters(data->muxc);
+ return 0;
+}
+
+static const struct i2c_device_id sbsm_ids[] = {
+ { "sbs-manager", 0 },
+ { "ltc1760", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sbsm_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id sbsm_dt_ids[] = {
+ { .compatible = "sbs,sbs-manager" },
+ { .compatible = "lltc,ltc1760" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sbsm_dt_ids);
+#endif
+
+static struct i2c_driver sbsm_driver = {
+ .driver = {
+ .name = "sbsm",
+ .of_match_table = of_match_ptr(sbsm_dt_ids),
+ },
+ .probe = sbsm_probe,
+ .remove = sbsm_remove,
+ .alert = sbsm_alert,
+ .id_table = sbsm_ids
+};
+module_i2c_driver(sbsm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Karl-Heinz Schneider <karl-heinz@schneider-inet.de>");
+MODULE_DESCRIPTION("SBSM Smart Battery System Manager");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index a5915f498eea..bbcaee56db9d 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -743,7 +743,7 @@ static int twl4030bci_state(struct twl4030_bci *bci)
ret = twl4030_bci_read(TWL4030_BCIMSTATEC, &state);
if (ret) {
- pr_err("twl4030_bci: error reading BCIMSTATEC\n");
+ dev_err(bci->dev, "error reading BCIMSTATEC\n");
return ret;
}
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 436b4e4e71a1..04735649052a 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -39,7 +39,7 @@ static struct timer_list ktimer;
* The kernel timer
*/
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
{
struct pps_event_time ts;
@@ -85,7 +85,7 @@ static int __init pps_ktimer_init(void)
return -ENOMEM;
}
- setup_timer(&ktimer, pps_ktimer_event, 0);
+ timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
dev_info(pps->dev, "ktimer PPS source registered\n");
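This hunk, like the ras/cec.c change later in this series, converts from setup_timer() with an unsigned long cookie to timer_setup() with a struct timer_list * callback. A minimal sketch of the general pattern, assuming kernel context (the my_* names are placeholders, not part of the patch):

/*
 * Illustrative only: the converted callback receives the timer pointer
 * itself and uses from_timer()/container_of() to reach its enclosing
 * state instead of an unsigned long cookie.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_state {
	struct timer_list timer;
	unsigned long events;
};

static void my_timer_fn(struct timer_list *t)
{
	struct my_state *st = from_timer(st, t, timer);

	st->events++;
	mod_timer(&st->timer, jiffies + HZ);	/* re-arm one second later */
}

static void my_state_init(struct my_state *st)
{
	timer_setup(&st->timer, my_timer_fn, 0);
	mod_timer(&st->timer, jiffies + HZ);
}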
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index d1f2fb19c980..fd28207f5379 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for PTP 1588 clock support.
#
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c
index 2b1b212c219e..c67dd11e08b1 100644
--- a/drivers/ptp/ptp_kvm.c
+++ b/drivers/ptp/ptp_kvm.c
@@ -178,8 +178,11 @@ static int __init ptp_kvm_init(void)
{
long ret;
+ if (!kvm_para_available())
+ return -ENODEV;
+
clock_pair_gpa = slow_virt_to_phys(&clock_pair);
- hv_clock = pvclock_pvti_cpu0_va();
+ hv_clock = pvclock_get_pvti_cpu0_va();
if (!hv_clock)
return -ENODEV;
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ebefba5f528b..0258a745f30c 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PWM) += core.o
obj-$(CONFIG_PWM_SYSFS) += sysfs.o
obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 75db585a2a94..acd3ce8ecf3f 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device {
unsigned period; /* PWM period expressed in clk cycles */
};
+struct atmel_tcb_channel {
+ u32 enabled;
+ u32 cmr;
+ u32 ra;
+ u32 rb;
+ u32 rc;
+};
+
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
struct atmel_tc *tc;
struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_channel bkup[NPWM / 2];
};
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
@@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
- if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
__raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
regs + ATMEL_TC_REG(group, CCR));
- else
+ tcbpwmc->bkup[group].enabled = 1;
+ } else {
__raw_writel(ATMEL_TC_SWTRG, regs +
ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 0;
+ }
spin_unlock(&tcbpwmc->lock);
}
@@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Use software trigger to apply the new setting */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
regs + ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_tcb_pwm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
+ chan->ra = readl(base + ATMEL_TC_REG(i, RA));
+ chan->rb = readl(base + ATMEL_TC_REG(i, RB));
+ chan->rc = readl(base + ATMEL_TC_REG(i, RC));
+ }
+ return 0;
+}
+
+static int atmel_tcb_pwm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
+ writel(chan->ra, base + ATMEL_TC_REG(i, RA));
+ writel(chan->rb, base + ATMEL_TC_REG(i, RB));
+ writel(chan->rc, base + ATMEL_TC_REG(i, RC));
+ if (chan->enabled) {
+ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ base + ATMEL_TC_REG(i, CCR));
+ }
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+ atmel_tcb_pwm_resume);
+
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
+ .pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
.remove = atmel_tcb_pwm_remove,
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 2fb30deee345..815f5333bb8f 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
+#define IMG_PWM_PM_TIMEOUT 1000 /* ms */
+
/*
* PWM period is specified with a timebase register,
* in number of step periods. The PWM duty cycle is also
@@ -52,6 +55,8 @@
*/
#define MIN_TMBASE_STEPS 16
+#define IMG_PWM_NPWM 4
+
struct img_pwm_soc_data {
u32 max_timebase;
};
@@ -66,6 +71,8 @@ struct img_pwm_chip {
int max_period_ns;
int min_period_ns;
const struct img_pwm_soc_data *data;
+ u32 suspend_ctrl_cfg;
+ u32 suspend_ch_cfg[IMG_PWM_NPWM];
};
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long mul, output_clk_hz, input_clk_hz;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
unsigned int max_timebase = pwm_chip->data->max_timebase;
+ int ret;
if (period_ns < pwm_chip->min_period_ns ||
period_ns > pwm_chip->max_period_ns) {
@@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
+
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
@@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
+
return 0;
}
@@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
u32 val;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val |= BIT(pwm->hwpwm);
@@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
}
static const struct pwm_ops img_pwm_ops = {
@@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+static int img_pwm_runtime_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pwm_chip->pwm_clk);
+ clk_disable_unprepare(pwm_chip->sys_clk);
+
+ return 0;
+}
+
+static int img_pwm_runtime_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(pwm_chip->sys_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pwm_chip->pwm_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable pwm clock\n");
+ clk_disable_unprepare(pwm_chip->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int img_pwm_probe(struct platform_device *pdev)
{
int ret;
@@ -224,23 +278,20 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
- ret = clk_prepare_enable(pwm->sys_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pwm->pwm_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
- goto disable_sysclk;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_pwm_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
}
clk_rate = clk_get_rate(pwm->pwm_clk);
if (!clk_rate) {
dev_err(&pdev->dev, "pwm clock has no frequency\n");
ret = -EINVAL;
- goto disable_pwmclk;
+ goto err_suspend;
}
/* The maximum input clock divider is 512 */
@@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = 4;
+ pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
- goto disable_pwmclk;
+ goto err_suspend;
}
platform_set_drvdata(pdev, pwm);
return 0;
-disable_pwmclk:
- clk_disable_unprepare(pwm->pwm_clk);
-disable_sysclk:
- clk_disable_unprepare(pwm->sys_clk);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev)
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
u32 val;
unsigned int i;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < pwm_chip->chip.npwm; i++) {
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
@@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev)
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
}
- clk_disable_unprepare(pwm_chip->pwm_clk);
- clk_disable_unprepare(pwm_chip->sys_clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
return pwmchip_remove(&pwm_chip->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int img_pwm_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
+ PWM_CH_CFG(i));
+
+ pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int img_pwm_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
+ pwm_chip->suspend_ch_cfg[i]);
+
+ img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ if (pwm_chip->suspend_ctrl_cfg & BIT(i))
+ regmap_update_bits(pwm_chip->periph_regs,
+ PERIP_PWM_PDM_CONTROL,
+ PERIP_PWM_PDM_CONTROL_CH_MASK <<
+ PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
+ 0);
+
+ if (pm_runtime_status_suspended(dev))
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_pwm_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend,
+ img_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume)
+};
+
static struct platform_driver img_pwm_driver = {
.driver = {
.name = "img-pwm",
+ .pm = &img_pwm_pm_ops,
.of_match_table = img_pwm_of_match,
},
.probe = img_pwm_probe,
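The img-pwm conversion above wraps every register-access path in runtime PM get/put calls with autosuspend, so the clocks stay enabled only for IMG_PWM_PM_TIMEOUT ms after the last access. A hedged sketch of that pattern in isolation (kernel context assumed; the foo_* names and the 1000 ms delay are placeholders):

/*
 * Autosuspend pattern: each hardware access takes a runtime PM
 * reference, then drops it with the autosuspend helpers so the device
 * suspends only after a grace period of inactivity.
 */
#include <linux/pm_runtime.h>

static int foo_touch_hw(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resume, enabling clocks */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}

	/* ... access registers here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
	return 0;
}

static void foo_probe_pm_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}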
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b52f3afb2ba1..f5d97e0ad52b 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -40,11 +41,19 @@ enum {
MTK_CLK_PWM3,
MTK_CLK_PWM4,
MTK_CLK_PWM5,
+ MTK_CLK_PWM6,
+ MTK_CLK_PWM7,
+ MTK_CLK_PWM8,
MTK_CLK_MAX,
};
-static const char * const mtk_pwm_clk_name[] = {
- "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+ "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
+ "pwm8"
+};
+
+struct mtk_pwm_platform_data {
+ unsigned int num_pwms;
};
/**
@@ -59,6 +68,10 @@ struct mtk_pwm_chip {
struct clk *clks[MTK_CLK_MAX];
};
+static const unsigned int mtk_pwm_reg_offset[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+};
+
static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct mtk_pwm_chip, chip);
@@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
unsigned int offset)
{
- return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+ return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+ writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = {
static int mtk_pwm_probe(struct platform_device *pdev)
{
+ const struct mtk_pwm_platform_data *data;
struct mtk_pwm_chip *pc;
struct resource *res;
unsigned int i;
@@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data == NULL)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < MTK_CLK_MAX; i++) {
+ for (i = 0; i < data->num_pwms + 2; i++) {
pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
- if (IS_ERR(pc->clks[i]))
+ if (IS_ERR(pc->clks[i])) {
+ dev_err(&pdev->dev, "clock: %s fail: %ld\n",
+ mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
return PTR_ERR(pc->clks[i]);
+ }
}
platform_set_drvdata(pdev, pc);
@@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &mtk_pwm_ops;
pc->chip.base = -1;
- pc->chip.npwm = 5;
+ pc->chip.npwm = data->num_pwms;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+ .num_pwms = 8,
+};
+
+static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+ .num_pwms = 6,
+};
+
+static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+ .num_pwms = 5,
+};
+
static const struct of_device_id mtk_pwm_of_match[] = {
- { .compatible = "mediatek,mt7623-pwm" },
- { }
+ { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+ { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { },
};
MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 9793b296108f..1ac9e4384142 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
unsigned int i;
for (i = 0; i < priv->chip.npwm; i++)
- if (pwm_is_enabled(&priv->chip.pwms[i]))
- pwm_disable(&priv->chip.pwms[i]);
+ pwm_disable(&priv->chip.pwms[i]);
return pwmchip_remove(&priv->chip);
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 6d23f1d1c9b7..334199c58f1d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
struct sun4i_pwm_chip *pwm;
struct resource *res;
int ret;
- const struct of_device_id *match;
-
- match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
+ pwm->data = of_device_get_match_data(&pdev->dev);
+ if (!pwm->data)
+ return -ENODEV;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pwm->base))
@@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- pwm->data = match->data;
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
pwm->chip.base = -1;
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 74dcea45ad49..a34b0254ba38 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for RapidIO interconnect services
#
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5c1b6388122a..ec4bc1515f0d 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -889,11 +889,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
goto err_req;
}
- pinned = get_user_pages_unlocked(
+ pinned = get_user_pages_fast(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages,
- page_list,
- dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0);
+ nr_pages, dir == DMA_FROM_DEVICE, page_list);
if (pinned != nr_pages) {
if (pinned < 0) {
@@ -961,9 +959,10 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
nents = dma_map_sg(chan->device->dev,
req->sgt.sgl, req->sgt.nents, dir);
- if (nents == -EFAULT) {
+ if (nents == 0) {
rmcd_error("Failed to map SG list");
- return -EFAULT;
+ ret = -EFAULT;
+ goto err_pg;
}
ret = do_dma_request(req, xfer, sync, nents);
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 6bdd54c4e733..69e7de31e41c 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for RIO switches
#
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index e67b923b1ca6..4931ed790428 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -458,7 +458,7 @@ static void idtg2_remove(struct rio_dev *rdev)
idtg2_sysfs(rdev, false);
}
-static struct rio_device_id idtg2_id_table[] = {
+static const struct rio_device_id idtg2_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)},
diff --git a/drivers/rapidio/switches/idt_gen3.c b/drivers/rapidio/switches/idt_gen3.c
index c5923a547bed..85a3908294d9 100644
--- a/drivers/rapidio/switches/idt_gen3.c
+++ b/drivers/rapidio/switches/idt_gen3.c
@@ -348,7 +348,7 @@ static void idtg3_shutdown(struct rio_dev *rdev)
}
}
-static struct rio_device_id idtg3_id_table[] = {
+static const struct rio_device_id idtg3_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
{ 0, } /* terminate list */
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index 7fbb60d31796..4058ce2c76fa 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -168,7 +168,7 @@ static void idtcps_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id idtcps_id_table[] = {
+static const struct rio_device_id idtcps_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)},
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index 8a43561b9d17..1214628b7ded 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -169,7 +169,7 @@ static void tsi568_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id tsi568_id_table[] = {
+static const struct rio_device_id tsi568_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
{ 0, } /* terminate list */
};
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2700d15f7584..9f063e214836 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -336,7 +336,7 @@ static void tsi57x_remove(struct rio_dev *rdev)
spin_unlock(&rdev->rswitch->lock);
}
-static struct rio_device_id tsi57x_id_table[] = {
+static const struct rio_device_id tsi57x_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index e2c1988cd7c0..ca44e6977cf2 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
@@ -168,11 +169,9 @@ static void cec_mod_timer(struct timer_list *t, unsigned long interval)
mod_timer(t, round_jiffies(iv));
}
-static void cec_timer_fn(unsigned long data)
+static void cec_timer_fn(struct timer_list *unused)
{
- struct ce_array *ca = (struct ce_array *)data;
-
- do_spring_cleaning(ca);
+ do_spring_cleaning(&ce_arr);
cec_mod_timer(&cec_timer, timer_interval);
}
@@ -509,7 +508,7 @@ void __init cec_init(void)
if (create_debugfs_nodes())
return;
- setup_timer(&cec_timer, cec_timer_fn, (unsigned long)&ce_arr);
+ timer_setup(&cec_timer, cec_timer_fn, 0);
cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
pr_info("Correctable Errors collector initialized.\n");
diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h
index db72e4513191..c07443b462ad 100644
--- a/drivers/ras/debugfs.h
+++ b/drivers/ras/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RAS_DEBUGFS_H__
#define __RAS_DEBUGFS_H__
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index 5429d3795732..3f38907320dc 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Intel Corporation
*
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 0fd6195601ba..96cd55f9e3c5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -244,7 +244,7 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index cbb6e45c77b2..80ffc57a9ca3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for regulator drivers.
#
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 376a99b7cf5d..181622b2813d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,6 +244,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.ops = &axp20x_ops_sw,
};
+/* DCDC ranges shared with AXP813 */
static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
@@ -426,6 +427,69 @@ static const struct regulator_desc axp809_regulators[] = {
AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
};
+static const struct regulator_desc axp813_regulators[] = {
+ AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+ AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(1)),
+ AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(2)),
+ AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
+ 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(4)),
+ AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
+ 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(5)),
+ AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges,
+ 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(6)),
+ AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+ AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+ AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
+ 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+ BIT(4)),
+ AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ /* to do / check ... */
+ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
+ AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
+ AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ /*
+ * TODO: FLDO3 = {DCDC5, FLDOIN} / 2
+ *
+ * This means FLDO3 effectively switches supplies at runtime,
+ * something the regulator subsystem does not support.
+ */
+ AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
+ AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)),
+};
+
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -441,9 +505,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
step = 75;
break;
case AXP803_ID:
+ case AXP813_ID:
/*
- * AXP803 DCDC work frequency setting has the same range and
- * step as AXP22X, but at a different register.
+ * AXP803/AXP813 DCDC work frequency setting has the same
+ * range and step as AXP22X, but at a different register.
* Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
@@ -561,6 +626,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
workmode <<= id - AXP803_DCDC1;
break;
+ case AXP813_ID:
+ if (id < AXP813_DCDC1 || id > AXP813_DCDC7)
+ return -EINVAL;
+
+ mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1);
+ workmode <<= id - AXP813_DCDC1;
+ break;
+
default:
/* should not happen */
WARN_ON(1);
@@ -579,11 +652,12 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
u32 reg = 0;
/*
- * Currently in our supported AXP variants, only AXP803 and AXP806
- * have polyphase regulators.
+ * Currently in our supported AXP variants, only AXP803, AXP806,
+ * and AXP813 have polyphase regulators.
*/
switch (axp20x->variant) {
case AXP803_ID:
+ case AXP813_ID:
regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, &reg);
switch (id) {
@@ -656,6 +730,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
break;
+ case AXP813_ID:
+ regulators = axp813_regulators;
+ nregulators = AXP813_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
+ break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
axp20x->variant);
@@ -677,6 +757,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
if (axp20x_is_polyphase_slave(axp20x, i))
continue;
+ /* Support for AXP813's FLDO3 is not implemented */
+ if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3)
+ continue;
+
/*
* Regulators DC1SW and DC5LDO are connected internally,
* so we have to handle their supply names separately.
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index aa47280efd32..9b8f47617724 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
/*
* da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -496,8 +496,11 @@ static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
{"da9212", DA9212},
{"da9213", DA9213},
+ {"da9223", DA9223},
{"da9214", DA9214},
+ {"da9224", DA9224},
{"da9215", DA9215},
+ {"da9225", DA9225},
{},
};
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -507,8 +510,11 @@ static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
{ .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] },
{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] },
- { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] },
- { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9223", .data = &da9211_i2c_id[3] },
+ { .compatible = "dlg,da9214", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9224", .data = &da9211_i2c_id[5] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[6] },
+ { .compatible = "dlg,da9225", .data = &da9211_i2c_id[7] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -526,5 +532,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver");
+MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index b841bbf330cc..2cb32aab4f82 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,6 +1,6 @@
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index f9d027992aae..777fac6fb4cb 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/platform_device.h>
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 0cb76ba29e84..8f782d22fdbe 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -34,6 +34,8 @@ struct pbias_reg_info {
u32 vmode;
unsigned int enable_time;
char *name;
+ const unsigned int *pbias_volt_table;
+ int n_voltages;
};
struct pbias_regulator_data {
@@ -49,11 +51,16 @@ struct pbias_of_data {
unsigned int offset;
};
-static const unsigned int pbias_volt_table[] = {
+static const unsigned int pbias_volt_table_3_0V[] = {
1800000,
3000000
};
+static const unsigned int pbias_volt_table_3_3V[] = {
+ 1800000,
+ 3300000
+};
+
static const struct regulator_ops pbias_regulator_voltage_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -69,6 +76,8 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.vmode = BIT(0),
.disable_val = 0,
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +86,8 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
.enable_mask = BIT(9),
.vmode = BIT(8),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_sim_omap3"
};
@@ -86,6 +97,8 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap4"
};
@@ -95,6 +108,8 @@ static const struct pbias_reg_info pbias_mmc_omap5 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_3V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap5"
};
@@ -199,8 +214,8 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.owner = THIS_MODULE;
drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = 2;
+ drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
+ drvdata[data_idx].desc.n_voltages = info->n_voltages;
drvdata[data_idx].desc.enable_time = info->enable_time;
drvdata[data_idx].desc.vsel_reg = offset;
drvdata[data_idx].desc.vsel_mask = info->vmode;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 16c5f84e06a7..0241ada47d04 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -593,13 +593,20 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
u8 *voltage_sel)
{
const struct spmi_voltage_range *range, *end;
+ unsigned offset;
range = vreg->set_points->range;
end = range + vreg->set_points->count;
for (; range < end; range++) {
if (selector < range->n_voltages) {
- *voltage_sel = selector;
+ /*
+ * hardware selectors between set point min and real
+ * min are invalid so we ignore them
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ *voltage_sel = selector + offset;
*range_sel = range->range_sel;
return 0;
}
@@ -613,15 +620,35 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
const struct spmi_voltage_range *range)
{
- int sw_sel = hw_sel;
+ unsigned sw_sel = 0;
+ unsigned offset, max_hw_sel;
const struct spmi_voltage_range *r = vreg->set_points->range;
-
- while (r != range) {
+ const struct spmi_voltage_range *end = r + vreg->set_points->count;
+
+ for (; r < end; r++) {
+ if (r == range && range->n_voltages) {
+ /*
+ * hardware selectors between set point min and real
+ * min and between set point max and real max are
+ * invalid so we return an error if they're
+ * programmed into the hardware
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ if (hw_sel < offset)
+ return -EINVAL;
+
+ max_hw_sel = range->set_point_max_uV - range->min_uV;
+ max_hw_sel /= range->step_uV;
+ if (hw_sel > max_hw_sel)
+ return -EINVAL;
+
+ return sw_sel + hw_sel - offset;
+ }
sw_sel += r->n_voltages;
- r++;
}
- return sw_sel;
+ return -EINVAL;
}
static const struct spmi_voltage_range *
@@ -1619,11 +1646,20 @@ static const struct spmi_regulator_data pm8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pmi8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "l1", 0x4000, "vdd_l1", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
+ { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
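The selector changes above account for hardware ranges whose usable set point does not start at the range's physical minimum: offset = (set_point_min_uV - min_uV) / step_uV is added when translating a software selector to a hardware selector, and subtracted (with bounds checks) on the way back. A standalone sketch with invented range values:

/*
 * Standalone illustration of the offset arithmetic; the range values
 * are made up.  A range whose hardware minimum is 375000 uV but whose
 * first usable set point is 400000 uV at a 12500 uV step has
 * offset = 2, so software selector 0 maps to hardware selector 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned int min_uV = 375000;		/* real hardware minimum */
	unsigned int set_point_min_uV = 400000;	/* first usable voltage */
	unsigned int step_uV = 12500;
	unsigned int offset = (set_point_min_uV - min_uV) / step_uV;
	unsigned int sw_sel, hw_sel;

	for (sw_sel = 0; sw_sel < 4; sw_sel++) {
		hw_sel = sw_sel + offset;	/* as in spmi_sw_selector_to_hw() */
		printf("sw %u -> hw %u -> %u uV\n", sw_sel, hw_sel,
		       min_uV + hw_sel * step_uV);
	}
	return 0;
}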
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 5324dc9e6d6e..7b12e880d1ea 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -228,11 +228,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
int i, ret;
unsigned int val;
- if (tps65217_chip_id(tps) != TPS65217) {
- dev_err(&pdev->dev, "Invalid tps chip version\n");
- return -ENODEV;
- }
-
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65217_NUM_REGULATOR, GFP_KERNEL);
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 9aafbb03482d..bc489958fed7 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -154,7 +154,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (!tps->strobes[rid]) {
if (rid == TPS65218_DCDC_3)
- tps->info[rid]->strobe = 3;
+ tps->strobes[rid] = 3;
else
return -EINVAL;
}
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index bf04479456a0..b609e1d3654b 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -28,7 +28,6 @@ config OMAP_REMOTEPROC
depends on OMAP_IOMMU
select MAILBOX
select OMAP2PLUS_MBOX
- select RPMSG_VIRTIO
help
Say y here to support OMAP's remote processors (dual M3
and DSP on OMAP4) via the remote processor framework.
@@ -58,7 +57,6 @@ config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
depends on DMA_CMA
- select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
remote processor framework.
@@ -79,7 +77,6 @@ config DA8XX_REMOTEPROC
config KEYSTONE_REMOTEPROC
tristate "Keystone Remoteproc support"
depends on ARCH_KEYSTONE
- select RPMSG_VIRTIO
help
Say Y here to support Keystone remote processors (DSP)
via the remote processor framework.
@@ -135,7 +132,6 @@ config ST_REMOTEPROC
depends on ARCH_STI
select MAILBOX
select STI_MBOX
- select RPMSG_VIRTIO
help
Say y here to support ST's adjunct processors via the remote
processor framework.
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 1a0b3dd44b8c..6e16450ce11f 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Generic framework for controlling remote processors
#
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 4f8bc168473c..832e20271664 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RPROC_QCOM_COMMON_H__
#define __RPROC_QCOM_COMMON_H__
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
index 2d3d5ac92c06..8a3fa2bcc9f6 100644
--- a/drivers/remoteproc/qcom_q6v5_pil.c
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -32,6 +32,7 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
+#include <linux/iopoll.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
@@ -64,6 +65,8 @@
#define QDSP6SS_RESET_REG 0x014
#define QDSP6SS_GFMUX_CTL_REG 0x020
#define QDSP6SS_PWR_CTL_REG 0x030
+#define QDSP6SS_MEM_PWR_CTL 0x0B0
+#define QDSP6SS_STRAP_ACC 0x110
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -92,6 +95,15 @@
#define QDSS_BHS_ON BIT(21)
#define QDSS_LDO_BYP BIT(22)
+/* QDSP6v56 parameters */
+#define QDSP6v56_LDO_BYP BIT(25)
+#define QDSP6v56_BHS_ON BIT(24)
+#define QDSP6v56_CLAMP_WL BIT(21)
+#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
+#define HALT_CHECK_MAX_LOOPS 200
+#define QDSP6SS_XO_CBCR 0x0038
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
struct reg_info {
struct regulator *reg;
int uV;
@@ -110,6 +122,8 @@ struct rproc_hexagon_res {
struct qcom_mss_reg_res *active_supply;
char **proxy_clk_names;
char **active_clk_names;
+ int version;
+ bool need_mem_protection;
};
struct q6v5 {
@@ -154,6 +168,16 @@ struct q6v5 {
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
+ bool need_mem_protection;
+ int mpss_perm;
+ int mba_perm;
+ int version;
+};
+
+enum {
+ MSS_MSM8916,
+ MSS_MSM8974,
+ MSS_MSM8996,
};
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
@@ -289,6 +313,26 @@ static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
return &table;
}
+static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
+ bool remote_owner, phys_addr_t addr,
+ size_t size)
+{
+ struct qcom_scm_vmperm next;
+
+ if (!qproc->need_mem_protection)
+ return 0;
+ if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
+ return 0;
+ if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+ return 0;
+
+ next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
+ next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+
+ return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
+ current_perm, &next, 1);
+}
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
@@ -353,33 +397,98 @@ static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
int ret;
+ int i;
- /* Assert resets, stop core */
- val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
- val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
- writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
- /* Enable power block headswitch, and wait for it to stabilize */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= QDSS_BHS_ON | QDSS_LDO_BYP;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- udelay(1);
-
- /*
- * Turn on memories. L2 banks should be done individually
- * to minimize inrush current.
- */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
- Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_2;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_1;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_L2DATA_SLP_NRET_N_0;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ if (qproc->version == MSS_MSM8996) {
+ /* Override the ACC value if required */
+ writel(QDSP6SS_ACC_OVERRIDE_VAL,
+ qproc->reg_base + QDSP6SS_STRAP_ACC);
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* BHS requires the XO CBCR clock to be enabled */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= 0x1;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+ /* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(qproc->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSP6v56_BHS_ON;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+
+ /* Put LDO in bypass mode */
+ val |= QDSP6v56_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert QDSP6 compiler memory clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Deassert memory peripheral sleep and L2 memory standby */
+ val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Turn on L1, L2, ETB and JU memories 1 at a time */
+ val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ for (i = 19; i >= 0; i--) {
+ val |= BIT(i);
+ writel(val, qproc->reg_base +
+ QDSP6SS_MEM_PWR_CTL);
+ /*
+ * Read back the value to ensure the write is done, then
+ * wait for 1us for both memory peripheral and data
+ * array to turn on.
+ */
+ val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
+ udelay(1);
+ }
+ /* Remove word line clamp */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val &= ~QDSP6v56_CLAMP_WL;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ } else {
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Enable power block headswitch and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ udelay(1);
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
/* Remove IO clamp */
val &= ~Q6SS_CLAMP_IO;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
@@ -451,6 +560,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
+ int mdata_perm;
+ int xferop_ret;
void *ptr;
int ret;
@@ -462,6 +573,17 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
memcpy(ptr, fw->data, fw->size);
+ /* Hypervisor mapping to access metadata by modem */
+ mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+ true, phys, fw->size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to metadata failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto free_dma_attrs;
+ }
+
writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
@@ -471,6 +593,14 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
else if (ret < 0)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
+ /* Metadata authentication done, remove modem access */
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
+ false, phys, fw->size);
+ if (xferop_ret)
+ dev_warn(qproc->dev,
+ "mdt buffer not reclaimed system may become unstable\n");
+
+free_dma_attrs:
dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
return ret < 0 ? ret : 0;
@@ -504,7 +634,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
bool relocate = false;
char seg_name[10];
ssize_t offset;
- size_t size;
+ size_t size = 0;
void *ptr;
int ret;
int i;
@@ -542,7 +672,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
-
+ /* Load firmware segments */
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -575,18 +705,24 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
-
- size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
- if (!size) {
- boot_addr = relocate ? qproc->mpss_phys : min_addr;
- writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
- writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
- }
-
size += phdr->p_memsz;
- writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
}
+ /* Transfer ownership of modem ddr region to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
@@ -602,6 +738,7 @@ release_firmware:
static int q6v5_start(struct rproc *rproc)
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int xfermemop_ret;
int ret;
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
@@ -637,11 +774,22 @@ static int q6v5_start(struct rproc *rproc)
goto assert_reset;
}
+ /* Assign MBA image access in DDR to q6 */
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mba memory failed: %d\n",
+ xfermemop_ret);
+ goto disable_active_clks;
+ }
+
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
ret = q6v5proc_reset(qproc);
if (ret)
- goto halt_axi_ports;
+ goto reclaim_mba;
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
@@ -658,16 +806,22 @@ static int q6v5_start(struct rproc *rproc)
ret = q6v5_mpss_load(qproc);
if (ret)
- goto halt_axi_ports;
+ goto reclaim_mpss;
ret = wait_for_completion_timeout(&qproc->start_done,
msecs_to_jiffies(5000));
if (ret == 0) {
dev_err(qproc->dev, "start timed out\n");
ret = -ETIMEDOUT;
- goto halt_axi_ports;
+ goto reclaim_mpss;
}
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret)
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer system may become unstable\n");
qproc->running = true;
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
@@ -677,12 +831,30 @@ static int q6v5_start(struct rproc *rproc)
return 0;
+reclaim_mpss:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, qproc->mpss_phys,
+ qproc->mpss_size);
+ WARN_ON(xfermemop_ret);
+
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer, system may become unstable\n");
+ }
+
+disable_active_clks:
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
+
assert_reset:
reset_control_assert(qproc->mss_restart);
disable_vdd:
@@ -702,6 +874,7 @@ static int q6v5_stop(struct rproc *rproc)
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
+ u32 val;
qproc->running = false;
@@ -718,6 +891,20 @@ static int q6v5_stop(struct rproc *rproc)
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ if (qproc->version == MSS_MSM8996) {
+ /*
+ * Assert memory and I/O clamps to avoid high MX current
+ * during LPASS/MSS restart.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+ QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
+ qproc->mpss_phys, qproc->mpss_size);
+ WARN_ON(ret);
reset_control_assert(qproc->mss_restart);
q6v5_clk_disable(qproc->dev, qproc->active_clks,
@@ -1017,6 +1204,8 @@ static int q6v5_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
+ qproc->version = desc->version;
+ qproc->need_mem_protection = desc->need_mem_protection;
ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
if (ret < 0)
goto free_rproc;
@@ -1038,7 +1227,8 @@ static int q6v5_probe(struct platform_device *pdev)
ret = PTR_ERR(qproc->state);
goto free_rproc;
}
-
+ qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
+ qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
@@ -1067,6 +1257,24 @@ static int q6v5_remove(struct platform_device *pdev)
return 0;
}
+static const struct rproc_hexagon_res msm8996_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pnoc",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ "gpll0_mss_clk",
+ NULL
+ },
+ .need_mem_protection = true,
+ .version = MSS_MSM8996,
+};
+
static const struct rproc_hexagon_res msm8916_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
@@ -1094,6 +1302,8 @@ static const struct rproc_hexagon_res msm8916_mss = {
"mem",
NULL
},
+ .need_mem_protection = false,
+ .version = MSS_MSM8916,
};
static const struct rproc_hexagon_res msm8974_mss = {
@@ -1131,12 +1341,15 @@ static const struct rproc_hexagon_res msm8974_mss = {
"mem",
NULL
},
+ .need_mem_protection = false,
+ .version = MSS_MSM8974,
};
static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
+ { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);
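The new q6v5_xfer_mem_ownership() helper keeps a per-buffer owner bitmask (mpss_perm, mba_perm, or the local mdata_perm) and skips the SCM call when the requested side already owns the region. A rough user-space model of that bookkeeping is sketched below; the hypervisor call is stubbed out and the VMID values are illustrative, not the real QCOM_SCM_VMID_* constants.

/* User-space model of the ownership hand-off; nothing here touches SCM. */
#include <stdio.h>

#define VMID_HLOS	3	/* illustrative only */
#define VMID_MSS_MSA	15	/* illustrative only */

static int scm_assign_mem_stub(unsigned long addr, unsigned long size,
			       int *current_perm, int next_vmid)
{
	/* a real implementation would trap into the hypervisor here */
	*current_perm = 1 << next_vmid;
	return 0;
}

static int xfer_mem_ownership(int *current_perm, int remote_owner,
			      unsigned long addr, unsigned long size)
{
	int next = remote_owner ? VMID_MSS_MSA : VMID_HLOS;

	if (*current_perm == (1 << next))
		return 0;	/* requested side already owns the buffer */

	return scm_assign_mem_stub(addr, size, current_perm, next);
}

int main(void)
{
	int perm = 1 << VMID_HLOS;	/* Linux owns the buffer initially */

	xfer_mem_ownership(&perm, 1, 0x88800000UL, 0x100000UL);
	printf("owner mask after hand-off to modem: 0x%x\n", perm);
	xfer_mem_ownership(&perm, 0, 0x88800000UL, 0x100000UL);
	printf("owner mask after reclaim by Linux:  0x%x\n", perm);
	return 0;
}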
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
index 25fb7f62a457..62c8682d0a92 100644
--- a/drivers/remoteproc/qcom_wcnss.h
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_WNCSS_H__
#define __QCOM_WNCSS_H__
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 1c122e230cec..a20488336aa0 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -155,6 +155,132 @@ static const struct file_operations rproc_recovery_ops = {
.llseek = generic_file_llseek,
};
+/* Expose resource table content via debugfs */
+static int rproc_rsc_table_show(struct seq_file *seq, void *p)
+{
+ static const char * const types[] = {"carveout", "devmem", "trace", "vdev"};
+ struct rproc *rproc = seq->private;
+ struct resource_table *table = rproc->table_ptr;
+ struct fw_rsc_carveout *c;
+ struct fw_rsc_devmem *d;
+ struct fw_rsc_trace *t;
+ struct fw_rsc_vdev *v;
+ int i, j;
+
+ if (!table) {
+ seq_puts(seq, "No resource table found\n");
+ return 0;
+ }
+
+ for (i = 0; i < table->num; i++) {
+ int offset = table->offset[i];
+ struct fw_rsc_hdr *hdr = (void *)table + offset;
+ void *rsc = (void *)hdr + sizeof(*hdr);
+
+ switch (hdr->type) {
+ case RSC_CARVEOUT:
+ c = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", c->da);
+ seq_printf(seq, " Physical Address 0x%x\n", c->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", c->len);
+ seq_printf(seq, " Flags 0x%x\n", c->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", c->reserved);
+ seq_printf(seq, " Name %s\n\n", c->name);
+ break;
+ case RSC_DEVMEM:
+ d = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", d->da);
+ seq_printf(seq, " Physical Address 0x%x\n", d->pa);
+ seq_printf(seq, " Length 0x%x Bytes\n", d->len);
+ seq_printf(seq, " Flags 0x%x\n", d->flags);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", d->reserved);
+ seq_printf(seq, " Name %s\n\n", d->name);
+ break;
+ case RSC_TRACE:
+ t = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+ seq_printf(seq, " Device Address 0x%x\n", t->da);
+ seq_printf(seq, " Length 0x%x Bytes\n", t->len);
+ seq_printf(seq, " Reserved (should be zero) [%d]\n", t->reserved);
+ seq_printf(seq, " Name %s\n\n", t->name);
+ break;
+ case RSC_VDEV:
+ v = rsc;
+ seq_printf(seq, "Entry %d is of type %s\n", i, types[hdr->type]);
+
+ seq_printf(seq, " ID %d\n", v->id);
+ seq_printf(seq, " Notify ID %d\n", v->notifyid);
+ seq_printf(seq, " Device features 0x%x\n", v->dfeatures);
+ seq_printf(seq, " Guest features 0x%x\n", v->gfeatures);
+ seq_printf(seq, " Config length 0x%x\n", v->config_len);
+ seq_printf(seq, " Status 0x%x\n", v->status);
+ seq_printf(seq, " Number of vrings %d\n", v->num_of_vrings);
+ seq_printf(seq, " Reserved (should be zero) [%d][%d]\n\n",
+ v->reserved[0], v->reserved[1]);
+
+ for (j = 0; j < v->num_of_vrings; j++) {
+ seq_printf(seq, " Vring %d\n", j);
+ seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da);
+ seq_printf(seq, " Alignment %d\n", v->vring[j].align);
+ seq_printf(seq, " Number of buffers %d\n", v->vring[j].num);
+ seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid);
+ seq_printf(seq, " Physical Address 0x%x\n\n",
+ v->vring[j].pa);
+ }
+ break;
+ default:
+ seq_printf(seq, "Unknown resource type found: %d [hdr: %p]\n",
+ hdr->type, hdr);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rproc_rsc_table_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rproc_rsc_table_show, inode->i_private);
+}
+
+static const struct file_operations rproc_rsc_table_ops = {
+ .open = rproc_rsc_table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/* Expose carveout content via debugfs */
+static int rproc_carveouts_show(struct seq_file *seq, void *p)
+{
+ struct rproc *rproc = seq->private;
+ struct rproc_mem_entry *carveout;
+
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ seq_puts(seq, "Carveout memory entry:\n");
+ seq_printf(seq, "\tVirtual address: %p\n", carveout->va);
+ seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
+ seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
+ seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+ }
+
+ return 0;
+}
+
+static int rproc_carveouts_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rproc_carveouts_show, inode->i_private);
+}
+
+static const struct file_operations rproc_carveouts_ops = {
+ .open = rproc_carveouts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void rproc_remove_trace_file(struct dentry *tfile)
{
debugfs_remove(tfile);
@@ -198,6 +324,10 @@ void rproc_create_debug_dir(struct rproc *rproc)
rproc, &rproc_name_ops);
debugfs_create_file("recovery", 0400, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
+ debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
+ rproc, &rproc_rsc_table_ops);
+ debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
+ rproc, &rproc_carveouts_ops);
}
void __init rproc_init_debugfs(void)
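Both new entries follow the usual single_open()/seq_file debugfs pattern: a show callback prints into the seq_file, and the boilerplate open/read/llseek/release hooks are shared. A self-contained sketch of the pattern in isolation is below; the module and file names are hypothetical.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;

/* The show callback only writes into the seq_file; no read bookkeeping. */
static int example_show(struct seq_file *seq, void *p)
{
	seq_puts(seq, "hello from debugfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_ops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("seqfile-example", NULL);
	debugfs_create_file("status", 0400, example_dir, NULL, &example_ops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");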
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e2baecbb9dd3..7fc77696bb1e 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -28,6 +28,12 @@ config RESET_ATH79
This enables the ATH79 reset controller driver that supports the
AR71xx SoC reset controller.
+config RESET_AXS10X
+ bool "AXS10x Reset Driver" if COMPILE_TEST
+ default ARC_PLAT_AXS10X
+ help
+ This enables the reset controller driver for AXS10x.
+
config RESET_BERLIN
bool "Berlin Reset Driver" if COMPILE_TEST
default ARCH_BERLIN
@@ -75,21 +81,21 @@ config RESET_PISTACHIO
help
This enables the reset driver for ImgTec Pistachio SoCs.
-config RESET_SOCFPGA
- bool "SoCFPGA Reset Driver" if COMPILE_TEST
- default ARCH_SOCFPGA
+config RESET_SIMPLE
+ bool "Simple Reset Controller Driver" if COMPILE_TEST
+ default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX
help
- This enables the reset controller driver for Altera SoCFPGAs.
+ This enables a simple reset controller driver for reset lines that
+ can be asserted and deasserted by toggling bits in a contiguous,
+ exclusive register space.
-config RESET_STM32
- bool "STM32 Reset Driver" if COMPILE_TEST
- default ARCH_STM32
- help
- This enables the RCC reset controller driver for STM32 MCUs.
+ Currently this driver supports Altera SoCFPGAs, the RCC reset
+ controller in STM32 MCUs, Allwinner SoCs, and ZTE's zx2967 family.
config RESET_SUNXI
bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
default ARCH_SUNXI
+ select RESET_SIMPLE
help
This enables the reset driver for Allwinner SoCs.
@@ -121,12 +127,6 @@ config RESET_UNIPHIER
Say Y if you want to control reset signals provided by System Control
block, Media I/O block, Peripheral Block.
-config RESET_ZX2967
- bool "ZTE ZX2967 Reset Driver"
- depends on ARCH_ZX || COMPILE_TEST
- help
- This enables the reset controller driver for ZTE's zx2967 family.
-
config RESET_ZYNQ
bool "ZYNQ Reset Driver" if COMPILE_TEST
default ARCH_ZYNQ
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index af1c15c330b3..132c24f5ddb5 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,9 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
@@ -12,12 +14,10 @@ obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
-obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
-obj-$(CONFIG_RESET_STM32) += reset-stm32.o
+obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
-obj-$(CONFIG_RESET_ZX2967) += reset-zx2967.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c
new file mode 100644
index 000000000000..afb298e46bd9
--- /dev/null
+++ b/drivers/reset/reset-axs10x.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2017 Synopsys.
+ *
+ * Synopsys AXS10x reset driver.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#define to_axs10x_rst(p) container_of((p), struct axs10x_rst, rcdev)
+
+#define AXS10X_MAX_RESETS 32
+
+struct axs10x_rst {
+ void __iomem *regs_rst;
+ spinlock_t lock;
+ struct reset_controller_dev rcdev;
+};
+
+static int axs10x_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct axs10x_rst *rst = to_axs10x_rst(rcdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rst->lock, flags);
+ writel(BIT(id), rst->regs_rst);
+ spin_unlock_irqrestore(&rst->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops axs10x_reset_ops = {
+ .reset = axs10x_reset_reset,
+};
+
+static int axs10x_reset_probe(struct platform_device *pdev)
+{
+ struct axs10x_rst *rst;
+ struct resource *mem;
+
+ rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
+ if (!rst)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rst->regs_rst))
+ return PTR_ERR(rst->regs_rst);
+
+ spin_lock_init(&rst->lock);
+
+ rst->rcdev.owner = THIS_MODULE;
+ rst->rcdev.ops = &axs10x_reset_ops;
+ rst->rcdev.of_node = pdev->dev.of_node;
+ rst->rcdev.nr_resets = AXS10X_MAX_RESETS;
+
+ return devm_reset_controller_register(&pdev->dev, &rst->rcdev);
+}
+
+static const struct of_device_id axs10x_reset_dt_match[] = {
+ { .compatible = "snps,axs10x-reset" },
+ { },
+};
+
+static struct platform_driver axs10x_reset_driver = {
+ .probe = axs10x_reset_probe,
+ .driver = {
+ .name = "axs10x-reset",
+ .of_match_table = axs10x_reset_dt_match,
+ },
+};
+builtin_platform_driver(axs10x_reset_driver);
+
+MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys AXS10x reset driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index a8b915eb8b58..c419a3753d00 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -62,13 +62,16 @@
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/of_device.h>
#define REG_COUNT 8
#define BITS_PER_REG 32
+#define LEVEL_OFFSET 0x7c
struct meson_reset {
void __iomem *reg_base;
struct reset_controller_dev rcdev;
+ spinlock_t lock;
};
static int meson_reset_reset(struct reset_controller_dev *rcdev,
@@ -80,26 +83,68 @@ static int meson_reset_reset(struct reset_controller_dev *rcdev,
unsigned int offset = id % BITS_PER_REG;
void __iomem *reg_addr = data->reg_base + (bank << 2);
- if (bank >= REG_COUNT)
- return -EINVAL;
-
writel(BIT(offset), reg_addr);
return 0;
}
-static const struct reset_control_ops meson_reset_ops = {
+static int meson_reset_level(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct meson_reset *data =
+ container_of(rcdev, struct meson_reset, rcdev);
+ unsigned int bank = id / BITS_PER_REG;
+ unsigned int offset = id % BITS_PER_REG;
+ void __iomem *reg_addr = data->reg_base + LEVEL_OFFSET + (bank << 2);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(reg_addr);
+ if (assert)
+ writel(reg & ~BIT(offset), reg_addr);
+ else
+ writel(reg | BIT(offset), reg_addr);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int meson_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_reset_level(rcdev, id, true);
+}
+
+static int meson_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return meson_reset_level(rcdev, id, false);
+}
+
+static const struct reset_control_ops meson_reset_meson8_ops = {
.reset = meson_reset_reset,
};
+static const struct reset_control_ops meson_reset_gx_ops = {
+ .reset = meson_reset_reset,
+ .assert = meson_reset_assert,
+ .deassert = meson_reset_deassert,
+};
+
static const struct of_device_id meson_reset_dt_ids[] = {
- { .compatible = "amlogic,meson8b-reset", },
- { .compatible = "amlogic,meson-gxbb-reset", },
+ { .compatible = "amlogic,meson8b-reset",
+ .data = &meson_reset_meson8_ops, },
+ { .compatible = "amlogic,meson-gxbb-reset",
+ .data = &meson_reset_gx_ops, },
{ /* sentinel */ },
};
static int meson_reset_probe(struct platform_device *pdev)
{
+ const struct reset_control_ops *ops;
struct meson_reset *data;
struct resource *res;
@@ -107,6 +152,10 @@ static int meson_reset_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(data->reg_base))
@@ -114,9 +163,11 @@ static int meson_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = REG_COUNT * BITS_PER_REG;
- data->rcdev.ops = &meson_reset_ops;
+ data->rcdev.ops = ops;
data->rcdev.of_node = pdev->dev.of_node;
return devm_reset_controller_register(&pdev->dev, &data->rcdev);
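The meson driver now picks its reset_control_ops per compatible string by stashing them in the OF match table's .data and retrieving them with of_device_get_match_data() in probe(). A stripped-down sketch of that pattern, with hypothetical compatibles and ops:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_ops {
	int (*reset)(unsigned long id);
};

static int example_reset_a(unsigned long id) { return 0; }
static int example_reset_b(unsigned long id) { return 0; }

static const struct example_ops ops_a = { .reset = example_reset_a };
static const struct example_ops ops_b = { .reset = example_reset_b };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-a", .data = &ops_a },
	{ .compatible = "vendor,example-b", .data = &ops_b },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_ops *ops;

	/* .data of the matched of_device_id entry, or NULL if none */
	ops = of_device_get_match_data(&pdev->dev);
	if (!ops)
		return -EINVAL;

	return ops->reset(0);
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "match-data-example",
		.of_match_table = example_dt_ids,
	},
};
builtin_platform_driver(example_driver);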
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
new file mode 100644
index 000000000000..2d4f362ef025
--- /dev/null
+++ b/drivers/reset/reset-simple.c
@@ -0,0 +1,186 @@
+/*
+ * Simple Reset Controller Driver
+ *
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include "reset-simple.h"
+
+static inline struct reset_simple_data *
+to_reset_simple_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct reset_simple_data, rcdev);
+}
+
+static int reset_simple_update(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * reg_width));
+ if (assert ^ data->active_low)
+ reg |= BIT(offset);
+ else
+ reg &= ~BIT(offset);
+ writel(reg, data->membase + (bank * reg_width));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int reset_simple_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return reset_simple_update(rcdev, id, true);
+}
+
+static int reset_simple_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return reset_simple_update(rcdev, id, false);
+}
+
+static int reset_simple_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_simple_data *data = to_reset_simple_data(rcdev);
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
+ u32 reg;
+
+ reg = readl(data->membase + (bank * reg_width));
+
+ return !(reg & BIT(offset)) ^ !data->status_active_low;
+}
+
+const struct reset_control_ops reset_simple_ops = {
+ .assert = reset_simple_assert,
+ .deassert = reset_simple_deassert,
+ .status = reset_simple_status,
+};
+
+/**
+ * struct reset_simple_devdata - simple reset controller properties
+ * @reg_offset: offset between base address and first reset register.
+ * @nr_resets: number of resets. If not set, default to resource size in bits.
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ */
+struct reset_simple_devdata {
+ u32 reg_offset;
+ u32 nr_resets;
+ bool active_low;
+ bool status_active_low;
+};
+
+#define SOCFPGA_NR_BANKS 8
+
+static const struct reset_simple_devdata reset_simple_socfpga = {
+ .reg_offset = 0x10,
+ .nr_resets = SOCFPGA_NR_BANKS * 32,
+ .status_active_low = true,
+};
+
+static const struct reset_simple_devdata reset_simple_active_low = {
+ .active_low = true,
+ .status_active_low = true,
+};
+
+static const struct of_device_id reset_simple_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga },
+ { .compatible = "st,stm32-rcc", },
+ { .compatible = "allwinner,sun6i-a31-clock-reset",
+ .data = &reset_simple_active_low },
+ { .compatible = "zte,zx296718-reset",
+ .data = &reset_simple_active_low },
+ { /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct reset_simple_devdata *devdata;
+ struct reset_simple_data *data;
+ void __iomem *membase;
+ struct resource *res;
+ u32 reg_offset = 0;
+
+ devdata = of_device_get_match_data(dev);
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ membase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(membase))
+ return PTR_ERR(membase);
+
+ spin_lock_init(&data->lock);
+ data->membase = membase;
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
+ data->rcdev.ops = &reset_simple_ops;
+ data->rcdev.of_node = dev->of_node;
+
+ if (devdata) {
+ reg_offset = devdata->reg_offset;
+ if (devdata->nr_resets)
+ data->rcdev.nr_resets = devdata->nr_resets;
+ data->active_low = devdata->active_low;
+ data->status_active_low = devdata->status_active_low;
+ }
+
+ if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
+ of_property_read_u32(dev->of_node, "altr,modrst-offset",
+ &reg_offset)) {
+ dev_warn(dev,
+ "missing altr,modrst-offset property, assuming 0x%x!\n",
+ reg_offset);
+ }
+
+ data->membase += reg_offset;
+
+ return devm_reset_controller_register(dev, &data->rcdev);
+}
+
+static struct platform_driver reset_simple_driver = {
+ .probe = reset_simple_probe,
+ .driver = {
+ .name = "simple-reset",
+ .of_match_table = reset_simple_dt_ids,
+ },
+};
+builtin_platform_driver(reset_simple_driver);
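reset_simple_update() maps a flat reset id onto a 32-bit register bank and a bit within it, optionally shifted by a per-device reg_offset. A standalone illustration of that arithmetic, using an arbitrary id and the SoCFPGA-style 0x10 offset:

#include <stdio.h>

int main(void)
{
	unsigned int reg_width = 4;		/* sizeof(u32) */
	unsigned int reg_offset = 0x10;		/* e.g. altr,modrst-offset */
	unsigned long id = 85;			/* arbitrary reset line */

	unsigned int bank = id / (reg_width * 8);	/* 32 ids per bank */
	unsigned int bit = id % (reg_width * 8);

	/* prints: reset 85 -> register 0x18, bit 21 */
	printf("reset %lu -> register 0x%x, bit %u\n",
	       id, reg_offset + bank * reg_width, bit);
	return 0;
}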
diff --git a/drivers/reset/reset-simple.h b/drivers/reset/reset-simple.h
new file mode 100644
index 000000000000..8a496022baef
--- /dev/null
+++ b/drivers/reset/reset-simple.h
@@ -0,0 +1,45 @@
+/*
+ * Simple Reset Controller ops
+ *
+ * Based on Allwinner SoCs Reset Controller driver
+ *
+ * Copyright 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RESET_SIMPLE_H__
+#define __RESET_SIMPLE_H__
+
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct reset_simple_data - driver data for simple reset controllers
+ * @lock: spinlock to protect registers during read-modify-write cycles
+ * @membase: memory mapped I/O register range
+ * @rcdev: reset controller device base structure
+ * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
+ * are set to assert the reset. Note that this says nothing about
+ * the voltage level of the actual reset line.
+ * @status_active_low: if true, bits read back as cleared while the reset is
+ * asserted. Otherwise, bits read back as set while the
+ * reset is asserted.
+ */
+struct reset_simple_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+ bool active_low;
+ bool status_active_low;
+};
+
+extern const struct reset_control_ops reset_simple_ops;
+
+#endif /* __RESET_SIMPLE_H__ */
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
deleted file mode 100644
index 3907bbc9c6cf..000000000000
--- a/drivers/reset/reset-socfpga.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Socfpga Reset Controller Driver
- *
- * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
- *
- * based on
- * Allwinner SoCs Reset Controller driver
- *
- * Copyright 2013 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#define BANK_INCREMENT 4
-#define NR_BANKS 8
-
-struct socfpga_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
- writel(reg | BIT(offset), data->membase + (bank * BANK_INCREMENT));
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data,
- rcdev);
-
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
- writel(reg & ~BIT(offset), data->membase + (bank * BANK_INCREMENT));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int socfpga_reset_status(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct socfpga_reset_data *data = container_of(rcdev,
- struct socfpga_reset_data, rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- u32 reg;
-
- reg = readl(data->membase + (bank * BANK_INCREMENT));
-
- return !(reg & BIT(offset));
-}
-
-static const struct reset_control_ops socfpga_reset_ops = {
- .assert = socfpga_reset_assert,
- .deassert = socfpga_reset_deassert,
- .status = socfpga_reset_status,
-};
-
-static int socfpga_reset_probe(struct platform_device *pdev)
-{
- struct socfpga_reset_data *data;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- u32 modrst_offset;
-
- /*
- * The binding was mainlined without the required property.
- * Do not continue, when we encounter an old DT.
- */
- if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
- dev_err(&pdev->dev, "%pOF missing #reset-cells property\n",
- pdev->dev.of_node);
- return -EINVAL;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) {
- dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
- modrst_offset = 0x10;
- }
- data->membase += modrst_offset;
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
- data->rcdev.ops = &socfpga_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(dev, &data->rcdev);
-}
-
-static const struct of_device_id socfpga_reset_dt_ids[] = {
- { .compatible = "altr,rst-mgr", },
- { /* sentinel */ },
-};
-
-static struct platform_driver socfpga_reset_driver = {
- .probe = socfpga_reset_probe,
- .driver = {
- .name = "socfpga-reset",
- .of_match_table = socfpga_reset_dt_ids,
- },
-};
-builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c
deleted file mode 100644
index 3a7c8527e66a..000000000000
--- a/drivers/reset/reset-stm32.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) Maxime Coquelin 2015
- * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
- * License terms: GNU General Public License (GPL), version 2
- *
- * Heavily based on sunxi driver from Maxime Ripard.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-struct stm32_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int stm32_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct stm32_reset_data *data = container_of(rcdev,
- struct stm32_reset_data,
- rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * 4));
- writel(reg | BIT(offset), data->membase + (bank * 4));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct stm32_reset_data *data = container_of(rcdev,
- struct stm32_reset_data,
- rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * 4));
- writel(reg & ~BIT(offset), data->membase + (bank * 4));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops stm32_reset_ops = {
- .assert = stm32_reset_assert,
- .deassert = stm32_reset_deassert,
-};
-
-static const struct of_device_id stm32_reset_dt_ids[] = {
- { .compatible = "st,stm32-rcc", },
- { /* sentinel */ },
-};
-
-static int stm32_reset_probe(struct platform_device *pdev)
-{
- struct stm32_reset_data *data;
- struct resource *res;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = resource_size(res) * 8;
- data->rcdev.ops = &stm32_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &data->rcdev);
-}
-
-static struct platform_driver stm32_reset_driver = {
- .probe = stm32_reset_probe,
- .driver = {
- .name = "stm32-rcc-reset",
- .of_match_table = stm32_reset_dt_ids,
- },
-};
-builtin_platform_driver(stm32_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 2c7dd1fd08df..db9a1a75523f 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -22,64 +22,11 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-struct sunxi_reset_data {
- spinlock_t lock;
- void __iomem *membase;
- struct reset_controller_dev rcdev;
-};
-
-static int sunxi_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct sunxi_reset_data *data = container_of(rcdev,
- struct sunxi_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * reg_width));
- writel(reg & ~BIT(offset), data->membase + (bank * reg_width));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct sunxi_reset_data *data = container_of(rcdev,
- struct sunxi_reset_data,
- rcdev);
- int reg_width = sizeof(u32);
- int bank = id / (reg_width * BITS_PER_BYTE);
- int offset = id % (reg_width * BITS_PER_BYTE);
- unsigned long flags;
- u32 reg;
-
- spin_lock_irqsave(&data->lock, flags);
-
- reg = readl(data->membase + (bank * reg_width));
- writel(reg | BIT(offset), data->membase + (bank * reg_width));
-
- spin_unlock_irqrestore(&data->lock, flags);
-
- return 0;
-}
-
-static const struct reset_control_ops sunxi_reset_ops = {
- .assert = sunxi_reset_assert,
- .deassert = sunxi_reset_deassert,
-};
+#include "reset-simple.h"
static int sunxi_reset_init(struct device_node *np)
{
- struct sunxi_reset_data *data;
+ struct reset_simple_data *data;
struct resource res;
resource_size_t size;
int ret;
@@ -108,8 +55,9 @@ static int sunxi_reset_init(struct device_node *np)
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 8;
- data->rcdev.ops = &sunxi_reset_ops;
+ data->rcdev.ops = &reset_simple_ops;
data->rcdev.of_node = np;
+ data->active_low = true;
return reset_controller_register(&data->rcdev);
@@ -122,6 +70,8 @@ err_alloc:
* These are the reset controller we need to initialize early on in
* our system, before we can even think of using a regular device
* driver for it.
+ * The controllers that we can register through the regular device
+ * model are handled by the simple reset driver directly.
*/
static const struct of_device_id sunxi_early_reset_dt_ids[] __initconst = {
{ .compatible = "allwinner,sun6i-a31-ahb1-reset", },
@@ -135,45 +85,3 @@ void __init sun6i_reset_init(void)
for_each_matching_node(np, sunxi_early_reset_dt_ids)
sunxi_reset_init(np);
}
-
-/*
- * And these are the controllers we can register through the regular
- * device model.
- */
-static const struct of_device_id sunxi_reset_dt_ids[] = {
- { .compatible = "allwinner,sun6i-a31-clock-reset", },
- { /* sentinel */ },
-};
-
-static int sunxi_reset_probe(struct platform_device *pdev)
-{
- struct sunxi_reset_data *data;
- struct resource *res;
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->membase))
- return PTR_ERR(data->membase);
-
- spin_lock_init(&data->lock);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = resource_size(res) * 8;
- data->rcdev.ops = &sunxi_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &data->rcdev);
-}
-
-static struct platform_driver sunxi_reset_driver = {
- .probe = sunxi_reset_probe,
- .driver = {
- .name = "sunxi-reset",
- .of_match_table = sunxi_reset_dt_ids,
- },
-};
-builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index bda2dd196ae5..e8bb023ff15e 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -58,6 +58,7 @@ static const struct uniphier_reset_data uniphier_ld4_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
+ UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, MIO, RLE) */
UNIPHIER_RESETX(12, 0x2000, 6), /* GIO (Ether, SATA, USB3) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
@@ -76,6 +77,7 @@ static const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x2000, 2), /* NAND */
+ UNIPHIER_RESETX(6, 0x2000, 12), /* Ether */
UNIPHIER_RESETX(8, 0x2000, 10), /* STDMAC (HSC, RLE) */
UNIPHIER_RESETX(14, 0x2000, 17), /* USB30 */
UNIPHIER_RESETX(15, 0x2004, 17), /* USB31 */
@@ -92,6 +94,7 @@ static const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC, MIO) */
UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
UNIPHIER_RESETX(41, 0x2008, 1), /* EVEA */
@@ -102,6 +105,7 @@ static const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 6), /* Ether */
UNIPHIER_RESETX(8, 0x200c, 8), /* STDMAC (HSC) */
UNIPHIER_RESETX(12, 0x200c, 5), /* GIO (PCIe, USB3) */
UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
@@ -114,6 +118,20 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
UNIPHIER_RESET_END,
};
+static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
+ UNIPHIER_RESETX(2, 0x200c, 0), /* NAND */
+ UNIPHIER_RESETX(4, 0x200c, 2), /* eMMC */
+ UNIPHIER_RESETX(8, 0x200c, 12), /* STDMAC */
+ UNIPHIER_RESETX(12, 0x200c, 4), /* USB30 link (GIO0) */
+ UNIPHIER_RESETX(13, 0x200c, 5), /* USB31 link (GIO1) */
+ UNIPHIER_RESETX(16, 0x200c, 16), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 18), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 20), /* USB30-PHY2 */
+ UNIPHIER_RESETX(20, 0x200c, 17), /* USB31-PHY0 */
+ UNIPHIER_RESETX(21, 0x200c, 19), /* USB31-PHY1 */
+ UNIPHIER_RESET_END,
+};
+
/* Media I/O reset data */
#define UNIPHIER_MIO_RESET_SD(id, ch) \
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
@@ -359,6 +377,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-reset",
+ .data = uniphier_pxs3_sys_reset_data,
+ },
/* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-ld4-mio-reset",
@@ -392,6 +414,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
/* Peripheral reset */
{
.compatible = "socionext,uniphier-ld4-peri-reset",
@@ -421,6 +447,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-pxs3-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
/* Analog signal amplifiers reset */
{
.compatible = "socionext,uniphier-ld11-adamv-reset",
diff --git a/drivers/reset/reset-zx2967.c b/drivers/reset/reset-zx2967.c
deleted file mode 100644
index 4f319f7753d4..000000000000
--- a/drivers/reset/reset-zx2967.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * ZTE's zx2967 family reset controller driver
- *
- * Copyright (C) 2017 ZTE Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-
-struct zx2967_reset {
- void __iomem *reg_base;
- spinlock_t lock;
- struct reset_controller_dev rcdev;
-};
-
-static int zx2967_reset_act(struct reset_controller_dev *rcdev,
- unsigned long id, bool assert)
-{
- struct zx2967_reset *reset = NULL;
- int bank = id / 32;
- int offset = id % 32;
- u32 reg;
- unsigned long flags;
-
- reset = container_of(rcdev, struct zx2967_reset, rcdev);
-
- spin_lock_irqsave(&reset->lock, flags);
-
- reg = readl_relaxed(reset->reg_base + (bank * 4));
- if (assert)
- reg &= ~BIT(offset);
- else
- reg |= BIT(offset);
- writel_relaxed(reg, reset->reg_base + (bank * 4));
-
- spin_unlock_irqrestore(&reset->lock, flags);
-
- return 0;
-}
-
-static int zx2967_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return zx2967_reset_act(rcdev, id, true);
-}
-
-static int zx2967_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- return zx2967_reset_act(rcdev, id, false);
-}
-
-static const struct reset_control_ops zx2967_reset_ops = {
- .assert = zx2967_reset_assert,
- .deassert = zx2967_reset_deassert,
-};
-
-static int zx2967_reset_probe(struct platform_device *pdev)
-{
- struct zx2967_reset *reset;
- struct resource *res;
-
- reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL);
- if (!reset)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reset->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(reset->reg_base))
- return PTR_ERR(reset->reg_base);
-
- spin_lock_init(&reset->lock);
-
- reset->rcdev.owner = THIS_MODULE;
- reset->rcdev.nr_resets = resource_size(res) * 8;
- reset->rcdev.ops = &zx2967_reset_ops;
- reset->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
-}
-
-static const struct of_device_id zx2967_reset_dt_ids[] = {
- { .compatible = "zte,zx296718-reset", },
- {},
-};
-
-static struct platform_driver zx2967_reset_driver = {
- .probe = zx2967_reset_probe,
- .driver = {
- .name = "zx2967-reset",
- .of_match_table = zx2967_reset_dt_ids,
- },
-};
-builtin_platform_driver(zx2967_reset_driver);
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index 0fe6eac46512..65a9f6b892f0 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -47,7 +47,8 @@ config RPMSG_QCOM_SMD
platforms.
config RPMSG_VIRTIO
- tristate
+ tristate "Virtio RPMSG bus driver"
+ depends on HAS_DMA
select RPMSG
select VIRTIO
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index c71f4ab1ae17..9aa859502d27 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5dcc9bf1c5bc..40d76d2a5eff 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -227,6 +227,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
+ init_completion(&channel->intent_req_comp);
INIT_LIST_HEAD(&channel->done_intents);
INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
@@ -1148,19 +1149,38 @@ static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
{
struct glink_channel *channel = to_glink_channel(rpdev->ept);
- struct glink_core_rx_intent *intent;
+ struct device_node *np = rpdev->dev.of_node;
struct qcom_glink *glink = channel->glink;
- int num_intents = glink->intentless ? 0 : 5;
+ struct glink_core_rx_intent *intent;
+ const struct property *prop = NULL;
+ __be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
+ int num_intents;
+ int num_groups = 1;
+ __be32 *val = defaults;
+ int size;
+
+ if (glink->intentless)
+ return 0;
+
+ prop = of_find_property(np, "qcom,intents", NULL);
+ if (prop) {
+ val = prop->value;
+ num_groups = prop->length / sizeof(u32) / 2;
+ }
/* Channel is now open, advertise base set of intents */
- while (num_intents--) {
- intent = qcom_glink_alloc_intent(glink, channel, SZ_1K, true);
- if (!intent)
- break;
+ while (num_groups--) {
+ size = be32_to_cpup(val++);
+ num_intents = be32_to_cpup(val++);
+ while (num_intents--) {
+ intent = qcom_glink_alloc_intent(glink, channel, size,
+ true);
+ if (!intent)
+ break;
- qcom_glink_advertise_intent(glink, channel, intent);
+ qcom_glink_advertise_intent(glink, channel, intent);
+ }
}
-
return 0;
}
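Editor's aside, not part of the patch: a minimal standalone sketch of how a "qcom,intents"-style list of <size count> pairs is walked, assuming hypothetical values already in host byte order (the driver above handles the be32 DT cells itself).

/* Illustrative only; hypothetical values, endianness handling omitted. */
#include <stdio.h>

int main(void)
{
	/* e.g. qcom,intents = <1024 5>, <16384 2>; flattened into cells */
	unsigned int cells[] = { 1024, 5, 16384, 2 };
	int num_groups = sizeof(cells) / sizeof(cells[0]) / 2;
	const unsigned int *val = cells;

	while (num_groups--) {
		unsigned int size = *val++;
		unsigned int count = *val++;

		printf("advertise %u intents of %u bytes\n", count, size);
	}
	return 0;
}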
@@ -1237,11 +1257,16 @@ static int __qcom_glink_send(struct glink_channel *channel,
spin_lock_irqsave(&channel->intent_lock, flags);
idr_for_each_entry(&channel->riids, tmp, iid) {
if (tmp->size >= len && !tmp->in_use) {
- tmp->in_use = true;
- intent = tmp;
- break;
+ if (!intent)
+ intent = tmp;
+ else if (intent->size > tmp->size)
+ intent = tmp;
+ if (intent->size == len)
+ break;
}
}
+ if (intent)
+ intent->in_use = true;
spin_unlock_irqrestore(&channel->intent_lock, flags);
/* We found an available intent */
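Editor's aside, not part of the patch: a rough userspace-style sketch (made-up sizes) of the best-fit selection the loop above now performs, where the smallest free intent that still fits the message wins, with an early exit on an exact match.

/* Illustrative best-fit pick; the sizes and length here are hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int sizes[] = { 4096, 1024, 16384 };   /* free remote intents */
	unsigned int len = 800;                         /* message length */
	unsigned int best = 0;
	int i;

	for (i = 0; i < 3; i++) {
		if (sizes[i] < len)
			continue;                       /* too small, skip */
		if (!best || sizes[i] < best)
			best = sizes[i];                /* smaller fit found */
		if (best == len)
			break;                          /* exact fit, stop */
	}
	printf("chosen intent size: %u\n", best);       /* prints 1024 */
	return 0;
}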
@@ -1551,6 +1576,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
idr_init(&glink->rcids);
glink->mbox_client.dev = dev;
+ glink->mbox_client.knows_txdone = true;
glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
if (IS_ERR(glink->mbox_chan)) {
if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
@@ -1616,3 +1642,6 @@ void qcom_glink_native_unregister(struct qcom_glink *glink)
device_unregister(glink->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
+
+MODULE_DESCRIPTION("Qualcomm GLINK driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e0e58f3b1420..b59a31b079a5 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -433,6 +433,19 @@ config RTC_DRV_PCF85063
This driver can also be built as a module. If so, the module
will be called rtc-pcf85063.
+config RTC_DRV_PCF85363
+ tristate "NXP PCF85363"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the PCF85363 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85363.
+
+ The nvmem interface will be named pcf85363-#, where # is the
+ zero-based instance number.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350
This driver can also be built as a module. If so, the module
will be called "rtc-wm8350".
+config RTC_DRV_SC27XX
+ tristate "Spreadtrum SC27xx RTC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ If you say Y here you will get support for the RTC subsystem
+ of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs
+ include the SC2720, SC2721, SC2723, SC2730 and SC2731 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sc27xx.
+
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART
will be called rtc-moxart
config RTC_DRV_MT6397
- tristate "Mediatek Real Time Clock driver"
+ tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
help
- This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
+ This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
- Mediatek(R) RTC driver.
+ MediaTek(R) RTC driver.
+
+ If you want to use MediaTek(R) RTC interface, select Y or M here.
- If you want to use Mediatek(R) RTC interface, select Y or M here.
+config RTC_DRV_MT7622
+ tristate "MediaTek SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real time clock built into the MediaTek
+ SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-mt7622.
config RTC_DRV_XGENE
tristate "APM X-Gene RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 7230014c92af..f2f50c11dc38 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for RTC class/drivers.
#
@@ -102,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
+obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
@@ -113,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
@@ -143,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
+obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 2ed970d61da1..722d683e0b0f 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -161,6 +161,9 @@ static struct rtc_device *rtc_allocate_device(void)
device_initialize(&rtc->dev);
+ /* Drivers can revise this default after allocating the device. */
+ rtc->set_offset_nsec = NSEC_PER_SEC / 2;
+
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.class = rtc_class;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8cec9a02c0b8..672b192f8153 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (!next) {
+ if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
@@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
* to compensate for differences in the actual clock rate due to temperature,
* the crystal, capacitor, etc.
*
+ * The adjustment applied is as follows:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t0 is the measured length of 1 RTC second with offset = 0
+ *
* Kernel interface to adjust an rtc clock offset.
* Return 0 on success, or a negative number on error.
* If the rtc offset is not setable (or not implemented), return -EINVAL
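Editor's aside, not part of the patch: a quick numeric illustration of the documented relation t = t0 * (1 + offset * 1e-9), with a hypothetical offset value.

/* Standalone sketch: map a ppb offset to an adjusted RTC second length. */
#include <stdio.h>

int main(void)
{
	long offset = 12500;              /* ppb passed to rtc_set_offset() */
	double t0 = 1.0;                  /* measured RTC second at offset 0 */
	double t = t0 * (1.0 + offset * 1e-9);

	printf("adjusted second: %.9f s\n", t);   /* 1.000012500 s */
	return 0;
}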
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index fea9a60b06cf..b033bc556f5d 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client,
if (err)
return err;
- rtc = devm_rtc_device_register(&client->dev, "abx8xx",
- &abx80x_rtc_ops, THIS_MODULE);
-
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &abx80x_rtc_ops;
+
i2c_set_clientdata(client, rtc);
if (client->irq > 0) {
@@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client,
err = devm_add_action_or_reset(&client->dev,
rtc_calib_remove_sysfs_group,
&client->dev);
- if (err)
+ if (err) {
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
+ return err;
+ }
+
+ err = rtc_register_device(rtc);
return err;
}
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 21f355c37eab..1e4978c96ffd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -28,6 +28,8 @@
#define RTC_IRQ_AL_EN BIT(0)
#define RTC_IRQ_FREQ_EN BIT(1)
#define RTC_IRQ_FREQ_1HZ BIT(2)
+#define RTC_CCR 0x18
+#define RTC_CCR_MODE BIT(15)
#define RTC_TIME 0xC
#define RTC_ALARM1 0x10
@@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The information given in the Armada 388 functional spec is complex.
+ * They give two different formulas for calculating the offset value,
+ * but when considering "Offset" as an 8-bit signed integer, they both
+ * reduce down to (we shall rename "Offset" as "val" here):
+ *
+ * val = (f_ideal / f_measured - 1) / resolution where f_ideal = 32768
+ *
+ * Converting to time, f = 1/t:
+ * val = (t_measured / t_ideal - 1) / resolution where t_ideal = 1/32768
+ *
+ * => t_measured / t_ideal = val * resolution + 1
+ *
+ * "offset" in the RTC interface is defined as:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t is the desired period, t0 is the measured period with a zero
+ * offset, which is t_measured above. With t0 = t_measured and t = t_ideal,
+ * offset = (t_ideal / t_measured - 1) / 1e-9
+ *
+ * => t_ideal / t_measured = offset * 1e-9 + 1
+ *
+ * so:
+ *
+ * offset * 1e-9 + 1 = 1 / (val * resolution + 1)
+ *
+ * We want "resolution" to be an integer, so resolution = R * 1e-9, giving
+ * offset = 1e18 / (val * R + 1e9) - 1e9
+ * val = (1e18 / (offset + 1e9) - 1e9) / R
+ * with a common transformation:
+ * f(x) = 1e18 / (x + 1e9) - 1e9
+ * offset = f(val * R)
+ * val = f(offset) / R
+ *
+ * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb).
+ */
+static long armada38x_ppb_convert(long ppb)
+{
+ long div = ppb + 1000000000L;
+
+ return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L;
+}
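Editor's aside, not part of the patch: a rough standalone check (hypothetical values, plain 64-bit division standing in for div_s64) that the f(x) = 1e18 / (x + 1e9) - 1e9 transformation above is its own inverse, so read_offset and set_offset round-trip.

/* Illustrative only; mirrors the rounding used by armada38x_ppb_convert(). */
#include <stdio.h>

static long long ppb_convert(long long x)
{
	long long div = x + 1000000000LL;

	return (1000000000000000000LL + div / 2) / div - 1000000000LL;
}

int main(void)
{
	long long val = 10, R = 954;                  /* fine mode, 954 ppb/LSB */
	long long offset = ppb_convert(val * R);

	printf("val*R = %lld -> offset = %lld ppb\n", val * R, offset);  /* -9540 */
	printf("round trip = %lld ppb\n", ppb_convert(offset));          /* 9540 */
	return 0;
}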
+
+static int armada38x_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr, flags;
+ long ppb_cor;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ ppb_cor = (ccr & RTC_CCR_MODE ? 3815 : 954) * (s8)ccr;
+ /* ppb_cor + 1000000000L can never be zero */
+ *offset = armada38x_ppb_convert(ppb_cor);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_offset(struct device *dev, long offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr = 0;
+ long ppb_cor, off;
+
+ /*
+ * The hardware ppb_cor range is -128 * 3815 .. 127 * 3815, which
+ * corresponds to an offset range of -484270 .. 488558, so clamp the
+ * input to that. This not only keeps "off" in range but also avoids
+ * the division by zero in armada38x_ppb_convert().
+ */
+ offset = clamp(offset, -484270L, 488558L);
+
+ ppb_cor = armada38x_ppb_convert(offset);
+
+ /*
+ * Use low update mode where possible, which gives a better
+ * resolution of correction.
+ */
+ off = DIV_ROUND_CLOSEST(ppb_cor, 954);
+ if (off > 127 || off < -128) {
+ ccr = RTC_CCR_MODE;
+ off = DIV_ROUND_CLOSEST(ppb_cor, 3815);
+ }
+
+ /*
+ * Armada 388 requires a bit pattern in bits 14..8 depending on
+ * the sign bit: { 0, ~S, S, S, S, S, S }
+ */
+ ccr |= (off & 0x3fff) ^ 0x2000;
+ rtc_delayed_write(ccr, rtc, RTC_CCR);
+
+ return 0;
+}
+
static const struct rtc_class_ops armada38x_rtc_ops = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
.set_alarm = armada38x_rtc_set_alarm,
.alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct armada38x_rtc_data armada38x_data = {
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e221b78b6f10..de81ecedd571 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -42,8 +42,6 @@
#define at91_rtc_write(field, val) \
writel_relaxed((val), at91_rtc_regs + field)
-#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
-
struct at91_rtc_config {
bool use_shadow_imr;
};
@@ -51,7 +49,6 @@ struct at91_rtc_config {
static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static DECLARE_COMPLETION(at91_rtc_upd_rdy);
-static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
static DEFINE_SPINLOCK(at91_rtc_lock);
@@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/*
* The Calendar Alarm register does not have a field for
- * the year - so these will return an invalid value. When an
- * alarm is set, at91_alarm_year will store the current year.
+ * the year - so these will return an invalid value.
*/
tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */
tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */
@@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
- tm->tm_year = at91_alarm_year - 1900;
+ tm->tm_year = -1;
alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
- dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__,
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ alrm->enabled ? "en" : "dis");
return 0;
}
@@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
- at91_alarm_year = tm.tm_year;
-
tm.tm_mon = alrm->time.tm_mon;
tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
@@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
+ tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
diff --git a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h
index ecab76a3207c..513b9bedd2c8 100644
--- a/drivers/rtc/rtc-core.h
+++ b/drivers/rtc/rtc-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_RTC_INTF_DEV
extern void __init rtc_dev_init(void);
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 00efe24a6063..215eac68ae2d 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -71,9 +71,9 @@ static void rtc_uie_task(struct work_struct *work)
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
{
- struct rtc_device *rtc = (struct rtc_device *)data;
+ struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
@@ -460,7 +460,7 @@ void rtc_dev_prepare(struct rtc_device *rtc)
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
INIT_WORK(&rtc->uie_task, rtc_uie_task);
- setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+ timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif
cdev_init(&rtc->char_dev, &rtc_dev_fops);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 72b22935eb62..d8df2e9e14ad 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
spi_message_add_tail(x, m);
}
-static ssize_t
-ds1305_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static ssize_t
-ds1305_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static struct bin_attribute nvram = {
- .attr.name = "nvram",
- .attr.mode = S_IRUGO | S_IWUSR,
- .read = ds1305_nvram_read,
- .write = ds1305_nvram_write,
- .size = DS1305_NVRAM_LEN,
+static struct nvmem_config ds1305_nvmem_cfg = {
+ .name = "ds1305_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = DS1305_NVRAM_LEN,
+ .reg_read = ds1305_nvram_read,
+ .reg_write = ds1305_nvram_write,
};
/*----------------------------------------------------------------------*/
@@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305",
- &ds1305_ops, THIS_MODULE);
+ ds1305->rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(ds1305->rtc)) {
- status = PTR_ERR(ds1305->rtc);
+ return PTR_ERR(ds1305->rtc);
+ }
+
+ ds1305->rtc->ops = &ds1305_ops;
+
+ ds1305_nvmem_cfg.priv = ds1305;
+ ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg;
+ ds1305->rtc->nvram_old_abi = true;
+
+ status = rtc_register_device(ds1305->rtc);
+ if (status) {
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
return status;
}
@@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi)
}
}
- /* export NVRAM */
- status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
- if (status < 0) {
- dev_err(&spi->dev, "register nvram --> %d\n", status);
- }
-
return 0;
}
@@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
-
/* carefully shut down irq and workqueue, if present */
if (spi->irq) {
set_bit(FLAG_EXITING, &ds1305->flags);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e7d9215c9201..923dde912f60 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = {
.compatible = "isil,isl12057",
.data = (void *)ds_1337
},
+ {
+ .compatible = "epson,rx8130",
+ .data = (void *)rx_8130
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
@@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "PT7C4338", .driver_data = ds_1307 },
{ .id = "RX8025", .driver_data = rx_8025 },
{ .id = "ISL12057", .driver_data = ds_1337 },
+ { .id = "RX8130", .driver_data = rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
@@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
* Alarm support for mcp794xx devices.
*/
-#define MCP794XX_REG_WEEKDAY 0x3
-#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
@@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * The RTC weekday may be arbitrary, so calculate the alarm weekday from
+ * the current weekday read back from the RTC timekeeping registers
+ */
+static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
+{
+ struct rtc_time tm_now;
+ int days_now, days_alarm, ret;
+
+ ret = ds1307_get_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
+ days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
+
+ return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
+}
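Editor's aside, not part of the patch: a small standalone sketch of the weekday arithmetic above, with made-up day counts in place of the driver's rtc_tm_to_time64()/div_s64() conversion.

/* Illustrative only: alarm weekday derived from the current weekday. */
#include <stdio.h>

int main(void)
{
	int wday_now = 2;            /* assume "now" reads back as a Tuesday */
	long days_now = 19700;       /* days since the epoch, hypothetical */
	long days_alarm = 19703;     /* the alarm falls three days later */

	/* the register expects 1..7, hence the trailing + 1 */
	int alarm_wday = (wday_now + days_alarm - days_now) % 7 + 1;

	printf("alarm weekday register value: %d\n", alarm_wday);   /* 6 */
	return 0;
}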
+
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[10];
- int ret;
+ int wday, ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
+ wday = mcp794xx_alm_weekday(dev, &t->time);
+ if (wday < 0)
+ return wday;
+
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
regs[3] = bin2bcd(t->time.tm_sec);
regs[4] = bin2bcd(t->time.tm_min);
regs[5] = bin2bcd(t->time.tm_hour);
- regs[6] = bin2bcd(t->time.tm_wday + 1);
+ regs[6] = wday;
regs[7] = bin2bcd(t->time.tm_mday);
regs[8] = bin2bcd(t->time.tm_mon + 1);
@@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client,
{
struct ds1307 *ds1307;
int err = -ENODEV;
- int tmp, wday;
+ int tmp;
const struct chip_desc *chip;
bool want_irq;
bool ds1307_can_wakeup_device = false;
unsigned char regs[8];
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
- struct rtc_time tm;
- unsigned long timestamp;
u8 trickle_charger_setup = 0;
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
@@ -1641,25 +1665,6 @@ read_rtc:
bin2bcd(tmp));
}
- /*
- * Some IPs have weekday reset value = 0x1 which might not correct
- * hence compute the wday using the current date/month/year values
- */
- ds1307_get_time(ds1307->dev, &tm);
- wday = tm.tm_wday;
- timestamp = rtc_tm_to_time64(&tm);
- rtc_time64_to_tm(timestamp, &tm);
-
- /*
- * Check if reset wday is different from the computed wday
- * If different then set the wday which we computed using
- * timestamp
- */
- if (wday != tm.tm_wday)
- regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
- MCP794XX_REG_WEEKDAY_WDAY_MASK,
- tm.tm_wday + 1);
-
if (want_irq || ds1307_can_wakeup_device) {
device_set_wakeup_capable(ds1307->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index aa0d2c6f1edc..4d5b007d7fc6 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi)
return res;
}
+static const struct of_device_id ds1390_of_match[] = {
+ { .compatible = "dallas,ds1390" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1390_of_match);
+
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
+ .of_match_table = of_match_ptr(ds1390_of_match),
},
.probe = ds1390_probe,
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1b2dcb58c0ab..1e95312a6f2e 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
};
-static ssize_t
-ds1511_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- *buf++ = rtc_read(DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ *(char *)buf++ = rtc_read(DS1511_RAMDATA);
- return count;
+ return 0;
}
-static ssize_t
-ds1511_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- rtc_write(*buf++, DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ rtc_write(*(char *)buf++, DS1511_RAMDATA);
- return count;
+ return 0;
}
-static struct bin_attribute ds1511_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
+static struct nvmem_config ds1511_nvmem_cfg = {
+ .name = "ds1511_nvram",
+ .word_size = 1,
+ .stride = 1,
.size = DS1511_RAM_MAX,
- .read = ds1511_nvram_read,
- .write = ds1511_nvram_write,
+ .reg_read = ds1511_nvram_read,
+ .reg_write = ds1511_nvram_write,
};
static int ds1511_rtc_probe(struct platform_device *pdev)
@@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &ds1511_rtc_ops, THIS_MODULE);
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ pdata->rtc->ops = &ds1511_rtc_ops;
+
+ ds1511_nvmem_cfg.priv = &pdev->dev;
+ pdata->rtc->nvmem_config = &ds1511_nvmem_cfg;
+ pdata->rtc->nvram_old_abi = true;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (ret)
+ return ret;
+
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
@@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
}
}
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret)
- dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
- ds1511_nvram_attr.attr.name);
-
- return 0;
-}
-
-static int ds1511_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (pdata->irq > 0) {
- /*
- * disable the alarm interrupt
- */
- rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
- rtc_read(RTC_CMD1);
- }
return 0;
}
@@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511");
static struct platform_driver ds1511_rtc_driver = {
.probe = ds1511_rtc_probe,
- .remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
},
diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c
index 1a7f1d1bc174..6c037dc4e3dc 100644
--- a/drivers/rtc/rtc-efi-platform.c
+++ b/drivers/rtc/rtc-efi-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Moved from arch/ia64/kernel/time.c
*
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 64989afffa3d..ff65a7d2b9c9 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int timeout = 1000;
+ int timeout = 10000;
do {
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
@@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int ret, timeout = 1000;
+ int ret, timeout = 10000;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
@@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
if (ret) {
- dev_err(&pdev->dev, "Could not write write to RTC registers\n");
+ dev_err(&pdev->dev, "Could not write to RTC registers\n");
return ret;
}
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f4c070ea8384..c90fba3ed861 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -154,6 +154,8 @@ struct m41t80_data {
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw sqw;
+ unsigned long freq;
+ unsigned int sqwe;
#endif
};
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
#ifdef CONFIG_COMMON_CLK
#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+ return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+ M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int ret = i2c_smbus_read_byte_data(client, reg_sqw);
- unsigned long val = M41T80_SQW_MAX_FREQ;
if (ret < 0)
return 0;
+ return m41t80_decode_freq(ret >> 4);
+}
- ret >>= 4;
- if (ret == 0)
- val = 0;
- else if (ret > 1)
- val = val / (1 << ret);
-
- return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return sqw_to_m41t80_data(hw)->freq;
}
static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- int i, freq = M41T80_SQW_MAX_FREQ;
-
- if (freq <= rate)
- return freq;
-
- for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
- freq /= 1 << i;
- if (freq <= rate)
- return freq;
- }
-
- return 0;
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ return M41T80_SQW_MAX_FREQ;
+ if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ return M41T80_SQW_MAX_FREQ / 4;
+ if (!rate)
+ return 0;
+ return 1 << ilog2(rate);
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
M41T80_REG_WDAY : M41T80_REG_SQW;
int reg, ret, val = 0;
- if (rate) {
- if (!is_power_of_2(rate))
- return -EINVAL;
- val = ilog2(rate);
- if (val == ilog2(M41T80_SQW_MAX_FREQ))
- val = 1;
- else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
- val = ilog2(M41T80_SQW_MAX_FREQ) - val;
- else
- return -EINVAL;
- }
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ val = 1;
+ else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ val = 2;
+ else if (rate)
+ val = 15 - ilog2(rate);
reg = i2c_smbus_read_byte_data(client, reg_sqw);
if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
reg = (reg & 0x0f) | (val << 4);
ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
- if (ret < 0)
- return ret;
-
- return -EINVAL;
+ if (!ret)
+ m41t80->freq = m41t80_decode_freq(val);
+ return ret;
}
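Editor's aside, not part of the patch: a rough consistency check (assuming the usual 32768 Hz square-wave maximum, standalone code) that the nibble written by set_rate decodes back to the requested frequency.

/* Illustrative only: SQW nibble <-> frequency mapping for one sample rate. */
#include <stdio.h>

#define MAX_FREQ 32768UL

static unsigned long decode(int setting)
{
	return setting == 0 ? 0 :
	       setting == 1 ? MAX_FREQ : MAX_FREQ >> setting;
}

int main(void)
{
	unsigned long rate = 512;     /* requested rate, assumed power of two */
	int log = 0, val;

	while ((1UL << (log + 1)) <= rate)    /* ilog2(rate) */
		log++;

	if (rate >= MAX_FREQ)
		val = 1;
	else if (rate >= MAX_FREQ / 4)
		val = 2;
	else
		val = 15 - log;

	printf("val = %d -> %lu Hz\n", val, decode(val));   /* val = 6 -> 512 Hz */
	return 0;
}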
static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
else
ret &= ~M41T80_ALMON_SQWE;
- return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ if (!ret)
+ m41t80->sqwe = enable;
+ return ret;
}
static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
static int m41t80_sqw_is_prepared(struct clk_hw *hw)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
- struct i2c_client *client = m41t80->client;
- int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
- if (ret < 0)
- return ret;
-
- return !!(ret & M41T80_ALMON_SQWE);
+ return sqw_to_m41t80_data(hw)->sqwe;
}
static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
init.parent_names = NULL;
init.num_parents = 0;
m41t80->sqw.init = &init;
+ m41t80->freq = m41t80_get_freq(m41t80);
/* optional override of the clockname */
of_property_read_string(node, "clock-output-names", &init.name);
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 02af045305dd..d9aea9b6d9cd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
.proc = m48t86_rtc_proc,
};
-static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
+ ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i));
+ m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write,
- M48T86_NVRAM_LEN);
-
/*
* The RTC is an optional feature at purchase time on some Technologic Systems
* boards. Verify that it actually exists by checking if the last two bytes
@@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
return false;
}
+static struct nvmem_config m48t86_nvmem_cfg = {
+ .name = "m48t86_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = M48T86_NVRAM_LEN,
+ .reg_read = m48t86_nvram_read,
+ .reg_write = m48t86_nvram_write,
+};
+
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
struct resource *res;
unsigned char reg;
+ int err;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86",
- &m48t86_rtc_ops, THIS_MODULE);
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
+ info->rtc->ops = &m48t86_rtc_ops;
+
+ m48t86_nvmem_cfg.priv = &pdev->dev;
+ info->rtc->nvmem_config = &m48t86_nvmem_cfg;
+ info->rtc->nvram_old_abi = true;
+
+ err = rtc_register_device(info->rtc);
+ if (err)
+ return err;
+
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
dev_info(&pdev->dev, "battery %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
- if (device_create_bin_file(&pdev->dev, &bin_attr_nvram))
- dev_err(&pdev->dev, "failed to create nvram sysfs entry\n");
-
- return 0;
-}
-
-static int m48t86_rtc_remove(struct platform_device *pdev)
-{
- device_remove_bin_file(&pdev->dev, &bin_attr_nvram);
return 0;
}
@@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.name = "rtc-m48t86",
},
.probe = m48t86_rtc_probe,
- .remove = m48t86_rtc_remove,
};
module_platform_driver(m48t86_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
new file mode 100644
index 000000000000..d79b9ae4d237
--- /dev/null
+++ b/drivers/rtc/rtc-mt7622.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for MediaTek SoC based RTC
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MTK_RTC_DEV KBUILD_MODNAME
+
+#define MTK_RTC_PWRCHK1 0x4
+#define RTC_PWRCHK1_MAGIC 0xc6
+
+#define MTK_RTC_PWRCHK2 0x8
+#define RTC_PWRCHK2_MAGIC 0x9a
+
+#define MTK_RTC_KEY 0xc
+#define RTC_KEY_MAGIC 0x59
+
+#define MTK_RTC_PROT1 0x10
+#define RTC_PROT1_MAGIC 0xa3
+
+#define MTK_RTC_PROT2 0x14
+#define RTC_PROT2_MAGIC 0x57
+
+#define MTK_RTC_PROT3 0x18
+#define RTC_PROT3_MAGIC 0x67
+
+#define MTK_RTC_PROT4 0x1c
+#define RTC_PROT4_MAGIC 0xd2
+
+#define MTK_RTC_CTL 0x20
+#define RTC_RC_STOP BIT(0)
+
+#define MTK_RTC_DEBNCE 0x2c
+#define RTC_DEBNCE_MASK GENMASK(2, 0)
+
+#define MTK_RTC_INT 0x30
+#define RTC_INT_AL_STA BIT(4)
+
+/*
+ * Ranges from 0x40 to 0x78 provide RTC time setup for year, month,
+ * day of month, day of week, hour, minute and second.
+ */
+#define MTK_RTC_TREG(_t, _f) (0x40 + (0x4 * (_f)) + ((_t) * 0x20))
+
+#define MTK_RTC_AL_CTL 0x7c
+#define RTC_AL_EN BIT(0)
+#define RTC_AL_ALL GENMASK(7, 0)
+
+/*
+ * The offset used to translate the year between struct rtc_time and
+ * the hardware register MTK_RTC_TREG(x, MTK_YEA)
+ */
+#define MTK_RTC_TM_YR_OFFSET 100
+
+/*
+ * The lowest valid tm_year. The RTC hardware would incorrectly treat
+ * tm_year 100 as a non-leap year, so it must be excluded from the
+ * valid range as well.
+ */
+#define MTK_RTC_TM_YR_L (MTK_RTC_TM_YR_OFFSET + 1)
+
+/*
+ * The highest year the MT7622 RTC can hold is 99; past 99 the year
+ * register wraps around to 0.
+ */
+#define MTK_RTC_HW_YR_LIMIT 99
+
+/* The highest value for the valid tm_year */
+#define MTK_RTC_TM_YR_H (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)
+
+/* Simple macro to check whether the hardware supports the given tm_year */
+#define MTK_RTC_TM_YR_VALID(_y) ((_y) >= MTK_RTC_TM_YR_L && \
+ (_y) <= MTK_RTC_TM_YR_H)
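Editor's aside, not part of the patch: a quick spot check (hypothetical year, standalone) of the translation and validity window defined above.

/* Illustrative only: tm_year 117 (2017) maps to hardware year 17. */
#include <stdio.h>

int main(void)
{
	int tm_year = 117;                           /* years since 1900 */
	int hw_year = tm_year - 100;                 /* value for MTK_YEA */
	int valid = tm_year >= 101 && tm_year <= 199;

	printf("hw_year = %d, valid = %d\n", hw_year, valid);   /* 17, 1 */
	return 0;
}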
+
+/* The RTC provides two functions: the time counter and the alarm. */
+enum {
+ MTK_TC,
+ MTK_AL,
+};
+
+/* Indexes of the relevant time-field registers used with MTK_RTC_TREG */
+enum {
+ MTK_YEA,
+ MTK_MON,
+ MTK_DOM,
+ MTK_DOW,
+ MTK_HOU,
+ MTK_MIN,
+ MTK_SEC
+};
+
+struct mtk_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+};
+
+static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ writel_relaxed(val, rtc->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg)
+{
+ return readl_relaxed(rtc->base + reg);
+}
+
+static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mtk_r32(rtc, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(rtc, reg, val);
+}
+
+static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, 0, val);
+}
+
+static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, val, 0);
+}
+
+static void mtk_rtc_hw_init(struct mtk_rtc *hw)
+{
+ /* Write the init sequence that allows the RTC to start working */
+ mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC);
+ mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC);
+ mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0);
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+}
+
+static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year, mon, mday, wday, hour, min, sec;
+
+ /*
+ * Re-read until the seconds field is unchanged, which ensures all
+ * fields are read in a consistent state. Note that MTK_SEC must be
+ * read first; when two consecutive reads of MTK_SEC return the same
+ * value, the other fields are guaranteed not to have changed.
+ */
+ do {
+ sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC));
+ min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN));
+ hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU));
+ wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW));
+ mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM));
+ mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON));
+ year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA));
+ } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)));
+
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_wday = wday;
+ tm->tm_mday = mday;
+ tm->tm_mon = mon - 1;
+
+ /* Rebase to the absolute year which userspace queries */
+ tm->tm_year = year + MTK_RTC_TM_YR_OFFSET;
+}
+
+static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year;
+
+ /* Rebase to the relative year which RTC hardware requires */
+ year = tm->tm_year - MTK_RTC_TM_YR_OFFSET;
+
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec);
+}
+
+static irqreturn_t mtk_rtc_alarmirq(int irq, void *id)
+{
+ struct mtk_rtc *hw = (struct mtk_rtc *)id;
+ u32 irq_sta;
+
+ irq_sta = mtk_r32(hw, MTK_RTC_INT);
+ if (irq_sta & RTC_INT_AL_STA) {
+ /* Stopping the alarm also implicitly disables the alarm interrupt */
+ mtk_w32(hw, MTK_RTC_AL_CTL, 0);
+ rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Ack alarm interrupt status */
+ mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (!MTK_RTC_TM_YR_VALID(tm->tm_year))
+ return -EINVAL;
+
+ /* Stop the time counter before setting a new time */
+ mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC);
+
+ /* Restart the time counter */
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ return 0;
+}
+
+static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN);
+ wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA);
+
+ return 0;
+}
+
+static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year))
+ return -EINVAL;
+
+ /*
+ * Stopping the alarm also implicitly disables the alarm interrupt
+ * before a new alarm is set.
+ */
+ mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN);
+
+ /*
+ * Avoid contention between mtk_rtc_setalarm and the IRQ handler:
+ * with the interrupt disabled, wait for any pending IRQ handler to
+ * complete.
+ */
+ synchronize_irq(hw->irq);
+
+ mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ /* Restart the alarm with the new setup */
+ mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops mtk_rtc_ops = {
+ .read_time = mtk_rtc_gettime,
+ .set_time = mtk_rtc_settime,
+ .read_alarm = mtk_rtc_getalarm,
+ .set_alarm = mtk_rtc_setalarm,
+};
+
+static const struct of_device_id mtk_rtc_match[] = {
+ { .compatible = "mediatek,mt7622-rtc" },
+ { .compatible = "mediatek,soc-rtc" },
+ {},
+};
+
+static int mtk_rtc_probe(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw;
+ struct resource *res;
+ int ret;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base))
+ return PTR_ERR(hw->base);
+
+ hw->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock\n");
+ return PTR_ERR(hw->clk);
+ }
+
+ ret = clk_prepare_enable(hw->clk);
+ if (ret)
+ return ret;
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ ret = hw->irq;
+ goto err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ goto err;
+ }
+
+ mtk_rtc_hw_init(hw);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &mtk_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hw->rtc)) {
+ ret = PTR_ERR(hw->rtc);
+ dev_err(&pdev->dev, "Unable to register device\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ clk_disable_unprepare(hw->clk);
+
+ return ret;
+}
+
+static int mtk_rtc_remove(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(hw->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_rtc_suspend(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static int mtk_rtc_resume(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume);
+
+#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops)
+#else /* !CONFIG_PM_SLEEP */
+#define MTK_RTC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver mtk_rtc_driver = {
+ .probe = mtk_rtc_probe,
+ .remove = mtk_rtc_remove,
+ .driver = {
+ .name = MTK_RTC_DEV,
+ .of_match_table = mtk_rtc_match,
+ .pm = MTK_RTC_PM_OPS,
+ },
+};
+
+module_platform_driver(mtk_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek SoC based RTC Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 13f7cd11c07e..1d666ac9ef70 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,10 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_SCRATCH0_REG 0x60
+#define OMAP_RTC_SCRATCH1_REG 0x64
+#define OMAP_RTC_SCRATCH2_REG 0x68
+
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
@@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; i++)
+ val[i] = rtc_readl(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4));
+
+ return 0;
+}
+
+static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ for (i = 0; i < bytes / 4; i++)
+ rtc_writel(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static struct nvmem_config omap_rtc_nvmem_config = {
+ .name = "omap_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG,
+ .reg_read = omap_rtc_scratch_read,
+ .reg_write = omap_rtc_scratch_write,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &omap_rtc_ops, THIS_MODULE);
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto err;
}
+ rtc->rtc->ops = &omap_rtc_ops;
+ omap_rtc_nvmem_config.priv = rtc;
+ rtc->rtc->nvmem_config = &omap_rtc_nvmem_config;
+
/* handle periodic and alarm irqs */
ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
dev_name(&rtc->rtc->dev), rtc);
@@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(rtc->pctldev);
+ ret = PTR_ERR(rtc->pctldev);
+ goto err;
}
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ goto err;
+
return 0;
err:
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 28c48b3c1946..c312af0db729 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,6 +35,9 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_OFFSET 0x0e
+#define REG_OFFSET_MODE BIT(7)
+
struct pcf8523 {
struct rtc_device *rtc;
};
@@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
#define pcf8523_rtc_ioctl NULL
#endif
+static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+ u8 value;
+ s8 val;
+
+ err = pcf8523_read(client, REG_OFFSET, &value);
+ if (err < 0)
+ return err;
+
+ /* sign extend the 7-bit offset value */
+ val = value << 1;
+ *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+
+ return 0;
+}
+
+static int pcf8523_rtc_set_offset(struct device *dev, long offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ long reg_m0, reg_m1;
+ u8 value;
+
+ reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
+ reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
+
+ if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
+ value = reg_m0 & 0x7f;
+ else
+ value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+
+ return pcf8523_write(client, REG_OFFSET, value);
+}
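Editor's aside, not part of the patch: a standalone sketch (hypothetical offset, plain rounding division in place of DIV_ROUND_CLOSEST) of how the code above picks between the two PCF8523 step sizes.

/* Illustrative only: choose the mode whose step size gives the smaller error. */
#include <stdio.h>
#include <stdlib.h>

static long round_div(long a, long b)
{
	return (a + (a < 0 ? -b / 2 : b / 2)) / b;
}

int main(void)
{
	long offset = 30000;                     /* requested correction, ppb */
	long m0 = round_div(offset, 4340);       /* mode 0: 4.34 ppm steps */
	long m1 = round_div(offset, 4069);       /* mode 1: 4.069 ppm steps */

	if (labs(m0 * 4340 - offset) < labs(m1 * 4069 - offset))
		printf("mode 0, register value %ld\n", m0);   /* 7, error 380 ppb */
	else
		printf("mode 1, register value %ld\n", m1);
	return 0;
}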
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
.ioctl = pcf8523_rtc_ioctl,
+ .read_offset = pcf8523_rtc_read_offset,
+ .set_offset = pcf8523_rtc_set_offset,
};
static int pcf8523_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
new file mode 100644
index 000000000000..ea04e9f0930b
--- /dev/null
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/rtc/rtc-pcf85363.c
+ *
+ * Driver for NXP PCF85363 real-time clock.
+ *
+ * Copyright (C) 2017 Eric Nelson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/*
+ * Date/Time registers
+ */
+#define DT_100THS 0x00
+#define DT_SECS 0x01
+#define DT_MINUTES 0x02
+#define DT_HOURS 0x03
+#define DT_DAYS 0x04
+#define DT_WEEKDAYS 0x05
+#define DT_MONTHS 0x06
+#define DT_YEARS 0x07
+
+/*
+ * Alarm registers
+ */
+#define DT_SECOND_ALM1 0x08
+#define DT_MINUTE_ALM1 0x09
+#define DT_HOUR_ALM1 0x0a
+#define DT_DAY_ALM1 0x0b
+#define DT_MONTH_ALM1 0x0c
+#define DT_MINUTE_ALM2 0x0d
+#define DT_HOUR_ALM2 0x0e
+#define DT_WEEKDAY_ALM2 0x0f
+#define DT_ALARM_EN 0x10
+
+/*
+ * Time stamp registers
+ */
+#define DT_TIMESTAMP1 0x11
+#define DT_TIMESTAMP2 0x17
+#define DT_TIMESTAMP3 0x1d
+#define DT_TS_MODE 0x23
+
+/*
+ * control registers
+ */
+#define CTRL_OFFSET 0x24
+#define CTRL_OSCILLATOR 0x25
+#define CTRL_BATTERY 0x26
+#define CTRL_PIN_IO 0x27
+#define CTRL_FUNCTION 0x28
+#define CTRL_INTA_EN 0x29
+#define CTRL_INTB_EN 0x2a
+#define CTRL_FLAGS 0x2b
+#define CTRL_RAMBYTE 0x2c
+#define CTRL_WDOG 0x2d
+#define CTRL_STOP_EN 0x2e
+#define CTRL_RESETS 0x2f
+#define CTRL_RAM 0x40
+
+#define NVRAM_SIZE 0x40
+
+static struct i2c_driver pcf85363_driver;
+
+struct pcf85363 {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct nvmem_config nvmem_cfg;
+ struct regmap *regmap;
+};
+
+static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int ret, len = sizeof(buf);
+
+ /* read the RTC date and time registers all at once */
+ ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len);
+ if (ret) {
+ dev_err(dev, "%s: error %d\n", __func__, ret);
+ return ret;
+ }
+
+ tm->tm_year = bcd2bin(buf[DT_YEARS]);
+ /* adjust for 1900 base of rtc_time */
+ tm->tm_year += 100;
+
+ tm->tm_wday = buf[DT_WEEKDAYS] & 7;
+ buf[DT_SECS] &= 0x7F;
+ tm->tm_sec = bcd2bin(buf[DT_SECS]);
+ buf[DT_MINUTES] &= 0x7F;
+ tm->tm_min = bcd2bin(buf[DT_MINUTES]);
+ tm->tm_hour = bcd2bin(buf[DT_HOURS]);
+ tm->tm_mday = bcd2bin(buf[DT_DAYS]);
+ tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1;
+
+ return 0;
+}
+
+static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int len = sizeof(buf);
+
+ buf[DT_100THS] = 0;
+ buf[DT_SECS] = bin2bcd(tm->tm_sec);
+ buf[DT_MINUTES] = bin2bcd(tm->tm_min);
+ buf[DT_HOURS] = bin2bcd(tm->tm_hour);
+ buf[DT_DAYS] = bin2bcd(tm->tm_mday);
+ buf[DT_WEEKDAYS] = tm->tm_wday;
+ buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1);
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ return regmap_bulk_write(pcf85363->regmap, DT_100THS,
+ buf, len);
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = pcf85363_rtc_read_time,
+ .set_time = pcf85363_rtc_set_time,
+};
+
+static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int pcf85363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf85363 *pcf85363;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
+ GFP_KERNEL);
+ if (!pcf85363)
+ return -ENOMEM;
+
+ pcf85363->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(pcf85363->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(pcf85363->regmap);
+ }
+
+ pcf85363->dev = &client->dev;
+ i2c_set_clientdata(client, pcf85363);
+
+ pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+ if (IS_ERR(pcf85363->rtc))
+ return PTR_ERR(pcf85363->rtc);
+
+ pcf85363->nvmem_cfg.name = "pcf85363-";
+ pcf85363->nvmem_cfg.word_size = 1;
+ pcf85363->nvmem_cfg.stride = 1;
+ pcf85363->nvmem_cfg.size = NVRAM_SIZE;
+ pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read;
+ pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write;
+ pcf85363->nvmem_cfg.priv = pcf85363;
+ pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg;
+ pcf85363->rtc->ops = &rtc_ops;
+
+ return rtc_register_device(pcf85363->rtc);
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "nxp,pcf85363" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver pcf85363_driver = {
+ .driver = {
+ .name = "pcf85363",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = pcf85363_probe,
+};
+
+module_i2c_driver(pcf85363_driver);
+
+MODULE_AUTHOR("Eric Nelson");
+MODULE_DESCRIPTION("pcf85363 I2C RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index cea6ea4df970..3efc86c25d27 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- return pcf8563_set_alarm_mode(client, 1);
+ return pcf8563_set_alarm_mode(client, !!tm->enabled);
}
static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
- return clkout_rates[ret];
+ return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
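
Two small pcf8563 fixes above: the alarm mode now follows !!tm->enabled instead of being forced on, and the clock-output rate table is indexed with the masked register value rather than the I2C call's return code. A hedged sketch of the corrected lookup (table order assumed from the PCF8563 datasheet):

static long sketch_clkout_rate(unsigned char reg)
{
	static const int clkout_rates[] = { 32768, 1024, 32, 1 };

	/* Index with the masked register value, not with the I2C return code. */
	return clkout_rates[reg & 0x03];	/* 0x03 stands in for PCF8563_REG_CLKO_F_MASK */
}
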
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e19c59f..82eb7da2c478 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev)
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
- free_irq(adev->irq[0], ldata);
+ if (adev->irq[0])
+ free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
- iounmap(ldata->base);
- kfree(ldata);
amba_release_regions(adev);
return 0;
@@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
- struct rtc_class_ops *ops = &vendor->ops;
+ struct rtc_class_ops *ops;
unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
goto err_req;
- ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL);
- if (!ldata) {
+ ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
+ GFP_KERNEL);
+ ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
+ GFP_KERNEL);
+ if (!ldata || !ops) {
ret = -ENOMEM;
goto out;
}
- ldata->vendor = vendor;
-
- ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
+ ldata->vendor = vendor;
+ ldata->base = devm_ioremap(&adev->dev, adev->res.start,
+ resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
- goto out_no_remap;
+ goto out;
}
amba_set_drvdata(adev, ldata);
@@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ if (!adev->irq[0]) {
+ /* When there's no interrupt, no point in exposing the alarm */
+ ops->read_alarm = NULL;
+ ops->set_alarm = NULL;
+ ops->alarm_irq_enable = NULL;
+ }
+
device_init_wakeup(&adev->dev, true);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
- goto out_no_rtc;
+ goto out;
}
- if (request_irq(adev->irq[0], pl031_interrupt,
- vendor->irqflags, "rtc-pl031", ldata)) {
- ret = -EIO;
- goto out_no_irq;
+ if (adev->irq[0]) {
+ ret = request_irq(adev->irq[0], pl031_interrupt,
+ vendor->irqflags, "rtc-pl031", ldata);
+ if (ret)
+ goto out_no_irq;
+ dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
return 0;
out_no_irq:
rtc_device_unregister(ldata->rtc);
-out_no_rtc:
- iounmap(ldata->base);
-out_no_remap:
- kfree(ldata);
out:
amba_release_regions(adev);
err_req:
@@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = {
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
};
-static struct amba_id pl031_ids[] = {
+static const struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
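
The pl031 conversion to managed (devm_*) resources also duplicates the const vendor ops per device, so the alarm callbacks can be cleared on IRQ-less boards without touching data shared with other instances. A minimal sketch of that pattern, condensed from the hunks above:

	struct rtc_class_ops *ops;

	/* Per-device mutable copy of the shared const template. */
	ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
			   GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	if (!adev->irq[0]) {
		/* No interrupt wired up: hide the alarm interface entirely. */
		ops->read_alarm = NULL;
		ops->set_alarm = NULL;
		ops->alarm_irq_enable = NULL;
	}
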
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index aa09771de04f..3d6174eb32f6 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
static int rv3029_eeprom_write(struct device *dev, u8 reg,
u8 const buf[], size_t len)
{
- int ret, err;
+ int ret;
size_t i;
u8 tmp;
- err = rv3029_eeprom_enter(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_enter(dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < len; i++, reg++) {
ret = rv3029_read_regs(dev, reg, &tmp, 1);
@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
break;
}
- err = rv3029_eeprom_exit(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_exit(dev);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static int rv3029_eeprom_update_bits(struct device *dev,
@@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = {
MODULE_DEVICE_TABLE(i2c, rv3029_id);
static const struct of_device_id rv3029_of_match[] = {
+ { .compatible = "microcrystal,rv3029" },
+ /* Backward compatibility only, do not use compatibles below: */
{ .compatible = "rv3029" },
{ .compatible = "rv3029c2" },
{ .compatible = "mc,rv3029c2" },
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 1ed3403ff8ac..5c5938ab3d86 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -24,7 +24,6 @@
#define RX8010_MDAY 0x14
#define RX8010_MONTH 0x15
#define RX8010_YEAR 0x16
-#define RX8010_YEAR 0x16
#define RX8010_RESV17 0x17
#define RX8010_ALMIN 0x18
#define RX8010_ALHOUR 0x19
@@ -36,7 +35,7 @@
#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
#define RX8010_RESV30 0x30
-#define RX8010_RESV31 0x32
+#define RX8010_RESV31 0x31
#define RX8010_IRQ 0x32
#define RX8010_EXT_WADA BIT(3)
@@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client)
rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
- return err;
+ return 0;
}
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
- return err;
+ return 0;
}
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
diff --git a/drivers/rtc/rtc-sa1100.h b/drivers/rtc/rtc-sa1100.h
index 2c79c0c57822..cc724f5b07bc 100644
--- a/drivers/rtc/rtc-sa1100.h
+++ b/drivers/rtc/rtc-sa1100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RTC_SA1100_H__
#define __RTC_SA1100_H__
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
new file mode 100644
index 000000000000..d544d5268757
--- /dev/null
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SPRD_RTC_SEC_CNT_VALUE 0x0
+#define SPRD_RTC_MIN_CNT_VALUE 0x4
+#define SPRD_RTC_HOUR_CNT_VALUE 0x8
+#define SPRD_RTC_DAY_CNT_VALUE 0xc
+#define SPRD_RTC_SEC_CNT_UPD 0x10
+#define SPRD_RTC_MIN_CNT_UPD 0x14
+#define SPRD_RTC_HOUR_CNT_UPD 0x18
+#define SPRD_RTC_DAY_CNT_UPD 0x1c
+#define SPRD_RTC_SEC_ALM_UPD 0x20
+#define SPRD_RTC_MIN_ALM_UPD 0x24
+#define SPRD_RTC_HOUR_ALM_UPD 0x28
+#define SPRD_RTC_DAY_ALM_UPD 0x2c
+#define SPRD_RTC_INT_EN 0x30
+#define SPRD_RTC_INT_RAW_STS 0x34
+#define SPRD_RTC_INT_CLR 0x38
+#define SPRD_RTC_INT_MASK_STS 0x3C
+#define SPRD_RTC_SEC_ALM_VALUE 0x40
+#define SPRD_RTC_MIN_ALM_VALUE 0x44
+#define SPRD_RTC_HOUR_ALM_VALUE 0x48
+#define SPRD_RTC_DAY_ALM_VALUE 0x4c
+#define SPRD_RTC_SPG_VALUE 0x50
+#define SPRD_RTC_SPG_UPD 0x54
+#define SPRD_RTC_SEC_AUXALM_UPD 0x60
+#define SPRD_RTC_MIN_AUXALM_UPD 0x64
+#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
+#define SPRD_RTC_DAY_AUXALM_UPD 0x6c
+
+/* BIT & MASK definition for SPRD_RTC_INT_* registers */
+#define SPRD_RTC_SEC_EN BIT(0)
+#define SPRD_RTC_MIN_EN BIT(1)
+#define SPRD_RTC_HOUR_EN BIT(2)
+#define SPRD_RTC_DAY_EN BIT(3)
+#define SPRD_RTC_ALARM_EN BIT(4)
+#define SPRD_RTC_HRS_FORMAT_EN BIT(5)
+#define SPRD_RTC_AUXALM_EN BIT(6)
+#define SPRD_RTC_SPG_UPD_EN BIT(7)
+#define SPRD_RTC_SEC_UPD_EN BIT(8)
+#define SPRD_RTC_MIN_UPD_EN BIT(9)
+#define SPRD_RTC_HOUR_UPD_EN BIT(10)
+#define SPRD_RTC_DAY_UPD_EN BIT(11)
+#define SPRD_RTC_ALMSEC_UPD_EN BIT(12)
+#define SPRD_RTC_ALMMIN_UPD_EN BIT(13)
+#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14)
+#define SPRD_RTC_ALMDAY_UPD_EN BIT(15)
+#define SPRD_RTC_INT_MASK GENMASK(15, 0)
+
+#define SPRD_RTC_TIME_INT_MASK \
+ (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \
+ SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
+
+#define SPRD_RTC_ALMTIME_INT_MASK \
+ (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \
+ SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
+
+#define SPRD_RTC_ALM_INT_MASK \
+ (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \
+ SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
+
+/* second/minute/hour/day values mask definition */
+#define SPRD_RTC_SEC_MASK GENMASK(5, 0)
+#define SPRD_RTC_MIN_MASK GENMASK(5, 0)
+#define SPRD_RTC_HOUR_MASK GENMASK(4, 0)
+#define SPRD_RTC_DAY_MASK GENMASK(15, 0)
+
+/* alarm lock definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0)
+#define SPRD_RTC_ALM_UNLOCK 0xa5
+#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \
+ SPRD_RTC_ALMLOCK_MASK)
+
+/* SPG values definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
+#define SPRD_RTC_POWER_RESET_FLAG BIT(9)
+
+/* timeout of synchronizing time and alarm registers (us) */
+#define SPRD_RTC_POLL_TIMEOUT 200000
+#define SPRD_RTC_POLL_DELAY_US 20000
+
+struct sprd_rtc {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ struct device *dev;
+ u32 base;
+ int irq;
+ bool valid;
+};
+
+/*
+ * The Spreadtrum RTC controller has 3 groups of registers: time, normal
+ * alarm and auxiliary alarm. The time registers hold the RTC time, the
+ * normal alarm registers hold the normal alarm, and the auxiliary alarm
+ * registers hold the auxiliary alarm. Both the normal and the auxiliary
+ * alarm event can wake the system from deep sleep, but only the normal
+ * alarm event can power the system up from the powered-down state.
+ */
+enum sprd_rtc_reg_types {
+ SPRD_RTC_TIME,
+ SPRD_RTC_ALARM,
+ SPRD_RTC_AUX_ALARM,
+};
+
+static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
+{
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALM_INT_MASK);
+}
+
+static int sprd_rtc_disable_ints(struct sprd_rtc *rtc)
+{
+ int ret;
+
+ ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_INT_MASK, 0);
+ if (ret)
+ return ret;
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_INT_MASK);
+}
+
+static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG);
+ if (lock)
+ val |= SPRD_RTC_ALM_LOCK;
+ else
+ val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
+
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
+ if (ret)
+ return ret;
+
+ /* wait until the SPG value is updated successfully */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t *secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg;
+ u32 val, sec, min, hour, day;
+ int ret;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_VALUE;
+ min_reg = SPRD_RTC_MIN_CNT_VALUE;
+ hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
+ day_reg = SPRD_RTC_DAY_CNT_VALUE;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_VALUE;
+ min_reg = SPRD_RTC_MIN_ALM_VALUE;
+ hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
+ day_reg = SPRD_RTC_DAY_ALM_VALUE;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
+ if (ret)
+ return ret;
+
+ sec = val & SPRD_RTC_SEC_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
+ if (ret)
+ return ret;
+
+ min = val & SPRD_RTC_MIN_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
+ if (ret)
+ return ret;
+
+ hour = val & SPRD_RTC_HOUR_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
+ if (ret)
+ return ret;
+
+ day = val & SPRD_RTC_DAY_MASK;
+ *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
+ return 0;
+}
+
+static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
+ u32 sec, min, hour, day, val;
+ int ret, rem;
+
+ /* convert seconds to RTC time format */
+ day = div_s64_rem(secs, 86400, &rem);
+ hour = rem / 3600;
+ rem -= hour * 3600;
+ min = rem / 60;
+ sec = rem - min * 60;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_UPD;
+ min_reg = SPRD_RTC_MIN_CNT_UPD;
+ hour_reg = SPRD_RTC_HOUR_CNT_UPD;
+ day_reg = SPRD_RTC_DAY_CNT_UPD;
+ sts_mask = SPRD_RTC_TIME_INT_MASK;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_UPD;
+ min_reg = SPRD_RTC_MIN_ALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_ALM_UPD;
+ day_reg = SPRD_RTC_DAY_ALM_UPD;
+ sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ sts_mask = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
+ if (ret)
+ return ret;
+
+ if (type == SPRD_RTC_AUX_ALARM)
+ return 0;
+
+ /*
+ * Since the time and normal alarm registers sit in the always-powered-on
+ * region supplied by VDDRTC, updating them takes a long time (about
+ * 125 ms). Thus we must wait here until all values have been updated
+ * successfully.
+ */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ ((val & sts_mask) == sts_mask),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret < 0) {
+ dev_err(rtc->dev, "set time/alarm values timeout\n");
+ return ret;
+ }
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ sts_mask);
+}
+
+static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ int ret;
+
+ /* clear the auxiliary alarm interrupt status */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_AUXALM_EN);
+ } else {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN, 0);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+
+ if (!rtc->valid) {
+ dev_warn(dev, "RTC values are invalid\n");
+ return -EINVAL;
+ }
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(tm);
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
+ if (ret)
+ return ret;
+
+ if (!rtc->valid) {
+ /*
+ * Set SPRD_RTC_POWER_RESET_FLAG to indicate that the RTC now has
+ * valid time values.
+ */
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_SPG_UPD,
+ SPRD_RTC_POWER_RESET_FLAG,
+ SPRD_RTC_POWER_RESET_FLAG);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS,
+ val, (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n",
+ ret);
+ return ret;
+ }
+
+ rtc->valid = true;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+ u32 val;
+
+ /*
+ * If aie_timer is enabled, return the normal alarm time; otherwise
+ * return the auxiliary alarm time.
+ */
+ if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0)
+ return sprd_rtc_read_aux_alarm(dev, alrm);
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ struct rtc_time aie_time =
+ rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
+ int ret;
+
+ /*
+ * We have two groups of alarms: the normal alarm and the auxiliary
+ * alarm. Both alarm events can wake the system from deep sleep, but
+ * only the normal alarm event can power the system up from the
+ * powered-down state. Moreover, updating the auxiliary alarm registers
+ * does not require the ~125 ms polling needed for the normal alarm.
+ * Thus we usually use the auxiliary alarm to wake the system from deep
+ * sleep, and the normal alarm (with status polling) for everything
+ * else.
+ *
+ * So here we check whether the alarm time was set by aie_timer: if so,
+ * we set the normal alarm; if not, we set the auxiliary alarm, which
+ * means it is just a wake event.
+ */
+ if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
+ return sprd_rtc_set_aux_alarm(dev, alrm);
+
+ /* clear the alarm interrupt status firstly */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ /* unlock the alarm to enable the alarm function. */
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN, 0);
+
+ /*
+ * Lock the alarm function so that a spurious alarm event cannot
+ * power the system up.
+ */
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ if (enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
+
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops sprd_rtc_ops = {
+ .read_time = sprd_rtc_read_time,
+ .set_time = sprd_rtc_set_time,
+ .read_alarm = sprd_rtc_read_alarm,
+ .set_alarm = sprd_rtc_set_alarm,
+ .alarm_irq_enable = sprd_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
+{
+ struct sprd_rtc *rtc = dev_id;
+ int ret;
+
+ ret = sprd_rtc_clear_alarm_ints(rtc);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If SPRD_RTC_POWER_RESET_FLAG is not set, the RTC has been powered
+ * down and its time values are invalid.
+ */
+ rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false;
+ return 0;
+}
+
+static int sprd_rtc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct sprd_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc->regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &rtc->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RTC base address\n");
+ return ret;
+ }
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get RTC irq number\n");
+ return rtc->irq;
+ }
+
+ rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rtc);
+
+ /* clear all RTC interrupts and disable all RTC interrupts */
+ ret = sprd_rtc_disable_ints(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to disable RTC interrupts\n");
+ return ret;
+ }
+
+ /* check if RTC time values are valid */
+ ret = sprd_rtc_check_power_down(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to check RTC time values\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ sprd_rtc_handler,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ pdev->name, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request RTC irq\n");
+ return ret;
+ }
+
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &sprd_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+}
+
+static int sprd_rtc_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ return 0;
+}
+
+static const struct of_device_id sprd_rtc_of_match[] = {
+ { .compatible = "sprd,sc2731-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
+
+static struct platform_driver sprd_rtc_driver = {
+ .driver = {
+ .name = "sprd-rtc",
+ .of_match_table = sprd_rtc_of_match,
+ },
+ .probe = sprd_rtc_probe,
+ .remove = sprd_rtc_remove,
+};
+module_platform_driver(sprd_rtc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
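
The sc27xx hardware keeps separate day/hour/minute/second counters, so the driver splits a time64_t on write and recombines it on read (see sprd_rtc_set_secs()/sprd_rtc_get_secs()). A standalone sketch of that arithmetic, assuming a non-negative counter value:

#include <stdint.h>

static void split_secs(int64_t secs, uint32_t *day, uint32_t *hour,
		       uint32_t *min, uint32_t *sec)
{
	*day  = secs / 86400;	/* hardware keeps a 16-bit day counter */
	secs %= 86400;
	*hour = secs / 3600;
	secs %= 3600;
	*min  = secs / 60;
	*sec  = secs % 60;
}

static int64_t join_secs(uint32_t day, uint32_t hour, uint32_t min, uint32_t sec)
{
	return (((int64_t)day * 24 + hour) * 60 + min) * 60 + sec;
}
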
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index e364550eb9a7..92ff2edb86a6 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
retval = rtc_read_time(to_rtc_device(dev), &tm);
if (retval == 0) {
- unsigned long time;
- rtc_tm_to_time(&tm, &time);
- retval = sprintf(buf, "%lu\n", time);
+ time64_t time;
+
+ time = rtc_tm_to_time64(&tm);
+ retval = sprintf(buf, "%lld\n", time);
}
return retval;
@@ -132,7 +133,7 @@ static ssize_t
wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
- unsigned long alarm;
+ time64_t alarm;
struct rtc_wkalrm alm;
/* Don't show disabled alarms. For uniformity, RTC alarms are
@@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
- rtc_tm_to_time(&alm.time, &alarm);
- retval = sprintf(buf, "%lu\n", alarm);
+ alarm = rtc_tm_to_time64(&alm.time);
+ retval = sprintf(buf, "%lld\n", alarm);
}
return retval;
@@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
ssize_t retval;
- unsigned long now, alarm;
- unsigned long push = 0;
+ time64_t now, alarm;
+ time64_t push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
const char *buf_ptr;
@@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
retval = rtc_read_time(rtc, &alm.time);
if (retval < 0)
return retval;
- rtc_tm_to_time(&alm.time, &now);
+ now = rtc_tm_to_time64(&alm.time);
buf_ptr = buf;
if (*buf_ptr == '+') {
@@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
} else
adjust = 1;
}
- retval = kstrtoul(buf_ptr, 0, &alarm);
+ retval = kstrtos64(buf_ptr, 0, &alarm);
if (retval)
return retval;
if (adjust) {
@@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
if (alm.enabled) {
if (push) {
- rtc_tm_to_time(&alm.time, &push);
+ push = rtc_tm_to_time64(&alm.time);
alarm += push;
} else
return -EBUSY;
@@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
*/
alarm = now + 300;
}
- rtc_time_to_tm(alarm, &alm.time);
+ rtc_time64_to_tm(alarm, &alm.time);
retval = rtc_set_alarm(rtc, &alm);
return (retval < 0) ? retval : n;
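
The sysfs conversion above swaps unsigned long second counts for time64_t so alarm and epoch values survive past the 32-bit limits (year 2038 signed, 2106 unsigned). A standalone illustration of why the wider type and the %lld format matter:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* 2100-01-01T00:00:00Z: does not fit in a signed 32-bit time value. */
	int64_t alarm = 4102444800LL;

	printf("%" PRId64 "\n", alarm);	/* the kernel code prints this with %lld */
	return 0;
}
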
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 65b432a096fe..0c34d3b81279 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -52,6 +52,7 @@ struct xgene_rtc_dev {
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
+ unsigned int irq_enabled;
};
static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
return 0;
}
+static int xgene_rtc_alarm_irq_enabled(struct device *dev)
+{
+ struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
+
+ return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1 : 0;
+}
+
static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- unsigned long rtc_time;
unsigned long alarm_time;
- rtc_time = readl(pdata->csr_base + RTC_CCVR);
rtc_tm_to_time(&alrm->time, &alarm_time);
-
pdata->alarm_time = alarm_time;
writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
@@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Couldn't get the clock for RTC\n");
return -ENODEV;
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk);
+ return ret;
+ }
pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&xgene_rtc_ops, THIS_MODULE);
@@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xgene_rtc_suspend(struct device *dev)
+static int __maybe_unused xgene_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
+
+ /*
+ * If this RTC alarm will be used to wake the system up, leave it
+ * enabled. Otherwise disable the alarm and await suspension.
+ */
if (device_may_wakeup(&pdev->dev)) {
if (!enable_irq_wake(irq))
pdata->irq_wake = 1;
} else {
+ pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev);
xgene_rtc_alarm_irq_enable(dev, 0);
- clk_disable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
}
-
return 0;
}
-static int xgene_rtc_resume(struct device *dev)
+static int __maybe_unused xgene_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
+ int rc;
irq = platform_get_irq(pdev, 0);
+
if (device_may_wakeup(&pdev->dev)) {
if (pdata->irq_wake) {
disable_irq_wake(irq);
pdata->irq_wake = 0;
}
} else {
- clk_enable(pdata->clk);
- xgene_rtc_alarm_irq_enable(dev, 1);
+ rc = clk_prepare_enable(pdata->clk);
+ if (rc) {
+ dev_err(dev, "Unable to enable clock error %d\n", rc);
+ return rc;
+ }
+ xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume);
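
Dropping the #ifdef CONFIG_PM_SLEEP guard in favour of __maybe_unused keeps the xgene suspend/resume callbacks compiling in every configuration, while SIMPLE_DEV_PM_OPS only references them when sleep support is enabled. A minimal sketch of the pattern (driver name illustrative):

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* Quiesce the hardware; remember alarm/IRQ state for resume. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* Re-enable clocks and restore the saved alarm/IRQ state. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
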
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
index b4a68ffcd06b..0c177647ea6c 100644
--- a/drivers/rtc/systohc.c
+++ b/drivers/rtc/systohc.c
@@ -10,6 +10,7 @@
/**
* rtc_set_ntp_time - Save NTP synchronized time to the RTC
* @now: Current time of day
+ * @target_nsec: pointer for desired now->tv_nsec value
*
* Replacement for the NTP platform function update_persistent_clock64
* that stores time for later retrieval by rtc_hctosys.
@@ -18,30 +19,52 @@
* possible at all, and various other -errno for specific temporary failure
* cases.
*
+ * -EPROTO is returned if now.tv_nsec is not close enough to *target_nsec.
+ *
* If temporary failure is indicated the caller should try again 'soon'
*/
-int rtc_set_ntp_time(struct timespec64 now)
+int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec)
{
struct rtc_device *rtc;
struct rtc_time tm;
+ struct timespec64 to_set;
int err = -ENODEV;
-
- if (now.tv_nsec < (NSEC_PER_SEC >> 1))
- rtc_time64_to_tm(now.tv_sec, &tm);
- else
- rtc_time64_to_tm(now.tv_sec + 1, &tm);
+ bool ok;
rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
- if (rtc) {
- /* rtc_hctosys exclusively uses UTC, so we call set_time here,
- * not set_mmss. */
- if (rtc->ops &&
- (rtc->ops->set_time ||
- rtc->ops->set_mmss64 ||
- rtc->ops->set_mmss))
- err = rtc_set_time(rtc, &tm);
- rtc_class_close(rtc);
+ if (!rtc)
+ goto out_err;
+
+ if (!rtc->ops || (!rtc->ops->set_time && !rtc->ops->set_mmss64 &&
+ !rtc->ops->set_mmss))
+ goto out_close;
+
+ /* Compute the value of tv_nsec we require the caller to supply in
+ * now.tv_nsec. This is the value such that (now +
+ * set_offset_nsec).tv_nsec == 0.
+ */
+ set_normalized_timespec64(&to_set, 0, -rtc->set_offset_nsec);
+ *target_nsec = to_set.tv_nsec;
+
+ /* The ntp code must call this with the correct value in tv_nsec; if
+ * it does not, we update target_nsec and return -EPROTO to make the
+ * ntp code try again later.
+ */
+ ok = rtc_tv_nsec_ok(rtc->set_offset_nsec, &to_set, &now);
+ if (!ok) {
+ err = -EPROTO;
+ goto out_close;
}
+ rtc_time64_to_tm(to_set.tv_sec, &tm);
+
+ /* rtc_hctosys exclusively uses UTC, so we call set_time here, not
+ * set_mmss.
+ */
+ err = rtc_set_time(rtc, &tm);
+
+out_close:
+ rtc_class_close(rtc);
+out_err:
return err;
}
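
The reworked rtc_set_ntp_time() asks the caller to deliver "now" at a tv_nsec such that the RTC write, which takes effect set_offset_nsec later, lands exactly on a second boundary. A standalone sketch of the target_nsec arithmetic with an assumed 5 ms offset:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	int64_t set_offset_nsec = 5000000;	/* assumed 5 ms transfer delay */
	int64_t target_nsec = (NSEC_PER_SEC - set_offset_nsec) % NSEC_PER_SEC;

	/* Prints 995000000: calling at now.tv_nsec == target_nsec makes
	 * (now + set_offset_nsec).tv_nsec == 0. */
	printf("%lld\n", (long long)target_nsec);
	return 0;
}
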
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index b64e2b32c753..60c85cff556f 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# S/390 block devices
#
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29f35e29d480..0f1ff0813493 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -70,8 +70,8 @@ static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
@@ -119,9 +119,7 @@ struct dasd_device *dasd_alloc_device(void)
(void (*)(unsigned long)) dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
- init_timer(&device->timer);
- device->timer.function = dasd_device_timeout;
- device->timer.data = (unsigned long) device;
+ timer_setup(&device->timer, dasd_device_timeout, 0);
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
@@ -163,9 +161,7 @@ struct dasd_block *dasd_alloc_block(void)
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
- init_timer(&block->timer);
- block->timer.function = dasd_block_timeout;
- block->timer.data = (unsigned long) block;
+ timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
return block;
@@ -1560,12 +1556,12 @@ EXPORT_SYMBOL(dasd_start_IO);
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_device *device;
- device = (struct dasd_device *) ptr;
+ device = from_timer(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
@@ -2628,12 +2624,12 @@ EXPORT_SYMBOL(dasd_cancel_req);
* is waiting for something that may not come reliably, (e.g. a state
* change interrupt)
*/
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_block *block;
- block = (struct dasd_block *) ptr;
+ block = from_timer(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
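
The dasd hunks above follow the tree-wide timer API conversion: the callback now receives the timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long data argument. A minimal sketch of the pattern (structure and field names illustrative):

struct foo {
	struct timer_list timer;
	/* ... */
};

static void foo_timeout(struct timer_list *t)
{
	struct foo *foo = from_timer(foo, t, timer);

	/* handle the expiry using "foo" */
}

static void foo_init(struct foo *foo)
{
	timer_setup(&foo->timer, foo_timeout, 0);
}
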
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e448a0fc0c09..c94b606e0df8 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
* Holger Smolinski <Holger.Smolinski@de.ibm.com>
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 0e0e622eadc3..62f5f04d8f61 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PAV alias management for the DASD ECKD discipline
*
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index e84a5468d810..405b6feed465 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 34e153a6b19c..5869d2fede35 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 8713fefd794b..a7917d473774 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for extended error reporting.
*
@@ -295,7 +296,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
{
struct dasd_ccw_req *temp_cqr;
int data_size;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -309,9 +310,9 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
@@ -339,7 +340,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
{
int data_size;
int snss_rc;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -352,9 +353,9 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 6389feb2fb7a..ba4fa372d02d 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index b5d3db0e5efb..b14bf1b2c691 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 8b1341fb2e0d..7036a6c6f86f 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index db470bd10175..b095a23bcc0c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
@@ -95,14 +96,6 @@ do { \
d_data); \
} while(0)
-#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
-do { \
- debug_sprintf_exception(d_device->debug_area, \
- d_level, \
- d_str "\n", \
- d_data); \
-} while(0)
-
#define DBF_EVENT(d_level, d_str, d_data...)\
do { \
debug_sprintf_event(dasd_debug_area, \
@@ -121,14 +114,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-#define DBF_EXC(d_level, d_str, d_data...)\
-do { \
- debug_sprintf_exception(dasd_debug_area, \
- d_level,\
- d_str "\n", \
- d_data); \
-} while(0)
-
/* limit size for an errorstring */
#define ERRORLENGTH 30
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index ec65c1e51c2a..7bdc6aaa0ba3 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 7104d6765773..c33788a829c3 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index 71288dd9dd7f..a05a4297cfae 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SCM_BLK_H
#define SCM_BLK_H
@@ -55,13 +56,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(scm_debug, level))
- return;
- while (length > 0) {
- debug_event(scm_debug, level, data, length);
- length -= scm_debug->buf_size;
- data += scm_debug->buf_size;
- }
+ debug_event(scm_debug, level, data, length);
}
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
index c98cf52d78d1..3134fd6e058e 100644
--- a/drivers/s390/block/scm_drv.c
+++ b/drivers/s390/block/scm_drv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for s390 storage class memory.
*
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 0c443e26835d..05ac6ba15a53 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# S/390 character devices
#
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9ec4ae056158..8c9d412b6d33 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 3215 line mode terminal driver.
*
@@ -281,9 +282,9 @@ static void raw3215_start_io(struct raw3215_info *raw)
/*
* Function to start a delayed output after RAW3215_TIMEOUT seconds
*/
-static void raw3215_timeout(unsigned long __data)
+static void raw3215_timeout(struct timer_list *t)
{
- struct raw3215_info *raw = (struct raw3215_info *) __data;
+ struct raw3215_info *raw = from_timer(raw, t, timer);
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -669,7 +670,7 @@ static struct raw3215_info *raw3215_alloc_info(void)
return NULL;
}
- setup_timer(&info->timer, raw3215_timeout, (unsigned long)info);
+ timer_setup(&info->timer, raw3215_timeout, 0);
init_waitqueue_head(&info->empty_wait);
tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info);
tty_port_init(&info->port);
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 8522cfce5b4e..fd2146bcc0ad 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - console view.
*
@@ -68,7 +69,7 @@ static struct con3270 *condev;
#define CON_UPDATE_STATUS 4 /* Update status line. */
#define CON_UPDATE_ALL 8 /* Recreate screen. */
-static void con3270_update(struct con3270 *);
+static void con3270_update(struct timer_list *);
/*
* Setup timeout for a device. On timeout trigger an update.
@@ -204,8 +205,9 @@ con3270_write_callback(struct raw3270_request *rq, void *data)
* Update console display.
*/
static void
-con3270_update(struct con3270 *cp)
+con3270_update(struct timer_list *t)
{
+ struct con3270 *cp = from_timer(cp, t, timer);
struct raw3270_request *wrq;
char wcc, prolog[6];
unsigned long flags;
@@ -551,7 +553,7 @@ con3270_flush(void)
con3270_update_status(cp);
while (cp->update_flags != 0) {
spin_unlock_irqrestore(&cp->view.lock, flags);
- con3270_update(cp);
+ con3270_update(&cp->timer);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
}
@@ -622,8 +624,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
- setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
- (unsigned long) condev);
+ timer_setup(&condev->timer, con3270_update, 0);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index f7d92584b993..e1686a69a68e 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Unified handling of special chars.
*
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 59c2d6e55e55..e52afa3b8180 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Unified handling of special chars.
*
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 07c7f31081bc..98a5c459a1bf 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index a5ccbf6f0d36..6bf1058de873 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DIAGNOSE X'2C4' instruction based HMC FTP services, useable on z/VM
*
diff --git a/drivers/s390/char/diag_ftp.h b/drivers/s390/char/diag_ftp.h
index 3abd2614053a..5d036ba7114f 100644
--- a/drivers/s390/char/diag_ftp.h
+++ b/drivers/s390/char/diag_ftp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, useable on z/VM
*
diff --git a/drivers/s390/char/hmcdrv_cache.c b/drivers/s390/char/hmcdrv_cache.c
index 4cda5ada143a..1f5bdb237862 100644
--- a/drivers/s390/char/hmcdrv_cache.c
+++ b/drivers/s390/char/hmcdrv_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SE/HMC Drive (Read) Cache Functions
*
diff --git a/drivers/s390/char/hmcdrv_cache.h b/drivers/s390/char/hmcdrv_cache.h
index a14b57526781..d69f9fe87faa 100644
--- a/drivers/s390/char/hmcdrv_cache.h
+++ b/drivers/s390/char/hmcdrv_cache.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SE/HMC Drive (Read) Cache Functions
*
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index 43cee7fcd01c..20e9cd542e03 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive CD/DVD Device
*
diff --git a/drivers/s390/char/hmcdrv_dev.h b/drivers/s390/char/hmcdrv_dev.h
index cb17f07e02de..558eba929130 100644
--- a/drivers/s390/char/hmcdrv_dev.h
+++ b/drivers/s390/char/hmcdrv_dev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SE/HMC Drive FTP Device
*
diff --git a/drivers/s390/char/hmcdrv_ftp.c b/drivers/s390/char/hmcdrv_ftp.c
index 8cb7d8fbadd6..0e70397d6e04 100644
--- a/drivers/s390/char/hmcdrv_ftp.c
+++ b/drivers/s390/char/hmcdrv_ftp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive FTP Services
*
diff --git a/drivers/s390/char/hmcdrv_ftp.h b/drivers/s390/char/hmcdrv_ftp.h
index f3643a7b3676..d12ca12b5ccd 100644
--- a/drivers/s390/char/hmcdrv_ftp.h
+++ b/drivers/s390/char/hmcdrv_ftp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SE/HMC Drive FTP Services
*
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 186d05e4c767..5b505fdaedec 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ebcdic keycode functions for s390 console drivers
*
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index a31f339211d5..a074d9711628 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ebcdic keycode functions for s390 console drivers
*
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 56519cbb165c..114ca7cbf889 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IBM/3270 Driver
*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 6111c1fa2d1e..e4e2df7a478e 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core function to access sclp interface
*
@@ -135,6 +136,7 @@ static enum sclp_suspend_state_t {
#define SCLP_BUSY_INTERVAL 10
#define SCLP_RETRY_INTERVAL 30
+static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
@@ -153,25 +155,32 @@ __sclp_queue_read_req(void)
/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
-__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
- unsigned long data)
+__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
del_timer(&sclp_request_timer);
- sclp_request_timer.function = function;
- sclp_request_timer.data = data;
+ sclp_request_timer.function = cb;
sclp_request_timer.expires = jiffies + time;
add_timer(&sclp_request_timer);
}
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+static void sclp_request_timeout_restart(struct timer_list *unused)
+{
+ sclp_request_timeout(true);
+}
+
+static void sclp_request_timeout_normal(struct timer_list *unused)
+{
+ sclp_request_timeout(false);
+}
+
+/* Request timeout handler. Restart the request queue. If force_restart,
* force restart of running request. */
-static void
-sclp_request_timeout(unsigned long data)
+static void sclp_request_timeout(bool force_restart)
{
unsigned long flags;
spin_lock_irqsave(&sclp_lock, flags);
- if (data) {
+ if (force_restart) {
if (sclp_running_state == sclp_running_state_running) {
/* Break running state and queue NOP read event request
* to get a defined interface state. */
@@ -180,7 +189,7 @@ sclp_request_timeout(unsigned long data)
}
} else {
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
}
spin_unlock_irqrestore(&sclp_lock, flags);
sclp_process_queue();
@@ -238,7 +247,7 @@ out:
* invokes callback. This timer can be set per request in situations where
* waiting too long would be harmful to the system, e.g. during SE reboot.
*/
-static void sclp_req_queue_timeout(unsigned long data)
+static void sclp_req_queue_timeout(struct timer_list *unused)
{
unsigned long flags, expires_next;
struct sclp_req *req;
@@ -275,12 +284,12 @@ __sclp_start_request(struct sclp_req *req)
req->status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
- sclp_request_timeout, 1);
+ sclp_request_timeout_restart);
return 0;
} else if (rc == -EBUSY) {
/* Try again later */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
return 0;
}
/* Request failed */
@@ -314,7 +323,7 @@ sclp_process_queue(void)
/* Cannot abort already submitted request - could still
* be active at the SCLP */
__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
- sclp_request_timeout, 0);
+ sclp_request_timeout_normal);
break;
}
do_post:
@@ -557,7 +566,7 @@ sclp_sync_wait(void)
if (timer_pending(&sclp_request_timer) &&
get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
- sclp_request_timer.function(sclp_request_timer.data);
+ sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
local_irq_disable();
@@ -914,7 +923,7 @@ static void sclp_check_handler(struct ext_code ext_code,
/* Initial init mask request timed out. Modify request state to failed. */
static void
-sclp_check_timeout(unsigned long data)
+sclp_check_timeout(struct timer_list *unused)
{
unsigned long flags;
@@ -953,7 +962,7 @@ sclp_check_interface(void)
sclp_init_req.status = SCLP_REQ_RUNNING;
sclp_running_state = sclp_running_state_running;
__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
- sclp_check_timeout, 0);
+ sclp_check_timeout);
spin_unlock_irqrestore(&sclp_lock, flags);
/* Enable service-signal interruption - needs to happen
* with IRQs enabled. */
@@ -1158,9 +1167,8 @@ sclp_init(void)
INIT_LIST_HEAD(&sclp_req_queue);
INIT_LIST_HEAD(&sclp_reg_list);
list_add(&sclp_state_change_event.list, &sclp_reg_list);
- init_timer(&sclp_request_timer);
- init_timer(&sclp_queue_timer);
- sclp_queue_timer.function = sclp_req_queue_timeout;
+ timer_setup(&sclp_request_timer, NULL, 0);
+ timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
/* Check interface */
spin_unlock_irqrestore(&sclp_lock, flags);
rc = sclp_check_interface();
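
Because timer_list callbacks no longer carry a data argument, the flag sclp previously passed via timer.data is now encoded by choosing which thin wrapper gets registered. A minimal sketch of that wrapper pattern (names illustrative):

static void foo_timeout(bool force_restart);

static void foo_timeout_restart(struct timer_list *unused)
{
	foo_timeout(true);
}

static void foo_timeout_normal(struct timer_list *unused)
{
	foo_timeout(false);
}
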
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 53b5d1b9761a..f41f6e2ca063 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 1999,2012
*
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index dff8b94871f0..d7686a68c093 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007,2012
*
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 6037bc87e767..8966a1c1b548 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP line mode console driver
*
@@ -124,7 +125,7 @@ static void sclp_console_sync_queue(void)
* temporary write buffer without further waiting on a final new line.
*/
static void
-sclp_console_timeout(unsigned long data)
+sclp_console_timeout(struct timer_list *unused)
{
sclp_conbuf_emit();
}
@@ -210,11 +211,7 @@ sclp_console_write(struct console *console, const char *message,
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
- init_timer(&sclp_con_timer);
- sclp_con_timer.function = sclp_console_timeout;
- sclp_con_timer.data = 0UL;
- sclp_con_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_con_timer);
+ mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
@@ -334,7 +331,7 @@ sclp_console_init(void)
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
sclp_conbuf = NULL;
- init_timer(&sclp_con_timer);
+ timer_setup(&sclp_con_timer, sclp_console_timeout, 0);
/* Set output format */
if (MACHINE_IS_VM)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 7003d52c2191..194ffd5c8580 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 90d92fbe7b9b..f60d7ea8268d 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP control program identification sysfs interface
*
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
index 65bb6a99c97f..edf60d1ca633 100644
--- a/drivers/s390/char/sclp_cpi_sys.h
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SCLP control program identification sysfs interface
*
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 78a7e4f94721..a78cea0c3a09 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IOCTL interface for SCLP
*
diff --git a/drivers/s390/char/sclp_diag.h b/drivers/s390/char/sclp_diag.h
index 59c4afa5e670..796c5311b865 100644
--- a/drivers/s390/char/sclp_diag.h
+++ b/drivers/s390/char/sclp_diag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index bc1fc00910b0..d06bc5674e5f 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP early driver
*
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 5029cc87e80f..edeb2597b0b8 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2015
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
index 6561cc5b2d5d..dfdd6c8fd17e 100644
--- a/drivers/s390/char/sclp_ftp.c
+++ b/drivers/s390/char/sclp_ftp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, useable on LPAR
*
diff --git a/drivers/s390/char/sclp_ftp.h b/drivers/s390/char/sclp_ftp.h
index 98ba3183e7d9..d64da18c194d 100644
--- a/drivers/s390/char/sclp_ftp.h
+++ b/drivers/s390/char/sclp_ftp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, useable on LPAR
*
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index f9cbb1ab047b..d35f10ea5b52 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP OCF communication parameters sysfs interface
*
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
index 4dbb3dfd4bc7..e7c84a4e5eb5 100644
--- a/drivers/s390/char/sclp_pci.c
+++ b/drivers/s390/char/sclp_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI I/O adapter configuration related functions.
*
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index e4958511168a..76956c2131cd 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* signal quiesce handler
*
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 91b26df5227d..44594a492553 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver: reading from and writing to system console on S/390 via SCLP
*
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h
index e3b0290995ba..a2eb22f67393 100644
--- a/drivers/s390/char/sclp_rw.h
+++ b/drivers/s390/char/sclp_rw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* interface to the SCLP-read/write driver
*
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 7cdd13dd7be1..8e0b69a2f11a 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP "store data in absolute storage"
*
diff --git a/drivers/s390/char/sclp_sdias.h b/drivers/s390/char/sclp_sdias.h
index f2431c414150..bc36cf881010 100644
--- a/drivers/s390/char/sclp_sdias.h
+++ b/drivers/s390/char/sclp_sdias.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SCLP "store data in absolute storage"
*
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 236b736ae136..9f7b87d6d434 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP line mode terminal driver.
*
@@ -150,7 +151,7 @@ __sclp_ttybuf_emit(struct sclp_buffer *buffer)
* temporary write buffer.
*/
static void
-sclp_tty_timeout(unsigned long data)
+sclp_tty_timeout(struct timer_list *unused)
{
unsigned long flags;
struct sclp_buffer *buf;
@@ -217,11 +218,7 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
- init_timer(&sclp_tty_timer);
- sclp_tty_timer.function = sclp_tty_timeout;
- sclp_tty_timer.data = 0UL;
- sclp_tty_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_tty_timer);
+ mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
@@ -528,7 +525,7 @@ sclp_tty_init(void)
}
INIT_LIST_HEAD(&sclp_tty_outqueue);
spin_lock_init(&sclp_tty_lock);
- init_timer(&sclp_tty_timer);
+ timer_setup(&sclp_tty_timer, sclp_tty_timeout, 0);
sclp_ttybuf = NULL;
sclp_tty_buffer_count = 0;
if (MACHINE_IS_VM) {
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index c8773421c31f..0fa2d5971d0f 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* interface to the SCLP-read/write driver
*
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 095481d32236..3f9a6ef650fa 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCLP VT220 terminal driver.
*
@@ -356,7 +357,7 @@ sclp_vt220_add_msg(struct sclp_vt220_request *request,
* Emit buffer after having waited long enough for more data to arrive.
*/
static void
-sclp_vt220_timeout(unsigned long data)
+sclp_vt220_timeout(struct timer_list *unused)
{
sclp_vt220_emit_current();
}
@@ -453,8 +454,6 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
/* Setup timer to output current console buffer after some time */
if (sclp_vt220_current_request != NULL &&
!timer_pending(&sclp_vt220_timer) && do_schedule) {
- sclp_vt220_timer.function = sclp_vt220_timeout;
- sclp_vt220_timer.data = 0UL;
sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
add_timer(&sclp_vt220_timer);
}
@@ -698,7 +697,7 @@ static int __init __sclp_vt220_init(int num_pages)
spin_lock_init(&sclp_vt220_lock);
INIT_LIST_HEAD(&sclp_vt220_empty);
INIT_LIST_HEAD(&sclp_vt220_outqueue);
- init_timer(&sclp_vt220_timer);
+ timer_setup(&sclp_vt220_timer, sclp_vt220_timeout, 0);
tty_port_init(&sclp_vt220_port);
sclp_vt220_current_request = NULL;
sclp_vt220_buffered_chars = 0;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index ea664dd4f56d..8bec5f9ea92c 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* tape device driver for 3480/3490E/3590 tapes.
*
@@ -128,6 +129,7 @@ struct tape_request {
int options; /* options for execution. */
int retries; /* retry counter for error recovery. */
int rescnt; /* residual count from devstat. */
+ struct timer_list timer; /* timer for std_assign_timeout(). */
/* Callback for delivering final status. */
void (*callback)(struct tape_request *, void *);
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index 36b759e89d22..b398d8a3ed3c 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* tape device discipline for 3590 tapes.
*
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 46ac1164f242..fc206c9d1c56 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* character device frontend for tape device driver
*
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 91c3c642c76e..e7d23048d3f0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -68,9 +68,8 @@ struct tape_class_device *register_tape_dev(
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
- tcd->char_device->dev = dev;
- rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+ rc = cdev_add(tcd->char_device, dev, 1);
if (rc)
goto fail_with_cdev;
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index a332c10d50ad..d25ac075b1ad 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2004 All Rights Reserved.
*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 9dd4534823b3..32503a60ee85 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -32,7 +32,7 @@
static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
-static void tape_long_busy_timeout(unsigned long data);
+static void tape_long_busy_timeout(struct timer_list *t);
/*
* One list to contain all tape devices of all disciplines, so
@@ -381,8 +381,7 @@ tape_generic_online(struct tape_device *device,
return -EINVAL;
}
- init_timer(&device->lb_timeout);
- device->lb_timeout.function = tape_long_busy_timeout;
+ timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);
/* Let the discipline have a go at the device. */
device->discipline = discipline;
@@ -867,18 +866,16 @@ tape_delayed_next_request(struct work_struct *work)
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
-static void tape_long_busy_timeout(unsigned long data)
+static void tape_long_busy_timeout(struct timer_list *t)
{
+ struct tape_device *device = from_timer(device, t, lb_timeout);
struct tape_request *request;
- struct tape_device *device;
- device = (struct tape_device *) data;
spin_lock_irq(get_ccwdev_lock(device->cdev));
request = list_entry(device->req_queue.next, struct tape_request, list);
BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
__tape_start_next_request(device);
- device->lb_timeout.data = 0UL;
tape_put_device(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
@@ -1157,7 +1154,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (req->status == TAPE_REQUEST_LONG_BUSY) {
DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
if (del_timer(&device->lb_timeout)) {
- device->lb_timeout.data = 0UL;
tape_put_device(device);
__tape_start_next_request(device);
}
@@ -1212,8 +1208,6 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case TAPE_IO_PENDING:
break;
case TAPE_IO_LONG_BUSY:
- device->lb_timeout.data =
- (unsigned long) tape_get_device(device);
device->lb_timeout.expires = jiffies +
LONG_BUSY_TIMEOUT * HZ;
DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 8733b232a116..faae30476f4b 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* tape device driver for S/390 and zSeries tapes.
*
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 3478e19ae194..1f5fab617b67 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* standard tape device functions for ibm tapes.
*
@@ -32,14 +33,12 @@
* tape_std_assign
*/
static void
-tape_std_assign_timeout(unsigned long data)
+tape_std_assign_timeout(struct timer_list *t)
{
- struct tape_request * request;
- struct tape_device * device;
+ struct tape_request * request = from_timer(request, t, timer);
+ struct tape_device * device = request->device;
int rc;
- request = (struct tape_request *) data;
- device = request->device;
BUG_ON(!device);
DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
@@ -70,16 +69,12 @@ tape_std_assign(struct tape_device *device)
* to another host (actually this shouldn't happen but it does).
* So we set up a timeout for this call.
*/
- init_timer_on_stack(&timeout);
- timeout.function = tape_std_assign_timeout;
- timeout.data = (unsigned long) request;
- timeout.expires = jiffies + 2 * HZ;
- add_timer(&timeout);
+ timer_setup(&request->timer, tape_std_assign_timeout, 0);
+	mod_timer(&request->timer, jiffies + 2 * HZ);
rc = tape_do_io_interruptible(device, request);
- del_timer_sync(&timeout);
- destroy_timer_on_stack(&timeout);
+ del_timer_sync(&request->timer);
if (rc != 0) {
DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 8c760c036832..53ec8e2870d4 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* standard tape device functions for ibm tapes.
*
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e5ebe2fbee23..e417ccd9e299 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -118,7 +118,7 @@ struct tty3270 {
#define TTY_UPDATE_STATUS 8 /* Update status line. */
#define TTY_UPDATE_ALL 16 /* Recreate screen. */
-static void tty3270_update(struct tty3270 *);
+static void tty3270_update(struct timer_list *);
static void tty3270_resize_work(struct work_struct *work);
/*
@@ -361,8 +361,9 @@ tty3270_write_callback(struct raw3270_request *rq, void *data)
* Update 3270 display.
*/
static void
-tty3270_update(struct tty3270 *tp)
+tty3270_update(struct timer_list *t)
{
+ struct tty3270 *tp = from_timer(tp, t, timer);
static char invalid_sba[2] = { 0xff, 0xff };
struct raw3270_request *wrq;
unsigned long updated;
@@ -748,8 +749,7 @@ tty3270_alloc_view(void)
goto out_reset;
tty_port_init(&tp->port);
- setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
- (unsigned long) tp);
+ timer_setup(&tp->timer, tty3270_update, 0);
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
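Note: the tty3270 hunk drops the function-pointer cast that setup_timer() required; timer_setup() takes a correctly typed callback directly. A small sketch, names illustrative:

	#include <linux/timer.h>

	struct demo_view {
		struct timer_list timer;
	};

	/*
	 * Previously: setup_timer(&tp->timer,
	 *                         (void (*)(unsigned long))demo_update,
	 *                         (unsigned long)tp);
	 * The cast disappears because the callback prototype now matches.
	 */
	static void demo_update(struct timer_list *t)
	{
		struct demo_view *tp = from_timer(tp, t, timer);

		(void)tp;		/* rebuild and write the screen here */
	}

	static void demo_alloc_view(struct demo_view *tp)
	{
		timer_setup(&tp->timer, demo_update, 0);
	}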
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
index 11141a8f8974..52ceed6f8408 100644
--- a/drivers/s390/char/tty3270.h
+++ b/drivers/s390/char/tty3270.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
*
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 7898bbcc28fc..17e411c57576 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b19020b9efff..62559dc0169f 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -812,8 +812,7 @@ static int vmlogrdr_register_cdev(dev_t dev)
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
- vmlogrdr_cdev->dev = dev;
- rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+ rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
if (!rc)
return 0;
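Note: both cdev_add() hunks (tape_class.c above and vmlogrdr.c here) stop assigning cdev->dev by hand and pass the dev_t straight to cdev_add(), which records it itself. A minimal sketch of the registration, assuming a hypothetical file_operations:

	#include <linux/cdev.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static const struct file_operations demo_fops;	/* assume real handlers */

	static int demo_register_cdev(dev_t dev)
	{
		struct cdev *cdev;
		int rc;

		cdev = cdev_alloc();
		if (!cdev)
			return -ENOMEM;
		cdev->owner = THIS_MODULE;
		cdev->ops = &demo_fops;
		/* No "cdev->dev = dev;" needed: cdev_add() stores the number. */
		rc = cdev_add(cdev, dev, 1);
		if (rc)
			kobject_put(&cdev->kobj);	/* undo cdev_alloc() on failure */
		return rc;
	}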
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 04aceb694d51..fa90ef05afc0 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
- atomic_set(&urd->ref_count, 1);
+ refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
get_device(&cdev->dev);
return urd;
@@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd)
static void urdev_get(struct urdev *urd)
{
- atomic_inc(&urd->ref_count);
+ refcount_inc(&urd->ref_count);
}
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
@@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
static void urdev_put(struct urdev *urd)
{
- if (atomic_dec_and_test(&urd->ref_count))
+ if (refcount_dec_and_test(&urd->ref_count))
urdev_free(urd);
}
@@ -892,10 +892,9 @@ static int ur_set_online(struct ccw_device *cdev)
}
urd->char_device->ops = &ur_fops;
- urd->char_device->dev = MKDEV(major, minor);
urd->char_device->owner = ur_fops.owner;
- rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+ rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
if (rc)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
@@ -946,7 +945,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
rc = -EBUSY;
goto fail_urdev_put;
}
- if (!force && (atomic_read(&urd->ref_count) > 2)) {
+ if (!force && (refcount_read(&urd->ref_count) > 2)) {
/* There is still a user of urd (e.g. ur_open) */
TRACE("ur_set_offline: BUSY\n");
rc = -EBUSY;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index fa320ad4593d..608b0719ce17 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
@@ -11,6 +12,8 @@
#ifndef _VMUR_H_
#define _VMUR_H_
+#include <linux/refcount.h>
+
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
@@ -69,7 +72,7 @@ struct urdev {
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
- atomic_t ref_count; /* reference counter */
+ refcount_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
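Note: the vmur hunks swap the open-coded atomic_t reference counter for refcount_t, which saturates and warns on overflow/underflow. A sketch of the resulting lifetime handling with a hypothetical object:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct demo_urdev {
		refcount_t ref_count;
		/* ... device state ... */
	};

	static struct demo_urdev *demo_alloc(void)
	{
		struct demo_urdev *urd = kzalloc(sizeof(*urd), GFP_KERNEL);

		if (urd)
			refcount_set(&urd->ref_count, 1);	/* initial reference */
		return urd;
	}

	static void demo_get(struct demo_urdev *urd)
	{
		refcount_inc(&urd->ref_count);
	}

	static void demo_put(struct demo_urdev *urd)
	{
		if (refcount_dec_and_test(&urd->ref_count))
			kfree(urd);
	}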
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index bdf47526038a..a070ef0efe65 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the S/390 common i/o drivers
#
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 99b5db469097..a45011e4529e 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Support for adapter interruptions
*
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index bf7f5d4c50e1..2a3f874a21d5 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- blacklisting of specific devices
*
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 34b9ad6b3143..e2f7b6e93efd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -373,6 +373,12 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
rc = -EINVAL;
goto error;
}
+ /* Check if the devices are bound to the required ccw driver. */
+ if (gdev->count && gdrv && gdrv->ccw_driver &&
+ gdev->cdev[0]->drv != gdrv->ccw_driver) {
+ rc = -EINVAL;
+ goto error;
+ }
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
gdev->dev.groups = ccwgroup_attr_groups;
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 2782100b2c07..603268a33ea1 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Handling of internal CCW device requests.
*
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 0d8437b7ea72..7e80323cd261 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 321a3f765810..dda5953534b7 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_CHSC_H
#define S390_CHSC_H
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 735052ecd3e5..8e7e19b9e92c 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -43,11 +43,7 @@ static DEFINE_MUTEX(on_close_mutex);
static void CHSC_LOG_HEX(int level, void *data, int length)
{
- while (length > 0) {
- debug_event(chsc_debug_log_id, level, data, length);
- length -= chsc_debug_log_id->buf_size;
- data += chsc_debug_log_id->buf_size;
- }
+ debug_event(chsc_debug_log_id, level, data, length);
}
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
index 589ebfad6aad..ff5328b0bc8a 100644
--- a/drivers/s390/cio/chsc_sch.h
+++ b/drivers/s390/cio/chsc_sch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CHSC_SCH_H
#define _CHSC_SCH_H
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 939596d81b73..94cd813bdcfe 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_CIO_H
#define S390_CIO_H
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index e64e8278c42e..7bdbe73707c2 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CIO_DEBUG_H
#define CIO_DEBUG_H
@@ -22,13 +23,7 @@ extern debug_info_t *cio_debug_crw_id;
static inline void CIO_HEX_EVENT(int level, void *data, int length)
{
- if (unlikely(!cio_debug_trace_id))
- return;
- while (length > 0) {
- debug_event(cio_debug_trace_id, level, data, length);
- length -= cio_debug_trace_id->buf_size;
- data += cio_debug_trace_id->buf_size;
- }
+ debug_event(cio_debug_trace_id, level, data, length);
}
#endif
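Note: CHSC_LOG_HEX(), CIO_HEX_EVENT() and the qdio DBF helpers below no longer chop the buffer into buf_size pieces themselves; a single debug_event() call is made, on the assumption (implied by these hunks, not stated here) that the s390 debug facility now splits oversized records internally. The helpers reduce to a plain wrapper:

	#include <asm/debug.h>

	static debug_info_t *demo_debug;	/* registered elsewhere via debug_register() */

	static inline void DEMO_LOG_HEX(int level, void *data, int length)
	{
		/* One call; no manual loop over demo_debug->buf_size chunks. */
		debug_event(demo_debug, level, data, length);
	}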
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 220491d27ef4..7d59230e88bb 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -58,8 +58,9 @@
/* indices for READCMB */
enum cmb_index {
+ avg_utilization = -1,
/* basic and exended format: */
- cmb_ssch_rsch_count,
+ cmb_ssch_rsch_count = 0,
cmb_sample_count,
cmb_device_connect_time,
cmb_function_pending_time,
@@ -215,71 +216,52 @@ struct set_schib_struct {
unsigned long address;
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_set_schib_release(struct kref *kref)
-{
- struct set_schib_struct *set_data;
-
- set_data = container_of(kref, struct set_schib_struct, kref);
- kfree(set_data);
-}
-
#define CMF_PENDING 1
+#define SET_SCHIB_TIMEOUT (10 * HZ)
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
- int mbfc, unsigned long address)
+ int mbfc, unsigned long address)
{
- struct set_schib_struct *set_data;
- int ret;
+ struct set_schib_struct set_data;
+ int ret = -ENODEV;
spin_lock_irq(cdev->ccwlock);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
+ if (!cdev->private->cmb)
goto out;
- }
- set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
- if (!set_data) {
- ret = -ENOMEM;
- goto out;
- }
- init_waitqueue_head(&set_data->wait);
- kref_init(&set_data->kref);
- set_data->mme = mme;
- set_data->mbfc = mbfc;
- set_data->address = address;
ret = set_schib(cdev, mme, mbfc, address);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- /* if the device is not online, don't even try again */
- ret = -EBUSY;
- goto out_put;
- }
+ /* if the device is not online, don't even try again */
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
- cdev->private->state = DEV_STATE_CMFCHANGE;
- set_data->ret = CMF_PENDING;
- cdev->private->cmb_wait = set_data;
+ init_waitqueue_head(&set_data.wait);
+ set_data.mme = mme;
+ set_data.mbfc = mbfc;
+ set_data.address = address;
+ set_data.ret = CMF_PENDING;
+ cdev->private->state = DEV_STATE_CMFCHANGE;
+ cdev->private->cmb_wait = &set_data;
spin_unlock_irq(cdev->ccwlock);
- if (wait_event_interruptible(set_data->wait,
- set_data->ret != CMF_PENDING)) {
- spin_lock_irq(cdev->ccwlock);
- if (set_data->ret == CMF_PENDING) {
- set_data->ret = -ERESTARTSYS;
+
+ ret = wait_event_interruptible_timeout(set_data.wait,
+ set_data.ret != CMF_PENDING,
+ SET_SCHIB_TIMEOUT);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret <= 0) {
+ if (set_data.ret == CMF_PENDING) {
+ set_data.ret = (ret == 0) ? -ETIME : ret;
if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irq(cdev->ccwlock);
}
- spin_lock_irq(cdev->ccwlock);
cdev->private->cmb_wait = NULL;
- ret = set_data->ret;
-out_put:
- kref_put(&set_data->kref, cmf_set_schib_release);
+ ret = set_data.ret;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
@@ -287,28 +269,21 @@ out:
void retry_set_schib(struct ccw_device *cdev)
{
- struct set_schib_struct *set_data;
+ struct set_schib_struct *set_data = cdev->private->cmb_wait;
- set_data = cdev->private->cmb_wait;
- if (!set_data) {
- WARN_ON(1);
+ if (!set_data)
return;
- }
- kref_get(&set_data->kref);
+
set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
set_data->address);
wake_up(&set_data->wait);
- kref_put(&set_data->kref, cmf_set_schib_release);
}
static int cmf_copy_block(struct ccw_device *cdev)
{
- struct subchannel *sch;
- void *reference_buf;
- void *hw_block;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct cmb_data *cmb_data;
-
- sch = to_subchannel(cdev->dev.parent);
+ void *hw_block;
if (cio_update_schib(sch))
return -ENODEV;
@@ -323,102 +298,65 @@ static int cmf_copy_block(struct ccw_device *cdev)
}
cmb_data = cdev->private->cmb;
hw_block = cmb_data->hw_block;
- if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
- /* No need to copy. */
- return 0;
- reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
- if (!reference_buf)
- return -ENOMEM;
- /* Ensure consistency of block copied from hardware. */
- do {
- memcpy(cmb_data->last_block, hw_block, cmb_data->size);
- memcpy(reference_buf, hw_block, cmb_data->size);
- } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
cmb_data->last_update = get_tod_clock();
- kfree(reference_buf);
return 0;
}
struct copy_block_struct {
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_copy_block_release(struct kref *kref)
-{
- struct copy_block_struct *copy_block;
-
- copy_block = container_of(kref, struct copy_block_struct, kref);
- kfree(copy_block);
-}
-
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
- int ret;
- unsigned long flags;
+ struct copy_block_struct copy_block;
+ int ret = -ENODEV;
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
- goto out;
- }
- copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
- if (!copy_block) {
- ret = -ENOMEM;
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb)
goto out;
- }
- init_waitqueue_head(&copy_block->wait);
- kref_init(&copy_block->kref);
ret = cmf_copy_block(cdev);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- ret = -EBUSY;
- goto out_put;
- }
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
+
+ init_waitqueue_head(&copy_block.wait);
+ copy_block.ret = CMF_PENDING;
cdev->private->state = DEV_STATE_CMFUPDATE;
- copy_block->ret = CMF_PENDING;
- cdev->private->cmb_wait = copy_block;
+ cdev->private->cmb_wait = &copy_block;
+ spin_unlock_irq(cdev->ccwlock);
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- if (wait_event_interruptible(copy_block->wait,
- copy_block->ret != CMF_PENDING)) {
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (copy_block->ret == CMF_PENDING) {
- copy_block->ret = -ERESTARTSYS;
+ ret = wait_event_interruptible(copy_block.wait,
+ copy_block.ret != CMF_PENDING);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret) {
+ if (copy_block.ret == CMF_PENDING) {
+ copy_block.ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFUPDATE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irqrestore(cdev->ccwlock, flags);
}
- spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->cmb_wait = NULL;
- ret = copy_block->ret;
-out_put:
- kref_put(&copy_block->kref, cmf_copy_block_release);
+ ret = copy_block.ret;
out:
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
+ struct copy_block_struct *copy_block = cdev->private->cmb_wait;
- copy_block = cdev->private->cmb_wait;
- if (!copy_block) {
- WARN_ON(1);
+ if (!copy_block)
return;
- }
- kref_get(&copy_block->kref);
+
copy_block->ret = cmf_copy_block(cdev);
wake_up(&copy_block->wait);
- kref_put(&copy_block->kref, cmf_copy_block_release);
}
static void cmf_generic_reset(struct ccw_device *cdev)
@@ -650,25 +588,44 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 0, offset);
}
+/* calculate utilization in 0.1 percent units */
+static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
+ u64 device_disconnect_time, u64 start_time)
+{
+ u64 utilization, elapsed_time;
+
+ utilization = time_to_nsec(device_connect_time +
+ function_pending_time +
+ device_disconnect_time);
+
+ elapsed_time = get_tod_clock() - start_time;
+ elapsed_time = tod_to_ns(elapsed_time);
+ elapsed_time /= 1000;
+
+ return elapsed_time ? (utilization / elapsed_time) : 0;
+}
+
static u64 read_cmb(struct ccw_device *cdev, int index)
{
+ struct cmb_data *cmb_data;
+ unsigned long flags;
struct cmb *cmb;
+ u64 ret = 0;
u32 val;
- int ret;
- unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = 0;
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data)
goto out;
- }
- cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -691,7 +648,6 @@ static u64 read_cmb(struct ccw_device *cdev, int index)
val = cmb->device_active_only_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -729,8 +685,7 @@ static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* convert to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
/* copy data to new structure */
data->ssch_rsch_count = cmb->ssch_rsch_count;
@@ -904,28 +859,27 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 1, mba);
}
-
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
- struct cmbe *cmb;
struct cmb_data *cmb_data;
- u32 val;
- int ret;
unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
+ struct cmbe *cmb;
+ u64 ret = 0;
+ u32 val;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
- if (!cmb_data) {
- ret = 0;
+ if (!cmb_data)
goto out;
- }
- cmb = cmb_data->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -954,7 +908,6 @@ static u64 read_cmbe(struct ccw_device *cdev, int index)
val = cmb->initial_command_response_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -991,8 +944,7 @@ static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* conver to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
cmb = cmb_data->last_block;
/* copy data to new structure */
@@ -1045,19 +997,15 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct ccw_device *cdev;
- long interval;
+ struct ccw_device *cdev = to_ccwdev(dev);
unsigned long count;
- struct cmb_data *cmb_data;
+ long interval;
- cdev = to_ccwdev(dev);
count = cmf_read(cdev, cmb_sample_count);
spin_lock_irq(cdev->ccwlock);
- cmb_data = cdev->private->cmb;
if (count) {
- interval = cmb_data->last_update -
- cdev->private->cmb_start_time;
- interval = (interval * 1000) >> 12;
+ interval = get_tod_clock() - cdev->private->cmb_start_time;
+ interval = tod_to_ns(interval);
interval /= count;
} else
interval = -1;
@@ -1069,27 +1017,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct cmbdata data;
- u64 utilization;
- unsigned long t, u;
- int ret;
-
- ret = cmf_readall(to_ccwdev(dev), &data);
- if (ret == -EAGAIN || ret == -ENODEV)
- /* No data (yet/currently) available to use for calculation. */
- return sprintf(buf, "n/a\n");
- else if (ret)
- return ret;
-
- utilization = data.device_connect_time +
- data.function_pending_time +
- data.device_disconnect_time;
-
- /* calculate value in 0.1 percent units */
- t = data.elapsed_time / 1000;
- u = utilization / t;
+ unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
- return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
+ return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
}
#define cmf_attr(name) \
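Note: set_schib_wait() and cmf_cmb_copy_wait() above drop the kref-counted heap allocation and keep the wait state on the caller's stack; set_schib_wait() additionally bounds the sleep with wait_event_interruptible_timeout(). A reduced sketch of that waiting pattern (illustrative names, not the cmf code itself):

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	#define DEMO_PENDING	1
	#define DEMO_TIMEOUT	(10 * HZ)

	struct demo_wait {
		wait_queue_head_t wait;
		int ret;
	};

	/* Called from completion context to finish the pending operation. */
	static void demo_complete(struct demo_wait *w, int result)
	{
		w->ret = result;
		wake_up(&w->wait);
	}

	static int demo_wait_for_completion(struct demo_wait *w)
	{
		long rc;

		init_waitqueue_head(&w->wait);
		w->ret = DEMO_PENDING;

		/* >0: condition met, 0: timed out, <0: interrupted by a signal. */
		rc = wait_event_interruptible_timeout(w->wait, w->ret != DEMO_PENDING,
						      DEMO_TIMEOUT);
		if (rc <= 0 && w->ret == DEMO_PENDING)
			w->ret = (rc == 0) ? -ETIME : rc;
		return w->ret;
	}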
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index 3d3cd402b376..fc285ca41141 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Channel report handling code
*
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index c9f3fb39ebeb..30357cbf350a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CSS_H
#define _CSS_H
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e5c32f4b5287..318d8269f5de 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -142,7 +142,7 @@ static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
-static void recovery_func(unsigned long data);
+static void recovery_func(struct timer_list *unused);
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -194,7 +194,7 @@ int __init io_subchannel_init(void)
{
int ret;
- setup_timer(&recovery_timer, recovery_func, 0);
+ timer_setup(&recovery_timer, recovery_func, 0);
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
@@ -726,7 +726,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
INIT_WORK(&priv->todo_work, ccw_device_todo);
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
- init_timer(&priv->timer);
+ timer_setup(&priv->timer, ccw_device_timeout, 0);
atomic_set(&priv->onoff, 0);
cdev->ccwlock = sch->lock;
@@ -1271,7 +1271,7 @@ static void recovery_work_func(struct work_struct *unused)
static DECLARE_WORK(recovery_work, recovery_work_func);
-static void recovery_func(unsigned long data)
+static void recovery_func(struct timer_list *unused)
{
/*
* We can't do our recovery in softirq context and it's not
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 69cb70f080a5..f5c427ec24b1 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_DEVICE_H
#define S390_DEVICE_H
#include <asm/ccwdev.h>
#include <linux/atomic.h>
+#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
@@ -133,6 +135,7 @@ int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
+void ccw_device_timeout(struct timer_list *t);
void ccw_device_set_timeout(struct ccw_device *, int);
void ccw_device_schedule_recovery(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index f98ea674c3d8..dd7d79d30edc 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -91,12 +91,12 @@ static void ccw_timeout_log(struct ccw_device *cdev)
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
-static void
-ccw_device_timeout(unsigned long data)
+void
+ccw_device_timeout(struct timer_list *t)
{
- struct ccw_device *cdev;
+ struct ccw_device_private *priv = from_timer(priv, t, timer);
+ struct ccw_device *cdev = priv->cdev;
- cdev = (struct ccw_device *) data;
spin_lock_irq(cdev->ccwlock);
if (timeout_log_enabled)
ccw_timeout_log(cdev);
@@ -118,8 +118,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
if (mod_timer(&cdev->private->timer, jiffies + expires))
return;
}
- cdev->private->timer.function = ccw_device_timeout;
- cdev->private->timer.data = (unsigned long) cdev;
cdev->private->timer.expires = jiffies + expires;
add_timer(&cdev->private->timer);
}
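Note: ccw_device_timeout() is now shared within cio and bound once in io_subchannel_initialize_dev(); the callback derives the ccw_device from the private structure the timer is embedded in. A sketch of recovering a secondary object through the container (names assumed):

	#include <linux/timer.h>

	struct demo_cdev;			/* opaque device, as with ccw_device */

	struct demo_private {
		struct demo_cdev *cdev;		/* back-pointer kept in the container */
		struct timer_list timer;
	};

	static void demo_device_timeout(struct timer_list *t)
	{
		struct demo_private *priv = from_timer(priv, t, timer);
		struct demo_cdev *cdev = priv->cdev;

		(void)cdev;			/* raise the timeout event for cdev here */
	}

	static void demo_initialize_dev(struct demo_private *priv)
	{
		timer_setup(&priv->timer, demo_device_timeout, 0);
	}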
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index d4fa30541a33..f6df83a9dfbb 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CCW device SENSE ID I/O handling.
*
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index da246b67edfe..d30a3babf176 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CCW device PGID and path verification I/O handling.
*
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 9bc3512374c9..7d5c7892b2c4 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2002
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 0f11f3bcac82..ce16e4f45d44 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -43,13 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(eadm_debug, level))
- return;
- while (length > 0) {
- debug_event(eadm_debug, level, data, length);
- length -= eadm_debug->buf_size;
- data += eadm_debug->buf_size;
- }
+ debug_event(eadm_debug, level, data, length);
}
static void orb_init(union orb *orb)
@@ -100,9 +94,10 @@ static int eadm_subchannel_clear(struct subchannel *sch)
return 0;
}
-static void eadm_subchannel_timeout(unsigned long data)
+static void eadm_subchannel_timeout(struct timer_list *t)
{
- struct subchannel *sch = (struct subchannel *) data;
+ struct eadm_private *private = from_timer(private, t, timer);
+ struct subchannel *sch = private->sch;
spin_lock_irq(sch->lock);
EADM_LOG(1, "timeout");
@@ -124,8 +119,6 @@ static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
if (mod_timer(&private->timer, jiffies + expires))
return;
}
- private->timer.function = eadm_subchannel_timeout;
- private->timer.data = (unsigned long) sch;
private->timer.expires = jiffies + expires;
add_timer(&private->timer);
}
@@ -230,7 +223,7 @@ static int eadm_subchannel_probe(struct subchannel *sch)
return -ENOMEM;
INIT_LIST_HEAD(&private->head);
- init_timer(&private->timer);
+ timer_setup(&private->timer, eadm_subchannel_timeout, 0);
spin_lock_irq(sch->lock);
set_eadm_private(sch, private);
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
index 9664e4653f98..390ab5a6b72f 100644
--- a/drivers/s390/cio/eadm_sch.h
+++ b/drivers/s390/cio/eadm_sch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef EADM_SCH_H
#define EADM_SCH_H
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index ca5e9bb9d458..99c900cc3e5b 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions for assembling fcx enabled I/O control blocks.
*
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index b3e06a7b9480..835de44dbbcc 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 89a787790888..a3ece8d8091a 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 9a1b56b2df3e..af571d8d6925 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4182f60124da..4fa9ee1d09fa 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Channel subsystem I/O instructions.
*/
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index b31ee6bff1e4..35ad4ddd61e0 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index 358ee16d10a2..deaf59f93326 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions for incremental construction of fcx enabled I/O control blocks.
*
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index 7a640530e7f5..a2d3778b2c95 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Orb related data structures.
*
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7e70f9298cc1..a6f7c2986b94 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
@@ -392,7 +393,7 @@ int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
-void qdio_outbound_timer(unsigned long data);
+void qdio_outbound_timer(struct timer_list *t);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b6fc147f83d8..68a82f3e2e92 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008, 2009
*
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 1d595d17bf11..f85f5fa7cefc 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2008
*
@@ -33,11 +34,7 @@ extern debug_info_t *qdio_dbf_error;
static inline void DBF_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
- len -= qdio_dbf_setup->buf_size;
- addr += qdio_dbf_setup->buf_size;
- }
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
}
#define DBF_ERROR(text...) \
@@ -49,11 +46,7 @@ static inline void DBF_HEX(void *addr, int len)
static inline void DBF_ERROR_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_error, DBF_ERR, addr, len);
- len -= qdio_dbf_error->buf_size;
- addr += qdio_dbf_error->buf_size;
- }
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
}
#define DBF_DEV_EVENT(level, device, text...) \
@@ -68,11 +61,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
int len, int level)
{
- while (len > 0) {
- debug_event(dev->debug_area, level, addr, len);
- len -= dev->debug_area->buf_size;
- addr += dev->debug_area->buf_size;
- }
+ debug_event(dev->debug_area, level, addr, len);
}
int qdio_allocate_dbf(struct qdio_initialize *init_data,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a4ad39ba3873..ed4852fab44b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -894,9 +894,9 @@ void qdio_outbound_processing(unsigned long data)
__qdio_outbound_processing(q);
}
-void qdio_outbound_timer(unsigned long data)
+void qdio_outbound_timer(struct timer_list *t)
{
- struct qdio_q *q = (struct qdio_q *)data;
+ struct qdio_q *q = from_timer(q, t, u.out.timer);
qdio_tasklet_schedule(q);
}
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..9ae1380cbc31 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -252,8 +252,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
- setup_timer(&q->u.out.timer, (void(*)(unsigned long))
- &qdio_outbound_timer, (unsigned long)q);
+ timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
}
}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c61164f4528e..0787b587e4b8 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
@@ -56,10 +57,8 @@ static u32 *get_indicator(void)
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
- if (!atomic_read(&q_indicators[i].count)) {
- atomic_set(&q_indicators[i].count, 1);
+ if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
return &q_indicators[i].ind;
- }
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
@@ -68,13 +67,11 @@ static u32 *get_indicator(void)
static void put_indicator(u32 *addr)
{
- int i;
+ struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
if (!addr)
return;
- i = ((unsigned long)addr - (unsigned long)q_indicators) /
- sizeof(struct indicator_t);
- atomic_dec(&q_indicators[i].count);
+ atomic_dec(&ind->count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
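Note: get_indicator()/put_indicator() above now claim a slot with a single atomic_cmpxchg() (closing the read-then-set race) and release it via container_of() instead of pointer arithmetic. A compact sketch of the same scheme with hypothetical names:

	#include <linux/atomic.h>
	#include <linux/kernel.h>

	#define DEMO_NR_IND	64

	struct demo_indicator {
		u32 ind;
		atomic_t count;
	};

	static struct demo_indicator demo_indicators[DEMO_NR_IND];

	static u32 *demo_get_indicator(void)
	{
		int i;

		for (i = 0; i < DEMO_NR_IND; i++)
			/* cmpxchg returns the old value: 0 means we claimed the slot. */
			if (!atomic_cmpxchg(&demo_indicators[i].count, 0, 1))
				return &demo_indicators[i].ind;
		return NULL;	/* caller would fall back to a shared indicator */
	}

	static void demo_put_indicator(u32 *addr)
	{
		struct demo_indicator *ind;

		if (!addr)
			return;
		ind = container_of(addr, struct demo_indicator, ind);
		atomic_dec(&ind->count);
	}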
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
index 8e706669ac8b..e331cd97e83b 100644
--- a/drivers/s390/cio/trace.c
+++ b/drivers/s390/cio/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoint definitions for s390_cio
*
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 5b807a09f21b..1f8d1c1e566d 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Tracepoint header for the s390 Common I/O layer (CIO)
*
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 5ccfdc80d0ec..d9a2fffd034b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* channel program interfaces
*
@@ -105,7 +106,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
{
int ret = 0;
- if (!len || pa->pa_nr)
+ if (!len)
+ return 0;
+
+ if (pa->pa_nr)
return -EINVAL;
pa->pa_iova = iova;
@@ -329,6 +333,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
+ if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
+ return;
if (!ccw->count)
return;
@@ -501,6 +507,16 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count) {
+ /*
+ * We just want the translation result of any direct ccw
+ * to be an IDA ccw, so let's add the IDA flag for it.
+ * Although the flag will be ignored by firmware.
+ */
+ ccw->flags |= CCW_FLAG_IDA;
+ return 0;
+ }
+
/*
* Pin data page(s) in memory.
* The number of pages actually is the count of the idaws which will be
@@ -541,6 +557,9 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count)
+ return 0;
+
/* Calculate size of idaws. */
ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
if (ret)
@@ -569,10 +588,6 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
for (i = 0; i < idaw_nr; i++) {
idaw_iova = *(idaws + i);
- if (IS_ERR_VALUE(idaw_iova)) {
- ret = -EFAULT;
- goto out_free_idaws;
- }
ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
idaw_iova, 1);
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 7a1996b3b36d..a4b74fb1aa57 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* channel program interfaces
*
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 80a0559cd7ce..c30420c517b1 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Finite state machine for vfio-ccw device handling
*
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index a66a317f3e4f..41eeb57d68a3 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Physical device callbacks for vfio_ccw
*
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index fc0f01c16ef9..78a66d96756b 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Private stuff for vfio_ccw driver
*
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index be36f1010d75..b59af548ed1c 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# S/390 crypto devices
#
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
index cd350345b3d2..16b59ce5e01d 100644
--- a/drivers/s390/crypto/ap_asm.h
+++ b/drivers/s390/crypto/ap_asm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -116,6 +117,49 @@ static inline int ap_qci(void *config)
return reg1;
}
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_aqic() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ * alternate AP queue info provided by the qact function
+ * in GR2 is stored in.
+ *
+ * Returns AP queue status. Check response_code field for failures.
+ */
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2") = 0;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ : : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5f0be2040272..faeba9db3d95 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -176,6 +176,18 @@ static int ap_apft_available(void)
return test_facility(15);
}
+/*
+ * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
+ *
+ * Returns 1 if the QACT subfunction is available.
+ */
+static inline int ap_qact_available(void)
+{
+ if (ap_configuration)
+ return ap_configuration->qact;
+ return 0;
+}
+
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -362,13 +374,13 @@ void ap_wait(enum ap_wait wait)
/**
* ap_request_timeout(): Handling of request timeouts
- * @data: Holds the AP device.
+ * @t: timer making this callback
*
* Handles request timeouts.
*/
-void ap_request_timeout(unsigned long data)
+void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = (struct ap_queue *) data;
+ struct ap_queue *aq = from_timer(aq, t, timeout);
if (ap_suspend_flag)
return;
@@ -988,6 +1000,47 @@ static int ap_select_domain(void)
}
/*
+ * This function checks the type and returns either 0 for not
+ * supported or the highest compatible type value (which may
+ * include the input type value).
+ */
+static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
+{
+ int comp_type = 0;
+
+ /* < CEX2A is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX2A)
+ return 0;
+ /* up to CEX6 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ return rawtype;
+ /*
+ * unknown new type > CEX6, check for compatibility
+ * to the highest known and supported type which is
+ * currently CEX6 with the help of the QACT function.
+ */
+ if (ap_qact_available()) {
+ struct ap_queue_status status;
+ union ap_qact_ap_info apinfo = {0};
+
+ apinfo.mode = (func >> 26) & 0x07;
+ apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ status = ap_qact(qid, 0, &apinfo);
+ if (status.response_code == AP_RESPONSE_NORMAL
+ && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ comp_type = apinfo.cat;
+ }
+ if (!comp_type)
+ AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ else if (comp_type != rawtype)
+ AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+ return comp_type;
+}
+
+/*
* helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
@@ -1014,8 +1067,8 @@ static void ap_scan_bus(struct work_struct *unused)
struct ap_card *ac;
struct device *dev;
ap_qid_t qid;
- int depth = 0, type = 0;
- unsigned int functions = 0;
+ int comp_type, depth = 0, type = 0;
+ unsigned int func = 0;
int rc, id, dom, borked, domains, defdomdevs = 0;
AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
@@ -1066,12 +1119,12 @@ static void ap_scan_bus(struct work_struct *unused)
}
continue;
}
- rc = ap_query_queue(qid, &depth, &type, &functions);
+ rc = ap_query_queue(qid, &depth, &type, &func);
if (dev) {
spin_lock_bh(&aq->lock);
if (rc == -ENODEV ||
/* adapter reconfiguration */
- (ac && ac->functions != functions))
+ (ac && ac->functions != func))
aq->state = AP_STATE_BORKED;
borked = aq->state == AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
@@ -1087,11 +1140,14 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (rc)
continue;
- /* new queue device needed */
+ /* a new queue device is needed, check out comp type */
+ comp_type = ap_get_compatible_type(qid, type, func);
+ if (!comp_type)
+ continue;
+ /* maybe a card device needs to be created first */
if (!ac) {
- /* but first create the card device */
- ac = ap_card_create(id, depth,
- type, functions);
+ ac = ap_card_create(id, depth, type,
+ comp_type, func);
if (!ac)
continue;
ac->ap_dev.device.bus = &ap_bus_type;
@@ -1109,7 +1165,7 @@ static void ap_scan_bus(struct work_struct *unused)
get_device(&ac->ap_dev.device);
}
/* now create the new queue device */
- aq = ap_queue_create(qid, type);
+ aq = ap_queue_create(qid, comp_type);
if (!aq)
continue;
aq->card = ac;
@@ -1147,7 +1203,7 @@ out:
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
-static void ap_config_timeout(unsigned long ptr)
+static void ap_config_timeout(struct timer_list *unused)
{
if (ap_suspend_flag)
return;
@@ -1250,7 +1306,7 @@ int __init ap_module_init(void)
goto out_bus;
/* Setup the AP bus rescan timer. */
- setup_timer(&ap_config_timer, ap_config_timeout, 0);
+ timer_setup(&ap_config_timer, ap_config_timeout, 0);
/*
* Setup the high resultion poll timer.
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 754cf2223cfb..7e45c4d08cad 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -241,7 +241,7 @@ void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
void ap_wait(enum ap_wait wait);
-void ap_request_timeout(unsigned long data);
+void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
@@ -250,8 +250,8 @@ void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int device_functions);
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+ int comp_device_type, unsigned int functions);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 836efac96813..97a8cf578116 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -170,22 +171,20 @@ static void ap_card_device_release(struct device *dev)
kfree(ac);
}
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int functions)
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
+ INIT_LIST_HEAD(&ac->list);
INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
- ac->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
- ac->raw_hwtype = device_type;
+ ac->ap_dev.device_type = comp_type;
+ ac->raw_hwtype = raw_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index 78dbff842dae..6a9d77c75ec3 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2016
* Author(s): Harald Freudenberger <freude@de.ibm.com>
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 56b96edffd5b..ba3a2e13b0eb 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -626,16 +627,14 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
- setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
+ timer_setup(&aq->timeout, ap_request_timeout, 0);
return aq;
}
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index f61fa47135a6..8dda5bb34a2f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -125,10 +125,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
- cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL);
+ cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL);
if (!cprbmem)
return -ENOMEM;
- memset(cprbmem, 0, 2 * cprbplusparamblen);
preqcblk = (struct CPRBX *) cprbmem;
prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 6c94efd23eac..73541a798db7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -76,6 +76,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX3A 8
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
+#define ZCRYPT_CEX6 12
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 4e91163d70a6..e2eebc775a37 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -45,6 +45,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -55,6 +57,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
@@ -72,17 +76,25 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
- 5, 6, 59, 20, 115, 581, 0, 0};
+ 14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
- 3, 3, 6, 8, 32, 218, 0, 0};
+ 8, 9, 20, 18, 66, 458, 0, 0};
+ static const int CEX6A_SPEED_IDX[] = {
+ 6, 9, 20, 17, 65, 438, 0, 0};
+
static const int CEX4C_SPEED_IDX[] = {
- 24, 25, 82, 41, 138, 1111, 79, 8};
+ 59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 10, 14, 23, 17, 45, 242, 63, 4};
+ 24, 31, 50, 37, 90, 479, 27, 10};
+ static const int CEX6C_SPEED_IDX[] = {
+ 16, 20, 32, 27, 77, 455, 23, 9};
+
static const int CEX4P_SPEED_IDX[] = {
- 142, 198, 1852, 203, 331, 1563, 0, 8};
+ 224, 313, 3560, 359, 605, 2827, 0, 50};
static const int CEX5P_SPEED_IDX[] = {
- 49, 67, 131, 52, 85, 287, 0, 4};
+ 63, 84, 156, 83, 142, 533, 0, 10};
+ static const int CEX6P_SPEED_IDX[] = {
+ 55, 70, 121, 73, 129, 522, 0, 9};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -99,11 +111,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
sizeof(CEX4A_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
sizeof(CEX5A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6A";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
+ sizeof(CEX6A_SPEED_IDX));
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -125,7 +142,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
sizeof(CEX4C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
/* wrong user space type, must be CEX5
* just keep it for cca compatibility
@@ -133,6 +150,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6C";
+ /* wrong user space type, must be CEX6
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
+ sizeof(CEX6C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -143,11 +168,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
sizeof(CEX4P_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6P";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
+ sizeof(CEX6P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
index 719571375ccc..748390a3799b 100644
--- a/drivers/s390/crypto/zcrypt_cex4.h
+++ b/drivers/s390/crypto/zcrypt_cex4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2012
* Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 13e38defb6b8..241dbb5f75bf 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2016
* Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 6dd5d7c58dd0..db5bde47dfb0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -240,8 +240,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
- } else {
- /* mod_len > 256 = 4096 bit RSA Key */
+ } else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->message;
memset(meb3, 0, sizeof(*meb3));
ap_msg->length = sizeof(*meb3);
@@ -251,7 +250,8 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
- }
+ } else
+ return -EINVAL;
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index afd20cee7ea0..785620d30504 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -474,7 +474,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
- if (memcmp(function_code, "US", 2) == 0)
+ if (memcmp(function_code, "US", 2) == 0
+ || memcmp(function_code, "AU", 2) == 0)
ap_msg->special = 1;
else
ap_msg->special = 0;
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index c351b07603e0..513b7ae64980 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# S/390 network devices
#
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 8363f1c966ef..f7ec51db3cd6 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 47bf0501995e..675575ef162e 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 570ae3b7adf6..1b4ee570b712 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index c963d04799c0..225737295cb4 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 26363e0816fe..be9f17218531 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1761,6 +1761,7 @@ static struct ccwgroup_driver ctcm_group_driver = {
.owner = THIS_MODULE,
.name = CTC_DRIVER_NAME,
},
+ .ccw_driver = &ctcm_ccw_driver,
.setup = ctcm_probe_device,
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 6f4417c80247..16bdf23ee02b 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index f8be39634f03..e02f295d38a9 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004, 2007
* Authors: Belinda Thompson (belindat@us.ibm.com)
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index bd1b1cc54ffa..441d7b211f0f 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index ddb0aa321339..ded1930a00b2 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index e5dea67f902e..c81adf8042d7 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -129,8 +129,9 @@ fsm_getstate_str(fsm_instance *fi)
}
static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
{
+ fsm_timer *this = from_timer(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
@@ -142,13 +143,11 @@ void
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
- init_timer(&this->tl);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
}
void
@@ -170,9 +169,7 @@ fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
this->fi->name, this, millisec);
#endif
- init_timer(&this->tl);
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
@@ -191,9 +188,7 @@ fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
#endif
del_timer(&this->tl);
- init_timer(&this->tl);
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index a4510cf59034..16dc071a2973 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FSM_H_
#define _FSM_H_
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index d01b5c2a7760..e131a03262ad 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -834,13 +834,13 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
* Emit buffer of a lan command.
*/
static void
-lcs_lancmd_timeout(unsigned long data)
+lcs_lancmd_timeout(struct timer_list *t)
{
- struct lcs_reply *reply, *list_reply, *r;
+ struct lcs_reply *reply = from_timer(reply, t, timer);
+ struct lcs_reply *list_reply, *r;
unsigned long flags;
LCS_DBF_TEXT(4, trace, "timeout");
- reply = (struct lcs_reply *) data;
spin_lock_irqsave(&reply->card->lock, flags);
list_for_each_entry_safe(list_reply, r,
&reply->card->lancmd_waiters,list) {
@@ -864,7 +864,6 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
{
struct lcs_reply *reply;
struct lcs_cmd *cmd;
- struct timer_list timer;
unsigned long flags;
int rc;
@@ -885,14 +884,10 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
rc = lcs_ready_buffer(&card->write, buffer);
if (rc)
return rc;
- init_timer_on_stack(&timer);
- timer.function = lcs_lancmd_timeout;
- timer.data = (unsigned long) reply;
- timer.expires = jiffies + HZ*card->lancmd_timeout;
- add_timer(&timer);
+ timer_setup(&reply->timer, lcs_lancmd_timeout, 0);
+ mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout);
wait_event(reply->wait_q, reply->received);
- del_timer_sync(&timer);
- destroy_timer_on_stack(&timer);
+ del_timer_sync(&reply->timer);
LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
rc = reply->rc;
lcs_put_reply(reply);
@@ -2396,6 +2391,7 @@ static struct ccwgroup_driver lcs_group_driver = {
.owner = THIS_MODULE,
.name = "lcs",
},
+ .ccw_driver = &lcs_ccw_driver,
.setup = lcs_probe_device,
.remove = lcs_remove_device,
.set_online = lcs_new_device,
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 150fcb4cebc3..fbc8b90b1f85 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*lcs.h*/
#include <linux/interrupt.h>
@@ -275,6 +276,7 @@ struct lcs_reply {
void (*callback)(struct lcs_card *, struct lcs_cmd *);
wait_queue_head_t wait_q;
struct lcs_card *card;
+ struct timer_list timer;
int received;
int rc;
};
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 59e09854c4f7..9cd569ef43ec 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -182,6 +183,21 @@ struct qeth_sbp_info {
__u32 reflect_promisc_primary:1;
};
+struct qeth_vnicc_info {
+ /* supported/currently configured VNICCs; updated in IPA exchanges */
+ u32 sup_chars;
+ u32 cur_chars;
+ /* supported commands: bitmasks which VNICCs support respective cmd */
+ u32 set_char_sup;
+ u32 getset_timeout_sup;
+ /* timeout value for the learning characteristic */
+ u32 learning_timeout;
+ /* characteristics wanted/configured by user */
+ u32 wanted_chars;
+ /* has user explicitly enabled rx_bcast while online? */
+ bool rx_bcast_enabled;
+};
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -216,20 +232,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
-#define QETH_MODELLIST_ARRAY \
- {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
- {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
- {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
- {0, 0, 0, 0, 0, 0} }
-#define QETH_CU_TYPE_IND 0
-#define QETH_CU_MODEL_IND 1
-#define QETH_DEV_TYPE_IND 2
-#define QETH_DEV_MODEL_IND 3
-#define QETH_QUEUE_NO_IND 4
-#define QETH_MULTICAST_IND 5
-
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
#define QETH_BUFSIZE 4096
@@ -673,6 +675,7 @@ struct qeth_card_options {
struct qeth_routing_info route6;
struct qeth_ipa_info ipa6;
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
+ struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
int layer2;
int performance_stats;
@@ -946,13 +949,13 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
int extra_elems, int data_offset);
int qeth_get_elements_for_frags(struct sk_buff *);
-int qeth_do_send_packet_fast(struct qeth_card *card,
- struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int hd_len, unsigned int offset, int elements);
+ unsigned int offset, unsigned int hd_len,
+ int elements_needed);
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int qeth_core_get_sset_count(struct net_device *, int);
void qeth_core_get_ethtool_stats(struct net_device *,
@@ -982,7 +985,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
-int qeth_recover_features(struct net_device *);
+void qeth_recover_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bae7440abc01..49b9efeba1bd 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
-static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
static struct mutex qeth_mod_mutex;
@@ -1386,6 +1385,7 @@ static void qeth_init_qdio_info(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 4, "intqdinf");
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
/* inbound */
+ card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
if (card->info.type == QETH_CARD_TYPE_IQD)
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
@@ -1519,34 +1519,17 @@ out:
return NULL;
}
-static int qeth_determine_card_type(struct qeth_card *card)
+static void qeth_determine_card_type(struct qeth_card *card)
{
- int i = 0;
-
QETH_DBF_TEXT(SETUP, 2, "detcdtyp");
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- while (known_devices[i][QETH_DEV_MODEL_IND]) {
- if ((CARD_RDEV(card)->id.dev_type ==
- known_devices[i][QETH_DEV_TYPE_IND]) &&
- (CARD_RDEV(card)->id.dev_model ==
- known_devices[i][QETH_DEV_MODEL_IND])) {
- card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
- card->qdio.no_out_queues =
- known_devices[i][QETH_QUEUE_NO_IND];
- card->qdio.no_in_queues = 1;
- card->info.is_multicast_different =
- known_devices[i][QETH_MULTICAST_IND];
- qeth_update_from_chp_desc(card);
- return 0;
- }
- i++;
- }
- card->info.type = QETH_CARD_TYPE_UNKNOWN;
- dev_err(&card->gdev->dev, "The adapter hardware is of an "
- "unknown type\n");
- return -ENOENT;
+ card->info.type = CARD_RDEV(card)->id.driver_info;
+ card->qdio.no_out_queues = QETH_MAX_QUEUES;
+ if (card->info.type == QETH_CARD_TYPE_IQD)
+ card->info.is_multicast_different = 0x0103;
+ qeth_update_from_chp_desc(card);
}
static int qeth_clear_channel(struct qeth_channel *channel)
@@ -2090,7 +2073,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
spin_lock_irqsave(&card->lock, flags);
list_add_tail(&reply->list, &card->cmd_waiter_list);
spin_unlock_irqrestore(&card->lock, flags);
- QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
qeth_prepare_control_data(card, len, iob);
@@ -2233,23 +2215,15 @@ static int qeth_cm_setup(struct qeth_card *card)
static int qeth_get_initial_mtu_for_card(struct qeth_card *card)
{
switch (card->info.type) {
- case QETH_CARD_TYPE_UNKNOWN:
- return 1500;
case QETH_CARD_TYPE_IQD:
return card->info.max_mtu;
case QETH_CARD_TYPE_OSD:
- switch (card->info.link_type) {
- case QETH_LINK_TYPE_HSTR:
- case QETH_LINK_TYPE_LANE_TR:
- return 2000;
- default:
- return card->options.layer2 ? 1500 : 1492;
- }
- case QETH_CARD_TYPE_OSM:
case QETH_CARD_TYPE_OSX:
- return card->options.layer2 ? 1500 : 1492;
+ if (!card->options.layer2)
+ return ETH_DATA_LEN - 8; /* L3: allow for LLC + SNAP */
+ /* fall through */
default:
- return 1500;
+ return ETH_DATA_LEN;
}
}
@@ -2279,7 +2253,6 @@ static int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
return ((mtu >= 576) &&
(mtu <= card->info.max_mtu));
case QETH_CARD_TYPE_OSN:
- case QETH_CARD_TYPE_UNKNOWN:
default:
return 1;
}
@@ -4040,35 +4013,23 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
return flush_cnt;
}
-int qeth_do_send_packet_fast(struct qeth_card *card,
- struct qeth_qdio_out_q *queue, struct sk_buff *skb,
+int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
struct qeth_hdr *hdr, unsigned int offset,
unsigned int hd_len)
{
- struct qeth_qdio_out_buffer *buffer;
- int index;
+ int index = queue->next_buf_to_fill;
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
- /* spin until we get the queue ... */
- while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
- /* ... now we've got the queue */
- index = queue->next_buf_to_fill;
- buffer = queue->bufs[queue->next_buf_to_fill];
/*
* check if buffer is empty to make sure that we do not 'overtake'
* ourselves and try to fill a buffer that is already primed
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
- goto out;
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
qeth_flush_buffers(queue, index, 1);
return 0;
-out:
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
- return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
@@ -4923,7 +4884,6 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
if (card->options.cq == QETH_CQ_ENABLED) {
int offset = QDIO_MAX_BUFFERS_PER_Q *
(card->qdio.no_in_queues - 1);
- i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
virt_to_phys(card->qdio.c_q->bufs[i].buffer);
@@ -5209,49 +5169,27 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
-static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
- struct qdio_buffer_element *element,
- struct sk_buff **pskb, int offset, int *pfrag,
- int data_len)
+static void qeth_create_skb_frag(struct qdio_buffer_element *element,
+ struct sk_buff *skb, int offset, int data_len)
{
struct page *page = virt_to_page(element->addr);
- if (*pskb == NULL) {
- if (qethbuffer->rx_skb) {
- /* only if qeth_card.options.cq == QETH_CQ_ENABLED */
- *pskb = qethbuffer->rx_skb;
- qethbuffer->rx_skb = NULL;
- } else {
- *pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
- if (!(*pskb))
- return -ENOMEM;
- }
+ unsigned int next_frag;
- skb_reserve(*pskb, ETH_HLEN);
- if (data_len <= QETH_RX_PULL_LEN) {
- skb_put_data(*pskb, element->addr + offset, data_len);
- } else {
- get_page(page);
- skb_put_data(*pskb, element->addr + offset,
- QETH_RX_PULL_LEN);
- skb_fill_page_desc(*pskb, *pfrag, page,
- offset + QETH_RX_PULL_LEN,
- data_len - QETH_RX_PULL_LEN);
- (*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
- (*pskb)->len += data_len - QETH_RX_PULL_LEN;
- (*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
- (*pfrag)++;
- }
- } else {
- get_page(page);
- skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
- (*pskb)->data_len += data_len;
- (*pskb)->len += data_len;
- (*pskb)->truesize += data_len;
- (*pfrag)++;
- }
+ /* first fill the linear space */
+ if (!skb->len) {
+ unsigned int linear = min(data_len, skb_tailroom(skb));
+ skb_put_data(skb, element->addr + offset, linear);
+ data_len -= linear;
+ if (!data_len)
+ return;
+ offset += linear;
+ /* fall through to add page frag for remaining data */
+ }
- return 0;
+ next_frag = skb_shinfo(skb)->nr_frags;
+ get_page(page);
+ skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
}
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
@@ -5267,22 +5205,19 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
struct qdio_buffer *buffer = qethbuffer->buffer;
int offset = *__offset;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
int skb_len = 0;
void *data_ptr;
int data_len;
int headroom = 0;
int use_rx_sg = 0;
- int frag = 0;
/* qeth_hdr must not cross element boundaries */
- if (element->length < offset + sizeof(struct qeth_hdr)) {
+ while (element->length < offset + sizeof(struct qeth_hdr)) {
if (qeth_is_last_sbale(element))
return NULL;
element++;
offset = 0;
- if (element->length < sizeof(struct qeth_hdr))
- return NULL;
}
*hdr = element->addr + offset;
@@ -5309,27 +5244,32 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
if (((skb_len >= card->options.rx_sg_cb) &&
(!(card->info.type == QETH_CARD_TYPE_OSN)) &&
(!atomic_read(&card->force_alloc_skb))) ||
- (card->options.cq == QETH_CQ_ENABLED)) {
+ (card->options.cq == QETH_CQ_ENABLED))
use_rx_sg = 1;
+
+ if (use_rx_sg && qethbuffer->rx_skb) {
+ /* QETH_CQ_ENABLED only: */
+ skb = qethbuffer->rx_skb;
+ qethbuffer->rx_skb = NULL;
} else {
- skb = dev_alloc_skb(skb_len + headroom);
- if (!skb)
- goto no_mem;
- if (headroom)
- skb_reserve(skb, headroom);
+ unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;
+
+ skb = dev_alloc_skb(linear + headroom);
}
+ if (!skb)
+ goto no_mem;
+ if (headroom)
+ skb_reserve(skb, headroom);
data_ptr = element->addr + offset;
while (skb_len) {
data_len = min(skb_len, (int)(element->length - offset));
if (data_len) {
- if (use_rx_sg) {
- if (qeth_create_skb_frag(qethbuffer, element,
- &skb, offset, &frag, data_len))
- goto no_mem;
- } else {
+ if (use_rx_sg)
+ qeth_create_skb_frag(element, skb, offset,
+ data_len);
+ else
skb_put_data(skb, data_ptr, data_len);
- }
}
skb_len -= data_len;
if (skb_len) {
@@ -5429,7 +5369,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (qdio_start_irq(card->data.ccwdev, 0))
napi_schedule(&card->napi);
out:
@@ -5737,11 +5677,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
gdev->cdev[1]->handler = qeth_irq;
gdev->cdev[2]->handler = qeth_irq;
- rc = qeth_determine_card_type(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- goto err_card;
- }
+ qeth_determine_card_type(card);
rc = qeth_setup_card(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -5875,6 +5811,7 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.owner = THIS_MODULE,
.name = "qeth",
},
+ .ccw_driver = &qeth_ccw_driver,
.setup = qeth_core_probe_device,
.remove = qeth_core_remove_device,
.set_online = qeth_core_set_online,
@@ -6416,32 +6353,29 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
-/* try to restore device features on a device after recovery */
-int qeth_recover_features(struct net_device *dev)
+#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO)
+
+/**
+ * qeth_recover_features() - Restore device features after recovery
+ * @dev: the recovering net_device
+ *
+ * Caller must hold rtnl lock.
+ */
+void qeth_recover_features(struct net_device *dev)
{
+ netdev_features_t features = dev->features;
struct qeth_card *card = dev->ml_priv;
- netdev_features_t recover = dev->features;
- if (recover & NETIF_F_IP_CSUM) {
- if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
- recover ^= NETIF_F_IP_CSUM;
- }
- if (recover & NETIF_F_RXCSUM) {
- if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
- recover ^= NETIF_F_RXCSUM;
- }
- if (recover & NETIF_F_TSO) {
- if (qeth_set_ipa_tso(card, 1))
- recover ^= NETIF_F_TSO;
- }
-
- if (recover == dev->features)
- return 0;
+ /* force-off any feature that needs an IPA sequence.
+ * netdev_update_features() will restart them.
+ */
+ dev->features &= ~QETH_HW_FEATURES;
+ netdev_update_features(dev);
+ if (features == dev->features)
+ return;
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
- dev->features = recover;
- return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_recover_features);
@@ -6498,8 +6432,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev,
/* if the card isn't up, remove features that require hw changes */
if (card->state == CARD_STATE_DOWN ||
card->state == CARD_STATE_RECOVER)
- features = features & ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO);
+ features &= ~QETH_HW_FEATURES;
QETH_DBF_HEX(SETUP, 2, &features, sizeof(features));
return features;
}
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 6dd7d05e5693..22428b769f9b 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
@@ -167,7 +168,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
- {IPA_RC_TRACE_ALREADY_ACTIVE, "trace already active"},
+ {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"},
{IPA_RC_INVALID_FORMAT, "invalid format or length"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
{IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"},
@@ -193,6 +194,7 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
+ {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"},
{IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"},
{IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"},
{IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"},
@@ -253,6 +255,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_DELGMAC, "delgmac"},
{IPA_CMD_SETVLAN, "setvlan"},
{IPA_CMD_DELVLAN, "delvlan"},
+ {IPA_CMD_VNICC, "vnic_characteristics"},
{IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"},
{IPA_CMD_SETCCID, "setccid"},
{IPA_CMD_DELCCID, "delccid"},
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 912e0107de8f..ff6877f7b6f8 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
@@ -44,7 +45,6 @@ extern unsigned char IPA_PDU_HEADER[];
#define IPA_CMD_PRIM_VERSION_NO 0x01
enum qeth_card_types {
- QETH_CARD_TYPE_UNKNOWN = 0,
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
QETH_CARD_TYPE_OSN = 6,
@@ -90,6 +90,7 @@ enum qeth_ipa_cmds {
IPA_CMD_DELGMAC = 0x24,
IPA_CMD_SETVLAN = 0x25,
IPA_CMD_DELVLAN = 0x26,
+ IPA_CMD_VNICC = 0x2a,
IPA_CMD_SETBRIDGEPORT_OSA = 0x2b,
IPA_CMD_SETCCID = 0x41,
IPA_CMD_DELCCID = 0x42,
@@ -165,6 +166,8 @@ enum qeth_ipa_return_codes {
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
+ IPA_RC_L2_VLAN_ID_NOT_ALLOWED = 0x2050,
+ IPA_RC_VNICC_VNICBP = 0x20B0,
IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C,
IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10,
IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14,
@@ -197,6 +200,9 @@ enum qeth_ipa_return_codes {
IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
};
+/* for VNIC Characteristics */
+#define IPA_RC_VNICC_OOSEQ 0x0005
+
/* for SET_DIAGNOSTIC_ASSIST */
#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
@@ -551,6 +557,71 @@ struct qeth_ipacmd_diagass {
__u8 cdata[64];
} __attribute__ ((packed));
+/* VNIC Characteristics IPA Command: *****************************************/
+/* IPA commands/sub commands for VNICC */
+#define IPA_VNICC_QUERY_CHARS 0x00000000L
+#define IPA_VNICC_QUERY_CMDS 0x00000001L
+#define IPA_VNICC_ENABLE 0x00000002L
+#define IPA_VNICC_DISABLE 0x00000004L
+#define IPA_VNICC_SET_TIMEOUT 0x00000008L
+#define IPA_VNICC_GET_TIMEOUT 0x00000010L
+
+/* VNICC flags */
+#define QETH_VNICC_FLOODING 0x80000000
+#define QETH_VNICC_MCAST_FLOODING 0x40000000
+#define QETH_VNICC_LEARNING 0x20000000
+#define QETH_VNICC_TAKEOVER_SETVMAC 0x10000000
+#define QETH_VNICC_TAKEOVER_LEARNING 0x08000000
+#define QETH_VNICC_BRIDGE_INVISIBLE 0x04000000
+#define QETH_VNICC_RX_BCAST 0x02000000
+
+/* VNICC default values */
+#define QETH_VNICC_ALL 0xff000000
+#define QETH_VNICC_DEFAULT QETH_VNICC_RX_BCAST
+/* default VNICC timeout in seconds */
+#define QETH_VNICC_DEFAULT_TIMEOUT 600
+
+/* VNICC header */
+struct qeth_ipacmd_vnicc_hdr {
+ u32 sup;
+ u32 cur;
+};
+
+/* VNICC sub command header */
+struct qeth_vnicc_sub_hdr {
+ u16 data_length;
+ u16 reserved;
+ u32 sub_command;
+};
+
+/* query supported commands for VNIC characteristic */
+struct qeth_vnicc_query_cmds {
+ u32 vnic_char;
+ u32 sup_cmds;
+};
+
+/* enable/disable VNIC characteristic */
+struct qeth_vnicc_set_char {
+ u32 vnic_char;
+};
+
+/* get/set timeout for VNIC characteristic */
+struct qeth_vnicc_getset_timeout {
+ u32 vnic_char;
+ u32 timeout;
+};
+
+/* complete VNICC IPA command message */
+struct qeth_ipacmd_vnicc {
+ struct qeth_ipacmd_vnicc_hdr hdr;
+ struct qeth_vnicc_sub_hdr sub_hdr;
+ union {
+ struct qeth_vnicc_query_cmds query_cmds;
+ struct qeth_vnicc_set_char set_char;
+ struct qeth_vnicc_getset_timeout getset_timeout;
+ };
+};
+
/* SETBRIDGEPORT IPA Command: *********************************************/
enum qeth_ipa_sbp_cmd {
IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
@@ -692,6 +763,7 @@ struct qeth_ipa_cmd {
struct qeth_ipacmd_diagass diagass;
struct qeth_ipacmd_setbridgeport sbp;
struct qeth_ipacmd_addr_change addrchange;
+ struct qeth_ipacmd_vnicc vnicc;
} data;
} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d1ee9e30c68b..b22ed2a57acd 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -475,10 +475,8 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- /* check for unknown, too, in case we do not yet know who we are */
if (card->info.type != QETH_CARD_TYPE_OSD &&
- card->info.type != QETH_CARD_TYPE_OSX &&
- card->info.type != QETH_CARD_TYPE_UNKNOWN) {
+ card->info.type != QETH_CARD_TYPE_OSX) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 0d59f9a45ea9..09b1c4ef3dc9 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2013
* Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
@@ -14,6 +15,12 @@ int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
+int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
+int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
+int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
+int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
+bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+
struct qeth_mac {
u8 mac_addr[OSA_ADDR_LEN];
u8 is_uc:1;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 760b023eae95..d2537c09126d 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -33,24 +33,10 @@ static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
static void qeth_bridge_host_event(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
-
-static int qeth_l2_verify_dev(struct net_device *dev)
-{
- struct qeth_card *card;
- unsigned long flags;
- int rc = 0;
-
- read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
- list_for_each_entry(card, &qeth_core_card_list.list, list) {
- if (card->dev == dev) {
- rc = QETH_REAL_CARD;
- break;
- }
- }
- read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
-
- return rc;
-}
+static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
+static void qeth_l2_vnicc_init(struct qeth_card *card);
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+ u32 *timeout);
static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
{
@@ -74,7 +60,7 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
return ndev;
}
-static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
+static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
{
int rc;
@@ -124,8 +110,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob,
- NULL, NULL));
+ return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
+ NULL, NULL));
}
static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
@@ -285,17 +271,40 @@ static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb,
}
}
+static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode)
+{
+ if (retcode)
+ QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+
+ switch (retcode) {
+ case IPA_RC_SUCCESS:
+ return 0;
+ case IPA_RC_L2_INVALID_VLAN_ID:
+ return -EINVAL;
+ case IPA_RC_L2_DUP_VLAN_ID:
+ return -EEXIST;
+ case IPA_RC_L2_VLAN_ID_NOT_FOUND:
+ return -ENOENT;
+ case IPA_RC_L2_VLAN_ID_NOT_ALLOWED:
+ return -EPERM;
+ case -ENOMEM:
+ return -ENOMEM;
+ default:
+ return -EIO;
+ }
+}
+
static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long data)
+ struct qeth_reply *reply,
+ unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
QETH_CARD_TEXT(card, 2, "L2sdvcb");
- cmd = (struct qeth_ipa_cmd *) data;
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. "
- "Continuing\n", cmd->data.setdelvlan.vlan_id,
- QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+ QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+ cmd->data.setdelvlan.vlan_id,
+ QETH_CARD_IFNAME(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
@@ -303,7 +312,7 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
}
static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
- enum qeth_ipa_cmds ipacmd)
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
@@ -314,8 +323,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
- return qeth_send_ipa_cmd(card, iob,
- qeth_l2_send_setdelvlan_cb, NULL);
+ return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob,
+ qeth_l2_send_setdelvlan_cb, NULL));
}
static void qeth_l2_process_vlans(struct qeth_card *card)
@@ -339,10 +348,6 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
return 0;
- if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_CARD_TEXT(card, 3, "aidOSM");
- return 0;
- }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "aidREC");
return 0;
@@ -372,10 +377,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
- if (card->info.type == QETH_CARD_TYPE_OSM) {
- QETH_CARD_TEXT(card, 3, "kidOSM");
- return 0;
- }
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
return 0;
@@ -541,11 +542,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
QETH_CARD_TEXT(card, 3, "setmac");
- if (qeth_l2_verify_dev(dev) != QETH_REAL_CARD) {
- QETH_CARD_TEXT(card, 3, "setmcINV");
- return -EOPNOTSUPP;
- }
-
if (card->info.type == QETH_CARD_TYPE_OSN ||
card->info.type == QETH_CARD_TYPE_OSM ||
card->info.type == QETH_CARD_TYPE_OSX) {
@@ -694,7 +690,7 @@ static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
rc = -E2BIG;
goto out;
}
- rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
sizeof(*hdr) + data_offset);
out:
if (rc)
@@ -919,6 +915,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
hash_init(card->mac_htable);
card->options.layer2 = 1;
card->info.hwtrap = 0;
+ qeth_l2_vnicc_set_defaults(card);
return 0;
}
@@ -1005,7 +1002,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
} else {
card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
}
- card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (card->info.type == QETH_CARD_TYPE_OSM)
+ card->dev->features |= NETIF_F_VLAN_CHALLENGED;
+ else
+ card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_SG;
card->dev->vlan_features = NETIF_F_SG;
@@ -1045,9 +1046,14 @@ static int qeth_l2_start_ipassists(struct qeth_card *card)
static void qeth_l2_trace_features(struct qeth_card *card)
{
- QETH_CARD_TEXT(card, 2, "l2featur");
+ /* Set BridgePort features */
+ QETH_CARD_TEXT(card, 2, "featuSBP");
QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs,
sizeof(card->options.sbp.supported_funcs));
+ /* VNIC Characteristics features */
+ QETH_CARD_TEXT(card, 2, "feaVNICC");
+ QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars,
+ sizeof(card->options.vnicc.sup_chars));
}
static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
@@ -1072,8 +1078,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
if (card->options.sbp.supported_funcs)
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- qeth_trace_features(card);
- qeth_l2_trace_features(card);
if (!card->dev && qeth_l2_setup_netdev(card)) {
rc = -ENODEV;
@@ -1090,6 +1094,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
} else
card->info.hwtrap = 0;
+ /* for the rx_bcast characteristic, init VNICC after setmac */
+ qeth_l2_vnicc_init(card);
+
+ qeth_trace_features(card);
+ qeth_l2_trace_features(card);
+
qeth_l2_setup_bridgeport_attrs(card);
card->state = CARD_STATE_HARDSETUP;
@@ -1106,8 +1116,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
}
- if (card->info.type != QETH_CARD_TYPE_OSN &&
- card->info.type != QETH_CARD_TYPE_OSM)
+ if (card->info.type != QETH_CARD_TYPE_OSN)
qeth_l2_process_vlans(card);
netif_tx_disable(card->dev);
@@ -2039,6 +2048,454 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_an_set);
+static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+ return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
+ card->options.sbp.hostnotification);
+}
+
+/* VNIC Characteristics support */
+
+/* handle VNICC IPA command return codes; convert to error codes */
+static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc)
+{
+ int rc;
+
+ switch (ipa_rc) {
+ case IPA_RC_SUCCESS:
+ return ipa_rc;
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ case IPA_RC_NOTSUPP:
+ rc = -EOPNOTSUPP;
+ break;
+ case IPA_RC_VNICC_OOSEQ:
+ rc = -EALREADY;
+ break;
+ case IPA_RC_VNICC_VNICBP:
+ rc = -EBUSY;
+ break;
+ case IPA_RC_L2_ADDR_TABLE_FULL:
+ rc = -ENOSPC;
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ rc = -EACCES;
+ break;
+ default:
+ rc = -EIO;
+ }
+
+ QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc);
+ return rc;
+}
+
+/* generic VNICC request call back control */
+struct _qeth_l2_vnicc_request_cbctl {
+ u32 sub_cmd;
+ struct {
+ u32 vnic_char;
+ u32 timeout;
+ } param;
+ struct {
+ union {
+ u32 *sup_cmds;
+ u32 *timeout;
+ };
+ } result;
+};
+
+/* generic VNICC request call back */
+static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct _qeth_l2_vnicc_request_cbctl *cbctl =
+ (struct _qeth_l2_vnicc_request_cbctl *) reply->param;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
+
+ QETH_CARD_TEXT(card, 2, "vniccrcb");
+ if (cmd->hdr.return_code)
+ return 0;
+ /* return results to caller */
+ card->options.vnicc.sup_chars = rep->hdr.sup;
+ card->options.vnicc.cur_chars = rep->hdr.cur;
+
+ if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS)
+ *cbctl->result.sup_cmds = rep->query_cmds.sup_cmds;
+
+ if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT)
+ *cbctl->result.timeout = rep->getset_timeout.timeout;
+
+ return 0;
+}
+
+/* generic VNICC request */
+static int qeth_l2_vnicc_request(struct qeth_card *card,
+ struct _qeth_l2_vnicc_request_cbctl *cbctl)
+{
+ struct qeth_ipacmd_vnicc *req;
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "vniccreq");
+
+ /* get new buffer for request */
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0);
+ if (!iob)
+ return -ENOMEM;
+
+ /* create header for request */
+ cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+ req = &cmd->data.vnicc;
+
+ /* create sub command header for request */
+ req->sub_hdr.data_length = sizeof(req->sub_hdr);
+ req->sub_hdr.sub_command = cbctl->sub_cmd;
+
+ /* create sub command specific request fields */
+ switch (cbctl->sub_cmd) {
+ case IPA_VNICC_QUERY_CHARS:
+ break;
+ case IPA_VNICC_QUERY_CMDS:
+ req->sub_hdr.data_length += sizeof(req->query_cmds);
+ req->query_cmds.vnic_char = cbctl->param.vnic_char;
+ break;
+ case IPA_VNICC_ENABLE:
+ case IPA_VNICC_DISABLE:
+ req->sub_hdr.data_length += sizeof(req->set_char);
+ req->set_char.vnic_char = cbctl->param.vnic_char;
+ break;
+ case IPA_VNICC_SET_TIMEOUT:
+ req->getset_timeout.timeout = cbctl->param.timeout;
+ /* fallthrough */
+ case IPA_VNICC_GET_TIMEOUT:
+ req->sub_hdr.data_length += sizeof(req->getset_timeout);
+ req->getset_timeout.vnic_char = cbctl->param.vnic_char;
+ break;
+ default:
+ qeth_release_buffer(iob->channel, iob);
+ return -EOPNOTSUPP;
+ }
+
+ /* send request */
+ rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb,
+ (void *) cbctl);
+
+ return qeth_l2_vnicc_makerc(card, rc);
+}
+
+/* VNICC query VNIC characteristics request */
+static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
+{
+ struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+ /* prepare callback control */
+ cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS;
+
+ QETH_CARD_TEXT(card, 2, "vniccqch");
+ return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC query sub commands request */
+static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
+ u32 *sup_cmds)
+{
+ struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+ /* prepare callback control */
+ cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS;
+ cbctl.param.vnic_char = vnic_char;
+ cbctl.result.sup_cmds = sup_cmds;
+
+ QETH_CARD_TEXT(card, 2, "vniccqcm");
+ return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC enable/disable characteristic request */
+static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
+ u32 cmd)
+{
+ struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+ /* prepare callback control */
+ cbctl.sub_cmd = cmd;
+ cbctl.param.vnic_char = vnic_char;
+
+ QETH_CARD_TEXT(card, 2, "vniccedc");
+ return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* VNICC get/set timeout for characteristic request */
+static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
+ u32 cmd, u32 *timeout)
+{
+ struct _qeth_l2_vnicc_request_cbctl cbctl;
+
+ /* prepare callback control */
+ cbctl.sub_cmd = cmd;
+ cbctl.param.vnic_char = vnicc;
+ if (cmd == IPA_VNICC_SET_TIMEOUT)
+ cbctl.param.timeout = *timeout;
+ if (cmd == IPA_VNICC_GET_TIMEOUT)
+ cbctl.result.timeout = timeout;
+
+ QETH_CARD_TEXT(card, 2, "vniccgst");
+ return qeth_l2_vnicc_request(card, &cbctl);
+}
+
+/* set current VNICC flag state; called from sysfs store function */
+int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
+{
+ int rc = 0;
+ u32 cmd;
+
+ QETH_CARD_TEXT(card, 2, "vniccsch");
+
+ /* do not change anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
+ /* check if characteristic and enable/disable are supported */
+ if (!(card->options.vnicc.sup_chars & vnicc) ||
+ !(card->options.vnicc.set_char_sup & vnicc))
+ return -EOPNOTSUPP;
+
+ /* set enable/disable command and store wanted characteristic */
+ if (state) {
+ cmd = IPA_VNICC_ENABLE;
+ card->options.vnicc.wanted_chars |= vnicc;
+ } else {
+ cmd = IPA_VNICC_DISABLE;
+ card->options.vnicc.wanted_chars &= ~vnicc;
+ }
+
+ /* do we need to do anything? */
+ if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars)
+ return rc;
+
+ /* if card is not ready, simply stop here */
+ if (!qeth_card_hw_is_reachable(card)) {
+ if (state)
+ card->options.vnicc.cur_chars |= vnicc;
+ else
+ card->options.vnicc.cur_chars &= ~vnicc;
+ return rc;
+ }
+
+ rc = qeth_l2_vnicc_set_char(card, vnicc, cmd);
+ if (rc)
+ card->options.vnicc.wanted_chars =
+ card->options.vnicc.cur_chars;
+ else {
+ /* successful online VNICC change; handle special cases */
+ if (state && vnicc == QETH_VNICC_RX_BCAST)
+ card->options.vnicc.rx_bcast_enabled = true;
+ if (!state && vnicc == QETH_VNICC_LEARNING)
+ qeth_l2_vnicc_recover_timeout(card, vnicc,
+ &card->options.vnicc.learning_timeout);
+ }
+
+ return rc;
+}
+
+/* get current VNICC flag state; called from sysfs show function */
+int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
+{
+ int rc = 0;
+
+ QETH_CARD_TEXT(card, 2, "vniccgch");
+
+ /* do not get anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
+ /* check if characteristic is supported */
+ if (!(card->options.vnicc.sup_chars & vnicc))
+ return -EOPNOTSUPP;
+
+ /* if card is ready, query current VNICC state */
+ if (qeth_card_hw_is_reachable(card))
+ rc = qeth_l2_vnicc_query_chars(card);
+
+ *state = (card->options.vnicc.cur_chars & vnicc) ? true : false;
+ return rc;
+}
+
+/* set VNICC timeout; called from sysfs store function. Currently, only learning
+ * supports timeout
+ */
+int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
+{
+ int rc = 0;
+
+ QETH_CARD_TEXT(card, 2, "vniccsto");
+
+ /* do not change anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
+ /* check if characteristic and set_timeout are supported */
+ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+ !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+ return -EOPNOTSUPP;
+
+ /* do we need to do anything? */
+ if (card->options.vnicc.learning_timeout == timeout)
+ return rc;
+
+ /* if card is not ready, simply store the value internally and return */
+ if (!qeth_card_hw_is_reachable(card)) {
+ card->options.vnicc.learning_timeout = timeout;
+ return rc;
+ }
+
+ /* send timeout value to card; if successful, store value internally */
+ rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
+ IPA_VNICC_SET_TIMEOUT, &timeout);
+ if (!rc)
+ card->options.vnicc.learning_timeout = timeout;
+
+ return rc;
+}
+
+/* get current VNICC timeout; called from sysfs show function. Currently, only
+ * learning supports timeout
+ */
+int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
+{
+ int rc = 0;
+
+ QETH_CARD_TEXT(card, 2, "vniccgto");
+
+ /* do not get anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
+ /* check if characteristic and get_timeout are supported */
+ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
+ !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
+ return -EOPNOTSUPP;
+ /* if card is ready, get timeout. Otherwise, just return stored value */
+ *timeout = card->options.vnicc.learning_timeout;
+ if (qeth_card_hw_is_reachable(card))
+ rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING,
+ IPA_VNICC_GET_TIMEOUT,
+ timeout);
+
+ return rc;
+}
+
+/* check if VNICC is currently enabled */
+bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+{
+ /* if everything is turned off, VNICC is not active */
+ if (!card->options.vnicc.cur_chars)
+ return false;
+ /* default values are only OK if rx_bcast was not enabled by user
+ * or the card is offline.
+ */
+ if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) {
+ if (!card->options.vnicc.rx_bcast_enabled ||
+ !qeth_card_hw_is_reachable(card))
+ return false;
+ }
+ return true;
+}
+
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+ u32 *timeout)
+{
+ if (card->options.vnicc.sup_chars & vnicc &&
+ card->options.vnicc.getset_timeout_sup & vnicc &&
+ !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+ timeout))
+ return false;
+ *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+ return true;
+}
+
+/* recover user characteristic setting */
+static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
+ bool enable)
+{
+ u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE;
+
+ if (card->options.vnicc.sup_chars & vnicc &&
+ card->options.vnicc.set_char_sup & vnicc &&
+ !qeth_l2_vnicc_set_char(card, vnicc, cmd))
+ return false;
+ card->options.vnicc.wanted_chars &= ~vnicc;
+ card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc;
+ return true;
+}
+
+/* (re-)initialize VNICC */
+static void qeth_l2_vnicc_init(struct qeth_card *card)
+{
+ u32 *timeout = &card->options.vnicc.learning_timeout;
+ unsigned int chars_len, i;
+ unsigned long chars_tmp;
+ u32 sup_cmds, vnicc;
+ bool enable, error;
+
+ QETH_CARD_TEXT(card, 2, "vniccini");
+ /* reset rx_bcast */
+ card->options.vnicc.rx_bcast_enabled = 0;
+ /* initial query and storage of VNIC characteristics */
+ if (qeth_l2_vnicc_query_chars(card)) {
+ if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT ||
+ *timeout != QETH_VNICC_DEFAULT_TIMEOUT)
+ dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
+ /* fail quietly if user didn't change the default config */
+ card->options.vnicc.sup_chars = 0;
+ card->options.vnicc.cur_chars = 0;
+ card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
+ return;
+ }
+ /* get supported commands for each supported characteristic */
+ chars_tmp = card->options.vnicc.sup_chars;
+ chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
+ for_each_set_bit(i, &chars_tmp, chars_len) {
+ vnicc = BIT(i);
+ qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
+ if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
+ !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+ card->options.vnicc.getset_timeout_sup &= ~vnicc;
+ if (!(sup_cmds & IPA_VNICC_ENABLE) ||
+ !(sup_cmds & IPA_VNICC_DISABLE))
+ card->options.vnicc.set_char_sup &= ~vnicc;
+ }
+ /* enforce assumed default values and recover settings, if changed */
+ error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+ timeout);
+ chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
+ chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
+ chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
+ for_each_set_bit(i, &chars_tmp, chars_len) {
+ vnicc = BIT(i);
+ enable = card->options.vnicc.wanted_chars & vnicc;
+ error |= qeth_l2_vnicc_recover_char(card, vnicc, enable);
+ }
+ if (error)
+ dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n");
+}
+
+/* configure default values of VNIC characteristics */
+static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
+{
+ /* characteristics values */
+ card->options.vnicc.sup_chars = QETH_VNICC_ALL;
+ card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT;
+ card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+ /* supported commands */
+ card->options.vnicc.set_char_sup = QETH_VNICC_ALL;
+ card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING;
+ /* settings wanted by users */
+ card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
+}
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 9696baa49e2d..f2c3b127b1e4 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2013
* Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
@@ -20,6 +21,9 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
if (!card)
return -EINVAL;
+ if (qeth_l2_vnicc_is_in_use(card))
+ return sprintf(buf, "n/a (VNIC characteristics)\n");
+
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
@@ -60,6 +64,11 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
static ssize_t qeth_bridge_port_role_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
+
+ if (qeth_l2_vnicc_is_in_use(card))
+ return sprintf(buf, "n/a (VNIC characteristics)\n");
+
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
}
@@ -83,7 +92,10 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
mutex_lock(&card->conf_mutex);
- if (card->options.sbp.reflect_promisc) /* Forbid direct manipulation */
+ if (qeth_l2_vnicc_is_in_use(card))
+ rc = -EBUSY;
+ else if (card->options.sbp.reflect_promisc)
+ /* Forbid direct manipulation */
rc = -EPERM;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_setrole(card, role);
@@ -103,6 +115,11 @@ static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
static ssize_t qeth_bridge_port_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct qeth_card *card = dev_get_drvdata(dev);
+
+ if (qeth_l2_vnicc_is_in_use(card))
+ return sprintf(buf, "n/a (VNIC characteristics)\n");
+
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
}
@@ -118,6 +135,9 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
if (!card)
return -EINVAL;
+ if (qeth_l2_vnicc_is_in_use(card))
+ return sprintf(buf, "n/a (VNIC characteristics)\n");
+
enabled = card->options.sbp.hostnotification;
return sprintf(buf, "%d\n", enabled);
@@ -127,22 +147,21 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- int rc = 0;
- int enable;
+ bool enable;
+ int rc;
if (!card)
return -EINVAL;
- if (sysfs_streq(buf, "0"))
- enable = 0;
- else if (sysfs_streq(buf, "1"))
- enable = 1;
- else
- return -EINVAL;
+ rc = kstrtobool(buf, &enable);
+ if (rc)
+ return rc;
mutex_lock(&card->conf_mutex);
- if (qeth_card_hw_is_reachable(card)) {
+ if (qeth_l2_vnicc_is_in_use(card))
+ rc = -EBUSY;
+ else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
if (!rc)
card->options.sbp.hostnotification = enable;
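
The hunk above replaces open-coded "0"/"1" parsing with kstrtobool(), which also accepts the usual y/n and on/off spellings and returns a proper error for anything else. A minimal sysfs store handler using the same shape (hypothetical attribute, locking elided):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	bool enable;
	int rc;

	rc = kstrtobool(buf, &enable);
	if (rc)
		return rc;

	/* ... apply 'enable' to the device under its configuration lock ... */
	return count;
}
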
@@ -167,6 +186,9 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
if (!card)
return -EINVAL;
+ if (qeth_l2_vnicc_is_in_use(card))
+ return sprintf(buf, "n/a (VNIC characteristics)\n");
+
if (card->options.sbp.reflect_promisc) {
if (card->options.sbp.reflect_promisc_primary)
state = "primary";
@@ -202,7 +224,9 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
mutex_lock(&card->conf_mutex);
- if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
+ if (qeth_l2_vnicc_is_in_use(card))
+ rc = -EBUSY;
+ else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
rc = -EPERM;
else {
card->options.sbp.reflect_promisc = enable;
@@ -231,16 +255,6 @@ static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
-int qeth_l2_create_device_attributes(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
-}
-
-void qeth_l2_remove_device_attributes(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
-}
-
/**
* qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
* @card: qeth_card structure pointer
@@ -270,10 +284,168 @@ void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
qeth_bridgeport_an_set(card, 0);
}
+/* VNIC CHARS support */
+
+/* convert sysfs attr name to VNIC characteristic */
+static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name)
+{
+ if (sysfs_streq(attr_name, "flooding"))
+ return QETH_VNICC_FLOODING;
+ else if (sysfs_streq(attr_name, "mcast_flooding"))
+ return QETH_VNICC_MCAST_FLOODING;
+ else if (sysfs_streq(attr_name, "learning"))
+ return QETH_VNICC_LEARNING;
+ else if (sysfs_streq(attr_name, "takeover_setvmac"))
+ return QETH_VNICC_TAKEOVER_SETVMAC;
+ else if (sysfs_streq(attr_name, "takeover_learning"))
+ return QETH_VNICC_TAKEOVER_LEARNING;
+ else if (sysfs_streq(attr_name, "bridge_invisible"))
+ return QETH_VNICC_BRIDGE_INVISIBLE;
+ else if (sysfs_streq(attr_name, "rx_bcast"))
+ return QETH_VNICC_RX_BCAST;
+
+ return 0;
+}
+
+/* get current timeout setting */
+static ssize_t qeth_vnicc_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ u32 timeout;
+ int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ rc = qeth_l2_vnicc_get_timeout(card, &timeout);
+ if (rc == -EBUSY)
+ return sprintf(buf, "n/a (BridgePort)\n");
+ if (rc == -EOPNOTSUPP)
+ return sprintf(buf, "n/a\n");
+ return rc ? rc : sprintf(buf, "%d\n", timeout);
+}
+
+/* change timeout setting */
+static ssize_t qeth_vnicc_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ u32 timeout;
+ int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ rc = kstrtou32(buf, 10, &timeout);
+ if (rc)
+ return rc;
+
+ mutex_lock(&card->conf_mutex);
+ rc = qeth_l2_vnicc_set_timeout(card, timeout);
+ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
+}
+
+/* get current setting of characteristic */
+static ssize_t qeth_vnicc_char_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ bool state;
+ u32 vnicc;
+ int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+ rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
+
+ if (rc == -EBUSY)
+ return sprintf(buf, "n/a (BridgePort)\n");
+ if (rc == -EOPNOTSUPP)
+ return sprintf(buf, "n/a\n");
+ return rc ? rc : sprintf(buf, "%d\n", state);
+}
+
+/* change setting of characteristic */
+static ssize_t qeth_vnicc_char_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ bool state;
+ u32 vnicc;
+ int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ if (kstrtobool(buf, &state))
+ return -EINVAL;
+
+ vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name);
+ mutex_lock(&card->conf_mutex);
+ rc = qeth_l2_vnicc_set_state(card, vnicc, state);
+ mutex_unlock(&card->conf_mutex);
+
+ return rc ? rc : count;
+}
+
+static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show,
+ qeth_vnicc_char_store);
+static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show,
+ qeth_vnicc_timeout_store);
+static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show,
+ qeth_vnicc_char_store);
+static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show,
+ qeth_vnicc_char_store);
+static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show,
+ qeth_vnicc_char_store);
+static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store);
+
+static struct attribute *qeth_l2_vnicc_attrs[] = {
+ &dev_attr_flooding.attr,
+ &dev_attr_mcast_flooding.attr,
+ &dev_attr_learning.attr,
+ &dev_attr_learning_timeout.attr,
+ &dev_attr_takeover_setvmac.attr,
+ &dev_attr_takeover_learning.attr,
+ &dev_attr_bridge_invisible.attr,
+ &dev_attr_rx_bcast.attr,
+ NULL,
+};
+
+static struct attribute_group qeth_l2_vnicc_attr_group = {
+ .attrs = qeth_l2_vnicc_attrs,
+ .name = "vnicc",
+};
+
+static const struct attribute_group *qeth_l2_only_attr_groups[] = {
+ &qeth_l2_bridgeport_attr_group,
+ &qeth_l2_vnicc_attr_group,
+ NULL,
+};
+
+int qeth_l2_create_device_attributes(struct device *dev)
+{
+ return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
+void qeth_l2_remove_device_attributes(struct device *dev)
+{
+ sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups);
+}
+
const struct attribute_group *qeth_l2_attr_groups[] = {
&qeth_device_attr_group,
&qeth_device_blkt_group,
- /* l2 specific, see l2_{create,remove}_device_attributes(): */
+ /* l2 specific, see qeth_l2_only_attr_groups: */
&qeth_l2_bridgeport_attr_group,
+ &qeth_l2_vnicc_attr_group,
NULL,
};
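
This file (and qeth_l3_sys.c below) now registers all of its attribute groups through one NULL-terminated array, letting sysfs_create_groups() handle the error unwinding that the old per-group calls did by hand. A self-contained sketch of the idiom with made-up attributes; a named group appears as a subdirectory, like the "vnicc" group above:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "hello\n");
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.name	= "demo",		/* creates a "demo/" subdirectory */
	.attrs	= demo_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL,
};

static int demo_add_attrs(struct device *dev)
{
	/* creates every group; already-created groups are removed on failure */
	return sysfs_create_groups(&dev->kobj, demo_groups);
}

static void demo_del_attrs(struct device *dev)
{
	sysfs_remove_groups(&dev->kobj, demo_groups);
}
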
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9b5e439f18cf..194ae9b577cc 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ab661a431f7c..aadd384316a3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1553,7 +1553,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
if (!addr)
- return;
+ goto out;
spin_lock_bh(&card->ip_lock);
@@ -1567,6 +1567,7 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
spin_unlock_bh(&card->ip_lock);
kfree(addr);
+out:
in_dev_put(in_dev);
}
@@ -1591,7 +1592,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
if (!addr)
- return;
+ goto out;
spin_lock_bh(&card->ip_lock);
@@ -1606,6 +1607,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
spin_unlock_bh(&card->ip_lock);
kfree(addr);
+out:
in6_dev_put(in6_dev);
#endif /* CONFIG_QETH_IPV6 */
}
@@ -1646,13 +1648,12 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr *hdr, unsigned short *vlan_id)
+static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr *hdr)
{
__u16 prot;
struct iphdr *ip_hdr;
unsigned char tg_addr[MAX_ADDR_LEN];
- int is_vlan = 0;
if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
@@ -1706,11 +1707,14 @@ static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, card->dev);
- if (hdr->hdr.l3.ext_flags &
- (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
- *vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
- hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
- is_vlan = 1;
+ /* copy VLAN tag from hdr into skb */
+ if (!card->options.sniffer &&
+ (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
+ QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
+ u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
+ hdr->hdr.l3.vlan_id :
+ *((u16 *)&hdr->hdr.l3.dest_addr[12]);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
}
if (card->dev->features & NETIF_F_RXCSUM) {
@@ -1724,7 +1728,6 @@ static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
skb->ip_summed = CHECKSUM_NONE;
} else
skb->ip_summed = CHECKSUM_NONE;
- return is_vlan;
}
static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -1733,8 +1736,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
int work_done = 0;
struct sk_buff *skb;
struct qeth_hdr *hdr;
- __u16 vlan_tag = 0;
- int is_vlan;
unsigned int len;
__u16 magic;
@@ -1764,12 +1765,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
card->dev->addr_len);
netif_receive_skb(skb);
} else {
- is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
- &vlan_tag);
+ qeth_l3_rebuild_skb(card, skb, hdr);
len = skb->len;
- if (is_vlan && !card->options.sniffer)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&card->napi, skb);
}
break;
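
With this change the VLAN tag parsed from the qeth layer-3 header is attached to the skb right in qeth_l3_rebuild_skb() via __vlan_hwaccel_put_tag(), so the caller no longer needs the is_vlan/vlan_tag plumbing. The general receive-side idiom, sketched with hypothetical names:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void demo_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			    bool tagged, u16 tci)
{
	if (tagged)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	napi_gro_receive(napi, skb);
}
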
@@ -2771,8 +2768,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len,
hd_len, elements);
} else
- rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
- data_offset, hd_len);
+ rc = qeth_do_send_packet_fast(queue, new_skb, hdr, data_offset,
+ hd_len);
if (!rc) {
card->stats.tx_packets++;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index e8bcc314cc5f..bd12fdf678be 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
@@ -1028,52 +1029,31 @@ static const struct attribute_group qeth_device_rxip_group = {
.attrs = qeth_rxip_device_attrs,
};
+static const struct attribute_group *qeth_l3_only_attr_groups[] = {
+ &qeth_l3_device_attr_group,
+ &qeth_device_ipato_group,
+ &qeth_device_vipa_group,
+ &qeth_device_rxip_group,
+ NULL,
+};
+
int qeth_l3_create_device_attributes(struct device *dev)
{
- int ret;
-
- ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
- if (ret)
- return ret;
-
- ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
- if (ret) {
- sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
- return ret;
- }
-
- ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
- if (ret) {
- sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
- return ret;
- }
-
- ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
- if (ret) {
- sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
- return ret;
- }
- return 0;
+ return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups);
}
void qeth_l3_remove_device_attributes(struct device *dev)
{
- sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
- sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
+ sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups);
}
const struct attribute_group *qeth_l3_attr_groups[] = {
&qeth_device_attr_group,
&qeth_device_blkt_group,
- /* l3 specific, see l3_{create,remove}_device_attributes(): */
+ /* l3 specific, see qeth_l3_only_attr_groups: */
&qeth_l3_device_attr_group,
&qeth_device_ipato_group,
&qeth_device_vipa_group,
&qeth_device_rxip_group,
-NULL,
+ NULL,
};
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
index 45bc925928ca..a0d6c6130c4b 100644
--- a/drivers/s390/net/smsgiucv.h
+++ b/drivers/s390/net/smsgiucv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IUCV special message driver
*
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 54c7b48fdb46..49eda141ea43 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 8227076c9cbb..a8b831000b2d 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3508c00458f4..e2a973cd2573 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
* debug feature declarations
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index b8e853e53546..3396a47721a7 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index ec2532ee1822..1d91a32db08e 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
@@ -563,21 +564,24 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
* zfcp_erp_timeout_handler - Trigger ERP action from timed out ERP request
* @data: ERP action (from timer data)
*/
-void zfcp_erp_timeout_handler(unsigned long data)
+void zfcp_erp_timeout_handler(struct timer_list *t)
{
- struct zfcp_erp_action *act = (struct zfcp_erp_action *) data;
+ struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_erp_action *act = fsf_req->erp_action;
+
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
-static void zfcp_erp_memwait_handler(unsigned long data)
+static void zfcp_erp_memwait_handler(struct timer_list *t)
{
- zfcp_erp_notify((struct zfcp_erp_action *)data, 0);
+ struct zfcp_erp_action *act = from_timer(act, t, timer);
+
+ zfcp_erp_notify(act, 0);
}
static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
{
- setup_timer(&erp_action->timer, zfcp_erp_memwait_handler,
- (unsigned long) erp_action);
+ timer_setup(&erp_action->timer, zfcp_erp_memwait_handler, 0);
erp_action->timer.expires = jiffies + HZ;
add_timer(&erp_action->timer);
}
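
The zfcp_erp conversion above is the standard timer_list API migration: timer_setup() replaces setup_timer()/init_timer(), and the callback recovers its container with from_timer() instead of casting an unsigned long. A minimal sketch of the new-style pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_action {
	struct timer_list timer;
	int status;
};

/* new-style callback: the timer_list pointer leads back to the container */
static void demo_timeout(struct timer_list *t)
{
	struct demo_action *act = from_timer(act, t, timer);

	act->status = -ETIMEDOUT;
}

static void demo_arm(struct demo_action *act)
{
	timer_setup(&act->timer, demo_timeout, 0);
	act->timer.expires = jiffies + HZ;
	add_timer(&act->timer);
}
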
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index a9e968717dd9..bf8ea4df2bb8 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
@@ -68,7 +69,7 @@ extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
extern void zfcp_erp_wait(struct zfcp_adapter *);
extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
-extern void zfcp_erp_timeout_handler(unsigned long);
+extern void zfcp_erp_timeout_handler(struct timer_list *t);
/* zfcp_fc.c */
extern struct kmem_cache *zfcp_fc_req_cache;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 8210645c2111..ca218c82321f 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 41f22d3dc6d1..6a397ddaadf0 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 69d1dc3ec79d..b12cb81ad8a2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
@@ -20,9 +21,11 @@
struct kmem_cache *zfcp_fsf_qtcb_cache;
-static void zfcp_fsf_request_timeout_handler(unsigned long data)
+static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
- struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+ struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
+ struct zfcp_adapter *adapter = fsf_req->adapter;
+
zfcp_qdio_siosl(adapter);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
"fsrth_1");
@@ -32,7 +35,6 @@ static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
unsigned long timeout)
{
fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
- fsf_req->timer.data = (unsigned long) fsf_req->adapter;
fsf_req->timer.expires = jiffies + timeout;
add_timer(&fsf_req->timer);
}
@@ -41,7 +43,6 @@ static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
BUG_ON(!fsf_req->erp_action);
fsf_req->timer.function = zfcp_erp_timeout_handler;
- fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
fsf_req->timer.expires = jiffies + 30 * HZ;
add_timer(&fsf_req->timer);
}
@@ -691,7 +692,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
adapter->req_no++;
INIT_LIST_HEAD(&req->list);
- init_timer(&req->timer);
+ timer_setup(&req->timer, NULL, 0);
init_completion(&req->completion);
req->adapter = adapter;
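
Here the request constructor only initializes the timer (callback still NULL); the concrete handler and expiry are filled in later by zfcp_fsf_start_timer() or zfcp_fsf_start_erp_timer(). A reduced sketch of that split, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_req {
	struct timer_list timer;
};

static void demo_req_init(struct demo_req *req)
{
	timer_setup(&req->timer, NULL, 0);	/* handler chosen later */
}

static void demo_req_start(struct demo_req *req,
			   void (*handler)(struct timer_list *),
			   unsigned long timeout)
{
	req->timer.function = handler;
	req->timer.expires = jiffies + timeout;
	add_timer(&req->timer);
}
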
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 88feba5bfda4..4baca67aba6d 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 9e358fc04b78..4ab02e8d36f3 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 7f647a90c750..886c662cc154 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 703fce59befe..59a943c0d51d 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 6cf8732627e0..4d2ba5682493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 96a0be13e841..3ac823f2540f 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 9310a547b89f..1bf0a0984a09 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index df40692a9011..f68af1f317f1 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -6,8 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-s390-virtio-objs := virtio_ccw.o
-ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
-s390-virtio-objs += kvm_virtio.o
-endif
-obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
+obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
deleted file mode 100644
index a99d09a11f05..000000000000
--- a/drivers/s390/virtio/kvm_virtio.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/slab.h>
-#include <linux/virtio_console.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/export.h>
-#include <linux/pfn.h>
-#include <asm/io.h>
-#include <asm/kvm_para.h>
-#include <asm/kvm_virtio.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-/*
- * The pointer to our (page) of device descriptions.
- */
-static void *kvm_devices;
-static struct work_struct hotplug_work;
-
-struct kvm_device {
- struct virtio_device vdev;
- struct kvm_device_desc *desc;
-};
-
-#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
-
-/*
- * memory layout:
- * - kvm_device_descriptor
- * struct kvm_device_desc
- * - configuration
- * struct kvm_vqconfig
- * - feature bits
- * - config space
- */
-static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
-{
- return (struct kvm_vqconfig *)(desc + 1);
-}
-
-static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
-{
- return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
-}
-
-static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
-{
- return kvm_vq_features(desc) + desc->feature_len * 2;
-}
-
-/*
- * The total size of the config page used by this device (incl. desc)
- */
-static unsigned desc_size(const struct kvm_device_desc *desc)
-{
- return sizeof(*desc)
- + desc->num_vq * sizeof(struct kvm_vqconfig)
- + desc->feature_len * 2
- + desc->config_len;
-}
-
-/* This gets the device's feature bits. */
-static u64 kvm_get_features(struct virtio_device *vdev)
-{
- unsigned int i;
- u32 features = 0;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- u8 *in_features = kvm_vq_features(desc);
-
- for (i = 0; i < min(desc->feature_len * 8, 32); i++)
- if (in_features[i / 8] & (1 << (i % 8)))
- features |= (1 << i);
- return features;
-}
-
-static int kvm_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- /* Second half of bitmap is features we accept. */
- u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure we don't have any features > 32 bits! */
- BUG_ON((u32)vdev->features != vdev->features);
-
- memset(out_features, 0, desc->feature_len);
- bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- out_features[i / 8] |= (1 << (i % 8));
- }
-
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void kvm_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(buf, kvm_vq_configspace(desc) + offset, len);
-}
-
-static void kvm_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(kvm_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access
- * the status field of the device descriptor. set_status will also
- * make a hypercall to the host, to tell about status changes
- */
-static u8 kvm_get_status(struct virtio_device *vdev)
-{
- return to_kvmdev(vdev)->desc->status;
-}
-
-static void kvm_set_status(struct virtio_device *vdev, u8 status)
-{
- BUG_ON(!status);
- to_kvmdev(vdev)->desc->status = status;
- kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
- * descriptor address. The Host will zero the status and all the
- * features.
- */
-static void kvm_reset(struct virtio_device *vdev)
-{
- kvm_hypercall1(KVM_S390_VIRTIO_RESET,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * When the virtio_ring code wants to notify the Host, it calls us here and we
- * make a hypercall. We hand the address of the virtqueue so the Host
- * knows which virtqueue we're talking about.
- */
-static bool kvm_notify(struct virtqueue *vq)
-{
- long rc;
- struct kvm_vqconfig *config = vq->priv;
-
- rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
- if (rc < 0)
- return false;
- return true;
-}
-
-/*
- * This routine finds the first virtqueue described in the configuration of
- * this device and sets it up.
- */
-static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- struct kvm_vqconfig *config;
- struct virtqueue *vq;
- int err;
-
- if (index >= kdev->desc->num_vq)
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return NULL;
-
- config = kvm_vq_config(kdev->desc)+index;
-
- err = vmem_add_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
- if (err)
- goto out;
-
- vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
- vdev, true, ctx, (void *) config->address,
- kvm_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
-
- /*
- * register a callback token
- * The host will sent this via the external interrupt parameter
- */
- config->token = (u64) vq;
-
- vq->priv = config;
- return vq;
-unmap:
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-out:
- return ERR_PTR(err);
-}
-
-static void kvm_del_vq(struct virtqueue *vq)
-{
- struct kvm_vqconfig *config = vq->priv;
-
- vring_del_virtqueue(vq);
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-}
-
-static void kvm_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- kvm_del_vq(vq);
-}
-
-static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
- struct irq_affinity *desc)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- int i;
-
- /* We must have this many virtqueues. */
- if (nvqs > kdev->desc->num_vq)
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i]))
- goto error;
- }
- return 0;
-
-error:
- kvm_del_vqs(vdev);
- return PTR_ERR(vqs[i]);
-}
-
-static const char *kvm_bus_name(struct virtio_device *vdev)
-{
- return "";
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops kvm_vq_configspace_ops = {
- .get_features = kvm_get_features,
- .finalize_features = kvm_finalize_features,
- .get = kvm_get,
- .set = kvm_set,
- .get_status = kvm_get_status,
- .set_status = kvm_set_status,
- .reset = kvm_reset,
- .find_vqs = kvm_find_vqs,
- .del_vqs = kvm_del_vqs,
- .bus_name = kvm_bus_name,
-};
-
-/*
- * The root device for the kvm virtio devices.
- * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
- */
-static struct device *kvm_root;
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
-{
- struct kvm_device *kdev;
-
- kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
- if (!kdev) {
- printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
- offset, d->type);
- return;
- }
-
- kdev->vdev.dev.parent = kvm_root;
- kdev->vdev.id.device = d->type;
- kdev->vdev.config = &kvm_vq_configspace_ops;
- kdev->desc = d;
-
- if (register_virtio_device(&kdev->vdev) != 0) {
- printk(KERN_ERR "Failed to register kvm device %u type %u\n",
- offset, d->type);
- kfree(kdev);
- }
-}
-
-/*
- * scan_devices() simply iterates through the device page.
- * The type 0 is reserved to mean "end of devices".
- */
-static void scan_devices(void)
-{
- unsigned int i;
- struct kvm_device_desc *d;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- if (d->type == 0)
- break;
-
- add_kvm_device(d, i);
- }
-}
-
-/*
- * match for a kvm device with a specific desc pointer
- */
-static int match_desc(struct device *dev, void *data)
-{
- struct virtio_device *vdev = dev_to_virtio(dev);
- struct kvm_device *kdev = to_kvmdev(vdev);
-
- return kdev->desc == data;
-}
-
-/*
- * hotplug_device tries to find changes in the device page.
- */
-static void hotplug_devices(struct work_struct *dummy)
-{
- unsigned int i;
- struct kvm_device_desc *d;
- struct device *dev;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- /* end of list */
- if (d->type == 0)
- break;
-
- /* device already exists */
- dev = device_find_child(kvm_root, d, match_desc);
- if (dev) {
- /* XXX check for hotplug remove */
- put_device(dev);
- continue;
- }
-
- /* new device */
- printk(KERN_INFO "Adding new virtio device %p\n", d);
- add_kvm_device(d, i);
- }
-}
-
-/*
- * we emulate the request_irq behaviour on top of s390 extints
- */
-static void kvm_extint_handler(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct virtqueue *vq;
- u32 param;
-
- if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
- return;
- inc_irq_stat(IRQEXT_VRT);
-
- /* The LSB might be overloaded, we have to mask it */
- vq = (struct virtqueue *)(param64 & ~1UL);
-
- /* We use ext_params to decide what this interrupt means */
- param = param32 & VIRTIO_PARAM_MASK;
-
- switch (param) {
- case VIRTIO_PARAM_CONFIG_CHANGED:
- virtio_config_changed(vq->vdev);
- break;
- case VIRTIO_PARAM_DEV_ADD:
- schedule_work(&hotplug_work);
- break;
- case VIRTIO_PARAM_VRING_INTERRUPT:
- default:
- vring_interrupt(0, vq);
- break;
- }
-}
-
-/*
- * For s390-virtio, we expect a page above main storage containing
- * the virtio configuration. Try to actually load from this area
- * in order to figure out if the host provides this page.
- */
-static int __init test_devices_support(unsigned long addr)
-{
- int ret = -EIO;
-
- asm volatile(
- "0: lura 0,%1\n"
- "1: xgr %0,%0\n"
- "2:\n"
- EX_TABLE(0b,2b)
- EX_TABLE(1b,2b)
- : "+d" (ret)
- : "a" (addr)
- : "0", "cc");
- return ret;
-}
-/*
- * Init function for virtio
- * devices are in a single page above top of "normal" + standby mem
- */
-static int __init kvm_devices_init(void)
-{
- int rc;
- unsigned long total_memory_size = sclp.rzm * sclp.rnmax;
-
- if (!MACHINE_IS_KVM)
- return -ENODEV;
-
- if (test_devices_support(total_memory_size) < 0)
- return -ENODEV;
-
- pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
-
- rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
- if (rc)
- return rc;
-
- kvm_devices = (void *) total_memory_size;
-
- kvm_root = root_device_register("kvm_s390");
- if (IS_ERR(kvm_root)) {
- rc = PTR_ERR(kvm_root);
- printk(KERN_ERR "Could not register kvm_s390 root device");
- vmem_remove_mapping(total_memory_size, PAGE_SIZE);
- return rc;
- }
-
- INIT_WORK(&hotplug_work, hotplug_devices);
-
- irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
-
- scan_devices();
- return 0;
-}
-
-/* code for early console output with virtio_console */
-static int early_put_chars(u32 vtermno, const char *buf, int count)
-{
- char scratch[17];
- unsigned int len = count;
-
- if (len > sizeof(scratch) - 1)
- len = sizeof(scratch) - 1;
- scratch[len] = '\0';
- memcpy(scratch, buf, len);
- kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
- return len;
-}
-
-static int __init s390_virtio_console_init(void)
-{
- if (sclp.has_vt220 || sclp.has_linemode)
- return -ENODEV;
- return virtio_cons_early_init(early_put_chars);
-}
-console_initcall(s390_virtio_console_init);
-
-
-/*
- * We do this after core stuff, but before the drivers.
- */
-postcore_initcall(kvm_devices_init);
diff --git a/drivers/sbus/char/Makefile b/drivers/sbus/char/Makefile
index 78b6183c9866..ae478144c551 100644
--- a/drivers/sbus/char/Makefile
+++ b/drivers/sbus/char/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel miscellaneous SPARC device drivers.
#
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 228c782d6433..fb5bcf6dddc1 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* bbc_envctrl.c: UltraSPARC-III environment control driver.
*
* Copyright (C) 2001, 2008 David S. Miller (davem@davemloft.net)
diff --git a/drivers/sbus/char/bbc_i2c.h b/drivers/sbus/char/bbc_i2c.h
index 4b4531066e75..c2d066d3fa41 100644
--- a/drivers/sbus/char/bbc_i2c.h
+++ b/drivers/sbus/char/bbc_i2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BBC_I2C_H
#define _BBC_I2C_H
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index f32765d3cbd8..5c8ed7350a04 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -22,7 +22,6 @@
#include <asm/display7seg.h>
-#define D7S_MINOR 193
#define DRIVER_NAME "d7s"
#define PFX DRIVER_NAME ": "
diff --git a/drivers/sbus/char/max1617.h b/drivers/sbus/char/max1617.h
index cd30819a0a30..45c8318787d8 100644
--- a/drivers/sbus/char/max1617.h
+++ b/drivers/sbus/char/max1617.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: max1617.h,v 1.1 2001/04/02 09:59:08 davem Exp $ */
#ifndef _MAX1617_H
#define _MAX1617_H
diff --git a/drivers/scsi/.gitignore b/drivers/scsi/.gitignore
index c89ae9a04399..e2956741fbd1 100644
--- a/drivers/scsi/.gitignore
+++ b/drivers/scsi/.gitignore
@@ -1 +1,2 @@
53c700_d.h
+scsi_devinfo_tbl.c
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index f34c916b95bc..0c9a100af667 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* -*- mode: c; c-basic-offset: 8 -*- */
/* Driver for 53c700 and 53c700-66 chips from NCR and Symbios
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 41366339b950..8a739b74cfb7 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -130,7 +130,8 @@ config CHR_DEV_OSST
config BLK_DEV_SR
tristate "SCSI CDROM support"
- depends on SCSI
+ depends on SCSI && BLK_DEV
+ select CDROM
---help---
If you want to use a CD or DVD drive attached to your computer
by SCSI, FireWire, USB or ATAPI, say Y and read the SCSI-HOWTO
@@ -786,7 +787,7 @@ config SCSI_IBMVSCSIS
depends on PPC_PSERIES && TARGET_CORE && SCSI && PCI
help
This is the IBM POWER Virtual SCSI Target Server
- This driver uses the SRP protocol for communication betwen servers
+ This driver uses the SRP protocol for communication between servers
guest and/or the host that run on the same server.
More information on VSCSI protocol can be found at www.power.org
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 93dbe58c47c8..fcfd28d2884c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for linux/drivers/scsi
#
@@ -191,6 +192,14 @@ clean-files := 53c700_d.h 53c700_u.h
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+$(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
+
+quiet_cmd_bflags = GEN $@
+ cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+
+$(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+ $(call if_changed,bflags)
+
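
The sed rule above turns every BLIST_* definition in include/scsi/scsi_devinfo.h into one line of the generated scsi_devinfo_tbl.c, e.g. "BLIST_FLAG_NAME(NOLUN),". One plausible (hypothetical) consumer defines the macro and includes the generated file to build a flag-name table, assuming each BLIST_* flag is a single bit:

/* sketch of a consumer such as scsi_sysfs.c, not the exact kernel code */
#define BLIST_FLAG_NAME(name)	[ilog2(BLIST_##name)] = #name
static const char *const demo_bflags_name[] = {
#include "scsi_devinfo_tbl.c"
};
#undef BLIST_FLAG_NAME
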
# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 8a0812221d72..90ea0f5d9bdb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NCR 5380 generic driver routines. These should make it *trivial*
* to implement 5380 SCSI drivers under Linux with a non-trantor
@@ -1907,8 +1908,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
switch (extended_msg[2]) {
case EXTENDED_SDTR:
case EXTENDED_WDTR:
- case EXTENDED_MODIFY_DATA_POINTER:
- case EXTENDED_EXTENDED_IDENTIFY:
tmp = 0;
}
} else if (len) {
@@ -1931,18 +1930,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* reject it.
*/
default:
- if (!tmp) {
- shost_printk(KERN_ERR, instance, "rejecting message ");
- spi_print_msg(extended_msg);
- printk("\n");
- } else if (tmp != EXTENDED_MESSAGE)
- scmd_printk(KERN_INFO, cmd,
- "rejecting unknown message %02x\n",
- tmp);
- else
+ if (tmp == EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n",
- extended_msg[1], extended_msg[0]);
+ extended_msg[2], extended_msg[1]);
+ else if (tmp)
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown message code %02x\n",
+ tmp);
msgout = MESSAGE_REJECT;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index d78f0957d865..31096a0b0fdd 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NCR 5380 defines
*
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h
index f167af6bd2af..eb675d782ef6 100644
--- a/drivers/scsi/NCR_D700.h
+++ b/drivers/scsi/NCR_D700.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR Dual 700 MCA SCSI Driver
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h
index 7b9209008187..d5f46cdb736e 100644
--- a/drivers/scsi/NCR_Q720.h
+++ b/drivers/scsi/NCR_Q720.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* -*- mode: c; c-basic-offset: 8 -*- */
/* NCR Quad 720 MCA SCSI Driver
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 794b8e65c711..8d8a4074a570 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef A2091_H
#define A2091_H
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 49db4a335aab..5cb3e7535281 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef A3000_H
#define A3000_H
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index dfe8e70f8d99..525a652dab48 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2383,19 +2383,19 @@ fib_free_out:
goto out;
}
-int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
+int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
struct tm cur_tm;
char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
u32 datasize = sizeof(wellness_str);
- unsigned long local_time;
+ time64_t local_time;
int ret = -ENODEV;
if (!dev->sa_firmware)
goto out;
- local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
- time_to_tm(local_time, 0, &cur_tm);
+ local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
+ time64_to_tm(local_time, 0, &cur_tm);
cur_tm.tm_mon += 1;
cur_tm.tm_year += 1900;
wellness_str[8] = bin2bcd(cur_tm.tm_hour);
@@ -2412,7 +2412,7 @@ out:
return ret;
}
-int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
+int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
int ret = -ENOMEM;
struct fib *fibptr;
@@ -2424,7 +2424,7 @@ int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
aac_fib_init(fibptr);
info = (__le32 *)fib_data(fibptr);
- *info = cpu_to_le32(now->tv_sec);
+ *info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
1, 1, NULL, NULL);
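
The aacraid changes above are part of the y2038 cleanup: struct timeval and do_gettimeofday() (32-bit seconds on 32-bit architectures) give way to struct timespec64, ktime_get_real_ts64() and time64_to_tm(). The basic pattern, as a small sketch:

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timekeeping.h>

static void demo_read_wallclock(void)
{
	struct timespec64 now;
	struct tm tm;

	ktime_get_real_ts64(&now);		/* y2038-safe wall-clock read */
	time64_to_tm(now.tv_sec, 0, &tm);	/* UTC; no timezone offset */

	pr_info("today is %04ld-%02d-%02d\n",
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
}
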
@@ -2496,7 +2496,7 @@ int aac_command_thread(void *data)
}
if (!time_before(next_check_jiffies,next_jiffies)
&& ((difference = next_jiffies - jiffies) <= 0)) {
- struct timeval now;
+ struct timespec64 now;
int ret;
/* Don't even try to talk to adapter if its sick */
@@ -2506,15 +2506,15 @@ int aac_command_thread(void *data)
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
* HZ;
- do_gettimeofday(&now);
+ ktime_get_real_ts64(&now);
/* Synchronize our watches */
- if (((1000000 - (1000000 / HZ)) > now.tv_usec)
- && (now.tv_usec > (1000000 / HZ)))
- difference = (((1000000 - now.tv_usec) * HZ)
- + 500000) / 1000000;
+ if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
+ && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
+ difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
+ + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
else {
- if (now.tv_usec > 500000)
+ if (now.tv_nsec > NSEC_PER_SEC / 2)
++now.tv_sec;
if (dev->sa_firmware)
diff --git a/drivers/scsi/aha152x.h b/drivers/scsi/aha152x.h
index ac4bfa438bf2..efd01877d02b 100644
--- a/drivers/scsi/aha152x.h
+++ b/drivers/scsi/aha152x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AHA152X_H
#define _AHA152X_H
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index 0fe9bae1b3d1..f5b0d210fb3c 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AHA1542_H_
#define _AHA1542_H_
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h
index b0c5603461ca..dfdaa4d3ea4e 100644
--- a/drivers/scsi/aha1740.h
+++ b/drivers/scsi/aha1740.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AHA1740_H
/* $Id$
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index b03ba0df7a83..c15be2590d1c 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux aic7xxx SCSI driver.
#
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index d47b527b25dd..31f2bb9d7146 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -1046,8 +1046,6 @@ typedef enum {
typedef uint8_t ahd_mode_state;
-typedef void ahd_callback_t (void *);
-
struct ahd_completion
{
uint16_t tag;
@@ -1122,8 +1120,7 @@ struct ahd_softc {
/*
* Timer handles for timer driven callbacks.
*/
- ahd_timer_t reset_timer;
- ahd_timer_t stat_timer;
+ struct timer_list stat_timer;
/*
* Statistics.
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 95d8f25cbcca..b560f396ee99 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -207,7 +207,7 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd,
static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
u_int prev, u_int next, u_int tid);
static void ahd_reset_current_bus(struct ahd_softc *ahd);
-static ahd_callback_t ahd_stat_timer;
+static void ahd_stat_timer(struct timer_list *t);
#ifdef AHD_DUMP_SEQ
static void ahd_dumpseq(struct ahd_softc *ahd);
#endif
@@ -6104,8 +6104,7 @@ ahd_alloc(void *platform_arg, char *name)
ahd->bugs = AHD_BUGNONE;
ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
| AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
- ahd_timer_init(&ahd->reset_timer);
- ahd_timer_init(&ahd->stat_timer);
+ timer_setup(&ahd->stat_timer, ahd_stat_timer, 0);
ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
@@ -6235,8 +6234,7 @@ ahd_shutdown(void *arg)
/*
* Stop periodic timer callbacks.
*/
- ahd_timer_stop(&ahd->reset_timer);
- ahd_timer_stop(&ahd->stat_timer);
+ del_timer_sync(&ahd->stat_timer);
/* This will reset most registers to 0, but not all */
ahd_reset(ahd, /*reinit*/FALSE);
@@ -7039,20 +7037,11 @@ static const char *termstat_strings[] = {
};
/***************************** Timer Facilities *******************************/
-#define ahd_timer_init init_timer
-#define ahd_timer_stop del_timer_sync
-typedef void ahd_linux_callback_t (u_long);
-
static void
-ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+ahd_timer_reset(struct timer_list *timer, int usec)
{
- struct ahd_softc *ahd;
-
- ahd = (struct ahd_softc *)arg;
del_timer(timer);
- timer->data = (u_long)arg;
timer->expires = jiffies + (usec * HZ)/1000000;
- timer->function = (ahd_linux_callback_t*)func;
add_timer(timer);
}
@@ -7279,8 +7268,7 @@ ahd_init(struct ahd_softc *ahd)
}
init_done:
ahd_restart(ahd);
- ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
- ahd_stat_timer, ahd);
+ ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US);
return (0);
}
@@ -8878,9 +8866,9 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
/**************************** Statistics Processing ***************************/
static void
-ahd_stat_timer(void *arg)
+ahd_stat_timer(struct timer_list *t)
{
- struct ahd_softc *ahd = arg;
+ struct ahd_softc *ahd = from_timer(ahd, t, stat_timer);
u_long s;
int enint_coal;
@@ -8907,8 +8895,7 @@ ahd_stat_timer(void *arg)
ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
- ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
- ahd_stat_timer, ahd);
+ ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US);
ahd_unlock(ahd, &s);
}
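
ahd_stat_timer() is now a proper timer_list callback that recovers the softc with from_timer() and re-arms itself through ahd_timer_reset(), which is just del_timer() plus add_timer() with a new expiry; mod_timer() expresses the same thing in one call. A sketch of a self-rearming statistics timer with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_PERIOD_MS	100	/* hypothetical update interval */

struct demo_softc {
	struct timer_list stat_timer;
};

static void demo_stat_timer(struct timer_list *t)
{
	struct demo_softc *sc = from_timer(sc, t, stat_timer);

	/* ... fold this period's statistics into sc ... */

	mod_timer(&sc->stat_timer, jiffies + msecs_to_jiffies(DEMO_PERIOD_MS));
}

static void demo_start_stats(struct demo_softc *sc)
{
	timer_setup(&sc->stat_timer, demo_stat_timer, 0);
	mod_timer(&sc->stat_timer, jiffies + msecs_to_jiffies(DEMO_PERIOD_MS));
}
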
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 728193a42e6e..8a8b7ae7aed3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -203,9 +203,6 @@ int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t);
*/
#define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op)
-/************************** Timer DataStructures ******************************/
-typedef struct timer_list ahd_timer_t;
-
/********************************** Includes **********************************/
#ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT
#define AIC_DEBUG_REGISTERS 1
@@ -214,10 +211,6 @@ typedef struct timer_list ahd_timer_t;
#endif
#include "aic79xx.h"
-/***************************** Timer Facilities *******************************/
-#define ahd_timer_init init_timer
-#define ahd_timer_stop del_timer_sync
-
/***************************** SMP support ************************************/
#include <linux/spinlock.h>
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 381846164003..6612ff3b2e83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2212,7 +2212,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
* by the capabilities of the bus connectivity of and sync settings for
* the target.
*/
-const struct ahc_syncrate *
+static const struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
struct ahc_initiator_tinfo *tinfo,
u_int *period, u_int *ppr_options, role_t role)
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index acd687f4554e..c6be3aeb302b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1141,7 +1141,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
* or forcing transfer negotiations on the next command to any
* target.
*/
-void
+static void
ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc)
{
int i;
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 45e2d49c1fff..243adb0a38d1 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
PROG= aicasm
OUTDIR ?= ./
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index f2671a8fa7e3..2dbc8330d7d3 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -721,11 +721,8 @@ Out:
*/
static void asd_chip_reset(struct asd_ha_struct *asd_ha)
{
- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
-
ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
asd_chip_hardrst(asd_ha);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
}
/* ---------- Done List Routines ---------- */
@@ -1178,7 +1175,6 @@ static void asd_start_scb_timers(struct list_head *list)
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
- ascb->timer.data = (unsigned long) ascb;
ascb->timer.function = asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 8c1c28239e93..8f147e720cfd 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -291,8 +291,7 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
INIT_LIST_HEAD(&ascb->list);
ascb->scb = ascb->dma_scb.vaddr;
ascb->ha = asd_ha;
- ascb->timer.function = NULL;
- init_timer(&ascb->timer);
+ timer_setup(&ascb->timer, NULL, 0);
ascb->tc_index = -1;
}
@@ -392,7 +391,7 @@ void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op);
int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask);
-void asd_ascb_timedout(unsigned long data);
+void asd_ascb_timedout(struct timer_list *t);
int asd_chip_hardrst(struct asd_ha_struct *asd_ha);
#endif
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index fdac7c2fef37..22873ce8bbfa 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -866,12 +866,12 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id,
* Upper layers can implement their own timeout function, say to free
* resources they have with this SCB, and then call this one at the
* end of their timeout function. To do this, one should initialize
- * the ascb->timer.{function, data, expires} prior to calling the post
+ * the ascb->timer.{function, expires} prior to calling the post
* function. The timer is started by the post function.
*/
-void asd_ascb_timedout(unsigned long data)
+void asd_ascb_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = (void *) data;
+ struct asd_ascb *ascb = from_timer(ascb, t, timer);
struct asd_seq_data *seq = &ascb->ha->seq;
unsigned long flags;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index d4c35df3d4ae..2a01702d5ba7 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -35,14 +35,13 @@
static int asd_enqueue_internal(struct asd_ascb *ascb,
void (*tasklet_complete)(struct asd_ascb *,
struct done_list_struct *),
- void (*timed_out)(unsigned long))
+ void (*timed_out)(struct timer_list *t))
{
int res;
ascb->tasklet_complete = tasklet_complete;
ascb->uldd_timer = 1;
- ascb->timer.data = (unsigned long) ascb;
ascb->timer.function = timed_out;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
@@ -87,9 +86,9 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
asd_ascb_free(ascb);
}
-static void asd_clear_nexus_timedout(unsigned long data)
+static void asd_clear_nexus_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = (void *)data;
+ struct asd_ascb *ascb = from_timer(ascb, t, timer);
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("%s: here\n", __func__);
@@ -261,9 +260,9 @@ static int asd_clear_nexus_index(struct sas_task *task)
/* ---------- TMFs ---------- */
-static void asd_tmf_timedout(unsigned long data)
+static void asd_tmf_timedout(struct timer_list *t)
{
- struct asd_ascb *ascb = (void *) data;
+ struct asd_ascb *ascb = from_timer(ascb, t, timer);
struct tasklet_completion_status *tcs = ascb->uldd_task;
ASD_DPRINTK("tmf timed out\n");
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index af032c46ec0e..21f6421536a0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -101,7 +101,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
@@ -837,10 +837,8 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
@@ -930,10 +928,8 @@ static int arcmsr_resume(struct pci_dev *pdev)
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
return 0;
controller_stop:
@@ -3459,9 +3455,9 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
}
}
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
arcmsr_hbaA_request_device_map(acb);
diff --git a/drivers/scsi/arm/Makefile b/drivers/scsi/arm/Makefile
index 16c3e86a6b1b..b576d9276f71 100644
--- a/drivers/scsi/arm/Makefile
+++ b/drivers/scsi/arm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for drivers/scsi/arm
#
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 24388795ee9a..f4775ca70bab 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2318,9 +2318,9 @@ DEF_SCSI_QCMD(fas216_noqueue_command)
* Error handler timeout function. Indicate that we timed out,
* and wake up any error handler process so it can continue.
*/
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
{
- FAS216_Info *info = (FAS216_Info *)data;
+ FAS216_Info *info = from_timer(info, t, eh_timer);
fas216_log(info, LOG_ERROR, "error handling timed out\n");
@@ -2849,9 +2849,7 @@ int fas216_init(struct Scsi_Host *host)
info->rst_dev_status = -1;
info->rst_bus_status = -1;
init_waitqueue_head(&info->eh_wait);
- init_timer(&info->eh_timer);
- info->eh_timer.data = (unsigned long)info;
- info->eh_timer.function = fas216_eh_timer;
+ timer_setup(&info->eh_timer, fas216_eh_timer, 0);
spin_lock_init(&info->host_lock);
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 9b839b1e895a..75c44399fc88 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ATP870U_H
#define _ATP870U_H
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 55e3f8b40eb3..e035acf56652 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -81,12 +81,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- u32 min_eqd; /* in usecs */
- u32 max_eqd; /* in usecs */
- u32 prev_eqd; /* in usecs */
- u32 et_eqd; /* configured val when aic is off */
- ulong jiffies;
- u64 eq_prev; /* Used to calculate eqe */
+ unsigned long jiffies;
+ u32 eq_prev; /* Used to calculate eqe */
+ u32 prev_eqd;
+#define BEISCSI_EQ_DELAY_MIN 0
+#define BEISCSI_EQ_DELAY_DEF 32
+#define BEISCSI_EQ_DELAY_MAX 128
};
struct be_eq_obj {
@@ -148,9 +148,8 @@ struct be_ctrl_info {
/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1)
-#define PAGE_SHIFT_4K 12
-#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
-#define mcc_timeout 120000 /* 12s timeout */
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index a79a5e72c777..2eb66df3e3d6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -675,8 +675,8 @@ static int be_mbox_notify(struct be_ctrl_info *ctrl)
return status;
}
-void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
- bool embedded, u8 sge_cnt)
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
+ bool embedded, u8 sge_cnt)
{
if (embedded)
wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
@@ -688,7 +688,7 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len)
+ u8 subsystem, u8 opcode, u32 cmd_len)
{
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
@@ -947,7 +947,6 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
default:
mutex_unlock(&ctrl->mbox_lock);
BUG();
- return -ENXIO;
}
be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
if (queue_type != QTYPE_SGL)
@@ -1522,6 +1521,52 @@ int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
return ret;
}
+int beiscsi_set_host_data(struct beiscsi_hba *phba)
+{
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct be_cmd_set_host_data *ioctl;
+ struct be_mcc_wrb *wrb;
+ int ret = 0;
+
+ if (is_chip_be2_be3r(phba))
+ return ret;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_HOST_DATA,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
+ ioctl->param.req.param_len =
+ snprintf((char *)ioctl->param.req.param_data,
+ sizeof(ioctl->param.req.param_data),
+ "Linux iSCSI v%s", BUILD_STR);
+ ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+ if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
+ ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
+ ret = be_mbox_notify(ctrl);
+ if (!ret) {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : HBA set host driver version\n");
+ } else {
+ /**
+ * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
+ * Older FW versions return this error.
+ */
+ if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
+ ret == MCC_STATUS_INVALID_LENGTH)
+ __beiscsi_log(phba, KERN_INFO,
+ "BG_%d : HBA failed to set host driver version\n");
+ }
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
+
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
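The new beiscsi_set_host_data() above reports the driver version string to firmware; the length it sends is the snprintf() return value rounded up to a 4-byte multiple and clamped to the command's maximum. A small sketch of just that length handling, with illustrative names (build_host_param and MAX_DRV_VERSION are stand-ins, not the driver's symbols):

#include <linux/kernel.h>
#include <linux/string.h>

#define MAX_DRV_VERSION	0x30	/* illustrative, mirrors BE_CMD_MAX_DRV_VERSION */

static u32 build_host_param(char *buf, size_t bufsz, const char *ver)
{
	u32 len;

	/* snprintf() returns the length the string wants, not what fit */
	len = snprintf(buf, bufsz, "Linux iSCSI v%s", ver);
	len = ALIGN(len, 4);		/* firmware expects a 4-byte multiple */
	if (len > MAX_DRV_VERSION)	/* clamp to the command's maximum */
		len = MAX_DRV_VERSION;
	return len;
}
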
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index d9b6773facdb..6f05d1dfa10a 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -230,6 +230,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_HOST_DATA 93
#define OPCODE_COMMON_SET_FEATURES 191
/**
@@ -737,6 +738,30 @@ struct be_cmd_hba_name {
u8 initiator_alias[BE_INI_ALIAS_LEN];
} __packed;
+/******************** COMMON SET HOST DATA *******************/
+#define BE_CMD_SET_HOST_PARAM_ID 0x2
+#define BE_CMD_MAX_DRV_VERSION 0x30
+struct be_sethost_req {
+ u32 param_id;
+ u32 param_len;
+ u32 param_data[32];
+};
+
+struct be_sethost_resp {
+ u32 rsvd0;
+};
+
+struct be_cmd_set_host_data {
+ union {
+ struct be_cmd_req_hdr req_hdr;
+ struct be_cmd_resp_hdr resp_hdr;
+ } h;
+ union {
+ struct be_sethost_req req;
+ struct be_sethost_resp resp;
+ } param;
+} __packed;
+
/******************** COMMON SET Features *******************/
#define BE_CMD_SET_FEATURE_UER 0x10
#define BE_CMD_UER_SUPP_BIT 0x1
@@ -793,8 +818,6 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
struct be_queue_info *mccq,
struct be_queue_info *cq);
-unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
-
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
@@ -847,6 +870,7 @@ int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
+int beiscsi_set_host_data(struct beiscsi_hba *phba);
struct be_default_pdu_context {
u32 dw[4];
@@ -1274,19 +1298,9 @@ struct be_cmd_get_port_name {
* a read command
*/
#define TGT_CTX_UPDT_CMD 7 /* Target context update */
-#define TGT_STS_CMD 8 /* Target R2T and other BHS
- * where only the status number
- * need to be updated
- */
-#define TGT_DATAIN_CMD 9 /* Target Data-Ins in response
- * to read command
- */
-#define TGT_SOS_PDU 10 /* Target:standalone status
- * response
- */
#define TGT_DM_CMD 11 /* Indicates that the bhs
- * preparedby
- * driver should not be touched
+ * prepared by driver should not
+ * be touched.
*/
/* Returns the number of items in the field array. */
@@ -1444,9 +1458,9 @@ struct be_cmd_get_port_name {
* the cxn
*/
-void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
+void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
bool embedded, u8 sge_cnt);
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len);
+ u8 subsystem, u8 opcode, u32 cmd_len);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 43a80ce5ce6a..a398c54139aa 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -684,41 +684,6 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
}
/**
- * beiscsi_get_initname - Read Initiator Name from flash
- * @buf: buffer bointer
- * @phba: The device priv structure instance
- *
- * returns number of bytes
- */
-static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
-{
- int rc;
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_hba_name *resp;
-
- tag = be_cmd_get_initname(phba);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Getting Initiator Name Failed\n");
-
- return -EBUSY;
- }
-
- rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : Initiator Name MBX Failed\n");
- return rc;
- }
-
- resp = embedded_payload(wrb);
- rc = sprintf(buf, "%s\n", resp->initiator_name);
- return rc;
-}
-
-/**
* beiscsi_get_port_state - Get the Port State
* @shost : pointer to scsi_host structure
*
@@ -772,7 +737,6 @@ static void beiscsi_get_port_speed(struct Scsi_Host *shost)
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns host parameter
*/
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf)
@@ -783,7 +747,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
- return -EBUSY;
+ return 0;
}
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : In beiscsi_get_host_param, param = %d\n", param);
@@ -794,15 +758,19 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_get_macaddr Failed\n");
- return status;
+ return 0;
}
break;
case ISCSI_HOST_PARAM_INITIATOR_NAME:
- status = beiscsi_get_initname(buf, phba);
+ /* try fetching user configured name first */
+ status = beiscsi_get_initiator_name(phba, buf, true);
if (status < 0) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Initiator Name Failed\n");
- return status;
+ status = beiscsi_get_initiator_name(phba, buf, false);
+ if (status < 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BS_%d : Retreiving Initiator Name Failed\n");
+ status = 0;
+ }
}
break;
case ISCSI_HOST_PARAM_PORT_STATE:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index b9d459a21f25..f41dfda97e17 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index b4542e7e2ad5..b3cfdd5f4d1c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -455,14 +455,12 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
phba->db_va = addr;
- phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
if (phba->generation == BE_GEN2)
pcicfg_reg = 1;
@@ -476,7 +474,6 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
goto pci_map_err;
phba->ctrl.pcicfg = addr;
phba->pci_va = addr;
- phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
return 0;
pci_map_err:
@@ -790,6 +787,24 @@ static irqreturn_t be_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void beiscsi_free_irqs(struct beiscsi_hba *phba)
+{
+ struct hwi_context_memory *phwi_context;
+ int i;
+
+ if (!phba->pcidev->msix_enabled) {
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+ return;
+ }
+
+ phwi_context = phba->phwi_ctrlr->phwi_ctxt;
+ for (i = 0; i <= phba->num_cpus; i++) {
+ free_irq(pci_irq_vector(phba->pcidev, i),
+ &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
+ }
+}
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
@@ -803,15 +818,14 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
if (pcidev->msix_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
- GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL,
+ "beiscsi_%02x_%02x",
+ phba->shost->host_no, i);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
- phba->shost->host_no, i);
ret = request_irq(pci_irq_vector(pcidev, i),
be_isr_msix, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
@@ -824,13 +838,12 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
goto free_msix_irqs;
}
}
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
+ phba->shost->host_no);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
- phba->shost->host_no);
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
@@ -924,12 +937,11 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
* this can happen if clean_task is called on a task that
* failed in xmit_task or alloc_pdu.
*/
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
- "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
- "value there=%p\n", phba->io_sgl_free_index,
- phba->io_sgl_hndl_base
- [phba->io_sgl_free_index]);
- spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
+ phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1864,8 +1876,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK);
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
+ CQE_CODE_MASK);
/* Get the CID */
if (is_chip_be2_be3r(phba)) {
@@ -3024,7 +3036,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
- phwi_context->cur_eqd);
+ BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_cmd_eq_create"
@@ -3508,13 +3520,14 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba,
goto err;
/* Ask BE to create MCC compl queue; */
if (phba->pcidev->msix_enabled) {
- if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
- [phba->num_cpus].q, false, true, 0))
- goto mcc_cq_free;
+ if (beiscsi_cmd_cq_create(ctrl, cq,
+ &phwi_context->be_eq[phba->num_cpus].q,
+ false, true, 0))
+ goto mcc_cq_free;
} else {
if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
false, true, 0))
- goto mcc_cq_free;
+ goto mcc_cq_free;
}
/* Alloc MCC queue */
@@ -3689,9 +3702,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- phwi_context->max_eqd = 128;
- phwi_context->min_eqd = 0;
- phwi_context->cur_eqd = 32;
/* set port optic state to unknown */
phba->optic_state = 0xff;
@@ -4792,10 +4802,10 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
sg = scsi_sglist(sc);
if (sc->sc_data_direction == DMA_TO_DEVICE)
writedir = 1;
- else
+ else
writedir = 0;
- return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
@@ -4917,6 +4927,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
schedule_work(&phba->boot_work);
}
+/**
+ * Boot flag info for iscsi-utilities
+ * Bit 0 Block valid flag
+ * Bit 1 Firmware booting selected
+ */
+#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
@@ -4972,7 +4989,7 @@ static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
auth_data.chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
@@ -5004,7 +5021,7 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5209,8 +5226,8 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
if (eqd < 8)
eqd = 0;
- eqd = min_t(u32, eqd, phwi_context->max_eqd);
- eqd = max_t(u32, eqd, phwi_context->min_eqd);
+ eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
+ eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
aic->jiffies = now;
aic->eq_prev = pbe_eq->cq_count;
@@ -5230,12 +5247,11 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}
-static void beiscsi_hw_tpe_check(unsigned long ptr)
+static void beiscsi_hw_tpe_check(struct timer_list *t)
{
- struct beiscsi_hba *phba;
+ struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
u32 wait;
- phba = (struct beiscsi_hba *)ptr;
/* if not TPE, do nothing */
if (!beiscsi_detect_tpe(phba))
return;
@@ -5248,11 +5264,10 @@ static void beiscsi_hw_tpe_check(unsigned long ptr)
msecs_to_jiffies(wait));
}
-static void beiscsi_hw_health_check(unsigned long ptr)
+static void beiscsi_hw_health_check(struct timer_list *t)
{
- struct beiscsi_hba *phba;
+ struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
- phba = (struct beiscsi_hba *)ptr;
beiscsi_detect_ue(phba);
if (beiscsi_detect_ue(phba)) {
__beiscsi_log(phba, KERN_ERR,
@@ -5300,6 +5315,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
be2iscsi_enable_msix(phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
@@ -5389,15 +5405,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
hwi_disable_intr(phba);
- if (phba->pcidev->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- free_irq(pci_irq_vector(phba->pcidev, i),
- &phwi_context->be_eq[i]);
- kfree(phba->msi_name[i]);
- }
- } else
- if (phba->pcidev->irq)
- free_irq(phba->pcidev->irq, phba);
+ beiscsi_free_irqs(phba);
pci_free_irq_vectors(phba->pcidev);
for (i = 0; i < phba->num_cpus; i++) {
@@ -5588,12 +5596,12 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : be_ctrl_init failed\n");
- goto hba_free;
+ goto free_hba;
}
ret = beiscsi_init_sliport(phba);
if (ret)
- goto hba_free;
+ goto free_hba;
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
@@ -5606,6 +5614,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
beiscsi_get_port_name(&phba->ctrl, phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
beiscsi_set_uer_feature(phba);
be2iscsi_enable_msix(phba);
@@ -5673,13 +5682,13 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
"Failed to beiscsi_init_irqs\n");
- goto free_blkenbld;
+ goto disable_iopoll;
}
hwi_enable_intr(phba);
ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
if (ret)
- goto free_blkenbld;
+ goto free_irqs;
/* set online bit after port is operational */
set_bit(BEISCSI_HBA_ONLINE, &phba->state);
@@ -5708,21 +5717,22 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
* Start UE detection here. UE before this will cause stall in probe
* and eventually fail the probe.
*/
- init_timer(&phba->hw_check);
- phba->hw_check.function = beiscsi_hw_health_check;
- phba->hw_check.data = (unsigned long)phba;
+ timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
-free_blkenbld:
- destroy_workqueue(phba->wq);
+free_irqs:
+ hwi_disable_intr(phba);
+ beiscsi_free_irqs(phba);
+disable_iopoll:
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
irq_poll_disable(&pbe_eq->iopoll);
}
+ destroy_workqueue(phba->wq);
free_twq:
hwi_cleanup_port(phba);
beiscsi_cleanup_port(phba);
@@ -5731,9 +5741,9 @@ free_port:
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
+ phba->ctrl.mbox_mem_alloced.dma);
beiscsi_unmap_pci_function(phba);
-hba_free:
+free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
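One of the be_main.c cleanups above replaces a fixed-size kzalloc() followed by sprintf() with kasprintf(), which allocates exactly the formatted length for each IRQ name. A hedged sketch of the per-vector registration; register_vector is an illustrative helper, not a driver function:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

static int register_vector(struct pci_dev *pdev, int vec, u32 host_no,
			   irq_handler_t handler, void *cookie, char **name)
{
	/* kasprintf() allocates and formats the IRQ name in one step,
	 * replacing kzalloc(<fixed len>) + sprintf() */
	*name = kasprintf(GFP_KERNEL, "beiscsi_%02x_%02x", host_no, vec);
	if (!*name)
		return -ENOMEM;

	return request_irq(pci_irq_vector(pdev, vec), handler, 0,
			   *name, cookie);
}
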
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 81ce3ffda968..42bb6bdb68bd 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -31,7 +31,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "11.4.0.0"
+#define BUILD_STR "11.4.0.1"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -59,7 +59,7 @@
#define BE2_DEFPDU_DATA_SZ 8192
#define BE2_MAX_NUM_CQ_PROC 512
-#define MAX_CPUS 64
+#define MAX_CPUS 64U
#define BEISCSI_MAX_NUM_CPUS 7
#define BEISCSI_VER_STRLEN 32
@@ -77,9 +77,7 @@
#define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */
#define BEISCSI_NUM_MAX_LUN 256 /* scsi_host->max_lun */
-#define BEISCSI_NUM_DEVICES_SUPPORTED 0x01
#define BEISCSI_MAX_FRAGS_INIT 192
-#define BE_NUM_MSIX_ENTRIES 1
#define BE_SENSE_INFO_SIZE 258
#define BE_ISCSI_PDU_HEADER_SIZE 64
@@ -155,8 +153,6 @@
#define PAGES_REQUIRED(x) \
((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
-#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
-
#define MEM_DESCR_OFFSET 8
#define BEISCSI_DEFQ_HDR 1
#define BEISCSI_DEFQ_DATA 0
@@ -209,13 +205,8 @@ struct mem_array {
};
struct be_mem_descriptor {
- unsigned int index; /* Index of this memory parameter */
- unsigned int category; /* type indicates cached/non-cached */
- unsigned int num_elements; /* number of elements in this
- * descriptor
- */
- unsigned int alignment_mask; /* Alignment mask for this block */
unsigned int size_in_bytes; /* Size required by memory block */
+ unsigned int num_elements;
struct mem_array *mem_array;
};
@@ -238,32 +229,12 @@ struct hba_parameters {
unsigned int num_eq_entries;
unsigned int wrbs_per_cxn;
unsigned int hwi_ws_sz;
- /**
- * These are calculated from other params. They're here
- * for debug purposes
- */
- unsigned int num_mcc_pages;
- unsigned int num_mcc_cq_pages;
- unsigned int num_cq_pages;
- unsigned int num_eq_pages;
-
- unsigned int num_async_pdu_buf_pages;
- unsigned int num_async_pdu_buf_sgl_pages;
- unsigned int num_async_pdu_buf_cq_pages;
-
- unsigned int num_async_pdu_hdr_pages;
- unsigned int num_async_pdu_hdr_sgl_pages;
- unsigned int num_async_pdu_hdr_cq_pages;
-
- unsigned int num_sge;
};
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
spinlock_t wrb_lock;
- struct list_head wrb_handle_list;
- struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
struct wrb_handle **pwrb_handle_basestd;
struct iscsi_wrb *plast_wrb;
@@ -272,8 +243,6 @@ struct hwi_wrb_context {
unsigned short wrb_handles_available;
unsigned short cid;
uint8_t ulp_num; /* ULP to which CID binded */
- uint16_t register_set;
- uint16_t doorbell_format;
uint32_t doorbell_offset;
};
@@ -310,9 +279,6 @@ struct beiscsi_hba {
u8 __iomem *csr_va; /* CSR */
u8 __iomem *db_va; /* Door Bell */
u8 __iomem *pci_va; /* PCI Config */
- struct be_bus_address csr_pa; /* CSR */
- struct be_bus_address db_pa; /* CSR */
- struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
unsigned int num_cpus;
@@ -324,7 +290,6 @@ struct beiscsi_hba {
unsigned short io_sgl_free_index;
unsigned short io_sgl_hndl_avbl;
struct sgl_handle **io_sgl_hndl_base;
- struct sgl_handle **sgl_hndl_array;
unsigned short eh_sgl_alloc_index;
unsigned short eh_sgl_free_index;
@@ -1009,10 +974,6 @@ struct be_ring {
};
struct hwi_controller {
- struct list_head io_sgl_list;
- struct list_head eh_sgl_list;
- struct sgl_handle *psgl_handle_base;
-
struct hwi_wrb_context *wrb_context;
struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
@@ -1036,10 +997,6 @@ struct wrb_handle {
};
struct hwi_context_memory {
- /* Adaptive interrupt coalescing (AIC) info */
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
struct be_eq_obj be_eq[MAX_CPUS];
struct be_queue_info be_cq[MAX_CPUS - 1];
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index c73775368d09..66ca967f2850 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -19,43 +19,6 @@
#include "be_iscsi.h"
#include "be_main.h"
-int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
- struct be_set_eqd *set_eqd,
- int num)
-{
- struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_modify_eq_delay *req;
- unsigned int tag;
- int i;
-
- mutex_lock(&ctrl->mbox_lock);
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
-
- req->num_eq = cpu_to_le32(num);
- for (i = 0; i < num; i++) {
- req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
- req->delay[i].phase = 0;
- req->delay[i].delay_multiplier =
- cpu_to_le32(set_eqd[i].delay_multiplier);
- }
-
- /* ignore the completion of this mbox command */
- set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba,
struct bsg_job *job,
@@ -156,7 +119,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : unknown addr family %d\n",
dst_addr->sa_family);
- return -EINVAL;
+ return 0;
}
phwi_ctrlr = phba->phwi_ctrlr;
@@ -236,16 +199,19 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
}
/*
- * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
- * @phba: Driver priv structure
- * @nonemb_cmd: Address of the MBX command issued
- * @resp_buf: Buffer to copy the MBX cmd response
- * @resp_buf_len: respone lenght to be copied
+ * beiscsi_exec_nemb_cmd()- execute non-embedded MBX cmd
+ * @phba: driver priv structure
+ * @nonemb_cmd: DMA address of the MBX command to be issued
+ * @cbfn: callback func on MCC completion
+ * @resp_buf: buffer to copy the MBX cmd response
+ * @resp_buf_len: response length to be copied
*
**/
-static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
- struct be_dma_mem *nonemb_cmd, void *resp_buf,
- int resp_buf_len)
+static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *nonemb_cmd,
+ void (*cbfn)(struct beiscsi_hba *,
+ unsigned int),
+ void *resp_buf, u32 resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
@@ -267,36 +233,54 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
+ if (cbfn) {
+ struct be_dma_mem *tag_mem;
+
+ set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+ ctrl->ptag_state[tag].cbfn = cbfn;
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+
+ /* store DMA mem to be freed in callback */
+ tag_mem->size = nonemb_cmd->size;
+ tag_mem->va = nonemb_cmd->va;
+ tag_mem->dma = nonemb_cmd->dma;
+ }
be_mcc_notify(phba, tag);
mutex_unlock(&ctrl->mbox_lock);
+ /* with cbfn set, it's an async cmd, don't wait */
+ if (cbfn)
+ return 0;
+
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
+ /* copy the response, if any */
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
+ /**
+ * This is a special case of NTWK_GET_IF_INFO where the size of the
+ * response is not known. beiscsi_if_get_info() checks the return
+ * value to free the DMA buffer.
+ */
+ if (rc == -EAGAIN)
+ return rc;
- if (rc) {
- /* Check if the MBX Cmd needs to be re-issued */
- if (rc == -EAGAIN)
- return rc;
-
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+ /**
+ * If FW is busy, i.e. the driver timed out, the DMA buffer is saved
+ * with the tag; it is freed only when the cmd completes.
+ */
+ if (rc == -EBUSY)
+ return rc;
- if (rc != -EBUSY)
- goto free_cmd;
- else
- return rc;
- }
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
-static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
- int iscsi_cmd, int size)
+static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
+ struct be_dma_mem *cmd,
+ u8 subsystem, u8 opcode, u32 size)
{
cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
@@ -305,13 +289,86 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
return -ENOMEM;
}
cmd->size = size;
- be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+ be_cmd_hdr_prepare(cmd->va, subsystem, opcode, size);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BG_%d : subsystem iSCSI cmd %d size %d\n",
- iscsi_cmd, size);
+ "BG_%d : subsystem %u cmd %u size %u\n",
+ subsystem, opcode, size);
return 0;
}
+static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
+{
+ struct be_dma_mem *tag_mem;
+
+ /* status is ignored */
+ __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+ tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
+ if (tag_mem->size) {
+ pci_free_consistent(phba->pcidev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+ tag_mem->size = 0;
+ }
+}
+
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
+ struct be_set_eqd *set_eqd, int num)
+{
+ struct be_cmd_req_modify_eq_delay *req;
+ struct be_dma_mem nonemb_cmd;
+ int i, rc;
+
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ req->num_eq = cpu_to_le32(num);
+ for (i = 0; i < num; i++) {
+ req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+ req->delay[i].phase = 0;
+ req->delay[i].delay_multiplier =
+ cpu_to_le32(set_eqd[i].delay_multiplier);
+ }
+
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd,
+ __beiscsi_eq_delay_compl, NULL, 0);
+}
+
+/**
+ * beiscsi_get_initiator_name - read initiator name from flash
+ * @phba: device priv structure
+ * @name: buffer pointer
+ * @cfg: fetch user configured
+ *
+ */
+int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg)
+{
+ struct be_dma_mem nonemb_cmd;
+ struct be_cmd_hba_name resp;
+ struct be_cmd_hba_name *req;
+ int rc;
+
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI_INI,
+ OPCODE_ISCSI_INI_CFG_GET_HBA_NAME, sizeof(resp));
+ if (rc)
+ return rc;
+
+ req = nonemb_cmd.va;
+ if (cfg)
+ req->hdr.version = 1;
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ &resp, sizeof(resp));
+ if (rc) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
+ }
+ rc = sprintf(name, "%s\n", resp.initiator_name);
+ return rc;
+}
+
unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
@@ -368,9 +425,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rt_val;
- rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
- sizeof(*req));
+ rt_val = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
+ sizeof(*req));
if (rt_val)
return rt_val;
@@ -379,7 +436,7 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
req->ip_addr.ip_type = ip_type;
memcpy(req->ip_addr.addr, gw,
(ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
}
int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
@@ -420,17 +477,17 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
- sizeof(*resp));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+ sizeof(*resp));
if (rc)
return rc;
req = nonemb_cmd.va;
req->ip_type = ip_type;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
- sizeof(*resp));
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ resp, sizeof(*resp));
}
static int
@@ -441,9 +498,9 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
- sizeof(*req));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
if (rc)
return rc;
@@ -461,7 +518,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba,
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
if_info->ip_addr.subnet_mask,
sizeof(if_info->ip_addr.subnet_mask));
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
if (rc < 0 || req->ip_params.ip_record.status) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BG_%d : failed to clear IP: rc %d status %d\n",
@@ -479,9 +536,9 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
uint32_t ip_len;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
- sizeof(*req));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+ sizeof(*req));
if (rc)
return rc;
@@ -499,7 +556,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
subnet, ip_len);
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
/**
* In some cases, host needs to look into individual record status
* even though FW reported success for that IOCTL.
@@ -527,7 +584,8 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
return rc;
if (if_info->dhcp_state) {
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
+ CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
sizeof(*reldhcp));
if (rc)
@@ -536,7 +594,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
reldhcp = nonemb_cmd.va;
reldhcp->interface_hndl = phba->interface_handle;
reldhcp->ip_type = ip_type;
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
if (rc < 0) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : failed to release existing DHCP: %d\n",
@@ -606,7 +664,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
}
}
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
sizeof(*dhcpreq));
if (rc)
@@ -617,7 +675,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
dhcpreq->retry_count = 1;
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = ip_type;
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0);
exit:
kfree(if_info);
@@ -673,9 +731,10 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
return rc;
do {
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
- ioctl_size);
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd,
+ CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO,
+ ioctl_size);
if (rc)
return rc;
@@ -698,8 +757,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
return -ENOMEM;
}
- rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info,
- ioctl_size);
+ rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, *if_info,
+ ioctl_size);
/* Check if the error is because of Insufficent_Buffer */
if (rc == -EAGAIN) {
@@ -728,41 +787,14 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
struct be_dma_mem nonemb_cmd;
int rc;
- rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
- OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
- sizeof(*nic));
+ rc = beiscsi_prep_nemb_cmd(phba, &nonemb_cmd, CMD_SUBSYSTEM_ISCSI,
+ OPCODE_COMMON_ISCSI_NTWK_GET_NIC_CONFIG,
+ sizeof(*nic));
if (rc)
return rc;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, nic, sizeof(*nic));
-}
-
-
-
-unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
-{
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_hba_name *req;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- if (mutex_lock_interruptible(&ctrl->mbox_lock))
- return 0;
- wrb = alloc_mcc_wrb(phba, &tag);
- if (!wrb) {
- mutex_unlock(&ctrl->mbox_lock);
- return 0;
- }
-
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
- OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
- sizeof(*req));
-
- be_mcc_notify(phba, tag);
- mutex_unlock(&ctrl->mbox_lock);
- return tag;
+ return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL,
+ nic, sizeof(*nic));
}
static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
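(No example is added for the async MCC path introduced in beiscsi_exec_nemb_cmd(); its behaviour depends on the driver's tag-state bookkeeping shown in the hunk itself.)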
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 06ddc5ad6874..0b22c99a7a22 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
@@ -157,7 +157,6 @@ struct be_bsg_vendor_cmd {
struct beiscsi_endpoint {
struct beiscsi_hba *phba;
- struct beiscsi_sess *sess;
struct beiscsi_conn *conn;
struct iscsi_endpoint *openiscsi_ep;
unsigned short ip_type;
@@ -169,15 +168,12 @@ struct beiscsi_endpoint {
u16 cid_vld;
};
-unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
- struct beiscsi_endpoint *beiscsi_ep,
- unsigned short cid,
- unsigned short issue_reset,
- unsigned short savecfg_flag);
int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct invldt_cmd_tbl *inv_tbl,
unsigned int nents);
+int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg);
+
int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 475cf925d5e8..442fc3db8f1f 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 5caf5f3ff642..cf0466686804 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -692,9 +692,9 @@ ext:
}
void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
{
- struct bfad_s *bfad = (struct bfad_s *) data;
+ struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
unsigned long flags;
struct list_head doneq;
@@ -719,9 +719,7 @@ bfad_bfa_tmo(unsigned long data)
void
bfad_init_timer(struct bfad_s *bfad)
{
- init_timer(&bfad->hal_tmo);
- bfad->hal_tmo.function = bfad_bfa_tmo;
- bfad->hal_tmo.data = (unsigned long)bfad;
+ timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
mod_timer(&bfad->hal_tmo,
jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2e8c0dfc79c..72ca2a2e08e2 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3137,16 +3137,9 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
struct bfad_s *bfad = im_port->bfad;
- struct request_queue *request_q = job->req->q;
void *payload_kbuf;
int rc = -EINVAL;
- /*
- * Set the BSG device request_queue size to 256 to support
- * payloads larger than 512*1024K bytes.
- */
- blk_queue_max_segments(request_q, 256);
-
/* Allocate a temp buffer to hold the passed in user space command */
payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 8dcd8c70c7ee..05f523971348 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -255,7 +255,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bfad_s *bfad = port->bfad;
struct bfa_s *bfa = &bfad->bfa;
struct bfa_ioc_s *ioc = &bfa->ioc;
- int addr, len, rc, i;
+ int addr, rc, i;
+ u32 len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@@ -266,7 +267,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
return PTR_ERR(kern_buf);
rc = sscanf(kern_buf, "%x:%x", &addr, &len);
- if (rc < 2) {
+ if (rc < 2 || len > (UINT_MAX >> 2)) {
printk(KERN_INFO
"bfad[%d]: %s failed to read user buf\n",
bfad->inst_no, __func__);
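The bfad_debugfs.c change above makes the user-supplied register-read length unsigned and rejects values that would overflow once converted to a byte count (the driver later uses len << 2). A minimal sketch of that validation, with illustrative names:

#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_regrd(const char *kbuf, u32 *addr, u32 *len)
{
	if (sscanf(kbuf, "%x:%x", addr, len) < 2)
		return -EINVAL;

	/*
	 * len is later used as a count of 32-bit words (len << 2 bytes),
	 * so reject values that would overflow the byte size.
	 */
	if (*len > (UINT_MAX >> 2))
		return -EINVAL;

	return 0;
}
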
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index cfcfff48e8e1..4fe980a6441f 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -314,7 +314,7 @@ int bfad_setup_intr(struct bfad_s *bfad);
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
-void bfad_bfa_tmo(unsigned long data);
+void bfad_bfa_tmo(struct timer_list *t);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 6844ba361616..e6b9de7d41ac 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -823,7 +823,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
skb_queue_head_init(&port->fcoe_pending_queue);
port->fcoe_pending_queue_active = 0;
- setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+ timer_setup(&port->timer, fcoe_queue_timer, 0);
fcoe_link_speed_update(lport);
@@ -845,9 +845,9 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
return 0;
}
-static void bnx2fc_destroy_timer(unsigned long data)
+static void bnx2fc_destroy_timer(struct timer_list *t)
{
- struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+ struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer);
printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
"Destroy compl not received!!\n");
@@ -1946,11 +1946,10 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
{
if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
- init_timer(&hba->destroy_timer);
+ timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer,
+ 0);
hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
jiffies;
- hba->destroy_timer.function = bnx2fc_destroy_timer;
- hba->destroy_timer.data = (unsigned long)hba;
add_timer(&hba->destroy_timer);
wait_event_interruptible(hba->destroy_wait,
test_bit(BNX2FC_FLAG_DESTROY_CMPL,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5b6153f23f01..8e2f767147cb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1084,24 +1084,35 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
int rc = SUCCESS;
+ unsigned int time_left;
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
-
+ /*
+ * Can't wait forever on cleanup response lest we let the SCSI error
+ * handler wait forever
+ */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
io_req->wait_for_comp = 0;
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
+ __func__);
+
/*
- * release the reference taken in eh_abort to allow the
- * target to re-login after flushing IOs
+ * Release the reference held by the SCSI command; the cleanup
+ * completion hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl()
+ * and thus the SCSI command is not returned by bnx2fc_scsi_done().
*/
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_lock_bh(&tgt->tgt_lock);
return rc;
}
+
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1118,6 +1129,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
int rc;
+ unsigned int time_left;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1194,6 +1206,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
+ /*
+ * We don't want to hold off the upper layer timer so simply
+ * cleanup the command and return that I/O was successfully
+ * aborted.
+ */
rc = bnx2fc_abts_cleanup(io_req);
/* This only occurs when an task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
@@ -1201,7 +1218,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- goto out;
+ goto done;
}
/* Cancel the current timer running on this io_req */
@@ -1221,7 +1238,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
+ /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ (2 * rp->r_a_tov + 1) * HZ);
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
@@ -1233,8 +1254,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
+ /*
+ * Cleanup firmware residuals before returning control back
+ * to SCSI ML.
+ */
rc = bnx2fc_abts_cleanup(io_req);
- goto out;
+ goto done;
} else {
/*
* We come here even when there was a race condition
@@ -1249,7 +1274,6 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
-out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
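The bnx2fc_io.c changes above replace unbounded wait_for_completion() calls in the error handler with wait_for_completion_timeout(), so a lost cleanup or ABTS response can no longer stall SCSI EH forever. A minimal sketch of the bounded wait, with illustrative names (wait_for_cleanup is not a driver function):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_for_cleanup(struct completion *done, unsigned long tov_secs)
{
	unsigned long time_left;

	/* returns 0 on timeout, otherwise the remaining jiffies */
	time_left = wait_for_completion_timeout(done,
						(2 * tov_secs + 1) * HZ);
	return time_left ? 0 : -ETIMEDOUT;
}
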
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 59a2dfbcbc69..a8ae1a019eea 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -14,8 +14,8 @@
*/
#include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
struct fcoe_port *port,
struct fc_rport_priv *rdata);
@@ -27,10 +27,10 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
@@ -40,10 +40,10 @@ static void bnx2fc_upld_timer(unsigned long data)
wake_up_interruptible(&tgt->upld_wait);
}
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
/* NOTE: This function should never be called, as
@@ -65,7 +65,7 @@ static void bnx2fc_ofld_timer(unsigned long data)
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->ofld_wait,
@@ -277,7 +277,7 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->upld_wait,
(test_bit(
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 89ef1a1678d1..663a63d4dae4 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -858,7 +858,7 @@ extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
struct bnx2i_endpoint *ep);
-extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern void bnx2i_ep_ofld_timer(struct timer_list *t);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 42921dbba927..e0640e0f259f 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -332,12 +332,10 @@ static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_login_request *login_wqe;
struct iscsi_login_req *login_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
login_hdr = (struct iscsi_login_req *)task->hdr;
login_wqe = (struct bnx2i_login_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -391,12 +389,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
struct iscsi_tm *tmfabort_hdr;
struct scsi_cmnd *ref_sc;
struct iscsi_task *ctask;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_tmf_request *tmfabort_wqe;
u32 dword;
u32 scsi_lun[2];
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
tmfabort_wqe = (struct bnx2i_tmf_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -463,12 +459,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *mtask)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_text_request *text_wqe;
struct iscsi_text *text_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
text_hdr = (struct iscsi_text *)mtask->hdr;
text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
@@ -541,11 +535,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
char *datap, int data_len, int unsol)
{
struct bnx2i_endpoint *ep = bnx2i_conn->ep;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_nop_out_request *nopout_wqe;
struct iscsi_nopout *nopout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
@@ -602,11 +594,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_logout_request *logout_wqe;
struct iscsi_logout *logout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
logout_wqe = (struct bnx2i_logout_request *)
@@ -698,9 +688,9 @@ void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
*
* routine to handle connection offload/destroy request timeout
*/
-void bnx2i_ep_ofld_timer(unsigned long data)
+void bnx2i_ep_ofld_timer(struct timer_list *t)
{
- struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+ struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer);
if (ep->state == EP_STATE_OFLD_START) {
printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 03c104b47f31..de0a507577ef 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1611,9 +1611,8 @@ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
* this should normally not sleep for a long time so it should
* not disrupt the caller.
*/
+ timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
- bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
- bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
add_timer(&bnx2i_conn->ep->ofld_timer);
/* update iSCSI context for this conn, wait for CNIC to complete */
wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
@@ -1729,10 +1728,8 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
}
ep->state = EP_STATE_CLEANUP_START;
- init_timer(&ep->ofld_timer);
+ timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
- ep->ofld_timer.function = bnx2i_ep_ofld_timer;
- ep->ofld_timer.data = (unsigned long) ep;
add_timer(&ep->ofld_timer);
bnx2i_ep_destroy_list_add(hba, ep);
@@ -1835,10 +1832,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
bnx2i_ep->state = EP_STATE_OFLD_START;
bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
- init_timer(&bnx2i_ep->ofld_timer);
+ timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
- bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
- bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
add_timer(&bnx2i_ep->ofld_timer);
if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
@@ -2054,10 +2049,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
session = conn->session;
}
- init_timer(&bnx2i_ep->ofld_timer);
+ timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
- bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
- bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
add_timer(&bnx2i_ep->ofld_timer);
if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 6dc96c8dfe75..d4c2a2e4c5d4 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ASCII values for a number of symbolic constants, printing functions,
* etc.
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index 3681a3fbd499..d047e22eac0d 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
## Chelsio FCoE driver
#
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 5be0086142ca..0bd1131b6cc9 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3347,9 +3347,10 @@ csio_mberr_worker(void *data)
*
**/
static void
-csio_hw_mb_timer(uintptr_t data)
+csio_hw_mb_timer(struct timer_list *t)
{
- struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mbm *mbm = from_timer(mbm, t, timer);
+ struct csio_hw *hw = mbm->hw;
struct csio_mb *mbp = NULL;
spin_lock_irq(&hw->lock);
@@ -3715,9 +3716,9 @@ csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
* Return - none.
*/
static void
-csio_mgmt_tmo_handler(uintptr_t data)
+csio_mgmt_tmo_handler(struct timer_list *t)
{
- struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
+ struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
struct list_head *tmp;
struct csio_ioreq *io_req;
@@ -3797,11 +3798,7 @@ csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
static int
csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
{
- struct timer_list *timer = &mgmtm->mgmt_timer;
-
- init_timer(timer);
- timer->function = csio_mgmt_tmo_handler;
- timer->data = (unsigned long)mgmtm;
+ timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);
INIT_LIST_HEAD(&mgmtm->active_q);
INIT_LIST_HEAD(&mgmtm->cbfn_q);
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 667046419b19..30f5f523c8cc 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -368,6 +368,9 @@ struct csio_hw_stats {
#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
* enabled?
*/
+#define CSIO_HWF_ROOT_NO_RELAXED_ORDERING 0x00000400 /* Is PCIe relaxed
+ * ordering enabled
+ */
#define csio_is_hw_intr_enabled(__hw) \
((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 28a9c7d706cb..cb1711a5d7a3 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -968,6 +968,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_exit;
}
+ if (!pcie_relaxed_ordering_enabled(pdev))
+ hw->flags |= CSIO_HWF_ROOT_NO_RELAXED_ORDERING;
+
pci_set_drvdata(pdev, hw);
rv = csio_hw_start(hw);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 9451787ca7f2..931b1d8f9f3e 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -491,6 +491,7 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
uint32_t iq_start_stop = (iq_params->iq_start) ?
FW_IQ_CMD_IQSTART_F :
FW_IQ_CMD_IQSTOP_F;
+ int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
/*
* If this IQ write is cascaded with IQ alloc request, do not
@@ -537,6 +538,8 @@ csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
cmdp->iqns_to_fl0congen |= htonl(
FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
@@ -1644,13 +1647,10 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
*/
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
- void (*timer_fn)(uintptr_t))
+ void (*timer_fn)(struct timer_list *))
{
- struct timer_list *timer = &mbm->timer;
-
- init_timer(timer);
- timer->function = timer_fn;
- timer->data = (unsigned long)hw;
+ mbm->hw = hw;
+ timer_setup(&mbm->timer, timer_fn, 0);
INIT_LIST_HEAD(&mbm->req_q);
INIT_LIST_HEAD(&mbm->cbfn_q);
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
index 1bc82d0bc260..a6823df73015 100644
--- a/drivers/scsi/csiostor/csio_mb.h
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -137,6 +137,7 @@ struct csio_mbm {
uint32_t a_mbox; /* Async mbox num */
uint32_t intr_idx; /* Interrupt index */
struct timer_list timer; /* Mbox timer */
+ struct csio_hw *hw; /* Hardware pointer */
struct list_head req_q; /* Mbox request queue */
struct list_head cbfn_q; /* Mbox completion q */
struct csio_mb *mcurrent; /* Current mailbox */
@@ -252,7 +253,7 @@ void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
/* MB module functions */
int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
- void (*)(uintptr_t));
+ void (*)(struct timer_list *));
void csio_mbm_exit(struct csio_mbm *);
void csio_mb_intr_enable(struct csio_hw *);
void csio_mb_intr_disable(struct csio_hw *);
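
Because from_timer() can only recover the structure that embeds the timer, the csio hunks add a struct csio_hw back-pointer to struct csio_mbm and fill it in csio_mbm_init(); the timeout handler then reaches the hw through that pointer. A hedged sketch of the same idea with purely illustrative names:

#include <linux/timer.h>

struct my_hw {
	int mb_pending;
};

struct my_mbm {
	struct timer_list timer;
	struct my_hw *hw;		/* back-pointer filled in at init time */
};

static void my_mb_timer(struct timer_list *t)
{
	struct my_mbm *mbm = from_timer(mbm, t, timer);
	struct my_hw *hw = mbm->hw;	/* reach the owning hw via the back-pointer */

	hw->mb_pending = 0;
}

static void my_mbm_init(struct my_mbm *mbm, struct my_hw *hw,
			void (*timer_fn)(struct timer_list *))
{
	mbm->hw = hw;
	timer_setup(&mbm->timer, timer_fn, 0);
}
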
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 7b09e7ddf35e..bf07735275a4 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -545,10 +545,10 @@ static int act_open_rpl_status_to_errno(int status)
}
}
-static void act_open_retry_timer(unsigned long data)
+static void act_open_retry_timer(struct timer_list *t)
{
+ struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
struct sk_buff *skb;
- struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1d02cf9fe06c..406e94312d4e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -872,10 +872,10 @@ static int act_open_rpl_status_to_errno(int status)
}
}
-static void csk_act_open_retry_timer(unsigned long data)
+static void csk_act_open_retry_timer(struct timer_list *t)
{
struct sk_buff *skb = NULL;
- struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+ struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
struct l2t_entry *);
@@ -1575,6 +1575,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_free_cpl_skbs(csk);
+ cxgbi_sock_purge_write_queue(csk);
if (csk->wr_cred != csk->wr_max_cred) {
cxgbi_sock_purge_wr_queue(csk);
cxgbi_sock_reset_wr_list(csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 512c8f1ea5b0..ce1336414e0a 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -572,7 +572,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
kref_init(&csk->refcnt);
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
- setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
+ timer_setup(&csk->retry_timer, NULL, 0);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -688,8 +688,6 @@ rel_neigh:
rel_rt:
ip_rt_put(rt);
- if (csk)
- cxgbi_sock_closed(csk);
err_out:
return ERR_PTR(err);
}
@@ -1889,16 +1887,13 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct scsi_cmnd *sc = task->sc;
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
tcp_task->dd_data = tdata;
task->hdr = NULL;
- if (tdata->skb) {
- kfree_skb(tdata->skb);
- tdata->skb = NULL;
- }
-
if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
(opcode == ISCSI_OP_SCSI_DATA_OUT ||
(opcode == ISCSI_OP_SCSI_CMD &&
@@ -1910,15 +1905,23 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- struct cxgbi_sock *csk = cconn->cep->csk;
- struct net_device *ndev = cdev->ports[csk->port_id];
ndev->stats.tx_dropped++;
return -ENOMEM;
}
- skb_get(tdata->skb);
skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
- task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+
+ if (task->sc) {
+ task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+ } else {
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ if (!task->hdr) {
+ __kfree_skb(tdata->skb);
+ tdata->skb = NULL;
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ }
task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
/* data_out uses scsi_cmd's itt */
@@ -2062,9 +2065,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
unsigned int datalen;
int err;
- if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
+ if (!skb) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, skb 0x%p\n", task, skb);
+ "task 0x%p\n", task);
return 0;
}
@@ -2076,6 +2079,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return -EPIPE;
}
+ tdata->skb = NULL;
datalen = skb->data_len;
/* write ppod first if using ofldq to write ppod */
@@ -2089,6 +2093,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
/* continue. Let fl get the data */
}
+ if (!task->sc)
+ memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
+
err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
if (err > 0) {
int pdulen = err;
@@ -2104,7 +2111,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
- cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
return 0;
}
@@ -2113,6 +2119,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
task, skb, skb->len, skb->data_len, err);
/* reset skb to send when we are called again */
+ tdata->skb = skb;
return err;
}
@@ -2120,8 +2127,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
- __kfree_skb(tdata->skb);
- tdata->skb = NULL;
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
@@ -2146,9 +2152,14 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
task, tdata->skb, task->hdr_itt);
tcp_task->dd_data = NULL;
+
+ if (!task->sc)
+ kfree(task->hdr);
+ task->hdr = NULL;
+
/* never reached the xmit task callout */
if (tdata->skb) {
- kfree_skb(tdata->skb);
+ __kfree_skb(tdata->skb);
tdata->skb = NULL;
}
@@ -2556,7 +2567,10 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
goto err_out;
}
- ifindex = hba->ndev->ifindex;
+ rtnl_lock();
+ if (!vlan_uses_dev(hba->ndev))
+ ifindex = hba->ndev->ifindex;
+ rtnl_unlock();
}
if (dst_addr->sa_family == AF_INET) {
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 31a5816c2e8d..dcb190e75343 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -205,7 +205,6 @@ enum cxgbi_skcb_flags {
SKCBF_TX_NEED_HDR, /* packet needs a header */
SKCBF_TX_MEM_WRITE, /* memory write */
SKCBF_TX_FLAG_COMPL, /* wr completion flag */
- SKCBF_TX_DONE, /* skb tx done */
SKCBF_RX_COALESCED, /* received whole pdu */
SKCBF_RX_HDR, /* received pdu header */
SKCBF_RX_DATA, /* received pdu payload */
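
For tasks that carry no SCSI command (login, text, nop-out, logout, TMF), the libcxgbi hunks above stop pointing task->hdr into the skb: alloc_pdu() allocates a standalone header buffer, xmit_pdu() copies it into the skb head just before sending, and cleanup_task() frees it. A rough sketch of that allocate-then-copy life cycle, with illustrative names and sizes rather than the driver's own:

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

#define MY_PDU_HDR_MAX	48		/* illustrative BHS + AHS budget */

struct my_task {
	struct sk_buff *skb;
	void *hdr;
	bool has_sc;			/* true when a SCSI command is attached */
};

static int my_alloc_pdu(struct my_task *task, unsigned int hw_rsvd)
{
	task->skb = alloc_skb(MY_PDU_HDR_MAX + hw_rsvd, GFP_ATOMIC);
	if (!task->skb)
		return -ENOMEM;
	skb_reserve(task->skb, hw_rsvd);

	if (task->has_sc) {
		/* Data path: build the header directly in the skb head. */
		task->hdr = task->skb->data;
	} else {
		/* Control path: keep the header in its own buffer for now. */
		task->hdr = kzalloc(MY_PDU_HDR_MAX, GFP_KERNEL);
		if (!task->hdr) {
			kfree_skb(task->skb);
			task->skb = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

static void my_finish_pdu(struct my_task *task)
{
	/* Control-path headers are copied into the skb only at xmit time. */
	if (!task->has_sc)
		memcpy(task->skb->data, task->hdr, MY_PDU_HDR_MAX);
}
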
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 76b8b7eed0c0..38b3a9c84fd1 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -1634,7 +1634,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
ssize_t vpd_size;
char vpd_data[CXLFLASH_VPD_LEN];
char tmp_buf[WWPN_BUF_LEN] = { 0 };
- char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
+ const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
+ cfg->dev_id->driver_data;
+ const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
+ const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
@@ -1671,17 +1674,24 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
* value. Note that we must copy to a temporary buffer
* because the conversion service requires that the ASCII
* string be terminated.
+ *
+ * Allow for WWPN not being found for all devices, setting
+ * the returned WWPN to zero when not found. Notify with a
+ * log error for cards that should have had WWPN keywords
+ * in the VPD - cards requiring WWPN will not have their
+ * ports programmed and will operate in an undefined state.
*/
for (k = 0; k < cfg->num_fc_ports; k++) {
j = ro_size;
i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
- if (unlikely(i < 0)) {
- dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
+ if (i < 0) {
+ if (wwpn_vpd_required)
+ dev_err(dev, "%s: Port %d WWPN not found\n",
+ __func__, k);
+ wwpn[k] = 0ULL;
+ continue;
}
j = pci_vpd_info_field_size(&vpd_data[i]);
@@ -3145,7 +3155,7 @@ static struct scsi_host_template driver_template = {
* Device dependent values
*/
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- 0ULL };
+ CXLFLASH_WWPN_VPD_REQUIRED };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
@@ -3383,12 +3393,6 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
goto out;
}
- if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
- ubuf, ulen))) {
- rc = -EFAULT;
- goto out;
- }
-
buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
if (unlikely(!buf)) {
rc = -ENOMEM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 880e348ed5c9..ba0108a7a9c2 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -95,7 +95,8 @@ enum undo_level {
struct dev_dependent_vals {
u64 max_sectors;
u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
+#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
+#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
};
struct asyc_intr_info {
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 09daa86670fc..bedf1ce2f33c 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -548,7 +548,4 @@ struct sisl_rht_entry_f1 {
#define TMF_LUN_RESET 0x1U
#define TMF_CLEAR_ACA 0x2U
-
-#define SISLITE_MAX_WS_BLOCKS 512
-
#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ed46e8df2e42..170fff5aeff6 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -165,7 +165,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
struct llun_info *lli = arg;
u64 ctxid = DECODE_CTXID(rctxid);
int rc;
- pid_t pid = current->tgid, ctxpid = 0;
+ pid_t pid = task_tgid_nr(current), ctxpid = 0;
if (ctx_ctrl & CTX_CTRL_FILE) {
lli = NULL;
@@ -173,7 +173,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
}
if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = current->parent->tgid;
+ pid = task_ppid_nr(current);
if (likely(ctxid < MAX_CONTEXT)) {
while (true) {
@@ -824,7 +824,7 @@ static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->pid = current->tgid; /* tgid = pid */
+ ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
ctxi->file = file;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 703bf1e9a64a..5deef57a7834 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -428,12 +428,14 @@ static int write_same16(struct scsi_device *sdev,
u8 *sense_buf = NULL;
int rc = 0;
int result = 0;
- int ws_limit = SISLITE_MAX_WS_BLOCKS;
u64 offset = lba;
int left = nblks;
- u32 to = sdev->request_queue->rq_timeout;
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
+ const u32 s = ilog2(sdev->sector_size) - 9;
+ const u32 to = sdev->request_queue->rq_timeout;
+ const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
+ REQ_OP_WRITE_SAME) >> s;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
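
The write_same16() hunk above replaces the fixed SISLITE_MAX_WS_BLOCKS limit with a value derived from the request queue: the queue reports its WRITE SAME limit in 512-byte sectors, so it is shifted right by ilog2(sector_size) - 9 to express it in device logical blocks. A standalone arithmetic check of that conversion, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int sector_size = 4096;	/* device logical block size */
	unsigned int max_ws_sectors = 65535;	/* queue limit, in 512-byte units */
	unsigned int shift = __builtin_ctz(sector_size) - 9;	/* ilog2(4096) - 9 = 3 */
	unsigned int ws_limit = max_ws_sectors >> shift;

	/* 65535 >> 3 = 8191 logical blocks per WRITE SAME(16) */
	printf("ws_limit = %u blocks\n", ws_limit);
	return 0;
}
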
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 5ee7f44cf869..60ef8df42b95 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -395,7 +395,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
struct ScsiReqBlk *srb);
static void set_xfer_rate(struct AdapterCtlBlk *acb,
struct DeviceCtlBlk *dcb);
-static void waiting_timeout(unsigned long ptr);
+static void waiting_timeout(struct timer_list *t);
/*---------------------------------------------------------------------------
@@ -857,9 +857,6 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
if (timer_pending(&acb->waiting_timer))
return;
- init_timer(&acb->waiting_timer);
- acb->waiting_timer.function = waiting_timeout;
- acb->waiting_timer.data = (unsigned long) acb;
if (time_before(jiffies + to, acb->last_reset - HZ / 2))
acb->waiting_timer.expires =
acb->last_reset - HZ / 2 + 1;
@@ -936,10 +933,10 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
/* Wake up waiting queue */
-static void waiting_timeout(unsigned long ptr)
+static void waiting_timeout(struct timer_list *t)
{
unsigned long flags;
- struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
+ struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
dprintkdbg(DBG_1,
"waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
DC395x_LOCK_IO(acb->scsi_host, flags);
@@ -4366,8 +4363,8 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
INIT_LIST_HEAD(&acb->srb_free_list);
/* temp SRB for Q tag used or abort command used */
acb->tmp_srb = &acb->srb;
- init_timer(&acb->waiting_timer);
- init_timer(&acb->selto_timer);
+ timer_setup(&acb->waiting_timer, waiting_timeout, 0);
+ timer_setup(&acb->selto_timer, NULL, 0);
acb->srb_count = DC395x_MAX_SRB_CNT;
diff --git a/drivers/scsi/dc395x.h b/drivers/scsi/dc395x.h
index fbf35e37701e..5379a936141a 100644
--- a/drivers/scsi/dc395x.h
+++ b/drivers/scsi/dc395x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/************************************************************************/
/* */
/* dc395x.h */
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 0962fd544401..fd22dc6ab5d9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1085,11 +1085,11 @@ static void alua_rescan(struct scsi_device *sdev)
static int alua_bus_attach(struct scsi_device *sdev)
{
struct alua_dh_data *h;
- int err, ret = -EINVAL;
+ int err;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
spin_lock_init(&h->pg_lock);
rcu_assign_pointer(h->pg, NULL);
h->init_error = SCSI_DH_OK;
@@ -1098,16 +1098,14 @@ static int alua_bus_attach(struct scsi_device *sdev)
mutex_init(&h->init_mutex);
err = alua_initialize(sdev, h);
- if (err == SCSI_DH_NOMEM)
- ret = -ENOMEM;
if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
goto failed;
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return ret;
+ return err;
}
/*
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 8654e940e1a8..6a2792f3a37e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -490,7 +490,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->lun_state = CLARIION_LUN_UNINITIALIZED;
h->default_sp = CLARIION_UNBOUND_LU;
h->current_sp = CLARIION_UNBOUND_LU;
@@ -510,11 +510,11 @@ static int clariion_bus_attach(struct scsi_device *sdev)
h->default_sp + 'A');
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return -EINVAL;
+ return err;
}
static void clariion_bus_detach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 62d314e07d11..e65a0ebb4b54 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -218,24 +218,28 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->path_state = HP_SW_PATH_UNINITIALIZED;
h->retries = HP_SW_RETRIES;
h->sdev = sdev;
ret = hp_sw_tur(sdev, h);
- if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
+ if (ret != SCSI_DH_OK)
goto failed;
+ if (h->path_state == HP_SW_PATH_UNINITIALIZED) {
+ ret = SCSI_DH_NOSYS;
+ goto failed;
+ }
sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
"active":"passive");
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
failed:
kfree(h);
- return -EINVAL;
+ return ret;
}
static void hp_sw_bus_detach( struct scsi_device *sdev )
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 2ceff585f189..7af31a1247ee 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -729,7 +729,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
- return -ENOMEM;
+ return SCSI_DH_NOMEM;
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
@@ -755,7 +755,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
lun_state[(int)h->lun_state]);
sdev->handler_data = h;
- return 0;
+ return SCSI_DH_OK;
clean_ctlr:
spin_lock(&list_lock);
@@ -764,7 +764,7 @@ clean_ctlr:
failed:
kfree(h);
- return -EINVAL;
+ return err;
}
static void rdac_bus_detach( struct scsi_device *sdev )
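
The four device-handler attach routines above now return SCSI_DH_* status codes (SCSI_DH_OK on success, SCSI_DH_NOMEM on allocation failure, otherwise whatever the initialization step reported) instead of raw -errno values, leaving any errno translation to the caller. A hedged sketch of what such a caller-side mapping could look like; the actual scsi_dh core change is not part of the hunks shown here:

#include <linux/errno.h>
#include <scsi/scsi_dh.h>

/* Illustrative only: fold a SCSI_DH_* status back into -errno.
 * The real scsi_dh core may translate these differently. */
static int dh_status_to_errno(int dh_status)
{
	switch (dh_status) {
	case SCSI_DH_OK:
		return 0;
	case SCSI_DH_NOMEM:
		return -ENOMEM;
	case SCSI_DH_NOSYS:
		return -ENOSYS;
	default:
		return -EINVAL;
	}
}
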
diff --git a/drivers/scsi/eata_generic.h b/drivers/scsi/eata_generic.h
index 5016af5cf860..1a396c5e7f73 100644
--- a/drivers/scsi/eata_generic.h
+++ b/drivers/scsi/eata_generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/********************************************************
* Header file for eata_dma.c and eata_pio.c *
* Linux EATA SCSI drivers *
diff --git a/drivers/scsi/eata_pio.h b/drivers/scsi/eata_pio.h
index 7deeb935748b..5b5e3d13670b 100644
--- a/drivers/scsi/eata_pio.h
+++ b/drivers/scsi/eata_pio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/********************************************************
* Header file for eata_pio.c Linux EATA-PIO SCSI driver *
* (c) 1993-96 Michael Neuffer *
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 81f226be3e3b..4eb14301a497 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -1631,23 +1631,21 @@ void esas2r_adapter_tasklet(unsigned long context)
}
}
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
void esas2r_kickoff_timer(struct esas2r_adapter *a)
{
- init_timer(&a->timer);
+ timer_setup(&a->timer, esas2r_timer_callback, 0);
- a->timer.function = esas2r_timer_callback;
- a->timer.data = (unsigned long)a;
a->timer.expires = jiffies +
msecs_to_jiffies(100);
add_timer(&a->timer);
}
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
{
- struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+ struct esas2r_adapter *a = from_timer(a, t, timer);
set_bit(AF2_TIMER_TICK, &a->flags2);
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 7e8932ae91f8..8163dca2071b 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* esp_scsi.h: Defines and structures for the ESP driver.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 85f9a3eba387..f46b312d04bc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -155,7 +155,7 @@ static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
-
+static void fcoe_vport_remove(struct fc_lport *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
.set_fcoe_ctlr_mode = fcoe_ctlr_mode,
@@ -501,11 +501,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- rtnl_lock();
- if (!fcoe->removed)
- fcoe_interface_remove(fcoe);
- rtnl_unlock();
-
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
@@ -754,7 +749,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
skb_queue_head_init(&port->fcoe_pending_queue);
port->fcoe_pending_queue_active = 0;
- setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
+ timer_setup(&port->timer, fcoe_queue_timer, 0);
fcoe_link_speed_update(lport);
@@ -1014,6 +1009,8 @@ skip_oem:
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
*
+ * Locking: Must be called with the RTNL mutex held.
+ *
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
@@ -1035,14 +1032,12 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
dev_uc_del(netdev, port->data_src_addr);
if (lport->vport)
synchronize_net();
else
fcoe_interface_remove(fcoe);
- rtnl_unlock();
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
@@ -1903,7 +1898,14 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(ctlr->lp);
- queue_work(fcoe_wq, &port->destroy_work);
+ fcoe_vport_remove(lport);
+ mutex_lock(&fcoe_config_mutex);
+ fcoe_if_destroy(lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ fcoe_interface_cleanup(fcoe);
+ mutex_unlock(&fcoe_config_mutex);
+ fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -2108,30 +2110,10 @@ static void fcoe_destroy_work(struct work_struct *work)
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
struct fcoe_interface *fcoe;
- struct Scsi_Host *shost;
- struct fc_host_attrs *fc_host;
- unsigned long flags;
- struct fc_vport *vport;
- struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
- shost = port->lport->host;
- fc_host = shost_to_fc_host(shost);
-
- /* Loop through all the vports and mark them for deletion */
- spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
- continue;
- } else {
- vport->flags |= FC_VPORT_DELETING;
- queue_work(fc_host_work_q(shost),
- &vport->vport_delete_work);
- }
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
- flush_workqueue(fc_host_work_q(shost));
+ fcoe_vport_remove(port->lport);
mutex_lock(&fcoe_config_mutex);
@@ -2139,7 +2121,11 @@ static void fcoe_destroy_work(struct work_struct *work)
ctlr = fcoe_to_ctlr(fcoe);
cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ rtnl_lock();
fcoe_if_destroy(port->lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
@@ -2254,6 +2240,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
rc = -EIO;
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
@@ -2738,13 +2726,46 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
fcoe_if_destroy(vn_port);
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
}
/**
+ * fcoe_vport_remove() - remove attached vports
+ * @lport: lport for which the vports should be removed
+ */
+static void fcoe_vport_remove(struct fc_lport *lport)
+{
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
+
+ shost = lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+}
+
+/**
* fcoe_vport_disable() - change vport state
* @vport: vport to bring online/offline
* @disable: should the vport be disabled?
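
fcoe_if_destroy() is now documented as requiring the RTNL mutex, so its callers take rtnl_lock() themselves and, while still holding it, also drop the interface if it has not been removed yet; fcoe_interface_cleanup() no longer touches RTNL. A minimal sketch of that calling convention, mirroring the destroy-work hunk above (same locking rule assumed, helper name is illustrative):

static void my_fcoe_teardown(struct fc_lport *lport, struct fcoe_interface *fcoe)
{
	rtnl_lock();
	fcoe_if_destroy(lport);		/* must now run under RTNL */
	if (!fcoe->removed)
		fcoe_interface_remove(fcoe);
	rtnl_unlock();

	fcoe_interface_cleanup(fcoe);	/* RTNL is no longer taken in here */
}
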
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index fff6f1851dc1..097f37de6ce9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -49,7 +49,7 @@
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
@@ -156,7 +156,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
mutex_init(&fip->ctlr_mutex);
spin_lock_init(&fip->ctlr_lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
- setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+ timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
@@ -1786,9 +1786,9 @@ unlock:
* fcoe_ctlr_timeout() - FIP timeout handler
* @arg: The FCoE controller that timed out
*/
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
{
- struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+ struct fcoe_ctlr *fip = from_timer(fip, t, timer);
schedule_work(&fip->timer_work);
}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 375c536cbc68..f4909cd206d3 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -32,13 +32,13 @@ MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
MODULE_LICENSE("GPL v2");
-static int fcoe_transport_create(const char *, struct kernel_param *);
-static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_create(const char *, const struct kernel_param *);
+static int fcoe_transport_destroy(const char *, const struct kernel_param *);
static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
-static int fcoe_transport_enable(const char *, struct kernel_param *);
-static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int fcoe_transport_enable(const char *, const struct kernel_param *);
+static int fcoe_transport_disable(const char *, const struct kernel_param *);
static int libfcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr);
@@ -455,9 +455,11 @@ EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
*
* Calls fcoe_check_wait_queue on timeout
*/
-void fcoe_queue_timer(ulong lport)
+void fcoe_queue_timer(struct timer_list *t)
{
- fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+ struct fcoe_port *port = from_timer(port, t, timer);
+
+ fcoe_check_wait_queue(port->lport, NULL);
}
EXPORT_SYMBOL_GPL(fcoe_queue_timer);
@@ -865,7 +867,8 @@ EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
*
* Returns: 0 for success
*/
-static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_create(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -930,7 +933,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_destroy(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -974,7 +978,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_disable(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
@@ -1008,7 +1013,8 @@ out_nodev:
*
* Returns: 0 for success
*/
-static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_transport_enable(const char *buffer,
+ const struct kernel_param *kp)
{
int rc = -ENODEV;
struct net_device *netdev = NULL;
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
index d3bb16d11401..b8bdfab51a58 100644
--- a/drivers/scsi/fcoe/libfcoe.h
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FCOE_LIBFCOE_H_
#define _FCOE_LIBFCOE_H_
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 383598fadf04..6214a6b2e96d 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FCOE_FNIC) += fnic.o
fnic-y := \
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index aacadbf20b69..e52599f44170 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -407,18 +407,18 @@ static int fnic_notify_set(struct fnic *fnic)
return err;
}
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, fip_timer);
fnic_handle_fip_timer(fnic);
}
@@ -777,8 +777,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
- (unsigned long)fnic);
+ timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
@@ -809,8 +808,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- setup_timer(&fnic->notify_timer,
- fnic_notify_timer, (unsigned long)fnic);
+ timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index a4473356a9dc..c35f05c4c6bb 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3705,7 +3705,7 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
#ifdef GDTH_STATISTICS
static u8 gdth_timer_running;
-static void gdth_timeout(unsigned long data)
+static void gdth_timeout(struct timer_list *unused)
{
u32 i;
Scsi_Cmnd *nscp;
@@ -3743,8 +3743,6 @@ static void gdth_timer_init(void)
gdth_timer_running = 1;
TRACE2(("gdth_detect(): Initializing timer !\n"));
gdth_timer.expires = jiffies + HZ;
- gdth_timer.data = 0L;
- gdth_timer.function = gdth_timeout;
add_timer(&gdth_timer);
}
#else
@@ -5165,7 +5163,7 @@ static int __init gdth_init(void)
/* initializations */
gdth_polling = TRUE;
gdth_clear_events();
- init_timer(&gdth_timer);
+ timer_setup(&gdth_timer, gdth_timeout, 0);
/* As default we do not probe for EISA or ISA controllers */
if (probe_eisa_isa) {
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 3fd8b83ffbf9..95fc720c1b30 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GDTH_H
#define _GDTH_H
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
index b004c6165887..4c91894ac244 100644
--- a/drivers/scsi/gdth_ioctl.h
+++ b/drivers/scsi/gdth_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GDTH_IOCTL_H
#define _GDTH_IOCTL_H
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index d08b2716752c..20add49cdd32 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* gdth_proc.c
* $Id: gdth_proc.c,v 1.43 2006/01/11 16:15:00 achim Exp $
*/
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
index aaa618198972..d7d0aa283695 100644
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GDTH_PROC_H
#define _GDTH_PROC_H
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index 852913cde5dd..61c1a3584461 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef GVP11_H
/* $Id: gvp11.h,v 1.4 1997/01/19 23:07:12 davem Exp $
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 07f4a4cfbec1..83357b0367d8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -29,7 +29,7 @@
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
#define HISI_SAS_QUEUE_SLOTS 512
-#define HISI_SAS_MAX_ITCT_ENTRIES 2048
+#define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
@@ -96,6 +96,7 @@ struct hisi_sas_hw_error {
int shift;
const char *msg;
int reg;
+ const struct hisi_sas_hw_error *sub;
};
struct hisi_sas_phy {
@@ -103,7 +104,6 @@ struct hisi_sas_phy {
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct timer_list timer;
struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
@@ -198,7 +198,7 @@ struct hisi_sas_hw {
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
- void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
@@ -342,7 +342,11 @@ struct hisi_sas_initial_fis {
};
struct hisi_sas_breakpoint {
- u8 data[128]; /*io128 byte*/
+ u8 data[128];
+};
+
+struct hisi_sas_sata_breakpoint {
+ struct hisi_sas_breakpoint tag[32];
};
struct hisi_sas_sge {
@@ -420,4 +424,6 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
+extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
#endif
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 16664f2e15fb..5f503cb09508 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -185,13 +185,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ if (!task->lldd_task)
+ return;
+
+ task->lldd_task = NULL;
+
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter, slot->n_elem,
task->data_dir);
- task->lldd_task = NULL;
-
if (sas_dev)
atomic64_dec(&sas_dev->running_req);
}
@@ -199,8 +202,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (slot->buf)
dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
-
list_del_init(&slot->entry);
+ slot->buf = NULL;
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
@@ -401,7 +404,9 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
goto err_out_buf;
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -505,9 +510,10 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
+ unsigned long flags;
int i;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
@@ -524,7 +530,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
break;
}
}
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
@@ -627,7 +633,6 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
phy->hisi_hba = hisi_hba;
phy->port = NULL;
- init_timer(&phy->timer);
sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -762,7 +767,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
case PHY_FUNC_LINK_RESET:
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
- hisi_hba->hw->phy_enable(hisi_hba, phy_no);
+ hisi_hba->hw->phy_start(hisi_hba, phy_no);
break;
case PHY_FUNC_DISABLE:
@@ -792,9 +797,10 @@ static void hisi_sas_task_done(struct sas_task *task)
complete(&task->slow_task->completion);
}
-static void hisi_sas_tmf_timedout(unsigned long data)
+static void hisi_sas_tmf_timedout(struct timer_list *t)
{
- struct sas_task *task = (struct sas_task *)data;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
@@ -833,7 +839,6 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.data = (unsigned long) task;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -1046,7 +1051,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
u32 old_state, state;
@@ -1074,7 +1078,6 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
hisi_sas_release_tasks(hisi_hba);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
/* Init and wait for PHYs to come up and all libsas event finished. */
@@ -1160,7 +1163,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED) {
+ if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1388,8 +1391,9 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
if (rc)
goto err_out_buf;
-
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1447,7 +1451,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->dev = device;
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.data = (unsigned long)task;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
@@ -1471,6 +1474,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
dev_err(dev, "internal task abort: timeout.\n");
+ goto exit;
}
}
@@ -1542,6 +1546,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ tasklet_kill(&cq->tasklet);
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
@@ -1610,7 +1625,7 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
memset(hisi_hba->breakpoint, 0, s);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
@@ -1703,7 +1718,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->initial_fis)
goto err_out;
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
@@ -1768,7 +1783,7 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->initial_fis,
hisi_hba->initial_fis_dma);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
if (hisi_hba->sata_breakpoint)
dma_free_coherent(dev, s,
hisi_hba->sata_breakpoint,
@@ -1779,13 +1794,14 @@ void hisi_sas_free(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
-static void hisi_sas_rst_work_handler(struct work_struct *work)
+void hisi_sas_rst_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, rst_work);
hisi_sas_controller_reset(hisi_hba);
}
+EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
@@ -1877,7 +1893,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
hisi_hba->shost = shost;
SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
- init_timer(&hisi_hba->timer);
+ timer_setup(&hisi_hba->timer, NULL, 0);
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
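
The hisi_sas hunks above size the SATA breakpoint area per ITCT entry instead of per command slot: each device gets one struct hisi_sas_sata_breakpoint holding 32 per-tag breakpoints of 128 bytes, and HISI_SAS_MAX_ITCT_ENTRIES (now 1024) of them are allocated, replacing the old max_command_entries * sizeof(breakpoint) * 2 sizing. A standalone size check using exactly the struct layout from the header hunk:

#include <stdio.h>

struct hisi_sas_breakpoint {
	unsigned char data[128];
};

struct hisi_sas_sata_breakpoint {
	struct hisi_sas_breakpoint tag[32];
};

int main(void)
{
	/* One SATA breakpoint block per ITCT entry: 32 NCQ tags * 128 bytes. */
	unsigned long per_dev = sizeof(struct hisi_sas_sata_breakpoint);
	unsigned long total = 1024UL * per_dev;	/* HISI_SAS_MAX_ITCT_ENTRIES */

	printf("per device: %lu bytes, total: %lu bytes\n", per_dev, total);
	return 0;
}
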
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 08eca20b0b81..dc6eca8d6afd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -807,9 +807,9 @@ static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
start_phy_v1_hw(hisi_hba, phy_no);
}
-static void start_phys_v1_hw(unsigned long data)
+static void start_phys_v1_hw(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -828,7 +828,7 @@ static void phys_init_v1_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
}
- setup_timer(timer, start_phys_v1_hw, (unsigned long)hisi_hba);
+ timer_setup(timer, start_phys_v1_hw, 0);
mod_timer(timer, jiffies + HZ);
}
@@ -1857,7 +1857,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.start_delivery = start_delivery_v1_hw,
.slot_complete = slot_complete_v1_hw,
.phys_init = phys_init_v1_hw,
- .phy_enable = enable_phy_v1_hw,
+ .phy_start = start_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
.phy_set_linkrate = phy_set_linkrate_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 779af979b6db..5d3467fd728d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -406,80 +406,70 @@ static const struct hisi_sas_hw_error one_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
.msk = HGC_DQE_ECC_1B_ADDR_MSK,
.shift = HGC_DQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_dqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
.msk = HGC_IOST_ECC_1B_ADDR_MSK,
.shift = HGC_IOST_ECC_1B_ADDR_OFF,
- .msg = "hgc_iost_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
- .msg = "hgc_itct_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
.msk = HGC_CQE_ECC_1B_ADDR_MSK,
.shift = HGC_CQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_cqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -489,80 +479,70 @@ static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
.msk = HGC_DQE_ECC_MB_ADDR_MSK,
.shift = HGC_DQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_dqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
.msk = HGC_IOST_ECC_MB_ADDR_MSK,
.shift = HGC_IOST_ECC_MB_ADDR_OFF,
- .msg = "hgc_iost_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
- .msg = "hgc_itct_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
.msk = HGC_CQE_ECC_MB_ADDR_MSK,
.shift = HGC_CQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_cqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
@@ -728,7 +708,7 @@ enum {
#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \
err_phase == 0x20 || err_phase == 0x40)
-static void link_timeout_disable_link(unsigned long data);
+static void link_timeout_disable_link(struct timer_list *t);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
@@ -843,8 +823,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
int sata_idx = -1;
+ unsigned long flags;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
if (sata_dev)
if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
@@ -874,7 +855,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
}
out:
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
@@ -1270,9 +1251,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
upper_32_bits(hisi_hba->initial_fis_dma));
}
-static void link_timeout_enable_link(unsigned long data)
+static void link_timeout_enable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i, reg_val;
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1291,9 +1272,9 @@ static void link_timeout_enable_link(unsigned long data)
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
-static void link_timeout_disable_link(unsigned long data)
+static void link_timeout_disable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i, reg_val;
reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
@@ -1314,7 +1295,6 @@ static void link_timeout_disable_link(unsigned long data)
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
- hisi_hba->timer.data = (unsigned long)hisi_hba;
hisi_hba->timer.function = link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
@@ -2377,7 +2357,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
@@ -2574,9 +2556,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
return 0;
}
-static void hisi_sas_internal_abort_quirk_timeout(unsigned long data)
+static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
{
- struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data;
+ struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer);
struct hisi_sas_port *port = slot->port;
struct asd_sas_port *asd_sas_port;
struct asd_sas_phy *sas_phy;
@@ -2619,8 +2601,7 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
struct timer_list *timer = &slot->internal_abort_timer;
/* setup the quirk timer */
- setup_timer(timer, hisi_sas_internal_abort_quirk_timeout,
- (unsigned long)slot);
+ timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0);
/* Set the timeout to 10ms less than internal abort timeout */
mod_timer(timer, jiffies + msecs_to_jiffies(100));
@@ -2953,25 +2934,58 @@ static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-#define AXI_ERR_NR 8
-static const char axi_err_info[AXI_ERR_NR][32] = {
- "IOST_AXI_W_ERR",
- "IOST_AXI_R_ERR",
- "ITCT_AXI_W_ERR",
- "ITCT_AXI_R_ERR",
- "SATA_AXI_W_ERR",
- "SATA_AXI_R_ERR",
- "DQE_AXI_R_ERR",
- "CQE_AXI_W_ERR"
+static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
};
-#define FIFO_ERR_NR 5
-static const char fifo_err_info[FIFO_ERR_NR][32] = {
- "CQE_WINFO_FIFO",
- "CQE_MSG_FIFIO",
- "GETDQE_FIFO",
- "CMDP_FIFO",
- "AWTCTRL_FIFO"
+static const struct hisi_sas_hw_error fatal_axi_errors[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
};
static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
@@ -2979,98 +2993,47 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
struct hisi_hba *hisi_hba = p;
u32 irq_value, irq_msk, err_value;
struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *axi_error;
+ int i;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (irq_value) {
- if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
- dev_warn(dev, "write pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 <<
- ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
- dev_warn(dev, "iptt no match slot error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) {
- dev_warn(dev, "read pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_AXI_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
-
- for (i = 0; i < AXI_ERR_NR; i++) {
- if (err_value & BIT(i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- axi_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- }
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_FIFO_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) {
+ axi_error = &fatal_axi_errors[i];
+ if (!(irq_value & axi_error->irq_msk))
+ continue;
- for (i = 0; i < FIFO_ERR_NR; i++) {
- if (err_value & BIT(AXI_ERR_NR + i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- fifo_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ 1 << axi_error->shift);
+ if (axi_error->sub) {
+ const struct hisi_sas_hw_error *sub = axi_error->sub;
+
+ err_value = hisi_sas_read32(hisi_hba, axi_error->reg);
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+ dev_warn(dev, "%s (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
-
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_LM_OFF);
- dev_warn(dev, "LM add/fetch list error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_ABT_OFF);
- dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
- irq_value);
+ } else {
+ dev_warn(dev, "%s (0x%x) found!\n",
+ axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
+ }
- if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
- u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- u32 dev_id = reg_val & ITCT_DEV_MSK;
- struct hisi_sas_device *sas_dev =
- &hisi_hba->devices[dev_id];
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id];
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- complete(sas_dev->completion);
- }
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
}
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
@@ -3410,6 +3373,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v2_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -3460,7 +3424,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.start_delivery = start_delivery_v2_hw,
.slot_complete = slot_complete_v2_hw,
.phys_init = phys_init_v2_hw,
- .phy_enable = enable_phy_v2_hw,
+ .phy_start = start_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
.get_events = phy_get_events_v2_hw,
@@ -3493,16 +3457,11 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- int i;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
- for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-
- tasklet_kill(&cq->tasklet);
- }
+ hisi_sas_kill_tasklets(hisi_hba);
return hisi_sas_remove(pdev);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2e5fa9717be8..19b1f2ffec17 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -53,6 +53,11 @@
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
+#define HGC_AXI_FIFO_ERR_INFO 0x154
+#define AXI_ERR_INFO_OFF 0
+#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF 8
+#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -135,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
@@ -154,6 +160,10 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
@@ -171,8 +181,11 @@
#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF 0
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
+#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
+#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
+#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
+#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
-#define MAX_ITCT_HW 4096 /* max the hw can support */
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
@@ -377,6 +390,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
@@ -388,7 +402,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
@@ -407,7 +421,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -422,6 +436,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
0xa03e8);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
+ 0x2a0a80);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
/* Delivery queue */
@@ -575,35 +591,24 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
static void free_device_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
+ DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ sas_dev->completion = &completion;
+
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
/* clear the itct table*/
- reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- udelay(10);
- reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
- dev_dbg(dev, "got clear ITCT done interrupt\n");
-
- /* invalid the itct state*/
- memset(itct, 0, sizeof(struct hisi_sas_itct));
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- ENT_INT_SRC3_ITC_INT_MSK);
-
- /* clear the itct */
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- }
+ wait_for_completion(sas_dev->completion);
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
}
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
@@ -755,10 +760,12 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
{
int i, bitmap = 0;
u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
for (i = 0; i < hisi_hba->n_phy; i++)
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
- bitmap |= 1 << i;
+ if (phy_state & BIT(i))
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= BIT(i);
return bitmap;
}
@@ -988,20 +995,6 @@ err_out_req:
return rc;
}
-static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
-}
-
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
@@ -1050,7 +1043,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -1276,6 +1269,25 @@ static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
return res;
}
+static const struct hisi_sas_hw_error port_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -1301,10 +1313,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if ((irq_msk & (4 << (phy_no * 4))) &&
irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
@@ -1331,6 +1352,114 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
+static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
+};
+
+static const struct hisi_sas_hw_error fatal_axi_error[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
+};
+
+static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
+{
+ u32 irq_value, irq_msk;
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ if (error->sub) {
+ const struct hisi_sas_hw_error *sub = error->sub;
+ u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
+
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ } else {
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ error->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev =
+ &hisi_hba->devices[dev_id];
+
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+}
+
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
@@ -1414,7 +1543,9 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
@@ -1629,6 +1760,15 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
goto free_phy_irq;
}
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ fatal_axi_int_v3_hw, 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_chnl_interrupt;
+ }
+
/* Init tasklets for cq only */
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
@@ -1656,6 +1796,8 @@ free_cq_irqs:
free_irq(pci_irq_vector(pdev, k+16), cq);
}
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+free_chnl_interrupt:
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
@@ -1749,6 +1891,31 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
return hisi_sas_read32(hisi_hba, PHY_STATE);
}
+static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ u32 reg_value;
+
+ /* loss dword sync */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
+ sphy->loss_of_dword_sync_count += reg_value;
+
+ /* phy reset problem */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
+ sphy->phy_reset_problem_count += reg_value;
+
+ /* invalid dword */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+ sphy->invalid_dword_count += reg_value;
+
+ /* disparity err */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+ sphy->running_disparity_error_count += reg_value;
+
+}
+
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1757,6 +1924,7 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
@@ -1793,7 +1961,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.start_delivery = start_delivery_v3_hw,
.slot_complete = slot_complete_v3_hw,
.phys_init = phys_init_v3_hw,
- .phy_enable = enable_phy_v3_hw,
+ .phy_start = start_phy_v3_hw,
.phy_disable = disable_phy_v3_hw,
.phy_hard_reset = phy_hard_reset_v3_hw,
.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
@@ -1801,6 +1969,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.dereg_device = dereg_device_v3_hw,
.soft_reset = soft_reset_v3_hw,
.get_phys_state = get_phys_state_v3_hw,
+ .get_events = phy_get_events_v3_hw,
};
static struct Scsi_Host *
@@ -1817,13 +1986,14 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
}
hisi_hba = shost_priv(shost);
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
hisi_hba->shost = shost;
SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
- init_timer(&hisi_hba->timer);
+ timer_setup(&hisi_hba->timer, NULL, 0);
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
@@ -1960,11 +2130,11 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
free_irq(pci_irq_vector(pdev, i+16), cq);
- tasklet_kill(&cq->tasklet);
}
pci_free_irq_vectors(pdev);
}
@@ -1980,6 +2150,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
+ hisi_sas_kill_tasklets(hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4ed3d26ffdde..287e5eb0723f 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.20-0"
+#define HPSA_DRIVER_VERSION "3.4.20-125"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -787,7 +787,12 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
}
offload_enabled = hdev->offload_enabled;
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", offload_enabled);
+
+ if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+ else
+ return snprintf(buf, 40, "%s\n",
+ "Not applicable for a controller");
}
#define MAX_PATHS 8
@@ -1270,7 +1275,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
dev->model,
label,
dev->offload_config ? '+' : '-',
- dev->offload_enabled ? '+' : '-',
+ dev->offload_to_be_enabled ? '+' : '-',
dev->expose_device);
}
@@ -1345,36 +1350,42 @@ lun_assigned:
(*nadded)++;
hpsa_show_dev_msg(KERN_INFO, h, device,
device->expose_device ? "added" : "masked");
- device->offload_to_be_enabled = device->offload_enabled;
- device->offload_enabled = 0;
return 0;
}
-/* Update an entry in h->dev[] array. */
+/*
+ * Called during a scan operation.
+ *
+ * Update an entry in h->dev[] array.
+ */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
- int offload_enabled;
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+ /*
+ * ioaccel_handle may have changed for a dual domain disk
+ */
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+
/* Raid offload parameters changed. Careful about the ordering. */
- if (new_entry->offload_config && new_entry->offload_enabled) {
+ if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
/*
* if drive is newly offload_enabled, we want to copy the
* raid map data first. If previously offload_enabled and
* offload_config were set, raid map data had better be
- * the same as it was before. if raid map data is changed
+ * the same as it was before. If raid map data has changed
* then it had better be the case that
* h->dev[entry]->offload_enabled is currently 0.
*/
h->dev[entry]->raid_map = new_entry->raid_map;
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
}
- if (new_entry->hba_ioaccel_enabled) {
+ if (new_entry->offload_to_be_enabled) {
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
}
@@ -1385,17 +1396,18 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h,
/*
* We can turn off ioaccel offload now, but need to delay turning
- * it on until we can update h->dev[entry]->phys_disk[], but we
+ * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
* can't do that until all the devices are updated.
*/
- h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
- if (!new_entry->offload_enabled)
+ h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
+
+ /*
+ * turn ioaccel off immediately if told to do so.
+ */
+ if (!new_entry->offload_to_be_enabled)
h->dev[entry]->offload_enabled = 0;
- offload_enabled = h->dev[entry]->offload_enabled;
- h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
- h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
@@ -1421,9 +1433,8 @@ static void hpsa_scsi_replace_entry(struct ctlr_info *h,
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
+
hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
- new_entry->offload_to_be_enabled = new_entry->offload_enabled;
- new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
@@ -1513,11 +1524,22 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
return 1;
if (dev1->offload_config != dev2->offload_config)
return 1;
- if (dev1->offload_enabled != dev2->offload_enabled)
+ if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
return 1;
if (!is_logical_dev_addr_mode(dev1->scsi3addr))
if (dev1->queue_depth != dev2->queue_depth)
return 1;
+ /*
+ * This can happen for dual domain devices. An active
+ * path change causes the ioaccel handle to change
+ *
+ * for example note the handle differences between p0 and p1
+ * Device WWN ,WWN hash,Handle
+ * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
+ * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
+ */
+ if (dev1->ioaccel_handle != dev2->ioaccel_handle)
+ return 1;
return 0;
}
@@ -1727,6 +1749,11 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
* be 0, but we'll turn it off here just in case
*/
if (!logical_drive->phys_disk[i]) {
+ dev_warn(&h->pdev->dev,
+ "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
+ __func__,
+ h->scsi_host->host_no, logical_drive->bus,
+ logical_drive->target, logical_drive->lun);
logical_drive->offload_enabled = 0;
logical_drive->offload_to_be_enabled = 0;
logical_drive->queue_depth = 8;
@@ -1738,8 +1765,12 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
* way too high for partial stripe writes
*/
logical_drive->queue_depth = qdepth;
- else
- logical_drive->queue_depth = h->nr_cmds;
+ else {
+ if (logical_drive->external)
+ logical_drive->queue_depth = EXTERNAL_QD;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+ }
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
@@ -1759,13 +1790,24 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
/*
* If offload is currently enabled, the RAID map and
* phys_disk[] assignment *better* not be changing
- * and since it isn't changing, we do not need to
- * update it.
+ * because we would be changing ioaccel phys_disk[] pointers
+ * on an ioaccel volume processing I/O requests.
+ *
+ * If an ioaccel volume's status changed, initially because it was
+ * re-configured and thus underwent a transformation, or because
+ * a drive failed, we would have received a state change
+ * request and ioaccel should have been turned off. When the
+ * transformation completes, we get another state change
+ * request to turn ioaccel back on. In this case, we need
+ * to update the ioaccel information.
+ *
+ * Thus: If it is not currently enabled, but will be after
+ * the scan completes, make sure the ioaccel pointers
+ * are up to date.
*/
- if (dev[i]->offload_enabled)
- continue;
- hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
}
}
@@ -1823,11 +1865,13 @@ static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
break;
if (++waits > 20)
break;
+ msleep(1000);
+ }
+
+ if (waits > 20)
dev_warn(&h->pdev->dev,
"%s: removing device with %d outstanding commands!\n",
__func__, cmds);
- msleep(1000);
- }
}
static void hpsa_remove_device(struct ctlr_info *h,
@@ -1838,6 +1882,12 @@ static void hpsa_remove_device(struct ctlr_info *h,
if (!h->scsi_host)
return;
+ /*
+ * Allow for commands to drain
+ */
+ device->removed = 1;
+ hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
if (is_logical_device(device)) { /* RAID */
sdev = scsi_device_lookup(h->scsi_host, device->bus,
device->target, device->lun);
@@ -1855,9 +1905,6 @@ static void hpsa_remove_device(struct ctlr_info *h,
}
} else { /* HBA */
- device->removed = 1;
- hpsa_wait_for_outstanding_commands_for_dev(h, device);
-
hpsa_remove_sas_device(device);
}
}
@@ -1965,8 +2012,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h,
}
hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
- /* Now that h->dev[]->phys_disk[] is coherent, we can enable
+ /*
+ * Now that h->dev[]->phys_disk[] is coherent, we can enable
* any logical drives that need it enabled.
+ *
+ * The raid map should be current by now.
+ *
+ * We are updating the device list used for I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == NULL)
@@ -2441,7 +2493,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/*
* Any RAID offload error results in retry which will use
- * the normal I/O path so the controller can handle whatever's
+ * the normal I/O path so the controller can handle whatever is
* wrong.
*/
if (is_logical_device(dev) &&
@@ -2913,6 +2965,57 @@ static void hpsa_scsi_interpret_error(struct ctlr_info *h,
}
}
+static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
+ u8 page, u8 *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_free(h, c);
+ return rc;
+}
+
+static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
+ u8 *scsi3addr)
+{
+ u8 *buf;
+ u64 sa = 0;
+ int rc = 0;
+
+ buf = kzalloc(1024, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
+ buf, 1024);
+
+ if (rc)
+ goto out;
+
+ sa = get_unaligned_be64(buf+12);
+
+out:
+ kfree(buf);
+ return sa;
+}
+
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
u16 page, unsigned char *buf,
unsigned char bufsize)
@@ -2929,7 +3032,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3213,7 +3316,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
return -1;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3256,7 +3359,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3284,7 +3387,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
goto out;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3315,7 +3418,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -3348,6 +3451,9 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+ encl_dev->sas_address =
+ hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
@@ -3388,7 +3494,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
c->Request.CDB[5] = 0;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc)
goto out;
@@ -3472,6 +3578,30 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
dev->sas_address = sa;
}
+static void hpsa_ext_ctrl_present(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *physdev)
+{
+ u32 nphysicals;
+ int i;
+
+ if (h->discovery_polling)
+ return;
+
+ nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
+
+ for (i = 0; i < nphysicals; i++) {
+ if (physdev->LUN[i].device_type ==
+ BMIC_DEVICE_TYPE_CONTROLLER
+ && !is_hba_lunid(physdev->LUN[i].lunid)) {
+ dev_info(&h->pdev->dev,
+ "External controller present, activate discovery polling and disable rld caching\n");
+ hpsa_disable_rld_caching(h);
+ h->discovery_polling = 1;
+ break;
+ }
+ }
+}
+
/* Get a device id from inquiry page 0x83 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
@@ -3516,6 +3646,13 @@ exit_supported:
return true;
}
+/*
+ * Called during a scan operation.
+ * Sets ioaccel status on the new device list, not the existing device list
+ *
+ * The device list used during I/O will be updated later in
+ * adjust_hpsa_scsi_table.
+ */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
@@ -3544,12 +3681,12 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_enabled =
+ this_device->offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
}
- this_device->offload_to_be_enabled = this_device->offload_enabled;
+
out:
kfree(buf);
return;
@@ -3604,7 +3741,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
if (extended_response)
c->Request.CDB[1] = extended_response;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3739,7 +3876,7 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc) {
cmd_free(h, c);
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
@@ -4228,6 +4365,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
+ hpsa_ext_ctrl_present(h, physdev_list);
+
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
if (i >= HPSA_MAX_DEVICES) {
@@ -4258,6 +4397,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int phys_dev_index = i - (raid_ctlr_position == 0);
bool skip_device = false;
+ memset(tmpdevice, 0, sizeof(*tmpdevice));
+
physical_device = i < nphysicals + (raid_ctlr_position == 0);
/* Figure out where the LUN ID info is coming from */
@@ -4279,7 +4420,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
continue;
}
- /* Get device type, vendor, model, device id */
+ /* Get device type, vendor, model, device id, raid_map */
rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR);
if (rc == -ENOMEM) {
@@ -4296,18 +4437,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
- /* Turn on discovery_polling if there are ext target devices.
- * Event-based change notification is unreliable for those.
- */
- if (!h->discovery_polling) {
- if (tmpdevice->external) {
- h->discovery_polling = 1;
- dev_info(&h->pdev->dev,
- "External target, activate discovery polling.\n");
- }
- }
-
-
*this_device = *tmpdevice;
this_device->physical_device = physical_device;
@@ -6496,6 +6625,17 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
+ case RECEIVE_DIAGNOSTIC:
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[1] = 1;
+ c->Request.CDB[2] = 1;
+ c->Request.CDB[3] = (size >> 8) & 0xFF;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to controller so It's a physical command
@@ -8007,6 +8147,10 @@ static void controller_lockup_detected(struct ctlr_info *h)
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
lockup_detected, h->heartbeat_sample_interval / HZ);
+ if (lockup_detected == 0xffff0000) {
+ dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
+ writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
+ }
pci_disable_device(h->pdev);
fail_all_outstanding_cmds(h);
}
@@ -8047,9 +8191,79 @@ static int detect_controller_lockup(struct ctlr_info *h)
return false;
}
-static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+/*
+ * Set ioaccel status for all ioaccel volumes.
+ *
+ * Called from monitor controller worker (hpsa_event_monitor_worker)
+ *
+ * A Volume (or Volumes that comprise an Array set) may be undergoing a
+ * transformation, so we will be turning off ioaccel for all volumes that
+ * make up the Array.
+ */
+static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
+ int rc;
int i;
+ u8 ioaccel_status;
+ unsigned char *buf;
+ struct hpsa_scsi_dev_t *device;
+
+ if (!h)
+ return;
+
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ /*
+ * Run through current device list used during I/O requests.
+ */
+ for (i = 0; i < h->ndevices; i++) {
+ device = h->dev[i];
+
+ if (!device)
+ continue;
+ if (!device->scsi3addr)
+ continue;
+ if (!hpsa_vpd_page_supported(h, device->scsi3addr,
+ HPSA_VPD_LV_IOACCEL_STATUS))
+ continue;
+
+ memset(buf, 0, 64);
+
+ rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
+ buf, 64);
+ if (rc != 0)
+ continue;
+
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (device->offload_config)
+ device->offload_to_be_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+
+ /*
+ * Immediately turn off ioaccel for any volume the
+ * controller tells us to. Some of the reasons could be:
+ * transformation - change to the LVs of an Array.
+ * degraded volume - component failure
+ *
+ * If ioaccel is to be re-enabled, re-enable later during the
+ * scan operation so the driver can get a fresh raidmap
+ * before turning ioaccel back on.
+ *
+ */
+ if (!device->offload_to_be_enabled)
+ device->offload_enabled = 0;
+ }
+
+ kfree(buf);
+}
+
+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
char *event_type;
if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
@@ -8067,10 +8281,7 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
event_type = "configuration change";
/* Stop sending new RAID offload reqs via the IO accelerator */
scsi_block_requests(h->scsi_host);
- for (i = 0; i < h->ndevices; i++) {
- h->dev[i]->offload_enabled = 0;
- h->dev[i]->offload_to_be_enabled = 0;
- }
+ hpsa_set_ioaccel_status(h);
hpsa_drain_accel_commands(h);
/* Set 'accelerator path config change' bit */
dev_warn(&h->pdev->dev,
@@ -8087,10 +8298,6 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
writel(h->events, &(h->cfgtable->clear_event_notify));
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_clear_event_notify_ack(h);
-#if 0
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
-#endif
}
return;
}
@@ -8241,7 +8448,6 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
- hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
@@ -8601,7 +8807,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8613,7 +8819,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_TODEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8623,7 +8829,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8684,6 +8890,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ hpsa_delete_sas_host(h);
+
/*
* Call before disabling interrupts.
* scsi_remove_host can trigger I/O operations especially
@@ -8718,8 +8926,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- hpsa_delete_sas_host(h);
-
kfree(h); /* init_one 1 */
}
@@ -9207,9 +9413,9 @@ static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
struct sas_phy *phy = hpsa_sas_phy->phy;
sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (hpsa_sas_phy->added_to_port)
list_del(&hpsa_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(hpsa_sas_phy);
}
@@ -9367,7 +9573,7 @@ static int hpsa_add_sas_host(struct ctlr_info *h)
struct hpsa_sas_port *hpsa_sas_port;
struct hpsa_sas_phy *hpsa_sas_phy;
- parent_dev = &h->scsi_host->shost_gendev;
+ parent_dev = &h->scsi_host->shost_dev;
hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
if (!hpsa_sas_node)
@@ -9458,7 +9664,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = 0;
+ *identifier = rphy->identify.sas_address;
return 0;
}
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 078afe448115..21a726e2eec6 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -142,6 +142,7 @@
#define DOORBELL_CTLR_RESET 0x00000004l
#define DOORBELL_CTLR_RESET2 0x00000020l
#define DOORBELL_CLEAR_EVENTS 0x00000040l
+#define DOORBELL_GENERATE_CHKPT 0x00000080l
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
@@ -779,6 +780,8 @@ struct bmic_identify_physical_device {
u8 phys_bay_in_box; /* phys drv bay this drive resides */
__le32 rpm; /* Drive rotational speed in rpm */
u8 device_type; /* type of drive */
+#define BMIC_DEVICE_TYPE_CONTROLLER 0x07
+
u8 sata_version; /* only valid when drive_type is SATA */
__le64 big_total_block_count;
__le64 ris_starting_lba;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b491af31a5f8..0d2f7eb3acb6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1393,8 +1393,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
*
* Called when an internally generated command times out
**/
-static void ibmvfc_timeout(struct ibmvfc_event *evt)
+static void ibmvfc_timeout(struct timer_list *t)
{
+ struct ibmvfc_event *evt = from_timer(evt, t, timer);
struct ibmvfc_host *vhost = evt->vhost;
dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
ibmvfc_reset_host(vhost);
@@ -1424,12 +1425,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
BUG();
list_add_tail(&evt->queue, &vhost->sent);
- init_timer(&evt->timer);
+ timer_setup(&evt->timer, ibmvfc_timeout, 0);
if (timeout) {
- evt->timer.data = (unsigned long) evt;
evt->timer.expires = jiffies + (timeout * HZ);
- evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
add_timer(&evt->timer);
}
@@ -3692,8 +3691,9 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
* out, reset the CRQ. When the ADISC comes back as cancelled,
* log back into the target.
**/
-static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
+static void ibmvfc_adisc_timeout(struct timer_list *t)
{
+ struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
@@ -3778,9 +3778,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
if (timer_pending(&tgt->timer))
mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
else {
- tgt->timer.data = (unsigned long) tgt;
tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
- tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
add_timer(&tgt->timer);
}
@@ -3912,7 +3910,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
tgt->vhost = vhost;
tgt->need_login = 1;
tgt->cancel_key = vhost->task_set++;
- init_timer(&tgt->timer);
+ timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 7d156b161482..17df76f0be3c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -837,8 +837,9 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
*
* Called when an internally generated command times out
*/
-static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
+static void ibmvscsi_timeout(struct timer_list *t)
{
+ struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
@@ -927,11 +928,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
*/
list_add_tail(&evt_struct->list, &hostdata->sent);
- init_timer(&evt_struct->timer);
+ timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0);
if (timeout) {
- evt_struct->timer.data = (unsigned long) evt_struct;
evt_struct->timer.expires = jiffies + (timeout * HZ);
- evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
add_timer(&evt_struct->timer);
}
diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h b/drivers/scsi/ibmvscsi_tgt/libsrp.h
index 9fec55b36322..832606ae2908 100644
--- a/drivers/scsi/ibmvscsi_tgt/libsrp.h
+++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBSRP_H__
#define __LIBSRP_H__
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index 8f6f32fc61ff..7f2bb35b1b87 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Driver for the Iomega MatchMaker parallel port SCSI HBA embedded in
* the Iomega ZIP Plus drive
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f838bd73befa..cc0187965eee 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -694,7 +694,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
ipr_cmd->sibling = NULL;
ipr_cmd->eh_comp = NULL;
ipr_cmd->fast_done = fast_done;
- init_timer(&ipr_cmd->timer);
+ timer_setup(&ipr_cmd->timer, NULL, 0);
}
/**
@@ -990,15 +990,14 @@ static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
void (*done) (struct ipr_cmnd *),
- void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
+ void (*timeout_func) (struct timer_list *), u32 timeout)
{
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = done;
- ipr_cmd->timer.data = (unsigned long) ipr_cmd;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
+ ipr_cmd->timer.function = timeout_func;
add_timer(&ipr_cmd->timer);
@@ -1080,7 +1079,7 @@ static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
* none
**/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
- void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
+ void (*timeout_func) (struct timer_list *),
u32 timeout)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
@@ -2664,8 +2663,9 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
+static void ipr_timeout(struct timer_list *t)
{
+ struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
unsigned long lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
@@ -2696,8 +2696,9 @@ static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
+static void ipr_oper_timeout(struct timer_list *t)
{
+ struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
unsigned long lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
@@ -5449,8 +5450,9 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
+static void ipr_abort_timeout(struct timer_list *t)
{
+ struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
struct ipr_cmnd *reset_cmd;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_cmd_pkt *cmd_pkt;
@@ -8271,8 +8273,9 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
* Return value:
* none
**/
-static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
+static void ipr_reset_timer_done(struct timer_list *t)
{
+ struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
unsigned long lock_flags = 0;
@@ -8308,9 +8311,8 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
- ipr_cmd->timer.data = (unsigned long) ipr_cmd;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
+ ipr_cmd->timer.function = ipr_reset_timer_done;
add_timer(&ipr_cmd->timer);
}
@@ -8394,9 +8396,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
}
}
- ipr_cmd->timer.data = (unsigned long) ipr_cmd;
ipr_cmd->timer.expires = jiffies + stage_time * HZ;
- ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
@@ -8466,9 +8467,8 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_CONTINUE;
}
- ipr_cmd->timer.data = (unsigned long) ipr_cmd;
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
- ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
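ipr keeps a single per-command timer but picks the timeout handler per request, so the hunks above pass NULL to timer_setup() and assign timer.function at arm time; the function-pointer parameters change from void (*)(struct ipr_cmnd *) to void (*)(struct timer_list *) accordingly. A hedged sketch of that variant (names are illustrative, not ipr's):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical command structure for illustration only. */
struct foo_cmd {
        struct timer_list timer;
};

static void foo_cmd_init(struct foo_cmd *cmd)
{
        /* Handler chosen later, so register the timer with no callback. */
        timer_setup(&cmd->timer, NULL, 0);
}

static void foo_cmd_start(struct foo_cmd *cmd,
                          void (*timeout_fn)(struct timer_list *),
                          unsigned long timeout)
{
        /* No .data cookie any more: the handler uses from_timer() itself. */
        cmd->timer.function = timeout_fn;
        cmd->timer.expires = jiffies + timeout;
        add_timer(&cmd->timer);
}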
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
index 3359e10e0d8f..da6f04cae272 100644
--- a/drivers/scsi/isci/Makefile
+++ b/drivers/scsi/isci/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_ISCI) += isci.o
isci-objs := init.o phy.o request.o \
remote_device.o port.o \
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 609dafd661d1..13b37cdffa8e 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -958,9 +958,9 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
return status;
}
-static void phy_startup_timeout(unsigned long data)
+static void phy_startup_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
unsigned long flags;
enum sci_status status;
@@ -1592,9 +1592,9 @@ static const struct sci_base_state sci_controller_state_table[] = {
[SCIC_FAILED] = {}
};
-static void controller_timeout(unsigned long data)
+static void controller_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
struct sci_base_state_machine *sm = &ihost->sm;
unsigned long flags;
@@ -1737,9 +1737,9 @@ static u8 max_spin_up(struct isci_host *ihost)
MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
}
-static void power_control_timeout(unsigned long data)
+static void power_control_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
struct isci_phy *iphy;
unsigned long flags;
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 234ab46fce33..680e30947671 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -498,12 +498,10 @@ struct sci_timer {
};
static inline
-void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t))
{
- tmr->timer.function = fn;
- tmr->timer.data = (unsigned long) tmr;
tmr->cancel = 0;
- init_timer(&tmr->timer);
+ timer_setup(&tmr->timer, fn, 0);
}
static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
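isci wraps its timers in struct sci_timer, so the converted callbacks above recover the wrapper with from_timer() and then the owning object with container_of(). A small sketch of that two-step recovery, with hypothetical foo_* names:

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/kernel.h>

/* Hypothetical wrapper, mirroring the sci_timer idea. */
struct foo_timer {
        struct timer_list timer;
        bool cancel;
};

struct foo_host {
        struct foo_timer phy_timer;
        int phy_state;
};

static void foo_phy_timeout(struct timer_list *t)
{
        struct foo_timer *tmr = from_timer(tmr, t, timer);
        struct foo_host *host = container_of(tmr, struct foo_host, phy_timer);

        if (tmr->cancel)
                return;                 /* timer raced with cancellation */
        host->phy_state = 0;
}

static inline void foo_init_timer(struct foo_timer *tmr,
                                  void (*fn)(struct timer_list *))
{
        tmr->cancel = false;
        timer_setup(&tmr->timer, fn, 0);
}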
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index cb87b2ef7c92..1deca8c5a94f 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -315,9 +315,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
return SCI_SUCCESS;
}
-static void phy_sata_timeout(unsigned long data)
+static void phy_sata_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
struct isci_host *ihost = iphy->owning_port->owning_controller;
unsigned long flags;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index a4dd5c91508c..1df45f028ea7 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -769,9 +769,9 @@ bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
return true;
}
-static void port_timeout(unsigned long data)
+static void port_timeout(struct timer_list *t)
{
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
struct isci_host *ihost = iport->owning_controller;
unsigned long flags;
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index ac879745ef80..edb7be786c65 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -319,10 +319,10 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
-static void mpc_agent_timeout(unsigned long data)
+static void mpc_agent_timeout(struct timer_list *t)
{
u8 index;
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
@@ -654,10 +654,10 @@ static void sci_apc_agent_link_down(
}
/* configure the phys into ports when the timer fires */
-static void apc_agent_timeout(unsigned long data)
+static void apc_agent_timeout(struct timer_list *t)
{
u32 index;
- struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_timer *tmr = from_timer(tmr, t, timer);
struct sci_port_configuration_agent *port_agent;
struct isci_host *ihost;
unsigned long flags;
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
index 4bb23ac86a5c..65396f86c307 100644
--- a/drivers/scsi/libfc/Makefile
+++ b/drivers/scsi/libfc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# $Id: Makefile
obj-$(CONFIG_LIBFC) += libfc.o
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 772c35a5c49e..4fae253d4f3d 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -97,7 +97,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
-static void fc_fcp_timeout(unsigned long);
+static void fc_fcp_timeout(struct timer_list *);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
@@ -155,8 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->lp = lport;
fsp->xfer_ddp = FC_XID_UNKNOWN;
refcount_set(&fsp->ref_cnt, 1);
- init_timer(&fsp->timer);
- fsp->timer.data = (unsigned long)fsp;
+ timer_setup(&fsp->timer, NULL, 0);
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
} else {
@@ -1215,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
- setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fsp->timer.function = fc_fcp_timeout;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
@@ -1298,9 +1297,9 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
* fc_lun_reset_send() - Send LUN reset command
* @data: The FCP packet that identifies the LUN to be reset
*/
-static void fc_lun_reset_send(unsigned long data)
+static void fc_lun_reset_send(struct timer_list *t)
{
- struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
struct fc_lport *lport = fsp->lp;
if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
@@ -1308,7 +1307,7 @@ static void fc_lun_reset_send(unsigned long data)
return;
if (fc_fcp_lock_pkt(fsp))
return;
- setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+ fsp->timer.function = fc_lun_reset_send;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
fc_fcp_unlock_pkt(fsp);
}
@@ -1334,7 +1333,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
fsp->wait_for_comp = 1;
init_completion(&fsp->tm_done);
- fc_lun_reset_send((unsigned long)fsp);
+ fc_lun_reset_send(&fsp->timer);
/*
* wait for completion of reset
@@ -1431,9 +1430,9 @@ static void fc_fcp_cleanup(struct fc_lport *lport)
* received we see if data was received recently. If it has been then we
* continue waiting, otherwise, we abort the command.
*/
-static void fc_fcp_timeout(unsigned long data)
+static void fc_fcp_timeout(struct timer_list *t)
{
- struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
struct fc_rport *rport = fsp->rport;
struct fc_rport_libfc_priv *rpriv = rport->dd_data;
@@ -1446,7 +1445,7 @@ static void fc_fcp_timeout(unsigned long data)
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
fsp->timer_delay);
- setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fsp->timer.function = fc_fcp_timeout;
fc_fcp_timer_set(fsp, fsp->timer_delay);
goto unlock;
}
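libfc reuses one fsp->timer for different phases, so the hunks above drop the repeated setup_timer() calls in favour of repointing timer.function, and fc_lun_reset() now invokes the handler directly with &fsp->timer instead of a cast cookie. Roughly, with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical packet structure for illustration. */
struct foo_pkt {
        struct timer_list timer;
        unsigned long delay;            /* in jiffies */
};

static void foo_pkt_timeout(struct timer_list *t)
{
        struct foo_pkt *pkt = from_timer(pkt, t, timer);

        pkt->delay *= 2;                /* e.g. back off before retrying */
        mod_timer(&pkt->timer, jiffies + pkt->delay);
}

static void foo_pkt_init(struct foo_pkt *pkt)
{
        timer_setup(&pkt->timer, NULL, 0);
        pkt->delay = HZ;
}

static void foo_pkt_send(struct foo_pkt *pkt)
{
        /* Repoint the shared timer at this phase's handler and arm it. */
        pkt->timer.function = foo_pkt_timeout;
        mod_timer(&pkt->timer, jiffies + pkt->delay);
}

static void foo_pkt_force_timeout(struct foo_pkt *pkt)
{
        /* Synchronous call sites now pass the timer, not (unsigned long)pkt. */
        foo_pkt_timeout(&pkt->timer);
}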
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 2fd0ec651170..5da46052e179 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -2083,7 +2083,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
{
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
- struct request *rsp = job->req->next_rq;
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
@@ -2092,8 +2091,6 @@ int fc_lport_bsg_request(struct bsg_job *job)
u32 did, tov;
bsg_reply->reply_payload_rcv_len = 0;
- if (rsp)
- scsi_req(rsp)->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f8dc1601efd5..9c50d2d9f27c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1805,9 +1805,9 @@ int iscsi_target_alloc(struct scsi_target *starget)
}
EXPORT_SYMBOL_GPL(iscsi_target_alloc);
-static void iscsi_tmf_timedout(unsigned long data)
+static void iscsi_tmf_timedout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct iscsi_conn *conn = from_timer(conn, t, tmf_timer);
struct iscsi_session *session = conn->session;
spin_lock(&session->frwd_lock);
@@ -1838,8 +1838,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
}
conn->tmfcmd_pdus_cnt++;
conn->tmf_timer.expires = timeout * HZ + jiffies;
- conn->tmf_timer.function = iscsi_tmf_timedout;
- conn->tmf_timer.data = (unsigned long)conn;
add_timer(&conn->tmf_timer);
ISCSI_DBG_EH(session, "tmf set timeout\n");
@@ -2089,9 +2087,9 @@ done:
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
-static void iscsi_check_transport_timeouts(unsigned long data)
+static void iscsi_check_transport_timeouts(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct iscsi_conn *conn = from_timer(conn, t, transport_timer);
struct iscsi_session *session = conn->session;
unsigned long recv_timeout, next_timeout = 0, last_recv;
@@ -2913,9 +2911,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
conn->exp_statsn = 0;
conn->tmf_state = TMF_INITIAL;
- init_timer(&conn->transport_timer);
- conn->transport_timer.data = (unsigned long)conn;
- conn->transport_timer.function = iscsi_check_transport_timeouts;
+ timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0);
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
@@ -2939,7 +2935,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
- init_timer(&conn->tmf_timer);
+ timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0);
init_waitqueue_head(&conn->ehwait);
return cls_conn;
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index cd6f99c1ae7e..7e5d262e7a7d 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,10 +24,6 @@
#include "sas_dump.h"
-static const char *sas_hae_str[] = {
- [0] = "HAE_RESET",
-};
-
static const char *sas_porte_str[] = {
[0] = "PORTE_BYTES_DMAED",
[1] = "PORTE_BROADCAST_RCVD",
@@ -53,12 +49,6 @@ void sas_dprint_phye(int phyid, enum phy_event pe)
SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]);
}
-void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he)
-{
- SAS_DPRINTK("ha %s: %s event\n", dev_name(sas_ha->dev),
- sas_hae_str[he]);
-}
-
void sas_dump_port(struct asd_sas_port *port)
{
SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class);
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 800e4c69093f..6aaee6b0fcdb 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -26,5 +26,4 @@
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
-void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index c0d0d979b76d..0bb9eefc08c8 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -37,7 +37,7 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
- list_add(&sw->drain_node, &ha->defer_q);
+ list_add_tail(&sw->drain_node, &ha->defer_q);
} else
rc = scsi_queue_work(ha->core.shost, &sw->work);
@@ -124,15 +124,7 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
mutex_unlock(&ha->disco_mutex);
}
-static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
-{
- BUG_ON(event >= HA_NUM_EVENTS);
-
- return sas_queue_event(event, &sas_ha->pending,
- &sas_ha->ha_events[event].work, sas_ha);
-}
-
-static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
struct sas_ha_struct *ha = phy->ha;
@@ -154,19 +146,7 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
int sas_init_events(struct sas_ha_struct *sas_ha)
{
- static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
- [HAE_RESET] = sas_hae_reset,
- };
-
- int i;
-
- for (i = 0; i < HA_NUM_EVENTS; i++) {
- INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
- sas_ha->ha_events[i].ha = sas_ha;
- }
-
- sas_ha->notify_ha_event = notify_ha_event;
- sas_ha->notify_port_event = notify_port_event;
+ sas_ha->notify_port_event = sas_notify_port_event;
sas_ha->notify_phy_event = sas_notify_phy_event;
return 0;
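The sas_event.c change above also switches the defer list from list_add() to list_add_tail(), so work deferred while the HA is draining is replayed in submission order rather than most-recent-first. A small FIFO-deferral sketch with generic names (not the libsas structures):

#include <linux/list.h>

/* Hypothetical deferred-work item; node must start INIT_LIST_HEAD()ed
 * so list_empty() doubles as a "not yet queued" test.
 */
struct foo_work {
        struct list_head node;
        int id;
};

static void foo_defer(struct list_head *defer_q, struct foo_work *w)
{
        /* list_add_tail() preserves FIFO order; list_add() would be LIFO. */
        if (list_empty(&w->node))
                list_add_tail(&w->node, defer_q);
}

static void foo_drain(struct list_head *defer_q)
{
        struct foo_work *w, *tmp;

        list_for_each_entry_safe(w, tmp, defer_q, node) {
                list_del_init(&w->node);
                /* ... run the deferred work for w ... */
        }
}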
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 6b4fd2375178..ca1566237ae7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -41,9 +41,10 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
/* ---------- SMP task management ---------- */
-static void smp_task_timedout(unsigned long _task)
+static void smp_task_timedout(struct timer_list *t)
{
- struct sas_task *task = (void *) _task;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
@@ -91,7 +92,6 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->task_done = smp_task_done;
- task->slow_task->timer.data = (unsigned long) task;
task->slow_task->timer.function = smp_task_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64e9cdda1c3c..64fa6f53cb8b 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -66,7 +66,8 @@ struct sas_task *sas_alloc_slow_task(gfp_t flags)
}
task->slow_task = slow;
- init_timer(&slow->timer);
+ slow->task = task;
+ timer_setup(&slow->timer, NULL, 0);
init_completion(&slow->completion);
return task;
@@ -106,17 +107,6 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
hashed[2] = r & 0xFF;
}
-
-/* ---------- HA events ---------- */
-
-void sas_hae_reset(struct work_struct *work)
-{
- struct sas_ha_event *ev = to_sas_ha_event(work);
- struct sas_ha_struct *ha = ev->ha;
-
- clear_bit(HAE_RESET, &ha->pending);
-}
-
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
int error = 0;
@@ -154,7 +144,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
-
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
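from_timer() can only reach the structure that directly embeds the timer, so the sas_expander.c and sas_init.c hunks above add a back-pointer (slow->task) from the embedded slow-task data to its owning sas_task. A sketch of that arrangement with hypothetical foo_* types:

#include <linux/timer.h>
#include <linux/slab.h>

struct foo_task;

struct foo_task_slow {
        struct timer_list timer;
        struct foo_task *task;          /* back-pointer for timer callbacks */
};

struct foo_task {
        struct foo_task_slow *slow;     /* allocated separately */
        int state;
};

static void foo_task_timedout(struct timer_list *t)
{
        /* from_timer() yields the slow data; the back-pointer does the rest. */
        struct foo_task_slow *slow = from_timer(slow, t, timer);
        struct foo_task *task = slow->task;

        task->state = -1;
}

static struct foo_task *foo_alloc_slow_task(gfp_t flags)
{
        struct foo_task *task = kzalloc(sizeof(*task), flags);
        struct foo_task_slow *slow = kzalloc(sizeof(*slow), flags);

        if (!task || !slow) {
                kfree(task);
                kfree(slow);
                return NULL;
        }
        task->slow = slow;
        slow->task = task;              /* needed because the timer lives in slow */
        timer_setup(&slow->timer, NULL, 0);
        return task;
}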
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index ea8ad06ff582..58476b728c57 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task)
return;
if (!del_timer(&slow->timer))
return;
- slow->timer.function(slow->timer.data);
+ slow->timer.function(&slow->timer);
return;
}
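sas_task_abort() used to fire a pending timer by calling slow->timer.function(slow->timer.data); with the new signature the handler simply receives &slow->timer after del_timer() confirms the timer was still pending. A short sketch (hypothetical foo_slow type):

#include <linux/timer.h>

struct foo_slow {
        struct timer_list timer;
};

static void foo_abort(struct foo_slow *slow)
{
        /* del_timer() returns nonzero only if the timer was still pending. */
        if (!del_timer(&slow->timer))
                return;

        /* Run the timeout handler synchronously on the aborter's behalf. */
        slow->timer.function(&slow->timer);
}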
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8eb3f96fe068..231302273257 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -23,6 +23,7 @@
#include <scsi/scsi_host.h>
#include <linux/ktime.h>
+#include <linux/workqueue.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
@@ -653,6 +654,8 @@ struct lpfc_hba {
/* SLI4 specific HBA data structure */
struct lpfc_sli4_hba sli4_hba;
+ struct workqueue_struct *wq;
+
struct lpfc_sli sli;
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c17677f494af..82f6e219ee34 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3134,7 +3134,8 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txq_max : 0);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
@@ -3147,7 +3148,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txcmplq_max : 0);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
@@ -3246,6 +3248,11 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ if (ndlp->nrport)
+ nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
+ vport->cfg_devloss_tmo);
+#endif
}
spin_unlock_irq(shost->host_lock);
}
@@ -3375,7 +3382,7 @@ LPFC_ATTR_R(nvmet_mrq,
*/
LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
- "Define fc4 type to register with fabric.");
+ "Enable FC4 Protocol support - FCP / NVME");
/*
* lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
@@ -3391,7 +3398,7 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
* percentage will go to NVME.
*/
LPFC_ATTR_R(xri_split, 50, 10, 90,
- "Division of XRI resources between SCSI and NVME");
+ "Percentage of FCP XRI resources versus NVME");
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fe9e1c079c20..d89816222b23 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2911,7 +2911,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
}
}
- if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
+ if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
ret_val = -ENOMEM;
goto err_post_rxbufs_exit;
}
@@ -5421,6 +5421,8 @@ lpfc_bsg_timeout(struct bsg_job *job)
struct lpfc_iocbq *check_iocb, *next_iocb;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return -EIO;
/* if job's driver data is NULL, the command completed or is in the
* the process of completing. In this case, return status to request
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 7e300734b345..4e858b38529a 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -113,7 +113,7 @@ void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
-void lpfc_disc_timeout(unsigned long);
+void lpfc_disc_timeout(struct timer_list *);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
@@ -154,7 +154,7 @@ int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
struct lpfc_nodelist *);
void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
-void lpfc_els_retry_delay(unsigned long);
+void lpfc_els_retry_delay(struct timer_list *);
void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
@@ -165,7 +165,7 @@ void lpfc_els_flush_all_cmd(struct lpfc_hba *);
void lpfc_els_flush_cmd(struct lpfc_vport *);
int lpfc_els_disc_adisc(struct lpfc_vport *);
int lpfc_els_disc_plogi(struct lpfc_vport *);
-void lpfc_els_timeout(unsigned long);
+void lpfc_els_timeout(struct timer_list *);
void lpfc_els_timeout_handler(struct lpfc_vport *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
@@ -180,7 +180,7 @@ int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
-void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_tmo(struct timer_list *);
void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
@@ -279,9 +279,9 @@ void lpfc_mem_free(struct lpfc_hba *);
void lpfc_mem_free_all(struct lpfc_hba *);
void lpfc_stop_vport_timers(struct lpfc_vport *);
-void lpfc_poll_timeout(unsigned long ptr);
+void lpfc_poll_timeout(struct timer_list *t);
void lpfc_poll_start_timer(struct lpfc_hba *);
-void lpfc_poll_eratt(unsigned long);
+void lpfc_poll_eratt(struct timer_list *);
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
@@ -351,7 +351,7 @@ int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
uint16_t, uint64_t, lpfc_ctx_cmd);
-void lpfc_mbox_timeout(unsigned long);
+void lpfc_mbox_timeout(struct timer_list *t);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
@@ -445,7 +445,7 @@ extern unsigned int lpfc_fcp_look_ahead;
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
-void lpfc_fabric_block_timeout(unsigned long);
+void lpfc_fabric_block_timeout(struct timer_list *);
void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
void lpfc_rampdown_queue_depth(struct lpfc_hba *);
void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 33417681f5d4..f77673ab4a84 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -2884,9 +2884,9 @@ fdmi_cmd_exit:
* the worker thread.
**/
void
-lpfc_delayed_disc_tmo(unsigned long ptr)
+lpfc_delayed_disc_tmo(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long iflag;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index d50c481ec41c..2bf5ad3b1512 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2227,7 +2227,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
kfree(phba->nvmeio_trc);
/* Allocate new trace buffer and initialize */
- phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
+ phba->nvmeio_trc = kzalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
sz), GFP_KERNEL);
if (!phba->nvmeio_trc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -2235,8 +2235,6 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
"nvmeio_trc buffer\n");
return -ENOMEM;
}
- memset(phba->nvmeio_trc, 0,
- (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz));
atomic_set(&phba->nvmeio_trc_cnt, 0);
phba->nvmeio_trc_on = 0;
phba->nvmeio_trc_output_idx = 0;
@@ -5457,7 +5455,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc;
/* Allocate trace buffer and initialize */
- phba->nvmeio_trc = kmalloc(
+ phba->nvmeio_trc = kzalloc(
(sizeof(struct lpfc_debugfs_nvmeio_trc) *
phba->nvmeio_trc_size), GFP_KERNEL);
@@ -5467,9 +5465,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"nvmeio_trc buffer\n");
goto nvmeio_off;
}
- memset(phba->nvmeio_trc, 0,
- (sizeof(struct lpfc_debugfs_nvmeio_trc) *
- phba->nvmeio_trc_size));
phba->nvmeio_trc_on = 1;
phba->nvmeio_trc_output_idx = 0;
phba->nvmeio_trc = NULL;
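The lpfc_debugfs.c hunks above fold kmalloc() plus memset(..., 0, ...) into kzalloc(), and the lpfc_nvme.c hunk further down does the same for DMA pool buffers with dma_pool_zalloc(). A trivial sketch with hypothetical types:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dmapool.h>

/* Hypothetical trace-entry type for illustration. */
struct foo_trc_entry {
        u64 ts;
        u32 tag;
};

static struct foo_trc_entry *foo_alloc_trace(unsigned int nr)
{
        /* One call, already zeroed; no separate memset() needed. */
        return kzalloc(nr * sizeof(struct foo_trc_entry), GFP_KERNEL);
}

/* Same idea for pool-backed DMA buffers. */
static void *foo_alloc_dma_buf(struct dma_pool *pool, dma_addr_t *handle)
{
        return dma_pool_zalloc(pool, GFP_KERNEL, handle);
}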
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 468a66371de9..39d5b146202e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3131,9 +3131,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
* to the event associated with the ndlp.
**/
void
-lpfc_els_retry_delay(unsigned long ptr)
+lpfc_els_retry_delay(struct timer_list *t)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
unsigned long flags;
@@ -5394,10 +5394,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
&rdp_context->link_stat);
- /* Check if nport is logged, BZ190632 */
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
- goto lpfc_skip_descriptor;
-
len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
&rdp_context->link_stat, vport);
len += lpfc_rdp_res_oed_temp_desc(phba,
@@ -5418,7 +5414,6 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
rdp_context->page_a0, vport);
-lpfc_skip_descriptor:
rdp_res->length = cpu_to_be32(len - 8);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
@@ -5540,7 +5535,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
-
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2422 ELS RDP Request "
"dec len %d tag x%x port_id %d len %d\n",
@@ -5549,12 +5543,6 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(rdp_req->nport_id_desc.nport_id),
be32_to_cpu(rdp_req->nport_id_desc.length));
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- !phba->cfg_enable_SmartSAN) {
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_expl = LSEXP_PORT_LOGIN_REQ;
- goto error;
- }
if (sizeof(struct fc_rdp_nport_desc) !=
be32_to_cpu(rdp_req->rdp_des_length))
goto rjt_logerr;
@@ -7385,9 +7373,9 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
**/
void
-lpfc_els_timeout(unsigned long ptr)
+lpfc_els_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long iflag;
@@ -7430,6 +7418,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
if ((phba->pport->load_flag & FC_UNLOADING))
return;
@@ -9017,9 +9007,9 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* posted event WORKER_FABRIC_BLOCK_TMO.
**/
void
-lpfc_fabric_block_timeout(unsigned long ptr)
+lpfc_fabric_block_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
unsigned long iflags;
uint32_t tmo_posted;
@@ -9310,6 +9300,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
+
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
@@ -9416,7 +9409,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
rxid, 1);
/* Check if TXQ queue needs to be serviced */
- if (!(list_empty(&pring->txq)))
+ if (pring && !list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 20808349a80e..2bafde2b7cfe 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -3324,7 +3324,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
- pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ if (pring)
+ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if (mb->mbxStatus) {
@@ -4370,8 +4371,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
- (unsigned long)ndlp);
+ timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
@@ -4982,7 +4982,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+ phba->sli_rev != LPFC_SLI_REV4) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
@@ -5430,6 +5431,8 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
/* Error matching iocb on txq or txcmplq
* First check the txq.
@@ -5508,9 +5511,9 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
*/
/*****************************************************************************/
void
-lpfc_disc_timeout(unsigned long ptr)
+lpfc_disc_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long flags = 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1db0a38683f4..2b145966c73f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -3636,7 +3636,7 @@ struct lpfc_mbx_get_port_name {
#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4
#define MB_CQE_STATUS_DMA_FAILED 0x5
-#define LPFC_MBX_WR_CONFIG_MAX_BDE 8
+#define LPFC_MBX_WR_CONFIG_MAX_BDE 1
struct lpfc_mbx_wr_object {
struct mbox_header header;
union {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 100bc4c8798d..2b7ea7e53e12 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1138,13 +1138,13 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
* be cleared by the worker thread after it has taken the event bitmap out.
**/
static void
-lpfc_hb_timeout(unsigned long ptr)
+lpfc_hb_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
uint32_t tmo_posted;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, hb_tmofunc);
/* Check for heart beat timeout conditions */
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
@@ -1172,12 +1172,12 @@ lpfc_hb_timeout(unsigned long ptr)
* be cleared by the worker thread after it has taken the event bitmap out.
**/
static void
-lpfc_rrq_timeout(unsigned long ptr)
+lpfc_rrq_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, rrq_tmr);
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
if (!(phba->pport->load_flag & FC_UNLOADING))
phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -3216,6 +3216,9 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_destroy_vport_work_array(phba, vports);
lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
@@ -3937,14 +3940,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
- setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
- (unsigned long)vport);
+ timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
- setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
- (unsigned long)vport);
+ timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
- setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
- (unsigned long)vport);
+ timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
@@ -4176,6 +4176,9 @@ void
lpfc_stop_port(struct lpfc_hba *phba)
{
phba->lpfc_stop_port(phba);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
@@ -4210,9 +4213,9 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
* worker thread context.
**/
static void
-lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
/* Don't send FCF rediscovery event if timer cancelled */
spin_lock_irq(&phba->hbalock);
@@ -5624,15 +5627,13 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->luns);
/* MBOX heartbeat timer */
- setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
+ timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
/* Fabric block timer */
- setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
- (unsigned long)phba);
+ timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
/* EA polling mode timer */
- setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
- (unsigned long)phba);
+ timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
/* Heartbeat timer */
- setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
+ timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
return 0;
}
@@ -5658,8 +5659,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
/* FCP polling mode timer */
- setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
- (unsigned long)phba);
+ timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5829,11 +5829,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/
- setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
+ timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
/* FCF rediscover timer */
- setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
- (unsigned long)phba);
+ timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/*
* Control structure for handling external multi-buffer mailbox
@@ -6370,6 +6369,9 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
+ /* workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
return 0;
}
@@ -6384,6 +6386,12 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
+ if (phba->wq) {
+ flush_workqueue(phba->wq);
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
+ }
+
/* Stop kernel worker thread */
kthread_stop(phba->worker_thread);
}
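lpfc_init.c also grows a driver-private workqueue (phba->wq) for deferred interrupt work: allocated in phase 2 setup with alloc_workqueue(..., WQ_MEM_RECLAIM, 0), flushed when the port is stopped or taken offline, and flushed then destroyed in the teardown path above. A hedged sketch of that lifecycle with a hypothetical foo_hba:

#include <linux/errno.h>
#include <linux/workqueue.h>

struct foo_hba {
        struct workqueue_struct *wq;
        struct work_struct irq_work;
};

static void foo_irq_work_fn(struct work_struct *work)
{
        /* deferred interrupt handling would go here */
}

static int foo_setup(struct foo_hba *hba)
{
        INIT_WORK(&hba->irq_work, foo_irq_work_fn);

        /* WQ_MEM_RECLAIM gives the queue a rescuer thread so completion
         * work can still make progress under memory pressure.
         */
        hba->wq = alloc_workqueue("foo_wq", WQ_MEM_RECLAIM, 0);
        return hba->wq ? 0 : -ENOMEM;
}

static void foo_stop(struct foo_hba *hba)
{
        if (hba->wq)
                flush_workqueue(hba->wq);       /* wait out anything queued */
}

static void foo_teardown(struct foo_hba *hba)
{
        if (hba->wq) {
                flush_workqueue(hba->wq);
                destroy_workqueue(hba->wq);
                hba->wq = NULL;
        }
}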
@@ -11404,6 +11412,13 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
/* Perform ndlp cleanup on the physical port. The nvme and nvmet
* localports are destroyed after to cleanup all transport memory.
@@ -11412,14 +11427,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
- /*
- * Bring down the SLI Layer. This step disables all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA FCoE function.
- */
- lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
+ lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index f3ad7cac355d..b6957d944b9a 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -216,7 +216,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
pring = lpfc_phba_elsring(phba);
/* In case of error recovery path, we might have a NULL pring here */
- if (!pring)
+ if (unlikely(!pring))
return;
/* Abort outstanding I/O on NPort <nlp_DID> */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 23bdb1ca106e..517ae570e507 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -416,6 +416,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+ if (vport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -667,15 +670,17 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
struct lpfc_nvme_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
- if (!phba->ktime_on)
- return;
if (!lpfc_ncmd->ts_last_cmd ||
!lpfc_ncmd->ts_cmd_start ||
!lpfc_ncmd->ts_cmd_wqput ||
!lpfc_ncmd->ts_isr_cmpl ||
!lpfc_ncmd->ts_data_nvme)
return;
+
+ if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
+ return;
if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
return;
if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
@@ -695,15 +700,23 @@ lpfc_nvme_ktime(struct lpfc_hba *phba,
* cmpl is handled off to the NVME Layer.
*/
seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs */
- return;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
/* Calculate times relative to start of IO */
seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- seg3 = (lpfc_ncmd->ts_isr_cmpl -
- lpfc_ncmd->ts_cmd_start) - seg2;
- seg4 = (lpfc_ncmd->ts_data_nvme -
- lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+ segsum = seg2;
+ seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
phba->ktime_data_samples++;
phba->ktime_seg1_total += seg1;
if (seg1 < phba->ktime_seg1_min)
@@ -840,7 +853,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
} else {
lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
LPFC_IOCB_STATUS_MASK);
- lpfc_ncmd->result = wcqe->parameter;
+ lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
* IO error is when the adapter rejects it. All other
@@ -874,9 +887,20 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
+ case IOSTAT_LOCAL_REJECT:
+ /* Let fall through to set command final state. */
+ if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_IOERR,
+ "6032 Delay Aborted cmd %p "
+ "nvme cmd %p, xri x%x, "
+ "xb %d\n",
+ lpfc_ncmd, nCmd,
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ bf_get(lpfc_wcqe_c_xb, wcqe));
default:
out_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6072 NVME Completion Error: xri %x "
"status x%x result x%x placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -902,7 +926,7 @@ out_err:
* owns the dma address.
*/
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
lpfc_ncmd->ts_data_nvme = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
@@ -920,12 +944,18 @@ out_err:
#endif
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
- nCmd->done(nCmd);
+
+ /* NVME targets need completion held off until the abort exchange
+ * completes.
+ */
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+ nCmd->done(nCmd);
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
@@ -1119,12 +1149,12 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
- phba->cfg_nvme_seg_cnt,
+ phba->cfg_nvme_seg_cnt + 1,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
@@ -1225,6 +1255,21 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING) {
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
+ /* Validate pointers. */
+ if (!pnvme_lport || !pnvme_rport || !freqpriv) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
+ "6117 No Send:IO submit ptrs NULL, lport %p, "
+ "rport %p fcreq_priv %p\n",
+ pnvme_lport, pnvme_rport, freqpriv);
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1283,9 +1328,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (start) {
lpfc_ncmd->ts_cmd_start = start;
lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+ } else {
+ lpfc_ncmd->ts_cmd_start = 0;
}
#endif
@@ -1327,7 +1374,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_dec(&ndlp->cmd_pending);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
@@ -1336,7 +1383,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
+ if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
@@ -1387,7 +1434,7 @@ void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
@@ -1938,14 +1985,13 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
kfree(lpfc_ncmd);
break;
}
- memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
@@ -2042,9 +2088,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
@@ -2057,9 +2100,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
spin_unlock(&phba->nvme_buf_list_put_lock);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(
- phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
@@ -2096,7 +2136,6 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
- lpfc_ncmd->nvmeCmd = NULL;
list_add_tail(&lpfc_ncmd->list,
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
@@ -2296,6 +2335,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
+ struct lpfc_nodelist *prev_ndlp;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
@@ -2332,7 +2372,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* new rport.
*/
rport = remote_port->private;
- if (ndlp->nrport == rport) {
+ if (ndlp->nrport) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
@@ -2343,24 +2383,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
- } else {
- /* New rport. */
- rport->remoteport = remote_port;
- rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
- ndlp->nrport = rport;
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
- "Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
- rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ prev_ndlp = rport->ndlp;
+
+ /* Sever the ndlp<->rport connection before dropping
+ * the ndlp ref from register.
+ */
+ ndlp->nrport = NULL;
+ rport->ndlp = NULL;
+ if (prev_ndlp)
+ lpfc_nlp_put(ndlp);
}
+
+ /* Clean bind the rport to the ndlp. */
+ rport->remoteport = remote_port;
+ rport->lport = lport;
+ rport->ndlp = lpfc_nlp_get(ndlp);
+ if (!rport->ndlp)
+ return -1;
+ ndlp->nrport = rport;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_DISC | LOG_NODE,
+ "6022 Binding new rport to "
+ "lport %p Rport WWNN 0x%llx, "
+ "Rport WWPN 0x%llx DID "
+ "x%06x Role x%x\n",
+ lport,
+ rpinfo.node_name, rpinfo.port_name,
+ rpinfo.port_id, rpinfo.port_role);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2454,18 +2503,18 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
- int rrq_empty = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
@@ -2481,25 +2530,24 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
spin_unlock(
&phba->sli4_hba.abts_nvme_buf_list_lock);
- rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
- if (ndlp) {
- lpfc_set_rrq_active(
- phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag,
- rxid, 1);
+ if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
- }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 XRI Aborted xri x%x tag x%x "
- "released\n",
- xri, lpfc_ncmd->cur_iocbq.iotag);
-
+ "6311 nvme_cmd %p xri x%x tag x%x "
+ "abort complete and xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
- if (rrq_empty)
- lpfc_worker_wake_up(phba);
return;
}
}
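The lpfc_nvme_ktime() hunk above (and the matching lpfc_nvmet_ktime() change below) replaces chained subtractions with a running sum, segsum, and discards the sample whenever a timestamp would make a segment negative, so out-of-order timestamps can no longer produce huge unsigned deltas. A sketch of that guarded per-stage latency split with generic timestamps, not the driver's fields:

#include <linux/types.h>

/*
 * Split a command's life into per-stage latencies.  Each stage delta is
 * measured from the same start time minus the stages already accounted
 * for (segsum); if any stage would come out negative, drop the sample.
 */
static bool foo_latency_segments(u64 start, const u64 stage[3], u64 seg[3])
{
        u64 segsum = 0;
        int i;

        for (i = 0; i < 3; i++) {
                u64 total;

                if (stage[i] < start)
                        return false;           /* timestamp went backwards */
                total = stage[i] - start;       /* start -> stage i */
                if (segsum > total)
                        return false;           /* earlier stages overlap this one */
                seg[i] = total - segsum;        /* time spent in stage i alone */
                segsum += seg[i];
        }
        return true;
}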
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 0b7c1a49e203..84cf1b9079f7 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -76,7 +76,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
unsigned long iflag;
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6313 NVMET Defer ctx release xri x%x flg x%x\n",
ctxp->oxid, ctxp->flag);
@@ -221,9 +221,8 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
ctxp->ts_cmd_nvme = ktime_get_ns();
- ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
ctxp->ts_nvme_data = 0;
ctxp->ts_data_wqput = 0;
ctxp->ts_isr_data = 0;
@@ -289,9 +288,7 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
{
uint64_t seg1, seg2, seg3, seg4, seg5;
uint64_t seg6, seg7, seg8, seg9, seg10;
-
- if (!phba->ktime_on)
- return;
+ uint64_t segsum;
if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
!ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
@@ -300,6 +297,8 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
!ctxp->ts_isr_status || !ctxp->ts_status_nvme)
return;
+ if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
+ return;
if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
return;
if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
@@ -344,34 +343,66 @@ lpfc_nvmet_ktime(struct lpfc_hba *phba,
* (Segments 1 thru 4) for READDATA_RSP
*/
seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
- seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
- seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
- seg1 - seg2;
- seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3;
- seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4;
+ segsum = seg1;
+
+ seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
+ if (segsum > seg2)
+ return;
+ seg2 -= segsum;
+ segsum += seg2;
+
+ seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+ segsum += seg4;
+
+ seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
+ if (segsum > seg5)
+ return;
+ seg5 -= segsum;
+ segsum += seg5;
+
/* For auto rsp commands seg6 thru seg10 will be 0 */
if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
- seg6 = (ctxp->ts_nvme_status -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 - seg5;
- seg7 = (ctxp->ts_status_wqput -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 -
- seg4 - seg5 - seg6;
- seg8 = (ctxp->ts_isr_status -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 -
- seg5 - seg6 - seg7;
- seg9 = (ctxp->ts_status_nvme -
- ctxp->ts_isr_cmd) -
- seg1 - seg2 - seg3 - seg4 -
- seg5 - seg6 - seg7 - seg8;
+ seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
+ if (segsum > seg6)
+ return;
+ seg6 -= segsum;
+ segsum += seg6;
+
+ seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
+ if (segsum > seg7)
+ return;
+ seg7 -= segsum;
+ segsum += seg7;
+
+ seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
+ if (segsum > seg8)
+ return;
+ seg8 -= segsum;
+ segsum += seg8;
+
+ seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
+ if (segsum > seg9)
+ return;
+ seg9 -= segsum;
+ segsum += seg9;
+
+ if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
+ return;
seg10 = (ctxp->ts_isr_status -
ctxp->ts_isr_cmd);
} else {
+ if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
+ return;
seg6 = 0;
seg7 = 0;
seg8 = 0;
@@ -463,7 +494,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_nvmet_rcv_ctx *ctxp;
- uint32_t status, result, op, start_clean;
+ uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t id;
#endif
@@ -491,17 +522,21 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (tgtp)
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ logerr = LOG_NVME_IOERR;
+
/* pick up SLI4 exhange busy condition */
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
+ logerr |= LOG_NVME_ABTS;
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
- ctxp->oxid, status, result);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
}
+ lpfc_printf_log(phba, KERN_INFO, logerr,
+ "6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
+ ctxp->oxid, status, result, ctxp->flag);
+
} else {
rsp->fcp_error = NVME_SC_SUCCESS;
if (op == NVMET_FCOP_RSP)
@@ -519,7 +554,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_READDATA_RSP) {
ctxp->ts_isr_data =
cmdwqe->isr_timestamp;
@@ -553,7 +588,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
#endif
rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
+ if (ctxp->ts_cmd_nvme)
lpfc_nvmet_ktime(phba, ctxp);
#endif
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
@@ -563,7 +598,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
memset(((char *)cmdwqe) + start_clean, 0,
(sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
ctxp->ts_data_nvme = ktime_get_ns();
}
@@ -597,6 +632,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
struct ulp_bde64 bpl;
int rc;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -678,8 +716,13 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct lpfc_iocbq *nvmewqeq;
int rc;
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
ctxp->ts_nvme_status = ktime_get_ns();
else
@@ -734,7 +777,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (!phba->ktime_on)
+ if (!ctxp->ts_cmd_nvme)
return 0;
if (rsp->op == NVMET_FCOP_RSP)
ctxp->ts_status_wqput = ktime_get_ns();
@@ -777,6 +820,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct lpfc_hba *phba = ctxp->phba;
unsigned long flags;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -787,6 +833,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
spin_lock_irqsave(&ctxp->ctxlock, flags);
+ ctxp->state = LPFC_NVMET_STE_ABORT;
/* Since iaab/iaar are NOT set, we need to check
* if the firmware is in process of aborting IO
@@ -1125,9 +1172,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
}
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
- lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
- NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR;
+ lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
@@ -1138,9 +1183,14 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6025 Cannot register NVME targetport "
- "x%x\n", error);
+ "6025 Cannot register NVME targetport x%x: "
+ "portnm %llx nodenm %llx segs %d qs %d\n",
+ error,
+ pinfo.port_name, pinfo.node_name,
+ lpfc_tgttemplate.max_sgl_segments,
+ lpfc_tgttemplate.max_hw_queues);
phba->targetport = NULL;
+ phba->nvmet_support = 0;
lpfc_nvmet_cleanup_io_context(phba);
@@ -1152,9 +1202,11 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
"targetport: %p, private %p "
- "portnm %llx nodenm %llx\n",
+ "portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
- pinfo.port_name, pinfo.node_name);
+ pinfo.port_name, pinfo.node_name,
+ lpfc_tgttemplate.max_sgl_segments,
+ lpfc_tgttemplate.max_hw_queues);
atomic_set(&tgtp->rcv_ls_req_in, 0);
atomic_set(&tgtp->rcv_ls_req_out, 0);
@@ -1457,6 +1509,7 @@ static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
struct lpfc_nvmet_ctx_info *current_infop)
{
+#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
struct lpfc_nvmet_ctx_info *get_infop;
int i;
@@ -1504,6 +1557,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
get_infop = get_infop->nvmet_ctx_next_cpu;
}
+#endif
/* Nothing found, all contexts for the MRQ are in-flight */
return NULL;
}
@@ -1631,7 +1685,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (isr_timestamp) {
ctxp->ts_isr_cmd = isr_timestamp;
ctxp->ts_cmd_nvme = ktime_get_ns();
ctxp->ts_nvme_data = 0;
@@ -1642,6 +1696,8 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->ts_status_wqput = 0;
ctxp->ts_isr_status = 0;
ctxp->ts_status_nvme = 0;
+ } else {
+ ctxp->ts_cmd_nvme = 0;
}
#endif
@@ -2320,7 +2376,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
atomic_inc(&tgtp->xmt_abort_rsp);
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6165 ABORT cmpl: xri x%x flg x%x (%d) "
"WCQE: %08x %08x %08x %08x\n",
ctxp->oxid, ctxp->flag, released,
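The lpfc_nvmet_ktime() change above replaces chained subtractions with a running-sum check so that a single out-of-order timestamp can no longer underflow the unsigned stage deltas. A minimal standalone sketch of that pattern, assuming kernel-style u64 timestamps and illustrative names (not part of the patch):

    #include <linux/types.h>

    /* Derive per-stage deltas from absolute timestamps; reject the
     * sample if the timestamps are not monotonically increasing. */
    static int compute_stage_deltas(const u64 *ts, u64 *delta, int n)
    {
            u64 segsum = 0;
            int i;

            for (i = 1; i < n; i++) {
                    u64 span;

                    if (ts[i] < ts[0])
                            return -1;      /* out-of-order timestamp */
                    span = ts[i] - ts[0];
                    if (segsum > span)
                            return -1;      /* earlier stage already past this one */
                    delta[i - 1] = span - segsum;
                    segsum += delta[i - 1];
            }
            return 0;
    }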
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1a6f122bb25d..c0cdaef4db24 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4501,9 +4501,9 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
* and FCP Ring interrupt is disable.
**/
-void lpfc_poll_timeout(unsigned long ptr)
+void lpfc_poll_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
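The lpfc_poll_timeout() hunk above is one instance of the tree-wide timer API conversion: the callback now receives the timer_list itself and recovers its container with from_timer() instead of casting an unsigned long cookie. A minimal sketch of the pattern under illustrative names (not lpfc code):

    #include <linux/timer.h>

    struct my_dev {
            struct timer_list poll_timer;
    };

    static void my_poll_timeout(struct timer_list *t)
    {
            struct my_dev *dev = from_timer(dev, t, poll_timer);

            /* ... poll the hardware through dev ... */
    }

    static void my_dev_start_polling(struct my_dev *dev)
    {
            timer_setup(&dev->poll_timer, my_poll_timeout, 0);
            mod_timer(&dev->poll_timer, jiffies + HZ);
    }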
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8b119f87b51d..aecd2399005d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -80,8 +80,8 @@ static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
-static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
- struct lpfc_eqe *eqe, uint32_t qidx);
+static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+ struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
@@ -2732,7 +2732,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given response iocb using the iotag of the
- * response iocb. This function is called with the hbalock held.
+ * response iocb. This function is called with the hbalock held
+ * for sli3 devices or the ring_lock for sli4 devices.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
@@ -2828,9 +2829,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
- spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
@@ -3004,13 +3011,13 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* and wake up worker thread to process it. Otherwise, it will set up the
* Error Attention polling timer for the next poll.
**/
-void lpfc_poll_eratt(unsigned long ptr)
+void lpfc_poll_eratt(struct timer_list *t)
{
struct lpfc_hba *phba;
uint32_t eratt = 0;
uint64_t sli_intr, cnt;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, eratt_poll);
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -7167,9 +7174,9 @@ out_free_mbox:
* done by the worker thread function lpfc_mbox_timeout_handler.
**/
void
-lpfc_mbox_timeout(unsigned long ptr)
+lpfc_mbox_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
unsigned long iflag;
uint32_t tmo_posted;
@@ -9396,10 +9403,13 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
piocb->hba_wqidx =
lpfc_sli4_scmd_to_wqidx_distr(phba,
piocb->context1);
+ piocb->hba_wqidx = piocb->hba_wqidx %
+ phba->cfg_fcp_io_channel;
+ }
return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
} else {
if (unlikely(!phba->sli4_hba.oas_wq))
@@ -10632,6 +10642,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
+ if (!pring) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
/*
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
@@ -12500,6 +12518,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
unsigned long iflags;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -12507,19 +12527,21 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- /* Put the iocb back on the txcmplq */
- lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
-
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
- "cmdiocb: iotag (%d)\n",
- bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ wcqe->parameter, wcqe->word3);
lpfc_sli_release_iocbq(phba, irspiocbq);
return NULL;
}
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
/* Fake the irspiocbq and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
@@ -13010,14 +13032,11 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* completion queue, and then return.
*
**/
-static int
+static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
struct lpfc_queue *speq)
{
struct lpfc_queue *cq = NULL, *childq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
- int ecount = 0;
uint16_t cqid;
/* Get the reference to the corresponding CQ */
@@ -13034,48 +13053,84 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0365 Slow-path CQ identifier "
"(%d) does not exist\n", cqid);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = speq;
+ if (!queue_work(phba->wq, &cq->spwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0390 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+}
+
+/**
+ * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
+ * @work: Pointer to the work_struct embedded in the slow-path queue.
+ *
+ * This routine processes the entries of a slow-path completion queue from
+ * workqueue context. It checks the queue type to determine how each
+ * completion queue entry should be handled, dispatches it to the
+ * appropriate handler, rearms the completion queue once all entries
+ * have been consumed, and wakes up the worker thread if any deferred
+ * work was posted.
+ *
+ **/
+static void
+lpfc_sli4_sp_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, spwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
switch (cq->type) {
case LPFC_MCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
cq->CQ_mbox++;
}
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- if ((cq->subtype == LPFC_FCP) ||
- (cq->subtype == LPFC_NVME))
+ if (cq->subtype == LPFC_FCP ||
+ cq->subtype == LPFC_NVME) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
cqe);
- else
+ } else {
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
cqe);
- if (!(++ecount % cq->entry_repost))
+ }
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0370 Invalid completion queue type (%d)\n",
cq->type);
- return 0;
+ return;
}
/* Catch the no cq entry condition, log an error */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0371 No entry from the CQ: identifier "
"(x%x), type (%d)\n", cq->queue_id, cq->type);
@@ -13086,8 +13141,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
/**
@@ -13143,11 +13196,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
-
- if (cq->assoc_qp)
- cmdiocbq->isr_timestamp =
- cq->assoc_qp->isr_timestamp;
-
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ cmdiocbq->isr_timestamp = cq->isr_timestamp;
+#endif
if (cmdiocbq->iocb_cmpl == NULL) {
if (cmdiocbq->wqe_cmpl) {
if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -13292,7 +13343,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
phba, idx, dma_buf,
- cq->assoc_qp->isr_timestamp);
+ cq->isr_timestamp);
return false;
}
drop:
@@ -13395,15 +13446,12 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* queue and process all the entries on the completion queue, rearm the
* completion queue, and then return.
**/
-static int
+static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t qidx)
{
struct lpfc_queue *cq = NULL;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid, id;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13411,7 +13459,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
bf_get_le32(lpfc_eqe_minor_code, eqe));
- return 0;
+ return;
}
/* Get the reference to the corresponding CQ */
@@ -13448,9 +13496,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Otherwise this is a Slow path event */
if (cq == NULL) {
- ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
- phba->sli4_hba.hba_eq[qidx]);
- return ecount;
+ lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+ return;
}
process_cq:
@@ -13459,26 +13506,61 @@ process_cq:
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+ if (!queue_work(phba->wq, &cq->irqwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0363 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+}
+
+/**
+ * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
+ * @work: Pointer to the work_struct embedded in the fast-path
+ * completion queue.
+ *
+ * This routine processes the entries of a fast-path completion queue from
+ * workqueue context. Each completion queue entry is passed to
+ * lpfc_sli4_fp_handle_cqe() for handling, the maximum number of CQEs
+ * processed in one pass is tracked, the completion queue is rearmed
+ * once all entries have been consumed, and the worker thread is woken
+ * up if any deferred work was posted.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, irqwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+#endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
+ cq->assoc_qp->EQ_cqe_cnt += ccount;
/* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0369 No entry from fast-path completion "
"queue fcpcqid=%d\n", cq->queue_id);
@@ -13489,8 +13571,6 @@ process_cq:
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
static void
@@ -13524,10 +13604,7 @@ static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
struct lpfc_queue *cq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -13562,30 +13639,12 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.fof_eq;
- /* Process all the entries to the OAS CQ */
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
- break;
- }
-
- /* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
-
- /* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ /* CQ work will be processed on CPU affinitized to this IRQ */
+ if (!queue_work(phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9153 No entry from fast-path completion "
- "queue fcpcqid=%d\n", cq->queue_id);
-
- /* In any case, flash and re-arm the CQ */
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
+ "0367 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
}
/**
@@ -13711,7 +13770,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- int ccount = 0;
int hba_eqidx;
/* Get the driver's phba structure from the dev_id */
@@ -13729,11 +13787,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
if (unlikely(!fpeq))
return IRQ_NONE;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
- fpeq->isr_timestamp = ktime_get_ns();
-#endif
-
if (lpfc_fcp_look_ahead) {
if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
lpfc_sli4_eq_clr_intr(fpeq);
@@ -13760,12 +13813,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
* Process all the event on FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- if (eqe == NULL)
- break;
-
- ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
- if (!(++ecount % fpeq->entry_repost) ||
- ccount > LPFC_MAX_ISR_CQE)
+ lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+ if (!(++ecount % fpeq->entry_repost))
break;
fpeq->EQ_processed++;
}
@@ -13948,6 +13997,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->entry_size = entry_size;
queue->entry_count = entry_count;
queue->phba = phba;
+ INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
+ INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
/* entry_repost will be set during q creation */
@@ -17137,7 +17188,8 @@ exit:
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
- lpfc_sli_release_iocbq(phba, iocbq);
+ if (iocbq)
+ lpfc_sli_release_iocbq(phba, iocbq);
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
@@ -18691,6 +18743,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
uint32_t txq_cnt = 0;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_for_each_entry(piocbq, &pring->txq, list) {
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 60200385fe00..13b8f4d4da34 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -158,7 +158,6 @@ struct lpfc_queue {
#define LPFC_MQ_REPOST 8
#define LPFC_CQ_REPOST 64
#define LPFC_RQ_REPOST 64
-#define LPFC_MAX_ISR_CQE 64
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
@@ -202,6 +201,9 @@ struct lpfc_queue {
#define RQ_buf_posted q_cnt_3
#define RQ_rcv_buf q_cnt_4
+ struct work_struct irqwork;
+ struct work_struct spwork;
+
uint64_t isr_timestamp;
struct lpfc_queue *assoc_qp;
union sli4_qe qe[1]; /* array to index entries (must be last) */
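The irqwork/spwork members added above carry the deferral introduced in lpfc_sli.c: the hard-IRQ EQ handler only queues a work item embedded in the queue object, and the CQE loop runs later in process context. A minimal sketch of that split under illustrative names (not lpfc code):

    #include <linux/workqueue.h>

    struct my_cq {
            struct work_struct irqwork;
            /* ... completion queue state ... */
    };

    static void my_cq_process(struct work_struct *work)
    {
            struct my_cq *cq = container_of(work, struct my_cq, irqwork);

            /* drain the completion entries, then re-arm the queue */
    }

    static void my_cq_init(struct my_cq *cq)
    {
            INIT_WORK(&cq->irqwork, my_cq_process);
    }

    /* hard-IRQ path: defer all CQE processing to the workqueue */
    static void my_cq_interrupt(struct workqueue_struct *wq, struct my_cq *cq)
    {
            if (!queue_work(wq, &cq->irqwork))
                    pr_warn("CQ work already pending\n");
    }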
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6aa192b3e4bf..e0181371af09 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.3"
+#define LPFC_DRIVER_VERSION "11.4.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index c714482bf4c5..c9d33b1268cb 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,6 +313,15 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
goto error_out;
}
+ /* NPIV is not supported if HBA has NVME enabled */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+ "3189 Create VPORT failed: "
+ "NPIV is not supported on NVME\n");
+ rc = VPORT_INVAL;
+ goto error_out;
+ }
+
vpi = lpfc_alloc_vpi(phba);
if (vpi == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/mac53c94.h b/drivers/scsi/mac53c94.h
index 1ad24e4f0a85..5df6e81f78a8 100644
--- a/drivers/scsi/mac53c94.h
+++ b/drivers/scsi/mac53c94.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mac53c94.h: definitions for the driver for the 53c94 SCSI bus adaptor
* found on Power Macintosh computers, controlling the external SCSI chain.
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 508d65e5a518..21eba2fd465a 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MEGARAID_H__
#define __MEGARAID_H__
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 5826ed509e3e..6e74d21227a5 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index 05f6e4ec3453..eedcbde46459 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/semaphore.h>
+#include <linux/timer.h>
#include "mbox_defs.h"
@@ -153,6 +154,11 @@ typedef struct uioc {
} __attribute__ ((aligned(1024),packed)) uioc_t;
+/* For on-stack uioc timers. */
+struct uioc_timeout {
+ struct timer_list timer;
+ uioc_t *uioc;
+};
/**
* struct mraid_hba_info - information about the controller
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index ec3c43854978..530358cdcb39 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -3904,19 +3904,19 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
wake_up(&raid_dev->sysfs_wait_q);
}
-
/**
* megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
- * @data : timed out packet
+ * @t : timer that timed out
*
* Timeout routine to recover and return to application, in case the adapter
* has stopped responding. A timeout of 60 seconds for this command seems like
* a good value.
*/
static void
-megaraid_sysfs_get_ldmap_timeout(unsigned long data)
+megaraid_sysfs_get_ldmap_timeout(struct timer_list *t)
{
- uioc_t *uioc = (uioc_t *)data;
+ struct uioc_timeout *timeout = from_timer(timeout, t, timer);
+ uioc_t *uioc = timeout->uioc;
adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
@@ -3951,8 +3951,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
mbox64_t *mbox64;
mbox_t *mbox;
char *raw_mbox;
- struct timer_list sysfs_timer;
- struct timer_list *timerp;
+ struct uioc_timeout timeout;
caddr_t ldmap;
int rval = 0;
@@ -3988,14 +3987,12 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
/*
* Setup a timer to recover from a non-responding controller
*/
- timerp = &sysfs_timer;
- init_timer(timerp);
-
- timerp->function = megaraid_sysfs_get_ldmap_timeout;
- timerp->data = (unsigned long)uioc;
- timerp->expires = jiffies + 60 * HZ;
+ timeout.uioc = uioc;
+ timer_setup_on_stack(&timeout.timer,
+ megaraid_sysfs_get_ldmap_timeout, 0);
- add_timer(timerp);
+ timeout.timer.expires = jiffies + 60 * HZ;
+ add_timer(&timeout.timer);
/*
* Send the command to the firmware
@@ -4033,7 +4030,8 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
}
- del_timer_sync(timerp);
+ del_timer_sync(&timeout.timer);
+ destroy_timer_on_stack(&timeout.timer);
mutex_unlock(&raid_dev->sysfs_mtx);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 65b6f6ace3a5..bb802b0c12b8 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -35,7 +35,7 @@ static int kioc_to_mimd(uioc_t *, mimd_t __user *);
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
-static void lld_timedout(unsigned long);
+static void lld_timedout(struct timer_list *);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
@@ -686,8 +686,7 @@ static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
int rval;
- struct timer_list timer;
- struct timer_list *tp = NULL;
+ struct uioc_timeout timeout = { };
kioc->status = -ENODATA;
rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
@@ -698,14 +697,12 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
* Start the timer
*/
if (adp->timeout > 0) {
- tp = &timer;
- init_timer(tp);
+ timeout.uioc = kioc;
+ timer_setup_on_stack(&timeout.timer, lld_timedout, 0);
- tp->function = lld_timedout;
- tp->data = (unsigned long)kioc;
- tp->expires = jiffies + adp->timeout * HZ;
+ timeout.timer.expires = jiffies + adp->timeout * HZ;
- add_timer(tp);
+ add_timer(&timeout.timer);
}
/*
@@ -713,8 +710,9 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
* call, the ioctl either completed successfully or timedout.
*/
wait_event(wait_q, (kioc->status != -ENODATA));
- if (tp) {
- del_timer_sync(tp);
+ if (timeout.timer.function) {
+ del_timer_sync(&timeout.timer);
+ destroy_timer_on_stack(&timeout.timer);
}
/*
@@ -783,12 +781,13 @@ ioctl_done(uioc_t *kioc)
/**
* lld_timedout - callback from the expired timer
- * @ptr : ioctl packet that timed out
+ * @t : timer that timed out
*/
static void
-lld_timedout(unsigned long ptr)
+lld_timedout(struct timer_list *t)
{
- uioc_t *kioc = (uioc_t *)ptr;
+ struct uioc_timeout *timeout = from_timer(timeout, t, timer);
+ uioc_t *kioc = timeout->uioc;
kioc->status = -ETIME;
kioc->timedout = 1;
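The megaraid_mbox.c and megaraid_mm.c hunks above switch their on-stack timers to timer_setup_on_stack(); because timer_list no longer carries a ->data cookie, the timed-out packet is bundled with the timer in struct uioc_timeout and recovered with from_timer(). A minimal sketch of that wrapper pattern, with illustrative names (not driver code):

    #include <linux/timer.h>

    struct io_timeout {
            struct timer_list timer;
            void *payload;          /* object to mark as timed out */
    };

    static void io_timed_out(struct timer_list *t)
    {
            struct io_timeout *timeout = from_timer(timeout, t, timer);

            /* flag timeout->payload as timed out, wake any waiter */
    }

    static void wait_with_timeout(void *payload, unsigned long secs)
    {
            struct io_timeout timeout = { .payload = payload };

            timer_setup_on_stack(&timeout.timer, io_timed_out, 0);
            timeout.timer.expires = jiffies + secs * HZ;
            add_timer(&timeout.timer);

            /* ... wait for completion or timeout ... */

            del_timer_sync(&timeout.timer);
            destroy_timer_on_stack(&timeout.timer);
    }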
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6722c93a295..f5a36ccb8606 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.702.06.00-rc1"
-#define MEGASAS_RELDATE "June 21, 2017"
+#define MEGASAS_VERSION "07.703.05.00-rc1"
+#define MEGASAS_RELDATE "October 5, 2017"
/*
* Device IDs
@@ -57,6 +57,7 @@
#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052
#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053
#define PCI_DEVICE_ID_LSI_VENTURA 0x0014
+#define PCI_DEVICE_ID_LSI_CRUSADER 0x0015
#define PCI_DEVICE_ID_LSI_HARPOON 0x0016
#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
@@ -186,16 +187,19 @@
/*
* MFI command opcodes
*/
-#define MFI_CMD_INIT 0x00
-#define MFI_CMD_LD_READ 0x01
-#define MFI_CMD_LD_WRITE 0x02
-#define MFI_CMD_LD_SCSI_IO 0x03
-#define MFI_CMD_PD_SCSI_IO 0x04
-#define MFI_CMD_DCMD 0x05
-#define MFI_CMD_ABORT 0x06
-#define MFI_CMD_SMP 0x07
-#define MFI_CMD_STP 0x08
-#define MFI_CMD_INVALID 0xff
+enum MFI_CMD_OP {
+ MFI_CMD_INIT = 0x0,
+ MFI_CMD_LD_READ = 0x1,
+ MFI_CMD_LD_WRITE = 0x2,
+ MFI_CMD_LD_SCSI_IO = 0x3,
+ MFI_CMD_PD_SCSI_IO = 0x4,
+ MFI_CMD_DCMD = 0x5,
+ MFI_CMD_ABORT = 0x6,
+ MFI_CMD_SMP = 0x7,
+ MFI_CMD_STP = 0x8,
+ MFI_CMD_OP_COUNT,
+ MFI_CMD_INVALID = 0xff
+};
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -1504,6 +1508,15 @@ enum FW_BOOT_CONTEXT {
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+
+enum MR_ADAPTER_TYPE {
+ MFI_SERIES = 1,
+ THUNDERBOLT_SERIES = 2,
+ INVADER_SERIES = 3,
+ VENTURA_SERIES = 4,
+};
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -1617,7 +1630,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:19;
+ u32 reserved:18;
+ u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
u32 support_fp_rlbypass:1;
@@ -1645,7 +1659,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_fp_rlbypass:1;
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
- u32 reserved:19;
+ u32 support_64bit_mode:1;
+ u32 reserved:18;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2114,6 +2129,19 @@ struct megasas_instance {
u32 *crash_dump_buf;
dma_addr_t crash_dump_h;
+
+ struct MR_PD_LIST *pd_list_buf;
+ dma_addr_t pd_list_buf_h;
+
+ struct megasas_ctrl_info *ctrl_info_buf;
+ dma_addr_t ctrl_info_buf_h;
+
+ struct MR_LD_LIST *ld_list_buf;
+ dma_addr_t ld_list_buf_h;
+
+ struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
+ dma_addr_t ld_targetid_list_buf_h;
+
void *crash_buf[MAX_CRASH_DUMP_SIZE];
unsigned int fw_crash_buffer_size;
unsigned int fw_crash_state;
@@ -2210,8 +2238,6 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
- u32 ctrl_context_pages;
- struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
@@ -2236,12 +2262,13 @@ struct megasas_instance {
bool dev_handle;
bool fw_sync_cache_support;
u32 mfi_frame_size;
- bool is_ventura;
bool msix_combined;
u16 max_raid_mapsize;
/* preffered count to send as LDIO irrspective of FP capable.*/
u8 r1_ldio_hint_default;
u32 nvme_page_size;
+ u8 adapter_type;
+ bool consistent_mask_64bit;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2488,4 +2515,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
u32 mega_mod64(u64 dividend, u32 divisor);
int megasas_alloc_fusion_context(struct megasas_instance *instance);
void megasas_free_fusion_context(struct megasas_instance *instance);
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len);
#endif /*LSI_MEGARAID_SAS_H */
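The MFI opcode change above converts a block of #defines into enum MFI_CMD_OP with a trailing MFI_CMD_OP_COUNT member, which gives a natural upper bound when validating opcodes. A minimal sketch of that pattern with illustrative names (not driver code):

    enum my_cmd_op {
            MY_CMD_INIT = 0x0,
            MY_CMD_READ = 0x1,
            MY_CMD_WRITE = 0x2,
            MY_CMD_OP_COUNT,        /* sentinel: number of valid opcodes */
            MY_CMD_INVALID = 0xff,
    };

    static inline int my_cmd_op_valid(unsigned int op)
    {
            return op < MY_CMD_OP_COUNT;
    }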
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e518dadc8161..cc54bdb5c712 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -161,6 +161,7 @@ static struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
/* VENTURA */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
@@ -205,6 +206,43 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
+static int
+megasas_set_dma_mask(struct megasas_instance *instance);
+static int
+megasas_alloc_ctrl_mem(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_mem(struct megasas_instance *instance);
+static inline int
+megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_init_ctrl_params(struct megasas_instance *instance);
+
+/**
+ * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
+ * @instance: Adapter soft state
+ * @dcmd: DCMD frame inside MFI command
+ * @dma_addr: DMA address of buffer to be passed to FW
+ * @dma_len: Length of DMA buffer to be passed to FW
+ * @return: void
+ */
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len)
+{
+ if (instance->consistent_mask_64bit) {
+ dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
+ dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
+
+ } else {
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags);
+ }
+}
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -2023,7 +2061,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
@@ -2114,22 +2152,19 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
megasas_check_and_restore_queue_depth(instance);
}
+static void megasas_sriov_heartbeat_handler(struct timer_list *t);
+
/**
- * megasas_start_timer - Initializes a timer object
+ * megasas_start_timer - Initializes the SR-IOV heartbeat timer object
* @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
*
*/
-void megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
+void megasas_start_timer(struct megasas_instance *instance)
+{
+ struct timer_list *timer = &instance->sriov_heartbeat_timer;
+
+ timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
+ timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
add_timer(timer);
}
@@ -2488,13 +2523,15 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+
+ megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
retval = megasas_issue_blocked_cmd(instance, cmd,
MEGASAS_ROUTINE_WAIT_TIME_VF);
else
@@ -2515,10 +2552,10 @@ out:
}
/* Handler for SR-IOV heartbeat */
-void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+static void megasas_sriov_heartbeat_handler(struct timer_list *t)
{
struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
+ from_timer(instance, t, sriov_heartbeat_timer);
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
@@ -2790,7 +2827,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
/*
* First wait for all commands to complete
*/
- if (instance->ctrl_context) {
+ if (instance->adapter_type == MFI_SERIES) {
+ ret = megasas_generic_reset(scmd);
+ } else {
struct megasas_cmd_fusion *cmd;
cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
if (cmd)
@@ -2798,8 +2837,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
- } else
- ret = megasas_generic_reset(scmd);
+ }
return ret;
}
@@ -2816,7 +2854,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_task_abort_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
@@ -2838,7 +2876,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_reset_target_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
@@ -3283,6 +3321,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
@@ -3369,6 +3410,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
+ megasas_complete_int_cmd(instance, cmd);
break;
}
}
@@ -3715,7 +3757,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
@@ -3733,7 +3775,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
else
@@ -3753,11 +3795,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_RESET_FLAGS,
&instance->reg_set->doorbell);
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
if (readl(
&instance->
@@ -3924,7 +3966,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
* max_sge_sz = 12 byte (sizeof megasas_sge64)
* Total 192 byte (3 MFI frame of 64 byte)
*/
- frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ frame_count = (instance->adapter_type == MFI_SERIES) ?
+ (15 + 1) : (3 + 1);
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
@@ -3979,7 +4022,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
- if (!instance->ctrl_context && reset_devices)
+ if ((instance->adapter_type == MFI_SERIES) && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
@@ -4033,9 +4076,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
int j;
u16 max_cmd;
struct megasas_cmd *cmd;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
@@ -4099,7 +4140,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
- if (!instance->ctrl_context)
+ if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
@@ -4135,15 +4176,17 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
+ sizeof(struct MR_PD_INFO));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4206,6 +4249,9 @@ megasas_get_pd_list(struct megasas_instance *instance)
return ret;
}
+ ci = instance->pd_list_buf;
+ ci_h = instance->pd_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4215,15 +4261,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4232,15 +4269,17 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
+ (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4251,7 +4290,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
"failed/not supported by firmware\n");
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megaraid_sas_kill_hba(instance);
else
instance->pd_list_not_supported = 1;
@@ -4308,10 +4347,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4337,6 +4372,9 @@ megasas_get_ld_list(struct megasas_instance *instance)
dma_addr_t ci_h = 0;
u32 ld_count;
+ ci = instance->ld_list_buf;
+ ci_h = instance->ld_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4346,16 +4384,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4364,15 +4392,17 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4426,8 +4456,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4453,6 +4481,9 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dma_addr_t ci_h = 0;
u32 tgtid_count;
+ ci = instance->ld_targetid_list_buf;
+ ci_h = instance->ld_targetid_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4463,16 +4494,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
-
- if (!ci) {
- dev_warn(&instance->pdev->dev,
- "Failed to alloc mem for ld_list_query\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4483,15 +4504,17 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_TARGETID_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4542,9 +4565,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4566,9 +4586,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
return;
instance->supportmax256vd =
- instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
/* Below is additional check to address future FW enhancement */
- if (instance->ctrl_info->max_lds > 64)
+ if (instance->ctrl_info_buf->max_lds > 64)
instance->supportmax256vd = 1;
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
@@ -4626,10 +4646,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct megasas_ctrl_info *ci;
- struct megasas_ctrl_info *ctrl_info;
dma_addr_t ci_h = 0;
- ctrl_info = instance->ctrl_info;
+ ci = instance->ctrl_info_buf;
+ ci_h = instance->ctrl_info_buf_h;
cmd = megasas_get_cmd(instance);
@@ -4640,45 +4660,37 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_ctrl_info), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->mbox.b[0] = 1;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct megasas_ctrl_info));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
switch (ret) {
case DCMD_SUCCESS:
- memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
/* Save required controller information in
* CPU endianness format.
*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
- le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ci->adapterOperations2);
+ le32_to_cpus((u32 *)&ci->adapterOperations3);
+ le16_to_cpus((u16 *)&ci->adapter_operations4);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4687,21 +4699,21 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
*/
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
- ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
- ctrl_info->adapter_operations4.support_pd_map_target_id;
+ ci->adapter_operations4.support_pd_map_target_id;
/*Check whether controller is iMR or MR */
- instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+ instance->is_imr = (ci->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ le16_to_cpu(ci->memory_size));
instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ ci->properties.OnOffProperties.disableOnlineCtrlReset;
instance->secure_jbod_support =
- ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ ci->adapterOperations3.supportSecurityonJBOD;
dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
@@ -4729,9 +4741,6 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
- ci, ci_h);
-
megasas_return_cmd(instance, cmd);
@@ -4775,15 +4784,17 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
+ CRASH_DMA_BUF_SIZE);
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -5091,7 +5102,7 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (reset_devices || !fusion ||
- !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
dev_info(&instance->pdev->dev,
"Jbod map is not supported %s %d\n",
__func__, __LINE__);
@@ -5170,7 +5181,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
reg_set = instance->reg_set;
- if (fusion)
+ if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
switch (instance->pdev->device) {
@@ -5211,7 +5222,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ready_state;
}
- if (instance->is_ventura) {
+ megasas_init_ctrl_params(instance);
+
+ if (megasas_set_dma_mask(instance))
+ goto fail_ready_state;
+
+ if (megasas_alloc_ctrl_mem(instance))
+ goto fail_alloc_dma_buf;
+
+ if (megasas_alloc_ctrl_dma_buffers(instance))
+ goto fail_alloc_dma_buf;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_3 =
readl(&instance->reg_set->outbound_scratch_pad_3);
instance->max_raid_mapsize = ((scratch_pad_3 >>
@@ -5229,7 +5253,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
if (fusion) {
- if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
+ if (instance->adapter_type == THUNDERBOLT_SERIES) {
+ /* Thunderbolt Series*/
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
@@ -5304,11 +5329,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
- GFP_KERNEL);
- if (instance->ctrl_info == NULL)
- goto fail_init_adapter;
-
/*
* Below are default value for legacy Firmware.
* non-fusion based controllers
@@ -5319,7 +5339,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_4 =
readl(&instance->reg_set->outbound_scratch_pad_4);
if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
@@ -5355,7 +5375,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
/* stream detection initialization */
- if (instance->is_ventura && fusion) {
+ if (instance->adapter_type == VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kzalloc(sizeof(struct LD_STREAM_DETECT *)
* MAX_LOGICAL_DRIVES_EXT,
@@ -5397,7 +5417,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- ctrl_info = instance->ctrl_info;
+ ctrl_info = instance->ctrl_info_buf;
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
le16_to_cpu(ctrl_info->max_strips_per_io);
@@ -5493,10 +5513,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 1))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else
instance->skip_heartbeat_timer_del = 1;
}
@@ -5511,9 +5528,10 @@ fail_setup_irqs:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
+fail_alloc_dma_buf:
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
fail_ready_state:
- kfree(instance->ctrl_info);
- instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
@@ -5586,13 +5604,14 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ megasas_set_dma_settings(instance, dcmd, el_info_h,
+ sizeof(struct megasas_evt_log_info));
if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
DCMD_SUCCESS) {
@@ -5717,7 +5736,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
@@ -5725,8 +5744,9 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
+ sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
@@ -5793,18 +5813,18 @@ megasas_get_target_prop(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len =
cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
- dcmd->sgl.sge32[0].phys_addr =
- cpu_to_le32(instance->tgt_prop_h);
- dcmd->sgl.sge32[0].length =
- cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
+ sizeof(struct MR_TARGET_PROPERTIES));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance,
cmd, MFI_IO_TIMEOUT_SECS);
else
@@ -5929,234 +5949,408 @@ static int megasas_io_attach(struct megasas_instance *instance)
return 0;
}
+/**
+ * megasas_set_dma_mask - Set DMA mask for supported controllers
+ *
+ * @instance: Adapter soft state
+ * Description:
+ *
+ * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ *
+ * For Invader:
+ * By default, driver/FW will operate with a 32-bit consistent
+ * (coherent) DMA mask; if setting the 32-bit consistent mask
+ * fails, the driver falls back to a 64-bit consistent mask,
+ * provided the FW is truly 64-bit DMA capable.
+ *
+ * For older controllers (Thunderbolt and MFI based adapters):
+ * driver/FW will operate with 32-bit consistent DMA addresses.
+ */
static int
-megasas_set_dma_mask(struct pci_dev *pdev)
+megasas_set_dma_mask(struct megasas_instance *instance)
{
- /*
- * All our controllers are capable of performing 64-bit DMA
- */
+ u64 consistent_mask;
+ struct pci_dev *pdev;
+ u32 scratch_pad_2;
+
+ pdev = instance->pdev;
+ consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
+ DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
+ /*
+ * If 32 bit DMA mask fails, then try for 64 bit mask
+ * for FW capable of handling 64 bit DMA.
+ */
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ goto fail_set_dma_mask;
+ else if (dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(64)))
goto fail_set_dma_mask;
}
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
- goto fail_set_dma_mask;
- }
- /*
- * Ensure that all data structures are allocated in 32-bit
- * memory.
- */
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- /* Try 32bit DMA mask and 32 bit Consistent dma mask */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- dev_info(&pdev->dev, "set 32bit DMA mask"
- "and 32 bit consistent mask\n");
- else
- goto fail_set_dma_mask;
- }
+ } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
+ instance->consistent_mask_64bit = false;
+ else
+ instance->consistent_mask_64bit = true;
+
+ dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
+ (instance->consistent_mask_64bit ? "64" : "32"));
return 0;
fail_set_dma_mask:
- return 1;
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ return -1;
+
}
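The fallback order implemented above is: the series-dependent coherent mask first, then a 32-bit mask, then a full 64-bit mask only if the firmware scratch pad advertises 64-bit DMA capability. A simplified, user-space sketch of that decision, assuming a hypothetical set_mask() stub in place of the real dma_set_mask*() helpers:

#include <stdbool.h>
#include <stdint.h>

static bool set_mask(uint64_t mask) { (void)mask; return true; } /* stub */

/* Simplified model of the decision above; not the driver code itself. */
static int pick_coherent_mask(bool is_ventura, bool fw_handles_64bit_dma)
{
	uint64_t preferred = is_ventura ? UINT64_MAX : UINT32_MAX;

	if (set_mask(preferred))
		return 0;		/* series-dependent default worked */
	if (set_mask(UINT32_MAX))
		return 0;		/* fall back to a 32-bit mask */
	if (fw_handles_64bit_dma && set_mask(UINT64_MAX))
		return 0;		/* FW can take a full 64-bit mask */
	return -1;			/* no usable DMA mask */
}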
-/**
- * megasas_probe_one - PCI hotplug entry point
- * @pdev: PCI device structure
- * @id: PCI ids of supported hotplugged adapter
+/*
+ * megasas_set_adapter_type - Set adapter type.
+ * Supported controllers can be divided into
+ * 4 categories - enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * };
+ * @instance: Adapter soft state
+ * return: void
*/
-static int megasas_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static inline void megasas_set_adapter_type(struct megasas_instance *instance)
{
- int rval, pos;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
- u16 control = 0;
- struct fusion_context *fusion = NULL;
-
- /* Reset MSI-X in the kdump kernel */
- if (reset_devices) {
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
- &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev,
- pos + PCI_MSIX_FLAGS,
- control &
- ~PCI_MSIX_FLAGS_ENABLE);
- }
+ if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
+ instance->adapter_type = MFI_SERIES;
+ } else {
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_VENTURA:
+ case PCI_DEVICE_ID_LSI_CRUSADER:
+ case PCI_DEVICE_ID_LSI_HARPOON:
+ case PCI_DEVICE_ID_LSI_TOMCAT:
+ case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+ case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+ instance->adapter_type = VENTURA_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ instance->adapter_type = THUNDERBOLT_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_INTRUDER:
+ case PCI_DEVICE_ID_LSI_INTRUDER_24:
+ case PCI_DEVICE_ID_LSI_CUTLASS_52:
+ case PCI_DEVICE_ID_LSI_CUTLASS_53:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->adapter_type = INVADER_SERIES;
+ break;
+ default: /* For all other supported controllers */
+ instance->adapter_type = MFI_SERIES;
+ break;
}
}
+}
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
+static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
+{
+ instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->consumer_h);
- if (rval) {
- return rval;
+ if (!instance->producer || !instance->consumer) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate memory for producer, consumer\n");
+ return -1;
}
- pci_set_master(pdev);
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ return 0;
+}
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
+/**
+ * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
+ * structures which are not common across MFI
+ * adapters and fusion adapters.
+ * For MFI based adapters, allocate producer and
+ * consumer buffers. For fusion adapters, allocate
+ * memory for fusion context.
+ * @instance: Adapter soft state
+ * return: 0 for SUCCESS
+ */
+static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
+{
+ switch (instance->adapter_type) {
+ case MFI_SERIES:
+ if (megasas_alloc_mfi_ctrl_mem(instance))
+ return -ENOMEM;
+ break;
+ case VENTURA_SERIES:
+ case THUNDERBOLT_SERIES:
+ case INVADER_SERIES:
+ if (megasas_alloc_fusion_context(instance))
+ return -ENOMEM;
+ break;
+ }
- host = scsi_host_alloc(&megasas_template,
- sizeof(struct megasas_instance));
+ return 0;
+}
- if (!host) {
- dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
- goto fail_alloc_instance;
+/*
+ * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
+ * producer, consumer buffers for MFI adapters
+ *
+ * @instance - Adapter soft instance
+ *
+ */
+static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
+{
+ if (instance->adapter_type == MFI_SERIES) {
+ if (instance->producer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ } else {
+ megasas_free_fusion_context(instance);
}
+}
- instance = (struct megasas_instance *)host->hostdata;
- memset(instance, 0, sizeof(*instance));
- atomic_set(&instance->fw_reset_no_pci_access, 0);
- instance->pdev = pdev;
+/**
+ * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
+ * driver load time
+ *
+ * @instance: Adapter soft instance
+ * @return: 0 for SUCCESS
+ */
+static inline
+int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
- switch (instance->pdev->device) {
- case PCI_DEVICE_ID_LSI_VENTURA:
- case PCI_DEVICE_ID_LSI_HARPOON:
- case PCI_DEVICE_ID_LSI_TOMCAT:
- case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
- case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
- instance->is_ventura = true;
- case PCI_DEVICE_ID_LSI_FUSION:
- case PCI_DEVICE_ID_LSI_PLASMA:
- case PCI_DEVICE_ID_LSI_INVADER:
- case PCI_DEVICE_ID_LSI_FURY:
- case PCI_DEVICE_ID_LSI_INTRUDER:
- case PCI_DEVICE_ID_LSI_INTRUDER_24:
- case PCI_DEVICE_ID_LSI_CUTLASS_52:
- case PCI_DEVICE_ID_LSI_CUTLASS_53:
- {
- if (megasas_alloc_fusion_context(instance)) {
- megasas_free_fusion_context(instance);
- goto fail_alloc_dma_buf;
- }
- fusion = instance->ctrl_context;
+ instance->evt_detail =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h);
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
- fusion->adapter_type = THUNDERBOLT_SERIES;
- else if (instance->is_ventura)
- fusion->adapter_type = VENTURA_SERIES;
- else
- fusion->adapter_type = INVADER_SERIES;
- }
- break;
- default: /* For all other supported controllers */
-
- instance->producer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->consumer_h);
-
- if (!instance->producer || !instance->consumer) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
- "memory for producer, consumer\n");
- goto fail_alloc_dma_buf;
+ if (!instance->evt_detail) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate event detail buffer\n");
+ return -ENOMEM;
+ }
+
+ if (fusion) {
+ fusion->ioc_init_request =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &fusion->ioc_init_request_phys,
+ GFP_KERNEL);
+
+ if (!fusion->ioc_init_request) {
+ dev_err(&pdev->dev,
+				"Failed to allocate ioc init request buffer\n");
+ return -ENOMEM;
}
+ }
- *instance->producer = 0;
- *instance->consumer = 0;
- break;
+ instance->pd_list_buf =
+ pci_alloc_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ &instance->pd_list_buf_h);
+
+ if (!instance->pd_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
- /* Crash dump feature related initialisation*/
- instance->drv_buf_index = 0;
- instance->drv_buf_alloc = 0;
- instance->crash_dump_fw_support = 0;
- instance->crash_dump_app_support = 0;
- instance->fw_crash_state = UNAVAILABLE;
- spin_lock_init(&instance->crashdump_lock);
- instance->crash_dump_buf = NULL;
+ instance->ctrl_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_ctrl_info),
+ &instance->ctrl_info_buf_h);
- megasas_poll_wait_aen = 0;
- instance->flag_ieee = 0;
- instance->ev = NULL;
- instance->issuepend_done = 1;
- atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- instance->is_imr = 0;
+ if (!instance->ctrl_info_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate controller info buffer\n");
+ return -ENOMEM;
+ }
- instance->evt_detail = pci_alloc_consistent(pdev,
- sizeof(struct
- megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->ld_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_LIST),
+ &instance->ld_list_buf_h);
- if (!instance->evt_detail) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
- "event detail structure\n");
- goto fail_alloc_dma_buf;
+ if (!instance->ld_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
+ return -ENOMEM;
+ }
+
+ instance->ld_targetid_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h);
+
+ if (!instance->ld_targetid_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate LD targetid list buffer\n");
+ return -ENOMEM;
}
if (!reset_devices) {
- instance->system_info_buf = pci_zalloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
- if (!instance->system_info_buf)
- dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+ instance->system_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+ instance->pd_info =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h);
+ instance->tgt_prop =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h);
+ instance->crash_dump_buf =
+ pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
- instance->pd_info = pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ if (!instance->system_info_buf)
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate system info buffer\n");
if (!instance->pd_info)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
-
- instance->tgt_prop = pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate pd_info buffer\n");
if (!instance->tgt_prop)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate tgt_prop buffer\n");
- instance->crash_dump_buf = pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
if (!instance->crash_dump_buf)
- dev_err(&pdev->dev, "Can't allocate Firmware "
- "crash dump DMA buffer\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate crash dump buffer\n");
}
+ return 0;
+}
+
+/*
+ * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
+ * during driver load time
+ *
+ * @instance- Adapter soft instance
+ *
+ */
+static inline
+void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (fusion && fusion->ioc_init_request)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ fusion->ioc_init_request,
+ fusion->ioc_init_request_phys);
+
+ if (instance->pd_list_buf)
+ pci_free_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ instance->pd_list_buf,
+ instance->pd_list_buf_h);
+
+ if (instance->ld_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ instance->ld_list_buf,
+ instance->ld_list_buf_h);
+
+ if (instance->ld_targetid_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ instance->ld_targetid_list_buf,
+ instance->ld_targetid_list_buf_h);
+
+ if (instance->ctrl_info_buf)
+ pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ instance->ctrl_info_buf,
+ instance->ctrl_info_buf_h);
+
+ if (instance->system_info_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ instance->system_info_buf,
+ instance->system_info_h);
+
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info, instance->pd_info_h);
+
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop, instance->tgt_prop_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+}
+
+/*
+ * megasas_init_ctrl_params - Initialize controller's instance
+ * parameters before FW init
+ * @instance - Adapter soft instance
+ * @return - void
+ */
+static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+{
+ instance->fw_crash_state = UNAVAILABLE;
+
+ megasas_poll_wait_aen = 0;
+ instance->issuepend_done = 1;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+
/*
* Initialize locks and queues
*/
INIT_LIST_HEAD(&instance->cmd_pool);
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
- atomic_set(&instance->fw_outstanding,0);
+ atomic_set(&instance->fw_outstanding, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
+ spin_lock_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->reset_mutex);
mutex_init(&instance->hba_mutex);
-
- /*
- * Initialize PCI related and misc parameters
- */
- instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
- instance->init_id = MEGASAS_DEFAULT_INIT_ID;
- instance->ctrl_info = NULL;
-
+ mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
megasas_dbg_lvl = 0;
@@ -6166,11 +6360,75 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else
+ } else {
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+ }
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * PCI prepping: enable device set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->pdev = pdev;
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ megasas_set_adapter_type(instance);
/*
* Initialize MFI Firmware
@@ -6246,37 +6504,16 @@ fail_io_attach:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
-fail_alloc_dma_buf:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
scsi_host_put(host);
fail_alloc_instance:
-fail_set_dma_mask:
pci_disable_device(pdev);
return -ENODEV;
@@ -6453,7 +6690,13 @@ megasas_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- if (megasas_set_dma_mask(pdev))
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
/*
@@ -6462,12 +6705,6 @@ megasas_resume(struct pci_dev *pdev)
atomic_set(&instance->fw_outstanding, 0);
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
-
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
@@ -6480,7 +6717,7 @@ megasas_resume(struct pci_dev *pdev)
if (rval < 0)
goto fail_reenable_msix;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
megasas_free_cmds(instance);
@@ -6507,10 +6744,7 @@ megasas_resume(struct pci_dev *pdev)
/* Re-launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else {
instance->skip_heartbeat_timer_del = 1;
goto fail_init_mfi;
@@ -6530,30 +6764,13 @@ megasas_resume(struct pci_dev *pdev)
return 0;
fail_init_mfi:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
+fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
-fail_reenable_msix:
pci_disable_device(pdev);
@@ -6656,7 +6873,7 @@ skip_firing_dcmds:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
@@ -6664,7 +6881,7 @@ skip_firing_dcmds:
}
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
@@ -6689,30 +6906,10 @@ skip_firing_dcmds:
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
- megasas_free_fusion_context(instance);
} else {
megasas_release_mfi(instance);
- pci_free_consistent(pdev, sizeof(u32),
- instance->producer,
- instance->producer_h);
- pci_free_consistent(pdev, sizeof(u32),
- instance->consumer,
- instance->consumer_h);
}
- kfree(instance->ctrl_info);
-
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail, instance->evt_detail_h);
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6730,13 +6927,9 @@ skip_firing_dcmds:
instance->hb_host_mem,
instance->hb_host_mem_h);
- if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
- instance->crash_dump_buf, instance->crash_dump_h);
+ megasas_free_ctrl_dma_buffers(instance);
- if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
- instance->system_info_buf, instance->system_info_h);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
@@ -6875,7 +7068,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
struct megasas_iocpacket __user * user_ioc,
struct megasas_iocpacket *ioc)
{
- struct megasas_sge32 *kern_sge32;
+ struct megasas_sge64 *kern_sge64 = NULL;
+ struct megasas_sge32 *kern_sge32 = NULL;
struct megasas_cmd *cmd;
void *kbuff_arr[MAX_IOCTL_SGE];
dma_addr_t buf_handle = 0;
@@ -6883,7 +7077,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
void *sense = NULL;
dma_addr_t sense_handle;
unsigned long *sense_ptr;
- u32 opcode;
+ u32 opcode = 0;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -6893,6 +7087,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
+ if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ dev_err(&instance->pdev->dev,
+ "Received invalid ioctl command 0x%x\n",
+ ioc->frame.hdr.cmd);
+ return -ENOTSUPP;
+ }
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
@@ -6908,10 +7109,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
- MFI_FRAME_SGL64 |
+
+ cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
+
+ if (instance->consistent_mask_64bit)
+ cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+ else
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
- opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+
+ if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
@@ -6934,8 +7143,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* kernel buffers in SGLs. The location of SGL is embedded in the
* struct iocpacket itself.
*/
- kern_sge32 = (struct megasas_sge32 *)
- ((unsigned long)cmd->frame + ioc->sgl_off);
+ if (instance->consistent_mask_64bit)
+ kern_sge64 = (struct megasas_sge64 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+ else
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
/*
* For each user buffer, create a mirror buffer and copy in
@@ -6958,8 +7171,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
- kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ if (instance->consistent_mask_64bit) {
+ kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
+ kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ } else {
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ }
/*
* We created a kernel buffer corresponding to the
@@ -6982,7 +7200,10 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = cpu_to_le32(sense_handle);
+ if (instance->consistent_mask_64bit)
+ *sense_ptr = cpu_to_le64(sense_handle);
+ else
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
@@ -6993,8 +7214,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
- "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, opcode, cmd->cmd_status_drv);
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
return -EBUSY;
}
@@ -7054,10 +7276,16 @@ out:
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i]) {
- dma_free_coherent(&instance->pdev->dev,
- le32_to_cpu(kern_sge32[i].length),
- kbuff_arr[i],
- le32_to_cpu(kern_sge32[i].phys_addr));
+ if (instance->consistent_mask_64bit)
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge64[i].length),
+ kbuff_arr[i],
+ le64_to_cpu(kern_sge64[i].phys_addr));
+ else
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index ecc699a65bac..bfad9bfc313f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -737,7 +737,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -747,8 +747,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -762,7 +762,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -853,7 +853,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -863,8 +863,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -880,7 +880,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -1088,10 +1088,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
cpu_to_le16(raid->fpIoTimeoutForLd ?
raid->fpIoTimeoutForLd :
map->raidMap.fpPdIoTimeoutSec);
- if (fusion->adapter_type == INVADER_SERIES)
+ if (instance->adapter_type == INVADER_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
- else if (!instance->is_ventura)
+ else if (instance->adapter_type == THUNDERBOLT_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->virtual_disk_tgt_id = raid->targetId;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 11bd2e698b84..65dc4fea6352 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -85,19 +85,45 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
-void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
int initial);
-void megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval);
+void megasas_start_timer(struct megasas_instance *instance);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance);
+/**
+ * megasas_check_same_4gb_region -	check whether an allocation
+ *					crosses a 4GB boundary or not
+ * @instance - adapter's soft instance
+ * start_addr - start address of DMA allocation
+ * size - size of allocation in bytes
+ * return - true : start and end of the allocation share the same
+ *		   upper 32 bits (no 4GB boundary is crossed)
+ *	   false : the allocation crosses a 4GB boundary
+ */
+static inline bool megasas_check_same_4gb_region
+ (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
+{
+ dma_addr_t end_addr;
+
+ end_addr = start_addr + size;
+ if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
+ (unsigned long long)start_addr,
+ (unsigned long long)end_addr);
+ return false;
+ }
+
+ return true;
+}
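The check boils down to comparing the upper 32 address bits of the first byte and of one-past-the-last byte. A tiny standalone version of the same test, for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Standalone equivalent of the test above (illustration only). */
static bool same_4gb_region(uint64_t start, size_t size)
{
	return (start >> 32) == ((start + size) >> 32);
}

/* e.g. same_4gb_region(0xFFFFF000ULL, 0x2000) is false: it crosses 4GB. */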
/**
* megasas_enable_intr_fusion - Enables interrupts
@@ -200,7 +226,7 @@ static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
writel(le32_to_cpu(req_desc->u.low),
&instance->reg_set->inbound_single_queue_port);
else {
@@ -243,7 +269,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
reg_set = instance->reg_set;
/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
- if (!instance->is_ventura)
+ if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
@@ -254,8 +280,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
- "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
- cur_max_fw_cmds, ldio_threshold);
+ "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
if (fw_boot_context == OCR_CONTEXT) {
cur_max_fw_cmds = cur_max_fw_cmds - 1;
@@ -270,10 +296,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->max_fw_cmds = cur_max_fw_cmds;
instance->ldio_threshold = ldio_threshold;
- if (!instance->is_rdpq)
- instance->max_fw_cmds =
- min_t(u16, instance->max_fw_cmds, 1024);
-
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
@@ -283,19 +305,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
* does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
-
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
- instance->cur_can_queue = instance->max_scsi_cmds;
- instance->host->can_queue = instance->cur_can_queue;
}
-
- if (instance->is_ventura)
- instance->max_mpt_cmds =
- instance->max_fw_cmds * RAID_1_PEER_CMDS;
- else
- instance->max_mpt_cmds = instance->max_fw_cmds;
}
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -308,17 +318,23 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
struct megasas_cmd_fusion *cmd;
- /* SG, Sense */
- for (i = 0; i < instance->max_mpt_cmds; i++) {
- cmd = fusion->cmd_list[i];
- if (cmd) {
- if (cmd->sg_frame)
- dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
- if (cmd->sense)
- dma_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ if (fusion->sense)
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+
+ /* SG */
+ if (fusion->cmd_list) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ dma_pool_free(fusion->sg_dma_pool,
+ cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ }
+ kfree(cmd);
}
+ kfree(fusion->cmd_list);
}
if (fusion->sg_dma_pool) {
@@ -350,13 +366,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
dma_pool_destroy(fusion->io_request_frames_pool);
fusion->io_request_frames_pool = NULL;
}
-
-
- /* cmd_list */
- for (i = 0; i < instance->max_mpt_cmds; i++)
- kfree(fusion->cmd_list[i]);
-
- kfree(fusion->cmd_list);
}
/**
@@ -370,10 +379,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
+ int sense_sz;
+ u32 offset;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
-
+ sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
fusion->sg_dma_pool =
dma_pool_create("mr_sg", &instance->pdev->dev,
@@ -382,7 +393,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
dma_pool_create("mr_sense", &instance->pdev->dev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ sense_sz, 64, 0);
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev,
@@ -390,6 +401,51 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return -ENOMEM;
}
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+	/* The sense buffer, request frames and reply descriptors must each
+	 * lie within a single 4GB region; the function below checks this.
+	 * If the check fails, the old allocation and pool are destroyed and
+	 * a new pci pool is created with an updated alignment, chosen so
+	 * that the next allocation, if it succeeds, always meets the same
+	 * 4GB-region requirement.
+	 * The real requirement is not alignment as such: the start and end
+	 * of the DMA buffer must share the same upper 32 address bits.
+ */
+
+ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
+ sense_sz)) {
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+ fusion->sense = NULL;
+ dma_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sense_dma_pool =
+ dma_pool_create("mr_sense_align", &instance->pdev->dev,
+ sense_sz, roundup_pow_of_two(sense_sz),
+ 0);
+ if (!fusion->sense_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL,
+ &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
/*
* Allocate and attach a frame to each of the commands in cmd_list
*/
@@ -398,9 +454,11 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sg_frame || !cmd->sense) {
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
+ if (!cmd->sg_frame) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
@@ -410,13 +468,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* create sense buffer for the raid 1/10 fp */
for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sense) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
}
return 0;
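The re-allocation with roundup_pow_of_two() alignment works because a buffer of sz bytes whose start address is a multiple of a power of two >= sz cannot straddle any larger power-of-two boundary, including the 4GB one, as long as sz itself is at most 4GB. A small self-contained illustration of that property:

#include <assert.h>
#include <stdint.h>

/*
 * Illustration only: a sz-byte buffer whose start is a multiple of a
 * power-of-two alignment >= sz keeps its first and last byte in the
 * same 4GB window, provided sz <= 4GB.
 */
static void check_alignment_property(uint64_t align, uint64_t sz)
{
	uint64_t start = 7 * align;	/* any multiple of the alignment */

	assert(align && (align & (align - 1)) == 0);	/* power of two */
	assert(sz && sz <= align && align <= (1ULL << 32));
	assert((start >> 32) == ((start + sz - 1) >> 32));
}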
@@ -468,16 +523,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- fusion->req_frames_desc =
- dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
- if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
+retry_alloc:
fusion->io_request_frames_pool =
dma_pool_create("mr_ioreq", &instance->pdev->dev,
fusion->io_frames_alloc_sz, 16, 0);
@@ -492,10 +538,62 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
dma_pool_alloc(fusion->io_request_frames_pool,
GFP_KERNEL, &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+ megasas_configure_queue_sizes(instance);
+ goto retry_alloc;
+ } else {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->io_request_frames_phys,
+ fusion->io_frames_alloc_sz)) {
+ dma_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ fusion->io_request_frames = NULL;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+
+ fusion->io_request_frames_pool =
+ dma_pool_create("mr_ioreq_align",
+ &instance->pdev->dev,
+ fusion->io_frames_alloc_sz,
+ roundup_pow_of_two(fusion->io_frames_alloc_sz),
+ 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+ if (!fusion->req_frames_desc) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
return 0;
}
@@ -526,6 +624,41 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->reply_frames_desc_phys[0],
+ (fusion->reply_alloc_sz * count))) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+ fusion->reply_frames_desc[0] = NULL;
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+ fusion->reply_frames_desc_pool =
+ dma_pool_create("mr_reply_align",
+ &instance->pdev->dev,
+ fusion->reply_alloc_sz * count,
+ roundup_pow_of_two(fusion->reply_alloc_sz * count),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->reply_frames_desc[0] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL,
+ &fusion->reply_frames_desc_phys[0]);
+
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
@@ -544,52 +677,124 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
- int i, j, count;
+ int i, j, k, msix_count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
+ dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
+ u8 dma_alloc_count, abs_index;
+ u32 chunk_size, array_size, offset;
fusion = instance->ctrl_context;
+ chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
+ MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
- count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ memset(fusion->rdpq_virt, 0, array_size);
+ msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
- fusion->reply_alloc_sz,
- 16, 0);
-
- if (!fusion->reply_frames_desc_pool) {
+ chunk_size, 16, 0);
+ fusion->reply_frames_desc_pool_align =
+ dma_pool_create("mr_rdpq_align",
+ &instance->pdev->dev,
+ chunk_size,
+ roundup_pow_of_two(chunk_size),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool ||
+ !fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < count; i++) {
- fusion->reply_frames_desc[i] =
- dma_pool_alloc(fusion->reply_frames_desc_pool,
- GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
- if (!fusion->reply_frames_desc[i]) {
+/*
+ * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for
+ * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) must lie
+ * within a 4GB boundary, i.e. all reply queues in a set must share the
+ * same upper 32 bits of their memory address. The driver therefore
+ * allocates the DMA'able memory for the reply queues accordingly, and
+ * applies the stricter VENTURA_SERIES limitation to INVADER_SERIES too.
+ */
+ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+
+ for (i = 0; i < dma_alloc_count; i++) {
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+		/* The reply descriptor chunk must lie within a single 4GB
+		 * region; the function below checks this. If the check
+		 * fails, the chunk is re-allocated from a second pci pool
+		 * created with an updated alignment - for RDPQ buffers the
+		 * driver always creates two separate pci pools. The
+		 * alignment is chosen so that the next allocation, if it
+		 * succeeds, always meets the same 4GB-region requirement.
+		 * rdpq_tracker records each chunk's physical and virtual
+		 * address and its pci pool descriptor, which the driver
+		 * uses later when freeing the resources.
+ */
+ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
+ chunk_size)) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ rdpq_chunk_virt[i],
+ rdpq_chunk_phys[i]);
- fusion->rdpq_virt[i].RDPQBaseAddress =
- cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool_align,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool_align;
+ } else {
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool;
+ }
- reply_desc = fusion->reply_frames_desc[i];
- for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
+ fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
}
+
+ for (k = 0; k < dma_alloc_count; k++) {
+ for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
+ abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
+
+ if (abs_index == msix_count)
+ break;
+ offset = fusion->reply_alloc_sz * i;
+ fusion->rdpq_virt[abs_index].RDPQBaseAddress =
+ cpu_to_le64(rdpq_chunk_phys[k] + offset);
+ fusion->reply_frames_desc_phys[abs_index] =
+ rdpq_chunk_phys[k] + offset;
+ fusion->reply_frames_desc[abs_index] =
+ (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
+
+ reply_desc = fusion->reply_frames_desc[abs_index];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+ }
+ }
+
return 0;
}
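To make the indexing above explicit: reply queue q is served from DMA chunk q / RDPQ_MAX_INDEX_IN_ONE_CHUNK at byte offset (q % RDPQ_MAX_INDEX_IN_ONE_CHUNK) * reply_alloc_sz, so all queues of one chunk share the same upper 32 address bits. A compact sketch of that address computation (illustration only, with a hypothetical chunk_phys array standing in for rdpq_chunk_phys[]):

#include <stdint.h>

#define CHUNK_QUEUES	16	/* stands in for RDPQ_MAX_INDEX_IN_ONE_CHUNK */

/* Physical address of reply queue 'q', given per-chunk base addresses. */
static uint64_t rdpq_queue_addr(const uint64_t *chunk_phys,
				uint32_t reply_alloc_sz, unsigned int q)
{
	return chunk_phys[q / CHUNK_QUEUES] +
	       (uint64_t)(q % CHUNK_QUEUES) * reply_alloc_sz;
}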
@@ -601,15 +806,18 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
fusion = instance->ctrl_context;
- for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
- if (fusion->reply_frames_desc[i])
- dma_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc[i],
- fusion->reply_frames_desc_phys[i]);
+ for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
+ if (fusion->rdpq_tracker[i].pool_entry_virt)
+ dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
+ fusion->rdpq_tracker[i].pool_entry_virt,
+ fusion->rdpq_tracker[i].pool_entry_phys);
+
}
if (fusion->reply_frames_desc_pool)
dma_pool_destroy(fusion->reply_frames_desc_pool);
+ if (fusion->reply_frames_desc_pool_align)
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
pci_free_consistent(instance->pdev,
@@ -664,9 +872,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- if (megasas_alloc_cmdlist_fusion(instance))
- goto fail_exit;
-
if (megasas_alloc_request_fusion(instance))
goto fail_exit;
@@ -677,6 +882,11 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_reply_fusion(instance))
goto fail_exit;
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -773,22 +983,34 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
+ struct timeval tv;
+ bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
- cmd = megasas_get_cmd(instance);
+ ioc_init_handle = fusion->ioc_init_request_phys;
+ IOCInitMessage = fusion->ioc_init_request;
- if (!cmd) {
- dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
- ret = 1;
- goto fail_get_cmd;
- }
+ cmd = fusion->ioc_init_cmd;
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ if (instance->adapter_type == INVADER_SERIES) {
+ cur_fw_64bit_dma_capable =
+ (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+
+ if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
+ dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
+ "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
+ megaraid_sas_kill_hba(instance);
+ ret = 1;
+ goto fail_fw_init;
+ }
+ }
+
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
" from RDPQ mode to non RDPQ mode\n");
@@ -801,18 +1023,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
- IOCInitMessage =
- dma_alloc_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- &ioc_init_handle, GFP_KERNEL);
-
- if (!IOCInitMessage) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "IOCInitMessage\n");
- ret = 1;
- goto fail_fw_init;
- }
-
memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
@@ -828,8 +1038,15 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->MsgFlags = instance->is_rdpq ?
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
+
+ do_gettimeofday(&tv);
+ /* Convert to milliseconds as per FW requirement */
+ IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
+ (tv.tv_usec / 1000));
+
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
@@ -845,7 +1062,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
/* driver support Extended MSIX */
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
drv_ops->mfi_capabilities.support_additional_msix = 1;
/* driver supports HA / Remote LUN over Fast Path interface */
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -863,6 +1080,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+
+ if (instance->consistent_mask_64bit)
+ drv_ops->mfi_capabilities.support_64bit_mode = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -872,8 +1093,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
strlen(sys_info) > 64 ? 64 : strlen(sys_info));
instance->system_info_buf->systemIdLength =
strlen(sys_info) > 64 ? 64 : strlen(sys_info);
- init_frame->system_info_lo = instance->system_info_h;
- init_frame->system_info_hi = 0;
+ init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
+ init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
}
init_frame->queue_info_new_phys_addr_hi =
@@ -920,12 +1141,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
ret = 0;
fail_fw_init:
- megasas_return_cmd(instance, cmd);
- if (IOCInitMessage)
- dma_free_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- IOCInitMessage, ioc_init_handle);
-fail_get_cmd:
dev_err(&instance->pdev->dev,
"Init cmd return status %s for SCSI host %d\n",
ret ? "FAILED" : "SUCCESS", instance->host->host_no);
@@ -970,6 +1185,15 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ instance->jbod_seq_cmd = cmd;
+ } else {
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ }
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -977,21 +1201,16 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+ megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
if (pend) {
- dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
- instance->jbod_seq_cmd = cmd;
instance->instancet->issue_dcmd(instance, cmd);
return 0;
}
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
-
/* Below code is only for non pended DCMD */
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -1004,7 +1223,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
ret = -EINVAL;
}
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
if (ret == DCMD_SUCCESS)
@@ -1072,21 +1291,21 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
+
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
megasas_return_cmd(instance, cmd);
@@ -1176,15 +1395,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
instance->map_update_cmd = cmd;
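Illustrative aside, not part of the patch: the DCMD paths in this file now defer SGE setup to megasas_set_dma_settings(), which the series adds in megaraid_sas_base.c and which is not shown in these hunks. Presumably it selects a 64-bit or 32-bit SGE from instance->consistent_mask_64bit and applies the final cpu_to_le16() to dcmd->flags, which would explain why the call sites above now assign MFI_FRAME_DIR_* without the conversion. A hedged sketch under those assumptions (example_set_dma_settings is hypothetical):

static void example_set_dma_settings(struct megasas_instance *instance,
				     struct megasas_dcmd_frame *dcmd,
				     dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		/* one 64-bit SGE, flag the frame accordingly */
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
	} else {
		/* legacy 32-bit SGE */
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}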
@@ -1340,6 +1559,94 @@ ld_drv_map_alloc_fail:
}
/**
+ * megasas_configure_queue_sizes - Calculate size of request desc queue,
+ * reply desc queue,
+ * IO request frame queue, set can_queue.
+ * @instance: Adapter soft state
+ * @return: void
+ */
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ u16 max_cmd;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ if (instance->adapter_type == VENTURA_SERIES)
+ instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
+ else
+ instance->max_mpt_cmds = instance->max_fw_cmds;
+
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
+
+ fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
+
+ fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
+ instance->max_mpt_cmds;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
+ (fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+ * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+}
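Illustrative aside, not part of the patch: the helper above sizes the reply descriptor queue by rounding (max_cmd + 1) up to a multiple of 16 and doubling it, and derives can_queue from max_fw_cmds minus the slots reserved for internal and ioctl commands. A minimal sketch of the depth arithmetic (example_reply_q_depth is illustrative only):

static inline u16 example_reply_q_depth(u16 max_cmd)
{
	/* e.g. max_cmd = 1008: roundup(1009, 16) = 1024, depth = 2048 */
	return 2 * (((max_cmd + 1 + 15) / 16) * 16);
}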
+
+static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd;
+
+ fusion = instance->ctrl_context;
+
+ cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ &cmd->frame_phys_addr, GFP_KERNEL);
+
+ if (!cmd->frame) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ fusion->ioc_init_cmd = cmd;
+ return 0;
+}
+
+/**
+ * megasas_free_ioc_init_cmd - Free IOC INIT command frame
+ * @instance: Adapter soft state
+ */
+static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
+ dma_free_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ fusion->ioc_init_cmd->frame,
+ fusion->ioc_init_cmd->frame_phys_addr);
+
+ if (fusion->ioc_init_cmd)
+ kfree(fusion->ioc_init_cmd);
+}
+
+/**
* megasas_init_adapter_fusion - Initializes the FW
* @instance: Adapter soft state
*
@@ -1350,7 +1657,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u16 max_cmd;
u32 scratch_pad_2;
int i = 0, count;
@@ -1366,17 +1672,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
instance->max_mfi_cmds =
MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
- max_cmd = instance->max_fw_cmds;
-
- fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
-
- fusion->request_alloc_sz =
- sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
- fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
- *(fusion->reply_q_depth);
- fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
- (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
- * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+ megasas_configure_queue_sizes(instance);
scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1434,6 +1730,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ if (megasas_alloc_ioc_init_frame(instance))
+ return 1;
+
/*
* Allocate memory for descriptors
* Create a pool of commands
@@ -1471,6 +1770,7 @@ fail_ioc_init:
fail_alloc_cmds:
megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
+ megasas_free_ioc_init_cmd(instance);
return 1;
}
@@ -1803,7 +2103,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
@@ -1813,7 +2113,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
if (i == sge_count - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
sgl_ptr++;
@@ -1823,7 +2123,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
if ((le16_to_cpu(cmd->io_request->IoFlags) &
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1839,7 +2139,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags =
@@ -2363,7 +2663,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
megasas_stream_detect(instance, cmd, &io_info);
spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
@@ -2416,7 +2716,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.raid_context.reg_lock_flags ==
REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
@@ -2429,7 +2729,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.nseg_type |=
(1 << RAID_CONTEXT_NSEG_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2448,7 +2748,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
io_request->RaidContext.raid_context_g35.span_arm
= io_info.span_arm;
else
@@ -2458,7 +2758,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2481,7 +2781,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
(io_request->RaidContext.raid_context.reg_lock_flags
== REGION_TYPE_UNUSED))
@@ -2494,7 +2794,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2569,7 +2869,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (!instance->is_ventura)
+ if (instance->adapter_type != VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2654,7 +2954,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2702,7 +3002,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeout_value =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
@@ -2785,7 +3085,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
return 1;
}
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -2808,7 +3108,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLowAddress =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
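Illustrative aside: like the SenseBufferAddressHigh and system_info changes earlier in this patch, the hunk above stops silently truncating a DMA address and instead programs the low 32 bits explicitly, with the high half carried elsewhere. The pattern in isolation (example_split_dma_addr is illustrative only):

static inline void example_split_dma_addr(dma_addr_t addr,
					  __le32 *lo, __le32 *hi)
{
	*lo = cpu_to_le32(lower_32_bits(addr));	/* bits 31..0  */
	*hi = cpu_to_le32(upper_32_bits(addr));	/* bits 63..32 */
}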
@@ -2849,7 +3150,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
- cpu_to_le32(r1_cmd->sense_phys_addr);
+ cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
r1_cmd->scmd = cmd->scmd;
req_desc2 = megasas_get_request_descriptor(instance,
(r1_cmd->index - 1));
@@ -3315,7 +3616,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
io_req = cmd->io_request;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -3389,6 +3690,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
void
megasas_release_fusion(struct megasas_instance *instance)
{
+ megasas_free_ioc_init_cmd(instance);
megasas_free_cmds(instance);
megasas_free_cmds_fusion(instance);
@@ -4247,7 +4549,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
@@ -4348,7 +4650,7 @@ transition_to_ready:
megasas_set_dynamic_target_properties(sdev);
/* reset stream detection array */
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
@@ -4369,10 +4671,7 @@ transition_to_ready:
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else
instance->skip_heartbeat_timer_del = 1;
}
@@ -4404,10 +4703,7 @@ fail_kill_adapter:
} else {
/* For VF: Restart HB timer if we didn't OCR */
if (instance->requestorId) {
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
@@ -4502,20 +4798,31 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
{
struct fusion_context *fusion;
- instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
- instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- instance->ctrl_context_pages);
+ instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
+ GFP_KERNEL);
if (!instance->ctrl_context) {
- /* fall back to using vmalloc for fusion_context */
- instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
- if (!instance->ctrl_context) {
- dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
}
fusion = instance->ctrl_context;
+ fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ fusion->log_to_span =
+ (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ fusion->log_to_span_pages);
+ if (!fusion->log_to_span) {
+ fusion->log_to_span = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ if (!fusion->log_to_span) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
sizeof(struct LD_LOAD_BALANCE_INFO));
fusion->load_balance_info =
@@ -4546,11 +4853,15 @@ megasas_free_fusion_context(struct megasas_instance *instance)
fusion->load_balance_info_pages);
}
- if (is_vmalloc_addr(fusion))
- vfree(fusion);
- else
- free_pages((ulong)fusion,
- instance->ctrl_context_pages);
+ if (fusion->log_to_span) {
+ if (is_vmalloc_addr(fusion->log_to_span))
+ vfree(fusion->log_to_span);
+ else
+ free_pages((ulong)fusion->log_to_span,
+ fusion->log_to_span_pages);
+ }
+
+ kfree(fusion);
}
}
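Illustrative aside, not part of the patch: log_to_span moves out of struct fusion_context into its own allocation, tried first as contiguous pages and falling back to vmalloc, with the free path dispatching on is_vmalloc_addr(). The same pattern in isolation (the example_* helpers are hypothetical):

static void *example_alloc_big(size_t size, int *order)
{
	void *buf;

	*order = get_order(size);
	buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, *order);
	if (!buf)
		buf = vzalloc(size);	/* contiguous pages unavailable */
	return buf;
}

static void example_free_big(void *buf, int order)
{
	if (!buf)
		return;
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		free_pages((unsigned long)buf, order);
}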
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d78d76112501..1814d79cb98d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -51,6 +51,8 @@
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
+#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
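Illustrative aside, not part of the header: with the RDPQ_* values added above, the up to 128 reply descriptor post queues are allocated in chunks of 16, so fusion_context's rdpq_tracker[] holds 128 / 16 = 8 entries. A sketch of the index-to-chunk mapping this implies (example_rdpq_locate is illustrative only):

static inline void example_rdpq_locate(unsigned int msix_index,
				       unsigned int *chunk,
				       unsigned int *slot)
{
	*chunk = msix_index / RDPQ_MAX_INDEX_IN_ONE_CHUNK;	/* 0..7  */
	*slot  = msix_index % RDPQ_MAX_INDEX_IN_ONE_CHUNK;	/* 0..15 */
}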
@@ -103,12 +105,8 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define THRESHOLD_REPLY_COUNT 50
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
-
-enum MR_FUSION_ADAPTER_TYPE {
- THUNDERBOLT_SERIES = 0,
- INVADER_SERIES = 1,
- VENTURA_SERIES = 2,
-};
+#define MEGASAS_REDUCE_QD_COUNT 64
+#define IOC_INIT_FRAME_SIZE 4096
/*
* Raid Context structure which describes MegaRAID specific IO Parameters
@@ -1270,6 +1268,12 @@ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
u32 Reserved2;
};
+struct rdpq_alloc_detail {
+ struct dma_pool *dma_pool_ptr;
+ dma_addr_t pool_entry_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
+};
+
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
dma_addr_t req_frames_desc_phys;
@@ -1282,9 +1286,14 @@ struct fusion_context {
struct dma_pool *sg_dma_pool;
struct dma_pool *sense_dma_pool;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
+ struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
struct dma_pool *reply_frames_desc_pool;
+ struct dma_pool *reply_frames_desc_pool_align;
u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -1318,9 +1327,13 @@ struct fusion_context {
u8 fast_path_io;
struct LD_LOAD_BALANCE_INFO *load_balance_info;
u32 load_balance_info_pages;
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
- u8 adapter_type;
+ LD_SPAN_INFO *log_to_span;
+ u32 log_to_span_pages;
struct LD_STREAM_DETECT **stream_detect_by_ld;
+ dma_addr_t ioc_init_request_phys;
+ struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
+ struct megasas_cmd *ioc_init_cmd;
+
};
union desc_value {
diff --git a/drivers/scsi/mesh.h b/drivers/scsi/mesh.h
index 4fdb81fa55e2..ee53c05ace95 100644
--- a/drivers/scsi/mesh.h
+++ b/drivers/scsi/mesh.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* mesh.h: definitions for the driver for the MESH SCSI bus adaptor
* (Macintosh Enhanced SCSI Hardware) found on Power Macintosh computers.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
index b7643f596c1e..84fb3fbdb0ca 100644
--- a/drivers/scsi/mpt3sas/Makefile
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# mpt3sas makefile
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
mpt3sas-y += mpt3sas_base.o \
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index a9a659fc2812..b015c30d2c32 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
@@ -8,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.42
+ * mpi2.h Version: 02.00.48
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -103,6 +104,16 @@
* 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
* 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT
* 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT
+ * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines
+ * to be unique within first 32 characters.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -142,7 +153,7 @@
#define MPI2_VERSION_02_06 (0x0206)
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x2A)
+#define MPI2_HEADER_VERSION_UNIT (0x30)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
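Illustrative aside: assuming the usual composition of the unit and dev fields into a single version word, the bump above takes the combined header version from 0x2A00 to 0x3000 (EXAMPLE_MPI2_HEADER_VERSION is illustrative only):

#define EXAMPLE_MPI2_HEADER_VERSION \
	((MPI2_HEADER_VERSION_UNIT << MPI2_HEADER_VERSION_UNIT_SHIFT) | \
	 MPI2_HEADER_VERSION_DEV)	/* (0x30 << 8) | 0x00 == 0x3000 */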
@@ -249,6 +260,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+/* Defines for V7A/V7R HostDiagnostic Register */
+#define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000)
+#define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800)
+
#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
#define MPI2_DIAG_HCB_MODE (0x00000100)
@@ -367,6 +384,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+#define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10)
#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
@@ -425,6 +443,13 @@ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
Mpi25FastPathSCSIIORequestDescriptor_t,
*pMpi25FastPathSCSIIORequestDescriptor_t;
+/*PCIe Encapsulated Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedRequestDescriptor_t,
+ *pMpi26PCIeEncapsulatedRequestDescriptor_t;
+
/*union of Request Descriptors */
typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
@@ -433,6 +458,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated;
U64 Words;
} MPI2_REQUEST_DESCRIPTOR_UNION,
*PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
@@ -450,6 +476,7 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
* Atomic SCSI Target Request Descriptor
* Atomic RAID Accelerator Request Descriptor
* Atomic Fast Path SCSI IO Request Descriptor
+ * Atomic PCIe Encapsulated Request Descriptor
*/
/*Atomic Request Descriptor */
@@ -487,6 +514,7 @@ typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
/*values for marking a reply descriptor as unused */
@@ -565,6 +593,13 @@ typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
*pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+/*PCIe Encapsulated Success Reply Descriptor */
+typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t,
+ *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t;
+
/*union of Reply Descriptors */
typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
@@ -574,6 +609,8 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR
+ PCIeEncapsulatedSuccess;
U64 Words;
} MPI2_REPLY_DESCRIPTORS_UNION,
*PTR_MPI2_REPLY_DESCRIPTORS_UNION,
@@ -616,6 +653,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1162,6 +1200,8 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
/*Data Location Address Space */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index fa61baf7c74d..ee117106d0f7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
@@ -6,7 +7,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.35
+ * mpi2_cnfg.h Version: 02.00.40
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -189,6 +190,35 @@
* MPI2_CONFIG_PAGE_BIOS_1.
* 08-25-15 02.00.34 Bumped Header Version.
* 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008
+ * Added x16 PCIe width to IO Unit Page 7
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
* --------------------------------------------------------------------------
*/
@@ -272,6 +302,10 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D)
+#define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E)
/*****************************************************************************
@@ -339,6 +373,12 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+/*Enclosure PageAddress format */
+#define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
/*RAID Configuration PageAddress format */
#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
@@ -365,6 +405,33 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+/*PCIe Switch PageAddress format */
+#define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000)
+#define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000)
+#define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+
+
+/*PCIe Device PageAddress format */
+#define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+/*PCIe Link PageAddress format */
+#define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000)
+#define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+#define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+
+#define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF)
+
+
+
/****************************************************************************
* Configuration messages
****************************************************************************/
@@ -484,6 +551,12 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+#define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0)
+#define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
+#define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
+
+#define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
+
/*Manufacturing Page 0 */
@@ -726,6 +799,12 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10)
+#define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12)
+#define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13)
/*defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -736,6 +815,9 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+/*defines for the Slot field */
+#define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
+
/*
*Host code (drivers, BIOS, utilities, etc.) should leave this define set to
*one and check the value returned for NumPhys at runtime.
@@ -999,11 +1081,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10)
/*defines for IO Unit Page 7 PCIeSpeed field */
#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -1970,6 +2054,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
+#define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C)
/*values for AttachedPhyInfo fields */
@@ -2037,12 +2122,14 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+#define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+#define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C)
/*values for SAS HwLinkRate fields */
@@ -2051,11 +2138,13 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+#define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+#define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C)
@@ -2240,11 +2329,13 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0)
#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+#define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C)
/*see mpi2_sas.h for values for
*SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
@@ -3158,37 +3249,29 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
/*SAS Enclosure Page 0 */
typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
- MPI2_CONFIG_EXTENDED_PAGE_HEADER
- Header; /*0x00 */
- U32
- Reserved1; /*0x08 */
- U64
- EnclosureLogicalID; /*0x0C */
- U16
- Flags; /*0x14 */
- U16
- EnclosureHandle; /*0x16 */
- U16
- NumSlots; /*0x18 */
- U16
- StartSlot; /*0x1A */
- U8
- Reserved2; /*0x1C */
- U8
- EnclosureLevel; /*0x1D */
- U16
- SEPDevHandle; /*0x1E */
- U32
- Reserved3; /*0x20 */
- U32
- Reserved4; /*0x24 */
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U64 EnclosureLogicalID; /*0x0C */
+ U16 Flags; /*0x14 */
+ U16 EnclosureHandle; /*0x16 */
+ U16 NumSlots; /*0x18 */
+ U16 StartSlot; /*0x1A */
+ U8 ChassisSlot; /*0x1C */
+ U8 EnclosureLevel; /*0x1D */
+ U16 SEPDevHandle; /*0x1E */
+ U32 Reserved3; /*0x20 */
+ U32 Reserved4; /*0x24 */
} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
- Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
+ MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t;
#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
@@ -3198,6 +3281,18 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+#define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
+
+/*Values for Enclosure Page 0 Flags field */
+#define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
+#define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
+#define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
/****************************************************************************
* Log Config Page
@@ -3497,4 +3592,422 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
/*PageVersion should be provided by product-specific code */
+
+
+/****************************************************************************
+* values for fields used by several types of PCIe Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical Link Rate */
+#define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+#define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02)
+#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
+#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
+#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+
+
+/****************************************************************************
+* PCIe IO Unit Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe IO Unit Page 0 */
+
+typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 EnumerationStatus; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_PCIE_IOUNIT0_PHY_MAX
+#define MPI26_PCIE_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 InitStatus; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI26_PCIE_IO_UNIT0_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PIOUNIT_0,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0,
+ Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t;
+
+#define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00)
+
+/*values for PCIe IO Unit Page 0 LinkFlags */
+#define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08)
+
+/*values for PCIe IO Unit Page 0 PhyFlags */
+#define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+/*values for PCIe IO Unit Page 0 EnumerationStatus */
+#define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+#define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000)
+
+
+/*PCIe IO Unit Page 1 */
+
+typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U32 Reserved1; /*0x08 */
+} MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
+
+/*values for LinkFlags */
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00)
+#define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_PCIE_IOUNIT1_PHY_MAX
+#define MPI26_PCIE_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 ControlFlags; /*0x08 */
+ U16 Reserved; /*0x0A */
+ U16 AdditionalControlFlags; /*0x0C */
+ U16 NVMeMaxQueueDepth; /*0x0E */
+ U8 NumPhys; /*0x10 */
+ U8 Reserved1; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ MPI26_PCIE_IO_UNIT1_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
+} MPI26_CONFIG_PAGE_PIOUNIT_1,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1,
+ Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t;
+
+#define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00)
+
+/*values for PCIe IO Unit Page 1 PhyFlags */
+#define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01)
+
+/*values for PCIe IO Unit Page 1 MaxMinLinkRate */
+#define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+
+/*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+
+/****************************************************************************
+* PCIe Switch Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Switch Page 0 */
+
+typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 DevHandle; /*0x0C */
+ U16 ParentDevHandle; /*0x0E */
+ U8 NumPorts; /*0x10 */
+ U8 PCIeLevel; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+} MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0,
+ Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t;
+
+#define MPI26_PCIESWITCH0_PAGEVERSION (0x00)
+
+
+/*PCIe Switch Page 1 */
+
+typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumPorts; /*0x0C */
+ U8 PortNum; /*0x0D */
+ U16 AttachedDevHandle; /*0x0E */
+ U16 SwitchDevHandle; /*0x10 */
+ U8 NegotiatedPortWidth; /*0x12 */
+ U8 NegotiatedLinkRate; /*0x13 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+} MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1,
+ Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t;
+
+#define MPI26_PCIESWITCH1_PAGEVERSION (0x00)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/****************************************************************************
+* PCIe Device Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Device Page 0 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U64 WWID; /*0x0C */
+ U16 ParentDevHandle; /*0x14 */
+ U8 PortNum; /*0x16 */
+ U8 AccessStatus; /*0x17 */
+ U16 DevHandle; /*0x18 */
+ U8 PhysicalPort; /*0x1A */
+ U8 Reserved1; /*0x1B */
+ U32 DeviceInfo; /*0x1C */
+ U32 Flags; /*0x20 */
+ U8 SupportedLinkRates; /*0x24 */
+ U8 MaxPortWidth; /*0x25 */
+ U8 NegotiatedPortWidth; /*0x26 */
+ U8 NegotiatedLinkRate; /*0x27 */
+ U8 EnclosureLevel; /*0x28 */
+ U8 Reserved2; /*0x29 */
+ U16 Reserved3; /*0x2A */
+ U8 ConnectorName[4]; /*0x2C */
+ U32 Reserved4; /*0x30 */
+ U32 Reserved5; /*0x34 */
+} MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0,
+ Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t;
+
+#define MPI26_PCIEDEVICE0_PAGEVERSION (0x01)
+
+/*values for PCIe Device Page 0 AccessStatus field */
+#define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00)
+#define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04)
+#define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02)
+#define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07)
+#define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08)
+#define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09)
+#define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A)
+#define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10)
+
+#define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37)
+#define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38)
+
+#define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F)
+
+/*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo
+ *field
+ */
+
+/*values for PCIe Device Page 0 Flags field */
+#define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200)
+#define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040)
+#define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020)
+#define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010)
+#define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002)
+#define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001)
+
+/* values for PCIe Device Page 0 SupportedLinkRates field */
+#define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
+#define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04)
+#define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02)
+#define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01)
+
+/*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*PCIe Device Page 2 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x08 */
+ U16 Reserved1; /*0x0A */
+ U32 MaximumDataTransferSize;/*0x0C */
+ U32 Capabilities; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
+ Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
+
+#define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
+
+/*defines for PCIe Device Page 2 Capabilities field */
+#define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
+#define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
+#define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
+
+
+/****************************************************************************
+* PCIe Link Config Pages (MPI v2.6 and later)
+****************************************************************************/
+
+/*PCIe Link Page 1 */
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 CorrectableErrorCount; /*0x0C */
+ U16 NonFatalErrorCount; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U16 FatalErrorCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+} MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1,
+ Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t;
+
+#define MPI26_PCIELINK1_PAGEVERSION (0x00)
+
+/*PCIe Link Page 2 */
+
+typedef struct _MPI26_PCIELINK2_LINK_EVENT {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 LinkEventInfo; /*0x04 */
+} MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT,
+ Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t;
+
+/*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+#ifndef MPI26_PCIELINK2_LINK_EVENT_MAX
+#define MPI26_PCIELINK2_LINK_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK2_LINK_EVENT
+ LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2,
+ Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t;
+
+#define MPI26_PCIELINK2_PAGEVERSION (0x00)
+
+/*PCIe Link Page 3 */
+
+typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG,
+ Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t;
+
+/*values for LinkEventCode field */
+#define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00)
+#define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01)
+#define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02)
+#define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03)
+#define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04)
+#define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05)
+#define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06)
+#define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07)
+#define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08)
+#define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E)
+#define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F)
+#define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10)
+#define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11)
+#define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12)
+
+/*values for the CounterType field */
+#define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00)
+#define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01)
+#define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02)
+#define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+#ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
+#define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK3_LINK_EVENT_CONFIG
+ LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
+} MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
+ Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
+
+#define MPI26_PCIELINK3_PAGEVERSION (0x00)
+
+
#endif
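Illustrative aside, not part of the header: the PCIe pages added above encode link rates in nibbles. NegotiatedLinkRate carries an MPI26_PCIE_NEG_LINK_RATE_* code in its low nibble, and MaxMinLinkRate keeps the maximum programmable rate in its high nibble. A small decoding sketch using only the defines introduced here (the example_* helpers are hypothetical):

static inline u8 example_pcie_negotiated_rate(u8 negotiated_link_rate)
{
	return negotiated_link_rate & MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL;
}

static inline u8 example_pcie_max_rate(u8 max_min_link_rate)
{
	/* e.g. MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50) decodes to 0x5 */
	return (max_min_link_rate & MPI26_PCIEIOUNIT1_MAX_RATE_MASK) >>
		MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT;
}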
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index bba56b61d36c..948a3ba682d7 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
@@ -6,7 +7,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.20
+ * mpi2_init.h Version: 02.00.21
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -54,6 +55,8 @@
* 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
* 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message.
* 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * be unique within first 32 characters.
* --------------------------------------------------------------------------
*/
@@ -373,6 +376,11 @@ typedef struct _MPI2_SCSI_IO_REPLY {
} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+/*SCSI IO Reply MsgFlags bits */
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02)
+#define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04)
+
/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
#define MPI2_SCSI_STATUS_GOOD (0x00)
@@ -446,11 +454,13 @@ typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
/*MsgFlags bits */
#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+#define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18)
/*SCSI Task Management Reply Message */
typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index af4be403582e..cc2aff7aa67b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
@@ -6,7 +7,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.27
+ * mpi2_ioc.h Version: 02.00.32
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -140,7 +141,32 @@
* Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
* MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
* Added MPI26_CTRL_OP_SHUTDOWN.
- * 08-25-15 02.00.27 Added IC ARCH Class based signature defines
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
* --------------------------------------------------------------------------
*/
@@ -212,6 +238,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+/*ConfigurationFlags */
+#define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
+
/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
@@ -299,6 +328,10 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
U16 MinDevHandle; /*0x3C */
U8 CurrentHostPageSize; /* 0x3E */
U8 Reserved4; /* 0x3F */
+ U8 SGEModifierMask; /*0x40 */
+ U8 SGEModifierValue; /*0x41 */
+ U8 SGEModifierShift; /*0x42 */
+ U8 Reserved5; /*0x43 */
} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
@@ -315,6 +348,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400)
#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
@@ -335,6 +369,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
@@ -353,6 +388,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
@@ -402,6 +438,8 @@ typedef struct _MPI2_PORT_FACTS_REPLY {
#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+#define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40)
+
/****************************************************************************
* PortEnable message
@@ -508,6 +546,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D)
#define MPI2_EVENT_IR_VOLUME (0x001E)
#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
@@ -520,7 +559,12 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
#define MPI2_EVENT_HOST_MESSAGE (0x0028)
#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030)
+#define MPI2_EVENT_PCIE_ENUMERATION (0x0031)
+#define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032)
+#define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033)
#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034)
+#define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)
#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
@@ -617,11 +661,20 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
U8 ReasonCode; /* 0x04 */
U8 ReceptacleID; /* 0x05 */
U16 Reserved1; /* 0x06 */
-} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+} MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ Mpi25EventDataActiveCableExcept_t,
+ *pMpi25EventDataActiveCableExcept_t,
+ MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
Mpi26EventDataActiveCableExcept_t,
*pMpi26EventDataActiveCableExcept_t;
+/*MPI2.5 defines for the ReasonCode field */
+#define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
+#define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01)
+#define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
+
/* defines for ReasonCode field */
#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
@@ -957,6 +1010,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+#define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C)
/*values for the PhyStatus field */
#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
@@ -982,12 +1036,37 @@ typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
*PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
Mpi2EventDataSasEnclDevStatusChange_t,
- *pMpi2EventDataSasEnclDevStatusChange_t;
+ *pMpi2EventDataSasEnclDevStatusChange_t,
+ MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ Mpi26EventDataEnclDevStatusChange_t,
+ *pMpi26EventDataEnclDevStatusChange_t;
/*SAS Enclosure Device Status Change event ReasonCode values */
#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+/*Enclosure Device Status Change event ReasonCode values */
+#define MPI26_EVENT_ENCL_RC_ADDED (0x01)
+#define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02)
+
+
+typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR {
+ U16 DevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1[2]; /*0x04 */
+ U64 SASAddress; /*0x0C */
+ U32 Reserved2[2]; /*0x14 */
+} MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ Mpi25EventDataSasDeviceDiscoveryError_t,
+ *pMpi25EventDataSasDeviceDiscoveryError_t;
+
+/*SAS Device Discovery Error Event data ReasonCode values */
+#define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01)
+#define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02)
+
/*SAS PHY Counter Event data */
typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
@@ -1073,6 +1152,174 @@ typedef struct _MPI2_EVENT_DATA_HBD_PHY {
/*values for the DescriptorType field */
#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/*PCIe Device Status Change Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 WWID; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ Mpi26EventDataPCIeDeviceStatusChange_t,
+ *pMpi26EventDataPCIeDeviceStatusChange_t;
+
+/*PCIe Device Status Change Event data ReasonCode values */
+#define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05)
+#define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
+
+
+/*PCIe Enumeration Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 EnumerationStatus; /*0x04 */
+} MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ Mpi26EventDataPCIeEnumeration_t,
+ *pMpi26EventDataPCIeEnumeration_t;
+
+/*PCIe Enumeration Event data Flags values */
+#define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02)
+#define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01)
+
+/*PCIe Enumeration Event data ReasonCode values */
+#define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+#define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+
+/*PCIe Enumeration Event data EnumerationStatus values */
+#define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+#define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+
+
+/*PCIe Topology Change List Event data (MPI v2.6 and later) */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT
+#define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1)
+#endif
+
+typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 PortStatus; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U8 CurrentPortInfo; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 PreviousPortInfo; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ Mpi26EventPCIeTopoPortEntry_t,
+ *pMpi26EventPCIeTopoPortEntry_t;
+
+/*PCIe Topology Change List Event data PortStatus values */
+#define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+#define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+#define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+#define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+
+/*PCIe Topology Change List Event data defines for CurrentPortInfo and
+ *PreviousPortInfo
+ */
+#define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0)
+#define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+#define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10)
+#define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
+#define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
+#define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
+
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+#define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+
+typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 SwitchDevHandle; /*0x02 */
+ U8 NumPorts; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPortNum; /*0x09 */
+ U8 SwitchStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI26_EVENT_PCIE_TOPO_PORT_ENTRY
+ PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */
+} MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ Mpi26EventDataPCIeTopologyChangeList_t,
+ *pMpi26EventDataPCIeTopologyChangeList_t;
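+
+/*
+ * Illustrative host-side usage (not part of the MPI specification): because
+ * the IOC sizes this event data at runtime, walk NumEntries rather than the
+ * compile-time MPI26_EVENT_PCIE_TOPO_PORT_COUNT, e.g.
+ *     for (i = 0; i < event_data->NumEntries; i++)
+ *             process_port_entry(&event_data->PortEntry[i]);
+ * where process_port_entry() is a hypothetical per-port handler.
+ */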
+
+/*PCIe Topology Change List Event data SwitchStatus values */
+#define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+#define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01)
+#define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+#define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+#define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+
+/*PCIe Link Counter Event data (MPI v2.6 and later) */
+
+typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 LinkEventCode; /*0x0C */
+ U8 LinkNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 LinkEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t;
+
+
+/*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode
+ *field
+ */
+
+/*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType
+ *field
+ */
+
+/*use MPI26_PCIELINK3_TIME_UNITS_ values from mpi2_cnfg.h for the TimeUnits
+ *field
+ */
+
+/*use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags
+ *field
+ */
+
/****************************************************************************
* EventAck message
****************************************************************************/
@@ -1190,6 +1437,14 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
+#define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D)
+#define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E)
+#define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F)
+#define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10)
+#define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11)
+#define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
+#define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
+#define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/*MPI v2.0 FWDownload TransactionContext Element */
@@ -1276,6 +1531,14 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST {
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
+#define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E)
+#define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F)
+#define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10)
+#define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11)
+#define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12)
+#define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13)
+#define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14)
+
/*MPI v2.0 FWUpload TransactionContext Element */
typedef struct _MPI2_FW_UPLOAD_TCSGE {
@@ -1394,10 +1657,13 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
/* legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
#define MPI26_FW_HEADER_SIGNATURE0 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
#define MPI26_FW_HEADER_SIGNATURE0_3516 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+#define MPI26_FW_HEADER_SIGNATURE0_4008 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
@@ -1541,6 +1807,13 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
+#define MPI2_FLASH_REGION_SBR (0x0E)
+#define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
+#define MPI2_FLASH_REGION_HIIM (0x10)
+#define MPI2_FLASH_REGION_HIIA (0x11)
+#define MPI2_FLASH_REGION_CTLR (0x12)
+#define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
+#define MPI2_FLASH_REGION_MR_NVDATA (0x14)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
@@ -1825,6 +2098,8 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
+#define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A)
+#define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B)
#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
new file mode 100644
index 000000000000..f0281f943ec9
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-2015 Avago Technologies. All rights reserved.
+ *
+ *
+ * Name: mpi2_pci.h
+ * Title: MPI PCIe Attached Devices structures and definitions.
+ * Creation Date: October 9, 2012
+ *
+ * mpi2_pci.h Version: 02.00.02
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 03-16-15 02.00.00 Initial version.
+ * 02-17-16 02.00.01 Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
+ * NVME Encapsulated Request.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_PCI_H
+#define MPI2_PCI_H
+
+
+/*
+ *Values for the PCIe DeviceInfo field used in PCIe Device Status Change Event
+ *data and PCIe Configuration pages.
+ */
+#define MPI26_PCIE_DEVINFO_DIRECT_ATTACH (0x00000010)
+
+#define MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE (0x0000000F)
+#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
+#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
+#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
+
+
+/****************************************************************************
+* NVMe Encapsulated message
+****************************************************************************/
+
+/*NVME Encapsulated Request Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ U64 ErrorResponseBaseAddress; /*0x10 */
+ U16 ErrorResponseAllocationLength; /*0x18 */
+ U16 Flags; /*0x1A */
+ U32 DataLength; /*0x1C */
+ U8 NVMe_Command[4]; /*0x20 */
+
+} MPI26_NVME_ENCAPSULATED_REQUEST, *PTR_MPI26_NVME_ENCAPSULATED_REQUEST,
+ Mpi26NVMeEncapsulatedRequest_t, *pMpi26NVMeEncapsulatedRequest_t;
+
+/*defines for the Flags field */
+#define MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP (0x0020)
+/*Submission Queue Type*/
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_MASK (0x0010)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
+/*Error Response Address Space */
+#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
+#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
+#define MPI26_NVME_FLAGS_IOCPLB_RSP_ADDR (0x0008)
+#define MPI26_NVME_FLAGS_IOCPLBNTA_RSP_ADDR (0x000C)
+/*Data Direction*/
+#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
+#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
+#define MPI26_NVME_FLAGS_WRITE (0x0001)
+#define MPI26_NVME_FLAGS_READ (0x0002)
+#define MPI26_NVME_FLAGS_BIDIRECTIONAL (0x0003)
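+
+/*
+ * Illustrative usage (not part of the MPI specification): an Admin command
+ * whose data is returned to the host would set
+ *     Flags = MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN | MPI26_NVME_FLAGS_READ
+ * i.e. 0x0010 | 0x0002 = 0x0012.
+ */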
+
+
+/*NVMe Encapsulated Reply Message */
+typedef struct _MPI26_NVME_ENCAPSULATED_ERROR_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 EncapsulatedCommandLength; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 ErrorResponseCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+} MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ *PTR_MPI26_NVME_ENCAPSULATED_ERROR_REPLY,
+ Mpi26NVMeEncapsulatedErrorReply_t,
+ *pMpi26NVMeEncapsulatedErrorReply_t;
+
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 1c0eeeeb5eaf..b9bb1c178f12 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2014 Avago Technologies. All rights reserved.
*
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index c10c2c02a945..afa17ff246b4 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 5f9289a1166f..629296ee9236 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2014 Avago Technologies. All rights reserved.
*
@@ -6,7 +7,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.13
+ * mpi2_tool.h Version: 02.00.14
*
* Version History
* ---------------
@@ -35,6 +36,8 @@
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
* 11-18-14 02.00.13 Updated copyright information.
+ * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
+ * Tool Request Message.
* --------------------------------------------------------------------------
*/
@@ -105,6 +108,16 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_SBR (0x00800000)
+#define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000)
+#define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000)
+#define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000)
+#define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000)
+#define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000)
+#define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000)
+#define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0)
+#define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010)
+#define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008)
#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index 92a81abc2c31..36494439a419 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2014 Avago Technologies. All rights reserved.
*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 87999905bca3..8027de465d47 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -59,6 +59,7 @@
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
+#include <asm/page.h> /* To get host page size per arch */
#include <linux/aer.h>
@@ -105,7 +106,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
*
*/
static int
-_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -556,6 +557,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
func_str = "smp_passthru";
break;
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
+ ioc->sge_size;
+ func_str = "nvme_encapsulated";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -655,7 +661,27 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
desc = "Temperature Threshold";
break;
case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
- desc = "Active cable exception";
+ desc = "Cable Event";
+ break;
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ desc = "PCIE Device Status Change";
+ break;
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ {
+ Mpi26EventDataPCIeEnumeration_t *event_data =
+ (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
+ (event_data->ReasonCode ==
+ MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->EnumerationStatus)
+ pr_info("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ desc = "PCIE Topology Change List";
break;
}
@@ -984,7 +1010,9 @@ _base_interrupt(int irq, void *bus_id)
if (request_desript_type ==
MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
request_desript_type ==
- MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
+ request_desript_type ==
+ MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
cb_idx = _base_get_cb_idx(ioc, smid);
if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
(likely(mpt_callbacks[cb_idx] != NULL))) {
@@ -1347,6 +1375,433 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
/* IEEE format sgls */
/**
+ * _base_build_nvme_prp - This function is called for NVMe end devices to build
+ * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
+ * entry of the NVMe message (PRP1). If the data buffer is small enough to be
+ * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
+ * used to describe a larger data buffer. If the data buffer is too large to
+ * describe using the two PRP entries inside the NVMe message, then PRP1
+ * describes the first data memory segment, and PRP2 contains a pointer to a PRP
+ * list located elsewhere in memory to describe the remaining data memory
+ * segments. The PRP list will be contiguous.
+ *
+ * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
+ * consists of a list of PRP entries to describe a number of noncontiguous
+ * physical memory segments as a single memory buffer, just as an SGL does. Note
+ * however, that this function is only used by the IOCTL call, so the memory
+ * given will be guaranteed to be contiguous. There is no need to translate
+ * non-contiguous SGL into a PRP in this case. All PRPs will describe
+ * contiguous space that is one page size each.
+ *
+ * Each NVMe message contains two PRP entries. The first (PRP1) either contains
+ * a PRP list pointer or a PRP element, depending upon the command. PRP2
+ * contains the second PRP element if the memory being described fits within 2
+ * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
+ *
+ * A PRP list pointer contains the address of a PRP list, structured as a linear
+ * array of PRP entries. Each PRP entry in this list describes a segment of
+ * physical memory.
+ *
+ * Each 64-bit PRP entry comprises an address and an offset field. The address
+ * always points at the beginning of a 4KB physical memory page, and the offset
+ * describes where within that 4KB page the memory segment begins. Only the
+ * first element in a PRP list may contain a non-zero offset, implying that all
+ * memory segments following the first begin at the start of a 4KB page.
+ *
+ * Each PRP element normally describes 4KB of physical memory, with exceptions
+ * for the first and last elements in the list. If the memory being described
+ * by the list begins at a non-zero offset within the first 4KB page, then the
+ * first PRP element will contain a non-zero offset indicating where the region
+ * begins within the 4KB page. The last memory segment may end before the end
+ * of the 4KB segment, depending upon the overall size of the memory being
+ * described by the PRP list.
+ *
+ * Since PRP entries lack any indication of size, the overall data buffer length
+ * is used to determine where the end of the data memory buffer is located, and
+ * how many PRP entries are required to describe it.
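+ *
+ * For example (illustrative numbers only): a 9216-byte buffer that starts
+ * 512 bytes into a 4KB page is covered by three entries of 3584, 4096 and
+ * 1536 bytes.  PRP1 holds the first entry; since more than one page of data
+ * remains after it, PRP2 holds a PRP list pointer and the list carries the
+ * remaining two entries.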
+ *
+ * @ioc: per adapter object
+ * @smid: system request message index for getting associated SGL
+ * @nvme_encap_request: the NVMe request msg frame pointer
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Returns nothing.
+ */
+static void
+_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ int prp_size = NVME_PRP_SIZE;
+ __le64 *prp_entry, *prp1_entry, *prp2_entry;
+ __le64 *prp_page;
+ dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+ u32 offset, entry_len;
+ u32 page_mask_result, page_mask;
+ size_t length;
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any PRP.
+ */
+ if (!data_in_sz && !data_out_sz)
+ return;
+ /*
+ * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+ * PRP1 is located at a 24 byte offset from the start of the NVMe
+ * command. Then set the current PRP entry pointer to PRP1.
+ */
+ prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
+ NVME_CMD_PRP1_OFFSET);
+ prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
+ NVME_CMD_PRP2_OFFSET);
+ prp_entry = prp1_entry;
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory.
+ */
+ prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
+ prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
+
+ /*
+	 * Check if we are within 1 entry of a page boundary; we don't
+	 * want our first entry to be a PRP List entry.
+ */
+ page_mask = ioc->page_size - 1;
+ page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+ if (!page_mask_result) {
+ /* Bump up to next page boundary. */
+ prp_page = (__le64 *)((u8 *)prp_page + prp_size);
+ prp_page_dma = prp_page_dma + prp_size;
+ }
+
+ /*
+ * Set PRP physical pointer, which initially points to the current PRP
+ * DMA memory page.
+ */
+ prp_entry_dma = prp_page_dma;
+
+ /* Get physical address and length of the data buffer. */
+ if (data_in_sz) {
+ dma_addr = data_in_dma;
+ length = data_in_sz;
+ } else {
+ dma_addr = data_out_dma;
+ length = data_out_sz;
+ }
+
+ /* Loop while the length is not zero. */
+ while (length) {
+ /*
+ * Check if we need to put a list pointer here if we are at
+ * page boundary - prp_size (8 bytes).
+ */
+ page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+ if (!page_mask_result) {
+ /*
+ * This is the last entry in a PRP List, so we need to
+ * put a PRP list pointer here. What this does is:
+ * - bump the current memory pointer to the next
+ * address, which will be the next full page.
+ * - set the PRP Entry to point to that page. This
+ * is now the PRP List pointer.
+			 *   - bump the PRP Entry pointer to the start of the
+ * next page. Since all of this PRP memory is
+ * contiguous, no need to get a new page - it's
+ * just the next address.
+ */
+ prp_entry_dma++;
+ *prp_entry = cpu_to_le64(prp_entry_dma);
+ prp_entry++;
+ }
+
+ /* Need to handle if entry will be part of a page. */
+ offset = dma_addr & page_mask;
+ entry_len = ioc->page_size - offset;
+
+ if (prp_entry == prp1_entry) {
+ /*
+ * Must fill in the first PRP pointer (PRP1) before
+ * moving on.
+ */
+ *prp1_entry = cpu_to_le64(dma_addr);
+
+ /*
+ * Now point to the second PRP entry within the
+ * command (PRP2).
+ */
+ prp_entry = prp2_entry;
+ } else if (prp_entry == prp2_entry) {
+ /*
+ * Should the PRP2 entry be a PRP List pointer or just
+ * a regular PRP pointer? If there is more than one
+ * more page of data, must use a PRP List pointer.
+ */
+ if (length > ioc->page_size) {
+ /*
+ * PRP2 will contain a PRP List pointer because
+ * more PRP's are needed with this command. The
+ * list will start at the beginning of the
+ * contiguous buffer.
+ */
+ *prp2_entry = cpu_to_le64(prp_entry_dma);
+
+ /*
+ * The next PRP Entry will be the start of the
+ * first PRP List.
+ */
+ prp_entry = prp_page;
+ } else {
+ /*
+ * After this, the PRP Entries are complete.
+ * This command uses 2 PRP's and no PRP list.
+ */
+ *prp2_entry = cpu_to_le64(dma_addr);
+ }
+ } else {
+ /*
+ * Put entry in list and bump the addresses.
+ *
+ * After PRP1 and PRP2 are filled in, this will fill in
+ * all remaining PRP entries in a PRP List, one per
+ * each time through the loop.
+ */
+ *prp_entry = cpu_to_le64(dma_addr);
+ prp_entry++;
+ prp_entry_dma++;
+ }
+
+ /*
+ * Bump the phys address of the command's data buffer by the
+ * entry_len.
+ */
+ dma_addr += entry_len;
+
+ /* Decrement length accounting for last partial page. */
+ if (entry_len > length)
+ length = 0;
+ else
+ length -= entry_len;
+ }
+}
+
+/**
+ * base_make_prp_nvme - Prepare PRPs (Physical Region Pages), the native SGL
+ * format used for NVMe drives only
+ *
+ * @ioc: per adapter object
+ * @scmd: SCSI command from the mid-layer
+ * @mpi_request: mpi request
+ * @smid: msg Index
+ * @sge_count: scatter gather element count.
+ *
+ * Returns nothing; the choice between PRPs and IEEE SGLs is made earlier by
+ * base_is_prp_possible().
+ */
+static void
+base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd,
+ Mpi25SCSIIORequest_t *mpi_request,
+ u16 smid, int sge_count)
+{
+ int sge_len, num_prp_in_chain = 0;
+ Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
+ __le64 *curr_buff;
+ dma_addr_t msg_dma, sge_addr, offset;
+ u32 page_mask, page_mask_result;
+ struct scatterlist *sg_scmd;
+ u32 first_prp_len;
+ int data_len = scsi_bufflen(scmd);
+ u32 nvme_pg_size;
+
+ nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
+ /*
+	 * NVMe has a very convoluted PRP format.  One PRP is required
+	 * for each page or partial page.  The driver needs to split up OS
+	 * sg_list entries if they are longer than one page or cross a page
+	 * boundary.  The driver also has to insert a PRP list pointer entry as
+	 * the last entry in each physical page of the PRP list.
+ *
+ * NOTE: The first PRP "entry" is actually placed in the first
+ * SGL entry in the main message as IEEE 64 format. The 2nd
+ * entry in the main message is the chain element, and the rest
+ * of the PRP entries are built in the contiguous pcie buffer.
+ */
+ page_mask = nvme_pg_size - 1;
+
+ /*
+ * Native SGL is needed.
+ * Put a chain element in main message frame that points to the first
+ * chain buffer.
+ *
+ * NOTE: The ChainOffset field must be 0 when using a chain pointer to
+ * a native SGL.
+ */
+
+ /* Set main message chain element pointer */
+ main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
+ /*
+ * For NVMe the chain element needs to be the 2nd SG entry in the main
+ * message.
+ */
+ main_chain_element = (Mpi25IeeeSgeChain64_t *)
+ ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
+
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory. Normal chain buffers can't be used
+ * because each chain buffer would need to be the size of an OS
+ * page (4k).
+ */
+ curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
+ msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
+
+ main_chain_element->Address = cpu_to_le64(msg_dma);
+ main_chain_element->NextChainOffset = 0;
+ main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+ /* Build first prp, sge need not to be page aligned*/
+ ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
+ sg_scmd = scsi_sglist(scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+
+ offset = sge_addr & page_mask;
+ first_prp_len = nvme_pg_size - offset;
+
+ ptr_first_sgl->Address = cpu_to_le64(sge_addr);
+ ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
+
+ data_len -= first_prp_len;
+
+ if (sge_len > first_prp_len) {
+ sge_addr += first_prp_len;
+ sge_len -= first_prp_len;
+ } else if (data_len && (sge_len == first_prp_len)) {
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ for (;;) {
+ offset = sge_addr & page_mask;
+
+ /* Put PRP pointer due to page boundary*/
+ page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
+ if (unlikely(!page_mask_result)) {
+ scmd_printk(KERN_NOTICE,
+ scmd, "page boundary curr_buff: 0x%p\n",
+ curr_buff);
+ msg_dma += 8;
+ *curr_buff = cpu_to_le64(msg_dma);
+ curr_buff++;
+ num_prp_in_chain++;
+ }
+
+ *curr_buff = cpu_to_le64(sge_addr);
+ curr_buff++;
+ msg_dma += 8;
+ num_prp_in_chain++;
+
+ sge_addr += nvme_pg_size;
+ sge_len -= nvme_pg_size;
+ data_len -= nvme_pg_size;
+
+ if (data_len <= 0)
+ break;
+
+ if (sge_len > 0)
+ continue;
+
+ sg_scmd = sg_next(sg_scmd);
+ sge_addr = sg_dma_address(sg_scmd);
+ sge_len = sg_dma_len(sg_scmd);
+ }
+
+ main_chain_element->Length =
+ cpu_to_le32(num_prp_in_chain * sizeof(u64));
+ return;
+}
+
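+/**
+ * base_is_prp_possible - decide whether to build NVMe PRPs for this I/O
+ * @ioc: per adapter object
+ * @pcie_device: points to the PCIe device's info
+ * @scmd: scsi command
+ * @sge_count: scatter gather element count
+ *
+ * Small transfers (at most 16K) that fit in no more than two SGEs are
+ * cheaper to send as an IEEE SGL, so PRPs are skipped for them.
+ *
+ * Returns true if a native NVMe PRP should be built, false if an IEEE SGL
+ * should be used instead.
+ */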
+static bool
+base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
+{
+ u32 data_length = 0;
+ struct scatterlist *sg_scmd;
+ bool build_prp = true;
+
+ data_length = scsi_bufflen(scmd);
+ sg_scmd = scsi_sglist(scmd);
+
+	/* If the data length is <= 16K and the number of SGEs is <= 2,
+	 * build an IEEE SGL instead of PRPs.
+ */
+ if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
+ build_prp = false;
+
+ return build_prp;
+}
+
+/**
+ * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
+ * determine if the driver needs to build a native SGL. If so, that native
+ * SGL is built in the special contiguous buffers allocated especially for
+ * PCIe SGL creation. If the driver will not build a native SGL, return
+ *	PCIe SGL creation.  If the driver will not build a native SGL, it
+ *	returns 1 and a normal IEEE SGL will be built.  Currently this routine
+ * @ioc: per adapter object
+ * @mpi_request: mf request pointer
+ * @smid: system request message index
+ * @scmd: scsi command
+ * @pcie_device: points to the PCIe device's info
+ *
+ * Returns 0 if native SGL was built, 1 if no SGL was built
+ */
+static int
+_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
+ Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
+ struct _pcie_device *pcie_device)
+{
+ struct scatterlist *sg_scmd;
+ int sges_left;
+
+ /* Get the SG list pointer and info. */
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (sges_left < 0) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "scsi_dma_map failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return 1;
+ }
+
+ /* Check if we need to build a native SG list. */
+ if (base_is_prp_possible(ioc, pcie_device,
+ scmd, sges_left) == 0) {
+ /* We built a native SG list, just return. */
+		/* PRPs are not required for this I/O; fall back to an IEEE SGL. */
+ }
+
+ /*
+ * Build native NVMe PRP.
+ */
+ base_make_prp_nvme(ioc, scmd, mpi_request,
+ smid, sges_left);
+
+ return 0;
+out:
+ scsi_dma_unmap(scmd);
+ return 1;
+}
+
+/**
* _base_add_sg_single_ieee - add sg element for IEEE format
* @paddr: virtual address for SGE
* @flags: SGE flags
@@ -1391,9 +1846,11 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
/**
* _base_build_sg_scmd - main sg creation routine
+ *	(the pcie_device argument is not used by this routine)
* @ioc: per adapter object
* @scmd: scsi command
* @smid: system request message index
+ * @unused: unused pcie_device pointer
* Context: none.
*
* The main routine that builds scatter gather table from a given
@@ -1403,7 +1860,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
*/
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid)
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
Mpi2SCSIIORequest_t *mpi_request;
dma_addr_t chain_dma;
@@ -1537,6 +1994,8 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
* @ioc: per adapter object
* @scmd: scsi command
* @smid: system request message index
+ * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
+ *		constructed as needed.
* Context: none.
*
* The main routine that builds scatter gather table from a given
@@ -1546,9 +2005,9 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
*/
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid)
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
dma_addr_t chain_dma;
struct scatterlist *sg_scmd;
void *sg_local, *chain;
@@ -1571,6 +2030,13 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ /* Check if we need to build a native SG list. */
+ if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
+ smid, scmd, pcie_device) == 0)) {
+ /* We built a native SG list, just return. */
+ return 0;
+ }
+
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
@@ -1582,12 +2048,12 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sg_local = &mpi_request->SGL;
sges_in_segment = (ioc->request_sz -
- offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
if (sges_left <= sges_in_segment)
goto fill_in_last_segment;
mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
- (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+ (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
/* fill in main message segment when there is a chain following */
while (sges_in_segment > 1) {
@@ -1990,7 +2456,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
- local_max_msix_vectors = 8;
+ local_max_msix_vectors = (reset_devices) ? 1 : 8;
else
local_max_msix_vectors = max_msix_vectors;
@@ -2267,6 +2733,32 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
+ * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to a PCIe SGL.
+ */
+void *
+mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+}
+
+/**
+ * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the address of the PCIe buffer.
+ */
+dma_addr_t
+mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+}
+
+/**
* mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
* @ioc: per adapter object
* @phys_addr: lower 32 physical addr of the reply
@@ -2544,6 +3036,30 @@ _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
+ * _base_put_smid_nvme_encap - send NVMe encapsulated request to
+ * firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags =
+ MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
* _base_put_smid_default - Default, primarily used for config pages
* @ioc: per adapter object
* @smid: system request message index
@@ -2634,6 +3150,27 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
}
/**
+ * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi26AtomicRequestDescriptor_t descriptor;
+ u32 *request = (u32 *)&descriptor;
+
+ descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
+ descriptor.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SMID = cpu_to_le16(smid);
+
+ writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
* _base_put_smid_default - Default, primarily used for config pages
* use Atomic Request Descriptor
* @ioc: per adapter object
@@ -2945,6 +3482,11 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
_base_display_OEMs_branding(ioc);
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+ pr_info("%sNVMe", i ? "," : "");
+ i++;
+ }
+
pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -3245,6 +3787,17 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
kfree(ioc->reply_post);
}
+ if (ioc->pcie_sgl_dma_pool) {
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
+ pci_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ }
+ if (ioc->pcie_sgl_dma_pool)
+ pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ }
+
if (ioc->config_page) {
dexitprintk(ioc, pr_info(MPT3SAS_FMT
"config_page(0x%p): free\n", ioc->name,
@@ -3286,7 +3839,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
- u16 max_request_credit;
+ u16 max_request_credit, nvme_blocks_needed;
unsigned short sg_tablesize;
u16 sge_size;
int i;
@@ -3308,6 +3861,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sg_tablesize = MPT3SAS_SG_DEPTH;
}
+ /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
+ if (reset_devices)
+ sg_tablesize = min_t(unsigned short, sg_tablesize,
+ MPT_KDUMP_MIN_PHYS_SEGMENTS);
+
if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
@@ -3340,7 +3898,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_depth, facts->RequestCredit);
if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
max_request_credit = MAX_HBA_QUEUE_DEPTH;
- } else
+ } else if (reset_devices)
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
+ else
max_request_credit = min_t(u16, facts->RequestCredit,
MAX_HBA_QUEUE_DEPTH);
@@ -3622,7 +4183,52 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
"internal(0x%p): depth(%d), start smid(%d)\n",
ioc->name, ioc->internal,
ioc->internal_depth, ioc->internal_smid));
+ /*
+ * The number of NVMe page sized blocks needed is:
+ * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
+ * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
+ * that is placed in the main message frame. 8 is the size of each PRP
+ * entry or PRP list pointer entry. 8 is subtracted from page_size
+ * because of the PRP list pointer entry at the end of a page, so this
+ * is not counted as a PRP entry. The 1 added page is a round up.
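+	 *
+	 * For example (illustrative numbers): with sg_tablesize = 128 and a
+	 * 4096-byte page, (((128 * 8) - 1) / (4096 - 8)) + 1 = (1023 / 4088) + 1
+	 * = 0 + 1 = 1, i.e. one 4 KB block per I/O.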
+ *
+ * To avoid allocation failures due to the amount of memory that could
+ * be required for NVMe PRP's, only each set of NVMe blocks will be
+ * contiguous, so a new set is allocated for each possible I/O.
+ */
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
+ nvme_blocks_needed =
+ (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
+ nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
+ nvme_blocks_needed++;
+
+ sz = nvme_blocks_needed * ioc->page_size;
+ ioc->pcie_sgl_dma_pool =
+ pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ if (!ioc->pcie_sgl_dma_pool) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
+ pci_pool_alloc(ioc->pcie_sgl_dma_pool,
+ GFP_KERNEL,
+ &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
+ "element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
+ total_sz += sz * ioc->scsiio_depth;
+ }
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
@@ -4446,7 +5052,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
ioc->ir_firmware = 1;
if ((facts->IOCCapabilities &
- MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+ MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
ioc->rdpq_array_capable = 1;
if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
ioc->atomic_desc_capable = 1;
@@ -4467,6 +5073,19 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
le16_to_cpu(mpi_reply.HighPriorityCredit);
facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+ facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
+
+ /*
+ * Get the Page Size from IOC Facts. If it's 0, default to 4k.
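+	 * For example, a reported CurrentHostPageSize of 12 yields
+	 * 1 << 12 = 4096 bytes.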
+ */
+ ioc->page_size = 1 << facts->CurrentHostPageSize;
+ if (ioc->page_size == 1) {
+ pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
+ "default host page size to 4k\n", ioc->name);
+ ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
+ ioc->name, facts->CurrentHostPageSize));
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"hba queue depth(%d), max chains per io(%d)\n",
@@ -4506,6 +5125,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
mpi_request.VP_ID = 0;
mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+ mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
if (_base_is_controller_msix_enabled(ioc))
mpi_request.HostMSIxVectors = ioc->reply_queue_count;
@@ -5374,6 +5994,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
*/
ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_nvme_prp = &_base_build_nvme_prp;
ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
@@ -5385,11 +6006,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+ ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
} else {
ioc->put_smid_default = &_base_put_smid_default;
ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
ioc->put_smid_fast_path = &_base_put_smid_fast_path;
ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+ ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
}
@@ -5517,9 +6140,16 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
- if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
- _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
-
+ _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
+ if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
+ if (ioc->is_gen35_ioc) {
+ _base_unmask_events(ioc,
+ MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
+ _base_unmask_events(ioc,
+ MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ }
+ }
r = _base_make_ioc_operational(ioc);
if (r)
goto out_free_resources;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a77bb7dc12b1..60f42ca3954f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -54,6 +54,7 @@
#include "mpi/mpi2_raid.h"
#include "mpi/mpi2_tool.h"
#include "mpi/mpi2_sas.h"
+#include "mpi/mpi2_pci.h"
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -73,8 +74,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "15.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 15
+#define MPT3SAS_DRIVER_VERSION "17.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 17
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -92,6 +93,7 @@
*/
#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
#define MPT_MIN_PHYS_SEGMENTS 16
+#define MPT_KDUMP_MIN_PHYS_SEGMENTS 32
#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -111,9 +113,11 @@
#define MPT3SAS_SATA_QUEUE_DEPTH 32
#define MPT3SAS_SAS_QUEUE_DEPTH 254
#define MPT3SAS_RAID_QUEUE_DEPTH 128
+#define MPT3SAS_KDUMP_SCSI_IO_DEPTH 200
#define MPT3SAS_RAID_MAX_SECTORS 8192
-
+#define MPT3SAS_HOST_PAGE_SIZE_4K 12
+#define MPT3SAS_NVME_QUEUE_DEPTH 128
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
@@ -131,6 +135,15 @@
#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
/*
+ * NVMe defines
+ */
+#define NVME_PRP_SIZE 8 /* PRP size */
+#define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */
+#define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */
+#define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */
+#define NVME_PRP_PAGE_SIZE 4096 /* Page size */
+
+/*
* reset phases
*/
#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
@@ -159,6 +172,7 @@
#define MPT_TARGET_FLAGS_VOLUME 0x02
#define MPT_TARGET_FLAGS_DELETED 0x04
#define MPT_TARGET_FASTPATH_IO 0x08
+#define MPT_TARGET_FLAGS_PCIE_DEVICE 0x10
#define SAS2_PCI_DEVICE_B0_REVISION (0x01)
#define SAS3_PCI_DEVICE_C0_REVISION (0x02)
@@ -357,7 +371,8 @@ struct Mpi2ManufacturingPage11_t {
* @flags: MPT_TARGET_FLAGS_XXX flags
* @deleted: target flaged for deletion
* @tm_busy: target is busy with TM request.
- * @sdev: The sas_device associated with this target
+ * @sas_dev: The sas_device associated with this target
+ * @pcie_dev: The pcie device associated with this target
*/
struct MPT3SAS_TARGET {
struct scsi_target *starget;
@@ -368,7 +383,8 @@ struct MPT3SAS_TARGET {
u32 flags;
u8 deleted;
u8 tm_busy;
- struct _sas_device *sdev;
+ struct _sas_device *sas_dev;
+ struct _pcie_device *pcie_dev;
};
@@ -467,6 +483,8 @@ struct _internal_cmd {
* @pfa_led_on: flag for PFA LED status
* @pend_sas_rphy_add: flag to check if device is in sas_rphy_add()
* addition routine.
+ * @chassis_slot: chassis slot
+ * @is_chassis_slot_valid: chassis slot valid or not
*/
struct _sas_device {
struct list_head list;
@@ -489,6 +507,8 @@ struct _sas_device {
u8 pfa_led_on;
u8 pend_sas_rphy_add;
u8 enclosure_level;
+ u8 chassis_slot;
+ u8 is_chassis_slot_valid;
u8 connector_name[5];
struct kref refcount;
};
@@ -508,6 +528,89 @@ static inline void sas_device_put(struct _sas_device *s)
kref_put(&s->refcount, sas_device_free);
}
+/*
+ * struct _pcie_device - attached PCIe device information
+ * @list: pcie device list
+ * @starget: starget object
+ * @wwid: device WWID
+ * @handle: device handle
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @port_num: port number
+ * @responding: used in _scsih_pcie_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @nvme_mdts: MaximumDataTransferSize from PCIe Device Page 2 for
+ * NVMe device only
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @enclosure_level: The level of device's enclosure from the controller
+ * @connector_name: ASCII value of the Connector's name
+ * @serial_number: pointer of serial number string allocated runtime
+ * @refcount: reference count for deletion
+ */
+struct _pcie_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 wwid;
+ u16 handle;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 port_num;
+ u8 responding;
+ u8 fast_path;
+ u32 nvme_mdts;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 enclosure_level;
+ u8 connector_name[4];
+ u8 *serial_number;
+ struct kref refcount;
+};
+/**
+ * pcie_device_get - Increment the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called, it increments the
+ * reference count of the pcie device it is called for.
+ *
+ */
+static inline void pcie_device_get(struct _pcie_device *p)
+{
+ kref_get(&p->refcount);
+}
+
+/**
+ * pcie_device_free - Release the pcie device object
+ * @r: kref object
+ *
+ * Frees the pcie device object. It is called when the reference count
+ * reaches zero.
+ */
+static inline void pcie_device_free(struct kref *r)
+{
+ kfree(container_of(r, struct _pcie_device, refcount));
+}
+
+/**
+ * pcie_device_put - Decrement the pcie device reference count
+ *
+ * @p: pcie_device object
+ *
+ * Whenever this function is called, it decrements the
+ * reference count of the pcie device it is called for.
+ *
+ * When the reference count reaches zero, pcie_device_free() is called
+ * to free the pcie_device object.
+ */
+static inline void pcie_device_put(struct _pcie_device *p)
+{
+ kref_put(&p->refcount, pcie_device_free);
+}
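/*
 * Illustrative sketch, not part of this patch: the intended
 * lookup/use/put pattern for the reference counting helpers above.
 * "example_show_pcie_wwid" is a hypothetical caller; real lookups go
 * through mpt3sas_get_pdev_by_handle(), declared later in this header.
 */
static inline void example_show_pcie_wwid(struct MPT3SAS_ADAPTER *ioc,
	u16 handle)
{
	struct _pcie_device *pcie_device;

	/* lookup takes a reference on success */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (!pcie_device)
		return;
	pr_info(MPT3SAS_FMT "wwid(0x%016llx)\n", ioc->name,
	    (unsigned long long)pcie_device->wwid);
	/* drop the reference; may free the object if it was the last one */
	pcie_device_put(pcie_device);
}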
/**
* struct _raid_device - raid volume link list
* @list: sas device list
@@ -556,12 +659,13 @@ struct _raid_device {
/**
* struct _boot_device - boot device info
- * @is_raid: flag to indicate whether this is volume
- * @device: holds pointer for either struct _sas_device or
- * struct _raid_device
+ *
+ * @channel: sas, raid, or pcie channel
+ * @device: holds pointer for struct _sas_device, struct _raid_device or
+ * struct _pcie_device
*/
struct _boot_device {
- u8 is_raid;
+ int channel;
void *device;
};
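/*
 * Illustrative sketch, not part of this patch: the saved channel tells
 * later code how to interpret the opaque device pointer.  RAID_CHANNEL
 * and PCIE_CHANNEL are defined in mpt3sas_scsih.c; anything else is
 * treated as a sas_device here.
 */
static inline u64 example_boot_device_wwid(struct _boot_device *boot_device)
{
	if (boot_device->channel == RAID_CHANNEL)
		return ((struct _raid_device *)boot_device->device)->wwid;
	if (boot_device->channel == PCIE_CHANNEL)
		return ((struct _pcie_device *)boot_device->device)->wwid;
	return ((struct _sas_device *)boot_device->device)->sas_address;
}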
@@ -644,6 +748,16 @@ enum reset_type {
};
/**
+ * struct pcie_sg_list - PCIe SGL buffer (contiguous per I/O)
+ * @pcie_sgl: PCIe native SGL for NVMe devices
+ * @pcie_sgl_dma: physical address
+ */
+struct pcie_sg_list {
+ void *pcie_sgl;
+ dma_addr_t pcie_sgl_dma;
+};
+
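/*
 * Illustrative sketch, not part of this patch: one way a per-I/O PCIe
 * SGL buffer could be carved out of ioc->pcie_sgl_dma_pool (declared
 * further down in struct MPT3SAS_ADAPTER).  The real allocation and
 * sizing are handled in mpt3sas_base.c.
 */
static inline int example_alloc_pcie_sgl(struct MPT3SAS_ADAPTER *ioc,
	struct pcie_sg_list *pcie_sg_list)
{
	pcie_sg_list->pcie_sgl = dma_pool_alloc(ioc->pcie_sgl_dma_pool,
	    GFP_KERNEL, &pcie_sg_list->pcie_sgl_dma);
	return pcie_sg_list->pcie_sgl ? 0 : -ENOMEM;
}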
+/**
* struct chain_tracker - firmware chain tracker
* @chain_buffer: chain buffer
* @chain_buffer_dma: physical address
@@ -669,6 +783,7 @@ struct scsiio_tracker {
struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
+ struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
struct list_head tracker_list;
u16 msix_io;
@@ -742,13 +857,19 @@ typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
/* SAS3.0 support */
typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
- struct scsi_cmnd *scmd, u16 smid);
+ struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device);
typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
dma_addr_t data_out_dma, size_t data_out_sz,
dma_addr_t data_in_dma, size_t data_in_sz);
typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
void *paddr);
+/* SAS3.5 support */
+typedef void (*NVME_BUILD_PRP)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz);
+
/* To support atomic and non atomic descriptors*/
typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 funcdep);
@@ -791,6 +912,7 @@ struct mpt3sas_facts {
u16 MaxDevHandle;
u16 MaxPersistentEntries;
u16 MinDevHandle;
+ u8 CurrentHostPageSize;
};
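/*
 * Illustrative sketch, not part of this patch: how the firmware-reported
 * CurrentHostPageSize might be converted to ioc->page_size, falling back
 * to MPT3SAS_HOST_PAGE_SIZE_4K (1 << 12 = 4096 bytes) when the firmware
 * reports zero.  The actual conversion is done in mpt3sas_base.c.
 */
static inline u32 example_host_page_size(struct mpt3sas_facts *facts)
{
	u8 shift = facts->CurrentHostPageSize ? facts->CurrentHostPageSize :
	    MPT3SAS_HOST_PAGE_SIZE_4K;

	return 1U << shift;
}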
struct mpt3sas_port_facts {
@@ -825,6 +947,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @bars: bitmask of BAR's that must be configured
* @mask_interrupts: ignore interrupt
* @dma_mask: used to set the consistent dma mask
+ * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and
+ * pci resource handling
* @fault_reset_work_q_name: fw fault work queue
* @fault_reset_work_q: ""
* @fault_reset_work: ""
@@ -888,9 +1012,13 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @sas_device_list: sas device object list
* @sas_device_init_list: sas device object list (used only at init time)
* @sas_device_lock:
+ * @pcie_device_list: pcie device object list
+ * @pcie_device_init_list: pcie device object list (used only at init time)
+ * @pcie_device_lock:
* @io_missing_delay: time for IO completed by fw when PDR enabled
* @device_missing_delay: time for device missing by fw when PDR enabled
* @sas_id : used for setting volume target IDs
+ * @pcie_target_id: used for setting pcie target IDs
* @blocking_handles: bitmask used to identify which devices need blocking
* @pd_handles : bitmask for PD handles
* @pd_handles_sz : size of pd_handle bitmask
@@ -1056,6 +1184,9 @@ struct MPT3SAS_ADAPTER {
MPT_BUILD_SG build_sg_mpi;
MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+ /* function ptr for NVMe PRP elements only */
+ NVME_BUILD_PRP build_nvme_prp;
+
/* event log */
u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
u32 event_context;
@@ -1086,11 +1217,16 @@ struct MPT3SAS_ADAPTER {
struct list_head sas_device_list;
struct list_head sas_device_init_list;
spinlock_t sas_device_lock;
+ struct list_head pcie_device_list;
+ struct list_head pcie_device_init_list;
+ spinlock_t pcie_device_lock;
+
struct list_head raid_device_list;
spinlock_t raid_device_lock;
u8 io_missing_delay;
u16 device_missing_delay;
int sas_id;
+ int pcie_target_id;
void *blocking_handles;
void *pd_handles;
@@ -1119,6 +1255,11 @@ struct MPT3SAS_ADAPTER {
int pending_io_count;
wait_queue_head_t reset_wq;
+ /* PCIe SGL */
+ struct dma_pool *pcie_sgl_dma_pool;
+ /* Host Page Size */
+ u32 page_size;
+
/* chain */
struct chain_tracker *chain_lookup;
struct list_head free_chain_list;
@@ -1216,6 +1357,7 @@ struct MPT3SAS_ADAPTER {
PUT_SMID_IO_FP_HIP put_smid_fast_path;
PUT_SMID_IO_FP_HIP put_smid_hi_priority;
PUT_SMID_DEFAULT put_smid_default;
+ PUT_SMID_DEFAULT put_smid_nvme_encap;
};
@@ -1252,7 +1394,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
-
+void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
@@ -1321,6 +1464,10 @@ struct _sas_device *mpt3sas_get_sdev_by_addr(
struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
struct _sas_device *__mpt3sas_get_sdev_by_addr(
struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle);
+struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle);
void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
struct _raid_device *
@@ -1359,6 +1506,12 @@ int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
u32 form, u32 handle);
+int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
+ u32 form, u32 handle);
int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
u16 sz);
@@ -1466,7 +1619,7 @@ void
mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid);
/* NCQ Prio Handling Check */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index dd6270125614..1c747cf419d5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -150,6 +150,24 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
desc = "driver_mapping";
break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PORT:
+ desc = "sas_port";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING:
+ desc = "ext_manufacturing";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT:
+ desc = "pcie_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH:
+ desc = "pcie_switch";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE:
+ desc = "pcie_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK:
+ desc = "pcie_link";
+ break;
}
break;
}
@@ -1053,6 +1071,88 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
+ mpi_request.Header.PageVersion = MPI26_PCIEDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE;
+ mpi_request.Header.PageVersion = MPI26_PCIEDEVICE2_PAGEVERSION;
+ mpi_request.Header.PageNumber = 2;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+out:
+ return r;
+}
+
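/*
 * Illustrative sketch, not part of this patch: fetching PCIe Device
 * Page 0 for a known device handle.  MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE
 * is assumed to be the matching page-address form constant from
 * mpi2_cnfg.h.
 */
static int example_read_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
	u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u16 ioc_status;

	if (mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))
		return -ENXIO;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	return (ioc_status == MPI2_IOCSTATUS_SUCCESS) ? 0 : -ENXIO;
}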
+/**
* mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
* @ioc: per adapter object
* @num_phys: pointer returned with the number of phys
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index bdffb692bded..b4c374b08e5e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -79,32 +79,6 @@ enum block_state {
};
/**
- * _ctl_sas_device_find_by_handle - sas device search
- * @ioc: per adapter object
- * @handle: sas device handle (assigned by firmware)
- * Context: Calling function should acquire ioc->sas_device_lock
- *
- * This searches for sas_device based on sas_address, then return sas_device
- * object.
- */
-static struct _sas_device *
-_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
-{
- struct _sas_device *sas_device, *r;
-
- r = NULL;
- list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
- if (sas_device->handle != handle)
- continue;
- r = sas_device;
- goto out;
- }
-
- out:
- return r;
-}
-
-/**
* _ctl_display_some_debug - debug routine
* @ioc: per adapter object
* @smid: system request message index
@@ -229,10 +203,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
Mpi2SCSIIOReply_t *scsi_reply =
(Mpi2SCSIIOReply_t *)mpi_reply;
struct _sas_device *sas_device = NULL;
- unsigned long flags;
+ struct _pcie_device *pcie_device = NULL;
- spin_lock_irqsave(&ioc->sas_device_lock, flags);
- sas_device = _ctl_sas_device_find_by_handle(ioc,
+ sas_device = mpt3sas_get_sdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (sas_device) {
pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
@@ -242,8 +215,25 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
"\tenclosure_logical_id(0x%016llx), slot(%d)\n",
ioc->name, (unsigned long long)
sas_device->enclosure_logical_id, sas_device->slot);
+ sas_device_put(sas_device);
+ }
+ if (!sas_device) {
+ pcie_device = mpt3sas_get_pdev_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (pcie_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tWWID(0x%016llx), port(%d)\n", ioc->name,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ pcie_device_put(pcie_device);
+ }
}
- spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
pr_info(MPT3SAS_FMT
"\tscsi_state(0x%02x), scsi_status"
@@ -272,6 +262,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
{
MPI2DefaultReply_t *mpi_reply;
Mpi2SCSIIOReply_t *scsiio_reply;
+ Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
const void *sense_data;
u32 sz;
@@ -298,7 +289,20 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
memcpy(ioc->ctl_cmds.sense, sense_data, sz);
}
}
+ /*
+ * Get Error Response data for NVMe device. The ctl_cmds.sense
+ * buffer is used to store the Error Response data.
+ */
+ if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_error_reply =
+ (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
+ sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
+ le32_to_cpu(nvme_error_reply->ErrorResponseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
}
+
_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
complete(&ioc->ctl_cmds.done);
@@ -640,11 +644,12 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
{
MPI2RequestHeader_t *mpi_request = NULL, *request;
MPI2DefaultReply_t *mpi_reply;
+ Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
u32 ioc_state;
u16 smid;
unsigned long timeout;
u8 issue_reset;
- u32 sz;
+ u32 sz, sz_arg;
void *psge;
void *data_out = NULL;
dma_addr_t data_out_dma = 0;
@@ -741,7 +746,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
- mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
if (!device_handle || (device_handle >
@@ -792,6 +798,38 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_NVME_ENCAPSULATED:
+ {
+ nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ /*
+ * Get the Physical Address of the sense buffer.
+ * Use Error Response buffer address field to hold the sense
+ * buffer address.
+ * Clear the internal sense buffer, which will potentially hold
+ * the Completion Queue Entry on return, or 0 if no Entry.
+ * Build the PRPs and set direction bits.
+ * Send the request.
+ */
+ nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
+ 0xFFFFFFFF00000000;
+ nvme_encap_request->ErrorResponseBaseAddress |=
+ (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ nvme_encap_request->ErrorResponseAllocationLength =
+ NVME_ERROR_RESPONSE_SIZE;
+ memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
+ ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
+ data_out_dma, data_out_sz, data_in_dma, data_in_sz);
+ if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x): "
+ "ioctl failed due to device removal in progress\n",
+ ioc->name, device_handle));
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ ioc->put_smid_nvme_encap(ioc, smid);
+ break;
+ }
case MPI2_FUNCTION_SCSI_IO_REQUEST:
case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
{
@@ -1007,15 +1045,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
}
- /* copy out sense to user */
+ /* copy out sense/NVMe Error Response to user */
if (karg.max_sense_bytes && (mpi_request->Function ==
MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
- MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
- sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
+ MPI2_FUNCTION_NVME_ENCAPSULATED)) {
+ if (karg.sense_data_ptr == NULL) {
+ pr_info(MPT3SAS_FMT "Response buffer provided"
+ " by application is NULL; Response data will"
+ " not be returned.\n", ioc->name);
+ goto out;
+ }
+ sz_arg = (mpi_request->Function ==
+ MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
+ SCSI_SENSE_BUFFERSIZE;
+ sz = min_t(u32, karg.max_sense_bytes, sz_arg);
if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
sz)) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
- __LINE__, __func__);
+ __LINE__, __func__);
ret = -ENODATA;
goto out;
}
@@ -1065,12 +1113,6 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt3_ioctl_iocinfo karg;
- if (copy_from_user(&karg, arg, sizeof(karg))) {
- pr_err("failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- return -EFAULT;
- }
-
dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
__func__));
@@ -1295,6 +1337,42 @@ _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _ctl_btdh_search_pcie_device - searching for pcie device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->pcie_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == pcie_device->handle) {
+ btdh->bus = pcie_device->channel;
+ btdh->id = pcie_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == pcie_device->channel && btdh->id ==
+ pcie_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = pcie_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return rc;
+}
+
+/**
* _ctl_btdh_search_raid_device - searching for raid device
* @ioc: per adapter object
* @btdh: btdh ioctl payload
@@ -1352,6 +1430,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
rc = _ctl_btdh_search_sas_device(ioc, &karg);
if (!rc)
+ rc = _ctl_btdh_search_pcie_device(ioc, &karg);
+ if (!rc)
_ctl_btdh_search_raid_device(ioc, &karg);
if (copy_to_user(arg, &karg, sizeof(karg))) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 22998cbd538f..b258f210120a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -60,6 +60,9 @@
#include "mpt3sas_base.h"
#define RAID_CHANNEL 1
+
+#define PCIE_CHANNEL 2
+
/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander);
@@ -69,7 +72,11 @@ static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u8 retry_count, u8 is_pd);
-
+static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device);
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
/* global parameters */
@@ -281,7 +288,7 @@ struct _scsi_io_transfer {
* Note: The logging levels are defined in mpt3sas_debug.h.
*/
static int
-_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
int ret = param_set_int(val, kp);
struct MPT3SAS_ADAPTER *ioc;
@@ -406,11 +413,6 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
*sas_address = 0;
- if (handle <= ioc->sas_hba.num_phys) {
- *sas_address = ioc->sas_hba.sas_address;
- return 0;
- }
-
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
@@ -420,7 +422,15 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
- *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ /* For HBA, vSES doesn't return HBA SAS address. Instead return
+ * vSES's sas address.
+ */
+ if ((handle <= ioc->sas_hba.num_phys) &&
+ (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
+ MPI2_SAS_DEVICE_INFO_SEP)))
+ *sas_address = ioc->sas_hba.sas_address;
+ else
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
return 0;
}
@@ -439,21 +449,22 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
/**
* _scsih_determine_boot_device - determine boot device.
* @ioc: per adapter object
- * @device: either sas_device or raid_device object
- * @is_raid: [flag] 1 = raid object, 0 = sas object
+ * @device: sas_device, raid_device or pcie_device object
+ * @channel: SAS, RAID or PCIe channel
*
* Determines whether this device should be the first reported device to
* scsi-ml or sas transport, this purpose is for persistent boot device.
* There are primary, alternate, and current entries in bios page 2. The order
* priority is primary, alternate, then current. This routine saves
- * the corresponding device object and is_raid flag in the ioc object.
+ * the corresponding device object.
* The saved data to be used later in _scsih_probe_boot_devices().
*/
static void
-_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
- void *device, u8 is_raid)
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
+ u32 channel)
{
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
u64 sas_address;
u64 device_name;
@@ -468,18 +479,24 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
if (!ioc->bios_pg3.BiosVersion)
return;
- if (!is_raid) {
- sas_device = device;
- sas_address = sas_device->sas_address;
- device_name = sas_device->device_name;
- enclosure_logical_id = sas_device->enclosure_logical_id;
- slot = sas_device->slot;
- } else {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
sas_address = raid_device->wwid;
device_name = 0;
enclosure_logical_id = 0;
slot = 0;
+ } else if (channel == PCIE_CHANNEL) {
+ pcie_device = device;
+ sas_address = pcie_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ } else {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
}
if (!ioc->req_boot_device.device) {
@@ -493,7 +510,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->req_boot_device.device = device;
- ioc->req_boot_device.is_raid = is_raid;
+ ioc->req_boot_device.channel = channel;
}
}
@@ -508,7 +525,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->req_alt_boot_device.device = device;
- ioc->req_alt_boot_device.is_raid = is_raid;
+ ioc->req_alt_boot_device.channel = channel;
}
}
@@ -523,7 +540,7 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__,
(unsigned long long)sas_address));
ioc->current_boot_device.device = device;
- ioc->current_boot_device.is_raid = is_raid;
+ ioc->current_boot_device.channel = channel;
}
}
}
@@ -536,7 +553,7 @@ __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
assert_spin_locked(&ioc->sas_device_lock);
- ret = tgt_priv->sdev;
+ ret = tgt_priv->sas_dev;
if (ret)
sas_device_get(ret);
@@ -557,6 +574,44 @@ mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
return ret;
}
+static struct _pcie_device *
+__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ ret = tgt_priv->pcie_dev;
+ if (ret)
+ pcie_device_get(ret);
+
+ return ret;
+}
+
+/**
+ * mpt3sas_get_pdev_from_target - pcie device search
+ * @ioc: per adapter object
+ * @tgt_priv: starget private object
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * it before returning the pcie_device object.
+ *
+ * This searches for the pcie_device from the target, then returns the
+ * pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
+ struct MPT3SAS_TARGET *tgt_priv)
+{
+ struct _pcie_device *ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return ret;
+}
struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
@@ -636,7 +691,7 @@ found_device:
* This searches for sas_device based on sas_address, then return sas_device
* object.
*/
-static struct _sas_device *
+struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
struct _sas_device *sas_device;
@@ -650,6 +705,69 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_display_enclosure_chassis_info - display device location info
+ * @ioc: per adapter object
+ * @sas_device: per sas device object
+ * @sdev: scsi device struct
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device, struct scsi_device *sdev,
+ struct scsi_target *starget)
+{
+ if (sdev) {
+ if (sas_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure logical id (0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else if (starget) {
+ if (sas_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d) \n",
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ starget_printk(KERN_INFO, starget,
+ "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
+ } else {
+ if (sas_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "enclosure logical id(0x%016llx), slot(%d) \n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name);
+ if (sas_device->is_chassis_slot_valid)
+ pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
+ ioc->name, sas_device->chassis_slot);
+ }
+}
+
+/**
* _scsih_sas_device_remove - remove sas_device from list.
* @ioc: per adapter object
* @sas_device: the sas_device object
@@ -670,17 +788,7 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
ioc->name, sas_device->handle,
(unsigned long long) sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
-
- if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
/*
* The lock serializes access to the list, but we still need to verify
@@ -772,17 +880,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
ioc->name, __func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot));
-
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- sas_device->enclosure_level, sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device_get(sas_device);
@@ -832,17 +931,8 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
__func__, sas_device->handle,
(unsigned long long)sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot));
-
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device_get(sas_device);
@@ -851,6 +941,282 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->wwid == wwid)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_wwid - pcie device search
+ * @ioc: per adapter object
+ * @wwid: wwid
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * it before returning the pcie_device object.
+ *
+ * This searches for the pcie_device based on wwid, then returns the
+ * pcie_device object.
+ */
+static struct _pcie_device *
+mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->id == id && pcie_device->channel == channel)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+static struct _pcie_device *
+__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+
+ assert_spin_locked(&ioc->pcie_device_lock);
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
+ if (pcie_device->handle == handle)
+ goto found_device;
+
+ return NULL;
+
+found_device:
+ pcie_device_get(pcie_device);
+ return pcie_device;
+}
+
+
+/**
+ * mpt3sas_get_pdev_by_handle - pcie device search
+ * @ioc: per adapter object
+ * @handle: Firmware device handle
+ *
+ * Context: This function will acquire ioc->pcie_device_lock and will release
+ * it before returning the pcie_device object.
+ *
+ * This searches for the pcie_device based on handle, then returns the
+ * pcie_device object.
+ */
+struct _pcie_device *
+mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
+/**
+ * _scsih_pcie_device_remove - remove pcie_device from list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * If pcie_device is on the list, remove it and decrement its reference count.
+ */
+static void
+_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+
+ if (!pcie_device)
+ return;
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, pcie_device->handle,
+ (unsigned long long) pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ kfree(pcie_device->serial_number);
+ pcie_device_put(pcie_device);
+ }
+}
+
+
+/**
+ * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ int was_on_pcie_device_list = 0;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device) {
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ was_on_pcie_device_list = 1;
+ pcie_device_put(pcie_device);
+ }
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (was_on_pcie_device_list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ }
+}
+
+/**
+ * _scsih_pcie_device_add - add pcie_device object
+ * @ioc: per adapter object
+ * @pcie_device: pcie_device object
+ *
+ * This is added to the pcie_device_list linked list.
+ */
+static void
+_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ } else if (!pcie_device->starget) {
+ if (!ioc->is_driver_loading) {
+/*TODO-- Need to find out whether this condition will occur or not*/
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ }
+ } else
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+}
+
+/**
+ * _scsih_pcie_device_init_add - insert pcie_device into the init list.
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ * Context: This function will acquire ioc->pcie_device_lock.
+ *
+ * Adds the new object to ioc->pcie_device_init_list at driver load time.
+ */
+static void
+_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
+ _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
/**
* _scsih_raid_device_find_by_id - raid device search
* @ioc: per adapter object
@@ -1062,6 +1428,23 @@ _scsih_is_end_device(u32 device_info)
}
/**
+ * _scsih_is_nvme_device - determines if device is an nvme device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if nvme device.
+ */
+static int
+_scsih_is_nvme_device(u32 device_info)
+{
+ if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_NVME)
+ return 1;
+ else
+ return 0;
+}
+
+/**
* _scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -1278,6 +1661,7 @@ scsih_target_alloc(struct scsi_target *starget)
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
struct sas_rphy *rphy;
@@ -1307,6 +1691,28 @@ scsih_target_alloc(struct scsi_target *starget)
return 0;
}
+ /* PCIe devices */
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
+ starget->channel);
+ if (pcie_device) {
+ sas_target_priv_data->handle = pcie_device->handle;
+ sas_target_priv_data->sas_address = pcie_device->wwid;
+ sas_target_priv_data->pcie_dev = pcie_device;
+ pcie_device->starget = starget;
+ pcie_device->id = starget->id;
+ pcie_device->channel = starget->channel;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_PCIE_DEVICE;
+ if (pcie_device->fast_path)
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return 0;
+ }
+
/* sas/sata devices */
spin_lock_irqsave(&ioc->sas_device_lock, flags);
rphy = dev_to_rphy(starget->dev.parent);
@@ -1316,7 +1722,7 @@ scsih_target_alloc(struct scsi_target *starget)
if (sas_device) {
sas_target_priv_data->handle = sas_device->handle;
sas_target_priv_data->sas_address = sas_device->sas_address;
- sas_target_priv_data->sdev = sas_device;
+ sas_target_priv_data->sas_dev = sas_device;
sas_device->starget = starget;
sas_device->id = starget->id;
sas_device->channel = starget->channel;
@@ -1324,7 +1730,8 @@ scsih_target_alloc(struct scsi_target *starget)
sas_target_priv_data->flags |=
MPT_TARGET_FLAGS_RAID_COMPONENT;
if (sas_device->fast_path)
- sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FASTPATH_IO;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -1345,7 +1752,9 @@ scsih_target_destroy(struct scsi_target *starget)
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
+ struct sas_rphy *rphy;
sas_target_priv_data = starget->hostdata;
if (!sas_target_priv_data)
@@ -1363,7 +1772,29 @@ scsih_target_destroy(struct scsi_target *starget)
goto out;
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && (pcie_device->starget == starget) &&
+ (pcie_device->id == starget->id) &&
+ (pcie_device->channel == starget->channel))
+ pcie_device->starget = NULL;
+
+ if (pcie_device) {
+ /*
+ * Corresponding get() is in _scsih_target_alloc()
+ */
+ sas_target_priv_data->pcie_dev = NULL;
+ pcie_device_put(pcie_device);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ goto out;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
if (sas_device && (sas_device->starget == starget) &&
(sas_device->id == starget->id) &&
@@ -1374,7 +1805,7 @@ scsih_target_destroy(struct scsi_target *starget)
/*
* Corresponding get() is in _scsih_target_alloc()
*/
- sas_target_priv_data->sdev = NULL;
+ sas_target_priv_data->sas_dev = NULL;
sas_device_put(sas_device);
sas_device_put(sas_device);
@@ -1403,6 +1834,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
struct scsi_target *starget;
struct _raid_device *raid_device;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
@@ -1431,8 +1863,22 @@ scsih_slave_alloc(struct scsi_device *sdev)
raid_device->sdev = sdev; /* raid is single lun */
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
+ if (starget->channel == PCIE_CHANNEL) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_target_priv_data->sas_address);
+ if (pcie_device && (pcie_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : pcie_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ pcie_device->starget = starget;
+ }
+
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
sas_target_priv_data->sas_address);
@@ -1466,6 +1912,7 @@ scsih_slave_destroy(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct MPT3SAS_ADAPTER *ioc;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
unsigned long flags;
if (!sdev->hostdata)
@@ -1478,7 +1925,19 @@ scsih_slave_destroy(struct scsi_device *sdev)
shost = dev_to_shost(&starget->dev);
ioc = shost_priv(shost);
- if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc,
+ sas_target_priv_data);
+ if (pcie_device && !sas_target_priv_data->num_luns)
+ pcie_device->starget = NULL;
+
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_from_target(ioc,
sas_target_priv_data);
@@ -1562,6 +2021,14 @@ scsih_is_raid(struct device *dev)
return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
}
+static int
+scsih_is_nvme(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
+}
+
/**
* scsih_get_resync - get raid volume resync percent complete
* @dev the device struct object
@@ -1837,6 +2304,7 @@ scsih_slave_configure(struct scsi_device *sdev)
struct MPT3SAS_DEVICE *sas_device_priv_data;
struct MPT3SAS_TARGET *sas_target_priv_data;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _raid_device *raid_device;
unsigned long flags;
int qdepth;
@@ -1967,6 +2435,55 @@ scsih_slave_configure(struct scsi_device *sdev)
}
}
+ /* PCIe handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ ds = "NVMe";
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ ds, handle, (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure logical id(0x%016llx), slot(%d)\n",
+ ds,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure level(0x%04x),"
+ "connector name( %s)\n", ds,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ scsih_change_queue_depth(sdev, qdepth);
+
+ if (pcie_device->nvme_mdts)
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ pcie_device->nvme_mdts/512);
+ pcie_device_put(pcie_device);
+ /* Enable the QUEUE_FLAG_NOMERGES flag so that I/Os are not
+ * merged, which avoids the holes that a merge operation can
+ * create.
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES,
+ sdev->request_queue);
+ blk_queue_virt_boundary(sdev->request_queue,
+ ioc->page_size - 1);
+ return 0;
+ }
+
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_by_addr(ioc,
sas_device_priv_data->sas_target->sas_address);
@@ -2005,16 +2522,8 @@ scsih_slave_configure(struct scsi_device *sdev)
"sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
ds, handle, (unsigned long long)sas_device->sas_address,
sas_device->phy, (unsigned long long)sas_device->device_name);
- if (sas_device->enclosure_handle != 0)
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
- ds, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- sdev_printk(KERN_INFO, sdev,
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ds, sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
@@ -2400,6 +2909,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
unsigned long flags;
char *device_str = NULL;
@@ -2416,6 +2926,31 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
"%s handle(0x%04x), %s wwid(0x%016llx)\n",
device_str, priv_target->handle,
device_str, (unsigned long long)priv_target->sas_address);
+
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ starget_printk(KERN_INFO, starget,
+ "enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
@@ -2433,17 +2968,9 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
sas_device->handle,
(unsigned long long)sas_device->sas_address,
sas_device->phy);
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget,
- "enclosure_logical_id(0x%016llx), slot(%d)\n",
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- starget_printk(KERN_INFO, starget,
- "enclosure level(0x%04x),connector name(%s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(NULL, sas_device,
+ NULL, starget);
sas_device_put(sas_device);
}
@@ -3007,8 +3534,6 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _sas_device *sas_device;
sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
- if (!sas_device)
- return;
shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
@@ -3018,7 +3543,7 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device_priv_data->block)
continue;
- if (sas_device->pend_sas_rphy_add)
+ if (sas_device && sas_device->pend_sas_rphy_add)
continue;
if (sas_device_priv_data->ignore_delay_remove) {
sdev_printk(KERN_INFO, sdev,
@@ -3029,7 +3554,8 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
- sas_device_put(sas_device);
+ if (sas_device)
+ sas_device_put(sas_device);
}
/**
@@ -3113,6 +3639,33 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_block_io_to_pcie_children_attached_directly
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all directly
+ * attached devices during device pull/reconnect.
+ */
+static void
+_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code ==
+ MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+/**
* _scsih_tm_tr_send - send task management request
* @ioc: per adapter object
* @handle: device handle
@@ -3133,18 +3686,14 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
Mpi2SCSITaskManagementRequest_t *mpi_request;
u16 smid;
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
u64 sas_address = 0;
unsigned long flags;
struct _tr_list *delayed_tr;
u32 ioc_state;
- if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed: handle(0x%04x)\n",
- __func__, ioc->name, handle));
- return;
- } else if (ioc->pci_error_recovery) {
+ if (ioc->pci_error_recovery) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: host in pci error recovery: handle(0x%04x)\n",
__func__, ioc->name,
@@ -3175,24 +3724,52 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = sas_device->sas_address;
}
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-
+ if (!sas_device) {
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ if (pcie_device && pcie_device->starget &&
+ pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = pcie_device->wwid;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ }
if (sas_target_priv_data) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, handle,
(unsigned long long)sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:enclosure logical id(0x%016llx),"
- " slot(%d)\n", ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: enclosure level(0x%04x),"
- " connector name( %s)\n", ioc->name,
- sas_device->enclosure_level,
- sas_device->connector_name));
+ if (sas_device) {
+ if (sas_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag:enclosure logical "
+ "id(0x%016llx), slot(%d)\n", ioc->name,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot));
+ if (sas_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: enclosure "
+ "level(0x%04x), connector name( %s)\n",
+ ioc->name, sas_device->enclosure_level,
+ sas_device->connector_name));
+ } else if (pcie_device) {
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: logical "
+ "id(0x%016llx), slot(%d)\n", ioc->name,
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag:, enclosure "
+ "level(0x%04x), "
+ "connector name( %s)\n", ioc->name,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+ }
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
@@ -3227,6 +3804,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
out:
if (sas_device)
sas_device_put(sas_device);
+ if (pcie_device)
+ pcie_device_put(pcie_device);
}
/**
@@ -3731,6 +4310,81 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_check_pcie_topo_remove_events - sanity check on topo
+ * events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This handles the case where the driver receives multiple switch
+ * or device add and delete events in a single shot. When there
+ * is a delete event, the routine voids any pending add
+ * events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle, switch_handle;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
+ if (!switch_handle) {
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+ return;
+ }
+ /* TODO We are not supporting cascaded PCIe Switch removal yet */
+ if ((event_data->SwitchStatus
+ == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
+ (event_data->SwitchStatus ==
+ MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
+ _scsih_block_io_to_pcie_children_attached_directly(
+ ioc, event_data);
+
+ if (event_data->SwitchStatus != MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ fw_event->event_data;
+ if (local_event_data->SwitchStatus ==
+ MPI26_EVENT_PCIE_TOPO_SS_ADDED ||
+ local_event_data->SwitchStatus ==
+ MPI26_EVENT_PCIE_TOPO_SS_RESPONDING) {
+ if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
+ switch_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag for switch event\n",
+ ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
* _scsih_set_volume_delete_flag - setting volume delete flag
* @ioc: per adapter object
* @handle: device handle
@@ -3979,7 +4633,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
*/
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- Mpi2SCSIIORequest_t *mpi_request)
+ Mpi25SCSIIORequest_t *mpi_request)
{
u16 eedp_flags;
unsigned char prot_op = scsi_get_prot_op(scmd);
@@ -4082,7 +4736,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
struct _raid_device *raid_device;
struct request *rq = scmd->request;
int class;
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
+ struct _pcie_device *pcie_device = NULL;
u32 mpi_control;
u16 smid;
u16 handle;
@@ -4159,8 +4814,9 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* Make sure Device is not raid volume.
* We do not expose raid functionality to upper layer for warpdrive.
*/
- if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
- && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
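+ /* TLR is an SSP feature, so NVMe devices are excluded as well. */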
+ if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
+ && !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
@@ -4170,7 +4826,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
- memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ memset(mpi_request, 0, ioc->request_sz);
_scsih_setup_eedp(ioc, scmd, mpi_request);
if (scmd->cmd_len == 32)
@@ -4189,13 +4845,14 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
mpi_request->SenseBufferLowAddress =
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
- mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
mpi_request->LUN);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
if (mpi_request->DataLength) {
- if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ pcie_device = sas_target_priv_data->pcie_dev;
+ if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
goto out;
}
@@ -4204,8 +4861,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
- mpt3sas_setup_direct_io(ioc, scmd, raid_device, mpi_request,
- smid);
+ mpt3sas_setup_direct_io(ioc, scmd,
+ raid_device, mpi_request, smid);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -4273,6 +4930,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
char *desc_scsi_state = ioc->tmp_string;
u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
struct _sas_device *sas_device = NULL;
+ struct _pcie_device *pcie_device = NULL;
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *priv_target = starget->hostdata;
char *device_str = NULL;
@@ -4405,6 +5063,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
device_str, (unsigned long long)priv_target->sas_address);
+ } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
+ pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
+ if (pcie_device) {
+ pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "\tenclosure logical id(0x%016llx), "
+ "slot(%d)\n", ioc->name,
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0])
+ pr_info(MPT3SAS_FMT
+ "\tenclosure level(0x%04x),"
+ "connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+ pcie_device_put(pcie_device);
+ }
} else {
sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
@@ -4412,19 +5092,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
"\tsas_address(0x%016llx), phy(%d)\n",
ioc->name, (unsigned long long)
sas_device->sas_address, sas_device->phy);
- if (sas_device->enclosure_handle != 0)
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx),"
- "slot(%d)\n", ioc->name,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0])
- pr_warn(MPT3SAS_FMT
- "\tenclosure level(0x%04x),"
- " connector name( %s)\n", ioc->name,
- sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL);
sas_device_put(sas_device);
}
@@ -4451,11 +5121,10 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
}
-
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
response_bytes = (u8 *)&response_info;
@@ -4602,16 +5271,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
goto out_unlock;
- if (sas_device->enclosure_handle != 0)
- starget_printk(KERN_INFO, starget, "predicted fault, "
- "enclosure logical id(0x%016llx), slot(%d)\n",
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- starget_printk(KERN_WARNING, starget, "predicted fault, "
- "enclosure level(0x%04x), connector name( %s)\n",
- sas_device->enclosure_level,
- sas_device->connector_name);
+ _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
+
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
@@ -4666,7 +5327,7 @@ out_unlock:
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
- Mpi2SCSIIORequest_t *mpi_request;
+ Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
u16 ioc_status;
@@ -4731,9 +5392,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
if (!sas_device_priv_data->tlr_snoop_check) {
sas_device_priv_data->tlr_snoop_check++;
- if (!ioc->is_warpdrive &&
+ if ((!ioc->is_warpdrive &&
!scsih_is_raid(&scmd->device->sdev_gendev) &&
- sas_is_tlr_enabled(scmd->device) &&
+ !scsih_is_nvme(&scmd->device->sdev_gendev))
+ && sas_is_tlr_enabled(scmd->device) &&
response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
sas_disable_tlr(scmd->device);
sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
@@ -4804,6 +5466,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
scmd->result = DID_RESET << 16;
break;
+ } else if ((scmd->device->channel == RAID_CHANNEL) &&
+ (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
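+ /* A terminated RAID-volume command with no SCSI status is
+ * retried by reporting DID_RESET.
+ */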
+ scmd->result = DID_RESET << 16;
+ break;
}
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -5274,8 +5941,6 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
sas_address);
- if (sas_expander)
- list_del(&sas_expander->list);
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (sas_expander)
_scsih_expander_node_remove(ioc, sas_expander);
@@ -5386,6 +6051,52 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
}
/**
+ * _scsih_get_enclosure_logicalid_chassis_slot - get device's
+ * EnclosureLogicalID and ChassisSlot information.
+ * @ioc: per adapter object
+ * @sas_device_pg0: SAS device page0
+ * @sas_device: per sas device object
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_get_enclosure_logicalid_chassis_slot(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasDevicePage0_t *sas_device_pg0, struct _sas_device *sas_device)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+
+ if (!sas_device_pg0 || !sas_device)
+ return;
+
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0->EnclosureHandle);
+ sas_device->is_chassis_slot_valid = 0;
+
+ if (!le16_to_cpu(sas_device_pg0->EnclosureHandle))
+ return;
+
+ if (mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ le16_to_cpu(sas_device_pg0->EnclosureHandle))) {
+ pr_err(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
+ return;
+ }
+
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ if (le16_to_cpu(enclosure_pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot = enclosure_pg0.ChassisSlot;
+ }
+}
+
+
+/**
* _scsih_check_device - checking device responsiveness
* @ioc: per adapter object
* @parent_sas_address: sas address of parent expander or sas host
@@ -5409,7 +6120,6 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
struct MPT3SAS_TARGET *sas_target_priv_data;
u32 device_info;
-
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
return;
@@ -5456,6 +6166,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
+
+ _scsih_get_enclosure_logicalid_chassis_slot(ioc,
+ &sas_device_pg0, sas_device);
}
/* check if device is present */
@@ -5507,6 +6220,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
u32 ioc_status;
u64 sas_address;
u32 device_info;
+ int encl_pg0_rc = -1;
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
@@ -5551,6 +6265,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
return -1;
}
+ if (sas_device_pg0.EnclosureHandle) {
+ encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device_pg0.EnclosureHandle);
+ if (encl_pg0_rc)
+ pr_info(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name, sas_device_pg0.EnclosureHandle);
+ }
+
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
@@ -5588,13 +6312,21 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
sas_device->enclosure_level = 0;
sas_device->connector_name[0] = '\0';
}
- /* get enclosure_logical_id */
- if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
- ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
- sas_device->enclosure_handle)))
+
+ /* get enclosure_logical_id & chassis_slot */
+ sas_device->is_chassis_slot_valid = 0;
+ if (encl_pg0_rc == 0) {
sas_device->enclosure_logical_id =
le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ if (le16_to_cpu(enclosure_pg0.Flags) &
+ MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
+ sas_device->is_chassis_slot_valid = 1;
+ sas_device->chassis_slot =
+ enclosure_pg0.ChassisSlot;
+ }
+ }
+
/* get device name */
sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
@@ -5625,23 +6357,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
_scsih_turn_off_pfa_led(ioc, sas_device);
sas_device->pfa_led_on = 0;
}
+
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- sas_device->enclosure_level,
- sas_device->connector_name));
+
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
if (sas_device->starget && sas_device->starget->hostdata) {
sas_target_priv_data = sas_device->starget->hostdata;
@@ -5660,34 +6384,16 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
"removing handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, sas_device->handle,
(unsigned long long) sas_device->sas_address);
- if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing : enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot);
- if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+
+ _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
ioc->name, __func__,
sas_device->handle, (unsigned long long)
sas_device->sas_address));
- if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)sas_device->enclosure_logical_id,
- sas_device->slot));
- if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
- ioc->name, __func__, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
+ NULL, NULL));
}
/**
@@ -6028,7 +6734,705 @@ out:
sas_device_put(sas_device);
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+
+/**
+ * _scsih_check_pcie_access_status - check access flags
+ * @ioc: per adapter object
+ * @wwid: wwid
+ * @handle: device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
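+ /* Only NO_ERRORS and NEEDS_INITIALIZATION mean the device is
+ * accessible; every other status is logged and treated as a
+ * failure.
+ */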
+ switch (access_status) {
+ case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
+ case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
+ desc = "PCIe device capability failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
+ desc = "PCIe device blocked";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
+ desc = "PCIe device mem space access failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
+ desc = "PCIe device unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
+ desc = "PCIe device MSIx Required";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
+ desc = "PCIe device init fail max";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
+ desc = "PCIe device status unknown";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
+ desc = "nvme ready timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
+ desc = "nvme device configuration unsupported";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
+ desc = "nvme identify failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
+ desc = "nvme qconfig failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
+ desc = "nvme qcreation failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
+ desc = "nvme eventcfg failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
+ desc = "nvme get feature stat failed";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
+ desc = "nvme idle timeout";
+ break;
+ case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
+ desc = "nvme failure status";
+ break;
+ default:
+ pr_err(MPT3SAS_FMT
+ " NVMe discovery error(0x%02x): wwid(0x%016llx),"
+ "handle(0x%04x)\n", ioc->name, access_status,
+ (unsigned long long)wwid, handle);
+ return rc;
+ }
+
+ if (!rc)
+ return rc;
+
+ pr_info(MPT3SAS_FMT
+ "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc,
+ (unsigned long long)wwid, handle);
+ return rc;
+}
+
+/**
+ * _scsih_pcie_device_remove_from_sml - removing pcie device
+ * from SML and free up associated memory
+ * @ioc: per adapter object
+ * @pcie_device: the pcie_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)
+ pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ if (pcie_device->starget && pcie_device->starget->hostdata) {
+ sas_target_priv_data = pcie_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, pcie_device->wwid);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), wwid (0x%016llx)\n",
+ ioc->name, pcie_device->handle,
+ (unsigned long long) pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ pr_info(MPT3SAS_FMT
+ "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ if (pcie_device->connector_name[0] != '\0')
+ pr_info(MPT3SAS_FMT
+ "removing: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, pcie_device->enclosure_level,
+ pcie_device->connector_name);
+
+ if (pcie_device->starget)
+ scsi_remove_target(&pcie_device->starget->dev);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ pcie_device->handle, (unsigned long long)
+ pcie_device->wwid));
+ if (pcie_device->enclosure_handle != 0)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ ioc->name, __func__,
+ (unsigned long long)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
+ if (pcie_device->connector_name[0] != '\0')
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ ioc->name, __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
+
+ kfree(pcie_device->serial_number);
+}
+
+
+/**
+ * _scsih_pcie_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @handle: attached device handle
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ u32 ioc_status;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_device(device_info)))
+ return;
+
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(pcie_device->handle != handle)) {
+ starget = pcie_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ pcie_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ pcie_device->handle = handle;
+
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+ }
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ pr_info(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ pcie_device_put(pcie_device);
+
+ _scsih_ublock_io_device(ioc, wwid);
+
+ return;
+}
+
+/**
+ * _scsih_pcie_add_device - creating pcie device object
+ * @ioc: per adapter object
+ * @handle: pcie device handle
+ *
+ * Creating end device object, stored in ioc->pcie_device_list.
+ *
+ * Return 1 means queue the event later, 0 means complete the event
+ */
+static int
+_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi26PCIeDevicePage2_t pcie_device_pg2;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _pcie_device *pcie_device;
+ u32 pcie_device_type;
+ u32 ioc_status;
+ u64 wwid;
+
+ if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
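+ /* Remember this handle so a later topology event can be
+ * converted into a device add while the device is still pending.
+ */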
+ set_bit(handle, ioc->pend_os_device_add);
+ wwid = le64_to_cpu(pcie_device_pg0.WWID);
+
+ /* check if device is present */
+ if (!(le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x04%x)!!!\n",
+ ioc->name, handle);
+ return 0;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_pcie_access_status(ioc, wwid, handle,
+ pcie_device_pg0.AccessStatus))
+ return 0;
+
+ if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ return 0;
+
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
+ if (pcie_device) {
+ clear_bit(handle, ioc->pend_os_device_add);
+ pcie_device_put(pcie_device);
+ return 0;
+ }
+
+ pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
+ if (!pcie_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 0;
+ }
+
+ kref_init(&pcie_device->refcount);
+ pcie_device->id = ioc->pcie_target_id++;
+ pcie_device->channel = PCIE_CHANNEL;
+ pcie_device->handle = handle;
+ pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ pcie_device->wwid = wwid;
+ pcie_device->port_num = pcie_device_pg0.PortNum;
+ pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+ pcie_device_type = pcie_device->device_info &
+ MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
+
+ pcie_device->enclosure_handle =
+ le16_to_cpu(pcie_device_pg0.EnclosureHandle);
+ if (pcie_device->enclosure_handle != 0)
+ pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
+
+ if (le32_to_cpu(pcie_device_pg0.Flags) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
+ pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0.ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ /* get enclosure_logical_id */
+ if (pcie_device->enclosure_handle &&
+ !(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ pcie_device->enclosure_handle)))
+ pcie_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* TODO -- Add device name once FW supports it */
+ if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+ &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(pcie_device);
+ return 0;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(pcie_device);
+ return 0;
+ }
+ pcie_device->nvme_mdts =
+ le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_pcie_device_init_add(ioc, pcie_device);
+ else
+ _scsih_pcie_device_add(ioc, pcie_device);
+
+ pcie_device_put(pcie_device);
+ return 0;
+}
+
+/**
+ * _scsih_pcie_topology_change_event_debug - debug for topology
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 port_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->SwitchStatus) {
+ case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
+ status_str = "add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
+ "start_port(%02d), count(%d)\n",
+ le16_to_cpu(event_data->SwitchDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPortNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ port_number = event_data->StartPortNum + i;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ status_str = "target add";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PortEntry[i].CurrentPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
+ MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
+ " link rate: new(0x%02x), old(0x%02x)\n", port_number,
+ handle, status_str, link_rate, prev_link_rate);
+ }
+}
+
+/**
+ * _scsih_pcie_topology_change_event - handle PCIe topology
+ * changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 link_rate, prev_link_rate;
+ unsigned long flags;
+ int rc;
+ int requeue_event;
+ Mpi26EventDataPCIeTopologyChangeList_t *event_data =
+ (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
+ struct _pcie_device *pcie_device;
+
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_topology_change_event_debug(ioc, event_data);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery)
+ return 0;
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
+ ioc->name));
+ return 0;
+ }
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring switch event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ reason_code = event_data->PortEntry[i].PortStatus;
+ handle =
+ le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+
+ link_rate = event_data->PortEntry[i].CurrentPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+ prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
+ & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
+
+ switch (reason_code) {
+ case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate == prev_link_rate)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ _scsih_pcie_check_device(ioc, handle);
+ /* The code after this point handles the case where a
+ * device has been added but keeps returning BUSY for
+ * some time. Then, before the Device Missing Delay
+ * expires and the device becomes READY, the device is
+ * removed and added back.
+ */
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ break;
+ }
+
+ if (!test_bit(handle, ioc->pend_os_device_add))
+ break;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "handle(0x%04x) device not found: convert "
+ "event to a device add\n", ioc->name, handle));
+ event_data->PortEntry[i].PortStatus &= 0xF0;
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
+ case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
+ if (ioc->shost_recovery)
+ break;
+ if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
+ break;
+
+ rc = _scsih_pcie_add_device(ioc, handle);
+ if (!rc) {
+ /* mark entry vacant */
+ /* TODO This needs to be reviewed and fixed;
+ * we don't have an entry state that marks
+ * an event void, like "vacant".
+ */
+ event_data->PortEntry[i].PortStatus |=
+ MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
+ }
+ break;
+ case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
+ _scsih_pcie_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+ return requeue_event;
+}
+
+/**
+ * _scsih_pcie_device_status_change_event_debug - debug for
+ * device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
+ reason_str = "device init failure";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+
+/**
+ * _scsih_pcie_device_status_change_event - handle device status
+ * change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _pcie_device *pcie_device;
+ u64 wwid;
+ unsigned long flags;
+ Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
+ (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_pcie_device_status_change_event_debug(ioc,
+ event_data);
+
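+ /* Only internal device reset start/complete events are handled
+ * here; they toggle tm_busy on the target.
+ */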
+ if (event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ wwid = le64_to_cpu(event_data->WWID);
+ pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
+
+ if (!pcie_device || !pcie_device->starget)
+ goto out;
+
+ target_priv_data = pcie_device->starget->hostdata;
+ if (!target_priv_data)
+ goto out;
+
+ if (event_data->ReasonCode ==
+ MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+out:
+ if (pcie_device)
+ pcie_device_put(pcie_device);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
@@ -6282,6 +7686,35 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_pcie_enumeration_event - handle enumeration events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi26EventDataPCIeEnumeration_t *event_data =
+ (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
+ ioc->name,
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
+ if (event_data->EnumerationStatus)
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
+}
+
+/**
* _scsih_ir_fastpath - turn on fastpath for IR physdisk
* @ioc: per adapter object
* @handle: device handle for physical disk
@@ -7085,7 +8518,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
{
struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
struct scsi_target *starget;
- struct _sas_device *sas_device;
+ struct _sas_device *sas_device = NULL;
unsigned long flags;
spin_lock_irqsave(&ioc->sas_device_lock, flags);
@@ -7126,6 +8559,9 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
sas_device->connector_name[0] = '\0';
}
+ _scsih_get_enclosure_logicalid_chassis_slot(ioc,
+ sas_device_pg0, sas_device);
+
if (sas_device->handle == sas_device_pg0->DevHandle)
goto out;
pr_info("\thandle changed from(0x%04x)!!!\n",
@@ -7190,6 +8626,130 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
+ * @ioc: per adapter object
+ * @pcie_device_pg0: PCIe Device page 0
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
+ Mpi26PCIeDevicePage0_t *pcie_device_pg0)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if ((pcie_device->wwid == pcie_device_pg0->WWID) &&
+ (pcie_device->slot == pcie_device_pg0->Slot)) {
+ pcie_device->responding = 1;
+ starget = pcie_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget) {
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), wwid(0x%016llx) ",
+ pcie_device->handle,
+ (unsigned long long)pcie_device->wwid);
+ if (pcie_device->enclosure_handle != 0)
+ starget_printk(KERN_INFO, starget,
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n",
+ (unsigned long long)
+ pcie_device->enclosure_logical_id,
+ pcie_device->slot);
+ }
+
+ if (((le32_to_cpu(pcie_device_pg0->Flags)) &
+ MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
+ (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
+ pcie_device->enclosure_level =
+ pcie_device_pg0->EnclosureLevel;
+ memcpy(&pcie_device->connector_name[0],
+ &pcie_device_pg0->ConnectorName[0], 4);
+ } else {
+ pcie_device->enclosure_level = 0;
+ pcie_device->connector_name[0] = '\0';
+ }
+
+ if (pcie_device->handle == pcie_device_pg0->DevHandle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ pcie_device->handle);
+ pcie_device->handle = pcie_device_pg0->DevHandle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle =
+ pcie_device_pg0->DevHandle;
+ goto out;
+ }
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_pcie_devices - search for responding PCIe devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, remove them.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->pcie_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from %s: "
+ "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
+ if (!(_scsih_is_nvme_device(device_info)))
+ continue;
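+ /* Convert the fields compared in
+ * _scsih_mark_responding_pcie_device() to CPU byte order in
+ * place before handing the page down.
+ */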
+ pcie_device_pg0.WWID = le64_to_cpu(pcie_device_pg0.WWID);
+ pcie_device_pg0.Slot = le16_to_cpu(pcie_device_pg0.Slot);
+ pcie_device_pg0.Flags = le32_to_cpu(pcie_device_pg0.Flags);
+ pcie_device_pg0.DevHandle = handle;
+ _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
+ }
+out:
+ pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
+ ioc->name);
+}
+
+/**
* _scsih_mark_responding_raid_device - mark a raid_device as responding
* @ioc: per adapter object
* @wwid: world wide identifier for raid volume
@@ -7322,8 +8882,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
/**
* _scsih_mark_responding_expander - mark a expander as responding
* @ioc: per adapter object
- * @sas_address: sas address
- * @handle:
+ * @expander_pg0: SAS Expander Config Page 0
*
* After host reset, find out whether devices are still responding.
* Used in _scsih_remove_unresponsive_expanders.
@@ -7331,18 +8890,41 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
* Return nothing.
*/
static void
-_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
- u16 handle)
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ExpanderPage0_t *expander_pg0)
{
- struct _sas_node *sas_expander;
+ struct _sas_node *sas_expander = NULL;
unsigned long flags;
- int i;
+ int i, encl_pg0_rc = -1;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 handle = le16_to_cpu(expander_pg0->DevHandle);
+ u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
+
+ if (le16_to_cpu(expander_pg0->EnclosureHandle)) {
+ encl_pg0_rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ le16_to_cpu(expander_pg0->EnclosureHandle));
+ if (encl_pg0_rc)
+ pr_info(MPT3SAS_FMT
+ "Enclosure Pg0 read failed for handle(0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(expander_pg0->EnclosureHandle));
+ }
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
if (sas_expander->sas_address != sas_address)
continue;
sas_expander->responding = 1;
+
+ if (!encl_pg0_rc)
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ sas_expander->enclosure_handle =
+ le16_to_cpu(expander_pg0->EnclosureHandle);
+
if (sas_expander->handle == handle)
goto out;
pr_info("\texpander(0x%016llx): handle changed" \
@@ -7395,7 +8977,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
handle,
(unsigned long long)sas_address);
- _scsih_mark_responding_expander(ioc, sas_address, handle);
+ _scsih_mark_responding_expander(ioc, &expander_pg0);
}
out:
@@ -7403,17 +8985,18 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _scsih_remove_unresponding_sas_devices - removing unresponding devices
+ * _scsih_remove_unresponding_devices - removing unresponding devices
* @ioc: per adapter object
*
* Return nothing.
*/
static void
-_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
struct _sas_device *sas_device, *sas_device_next;
struct _sas_node *sas_expander, *sas_expander_next;
struct _raid_device *raid_device, *raid_device_next;
+ struct _pcie_device *pcie_device, *pcie_device_next;
struct list_head tmp_list;
unsigned long flags;
LIST_HEAD(head);
@@ -7447,6 +9030,26 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_put(sas_device);
}
+ pr_info(MPT3SAS_FMT
+ "removing unresponding devices: pcie end-devices\n",
+ ioc->name);
+ INIT_LIST_HEAD(&head);
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry_safe(pcie_device, pcie_device_next,
+ &ioc->pcie_device_list, list) {
+ if (!pcie_device->responding)
+ list_move_tail(&pcie_device->list, &head);
+ else
+ pcie_device->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+
/* removing unresponding volumes */
if (ioc->ir_firmware) {
pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
@@ -7476,7 +9079,6 @@ _scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
list) {
- list_del(&sas_expander->list);
_scsih_expander_node_remove(ioc, sas_expander);
}
@@ -7520,6 +9122,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
Mpi2ExpanderPage0_t expander_pg0;
Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi26PCIeDevicePage0_t pcie_device_pg0;
Mpi2RaidVolPage1_t volume_pg1;
Mpi2RaidVolPage0_t volume_pg0;
Mpi2RaidPhysDiskPage0_t pd_pg0;
@@ -7530,6 +9133,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u16 handle, parent_handle;
u64 sas_address;
struct _sas_device *sas_device;
+ struct _pcie_device *pcie_device;
struct _sas_node *expander_device;
static struct _raid_device *raid_device;
u8 retry_count;
@@ -7755,7 +9359,44 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
ioc->name);
+ pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
+ ioc->name);
+ /* pcie devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
+ &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
+ " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(pcie_device_pg0.DevHandle);
+ if (!(_scsih_is_nvme_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ continue;
+ pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
+ le64_to_cpu(pcie_device_pg0.WWID));
+ if (pcie_device) {
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ retry_count = 0;
+ parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
+ _scsih_pcie_add_device(ioc, handle);
+
+ pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
+ "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
+ handle,
+ (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+ }
+ pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
+ ioc->name);
pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
}
/**
@@ -7805,6 +9446,7 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
_scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_pcie_devices(ioc);
_scsih_search_responding_raid_devices(ioc);
_scsih_search_responding_expanders(ioc);
_scsih_error_recovery_delete_devices(ioc);
@@ -7849,7 +9491,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
goto out;
ssleep(1);
}
- _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_remove_unresponding_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
@@ -7892,6 +9534,16 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_IR_OPERATION_STATUS:
_scsih_sas_ir_operation_status_event(ioc, fw_event);
break;
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
+ _scsih_pcie_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ _scsih_pcie_enumeration_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_pcie_topology_change_event(ioc, fw_event);
+ return;
+ break;
}
out:
fw_event_work_put(fw_event);
@@ -7982,6 +9634,11 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi2EventDataSasTopologyChangeList_t *)
mpi_reply->EventData);
break;
+ case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_pcie_topo_remove_events(ioc,
+ (Mpi26EventDataPCIeTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
_scsih_check_ir_config_unhide_events(ioc,
(Mpi2EventDataIrConfigChangeList_t *)
@@ -8044,6 +9701,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_SAS_DISCOVERY:
case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
case MPI2_EVENT_IR_PHYSICAL_DISK:
+ case MPI2_EVENT_PCIE_ENUMERATION:
+ case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
break;
case MPI2_EVENT_TEMP_THRESHOLD:
@@ -8056,19 +9715,21 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
switch (ActiveCableEventData->ReasonCode) {
case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
- pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
- " requires %d mW of power\n", ioc->name,
- ActiveCableEventData->ReceptacleID,
+ pr_notice(MPT3SAS_FMT
+ "Currently an active cable with ReceptacleID %d\n",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_notice("cannot be powered and devices connected\n");
+ pr_notice("to this active cable will not be seen\n");
+ pr_notice("This active cable requires %d mW of power\n",
ActiveCableEventData->ActiveCablePowerRequirement);
- pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
- " to this active cable will not be seen\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
- pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable",
- ioc->name, ActiveCableEventData->ReceptacleID);
- pr_notice(" is not running at an optimal speed(12 Gb/s)\n");
+ pr_notice(MPT3SAS_FMT
+ "Currently a cable with ReceptacleID %d\n",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_notice(
+ "is not running at optimal speed(12 Gb/s rate)\n");
break;
}
@@ -8100,7 +9761,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
* _scsih_expander_node_remove - removing expander device from list.
* @ioc: per adapter object
* @sas_expander: the sas_device object
- * Context: Calling function should acquire ioc->sas_node_lock.
*
* Removing object and freeing associated memory from the
* ioc->sas_expander_list.
@@ -8112,6 +9772,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
struct _sas_node *sas_expander)
{
struct _sas_port *mpt3sas_port, *next;
+ unsigned long flags;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -8139,6 +9800,10 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
sas_expander->handle, (unsigned long long)
sas_expander->sas_address);
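+ /* Callers no longer hold sas_node_lock, so take it here to
+ * unlink the expander from ioc->sas_expander_list.
+ */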
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
kfree(sas_expander->phy);
kfree(sas_expander);
}
@@ -8231,6 +9896,7 @@ static void scsih_remove(struct pci_dev *pdev)
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _pcie_device *pcie_device, *pcienext;
struct workqueue_struct *wq;
unsigned long flags;
@@ -8259,6 +9925,12 @@ static void scsih_remove(struct pci_dev *pdev)
(unsigned long long) raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
+ list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
+ list) {
+ _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
/* free ports attached to the sas_host */
list_for_each_entry_safe(mpt3sas_port, next_port,
@@ -8330,42 +10002,52 @@ scsih_shutdown(struct pci_dev *pdev)
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
- u8 is_raid;
+ u32 channel;
void *device;
struct _sas_device *sas_device;
struct _raid_device *raid_device;
+ struct _pcie_device *pcie_device;
u16 handle;
u64 sas_address_parent;
u64 sas_address;
unsigned long flags;
int rc;
+ int tid;
/* no Bios, return immediately */
if (!ioc->bios_pg3.BiosVersion)
return;
device = NULL;
- is_raid = 0;
if (ioc->req_boot_device.device) {
device = ioc->req_boot_device.device;
- is_raid = ioc->req_boot_device.is_raid;
+ channel = ioc->req_boot_device.channel;
} else if (ioc->req_alt_boot_device.device) {
device = ioc->req_alt_boot_device.device;
- is_raid = ioc->req_alt_boot_device.is_raid;
+ channel = ioc->req_alt_boot_device.channel;
} else if (ioc->current_boot_device.device) {
device = ioc->current_boot_device.device;
- is_raid = ioc->current_boot_device.is_raid;
+ channel = ioc->current_boot_device.channel;
}
if (!device)
return;
- if (is_raid) {
+ if (channel == RAID_CHANNEL) {
raid_device = device;
rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
raid_device->id, 0);
if (rc)
_scsih_raid_device_remove(ioc, raid_device);
+ } else if (channel == PCIE_CHANNEL) {
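+ /* Move the boot NVMe device to the active list before
+ * exposing it to the SCSI midlayer.
+ */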
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ pcie_device = device;
+ tid = pcie_device->id;
+ list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
+ if (rc)
+ _scsih_pcie_device_remove(ioc, pcie_device);
} else {
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = device;
@@ -8498,6 +10180,101 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * get_next_pcie_device - Get the next pcie device
+ * @ioc: per adapter object
+ *
+ * Get the next pcie device from the pcie_device_init_list.
+ *
+ * Returns a pcie device structure if the pcie_device_init_list is not
+ * empty; otherwise returns NULL.
+ */
+static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ if (!list_empty(&ioc->pcie_device_init_list)) {
+ pcie_device = list_first_entry(&ioc->pcie_device_init_list,
+ struct _pcie_device, list);
+ pcie_device_get(pcie_device);
+ }
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+
+ return pcie_device;
+}
+
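Note: the list handling above leans on pcie_device_get()/pcie_device_put() reference counting, which is defined elsewhere in the driver. A minimal sketch of the kref-based pattern such helpers normally follow (the struct and names below are illustrative, not the mpt3sas definitions):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_pcie_device {
	struct list_head list;
	struct kref	 refcount;
};

static void example_pcie_device_release(struct kref *r)
{
	struct example_pcie_device *p =
		container_of(r, struct example_pcie_device, refcount);

	kfree(p);		/* the real release may free extra members */
}

static inline void example_pcie_device_get(struct example_pcie_device *p)
{
	kref_get(&p->refcount);
}

static inline void example_pcie_device_put(struct example_pcie_device *p)
{
	kref_put(&p->refcount, example_pcie_device_release);
}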
+/**
+ * pcie_device_make_active - Add pcie device to pcie_device_list list
+ * @ioc: per adapter object
+ * @pcie_device: pcie device object
+ *
+ * Add the pcie device, which has registered with the SCSI Transport Layer,
+ * to the pcie_device_list.
+ */
+static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
+ struct _pcie_device *pcie_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+
+ if (!list_empty(&pcie_device->list)) {
+ list_del_init(&pcie_device->list);
+ pcie_device_put(pcie_device);
+ }
+ pcie_device_get(pcie_device);
+ list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
+
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
+ * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ int rc;
+
+ /* PCIe Device List */
+ while ((pcie_device = get_next_pcie_device(ioc))) {
+ if (pcie_device->starget) {
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
+ pcie_device->id, 0);
+ if (rc) {
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ } else if (!pcie_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading) {
+ /* TODO-- Need to find out whether this condition will
+ * occur or not
+ */
+ _scsih_pcie_device_remove(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
+ }
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ }
+}
+
+/**
* _scsih_probe_devices - probing for devices
* @ioc: per adapter object
*
@@ -8525,8 +10302,10 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_probe_sas(ioc);
_scsih_probe_raid(ioc);
}
- } else
+ } else {
_scsih_probe_sas(ioc);
+ _scsih_probe_pcie(ioc);
+ }
}
/**
@@ -8740,6 +10519,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_SAS3516:
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
return MPI26_VERSION;
}
return 0;
@@ -8817,6 +10597,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_MFGPAGE_DEVID_SAS3516:
case MPI26_MFGPAGE_DEVID_SAS3516_1:
case MPI26_MFGPAGE_DEVID_SAS3416:
+ case MPI26_MFGPAGE_DEVID_SAS3616:
ioc->is_gen35_ioc = 1;
break;
default:
@@ -8867,11 +10648,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ioc->sas_node_lock);
spin_lock_init(&ioc->fw_event_lock);
spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->pcie_device_lock);
spin_lock_init(&ioc->diag_trigger_lock);
INIT_LIST_HEAD(&ioc->sas_device_list);
INIT_LIST_HEAD(&ioc->sas_device_init_list);
INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_list);
+ INIT_LIST_HEAD(&ioc->pcie_device_init_list);
INIT_LIST_HEAD(&ioc->fw_event_list);
INIT_LIST_HEAD(&ioc->raid_device_list);
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
@@ -9273,6 +11057,9 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Mercator ~ 3616 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
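Note: adding SAS3616 support touches three places above: the MPI-version switch, the is_gen35_ioc switch, and this PCI ID table, which is what the PCI core matches against at probe time. A minimal, self-contained sketch of declaring and exporting such a table (the vendor/device values are hypothetical, not the mpt3sas ones):

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1000	/* hypothetical */
#define EXAMPLE_DEVICE_ID	0x00d1	/* hypothetical */

static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ 0 }	/* terminating entry */
};
/* Exposes the IDs so udev/modprobe can autoload the module for this device. */
MODULE_DEVICE_TABLE(pci, example_pci_table);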
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index 540bd5005149..ced7d9f6274c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -299,7 +299,7 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
u16 smid)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 4f515700bdc3..e6b2b681fda3 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
diff --git a/drivers/scsi/mvme147.h b/drivers/scsi/mvme147.h
index bfd4566ef050..f75ff58ddcd0 100644
--- a/drivers/scsi/mvme147.h
+++ b/drivers/scsi/mvme147.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MVME147_H
/* $Id: mvme147.h,v 1.4 1997/01/19 23:07:10 davem Exp $
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 718c88de328b..8c91637cd598 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -95,7 +95,7 @@ static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
phy->mvi = mvi;
phy->port = NULL;
- init_timer(&phy->timer);
+ timer_setup(&phy->timer, NULL, 0);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -248,7 +248,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
mvi->devices[i].dev_type = SAS_PHY_UNUSED;
mvi->devices[i].device_id = i;
mvi->devices[i].dev_status = MVS_DEV_NORMAL;
- init_timer(&mvi->devices[i].timer);
}
/*
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index ee81d10252e0..cff43bd9f675 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1283,9 +1283,10 @@ static void mvs_task_done(struct sas_task *task)
complete(&task->slow_task->completion);
}
-static void mvs_tmf_timedout(unsigned long data)
+static void mvs_tmf_timedout(struct timer_list *t)
{
- struct sas_task *task = (struct sas_task *)data;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
@@ -1309,7 +1310,6 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = mvs_task_done;
- task->slow_task->timer.data = (unsigned long) task;
task->slow_task->timer.function = mvs_tmf_timedout;
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -1954,9 +1954,9 @@ static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
return ret;
}
-static void mvs_sig_time_out(unsigned long tphy)
+static void mvs_sig_time_out(struct timer_list *t)
{
- struct mvs_phy *phy = (struct mvs_phy *)tphy;
+ struct mvs_phy *phy = from_timer(phy, t, timer);
struct mvs_info *mvi = phy->mvi;
u8 phy_no;
@@ -2020,7 +2020,6 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
tmp | PHYEV_SIG_FIS);
if (phy->timer.function == NULL) {
- phy->timer.data = (unsigned long)phy;
phy->timer.function = mvs_sig_time_out;
phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index f9afd4cdd4c4..080676c1c9e5 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -247,7 +247,6 @@ struct mvs_device {
enum sas_device_type dev_type;
struct mvs_info *mvi_info;
struct domain_device *sas_device;
- struct timer_list timer;
u32 attached_phy;
u32 device_id;
u32 running_req;
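Note: the mvsas changes follow the common timer API conversion: callbacks now take the struct timer_list pointer and recover their containing object with from_timer(), so init_timer() and the .data/.function assignments go away. A minimal sketch of the new-style pattern, assuming a driver object that embeds its own timer:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct my_phy {
	int id;
	struct timer_list timer;	/* timer embedded in the object */
};

/* New-style callback: recover the container from the timer pointer. */
static void my_phy_timeout(struct timer_list *t)
{
	struct my_phy *phy = from_timer(phy, t, timer);

	pr_warn("phy %d timed out\n", phy->id);
}

static void my_phy_arm(struct my_phy *phy)
{
	/* Replaces init_timer() plus the .function/.data assignments. */
	timer_setup(&phy->timer, my_phy_timeout, 0);
	mod_timer(&phy->timer, jiffies + 5 * HZ);
}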
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 5b93ed810f6e..dc4e801b2cef 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -8093,9 +8093,9 @@ irqreturn_t ncr53c8xx_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
{
- struct ncb *np = (struct ncb *) npref;
+ struct ncb *np = from_timer(np, t, timer);
unsigned long flags;
struct scsi_cmnd *done_list;
@@ -8357,9 +8357,7 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
if (!np->scripth0)
goto attach_error;
- init_timer(&np->timer);
- np->timer.data = (unsigned long) np;
- np->timer.function = ncr53c8xx_timeout;
+ timer_setup(&np->timer, ncr53c8xx_timeout, 0);
/* Try to map the controller chip to virtual and physical memory. */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 107e191bf023..8620ac5d6e41 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -604,7 +604,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
* check bus line
*/
phase = nsp32_read1(base, SCSI_BUS_MONITOR);
- if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) {
+ if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) {
nsp32_msg(KERN_WARNING, "bus busy");
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
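Note: the nsp32 change fixes a bitmask test: (phase & BUSMON_BSY) == 1 is only true when the flag happens to occupy bit 0, so the old check could miss a busy or selected bus entirely. A small stand-alone illustration (the flag values here are made up for the example):

#include <stdio.h>

#define BUSMON_BSY 0x40		/* example bit positions, not the real ones */
#define BUSMON_SEL 0x80

int main(void)
{
	unsigned char phase = BUSMON_BSY;	/* bus is busy */

	/* Old test: (phase & BUSMON_BSY) == 1 compares 0x40 with 1 -> false. */
	printf("old test fires: %d\n", (phase & BUSMON_BSY) == 1);

	/* Fixed test: any non-zero mask result means the flag is set -> true. */
	printf("new test fires: %d\n", (phase & BUSMON_BSY) != 0);
	return 0;
}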
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h
index b4fea98ba276..b90ae280853d 100644
--- a/drivers/scsi/osst.h
+++ b/drivers/scsi/osst.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* $Header: /cvsroot/osst/Driver/osst.h,v 1.16 2005/01/01 21:13:35 wriede Exp $
*/
diff --git a/drivers/scsi/osst_detect.h b/drivers/scsi/osst_detect.h
index 21717d0e6974..83c1d4fb11db 100644
--- a/drivers/scsi/osst_detect.h
+++ b/drivers/scsi/osst_detect.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define SIGS_FROM_OSST \
{"OnStream", "SC-", "", "osst"}, \
{"OnStream", "DI-", "", "osst"}, \
diff --git a/drivers/scsi/osst_options.h b/drivers/scsi/osst_options.h
index ff1e610946ed..a6a389b88876 100644
--- a/drivers/scsi/osst_options.h
+++ b/drivers/scsi/osst_options.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
The compile-time configurable defaults for the Linux SCSI tape driver.
diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile
index 683bf148b5b7..44eea2d43143 100644
--- a/drivers/scsi/pcmcia/Makefile
+++ b/drivers/scsi/pcmcia/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/scsi
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index ce4cd87c7c66..02b7338999cc 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Kernel configuration file for the PM8001 SAS/SATA 8x6G based HBA driver
#
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index be8269c8d127..596f3ff965f5 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -98,6 +98,58 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
}
}
static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
+
+/**
+ * pm8001_ctl_ila_version_show - ILA version
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version));
+ }
+ return 0;
+}
+static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL);
+
+/**
+ * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number
+ * @cdev: pointer to embedded class device
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
+ (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version));
+ }
+ return 0;
+}
+static
+DEVICE_ATTR(inc_fw_ver, 0444, pm8001_ctl_inactive_fw_version_show, NULL);
+
/**
* pm8001_ctl_max_out_io_show - max outstanding io supported
* @cdev: pointer to embedded class device
@@ -748,6 +800,8 @@ struct device_attribute *pm8001_host_attrs[] = {
&dev_attr_bios_version,
&dev_attr_ib_log,
&dev_attr_ob_log,
+ &dev_attr_ila_version,
+ &dev_attr_inc_fw_ver,
NULL,
};
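Note: both new pm8001 attributes print a packed 32-bit version word as dotted bytes; the assumption here is that the four version bytes are packed most-significant first, which is how the show functions above unpack them. A stand-alone sketch of that decoding:

#include <stdio.h>

static void print_version(unsigned int v)
{
	printf("%02x.%02x.%02x.%02x\n",
	       (v >> 24) & 0xff, (v >> 16) & 0xff,
	       (v >> 8) & 0xff, v & 0xff);
}

int main(void)
{
	print_version(0x03020100);	/* prints 03.02.01.00 */
	return 0;
}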
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 10546faac58c..db88a8e7ee0e 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3198,19 +3198,28 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
+ u32 tag;
struct local_phy_ctl_resp *pPayload =
(struct local_phy_ctl_resp *)(piomb + 4);
u32 status = le32_to_cpu(pPayload->status);
u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
+ tag = le32_to_cpu(pPayload->tag);
if (status != 0) {
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("%x phy execute %x phy op failed!\n",
phy_id, phy_op));
- } else
+ } else {
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("%x phy execute %x phy op success!\n",
phy_id, phy_op));
+ pm8001_ha->phy[phy_id].reset_success = true;
+ }
+ if (pm8001_ha->phy[phy_id].enable_completion) {
+ complete(pm8001_ha->phy[phy_id].enable_completion);
+ pm8001_ha->phy[phy_id].enable_completion = NULL;
+ }
+ pm8001_tag_free(pm8001_ha, tag);
return 0;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 0e013f76b582..7a697ca68501 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -132,7 +132,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
sas_phy->id = phy_id;
- sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
+ sas_phy->sas_addr = (u8 *)&phy->dev_sas_addr;
sas_phy->frame_rcvd = &phy->frame_rcvd[0];
sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
sas_phy->lldd_phy = phy;
@@ -591,10 +591,12 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
for (i = 0; i < chip_info->n_phy; i++) {
sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
+ sha->sas_phy[i]->sas_addr =
+ (u8 *)&pm8001_ha->phy[i].dev_sas_addr;
}
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
-
+ sha->strict_wide_ports = 1;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
@@ -611,6 +613,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i, j;
+ u8 sas_add[8];
#ifdef PM8001_READ_VPD
/* For new SPC controllers WWN is stored in flash vpd
* For SPC/SPCve controllers WWN is stored in EEPROM
@@ -672,10 +675,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->sas_addr[j] =
payload.func_specific[0x804 + i];
}
-
+ memcpy(sas_add, pm8001_ha->sas_addr, SAS_ADDR_SIZE);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ if (i && ((i % 4) == 0))
+ sas_add[7] = sas_add[7] + 4;
memcpy(&pm8001_ha->phy[i].dev_sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ sas_add, SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("phy %d sas_addr = %016llx\n", i,
pm8001_ha->phy[i].dev_sas_addr));
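Note: the loop above bumps the last SAS address byte by 4 at every 4-phy boundary; together with strict_wide_ports = 1 this presumably lets libsas treat each 4-phy group as its own wide port instead of merging all phys under one address. A stand-alone illustration of the address derivation (the base address is made up):

#include <stdio.h>

int main(void)
{
	unsigned char sas_add[8] = { 0x50, 0x01, 0x04, 0xde,
				     0xad, 0xbe, 0xef, 0x00 };	/* example base */
	int i, n_phy = 16;

	for (i = 0; i < n_phy; i++) {
		if (i && (i % 4) == 0)
			sas_add[7] += 4;	/* new address every 4 phys */
		printf("phy %2d -> last address byte 0x%02x\n", i, sas_add[7]);
	}
	return 0;
}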
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index ce584c31d36e..947d6017d004 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -656,9 +656,10 @@ void pm8001_task_done(struct sas_task *task)
complete(&task->slow_task->completion);
}
-static void pm8001_tmf_timedout(unsigned long data)
+static void pm8001_tmf_timedout(struct timer_list *t)
{
- struct sas_task *task = (struct sas_task *)data;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
@@ -694,7 +695,6 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->slow_task->timer.data = (unsigned long)task;
task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
@@ -781,7 +781,6 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->slow_task->timer.data = (unsigned long)task;
task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
@@ -1159,40 +1158,42 @@ int pm8001_query_task(struct sas_task *task)
int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
- u32 tag = 0xdeadbeef;
+ u32 tag;
u32 device_id;
struct domain_device *dev ;
- struct pm8001_hba_info *pm8001_ha = NULL;
- struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
struct pm8001_tmf_task tmf_task;
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc = TMF_RESP_FUNC_FAILED, ret;
+ u32 phy_id;
+ struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return rc;
+ return TMF_RESP_FUNC_FAILED;
+ dev = task->dev;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ device_id = pm8001_dev->device_id;
+ phy_id = pm8001_dev->attached_phy;
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ pm8001_printk("no tag for task:%p\n", task);
+ return TMF_RESP_FUNC_FAILED;
+ }
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- rc = TMF_RESP_FUNC_COMPLETE;
- goto out;
+ return TMF_RESP_FUNC_COMPLETE;
+ }
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ if (task->slow_task == NULL) {
+ init_completion(&slow_task.completion);
+ task->slow_task = &slow_task;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
- dev = task->dev;
- ccb = task->lldd_task;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
int_to_scsilun(cmnd->device->lun, &lun);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
- device_id = pm8001_dev->device_id;
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("abort io to deviceid= %d\n", device_id));
tmf_task.tmf = TMF_ABORT_TASK;
tmf_task.tag_of_task_to_be_managed = tag;
rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
@@ -1200,33 +1201,77 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev->sas_device, 0, tag);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
+ if (pm8001_ha->chip_id == chip_8006) {
+ DECLARE_COMPLETION_ONSTACK(completion_reset);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+
+ /* 1. Set Device state as Recovery */
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x03);
+ wait_for_completion(&completion);
+
+ /* 2. Send Phy Control Hard Reset */
+ reinit_completion(&completion);
+ phy->reset_success = false;
+ phy->enable_completion = &completion;
+ phy->reset_completion = &completion_reset;
+ ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_HARD_RESET);
+ if (ret)
+ goto out;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ wait_for_completion(&completion);
+ if (!phy->reset_success)
+ goto out;
+
+ /* 3. Wait for Port Reset complete / Port reset TMO */
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for Port reset\n"));
+ wait_for_completion(&completion_reset);
+ if (phy->port_reset_status)
+ goto out;
+
+ /*
+ * 4. SATA Abort ALL
+ * We wait for the task to be aborted so that it is
+ * removed from the ccb; on success the caller will
+ * free the task.
+ */
+ ret = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ if (ret)
+ goto out;
+ ret = wait_for_completion_timeout(
+ &task->slow_task->completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ goto out;
+
+ /* 5. Set Device State as Operational */
+ reinit_completion(&completion);
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion);
+ } else {
+ rc = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 0, tag);
}
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
pm8001_dev->sas_device, 0, tag);
}
out:
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->slow_task == &slow_task)
+ task->slow_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
if (rc != TMF_RESP_FUNC_COMPLETE)
pm8001_printk("rc= %d\n", rc);
return rc;
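Note: the chip_8006 SATA abort path above walks a fixed sequence (set the device state to recovery, hard-reset the phy, wait for port reset, abort all I/O, restore the device state), each step blocking on a completion that the interrupt handlers signal. A compressed sketch of that completion-driven wait pattern; issue_step() is a placeholder, not a pm8001 function:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Placeholder for the real firmware requests (set device state, local phy
 * control, task abort). Here it completes immediately; in the driver the
 * interrupt handler calls complete() once the firmware responds.
 */
static void issue_step(struct completion *c)
{
	complete(c);
}

static int recover_sata_device(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	long ret;

	issue_step(&done);			/* step 1: post the request */
	wait_for_completion(&done);		/* wait for IRQ-side complete() */

	reinit_completion(&done);		/* reuse for the next step */
	issue_step(&done);
	ret = wait_for_completion_timeout(&done, 30 * HZ);
	return ret ? 0 : -ETIMEDOUT;		/* 0 from the wait means timeout */
}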
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index e81a8fa7ef1a..80b4dd6df0c2 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -263,8 +263,15 @@ struct pm8001_phy {
u8 phy_state;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
+ struct completion *reset_completion;
+ bool port_reset_status;
+ bool reset_success;
};
+/* port reset status */
+#define PORT_RESET_SUCCESS 0x00
+#define PORT_RESET_TMO 0x01
+
struct pm8001_device {
enum sas_device_type dev_type;
struct domain_device *sas_device;
@@ -404,6 +411,8 @@ union main_cfg_table {
u32 port_recovery_timer;
u32 interrupt_reassertion_delay;
u32 fatal_n_non_fatal_dump; /* 0x28 */
+ u32 ila_version;
+ u32 inc_fw_version;
} pm80xx_tbl;
};
@@ -531,6 +540,7 @@ struct pm8001_hba_info {
u32 smp_exp_mode;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
+ u32 reset_in_progress;
};
struct pm8001_work {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index eb4fee61df72..42f0405601ad 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -312,6 +312,11 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
/* read port recover and reset timeout */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
+ /* read ILA and inactive firmware version */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version =
+ pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE);
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version =
+ pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION);
}
/**
@@ -592,6 +597,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
PORT_RECOVERY_TIMEOUT;
+ if (pm8001_ha->chip_id == chip_8006) {
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
+ 0x0000ffff;
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+ 0x140000;
+ }
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
}
@@ -1478,6 +1489,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
ccb->task = task;
+ ccb->n_elem = 0;
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
@@ -1770,6 +1782,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
"task 0x%p done with io_status 0x%x resp 0x%x "
"stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
+ if (t->slow_task)
+ complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
@@ -3033,10 +3047,10 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+ u32 port_sata = (phy->phy_type & PORT_TYPE_SATA);
port->port_state = portstate;
phy->identify.device_type = 0;
phy->phy_attached = 0;
- memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
switch (portstate) {
case PORT_VALID:
break;
@@ -3045,7 +3059,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" PortInvalid portID %d\n", port_id));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- if (phy->phy_type & PORT_TYPE_SATA) {
+ if (port_sata) {
phy->phy_type = 0;
port->port_attached = 0;
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3067,7 +3081,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk(" Last phy Down and port invalid\n"));
- if (phy->phy_type & PORT_TYPE_SATA) {
+ if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
@@ -3083,6 +3097,11 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
}
+ if (port_sata && (portstate != PORT_IN_RESET)) {
+ struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+
+ sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ }
}
static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3185,12 +3204,14 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_DOWN:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PHY_DOWN\n"));
- if (phy->phy_type & PORT_TYPE_SATA)
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ hw_event_phy_down(pm8001_ha, piomb);
+ if (pm8001_ha->reset_in_progress) {
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Reset in progress\n"));
+ return 0;
+ }
phy->phy_attached = 0;
phy->phy_state = 0;
- hw_event_phy_down(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
@@ -3297,9 +3318,17 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PORT_RESET_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ if (pm8001_ha->phy[phy_id].reset_completion) {
+ pm8001_ha->phy[phy_id].port_reset_status =
+ PORT_RESET_TMO;
+ complete(pm8001_ha->phy[phy_id].reset_completion);
+ pm8001_ha->phy[phy_id].reset_completion = NULL;
+ }
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
PM8001_MSG_DBG(pm8001_ha,
@@ -3324,6 +3353,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PORT_RESET_COMPLETE:
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+ if (pm8001_ha->phy[phy_id].reset_completion) {
+ pm8001_ha->phy[phy_id].port_reset_status =
+ PORT_RESET_SUCCESS;
+ complete(pm8001_ha->phy[phy_id].reset_completion);
+ pm8001_ha->phy[phy_id].reset_completion = NULL;
+ }
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
PM8001_MSG_DBG(pm8001_ha,
@@ -4389,7 +4424,7 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
payload.sas_identify.dev_type = SAS_END_DEVICE;
payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
memcpy(payload.sas_identify.sas_addr,
- pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+ &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
return ret;
@@ -4496,17 +4531,20 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op)
{
+ u32 tag;
+ int rc;
struct local_phy_ctl_req payload;
struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
memset(&payload, 0, sizeof(payload));
+ rc = pm8001_tag_alloc(pm8001_ha, &tag);
+ if (rc)
+ return rc;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(1);
+ payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
}
static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 7a443bad6163..889e69ce3689 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -167,7 +167,7 @@
#define LINKMODE_AUTO (0x03 << 12)
#define LINKRATE_15 (0x01 << 8)
#define LINKRATE_30 (0x02 << 8)
-#define LINKRATE_60 (0x06 << 8)
+#define LINKRATE_60 (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)
/* phy_profile */
@@ -229,6 +229,102 @@
#define IT_NEXUS_TIMEOUT 0x7D0
#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
+#ifdef __LITTLE_ENDIAN_BITFIELD
+struct sas_identify_frame_local {
+ /* Byte 0 */
+ u8 frame_type:4;
+ u8 dev_type:3;
+ u8 _un0:1;
+
+ /* Byte 1 */
+ u8 _un1;
+
+ /* Byte 2 */
+ union {
+ struct {
+ u8 _un20:1;
+ u8 smp_iport:1;
+ u8 stp_iport:1;
+ u8 ssp_iport:1;
+ u8 _un247:4;
+ };
+ u8 initiator_bits;
+ };
+
+ /* Byte 3 */
+ union {
+ struct {
+ u8 _un30:1;
+ u8 smp_tport:1;
+ u8 stp_tport:1;
+ u8 ssp_tport:1;
+ u8 _un347:4;
+ };
+ u8 target_bits;
+ };
+
+ /* Byte 4 - 11 */
+ u8 _un4_11[8];
+
+ /* Byte 12 - 19 */
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* Byte 20 */
+ u8 phy_id;
+
+ u8 _un21_27[7];
+
+} __packed;
+
+#elif defined(__BIG_ENDIAN_BITFIELD)
+struct sas_identify_frame_local {
+ /* Byte 0 */
+ u8 _un0:1;
+ u8 dev_type:3;
+ u8 frame_type:4;
+
+ /* Byte 1 */
+ u8 _un1;
+
+ /* Byte 2 */
+ union {
+ struct {
+ u8 _un247:4;
+ u8 ssp_iport:1;
+ u8 stp_iport:1;
+ u8 smp_iport:1;
+ u8 _un20:1;
+ };
+ u8 initiator_bits;
+ };
+
+ /* Byte 3 */
+ union {
+ struct {
+ u8 _un347:4;
+ u8 ssp_tport:1;
+ u8 stp_tport:1;
+ u8 smp_tport:1;
+ u8 _un30:1;
+ };
+ u8 target_bits;
+ };
+
+ /* Byte 4 - 11 */
+ u8 _un4_11[8];
+
+ /* Byte 12 - 19 */
+ u8 sas_addr[SAS_ADDR_SIZE];
+
+ /* Byte 20 */
+ u8 phy_id;
+
+ u8 _un21_27[7];
+} __packed;
+#else
+#error "Bitfield order not defined!"
+#endif
+
struct mpi_msg_hdr {
__le32 header; /* Bits [11:0] - Message operation code */
/* Bits [15:12] - Message Category */
@@ -248,7 +344,7 @@ struct mpi_msg_hdr {
struct phy_start_req {
__le32 tag;
__le32 ase_sh_lm_slr_phyid;
- struct sas_identify_frame sas_identify; /* 28 Bytes */
+ struct sas_identify_frame_local sas_identify; /* 28 Bytes */
__le32 spasti;
u32 reserved[21];
} __attribute__((packed, aligned(4)));
@@ -1349,6 +1445,8 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */
#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */
+#define MAIN_MPI_ILA_RELEASE_TYPE 0xA4 /* DWORD 0x29 */
+#define MAIN_MPI_INACTIVE_FW_VERSION 0xB0 /* DWORD 0x2C */
/* General Status Table offset - byte offset */
#define GST_GSTLEN_MPIS_OFFSET 0x00
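Note: sas_identify_frame_local above spells out the byte-0 bitfields for both bitfield orders because C leaves bitfield allocation order to the implementation, so a wire-format struct has to provide both layouts explicitly. A minimal sketch of the same guard pattern with illustrative field names:

#include <asm/byteorder.h>
#include <linux/types.h>

struct example_frame_byte0 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8 frame_type:4;	/* occupies bits 3:0 of byte 0 */
	u8 dev_type:3;
	u8 reserved:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:1;		/* same wire layout, opposite field order */
	u8 dev_type:3;
	u8 frame_type:4;
#else
#error "Bitfield order not defined!"
#endif
} __packed;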
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b4d6cd8cd1ad..e58be98430b0 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -348,7 +348,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
cmd->sense_buffer = NULL;
cmd->sense_buffer_dma = 0;
cmd->dma_handle = 0;
- init_timer(&cmd->timer);
+ timer_setup(&cmd->timer, NULL, 0);
}
/**
@@ -557,8 +557,9 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
static void pmcraid_ioa_reset(struct pmcraid_cmd *);
-static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
+static void pmcraid_bist_done(struct timer_list *t)
{
+ struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
int rc;
@@ -572,9 +573,6 @@ static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
pmcraid_info("BIST not complete, waiting another 2 secs\n");
cmd->timer.expires = jiffies + cmd->time_left;
cmd->time_left = 0;
- cmd->timer.data = (unsigned long)cmd;
- cmd->timer.function =
- (void (*)(unsigned long))pmcraid_bist_done;
add_timer(&cmd->timer);
} else {
cmd->time_left = 0;
@@ -605,9 +603,8 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
doorbells, intrs);
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
- cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
- cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
+ cmd->timer.function = pmcraid_bist_done;
add_timer(&cmd->timer);
}
@@ -617,8 +614,9 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
* Return value
* None
*/
-static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
+static void pmcraid_reset_alert_done(struct timer_list *t)
{
+ struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
u32 status = ioread32(pinstance->ioa_status);
unsigned long lock_flags;
@@ -637,10 +635,8 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
pmcraid_info("critical op is not yet reset waiting again\n");
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function =
- (void (*)(unsigned long))pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
@@ -676,10 +672,8 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
* bit to be reset.
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
- cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function =
- (void (*)(unsigned long))pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
@@ -704,8 +698,9 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
* Return value:
* None
*/
-static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
+static void pmcraid_timeout_handler(struct timer_list *t)
{
+ struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
struct pmcraid_instance *pinstance = cmd->drv_inst;
unsigned long lock_flags;
@@ -919,7 +914,7 @@ static void pmcraid_send_cmd(
struct pmcraid_cmd *cmd,
void (*cmd_done) (struct pmcraid_cmd *),
unsigned long timeout,
- void (*timeout_func) (struct pmcraid_cmd *)
+ void (*timeout_func) (struct timer_list *)
)
{
/* initialize done function */
@@ -927,9 +922,8 @@ static void pmcraid_send_cmd(
if (timeout_func) {
/* setup timeout handler */
- cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies + timeout;
- cmd->timer.function = (void (*)(unsigned long))timeout_func;
+ cmd->timer.function = timeout_func;
add_timer(&cmd->timer);
}
@@ -1955,10 +1949,9 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
* would re-initiate a reset
*/
cmd->cmd_done = pmcraid_ioa_reset;
- cmd->timer.data = (unsigned long)cmd;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
- cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
+ cmd->timer.function = pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index ba8021427b88..6a1f8a2d70eb 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Driver for the PPA3 parallel port SCSI HBA embedded in
* the Iomega ZIP drive
*
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 2ff753ce6e27..d1db92d24889 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -4,6 +4,7 @@ config QEDI
depends on QED
select SCSI_ISCSI_ATTRS
select QED_LL2
+ select QED_OOO
select QED_ISCSI
select ISCSI_BOOT_SYSFS
---help---
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 93d54acd4a22..bd302d3cb9af 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -92,7 +92,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
int pld_len;
- u32 *tmp;
cmd = (struct qedi_cmd *)task->dd_data;
task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id);
@@ -108,7 +107,6 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_text_response->hdr_second_dword &
ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
@@ -196,7 +194,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
struct iscsi_tm_rsp *resp_hdr_ptr;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd = NULL;
- u32 *tmp;
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
@@ -222,7 +219,6 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_tmp_response->hdr_second_dword &
ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn);
@@ -269,7 +265,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
int pld_len;
- u32 *tmp;
cmd = (struct qedi_cmd *)task->dd_data;
@@ -286,7 +281,6 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
hton24(resp_hdr_ptr->dlength,
(cqe_login_response->hdr_second_dword &
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK));
- tmp = (u32 *)resp_hdr_ptr->dlength;
resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
conn->session->age);
resp_hdr_ptr->tsih = cqe_login_response->tsih;
@@ -590,7 +584,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
int datalen = 0;
struct qedi_conn *qedi_conn;
u32 iscsi_cid;
- bool mark_cmd_node_deleted = false;
u8 cqe_err_bits = 0;
iscsi_cid = cqe->cqe_common.conn_id;
@@ -674,7 +667,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
- mark_cmd_node_deleted = true;
}
spin_unlock(&qedi_conn->list_lock);
@@ -763,7 +755,7 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
u32 rtid = 0;
u32 iscsi_cid;
struct qedi_conn *qedi_conn;
- struct qedi_cmd *cmd_new, *dbg_cmd;
+ struct qedi_cmd *dbg_cmd;
struct iscsi_task *mtask;
struct iscsi_tm *tmf_hdr = NULL;
@@ -856,7 +848,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
}
qedi_conn->cmd_cleanup_cmpl++;
wake_up(&qedi_conn->wait_queue);
- cmd_new = task->dd_data;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
"Freeing tid=0x%x for cid=0x%x\n",
@@ -1029,7 +1020,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
- struct scsi_sge *req_sge = NULL;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
struct qedi_endpoint *ep;
@@ -1037,7 +1027,6 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
u16 sq_idx = 0;
int rval = 0;
- req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
ep = qedi_conn->ep;
@@ -1718,7 +1707,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
- struct scsi_sge *req_sge = NULL;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
struct qedi_endpoint *ep;
@@ -1727,7 +1715,6 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
u16 sq_idx = 0;
int rval = 0;
- req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
qedi_cmd = (struct qedi_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
@@ -1995,7 +1982,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
struct qedi_conn *qedi_conn = conn->dd_data;
struct scsi_cmnd *sc_cmd = task->sc;
unsigned long flags;
- u8 op;
spin_lock_irqsave(&qedi->io_trace_lock, flags);
@@ -2005,7 +1991,6 @@ void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task,
io_log->cid = qedi_conn->iscsi_conn_id;
io_log->lun = sc_cmd->device->lun;
io_log->op = sc_cmd->cmnd[0];
- op = sc_cmd->cmnd[0];
io_log->lba[0] = sc_cmd->cmnd[2];
io_log->lba[1] = sc_cmd->cmnd[3];
io_log->lba[2] = sc_cmd->cmnd[4];
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 8a29fb09db14..390775d5c918 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -758,9 +758,9 @@ enum action {
};
-static void qla1280_mailbox_timeout(unsigned long __data)
+static void qla1280_mailbox_timeout(struct timer_list *t)
{
- struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
+ struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer);
struct device_reg __iomem *reg;
reg = ha->iobase;
@@ -2465,7 +2465,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
uint16_t __iomem *mptr;
uint16_t data;
DECLARE_COMPLETION_ONSTACK(wait);
- struct timer_list timer;
ENTER("qla1280_mailbox_command");
@@ -2494,18 +2493,15 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
/* Issue set host interrupt command. */
/* set up a timer just in case we're really jammed */
- init_timer_on_stack(&timer);
- timer.expires = jiffies + 20*HZ;
- timer.data = (unsigned long)ha;
- timer.function = qla1280_mailbox_timeout;
- add_timer(&timer);
+ timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0);
+ mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ);
spin_unlock_irq(ha->host->host_lock);
WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
data = qla1280_debounce_register(&reg->istatus);
wait_for_completion(&wait);
- del_timer_sync(&timer);
+ del_timer_sync(&ha->mailbox_timer);
spin_lock_irq(ha->host->host_lock);
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index 834884b9eed5..1522aca2c8c8 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -1055,6 +1055,7 @@ struct scsi_qla_host {
struct list_head done_q; /* Done queue */
struct completion *mailbox_wait;
+ struct timer_list mailbox_timer;
volatile struct {
uint32_t online:1; /* 0 */
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 0b767a0bb308..17d5bc1cc56b 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2ea0ef93f5cb..e3ac7078d2aa 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -919,9 +919,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
- fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
+ memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
+ sizeof(response));
fw_sts_ptr += sizeof(response);
*fw_sts_ptr = command_sent;
@@ -1116,14 +1116,13 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
return -EINVAL;
}
- mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
+ mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
ql_log(ql_log_warn, vha, 0x703c,
"DMA alloc failed for fw buffer.\n");
return -ENOMEM;
}
- memset(mn, 0, sizeof(struct access_chip_84xx));
mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
mn->entry_count = 1;
ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
@@ -2554,13 +2553,11 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command "
"failed.\n");
- scsi_req(bsg_job->req)->result =
bsg_reply->result = -EIO;
} else {
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
- scsi_req(bsg_job->req)->result =
bsg_reply->result = 0;
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -2571,7 +2568,7 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
- scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
+ bsg_reply->result = -ENXIO;
return 0;
done:
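Note: qla84xx_mgmt_cmd() above also moves from dma_pool_alloc() plus memset() to dma_pool_zalloc(), which hands back already-zeroed memory from the pool. A short sketch of the substitution; the function and names are placeholders:

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *alloc_cmd_buf(struct dma_pool *pool, dma_addr_t *dma)
{
	/*
	 * Old pattern:
	 *	buf = dma_pool_alloc(pool, GFP_KERNEL, dma);
	 *	memset(buf, 0, size);
	 * dma_pool_zalloc() folds the memset into the allocation.
	 */
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}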
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 486c075998f6..01a9b8971e88 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -323,6 +323,12 @@ struct els_logo_payload {
uint8_t wwpn[WWN_SIZE];
};
+struct els_plogi_payload {
+ uint8_t opcode;
+ uint8_t rsvd[3];
+ uint8_t data[112];
+};
+
struct ct_arg {
void *iocb;
u16 nport_handle;
@@ -358,6 +364,19 @@ struct srb_iocb {
dma_addr_t els_logo_pyld_dma;
} els_logo;
struct {
+#define ELS_DCMD_PLOGI 0x3
+ uint32_t flags;
+ uint32_t els_cmd;
+ struct completion comp;
+ struct els_plogi_payload *els_plogi_pyld;
+ struct els_plogi_payload *els_resp_pyld;
+ dma_addr_t els_plogi_pyld_dma;
+ dma_addr_t els_resp_pyld_dma;
+ uint32_t fw_status[3];
+ __le16 comp_status;
+ __le16 len;
+ } els_plogi;
+ struct {
/*
* Values for flags field below are as
* defined in tsk_mgmt_entry struct
@@ -922,6 +941,7 @@ struct mbx_cmd_32 {
#define INTR_RSP_QUE_UPDATE_83XX 0x14
#define INTR_ATIO_QUE_UPDATE 0x1C
#define INTR_ATIO_RSP_QUE_UPDATE 0x1D
+#define INTR_ATIO_QUE_UPDATE_27XX 0x1E
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
@@ -2302,6 +2322,7 @@ typedef struct fc_port {
unsigned int send_els_logo:1;
unsigned int login_pause:1;
unsigned int login_succ:1;
+ unsigned int query:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2347,6 +2368,7 @@ typedef struct fc_port {
uint8_t fc4_type;
uint8_t fc4f_nvme;
uint8_t scan_state;
+ uint8_t n2n_flag;
unsigned long last_queue_full;
unsigned long last_ramp_up;
@@ -2368,6 +2390,9 @@ typedef struct fc_port {
struct list_head gnl_entry;
struct work_struct del_work;
u8 iocb[IOCB_SIZE];
+ u8 current_login_state;
+ u8 last_login_state;
+ struct completion n2n_done;
} fc_port_t;
#define QLA_FCPORT_SCAN 1
@@ -4113,6 +4138,7 @@ typedef struct scsi_qla_host {
#define QPAIR_ONLINE_CHECK_NEEDED 27
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
+#define N2N_LOGIN_NEEDED 30
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4223,6 +4249,9 @@ typedef struct scsi_qla_host {
wait_queue_head_t fcport_waitQ;
wait_queue_head_t vref_waitq;
uint8_t min_link_speed_feat;
+ uint8_t n2n_node_name[WWN_SIZE];
+ uint8_t n2n_port_name[WWN_SIZE];
+ uint16_t n2n_id;
} scsi_qla_host_t;
struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_devtbl.h b/drivers/scsi/qla2xxx/qla_devtbl.h
index d6ea69df7c5c..ffb9694be748 100644
--- a/drivers/scsi/qla2xxx/qla_devtbl.h
+++ b/drivers/scsi/qla2xxx/qla_devtbl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define QLA_MODEL_NAMES 0x5C
/*
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index bec641aae7b3..d5cef0727e72 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -753,9 +753,7 @@ struct els_entry_24xx {
uint8_t reserved_2;
uint8_t port_id[3];
- uint8_t reserved_3;
-
- uint16_t reserved_4;
+ uint8_t s_id[3];
uint16_t control_flags; /* Control flags. */
#define ECF_PAYLOAD_DESCR_MASK (BIT_15|BIT_14|BIT_13)
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f852ca60c49f..fa115c7433e5 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,6 +45,8 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
+ port_id_t);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -145,6 +147,7 @@ extern int ql2xmvasynctoatio;
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
+extern int ql2xenablemsix;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -206,8 +209,8 @@ int qla24xx_async_abort_cmd(srb_t *);
*/
extern struct scsi_host_template qla2xxx_driver_template;
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
-extern void qla2x00_timer(scsi_qla_host_t *);
-extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla2x00_timer(struct timer_list *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long);
extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
extern int qla24xx_disable_vp (scsi_qla_host_t *);
extern int qla24xx_enable_vp (scsi_qla_host_t *);
@@ -486,6 +489,8 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
uint16_t *);
int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
struct port_database_24xx *);
+int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
+ void *, uint16_t);
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
@@ -753,7 +758,7 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
extern void qla2x00_sp_free(void *);
-extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_sp_timeout(struct timer_list *);
extern void qla2x00_bsg_job_done(void *, int);
extern void qla2x00_bsg_sp_free(void *);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b5b48ddca962..1bafa043f9f1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -45,9 +45,9 @@ static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
/* SRB Extensions ---------------------------------------------------------- */
void
-qla2x00_sp_timeout(unsigned long __data)
+qla2x00_sp_timeout(struct timer_list *t)
{
- srb_t *sp = (srb_t *)__data;
+ srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
@@ -812,13 +812,12 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0xd043,
"Failed to allocate port database structure.\n");
goto done_free_sp;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_GET_PORT_DATABASE;
@@ -1434,6 +1433,14 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
+ if (ea->fcport->n2n_flag) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post fc4 prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->fc4f_nvme = 0;
+ ea->fcport->n2n_flag = 0;
+ qla24xx_post_prli_work(vha, ea->fcport);
+ }
ql_dbg(ql_dbg_disc, vha, 0x2119,
"%s %d %8phC unhandle event of %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[0]);
@@ -4367,7 +4374,109 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
return (rval);
}
+/*
+ * N2N Login
+ * Handles login for a point-to-point (N2N) connected port and updates
+ * the Fibre Channel device database accordingly.
+ *
+ * Input:
+ * vha = adapter block pointer.
+ * fcport = port to be logged in.
+ *
+ * Returns: QLA function return code (QLA_SUCCESS on success).
+ */
+static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
+ fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int res = QLA_SUCCESS, rval;
+ int greater_wwpn = 0;
+ int logged_in = 0;
+
+ if (ha->current_topology != ISP_CFG_N)
+ return res;
+
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(vha->n2n_port_name)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "HBA WWPN is greater %llx > target %llx\n",
+ wwn_to_u64(vha->port_name),
+ wwn_to_u64(vha->n2n_port_name));
+ greater_wwpn = 1;
+ fcport->d_id.b24 = vha->n2n_id;
+ }
+
+ fcport->loop_id = vha->loop_id;
+ fcport->fc4f_nvme = 0;
+ fcport->query = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x4001,
+ "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
+ fcport->d_id.b24, vha->loop_id);
+
+ /* Fill in member data. */
+ if (!greater_wwpn) {
+ rval = qla2x00_get_port_database(vha, fcport, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x1051,
+ "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
+ fcport->current_login_state, fcport->last_login_state,
+ fcport->d_id.b24, fcport->loop_id, rval);
+
+ if (((fcport->current_login_state & 0xf) == 0x4) ||
+ ((fcport->current_login_state & 0xf) == 0x6))
+ logged_in = 1;
+ }
+
+ if (logged_in || greater_wwpn) {
+ if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+ qla_nvme_register_hba(vha);
+
+ /* Set connected N_Port d_id */
+ if (vha->flags.nvme_enabled)
+ fcport->fc4f_nvme = 1;
+
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ fcport->disc_state = DSC_GNL;
+ fcport->n2n_flag = 1;
+ fcport->flags = 3;
+ vha->hw->flags.gpsc_supported = 0;
+
+ if (greater_wwpn) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d PLOGI ELS %8phC\n",
+ __func__, __LINE__, fcport->port_name);
+
+ res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ fcport, fcport->d_id);
+ }
+
+ if (res != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xd04d,
+ "PLOGI Failed: portid=%06x - retrying\n",
+ fcport->d_id.b24);
+ res = QLA_SUCCESS;
+ } else {
+ /* State 0x6 means FCP PRLI complete */
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ }
+ } else {
+ /* Wait for next database change */
+ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+ }
+ return res;
+}
/*
* qla2x00_configure_local_loop
@@ -4438,6 +4547,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
}
+	/* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ rval = qla24xx_n2n_handle_login(vha, new_fcport);
+ if (rval != QLA_SUCCESS)
+ goto cleanup_allocation;
+ return QLA_SUCCESS;
+ }
+
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
@@ -4479,10 +4596,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
"Failed to retrieve fcport information "
"-- get_port_database=%x, loop_id=0x%04x.\n",
rval2, new_fcport->loop_id);
- ql_dbg(ql_dbg_disc, vha, 0x2105,
- "Scheduling resync.\n");
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- continue;
+ /* Skip retry if N2N */
+ if (ha->current_topology != ISP_CFG_N) {
+ ql_dbg(ql_dbg_disc, vha, 0x2105,
+ "Scheduling resync.\n");
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ continue;
+ }
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -7555,6 +7675,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
icb->firmware_options_3 |= BIT_0;
+ if (IS_QLA27XX(ha)) {
+ icb->firmware_options_3 |= BIT_8;
+ ql_dbg(ql_log_info, vha, 0x0075,
+ "Enabling direct connection.\n");
+ }
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -7910,7 +8036,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
return NULL;
}
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
if (qpair == NULL) {
ql_log(ql_log_warn, vha, 0x0182,
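
The N2N handling added to qla_init.c above hinges on a single rule: in point-to-point (N2N) topology, the port with the numerically larger WWPN initiates the PLOGI while the peer waits to be logged into. A minimal sketch of that rule, with a hypothetical wrapper name (wwn_to_u64() is the existing helper from <scsi/scsi_transport_fc.h>):

#include <linux/types.h>
#include <scsi/scsi_transport_fc.h>	/* wwn_to_u64() */

/* Hypothetical helper: true if the local port should send the PLOGI. */
static inline bool n2n_local_initiates_login(u8 *local_wwpn, u8 *remote_wwpn)
{
	return wwn_to_u64(local_wwpn) > wwn_to_u64(remote_wwpn);
}
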
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 9a2c86eacf44..17d2c20f1f75 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -269,10 +269,8 @@ qla2x00_rel_sp(srb_t *sp)
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
- init_timer(&sp->u.iocb_cmd.timer);
+ timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
- sp->u.iocb_cmd.timer.data = (unsigned long)sp;
- sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
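
The qla_inline.h hunk above is one instance of the timer API conversion this patch also applies to qla_os.c and ql4_os.c below: init_timer() plus manual .data/.function assignment becomes timer_setup(), and the callback recovers its owning structure with from_timer(). A self-contained sketch of the pattern, using hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_ctx {			/* hypothetical container */
	struct timer_list timer;
	unsigned long fired;
};

static void demo_timeout(struct timer_list *t)
{
	/* from_timer() maps the timer back to its containing structure. */
	struct demo_ctx *ctx = from_timer(ctx, t, timer);

	ctx->fired++;
}

static void demo_start_timer(struct demo_ctx *ctx, unsigned long secs)
{
	timer_setup(&ctx->timer, demo_timeout, 0);
	ctx->timer.expires = jiffies + secs * HZ;
	add_timer(&ctx->timer);
}
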
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2f94159186d7..d810a447cb4a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2518,6 +2518,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
+ uint32_t dsd_len = 24;
els_iocb->entry_type = ELS_IOCB_TYPE;
els_iocb->entry_count = 1;
@@ -2534,24 +2535,198 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ els_iocb->s_id[0] = vha->d_id.b.al_pa;
+ els_iocb->s_id[1] = vha->d_id.b.area;
+ els_iocb->s_id[2] = vha->d_id.b.domain;
els_iocb->control_flags = 0;
- els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
- els_iocb->tx_address[0] =
- cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
- els_iocb->tx_address[1] =
- cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
- els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
+ if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+ els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->tx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
+ els_iocb->tx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
+ els_iocb->tx_len = dsd_len;
+
+ els_iocb->rx_dsd_count = 1;
+ els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
+ els_iocb->rx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
+ els_iocb->rx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
+ els_iocb->rx_len = dsd_len;
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
+ "PLOGI ELS IOCB:\n");
+ ql_dump_buffer(ql_log_info, vha, 0x0109,
+ (uint8_t *)els_iocb, 0x70);
+ } else {
+ els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+ els_iocb->tx_address[0] =
+ cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_address[1] =
+ cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
+ els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
- els_iocb->rx_byte_count = 0;
- els_iocb->rx_address[0] = 0;
- els_iocb->rx_address[1] = 0;
- els_iocb->rx_len = 0;
+ els_iocb->rx_byte_count = 0;
+ els_iocb->rx_address[0] = 0;
+ els_iocb->rx_address[1] = 0;
+ els_iocb->rx_len = 0;
+ }
sp->vha->qla_stats.control_requests++;
}
static void
+qla2x00_els_dcmd2_sp_free(void *data)
+{
+ srb_t *sp = data;
+ struct srb_iocb *elsio = &sp->u.iocb_cmd;
+
+ if (elsio->u.els_plogi.els_plogi_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+ elsio->u.els_plogi.els_plogi_pyld,
+ elsio->u.els_plogi.els_plogi_pyld_dma);
+
+ if (elsio->u.els_plogi.els_resp_pyld)
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
+ elsio->u.els_plogi.els_resp_pyld,
+ elsio->u.els_plogi.els_resp_pyld_dma);
+
+ del_timer(&elsio->timer);
+ qla2x00_rel_sp(sp);
+}
+
+static void
+qla2x00_els_dcmd2_iocb_timeout(void *data)
+{
+ srb_t *sp = data;
+ fc_port_t *fcport = sp->fcport;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ unsigned long flags = 0;
+ int res;
+
+ ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
+ "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
+ sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
+
+ /* Abort the exchange */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ res = ha->isp_ops->abort_command(sp);
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command %s\n",
+ (res == QLA_SUCCESS) ? "successful" : "failed");
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ complete(&lio->u.els_plogi.comp);
+}
+
+static void
+qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ fc_port_t *fcport = sp->fcport;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
+	    "%s ELS hdl=%x, portid=%06x done %8phC\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+
+ complete(&lio->u.els_plogi.comp);
+}
+
+int
+qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
+ fc_port_t *fcport, port_id_t remote_did)
+{
+ srb_t *sp;
+ struct srb_iocb *elsio = NULL;
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ void *ptr, *resp_ptr;
+ dma_addr_t ptr_dma;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp) {
+ ql_log(ql_log_info, vha, 0x70e6,
+ "SRB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ elsio = &sp->u.iocb_cmd;
+ fcport->d_id.b.domain = remote_did.b.domain;
+ fcport->d_id.b.area = remote_did.b.area;
+ fcport->d_id.b.al_pa = remote_did.b.al_pa;
+
+ ql_dbg(ql_dbg_io, vha, 0x3073,
+ "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
+
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
+ elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
+ sp->done = qla2x00_els_dcmd2_sp_done;
+ sp->free = qla2x00_els_dcmd2_sp_free;
+
+ ptr = elsio->u.els_plogi.els_plogi_pyld =
+ dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
+ ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
+
+ if (!elsio->u.els_plogi.els_plogi_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ resp_ptr = elsio->u.els_plogi.els_resp_pyld =
+ dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
+
+ if (!elsio->u.els_plogi.els_resp_pyld) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
+
+ memset(ptr, 0, sizeof(struct els_plogi_payload));
+ memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
+ elsio->u.els_plogi.els_cmd = els_opcode;
+ elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
+ qla24xx_get_port_login_templ(vha, ptr_dma + 4,
+ &elsio->u.els_plogi.els_plogi_pyld->data[0],
+ sizeof(struct els_plogi_payload));
+
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+
+ init_completion(&elsio->u.els_plogi.comp);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_io, vha, 0x3074,
+ "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
+ sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);
+
+ wait_for_completion(&elsio->u.els_plogi.comp);
+
+ if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
+ rval = QLA_FUNCTION_FAILED;
+
+out:
+ sp->free(sp);
+ return rval;
+}
+
+static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
struct bsg_job *bsg_job = sp->u.bsg_job;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9d9668aac6f6..2fd79129bb2a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1041,6 +1041,7 @@ global_port_update:
*/
atomic_set(&vha->loop_down_timer, 0);
if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+ !ha->flags.n2n_ae &&
atomic_read(&vha->loop_state) != LOOP_DEAD) {
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
@@ -1543,8 +1544,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
struct fc_bsg_reply *bsg_reply;
uint16_t comp_status;
uint32_t fw_status[3];
- uint8_t* fw_sts_ptr;
int res;
+ struct srb_iocb *els;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
@@ -1561,10 +1562,14 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
case SRB_ELS_DCMD:
type = "Driver ELS logo";
- ql_dbg(ql_dbg_user, vha, 0x5047,
- "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
- sp->done(sp, 0);
- return;
+ if (iocb_type != ELS_IOCB_TYPE) {
+ ql_dbg(ql_dbg_user, vha, 0x5047,
+ "Completing %s: (%p) type=%d.\n",
+ type, sp, sp->type);
+ sp->done(sp, 0);
+ return;
+ }
+ break;
case SRB_CT_PTHRU_CMD:
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
@@ -1584,6 +1589,33 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
+ if (iocb_type == ELS_IOCB_TYPE) {
+ els = &sp->u.iocb_cmd;
+ els->u.els_plogi.fw_status[0] = fw_status[0];
+ els->u.els_plogi.fw_status[1] = fw_status[1];
+ els->u.els_plogi.fw_status[2] = fw_status[2];
+ els->u.els_plogi.comp_status = fw_status[0];
+ if (comp_status == CS_COMPLETE) {
+ res = DID_OK << 16;
+ } else {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ els->u.els_plogi.len =
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count);
+ } else {
+ els->u.els_plogi.len = 0;
+ res = DID_ERROR << 16;
+ }
+ }
+ ql_log(ql_log_info, vha, 0x503f,
+ "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count));
+ goto els_ct_done;
+ }
+
/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
* fc payload to the caller
*/
@@ -1604,11 +1636,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
- fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
- }
- else {
+ } else {
ql_dbg(ql_dbg_user, vha, 0x5040,
"ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
@@ -1619,10 +1647,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
pkt)->error_subcode_2));
res = DID_ERROR << 16;
bsg_reply->reply_payload_rcv_len = 0;
- fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
+ memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
+ fw_status, sizeof(fw_status));
ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
(uint8_t *)pkt, sizeof(*pkt));
}
@@ -1631,6 +1658,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
bsg_job->reply_len = 0;
}
+els_ct_done:
sp->done(sp, res);
}
@@ -3129,6 +3157,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
+ case INTR_ATIO_QUE_UPDATE_27XX:
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3259,6 +3288,7 @@ qla24xx_msix_default(int irq, void *dev_id)
case INTR_RSP_QUE_UPDATE_83XX:
qla24xx_process_response_queue(vha, rsp);
break;
+ case INTR_ATIO_QUE_UPDATE_27XX:
case INTR_ATIO_QUE_UPDATE:{
unsigned long flags2;
spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
@@ -3347,7 +3377,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
.pre_vectors = QLA_BASE_VECTORS,
};
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
+ IS_ATIO_MSIX_CAPABLE(ha)) {
desc.pre_vectors++;
min_vecs++;
}
@@ -3374,7 +3405,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
ha->msix_count, ret);
ha->msix_count = ret;
/* Recalculate queue values */
- if (ha->mqiobase && ql2xmqsupport) {
+ if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
ha->max_req_queues = ha->msix_count - 1;
/* ATIOQ needs 1 vector. That's 1 less QPair */
@@ -3432,7 +3463,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
* If target mode is enable, also request the vector for the ATIO
* queue.
*/
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
+ IS_ATIO_MSIX_CAPABLE(ha)) {
qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
rsp->msix = qentry;
qentry->handle = rsp;
@@ -3486,11 +3518,14 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
- if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
- !IS_QLA27XX(ha))
+ if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
+ !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+ !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
goto skip_msi;
+ if (ql2xenablemsix == 2)
+ goto skip_msix;
+
if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
(ha->pdev->subsystem_device == 0x7040 ||
ha->pdev->subsystem_device == 0x7041 ||
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 99502fa90810..cb717d47339f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1782,13 +1782,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
"Entered %s.\n", __func__);
pd24 = NULL;
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0x1050,
"Failed to allocate port database structure.\n");
+ fcport->query = 0;
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mcp->mb[0] = MBC_GET_PORT_DATABASE;
if (opt != 0 && !IS_FWI2_CAPABLE(ha))
@@ -1823,17 +1823,32 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
if (IS_FWI2_CAPABLE(ha)) {
uint64_t zero = 0;
+ u8 current_login_state, last_login_state;
+
pd24 = (struct port_database_24xx *) pd;
/* Check for logged in state. */
- if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
- pd24->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0x1051,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd24->current_login_state,
- pd24->last_login_state, fcport->loop_id);
+ if (fcport->fc4f_nvme) {
+ current_login_state = pd24->current_login_state >> 4;
+ last_login_state = pd24->last_login_state >> 4;
+ } else {
+ current_login_state = pd24->current_login_state & 0xf;
+ last_login_state = pd24->last_login_state & 0xf;
+ }
+ fcport->current_login_state = pd24->current_login_state;
+ fcport->last_login_state = pd24->last_login_state;
+
+ /* Check for logged in state. */
+ if (current_login_state != PDS_PRLI_COMPLETE &&
+ last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+ current_login_state, last_login_state,
+ fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
+
+ if (!fcport->query)
+ goto gpd_error_out;
}
if (fcport->loop_id == FC_NO_LOOP_ID ||
@@ -1912,6 +1927,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
gpd_error_out:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+ fcport->query = 0;
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1052,
@@ -2255,13 +2271,12 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
else
req = ha->req_q_map[0];
- lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
ql_log(ql_log_warn, vha, 0x1062,
"Failed to allocate login IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(lg, 0, sizeof(struct logio_entry_24xx));
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
lg->entry_count = 1;
@@ -2525,13 +2540,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
"Entered %s.\n", __func__);
- lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
+ lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
ql_log(ql_log_warn, vha, 0x106e,
"Failed to allocate logout IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(lg, 0, sizeof(struct logio_entry_24xx));
req = vha->req;
lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
@@ -2820,13 +2834,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
"Entered %s.\n", __func__);
- pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
+ pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
ql_log(ql_log_warn, vha, 0x1080,
"Memory alloc failed.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(pmap, 0, FCAL_MAP_SIZE);
mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
mcp->mb[2] = MSW(pmap_dma);
@@ -3014,13 +3027,12 @@ qla24xx_abort_command(srb_t *sp)
return QLA_FUNCTION_FAILED;
}
- abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
+ abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
if (abt == NULL) {
ql_log(ql_log_warn, vha, 0x108d,
"Failed to allocate abort IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(abt, 0, sizeof(struct abort_entry_24xx));
abt->entry_type = ABORT_IOCB_TYPE;
abt->entry_count = 1;
@@ -3098,13 +3110,12 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
rsp = req->rsp;
}
- tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
+ tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
ql_log(ql_log_warn, vha, 0x1093,
"Failed to allocate task management IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk->p.tsk.entry_count = 1;
@@ -3753,6 +3764,38 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->vp_status,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: WWPN %8phC.\n",
+ vha->port_name);
+
+		/* N2N: direct connect */
+ if (IS_QLA27XX(ha) &&
+ ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
+			/* If our portname is higher, initiate the N2N login */
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(rptid_entry->u.f1.port_name)) {
+ // ??? qlt_update_host_map(vha, id);
+ vha->n2n_id = 0x1;
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Setting n2n_update_needed for id %d\n",
+ vha->n2n_id);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0x5075,
+ "Format 1: Remote login - Waiting for WWPN %8phC.\n",
+ rptid_entry->u.f1.port_name);
+ }
+
+ memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
+ WWN_SIZE);
+ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ return;
+ }
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3856,14 +3899,13 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
"Entered %s.\n", __func__);
- vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
+ vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
ql_log(ql_log_warn, vha, 0x10bc,
"Failed to allocate modify VP IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
vpmod->entry_count = 1;
vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
@@ -3934,13 +3976,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
- vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
+ vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
if (!vce) {
ql_log(ql_log_warn, vha, 0x10c2,
"Failed to allocate VP control IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
vce->entry_type = VP_CTRL_IOCB_TYPE;
vce->entry_count = 1;
@@ -4592,6 +4633,48 @@ qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
return rval;
}
+int
+qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
+ void *buf, uint16_t bufsiz)
+{
+ int rval, i;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint32_t *bp;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
+ mcp->mb[2] = MSW(buf_dma);
+ mcp->mb[3] = LSW(buf_dma);
+ mcp->mb[6] = MSW(MSD(buf_dma));
+ mcp->mb[7] = LSW(MSD(buf_dma));
+ mcp->mb[8] = bufsiz/4;
+ mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ bp = (uint32_t *) buf;
+ for (i = 0; i < (bufsiz-4)/4; i++, bp++)
+ *bp = cpu_to_be32(*bp);
+ }
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -6025,13 +6108,12 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
if (!vha->hw->flags.fw_started)
goto done;
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0xd047,
"Failed to allocate port database structure.\n");
goto done_free_sp;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
memset(&mc, 0, sizeof(mc));
mc.mb[0] = MBC_GET_PORT_DATABASE;
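
The dma_pool_alloc()+memset() pairs replaced throughout qla_mbx.c (and in qla_init.c above) all collapse into the same one-call form; a short sketch of the before/after pattern, with hypothetical names:

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>

/* Old pattern: allocate from the pool, then clear by hand. */
static void *demo_get_buf_old(struct dma_pool *pool, size_t size,
			      dma_addr_t *dma)
{
	void *buf = dma_pool_alloc(pool, GFP_KERNEL, dma);

	if (buf)
		memset(buf, 0, size);
	return buf;
}

/* New pattern: dma_pool_zalloc() returns already-zeroed pool memory. */
static void *demo_get_buf_new(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}
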
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c0f8f6c17b79..bd9f14bf7ac2 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -487,7 +487,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+ qla2x00_start_timer(vha, WATCH_INTERVAL);
vha->req = base_vha->req;
host->can_queue = base_vha->req->length + 128;
@@ -606,7 +606,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair, *tqpair;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
qp_list_elem)
qla2xxx_delete_qpair(vha, qpair);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index e23a3d4c36f3..d5da3981cefe 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -2245,8 +2245,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
memcpy(fstatus.reserved_3,
pkt->reserved_2, 20 * sizeof(uint8_t));
- fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
- sizeof(struct fc_bsg_reply);
+ fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
sizeof(struct qla_mt_iocb_rsp_fx00));
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3bd956d3bc5d..46f2d0cf7c0d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -268,6 +268,15 @@ MODULE_PARM_DESC(ql2xautodetectsfp,
"Detect SFP range and set appropriate distance.\n"
"1 (Default): Enable\n");
+int ql2xenablemsix = 1;
+module_param(ql2xenablemsix, int, 0444);
+MODULE_PARM_DESC(ql2xenablemsix,
+ "Set to enable MSI or MSI-X interrupt mechanism.\n"
+ " Default is 1, enable MSI-X interrupt mechanism.\n"
+ " 0 -- enable traditional pin-based mechanism.\n"
+ " 1 -- enable MSI-X interrupt mechanism.\n"
+ " 2 -- enable MSI interrupt mechanism.\n");
+
/*
* SCSI host template entry points
*/
@@ -330,12 +339,10 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
*/
__inline__ void
-qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
+qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
- init_timer(&vha->timer);
+ timer_setup(&vha->timer, qla2x00_timer, 0);
vha->timer.expires = jiffies + interval * HZ;
- vha->timer.data = (unsigned long)vha;
- vha->timer.function = (void (*)(unsigned long))func;
add_timer(&vha->timer);
vha->timer_active = 1;
}
@@ -388,7 +395,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(rsp->qpair, smp_processor_id());
+ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
@@ -424,7 +431,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
qla_init_base_qpair(vha, req, rsp);
- if (ql2xmqsupport && ha->max_qpairs) {
+ if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
if (!ha->queue_pair_map) {
@@ -1967,7 +1974,8 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
- if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+	if ((!ql2xmqsupport && !ql2xnvmeenable) ||
+	    (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -2064,7 +2072,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
* By default, driver uses at least two msix vectors
* (default & rspq)
*/
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
@@ -3082,9 +3090,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg(ql_dbg_init, base_vha, 0x0192,
"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
- } else
- ql_dbg(ql_dbg_init, base_vha, 0x0193,
- "blk/scsi-mq disabled.\n");
+ } else {
+ if (ql2xnvmeenable) {
+ host->nr_hw_queues = ha->max_qpairs;
+ ql_dbg(ql_dbg_init, base_vha, 0x0194,
+ "FC-NVMe support is enabled, HW queues=%d\n",
+ host->nr_hw_queues);
+ } else {
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+ }
+ }
qlt_probe_one_stage1(base_vha, ha);
@@ -3212,6 +3228,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_log(ql_log_fatal, base_vha, 0x00ed,
"Failed to start DPC thread.\n");
ret = PTR_ERR(ha->dpc_thread);
+ ha->dpc_thread = NULL;
goto probe_failed;
}
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
@@ -3246,7 +3263,7 @@ skip_dpc:
base_vha->host->irq = ha->pdev->irq;
/* Initialized the timer */
- qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
+ qla2x00_start_timer(base_vha, WATCH_INTERVAL);
ql_dbg(ql_dbg_init, base_vha, 0x00ef,
"Started qla2x00_timer with "
"interval=%d.\n", WATCH_INTERVAL);
@@ -4744,7 +4761,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (pla)
qlt_plogi_ack_unref(vha, pla);
else
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_async_gffid(vha, fcport);
}
if (free_fcport) {
@@ -5995,8 +6012,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
* Context: Interrupt
***************************************************************************/
void
-qla2x00_timer(scsi_qla_host_t *vha)
+qla2x00_timer(struct timer_list *t)
{
+ scsi_qla_host_t *vha = from_timer(vha, t, timer);
unsigned long cpu_flags = 0;
int start_dpc = 0;
int index;
@@ -6292,7 +6310,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -6309,7 +6327,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -6317,7 +6335,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f05cfc83c9c8..18069edd4773 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
@@ -6546,6 +6546,7 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6553,14 +6554,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ icb = (struct init_cb_24xx *)ha->init_cb;
+
+ if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
struct qla_msix_entry *msix = &ha->msix_entries[2];
- struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
icb->msix_atio = cpu_to_le16(msix->entry);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
+ } else if (ql2xenablemsix == 0) {
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
+ ql_dbg(ql_dbg_init, vha, 0xf07f,
+ "Registering INTx vector for ATIO.\n");
}
}
@@ -6805,7 +6811,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8c4b505c9f66..b6ec02b96d3d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.01-k"
+#define QLA2XXX_VERSION "10.00.00.02-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 071035dfa99a..7550ba2831c3 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <target/target_core_base.h>
#include <linux/btree.h>
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 64c6fa563fdb..2b8a8ce2a431 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3955,16 +3955,15 @@ exit_session_conn_param:
/*
* Timer routines
*/
+static void qla4xxx_timer(struct timer_list *t);
-static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func,
+static void qla4xxx_start_timer(struct scsi_qla_host *ha,
unsigned long interval)
{
DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n",
__func__, ha->host->host_no));
- init_timer(&ha->timer);
+ timer_setup(&ha->timer, qla4xxx_timer, 0);
ha->timer.expires = jiffies + interval * HZ;
- ha->timer.data = (unsigned long)ha;
- ha->timer.function = (void (*)(unsigned long))func;
add_timer(&ha->timer);
ha->timer_active = 1;
}
@@ -4508,8 +4507,9 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
**/
-static void qla4xxx_timer(struct scsi_qla_host *ha)
+static void qla4xxx_timer(struct timer_list *t)
{
+ struct scsi_qla_host *ha = from_timer(ha, t, timer);
int start_dpc = 0;
uint16_t w;
@@ -8805,7 +8805,7 @@ skip_retry_init:
ha->isp_ops->enable_intrs(ha);
/* Start timer thread. */
- qla4xxx_start_timer(ha, qla4xxx_timer, 1);
+ qla4xxx_start_timer(ha, 1);
set_bit(AF_INIT_DONE, &ha->flags);
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index f6b1216af79f..a971db11d293 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* to be used by qlogicfas and qlogic_cs */
#ifndef __QLOGICFAS408_H
#define __QLOGICFAS408_H
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 892a0b058b99..884ad72ade57 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* qlogicpti.h: Performance Technologies QlogicISP sbus card defines.
*
* Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index d5a55fae60e0..6dcc4c685d1d 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* scsi.h Copyright (C) 1992 Drew Eckhardt
* Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index a75673bb82b3..40bc616cf8ab 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCSI functions used by both the initiator and the target code.
*/
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 09ba494f8896..e4f037f0f38b 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -953,9 +953,9 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
}
-static const char * inq_vendor_id = "Linux ";
-static const char * inq_product_id = "scsi_debug ";
-static const char *inq_product_rev = "0186"; /* version less '.' */
+static char sdebug_inq_vendor_id[9] = "Linux ";
+static char sdebug_inq_product_id[17] = "scsi_debug ";
+static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -975,8 +975,8 @@ static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
arr[0] = 0x2; /* ASCII */
arr[1] = 0x1;
arr[2] = 0x0;
- memcpy(&arr[4], inq_vendor_id, 8);
- memcpy(&arr[12], inq_product_id, 16);
+ memcpy(&arr[4], sdebug_inq_vendor_id, 8);
+ memcpy(&arr[12], sdebug_inq_product_id, 16);
memcpy(&arr[28], dev_id_str, dev_id_str_len);
num = 8 + 16 + dev_id_str_len;
arr[3] = num;
@@ -1408,9 +1408,9 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[6] = 0x10; /* claim: MultiP */
/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
arr[7] = 0xa; /* claim: LINKED + CMDQUE */
- memcpy(&arr[8], inq_vendor_id, 8);
- memcpy(&arr[16], inq_product_id, 16);
- memcpy(&arr[32], inq_product_rev, 4);
+ memcpy(&arr[8], sdebug_inq_vendor_id, 8);
+ memcpy(&arr[16], sdebug_inq_product_id, 16);
+ memcpy(&arr[32], sdebug_inq_product_rev, 4);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -3001,11 +3001,11 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
return DID_ERROR << 16;
- } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
+ } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
sdev_printk(KERN_INFO, scp->device,
- "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ "%s: %s: lb size=%u, IO sent=%d bytes\n",
my_name, "write same",
- num * sdebug_sector_size, ret);
+ sdebug_sector_size, ret);
/* Copy first sector to remaining blocks */
for (i = 1 ; i < num ; i++)
@@ -4151,6 +4151,12 @@ module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
+module_param_string(inq_vendor, sdebug_inq_vendor_id,
+ sizeof(sdebug_inq_vendor_id), S_IRUGO|S_IWUSR);
+module_param_string(inq_product, sdebug_inq_product_id,
+ sizeof(sdebug_inq_product_id), S_IRUGO|S_IWUSR);
+module_param_string(inq_rev, sdebug_inq_product_rev,
+ sizeof(sdebug_inq_product_rev), S_IRUGO|S_IWUSR);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
@@ -4202,6 +4208,9 @@ MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
+MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
+MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
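
With the three module_param_string() hooks above, scsi_debug's emulated INQUIRY identification can be overridden at module load time via the new inq_vendor, inq_product and inq_rev parameters; the backing buffers are sized to the standard 8-, 16- and 4-byte INQUIRY fields (plus a terminating NUL), and the space-padded defaults show the expected fixed-width formatting.
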
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 5e9755008aed..01f08c03f2c1 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 28fea83ae2fe..78d4aa8df675 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/blkdev.h>
#include <linux/init.h>
@@ -21,7 +22,7 @@ struct scsi_dev_info_list {
struct list_head dev_info_list;
char vendor[8];
char model[16];
- unsigned flags;
+ blist_flags_t flags;
unsigned compatible; /* for use with scsi_static_device_list entries */
};
@@ -34,7 +35,7 @@ struct scsi_dev_info_list_table {
static const char spaces[] = " "; /* 16 of them */
-static unsigned scsi_default_dev_flags;
+static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -51,7 +52,7 @@ static struct {
char *vendor;
char *model;
char *revision; /* revision known to be bad, unused */
- unsigned flags;
+ blist_flags_t flags;
} scsi_static_device_list[] __initdata = {
/*
* The following devices are known not to tolerate a lun != 0 scan
@@ -134,6 +135,7 @@ static struct {
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
@@ -160,7 +162,7 @@ static struct {
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
@@ -173,7 +175,7 @@ static struct {
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
{"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -304,8 +306,8 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
*/
to[from_length] = '\0';
} else {
- /*
- * space pad the string if it is short.
+ /*
+ * space pad the string if it is short.
*/
strncpy(&to[from_length], spaces,
to_length - from_length);
@@ -325,15 +327,15 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
* @flags: if strflags NULL, use this flag value
*
* Description:
- * Create and add one dev_info entry for @vendor, @model, @strflags or
- * @flag. If @compatible, add to the tail of the list, do not space
- * pad, and set devinfo->compatible. The scsi_static_device_list entries
- * are added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model, @strflags or
+ * @flag. If @compatible, add to the tail of the list, do not space
+ * pad, and set devinfo->compatible. The scsi_static_device_list entries
+ *	are added with @compatible 1 and @strflags NULL.
*
* Returns: 0 OK, -error on failure.
**/
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
- char *strflags, int flags)
+ char *strflags, blist_flags_t flags)
{
return scsi_dev_info_list_add_keyed(compatible, vendor, model,
strflags, flags,
@@ -350,16 +352,16 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* @key: specify list to use
*
* Description:
- * Create and add one dev_info entry for @vendor, @model,
- * @strflags or @flag in list specified by @key. If @compatible,
- * add to the tail of the list, do not space pad, and set
- * devinfo->compatible. The scsi_static_device_list entries are
- * added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flag in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ *	added with @compatible 1 and @strflags NULL.
*
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, int flags, int key)
+ char *strflags, blist_flags_t flags, int key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -399,13 +401,13 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
* scsi_dev_info_list_find - find a matching dev_info list entry.
- * @vendor: vendor string
- * @model: model (product) string
+ * @vendor: full vendor string
+ * @model: full model (product) string
* @key: specify list to use
*
* Description:
* Finds the first dev_info entry matching @vendor, @model
- * in list specified by @key.
+ * in list specified by @key.
*
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
@@ -415,7 +417,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
- size_t vmax, mmax;
+ size_t vmax, mmax, mlen;
const char *vskip, *mskip;
if (IS_ERR(devinfo_table))
@@ -454,22 +456,25 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
dev_info_list) {
if (devinfo->compatible) {
/*
- * Behave like the older version of get_device_flags.
+ * vendor strings must be an exact match
*/
- if (memcmp(devinfo->vendor, vskip, vmax) ||
- (vmax < sizeof(devinfo->vendor) &&
- devinfo->vendor[vmax]))
+ if (vmax != strlen(devinfo->vendor) ||
+ memcmp(devinfo->vendor, vskip, vmax))
continue;
- if (memcmp(devinfo->model, mskip, mmax) ||
- (mmax < sizeof(devinfo->model) &&
- devinfo->model[mmax]))
+
+ /*
+ * @model specifies the full string, and
+ * must be larger or equal to devinfo->model
+ */
+ mlen = strlen(devinfo->model);
+ if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
} else {
if (!memcmp(devinfo->vendor, vendor,
- sizeof(devinfo->vendor)) &&
- !memcmp(devinfo->model, model,
- sizeof(devinfo->model)))
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
return devinfo;
}
}
@@ -508,10 +513,10 @@ EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
* @dev_list: string of device flags to add
*
* Description:
- * Parse dev_list, and add entries to the scsi_dev_info_list.
- * dev_list is of the form "vendor:product:flag,vendor:product:flag".
- * dev_list is modified via strsep. Can be called for command line
- * addition, for proc or mabye a sysfs interface.
+ * Parse dev_list, and add entries to the scsi_dev_info_list.
+ * dev_list is of the form "vendor:product:flag,vendor:product:flag".
+ * dev_list is modified via strsep. Can be called for command line
+ *	addition, for proc or maybe a sysfs interface.
*
* Returns: 0 if OK, -error on failure.
**/
@@ -566,9 +571,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
* matching flags value, else return the host or global default
* settings. Called during scan time.
**/
-int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model)
+blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model)
{
return scsi_get_device_flags_keyed(sdev, vendor, model,
SCSI_DEVINFO_GLOBAL);
@@ -588,7 +593,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
* flags value, else return the host or global default settings.
* Called during scan time.
**/
-int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
int key)
@@ -701,7 +706,7 @@ static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &scsi_devinfo_seq_ops);
}
-/*
+/*
* proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
*
* Description: Adds a black/white list entry for vendor and model with an
@@ -840,8 +845,8 @@ EXPORT_SYMBOL(scsi_dev_info_remove_list);
* scsi_init_devinfo - set up the dynamic device list.
*
* Description:
- * Add command line entries from scsi_dev_flags, then add
- * scsi_static_device_list entries to the scsi device info list.
+ * Add command line entries from scsi_dev_flags, then add
+ * scsi_static_device_list entries to the scsi device info list.
*/
int __init scsi_init_devinfo(void)
{
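
The rewritten lookup in scsi_devinfo.c above changes how compatible entries are matched: the vendor string must now match exactly, while the entry's model is treated as a prefix of the device's full model string. A hedged, stand-alone restatement of that comparison (names are illustrative only):

#include <linux/string.h>
#include <linux/types.h>

/*
 * Hypothetical helper mirroring the new rule: @vendor/@model are the
 * device strings with any leading spaces already skipped, @vmax/@mmax
 * their remaining lengths.
 */
static bool demo_devinfo_match(const char *entry_vendor,
			       const char *entry_model,
			       const char *vendor, size_t vmax,
			       const char *model, size_t mmax)
{
	size_t mlen = strlen(entry_model);

	/* Vendor strings must be an exact match. */
	if (vmax != strlen(entry_vendor) ||
	    memcmp(entry_vendor, vendor, vmax))
		return false;

	/* The entry's model may be a prefix of the full model string. */
	return mmax >= mlen && !memcmp(entry_model, model, mlen);
}
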
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 84addee05be6..2b785d09d5bd 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -126,20 +126,36 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
static int scsi_dh_handler_attach(struct scsi_device *sdev,
struct scsi_device_handler *scsi_dh)
{
- int error;
+ int error, ret = 0;
if (!try_module_get(scsi_dh->module))
return -EINVAL;
error = scsi_dh->attach(sdev);
- if (error) {
- sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
- scsi_dh->name, error);
+ if (error != SCSI_DH_OK) {
+ switch (error) {
+ case SCSI_DH_NOMEM:
+ ret = -ENOMEM;
+ break;
+ case SCSI_DH_RES_TEMP_UNAVAIL:
+ ret = -EAGAIN;
+ break;
+ case SCSI_DH_DEV_UNSUPP:
+ case SCSI_DH_NOSYS:
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret != -ENODEV)
+ sdev_printk(KERN_ERR, sdev, "%s: Attach failed (%d)\n",
+ scsi_dh->name, error);
module_put(scsi_dh->module);
} else
sdev->handler = scsi_dh;
- return error;
+ return ret;
}
/*
@@ -153,18 +169,20 @@ static void scsi_dh_handler_detach(struct scsi_device *sdev)
module_put(sdev->handler->module);
}
-int scsi_dh_add_device(struct scsi_device *sdev)
+void scsi_dh_add_device(struct scsi_device *sdev)
{
struct scsi_device_handler *devinfo = NULL;
const char *drv;
- int err = 0;
drv = scsi_dh_find_driver(sdev);
if (drv)
devinfo = __scsi_dh_lookup(drv);
+ /*
+ * device_handler is optional, so ignore errors
+ * from scsi_dh_handler_attach()
+ */
if (devinfo)
- err = scsi_dh_handler_attach(sdev, devinfo);
- return err;
+ (void)scsi_dh_handler_attach(sdev, devinfo);
}
void scsi_dh_release_device(struct scsi_device *sdev)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dab876c65473..62b56de38ae8 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -403,6 +403,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
"threshold.\n");
}
+ if (sshdr->asc == 0x29) {
+ evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
+ }
+
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
@@ -579,6 +585,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
+ sshdr.asc == 0x22 || /* Invalid function */
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
@@ -1747,16 +1754,12 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* that it indicates SUCCESS.
*/
return SUCCESS;
+ case DID_SOFT_ERROR:
/*
* when the low level driver returns did_soft_error,
* it is responsible for keeping an internal retry counter
* in order to avoid endless loops (db)
- *
- * actually this is a bug in this function here. we should
- * be mindful of the maximum number of retries specified
- * and not get stuck in a loop.
*/
- case DID_SOFT_ERROR:
goto maybe_retry;
case DID_IMM_RETRY:
return NEEDS_RETRY;
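
The ASC 0x29 handling added to scsi_error.c above pairs with the SDEV_EVT_POWER_ON_RESET_OCCURRED plumbing added to scsi_lib.c below: a unit attention reporting a power-on or reset now raises that event, which is emitted to user space as an SDEV_UA=POWER_ON_RESET_OCCURRED uevent.
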
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ad3ea24f0885..1cbc497e00bd 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -252,9 +252,9 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue,
+ req = blk_get_request_flags(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -268,7 +268,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
rq->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
+ req->rq_flags |= rq_flags | RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
@@ -1301,7 +1301,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
/*
* If the devices is blocked we defer normal commands.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
@@ -1310,7 +1310,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
@@ -1750,7 +1750,10 @@ static void scsi_done(struct scsi_cmnd *cmd)
*
* Returns: Nothing
*
- * Lock status: IO request lock assumed to be held when called.
+ * Lock status: request queue lock assumed to be held when called.
+ *
+ * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
+ * protection for ZBC disks.
*/
static void scsi_request_fn(struct request_queue *q)
__releases(q->queue_lock)
@@ -1940,6 +1943,33 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
+static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ atomic_dec(&sdev->device_busy);
+ put_device(&sdev->sdev_gendev);
+}
+
+static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+
+ return true;
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ return false;
+}
+
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1953,16 +1983,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
if (ret != BLK_STS_OK)
- goto out;
+ goto out_put_budget;
ret = BLK_STS_RESOURCE;
- if (!get_device(&sdev->sdev_gendev))
- goto out;
-
- if (!scsi_dev_queue_ready(q, sdev))
- goto out_put_device;
if (!scsi_target_queue_ready(shost, sdev))
- goto out_dec_device_busy;
+ goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
@@ -1993,15 +2018,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ atomic_dec(&shost->host_busy);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
-out_dec_device_busy:
- atomic_dec(&sdev->device_busy);
-out_put_device:
- put_device(&sdev->sdev_gendev);
-out:
+out_put_budget:
+ scsi_mq_put_budget(hctx);
switch (ret) {
case BLK_STS_OK:
break;
@@ -2205,6 +2227,8 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
}
static const struct blk_mq_ops scsi_mq_ops = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
@@ -2685,7 +2709,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
}
sdev->sdev_state = state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
return 0;
illegal:
@@ -2734,6 +2757,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
break;
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
+ envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
+ break;
default:
/* do nothing */
break;
@@ -2838,6 +2864,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
case SDEV_EVT_LUN_CHANGE_REPORTED:
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
default:
/* do nothing */
break;
@@ -2920,21 +2947,37 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
int
scsi_device_quiesce(struct scsi_device *sdev)
{
+ struct request_queue *q = sdev->request_queue;
int err;
+ /*
+ * It is allowed to call scsi_device_quiesce() multiple times from
+ * the same context but concurrent scsi_device_quiesce() calls are
+ * not allowed.
+ */
+ WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
+
+ blk_set_preempt_only(q);
+
+ blk_mq_freeze_queue(q);
+ /*
+ * Ensure that the effect of blk_set_preempt_only() will be visible
+ * for percpu_ref_tryget() callers that occur after the queue
+ * unfreeze even if the queue was already frozen before this function
+ * was called. See also https://lwn.net/Articles/573497/.
+ */
+ synchronize_rcu();
+ blk_mq_unfreeze_queue(q);
+
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (err == 0)
+ sdev->quiesced_by = current;
+ else
+ blk_clear_preempt_only(q);
mutex_unlock(&sdev->state_mutex);
- if (err)
- return err;
-
- scsi_run_queue(sdev->request_queue);
- while (atomic_read(&sdev->device_busy)) {
- msleep_interruptible(200);
- scsi_run_queue(sdev->request_queue);
- }
- return 0;
+ return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);
@@ -2954,9 +2997,11 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_QUIESCE &&
- scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
- scsi_run_queue(sdev->request_queue);
+ WARN_ON_ONCE(!sdev->quiesced_by);
+ sdev->quiesced_by = NULL;
+ blk_clear_preempt_only(sdev->request_queue);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
@@ -3109,7 +3154,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
case SDEV_BLOCK:
case SDEV_TRANSPORT_OFFLINE:
sdev->sdev_state = new_state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CREATED_BLOCK:
if (new_state == SDEV_TRANSPORT_OFFLINE ||
@@ -3117,7 +3161,6 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
sdev->sdev_state = new_state;
else
sdev->sdev_state = SDEV_CREATED;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CANCEL:
case SDEV_OFFLINE:
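
The new .get_budget/.put_budget callbacks move the per-device busy accounting out of scsi_queue_rq() so the blk-mq core can reserve a device slot before a request is taken off the scheduler. A minimal sketch of how a dispatcher is expected to use this pair (simplified, not the actual block layer code; get_next_request() and dispatch_request() are hypothetical helpers):

    static bool dispatch_one(struct blk_mq_hw_ctx *hctx)
    {
            const struct blk_mq_ops *ops = hctx->queue->mq_ops;
            struct request *rq;

            /* Reserve a device slot first (scsi_mq_get_budget above). */
            if (ops->get_budget && !ops->get_budget(hctx))
                    return false;            /* device_busy limit reached */

            rq = get_next_request(hctx);     /* hypothetical scheduler helper */
            if (!rq) {
                    /* Nothing to send: release the slot (scsi_mq_put_budget). */
                    if (ops->put_budget)
                            ops->put_budget(hctx);
                    return false;
            }

            /* ->queue_rq() now releases the budget itself on failure. */
            return dispatch_request(hctx, rq) == BLK_STS_OK;
    }
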
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
index 2ac3f3975f78..5723915275ad 100644
--- a/drivers/scsi/scsi_lib_dma.c
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SCSI library functions depending on DMA
*/
diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h
index 7fe64a847143..836185de28c4 100644
--- a/drivers/scsi/scsi_logging.h
+++ b/drivers/scsi/scsi_logging.h
@@ -1,12 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_LOGGING_H
#define _SCSI_LOGGING_H
/*
- * This defines the scsi logging feature. It is a means by which the user
- * can select how much information they get about various goings on, and it
- * can be really useful for fault tracing. The logging word is divided into
- * 8 nibbles, each of which describes a loglevel. The division of things is
+ * This defines the scsi logging feature. It is a means by which the user can
+ * select how much information they get about various goings on, and it can be
+ * really useful for fault tracing. The logging word is divided into 10 3-bit
+ * bitfields, each of which describes a loglevel. The division of things is
* somewhat arbitrary, and the division of the word could be changed if it
* were really needed for any reason. The numbers below are the only place
* where these are specified. For a first go-around, 3 bits is more than
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 5c6d016a5ae9..a5946cd64caa 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_PRIV_H
#define _SCSI_PRIV_H
@@ -49,15 +50,16 @@ enum {
SCSI_DEVINFO_SPI,
};
-extern int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model);
-extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model, int key);
+extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model);
+extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- int flags, int key);
+ blist_flags_t flags, int key);
extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
@@ -176,10 +178,10 @@ extern struct async_domain scsi_sd_probe_domain;
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
-int scsi_dh_add_device(struct scsi_device *sdev);
+void scsi_dh_add_device(struct scsi_device *sdev);
void scsi_dh_release_device(struct scsi_device *sdev);
#else
-static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 480a597b3877..7f0ceb65c3f3 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/scsi/scsi_proc.c
*
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index e659912498bd..82fd548c5eee 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SAS_INTERNAL_H
#define _SCSI_SAS_INTERNAL_H
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 15590a063ad9..be5e919db0e8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* scsi_scan.c
*
@@ -565,7 +566,7 @@ EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
* are copied to the scsi_device any flags value is stored in *@bflags.
**/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int result_len, int *bflags)
+ int result_len, blist_flags_t *bflags)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
@@ -987,6 +988,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
scsi_attach_vpd(sdev);
sdev->max_queue_depth = sdev->queue_depth;
+ sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index f796bd61f3f0..50e7d7e4a861 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -20,6 +20,7 @@
#include <scsi/scsi_dh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -966,6 +967,41 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
+#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+static const char *const sdev_bflags_name[] = {
+#include "scsi_devinfo_tbl.c"
+};
+#undef BLIST_FLAG_NAME
+
+static ssize_t
+sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
+ const char *name = NULL;
+
+ if (!(sdev->sdev_bflags & BIT(i)))
+ continue;
+ if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
+ name = sdev_bflags_name[i];
+
+ if (name)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
+ }
+ if (len)
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
+
#ifdef CONFIG_SCSI_DH
static ssize_t
sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
@@ -1151,6 +1187,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
&dev_attr_wwid.attr,
+ &dev_attr_blacklist.attr,
#ifdef CONFIG_SCSI_DH
&dev_attr_dh_state.attr,
&dev_attr_access_state.attr,
@@ -1234,13 +1271,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
scsi_autopm_get_device(sdev);
- error = scsi_dh_add_device(sdev);
- if (error)
- /*
- * device_handler is optional, so any error can be ignored
- */
- sdev_printk(KERN_INFO, sdev,
- "failed to add device handler: %d\n", error);
+ scsi_dh_add_device(sdev);
error = device_add(&sdev->sdev_gendev);
if (error) {
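
The new "blacklist" attribute relies on a name table generated from scsi_devinfo.h, where BLIST_FLAG_NAME() indexes each flag name by ilog2() of its bit value. A standalone illustration of that technique, using made-up example flags rather than the real BLIST_* values:

    /* Example flag values only; the real ones live in scsi_devinfo.h. */
    #define EXF_NOLUN      0x001
    #define EXF_SPARSELUN  0x040

    #define EXF_NAME(name) [ilog2(EXF_##name)] = #name
    static const char *const exf_name[] = {
            EXF_NAME(NOLUN),        /* lands at index 0 */
            EXF_NAME(SPARSELUN),    /* lands at index 6 */
    };
    #undef EXF_NAME

    /* For a set bit i, exf_name[i] (if non-NULL) is the flag's name,
     * which is exactly how sdev_show_blacklist() walks sdev_bflags. */
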
diff --git a/drivers/scsi/scsi_transport_api.h b/drivers/scsi/scsi_transport_api.h
index 934f0e62bb5c..f917766537c0 100644
--- a/drivers/scsi/scsi_transport_api.h
+++ b/drivers/scsi/scsi_transport_api.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_TRANSPORT_API_H
#define _SCSI_TRANSPORT_API_H
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 8c46a6d536af..4664024bd5d3 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,6 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7404d26895f5..f4b52b44b966 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3420,7 +3420,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
if (!shost) {
- pr_err("%s: failed. Cound not find host no %u\n",
+ pr_err("%s: failed. Could not find host no %u\n",
__func__, ev->u.get_host_stats.host_no);
return -ENODEV;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 319dff970237..736a1f4f9676 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -177,7 +177,7 @@ static int sas_smp_dispatch(struct bsg_job *job)
if (!scsi_is_host_device(job->dev))
rphy = dev_to_rphy(job->dev);
- if (!job->req->next_rq) {
+ if (!job->reply_payload.payload_len) {
dev_warn(job->dev, "space for a smp response is missing\n");
bsg_job_done(job, -EINVAL, 0);
return 0;
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 4f6f01cf9968..36f6190931bc 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -556,11 +556,8 @@ int srp_reconnect_rport(struct srp_rport *rport)
*/
shost_for_each_device(sdev, shost) {
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_OFFLINE) {
+ if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
- sysfs_notify(&sdev->sdev_gendev.kobj,
- NULL, "state");
- }
mutex_unlock(&sdev->state_mutex);
}
} else if (rport->state == SRP_RPORT_RUNNING) {
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 31273468589c..e969138051c7 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* scsicam.c - SCSI CAM support functions, use for HDIO_GETGEO, etc.
*
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d175c5c5ccf8..24fe68522716 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -231,11 +231,15 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ bool v;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_start_stop = v;
return count;
}
@@ -253,6 +257,7 @@ static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ bool v;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -262,7 +267,10 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
- sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->allow_restart = v;
return count;
}
@@ -906,6 +914,26 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
else
sdkp->zeroing_mode = SD_ZERO_WRITE;
+ if (sdkp->max_ws_blocks &&
+ sdkp->physical_block_size > logical_block_size) {
+ /*
+ * Reporting a maximum number of blocks that is not aligned
+ * to the device physical block size would cause a large write same
+ * request to be split into physically unaligned chunks by
+ * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
+ * even if the caller of these functions took care to align the
+ * large request. So make sure the maximum reported is aligned
+ * to the device physical block size. This is only an optional
+ * optimization for regular disks, but this is mandatory to
+ * avoid failure of large write same requests directed at
+ * sequential write required zones of host-managed ZBC disks.
+ */
+ sdkp->max_ws_blocks =
+ round_down(sdkp->max_ws_blocks,
+ bytes_to_logical(sdkp->device,
+ sdkp->physical_block_size));
+ }
+
out:
blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
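
As a worked example of the new alignment (numbers assumed for illustration): with a 512 B logical block size and a 4096 B physical block size, bytes_to_logical() yields 8 logical blocks per physical block, so a reported max_ws_blocks of 0xFFFFFF (16777215) is rounded down to 16777208, keeping every maximum-sized write same request aligned to physical block boundaries.
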
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 99c4dde9b6bf..320de758323e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_DISK_H
#define _SCSI_DISK_H
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 8aa54779aac1..27793b9f54c0 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -28,38 +28,18 @@
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_eh.h>
#include "sd.h"
-#include "scsi_priv.h"
-
-enum zbc_zone_type {
- ZBC_ZONE_TYPE_CONV = 0x1,
- ZBC_ZONE_TYPE_SEQWRITE_REQ,
- ZBC_ZONE_TYPE_SEQWRITE_PREF,
- ZBC_ZONE_TYPE_RESERVED,
-};
-
-enum zbc_zone_cond {
- ZBC_ZONE_COND_NO_WP,
- ZBC_ZONE_COND_EMPTY,
- ZBC_ZONE_COND_IMP_OPEN,
- ZBC_ZONE_COND_EXP_OPEN,
- ZBC_ZONE_COND_CLOSED,
- ZBC_ZONE_COND_READONLY = 0xd,
- ZBC_ZONE_COND_FULL,
- ZBC_ZONE_COND_OFFLINE,
-};
/**
- * Convert a zone descriptor to a zone struct.
+ * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
+ * @sdkp: The disk the report originated from
+ * @buf: Address of the report zone descriptor
+ * @zone: the destination zone structure
+ *
+ * All LBA-sized values are converted to 512B sector units.
*/
-static void sd_zbc_parse_report(struct scsi_disk *sdkp,
- u8 *buf,
+static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
struct blk_zone *zone)
{
struct scsi_device *sdp = sdkp->device;
@@ -82,7 +62,13 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp,
}
/**
- * Issue a REPORT ZONES scsi command.
+ * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * @sdkp: The target disk
+ * @buf: Buffer to use for the reply
+ * @buflen: the buffer size
+ * @lba: Start LBA of the report
+ *
+ * For internal use during device validation.
*/
static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
unsigned int buflen, sector_t lba)
@@ -123,6 +109,12 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
return 0;
}
+/**
+ * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
+ * @cmd: The command to setup
+ *
+ * Called from sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ */
int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -165,6 +157,14 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
+ * @scmd: The completed report zones command
+ * @good_bytes: reply size in bytes
+ *
+ * Convert all reported zone descriptors to struct blk_zone. The conversion
+ * is done in-place, directly in the request specified sg buffer.
+ */
static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
unsigned int good_bytes)
{
@@ -220,17 +220,32 @@ static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
local_irq_restore(flags);
}
+/**
+ * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
+ * @sdkp: The target disk
+ */
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
}
+/**
+ * sd_zbc_zone_no - Get the number of the zone containing a sector.
+ * @sdkp: The target disk
+ * @sector: 512B sector address contained in the zone
+ */
static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
sector_t sector)
{
return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
}
+/**
+ * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
+ * @cmd: the command to setup
+ *
+ * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
+ */
int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -263,6 +278,23 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_write_lock_zone - Write lock a sequential zone.
+ * @cmd: write command
+ *
+ * Called from sd_init_cmd() for write requests (standard write, write same or
+ * write zeroes operations). If the request target zone is not already locked,
+ * the zone is locked and BLKPREP_OK returned, allowing the request to proceed
+ * through dispatch in scsi_request_fn(). Otherwise, BLKPREP_DEFER is returned,
+ * forcing the request to wait for the zone to be unlocked, that is, for the
+ * previously issued write request targeting the same zone to complete.
+ *
+ * This is called from blk_peek_request() context with the queue lock held and
+ * before the request is removed from the scheduler. As a result, multiple
+ * contexts concurrently executing scsi_request_fn() cannot result in write
+ * sequence reordering as only a single write request per zone is allowed to
+ * proceed.
+ */
int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -285,10 +317,7 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
* Do not issue more than one write at a time per
* zone. This solves write ordering problems due to
* the unlocking of the request queue in the dispatch
- * path in the non scsi-mq case. For scsi-mq, this
- * also avoids potential write reordering when multiple
- * threads running on different CPUs write to the same
- * zone (with a synchronized sequential pattern).
+ * path in the non scsi-mq case.
*/
if (sdkp->zones_wlock &&
test_and_set_bit(zno, sdkp->zones_wlock))
@@ -300,6 +329,13 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
return BLKPREP_OK;
}
+/**
+ * sd_zbc_write_unlock_zone - Write unlock a sequential zone.
+ * @cmd: write command
+ *
+ * Called from sd_uninit_cmd(). Unlocking the request target zone will allow
+ * dispatching the next write request for the zone.
+ */
void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -314,8 +350,16 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
}
}
-void sd_zbc_complete(struct scsi_cmnd *cmd,
- unsigned int good_bytes,
+/**
+ * sd_zbc_complete - ZBC command post processing.
+ * @cmd: Completed command
+ * @good_bytes: Command reply bytes
+ * @sshdr: command sense header
+ *
+ * Called from sd_done(). Process report zones reply and handle reset zone
+ * and write commands errors.
+ */
+void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr)
{
int result = cmd->result;
@@ -360,7 +404,11 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
}
/**
- * Read zoned block device characteristics (VPD page B6).
+ * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * @sdkp: Target disk
+ * @buf: Buffer where to store the VPD page data
+ *
+ * Read VPD page B6.
*/
static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
unsigned char *buf)
@@ -375,25 +423,31 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
if (sdkp->device->type != TYPE_ZBC) {
/* Host-aware */
sdkp->urswrz = 1;
- sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]);
- sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]);
+ sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
+ sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
sdkp->zones_max_open = 0;
} else {
/* Host-managed */
sdkp->urswrz = buf[4] & 1;
sdkp->zones_optimal_open = 0;
sdkp->zones_optimal_nonseq = 0;
- sdkp->zones_max_open = get_unaligned_be64(&buf[16]);
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
}
return 0;
}
/**
- * Check reported capacity.
+ * sd_zbc_check_capacity - Check reported capacity.
+ * @sdkp: Target disk
+ * @buf: Buffer to use for commands
+ *
+ * A ZBC drive may report only the capacity of the first conventional zones at
+ * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
+ * Check this here. If the disk reported only its conventional zones capacity,
+ * get the total capacity by doing a report zones.
*/
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
- unsigned char *buf)
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
{
sector_t lba;
int ret;
@@ -421,8 +475,15 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
return 0;
}
-#define SD_ZBC_BUF_SIZE 131072
+#define SD_ZBC_BUF_SIZE 131072U
+/**
+ * sd_zbc_check_zone_size - Check the device zone sizes
+ * @sdkp: Target disk
+ *
+ * Check that all zones of the device are equal. The last zone can however
+ * be smaller. The zone size must also be a power of two number of LBAs.
+ */
static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
u64 zone_blocks;
@@ -465,10 +526,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
/* Parse REPORT ZONES header */
list_length = get_unaligned_be32(&buf[0]) + 64;
rec = buf + 64;
- if (list_length < SD_ZBC_BUF_SIZE)
- buf_len = list_length;
- else
- buf_len = SD_ZBC_BUF_SIZE;
+ buf_len = min(list_length, SD_ZBC_BUF_SIZE);
/* Parse zone descriptors */
while (rec < buf + buf_len) {
@@ -523,6 +581,7 @@ out:
}
sdkp->zone_blocks = zone_blocks;
+ sdkp->zone_shift = ilog2(zone_blocks);
return 0;
}
@@ -530,13 +589,15 @@ out:
static int sd_zbc_setup(struct scsi_disk *sdkp)
{
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
/* chunk_sectors indicates the zone size */
blk_queue_chunk_sectors(sdkp->disk->queue,
logical_to_sectors(sdkp->device, sdkp->zone_blocks));
- sdkp->zone_shift = ilog2(sdkp->zone_blocks);
- sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
- sdkp->nr_zones++;
+ sdkp->nr_zones =
+ round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
if (!sdkp->zones_wlock) {
sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
@@ -549,8 +610,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
return 0;
}
-int sd_zbc_read_zones(struct scsi_disk *sdkp,
- unsigned char *buf)
+int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
int ret;
@@ -561,7 +621,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
*/
return 0;
-
/* Get zoned block device characteristics */
ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
if (ret)
@@ -598,10 +657,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
if (ret)
goto err;
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
return 0;
err:
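
The nr_zones computation above now counts a trailing partial zone via round_up(). For example (values assumed for illustration), with zone_blocks = 65536 and a capacity of 1000000 logical blocks, round_up(1000000, 65536) = 1048576 and 1048576 >> 16 = 16, i.e. 15 full zones plus one smaller final zone.
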
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
index e4e1dccd1f2f..201a536688de 100644
--- a/drivers/scsi/sense_codes.h
+++ b/drivers/scsi/sense_codes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* The canonical list of T10 Additional Sense Codes is available at:
* http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index aa28874e8fb9..f098877eed4a 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -217,7 +217,7 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
if (sfp->parentdp->device->type == TYPE_SCANNER)
return 0;
- return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
+ return blk_verify_command(cmd, filp->f_mode);
}
static int
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 83bdbd84eb01..b2880c7709e6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.1.2-125"
+#define DRIVER_VERSION "1.1.2-126"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 2
-#define DRIVER_REVISION 125
+#define DRIVER_REVISION 126
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -1078,9 +1078,9 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
bad_raid_map:
dev_warn(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d %s\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun, err_msg);
+ "logical device %08x%08x %s\n",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]), err_msg);
return -EINVAL;
}
@@ -2860,11 +2860,12 @@ out:
#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
-static void pqi_heartbeat_timer_handler(unsigned long data)
+static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+ struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
+ heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
@@ -2902,8 +2903,6 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
ctrl_info->heartbeat_timer.expires =
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
- ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
- ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
add_timer(&ctrl_info->heartbeat_timer);
}
@@ -6465,7 +6464,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
- init_timer(&ctrl_info->heartbeat_timer);
+ timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
sema_init(&ctrl_info->sync_request_sem,
@@ -6926,6 +6925,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1302)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1303)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
},
{
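
The heartbeat timer conversion above follows the generic timer API migration: the callback takes a struct timer_list pointer and recovers its container with from_timer(), while timer_setup() replaces init_timer() plus the manual .function/.data assignments. A minimal sketch of the pattern with hypothetical names:

    #include <linux/timer.h>

    struct my_ctrl {
            struct timer_list heartbeat;    /* timer embedded in its owner */
    };

    static void my_heartbeat_fn(struct timer_list *t)
    {
            /* Recover the owning structure from the timer pointer. */
            struct my_ctrl *ctrl = from_timer(ctrl, t, heartbeat);

            /* ... check controller health, then re-arm if still needed ... */
            mod_timer(&ctrl->heartbeat, jiffies + 10 * HZ);
    }

    static void my_ctrl_init(struct my_ctrl *ctrl)
    {
            timer_setup(&ctrl->heartbeat, my_heartbeat_fn, 0);
    }
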
diff --git a/drivers/scsi/snic/Makefile b/drivers/scsi/snic/Makefile
index ef7c0dd47f40..41546e3cb701 100644
--- a/drivers/scsi/snic/Makefile
+++ b/drivers/scsi/snic/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_SNIC) += snic.o
snic-y := \
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 1de33719ad8e..a2bb7b8bace5 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* sr.h by David Giller
* CD-ROM disk driver header file
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 4610c8c5693f..2a21f2d48592 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 11a238cb2222..e3b0ce25162b 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* -*-linux-c-*-
* vendor-specific code for SCSI CD-ROM's goes here.
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 94e402ed30f6..b141d7641a2e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4920,11 +4920,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
- res = get_user_pages_unlocked(
- uaddr,
- nr_pages,
- pages,
- rw == READ ? FOLL_WRITE : 0); /* don't force */
+ res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 8c732c8de015..95d2e7a7988d 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ST_H
#define _ST_H
diff --git a/drivers/scsi/st_options.h b/drivers/scsi/st_options.h
index d2f947935554..2b6cabd7b6aa 100644
--- a/drivers/scsi/st_options.h
+++ b/drivers/scsi/st_options.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
The compile-time configurable defaults for the Linux SCSI tape driver.
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5e7200f05873..1b06cf0375dc 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -486,6 +486,9 @@ struct hv_host_device {
unsigned int port;
unsigned char path;
unsigned char target;
+ struct workqueue_struct *handle_error_wq;
+ struct work_struct host_scan_work;
+ struct Scsi_Host *host;
};
struct storvsc_scan_work {
@@ -514,13 +517,12 @@ done:
static void storvsc_host_scan(struct work_struct *work)
{
- struct storvsc_scan_work *wrk;
struct Scsi_Host *host;
struct scsi_device *sdev;
+ struct hv_host_device *host_device =
+ container_of(work, struct hv_host_device, host_scan_work);
- wrk = container_of(work, struct storvsc_scan_work, work);
- host = wrk->host;
-
+ host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
* currently known devices have been hot removed. We issue a
@@ -540,8 +542,6 @@ static void storvsc_host_scan(struct work_struct *work)
* Now scan the host to discover LUNs that may have been added.
*/
scsi_scan_host(host);
-
- kfree(wrk);
}
static void storvsc_remove_lun(struct work_struct *work)
@@ -922,6 +922,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
{
struct storvsc_scan_work *wrk;
void (*process_err_fn)(struct work_struct *work);
+ struct hv_host_device *host_dev = shost_priv(host);
bool do_work = false;
switch (SRB_STATUS(vm_srb->srb_status)) {
@@ -988,7 +989,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
wrk->lun = vm_srb->lun;
wrk->tgt_id = vm_srb->target_id;
INIT_WORK(&wrk->work, process_err_fn);
- schedule_work(&wrk->work);
+ queue_work(host_dev->handle_error_wq, &wrk->work);
}
@@ -1116,8 +1117,7 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
struct vstor_packet *vstor_packet,
struct storvsc_cmd_request *request)
{
- struct storvsc_scan_work *work;
-
+ struct hv_host_device *host_dev;
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(stor_device, vstor_packet, request);
@@ -1125,13 +1125,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
case VSTOR_OPERATION_REMOVE_DEVICE:
case VSTOR_OPERATION_ENUMERATE_BUS:
- work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
- if (!work)
- return;
-
- INIT_WORK(&work->work, storvsc_host_scan);
- work->host = stor_device->host;
- schedule_work(&work->work);
+ host_dev = shost_priv(stor_device->host);
+ queue_work(
+ host_dev->handle_error_wq, &host_dev->host_scan_work);
break;
case VSTOR_OPERATION_FCHBA_DATA:
@@ -1744,6 +1740,7 @@ static int storvsc_probe(struct hv_device *device,
host_dev->port = host->host_no;
host_dev->dev = device;
+ host_dev->host = host;
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
@@ -1803,10 +1800,20 @@ static int storvsc_probe(struct hv_device *device,
if (stor_device->num_sc != 0)
host->nr_hw_queues = stor_device->num_sc + 1;
+ /*
+ * Set the error handler work queue.
+ */
+ host_dev->handle_error_wq =
+ alloc_ordered_workqueue("storvsc_error_wq_%d",
+ WQ_MEM_RECLAIM,
+ host->host_no);
+ if (!host_dev->handle_error_wq)
+ goto err_out2;
+ INIT_WORK(&host_dev->host_scan_work, storvsc_host_scan);
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
- goto err_out2;
+ goto err_out3;
if (!dev_is_ide) {
scsi_scan_host(host);
@@ -1815,7 +1822,7 @@ static int storvsc_probe(struct hv_device *device,
device->dev_instance.b[4]);
ret = scsi_add_device(host, 0, target, 0);
if (ret)
- goto err_out3;
+ goto err_out4;
}
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
if (host->transportt == fc_transport_template) {
@@ -1827,14 +1834,17 @@ static int storvsc_probe(struct hv_device *device,
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
if (!stor_device->rport)
- goto err_out3;
+ goto err_out4;
}
#endif
return 0;
-err_out3:
+err_out4:
scsi_remove_host(host);
+err_out3:
+ destroy_workqueue(host_dev->handle_error_wq);
+
err_out2:
/*
* Once we have connected with the host, we would need to
@@ -1858,6 +1868,7 @@ static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
+ struct hv_host_device *host_dev = shost_priv(host);
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
if (host->transportt == fc_transport_template) {
@@ -1865,6 +1876,7 @@ static int storvsc_remove(struct hv_device *dev)
fc_remove_host(host);
}
#endif
+ destroy_workqueue(host_dev->handle_error_wq);
scsi_remove_host(host);
storvsc_dev_remove(dev);
scsi_host_put(host);
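
storvsc now funnels error handling and host rescans through a per-host ordered workqueue created with WQ_MEM_RECLAIM, so the work is strictly serialized and backed by a rescuer thread. A minimal sketch of that pattern with hypothetical names:

    #include <linux/workqueue.h>

    struct my_host_priv {
            struct workqueue_struct *err_wq;
            struct work_struct scan_work;
    };

    static void my_scan_work(struct work_struct *w)
    {
            /* rescan the host, remove dead LUNs, etc. */
    }

    static int my_host_setup(struct my_host_priv *p, unsigned int host_no)
    {
            p->err_wq = alloc_ordered_workqueue("my_err_wq_%u",
                                                WQ_MEM_RECLAIM, host_no);
            if (!p->err_wq)
                    return -ENOMEM;
            INIT_WORK(&p->scan_work, my_scan_work);
            return 0;
    }

    /* Later: queue_work(p->err_wq, &p->scan_work);
     * and destroy_workqueue(p->err_wq) on teardown. */
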
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index d32e3ba8863e..791a2182de53 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -565,9 +565,9 @@ static irqreturn_t sym53c8xx_intr(int irq, void *dev_id)
/*
* Linux entry point of the timer handler
*/
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
{
- struct sym_hcb *np = (struct sym_hcb *)npref;
+ struct sym_hcb *np = from_timer(np, t, s.timer);
unsigned long flags;
spin_lock_irqsave(np->s.host->host_lock, flags);
@@ -1351,9 +1351,7 @@ static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
/*
* Start the timer daemon
*/
- init_timer(&np->s.timer);
- np->s.timer.data = (unsigned long) np;
- np->s.timer.function = sym53c8xx_timer;
+ timer_setup(&np->s.timer, sym53c8xx_timer, 0);
np->s.lasttime=0;
sym_timer (np);
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 6e77cb0bfee9..9310c6c83041 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index dc03e47f7c58..3a8bc6d9cb5b 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -26,7 +26,7 @@
*/
static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
@@ -90,7 +90,7 @@ static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
{
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
@@ -147,7 +147,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
int connected_tx_lanes = 0;
int ret = 0;
- const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
@@ -158,7 +158,7 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
DME_LOCAL },
};
- const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
@@ -222,7 +222,7 @@ static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
{
int ret = 0;
- const struct ufshcd_dme_attr_val setup_attrs[] = {
+ static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index c87d770b519a..2b38db2eeafa 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -273,15 +273,18 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ if (is_rate_B)
+ phy_set_mode(phy, PHY_MODE_UFS_HS_B);
+
/* Assert PHY reset and apply PHY calibration values */
ufs_qcom_assert_reset(hba);
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
-
+ /* phy initialization - calibrate the phy */
+ ret = phy_init(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
goto out;
}
@@ -294,21 +297,22 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* voltage, current to settle down before starting serdes.
*/
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_start_serdes(phy);
+
+ /* power on phy - start serdes and phy's power and clocks */
+ ret = phy_power_on(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret);
- goto out;
+ goto out_disable_phy;
}
- ret = ufs_qcom_phy_is_pcs_ready(phy);
- if (ret)
- dev_err(hba->dev,
- "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
- __func__, ret);
-
ufs_qcom_select_unipro_mode(host);
+ return 0;
+
+out_disable_phy:
+ ufs_qcom_assert_reset(hba);
+ phy_exit(phy);
out:
return ret;
}
@@ -1273,14 +1277,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
- phy_init(host->generic_phy);
- err = phy_power_on(host->generic_phy);
- if (err)
- goto out_unregister_bus;
-
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_variant_clear;
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
@@ -1301,10 +1300,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
-out_disable_phy:
- phy_power_off(host->generic_phy);
-out_unregister_bus:
- phy_exit(host->generic_phy);
out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
@@ -1458,7 +1453,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UFS_BIT(17);
+ reg |= UTP_DBG_RAMS_EN;
ufshcd_writel(hba, reg, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
@@ -1471,7 +1466,7 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
/* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
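
The ufs-qcom change above replaces the vendor-specific PHY calls with the generic PHY consumer API; the expected call order for such a consumer is roughly phy_set_mode(), phy_init(), phy_power_on() on the way up, and phy_power_off(), phy_exit() on the way down. A minimal sketch under those assumptions:

    #include <linux/phy/phy.h>

    static int example_phy_bringup(struct phy *phy, bool rate_b)
    {
            int ret;

            if (rate_b)
                    phy_set_mode(phy, PHY_MODE_UFS_HS_B);

            ret = phy_init(phy);            /* calibrate the PHY */
            if (ret)
                    return ret;

            ret = phy_power_on(phy);        /* start serdes, clocks, power */
            if (ret)
                    phy_exit(phy);

            return ret;
    }
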
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 076f52813a4c..295f4bef6a0e 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -92,7 +92,8 @@ enum {
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL UFS_BIT(0)
+#define QUNIPRO_SEL 0x1
+#define UTP_DBG_RAMS_EN 0x20000
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
@@ -213,13 +214,13 @@ struct ufs_qcom_host {
* Note: By default this capability will be kept enabled if host
* controller supports the QUniPro mode.
*/
- #define UFS_QCOM_CAP_QUNIPRO UFS_BIT(0)
+ #define UFS_QCOM_CAP_QUNIPRO 0x1
/*
* Set this capability if host controller can retain the secure
* configuration even after UFS controller core power collapse.
*/
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE UFS_BIT(1)
+ #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
u32 caps;
struct phy *generic_phy;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 794a4600e952..011c3369082c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -385,6 +385,8 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
+ tag, ktime_to_us(lrbp->compl_time_stamp));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@@ -1746,6 +1748,7 @@ static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2195,10 +2198,11 @@ static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
@@ -2222,10 +2226,11 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u32 upiu_flags;
int ret = 0;
- if (hba->ufs_version == UFSHCI_VERSION_20)
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- else
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ else
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
@@ -3586,7 +3591,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
+ "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
@@ -4627,6 +4632,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
}
if (ufshcd_is_clkscaling_supported(hba))
hba->clk_scaling.active_reqs--;
+
+ lrbp->compl_time_stamp = ktime_get();
}
/* clear corresponding bits of completed commands */
@@ -5998,25 +6005,22 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
- sdev_boot = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot)) {
- ret = PTR_ERR(sdev_boot);
- goto remove_sdev_ufs_device;
- }
- scsi_device_put(sdev_boot);
-
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
- goto remove_sdev_boot;
+ goto remove_sdev_ufs_device;
}
scsi_device_put(sdev_rpmb);
+
+ sdev_boot = __scsi_add_device(hba->host, 0, 0,
+ ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+ if (IS_ERR(sdev_boot))
+ dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
+ else
+ scsi_device_put(sdev_boot);
goto out;
-remove_sdev_boot:
- scsi_remove_device(sdev_boot);
remove_sdev_ufs_device:
scsi_remove_device(hba->sdev_ufs_device);
out:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index cdc8bd05f7df..1332e544da92 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -166,6 +166,7 @@ struct ufs_pm_lvl_states {
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
* @issue_time_stamp: time stamp for debug purposes
+ * @compl_time_stamp: time stamp for statistics
* @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
@@ -189,6 +190,7 @@ struct ufshcd_lrb {
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
ktime_t issue_time_stamp;
+ ktime_t compl_time_stamp;
bool req_abort_skip;
};
@@ -544,13 +546,13 @@ struct ufs_hba {
bool is_irq_enabled;
/* Interrupt aggregation support is broken */
- #define UFSHCD_QUIRK_BROKEN_INTR_AGGR UFS_BIT(0)
+ #define UFSHCD_QUIRK_BROKEN_INTR_AGGR 0x1
/*
* delay before each dme command is required as the unipro
* layer has shown instabilities
*/
- #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS UFS_BIT(1)
+ #define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS 0x2
/*
* If UFS host controller is having issue in processing LCC (Line
@@ -559,21 +561,21 @@ struct ufs_hba {
* the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
* attribute of device to 0).
*/
- #define UFSHCD_QUIRK_BROKEN_LCC UFS_BIT(2)
+ #define UFSHCD_QUIRK_BROKEN_LCC 0x4
/*
* The attribute PA_RXHSUNTERMCAP specifies whether or not the
* inbound Link supports unterminated line in HS mode. Setting this
* attribute to 1 fixes moving to HS gear.
*/
- #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP UFS_BIT(3)
+ #define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP 0x8
/*
* This quirk needs to be enabled if the host controller only allows
* accessing the peer dme attributes in AUTO mode (FAST AUTO or
* SLOW AUTO).
*/
- #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE UFS_BIT(4)
+ #define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE 0x10
/*
* This quirk needs to be enabled if the host controller doesn't
@@ -581,13 +583,13 @@ struct ufs_hba {
* is enabled, standard UFS host driver will call the vendor specific
* ops (get_ufs_hci_version) to get the correct version.
*/
- #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ #define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION 0x20
/*
* This quirk needs to be enabled if the host controller regards
* resolution of the values of PRDTO and PRDTL in UTRD as byte.
*/
- #define UFSHCD_QUIRK_PRDT_BYTE_GRAN UFS_BIT(7)
+ #define UFSHCD_QUIRK_PRDT_BYTE_GRAN 0x80
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index f60145d4a66e..277752b0fc6f 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -119,22 +119,23 @@ enum {
#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-#define UFS_BIT(x) (1L << (x))
-
-#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
-#define UIC_DME_END_PT_RESET UFS_BIT(1)
-#define UIC_ERROR UFS_BIT(2)
-#define UIC_TEST_MODE UFS_BIT(3)
-#define UIC_POWER_MODE UFS_BIT(4)
-#define UIC_HIBERNATE_EXIT UFS_BIT(5)
-#define UIC_HIBERNATE_ENTER UFS_BIT(6)
-#define UIC_LINK_LOST UFS_BIT(7)
-#define UIC_LINK_STARTUP UFS_BIT(8)
-#define UTP_TASK_REQ_COMPL UFS_BIT(9)
-#define UIC_COMMAND_COMPL UFS_BIT(10)
-#define DEVICE_FATAL_ERROR UFS_BIT(11)
-#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
-#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+/*
+ * IS - Interrupt Status - 20h
+ */
+#define UTP_TRANSFER_REQ_COMPL 0x1
+#define UIC_DME_END_PT_RESET 0x2
+#define UIC_ERROR 0x4
+#define UIC_TEST_MODE 0x8
+#define UIC_POWER_MODE 0x10
+#define UIC_HIBERNATE_EXIT 0x20
+#define UIC_HIBERNATE_ENTER 0x40
+#define UIC_LINK_LOST 0x80
+#define UIC_LINK_STARTUP 0x100
+#define UTP_TASK_REQ_COMPL 0x200
+#define UIC_COMMAND_COMPL 0x400
+#define DEVICE_FATAL_ERROR 0x800
+#define CONTROLLER_FATAL_ERROR 0x10000
+#define SYSTEM_BUS_FATAL_ERROR 0x20000
#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
UIC_HIBERNATE_EXIT |\
@@ -152,12 +153,10 @@ enum {
SYSTEM_BUS_FATAL_ERROR)
/* HCS - Host Controller Status 30h */
-#define DEVICE_PRESENT UFS_BIT(0)
-#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
-#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
-#define UIC_COMMAND_READY UFS_BIT(3)
-#define HOST_ERROR_INDICATOR UFS_BIT(4)
-#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
+#define DEVICE_PRESENT 0x1
+#define UTP_TRANSFER_REQ_LIST_READY 0x2
+#define UTP_TASK_REQ_LIST_READY 0x4
+#define UIC_COMMAND_READY 0x8
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -174,46 +173,47 @@ enum {
};
/* HCE - Host Controller Enable 34h */
-#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CONTROLLER_ENABLE 0x1
#define CONTROLLER_DISABLE 0x0
-#define CRYPTO_GENERAL_ENABLE UFS_BIT(1)
+#define CRYPTO_GENERAL_ENABLE 0x2
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
-#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
-#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
+#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
-#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
+#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
/* UECT - Host UIC Error Code Transport Layer 44h */
-#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
+#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
/* UECDME - Host UIC Error Code DME 48h */
-#define UIC_DME_ERROR UFS_BIT(31)
+#define UIC_DME_ERROR 0x80000000
#define UIC_DME_ERROR_CODE_MASK 0x1
+/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
-#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
-#define INT_AGGR_STATUS_BIT UFS_BIT(20)
-#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
-#define INT_AGGR_ENABLE UFS_BIT(31)
+#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
+#define INT_AGGR_STATUS_BIT 0x100000
+#define INT_AGGR_PARAM_WRITE 0x1000000
+#define INT_AGGR_ENABLE 0x80000000
/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
-#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
-#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
+#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
/* UICCMD - UIC Command */
#define COMMAND_OPCODE_MASK 0xFF
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index 9c6dd45f95f5..0455b1633ca7 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _WD719X_H_
#define _WD719X_H_
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 114203f32843..c53262835e85 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the SuperH specific drivers.
#
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 6ce7f0d26dcf..fa73c173b56a 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/sh_intc.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index bec81c2404f7..7525039d812c 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -300,7 +300,7 @@ static void maple_send(void)
mutex_unlock(&maple_wlist_lock);
if (maple_packets > 0) {
for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
- dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
+ sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
@@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work)
list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
mdev = mq->dev;
recvbuf = mq->recvbuf->buf;
- dma_cache_sync(&mdev->dev, recvbuf, 0x400,
- DMA_FROM_DEVICE);
+ sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
code = recvbuf[0];
kfree(mq->sendbuf);
list_del_init(&mq->list);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2fcaff864584..deecb16e7256 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Kernel SOC specific device drivers.
#
@@ -10,7 +11,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
+obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += amlogic/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-y += renesas/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 22acf064531f..b04f6e4aedbc 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -9,4 +9,25 @@ config MESON_GX_SOCINFO
Say yes to support decoding of Amlogic Meson GX SoC family
information about the type, package and version.
+config MESON_GX_PM_DOMAINS
+ bool "Amlogic Meson GX Power Domains driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on PM && OF
+ default ARCH_MESON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say yes to expose Amlogic Meson GX Power Domains as
+ Generic Power Domains.
+
+config MESON_MX_SOCINFO
+ bool "Amlogic Meson MX SoC Information driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
+ select SOC_BUS
+ help
+ Say yes to support decoding of Amlogic Meson6, Meson8,
+ Meson8b and Meson8m2 SoC family information about the type
+ and version.
+
endmenu
diff --git a/drivers/soc/amlogic/Makefile b/drivers/soc/amlogic/Makefile
index 3e85fc462c21..8fa321893928 100644
--- a/drivers/soc/amlogic/Makefile
+++ b/drivers/soc/amlogic/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
+obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
+obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
new file mode 100644
index 000000000000..2bdeebc48901
--- /dev/null
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+/* AO Offsets */
+
+#define AO_RTI_GEN_PWR_SLEEP0 (0x3a << 2)
+
+#define GEN_PWR_VPU_HDMI BIT(8)
+#define GEN_PWR_VPU_HDMI_ISO BIT(9)
+
+/* HHI Offsets */
+
+#define HHI_MEM_PD_REG0 (0x40 << 2)
+#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
+#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+
+struct meson_gx_pwrc_vpu {
+ struct generic_pm_domain genpd;
+ struct regmap *regmap_ao;
+ struct regmap *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+};
+
+static inline
+struct meson_gx_pwrc_vpu *genpd_to_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct meson_gx_pwrc_vpu, genpd);
+}
+
+static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
+ udelay(20);
+
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x2 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x2 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), BIT(i));
+ udelay(5);
+ }
+ udelay(20);
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
+
+ msleep(20);
+
+ clk_disable_unprepare(pd->vpu_clk);
+ clk_disable_unprepare(pd->vapb_clk);
+
+ return 0;
+}
+
+static int meson_gx_pwrc_vpu_setup_clk(struct meson_gx_pwrc_vpu *pd)
+{
+ int ret;
+
+ ret = clk_prepare_enable(pd->vpu_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pd->vapb_clk);
+ if (ret)
+ clk_disable_unprepare(pd->vpu_clk);
+
+ return ret;
+}
+
+static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int ret;
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, 0);
+ udelay(20);
+
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x2 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x2 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), 0);
+ udelay(5);
+ }
+ udelay(20);
+
+ ret = reset_control_assert(pd->rstc);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, 0);
+
+ ret = reset_control_deassert(pd->rstc);
+ if (ret)
+ return ret;
+
+ ret = meson_gx_pwrc_vpu_setup_clk(pd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool meson_gx_pwrc_vpu_get_power(struct meson_gx_pwrc_vpu *pd)
+{
+ u32 reg;
+
+ regmap_read(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0, &reg);
+
+ return (reg & GEN_PWR_VPU_HDMI);
+}
+
+static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
+ .genpd = {
+ .name = "vpu_hdmi",
+ .power_off = meson_gx_pwrc_vpu_power_off,
+ .power_on = meson_gx_pwrc_vpu_power_on,
+ },
+};
+
+static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap_ao, *regmap_hhi;
+ struct reset_control *rstc;
+ struct clk *vpu_clk;
+ struct clk *vapb_clk;
+ bool powered_off;
+ int ret;
+
+ regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
+ if (IS_ERR(regmap_ao)) {
+ dev_err(&pdev->dev, "failed to get regmap\n");
+ return PTR_ERR(regmap_ao);
+ }
+
+ regmap_hhi = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "amlogic,hhi-sysctrl");
+ if (IS_ERR(regmap_hhi)) {
+ dev_err(&pdev->dev, "failed to get HHI regmap\n");
+ return PTR_ERR(regmap_hhi);
+ }
+
+ rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ if (IS_ERR(rstc)) {
+ dev_err(&pdev->dev, "failed to get reset lines\n");
+ return PTR_ERR(rstc);
+ }
+
+ vpu_clk = devm_clk_get(&pdev->dev, "vpu");
+ if (IS_ERR(vpu_clk)) {
+ dev_err(&pdev->dev, "vpu clock request failed\n");
+ return PTR_ERR(vpu_clk);
+ }
+
+ vapb_clk = devm_clk_get(&pdev->dev, "vapb");
+ if (IS_ERR(vapb_clk)) {
+ dev_err(&pdev->dev, "vapb clock request failed\n");
+ return PTR_ERR(vapb_clk);
+ }
+
+ vpu_hdmi_pd.regmap_ao = regmap_ao;
+ vpu_hdmi_pd.regmap_hhi = regmap_hhi;
+ vpu_hdmi_pd.rstc = rstc;
+ vpu_hdmi_pd.vpu_clk = vpu_clk;
+ vpu_hdmi_pd.vapb_clk = vapb_clk;
+
+ powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+
+ /* If already powered, sync the clock states */
+ if (!powered_off) {
+ ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd);
+ if (ret)
+ return ret;
+ }
+
+ pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov,
+ powered_off);
+
+ return of_genpd_add_provider_simple(pdev->dev.of_node,
+ &vpu_hdmi_pd.genpd);
+}
+
+static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
+{
+ meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+}
+
+static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
+ { .compatible = "amlogic,meson-gx-pwrc-vpu" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver meson_gx_pwrc_vpu_driver = {
+ .probe = meson_gx_pwrc_vpu_probe,
+ .shutdown = meson_gx_pwrc_vpu_shutdown,
+ .driver = {
+ .name = "meson_gx_pwrc_vpu",
+ .of_match_table = meson_gx_pwrc_vpu_match_table,
+ },
+};
+builtin_platform_driver(meson_gx_pwrc_vpu_driver);
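
For reference, a minimal consumer sketch (not part of this patch; the function name below is hypothetical) of how the power_on/power_off callbacks registered by this provider get exercised: a device node whose power-domains property points at this provider has the "vpu_hdmi" domain attached by the driver core before probe, and runtime PM references then drive the domain on and off.

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	/* Hypothetical consumer of the "vpu_hdmi" domain -- illustrative only */
	static int example_vpu_consumer_probe(struct platform_device *pdev)
	{
		int ret;

		/* The genpd core has already attached the domain before probe runs */
		pm_runtime_enable(&pdev->dev);

		/* Powers the domain on via meson_gx_pwrc_vpu_power_on() if it was off */
		ret = pm_runtime_get_sync(&pdev->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(&pdev->dev);
			pm_runtime_disable(&pdev->dev);
			return ret;
		}

		/* ... set up the VPU while the domain is guaranteed to be powered ... */

		pm_runtime_put(&pdev->dev);
		return 0;
	}
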
diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c
new file mode 100644
index 000000000000..7bfff5ff22a2
--- /dev/null
+++ b/drivers/soc/amlogic/meson-mx-socinfo.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define MESON_SOCINFO_MAJOR_VER_MESON6 0x16
+#define MESON_SOCINFO_MAJOR_VER_MESON8 0x19
+#define MESON_SOCINFO_MAJOR_VER_MESON8B 0x1b
+
+#define MESON_MX_ASSIST_HW_REV 0x14c
+
+#define MESON_MX_ANALOG_TOP_METAL_REVISION 0x0
+
+#define MESON_MX_BOOTROM_MISC_VER 0x4
+
+static const char *meson_mx_socinfo_revision(unsigned int major_ver,
+ unsigned int misc_ver,
+ unsigned int metal_rev)
+{
+ unsigned int minor_ver;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ minor_ver = 0xa;
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ major_ver = 0x1d;
+
+ if (metal_rev == 0x11111111 || metal_rev == 0x11111112)
+ minor_ver = 0xa;
+ else if (metal_rev == 0x11111113)
+ minor_ver = 0xb;
+ else if (metal_rev == 0x11111133)
+ minor_ver = 0xc;
+ else
+ minor_ver = 0xd;
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ if (metal_rev == 0x11111111)
+ minor_ver = 0xa;
+ else
+ minor_ver = 0xb;
+
+ break;
+
+ default:
+ minor_ver = 0x0;
+ break;
+ }
+
+ return kasprintf(GFP_KERNEL, "Rev%X (%x - 0:%X)", minor_ver, major_ver,
+ misc_ver);
+}
+
+static const char *meson_mx_socinfo_soc_id(unsigned int major_ver,
+ unsigned int metal_rev)
+{
+ const char *soc_id;
+
+ switch (major_ver) {
+ case MESON_SOCINFO_MAJOR_VER_MESON6:
+ soc_id = "Meson6 (AML8726-MX)";
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8:
+ if (metal_rev == 0x11111112)
+ soc_id = "Meson8m2 (S812)";
+ else
+ soc_id = "Meson8 (S802)";
+
+ break;
+
+ case MESON_SOCINFO_MAJOR_VER_MESON8B:
+ soc_id = "Meson8b (S805)";
+ break;
+
+ default:
+ soc_id = "Unknown";
+ break;
+ }
+
+ return kstrdup_const(soc_id, GFP_KERNEL);
+}
+
+static const struct of_device_id meson_mx_socinfo_analog_top_ids[] = {
+ { .compatible = "amlogic,meson8-analog-top", },
+ { .compatible = "amlogic,meson8b-analog-top", },
+ { /* sentinel */ }
+};
+
+int __init meson_mx_socinfo_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ struct regmap *assist_regmap, *bootrom_regmap, *analog_top_regmap;
+ unsigned int major_ver, misc_ver, metal_rev = 0;
+ int ret;
+
+ assist_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-assist");
+ if (IS_ERR(assist_regmap))
+ return PTR_ERR(assist_regmap);
+
+ bootrom_regmap =
+ syscon_regmap_lookup_by_compatible("amlogic,meson-mx-bootrom");
+ if (IS_ERR(bootrom_regmap))
+ return PTR_ERR(bootrom_regmap);
+
+ np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids);
+ if (np) {
+ analog_top_regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(analog_top_regmap))
+ return PTR_ERR(analog_top_regmap);
+
+ ret = regmap_read(analog_top_regmap,
+ MESON_MX_ANALOG_TOP_METAL_REVISION,
+ &metal_rev);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(assist_regmap, MESON_MX_ASSIST_HW_REV, &major_ver);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(bootrom_regmap, MESON_MX_BOOTROM_MISC_VER,
+ &misc_ver);
+ if (ret < 0)
+ return ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Amlogic Meson";
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &soc_dev_attr->machine);
+ of_node_put(np);
+
+ soc_dev_attr->revision = meson_mx_socinfo_revision(major_ver, misc_ver,
+ metal_rev);
+ soc_dev_attr->soc_id = meson_mx_socinfo_soc_id(major_ver, metal_rev);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree_const(soc_dev_attr->revision);
+ kfree_const(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr);
+ return PTR_ERR(soc_dev);
+ }
+
+ dev_info(soc_device_to_device(soc_dev), "Amlogic %s %s detected\n",
+ soc_dev_attr->soc_id, soc_dev_attr->revision);
+
+ return 0;
+}
+device_initcall(meson_mx_socinfo_init);
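
A short worked example of the decoding above, with values taken directly from the switch statements (the helper is illustrative only and would have to live in this file, since it calls the static decoders): a chip whose ASSIST_HW_REV register reads 0x19 and whose metal revision reads 0x11111112 is reported as a Meson8m2.

	/* Illustrative only: expected decoder output for a Meson8m2 */
	static void __maybe_unused meson_mx_socinfo_example(void)
	{
		/* major_ver = 0x19 (MESON8), metal_rev = 0x11111112 -> Meson8m2 */
		const char *soc_id = meson_mx_socinfo_soc_id(0x19, 0x11111112);
		/* soc_id == "Meson8m2 (S812)" */

		const char *rev = meson_mx_socinfo_revision(0x19, 0x0, 0x11111112);
		/* major is remapped to 0x1d, minor to 0xa: rev == "RevA (1d - 0:0)" */

		kfree_const(soc_id);
		kfree_const(rev);
	}
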
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index c1363c83c352..4dd03b099c89 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -72,6 +72,8 @@ static const struct at91_soc __initconst socs[] = {
"sama5d21", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH,
"sama5d22", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D225C_D1M_EXID_MATCH,
+ "sama5d225c 16MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH,
"sama5d23", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH,
@@ -84,10 +86,16 @@ static const struct at91_soc __initconst socs[] = {
"sama5d27", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH,
"sama5d27", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D1G_EXID_MATCH,
+ "sama5d27c 128MiB SiP", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27C_D5M_EXID_MATCH,
+ "sama5d27c 64MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH,
"sama5d28", "sama5d2"),
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH,
"sama5d28", "sama5d2"),
+ AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28C_D1G_EXID_MATCH,
+ "sama5d28c 128MiB SiP", "sama5d2"),
AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
"sama5d31", "sama5d3"),
AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index a90bd5b0ef8f..94cd5d1ab502 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -64,14 +64,18 @@ at91_soc_init(const struct at91_soc *socs);
#define SAMA5D2_CIDR_MATCH 0x0a5c08c0
#define SAMA5D21CU_EXID_MATCH 0x0000005a
+#define SAMA5D225C_D1M_EXID_MATCH 0x00000053
#define SAMA5D22CU_EXID_MATCH 0x00000059
#define SAMA5D22CN_EXID_MATCH 0x00000069
#define SAMA5D23CU_EXID_MATCH 0x00000058
#define SAMA5D24CX_EXID_MATCH 0x00000004
#define SAMA5D24CU_EXID_MATCH 0x00000014
#define SAMA5D26CU_EXID_MATCH 0x00000012
+#define SAMA5D27C_D1G_EXID_MATCH 0x00000033
+#define SAMA5D27C_D5M_EXID_MATCH 0x00000032
#define SAMA5D27CU_EXID_MATCH 0x00000011
#define SAMA5D27CN_EXID_MATCH 0x00000021
+#define SAMA5D28C_D1G_EXID_MATCH 0x00000013
#define SAMA5D28CU_EXID_MATCH 0x00000010
#define SAMA5D28CN_EXID_MATCH 0x00000020
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 49f1e2a75d61..055a845ed979 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -20,4 +20,6 @@ config SOC_BRCMSTB
If unsure, say N.
+source "drivers/soc/bcm/brcmstb/Kconfig"
+
endmenu
diff --git a/drivers/soc/bcm/brcmstb/Kconfig b/drivers/soc/bcm/brcmstb/Kconfig
new file mode 100644
index 000000000000..d36f6e03c1a6
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/Kconfig
@@ -0,0 +1,10 @@
+if SOC_BRCMSTB
+
+config BRCMSTB_PM
+ bool "Support suspend/resume for STB platforms"
+ default y
+ depends on PM
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ select ARM_CPU_SUSPEND if ARM
+
+endif # SOC_BRCMSTB
diff --git a/drivers/soc/bcm/brcmstb/Makefile b/drivers/soc/bcm/brcmstb/Makefile
index 9120b2715d3e..01687c26535b 100644
--- a/drivers/soc/bcm/brcmstb/Makefile
+++ b/drivers/soc/bcm/brcmstb/Makefile
@@ -1 +1,2 @@
obj-y += common.o biuctrl.o
+obj-$(CONFIG_BRCMSTB_PM) += pm/
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 22e98a90468c..a71730da6385 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -40,6 +40,18 @@ bool soc_is_brcmstb(void)
return of_match_node(brcmstb_machine_match, root) != NULL;
}
+u32 brcmstb_get_family_id(void)
+{
+ return family_id;
+}
+EXPORT_SYMBOL(brcmstb_get_family_id);
+
+u32 brcmstb_get_product_id(void)
+{
+ return product_id;
+}
+EXPORT_SYMBOL(brcmstb_get_product_id);
+
static const struct of_device_id sun_top_ctrl_match[] = {
{ .compatible = "brcm,bcm7125-sun-top-ctrl", },
{ .compatible = "brcm,bcm7346-sun-top-ctrl", },
diff --git a/drivers/soc/bcm/brcmstb/pm/Makefile b/drivers/soc/bcm/brcmstb/pm/Makefile
new file mode 100644
index 000000000000..08bbd244ef11
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ARM) += s2-arm.o pm-arm.o
+AFLAGS_s2-arm.o := -march=armv7-a
+obj-$(CONFIG_BMIPS_GENERIC) += s2-mips.o s3-mips.o pm-mips.o
diff --git a/drivers/soc/bcm/brcmstb/pm/aon_defs.h b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
new file mode 100644
index 000000000000..fb936abd847d
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/aon_defs.h
@@ -0,0 +1,113 @@
+/*
+ * Always ON (AON) register interface between bootloader and Linux
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_AON_DEFS_H__
+#define __BRCMSTB_AON_DEFS_H__
+
+#include <linux/compiler.h>
+
+/* Magic number in upper 16-bits */
+#define BRCMSTB_S3_MAGIC_MASK 0xffff0000
+#define BRCMSTB_S3_MAGIC_SHORT 0x5AFE0000
+
+enum {
+ /* Restore random key for AES memory verification (off = fixed key) */
+ S3_FLAG_LOAD_RANDKEY = (1 << 0),
+
+ /* Scratch buffer page table is present */
+ S3_FLAG_SCRATCH_BUFFER_TABLE = (1 << 1),
+
+ /* Skip all memory verification */
+ S3_FLAG_NO_MEM_VERIFY = (1 << 2),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=PSCI started Linux, 0=Direct jump to Linux.
+ */
+ S3_FLAG_PSCI_BOOT = (1 << 3),
+
+ /*
+ * Modification of this bit reserved for bootloader only.
+ * 1=64 bit boot, 0=32 bit boot.
+ */
+ S3_FLAG_BOOTED64 = (1 << 4),
+};
+
+#define BRCMSTB_HASH_LEN (128 / 8) /* 128-bit hash */
+
+#define AON_REG_MAGIC_FLAGS 0x00
+#define AON_REG_CONTROL_LOW 0x04
+#define AON_REG_CONTROL_HIGH 0x08
+#define AON_REG_S3_HASH 0x0c /* hash of S3 params */
+#define AON_REG_CONTROL_HASH_LEN 0x1c
+#define AON_REG_PANIC 0x20
+
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+#define BRCMSTB_PANIC_MAGIC 0x512E115E
+#define BOOTLOADER_SCRATCH_SIZE 64
+#define BRCMSTB_DTU_STATE_MAP_ENTRIES (8*1024)
+#define BRCMSTB_DTU_CONFIG_ENTRIES (512)
+#define BRCMSTB_DTU_COUNT (2)
+
+#define IMAGE_DESCRIPTORS_BUFSIZE (2 * 1024)
+#define S3_BOOTLOADER_RESERVED (S3_FLAG_PSCI_BOOT | S3_FLAG_BOOTED64)
+
+struct brcmstb_bootloader_dtu_table {
+ uint32_t dtu_state_map[BRCMSTB_DTU_STATE_MAP_ENTRIES];
+ uint32_t dtu_config[BRCMSTB_DTU_CONFIG_ENTRIES];
+};
+
+/*
+ * Bootloader utilizes a custom parameter block left in DRAM for handling S3
+ * warm resume
+ */
+struct brcmstb_s3_params {
+ /* scratch memory for bootloader */
+ uint8_t scratch[BOOTLOADER_SCRATCH_SIZE];
+
+ uint32_t magic; /* BRCMSTB_S3_MAGIC */
+ uint64_t reentry; /* PA */
+
+ /* descriptors */
+ uint32_t hash[BRCMSTB_HASH_LEN / 4];
+
+ /*
+ * If 0, then ignore this parameter (there is only one set of
+ * descriptors)
+ *
+ * If non-0, then a second set of descriptors is stored at:
+ *
+ * descriptors + desc_offset_2
+ *
+ * The MAC result of both descriptors is XOR'd and stored in @hash
+ */
+ uint32_t desc_offset_2;
+
+ /*
+ * (Physical) address of a brcmstb_bootloader_scratch_table, for
+ * providing a large DRAM buffer to the bootloader
+ */
+ uint64_t buffer_table;
+
+ uint32_t spare[70];
+
+ uint8_t descriptors[IMAGE_DESCRIPTORS_BUFSIZE];
+ /*
+ * Must be last member of struct. See brcmstb_pm_s3_finish() for reason.
+ */
+ struct brcmstb_bootloader_dtu_table dtu[BRCMSTB_DTU_COUNT];
+} __packed;
+
+#endif /* __BRCMSTB_AON_DEFS_H__ */
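
As a sketch of how the magic/flags word defined above is used: the compose side mirrors brcmstb_pm_s3_finish() in pm-arm.c, while the warm-boot check is an assumption inferred from the mask and magic values, since the bootloader side is not part of this patch.

	/* Illustrative only -- not part of this patch; assumes linux/types.h */
	static inline u32 brcmstb_s3_compose_magic_flags(u32 flags)
	{
		/* Preserve bootloader-owned bits, request S3 options, add the magic */
		flags &= S3_BOOTLOADER_RESERVED;
		flags |= S3_FLAG_NO_MEM_VERIFY | S3_FLAG_LOAD_RANDKEY;
		return flags | BRCMSTB_S3_MAGIC_SHORT;
	}

	static inline bool brcmstb_s3_magic_present(u32 flags)
	{
		/* The upper 16 bits must carry the magic for a warm (S3) resume */
		return (flags & BRCMSTB_S3_MAGIC_MASK) == BRCMSTB_S3_MAGIC_SHORT;
	}
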
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
new file mode 100644
index 000000000000..dcf8c8065508
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -0,0 +1,822 @@
+/*
+ * ARM-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * S2: clock gate CPUs and as many peripherals as possible
+ * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
+ * self-refresh
+ * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
+ * treat this mode like a soft power-off, with wakeup allowed from AON
+ *
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "brcmstb-pm: " fmt
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+
+#include <asm/fncpy.h>
+#include <asm/setup.h>
+#include <asm/suspend.h>
+
+#include "pm.h"
+#include "aon_defs.h"
+
+#define SHIMPHY_DDR_PAD_CNTRL 0x8c
+
+/* Method #0 */
+#define SHIMPHY_PAD_PLL_SEQUENCE BIT(8)
+#define SHIMPHY_PAD_GATE_PLL_S3 BIT(9)
+
+/* Method #1 */
+#define PWRDWN_SEQ_NO_SEQUENCING 0
+#define PWRDWN_SEQ_HOLD_CHANNEL 1
+#define PWRDWN_SEQ_RESET_PLL 2
+#define PWRDWN_SEQ_POWERDOWN_PLL 3
+
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000
+#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20
+
+#define DDR_FORCE_CKE_RST_N BIT(3)
+#define DDR_PHY_RST_N BIT(2)
+#define DDR_PHY_CKE BIT(1)
+
+#define DDR_PHY_NO_CHANNEL 0xffffffff
+
+#define MAX_NUM_MEMC 3
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *ddr_shimphy_base;
+ void __iomem *ddr_ctrl;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+
+ void __iomem *boot_sram;
+ size_t boot_sram_len;
+
+ bool support_warm_boot;
+ size_t pll_status_offset;
+ int num_memc;
+
+ struct brcmstb_s3_params *s3_params;
+ dma_addr_t s3_params_pa;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+ bool needs_ddr_pad;
+ struct platform_device *pdev;
+};
+
+enum bsp_initiate_command {
+ BSP_CLOCK_STOP = 0x00,
+ BSP_GEN_RANDOM_KEY = 0x4A,
+ BSP_RESTORE_RANDOM_KEY = 0x55,
+ BSP_GEN_FIXED_KEY = 0x63,
+};
+
+#define PM_INITIATE 0x01
+#define PM_INITIATE_SUCCESS 0x00
+#define PM_INITIATE_FAIL 0xfe
+
+static struct brcmstb_pm_control ctrl;
+
+static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+
+static int brcmstb_init_sram(struct device_node *dn)
+{
+ void __iomem *sram;
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret)
+ return ret;
+
+ /* Uncached, executable remapping of SRAM */
+ sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
+ if (!sram)
+ return -ENOMEM;
+
+ ctrl.boot_sram = sram;
+ ctrl.boot_sram_len = resource_size(&res);
+
+ return 0;
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { /* sentinel */ }
+};
+
+static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ int ret;
+ int timeo = 1000 * 1000; /* 1 second */
+
+ writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+
+ /* Go! */
+ writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);
+
+ /*
+ * If firmware doesn't support the 'ack', then just assume it's done
+ * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
+ */
+ if (of_machine_is_compatible("brcm,bcm74371a0")) {
+ (void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ mdelay(10);
+ return 0;
+ }
+
+ for (;;) {
+ ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
+ if (!(ret & PM_INITIATE))
+ break;
+ if (timeo <= 0) {
+ pr_err("error: timeout waiting for BSP (%x)\n", ret);
+ break;
+ }
+ timeo -= 50;
+ udelay(50);
+ }
+
+ return (ret & 0xff) != PM_INITIATE_SUCCESS;
+}
+
+static int brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+ int ret;
+
+ /* BSP power handshake, v1 */
+ tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
+
+ ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
+ if (ret)
+ pr_err("BSP handshake failed\n");
+
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+
+ return ret;
+}
+
+static inline void shimphy_set(u32 value, u32 mask)
+{
+ int i;
+
+ if (!ctrl.needs_ddr_pad)
+ return;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ tmp = value | (tmp & mask);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
+ SHIMPHY_DDR_PAD_CNTRL);
+ }
+ wmb(); /* Complete sequence in order. */
+}
+
+static inline void ddr_ctrl_set(bool warmboot)
+{
+ int i;
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ if (warmboot)
+ tmp |= 1;
+ else
+ tmp &= ~1; /* Cold boot */
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
+ ctrl.warm_boot_offset);
+ }
+ /* Complete sequence in order */
+ wmb();
+}
+
+static inline void s3entry_method0(void)
+{
+ shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
+ 0xffffffff);
+}
+
+static inline void s3entry_method1(void)
+{
+ /*
+ * S3 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(true);
+}
+
+static inline void s5entry_method1(void)
+{
+ int i;
+
+ /*
+ * S5 Entry Sequence
+ * -----------------
+ * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
+ * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
+ * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
+ * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
+ */
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+
+ ddr_ctrl_set(false);
+
+ for (i = 0; i < ctrl.num_memc; i++) {
+ u32 tmp;
+
+ /* Step 3: Channel A (RST_N = CKE = 0) */
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_a_standby_ctrl_offs);
+
+ /* Step 3: Channel B? */
+ if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
+ tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
+ writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
+ ctrl.phy_b_standby_ctrl_offs);
+ }
+ }
+ /* Must complete */
+ wmb();
+}
+
+/*
+ * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
+ * into a low-power mode
+ */
+static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
+ s5entry_method1();
+
+ /* pm_start_pwrdn transition 0->1 */
+ writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);
+
+ if (!onewrite) {
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+
+ writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
+ (void)readl_relaxed(base + AON_CTRL_PM_CTRL);
+ }
+ wfi();
+}
+
+/* Support S5 cold boot out of "poweroff" */
+static void brcmstb_pm_poweroff(void)
+{
+ brcmstb_pm_handshake();
+
+ /* Clear magic S3 warm-boot value */
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ (void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ /* Skip wait-for-interrupt signal; just use a countdown */
+ writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ if (ctrl.s3entry_method == 1) {
+ shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
+ return; /* We should never actually get here */
+ }
+
+ brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
+}
+
+static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
+{
+ unsigned int size = ALIGN(len, FNCPY_ALIGN);
+
+ if (ctrl.boot_sram_len < size) {
+ pr_err("standby code will not fit in SRAM\n");
+ return NULL;
+ }
+
+ return fncpy(ctrl.boot_sram, fn, size);
+}
+
+/*
+ * S2 suspend/resume picks up where we left off, so we must execute carefully
+ * from SRAM, in order to allow DDR to come back up safely before we continue.
+ */
+static int brcmstb_pm_s2(void)
+{
+ /* A previous S3 can set a value hazardous to S2, so make sure. */
+ /* A previous S3 can leave a setting hazardous to S2, so make sure it is cleared. */
+ shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
+ SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
+ ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
+ ddr_ctrl_set(false);
+ }
+
+ brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
+ brcmstb_pm_do_s2_sz);
+ if (!brcmstb_pm_do_s2_sram)
+ return -EINVAL;
+
+ return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
+ ctrl.memcs[0].ddr_phy_base +
+ ctrl.pll_status_offset);
+}
+
+/*
+ * This function is called on a new stack, so don't allow inlining (which will
+ * generate stack references on the old stack). It cannot be made static because
+ * it is referenced by name from the inline assembly in brcmstb_pm_do_s3().
+ */
+noinline int brcmstb_pm_s3_finish(void)
+{
+ struct brcmstb_s3_params *params = ctrl.s3_params;
+ dma_addr_t params_pa = ctrl.s3_params_pa;
+ phys_addr_t reentry = virt_to_phys(&cpu_resume);
+ enum bsp_initiate_command cmd;
+ u32 flags;
+
+ /*
+ * Clear parameter structure, but not DTU area, which has already been
+ * filled in. We know DTU is at the end, so we can just subtract its
+ * size.
+ */
+ memset(params, 0, sizeof(*params) - sizeof(params->dtu));
+
+ flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+
+ flags &= S3_BOOTLOADER_RESERVED;
+ flags |= S3_FLAG_NO_MEM_VERIFY;
+ flags |= S3_FLAG_LOAD_RANDKEY;
+
+ /* Load random / fixed key */
+ if (flags & S3_FLAG_LOAD_RANDKEY)
+ cmd = BSP_GEN_RANDOM_KEY;
+ else
+ cmd = BSP_GEN_FIXED_KEY;
+ if (do_bsp_initiate_command(cmd)) {
+ pr_info("key loading failed\n");
+ return -EIO;
+ }
+
+ params->magic = BRCMSTB_S3_MAGIC;
+ params->reentry = reentry;
+
+ /* No more writes to DRAM */
+ flush_cache_all();
+
+ flags |= BRCMSTB_S3_MAGIC_SHORT;
+
+ writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
+ writel_relaxed(lower_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_LOW);
+ writel_relaxed(upper_32_bits(params_pa),
+ ctrl.aon_sram + AON_REG_CONTROL_HIGH);
+
+ switch (ctrl.s3entry_method) {
+ case 0:
+ s3entry_method0();
+ brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
+ break;
+ case 1:
+ s3entry_method1();
+ brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Must have been interrupted from wfi()? */
+ return -EINTR;
+}
+
+static int brcmstb_pm_do_s3(unsigned long sp)
+{
+ unsigned long save_sp;
+ int ret;
+
+ asm volatile (
+ "mov %[save], sp\n"
+ "mov sp, %[new]\n"
+ "bl brcmstb_pm_s3_finish\n"
+ "mov %[ret], r0\n"
+ "mov %[new], sp\n"
+ "mov sp, %[save]\n"
+ : [save] "=&r" (save_sp), [ret] "=&r" (ret)
+ : [new] "r" (sp)
+ );
+
+ return ret;
+}
+
+static int brcmstb_pm_s3(void)
+{
+ void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;
+
+ return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ int ret;
+
+ if (brcmstb_pm_handshake())
+ return -EIO;
+
+ if (deep_standby)
+ ret = brcmstb_pm_s3();
+ else
+ ret = brcmstb_pm_s2();
+ if (ret)
+ pr_err("%s: standby failed\n", __func__);
+
+ return ret;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return ctrl.support_warm_boot;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ {}
+};
+
+struct ddr_phy_ofdata {
+ bool supports_warm_boot;
+ size_t pll_status_offset;
+ int s3entry_method;
+ u32 warm_boot_offset;
+ u32 phy_a_standby_ctrl_offs;
+ u32 phy_b_standby_ctrl_offs;
+};
+
+static struct ddr_phy_ofdata ddr_phy_71_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x0c,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x2c,
+ .phy_a_standby_ctrl_offs = 0x198,
+ .phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
+};
+
+static struct ddr_phy_ofdata ddr_phy_72_0 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x10,
+ .s3entry_method = 1,
+ .warm_boot_offset = 0x40,
+ .phy_a_standby_ctrl_offs = 0x2a4,
+ .phy_b_standby_ctrl_offs = 0x8a4
+};
+
+static struct ddr_phy_ofdata ddr_phy_225_1 = {
+ .supports_warm_boot = false,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static struct ddr_phy_ofdata ddr_phy_240_1 = {
+ .supports_warm_boot = true,
+ .pll_status_offset = 0x4,
+ .s3entry_method = 0
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v71.1",
+ .data = &ddr_phy_71_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v72.0",
+ .data = &ddr_phy_72_0,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v225.1",
+ .data = &ddr_phy_225_1,
+ },
+ {
+ .compatible = "brcm,brcmstb-ddr-phy-v240.1",
+ .data = &ddr_phy_240_1,
+ },
+ {
+ /* Same as v240.1, for the registers we care about */
+ .compatible = "brcm,brcmstb-ddr-phy-v240.2",
+ .data = &ddr_phy_240_1,
+ },
+ {}
+};
+
+struct ddr_seq_ofdata {
+ bool needs_ddr_pad;
+ u32 warm_boot_offset;
+};
+
+static const struct ddr_seq_ofdata ddr_seq_b22 = {
+ .needs_ddr_pad = false,
+ .warm_boot_offset = 0x2c,
+};
+
+static const struct ddr_seq_ofdata ddr_seq = {
+ .needs_ddr_pad = true,
+};
+
+static const struct of_device_id ddr_shimphy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
+ {}
+};
+
+static const struct of_device_id brcmstb_memc_of_match[] = {
+ {
+ .compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
+ .data = &ddr_seq_b22,
+ },
+ {
+ .compatible = "brcm,brcmstb-memc-ddr",
+ .data = &ddr_seq,
+ },
+ {},
+};
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static int brcmstb_pm_panic_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block brcmstb_pm_panic_nb = {
+ .notifier_call = brcmstb_pm_panic_notify,
+};
+
+static int brcmstb_pm_probe(struct platform_device *pdev)
+{
+ const struct ddr_phy_ofdata *ddr_phy_data;
+ const struct ddr_seq_ofdata *ddr_seq_data;
+ const struct of_device_id *of_id = NULL;
+ struct device_node *dn;
+ void __iomem *base;
+ int ret, i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ return PTR_ERR(base);
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ /* Assume standard offset */
+ ctrl.aon_sram = ctrl.aon_ctrl_base +
+ AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ } else {
+ ctrl.aon_sram = base;
+ }
+
+ writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
+
+ /* DDR PHY registers */
+ base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
+ (const void **)&ddr_phy_data);
+ if (IS_ERR(base)) {
+ pr_err("error mapping DDR PHY\n");
+ return PTR_ERR(base);
+ }
+ ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
+ ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
+ /* Only need DDR PHY 0 for now? */
+ ctrl.memcs[0].ddr_phy_base = base;
+ ctrl.s3entry_method = ddr_phy_data->s3entry_method;
+ ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
+ ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
+ /*
+ * Slightly gross to use the PHY version to get a MEMC offset,
+ * but that is the only versioned thing we can test for so far.
+ */
+ ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
+
+ /* DDR SHIM-PHY registers */
+ for_each_matching_node(dn, ddr_shimphy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+
+ base = of_io_request_and_map(dn, 0, dn->full_name);
+ if (IS_ERR(base)) {
+ if (!ctrl.support_warm_boot)
+ break;
+
+ pr_err("error mapping DDR SHIMPHY %d\n", i);
+ return PTR_ERR(base);
+ }
+ ctrl.memcs[i].ddr_shimphy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* Sequencer DRAM Param and Control Registers */
+ i = 0;
+ for_each_matching_node(dn, brcmstb_memc_of_match) {
+ base = of_iomap(dn, 0);
+ if (!base) {
+ pr_err("error mapping DDR Sequencer %d\n", i);
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(brcmstb_memc_of_match, dn);
+ if (!of_id) {
+ iounmap(base);
+ return -EINVAL;
+ }
+
+ ddr_seq_data = of_id->data;
+ ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
+ /* Adjust warm boot offset based on the DDR sequencer */
+ if (ddr_seq_data->warm_boot_offset)
+ ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;
+
+ ctrl.memcs[i].ddr_ctrl = base;
+ i++;
+ }
+
+ pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
+ ctrl.support_warm_boot, ctrl.s3entry_method,
+ ctrl.warm_boot_offset);
+
+ dn = of_find_matching_node(NULL, sram_dt_ids);
+ if (!dn) {
+ pr_err("SRAM not found\n");
+ return -EINVAL;
+ }
+
+ ret = brcmstb_init_sram(dn);
+ if (ret) {
+ pr_err("error setting up SRAM for PM\n");
+ return ret;
+ }
+
+ ctrl.pdev = pdev;
+
+ ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
+ if (!ctrl.s3_params)
+ return -ENOMEM;
+ ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
+ sizeof(*ctrl.s3_params),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
+ pr_err("error mapping DMA memory\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &brcmstb_pm_panic_nb);
+
+ pm_power_off = brcmstb_pm_poweroff;
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+out:
+ kfree(ctrl.s3_params);
+
+ pr_warn("PM: initialization failed with code %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver brcmstb_pm_driver = {
+ .driver = {
+ .name = "brcmstb-pm",
+ .of_match_table = aon_ctrl_dt_ids,
+ },
+};
+
+static int __init brcmstb_pm_init(void)
+{
+ return platform_driver_probe(&brcmstb_pm_driver,
+ brcmstb_pm_probe);
+}
+module_init(brcmstb_pm_init);
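
For completeness, the suspend ops registered above are reached through the standard kernel suspend interfaces rather than anything added by this patch; a hedged sketch of both entry paths:

	/*
	 * Illustrative only -- standard suspend entry points.
	 *
	 * From user space:
	 *   echo standby > /sys/power/state  -> brcmstb_pm_enter(PM_SUSPEND_STANDBY) -> S2
	 *   echo mem > /sys/power/state      -> brcmstb_pm_enter(PM_SUSPEND_MEM)     -> S3
	 * (the "mem" state is only valid when the DDR PHY supports warm boot)
	 */
	#include <linux/suspend.h>

	static int __maybe_unused brcmstb_example_enter_s3(void)
	{
		/* Routed through brcmstb_pm_ops.enter() registered above */
		return pm_suspend(PM_SUSPEND_MEM);
	}
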
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
new file mode 100644
index 000000000000..9300b5f62e56
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c
@@ -0,0 +1,461 @@
+/*
+ * MIPS-specific support for Broadcom STB S2/S3/S5 power management
+ *
+ * Copyright (C) 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <asm/bmips.h>
+#include <asm/tlbflush.h>
+
+#include "pm.h"
+
+#define S2_NUM_PARAMS 6
+#define MAX_NUM_MEMC 3
+
+/* S3 constants */
+#define MAX_GP_REGS 16
+#define MAX_CP0_REGS 32
+#define NUM_MEMC_CLIENTS 128
+#define AON_CTRL_RAM_SIZE 128
+#define BRCMSTB_S3_MAGIC 0x5AFEB007
+
+#define CLEAR_RESET_MASK 0x01
+
+/* Index each CP0 register that needs to be saved */
+#define CONTEXT 0
+#define USER_LOCAL 1
+#define PGMK 2
+#define HWRENA 3
+#define COMPARE 4
+#define STATUS 5
+#define CONFIG 6
+#define MODE 7
+#define EDSP 8
+#define BOOT_VEC 9
+#define EBASE 10
+
+struct brcmstb_memc {
+ void __iomem *ddr_phy_base;
+ void __iomem *arb_base;
+};
+
+struct brcmstb_pm_control {
+ void __iomem *aon_ctrl_base;
+ void __iomem *aon_sram_base;
+ void __iomem *timers_base;
+ struct brcmstb_memc memcs[MAX_NUM_MEMC];
+ int num_memc;
+};
+
+struct brcm_pm_s3_context {
+ u32 cp0_regs[MAX_CP0_REGS];
+ u32 memc0_rts[NUM_MEMC_CLIENTS];
+ u32 sc_boot_vec;
+};
+
+struct brcmstb_mem_transfer;
+
+struct brcmstb_mem_transfer {
+ struct brcmstb_mem_transfer *next;
+ void *src;
+ void *dst;
+ dma_addr_t pa_src;
+ dma_addr_t pa_dst;
+ u32 len;
+ u8 key;
+ u8 mode;
+ u8 src_remapped;
+ u8 dst_remapped;
+ u8 src_dst_remapped;
+};
+
+#define AON_SAVE_SRAM(base, idx, val) \
+ __raw_writel(val, base + (idx << 2))
+
+/* Used for saving registers in asm */
+u32 gp_regs[MAX_GP_REGS];
+
+#define BSP_CLOCK_STOP 0x00
+#define PM_INITIATE 0x01
+
+static struct brcmstb_pm_control ctrl;
+
+static void brcm_pm_save_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Generic MIPS */
+ ctx->cp0_regs[CONTEXT] = read_c0_context();
+ ctx->cp0_regs[USER_LOCAL] = read_c0_userlocal();
+ ctx->cp0_regs[PGMK] = read_c0_pagemask();
+ ctx->cp0_regs[HWRENA] = read_c0_cache();
+ ctx->cp0_regs[COMPARE] = read_c0_compare();
+ ctx->cp0_regs[STATUS] = read_c0_status();
+
+ /* Broadcom specific */
+ ctx->cp0_regs[CONFIG] = read_c0_brcm_config();
+ ctx->cp0_regs[MODE] = read_c0_brcm_mode();
+ ctx->cp0_regs[EDSP] = read_c0_brcm_edsp();
+ ctx->cp0_regs[BOOT_VEC] = read_c0_brcm_bootvec();
+ ctx->cp0_regs[EBASE] = read_c0_ebase();
+
+ ctx->sc_boot_vec = bmips_read_zscm_reg(0xa0);
+}
+
+static void brcm_pm_restore_cp0_context(struct brcm_pm_s3_context *ctx)
+{
+ /* Restore cp0 state */
+ bmips_write_zscm_reg(0xa0, ctx->sc_boot_vec);
+
+ /* Generic MIPS */
+ write_c0_context(ctx->cp0_regs[CONTEXT]);
+ write_c0_userlocal(ctx->cp0_regs[USER_LOCAL]);
+ write_c0_pagemask(ctx->cp0_regs[PGMK]);
+ write_c0_cache(ctx->cp0_regs[HWRENA]);
+ write_c0_compare(ctx->cp0_regs[COMPARE]);
+ write_c0_status(ctx->cp0_regs[STATUS]);
+
+ /* Broadcom specific */
+ write_c0_brcm_config(ctx->cp0_regs[CONFIG]);
+ write_c0_brcm_mode(ctx->cp0_regs[MODE]);
+ write_c0_brcm_edsp(ctx->cp0_regs[EDSP]);
+ write_c0_brcm_bootvec(ctx->cp0_regs[BOOT_VEC]);
+ write_c0_ebase(ctx->cp0_regs[EBASE]);
+}
+
+static void brcmstb_pm_handshake(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+ u32 tmp;
+
+ /* BSP power handshake, v1 */
+ tmp = __raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+ tmp &= ~1UL;
+ __raw_writel(tmp, base + AON_CTRL_HOST_MISC_CMDS);
+ (void)__raw_readl(base + AON_CTRL_HOST_MISC_CMDS);
+
+ __raw_writel(0, base + AON_CTRL_PM_INITIATE);
+ (void)__raw_readl(base + AON_CTRL_PM_INITIATE);
+ __raw_writel(BSP_CLOCK_STOP | PM_INITIATE,
+ base + AON_CTRL_PM_INITIATE);
+ /*
+ * HACK: BSP may have internal race on the CLOCK_STOP command.
+ * Avoid touching the BSP for a few milliseconds.
+ */
+ mdelay(3);
+}
+
+static void brcmstb_pm_s5(void)
+{
+ void __iomem *base = ctrl.aon_ctrl_base;
+
+ brcmstb_pm_handshake();
+
+ /* Clear magic s3 warm-boot value */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, 0);
+
+ /* Set the countdown */
+ __raw_writel(0x10, base + AON_CTRL_PM_CPU_WAIT_COUNT);
+ (void)__raw_readl(base + AON_CTRL_PM_CPU_WAIT_COUNT);
+
+ /* Prepare to S5 cold boot */
+ __raw_writel(PM_COLD_CONFIG, base + AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __raw_writel((PM_COLD_CONFIG | PM_PWR_DOWN), base +
+ AON_CTRL_PM_CTRL);
+ (void)__raw_readl(base + AON_CTRL_PM_CTRL);
+
+ __asm__ __volatile__(
+ " wait\n"
+ : : : "memory");
+}
+
+static int brcmstb_pm_s3(void)
+{
+ struct brcm_pm_s3_context s3_context;
+ void __iomem *memc_arb_base;
+ unsigned long flags;
+ u32 tmp;
+ int i;
+
+ /* Prepare for s3 */
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 0, BRCMSTB_S3_MAGIC);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 1, (u32)&s3_reentry);
+ AON_SAVE_SRAM(ctrl.aon_sram_base, 2, 0);
+
+ /* Clear RESET_HISTORY */
+ tmp = __raw_readl(ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+ tmp &= ~CLEAR_RESET_MASK;
+ __raw_writel(tmp, ctrl.aon_ctrl_base + AON_CTRL_RESET_CTRL);
+
+ local_irq_save(flags);
+
+ /* Inhibit DDR_RSTb pulse for both MEMCs */
+ for (i = 0; i < ctrl.num_memc; i++) {
+ tmp = __raw_readl(ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+
+ tmp &= ~0x0f;
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ tmp |= (0x05 | BIT(5));
+ __raw_writel(tmp, ctrl.memcs[i].ddr_phy_base +
+ DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL);
+ }
+
+ /* Save CP0 context */
+ brcm_pm_save_cp0_context(&s3_context);
+
+ /* Save RTS(skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ s3_context.memc0_rts[i] = __raw_readl(memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* Save I/O context */
+ local_flush_tlb_all();
+ _dma_cache_wback_inv(0, ~0);
+
+ brcm_pm_do_s3(ctrl.aon_ctrl_base, current_cpu_data.dcache.linesz);
+
+ /* CPU reconfiguration */
+ local_flush_tlb_all();
+ bmips_cpu_setup();
+ cpumask_clear(&bmips_booted_mask);
+
+ /* Restore RTS (skip debug register) */
+ memc_arb_base = ctrl.memcs[0].arb_base + 4;
+ for (i = 0; i < NUM_MEMC_CLIENTS; i++) {
+ __raw_writel(s3_context.memc0_rts[i], memc_arb_base);
+ memc_arb_base += 4;
+ }
+
+ /* restore CP0 context */
+ brcm_pm_restore_cp0_context(&s3_context);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int brcmstb_pm_s2(void)
+{
+ /*
+ * We need to pass 6 arguments to an assembly function. Let's avoid the
+ * stack and pass the arguments in an explicit array of 4-byte words. The
+ * assembly code assumes all arguments are 4 bytes and are ordered
+ * like so:
+ *
+ * 0: AON_CTRL base register
+ * 1: DDR_PHY base register
+ * 2: TIMERS base register
+ * 3: I-Cache line size
+ * 4: Restart vector address
+ * 5: Restart vector size
+ */
+ u32 s2_params[6];
+
+ /* Prepare s2 parameters */
+ s2_params[0] = (u32)ctrl.aon_ctrl_base;
+ s2_params[1] = (u32)ctrl.memcs[0].ddr_phy_base;
+ s2_params[2] = (u32)ctrl.timers_base;
+ s2_params[3] = (u32)current_cpu_data.icache.linesz;
+ s2_params[4] = (u32)BMIPS_WARM_RESTART_VEC;
+ s2_params[5] = (u32)(bmips_smp_int_vec_end -
+ bmips_smp_int_vec);
+
+ /* Drop to standby */
+ brcm_pm_do_s2(s2_params);
+
+ return 0;
+}
+
+static int brcmstb_pm_standby(bool deep_standby)
+{
+ brcmstb_pm_handshake();
+
+ /* Send IRQs to BMIPS_WARM_RESTART_VEC */
+ clear_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+ set_c0_status(ST0_BEV);
+ irq_disable_hazard();
+
+ if (deep_standby)
+ brcmstb_pm_s3();
+ else
+ brcmstb_pm_s2();
+
+ /* Send IRQs to normal runtime vectors */
+ clear_c0_status(ST0_BEV);
+ irq_disable_hazard();
+ set_c0_cause(CAUSEF_IV);
+ irq_disable_hazard();
+
+ return 0;
+}
+
+static int brcmstb_pm_enter(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ ret = brcmstb_pm_standby(false);
+ break;
+ case PM_SUSPEND_MEM:
+ ret = brcmstb_pm_standby(true);
+ break;
+ }
+
+ return ret;
+}
+
+static int brcmstb_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return true;
+ case PM_SUSPEND_MEM:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct platform_suspend_ops brcmstb_pm_ops = {
+ .enter = brcmstb_pm_enter,
+ .valid = brcmstb_pm_valid,
+};
+
+static const struct of_device_id aon_ctrl_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-aon-ctrl" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id ddr_phy_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-ddr-phy" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id arb_dt_ids[] = {
+ { .compatible = "brcm,brcmstb-memc-arb" },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id timers_ids[] = {
+ { .compatible = "brcm,brcmstb-timers" },
+ { /* sentinel */ }
+};
+
+static inline void __iomem *brcmstb_ioremap_node(struct device_node *dn,
+ int index)
+{
+ return of_io_request_and_map(dn, index, dn->full_name);
+}
+
+static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
+ int index, const void **ofdata)
+{
+ struct device_node *dn;
+ const struct of_device_id *match;
+
+ dn = of_find_matching_node_and_match(NULL, matches, &match);
+ if (!dn)
+ return ERR_PTR(-EINVAL);
+
+ if (ofdata)
+ *ofdata = match->data;
+
+ return brcmstb_ioremap_node(dn, index);
+}
+
+static int brcmstb_pm_init(void)
+{
+ struct device_node *dn;
+ void __iomem *base;
+ int i;
+
+ /* AON ctrl registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_CTRL\n");
+ goto aon_err;
+ }
+ ctrl.aon_ctrl_base = base;
+
+ /* AON SRAM registers */
+ base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping AON_SRAM\n");
+ goto sram_err;
+ }
+ ctrl.aon_sram_base = base;
+
+ ctrl.num_memc = 0;
+ /* Map MEMC DDR PHY registers */
+ for_each_matching_node(dn, ddr_phy_dt_ids) {
+ i = ctrl.num_memc;
+ if (i >= MAX_NUM_MEMC) {
+ pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC);
+ break;
+ }
+ base = brcmstb_ioremap_node(dn, 0);
+ if (IS_ERR(base))
+ goto ddr_err;
+
+ ctrl.memcs[i].ddr_phy_base = base;
+ ctrl.num_memc++;
+ }
+
+ /* MEMC ARB registers */
+ base = brcmstb_ioremap_match(arb_dt_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping MEMC ARB\n");
+ goto ddr_err;
+ }
+ ctrl.memcs[0].arb_base = base;
+
+ /* Timer registers */
+ base = brcmstb_ioremap_match(timers_ids, 0, NULL);
+ if (IS_ERR(base)) {
+ pr_err("error mapping timers\n");
+ goto tmr_err;
+ }
+ ctrl.timers_base = base;
+
+ /* s3 cold boot aka s5 */
+ pm_power_off = brcmstb_pm_s5;
+
+ suspend_set_ops(&brcmstb_pm_ops);
+
+ return 0;
+
+tmr_err:
+ iounmap(ctrl.memcs[0].arb_base);
+ddr_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_phy_base);
+
+ iounmap(ctrl.aon_sram_base);
+sram_err:
+ iounmap(ctrl.aon_ctrl_base);
+aon_err:
+ return PTR_ERR(base);
+}
+arch_initcall(brcmstb_pm_init);
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
new file mode 100644
index 000000000000..b7d35ac70e60
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -0,0 +1,89 @@
+/*
+ * Definitions for Broadcom STB power management / Always ON (AON) block
+ *
+ * Copyright © 2016-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_PM_H__
+#define __BRCMSTB_PM_H__
+
+#define AON_CTRL_RESET_CTRL 0x00
+#define AON_CTRL_PM_CTRL 0x04
+#define AON_CTRL_PM_STATUS 0x08
+#define AON_CTRL_PM_CPU_WAIT_COUNT 0x10
+#define AON_CTRL_PM_INITIATE 0x88
+#define AON_CTRL_HOST_MISC_CMDS 0x8c
+#define AON_CTRL_SYSTEM_DATA_RAM_OFS 0x200
+
+/* MIPS PM constants */
+/* MEMC0 offsets */
+#define DDR40_PHY_CONTROL_REGS_0_PLL_STATUS 0x10
+#define DDR40_PHY_CONTROL_REGS_0_STANDBY_CTRL 0xa4
+
+/* TIMER offsets */
+#define TIMER_TIMER1_CTRL 0x0c
+#define TIMER_TIMER1_STAT 0x1c
+
+/* TIMER defines */
+#define RESET_TIMER 0x0
+#define START_TIMER 0xbfffffff
+#define TIMER_MASK 0x3fffffff
+
+/* PM_CTRL bitfield (Method #0) */
+#define PM_FAST_PWRDOWN (1 << 6)
+#define PM_WARM_BOOT (1 << 5)
+#define PM_DEEP_STANDBY (1 << 4)
+#define PM_CPU_PWR (1 << 3)
+#define PM_USE_CPU_RDY (1 << 2)
+#define PM_PLL_PWRDOWN (1 << 1)
+#define PM_PWR_DOWN (1 << 0)
+
+/* PM_CTRL bitfield (Method #1) */
+#define PM_DPHY_STANDBY_CLEAR (1 << 20)
+#define PM_MIN_S3_WIDTH_TIMER_BYPASS (1 << 7)
+
+#define PM_S2_COMMAND (PM_PLL_PWRDOWN | PM_USE_CPU_RDY | PM_PWR_DOWN)
+
+/* Method 0 bitmasks */
+#define PM_COLD_CONFIG (PM_PLL_PWRDOWN | PM_DEEP_STANDBY)
+#define PM_WARM_CONFIG (PM_COLD_CONFIG | PM_USE_CPU_RDY | PM_WARM_BOOT)
+
+/* Method 1 bitmask */
+#define M1_PM_WARM_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_WARM_BOOT | PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#define M1_PM_COLD_CONFIG (PM_DPHY_STANDBY_CLEAR | \
+ PM_MIN_S3_WIDTH_TIMER_BYPASS | \
+ PM_DEEP_STANDBY | \
+ PM_PLL_PWRDOWN | PM_PWR_DOWN)
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MIPS
+extern const unsigned long brcmstb_pm_do_s2_sz;
+extern asmlinkage int brcmstb_pm_do_s2(void __iomem *aon_ctrl_base,
+ void __iomem *ddr_phy_pll_status);
+#else
+/* s2 asm */
+extern asmlinkage int brcm_pm_do_s2(u32 *s2_params);
+
+/* s3 asm */
+extern asmlinkage int brcm_pm_do_s3(void __iomem *aon_ctrl_base,
+ int dcache_linesz);
+extern int s3_reentry;
+#endif /* CONFIG_MIPS */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __BRCMSTB_PM_H__ */
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-arm.S b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
new file mode 100644
index 000000000000..1d472d564638
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-arm.S
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014-2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include "pm.h"
+
+ .text
+ .align 3
+
+#define AON_CTRL_REG r10
+#define DDR_PHY_STATUS_REG r11
+
+/*
+ * r0: AON_CTRL base address
+ * r1: DDR PHY PLL status register address
+ */
+ENTRY(brcmstb_pm_do_s2)
+ stmfd sp!, {r4-r11, lr}
+ mov AON_CTRL_REG, r0
+ mov DDR_PHY_STATUS_REG, r1
+
+ /* Flush memory transactions */
+ dsb
+
+ /* Cache DDR_PHY_STATUS_REG translation */
+ ldr r0, [DDR_PHY_STATUS_REG]
+
+ /* power down request */
+ ldr r0, =PM_S2_COMMAND
+ ldr r1, =0
+ str r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r1, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Wait for interrupt */
+ wfi
+ nop
+
+ /* Bring MEMC back up */
+1: ldr r0, [DDR_PHY_STATUS_REG]
+ ands r0, #1
+ beq 1b
+
+ /* Power-up handshake */
+ ldr r0, =1
+ str r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_HOST_MISC_CMDS]
+
+ ldr r0, =0
+ str r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+ ldr r0, [AON_CTRL_REG, #AON_CTRL_PM_CTRL]
+
+ /* Return to caller */
+ ldr r0, =0
+ ldmfd sp!, {r4-r11, pc}
+
+ ENDPROC(brcmstb_pm_do_s2)
+
+ /* Place literal pool here */
+ .ltorg
+
+ENTRY(brcmstb_pm_do_s2_sz)
+ .word . - brcmstb_pm_do_s2
diff --git a/drivers/soc/bcm/brcmstb/pm/s2-mips.S b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
new file mode 100644
index 000000000000..27a14bc46043
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s2-mips.S
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+
+/*
+ * a0: u32 params array
+ */
+LEAF(brcm_pm_do_s2)
+
+ subu sp, 64
+ sw ra, 0(sp)
+ sw s0, 4(sp)
+ sw s1, 8(sp)
+ sw s2, 12(sp)
+ sw s3, 16(sp)
+ sw s4, 20(sp)
+ sw s5, 24(sp)
+ sw s6, 28(sp)
+ sw s7, 32(sp)
+
+ /*
+ * Dereference the params array
+ * s0: AON_CTRL base register
+ * s1: DDR_PHY base register
+ * s2: TIMERS base register
+ * s3: I-Cache line size
+ * s4: Restart vector address
+ * s5: Restart vector size
+ */
+ move t0, a0
+
+ lw s0, 0(t0)
+ lw s1, 4(t0)
+ lw s2, 8(t0)
+ lw s3, 12(t0)
+ lw s4, 16(t0)
+ lw s5, 20(t0)
+
+ /* Lock this asm section into the I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x1c, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Lock the interrupt vector into the I-cache */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x1c, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ sync
+
+ /* Power down request */
+ li t0, PM_S2_COMMAND
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+ sw t0, AON_CTRL_PM_CTRL(s0)
+ lw t0, AON_CTRL_PM_CTRL(s0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+ /* Save cp0 sr for restoring later */
+ move s6, t0
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+ /* Wait for memc0 */
+1: lw t0, DDR40_PHY_CONTROL_REGS_0_PLL_STATUS(s1)
+ andi t0, 1
+ beqz t0, 1b
+ nop
+
+ /* 1ms delay needed for stable recovery */
+ /* Use TIMER1 to count 1 ms */
+ li t0, RESET_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ li t0, START_TIMER
+ sw t0, TIMER_TIMER1_CTRL(s2)
+ lw t0, TIMER_TIMER1_CTRL(s2)
+
+ /* Prepare delay */
+ li t0, TIMER_MASK
+ lw t1, TIMER_TIMER1_STAT(s2)
+ and t1, t0
+ /* 1ms delay */
+ addi t1, 27000
+
+ /* Wait for the timer value to exceed t1 */
+1: lw t0, TIMER_TIMER1_STAT(s2)
+ sgtu t2, t1, t0
+ bnez t2, 1b
+ nop
+
+ /* Power back up */
+ li t1, 1
+ sw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+ lw t1, AON_CTRL_HOST_MISC_CMDS(s0)
+
+ sw zero, AON_CTRL_PM_CTRL(s0)
+ lw zero, AON_CTRL_PM_CTRL(s0)
+
+ /* Unlock I-cache */
+ addiu t1, s3, -1
+ not t1
+
+ la t0, brcm_pm_do_s2
+ and t0, t1
+
+ la t2, asm_end
+ and t2, t1
+
+1: cache 0x00, 0(t0)
+ bne t0, t2, 1b
+ addu t0, s3
+
+ /* Unlock interrupt vector */
+ move t0, zero
+
+2: move t1, s4
+ cache 0x00, 0(t1)
+ addu t1, s3
+ addu t0, s3
+ ble t0, s5, 2b
+ nop
+
+ /* Restore cp0 sr */
+ sync
+ nop
+ mtc0 s6, CP0_STATUS
+ nop
+
+ /* Set return value to success */
+ li v0, 0
+
+ /* Return to caller */
+ lw s7, 32(sp)
+ lw s6, 28(sp)
+ lw s5, 24(sp)
+ lw s4, 20(sp)
+ lw s3, 16(sp)
+ lw s2, 12(sp)
+ lw s1, 8(sp)
+ lw s0, 4(sp)
+ lw ra, 0(sp)
+ addiu sp, 64
+
+ jr ra
+ nop
+END(brcm_pm_do_s2)
+
+ .globl asm_end
+asm_end:
+ nop
+
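To make the register assignments in the "Dereference the params array" comment concrete, here is a hedged sketch of the parameter block the routine expects in a0. The struct name and the call site are illustrative assumptions; only the word order, which must match the lw offsets above, is taken from the assembly:

#include <linux/types.h>

struct brcm_pm_s2_params {
	u32 aon_ctrl_base;	/* loaded into s0 */
	u32 ddr_phy_base;	/* loaded into s1 */
	u32 timers_base;	/* loaded into s2 */
	u32 icache_line_size;	/* loaded into s3 */
	u32 restart_vec_addr;	/* loaded into s4 */
	u32 restart_vec_size;	/* loaded into s5 */
};

extern int brcm_pm_do_s2(u32 *params);

/* Hypothetical call site: the assembly reads the six words in order */
static int brcmstb_mips_enter_s2(struct brcm_pm_s2_params *p)
{
	return brcm_pm_do_s2((u32 *)p);
}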
diff --git a/drivers/soc/bcm/brcmstb/pm/s3-mips.S b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
new file mode 100644
index 000000000000..1242308a8868
--- /dev/null
+++ b/drivers/soc/bcm/brcmstb/pm/s3-mips.S
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+
+#include "pm.h"
+
+ .text
+ .set noreorder
+ .align 5
+ .global s3_reentry
+
+/*
+ * a0: AON_CTRL base register
+ * a1: D-Cache line size
+ */
+LEAF(brcm_pm_do_s3)
+
+ /* Get the address of s3_context */
+ la t0, gp_regs
+ sw ra, 0(t0)
+ sw s0, 4(t0)
+ sw s1, 8(t0)
+ sw s2, 12(t0)
+ sw s3, 16(t0)
+ sw s4, 20(t0)
+ sw s5, 24(t0)
+ sw s6, 28(t0)
+ sw s7, 32(t0)
+ sw gp, 36(t0)
+ sw sp, 40(t0)
+ sw fp, 44(t0)
+
+ /* Save CP0 Status */
+ mfc0 t1, CP0_STATUS
+ sw t1, 48(t0)
+
+ /* Write-back gp registers - cache will be gone */
+ addiu t1, a1, -1
+ not t1
+ and t0, t1
+
+ /* Flush at least 64 bytes */
+ addiu t2, t0, 64
+ and t2, t1
+
+1: cache 0x17, 0(t0)
+ bne t0, t2, 1b
+ addu t0, a1
+
+ /* Drop to deep standby */
+ li t1, PM_WARM_CONFIG
+ sw zero, AON_CTRL_PM_CTRL(a0)
+ lw zero, AON_CTRL_PM_CTRL(a0)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ li t1, (PM_WARM_CONFIG | PM_PWR_DOWN)
+ sw t1, AON_CTRL_PM_CTRL(a0)
+ lw t1, AON_CTRL_PM_CTRL(a0)
+
+ /* Enable CP0 interrupt 2 and wait for interrupt */
+ mfc0 t0, CP0_STATUS
+
+ li t1, ~(ST0_IM | ST0_IE)
+ and t0, t1
+ ori t0, STATUSF_IP2
+ mtc0 t0, CP0_STATUS
+ nop
+ nop
+ nop
+ ori t0, ST0_IE
+ mtc0 t0, CP0_STATUS
+
+ /* Wait for interrupt */
+ wait
+ nop
+
+s3_reentry:
+
+ /* Clear call/return stack */
+ li t0, (0x06 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ /* Clear jump target buffer */
+ li t0, (0x04 << 16)
+ mtc0 t0, $22, 2
+ ssnop
+ ssnop
+ ssnop
+
+ sync
+ nop
+
+ /* Setup mmu defaults */
+ mtc0 zero, CP0_WIRED
+ mtc0 zero, CP0_ENTRYHI
+ li k0, PM_DEFAULT_MASK
+ mtc0 k0, CP0_PAGEMASK
+
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+ nop
+
+ /* Restore general purpose registers */
+ la t0, gp_regs
+ lw fp, 44(t0)
+ lw sp, 40(t0)
+ lw gp, 36(t0)
+ lw s7, 32(t0)
+ lw s6, 28(t0)
+ lw s5, 24(t0)
+ lw s4, 20(t0)
+ lw s3, 16(t0)
+ lw s2, 12(t0)
+ lw s1, 8(t0)
+ lw s0, 4(t0)
+ lw ra, 0(t0)
+
+ /* Restore CP0 status */
+ lw t1, 48(t0)
+ mtc0 t1, CP0_STATUS
+
+ /* Return to caller */
+ li v0, 0
+ jr ra
+ nop
+
+END(brcm_pm_do_s3)
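The S3 path above spills its context into a gp_regs save area, enters deep standby, and resumes at s3_reentry once the warm-boot firmware jumps back in. Below is a hedged sketch of the C-side pieces this implies; the symbol names come from the assembly, but the storage layout and the call site are assumptions:

#include <linux/types.h>

/* Save area referenced by "la t0, gp_regs" above: ra, s0-s7, gp, sp, fp
 * and CP0 Status, one 32-bit word each at offsets 0..48.
 */
u32 gp_regs[13];

extern int brcm_pm_do_s3(void __iomem *aon_ctrl, u32 dcache_line_size);
extern char s3_reentry[];

static int brcmstb_mips_enter_s3(void __iomem *aon_ctrl, u32 dcache_linesz)
{
	/* The warm-boot vector is assumed to have been pointed at s3_reentry */
	return brcm_pm_do_s3(aon_ctrl, dcache_linesz);
}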
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 95d77ec5c5d7..5abb08ffb74d 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Marvell Dove PMU support
*/
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 6af7a11f09a5..d89a6a80c8ef 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -213,6 +213,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
{ .compatible = "fsl,ls1021a-dcfg", },
{ .compatible = "fsl,ls1043a-dcfg", },
{ .compatible = "fsl,ls2080a-dcfg", },
+ { .compatible = "fsl,ls1088a-dcfg", },
{}
};
MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
index 757033c0586c..fb4e6bf0a0c4 100644
--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,6 +1,6 @@
menuconfig FSL_DPAA
bool "Freescale DPAA 1.x support"
- depends on FSL_SOC_BOOKE
+ depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
select GENERIC_ALLOCATOR
help
The Freescale Data Path Acceleration Architecture (DPAA) is a set of
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
index 7ae199f1664e..811312ad526f 100644
--- a/drivers/soc/fsl/qbman/Makefile
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -1,6 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
bman_portal.o qman_portal.o \
- bman.o qman.o
+ bman.o qman.o dpaa_sys.o
obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
bman-test-y = bman_test.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index a3d6d7cfa929..f9485cedc648 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -35,6 +35,27 @@
/* Portal register assists */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IER 0x3e40
+#define BM_REG_ISDR 0x3e80
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#else
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH 0x0000
#define BM_REG_RCR_CI_CINH 0x0004
@@ -53,6 +74,7 @@
#define BM_CL_RCR 0x1000
#define BM_CL_RCR_PI_CENA 0x3000
#define BM_CL_RCR_CI_CENA 0x3100
+#endif
/*
* Portal modes.
@@ -154,7 +176,8 @@ struct bm_mc {
};
struct bm_addr {
- void __iomem *ce; /* cache-enabled */
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* Same as above but for direct access */
void __iomem *ci; /* cache-inhibited */
};
@@ -167,12 +190,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+ return ioread32be(p->addr.ci + offset);
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+ iowrite32be(val, p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +211,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
struct bman_portal {
@@ -408,7 +431,7 @@ static int bm_mc_init(struct bm_portal *portal)
mc->cr = portal->addr.ce + BM_CL_CR;
mc->rr = portal->addr.ce + BM_CL_RR0;
- mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
0 : 1;
mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -466,7 +489,7 @@ static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering...
*/
- if (!__raw_readb(&rr->verb)) {
+ if (!rr->verb) {
dpaa_invalidate_touch_ro(rr);
return NULL;
}
@@ -512,8 +535,9 @@ static int bman_create_portal(struct bman_portal *portal,
* config, everything that follows depends on it and "config" is more
* for (de)reference...
*/
- p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
- p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
dev_err(c->dev, "RCR initialisation failed\n");
goto fail_rcr;
@@ -607,7 +631,7 @@ int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
unsigned long irqflags;
local_irq_save(irqflags);
- set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ p->irq_sources |= bits & BM_PIRQ_VISIBLE;
bm_out(&p->p, BM_REG_IER, p->irq_sources);
local_irq_restore(irqflags);
return 0;
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index eaa9585c7347..05c42235dd41 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -201,6 +201,21 @@ static int fsl_bman_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /*
+ * If the FBPR memory wasn't defined using the qbman compatible string,
+ * try the of_reserved_mem_device method instead
+ */
+ if (!fbpr_a) {
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+
+ dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
bm_set_memory(fbpr_a, fbpr_sz);
err_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 39b39c8f1399..2f71f7df3465 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -91,7 +91,6 @@ static int bman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct bm_portal_config *pcfg;
struct resource *addr_phys[2];
- void __iomem *va;
int irq, cpu;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
@@ -123,23 +122,21 @@ static int bman_portal_probe(struct platform_device *pdev)
}
pcfg->irq = irq;
- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1;
}
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va) {
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
}
- pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
spin_lock(&bman_lock);
cpu = cpumask_next_zero(-1, &portal_cpus);
if (cpu >= nr_cpu_ids) {
@@ -164,9 +161,9 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
err_portal_init:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+ iounmap(pcfg->addr_virt_ci);
err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ memunmap(pcfg->addr_virt_ce);
err_ioremap1:
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
index f6896a2f6d90..751ce90383b7 100644
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -46,11 +46,9 @@ extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
extern struct gen_pool *bm_bpalloc;
struct bm_portal_config {
- /*
- * Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited.
- */
- void __iomem *addr_virt[2];
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
/* Allow these to be joined in lists */
struct list_head list;
struct device *dev;
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
new file mode 100644
index 000000000000..9436aa83ff1b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -0,0 +1,78 @@
+/* Copyright 2017 NXP Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of NXP Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "dpaa_sys.h"
+
+/*
+ * Initialize a device's private memory region
+ */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size)
+{
+ int ret;
+ struct device_node *mem_node;
+ u64 size64;
+
+ ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+ if (ret) {
+ dev_err(dev,
+ "of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+ idx, ret);
+ return -ENODEV;
+ }
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (mem_node) {
+ ret = of_property_read_u64(mem_node, "size", &size64);
+ if (ret) {
+ dev_err(dev, "of_property_read_u64 failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ *size = size64;
+ } else {
+ dev_err(dev, "No memory-region found for index %d\n", idx);
+ return -ENODEV;
+ }
+
+ if (!dma_zalloc_coherent(dev, *size, addr, 0)) {
+ dev_err(dev, "DMA Alloc memory failed\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Disassociate the reserved memory area from the device
+ * because a device can only have one DMA memory area. This
+ * should be fine since the memory is allocated and initialized
+ * and only ever accessed by the QBMan device from now on
+ */
+ of_reserved_mem_device_release(dev);
+ return 0;
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index 2ce394aa4c95..9f379000da85 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -44,23 +44,21 @@
#include <linux/prefetch.h>
#include <linux/genalloc.h>
#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>
/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
#define DPAA_PORTAL_CE 0
#define DPAA_PORTAL_CI 1
-#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
-#error "Unsupported Cacheline Size"
-#endif
-
static inline void dpaa_flush(void *p)
{
+ /*
+ * Only PPC needs to flush the cache currently - on ARM the mapping
+ * is non-cacheable
+ */
#ifdef CONFIG_PPC
flush_dcache_range((unsigned long)p, (unsigned long)p+64);
-#elif defined(CONFIG_ARM32)
- __cpuc_flush_dcache_area(p, 64);
-#elif defined(CONFIG_ARM64)
- __flush_dcache_area(p, 64);
#endif
}
@@ -102,4 +100,15 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
/* Offset applied to genalloc pools due to zero being an error return */
#define DPAA_GENALLOC_OFF 0x80000000
+/* Initialize the device's private memory region */
+int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
+ size_t *size);
+
+/* memremap() attributes for different platforms */
+#ifdef CONFIG_PPC
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WB
+#else
+#define QBMAN_MEMREMAP_ATTR MEMREMAP_WC
+#endif
+
#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 18eefc3f1abe..e4f5bb056fd2 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -41,6 +41,43 @@
/* Portal register assists */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IER 0x3640
+#define QM_REG_ISDR 0x3680
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH 0x0000
#define QM_REG_EQCR_CI_CINH 0x0004
@@ -75,6 +112,7 @@
#define QM_CL_CR 0x3800
#define QM_CL_RR0 0x3900
#define QM_CL_RR1 0x3940
+#endif
/*
* BTW, the drivers (and h/w programming model) already obtain the required
@@ -300,7 +338,8 @@ struct qm_mc {
};
struct qm_addr {
- void __iomem *ce; /* cache-enabled */
+ void *ce; /* cache-enabled */
+ __be32 *ce_be; /* same value as above but for direct access */
void __iomem *ci; /* cache-inhibited */
};
@@ -321,12 +360,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ci + offset));
+ return ioread32be(p->addr.ci + offset);
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
+ iowrite32be(val, p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -342,7 +381,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return be32_to_cpu(__raw_readl(p->addr.ce + offset));
+ return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
/* --- EQCR API --- */
@@ -646,11 +685,7 @@ static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
*/
dpaa_invalidate_touch_ro(res);
#endif
- /*
- * when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads".
- */
- if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
if (!dqrr->pi)
dqrr->vbit ^= QM_DQRR_VERB_VBIT;
@@ -777,11 +812,8 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
DPAA_ASSERT(mr->pmode == qm_mr_pvb);
- /*
- * when accessing 'verb', use __raw_readb() to ensure that compiler
- * inlining doesn't try to optimise out "excess reads".
- */
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+
+ if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT;
@@ -822,7 +854,7 @@ static inline int qm_mc_init(struct qm_portal *portal)
mc->cr = portal->addr.ce + QM_CL_CR;
mc->rr = portal->addr.ce + QM_CL_RR0;
- mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ mc->rridx = (mc->cr->_ncw_verb & QM_MCC_VERB_VBIT)
? 0 : 1;
mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
@@ -880,7 +912,7 @@ static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
* its command is submitted and completed. This includes the valid-bit,
* in case you were wondering...
*/
- if (!__raw_readb(&rr->verb)) {
+ if (!rr->verb) {
dpaa_invalidate_touch_ro(rr);
return NULL;
}
@@ -909,12 +941,12 @@ static inline int qm_mc_result_timeout(struct qm_portal *portal,
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
- set_bits(mask, &fq->flags);
+ fq->flags |= mask;
}
static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
- clear_bits(mask, &fq->flags);
+ fq->flags &= ~mask;
}
static inline int fq_isset(struct qman_fq *fq, u32 mask)
@@ -1084,11 +1116,7 @@ loop:
* entries well before the ring has been fully consumed, so
* we're being *really* paranoid here.
*/
- u64 now, then = jiffies;
-
- do {
- now = jiffies;
- } while ((then + 10000) > now);
+ msleep(1);
msg = qm_mr_current(p);
if (!msg)
return 0;
@@ -1124,8 +1152,9 @@ static int qman_create_portal(struct qman_portal *portal,
* config, everything that follows depends on it and "config" is more
* for (de)reference
*/
- p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
- p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ p->addr.ce = c->addr_virt_ce;
+ p->addr.ce_be = c->addr_virt_ce;
+ p->addr.ci = c->addr_virt_ci;
/*
* If CI-stashing is used, the current defaults use a threshold of 3,
* and stash with high-than-DQRR priority.
@@ -1566,7 +1595,7 @@ void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
unsigned long irqflags;
local_irq_save(irqflags);
- set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ p->irq_sources |= bits & QM_PIRQ_VISIBLE;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
local_irq_restore(irqflags);
}
@@ -1589,7 +1618,7 @@ void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
*/
local_irq_save(irqflags);
bits &= QM_PIRQ_VISIBLE;
- clear_bits(bits, &p->irq_sources);
+ p->irq_sources &= ~bits;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
ier = qm_in(&p->p, QM_REG_IER);
/*
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 835ce947ffca..79cba58387a5 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
}
/*
- * Ideally we would use the DMA API to turn rmem->base into a DMA address
- * (especially if iommu translations ever get involved). Unfortunately, the
- * DMA API currently does not allow mapping anything that is not backed with
- * a struct page.
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
*/
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;
+#ifdef CONFIG_PPC
+/*
+ * Support for PPC device tree backward compatibility when the compatible
+ * string is set to "fsl,qman-fqd" or "fsl,qman-pfdr"
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ if (!tmpp)
+ return -ENOMEM;
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
static int qman_fqd(struct reserved_mem *rmem)
{
fqd_a = rmem->base;
fqd_sz = rmem->size;
WARN_ON(!(fqd_a && fqd_sz));
-
return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
@@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem *rmem)
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+#endif
+
static unsigned int qm_get_fqid_maxcnt(void)
{
return fqd_sz / 64;
}
-/*
- * Flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static int zero_priv_mem(struct device *dev, struct device_node *node,
- phys_addr_t addr, size_t sz)
-{
- /* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-
- if (!tmpp)
- return -ENOMEM;
-
- memset_io(tmpp, 0, sz);
- flush_dcache_range((unsigned long)tmpp,
- (unsigned long)tmpp + sz);
- iounmap(tmpp);
-
- return 0;
-}
-
static void log_edata_bits(struct device *dev, u32 bit_count)
{
u32 i, j, mask = 0xffffffff;
@@ -717,6 +719,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
qman_ip_rev = QMAN_REV30;
else if (major == 3 && minor == 1)
qman_ip_rev = QMAN_REV31;
+ else if (major == 3 && minor == 2)
+ qman_ip_rev = QMAN_REV32;
else {
dev_err(dev, "Unknown QMan version\n");
return -ENODEV;
@@ -727,10 +731,41 @@ static int fsl_qman_probe(struct platform_device *pdev)
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
- ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
- WARN_ON(ret);
- if (ret)
- return -ENODEV;
+ if (fqd_a) {
+#ifdef CONFIG_PPC
+ /*
+ * For PPC backward DT compatibility, the
+ * FQD memory MUST be zeroed by software
+ */
+ zero_priv_mem(fqd_a, fqd_sz);
+#else
+ WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+ } else {
+ /*
+ * The order of the memory regions is assumed to be FQD followed by
+ * PFDR; to ensure allocations come from the correct regions, the
+ * driver initializes and then allocates each piece in order
+ */
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+ if (!pfdr_a) {
+ /* Setup PFDR memory */
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ if (ret) {
+ dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
+ ret);
+ return -ENODEV;
+ }
+ }
+ dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
ret = qman_init_ccsr(dev);
if (ret) {
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index cbacdf4f98ed..a120002b630e 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -224,7 +224,6 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- void __iomem *va;
int irq, cpu, err;
u32 val;
@@ -262,23 +261,21 @@ static int qman_portal_probe(struct platform_device *pdev)
}
pcfg->irq = irq;
- va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va) {
- dev_err(dev, "ioremap::CE failed\n");
+ pcfg->addr_virt_ce = memremap(addr_phys[0]->start,
+ resource_size(addr_phys[0]),
+ QBMAN_MEMREMAP_ATTR);
+ if (!pcfg->addr_virt_ce) {
+ dev_err(dev, "memremap::CE failed\n");
goto err_ioremap1;
}
- pcfg->addr_virt[DPAA_PORTAL_CE] = va;
-
- va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
- _PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va) {
+ pcfg->addr_virt_ci = ioremap(addr_phys[1]->start,
+ resource_size(addr_phys[1]));
+ if (!pcfg->addr_virt_ci) {
dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
}
- pcfg->addr_virt[DPAA_PORTAL_CI] = va;
-
pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock);
@@ -310,9 +307,9 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
err_portal_init:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
+ iounmap(pcfg->addr_virt_ci);
err_ioremap2:
- iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+ memunmap(pcfg->addr_virt_ce);
err_ioremap1:
return -ENXIO;
}
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5fe9faf6232e..75a8f905f8f7 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -28,8 +28,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "dpaa_sys.h"
#include <soc/fsl/qman.h>
@@ -155,11 +153,9 @@ static inline void qman_cgrs_xor(struct qman_cgrs *dest,
void qman_init_cgr_all(void);
struct qm_portal_config {
- /*
- * Corenet portal addresses;
- * [0]==cache-enabled, [1]==cache-inhibited.
- */
- void __iomem *addr_virt[2];
+ /* Portal addresses */
+ void *addr_virt_ce;
+ void __iomem *addr_virt_ci;
struct device *dev;
struct iommu_domain *iommu_domain;
/* Allow these to be joined in lists */
@@ -187,6 +183,7 @@ struct qm_portal_config {
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
index d5f8cb2260dc..41bdbc48cade 100644
--- a/drivers/soc/fsl/qbman/qman_test.h
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -30,7 +30,5 @@
#include "qman_priv.h"
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
int qman_test_stash(void);
int qman_test_api(void);
diff --git a/drivers/soc/fsl/qe/Makefile b/drivers/soc/fsl/qe/Makefile
index 2031d385bc7e..55a555304f3a 100644
--- a/drivers/soc/fsl/qe/Makefile
+++ b/drivers/soc/fsl/qe/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux ppc-specific parts of QE
#
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 609bb3424c14..a7d0667338f2 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -1,9 +1,11 @@
#
# MediaTek SoC drivers
#
+menu "MediaTek SoC drivers"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
- depends on ARCH_MEDIATEK || COMPILE_TEST
select REGMAP
help
Say yes here to add support for the MediaTek INFRACFG controller. The
@@ -12,7 +14,6 @@ config MTK_INFRACFG
config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support"
- depends on ARCH_MEDIATEK
depends on RESET_CONTROLLER
select REGMAP
help
@@ -22,7 +23,6 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
- depends on ARCH_MEDIATEK || COMPILE_TEST
default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
@@ -30,3 +30,5 @@ config MTK_SCPSYS
help
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+
+endmenu
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index c2048382830f..e9e054a15b7d 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -70,6 +70,12 @@
PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
+/* Group of bits used to indicate slave capabilities */
+#define PWRAP_SLV_CAP_SPI BIT(0)
+#define PWRAP_SLV_CAP_DUALIO BIT(1)
+#define PWRAP_SLV_CAP_SECURITY BIT(2)
+#define HAS_CAP(_c, _x) (((_c) & (_x)) == (_x))
+
/* defines for slave device wrapper registers */
enum dew_regs {
PWRAP_DEW_BASE,
@@ -208,6 +214,36 @@ enum pwrap_regs {
PWRAP_ADC_RDATA_ADDR1,
PWRAP_ADC_RDATA_ADDR2,
+ /* MT7622 only regs */
+ PWRAP_EINT_STA0_ADR,
+ PWRAP_EINT_STA1_ADR,
+ PWRAP_STA,
+ PWRAP_CLR,
+ PWRAP_DVFS_ADR8,
+ PWRAP_DVFS_WDATA8,
+ PWRAP_DVFS_ADR9,
+ PWRAP_DVFS_WDATA9,
+ PWRAP_DVFS_ADR10,
+ PWRAP_DVFS_WDATA10,
+ PWRAP_DVFS_ADR11,
+ PWRAP_DVFS_WDATA11,
+ PWRAP_DVFS_ADR12,
+ PWRAP_DVFS_WDATA12,
+ PWRAP_DVFS_ADR13,
+ PWRAP_DVFS_WDATA13,
+ PWRAP_DVFS_ADR14,
+ PWRAP_DVFS_WDATA14,
+ PWRAP_DVFS_ADR15,
+ PWRAP_DVFS_WDATA15,
+ PWRAP_EXT_CK,
+ PWRAP_ADC_RDATA_ADDR,
+ PWRAP_GPS_STA,
+ PWRAP_SW_RST,
+ PWRAP_DVFS_STEP_CTRL0,
+ PWRAP_DVFS_STEP_CTRL1,
+ PWRAP_DVFS_STEP_CTRL2,
+ PWRAP_SPI2_CTRL,
+
/* MT8135 only regs */
PWRAP_CSHEXT,
PWRAP_EVENT_IN_EN,
@@ -330,6 +366,118 @@ static int mt2701_regs[] = {
[PWRAP_ADC_RDATA_ADDR2] = 0x154,
};
+static int mt7622_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xC,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1C,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2C,
+ [PWRAP_EINT_STA0_ADR] = 0x30,
+ [PWRAP_EINT_STA1_ADR] = 0x34,
+ [PWRAP_STA] = 0x38,
+ [PWRAP_CLR] = 0x3C,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4C,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5C,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6C,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7C,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8C,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9C,
+ [PWRAP_WACS2_CMD] = 0xA0,
+ [PWRAP_WACS2_RDATA] = 0xA4,
+ [PWRAP_WACS2_VLDCLR] = 0xA8,
+ [PWRAP_INT_EN] = 0xAC,
+ [PWRAP_INT_FLG_RAW] = 0xB0,
+ [PWRAP_INT_FLG] = 0xB4,
+ [PWRAP_INT_CLR] = 0xB8,
+ [PWRAP_SIG_ADR] = 0xBC,
+ [PWRAP_SIG_MODE] = 0xC0,
+ [PWRAP_SIG_VALUE] = 0xC4,
+ [PWRAP_SIG_ERRVAL] = 0xC8,
+ [PWRAP_CRC_EN] = 0xCC,
+ [PWRAP_TIMER_EN] = 0xD0,
+ [PWRAP_TIMER_STA] = 0xD4,
+ [PWRAP_WDT_UNIT] = 0xD8,
+ [PWRAP_WDT_SRC_EN] = 0xDC,
+ [PWRAP_WDT_FLG] = 0xE0,
+ [PWRAP_DEBUG_INT_SEL] = 0xE4,
+ [PWRAP_DVFS_ADR0] = 0xE8,
+ [PWRAP_DVFS_WDATA0] = 0xEC,
+ [PWRAP_DVFS_ADR1] = 0xF0,
+ [PWRAP_DVFS_WDATA1] = 0xF4,
+ [PWRAP_DVFS_ADR2] = 0xF8,
+ [PWRAP_DVFS_WDATA2] = 0xFC,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10C,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11C,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_DVFS_ADR8] = 0x128,
+ [PWRAP_DVFS_WDATA8] = 0x12C,
+ [PWRAP_DVFS_ADR9] = 0x130,
+ [PWRAP_DVFS_WDATA9] = 0x134,
+ [PWRAP_DVFS_ADR10] = 0x138,
+ [PWRAP_DVFS_WDATA10] = 0x13C,
+ [PWRAP_DVFS_ADR11] = 0x140,
+ [PWRAP_DVFS_WDATA11] = 0x144,
+ [PWRAP_DVFS_ADR12] = 0x148,
+ [PWRAP_DVFS_WDATA12] = 0x14C,
+ [PWRAP_DVFS_ADR13] = 0x150,
+ [PWRAP_DVFS_WDATA13] = 0x154,
+ [PWRAP_DVFS_ADR14] = 0x158,
+ [PWRAP_DVFS_WDATA14] = 0x15C,
+ [PWRAP_DVFS_ADR15] = 0x160,
+ [PWRAP_DVFS_WDATA15] = 0x164,
+ [PWRAP_SPMINF_STA] = 0x168,
+ [PWRAP_CIPHER_KEY_SEL] = 0x16C,
+ [PWRAP_CIPHER_IV_SEL] = 0x170,
+ [PWRAP_CIPHER_EN] = 0x174,
+ [PWRAP_CIPHER_RDY] = 0x178,
+ [PWRAP_CIPHER_MODE] = 0x17C,
+ [PWRAP_CIPHER_SWRST] = 0x180,
+ [PWRAP_DCM_EN] = 0x184,
+ [PWRAP_DCM_DBC_PRD] = 0x188,
+ [PWRAP_EXT_CK] = 0x18C,
+ [PWRAP_ADC_CMD_ADDR] = 0x190,
+ [PWRAP_PWRAP_ADC_CMD] = 0x194,
+ [PWRAP_ADC_RDATA_ADDR] = 0x198,
+ [PWRAP_GPS_STA] = 0x19C,
+ [PWRAP_SW_RST] = 0x1A0,
+ [PWRAP_DVFS_STEP_CTRL0] = 0x238,
+ [PWRAP_DVFS_STEP_CTRL1] = 0x23C,
+ [PWRAP_DVFS_STEP_CTRL2] = 0x240,
+ [PWRAP_SPI2_CTRL] = 0x244,
+};
+
static int mt8173_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -487,18 +635,31 @@ static int mt8135_regs[] = {
enum pmic_type {
PMIC_MT6323,
+ PMIC_MT6380,
PMIC_MT6397,
};
enum pwrap_type {
PWRAP_MT2701,
+ PWRAP_MT7622,
PWRAP_MT8135,
PWRAP_MT8173,
};
+struct pmic_wrapper;
struct pwrap_slv_type {
const u32 *dew_regs;
enum pmic_type type;
+ const struct regmap_config *regmap;
+ /* Flags indicating the capabilities of the target slave */
+ u32 caps;
+ /*
+ * pwrap operations are highly dependent on the PMIC type, so these
+ * function pointers add the flexibility to pick the right accessors
+ * for the slave type detected through the device tree.
+ */
+ int (*pwrap_read)(struct pmic_wrapper *wrp, u32 adr, u32 *rdata);
+ int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata);
};
struct pmic_wrapper {
@@ -522,7 +683,7 @@ struct pmic_wrapper_type {
u32 int_en_all;
u32 spi_w;
u32 wdt_src;
- int has_bridge:1;
+ unsigned int has_bridge:1;
int (*init_reg_clock)(struct pmic_wrapper *wrp);
int (*init_soc_specific)(struct pmic_wrapper *wrp);
};
@@ -593,7 +754,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
} while (1);
}
-static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
int ret;
@@ -603,14 +764,54 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
return ret;
}
- pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
- PWRAP_WACS2_CMD);
+ pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
+ return 0;
+}
+
+static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
+{
+ int ret, msb;
+
+ *rdata = 0;
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, ((msb << 30) | (adr << 16)),
+ PWRAP_WACS2_CMD);
+
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
+ if (ret)
+ return ret;
+
+ *rdata += (PWRAP_GET_WACS_RDATA(pwrap_readl(wrp,
+ PWRAP_WACS2_RDATA)) << (16 * msb));
+
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ }
return 0;
}
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{
+ return wrp->slave->pwrap_read(wrp, adr, rdata);
+}
+
+static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
int ret;
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
@@ -619,19 +820,46 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
return ret;
}
- pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
+ pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
+ PWRAP_WACS2_CMD);
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr);
- if (ret)
- return ret;
+ return 0;
+}
- *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
+static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ int ret, msb, rdata;
- pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+ for (msb = 0; msb < 2; msb++) {
+ ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
+ return ret;
+ }
+
+ pwrap_writel(wrp, (1 << 31) | (msb << 30) | (adr << 16) |
+ ((wdata >> (msb * 16)) & 0xffff),
+ PWRAP_WACS2_CMD);
+
+ /*
+ * The hardware requires a pwrap_read operation to synchronize the
+ * two successive 16-bit pwrap_writel operations that compose one
+ * 32-bit bus write; without it, the write of the lower 16 bits
+ * fails.
+ */
+ if (!msb)
+ pwrap_read(wrp, adr, &rdata);
+ }
return 0;
}
+static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
+{
+ return wrp->slave->pwrap_write(wrp, adr, wdata);
+}
+
static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata)
{
return pwrap_read(context, adr, rdata);
@@ -711,23 +939,75 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
return 0;
}
-static int pwrap_mt8135_init_reg_clock(struct pmic_wrapper *wrp)
+static int pwrap_init_dual_io(struct pmic_wrapper *wrp)
{
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+ int ret;
+ u32 rdata;
+
+ /* Enable dual IO mode */
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
+
+ /* Check IDLE & INIT_DONE in advance */
+ ret = pwrap_wait_for_state(wrp,
+ pwrap_is_fsm_idle_and_sync_idle);
+ if (ret) {
+ dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
+ return ret;
+ }
+
+ pwrap_writel(wrp, 1, PWRAP_DIO_EN);
+
+ /* Read Test */
+ pwrap_read(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
+ if (rdata != PWRAP_DEW_READ_TEST_VAL) {
+ dev_err(wrp->dev,
+ "Read failed on DIO mode: 0x%04x!=0x%04x\n",
+ PWRAP_DEW_READ_TEST_VAL, rdata);
+ return -EFAULT;
+ }
return 0;
}
-static int pwrap_mt8173_init_reg_clock(struct pmic_wrapper *wrp)
+/*
+ * pwrap_init_chip_select_ext is used to configure CS extension time for each
+ * phase during data transactions on the pwrap bus.
+ */
+static void pwrap_init_chip_select_ext(struct pmic_wrapper *wrp, u8 hext_write,
+ u8 hext_read, u8 lext_start,
+ u8 lext_end)
{
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ /*
+ * After finishing a write or read transaction, extend the CS high
+ * time to at least hext_write or hext_read cycles of the bus clock,
+ * respectively.
+ */
+ pwrap_writel(wrp, hext_write, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, hext_read, PWRAP_CSHEXT_READ);
+
+ /*
+ * Extend the CS low time after the CSL command and before the CSH
+ * command to at least lext_start and lext_end cycles of the bus
+ * clock, respectively.
+ */
+ pwrap_writel(wrp, lext_start, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, lext_end, PWRAP_CSLEXT_END);
+}
+
+static int pwrap_common_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->master->type) {
+ case PWRAP_MT8173:
+ pwrap_init_chip_select_ext(wrp, 0, 4, 2, 2);
+ break;
+ case PWRAP_MT8135:
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_init_chip_select_ext(wrp, 0, 4, 0, 0);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -737,20 +1017,16 @@ static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
switch (wrp->slave->type) {
case PMIC_MT6397:
pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ pwrap_init_chip_select_ext(wrp, 4, 0, 2, 2);
break;
case PMIC_MT6323:
pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
0x8);
- pwrap_writel(wrp, 0x5, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ pwrap_init_chip_select_ext(wrp, 5, 0, 2, 2);
+ break;
+ default:
break;
}
@@ -794,6 +1070,9 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT8173:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
+ case PWRAP_MT7622:
+ pwrap_writel(wrp, 0, PWRAP_CIPHER_EN);
+ break;
}
/* Config cipher mode @PMIC */
@@ -815,6 +1094,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
0x1);
break;
+ default:
+ break;
}
/* wait for cipher data ready@AP */
@@ -827,7 +1108,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
/* wait for cipher data ready@PMIC */
ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready);
if (ret) {
- dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n");
+ dev_err(wrp->dev,
+ "timeout waiting for cipher data ready@PMIC\n");
return ret;
}
@@ -854,6 +1136,30 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
return 0;
}
+static int pwrap_init_security(struct pmic_wrapper *wrp)
+{
+ int ret;
+
+ /* Enable encryption */
+ ret = pwrap_init_cipher(wrp);
+ if (ret)
+ return ret;
+
+ /* Signature checking - using CRC */
+ if (pwrap_write(wrp,
+ wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
+ return -EFAULT;
+
+ pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
+ pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
+ pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
+ PWRAP_SIG_ADR);
+ pwrap_writel(wrp,
+ wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+
+ return 0;
+}
+
static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
{
/* enable pwrap events and pwrap bridge in AP side */
@@ -911,10 +1217,18 @@ static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
return 0;
}
+static int pwrap_mt7622_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0, PWRAP_STAUPD_PRD);
+ /* enable 2wire SPI master */
+ pwrap_writel(wrp, 0x8000000, PWRAP_SPI2_CTRL);
+
+ return 0;
+}
+
static int pwrap_init(struct pmic_wrapper *wrp)
{
int ret;
- u32 rdata;
reset_control_reset(wrp->rstc);
if (wrp->rstc_bridge)
@@ -926,10 +1240,12 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
}
- /* Reset SPI slave */
- ret = pwrap_reset_spislave(wrp);
- if (ret)
- return ret;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Reset SPI slave */
+ ret = pwrap_reset_spislave(wrp);
+ if (ret)
+ return ret;
+ }
pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
@@ -941,45 +1257,26 @@ static int pwrap_init(struct pmic_wrapper *wrp)
if (ret)
return ret;
- /* Setup serial input delay */
- ret = pwrap_init_sidly(wrp);
- if (ret)
- return ret;
-
- /* Enable dual IO mode */
- pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
-
- /* Check IDLE & INIT_DONE in advance */
- ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
- if (ret) {
- dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret);
- return ret;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SPI)) {
+ /* Setup serial input delay */
+ ret = pwrap_init_sidly(wrp);
+ if (ret)
+ return ret;
}
- pwrap_writel(wrp, 1, PWRAP_DIO_EN);
-
- /* Read Test */
- pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
- if (rdata != PWRAP_DEW_READ_TEST_VAL) {
- dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n",
- PWRAP_DEW_READ_TEST_VAL, rdata);
- return -EFAULT;
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_DUALIO)) {
+ /* Enable dual I/O mode */
+ ret = pwrap_init_dual_io(wrp);
+ if (ret)
+ return ret;
}
- /* Enable encryption */
- ret = pwrap_init_cipher(wrp);
- if (ret)
- return ret;
-
- /* Signature checking - using CRC */
- if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
- return -EFAULT;
-
- pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
- pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
- pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
- PWRAP_SIG_ADR);
- pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+ if (HAS_CAP(wrp->slave->caps, PWRAP_SLV_CAP_SECURITY)) {
+ /* Enable security on bus */
+ ret = pwrap_init_security(wrp);
+ if (ret)
+ return ret;
+ }
if (wrp->master->type == PWRAP_MT8135)
pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
@@ -1023,7 +1320,7 @@ static irqreturn_t pwrap_interrupt(int irqno, void *dev_id)
return IRQ_HANDLED;
}
-static const struct regmap_config pwrap_regmap_config = {
+static const struct regmap_config pwrap_regmap_config16 = {
.reg_bits = 16,
.val_bits = 16,
.reg_stride = 2,
@@ -1032,14 +1329,42 @@ static const struct regmap_config pwrap_regmap_config = {
.max_register = 0xffff,
};
+static const struct regmap_config pwrap_regmap_config32 = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .reg_read = pwrap_regmap_read,
+ .reg_write = pwrap_regmap_write,
+ .max_register = 0xffff,
+};
+
static const struct pwrap_slv_type pmic_mt6323 = {
.dew_regs = mt6323_regs,
.type = PMIC_MT6323,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
+};
+
+static const struct pwrap_slv_type pmic_mt6380 = {
+ .dew_regs = NULL,
+ .type = PMIC_MT6380,
+ .regmap = &pwrap_regmap_config32,
+ .caps = 0,
+ .pwrap_read = pwrap_read32,
+ .pwrap_write = pwrap_write32,
};
static const struct pwrap_slv_type pmic_mt6397 = {
.dew_regs = mt6397_regs,
.type = PMIC_MT6397,
+ .regmap = &pwrap_regmap_config16,
+ .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO |
+ PWRAP_SLV_CAP_SECURITY,
+ .pwrap_read = pwrap_read16,
+ .pwrap_write = pwrap_write16,
};
static const struct of_device_id of_slave_match_tbl[] = {
@@ -1047,6 +1372,12 @@ static const struct of_device_id of_slave_match_tbl[] = {
.compatible = "mediatek,mt6323",
.data = &pmic_mt6323,
}, {
+ /* The MT6380 PMIC only implements a regulator, so we bind it
+ * directly instead of using an MFD.
+ */
+ .compatible = "mediatek,mt6380-regulator",
+ .data = &pmic_mt6380,
+ }, {
.compatible = "mediatek,mt6397",
.data = &pmic_mt6397,
}, {
@@ -1067,6 +1398,18 @@ static const struct pmic_wrapper_type pwrap_mt2701 = {
.init_soc_specific = pwrap_mt2701_init_soc_specific,
};
+static const struct pmic_wrapper_type pwrap_mt7622 = {
+ .regs = mt7622_regs,
+ .type = PWRAP_MT7622,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)BIT(31),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_common_init_reg_clock,
+ .init_soc_specific = pwrap_mt7622_init_soc_specific,
+};
+
static const struct pmic_wrapper_type pwrap_mt8135 = {
.regs = mt8135_regs,
.type = PWRAP_MT8135,
@@ -1075,7 +1418,7 @@ static const struct pmic_wrapper_type pwrap_mt8135 = {
.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
.wdt_src = PWRAP_WDT_SRC_MASK_ALL,
.has_bridge = 1,
- .init_reg_clock = pwrap_mt8135_init_reg_clock,
+ .init_reg_clock = pwrap_common_init_reg_clock,
.init_soc_specific = pwrap_mt8135_init_soc_specific,
};
@@ -1087,7 +1430,7 @@ static const struct pmic_wrapper_type pwrap_mt8173 = {
.spi_w = PWRAP_MAN_CMD_SPI_WRITE,
.wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
.has_bridge = 0,
- .init_reg_clock = pwrap_mt8173_init_reg_clock,
+ .init_reg_clock = pwrap_common_init_reg_clock,
.init_soc_specific = pwrap_mt8173_init_soc_specific,
};
@@ -1096,6 +1439,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt2701-pwrap",
.data = &pwrap_mt2701,
}, {
+ .compatible = "mediatek,mt7622-pwrap",
+ .data = &pwrap_mt7622,
+ }, {
.compatible = "mediatek,mt8135-pwrap",
.data = &pwrap_mt8135,
}, {
@@ -1159,23 +1505,27 @@ static int pwrap_probe(struct platform_device *pdev)
if (IS_ERR(wrp->bridge_base))
return PTR_ERR(wrp->bridge_base);
- wrp->rstc_bridge = devm_reset_control_get(wrp->dev, "pwrap-bridge");
+ wrp->rstc_bridge = devm_reset_control_get(wrp->dev,
+ "pwrap-bridge");
if (IS_ERR(wrp->rstc_bridge)) {
ret = PTR_ERR(wrp->rstc_bridge);
- dev_dbg(wrp->dev, "cannot get pwrap-bridge reset: %d\n", ret);
+ dev_dbg(wrp->dev,
+ "cannot get pwrap-bridge reset: %d\n", ret);
return ret;
}
}
wrp->clk_spi = devm_clk_get(wrp->dev, "spi");
if (IS_ERR(wrp->clk_spi)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_spi));
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_spi));
return PTR_ERR(wrp->clk_spi);
}
wrp->clk_wrap = devm_clk_get(wrp->dev, "wrap");
if (IS_ERR(wrp->clk_wrap)) {
- dev_dbg(wrp->dev, "failed to get clock: %ld\n", PTR_ERR(wrp->clk_wrap));
+ dev_dbg(wrp->dev, "failed to get clock: %ld\n",
+ PTR_ERR(wrp->clk_wrap));
return PTR_ERR(wrp->clk_wrap);
}
@@ -1220,12 +1570,13 @@ static int pwrap_probe(struct platform_device *pdev)
pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
irq = platform_get_irq(pdev, 0);
- ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
- "mt-pmic-pwrap", wrp);
+ ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt,
+ IRQF_TRIGGER_HIGH,
+ "mt-pmic-pwrap", wrp);
if (ret)
goto err_out2;
- wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, &pwrap_regmap_config);
+ wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap);
if (IS_ERR(wrp->regmap)) {
ret = PTR_ERR(wrp->regmap);
goto err_out2;
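A side note on the capability scheme this patch introduces: HAS_CAP() is true only when every queried bit is present, which is what lets pwrap_init() skip whole init phases for slaves such as the MT6380 that advertise no capabilities. A minimal, self-contained illustration (the flag values mirror the ones defined above; the helper function is hypothetical):

#include <linux/bitops.h>
#include <linux/types.h>

#define PWRAP_SLV_CAP_SPI	BIT(0)
#define PWRAP_SLV_CAP_DUALIO	BIT(1)
#define PWRAP_SLV_CAP_SECURITY	BIT(2)
#define HAS_CAP(_c, _x)	(((_c) & (_x)) == (_x))

/* True only if both the SPI and DUALIO bits are set in caps */
static bool slave_needs_dual_io_init(u32 caps)
{
	return HAS_CAP(caps, PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO);
}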
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index e1ce8b1b5090..e570b6af2e6f 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -361,17 +361,6 @@ out:
return ret;
}
-static bool scpsys_active_wakeup(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- struct scp_domain *scpd;
-
- genpd = pd_to_genpd(dev->pm_domain);
- scpd = container_of(genpd, struct scp_domain, genpd);
-
- return scpd->data->active_wakeup;
-}
-
static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
@@ -466,7 +455,8 @@ static struct scp *init_scp(struct platform_device *pdev,
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
- genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ if (scpd->data->active_wakeup)
+ genpd->flags |= GENPD_FLAG_ACTIVE_WAKEUP;
}
return scp;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b00bccddcd3b..b81374bb6713 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -35,6 +35,17 @@ config QCOM_PM
modes. It interfaces with various system drivers to put the cores in
low power modes.
+config QCOM_RMTFS_MEM
+ tristate "Qualcomm Remote Filesystem memory driver"
+ depends on ARCH_QCOM
+ help
+ The Qualcomm remote filesystem memory driver is used for allocating
+ and exposing regions of shared memory with remote processors for the
+ purpose of exchanging sector-data between the remote filesystem
+ service and its clients.
+
+ Say y here if you intend to boot the modem remoteproc.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f151de41eb93..40c56f67e94a 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,7 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
obj-$(CONFIG_QCOM_SMEM) += smem.o
obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
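Before the driver itself, a hedged sketch of how user space would consume the character device registered below. The /dev node name assumes the usual udev naming derived from the "qcom_rmtfs_mem%d" device name, and the client id and offsets are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char sector[512];
	int fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read one sector's worth of data from the start of the region */
	if (pread(fd, sector, sizeof(sector), 0) < 0)
		perror("pread");

	close(fd);
	return 0;
}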
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
new file mode 100644
index 000000000000..ce35ff748adf
--- /dev/null
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/qcom_scm.h>
+
+#define QCOM_RMTFS_MEM_DEV_MAX (MINORMASK + 1)
+
+static dev_t qcom_rmtfs_mem_major;
+
+struct qcom_rmtfs_mem {
+ struct device dev;
+ struct cdev cdev;
+
+ void *base;
+ phys_addr_t addr;
+ phys_addr_t size;
+
+ unsigned int client_id;
+};
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+
+static ssize_t qcom_rmtfs_mem_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ if (attr == &dev_attr_phys_addr)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->addr);
+ if (attr == &dev_attr_size)
+ return sprintf(buf, "%pa\n", &rmtfs_mem->size);
+ if (attr == &dev_attr_client_id)
+ return sprintf(buf, "%d\n", rmtfs_mem->client_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *qcom_rmtfs_mem_attrs[] = {
+ &dev_attr_phys_addr.attr,
+ &dev_attr_size.attr,
+ &dev_attr_client_id.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(qcom_rmtfs_mem);
+
+static int qcom_rmtfs_mem_open(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(inode->i_cdev,
+ struct qcom_rmtfs_mem,
+ cdev);
+
+ get_device(&rmtfs_mem->dev);
+ filp->private_data = rmtfs_mem;
+
+ return 0;
+}
+static ssize_t qcom_rmtfs_mem_read(struct file *filp,
+ char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_to_user(buf, rmtfs_mem->base + *f_pos, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static ssize_t qcom_rmtfs_mem_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ if (*f_pos >= rmtfs_mem->size)
+ return 0;
+
+ if (*f_pos + count >= rmtfs_mem->size)
+ count = rmtfs_mem->size - *f_pos;
+
+ if (copy_from_user(rmtfs_mem->base + *f_pos, buf, count))
+ return -EFAULT;
+
+ *f_pos += count;
+ return count;
+}
+
+static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filp->private_data;
+
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct file_operations qcom_rmtfs_mem_fops = {
+ .owner = THIS_MODULE,
+ .open = qcom_rmtfs_mem_open,
+ .read = qcom_rmtfs_mem_read,
+ .write = qcom_rmtfs_mem_write,
+ .release = qcom_rmtfs_mem_release,
+ .llseek = default_llseek,
+};
+
+static void qcom_rmtfs_mem_release_device(struct device *dev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = container_of(dev,
+ struct qcom_rmtfs_mem,
+ dev);
+
+ kfree(rmtfs_mem);
+}
+
+static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct reserved_mem *rmem;
+ struct qcom_rmtfs_mem *rmtfs_mem;
+ u32 client_id;
+ int ret;
+
+ rmem = of_reserved_mem_lookup(node);
+ if (!rmem) {
+ dev_err(&pdev->dev, "failed to acquire memory region\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "qcom,client-id", &client_id);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse \"qcom,client-id\"\n");
+ return ret;
+ }
+
+ rmtfs_mem = kzalloc(sizeof(*rmtfs_mem), GFP_KERNEL);
+ if (!rmtfs_mem)
+ return -ENOMEM;
+
+ rmtfs_mem->addr = rmem->base;
+ rmtfs_mem->client_id = client_id;
+ rmtfs_mem->size = rmem->size;
+
+ device_initialize(&rmtfs_mem->dev);
+ rmtfs_mem->dev.parent = &pdev->dev;
+ rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
+
+ rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
+ rmtfs_mem->size, MEMREMAP_WC);
+ if (IS_ERR(rmtfs_mem->base)) {
+ dev_err(&pdev->dev, "failed to remap rmtfs_mem region\n");
+ ret = PTR_ERR(rmtfs_mem->base);
+ goto put_device;
+ }
+
+ cdev_init(&rmtfs_mem->cdev, &qcom_rmtfs_mem_fops);
+ rmtfs_mem->cdev.owner = THIS_MODULE;
+
+ dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
+ rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
+
+ ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add cdev: %d\n", ret);
+ goto put_device;
+ }
+
+ rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
+
+ dev_set_drvdata(&pdev->dev, rmtfs_mem);
+
+ return 0;
+
+put_device:
+ put_device(&rmtfs_mem->dev);
+
+ return ret;
+}
+
+static int qcom_rmtfs_mem_remove(struct platform_device *pdev)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = dev_get_drvdata(&pdev->dev);
+
+ cdev_device_del(&rmtfs_mem->cdev, &rmtfs_mem->dev);
+ put_device(&rmtfs_mem->dev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_rmtfs_mem_of_match[] = {
+ { .compatible = "qcom,rmtfs-mem" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_rmtfs_mem_of_match);
+
+static struct platform_driver qcom_rmtfs_mem_driver = {
+ .probe = qcom_rmtfs_mem_probe,
+ .remove = qcom_rmtfs_mem_remove,
+ .driver = {
+ .name = "qcom_rmtfs_mem",
+ .of_match_table = qcom_rmtfs_mem_of_match,
+ },
+};
+
+static int qcom_rmtfs_mem_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
+ QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&qcom_rmtfs_mem_driver);
+ if (ret < 0) {
+ pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
+ unregister_chrdev_region(qcom_rmtfs_mem_major,
+ QCOM_RMTFS_MEM_DEV_MAX);
+ }
+
+ return ret;
+}
+module_init(qcom_rmtfs_mem_init);
+
+static void qcom_rmtfs_mem_exit(void)
+{
+ platform_driver_unregister(&qcom_rmtfs_mem_driver);
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+}
+module_exit(qcom_rmtfs_mem_exit);
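The char device registered above exposes each rmtfs region for ordinary read()/write() access from user space. A minimal client sketch, assuming udev has created /dev/qcom_rmtfs_mem1 for a region with qcom,client-id = 1 (the device path and client id are illustrative, not mandated by the driver):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char sector[512];
	ssize_t n;
	int fd;

	fd = open("/dev/qcom_rmtfs_mem1", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read the first sector-sized chunk of the shared region. */
	n = read(fd, sector, sizeof(sector));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes from rmtfs region\n", n);

	close(fd);
	return 0;
}

The phys_addr, size and client_id attributes created via ATTRIBUTE_GROUPS() above appear in the device's sysfs directory, so the user-space remote filesystem service can relay the region's physical placement to the modem.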
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 18ec52f2078a..0b94d62fad2b 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -52,8 +52,13 @@
*
* Items in the non-cached region are allocated from the start of the partition
* while items in the cached region are allocated from the end. The free area
- * is hence the region between the cached and non-cached offsets.
+ * is hence the region between the cached and non-cached offsets. The header of
+ * cached items comes after the data.
*
+ * Version 12 (SMEM_GLOBAL_PART_VERSION) changes the item alloc/get procedure
+ * for the global heap. A new global partition is created from the global heap
+ * region with partition type (SMEM_GLOBAL_HOST) and the max smem item count is
+ * set by the bootloader.
*
* To synchronize allocations in the shared memory heaps a remote spinlock must
* be held - currently lock number 3 of the sfpb or tcsr is used for this on all
@@ -62,13 +67,13 @@
*/
/*
- * Item 3 of the global heap contains an array of versions for the various
- * software components in the SoC. We verify that the boot loader version is
- * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ * The version member of the smem header contains an array of versions for the
+ * various software components in the SoC. We verify that the boot loader
+ * version is a valid version as a sanity check.
*/
-#define SMEM_ITEM_VERSION 3
-#define SMEM_MASTER_SBL_VERSION_INDEX 7
-#define SMEM_EXPECTED_VERSION 11
+#define SMEM_MASTER_SBL_VERSION_INDEX 7
+#define SMEM_GLOBAL_HEAP_VERSION 11
+#define SMEM_GLOBAL_PART_VERSION 12
/*
* The first 8 items are only to be allocated by the boot loader while
@@ -82,8 +87,11 @@
/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS 0
+/* Processor/host identifier for the global partition */
+#define SMEM_GLOBAL_HOST 0xfffe
+
/* Max number of processors/hosts in a system */
-#define SMEM_HOST_COUNT 9
+#define SMEM_HOST_COUNT 10
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
@@ -140,6 +148,7 @@ struct smem_header {
* @flags: flags for the partition (currently unused)
* @host0: first processor/host with access to this partition
* @host1: second processor/host with access to this partition
+ * @cacheline: alignment for "cached" entries
* @reserved: reserved entries for later use
*/
struct smem_ptable_entry {
@@ -148,7 +157,8 @@ struct smem_ptable_entry {
__le32 flags;
__le16 host0;
__le16 host1;
- __le32 reserved[8];
+ __le32 cacheline;
+ __le32 reserved[7];
};
/**
@@ -213,6 +223,24 @@ struct smem_private_entry {
#define SMEM_PRIVATE_CANARY 0xa5a5
/**
+ * struct smem_info - smem region info located after the table of contents
+ * @magic: magic number, must be SMEM_INFO_MAGIC
+ * @size: size of the smem region
+ * @base_addr: base address of the smem region
+ * @reserved: reserved for future use
+ * @num_items: highest accepted item number
+ */
+struct smem_info {
+ u8 magic[4];
+ __le32 size;
+ __le32 base_addr;
+ __le32 reserved;
+ __le16 num_items;
+};
+
+static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
+
+/**
* struct smem_region - representation of a chunk of memory used for smem
* @aux_base: identifier of aux_mem base
* @virt_base: virtual base address of memory with this aux_mem identifier
@@ -228,8 +256,12 @@ struct smem_region {
* struct qcom_smem - device data for the smem device
* @dev: device pointer
* @hwlock: reference to a hwspinlock
+ * @global_partition: pointer to global partition when in use
+ * @global_cacheline: cacheline size for global partition
* @partitions: list of pointers to partitions affecting the current
* processor/host
+ * @cacheline: list of cacheline sizes for each host
+ * @item_count: max accepted item number
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
*/
@@ -238,21 +270,33 @@ struct qcom_smem {
struct hwspinlock *hwlock;
+ struct smem_partition_header *global_partition;
+ size_t global_cacheline;
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+ size_t cacheline[SMEM_HOST_COUNT];
+ u32 item_count;
unsigned num_regions;
struct smem_region regions[0];
};
static struct smem_private_entry *
-phdr_to_last_private_entry(struct smem_partition_header *phdr)
+phdr_to_last_uncached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
return p + le32_to_cpu(phdr->offset_free_uncached);
}
-static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
+static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr,
+ size_t cacheline)
+{
+ void *p = phdr;
+
+ return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*phdr), cacheline);
+}
+
+static void *phdr_to_last_cached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
@@ -260,7 +304,7 @@ static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
}
static struct smem_private_entry *
-phdr_to_first_private_entry(struct smem_partition_header *phdr)
+phdr_to_first_uncached_entry(struct smem_partition_header *phdr)
{
void *p = phdr;
@@ -268,7 +312,7 @@ phdr_to_first_private_entry(struct smem_partition_header *phdr)
}
static struct smem_private_entry *
-private_entry_next(struct smem_private_entry *e)
+uncached_entry_next(struct smem_private_entry *e)
{
void *p = e;
@@ -276,13 +320,28 @@ private_entry_next(struct smem_private_entry *e)
le32_to_cpu(e->size);
}
-static void *entry_to_item(struct smem_private_entry *e)
+static struct smem_private_entry *
+cached_entry_next(struct smem_private_entry *e, size_t cacheline)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);
+}
+
+static void *uncached_entry_to_item(struct smem_private_entry *e)
{
void *p = e;
return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
}
+static void *cached_entry_to_item(struct smem_private_entry *e)
+{
+ void *p = e;
+
+ return p - le32_to_cpu(e->size);
+}
+
/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;
@@ -290,32 +349,30 @@ static struct qcom_smem *__smem;
#define HWSPINLOCK_TIMEOUT 1000
static int qcom_smem_alloc_private(struct qcom_smem *smem,
- unsigned host,
+ struct smem_partition_header *phdr,
unsigned item,
size_t size)
{
- struct smem_partition_header *phdr;
struct smem_private_entry *hdr, *end;
size_t alloc_size;
void *cached;
- phdr = smem->partitions[host];
- hdr = phdr_to_first_private_entry(phdr);
- end = phdr_to_last_private_entry(phdr);
- cached = phdr_to_first_cached_entry(phdr);
+ hdr = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
+ cached = phdr_to_last_cached_entry(phdr);
while (hdr < end) {
if (hdr->canary != SMEM_PRIVATE_CANARY) {
dev_err(smem->dev,
- "Found invalid canary in host %d partition\n",
- host);
+ "Found invalid canary in hosts %d:%d partition\n",
+ phdr->host0, phdr->host1);
return -EINVAL;
}
if (le16_to_cpu(hdr->item) == item)
return -EEXIST;
- hdr = private_entry_next(hdr);
+ hdr = uncached_entry_next(hdr);
}
/* Check that we don't grow into the cached region */
@@ -346,11 +403,8 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
unsigned item,
size_t size)
{
- struct smem_header *header;
struct smem_global_entry *entry;
-
- if (WARN_ON(item >= SMEM_ITEM_COUNT))
- return -EINVAL;
+ struct smem_header *header;
header = smem->regions[0].virt_base;
entry = &header->toc[item];
@@ -389,6 +443,7 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
*/
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
+ struct smem_partition_header *phdr;
unsigned long flags;
int ret;
@@ -401,16 +456,24 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
return -EINVAL;
}
+ if (WARN_ON(item >= __smem->item_count))
+ return -EINVAL;
+
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT,
&flags);
if (ret)
return ret;
- if (host < SMEM_HOST_COUNT && __smem->partitions[host])
- ret = qcom_smem_alloc_private(__smem, host, item, size);
- else
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = qcom_smem_alloc_private(__smem, phdr, item, size);
+ } else {
ret = qcom_smem_alloc_global(__smem, item, size);
+ }
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -428,9 +491,6 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
u32 aux_base;
unsigned i;
- if (WARN_ON(item >= SMEM_ITEM_COUNT))
- return ERR_PTR(-EINVAL);
-
header = smem->regions[0].virt_base;
entry = &header->toc[item];
if (!entry->allocated)
@@ -452,37 +512,58 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
}
static void *qcom_smem_get_private(struct qcom_smem *smem,
- unsigned host,
+ struct smem_partition_header *phdr,
+ size_t cacheline,
unsigned item,
size_t *size)
{
- struct smem_partition_header *phdr;
struct smem_private_entry *e, *end;
- phdr = smem->partitions[host];
- e = phdr_to_first_private_entry(phdr);
- end = phdr_to_last_private_entry(phdr);
+ e = phdr_to_first_uncached_entry(phdr);
+ end = phdr_to_last_uncached_entry(phdr);
while (e < end) {
- if (e->canary != SMEM_PRIVATE_CANARY) {
- dev_err(smem->dev,
- "Found invalid canary in host %d partition\n",
- host);
- return ERR_PTR(-EINVAL);
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
+ if (le16_to_cpu(e->item) == item) {
+ if (size != NULL)
+ *size = le32_to_cpu(e->size) -
+ le16_to_cpu(e->padding_data);
+
+ return uncached_entry_to_item(e);
}
+ e = uncached_entry_next(e);
+ }
+
+ /* Item was not found in the uncached list, search the cached list */
+
+ e = phdr_to_first_cached_entry(phdr, cacheline);
+ end = phdr_to_last_cached_entry(phdr);
+
+ while (e > end) {
+ if (e->canary != SMEM_PRIVATE_CANARY)
+ goto invalid_canary;
+
if (le16_to_cpu(e->item) == item) {
if (size != NULL)
*size = le32_to_cpu(e->size) -
le16_to_cpu(e->padding_data);
- return entry_to_item(e);
+ return cached_entry_to_item(e);
}
- e = private_entry_next(e);
+ e = cached_entry_next(e, cacheline);
}
return ERR_PTR(-ENOENT);
+
+invalid_canary:
+ dev_err(smem->dev, "Found invalid canary in hosts %d:%d partition\n",
+ phdr->host0, phdr->host1);
+
+ return ERR_PTR(-EINVAL);
}
/**
@@ -496,23 +577,35 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
*/
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
+ struct smem_partition_header *phdr;
unsigned long flags;
+ size_t cacheln;
int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem)
return ptr;
+ if (WARN_ON(item >= __smem->item_count))
+ return ERR_PTR(-EINVAL);
+
ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
HWSPINLOCK_TIMEOUT,
&flags);
if (ret)
return ERR_PTR(ret);
- if (host < SMEM_HOST_COUNT && __smem->partitions[host])
- ptr = qcom_smem_get_private(__smem, host, item, size);
- else
+ if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+ phdr = __smem->partitions[host];
+ cacheln = __smem->cacheline[host];
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ cacheln = __smem->global_cacheline;
+ ptr = qcom_smem_get_private(__smem, phdr, cacheln, item, size);
+ } else {
ptr = qcom_smem_get_global(__smem, item, size);
+ }
hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -541,6 +634,10 @@ int qcom_smem_get_free_space(unsigned host)
phdr = __smem->partitions[host];
ret = le32_to_cpu(phdr->offset_free_cached) -
le32_to_cpu(phdr->offset_free_uncached);
+ } else if (__smem->global_partition) {
+ phdr = __smem->global_partition;
+ ret = le32_to_cpu(phdr->offset_free_cached) -
+ le32_to_cpu(phdr->offset_free_uncached);
} else {
header = __smem->regions[0].virt_base;
ret = le32_to_cpu(header->available);
@@ -552,44 +649,131 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
+ struct smem_header *header;
__le32 *versions;
- size_t size;
- versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
- if (IS_ERR(versions)) {
- dev_err(smem->dev, "Unable to read the version item\n");
- return -ENOENT;
- }
-
- if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) {
- dev_err(smem->dev, "Version item is too small\n");
- return -EINVAL;
- }
+ header = smem->regions[0].virt_base;
+ versions = header->version;
return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
}
-static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
- unsigned local_host)
+static struct smem_ptable *qcom_smem_get_ptable(struct qcom_smem *smem)
{
- struct smem_partition_header *header;
- struct smem_ptable_entry *entry;
struct smem_ptable *ptable;
- unsigned remote_host;
- u32 version, host0, host1;
- int i;
+ u32 version;
ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
- return 0;
+ return ERR_PTR(-ENOENT);
version = le32_to_cpu(ptable->version);
if (version != 1) {
dev_err(smem->dev,
"Unsupported partition header version %d\n", version);
+ return ERR_PTR(-EINVAL);
+ }
+ return ptable;
+}
+
+static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
+{
+ struct smem_ptable *ptable;
+ struct smem_info *info;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR_OR_NULL(ptable))
+ return SMEM_ITEM_COUNT;
+
+ info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
+ return SMEM_ITEM_COUNT;
+
+ return le16_to_cpu(info->num_items);
+}
+
+static int qcom_smem_set_global_partition(struct qcom_smem *smem)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry = NULL;
+ struct smem_ptable *ptable;
+ u32 host0, host1, size;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
+ for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
+ entry = &ptable->entry[i];
+ host0 = le16_to_cpu(entry->host0);
+ host1 = le16_to_cpu(entry->host1);
+
+ if (host0 == SMEM_GLOBAL_HOST && host0 == host1)
+ break;
+ }
+
+ if (!entry) {
+ dev_err(smem->dev, "Missing entry for global partition\n");
+ return -EINVAL;
+ }
+
+ if (!le32_to_cpu(entry->offset) || !le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Invalid entry for global partition\n");
+ return -EINVAL;
+ }
+
+ if (smem->global_partition) {
+ dev_err(smem->dev, "Already found the global partition\n");
+ return -EINVAL;
+ }
+
+ header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
+ host0 = le16_to_cpu(header->host0);
+ host1 = le16_to_cpu(header->host1);
+
+ if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
+ dev_err(smem->dev, "Global partition has invalid magic\n");
+ return -EINVAL;
+ }
+
+ if (host0 != SMEM_GLOBAL_HOST && host1 != SMEM_GLOBAL_HOST) {
+ dev_err(smem->dev, "Global partition hosts are invalid\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
+ dev_err(smem->dev, "Global partition has invalid size\n");
return -EINVAL;
}
+ size = le32_to_cpu(header->offset_free_uncached);
+ if (size > le32_to_cpu(header->size)) {
+ dev_err(smem->dev,
+ "Global partition has invalid free pointer\n");
+ return -EINVAL;
+ }
+
+ smem->global_partition = header;
+ smem->global_cacheline = le32_to_cpu(entry->cacheline);
+
+ return 0;
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+ unsigned int local_host)
+{
+ struct smem_partition_header *header;
+ struct smem_ptable_entry *entry;
+ struct smem_ptable *ptable;
+ unsigned int remote_host;
+ u32 host0, host1;
+ int i;
+
+ ptable = qcom_smem_get_ptable(smem);
+ if (IS_ERR(ptable))
+ return PTR_ERR(ptable);
+
for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
entry = &ptable->entry[i];
host0 = le16_to_cpu(entry->host0);
@@ -646,7 +830,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
return -EINVAL;
}
- if (header->size != entry->size) {
+ if (le32_to_cpu(header->size) != le32_to_cpu(entry->size)) {
dev_err(smem->dev,
"Partition %d has invalid size\n", i);
return -EINVAL;
@@ -659,6 +843,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
}
smem->partitions[remote_host] = header;
+ smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
}
return 0;
@@ -729,13 +914,23 @@ static int qcom_smem_probe(struct platform_device *pdev)
}
version = qcom_smem_get_sbl_version(smem);
- if (version >> 16 != SMEM_EXPECTED_VERSION) {
+ switch (version >> 16) {
+ case SMEM_GLOBAL_PART_VERSION:
+ ret = qcom_smem_set_global_partition(smem);
+ if (ret < 0)
+ return ret;
+ smem->item_count = qcom_smem_get_item_count(smem);
+ break;
+ case SMEM_GLOBAL_HEAP_VERSION:
+ smem->item_count = SMEM_ITEM_COUNT;
+ break;
+ default:
dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
return -EINVAL;
}
ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
- if (ret < 0)
+ if (ret < 0 && ret != -ENOENT)
return ret;
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
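For context, the alloc/get paths reworked above keep their exported signatures, so existing SMEM clients are unaffected. A hedged sketch of a client using them, where MY_HOST and MY_ITEM are made-up identifiers:

#include <linux/err.h>
#include <linux/string.h>
#include <linux/soc/qcom/smem.h>

#define MY_HOST	1	/* illustrative remote processor id */
#define MY_ITEM	602	/* illustrative smem item number */

static int my_smem_example(void)
{
	size_t size;
	void *ptr;
	int ret;

	/* Allocate the item; -EEXIST simply means it already exists. */
	ret = qcom_smem_alloc(MY_HOST, MY_ITEM, 64);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	/* Look the item up; the actual item size is reported back. */
	ptr = qcom_smem_get(MY_HOST, MY_ITEM, &size);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	memset(ptr, 0, size);
	return 0;
}

With the changes above, a request for an item number at or above the bootloader-provided item_count now fails with -EINVAL instead of indexing past the table of contents.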
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 567414cb42ba..09550b1da56d 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -3,7 +3,8 @@ config SOC_RENESAS
default y if ARCH_RENESAS
select SOC_BUS
select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
- ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77995
+ ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77970 || \
+ ARCH_R8A77995
select SYSC_R8A7743 if ARCH_R8A7743
select SYSC_R8A7745 if ARCH_R8A7745
select SYSC_R8A7779 if ARCH_R8A7779
@@ -13,6 +14,7 @@ config SOC_RENESAS
select SYSC_R8A7794 if ARCH_R8A7794
select SYSC_R8A7795 if ARCH_R8A7795
select SYSC_R8A7796 if ARCH_R8A7796
+ select SYSC_R8A77970 if ARCH_R8A77970
select SYSC_R8A77995 if ARCH_R8A77995
if SOC_RENESAS
@@ -54,6 +56,10 @@ config SYSC_R8A7796
bool "R-Car M3-W System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A77970
+ bool "R-Car V3M System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A77995
bool "R-Car D3 System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 6b6e7f16104c..845d62a08ce1 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Generic, must be first because of soc_device_register()
obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
@@ -11,6 +12,7 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
# Family
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
new file mode 100644
index 000000000000..8c614164718e
--- /dev/null
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -0,0 +1,39 @@
+/*
+ * Renesas R-Car V3M System Controller
+ *
+ * Copyright (C) 2017 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77970-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77970_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77970_PD_CA53_SCU, R8A77970_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77970_PD_CA53_CPU0, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77970_PD_CA53_CPU1, R8A77970_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
+ { "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON },
+ { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 },
+ { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 },
+ { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 },
+ { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON },
+ { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 },
+};
+
+const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
+ .areas = r8a77970_areas,
+ .num_areas = ARRAY_SIZE(r8a77970_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index baa47014e96b..3316b028f231 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -41,6 +41,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
/* R-Car Gen3 is handled like R-Car Gen2 */
{ .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 },
+ { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index c8406e81640f..55a47e509e49 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -284,6 +284,9 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_SYSC_R8A7796
{ .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A77970
+ { .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A77995
{ .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 2f524922c4d2..9d9daf9eb91b 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -58,6 +58,7 @@ extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern const struct rcar_sysc_info r8a77970_sysc_info;
extern const struct rcar_sysc_info r8a77995_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 90d6b7a4340a..9f4ee2567c72 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -144,6 +144,11 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
.id = 0x52,
};
+static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x54,
+};
+
static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
.family = &fam_rcar_gen3,
.id = 0x58,
@@ -204,6 +209,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7796
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
+#ifdef CONFIG_ARCH_R8A77970
+ { .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
+#endif
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
#endif
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 40b75748835f..5c342167b9db 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -358,17 +358,6 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
pm_clk_destroy(dev);
}
-static bool rockchip_active_wakeup(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- struct rockchip_pm_domain *pd;
-
- genpd = pd_to_genpd(dev->pm_domain);
- pd = container_of(genpd, struct rockchip_pm_domain, genpd);
-
- return pd->info->active_wakeup;
-}
-
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
@@ -489,8 +478,9 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
- pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ if (pd_info->active_wakeup)
+ pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL, false);
pmu->genpd_data.domains[id] = &pd->genpd;
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index bd4a76f27bc2..938f8ccfcb74 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -60,12 +60,6 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
if (pmu_data->powerdown_conf_extra)
pmu_data->powerdown_conf_extra(mode);
-
- if (pmu_data->pmu_config_extra) {
- for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
- pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
- pmu_data->pmu_config_extra[i].offset);
- }
}
/*
@@ -89,9 +83,6 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
.compatible = "samsung,exynos4210-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data),
}, {
- .compatible = "samsung,exynos4212-pmu",
- .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data),
- }, {
.compatible = "samsung,exynos4412-pmu",
.data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data),
}, {
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 40d4229abfb5..86b3f2f8966d 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -23,7 +23,6 @@ struct exynos_pmu_conf {
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
- const struct exynos_pmu_conf *pmu_config_extra;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
@@ -36,7 +35,6 @@ extern void __iomem *pmu_base_addr;
/* list of all exported SoC specific data */
extern const struct exynos_pmu_data exynos3250_pmu_data;
extern const struct exynos_pmu_data exynos4210_pmu_data;
-extern const struct exynos_pmu_data exynos4212_pmu_data;
extern const struct exynos_pmu_data exynos4412_pmu_data;
extern const struct exynos_pmu_data exynos5250_pmu_data;
extern const struct exynos_pmu_data exynos5420_pmu_data;
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
index bc4fa73bed11..5dbfe4e31f4c 100644
--- a/drivers/soc/samsung/exynos4-pmu.c
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -90,7 +90,7 @@ static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
{ PMU_TABLE_END,},
};
-static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -195,10 +195,6 @@ static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
{ S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
{ S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
{ S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
- { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
{ S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
@@ -212,11 +208,6 @@ const struct exynos_pmu_data exynos4210_pmu_data = {
.pmu_config = exynos4210_pmu_config,
};
-const struct exynos_pmu_data exynos4212_pmu_data = {
- .pmu_config = exynos4x12_pmu_config,
-};
-
const struct exynos_pmu_data exynos4412_pmu_data = {
- .pmu_config = exynos4x12_pmu_config,
- .pmu_config_extra = exynos4412_pmu_config,
+ .pmu_config = exynos4412_pmu_config,
};
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 0e52b45721ac..482e108d28aa 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += fuse/
obj-y += common.o
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
index 21bc27580178..ea8332cc3980 100644
--- a/drivers/soc/tegra/fuse/Makefile
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += fuse-tegra.o
obj-y += fuse-tegra30.o
obj-y += tegra-apbmisc.o
diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c
index 8fc356039401..82c7e27cd8bb 100644
--- a/drivers/soc/tegra/powergate-bpmp.c
+++ b/drivers/soc/tegra/powergate-bpmp.c
@@ -42,6 +42,7 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
{
struct mrq_pg_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = CMD_PG_SET_STATE;
@@ -53,7 +54,13 @@ static int tegra_bpmp_powergate_set_state(struct tegra_bpmp *bpmp,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err < 0)
+ return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
@@ -80,6 +87,8 @@ static int tegra_bpmp_powergate_get_state(struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return PG_STATE_OFF;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
return response.get_state.state;
}
@@ -106,6 +115,8 @@ static int tegra_bpmp_powergate_get_max_id(struct tegra_bpmp *bpmp)
err = tegra_bpmp_transfer(bpmp, &msg);
if (err < 0)
return err;
+ else if (msg.rx.ret < 0)
+ return -EINVAL;
return response.get_max_id.max_id;
}
@@ -132,7 +143,7 @@ static char *tegra_bpmp_powergate_get_name(struct tegra_bpmp *bpmp,
msg.rx.size = sizeof(response);
err = tegra_bpmp_transfer(bpmp, &msg);
- if (err < 0)
+ if (err < 0 || msg.rx.ret < 0)
return NULL;
return kstrdup(response.get_name.name, GFP_KERNEL);
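The pattern applied throughout this file is that tegra_bpmp_transfer() only reports transport failures, while the firmware's own verdict arrives in msg.rx.ret. A hedged helper sketch of that pattern (the wrapper itself is illustrative, not part of the driver):

#include <soc/tegra/bpmp.h>

static int my_bpmp_transfer_checked(struct tegra_bpmp *bpmp,
				    struct tegra_bpmp_message *msg)
{
	int err;

	err = tegra_bpmp_transfer(bpmp, msg);
	if (err < 0)		/* the IPC exchange itself failed */
		return err;

	if (msg->rx.ret < 0)	/* the firmware rejected the request */
		return -EINVAL;

	return 0;
}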
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 7d572736c86e..8e205287f120 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# TI Keystone SOC drivers
#
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a75f2a2cf780..603783976b81 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1,10 +1,6 @@
#
# SPI driver configuration
#
-# NOTE: the reason this doesn't show SPI slave support is mostly that
-# nobody's needed a slave side API yet. The master-role API is not
-# fully appropriate there, so it'd need some thought to do well.
-#
menuconfig SPI
bool "SPI support"
depends on HAS_IOMEM
@@ -379,7 +375,7 @@ config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
depends on HAS_DMA
- depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -626,6 +622,13 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SPRD_ADI
+ tristate "Spreadtrum ADI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK)
+ help
+ ADI driver based on SPI for Spreadtrum SoCs.
+
config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a3ae2b70cdc3..34c5f2832ddf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for kernel SPI drivers.
#
@@ -90,6 +91,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 568e1c65aa82..77fe55ce790c 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -213,7 +213,7 @@ static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
}
static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
- unsigned int speed_hz, u16 mode)
+ unsigned int speed_hz)
{
u32 val;
u32 prescale;
@@ -231,17 +231,6 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_CLK_CAPT_EDGE;
spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
}
-
- val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
-
- if (mode & SPI_CPOL)
- val |= A3700_SPI_CLK_POL;
-
- if (mode & SPI_CPHA)
- val |= A3700_SPI_CLK_PHA;
-
- spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
@@ -423,7 +412,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
a3700_spi = spi_master_get_devdata(spi->master);
- a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
byte_len = xfer->bits_per_word >> 3;
@@ -584,6 +573,8 @@ static int a3700_spi_prepare_message(struct spi_master *master,
a3700_spi_bytelen_set(a3700_spi, 4);
+ a3700_spi_mode_set(a3700_spi, spi->mode);
+
return 0;
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 6ab4c7700228..68cfc351b47f 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ err_put_master:
static int spi_engine_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_master_get_devdata(master);
int irq = platform_get_irq(pdev, 0);
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
free_irq(irq, master);
+ spi_master_put(master);
+
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
diff --git a/drivers/spi/spi-bcm53xx.h b/drivers/spi/spi-bcm53xx.h
index 73575dfe6916..03e3442086ec 100644
--- a/drivers/spi/spi-bcm53xx.h
+++ b/drivers/spi/spi-bcm53xx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPI_BCM53XX_H
#define SPI_BCM53XX_H
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 47bb9b898dfd..ae61d72c7d28 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Mix this utility code with some glue code to get one of several types of
* simple SPI master driver. Two do polled word-at-a-time I/O:
diff --git a/drivers/spi/spi-cavium.h b/drivers/spi/spi-cavium.h
index 1f91d61b745b..1f3ac463a20b 100644
--- a/drivers/spi/spi-cavium.h
+++ b/drivers/spi/spi-cavium.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPI_CAVIUM_H
#define __SPI_CAVIUM_H
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index da5eab62df34..5c07cf8f19e0 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DW_SPI_HEADER_H
#define DW_SPI_HEADER_H
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d89127f4a46d..f652f70cb8db 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -32,6 +32,7 @@
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>
@@ -151,6 +152,11 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+static const struct fsl_dspi_devtype_data coldfire_data = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+};
+
struct fsl_dspi_dma {
/* Length of transfer in words of DSPI_FIFO_SIZE */
u32 curr_xfer_len;
@@ -741,6 +747,7 @@ static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
unsigned char pasc = 0, asc = 0, fmsz = 0;
@@ -761,11 +768,18 @@ static int dspi_setup(struct spi_device *spi)
return -ENOMEM;
}
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
+ pdata = dev_get_platdata(&dspi->pdev->dev);
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ if (!pdata) {
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+ } else {
+ cs_sck_delay = pdata->cs_sck_delay;
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
@@ -949,6 +963,7 @@ static int dspi_probe(struct platform_device *pdev)
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -969,25 +984,34 @@ static int dspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
- ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
- goto out_master_put;
- }
- master->num_chipselect = cs_num;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ master->num_chipselect = pdata->cs_num;
+ master->bus_num = pdata->bus_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_master_put;
- }
- master->bus_num = bus_num;
+ dspi->devtype_data = &coldfire_data;
+ } else {
- dspi->devtype_data = of_device_get_match_data(&pdev->dev);
- if (!dspi->devtype_data) {
- dev_err(&pdev->dev, "can't get devtype_data\n");
- ret = -EFAULT;
- goto out_master_put;
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_master_put;
+ }
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
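The new platform-data path is aimed at ColdFire (M5441x) systems without a device tree. A hedged board-code sketch using the fsl_dspi_platform_data fields referenced in dspi_setup()/dspi_probe() above; the device name and delay values are assumptions:

#include <linux/platform_device.h>
#include <linux/spi/spi-fsl-dspi.h>

static struct fsl_dspi_platform_data mcf_dspi_pdata = {
	.bus_num	= 0,
	.cs_num		= 4,
	.cs_sck_delay	= 100,	/* ns, illustrative */
	.sck_cs_delay	= 100,	/* ns, illustrative */
};

static struct platform_device mcf_dspi_device = {
	.name	= "fsl-dspi",	/* assumed to match the platform driver name */
	.id	= 0,
	.dev	= {
		.platform_data	= &mcf_dspi_pdata,
	},
};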
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index babb15f07995..79ddefe4180d 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -53,10 +53,13 @@
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
+/* The maximum number of bytes that IMX53_ECSPI can transfer in slave mode. */
+#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
IMX1_CSPI,
@@ -76,7 +79,9 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
+ void (*disable)(struct spi_imx_data *);
bool has_dmamode;
+ bool has_slavemode;
unsigned int fifo_size;
bool dynamic_burst;
enum spi_imx_devtype devtype;
@@ -108,6 +113,11 @@ struct spi_imx_data {
unsigned int dynamic_burst, read_u32;
unsigned int word_mask;
+ /* Slave mode */
+ bool slave_mode;
+ bool slave_aborted;
+ unsigned int slave_burst;
+
/* DMA */
bool usedma;
u32 wml;
@@ -221,6 +231,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (!master->dma_rx)
return false;
+ if (spi_imx->slave_mode)
+ return false;
+
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
@@ -262,6 +275,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT_RDREN (1 << 4)
#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
@@ -378,6 +392,44 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
spi_imx_buf_tx_u16(spi_imx);
}
+static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
+
+ if (spi_imx->rx_buf) {
+ int n_bytes = spi_imx->slave_burst % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ memcpy(spi_imx->rx_buf,
+ ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
+
+ spi_imx->rx_buf += n_bytes;
+ spi_imx->slave_burst -= n_bytes;
+ }
+}
+
+static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+ int n_bytes = spi_imx->count % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ if (spi_imx->tx_buf) {
+ memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
+ spi_imx->tx_buf, n_bytes);
+ val = cpu_to_be32(val);
+ spi_imx->tx_buf += n_bytes;
+ }
+
+ spi_imx->count -= n_bytes;
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int fspi, unsigned int *fres)
@@ -427,6 +479,9 @@ static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
if (enable & MXC_INT_RR)
val |= MX51_ECSPI_INT_RREN;
+ if (enable & MXC_INT_RDR)
+ val |= MX51_ECSPI_INT_RDREN;
+
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
@@ -439,6 +494,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
+static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static int mx51_ecspi_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -446,14 +510,11 @@ static int mx51_ecspi_config(struct spi_device *spi)
u32 clk = spi_imx->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
- /*
- * The hardware seems to have a race condition when changing modes. The
- * current assumption is that the selection of the channel arrives
- * earlier in the hardware than the mode bits when they are written at
- * the same time.
- * So set master mode for all channels as we do not support slave mode.
- */
- ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+ /* set Master or Slave mode */
+ if (spi_imx->slave_mode)
+ ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
+ else
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/*
* Enable SPI_RDY handling (falling edge/level triggered).
@@ -468,9 +529,22 @@ static int mx51_ecspi_config(struct spi_device *spi)
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ /*
+ * eCSPI burst completion by chip select signal is not functional
+ * in slave mode on the i.MX53 SoC, so configure the SPI burst to
+ * complete when BURST_LENGTH + 1 bits have been received.
+ */
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ else
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
if (spi->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
@@ -805,6 +879,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX1_CSPI,
};
@@ -817,6 +892,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX21_CSPI,
};
@@ -830,6 +906,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX27_CSPI,
};
@@ -842,6 +919,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX31_CSPI,
};
@@ -855,6 +933,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX35_CSPI,
};
@@ -867,6 +946,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
@@ -878,6 +959,8 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
@@ -945,14 +1028,16 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->txfifo++;
}
- spi_imx->devtype_data->trigger(spi_imx);
+ if (!spi_imx->slave_mode)
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data->rx_available(spi_imx)) {
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -1034,7 +1119,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->speed_hz = t->speed_hz;
/* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst) {
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
u32 mask;
spi_imx->dynamic_burst = 0;
@@ -1078,6 +1163,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return ret;
}
+ if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
+ spi_imx->rx = mx53_ecspi_rx_slave;
+ spi_imx->tx = mx53_ecspi_tx_slave;
+ spi_imx->slave_burst = t->len;
+ }
+
spi_imx->devtype_data->config(spi);
return 0;
@@ -1262,11 +1353,61 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
return transfer->len;
}
+static int spi_imx_pio_transfer_slave(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int ret = transfer->len;
+
+ if (is_imx53_ecspi(spi_imx) &&
+ transfer->len > MX53_MAX_TRANSFER_BYTES) {
+ dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
+ MX53_MAX_TRANSFER_BYTES);
+ return -EMSGSIZE;
+ }
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+ spi_imx->slave_aborted = false;
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
+
+ if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
+ spi_imx->slave_aborted) {
+ dev_dbg(&spi->dev, "interrupted\n");
+ ret = -EINTR;
+ }
+
+ /*
+ * The eCSPI has a hardware issue in slave mode: after 64 words have
+ * been written to the TXFIFO, ECSPI_TXDATA keeps shifting out the
+ * last word even once the TXFIFO is empty, so the eCSPI has to be
+ * disabled after the transfer completes when operating in slave mode.
+ */
+ if (spi_imx->devtype_data->disable)
+ spi_imx->devtype_data->disable(spi_imx);
+
+ return ret;
+}
+
static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+ spi_imx->rx(spi_imx);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
else
@@ -1323,6 +1464,16 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
return 0;
}
+static int spi_imx_slave_abort(struct spi_master *master)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ spi_imx->slave_aborted = true;
+ complete(&spi_imx->xfer_done);
+
+ return 0;
+}
+
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1334,13 +1485,23 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_imx_data *spi_imx;
struct resource *res;
int i, ret, irq, spi_drctl;
+ const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ bool slave_mode;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ slave_mode = devtype_data->has_slavemode &&
+ of_property_read_bool(np, "spi-slave");
+ if (slave_mode)
+ master = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ else
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
if (!master)
return -ENOMEM;
@@ -1358,20 +1519,29 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
spi_imx->dev = &pdev->dev;
+ spi_imx->slave_mode = slave_mode;
- spi_imx->devtype_data = of_id ? of_id->data :
- (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ spi_imx->devtype_data = devtype_data;
+ /* Get number of chip selects, either platform data or OF */
if (mxc_platform_info) {
master->num_chipselect = mxc_platform_info->num_chipselect;
- master->cs_gpios = devm_kzalloc(&master->dev,
- sizeof(int) * master->num_chipselect, GFP_KERNEL);
- if (!master->cs_gpios)
- return -ENOMEM;
+ if (mxc_platform_info->chipselect) {
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ master->cs_gpios[i] = mxc_platform_info->chipselect[i];
+ }
+ } else {
+ u32 num_cs;
- for (i = 0; i < master->num_chipselect; i++)
- master->cs_gpios[i] = mxc_platform_info->chipselect[i];
- }
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+ /* If not present, the default value of 1 is used */
+ }
spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
@@ -1380,6 +1550,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
@@ -1451,37 +1622,38 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+
+ /* Request GPIO CS lines, if any */
+ if (!spi_imx->slave_mode && master->cs_gpios) {
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
+ master->cs_gpios[i]);
+ goto out_spi_bitbang;
+ }
+ }
+ }
+
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
goto out_clk_put;
}
- if (!master->cs_gpios) {
- dev_err(&pdev->dev, "No CS GPIOs available\n");
- ret = -EINVAL;
- goto out_clk_put;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
- master->cs_gpios[i]);
- goto out_clk_put;
- }
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
clk_disable(spi_imx->clk_per);
return ret;
+out_spi_bitbang:
+ spi_bitbang_stop(&spi_imx->bitbang);
out_clk_put:
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
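
For readability, the master/slave selection introduced above reduces to the sketch below. The helper wrapper is hypothetical; field and function names are taken from the hunks, and error handling plus the remaining probe steps are omitted.

/* Condensed, illustrative restatement of the allocation choice above. */
static int spi_imx_alloc_host(struct platform_device *pdev,
			      const struct spi_imx_devtype_data *devtype_data,
			      struct spi_master **out)
{
	struct device_node *np = pdev->dev.of_node;
	bool slave_mode = devtype_data->has_slavemode &&
			  of_property_read_bool(np, "spi-slave");

	/* Slave-capable controllers honour the generic "spi-slave" DT flag. */
	if (slave_mode)
		*out = spi_alloc_slave(&pdev->dev, sizeof(struct spi_imx_data));
	else
		*out = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));

	return *out ? 0 : -ENOMEM;
}
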
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 5b0e9a3e83f6..3d216b950b41 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -44,6 +44,7 @@
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
@@ -442,6 +443,85 @@ static int mxs_spi_transfer_one(struct spi_master *master,
return status;
}
+static int mxs_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ clk_disable_unprepare(ssp->clk);
+
+ ret = pinctrl_pm_select_idle_state(dev);
+ if (ret) {
+ int ret2 = clk_prepare_enable(ssp->clk);
+
+ if (ret2)
+ dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
+ ret, ret2);
+ }
+
+ return ret;
+}
+
+static int mxs_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ pinctrl_pm_select_idle_state(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mxs_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ return mxs_spi_runtime_suspend(dev);
+ else
+ return 0;
+}
+
+static int __maybe_unused mxs_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev))
+ ret = mxs_spi_runtime_resume(dev);
+ else
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret < 0 && !pm_runtime_suspended(dev))
+ mxs_spi_runtime_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_spi_pm = {
+ SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
+ mxs_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+};
+
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -493,12 +573,15 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
master->transfer_one_message = mxs_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
@@ -521,28 +604,41 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_master_free;
}
- ret = clk_prepare_enable(ssp->clk);
- if (ret)
- goto out_dma_release;
+ pm_runtime_enable(ssp->dev);
+ if (!pm_runtime_enabled(ssp->dev)) {
+ ret = mxs_spi_runtime_resume(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime resume failed\n");
+ goto out_dma_release;
+ }
+ }
+
+ ret = pm_runtime_get_sync(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime_get_sync failed\n");
+ goto out_pm_runtime_disable;
+ }
clk_set_rate(ssp->clk, clk_freq);
ret = stmp_reset_block(ssp->base);
if (ret)
- goto out_disable_clk;
-
- platform_set_drvdata(pdev, master);
+ goto out_pm_runtime_put;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_disable_clk;
+ goto out_pm_runtime_put;
}
+ pm_runtime_put(ssp->dev);
+
return 0;
-out_disable_clk:
- clk_disable_unprepare(ssp->clk);
+out_pm_runtime_put:
+ pm_runtime_put(ssp->dev);
+out_pm_runtime_disable:
+ pm_runtime_disable(ssp->dev);
out_dma_release:
dma_release_channel(ssp->dmach);
out_master_free:
@@ -560,7 +656,10 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- clk_disable_unprepare(ssp->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mxs_spi_runtime_suspend(&pdev->dev);
+
dma_release_channel(ssp->dmach);
return 0;
@@ -572,6 +671,7 @@ static struct platform_driver mxs_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
+ .pm = &mxs_spi_pm,
},
};
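
Because master->auto_runtime_pm is set, the SPI core brackets message processing with runtime-PM get/put calls on the controller's parent device, so the driver only has to provide the dev_pm_ops above. The sketch below is a simplified illustration of that pairing, not the actual core code.

/* Rough sketch of the runtime-PM bracketing done on the driver's behalf. */
static int example_pump_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct device *parent = master->dev.parent;
	int ret;

	ret = pm_runtime_get_sync(parent);	/* resumes the SSP block */
	if (ret < 0) {
		pm_runtime_put_noidle(parent);
		return ret;
	}

	ret = master->transfer_one_message(master, msg);

	pm_runtime_mark_last_busy(parent);
	pm_runtime_put_autosuspend(parent);
	return ret;
}
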
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 4b6dd73b80da..8974bb340b3a 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -671,7 +671,6 @@ static int orion_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%pOF has no valid 'reg' property (%d)\n",
np, status);
- status = 0;
continue;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2a10b3f94ff7..2ce875764ca6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1221,7 +1221,6 @@ static int rspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct rspi_data *rspi;
int ret;
- const struct of_device_id *of_id;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
@@ -1229,9 +1228,8 @@ static int rspi_probe(struct platform_device *pdev)
if (master == NULL)
return -ENOMEM;
- of_id = of_match_device(rspi_of_match, &pdev->dev);
- if (of_id) {
- ops = of_id->data;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (ops) {
ret = rspi_parse_dt(&pdev->dev, master);
if (ret)
goto error1;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b392cca8fa4f..de7df20f8712 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -752,7 +752,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci;
int err;
sdd = spi_master_get_devdata(spi->master);
@@ -788,8 +787,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- sci = sdd->cntrlr_info;
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0eb1e9583485..fcd261f98b9f 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 1)
+ if (l & 3)
break;
copy32 = copy_wswap32;
} else {
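
The stricter mask matters because copy_wswap32() swaps 16-bit halves inside whole 32-bit words, so the transfer length has to be a multiple of 4 rather than merely even. A quick worked check:

/*
 * l & 3 rejects any length that is not 32-bit aligned:
 *   l = 2 -> 2 & 3 = 2  rejected (the old "l & 1" test would have passed it)
 *   l = 6 -> 6 & 3 = 2  rejected
 *   l = 8 -> 8 & 3 = 0  accepted
 */
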
@@ -1021,6 +1021,8 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
@@ -1188,12 +1190,10 @@ free_tx_chan:
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_master *master = p->master;
- struct device *dev;
if (!master->dma_tx)
return;
- dev = &p->pdev->dev;
dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
@@ -1209,15 +1209,13 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct resource *r;
struct spi_master *master;
const struct sh_msiof_chipdata *chipdata;
- const struct of_device_id *of_id;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
int i;
int ret;
- of_id = of_match_device(sh_msiof_match, &pdev->dev);
- if (of_id) {
- chipdata = of_id->data;
+ chipdata = of_device_get_match_data(&pdev->dev);
+ if (chipdata) {
info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
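
The spi-rspi and spi-sh-msiof hunks apply the same simplification. A generic before/after sketch, with placeholder driver and type names, is shown below.

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_chipdata { int variant; };			/* placeholder type */

static const struct foo_chipdata foo_v1 = { .variant = 1 };
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo", .data = &foo_v1 },
	{ /* sentinel */ }
};

/* Before: explicit table lookup plus a temporary of_device_id pointer. */
static const struct foo_chipdata *foo_get_data_old(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(foo_of_match, &pdev->dev);

	return of_id ? of_id->data : NULL;
}

/* After: one helper call; NULL when the device was not matched via OF. */
static const struct foo_chipdata *foo_get_data_new(struct platform_device *pdev)
{
	return of_device_get_match_data(&pdev->dev);
}
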
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
new file mode 100644
index 000000000000..5993bdbf79e4
--- /dev/null
+++ b/drivers/spi/spi-sprd-adi.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+/* Registers definitions for ADI controller */
+#define REG_ADI_CTRL0 0x4
+#define REG_ADI_CHN_PRIL 0x8
+#define REG_ADI_CHN_PRIH 0xc
+#define REG_ADI_INT_EN 0x10
+#define REG_ADI_INT_RAW 0x14
+#define REG_ADI_INT_MASK 0x18
+#define REG_ADI_INT_CLR 0x1c
+#define REG_ADI_GSSI_CFG0 0x20
+#define REG_ADI_GSSI_CFG1 0x24
+#define REG_ADI_RD_CMD 0x28
+#define REG_ADI_RD_DATA 0x2c
+#define REG_ADI_ARM_FIFO_STS 0x30
+#define REG_ADI_STS 0x34
+#define REG_ADI_EVT_FIFO_STS 0x38
+#define REG_ADI_ARM_CMD_STS 0x3c
+#define REG_ADI_CHN_EN 0x40
+#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4)
+#define REG_ADI_CHN_EN1 0x20c
+
+/* Bits definitions for register REG_ADI_GSSI_CFG0 */
+#define BIT_CLK_ALL_ON BIT(30)
+
+/* Bits definitions for register REG_ADI_RD_DATA */
+#define BIT_RD_CMD_BUSY BIT(31)
+#define RD_ADDR_SHIFT 16
+#define RD_VALUE_MASK GENMASK(15, 0)
+#define RD_ADDR_MASK GENMASK(30, 16)
+
+/* Bits definitions for register REG_ADI_ARM_FIFO_STS */
+#define BIT_FIFO_FULL BIT(11)
+#define BIT_FIFO_EMPTY BIT(10)
+
+/*
+ * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on.
+ * The slave devices' address offset is always 0x8000 and the size is 4KB.
+ */
+#define ADI_SLAVE_ADDR_SIZE SZ_4K
+#define ADI_SLAVE_OFFSET 0x8000
+
+/* Timeout (ms) for the trylock of hardware spinlocks */
+#define ADI_HWSPINLOCK_TIMEOUT 5000
+/*
+ * ADI controller has 50 channels including 2 software channels
+ * and 48 hardware channels.
+ */
+#define ADI_HW_CHNS 50
+
+#define ADI_FIFO_DRAIN_TIMEOUT 1000
+#define ADI_READ_TIMEOUT 2000
+#define REG_ADDR_LOW_MASK GENMASK(11, 0)
+
+struct sprd_adi {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ struct hwspinlock *hwlock;
+ unsigned long slave_vbase;
+ unsigned long slave_pbase;
+};
+
+static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+{
+ if (paddr < sadi->slave_pbase || paddr >
+ (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+ dev_err(sadi->dev,
+ "slave physical address is incorrect, addr = 0x%x\n",
+ paddr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
+{
+ return (paddr - sadi->slave_pbase + sadi->slave_vbase);
+}
+
+static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ u32 sts;
+
+ do {
+ sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
+ if (sts & BIT_FIFO_EMPTY)
+ break;
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "drain write fifo timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
+{
+ return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
+}
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+{
+ int read_timeout = ADI_READ_TIMEOUT;
+ u32 val, rd_addr;
+
+ /*
+ * Set the physical register address that needs to be read into the
+ * RD_CMD register; the ADI controller will then start the transfer
+ * automatically.
+ */
+ writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+
+ /*
+ * Wait for the read operation to complete: BIT_RD_CMD_BUSY is set
+ * when the read command is written to the register and is cleared
+ * once the read operation has completed.
+ */
+ do {
+ val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
+ if (!(val & BIT_RD_CMD_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--read_timeout);
+
+ if (read_timeout == 0) {
+ dev_err(sadi->dev, "ADI read timeout\n");
+ return -EBUSY;
+ }
+
+ /*
+ * The returned value contains both the data and the register address
+ * that was read: bits 0 to 15 hold the data and bits 16 to 30 hold
+ * the register address. Check the returned address to validate the
+ * data.
+ */
+ rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+ if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
+ dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
+ reg_paddr, val);
+ return -EIO;
+ }
+
+ *read_val = val & RD_VALUE_MASK;
+ return 0;
+}
+
+static int sprd_adi_write(struct sprd_adi *sadi, unsigned long reg, u32 val)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ int ret;
+
+ ret = sprd_adi_drain_fifo(sadi);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait until the write FIFO is no longer full before writing data to
+ * the PMIC registers.
+ */
+ do {
+ if (!sprd_adi_fifo_is_full(sadi)) {
+ writel_relaxed(val, (void __iomem *)reg);
+ break;
+ }
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "write fifo is full\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi_dev,
+ struct spi_transfer *t)
+{
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+ unsigned long flags, virt_reg;
+ u32 phy_reg, val;
+ int ret;
+
+ if (t->rx_buf) {
+ phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
+
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_read(sadi, phy_reg, &val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+
+ *(u32 *)t->rx_buf = val;
+ } else if (t->tx_buf) {
+ u32 *p = (u32 *)t->tx_buf;
+
+ /*
+ * Get the physical register address to write to and convert it to a
+ * virtual address, since the write is performed through the virtual
+ * register address.
+ */
+ phy_reg = *p++ + sadi->slave_pbase;
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ virt_reg = sprd_adi_to_vaddr(sadi, phy_reg);
+ val = *p;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_write(sadi, virt_reg, val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+ } else {
+ dev_err(sadi->dev, "no buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sprd_adi_hw_init(struct sprd_adi *sadi)
+{
+ struct device_node *np = sadi->dev->of_node;
+ int i, size, chn_cnt;
+ const __be32 *list;
+ u32 tmp;
+
+ /* Address bits select default 12 bits */
+ writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
+
+ /* Set all channels as default priority */
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
+
+ /* Set clock auto gate mode */
+ tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
+ tmp &= ~BIT_CLK_ALL_ON;
+ writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
+
+ /* Set hardware channels setting */
+ list = of_get_property(np, "sprd,hw-channels", &size);
+ if (!list || !size) {
+ dev_info(sadi->dev, "no hw channels setting in node\n");
+ return;
+ }
+
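+	/* Each "sprd,hw-channels" entry is a <chn_id chn_config> u32 pair (8 bytes). */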
+ chn_cnt = size / 8;
+ for (i = 0; i < chn_cnt; i++) {
+ u32 value;
+ u32 chn_id = be32_to_cpu(*list++);
+ u32 chn_config = be32_to_cpu(*list++);
+
+ /* Channel 0 and 1 are software channels */
+ if (chn_id < 2)
+ continue;
+
+ writel_relaxed(chn_config, sadi->base +
+ REG_ADI_CHN_ADDR(chn_id));
+
+ if (chn_id < 32) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
+ value |= BIT(chn_id);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
+ } else if (chn_id < ADI_HW_CHNS) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
+ value |= BIT(chn_id - 32);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
+ }
+ }
+}
+
+static int sprd_adi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_controller *ctlr;
+ struct sprd_adi *sadi;
+ struct resource *res;
+ u32 num_chipselect;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "can not find the adi bus node\n");
+ return -ENODEV;
+ }
+
+ pdev->id = of_alias_get_id(np, "spi");
+ num_chipselect = of_get_child_count(np);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+ sadi = spi_controller_get_devdata(ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sadi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sadi->base)) {
+ ret = PTR_ERR(sadi->base);
+ goto put_ctlr;
+ }
+
+ sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
+ sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+ sadi->ctlr = ctlr;
+ sadi->dev = &pdev->dev;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not get the hardware spinlock\n");
+ goto put_ctlr;
+ }
+
+ sadi->hwlock = hwspin_lock_request_specific(ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+
+ sprd_adi_hw_init(sadi);
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = num_chipselect;
+ ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->bits_per_word_mask = 0;
+ ctlr->transfer_one = sprd_adi_transfer_one;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI controller\n");
+ goto free_hwlock;
+ }
+
+ return 0;
+
+free_hwlock:
+ hwspin_lock_free(sadi->hwlock);
+put_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sprd_adi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+
+ hwspin_lock_free(sadi->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sprd_adi_of_match[] = {
+ {
+ .compatible = "sprd,sc9860-adi",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
+
+static struct platform_driver sprd_adi_driver = {
+ .driver = {
+ .name = "sprd-adi",
+ .of_match_table = sprd_adi_of_match,
+ },
+ .probe = sprd_adi_probe,
+ .remove = sprd_adi_remove,
+};
+module_platform_driver(sprd_adi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
+MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
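
The transfer contract implemented by sprd_adi_transfer_one() above is register-oriented: rx_buf carries a register address on input and receives the 16-bit value, while tx_buf carries an address/value pair. A hypothetical client read under that assumption might look as follows; the function name and the simple on-stack buffer handling are illustrative only.

/* Illustrative only: read one ADI slave register through this controller. */
static int example_adi_read_reg(struct spi_device *spi, u32 reg, u32 *val)
{
	u32 buf = reg;			/* in: register address, out: value */
	struct spi_transfer xfer = {
		.rx_buf = &buf,
		.len = sizeof(buf),
	};
	int ret;

	ret = spi_sync_transfer(spi, &xfer, 1);
	if (ret)
		return ret;

	*val = buf;
	return 0;
}
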
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 44550182a4a3..a76acedd7e2f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -50,7 +50,7 @@
#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
#define SPI_IDLE_SDA_MASK (3 << 18)
-#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
@@ -705,9 +705,9 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SS_VAL;
+ command1 |= SPI_CS_SW_VAL;
else
- command1 &= ~SPI_CS_SS_VAL;
+ command1 &= ~SPI_CS_SW_VAL;
tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e8b5a5e21b2e..b33a727a0158 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2200,7 +2200,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
* Context: can sleep
*
* Register a SPI device as with spi_register_controller() which will
- * automatically be unregister
+ * automatically be unregistered and freed.
*
* Return: zero on success, else a negative error code.
*/
@@ -2241,15 +2241,18 @@ static int __unregister(struct device *dev, void *null)
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the controller.
*/
void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
+ int id = ctlr->bus_num;
int dummy;
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, ctlr->bus_num);
+ found = idr_find(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (found != ctlr) {
dev_dbg(&ctlr->dev,
@@ -2269,7 +2272,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
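
Caching the bus number is what makes this safe: device_unregister() may drop the final reference to the controller, after which ctlr must not be dereferenced. A minimal sketch of the ordering the change enforces (not the full function):

/* Read the id while ctlr is still guaranteed to be valid ... */
int id = ctlr->bus_num;

device_unregister(&ctlr->dev);		/* may free ctlr */

/* ... so the idr cleanup no longer touches possibly-freed memory. */
mutex_lock(&board_lock);
idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
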
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 64a09681cee0..142d33df040f 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# core
ssb-y += main.o scan.o
ssb-$(CONFIG_SSB_EMBEDDED) += embedded.o
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index c2f5d3969c8b..ef9ac8efcab4 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_SSB_PRIVATE_H_
#define LINUX_SSB_PRIVATE_H_
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 8951c37d8d80..6e536020029a 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for staging directory
obj-y += media/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 5f14247392bf..687e0eac85bf 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -1,5 +1,4 @@
TODO:
- - checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- add proper arch dependencies as needed
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index eb7eeed6ae40..bb30bf8774a0 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index d9f8b1424da1..c78989351f9c 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -27,19 +27,18 @@ union ion_ioctl_arg {
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
- int ret = 0;
-
switch (cmd) {
case ION_IOC_HEAP_QUERY:
- ret = arg->query.reserved0 != 0;
- ret |= arg->query.reserved1 != 0;
- ret |= arg->query.reserved2 != 0;
+ if (arg->query.reserved0 ||
+ arg->query.reserved1 ||
+ arg->query.reserved2)
+ return -EINVAL;
break;
default:
break;
}
- return ret ? -EINVAL : 0;
+ return 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 93e2c90fa77d..a7d9b0e98572 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -81,7 +81,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
unsigned long flags)
{
struct ion_buffer *buffer;
- struct sg_table *table;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -109,7 +108,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
goto err1;
}
- table = buffer->sg_table;
buffer->dev = dev;
buffer->size = len;
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 621e5f7ceacb..f5f9cd63f8e9 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -86,6 +86,7 @@ struct ion_buffer {
struct sg_table *sg_table;
struct list_head attachments;
};
+
void ion_buffer_destroy(struct ion_buffer *buffer);
/**
@@ -299,7 +300,6 @@ size_t ion_heap_freelist_shrink(struct ion_heap *heap,
*/
size_t ion_heap_freelist_size(struct ion_heap *heap);
-
/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
diff --git a/drivers/staging/board/board.h b/drivers/staging/board/board.h
index 42ed12513220..5609daf4d869 100644
--- a/drivers/staging/board/board.h
+++ b/drivers/staging/board/board.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BOARD_H__
#define __BOARD_H__
diff --git a/drivers/staging/board/kzm9d.c b/drivers/staging/board/kzm9d.c
index 05a6d434d307..d449a837414e 100644
--- a/drivers/staging/board/kzm9d.c
+++ b/drivers/staging/board/kzm9d.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Staging board support for KZM9D. Enable not-yet-DT-capable devices here. */
#include <linux/kernel.h>
diff --git a/drivers/staging/ccree/cc_hal.h b/drivers/staging/ccree/cc_hal.h
deleted file mode 100644
index eecc866dfc74..000000000000
--- a/drivers/staging/ccree/cc_hal.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* pseudo cc_hal.h for cc7x_perf_test_driver (to be able to include code from
- * CC drivers).
- */
-
-#ifndef __CC_HAL_H__
-#define __CC_HAL_H__
-
-#include <linux/io.h>
-
-#define READ_REGISTER(_addr) ioread32((_addr))
-#define WRITE_REGISTER(_addr, _data) iowrite32((_data), (_addr))
-
-#define CC_HAL_WRITE_REGISTER(offset, val) \
- WRITE_REGISTER(cc_base + (offset), val)
-#define CC_HAL_READ_REGISTER(offset) READ_REGISTER(cc_base + (offset))
-
-#endif
diff --git a/drivers/staging/ccree/cc_lli_defs.h b/drivers/staging/ccree/cc_lli_defs.h
index 851d3907167e..a9c417b07b04 100644
--- a/drivers/staging/ccree/cc_lli_defs.h
+++ b/drivers/staging/ccree/cc_lli_defs.h
@@ -59,7 +59,7 @@ static inline void cc_lli_set_addr(u32 *lli_p, dma_addr_t addr)
lli_p[LLI_WORD0_OFFSET] = (addr & U32_MAX);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
lli_p[LLI_WORD1_OFFSET] &= ~LLI_HADDR_MASK;
- lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 16));
+ lli_p[LLI_WORD1_OFFSET] |= FIELD_PREP(LLI_HADDR_MASK, (addr >> 32));
#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
}
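
The corrected shift places the upper 32 bits of a 64-bit DMA address into the high LLI word; shifting by only 16 would mix the two halves. A quick worked value:

/*
 * Example with addr = 0x0000000123456789 (64-bit dma_addr_t):
 *   addr >> 32 = 0x00000001   the high word LLI_WORD1 expects
 *   addr >> 16 = 0x00012345   wrong: still contains low-half bits
 */
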
diff --git a/drivers/staging/ccree/cc_regs.h b/drivers/staging/ccree/cc_regs.h
deleted file mode 100644
index 4a893a6ba6ef..000000000000
--- a/drivers/staging/ccree/cc_regs.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/*!
- * @file
- * @brief This file contains macro definitions for accessing ARM TrustZone
- * CryptoCell register space.
- */
-
-#ifndef _CC_REGS_H_
-#define _CC_REGS_H_
-
-#include <linux/bitfield.h>
-
-#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-#define AXIM_MON_BASE_OFFSET CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_COMP)
-#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
- DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
-
-/* Register Offset macro */
-#define CC_REG_OFFSET(unit_name, reg_name) \
- (DX_BASE_ ## unit_name + DX_ ## reg_name ## _REG_OFFSET)
-
-#endif /*_CC_REGS_H_*/
diff --git a/drivers/staging/ccree/dx_reg_base_host.h b/drivers/staging/ccree/dx_reg_base_host.h
deleted file mode 100644
index 47bbadbcd1df..000000000000
--- a/drivers/staging/ccree/dx_reg_base_host.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2012-2017 ARM Limited or its affiliates.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DX_REG_BASE_HOST_H__
-#define __DX_REG_BASE_HOST_H__
-
-#define DX_BASE_CC 0x80000000
-#define DX_BASE_HOST_RGF 0x0UL
-#define DX_BASE_CRY_KERNEL 0x0UL
-#define DX_BASE_ROM 0x40000000
-
-#endif /*__DX_REG_BASE_HOST_H__*/
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 5abe6b24ff8c..ba0954e4d2e5 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -92,18 +92,17 @@ static inline bool valid_assoclen(struct aead_request *req)
static void ssi_aead_exit(struct crypto_aead *tfm)
{
- struct device *dev = NULL;
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
+ dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
+ crypto_tfm_alg_name(&tfm->base));
- dev = &ctx->drvdata->plat_dev->dev;
/* Unmap enckey buffer */
if (ctx->enckey) {
dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
- SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
- ctx->enckey_dma_addr);
+ dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
+ &ctx->enckey_dma_addr);
ctx->enckey_dma_addr = 0;
ctx->enckey = NULL;
}
@@ -116,8 +115,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
xcbc->xcbc_keys,
xcbc->xcbc_keys_dma_addr);
}
- SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
- xcbc->xcbc_keys_dma_addr);
+ dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
+ &xcbc->xcbc_keys_dma_addr);
xcbc->xcbc_keys_dma_addr = 0;
xcbc->xcbc_keys = NULL;
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
@@ -127,8 +126,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
hmac->ipad_opad,
hmac->ipad_opad_dma_addr);
- SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
- hmac->ipad_opad_dma_addr);
+ dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
+ &hmac->ipad_opad_dma_addr);
hmac->ipad_opad_dma_addr = 0;
hmac->ipad_opad = NULL;
}
@@ -136,8 +135,8 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
hmac->padded_authkey,
hmac->padded_authkey_dma_addr);
- SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
- hmac->padded_authkey_dma_addr);
+ dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
+ &hmac->padded_authkey_dma_addr);
hmac->padded_authkey_dma_addr = 0;
hmac->padded_authkey = NULL;
}
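
Besides moving to dev_dbg(), the hunks above also correct the %pad usage: that specifier prints a dma_addr_t passed by reference, which matters on configurations where dma_addr_t is wider than a native argument. In short:

/* %pad dereferences its argument, so pass the dma_addr_t by reference: */
dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
	&ctx->enckey_dma_addr);
/* Passing ctx->enckey_dma_addr by value, as the old code did, is incorrect. */
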
@@ -146,29 +145,31 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
static int ssi_aead_init(struct crypto_aead *tfm)
{
- struct device *dev;
struct aead_alg *alg = crypto_aead_alg(tfm);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));
+ struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
+ crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
ctx->cipher_mode = ssi_alg->cipher_mode;
ctx->flow_mode = ssi_alg->flow_mode;
ctx->auth_mode = ssi_alg->auth_mode;
ctx->drvdata = ssi_alg->drvdata;
- dev = &ctx->drvdata->plat_dev->dev;
crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
&ctx->enckey_dma_addr, GFP_KERNEL);
if (!ctx->enckey) {
- SSI_LOG_ERR("Failed allocating key buffer\n");
+ dev_err(dev, "Failed allocating key buffer\n");
goto init_failed;
}
- SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);
+ dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
+ ctx->enckey);
/* Set default authlen value */
@@ -182,7 +183,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
&xcbc->xcbc_keys_dma_addr,
GFP_KERNEL);
if (!xcbc->xcbc_keys) {
- SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
+ dev_err(dev, "Failed allocating buffer for XCBC keys\n");
goto init_failed;
}
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
@@ -196,12 +197,12 @@ static int ssi_aead_init(struct crypto_aead *tfm)
GFP_KERNEL);
if (!hmac->ipad_opad) {
- SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
+ dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
goto init_failed;
}
- SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
- hmac->ipad_opad);
+ dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
+ hmac->ipad_opad);
hmac->padded_authkey = dma_alloc_coherent(dev,
MAX_HMAC_BLOCK_SIZE,
@@ -209,7 +210,7 @@ static int ssi_aead_init(struct crypto_aead *tfm)
GFP_KERNEL);
if (!hmac->padded_authkey) {
- SSI_LOG_ERR("failed to allocate padded_authkey\n");
+ dev_err(dev, "failed to allocate padded_authkey\n");
goto init_failed;
}
} else {
@@ -240,8 +241,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
ctx->authsize) != 0) {
- SSI_LOG_DEBUG("Payload authentication failure, "
- "(auth-size=%d, cipher=%d).\n",
+ dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
ctx->authsize, ctx->cipher_mode);
/* In case of payload authentication failure, MUST NOT
* revealed the decrypted message --> zero its memory.
@@ -252,8 +252,11 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
- areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
+ dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
+ areq->cryptlen + areq_ctx->dst_offset,
+ (areq->cryptlen + areq_ctx->dst_offset +
+ ctx->authsize),
+ SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv) {
@@ -377,8 +380,10 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
{
- SSI_LOG_DEBUG("enc_keylen=%u authkeylen=%u\n",
- ctx->enc_keylen, ctx->auth_keylen);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
+ ctx->enc_keylen, ctx->auth_keylen);
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
@@ -395,22 +400,22 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
break;
default:
- SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
+ dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
return -EINVAL;
}
/* Check cipher key size */
if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
- SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
- ctx->enc_keylen);
+ dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
+ ctx->enc_keylen);
return -EINVAL;
}
} else { /* Default assumed to be AES ciphers */
if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
(ctx->enc_keylen != AES_KEYSIZE_192) &&
(ctx->enc_keylen != AES_KEYSIZE_256)) {
- SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
- ctx->enc_keylen);
+ dev_err(dev, "Invalid cipher(AES) key size: %u\n",
+ ctx->enc_keylen);
return -EINVAL;
}
}
@@ -426,7 +431,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
{
dma_addr_t key_dma_addr = 0;
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
ctx->drvdata, ctx->auth_mode);
struct ssi_crypto_req ssi_req = {};
@@ -455,8 +460,8 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
if (likely(keylen != 0)) {
key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -534,7 +539,7 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keyl
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (unlikely(rc != 0))
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
if (likely(key_dma_addr != 0))
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
@@ -551,10 +556,10 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int seq_len = 0, rc = -EINVAL;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
- ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
- key, keylen);
+ dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
+ ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
/* STAT_PHASE_0: Init and sanity checks */
@@ -622,7 +627,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
break; /* No auth. key setup */
default:
- SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
rc = -ENOTSUPP;
goto badkey;
}
@@ -632,7 +637,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
}
@@ -651,7 +656,6 @@ setkey_error:
static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
if (keylen < 3)
return -EINVAL;
@@ -659,9 +663,7 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 3;
memcpy(ctx->ctr_nonce, key + keylen, 3);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
#endif /*SSI_CC_HAS_AES_CCM*/
@@ -670,6 +672,7 @@ static int ssi_aead_setauthsize(
unsigned int authsize)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
/* Unsupported auth. sizes */
if ((authsize == 0) ||
@@ -678,7 +681,7 @@ static int ssi_aead_setauthsize(
}
ctx->authsize = authsize;
- SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
+ dev_dbg(dev, "authlen=%d\n", ctx->authsize);
return 0;
}
@@ -731,10 +734,11 @@ ssi_aead_create_assoc_desc(
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (assoc_dma_type) {
case SSI_DMA_BUF_DLLI:
- SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
+ dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
areq->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
@@ -744,7 +748,7 @@ ssi_aead_create_assoc_desc(
set_din_not_last_indication(&desc[idx]);
break;
case SSI_DMA_BUF_MLLI:
- SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
+ dev_dbg(dev, "ASSOC buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
areq_ctx->assoc.mlli_nents, NS_BIT);
@@ -755,7 +759,7 @@ ssi_aead_create_assoc_desc(
break;
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("Invalid ASSOC buffer type\n");
+ dev_err(dev, "Invalid ASSOC buffer type\n");
}
*seq_size = (++idx);
@@ -772,6 +776,9 @@ ssi_aead_process_authenc_data_desc(
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (data_dma_type) {
case SSI_DMA_BUF_DLLI:
@@ -783,7 +790,7 @@ ssi_aead_process_authenc_data_desc(
unsigned int offset =
(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
areq_ctx->dst_offset : areq_ctx->src_offset;
- SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(cipher) + offset),
@@ -810,7 +817,7 @@ ssi_aead_process_authenc_data_desc(
}
}
- SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
+ dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
NS_BIT);
@@ -819,7 +826,7 @@ ssi_aead_process_authenc_data_desc(
}
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
+ dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
}
*seq_size = (++idx);
@@ -835,13 +842,16 @@ ssi_aead_process_cipher_data_desc(
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->cryptlen == 0)
return; /*null processing*/
switch (data_dma_type) {
case SSI_DMA_BUF_DLLI:
- SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
(sg_dma_address(areq_ctx->src_sgl) +
@@ -853,7 +863,7 @@ ssi_aead_process_cipher_data_desc(
set_flow_mode(&desc[idx], flow_mode);
break;
case SSI_DMA_BUF_MLLI:
- SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
+ dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
areq_ctx->src.mlli_nents, NS_BIT);
@@ -863,7 +873,7 @@ ssi_aead_process_cipher_data_desc(
break;
case SSI_DMA_BUF_NULL:
default:
- SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
+ dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
}
*seq_size = (++idx);
@@ -1178,14 +1188,15 @@ static inline void ssi_aead_load_mlli_to_sram(
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (unlikely(
(req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
(req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
!req_ctx->is_single_pass)) {
- SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
- (unsigned int)ctx->drvdata->mlli_sram_addr,
- req_ctx->mlli_params.mlli_len);
+ dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
+ (unsigned int)ctx->drvdata->mlli_sram_addr,
+ req_ctx->mlli_params.mlli_len);
/* Copy MLLI table host-to-sram */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI,
@@ -1333,6 +1344,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int assoclen = req->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1371,7 +1383,7 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
areq_ctx->is_single_pass = false;
break;
default:
- SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
+ dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
goto data_size_err;
}
@@ -1554,6 +1566,7 @@ static int config_ccm_adata(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
unsigned int lp = req->iv[0];
@@ -1575,7 +1588,7 @@ static int config_ccm_adata(struct aead_request *req)
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (l < 2 || l > 8) {
- SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
+ dev_err(dev, "illegal iv value %X\n", req->iv[0]);
return -EINVAL;
}
memcpy(b0, req->iv, AES_BLOCK_SIZE);
@@ -1588,8 +1601,10 @@ static int config_ccm_adata(struct aead_request *req)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
- if (rc != 0)
+ if (rc != 0) {
+ dev_err(dev, "message len overflow detected");
return rc;
+ }
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
@@ -1812,7 +1827,6 @@ static inline int ssi_aead_gcm(
unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
- unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
@@ -1829,7 +1843,6 @@ static inline int ssi_aead_gcm(
ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
ssi_aead_process_gcm_result_desc(req, desc, seq_size);
- idx = *seq_size;
return 0;
}
@@ -1844,7 +1857,6 @@ static inline int ssi_aead_gcm(
ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
ssi_aead_process_gcm_result_desc(req, desc, seq_size);
- idx = *seq_size;
return 0;
}
@@ -1861,13 +1873,13 @@ static inline void ssi_aead_dump_gcm(
return;
if (title) {
- SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
- SSI_LOG_DEBUG("%s\n", title);
+ dev_dbg(dev, "----------------------------------------------------------------------------------");
+ dev_dbg(dev, "%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
- ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
- req->assoclen, req_ctx->cryptlen);
+ dev_dbg(dev, "cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
+ ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
+ req->assoclen, req_ctx->cryptlen);
if (ctx->enckey)
dump_byte_array("mac key", ctx->enckey, 16);
@@ -1897,6 +1909,7 @@ static int config_gcm_context(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
DRV_CRYPTO_DIRECTION_ENCRYPT) ?
@@ -1904,7 +1917,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1958,20 +1972,20 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
- SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
- ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- ctx, req, req->iv, sg_virt(req->src), req->src->offset,
- sg_virt(req->dst), req->dst->offset, req->cryptlen);
+ dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
+ ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
+ ctx, req, req->iv, sg_virt(req->src), req->src->offset,
+ sg_virt(req->dst), req->dst->offset, req->cryptlen);
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
- SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
+ req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2017,7 +2031,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
+ dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
+ rc);
goto exit;
}
} else {
@@ -2031,7 +2046,8 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
+ dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
+ rc);
goto exit;
}
}
@@ -2039,7 +2055,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("map_request() failed\n");
+ dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2095,7 +2111,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
break;
#endif
default:
- SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
+ dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
ssi_buffer_mgr_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
goto exit;
@@ -2106,7 +2122,7 @@ static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_aead_request(dev, req);
}
@@ -2139,10 +2155,13 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
/* Very similar to ssi_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2183,13 +2202,14 @@ static int ssi_aead_decrypt(struct aead_request *req)
#if SSI_CC_HAS_AES_CCM
static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
-
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2214,9 +2234,9 @@ out:
static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2224,17 +2244,15 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int rc = 0;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("%s() keylen %d, key %p\n", __func__, keylen, key);
+ dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2242,9 +2260,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- rc = ssi_aead_setkey(tfm, key, keylen);
-
- return rc;
+ return ssi_aead_setkey(tfm, key, keylen);
}
static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
@@ -2269,7 +2285,10 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("authsize %d\n", authsize);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
switch (authsize) {
case 8:
@@ -2286,7 +2305,10 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("authsize %d\n", authsize);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
+
+ dev_dbg(dev, "authsize %d\n", authsize);
if (authsize != 16)
return -EINVAL;
@@ -2298,11 +2320,14 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
{
/* Very similar to ssi_aead_encrypt() above. */
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2350,11 +2375,14 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
{
/* Very similar to ssi_aead_decrypt() above. */
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
if (!valid_assoclen(req)) {
- SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
+ dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
goto out;
}
@@ -2654,16 +2682,17 @@ static struct ssi_alg_template aead_algs[] = {
#endif /*SSI_CC_HAS_AES_GCM*/
};
-static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
+static struct ssi_crypto_alg *ssi_aead_create_alg(
+ struct ssi_alg_template *template,
+ struct device *dev)
{
struct ssi_crypto_alg *t_alg;
struct aead_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- SSI_LOG_ERR("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
+
alg = &template->template_aead;
snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
@@ -2713,6 +2742,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
struct ssi_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
+ struct device *dev = drvdata_to_dev(drvdata);
aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
if (!aead_handle) {
@@ -2720,36 +2750,36 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
goto fail0;
}
+ INIT_LIST_HEAD(&aead_handle->aead_list);
drvdata->aead_handle = aead_handle;
aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
drvdata, MAX_HMAC_DIGEST_SIZE);
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail1;
}
- INIT_LIST_HEAD(&aead_handle->aead_list);
-
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- t_alg = ssi_aead_create_alg(&aead_algs[alg]);
+ t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- aead_algs[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ aead_algs[alg].driver_name);
goto fail1;
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->aead_alg.base.cra_driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->aead_alg.base.cra_driver_name);
goto fail2;
} else {
list_add_tail(&t_alg->entry, &aead_handle->aead_list);
- SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->aead_alg.base.cra_driver_name);
}
}
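Note: the ssi_aead.c changes above all follow one pattern — the driver-private SSI_LOG_DEBUG()/SSI_LOG_ERR() macros give way to dev_dbg()/dev_err(), which need a struct device, and that device is recovered from the per-context driver data via drvdata_to_dev(). The alloc path also moves INIT_LIST_HEAD() ahead of the first failure exit so cleanup can always walk the list safely. Below is a minimal sketch of the logging pattern, assuming drvdata keeps a pointer to the platform device it was probed with; the field layout and the example function are illustrative, not the driver's exact code.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

/* Assumed layout: probe stores the platform device in the driver data. */
struct ssi_drvdata_sketch {
	struct platform_device *plat_dev;
	/* ... */
};

static inline struct device *sketch_drvdata_to_dev(struct ssi_drvdata_sketch *drvdata)
{
	return &drvdata->plat_dev->dev;
}

static int sketch_setkey(struct ssi_drvdata_sketch *drvdata, unsigned int keylen)
{
	struct device *dev = sketch_drvdata_to_dev(drvdata);

	dev_dbg(dev, "%s() keylen %d\n", __func__, keylen);
	if (keylen < 4) {
		dev_err(dev, "invalid key length %d\n", keylen);
		return -EINVAL;
	}
	return 0;
}

Every converted call site above has this shape: derive dev once at the top of the function, then log through it.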
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 63936091d524..1f8a225530a8 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -33,14 +33,10 @@
#include "ssi_hash.h"
#include "ssi_aead.h"
-#ifdef CC_DEBUG
#define GET_DMA_BUFFER_TYPE(buff_type) ( \
((buff_type) == SSI_DMA_BUF_NULL) ? "BUF_NULL" : \
((buff_type) == SSI_DMA_BUF_DLLI) ? "BUF_DLLI" : \
((buff_type) == SSI_DMA_BUF_MLLI) ? "BUF_MLLI" : "BUF_INVALID")
-#else
-#define GET_DMA_BUFFER_TYPE(buff_type)
-#endif
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
@@ -76,16 +72,12 @@ struct buffer_array {
* @lbytes: [OUT] Returns the amount of bytes at the last entry
*/
static unsigned int ssi_buffer_mgr_get_sgl_nents(
- struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
+ struct device *dev, struct scatterlist *sg_list,
+ unsigned int nbytes, u32 *lbytes, bool *is_chained)
{
unsigned int nents = 0;
while (nbytes != 0) {
- if (sg_is_chain(sg_list)) {
- SSI_LOG_ERR("Unexpected chained entry "
- "in sg (entry =0x%X)\n", nents);
- BUG();
- }
if (sg_list->length != 0) {
nents++;
/* get the number of bytes in the last entry */
@@ -98,7 +90,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
*is_chained = true;
}
}
- SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
+ dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
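Note: in ssi_buffer_mgr.c the same conversion reaches static helpers, so each one gains a struct device *dev parameter threaded down from the caller. That also lets the GET_DMA_BUFFER_TYPE() string macro lose its CC_DEBUG guard: dev_dbg() already compiles out (or is runtime-gated by dynamic debug) when debugging is disabled, so an always-defined pretty-printer costs nothing. A hedged sketch of such a helper, with illustrative names only:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

/* Always-defined pretty-printer: its only users are dev_dbg() calls,
 * which vanish (or are runtime-gated) when debugging is off.
 */
#define SKETCH_BUF_TYPE(t) \
	((t) == 0 ? "BUF_NULL" : (t) == 1 ? "BUF_DLLI" : "BUF_MLLI")

static unsigned int sketch_count_sg_nents(struct device *dev,
					  struct scatterlist *sgl,
					  unsigned int nbytes)
{
	unsigned int nents = 0;

	while (sgl && nbytes) {
		nents++;
		nbytes -= min(nbytes, sgl->length);
		sgl = sg_next(sgl);
	}
	dev_dbg(dev, "nents %u\n", nents);
	return nents;
}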
@@ -134,20 +126,20 @@ void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
* @direct:
*/
void ssi_buffer_mgr_copy_scatterlist_portion(
- u8 *dest, struct scatterlist *sg,
- u32 to_skip, u32 end,
- enum ssi_sg_cpy_direct direct)
+ struct device *dev, u8 *dest,
+ struct scatterlist *sg, u32 to_skip,
+ u32 end, enum ssi_sg_cpy_direct direct)
{
u32 nents, lbytes;
- nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
+ nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == SSI_SG_TO_BUF));
}
static inline int ssi_buffer_mgr_render_buff_to_mlli(
- dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents,
- u32 **mlli_entry_pp)
+ struct device *dev, dma_addr_t buff_dma, u32 buff_size,
+ u32 *curr_nents, u32 **mlli_entry_pp)
{
u32 *mlli_entry_p = *mlli_entry_pp;
u32 new_nents;
@@ -161,9 +153,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
mlli_entry_p = mlli_entry_p + 2;
@@ -172,9 +164,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
/*Last entry */
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
- mlli_entry_p[LLI_WORD0_OFFSET],
- mlli_entry_p[LLI_WORD1_OFFSET]);
+ dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
+ *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
+ mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
*mlli_entry_pp = mlli_entry_p;
(*curr_nents)++;
@@ -182,8 +174,9 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
}
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
- struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset,
- u32 *curr_nents, u32 **mlli_entry_pp)
+ struct device *dev, struct scatterlist *sgl,
+ u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
+ u32 **mlli_entry_pp)
{
struct scatterlist *curr_sgl = sgl;
u32 *mlli_entry_p = *mlli_entry_pp;
@@ -197,8 +190,8 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_dma_address(curr_sgl) + sgl_offset, entry_data_len,
- curr_nents, &mlli_entry_p);
+ dev, sg_dma_address(curr_sgl) + sgl_offset,
+ entry_data_len, curr_nents, &mlli_entry_p);
if (rc != 0)
return rc;
@@ -217,14 +210,14 @@ static int ssi_buffer_mgr_generate_mlli(
u32 total_nents = 0, prev_total_nents = 0;
int rc = 0, i;
- SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
+ dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);
/* Allocate memory from the pointed pool */
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
&mlli_params->mlli_dma_addr);
if (unlikely(!mlli_params->mlli_virt_addr)) {
- SSI_LOG_ERR("dma_pool_alloc() failed\n");
+ dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
}
@@ -234,12 +227,12 @@ static int ssi_buffer_mgr_generate_mlli(
for (i = 0; i < sg_data->num_of_buffers; i++) {
if (sg_data->type[i] == DMA_SGL_TYPE)
rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
- sg_data->entry[i].sgl,
- sg_data->total_data_len[i], sg_data->offset[i], &total_nents,
- &mlli_p);
+ dev, sg_data->entry[i].sgl,
+ sg_data->total_data_len[i], sg_data->offset[i],
+ &total_nents, &mlli_p);
else /*DMA_BUFF_TYPE*/
rc = ssi_buffer_mgr_render_buff_to_mlli(
- sg_data->entry[i].buffer_dma,
+ dev, sg_data->entry[i].buffer_dma,
sg_data->total_data_len[i], &total_nents,
&mlli_p);
if (rc != 0)
@@ -259,26 +252,23 @@ static int ssi_buffer_mgr_generate_mlli(
/* Set MLLI size for the bypass operation */
mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);
- SSI_LOG_DEBUG("MLLI params: "
- "virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
- mlli_params->mlli_virt_addr,
- mlli_params->mlli_dma_addr,
- mlli_params->mlli_len);
+ dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
+ mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
+ mlli_params->mlli_len);
build_mlli_exit:
return rc;
}
static inline void ssi_buffer_mgr_add_buffer_entry(
- struct buffer_array *sgl_data,
+ struct device *dev, struct buffer_array *sgl_data,
dma_addr_t buffer_dma, unsigned int buffer_len,
bool is_last_entry, u32 *mlli_nents)
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u single_buff=%pad "
- "buffer_len=0x%08X is_last=%d\n",
- index, buffer_dma, buffer_len, is_last_entry);
+ dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
+ index, &buffer_dma, buffer_len, is_last_entry);
sgl_data->nents[index] = 1;
sgl_data->entry[index].buffer_dma = buffer_dma;
sgl_data->offset[index] = 0;
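Note: a second recurring detail in these hunks is DMA address logging — %pad expects a pointer to a dma_addr_t, so every converted call passes &addr rather than the value (dma_addr_t can be wider than a CPU pointer, e.g. on 32-bit LPAE systems). Minimal hedged illustration:

#include <linux/device.h>
#include <linux/types.h>

static void sketch_log_dma(struct device *dev, dma_addr_t buffer_dma)
{
	/* %pad dereferences a pointer to dma_addr_t; pass &buffer_dma, not
	 * the value, so this stays correct when dma_addr_t is 64-bit on a
	 * 32-bit CPU.
	 */
	dev_dbg(dev, "single_buff=%pad\n", &buffer_dma);
}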
@@ -292,6 +282,7 @@ static inline void ssi_buffer_mgr_add_buffer_entry(
}
static inline void ssi_buffer_mgr_add_scatterlist_entry(
+ struct device *dev,
struct buffer_array *sgl_data,
unsigned int nents,
struct scatterlist *sgl,
@@ -302,8 +293,8 @@ static inline void ssi_buffer_mgr_add_scatterlist_entry(
{
unsigned int index = sgl_data->num_of_buffers;
- SSI_LOG_DEBUG("index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
- index, nents, sgl, data_len, is_last_table);
+ dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
+ index, nents, sgl, data_len, is_last_table);
sgl_data->nents[index] = nents;
sgl_data->entry[index].sgl = sgl;
sgl_data->offset[index] = data_offset;
@@ -327,7 +318,7 @@ ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
if (!l_sg)
break;
if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
- SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
+ dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
l_sg = sg_next(l_sg);
@@ -356,26 +347,22 @@ static int ssi_buffer_mgr_map_scatterlist(
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
- SSI_LOG_ERR("dma_map_sg() single buffer failed\n");
+ dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped sg: dma_address=%pad "
- "page=%p addr=%pK offset=%u "
- "length=%u\n",
- sg_dma_address(sg),
- sg_page(sg),
- sg_virt(sg),
- sg->offset, sg->length);
+ dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(sg), sg_page(sg), sg_virt(sg),
+ sg->offset, sg->length);
*lbytes = nbytes;
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = ssi_buffer_mgr_get_sgl_nents(sg, nbytes, lbytes,
+ *nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
&is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- *nents, max_sg_nents);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ *nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
@@ -385,7 +372,7 @@ static int ssi_buffer_mgr_map_scatterlist(
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
if (unlikely(*mapped_nents == 0)) {
*nents = 0;
- SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
} else {
@@ -398,7 +385,7 @@ static int ssi_buffer_mgr_map_scatterlist(
direction);
if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
- SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
}
@@ -414,26 +401,22 @@ ssi_aead_handle_config_buf(struct device *dev,
struct buffer_array *sg_data,
unsigned int assoclen)
{
- SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
+ dev_dbg(dev, " handle additional data config set to DLLI\n");
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
DMA_TO_DEVICE) != 1)) {
- SSI_LOG_ERR("dma_map_sg() "
- "config buffer failed\n");
- return -ENOMEM;
+ dev_err(dev, "dma_map_sg() config buffer failed\n");
+ return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
- "page=%p addr=%pK "
- "offset=%u length=%u\n",
- sg_dma_address(&areq_ctx->ccm_adata_sg),
- sg_page(&areq_ctx->ccm_adata_sg),
- sg_virt(&areq_ctx->ccm_adata_sg),
- areq_ctx->ccm_adata_sg.offset,
- areq_ctx->ccm_adata_sg.length);
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(&areq_ctx->ccm_adata_sg),
+ sg_page(&areq_ctx->ccm_adata_sg),
+ sg_virt(&areq_ctx->ccm_adata_sg),
+ areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
- ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
&areq_ctx->ccm_adata_sg,
(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
0, false, NULL);
@@ -447,28 +430,23 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
u32 curr_buff_cnt,
struct buffer_array *sg_data)
{
- SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
+ dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
DMA_TO_DEVICE) != 1)) {
- SSI_LOG_ERR("dma_map_sg() "
- "src buffer failed\n");
- return -ENOMEM;
+ dev_err(dev, "dma_map_sg() src buffer failed\n");
+ return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped curr_buff: dma_address=%pad "
- "page=%p addr=%pK "
- "offset=%u length=%u\n",
- sg_dma_address(areq_ctx->buff_sg),
- sg_page(areq_ctx->buff_sg),
- sg_virt(areq_ctx->buff_sg),
- areq_ctx->buff_sg->offset,
- areq_ctx->buff_sg->length);
+ dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+ &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+ sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+ areq_ctx->buff_sg->length);
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
- ssi_buffer_mgr_add_scatterlist_entry(sg_data, 1, areq_ctx->buff_sg,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
curr_buff_cnt, 0, false, NULL);
return 0;
}
@@ -483,9 +461,8 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
if (likely(req_ctx->gen_ctx.iv_dma_addr != 0)) {
- SSI_LOG_DEBUG("Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
- req_ctx->gen_ctx.iv_dma_addr,
- ivsize);
+ dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+ &req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
@@ -499,11 +476,11 @@ void ssi_buffer_mgr_unmap_blkcipher_request(
}
dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->src=%pK\n", sg_virt(src));
+ dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
if (src != dst) {
dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped req->dst=%pK\n", sg_virt(dst));
+ dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
}
}
@@ -519,7 +496,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
struct mlli_params *mlli_params = &req_ctx->mlli_params;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
u32 dummy = 0;
int rc = 0;
@@ -539,13 +516,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
req_ctx->gen_ctx.iv_dma_addr))) {
- SSI_LOG_ERR("Mapping iv %u B at va=%pK "
- "for DMA failed\n", ivsize, info);
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ ivsize, info);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
- ivsize, info,
- req_ctx->gen_ctx.iv_dma_addr);
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
} else {
req_ctx->gen_ctx.iv_dma_addr = 0;
}
@@ -567,7 +543,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
/* Handle inplace operation */
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
req_ctx->out_nents = 0;
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->in_nents,
src, nbytes, 0,
true,
@@ -587,12 +563,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->in_nents,
src, nbytes, 0,
true,
&req_ctx->in_mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
req_ctx->out_nents,
dst, nbytes, 0,
true,
@@ -607,8 +583,8 @@ int ssi_buffer_mgr_map_blkcipher_request(
goto ablkcipher_exit;
}
- SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
+ dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(req_ctx->dma_buf_type));
return 0;
@@ -674,30 +650,34 @@ void ssi_buffer_mgr_unmap_aead_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
- areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
- SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
+ dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
+ sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
+ req->assoclen, req->cryptlen);
size_to_unmap = req->assoclen + req->cryptlen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src,
+ ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
+ DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
- SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
- sg_virt(req->dst));
+ dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
+ sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- ssi_buffer_mgr_get_sgl_nents(req->dst,
+ ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
size_to_unmap,
- &dummy,
- &chained),
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -712,13 +692,14 @@ void ssi_buffer_mgr_unmap_aead_request(
* data memory overriding that caused by cache coherence problem.
*/
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
}
}
static inline int ssi_buffer_mgr_get_aead_icv_nents(
+ struct device *dev,
struct scatterlist *sgl,
unsigned int sgl_nents,
unsigned int authsize,
@@ -757,12 +738,12 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
nents = 2;
*is_icv_fragmented = true;
} else {
- SSI_LOG_ERR("Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
+ dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
+ MAX_ICV_NENTS_SUPPORTED);
nents = -1; /*unsupported*/
}
- SSI_LOG_DEBUG("is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
+ dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
+ (*is_icv_fragmented ? "true" : "false"), nents);
return nents;
}
@@ -775,7 +756,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
if (unlikely(!req->iv)) {
@@ -786,22 +767,22 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
- SSI_LOG_ERR("Mapping iv %u B at va=%pK for DMA failed\n",
- hw_iv_size, req->iv);
+ dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+ hw_iv_size, req->iv);
rc = -ENOMEM;
goto chain_iv_exit;
}
- SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=%pad\n",
- hw_iv_size, req->iv,
- areq_ctx->gen_ctx.iv_dma_addr);
+ dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+ hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
if (do_chain && areq_ctx->plaintext_authenticate_only) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
/* Chain to given list */
ssi_buffer_mgr_add_buffer_entry(
- sg_data, areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
+ dev, sg_data,
+ areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
iv_size_to_authenc, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
@@ -824,6 +805,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int sg_index = 0;
u32 size_of_assoc = req->assoclen;
+ struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
size_of_assoc += crypto_aead_ivsize(tfm);
@@ -837,9 +819,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
- SSI_LOG_DEBUG("Chain assoc of length 0: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
goto chain_assoc_exit;
}
@@ -853,16 +835,16 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
current_sg = sg_next(current_sg);
//if have reached the end of the sgl, then this is unexpected
if (!current_sg) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += current_sg->length;
mapped_nents++;
}
}
if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->assoc.nents = mapped_nents;
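Note: these scatterlist walks also replace BUG() with a reported, recoverable failure — running off the end of the sg list now logs via dev_err() and returns -EINVAL so only the request fails, not the whole machine. A hedged sketch of the pattern with stand-in names:

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

static int sketch_skip_assoc(struct device *dev, struct scatterlist *sg,
			     unsigned int to_skip)
{
	unsigned int off = sg->length;

	while (off <= to_skip) {
		sg = sg_next(sg);
		if (!sg) {
			/* was BUG(); now a recoverable request failure */
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		off += sg->length;
	}
	return 0;
}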
@@ -873,9 +855,9 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (unlikely((mapped_nents + 1) >
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
- SSI_LOG_ERR("CCM case.Too many fragments. Current %d max %d\n",
- (areq_ctx->assoc.nents + 1),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
+ dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
+ (areq_ctx->assoc.nents + 1),
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
rc = -ENOMEM;
goto chain_assoc_exit;
}
@@ -889,11 +871,11 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
if (unlikely((do_chain) ||
(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
- SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
- areq_ctx->assoc.nents);
+ dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
+ areq_ctx->assoc.nents);
ssi_buffer_mgr_add_scatterlist_entry(
- sg_data, areq_ctx->assoc.nents,
+ dev, sg_data, areq_ctx->assoc.nents,
req->src, req->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
@@ -951,10 +933,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
unsigned int authsize = areq_ctx->req_authsize;
int rc = 0, icv_nents;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct device *dev = drvdata_to_dev(drvdata);
if (likely(req->src == req->dst)) {
/*INPLACE*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
@@ -962,7 +945,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->src_sgl,
areq_ctx->src.nents,
authsize,
*src_last_bytes,
@@ -990,7 +974,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
skip += crypto_aead_ivsize(tfm);
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac,
+ req->src,
(skip + req->cryptlen -
areq_ctx->req_authsize),
skip + req->cryptlen,
@@ -1013,14 +998,14 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
areq_ctx->src_offset,
is_last_table,
&areq_ctx->src.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->dst.nents,
areq_ctx->dst_sgl,
areq_ctx->cryptlen,
@@ -1028,7 +1013,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->src_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->src_sgl,
areq_ctx->src.nents,
authsize,
*src_last_bytes,
@@ -1043,15 +1029,15 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = req->assoclen;
- if (areq_ctx->is_gcm4543)
- size_to_skip += crypto_aead_ivsize(tfm);
+ if (areq_ctx->is_gcm4543)
+ size_to_skip += crypto_aead_ivsize(tfm);
- ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ dev, areq_ctx->backup_mac, req->src,
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
@@ -1065,14 +1051,14 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
} else {
/*NON-INPLACE and ENCRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->dst.nents,
areq_ctx->dst_sgl,
areq_ctx->cryptlen,
areq_ctx->dst_offset,
is_last_table,
&areq_ctx->dst.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
areq_ctx->src.nents,
areq_ctx->src_sgl,
areq_ctx->cryptlen,
@@ -1080,7 +1066,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dst_sgl,
+ icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
+ areq_ctx->dst_sgl,
areq_ctx->dst.nents,
authsize,
*dst_last_bytes,
@@ -1115,7 +1102,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
bool is_last_table, bool do_chain)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
int src_last_bytes = 0, dst_last_bytes = 0;
@@ -1134,10 +1121,9 @@ static inline int ssi_buffer_mgr_aead_chain_data(
offset = size_to_skip;
- if (!sg_data) {
- rc = -EINVAL;
- goto chain_data_exit;
- }
+ if (!sg_data)
+ return -EINVAL;
+
areq_ctx->src_sgl = req->src;
areq_ctx->dst_sgl = req->dst;
@@ -1145,7 +1131,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
+ src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
+ size_for_map,
+ &src_last_bytes,
+ &chained);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1153,15 +1142,15 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
//if have reached the end of the sgl, then this is unexpected
if (!areq_ctx->src_sgl) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
@@ -1187,7 +1176,10 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
+ dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
+ size_for_map,
+ &dst_last_bytes,
+ &chained);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1197,15 +1189,15 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
//if have reached the end of the sgl, then this is unexpected
if (!areq_ctx->dst_sgl) {
- SSI_LOG_ERR("reached end of sg list. unexpected\n");
- BUG();
+ dev_err(dev, "reached end of sg list. unexpected\n");
+ return -EINVAL;
}
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
- SSI_LOG_ERR("Too many fragments. current %d max %d\n",
- dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
+ dev_err(dev, "Too many fragments. current %d max %d\n",
+ dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
}
areq_ctx->dst.nents = dst_mapped_nents;
@@ -1285,7 +1277,7 @@ int ssi_buffer_mgr_map_aead_request(
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct buffer_array sg_data;
unsigned int authsize = areq_ctx->req_authsize;
struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
@@ -1312,7 +1304,7 @@ int ssi_buffer_mgr_map_aead_request(
* data memory overriding that caused by cache coherence problem.
*/
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
+ dev, areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
}
@@ -1327,8 +1319,8 @@ int ssi_buffer_mgr_map_aead_request(
MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->mac_buf_dma_addr))) {
- SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK for DMA failed\n",
- MAX_MAC_SIZE, areq_ctx->mac_buf);
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1340,9 +1332,10 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->ccm_iv0_dma_addr))) {
- SSI_LOG_ERR("Mapping mac_buf %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET));
+ dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE,
+ (areq_ctx->ccm_config +
+ CCM_CTR_COUNT_0_OFFSET));
areq_ctx->ccm_iv0_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1362,8 +1355,8 @@ int ssi_buffer_mgr_map_aead_request(
AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, areq_ctx->hkey_dma_addr))) {
- SSI_LOG_ERR("Mapping hkey %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, areq_ctx->hkey);
+ dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1373,8 +1366,8 @@ int ssi_buffer_mgr_map_aead_request(
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_block_len_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
- AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
+ dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
goto aead_map_failure;
}
@@ -1385,9 +1378,8 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc1_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_iv_inc1 %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->gcm_iv_inc1));
+ dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1399,9 +1391,8 @@ int ssi_buffer_mgr_map_aead_request(
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, areq_ctx->gcm_iv_inc2_dma_addr))) {
- SSI_LOG_ERR("Mapping gcm_iv_inc2 %u B at va=%pK "
- "for DMA failed\n", AES_BLOCK_SIZE,
- (areq_ctx->gcm_iv_inc2));
+ dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
+ AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
rc = -ENOMEM;
goto aead_map_failure;
@@ -1481,9 +1472,10 @@ int ssi_buffer_mgr_map_aead_request(
goto aead_map_failure;
ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
- SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
- SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
- SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
+ dev_dbg(dev, "assoc params mn %d\n",
+ areq_ctx->assoc.mlli_nents);
+ dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
+ dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
}
return 0;
@@ -1496,7 +1488,7 @@ int ssi_buffer_mgr_map_hash_request_final(
struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
areq_ctx->buff0;
u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
@@ -1507,11 +1499,8 @@ int ssi_buffer_mgr_map_hash_request_final(
u32 dummy = 0;
u32 mapped_nents = 0;
- SSI_LOG_DEBUG(" final params : curr_buff=%pK "
- "curr_buff_cnt=0x%X nbytes = 0x%X "
- "src=%pK curr_index=%u\n",
- curr_buff, *curr_buff_cnt, nbytes,
- src, areq_ctx->buff_index);
+ dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
@@ -1557,7 +1546,7 @@ int ssi_buffer_mgr_map_hash_request_final(
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
areq_ctx->in_nents,
src, nbytes, 0, true,
&areq_ctx->mlli_nents);
@@ -1568,8 +1557,8 @@ int ssi_buffer_mgr_map_hash_request_final(
}
/* change the buffer index for the unmap function */
areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
- SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
- GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
+ dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+ GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
fail_unmap_din:
@@ -1586,7 +1575,7 @@ int ssi_buffer_mgr_map_hash_request_update(
struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
u8 *curr_buff = areq_ctx->buff_index ? areq_ctx->buff1 :
areq_ctx->buff0;
u32 *curr_buff_cnt = areq_ctx->buff_index ? &areq_ctx->buff1_cnt :
@@ -1604,11 +1593,8 @@ int ssi_buffer_mgr_map_hash_request_update(
u32 dummy = 0;
u32 mapped_nents = 0;
- SSI_LOG_DEBUG(" update params : curr_buff=%pK "
- "curr_buff_cnt=0x%X nbytes=0x%X "
- "src=%pK curr_index=%u\n",
- curr_buff, *curr_buff_cnt, nbytes,
- src, areq_ctx->buff_index);
+ dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+ curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_NULL;
mlli_params->curr_pool = NULL;
@@ -1617,14 +1603,11 @@ int ssi_buffer_mgr_map_hash_request_update(
areq_ctx->in_nents = 0;
if (unlikely(total_in_len < block_size)) {
- SSI_LOG_DEBUG(" less than one block: curr_buff=%pK "
- "*curr_buff_cnt=0x%X copy_to=%pK\n",
- curr_buff, *curr_buff_cnt,
- &curr_buff[*curr_buff_cnt]);
+ dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+ curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- ssi_buffer_mgr_get_sgl_nents(src,
- nbytes,
- &dummy, NULL);
+ ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
+ NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
@@ -1636,17 +1619,15 @@ int ssi_buffer_mgr_map_hash_request_update(
/* update data len */
update_data_len = total_in_len - *next_buff_cnt;
- SSI_LOG_DEBUG(" temp length : *next_buff_cnt=0x%X "
- "update_data_len=0x%X\n",
+ dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
*next_buff_cnt, update_data_len);
/* Copy the new residue to next buffer */
if (*next_buff_cnt != 0) {
- SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
- " residue %u\n", next_buff,
- (update_data_len - *curr_buff_cnt),
- *next_buff_cnt);
- ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
+ dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+ next_buff, (update_data_len - *curr_buff_cnt),
+ *next_buff_cnt);
+ ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
(update_data_len - *curr_buff_cnt),
nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
@@ -1688,7 +1669,7 @@ int ssi_buffer_mgr_map_hash_request_update(
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(&sg_data,
+ ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
areq_ctx->in_nents,
src,
(update_data_len - *curr_buff_cnt),
@@ -1725,29 +1706,26 @@ void ssi_buffer_mgr_unmap_hash_request(
*allocated and should be released
*/
if (areq_ctx->mlli_params.curr_pool) {
- SSI_LOG_DEBUG("free MLLI buffer: dma=%pad virt=%pK\n",
- areq_ctx->mlli_params.mlli_dma_addr,
- areq_ctx->mlli_params.mlli_virt_addr);
+ dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+ &areq_ctx->mlli_params.mlli_dma_addr,
+ areq_ctx->mlli_params.mlli_virt_addr);
dma_pool_free(areq_ctx->mlli_params.curr_pool,
areq_ctx->mlli_params.mlli_virt_addr,
areq_ctx->mlli_params.mlli_dma_addr);
}
if ((src) && likely(areq_ctx->in_nents != 0)) {
- SSI_LOG_DEBUG("Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
- sg_virt(src),
- sg_dma_address(src),
- sg_dma_len(src));
+ dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+ sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
areq_ctx->in_nents, DMA_TO_DEVICE);
}
if (*prev_len != 0) {
- SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- " dma=%pad len 0x%X\n",
- sg_virt(areq_ctx->buff_sg),
- sg_dma_address(areq_ctx->buff_sg),
- sg_dma_len(areq_ctx->buff_sg));
+ dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+ sg_virt(areq_ctx->buff_sg),
+ &sg_dma_address(areq_ctx->buff_sg),
+ sg_dma_len(areq_ctx->buff_sg));
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
if (!do_revert) {
/* clean the previous data length for update operation */
@@ -1761,7 +1739,7 @@ void ssi_buffer_mgr_unmap_hash_request(
int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
- struct device *dev = &drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(drvdata);
buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
if (!buff_mgr_handle)
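Note: many of the deleted SSI_LOG_DEBUG() calls split their format strings across several quoted fragments; the dev_dbg()/dev_err() replacements rejoin each message onto a single line, even past 80 columns, and wrap only the argument list, in line with the kernel guideline that user-visible strings stay greppable. Hedged illustration:

#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static void sketch_log_unmap(struct device *dev, struct scatterlist *sg,
			     dma_addr_t dma, u32 len)
{
	/* Keep the printable string whole, even past 80 columns; only the
	 * argument list wraps, so the message remains greppable.
	 */
	dev_dbg(dev, "Unmapped buffer: buff_sg=%pK dma=%pad len 0x%X\n",
		sg_virt(sg), &dma, len);
}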
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.h b/drivers/staging/ccree/ssi_buffer_mgr.h
index 41f5223730f8..1032f25edcab 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.h
+++ b/drivers/staging/ccree/ssi_buffer_mgr.h
@@ -80,7 +80,10 @@ int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ct
void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
-void ssi_buffer_mgr_copy_scatterlist_portion(u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
+void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
+ struct scatterlist *sg,
+ u32 to_skip, u32 end,
+ enum ssi_sg_cpy_direct direct);
void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 8d31a93fd8b7..ee85cbf7c9ae 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -181,45 +181,42 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, crypto_alg);
- struct device *dev;
+ struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Initializing context @%p for %s\n",
- ctx_p, crypto_tfm_alg_name(tfm));
+ dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+ crypto_tfm_alg_name(tfm));
ctx_p->cipher_mode = ssi_alg->cipher_mode;
ctx_p->flow_mode = ssi_alg->flow_mode;
ctx_p->drvdata = ssi_alg->drvdata;
- dev = &ctx_p->drvdata->plat_dev->dev;
/* Allocate key buffer, cache line aligned */
ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
- if (!ctx_p->user.key) {
- SSI_LOG_ERR("Allocating key buffer in context failed\n");
- rc = -ENOMEM;
- }
- SSI_LOG_DEBUG("Allocated key buffer in context. key=@%p\n",
- ctx_p->user.key);
+ if (!ctx_p->user.key)
+ return -ENOMEM;
+
+ dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+ ctx_p->user.key);
/* Map key buffer */
ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
max_key_buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
- SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
- max_key_buf_size, ctx_p->user.key);
+ dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+ max_key_buf_size, ctx_p->user.key);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=%pad\n",
- max_key_buf_size, ctx_p->user.key,
- ctx_p->user.key_dma_addr);
+ dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+ max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Alloc hash tfm for essiv */
ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
if (IS_ERR(ctx_p->shash_tfm)) {
- SSI_LOG_ERR("Error allocating hash tfm for ESSIV.\n");
+ dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
return PTR_ERR(ctx_p->shash_tfm);
}
}
@@ -230,11 +227,11 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Clearing context @%p for %s\n",
- crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+ dev_dbg(dev, "Clearing context @%p for %s\n",
+ crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
/* Free hash tfm for essiv */
@@ -245,12 +242,12 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
/* Unmap key buffer */
dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=%pad\n",
- ctx_p->user.key_dma_addr);
+ dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+ &ctx_p->user.key_dma_addr);
/* Free key buffer in context */
kfree(ctx_p->user.key);
- SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
+ dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
}
struct tdes_keys {
@@ -298,16 +295,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
unsigned int keylen)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
u32 tmp[DES_EXPKEY_WORDS];
unsigned int max_key_buf_size = get_max_keysize(tfm);
- SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
- ctx_p, crypto_tfm_alg_name(tfm), keylen);
+ dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+ ctx_p, crypto_tfm_alg_name(tfm), keylen);
dump_byte_array("key", (u8 *)key, keylen);
- SSI_LOG_DEBUG("after FIPS check");
-
/* STAT_PHASE_0: Init and sanity checks */
#if SSI_CC_HAS_MULTI2
@@ -317,7 +312,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
#endif /*SSI_CC_HAS_MULTI2*/
if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
- SSI_LOG_ERR("Unsupported key size %d.\n", keylen);
+ dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -327,13 +322,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
- SSI_LOG_ERR("HW key not supported for non-AES flows\n");
+ dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
- SSI_LOG_ERR("Unsupported hw key1 number (%d)\n", hki->hw_key1);
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki->hw_key1);
return -EINVAL;
}
@@ -341,18 +337,20 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
(ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
(ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
if (unlikely(hki->hw_key1 == hki->hw_key2)) {
- SSI_LOG_ERR("Illegal hw key numbers (%d,%d)\n", hki->hw_key1, hki->hw_key2);
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
- SSI_LOG_ERR("Unsupported hw key2 number (%d)\n", hki->hw_key2);
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki->hw_key2);
return -EINVAL;
}
}
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("ssi_is_hw_key ret 0");
+ dev_dbg(dev, "ssi_is_hw_key ret 0");
return 0;
}
@@ -362,19 +360,19 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- SSI_LOG_DEBUG("weak DES key");
+ dev_dbg(dev, "weak DES key");
return -EINVAL;
}
}
if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
xts_check_key(tfm, key, keylen) != 0) {
- SSI_LOG_DEBUG("weak XTS key");
+ dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
if ((ctx_p->flow_mode == S_DIN_to_DES) &&
(keylen == DES3_EDE_KEY_SIZE) &&
ssi_verify_3des_keys(key, keylen) != 0) {
- SSI_LOG_DEBUG("weak 3DES key");
+ dev_dbg(dev, "weak 3DES key");
return -EINVAL;
}
@@ -389,7 +387,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- SSI_LOG_DEBUG("SSI_CC_HAS_MULTI2 einval");
+ dev_dbg(dev, "SSI_CC_HAS_MULTI2 einval");
return -EINVAL;
#endif /*SSI_CC_HAS_MULTI2*/
} else {
@@ -407,7 +405,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
if (err) {
- SSI_LOG_ERR("Failed to hash ESSIV key.\n");
+ dev_err(dev, "Failed to hash ESSIV key.\n");
return err;
}
}
@@ -416,7 +414,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
max_key_buf_size, DMA_TO_DEVICE);
ctx_p->keylen = keylen;
- SSI_LOG_DEBUG("return safely");
+ dev_dbg(dev, "return safely");
return 0;
}
@@ -430,6 +428,7 @@ ssi_blkcipher_create_setup_desc(
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
int cipher_mode = ctx_p->cipher_mode;
int flow_mode = ctx_p->flow_mode;
int direction = req_ctx->gen_ctx.op_type;
@@ -540,8 +539,7 @@ ssi_blkcipher_create_setup_desc(
(*seq_size)++;
break;
default:
- SSI_LOG_ERR("Unsupported cipher mode (%d)\n", cipher_mode);
- BUG();
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
}
}
@@ -601,6 +599,7 @@ ssi_blkcipher_create_data_desc(
unsigned int *seq_size)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
unsigned int flow_mode = ctx_p->flow_mode;
switch (ctx_p->flow_mode) {
@@ -616,15 +615,15 @@ ssi_blkcipher_create_data_desc(
break;
#endif /*SSI_CC_HAS_MULTI2*/
default:
- SSI_LOG_ERR("invalid flow mode, flow_mode = %d\n", flow_mode);
+ dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
}
/* Process */
if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
- SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
- sg_dma_address(src), nbytes);
- SSI_LOG_DEBUG(" data params addr %pad length 0x%X\n",
- sg_dma_address(dst), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
nbytes, NS_BIT);
@@ -637,9 +636,8 @@ ssi_blkcipher_create_data_desc(
(*seq_size)++;
} else {
/* bypass */
- SSI_LOG_DEBUG(" bypass params addr %pad "
- "length 0x%X addr 0x%08X\n",
- req_ctx->mlli_params.mlli_dma_addr,
+ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+ &req_ctx->mlli_params.mlli_dma_addr,
req_ctx->mlli_params.mlli_len,
(unsigned int)ctx_p->drvdata->mlli_sram_addr);
hw_desc_init(&desc[*seq_size]);
@@ -657,21 +655,18 @@ ssi_blkcipher_create_data_desc(
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT);
if (req_ctx->out_nents == 0) {
- SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
- "addr 0x%08X\n",
- (unsigned int)ctx_p->drvdata->mlli_sram_addr,
- (unsigned int)ctx_p->drvdata->mlli_sram_addr);
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr,
+ (unsigned int)ctx_p->drvdata->mlli_sram_addr);
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
(!areq ? 0 : 1));
} else {
- SSI_LOG_DEBUG(" din/dout params "
- "addr 0x%08X addr 0x%08X\n",
+ dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
(unsigned int)ctx_p->drvdata->mlli_sram_addr +
- (u32)LLI_ENTRY_BYTE_SIZE *
- req_ctx->in_nents);
+ (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
set_dout_mlli(&desc[*seq_size],
(ctx_p->drvdata->mlli_sram_addr +
(LLI_ENTRY_BYTE_SIZE *
@@ -697,16 +692,10 @@ static int ssi_blkcipher_complete(struct device *dev,
void __iomem *cc_base)
{
int completion_error = 0;
- u32 inflight_counter;
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-
- /*Set the inflight couter value to local variable*/
- inflight_counter = ctx_p->drvdata->inflight_counter;
- /*Decrease the inflight counter*/
- if (ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
- ctx_p->drvdata->inflight_counter--;
+ kfree(req_ctx->iv);
if (areq) {
/*
@@ -742,20 +731,20 @@ static int ssi_blkcipher_process(
enum drv_crypto_direction direction)
{
struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = &ctx_p->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
struct ssi_crypto_req ssi_req = {};
int rc, seq_len = 0, cts_restore_flag = 0;
- SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
- ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
- areq, info, nbytes);
+ dev_dbg(dev, "%s areq=%p info=%p nbytes=%d\n",
+ ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+ "Encrypt" : "Decrypt"), areq, info, nbytes);
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
if (unlikely(validate_data_size(ctx_p, nbytes))) {
- SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
+ dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
goto exit_process;
@@ -765,6 +754,17 @@ static int ssi_blkcipher_process(
rc = 0;
goto exit_process;
}
+
+	/* The IV we are handed may be allocated from the stack so
+ * we must copy it to a DMAable buffer before use.
+ */
+ req_ctx->iv = kmalloc(ivsize, GFP_KERNEL);
+ if (!req_ctx->iv) {
+ rc = -ENOMEM;
+ goto exit_process;
+ }
+ memcpy(req_ctx->iv, info, ivsize);
+
/*For CTS in case of data size aligned to 16 use CBC mode*/
if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
ctx_p->cipher_mode = DRV_CIPHER_CBC;
@@ -786,9 +786,11 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_1: Map buffers */
- rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
+ rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
+ ivsize, nbytes, req_ctx->iv,
+ src, dst);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("map_request() failed\n");
+ dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -838,8 +840,10 @@ exit_process:
if (cts_restore_flag != 0)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS) {
kfree(req_ctx->backup_info);
+ kfree(req_ctx->iv);
+ }
return rc;
}
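Note: the ssi_cipher.c hunks above fix a DMA correctness issue rather than just logging — the IV handed in through the crypto API may live on the caller's stack, which must never be given to the DMA API, so the request context now carries a kmalloc()'d copy (req_ctx->iv) that is mapped instead and freed on both the synchronous exit path and in the completion handler. The sketch below captures the idea with illustrative names and uses kmemdup(), which is equivalent to the kmalloc()+memcpy() pair in the patch:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct sketch_req_ctx {
	u8 *iv;			/* DMA-safe copy of the caller's IV */
	/* ... */
};

static int sketch_copy_iv(struct sketch_req_ctx *req_ctx, const u8 *info,
			  unsigned int ivsize)
{
	/* info may point at stack memory; copy it to the heap before any
	 * dma_map_single() sees it.
	 */
	req_ctx->iv = kmemdup(info, ivsize, GFP_KERNEL);
	return req_ctx->iv ? 0 : -ENOMEM;
}

static void sketch_put_iv(struct sketch_req_ctx *req_ctx)
{
	/* called from both the error path and the completion handler */
	kfree(req_ctx->iv);
	req_ctx->iv = NULL;
}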
@@ -1245,16 +1249,15 @@ static struct ssi_alg_template blkcipher_algs[] = {
};
static
-struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
+struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template
+ *template, struct device *dev)
{
struct ssi_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- SSI_LOG_ERR("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
alg = &t_alg->crypto_alg;
@@ -1285,10 +1288,6 @@ int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
struct ssi_crypto_alg *t_alg, *n;
struct ssi_blkcipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
- struct device *dev;
-
- dev = &drvdata->plat_dev->dev;
-
if (blkcipher_handle) {
/* Remove registered algs */
list_for_each_entry_safe(t_alg, n,
@@ -1308,6 +1307,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
{
struct ssi_blkcipher_handle *ablkcipher_handle;
struct ssi_crypto_alg *t_alg;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
@@ -1315,37 +1315,38 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
if (!ablkcipher_handle)
return -ENOMEM;
- drvdata->blkcipher_handle = ablkcipher_handle;
-
INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
+ drvdata->blkcipher_handle = ablkcipher_handle;
/* Linux crypto */
- SSI_LOG_DEBUG("Number of algorithms = %zu\n", ARRAY_SIZE(blkcipher_algs));
+ dev_dbg(dev, "Number of algorithms = %zu\n",
+ ARRAY_SIZE(blkcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
- SSI_LOG_DEBUG("creating %s\n", blkcipher_algs[alg].driver_name);
- t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg]);
+ dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
+ t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- blkcipher_algs[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ blkcipher_algs[alg].driver_name);
goto fail0;
}
t_alg->drvdata = drvdata;
- SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
+ dev_dbg(dev, "registering %s\n",
+ blkcipher_algs[alg].driver_name);
rc = crypto_register_alg(&t_alg->crypto_alg);
- SSI_LOG_DEBUG("%s alg registration rc = %x\n",
- t_alg->crypto_alg.cra_driver_name, rc);
+ dev_dbg(dev, "%s alg registration rc = %x\n",
+ t_alg->crypto_alg.cra_driver_name, rc);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
goto fail0;
} else {
list_add_tail(&t_alg->entry,
&ablkcipher_handle->blkcipher_alg_list);
- SSI_LOG_DEBUG("Registered %s\n",
- t_alg->crypto_alg.cra_driver_name);
+ dev_dbg(dev, "Registered %s\n",
+ t_alg->crypto_alg.cra_driver_name);
}
}
return 0;
diff --git a/drivers/staging/ccree/ssi_cipher.h b/drivers/staging/ccree/ssi_cipher.h
index 296b375d5d89..25e6335c0d94 100644
--- a/drivers/staging/ccree/ssi_cipher.h
+++ b/drivers/staging/ccree/ssi_cipher.h
@@ -27,11 +27,11 @@
#include "ssi_buffer_mgr.h"
/* Crypto cipher flags */
-#define CC_CRYPTO_CIPHER_KEY_KFDE0 (1 << 0)
-#define CC_CRYPTO_CIPHER_KEY_KFDE1 (1 << 1)
-#define CC_CRYPTO_CIPHER_KEY_KFDE2 (1 << 2)
-#define CC_CRYPTO_CIPHER_KEY_KFDE3 (1 << 3)
-#define CC_CRYPTO_CIPHER_DU_SIZE_512B (1 << 4)
+#define CC_CRYPTO_CIPHER_KEY_KFDE0 BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1 BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2 BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3 BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B BIT(4)
#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
@@ -43,6 +43,7 @@ struct blkcipher_req_ctx {
u32 out_nents;
u32 out_mlli_nents;
u8 *backup_info; /*store iv for generated IV flow*/
+ u8 *iv;
bool is_giv;
struct mlli_params mlli_params;
};
@@ -75,7 +76,7 @@ struct arm_hw_key_info {
static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
{
- return 0;
+ return false;
}
#endif /* CRYPTO_TFM_REQ_HW_KEY */
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 9c6f1200c130..1a3c481fa92a 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -74,70 +74,46 @@
#include "ssi_fips.h"
#ifdef DX_DUMP_BYTES
-void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
+void dump_byte_array(const char *name, const u8 *buf, size_t len)
{
- int i, line_offset = 0, ret = 0;
- const u8 *cur_byte;
- char line_buf[80];
+ char prefix[NAME_LEN];
- if (!the_array) {
- SSI_LOG_ERR("cannot dump array - NULL pointer\n");
+ if (!buf)
return;
- }
- ret = snprintf(line_buf, sizeof(line_buf), "%s[%lu]: ", name, size);
- if (ret < 0) {
- SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
- return;
- }
- line_offset = ret;
- for (i = 0, cur_byte = the_array;
- (i < size) && (line_offset < sizeof(line_buf)); i++, cur_byte++) {
- ret = snprintf(line_buf + line_offset,
- sizeof(line_buf) - line_offset,
- "0x%02X ", *cur_byte);
- if (ret < 0) {
- SSI_LOG_ERR("snprintf returned %d . aborting buffer array dump\n", ret);
- return;
- }
- line_offset += ret;
- if (line_offset > 75) { /* Cut before line end */
- SSI_LOG_DEBUG("%s\n", line_buf);
- line_offset = 0;
- }
- }
+ snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len);
- if (line_offset > 0) /* Dump remaining line */
- SSI_LOG_DEBUG("%s\n", line_buf);
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf,
+ len, false);
}
#endif
static irqreturn_t cc_isr(int irq, void *dev_id)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irr;
u32 imr;
/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
/* read the interrupt status */
- irr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- SSI_LOG_DEBUG("Got IRR=0x%08X\n", irr);
+ irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "Got IRR=0x%08X\n", irr);
if (unlikely(irr == 0)) { /* Probably shared interrupt line */
- SSI_LOG_ERR("Got interrupt with empty IRR\n");
+ dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
- imr = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR));
+ imr = cc_ioread(drvdata, CC_REG(HOST_IMR));
/* clear interrupt - must be before processing events */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), irr);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);
drvdata->irq = irr;
/* Completion interrupt - most probable */
if (likely((irr & SSI_COMP_IRQ_MASK) != 0)) {
/* Mask AXI completion interrupt - will be unmasked in Deferred service handler */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_COMP_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_COMP_IRQ_MASK);
irr &= ~SSI_COMP_IRQ_MASK;
complete_request(drvdata);
}
@@ -145,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* TEE FIPS interrupt */
if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
/* Mask interrupt - will be unmasked in Deferred service handler */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | SSI_GPR0_IRQ_MASK);
irr &= ~SSI_GPR0_IRQ_MASK;
fips_handler(drvdata);
}
@@ -155,14 +131,16 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
u32 axi_err;
/* Read the AXI error ID */
- axi_err = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
- SSI_LOG_DEBUG("AXI completion error: axim_mon_err=0x%08X\n", axi_err);
+ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
+ dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
+ axi_err);
irr &= ~SSI_AXI_ERR_IRQ_MASK;
}
if (unlikely(irr != 0)) {
- SSI_LOG_DEBUG("IRR includes unknown cause bits (0x%08X)\n", irr);
+ dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
+ irr);
/* Just warning */
}
@@ -172,48 +150,48 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
/* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG));
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
- SSI_LOG_DEBUG("AXIM_CFG=0x%08X\n", CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CFG)));
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~SSI_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
/* Clear all pending interrupts */
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
- SSI_LOG_DEBUG("IRR=0x%08X\n", val);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), val);
+ val = cc_ioread(drvdata, CC_REG(HOST_IRR));
+ dev_dbg(dev, "IRR=0x%08X\n", val);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = (~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK | SSI_GPR0_IRQ_MASK));
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
+ val = (unsigned int)(~(SSI_COMP_IRQ_MASK | SSI_AXI_ERR_IRQ_MASK |
+ SSI_GPR0_IRQ_MASK));
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
#ifdef DX_HOST_IRQ_TIMER_INIT_VAL_REG_OFFSET
#ifdef DX_IRQ_DELAY
/* Set CC IRQ delay */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL),
- DX_IRQ_DELAY);
+ cc_iowrite(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL), DX_IRQ_DELAY);
#endif
- if (CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)) > 0) {
- SSI_LOG_DEBUG("irq_delay=%d CC cycles\n",
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRQ_TIMER_INIT_VAL)));
+ if (cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)) > 0) {
+ dev_dbg(dev, "irq_delay=%d CC cycles\n",
+ cc_ioread(drvdata, CC_REG(HOST_IRQ_TIMER_INIT_VAL)));
}
#endif
cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
+ dev_info(dev, "Cache params previous: 0x%08X\n", val);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
- cache_params);
- val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
+ cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
+ val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
if (is_probe)
- SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
- val, cache_params);
+ dev_info(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
return 0;
}
@@ -222,181 +200,172 @@ static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
void __iomem *cc_base = NULL;
- bool irq_registered = false;
- struct ssi_drvdata *new_drvdata = kzalloc(sizeof(*new_drvdata),
- GFP_KERNEL);
+ struct ssi_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
+ dma_addr_t dma_mask;
int rc = 0;
- if (unlikely(!new_drvdata)) {
- SSI_LOG_ERR("Failed to allocate drvdata");
- rc = -ENOMEM;
- goto init_cc_res_err;
- }
+ new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
+ if (!new_drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(plat_dev, new_drvdata);
+ new_drvdata->plat_dev = plat_dev;
new_drvdata->clk = of_clk_get(np, 0);
new_drvdata->coherent = of_dma_is_coherent(np);
- /*Initialize inflight counter used in dx_ablkcipher_secure_complete used for count of BYSPASS blocks operations*/
- new_drvdata->inflight_counter = 0;
-
- dev_set_drvdata(&plat_dev->dev, new_drvdata);
/* Get device resources */
/* First CC registers space */
- new_drvdata->res_mem = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
- if (unlikely(!new_drvdata->res_mem)) {
- SSI_LOG_ERR("Failed getting IO memory resource\n");
- rc = -ENODEV;
- goto init_cc_res_err;
- }
- SSI_LOG_DEBUG("Got MEM resource (%s): start=%pad end=%pad\n",
- new_drvdata->res_mem->name,
- new_drvdata->res_mem->start,
- new_drvdata->res_mem->end);
+ req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
/* Map registers space */
- req_mem_cc_regs = request_mem_region(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem), "arm_cc7x_regs");
- if (unlikely(!req_mem_cc_regs)) {
- SSI_LOG_ERR("Couldn't allocate registers memory region at "
- "0x%08X\n", (unsigned int)new_drvdata->res_mem->start);
- rc = -EBUSY;
- goto init_cc_res_err;
+ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+ if (IS_ERR(new_drvdata->cc_base)) {
+ dev_err(dev, "Failed to ioremap registers");
+ return PTR_ERR(new_drvdata->cc_base);
}
- cc_base = ioremap(new_drvdata->res_mem->start, resource_size(new_drvdata->res_mem));
- if (unlikely(!cc_base)) {
- SSI_LOG_ERR("ioremap[CC](0x%08X,0x%08X) failed\n",
- (unsigned int)new_drvdata->res_mem->start,
- (unsigned int)resource_size(new_drvdata->res_mem));
- rc = -ENOMEM;
- goto init_cc_res_err;
- }
- SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
- new_drvdata->cc_base = cc_base;
+
+ dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
+ req_mem_cc_regs);
+ dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
+ &req_mem_cc_regs->start, new_drvdata->cc_base);
+
+ cc_base = new_drvdata->cc_base;
/* Then IRQ */
- new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
- if (unlikely(!new_drvdata->res_irq)) {
- SSI_LOG_ERR("Failed getting IRQ resource\n");
- rc = -ENODEV;
- goto init_cc_res_err;
- }
- rc = request_irq(new_drvdata->res_irq->start, cc_isr,
- IRQF_SHARED, "arm_cc7x", new_drvdata);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Could not register to interrupt %llu\n",
- (unsigned long long)new_drvdata->res_irq->start);
- goto init_cc_res_err;
+ new_drvdata->irq = platform_get_irq(plat_dev, 0);
+ if (new_drvdata->irq < 0) {
+ dev_err(dev, "Failed getting IRQ resource\n");
+ return new_drvdata->irq;
}
- init_completion(&new_drvdata->icache_setup_completion);
- irq_registered = true;
- SSI_LOG_DEBUG("Registered to IRQ (%s) %llu\n",
- new_drvdata->res_irq->name,
- (unsigned long long)new_drvdata->res_irq->start);
+ rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
+ IRQF_SHARED, "arm_cc7x", new_drvdata);
+ if (rc) {
+ dev_err(dev, "Could not register to interrupt %d\n",
+ new_drvdata->irq);
+ return rc;
+ }
+ dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);
- new_drvdata->plat_dev = plat_dev;
+ if (!plat_dev->dev.dma_mask)
+ plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;
- rc = cc_clk_on(new_drvdata);
- if (rc)
- goto init_cc_res_err;
+ dma_mask = (dma_addr_t)(DMA_BIT_MASK(DMA_BIT_MASK_LEN));
+ while (dma_mask > 0x7fffffffUL) {
+ if (dma_supported(&plat_dev->dev, dma_mask)) {
+ rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
+ if (!rc)
+ break;
+ }
+ dma_mask >>= 1;
+ }
- if (!new_drvdata->plat_dev->dev.dma_mask)
- new_drvdata->plat_dev->dev.dma_mask = &new_drvdata->plat_dev->dev.coherent_dma_mask;
+ if (rc) {
+ dev_err(dev, "Failed in dma_set_mask, mask=%par\n",
+ &dma_mask);
+ return rc;
+ }
- if (!new_drvdata->plat_dev->dev.coherent_dma_mask)
- new_drvdata->plat_dev->dev.coherent_dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
+ rc = cc_clk_on(new_drvdata);
+ if (rc) {
+ dev_err(dev, "Failed to enable clock");
+ return rc;
+ }
/* Verify correct mapping */
- signature_val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
if (signature_val != DX_DEV_SIGNATURE) {
- SSI_LOG_ERR("Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, (u32)DX_DEV_SIGNATURE);
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, (u32)DX_DEV_SIGNATURE);
rc = -EINVAL;
- goto init_cc_res_err;
+ goto post_clk_err;
}
- SSI_LOG_DEBUG("CC SIGNATURE=0x%08X\n", signature_val);
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
/* Display HW versions */
- SSI_LOG(KERN_INFO, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n", SSI_DEV_NAME_STR,
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_VERSION)), DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
+ SSI_DEV_NAME_STR,
+ cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
+ DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("init_cc_regs failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "init_cc_regs failed\n");
+ goto post_clk_err;
}
#ifdef ENABLE_CC_SYSFS
- rc = ssi_sysfs_init(&plat_dev->dev.kobj, new_drvdata);
+ rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("init_stat_db failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "init_stat_db failed\n");
+ goto post_regs_err;
}
#endif
+ rc = ssi_fips_init(new_drvdata);
+ if (unlikely(rc != 0)) {
+ dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
+ goto post_sysfs_err;
+ }
rc = ssi_sram_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_sram_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_sram_mgr_init failed\n");
+ goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
- SSI_LOG_ERR("Failed to alloc MLLI Sram buffer\n");
+ dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
- goto init_cc_res_err;
+ goto post_sram_mgr_err;
}
rc = request_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("request_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "request_mgr_init failed\n");
+ goto post_sram_mgr_err;
}
rc = ssi_buffer_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("buffer_mgr_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "buffer_mgr_init failed\n");
+ goto post_req_mgr_err;
}
rc = ssi_power_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_power_mgr_init failed\n");
- goto init_cc_res_err;
- }
-
- rc = ssi_fips_init(new_drvdata);
- if (unlikely(rc != 0)) {
- SSI_LOG_ERR("SSI_FIPS_INIT failed 0x%x\n", rc);
- goto init_cc_res_err;
+ dev_err(dev, "ssi_power_mgr_init failed\n");
+ goto post_buf_mgr_err;
}
rc = ssi_ivgen_init(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_ivgen_init failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_ivgen_init failed\n");
+ goto post_power_mgr_err;
}
/* Allocate crypto algs */
rc = ssi_ablkcipher_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_ablkcipher_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_ablkcipher_alloc failed\n");
+ goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = ssi_hash_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_hash_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_hash_alloc failed\n");
+ goto post_cipher_err;
}
rc = ssi_aead_alloc(new_drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("ssi_aead_alloc failed\n");
- goto init_cc_res_err;
+ dev_err(dev, "ssi_aead_alloc failed\n");
+ goto post_hash_err;
}
/* If we got here and FIPS mode is enabled
@@ -407,52 +376,43 @@ static int init_cc_resources(struct platform_device *plat_dev)
return 0;
-init_cc_res_err:
- SSI_LOG_ERR("Freeing CC HW resources!\n");
-
- if (new_drvdata) {
- ssi_aead_free(new_drvdata);
- ssi_hash_free(new_drvdata);
- ssi_ablkcipher_free(new_drvdata);
- ssi_ivgen_fini(new_drvdata);
- ssi_power_mgr_fini(new_drvdata);
- ssi_buffer_mgr_fini(new_drvdata);
- request_mgr_fini(new_drvdata);
- ssi_sram_mgr_fini(new_drvdata);
- ssi_fips_fini(new_drvdata);
+post_hash_err:
+ ssi_hash_free(new_drvdata);
+post_cipher_err:
+ ssi_ablkcipher_free(new_drvdata);
+post_ivgen_err:
+ ssi_ivgen_fini(new_drvdata);
+post_power_mgr_err:
+ ssi_power_mgr_fini(new_drvdata);
+post_buf_mgr_err:
+ ssi_buffer_mgr_fini(new_drvdata);
+post_req_mgr_err:
+ request_mgr_fini(new_drvdata);
+post_sram_mgr_err:
+ ssi_sram_mgr_fini(new_drvdata);
+post_fips_init_err:
+ ssi_fips_fini(new_drvdata);
+post_sysfs_err:
#ifdef ENABLE_CC_SYSFS
- ssi_sysfs_fini();
+ ssi_sysfs_fini();
#endif
-
- if (req_mem_cc_regs) {
- if (irq_registered) {
- free_irq(new_drvdata->res_irq->start, new_drvdata);
- new_drvdata->res_irq = NULL;
- iounmap(cc_base);
- new_drvdata->cc_base = NULL;
- }
- release_mem_region(new_drvdata->res_mem->start,
- resource_size(new_drvdata->res_mem));
- new_drvdata->res_mem = NULL;
- }
- kfree(new_drvdata);
- dev_set_drvdata(&plat_dev->dev, NULL);
- }
-
+post_regs_err:
+ fini_cc_regs(new_drvdata);
+post_clk_err:
+ cc_clk_off(new_drvdata);
return rc;
}
void fini_cc_regs(struct ssi_drvdata *drvdata)
{
/* Mask all interrupts */
- WRITE_REGISTER(drvdata->cc_base +
- CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)dev_get_drvdata(&plat_dev->dev);
+ (struct ssi_drvdata *)platform_get_drvdata(plat_dev);
ssi_aead_free(drvdata);
ssi_hash_free(drvdata);
@@ -466,22 +426,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
#ifdef ENABLE_CC_SYSFS
ssi_sysfs_fini();
#endif
-
fini_cc_regs(drvdata);
cc_clk_off(drvdata);
- free_irq(drvdata->res_irq->start, drvdata);
- drvdata->res_irq = NULL;
-
- if (drvdata->cc_base) {
- iounmap(drvdata->cc_base);
- release_mem_region(drvdata->res_mem->start,
- resource_size(drvdata->res_mem));
- drvdata->cc_base = NULL;
- drvdata->res_mem = NULL;
- }
-
- kfree(drvdata);
- dev_set_drvdata(&plat_dev->dev, NULL);
}
int cc_clk_on(struct ssi_drvdata *drvdata)
@@ -514,18 +460,19 @@ void cc_clk_off(struct ssi_drvdata *drvdata)
static int cc7x_probe(struct platform_device *plat_dev)
{
int rc;
+ struct device *dev = &plat_dev->dev;
#if defined(CONFIG_ARM) && defined(CC_DEBUG)
u32 ctr, cacheline_size;
asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
cacheline_size = 4 << ((ctr >> 16) & 0xf);
- SSI_LOG_DEBUG("CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
- cacheline_size, L1_CACHE_BYTES);
+ dev_dbg(dev, "CP15(L1_CACHE_BYTES) = %u , Kconfig(L1_CACHE_BYTES) = %u\n",
+ cacheline_size, L1_CACHE_BYTES);
asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (ctr));
- SSI_LOG_DEBUG("Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
- (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
- (ctr >> 20) & 0xF, ctr & 0xF);
+ dev_dbg(dev, "Main ID register (MIDR): Implementer 0x%02X, Arch 0x%01X, Part 0x%03X, Rev r%dp%d\n",
+ (ctr >> 24), (ctr >> 16) & 0xF, (ctr >> 4) & 0xFFF,
+ (ctr >> 20) & 0xF, ctr & 0xF);
#endif
/* Map registers space */
@@ -533,18 +480,20 @@ static int cc7x_probe(struct platform_device *plat_dev)
if (rc != 0)
return rc;
- SSI_LOG(KERN_INFO, "ARM cc7x_ree device initialized\n");
+ dev_info(dev, "ARM ccree device initialized\n");
return 0;
}
static int cc7x_remove(struct platform_device *plat_dev)
{
- SSI_LOG_DEBUG("Releasing cc7x resources...\n");
+ struct device *dev = &plat_dev->dev;
+
+ dev_dbg(dev, "Releasing cc7x resources...\n");
cleanup_cc_resources(plat_dev);
- SSI_LOG(KERN_INFO, "ARM cc7x_ree device terminated\n");
+ dev_info(dev, "ARM ccree device terminated\n");
return 0;
}
diff --git a/drivers/staging/ccree/ssi_driver.h b/drivers/staging/ccree/ssi_driver.h
index b6ad89ae9bee..94c755cafb47 100644
--- a/drivers/staging/ccree/ssi_driver.h
+++ b/drivers/staging/ccree/ssi_driver.h
@@ -37,13 +37,11 @@
#include <crypto/hash.h>
#include <linux/version.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
/* Registers definitions from shared/hw/ree_include */
-#include "dx_reg_base_host.h"
#include "dx_host.h"
-#include "cc_regs.h"
#include "dx_reg_common.h"
-#include "cc_hal.h"
#define CC_SUPPORT_SHA DX_DEV_SHA_MAX
#include "cc_crypto_ctx.h"
#include "ssi_sysfs.h"
@@ -68,12 +66,19 @@
#define SSI_AXI_IRQ_MASK ((1 << DX_AXIM_CFG_BRESPMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
(1 << DX_AXIM_CFG_INFLTMASK_BIT_SHIFT) | (1 << DX_AXIM_CFG_COMPMASK_BIT_SHIFT))
-#define SSI_AXI_ERR_IRQ_MASK (1 << DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
+#define SSI_AXI_ERR_IRQ_MASK BIT(DX_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)
-#define SSI_COMP_IRQ_MASK (1 << DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define SSI_COMP_IRQ_MASK BIT(DX_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+
+#define AXIM_MON_COMP_VALUE GENMASK(DX_AXIM_MON_COMP_VALUE_BIT_SIZE + \
+ DX_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
+ DX_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+
+/* Register name mangling macro */
+#define CC_REG(reg_name) DX_ ## reg_name ## _REG_OFFSET
/* TEE FIPS status interrupt */
-#define SSI_GPR0_IRQ_MASK (1 << DX_HOST_IRR_GPR0_BIT_SHIFT)
+#define SSI_GPR0_IRQ_MASK BIT(DX_HOST_IRR_GPR0_BIT_SHIFT)
#define SSI_CRA_PRIO 3000
@@ -90,19 +95,6 @@
* field in the HW descriptor. The DMA engine +8 that value.
*/
-/* Logging macros */
-#define SSI_LOG(level, format, ...) \
- printk(level "cc715ree::%s: " format, __func__, ##__VA_ARGS__)
-#define SSI_LOG_ERR(format, ...) SSI_LOG(KERN_ERR, format, ##__VA_ARGS__)
-#define SSI_LOG_WARNING(format, ...) SSI_LOG(KERN_WARNING, format, ##__VA_ARGS__)
-#define SSI_LOG_NOTICE(format, ...) SSI_LOG(KERN_NOTICE, format, ##__VA_ARGS__)
-#define SSI_LOG_INFO(format, ...) SSI_LOG(KERN_INFO, format, ##__VA_ARGS__)
-#ifdef CC_DEBUG
-#define SSI_LOG_DEBUG(format, ...) SSI_LOG(KERN_DEBUG, format, ##__VA_ARGS__)
-#else /* Debug log messages are removed at compile time for non-DEBUG config. */
-#define SSI_LOG_DEBUG(format, ...) do {} while (0)
-#endif
-
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
@@ -128,10 +120,8 @@ struct ssi_crypto_req {
* @fw_ver: SeP loaded firmware version
*/
struct ssi_drvdata {
- struct resource *res_mem;
- struct resource *res_irq;
void __iomem *cc_base;
- unsigned int irq;
+ int irq;
u32 irq_mask;
u32 fw_ver;
/* Calibration time of start/stop
@@ -140,7 +130,6 @@ struct ssi_drvdata {
u32 monitor_null_cycles;
struct platform_device *plat_dev;
ssi_sram_addr_t mlli_sram_addr;
- struct completion icache_setup_completion;
void *buff_mgr_handle;
void *hash_handle;
void *aead_handle;
@@ -149,7 +138,6 @@ struct ssi_drvdata {
void *fips_handle;
void *ivgen_handle;
void *sram_mgr_handle;
- u32 inflight_counter;
struct clk *clk;
bool coherent;
};
@@ -187,11 +175,16 @@ struct async_gen_req_ctx {
enum drv_crypto_direction op_type;
};
+static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
+{
+ return &drvdata->plat_dev->dev;
+}
+
#ifdef DX_DUMP_BYTES
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size);
#else
-#define dump_byte_array(name, array, size) do { \
-} while (0);
+static inline void dump_byte_array(const char *name, const u8 *the_array,
+ unsigned long size) {};
#endif
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
@@ -199,5 +192,15 @@ void fini_cc_regs(struct ssi_drvdata *drvdata);
int cc_clk_on(struct ssi_drvdata *drvdata);
void cc_clk_off(struct ssi_drvdata *drvdata);
+static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
+{
+ iowrite32(val, (drvdata->cc_base + reg));
+}
+
+static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
+{
+ return ioread32(drvdata->cc_base + reg);
+}
+
#endif /*__SSI_DRIVER_H__*/
diff --git a/drivers/staging/ccree/ssi_fips.c b/drivers/staging/ccree/ssi_fips.c
index 33d53d64603d..4aea99fa129f 100644
--- a/drivers/staging/ccree/ssi_fips.c
+++ b/drivers/staging/ccree/ssi_fips.c
@@ -19,7 +19,6 @@
#include "ssi_config.h"
#include "ssi_driver.h"
-#include "cc_hal.h"
#include "ssi_fips.h"
static void fips_dsr(unsigned long devarg);
@@ -34,9 +33,8 @@ struct ssi_fips_handle {
static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
{
u32 reg;
- void __iomem *cc_base = drvdata->cc_base;
- reg = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
}
@@ -46,12 +44,11 @@ static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
*/
void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
{
- void __iomem *cc_base = drvdata->cc_base;
int val = CC_FIPS_SYNC_REE_STATUS;
val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), val);
+ cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
void ssi_fips_fini(struct ssi_drvdata *drvdata)
@@ -76,41 +73,42 @@ void fips_handler(struct ssi_drvdata *drvdata)
tasklet_schedule(&fips_handle_ptr->tasklet);
}
-static inline void tee_fips_error(void)
+static inline void tee_fips_error(struct device *dev)
{
if (fips_enabled)
panic("ccree: TEE reported cryptographic error in fips mode!\n");
else
- SSI_LOG_ERR("TEE reported error!\n");
+ dev_err(dev, "TEE reported error!\n");
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
irq = (drvdata->irq & (SSI_GPR0_IRQ_MASK));
if (irq) {
- state = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
+ state = cc_ioread(drvdata, CC_REG(GPR_HOST));
if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error();
+ tee_fips_error(dev);
}
/* after verifying that there is nothing to do,
* unmask AXI completion interrupt.
*/
- val = (CC_REG_OFFSET(HOST_RGF, HOST_IMR) & ~irq);
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR), val);
+ val = (CC_REG(HOST_IMR) & ~irq);
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), val);
}
/* The function called once at driver entry point .*/
int ssi_fips_init(struct ssi_drvdata *p_drvdata)
{
struct ssi_fips_handle *fips_h;
+ struct device *dev = drvdata_to_dev(p_drvdata);
fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
@@ -118,11 +116,11 @@ int ssi_fips_init(struct ssi_drvdata *p_drvdata)
p_drvdata->fips_handle = fips_h;
- SSI_LOG_DEBUG("Initializing fips tasklet\n");
+ dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error();
+ tee_fips_error(dev);
return 0;
}
diff --git a/drivers/staging/ccree/ssi_fips.h b/drivers/staging/ccree/ssi_fips.h
index 369ddf9478e7..63bcca7f3af9 100644
--- a/drivers/staging/ccree/ssi_fips.h
+++ b/drivers/staging/ccree/ssi_fips.h
@@ -40,8 +40,8 @@ static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
}
static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
-void fips_handler(struct ssi_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok) {}
+static inline void fips_handler(struct ssi_drvdata *drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 13291aeaf350..d79090ed7f9c 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -134,14 +134,13 @@ static int ssi_hash_map_result(struct device *dev,
digestsize,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
- SSI_LOG_ERR("Mapping digest result buffer %u B for DMA failed\n",
- digestsize);
+ dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
+ digestsize);
return -ENOMEM;
}
- SSI_LOG_DEBUG("Mapped digest result buffer %u B "
- "at va=%pK to dma=%pad\n",
+ dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
digestsize, state->digest_result_buff,
- state->digest_result_dma_addr);
+ &state->digest_result_dma_addr);
return 0;
}
@@ -158,54 +157,50 @@ static int ssi_hash_map_request(struct device *dev,
int rc = -ENOMEM;
state->buff0 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff0) {
- SSI_LOG_ERR("Allocating buff0 in context failed\n");
+ if (!state->buff0)
goto fail0;
- }
+
state->buff1 = kzalloc(SSI_MAX_HASH_BLCK_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->buff1) {
- SSI_LOG_ERR("Allocating buff1 in context failed\n");
+ if (!state->buff1)
goto fail_buff0;
- }
+
state->digest_result_buff = kzalloc(SSI_MAX_HASH_DIGEST_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_result_buff) {
- SSI_LOG_ERR("Allocating digest_result_buff in context failed\n");
+ if (!state->digest_result_buff)
goto fail_buff1;
- }
+
state->digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->digest_buff) {
- SSI_LOG_ERR("Allocating digest-buffer in context failed\n");
+ if (!state->digest_buff)
goto fail_digest_result_buff;
- }
- SSI_LOG_DEBUG("Allocated digest-buffer in context ctx->digest_buff=@%p\n", state->digest_buff);
+ dev_dbg(dev, "Allocated digest-buffer in context ctx->digest_buff=@%p\n",
+ state->digest_buff);
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
state->digest_bytes_len = kzalloc(HASH_LEN_SIZE, GFP_KERNEL | GFP_DMA);
- if (!state->digest_bytes_len) {
- SSI_LOG_ERR("Allocating digest-bytes-len in context failed\n");
+ if (!state->digest_bytes_len)
goto fail1;
- }
- SSI_LOG_DEBUG("Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n", state->digest_bytes_len);
+
+ dev_dbg(dev, "Allocated digest-bytes-len in context state->>digest_bytes_len=@%p\n",
+ state->digest_bytes_len);
} else {
state->digest_bytes_len = NULL;
}
state->opad_digest_buff = kzalloc(ctx->inter_digestsize, GFP_KERNEL | GFP_DMA);
- if (!state->opad_digest_buff) {
- SSI_LOG_ERR("Allocating opad-digest-buffer in context failed\n");
+ if (!state->opad_digest_buff)
goto fail2;
- }
- SSI_LOG_DEBUG("Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n", state->opad_digest_buff);
+
+ dev_dbg(dev, "Allocated opad-digest-buffer in context state->digest_bytes_len=@%p\n",
+ state->opad_digest_buff);
state->digest_buff_dma_addr = dma_map_single(dev, (void *)state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize, state->digest_buff);
+ dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize, state->digest_buff);
goto fail3;
}
- SSI_LOG_DEBUG("Mapped digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->digest_buff,
- state->digest_buff_dma_addr);
+ dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->digest_buff,
+ &state->digest_buff_dma_addr);
if (is_hmac) {
dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
@@ -240,7 +235,7 @@ static int ssi_hash_map_request(struct device *dev,
rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto fail4;
}
}
@@ -248,13 +243,13 @@ static int ssi_hash_map_request(struct device *dev,
if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
state->digest_bytes_len_dma_addr = dma_map_single(dev, (void *)state->digest_bytes_len, HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %u B at va=%pK for DMA failed\n",
- HASH_LEN_SIZE, state->digest_bytes_len);
+ dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
+ HASH_LEN_SIZE, state->digest_bytes_len);
goto fail4;
}
- SSI_LOG_DEBUG("Mapped digest len %u B at va=%pK to dma=%pad\n",
- HASH_LEN_SIZE, state->digest_bytes_len,
- state->digest_bytes_len_dma_addr);
+ dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
+ HASH_LEN_SIZE, state->digest_bytes_len,
+ &state->digest_bytes_len_dma_addr);
} else {
state->digest_bytes_len_dma_addr = 0;
}
@@ -262,14 +257,14 @@ static int ssi_hash_map_request(struct device *dev,
if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
state->opad_digest_dma_addr = dma_map_single(dev, (void *)state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
- SSI_LOG_ERR("Mapping opad digest %d B at va=%pK for DMA failed\n",
- ctx->inter_digestsize,
- state->opad_digest_buff);
+ dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
+ ctx->inter_digestsize,
+ state->opad_digest_buff);
goto fail5;
}
- SSI_LOG_DEBUG("Mapped opad digest %d B at va=%pK to dma=%pad\n",
- ctx->inter_digestsize, state->opad_digest_buff,
- state->opad_digest_dma_addr);
+ dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
+ ctx->inter_digestsize, state->opad_digest_buff,
+ &state->opad_digest_dma_addr);
} else {
state->opad_digest_dma_addr = 0;
}
@@ -316,22 +311,22 @@ static void ssi_hash_unmap_request(struct device *dev,
if (state->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, state->digest_buff_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
- state->digest_buff_dma_addr);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &state->digest_buff_dma_addr);
state->digest_buff_dma_addr = 0;
}
if (state->digest_bytes_len_dma_addr != 0) {
dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
- state->digest_bytes_len_dma_addr);
+ dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
+ &state->digest_bytes_len_dma_addr);
state->digest_bytes_len_dma_addr = 0;
}
if (state->opad_digest_dma_addr != 0) {
dma_unmap_single(dev, state->opad_digest_dma_addr,
ctx->inter_digestsize, DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
- state->opad_digest_dma_addr);
+ dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
+ &state->opad_digest_dma_addr);
state->opad_digest_dma_addr = 0;
}
@@ -352,11 +347,9 @@ static void ssi_hash_unmap_result(struct device *dev,
state->digest_result_dma_addr,
digestsize,
DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("unmpa digest result buffer "
- "va (%pK) pa (%pad) len %u\n",
- state->digest_result_buff,
- state->digest_result_dma_addr,
- digestsize);
+ dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
+ state->digest_result_buff,
+ &state->digest_result_dma_addr, digestsize);
memcpy(result,
state->digest_result_buff,
digestsize);
@@ -369,7 +362,7 @@ static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __i
struct ahash_request *req = (struct ahash_request *)ssi_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
req->base.complete(&req->base, 0);
@@ -383,7 +376,7 @@ static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __i
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -399,7 +392,7 @@ static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *c
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("req=%pK\n", req);
+ dev_dbg(dev, "req=%pK\n", req);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -414,7 +407,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
unsigned int nbytes, u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -423,20 +416,21 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
int idx = 0;
int rc = 0;
- SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
- SSI_LOG_ERR("map_ahash_source() failed\n");
+ dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -553,7 +547,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
@@ -561,7 +555,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
} else {
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
@@ -579,14 +573,14 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
unsigned int nbytes,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
u32 idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-update (%d) ====\n", ctx->is_hmac ?
- "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
+ "hmac" : "hash", nbytes);
if (nbytes == 0) {
/* no real updates required */
@@ -596,12 +590,12 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
if (unlikely(rc)) {
if (rc == 1) {
- SSI_LOG_DEBUG(" data size not require HW update %x\n",
- nbytes);
+ dev_dbg(dev, " data size not require HW update %x\n",
+ nbytes);
/* No hardware updates are required */
return 0;
}
- SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
@@ -653,13 +647,13 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
} else {
ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
@@ -676,21 +670,22 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -783,14 +778,14 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
@@ -810,22 +805,23 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
u8 *result,
void *async_req)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
+ dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
+ nbytes);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -927,14 +923,14 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
@@ -948,7 +944,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
state->xcbc_count = 0;
@@ -970,10 +966,12 @@ static int ssi_hash_setkey(void *hash,
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
ssi_sram_addr_t larval_addr;
-
- SSI_LOG_DEBUG("start keylen: %d", keylen);
+ struct device *dev;
ctx = crypto_ahash_ctx(((struct crypto_ahash *)hash));
+ dev = drvdata_to_dev(ctx->drvdata);
+ dev_dbg(dev, "start keylen: %d", keylen);
+
blocksize = crypto_tfm_alg_blocksize(&((struct crypto_ahash *)hash)->base);
digestsize = crypto_ahash_digestsize(((struct crypto_ahash *)hash));
@@ -989,18 +987,16 @@ static int ssi_hash_setkey(void *hash,
if (keylen != 0) {
ctx->key_params.key_dma_addr = dma_map_single(
- &ctx->drvdata->plat_dev->dev,
- (void *)key,
+ dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
+ if (unlikely(dma_mapping_error(dev,
ctx->key_params.key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
- "keylen=%u\n", ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
if (keylen > blocksize) {
/* Load hash initial state */
@@ -1079,7 +1075,7 @@ static int ssi_hash_setkey(void *hash,
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1139,12 +1135,10 @@ out:
crypto_ahash_set_flags((struct crypto_ahash *)hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
if (ctx->key_params.key_dma_addr) {
- dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
return rc;
}
@@ -1154,10 +1148,11 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
{
struct ssi_crypto_req ssi_req = {};
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
int idx = 0, rc = 0;
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
- SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
switch (keylen) {
case AES_KEYSIZE_128:
@@ -1171,19 +1166,15 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = dma_map_single(
- &ctx->drvdata->plat_dev->dev,
- (void *)key,
+ dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr))) {
- SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
- " DMA failed\n", key, keylen);
+ if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+ dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
+ key, keylen);
return -ENOMEM;
}
- SSI_LOG_DEBUG("mapping key-buffer: key_dma_addr=%pad "
- "keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
ctx->is_hmac = true;
/* 1. Load the AES key */
@@ -1226,12 +1217,10 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
if (rc != 0)
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- dma_unmap_single(&ctx->drvdata->plat_dev->dev,
- ctx->key_params.key_dma_addr,
+ dma_unmap_single(dev, ctx->key_params.key_dma_addr,
ctx->key_params.keylen, DMA_TO_DEVICE);
- SSI_LOG_DEBUG("Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
- ctx->key_params.key_dma_addr,
- ctx->key_params.keylen);
+ dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
+ &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
return rc;
}
@@ -1241,8 +1230,9 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
+ dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
ctx->is_hmac = true;
@@ -1259,16 +1249,14 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
/* STAT_PHASE_1: Copy key to ctx */
- dma_sync_single_for_cpu(&ctx->drvdata->plat_dev->dev,
- ctx->opad_tmp_keys_dma_addr,
+ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
memcpy(ctx->opad_tmp_keys_buff, key, keylen);
if (keylen == 24)
memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
- dma_sync_single_for_device(&ctx->drvdata->plat_dev->dev,
- ctx->opad_tmp_keys_dma_addr,
+ dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
keylen, DMA_TO_DEVICE);
ctx->key_params.keylen = keylen;
@@ -1279,23 +1267,21 @@ static int ssi_cmac_setkey(struct crypto_ahash *ahash,
static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (ctx->digest_buff_dma_addr != 0) {
dma_unmap_single(dev, ctx->digest_buff_dma_addr,
sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped digest-buffer: "
- "digest_buff_dma_addr=%pad\n",
- ctx->digest_buff_dma_addr);
+ dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
+ &ctx->digest_buff_dma_addr);
ctx->digest_buff_dma_addr = 0;
}
if (ctx->opad_tmp_keys_dma_addr != 0) {
dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
sizeof(ctx->opad_tmp_keys_buff),
DMA_BIDIRECTIONAL);
- SSI_LOG_DEBUG("Unmapped opad-digest: "
- "opad_tmp_keys_dma_addr=%pad\n",
- ctx->opad_tmp_keys_dma_addr);
+ dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
+ &ctx->opad_tmp_keys_dma_addr);
ctx->opad_tmp_keys_dma_addr = 0;
}
@@ -1304,30 +1290,30 @@ static void ssi_hash_free_ctx(struct ssi_hash_ctx *ctx)
static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
{
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
ctx->key_params.keylen = 0;
ctx->digest_buff_dma_addr = dma_map_single(dev, (void *)ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
- SSI_LOG_ERR("Mapping digest len %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->digest_buff), ctx->digest_buff);
+ dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped digest %zu B at va=%pK to dma=%pad\n",
- sizeof(ctx->digest_buff), ctx->digest_buff,
- ctx->digest_buff_dma_addr);
+ dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->digest_buff), ctx->digest_buff,
+ &ctx->digest_buff_dma_addr);
ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
- SSI_LOG_ERR("Mapping opad digest %zu B at va=%pK for DMA failed\n",
- sizeof(ctx->opad_tmp_keys_buff),
- ctx->opad_tmp_keys_buff);
+ dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
+ sizeof(ctx->opad_tmp_keys_buff),
+ ctx->opad_tmp_keys_buff);
goto fail;
}
- SSI_LOG_DEBUG("Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
- sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
- ctx->opad_tmp_keys_dma_addr);
+ dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
+ sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
+ &ctx->opad_tmp_keys_dma_addr);
ctx->is_hmac = false;
return 0;
@@ -1361,8 +1347,9 @@ static int ssi_ahash_cra_init(struct crypto_tfm *tfm)
static void ssi_hash_cra_exit(struct crypto_tfm *tfm)
{
struct ssi_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("ssi_hash_cra_exit");
+ dev_dbg(dev, "ssi_hash_cra_exit");
ssi_hash_free_ctx(ctx);
}
@@ -1371,7 +1358,7 @@ static int ssi_mac_update(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -1388,12 +1375,12 @@ static int ssi_mac_update(struct ahash_request *req)
rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
if (unlikely(rc)) {
if (rc == 1) {
- SSI_LOG_DEBUG(" data size not require HW update %x\n",
- req->nbytes);
+ dev_dbg(dev, " data size not require HW update %x\n",
+ req->nbytes);
/* No hardware updates are required */
return 0;
}
- SSI_LOG_ERR("map_ahash_request_update() failed\n");
+ dev_err(dev, "map_ahash_request_update() failed\n");
return -ENOMEM;
}
@@ -1420,7 +1407,7 @@ static int ssi_mac_update(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
}
return rc;
@@ -1431,7 +1418,7 @@ static int ssi_mac_final(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
@@ -1451,15 +1438,15 @@ static int ssi_mac_final(struct ahash_request *req)
key_len = ctx->key_params.keylen;
}
- SSI_LOG_DEBUG("===== final xcbc reminder (%d) ====\n", rem_cnt);
+ dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1530,7 +1517,7 @@ static int ssi_mac_final(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
@@ -1542,7 +1529,7 @@ static int ssi_mac_finup(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
int idx = 0;
@@ -1550,18 +1537,18 @@ static int ssi_mac_finup(struct ahash_request *req)
u32 key_len = 0;
u32 digestsize = crypto_ahash_digestsize(tfm);
- SSI_LOG_DEBUG("===== finup xcbc(%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
if (state->xcbc_count > 0 && req->nbytes == 0) {
- SSI_LOG_DEBUG("No data to update. Call to fdx_mac_final\n");
+ dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
return ssi_mac_final(req);
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1601,7 +1588,7 @@ static int ssi_mac_finup(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
@@ -1613,7 +1600,7 @@ static int ssi_mac_digest(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
struct ssi_crypto_req ssi_req = {};
struct cc_hw_desc desc[SSI_MAX_AHASH_SEQ_LEN];
@@ -1621,19 +1608,19 @@ static int ssi_mac_digest(struct ahash_request *req)
int idx = 0;
int rc;
- SSI_LOG_DEBUG("===== -digest mac (%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
if (unlikely(ssi_hash_map_request(dev, state, ctx) != 0)) {
- SSI_LOG_ERR("map_ahash_source() failed\n");
+ dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_hash_map_result(dev, state, digestsize) != 0)) {
- SSI_LOG_ERR("map_ahash_digest() failed\n");
+ dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
- SSI_LOG_ERR("map_ahash_request_final() failed\n");
+ dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -1673,7 +1660,7 @@ static int ssi_mac_digest(struct ahash_request *req)
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
- SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
+ dev_err(dev, "send_request() failed (rc=%d)\n", rc);
ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
ssi_hash_unmap_request(dev, state, ctx);
@@ -1727,8 +1714,9 @@ static int ssi_ahash_init(struct ahash_request *req)
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
- SSI_LOG_DEBUG("===== init (%d) ====\n", req->nbytes);
+ dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
return ssi_hash_init(state, ctx);
}
@@ -1737,7 +1725,7 @@ static int ssi_ahash_export(struct ahash_request *req, void *out)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u8 *curr_buff = state->buff_index ? state->buff1 : state->buff0;
u32 curr_buff_cnt = state->buff_index ? state->buff1_cnt :
@@ -1778,7 +1766,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
- struct device *dev = &ctx->drvdata->plat_dev->dev;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ahash_req_ctx *state = ahash_request_ctx(req);
u32 tmp;
int rc;
@@ -2054,17 +2042,17 @@ static struct ssi_hash_template driver_hash[] = {
};
static struct ssi_hash_alg *
-ssi_hash_create_alg(struct ssi_hash_template *template, bool keyed)
+ssi_hash_create_alg(struct ssi_hash_template *template, struct device *dev,
+ bool keyed)
{
struct ssi_hash_alg *t_crypto_alg;
struct crypto_alg *alg;
struct ahash_alg *halg;
t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
- if (!t_crypto_alg) {
- SSI_LOG_ERR("failed to allocate t_crypto_alg\n");
+ if (!t_crypto_alg)
return ERR_PTR(-ENOMEM);
- }
+
t_crypto_alg->ahash_alg = template->template_ahash;
halg = &t_crypto_alg->ahash_alg;
@@ -2107,6 +2095,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
#if (DX_DEV_SHA_MAX > 256)
int i;
@@ -2191,7 +2180,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
larval_seq_len = 0;
@@ -2209,7 +2198,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("send_request() failed (rc = %d)\n", rc);
+ dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
#endif
@@ -2223,17 +2212,15 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
struct ssi_hash_handle *hash_handle;
ssi_sram_addr_t sram_buff;
u32 sram_size_to_alloc;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
int alg;
hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
- if (!hash_handle) {
- SSI_LOG_ERR("kzalloc failed to allocate %zu B\n",
- sizeof(*hash_handle));
- rc = -ENOMEM;
- goto fail;
- }
+ if (!hash_handle)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&hash_handle->hash_list);
drvdata->hash_handle = hash_handle;
sram_size_to_alloc = sizeof(digest_len_init) +
@@ -2249,7 +2236,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto fail;
}
@@ -2260,31 +2247,29 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
/* must be set before the alg registration as it is being used there */
rc = ssi_hash_init_sram_digest_consts(drvdata);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Init digest CONST failed (rc=%d)\n", rc);
+ dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
- INIT_LIST_HEAD(&hash_handle->hash_list);
-
/* ahash registration */
for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
struct ssi_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
/* register hmac version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], true);
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, true);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (unlikely(rc)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
@@ -2297,19 +2282,19 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
continue;
/* register hash version */
- t_alg = ssi_hash_create_alg(&driver_hash[alg], false);
+ t_alg = ssi_hash_create_alg(&driver_hash[alg], dev, false);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
- SSI_LOG_ERR("%s alg allocation failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
goto fail;
}
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
if (unlikely(rc)) {
- SSI_LOG_ERR("%s alg registration failed\n",
- driver_hash[alg].driver_name);
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
kfree(t_alg);
goto fail;
} else {
@@ -2443,6 +2428,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
unsigned int *seq_size)
{
unsigned int idx = *seq_size;
+ struct device *dev = drvdata_to_dev(ctx->drvdata);
if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
hw_desc_init(&desc[idx]);
@@ -2453,7 +2439,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
idx++;
} else {
if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) {
- SSI_LOG_DEBUG(" NULL mode\n");
+ dev_dbg(dev, " NULL mode\n");
/* nothing to build */
return;
}
@@ -2493,6 +2479,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
{
struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
struct ssi_hash_handle *hash_handle = _drvdata->hash_handle;
+ struct device *dev = drvdata_to_dev(_drvdata);
switch (mode) {
case DRV_HASH_NULL:
@@ -2527,7 +2514,7 @@ ssi_sram_addr_t ssi_ahash_get_larval_digest_sram_addr(void *drvdata, u32 mode)
sizeof(sha384_init));
#endif
default:
- SSI_LOG_ERR("Invalid hash mode (%d)\n", mode);
+ dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
/* This is a valid but wrong value, returned to avoid a kernel crash */
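The hash hunks above, and the ones that follow, all replace open-coded &drvdata->plat_dev->dev dereferences with a drvdata_to_dev() accessor so that every dev_dbg()/dev_err() call receives the same struct device. A minimal sketch of what that accessor presumably looks like, inferred from the code it replaces (the real definition lives in the driver's header, outside this excerpt):

	static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
	{
		/* the old call sites reached the device through the platform device */
		return &drvdata->plat_dev->dev;
	}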
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index b01e03231947..3f082f41ae8f 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -193,12 +193,9 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
/* Allocate "this" context */
drvdata->ivgen_handle = kzalloc(sizeof(*drvdata->ivgen_handle),
GFP_KERNEL);
- if (!drvdata->ivgen_handle) {
- SSI_LOG_ERR("Not enough memory to allocate IVGEN context "
- "(%zu B)\n", sizeof(*drvdata->ivgen_handle));
- rc = -ENOMEM;
- goto out;
- }
+ if (!drvdata->ivgen_handle)
+ return -ENOMEM;
+
ivgen_ctx = drvdata->ivgen_handle;
/* Allocate pool's header for initial enc. key/IV */
@@ -206,15 +203,15 @@ int ssi_ivgen_init(struct ssi_drvdata *drvdata)
&ivgen_ctx->pool_meta_dma,
GFP_KERNEL);
if (!ivgen_ctx->pool_meta) {
- SSI_LOG_ERR("Not enough memory to allocate DMA of pool_meta "
- "(%u B)\n", SSI_IVPOOL_META_SIZE);
+ dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
+ SSI_IVPOOL_META_SIZE);
rc = -ENOMEM;
goto out;
}
/* Allocate IV pool in SRAM */
ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
- SSI_LOG_ERR("SRAM pool exhausted\n");
+ dev_err(device, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto out;
}
@@ -248,6 +245,7 @@ int ssi_ivgen_getiv(
{
struct ssi_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
unsigned int idx = *iv_seq_len;
+ struct device *dev = drvdata_to_dev(drvdata);
unsigned int t;
if ((iv_out_size != CC_AES_IV_SIZE) &&
@@ -291,7 +289,7 @@ int ssi_ivgen_getiv(
ivgen_ctx->next_iv_ofs += iv_out_size;
if ((SSI_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
- SSI_LOG_DEBUG("Pool exhausted, regenerating iv-pool\n");
+ dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
/* pool is drained -regenerate it! */
return ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, iv_seq_len);
}
diff --git a/drivers/staging/ccree/ssi_pm.c b/drivers/staging/ccree/ssi_pm.c
index 31325e6cd4b4..36a498098a70 100644
--- a/drivers/staging/ccree/ssi_pm.c
+++ b/drivers/staging/ccree/ssi_pm.c
@@ -40,11 +40,12 @@ int ssi_power_mgr_runtime_suspend(struct device *dev)
(struct ssi_drvdata *)dev_get_drvdata(dev);
int rc;
- SSI_LOG_DEBUG("set HOST_POWER_DOWN_EN\n");
- WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
+ dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
if (rc != 0) {
- SSI_LOG_ERR("ssi_request_mgr_runtime_suspend_queue (%x)\n", rc);
+ dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
+ rc);
return rc;
}
fini_cc_regs(drvdata);
@@ -58,24 +59,24 @@ int ssi_power_mgr_runtime_resume(struct device *dev)
struct ssi_drvdata *drvdata =
(struct ssi_drvdata *)dev_get_drvdata(dev);
- SSI_LOG_DEBUG("unset HOST_POWER_DOWN_EN\n");
- WRITE_REGISTER(drvdata->cc_base + CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
+ dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = cc_clk_on(drvdata);
if (rc) {
- SSI_LOG_ERR("failed getting clock back on. We're toast.\n");
+ dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
rc = init_cc_regs(drvdata, false);
if (rc != 0) {
- SSI_LOG_ERR("init_cc_regs (%x)\n", rc);
+ dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
rc = ssi_request_mgr_runtime_resume_queue(drvdata);
if (rc != 0) {
- SSI_LOG_ERR("ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
+ dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
return rc;
}
@@ -109,7 +110,8 @@ int ssi_power_mgr_runtime_put_suspend(struct device *dev)
rc = pm_runtime_put_autosuspend(dev);
} else {
/* Something went wrong */
- BUG();
+ dev_err(dev, "request to suspend already suspended queue");
+ rc = -EBUSY;
}
return rc;
}
@@ -120,16 +122,17 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
{
int rc = 0;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct platform_device *plat_dev = drvdata->plat_dev;
+ struct device *dev = drvdata_to_dev(drvdata);
+
/* must be set up before enabling to avoid a redundant suspend */
- pm_runtime_set_autosuspend_delay(&plat_dev->dev, SSI_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(&plat_dev->dev);
+ pm_runtime_set_autosuspend_delay(dev, SSI_SUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
/* activate the PM module */
- rc = pm_runtime_set_active(&plat_dev->dev);
+ rc = pm_runtime_set_active(dev);
if (rc != 0)
return rc;
/* enable the PM module*/
- pm_runtime_enable(&plat_dev->dev);
+ pm_runtime_enable(dev);
#endif
return rc;
}
@@ -137,8 +140,6 @@ int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
{
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- struct platform_device *plat_dev = drvdata->plat_dev;
-
- pm_runtime_disable(&plat_dev->dev);
+ pm_runtime_disable(drvdata_to_dev(drvdata));
#endif
}
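ssi_pm.c above also switches from the WRITE_REGISTER/CC_REG_OFFSET macros to cc_iowrite()/cc_ioread() with a one-argument CC_REG() lookup. A hedged sketch of those accessors, assuming CC_REG() simply expands to the existing DX_<name>_REG_OFFSET constants (the register-dump hunk further down pairs CC_REG(HOST_SIGNATURE) with DX_HOST_SIGNATURE_REG_OFFSET) and that cc_base is the ioremapped register window:

	#include <linux/io.h>

	#define CC_REG(reg_name)	DX_ ## reg_name ## _REG_OFFSET

	static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
	{
		iowrite32(val, drvdata->cc_base + reg);
	}

	static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
	{
		return ioread32(drvdata->cc_base + reg);
	}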
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index e5c2f92857f6..a8a7dc672d4c 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -49,7 +49,6 @@ struct ssi_request_mgr_handle {
dma_addr_t dummy_comp_buff_dma;
struct cc_hw_desc monitor_desc;
- volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
struct workqueue_struct *workq;
struct delayed_work compwork;
@@ -69,19 +68,19 @@ static void comp_work_handler(struct work_struct *work);
void request_mgr_fini(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
if (!req_mgr_h)
return; /* Not allocated */
if (req_mgr_h->dummy_comp_buff_dma != 0) {
- dma_free_coherent(&drvdata->plat_dev->dev,
- sizeof(u32), req_mgr_h->dummy_comp_buff,
+ dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
req_mgr_h->dummy_comp_buff_dma);
}
- SSI_LOG_DEBUG("max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
+ dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
req_mgr_h->min_free_hw_slots));
- SSI_LOG_DEBUG("max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
+ dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
#ifdef COMP_IN_WQ
flush_workqueue(req_mgr_h->workq);
@@ -98,6 +97,7 @@ void request_mgr_fini(struct ssi_drvdata *drvdata)
int request_mgr_init(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *req_mgr_h;
+ struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
@@ -110,24 +110,24 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
spin_lock_init(&req_mgr_h->hw_lock);
#ifdef COMP_IN_WQ
- SSI_LOG_DEBUG("Initializing completion workqueue\n");
+ dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
if (unlikely(!req_mgr_h->workq)) {
- SSI_LOG_ERR("Failed creating work queue\n");
+ dev_err(dev, "Failed creating work queue\n");
rc = -ENOMEM;
goto req_mgr_init_err;
}
INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
- SSI_LOG_DEBUG("Initializing completion tasklet\n");
+ dev_dbg(dev, "Initializing completion tasklet\n");
tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata);
#endif
- req_mgr_h->hw_queue_size = READ_REGISTER(drvdata->cc_base +
- CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_SRAM_SIZE));
- SSI_LOG_DEBUG("hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
+ req_mgr_h->hw_queue_size = cc_ioread(drvdata,
+ CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
+ dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
- SSI_LOG_ERR("Invalid HW queue size = %u (Min. required is %u)\n",
- req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
+ dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
+ req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -135,13 +135,12 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
req_mgr_h->max_used_sw_slots = 0;
/* Allocate DMA word for "dummy" completion descriptor use */
- req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
- sizeof(u32),
+ req_mgr_h->dummy_comp_buff = dma_alloc_coherent(dev, sizeof(u32),
&req_mgr_h->dummy_comp_buff_dma,
GFP_KERNEL);
if (!req_mgr_h->dummy_comp_buff) {
- SSI_LOG_ERR("Not enough memory to allocate DMA (%zu) dropped "
- "buffer\n", sizeof(u32));
+ dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
+ sizeof(u32));
rc = -ENOMEM;
goto req_mgr_init_err;
}
@@ -168,17 +167,17 @@ static inline void enqueue_seq(
int i;
for (i = 0; i < seq_len; i++) {
- writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
- writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[0], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[1], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[2], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[3], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[4], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
wmb();
- writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_WORD0)));
+ writel_relaxed(seq[i].word[5], (volatile void __iomem *)(cc_base + CC_REG(DSCRPTR_QUEUE_WORD0)));
#ifdef DX_DUMP_DESCS
- SSI_LOG_DEBUG("desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i,
- seq[i].word[0], seq[i].word[1], seq[i].word[2],
- seq[i].word[3], seq[i].word[4], seq[i].word[5]);
+ dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i, seq[i].word[0], seq[i].word[1], seq[i].word[2],
+ seq[i].word[3], seq[i].word[4], seq[i].word[5]);
#endif
}
}
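The enqueue_seq() hunk above keeps the original write ordering: descriptor words 0-4 go out with writel_relaxed(), a wmb() follows, and only then is word 5 written, presumably because the final word is what commits the descriptor to the engine. A minimal sketch of that idiom (push_desc and queue_reg are illustrative names, not part of the driver):

	#include <linux/io.h>

	static void push_desc(void __iomem *queue_reg, const u32 word[6])
	{
		int i;

		/* words 0-4 may be posted in any order */
		for (i = 0; i < 5; i++)
			writel_relaxed(word[i], queue_reg);

		wmb();	/* make words 0-4 visible before the commit word */
		writel_relaxed(word[5], queue_reg);
	}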
@@ -198,11 +197,12 @@ static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __io
}
static inline int request_mgr_queues_status_check(
+ struct ssi_drvdata *drvdata,
struct ssi_request_mgr_handle *req_mgr_h,
- void __iomem *cc_base,
unsigned int total_seq_len)
{
unsigned long poll_queue;
+ struct device *dev = drvdata_to_dev(drvdata);
/* SW queue is checked only once as it will not
* be changed during the poll because the spinlock_bh
@@ -211,8 +211,8 @@ static inline int request_mgr_queues_status_check(
if (unlikely(((req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1)) ==
req_mgr_h->req_queue_tail)) {
- SSI_LOG_ERR("SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
- req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
+ dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
@@ -222,8 +222,7 @@ static inline int request_mgr_queues_status_check(
/* Wait for space in HW queue. Poll for a constant number of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
if (unlikely(req_mgr_h->q_free_slots <
req_mgr_h->min_free_hw_slots)) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
@@ -234,16 +233,13 @@ static inline int request_mgr_queues_status_check(
return 0;
}
- SSI_LOG_DEBUG("HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->q_free_slots, total_seq_len);
+ dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->q_free_slots, total_seq_len);
}
/* No room in the HW queue try again later */
- SSI_LOG_DEBUG("HW FIFO full, timeout. req_queue_head=%d "
- "sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
- req_mgr_h->req_queue_head,
- MAX_REQUEST_QUEUE_SIZE,
- req_mgr_h->q_free_slots,
- total_seq_len);
+ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
+ req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
+ req_mgr_h->q_free_slots, total_seq_len);
return -EAGAIN;
}
@@ -270,16 +266,17 @@ int send_request(
unsigned int iv_seq_len = 0;
unsigned int total_seq_len = len; /*initial sequence length*/
struct cc_hw_desc iv_seq[SSI_IVPOOL_SEQ_LEN];
+ struct device *dev = drvdata_to_dev(drvdata);
int rc;
unsigned int max_required_seq_len = (total_seq_len +
((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
SSI_IVPOOL_SEQ_LEN) +
- ((is_dout == 0) ? 1 : 0));
+ (!is_dout ? 1 : 0));
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_get(&drvdata->plat_dev->dev);
+ rc = ssi_power_mgr_runtime_get(dev);
if (rc != 0) {
- SSI_LOG_ERR("ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
return rc;
}
#endif
@@ -291,7 +288,7 @@ int send_request(
* in case iv gen add the max size and in case of no dout add 1
* for the internal completion descriptor
*/
- rc = request_mgr_queues_status_check(req_mgr_h, cc_base,
+ rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
max_required_seq_len);
if (likely(rc == 0))
/* There is enough place in the queue */
@@ -304,7 +301,7 @@ int send_request(
* (SW queue is full)
*/
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+ ssi_power_mgr_runtime_put_suspend(dev);
#endif
return rc;
}
@@ -324,12 +321,12 @@ int send_request(
}
if (ssi_req->ivgen_dma_addr_len > 0) {
- SSI_LOG_DEBUG("Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_dma_addr[0],
- ssi_req->ivgen_dma_addr[1],
- ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
+ dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
+ ssi_req->ivgen_dma_addr_len,
+ &ssi_req->ivgen_dma_addr[0],
+ &ssi_req->ivgen_dma_addr[1],
+ &ssi_req->ivgen_dma_addr[2],
+ ssi_req->ivgen_size);
/* Acquire IV from pool */
rc = ssi_ivgen_getiv(drvdata, ssi_req->ivgen_dma_addr,
@@ -337,10 +334,10 @@ int send_request(
ssi_req->ivgen_size, iv_seq, &iv_seq_len);
if (unlikely(rc != 0)) {
- SSI_LOG_ERR("Failed to generate IV (rc=%d)\n", rc);
+ dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(&drvdata->plat_dev->dev);
+ ssi_power_mgr_runtime_put_suspend(dev);
#endif
return rc;
}
@@ -357,7 +354,7 @@ int send_request(
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
/* TODO: Use circ_buf.h ? */
- SSI_LOG_DEBUG("Enqueue request head=%u\n", req_mgr_h->req_queue_head);
+ dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
#ifdef FLUSH_CACHE_ALL
flush_cache_all();
@@ -369,11 +366,16 @@ int send_request(
enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
- /*This means that there was a problem with the resume*/
- BUG();
+ /* This situation should never occur. It may indicate a problem
+ * with power resume. Set the free slot count to 0 and hope
+ * for the best.
+ */
+ dev_err(dev, "HW free slot count mismatch.");
+ req_mgr_h->q_free_slots = 0;
+ } else {
+ /* Update the free slots in HW queue */
+ req_mgr_h->q_free_slots -= total_seq_len;
}
- /* Update the free slots in HW queue */
- req_mgr_h->q_free_slots -= total_seq_len;
spin_unlock_bh(&req_mgr_h->hw_lock);
@@ -383,10 +385,9 @@ int send_request(
*/
wait_for_completion(&ssi_req->seq_compl);
return 0;
- } else {
- /* Operation still in process */
- return -EINPROGRESS;
}
+ /* Operation still in process */
+ return -EINPROGRESS;
}
/*!
@@ -409,7 +410,8 @@ int send_request_init(
int rc = 0;
/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
- rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
+ rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
+ total_seq_len);
if (unlikely(rc != 0))
return rc;
@@ -418,8 +420,8 @@ int send_request_init(
enqueue_seq(cc_base, desc, len);
/* Update the free slots in HW queue */
- req_mgr_h->q_free_slots = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL,
- DSCRPTR_QUEUE_CONTENT));
+ req_mgr_h->q_free_slots =
+ cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
return 0;
}
@@ -448,7 +450,7 @@ static void comp_work_handler(struct work_struct *work)
static void proc_completions(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_req *ssi_req;
- struct platform_device *plat_dev = drvdata->plat_dev;
+ struct device *dev = drvdata_to_dev(drvdata);
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
@@ -460,8 +462,13 @@ static void proc_completions(struct ssi_drvdata *drvdata)
/* Dequeue request */
if (unlikely(request_mgr_handle->req_queue_head == request_mgr_handle->req_queue_tail)) {
- SSI_LOG_ERR("Request queue is empty req_queue_head==req_queue_tail==%u\n", request_mgr_handle->req_queue_head);
- BUG();
+ /* We are supposed to handle a completion but our
+ * queue is empty. This is not normal. Return and
+ * hope for the best.
+ */
+ dev_err(dev, "Request queue is empty head == tail %u\n",
+ request_mgr_handle->req_queue_head);
+ break;
}
ssi_req = &request_mgr_handle->req_queue[request_mgr_handle->req_queue_tail];
@@ -476,39 +483,40 @@ static void proc_completions(struct ssi_drvdata *drvdata)
u32 axi_err;
int i;
- SSI_LOG_INFO("Delay\n");
+ dev_info(dev, "Delay\n");
for (i = 0; i < 1000000; i++)
- axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ axi_err = cc_ioread(drvdata,
+ CC_REG(AXIM_MON_ERR));
}
#endif /* COMPLETION_DELAY */
if (likely(ssi_req->user_cb))
- ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
+ ssi_req->user_cb(dev, ssi_req->user_arg,
+ drvdata->cc_base);
request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
- SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
- SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
+ dev_dbg(dev, "Dequeue request tail=%u\n",
+ request_mgr_handle->req_queue_tail);
+ dev_dbg(dev, "Request completed. axi_completed=%d\n",
+ request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
+ rc = ssi_power_mgr_runtime_put_suspend(dev);
if (rc != 0)
- SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
+ dev_err(dev, "Failed to set runtime suspension %d\n",
+ rc);
#endif
}
}
-static inline u32 cc_axi_comp_count(void __iomem *cc_base)
+static inline u32 cc_axi_comp_count(struct ssi_drvdata *drvdata)
{
- /* The CC_HAL_READ_REGISTER macro implictly requires and uses
- * a base MMIO register address variable named cc_base.
- */
return FIELD_GET(AXIM_MON_COMP_VALUE,
- CC_HAL_READ_REGISTER(AXIM_MON_BASE_OFFSET));
+ cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
- void __iomem *cc_base = drvdata->cc_base;
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
@@ -517,12 +525,16 @@ static void comp_handler(unsigned long devarg)
irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
if (irq & SSI_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it, we clear it now */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+ /* To avoid the interrupt from firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), SSI_COMP_IRQ_MASK);
- /* Avoid race with above clear: Test completion counter once more */
+ /* Avoid race with above clear: Test completion counter
+ * once more
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
while (request_mgr_handle->axi_completed) {
do {
@@ -531,20 +543,21 @@ static void comp_handler(unsigned long devarg)
* request_mgr_handle->axi_completed is 0.
*/
request_mgr_handle->axi_completed =
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
} while (request_mgr_handle->axi_completed > 0);
- /* To avoid the interrupt from firing as we unmask it, we clear it now */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_ICR), SSI_COMP_IRQ_MASK);
+ cc_iowrite(drvdata, CC_REG(HOST_ICR),
+ SSI_COMP_IRQ_MASK);
- /* Avoid race with above clear: Test completion counter once more */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(cc_base);
+ cc_axi_comp_count(drvdata);
}
}
- /* after verifing that there is nothing to do, Unmask AXI completion interrupt */
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
- CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
+ /* after verifying that there is nothing to do,
+ * unmask AXI completion interrupt
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_IMR),
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
}
/*
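The completion tasklet above now counts finished AXI transactions with FIELD_GET(AXIM_MON_COMP_VALUE, ...) applied to the value read from CC_REG(AXIM_MON_COMP). A small sketch of that bitfield idiom; the field position shown here is an assumption for illustration only, the real AXIM_MON_COMP_VALUE mask is defined elsewhere in the driver:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* assumed layout for illustration only: completion counter in bits 5:0 */
	#define AXIM_MON_COMP_VALUE	GENMASK(5, 0)

	static u32 example_axi_comp_count(u32 axim_mon_comp)
	{
		/* FIELD_GET() masks and shifts according to the GENMASK definition */
		return FIELD_GET(AXIM_MON_COMP_VALUE, axim_mon_comp);
	}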
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index f11116afe89a..07260d168c91 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -50,28 +50,14 @@ void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
*/
int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
{
- struct ssi_sram_mgr_ctx *smgr_ctx;
- int rc;
-
/* Allocate "this" context */
- drvdata->sram_mgr_handle = kzalloc(
- sizeof(struct ssi_sram_mgr_ctx), GFP_KERNEL);
- if (!drvdata->sram_mgr_handle) {
- SSI_LOG_ERR("Not enough memory to allocate SRAM_MGR ctx (%zu)\n",
- sizeof(struct ssi_sram_mgr_ctx));
- rc = -ENOMEM;
- goto out;
- }
- smgr_ctx = drvdata->sram_mgr_handle;
+ drvdata->sram_mgr_handle = kzalloc(sizeof(struct ssi_sram_mgr_ctx),
+ GFP_KERNEL);
- /* Pool starts at start of SRAM */
- smgr_ctx->sram_free_offset = 0;
+ if (!drvdata->sram_mgr_handle)
+ return -ENOMEM;
return 0;
-
-out:
- ssi_sram_mgr_fini(drvdata);
- return rc;
}
/*!
@@ -86,22 +72,23 @@ out:
ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
{
struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
ssi_sram_addr_t p;
if (unlikely((size & 0x3) != 0)) {
- SSI_LOG_ERR("Requested buffer size (%u) is not multiple of 4",
- size);
+ dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
+ size);
return NULL_SRAM_ADDR;
}
if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
- SSI_LOG_ERR("Not enough space to allocate %u B (at offset %llu)\n",
- size, smgr_ctx->sram_free_offset);
+ dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
+ size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
}
p = smgr_ctx->sram_free_offset;
smgr_ctx->sram_free_offset += size;
- SSI_LOG_DEBUG("Allocated %u B @ %u\n", size, (unsigned int)p);
+ dev_dbg(dev, "Allocated %u B @ %u\n", size, (unsigned int)p);
return p;
}
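ssi_sram_mgr_alloc() above is a plain bump allocator: a single free-offset cursor advances by each 4-byte-aligned request, and nothing is freed until the whole manager is torn down. A self-contained user-space sketch of the same bookkeeping, with SRAM_SIZE standing in for SSI_CC_SRAM_SIZE:

	#include <stdint.h>
	#include <stdio.h>

	#define SRAM_SIZE	4096		/* stand-in for SSI_CC_SRAM_SIZE */
	#define NULL_SRAM_ADDR	((uint64_t)-1)

	static uint64_t sram_free_offset;

	/* hand out 4-byte-aligned chunks from a fixed pool; never free */
	static uint64_t sram_alloc(uint32_t size)
	{
		uint64_t p;

		if (size & 0x3)				/* must be a multiple of 4 */
			return NULL_SRAM_ADDR;
		if (size > SRAM_SIZE - sram_free_offset)
			return NULL_SRAM_ADDR;		/* pool exhausted */

		p = sram_free_offset;
		sram_free_offset += size;
		return p;
	}

	int main(void)
	{
		printf("first:  %llu\n", (unsigned long long)sram_alloc(64));
		printf("second: %llu\n", (unsigned long long)sram_alloc(128));
		return 0;
	}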
diff --git a/drivers/staging/ccree/ssi_sysfs.c b/drivers/staging/ccree/ssi_sysfs.c
index 0655658bba4d..5d39f15cdb59 100644
--- a/drivers/staging/ccree/ssi_sysfs.c
+++ b/drivers/staging/ccree/ssi_sysfs.c
@@ -24,277 +24,22 @@
static struct ssi_drvdata *sys_get_drvdata(void);
-#ifdef CC_CYCLE_COUNT
-
-#include <asm/timex.h>
-
-struct stat_item {
- unsigned int min;
- unsigned int max;
- cycles_t sum;
- unsigned int count;
-};
-
-struct stat_name {
- const char *op_type_name;
- const char *stat_phase_name[MAX_STAT_PHASES];
-};
-
-static struct stat_name stat_name_db[MAX_STAT_OP_TYPES] = {
- {
- /* STAT_OP_TYPE_NULL */
- .op_type_name = "NULL",
- .stat_phase_name = {NULL},
- },
- {
- .op_type_name = "Encode",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Map buffers",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- { .op_type_name = "Decode",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Map buffers",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- { .op_type_name = "Setkey",
- .stat_phase_name[STAT_PHASE_0] = "Init and sanity checks",
- .stat_phase_name[STAT_PHASE_1] = "Copy key to ctx",
- .stat_phase_name[STAT_PHASE_2] = "Create sequence",
- .stat_phase_name[STAT_PHASE_3] = "Send Request",
- .stat_phase_name[STAT_PHASE_4] = "HW-Q push",
- .stat_phase_name[STAT_PHASE_5] = "Sequence completion",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- },
- {
- .op_type_name = "Generic",
- .stat_phase_name[STAT_PHASE_0] = "Interrupt",
- .stat_phase_name[STAT_PHASE_1] = "ISR-to-Tasklet",
- .stat_phase_name[STAT_PHASE_2] = "Tasklet start-to-end",
- .stat_phase_name[STAT_PHASE_3] = "Tasklet:user_cb()",
- .stat_phase_name[STAT_PHASE_4] = "Tasklet:dx_X_complete() - w/o X_complete()",
- .stat_phase_name[STAT_PHASE_5] = "",
- .stat_phase_name[STAT_PHASE_6] = "HW cycles",
- }
-};
-
-/*
- * Structure used to create a directory
- * and its attributes in sysfs.
- */
-struct sys_dir {
- struct kobject *sys_dir_kobj;
- struct attribute_group sys_dir_attr_group;
- struct attribute **sys_dir_attr_list;
- u32 num_of_attrs;
- struct ssi_drvdata *drvdata; /* Associated driver context */
-};
-
-/* top level directory structures */
-struct sys_dir sys_top_dir;
-
-static DEFINE_SPINLOCK(stat_lock);
-
-/* List of DBs */
-static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
-static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
-
-static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
-{
- unsigned int i, j;
-
- /* Clear db */
- for (i = 0; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES; j++) {
- item[i][j].min = 0xFFFFFFFF;
- item[i][j].max = 0;
- item[i][j].sum = 0;
- item[i][j].count = 0;
- }
- }
-}
-
-static void update_db(struct stat_item *item, unsigned int result)
-{
- item->count++;
- item->sum += result;
- if (result < item->min)
- item->min = result;
- if (result > item->max)
- item->max = result;
-}
-
-static void display_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
-{
- unsigned int i, j;
- u64 avg;
-
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES; j++) {
- if (item[i][j].count > 0) {
- avg = (u64)item[i][j].sum;
- do_div(avg, item[i][j].count);
- SSI_LOG_ERR("%s, %s: min=%d avg=%d max=%d sum=%lld count=%d\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- item[i][j].min, (int)avg,
- item[i][j].max,
- (long long)item[i][j].sum,
- item[i][j].count);
- }
- }
- }
-}
-
-/**************************************
- * Attributes show functions section *
- **************************************/
-
-static ssize_t ssi_sys_stats_host_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- init_db(stat_host_db);
- return count;
-}
-
-static ssize_t ssi_sys_stats_cc_db_clear(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- init_db(stat_cc_db);
- return count;
-}
-
-static ssize_t ssi_sys_stat_host_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i, j;
- char line[512];
- u32 min_cyc, max_cyc;
- u64 avg;
- ssize_t buf_len, tmp_len = 0;
-
- buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\t\t\t\t\t\t\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
- if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- for (j = 0; j < MAX_STAT_PHASES - 1; j++) {
- if (stat_host_db[i][j].count > 0) {
- avg = (u64)stat_host_db[i][j].sum;
- do_div(avg, stat_host_db[i][j].count);
- min_cyc = stat_host_db[i][j].min;
- max_cyc = stat_host_db[i][j].max;
- } else {
- avg = min_cyc = max_cyc = 0;
- }
- tmp_len = scnprintf(line, 512,
- "%s::%s\t\t\t\t\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name,
- stat_name_db[i].stat_phase_name[j],
- min_cyc, (unsigned int)avg, max_cyc,
- stat_host_db[i][j].count);
- if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- if (buf_len + tmp_len >= PAGE_SIZE)
- return buf_len;
- buf_len += tmp_len;
- strncat(buf, line, 512);
- }
- }
- return buf_len;
-}
-
-static ssize_t ssi_sys_stat_cc_db_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i;
- char line[256];
- u32 min_cyc, max_cyc;
- u64 avg;
- ssize_t buf_len, tmp_len = 0;
-
- buf_len = scnprintf(buf, PAGE_SIZE,
- "phase\tmin[cy]\tavg[cy]\tmax[cy]\t#samples\n");
- if (buf_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
- for (i = STAT_OP_TYPE_ENCODE; i < MAX_STAT_OP_TYPES; i++) {
- if (stat_cc_db[i][STAT_PHASE_6].count > 0) {
- avg = (u64)stat_cc_db[i][STAT_PHASE_6].sum;
- do_div(avg, stat_cc_db[i][STAT_PHASE_6].count);
- min_cyc = stat_cc_db[i][STAT_PHASE_6].min;
- max_cyc = stat_cc_db[i][STAT_PHASE_6].max;
- } else {
- avg = min_cyc = max_cyc = 0;
- }
- tmp_len = scnprintf(line, 256, "%s\t%6u\t%6u\t%6u\t%7u\n",
- stat_name_db[i].op_type_name, min_cyc,
- (unsigned int)avg, max_cyc,
- stat_cc_db[i][STAT_PHASE_6].count);
-
- if (tmp_len < 0)/* scnprintf shouldn't return negative value according to its implementation*/
- return buf_len;
-
- if (buf_len + tmp_len >= PAGE_SIZE)
- return buf_len;
- buf_len += tmp_len;
- strncat(buf, line, 256);
- }
- return buf_len;
-}
-
-void update_host_stat(unsigned int op_type, unsigned int phase, cycles_t result)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&stat_lock, flags);
- update_db(&stat_host_db[op_type][phase], (unsigned int)result);
- spin_unlock_irqrestore(&stat_lock, flags);
-}
-
-void update_cc_stat(
- unsigned int op_type,
- unsigned int phase,
- unsigned int elapsed_cycles)
-{
- update_db(&stat_cc_db[op_type][phase], elapsed_cycles);
-}
-
-void display_all_stat_db(void)
-{
- SSI_LOG_ERR("\n======= CYCLE COUNT STATS =======\n");
- display_db(stat_host_db);
- SSI_LOG_ERR("\n======= CC HW CYCLE COUNT STATS =======\n");
- display_db(stat_cc_db);
-}
-#endif /*CC_CYCLE_COUNT*/
-
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct ssi_drvdata *drvdata = sys_get_drvdata();
u32 register_value;
- void __iomem *cc_base = drvdata->cc_base;
int offset = 0;
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_SIGNATURE));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_SIGNATURE));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_SIGNATURE ", DX_HOST_SIGNATURE_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IRR));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_IRR));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_IRR ", DX_HOST_IRR_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_POWER_DOWN_EN));
+ register_value = cc_ioread(drvdata, CC_REG(HOST_POWER_DOWN_EN));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "HOST_POWER_DOWN_EN ", DX_HOST_POWER_DOWN_EN_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
+ register_value = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "AXIM_MON_ERR ", DX_AXIM_MON_ERR_REG_OFFSET, register_value);
- register_value = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, DSCRPTR_QUEUE_CONTENT));
+ register_value = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s \t(0x%lX)\t 0x%08X\n", "DSCRPTR_QUEUE_CONTENT", DX_DSCRPTR_QUEUE_CONTENT_REG_OFFSET, register_value);
return offset;
}
@@ -304,12 +49,6 @@ static ssize_t ssi_sys_help_show(struct kobject *kobj,
{
char *help_str[] = {
"cat reg_dump ", "Print several of CC register values",
- #if defined CC_CYCLE_COUNT
- "cat stats_host ", "Print host statistics",
- "echo <number> > stats_host", "Clear host statistics database",
- "cat stats_cc ", "Print CC statistics",
- "echo <number> > stats_cc ", "Clear CC statistics database",
- #endif
};
int i = 0, offset = 0;
@@ -376,7 +115,7 @@ static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
return -ENOMEM;
/* allocate memory for directory's attributes list */
sys_dir->sys_dir_attr_list =
- kzalloc(sizeof(struct attribute *) * (num_of_attrs + 1),
+ kcalloc(num_of_attrs + 1, sizeof(struct attribute *),
GFP_KERNEL);
if (!(sys_dir->sys_dir_attr_list)) {
@@ -413,14 +152,9 @@ static void sys_free_dir(struct sys_dir *sys_dir)
int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
{
int retval;
+ struct device *dev = drvdata_to_dev(drvdata);
-#if defined CC_CYCLE_COUNT
- /* Init. statistics */
- init_db(stat_host_db);
- init_db(stat_cc_db);
-#endif
-
- SSI_LOG_ERR("setup sysfs under %s\n", sys_dev_obj->name);
+ dev_info(dev, "setup sysfs under %s\n", sys_dev_obj->name);
/* Initialize top directory */
retval = sys_init_dir(&sys_top_dir, drvdata, sys_dev_obj, "cc_info",
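The sys_init_dir() hunk above swaps an open-coded kzalloc(sizeof(ptr) * n) for kcalloc(n, sizeof(ptr)); both zero the allocation, but kcalloc() additionally fails cleanly when the element count times the element size would overflow. A minimal sketch of the preferred form (alloc_attr_list is an illustrative helper, not part of the driver):

	#include <linux/slab.h>

	static struct attribute **alloc_attr_list(unsigned int num_of_attrs)
	{
		/* zeroed array of pointers plus a NULL terminator; kcalloc()
		 * returns NULL if num * size would overflow, unlike the
		 * open-coded multiplication
		 */
		return kcalloc(num_of_attrs + 1, sizeof(struct attribute *),
			       GFP_KERNEL);
	}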
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7a655ed071a3..4218fc0e17f1 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -785,8 +785,8 @@ config COMEDI_ADV_PCI_DIO
---help---
Enable support for Advantech PCI DIO cards
PCI-1730, PCI-1733, PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U,
- PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756 and
- PCI-1762
+ PCI-1750, PCI-1751, PCI-1752, PCI-1753/E, PCI-1754, PCI-1756,
+ PCI-1761 and PCI-1762
To compile this driver as a module, choose M here: the module will be
called adv_pci_dio.
diff --git a/drivers/staging/comedi/Makefile b/drivers/staging/comedi/Makefile
index 7f9dfb3923ab..6af5da3b4315 100644
--- a/drivers/staging/comedi/Makefile
+++ b/drivers/staging/comedi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
comedi-y := comedi_fops.o range.o drivers.o \
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index 6246f4a78ca6..515f293a5d26 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _COMEDI_INTERNAL_H
#define _COMEDI_INTERNAL_H
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 0c8cfa738727..736e7e55219d 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for individual comedi drivers
#
ccflags-$(CONFIG_COMEDI_DEBUG) := -DDEBUG
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
index db6d5a4e8889..2b44d3a04484 100644
--- a/drivers/staging/comedi/drivers/addi_tcw.h
+++ b/drivers/staging/comedi/drivers/addi_tcw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ADDI_TCW_H
#define _ADDI_TCW_H
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.h b/drivers/staging/comedi/drivers/addi_watchdog.h
index b049cfba9813..7523084a0742 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.h
+++ b/drivers/staging/comedi/drivers/addi_watchdog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ADDI_WATCHDOG_H
#define _ADDI_WATCHDOG_H
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 620cec13d74c..a8186687ca2c 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -12,9 +12,9 @@
* Devices: [Advantech] PCI-1730 (adv_pci_dio), PCI-1733,
* PCI-1734, PCI-1735U, PCI-1736UP, PCI-1739U, PCI-1750,
* PCI-1751, PCI-1752, PCI-1753, PCI-1753+PCI-1753E,
- * PCI-1754, PCI-1756, PCI-1762
+ * PCI-1754, PCI-1756, PCI-1761, PCI-1762
* Author: Michal Dobes <dobes@tesnet.cz>
- * Updated: Mon, 09 Jan 2012 12:40:46 +0000
+ * Updated: Fri, 25 Aug 2017 07:23:06 +0300
* Status: untested
*
* Configuration Options: not applicable, uses PCI auto config
@@ -50,6 +50,11 @@
/* PCI-1752, PCI-1756 special registers */
#define PCI1752_CFC_REG 0x12 /* R/W: channel freeze function */
+/* PCI-1761 interrupt control registers */
+#define PCI1761_INT_EN_REG 0x03 /* R/W: enable/disable interrupts */
+#define PCI1761_INT_RF_REG 0x04 /* R/W: falling/rising edge */
+#define PCI1761_INT_CLR_REG 0x05 /* R/W: clear interrupts */
+
/* PCI-1762 interrupt control registers */
#define PCI1762_INT_REG 0x06 /* R/W: status/control */
@@ -72,6 +77,7 @@ enum pci_dio_boardid {
TYPE_PCI1753E,
TYPE_PCI1754,
TYPE_PCI1756,
+ TYPE_PCI1761,
TYPE_PCI1762
};
@@ -181,6 +187,13 @@ static const struct dio_boardtype boardtypes[] = {
.id_reg = 0x10,
.is_16bit = 1,
},
+ [TYPE_PCI1761] = {
+ .name = "pci1761",
+ .nsubdevs = 3,
+ .sdi[1] = { 8, 0x01 }, /* ISO DI 0-7 */
+ .sdo[1] = { 8, 0x00 }, /* RELAY DO 0-7 */
+ .id_reg = 0x02,
+ },
[TYPE_PCI1762] = {
.name = "pci1762",
.nsubdevs = 3,
@@ -309,6 +322,14 @@ static int pci_dio_reset(struct comedi_device *dev, unsigned long cardtype)
outw(0x08, dev->iobase + PCI1754_INT_REG(3));
}
break;
+ case TYPE_PCI1761:
+ /* disable interrupts */
+ outb(0, dev->iobase + PCI1761_INT_EN_REG);
+ /* clear interrupts */
+ outb(0xff, dev->iobase + PCI1761_INT_CLR_REG);
+ /* set rising edge trigger */
+ outb(0, dev->iobase + PCI1761_INT_RF_REG);
+ break;
case TYPE_PCI1762:
outw(0x0101, dev->iobase + PCI1762_INT_REG);
break;
@@ -496,6 +517,7 @@ static const struct pci_device_id adv_pci_dio_pci_table[] = {
{ PCI_VDEVICE(ADVANTECH, 0x1753), TYPE_PCI1753 },
{ PCI_VDEVICE(ADVANTECH, 0x1754), TYPE_PCI1754 },
{ PCI_VDEVICE(ADVANTECH, 0x1756), TYPE_PCI1756 },
+ { PCI_VDEVICE(ADVANTECH, 0x1761), TYPE_PCI1761 },
{ PCI_VDEVICE(ADVANTECH, 0x1762), TYPE_PCI1762 },
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index f03e4c8c2021..f738b91b2052 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Stuff for AMCC S5933 PCI Controller
*
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index ccfd642998be..c7e8194984e5 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -93,6 +93,7 @@ struct waveform_private {
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
+ struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
unsigned short ao_loopbacks[N_CHANS];
@@ -201,10 +202,10 @@ static unsigned short fake_waveform(struct comedi_device *dev,
* It should run in the background; therefore it is scheduled by
* a timer mechanism.
*/
-static void waveform_ai_timer(unsigned long arg)
+static void waveform_ai_timer(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct waveform_private *devpriv = dev->private;
+ struct waveform_private *devpriv = from_timer(devpriv, t, ai_timer);
+ struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -438,10 +439,10 @@ static int waveform_ai_insn_read(struct comedi_device *dev,
* This is the background routine to handle AO commands, scheduled by
* a timer mechanism.
*/
-static void waveform_ao_timer(unsigned long arg)
+static void waveform_ao_timer(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct waveform_private *devpriv = dev->private;
+ struct waveform_private *devpriv = from_timer(devpriv, t, ao_timer);
+ struct comedi_device *dev = devpriv->dev;
struct comedi_subdevice *s = dev->write_subdev;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -686,8 +687,9 @@ static int waveform_common_attach(struct comedi_device *dev,
for (i = 0; i < s->n_chan; i++)
devpriv->ao_loopbacks[i] = s->maxdata / 2;
- setup_timer(&devpriv->ai_timer, waveform_ai_timer, (unsigned long)dev);
- setup_timer(&devpriv->ao_timer, waveform_ao_timer, (unsigned long)dev);
+ devpriv->dev = dev;
+ timer_setup(&devpriv->ai_timer, waveform_ai_timer, 0);
+ timer_setup(&devpriv->ao_timer, waveform_ao_timer, 0);
dev_info(dev->class_dev,
"%s: %u microvolt, %u microsecond waveform attached\n",
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index 5d157951f63f..ddd4aeab6365 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -440,6 +440,7 @@ static inline int timer_period(void)
struct das16_private_struct {
struct comedi_isadma *dma;
+ struct comedi_device *dev;
unsigned int clockbase;
unsigned int ctrl_reg;
unsigned int divisor1;
@@ -525,10 +526,10 @@ static void das16_interrupt(struct comedi_device *dev)
comedi_handle_events(dev, s);
}
-static void das16_timer_interrupt(unsigned long arg)
+static void das16_timer_interrupt(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct das16_private_struct *devpriv = dev->private;
+ struct das16_private_struct *devpriv = from_timer(devpriv, t, timer);
+ struct comedi_device *dev = devpriv->dev;
unsigned long flags;
das16_interrupt(dev);
@@ -934,6 +935,8 @@ static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
{
struct das16_private_struct *devpriv = dev->private;
+ timer_setup(&devpriv->timer, das16_timer_interrupt, 0);
+
/* only DMA channels 3 and 1 are valid */
if (!(dma_chan == 1 || dma_chan == 3))
return;
@@ -941,10 +944,6 @@ static void das16_alloc_dma(struct comedi_device *dev, unsigned int dma_chan)
/* DMA uses two buffers */
devpriv->dma = comedi_isadma_alloc(dev, 2, dma_chan, dma_chan,
DAS16_DMA_SIZE, COMEDI_ISADMA_READ);
- if (devpriv->dma) {
- setup_timer(&devpriv->timer, das16_timer_interrupt,
- (unsigned long)dev);
- }
}
static void das16_free_dma(struct comedi_device *dev)
@@ -952,8 +951,7 @@ static void das16_free_dma(struct comedi_device *dev)
struct das16_private_struct *devpriv = dev->private;
if (devpriv) {
- if (devpriv->timer.data)
- del_timer_sync(&devpriv->timer);
+ del_timer_sync(&devpriv->timer);
comedi_isadma_free(devpriv->dma);
}
}
@@ -1046,6 +1044,7 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
+ devpriv->dev = dev;
if (board->size < 0x400) {
ret = comedi_request_region(dev, it->options[0], board->size);
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index d5295bbdd28c..217a4b884689 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -1062,6 +1062,8 @@ static void dt282x_alloc_dma(struct comedi_device *dev,
PAGE_SIZE, 0);
if (!devpriv->dma)
free_irq(irq_num, dev);
+ else
+ dev->irq = irq_num;
}
static void dt282x_free_dma(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index f1c2a20a7d4d..cbff3b41bb45 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -96,6 +96,7 @@ struct jr3_pci_poll_delay {
struct jr3_pci_dev_private {
struct timer_list timer;
+ struct comedi_device *dev;
};
union jr3_pci_single_range {
@@ -585,10 +586,10 @@ jr3_pci_poll_subdevice(struct comedi_subdevice *s)
return result;
}
-static void jr3_pci_poll_dev(unsigned long data)
+static void jr3_pci_poll_dev(struct timer_list *t)
{
- struct comedi_device *dev = (struct comedi_device *)data;
- struct jr3_pci_dev_private *devpriv = dev->private;
+ struct jr3_pci_dev_private *devpriv = from_timer(devpriv, t, timer);
+ struct comedi_device *dev = devpriv->dev;
struct jr3_pci_subdev_private *spriv;
struct comedi_subdevice *s;
unsigned long flags;
@@ -770,7 +771,8 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
spriv->next_time_min = jiffies + msecs_to_jiffies(500);
}
- setup_timer(&devpriv->timer, jr3_pci_poll_dev, (unsigned long)dev);
+ devpriv->dev = dev;
+ timer_setup(&devpriv->timer, jr3_pci_poll_dev, 0);
devpriv->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&devpriv->timer);
diff --git a/drivers/staging/comedi/drivers/jr3_pci.h b/drivers/staging/comedi/drivers/jr3_pci.h
index 28ff0c2aa3b8..acd4e5456ceb 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.h
+++ b/drivers/staging/comedi/drivers/jr3_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Helper types to take care of the fact that the DSP card memory
* is 16 bits, but aligned on a 32 bit PCI boundary
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
index e93f79050e60..f06f9353cb6c 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_isadma.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ni_labpc ISA DMA support.
*/
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 6003e9d5fe37..ace40065a25b 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ni_labpc register definitions.
*/
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index c80527db9c19..e226275972c0 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -408,9 +408,8 @@ static int s526_gpct_winsn(struct comedi_device *dev,
*/
if ((data[1] <= data[0]) || !data[0])
return -EINVAL;
-
- /* Fall thru to write the PULSE_WIDTH */
-
+ /* to write the PULSE_WIDTH */
+ /* fall through */
case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
s526_gpct_write(dev, chan, data[0]);
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
index 47eadbf4dcc0..3ef5f9e79b89 100644
--- a/drivers/staging/comedi/drivers/z8536.h
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Z8536 CIO Internal registers
*/
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index c1b6079384e9..d9bf5da1b8e5 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -291,7 +291,7 @@ static void dgnc_free_irq(struct dgnc_board *brd)
* waiter needs to be woken up, and (b) whether the poller needs to
* be rescheduled.
*/
-static void dgnc_poll_handler(ulong dummy)
+static void dgnc_poll_handler(struct timer_list *unused)
{
struct dgnc_board *brd;
unsigned long flags;
@@ -323,7 +323,7 @@ static void dgnc_poll_handler(ulong dummy)
if ((ulong)new_time >= 2 * dgnc_poll_tick)
dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ timer_setup(&dgnc_poll_timer, dgnc_poll_handler, 0);
dgnc_poll_timer.expires = dgnc_poll_time;
spin_unlock_irqrestore(&dgnc_poll_lock, flags);
@@ -392,8 +392,6 @@ static int dgnc_start(void)
unsigned long flags;
struct device *dev;
- init_timer(&dgnc_poll_timer);
-
rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
@@ -419,7 +417,7 @@ static int dgnc_start(void)
/* Start the poller */
spin_lock_irqsave(&dgnc_poll_lock, flags);
- setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+ timer_setup(&dgnc_poll_timer, dgnc_poll_handler, 0);
dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
dgnc_poll_timer.expires = dgnc_poll_time;
spin_unlock_irqrestore(&dgnc_poll_lock, flags);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 764d6fe0d030..efdb11a5e27f 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -104,7 +104,6 @@ struct board_ops {
* struct dgnc_board - Per board information.
* @boardnum: Board number (0 - 32).
*
- * @type: Type of board.
* @name: Product name.
* @pdev: Pointer to the pci_dev structure.
* @bd_flags: Board flags.
@@ -140,13 +139,9 @@ struct board_ops {
* @dpastatus: Board status as defined by DPA.
* @bd_dividend: Board/UART's specific dividend.
* @bd_ops: Pointer to board operations structure.
- * @proc_entry_pointer: Proc/<board> entry
- * @dgnc_board_table: Proc/<board> entry
*/
struct dgnc_board {
int boardnum;
-
- int type;
char *name;
struct pci_dev *pdev;
unsigned long bd_flags;
@@ -200,10 +195,6 @@ struct dgnc_board {
uint bd_dividend;
struct board_ops *bd_ops;
-
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_board_table;
-
};
/* Unit flag definitions for un_flags. */
@@ -233,7 +224,6 @@ struct device;
*/
struct un_t {
struct channel_t *un_ch;
- ulong un_time;
uint un_type;
uint un_open_count;
struct tty_struct *un_tty;
@@ -321,8 +311,6 @@ struct un_t {
* @ch_err_overrun: Count of overruns on channel.
* @ch_xon_sends: Count of xons transmitted.
* @ch_xoff_sends: Count of xoffs transmitted.
- * @proc_entry_pointer: Proc/<board>/<channel> entry.
- * @dgnc_channel_table: Proc/<board>/<channel> entry.
*/
struct channel_t {
struct dgnc_board *ch_bd;
@@ -391,10 +379,6 @@ struct channel_t {
ulong ch_xon_sends;
ulong ch_xoff_sends;
-
- struct proc_dir_entry *proc_entry_pointer;
- struct dgnc_proc_entry *dgnc_channel_table;
-
};
extern uint dgnc_major; /* Our driver/mgmt major */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 1943e66fec57..0ae229c3aaaa 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1182,7 +1182,6 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
unsigned long flags;
struct channel_t *ch;
struct un_t *un;
- int rc = 0;
if (!tty)
return -ENXIO;
@@ -1199,12 +1198,10 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
un->un_flags |= UN_EMPTY;
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = wait_event_interruptible_timeout(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0),
- msecs_to_jiffies(seconds * 1000));
-
- /* If ret is non-zero, user ctrl-c'ed us */
- return rc;
+ /* If returned value is non-zero, user ctrl-c'ed us */
+ return wait_event_interruptible_timeout(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0),
+ msecs_to_jiffies(seconds * 1000));
}
/*
diff --git a/drivers/staging/dgnc/dgnc_utils.c b/drivers/staging/dgnc/dgnc_utils.c
index e07ff8d2f972..620f5741a1ed 100644
--- a/drivers/staging/dgnc/dgnc_utils.c
+++ b/drivers/staging/dgnc/dgnc_utils.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/tty.h>
#include <linux/sched/signal.h>
#include "dgnc_utils.h"
diff --git a/drivers/staging/dgnc/dgnc_utils.h b/drivers/staging/dgnc/dgnc_utils.h
index d1f07a5735c6..b30527f0889d 100644
--- a/drivers/staging/dgnc/dgnc_utils.h
+++ b/drivers/staging/dgnc/dgnc_utils.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DGNC_UTILS_H
#define _DGNC_UTILS_H
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 05ae9fbf906e..6bc03311c9c7 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Core module
obj-$(CONFIG_FB_TFT) += fbtft.o
fbtft-y += fbtft-core.o fbtft-sysfs.o fbtft-bus.o fbtft-io.o
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index a899614ce829..6d1cad85957b 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -253,7 +253,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- __be16 *txbuf16 = par->txbuf.buf;
+ __be16 *txbuf16;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -267,10 +267,10 @@ static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
tx_array_size = par->txbuf.len / 2;
- txbuf16 = par->txbuf.buf + 1;
- tx_array_size -= 2;
- *(u8 *)(par->txbuf.buf) = 0x00;
- startbyte_size = 1;
+ txbuf16 = par->txbuf.buf + 1;
+ tx_array_size -= 2;
+ *(u8 *)(par->txbuf.buf) = 0x00;
+ startbyte_size = 1;
while (remain) {
to_copy = min(tx_array_size, remain);
diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
index 9aa9864fcf30..e4a759b54ba0 100644
--- a/drivers/staging/fbtft/fb_ssd1331.c
+++ b/drivers/staging/fbtft/fb_ssd1331.c
@@ -26,7 +26,13 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
write_reg(par, 0xae); /* Display Off */
- write_reg(par, 0xa0, 0x70 | (par->bgr << 2)); /* Set Colour Depth */
+
+ /* Set Column Address Mapping, COM Scan Direction and Colour Depth */
+ if (par->info->var.rotate == 180)
+ write_reg(par, 0xa0, 0x60 | (par->bgr << 2));
+ else
+ write_reg(par, 0xa0, 0x72 | (par->bgr << 2));
+
write_reg(par, 0x72); /* RGB colour */
write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
write_reg(par, 0xa2, 0x00); /* Set Display Offset */
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 48e3b3fd9fed..8eb5e7f10fb5 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -225,7 +225,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
u8 *buf8 = par->txbuf.buf;
u16 *buf16 = par->txbuf.buf;
int line_length = par->info->fix.line_length;
- int y_start = (offset / line_length);
+ int y_start = offset / line_length;
int y_end = (offset + len - 1) / line_length;
int x, y, i;
int ret = 0;
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index b78045fe5393..78899a172c7e 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -127,7 +127,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16 = (u16 *)par->info->screen_buffer;
- u8 *buf = par->txbuf.buf;
+ u8 *buf;
int x, y, i;
int ret = 0;
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index a80b5d115ff8..a263bce260c9 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/gpio.h>
@@ -10,33 +11,33 @@
*
*****************************************************************************/
-#define define_fbtft_write_reg(func, type, modifier) \
+#define define_fbtft_write_reg(func, buffer_type, data_type, modifier) \
void func(struct fbtft_par *par, int len, ...) \
{ \
va_list args; \
int i, ret; \
int offset = 0; \
- type *buf = (type *)par->buf; \
+ buffer_type *buf = (buffer_type *)par->buf; \
\
if (unlikely(par->debug & DEBUG_WRITE_REGISTER)) { \
va_start(args, len); \
for (i = 0; i < len; i++) { \
- buf[i] = (type)va_arg(args, unsigned int); \
+ buf[i] = modifier((data_type)va_arg(args, unsigned int)); \
} \
va_end(args); \
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, type, buf, len, "%s: ", __func__); \
+ fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par, par->info->device, buffer_type, buf, len, "%s: ", __func__); \
} \
\
va_start(args, len); \
\
if (par->startbyte) { \
*(u8 *)par->buf = par->startbyte; \
- buf = (type *)(par->buf + 1); \
+ buf = (buffer_type *)(par->buf + 1); \
offset = 1; \
} \
\
- *buf = modifier((type)va_arg(args, unsigned int)); \
- ret = fbtft_write_buf_dc(par, par->buf, sizeof(type) + offset, 0); \
+ *buf = modifier((data_type)va_arg(args, unsigned int)); \
+ ret = fbtft_write_buf_dc(par, par->buf, sizeof(data_type) + offset, 0); \
if (ret < 0) \
goto out; \
len--; \
@@ -47,18 +48,18 @@ void func(struct fbtft_par *par, int len, ...) \
if (len) { \
i = len; \
while (i--) \
- *buf++ = modifier((type)va_arg(args, unsigned int)); \
+ *buf++ = modifier((data_type)va_arg(args, unsigned int)); \
fbtft_write_buf_dc(par, par->buf, \
- len * (sizeof(type) + offset), 1); \
+ len * (sizeof(data_type) + offset), 1); \
} \
out: \
va_end(args); \
} \
EXPORT_SYMBOL(func);
-define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, )
-define_fbtft_write_reg(fbtft_write_reg16_bus8, u16, cpu_to_be16)
-define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, )
+define_fbtft_write_reg(fbtft_write_reg8_bus8, u8, u8, )
+define_fbtft_write_reg(fbtft_write_reg16_bus8, __be16, u16, cpu_to_be16)
+define_fbtft_write_reg(fbtft_write_reg16_bus16, u16, u16, )
void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...)
{
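
Separating the macro's buffer type from its data type lets the 16-bit bus8 variant declare the buffer as __be16 while the values handed in stay u16, so sparse can verify that each store goes through cpu_to_be16(). A reduced sketch of that distinction (fill_be16_buf is a hypothetical helper, not taken from this patch):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Values are CPU-endian on entry and stored in wire (big-endian)
	 * order.  Declaring the buffer as __be16 rather than u16 lets
	 * sparse flag any store that misses the cpu_to_be16() conversion.
	 */
	static void fill_be16_buf(__be16 *buf, const u16 *vals, int len)
	{
		int i;

		for (i = 0; i < len; i++)
			buf[i] = cpu_to_be16(vals[i]);	/* u16 -> __be16 */
	}
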
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index ffb9a3b4d454..f4a591919f62 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/gpio.h>
diff --git a/drivers/staging/fbtft/fbtft-sysfs.c b/drivers/staging/fbtft/fbtft-sysfs.c
index 5bfd67b526b5..712096659aa0 100644
--- a/drivers/staging/fbtft/fbtft-sysfs.c
+++ b/drivers/staging/fbtft/fbtft-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "fbtft.h"
#include "internal.h"
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 26017fe9df93..0d8ed002adcb 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -104,9 +104,11 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
/* We don't support any other format */
return;
- /* For S/G frames, we first need to free all SG entries */
+ /* For S/G frames, we first need to free all SG entries
+ * except the first one, which was taken care of already
+ */
sgt = vaddr + dpaa2_fd_get_offset(fd);
- for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
+ for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
@@ -131,16 +133,15 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
u16 fd_offset = dpaa2_fd_get_offset(fd);
u32 fd_length = dpaa2_fd_get_len(fd);
- skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+ ch->buf_count--;
+
+ skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
if (unlikely(!skb))
return NULL;
skb_reserve(skb, fd_offset);
skb_put(skb, fd_length);
- ch->buf_count--;
-
return skb;
}
@@ -176,10 +177,21 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
if (i == 0) {
/* We build the skb around the first data buffer */
- skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- if (unlikely(!skb))
- return NULL;
+ skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+ if (unlikely(!skb)) {
+ /* Free the first SG entry now, since we already
+ * unmapped it and obtained the virtual address
+ */
+ skb_free_frag(sg_vaddr);
+
+ /* We still need to subtract the buffers used
+ * by this FD from our software counter
+ */
+ while (!dpaa2_sg_is_final(&sgt[i]) &&
+ i < DPAA2_ETH_MAX_SG_ENTRIES)
+ i++;
+ break;
+ }
sg_offset = dpaa2_sg_get_offset(sge);
skb_reserve(skb, sg_offset);
@@ -206,6 +218,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
break;
}
+ WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
+
/* Count all data buffers + SG table buffer */
ch->buf_count -= i + 2;
@@ -557,10 +571,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
+ if (unlikely(skb_headroom(skb) < dpaa2_eth_needed_headroom(priv))) {
struct sk_buff *ns;
- ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
+ ns = skb_realloc_headroom(skb, dpaa2_eth_needed_headroom(priv));
if (unlikely(!ns)) {
percpu_stats->tx_dropped++;
goto err_alloc_headroom;
@@ -718,6 +732,23 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ skb_free_frag(vaddr);
+ }
+}
+
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -727,17 +758,17 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
void *buf;
dma_addr_t addr;
- int i;
+ int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
/* Allocate buffer visible to WRIOP + skb shared info +
* alignment padding
*/
- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
+ buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
if (unlikely(!buf))
goto err_alloc;
- buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
+ buf = PTR_ALIGN(buf, priv->rx_buf_align);
addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
DMA_FROM_DEVICE);
@@ -748,28 +779,33 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
/* tracing point */
trace_dpaa2_eth_buf_seed(priv->net_dev,
- buf, DPAA2_ETH_BUF_RAW_SIZE,
+ buf, dpaa2_eth_buf_raw_size(priv),
addr, DPAA2_ETH_RX_BUF_SIZE,
bpid);
}
release_bufs:
- /* In case the portal is busy, retry until successful.
- * The buffer release function would only fail if the QBMan portal
- * was busy, which implies portal contention (i.e. more CPUs than
- * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
- * there is little we can realistically do, short of giving up -
- * in which case we'd risk depleting the buffer pool and never again
- * receiving the Rx interrupt which would kick-start the refill logic.
- * So just keep retrying, at the risk of being moved to ksoftirqd.
- */
- while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+ /* In case the portal is busy, retry until successful */
+ while ((err = dpaa2_io_service_release(NULL, bpid,
+ buf_array, i)) == -EBUSY)
cpu_relax();
+
+ /* If release command failed, clean up and bail out;
+ * not much else we can do about it
+ */
+ if (err) {
+ free_bufs(priv, buf_array, i);
+ return 0;
+ }
+
return i;
err_map:
skb_free_frag(buf);
err_alloc:
+ /* If we managed to allocate at least some buffers,
+ * release them to hardware
+ */
if (i)
goto release_bufs;
@@ -811,10 +847,8 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
*/
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
- struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
- void *vaddr;
- int ret, i;
+ int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -823,15 +857,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- for (i = 0; i < ret; i++) {
- /* Same logic as on regular Rx path */
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
- buf_array[i]);
- dma_unmap_single(dev, buf_array[i],
- DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- skb_free_frag(vaddr);
- }
+ free_bufs(priv, buf_array, ret);
} while (ret);
}
@@ -927,13 +953,14 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
break;
}
- if (cleaned < budget) {
- napi_complete_done(napi, cleaned);
+ if (cleaned < budget && napi_complete_done(napi, cleaned)) {
/* Re-enable data available notifications */
do {
err = dpaa2_io_service_rearm(NULL, &ch->nctx);
cpu_relax();
} while (err == -EBUSY);
+ WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
+ ch->nctx.desired_cpu);
}
ch->stats.frames += cleaned;
@@ -1415,34 +1442,32 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_open() failed\n");
- goto err_open;
+ goto free;
}
err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_reset() failed\n");
- goto err_reset;
+ goto close;
}
err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
if (err) {
dev_err(dev, "dpcon_get_attributes() failed\n");
- goto err_get_attr;
+ goto close;
}
err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
if (err) {
dev_err(dev, "dpcon_enable() failed\n");
- goto err_enable;
+ goto close;
}
return dpcon;
-err_enable:
-err_get_attr:
-err_reset:
+close:
dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
-err_open:
+free:
fsl_mc_object_free(dpcon);
return NULL;
@@ -1749,66 +1774,32 @@ static void free_dpbp(struct dpaa2_eth_priv *priv)
fsl_mc_object_free(priv->dpbp_dev);
}
-/* Configure the DPNI object this interface is associated with */
-static int setup_dpni(struct fsl_mc_device *ls_dev)
+static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
- struct device *dev = &ls_dev->dev;
- struct dpaa2_eth_priv *priv;
- struct net_device *net_dev;
+ struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
int err;
- net_dev = dev_get_drvdata(dev);
- priv = netdev_priv(net_dev);
-
- /* get a handle for the DPNI object */
- err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_open() failed\n");
- goto err_open;
- }
-
- ls_dev->mc_io = priv->mc_io;
- ls_dev->mc_handle = priv->mc_token;
-
- err = dpni_reset(priv->mc_io, 0, priv->mc_token);
- if (err) {
- dev_err(dev, "dpni_reset() failed\n");
- goto err_reset;
- }
-
- err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
- &priv->dpni_attrs);
- if (err) {
- dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
- goto err_get_attr;
- }
+ /* We need to check for WRIOP version 1.0.0, but depending on the MC
+ * version, this number is not always provided correctly on rev1.
+ * We need to check for both alternatives in this situation.
+ */
+ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
+ priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+ else
+ priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
- /* Configure buffer layouts */
- /* rx buffer */
- buf_layout.pass_parser_result = true;
+ /* tx buffer */
buf_layout.pass_frame_status = true;
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
- buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
- DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
- DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
- DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
- err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
- DPNI_QUEUE_RX, &buf_layout);
- if (err) {
- dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
- goto err_buf_layout;
- }
-
- /* tx buffer */
buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
- goto err_buf_layout;
+ return err;
}
/* tx-confirm buffer */
@@ -1817,7 +1808,7 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
- goto err_buf_layout;
+ return err;
}
/* Now that we've set our tx buffer layout, retrieve the minimum
@@ -1827,24 +1818,76 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
&priv->tx_data_offset);
if (err) {
dev_err(dev, "dpni_get_tx_data_offset() failed\n");
- goto err_data_offset;
+ return err;
}
if ((priv->tx_data_offset % 64) != 0)
dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
priv->tx_data_offset);
- /* Accommodate software annotation space (SWA) */
- priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;
+ /* rx buffer */
+ buf_layout.pass_parser_result = true;
+ buf_layout.data_align = priv->rx_buf_align;
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
+ buf_layout.private_data_size = 0;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
+ return err;
+ }
return 0;
+}
-err_data_offset:
-err_buf_layout:
-err_get_attr:
-err_reset:
+/* Configure the DPNI object this interface is associated with */
+static int setup_dpni(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_eth_priv *priv;
+ struct net_device *net_dev;
+ int err;
+
+ net_dev = dev_get_drvdata(dev);
+ priv = netdev_priv(net_dev);
+
+ /* get a handle for the DPNI object */
+ err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_open() failed\n");
+ return err;
+ }
+
+ ls_dev->mc_io = priv->mc_io;
+ ls_dev->mc_handle = priv->mc_token;
+
+ err = dpni_reset(priv->mc_io, 0, priv->mc_token);
+ if (err) {
+ dev_err(dev, "dpni_reset() failed\n");
+ goto close;
+ }
+
+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
+ &priv->dpni_attrs);
+ if (err) {
+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
+ goto close;
+ }
+
+ err = set_buffer_layout(priv);
+ if (err)
+ goto close;
+
+
+ return 0;
+
+close:
dpni_close(priv->mc_io, 0, priv->mc_token);
-err_open:
+
return err;
}
@@ -2085,7 +2128,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
*/
err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
if (err)
- netdev_err(net_dev, "Failed to configure hashing\n");
+ dev_err(dev, "Failed to configure hashing\n");
/* Configure handling of error frames */
err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
@@ -2230,6 +2273,7 @@ static int netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u16 rx_headroom, req_headroom;
u8 bcast_addr[ETH_ALEN];
u8 num_queues;
int err;
@@ -2251,7 +2295,20 @@ static int netdev_init(struct net_device *net_dev)
/* Reserve enough space to align buffer as per hardware requirement;
* NOTE: priv->tx_data_offset MUST be initialized at this point.
*/
- net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);
+ net_dev->needed_headroom = dpaa2_eth_needed_headroom(priv);
+
+ /* If headroom guaranteed by hardware in the Rx frame buffer is
+ * smaller than the Tx headroom required by the stack, issue a
+ * one time warning. This will most likely mean skbs forwarded to
+ * another DPAA2 network interface will get reallocated, with a
+ * significant performance impact.
+ */
+ req_headroom = LL_RESERVED_SPACE(net_dev) - ETH_HLEN;
+ rx_headroom = ALIGN(DPAA2_ETH_RX_HWA_SIZE +
+ dpaa2_eth_rx_head_room(priv), priv->rx_buf_align);
+ if (req_headroom > rx_headroom)
+ dev_info_once(dev, "Required headroom (%d) greater than available (%d)\n",
+ req_headroom, rx_headroom);
/* Set MTU limits */
net_dev->min_mtu = 68;
@@ -2303,7 +2360,7 @@ static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
- u32 status = 0, clear = 0;
+ u32 status = ~0;
struct device *dev = (struct device *)arg;
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
@@ -2313,18 +2370,12 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
DPNI_IRQ_INDEX, &status);
if (unlikely(err)) {
netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
- clear = 0xffffffff;
- goto out;
+ return IRQ_HANDLED;
}
- if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
- clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
link_state_update(netdev_priv(net_dev));
- }
-out:
- dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
- DPNI_IRQ_INDEX, clear);
return IRQ_HANDLED;
}
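
The buffer seeding rework above replaces the unconditional release retry loop with one that spins only on -EBUSY and, on any other error, frees the buffers itself via the new free_bufs() helper. Its shape, pulled out into a hedged sketch (release_or_free() is a hypothetical wrapper, assuming the driver's own declarations from the patch above):

	/* Retry the QBMan release only while the portal is busy; on any
	 * other error, give the buffers back to the kernel instead of
	 * leaking them.
	 */
	static int release_or_free(struct dpaa2_eth_priv *priv, u16 bpid,
				   u64 *buf_array, int count)
	{
		int err;

		/* Portal contention is transient, so spin on -EBUSY only */
		while ((err = dpaa2_io_service_release(NULL, bpid,
						       buf_array, count)) == -EBUSY)
			cpu_relax();

		if (err) {
			/* unmap + skb_free_frag(), as in free_bufs() above */
			free_bufs(priv, buf_array, count);
			return 0;	/* nothing was handed to hardware */
		}

		return count;		/* all buffers now owned by the pool */
	}
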
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
index bfbabae1aad8..5b3ab9f62d5e 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h
@@ -45,6 +45,8 @@
#include "dpaa2-eth-trace.h"
+#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
+
#define DPAA2_ETH_STORE_SIZE 16
/* Maximum number of scatter-gather entries in an ingress frame,
@@ -80,23 +82,21 @@
*/
#define DPAA2_ETH_BUFS_PER_CMD 7
-/* Hardware requires alignment for ingress/egress buffer addresses
- * and ingress buffer lengths.
- */
-#define DPAA2_ETH_RX_BUF_SIZE 2048
+/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_ETH_TX_BUF_ALIGN 64
-#define DPAA2_ETH_RX_BUF_ALIGN 256
-#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \
- ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN)
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress
- * buffers large enough to allow building an skb around them and also account
- * for alignment restrictions
+#define DPAA2_ETH_RX_BUF_SIZE 2048
+#define DPAA2_ETH_SKB_SIZE \
+ (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* Hardware annotation area in RX buffers */
+#define DPAA2_ETH_RX_HWA_SIZE 64
+
+/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
+ * to 256B. For newer revisions, the requirement is only for 64B alignment
*/
-#define DPAA2_ETH_BUF_RAW_SIZE \
- (DPAA2_ETH_RX_BUF_SIZE + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
- DPAA2_ETH_RX_BUF_ALIGN)
+#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
+#define DPAA2_ETH_RX_BUF_ALIGN 64
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
@@ -134,7 +134,7 @@ struct dpaa2_eth_swa {
DPAA2_FD_CTRL_FAERR)
/* Annotation bits in FD CTRL */
-#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_ASAL 0x00010000 /* ASAL = 64 */
#define DPAA2_FD_CTRL_PTA 0x00800000
#define DPAA2_FD_CTRL_PTV1 0x00400000
@@ -318,6 +318,7 @@ struct dpaa2_eth_priv {
struct iommu_domain *iommu_domain;
u16 tx_qdid;
+ u16 rx_buf_align;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
* This is the cpu set on which Rx and Tx conf frames are processed
@@ -353,6 +354,29 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern const char dpaa2_eth_drv_version[];
+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
+ * the buffer also needs space for its shared info struct, and we need
+ * to allocate enough to accommodate hardware alignment restrictions
+ */
+static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
+{
+ return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
+}
+
+static inline
+unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv)
+{
+ return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - HH_DATA_MOD;
+}
+
+/* Extra headroom space requested to hardware, in order to make sure there's
+ * no realloc'ing in forwarding scenarios
+ */
+static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+{
+ return dpaa2_eth_needed_headroom(priv) - DPAA2_ETH_RX_HWA_SIZE;
+}
+
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
return priv->dpni_attrs.num_queues;
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
index 031179ab3a22..ebe8fd6ccf2c 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c
@@ -76,10 +76,22 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u16 fw_major, fw_minor;
+ int err;
+
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, dpaa2_eth_drv_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+
+ err = dpni_get_api_version(priv->mc_io, 0, &fw_major, &fw_minor);
+ if (!err)
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", fw_major, fw_minor);
+ else
+ strlcpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+
strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
index 57df22292233..3120e22496d0 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h
@@ -538,4 +538,9 @@ struct dpni_rsp_get_taildrop {
__le32 threshold;
};
+struct dpni_rsp_get_api_version {
+ u16 major;
+ u16 minor;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
index 04a5b14bc1c5..e8be76181c36 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c
@@ -1594,3 +1594,35 @@ int dpni_get_taildrop(struct fsl_mc_io *mc_io,
return 0;
}
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dpni_rsp_get_api_version *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpni_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
index 282e5e85ffa7..ce86a816af45 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpni.h
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h
@@ -829,4 +829,9 @@ struct dpni_rule_cfg {
u8 key_size;
};
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
index f8096828f5b7..a609ec82daf3 100644
--- a/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-service.c
@@ -76,7 +76,7 @@ static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
if (d)
return d;
- if (unlikely(cpu >= num_possible_cpus()))
+ if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
return NULL;
/*
@@ -121,7 +121,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
return NULL;
/* check if CPU is out of range (-1 means any cpu) */
- if (desc->cpu >= num_possible_cpus()) {
+ if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
kfree(obj);
return NULL;
}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 038da4d1ebd0..f74a6f1764bb 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -137,7 +137,7 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
{
struct irq_chip *chip = info->chip;
- if (WARN_ON((!chip)))
+ if (WARN_ON(!chip))
return;
/*
diff --git a/drivers/staging/fsl-mc/include/dpaa2-io.h b/drivers/staging/fsl-mc/include/dpaa2-io.h
index c5646096c5d4..afc2d060d077 100644
--- a/drivers/staging/fsl-mc/include/dpaa2-io.h
+++ b/drivers/staging/fsl-mc/include/dpaa2-io.h
@@ -54,6 +54,8 @@ struct device;
* for dequeue.
*/
+#define DPAA2_IO_ANY_CPU -1
+
/**
* struct dpaa2_io_desc - The DPIO descriptor
* @receives_notifications: Use notification mode. Non-zero if the DPIO
@@ -91,8 +93,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
* @cb: The callback to be invoked when the notification arrives
* @is_cdan: Zero for FQDAN, non-zero for CDAN
* @id: FQID or channel ID, needed for rearm
- * @desired_cpu: The cpu on which the notifications will show up. -1 means
- * any CPU.
+ * @desired_cpu: The cpu on which the notifications will show up. Use
+ * DPAA2_IO_ANY_CPU if don't care
* @dpio_id: The dpio index
* @qman64: The 64-bit context value shows up in the FQDAN/CDAN.
* @node: The list node
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 41a49c8194e5..bba7e9c888b3 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -217,13 +217,6 @@ static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
}
}
-static void fwtty_txn_constructor(void *this)
-{
- struct fwtty_transaction *txn = this;
-
- init_timer(&txn->fw_txn.split_timeout_timer);
-}
-
static void fwtty_common_callback(struct fw_card *card, int rcode,
void *payload, size_t len, void *cb_data)
{
@@ -1806,9 +1799,9 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
-static void fwserial_plug_timeout(unsigned long data)
+static void fwserial_plug_timeout(struct timer_list *t)
{
- struct fwtty_peer *peer = (struct fwtty_peer *)data;
+ struct fwtty_peer *peer = from_timer(peer, t, timer);
struct fwtty_port *port;
spin_lock_bh(&peer->lock);
@@ -1860,7 +1853,6 @@ static int fwserial_connect_peer(struct fwtty_peer *peer)
fill_plug_req(pkt, peer->port);
- setup_timer(&peer->timer, fwserial_plug_timeout, (unsigned long)peer);
mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
spin_unlock_bh(&peer->lock);
@@ -2098,7 +2090,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
spin_lock_init(&peer->lock);
peer->port = NULL;
- init_timer(&peer->timer);
+ timer_setup(&peer->timer, fwserial_plug_timeout, 0);
INIT_WORK(&peer->work, fwserial_peer_workfn);
INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
@@ -2863,7 +2855,7 @@ static int __init fwserial_init(void)
fwtty_txn_cache = kmem_cache_create("fwtty_txn_cache",
sizeof(struct fwtty_transaction),
- 0, 0, fwtty_txn_constructor);
+ 0, 0, NULL);
if (!fwtty_txn_cache) {
err = -ENOMEM;
goto unregister_loop;
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 30b2481fe32b..1d15f183e0fa 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIREWIRE_FWSERIAL_H
#define _FIREWIRE_FWSERIAL_H
diff --git a/drivers/staging/gdm724x/Makefile b/drivers/staging/gdm724x/Makefile
index ba7f11a6a097..e61b95788c9f 100644
--- a/drivers/staging/gdm724x/Makefile
+++ b/drivers/staging/gdm724x/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LTE_GDM724X) := gdmulte.o
gdmulte-y += gdm_lte.o netlink_k.o
gdmulte-y += gdm_usb.o gdm_endian.o
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 9ab6ce231f11..0527b0d1c1d0 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -26,6 +26,7 @@
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/uaccess.h>
+#include <linux/errno.h>
#include <net/ndisc.h>
#include "gdm_lte.h"
@@ -118,6 +119,10 @@ static int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
void *mac_header_data;
u32 mac_header_len;
+ /* Check for skb->len, discard if empty */
+ if (skb_in->len == 0)
+ return -ENODATA;
+
/* Format the mac header so that it can be put to skb */
if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
@@ -241,13 +246,13 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
- return -1;
+ return -EPROTONOSUPPORT;
mac_header_data = &vlan_eth;
mac_header_len = VLAN_ETH_HLEN;
} else {
memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
if (ntohs(eth.h_proto) != ETH_P_IPV6)
- return -1;
+ return -EPROTONOSUPPORT;
mac_header_data = &eth;
mac_header_len = ETH_HLEN;
}
@@ -255,13 +260,13 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
/* Check if this is IPv6 ICMP packet */
ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
- return -1;
+ return -EPROTONOSUPPORT;
/* Check if this is NDP packet */
icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len +
sizeof(struct ipv6hdr));
if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
- return -1;
+ return -EPROTONOSUPPORT;
} else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
/* Check NS */
u8 icmp_na[sizeof(struct icmp6hdr) +
@@ -305,7 +310,7 @@ static int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out,
(u16 *)icmp_na, sizeof(icmp_na));
} else {
- return -1;
+ return -EINVAL;
}
/* Fill the destination mac with source mac of the received packet */
@@ -412,7 +417,7 @@ static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
nic_type = gdm_lte_tx_nic_type(dev, skb);
if (nic_type == 0) {
netdev_err(dev, "tx - invalid nic_type\n");
- return -1;
+ return -EMEDIUMTYPE;
}
if (nic_type & NIC_TYPE_ARP) {
@@ -539,7 +544,7 @@ int gdm_lte_event_init(void)
}
pr_err("event init failed\n");
- return -1;
+ return -ENODATA;
}
void gdm_lte_event_exit(void)
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
index b836f0a20c36..806e75b7f405 100644
--- a/drivers/staging/greybus/Documentation/firmware/authenticate.c
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Sample code to test CAP protocol
*
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
index c73dee9d13c1..31d9c23e2eeb 100644
--- a/drivers/staging/greybus/Documentation/firmware/firmware.c
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Sample code to test firmware-management protocol
*
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
index 23e1cb7bff8e..2551ed16b742 100644
--- a/drivers/staging/greybus/Makefile
+++ b/drivers/staging/greybus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Greybus core
greybus-y := core.o \
debugfs.o \
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 0412f3d06efb..b0c66112eb18 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to control APB.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/clk.h>
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 21ac92d0f533..ace4eb365c0e 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/clk.h>
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
index bcffc69d0960..02056351d25a 100644
--- a/drivers/staging/greybus/arche_platform.h
+++ b/drivers/staging/greybus/arche_platform.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Arche Platform driver to enable Unipro link.
*
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __ARCHE_PLATFORM_H
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
index c0b63c0130c5..3534ba1a4e6c 100644
--- a/drivers/staging/greybus/arpc.h
+++ b/drivers/staging/greybus/arpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
index 1b4252d5d255..7ebb1bde5cb7 100644
--- a/drivers/staging/greybus/audio_apbridgea.c
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index b94cb05c89e4..42ac6059bfc7 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/**
* Copyright (c) 2015-2016 Google Inc.
* All rights reserved.
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index a6d01f0761f3..fdb9e83cc34b 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* APBridge ALSA SoC dummy codec driver
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index 6fb064c69a36..161b37c8ef17 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __LINUX_GBAUDIO_CODEC_H
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
index 7884d8482dc0..8894f1c87d48 100644
--- a/drivers/staging/greybus/audio_gb.c
+++ b/drivers/staging/greybus/audio_gb.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Audio Device Class Protocol helpers
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index aa6508b44fab..d44b070d8862 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -1,16 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/idr.h>
#include "audio_manager.h"
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index 5ab8f5e0ed3f..dcb1a20f5ff1 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_H_
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index adc16977452d..52342e832e3b 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/slab.h>
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
index 079ce953c256..9d9b58fc54c4 100644
--- a/drivers/staging/greybus/audio_manager_private.h
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index 34ebd147052f..283fbed0c8ed 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2015-2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/string.h>
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index 094c3be79b33..d065334efa23 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 07fac3948f3a..de4b1b2b12f3 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus audio driver
* Copyright 2015-2016 Google Inc.
* Copyright 2015-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "audio_codec.h"
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index 6c5dcb1c226b..16cc65e1472b 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Component Authentication Protocol (CAP) Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 06df0ce03150..e85ffae85dff 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* BOOTROM Greybus driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/firmware.h>
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
index d2ef57d090be..81c018da1248 100644
--- a/drivers/staging/greybus/bundle.c
+++ b/drivers/staging/greybus/bundle.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus bundles
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
index 0c3491def96c..ffcfd43802cc 100644
--- a/drivers/staging/greybus/bundle.h
+++ b/drivers/staging/greybus/bundle.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus bundles
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __BUNDLE_H
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index a64517eabff4..f13f16b63d7e 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
index 2cf64640e8ec..2103168b585e 100644
--- a/drivers/staging/greybus/connection.c
+++ b/drivers/staging/greybus/connection.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/workqueue.h>
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
index 4d9f4c64176c..ec3f1d3ef3b9 100644
--- a/drivers/staging/greybus/connection.h
+++ b/drivers/staging/greybus/connection.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus connections
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __CONNECTION_H
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
index 5b30be30a3a4..35f945a12b11 100644
--- a/drivers/staging/greybus/control.c
+++ b/drivers/staging/greybus/control.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus CPort control protocol.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
index 4dcaec8b9cfe..643ddb9e0f92 100644
--- a/drivers/staging/greybus/control.h
+++ b/drivers/staging/greybus/control.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus CPort control protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __CONTROL_H
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
index ba761905b790..dafa430d176e 100644
--- a/drivers/staging/greybus/core.c
+++ b/drivers/staging/greybus/core.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus "Core"
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
index a9d4d3da99a0..56e20c30feb5 100644
--- a/drivers/staging/greybus/debugfs.c
+++ b/drivers/staging/greybus/debugfs.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus debugfs code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index f7b24e0eaa6f..b082d81833a0 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus "AP" USB driver for "ES2" controller chips
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kthread.h>
#include <linux/sizes.h>
@@ -761,6 +760,7 @@ static int check_urb_status(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "%s: overflow actual length is %d\n",
__func__, urb->actual_length);
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
index f4f0db1cefe8..946221307ef6 100644
--- a/drivers/staging/greybus/firmware.h
+++ b/drivers/staging/greybus/firmware.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Management Header
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __FIRMWARE_H
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
index 454a98957ba5..388866d92f5b 100644
--- a/drivers/staging/greybus/fw-core.c
+++ b/drivers/staging/greybus/fw-core.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Core Bundle Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index 8a1a413c6cb3..d3b7cccbc10d 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Download Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/firmware.h>
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 3cd6cf0a656b..71aec14f8181 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Firmware Management Protocol Driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/cdev.h>
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
index d45dabc5b367..ee293e461fc3 100644
--- a/drivers/staging/greybus/gb-camera.h
+++ b/drivers/staging/greybus/gb-camera.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Camera protocol driver.
*
* Copyright 2015 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef __GB_CAMERA_H
#define __GB_CAMERA_H
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 80c1da8224e6..6cb85c3d3572 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
index 8ee68055ccc4..99463489d7d6 100644
--- a/drivers/staging/greybus/gbphy.h
+++ b/drivers/staging/greybus/gbphy.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Bridged-Phy Bus driver
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#ifndef __GBPHY_H
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index ee5f998b174f..b1d4698019a1 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
index c9bb93f23927..d03ddb7c9df0 100644
--- a/drivers/staging/greybus/greybus.h
+++ b/drivers/staging/greybus/greybus.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver and device API
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __LINUX_GREYBUS_H
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 4784ed98e8a3..03ea9615b217 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Greybus Component Authentication User Header
*
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index 277a2acce6fd..b58281a63ba4 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* Greybus Firmware Management User Header
*
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
index 4bb1fc1b811d..f4c8440093e4 100644
--- a/drivers/staging/greybus/greybus_id.h
+++ b/drivers/staging/greybus/greybus_id.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* FIXME
* move this to include/linux/mod_devicetable.h when merging
*/
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
index d135945cefe1..2cec5cf7a846 100644
--- a/drivers/staging/greybus/greybus_manifest.h
+++ b/drivers/staging/greybus/greybus_manifest.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest definition
*
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
index b1be0b0af464..9bd7b6dfb476 100644
--- a/drivers/staging/greybus/greybus_protocols.h
+++ b/drivers/staging/greybus/greybus_protocols.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
index f8feae4dc3b5..7b5e2c6b1f6b 100644
--- a/drivers/staging/greybus/greybus_trace.h
+++ b/drivers/staging/greybus/greybus_trace.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver and device API
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM greybus
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
index 185ae3fa10fd..969f86697673 100644
--- a/drivers/staging/greybus/hd.c
+++ b/drivers/staging/greybus/hd.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
index e7927bb1761c..6cf024a20a58 100644
--- a/drivers/staging/greybus/hd.h
+++ b/drivers/staging/greybus/hd.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Host Device
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __HD_H
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index 465101bbab69..04053ff075a6 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HID class driver for the Greybus.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/bitops.h>
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
index c2a50087000c..58a37deb6579 100644
--- a/drivers/staging/greybus/i2c.c
+++ b/drivers/staging/greybus/i2c.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* I2C bridge driver for the Greybus "generic" I2C module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
index 71e5cc234e78..d7b5b89a2f40 100644
--- a/drivers/staging/greybus/interface.c
+++ b/drivers/staging/greybus/interface.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus interface code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
index bd31b8c18d5b..1c00c5bb3ec9 100644
--- a/drivers/staging/greybus/interface.h
+++ b/drivers/staging/greybus/interface.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Interface Block code
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __INTERFACE_H
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 3f4148c92308..010ae1e9c7fb 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Lights protocol driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
@@ -925,6 +924,8 @@ static void __gb_lights_led_unregister(struct gb_channel *channel)
return;
led_classdev_unregister(cdev);
+ kfree(cdev->name);
+ cdev->name = NULL;
channel->led = NULL;
}
@@ -998,11 +999,7 @@ static int gb_lights_channel_config(struct gb_light *light,
light->has_flash = true;
- ret = gb_lights_channel_flash_config(channel);
- if (ret < 0)
- return ret;
-
- return ret;
+ return gb_lights_channel_flash_config(channel);
}
static int gb_lights_light_config(struct gb_lights *glights, u8 id)
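The light.c hunks above free cdev->name when the LED classdev is unregistered and clear the pointer so a repeated unregister cannot double-free, and they collapse the flash-config tail into a direct return. A minimal sketch of the allocate/free pairing this assumes; the allocation site and names here are hypothetical, not the driver's actual helpers:

#include <linux/leds.h>
#include <linux/slab.h>

static int example_led_register(struct device *parent, struct led_classdev *cdev,
				const char *channel_name)
{
	int ret;

	cdev->name = kasprintf(GFP_KERNEL, "example:%s", channel_name);
	if (!cdev->name)
		return -ENOMEM;

	ret = led_classdev_register(parent, cdev);
	if (ret) {
		kfree(cdev->name);
		cdev->name = NULL;
	}
	return ret;
}

static void example_led_unregister(struct led_classdev *cdev)
{
	led_classdev_unregister(cdev);
	kfree(cdev->name);	/* mirrors the hunk above */
	cdev->name = NULL;
}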
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 5c5bedaf69a6..15a88574dbb0 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver for the log protocol
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index 08e255884206..42f6f3de967c 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Loopback bridge driver for the Greybus loopback module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -58,12 +57,7 @@ static struct gb_loopback_device gb_dev;
struct gb_loopback_async_operation {
struct gb_loopback *gb;
struct gb_operation *operation;
- struct timeval ts;
- struct timer_list timer;
- struct list_head entry;
- struct work_struct work;
- struct kref kref;
- bool pending;
+ ktime_t ts;
int (*completion)(struct gb_loopback_async_operation *op_async);
};
@@ -72,7 +66,6 @@ struct gb_loopback {
struct dentry *file;
struct kfifo kfifo_lat;
- struct kfifo kfifo_ts;
struct mutex mutex;
struct task_struct *task;
struct list_head entry;
@@ -82,7 +75,7 @@ struct gb_loopback {
atomic_t outstanding_operations;
/* Per connection stats */
- struct timeval ts;
+ ktime_t ts;
struct gb_loopback_stats latency;
struct gb_loopback_stats throughput;
struct gb_loopback_stats requests_per_second;
@@ -262,7 +255,6 @@ static void gb_loopback_check_attr(struct gb_loopback *gb)
gb->iteration_max, kfifo_depth);
}
kfifo_reset_out(&gb->kfifo_lat);
- kfifo_reset_out(&gb->kfifo_ts);
switch (gb->type) {
case GB_LOOPBACK_TYPE_PING:
@@ -377,21 +369,9 @@ static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
return NSEC_PER_DAY - t2 + t1;
}
-static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
-{
- u64 t1, t2;
-
- t1 = timeval_to_ns(ts);
- t2 = timeval_to_ns(te);
-
- return __gb_loopback_calc_latency(t1, t2);
-}
-
-static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
- struct timeval *ts, struct timeval *te)
+static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
- kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
- kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
+ return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
@@ -399,10 +379,10 @@ static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
void *response, int response_size)
{
struct gb_operation *operation;
- struct timeval ts, te;
+ ktime_t ts, te;
int ret;
- do_gettimeofday(&ts);
+ ts = ktime_get();
operation = gb_operation_create(gb->connection, type, request_size,
response_size, GFP_KERNEL);
if (!operation)
@@ -430,11 +410,10 @@ static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
}
}
- do_gettimeofday(&te);
+ te = ktime_get();
/* Calculate the total time the message took */
- gb_loopback_push_latency_ts(gb, &ts, &te);
- gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
+ gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);
out_put_operation:
gb_operation_put(operation);
@@ -442,56 +421,6 @@ out_put_operation:
return ret;
}
-static void __gb_loopback_async_operation_destroy(struct kref *kref)
-{
- struct gb_loopback_async_operation *op_async;
-
- op_async = container_of(kref, struct gb_loopback_async_operation, kref);
-
- list_del(&op_async->entry);
- if (op_async->operation)
- gb_operation_put(op_async->operation);
- atomic_dec(&op_async->gb->outstanding_operations);
- wake_up(&op_async->gb->wq_completion);
- kfree(op_async);
-}
-
-static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
- *op_async)
-{
- kref_get(&op_async->kref);
-}
-
-static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
- *op_async)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&gb_dev.lock, flags);
- kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
- spin_unlock_irqrestore(&gb_dev.lock, flags);
-}
-
-static struct gb_loopback_async_operation *
- gb_loopback_operation_find(u16 id)
-{
- struct gb_loopback_async_operation *op_async;
- bool found = false;
- unsigned long flags;
-
- spin_lock_irqsave(&gb_dev.lock, flags);
- list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
- if (op_async->operation->id == id) {
- gb_loopback_async_operation_get(op_async);
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&gb_dev.lock, flags);
-
- return found ? op_async : NULL;
-}
-
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
wait_event(gb->wq_completion,
@@ -502,87 +431,42 @@ static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
struct gb_loopback_async_operation *op_async;
struct gb_loopback *gb;
- struct timeval te;
- bool err = false;
-
- do_gettimeofday(&te);
- op_async = gb_loopback_operation_find(operation->id);
- if (!op_async)
- return;
+ ktime_t te;
+ int result;
+ te = ktime_get();
+ result = gb_operation_result(operation);
+ op_async = gb_operation_get_data(operation);
gb = op_async->gb;
+
mutex_lock(&gb->mutex);
- if (!op_async->pending || gb_operation_result(operation)) {
- err = true;
+ if (!result && op_async->completion)
+ result = op_async->completion(op_async);
+
+ if (!result) {
+ gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
} else {
- if (op_async->completion)
- if (op_async->completion(op_async))
- err = true;
+ gb->error++;
+ if (result == -ETIMEDOUT)
+ gb->requests_timedout++;
}
- if (!err) {
- gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
- gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
- &te);
- }
+ gb->iteration_count++;
+ gb_loopback_calculate_stats(gb, result);
- if (op_async->pending) {
- if (err)
- gb->error++;
- gb->iteration_count++;
- op_async->pending = false;
- del_timer_sync(&op_async->timer);
- gb_loopback_async_operation_put(op_async);
- gb_loopback_calculate_stats(gb, err);
- }
mutex_unlock(&gb->mutex);
dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
operation->id);
- gb_loopback_async_operation_put(op_async);
-}
-
-static void gb_loopback_async_operation_work(struct work_struct *work)
-{
- struct gb_loopback *gb;
- struct gb_operation *operation;
- struct gb_loopback_async_operation *op_async;
-
- op_async = container_of(work, struct gb_loopback_async_operation, work);
- gb = op_async->gb;
- operation = op_async->operation;
-
- mutex_lock(&gb->mutex);
- if (op_async->pending) {
- gb->requests_timedout++;
- gb->error++;
- gb->iteration_count++;
- op_async->pending = false;
- gb_loopback_async_operation_put(op_async);
- gb_loopback_calculate_stats(gb, true);
- }
- mutex_unlock(&gb->mutex);
-
- dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
- operation->id);
-
- gb_operation_cancel(operation, -ETIMEDOUT);
- gb_loopback_async_operation_put(op_async);
-}
-
-static void gb_loopback_async_operation_timeout(unsigned long data)
-{
- struct gb_loopback_async_operation *op_async;
- u16 id = data;
+ /* Wake up waiters */
+ atomic_dec(&op_async->gb->outstanding_operations);
+ wake_up(&gb->wq_completion);
- op_async = gb_loopback_operation_find(id);
- if (!op_async) {
- pr_err("operation %d not found - time out ?\n", id);
- return;
- }
- schedule_work(&op_async->work);
+ /* Release resources */
+ gb_operation_put(operation);
+ kfree(op_async);
}
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
@@ -593,15 +477,11 @@ static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
struct gb_loopback_async_operation *op_async;
struct gb_operation *operation;
int ret;
- unsigned long flags;
op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
if (!op_async)
return -ENOMEM;
- INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
- kref_init(&op_async->kref);
-
operation = gb_operation_create(gb->connection, type, request_size,
response_size, GFP_KERNEL);
if (!operation) {
@@ -612,35 +492,24 @@ static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
if (request_size)
memcpy(operation->request->payload, request, request_size);
+ gb_operation_set_data(operation, op_async);
+
op_async->gb = gb;
op_async->operation = operation;
op_async->completion = completion;
- spin_lock_irqsave(&gb_dev.lock, flags);
- list_add_tail(&op_async->entry, &gb_dev.list_op_async);
- spin_unlock_irqrestore(&gb_dev.lock, flags);
+ op_async->ts = ktime_get();
- do_gettimeofday(&op_async->ts);
- op_async->pending = true;
atomic_inc(&gb->outstanding_operations);
- mutex_lock(&gb->mutex);
ret = gb_operation_request_send(operation,
gb_loopback_async_operation_callback,
- 0,
+ jiffies_to_msecs(gb->jiffy_timeout),
GFP_KERNEL);
- if (ret)
- goto error;
-
- setup_timer(&op_async->timer, gb_loopback_async_operation_timeout,
- (unsigned long)operation->id);
- op_async->timer.expires = jiffies + gb->jiffy_timeout;
- add_timer(&op_async->timer);
-
- goto done;
-error:
- gb_loopback_async_operation_put(op_async);
-done:
- mutex_unlock(&gb->mutex);
+ if (ret) {
+ atomic_dec(&gb->outstanding_operations);
+ gb_operation_put(operation);
+ kfree(op_async);
+ }
return ret;
}
@@ -854,7 +723,7 @@ static void gb_loopback_reset_stats(struct gb_loopback *gb)
/* Should be initialized at least once per transaction set */
gb->apbridge_latency_ts = 0;
gb->gbphy_latency_ts = 0;
- memset(&gb->ts, 0, sizeof(struct timeval));
+ gb->ts = ktime_set(0, 0);
}
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
@@ -937,15 +806,15 @@ static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
u64 nlat;
u32 lat;
- struct timeval te;
+ ktime_t te;
if (!error) {
gb->requests_completed++;
gb_loopback_calculate_latency_stats(gb);
}
- do_gettimeofday(&te);
- nlat = gb_loopback_calc_latency(&gb->ts, &te);
+ te = ktime_get();
+ nlat = gb_loopback_calc_latency(gb->ts, te);
if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
lat = gb_loopback_nsec_to_usec_latency(nlat);
@@ -1029,9 +898,8 @@ static int gb_loopback_fn(void *data)
size = gb->size;
us_wait = gb->us_wait;
type = gb->type;
- if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
- do_gettimeofday(&gb->ts);
- mutex_unlock(&gb->mutex);
+ if (ktime_to_ns(gb->ts) == 0)
+ gb->ts = ktime_get();
/* Else operations to perform */
if (gb->async) {
@@ -1042,8 +910,10 @@ static int gb_loopback_fn(void *data)
else if (type == GB_LOOPBACK_TYPE_SINK)
error = gb_loopback_async_sink(gb, size);
- if (error)
+ if (error) {
gb->error++;
+ gb->iteration_count++;
+ }
} else {
/* We are effectively single threaded here */
if (type == GB_LOOPBACK_TYPE_PING)
@@ -1059,6 +929,7 @@ static int gb_loopback_fn(void *data)
gb_loopback_calculate_stats(gb, !!error);
}
gb->send_count++;
+ mutex_unlock(&gb->mutex);
if (us_wait) {
if (us_wait < 20000)
@@ -1241,18 +1112,12 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
retval = -ENOMEM;
goto out_conn;
}
- if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
- GFP_KERNEL)) {
- retval = -ENOMEM;
- goto out_kfifo0;
- }
-
/* Fork worker thread */
mutex_init(&gb->mutex);
gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
if (IS_ERR(gb->task)) {
retval = PTR_ERR(gb->task);
- goto out_kfifo1;
+ goto out_kfifo;
}
spin_lock_irqsave(&gb_dev.lock, flags);
@@ -1266,9 +1131,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
return 0;
-out_kfifo1:
- kfifo_free(&gb->kfifo_ts);
-out_kfifo0:
+out_kfifo:
kfifo_free(&gb->kfifo_lat);
out_conn:
device_unregister(dev);
@@ -1302,7 +1165,6 @@ static void gb_loopback_disconnect(struct gb_bundle *bundle)
kthread_stop(gb->task);
kfifo_free(&gb->kfifo_lat);
- kfifo_free(&gb->kfifo_ts);
gb_connection_latency_tag_disable(gb->connection);
debugfs_remove(gb->file);
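The loopback.c conversion above replaces struct timeval and do_gettimeofday() with ktime_t and ktime_get(), drops the kfifo that only stored raw timestamps, and lets gb_operation_request_send() handle the timeout instead of a private timer/kref/workqueue. The core timing idiom, as a small hypothetical helper:

#include <linux/ktime.h>

static u64 time_operation_ns(void (*op)(void *), void *arg)
{
	ktime_t ts, te;

	ts = ktime_get();	/* monotonic; unaffected by settimeofday()/NTP steps */
	op(arg);
	te = ktime_get();

	return ktime_to_ns(ktime_sub(te, ts));
}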
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
index 7b903770a684..08db49264f2b 100644
--- a/drivers/staging/greybus/manifest.c
+++ b/drivers/staging/greybus/manifest.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest parsing
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
index d96428407cd7..f3c95a255631 100644
--- a/drivers/staging/greybus/manifest.h
+++ b/drivers/staging/greybus/manifest.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus manifest parsing
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __MANIFEST_H
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 660b4674a76f..b785382192de 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include "greybus.h"
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
index 88a97ce04243..b1ebcc6636db 100644
--- a/drivers/staging/greybus/module.h
+++ b/drivers/staging/greybus/module.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Module code
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __MODULE_H
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 3023012808d9..c462b1c046cd 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
@@ -294,9 +293,9 @@ static void gb_operation_work(struct work_struct *work)
gb_operation_put(operation);
}
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
{
- struct gb_operation *operation = (void *)arg;
+ struct gb_operation *operation = from_timer(operation, t, timer);
if (gb_operation_result_set(operation, -ETIMEDOUT)) {
/*
@@ -541,8 +540,7 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
goto err_request;
}
- setup_timer(&operation->timer, gb_operation_timeout,
- (unsigned long)operation);
+ timer_setup(&operation->timer, gb_operation_timeout, 0);
}
operation->flags = op_flags;
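The operation.c hunk above moves the response timer to the timer_setup()/from_timer() API, so the callback receives the timer_list pointer and recovers its container instead of decoding a casted unsigned long. The same conversion in a self-contained, hypothetical form:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

struct example_op {
	struct timer_list timer;
	u16 id;
};

static void example_op_timeout(struct timer_list *t)
{
	struct example_op *op = from_timer(op, t, timer);

	pr_debug("operation %u timed out\n", op->id);
}

static void example_op_start(struct example_op *op, unsigned int timeout_ms)
{
	timer_setup(&op->timer, example_op_timeout, 0);
	mod_timer(&op->timer, jiffies + msecs_to_jiffies(timeout_ms));
}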
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
index 7529f01b2529..40b7b02fff88 100644
--- a/drivers/staging/greybus/operation.h
+++ b/drivers/staging/greybus/operation.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus operations
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __OPERATION_H
@@ -105,6 +104,8 @@ struct gb_operation {
int active;
struct list_head links; /* connection->operations */
+
+ void *private;
};
static inline bool
@@ -206,6 +207,17 @@ static inline int gb_operation_unidirectional(struct gb_connection *connection,
request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
}
+static inline void *gb_operation_get_data(struct gb_operation *operation)
+{
+ return operation->private;
+}
+
+static inline void gb_operation_set_data(struct gb_operation *operation,
+ void *data)
+{
+ operation->private = data;
+}
+
int gb_operation_init(void);
void gb_operation_exit(void);
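The operation.h hunk above adds a private pointer with gb_operation_{set,get}_data() accessors, which the loopback conversion uses to attach its per-request context instead of keeping a separate lookup list. A sketch of the usage pattern; the context structure, names, and operation type are hypothetical, while the create/send calls follow the shapes visible in the loopback diff:

#include <linux/completion.h>
#include <linux/ktime.h>
#include "greybus.h"

struct example_ctx {
	ktime_t ts;
	int result;
	struct completion done;
};

static void example_callback(struct gb_operation *operation)
{
	struct example_ctx *ctx = gb_operation_get_data(operation);

	ctx->result = gb_operation_result(operation);
	complete(&ctx->done);
	gb_operation_put(operation);	/* drop the reference taken at create time */
}

static int example_send(struct gb_connection *connection,
			struct example_ctx *ctx, unsigned int timeout_ms)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create(connection, 0x42 /* hypothetical type */,
					0, 0, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	init_completion(&ctx->done);
	ctx->ts = ktime_get();
	gb_operation_set_data(operation, ctx);

	ret = gb_operation_request_send(operation, example_callback,
					timeout_ms, GFP_KERNEL);
	if (ret)
		gb_operation_put(operation);
	return ret;
}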
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
index 20cac20518d7..0529e5628c24 100644
--- a/drivers/staging/greybus/power_supply.c
+++ b/drivers/staging/greybus/power_supply.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Power Supply driver for a Greybus module.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index f0404bc37123..4a6d394b6c44 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PWM Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index 729d25811568..838acbe84ca0 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus driver for the Raw protocol
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 101ca5097fc9..38e85033fc4b 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SD/MMC Greybus driver.
*
* Copyright 2014-2015 Google Inc.
* Copyright 2014-2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
index c893552b5c0b..47d896992b35 100644
--- a/drivers/staging/greybus/spi.c
+++ b/drivers/staging/greybus/spi.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SPI bridge PHY driver.
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/module.h>
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
index e97b19148497..2e07c6b41334 100644
--- a/drivers/staging/greybus/spilib.c
+++ b/drivers/staging/greybus/spilib.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SPI library
*
* Copyright 2014-2016 Google Inc.
* Copyright 2014-2016 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/bitops.h>
@@ -544,12 +543,15 @@ int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
return 0;
-exit_spi_unregister:
- spi_unregister_master(master);
exit_spi_put:
spi_master_put(master);
return ret;
+
+exit_spi_unregister:
+ spi_unregister_master(master);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
@@ -558,7 +560,6 @@ void gb_spilib_master_exit(struct gb_connection *connection)
struct spi_master *master = gb_connection_get_data(connection);
spi_unregister_master(master);
- spi_master_put(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
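The spilib.c changes above separate the error paths so that spi_master_put() is called only for a master that was never registered, while a registered master is torn down with spi_unregister_master() alone; the assumption behind the fix is that unregistering releases the allocation reference, so following it with another put would drop the reference twice. A generic sketch under that assumption:

#include <linux/spi/spi.h>

static int example_spi_probe(struct device *dev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (ret) {
		spi_master_put(master);	/* never registered: drop the alloc reference */
		return ret;
	}
	return 0;
}

static void example_spi_remove(struct spi_master *master)
{
	spi_unregister_master(master);	/* also drops the final reference */
}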
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
index cb6092578a92..043d4d32c3ee 100644
--- a/drivers/staging/greybus/spilib.h
+++ b/drivers/staging/greybus/spilib.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SPI library header
*
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index 516f827e5ed9..a874fed761a1 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SVC Greybus driver.
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
index 226c2a396fc8..ad01783bac9c 100644
--- a/drivers/staging/greybus/svc.h
+++ b/drivers/staging/greybus/svc.h
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus SVC code
*
* Copyright 2015 Google Inc.
* Copyright 2015 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#ifndef __SVC_H
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
index 779fbea5d4ba..7868ad8211c5 100644
--- a/drivers/staging/greybus/svc_watchdog.c
+++ b/drivers/staging/greybus/svc_watchdog.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SVC Greybus "watchdog" driver.
*
* Copyright 2016 Google Inc.
- *
- * Released under the GPLv2 only.
*/
#include <linux/delay.h>
diff --git a/drivers/staging/greybus/tools/Makefile b/drivers/staging/greybus/tools/Makefile
index 852b12b71149..ad0ae8053b79 100644
--- a/drivers/staging/greybus/tools/Makefile
+++ b/drivers/staging/greybus/tools/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ifeq ($(strip $(V)), 1)
Q =
else
diff --git a/drivers/staging/greybus/tools/lbtest b/drivers/staging/greybus/tools/lbtest
index d7353f1a2a6f..47c481239e98 100755
--- a/drivers/staging/greybus/tools/lbtest
+++ b/drivers/staging/greybus/tools/lbtest
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2015 Google, Inc.
# Copyright (c) 2015 Linaro, Ltd.
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
index fbe589fca840..c51610ce24af 100644
--- a/drivers/staging/greybus/tools/loopback_test.c
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
/*
* Loopback test application
*
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index c6d01b800d3c..8a006323c3c1 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UART driver for the Greybus "generic" UART module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
- * Released under the GPLv2 only.
- *
* Heavily based on drivers/usb/class/cdc-acm.c and
* drivers/usb/serial/usb-serial.c.
*/
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
index f93a76d02de6..1c246c73a085 100644
--- a/drivers/staging/greybus/usb.c
+++ b/drivers/staging/greybus/usb.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB host driver for the Greybus "generic" USB module.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
- *
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 5cd8a50d41ad..3e5dedeacd5c 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Greybus Vibrator protocol driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
- *
- * Released under the GPLv2 only.
*/
#include <linux/kernel.h>
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index bcbdc7340b55..fa8b27e091a2 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -106,7 +106,7 @@ static int readmagic_bitstream(u8 *bitdata, int *offset)
read_bitstream(bitdata, buf, offset, 13);
r = memcmp(buf, bits_magic, 13);
if (r) {
- pr_err("error: corrupted header");
+ pr_err("error: corrupted header\n");
return -EINVAL;
}
pr_info("bitstream file magic number Ok\n");
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 54ef0deed28f..ec42544a46aa 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -26,9 +26,6 @@ Then fill in the following:
pointer to a structure with elements that tend to be fixed for
large sets of different parts supported by a given driver.
This contains:
- * info->driver_module:
- Set to THIS_MODULE. Used to ensure correct ownership
- of various resources allocate by the core.
* info->event_attrs:
Attributes used to enable / disable hardware events.
* info->attrs:
diff --git a/drivers/staging/iio/Documentation/trigger.txt b/drivers/staging/iio/Documentation/trigger.txt
index 7c0e505e4f04..299a1add98bf 100644
--- a/drivers/staging/iio/Documentation/trigger.txt
+++ b/drivers/staging/iio/Documentation/trigger.txt
@@ -10,10 +10,6 @@ struct iio_trig *trig = iio_trigger_alloc("<trigger format string>", ...);
allocates a trigger structure. The key elements to then fill in within
a driver are:
-trig->owner
- Typically set to THIS_MODULE. Used to ensure correct
- ownership of core allocated resources.
-
trig->set_trigger_state:
Function that enables / disables the underlying source of the trigger.
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 0cfd05d5bf49..455bffc29649 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the industrial I/O core.
#
diff --git a/drivers/staging/iio/accel/adis16201.c b/drivers/staging/iio/accel/adis16201.c
index fbc240663621..2ebd27536216 100644
--- a/drivers/staging/iio/accel/adis16201.c
+++ b/drivers/staging/iio/accel/adis16201.c
@@ -284,7 +284,6 @@ static const struct iio_info adis16201_info = {
.read_raw = adis16201_read_raw,
.write_raw = adis16201_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16201_status_error_msgs[] = {
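The .driver_module removals here and in the iio drivers that follow (together with the documentation removals above) rest on the assumption that the IIO core now records the owning module at registration time, so the per-device info structs carry only callbacks. A trimmed, hypothetical iio_info of that shape:

#include <linux/iio/iio.h>

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask);
static int example_write_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int val, int val2, long mask);

static const struct iio_info example_info = {
	.read_raw = example_read_raw,
	.write_raw = example_write_raw,
};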
diff --git a/drivers/staging/iio/accel/adis16203.c b/drivers/staging/iio/accel/adis16203.c
index 4e3fa7592d3f..b3e4571340ab 100644
--- a/drivers/staging/iio/accel/adis16203.c
+++ b/drivers/staging/iio/accel/adis16203.c
@@ -232,7 +232,6 @@ static const struct iio_info adis16203_info = {
.read_raw = adis16203_read_raw,
.write_raw = adis16203_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16203_status_error_msgs[] = {
diff --git a/drivers/staging/iio/accel/adis16209.c b/drivers/staging/iio/accel/adis16209.c
index 8485c024e3f5..7fcef9a2590a 100644
--- a/drivers/staging/iio/accel/adis16209.c
+++ b/drivers/staging/iio/accel/adis16209.c
@@ -285,7 +285,6 @@ static const struct iio_info adis16209_info = {
.read_raw = adis16209_read_raw,
.write_raw = adis16209_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16209_status_error_msgs[] = {
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
index 109cd94b5ac3..fff6d99089cc 100644
--- a/drivers/staging/iio/accel/adis16240.c
+++ b/drivers/staging/iio/accel/adis16240.c
@@ -361,7 +361,6 @@ static const struct iio_info adis16240_info = {
.read_raw = adis16240_read_raw,
.write_raw = adis16240_write_raw,
.update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
};
static const char * const adis16240_status_error_msgs[] = {
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index bf18bdd7c99d..ebe83c1ad362 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for industrial I/O ADC drivers
#
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 6150d2780e22..cadfb96734ed 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -162,6 +162,7 @@ struct ad7192_state {
u32 scale_avail[8][2];
u8 gpocon;
u8 devid;
+ struct mutex lock; /* protect sensor state */
struct ad_sigma_delta sd;
};
@@ -461,10 +462,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->lock);
*val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
*val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
*val = 0;
@@ -508,6 +509,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
+ mutex_lock(&st->lock);
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
ret = 0;
@@ -521,6 +523,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ad7192_calibrate_all(st);
break;
}
+ mutex_unlock(&st->lock);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val) {
@@ -567,7 +570,6 @@ static const struct iio_info ad7192_info = {
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7192_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad7195_info = {
@@ -576,7 +578,6 @@ static const struct iio_info ad7195_info = {
.write_raw_get_fmt = ad7192_write_raw_get_fmt,
.attrs = &ad7195_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad7192_channels[] = {
@@ -632,6 +633,8 @@ static int ad7192_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
+ mutex_init(&st->lock);
+
st->avdd = devm_regulator_get(&spi->dev, "avdd");
if (IS_ERR(st->avdd))
return PTR_ERR(st->avdd);
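The ad7192 hunks above add a per-driver mutex, initialize it in probe, and use it instead of the IIO core's indio_dev->mlock to serialize access to the cached scale and gain state. The basic shape of the pattern, with hypothetical names:

#include <linux/mutex.h>
#include <linux/iio/iio.h>

struct example_state {
	struct mutex lock;	/* protects the cached gain/scale below */
	unsigned int gain;
	u32 scale_avail[8][2];
};

static void example_state_init(struct example_state *st)
{
	mutex_init(&st->lock);	/* from probe, before the device is registered */
}

static int example_read_scale(struct example_state *st, int *val, int *val2)
{
	mutex_lock(&st->lock);
	*val = st->scale_avail[st->gain][0];
	*val2 = st->scale_avail[st->gain][1];
	mutex_unlock(&st->lock);

	return IIO_VAL_INT_PLUS_NANO;
}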
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index f85dde9805e0..b736275c10f5 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -828,7 +828,6 @@ static const struct iio_info ad7280_info = {
.read_raw = ad7280_read_raw,
.event_attrs = &ad7280_event_attrs_group,
.attrs = &ad7280_attrs_group,
- .driver_module = THIS_MODULE,
};
static const struct ad7280_platform_data ad7793_default_pdata = {
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
index 18f5f139117e..25b9fcd5e3a4 100644
--- a/drivers/staging/iio/adc/ad7606.c
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -373,26 +373,22 @@ static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
};
static const struct iio_info ad7606_info_no_os_or_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
};
static const struct iio_info ad7606_info_os_and_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os_and_range,
};
static const struct iio_info ad7606_info_os = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_os,
};
static const struct iio_info ad7606_info_range = {
- .driver_module = THIS_MODULE,
.read_raw = &ad7606_read_raw,
.write_raw = &ad7606_write_raw,
.attrs = &ad7606_attribute_group_range,
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index dec3ba6eba8a..a7797af579b9 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -155,7 +155,6 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
static const struct iio_info ad7780_info = {
.read_raw = ad7780_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad7780_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 17d280581e24..bfe180a475ee 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -335,7 +335,6 @@ static const struct attribute_group ad7816_event_attribute_group = {
static const struct iio_info ad7816_info = {
.attrs = &ad7816_attribute_group,
.event_attrs = &ad7816_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index b2bce26499f5..2d33632c00e8 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2087,13 +2087,11 @@ EXPORT_SYMBOL_GPL(adt7316_pm_ops);
static const struct iio_info adt7316_info = {
.attrs = &adt7316_attribute_group,
.event_attrs = &adt7316_event_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info adt7516_info = {
.attrs = &adt7516_attribute_group,
.event_attrs = &adt7516_event_attribute_group,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index a6f249e9c1e1..2fe916c48848 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -569,7 +569,6 @@ static const struct attribute_group ad7150_event_attribute_group = {
static const struct iio_info ad7150_info = {
.event_attrs = &ad7150_event_attribute_group,
- .driver_module = THIS_MODULE,
.read_raw = &ad7150_read_raw,
.read_event_config = &ad7150_read_event_config,
.write_event_config = &ad7150_write_event_config,
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index ff10d1f0a7e4..61377ca444de 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -441,7 +441,6 @@ static const struct iio_info ad7152_info = {
.read_raw = ad7152_read_raw,
.write_raw = ad7152_write_raw,
.write_raw_get_fmt = ad7152_write_raw_get_fmt,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad7152_channels[] = {
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index cdcb4fccf3fe..a124853a05f0 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -667,7 +667,6 @@ static const struct iio_info ad7746_info = {
.attrs = &ad7746_attribute_group,
.read_raw = ad7746_read_raw,
.write_raw = ad7746_write_raw,
- .driver_module = THIS_MODULE,
};
/*
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 6da46ede7ee0..c73eff1f8d73 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -290,7 +290,6 @@ static const struct attribute_group ad9832_attribute_group = {
static const struct iio_info ad9832_info = {
.attrs = &ad9832_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9832_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 995acdd7c942..4c6d4043903e 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -381,12 +381,10 @@ static const struct attribute_group ad9833_attribute_group = {
static const struct iio_info ad9834_info = {
.attrs = &ad9834_attribute_group,
- .driver_module = THIS_MODULE,
};
static const struct iio_info ad9833_info = {
.attrs = &ad9833_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad9834_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index 967524583d8a..4e7630caf7d3 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -103,7 +103,6 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
static const struct iio_info adis16060_info = {
.read_raw = adis16060_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec adis16060_channels[] = {
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3d539eeb0e26..2b28fb9c0048 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -575,7 +575,6 @@ out:
static const struct iio_info ad5933_info = {
.read_raw = ad5933_read_raw,
.attrs = &ad5933_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/light/tsl2x7x.c b/drivers/staging/iio/light/tsl2x7x.c
index 786e93f16ce9..42ed9c015aaf 100644
--- a/drivers/staging/iio/light/tsl2x7x.c
+++ b/drivers/staging/iio/light/tsl2x7x.c
@@ -15,112 +15,112 @@
* more details.
*/
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include "tsl2x7x.h"
/* Cal defs*/
-#define PROX_STAT_CAL 0
-#define PROX_STAT_SAMP 1
-#define MAX_SAMPLES_CAL 200
+#define PROX_STAT_CAL 0
+#define PROX_STAT_SAMP 1
+#define MAX_SAMPLES_CAL 200
/* TSL2X7X Device ID */
-#define TRITON_ID 0x00
-#define SWORDFISH_ID 0x30
-#define HALIBUT_ID 0x20
+#define TRITON_ID 0x00
+#define SWORDFISH_ID 0x30
+#define HALIBUT_ID 0x20
/* Lux calculation constants */
-#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
+#define TSL2X7X_LUX_CALC_OVER_FLOW 65535
/* TAOS Register definitions - note:
* depending on device, some of these register are not used and the
* register address is benign.
*/
/* 2X7X register offsets */
-#define TSL2X7X_MAX_CONFIG_REG 16
+#define TSL2X7X_MAX_CONFIG_REG 16
/* Device Registers and Masks */
-#define TSL2X7X_CNTRL 0x00
-#define TSL2X7X_ALS_TIME 0X01
-#define TSL2X7X_PRX_TIME 0x02
-#define TSL2X7X_WAIT_TIME 0x03
-#define TSL2X7X_ALS_MINTHRESHLO 0X04
-#define TSL2X7X_ALS_MINTHRESHHI 0X05
-#define TSL2X7X_ALS_MAXTHRESHLO 0X06
-#define TSL2X7X_ALS_MAXTHRESHHI 0X07
-#define TSL2X7X_PRX_MINTHRESHLO 0X08
-#define TSL2X7X_PRX_MINTHRESHHI 0X09
-#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
-#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
-#define TSL2X7X_PERSISTENCE 0x0C
-#define TSL2X7X_PRX_CONFIG 0x0D
-#define TSL2X7X_PRX_COUNT 0x0E
-#define TSL2X7X_GAIN 0x0F
-#define TSL2X7X_NOTUSED 0x10
-#define TSL2X7X_REVID 0x11
-#define TSL2X7X_CHIPID 0x12
-#define TSL2X7X_STATUS 0x13
-#define TSL2X7X_ALS_CHAN0LO 0x14
-#define TSL2X7X_ALS_CHAN0HI 0x15
-#define TSL2X7X_ALS_CHAN1LO 0x16
-#define TSL2X7X_ALS_CHAN1HI 0x17
-#define TSL2X7X_PRX_LO 0x18
-#define TSL2X7X_PRX_HI 0x19
+#define TSL2X7X_CNTRL 0x00
+#define TSL2X7X_ALS_TIME 0X01
+#define TSL2X7X_PRX_TIME 0x02
+#define TSL2X7X_WAIT_TIME 0x03
+#define TSL2X7X_ALS_MINTHRESHLO 0X04
+#define TSL2X7X_ALS_MINTHRESHHI 0X05
+#define TSL2X7X_ALS_MAXTHRESHLO 0X06
+#define TSL2X7X_ALS_MAXTHRESHHI 0X07
+#define TSL2X7X_PRX_MINTHRESHLO 0X08
+#define TSL2X7X_PRX_MINTHRESHHI 0X09
+#define TSL2X7X_PRX_MAXTHRESHLO 0X0A
+#define TSL2X7X_PRX_MAXTHRESHHI 0X0B
+#define TSL2X7X_PERSISTENCE 0x0C
+#define TSL2X7X_PRX_CONFIG 0x0D
+#define TSL2X7X_PRX_COUNT 0x0E
+#define TSL2X7X_GAIN 0x0F
+#define TSL2X7X_NOTUSED 0x10
+#define TSL2X7X_REVID 0x11
+#define TSL2X7X_CHIPID 0x12
+#define TSL2X7X_STATUS 0x13
+#define TSL2X7X_ALS_CHAN0LO 0x14
+#define TSL2X7X_ALS_CHAN0HI 0x15
+#define TSL2X7X_ALS_CHAN1LO 0x16
+#define TSL2X7X_ALS_CHAN1HI 0x17
+#define TSL2X7X_PRX_LO 0x18
+#define TSL2X7X_PRX_HI 0x19
/* tsl2X7X cmd reg masks */
-#define TSL2X7X_CMD_REG 0x80
-#define TSL2X7X_CMD_SPL_FN 0x60
+#define TSL2X7X_CMD_REG 0x80
+#define TSL2X7X_CMD_SPL_FN 0x60
-#define TSL2X7X_CMD_PROX_INT_CLR 0X05
-#define TSL2X7X_CMD_ALS_INT_CLR 0x06
-#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
+#define TSL2X7X_CMD_PROX_INT_CLR 0X05
+#define TSL2X7X_CMD_ALS_INT_CLR 0x06
+#define TSL2X7X_CMD_PROXALS_INT_CLR 0X07
/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_ADC_ENBL 0x02
-#define TSL2X7X_CNTL_PWR_ON 0x01
+#define TSL2X7X_CNTL_ADC_ENBL 0x02
+#define TSL2X7X_CNTL_PWR_ON 0x01
/* tsl2X7X status reg masks */
-#define TSL2X7X_STA_ADC_VALID 0x01
-#define TSL2X7X_STA_PRX_VALID 0x02
-#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID |\
- TSL2X7X_STA_PRX_VALID)
-#define TSL2X7X_STA_ALS_INTR 0x10
-#define TSL2X7X_STA_PRX_INTR 0x20
+#define TSL2X7X_STA_ADC_VALID 0x01
+#define TSL2X7X_STA_PRX_VALID 0x02
+#define TSL2X7X_STA_ADC_PRX_VALID (TSL2X7X_STA_ADC_VALID | \
+ TSL2X7X_STA_PRX_VALID)
+#define TSL2X7X_STA_ALS_INTR 0x10
+#define TSL2X7X_STA_PRX_INTR 0x20
/* tsl2X7X cntrl reg masks */
-#define TSL2X7X_CNTL_REG_CLEAR 0x00
-#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
-#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
-#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
-#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
-#define TSL2X7X_CNTL_PWRON 0x01
-#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
-#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
-#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
-#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
+#define TSL2X7X_CNTL_REG_CLEAR 0x00
+#define TSL2X7X_CNTL_PROX_INT_ENBL 0X20
+#define TSL2X7X_CNTL_ALS_INT_ENBL 0X10
+#define TSL2X7X_CNTL_WAIT_TMR_ENBL 0X08
+#define TSL2X7X_CNTL_PROX_DET_ENBL 0X04
+#define TSL2X7X_CNTL_PWRON 0x01
+#define TSL2X7X_CNTL_ALSPON_ENBL 0x03
+#define TSL2X7X_CNTL_INTALSPON_ENBL 0x13
+#define TSL2X7X_CNTL_PROXPON_ENBL 0x0F
+#define TSL2X7X_CNTL_INTPROXPON_ENBL 0x2F
/*Prox diode to use */
-#define TSL2X7X_DIODE0 0x10
-#define TSL2X7X_DIODE1 0x20
-#define TSL2X7X_DIODE_BOTH 0x30
+#define TSL2X7X_DIODE0 0x10
+#define TSL2X7X_DIODE1 0x20
+#define TSL2X7X_DIODE_BOTH 0x30
/* LED Power */
-#define TSL2X7X_mA100 0x00
-#define TSL2X7X_mA50 0x40
-#define TSL2X7X_mA25 0x80
-#define TSL2X7X_mA13 0xD0
-#define TSL2X7X_MAX_TIMER_CNT (0xFF)
+#define TSL2X7X_100_mA 0x00
+#define TSL2X7X_50_mA 0x40
+#define TSL2X7X_25_mA 0x80
+#define TSL2X7X_13_mA 0xD0
+#define TSL2X7X_MAX_TIMER_CNT 0xFF
-#define TSL2X7X_MIN_ITIME 3
+#define TSL2X7X_MIN_ITIME 3
/* TAOS txx2x7x Device family members */
enum {
@@ -142,11 +142,6 @@ enum {
TSL2X7X_CHIP_SUSPENDED = 2
};
-struct tsl2x7x_parse_result {
- int integer;
- int fract;
-};
-
/* Per-device data */
struct tsl2x7x_als_info {
u16 als_ch0;
@@ -174,7 +169,7 @@ struct tsl2X7X_chip {
struct i2c_client *client;
u16 prox_data;
struct tsl2x7x_als_info als_cur_info;
- struct tsl2x7x_settings tsl2x7x_settings;
+ struct tsl2x7x_settings settings;
struct tsl2X7X_platform_data *pdata;
int als_time_scale;
int als_saturation;
@@ -192,25 +187,25 @@ struct tsl2X7X_chip {
};
/* Different devices require different coefficents */
-static const struct tsl2x7x_lux tsl2x71_lux_table[] = {
+static const struct tsl2x7x_lux tsl2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 14461, 611, 1211 },
{ 18540, 352, 623 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tmd2x71_lux_table[] = {
+static const struct tsl2x7x_lux tmd2x71_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 11635, 115, 256 },
{ 15536, 87, 179 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tsl2x72_lux_table[] = {
+static const struct tsl2x7x_lux tsl2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 14013, 466, 917 },
{ 18222, 310, 552 },
{ 0, 0, 0 },
};
-static const struct tsl2x7x_lux tmd2x72_lux_table[] = {
+static const struct tsl2x7x_lux tmd2x72_lux_table[TSL2X7X_DEF_LUX_TABLE_SZ] = {
{ 13218, 130, 262 },
{ 17592, 92, 169 },
{ 0, 0, 0 },
@@ -248,14 +243,14 @@ static const struct tsl2x7x_settings tsl2x7x_default_settings = {
.prox_pulse_count = 8
};
-static const s16 tsl2X7X_als_gainadj[] = {
+static const s16 tsl2x7x_als_gain[] = {
1,
8,
16,
120
};
-static const s16 tsl2X7X_prx_gainadj[] = {
+static const s16 tsl2x7x_prx_gain[] = {
1,
2,
4,
@@ -353,9 +348,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* clear any existing interrupt status */
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_REG |
TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
+ TSL2X7X_CMD_ALS_INT_CLR);
if (ret < 0) {
dev_err(&chip->client->dev,
"i2c_write_command failed - err = %d\n", ret);
@@ -369,7 +364,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
chip->als_cur_info.als_ch0 = ch0;
chip->als_cur_info.als_ch1 = ch1;
- if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation)) {
+ if (ch0 >= chip->als_saturation || ch1 >= chip->als_saturation) {
lux = TSL2X7X_LUX_CALC_OVER_FLOW;
goto return_max;
}
@@ -389,11 +384,10 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
if (p->ratio == 0) {
lux = 0;
} else {
- ch0lux = DIV_ROUND_UP(ch0 * p->ch0,
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- ch1lux = DIV_ROUND_UP(ch1 * p->ch1,
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain]);
- lux = ch0lux - ch1lux;
+ lux = DIV_ROUND_UP(ch0 * p->ch0,
+ tsl2x7x_als_gain[chip->settings.als_gain]) -
+ DIV_ROUND_UP(ch1 * p->ch1,
+ tsl2x7x_als_gain[chip->settings.als_gain]);
}
/* note: lux is 31 bit max at this point */
@@ -419,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
*/
lux64 = lux;
- lux64 = lux64 * chip->tsl2x7x_settings.als_gain_trim;
+ lux64 = lux64 * chip->settings.als_gain_trim;
lux64 >>= 8;
lux = lux64;
lux = (lux + 500) / 1000;
@@ -472,7 +466,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
case tmd2771:
if (!(ret & TSL2X7X_STA_ADC_VALID))
goto prox_poll_err;
- break;
+ break;
case tsl2572:
case tsl2672:
case tmd2672:
@@ -480,7 +474,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
case tmd2772:
if (!(ret & TSL2X7X_STA_PRX_VALID))
goto prox_poll_err;
- break;
+ break;
}
for (i = 0; i < 2; i++) {
@@ -514,12 +508,10 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&chip->tsl2x7x_settings,
- chip->pdata->platform_default_settings,
+ memcpy(&chip->settings, chip->pdata->platform_default_settings,
sizeof(tsl2x7x_default_settings));
else
- memcpy(&chip->tsl2x7x_settings,
- &tsl2x7x_default_settings,
+ memcpy(&chip->settings, &tsl2x7x_default_settings,
sizeof(tsl2x7x_default_settings));
/* Load up the proper lux table. */
@@ -529,8 +521,8 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
sizeof(chip->pdata->platform_lux_table));
else
memcpy(chip->tsl2x7x_device_lux,
- (struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
- MAX_DEFAULT_TABLE_BYTES);
+ tsl2x7x_default_lux_table_group[chip->id],
+ TSL2X7X_DEFAULT_TABLE_BYTES);
}
/**
@@ -542,9 +534,7 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int gain_trim_val;
- int ret;
- int lux_val;
+ int ret, lux_val;
ret = i2c_smbus_read_byte_data(chip->client,
TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
@@ -575,16 +565,16 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
- gain_trim_val = ((chip->tsl2x7x_settings.als_cal_target)
- * chip->tsl2x7x_settings.als_gain_trim) / lux_val;
- if ((gain_trim_val < 250) || (gain_trim_val > 4000))
+ ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
+ lux_val;
+ if (ret < 250 || ret > 4000)
return -ERANGE;
- chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
+ chip->settings.als_gain_trim = ret;
dev_info(&chip->client->dev,
"%s als_calibrate completed\n", chip->client->name);
- return (int)gain_trim_val;
+ return ret;
}
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -602,34 +592,30 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->pdata->power_on(indio_dev);
/* Non calculated parameters */
- chip->tsl2x7x_config[TSL2X7X_PRX_TIME] =
- chip->tsl2x7x_settings.prx_time;
- chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] =
- chip->tsl2x7x_settings.wait_time;
- chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] =
- chip->tsl2x7x_settings.prox_config;
+ chip->tsl2x7x_config[TSL2X7X_PRX_TIME] = chip->settings.prx_time;
+ chip->tsl2x7x_config[TSL2X7X_WAIT_TIME] = chip->settings.wait_time;
+ chip->tsl2x7x_config[TSL2X7X_PRX_CONFIG] = chip->settings.prox_config;
chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_low) & 0xFF;
+ (chip->settings.als_thresh_low) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MINTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_low >> 8) & 0xFF;
+ (chip->settings.als_thresh_low >> 8) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.als_thresh_high) & 0xFF;
+ (chip->settings.als_thresh_high) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_ALS_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.als_thresh_high >> 8) & 0xFF;
- chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] =
- chip->tsl2x7x_settings.persistence;
+ (chip->settings.als_thresh_high >> 8) & 0xFF;
+ chip->tsl2x7x_config[TSL2X7X_PERSISTENCE] = chip->settings.persistence;
chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
- chip->tsl2x7x_settings.prox_pulse_count;
+ chip->settings.prox_pulse_count;
chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+ (chip->settings.prox_thres_low) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
+ (chip->settings.prox_thres_low >> 8) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
- (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+ (chip->settings.prox_thres_high) & 0xFF;
chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
- (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
+ (chip->settings.prox_thres_high >> 8) & 0xFF;
/* and make sure we're not already on */
if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
@@ -639,7 +625,7 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
}
/* determine als integration register */
- als_count = (chip->tsl2x7x_settings.als_time * 100 + 135) / 270;
+ als_count = (chip->settings.als_time * 100 + 135) / 270;
if (!als_count)
als_count = 1; /* ensure at least one cycle */
@@ -649,9 +635,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Set the gain based on tsl2x7x_settings struct */
chip->tsl2x7x_config[TSL2X7X_GAIN] =
- chip->tsl2x7x_settings.als_gain |
- (TSL2X7X_mA100 | TSL2X7X_DIODE1)
- | ((chip->tsl2x7x_settings.prox_gain) << 2);
+ chip->settings.als_gain |
+ (TSL2X7X_100_mA | TSL2X7X_DIODE1) |
+ (chip->settings.prox_gain << 2);
/* set chip struct re scaling and saturation */
chip->als_saturation = als_count * 922; /* 90% of full scale */
@@ -706,18 +692,18 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->tsl2x7x_chip_status = TSL2X7X_CHIP_WORKING;
- if (chip->tsl2x7x_settings.interrupts_en != 0) {
+ if (chip->settings.interrupts_en != 0) {
dev_info(&chip->client->dev, "Setting Up Interrupt(s)\n");
reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
- if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
+ if (chip->settings.interrupts_en == 0x20 ||
+ chip->settings.interrupts_en == 0x30)
reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
- reg_val |= chip->tsl2x7x_settings.interrupts_en;
+ reg_val |= chip->settings.interrupts_en;
ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CNTRL), reg_val);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL,
+ reg_val);
if (ret < 0)
dev_err(&chip->client->dev,
"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -766,8 +752,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
* put device back into proper state, and unlock
* resource.
*/
-static
-int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
+static int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
int device_status = chip->tsl2x7x_chip_status;
@@ -791,9 +776,8 @@ unlock:
return ret;
}
-static
-void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
+static void tsl2x7x_prox_calculate(int *data, int length,
+ struct tsl2x7x_prox_stat *statP)
{
int i;
int sample_sum;
@@ -837,25 +821,25 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
u8 tmp_irq_settings;
u8 current_state = chip->tsl2x7x_chip_status;
- if (chip->tsl2x7x_settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
+ if (chip->settings.prox_max_samples_cal > MAX_SAMPLES_CAL) {
dev_err(&chip->client->dev,
"max prox samples cal is too big: %d\n",
- chip->tsl2x7x_settings.prox_max_samples_cal);
- chip->tsl2x7x_settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
+ chip->settings.prox_max_samples_cal);
+ chip->settings.prox_max_samples_cal = MAX_SAMPLES_CAL;
}
/* have to stop to change settings */
tsl2x7x_chip_off(indio_dev);
/* Enable proximity detection save just in case prox not wanted yet*/
- tmp_irq_settings = chip->tsl2x7x_settings.interrupts_en;
- chip->tsl2x7x_settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
+ tmp_irq_settings = chip->settings.interrupts_en;
+ chip->settings.interrupts_en |= TSL2X7X_CNTL_PROX_INT_ENBL;
/*turn on device if not already on*/
tsl2x7x_chip_on(indio_dev);
/*gather the samples*/
- for (i = 0; i < chip->tsl2x7x_settings.prox_max_samples_cal; i++) {
+ for (i = 0; i < chip->settings.prox_max_samples_cal; i++) {
usleep_range(15000, 17500);
tsl2x7x_get_prox(indio_dev);
prox_history[i] = chip->prox_data;
@@ -866,25 +850,25 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
tsl2x7x_chip_off(indio_dev);
calP = &prox_stat_data[PROX_STAT_CAL];
tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal,
- calP);
- chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
+ chip->settings.prox_max_samples_cal, calP);
+ chip->settings.prox_thres_high = (calP->max << 1) - calP->mean;
dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
calP->min, calP->mean, calP->max);
dev_info(&chip->client->dev,
"%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+ chip->client->name, chip->settings.prox_thres_high);
/* back to the way they were */
- chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
+ chip->settings.interrupts_en = tmp_irq_settings;
if (current_state == TSL2X7X_CHIP_WORKING)
tsl2x7x_chip_on(indio_dev);
}
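The calibration above averages a run of proximity readings and sets the high threshold to (max << 1) - mean. A self-contained sketch of that statistic; the sample values are made up for illustration:

#include <stdio.h>

static void prox_calculate(const int *data, int len,
			   int *min, int *max, int *mean)
{
	int i, sum = 0;

	*min = *max = data[0];
	for (i = 0; i < len; i++) {
		sum += data[i];
		if (data[i] < *min)
			*min = data[i];
		if (data[i] > *max)
			*max = data[i];
	}
	*mean = sum / len;
}

int main(void)
{
	int samples[] = { 110, 123, 119, 131, 127 };	/* made-up readings */
	int min, max, mean, thresh;

	prox_calculate(samples, 5, &min, &max, &mean);
	thresh = (max << 1) - mean;	/* same rule as tsl2x7x_prox_cal() */
	printf("min=%d mean=%d max=%d thresh=%d\n", min, max, mean, thresh);
	return 0;
}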
-static ssize_t in_illuminance0_calibscale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t
+in_illuminance0_calibscale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -900,69 +884,23 @@ static ssize_t in_illuminance0_calibscale_available_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", "1 8 16 120");
}
-static ssize_t in_proximity0_calibscale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
-}
-
-static ssize_t in_illuminance0_integration_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- y /= 1000;
- z %= 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_illuminance0_integration_time_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- result.fract /= 3;
- chip->tsl2x7x_settings.als_time =
- TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
-
- dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
-
- tsl2x7x_invoke_change(indio_dev);
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
+static IIO_CONST_ATTR(in_proximity0_calibscale_available, "1 2 4 8");
static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
".00272 - .696");
static ssize_t in_illuminance0_target_input_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n",
- chip->tsl2x7x_settings.als_cal_target);
+ return snprintf(buf, PAGE_SIZE, "%d\n", chip->settings.als_cal_target);
}
static ssize_t in_illuminance0_target_input_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -973,7 +911,7 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev,
return -EINVAL;
if (value)
- chip->tsl2x7x_settings.als_cal_target = value;
+ chip->settings.als_cal_target = value;
ret = tsl2x7x_invoke_change(indio_dev);
if (ret < 0)
@@ -982,111 +920,9 @@ static ssize_t in_illuminance0_target_input_store(struct device *dev,
return len;
}
-/* persistence settings */
-static ssize_t in_intensity0_thresh_period_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * (chip->tsl2x7x_settings.persistence & 0x0F);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_intensity0_thresh_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.als_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP((result.integer * 1000) + result.fract, z);
-
- chip->tsl2x7x_settings.persistence &= 0xF0;
- chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
-
- dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static ssize_t in_proximity0_thresh_period_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
- int y, z, filter_delay;
-
- /* Determine integration time */
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
- filter_delay = z * ((chip->tsl2x7x_settings.persistence & 0xF0) >> 4);
- y = filter_delay / 1000;
- z = filter_delay % 1000;
-
- return snprintf(buf, PAGE_SIZE, "%d.%03d\n", y, z);
-}
-
-static ssize_t in_proximity0_thresh_period_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- struct tsl2x7x_parse_result result;
- int y, z, filter_delay;
- int ret;
-
- ret = iio_str_to_fixpoint(buf, 100, &result.integer, &result.fract);
- if (ret)
- return ret;
-
- y = (TSL2X7X_MAX_TIMER_CNT - (u8)chip->tsl2x7x_settings.prx_time) + 1;
- z = y * TSL2X7X_MIN_ITIME;
-
- filter_delay =
- DIV_ROUND_UP((result.integer * 1000) + result.fract, z);
-
- chip->tsl2x7x_settings.persistence &= 0x0F;
- chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
-
- dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
-
- ret = tsl2x7x_invoke_change(indio_dev);
- if (ret < 0)
- return ret;
-
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static ssize_t in_illuminance0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1106,14 +942,14 @@ static ssize_t in_illuminance0_calibrate_store(struct device *dev,
}
static ssize_t in_illuminance0_lux_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int i = 0;
int offset = 0;
- while (i < (TSL2X7X_MAX_LUX_TABLE_SIZE * 3)) {
+ while (i < TSL2X7X_MAX_LUX_TABLE_SIZE) {
offset += snprintf(buf + offset, PAGE_SIZE, "%u,%u,%u,",
chip->tsl2x7x_device_lux[i].ratio,
chip->tsl2x7x_device_lux[i].ch0,
@@ -1134,8 +970,8 @@ static ssize_t in_illuminance0_lux_table_show(struct device *dev,
}
static ssize_t in_illuminance0_lux_table_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1176,8 +1012,8 @@ static ssize_t in_illuminance0_lux_table_store(struct device *dev,
}
static ssize_t in_proximity0_calibrate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1205,9 +1041,9 @@ static int tsl2x7x_read_interrupt_config(struct iio_dev *indio_dev,
int ret;
if (chan->type == IIO_INTENSITY)
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x10);
+ ret = !!(chip->settings.interrupts_en & 0x10);
else
- ret = !!(chip->tsl2x7x_settings.interrupts_en & 0x20);
+ ret = !!(chip->settings.interrupts_en & 0x20);
return ret;
}
@@ -1223,14 +1059,14 @@ static int tsl2x7x_write_interrupt_config(struct iio_dev *indio_dev,
if (chan->type == IIO_INTENSITY) {
if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x10;
+ chip->settings.interrupts_en |= 0x10;
else
- chip->tsl2x7x_settings.interrupts_en &= 0x20;
+ chip->settings.interrupts_en &= 0x20;
} else {
if (val)
- chip->tsl2x7x_settings.interrupts_en |= 0x20;
+ chip->settings.interrupts_en |= 0x20;
else
- chip->tsl2x7x_settings.interrupts_en &= 0x10;
+ chip->settings.interrupts_en &= 0x10;
}
ret = tsl2x7x_invoke_change(indio_dev);
@@ -1248,18 +1084,19 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret = -EINVAL, y, z, filter_delay;
+ u8 time;
switch (info) {
case IIO_EV_INFO_VALUE:
if (chan->type == IIO_INTENSITY) {
switch (dir) {
case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.als_thresh_high = val;
+ chip->settings.als_thresh_high = val;
ret = 0;
break;
case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.als_thresh_low = val;
+ chip->settings.als_thresh_low = val;
ret = 0;
break;
default:
@@ -1268,11 +1105,11 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
} else {
switch (dir) {
case IIO_EV_DIR_RISING:
- chip->tsl2x7x_settings.prox_thres_high = val;
+ chip->settings.prox_thres_high = val;
ret = 0;
break;
case IIO_EV_DIR_FALLING:
- chip->tsl2x7x_settings.prox_thres_low = val;
+ chip->settings.prox_thres_low = val;
ret = 0;
break;
default:
@@ -1280,6 +1117,33 @@ static int tsl2x7x_write_event_value(struct iio_dev *indio_dev,
}
}
break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY)
+ time = chip->settings.als_time;
+ else
+ time = chip->settings.prx_time;
+
+ y = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
+ z = y * TSL2X7X_MIN_ITIME;
+
+ filter_delay = DIV_ROUND_UP((val * 1000) + val2, z);
+
+ if (chan->type == IIO_INTENSITY) {
+ chip->settings.persistence &= 0xF0;
+ chip->settings.persistence |=
+ (filter_delay & 0x0F);
+ dev_info(&chip->client->dev, "%s: ALS persistence = %d",
+ __func__, filter_delay);
+ } else {
+ chip->settings.persistence &= 0x0F;
+ chip->settings.persistence |=
+ ((filter_delay << 4) & 0xF0);
+ dev_info(&chip->client->dev,
+ "%s: Proximity persistence = %d",
+ __func__, filter_delay);
+ }
+ ret = 0;
+ break;
default:
break;
}
@@ -1298,18 +1162,19 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret = -EINVAL, filter_delay, mult;
+ u8 time;
switch (info) {
case IIO_EV_INFO_VALUE:
if (chan->type == IIO_INTENSITY) {
switch (dir) {
case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.als_thresh_high;
+ *val = chip->settings.als_thresh_high;
ret = IIO_VAL_INT;
break;
case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.als_thresh_low;
+ *val = chip->settings.als_thresh_low;
ret = IIO_VAL_INT;
break;
default:
@@ -1318,11 +1183,11 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
} else {
switch (dir) {
case IIO_EV_DIR_RISING:
- *val = chip->tsl2x7x_settings.prox_thres_high;
+ *val = chip->settings.prox_thres_high;
ret = IIO_VAL_INT;
break;
case IIO_EV_DIR_FALLING:
- *val = chip->tsl2x7x_settings.prox_thres_low;
+ *val = chip->settings.prox_thres_low;
ret = IIO_VAL_INT;
break;
default:
@@ -1330,6 +1195,23 @@ static int tsl2x7x_read_event_value(struct iio_dev *indio_dev,
}
}
break;
+ case IIO_EV_INFO_PERIOD:
+ if (chan->type == IIO_INTENSITY) {
+ time = chip->settings.als_time;
+ mult = chip->settings.persistence & 0x0F;
+ } else {
+ time = chip->settings.prx_time;
+ mult = (chip->settings.persistence & 0xF0) >> 4;
+ }
+
+ /* Determine integration time */
+ *val = (TSL2X7X_MAX_TIMER_CNT - time) + 1;
+ *val2 = *val * TSL2X7X_MIN_ITIME;
+ filter_delay = *val2 * mult;
+ *val = filter_delay / 1000;
+ *val2 = filter_delay % 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
default:
break;
}
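The IIO_EV_INFO_PERIOD read-back above converts the persistence nibble into a time: integration cycles multiplied by the persistence count. A standalone sketch of the same arithmetic; TSL2X7X_MAX_TIMER_CNT = 0xFF and TSL2X7X_MIN_ITIME = 3 are assumed values here, not quoted from the header:

#include <stdio.h>

#define MAX_TIMER_CNT	0xFF	/* assumed TSL2X7X_MAX_TIMER_CNT */
#define MIN_ITIME	3	/* assumed TSL2X7X_MIN_ITIME, ms per cycle */

/* Threshold period: length of one integration times the persistence count. */
static int thresh_period_ms(unsigned char time_reg, unsigned char persist)
{
	int cycles = (MAX_TIMER_CNT - time_reg) + 1;

	return cycles * MIN_ITIME * persist;
}

int main(void)
{
	int ms = thresh_period_ms(0xED, 4);	/* 19 cycles * 3 ms * 4 = 228 */

	printf("%d.%03d\n", ms / 1000, ms % 1000);	/* prints 0.228 */
	return 0;
}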
@@ -1379,18 +1261,20 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT)
- *val =
- tsl2X7X_als_gainadj[chip->tsl2x7x_settings.als_gain];
+ *val = tsl2x7x_als_gain[chip->settings.als_gain];
else
- *val =
- tsl2X7X_prx_gainadj[chip->tsl2x7x_settings.prox_gain];
+ *val = tsl2x7x_prx_gain[chip->settings.prox_gain];
ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_CALIBBIAS:
- *val = chip->tsl2x7x_settings.als_gain_trim;
+ *val = chip->settings.als_gain_trim;
ret = IIO_VAL_INT;
break;
-
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = (TSL2X7X_MAX_TIMER_CNT - chip->settings.als_time) + 1;
+ *val2 = ((*val * TSL2X7X_MIN_ITIME) % 1000) / 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
default:
ret = -EINVAL;
}
@@ -1411,13 +1295,13 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
if (chan->type == IIO_INTENSITY) {
switch (val) {
case 1:
- chip->tsl2x7x_settings.als_gain = 0;
+ chip->settings.als_gain = 0;
break;
case 8:
- chip->tsl2x7x_settings.als_gain = 1;
+ chip->settings.als_gain = 1;
break;
case 16:
- chip->tsl2x7x_settings.als_gain = 2;
+ chip->settings.als_gain = 2;
break;
case 120:
switch (chip->id) {
@@ -1428,7 +1312,7 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tmd2772:
return -EINVAL;
}
- chip->tsl2x7x_settings.als_gain = 3;
+ chip->settings.als_gain = 3;
break;
case 128:
switch (chip->id) {
@@ -1439,7 +1323,7 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
case tmd2771:
return -EINVAL;
}
- chip->tsl2x7x_settings.als_gain = 3;
+ chip->settings.als_gain = 3;
break;
default:
return -EINVAL;
@@ -1447,16 +1331,16 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
} else {
switch (val) {
case 1:
- chip->tsl2x7x_settings.prox_gain = 0;
+ chip->settings.prox_gain = 0;
break;
case 2:
- chip->tsl2x7x_settings.prox_gain = 1;
+ chip->settings.prox_gain = 1;
break;
case 4:
- chip->tsl2x7x_settings.prox_gain = 2;
+ chip->settings.prox_gain = 2;
break;
case 8:
- chip->tsl2x7x_settings.prox_gain = 3;
+ chip->settings.prox_gain = 3;
break;
default:
return -EINVAL;
@@ -1464,9 +1348,15 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
}
break;
case IIO_CHAN_INFO_CALIBBIAS:
- chip->tsl2x7x_settings.als_gain_trim = val;
+ chip->settings.als_gain_trim = val;
break;
+ case IIO_CHAN_INFO_INT_TIME:
+ chip->settings.als_time =
+ TSL2X7X_MAX_TIMER_CNT - (val2 / TSL2X7X_MIN_ITIME);
+ dev_info(&chip->client->dev, "%s: als time = %d",
+ __func__, chip->settings.als_time);
+ break;
default:
return -EINVAL;
}
@@ -1474,12 +1364,8 @@ static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
return tsl2x7x_invoke_change(indio_dev);
}
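Going the other way, the IIO_CHAN_INFO_INT_TIME write above encodes a requested integration time as the counter maximum minus the number of steps. A hedged sketch of that encoding under the same assumed constants; it also assumes val2 arrives in the same unit as the step, which the hunk implies but does not state:

#include <stdio.h>

#define MAX_TIMER_CNT	0xFF	/* assumed TSL2X7X_MAX_TIMER_CNT */
#define MIN_ITIME	3	/* assumed integration step, same unit as 'time' */

/* Encode a requested integration time into the ALS timing register value. */
static unsigned char als_time_reg(int time)
{
	return MAX_TIMER_CNT - (time / MIN_ITIME);
}

int main(void)
{
	printf("0x%02x\n", als_time_reg(57));	/* 19 steps, prints 0xec */
	return 0;
}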
-static DEVICE_ATTR_RO(in_proximity0_calibscale_available);
-
static DEVICE_ATTR_RO(in_illuminance0_calibscale_available);
-static DEVICE_ATTR_RW(in_illuminance0_integration_time);
-
static DEVICE_ATTR_RW(in_illuminance0_target_input);
static DEVICE_ATTR_WO(in_illuminance0_calibrate);
@@ -1488,10 +1374,6 @@ static DEVICE_ATTR_WO(in_proximity0_calibrate);
static DEVICE_ATTR_RW(in_illuminance0_lux_table);
-static DEVICE_ATTR_RW(in_intensity0_thresh_period);
-
-static DEVICE_ATTR_RW(in_proximity0_thresh_period);
-
/* Use the default register values to identify the Taos device */
static int tsl2x7x_device_id(int *id, int target)
{
@@ -1559,7 +1441,6 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
static struct attribute *tsl2x7x_ALS_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
@@ -1574,49 +1455,31 @@ static struct attribute *tsl2x7x_PRX_device_attrs[] = {
static struct attribute *tsl2x7x_ALSPRX_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
- &dev_attr_in_proximity0_calibrate.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
static struct attribute *tsl2x7x_PRX2_device_attrs[] = {
&dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
static struct attribute *tsl2x7x_ALSPRX2_device_attrs[] = {
&dev_attr_in_illuminance0_calibscale_available.attr,
- &dev_attr_in_illuminance0_integration_time.attr,
&iio_const_attr_in_illuminance0_integration_time_available.dev_attr.attr,
&dev_attr_in_illuminance0_target_input.attr,
&dev_attr_in_illuminance0_calibrate.attr,
&dev_attr_in_illuminance0_lux_table.attr,
&dev_attr_in_proximity0_calibrate.attr,
- &dev_attr_in_proximity0_calibscale_available.attr,
+ &iio_const_attr_in_proximity0_calibscale_available.dev_attr.attr,
NULL
};
-static struct attribute *tsl2X7X_ALS_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- NULL,
-};
-
-static struct attribute *tsl2X7X_PRX_event_attrs[] = {
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
-static struct attribute *tsl2X7X_ALSPRX_event_attrs[] = {
- &dev_attr_in_intensity0_thresh_period.attr,
- &dev_attr_in_proximity0_thresh_period.attr,
- NULL,
-};
-
static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
[ALS] = {
.attrs = tsl2x7x_ALS_device_attrs,
@@ -1635,26 +1498,9 @@ static const struct attribute_group tsl2X7X_device_attr_group_tbl[] = {
},
};
-static const struct attribute_group tsl2X7X_event_attr_group_tbl[] = {
- [ALS] = {
- .attrs = tsl2X7X_ALS_event_attrs,
- .name = "events",
- },
- [PRX] = {
- .attrs = tsl2X7X_PRX_event_attrs,
- .name = "events",
- },
- [ALSPRX] = {
- .attrs = tsl2X7X_ALSPRX_event_attrs,
- .name = "events",
- },
-};
-
static const struct iio_info tsl2X7X_device_info[] = {
[ALS] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALS],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALS],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1664,8 +1510,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[PRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1675,8 +1519,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[ALSPRX] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1686,8 +1528,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[PRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[PRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[PRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1697,8 +1537,6 @@ static const struct iio_info tsl2X7X_device_info[] = {
},
[ALSPRX2] = {
.attrs = &tsl2X7X_device_attr_group_tbl[ALSPRX2],
- .event_attrs = &tsl2X7X_event_attr_group_tbl[ALSPRX],
- .driver_module = THIS_MODULE,
.read_raw = &tsl2x7x_read_raw,
.write_raw = &tsl2x7x_write_raw,
.read_event_value = &tsl2x7x_read_event_value,
@@ -1719,6 +1557,10 @@ static const struct iio_event_spec tsl2x7x_events[] = {
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
},
};
@@ -1729,7 +1571,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1768,7 +1611,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1816,7 +1660,8 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
.type = IIO_LIGHT,
.indexed = 1,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
}, {
.type = IIO_INTENSITY,
.indexed = 1,
@@ -1874,7 +1719,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
return -EINVAL;
}
- ret = i2c_smbus_write_byte(clientp, (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ ret = i2c_smbus_write_byte(clientp, TSL2X7X_CMD_REG | TSL2X7X_CNTRL);
if (ret < 0) {
dev_err(&clientp->dev, "write to cmd reg failed. err = %d\n",
ret);
@@ -1982,7 +1827,7 @@ static int tsl2x7x_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id tsl2x7x_idtable[] = {
+static const struct i2c_device_id tsl2x7x_idtable[] = {
{ "tsl2571", tsl2571 },
{ "tsl2671", tsl2671 },
{ "tmd2671", tmd2671 },
diff --git a/drivers/staging/iio/light/tsl2x7x.h b/drivers/staging/iio/light/tsl2x7x.h
index ecae92211216..df00f2ec1719 100644
--- a/drivers/staging/iio/light/tsl2x7x.h
+++ b/drivers/staging/iio/light/tsl2x7x.h
@@ -23,18 +23,19 @@
#define __TSL2X7X_H
#include <linux/pm.h>
-/* Max number of segments allowable in LUX table */
-#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
-#define MAX_DEFAULT_TABLE_BYTES (sizeof(int) * TSL2X7X_MAX_LUX_TABLE_SIZE)
-
-struct iio_dev;
-
struct tsl2x7x_lux {
unsigned int ratio;
unsigned int ch0;
unsigned int ch1;
};
+/* Max number of segments allowable in LUX table */
+#define TSL2X7X_MAX_LUX_TABLE_SIZE 9
+/* The default LUX tables all have 3 elements. */
+#define TSL2X7X_DEF_LUX_TABLE_SZ 3
+#define TSL2X7X_DEFAULT_TABLE_BYTES (sizeof(struct tsl2x7x_lux) * \
+ TSL2X7X_DEF_LUX_TABLE_SZ)
+
/**
* struct tsl2x7x_default_settings - power on defaults unless
* overridden by platform data.
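The header change sizes the default table copy by the element type rather than sizeof(int) * 9. A quick standalone check that the two byte counts agree for three three-int entries; the struct layout is copied from the hunk above and the result assumes 4-byte int:

#include <stdio.h>

struct tsl2x7x_lux {
	unsigned int ratio;
	unsigned int ch0;
	unsigned int ch1;
};

#define TSL2X7X_MAX_LUX_TABLE_SIZE	9
#define TSL2X7X_DEF_LUX_TABLE_SZ	3
#define TSL2X7X_DEFAULT_TABLE_BYTES	(sizeof(struct tsl2x7x_lux) * \
					 TSL2X7X_DEF_LUX_TABLE_SZ)

int main(void)
{
	/* old count: 9 ints; new count: 3 full entries of 3 ints each */
	printf("old=%zu new=%zu\n",
	       sizeof(int) * TSL2X7X_MAX_LUX_TABLE_SIZE,
	       TSL2X7X_DEFAULT_TABLE_BYTES);
	return 0;
}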
diff --git a/drivers/staging/iio/meter/Makefile b/drivers/staging/iio/meter/Makefile
index de3863d6b078..19e7982f5563 100644
--- a/drivers/staging/iio/meter/Makefile
+++ b/drivers/staging/iio/meter/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for metering ic drivers
#
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index ce26abdeab92..c44eb577dc35 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -80,7 +80,7 @@
* @us: actual spi_device
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
+ * @buf_lock: mutex to protect tx, rx and write frequency
**/
struct ade7753_state {
struct spi_device *us;
@@ -107,6 +107,19 @@ static int ade7753_spi_write_reg_8(struct device *dev,
return ret;
}
+static int __ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
+ u16 value)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
+
+ st->tx[0] = ADE7753_WRITE_REG(reg_address);
+ st->tx[1] = (value >> 8) & 0xFF;
+ st->tx[2] = value & 0xFF;
+
+ return spi_write(st->us, st->tx, 3);
+}
+
static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
u16 value)
{
@@ -115,10 +128,7 @@ static int ade7753_spi_write_reg_16(struct device *dev, u8 reg_address,
struct ade7753_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
- st->tx[0] = ADE7753_WRITE_REG(reg_address);
- st->tx[1] = (value >> 8) & 0xFF;
- st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
+ ret = __ade7753_spi_write_reg_16(dev, reg_address, value);
mutex_unlock(&st->buf_lock);
return ret;
@@ -483,7 +493,7 @@ static ssize_t ade7753_write_frequency(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->buf_lock);
t = 27900 / val;
if (t > 0)
@@ -501,10 +511,10 @@ static ssize_t ade7753_write_frequency(struct device *dev,
reg &= ~(3 << 11);
reg |= t << 11;
- ret = ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
+ ret = __ade7753_spi_write_reg_16(dev, ADE7753_MODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
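The ade7753 change follows a common locking pattern: a double-underscore unlocked helper plus a locking wrapper, so the read-modify-write in write_frequency can hold buf_lock across both the read and the final register write without locking twice. A minimal userspace illustration of that pattern using a pthread mutex; the names and the register bit positions are stand-ins, not the driver's API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned short mode_reg;

/* Unlocked helper: the caller must already hold buf_lock. */
static void __write_reg_16(unsigned short value)
{
	mode_reg = value;
}

/* Public variant: takes the lock, then uses the unlocked helper. */
static void write_reg_16(unsigned short value)
{
	pthread_mutex_lock(&buf_lock);
	__write_reg_16(value);
	pthread_mutex_unlock(&buf_lock);
}

/* Read-modify-write under one lock; only the unlocked helper is called. */
static void write_frequency(unsigned short bits)
{
	pthread_mutex_lock(&buf_lock);
	__write_reg_16((mode_reg & ~(3 << 11)) | (bits << 11));
	pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	write_reg_16(0x000c);
	write_frequency(2);
	printf("0x%04x\n", mode_reg);	/* prints 0x100c */
	return 0;
}

Built with cc -pthread, the sketch prints 0x100c: the frequency bits land in the mode word while every writer goes through the same lock.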
@@ -561,7 +571,6 @@ static const struct attribute_group ade7753_attribute_group = {
static const struct iio_info ade7753_info = {
.attrs = &ade7753_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7753_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index be0df3fe4230..3a1e342d75fb 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -601,7 +601,6 @@ static const struct attribute_group ade7754_attribute_group = {
static const struct iio_info ade7754_info = {
.attrs = &ade7754_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7754_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40498af4dc46..7b7ffe5ed186 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -827,7 +827,6 @@ static const struct iio_info ade7758_info = {
.attrs = &ade7758_attribute_group,
.read_raw = &ade7758_read_raw,
.write_raw = &ade7758_write_raw,
- .driver_module = THIS_MODULE,
};
static int ade7758_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7758_trigger.c b/drivers/staging/iio/meter/ade7758_trigger.c
index 5b35a7f08f4f..1f0d1a0cf889 100644
--- a/drivers/staging/iio/meter/ade7758_trigger.c
+++ b/drivers/staging/iio/meter/ade7758_trigger.c
@@ -53,7 +53,6 @@ static int ade7758_trig_try_reen(struct iio_trigger *trig)
}
static const struct iio_trigger_ops ade7758_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = &ade7758_data_rdy_trigger_set_state,
.try_reenable = &ade7758_trig_try_reen,
};
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 02573c517d9d..d99cf508d8d0 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -60,7 +60,7 @@
/**
* struct ade7759_state - device instance specific data
* @us: actual spi_device
- * @buf_lock: mutex to protect tx and rx
+ * @buf_lock: mutex to protect tx, rx and write frequency
* @tx: transmit buffer
* @rx: receive buffer
**/
@@ -89,19 +89,30 @@ static int ade7759_spi_write_reg_8(struct device *dev,
return ret;
}
-static int ade7759_spi_write_reg_16(struct device *dev,
+/* Unlocked version of ade7759_spi_write_reg_16 function */
+static int __ade7759_spi_write_reg_16(struct device *dev,
u8 reg_address,
u16 value)
{
- int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7759_state *st = iio_priv(indio_dev);
- mutex_lock(&st->buf_lock);
st->tx[0] = ADE7759_WRITE_REG(reg_address);
st->tx[1] = (value >> 8) & 0xFF;
st->tx[2] = value & 0xFF;
- ret = spi_write(st->us, st->tx, 3);
+ return spi_write(st->us, st->tx, 3);
+}
+
+static int ade7759_spi_write_reg_16(struct device *dev,
+ u8 reg_address,
+ u16 value)
+{
+ int ret;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&st->buf_lock);
+ ret = __ade7759_spi_write_reg_16(dev, reg_address, value);
mutex_unlock(&st->buf_lock);
return ret;
@@ -429,7 +440,7 @@ static ssize_t ade7759_write_frequency(struct device *dev,
if (!val)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
+ mutex_lock(&st->buf_lock);
t = 27900 / val;
if (t > 0)
@@ -447,10 +458,10 @@ static ssize_t ade7759_write_frequency(struct device *dev,
reg &= ~(3 << 13);
reg |= t << 13;
- ret = ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
+ ret = __ade7759_spi_write_reg_16(dev, ADE7759_MODE, reg);
out:
- mutex_unlock(&indio_dev->mlock);
+ mutex_unlock(&st->buf_lock);
return ret ? ret : len;
}
@@ -493,7 +504,6 @@ static const struct attribute_group ade7759_attribute_group = {
static const struct iio_info ade7759_info = {
.attrs = &ade7759_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ade7759_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 70612da64a8b..90d07cdca4b8 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -530,7 +530,6 @@ static const struct attribute_group ade7854_attribute_group = {
static const struct iio_info ade7854_info = {
.attrs = &ade7854_attribute_group,
- .driver_module = THIS_MODULE,
};
int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index dbd97def9cd8..c27247a7891a 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ADE7854_H
#define _ADE7854_H
diff --git a/drivers/staging/iio/meter/meter.h b/drivers/staging/iio/meter/meter.h
index 0e37f23853f1..edf26302fa57 100644
--- a/drivers/staging/iio/meter/meter.h
+++ b/drivers/staging/iio/meter/meter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _METER_H
#define _METER_H
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index a37e199225f4..aa62c64e9bc4 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -98,7 +98,6 @@ static const struct iio_chan_spec ad2s1200_channels[] = {
static const struct iio_info ad2s1200_info = {
.read_raw = ad2s1200_read_raw,
- .driver_module = THIS_MODULE,
};
static int ad2s1200_probe(struct spi_device *spi)
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 3e00df74b18c..f8baab061eba 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -638,7 +638,6 @@ error_ret:
static const struct iio_info ad2s1210_info = {
.read_raw = ad2s1210_read_raw,
.attrs = &ad2s1210_attribute_group,
- .driver_module = THIS_MODULE,
};
static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index b2270908f26f..59586947a936 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -48,7 +48,6 @@ error_ret:
static const struct iio_info ad2s90_info = {
.read_raw = ad2s90_read_raw,
- .driver_module = THIS_MODULE,
};
static const struct iio_chan_spec ad2s90_chan = {
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 4e0b4eedb53d..d80dcf82eba9 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -172,7 +172,6 @@ static int iio_bfin_tmr_get_number(int irq)
}
static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
- .owner = THIS_MODULE,
.set_trigger_state = iio_bfin_tmr_set_state,
};
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.h b/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
index c07321f8d94c..fb05a2a8397c 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IIO_BFIN_TIMER_TRIGGER_H__
#define __IIO_BFIN_TIMER_TRIGGER_H__
diff --git a/drivers/staging/irda/drivers/ali-ircc.c b/drivers/staging/irda/drivers/ali-ircc.c
index 35f198d83701..589cd01797f4 100644
--- a/drivers/staging/irda/drivers/ali-ircc.c
+++ b/drivers/staging/irda/drivers/ali-ircc.c
@@ -1876,8 +1876,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
self->stamp = ktime_get();
skb = dev_alloc_skb(len+1);
- if (skb == NULL)
- {
+ if (!skb) {
self->netdev->stats.rx_dropped++;
return FALSE;
diff --git a/drivers/staging/irda/drivers/au1k_ir.c b/drivers/staging/irda/drivers/au1k_ir.c
index be4ea6aa57a9..73e3e4b041bf 100644
--- a/drivers/staging/irda/drivers/au1k_ir.c
+++ b/drivers/staging/irda/drivers/au1k_ir.c
@@ -290,8 +290,7 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
msleep(20);
if (!timeout--) {
- printk(KERN_ERR "%s: rx/tx disable timeout\n",
- dev->name);
+ netdev_err(dev, "rx/tx disable timeout\n");
break;
}
}
@@ -349,7 +348,7 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
IR_RX_ENABLE);
break;
default:
- printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ netdev_err(dev, "unsupported speed %x\n", speed);
ret = -EINVAL;
break;
}
@@ -361,18 +360,18 @@ static int au1k_irda_set_speed(struct net_device *dev, int speed)
irda_write(aup, IR_RING_PROMPT, 0);
if (control & (1 << 14)) {
- printk(KERN_ERR "%s: configuration error\n", dev->name);
+ netdev_err(dev, "configuration error\n");
} else {
if (control & (1 << 11))
- printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ netdev_debug(dev, "Valid SIR config\n");
if (control & (1 << 12))
- printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ netdev_debug(dev, "Valid MIR config\n");
if (control & (1 << 13))
- printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ netdev_debug(dev, "Valid FIR config\n");
if (control & (1 << 10))
- printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ netdev_debug(dev, "TX enabled\n");
if (control & (1 << 9))
- printk(KERN_DEBUG "%s RX enabled\n", dev->name);
+ netdev_debug(dev, "RX enabled\n");
}
return ret;
@@ -584,23 +583,21 @@ static int au1k_irda_start(struct net_device *dev)
retval = au1k_init(dev);
if (retval) {
- printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
+ netdev_err(dev, "error in au1k_init\n");
return retval;
}
retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
dev->name, dev);
if (retval) {
free_irq(aup->irq_tx, dev);
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
- dev->name, dev->irq);
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return retval;
}
@@ -673,12 +670,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
flags = ptxd->flags;
if (flags & AU_OWN) {
- printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netdev_debug(dev, "tx_full\n");
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
} else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
- printk(KERN_DEBUG "%s: tx_full\n", dev->name);
+ netdev_debug(dev, "tx_full\n");
netif_stop_queue(dev);
aup->tx_full = 1;
return 1;
@@ -688,7 +685,7 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
#if 0
if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
- printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ netdev_debug(dev, "tx warning: rx byte cnt %x\n",
irda_read(aup, IR_RX_BYTE_CNT));
}
#endif
@@ -726,7 +723,7 @@ static void au1k_tx_timeout(struct net_device *dev)
u32 speed;
struct au1k_private *aup = netdev_priv(dev);
- printk(KERN_ERR "%s: tx timeout\n", dev->name);
+ netdev_err(dev, "tx timeout\n");
speed = aup->speed;
aup->speed = 0;
au1k_irda_set_speed(dev, speed);
@@ -751,8 +748,7 @@ static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
ret = au1k_irda_set_speed(dev,
rq->ifr_baudrate);
else {
- printk(KERN_ERR "%s ioctl: !netif_running\n",
- dev->name);
+ netdev_err(dev, "ioctl: !netif_running\n");
ret = 0;
}
}
@@ -868,7 +864,7 @@ out3:
out2:
kfree(aup->rx_buff.head);
out1:
- printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ netdev_err(dev, "au1k_irda_net_init() failed. Returns %d\n");
return retval;
}
@@ -934,7 +930,7 @@ static int au1k_irda_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ netdev_info(dev, "IrDA: Registered device\n");
return 0;
out4:
diff --git a/drivers/staging/irda/drivers/bfin_sir.c b/drivers/staging/irda/drivers/bfin_sir.c
index 3151b580dbd6..59e409b68349 100644
--- a/drivers/staging/irda/drivers/bfin_sir.c
+++ b/drivers/staging/irda/drivers/bfin_sir.c
@@ -22,6 +22,8 @@ static int max_rate = 57600;
static int max_rate = 115200;
#endif
+static void bfin_sir_rx_dma_timeout(struct timer_list *t);
+
static void turnaround_delay(int mtt)
{
long ticks;
@@ -57,7 +59,7 @@ static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device
sp->clk = get_sclk();
#ifdef CONFIG_SIR_BFIN_DMA
sp->tx_done = 1;
- init_timer(&(sp->rx_dma_timer));
+ timer_setup(&sp->rx_dma_timer, bfin_sir_rx_dma_timeout, 0);
#endif
}
@@ -317,10 +319,12 @@ static void bfin_sir_dma_rx_chars(struct net_device *dev)
async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]);
}
-void bfin_sir_rx_dma_timeout(struct net_device *dev)
+static void bfin_sir_rx_dma_timeout(struct timer_list *t)
{
+ struct bfin_sir_port *port = from_timer(port, t, rx_dma_timer);
+ struct net_device *dev = port->dev;
struct bfin_sir_self *self = netdev_priv(dev);
- struct bfin_sir_port *port = self->sir_port;
+
int x_pos, pos;
unsigned long flags;
@@ -405,8 +409,6 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
set_dma_start_addr(port->rx_dma_channel, (unsigned long)port->rx_dma_buf.buf);
enable_dma(port->rx_dma_channel);
- port->rx_dma_timer.data = (unsigned long)(dev);
- port->rx_dma_timer.function = (void *)bfin_sir_rx_dma_timeout;
#else
diff --git a/drivers/staging/irda/drivers/esi-sir.c b/drivers/staging/irda/drivers/esi-sir.c
index 019a3e848bcb..eb7aa6430bea 100644
--- a/drivers/staging/irda/drivers/esi-sir.c
+++ b/drivers/staging/irda/drivers/esi-sir.c
@@ -1,5 +1,5 @@
/*********************************************************************
- *
+ *
* Filename: esi.c
* Version: 1.6
* Description: Driver for the Extended Systems JetEye PC dongle
@@ -8,25 +8,25 @@
* Created at: Sat Feb 21 18:54:38 1998
* Modified at: Sun Oct 27 22:01:04 2002
* Modified by: Martin Diehl <mad@mdiehl.de>
- *
+ *
* Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
* Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
* Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
* All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
+ *
+ * You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
+ *
********************************************************************/
#include <linux/module.h>
@@ -97,7 +97,7 @@ static int esi_change_speed(struct sir_dev *dev, unsigned speed)
{
int ret = 0;
int dtr, rts;
-
+
switch (speed) {
case 19200:
dtr = TRUE;
diff --git a/drivers/staging/irda/drivers/irda-usb.c b/drivers/staging/irda/drivers/irda-usb.c
index 723e49bc4baa..bda6bdc6c70b 100644
--- a/drivers/staging/irda/drivers/irda-usb.c
+++ b/drivers/staging/irda/drivers/irda-usb.c
@@ -117,7 +117,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
static void speed_bulk_callback(struct urb *urb);
static void write_bulk_callback(struct urb *urb);
static void irda_usb_receive(struct urb *urb);
-static void irda_usb_rx_defer_expired(unsigned long data);
+static void irda_usb_rx_defer_expired(struct timer_list *t);
static int irda_usb_net_open(struct net_device *dev);
static int irda_usb_net_close(struct net_device *dev);
static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -334,9 +334,9 @@ static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
urb->transfer_flags = 0;
/* Irq disabled -> GFP_ATOMIC */
- if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
net_warn_ratelimited("%s(), failed Speed URB\n", __func__);
- }
}
/*------------------------------------------------------------------*/
@@ -846,8 +846,7 @@ static void irda_usb_receive(struct urb *urb)
* hot unplug of the dongle...
* Lowest effective timer is 10ms...
* Jean II */
- self->rx_defer_timer.function = irda_usb_rx_defer_expired;
- self->rx_defer_timer.data = (unsigned long) urb;
+ self->rx_defer_timer_urb = urb;
mod_timer(&self->rx_defer_timer,
jiffies + msecs_to_jiffies(10));
@@ -953,20 +952,13 @@ done:
* In case of errors, we want the USB layer to have time to recover.
* Now, it is time to resubmit ouur Rx URB...
*/
-static void irda_usb_rx_defer_expired(unsigned long data)
+static void irda_usb_rx_defer_expired(struct timer_list *t)
{
- struct urb *urb = (struct urb *) data;
+ struct irda_usb_cb *self = from_timer(self, t, rx_defer_timer);
+ struct urb *urb = self->rx_defer_timer_urb;
struct sk_buff *skb = (struct sk_buff *) urb->context;
- struct irda_usb_cb *self;
- struct irda_skb_cb *cb;
struct urb *next_urb;
- /* Find ourselves */
- cb = (struct irda_skb_cb *) skb->cb;
- IRDA_ASSERT(cb != NULL, return;);
- self = (struct irda_usb_cb *) cb->context;
- IRDA_ASSERT(self != NULL, return;);
-
/* Same stuff as when Rx is done, see above... */
next_urb = self->idle_rx_urb;
urb->context = NULL;
@@ -1622,7 +1614,7 @@ static int irda_usb_probe(struct usb_interface *intf,
self = netdev_priv(net);
self->netdev = net;
spin_lock_init(&self->lock);
- init_timer(&self->rx_defer_timer);
+ timer_setup(&self->rx_defer_timer, irda_usb_rx_defer_expired, 0);
self->capability = id->driver_info;
self->needspatch = ((self->capability & IUC_STIR421X) != 0);
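The timer conversions in this series (timer_setup() plus from_timer()) work because the callback receives a pointer to the embedded timer and recovers its containing structure with container_of(). A standalone sketch of that pointer arithmetic in plain C; the struct and field names are illustrative stand-ins, not the kernel's types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer {
	int expires;
};

struct usb_cb_like {
	int capability;
	struct fake_timer rx_defer_timer;
};

/* The callback only sees the timer; recover the owning structure from it. */
static void rx_defer_expired(struct fake_timer *t)
{
	struct usb_cb_like *self =
		container_of(t, struct usb_cb_like, rx_defer_timer);

	printf("capability=%d\n", self->capability);
}

int main(void)
{
	struct usb_cb_like self = { .capability = 42 };

	rx_defer_expired(&self.rx_defer_timer);	/* prints capability=42 */
	return 0;
}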
diff --git a/drivers/staging/irda/drivers/irda-usb.h b/drivers/staging/irda/drivers/irda-usb.h
index 8ac389fa9348..56ee8c16c5e2 100644
--- a/drivers/staging/irda/drivers/irda-usb.h
+++ b/drivers/staging/irda/drivers/irda-usb.h
@@ -170,5 +170,6 @@ struct irda_usb_cb {
int needspatch; /* device needs firmware patch */
struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
+ struct urb *rx_defer_timer_urb; /* URB attached to rx_defer_timer */
};
diff --git a/drivers/staging/irda/drivers/mcs7780.c b/drivers/staging/irda/drivers/mcs7780.c
index c3f0b254b344..d52e9f4b9770 100644
--- a/drivers/staging/irda/drivers/mcs7780.c
+++ b/drivers/staging/irda/drivers/mcs7780.c
@@ -605,19 +605,22 @@ static int mcs_speed_change(struct mcs_cb *mcs)
if (mcs->new_speed <= 115200) {
rval &= ~MCS_FIR;
- if ((rst = (mcs->speed > 115200)))
+ rst = mcs->speed > 115200;
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 0);
} else if (mcs->new_speed <= 1152000) {
rval &= ~MCS_FIR;
- if ((rst = !(mcs->speed == 576000 || mcs->speed == 1152000)))
+ rst = !(mcs->speed == 576000 || mcs->speed == 1152000);
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
} else {
rval |= MCS_FIR;
- if ((rst = (mcs->speed != 4000000)))
+ rst = mcs->speed != 4000000;
+ if (rst)
mcs_set_reg(mcs, MCS_MINRXPW_REG, 5);
}
diff --git a/drivers/staging/irda/drivers/vlsi_ir.c b/drivers/staging/irda/drivers/vlsi_ir.c
index 6638784c082e..3dff3c55ddf5 100644
--- a/drivers/staging/irda/drivers/vlsi_ir.c
+++ b/drivers/staging/irda/drivers/vlsi_ir.c
@@ -170,10 +170,10 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
- seq_printf(seq, "hw registers: ");
+ seq_puts(seq, "hw registers: ");
for (i = 0; i < 0x20; i++)
seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
}
static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
@@ -193,7 +193,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
if (!netif_running(ndev))
return;
- seq_printf(seq, "\nhw-state:\n");
+ seq_puts(seq, "\nhw-state:\n");
pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
seq_printf(seq, "IRMISC:%s%s%s uart%s",
(byte&IRMISC_IRRAIL) ? " irrail" : "",
@@ -274,7 +274,7 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
- seq_printf(seq, "\nsw-state:\n");
+ seq_puts(seq, "\nsw-state:\n");
seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx),
@@ -305,10 +305,10 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
t = atomic_read(&r->tail) & r->mask;
seq_printf(seq, "head = %d / tail = %d ", h, t);
if (h == t)
- seq_printf(seq, "(empty)\n");
+ seq_puts(seq, "(empty)\n");
else {
if (((t+1)&r->mask) == h)
- seq_printf(seq, "(full)\n");
+ seq_puts(seq, "(full)\n");
else
seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
rd = &r->rd[h];
@@ -355,13 +355,13 @@ static int vlsi_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
idev->resume_ok);
if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
- seq_printf(seq, "\n--------- RX ring -----------\n\n");
+ seq_puts(seq, "\n--------- RX ring -----------\n\n");
vlsi_proc_ring(seq, idev->rx_ring);
- seq_printf(seq, "\n--------- TX ring -----------\n\n");
+ seq_puts(seq, "\n--------- TX ring -----------\n\n");
vlsi_proc_ring(seq, idev->tx_ring);
}
}
- seq_printf(seq, "\n");
+ seq_putc(seq, '\n');
spin_unlock_irqrestore(&idev->lock, flags);
return 0;
diff --git a/drivers/staging/irda/include/net/irda/irlmp_event.h b/drivers/staging/irda/include/net/irda/irlmp_event.h
index 9e4ec17a7449..a1a082fe384e 100644
--- a/drivers/staging/irda/include/net/irda/irlmp_event.h
+++ b/drivers/staging/irda/include/net/irda/irlmp_event.h
@@ -82,9 +82,9 @@ typedef enum {
extern const char *const irlmp_state[];
extern const char *const irlsap_state[];
-void irlmp_watchdog_timer_expired(void *data);
-void irlmp_discovery_timer_expired(void *data);
-void irlmp_idle_timer_expired(void *data);
+void irlmp_watchdog_timer_expired(struct timer_list *t);
+void irlmp_discovery_timer_expired(struct timer_list *t);
+void irlmp_idle_timer_expired(struct timer_list *t);
void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb);
diff --git a/drivers/staging/irda/include/net/irda/qos.h b/drivers/staging/irda/include/net/irda/qos.h
index 05a5a249956f..a0315b50ac27 100644
--- a/drivers/staging/irda/include/net/irda/qos.h
+++ b/drivers/staging/irda/include/net/irda/qos.h
@@ -58,23 +58,23 @@
#define IR_16000000 0x02
/* Quality of Service information */
-typedef struct {
+struct qos_value {
__u32 value;
__u16 bits; /* LSB is first byte, MSB is second byte */
-} qos_value_t;
+};
struct qos_info {
magic_t magic;
- qos_value_t baud_rate; /* IR_11520O | ... */
- qos_value_t max_turn_time;
- qos_value_t data_size;
- qos_value_t window_size;
- qos_value_t additional_bofs;
- qos_value_t min_turn_time;
- qos_value_t link_disc_time;
+ struct qos_value baud_rate; /* IR_11520O | ... */
+ struct qos_value max_turn_time;
+ struct qos_value data_size;
+ struct qos_value window_size;
+ struct qos_value additional_bofs;
+ struct qos_value min_turn_time;
+ struct qos_value link_disc_time;
- qos_value_t power;
+ struct qos_value power;
};
extern int sysctl_max_baud_rate;
diff --git a/drivers/staging/irda/include/net/irda/timer.h b/drivers/staging/irda/include/net/irda/timer.h
index d784f242cf7b..6dab15f5dae1 100644
--- a/drivers/staging/irda/include/net/irda/timer.h
+++ b/drivers/staging/irda/include/net/irda/timer.h
@@ -72,14 +72,11 @@ struct lap_cb;
#define WATCHDOG_TIMEOUT (20*HZ) /* 20 sec */
-typedef void (*TIMER_CALLBACK)(void *);
-
-static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
- void* data, TIMER_CALLBACK callback)
+static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
+ void (*callback)(struct timer_list *))
{
- ptimer->function = (void (*)(unsigned long)) callback;
- ptimer->data = (unsigned long) data;
-
+ ptimer->function = callback;
+
/* Set new value for timer (update or add timer).
* We use mod_timer() because it's more efficient and also
* safer with respect to race conditions - Jean II */
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index 23fa7c8b09a5..b82a47b9ef0b 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -429,11 +429,11 @@ static void irda_selective_discovery_indication(discinfo_t *discovery,
* We were waiting for a node to be discovered, but nothing has come up
* so far. Wake up the user and tell him that we failed...
*/
-static void irda_discovery_timeout(u_long priv)
+static void irda_discovery_timeout(struct timer_list *t)
{
struct irda_sock *self;
- self = (struct irda_sock *) priv;
+ self = from_timer(self, t, watchdog);
BUG_ON(self == NULL);
/* Nothing for the caller */
@@ -2505,8 +2505,7 @@ bed:
/* Set watchdog timer to expire in <val> ms. */
self->errno = 0;
- setup_timer(&self->watchdog, irda_discovery_timeout,
- (unsigned long)self);
+ timer_setup(&self->watchdog, irda_discovery_timeout, 0);
mod_timer(&self->watchdog,
jiffies + msecs_to_jiffies(val));
diff --git a/drivers/staging/irda/net/discovery.c b/drivers/staging/irda/net/discovery.c
index 364d70aed068..1e54954a4081 100644
--- a/drivers/staging/irda/net/discovery.c
+++ b/drivers/staging/irda/net/discovery.c
@@ -179,7 +179,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
- if (buffer == NULL) {
+ if (!buffer) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return;
}
@@ -291,7 +291,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
- if (buffer == NULL) {
+ if (!buffer) {
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty.c b/drivers/staging/irda/net/ircomm/ircomm_tty.c
index ec157c3419b5..473abfaffe7b 100644
--- a/drivers/staging/irda/net/ircomm/ircomm_tty.c
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty.c
@@ -395,7 +395,7 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
/* Init some important stuff */
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
spin_lock_init(&self->spinlock);
/*
diff --git a/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
index 0a411019c098..e2d5ce8ba0db 100644
--- a/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
+++ b/drivers/staging/irda/net/ircomm/ircomm_tty_attach.c
@@ -52,7 +52,7 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id,
struct ias_value *value, void *priv);
static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
int timeout);
-static void ircomm_tty_watchdog_timer_expired(void *data);
+static void ircomm_tty_watchdog_timer_expired(struct timer_list *timer);
static int ircomm_tty_state_idle(struct ircomm_tty_cb *self,
IRCOMM_TTY_EVENT event,
@@ -587,7 +587,7 @@ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ irda_start_timer(&self->watchdog_timer, timeout,
ircomm_tty_watchdog_timer_expired);
}
@@ -597,9 +597,9 @@ static void ircomm_tty_start_watchdog_timer(struct ircomm_tty_cb *self,
* Called when the connect procedure have taken to much time.
*
*/
-static void ircomm_tty_watchdog_timer_expired(void *data)
+static void ircomm_tty_watchdog_timer_expired(struct timer_list *t)
{
- struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data;
+ struct ircomm_tty_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irda_device.c b/drivers/staging/irda/net/irda_device.c
index 890b90d055d5..682b4eea15e0 100644
--- a/drivers/staging/irda/net/irda_device.c
+++ b/drivers/staging/irda/net/irda_device.c
@@ -54,29 +54,30 @@
static void __irda_task_delete(struct irda_task *task);
-static hashbin_t *dongles = NULL;
-static hashbin_t *tasks = NULL;
+static hashbin_t *dongles;
+static hashbin_t *tasks;
-static void irda_task_timer_expired(void *data);
+static void irda_task_timer_expired(struct timer_list *timer);
-int __init irda_device_init( void)
+int __init irda_device_init(void)
{
dongles = hashbin_new(HB_NOLOCK);
- if (dongles == NULL) {
+ if (!dongles) {
net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
spin_lock_init(&dongles->hb_spinlock);
tasks = hashbin_new(HB_LOCK);
- if (tasks == NULL) {
+ if (!tasks) {
net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n");
hashbin_delete(dongles, NULL);
return -ENOMEM;
}
/* We no longer initialise the driver ourselves here, we let
- * the system do it for us... - Jean II */
+ * the system do it for us... - Jean II
+ */
return 0;
}
@@ -84,6 +85,7 @@ int __init irda_device_init( void)
static void leftover_dongle(void *arg)
{
struct dongle_reg *reg = arg;
+
net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n",
reg->type);
}
@@ -107,7 +109,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE");
- self = (struct irlap_cb *) dev->atalk_ptr;
+ self = (struct irlap_cb *)dev->atalk_ptr;
/* Some drivers may enable the receive interrupt before calling
* irlap_open(), or they may disable the receive interrupt
@@ -115,7 +117,8 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
* The IrDA stack is protected from this in irlap_driver_rcv().
* However, the driver calls directly the wrapper, that calls
* us directly. Make sure we protect ourselves.
- * Jean II */
+ * Jean II
+ */
if (!self || self->magic != LAP_MAGIC)
return;
@@ -133,7 +136,6 @@ void irda_device_set_media_busy(struct net_device *dev, int status)
}
EXPORT_SYMBOL(irda_device_set_media_busy);
-
/*
* Function irda_device_is_receiving (dev)
*
@@ -169,7 +171,7 @@ static void __irda_task_delete(struct irda_task *task)
static void irda_task_delete(struct irda_task *task)
{
/* Unregister task */
- hashbin_remove(tasks, (long) task, NULL);
+ hashbin_remove(tasks, (long)task, NULL);
__irda_task_delete(task);
}
@@ -231,7 +233,7 @@ static int irda_task_kick(struct irda_task *task)
}
irda_task_delete(task);
} else if (timeout > 0) {
- irda_start_timer(&task->timer, timeout, (void *) task,
+ irda_start_timer(&task->timer, timeout,
irda_task_timer_expired);
finished = FALSE;
} else {
@@ -249,11 +251,9 @@ static int irda_task_kick(struct irda_task *task)
* Task time has expired. We now try to execute task (again), and restart
* the timer if the task has not finished yet
*/
-static void irda_task_timer_expired(void *data)
+static void irda_task_timer_expired(struct timer_list *t)
{
- struct irda_task *task;
-
- task = data;
+ struct irda_task *task = from_timer(task, t, timer);
irda_task_kick(task);
}
@@ -280,8 +280,8 @@ static void irda_device_setup(struct net_device *dev)
/*
* Function alloc_irdadev
- * Allocates and sets up an IRDA device in a manner similar to
- * alloc_etherdev.
+ * Allocates and sets up an IRDA device in a manner similar to
+ * alloc_etherdev.
*/
struct net_device *alloc_irdadev(int sizeof_priv)
{
diff --git a/drivers/staging/irda/net/iriap.c b/drivers/staging/irda/net/iriap.c
index 1138eaf5c682..d64192e9db8b 100644
--- a/drivers/staging/irda/net/iriap.c
+++ b/drivers/staging/irda/net/iriap.c
@@ -76,12 +76,12 @@ static void iriap_connect_confirm(void *instance, void *sap,
static int iriap_data_indication(void *instance, void *sap,
struct sk_buff *skb);
-static void iriap_watchdog_timer_expired(void *data);
+static void iriap_watchdog_timer_expired(struct timer_list *t);
static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
int timeout)
{
- irda_start_timer(&self->watchdog_timer, timeout, self,
+ irda_start_timer(&self->watchdog_timer, timeout,
iriap_watchdog_timer_expired);
}
@@ -199,7 +199,7 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
* we connect, so this must have a sane value... Jean II */
self->max_header_size = LMP_MAX_HEADER;
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
@@ -946,9 +946,9 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb)
* Query has taken too long, so abort
*
*/
-static void iriap_watchdog_timer_expired(void *data)
+static void iriap_watchdog_timer_expired(struct timer_list *t)
{
- struct iriap_cb *self = (struct iriap_cb *) data;
+ struct iriap_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irlan/irlan_client.c b/drivers/staging/irda/net/irlan/irlan_client.c
index c5837a40c78e..0b65e80849ae 100644
--- a/drivers/staging/irda/net/irlan/irlan_client.c
+++ b/drivers/staging/irda/net/irlan/irlan_client.c
@@ -68,9 +68,9 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param,
char *value, int val_len);
static void irlan_client_open_ctrl_tsap(struct irlan_cb *self);
-static void irlan_client_kick_timer_expired(void *data)
+static void irlan_client_kick_timer_expired(struct timer_list *t)
{
- struct irlan_cb *self = (struct irlan_cb *) data;
+ struct irlan_cb *self = from_timer(self, t, client.kick_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -89,7 +89,7 @@ static void irlan_client_kick_timer_expired(void *data)
static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout)
{
- irda_start_timer(&self->client.kick_timer, timeout, (void *) self,
+ irda_start_timer(&self->client.kick_timer, timeout,
irlan_client_kick_timer_expired);
}
diff --git a/drivers/staging/irda/net/irlan/irlan_common.c b/drivers/staging/irda/net/irlan/irlan_common.c
index 481bbc2a4349..fdcd7147007d 100644
--- a/drivers/staging/irda/net/irlan/irlan_common.c
+++ b/drivers/staging/irda/net/irlan/irlan_common.c
@@ -228,8 +228,8 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
self->media = MEDIA_802_3;
self->disconnect_reason = LM_USER_REQUEST;
- init_timer(&self->watchdog_timer);
- init_timer(&self->client.kick_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
+ timer_setup(&self->client.kick_timer, NULL, 0);
init_waitqueue_head(&self->open_wait);
skb_queue_head_init(&self->client.txq);
diff --git a/drivers/staging/irda/net/irlap.c b/drivers/staging/irda/net/irlap.c
index 1cde711bcab5..d7d894423b4f 100644
--- a/drivers/staging/irda/net/irlap.c
+++ b/drivers/staging/irda/net/irlap.c
@@ -148,14 +148,14 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
/* Copy to the driver */
memcpy(dev->dev_addr, &self->saddr, 4);
- init_timer(&self->slot_timer);
- init_timer(&self->query_timer);
- init_timer(&self->discovery_timer);
- init_timer(&self->final_timer);
- init_timer(&self->poll_timer);
- init_timer(&self->wd_timer);
- init_timer(&self->backoff_timer);
- init_timer(&self->media_busy_timer);
+ timer_setup(&self->slot_timer, NULL, 0);
+ timer_setup(&self->query_timer, NULL, 0);
+ timer_setup(&self->discovery_timer, NULL, 0);
+ timer_setup(&self->final_timer, NULL, 0);
+ timer_setup(&self->poll_timer, NULL, 0);
+ timer_setup(&self->wd_timer, NULL, 0);
+ timer_setup(&self->backoff_timer, NULL, 0);
+ timer_setup(&self->media_busy_timer, NULL, 0);
irlap_apply_default_connection_parameters(self);
diff --git a/drivers/staging/irda/net/irlap_event.c b/drivers/staging/irda/net/irlap_event.c
index 0e1b4d79f745..634188b07e0a 100644
--- a/drivers/staging/irda/net/irlap_event.c
+++ b/drivers/staging/irda/net/irlap_event.c
@@ -163,9 +163,9 @@ static int (*state[])(struct irlap_cb *self, IRLAP_EVENT event,
* Poll timer has expired. Normally we must now send a RR frame to the
* remote device
*/
-static void irlap_poll_timer_expired(void *data)
+static void irlap_poll_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, poll_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -222,7 +222,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout)
if (timeout == 0)
irlap_do_event(self, POLL_TIMER_EXPIRED, NULL, NULL);
else
- irda_start_timer(&self->poll_timer, timeout, self,
+ irda_start_timer(&self->poll_timer, timeout,
irlap_poll_timer_expired);
}
diff --git a/drivers/staging/irda/net/irlmp.c b/drivers/staging/irda/net/irlmp.c
index 43964594aa12..34355061ab0b 100644
--- a/drivers/staging/irda/net/irlmp.c
+++ b/drivers/staging/irda/net/irlmp.c
@@ -109,7 +109,7 @@ int __init irlmp_init(void)
irlmp->last_lsap_sel = 0x0f; /* Reserved 0x00-0x0f */
strcpy(sysctl_devname, "Linux");
- init_timer(&irlmp->discovery_timer);
+ timer_setup(&irlmp->discovery_timer, NULL, 0);
/* Do discovery every 3 seconds, conditionally */
if (sysctl_discovery)
@@ -185,7 +185,7 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
self->dlsap_sel = LSAP_ANY;
/* self->connected = FALSE; -> already NULL via memset() */
- init_timer(&self->watchdog_timer);
+ timer_setup(&self->watchdog_timer, NULL, 0);
self->notify = *notify;
@@ -311,7 +311,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
lap->lap_state = LAP_STANDBY;
- init_timer(&lap->idle_timer);
+ timer_setup(&lap->idle_timer, NULL, 0);
/*
* Insert into queue of LMP links
@@ -655,7 +655,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
/* Not everything is the same */
new->notify.instance = instance;
- init_timer(&new->watchdog_timer);
+ timer_setup(&new->watchdog_timer, NULL, 0);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new,
(long) new, NULL);
diff --git a/drivers/staging/irda/net/irlmp_event.c b/drivers/staging/irda/net/irlmp_event.c
index e306cf2c1e04..ddad0994b6dc 100644
--- a/drivers/staging/irda/net/irlmp_event.c
+++ b/drivers/staging/irda/net/irlmp_event.c
@@ -165,7 +165,7 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event,
(*lap_state[self->lap_state]) (self, event, skb);
}
-void irlmp_discovery_timer_expired(void *data)
+void irlmp_discovery_timer_expired(struct timer_list *t)
{
/* We always cleanup the log (active & passive discovery) */
irlmp_do_expiry();
@@ -176,9 +176,9 @@ void irlmp_discovery_timer_expired(void *data)
irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout * HZ);
}
-void irlmp_watchdog_timer_expired(void *data)
+void irlmp_watchdog_timer_expired(struct timer_list *t)
{
- struct lsap_cb *self = (struct lsap_cb *) data;
+ struct lsap_cb *self = from_timer(self, t, watchdog_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;);
@@ -186,9 +186,9 @@ void irlmp_watchdog_timer_expired(void *data)
irlmp_do_lsap_event(self, LM_WATCHDOG_TIMEOUT, NULL);
}
-void irlmp_idle_timer_expired(void *data)
+void irlmp_idle_timer_expired(struct timer_list *t)
{
- struct lap_cb *self = (struct lap_cb *) data;
+ struct lap_cb *self = from_timer(self, t, idle_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
diff --git a/drivers/staging/irda/net/irqueue.c b/drivers/staging/irda/net/irqueue.c
index 160dc89335e2..14291cbc4097 100644
--- a/drivers/staging/irda/net/irqueue.c
+++ b/drivers/staging/irda/net/irqueue.c
@@ -217,7 +217,8 @@ static __u32 hash( const char* name)
while(*name) {
h = (h<<4) + *name++;
- if ((g = (h & 0xf0000000)))
+ g = h & 0xf0000000;
+ if (g)
h ^=g>>24;
h &=~g;
}
diff --git a/drivers/staging/irda/net/irttp.c b/drivers/staging/irda/net/irttp.c
index b6ab41d5b3a3..741a94f39b4e 100644
--- a/drivers/staging/irda/net/irttp.c
+++ b/drivers/staging/irda/net/irttp.c
@@ -62,7 +62,6 @@ static void irttp_run_rx_queue(struct tsap_cb *self);
static void irttp_flush_queues(struct tsap_cb *self);
static void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *skb);
static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self);
-static void irttp_todo_expired(unsigned long data);
static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
int get);
@@ -160,9 +159,9 @@ static inline void irttp_start_todo_timer(struct tsap_cb *self, int timeout)
* killed (need user context), and we can't guarantee that here...
* Jean II
*/
-static void irttp_todo_expired(unsigned long data)
+static void irttp_todo_expired(struct timer_list *t)
{
- struct tsap_cb *self = (struct tsap_cb *) data;
+ struct tsap_cb *self = from_timer(self, t, todo_timer);
/* Check that we still exist */
if (!self || self->magic != TTP_TSAP_MAGIC)
@@ -374,7 +373,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
static void irttp_init_tsap(struct tsap_cb *tsap)
{
spin_lock_init(&tsap->lock);
- init_timer(&tsap->todo_timer);
+ timer_setup(&tsap->todo_timer, irttp_todo_expired, 0);
skb_queue_head_init(&tsap->rx_queue);
skb_queue_head_init(&tsap->tx_queue);
@@ -410,10 +409,6 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/* Initialize internal objects */
irttp_init_tsap(self);
- /* Initialise todo timer */
- self->todo_timer.data = (unsigned long) self;
- self->todo_timer.function = &irttp_todo_expired;
-
/* Initialize callbacks for IrLMP to use */
irda_notify_init(&ttp_notify);
ttp_notify.connect_confirm = irttp_connect_confirm;
diff --git a/drivers/staging/irda/net/timer.c b/drivers/staging/irda/net/timer.c
index f2280f73b057..cf00c0d848aa 100644
--- a/drivers/staging/irda/net/timer.c
+++ b/drivers/staging/irda/net/timer.c
@@ -34,16 +34,16 @@
extern int sysctl_slot_timeout;
-static void irlap_slot_timer_expired(void* data);
-static void irlap_query_timer_expired(void* data);
-static void irlap_final_timer_expired(void* data);
-static void irlap_wd_timer_expired(void* data);
-static void irlap_backoff_timer_expired(void* data);
-static void irlap_media_busy_expired(void* data);
+static void irlap_slot_timer_expired(struct timer_list *t);
+static void irlap_query_timer_expired(struct timer_list *t);
+static void irlap_final_timer_expired(struct timer_list *t);
+static void irlap_wd_timer_expired(struct timer_list *t);
+static void irlap_backoff_timer_expired(struct timer_list *t);
+static void irlap_media_busy_expired(struct timer_list *t);
void irlap_start_slot_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->slot_timer, timeout, (void *) self,
+ irda_start_timer(&self->slot_timer, timeout,
irlap_slot_timer_expired);
}
@@ -66,32 +66,32 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
/* Set or re-set the timer. We reset the timer for each received
* discovery query, which allow us to automatically adjust to
* the speed of the peer discovery (faster or slower). Jean II */
- irda_start_timer( &self->query_timer, timeout, (void *) self,
+ irda_start_timer(&self->query_timer, timeout,
irlap_query_timer_expired);
}
void irlap_start_final_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->final_timer, timeout, (void *) self,
+ irda_start_timer(&self->final_timer, timeout,
irlap_final_timer_expired);
}
void irlap_start_wd_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->wd_timer, timeout, (void *) self,
+ irda_start_timer(&self->wd_timer, timeout,
irlap_wd_timer_expired);
}
void irlap_start_backoff_timer(struct irlap_cb *self, int timeout)
{
- irda_start_timer(&self->backoff_timer, timeout, (void *) self,
+ irda_start_timer(&self->backoff_timer, timeout,
irlap_backoff_timer_expired);
}
void irlap_start_mbusy_timer(struct irlap_cb *self, int timeout)
{
irda_start_timer(&self->media_busy_timer, timeout,
- (void *) self, irlap_media_busy_expired);
+ irlap_media_busy_expired);
}
void irlap_stop_mbusy_timer(struct irlap_cb *self)
@@ -110,19 +110,19 @@ void irlap_stop_mbusy_timer(struct irlap_cb *self)
void irlmp_start_watchdog_timer(struct lsap_cb *self, int timeout)
{
- irda_start_timer(&self->watchdog_timer, timeout, (void *) self,
+ irda_start_timer(&self->watchdog_timer, timeout,
irlmp_watchdog_timer_expired);
}
void irlmp_start_discovery_timer(struct irlmp_cb *self, int timeout)
{
- irda_start_timer(&self->discovery_timer, timeout, (void *) self,
+ irda_start_timer(&self->discovery_timer, timeout,
irlmp_discovery_timer_expired);
}
void irlmp_start_idle_timer(struct lap_cb *self, int timeout)
{
- irda_start_timer(&self->idle_timer, timeout, (void *) self,
+ irda_start_timer(&self->idle_timer, timeout,
irlmp_idle_timer_expired);
}
@@ -138,9 +138,9 @@ void irlmp_stop_idle_timer(struct lap_cb *self)
* IrLAP slot timer has expired
*
*/
-static void irlap_slot_timer_expired(void *data)
+static void irlap_slot_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, slot_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -154,9 +154,9 @@ static void irlap_slot_timer_expired(void *data)
* IrLAP query timer has expired
*
*/
-static void irlap_query_timer_expired(void *data)
+static void irlap_query_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, query_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -170,9 +170,9 @@ static void irlap_query_timer_expired(void *data)
*
*
*/
-static void irlap_final_timer_expired(void *data)
+static void irlap_final_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, final_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -186,9 +186,9 @@ static void irlap_final_timer_expired(void *data)
*
*
*/
-static void irlap_wd_timer_expired(void *data)
+static void irlap_wd_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, wd_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -202,9 +202,9 @@ static void irlap_wd_timer_expired(void *data)
*
*
*/
-static void irlap_backoff_timer_expired(void *data)
+static void irlap_backoff_timer_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, backoff_timer);
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == LAP_MAGIC, return;);
@@ -218,9 +218,9 @@ static void irlap_backoff_timer_expired(void *data)
*
*
*/
-static void irlap_media_busy_expired(void *data)
+static void irlap_media_busy_expired(struct timer_list *t)
{
- struct irlap_cb *self = (struct irlap_cb *) data;
+ struct irlap_cb *self = from_timer(self, t, media_busy_timer);
IRDA_ASSERT(self != NULL, return;);
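Every irda_start_timer() call in the timer.c hunks loses its (void *) self argument: once the timers are initialised with timer_setup(), the expiry handlers recover their objects with from_timer(), so the helper only needs the timer, the timeout and the callback. The helper itself is outside this excerpt; a plausible sketch, assuming the timeout is already in jiffies (the callers here scale by HZ), would be:

#include <linux/timer.h>
#include <linux/jiffies.h>

/*
 * Hypothetical re-implementation: re-arm an already timer_setup()-
 * initialised timer with a (possibly different) expiry handler.
 */
static void irda_start_timer_sketch(struct timer_list *ptimer, int timeout,
				    void (*expired)(struct timer_list *))
{
	ptimer->function = expired;	/* function is a public timer_list member */
	mod_timer(ptimer, jiffies + timeout);
}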
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
index ae03f7477324..dca2a142e834 100644
--- a/drivers/staging/ks7010/eap_packet.h
+++ b/drivers/staging/ks7010/eap_packet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef EAP_PACKET_H
#define EAP_PACKET_H
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 0f9348ba5d84..880085e2f24a 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -114,7 +114,7 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
}
static
-void ks_wlan_update_phyinfo_timeout(unsigned long ptr)
+void ks_wlan_update_phyinfo_timeout(struct timer_list *unused)
{
DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
atomic_set(&update_phyinfo, 0);
@@ -473,13 +473,16 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[3] =
TX_RATE_11M;
i++;
+ /* fall through */
case 5500000:
priv->reg.rate_set.body[2] = TX_RATE_5M;
i++;
+ /* fall through */
case 2000000:
priv->reg.rate_set.body[1] =
TX_RATE_2M | BASIC_RATE;
i++;
+ /* fall through */
case 1000000:
priv->reg.rate_set.body[0] =
TX_RATE_1M | BASIC_RATE;
@@ -535,14 +538,17 @@ static int ks_wlan_set_rate(struct net_device *dev,
priv->reg.rate_set.body[11] =
TX_RATE_54M;
i++;
+ /* fall through */
case 48000000:
priv->reg.rate_set.body[10] =
TX_RATE_48M;
i++;
+ /* fall through */
case 36000000:
priv->reg.rate_set.body[9] =
TX_RATE_36M;
i++;
+ /* fall through */
case 24000000:
case 18000000:
case 12000000:
@@ -619,14 +625,17 @@ static int ks_wlan_set_rate(struct net_device *dev,
TX_RATE_6M | BASIC_RATE;
i++;
}
+ /* fall through */
case 5500000:
priv->reg.rate_set.body[2] =
TX_RATE_5M | BASIC_RATE;
i++;
+ /* fall through */
case 2000000:
priv->reg.rate_set.body[1] =
TX_RATE_2M | BASIC_RATE;
i++;
+ /* fall through */
case 1000000:
priv->reg.rate_set.body[0] =
TX_RATE_1M | BASIC_RATE;
@@ -2010,6 +2019,7 @@ static int ks_wlan_set_mlme(struct net_device *dev,
case IW_MLME_DEAUTH:
if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
return 0;
+ /* fall through */
case IW_MLME_DISASSOC:
mode = 1;
return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2941,8 +2951,7 @@ int ks_wlan_net_start(struct net_device *dev)
/* phy information update timer */
atomic_set(&update_phyinfo, 0);
- setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
- (unsigned long)priv);
+ timer_setup(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout, 0);
/* dummy address set */
memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
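The ks_wlan_set_rate() switches are cumulative on purpose: each supported rate adds its own entry and then drops into the slower rates below it, so the new /* fall through */ annotations only silence GCC's -Wimplicit-fallthrough warning without changing behaviour. A small self-contained sketch of the idiom (rate values and helper name are illustrative only):

enum {
	EX_RATE_1M  = 0x01,
	EX_RATE_2M  = 0x02,
	EX_RATE_5M  = 0x04,
	EX_RATE_11M = 0x08,
};

/* Build a cumulative rate mask for a requested peak rate in kbit/s. */
static int example_rate_mask(int rate_kbps)
{
	int mask = 0;

	switch (rate_kbps) {
	case 11000:
		mask |= EX_RATE_11M;
		/* fall through */
	case 5500:
		mask |= EX_RATE_5M;
		/* fall through */
	case 2000:
		mask |= EX_RATE_2M;
		/* fall through */
	case 1000:
		mask |= EX_RATE_1M;
		break;
	default:
		return -1;	/* unsupported peak rate */
	}
	return mask;
}

The same file also converts its file-scope phy-info timer with timer_setup(); that callback does not need the owning object, so its struct timer_list * argument is simply named unused.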
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 1ea27c9e3708..3cb3f086148e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index b48e2f093bcc..6ad8867e5451 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 6d8752a368fa..6d132f941281 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 3f773a4a344b..e5c156e9d907 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index e7c37415a0c7..1b98f0953afb 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index fedb46dff696..d6fc3164e7e7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 0cc2fc465c1a..5a27220cc608 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 8c75d5075590..d4c5965c43b1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 709771d27f89..2f4ff595fac9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 41795d9b3b9b..1191764c431a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
index 008da4497bda..9699646decb9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 2accd9a85472..fc780f608e57 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index fa0808d2953b..30e333af8d0d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index dd0cd0442b86..854c84358ab4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index 709e1ce98d8d..aece13698eb4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index 9c37f3e4b134..31fcd33171b4 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index e0968ab8d95e..c1626726fa05 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index eea3b8e5e406..cfe8ee424e94 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -160,9 +161,9 @@ struct lnet_libmd {
} md_iov;
};
-#define LNET_MD_FLAG_ZOMBIE (1 << 0)
-#define LNET_MD_FLAG_AUTO_UNLINK (1 << 1)
-#define LNET_MD_FLAG_ABORTED (1 << 2)
+#define LNET_MD_FLAG_ZOMBIE BIT(0)
+#define LNET_MD_FLAG_AUTO_UNLINK BIT(1)
+#define LNET_MD_FLAG_ABORTED BIT(2)
struct lnet_test_peer {
/* info about peers we are trying to fail */
@@ -287,9 +288,9 @@ struct lnet_ni {
* of old LNet, so there shouldn't be any compatibility issue
*/
#define LNET_PING_FEAT_INVAL (0) /* no feature */
-#define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */
-#define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */
-#define LNET_PING_FEAT_RTE_DISABLED (1 << 2) /* Routing enabled */
+#define LNET_PING_FEAT_BASE BIT(0) /* just a ping */
+#define LNET_PING_FEAT_NI_STATUS BIT(1) /* return NI status */
+#define LNET_PING_FEAT_RTE_DISABLED BIT(2) /* Routing enabled */
#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
LNET_PING_FEAT_NI_STATUS)
@@ -440,23 +441,21 @@ struct lnet_rtrbuf {
enum lnet_match_flags {
/* Didn't match anything */
- LNET_MATCHMD_NONE = (1 << 0),
+ LNET_MATCHMD_NONE = BIT(0),
/* Matched OK */
- LNET_MATCHMD_OK = (1 << 1),
+ LNET_MATCHMD_OK = BIT(1),
/* Must be discarded */
- LNET_MATCHMD_DROP = (1 << 2),
+ LNET_MATCHMD_DROP = BIT(2),
/* match and buffer is exhausted */
- LNET_MATCHMD_EXHAUSTED = (1 << 3),
+ LNET_MATCHMD_EXHAUSTED = BIT(3),
/* match or drop */
LNET_MATCHMD_FINISH = (LNET_MATCHMD_OK | LNET_MATCHMD_DROP),
};
/* Options for lnet_portal::ptl_options */
-#define LNET_PTL_LAZY (1 << 0)
-#define LNET_PTL_MATCH_UNIQUE (1 << 1) /* unique match, for RDMA */
-#define LNET_PTL_MATCH_WILDCARD (1 << 2) /* wildcard match,
- * request portal
- */
+#define LNET_PTL_LAZY BIT(0)
+#define LNET_PTL_MATCH_UNIQUE BIT(1) /* unique match, for RDMA */
+#define LNET_PTL_MATCH_WILDCARD BIT(2) /* wildcard match, request portal */
/* parameter for matching operations (GET, PUT) */
struct lnet_match_info {
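The lib-types.h hunks replace hand-written (1 << n) flag definitions with the kernel's BIT() helper; the values are unchanged, but BIT() states the intent and shifts an unsigned long. A minimal sketch with illustrative flag names:

#include <linux/bitops.h>	/* BIT() */
#include <linux/types.h>

#define EX_MD_FLAG_ZOMBIE	BIT(0)	/* same value as (1 << 0) */
#define EX_MD_FLAG_AUTO_UNLINK	BIT(1)
#define EX_MD_FLAG_ABORTED	BIT(2)

static inline bool ex_md_is_aborted(unsigned long flags)
{
	return flags & EX_MD_FLAG_ABORTED;
}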
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index 553fb64b3e80..6bd1bca190a3 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 2b5930150cda..6bcb53d0c6f4 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -34,7 +34,7 @@ config LNET_SELFTEST
config LNET_XPRT_IB
tristate "LNET infiniband support"
- depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS
+ depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
default LNET && INFINIBAND
help
This option allows the LNET users to use infiniband as an
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 64763aacda57..8024843521ab 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index a1e994a1cc84..171eced213f8 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 8fc191d99927..40e3af5d8b04 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 3fe4d4858eba..a71b765215ad 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index fbbd8a5489e9..986c2a40d978 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -176,12 +177,9 @@ struct ksock_peer *
ksocknal_find_peer_locked(struct lnet_ni *ni, struct lnet_process_id id)
{
struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
- struct list_head *tmp;
struct ksock_peer *peer;
- list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, struct ksock_peer, ksnp_list);
-
+ list_for_each_entry(peer, peer_list, ksnp_list) {
LASSERT(!peer->ksnp_closing);
if (peer->ksnp_ni != ni)
@@ -453,7 +451,6 @@ int
ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
int port)
{
- struct list_head *tmp;
struct ksock_peer *peer;
struct ksock_peer *peer2;
struct ksock_route *route;
@@ -491,9 +488,7 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
}
route2 = NULL;
- list_for_each(tmp, &peer->ksnp_routes) {
- route2 = list_entry(tmp, struct ksock_route, ksnr_list);
-
+ list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
if (route2->ksnr_ipaddr == ipaddr)
break;
@@ -1688,10 +1683,10 @@ ksocknal_destroy_conn(struct ksock_conn *conn)
case SOCKNAL_RX_LNET_PAYLOAD:
last_rcv = conn->ksnc_rx_deadline -
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
- CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
+ CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %zd, left: %d, last alive is %ld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
&conn->ksnc_ipaddr, conn->ksnc_port,
- conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
+ iov_iter_count(&conn->ksnc_rx_to), conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni,
@@ -1854,12 +1849,10 @@ ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when)
peer = ksocknal_find_peer_locked(ni, id);
if (peer) {
- struct list_head *tmp;
struct ksock_conn *conn;
int bufnob;
- list_for_each(tmp, &peer->ksnp_conns) {
- conn = list_entry(tmp, struct ksock_conn, ksnc_list);
+ list_for_each_entry(conn, &peer->ksnp_conns, ksnc_list) {
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
if (bufnob < conn->ksnc_tx_bufnob) {
@@ -2316,7 +2309,7 @@ ksocknal_base_shutdown(void)
switch (ksocknal_data.ksnd_init) {
default:
LASSERT(0);
-
+ /* fall through */
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
LASSERT(ksocknal_data.ksnd_peers);
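The socklnd.c hunks drop the intermediate struct list_head *tmp cursors in favour of list_for_each_entry(), which performs the list_entry() cast internally. A hedged before/after sketch with an illustrative peer type:

#include <linux/list.h>
#include <linux/stddef.h>

struct peer_example {
	struct list_head ksnp_list;	/* chained on a peer hash list */
	int		 ksnp_id;
};

/* Old style: walk list_head pointers and cast each node by hand. */
static struct peer_example *find_peer_old(struct list_head *peers, int id)
{
	struct list_head *tmp;
	struct peer_example *peer;

	list_for_each(tmp, peers) {
		peer = list_entry(tmp, struct peer_example, ksnp_list);
		if (peer->ksnp_id == id)
			return peer;
	}
	return NULL;
}

/* New style: list_for_each_entry() hides the container_of() step. */
static struct peer_example *find_peer_new(struct list_head *peers, int id)
{
	struct peer_example *peer;

	list_for_each_entry(peer, peers, ksnp_list) {
		if (peer->ksnp_id == id)
			return peer;
	}
	return NULL;
}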
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6428c4b7aec..d50ebdf863fa 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
@@ -357,11 +358,7 @@ struct ksock_conn {
__u8 ksnc_rx_scheduled; /* being progressed */
__u8 ksnc_rx_state; /* what is being read */
int ksnc_rx_nob_left; /* # bytes to next hdr/body */
- int ksnc_rx_nob_wanted;/* bytes actually wanted */
- int ksnc_rx_niov; /* # iovec frags */
- struct kvec *ksnc_rx_iov; /* the iovec frags */
- int ksnc_rx_nkiov; /* # page frags */
- struct bio_vec *ksnc_rx_kiov; /* the page frags */
+ struct iov_iter ksnc_rx_to; /* copy destination */
union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
__u32 ksnc_rx_csum; /* partial checksum for incoming
* data
@@ -700,8 +697,7 @@ int ksocknal_lib_setup_sock(struct socket *so);
int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
void ksocknal_lib_eager_ack(struct ksock_conn *conn);
-int ksocknal_lib_recv_iov(struct ksock_conn *conn);
-int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_recv(struct ksock_conn *conn);
int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
int *rxmem, int *nagle);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 6b38d5a8fe92..27c56d5ae4e5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*
@@ -249,66 +250,16 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
}
static int
-ksocknal_recv_iov(struct ksock_conn *conn)
+ksocknal_recv_iter(struct ksock_conn *conn)
{
- struct kvec *iov = conn->ksnc_rx_iov;
int nob;
int rc;
- LASSERT(conn->ksnc_rx_niov > 0);
-
- /*
- * Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov
- */
- rc = ksocknal_lib_recv_iov(conn);
-
- if (rc <= 0)
- return rc;
-
- /* received something... */
- nob = rc;
-
- conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
- conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- mb(); /* order with setting rx_started */
- conn->ksnc_rx_started = 1;
-
- conn->ksnc_rx_nob_wanted -= nob;
- conn->ksnc_rx_nob_left -= nob;
-
- do {
- LASSERT(conn->ksnc_rx_niov > 0);
-
- if (nob < (int)iov->iov_len) {
- iov->iov_len -= nob;
- iov->iov_base += nob;
- return -EAGAIN;
- }
-
- nob -= iov->iov_len;
- conn->ksnc_rx_iov = ++iov;
- conn->ksnc_rx_niov--;
- } while (nob);
-
- return rc;
-}
-
-static int
-ksocknal_recv_kiov(struct ksock_conn *conn)
-{
- struct bio_vec *kiov = conn->ksnc_rx_kiov;
- int nob;
- int rc;
-
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
/*
- * Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov
+ * Never touch conn->ksnc_rx_to or change connection
+ * status inside ksocknal_lib_recv
*/
- rc = ksocknal_lib_recv_kiov(conn);
+ rc = ksocknal_lib_recv(conn);
if (rc <= 0)
return rc;
@@ -322,22 +273,11 @@ ksocknal_recv_kiov(struct ksock_conn *conn)
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
- conn->ksnc_rx_nob_wanted -= nob;
conn->ksnc_rx_nob_left -= nob;
- do {
- LASSERT(conn->ksnc_rx_nkiov > 0);
-
- if (nob < (int)kiov->bv_len) {
- kiov->bv_offset += nob;
- kiov->bv_len -= nob;
- return -EAGAIN;
- }
-
- nob -= kiov->bv_len;
- conn->ksnc_rx_kiov = ++kiov;
- conn->ksnc_rx_nkiov--;
- } while (nob);
+ iov_iter_advance(&conn->ksnc_rx_to, nob);
+ if (iov_iter_count(&conn->ksnc_rx_to))
+ return -EAGAIN;
return 1;
}
@@ -347,7 +287,7 @@ ksocknal_receive(struct ksock_conn *conn)
{
/*
* Return 1 on success, 0 on EOF, < 0 on error.
- * Caller checks ksnc_rx_nob_wanted to determine
+ * Caller checks ksnc_rx_to to determine
* progress/completion.
*/
int rc;
@@ -364,11 +304,7 @@ ksocknal_receive(struct ksock_conn *conn)
}
for (;;) {
- if (conn->ksnc_rx_niov)
- rc = ksocknal_recv_iov(conn);
- else
- rc = ksocknal_recv_kiov(conn);
-
+ rc = ksocknal_recv_iter(conn);
if (rc <= 0) {
/* error/EOF or partial receive */
if (rc == -EAGAIN) {
@@ -382,7 +318,7 @@ ksocknal_receive(struct ksock_conn *conn)
/* Completed a fragment */
- if (!conn->ksnc_rx_nob_wanted) {
+ if (!iov_iter_count(&conn->ksnc_rx_to)) {
rc = 1;
break;
}
@@ -1050,6 +986,7 @@ int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
+ struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
int nob;
unsigned int niov;
@@ -1070,32 +1007,26 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
case KSOCK_PROTO_V2:
case KSOCK_PROTO_V3:
conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;
-
- conn->ksnc_rx_nob_wanted = offsetof(struct ksock_msg, ksm_u);
+ kvec->iov_base = &conn->ksnc_msg;
+ kvec->iov_len = offsetof(struct ksock_msg, ksm_u);
conn->ksnc_rx_nob_left = offsetof(struct ksock_msg, ksm_u);
- conn->ksnc_rx_iov[0].iov_len = offsetof(struct ksock_msg, ksm_u);
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, offsetof(struct ksock_msg, ksm_u));
break;
case KSOCK_PROTO_V1:
/* Receiving bare struct lnet_hdr */
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(struct lnet_hdr);
+ kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+ kvec->iov_len = sizeof(struct lnet_hdr);
conn->ksnc_rx_nob_left = sizeof(struct lnet_hdr);
-
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(struct lnet_hdr);
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, sizeof(struct lnet_hdr));
break;
default:
LBUG();
}
- conn->ksnc_rx_niov = 1;
-
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_csum = ~0;
return 1;
}
@@ -1106,15 +1037,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
*/
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
skipped = 0;
niov = 0;
do {
nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));
- conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
- conn->ksnc_rx_iov[niov].iov_len = nob;
+ kvec[niov].iov_base = ksocknal_slop_buffer;
+ kvec[niov].iov_len = nob;
niov++;
skipped += nob;
nob_to_skip -= nob;
@@ -1122,16 +1052,14 @@ ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
} while (nob_to_skip && /* mustn't overflow conn's rx iov */
niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
- conn->ksnc_rx_niov = niov;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_nob_wanted = skipped;
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec, niov, skipped);
return 0;
}
static int
ksocknal_process_receive(struct ksock_conn *conn)
{
+ struct kvec *kvec = (struct kvec *)&conn->ksnc_rx_iov_space;
struct lnet_hdr *lhdr;
struct lnet_process_id *id;
int rc;
@@ -1145,7 +1073,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
- if (conn->ksnc_rx_nob_wanted) {
+ if (iov_iter_count(&conn->ksnc_rx_to)) {
rc = ksocknal_receive(conn);
if (rc <= 0) {
@@ -1170,7 +1098,7 @@ ksocknal_process_receive(struct ksock_conn *conn)
return (!rc ? -ESHUTDOWN : rc);
}
- if (conn->ksnc_rx_nob_wanted) {
+ if (iov_iter_count(&conn->ksnc_rx_to)) {
/* short read */
return -EAGAIN;
}
@@ -1233,16 +1161,13 @@ ksocknal_process_receive(struct ksock_conn *conn)
}
conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
- conn->ksnc_rx_nob_wanted = sizeof(struct ksock_lnet_msg);
conn->ksnc_rx_nob_left = sizeof(struct ksock_lnet_msg);
- conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
- conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof(struct ksock_lnet_msg);
+ kvec->iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
+ kvec->iov_len = sizeof(struct ksock_lnet_msg);
- conn->ksnc_rx_niov = 1;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_nkiov = 0;
+ iov_iter_kvec(&conn->ksnc_rx_to, READ|ITER_KVEC, kvec,
+ 1, sizeof(struct ksock_lnet_msg));
goto again; /* read lnet header now */
@@ -1344,26 +1269,9 @@ ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
LASSERT(to->nr_segs <= LNET_MAX_IOV);
conn->ksnc_cookie = msg;
- conn->ksnc_rx_nob_wanted = iov_iter_count(to);
conn->ksnc_rx_nob_left = rlen;
- if (to->type & ITER_KVEC) {
- conn->ksnc_rx_nkiov = 0;
- conn->ksnc_rx_kiov = NULL;
- conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
- conn->ksnc_rx_niov =
- lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
- to->nr_segs, to->kvec,
- to->iov_offset, iov_iter_count(to));
- } else {
- conn->ksnc_rx_niov = 0;
- conn->ksnc_rx_iov = NULL;
- conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
- conn->ksnc_rx_nkiov =
- lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
- to->nr_segs, to->bvec,
- to->iov_offset, iov_iter_count(to));
- }
+ conn->ksnc_rx_to = *to;
LASSERT(conn->ksnc_rx_scheduled);
@@ -2328,12 +2236,12 @@ ksocknal_find_timed_out_conn(struct ksock_peer *peer)
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
+ CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
libcfs_id2str(peer->ksnp_id),
&conn->ksnc_ipaddr,
conn->ksnc_port,
conn->ksnc_rx_state,
- conn->ksnc_rx_nob_wanted,
+ iov_iter_count(&conn->ksnc_rx_to),
conn->ksnc_rx_nob_left);
return conn;
}
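The socklnd_cb.c rework collapses the separate kvec and bio_vec receive paths into a single struct iov_iter (ksnc_rx_to): the destination is described once with iov_iter_kvec(), progress after each recvmsg() is recorded with iov_iter_advance(), and "is anything left?" becomes iov_iter_count(). A hedged, self-contained sketch of that bookkeeping, using the older iov_iter_kvec() signature seen in this diff (the iterator type is ORed into the direction argument; struct and field names are illustrative):

#include <linux/fs.h>		/* READ */
#include <linux/uio.h>		/* struct iov_iter, struct kvec */
#include <linux/errno.h>

struct rx_state_example {
	struct kvec	hdr_kvec;	/* backing segment for the header */
	struct iov_iter	rx_to;		/* what is still left to receive */
	char		hdr[64];
};

/* Describe the receive destination once, as a single-segment iterator. */
static void rx_expect_header(struct rx_state_example *rx)
{
	rx->hdr_kvec.iov_base = rx->hdr;
	rx->hdr_kvec.iov_len  = sizeof(rx->hdr);
	iov_iter_kvec(&rx->rx_to, READ | ITER_KVEC,
		      &rx->hdr_kvec, 1, sizeof(rx->hdr));
}

/* After recvmsg() returned 'nob' bytes, consume them from the iterator. */
static int rx_consumed(struct rx_state_example *rx, size_t nob)
{
	iov_iter_advance(&rx->rx_to, nob);
	/* Anything left means a partial receive; the caller retries later. */
	return iov_iter_count(&rx->rx_to) ? -EAGAIN : 0;
}

ksocknal_recv() can then simply copy the caller-supplied iterator (conn->ksnc_rx_to = *to) instead of re-extracting it into private kvec or bio_vec arrays.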
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 9c328dc6537b..cb28dd2baf2f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -161,94 +162,39 @@ ksocknal_lib_eager_ack(struct ksock_conn *conn)
sizeof(opt));
}
-int
-ksocknal_lib_recv_iov(struct ksock_conn *conn)
+static int lustre_csum(struct kvec *v, void *context)
{
- unsigned int niov = conn->ksnc_rx_niov;
- struct kvec *iov = conn->ksnc_rx_iov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
- int rc;
- int fragnob;
- int sum;
- __u32 saved_csum;
-
- LASSERT(niov > 0);
-
- for (nob = i = 0; i < niov; i++)
- nob += iov[i].iov_len;
-
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, niov, nob);
- rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
-
- saved_csum = 0;
- if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
- saved_csum = conn->ksnc_msg.ksm_csum;
- conn->ksnc_msg.ksm_csum = 0;
- }
-
- if (saved_csum) {
- /* accumulate checksum */
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- fragnob = iov[i].iov_len;
- if (fragnob > sum)
- fragnob = sum;
-
- conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
- iov[i].iov_base,
- fragnob);
- }
- conn->ksnc_msg.ksm_csum = saved_csum;
- }
-
- return rc;
+ struct ksock_conn *conn = context;
+ conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
+ v->iov_base, v->iov_len);
+ return 0;
}
int
-ksocknal_lib_recv_kiov(struct ksock_conn *conn)
+ksocknal_lib_recv(struct ksock_conn *conn)
{
- unsigned int niov = conn->ksnc_rx_nkiov;
- struct bio_vec *kiov = conn->ksnc_rx_kiov;
- struct msghdr msg = {
- .msg_flags = 0
- };
- int nob;
- int i;
+ struct msghdr msg = { .msg_iter = conn->ksnc_rx_to };
+ __u32 saved_csum;
int rc;
- void *base;
- int sum;
- int fragnob;
- for (nob = i = 0; i < niov; i++)
- nob += kiov[i].bv_len;
-
- LASSERT(nob <= conn->ksnc_rx_nob_wanted);
-
- iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, kiov, niov, nob);
rc = sock_recvmsg(conn->ksnc_sock, &msg, MSG_DONTWAIT);
+ if (rc <= 0)
+ return rc;
- if (conn->ksnc_msg.ksm_csum) {
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT(i < niov);
-
- base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
- fragnob = kiov[i].bv_len;
- if (fragnob > sum)
- fragnob = sum;
+ saved_csum = conn->ksnc_msg.ksm_csum;
+ if (!saved_csum)
+ return rc;
- conn->ksnc_rx_csum = crc32_le(conn->ksnc_rx_csum,
- base, fragnob);
+ /* header is included only in V2 - V3 checksums only the bulk data */
+ if (!(conn->ksnc_rx_to.type & ITER_BVEC) &&
+ conn->ksnc_proto != &ksocknal_protocol_v2x)
+ return rc;
+
+ /* accumulate checksum */
+ conn->ksnc_msg.ksm_csum = 0;
+ iov_iter_for_each_range(&conn->ksnc_rx_to, rc, lustre_csum, conn);
+ conn->ksnc_msg.ksm_csum = saved_csum;
- kunmap(kiov[i].bv_page);
- }
- }
return rc;
}
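With the iterator in place, ksocknal_lib_recv() hands it straight to sock_recvmsg() through msg.msg_iter and, when a software checksum is required, walks the same byte range with iov_iter_for_each_range() and a small per-segment callback (lustre_csum above). A hedged sketch of that idiom, using the helper as it exists in this tree (the context struct is illustrative):

#include <linux/uio.h>
#include <linux/crc32.h>
#include <linux/types.h>

struct csum_ctx_example {
	u32 crc;
};

/* Called once for each contiguous segment of the walked range. */
static int csum_one_range(struct kvec *v, void *context)
{
	struct csum_ctx_example *ctx = context;

	ctx->crc = crc32_le(ctx->crc, v->iov_base, v->iov_len);
	return 0;	/* a non-zero return would stop the walk early */
}

/* Accumulate a CRC over the first 'received' bytes of the iterator. */
static u32 csum_received(struct iov_iter *iter, size_t received)
{
	struct csum_ctx_example ctx = { .crc = ~0 };

	/* Handles kvec and bvec iterators alike, kmapping pages as needed;
	 * the iterator itself is not advanced by the walk.
	 */
	iov_iter_for_each_range(iter, received, csum_one_range, &ctx);
	return ctx.crc;
}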
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index fc7eec83ac07..5663a4ca94d4 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 84be9a518190..d827f770e831 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile
index 215fa23827d1..1607570ef8de 100644
--- a/drivers/staging/lustre/lnet/libcfs/Makefile
+++ b/drivers/staging/lustre/lnet/libcfs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 1ab394c1fabc..551c45bf4108 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 24f4701a7a1e..5d501beeb622 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 49a04a2b4ec4..f4f67d2b301e 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 2ddd09a83cd0..e3a4c67a66b5 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 77fd3d06cde9..f6a0040f4ab1 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index 1a0c7cad5983..df93d8f77ea2 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 333e47febf87..bcac5074bf80 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 2da051c0d251..51823ce71773 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index db0572733712..2e5d311d2438 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 55663390b608..80072b2a443c 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index d0b3aa80cfa6..5616e9ea1450 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
index 528d49794881..1d8949f1a4fa 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 972677bdf6bc..0092166af258 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index 3f5dec153571..963df0ef4afb 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 435722175cce..b5746230ab31 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 4e331e71083d..6f92ea272186 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 16a3ae791bb6..7928d7182634 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 6aed98fc9688..4ead55920e79 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index 963ef4ae93b1..f47cf67a92e3 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index f916b475e767..da2844f37edf 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index c3547cd4c72c..a29d6eb3a785 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index 038ed8c52107..6a05d9bab8dc 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
index fd8585cd0ce2..0a9d70924fe0 100644
--- a/drivers/staging/lustre/lnet/lnet/Makefile
+++ b/drivers/staging/lustre/lnet/lnet/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index be2823f8eb02..ee85cab6f437 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index ad835035fffa..7caff290c146 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 26841a7b6213..0cf0f4f99435 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 6b446a51eeac..daf744277003 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index a0aef4b9bce3..ac5b9593d597 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index f52a5e8ed386..dd5d3cf6d3e2 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index bc0779c02d97..68d16ffec980 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -889,7 +890,7 @@ lnet_return_rx_credits_locked(struct lnet_msg *msg)
*/
LASSERT(msg->msg_kiov);
- rb = list_entry(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
+ rb = container_of(msg->msg_kiov, struct lnet_rtrbuf, rb_kiov[0]);
rbp = rb->rb_pool;
msg->msg_kiov = NULL;
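
The hunk above swaps list_entry() for container_of(). list_entry() is only a wrapper around container_of(), so the generated code is identical; spelling out container_of() documents that rb_kiov[0] is an ordinary member being mapped back to its enclosing struct lnet_rtrbuf, not a list linkage. A minimal sketch of the idiom, using an illustrative structure rather than the lnet definitions:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/mm_types.h>	/* struct page */

	struct rtr_buf {			/* hypothetical container */
		struct page *pages[4];		/* payload member */
		int          credits;
	};

	static struct rtr_buf *buf_from_first_page(struct page **first_page)
	{
		/* Same pointer arithmetic as list_entry(), but the member is
		 * not a struct list_head, so container_of() reads better.
		 */
		return container_of(first_page, struct rtr_buf, pages[0]);
	}
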
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index d04875e3956f..c72ef05b2420 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index 5946848a7846..8ae93bf6fd1b 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d0add0c0de3..539a26444f31 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 80c06f4b0c8d..7456b989e451 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 7d12a7fb36a4..c0c4723f72fd 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
index 03f3d18a1a29..5a5d1811ffbe 100644
--- a/drivers/staging/lustre/lnet/lnet/net_fault.c
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -629,6 +630,7 @@ delayed_msg_process(struct list_head *msg_list, bool drop)
case LNET_CREDIT_OK:
lnet_ni_recv(ni, msg->msg_private, msg, 0,
0, msg->msg_len, msg->msg_len);
+ /* fall through */
case LNET_CREDIT_WAIT:
continue;
default: /* failures */
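
Several hunks in this series (here and in selftest/conctl.c, selftest/module.c and selftest/rpc.c below) mark intentional switch fall-through with a /* fall through */ comment so GCC's -Wimplicit-fallthrough does not flag the case as a forgotten break; later kernels replace the comment with the fallthrough; pseudo-keyword. A minimal sketch of the pattern, with hypothetical handlers:

	switch (rc) {
	case CREDIT_OK:
		deliver(msg);		/* consume the message ... */
		/* fall through */
	case CREDIT_WAIT:		/* ... then also park it for credits */
		park(msg);
		break;
	default:
		drop(msg);		/* failure paths */
		break;
	}
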
@@ -698,9 +700,9 @@ lnet_delay_rule_daemon(void *arg)
}
static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
{
- struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+ struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
spin_lock_bh(&delay_dd.dd_lock);
if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
@@ -760,7 +762,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+ timer_setup(&rule->dl_timer, delay_timer_cb, 0);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
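
This is the tree-wide timer API conversion: the callback now receives a struct timer_list * instead of an unsigned long cookie, the enclosing object is recovered with from_timer() (a container_of() wrapper), and setup_timer() with its cast gives way to timer_setup(). A minimal sketch of the pattern, with illustrative names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct delay_rule {
		struct timer_list timer;	/* embedded timer */
		unsigned long     hits;
	};

	static void delay_cb(struct timer_list *t)
	{
		/* from_timer(var, timer, field) wraps container_of() */
		struct delay_rule *rule = from_timer(rule, t, timer);

		rule->hits++;
	}

	static void delay_rule_start(struct delay_rule *rule)
	{
		timer_setup(&rule->timer, delay_cb, 0);	/* no TIMER_* flags */
		mod_timer(&rule->timer, jiffies + HZ);	/* fire in ~1 second */
	}
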
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index 7bd1e6f389aa..05b120c2d45a 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 4d55df8ff74e..5e94ad349454 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 3df101bafd9f..88283ca3f860 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
@@ -222,15 +223,12 @@ struct lnet_remotenet *
lnet_find_net_locked(__u32 net)
{
struct lnet_remotenet *rnet;
- struct list_head *tmp;
struct list_head *rn_list;
LASSERT(!the_lnet.ln_shutdown);
rn_list = lnet_net2rnethash(net);
- list_for_each(tmp, rn_list) {
- rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);
-
+ list_for_each_entry(rnet, rn_list, lrn_list) {
if (rnet->lrn_net == net)
return rnet;
}
@@ -243,7 +241,6 @@ static void lnet_shuffle_seed(void)
__u32 lnd_type, seed[2];
struct timespec64 ts;
struct lnet_ni *ni;
- struct list_head *tmp;
if (seeded)
return;
@@ -254,8 +251,7 @@ static void lnet_shuffle_seed(void)
* Nodes with small feet have little entropy
* the NID for this node gives the most entropy in the low bits
*/
- list_for_each(tmp, &the_lnet.ln_nis) {
- ni = list_entry(tmp, struct lnet_ni, ni_list);
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
if (lnd_type != LOLND)
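
The two hunks above in router.c collapse the open-coded list_for_each() plus list_entry() pair into list_for_each_entry(), dropping the struct list_head *tmp cursor variable. A minimal sketch with an illustrative structure:

	#include <linux/list.h>

	struct remote_net {
		struct list_head link;
		u32              net_id;
	};

	static struct remote_net *find_net(struct list_head *head, u32 net_id)
	{
		struct remote_net *rnet;

		/* iterator hands back the entry type directly; no list_entry() */
		list_for_each_entry(rnet, head, link) {
			if (rnet->net_id == net_id)
				return rnet;
		}
		return NULL;
	}
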
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 4a994d113c7d..d32d653edcb0 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index f8b9175f08d4..f1ee219bc8f3 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 9619ecbf8bdf..082c0afacf23 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -151,6 +152,7 @@ lst_debug_ioctl(struct lstio_debug_args *args)
case LST_OPC_BATCHSRV:
client = 0;
+ /* fall through */
case LST_OPC_BATCHCLI:
if (!name)
goto out;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 196d23c10921..6a0f770e0e24 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 239323679baa..374a5f31ef6f 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 289b202c3b36..a2662638d599 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 143eae9b8d71..3933ed4cca93 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index ef27bfffc230..fe889607ff3f 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index b5d556fa48ab..1d44d912f014 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -57,10 +58,13 @@ lnet_selftest_exit(void)
switch (lst_init_step) {
case LST_INIT_CONSOLE:
lstcon_console_fini();
+ /* fall through */
case LST_INIT_FW:
sfw_shutdown();
+ /* fall through */
case LST_INIT_RPC:
srpc_shutdown();
+ /* fall through */
case LST_INIT_WI_TEST:
for (i = 0;
i < cfs_cpt_number(lnet_cpt_table()); i++) {
@@ -72,7 +76,7 @@ lnet_selftest_exit(void)
sizeof(lst_sched_test[0]) *
cfs_cpt_number(lnet_cpt_table()));
lst_sched_test = NULL;
-
+ /* fall through */
case LST_INIT_WI_SERIAL:
cfs_wi_sched_destroy(lst_sched_serial);
lst_sched_serial = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9653ac6fd619..f54bd630dbf8 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 77c222cca230..ab7e8a8e58b8 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1037,6 +1038,7 @@ srpc_handle_rpc(struct swi_workitem *wi)
ev->ev_status = rc;
}
}
+ /* fall through */
case SWI_STATE_BULK_STARTED:
LASSERT(!rpc->srpc_bulk || ev->ev_fired);
@@ -1237,7 +1239,8 @@ srpc_send_rpc(struct swi_workitem *wi)
break;
wi->swi_state = SWI_STATE_REQUEST_SENT;
- /* perhaps more events, fall thru */
+ /* perhaps more events */
+ /* fall through */
case SWI_STATE_REQUEST_SENT: {
enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
@@ -1269,6 +1272,7 @@ srpc_send_rpc(struct swi_workitem *wi)
wi->swi_state = SWI_STATE_REPLY_RECEIVED;
}
+ /* fall through */
case SWI_STATE_REPLY_RECEIVED:
if (do_bulk && !rpc->crpc_bulkev.ev_fired)
break;
@@ -1448,6 +1452,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
srpc_data.rpc_counters.rpcs_sent++;
spin_unlock(&srpc_data.rpc_glock);
}
+ /* fall through */
case SRPC_REPLY_RCVD:
case SRPC_BULK_REQ_RCVD:
crpc = rpcev->ev_data;
@@ -1570,7 +1575,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
if (!ev->unlinked)
break; /* wait for final event */
-
+ /* fall through */
case SRPC_BULK_PUT_SENT:
if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
spin_lock(&srpc_data.rpc_glock);
@@ -1582,6 +1587,7 @@ srpc_lnet_ev_handler(struct lnet_event *ev)
spin_unlock(&srpc_data.rpc_glock);
}
+ /* fall through */
case SRPC_REPLY_SENT:
srpc = rpcev->ev_data;
scd = srpc->srpc_scd;
@@ -1674,14 +1680,14 @@ srpc_shutdown(void)
spin_unlock(&srpc_data.rpc_glock);
stt_shutdown();
-
+ /* fall through */
case SRPC_STATE_EQ_INIT:
rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
LASSERT(!rc);
rc = LNetEQFree(srpc_data.rpc_lnet_eq);
LASSERT(!rc); /* the EQ should have no user by now */
-
+ /* fall through */
case SRPC_STATE_NI_INIT:
LNetNIFini();
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 7bb442a8e698..465b5b534423 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 7adad4302dcf..8c10f0f149d5 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 2fe692df19d0..ab125a8524c5 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 441d6d6b4f8e..7f0ef9bd0cda 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index f48ab9d21428..b7b8f900df8e 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index c21a5f5b7621..9577da33e666 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index ba736239243c..009c2367f74e 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -279,7 +280,8 @@ int seq_client_alloc_fid(const struct lu_env *env,
*fid = seq->lcs_fid;
mutex_unlock(&seq->lcs_mutex);
- CDEBUG(D_INFO, "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid));
+ CDEBUG(D_INFO,
+ "%s: Allocated FID " DFID "\n", seq->lcs_name, PFID(fid));
return rc;
}
EXPORT_SYMBOL(seq_client_alloc_fid);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 1a269fbc4b47..083419f77697 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index b723ece02eff..7d6a7106c0a5 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index fe6f278a7d9f..b5e3abaa508a 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 5b180830eec0..068c364adda8 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -425,7 +426,8 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
target = fld_client_get_target(fld, seq);
LASSERT(target);
- CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
+ CDEBUG(D_INFO,
+ "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
res.lsr_start = seq;
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 6cae803fc8d2..1a6a76110c3e 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 9ba184b6017f..90419dca2e1e 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/interval_tree.h b/drivers/staging/lustre/lustre/include/interval_tree.h
index a4d7280e1fa4..7d119c1a0469 100644
--- a/drivers/staging/lustre/lustre/include/interval_tree.h
+++ b/drivers/staging/lustre/lustre/include/interval_tree.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
index 925271db4554..0433b79efdcb 100644
--- a/drivers/staging/lustre/lustre/include/llog_swab.h
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 98d6b1364c21..835a729dd8d0 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 4f213c408cfa..34e35fbff978 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1130,7 +1131,7 @@ struct lu_context_key {
{ \
type *value; \
\
- BUILD_BUG_ON(PAGE_SIZE < sizeof(*value)); \
+ BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1303,8 +1304,6 @@ struct lu_buf {
size_t lb_len;
};
-#define DLUBUF "(%p %zu)"
-#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
/**
* One-time initializers, called at obdclass module initialization, not
* exported.
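
The lu_object.h hunk above rewrites the compile-time size check so the quantity under test comes first; BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE) and BUILD_BUG_ON(PAGE_SIZE < sizeof(*value)) are the same predicate, the new spelling just reads as "the value must fit in a page", which is what the kzalloc() below it relies on. The DLUBUF/PLUBUF hunk drops unused macros. A minimal sketch of the assertion pattern, with an illustrative structure:

	#include <linux/bug.h>		/* BUILD_BUG_ON() */
	#include <linux/mm.h>		/* PAGE_SIZE */
	#include <linux/slab.h>		/* kzalloc() */

	struct ctx_value {
		char payload[512];	/* hypothetical per-context state */
	};

	static struct ctx_value *ctx_value_alloc(void)
	{
		/* Fails the build, not the boot, if the value outgrows a page. */
		BUILD_BUG_ON(sizeof(struct ctx_value) > PAGE_SIZE);

		return kzalloc(sizeof(struct ctx_value), GFP_NOFS);
	}
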
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
index f7dfd83951ee..ad0c24d29ffa 100644
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
diff --git a/drivers/staging/lustre/lustre/include/lustre_acl.h b/drivers/staging/lustre/lustre/include/lustre_acl.h
index 9786f6caaade..35ff61ce4e9d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_acl.h
+++ b/drivers/staging/lustre/lustre/include/lustre_acl.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 69bfd6a6e0f9..9f488e605083 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_debug.h b/drivers/staging/lustre/lustre/include/lustre_debug.h
index 0be6a534f712..721a81f923e3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_debug.h
+++ b/drivers/staging/lustre/lustre/include/lustre_debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 2d862b32265b..8f1a22527006 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 13c3d2fd31a8..e0b17052b2ea 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 11331ae81d58..53db031c4c8c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* -*- buffer-read-only: t -*- vi: set ro:
*
* This program is free software; you can redistribute it and/or modify
@@ -136,7 +137,8 @@
#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
/**
- * measure lock contention and return -EUSERS if locking contention is high */
+ * measure lock contention and return -EUSERS if locking contention is high
+ */
#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL /* bit 30 */
#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG((_l), 1ULL << 30)
#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG((_l), 1ULL << 30)
@@ -144,7 +146,8 @@
/**
* These are flags that are mapped into the flags and ASTs of blocking
- * locks Add FL_DISCARD to blocking ASTs */
+ * locks Add FL_DISCARD to blocking ASTs
+ */
#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL /* bit 31 */
#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG((_l), 1ULL << 31)
#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG((_l), 1ULL << 31)
diff --git a/drivers/staging/lustre/lustre/include/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre_errno.h
index 35aefa2cdad1..59fbb9f47ff1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_errno.h
+++ b/drivers/staging/lustre/lustre/include/lustre_errno.h
@@ -70,16 +70,14 @@
#define LUSTRE_EROFS 30 /* Read-only file system */
#define LUSTRE_EMLINK 31 /* Too many links */
#define LUSTRE_EPIPE 32 /* Broken pipe */
-#define LUSTRE_EDOM 33 /* Math argument out of domain of
- func */
+#define LUSTRE_EDOM 33 /* Math argument out of func domain */
#define LUSTRE_ERANGE 34 /* Math result not representable */
#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
#define LUSTRE_ENOLCK 37 /* No record locks available */
#define LUSTRE_ENOSYS 38 /* Function not implemented */
#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
-#define LUSTRE_ELOOP 40 /* Too many symbolic links
- encountered */
+#define LUSTRE_ELOOP 40 /* Too many symbolic links found */
#define LUSTRE_ENOMSG 42 /* No message of desired type */
#define LUSTRE_EIDRM 43 /* Identifier removed */
#define LUSTRE_ECHRNG 44 /* Channel number out of range */
@@ -112,23 +110,17 @@
#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
#define LUSTRE_EDOTDOT 73 /* RFS specific error */
#define LUSTRE_EBADMSG 74 /* Not a data message */
-#define LUSTRE_EOVERFLOW 75 /* Value too large for defined data
- type */
+#define LUSTRE_EOVERFLOW 75 /* Value too large for data type */
#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
#define LUSTRE_EREMCHG 78 /* Remote address changed */
-#define LUSTRE_ELIBACC 79 /* Can not access a needed shared
- library */
-#define LUSTRE_ELIBBAD 80 /* Accessing a corrupted shared
- library */
+#define LUSTRE_ELIBACC 79 /* Can't access needed shared library */
+#define LUSTRE_ELIBBAD 80 /* Access corrupted shared library */
#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
-#define LUSTRE_ELIBMAX 82 /* Attempting to link in too many shared
- libraries */
-#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared library
- directly */
+#define LUSTRE_ELIBMAX 82 /* Trying to link too many libraries */
+#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared lib directly */
#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
-#define LUSTRE_ERESTART 85 /* Interrupted system call should be
- restarted */
+#define LUSTRE_ERESTART 85 /* Restart interrupted system call */
#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
#define LUSTRE_EUSERS 87 /* Too many users */
#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
@@ -138,26 +130,20 @@
#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
-#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported on transport
- endpoint */
+#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported */
#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
-#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported by
- protocol */
+#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported */
#define LUSTRE_EADDRINUSE 98 /* Address already in use */
#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define LUSTRE_ENETDOWN 100 /* Network is down */
#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
-#define LUSTRE_ENETRESET 102 /* Network dropped connection because of
- reset */
+#define LUSTRE_ENETRESET 102 /* Network connection drop for reset */
#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
#define LUSTRE_ENOBUFS 105 /* No buffer space available */
-#define LUSTRE_EISCONN 106 /* Transport endpoint is already
- connected */
-#define LUSTRE_ENOTCONN 107 /* Transport endpoint is not
- connected */
-#define LUSTRE_ESHUTDOWN 108 /* Cannot send after transport endpoint
- shutdown */
+#define LUSTRE_EISCONN 106 /* Transport endpoint is connected */
+#define LUSTRE_ENOTCONN 107 /* Transport endpoint not connected */
+#define LUSTRE_ESHUTDOWN 108 /* Cannot send after shutdown */
#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
@@ -185,8 +171,7 @@
#define LUSTRE_ERESTARTNOINTR 513
#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
-#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart by calling
- sys_restart_syscall */
+#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart via sys_restart_syscall */
#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
@@ -194,10 +179,8 @@
#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
-#define LUSTRE_EJUKEBOX 528 /* Request initiated, but will not
- complete before timeout */
-#define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
- event */
+#define LUSTRE_EJUKEBOX 528 /* Request won't finish until timeout */
+#define LUSTRE_EIOCBQUEUED 529 /* iocb queued await completion event */
#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
/*
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3631a69a5c6f..66ac9dc7302a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index e0f2b8295775..d19c7a27ee48 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 6125eb0d3395..4055bbd24c55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index dec1e99d594d..cbd68985ada9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index d49932628f32..c48c97362cf6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index d71d0473a4eb..ea158e0630e2 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_intent.h b/drivers/staging/lustre/lustre/include/lustre_intent.h
index ed2b6c674109..519e94fc089d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_intent.h
+++ b/drivers/staging/lustre/lustre/include/lustre_intent.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
index f1899a3d7a40..2b3fa8430185 100644
--- a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 81b9cbffc050..ca1dce15337e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_linkea.h b/drivers/staging/lustre/lustre/include/lustre_linkea.h
index 3ff008fee13d..03db1511bfd3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_linkea.h
+++ b/drivers/staging/lustre/lustre/include/lustre_linkea.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index 98a82be2037f..f4298e5f7543 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 24a7777424f6..07f4e600386b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index c0c44974cb1c..007e1ec3f0f4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_mds.h b/drivers/staging/lustre/lustre/include/lustre_mds.h
index c424e1239fd5..6937546f1d46 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mds.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mds.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index c6d1646f102a..3ff5de4770e8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
index 51f45f7776df..ffa7317da35b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_nrs.h
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
index 3b5418eac6c4..b70d97d4acbb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_obdo.h b/drivers/staging/lustre/lustre/include/lustre_obdo.h
index 53379f861161..d67dcbb84f18 100644
--- a/drivers/staging/lustre/lustre/include/lustre_obdo.h
+++ b/drivers/staging/lustre/lustre/include/lustre_obdo.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
index 5842cb18b49e..ce28ed5c1ef8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_patchless_compat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index cd62ccd53e2c..213d0a01adcf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 03a970bcac55..a40f706a53a1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
index 765e923c2fc9..9d786bbe7f3f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_swab.h
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index a986737ec010..4368f4e9f208 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index cda3d2808d2f..e5f7bb20415d 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 976005a1e0b2..67c535c5aa98 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index aea193a882a2..3f4fe290f6ea 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
index d7175485944d..9450da728160 100644
--- a/drivers/staging/lustre/lustre/include/seq_range.h
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index 19e285dd2ee1..8df7a4463c21 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index 57fd84effdfa..0662cec14b81 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 2cc6dc2b281f..fac9d19d50b6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -207,7 +208,8 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
continue;
if (ldlm_extent_overlap(&lck->l_req_extent,
&lock->l_req_extent)) {
- CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
+ CDEBUG(D_ERROR,
+ "granting conflicting lock %p %p\n",
lck, lock);
ldlm_resource_dump(D_ERROR, res);
LBUG();
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index cb826e9e840e..657ab95091a0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -59,17 +60,6 @@
#include <linux/list.h>
#include "ldlm_internal.h"
-/**
- * list_for_remaining_safe - iterate over the remaining entries in a list
- * and safeguard against removal of a list entry.
- * \param pos the &struct list_head to use as a loop counter. pos MUST
- * have been initialized prior to using it in this macro.
- * \param n another &struct list_head to use as temporary storage
- * \param head the head for your list.
- */
-#define list_for_remaining_safe(pos, n, head) \
- for (n = pos->next; pos != (head); pos = n, n = pos->next)
-
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
@@ -88,24 +78,23 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode)
{
- LDLM_DEBUG(lock, "%s(mode: %d, flags: 0x%llx)",
- __func__, mode, flags);
+ LDLM_DEBUG(lock, "%s(mode: %d)",
+ __func__, mode);
/* Safe to not lock here, since it should be empty anyway */
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC) {
- /* client side - set a flag to prevent sending a CANCEL */
- lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
- /* when reaching here, it is under lock_res_and_lock(). Thus,
- * need call the nolock version of ldlm_lock_decref_internal
- */
- ldlm_lock_decref_internal_nolock(lock, mode);
- }
+ /* client side - set a flag to prevent sending a CANCEL */
+ lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+
+ /* when reaching here, it is under lock_res_and_lock(). Thus,
+ * need call the nolock version of ldlm_lock_decref_internal
+ */
+ ldlm_lock_decref_internal_nolock(lock, mode);
ldlm_lock_destroy_nolock(lock);
}
@@ -121,129 +110,45 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
* It is also responsible for splitting a lock if a portion of the lock
* is released.
*
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- * - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- * - blocking ASTs have not been sent yet, so list of conflicting locks
- * would be collected and ASTs sent.
*/
-static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
- int first_enq, enum ldlm_error *err,
- struct list_head *work_list)
+static int ldlm_process_flock_lock(struct ldlm_lock *req)
{
struct ldlm_resource *res = req->l_resource;
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- struct list_head *tmp;
- struct list_head *ownlocks = NULL;
- struct ldlm_lock *lock = NULL;
+ struct ldlm_lock *tmp;
+ struct ldlm_lock *lock;
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
enum ldlm_mode mode = req->l_req_mode;
int added = (mode == LCK_NL);
- int overlaps = 0;
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { };
CDEBUG(D_DLMTRACE,
- "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
- *flags, new->l_policy_data.l_flock.owner,
+ "owner %llu pid %u mode %u start %llu end %llu\n",
+ new->l_policy_data.l_flock.owner,
new->l_policy_data.l_flock.pid, mode,
req->l_policy_data.l_flock.start,
req->l_policy_data.l_flock.end);
- *err = ELDLM_OK;
-
/* No blocking ASTs are sent to the clients for
* Posix file & record locks
*/
req->l_blocking_ast = NULL;
reprocess:
- if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
- /* This loop determines where this processes locks start
- * in the resource lr_granted list.
- */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
- if (ldlm_same_flock_owner(lock, req)) {
- ownlocks = tmp;
- break;
- }
- }
- } else {
- int reprocess_failed = 0;
-
- lockmode_verify(mode);
-
- /* This loop determines if there are existing locks
- * that conflict with the new lock request.
- */
- list_for_each(tmp, &res->lr_granted) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
-
- if (ldlm_same_flock_owner(lock, req)) {
- if (!ownlocks)
- ownlocks = tmp;
- continue;
- }
-
- /* locks are compatible, overlap doesn't matter */
- if (lockmode_compat(lock->l_granted_mode, mode))
- continue;
-
- if (!ldlm_flocks_overlap(lock, req))
- continue;
-
- if (!first_enq) {
- reprocess_failed = 1;
- continue;
- }
-
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = -EAGAIN;
- return LDLM_ITER_STOP;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = lock->l_granted_mode;
- req->l_policy_data.l_flock.pid =
- lock->l_policy_data.l_flock.pid;
- req->l_policy_data.l_flock.start =
- lock->l_policy_data.l_flock.start;
- req->l_policy_data.l_flock.end =
- lock->l_policy_data.l_flock.end;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
-
- ldlm_resource_add_lock(res, &res->lr_waiting, req);
- *flags |= LDLM_FL_BLOCK_GRANTED;
- return LDLM_ITER_STOP;
- }
- if (reprocess_failed)
- return LDLM_ITER_CONTINUE;
- }
-
- if (*flags & LDLM_FL_TEST_LOCK) {
- ldlm_flock_destroy(req, mode, *flags);
- req->l_req_mode = LCK_NL;
- *flags |= LDLM_FL_LOCK_CHANGED;
- return LDLM_ITER_STOP;
- }
+ /* This loop determines where this processes locks start
+ * in the resource lr_granted list.
+ */
+ list_for_each_entry(lock, &res->lr_granted, l_res_link)
+ if (ldlm_same_flock_owner(lock, req))
+ break;
- /* Scan the locks owned by this process that overlap this request.
+ /* Scan the locks owned by this process to find the insertion point
+ * (as locks are ordered), and to handle overlaps.
* We may have to merge or split existing locks.
*/
- if (!ownlocks)
- ownlocks = &res->lr_granted;
-
- list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
- lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+ list_for_each_entry_safe_from(lock, tmp, &res->lr_granted, l_res_link) {
if (!ldlm_same_flock_owner(lock, new))
break;
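
The private list_for_remaining_safe() macro removed earlier in this file is replaced by the stock list_for_each_entry_safe_from(), which starts from the current entry rather than the list head and caches the next entry so the current one may be deleted mid-walk. A minimal sketch of the iterator, with an illustrative structure:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct flock_ent {
		struct list_head link;
		u64              owner;
	};

	static void drop_remaining_for_owner(struct flock_ent *start,
					     struct list_head *head, u64 owner)
	{
		struct flock_ent *ent = start, *tmp;

		/* "_safe_from": begin at 'ent' (not head->next); 'tmp' holds
		 * the next entry, so list_del_init() on the current one is safe.
		 */
		list_for_each_entry_safe_from(ent, tmp, head, link) {
			if (ent->owner != owner)
				break;
			list_del_init(&ent->link);
			kfree(ent);	/* assumes entries were kmalloc()ed */
		}
	}
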
@@ -283,7 +188,7 @@ reprocess:
}
if (added) {
- ldlm_flock_destroy(lock, mode, *flags);
+ ldlm_flock_destroy(lock, mode);
} else {
new = lock;
added = 1;
@@ -299,8 +204,6 @@ reprocess:
lock->l_policy_data.l_flock.start)
break;
- ++overlaps;
-
if (new->l_policy_data.l_flock.start <=
lock->l_policy_data.l_flock.start) {
if (new->l_policy_data.l_flock.end <
@@ -309,7 +212,7 @@ reprocess:
new->l_policy_data.l_flock.end + 1;
break;
}
- ldlm_flock_destroy(lock, lock->l_req_mode, *flags);
+ ldlm_flock_destroy(lock, lock->l_req_mode);
continue;
}
if (new->l_policy_data.l_flock.end >=
@@ -340,9 +243,7 @@ reprocess:
NULL, 0, LVB_T_NONE);
lock_res_and_lock(req);
if (IS_ERR(new2)) {
- ldlm_flock_destroy(req, lock->l_granted_mode,
- *flags);
- *err = PTR_ERR(new2);
+ ldlm_flock_destroy(req, lock->l_granted_mode);
return LDLM_ITER_STOP;
}
goto reprocess;
@@ -371,12 +272,11 @@ reprocess:
&new2->l_remote_handle,
&new2->l_exp_hash);
}
- if (*flags == LDLM_FL_WAIT_NOREPROC)
- ldlm_lock_addref_internal_nolock(new2,
- lock->l_granted_mode);
+ ldlm_lock_addref_internal_nolock(new2,
+ lock->l_granted_mode);
/* insert new2 at lock */
- ldlm_resource_add_lock(res, ownlocks, new2);
+ ldlm_resource_add_lock(res, &lock->l_res_link, new2);
LDLM_LOCK_RELEASE(new2);
break;
}
@@ -390,17 +290,12 @@ reprocess:
if (!added) {
list_del_init(&req->l_res_link);
- /* insert new lock before ownlocks in list. */
- ldlm_resource_add_lock(res, ownlocks, req);
- }
-
- if (*flags != LDLM_FL_WAIT_NOREPROC) {
- /* The only one possible case for client-side calls flock
- * policy function is ldlm_flock_completion_ast inside which
- * carries LDLM_FL_WAIT_NOREPROC flag.
+ /* insert new lock before "lock", which might be the
+ * next lock for this owner, or might be the first
+ * lock for the next owner, or might not be a lock at
+ * all, but instead points at the head of the list
*/
- CERROR("Illegal parameter for client-side-only module.\n");
- LBUG();
+ ldlm_resource_add_lock(res, &lock->l_res_link, req);
}
/* In case we're reprocessing the requested lock we can't destroy
@@ -409,7 +304,7 @@ reprocess:
* could be freed before the completion AST can be sent.
*/
if (added)
- ldlm_flock_destroy(req, mode, *flags);
+ ldlm_flock_destroy(req, mode);
ldlm_resource_dump(D_INFO, res);
return LDLM_ITER_CONTINUE;
@@ -417,7 +312,6 @@ reprocess:
struct ldlm_flock_wait_data {
struct ldlm_lock *fwd_lock;
- int fwd_generation;
};
static void
@@ -448,12 +342,9 @@ int
ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
struct file_lock *getlk = lock->l_ast_data;
- struct obd_device *obd;
- struct obd_import *imp = NULL;
- struct ldlm_flock_wait_data fwd;
- struct l_wait_info lwi;
- enum ldlm_error err;
- int rc = 0;
+ struct ldlm_flock_wait_data fwd;
+ struct l_wait_info lwi;
+ int rc = 0;
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
@@ -479,20 +370,9 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, sleeping");
fwd.fwd_lock = lock;
- obd = class_exp2obd(lock->l_conn_export);
-
- /* if this is a local lock, there is no import */
- if (obd)
- imp = obd->u.cli.cl_import;
-
- if (imp) {
- spin_lock(&imp->imp_lock);
- fwd.fwd_generation = imp->imp_generation;
- spin_unlock(&imp->imp_lock);
- }
-
lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
/* Go to sleep until the lock is granted. */
@@ -561,10 +441,11 @@ granted:
mode = lock->l_granted_mode;
if (ldlm_is_flock_deadlock(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ LDLM_DEBUG(lock,
+ "client-side enqueue deadlock received");
rc = -EDEADLK;
}
- ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, mode);
unlock_res_and_lock(lock);
/* Need to wake up the waiter if we were evicted */
@@ -585,7 +466,7 @@ granted:
* in the lock changes we can decref the appropriate refcount.
*/
LASSERT(ldlm_is_test_lock(lock));
- ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
+ ldlm_flock_destroy(lock, getlk->fl_type);
switch (lock->l_granted_mode) {
case LCK_PR:
getlk->fl_type = F_RDLCK;
@@ -600,12 +481,10 @@ granted:
getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
} else {
- __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
-
/* We need to reprocess the lock to do merges or splits
* with existing locks owned by this process.
*/
- ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
+ ldlm_process_flock_lock(lock);
}
unlock_res_and_lock(lock);
return rc;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index fcb6e44bd319..2926208cdfa1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 36808dbe8790..bc33ca100620 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -88,7 +89,7 @@ struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client);
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
- LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel aged locks (non lru resize). */
+ LDLM_LRU_FLAG_AGED = BIT(0), /* Cancel old non-LRU resize locks */
LDLM_LRU_FLAG_PASSED = BIT(1), /* Cancel passed number of locks. */
LDLM_LRU_FLAG_SHRINK = BIT(2), /* Cancel locks from shrinker. */
LDLM_LRU_FLAG_LRUR = BIT(3), /* Cancel locks from lru resize. */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 22600c2a73ea..9efd26ec59dd 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index b5d84f3f6071..7cb61e2e7d3b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1035,7 +1036,8 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
ldlm_extent_add_lock(res, lock);
} else if (res->lr_type == LDLM_FLOCK) {
/*
- * We should not add locks to granted list in the following cases:
+ * We should not add locks to granted list in
+ * the following cases:
* - this is an UNLOCK but not a real lock;
* - this is a TEST lock;
* - this is a F_CANCELLK lock (async flock has req_mode == 0)
@@ -2051,13 +2053,16 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
va_end(args);
return;
}
@@ -2067,7 +2072,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s [%llu->%llu] (req %llu->%llu) flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
@@ -2076,8 +2082,10 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
ldlm_typename[resource->lr_type],
lock->l_policy_data.l_extent.start,
lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_req_extent.start,
+ lock->l_req_extent.end,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
@@ -2087,7 +2095,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " rrc: %d type: %s pid: %d [%llu->%llu] flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu\n",
ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
ldlm_lockname[lock->l_req_mode],
@@ -2097,7 +2106,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
lock->l_policy_data.l_flock.pid,
lock->l_policy_data.l_flock.start,
lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout);
break;
@@ -2115,7 +2125,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
lock->l_policy_data.l_inodebits.bits,
atomic_read(&resource->lr_refcount),
ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
@@ -2133,7 +2144,8 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
PLDLMRES(resource),
atomic_read(&resource->lr_refcount),
ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
+ lock->l_flags, nid,
+ lock->l_remote_handle.cookie,
exp ? atomic_read(&exp->exp_refcount) : -99,
lock->l_pid, lock->l_callback_timeout,
lock->l_lvb_type);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index e2707336586c..2d5a2c932ddc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -184,7 +185,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
LASSERT(lock->l_lvb_data);
if (unlikely(lock->l_lvb_len < lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock,
+ "Replied LVB is larger than expectation, expected = %d, replied = %d",
lock->l_lvb_len, lvb_len);
rc = -EINVAL;
goto out;
@@ -598,7 +600,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
if (!lock) {
- CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
+ CDEBUG(D_DLMTRACE,
+ "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 1ca605fe25ff..33b5a3f96fcb 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index d77bf0baa84f..da65d00a7811 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -386,7 +387,8 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
/* DEBUG: should be re-removed after LU-4536 is fixed */
- CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), too short period(%ld)\n",
+ CDEBUG(D_DLMTRACE,
+ "%s: Negative interval(%ld), too short period(%ld)\n",
pl->pl_name, (long)recalc_interval_sec,
(long)pl->pl_recalc_period);
@@ -415,7 +417,8 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
lprocfs_counter_add(pl->pl_stats,
LDLM_POOL_SHRINK_FREED_STAT,
cancel);
- CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
+ CDEBUG(D_DLMTRACE,
+ "%s: request to shrink %d locks, shrunk %d\n",
pl->pl_name, nr, cancel);
}
}
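Note: most hunks in this file (and throughout the series) apply the same cleanup pattern: the printf-style format string passed to CDEBUG()/LDLM_ERROR() stays whole on its own line and only the argument list wraps, which meets the 80-column limit without splitting a user-visible string (which checkpatch.pl warns about). A minimal sketch of the resulting shape; the message text and the locals cancelled/unused/ns are illustrative, not taken from the patch:

	CDEBUG(D_DLMTRACE,
	       "example: cancelled %d of %d unused locks on %s\n",
	       cancelled, unused, ldlm_ns_name(ns));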
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index f3bf238d0748..02ea14c9b089 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -121,7 +122,8 @@ static int ldlm_expired_completion_wait(void *data)
if (!lock->l_conn_export) {
static unsigned long next_dump, last_dump;
- LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
+ LDLM_ERROR(lock,
+ "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
@@ -139,7 +141,8 @@ static int ldlm_expired_completion_wait(void *data)
obd = lock->l_conn_export->exp_obd;
imp = obd->u.cli.cl_import;
ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
- LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
+ LDLM_ERROR(lock,
+ "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() - lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
@@ -218,7 +221,8 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return ldlm_completion_tail(lock, data);
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, going forward");
return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
@@ -264,7 +268,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");
+ LDLM_DEBUG(lock,
+ "client-side enqueue returned a blocked lock, sleeping");
noreproc:
@@ -414,7 +419,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = size;
goto cleanup;
} else if (unlikely(size > lvb_len)) {
- LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
+ LDLM_ERROR(lock,
+ "Replied LVB is larger than expectation, expected = %d, replied = %d",
lvb_len, size);
rc = -EINVAL;
goto cleanup;
@@ -473,8 +479,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
&lock->l_resource->lr_name)) {
- CDEBUG(D_INFO, "remote intent success, locking " DLDLMRES
- " instead of " DLDLMRES "\n",
+ CDEBUG(D_INFO,
+ "remote intent success, locking " DLDLMRES " instead of " DLDLMRES "\n",
PLDLMRES(&reply->lock_desc.l_resource),
PLDLMRES(lock->l_resource));
@@ -850,7 +856,8 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
unlock_res_and_lock(lock);
if (local_only) {
- CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
+ CDEBUG(D_DLMTRACE,
+ "not sending request (at caller's instruction)\n");
rc = LDLM_FL_LOCAL_ONLY;
}
ldlm_lock_cancel(lock);
@@ -963,7 +970,8 @@ static int ldlm_cli_cancel_req(struct obd_export *exp,
rc = ptlrpc_queue_wait(req);
if (rc == LUSTRE_ESTALE) {
- CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
+ CDEBUG(D_DLMTRACE,
+ "client/server (nid %s) out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
imp_connection->c_peer.nid));
rc = 0;
@@ -1175,6 +1183,7 @@ ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
case LDLM_IBITS:
if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
break;
+ /* fall through */
default:
result = LDLM_POLICY_SKIP_LOCK;
lock_res_and_lock(lock);
@@ -1363,13 +1372,14 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
* flags & LDLM_LRU_FLAG_LRUR - use LRU resize policy (SLV from server) to
* cancel not more than \a count locks;
*
- * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
- * the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located
+ * at the beginning of LRU list);
*
- * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
- * memory pressure policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according
+ * to memory pressure policy function;
*
- * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED - cancel \a count locks according to
+ * "aged policy".
*
* flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
* (typically before replaying locks) w/o
@@ -1383,7 +1393,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
+ int no_wait = flags &
+ (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -2034,7 +2045,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
int canceled;
LIST_HEAD(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
+ CDEBUG(D_DLMTRACE,
+ "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
ldlm_ns_name(ns), ns->ns_nr_unused);
/* We don't need to care whether or not LRU resize is enabled
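Note: the block comment above documents the LDLM_LRU_FLAG_* values; they are single-bit flags (defined with BIT()), so callers select a cancellation policy by OR-ing them and helpers test them with a bitwise AND, as ldlm_prepare_lru_list() now does for the two no-wait variants. A minimal sketch of that usage; the particular flag combination chosen here is hypothetical:

	int flags = LDLM_LRU_FLAG_SHRINK | LDLM_LRU_FLAG_NO_WAIT;
	/* non-zero when either no-wait variant was requested */
	int no_wait = flags &
		      (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);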
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index c2ddf7312571..2689ffdf10e3 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1358,7 +1359,8 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {
- CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n",
+ CDEBUG(level,
+ "only dump %d granted locks to avoid DDOS.\n",
granted);
break;
}
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index ef7adef4ccc5..519fd747e3ad 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
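Note: every file touched by this series gains a top-of-file SPDX-License-Identifier line in the comment style native to the file type: C sources and headers use a C++-style comment, Makefiles a shell-style comment. A minimal sketch for a hypothetical new file of each kind (the file and config names are placeholders):

// SPDX-License-Identifier: GPL-2.0
/* example.c - C sources and headers carry the // form on line 1 */

# SPDX-License-Identifier: GPL-2.0
# Makefile-style files carry the # form on line 1
obj-$(CONFIG_EXAMPLE) += example.o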
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 3670fcaf373f..549369739d80 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -141,7 +142,8 @@ void ll_intent_drop_lock(struct lookup_intent *it)
handle.cookie = it->it_lock_handle;
- CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
+ CDEBUG(D_DLMTRACE,
+ "releasing lock with cookie %#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle, it->it_lock_mode);
@@ -152,7 +154,8 @@ void ll_intent_drop_lock(struct lookup_intent *it)
if (it->it_remote_lock_mode != 0) {
handle.cookie = it->it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
+ CDEBUG(D_DLMTRACE,
+ "releasing remote lock with cookie%#llx from it %p\n",
handle.cookie, it);
ldlm_lock_decref(&handle,
it->it_remote_lock_mode);
@@ -185,7 +188,8 @@ void ll_invalidate_aliases(struct inode *inode)
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
- CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
+ CDEBUG(D_DENTRY,
+ "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
d_inode(dentry), dentry->d_flags);
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 1db3e7f345c5..5b2e47c246f3 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -303,7 +304,8 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op:inode=" DFID "(%p) pos/size %lu/%llu 32bit_api %d\n",
PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
i_size_read(inode), api32);
@@ -502,7 +504,8 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
break;
}
default: {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
lump->lmm_magic, LOV_USER_MAGIC_V1,
LOV_USER_MAGIC_V3);
return -EINVAL;
@@ -732,10 +735,10 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc != 0) {
- CDEBUG(D_HSM, "Could not read file data version of "
- DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
- PFID(&copy->hc_hai.hai_fid), rc,
- copy->hc_hai.hai_cookie);
+ CDEBUG(D_HSM,
+ "Could not read file data version of " DFID " (rc = %d). Archive request (%#llx) could not be done.\n",
+ PFID(&copy->hc_hai.hai_fid), rc,
+ copy->hc_hai.hai_cookie);
hpk.hpk_flags |= HP_FLAG_RETRY;
/* hpk_errval must be >= 0 */
hpk.hpk_errval = -rc;
@@ -816,7 +819,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc) {
- CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
+ CDEBUG(D_HSM,
+ "Could not read file data version. Request could not be confirmed.\n");
if (hpk.hpk_errval == 0)
hpk.hpk_errval = -rc;
goto progress;
@@ -832,8 +836,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
*/
if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
(copy->hc_data_version != data_version)) {
- CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
- DFID ", start:%#llx current:%#llx\n",
+ CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. " DFID ", start:%#llx current:%#llx\n",
PFID(&copy->hc_hai.hai_fid),
copy->hc_data_version, data_version);
/* File was changed, send error to cdt. Do not ask for
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index be665454f407..938b859b6650 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -386,8 +387,8 @@ static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
ll_finish_md_op_data(op_data);
if (rc == -ESTALE) {
/* reason for keep own exit path - don`t flood log
- * with messages with -ESTALE errors.
- */
+ * with messages with -ESTALE errors.
+ */
if (!it_disposition(itp, DISP_OPEN_OPEN) ||
it_open_error(DISP_OPEN_OPEN, itp))
goto out;
@@ -605,7 +606,8 @@ restart:
* to get file with different fid.
*/
it->it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID;
- rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
+ rc = ll_intent_file_open(file->f_path.dentry,
+ NULL, 0, it);
if (rc)
goto out_openerr;
@@ -1014,7 +1016,7 @@ static bool file_is_noatime(const struct file *file)
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
@@ -1119,7 +1121,8 @@ out:
cl_io_fini(env, io);
if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
- CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
+ CDEBUG(D_VFSTRACE,
+ "%s: restart %s from %lld, count:%zu, result: %zd\n",
file_dentry(file)->d_name.name,
iot == CIT_READ ? "read" : "write",
*ppos, count, result);
@@ -3455,7 +3458,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "%s: file=" DFID " waiting layout return: %d.\n",
+ CDEBUG(D_INODE,
+ "%s: file=" DFID " waiting layout return: %d.\n",
ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), rc);
}
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 34c2cfecf4b8..c43ac574274c 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index d2392e4c6872..df5c0c0ae703 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index 422f410d95c1..a246b955306e 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 0287c751e1cd..b133fd00c08c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -84,7 +85,7 @@ struct ll_dentry_data {
struct ll_getname_data {
struct dir_context ctx;
- char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
+ char *lgd_name; /* points to buffer with NAME_MAX+1 size */
struct lu_fid lgd_fid; /* target fid we are looking for */
int lgd_found; /* inode matched? */
};
@@ -392,7 +393,8 @@ enum stats_track_type {
#define LL_SBI_XATTR_CACHE 0x80000 /* support for xattr cache */
#define LL_SBI_NOROOTSQUASH 0x100000 /* do not apply root squash */
#define LL_SBI_ALWAYS_PING 0x200000 /* always ping even if server
- * suppress_pings */
+ * suppress_pings
+ */
#define LL_SBI_FLAGS { \
"nolck", \
@@ -637,7 +639,8 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#if BITS_PER_LONG == 32
return 1;
#elif defined(CONFIG_COMPAT)
- return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(in_compat_syscall() ||
+ (sbi->ll_flags & LL_SBI_32BIT_API));
#else
return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
@@ -1065,7 +1068,7 @@ struct ll_statahead_info {
* hidden entries
*/
sai_agl_valid:1,/* AGL is valid for the dir */
- sai_in_readpage:1;/* statahead is in readdir() */
+ sai_in_readpage:1;/* statahead in readdir() */
wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
struct ptlrpc_thread sai_agl_thread; /* AGL thread */
@@ -1198,7 +1201,7 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
* Return value:
* A magic pointer will be returned if success;
* otherwise, NULL will be returned.
- * */
+ */
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
@@ -1261,7 +1264,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
handle.cookie = it->it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode " DFID "%p for lock %#llx\n",
+ CDEBUG(D_DLMTRACE,
+ "setting l_data to inode " DFID "%p for lock %#llx\n",
PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle, inode, &it->it_lock_bits);
@@ -1284,7 +1288,8 @@ static inline int d_lustre_invalid(const struct dentry *dentry)
*/
static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
- CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
+ CDEBUG(D_DENTRY,
+ "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
dentry, dentry,
dentry->d_parent, d_inode(dentry), d_count(dentry));
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 25393e3a0fe8..8666f1e81ade 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -231,7 +232,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
data, NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x14f, "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ LCONSOLE_ERROR_MSG(0x14f,
+ "An MDT (md %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
md);
goto out;
} else if (err) {
@@ -279,7 +281,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
- LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
+ LCONSOLE_ERROR_MSG(0x170,
+ "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
kfree(buf);
err = -EPROTO;
@@ -310,11 +313,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sbi->ll_flags |= LL_SBI_ACL;
} else {
LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
sbi->ll_flags &= ~LL_SBI_ACL;
}
@@ -380,7 +383,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
- CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
+ CDEBUG(D_RPCTRACE,
+ "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
data->ocd_connect_flags,
data->ocd_version, data->ocd_grant);
@@ -392,7 +396,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
err = obd_connect(NULL, &sbi->ll_dt_exp, obd, &sbi->ll_sb_uuid, data,
NULL);
if (err == -EBUSY) {
- LCONSOLE_ERROR_MSG(0x150, "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
+ LCONSOLE_ERROR_MSG(0x150,
+ "An OST (dt %s) is performing recovery, of which this client is not a part. Please wait for recovery to complete, abort, or time out.\n",
dt);
goto out_md;
} else if (err) {
@@ -655,7 +660,7 @@ void ll_kill_super(struct super_block *sb)
struct ll_sb_info *sbi;
/* not init sb ?*/
- if (!(sb->s_flags & MS_ACTIVE))
+ if (!(sb->s_flags & SB_ACTIVE))
return;
sbi = ll_s2sbi(sb);
@@ -915,7 +920,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
if (!lprof) {
- LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
+ LCONSOLE_ERROR_MSG(0x156,
+ "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
profilenm);
err = -EINVAL;
goto out_free;
@@ -1042,7 +1048,8 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
} else {
inode = lock->l_resource->lr_lvb_inode;
LDLM_DEBUG_LIMIT(inode->i_state & I_FREEING ? D_INFO :
- D_WARNING, lock, "lr_lvb_inode %p is bogus: magic %08x",
+ D_WARNING, lock,
+ "lr_lvb_inode %p is bogus: magic %08x",
lock->l_resource->lr_lvb_inode,
lli->lli_inode_magic);
inode = NULL;
@@ -1744,7 +1751,8 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
}
if (body->mbo_valid & OBD_MD_FLMTIME) {
if (body->mbo_mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
+ CDEBUG(D_INODE,
+ "setting ino %lu mtime from %lu to %llu\n",
inode->i_ino, LTIME_S(inode->i_mtime),
body->mbo_mtime);
LTIME_S(inode->i_mtime) = body->mbo_mtime;
@@ -2031,8 +2039,8 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
int err;
__u32 read_only;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
- read_only = *flags & MS_RDONLY;
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ read_only = *flags & SB_RDONLY;
err = obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_READ_ONLY),
KEY_READ_ONLY, sizeof(read_only),
@@ -2045,9 +2053,9 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
}
if (read_only)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Remounted %s %s\n", profilenm,
@@ -2254,7 +2262,8 @@ int ll_process_config(struct lustre_cfg *lcfg)
return -EINVAL;
sb = (void *)x;
/* This better be a real Lustre superblock! */
- LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
+ LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic ==
+ LMD_MAGIC);
/* Note we have not called client_common_fill_super yet, so
* proc fns must be able to handle that!
@@ -2571,8 +2580,9 @@ static int ll_linkea_decode(struct linkea_data *ldata, unsigned int linkno,
*
* \param[in] file - File descriptor against which to perform the operation
* \param[in,out] arg - User-filled structure containing the linkno to operate
- * on and the available size. It is eventually filled with
- * the requested information or left untouched on error
+ * on and the available size. It is eventually filled
+ * with the requested information or left untouched on
+ * error
*
* \retval - 0 on success
* \retval - Appropriate negative error code on failure
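Note: the hunks above (in file.c and llite_lib.c) also convert the old MS_* mount-flag macros to their SB_* superblock counterparts (SB_POSIXACL, SB_NODIRATIME, SB_ACTIVE, SB_RDONLY); the semantics are unchanged, only the in-kernel naming follows the newer VFS convention. A minimal sketch of the pattern; the surrounding check is hypothetical:

	struct super_block *sb = inode->i_sb;

	if (sb->s_flags & SB_RDONLY)		/* formerly MS_RDONLY */
		return -EROFS;

	if (acl_supported)
		sb->s_flags |= SB_POSIXACL;	/* formerly MS_POSIXACL */
	else
		sb->s_flags &= ~SB_POSIXACL;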
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ccc7ae15a943..c0533bd6f352 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -208,7 +209,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
*/
unlock_page(vmpage);
- CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
+ CDEBUG(D_MMAP,
+ "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
vmpage, vmpage->index);
*retry = true;
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index e50c637fab54..a6a1d80c711a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -127,7 +128,8 @@ struct lustre_nfs_fid {
};
static struct dentry *
-ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *parent)
+ll_iget_for_nfs(struct super_block *sb,
+ struct lu_fid *fid, struct lu_fid *parent)
{
struct inode *inode;
struct dentry *result;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index e3bd2d18eac5..644bea2f9d37 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 5cc2b3255207..a2687f46a16d 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -204,7 +205,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
if (!fid_res_name_eq(ll_inode2fid(inode),
&lock->l_resource->lr_name)) {
- LDLM_ERROR(lock, "data mismatch with object " DFID "(%p)",
+ LDLM_ERROR(lock,
+ "data mismatch with object " DFID "(%p)",
PFID(ll_inode2fid(inode)), inode);
LBUG();
}
@@ -289,7 +291,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
* we have to invalidate the negative children
* on master inode
*/
- CDEBUG(D_INODE, "Invalidate s" DFID " m" DFID "\n",
+ CDEBUG(D_INODE,
+ "Invalidate s" DFID " m" DFID "\n",
PFID(ll_inode2fid(inode)),
PFID(&lli->lli_pfid));
@@ -736,7 +739,8 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
*opened |= FILE_CREATED;
}
- if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
+ if (d_really_is_positive(dentry) &&
+ it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
if (S_ISFIFO(d_inode(dentry)->i_mode)) {
/* We cannot call open here as it might
@@ -949,7 +953,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
switch (mode & S_IFMT) {
case 0:
- mode |= S_IFREG; /* for mode = 0 case, fallthrough */
+ mode |= S_IFREG;
+ /* for mode = 0 case */
+ /* fall through */
case S_IFREG:
case S_IFCHR:
case S_IFBLK:
@@ -980,7 +986,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op:name=%pd, dir=" DFID "(%p), flags=%u, excl=%d\n",
dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
rc = ll_mknod(dir, dentry, mode, 0);
@@ -1101,7 +1108,8 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct md_op_data *op_data;
int err;
- CDEBUG(D_VFSTRACE, "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
+ CDEBUG(D_VFSTRACE,
+ "VFS Op: inode=" DFID "(%p), dir=" DFID "(%p), target=%pd\n",
PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
new_dentry);
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.c b/drivers/staging/lustre/lustre/llite/range_lock.c
index a32598bacdfb..cc9565f6bfe2 100644
--- a/drivers/staging/lustre/lustre/llite/range_lock.c
+++ b/drivers/staging/lustre/lustre/llite/range_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/range_lock.h b/drivers/staging/lustre/lustre/llite/range_lock.h
index 1e1519b1e006..38b2be4e378f 100644
--- a/drivers/staging/lustre/lustre/llite/range_lock.h
+++ b/drivers/staging/lustre/lustre/llite/range_lock.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index e72090572bcc..3e008ce7275d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -297,7 +298,8 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
else
pg_count = start_left + st_pgs * (end - start - 1) + end_left;
- CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
+ CDEBUG(D_READA,
+ "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
st_off, st_len, st_pgs, off, length, pg_count);
return pg_count;
@@ -404,7 +406,8 @@ ll_read_ahead_pages(const struct lu_env *env, struct cl_io *io,
* forward read-ahead, it will be fixed when backward
* read-ahead is implemented
*/
- LASSERTF(page_idx >= ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
+ LASSERTF(page_idx >= ria->ria_stoff,
+ "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
page_idx,
ria->ria_start, ria->ria_end, ria->ria_stoff,
ria->ria_length, ria->ria_pages);
@@ -669,8 +672,9 @@ static void ras_stride_increase_window(struct ll_readahead_state *ras,
unsigned long stride_len;
LASSERT(ras->ras_stride_length > 0);
- LASSERTF(ras->ras_window_start + ras->ras_window_len
- >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
+ LASSERTF(ras->ras_window_start + ras->ras_window_len >=
+ ras->ras_stride_offset,
+ "window_start %lu, window_len %lu stride_offset %lu\n",
ras->ras_window_start,
ras->ras_window_len, ras->ras_stride_offset);
@@ -766,7 +770,8 @@ static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
- ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
+ ra->ra_max_read_ahead_whole_pages,
+ ra->ra_max_pages_per_file);
if (kms_pages &&
kms_pages <= ra->ra_max_read_ahead_whole_pages) {
@@ -884,7 +889,8 @@ static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
/* The initial ras_window_len is set to the request size. To avoid
* uselessly reading and discarding pages for random IO the window is
- * only increased once per consecutive request received. */
+ * only increased once per consecutive request received.
+ */
if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
!ras->ras_request_index)
ras_increase_window(inode, ras, ra);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 3619cd8bb5f3..722e5ea1af5f 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index ea9d59f07b78..90c7324575e4 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1082,7 +1083,8 @@ static int ll_statahead_thread(void *arg)
struct ll_inode_info *clli;
clli = list_entry(sai->sai_agls.next,
- struct ll_inode_info, lli_agl_list);
+ struct ll_inode_info,
+ lli_agl_list);
list_del_init(&clli->lli_agl_list);
spin_unlock(&lli->lli_agl_lock);
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 0da4af81b830..0bda111a096e 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -88,7 +89,8 @@ static int __init lustre_init(void)
struct timespec64 ts;
int i, rc, seed[2];
- BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) != LUSTRE_VOLATILE_HDR_LEN + 1);
+ BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) !=
+ LUSTRE_VOLATILE_HDR_LEN + 1);
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 3cd33483afaf..0690fdbf49f5 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index f9d9a161bd4e..8ccc8b799c02 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index adce0ff4ae44..02ea5161d635 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index c83853fa1bb4..bfae98e82d6f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index e522f7c00617..4b6c7143bd2c 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 3953750b334e..05ad3b322a29 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 687c0c79d621..6eb0565ddc22 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 0be55623bac4..532384c91447 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 80ee3920481a..4dc799d60a9f 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Xyratex Technology Limited
*
@@ -364,7 +365,8 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
}
if (oit->it_status < 0) {
- CDEBUG(D_CACHE, "getxattr intent returned %d for fid " DFID "\n",
+ CDEBUG(D_CACHE,
+ "getxattr intent returned %d for fid " DFID "\n",
oit->it_status, PFID(ll_inode2fid(inode)));
rc = oit->it_status;
/* xattr data is so large that we don't want to cache it */
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
index 391fb25ac31d..93ec07531ac7 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_security.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index 5937468080b8..00dc858c10c9 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 22c247a7d8ca..1793c9f79b24 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index a0475231dd90..c27c3c32188d 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 6e16c930a021..c2c57f65431e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index f16cfa435f77..30727b7acccc 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/Makefile b/drivers/staging/lustre/lustre/lov/Makefile
index 3abfb4eab3d3..1ebf0193f61a 100644
--- a/drivers/staging/lustre/lustre/lov/Makefile
+++ b/drivers/staging/lustre/lustre/lov/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 89d92b05b48c..1185eceaf497 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index cea5f9dcd04e..c7db23472346 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 1124fd5ab32f..d563dd73343a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index a21f074008af..ae28ddf80d9b 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 9e3b150967b4..c5f5d1b106dc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index e12dc5afc14f..2fcdeb707ff9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 916336115989..3796bbb25305 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index fefd3c588681..7ce01026a409 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 334ecb1bc049..105b707eed14 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index 899d12c41aab..3e16e647b334 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 24fb2a97532b..e5b11c4085a9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index de43c609cf3d..cfae1294d77a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index d774ee2a3675..ecd9329cd073 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 9d3b3f3e9f10..3bdf48e4edb4 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index d4646a0949d2..7e89a2e485fc 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index d29f0bb33980..ea492be2eef3 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 011296ee16e6..13d452086b61 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index b2e68c3e820d..915520bcdd60 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 9bb7e9ea0a6a..721440feef72 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index f68513771527..6cce32491eb5 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index cbf011501005..e0300c34ca3a 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index ba13f0894e0d..46eefdc09e3a 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index cbfea3dd0319..3114907ac5ff 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index f45c91d1b4ae..488b98007558 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 6ef8ddec4ab6..03e55bca4ada 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1151,7 +1152,7 @@ static int mdc_read_page_remote(void *data, struct page *page0)
}
for (npages = 1; npages < max_pages; npages++) {
- page = page_cache_alloc_cold(inode->i_mapping);
+ page = page_cache_alloc(inode->i_mapping);
if (!page)
break;
page_pool[npages] = page;
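Note: the hunk above replaces page_cache_alloc_cold() with page_cache_alloc(), presumably tracking the removal of the cold-page allocation hint from the page-cache API; the plain allocator takes its GFP mask from the mapping. The read-ahead loop after the change looks roughly like this (max_pages and page_pool are locals of that path):

	for (npages = 1; npages < max_pages; npages++) {
		/* GFP flags come from mapping_gfp_mask(inode->i_mapping) */
		page = page_cache_alloc(inode->i_mapping);
		if (!page)
			break;
		page_pool[npages] = page;
	}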
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index 2ec2d7f731d3..636770624e8f 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_internal.h b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
index 7a2f2b7bc6b1..2c571c180578 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_internal.h
+++ b/drivers/staging/lustre/lustre/mgc/mgc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3d2b969c90a7..77fa8fea0249 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index fa0ad6548ecd..e3fa9acff4c4 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index 7b403fbd5f94..a0db830ca841 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 2a70e21ae07f..6ec5218a18c1 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 20e64051d2d6..d415f8396038 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 95c7fa3b532c..fdd27ce46fda 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 3dc084cb93bc..7f65439f9b95 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 2df218b010e1..2985bca4dc4c 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 7964cad7e780..2156a82a613a 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 739bfb9421ca..b1d6ba4a3190 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index 4f0a42633d5a..b9bf81607bbf 100644
--- a/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linkea.c b/drivers/staging/lustre/lustre/obdclass/linkea.c
index 9af86d3d56e4..fe1638b0916e 100644
--- a/drivers/staging/lustre/lustre/obdclass/linkea.c
+++ b/drivers/staging/lustre/lustre/obdclass/linkea.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 6df911112731..fc59f29a4290 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index e92cccceefa1..e5e8687784ee 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 98021a2d7238..cd051e31233e 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 8fa969101650..d9c63adff206 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_internal.h b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
index 8de90bc638b4..4991d4e589dc 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/llog_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 3c42de966077..28bbaa2136ac 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index d2d3114ce008..b431c3408fe4 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
index e4829880dc10..c83b7d7f8e72 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index e79485b4bf7f..05d71f568837 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -1507,12 +1508,16 @@ int lprocfs_write_frac_u64_helper(const char __user *buffer,
switch (tolower(*end)) {
case 'p':
units <<= 10;
+ /* fall through */
case 't':
units <<= 10;
+ /* fall through */
case 'g':
units <<= 10;
+ /* fall through */
case 'm':
units <<= 10;
+ /* fall through */
case 'k':
units <<= 10;
}
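
The hunk above only annotates fall-through that was already intentional: each recognized size suffix cascades into the next case, so the multiplier gains one factor of 1024 per level ('k' = 2^10 up to 'p' = 2^50). A minimal standalone sketch of the same idiom (illustrative only, not the Lustre helper itself):

#include <ctype.h>

/* Each case deliberately falls through to the next, accumulating one
 * 10-bit shift per level: 'k' -> 2^10, 'm' -> 2^20, ..., 'p' -> 2^50.
 * The comments are what keeps -Wimplicit-fallthrough quiet.
 */
static unsigned long long suffix_to_units(char suffix)
{
	unsigned long long units = 1;

	switch (tolower(suffix)) {
	case 'p':
		units <<= 10;
		/* fall through */
	case 't':
		units <<= 10;
		/* fall through */
	case 'g':
		units <<= 10;
		/* fall through */
	case 'm':
		units <<= 10;
		/* fall through */
	case 'k':
		units <<= 10;
	}
	return units;
}
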
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 09c98184a291..b938a3f9d50a 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ref.c b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
index fa690b2bd643..54fc88206534 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_ref.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ref.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index e1273c997b5f..71329adc0318 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 2798d35ad318..e286a2665423 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 94a940faca5d..c0e192ae22a9 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 1256034b60c1..2a79a223b98a 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 7083f8786e9a..c4503bc36591 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
index 89abea26a1f8..355e888885f4 100644
--- a/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
+++ b/drivers/staging/lustre/lustre/obdclass/statfs_pack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index 9b1872b99f2a..6cf7a03f048f 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f9808d1cc352..b9c1dc7e61b0 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
index 966414fd5424..42faa164fabb 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index ae13eb055229..dc76c35ae801 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index e1207c227b79..5767ac2a7d16 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -227,6 +228,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
rc = 65;
goto out;
}
+ /* fall through */
default:
if (atomic_read(&ext->oe_users) > 0) {
rc = 70;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 35bdbfb8660d..1449013722f6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index cf7b8879d7f0..2b5f324743e2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index a536908fb26a..feda61bcdb9b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index f7969e33f28a..76743faf3e6d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index b4f1f74dead8..fe8ed0d0497a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 945ae6e5a8b1..f82c87a77550 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ed8a0dc18ee5..20094b6309f9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index a6118f8ba446..ce1731dc604f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 4c68c42b2281..53eda4c99142 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
index a518001cdfe8..1deb1971b39e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ b/drivers/staging/lustre/lustre/ptlrpc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/include
subdir-ccflags-y += -I$(srctree)/drivers/staging/lustre/lustre/include
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index b1d379a6a70f..2a9f2f2ebaa8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index cfdcbcec2779..dfdb4587d49d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
index cb788364a553..54f0c36dc2bd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/errno.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/errno.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 62951f19b2ce..811b7ab3a582 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 21f528957b73..5b0f65536c29 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 85854d9a376d..18769d335751 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 480c20a6a792..254488be7093 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index bc5aa7bcdba8..bc4398b9bd1d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 1392ae9747bd..36eea50a77e7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 12149fb64719..047d712e850c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 2969d8da270e..4847f9a90cc9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
index df330e43bfe5..8251cbf2ad68 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index aad4ff191d95..a64e125df95f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -786,7 +787,7 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
/* flags might be printed in debug code while message
* uninitialized
@@ -854,7 +855,7 @@ __u32 lustre_msg_get_op_flags(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
return 0;
}
@@ -1035,7 +1036,7 @@ int lustre_msg_get_status(struct lustre_msg *msg)
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
}
- /* no break */
+ /* fall through */
default:
/* status might be printed in debug code while message
* uninitialized
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 643388b03af7..2466868afb9c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index e4de50e18d08..fe6b47bfe8be 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index c38e166f1502..f9decbd1459d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 38e488dd5409..131fc6d9646e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 0e476828cf75..8b865294d933 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 72a19a379e2f..e4d3f23e9f3a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index cd7a5391a574..617e004d00f8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -847,7 +848,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req)
if (req->rq_pool || !req->rq_reqbuf)
return;
- kfree(req->rq_reqbuf);
+ kvfree(req->rq_reqbuf);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
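
The hunk above switches the free to kvfree(), which is safe whether the request buffer came from the slab allocator or from vmalloc(); plain kfree() on vmalloc()ed memory is a bug. A hedged sketch of the pairing rule (the helper names below are illustrative, not the ptlrpc ones):

#include <linux/mm.h>

/* kvzalloc() falls back to vmalloc() for allocations the slab
 * allocator cannot satisfy, so the matching free must be kvfree(),
 * which dispatches to kfree() or vfree() as appropriate.
 */
static void *alloc_big_buffer(size_t len)
{
	return kvzalloc(len, GFP_NOFS);
}

static void free_big_buffer(void *buf)
{
	kvfree(buf);
}
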
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 059294aad172..77a3721beaee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 0f4af66688a3..2389f9a8f534 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index d10a8053d04f..8d1e0edfcede 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index 7792132eb145..fd609b63d2de 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index dc39a54c5e1a..80cea0b24693 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6aa9b65b1926..44e34056515b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 155f6a45cc8b..63be6e7273f3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
@@ -328,11 +329,11 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
return -1;
}
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
{
struct ptlrpc_service_part *svcpt;
- svcpt = (struct ptlrpc_service_part *)castmeharder;
+ svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
@@ -505,8 +506,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
if (!array->paa_reqs_count)
goto free_reqs_array;
- setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
- (unsigned long)svcpt);
+ timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
/* At SOW, service time should be quick; 10s seems generous. If client
* timeout is less than this, we'll be sending an early reply.
@@ -925,7 +925,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
- ptlrpc_at_timer((unsigned long)svcpt);
+ ptlrpc_at_timer(&svcpt->scp_at_timer);
} else {
mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
CDEBUG(D_INFO, "armed %s at %+ds\n",
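
The service.c hunks above convert svcpt->scp_at_timer to the timer_list-based API: timer_setup() replaces setup_timer() with its 'unsigned long' data cast, and the callback recovers its ptlrpc_service_part with from_timer(), a container_of() wrapper. A minimal sketch of the pattern under illustrative names (struct foo and its fields are not the ptlrpc structures):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct foo {
	struct timer_list my_timer;
	int fired;
};

/* The callback now receives the timer itself; from_timer() maps it
 * back to the enclosing object instead of casting an unsigned long.
 */
static void foo_timer_fn(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, my_timer);

	f->fired = 1;
}

static void foo_arm(struct foo *f)
{
	timer_setup(&f->my_timer, foo_timer_fn, 0);
	mod_timer(&f->my_timer, jiffies + HZ);	/* fire in ~1s */
}
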
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 07b86a1b6550..2f64eb417e77 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPL HEADER START
*
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index ac090c5fce30..be732cf932fd 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index 8eb13c3ba29c..27f078749148 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -1,9 +1,10 @@
menuconfig INTEL_ATOMISP
- bool "Enable support to Intel MIPI camera drivers"
- depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
- help
- Enable support for the Intel ISP2 camera interfaces and MIPI
- sensor drivers.
+ bool "Enable support to Intel MIPI camera drivers"
+ depends on X86 && EFI && MEDIA_CONTROLLER && PCI && ACPI
+ select COMMON_CLK
+ help
+ Enable support for the Intel ISP2 camera interfaces and MIPI
+ sensor drivers.
if INTEL_ATOMISP
source "drivers/staging/media/atomisp/pci/Kconfig"
diff --git a/drivers/staging/media/atomisp/TODO b/drivers/staging/media/atomisp/TODO
index 737452cbf8a0..255ce3630c2a 100644
--- a/drivers/staging/media/atomisp/TODO
+++ b/drivers/staging/media/atomisp/TODO
@@ -36,13 +36,23 @@
there are any specific things that can be done to fold in support for
multiple firmware versions.
+8. Switch to V4L2 async API to set up sensor, lens and flash devices.
+ Control those devices using V4L2 sub-device API without custom
+ extensions.
-Limitations:
+9. Switch to standard V4L2 sub-device API for sensor and lens. In
+ particular, the user space API needs to support V4L2 controls as
+ defined in the V4L2 spec and references to atomisp must be removed from
+ these drivers.
+
+10. Use LED flash API for flash LED drivers such as LM3554 (which already
+ has a LED class driver).
-1. Currently the patch only support some camera sensors
- gc2235/gc0310/0v2680/ov2722/ov5693/mt9m114...
+11. Switch from videobuf1 to videobuf2. Videobuf1 is being removed!
+
+Limitations:
-2. To test the patches, you also need the ISP firmware
+1. To test the patches, you also need the ISP firmware
for BYT:/lib/firmware/shisp_2400b0_v21.bin
for CHT:/lib/firmware/shisp_2401a0_v21.bin
@@ -51,14 +61,14 @@ Limitations:
device but can also be extracted from the upgrade kit if you've managed
to lose them somehow.
-3. Without a 3A libary the capture behaviour is not very good. To take a good
+2. Without a 3A library the capture behaviour is not very good. To take a good
   picture, you need to tune ISP parameters by IOCTL functions or use a 3A library
such as libxcam.
-4. The driver is intended to drive the PCI exposed versions of the device.
+3. The driver is intended to drive the PCI exposed versions of the device.
It will not detect those devices enumerated via ACPI as a field of the
i915 GPU driver.
-5. The driver supports only v2 of the IPU/Camera. It will not work with the
+4. The driver supports only v2 of the IPU/Camera. It will not work with the
versions of the hardware in other SoCs.
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index b80d29d53e65..db054d3c7ed6 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -3,104 +3,96 @@
#
source "drivers/staging/media/atomisp/i2c/ov5693/Kconfig"
-source "drivers/staging/media/atomisp/i2c/imx/Kconfig"
-config VIDEO_OV2722
+config VIDEO_ATOMISP_OV2722
tristate "OVT ov2722 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- OV2722 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ OV2722 raw camera.
- OVT is a 2M raw sensor.
+ OVT is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_GC2235
+config VIDEO_ATOMISP_GC2235
tristate "Galaxy gc2235 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the OVT
- GC2235 raw camera.
+ This is a Video4Linux2 sensor-level driver for the OVT
+ GC2235 raw camera.
- GC2235 is a 2M raw sensor.
+ GC2235 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_OV8858
+config VIDEO_ATOMISP_OV8858
tristate "Omnivision ov8858 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2 && VIDEO_ATOMISP
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- ov8858 RAW sensor.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ ov8858 RAW sensor.
OV8858 is a 8M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_MSRLIST_HELPER
+config VIDEO_ATOMISP_MSRLIST_HELPER
tristate "Helper library to load, parse and apply large register lists."
depends on I2C
---help---
- This is a helper library to be used from a sensor driver to load, parse
- and apply large register lists.
+ This is a helper library to be used from a sensor driver to load, parse
+ and apply large register lists.
- To compile this driver as a module, choose M here: the
- module will be called libmsrlisthelper.
+ To compile this driver as a module, choose M here: the
+ module will be called libmsrlisthelper.
-config VIDEO_MT9M114
+config VIDEO_ATOMISP_MT9M114
tristate "Aptina mt9m114 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- mt9m114 1.3 Mpixel camera.
+ This is a Video4Linux2 sensor-level driver for the Micron
+ mt9m114 1.3 Mpixel camera.
- mt9m114 is video camera sensor.
+ mt9m114 is video camera sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
-config VIDEO_AP1302
- tristate "AP1302 external ISP support"
- depends on I2C && VIDEO_V4L2
- select REGMAP_I2C
- ---help---
- This is a Video4Linux2 sensor-level driver for the external
- ISP AP1302.
-
- AP1302 is an exteral ISP.
-
- It currently only works with the atomisp driver.
-
-config VIDEO_GC0310
+config VIDEO_ATOMISP_GC0310
tristate "GC0310 sensor support"
- depends on I2C && VIDEO_V4L2
- ---help---
- This is a Video4Linux2 sensor-level driver for the Galaxycore
- GC0310 0.3MP sensor.
+ depends on ACPI
+ depends on I2C && VIDEO_V4L2
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Galaxycore
+ GC0310 0.3MP sensor.
-config VIDEO_OV2680
+config VIDEO_ATOMISP_OV2680
tristate "Omnivision OV2680 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Omnivision
- OV2680 raw camera.
+ This is a Video4Linux2 sensor-level driver for the Omnivision
+ OV2680 raw camera.
- ov2680 is a 2M raw sensor.
+ ov2680 is a 2M raw sensor.
- It currently only works with the atomisp driver.
+ It currently only works with the atomisp driver.
#
# Kconfig for flash drivers
#
-config VIDEO_LM3554
+config VIDEO_ATOMISP_LM3554
tristate "LM3554 flash light driver"
+ depends on ACPI
depends on VIDEO_V4L2 && I2C
---help---
- This is a Video4Linux2 sub-dev driver for the LM3554
- flash light driver.
-
- To compile this driver as a module, choose M here: the
- module will be called lm3554
-
+ This is a Video4Linux2 sub-dev driver for the LM3554
+ flash light driver.
+ To compile this driver as a module, choose M here: the
+ module will be called lm3554
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index be13fab92175..99ea35c043fd 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -1,23 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for sensor drivers
#
-obj-$(CONFIG_VIDEO_IMX) += imx/
-obj-$(CONFIG_VIDEO_OV5693) += ov5693/
-obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
-obj-$(CONFIG_VIDEO_GC2235) += gc2235.o
-obj-$(CONFIG_VIDEO_OV2722) += ov2722.o
-obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
-obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
+obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
+obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o
+obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
-obj-$(CONFIG_VIDEO_MSRLIST_HELPER) += libmsrlisthelper.o
-
-obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
+obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o
# Makefile for flash drivers
#
-obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
+obj-$(CONFIG_VIDEO_ATOMISP_LM3554) += atomisp-lm3554.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.c b/drivers/staging/media/atomisp/i2c/ap1302.c
deleted file mode 100644
index 2f772a020c8b..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.c
+++ /dev/null
@@ -1,1255 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include "../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "ap1302.h"
-
-#define to_ap1302_device(sub_dev) \
- container_of(sub_dev, struct ap1302_device, sd)
-
-/* Static definitions */
-static struct regmap_config ap1302_reg16_config = {
- .reg_bits = 16,
- .val_bits = 16,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static struct regmap_config ap1302_reg32_config = {
- .reg_bits = 16,
- .val_bits = 32,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .val_format_endian = REGMAP_ENDIAN_BIG,
-};
-
-static enum ap1302_contexts ap1302_cntx_mapping[] = {
- CONTEXT_PREVIEW, /* Invalid atomisp run mode */
- CONTEXT_VIDEO, /* ATOMISP_RUN_MODE_VIDEO */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_STILL_CAPTURE */
- CONTEXT_SNAPSHOT, /* ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE */
- CONTEXT_PREVIEW, /* ATOMISP_RUN_MODE_PREVIEW */
-};
-
-static struct ap1302_res_struct ap1302_preview_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_snapshot_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static struct ap1302_res_struct ap1302_video_res[] = {
- {
- .width = 640,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 720,
- .height = 480,
- .fps = 30,
- },
- {
- .width = 1280,
- .height = 720,
- .fps = 30,
- },
- {
- .width = 1920,
- .height = 1080,
- .fps = 30,
- }
-};
-
-static enum ap1302_contexts stream_to_context[] = {
- CONTEXT_SNAPSHOT,
- CONTEXT_PREVIEW,
- CONTEXT_PREVIEW,
- CONTEXT_VIDEO
-};
-
-static u16 aux_stream_config[CONTEXT_NUM][CONTEXT_NUM] = {
- {0, 0, 0}, /* Preview: No aux streams. */
- {1, 0, 2}, /* Snapshot: 1 for postview. 2 for video */
- {1, 0, 0}, /* Video: 1 for preview. */
-};
-
-static struct ap1302_context_info context_info[] = {
- {CNTX_WIDTH, AP1302_REG16, "width"},
- {CNTX_HEIGHT, AP1302_REG16, "height"},
- {CNTX_ROI_X0, AP1302_REG16, "roi_x0"},
- {CNTX_ROI_X1, AP1302_REG16, "roi_x1"},
- {CNTX_ROI_Y0, AP1302_REG16, "roi_y0"},
- {CNTX_ROI_Y1, AP1302_REG16, "roi_y1"},
- {CNTX_ASPECT, AP1302_REG16, "aspect"},
- {CNTX_LOCK, AP1302_REG16, "lock"},
- {CNTX_ENABLE, AP1302_REG16, "enable"},
- {CNTX_OUT_FMT, AP1302_REG16, "out_fmt"},
- {CNTX_SENSOR_MODE, AP1302_REG16, "sensor_mode"},
- {CNTX_MIPI_CTRL, AP1302_REG16, "mipi_ctrl"},
- {CNTX_MIPI_II_CTRL, AP1302_REG16, "mipi_ii_ctrl"},
- {CNTX_LINE_TIME, AP1302_REG32, "line_time"},
- {CNTX_MAX_FPS, AP1302_REG16, "max_fps"},
- {CNTX_AE_USG, AP1302_REG16, "ae_usg"},
- {CNTX_AE_UPPER_ET, AP1302_REG32, "ae_upper_et"},
- {CNTX_AE_MAX_ET, AP1302_REG32, "ae_max_et"},
- {CNTX_SS, AP1302_REG16, "ss"},
- {CNTX_S1_SENSOR_MODE, AP1302_REG16, "s1_sensor_mode"},
- {CNTX_HINF_CTRL, AP1302_REG16, "hinf_ctrl"},
-};
-
-/* This array stores the description list for metadata.
- The metadata contains exposure settings and face
- detection results. */
-static u16 ap1302_ss_list[] = {
- 0xb01c, /* From 0x0186 with size 0x1C are exposure settings. */
- 0x0186,
- 0xb002, /* 0x71c0 is for F-number */
- 0x71c0,
- 0xb010, /* From 0x03dc with size 0x10 are face general infos. */
- 0x03dc,
- 0xb0a0, /* From 0x03e4 with size 0xa0 are face detail infos. */
- 0x03e4,
- 0xb020, /* From 0x0604 with size 0x20 are smile rate infos. */
- 0x0604,
- 0x0000
-};
-
-/* End of static definitions */
-
-static int ap1302_i2c_read_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, void *val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (len == AP1302_REG16)
- ret = regmap_read(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_read(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Read reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%04X\n",
- reg, *(u16 *)val);
- else
- dev_dbg(&client->dev, "read_reg[0x%04X] = 0x%08X\n",
- reg, *(u32 *)val);
- return ret;
-}
-
-static int ap1302_i2c_write_reg(struct v4l2_subdev *sd,
- u16 reg, u16 len, u32 val)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- if (len == AP1302_REG16)
- ret = regmap_write(dev->regmap16, reg, val);
- else if (len == AP1302_REG32)
- ret = regmap_write(dev->regmap32, reg, val);
- else
- ret = -EINVAL;
- if (ret) {
- dev_dbg(&client->dev, "Write reg failed. reg=0x%04X\n", reg);
- return ret;
- }
- if (len == AP1302_REG16)
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%04X\n",
- reg, (u16)val);
- else
- dev_dbg(&client->dev, "write_reg[0x%04X] = 0x%08X\n",
- reg, (u32)val);
- return ret;
-}
-
-static u16
-ap1302_calculate_context_reg_addr(enum ap1302_contexts context, u16 offset)
-{
- u16 reg_addr;
- /* The register offset is defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
- if (context == CONTEXT_SNAPSHOT) {
- if (offset == CNTX_S1_SENSOR_MODE)
- return 0;
- if (offset > CNTX_S1_SENSOR_MODE)
- offset -= 2;
- }
- if (context == CONTEXT_PREVIEW)
- reg_addr = REG_PREVIEW_BASE + offset;
- else if (context == CONTEXT_VIDEO)
- reg_addr = REG_VIDEO_BASE + offset;
- else
- reg_addr = REG_SNAPSHOT_BASE + offset;
- return reg_addr;
-}
-
-static int ap1302_read_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_read_reg(sd, reg_addr, len,
- ((u8 *)&dev->cntx_config[context]) + offset);
-}
-
-static int ap1302_write_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context, u16 offset, u16 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- u16 reg_addr = ap1302_calculate_context_reg_addr(context, offset);
- if (reg_addr == 0)
- return -EINVAL;
- return ap1302_i2c_write_reg(sd, reg_addr, len,
- *(u32 *)(((u8 *)&dev->cntx_config[context]) + offset));
-}
-
-static int ap1302_dump_context_reg(struct v4l2_subdev *sd,
- enum ap1302_contexts context)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int i;
- dev_dbg(&client->dev, "Dump registers for context[%d]:\n", context);
- for (i = 0; i < ARRAY_SIZE(context_info); i++) {
- struct ap1302_context_info *info = &context_info[i];
- u8 *var = (u8 *)&dev->cntx_config[context] + info->offset;
- /* Snapshot context does not have s1_sensor_mode register. */
- if (context == CONTEXT_SNAPSHOT &&
- info->offset == CNTX_S1_SENSOR_MODE)
- continue;
- ap1302_read_context_reg(sd, context, info->offset, info->len);
- if (info->len == AP1302_REG16)
- dev_dbg(&client->dev, "context.%s = 0x%04X (%d)\n",
- info->name, *(u16 *)var, *(u16 *)var);
- else
- dev_dbg(&client->dev, "context.%s = 0x%08X (%d)\n",
- info->name, *(u32 *)var, *(u32 *)var);
- }
- return 0;
-}
-
-static int ap1302_request_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- ret = request_firmware(&dev->fw, "ap1302_fw.bin", &client->dev);
- if (ret)
- dev_err(&client->dev,
- "ap1302_request_firmware failed. ret=%d\n", ret);
- return ret;
-}
-
-/* When loading firmware, host writes firmware data from address 0x8000.
- When the address reaches 0x9FFF, the next address should return to 0x8000.
- This function handles this address window and load firmware data to AP1302.
- win_pos indicates the offset within this window. Firmware loading procedure
- may call this function several times. win_pos records the current position
- that has been written to.*/
-static int ap1302_write_fw_window(struct v4l2_subdev *sd,
- u16 *win_pos, const u8 *buf, u32 len)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 pos;
- u32 sub_len;
- for (pos = 0; pos < len; pos += sub_len) {
- if (len - pos < AP1302_FW_WINDOW_SIZE - *win_pos)
- sub_len = len - pos;
- else
- sub_len = AP1302_FW_WINDOW_SIZE - *win_pos;
- ret = regmap_raw_write(dev->regmap16,
- *win_pos + AP1302_FW_WINDOW_OFFSET,
- buf + pos, sub_len);
- if (ret)
- return ret;
- *win_pos += sub_len;
- if (*win_pos >= AP1302_FW_WINDOW_SIZE)
- *win_pos = 0;
- }
- return 0;
-}
-
-static int ap1302_load_firmware(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ap1302_device *dev = to_ap1302_device(sd);
- const struct ap1302_firmware *fw;
- const u8 *fw_data;
- u16 reg_val = 0;
- u16 win_pos = 0;
- int ret;
-
- dev_info(&client->dev, "Start to load firmware.\n");
- if (!dev->fw) {
- dev_err(&client->dev, "firmware not requested.\n");
- return -EINVAL;
- }
- fw = (const struct ap1302_firmware *) dev->fw->data;
- if (dev->fw->size != (sizeof(*fw) + fw->total_size)) {
- dev_err(&client->dev, "firmware size does not match.\n");
- return -EINVAL;
- }
- /* The fw binary contains a header of struct ap1302_firmware.
- Following the header is the bootdata of AP1302.
- The bootdata pointer can be referenced as &fw[1]. */
- fw_data = (u8 *)&fw[1];
-
- /* Clear crc register. */
- ret = ap1302_i2c_write_reg(sd, REG_SIP_CRC, AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
-
- /* Load FW data for PLL init stage. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data, fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Write 2 to bootdata_stage register to apply basic_init_hp
- settings and enable PLL. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0x0002);
- if (ret)
- return ret;
-
- /* Wait 1ms for PLL to lock. */
- msleep(20);
-
- /* Load the rest of bootdata content. */
- ret = ap1302_write_fw_window(sd, &win_pos, fw_data + fw->pll_init_size,
- fw->total_size - fw->pll_init_size);
- if (ret)
- return ret;
-
- /* Check crc. */
- ret = ap1302_i2c_read_reg(sd, REG_SIP_CRC, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- if (reg_val != fw->crc) {
- dev_err(&client->dev,
- "crc does not match. T:0x%04X F:0x%04X\n",
- fw->crc, reg_val);
- return -EAGAIN;
- }
-
- /* Write 0xFFFF to bootdata_stage register to indicate AP1302 that
- the whole bootdata content has been loaded. */
- ret = ap1302_i2c_write_reg(sd, REG_BOOTDATA_STAGE,
- AP1302_REG16, 0xFFFF);
- if (ret)
- return ret;
- dev_info(&client->dev, "Load firmware successfully.\n");
-
- return 0;
-}
-
-static int __ap1302_s_power(struct v4l2_subdev *sd, int on, int load_fw)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret, i;
- u16 ss_ptr;
-
- dev_info(&client->dev, "ap1302_s_power is called.\n");
- ret = dev->platform_data->power_ctrl(sd, on);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_s_power error. on=%d ret=%d\n", on, ret);
- return ret;
- }
- dev->power_on = on;
- if (!on || !load_fw)
- return 0;
- /* Load firmware after power on. */
- ret = ap1302_load_firmware(sd);
- if (ret) {
- dev_err(&client->dev,
- "ap1302_load_firmware failed. ret=%d\n", ret);
- return ret;
- }
- ret = ap1302_i2c_read_reg(sd, REG_SS_HEAD_PT0, AP1302_REG16, &ss_ptr);
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(ap1302_ss_list); i++) {
- ret = ap1302_i2c_write_reg(sd, ss_ptr + i * 2,
- AP1302_REG16, ap1302_ss_list[i]);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static int ap1302_s_power(struct v4l2_subdev *sd, int on)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __ap1302_s_power(sd, on, 1);
- dev->sys_activated = 0;
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int ap1302_s_config(struct v4l2_subdev *sd, void *pdata)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct camera_mipi_info *mipi_info;
- u16 reg_val = 0;
- int ret;
-
- dev_info(&client->dev, "ap1302_s_config is called.\n");
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret)
- goto fail_power;
- }
-
- ret = __ap1302_s_power(sd, 1, 0);
- if (ret)
- goto fail_power;
-
- /* Detect for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_VERSION, AP1302_REG16, &reg_val);
- if (ret || (reg_val != AP1302_CHIP_ID)) {
- dev_err(&client->dev,
- "Chip version does no match. ret=%d ver=0x%04x\n",
- ret, reg_val);
- goto fail_config;
- }
- dev_info(&client->dev, "AP1302 Chip ID is 0x%X\n", reg_val);
-
- /* Detect revision for AP1302 */
- ret = ap1302_i2c_read_reg(sd, REG_CHIP_REV, AP1302_REG16, &reg_val);
- if (ret)
- goto fail_config;
- dev_info(&client->dev, "AP1302 Chip Rev is 0x%X\n", reg_val);
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_config;
-
- mipi_info = v4l2_get_subdev_hostdata(sd);
- if (!mipi_info)
- goto fail_config;
- dev->num_lanes = mipi_info->num_lanes;
-
- ret = __ap1302_s_power(sd, 0, 0);
- if (ret)
- goto fail_power;
-
- mutex_unlock(&dev->input_lock);
-
- return ret;
-
-fail_config:
- __ap1302_s_power(sd, 0, 0);
-fail_power:
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "ap1302_s_config failed\n");
- return ret;
-}
-
-static enum ap1302_contexts ap1302_get_context(struct v4l2_subdev *sd)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- return dev->cur_context;
-}
-
-static int ap1302_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
-
- return 0;
-}
-
-static int ap1302_match_resolution(struct ap1302_context_res *res,
- struct v4l2_mbus_framefmt *fmt)
-{
- s32 w0, h0, mismatch, distance;
- s32 w1 = fmt->width;
- s32 h1 = fmt->height;
- s32 min_distance = INT_MAX;
- s32 i, idx = -1;
-
- if (w1 == 0 || h1 == 0)
- return -1;
-
- for (i = 0; i < res->res_num; i++) {
- w0 = res->res_table[i].width;
- h0 = res->res_table[i].height;
- if (w0 < w1 || h0 < h1)
- continue;
- mismatch = abs(w0 * h1 - w1 * h0) * 8192 / w1 / h0;
- if (mismatch > 8192 * AP1302_MAX_RATIO_MISMATCH / 100)
- continue;
- distance = (w0 * h1 + w1 * h0) * 8192 / w1 / h1;
- if (distance < min_distance) {
- min_distance = distance;
- idx = i;
- }
- }
-
- return idx;
-}
-
-static s32 ap1302_try_mbus_fmt_locked(struct v4l2_subdev *sd,
- enum ap1302_contexts context,
- struct v4l2_mbus_framefmt *fmt)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct ap1302_res_struct *res_table;
- s32 res_num, idx = -1;
-
- res_table = dev->cntx_res[context].res_table;
- res_num = dev->cntx_res[context].res_num;
-
- if ((fmt->width <= res_table[res_num - 1].width) &&
- (fmt->height <= res_table[res_num - 1].height))
- idx = ap1302_match_resolution(&dev->cntx_res[context], fmt);
- if (idx == -1)
- idx = res_num - 1;
-
- fmt->width = res_table[idx].width;
- fmt->height = res_table[idx].height;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- return idx;
-}
-
-
-static int ap1302_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- s32 cur_res;
- if (format->pad)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
- fmt->width = res_table[cur_res].width;
- fmt->height = res_table[cur_res].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct atomisp_input_stream_info *stream_info =
- (struct atomisp_input_stream_info *)fmt->reserved;
- enum ap1302_contexts context, main_context;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
- mutex_lock(&dev->input_lock);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- context = ap1302_get_context(sd);
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- cfg->try_fmt = *fmt;
- mutex_unlock(&dev->input_lock);
- return 0;
- }
- context = stream_to_context[stream_info->stream];
- dev_dbg(&client->dev, "ap1302_set_mbus_fmt. stream=%d context=%d\n",
- stream_info->stream, context);
- dev->cntx_res[context].cur_res =
- ap1302_try_mbus_fmt_locked(sd, context, fmt);
- dev->cntx_config[context].width = fmt->width;
- dev->cntx_config[context].height = fmt->height;
- ap1302_write_context_reg(sd, context, CNTX_WIDTH, AP1302_REG16);
- ap1302_write_context_reg(sd, context, CNTX_HEIGHT, AP1302_REG16);
- ap1302_read_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt &= ~OUT_FMT_TYPE_MASK;
- dev->cntx_config[context].out_fmt |= AP1302_FMT_UYVY422;
- ap1302_write_context_reg(sd, context, CNTX_OUT_FMT, AP1302_REG16);
-
- main_context = ap1302_get_context(sd);
- if (context == main_context) {
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSVC_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (context << MIPI_CTRL_SSVC_OFFSET);
- dev->cntx_config[context].mipi_ctrl &= ~MIPI_CTRL_SSTYPE_MASK;
- dev->cntx_config[context].mipi_ctrl |=
- (0x12 << MIPI_CTRL_SSTYPE_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_CTRL, AP1302_REG16);
- ap1302_read_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- dev->cntx_config[context].ss = AP1302_SS_CTRL;
- ap1302_write_context_reg(sd, context,
- CNTX_SS, AP1302_REG16);
- } else {
- /* Configure aux stream */
- ap1302_read_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- dev->cntx_config[context].mipi_ii_ctrl &= ~MIPI_CTRL_IMGVC_MASK;
- dev->cntx_config[context].mipi_ii_ctrl |=
- (context << MIPI_CTRL_IMGVC_OFFSET);
- ap1302_write_context_reg(sd, context,
- CNTX_MIPI_II_CTRL, AP1302_REG16);
- if (stream_info->enable) {
- ap1302_read_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- dev->cntx_config[context].out_fmt |=
- (aux_stream_config[main_context][context]
- << OUT_FMT_IIS_OFFSET);
- ap1302_write_context_reg(sd, main_context,
- CNTX_OUT_FMT, AP1302_REG16);
- }
- }
- stream_info->ch_id = context;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- u32 cur_res;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- res_table = dev->cntx_res[context].res_table;
- cur_res = dev->cntx_res[context].cur_res;
- interval->interval.denominator = res_table[cur_res].fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int ap1302_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- enum ap1302_contexts context;
- struct ap1302_res_struct *res_table;
- int index = fse->index;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- if (index >= dev->cntx_res[context].res_num) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- res_table = dev->cntx_res[context].res_table;
- fse->min_width = res_table[index].width;
- fse->min_height = res_table[index].height;
- fse->max_width = res_table[index].width;
- fse->max_height = res_table[index].height;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-
-static int ap1302_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- *frames = 0;
- return 0;
-}
-
-static int ap1302_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- enum ap1302_contexts context;
- u32 reg_val;
- int ret;
-
- mutex_lock(&dev->input_lock);
- context = ap1302_get_context(sd);
- dev_dbg(&client->dev, "ap1302_s_stream. context=%d enable=%d\n",
- context, enable);
- /* Switch context */
- ap1302_i2c_read_reg(sd, REG_CTRL,
- AP1302_REG16, &reg_val);
- reg_val &= ~CTRL_CNTX_MASK;
- reg_val |= (context<<CTRL_CNTX_OFFSET);
- ap1302_i2c_write_reg(sd, REG_CTRL,
- AP1302_REG16, reg_val);
- /* Select sensor */
- ap1302_i2c_read_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, &reg_val);
- reg_val &= ~SENSOR_SELECT_MASK;
- reg_val |= (AP1302_SENSOR_PRI<<SENSOR_SELECT_OFFSET);
- ap1302_i2c_write_reg(sd, REG_SENSOR_SELECT,
- AP1302_REG16, reg_val);
- if (enable) {
- dev_info(&client->dev, "Start stream. context=%d\n", context);
- ap1302_dump_context_reg(sd, context);
- if (!dev->sys_activated) {
- reg_val = AP1302_SYS_ACTIVATE;
- dev->sys_activated = 1;
- } else {
- reg_val = AP1302_SYS_SWITCH;
- }
- } else {
- dev_info(&client->dev, "Stop stream. context=%d\n", context);
- reg_val = AP1302_SYS_SWITCH;
- }
- ret = ap1302_i2c_write_reg(sd, REG_SYS_START, AP1302_REG16, reg_val);
- if (ret)
- dev_err(&client->dev,
- "AP1302 set stream failed. enable=%d\n", enable);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static u16 ap1302_ev_values[] = {0xfd00, 0xfe80, 0x0, 0x180, 0x300};
-
-static int ap1302_set_exposure_off(struct v4l2_subdev *sd, s32 val)
-{
- val -= AP1302_MIN_EV;
- return ap1302_i2c_write_reg(sd, REG_AE_BV_OFF, AP1302_REG16,
- ap1302_ev_values[val]);
-}
-
-static u16 ap1302_wb_values[] = {
- 0, /* V4L2_WHITE_BALANCE_MANUAL */
- 0xf, /* V4L2_WHITE_BALANCE_AUTO */
- 0x2, /* V4L2_WHITE_BALANCE_INCANDESCENT */
- 0x4, /* V4L2_WHITE_BALANCE_FLUORESCENT */
- 0x5, /* V4L2_WHITE_BALANCE_FLUORESCENT_H */
- 0x1, /* V4L2_WHITE_BALANCE_HORIZON */
- 0x5, /* V4L2_WHITE_BALANCE_DAYLIGHT */
- 0xf, /* V4L2_WHITE_BALANCE_FLASH */
- 0x6, /* V4L2_WHITE_BALANCE_CLOUDY */
- 0x6, /* V4L2_WHITE_BALANCE_SHADE */
-};
-
-static int ap1302_set_wb_mode(struct v4l2_subdev *sd, s32 val)
-{
- int ret = 0;
- u16 reg_val;
-
- ret = ap1302_i2c_read_reg(sd, REG_AWB_CTRL, AP1302_REG16, &reg_val);
- if (ret)
- return ret;
- reg_val &= ~AWB_CTRL_MODE_MASK;
- reg_val |= ap1302_wb_values[val] << AWB_CTRL_MODE_OFFSET;
- if (val == V4L2_WHITE_BALANCE_FLASH)
- reg_val |= AWB_CTRL_FLASH_MASK;
- else
- reg_val &= ~AWB_CTRL_FLASH_MASK;
- ret = ap1302_i2c_write_reg(sd, REG_AWB_CTRL, AP1302_REG16, reg_val);
- return ret;
-}
-
-static int ap1302_set_zoom(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_DZ_TGT_FCT, AP1302_REG16,
- val * 4 + 0x100);
- return 0;
-}
-
-static u16 ap1302_sfx_values[] = {
- 0x00, /* V4L2_COLORFX_NONE */
- 0x03, /* V4L2_COLORFX_BW */
- 0x0d, /* V4L2_COLORFX_SEPIA */
- 0x07, /* V4L2_COLORFX_NEGATIVE */
- 0x04, /* V4L2_COLORFX_EMBOSS */
- 0x0f, /* V4L2_COLORFX_SKETCH */
- 0x08, /* V4L2_COLORFX_SKY_BLUE */
- 0x09, /* V4L2_COLORFX_GRASS_GREEN */
- 0x0a, /* V4L2_COLORFX_SKIN_WHITEN */
- 0x00, /* V4L2_COLORFX_VIVID */
- 0x00, /* V4L2_COLORFX_AQUA */
- 0x00, /* V4L2_COLORFX_ART_FREEZE */
- 0x00, /* V4L2_COLORFX_SILHOUETTE */
- 0x10, /* V4L2_COLORFX_SOLARIZATION */
- 0x02, /* V4L2_COLORFX_ANTIQUE */
- 0x00, /* V4L2_COLORFX_SET_CBCR */
-};
-
-static int ap1302_set_special_effect(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SFX_MODE, AP1302_REG16,
- ap1302_sfx_values[val]);
- return 0;
-}
-
-static u16 ap1302_scene_mode_values[] = {
- 0x00, /* V4L2_SCENE_MODE_NONE */
- 0x07, /* V4L2_SCENE_MODE_BACKLIGHT */
- 0x0a, /* V4L2_SCENE_MODE_BEACH_SNOW */
- 0x06, /* V4L2_SCENE_MODE_CANDLE_LIGHT */
- 0x00, /* V4L2_SCENE_MODE_DAWN_DUSK */
- 0x00, /* V4L2_SCENE_MODE_FALL_COLORS */
- 0x0d, /* V4L2_SCENE_MODE_FIREWORKS */
- 0x02, /* V4L2_SCENE_MODE_LANDSCAPE */
- 0x05, /* V4L2_SCENE_MODE_NIGHT */
- 0x0c, /* V4L2_SCENE_MODE_PARTY_INDOOR */
- 0x01, /* V4L2_SCENE_MODE_PORTRAIT */
- 0x03, /* V4L2_SCENE_MODE_SPORTS */
- 0x0e, /* V4L2_SCENE_MODE_SUNSET */
- 0x0b, /* V4L2_SCENE_MODE_TEXT */
-};
-
-static int ap1302_set_scene_mode(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_SCENE_CTRL, AP1302_REG16,
- ap1302_scene_mode_values[val]);
- return 0;
-}
-
-static u16 ap1302_flicker_values[] = {
- 0x0, /* OFF */
- 0x3201, /* 50HZ */
- 0x3c01, /* 60HZ */
- 0x2 /* AUTO */
-};
-
-static int ap1302_set_flicker_freq(struct v4l2_subdev *sd, s32 val)
-{
- ap1302_i2c_write_reg(sd, REG_FLICK_CTRL, AP1302_REG16,
- ap1302_flicker_values[val]);
- return 0;
-}
-
-static int ap1302_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ap1302_device *dev = container_of(
- ctrl->handler, struct ap1302_device, ctrl_handler);
-
- switch (ctrl->id) {
- case V4L2_CID_RUN_MODE:
- dev->cur_context = ap1302_cntx_mapping[ctrl->val];
- break;
- case V4L2_CID_EXPOSURE:
- ap1302_set_exposure_off(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
- ap1302_set_wb_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_ZOOM_ABSOLUTE:
- ap1302_set_zoom(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_COLORFX:
- ap1302_set_special_effect(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_SCENE_MODE:
- ap1302_set_scene_mode(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_POWER_LINE_FREQUENCY:
- ap1302_set_flicker_freq(&dev->sd, ctrl->val);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ap1302_g_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
- u32 reg_val;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_read_reg(sd, reg->reg, reg->size, &reg_val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- if (ret)
- return ret;
-
- reg->val = reg_val;
-
- return 0;
-}
-
-static int ap1302_s_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct ap1302_device *dev = to_ap1302_device(sd);
- int ret;
-
- if (reg->size != AP1302_REG16 &&
- reg->size != AP1302_REG32)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- if (dev->power_on)
- ret = ap1302_i2c_write_reg(sd, reg->reg, reg->size, reg->val);
- else
- ret = -EIO;
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long ap1302_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- long ret = 0;
- switch (cmd) {
- case VIDIOC_DBG_G_REGISTER:
- ret = ap1302_g_register(sd, arg);
- break;
- case VIDIOC_DBG_S_REGISTER:
- ret = ap1302_s_register(sd, arg);
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = ap1302_s_ctrl,
-};
-
-static const char * const ctrl_run_mode_menu[] = {
- NULL,
- "Video",
- "Still capture",
- "Continuous capture",
- "Preview",
-};
-
-static const struct v4l2_ctrl_config ctrls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_RUN_MODE,
- .name = "Run Mode",
- .type = V4L2_CTRL_TYPE_MENU,
- .min = 1,
- .def = 4,
- .max = 4,
- .qmenu = ctrl_run_mode_menu,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE,
- .name = "Exposure",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = AP1302_MIN_EV,
- .def = 0,
- .max = AP1302_MAX_EV,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
- .name = "White Balance",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 9,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom Absolute",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 1024,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_COLORFX,
- .name = "Color Special Effect",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 15,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_SCENE_MODE,
- .name = "Scene Mode",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 0,
- .max = 13,
- .step = 1,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Light frequency filter",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .def = 3,
- .max = 3,
- .step = 1,
- },
-};
-
-static const struct v4l2_subdev_sensor_ops ap1302_sensor_ops = {
- .g_skip_frames = ap1302_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops ap1302_video_ops = {
- .s_stream = ap1302_s_stream,
- .g_frame_interval = ap1302_g_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops ap1302_core_ops = {
- .s_power = ap1302_s_power,
- .ioctl = ap1302_ioctl,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ap1302_g_register,
- .s_register = ap1302_s_register,
-#endif
-};
-
-static const struct v4l2_subdev_pad_ops ap1302_pad_ops = {
- .enum_mbus_code = ap1302_enum_mbus_code,
- .enum_frame_size = ap1302_enum_frame_size,
- .get_fmt = ap1302_get_fmt,
- .set_fmt = ap1302_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ap1302_ops = {
- .core = &ap1302_core_ops,
- .pad = &ap1302_pad_ops,
- .video = &ap1302_video_ops,
- .sensor = &ap1302_sensor_ops
-};
-
-static int ap1302_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ap1302_device *dev = to_ap1302_device(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- release_firmware(dev->fw);
-
- media_entity_cleanup(&dev->sd.entity);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
-
- return 0;
-}
-
-static int ap1302_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct ap1302_device *dev;
- int ret;
- unsigned int i;
-
- dev_info(&client->dev, "ap1302 probe called.\n");
-
- /* allocate device & init sub device */
- dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &ap1302_ops);
-
- ret = ap1302_request_firmware(&(dev->sd));
- if (ret) {
- dev_err(&client->dev, "Cannot request ap1302 firmware.\n");
- goto out_free;
- }
-
- dev->regmap16 = devm_regmap_init_i2c(client, &ap1302_reg16_config);
- if (IS_ERR(dev->regmap16)) {
- ret = PTR_ERR(dev->regmap16);
- dev_err(&client->dev,
- "Failed to allocate 16bit register map: %d\n", ret);
- return ret;
- }
-
- dev->regmap32 = devm_regmap_init_i2c(client, &ap1302_reg32_config);
- if (IS_ERR(dev->regmap32)) {
- ret = PTR_ERR(dev->regmap32);
- dev_err(&client->dev,
- "Failed to allocate 32bit register map: %d\n", ret);
- return ret;
- }
-
- if (client->dev.platform_data) {
- ret = ap1302_s_config(&dev->sd, client->dev.platform_data);
- if (ret)
- goto out_free;
- }
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- dev->cntx_res[CONTEXT_PREVIEW].res_num = ARRAY_SIZE(ap1302_preview_res);
- dev->cntx_res[CONTEXT_PREVIEW].res_table = ap1302_preview_res;
- dev->cntx_res[CONTEXT_SNAPSHOT].res_num =
- ARRAY_SIZE(ap1302_snapshot_res);
- dev->cntx_res[CONTEXT_SNAPSHOT].res_table = ap1302_snapshot_res;
- dev->cntx_res[CONTEXT_VIDEO].res_num = ARRAY_SIZE(ap1302_video_res);
- dev->cntx_res[CONTEXT_VIDEO].res_table = ap1302_video_res;
-
- ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ctrls));
- if (ret) {
- ap1302_remove(client);
- return ret;
- }
-
- for (i = 0; i < ARRAY_SIZE(ctrls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler, &ctrls[i], NULL);
-
- if (dev->ctrl_handler.error) {
- ap1302_remove(client);
- return dev->ctrl_handler.error;
- }
-
- /* Use same lock for controls as for everything else. */
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = &dev->ctrl_handler;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- dev->run_mode = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_RUN_MODE);
- v4l2_ctrl_s_ctrl(dev->run_mode, ATOMISP_RUN_MODE_PREVIEW);
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret)
- ap1302_remove(client);
- return ret;
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- return ret;
-}
-
-static const struct i2c_device_id ap1302_id[] = {
- {AP1302_NAME, 0},
- {}
-};
-MODULE_DEVICE_TABLE(i2c, ap1302_id);
-
-static struct i2c_driver ap1302_driver = {
- .driver = {
- .name = AP1302_NAME,
- },
- .probe = ap1302_probe,
- .remove = ap1302_remove,
- .id_table = ap1302_id,
-};
-
-module_i2c_driver(ap1302_driver);
-
-MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
-MODULE_DESCRIPTION("AP1302 Driver");
-MODULE_LICENSE("GPL");
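The removed probe above wires its custom controls through a v4l2_ctrl_handler. As a hedged reference, a minimal sketch of that same pattern follows; the structure and array names are illustrative, not the driver's own.

#include <media/v4l2-ctrls.h>

/* Sketch only: assumes a driver-private structure with a v4l2_ctrl_handler
 * member and an array of v4l2_ctrl_config entries, as in the ap1302 code
 * removed above. */
static int example_init_controls(struct example_dev *dev)
{
	unsigned int i;
	int ret;

	ret = v4l2_ctrl_handler_init(&dev->ctrl_handler,
				     ARRAY_SIZE(example_ctrls));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(example_ctrls); i++)
		v4l2_ctrl_new_custom(&dev->ctrl_handler, &example_ctrls[i],
				     NULL);

	if (dev->ctrl_handler.error) {
		ret = dev->ctrl_handler.error;
		v4l2_ctrl_handler_free(&dev->ctrl_handler);
		return ret;
	}

	dev->sd.ctrl_handler = &dev->ctrl_handler;
	return v4l2_ctrl_handler_setup(&dev->ctrl_handler);
}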
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
deleted file mode 100644
index 4d0b181a9671..000000000000
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __AP1302_H__
-#define __AP1302_H__
-
-#include "../include/linux/atomisp_platform.h"
-#include <linux/regmap.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-subdev.h>
-
-#define AP1302_NAME "ap1302"
-#define AP1302_CHIP_ID 0x265
-#define AP1302_I2C_MAX_LEN 65534
-#define AP1302_FW_WINDOW_OFFSET 0x8000
-#define AP1302_FW_WINDOW_SIZE 0x2000
-
-#define AP1302_REG16 2
-#define AP1302_REG32 4
-
-#define REG_CHIP_VERSION 0x0000
-#define REG_CHIP_REV 0x0050
-#define REG_MF_ID 0x0004
-#define REG_ERROR 0x0006
-#define REG_CTRL 0x1000
-#define REG_DZ_TGT_FCT 0x1010
-#define REG_SFX_MODE 0x1016
-#define REG_SS_HEAD_PT0 0x1174
-#define REG_AE_BV_OFF 0x5014
-#define REG_AE_BV_BIAS 0x5016
-#define REG_AWB_CTRL 0x5100
-#define REG_FLICK_CTRL 0x5440
-#define REG_SCENE_CTRL 0x5454
-#define REG_BOOTDATA_STAGE 0x6002
-#define REG_SENSOR_SELECT 0x600C
-#define REG_SYS_START 0x601A
-#define REG_SIP_CRC 0xF052
-
-#define REG_PREVIEW_BASE 0x2000
-#define REG_SNAPSHOT_BASE 0x3000
-#define REG_VIDEO_BASE 0x4000
-#define CNTX_WIDTH 0x00
-#define CNTX_HEIGHT 0x02
-#define CNTX_ROI_X0 0x04
-#define CNTX_ROI_Y0 0x06
-#define CNTX_ROI_X1 0x08
-#define CNTX_ROI_Y1 0x0A
-#define CNTX_ASPECT 0x0C
-#define CNTX_LOCK 0x0E
-#define CNTX_ENABLE 0x10
-#define CNTX_OUT_FMT 0x12
-#define CNTX_SENSOR_MODE 0x14
-#define CNTX_MIPI_CTRL 0x16
-#define CNTX_MIPI_II_CTRL 0x18
-#define CNTX_LINE_TIME 0x1C
-#define CNTX_MAX_FPS 0x20
-#define CNTX_AE_USG 0x22
-#define CNTX_AE_UPPER_ET 0x24
-#define CNTX_AE_MAX_ET 0x28
-#define CNTX_SS 0x2C
-#define CNTX_S1_SENSOR_MODE 0x2E
-#define CNTX_HINF_CTRL 0x30
-
-#define CTRL_CNTX_MASK 0x03
-#define CTRL_CNTX_OFFSET 0x00
-#define HINF_CTRL_LANE_MASK 0x07
-#define HINF_CTRL_LANE_OFFSET 0x00
-#define MIPI_CTRL_IMGVC_MASK 0xC0
-#define MIPI_CTRL_IMGVC_OFFSET 0x06
-#define MIPI_CTRL_IMGTYPE_AUTO 0x3F
-#define MIPI_CTRL_SSVC_MASK 0xC000
-#define MIPI_CTRL_SSVC_OFFSET 0x0E
-#define MIPI_CTRL_SSTYPE_MASK 0x3F00
-#define MIPI_CTRL_SSTYPE_OFFSET 0x08
-#define OUT_FMT_IIS_MASK 0x30
-#define OUT_FMT_IIS_OFFSET 0x08
-#define OUT_FMT_SS_MASK 0x1000
-#define OUT_FMT_SS_OFFSET 0x12
-#define OUT_FMT_TYPE_MASK 0xFF
-#define SENSOR_SELECT_MASK 0x03
-#define SENSOR_SELECT_OFFSET 0x00
-#define AWB_CTRL_MODE_MASK 0x0F
-#define AWB_CTRL_MODE_OFFSET 0x00
-#define AWB_CTRL_FLASH_MASK 0x100
-
-#define AP1302_FMT_UYVY422 0x50
-
-#define AP1302_SYS_ACTIVATE 0x8010
-#define AP1302_SYS_SWITCH 0x8140
-#define AP1302_SENSOR_PRI 0x01
-#define AP1302_SENSOR_SEC 0x02
-#define AP1302_SS_CTRL 0x31
-
-#define AP1302_MAX_RATIO_MISMATCH 10 /* Unit in percentage */
-#define AP1302_MAX_EV 2
-#define AP1302_MIN_EV -2
-
-enum ap1302_contexts {
- CONTEXT_PREVIEW = 0,
- CONTEXT_SNAPSHOT,
- CONTEXT_VIDEO,
- CONTEXT_NUM
-};
-
-/* The context registers are defined according to preview/video registers.
- Preview and video context have the same register definition.
- But snapshot context does not have register S1_SENSOR_MODE.
- When setting snapshot registers, if the offset exceeds
- S1_SENSOR_MODE, the actual offset needs to minus 2. */
-struct ap1302_context_config {
- u16 width;
- u16 height;
- u16 roi_x0;
- u16 roi_y0;
- u16 roi_x1;
- u16 roi_y1;
- u16 aspect_factor;
- u16 lock;
- u16 enable;
- u16 out_fmt;
- u16 sensor_mode;
- u16 mipi_ctrl;
- u16 mipi_ii_ctrl;
- u16 padding;
- u32 line_time;
- u16 max_fps;
- u16 ae_usg;
- u32 ae_upper_et;
- u32 ae_max_et;
- u16 ss;
- u16 s1_sensor_mode;
- u16 hinf_ctrl;
- u32 reserved;
-};
-
-struct ap1302_res_struct {
- u16 width;
- u16 height;
- u16 fps;
-};
-
-struct ap1302_context_res {
- u32 res_num;
- u32 cur_res;
- struct ap1302_res_struct *res_table;
-};
-
-struct ap1302_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct camera_sensor_platform_data *platform_data;
- const struct firmware *fw;
- struct mutex input_lock; /* serialize sensor's ioctl */
- struct v4l2_mbus_framefmt format;
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *run_mode;
- struct ap1302_context_config cntx_config[CONTEXT_NUM];
- struct ap1302_context_res cntx_res[CONTEXT_NUM];
- enum ap1302_contexts cur_context;
- unsigned int num_lanes;
- struct regmap *regmap16;
- struct regmap *regmap32;
- bool sys_activated;
- bool power_on;
-};
-
-struct ap1302_firmware {
- u32 crc;
- u32 pll_init_size;
- u32 total_size;
- u32 reserved;
-};
-
-struct ap1302_context_info {
- u16 offset;
- u16 len;
- char *name;
-};
-
-#endif
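The removed header notes that the snapshot context has no S1_SENSOR_MODE register, so context offsets beyond it must be reduced by two before use. A small sketch of that adjustment, using the CNTX_* constants above and a hypothetical helper name:

/* Sketch: translate a preview/video context offset for the snapshot
 * context, which lacks S1_SENSOR_MODE (per the comment removed above). */
static u16 example_snapshot_offset(u16 offset)
{
	if (offset > CNTX_S1_SENSOR_MODE)
		return offset - 2;
	return offset;
}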
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
index 35ed51ffe944..e70d8afcc229 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -738,10 +737,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
/* The upstream module driver (written to Crystal
* Cove) had this logic to pulse the rails low first.
@@ -772,10 +767,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* GPIO0 == "reset" (active low), GPIO1 == "power down" */
if (flag) {
/* Pulse reset, then release power down */
@@ -1165,13 +1156,6 @@ static int gc0310_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
* power on sequqence needed by the module
@@ -1216,9 +1200,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1362,9 +1343,6 @@ static int gc0310_remove(struct i2c_client *client)
struct gc0310_device *dev = to_gc0310_sensor(sd);
dev_dbg(&client->dev, "gc0310_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1375,8 +1353,7 @@ static int gc0310_remove(struct i2c_client *client)
return 0;
}
-static int gc0310_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc0310_probe(struct i2c_client *client)
{
struct gc0310_device *dev;
int ret;
@@ -1385,10 +1362,8 @@ static int gc0310_probe(struct i2c_client *client,
pr_info("%s S\n", __func__);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1457,33 +1432,17 @@ static const struct acpi_device_id gc0310_acpi_match[] = {
{"INT0310"},
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc0310_id);
static struct i2c_driver gc0310_driver = {
.driver = {
- .name = GC0310_NAME,
- .acpi_match_table = ACPI_PTR(gc0310_acpi_match),
+ .name = "gc0310",
+ .acpi_match_table = gc0310_acpi_match,
},
- .probe = gc0310_probe,
+ .probe_new = gc0310_probe,
.remove = gc0310_remove,
- .id_table = gc0310_id,
};
-
-static int init_gc0310(void)
-{
- return i2c_add_driver(&gc0310_driver);
-}
-
-static void exit_gc0310(void)
-{
-
- i2c_del_driver(&gc0310_driver);
-}
-
-module_init(init_gc0310);
-module_exit(exit_gc0310);
+module_i2c_driver(gc0310_driver);
MODULE_AUTHOR("Lai, Angie <angie.lai@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.c b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
index e43d31ea9676..85da5fe24033 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -548,10 +547,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
usleep_range(60, 90);
@@ -572,10 +567,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
ret |= dev->platform_data->gpio1_ctrl(sd, !flag);
usleep_range(60, 90);
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -906,13 +897,6 @@ static int gc2235_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
* power on sequqence needed by the module
@@ -956,9 +940,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1101,9 +1082,6 @@ static int gc2235_remove(struct i2c_client *client)
struct gc2235_device *dev = to_gc2235_sensor(sd);
dev_dbg(&client->dev, "gc2235_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_device_unregister_subdev(sd);
@@ -1114,8 +1092,7 @@ static int gc2235_remove(struct i2c_client *client)
return 0;
}
-static int gc2235_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int gc2235_probe(struct i2c_client *client)
{
struct gc2235_device *dev;
void *gcpdev;
@@ -1123,10 +1100,8 @@ static int gc2235_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1187,32 +1162,17 @@ static const struct acpi_device_id gc2235_acpi_match[] = {
{ "INT33F8" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match);
-MODULE_DEVICE_TABLE(i2c, gc2235_id);
+
static struct i2c_driver gc2235_driver = {
.driver = {
- .name = GC2235_NAME,
- .acpi_match_table = ACPI_PTR(gc2235_acpi_match),
+ .name = "gc2235",
+ .acpi_match_table = gc2235_acpi_match,
},
- .probe = gc2235_probe,
+ .probe_new = gc2235_probe,
.remove = gc2235_remove,
- .id_table = gc2235_id,
};
-
-static int init_gc2235(void)
-{
- return i2c_add_driver(&gc2235_driver);
-}
-
-static void exit_gc2235(void)
-{
-
- i2c_del_driver(&gc2235_driver);
-}
-
-module_init(init_gc2235);
-module_exit(exit_gc2235);
+module_i2c_driver(gc2235_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.Gong@intel.com>");
MODULE_DESCRIPTION("A low-level driver for GC2235 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
index decb65cfd7c9..81e5ec0c2b64 100644
--- a/drivers/staging/media/atomisp/i2c/libmsrlisthelper.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/i2c.h>
diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 679176f7c542..4fd9f538ac95 100644
--- a/drivers/staging/media/atomisp/i2c/lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -171,10 +167,9 @@ static int lm3554_set_config1(struct lm3554 *flash)
/* -----------------------------------------------------------------------------
* Hardware trigger
*/
-static void lm3554_flash_off_delay(long unsigned int arg)
+static void lm3554_flash_off_delay(struct timer_list *t)
{
- struct v4l2_subdev *sd = i2c_get_clientdata((struct i2c_client *)arg);
- struct lm3554 *flash = to_lm3554(sd);
+ struct lm3554 *flash = from_timer(flash, t, flash_off_delay);
struct lm3554_platform_data *pdata = flash->pdata;
gpio_set_value(pdata->gpio_strobe, 0);
@@ -862,8 +857,7 @@ static void *lm3554_platform_data_func(struct i2c_client *client)
return &platform_data;
}
-static int lm3554_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3554_probe(struct i2c_client *client)
{
int err = 0;
struct lm3554 *flash;
@@ -871,10 +865,8 @@ static int lm3554_probe(struct i2c_client *client,
int ret;
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
- if (!flash) {
- dev_err(&client->dev, "out of memory\n");
+ if (!flash)
return -ENOMEM;
- }
flash->pdata = client->dev.platform_data;
@@ -915,8 +907,7 @@ static int lm3554_probe(struct i2c_client *client,
mutex_init(&flash->power_lock);
- setup_timer(&flash->flash_off_delay, lm3554_flash_off_delay,
- (unsigned long)client);
+ timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0);
err = lm3554_gpio_init(client);
if (err) {
@@ -962,13 +953,6 @@ fail:
return ret;
}
-static const struct i2c_device_id lm3554_id[] = {
- {LM3554_NAME, 0},
- {},
-};
-
-MODULE_DEVICE_TABLE(i2c, lm3554_id);
-
static const struct dev_pm_ops lm3554_pm_ops = {
.suspend = lm3554_suspend,
.resume = lm3554_resume,
@@ -978,32 +962,19 @@ static const struct acpi_device_id lm3554_acpi_match[] = {
{ "INTCF1C" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match);
static struct i2c_driver lm3554_driver = {
.driver = {
- .name = LM3554_NAME,
+ .name = "lm3554",
.pm = &lm3554_pm_ops,
- .acpi_match_table = ACPI_PTR(lm3554_acpi_match),
+ .acpi_match_table = lm3554_acpi_match,
},
- .probe = lm3554_probe,
+ .probe_new = lm3554_probe,
.remove = lm3554_remove,
- .id_table = lm3554_id,
};
+module_i2c_driver(lm3554_driver);
-static __init int init_lm3554(void)
-{
- return i2c_add_driver(&lm3554_driver);
-}
-
-static __exit void exit_lm3554(void)
-{
- i2c_del_driver(&lm3554_driver);
-}
-
-module_init(init_lm3554);
-module_exit(exit_lm3554);
MODULE_AUTHOR("Jing Tao <jing.tao@intel.com>");
MODULE_DESCRIPTION("LED flash driver for LM3554");
MODULE_LICENSE("GPL");
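The lm3554 timer hunks above follow the kernel-wide timer API conversion: the callback now receives the timer_list itself and recovers its container with from_timer() instead of casting an unsigned long cookie. A hedged sketch of the pattern with illustrative names:

#include <linux/timer.h>

struct example_flash {
	struct timer_list flash_off_delay;
	/* ... */
};

static void example_flash_off_delay(struct timer_list *t)
{
	struct example_flash *flash =
		from_timer(flash, t, flash_off_delay);

	/* drive the strobe GPIO low, etc. */
}

/* and at init time:
 * timer_setup(&flash->flash_off_delay, example_flash_off_delay, 0);
 */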
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
index 3c837cb8859c..55882bea2049 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -32,7 +28,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/acpi.h>
#include "../include/linux/atomisp_gmin_platform.h"
#include <media/v4l2-device.h>
@@ -455,10 +450,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v2p8_ctrl(sd, 1);
if (ret == 0) {
@@ -481,10 +472,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: current modules wire only one GPIO signal (RESET#),
* but the schematic wires up two to the connector. BIOS
* versions have been unfortunately inconsistent with which
@@ -1584,13 +1571,6 @@ mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
dev->platform_data =
(struct camera_sensor_platform_data *)platform_data;
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- v4l2_err(client, "mt9m114 platform init err\n");
- return ret;
- }
- }
ret = power_up(sd);
if (ret) {
v4l2_err(client, "mt9m114 power-up err");
@@ -1844,8 +1824,6 @@ static int mt9m114_remove(struct i2c_client *client)
dev = container_of(sd, struct mt9m114_device, sd);
dev->platform_data->csi_cfg(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -1853,8 +1831,7 @@ static int mt9m114_remove(struct i2c_client *client)
return 0;
}
-static int mt9m114_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int mt9m114_probe(struct i2c_client *client)
{
struct mt9m114_device *dev;
int ret = 0;
@@ -1863,10 +1840,8 @@ static int mt9m114_probe(struct i2c_client *client,
/* Setup sensor configuration structure */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops);
pdata = client->dev.platform_data;
@@ -1926,38 +1901,22 @@ static int mt9m114_probe(struct i2c_client *client,
return 0;
}
-MODULE_DEVICE_TABLE(i2c, mt9m114_id);
-
static const struct acpi_device_id mt9m114_acpi_match[] = {
{ "INT33F0" },
{ "CRMT1040" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match);
static struct i2c_driver mt9m114_driver = {
.driver = {
.name = "mt9m114",
- .acpi_match_table = ACPI_PTR(mt9m114_acpi_match),
+ .acpi_match_table = mt9m114_acpi_match,
},
- .probe = mt9m114_probe,
+ .probe_new = mt9m114_probe,
.remove = mt9m114_remove,
- .id_table = mt9m114_id,
};
-
-static __init int init_mt9m114(void)
-{
- return i2c_add_driver(&mt9m114_driver);
-}
-
-static __exit void exit_mt9m114(void)
-{
- i2c_del_driver(&mt9m114_driver);
-}
-
-module_init(init_mt9m114);
-module_exit(exit_mt9m114);
+module_i2c_driver(mt9m114_driver);
MODULE_AUTHOR("Shuguang Gong <Shuguang.gong@intel.com>");
MODULE_LICENSE("GPL");
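A side note on the ACPI_PTR() removals seen throughout this patch: ACPI_PTR(x) evaluates to x only when CONFIG_ACPI is set and to NULL otherwise, so it is normally paired with a CONFIG_ACPI guard around the table. Referencing the table directly, as these hunks do, keeps it built in unconditionally. A hedged illustration with placeholder names:

#include <linux/acpi.h>
#include <linux/mod_devicetable.h>

#ifdef CONFIG_ACPI
static const struct acpi_device_id example_acpi_match[] = {
	{ "XXXX0000" },		/* placeholder */
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_match);
#endif

/* guarded form:          .acpi_match_table = ACPI_PTR(example_acpi_match),
 * unconditional form,
 * as used in this patch: .acpi_match_table = example_acpi_match,
 */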
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 51b7d61df0f5..cd67d38f183a 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -253,8 +252,8 @@ static int ov2680_write_reg_array(struct i2c_client *client,
if (!__ov2680_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov2680_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov2680_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -399,7 +398,9 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
u16 vts,hts;
int ret,exp_val;
- dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain);
+ dev_dbg(&client->dev,
+ "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",
+ coarse_itg, gain, digitgain);
hts = ov2680_res[dev->fmt_idx].pixels_per_line;
vts = ov2680_res[dev->fmt_idx].lines_per_frame;
@@ -847,10 +848,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret |= dev->platform_data->v1p8_ctrl(sd, 1);
ret |= dev->platform_data->v2p8_ctrl(sd, 1);
@@ -872,10 +869,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* The OV2680 documents only one GPIO input (#XSHUTDN), but
* existing integrations often wire two (reset/power_down)
* because that is the way other sensors work. There is no
@@ -1438,8 +1431,7 @@ static int ov2680_remove(struct i2c_client *client)
return 0;
}
-static int ov2680_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2680_probe(struct i2c_client *client)
{
struct ov2680_device *dev;
int ret;
@@ -1447,10 +1439,8 @@ static int ov2680_probe(struct i2c_client *client,
unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1523,35 +1513,16 @@ static const struct acpi_device_id ov2680_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
-
-MODULE_DEVICE_TABLE(i2c, ov2680_id);
static struct i2c_driver ov2680_driver = {
.driver = {
- .owner = THIS_MODULE,
- .name = OV2680_NAME,
- .acpi_match_table = ACPI_PTR(ov2680_acpi_match),
-
+ .name = "ov2680",
+ .acpi_match_table = ov2680_acpi_match,
},
- .probe = ov2680_probe,
+ .probe_new = ov2680_probe,
.remove = ov2680_remove,
- .id_table = ov2680_id,
};
-
-static int init_ov2680(void)
-{
- return i2c_add_driver(&ov2680_driver);
-}
-
-static void exit_ov2680(void)
-{
-
- i2c_del_driver(&ov2680_driver);
-}
-
-module_init(init_ov2680);
-module_exit(exit_ov2680);
+module_i2c_driver(ov2680_driver);
MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
index 10094ac56561..4df7eba8d375 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include "../include/linux/atomisp_gmin_platform.h"
@@ -651,10 +650,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (flag) {
ret = dev->platform_data->v1p8_ctrl(sd, 1);
if (ret == 0) {
@@ -678,10 +673,6 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
/* Note: the GPIO order is asymmetric: always RESET#
* before PWDN# when turning it on or off.
*/
@@ -1044,13 +1035,6 @@ static int ov2722_s_config(struct v4l2_subdev *sd,
(struct camera_sensor_platform_data *)platform_data;
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- dev_err(&client->dev, "platform init err\n");
- goto platform_init_failed;
- }
- }
/* power off the module, then power on it in future
* as first power on by board may not fulfill the
@@ -1095,9 +1079,6 @@ fail_power_on:
power_down(sd);
dev_err(&client->dev, "sensor power-gating failed\n");
fail_power_off:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-platform_init_failed:
mutex_unlock(&dev->input_lock);
return ret;
}
@@ -1241,9 +1222,6 @@ static int ov2722_remove(struct i2c_client *client)
struct ov2722_device *dev = to_ov2722_sensor(sd);
dev_dbg(&client->dev, "ov2722_remove...\n");
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
dev->platform_data->csi_cfg(sd, 0);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister_subdev(sd);
@@ -1276,8 +1254,7 @@ static int __ov2722_init_ctrl_handler(struct ov2722_device *dev)
return 0;
}
-static int ov2722_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov2722_probe(struct i2c_client *client)
{
struct ov2722_device *dev;
void *ovpdev;
@@ -1285,10 +1262,8 @@ static int ov2722_probe(struct i2c_client *client,
struct acpi_device *adev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -1335,38 +1310,21 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov2722_id);
-
static const struct acpi_device_id ov2722_acpi_match[] = {
{ "INT33FB" },
{},
};
-
MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match);
static struct i2c_driver ov2722_driver = {
.driver = {
- .name = OV2722_NAME,
- .acpi_match_table = ACPI_PTR(ov2722_acpi_match),
+ .name = "ov2722",
+ .acpi_match_table = ov2722_acpi_match,
},
- .probe = ov2722_probe,
+ .probe_new = ov2722_probe,
.remove = ov2722_remove,
- .id_table = ov2722_id,
};
-
-static int init_ov2722(void)
-{
- return i2c_add_driver(&ov2722_driver);
-}
-
-static void exit_ov2722(void)
-{
-
- i2c_del_driver(&ov2722_driver);
-}
-
-module_init(init_ov2722);
-module_exit(exit_ov2722);
+module_i2c_driver(ov2722_driver);
MODULE_AUTHOR("Wei Liu <wei.liu@intel.com>");
MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors");
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index 7d8a0aeecb6c..c422d0398fc7 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -36,8 +32,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC0310_NAME "gc0310"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 1
#define I2C_RETRY_COUNT 5
@@ -196,11 +190,6 @@ struct gc0310_write_ctrl {
struct gc0310_write_buffer buffer;
};
-static const struct i2c_device_id gc0310_id[] = {
- {GC0310_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index a8d6aa9c9a5d..3c30a05c3991 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -33,8 +33,6 @@
#include "../include/linux/atomisp_platform.h"
-#define GC2235_NAME "gc2235"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -200,11 +198,6 @@ struct gc2235_write_ctrl {
struct gc2235_write_buffer buffer;
};
-static const struct i2c_device_id gc2235_id[] = {
- {GC2235_NAME, 0},
- {}
-};
-
static struct gc2235_reg const gc2235_stream_on[] = {
{ GC2235_8BIT, 0xfe, 0x03}, /* switch to P3 */
{ GC2235_8BIT, 0x10, 0x91}, /* start mipi */
diff --git a/drivers/staging/media/atomisp/i2c/imx/Kconfig b/drivers/staging/media/atomisp/i2c/imx/Kconfig
deleted file mode 100644
index a39eeb3b6ad4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config VIDEO_IMX
- tristate "sony imx sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_MSRLIST_HELPER && m
- ---help---
- This is a Video4Linux2 sensor-level driver for the Sony
- IMX RAW sensor.
-
- It currently depends on internal V4L2 extensions defined in
- atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
deleted file mode 100644
index b6578f09546e..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-obj-$(CONFIG_VIDEO_IMX) += imx1x5.o
-
-imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o otp_imx.o otp_brcc064_e2prom.o otp_e2prom.o
-
-ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
-obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
-
-# HACK! While this driver is in bad shape, don't enable several warnings
-# that would be otherwise enabled with W=1
-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
-ccflags-y += $(call cc-disable-warning, unused-const-variable)
-ccflags-y += $(call cc-disable-warning, missing-prototypes)
-ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c b/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
deleted file mode 100644
index 558dcdf135d9..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.c
+++ /dev/null
@@ -1,216 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-#include "ad5816g.h"
-
-struct ad5816g_device ad5816g_dev;
-
-static int ad5816g_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = AD5816G_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = AD5816G_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int ad5816g_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = AD5816G_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int ad5816g_set_arc_mode(struct i2c_client *client)
-{
- int ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_CONTROL, AD5816G_ARC_EN);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_MODE,
- AD5816G_MODE_2_5M_SWITCH_CLOCK);
- if (ret)
- return ret;
-
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_FREQ, AD5816G_DEF_FREQ);
- return ret;
-}
-
-int ad5816g_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 ad5816g_id;
-
- /* Enable power */
- ret = ad5816g_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* waiting time AD5816G(vcm) - t1 + t2
- * t1(1ms) -Time from VDD high to first i2c cmd
- * t2(100us) - exit power-down mode time
- */
- usleep_range(1100, 2200);
- /* Detect device */
- ret = ad5816g_i2c_rd8(client, AD5816G_IC_INFO, &ad5816g_id);
- if (ret < 0)
- goto fail_powerdown;
- if (ad5816g_id != AD5816G_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- ret = ad5816g_set_arc_mode(client);
- if (ret)
- return ret;
-
- /* set the VCM_THRESHOLD */
- ret = ad5816g_i2c_wr8(client, AD5816G_VCM_THRESHOLD,
- AD5816G_DEF_THRESHOLD);
-
- return ret;
-
-fail_powerdown:
- ad5816g_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int ad5816g_vcm_power_down(struct v4l2_subdev *sd)
-{
- return ad5816g_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int ad5816g_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- return ad5816g_i2c_wr16(client, AD5816G_VCM_CODE_MSB, data);
-}
-
-int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, AD5816G_MAX_FOCUS_POS);
- ret = ad5816g_t_focus_vcm(sd, value);
- if (ret == 0) {
- ad5816g_dev.number_of_steps = value - ad5816g_dev.focus;
- ad5816g_dev.focus = value;
- getnstimeofday(&(ad5816g_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return ad5816g_t_focus_abs(sd, ad5816g_dev.focus + value);
-}
-
-int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(ad5816g_dev.number_of_steps) * DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (ad5816g_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- ad5816g_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = ad5816g_dev.focus - ad5816g_dev.number_of_steps;
- else
- *value = ad5816g_dev.focus;
-
- return 0;
-}
-
-int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
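The deleted ad5816g helpers above hand-roll their i2c_msg transfers. As a hedged sketch of the regmap equivalent (which the removed ap1302 code already used for its 16/32-bit maps), assuming an 8-bit-register, 8-bit-value device and reusing the register constants from the ad5816g.h removed below:

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_vcm_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_vcm_detect(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int id;
	int ret;

	map = devm_regmap_init_i2c(client, &example_vcm_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, AD5816G_IC_INFO, &id);
	if (ret)
		return ret;

	return id == AD5816G_ID ? 0 : -ENXIO;
}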
diff --git a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h b/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
deleted file mode 100644
index f995c2eeada4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/ad5816g.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __AD5816G_H__
-#define __AD5816G_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define AD5816G_VCM_ADDR 0x0e
-
-/* ad5816g device structure */
-struct ad5816g_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
-};
-
-#define AD5816G_INVALID_CONFIG 0xffffffff
-#define AD5816G_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-/* Register Definitions */
-#define AD5816G_IC_INFO 0x00
-#define AD5816G_IC_VERSION 0x01
-#define AD5816G_CONTROL 0x02
-#define AD5816G_VCM_CODE_MSB 0x03
-#define AD5816G_VCM_CODE_LSB 0x04
-#define AD5816G_STATUS 0x05
-#define AD5816G_MODE 0x06
-#define AD5816G_VCM_FREQ 0x07
-#define AD5816G_VCM_THRESHOLD 0x08
-
-/* ARC MODE ENABLE */
-#define AD5816G_ARC_EN 0x02
-/* ARC RES2 MODE */
-#define AD5816G_ARC_RES2 0x01
-/* ARC VCM FREQ - 78.1Hz */
-#define AD5816G_DEF_FREQ 0x7a
-/* ARC VCM THRESHOLD - 0x08 << 1 */
-#define AD5816G_DEF_THRESHOLD 0x64
-#define AD5816G_ID 0x24
-#define VCM_CODE_MASK 0x03ff
-
-#define AD5816G_MODE_2_5M_SWITCH_CLOCK 0x14
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/common.h b/drivers/staging/media/atomisp/i2c/imx/common.h
deleted file mode 100644
index 7e525cef56ef..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/common.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#ifndef __COMMON_H__
-#define __COMMON_H__
-
-#define MAX_FPS_OPTIONS_SUPPORTED 3
-#define I2C_MSG_LENGTH 0x2
-#define E2PROM_2ADDR 0x80000000
-#define E2PROM_ADDR_MASK 0x7fffffff
-
-/* Defines for register writes and register array processing */
-#define IMX_BYTE_MAX 32
-#define IMX_SHORT_MAX 16
-#define I2C_RETRY_COUNT 5
-#define IMX_TOK_MASK 0xfff0
-
-enum imx_tok_type {
- IMX_8BIT = 0x0001,
- IMX_16BIT = 0x0002,
- IMX_TOK_TERM = 0xf000, /* terminating token for reg list */
- IMX_TOK_DELAY = 0xfe00 /* delay token for reg list */
-};
-
-/**
- * struct imx_reg - MI sensor register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct imx_reg {
- enum imx_tok_type type;
- u16 sreg;
- u32 val; /* @set value for read/mod/write, @mask */
-};
-
-struct imx_fps_setting {
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- const struct imx_reg *regs; /* regs that the fps setting needs */
-};
-
-struct imx_resolution {
- const struct imx_fps_setting fps_options[MAX_FPS_OPTIONS_SUPPORTED];
- u8 *desc;
- const struct imx_reg *regs;
- int res;
- int width;
- int height;
- int fps;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- int mipi_freq; /* MIPI lane frequency in kHz */
- unsigned short skip_frames;
- u8 bin_factor_x;
- u8 bin_factor_y;
- bool used;
-};
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-int imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val);
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.c b/drivers/staging/media/atomisp/i2c/imx/drv201.c
deleted file mode 100644
index 6d9d4c968722..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.c
+++ /dev/null
@@ -1,209 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "drv201.h"
-
-static struct drv201_device drv201_dev;
-
-static int drv201_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2];
- buf[0] = reg;
- buf[1] = 0;
-
- msg[0].addr = DRV201_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &buf[0];
-
- msg[1].addr = DRV201_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
- return 0;
-}
-
-static int drv201_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2];
- buf[0] = reg;
- buf[1] = val;
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-static int drv201_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3];
- buf[0] = reg;
- buf[1] = (u8)(val >> 8);
- buf[2] = (u8)(val & 0xff);
- msg.addr = DRV201_VCM_ADDR;
- msg.flags = 0;
- msg.len = 3;
- msg.buf = &buf[0];
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
- return 0;
-}
-
-int drv201_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = drv201_dev.platform_data->power_ctrl(sd, 1);
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
- /*
- * Jiggle SCL pin to wake up device.
- * Drv201 expect SCL from low to high to wake device up.
- * So the 1st access to i2c would fail.
- * Using following function to wake device up.
- */
- drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
-
- /* Need 100us to transit from SHUTDOWN to STANDBY*/
- usleep_range(WAKEUP_DELAY_US, WAKEUP_DELAY_US * 10);
-
- /* Reset device */
- ret = drv201_i2c_wr8(client, DRV201_CONTROL, DRV201_RESET);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = drv201_i2c_rd8(client, DRV201_CONTROL, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DEFAULT_CONTROL_VAL) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
-
- drv201_dev.focus = DRV201_MAX_FOCUS_POS;
- drv201_dev.initialized = true;
-
- return 0;
-fail_powerdown:
- drv201_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int drv201_vcm_power_down(struct v4l2_subdev *sd)
-{
- return drv201_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int drv201_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 data = val & VCM_CODE_MASK;
-
- if (!drv201_dev.initialized)
- return -ENODEV;
- return drv201_i2c_wr16(client, DRV201_VCM_CURRENT, data);
-}
-
-int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DRV201_MAX_FOCUS_POS);
- ret = drv201_t_focus_vcm(sd, value);
- if (ret == 0) {
- drv201_dev.number_of_steps = value - drv201_dev.focus;
- drv201_dev.focus = value;
- getnstimeofday(&(drv201_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return drv201_t_focus_abs(sd, drv201_dev.focus + value);
-}
-
-int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(drv201_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (drv201_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- drv201_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = drv201_dev.focus - drv201_dev.number_of_steps;
- else
- *value = drv201_dev.focus;
-
- return 0;
-}
-
-int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/drv201.h b/drivers/staging/media/atomisp/i2c/imx/drv201.h
deleted file mode 100644
index 8fc0ad116630..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/drv201.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef __DRV201_H__
-#define __DRV201_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-#include <linux/time.h>
-
-#define DRV201_VCM_ADDR 0x0e
-
-/* drv201 device structure */
-struct drv201_device {
- const struct camera_af_platform_data *platform_data;
- struct timespec timestamp_t_focus_abs;
- struct timespec focus_time; /* Time when focus was last time set */
- s32 focus; /* Current focus value */
- s16 number_of_steps;
- bool initialized; /* true if drv201 is detected */
-};
-
-#define DRV201_INVALID_CONFIG 0xffffffff
-#define DRV201_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DRV201_CONTROL 2
-#define DRV201_VCM_CURRENT 3
-#define DRV201_STATUS 5
-#define DRV201_MODE 6
-#define DRV201_VCM_FREQ 7
-
-#define DEFAULT_CONTROL_VAL 2
-#define DRV201_RESET 1
-#define WAKEUP_DELAY_US 100
-#define VCM_CODE_MASK 0x03ff
-
-#endif
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.c b/drivers/staging/media/atomisp/i2c/imx/dw9714.c
deleted file mode 100644
index 6397a7ee0af6..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.c
+++ /dev/null
@@ -1,223 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-
-#include "dw9714.h"
-
-static struct dw9714_device dw9714_dev;
-static int dw9714_i2c_write(struct i2c_client *client, u16 data)
-{
- struct i2c_msg msg;
- const int num_msg = 1;
- int ret;
- u16 val;
-
- val = cpu_to_be16(data);
- msg.addr = DW9714_VCM_ADDR;
- msg.flags = 0;
- msg.len = DW9714_16BIT;
- msg.buf = (u8 *)&val;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
-
- return ret == num_msg ? 0 : -EIO;
-}
-
-int dw9714_vcm_power_up(struct v4l2_subdev *sd)
-{
- int ret;
-
- /* Enable power */
- ret = dw9714_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- usleep_range(12000, 12500);
- return ret;
-}
-
-int dw9714_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9714_dev.platform_data->power_ctrl(sd, 0);
-}
-
-
-static int dw9714_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = -EINVAL;
- u8 mclk = vcm_step_mclk(dw9714_dev.vcm_settings.step_setting);
- u8 s = vcm_step_s(dw9714_dev.vcm_settings.step_setting);
-
- /*
- * For different mode, VCM_PROTECTION_OFF/ON required by the
- * control procedure. For DW9714_DIRECT/DLC mode, slew value is
- * VCM_DEFAULT_S(0).
- */
- switch (dw9714_dev.vcm_mode) {
- case DW9714_DIRECT:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, DIRECT_VCM);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- case DW9714_LSC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_DISABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client, vcm_val(val, s));
- break;
- case DW9714_DLC:
- if (dw9714_dev.vcm_settings.update) {
- ret = dw9714_i2c_write(client, VCM_PROTECTION_OFF);
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_dlc_mclk(DLC_ENABLE, mclk));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client,
- vcm_tsrc(dw9714_dev.vcm_settings.t_src));
- if (ret)
- return ret;
- ret = dw9714_i2c_write(client, VCM_PROTECTION_ON);
- if (ret)
- return ret;
- dw9714_dev.vcm_settings.update = false;
- }
- ret = dw9714_i2c_write(client,
- vcm_val(val, VCM_DEFAULT_S));
- break;
- }
- return ret;
-}
-
-int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- int ret;
-
- value = clamp(value, 0, DW9714_MAX_FOCUS_POS);
- ret = dw9714_t_focus_vcm(sd, value);
- if (ret == 0) {
- dw9714_dev.number_of_steps = value - dw9714_dev.focus;
- dw9714_dev.focus = value;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_abs_init(struct v4l2_subdev *sd)
-{
- int ret;
-
- ret = dw9714_t_focus_vcm(sd, DW9714_DEFAULT_FOCUS_POS);
- if (ret == 0) {
- dw9714_dev.number_of_steps =
- DW9714_DEFAULT_FOCUS_POS - dw9714_dev.focus;
- dw9714_dev.focus = DW9714_DEFAULT_FOCUS_POS;
- getnstimeofday(&(dw9714_dev.timestamp_t_focus_abs));
- }
-
- return ret;
-}
-
-int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-
- return dw9714_t_focus_abs(sd, dw9714_dev.focus + value);
-}
-
-int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- u32 status = 0;
- struct timespec temptime;
- const struct timespec timedelay = {
- 0,
- min_t(u32, abs(dw9714_dev.number_of_steps)*DELAY_PER_STEP_NS,
- DELAY_MAX_PER_STEP_NS),
- };
-
- ktime_get_ts(&temptime);
-
- temptime = timespec_sub(temptime, (dw9714_dev.timestamp_t_focus_abs));
-
- if (timespec_compare(&temptime, &timedelay) <= 0) {
- status |= ATOMISP_FOCUS_STATUS_MOVING;
- status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
- } else {
- status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- status |= ATOMISP_FOCUS_HP_COMPLETE;
- }
- *value = status;
-
- return 0;
-}
-
-int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- s32 val;
-
- dw9714_q_focus_status(sd, &val);
-
- if (val & ATOMISP_FOCUS_STATUS_MOVING)
- *value = dw9714_dev.focus - dw9714_dev.number_of_steps;
- else
- *value = dw9714_dev.focus;
-
- return 0;
-}
-
-int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.step_setting = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
-
-int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- dw9714_dev.vcm_settings.t_src = value;
- dw9714_dev.vcm_settings.update = true;
-
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9714.h b/drivers/staging/media/atomisp/i2c/imx/dw9714.h
deleted file mode 100644
index 5a98a9c97182..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9714.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef __DW9714_H__
-#define __DW9714_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-
-#define DW9714_VCM_ADDR 0x0c
-
-enum dw9714_tok_type {
- DW9714_8BIT = 0x0001,
- DW9714_16BIT = 0x0002,
-};
-
-struct dw9714_vcm_settings {
- u16 code; /* bit[9:0]: Data[9:0] */
- u8 t_src; /* bit[4:0]: T_SRC[4:0] */
- u8 step_setting; /* bit[3:0]: S[3:0]/bit[5:4]: MCLK[1:0] */
- bool update;
-};
-
-enum dw9714_vcm_mode {
- DW9714_DIRECT = 0x1, /* direct control */
- DW9714_LSC = 0x2, /* linear slope control */
- DW9714_DLC = 0x3, /* dual level control */
-};
-
-/* dw9714 device structure */
-struct dw9714_device {
- struct dw9714_vcm_settings vcm_settings;
- struct timespec timestamp_t_focus_abs;
- enum dw9714_vcm_mode vcm_mode;
- s16 number_of_steps;
- bool initialized; /* true if dw9714 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9714_INVALID_CONFIG 0xffffffff
-#define DW9714_MAX_FOCUS_POS 1023
-#define DW9714_DEFAULT_FOCUS_POS 290
-
-
-/* MCLK[1:0] = 01 T_SRC[4:0] = 00001 S[3:0] = 0111 */
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DLC_ENABLE 1
-#define DLC_DISABLE 0
-#define VCM_PROTECTION_OFF 0xeca3
-#define VCM_PROTECTION_ON 0xdc51
-#define VCM_DEFAULT_S 0x0
-
-#define vcm_step_s(a) (u8)(a & 0xf)
-#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
-#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
-#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
-#define vcm_val(data, s) (u16)(data << 4 | s)
-#define DIRECT_VCM vcm_dlc_mclk(0, 0)
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.c b/drivers/staging/media/atomisp/i2c/imx/dw9718.c
deleted file mode 100644
index c02b9f0a2440..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Support for dw9718 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9718.h"
-
-static struct dw9718_device dw9718_dev;
-
-static int dw9718_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9718_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9718_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9718_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9718_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9718_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9718_MAX_FOCUS_POS);
- ret = dw9718_i2c_wr16(client, DW9718_DATA_M, value);
- /*pr_info("%s: value = %d\n", __func__, value);*/
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9718_dev.focus_time);
- dw9718_dev.focus = value;
-
- return 0;
-}
-
-int dw9718_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- if (dw9718_dev.power_on)
- return 0;
-
- /* Enable power */
- ret = dw9718_dev.platform_data->power_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "DW9718_PD power_ctrl failed %d\n", ret);
- return ret;
- }
- /* Wait for VBAT to stabilize */
- udelay(100);
-
- /* Detect device */
- ret = dw9718_i2c_rd8(client, DW9718_SACT, &value);
- if (ret < 0) {
- dev_err(&client->dev, "read DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
- /*
- * WORKAROUND: for module P8V12F-203, which is used on the
- * Cherrytrail Refresh Davis Reef AoB, register SACT does not
- * return the default value given in the spec. The VCM works as
- * expected and the root cause is still under discussion with the
- * vendor. Work around it here by not aborting the power-up
- * sequence and just warning about this error.
- */
- if (value != DW9718_SACT_DEFAULT_VAL)
- dev_warn(&client->dev, "%s error, incorrect ID\n", __func__);
-
- /* Initialize according to recommended settings */
- ret = dw9718_i2c_wr8(client, DW9718_CONTROL,
- DW9718_CONTROL_SW_LINEAR |
- DW9718_CONTROL_S_SAC4 |
- DW9718_CONTROL_OCP_DISABLE |
- DW9718_CONTROL_UVLO_DISABLE);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_CONTROL failed %d\n", ret);
- goto fail_powerdown;
- }
- ret = dw9718_i2c_wr8(client, DW9718_SACT,
- DW9718_SACT_MULT_TWO |
- DW9718_SACT_PERIOD_8_8MS);
- if (ret < 0) {
- dev_err(&client->dev, "write DW9718_SACT failed %d\n", ret);
- goto fail_powerdown;
- }
-
- ret = dw9718_t_focus_abs(sd, dw9718_dev.focus);
- if (ret)
- return ret;
- dw9718_dev.initialized = true;
- dw9718_dev.power_on = 1;
-
- return 0;
-
-fail_powerdown:
- dev_err(&client->dev, "%s error, powerup failed\n", __func__);
- dw9718_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9718_vcm_power_down(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- if (!dw9718_dev.power_on)
- return 0;
-
- ret = dw9718_dev.platform_data->power_ctrl(sd, 0);
- if (ret) {
- dev_err(&client->dev, "%s power_ctrl failed\n",
- __func__);
- return ret;
- }
- dw9718_dev.power_on = 0;
-
- return 0;
-}
-
-int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9718_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9718_t_focus_abs(sd, dw9718_dev.focus + value);
-}
-
-int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9718_dev.focus;
- return 0;
-}
-int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9718_vcm_init(struct v4l2_subdev *sd)
-{
- dw9718_dev.platform_data = camera_get_af_platform_data();
- dw9718_dev.focus = DW9718_DEFAULT_FOCUS_POSITION;
- dw9718_dev.power_on = 0;
- return (NULL == dw9718_dev.platform_data) ? -ENODEV : 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9718.h b/drivers/staging/media/atomisp/i2c/imx/dw9718.h
deleted file mode 100644
index 4a1040c3149f..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9718.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Support for dw9718 vcm driver.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9718_H__
-#define __DW9718_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9718_VCM_ADDR (0x18 >> 1)
-
-/* dw9718 device structure */
-struct dw9718_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9718 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
- __u8 power_on;
-};
-
-#define DW9718_MAX_FOCUS_POS 1023
-
-/* Register addresses */
-#define DW9718_PD 0x00
-#define DW9718_CONTROL 0x01
-#define DW9718_DATA_M 0x02
-#define DW9718_DATA_L 0x03
-#define DW9718_SW 0x04
-#define DW9718_SACT 0x05
-#define DW9718_FLAG 0x10
-
-#define DW9718_CONTROL_SW_LINEAR BIT(0)
-#define DW9718_CONTROL_S_SAC4 (BIT(1) | BIT(3))
-#define DW9718_CONTROL_OCP_DISABLE BIT(4)
-#define DW9718_CONTROL_UVLO_DISABLE BIT(5)
-
-#define DW9718_SACT_MULT_TWO 0x00
-#define DW9718_SACT_PERIOD_8_8MS 0x19
-#define DW9718_SACT_DEFAULT_VAL 0x60
-
-#define DW9718_DEFAULT_FOCUS_POSITION 300
-
-#endif /* __DW9718_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.c b/drivers/staging/media/atomisp/i2c/imx/dw9719.c
deleted file mode 100644
index 565237796bb4..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/delay.h>
-#include "dw9719.h"
-
-static struct dw9719_device dw9719_dev;
-
-static int dw9719_i2c_rd8(struct i2c_client *client, u8 reg, u8 *val)
-{
- struct i2c_msg msg[2];
- u8 buf[2] = { reg };
-
- msg[0].addr = DW9719_VCM_ADDR;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = buf;
-
- msg[1].addr = DW9719_VCM_ADDR;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 1;
- msg[1].buf = &buf[1];
- *val = 0;
-
- if (i2c_transfer(client->adapter, msg, 2) != 2)
- return -EIO;
- *val = buf[1];
-
- return 0;
-}
-
-static int dw9719_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
- struct i2c_msg msg;
- u8 buf[2] = { reg, val };
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-static int dw9719_i2c_wr16(struct i2c_client *client, u8 reg, u16 val)
-{
- struct i2c_msg msg;
- u8 buf[3] = { reg, (u8)(val >> 8), (u8)(val & 0xff)};
-
- msg.addr = DW9719_VCM_ADDR;
- msg.flags = 0;
- msg.len = sizeof(buf);
- msg.buf = buf;
-
- if (i2c_transfer(client->adapter, &msg, 1) != 1)
- return -EIO;
-
- return 0;
-}
-
-int dw9719_vcm_power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 value;
-
- /* Enable power */
- ret = dw9719_dev.platform_data->power_ctrl(sd, 1);
- /* waiting time requested by DW9714A(vcm) */
- if (ret)
- return ret;
- /* Wait for VBAT to stabilize */
- udelay(1);
-
- /*
- * Jiggle SCL pin to wake up device.
- */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, 1);
- /* Need 100us to transition from SHUTDOWN to STANDBY */
- usleep_range(100, 1000);
-
- /* Enable the ringing compensation */
- ret = dw9719_i2c_wr8(client, DW9719_CONTROL, DW9719_ENABLE_RINGING);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Use SAC3 mode */
- ret = dw9719_i2c_wr8(client, DW9719_MODE, DW9719_MODE_SAC3);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Set the resonance frequency */
- ret = dw9719_i2c_wr8(client, DW9719_VCM_FREQ, DW9719_DEFAULT_VCM_FREQ);
- if (ret < 0)
- goto fail_powerdown;
-
- /* Detect device */
- ret = dw9719_i2c_rd8(client, DW9719_INFO, &value);
- if (ret < 0)
- goto fail_powerdown;
- if (value != DW9719_ID) {
- ret = -ENXIO;
- goto fail_powerdown;
- }
- dw9719_dev.focus = 0;
- dw9719_dev.initialized = true;
-
- return 0;
-
-fail_powerdown:
- dw9719_dev.platform_data->power_ctrl(sd, 0);
- return ret;
-}
-
-int dw9719_vcm_power_down(struct v4l2_subdev *sd)
-{
- return dw9719_dev.platform_data->power_ctrl(sd, 0);
-}
-
-int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- static const struct timespec move_time = {
-
- .tv_sec = 0,
- .tv_nsec = 60000000
- };
- struct timespec current_time, finish_time, delta_time;
-
- getnstimeofday(&current_time);
- finish_time = timespec_add(dw9719_dev.focus_time, move_time);
- delta_time = timespec_sub(current_time, finish_time);
- if (delta_time.tv_sec >= 0 && delta_time.tv_nsec >= 0) {
- *value = ATOMISP_FOCUS_HP_COMPLETE |
- ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
- } else {
- *value = ATOMISP_FOCUS_STATUS_MOVING |
- ATOMISP_FOCUS_HP_IN_PROGRESS;
- }
-
- return 0;
-}
-
-int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- value = clamp(value, 0, DW9719_MAX_FOCUS_POS);
- ret = dw9719_i2c_wr16(client, DW9719_VCM_CURRENT, value);
- if (ret < 0)
- return ret;
-
- getnstimeofday(&dw9719_dev.focus_time);
- dw9719_dev.focus = value;
-
- return 0;
-}
-
-int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- return dw9719_t_focus_abs(sd, dw9719_dev.focus + value);
-}
-
-int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- *value = dw9719_dev.focus;
- return 0;
-}
-int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
-
-int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- return 0;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/dw9719.h b/drivers/staging/media/atomisp/i2c/imx/dw9719.h
deleted file mode 100644
index 711f412aef2a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/dw9719.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Support for dw9719 vcm driver.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __DW9719_H__
-#define __DW9719_H__
-
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/types.h>
-
-#define DW9719_VCM_ADDR (0x18 >> 1)
-
-/* dw9719 device structure */
-struct dw9719_device {
- struct timespec timestamp_t_focus_abs;
- s16 number_of_steps;
- bool initialized; /* true if dw9719 is detected */
- s32 focus; /* Current focus value */
- struct timespec focus_time; /* Time when focus was last time set */
- __u8 buffer[4]; /* Used for i2c transactions */
- const struct camera_af_platform_data *platform_data;
-};
-
-#define DW9719_INVALID_CONFIG 0xffffffff
-#define DW9719_MAX_FOCUS_POS 1023
-#define DELAY_PER_STEP_NS 1000000
-#define DELAY_MAX_PER_STEP_NS (1000000 * 1023)
-
-#define DW9719_INFO 0
-#define DW9719_ID 0xF1
-#define DW9719_CONTROL 2
-#define DW9719_VCM_CURRENT 3
-
-#define DW9719_MODE 6
-#define DW9719_VCM_FREQ 7
-
-#define DW9719_MODE_SAC3 0x40
-#define DW9719_DEFAULT_VCM_FREQ 0x04
-#define DW9719_ENABLE_RINGING 0x02
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.c b/drivers/staging/media/atomisp/i2c/imx/imx.c
deleted file mode 100644
index 49ab0af87096..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.c
+++ /dev/null
@@ -1,2480 +0,0 @@
-/*
- * Support for Sony imx 8MP camera sensor.
- *
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <asm/intel-mid.h>
-#include "../../include/linux/atomisp_platform.h"
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/gpio.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include "../../include/linux/libmsrlisthelper.h"
-#include <linux/mm.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include "imx.h"
-
-/*
- * The imx135 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size(byte): 76
- * line 1 effective data size(byte): 113
- */
-static const uint32_t
- imx135_embedded_effective_size[IMX135_EMBEDDED_DATA_LINE_NUM]
- = {76, 113};
-
-static enum atomisp_bayer_order imx_bayer_order_mapping[] = {
- atomisp_bayer_order_rggb,
- atomisp_bayer_order_grbg,
- atomisp_bayer_order_gbrg,
- atomisp_bayer_order_bggr
-};
-
-static const unsigned int
-IMX227_BRACKETING_LUT_FRAME_ENTRY[IMX_MAX_AE_LUT_LENGTH] = {
- 0x0E10, 0x0E1E, 0x0E2C, 0x0E3A, 0x0E48};
-
-static int
-imx_read_reg(struct i2c_client *client, u16 len, u16 reg, u16 *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX];
- int ret, i;
- int retry = 0;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- do {
- memset(msg, 0 , sizeof(msg));
- memset(data, 0 , sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret != 2) {
- dev_err(&client->dev,
- "retrying i2c read from offset 0x%x error %d... %d\n",
- reg, ret, retry);
- msleep(20);
- }
- } while (ret != 2 && retry++ < I2C_RETRY_COUNT);
-
- if (ret != 2)
- return -EIO;
-
- /* high byte comes first */
- if (len == IMX_8BIT) {
- *val = (u8)data[0];
- } else {
- /* 16-bit access is default when len > 1 */
- for (i = 0; i < (len >> 1); i++)
- val[i] = be16_to_cpu(data[i]);
- }
-
- return 0;
-}
-
-static int imx_i2c_write(struct i2c_client *client, u16 len, u8 *data)
-{
- struct i2c_msg msg;
- int ret;
- int retry = 0;
-
- do {
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = len;
- msg.buf = data;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret != 1) {
- dev_err(&client->dev,
- "retrying i2c write transfer... %d\n", retry);
- msleep(20);
- }
- } while (ret != 1 && retry++ < I2C_RETRY_COUNT);
-
- return ret == 1 ? 0 : -EIO;
-}
-
-int
-imx_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val)
-{
- int ret;
- unsigned char data[4] = {0};
- u16 *wreg = (u16 *)data;
- const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
-
- if (data_length != IMX_8BIT && data_length != IMX_16BIT) {
- v4l2_err(client, "%s error, invalid data_length\n", __func__);
- return -EINVAL;
- }
-
- /* high byte goes out first */
- *wreg = cpu_to_be16(reg);
-
- if (data_length == IMX_8BIT)
- data[2] = (u8)(val);
- else {
- /* IMX_16BIT */
- u16 *wdata = (u16 *)&data[2];
- *wdata = cpu_to_be16(val);
- }
-
- ret = imx_i2c_write(client, len, data);
- if (ret)
- dev_err(&client->dev,
- "write error: wrote 0x%x to offset 0x%x error %d",
- val, reg, ret);
-
- return ret;
-}
-
-/*
- * imx_write_reg_array - Initializes a list of imx registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- *
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __imx_flush_reg_array(), __imx_buf_reg_array() and
- * __imx_write_reg_is_consecutive() are internal functions to
- * imx_write_reg_array() and should not be used anywhere else.
- *
- */
-
-static int __imx_flush_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl)
-{
- u16 size;
-
- if (ctrl->index == 0)
- return 0;
-
- size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
- ctrl->buffer.addr = cpu_to_be16(ctrl->buffer.addr);
- ctrl->index = 0;
-
- return imx_i2c_write(client, size, (u8 *)&ctrl->buffer);
-}
-
-static int __imx_buf_reg_array(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- int size;
- u16 *data16;
-
- switch (next->type) {
- case IMX_8BIT:
- size = 1;
- ctrl->buffer.data[ctrl->index] = (u8)next->val;
- break;
- case IMX_16BIT:
- size = 2;
- data16 = (u16 *)&ctrl->buffer.data[ctrl->index];
- *data16 = cpu_to_be16((u16)next->val);
- break;
- default:
- return -EINVAL;
- }
-
- /* When first item is added, we need to store its starting address */
- if (ctrl->index == 0)
- ctrl->buffer.addr = next->sreg;
-
- ctrl->index += size;
-
- /*
- * If the buffer cannot guarantee free space for the next item,
- * flush it now to avoid running out of room.
- */
- if (ctrl->index + sizeof(u16) >= IMX_MAX_WRITE_BUF_SIZE)
- return __imx_flush_reg_array(client, ctrl);
-
- return 0;
-}
-
-static int
-__imx_write_reg_is_consecutive(struct i2c_client *client,
- struct imx_write_ctrl *ctrl,
- const struct imx_reg *next)
-{
- if (ctrl->index == 0)
- return 1;
-
- return ctrl->buffer.addr + ctrl->index == next->sreg;
-}
-
-static int imx_write_reg_array(struct i2c_client *client,
- const struct imx_reg *reglist)
-{
- const struct imx_reg *next = reglist;
- struct imx_write_ctrl ctrl;
- int err;
-
- ctrl.index = 0;
- for (; next->type != IMX_TOK_TERM; next++) {
- switch (next->type & IMX_TOK_MASK) {
- case IMX_TOK_DELAY:
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- msleep(next->val);
- break;
-
- default:
- /*
- * If the next address is not consecutive, the buffered data needs
- * to be flushed before proceeding.
- */
- if (!__imx_write_reg_is_consecutive(client, &ctrl,
- next)) {
- err = __imx_flush_reg_array(client, &ctrl);
- if (err)
- return err;
- }
- err = __imx_buf_reg_array(client, &ctrl, next);
- if (err) {
- v4l2_err(client, "%s: write error, aborted\n",
- __func__);
- return err;
- }
- break;
- }
- }
-
- return __imx_flush_reg_array(client, &ctrl);
-}
-
-static int __imx_min_fps_diff(int fps, const struct imx_fps_setting *fps_list)
-{
- int diff = INT_MAX;
- int i;
-
- if (fps == 0)
- return 0;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps) < diff)
- diff = abs(fps_list[i].fps - fps);
- }
-
- return diff;
-}
-
-static int __imx_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (abs(fps_list[i].fps - fps)
- < abs(fps_list[fps_index].fps - fps))
- fps_index = i;
- }
- return fps_index;
-}
-
-/*
- * This chooses the nearest fps setting above the requested fps.
- * fps_list should be in ascending order.
- */
-static int __imx_above_nearest_fps_index(int fps,
- const struct imx_fps_setting *fps_list)
-{
- int fps_index = 0;
- int i;
-
- for (i = 0; i < MAX_FPS_OPTIONS_SUPPORTED; i++) {
- if (!fps_list[i].fps)
- break;
- if (fps <= fps_list[i].fps) {
- fps_index = i;
- break;
- }
- }
-
- return fps_index;
-}
-
-static int imx_get_lanes(struct v4l2_subdev *sd)
-{
- struct camera_mipi_info *imx_info = v4l2_get_subdev_hostdata(sd);
-
- if (!imx_info)
- return -ENOSYS;
- if (imx_info->num_lanes < 1 || imx_info->num_lanes > 4 ||
- imx_info->num_lanes == 3)
- return -EINVAL;
-
- return imx_info->num_lanes;
-}
-
-static int __imx_update_exposure_timing(struct i2c_client *client, u16 exposure,
- u16 llp, u16 fll)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- if (dev->sensor_id != IMX227_ID) {
- /* Increase the VTS to match exposure + margin */
- if (exposure > fll - IMX_INTEGRATION_TIME_MARGIN)
- fll = exposure + IMX_INTEGRATION_TIME_MARGIN;
- }
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, llp);
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, fll);
- if (ret)
- return ret;
-
- if (exposure)
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, exposure);
-
- return ret;
-}
-
-static int __imx_update_gain(struct v4l2_subdev *sd, u16 gain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- /* set global gain */
- ret = imx_write_reg(client, IMX_8BIT, dev->reg_addr->global_gain, gain);
- if (ret)
- return ret;
-
- /* set short analog gain */
- if (dev->sensor_id == IMX135_ID)
- ret = imx_write_reg(client, IMX_8BIT, IMX_SHORT_AGC_GAIN, gain);
-
- return ret;
-}
-
-static int __imx_update_digital_gain(struct i2c_client *client, u16 digitgain)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- struct imx_write_buffer digit_gain;
-
- digit_gain.addr = cpu_to_be16(dev->reg_addr->dgc_adj);
- digit_gain.data[0] = (digitgain >> 8) & 0xFF;
- digit_gain.data[1] = digitgain & 0xFF;
-
- if (dev->sensor_id == IMX219_ID) {
- return imx_i2c_write(client, IMX219_DGC_LEN, (u8 *)&digit_gain);
- } else if (dev->sensor_id == IMX227_ID) {
- return imx_i2c_write(client, IMX227_DGC_LEN, (u8 *)&digit_gain);
- } else {
- digit_gain.data[2] = (digitgain >> 8) & 0xFF;
- digit_gain.data[3] = digitgain & 0xFF;
- digit_gain.data[4] = (digitgain >> 8) & 0xFF;
- digit_gain.data[5] = digitgain & 0xFF;
- digit_gain.data[6] = (digitgain >> 8) & 0xFF;
- digit_gain.data[7] = digitgain & 0xFF;
- return imx_i2c_write(client, IMX_DGC_LEN, (u8 *)&digit_gain);
- }
- return 0;
-}
-
-static int imx_set_exposure_gain(struct v4l2_subdev *sd, u16 coarse_itg,
- u16 gain, u16 digitgain)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int lanes = imx_get_lanes(sd);
- unsigned int digitgain_scaled;
- int ret = 0;
-
- /* Validate exposure: cannot exceed VTS-4 where VTS is 16bit */
- coarse_itg = clamp_t(u16, coarse_itg, 0, IMX_MAX_EXPOSURE_SUPPORTED);
-
- /* Validate gain: must not exceed maximum 8bit value */
- gain = clamp_t(u16, gain, 0, IMX_MAX_GLOBAL_GAIN_SUPPORTED);
-
- mutex_lock(&dev->input_lock);
-
- if (dev->sensor_id == IMX227_ID) {
- ret = imx_write_reg_array(client, imx_param_hold);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
-
- /* For imx175, setting gain must be delayed by one */
- if ((dev->sensor_id == IMX175_ID) && dev->digital_gain)
- digitgain_scaled = dev->digital_gain;
- else
- digitgain_scaled = digitgain;
- /* imx132 with two lanes needs more gain to saturate at max */
- if (dev->sensor_id == IMX132_ID && lanes > 1) {
- digitgain_scaled *= IMX132_2LANES_GAINFACT;
- digitgain_scaled >>= IMX132_2LANES_GAINFACT_SHIFT;
- }
- /* Validate digital gain: must not exceed a 12-bit value */
- digitgain_scaled = clamp_t(unsigned int, digitgain_scaled,
- 0, IMX_MAX_DIGITAL_GAIN_SUPPORTED);
-
- ret = __imx_update_exposure_timing(client, coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
- dev->coarse_itg = coarse_itg;
-
- if (dev->sensor_id == IMX175_ID)
- ret = __imx_update_gain(sd, dev->gain);
- else
- ret = __imx_update_gain(sd, gain);
- if (ret)
- goto out;
- dev->gain = gain;
-
- ret = __imx_update_digital_gain(client, digitgain_scaled);
- if (ret)
- goto out;
- dev->digital_gain = digitgain;
-
-out:
- if (dev->sensor_id == IMX227_ID)
- ret = imx_write_reg_array(client, imx_param_update);
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-static long imx_s_exposure(struct v4l2_subdev *sd,
- struct atomisp_exposure *exposure)
-{
- return imx_set_exposure_gain(sd, exposure->integration_time[0],
- exposure->gain[0], exposure->gain[1]);
-}
-
-/* FIXME -To be updated with real OTP reading */
-static int imx_g_priv_int_data(struct v4l2_subdev *sd,
- struct v4l2_private_int_data *priv)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u8 __user *to = priv->data;
- u32 read_size = priv->size;
- int ret;
-
- /* No need to copy data if size is 0 */
- if (!read_size)
- goto out;
-
- if (IS_ERR(dev->otp_data)) {
- dev_err(&client->dev, "OTP data not available");
- return PTR_ERR(dev->otp_data);
- }
- /* Correct read_size value only if bigger than maximum */
- if (read_size > dev->otp_driver->size)
- read_size = dev->otp_driver->size;
-
- ret = copy_to_user(to, dev->otp_data, read_size);
- if (ret) {
- dev_err(&client->dev, "%s: failed to copy OTP data to user\n",
- __func__);
- return -EFAULT;
- }
-out:
- /* Return correct size */
- priv->size = dev->otp_driver->size;
-
- return 0;
-}
-
-static int __imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- int ret;
-
- if (dev->sensor_id == IMX_ID_DEFAULT)
- return 0;
-
- /* The default is no flip at sensor initialization */
- dev->h_flip->cur.val = 0;
- dev->v_flip->cur.val = 0;
- /* Sets the default FPS */
- dev->fps_index = 0;
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
-
- ret = imx_write_reg_array(client, dev->mode_tables->init_settings);
- if (ret)
- return ret;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rglanesel[] = {
- IMX132_RGLANESEL_1LANE, /* 1 lane */
- IMX132_RGLANESEL_2LANES, /* 2 lanes */
- IMX132_RGLANESEL_1LANE, /* undefined */
- IMX132_RGLANESEL_4LANES, /* 4 lanes */
- };
- ret = imx_write_reg(client, IMX_8BIT,
- IMX132_RGLANESEL, imx132_rglanesel[lanes - 1]);
- }
-
- return ret;
-}
-
-static int imx_init(struct v4l2_subdev *sd, u32 val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_init(sd, val);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static long imx_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
-
- switch (cmd) {
- case ATOMISP_IOC_S_EXPOSURE:
- return imx_s_exposure(sd, arg);
- case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA:
- return imx_g_priv_int_data(sd, arg);
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 1);
- if (ret)
- goto fail_power;
-
- /* flis clock control */
- ret = dev->platform_data->flisclk_ctrl(sd, 1);
- if (ret)
- goto fail_clk;
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 1);
- if (ret) {
- dev_err(&client->dev, "gpio failed\n");
- goto fail_gpio;
- }
-
- return 0;
-fail_gpio:
- dev->platform_data->gpio_ctrl(sd, 0);
-fail_clk:
- dev->platform_data->flisclk_ctrl(sd, 0);
-fail_power:
- dev->platform_data->power_ctrl(sd, 0);
- dev_err(&client->dev, "sensor power-up failed\n");
-
- return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = dev->platform_data->flisclk_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "flisclk failed\n");
-
- /* gpio ctrl */
- ret = dev->platform_data->gpio_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "gpio failed\n");
-
- /* power control */
- ret = dev->platform_data->power_ctrl(sd, 0);
- if (ret)
- dev_err(&client->dev, "vprog failed.\n");
-
- return ret;
-}
-
-static int __imx_s_power(struct v4l2_subdev *sd, int on)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret = 0;
- int r = 0;
-
- if (on == 0) {
- ret = power_down(sd);
- if (dev->vcm_driver && dev->vcm_driver->power_down)
- r = dev->vcm_driver->power_down(sd);
- if (ret == 0)
- ret = r;
- dev->power = 0;
- } else {
- if (dev->vcm_driver && dev->vcm_driver->power_up)
- ret = dev->vcm_driver->power_up(sd);
- if (ret)
- return ret;
- ret = power_up(sd);
- if (!ret) {
- dev->power = 1;
- return __imx_init(sd, 0);
- }
- }
-
- return ret;
-}
-
-static int imx_s_power(struct v4l2_subdev *sd, int on)
-{
- int ret;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_power(sd, on);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int imx_get_intg_factor(struct i2c_client *client,
- struct camera_mipi_info *info,
- const struct imx_reg *reglist)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
- int lanes = imx_get_lanes(sd);
- u32 vt_pix_clk_div;
- u32 vt_sys_clk_div;
- u32 pre_pll_clk_div;
- u32 pll_multiplier;
-
- const int ext_clk_freq_hz = 19200000;
- struct atomisp_sensor_mode_data *buf = &info->data;
- int ret;
- u16 data[IMX_INTG_BUF_COUNT];
-
- u32 vt_pix_clk_freq_mhz;
- u32 coarse_integration_time_min;
- u32 coarse_integration_time_max_margin;
- u32 read_mode;
- u32 div;
-
- if (info == NULL)
- return -EINVAL;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 1, IMX_VT_PIX_CLK_DIV, data);
- if (ret)
- return ret;
- vt_pix_clk_div = data[0] & IMX_MASK_5BIT;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID) {
- static const int rgpltd[] = { 2, 4, 1, 1 };
- ret = imx_read_reg(client, 1, IMX132_208_VT_RGPLTD, data);
- if (ret)
- return ret;
- vt_sys_clk_div = rgpltd[data[0] & IMX_MASK_2BIT];
- } else {
- ret = imx_read_reg(client, 1, IMX_VT_SYS_CLK_DIV, data);
- if (ret)
- return ret;
- vt_sys_clk_div = data[0] & IMX_MASK_2BIT;
- }
- ret = imx_read_reg(client, 1, IMX_PRE_PLL_CLK_DIV, data);
- if (ret)
- return ret;
- pre_pll_clk_div = data[0] & IMX_MASK_4BIT;
-
- ret = imx_read_reg(client, 2,
- (dev->sensor_id == IMX132_ID ||
- dev->sensor_id == IMX219_ID ||
- dev->sensor_id == IMX208_ID) ?
- IMX132_208_219_PLL_MULTIPLIER : IMX_PLL_MULTIPLIER, data);
- if (ret)
- return ret;
- pll_multiplier = data[0] & IMX_MASK_11BIT;
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- ret = imx_read_reg(client, 4, IMX_COARSE_INTG_TIME_MIN, data);
- if (ret)
- return ret;
- coarse_integration_time_min = data[0];
- coarse_integration_time_max_margin = data[1];
-
- /* Get the cropping and output resolution to ISP for this mode. */
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_start_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_start_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_start = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->horizontal_end_h, data);
- if (ret)
- return ret;
- buf->crop_horizontal_end = data[0];
-
- ret = imx_read_reg(client, 2, dev->reg_addr->vertical_end_h, data);
- if (ret)
- return ret;
- buf->crop_vertical_end = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->horizontal_output_size_h, data);
- if (ret)
- return ret;
- buf->output_width = data[0];
-
- ret = imx_read_reg(client, 2,
- dev->reg_addr->vertical_output_size_h, data);
- if (ret)
- return ret;
- buf->output_height = data[0];
-
- memset(data, 0, IMX_INTG_BUF_COUNT * sizeof(u16));
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID)
- read_mode = 0;
- else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_READ_MODE, data);
- else
- ret = imx_read_reg(client, 1, IMX_READ_MODE, data);
-
- if (ret)
- return ret;
- read_mode = data[0] & IMX_MASK_2BIT;
- }
-
- div = pre_pll_clk_div*vt_sys_clk_div*vt_pix_clk_div;
- if (div == 0)
- return -EINVAL;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID)
- vt_pix_clk_freq_mhz = ext_clk_freq_hz / div;
- else if (dev->sensor_id == IMX227_ID) {
- /* according to IMX227 datasheet:
- * vt_pix_clk_freq_mhz = num_of_vt_lanes(4) * ivt_pix_clk_freq_mhz
- */
- vt_pix_clk_freq_mhz =
- (u64)4 * ext_clk_freq_hz * pll_multiplier;
- do_div(vt_pix_clk_freq_mhz, div);
- } else
- vt_pix_clk_freq_mhz = 2 * ext_clk_freq_hz / div;
-
- vt_pix_clk_freq_mhz *= pll_multiplier;
- if (dev->sensor_id == IMX132_ID && lanes > 0)
- vt_pix_clk_freq_mhz *= lanes;
-
- dev->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
-
- buf->vt_pix_clk_freq_mhz = vt_pix_clk_freq_mhz;
- buf->coarse_integration_time_min = coarse_integration_time_min;
- buf->coarse_integration_time_max_margin =
- coarse_integration_time_max_margin;
-
- buf->fine_integration_time_min = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_max_margin = IMX_FINE_INTG_TIME;
- buf->fine_integration_time_def = IMX_FINE_INTG_TIME;
- buf->frame_length_lines = dev->lines_per_frame;
- buf->line_length_pck = dev->pixels_per_line;
- buf->read_mode = read_mode;
-
- if (dev->sensor_id == IMX132_ID || dev->sensor_id == IMX208_ID ||
- dev->sensor_id == IMX219_ID) {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- } else {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1, IMX227_BINNING_ENABLE,
- data);
- else
- ret = imx_read_reg(client, 1, IMX_BINNING_ENABLE, data);
-
- if (ret)
- return ret;
- /* 1:binning enabled, 0:disabled */
- if (data[0] == 1) {
- if (dev->sensor_id == IMX227_ID)
- ret = imx_read_reg(client, 1,
- IMX227_BINNING_TYPE, data);
- else
- ret = imx_read_reg(client, 1,
- IMX_BINNING_TYPE, data);
-
- if (ret)
- return ret;
- buf->binning_factor_x = data[0] >> 4 & 0x0f;
- if (!buf->binning_factor_x)
- buf->binning_factor_x = 1;
- buf->binning_factor_y = data[0] & 0xf;
- if (!buf->binning_factor_y)
- buf->binning_factor_y = 1;
- /* WORKAROUND: the NHD setting for IMX227 should have 4x4
- * binning, but the register setting does not reflect
- * this; the vendor has been asked why this happens. This is
- * a workaround for INTEL BZ 216560.
- */
- if (dev->sensor_id == IMX227_ID) {
- if (dev->curr_res_table[dev->fmt_idx].width ==
- 376 &&
- dev->curr_res_table[dev->fmt_idx].height ==
- 656) {
- buf->binning_factor_x = 4;
- buf->binning_factor_y = 4;
- }
- }
- } else {
- buf->binning_factor_x = 1;
- buf->binning_factor_y = 1;
- }
- }
-
- return 0;
-}
-
-/* This returns the exposure time being used. This should only be used
- for filling in EXIF data, not for actual image processing. */
-static int imx_q_exposure(struct v4l2_subdev *sd, s32 *value)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 coarse;
- int ret;
-
- /* the fine integration time is currently not calculated */
- ret = imx_read_reg(client, IMX_16BIT,
- dev->reg_addr->coarse_integration_time, &coarse);
- *value = coarse;
-
- return ret;
-}
-
-static int imx_test_pattern(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- if (dev->power == 0)
- return 0;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_R,
- (u16)(dev->tp_r->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GR,
- (u16)(dev->tp_gr->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_GB,
- (u16)(dev->tp_gb->val >> 22));
- if (ret)
- return ret;
-
- ret = imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_COLOR_B,
- (u16)(dev->tp_b->val >> 22));
- if (ret)
- return ret;
-
- return imx_write_reg(client, IMX_16BIT, IMX_TEST_PATTERN_MODE,
- (u16)(dev->tp_mode->val));
-}
-
-static u32 imx_translate_bayer_order(enum atomisp_bayer_order code)
-{
- switch (code) {
- case atomisp_bayer_order_rggb:
- return MEDIA_BUS_FMT_SRGGB10_1X10;
- case atomisp_bayer_order_grbg:
- return MEDIA_BUS_FMT_SGRBG10_1X10;
- case atomisp_bayer_order_bggr:
- return MEDIA_BUS_FMT_SBGGR10_1X10;
- case atomisp_bayer_order_gbrg:
- return MEDIA_BUS_FMT_SGBRG10_1X10;
- }
- return 0;
-}
-
-static int imx_v_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_VFLIP_BIT;
- else
- val &= ~IMX_VFLIP_BIT;
-
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_h_flip(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u16 val;
-
- if (dev->power == 0)
- return -EIO;
-
- ret = imx_write_reg_array(client, dev->param_hold);
- if (ret)
- return ret;
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- return ret;
- if (value)
- val |= IMX_HFLIP_BIT;
- else
- val &= ~IMX_HFLIP_BIT;
- ret = imx_write_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, val);
- if (ret)
- return ret;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info) {
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- }
-
- return imx_write_reg_array(client, dev->param_update);
-}
-
-static int imx_g_focal(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_FOCAL_LENGTH_NUM << 16) | IMX_FOCAL_LENGTH_DEM;
- return 0;
-}
-
-static int imx_g_fnumber(struct v4l2_subdev *sd, s32 *val)
-{
- /* const f-number for imx */
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 16) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_fnumber_range(struct v4l2_subdev *sd, s32 *val)
-{
- *val = (IMX_F_NUMBER_DEFAULT_NUM << 24) |
- (IMX_F_NUMBER_DEM << 16) |
- (IMX_F_NUMBER_DEFAULT_NUM << 8) | IMX_F_NUMBER_DEM;
- return 0;
-}
-
-static int imx_g_bin_factor_x(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_x;
-
- return 0;
-}
-
-static int imx_g_bin_factor_y(struct v4l2_subdev *sd, s32 *val)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- *val = dev->curr_res_table[dev->fmt_idx].bin_factor_y;
-
- return 0;
-}
-
-static int imx_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs)
- return dev->vcm_driver->t_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_focus_rel)
- return dev->vcm_driver->t_focus_rel(sd, value);
- return 0;
-}
-
-static int imx_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_status)
- return dev->vcm_driver->q_focus_status(sd, value);
- return 0;
-}
-
-static int imx_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->q_focus_abs)
- return dev->vcm_driver->q_focus_abs(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_slew)
- return dev->vcm_driver->t_vcm_slew(sd, value);
- return 0;
-}
-
-static int imx_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (dev->vcm_driver && dev->vcm_driver->t_vcm_timing)
- return dev->vcm_driver->t_vcm_timing(sd, value);
- return 0;
-}
-
-static int imx_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_TEST_PATTERN:
- ret = imx_test_pattern(&dev->sd);
- break;
- case V4L2_CID_VFLIP:
- dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_v_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n",
- __func__, ctrl->val);
- ret = imx_h_flip(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_t_focus_abs(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_FOCUS_RELATIVE:
- ret = imx_t_focus_rel(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_SLEW:
- ret = imx_t_vcm_slew(&dev->sd, ctrl->val);
- break;
- case V4L2_CID_VCM_TIMEING:
- ret = imx_t_vcm_timing(&dev->sd, ctrl->val);
- break;
- }
-
- return ret;
-}
-
-static int imx_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct imx_device *dev = container_of(
- ctrl->handler, struct imx_device, ctrl_handler);
- int ret = 0;
- unsigned int val;
-
- switch (ctrl->id) {
- case V4L2_CID_EXPOSURE_ABSOLUTE:
- ret = imx_q_exposure(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_ABSOLUTE:
- ret = imx_q_focus_abs(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCUS_STATUS:
- ret = imx_q_focus_status(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FOCAL_ABSOLUTE:
- ret = imx_g_focal(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_ABSOLUTE:
- ret = imx_g_fnumber(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_FNUMBER_RANGE:
- ret = imx_g_fnumber_range(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_HORZ:
- ret = imx_g_bin_factor_x(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_BIN_FACTOR_VERT:
- ret = imx_g_bin_factor_y(&dev->sd, &ctrl->val);
- break;
- case V4L2_CID_VBLANK:
- ctrl->val = dev->lines_per_frame -
- dev->curr_res_table[dev->fmt_idx].height;
- break;
- case V4L2_CID_HBLANK:
- ctrl->val = dev->pixels_per_line -
- dev->curr_res_table[dev->fmt_idx].width;
- break;
- case V4L2_CID_PIXEL_RATE:
- ctrl->val = dev->vt_pix_clk_freq_mhz;
- break;
- case V4L2_CID_LINK_FREQ:
- val = dev->curr_res_table[dev->fmt_idx].
- fps_options[dev->fps_index].mipi_freq;
- if (val == 0)
- val = dev->curr_res_table[dev->fmt_idx].mipi_freq;
- if (val == 0)
- return -EINVAL;
- ctrl->val = val * 1000; /* To Hz */
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
- .s_ctrl = imx_s_ctrl,
- .g_volatile_ctrl = imx_g_volatile_ctrl
-};
-
-static const struct v4l2_ctrl_config imx_controls[] = {
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "exposure",
- .min = 0x0,
- .max = 0xffff,
- .step = 0x01,
- .def = 0x00,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern",
- .min = 0,
- .max = 0xffff,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_R,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color R",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GR",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color GB",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_TEST_PATTERN_COLOR_B,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Test pattern solid color B",
- .min = INT_MIN,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flip",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move absolute",
- .min = 0,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_RELATIVE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus move relative",
- .min = IMX_MAX_FOCUS_NEG,
- .max = IMX_MAX_FOCUS_POS,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCUS_STATUS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focus status",
- .min = 0,
- .max = 100, /* allow enum to grow in the future */
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_SLEW,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm slew",
- .min = 0,
- .max = IMX_VCM_SLEW_STEP_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VCM_TIMEING,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vcm step time",
- .min = 0,
- .max = IMX_VCM_SLEW_TIME_MAX,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FOCAL_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "focal length",
- .min = IMX_FOCAL_LENGTH_DEFAULT,
- .max = IMX_FOCAL_LENGTH_DEFAULT,
- .step = 0x01,
- .def = IMX_FOCAL_LENGTH_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_ABSOLUTE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number",
- .min = IMX_F_NUMBER_DEFAULT,
- .max = IMX_F_NUMBER_DEFAULT,
- .step = 0x01,
- .def = IMX_F_NUMBER_DEFAULT,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_FNUMBER_RANGE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "f-number range",
- .min = IMX_F_NUMBER_RANGE,
- .max = IMX_F_NUMBER_RANGE,
- .step = 0x01,
- .def = IMX_F_NUMBER_RANGE,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_HORZ,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "horizontal binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_BIN_FACTOR_VERT,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "vertical binning factor",
- .min = 0,
- .max = IMX_BIN_FACTOR_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_LINK_FREQ,
- .name = "Link Frequency",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 1,
- .max = 1500000 * 1000,
- .step = 1,
- .def = 1,
- .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_PIXEL_RATE,
- .name = "Pixel Rate",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = INT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HBLANK,
- .name = "Horizontal Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VBLANK,
- .name = "Vertical Blanking",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = SHRT_MAX,
- .step = 1,
- .def = 0,
- .flags = V4L2_CTRL_FLAG_VOLATILE,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_HFLIP,
- .name = "Horizontal Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
- {
- .ops = &ctrl_ops,
- .id = V4L2_CID_VFLIP,
- .name = "Vertical Flip",
- .type = V4L2_CTRL_TYPE_INTEGER,
- .min = 0,
- .max = 1,
- .step = 1,
- .def = 0,
- .flags = 0,
- },
-};
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between resolution and w/h.
- * Resolutions with res->width/height smaller than w/h are not considered.
- * Returns the value of the gap, or -1 on failure.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 600
-static int distance(struct imx_resolution const *res, u32 w, u32 h,
- bool keep_ratio)
-{
- unsigned int w_ratio;
- unsigned int h_ratio;
- int match;
- unsigned int allowed_ratio_mismatch = LARGEST_ALLOWED_RATIO_MISMATCH;
-
- if (!keep_ratio)
- allowed_ratio_mismatch = ~0;
-
- if (w == 0)
- return -1;
- w_ratio = (res->width << 13) / w;
- if (h == 0)
- return -1;
- h_ratio = (res->height << 13) / h;
- if (h_ratio == 0)
- return -1;
- match = abs(((w_ratio << 13) / h_ratio) - ((int)8192));
-
- if ((w_ratio < (int)8192) || (h_ratio < (int)8192) ||
- (match > allowed_ratio_mismatch))
- return -1;
-
- return w_ratio + h_ratio;
-}
-
-/* Return the nearest higher resolution index */
-static int nearest_resolution_index(struct v4l2_subdev *sd, int w, int h)
-{
- int i;
- int idx = -1;
- int dist;
- int fps_diff;
- int min_fps_diff = INT_MAX;
- int min_dist = INT_MAX;
- const struct imx_resolution *tmp_res = NULL;
- struct imx_device *dev = to_imx_sensor(sd);
- bool again = 1;
-retry:
- for (i = 0; i < dev->entries_curr_table; i++) {
- tmp_res = &dev->curr_res_table[i];
- dist = distance(tmp_res, w, h, again);
- if (dist == -1)
- continue;
- if (dist < min_dist) {
- min_dist = dist;
- idx = i;
- }
- if (dist == min_dist) {
- fps_diff = __imx_min_fps_diff(dev->targetfps,
- tmp_res->fps_options);
- if (fps_diff < min_fps_diff) {
- min_fps_diff = fps_diff;
- idx = i;
- }
- }
- }
-
- /*
- * FIXME!
- * only IMX135 for Saltbay and IMX227 use this algorithm
- */
- if (idx == -1 && again == true && dev->new_res_sel_method) {
- again = false;
- goto retry;
- }
- return idx;
-}
-
-/* Call with ctrl_handler.lock held */
-static int __adjust_hvblank(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
- u16 new_frame_length_lines, new_line_length_pck;
- int ret;
-
-	/*
-	 * No need to adjust h/v blank if the debug values are not set.
-	 * Note that there is no further checking of the h/v blank values,
-	 * as they can be set to any value above zero for debugging purposes.
-	 */
- if (!dev->v_blank->val || !dev->h_blank->val)
- return 0;
-
- new_frame_length_lines = dev->curr_res_table[dev->fmt_idx].height +
- dev->v_blank->val;
- new_line_length_pck = dev->curr_res_table[dev->fmt_idx].width +
- dev->h_blank->val;
-
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->line_length_pixels, new_line_length_pck);
- if (ret)
- return ret;
- ret = imx_write_reg(client, IMX_16BIT,
- dev->reg_addr->frame_length_lines, new_frame_length_lines);
- if (ret)
- return ret;
-
- dev->lines_per_frame = new_frame_length_lines;
- dev->pixels_per_line = new_line_length_pck;
-
- return 0;
-}
-
-static int imx_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
- struct camera_mipi_info *imx_info = NULL;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res;
- int lanes = imx_get_lanes(sd);
- int ret;
- u16 data, val;
- int idx;
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
- if ((fmt->width > imx_max_res[dev->sensor_id].res_max_width)
- || (fmt->height > imx_max_res[dev->sensor_id].res_max_height)) {
- fmt->width = imx_max_res[dev->sensor_id].res_max_width;
- fmt->height = imx_max_res[dev->sensor_id].res_max_height;
- } else {
- idx = nearest_resolution_index(sd, fmt->width, fmt->height);
-
-		/*
-		 * nearest_resolution_index() doesn't return smaller
-		 * resolutions. If it fails, it means the requested
-		 * resolution is higher than we can support. Fall back
-		 * to the highest possible resolution in this case.
-		 */
- if (idx == -1)
- idx = dev->entries_curr_table - 1;
-
- fmt->width = dev->curr_res_table[idx].width;
- fmt->height = dev->curr_res_table[idx].height;
- }
-
- fmt->code = dev->format.code;
-	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- cfg->try_fmt = *fmt;
- return 0;
- }
- mutex_lock(&dev->input_lock);
-
- dev->fmt_idx = nearest_resolution_index(sd, fmt->width, fmt->height);
- if (dev->fmt_idx == -1) {
- ret = -EINVAL;
- goto out;
- }
- res = &dev->curr_res_table[dev->fmt_idx];
-
- /* Adjust the FPS selection based on the resolution selected */
- dev->fps_index = __imx_nearest_fps_index(dev->targetfps,
- res->fps_options);
- dev->fps = res->fps_options[dev->fps_index].fps;
- dev->regs = res->fps_options[dev->fps_index].regs;
- if (!dev->regs)
- dev->regs = res->regs;
-
- ret = imx_write_reg_array(client, dev->regs);
- if (ret)
- goto out;
-
- if (dev->sensor_id == IMX132_ID && lanes > 0) {
- static const u8 imx132_rgpltd[] = {
- 2, /* 1 lane: /1 */
- 0, /* 2 lanes: /2 */
- 0, /* undefined */
- 1, /* 4 lanes: /4 */
- };
- ret = imx_write_reg(client, IMX_8BIT, IMX132_208_VT_RGPLTD,
- imx132_rgpltd[lanes - 1]);
- if (ret)
- goto out;
- }
-
- dev->pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- dev->lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
-	/* Apply the debug h/v blank values, if set */
- __adjust_hvblank(sd);
-
- ret = __imx_update_exposure_timing(client, dev->coarse_itg,
- dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- goto out;
-
- ret = __imx_update_gain(sd, dev->gain);
- if (ret)
- goto out;
-
- ret = __imx_update_digital_gain(client, dev->digital_gain);
- if (ret)
- goto out;
-
- ret = imx_write_reg_array(client, dev->param_update);
- if (ret)
- goto out;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- goto out;
-
- ret = imx_read_reg(client, IMX_8BIT,
- dev->reg_addr->img_orientation, &val);
- if (ret)
- goto out;
- val &= (IMX_VFLIP_BIT|IMX_HFLIP_BIT);
- imx_info->raw_bayer_order = imx_bayer_order_mapping[val];
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
-
-	/*
-	 * Fill in the metadata info. Add the imx135 metadata settings for the
-	 * RAW10 format.
-	 */
- switch (dev->sensor_id) {
- case IMX135_ID:
- ret = imx_read_reg(client, 2,
- IMX135_OUTPUT_DATA_FORMAT_REG, &data);
- if (ret)
- goto out;
-		/*
-		 * The IMX135 can support various data formats such as
-		 * RAW6/8/10/12/14.
-		 * 1. The data format is RAW10:
-		 *    metadata width = current resolution width (pixels) * 10 / 8
-		 * 2. The data format is RAW6 or RAW8:
-		 *    metadata width = current resolution width (pixels)
-		 * 3. Other data formats (RAW12/14 etc.):
-		 *    TBD.
-		 */
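-		/*
-		 * For example, at the 4208-pixel maximum width defined for
-		 * the IMX135, the RAW10 case gives 4208 * 10 / 8 = 5260,
-		 * since RAW10 packs four 10-bit pixels into five bytes.
-		 */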
- if (data == IMX135_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
-			/* The data format is RAW6/8/12/14, etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX135_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx135_embedded_effective_size;
-
- break;
- case IMX227_ID:
- ret = imx_read_reg(client, 2, IMX227_OUTPUT_DATA_FORMAT_REG,
- &data);
- if (ret)
- goto out;
- if (data == IMX227_OUTPUT_FORMAT_RAW10)
- /* the data format is RAW10. */
- imx_info->metadata_width = res->width * 10 / 8;
- else
-			/* The data format is RAW6/8/12/14, etc. */
- imx_info->metadata_width = res->width;
-
- imx_info->metadata_height = IMX227_EMBEDDED_DATA_LINE_NUM;
-
- if (imx_info->metadata_effective_width == NULL)
- imx_info->metadata_effective_width =
- imx227_embedded_effective_size;
-
- break;
- default:
- imx_info->metadata_width = 0;
- imx_info->metadata_height = 0;
- imx_info->metadata_effective_width = NULL;
- break;
- }
-
-out:
- mutex_unlock(&dev->input_lock);
- return ret;
-}
-
-
-static int imx_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *fmt = &format->format;
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (format->pad)
- return -EINVAL;
- if (!fmt)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- fmt->width = dev->curr_res_table[dev->fmt_idx].width;
- fmt->height = dev->curr_res_table[dev->fmt_idx].height;
- fmt->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_detect(struct i2c_client *client, u16 *id, u8 *revision)
-{
- struct i2c_adapter *adapter = client->adapter;
-
- /* i2c check */
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- /* check sensor chip ID */
- if (imx_read_reg(client, IMX_16BIT, IMX132_175_208_219_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
-
- if (*id == IMX132_ID || *id == IMX175_ID ||
- *id == IMX208_ID || *id == IMX219_ID)
- goto found;
-
- if (imx_read_reg(client, IMX_16BIT, IMX134_135_227_CHIP_ID, id)) {
- v4l2_err(client, "sensor_id = 0x%x\n", *id);
- return -ENODEV;
- }
- if (*id != IMX134_ID && *id != IMX135_ID && *id != IMX227_ID) {
- v4l2_err(client, "no imx sensor found\n");
- return -ENODEV;
- }
-found:
- v4l2_info(client, "sensor_id = 0x%x\n", *id);
-
-	/* TODO - needs to be updated */
- *revision = 0;
-
- return 0;
-}
-
-static void __imx_print_timing(struct v4l2_subdev *sd)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u16 width = dev->curr_res_table[dev->fmt_idx].width;
- u16 height = dev->curr_res_table[dev->fmt_idx].height;
-
- dev_dbg(&client->dev, "Dump imx timing in stream on:\n");
- dev_dbg(&client->dev, "width: %d:\n", width);
- dev_dbg(&client->dev, "height: %d:\n", height);
- dev_dbg(&client->dev, "pixels_per_line: %d:\n", dev->pixels_per_line);
- dev_dbg(&client->dev, "line per frame: %d:\n", dev->lines_per_frame);
- dev_dbg(&client->dev, "pix freq: %d:\n", dev->vt_pix_clk_freq_mhz);
- dev_dbg(&client->dev, "init fps: %d:\n", dev->vt_pix_clk_freq_mhz /
- dev->pixels_per_line / dev->lines_per_frame);
-	dev_dbg(&client->dev, "HBlank: %d ns:\n",
-		1000 * (dev->pixels_per_line - width) /
-		(dev->vt_pix_clk_freq_mhz / 1000000));
-	dev_dbg(&client->dev, "VBlank: %d us:\n",
-		(dev->lines_per_frame - height) * dev->pixels_per_line /
-		(dev->vt_pix_clk_freq_mhz / 1000000));
-}
-
-/*
- * imx stream on/off
- */
-static int imx_s_stream(struct v4l2_subdev *sd, int enable)
-{
- int ret;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (enable) {
- /* Noise reduction & dead pixel applied before streaming */
- if (dev->fw == NULL) {
- dev_warn(&client->dev, "No MSR loaded from library");
- } else {
- ret = apply_msr_data(client, dev->fw);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- }
- ret = imx_test_pattern(sd);
- if (ret) {
- v4l2_err(client, "Configure test pattern failed.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- __imx_print_timing(sd);
- ret = imx_write_reg_array(client, imx_streaming);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 1;
- if (dev->vcm_driver && dev->vcm_driver->t_focus_abs_init)
- dev->vcm_driver->t_focus_abs_init(sd);
- } else {
- ret = imx_write_reg_array(client, imx_soft_standby);
- if (ret != 0) {
- v4l2_err(client, "write_reg_array err\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
- dev->streaming = 0;
- dev->targetfps = 0;
- }
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static int __update_imx_device_settings(struct imx_device *dev, u16 sensor_id)
-{
- /* IMX on other platform is not supported yet */
- return -EINVAL;
-}
-
-static int imx_s_config(struct v4l2_subdev *sd,
- int irq, void *pdata)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 sensor_revision;
- u16 sensor_id;
- int ret;
- if (pdata == NULL)
- return -ENODEV;
-
- dev->platform_data = pdata;
-
- mutex_lock(&dev->input_lock);
-
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "imx platform init err\n");
- return ret;
- }
- }
-	/*
-	 * Power off the module first.
-	 *
-	 * The first power-on by the board leaves the power/GPIO pins in an
-	 * undefined state.
-	 */
- ret = __imx_s_power(sd, 0);
- if (ret) {
- v4l2_err(client, "imx power-down err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = __imx_s_power(sd, 1);
- if (ret) {
- v4l2_err(client, "imx power-up err.\n");
- mutex_unlock(&dev->input_lock);
- return ret;
- }
-
- ret = dev->platform_data->csi_cfg(sd, 1);
- if (ret)
- goto fail_csi_cfg;
-
- /* config & detect sensor */
- ret = imx_detect(client, &sensor_id, &sensor_revision);
- if (ret) {
- v4l2_err(client, "imx_detect err s_config.\n");
- goto fail_detect;
- }
-
- dev->sensor_id = sensor_id;
- dev->sensor_revision = sensor_revision;
-
- /* Resolution settings depend on sensor type and platform */
- ret = __update_imx_device_settings(dev, dev->sensor_id);
- if (ret)
- goto fail_detect;
- /* Read sensor's OTP data */
- dev->otp_data = dev->otp_driver->otp_read(sd,
- dev->otp_driver->dev_addr, dev->otp_driver->start_addr,
- dev->otp_driver->size);
-
- /* power off sensor */
- ret = __imx_s_power(sd, 0);
-
- mutex_unlock(&dev->input_lock);
- if (ret)
- v4l2_err(client, "imx power-down err.\n");
-
- return ret;
-
-fail_detect:
- dev->platform_data->csi_cfg(sd, 0);
-fail_csi_cfg:
- __imx_s_power(sd, 0);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "sensor power-gating failed\n");
- return ret;
-}
-
-static int
-imx_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- if (code->index >= MAX_FMTS)
- return -EINVAL;
-
- mutex_lock(&dev->input_lock);
- code->code = dev->format.code;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- int index = fse->index;
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- if (index >= dev->entries_curr_table) {
- mutex_unlock(&dev->input_lock);
- return -EINVAL;
- }
-
- fse->min_width = dev->curr_res_table[index].width;
- fse->min_height = dev->curr_res_table[index].height;
- fse->max_width = dev->curr_res_table[index].width;
- fse->max_height = dev->curr_res_table[index].height;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int
-imx_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *param)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- dev->run_mode = param->parm.capture.capturemode;
-
- switch (dev->run_mode) {
- case CI_MODE_VIDEO:
- dev->curr_res_table = dev->mode_tables->res_video;
- dev->entries_curr_table = dev->mode_tables->n_res_video;
- break;
- case CI_MODE_STILL_CAPTURE:
- dev->curr_res_table = dev->mode_tables->res_still;
- dev->entries_curr_table = dev->mode_tables->n_res_still;
- break;
- default:
- dev->curr_res_table = dev->mode_tables->res_preview;
- dev->entries_curr_table = dev->mode_tables->n_res_preview;
- }
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int imx_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- interval->interval.denominator = dev->fps;
- interval->interval.numerator = 1;
- mutex_unlock(&dev->input_lock);
- return 0;
-}
-
-static int __imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- const struct imx_resolution *res =
- &dev->curr_res_table[dev->fmt_idx];
- struct camera_mipi_info *imx_info = NULL;
- unsigned short pixels_per_line;
- unsigned short lines_per_frame;
- unsigned int fps_index;
- int fps;
- int ret = 0;
-
-
- imx_info = v4l2_get_subdev_hostdata(sd);
- if (imx_info == NULL)
- return -EINVAL;
-
- if (!interval->interval.numerator)
- interval->interval.numerator = 1;
-
- fps = interval->interval.denominator / interval->interval.numerator;
-
- if (!fps)
- return -EINVAL;
-
- dev->targetfps = fps;
- /* No need to proceed further if we are not streaming */
- if (!dev->streaming)
- return 0;
-
- /* Ignore if we are already using the required FPS. */
- if (fps == dev->fps)
- return 0;
-
-	/*
-	 * From here on the sensor is already streaming, so adjust the fps
-	 * dynamically.
-	 */
- fps_index = __imx_above_nearest_fps_index(fps, res->fps_options);
- if (fps > res->fps_options[fps_index].fps) {
-		/*
-		 * If there is no higher-fps setting, increasing the fps by
-		 * adjusting lines per frame is not supported.
-		 */
- dev_err(&client->dev, "Could not support fps: %d.\n", fps);
- return -EINVAL;
- }
-
- if (res->fps_options[fps_index].regs &&
- res->fps_options[fps_index].regs != dev->regs) {
-		/*
-		 * If a new setting is needed but it differs from the current
-		 * one, do not use it, as that may have unexpected results,
-		 * e.g. on PLL or IQ.
-		 */
- dev_dbg(&client->dev,
- "Sensor is streaming, not apply new sensor setting\n");
- if (fps > res->fps_options[dev->fps_index].fps) {
-			/*
-			 * Increasing the fps from a low-fps setting is not
-			 * supported, since the high-fps setting cannot be
-			 * used and the requested fps is above the current
-			 * setting's fps.
-			 */
- dev_warn(&client->dev,
- "Could not support fps: %d, keep current: %d.\n",
- fps, dev->fps);
- return 0;
- }
- } else {
- dev->fps_index = fps_index;
- dev->fps = res->fps_options[dev->fps_index].fps;
- }
-
- /* Update the new frametimings based on FPS */
- pixels_per_line = res->fps_options[dev->fps_index].pixels_per_line;
- lines_per_frame = res->fps_options[dev->fps_index].lines_per_frame;
-
- if (fps > res->fps_options[fps_index].fps) {
-		/*
-		 * If there is no higher-fps setting, increasing the fps by
-		 * adjusting lines per frame is not supported.
-		 */
- dev_warn(&client->dev, "Could not support fps: %d. Use:%d.\n",
- fps, res->fps_options[fps_index].fps);
- goto done;
- }
-
- /* if the new setting does not match exactly */
- if (dev->fps != fps) {
-#define MAX_LINES_PER_FRAME 0xffff
- dev_dbg(&client->dev, "adjusting fps using lines_per_frame\n");
-		/*
-		 * FIXME!
-		 * 1: check the datasheet for the max value of lines_per_frame
-		 * 2: consider using pixels per line for more range?
-		 */
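-		/*
-		 * The scaling below leaves pixels_per_line untouched, so the
-		 * frame rate varies inversely with lines_per_frame: e.g. with
-		 * a current fps of 30, requesting 15 fps would double
-		 * lines_per_frame (scaled by dev->fps / fps = 2).
-		 */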
- if (dev->lines_per_frame * dev->fps / fps >
- MAX_LINES_PER_FRAME) {
- dev_warn(&client->dev,
- "adjust lines_per_frame out of range, try to use max value.\n");
- lines_per_frame = MAX_LINES_PER_FRAME;
- } else {
- lines_per_frame = lines_per_frame * dev->fps / fps;
- }
- }
-done:
- /* Update the new frametimings based on FPS */
- dev->pixels_per_line = pixels_per_line;
- dev->lines_per_frame = lines_per_frame;
-
- /* Update the new values so that user side knows the current settings */
- ret = __imx_update_exposure_timing(client,
- dev->coarse_itg, dev->pixels_per_line, dev->lines_per_frame);
- if (ret)
- return ret;
-
- dev->fps = fps;
-
- ret = imx_get_intg_factor(client, imx_info, dev->regs);
- if (ret)
- return ret;
-
- interval->interval.denominator = res->fps_options[dev->fps_index].fps;
- interval->interval.numerator = 1;
- __imx_print_timing(sd);
-
- return ret;
-}
-
-static int imx_s_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- struct imx_device *dev = to_imx_sensor(sd);
- int ret;
-
- mutex_lock(&dev->input_lock);
- ret = __imx_s_frame_interval(sd, interval);
- mutex_unlock(&dev->input_lock);
-
- return ret;
-}
-
-static int imx_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- struct imx_device *dev = to_imx_sensor(sd);
-
- mutex_lock(&dev->input_lock);
- *frames = dev->curr_res_table[dev->fmt_idx].skip_frames;
- mutex_unlock(&dev->input_lock);
-
- return 0;
-}
-
-static const struct v4l2_subdev_sensor_ops imx_sensor_ops = {
- .g_skip_frames = imx_g_skip_frames,
-};
-
-static const struct v4l2_subdev_video_ops imx_video_ops = {
- .s_stream = imx_s_stream,
- .s_parm = imx_s_parm,
- .g_frame_interval = imx_g_frame_interval,
- .s_frame_interval = imx_s_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops imx_core_ops = {
- .s_power = imx_s_power,
- .ioctl = imx_ioctl,
- .init = imx_init,
-};
-
-static const struct v4l2_subdev_pad_ops imx_pad_ops = {
- .enum_mbus_code = imx_enum_mbus_code,
- .enum_frame_size = imx_enum_frame_size,
- .get_fmt = imx_get_fmt,
- .set_fmt = imx_set_fmt,
-};
-
-static const struct v4l2_subdev_ops imx_ops = {
- .core = &imx_core_ops,
- .video = &imx_video_ops,
- .pad = &imx_pad_ops,
- .sensor = &imx_sensor_ops,
-};
-
-static const struct media_entity_operations imx_entity_ops = {
- .link_setup = NULL,
-};
-
-static int imx_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx_device *dev = to_imx_sensor(sd);
-
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
-
- media_entity_cleanup(&dev->sd.entity);
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
- dev->platform_data->csi_cfg(sd, 0);
- v4l2_device_unregister_subdev(sd);
- release_msr_list(client, dev->fw);
- kfree(dev);
-
- return 0;
-}
-
-static int __imx_init_ctrl_handler(struct imx_device *dev)
-{
- struct v4l2_ctrl_handler *hdl;
- int i;
-
- hdl = &dev->ctrl_handler;
-
- v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(imx_controls));
-
- for (i = 0; i < ARRAY_SIZE(imx_controls); i++)
- v4l2_ctrl_new_custom(&dev->ctrl_handler,
- &imx_controls[i], NULL);
-
- dev->pixel_rate = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_PIXEL_RATE);
- dev->h_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HBLANK);
- dev->v_blank = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VBLANK);
- dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_LINK_FREQ);
- dev->h_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_HFLIP);
- dev->v_flip = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_VFLIP);
- dev->tp_mode = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN);
- dev->tp_r = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_R);
- dev->tp_gr = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GR);
- dev->tp_gb = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_GB);
- dev->tp_b = v4l2_ctrl_find(&dev->ctrl_handler,
- V4L2_CID_TEST_PATTERN_COLOR_B);
-
- if (dev->ctrl_handler.error || dev->pixel_rate == NULL
- || dev->h_blank == NULL || dev->v_blank == NULL
- || dev->h_flip == NULL || dev->v_flip == NULL
- || dev->link_freq == NULL) {
- return dev->ctrl_handler.error;
- }
-
- dev->ctrl_handler.lock = &dev->input_lock;
- dev->sd.ctrl_handler = hdl;
- v4l2_ctrl_handler_setup(&dev->ctrl_handler);
-
- return 0;
-}
-
-static void imx_update_reg_info(struct imx_device *dev)
-{
- if (dev->sensor_id == IMX219_ID) {
- dev->reg_addr = &imx219_addr;
- dev->param_hold = imx219_param_hold;
- dev->param_update = imx219_param_update;
- } else {
- dev->reg_addr = &imx_addr;
- dev->param_hold = imx_param_hold;
- dev->param_update = imx_param_update;
- }
-}
-
-static int imx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct imx_device *dev;
- struct camera_mipi_info *imx_info = NULL;
- int ret;
- char *msr_file_name = NULL;
-
- /* allocate sensor device & init sub device */
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- v4l2_err(client, "%s: out of memory\n", __func__);
- return -ENOMEM;
- }
-
- mutex_init(&dev->input_lock);
-
- dev->i2c_id = id->driver_data;
- dev->fmt_idx = 0;
- dev->sensor_id = IMX_ID_DEFAULT;
- dev->vcm_driver = &imx_vcms[IMX_ID_DEFAULT];
- dev->digital_gain = 256;
-
- v4l2_i2c_subdev_init(&(dev->sd), client, &imx_ops);
-
- if (client->dev.platform_data) {
- ret = imx_s_config(&dev->sd, client->irq,
- client->dev.platform_data);
- if (ret)
- goto out_free;
- }
- imx_info = v4l2_get_subdev_hostdata(&dev->sd);
-
-	/*
-	 * sd->name is set to the sensor driver name by the v4l2 core;
-	 * change it to the sensor name in this case.
-	 */
- imx_update_reg_info(dev);
- snprintf(dev->sd.name, sizeof(dev->sd.name), "%s%x %d-%04x",
- IMX_SUBDEV_PREFIX, dev->sensor_id,
- i2c_adapter_id(client->adapter), client->addr);
-
- ret = __imx_init_ctrl_handler(dev);
- if (ret)
- goto out_ctrl_handler_free;
-
- dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- dev->pad.flags = MEDIA_PAD_FL_SOURCE;
- dev->format.code = imx_translate_bayer_order(
- imx_info->raw_bayer_order);
- dev->sd.entity.ops = &imx_entity_ops;
- dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
- ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
- if (ret) {
- imx_remove(client);
- return ret;
- }
-
-	/* Load the noise reduction and dead pixel registers from the cpf file */
- if (dev->platform_data->msr_file_name != NULL)
- msr_file_name = dev->platform_data->msr_file_name();
- if (msr_file_name) {
- ret = load_msr_list(client, msr_file_name, &dev->fw);
- if (ret) {
- imx_remove(client);
- return ret;
- }
- } else {
- dev_warn(&client->dev, "Drvb file not present");
- }
-
- return ret;
-
-out_ctrl_handler_free:
- v4l2_ctrl_handler_free(&dev->ctrl_handler);
-
-out_free:
- v4l2_device_unregister_subdev(&dev->sd);
- kfree(dev);
- return ret;
-}
-
-static const struct i2c_device_id imx_ids[] = {
- {IMX_NAME_175, IMX175_ID},
- {IMX_NAME_135, IMX135_ID},
- {IMX_NAME_135_FUJI, IMX135_FUJI_ID},
- {IMX_NAME_134, IMX134_ID},
- {IMX_NAME_132, IMX132_ID},
- {IMX_NAME_208, IMX208_ID},
- {IMX_NAME_219, IMX219_ID},
- {IMX_NAME_227, IMX227_ID},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, imx_ids);
-
-static struct i2c_driver imx_driver = {
- .driver = {
- .name = IMX_DRIVER,
- },
- .probe = imx_probe,
- .remove = imx_remove,
- .id_table = imx_ids,
-};
-
-static __init int init_imx(void)
-{
- return i2c_add_driver(&imx_driver);
-}
-
-static __exit void exit_imx(void)
-{
- i2c_del_driver(&imx_driver);
-}
-
-module_init(init_imx);
-module_exit(exit_imx);
-
-MODULE_DESCRIPTION("A low-level driver for Sony IMX sensors");
-MODULE_AUTHOR("Shenbo Huang <shenbo.huang@intel.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
deleted file mode 100644
index 30beb2a0ed93..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ /dev/null
@@ -1,737 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX_H__
-#define __IMX_H__
-#include "../../include/linux/atomisp_platform.h"
-#include "../../include/linux/atomisp.h"
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/videodev2.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-subdev.h>
-#include "imx175.h"
-#include "imx135.h"
-#include "imx134.h"
-#include "imx132.h"
-#include "imx208.h"
-#include "imx219.h"
-#include "imx227.h"
-
-#define IMX_MCLK 192
-
-/* TODO - This should be added into include/linux/videodev2.h */
-#ifndef V4L2_IDENT_IMX
-#define V4L2_IDENT_IMX 8245
-#endif
-
-#define IMX_MAX_AE_LUT_LENGTH 5
-/*
- * imx System control registers
- */
-#define IMX_MASK_5BIT 0x1F
-#define IMX_MASK_4BIT 0xF
-#define IMX_MASK_3BIT 0x7
-#define IMX_MASK_2BIT 0x3
-#define IMX_MASK_8BIT 0xFF
-#define IMX_MASK_11BIT 0x7FF
-#define IMX_INTG_BUF_COUNT 2
-
-#define IMX_FINE_INTG_TIME 0x1E8
-
-#define IMX_VT_PIX_CLK_DIV 0x0301
-#define IMX_VT_SYS_CLK_DIV 0x0303
-#define IMX_PRE_PLL_CLK_DIV 0x0305
-#define IMX227_IOP_PRE_PLL_CLK_DIV 0x030D
-#define IMX227_PLL_MULTIPLIER 0x0306
-#define IMX227_IOP_PLL_MULTIPLIER 0x030E
-#define IMX227_PLL_MULTI_DRIVE 0x0310
-#define IMX227_OP_PIX_CLK_DIV 0x0309
-#define IMX227_OP_SYS_CLK_DIV 0x030B
-#define IMX_PLL_MULTIPLIER 0x030C
-#define IMX_OP_PIX_DIV 0x0309
-#define IMX_OP_SYS_DIV 0x030B
-#define IMX_FRAME_LENGTH_LINES 0x0340
-#define IMX_LINE_LENGTH_PIXELS 0x0342
-#define IMX_COARSE_INTG_TIME_MIN 0x1004
-#define IMX_COARSE_INTG_TIME_MAX 0x1006
-#define IMX_BINNING_ENABLE 0x0390
-#define IMX227_BINNING_ENABLE 0x0900
-#define IMX_BINNING_TYPE 0x0391
-#define IMX227_BINNING_TYPE 0x0901
-#define IMX_READ_MODE 0x0390
-#define IMX227_READ_MODE 0x0900
-
-#define IMX_HORIZONTAL_START_H 0x0344
-#define IMX_VERTICAL_START_H 0x0346
-#define IMX_HORIZONTAL_END_H 0x0348
-#define IMX_VERTICAL_END_H 0x034a
-#define IMX_HORIZONTAL_OUTPUT_SIZE_H 0x034c
-#define IMX_VERTICAL_OUTPUT_SIZE_H 0x034e
-
-/* Post Divider setting register for imx132 and imx208 */
-#define IMX132_208_VT_RGPLTD 0x30A4
-
-/* Multiplier setting register for imx132, imx208, and imx219 */
-#define IMX132_208_219_PLL_MULTIPLIER 0x0306
-
-#define IMX_COARSE_INTEGRATION_TIME 0x0202
-#define IMX_TEST_PATTERN_MODE 0x0600
-#define IMX_TEST_PATTERN_COLOR_R 0x0602
-#define IMX_TEST_PATTERN_COLOR_GR 0x0604
-#define IMX_TEST_PATTERN_COLOR_B 0x0606
-#define IMX_TEST_PATTERN_COLOR_GB 0x0608
-#define IMX_IMG_ORIENTATION 0x0101
-#define IMX_VFLIP_BIT 2
-#define IMX_HFLIP_BIT 1
-#define IMX_GLOBAL_GAIN 0x0205
-#define IMX_SHORT_AGC_GAIN 0x0233
-#define IMX_DGC_ADJ 0x020E
-#define IMX_DGC_LEN 10
-#define IMX227_DGC_LEN 4
-#define IMX_MAX_EXPOSURE_SUPPORTED 0xfffb
-#define IMX_MAX_GLOBAL_GAIN_SUPPORTED 0x00ff
-#define IMX_MAX_DIGITAL_GAIN_SUPPORTED 0x0fff
-
-#define MAX_FMTS 1
-#define IMX_OTP_DATA_SIZE 1280
-
-#define IMX_SUBDEV_PREFIX "imx"
-#define IMX_DRIVER "imx1x5"
-
-/* Sensor ids from identification register */
-#define IMX_NAME_134 "imx134"
-#define IMX_NAME_135 "imx135"
-#define IMX_NAME_175 "imx175"
-#define IMX_NAME_132 "imx132"
-#define IMX_NAME_208 "imx208"
-#define IMX_NAME_219 "imx219"
-#define IMX_NAME_227 "imx227"
-#define IMX175_ID 0x0175
-#define IMX135_ID 0x0135
-#define IMX134_ID 0x0134
-#define IMX132_ID 0x0132
-#define IMX208_ID 0x0208
-#define IMX219_ID 0x0219
-#define IMX227_ID 0x0227
-
-/* Sensor id based on the i2c_device_id table
- * (the Fuji module cannot be detected based on sensor registers) */
-#define IMX135_FUJI_ID 0x0136
-#define IMX_NAME_135_FUJI "imx135fuji"
-
-/* imx175 - use dw9714 vcm */
-#define IMX175_MERRFLD 0x175
-#define IMX175_VALLEYVIEW 0x176
-#define IMX135_SALTBAY 0x135
-#define IMX135_VICTORIABAY 0x136
-#define IMX132_SALTBAY 0x132
-#define IMX134_VALLEYVIEW 0x134
-#define IMX208_MOFD_PD2 0x208
-#define IMX219_MFV0_PRH 0x219
-#define IMX227_SAND 0x227
-
-/* OTP-specific settings */
-#define E2PROM_ADDR 0xa0
-#define E2PROM_LITEON_12P1BA869D_ADDR 0xa0
-#define E2PROM_ABICO_SS89A839_ADDR 0xa8
-#define DEFAULT_OTP_SIZE 1280
-#define IMX135_OTP_SIZE 1280
-#define IMX219_OTP_SIZE 2048
-#define IMX227_OTP_SIZE 2560
-#define E2PROM_LITEON_12P1BA869D_SIZE 544
-
-#define IMX_ID_DEFAULT 0x0000
-#define IMX132_175_208_219_CHIP_ID 0x0000
-#define IMX134_135_CHIP_ID 0x0016
-#define IMX134_135_227_CHIP_ID 0x0016
-
-#define IMX175_RES_WIDTH_MAX 3280
-#define IMX175_RES_HEIGHT_MAX 2464
-#define IMX135_RES_WIDTH_MAX 4208
-#define IMX135_RES_HEIGHT_MAX 3120
-#define IMX132_RES_WIDTH_MAX 1936
-#define IMX132_RES_HEIGHT_MAX 1096
-#define IMX134_RES_WIDTH_MAX 3280
-#define IMX134_RES_HEIGHT_MAX 2464
-#define IMX208_RES_WIDTH_MAX 1936
-#define IMX208_RES_HEIGHT_MAX 1096
-#define IMX219_RES_WIDTH_MAX 3280
-#define IMX219_RES_HEIGHT_MAX 2464
-#define IMX227_RES_WIDTH_MAX 2400
-#define IMX227_RES_HEIGHT_MAX 2720
-
-/* Defines for lens/VCM */
-#define IMX_FOCAL_LENGTH_NUM	369	/* 3.69 mm */
-#define IMX_FOCAL_LENGTH_DEM 100
-#define IMX_F_NUMBER_DEFAULT_NUM 22
-#define IMX_F_NUMBER_DEM 10
-#define IMX_INVALID_CONFIG 0xffffffff
-#define IMX_MAX_FOCUS_POS 1023
-#define IMX_MAX_FOCUS_NEG (-1023)
-#define IMX_VCM_SLEW_STEP_MAX 0x3f
-#define IMX_VCM_SLEW_TIME_MAX 0x1f
-
-#define IMX_BIN_FACTOR_MAX 4
-#define IMX_INTEGRATION_TIME_MARGIN 4
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
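-
-/*
- * Decoded with the definitions above: IMX_FOCAL_LENGTH_DEFAULT 0x1710064 is
- * 369 << 16 | 100, i.e. the 3.69 mm focal length; IMX_F_NUMBER_DEFAULT
- * 0x16000a is 22 << 16 | 10, i.e. f/2.2; and IMX_F_NUMBER_RANGE 0x160a160a
- * packs f/2.2 as both the minimum and maximum of the range.
- */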
-
-struct imx_vcm {
- int (*power_up)(struct v4l2_subdev *sd);
- int (*power_down)(struct v4l2_subdev *sd);
- int (*t_focus_abs)(struct v4l2_subdev *sd, s32 value);
- int (*t_focus_abs_init)(struct v4l2_subdev *sd);
- int (*t_focus_rel)(struct v4l2_subdev *sd, s32 value);
- int (*q_focus_status)(struct v4l2_subdev *sd, s32 *value);
- int (*q_focus_abs)(struct v4l2_subdev *sd, s32 *value);
- int (*t_vcm_slew)(struct v4l2_subdev *sd, s32 value);
- int (*t_vcm_timing)(struct v4l2_subdev *sd, s32 value);
-};
-
-struct imx_otp {
- void * (*otp_read)(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
- u32 start_addr;
- u32 size;
- u8 dev_addr;
-};
-
-struct max_res {
- int res_max_width;
- int res_max_height;
-};
-
-struct max_res imx_max_res[] = {
- [IMX175_ID] = {
- .res_max_width = IMX175_RES_WIDTH_MAX,
- .res_max_height = IMX175_RES_HEIGHT_MAX,
- },
- [IMX135_ID] = {
- .res_max_width = IMX135_RES_WIDTH_MAX,
- .res_max_height = IMX135_RES_HEIGHT_MAX,
- },
- [IMX132_ID] = {
- .res_max_width = IMX132_RES_WIDTH_MAX,
- .res_max_height = IMX132_RES_HEIGHT_MAX,
- },
- [IMX134_ID] = {
- .res_max_width = IMX134_RES_WIDTH_MAX,
- .res_max_height = IMX134_RES_HEIGHT_MAX,
- },
- [IMX208_ID] = {
- .res_max_width = IMX208_RES_WIDTH_MAX,
- .res_max_height = IMX208_RES_HEIGHT_MAX,
- },
- [IMX219_ID] = {
- .res_max_width = IMX219_RES_WIDTH_MAX,
- .res_max_height = IMX219_RES_HEIGHT_MAX,
- },
- [IMX227_ID] = {
- .res_max_width = IMX227_RES_WIDTH_MAX,
- .res_max_height = IMX227_RES_HEIGHT_MAX,
- },
-};
-
-struct imx_settings {
- struct imx_reg const *init_settings;
- struct imx_resolution *res_preview;
- struct imx_resolution *res_still;
- struct imx_resolution *res_video;
- int n_res_preview;
- int n_res_still;
- int n_res_video;
-};
-
-struct imx_settings imx_sets[] = {
- [IMX175_MERRFLD] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX175_VALLEYVIEW] = {
- .init_settings = imx175_init_settings,
- .res_preview = imx175_res_preview,
- .res_still = imx175_res_still,
- .res_video = imx175_res_video,
- .n_res_preview = ARRAY_SIZE(imx175_res_preview),
- .n_res_still = ARRAY_SIZE(imx175_res_still),
- .n_res_video = ARRAY_SIZE(imx175_res_video),
- },
- [IMX135_SALTBAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview,
- .res_still = imx135_res_still,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview),
- .n_res_still = ARRAY_SIZE(imx135_res_still),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX135_VICTORIABAY] = {
- .init_settings = imx135_init_settings,
- .res_preview = imx135_res_preview_mofd,
- .res_still = imx135_res_still_mofd,
- .res_video = imx135_res_video,
- .n_res_preview = ARRAY_SIZE(imx135_res_preview_mofd),
- .n_res_still = ARRAY_SIZE(imx135_res_still_mofd),
- .n_res_video = ARRAY_SIZE(imx135_res_video),
- },
- [IMX132_SALTBAY] = {
- .init_settings = imx132_init_settings,
- .res_preview = imx132_res_preview,
- .res_still = imx132_res_still,
- .res_video = imx132_res_video,
- .n_res_preview = ARRAY_SIZE(imx132_res_preview),
- .n_res_still = ARRAY_SIZE(imx132_res_still),
- .n_res_video = ARRAY_SIZE(imx132_res_video),
- },
- [IMX134_VALLEYVIEW] = {
- .init_settings = imx134_init_settings,
- .res_preview = imx134_res_preview,
- .res_still = imx134_res_still,
- .res_video = imx134_res_video,
- .n_res_preview = ARRAY_SIZE(imx134_res_preview),
- .n_res_still = ARRAY_SIZE(imx134_res_still),
- .n_res_video = ARRAY_SIZE(imx134_res_video),
- },
- [IMX208_MOFD_PD2] = {
- .init_settings = imx208_init_settings,
- .res_preview = imx208_res_preview,
- .res_still = imx208_res_still,
- .res_video = imx208_res_video,
- .n_res_preview = ARRAY_SIZE(imx208_res_preview),
- .n_res_still = ARRAY_SIZE(imx208_res_still),
- .n_res_video = ARRAY_SIZE(imx208_res_video),
- },
- [IMX219_MFV0_PRH] = {
- .init_settings = imx219_init_settings,
- .res_preview = imx219_res_preview,
- .res_still = imx219_res_still,
- .res_video = imx219_res_video,
- .n_res_preview = ARRAY_SIZE(imx219_res_preview),
- .n_res_still = ARRAY_SIZE(imx219_res_still),
- .n_res_video = ARRAY_SIZE(imx219_res_video),
- },
- [IMX227_SAND] = {
- .init_settings = imx227_init_settings,
- .res_preview = imx227_res_preview,
- .res_still = imx227_res_still,
- .res_video = imx227_res_video,
- .n_res_preview = ARRAY_SIZE(imx227_res_preview),
- .n_res_still = ARRAY_SIZE(imx227_res_still),
- .n_res_video = ARRAY_SIZE(imx227_res_video),
- },
-};
-
-struct imx_reg_addr {
- u16 frame_length_lines;
- u16 line_length_pixels;
- u16 horizontal_start_h;
- u16 vertical_start_h;
- u16 horizontal_end_h;
- u16 vertical_end_h;
- u16 horizontal_output_size_h;
- u16 vertical_output_size_h;
- u16 coarse_integration_time;
- u16 img_orientation;
- u16 global_gain;
- u16 dgc_adj;
-};
-
-struct imx_reg_addr imx_addr = {
- IMX_FRAME_LENGTH_LINES,
- IMX_LINE_LENGTH_PIXELS,
- IMX_HORIZONTAL_START_H,
- IMX_VERTICAL_START_H,
- IMX_HORIZONTAL_END_H,
- IMX_VERTICAL_END_H,
- IMX_HORIZONTAL_OUTPUT_SIZE_H,
- IMX_VERTICAL_OUTPUT_SIZE_H,
- IMX_COARSE_INTEGRATION_TIME,
- IMX_IMG_ORIENTATION,
- IMX_GLOBAL_GAIN,
- IMX_DGC_ADJ,
-};
-
-struct imx_reg_addr imx219_addr = {
- IMX219_FRAME_LENGTH_LINES,
- IMX219_LINE_LENGTH_PIXELS,
- IMX219_HORIZONTAL_START_H,
- IMX219_VERTICAL_START_H,
- IMX219_HORIZONTAL_END_H,
- IMX219_VERTICAL_END_H,
- IMX219_HORIZONTAL_OUTPUT_SIZE_H,
- IMX219_VERTICAL_OUTPUT_SIZE_H,
- IMX219_COARSE_INTEGRATION_TIME,
- IMX219_IMG_ORIENTATION,
- IMX219_GLOBAL_GAIN,
- IMX219_DGC_ADJ,
-};
-
-#define v4l2_format_capture_type_entry(_width, _height, \
- _pixelformat, _bytesperline, _colorspace) \
- {\
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,\
- .fmt.pix.width = (_width),\
- .fmt.pix.height = (_height),\
- .fmt.pix.pixelformat = (_pixelformat),\
- .fmt.pix.bytesperline = (_bytesperline),\
- .fmt.pix.colorspace = (_colorspace),\
- .fmt.pix.sizeimage = (_height)*(_bytesperline),\
- }
-
-#define s_output_format_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps) \
- {\
- .v4l2_fmt = v4l2_format_capture_type_entry(_width, \
- _height, _pixelformat, _bytesperline, \
- _colorspace),\
- .fps = (_fps),\
- }
-
-#define s_output_format_reg_entry(_width, _height, _pixelformat, \
- _bytesperline, _colorspace, _fps, _reg_setting) \
- {\
- .s_fmt = s_output_format_entry(_width, _height,\
- _pixelformat, _bytesperline, \
- _colorspace, _fps),\
- .reg_setting = (_reg_setting),\
- }
-
-/* imx device structure */
-struct imx_device {
- struct v4l2_subdev sd;
- struct media_pad pad;
- struct v4l2_mbus_framefmt format;
- struct camera_sensor_platform_data *platform_data;
- struct mutex input_lock; /* serialize sensor's ioctl */
- int fmt_idx;
- int status;
- int streaming;
- int power;
- int run_mode;
- int vt_pix_clk_freq_mhz;
- int fps_index;
- u32 focus;
- u16 sensor_id; /* Sensor id from registers */
- u16 i2c_id; /* Sensor id from i2c_device_id */
- u16 coarse_itg;
- u16 fine_itg;
- u16 digital_gain;
- u16 gain;
- u16 pixels_per_line;
- u16 lines_per_frame;
- u8 targetfps;
- u8 fps;
- const struct imx_reg *regs;
- u8 res;
- u8 type;
- u8 sensor_revision;
- u8 *otp_data;
- struct imx_settings *mode_tables;
- struct imx_vcm *vcm_driver;
- struct imx_otp *otp_driver;
- const struct imx_resolution *curr_res_table;
- unsigned long entries_curr_table;
- const struct firmware *fw;
- struct imx_reg_addr *reg_addr;
- const struct imx_reg *param_hold;
- const struct imx_reg *param_update;
-
- /* used for h/b blank tuning */
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_ctrl *pixel_rate;
- struct v4l2_ctrl *h_blank;
- struct v4l2_ctrl *v_blank;
- struct v4l2_ctrl *link_freq;
- struct v4l2_ctrl *h_flip;
- struct v4l2_ctrl *v_flip;
-
- /* Test pattern control */
- struct v4l2_ctrl *tp_mode;
- struct v4l2_ctrl *tp_r;
- struct v4l2_ctrl *tp_gr;
- struct v4l2_ctrl *tp_gb;
- struct v4l2_ctrl *tp_b;
-
- /* FIXME! */
- bool new_res_sel_method;
-};
-
-#define to_imx_sensor(x) container_of(x, struct imx_device, sd)
-
-#define IMX_MAX_WRITE_BUF_SIZE 32
-struct imx_write_buffer {
- u16 addr;
- u8 data[IMX_MAX_WRITE_BUF_SIZE];
-};
-
-struct imx_write_ctrl {
- int index;
- struct imx_write_buffer buffer;
-};
-
-static const struct imx_reg imx_soft_standby[] = {
- {IMX_8BIT, 0x0100, 0x00},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_streaming[] = {
- {IMX_8BIT, 0x0100, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_hold[] = {
- {IMX_8BIT, 0x0104, 0x01}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx_param_update[] = {
- {IMX_8BIT, 0x0104, 0x00}, /* GROUPED_PARAMETER_HOLD */
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_hold[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx219_param_update[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-extern int ad5816g_vcm_power_up(struct v4l2_subdev *sd);
-extern int ad5816g_vcm_power_down(struct v4l2_subdev *sd);
-extern int ad5816g_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int ad5816g_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int ad5816g_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int drv201_vcm_power_up(struct v4l2_subdev *sd);
-extern int drv201_vcm_power_down(struct v4l2_subdev *sd);
-extern int drv201_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int drv201_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int drv201_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int drv201_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9714_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9714_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_focus_abs_init(struct v4l2_subdev *sd);
-extern int dw9714_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9714_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9714_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9719_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9719_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9719_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9719_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9719_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int dw9718_vcm_power_up(struct v4l2_subdev *sd);
-extern int dw9718_vcm_power_down(struct v4l2_subdev *sd);
-extern int dw9718_t_focus_abs(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_focus_rel(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_q_focus_status(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_q_focus_abs(struct v4l2_subdev *sd, s32 *value);
-extern int dw9718_t_vcm_slew(struct v4l2_subdev *sd, s32 value);
-extern int dw9718_t_vcm_timing(struct v4l2_subdev *sd, s32 value);
-
-extern int vcm_power_up(struct v4l2_subdev *sd);
-extern int vcm_power_down(struct v4l2_subdev *sd);
-
-struct imx_vcm imx_vcms[] = {
- [IMX175_MERRFLD] = {
- .power_up = drv201_vcm_power_up,
- .power_down = drv201_vcm_power_down,
- .t_focus_abs = drv201_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = drv201_t_focus_rel,
- .q_focus_status = drv201_q_focus_status,
- .q_focus_abs = drv201_q_focus_abs,
- .t_vcm_slew = drv201_t_vcm_slew,
- .t_vcm_timing = drv201_t_vcm_timing,
- },
- [IMX175_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX135_SALTBAY] = {
- .power_up = ad5816g_vcm_power_up,
- .power_down = ad5816g_vcm_power_down,
- .t_focus_abs = ad5816g_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = ad5816g_t_focus_rel,
- .q_focus_status = ad5816g_q_focus_status,
- .q_focus_abs = ad5816g_q_focus_abs,
- .t_vcm_slew = ad5816g_t_vcm_slew,
- .t_vcm_timing = ad5816g_t_vcm_timing,
- },
- [IMX135_VICTORIABAY] = {
- .power_up = dw9719_vcm_power_up,
- .power_down = dw9719_vcm_power_down,
- .t_focus_abs = dw9719_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9719_t_focus_rel,
- .q_focus_status = dw9719_q_focus_status,
- .q_focus_abs = dw9719_q_focus_abs,
- .t_vcm_slew = dw9719_t_vcm_slew,
- .t_vcm_timing = dw9719_t_vcm_timing,
- },
- [IMX134_VALLEYVIEW] = {
- .power_up = dw9714_vcm_power_up,
- .power_down = dw9714_vcm_power_down,
- .t_focus_abs = dw9714_t_focus_abs,
- .t_focus_abs_init = dw9714_t_focus_abs_init,
- .t_focus_rel = dw9714_t_focus_rel,
- .q_focus_status = dw9714_q_focus_status,
- .q_focus_abs = dw9714_q_focus_abs,
- .t_vcm_slew = dw9714_t_vcm_slew,
- .t_vcm_timing = dw9714_t_vcm_timing,
- },
- [IMX219_MFV0_PRH] = {
- .power_up = dw9718_vcm_power_up,
- .power_down = dw9718_vcm_power_down,
- .t_focus_abs = dw9718_t_focus_abs,
- .t_focus_abs_init = NULL,
- .t_focus_rel = dw9718_t_focus_rel,
- .q_focus_status = dw9718_q_focus_status,
- .q_focus_abs = dw9718_q_focus_abs,
- .t_vcm_slew = dw9718_t_vcm_slew,
- .t_vcm_timing = dw9718_t_vcm_timing,
- },
- [IMX_ID_DEFAULT] = {
- .power_up = NULL,
- .power_down = NULL,
- .t_focus_abs_init = NULL,
- },
-};
-
-extern void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-extern void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size);
-struct imx_otp imx_otps[] = {
- [IMX175_MERRFLD] = {
- .otp_read = imx_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX175_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ABICO_SS89A839_ADDR,
- .start_addr = E2PROM_2ADDR,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_SALTBAY] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX135_VICTORIABAY] = {
- .otp_read = imx_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX134_VALLEYVIEW] = {
- .otp_read = e2prom_otp_read,
- .dev_addr = E2PROM_LITEON_12P1BA869D_ADDR,
- .start_addr = 0,
- .size = E2PROM_LITEON_12P1BA869D_SIZE,
- },
- [IMX132_SALTBAY] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX208_MOFD_PD2] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
- [IMX219_MFV0_PRH] = {
- .otp_read = brcc064_otp_read,
- .dev_addr = E2PROM_ADDR,
- .start_addr = 0,
- .size = IMX219_OTP_SIZE,
- },
- [IMX227_SAND] = {
- .otp_read = imx227_otp_read,
- .size = IMX227_OTP_SIZE,
- },
- [IMX_ID_DEFAULT] = {
- .otp_read = dummy_otp_read,
- .size = DEFAULT_OTP_SIZE,
- },
-};
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx132.h b/drivers/staging/media/atomisp/i2c/imx/imx132.h
deleted file mode 100644
index 98f047b8a1ba..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx132.h
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX132_H__
-#define __IMX132_H__
-#include "common.h"
-
-/********************** registers define ********************************/
-#define IMX132_RGLANESEL 0x3301 /* Number of lanes */
-#define IMX132_RGLANESEL_1LANE 0x01
-#define IMX132_RGLANESEL_2LANES 0x00
-#define IMX132_RGLANESEL_4LANES 0x03
-
-#define IMX132_2LANES_GAINFACT 2096 /* 524/256 * 2^10 */
-#define IMX132_2LANES_GAINFACT_SHIFT 10
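-/*
- * I.e. 2096 / 2^10 = 524/256, roughly a 2.047x factor in Q10 fixed point,
- * with the shift amount given by the _SHIFT define above.
- */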
-
-/********************** settings for imx from vendor*********************/
-static struct imx_reg imx132_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x14},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0xA3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1456x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x04},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xB3},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0xB0},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1636x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xAA},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x0D},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x06},
- {IMX_8BIT, 0x034D, 0x64},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx132_1336x1096_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- /* PLL setting */
- {IMX_8BIT, 0x0305, 0x02},
- {IMX_8BIT, 0x0307, 0x50},
- {IMX_8BIT, 0x30A4, 0x02},
- {IMX_8BIT, 0x303C, 0x3C},
- /* Mode setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0x2C},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x32},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x77},
- {IMX_8BIT, 0x034A, 0x04},
- {IMX_8BIT, 0x034B, 0x79},
- {IMX_8BIT, 0x034C, 0x05},
- {IMX_8BIT, 0x034D, 0x38},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x48},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x303D, 0x10},
- {IMX_8BIT, 0x303E, 0x5A},
- {IMX_8BIT, 0x3040, 0x00},
- {IMX_8BIT, 0x3041, 0x00},
- {IMX_8BIT, 0x3048, 0x00},
- {IMX_8BIT, 0x304C, 0x2F},
- {IMX_8BIT, 0x304D, 0x02},
- {IMX_8BIT, 0x3064, 0x92},
- {IMX_8BIT, 0x306A, 0x10},
- {IMX_8BIT, 0x309B, 0x00},
- {IMX_8BIT, 0x309E, 0x41},
- {IMX_8BIT, 0x30A0, 0x10},
- {IMX_8BIT, 0x30A1, 0x0B},
- {IMX_8BIT, 0x30B2, 0x00},
- {IMX_8BIT, 0x30D5, 0x00},
- {IMX_8BIT, 0x30D6, 0x00},
- {IMX_8BIT, 0x30D7, 0x00},
- {IMX_8BIT, 0x30D8, 0x00},
- {IMX_8BIT, 0x30D9, 0x00},
- {IMX_8BIT, 0x30DA, 0x00},
- {IMX_8BIT, 0x30DB, 0x00},
- {IMX_8BIT, 0x30DC, 0x00},
- {IMX_8BIT, 0x30DD, 0x00},
- {IMX_8BIT, 0x30DE, 0x00},
- {IMX_8BIT, 0x3102, 0x0C},
- {IMX_8BIT, 0x3103, 0x33},
- {IMX_8BIT, 0x3104, 0x18},
- {IMX_8BIT, 0x3105, 0x00},
- {IMX_8BIT, 0x3106, 0x65},
- {IMX_8BIT, 0x3107, 0x00},
- {IMX_8BIT, 0x3108, 0x06},
- {IMX_8BIT, 0x3109, 0x04},
- {IMX_8BIT, 0x310A, 0x04},
- {IMX_8BIT, 0x315C, 0x3D},
- {IMX_8BIT, 0x315D, 0x3C},
- {IMX_8BIT, 0x316E, 0x3E},
- {IMX_8BIT, 0x316F, 0x3D},
- /* Global timing */
- {IMX_8BIT, 0x3304, 0x07}, /* RGTLPX[5:0] TLPX */
- {IMX_8BIT, 0x3305, 0x06}, /* RGTCLKPREPARE[3:0] TCLK-PREPARE */
- {IMX_8BIT, 0x3306, 0x19}, /* RGTCLKZERO[5:0] TCLK-ZERO */
- {IMX_8BIT, 0x3307, 0x03}, /* RGTCLKPRE[5:0] TCLK-PRE */
- {IMX_8BIT, 0x3308, 0x0F}, /* RGTCLKPOST[5:0] TCLK-POST */
- {IMX_8BIT, 0x3309, 0x07}, /* RGTCLKTRAIL[3:0] TCLK-TRAIL */
- {IMX_8BIT, 0x330A, 0x0C}, /* RGTHSEXIT[5:0] THS-EXIT */
- {IMX_8BIT, 0x330B, 0x06}, /* RGTHSPREPARE[3:0] THS-PREPARE */
- {IMX_8BIT, 0x330C, 0x0B}, /* RGTHSZERO[5:0] THS-ZERO */
- {IMX_8BIT, 0x330D, 0x07}, /* RGTHSTRAIL[3:0] THS-TRAIL */
- {IMX_8BIT, 0x330E, 0x03},
- {IMX_8BIT, 0x3318, 0x62},
- {IMX_8BIT, 0x3322, 0x09},
- {IMX_8BIT, 0x3342, 0x00},
- {IMX_8BIT, 0x3348, 0xE0},
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx132_init_settings[] = {
- /* sw reset */
- { IMX_8BIT, 0x0100, 0x00 },
- { IMX_8BIT, 0x0103, 0x01 },
- { IMX_TOK_DELAY, 0, 5},
- { IMX_8BIT, 0x0103, 0x00 },
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Global Settings */
- {IMX_8BIT, 0x3087, 0x53},
- {IMX_8BIT, 0x308B, 0x5A},
- {IMX_8BIT, 0x3094, 0x11},
- {IMX_8BIT, 0x309D, 0xA4},
- {IMX_8BIT, 0x30AA, 0x01},
- {IMX_8BIT, 0x30C6, 0x00},
- {IMX_8BIT, 0x30C7, 0x00},
- {IMX_8BIT, 0x3118, 0x2F},
- {IMX_8BIT, 0x312A, 0x00},
- {IMX_8BIT, 0x312B, 0x0B},
- {IMX_8BIT, 0x312C, 0x0B},
- {IMX_8BIT, 0x312D, 0x13},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx132_res_preview[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_still[] = {
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-
-struct imx_resolution imx132_res_video[] = {
- {
- .desc = "imx132_1336x1096_30fps",
- .regs = imx132_1336x1096_30fps,
- .width = 1336,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1456x1096_30fps",
- .regs = imx132_1456x1096_30fps,
- .width = 1456,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1636x1096_30fps",
- .regs = imx132_1636x1096_30fps,
- .width = 1636,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
- {
- .desc = "imx132_1080p_30fps",
- .regs = imx132_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08F2,
- .lines_per_frame = 0x045C,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 384000,
- },
-};
-#endif
-
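The mode tables above (and the imx134 tables in the next removed header) split every 16-bit sensor parameter across an MSB/LSB register pair: in imx132_1636x1096_30fps, 0x034C/0x034D hold x_output_size as (0x06 << 8) | 0x64 = 1636, and 0x034E/0x034F hold y_output_size = 0x0448 = 1096. The standalone C sketch below only illustrates that decoding; imx_reg_pair() is a hypothetical helper written for this note, not an API of the removed atomisp imx driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: combine an MSB/LSB register pair into a 16-bit value. */
static uint16_t imx_reg_pair(uint8_t msb, uint8_t lsb)
{
	return ((uint16_t)msb << 8) | lsb;
}

int main(void)
{
	/* Values taken from the imx132_1636x1096_30fps table above. */
	printf("x_output_size = %u\n", imx_reg_pair(0x06, 0x64)); /* 1636 */
	printf("y_output_size = %u\n", imx_reg_pair(0x04, 0x48)); /* 1096 */
	return 0;
}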
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx134.h b/drivers/staging/media/atomisp/i2c/imx/imx134.h
deleted file mode 100644
index cf35197ed77f..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx134.h
+++ /dev/null
@@ -1,2464 +0,0 @@
-#ifndef __IMX134_H__
-#define __IMX134_H__
-
-/********************** imx134 setting - version 1 *********************/
-static struct imx_reg const imx134_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* Basic settings */
- { IMX_8BIT, 0x0105, 0x01 },
- { IMX_8BIT, 0x0220, 0x01 },
- { IMX_8BIT, 0x3302, 0x11 },
- { IMX_8BIT, 0x3833, 0x20 },
- { IMX_8BIT, 0x3893, 0x00 },
- { IMX_8BIT, 0x3906, 0x08 },
- { IMX_8BIT, 0x3907, 0x01 },
- { IMX_8BIT, 0x391B, 0x01 },
- { IMX_8BIT, 0x3C09, 0x01 },
- { IMX_8BIT, 0x600A, 0x00 },
-
- /* Analog settings */
- { IMX_8BIT, 0x3008, 0xB0 },
- { IMX_8BIT, 0x320A, 0x01 },
- { IMX_8BIT, 0x320D, 0x10 },
- { IMX_8BIT, 0x3216, 0x2E },
- { IMX_8BIT, 0x322C, 0x02 },
- { IMX_8BIT, 0x3409, 0x0C },
- { IMX_8BIT, 0x340C, 0x2D },
- { IMX_8BIT, 0x3411, 0x39 },
- { IMX_8BIT, 0x3414, 0x1E },
- { IMX_8BIT, 0x3427, 0x04 },
- { IMX_8BIT, 0x3480, 0x1E },
- { IMX_8BIT, 0x3484, 0x1E },
- { IMX_8BIT, 0x3488, 0x1E },
- { IMX_8BIT, 0x348C, 0x1E },
- { IMX_8BIT, 0x3490, 0x1E },
- { IMX_8BIT, 0x3494, 0x1E },
- { IMX_8BIT, 0x3511, 0x8F },
- { IMX_8BIT, 0x3617, 0x2D },
-
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 3280x2464 8M 30fps, vendor provide */
-static struct imx_reg const imx134_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* clock setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x0C }, /* x_output_size[15:8]: 3280*/
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x09 }, /* y_output_size[15:8]:2464 */
- { IMX_8BIT, 0x034F, 0xA0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x0C },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x09 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0xAE },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global timing setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration time setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning 30fps 1640x1232, vendor provide */
-static struct imx_reg const imx134_1640_1232_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1640 */
- { IMX_8BIT, 0x034D, 0x68 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1232 */
- { IMX_8BIT, 0x034F, 0xD0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x68 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xD0 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x68 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xD0 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x06 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x616, vendor provide */
-static struct imx_reg const imx134_820_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3279 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 820x552 */
-static struct imx_reg const imx134_820_552_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0345, 0x00 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:128 */
- { IMX_8BIT, 0x0347, 0x80 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3280-1 */
- { IMX_8BIT, 0x0349, 0xCF }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2208+128-1 */
- { IMX_8BIT, 0x034B, 0x1F }, /* y_addr_end[7:0] */
-	{ IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:820 */
- { IMX_8BIT, 0x034D, 0x34 }, /* x_output_size[7:0] */
-	{ IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:552 */
- { IMX_8BIT, 0x034F, 0x28 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x03 },
- { IMX_8BIT, 0x0355, 0x34 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x34 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning 30fps 720x592 */
-static struct imx_reg const imx134_720_592_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:200 */
- { IMX_8BIT, 0x0345, 0xC8 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:40 */
- { IMX_8BIT, 0x0347, 0x28 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:2880+200-1 */
- { IMX_8BIT, 0x0349, 0x07 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2368+40-1 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
-	{ IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]:720 */
- { IMX_8BIT, 0x034D, 0xD0 }, /* x_output_size[7:0] */
-	{ IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xD0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x50 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xD0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_752_616_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x44 }, /* 4x4 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* no resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:136 */
- { IMX_8BIT, 0x0345, 0x88 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
-	{ IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3008+136-1 */
- { IMX_8BIT, 0x0349, 0x47 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2463 */
- { IMX_8BIT, 0x034B, 0x9F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x02 }, /* x_output_size[15:8]: 752*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:616 */
- { IMX_8BIT, 0x034F, 0x68 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x02 },
- { IMX_8BIT, 0x0355, 0xF0 },
- { IMX_8BIT, 0x0356, 0x02 },
- { IMX_8BIT, 0x0357, 0x68 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x02 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x68 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 1424x1168 */
-static struct imx_reg const imx134_1424_1168_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* binning */
- { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x22 }, /* 34/16=2.125 */
- { IMX_8BIT, 0x4082, 0x00 }, /* ?? */
- { IMX_8BIT, 0x4083, 0x00 }, /* ?? */
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
-	{ IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:128 */
- { IMX_8BIT, 0x0345, 0x80 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:0 */
- { IMX_8BIT, 0x0347, 0x00 }, /* y_addr_start[7:0] */
-	{ IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3153 */
- { IMX_8BIT, 0x0349, 0x51 }, /* x_addr_end[7:0] */
-	{ IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2481 */
- { IMX_8BIT, 0x034B, 0xB1 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]: 1424*/
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1168 */
- { IMX_8BIT, 0x034F, 0x90 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-
- { IMX_8BIT, 0x0354, 0x0B },
- { IMX_8BIT, 0x0355, 0xD2 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0xB2 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x90 },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x90 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/4 binning, 16/35 down scaling, 30fps, dvs */
-static struct imx_reg const imx134_240_196_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /*4x4 binning */
- { IMX_8BIT, 0x0391, 0x44 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 }, /* down scaling = 16/35 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2104+590-1 */
- { IMX_8BIT, 0x0349, 0x85 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1720+366-1 */
- { IMX_8BIT, 0x034B, 0x25 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x00 }, /* x_output_size[15:8]: 240*/
- { IMX_8BIT, 0x034D, 0xF0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x00 }, /* y_output_size[15:8]:196 */
- { IMX_8BIT, 0x034F, 0xC4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x02 }, /* crop_x: 526 */
- { IMX_8BIT, 0x0355, 0x0E },
- { IMX_8BIT, 0x0356, 0x01 }, /* crop_y: 430 */
- { IMX_8BIT, 0x0357, 0xAE },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x00 },
- { IMX_8BIT, 0x3311, 0xF0 },
- { IMX_8BIT, 0x3312, 0x00 },
- { IMX_8BIT, 0x3313, 0xC4 },
-
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
-
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0xF0 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0xC4 },
-
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x0A },
- { IMX_8BIT, 0x0203, 0x88 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane, 1/2 binning, 16/38 downscaling, 30fps, dvs */
-static struct imx_reg const imx134_448_366_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* 2x2 binning */
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x26 }, /* down scaling = 16/38 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x02 }, /* x_addr_start[15:8]:590 */
- { IMX_8BIT, 0x0345, 0x4E }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:366 */
- { IMX_8BIT, 0x0347, 0x6E }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2128+590-1 */
- { IMX_8BIT, 0x0349, 0x9D }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:1740+366-1 */
- { IMX_8BIT, 0x034B, 0x39 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x01 }, /* x_output_size[15:8]: 448*/
- { IMX_8BIT, 0x034D, 0xC0 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x01 }, /* y_output_size[15:8]:366 */
- { IMX_8BIT, 0x034F, 0x6E }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x04 }, /* crop_x: 1064 */
- { IMX_8BIT, 0x0355, 0x28 },
- { IMX_8BIT, 0x0356, 0x03 }, /* crop_y: 870 */
- { IMX_8BIT, 0x0357, 0x66 },
-
- { IMX_8BIT, 0x301D, 0x30 },
-
- { IMX_8BIT, 0x3310, 0x01 },
- { IMX_8BIT, 0x3311, 0xC0 },
- { IMX_8BIT, 0x3312, 0x01 },
- { IMX_8BIT, 0x3313, 0x6E },
-
- { IMX_8BIT, 0x331C, 0x02 },
- { IMX_8BIT, 0x331D, 0xD0 },
-
- { IMX_8BIT, 0x4084, 0x01 },
- { IMX_8BIT, 0x4085, 0xC0 },
- { IMX_8BIT, 0x4086, 0x01 },
- { IMX_8BIT, 0x4087, 0x6E },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 2336x1312, 30fps, for 1080p dvs, vendor provide */
-static struct imx_reg const imx134_2336_1312_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x16 }, /* down scaling = 16/22 = 8/11 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:34 */
- { IMX_8BIT, 0x0345, 0x22 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:332 */
- { IMX_8BIT, 0x0347, 0x4C }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3245 */
- { IMX_8BIT, 0x0349, 0xAD }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2135 */
- { IMX_8BIT, 0x034B, 0x57 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
- { IMX_8BIT, 0x034F, 0x20 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C },
- { IMX_8BIT, 0x0355, 0x8C },
- { IMX_8BIT, 0x0356, 0x07 },
- { IMX_8BIT, 0x0357, 0x0C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x20 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xEB },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x20 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1936x1096, 30fps, for 1080p still capture */
-static struct imx_reg const imx134_1936_1096_30fps_v1[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
-	{ IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1A }, /* downscaling 16/26*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:64 */
- { IMX_8BIT, 0x0345, 0x40 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:340 */
- { IMX_8BIT, 0x0347, 0x54 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3209 */
- { IMX_8BIT, 0x0349, 0x89 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2121 */
- { IMX_8BIT, 0x034B, 0x49 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0C }, /* crop x:3146 */
- { IMX_8BIT, 0x0355, 0x4A },
-	{ IMX_8BIT, 0x0356, 0x06 }, /* crop y:1782 */
- { IMX_8BIT, 0x0357, 0xF6 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x80 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x38 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x80 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x38 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1936x1096, 30fps, for 1080p still capture, vendor provide */
-static struct imx_reg const imx134_1936_1096_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* disable binning */
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1B }, /* downscaling 16/27*/
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
-	{ IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:6 */
- { IMX_8BIT, 0x0345, 0x06 }, /* x_addr_start[7:0] */
-	{ IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:308 */
- { IMX_8BIT, 0x0347, 0x34 }, /* y_addr_start[7:0] */
-	{ IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3273 */
- { IMX_8BIT, 0x0349, 0xC9 }, /* x_addr_end[7:0] */
-	{ IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2159 */
- { IMX_8BIT, 0x034B, 0x6F }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x07 }, /* x_output_size[15:8]:1936 */
- { IMX_8BIT, 0x034D, 0x90 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x04 }, /* y_output_size[15:8]:1096 */
- { IMX_8BIT, 0x034F, 0x48 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
-	{ IMX_8BIT, 0x0354, 0x0C }, /* crop x:3268 */
- { IMX_8BIT, 0x0355, 0xC4 },
-	{ IMX_8BIT, 0x0356, 0x07 }, /* crop y:1850 */
- { IMX_8BIT, 0x0357, 0x3A },
- { IMX_8BIT, 0x301D, 0x30 },
-	{ IMX_8BIT, 0x3310, 0x07 }, /* determined by mode and output size */
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x1E },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1296x736, 30fps, for 720p still capture, vendor provide */
-static struct imx_reg const imx134_1296_736_30fps_v2[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x14 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
-	{ IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:20 */
- { IMX_8BIT, 0x0345, 0x14 }, /* x_addr_start[7:0] */
-	{ IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:312 */
- { IMX_8BIT, 0x0347, 0x38 }, /* y_addr_start[7:0] */
-	{ IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3259 */
- { IMX_8BIT, 0x0349, 0xBB }, /* x_addr_end[7:0] */
-	{ IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2151 */
- { IMX_8BIT, 0x034B, 0x67 }, /* y_addr_end[7:0] */
-	{ IMX_8BIT, 0x034C, 0x05 }, /* x_output_size[15:8]:1296 */
- { IMX_8BIT, 0x034D, 0x10 }, /* x_output_size[7:0] */
-	{ IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:736 */
- { IMX_8BIT, 0x034F, 0xE0 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x54 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x98 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x05 },
- { IMX_8BIT, 0x3311, 0x10 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0xE0 },
- { IMX_8BIT, 0x331C, 0x01 },
- { IMX_8BIT, 0x331D, 0x10 },
- { IMX_8BIT, 0x4084, 0x05 },
- { IMX_8BIT, 0x4085, 0x10 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0xE0 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-	/* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-/* 4 lane 1568x880, 30fps, for 720p dvs, vendor provide */
-static struct imx_reg const imx134_1568_880_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xA9 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning*/
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
-	/* Optional Function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xAF },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-static struct imx_reg const imx134_1568_876_60fps_0625[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0x8F },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2107 */
- { IMX_8BIT, 0x034B, 0x3B }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:876 */
- { IMX_8BIT, 0x034F, 0x6C }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x6C },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x6C },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x6F },
- { IMX_8BIT, 0x0831, 0x27 },
- { IMX_8BIT, 0x0832, 0x4F },
- { IMX_8BIT, 0x0833, 0x2F },
- { IMX_8BIT, 0x0834, 0x2F },
- { IMX_8BIT, 0x0835, 0x2F },
- { IMX_8BIT, 0x0836, 0x9F },
- { IMX_8BIT, 0x0837, 0x37 },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-
-/* 4 lane for 720p dvs, vendor provided */
-static struct imx_reg const imx134_1568_880[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:72 */
- { IMX_8BIT, 0x0345, 0x48 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x01 }, /* y_addr_start[15:8]:356 */
- { IMX_8BIT, 0x0347, 0x64 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3207 */
- { IMX_8BIT, 0x0349, 0x87 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x08 }, /* y_addr_end[15:8]:2115 */
- { IMX_8BIT, 0x034B, 0x43 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x06 }, /* x_output_size[15:8]:1568 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x03 }, /* y_output_size[15:8]:880 */
- { IMX_8BIT, 0x034F, 0x70 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x03 },
- { IMX_8BIT, 0x0357, 0x70 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xF2 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x09 },
- { IMX_8BIT, 0x0203, 0xD2 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-/* 4 lane for 480p dvs, default 60fps, vendor provided */
-static struct imx_reg const imx134_880_592[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 }, /* binning */
- { IMX_8BIT, 0x0391, 0x22 }, /* 2x2 binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1D }, /* downscaling ratio = 16/29 */
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* x_addr_start[15:8]:44 */
- { IMX_8BIT, 0x0345, 0x2C }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x00 }, /* y_addr_start[15:8]:160 */
- { IMX_8BIT, 0x0347, 0xA0 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0C }, /* x_addr_end[15:8]:3235 */
- { IMX_8BIT, 0x0349, 0xA3 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x09 }, /* y_addr_end[15:8]:2307 */
- { IMX_8BIT, 0x034B, 0x03 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x03 }, /* x_output_size[15:8]:880 */
- { IMX_8BIT, 0x034D, 0x70 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x02 }, /* y_output_size[15:8]:592 */
- { IMX_8BIT, 0x034F, 0x50 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x06 },
- { IMX_8BIT, 0x0355, 0x3C },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x32 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0x70 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x50 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0x70 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x50 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-static struct imx_reg const imx134_2336_1308_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- /* mode set clear */
- { IMX_8BIT, 0x3A43, 0x01 },
- /* Clock Setting */
- { IMX_8BIT, 0x011E, 0x13 },
- { IMX_8BIT, 0x011F, 0x33 },
- { IMX_8BIT, 0x0301, 0x05 },
- { IMX_8BIT, 0x0303, 0x01 },
- { IMX_8BIT, 0x0305, 0x0C },
- { IMX_8BIT, 0x0309, 0x05 },
- { IMX_8BIT, 0x030B, 0x01 },
- { IMX_8BIT, 0x030C, 0x01 },
- { IMX_8BIT, 0x030D, 0xC8 },
- { IMX_8BIT, 0x030E, 0x01 },
- { IMX_8BIT, 0x3A06, 0x11 },
-
- /* Mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 }, /* binning off */
- { IMX_8BIT, 0x0391, 0x11 }, /* no binning */
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 }, /* H/V resize */
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 }, /* down scaling 16/16 = 1 */
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
-
- /* Optional function setting */
- { IMX_8BIT, 0x0700, 0x00 },
- { IMX_8BIT, 0x3A63, 0x00 },
- { IMX_8BIT, 0x4100, 0xF8 },
- { IMX_8BIT, 0x4203, 0xFF },
- { IMX_8BIT, 0x4344, 0x00 },
- { IMX_8BIT, 0x441C, 0x01 },
-
- /* Size setting */
- { IMX_8BIT, 0x0344, 0x01 }, /* x_addr_start[15:8]:472 */
- { IMX_8BIT, 0x0345, 0xD8 }, /* x_addr_start[7:0] */
- { IMX_8BIT, 0x0346, 0x02 }, /* y_addr_start[15:8]:580 */
- { IMX_8BIT, 0x0347, 0x44 }, /* y_addr_start[7:0] */
- { IMX_8BIT, 0x0348, 0x0A }, /* x_addr_end[15:8]:2807 */
- { IMX_8BIT, 0x0349, 0xF7 }, /* x_addr_end[7:0] */
- { IMX_8BIT, 0x034A, 0x07 }, /* y_addr_end[15:8]:1891 */
- { IMX_8BIT, 0x034B, 0x5F+4 }, /* y_addr_end[7:0] */
- { IMX_8BIT, 0x034C, 0x09 }, /* x_output_size[15:8]:2336 */
- { IMX_8BIT, 0x034D, 0x20 }, /* x_output_size[7:0] */
- { IMX_8BIT, 0x034E, 0x05 }, /* y_output_size[15:8]:1312 */
- { IMX_8BIT, 0x034F, 0x1C+4 }, /* y_output_size[7:0] */
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 },
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x1C+4 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x1C+4 },
- { IMX_8BIT, 0x331C, 0x03 },
- { IMX_8BIT, 0x331D, 0xE8 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
-
- /* Global Timing Setting */
- { IMX_8BIT, 0x0830, 0x77 },
- { IMX_8BIT, 0x0831, 0x2F },
- { IMX_8BIT, 0x0832, 0x5F },
- { IMX_8BIT, 0x0833, 0x37 },
- { IMX_8BIT, 0x0834, 0x37 },
- { IMX_8BIT, 0x0835, 0x37 },
- { IMX_8BIT, 0x0836, 0xBF },
- { IMX_8BIT, 0x0837, 0x3F },
- { IMX_8BIT, 0x0839, 0x1F },
- { IMX_8BIT, 0x083A, 0x17 },
- { IMX_8BIT, 0x083B, 0x02 },
-
- /* Integration Time Setting */
- { IMX_8BIT, 0x0202, 0x05 },
- { IMX_8BIT, 0x0203, 0x42 },
-
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00 },
- { IMX_8BIT, 0x0231, 0x00 },
- { IMX_8BIT, 0x0233, 0x00 },
- { IMX_8BIT, 0x0234, 0x00 },
- { IMX_8BIT, 0x0235, 0x40 },
- { IMX_8BIT, 0x0238, 0x00 },
- { IMX_8BIT, 0x0239, 0x04 },
- { IMX_8BIT, 0x023B, 0x00 },
- { IMX_8BIT, 0x023C, 0x01 },
- { IMX_8BIT, 0x33B0, 0x04 },
- { IMX_8BIT, 0x33B1, 0x00 },
- { IMX_8BIT, 0x33B3, 0x00 },
- { IMX_8BIT, 0x33B4, 0x01 },
- { IMX_8BIT, 0x3800, 0x00 },
- { IMX_TOK_TERM, 0, 0 }
-};
-
-struct imx_resolution imx134_res_preview[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_720_592_30fps,
- .width = 720,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_552_30fps_preview",
- .regs = imx134_820_552_30fps,
- .width = 820,
- .height = 552,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_820_616_preview_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_preview_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_preview_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_preview_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-struct imx_resolution imx134_res_still[] = {
- {
- .desc = "imx134_CIF_30fps",
- .regs = imx134_1424_1168_30fps,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_still_30fps",
- .regs = imx134_1936_1096_30fps_v2,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1640_1232_still_30fps",
- .regs = imx134_1640_1232_30fps,
- .width = 1640,
- .height = 1232,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_8M_still_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- /* WORKAROUND for FW performance limitation */
- .fps = 8,
- .pixels_per_line = 6400,
- .lines_per_frame = 5312,
- },
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
-
-struct imx_resolution imx134_res_video[] = {
- {
- .desc = "imx134_QCIF_DVS_30fps",
- .regs = imx134_240_196_30fps,
- .width = 240,
- .height = 196,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_CIF_DVS_30fps",
- .regs = imx134_448_366_30fps,
- .width = 448,
- .height = 366,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_VGA_30fps",
- .regs = imx134_820_616_30fps,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- },
- {
- .desc = "imx134_480p",
- .regs = imx134_880_592,
- .width = 880,
- .height = 592,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1568_880",
- .regs = imx134_1568_880,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2700,
- },
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_30fps",
- .regs = imx134_2336_1312_30fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- .desc = "imx134_1080p_dvs_60fps",
- .regs = imx134_2336_1308_60fps,
- .width = 2336,
- .height = 1312,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 3600,
- .lines_per_frame = 1350,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
- {
- /* This setting is only used for SDV mode */
- .desc = "imx134_8M_sdv_30fps",
- .regs = imx134_8M_30fps,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 3600,
- .lines_per_frame = 2518,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- },
-};
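
A side note on the fps_options entries in the tables above: each option pairs a line length (pixels_per_line) with a frame length (lines_per_frame), and the frame rate falls out of the video-timing pixel clock divided by their product. The snippet below only sketches that relation; the ~272 MHz pixel clock is an assumed example value, not a figure taken from this header, and the helper is not driver code.

#include <stdio.h>

/* assumed SMIA-style relation: fps = vt_pix_clk / (pixels_per_line * lines_per_frame) */
static unsigned int imx_fps(unsigned long long vt_pix_clk_hz,
                            unsigned int pixels_per_line,
                            unsigned int lines_per_frame)
{
        return vt_pix_clk_hz /
               ((unsigned long long)pixels_per_line * lines_per_frame);
}

int main(void)
{
        /* 3600 x 2518 timing at an assumed ~272 MHz pixel clock -> ~30 fps */
        printf("%u fps\n", imx_fps(272160000ULL, 3600, 2518));
        return 0;
}
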
-
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx135.h b/drivers/staging/media/atomisp/i2c/imx/imx135.h
deleted file mode 100644
index 58b43af909f2..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx135.h
+++ /dev/null
@@ -1,3374 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX135_H__
-#define __IMX135_H__
-
-#include "common.h"
-
-#define IMX_SC_CMMN_CHIP_ID_H 0x0016
-#define IMX_SC_CMMN_CHIP_ID_L 0x0017
-
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define IMX_F_NUMBER_DEFAULT 0x16000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define IMX_F_NUMBER_RANGE 0x160a160a
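
The three packed values above encode a numerator/denominator pair in a single 32-bit word, as the comments describe. Below is a small, purely illustrative decoder; it is not part of the driver, and the helper name and printout are made up for the example. The macro values are copied from this header.

#include <stdio.h>
#include <stdint.h>

#define IMX_FOCAL_LENGTH_DEFAULT 0x1710064
#define IMX_F_NUMBER_DEFAULT     0x16000a

/* numerator in bits 31-16, denominator in bits 15-0 */
static void print_ratio(const char *name, uint32_t packed)
{
        uint16_t num = packed >> 16;
        uint16_t den = packed & 0xffff;

        printf("%s = %u/%u = %.2f\n", name, num, den, (double)num / den);
}

int main(void)
{
        print_ratio("focal length", IMX_FOCAL_LENGTH_DEFAULT); /* 369/100 -> 3.69 mm */
        print_ratio("f-number", IMX_F_NUMBER_DEFAULT);         /* 22/10  -> f/2.2   */
        return 0;
}
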
-
-#define GROUPED_PARAMETER_HOLD_ENABLE {IMX_8BIT, 0x0104, 0x1}
-#define GROUPED_PARAMETER_HOLD_DISABLE {IMX_8BIT, 0x0104, 0x0}
-
-#define IMX135_EMBEDDED_DATA_LINE_NUM 2
-#define IMX135_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX135_OUTPUT_FORMAT_RAW10 0x0a0a
-/*
- * We use three different MIPI rates for our modes based on the resolution and
- * FPS requirements. So we have three PLL configurations and these are based
- * on the EMC friendly MIPI values.
- *
- * Maximum clock: Pix clock @ 360.96MHz MIPI @ 451.2MHz 902.4mbps
- * Reduced clock: Pix clock @ 273.6MHz MIPI @ 342.0MHz 684.0mbps
- * Binning modes: Pix clock @ 335.36MHz MIPI @ 209.6MHz 419.2mbps
- * Global Timing registers are based on the data rates and these are part of
- * the below clock definitions.
- */
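
For what it's worth, the per-lane data rates quoted in the PLL macros below appear to follow from the divider registers they program. The sketch beneath spells out that arithmetic under assumptions that are not stated in this header: a 19.2 MHz external clock, 0x0305 acting as the PLL pre-divider, 0x030C/0x030D as the 16-bit multiplier, 0x030B as the post-divider, and a DDR link clock at half the data rate. Treat it as a back-of-the-envelope check, not as the driver's clocking code.

#include <stdio.h>

/* assumed: rate_mbps = extclk / pre_div * pll_mult / post_div */
static double mipi_mbps(double extclk_mhz, unsigned int pre_div,
                        unsigned int pll_mult, unsigned int post_div)
{
        return extclk_mhz / pre_div * pll_mult / post_div;
}

int main(void)
{
        /* 451.2 MHz macro: 0x0305=0x0c, 0x030c/0x030d=0x0234, 0x030b=0x01 */
        printf("%.1f Mbps\n", mipi_mbps(19.2, 0x0c, 0x0234, 0x01)); /* 902.4 */
        /* 209.6 MHz macro: 0x0305=0x06, 0x030c/0x030d=0x0106, 0x030b=0x02 */
        printf("%.1f Mbps\n", mipi_mbps(19.2, 0x06, 0x0106, 0x02)); /* 419.2 */
        return 0;
}
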
-/* MIPI 499.2MHz 998.4mbps PIXCLK: 399.36MHz */
-#define PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x70}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 451.2MHz 902.4mbps PIXCLK: 360.96MHz */
-#define PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x0c}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x02}, \
- {IMX_8BIT, 0x030d, 0x34}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x7f}, \
- {IMX_8BIT, 0x0831, 0x37}, \
- {IMX_8BIT, 0x0832, 0x67}, \
- {IMX_8BIT, 0x0833, 0x3f}, \
- {IMX_8BIT, 0x0834, 0x3f}, \
- {IMX_8BIT, 0x0835, 0x47}, \
- {IMX_8BIT, 0x0836, 0xdf}, \
- {IMX_8BIT, 0x0837, 0x47}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 209.6MHz, 419.2mbps PIXCLK: 335.36 MHz */
-#define PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x06}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x02}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x06}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x12}, \
- {IMX_8BIT, 0x0830, 0x5f}, \
- {IMX_8BIT, 0x0831, 0x1f}, \
- {IMX_8BIT, 0x0832, 0x3f}, \
- {IMX_8BIT, 0x0833, 0x1f}, \
- {IMX_8BIT, 0x0834, 0x1f}, \
- {IMX_8BIT, 0x0835, 0x17}, \
- {IMX_8BIT, 0x0836, 0x67}, \
- {IMX_8BIT, 0x0837, 0x27}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* MIPI 342MHz 684mbps PIXCLK: 273.6MHz */
-#define PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY \
- {IMX_8BIT, 0x011e, 0x13}, \
- {IMX_8BIT, 0x011f, 0x33}, \
- {IMX_8BIT, 0x0301, 0x05}, \
- {IMX_8BIT, 0x0303, 0x01}, \
- {IMX_8BIT, 0x0305, 0x08}, \
- {IMX_8BIT, 0x0309, 0x05}, \
- {IMX_8BIT, 0x030b, 0x01}, \
- {IMX_8BIT, 0x030c, 0x01}, \
- {IMX_8BIT, 0x030d, 0x1d}, \
- {IMX_8BIT, 0x030e, 0x01}, \
- {IMX_8BIT, 0x3a06, 0x11}, \
- {IMX_8BIT, 0x0830, 0x77}, \
- {IMX_8BIT, 0x0831, 0x2f}, \
- {IMX_8BIT, 0x0832, 0x4f}, \
- {IMX_8BIT, 0x0833, 0x37}, \
- {IMX_8BIT, 0x0834, 0x2f}, \
- {IMX_8BIT, 0x0835, 0x37}, \
- {IMX_8BIT, 0x0836, 0xa7}, \
- {IMX_8BIT, 0x0837, 0x37}, \
- {IMX_8BIT, 0x0839, 0x1f}, \
- {IMX_8BIT, 0x083a, 0x17}, \
- {IMX_8BIT, 0x083b, 0x02}
-
-/* Basic settings: Applied only once after the sensor power up */
-static struct imx_reg const imx135_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- { IMX_8BIT, 0x0220, 0x01},
- { IMX_8BIT, 0x3008, 0xB0},
- { IMX_8BIT, 0x320A, 0x01},
- { IMX_8BIT, 0x320D, 0x10},
- { IMX_8BIT, 0x3216, 0x2E},
- { IMX_8BIT, 0x3230, 0x0A},
- { IMX_8BIT, 0x3228, 0x05},
- { IMX_8BIT, 0x3229, 0x02},
- { IMX_8BIT, 0x322C, 0x02},
- { IMX_8BIT, 0x3302, 0x10},
- { IMX_8BIT, 0x3390, 0x45},
- { IMX_8BIT, 0x3409, 0x0C},
- { IMX_8BIT, 0x340B, 0xF5},
- { IMX_8BIT, 0x340C, 0x2D},
- { IMX_8BIT, 0x3412, 0x41},
- { IMX_8BIT, 0x3413, 0xAD},
- { IMX_8BIT, 0x3414, 0x1E},
- { IMX_8BIT, 0x3427, 0x04},
- { IMX_8BIT, 0x3480, 0x1E},
- { IMX_8BIT, 0x3484, 0x1E},
- { IMX_8BIT, 0x3488, 0x1E},
- { IMX_8BIT, 0x348C, 0x1E},
- { IMX_8BIT, 0x3490, 0x1E},
- { IMX_8BIT, 0x3494, 0x1E},
- { IMX_8BIT, 0x349C, 0x38},
- { IMX_8BIT, 0x34A3, 0x38},
- { IMX_8BIT, 0x3511, 0x8F},
- { IMX_8BIT, 0x3518, 0x00},
- { IMX_8BIT, 0x3519, 0x94},
- { IMX_8BIT, 0x3833, 0x20},
- { IMX_8BIT, 0x3893, 0x01},
- { IMX_8BIT, 0x38C2, 0x08},
- { IMX_8BIT, 0x38C3, 0x08},
- { IMX_8BIT, 0x3C09, 0x01},
- { IMX_8BIT, 0x4000, 0x0E},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x84},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x441D, 0x28},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x3F},
- { IMX_8BIT, 0x4447, 0xFF},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446E, 0x01},
- { IMX_8BIT, 0x4500, 0x1F},
- { IMX_8BIT, 0x600a, 0x00},
- { IMX_8BIT, 0x380a, 0x00},
- { IMX_8BIT, 0x380b, 0x00},
- { IMX_8BIT, 0x4103, 0x00},
- { IMX_8BIT, 0x4243, 0x9a},
- { IMX_8BIT, 0x4330, 0x01},
- { IMX_8BIT, 0x4331, 0x90},
- { IMX_8BIT, 0x4332, 0x02},
- { IMX_8BIT, 0x4333, 0x58},
- { IMX_8BIT, 0x4334, 0x03},
- { IMX_8BIT, 0x4335, 0x20},
- { IMX_8BIT, 0x4336, 0x03},
- { IMX_8BIT, 0x4337, 0x84},
- { IMX_8BIT, 0x433C, 0x01},
- { IMX_8BIT, 0x4340, 0x02},
- { IMX_8BIT, 0x4341, 0x58},
- { IMX_8BIT, 0x4342, 0x03},
- { IMX_8BIT, 0x4343, 0x52},
- { IMX_8BIT, 0x4364, 0x0b},
- { IMX_8BIT, 0x4368, 0x00},
- { IMX_8BIT, 0x4369, 0x0f},
- { IMX_8BIT, 0x436a, 0x03},
- { IMX_8BIT, 0x436b, 0xa8},
- { IMX_8BIT, 0x436c, 0x00},
- { IMX_8BIT, 0x436d, 0x00},
- { IMX_8BIT, 0x436e, 0x00},
- { IMX_8BIT, 0x436f, 0x06},
- { IMX_8BIT, 0x4281, 0x21},
- { IMX_8BIT, 0x4282, 0x18},
- { IMX_8BIT, 0x4283, 0x04},
- { IMX_8BIT, 0x4284, 0x08},
- { IMX_8BIT, 0x4287, 0x7f},
- { IMX_8BIT, 0x4288, 0x08},
- { IMX_8BIT, 0x428c, 0x08},
- { IMX_8BIT, 0x4297, 0x00},
- { IMX_8BIT, 0x4299, 0x7E},
- { IMX_8BIT, 0x42A4, 0xFB},
- { IMX_8BIT, 0x42A5, 0x7E},
- { IMX_8BIT, 0x42A6, 0xDF},
- { IMX_8BIT, 0x42A7, 0xB7},
- { IMX_8BIT, 0x42AF, 0x03},
- { IMX_8BIT, 0x4207, 0x03},
- { IMX_8BIT, 0x4218, 0x00},
- { IMX_8BIT, 0x421B, 0x20},
- { IMX_8BIT, 0x421F, 0x04},
- { IMX_8BIT, 0x4222, 0x02},
- { IMX_8BIT, 0x4223, 0x22},
- { IMX_8BIT, 0x422E, 0x54},
- { IMX_8BIT, 0x422F, 0xFB},
- { IMX_8BIT, 0x4230, 0xFF},
- { IMX_8BIT, 0x4231, 0xFE},
- { IMX_8BIT, 0x4232, 0xFF},
- { IMX_8BIT, 0x4235, 0x58},
- { IMX_8BIT, 0x4236, 0xF7},
- { IMX_8BIT, 0x4237, 0xFD},
- { IMX_8BIT, 0x4239, 0x4E},
- { IMX_8BIT, 0x423A, 0xFC},
- { IMX_8BIT, 0x423B, 0xFD},
- { IMX_8BIT, 0x4300, 0x00},
- { IMX_8BIT, 0x4316, 0x12},
- { IMX_8BIT, 0x4317, 0x22},
- { IMX_8BIT, 0x4318, 0x00},
- { IMX_8BIT, 0x4319, 0x00},
- { IMX_8BIT, 0x431A, 0x00},
- { IMX_8BIT, 0x4324, 0x03},
- { IMX_8BIT, 0x4325, 0x20},
- { IMX_8BIT, 0x4326, 0x03},
- { IMX_8BIT, 0x4327, 0x84},
- { IMX_8BIT, 0x4328, 0x03},
- { IMX_8BIT, 0x4329, 0x20},
- { IMX_8BIT, 0x432A, 0x03},
- { IMX_8BIT, 0x432B, 0x20},
- { IMX_8BIT, 0x432C, 0x01},
- { IMX_8BIT, 0x432D, 0x01},
- { IMX_8BIT, 0x4338, 0x02},
- { IMX_8BIT, 0x4339, 0x00},
- { IMX_8BIT, 0x433A, 0x00},
- { IMX_8BIT, 0x433B, 0x02},
- { IMX_8BIT, 0x435A, 0x03},
- { IMX_8BIT, 0x435B, 0x84},
- { IMX_8BIT, 0x435E, 0x01},
- { IMX_8BIT, 0x435F, 0xFF},
- { IMX_8BIT, 0x4360, 0x01},
- { IMX_8BIT, 0x4361, 0xF4},
- { IMX_8BIT, 0x4362, 0x03},
- { IMX_8BIT, 0x4363, 0x84},
- { IMX_8BIT, 0x437B, 0x01},
- { IMX_8BIT, 0x4400, 0x00}, /* STATS off, ISP does not support STATS */
- { IMX_8BIT, 0x4401, 0x3F},
- { IMX_8BIT, 0x4402, 0xFF},
- { IMX_8BIT, 0x4404, 0x13},
- { IMX_8BIT, 0x4405, 0x26},
- { IMX_8BIT, 0x4406, 0x07},
- { IMX_8BIT, 0x4408, 0x20},
- { IMX_8BIT, 0x4409, 0xE5},
- { IMX_8BIT, 0x440A, 0xFB},
- { IMX_8BIT, 0x440C, 0xF6},
- { IMX_8BIT, 0x440D, 0xEA},
- { IMX_8BIT, 0x440E, 0x20},
- { IMX_8BIT, 0x4410, 0x00},
- { IMX_8BIT, 0x4411, 0x00},
- { IMX_8BIT, 0x4412, 0x3F},
- { IMX_8BIT, 0x4413, 0xFF},
- { IMX_8BIT, 0x4414, 0x1F},
- { IMX_8BIT, 0x4415, 0xFF},
- { IMX_8BIT, 0x4416, 0x20},
- { IMX_8BIT, 0x4417, 0x00},
- { IMX_8BIT, 0x4418, 0x1F},
- { IMX_8BIT, 0x4419, 0xFF},
- { IMX_8BIT, 0x441A, 0x20},
- { IMX_8BIT, 0x441B, 0x00},
- { IMX_8BIT, 0x441D, 0x40},
- { IMX_8BIT, 0x441E, 0x1E},
- { IMX_8BIT, 0x441F, 0x38},
- { IMX_8BIT, 0x4420, 0x01},
- { IMX_8BIT, 0x4444, 0x00},
- { IMX_8BIT, 0x4445, 0x00},
- { IMX_8BIT, 0x4446, 0x1D},
- { IMX_8BIT, 0x4447, 0xF9},
- { IMX_8BIT, 0x4452, 0x00},
- { IMX_8BIT, 0x4453, 0xA0},
- { IMX_8BIT, 0x4454, 0x08},
- { IMX_8BIT, 0x4455, 0x00},
- { IMX_8BIT, 0x4456, 0x0F},
- { IMX_8BIT, 0x4457, 0xFF},
- { IMX_8BIT, 0x4458, 0x18},
- { IMX_8BIT, 0x4459, 0x18},
- { IMX_8BIT, 0x445A, 0x3F},
- { IMX_8BIT, 0x445B, 0x3A},
- { IMX_8BIT, 0x445C, 0x00},
- { IMX_8BIT, 0x445D, 0x28},
- { IMX_8BIT, 0x445E, 0x01},
- { IMX_8BIT, 0x445F, 0x90},
- { IMX_8BIT, 0x4460, 0x00},
- { IMX_8BIT, 0x4461, 0x60},
- { IMX_8BIT, 0x4462, 0x00},
- { IMX_8BIT, 0x4463, 0x00},
- { IMX_8BIT, 0x4464, 0x00},
- { IMX_8BIT, 0x4465, 0x00},
- { IMX_8BIT, 0x446C, 0x00},
- { IMX_8BIT, 0x446D, 0x00},
- { IMX_8BIT, 0x446E, 0x00},
- { IMX_8BIT, 0x452A, 0x02},
- { IMX_8BIT, 0x0712, 0x01},
- { IMX_8BIT, 0x0713, 0x00},
- { IMX_8BIT, 0x0714, 0x01},
- { IMX_8BIT, 0x0715, 0x00},
- { IMX_8BIT, 0x0716, 0x01},
- { IMX_8BIT, 0x0717, 0x00},
- { IMX_8BIT, 0x0718, 0x01},
- { IMX_8BIT, 0x0719, 0x00},
- { IMX_8BIT, 0x4500, 0x1F },
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- { IMX_8BIT, 0x0205, 0x00},
- { IMX_8BIT, 0x020E, 0x01},
- { IMX_8BIT, 0x020F, 0x00},
- { IMX_8BIT, 0x0210, 0x02},
- { IMX_8BIT, 0x0211, 0x00},
- { IMX_8BIT, 0x0212, 0x02},
- { IMX_8BIT, 0x0213, 0x00},
- { IMX_8BIT, 0x0214, 0x01},
- { IMX_8BIT, 0x0215, 0x00},
- /* HDR Setting */
- { IMX_8BIT, 0x0230, 0x00},
- { IMX_8BIT, 0x0231, 0x00},
- { IMX_8BIT, 0x0233, 0x00},
- { IMX_8BIT, 0x0234, 0x00},
- { IMX_8BIT, 0x0235, 0x40},
- { IMX_8BIT, 0x0238, 0x00},
- { IMX_8BIT, 0x0239, 0x04},
- { IMX_8BIT, 0x023B, 0x00},
- { IMX_8BIT, 0x023C, 0x01},
- { IMX_8BIT, 0x33B0, 0x04},
- { IMX_8BIT, 0x33B1, 0x00},
- { IMX_8BIT, 0x33B3, 0x00},
- { IMX_8BIT, 0x33B4, 0x01},
- { IMX_8BIT, 0x3800, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- { IMX_TOK_TERM, 0, 0}
-};
-
-/********* Preview, continuous capture and still modes *****************/
-
-static struct imx_reg const imx135_13m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 0, 4207,3119 4208x3120 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4208x3120 */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 13MP reduced pixel clock MIPI 342MHz is EMC friendly*/
-static struct imx_reg const imx135_13m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size Setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6F},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x2F},
- {IMX_8BIT, 0x034C, 0x10},
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x0C},
- {IMX_8BIT, 0x034F, 0x30},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x30},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x0C},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* If scaling, Fill this */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_10m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 0, 376, 4207, 2743 */
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x78},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x6f},
- {IMX_8BIT, 0x034A, 0x0a},
- {IMX_8BIT, 0x034B, 0xb7},
- {IMX_8BIT, 0x034C, 0x10}, /* 4208x2368 */
- {IMX_8BIT, 0x034D, 0x70},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0x40},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10},
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x40},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x10},
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0x40},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x68},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 8.5 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effective cropped setting should be 352x288 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 368x304
- * cropped region is 3128x2584
- */
-static struct imx_reg const imx135_368x304_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x88}, /* 136/16=8.5 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x1C}, /* 540 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x0C}, /* 268 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x53}, /* 3667 */
- {IMX_8BIT, 0x034A, 0x0B}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0x23}, /* 2851 */
- {IMX_8BIT, 0x034C, 0x01}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 368 */
- {IMX_8BIT, 0x034E, 0x01}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x30}, /* 304 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x38},
- {IMX_8BIT, 0x0356, 0x0A},
- {IMX_8BIT, 0x0357, 0x18},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x01}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x01},
- {IMX_8BIT, 0x3313, 0x30},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xD0},
- {IMX_8BIT, 0x4084, 0x01}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x70},
- {IMX_8BIT, 0x4086, 0x01},
- {IMX_8BIT, 0x4087, 0x30},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
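
A quick, purely illustrative check of the crop/scale arithmetic in the comment above (and in the scaled modes that follow): the analogue crop window is the sensor output size multiplied by the downscale factor SCALE_M/16, where SCALE_M is assumed here to be the value programmed via 0x0404/0x0405. None of this is driver code; it only reproduces the numbers in the comments.

#include <stdio.h>

int main(void)
{
        unsigned int scale_m = 0x88;           /* 136 -> 136/16 = 8.5x downscale */
        unsigned int out_w = 368, out_h = 304; /* sensor output incl. ISP padding */

        /* prints "crop 3128x2584", matching the X/Y_ADD_STA..END window above */
        printf("crop %ux%u\n", out_w * scale_m / 16, out_h * scale_m / 16);
        return 0;
}
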
-
-/*
- * It is 1/4 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The xga cropped setting should be 816x612 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 832x628
- * cropped region is 3328x2512
- */
-static struct imx_reg const imx135_xga_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x44},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
-/* {IMX_8BIT, 0x4203, 0xFF}, */
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xB8}, /* 440 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x30}, /* 304 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xB7}, /* 4207-440=3767 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xFF}, /* 3119-304=2815 */
- {IMX_8BIT, 0x034C, 0x03}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x40}, /* 832 */
- {IMX_8BIT, 0x034E, 0x02}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x74}, /* 628 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x03}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x40},
- {IMX_8BIT, 0x0356, 0x02},
- {IMX_8BIT, 0x0357, 0x74},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x03}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x40},
- {IMX_8BIT, 0x3312, 0x02},
- {IMX_8BIT, 0x3313, 0x74},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0x21},
- {IMX_8BIT, 0x4084, 0x03}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x40},
- {IMX_8BIT, 0x4086, 0x02},
- {IMX_8BIT, 0x4087, 0x74},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 28/16 DS from (16:9)8m cropped setting.
- *
- * The 8m(16:9) cropped setting is 3360x1890 effective res.
- * - this is larger than the expected 3264x1836 FOV
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1936x1096
- * cropped region is 3388x1918
- */
-static struct imx_reg const imx135_1936x1096_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1C}, /* 28/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x9A}, /* 410 */
- {IMX_8BIT, 0x0346, 0x02}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x58}, /* 600 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xD5}, /* 3797 */
- {IMX_8BIT, 0x034A, 0x09}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xD5}, /* 2517 */
- {IMX_8BIT, 0x034C, 0x07}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1936 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x48}, /* 1096 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0D}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x3C},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x7E},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x07}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x48},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x07}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x48},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 2.125 DS from (3:2)8m cropped setting.
- *
- * The 8m(3:2) cropped setting is 2992x2448 effective res.
- * The ISP effective cropped setting should be 1408x1152 effective res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1424x1168
- * cropped region is 3026x2482
- */
-static struct imx_reg const imx135_1424x1168_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11}, /* no binning */
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* resize */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x22}, /* 34/16=2.125 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x02}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0x4E}, /* 590 */
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x3E}, /* 318 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0x1F}, /* 3615 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 2799 */
- {IMX_8BIT, 0x034C, 0x05}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x90}, /* 1424 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0x90}, /* 1168 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0B}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD2},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xB2},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x05}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x90},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x05}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x90},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * It is 1/2 binning from 8m cropped setting.
- *
- * The 8m cropped setting is 3264x2448 effective res.
- * The 2m cropped setting should be 1632x1224 effect res.
- *
- * Consider ISP 16x16 padding:
- * sensor outputs 1648x1240
- * cropped region is 3296x2480
- */
-static struct imx_reg const imx135_2m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* X_ADD_STA */
- {IMX_8BIT, 0x0345, 0xC8}, /* 464(1D0) -> 456(1C8)*/
- {IMX_8BIT, 0x0346, 0x01}, /* Y_ADD_STA */
- {IMX_8BIT, 0x0347, 0x40}, /* 320 */
- {IMX_8BIT, 0x0348, 0x0E}, /* X_ADD_END */
- {IMX_8BIT, 0x0349, 0xA7}, /* 4207-456=3751 */
- {IMX_8BIT, 0x034A, 0x0A}, /* Y_ADD_END */
- {IMX_8BIT, 0x034B, 0xEF}, /* 3119-320=2799 */
- {IMX_8BIT, 0x034C, 0x06}, /* X_OUT_SIZE */
- {IMX_8BIT, 0x034D, 0x70}, /* 1648 */
- {IMX_8BIT, 0x034E, 0x04}, /* Y_OUT_SIZE */
- {IMX_8BIT, 0x034F, 0xD8}, /* 1240 */
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x70},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0xD8},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x06}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0x70},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0xD8},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * 8M Cropped 16:9 setting
- *
- * Effect res: 3264x1836
- * Sensor out: 3280x1852
- */
-static struct imx_reg const imx135_6m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x02}, /* 634 */
- {IMX_8BIT, 0x0347, 0x7A},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x09}, /* 2485 */
- {IMX_8BIT, 0x034B, 0xB5},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07}, /* 1852 */
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x07},
- {IMX_8BIT, 0x0357, 0x3C},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_cropped[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xD0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x48},
- {IMX_8BIT, 0x0348, 0x0E},
- {IMX_8BIT, 0x0349, 0x9F},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xE7},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09}, /* 2464 */
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x0C}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0xD0},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0xA0},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x00}, /* ?? */
- {IMX_8BIT, 0x331D, 0x10},
- {IMX_8BIT, 0x4084, 0x00}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Scaling related? */
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_8m_scaled_from_12m_for_mipi342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* Scaling */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x14},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x1B},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x2464 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x09},
- {IMX_8BIT, 0x034F, 0xA0},
- {IMX_8BIT, 0x0350, 0x00}, /* No Dig crop */
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* Cut out size same as the size after crop */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x0C},
- {IMX_8BIT, 0x0357, 0x08},
- {IMX_8BIT, 0x301D, 0x30}, /* ?? */
- {IMX_8BIT, 0x3310, 0x0C}, /* Write H and V size same as output size? */
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x09},
- {IMX_8BIT, 0x3313, 0xA0},
- {IMX_8BIT, 0x331C, 0x02}, /* ?? */
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C}, /* Resize IMG H and V size -> Scaling related? */
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x09},
- {IMX_8BIT, 0x4087, 0xA0},
- {IMX_8BIT, 0x4400, 0x00}, /* STATS off */
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_6m_for_mipi_342[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_342MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x00},
- {IMX_8BIT, 0x0391, 0x11},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x14},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00}, /* 36, 194, 1039, a9f 4100x2316 */
- {IMX_8BIT, 0x0345, 0x36},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x94},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x39},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0x9F},
- {IMX_8BIT, 0x034C, 0x0C}, /* 3280x1852 */
- {IMX_8BIT, 0x034D, 0xD0},
- {IMX_8BIT, 0x034E, 0x07},
- {IMX_8BIT, 0x034F, 0x3C},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x10}, /* 4100x2316 */
- {IMX_8BIT, 0x0355, 0x04},
- {IMX_8BIT, 0x0356, 0x09},
- {IMX_8BIT, 0x0357, 0x0C},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x0C},
- {IMX_8BIT, 0x3311, 0xD0},
- {IMX_8BIT, 0x3312, 0x07},
- {IMX_8BIT, 0x3313, 0x3C},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0xA0},
- {IMX_8BIT, 0x4084, 0x0C},
- {IMX_8BIT, 0x4085, 0xD0},
- {IMX_8BIT, 0x4086, 0x07},
- {IMX_8BIT, 0x4087, 0x3C},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/*
- * FOV is: 3280x2464, larger than 3264x2448.
- * Sensor output: 336x256
- * Cropping region: 3444x2624
- * (The register arithmetic behind these numbers is sketched after this table.)
- */
-static struct imx_reg const imx135_336x256[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02}, /* 2x binning */
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x52}, /* scaling: 82/16 */
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01}, /* x_start: 374 */
- {IMX_8BIT, 0x0345, 0x76},
- {IMX_8BIT, 0x0346, 0x00}, /* y_start: 248 */
- {IMX_8BIT, 0x0347, 0xF8},
- {IMX_8BIT, 0x0348, 0x0E}, /* x_end: 3817 */
- {IMX_8BIT, 0x0349, 0xE9},
- {IMX_8BIT, 0x034A, 0x0B}, /* y_end: 2871 */
- {IMX_8BIT, 0x034B, 0x37},
- {IMX_8BIT, 0x034C, 0x01}, /* x_out: 336 */
- {IMX_8BIT, 0x034D, 0x50},
- {IMX_8BIT, 0x034E, 0x01}, /* y_out: 256 */
- {IMX_8BIT, 0x034F, 0x00},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x06}, /* dig x_out: 1722 */
- {IMX_8BIT, 0x0355, 0xBA},
- {IMX_8BIT, 0x0356, 0x05}, /* dig y_out: 1312 */
- {IMX_8BIT, 0x0357, 0x20},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x3311, 0x50},
- {IMX_8BIT, 0x3312, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x3313, 0x00},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x01}, /* ?: x_out */
- {IMX_8BIT, 0x4085, 0x50},
- {IMX_8BIT, 0x4086, 0x01}, /* ?: y_out */
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
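
As a sanity check on the 336x256 table above, every 16-bit quantity is split
across a high/low register pair, and the values in the trailing comments can be
reproduced from them: the analog crop is (x_end - x_start + 1) x
(y_end - y_start + 1), 2x2 binning halves both axes, and dividing by the scale
factor scale_m/16 gives the final output size. The sketch below only
illustrates that arithmetic; the helper names are made up for this note and are
not part of the driver.

/* Illustrative arithmetic for imx135_336x256; not driver code. */
static inline unsigned int imx_pair(unsigned int hi, unsigned int lo)
{
	return (hi << 8) | lo;				/* e.g. 0x01, 0x50 -> 336 */
}

static int imx135_336x256_check(void)
{
	unsigned int x_start = imx_pair(0x01, 0x76);	/*  374 (0x0344/45) */
	unsigned int x_end   = imx_pair(0x0E, 0xE9);	/* 3817 (0x0348/49) */
	unsigned int y_start = imx_pair(0x00, 0xF8);	/*  248 (0x0346/47) */
	unsigned int y_end   = imx_pair(0x0B, 0x37);	/* 2871 (0x034A/4B) */
	unsigned int scale_m = 0x52;			/* 82 -> scaling 82/16 */

	unsigned int crop_w = x_end - x_start + 1;	/* 3444 */
	unsigned int crop_h = y_end - y_start + 1;	/* 2624 */
	unsigned int bin_w  = crop_w / 2;		/* 1722 = dig x_out */
	unsigned int bin_h  = crop_h / 2;		/* 1312 = dig y_out */
	unsigned int out_w  = bin_w * 16 / scale_m;	/*  336 */
	unsigned int out_h  = bin_h * 16 / scale_m;	/*  256 */

	return (crop_w == 3444 && crop_h == 2624 &&
		out_w == 336 && out_h == 256) ? 0 : -1;
}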
-
-static struct imx_reg const imx135_1m[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x1F},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x58},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x28},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x17},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x07},
- {IMX_8BIT, 0x034C, 0x04},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x03},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x07},
- {IMX_8BIT, 0x0355, 0xE0},
- {IMX_8BIT, 0x0356, 0x05},
- {IMX_8BIT, 0x0357, 0xF0},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x04},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x03},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x02},
- {IMX_8BIT, 0x331D, 0x4E},
- {IMX_8BIT, 0x4084, 0x04},
- {IMX_8BIT, 0x4085, 0x10},
- {IMX_8BIT, 0x4086, 0x03},
- {IMX_8BIT, 0x4087, 0x10},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg const imx135_3m_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01}, /* Binning */
- {IMX_8BIT, 0x0391, 0x22}, /* 2x2 binning */
- {IMX_8BIT, 0x0392, 0x00}, /* average */
- {IMX_8BIT, 0x0401, 0x00},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x10},
- {IMX_8BIT, 0x4082, 0x01},
- {IMX_8BIT, 0x4083, 0x01},
- {IMX_8BIT, 0x4203, 0xFF},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x28},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x08},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x47},
- {IMX_8BIT, 0x034A, 0x0C},
- {IMX_8BIT, 0x034B, 0x27},
- {IMX_8BIT, 0x034C, 0x08},
- {IMX_8BIT, 0x034D, 0x10},
- {IMX_8BIT, 0x034E, 0x06},
- {IMX_8BIT, 0x034F, 0x10},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x10},
- {IMX_8BIT, 0x0356, 0x06},
- {IMX_8BIT, 0x0357, 0x10},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x08},
- {IMX_8BIT, 0x3311, 0x10},
- {IMX_8BIT, 0x3312, 0x06},
- {IMX_8BIT, 0x3313, 0x10},
- {IMX_8BIT, 0x331C, 0x00},
- {IMX_8BIT, 0x331D, 0xAA},
- {IMX_8BIT, 0x4084, 0x00},
- {IMX_8BIT, 0x4085, 0x00},
- {IMX_8BIT, 0x4086, 0x00},
- {IMX_8BIT, 0x4087, 0x00},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-/* 1080P 1936x1104 */
-static struct imx_reg const imx135_1080p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03},
- {IMX_8BIT, 0x0112, 0x0A},
- {IMX_8BIT, 0x0113, 0x0A},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0387, 0x01},
- {IMX_8BIT, 0x0390, 0x01},
- {IMX_8BIT, 0x0391, 0x22},
- {IMX_8BIT, 0x0392, 0x00},
- {IMX_8BIT, 0x0401, 0x02},
- {IMX_8BIT, 0x0404, 0x00},
- {IMX_8BIT, 0x0405, 0x11},
- {IMX_8BIT, 0x4082, 0x00},
- {IMX_8BIT, 0x4083, 0x00},
- {IMX_8BIT, 0x7006, 0x04},
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x2E},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x84},
- {IMX_8BIT, 0x0348, 0x10},
- {IMX_8BIT, 0x0349, 0x41},
- {IMX_8BIT, 0x034A, 0x0A},
- {IMX_8BIT, 0x034B, 0xAF},
- {IMX_8BIT, 0x034C, 0x07},
- {IMX_8BIT, 0x034D, 0x90},
- {IMX_8BIT, 0x034E, 0x04},
- {IMX_8BIT, 0x034F, 0x50},
- {IMX_8BIT, 0x0350, 0x00},
- {IMX_8BIT, 0x0351, 0x00},
- {IMX_8BIT, 0x0352, 0x00},
- {IMX_8BIT, 0x0353, 0x00},
- {IMX_8BIT, 0x0354, 0x08},
- {IMX_8BIT, 0x0355, 0x0A},
- {IMX_8BIT, 0x0356, 0x04},
- {IMX_8BIT, 0x0357, 0x96},
- {IMX_8BIT, 0x301D, 0x30},
- {IMX_8BIT, 0x3310, 0x07},
- {IMX_8BIT, 0x3311, 0x90},
- {IMX_8BIT, 0x3312, 0x04},
- {IMX_8BIT, 0x3313, 0x50},
- {IMX_8BIT, 0x331C, 0x01},
- {IMX_8BIT, 0x331D, 0x00},
- {IMX_8BIT, 0x4084, 0x07},
- {IMX_8BIT, 0x4085, 0x90},
- {IMX_8BIT, 0x4086, 0x04},
- {IMX_8BIT, 0x4087, 0x50},
- {IMX_8BIT, 0x4400, 0x00},
- {IMX_TOK_TERM, 0, 0},
-};
-
-static const struct imx_reg imx135_1080p_nodvs_fullfov_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 168,464,4039,2655: 3872x2192 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0xD0 },
- { IMX_8BIT, 0x0348, 0x0F },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x5F },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x0355, 0x90 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x48 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P NODVS 1936x1096 */
-static const struct imx_reg imx135_1080p_nodvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x11 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,396,4161,2727: 4116x2332 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x8C },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xA7 },
- { IMX_8BIT, 0x034C, 0x07 }, /*1936 x 1096 */
- { IMX_8BIT, 0x034D, 0x90 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0x48 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1166 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x8E },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x07 },
- { IMX_8BIT, 0x3311, 0x90 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0x48 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x07 },
- { IMX_8BIT, 0x4085, 0x90 },
- { IMX_8BIT, 0x4086, 0x04 },
- { IMX_8BIT, 0x4087, 0x48 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P 10%DVS 2104x1184 */
-static const struct imx_reg imx135_1080p_10_dvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x00 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x10 },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 0,376,4207,2743: 4208x2368 */
- { IMX_8BIT, 0x0345, 0x00 },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x78 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x6F },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0xB7 },
- { IMX_8BIT, 0x034C, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x034D, 0x38 },
- { IMX_8BIT, 0x034E, 0x04 },
- { IMX_8BIT, 0x034F, 0xA0 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2104 x 1184 */
- { IMX_8BIT, 0x0355, 0x38 },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0xA0 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x08 },
- { IMX_8BIT, 0x3311, 0x38 },
- { IMX_8BIT, 0x3312, 0x04 },
- { IMX_8BIT, 0x3313, 0xA0 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xB0 },
- { IMX_8BIT, 0x4084, 0x00 },
- { IMX_8BIT, 0x4085, 0x00 },
- { IMX_8BIT, 0x4086, 0x00 },
- { IMX_8BIT, 0x4087, 0x00 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-static const struct imx_reg imx135_720pdvs_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2E },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /*2058 x 1156 */
- { IMX_8BIT, 0x0355, 0x0A },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0x4C },
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/******************* Video Modes ******************/
-
-/* 1080P DVS 2336x1320 */
-static const struct imx_reg imx135_2336x1320_max_clock[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_451_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 60,404,4147,2715: 4088x2312 */
- { IMX_8BIT, 0x0345, 0x3C },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x33 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x09 }, /*2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x0F }, /* 4088x2312 */
- { IMX_8BIT, 0x0355, 0xF8 },
- { IMX_8BIT, 0x0356, 0x09 },
- { IMX_8BIT, 0x0357, 0x08 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x04 },
- { IMX_8BIT, 0x331D, 0xE2 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 1080P DVS 2336x1320 Cropped */
-static const struct imx_reg imx135_2336x1320_cropped_mipi499[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_499_2MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x00 },
- { IMX_8BIT, 0x0391, 0x11 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x1C },
- { IMX_8BIT, 0x4082, 0x01 },
- { IMX_8BIT, 0x4083, 0x01 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x03 }, /* 936,900,3271,2219: 2336x1320 */
- { IMX_8BIT, 0x0345, 0xA8 },
- { IMX_8BIT, 0x0346, 0x03 },
- { IMX_8BIT, 0x0347, 0x84 },
- { IMX_8BIT, 0x0348, 0x0C },
- { IMX_8BIT, 0x0349, 0xC7 },
- { IMX_8BIT, 0x034A, 0x08 },
- { IMX_8BIT, 0x034B, 0xAB },
- { IMX_8BIT, 0x034C, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x05 },
- { IMX_8BIT, 0x034F, 0x28 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x09 }, /* 2336 x 1320 */
- { IMX_8BIT, 0x0355, 0x20 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x28 },
- { IMX_8BIT, 0x301D, 0x30 },
- { IMX_8BIT, 0x3310, 0x09 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x05 },
- { IMX_8BIT, 0x3313, 0x28 },
- { IMX_8BIT, 0x331C, 0x00 },
- { IMX_8BIT, 0x331D, 0xB4 },
- { IMX_8BIT, 0x4084, 0x09 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x05 },
- { IMX_8BIT, 0x4087, 0x28 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* 720P DVS 1568 x 880 */
-static const struct imx_reg imx135_720p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x15 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 46,404,4161,2715: 4116x2312 */
- { IMX_8BIT, 0x0345, 0x2e },
- { IMX_8BIT, 0x0346, 0x01 },
- { IMX_8BIT, 0x0347, 0x94 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x41 },
- { IMX_8BIT, 0x034A, 0x0A },
- { IMX_8BIT, 0x034B, 0x9B },
- { IMX_8BIT, 0x034C, 0x06 }, /*1568 x 880 */
- { IMX_8BIT, 0x034D, 0x20 },
- { IMX_8BIT, 0x034E, 0x03 },
- { IMX_8BIT, 0x034F, 0x70 },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x0a },
- { IMX_8BIT, 0x0356, 0x04 },
- { IMX_8BIT, 0x0357, 0x84 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x06 },
- { IMX_8BIT, 0x3311, 0x20 },
- { IMX_8BIT, 0x3312, 0x03 },
- { IMX_8BIT, 0x3313, 0x70 },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 }, /* TODO! */
- { IMX_8BIT, 0x4084, 0x06 },
- { IMX_8BIT, 0x4085, 0x20 },
- { IMX_8BIT, 0x4086, 0x03 },
- { IMX_8BIT, 0x4087, 0x70 },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* wvga: H : 1640 V : 1024 */
-static const struct imx_reg imx135_wvga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 },
- {IMX_8BIT, 0x0345, 0x36 },
- {IMX_8BIT, 0x0346, 0x01 },
- {IMX_8BIT, 0x0347, 0x18 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x39 },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x17 },
- {IMX_8BIT, 0x034C, 0x06 },
- {IMX_8BIT, 0x034D, 0x68 },
- {IMX_8BIT, 0x034E, 0x04 },
- {IMX_8BIT, 0x034F, 0x00 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x08 },
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x05 },
- {IMX_8BIT, 0x0357, 0x00 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x06 },
- {IMX_8BIT, 0x3311, 0x68 },
- {IMX_8BIT, 0x3312, 0x04 },
- {IMX_8BIT, 0x3313, 0x00 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0xBD },
- {IMX_8BIT, 0x4084, 0x06 },
- {IMX_8BIT, 0x4085, 0x68 },
- {IMX_8BIT, 0x4086, 0x04 },
- {IMX_8BIT, 0x4087, 0x00 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P 1036 x 696 */
-static const struct imx_reg imx135_480p_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 }, /* No scaling */
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x2784*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0xA8 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0B },
- {IMX_8BIT, 0x034B, 0x88 },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036 * 696 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0xB8 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x696 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xB8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0xB8 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0xB8 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 480P DVS 936 x 602 */
-static const struct imx_reg imx135_480p_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* mode setting */
- { IMX_8BIT, 0x0108, 0x03 },
- { IMX_8BIT, 0x0112, 0x0A },
- { IMX_8BIT, 0x0113, 0x0A },
- { IMX_8BIT, 0x0381, 0x01 },
- { IMX_8BIT, 0x0383, 0x01 },
- { IMX_8BIT, 0x0385, 0x01 },
- { IMX_8BIT, 0x0387, 0x01 },
- { IMX_8BIT, 0x0390, 0x01 },
- { IMX_8BIT, 0x0391, 0x22 },
- { IMX_8BIT, 0x0392, 0x00 },
- { IMX_8BIT, 0x0401, 0x02 },
- { IMX_8BIT, 0x0404, 0x00 },
- { IMX_8BIT, 0x0405, 0x23 },
- { IMX_8BIT, 0x4082, 0x00 },
- { IMX_8BIT, 0x4083, 0x00 },
- { IMX_8BIT, 0x7006, 0x04 },
- /* size setting */
- { IMX_8BIT, 0x0344, 0x00 }, /* 56,244,4151,2877: 4096x2634 */
- { IMX_8BIT, 0x0345, 0x38 },
- { IMX_8BIT, 0x0346, 0x00 },
- { IMX_8BIT, 0x0347, 0xf4 },
- { IMX_8BIT, 0x0348, 0x10 },
- { IMX_8BIT, 0x0349, 0x37 },
- { IMX_8BIT, 0x034A, 0x0b },
- { IMX_8BIT, 0x034B, 0x3d },
- { IMX_8BIT, 0x034C, 0x03 }, /* 936 x 602 */
- { IMX_8BIT, 0x034D, 0xa8 },
- { IMX_8BIT, 0x034E, 0x02 },
- { IMX_8BIT, 0x034F, 0x5a },
- { IMX_8BIT, 0x0350, 0x00 },
- { IMX_8BIT, 0x0351, 0x00 },
- { IMX_8BIT, 0x0352, 0x00 },
- { IMX_8BIT, 0x0353, 0x00 },
- { IMX_8BIT, 0x0354, 0x08 }, /* 2058x1156 */
- { IMX_8BIT, 0x0355, 0x00 },
- { IMX_8BIT, 0x0356, 0x05 },
- { IMX_8BIT, 0x0357, 0x25 },
- { IMX_8BIT, 0x301D, 0x30 }, /* TODO! */
- { IMX_8BIT, 0x3310, 0x03 },
- { IMX_8BIT, 0x3311, 0xa8 },
- { IMX_8BIT, 0x3312, 0x02 },
- { IMX_8BIT, 0x3313, 0x5a },
- { IMX_8BIT, 0x331C, 0x01 }, /* TODO! */
- { IMX_8BIT, 0x331D, 0xd6 },
- { IMX_8BIT, 0x4084, 0x03 },
- { IMX_8BIT, 0x4085, 0xa8 },
- { IMX_8BIT, 0x4086, 0x02 },
- { IMX_8BIT, 0x4087, 0x5a },
- { IMX_8BIT, 0x4400, 0x00 },
- { IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 1036 V : 780 */
-static const struct imx_reg imx135_vga_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4144x3120*/
- {IMX_8BIT, 0x0345, 0x20 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x4F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
- {IMX_8BIT, 0x034C, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x034D, 0x0C },
- {IMX_8BIT, 0x034E, 0x03 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1036x780 */
- {IMX_8BIT, 0x0355, 0x0C },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x04 },
- {IMX_8BIT, 0x3311, 0x0C },
- {IMX_8BIT, 0x3312, 0x03 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x04 },
- {IMX_8BIT, 0x4085, 0x0C },
- {IMX_8BIT, 0x4086, 0x03 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 820 V : 616 */
-static const struct imx_reg imx135_vga_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x14 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 52,20,4155, 3099 4104x3080*/
- {IMX_8BIT, 0x0345, 0x34 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x3B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x03 }, /* 820x616 */
- {IMX_8BIT, 0x034D, 0x34 },
- {IMX_8BIT, 0x034E, 0x02 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x04 }, /* 1026x770 */
- {IMX_8BIT, 0x0355, 0x02 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x03 },
- {IMX_8BIT, 0x3311, 0x34 },
- {IMX_8BIT, 0x3312, 0x02 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x03 },
- {IMX_8BIT, 0x4085, 0x34 },
- {IMX_8BIT, 0x4086, 0x02 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* VGA: H : 436 V : 360 */
-static const struct imx_reg imx135_436x360_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x22 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,0,3995,3119 3784x3120 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x00 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x2F },
-
- {IMX_8BIT, 0x034C, 0x01 }, /* 436x360 */
- {IMX_8BIT, 0x034D, 0xB4 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x68 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x12 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x0C },
-
- {IMX_8BIT, 0x0354, 0x03 }, /* 928x768 crop from 946x780*/
- {IMX_8BIT, 0x0355, 0xA0 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x00 },
-
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0xB4 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x68 },
- {IMX_8BIT, 0x331C, 0x02 },
- {IMX_8BIT, 0x331D, 0x21 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0xB4 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x68 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QVGA: H : 408 V : 308 */
-static const struct imx_reg imx135_qvga__dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 64,20,4143,3099 4080x3080 */
- {IMX_8BIT, 0x0345, 0x40 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x10 },
- {IMX_8BIT, 0x0349, 0x2F },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x01 }, /* 408x308 */
- {IMX_8BIT, 0x034D, 0x98 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x34 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 1020x770 */
- {IMX_8BIT, 0x0355, 0xFC },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x98 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x34 },
- {IMX_8BIT, 0x331C, 0x01 },
- {IMX_8BIT, 0x331D, 0x68 },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x98 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x34 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 368 V : 304 */
-static const struct imx_reg imx135_cif_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x28 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x01 }, /* 264,42,3943,3081 3680x3040 */
- {IMX_8BIT, 0x0345, 0x08 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x2a },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x67 },
- {IMX_8BIT, 0x034A, 0x0c },
- {IMX_8BIT, 0x034B, 0x09 },
- {IMX_8BIT, 0x034C, 0x01 }, /* 368x304 */
- {IMX_8BIT, 0x034D, 0x70 },
- {IMX_8BIT, 0x034E, 0x01 },
- {IMX_8BIT, 0x034F, 0x30 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 920x760 */
- {IMX_8BIT, 0x0355, 0x98 },
- {IMX_8BIT, 0x0356, 0x02 },
- {IMX_8BIT, 0x0357, 0xf8 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x01 },
- {IMX_8BIT, 0x3311, 0x70 },
- {IMX_8BIT, 0x3312, 0x01 },
- {IMX_8BIT, 0x3313, 0x30 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x01 },
- {IMX_8BIT, 0x4085, 0x70 },
- {IMX_8BIT, 0x4086, 0x01 },
- {IMX_8BIT, 0x4087, 0x30 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* CIF H : 1888 V : 1548 */
-static const struct imx_reg imx135_cif_binning_1888x1548[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x22 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x00 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x10 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 264,42, 3776x3096 */
- {IMX_8BIT, 0x0345, 0xD8 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x0C },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x97 },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x23 },
- {IMX_8BIT, 0x034C, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x034D, 0x60 },
- {IMX_8BIT, 0x034E, 0x06 },
- {IMX_8BIT, 0x034F, 0x0C },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x07 }, /* 1888x1548 */
- {IMX_8BIT, 0x0355, 0x60 },
- {IMX_8BIT, 0x0356, 0x06 },
- {IMX_8BIT, 0x0357, 0x0C },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x07 },
- {IMX_8BIT, 0x3311, 0x60 },
- {IMX_8BIT, 0x3312, 0x06 },
- {IMX_8BIT, 0x3313, 0x0C },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c? */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x07 },
- {IMX_8BIT, 0x4085, 0x60 },
- {IMX_8BIT, 0x4086, 0x06 },
- {IMX_8BIT, 0x4087, 0x0C },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* QCIF H : 216 V : 176 */
-static const struct imx_reg imx135_qcif_dvs_binning[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- PLL_SETTINGS_FOR_MIPI_209_6MHZ_SALTBAY,
- /* Mode setting */
- {IMX_8BIT, 0x0108, 0x03 },
- {IMX_8BIT, 0x0112, 0x0A },
- {IMX_8BIT, 0x0113, 0x0A },
- {IMX_8BIT, 0x0381, 0x01 },
- {IMX_8BIT, 0x0383, 0x01 },
- {IMX_8BIT, 0x0385, 0x01 },
- {IMX_8BIT, 0x0387, 0x01 },
- {IMX_8BIT, 0x0390, 0x01 },
- {IMX_8BIT, 0x0391, 0x44 },
- {IMX_8BIT, 0x0392, 0x00 },
- {IMX_8BIT, 0x0401, 0x02 },
- {IMX_8BIT, 0x0404, 0x00 },
- {IMX_8BIT, 0x0405, 0x46 },
- {IMX_8BIT, 0x4082, 0x00 },
- {IMX_8BIT, 0x4083, 0x00 },
- {IMX_8BIT, 0x7006, 0x04 },
- /* Size setting */
- {IMX_8BIT, 0x0344, 0x00 }, /* 212,20,3995,3099 3784x3080 */
- {IMX_8BIT, 0x0345, 0xD4 },
- {IMX_8BIT, 0x0346, 0x00 },
- {IMX_8BIT, 0x0347, 0x14 },
- {IMX_8BIT, 0x0348, 0x0F },
- {IMX_8BIT, 0x0349, 0x9B },
- {IMX_8BIT, 0x034A, 0x0C },
- {IMX_8BIT, 0x034B, 0x1B },
- {IMX_8BIT, 0x034C, 0x00 }, /* 216x176 */
- {IMX_8BIT, 0x034D, 0xD8 },
- {IMX_8BIT, 0x034E, 0x00 },
- {IMX_8BIT, 0x034F, 0xB0 },
- {IMX_8BIT, 0x0350, 0x00 },
- {IMX_8BIT, 0x0351, 0x00 },
- {IMX_8BIT, 0x0352, 0x00 },
- {IMX_8BIT, 0x0353, 0x00 },
- {IMX_8BIT, 0x0354, 0x03 }, /* 946x770 */
- {IMX_8BIT, 0x0355, 0xB2 },
- {IMX_8BIT, 0x0356, 0x03 },
- {IMX_8BIT, 0x0357, 0x02 },
- {IMX_8BIT, 0x301D, 0x30 },
- {IMX_8BIT, 0x3310, 0x00 },
- {IMX_8BIT, 0x3311, 0xD8 },
- {IMX_8BIT, 0x3312, 0x00 },
- {IMX_8BIT, 0x3313, 0xB0 },
- {IMX_8BIT, 0x331C, 0x02 }, /* TODO! binning 4x4 must be 021c */
- {IMX_8BIT, 0x331D, 0x1C },
- {IMX_8BIT, 0x4084, 0x00 },
- {IMX_8BIT, 0x4085, 0xD8 },
- {IMX_8BIT, 0x4086, 0x00 },
- {IMX_8BIT, 0x4087, 0xB0 },
- {IMX_8BIT, 0x4400, 0x00 },
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*
- * ISP scaling is now supported in offline capture use cases. Because of that
- * we need only a few modes to cover the different aspect ratios from the
- * sensor; the ISP will scale the output based on the resolution requested by
- * the HAL.
- *
- * There is a performance impact when the continuous viewfinder option is
- * chosen for resolutions above 8MP, so the 8MP and 6MP modes are kept and
- * lower resolutions fall back to 8MP or 6MP respectively for downscaling,
- * depending on the aspect ratio.
- */
-struct imx_resolution imx135_res_preview_mofd[] = {
- {
- .desc = "imx135_cif_binning_preview",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9114,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_preview",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__cont_cap",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_scaled_from_12m__cont_cap",
- .regs = imx135_8m_scaled_from_12m,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3280,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_10m__cont_cap",
- .regs = imx135_10m,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_13m__cont_cap",
- .regs = imx135_13m,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 24,
- .pixels_per_line = 4572,
- .lines_per_frame = 3290,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
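
For the fps_options entries in these resolution tables, the advertised frame
rate follows from the pixel clock quoted in the per-entry comments:
fps is roughly pixel_clock / (pixels_per_line * lines_per_frame). For example,
335.36 MHz / (9144 * 1226) is about 30 for the binning preview modes, and
360.96 MHz / (4572 * 2624) is about 30 for the full-resolution 8M mode. A small
helper expressing this check is sketched below; the function name is
illustrative only and is not a driver symbol.

/* Illustrative only: relate fps_options fields to the quoted pixel clock. */
static unsigned int imx135_mode_fps(unsigned long long pixclk_hz,
				    unsigned int pixels_per_line,
				    unsigned int lines_per_frame)
{
	unsigned long long frame_px =
		(unsigned long long)pixels_per_line * lines_per_frame;

	return (unsigned int)((pixclk_hz + frame_px / 2) / frame_px);
}

/*
 * imx135_mode_fps(335360000ULL, 9144, 1226) -> 30  (binning modes)
 * imx135_mode_fps(360960000ULL, 4572, 2624) -> 30  (8M cropped, 451.2MHz MIPI)
 * imx135_mode_fps(273600000ULL, 9144, 5990) ->  5  (13M still, 342MHz MIPI)
 */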
-
-struct imx_resolution imx135_res_preview[] = {
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936x1096_cropped",
- .regs = imx135_1936x1096_cropped,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- { /* Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
-
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP scaling is now supported in online capture use cases. Because of that
- * we need only a few modes to cover the different aspect ratios from the
- * sensor; the ISP will scale the output based on the resolution requested by
- * the HAL.
- *
- * There is a performance impact when the continuous viewfinder option is
- * chosen for resolutions above 8MP, so the 8MP and 6MP modes are kept and
- * lower resolutions fall back to 8MP or 6MP respectively for downscaling,
- * depending on the aspect ratio.
- */
-struct imx_resolution imx135_res_still_mofd[] = {
- {
- .desc = "imx135_cif_binning_still",
- .regs = imx135_cif_binning_1888x1548,
- .width = 1888,
- .height = 1548,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_binning_preview",
- .regs = imx135_vga_binning,
- .width = 1036,
- .height = 780,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_preview",
- .regs = imx135_480p_binning,
- .width = 1036,
- .height = 696,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1080p_binning_still",
- .regs = imx135_1080p_binning,
- .width = 1936,
- .height = 1104,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_3m__still",
- .regs = imx135_3m_binning,
- .width = 2064,
- .height = 1552,
- .fps_options = {
- { /* Binning Pixel clock: 335.36MHz */
- .fps = 15,
- .pixels_per_line = 9114,
- .lines_per_frame = 2453,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_6m_for_mipi_342_still",
- .regs = imx135_6m_for_mipi_342,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9114,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_8m_scaled_from_12m_for_mipi342_still",
- .regs = imx135_8m_scaled_from_12m_for_mipi342,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 8,
- .pixels_per_line = 7672,
- .lines_per_frame = 4458,
- },
- { /* Pixel clock: 273.6MHz */
- .fps = 15,
- .pixels_per_line = 5500,
- .lines_per_frame = 3314,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_10m_for_mipi_342_still",
- .regs = imx135_10m_for_mipi_342,
- .width = 4208,
- .height = 2368,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 11,
- .pixels_per_line = 9144,
- .lines_per_frame = 2664,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
- {
- .desc = "imx135_13m_still",
- .regs = imx135_13m_for_mipi_342,
- .width = 4208,
- .height = 3120,
- .fps_options = {
- { /* Pixel clock: 273.6MHz */
- .fps = 5,
- .pixels_per_line = 9144,
- .lines_per_frame = 5990,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 342000,
- },
-};
-
-struct imx_resolution imx135_res_still[] = {
- {
- .desc = "imx135_qvga",
- .regs = imx135_336x256,
- .width = 336,
- .height = 256,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_cif",
- .regs = imx135_368x304_cropped,
- .width = 368,
- .height = 304,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_xga_cropped_video",
- .regs = imx135_xga_cropped,
- .width = 832,
- .height = 628,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2M_for_11:9",
- .regs = imx135_1424x1168_cropped,
- .width = 1424,
- .height = 1168,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_2m_cropped_video",
- .regs = imx135_2m_cropped,
- .width = 1648,
- .height = 1240,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 15,
- .pixels_per_line = 6466,
- .lines_per_frame = 3710,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cropped_video",
- .regs = imx135_6m_cropped,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 8,
- .pixels_per_line = 8850,
- .lines_per_frame = 5080,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-/*
- * ISP scaling is not supported for video modes, so separate sensor modes are
- * needed for the video use cases.
- */
-struct imx_resolution imx135_res_video[] = {
- /* For binning modes pix clock is 335.36 MHz. */
- {
- .desc = "imx135_qcif_dvs_binning_video",
- .regs = imx135_qcif_dvs_binning,
- .width = 216,
- .height = 176,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_cif_binning_video",
- .regs = imx135_cif_binning,
- .width = 368,
- .height = 304,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_qvga__dvs_binning_video",
- .regs = imx135_qvga__dvs_binning,
- .width = 408,
- .height = 308,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_436x360_binning_video",
- .regs = imx135_436x360_binning,
- .width = 436,
- .height = 360,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_vga_dvs_binning_video",
- .regs = imx135_vga_dvs_binning,
- .width = 820,
- .height = 616,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 9144,
- .lines_per_frame = 1226,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_480p_dvs_binning_video",
- .regs = imx135_480p_dvs_binning,
- .width = 936,
- .height = 602,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_720P_dvs_video",
- .regs = imx135_720pdvs_max_clock,
- .width = 1568,
- .height = 880,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_wvga_dvs_binning_video",
- .regs = imx135_wvga_dvs_binning,
- .width = 1640,
- .height = 1024,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 5464,
- .lines_per_frame = 2046,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 209600,
- },
- {
- .desc = "imx135_1936_1096_fullfov_max_clock",
- .regs = imx135_1080p_nodvs_max_clock,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 5850,
- .lines_per_frame = 2000,
- },
- {/* Pixel Clock : 360.96 MHz */
- .fps = 60,
- .pixels_per_line = 4572,
- .lines_per_frame = 1310,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_1080P_dvs_video",
- .regs = imx135_2336x1320_max_clock,
- .width = 2336,
- .height = 1320,
- .fps_options = {
- {/* Pixel Clock : 360.96 MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2632,
- .regs = imx135_2336x1320_max_clock,
- .mipi_freq = 451200,
- },
- {/* Pixel Clock : 399.36MHz */
- .fps = 60,
- .pixels_per_line = 4754,
- .lines_per_frame = 1400,
- .regs = imx135_2336x1320_cropped_mipi499,
- .mipi_freq = 499200,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_6m_cont_cap",
- .regs = imx135_6m,
- .width = 3280,
- .height = 1852,
- .fps_options = {
- { /* Binning Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
- {
- .desc = "imx135_8m_cropped_video",
- .regs = imx135_8m_cropped,
- .width = 3280,
- .height = 2464,
- .fps_options = {
- { /* Pixel clock: 360.96MHz */
- .fps = 30,
- .pixels_per_line = 4572,
- .lines_per_frame = 2624,
- },
- {
- }
- },
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .mipi_freq = 451200,
- },
-};
-
-#endif
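
The resolution tables removed above are typically consumed by picking the
smallest mode that still covers the requested frame size and letting the ISP
scale down, as the comments in this header describe. The loop below is only a
sketch of that selection idea under that assumption; it is not the atomisp
driver's actual mode-selection code.

/* Sketch only: choose the smallest listed mode covering the request. */
static const struct imx_resolution *
imx135_pick_mode(const struct imx_resolution *modes, int n,
		 unsigned int w, unsigned int h)
{
	const struct imx_resolution *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (modes[i].width < w || modes[i].height < h)
			continue;	/* too small; the ISP does not upscale */
		if (!best || modes[i].width * modes[i].height <
			     best->width * best->height)
			best = &modes[i];
	}

	return best;	/* NULL if no mode covers the requested size */
}

Under this rule, a 1280x720 video request against imx135_res_video would land
on the 1568x880 720p DVS entry, the smallest mode that still covers it.
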
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx175.h b/drivers/staging/media/atomisp/i2c/imx/imx175.h
deleted file mode 100644
index 5f409ccedc85..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx175.h
+++ /dev/null
@@ -1,1959 +0,0 @@
-#ifndef __IMX175_H__
-#define __IMX175_H__
-#include "common.h"
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx_STILL_8M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_8M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x09}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_3M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x08}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x06}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x10}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x19}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx_STILL_5M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_5M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0A}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x90}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x14}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_6M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEF}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x32}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6D}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x0C}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD0}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x07}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x3C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xC4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x66}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_2M_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x0A}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x8C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x2c},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0B}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xB8}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x16}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x44}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x77},
- {IMX_8BIT, 0x3371, 0x2F},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x2F},
- {IMX_8BIT, 0x3375, 0x37},
- {IMX_8BIT, 0x3376, 0x9F},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x09},
- {IMX_8BIT, 0x33D7, 0xA0},
-
- {IMX_8BIT, 0x030e, 0x01},
- {IMX_8BIT, 0x41c0, 0x01},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x03},
- {IMX_8BIT, 0x33D5, 0x34},
- {IMX_8BIT, 0x33D6, 0x02},
- {IMX_8BIT, 0x33D7, 0x68},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WIDE_PREVIEW_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x0D}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x70}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x10}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x00}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x14}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x8C}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xBC}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x68},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0xBC},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/*****************************video************************/
-static struct imx_reg const imx_1080p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x06}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x4C}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA4}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xC6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xDB}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x42}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEA}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x61}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x09}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x05}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x20}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xD0}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0F}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x3C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_1080p_no_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x08}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xD5}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x09}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xA6}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x34}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x6B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x94}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x44}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x1B}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-/*****************************video************************/
-static struct imx_reg const imx_720p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD7}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x3E}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xEE}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x65}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x70}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x00}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x18}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_480p_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xD4}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xC8}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0A}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xF1}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xDB}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x50}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x02}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x15}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x14}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x28}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_STILL_720p_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x08}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0xCA}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x18}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x38}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x64}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x87}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x3B}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x20}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x6C}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_WVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xEC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x09}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x01},
- {IMX_8BIT, 0x030D, 0x12},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x13}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x9C}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0xD0}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x08}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0xCF}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x06}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x68}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x01}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x57},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x6F},
- {IMX_8BIT, 0x3371, 0x27},
- {IMX_8BIT, 0x3372, 0x4F},
- {IMX_8BIT, 0x3373, 0x2F},
- {IMX_8BIT, 0x3374, 0x27},
- {IMX_8BIT, 0x3375, 0x2F},
- {IMX_8BIT, 0x3376, 0x97},
- {IMX_8BIT, 0x3377, 0x37},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x0C},
- {IMX_8BIT, 0x33D5, 0xD0},
- {IMX_8BIT, 0x33D6, 0x07},
- {IMX_8BIT, 0x33D7, 0x38},
- {IMX_TOK_TERM, 0, 0}
-};
-static struct imx_reg const imx_CIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xDB}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x70}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x30}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x06}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x00}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x11}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x94}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VGA_strong_dvs_15fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0xFC}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x04}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x07}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x9E}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x1C}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0xB6}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x0C}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0xCF}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x09}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x9F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x03}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x34}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x68}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x06},
- {IMX_8BIT, 0x33D5, 0x20},
- {IMX_8BIT, 0x33D6, 0x03},
- {IMX_8BIT, 0x33D7, 0x6C},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QVGA_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x03}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0x38}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x02}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x68}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x09}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x97}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x07}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x37}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0x98}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0x34}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x01},
- {IMX_8BIT, 0x33D5, 0x98},
- {IMX_8BIT, 0x33D6, 0x01},
- {IMX_8BIT, 0x33D7, 0x34},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_QCIF_strong_dvs_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- /* shutter */
- {IMX_8BIT, 0x0202, 0x05}, /* coarse _integration_time[15:8] */
- {IMX_8BIT, 0x0203, 0x44}, /* coarse _integration_time[7:0] */
- /* pll */
- {IMX_8BIT, 0x0301, 0x05}, /* vt_pix_clk_div[7:0] */
- {IMX_8BIT, 0x0303, 0x01}, /* vt_sys_clk_div[7:0] */
- {IMX_8BIT, 0x0305, 0x06}, /* pre_pll_clk_div[7:0] */
- {IMX_8BIT, 0x0309, 0x05}, /* op_pix_clk_div[7:0] */
- {IMX_8BIT, 0x030B, 0x01}, /* op_sys_clk_div[7:0] */
- {IMX_8BIT, 0x030C, 0x00},
- {IMX_8BIT, 0x030D, 0x6D},
- /* image sizing */
- {IMX_8BIT, 0x0340, 0x05}, /* frame_length_lines[15:8] */
- {IMX_8BIT, 0x0341, 0x48}, /* frame_length_lines[7:0] */
- {IMX_8BIT, 0x0342, 0x0D}, /* line_length_pck[15:8] */
- {IMX_8BIT, 0x0343, 0x70}, /* line_length_pck[7:0] */
- {IMX_8BIT, 0x0344, 0x04}, /* x_addr_start[15:8] */
- {IMX_8BIT, 0x0345, 0xB8}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x03}, /* y_addr_start[15:8] */
- {IMX_8BIT, 0x0347, 0x70}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x08}, /* x_addr_end[15:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end[7:0] */
- {IMX_8BIT, 0x034A, 0x06}, /* y_addr_end[15:8] */
- {IMX_8BIT, 0x034B, 0x2F}, /* y_addr_end[7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size[15:8] */
- {IMX_8BIT, 0x034D, 0xD8}, /* x_output_size[7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size[15:8] */
- {IMX_8BIT, 0x034F, 0xB0}, /* y_output_size[7:0] */
- /* binning & scaling */
- {IMX_8BIT, 0x0390, 0x02}, /* binning mode */
- {IMX_8BIT, 0x0401, 0x00}, /* scaling mode*/
- {IMX_8BIT, 0x0405, 0x10}, /* scale_m[7:0] */
- /* timer */
- {IMX_8BIT, 0x3344, 0x37},
- {IMX_8BIT, 0x3345, 0x1F},
- /* timing */
- {IMX_8BIT, 0x3370, 0x5F},
- {IMX_8BIT, 0x3371, 0x17},
- {IMX_8BIT, 0x3372, 0x37},
- {IMX_8BIT, 0x3373, 0x17},
- {IMX_8BIT, 0x3374, 0x17},
- {IMX_8BIT, 0x3375, 0x0F},
- {IMX_8BIT, 0x3376, 0x57},
- {IMX_8BIT, 0x3377, 0x27},
- {IMX_8BIT, 0x33C8, 0x01},
- {IMX_8BIT, 0x33D4, 0x00},
- {IMX_8BIT, 0x33D5, 0xD8},
- {IMX_8BIT, 0x33D6, 0x00},
- {IMX_8BIT, 0x33D7, 0xB0},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx175_init_settings[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0103, 0x01},
- /* misc control */
- {IMX_8BIT, 0x3020, 0x10},
- {IMX_8BIT, 0x302D, 0x02},
- {IMX_8BIT, 0x302F, 0x80},
- {IMX_8BIT, 0x3032, 0xA3},
- {IMX_8BIT, 0x3033, 0x20},
- {IMX_8BIT, 0x3034, 0x24},
- {IMX_8BIT, 0x3041, 0x15},
- {IMX_8BIT, 0x3042, 0x87},
- {IMX_8BIT, 0x3050, 0x35},
- {IMX_8BIT, 0x3056, 0x57},
- {IMX_8BIT, 0x305D, 0x41},
- {IMX_8BIT, 0x3097, 0x69},
- {IMX_8BIT, 0x3109, 0x41},
- {IMX_8BIT, 0x3148, 0x3F},
- {IMX_8BIT, 0x330F, 0x07},
- /* csi & inck */
- {IMX_8BIT, 0x3364, 0x00},
- {IMX_8BIT, 0x3368, 0x13},
- {IMX_8BIT, 0x3369, 0x33},
- /* znr */
- {IMX_8BIT, 0x4100, 0x0E},
- {IMX_8BIT, 0x4104, 0x32},
- {IMX_8BIT, 0x4105, 0x32},
- {IMX_8BIT, 0x4108, 0x01},
- {IMX_8BIT, 0x4109, 0x7C},
- {IMX_8BIT, 0x410A, 0x00},
- {IMX_8BIT, 0x410B, 0x00},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx175_res_preview[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
-
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "WIDE_PREVIEW_30fps",
- .regs = imx_WIDE_PREVIEW_30fps,
- .width = 1640,
- .height = 956,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1000,
- .lines_per_frame = 0x0D70,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "STILL_720p_30fps",
- .regs = imx_STILL_720p_30fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1428,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_30fps",
- .regs = imx_STILL_2M_30fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_no_dvs_30fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0F3C,
- .lines_per_frame = 0x07D0,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_30fps",
- .regs = imx_STILL_3M_30fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_30fps",
- .regs = imx_STILL_5M_30fps,
- .width = 2576,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_30fps",
- .regs = imx_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D66,
- .lines_per_frame = 0x09C4,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_still[] = {
- {
- .desc = "CIF_strong_dvs_30fps",
- .regs = imx_CIF_strong_dvs_30fps,
- .width = 368,
- .height = 304,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x11DB,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261000,
- },
- {
- .desc = "VGA_strong_dvs_15fps",
- .regs = imx_VGA_strong_dvs_15fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1C86,
- .lines_per_frame = 0x079E,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "imx_STILL_720p_15fps",
- .regs = imx_STILL_720p_15fps,
- .width = 1568,
- .height = 876,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1838,
- .lines_per_frame = 0x08CA,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "STILL_2M_15fps",
- .regs = imx_STILL_2M_15fps,
- .width = 1640,
- .height = 1232,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "1080p_strong_dvs_15fps",
- .regs = imx_1080p_no_dvs_15fps,
- .width = 1940,
- .height = 1092,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x189C,
- .lines_per_frame = 0x09A6,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "STILL_3M_15fps",
- .regs = imx_STILL_3M_15fps,
- .width = 2064,
- .height = 1552,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_5M_15fps",
- .regs = imx_STILL_5M_15fps,
- .width = 2576,
- .height = 1936,
- .fps = 15,
- .pixels_per_line = 0x1646, /* consistent with regs arrays */
- .lines_per_frame = 0x0BB8, /* consistent with regs arrays */
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_6M_15fps",
- .regs = imx_STILL_6M_15fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
- {
- .desc = "STILL_8M_15fps",
- .regs = imx_STILL_8M_15fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 15,
- .pixels_per_line = 0x1646,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
- .mipi_freq = 320000,
- },
-};
-
-struct imx_resolution imx175_res_video[] = {
- {
- .desc = "QCIF_strong_dvs_30fps",
- .regs = imx_QCIF_strong_dvs_30fps,
- .width = 216,
- .height = 176,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "QVGA_strong_dvs_30fps",
- .regs = imx_QVGA_strong_dvs_30fps,
- .width = 408,
- .height = 308,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D70,
- .lines_per_frame = 0x0548,
- },
- {
- }
- },
- .mipi_freq = 174500,
- },
- {
- .desc = "VGA_strong_dvs_30fps",
- .regs = imx_VGA_strong_dvs_30fps,
- .width = 820,
- .height = 616,
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1194,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 261500,
- },
- {
- .desc = "720p_strong_dvs_30fps",
- .regs = imx_720p_strong_dvs_30fps,
- .width = 1552,
- .height = 880,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- .fps = 60,
- .pixels_per_line = 0xD70,
- .lines_per_frame = 0x444,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "480p_strong_dvs_30fps",
- .regs = imx_480p_strong_dvs_30fps,
- .width = 880,
- .height = 592,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "WVGA_strong_dvs_30fps",
- .regs = imx_WVGA_strong_dvs_30fps,
- .width = 1640,
- .height = 1024,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x139C,
- .lines_per_frame = 0x0600,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
- {
- .desc = "1080p_strong_dvs_30fps",
- .regs = imx_1080p_strong_dvs_30fps,
- .width = 2320,
- .height = 1312,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x11C6,
- .lines_per_frame = 0x06A4,
- },
- {
- }
- },
- .mipi_freq = 292500,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx208.h b/drivers/staging/media/atomisp/i2c/imx/imx208.h
deleted file mode 100644
index fed387f42f99..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx208.h
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- * Support for Sony IMX camera sensor.
- *
- * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IMX208_H__
-#define __IMX208_H__
-#include "common.h"
-
-/********************** settings for imx from vendor*********************/
-static struct imx_reg imx208_1080p_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x00}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x00}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x00}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x07}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x8F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x47}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x07}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x90}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x04}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x48}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x736_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xB4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x93}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x02}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xE0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_1296x976_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x01}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x40}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x3C}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x06}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x4F}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x04}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x0B}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x05}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x10}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x03}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xD0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x01}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x01}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x00}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x00}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x61}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_336x256_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x78}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x01}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0x24}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x17}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x23}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x01}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0x50}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x01}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0x00}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x01}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x03}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x01}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x03}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x01}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x66}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-
-static struct imx_reg imx208_192x160_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0305, 0x02}, /* PREPLLCK DIV */
- {IMX_8BIT, 0x0307, 0x54}, /* PLL MPY */
- {IMX_8BIT, 0x303C, 0x3C}, /* PLL oscillation stable wait time */
- {IMX_8BIT, 0x30A4, 0x02}, /* Default */
- {IMX_8BIT, 0x0112, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0113, 0x0A}, /* CCP_data_format : RAW 10bit */
- {IMX_8BIT, 0x0340, 0x04}, /* frame length line [15:8] */
- {IMX_8BIT, 0x0341, 0xAA}, /* frame length line [7:0] */
- {IMX_8BIT, 0x0342, 0x08}, /* line length pck [15:8] */
- {IMX_8BIT, 0x0343, 0xC8}, /* line length pck [7:0] */
- {IMX_8BIT, 0x0344, 0x02}, /* x_addr_start[12:8] */
- {IMX_8BIT, 0x0345, 0x48}, /* x_addr_start[7:0] */
- {IMX_8BIT, 0x0346, 0x00}, /* y_addr_start[12:8] */
- {IMX_8BIT, 0x0347, 0xE4}, /* y_addr_start[7:0] */
- {IMX_8BIT, 0x0348, 0x05}, /* x_addr_end [12:8] */
- {IMX_8BIT, 0x0349, 0x47}, /* x_addr_end [7:0] */
- {IMX_8BIT, 0x034A, 0x03}, /* y_addr_end [12:8] */
- {IMX_8BIT, 0x034B, 0x63}, /* y_addr_end [7:0] */
- {IMX_8BIT, 0x034C, 0x00}, /* x_output_size [ 12:8] */
- {IMX_8BIT, 0x034D, 0xC0}, /* x_output_size [7:0] */
- {IMX_8BIT, 0x034E, 0x00}, /* y_output_size [11:8] */
- {IMX_8BIT, 0x034F, 0xA0}, /* y_output_size [7:0] */
- {IMX_8BIT, 0x0381, 0x03}, /* x_even_inc */
- {IMX_8BIT, 0x0383, 0x05}, /* x_odd_inc */
- {IMX_8BIT, 0x0385, 0x03}, /* y_even_inc */
- {IMX_8BIT, 0x0387, 0x05}, /* y_odd_inc */
- {IMX_8BIT, 0x3048, 0x01}, /* VMODEFDS binning operation */
- {IMX_8BIT, 0x304E, 0x0A}, /* VTPXCK_DIV */
- {IMX_8BIT, 0x3050, 0x02}, /* OPSYCK_DIV */
- {IMX_8BIT, 0x309B, 0x00}, /* RGDAFDSUMEN */
- {IMX_8BIT, 0x30D5, 0x03}, /* HADDEN ( binning ) */
- {IMX_8BIT, 0x3301, 0x11}, /* RGLANESEL */
- {IMX_8BIT, 0x3318, 0x74}, /* MIPI Global Timing */
- {IMX_8BIT, 0x0202, 0x01}, /* coarse integration time */
- {IMX_8BIT, 0x0203, 0x90}, /* coarse integration time */
- {IMX_8BIT, 0x0205, 0x00}, /* ana global gain */
-
- {IMX_TOK_TERM, 0, 0},
-};
-/********************** settings for imx - reference *********************/
-static struct imx_reg const imx208_init_settings[] = {
- { IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx208_res_preview[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_still[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-
-struct imx_resolution imx208_res_video[] = {
- {
- .desc = "imx208_1080p_30fps",
- .regs = imx208_1080p_30fps,
- .width = 1936,
- .height = 1096,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x976_30fps",
- .regs = imx208_1296x976_30fps,
- .width = 1296,
- .height = 976,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_1296x736_30fps",
- .regs = imx208_1296x736_30fps,
- .width = 1296,
- .height = 736,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 403200,
- },
- {
- .desc = "imx208_336x256_30fps",
- .regs = imx208_336x256_30fps,
- .width = 336,
- .height = 256,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 201600,
- },
- {
- .desc = "imx208_192x160_30fps",
- .regs = imx208_192x160_30fps,
- .width = 192,
- .height = 160,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x08C8,
- .lines_per_frame = 0x04AA,
- },
- {
- }
- },
- .bin_factor_x = 4,
- .bin_factor_y = 4,
- .used = 0,
- .skip_frames = 2,
- .mipi_freq = 100800,
- },
-};
-#endif
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx219.h b/drivers/staging/media/atomisp/i2c/imx/imx219.h
deleted file mode 100644
index 52df582c56d8..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx219.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#ifndef __IMX219_H__
-#define __IMX219_H__
-#include "common.h"
-
-#define IMX219_FRAME_LENGTH_LINES 0x0160
-#define IMX219_LINE_LENGTH_PIXELS 0x0162
-#define IMX219_HORIZONTAL_START_H 0x0164
-#define IMX219_VERTICAL_START_H 0x0168
-#define IMX219_HORIZONTAL_END_H 0x0166
-#define IMX219_VERTICAL_END_H 0x016A
-#define IMX219_HORIZONTAL_OUTPUT_SIZE_H 0x016c
-#define IMX219_VERTICAL_OUTPUT_SIZE_H 0x016E
-#define IMX219_COARSE_INTEGRATION_TIME 0x015A
-#define IMX219_IMG_ORIENTATION 0x0172
-#define IMX219_GLOBAL_GAIN 0x0157
-#define IMX219_DGC_ADJ 0x0158
-
-#define IMX219_DGC_LEN 4
-
-/************************** settings for imx *************************/
-static struct imx_reg const imx219_STILL_8M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0]*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x0A}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x94}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x00}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x00}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x09}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x9F}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x09}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0xA0}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x49}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x4C}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_STILL_6M_30fps[] = {
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x0C}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300A, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x300B, 0xFF}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x05}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x30EB, 0x09}, /*Access Code for address over 0x3000*/
- {IMX_8BIT, 0x0114, 0x03}, /*CSI_LANE_MODE[1:0]*/
- {IMX_8BIT, 0x0128, 0x00}, /*DPHY_CNTRL*/
- {IMX_8BIT, 0x012A, 0x13}, /*EXCK_FREQ[15:8]*/
- {IMX_8BIT, 0x012B, 0x34}, /*EXCK_FREQ[7:0]*/
- {IMX_8BIT, 0x0160, 0x07}, /*FRM_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0161, 0x64}, /*FRM_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0162, 0x0D}, /*LINE_LENGTH_A[15:8]*/
- {IMX_8BIT, 0x0163, 0x78}, /*LINE_LENGTH_A[7:0]*/
- {IMX_8BIT, 0x0164, 0x00}, /*X_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0165, 0x00}, /*X_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x0166, 0x0C}, /*X_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x0167, 0xCF}, /*X_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x0168, 0x01}, /*Y_ADD_STA_A[11:8]*/
- {IMX_8BIT, 0x0169, 0x32}, /*Y_ADD_STA_A[7:0]*/
- {IMX_8BIT, 0x016A, 0x08}, /*Y_ADD_END_A[11:8]*/
- {IMX_8BIT, 0x016B, 0x6D}, /*Y_ADD_END_A[7:0]*/
- {IMX_8BIT, 0x016C, 0x0C}, /*X_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016D, 0xD0}, /*X_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x016E, 0x07}, /*Y_OUTPUT_SIZE_A[11:8]*/
- {IMX_8BIT, 0x016F, 0x3C}, /*Y_OUTPUT_SIZE_A[7:0]*/
- {IMX_8BIT, 0x0170, 0x01}, /*X_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0171, 0x01}, /*Y_ODD_INC_A[2:0]*/
- {IMX_8BIT, 0x0174, 0x00}, /*BINNING_MODE_H_A*/
- {IMX_8BIT, 0x0175, 0x00}, /*BINNING_MODE_V_A*/
- {IMX_8BIT, 0x018C, 0x0A}, /*CSI_DATA_FORMAT_A[15:8]*/
- {IMX_8BIT, 0x018D, 0x0A}, /*CSI_DATA_FORMAT_A[7:0]*/
- {IMX_8BIT, 0x0301, 0x05}, /*VTPXCK_DIV*/
- {IMX_8BIT, 0x0303, 0x01}, /*VTSYCK_DIV*/
- {IMX_8BIT, 0x0304, 0x02}, /*PREPLLCK_VT_DIV[3:0]*/
- {IMX_8BIT, 0x0305, 0x02}, /*PREPLLCK_OP_DIV[3:0]*/
- {IMX_8BIT, 0x0306, 0x00}, /*PLL_VT_MPY[10:8]*/
- {IMX_8BIT, 0x0307, 0x33}, /*PLL_VT_MPY[7:0]*/
- {IMX_8BIT, 0x0309, 0x0A}, /*OPPXCK_DIV[4:0]*/
- {IMX_8BIT, 0x030B, 0x01}, /*OPSYCK_DIV*/
- {IMX_8BIT, 0x030C, 0x00}, /*PLL_OP_MPY[10:8]*/
- {IMX_8BIT, 0x030D, 0x36}, /*PLL_OP_MPY[7:0]*/
- {IMX_8BIT, 0x4767, 0x0F}, /*CIS Tuning*/
- {IMX_8BIT, 0x4750, 0x14}, /*CIS Tuning*/
- {IMX_8BIT, 0x47B4, 0x14}, /*CIS Tuning*/
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx219_init_settings[] = {
- {IMX_TOK_TERM, 0, 0}
-};
-
-struct imx_resolution imx219_res_preview[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_still[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
- {
- .desc = "STILL_8M_30fps",
- .regs = imx219_STILL_8M_30fps,
- .width = 3280,
- .height = 2464,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0A94,
- },
- {
- }
- },
- .mipi_freq = 365000,
- },
-};
-
-struct imx_resolution imx219_res_video[] = {
- {
- .desc = "STILL_6M_30fps",
- .regs = imx219_STILL_6M_30fps,
- .width = 3280,
- .height = 1852,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0D78,
- .lines_per_frame = 0x0764,
- },
- {
- }
- },
- .mipi_freq = 259000,
- },
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx227.h b/drivers/staging/media/atomisp/i2c/imx/imx227.h
deleted file mode 100644
index 10e5b86f6687..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/imx227.h
+++ /dev/null
@@ -1,726 +0,0 @@
-#ifndef __IMX227_H__
-#define __IMX227_H__
-
-#include "common.h"
-
-#define IMX227_EMBEDDED_DATA_LINE_NUM 2
-#define IMX227_OUTPUT_DATA_FORMAT_REG 0x0112
-#define IMX227_OUTPUT_FORMAT_RAW10 0x0a0a
-
-/* AE Bracketing Registers */
-#define IMX227_BRACKETING_LUT_MODE_BIT_CONTINUE_STREAMING 0x1
-#define IMX227_BRACKETING_LUT_MODE_BIT_LOOP_MODE 0x2
-
-#define IMX227_BRACKETING_LUT_CONTROL 0x0E00
-#define IMX227_BRACKETING_LUT_MODE 0x0E01
-#define IMX227_BRACKETING_LUT_ENTRY_CONTROL 0x0E02
-
-/*
- * The imx227 embedded data info:
- * embedded data line num: 2
- * line 0 effective data size (byte): 160
- * line 1 effective data size (byte): 62
- */
-static const uint32_t
-imx227_embedded_effective_size[IMX227_EMBEDDED_DATA_LINE_NUM] = {160, 62};
-
-/************************** settings for imx *************************/
-/* Full Output Mode */
-static struct imx_reg const imx_STILL_6_5M_25fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd0}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* 4:3 Output Mode */
-static struct imx_reg const imx_STILL_5_5M_3X4_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0xb0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x00},
- {IMX_8BIT, 0x0348, 0x08},
- {IMX_8BIT, 0x0349, 0xaf},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x9f},
- {IMX_8BIT, 0x034c, 0x08},
- {IMX_8BIT, 0x034d, 0x00},
- {IMX_8BIT, 0x034e, 0x0a},
- {IMX_8BIT, 0x034f, 0xa0},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd8}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Square Output Mode */
-static struct imx_reg const imx_STILL_5_7M_1X1_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0344, 0x00},
- {IMX_8BIT, 0x0345, 0x00},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0xa0},
- {IMX_8BIT, 0x0348, 0x09},
- {IMX_8BIT, 0x0349, 0x5f},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0xff},
- {IMX_8BIT, 0x034c, 0x09},
- {IMX_8BIT, 0x034d, 0x60},
- {IMX_8BIT, 0x034e, 0x09},
- {IMX_8BIT, 0x034f, 0x60},
-
- {IMX_8BIT, 0x6259, 0x06}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd4}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Full Frame 1080P Mode (use ISP scaler)*/
-static struct imx_reg const imx_VIDEO_4M_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Cropped 1080P Mode */
-static struct imx_reg const imx_VIDEO_2M_9X16_45fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0x8a},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0x88},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0xd1},
- {IMX_8BIT, 0x034a, 0x09},
- {IMX_8BIT, 0x034b, 0x17},
- {IMX_8BIT, 0x034c, 0x04},
- {IMX_8BIT, 0x034d, 0x48},
- {IMX_8BIT, 0x034e, 0x07},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x04},
- {IMX_8BIT, 0x040d, 0x48},
- {IMX_8BIT, 0x040e, 0x07},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x00},
- {IMX_8BIT, 0x0901, 0x00},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdc}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
-
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Moment mode */
-static struct imx_reg const imx_VIDEO_1_3M_3X4_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xd9}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* High Speed 3:4 mode */
-static struct imx_reg const imx_VIDEO_VGA_3X4_120fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x9004, 0xca}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3f}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Binned 720P mode */
-static struct imx_reg const imx_VIDEO_1M_9X16_60fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xd0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x40},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x8f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x5f},
- {IMX_8BIT, 0x034c, 0x02},
- {IMX_8BIT, 0x034d, 0xe0},
- {IMX_8BIT, 0x034e, 0x05},
- {IMX_8BIT, 0x034f, 0x10},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x02},
- {IMX_8BIT, 0x040d, 0xe0},
- {IMX_8BIT, 0x040e, 0x05},
- {IMX_8BIT, 0x040f, 0x10},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* Binned 496x868 mode */
-static struct imx_reg const imx_VIDEO_496x868_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x02},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x01},
- {IMX_8BIT, 0x0347, 0xec},
- {IMX_8BIT, 0x0348, 0x06},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x08},
- {IMX_8BIT, 0x034b, 0xb3},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0xf0},
- {IMX_8BIT, 0x034e, 0x03},
- {IMX_8BIT, 0x034f, 0x64},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x01},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x01},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0xf0},
- {IMX_8BIT, 0x040e, 0x03},
- {IMX_8BIT, 0x040f, 0x64},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xdd}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-/* Hangout mode */
-static struct imx_reg const imx_PREVIEW_374X652_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x02},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x76},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x8c},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-static struct imx_reg const imx_VIDEO_NHD_9X16_30fps[] = {
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0112, 0x0a},
- {IMX_8BIT, 0x0113, 0x0a},
- {IMX_8BIT, 0x0344, 0x01},
- {IMX_8BIT, 0x0345, 0xc0},
- {IMX_8BIT, 0x0346, 0x00},
- {IMX_8BIT, 0x0347, 0x30},
- {IMX_8BIT, 0x0348, 0x07},
- {IMX_8BIT, 0x0349, 0x9f},
- {IMX_8BIT, 0x034a, 0x0a},
- {IMX_8BIT, 0x034b, 0x6f},
- {IMX_8BIT, 0x034c, 0x01},
- {IMX_8BIT, 0x034d, 0x78},
- {IMX_8BIT, 0x034e, 0x02},
- {IMX_8BIT, 0x034f, 0x90},
-
- {IMX_8BIT, 0x0380, 0x00},
- {IMX_8BIT, 0x0381, 0x01},
- {IMX_8BIT, 0x0382, 0x00},
- {IMX_8BIT, 0x0383, 0x03},
- {IMX_8BIT, 0x0384, 0x00},
- {IMX_8BIT, 0x0385, 0x01},
- {IMX_8BIT, 0x0386, 0x00},
- {IMX_8BIT, 0x0387, 0x03},
-
- {IMX_8BIT, 0x0408, 0x00},
- {IMX_8BIT, 0x0409, 0x00},
- {IMX_8BIT, 0x040a, 0x00},
- {IMX_8BIT, 0x040b, 0x00},
- {IMX_8BIT, 0x040c, 0x01},
- {IMX_8BIT, 0x040d, 0x78},
- {IMX_8BIT, 0x040e, 0x02},
- {IMX_8BIT, 0x040f, 0x90},
-
- {IMX_8BIT, 0x0900, 0x01},
- {IMX_8BIT, 0x0901, 0x22},
-
- {IMX_8BIT, 0x6259, 0x05}, /* latency ctrl */
- {IMX_8BIT, 0x9004, 0xde}, /* preset_sel */
- {IMX_8BIT, 0x9005, 0x3c}, /* preset_en */
- {IMX_8BIT, 0x0136, 0x13},
- {IMX_8BIT, 0x0137, 0x33},
- {IMX_TOK_TERM, 0, 0}
-};
-
-
-static struct imx_reg const imx227_init_settings[] = {
- {IMX_8BIT, 0x0100, 0x00}, /* mode_select */
- GROUPED_PARAMETER_HOLD_ENABLE,
- {IMX_8BIT, 0x0306, 0x00},
- {IMX_8BIT, 0x0307, 0xBB},
- {IMX_8BIT, 0x030E, 0x03},
- {IMX_8BIT, 0x030F, 0x0D},
- {IMX_8BIT, 0x463b, 0x30},
- {IMX_8BIT, 0x463e, 0x05},
- {IMX_8BIT, 0x4612, 0x66},
- {IMX_8BIT, 0x4815, 0x65},
- {IMX_8BIT, 0x4991, 0x00},
- {IMX_8BIT, 0x4992, 0x01},
- {IMX_8BIT, 0x4993, 0xff},
- {IMX_8BIT, 0x458b, 0x00},
- {IMX_8BIT, 0x452a, 0x02},
- {IMX_8BIT, 0x4a7c, 0x00},
- {IMX_8BIT, 0x4a7d, 0x1c},
- {IMX_8BIT, 0x4a7e, 0x00},
- {IMX_8BIT, 0x4a7f, 0x17},
- {IMX_8BIT, 0x462C, 0x2E},
- {IMX_8BIT, 0x461B, 0x28},
- {IMX_8BIT, 0x4663, 0x29},
- {IMX_8BIT, 0x461A, 0x7C},
- {IMX_8BIT, 0x4619, 0x28},
- {IMX_8BIT, 0x4667, 0x22},
- {IMX_8BIT, 0x466B, 0x23},
- {IMX_8BIT, 0x40AD, 0xFF},
- {IMX_8BIT, 0x40BE, 0x00},
- {IMX_8BIT, 0x40BF, 0x6E},
- {IMX_8BIT, 0x40CE, 0x00},
- {IMX_8BIT, 0x40CF, 0x0A},
- {IMX_8BIT, 0x40CA, 0x00},
- {IMX_8BIT, 0x40CB, 0x1F},
- {IMX_8BIT, 0x4D16, 0x00},
- {IMX_8BIT, 0x6204, 0x01},
- {IMX_8BIT, 0x6209, 0x00},
- {IMX_8BIT, 0x621F, 0x01},
- {IMX_8BIT, 0x621E, 0x10},
- GROUPED_PARAMETER_HOLD_DISABLE,
- {IMX_TOK_TERM, 0, 0}
-};
-
-/* TODO settings of preview/still/video will be updated with new use case */
-struct imx_resolution imx227_res_preview[] = {
- {
- .desc = "imx_PREVIEW_374X652_30fps",
- .regs = imx_PREVIEW_374X652_30fps,
- .width = 374,
- .height = 652,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- }
-};
-
-struct imx_resolution imx227_res_still[] = {
- {
- .desc = "imx_STILL_5_5M_3X4_30fps",
- .regs = imx_STILL_5_5M_3X4_30fps,
- .width = 2048,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x2130,
- .lines_per_frame = 0x1A22,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0ED8,
- .lines_per_frame = 0x0BB8,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_5_7M_1X1_30fps",
- .regs = imx_STILL_5_7M_1X1_30fps,
- .width = 2400,
- .height = 2400,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 6,
- .pixels_per_line = 0x266E,
- .lines_per_frame = 0x1704,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0A1E,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_STILL_6_5M_25fps",
- .regs = imx_STILL_6_5M_25fps,
- .width = 2400,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 25,
- .pixels_per_line = 0x1130,
- .lines_per_frame = 0x0C24,
- },
- {
- }
- },
- },
-};
-
-struct imx_resolution imx227_res_video[] = {
- {
- .desc = "imx_VIDEO_4M_9X16_30fps",
- .regs = imx_VIDEO_4M_9X16_30fps,
- .width = 1536,
- .height = 2720,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_2M_9X16_45fps",
- .regs = imx_VIDEO_2M_9X16_45fps,
- .width = 1096,
- .height = 1936,
- .bin_factor_x = 0,
- .bin_factor_y = 0,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- .fps = 45,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0800,
- },
- {
- }
- },
-
- },
- {
- .desc = "imx_VIDEO_1_3M_3X4_60fps",
- .regs = imx_VIDEO_1_3M_3X4_60fps,
- .width = 1024,
- .height = 1360,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_496x868_30fps",
- .regs = imx_VIDEO_496x868_30fps,
- .width = 496,
- .height = 868,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_1M_9X16_60fps",
- .regs = imx_VIDEO_1M_9X16_60fps,
- .width = 736,
- .height = 1296,
- .bin_factor_x = 1,
- .bin_factor_y = 1,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 60,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0604,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C10,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_VGA_3X4_120fps",
- .regs = imx_VIDEO_VGA_3X4_120fps,
- .width = 512,
- .height = 680,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 120,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0302,
- },
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C08,
- },
- {
- }
- },
- },
- {
- .desc = "imx_VIDEO_NHD_9X16_30fps",
- .regs = imx_VIDEO_NHD_9X16_30fps,
- .width = 376,
- .height = 656,
- .bin_factor_x = 2,
- .bin_factor_y = 2,
- .mipi_freq = 499000,
- .used = 0,
- .fps_options = {
- {
- .fps = 30,
- .pixels_per_line = 0x0E70,
- .lines_per_frame = 0x0C0A,
- },
- {
- }
- },
- },
-};
-
-#endif /* __IMX227_H__ */
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp.c b/drivers/staging/media/atomisp/i2c/imx/otp.c
deleted file mode 100644
index 462275038046..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-
-void *dummy_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- return buf;
-}
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
deleted file mode 100644
index b11f90c5960c..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_brcc064_e2prom.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from brcc064 and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *brcc064_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- unsigned char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-
-}
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c b/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
deleted file mode 100644
index 73d041f97811..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_e2prom.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "common.h"
-
-/*
- * Read EEPROM data from a general e2prom chip (e.g.
- * CAT24C08, CAT24C128, le24l042cs) and store
- * it into a kmalloced buffer. On error return NULL.
- * @size: set to the size of the returned EEPROM data.
- */
-void *e2prom_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- unsigned int e2prom_i2c_addr = dev_addr >> 1;
- static const unsigned int max_read_size = 30;
- int addr;
- u32 s_addr = start_addr & E2PROM_ADDR_MASK;
- bool two_addr = (start_addr & E2PROM_2ADDR) >> 31;
- char *buffer;
-
- buffer = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buffer)
- return NULL;
-
- for (addr = s_addr; addr < size; addr += max_read_size) {
- struct i2c_msg msg[2];
- unsigned int i2c_addr = e2prom_i2c_addr;
- u16 addr_buf;
- int r;
-
- msg[0].flags = 0;
- if (two_addr) {
- msg[0].addr = i2c_addr;
- addr_buf = cpu_to_be16(addr & 0xFFFF);
- msg[0].len = 2;
- msg[0].buf = (u8 *)&addr_buf;
- } else {
- i2c_addr |= (addr >> 8) & 0x7;
- msg[0].addr = i2c_addr;
- addr_buf = addr & 0xFF;
- msg[0].len = 1;
- msg[0].buf = (u8 *)&addr_buf;
- }
-
- msg[1].addr = i2c_addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = min(max_read_size, size - addr);
- msg[1].buf = &buffer[addr];
-
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(&client->dev, "read failed at 0x%03x\n", addr);
- return NULL;
- }
- }
- return buffer;
-}
-
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c b/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
deleted file mode 100644
index 1ca27c26ef75..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/otp_imx.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include <asm/intel-mid.h>
-#include "common.h"
-
-/* Defines for OTP Data Registers */
-#define IMX_OTP_START_ADDR 0x3B04
-#define IMX_OTP_PAGE_SIZE 64
-#define IMX_OTP_READY_REG 0x3B01
-#define IMX_OTP_PAGE_REG 0x3B02
-#define IMX_OTP_MODE_REG 0x3B00
-#define IMX_OTP_PAGE_MAX 20
-#define IMX_OTP_READY_REG_DONE 1
-#define IMX_OTP_READ_ONETIME 32
-#define IMX_OTP_MODE_READ 1
-#define IMX227_OTP_START_ADDR 0x0A04
-#define IMX227_OTP_ENABLE_REG 0x0A00
-#define IMX227_OTP_READY_REG 0x0A01
-#define IMX227_OTP_PAGE_REG 0x0A02
-#define IMX227_OTP_READY_REG_DONE 1
-#define IMX227_OTP_MODE_READ 1
-
-static int
-imx_read_otp_data(struct i2c_client *client, u16 len, u16 reg, void *val)
-{
- struct i2c_msg msg[2];
- u16 data[IMX_SHORT_MAX] = { 0 };
- int err;
-
- if (len > IMX_BYTE_MAX) {
- dev_err(&client->dev, "%s error, invalid data length\n",
- __func__);
- return -EINVAL;
- }
-
- memset(msg, 0, sizeof(msg));
- memset(data, 0, sizeof(data));
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = I2C_MSG_LENGTH;
- msg[0].buf = (u8 *)data;
- /* high byte goes first */
- data[0] = cpu_to_be16(reg);
-
- msg[1].addr = client->addr;
- msg[1].len = len;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = (u8 *)data;
-
- err = i2c_transfer(client->adapter, msg, 2);
- if (err != 2) {
- if (err >= 0)
- err = -EIO;
- goto error;
- }
-
- memcpy(val, data, len);
- return 0;
-
-error:
- dev_err(&client->dev, "read from offset 0x%x error %d", reg, err);
- return err;
-}
-
-static int imx_read_otp_reg_array(struct i2c_client *client, u16 size, u16 addr,
- u8 *buf)
-{
- u16 index;
- int ret;
-
- for (index = 0; index + IMX_OTP_READ_ONETIME <= size;
- index += IMX_OTP_READ_ONETIME) {
- ret = imx_read_otp_data(client, IMX_OTP_READ_ONETIME,
- addr + index, &buf[index]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-void *imx_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX_OTP_MODE_REG, IMX_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
-void *imx227_otp_read(struct v4l2_subdev *sd, u8 dev_addr,
- u32 start_addr, u32 size)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 *buf;
- int ret;
- int i;
-
- buf = devm_kzalloc(&client->dev, size, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < IMX_OTP_PAGE_MAX; i++) {
-
- /*set page NO.*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_PAGE_REG, i & 0xff);
- if (ret)
- goto fail;
-
- /*set read mode*/
- ret = imx_write_reg(client, IMX_8BIT,
- IMX227_OTP_ENABLE_REG, IMX227_OTP_MODE_READ);
- if (ret)
- goto fail;
-
- /* Reading the OTP data array */
- ret = imx_read_otp_reg_array(client, IMX_OTP_PAGE_SIZE,
- IMX227_OTP_START_ADDR, buf + i * IMX_OTP_PAGE_SIZE);
- if (ret)
- goto fail;
- }
-
- return buf;
-fail:
- /* Driver has failed to find valid data */
- dev_err(&client->dev, "sensor found no valid OTP data\n");
- return ERR_PTR(ret);
-}
-
diff --git a/drivers/staging/media/atomisp/i2c/imx/vcm.c b/drivers/staging/media/atomisp/i2c/imx/vcm.c
deleted file mode 100644
index 2d2df04c800a..000000000000
--- a/drivers/staging/media/atomisp/i2c/imx/vcm.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <media/v4l2-device.h>
-#include "../../include/linux/atomisp_platform.h"
-
-int vcm_power_up(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- /* Enable power */
- return vcm_platform_data->power_ctrl(sd, 1);
-}
-
-int vcm_power_down(struct v4l2_subdev *sd)
-{
- const struct camera_af_platform_data *vcm_platform_data;
-
- vcm_platform_data = camera_get_af_platform_data();
- if (NULL == vcm_platform_data)
- return -ENODEV;
- return vcm_platform_data->power_ctrl(sd, 0);
-}
-
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.h b/drivers/staging/media/atomisp/i2c/mt9m114.h
index 5e7d79d2e01b..0af79d77a404 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.h
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -394,11 +390,6 @@ static struct mt9m114_res_struct mt9m114_res[] = {
};
#define N_RES (ARRAY_SIZE(mt9m114_res))
-static const struct i2c_device_id mt9m114_id[] = {
- {"mt9m114", 0},
- {}
-};
-
static struct misensor_reg const mt9m114_exitstandby[] = {
{MISENSOR_16BIT, 0x098E, 0xDC00},
/* exit-standby */
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index ab8907e6c9ef..bf4897347df7 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,10 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2680_NAME "ov2680"
-#define OV2680B_NAME "ov2680b"
-#define OV2680F_NAME "ov2680f"
-
/* Defines for register writes and register array processing */
#define I2C_MSG_LENGTH 0x2
#define I2C_RETRY_COUNT 5
@@ -227,12 +219,6 @@ struct ov2680_format {
struct ov2680_write_buffer buffer;
};
- static const struct i2c_device_id ov2680_id[] = {
- {OV2680B_NAME, 0},
- {OV2680F_NAME, 0},
- {}
- };
-
static struct ov2680_reg const ov2680_global_setting[] = {
{OV2680_8BIT, 0x0103, 0x01},
{OV2680_8BIT, 0x3002, 0x00},
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index 73ecb1679718..d8a973d71699 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../include/linux/atomisp_platform.h"
-#define OV2722_NAME "ov2722"
-
#define OV2722_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -257,11 +251,6 @@ struct ov2722_write_ctrl {
struct ov2722_write_buffer buffer;
};
-static const struct i2c_device_id ov2722_id[] = {
- {OV2722_NAME, 0},
- {}
-};
-
/*
* Register settings for various resolution
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
index 9fb1bffbe9b3..3f527f2047a7 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Kconfig
@@ -1,11 +1,11 @@
-config VIDEO_OV5693
+config VIDEO_ATOMISP_OV5693
tristate "Omnivision ov5693 sensor support"
+ depends on ACPI
depends on I2C && VIDEO_V4L2
---help---
- This is a Video4Linux2 sensor-level driver for the Micron
- ov5693 5 Mpixel camera.
+	  This is a Video4Linux2 sensor-level driver for the OmniVision
+	  ov5693 5 Mpixel camera.
- ov5693 is video camera sensor.
-
- It currently only works with the atomisp driver.
+	  ov5693 is a video camera sensor.
+ It currently only works with the atomisp driver.
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index 4e3833aaec05..aa6be85c5a60 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,4 +1,5 @@
-obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
# HACK! While this driver is in bad shape, don't enable several warnings
# that would be otherwise enabled with W=1
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
index 2dd894989cd9..4de44569fe54 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 123642557aa8..3e7c3851280f 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -31,7 +27,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/moduleparam.h>
#include <media/v4l2-device.h>
#include <linux/io.h>
@@ -391,8 +386,8 @@ static int ov5693_write_reg_array(struct i2c_client *client,
if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
next)) {
err = __ov5693_flush_reg_array(client, &ctrl);
- if (err)
- return err;
+ if (err)
+ return err;
}
err = __ov5693_buf_reg_array(client, &ctrl, next);
if (err) {
@@ -945,12 +940,8 @@ static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
{
- int ret;
-
value = min(value, AD5823_MAX_FOCUS_POS);
- ret = ad5823_t_focus_vcm(sd, value);
-
- return ret;
+ return ad5823_t_focus_vcm(sd, value);
}
static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
@@ -1302,10 +1293,6 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
/* This driver assumes "internal DVDD, PWDNB tied to DOVDD".
* In this set up only gpio0 (XSHUTDN) should be available
* but in some products (for example ECS) gpio1 (PWDNB) is
@@ -1332,19 +1319,12 @@ static int power_ctrl(struct v4l2_subdev *sd, bool flag)
static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
{
- int ret;
struct ov5693_device *dev = to_ov5693_sensor(sd);
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
- ret = dev->platform_data->gpio0_ctrl(sd, flag);
-
- return ret;
+ return dev->platform_data->gpio0_ctrl(sd, flag);
}
static int __power_up(struct v4l2_subdev *sd)
@@ -1942,8 +1922,7 @@ static int ov5693_remove(struct i2c_client *client)
return 0;
}
-static int ov5693_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *dev;
int i2c;
@@ -1965,10 +1944,8 @@ static int ov5693_probe(struct i2c_client *client,
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "out of memory\n");
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
@@ -2030,8 +2007,6 @@ out_free:
return ret;
}
-MODULE_DEVICE_TABLE(i2c, ov5693_id);
-
static const struct acpi_device_id ov5693_acpi_match[] = {
{"INT33BE"},
{},
@@ -2040,27 +2015,13 @@ MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
static struct i2c_driver ov5693_driver = {
.driver = {
- .name = OV5693_NAME,
- .acpi_match_table = ACPI_PTR(ov5693_acpi_match),
+ .name = "ov5693",
+ .acpi_match_table = ov5693_acpi_match,
},
- .probe = ov5693_probe,
+ .probe_new = ov5693_probe,
.remove = ov5693_remove,
- .id_table = ov5693_id,
};
-
-static int init_ov5693(void)
-{
- return i2c_add_driver(&ov5693_driver);
-}
-
-static void exit_ov5693(void)
-{
-
- i2c_del_driver(&ov5693_driver);
-}
-
-module_init(init_ov5693);
-module_exit(exit_ov5693);
+module_i2c_driver(ov5693_driver);
MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
MODULE_LICENSE("GPL");
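
The ov5693 hunks above drop the i2c_device_id table and the hand-rolled module_init()/module_exit() wrappers in favour of ACPI matching, .probe_new and module_i2c_driver(). A minimal, self-contained sketch of that driver skeleton follows; the foo_* names, the "HYPO0001" ACPI ID and the empty probe body are illustrative assumptions, not the actual ov5693 code.

/*
 * Sketch only: ACPI-enumerated I2C driver using .probe_new (no
 * i2c_device_id argument) and module_i2c_driver() instead of explicit
 * module_init()/module_exit() boilerplate.
 */
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int foo_probe(struct i2c_client *client)
{
	/* device-specific setup would go here */
	return 0;
}

static int foo_remove(struct i2c_client *client)
{
	/* undo whatever probe set up */
	return 0;
}

static const struct acpi_device_id foo_acpi_match[] = {
	{ "HYPO0001" },	/* placeholder ACPI _HID, not a real device ID */
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

static struct i2c_driver foo_driver = {
	.driver = {
		.name = "foo",
		.acpi_match_table = foo_acpi_match,
	},
	.probe_new = foo_probe,
	.remove = foo_remove,
};
module_i2c_driver(foo_driver);

MODULE_DESCRIPTION("Sketch of an ACPI-only I2C sensor driver skeleton");
MODULE_LICENSE("GPL");

With no i2c_device_id table left, instantiation relies entirely on the ACPI match table, which is why the hunk above also removes MODULE_DEVICE_TABLE(i2c, ov5693_id).
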
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index 8c2e6794463b..2ea63807c56d 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -35,8 +31,6 @@
#include "../../include/linux/atomisp_platform.h"
-#define OV5693_NAME "ov5693"
-
#define OV5693_POWER_UP_RETRY_NUM 5
/* Defines for register writes and register array processing */
@@ -278,11 +272,6 @@ struct ov5693_write_ctrl {
struct ov5693_write_buffer buffer;
};
-static const struct i2c_device_id ov5693_id[] = {
- {OV5693_NAME, 0},
- {}
-};
-
static struct ov5693_reg const ov5693_global_setting[] = {
{OV5693_8BIT, 0x0103, 0x01},
{OV5693_8BIT, 0x3001, 0x0a},
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.c b/drivers/staging/media/atomisp/i2c/ov8858.c
index 43e1638fd674..ba147ac2e36f 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.c
+++ b/drivers/staging/media/atomisp/i2c/ov8858.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -480,8 +476,6 @@ static int ov8858_priv_int_data_init(struct v4l2_subdev *sd)
if (!dev->otp_data) {
dev->otp_data = devm_kzalloc(&client->dev, size, GFP_KERNEL);
if (!dev->otp_data) {
- dev_err(&client->dev, "%s: can't allocate memory",
- __func__);
r = -ENOMEM;
goto error3;
}
@@ -714,10 +708,6 @@ static int __power_ctrl(struct v4l2_subdev *sd, bool flag)
if (!dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->power_ctrl)
- return dev->platform_data->power_ctrl(sd, flag);
-
if (dev->platform_data->v1p2_ctrl) {
ret = dev->platform_data->v1p2_ctrl(sd, flag);
if (ret) {
@@ -769,10 +759,6 @@ static int __gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (!client || !dev || !dev->platform_data)
return -ENODEV;
- /* Non-gmin platforms use the legacy callback */
- if (dev->platform_data->gpio_ctrl)
- return dev->platform_data->gpio_ctrl(sd, flag);
-
if (dev->platform_data->gpio0_ctrl)
return dev->platform_data->gpio0_ctrl(sd, flag);
@@ -1575,15 +1561,6 @@ static int ov8858_s_config(struct v4l2_subdev *sd,
mutex_lock(&dev->input_lock);
- if (dev->platform_data->platform_init) {
- ret = dev->platform_data->platform_init(client);
- if (ret) {
- mutex_unlock(&dev->input_lock);
- dev_err(&client->dev, "platform init error %d!\n", ret);
- return ret;
- }
- }
-
ret = __ov8858_s_power(sd, 1);
if (ret) {
dev_err(&client->dev, "power-up error %d!\n", ret);
@@ -1628,8 +1605,6 @@ fail_detect:
fail_csi_cfg:
__ov8858_s_power(sd, 0);
fail_update:
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
mutex_unlock(&dev->input_lock);
dev_err(&client->dev, "sensor power-gating failed\n");
return ret;
@@ -1930,8 +1905,6 @@ static int ov8858_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct ov8858_device *dev = to_ov8858_sensor(sd);
- if (dev->platform_data->platform_deinit)
- dev->platform_data->platform_deinit();
media_entity_cleanup(&dev->sd.entity);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
@@ -2082,8 +2055,7 @@ static const struct v4l2_ctrl_config ctrls[] = {
}
};
-static int ov8858_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ov8858_probe(struct i2c_client *client)
{
struct ov8858_device *dev;
unsigned int i;
@@ -2094,15 +2066,11 @@ static int ov8858_probe(struct i2c_client *client,
/* allocate sensor device & init sub device */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- dev_err(&client->dev, "%s: out of memory\n", __func__);
+ if (!dev)
return -ENOMEM;
- }
mutex_init(&dev->input_lock);
- if (id)
- dev->i2c_id = id->driver_data;
dev->fmt_idx = 0;
dev->sensor_id = OV_ID_DEFAULT;
dev->vcm_driver = &ov8858_vcms[OV8858_ID_DEFAULT];
@@ -2182,40 +2150,21 @@ out_free:
return ret;
}
-static const struct i2c_device_id ov8858_id[] = {
- {OV8858_NAME, 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, ov8858_id);
-
static const struct acpi_device_id ov8858_acpi_match[] = {
{"INT3477"},
{},
};
+MODULE_DEVICE_TABLE(acpi, ov8858_acpi_match);
static struct i2c_driver ov8858_driver = {
.driver = {
- .name = OV8858_NAME,
- .acpi_match_table = ACPI_PTR(ov8858_acpi_match),
+ .name = "ov8858",
+ .acpi_match_table = ov8858_acpi_match,
},
- .probe = ov8858_probe,
+ .probe_new = ov8858_probe,
.remove = ov8858_remove,
- .id_table = ov8858_id,
};
-
-static __init int ov8858_init_mod(void)
-{
- return i2c_add_driver(&ov8858_driver);
-}
-
-static __exit void ov8858_exit_mod(void)
-{
- i2c_del_driver(&ov8858_driver);
-}
-
-module_init(ov8858_init_mod);
-module_exit(ov8858_exit_mod);
+module_i2c_driver(ov8858_driver);
MODULE_DESCRIPTION("A low-level driver for Omnivision OV8858 sensors");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 638d1a803a2b..6c89568bb44e 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 7d74a8899fae..f81851306832 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -113,7 +109,6 @@
#define OV_SUBDEV_PREFIX "ov"
#define OV_ID_DEFAULT 0x0000
-#define OV8858_NAME "ov8858"
#define OV8858_CHIP_ID 0x8858
#define OV8858_LONG_EXPO 0x3500
diff --git a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h b/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
deleted file mode 100644
index c5e22bba455a..000000000000
--- a/drivers/staging/media/atomisp/include/asm/intel_mid_pcihelpers.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Access to message bus through three registers
- * in CUNIT(0:0:0) PCI configuration space.
- * MSGBUS_CTRL_REG(0xD0):
- * 31:24 = message bus opcode
- * 23:16 = message bus port
- * 15:8 = message bus address, low 8 bits.
- * 7:4 = message bus byte enables
- * MSGBUS_CTRL_EXT_REG(0xD8):
- * 31:8 = message bus address, high 24 bits.
- * MSGBUS_DATA_REG(0xD4):
- * hold the data for write or read
- */
-#define PCI_ROOT_MSGBUS_CTRL_REG 0xD0
-#define PCI_ROOT_MSGBUS_DATA_REG 0xD4
-#define PCI_ROOT_MSGBUS_CTRL_EXT_REG 0xD8
-#define PCI_ROOT_MSGBUS_READ 0x10
-#define PCI_ROOT_MSGBUS_WRITE 0x11
-#define PCI_ROOT_MSGBUS_DWORD_ENABLE 0xf0
-
-/* In BYT platform for all internal PCI devices d3 delay
- * of 3 ms is sufficient. Default value of 10 ms is overkill.
- */
-#define INTERNAL_PCI_PM_D3_WAIT 3
-
-#define ISP_SUB_CLASS 0x80
-#define SUB_CLASS_MASK 0xFF00
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd);
-u32 intel_mid_msgbus_read32(u8 port, u32 addr);
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data);
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data);
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext);
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data);
-u32 intel_mid_soc_stepping(void);
-int intel_mid_dw_i2c_acquire_ownership(void);
-int intel_mid_dw_i2c_release_ownership(void);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h
index d67dd658cff9..b5533197226d 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CSS15
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
index 5390b97ac6e7..7e3ca12dd4e9 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h
@@ -17,9 +17,6 @@
#include "atomisp_platform.h"
-const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
-const struct atomisp_platform_data *atomisp_get_platform_data(void);
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type);
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index dbac2b777dad..e0f0c379e7ce 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef ATOMISP_PLATFORM_H_
@@ -205,20 +201,13 @@ struct camera_vcm_control {
};
struct camera_sensor_platform_data {
- int (*gpio_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*flisclk_ctrl)(struct v4l2_subdev *subdev, int flag);
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
int (*csi_cfg)(struct v4l2_subdev *subdev, int flag);
- bool (*low_fps)(void);
- int (*platform_init)(struct i2c_client *);
- int (*platform_deinit)(void);
- char *(*msr_file_name)(void);
- struct atomisp_camera_caps *(*get_camera_caps)(void);
- int (*gpio_intr_ctrl)(struct v4l2_subdev *subdev);
- /* New G-Min power and GPIO interface, replaces
- * power/gpio_ctrl with methods to control individual
- * lines as implemented on all known camera modules. */
+ /*
+ * New G-Min power and GPIO interface to control individual
+ * lines as implemented on all known camera modules.
+ */
int (*gpio0_ctrl)(struct v4l2_subdev *subdev, int on);
int (*gpio1_ctrl)(struct v4l2_subdev *subdev, int on);
int (*v1p8_ctrl)(struct v4l2_subdev *subdev, int on);
@@ -228,12 +217,6 @@ struct camera_sensor_platform_data {
char *module_id);
};
-struct camera_af_platform_data {
- int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
-};
-
-const struct camera_af_platform_data *camera_get_af_platform_data(void);
-
struct camera_mipi_info {
enum atomisp_camera_port port;
unsigned int num_lanes;
diff --git a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
index 589f4eae38ca..8988b37943b3 100644
--- a/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
+++ b/drivers/staging/media/atomisp/include/linux/libmsrlisthelper.h
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __LIBMSRLISTHELPER_H__
diff --git a/drivers/staging/media/atomisp/include/media/lm3554.h b/drivers/staging/media/atomisp/include/media/lm3554.h
index 7d6a8c05dd52..9276ce44d907 100644
--- a/drivers/staging/media/atomisp/include/media/lm3554.h
+++ b/drivers/staging/media/atomisp/include/media/lm3554.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _LM3554_H_
@@ -24,7 +20,6 @@
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
-#define LM3554_NAME "lm3554"
#define LM3554_ID 3554
#define v4l2_queryctrl_entry_integer(_id, _name,\
diff --git a/drivers/staging/media/atomisp/include/media/lm3642.h b/drivers/staging/media/atomisp/include/media/lm3642.h
deleted file mode 100644
index 545d95763335..000000000000
--- a/drivers/staging/media/atomisp/include/media/lm3642.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * include/media/lm3642.h
- *
- * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- */
-
-#ifndef _LM3642_H_
-#define _LM3642_H_
-
-#include <linux/videodev2.h>
-#include <media/v4l2-subdev.h>
-
-#define LM3642_NAME "lm3642"
-#define LM3642_ID 3642
-
-#define v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step, \
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_INTEGER, \
- .name = _name, \
- .minimum = (_minimum), \
- .maximum = (_maximum), \
- .step = (_step), \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-#define v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags) \
- {\
- .id = (_id), \
- .type = V4L2_CTRL_TYPE_BOOLEAN, \
- .name = _name, \
- .minimum = 0, \
- .maximum = 1, \
- .step = 1, \
- .default_value = (_default_value),\
- .flags = (_flags),\
- }
-
-#define s_ctrl_id_entry_integer(_id, _name, \
- _minimum, _maximum, _step, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_integer(_id, _name,\
- _minimum, _maximum, _step,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-#define s_ctrl_id_entry_boolean(_id, _name, \
- _default_value, _flags, \
- _s_ctrl, _g_ctrl) \
- {\
- .qc = v4l2_queryctrl_entry_boolean(_id, _name,\
- _default_value, _flags), \
- .s_ctrl = _s_ctrl, \
- .g_ctrl = _g_ctrl, \
- }
-
-
-/* Default Values */
-#define LM3642_DEFAULT_TIMEOUT 300U
-#define LM3642_DEFAULT_RAMP_TIME 0x10 /* 1.024ms */
-#define LM3642_DEFAULT_INDICATOR_CURRENT 0x01 /* 1.88A */
-#define LM3642_DEFAULT_FLASH_CURRENT 0x0f /* 1500mA */
-
-/* Value settings for Flash Time-out Duration*/
-#define LM3642_MIN_TIMEOUT 100U
-#define LM3642_MAX_TIMEOUT 800U
-#define LM3642_TIMEOUT_STEPSIZE 100U
-
-/* Flash modes */
-#define LM3642_MODE_SHUTDOWN 0
-#define LM3642_MODE_INDICATOR 1
-#define LM3642_MODE_TORCH 2
-#define LM3642_MODE_FLASH 3
-
-/* timer delay time */
-#define LM3642_TIMER_DELAY 5
-
-/* Percentage <-> value macros */
-#define LM3642_MIN_PERCENT 0U
-#define LM3642_MAX_PERCENT 100U
-#define LM3642_CLAMP_PERCENTAGE(val) \
- clamp(val, LM3642_MIN_PERCENT, LM3642_MAX_PERCENT)
-
-#define LM3642_VALUE_TO_PERCENT(v, step) \
- (((((unsigned long)((v)+1))*(step))+50)/100)
-#define LM3642_PERCENT_TO_VALUE(p, step) \
- (((((unsigned long)(p))*100)+((step)>>1))/(step)-1)
-
-/* Product specific limits
- * TODO: get these from platform data */
-#define LM3642_FLASH_MAX_LVL 0x0F /* 1500mA */
-#define LM3642_TORCH_MAX_LVL 0x07 /* 187mA */
-#define LM3642_INDICATOR_MAX_LVL 0x01 /* 1.88A */
-
-/* Flash brightness, input is percentage, output is [0..15] */
-#define LM3642_FLASH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_FLASH_MAX_LVL+1)>>1)) \
- /((LM3642_FLASH_MAX_LVL+1)))
-#define LM3642_FLASH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(15, LM3642_FLASH_STEP)
-
-/* Torch brightness, input is percentage, output is [0..7] */
-#define LM3642_TORCH_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_TORCH_MAX_LVL+1)>>1)) \
- /((LM3642_TORCH_MAX_LVL+1)))
-#define LM3642_TORCH_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(0, LM3642_TORCH_STEP)
-
-/* Indicator brightness, input is percentage, output is [0..1] */
-#define LM3642_INDICATOR_STEP \
- ((100ul*(LM3642_MAX_PERCENT) \
- +((LM3642_INDICATOR_MAX_LVL+1)>>1)) \
- /((LM3642_INDICATOR_MAX_LVL+1)))
-#define LM3642_INDICATOR_DEFAULT_BRIGHTNESS \
- LM3642_VALUE_TO_PERCENT(1, LM3642_INDICATOR_STEP)
-
-/*
- * lm3642_platform_data - Flash controller platform data
- */
-struct lm3642_platform_data {
- int gpio_torch;
- int gpio_strobe;
- int (*power_ctrl)(struct v4l2_subdev *subdev, int on);
-
- unsigned int torch_en;
- unsigned int flash_en;
- unsigned int tx_en;
- unsigned int ivfm_en;
-};
-
-#endif /* _LM3642_H_ */
-
diff --git a/drivers/staging/media/atomisp/pci/Kconfig b/drivers/staging/media/atomisp/pci/Kconfig
index a72421431c7a..41f116d52060 100644
--- a/drivers/staging/media/atomisp/pci/Kconfig
+++ b/drivers/staging/media/atomisp/pci/Kconfig
@@ -3,11 +3,12 @@
#
config VIDEO_ATOMISP
- tristate "Intel Atom Image Signal Processor Driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
- select VIDEOBUF_VMALLOC
- ---help---
- Say Y here if your platform supports Intel Atom SoC
- camera imaging subsystem.
- To compile this driver as a module, choose M here: the
- module will be called atomisp
+ tristate "Intel Atom Image Signal Processor Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select IOSF_MBI
+ select VIDEOBUF_VMALLOC
+ ---help---
+	  Say Y here if your platform supports the Intel Atom SoC
+	  camera imaging subsystem.
+	  To compile this driver as a module, choose M here: the
+	  module will be called atomisp.
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 2bd98f0667ec..ac3805345f20 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
atomisp-objs += \
atomisp_drvfs.o \
atomisp_file.o \
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
index 513a430ee01a..5d102a4f8aff 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp-regs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
index 1eac329339b7..a6638edee360 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
index 5b58e7d9ca5b..56386154643b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_acc.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
index f48bf451c1f5..8a18c528cad4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/firmware.h>
@@ -27,7 +23,8 @@
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <asm/intel-mid.h>
+
+#include <asm/iosf_mbi.h>
#include <media/v4l2-event.h>
#include <media/videobuf-vmalloc.h>
@@ -143,36 +140,36 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
unsigned int ratio, timeout, guar_ratio;
u32 isp_sspm1 = 0;
int i;
+
if (!isp->hpll_freq) {
dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
}
ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);
for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
- intel_mid_msgbus_write32(PUNIT_PORT, ISPSSPM1,
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
isp_sspm1
| ratio << ISP_REQ_FREQ_OFFSET
| 1 << ISP_FREQ_VALID_OFFSET
| guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
-
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 20;
while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
udelay(100);
timeout--;
@@ -187,10 +184,10 @@ static int write_target_freq_to_hw(struct atomisp_device *isp,
return -EINVAL;
}
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
timeout = 10;
while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
- isp_sspm1 = intel_mid_msgbus_read32(PUNIT_PORT, ISPSSPM1);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
new_freq);
udelay(100);
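
The hunks above replace the legacy intel_mid_msgbus_read32()/write32() accessors with the generic IOSF MBI helpers (hence the select IOSF_MBI added in the pci/Kconfig hunk earlier). A hedged sketch of a read-modify-write on a P-unit register with that API; HYPO_PUNIT_REG and the function name are placeholders, not values taken from the driver.

/*
 * Sketch only: IOSF MBI read-modify-write, assuming CONFIG_IOSF_MBI.
 * HYPO_PUNIT_REG is a made-up register offset for illustration.
 */
#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/iosf_mbi.h>

#define HYPO_PUNIT_REG	0x39	/* placeholder P-unit register offset */

static int hypo_set_bit0(void)
{
	u32 val = 0;
	int ret;

	/* Read the current register value through the PMC/P-unit port. */
	ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, HYPO_PUNIT_REG, &val);
	if (ret)
		return ret;

	val |= BIT(0);

	/* Write the modified value back. */
	return iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, HYPO_PUNIT_REG, val);
}

Unlike the old helpers, iosf_mbi_read() returns the value through an output pointer and reports errors, which is why the converted code above passes &isp_sspm1 instead of assigning a return value.
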
@@ -1660,20 +1657,15 @@ void atomisp_css_flush(struct atomisp_device *isp)
dev_dbg(isp->dev, "atomisp css flush done\n");
}
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr)
-#else
-void atomisp_wdt(unsigned long pipe_addr)
-#endif
+void atomisp_wdt(struct timer_list *t)
{
#ifndef ISP2401
- struct atomisp_device *isp = (struct atomisp_device *)isp_addr;
+ struct atomisp_sub_device *asd = from_timer(asd, t, wdt);
#else
- struct atomisp_video_pipe *pipe =
- (struct atomisp_video_pipe *)pipe_addr;
+ struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);
struct atomisp_sub_device *asd = pipe->asd;
- struct atomisp_device *isp = asd->isp;
#endif
+ struct atomisp_device *isp = asd->isp;
#ifdef ISP2401
atomic_inc(&pipe->wdt_count);
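
The watchdog hunk above is part of the timer-API conversion: the callback now takes a struct timer_list pointer and recovers its containing object with from_timer() instead of casting an unsigned long back to a pointer. A minimal sketch of that pattern under assumed names; struct hypo_dev and its fields are illustrative, not taken from the driver.

/*
 * Sketch only: timer_setup()/from_timer() usage as converted above.
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct hypo_dev {
	struct timer_list wdt;
	int wdt_count;
};

static void hypo_wdt(struct timer_list *t)
{
	/* Recover the containing structure from the embedded timer. */
	struct hypo_dev *dev = from_timer(dev, t, wdt);

	dev->wdt_count++;
	pr_debug("watchdog fired %d time(s)\n", dev->wdt_count);
}

static void hypo_wdt_init(struct hypo_dev *dev)
{
	/* timer_setup() replaces the old setup_timer()/unsigned long data. */
	timer_setup(&dev->wdt, hypo_wdt, 0);
	mod_timer(&dev->wdt, jiffies + msecs_to_jiffies(1000));
}

Because the callback receives the timer pointer itself, callers such as atomisp_css_isr_thread() now pass &isp->asd[0].wdt rather than a cast device pointer, as the later hunk in this file shows.
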
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
index 31ba4e613d13..bdc73862fb79 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -85,11 +81,7 @@ static inline void __iomem *atomisp_get_io_virt_addr(unsigned int address)
void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
void atomisp_wdt_work(struct work_struct *work);
-#ifndef ISP2401
-void atomisp_wdt(unsigned long isp_addr);
-#else
-void atomisp_wdt(unsigned long pipe_addr);
-#endif
+void atomisp_wdt(struct timer_list *t);
void atomisp_setup_flash(struct atomisp_sub_device *asd);
irqreturn_t atomisp_isr(int irq, void *dev);
irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
index 69d1526da362..2558193045a6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
index fb8b8fab4e92..3ef850cd25bd 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
index 05897b747349..6e87aa5aab4c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -155,7 +151,7 @@ static void atomisp_css2_hw_store(hrt_address addr,
const void *from, uint32_t n)
{
unsigned long flags;
- unsigned i;
+ unsigned int i;
unsigned int _to = (unsigned int)addr;
const char *_from = (const char *)from;
@@ -168,7 +164,7 @@ static void atomisp_css2_hw_store(hrt_address addr,
static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
{
unsigned long flags;
- unsigned i;
+ unsigned int i;
char *_to = (char *)to;
unsigned int _from = (unsigned int)addr;
@@ -232,9 +228,11 @@ static void __dump_pipe_config(struct atomisp_sub_device *asd,
unsigned int pipe_id)
{
struct atomisp_device *isp = asd->isp;
+
if (stream_env->pipes[pipe_id]) {
struct ia_css_pipe_config *p_config;
struct ia_css_pipe_extra_config *pe_config;
+
p_config = &stream_env->pipe_configs[pipe_id];
pe_config = &stream_env->pipe_extra_configs[pipe_id];
dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
@@ -507,6 +505,7 @@ static int __destroy_stream(struct atomisp_sub_device *asd,
static int __destroy_streams(struct atomisp_sub_device *asd, bool force)
{
int ret, i;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
ret = __destroy_stream(asd, &asd->stream_env[i], force);
if (ret)
@@ -573,6 +572,7 @@ static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
struct atomisp_device *isp = asd->isp;
int ret = 0;
int i;
+
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
if (!stream_env->pipes[i] ||
!(force || stream_env->update_pipe[i]))
@@ -892,12 +892,12 @@ static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
{
int ret = 0;
- if (0 == opt)
+ if (opt == 0)
isp->css_env.isp_css_env.print_env.debug_print = NULL;
- else if (1 == opt)
+ else if (opt == 1)
isp->css_env.isp_css_env.print_env.debug_print =
atomisp_css2_dbg_ftrace_print;
- else if (2 == opt)
+ else if (opt == 2)
isp->css_env.isp_css_env.print_env.debug_print =
atomisp_css2_dbg_print;
else
@@ -1051,6 +1051,7 @@ int atomisp_css_irq_enable(struct atomisp_device *isp,
void atomisp_css_init_struct(struct atomisp_sub_device *asd)
{
int i, j;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
asd->stream_env[i].stream = NULL;
for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
@@ -1189,6 +1190,7 @@ int atomisp_css_start(struct atomisp_sub_device *asd,
struct atomisp_device *isp = asd->isp;
bool sp_is_started = false;
int ret = 0, i = 0;
+
if (in_reset) {
if (__destroy_streams(asd, true))
dev_warn(isp->dev, "destroy stream failed.\n");
@@ -1976,6 +1978,7 @@ void atomisp_css_enable_raw_binning(struct atomisp_sub_device *asd,
void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
{
int i;
+
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.pipe_configs[i].enable_dz = enable;
@@ -2002,6 +2005,7 @@ void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
int i;
struct atomisp_device *isp = asd->isp;
unsigned int size_mem_words;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
asd->stream_env[i].stream_config.mode = mode;
@@ -2275,6 +2279,7 @@ int atomisp_css_stop(struct atomisp_sub_device *asd,
if (!in_reset) {
struct atomisp_stream_env *stream_env;
int i, j;
+
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
stream_env = &asd->stream_env[i];
for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
@@ -2801,6 +2806,7 @@ static void __configure_video_vf_output(struct atomisp_sub_device *asd,
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
struct ia_css_frame_info *css_output_info;
+
stream_env->pipe_configs[pipe_id].mode =
__pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
@@ -4464,7 +4470,8 @@ int atomisp_css_load_acc_binary(struct atomisp_sub_device *asd,
static struct atomisp_sub_device *__get_atomisp_subdev(
struct ia_css_pipe *css_pipe,
struct atomisp_device *isp,
- enum atomisp_input_stream_id *stream_id) {
+ enum atomisp_input_stream_id *stream_id)
+{
int i, j, k;
struct atomisp_sub_device *asd;
struct atomisp_stream_env *stream_env;
@@ -4515,7 +4522,7 @@ int atomisp_css_isr_thread(struct atomisp_device *isp,
for (i = 0; i < isp->num_of_streams; i++)
atomisp_wdt_stop(&isp->asd[i], 0);
#ifndef ISP2401
- atomisp_wdt((unsigned long)isp);
+ atomisp_wdt(&isp->asd[0].wdt);
#else
queue_work(isp->wdt_work_queue, &isp->wdt_work);
#endif
@@ -4659,7 +4666,7 @@ int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
int atomisp_css_dump_blob_infor(void)
{
struct ia_css_blob_descr *bd = sh_css_blob_info;
- unsigned i, nm = sh_css_num_binaries;
+ unsigned int i, nm = sh_css_num_binaries;
if (nm == 0)
return -EPERM;
@@ -4695,7 +4702,7 @@ int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
int ret;
ret = __set_css_print_env(isp, opt);
- if (0 == ret)
+ if (ret == 0)
dbg_func = opt;
return ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
index b62ad9082018..b03711668eda 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
index 0592ac1f2832..44c21813a06e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifdef CONFIG_COMPAT
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
index 750478f614d6..685da0f48bab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_COMPAT_IOCTL32_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
index 2c5036685447..fa03b78c3580 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
index faa9cf7e05c0..0191d28a55bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_csi2.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_CSI2_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
index 204d941cdb6c..54e28605b5de 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_dfs_tables.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_DFS_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
index 1ae2358de8d4..7129b88456cb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -162,7 +158,7 @@ static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
return size;
}
-static struct driver_attribute iunit_drvfs_attrs[] = {
+static const struct driver_attribute iunit_drvfs_attrs[] = {
__ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
__ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
__ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
index 5cb717b0c1c2..b91bfef21639 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_drvfs.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
index c766119bf798..377ec2a9fa6d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
index 1b86abd35c38..61fdeb5ee60a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_file.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index d8cfed358d55..dd7596d8763d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
@@ -1137,10 +1133,8 @@ static int remove_pad_from_frame(struct atomisp_device *isp,
ia_css_ptr store = load;
buffer = kmalloc(width*sizeof(load), GFP_KERNEL);
- if (!buffer) {
- dev_err(isp->dev, "out of memory.\n");
+ if (!buffer)
return -ENOMEM;
- }
load += ISP_LEFT_PAD;
for (i = 0; i < height; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
index 8471e391501a..2faab3429d43 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
index e9650cb75ba5..55ba185b43a0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_helper.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _atomisp_helper_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index 7542a72f1d0f..52a6f8002048 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_INTERNAL_H__
@@ -29,9 +25,6 @@
#include <linux/pm_qos.h>
#include <linux/idr.h>
-#include <asm/intel-mid.h>
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
#include <media/media-device.h>
#include <media/v4l2-subdev.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
index 717647951fb6..339b5d31e1f1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
@@ -14,17 +14,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/delay.h>
#include <linux/pci.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
@@ -943,10 +938,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
while (count--) {
s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
- if (!s3a_buf) {
- dev_err(isp->dev, "s3a stat buf alloc failed\n");
+ if (!s3a_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, s3a_buf, NULL, NULL)) {
@@ -965,7 +958,6 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
if (!dis_buf) {
- dev_err(isp->dev, "dis stat buf alloc failed\n");
kfree(s3a_buf);
goto error;
}
@@ -990,10 +982,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
while (count--) {
md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
GFP_KERNEL);
- if (!md_buf) {
- dev_err(isp->dev, "metadata buf alloc failed\n");
+ if (!md_buf)
goto error;
- }
if (atomisp_css_allocate_stat_buffers(
asd, stream_id, NULL, NULL, md_buf)) {
@@ -2943,13 +2933,15 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
#else
if (isp->motor)
#endif
- err = v4l2_subdev_call(
#ifndef ISP2401
+ err = v4l2_subdev_call(
isp->inputs[asd->input_curr].motor,
+ core, ioctl, cmd, arg);
#else
+ err = v4l2_subdev_call(
isp->motor,
-#endif
core, ioctl, cmd, arg);
+#endif
else
err = v4l2_subdev_call(
isp->inputs[asd->input_curr].camera,
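The hunk above rebalances the ISP2401 #ifdef so that each branch contains a complete v4l2_subdev_call() statement; previously the call's argument list was split across the two preprocessor branches, which is fragile once the macro expansion and the conditional compilation disagree. Roughly, the result reads as follows (a sketch assembled from the lines shown in the hunk):

#ifndef ISP2401
	err = v4l2_subdev_call(isp->inputs[asd->input_curr].motor,
			       core, ioctl, cmd, arg);
#else
	err = v4l2_subdev_call(isp->motor, core, ioctl, cmd, arg);
#endif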
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
index fb5fadb5332b..0d2785b9ef99 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
index 744ab6eb42a0..70b53988553c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -25,7 +21,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
index ba5c2ab14253..f3d61827ae8c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_SUBDEV_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
index af09218d8b71..319ded6a96da 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tables.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __ATOMISP_TABLES_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
index 48b96048cab4..b71cc7bcdbab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
index 64ab60f02e85..af354c4bfd3e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
index 5ce282d6c939..462b296554c7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_trace_event.h
@@ -12,10 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#undef TRACE_SYSTEM
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
index 663aa916e3ca..3c260f8b52e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include <linux/module.h>
@@ -28,6 +24,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+
#include "../../include/linux/atomisp_gmin_platform.h"
#include "atomisp_cmd.h"
@@ -46,7 +44,6 @@
#include "hrt/hive_isp_css_mm_hrt.h"
#include "device_access.h"
-#include <asm/intel-mid.h>
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
@@ -386,28 +383,23 @@ done:
*/
static void punit_ddr_dvfs_enable(bool enable)
{
- int reg = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSDVFS);
int door_bell = 1 << 8;
int max_wait = 30;
+ int reg;
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
if (enable) {
reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
} else {
reg |= (MRFLD_BIT1 | door_bell);
reg &= ~(MRFLD_BIT0);
}
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSDVFS, reg);
-
- /*Check Req_ACK to see freq status, wait until door_bell is cleared*/
- if (reg & door_bell) {
- while (max_wait--) {
- if (0 == (intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSDVFS) & door_bell))
- break;
-
- usleep_range(100, 500);
- }
+ /* Check Req_ACK to see freq status, wait until door_bell is cleared */
+ while ((reg & door_bell) && max_wait--) {
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
+ usleep_range(100, 500);
}
if (max_wait == -1)
@@ -421,10 +413,10 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
u32 reg_value;
/* writing 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
reg_value |= MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/*WA:Enable DVFS*/
if (IS_CHT)
@@ -437,8 +429,7 @@ int atomisp_mrfld_power_down(struct atomisp_device *isp)
*/
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT,
- MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-off in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x3 */
@@ -477,14 +468,14 @@ int atomisp_mrfld_power_up(struct atomisp_device *isp)
msleep(10);
/* writing 0x0 to ISPSSPM0 bit[1:0] to power on the IUNIT */
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
reg_value &= ~MRFLD_ISPSSPM0_ISPSSC_MASK;
- intel_mid_msgbus_write32(PUNIT_PORT, MRFLD_ISPSSPM0, reg_value);
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSPM0, reg_value);
/* FIXME: experienced value for delay */
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- reg_value = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSPM0);
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &reg_value);
dev_dbg(isp->dev, "power-on in progress, ISPSSPM0: 0x%x\n",
reg_value);
/* wait until ISPSSPM0 bit[25:24] shows 0x0 */
@@ -755,7 +746,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
&subdevs->v4l2_subdev.board_info;
struct i2c_adapter *adapter =
i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id);
- struct camera_sensor_platform_data *sensor_pdata;
int sensor_num, i;
if (adapter == NULL) {
@@ -807,13 +797,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp)
* pixel_format.
*/
isp->inputs[isp->input_cnt].frame_size.pixel_format = 0;
- sensor_pdata = (struct camera_sensor_platform_data *)
- board_info->platform_data;
- if (sensor_pdata->get_camera_caps)
- isp->inputs[isp->input_cnt].camera_caps =
- sensor_pdata->get_camera_caps();
- else
- isp->inputs[isp->input_cnt].camera_caps =
+ isp->inputs[isp->input_cnt].camera_caps =
atomisp_get_default_camera_caps();
sensor_num = isp->inputs[isp->input_cnt]
.camera_caps->sensor_num;
@@ -1020,7 +1004,7 @@ csi_and_subdev_probe_failed:
v4l2_device_unregister(&isp->v4l2_dev);
v4l2_device_failed:
media_device_unregister(&isp->media_dev);
- media_device_cleanup(&isp->media_dev);
+ media_device_cleanup(&isp->media_dev);
return ret;
}
@@ -1155,17 +1139,12 @@ static int init_atomisp_wdts(struct atomisp_device *isp)
struct atomisp_sub_device *asd = &isp->asd[i];
asd = &isp->asd[i];
#ifndef ISP2401
- setup_timer(&asd->wdt, atomisp_wdt, (unsigned long)isp);
+ timer_setup(&asd->wdt, atomisp_wdt, 0);
#else
- setup_timer(&asd->video_out_capture.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_capture);
- setup_timer(&asd->video_out_preview.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_preview);
- setup_timer(&asd->video_out_vf.wdt,
- atomisp_wdt, (unsigned long)&asd->video_out_vf);
- setup_timer(&asd->video_out_video_capture.wdt,
- atomisp_wdt,
- (unsigned long)&asd->video_out_video_capture);
+ timer_setup(&asd->video_out_capture.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_preview.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0);
+ timer_setup(&asd->video_out_video_capture.wdt, atomisp_wdt, 0);
#endif
}
return 0;
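The hunk above follows the tree-wide timer conversion: setup_timer() with an unsigned long cookie becomes timer_setup(), and the callback receives a struct timer_list * from which it recovers its container via from_timer(). A hedged sketch of the matching callback change, assuming the video_out_* members are struct atomisp_video_pipe with a wdt field as the hunk suggests (the body is illustrative, not this driver's actual handler):

/* old: void atomisp_wdt(unsigned long data) casting 'data' back to a pointer */
void atomisp_wdt(struct timer_list *t)
{
	struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt);

	/* ... handle the watchdog timeout for 'pipe' ... */
}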
@@ -1323,7 +1302,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
isp->dfs = &dfs_config_cht;
isp->pdev->d3cold_delay = 0;
- val = intel_mid_msgbus_read32(CCK_PORT, CCK_FUSE_REG_0);
+ iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
case 0x00:
isp->hpll_freq = HPLL_FREQ_800MHZ;
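Background on this file's conversion: the old intel_mid_msgbus_read32()/write32() helpers are replaced by the generic IOSF MBI accessors from <asm/iosf_mbi.h>. Unlike the old helpers, iosf_mbi_read()/iosf_mbi_write() return an error code and pass the read value back through a pointer, so callers could additionally check the return value (the hunks above ignore it). A minimal usage sketch under those assumptions, reusing the MRFLD_ISPSSPM0 define from the hunks:

#include <asm/iosf_mbi.h>

static int read_ispsspm0(u32 *val)
{
	/* unit and opcode match the converted calls above */
	return iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, val);
}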
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
index 191b2e57a810..944a6cf40a2f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
index 766218ed3649..914aa7f98700 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf.h
@@ -18,7 +18,6 @@
#include <sp.h>
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <assert_support.h>
#include <platform_support.h>
#include "ia_css_circbuf_comm.h"
@@ -45,7 +44,7 @@ struct ia_css_circbuf_s {
* @param elems An array of elements.
* @param desc The descriptor set to the size using ia_css_circbuf_desc_init().
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
+extern void ia_css_circbuf_create(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t *elems,
ia_css_circbuf_desc_t *desc);
@@ -55,7 +54,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_create(
*
* @param cb The pointer to the circular buffer.
*/
-STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
+extern void ia_css_circbuf_destroy(
ia_css_circbuf_t *cb);
/**
@@ -68,7 +67,7 @@ STORAGE_CLASS_EXTERN void ia_css_circbuf_destroy(
*
* @return the pop-out value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
+extern uint32_t ia_css_circbuf_pop(
ia_css_circbuf_t *cb);
/**
@@ -82,7 +81,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_pop(
*
* @return the extracted value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
+extern uint32_t ia_css_circbuf_extract(
ia_css_circbuf_t *cb,
int offset);
@@ -97,7 +96,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_extract(
* @param elem The pointer to the element.
* @param val The value to be set.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
+static inline void ia_css_circbuf_elem_set_val(
ia_css_circbuf_elem_t *elem,
uint32_t val)
{
@@ -111,7 +110,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_set_val(
*
* @param elem The pointer to the element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
+static inline void ia_css_circbuf_elem_init(
ia_css_circbuf_elem_t *elem)
{
OP___assert(elem != NULL);
@@ -124,7 +123,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_init(
* @param src The element as the copy source.
* @param dest The element as the copy destination.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
+static inline void ia_css_circbuf_elem_cpy(
ia_css_circbuf_elem_t *src,
ia_css_circbuf_elem_t *dest)
{
@@ -143,7 +142,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_elem_cpy(
*
* @return the position at offset.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_get_pos_at_offset(
ia_css_circbuf_t *cb,
uint32_t base,
int offset)
@@ -176,7 +175,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
+static inline int ia_css_circbuf_get_offset(
ia_css_circbuf_t *cb,
uint32_t src_pos,
uint32_t dest_pos)
@@ -201,7 +200,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_get_offset(
*
* TODO: Test this API.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
+static inline uint32_t ia_css_circbuf_get_size(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -217,7 +216,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_size(
*
* @return the number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
+static inline uint32_t ia_css_circbuf_get_num_elems(
ia_css_circbuf_t *cb)
{
int num;
@@ -239,7 +238,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_num_elems(
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
+static inline bool ia_css_circbuf_is_empty(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -257,7 +256,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
+static inline bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
OP___assert(cb->desc != NULL);
@@ -274,7 +273,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_is_full(ia_css_circbuf_t *cb)
* @param cb The pointer to the circular buffer.
* @param elem The new element.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_write(
+static inline void ia_css_circbuf_write(
ia_css_circbuf_t *cb,
ia_css_circbuf_elem_t elem)
{
@@ -298,7 +297,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_write(
* @param cb The pointer to the circular buffer.
* @param val The value to be pushed in.
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_push(
+static inline void ia_css_circbuf_push(
ia_css_circbuf_t *cb,
uint32_t val)
{
@@ -321,7 +320,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_push(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
+static inline uint32_t ia_css_circbuf_get_free_elems(
ia_css_circbuf_t *cb)
{
OP___assert(cb != NULL);
@@ -338,7 +337,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_get_free_elems(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
+extern uint32_t ia_css_circbuf_peek(
ia_css_circbuf_t *cb,
int offset);
@@ -350,7 +349,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek(
*
* @return the elements value.
*/
-STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
+extern uint32_t ia_css_circbuf_peek_from_start(
ia_css_circbuf_t *cb,
int offset);
@@ -369,7 +368,7 @@ STORAGE_CLASS_EXTERN uint32_t ia_css_circbuf_peek_from_start(
* @return true on successfully increasing the size
* false on failure
*/
-STORAGE_CLASS_EXTERN bool ia_css_circbuf_increase_size(
+extern bool ia_css_circbuf_increase_size(
ia_css_circbuf_t *cb,
unsigned int sz_delta,
ia_css_circbuf_elem_t *elems);
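Throughout the CSS headers this series drops the firmware-style STORAGE_CLASS_EXTERN / STORAGE_CLASS_INLINE markers (previously supplied by the now-removed storage_class.h) in favour of plain C extern and static inline, so the headers no longer depend on that shim. For reference, the old macros most likely reduced to something like the following (an assumption, since storage_class.h itself is not shown in this diff):

/* hypothetical reconstruction of the removed storage_class.h */
#define STORAGE_CLASS_EXTERN extern
#define STORAGE_CLASS_INLINE static inline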
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
index a8447d409c31..8dd7cd6cd3d8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/base/circbuf/interface/ia_css_circbuf_desc.h
@@ -17,7 +17,6 @@
#include <type_support.h>
#include <math_support.h>
-#include <storage_class.h>
#include <platform_support.h>
#include <sp.h>
#include "ia_css_circbuf_comm.h"
@@ -35,7 +34,7 @@
* - true when it is empty.
* - false when it is not empty.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
+static inline bool ia_css_circbuf_desc_is_empty(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -52,7 +51,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_empty(
* - true when it is full.
* - false when it is not full.
*/
-STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
+static inline bool ia_css_circbuf_desc_is_full(
ia_css_circbuf_desc_t *cb_desc)
{
OP___assert(cb_desc != NULL);
@@ -65,7 +64,7 @@ STORAGE_CLASS_INLINE bool ia_css_circbuf_desc_is_full(
* @param cb_desc The pointer circular buffer descriptor
* @param size The size of the circular buffer
*/
-STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
+static inline void ia_css_circbuf_desc_init(
ia_css_circbuf_desc_t *cb_desc,
int8_t size)
{
@@ -82,7 +81,7 @@ STORAGE_CLASS_INLINE void ia_css_circbuf_desc_init(
*
* @return the position in the circular buffer descriptor.
*/
-STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
+static inline uint8_t ia_css_circbuf_desc_get_pos_at_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t base,
int offset)
@@ -114,7 +113,7 @@ STORAGE_CLASS_INLINE uint8_t ia_css_circbuf_desc_get_pos_at_offset(
*
* @return the offset.
*/
-STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
+static inline int ia_css_circbuf_desc_get_offset(
ia_css_circbuf_desc_t *cb_desc,
uint32_t src_pos,
uint32_t dest_pos)
@@ -135,7 +134,7 @@ STORAGE_CLASS_INLINE int ia_css_circbuf_desc_get_offset(
*
* @return The number of available elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
+static inline uint32_t ia_css_circbuf_desc_get_num_elems(
ia_css_circbuf_desc_t *cb_desc)
{
int num;
@@ -155,7 +154,7 @@ STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_num_elems(
*
* @return: The number of free elements.
*/
-STORAGE_CLASS_INLINE uint32_t ia_css_circbuf_desc_get_free_elems(
+static inline uint32_t ia_css_circbuf_desc_get_free_elems(
ia_css_circbuf_desc_t *cb_desc)
{
uint32_t num;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
index 17d3b7de93ba..98a2a3e9b3e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/pipe/src/pipe_binarydesc.c
@@ -22,6 +22,7 @@
#include <assert_support.h>
/* HRT_GDC_N */
#include "gdc_device.h"
+#include <linux/kernel.h>
/* This module provides binary descriptions used to find a binary. Since
 * every stage is associated with a binary, it implicitly helps stage
@@ -147,11 +148,9 @@ enum ia_css_err sh_css_bds_factor_get_numerator_denominator(
unsigned int *bds_factor_denominator)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
if (bds_factors_list[i].bds_factor == bds_factor) {
*bds_factor_numerator = bds_factors_list[i].numerator;
*bds_factor_denominator = bds_factors_list[i].denominator;
@@ -170,8 +169,6 @@ enum ia_css_err binarydesc_calculate_bds_factor(
unsigned int *bds_factor)
{
unsigned int i;
- unsigned int bds_list_size = sizeof(bds_factors_list) /
- sizeof(struct sh_css_bds_factor);
unsigned int in_w = input_res.width,
in_h = input_res.height,
out_w = output_res.width, out_h = output_res.height;
@@ -186,7 +183,7 @@ enum ia_css_err binarydesc_calculate_bds_factor(
assert(out_w != 0 && out_h != 0);
/* Loop over all bds factors until a match is found */
- for (i = 0; i < bds_list_size; i++) {
+ for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
unsigned num = bds_factors_list[i].numerator;
unsigned den = bds_factors_list[i].denominator;
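The two hunks above replace the open-coded sizeof(array)/sizeof(element) bound with the ARRAY_SIZE() macro from <linux/kernel.h> (hence the added include); ARRAY_SIZE() is shorter and fails to compile if a pointer is accidentally passed instead of an array. A generic illustration of the pattern (not this driver's actual table):

#include <linux/kernel.h>

static const int factors[] = { 1, 2, 4, 8 };

static bool factor_supported(int f)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(factors); i++)	/* bound computed at compile time */
		if (factors[i] == f)
			return true;
	return false;
}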
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
index 08f486e20a65..54193789a809 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/camera/util/src/util.c
@@ -111,7 +111,7 @@ unsigned int ia_css_util_input_format_bpp(
break;
}
-return rval;
+ return rval;
}
enum ia_css_err ia_css_util_check_vf_info(
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2400_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
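The one-line change above fixes a mismatched include guard: the header tested _if_subsystem_defs_h but defined _if_subsystem_defs_h__, so the guard never took effect and a second inclusion would have re-emitted all of its definitions. The same fix is repeated for the css_2401_csi2p_system and css_2401_system copies of this header below. The corrected pattern, for clarity:

#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
/* ... register definitions ... */
#endif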
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
index 5819bcff5e55..6720ab55d6f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/host/csi_rx_private.h
@@ -34,7 +34,7 @@
* @brief Get the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
+static inline void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -73,7 +73,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_state(
* @brief Get the state of the csi rx fe dlane process.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
+static inline void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state)
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_get_dlane_state(
* @brief dump the csi rx fe state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
+static inline void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state)
{
@@ -118,7 +118,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_dump_state(
* @brief Get the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
+static inline void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -181,7 +181,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_get_state(
* @brief Dump the csi rx be state.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
+static inline void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state)
{
@@ -225,7 +225,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_dump_state(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
+static inline hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg)
{
@@ -239,7 +239,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_fe_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
+static inline void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value)
@@ -253,7 +253,7 @@ STORAGE_CLASS_CSI_RX_C void csi_rx_fe_ctrl_reg_store(
* @brief Load the register value.
* Refer to "csi_rx_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
+static inline hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg)
{
@@ -267,7 +267,7 @@ STORAGE_CLASS_CSI_RX_C hrt_data csi_rx_be_ctrl_reg_load(
* @brief Store a value to the register.
* Refer to "ibuf_ctrl_public.h" for details.
*/
-STORAGE_CLASS_CSI_RX_C void csi_rx_be_ctrl_reg_store(
+static inline void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_csi2p_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
index 16bfe1d80bc9..7766f78cd123 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/css_2401_system/hrt/input_formatter_subsystem_defs.h
@@ -12,7 +12,7 @@
* more details.
*/
-#ifndef _if_subsystem_defs_h
+#ifndef _if_subsystem_defs_h__
#define _if_subsystem_defs_h__
#define HIVE_IFMT_GP_REGS_INPUT_SWITCH_LUT_REG_0 0
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
index 87a25d4289ec..770db7dff5d3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/dma.c
@@ -12,7 +12,7 @@
* more details.
*/
-#include <stddef.h> /* NULL */
+#include <linux/kernel.h>
#include "dma.h"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
index 9d3a29696094..bcfb734c2ed3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/event_fifo_private.h
@@ -28,7 +28,7 @@ STORAGE_CLASS_EVENT_C void event_wait_for(const event_ID_t ID)
assert(ID < N_EVENT_ID);
assert(event_source_addr[ID] != ((hrt_address)-1));
(void)ia_css_device_load_uint32(event_source_addr[ID]);
-return;
+ return;
}
STORAGE_CLASS_EVENT_C void cnd_event_wait_for(const event_ID_t ID,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
index 1087944d637f..1bf292401adc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor.c
@@ -38,12 +38,12 @@ STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = {
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id);
@@ -546,7 +546,7 @@ void fifo_monitor_get_state(
return;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
+static inline bool fifo_monitor_status_valid (
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
@@ -556,7 +556,7 @@ STORAGE_CLASS_INLINE bool fifo_monitor_status_valid (
return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1;
}
-STORAGE_CLASS_INLINE bool fifo_monitor_status_accept(
+static inline bool fifo_monitor_status_accept(
const fifo_monitor_ID_t ID,
const unsigned int reg,
const unsigned int port_id)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
index 618b2f7e9c75..d58cd7d1828d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/fifo_monitor_private.h
@@ -33,26 +33,26 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_switch_set(
const fifo_switch_t switch_id,
const hrt_data sel)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
gp_device_reg_store(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id], sel);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_switch_get(
const fifo_monitor_ID_t ID,
const fifo_switch_t switch_id)
{
-assert(ID == FIFO_MONITOR0_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-assert(switch_id < N_FIFO_SWITCH);
+ assert(ID == FIFO_MONITOR0_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(switch_id < N_FIFO_SWITCH);
(void)ID;
-return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
+ return gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]);
}
@@ -61,19 +61,19 @@ STORAGE_CLASS_FIFO_MONITOR_C void fifo_monitor_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_FIFO_MONITOR_C hrt_data fifo_monitor_reg_load(
const fifo_monitor_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_FIFO_MONITOR_ID);
-assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_FIFO_MONITOR_ID);
+ assert(FIFO_MONITOR_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(FIFO_MONITOR_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __FIFO_MONITOR_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
index 69fa616889b1..1966b147f8ab 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gdc.c
@@ -22,12 +22,12 @@
/*
* Local function declarations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value);
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg);
@@ -62,7 +62,7 @@ void gdc_lut_store(
gdc_reg_store(ID, lut_offset++, word_0);
gdc_reg_store(ID, lut_offset++, word_1);
}
-return;
+ return;
}
/*
@@ -103,25 +103,25 @@ int gdc_get_unity(
{
assert(ID < N_GDC_ID);
(void)ID;
-return (int)(1UL << HRT_GDC_FRAC_BITS);
+ return (int)(1UL << HRT_GDC_FRAC_BITS);
}
/*
* Local function implementations
*/
-STORAGE_CLASS_INLINE void gdc_reg_store(
+static inline void gdc_reg_store(
const gdc_ID_t ID,
const unsigned int reg,
const hrt_data value)
{
ia_css_device_store_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
-STORAGE_CLASS_INLINE hrt_data gdc_reg_load(
+static inline hrt_data gdc_reg_load(
const gdc_ID_t ID,
const unsigned int reg)
{
-return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GDC_BASE[ID] + reg*sizeof(hrt_data));
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
index 9a34ac052adf..da88aa3af664 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device.c
@@ -104,5 +104,5 @@ void gp_device_get_state(
_REG_GP_SYNCGEN_FRAME_CNT_ADDR);
state->soft_reset = gp_device_reg_load(ID,
_REG_GP_SOFT_RESET_ADDR);
-return;
+ return;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
index bce1fdf79114..7c0362c29411 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gp_device_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_GP_DEVICE_C void gp_device_reg_store(
const unsigned int reg_addr,
const hrt_data value)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(GP_DEVICE_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_GP_DEVICE_C hrt_data gp_device_reg_load(
const gp_device_ID_t ID,
const hrt_address reg_addr)
{
-assert(ID < N_GP_DEVICE_ID);
-assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
+ assert(ID < N_GP_DEVICE_ID);
+ assert(GP_DEVICE_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(GP_DEVICE_BASE[ID] + reg_addr);
}
#endif /* __GP_DEVICE_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
index 6ace2184b522..b6ebf34eaa9d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/gpio_private.h
@@ -29,7 +29,7 @@ STORAGE_CLASS_GPIO_C void gpio_reg_store(
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
@@ -38,7 +38,7 @@ STORAGE_CLASS_GPIO_C hrt_data gpio_reg_load(
{
OP___assert(ID < N_GPIO_ID);
OP___assert(GPIO_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
+ return ia_css_device_load_uint32(GPIO_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __GPIO_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
index 2b636e0e6482..32a780380e11 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/hmem_private.h
@@ -22,9 +22,9 @@
STORAGE_CLASS_HMEM_C size_t sizeof_hmem(
const hmem_ID_t ID)
{
-assert(ID < N_HMEM_ID);
+ assert(ID < N_HMEM_ID);
(void)ID;
-return HMEM_SIZE*sizeof(hmem_data_t);
+ return HMEM_SIZE*sizeof(hmem_data_t);
}
#endif /* __HMEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
index d34933e44aa9..2f42a9c2771c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_formatter_private.h
@@ -26,21 +26,21 @@ STORAGE_CLASS_INPUT_FORMATTER_C void input_formatter_reg_store(
const hrt_address reg_addr,
const hrt_data value)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
ia_css_device_store_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr, value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_FORMATTER_C hrt_data input_formatter_reg_load(
const input_formatter_ID_t ID,
const unsigned int reg_addr)
{
-assert(ID < N_INPUT_FORMATTER_ID);
-assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
-assert((reg_addr % sizeof(hrt_data)) == 0);
-return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
+ assert(ID < N_INPUT_FORMATTER_ID);
+ assert(INPUT_FORMATTER_BASE[ID] != (hrt_address)-1);
+ assert((reg_addr % sizeof(hrt_data)) == 0);
+ return ia_css_device_load_uint32(INPUT_FORMATTER_BASE[ID] + reg_addr);
}
#endif /* __INPUT_FORMATTER_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
index f35e18987b67..bd6821e436b2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system.c
@@ -81,27 +81,27 @@ static input_system_error_t input_system_multiplexer_cfg(
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state);
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state);
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state);
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state);
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state);
@@ -173,7 +173,7 @@ void input_system_get_state(
&(state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]));
}
-return;
+ return;
}
void receiver_get_state(
@@ -245,7 +245,7 @@ void receiver_get_state(
state->be_irq_clear = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
-return;
+ return;
}
bool is_mipi_format_yuv420(
@@ -258,7 +258,7 @@ bool is_mipi_format_yuv420(
(mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
/* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
-return is_yuv420;
+ return is_yuv420;
}
void receiver_set_compression(
@@ -300,7 +300,7 @@ void receiver_set_compression(
reg = ((field_id < 6)?(val << (field_id * 5)):(val << ((field_id - 6) * 5)));
receiver_reg_store(ID, addr, reg);
-return;
+ return;
}
void receiver_port_enable(
@@ -319,7 +319,7 @@ void receiver_port_enable(
receiver_port_reg_store(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg);
-return;
+ return;
}
bool is_receiver_port_enabled(
@@ -328,7 +328,7 @@ bool is_receiver_port_enabled(
{
hrt_data reg = receiver_port_reg_load(ID, port_ID,
_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
-return ((reg & 0x01) != 0);
+ return ((reg & 0x01) != 0);
}
void receiver_irq_enable(
@@ -338,14 +338,14 @@ void receiver_irq_enable(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info);
-return;
+ return;
}
rx_irq_info_t receiver_get_irq_info(
const rx_ID_t ID,
const mipi_port_ID_t port_ID)
{
-return receiver_port_reg_load(ID,
+ return receiver_port_reg_load(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
}
@@ -356,10 +356,10 @@ void receiver_irq_clear(
{
receiver_port_reg_store(ID,
port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void capture_unit_get_state(
+static inline void capture_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
capture_unit_state_t *state)
@@ -418,10 +418,10 @@ STORAGE_CLASS_INLINE void capture_unit_get_state(
sub_id,
CAPT_FSM_STATE_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void acquisition_unit_get_state(
+static inline void acquisition_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
acquisition_unit_state_t *state)
@@ -468,10 +468,10 @@ STORAGE_CLASS_INLINE void acquisition_unit_get_state(
sub_id,
ACQ_INT_CNTR_INFO_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void ctrl_unit_get_state(
+static inline void ctrl_unit_get_state(
const input_system_ID_t ID,
const sub_system_ID_t sub_id,
ctrl_unit_state_t *state)
@@ -551,10 +551,10 @@ STORAGE_CLASS_INLINE void ctrl_unit_get_state(
sub_id,
ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
-return;
+ return;
}
-STORAGE_CLASS_INLINE void mipi_port_get_state(
+static inline void mipi_port_get_state(
const rx_ID_t ID,
const mipi_port_ID_t port_ID,
mipi_port_state_t *state)
@@ -587,10 +587,10 @@ STORAGE_CLASS_INLINE void mipi_port_get_state(
state->lane_rx_count[i] = (uint8_t)((state->rx_count)>>(i*8));
}
-return;
+ return;
}
-STORAGE_CLASS_INLINE void rx_channel_get_state(
+static inline void rx_channel_get_state(
const rx_ID_t ID,
const unsigned int ch_id,
rx_channel_state_t *state)
@@ -602,30 +602,30 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
assert(state != NULL);
switch (ch_id) {
- case 0:
- state->comp_scheme0 = receiver_reg_load(ID,
+ case 0:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
- break;
- case 1:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 1:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
- break;
- case 2:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 2:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
- break;
- case 3:
- state->comp_scheme0 = receiver_reg_load(ID,
+ break;
+ case 3:
+ state->comp_scheme0 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
- state->comp_scheme1 = receiver_reg_load(ID,
+ state->comp_scheme1 = receiver_reg_load(ID,
_HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
- break;
+ break;
}
/* See Table 7.1.17,..., 7.1.24 */
@@ -640,7 +640,7 @@ STORAGE_CLASS_INLINE void rx_channel_get_state(
state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
}
-return;
+ return;
}
// MW: "2400" in the name is not good, but this is to avoid a naming conflict
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
index ed1b947b00f9..118185eb86e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/input_system_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_reg_load(
const input_system_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
@@ -46,19 +46,19 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_reg_load(
const rx_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
@@ -67,12 +67,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void receiver_port_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
ia_css_device_store_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
@@ -80,11 +80,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data receiver_port_reg_load(
const mipi_port_ID_t port_ID,
const hrt_address reg)
{
-assert(ID < N_RX_ID);
-assert(port_ID < N_MIPI_PORT_ID);
-assert(RX_BASE[ID] != (hrt_address)-1);
-assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_RX_ID);
+ assert(port_ID < N_MIPI_PORT_ID);
+ assert(RX_BASE[ID] != (hrt_address)-1);
+ assert(MIPI_PORT_OFFSET[port_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(RX_BASE[ID] + MIPI_PORT_OFFSET[port_ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
@@ -93,12 +93,12 @@ STORAGE_CLASS_INPUT_SYSTEM_C void input_system_sub_system_reg_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
ia_css_device_store_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
@@ -106,11 +106,11 @@ STORAGE_CLASS_INPUT_SYSTEM_C hrt_data input_system_sub_system_reg_load(
const sub_system_ID_t sub_ID,
const hrt_address reg)
{
-assert(ID < N_INPUT_SYSTEM_ID);
-assert(sub_ID < N_SUB_SYSTEM_ID);
-assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
-assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
+ assert(ID < N_INPUT_SYSTEM_ID);
+ assert(sub_ID < N_SUB_SYSTEM_ID);
+ assert(INPUT_SYSTEM_BASE[ID] != (hrt_address)-1);
+ assert(SUB_SYSTEM_OFFSET[sub_ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(INPUT_SYSTEM_BASE[ID] + SUB_SYSTEM_OFFSET[sub_ID] + reg*sizeof(hrt_data));
}
#endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
index 6b58bc13dc1b..51daf76c2aea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq.c
@@ -22,13 +22,13 @@
#include "platform_support.h" /* hrt_sleep() */
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID);
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID);
@@ -69,7 +69,7 @@ void irq_clear_all(
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
-return;
+ return;
}
/*
@@ -114,7 +114,7 @@ void irq_enable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
void irq_enable_pulse(
@@ -129,7 +129,7 @@ void irq_enable_pulse(
/* output is given as edge, not pulse */
irq_reg_store(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
-return;
+ return;
}
void irq_disable_channel(
@@ -160,7 +160,7 @@ void irq_disable_channel(
irq_wait_for_write_complete(ID);
-return;
+ return;
}
enum hrt_isp_css_irq_status irq_get_channel_id(
@@ -195,7 +195,7 @@ enum hrt_isp_css_irq_status irq_get_channel_id(
if (irq_id != NULL)
*irq_id = (unsigned int)idx;
-return status;
+ return status;
}
static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
@@ -220,7 +220,7 @@ void irq_raise(
(unsigned int)addr, 1);
gp_device_reg_store(GP_DEVICE0_ID,
(unsigned int)addr, 0);
-return;
+ return;
}
void irq_controller_get_state(
@@ -240,7 +240,7 @@ void irq_controller_get_state(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
state->irq_level_not_pulse = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
-return;
+ return;
}
bool any_virq_signal(void)
@@ -248,7 +248,7 @@ bool any_virq_signal(void)
unsigned int irq_status = irq_reg_load(IRQ0_ID,
_HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
-return (irq_status != 0);
+ return (irq_status != 0);
}
void cnd_virq_enable_channel(
@@ -279,7 +279,7 @@ void cnd_virq_enable_channel(
irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
}
}
-return;
+ return;
}
@@ -290,7 +290,7 @@ void virq_clear_all(void)
for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
irq_clear_all(irq_id);
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_signals(
@@ -320,7 +320,7 @@ enum hrt_isp_css_irq_status virq_get_channel_signals(
}
}
-return irq_status;
+ return irq_status;
}
void virq_clear_info(
@@ -333,7 +333,7 @@ void virq_clear_info(
for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
irq_info->irq_status_reg[ID] = 0;
}
-return;
+ return;
}
enum hrt_isp_css_irq_status virq_get_channel_id(
@@ -403,10 +403,10 @@ enum hrt_isp_css_irq_status virq_get_channel_id(
if (irq_id != NULL)
*irq_id = (virq_id_t)idx;
-return status;
+ return status;
}
-STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
+static inline void irq_wait_for_write_complete(
const irq_ID_t ID)
{
assert(ID < N_IRQ_ID);
@@ -415,7 +415,7 @@ STORAGE_CLASS_INLINE void irq_wait_for_write_complete(
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX*sizeof(hrt_data));
}
-STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
+static inline bool any_irq_channel_enabled(
const irq_ID_t ID)
{
hrt_data en_reg;
@@ -425,10 +425,10 @@ STORAGE_CLASS_INLINE bool any_irq_channel_enabled(
en_reg = irq_reg_load(ID,
_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
-return (en_reg != 0);
+ return (en_reg != 0);
}
-STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
+static inline irq_ID_t virq_get_irq_id(
const virq_id_t irq_ID,
unsigned int *channel_ID)
{
@@ -444,5 +444,5 @@ STORAGE_CLASS_INLINE irq_ID_t virq_get_irq_id(
*channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];
-return ID;
+ return ID;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
index eb325e870e88..23a13ac696c2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/irq_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_IRQ_C void irq_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_IRQ_C hrt_data irq_reg_load(
const irq_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_IRQ_ID);
-assert(IRQ_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_IRQ_ID);
+ assert(IRQ_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(IRQ_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __IRQ_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
index 47c21e486c25..531c932a48f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/isp.c
@@ -34,7 +34,7 @@ void cnd_isp_irq_enable(
isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG,
ISP_IRQ_READY_BIT);
}
-return;
+ return;
}
void isp_get_state(
@@ -94,7 +94,7 @@ void isp_get_state(
!isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
ISP_ICACHE_MT_SINK_BIT);
*/
-return;
+ return;
}
/* ISP functions to control the ISP state from the host, even in crun. */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
index b75d0f85d524..a28b67eb66ea 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu.c
@@ -24,20 +24,20 @@ void mmu_set_page_table_base_index(
const hrt_data base_index)
{
mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index);
-return;
+ return;
}
hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID)
{
-return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
+ return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX);
}
void mmu_invalidate_cache(
const mmu_ID_t ID)
{
mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1);
-return;
+ return;
}
void mmu_invalidate_cache_all(void)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
index 392b6cc24e8f..7377666f6eb7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/mmu_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_MMU_H void mmu_reg_store(
const unsigned int reg,
const hrt_data value)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_MMU_H hrt_data mmu_reg_load(
const mmu_ID_t ID,
const unsigned int reg)
{
-assert(ID < N_MMU_ID);
-assert(MMU_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_MMU_ID);
+ assert(MMU_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(MMU_BASE[ID] + reg*sizeof(hrt_data));
}
#endif /* __MMU_PRIVATE_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
index e6283bf67ad3..5ea81c0e82d1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_common/host/sp_private.h
@@ -26,19 +26,19 @@ STORAGE_CLASS_SP_C void sp_ctrl_store(
const hrt_address reg,
const hrt_data value)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
ia_css_device_store_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data), value);
-return;
+ return;
}
STORAGE_CLASS_SP_C hrt_data sp_ctrl_load(
const sp_ID_t ID,
const hrt_address reg)
{
-assert(ID < N_SP_ID);
-assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
-return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
+ assert(ID < N_SP_ID);
+ assert(SP_CTRL_BASE[ID] != (hrt_address)-1);
+ return ia_css_device_load_uint32(SP_CTRL_BASE[ID] + reg*sizeof(hrt_data));
}
STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
@@ -47,7 +47,7 @@ STORAGE_CLASS_SP_C bool sp_ctrl_getbit(
const unsigned int bit)
{
hrt_data val = sp_ctrl_load(ID, reg);
-return (val & (1UL << bit)) != 0;
+ return (val & (1UL << bit)) != 0;
}
STORAGE_CLASS_SP_C void sp_ctrl_setbit(
@@ -57,7 +57,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_setbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data | (1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
@@ -67,7 +67,7 @@ STORAGE_CLASS_SP_C void sp_ctrl_clearbit(
{
hrt_data data = sp_ctrl_load(ID, reg);
sp_ctrl_store(ID, reg, (data & ~(1UL << bit)));
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store(
@@ -76,10 +76,10 @@ STORAGE_CLASS_SP_C void sp_dmem_store(
const void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_store(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_load(
@@ -88,10 +88,10 @@ STORAGE_CLASS_SP_C void sp_dmem_load(
void *data,
const size_t size)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
ia_css_device_load(SP_DMEM_BASE[ID] + addr, data, size);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
@@ -99,11 +99,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint8(
hrt_address addr,
const uint8_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint8(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
@@ -111,11 +111,11 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint16(
hrt_address addr,
const uint16_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint16(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
@@ -123,19 +123,19 @@ STORAGE_CLASS_SP_C void sp_dmem_store_uint32(
hrt_address addr,
const uint32_t data)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
ia_css_device_store_uint32(SP_DMEM_BASE[SP0_ID] + addr, data);
-return;
+ return;
}
STORAGE_CLASS_SP_C uint8_t sp_dmem_load_uint8(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint8(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -144,8 +144,8 @@ STORAGE_CLASS_SP_C uint16_t sp_dmem_load_uint16(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint16(SP_DMEM_BASE[SP0_ID] + addr);
}
@@ -154,8 +154,8 @@ STORAGE_CLASS_SP_C uint32_t sp_dmem_load_uint32(
const sp_ID_t ID,
const hrt_address addr)
{
-assert(ID < N_SP_ID);
-assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
+ assert(ID < N_SP_ID);
+ assert(SP_DMEM_BASE[ID] != (hrt_address)-1);
(void)ID;
return ia_css_device_load_uint32(SP_DMEM_BASE[SP0_ID] + addr);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
index 92fb15d04703..fd0d92e87c36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/assert_support.h
@@ -15,7 +15,6 @@
#ifndef __ASSERT_SUPPORT_H_INCLUDED__
#define __ASSERT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
/**
* The following macro can help to test the size of a struct at compile
@@ -92,7 +91,7 @@
* The implemenation for the pipe generation tool is in see support.isp.h */
#define OP___assert(cnd) assert(cnd)
-STORAGE_CLASS_INLINE void compile_time_assert (unsigned cond)
+static inline void compile_time_assert (unsigned cond)
{
/* Call undefined function if cond is false */
extern void _compile_time_assert (void);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
index d71e08f27a42..6928965cf513 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/bamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "bamem_local.h"
#ifndef __INLINE_BAMEM__
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_BAMEM_H extern
#define STORAGE_CLASS_BAMEM_C
#include "bamem_public.h"
#else /* __INLINE_BAMEM__ */
-#define STORAGE_CLASS_BAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_BAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_BAMEM_H static inline
+#define STORAGE_CLASS_BAMEM_C static inline
#include "bamem_private.h"
#endif /* __INLINE_BAMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
index 0398f5802f05..917ee8cdb1d9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/csi_rx.h
@@ -30,18 +30,13 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "csi_rx_local.h"
#ifndef __INLINE_CSI_RX__
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_CSI_RX_C
#include "csi_rx_public.h"
#else /* __INLINE_CSI_RX__ */
-#define STORAGE_CLASS_CSI_RX_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_CSI_RX_C STORAGE_CLASS_INLINE
#include "csi_rx_private.h"
#endif /* __INLINE_CSI_RX__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
index 7d8011735033..0aa22446e27e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/debug.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "debug_local.h"
#ifndef __INLINE_DEBUG__
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DEBUG_H extern
#define STORAGE_CLASS_DEBUG_C
#include "debug_public.h"
#else /* __INLINE_DEBUG__ */
-#define STORAGE_CLASS_DEBUG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DEBUG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DEBUG_H static inline
+#define STORAGE_CLASS_DEBUG_C static inline
#include "debug_private.h"
#endif /* __INLINE_DEBUG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
index b266191f21ef..d9dee691e3f8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/dma.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "dma_local.h"
#ifndef __INLINE_DMA__
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_DMA_H extern
#define STORAGE_CLASS_DMA_C
#include "dma_public.h"
#else /* __INLINE_DMA__ */
-#define STORAGE_CLASS_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_DMA_H static inline
+#define STORAGE_CLASS_DMA_C static inline
#include "dma_private.h"
#endif /* __INLINE_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
index 78827c554cc3..df579e902796 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/event_fifo.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "event_fifo_local.h"
#ifndef __INLINE_EVENT__
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_EVENT_H extern
#define STORAGE_CLASS_EVENT_C
#include "event_fifo_public.h"
#else /* __INLINE_EVENT__ */
-#define STORAGE_CLASS_EVENT_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_EVENT_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_EVENT_H static inline
+#define STORAGE_CLASS_EVENT_C static inline
#include "event_fifo_private.h"
#endif /* __INLINE_EVENT__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
index 3bdd260bcaa5..f10c4fa2e32b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/fifo_monitor.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "fifo_monitor_local.h"
#ifndef __INLINE_FIFO_MONITOR__
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_FIFO_MONITOR_H extern
#define STORAGE_CLASS_FIFO_MONITOR_C
#include "fifo_monitor_public.h"
#else /* __INLINE_FIFO_MONITOR__ */
-#define STORAGE_CLASS_FIFO_MONITOR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_FIFO_MONITOR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_FIFO_MONITOR_H static inline
+#define STORAGE_CLASS_FIFO_MONITOR_C static inline
#include "fifo_monitor_private.h"
#endif /* __INLINE_FIFO_MONITOR__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
index 016132ba0b7f..75c6854c8e7b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gdc_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gdc_local.h"
#ifndef __INLINE_GDC__
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GDC_H extern
#define STORAGE_CLASS_GDC_C
#include "gdc_public.h"
#else /* __INLINE_GDC__ */
-#define STORAGE_CLASS_GDC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GDC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GDC_H static inline
+#define STORAGE_CLASS_GDC_C static inline
#include "gdc_private.h"
#endif /* __INLINE_GDC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
index 766d2532d8f9..aba94e623043 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_device.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gp_device_local.h"
#ifndef __INLINE_GP_DEVICE__
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_DEVICE_H extern
#define STORAGE_CLASS_GP_DEVICE_C
#include "gp_device_public.h"
#else /* __INLINE_GP_DEVICE__ */
-#define STORAGE_CLASS_GP_DEVICE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_DEVICE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_DEVICE_H static inline
+#define STORAGE_CLASS_GP_DEVICE_C static inline
#include "gp_device_private.h"
#endif /* __INLINE_GP_DEVICE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
index ca70f5603bf8..d5d2df24e11a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gp_timer.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h" /*GP_TIMER_BASE address */
#include "gp_timer_local.h" /*GP_TIMER register offsets */
#ifndef __INLINE_GP_TIMER__
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GP_TIMER_H extern
#define STORAGE_CLASS_GP_TIMER_C
#include "gp_timer_public.h" /* functions*/
#else /* __INLINE_GP_TIMER__ */
-#define STORAGE_CLASS_GP_TIMER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GP_TIMER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GP_TIMER_H static inline
+#define STORAGE_CLASS_GP_TIMER_C static inline
#include "gp_timer_private.h" /* inline functions*/
#endif /* __INLINE_GP_TIMER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
index dec21bcb6f47..d37f7166aa4a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/gpio.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "gpio_local.h"
#ifndef __INLINE_GPIO__
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_GPIO_H extern
#define STORAGE_CLASS_GPIO_C
#include "gpio_public.h"
#else /* __INLINE_GPIO__ */
-#define STORAGE_CLASS_GPIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_GPIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_GPIO_H static inline
+#define STORAGE_CLASS_GPIO_C static inline
#include "gpio_private.h"
#endif /* __INLINE_GPIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
index 671dd5b5fca6..a82fd3a21e98 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/hmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "hmem_local.h"
#ifndef __INLINE_HMEM__
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_HMEM_H extern
#define STORAGE_CLASS_HMEM_C
#include "hmem_public.h"
#else /* __INLINE_HMEM__ */
-#define STORAGE_CLASS_HMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_HMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_HMEM_H static inline
+#define STORAGE_CLASS_HMEM_C static inline
#include "hmem_private.h"
#endif /* __INLINE_HMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
index 396240954bed..3b5df85fc510 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/csi_rx_public.h
@@ -28,7 +28,7 @@
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
+extern void csi_rx_fe_ctrl_get_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -38,7 +38,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx fe controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
+extern void csi_rx_fe_ctrl_dump_state(
const csi_rx_frontend_ID_t ID,
csi_rx_fe_ctrl_state_t *state);
/**
@@ -49,7 +49,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_dump_state(
* @param[in] lane The lane ID.
* @param[out] state Point to the dlane state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
+extern void csi_rx_fe_ctrl_get_dlane_state(
const csi_rx_frontend_ID_t ID,
const uint32_t lane,
csi_rx_fe_ctrl_lane_t *dlane_state);
@@ -60,7 +60,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_get_dlane_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[out] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
+extern void csi_rx_be_ctrl_get_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
/**
@@ -70,7 +70,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_get_state(
* @param[in] id The global unique ID of the csi rx be controller.
* @param[in] state Point to the register-state.
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
+extern void csi_rx_be_ctrl_dump_state(
const csi_rx_backend_ID_t ID,
csi_rx_be_ctrl_state_t *state);
/** end of NCI */
@@ -89,7 +89,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_dump_state(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
+extern hrt_data csi_rx_fe_ctrl_reg_load(
const csi_rx_frontend_ID_t ID,
const hrt_address reg);
/**
@@ -101,7 +101,7 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_fe_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
+extern void csi_rx_fe_ctrl_reg_store(
const csi_rx_frontend_ID_t ID,
const hrt_address reg,
const hrt_data value);
@@ -114,7 +114,7 @@ STORAGE_CLASS_CSI_RX_H void csi_rx_fe_ctrl_reg_store(
*
* @return the value of the register.
*/
-STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
+extern hrt_data csi_rx_be_ctrl_reg_load(
const csi_rx_backend_ID_t ID,
const hrt_address reg);
/**
@@ -126,7 +126,7 @@ STORAGE_CLASS_CSI_RX_H hrt_data csi_rx_be_ctrl_reg_load(
* @param[in] value The value to be stored.
*
*/
-STORAGE_CLASS_CSI_RX_H void csi_rx_be_ctrl_reg_store(
+extern void csi_rx_be_ctrl_reg_store(
const csi_rx_backend_ID_t ID,
const hrt_address reg,
const hrt_data value);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
index d27f87a719db..d09d1e320306 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/gdc_public.h
@@ -33,7 +33,7 @@
\return none, GDC[ID].lut[0...3][0...HRT_GDC_N-1] = data
*/
-STORAGE_CLASS_EXTERN void gdc_lut_store(
+extern void gdc_lut_store(
const gdc_ID_t ID,
const int data[4][HRT_GDC_N]);
@@ -43,7 +43,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_store(
\param in_lut[in] The data matrix to be converted
\param out_lut[out] The data matrix as the output of conversion
*/
-STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
+extern void gdc_lut_convert_to_isp_format(
const int in_lut[4][HRT_GDC_N],
int out_lut[4][HRT_GDC_N]);
@@ -53,7 +53,7 @@ STORAGE_CLASS_EXTERN void gdc_lut_convert_to_isp_format(
\return unity
*/
-STORAGE_CLASS_EXTERN int gdc_get_unity(
+extern int gdc_get_unity(
const gdc_ID_t ID);
#endif /* __GDC_PUBLIC_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
index 9b8e7c92442d..8538f86ab5e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/hmem_public.h
@@ -15,10 +15,10 @@
#ifndef __HMEM_PUBLIC_H_INCLUDED__
#define __HMEM_PUBLIC_H_INCLUDED__
-#include <stddef.h> /* size_t */
+#include <linux/types.h> /* size_t */
/*! Return the size of HMEM[ID]
-
+
\param ID[in] HMEM identifier
\Note: The size is the byte size of the area it occupies
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
index 2251f372145b..a025ad562bd2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op1w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP1W
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP1W_DATA_H static const
#else /* INLINE_ISP_OP1W */
-#define STORAGE_CLASS_ISP_OP1W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP1W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP1W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP1W_DATA_H extern const
#endif /* INLINE_ISP_OP1W */
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
index 1cfe6d717283..cf7e7314842d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/isp_op2w.h
@@ -27,14 +27,13 @@
* Prerequisites:
*
*/
-#include "storage_class.h"
#ifdef INLINE_ISP_OP2W
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H static inline
+#define STORAGE_CLASS_ISP_OP2W_DATA_H static const
#else /* INLINE_ISP_OP2W */
-#define STORAGE_CLASS_ISP_OP2W_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISP_OP2W_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_ISP_OP2W_FUNC_H extern
+#define STORAGE_CLASS_ISP_OP2W_DATA_H extern const
#endif /* INLINE_ISP_OP2W */
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
index 4258fa872087..0a13eda73607 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/mmu_public.h
@@ -24,7 +24,7 @@
\return none, MMU[ID].page_table_base_index = base_index
*/
-STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
+extern void mmu_set_page_table_base_index(
const mmu_ID_t ID,
const hrt_data base_index);
@@ -35,7 +35,7 @@ STORAGE_CLASS_EXTERN void mmu_set_page_table_base_index(
\return MMU[ID].page_table_base_index
*/
-STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
+extern hrt_data mmu_get_page_table_base_index(
const mmu_ID_t ID);
/*! Invalidate the page table cache of MMU[ID]
@@ -44,7 +44,7 @@ STORAGE_CLASS_EXTERN hrt_data mmu_get_page_table_base_index(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
+extern void mmu_invalidate_cache(
const mmu_ID_t ID);
@@ -52,7 +52,7 @@ STORAGE_CLASS_EXTERN void mmu_invalidate_cache(
\return none
*/
-STORAGE_CLASS_EXTERN void mmu_invalidate_cache_all(void);
+extern void mmu_invalidate_cache_all(void);
/*! Write to a control register of MMU[ID]
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
index 3e955fca2a94..a202d6dce106 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/host/ref_vector_func.h
@@ -15,14 +15,13 @@
#ifndef _REF_VECTOR_FUNC_H_INCLUDED_
#define _REF_VECTOR_FUNC_H_INCLUDED_
-#include "storage_class.h"
#ifdef INLINE_VECTOR_FUNC
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_REF_VECTOR_DATA_H static const
#else /* INLINE_VECTOR_FUNC */
-#define STORAGE_CLASS_REF_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_REF_VECTOR_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_REF_VECTOR_FUNC_H extern
+#define STORAGE_CLASS_REF_VECTOR_DATA_H extern const
#endif /* INLINE_VECTOR_FUNC */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
index f5de0df7981e..c7d9095472b1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/ibuf_ctrl.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "ibuf_ctrl_local.h"
#ifndef __INLINE_IBUF_CTRL__
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IBUF_CTRL_H extern
#define STORAGE_CLASS_IBUF_CTRL_C
#include "ibuf_ctrl_public.h"
#else /* __INLINE_IBUF_CTRL__ */
-#define STORAGE_CLASS_IBUF_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IBUF_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IBUF_CTRL_H static inline
+#define STORAGE_CLASS_IBUF_CTRL_C static inline
#include "ibuf_ctrl_private.h"
#endif /* __INLINE_IBUF_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
index 041c8b660aa4..eeaaecdd57ba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_formatter.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_formatter_local.h"
#ifndef __INLINE_INPUT_FORMATTER__
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_FORMATTER_H extern
#define STORAGE_CLASS_INPUT_FORMATTER_C
#include "input_formatter_public.h"
#else /* __INLINE_INPUT_FORMATTER__ */
-#define STORAGE_CLASS_INPUT_FORMATTER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_FORMATTER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_FORMATTER_H static inline
+#define STORAGE_CLASS_INPUT_FORMATTER_C static inline
#include "input_formatter_private.h"
#endif /* __INLINE_INPUT_FORMATTER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
index 182867367b48..3f02d9ec9588 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/input_system.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "input_system_local.h"
#ifndef __INLINE_INPUT_SYSTEM__
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_INPUT_SYSTEM_H extern
#define STORAGE_CLASS_INPUT_SYSTEM_C
#include "input_system_public.h"
#else /* __INLINE_INPUT_SYSTEM__ */
-#define STORAGE_CLASS_INPUT_SYSTEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_INPUT_SYSTEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_INPUT_SYSTEM_H static inline
+#define STORAGE_CLASS_INPUT_SYSTEM_C static inline
#include "input_system_private.h"
#endif /* __INLINE_INPUT_SYSTEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
index 1dc443892cc5..e1446388dee5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/irq.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "irq_local.h"
#ifndef __INLINE_IRQ__
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_IRQ_H extern
#define STORAGE_CLASS_IRQ_C
#include "irq_public.h"
#else /* __INLINE_IRQ__ */
-#define STORAGE_CLASS_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_IRQ_H static inline
+#define STORAGE_CLASS_IRQ_C static inline
#include "irq_private.h"
#endif /* __INLINE_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
index 49190d0abc30..b916953e7f47 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isp_local.h"
#ifndef __INLINE_ISP__
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISP_H extern
#define STORAGE_CLASS_ISP_C
#include "isp_public.h"
#else /* __INLINE_iSP__ */
-#define STORAGE_CLASS_ISP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISP_H static inline
+#define STORAGE_CLASS_ISP_C static inline
#include "isp_private.h"
#endif /* __INLINE_ISP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
index 9a608f07adcb..76aba114a5c1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_dma.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_dma_local.h"
#ifndef __INLINE_ISYS2401_DMA__
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_DMA_H extern
#define STORAGE_CLASS_ISYS2401_DMA_C
#include "isys_dma_public.h"
#else /* __INLINE_ISYS2401_DMA__ */
-#define STORAGE_CLASS_ISYS2401_DMA_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_DMA_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_DMA_H static inline
+#define STORAGE_CLASS_ISYS2401_DMA_C static inline
#include "isys_dma_private.h"
#endif /* __INLINE_ISYS2401_DMA__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
index cf858bcc8e45..d3f64cfd0b7d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_irq.h
@@ -16,21 +16,20 @@
#define __IA_CSS_ISYS_IRQ_H__
#include <type_support.h>
-#include <storage_class.h>
#include <system_local.h>
#if defined(USE_INPUT_SYSTEM_VERSION_2401)
#ifndef __INLINE_ISYS2401_IRQ__
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_ISYS2401_IRQ_H extern
+#define STORAGE_CLASS_ISYS2401_IRQ_C extern
#include "isys_irq_public.h"
#else /* __INLINE_ISYS2401_IRQ__ */
-#define STORAGE_CLASS_ISYS2401_IRQ_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_ISYS2401_IRQ_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_ISYS2401_IRQ_H static inline
+#define STORAGE_CLASS_ISYS2401_IRQ_C static inline
#include "isys_irq_private.h"
#endif /* __INLINE_ISYS2401_IRQ__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
index 3e8cfe555ad5..16fbf9d25eba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/isys_stream2mmio.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "isys_stream2mmio_local.h"
#ifndef __INLINE_STREAM2MMIO__
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM2MMIO_H extern
#define STORAGE_CLASS_STREAM2MMIO_C
#include "isys_stream2mmio_public.h"
#else /* __INLINE_STREAM2MMIO__ */
-#define STORAGE_CLASS_STREAM2MMIO_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM2MMIO_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM2MMIO_H static inline
+#define STORAGE_CLASS_STREAM2MMIO_C static inline
#include "isys_stream2mmio_private.h"
#endif /* __INLINE_STREAM2MMIO__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
index f74b405b0f39..e85e5c889c15 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -15,7 +15,6 @@
#ifndef __MATH_SUPPORT_H
#define __MATH_SUPPORT_H
-#include "storage_class.h" /* for STORAGE_CLASS_INLINE */
#if defined(__KERNEL__)
#include <linux/kernel.h> /* Override the definition of max/min from linux kernel*/
#endif /*__KERNEL__*/
@@ -110,60 +109,60 @@ Leaving out the other math utility functions as they are newly added
#else /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE int max(int a, int b)
+static inline int max(int a, int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE int min(int a, int b)
+static inline int min(int a, int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_div(unsigned int a, unsigned int b)
+static inline unsigned int ceil_div(unsigned int a, unsigned int b)
{
return CEIL_DIV(a, b);
}
#endif /* !defined(INLINE_MATH_SUPPORT_UTILS) */
-STORAGE_CLASS_INLINE unsigned int umax(unsigned int a, unsigned int b)
+static inline unsigned int umax(unsigned int a, unsigned int b)
{
return MAX(a, b);
}
-STORAGE_CLASS_INLINE unsigned int umin(unsigned int a, unsigned int b)
+static inline unsigned int umin(unsigned int a, unsigned int b)
{
return MIN(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul(unsigned int a, unsigned int b)
{
return CEIL_MUL(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_mul2(unsigned int a, unsigned int b)
+static inline unsigned int ceil_mul2(unsigned int a, unsigned int b)
{
return CEIL_MUL2(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift(unsigned int a, unsigned int b)
{
return CEIL_SHIFT(a, b);
}
-STORAGE_CLASS_INLINE unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
+static inline unsigned int ceil_shift_mul(unsigned int a, unsigned int b)
{
return CEIL_SHIFT_MUL(a, b);
}
#ifdef ISP2401
-STORAGE_CLASS_INLINE unsigned int round_half_down_div(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_div(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_DIV(a, b);
}
-STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned int b)
+static inline unsigned int round_half_down_mul(unsigned int a, unsigned int b)
{
return ROUND_HALF_DOWN_MUL(a, b);
}
@@ -187,7 +186,7 @@ STORAGE_CLASS_INLINE unsigned int round_half_down_mul(unsigned int a, unsigned i
*
*/
-STORAGE_CLASS_INLINE unsigned int ceil_pow2(unsigned int a)
+static inline unsigned int ceil_pow2(unsigned int a)
{
if (a == 0) {
return 1;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
index 1b2017b029f2..519e850ec390 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mmu_device.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "mmu_local.h"
#ifndef __INLINE_MMU__
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_MMU_H extern
#define STORAGE_CLASS_MMU_C
#include "mmu_public.h"
#else /* __INLINE_MMU__ */
-#define STORAGE_CLASS_MMU_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MMU_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_MMU_H static inline
+#define STORAGE_CLASS_MMU_C static inline
#include "mmu_private.h"
#endif /* __INLINE_MMU__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
index 565983aafa4d..cd938375e02e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/mpmath.h
@@ -15,14 +15,13 @@
#ifndef __MPMATH_H_INCLUDED__
#define __MPMATH_H_INCLUDED__
-#include "storage_class.h"
#ifdef INLINE_MPMATH
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_INLINE_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H static inline
+#define STORAGE_CLASS_MPMATH_DATA_H static const
#else /* INLINE_MPMATH */
-#define STORAGE_CLASS_MPMATH_FUNC_H STORAGE_CLASS_EXTERN
-#define STORAGE_CLASS_MPMATH_DATA_H STORAGE_CLASS_EXTERN_DATA
+#define STORAGE_CLASS_MPMATH_FUNC_H extern
+#define STORAGE_CLASS_MPMATH_DATA_H extern const
#endif /* INLINE_MPMATH */
#include <type_support.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
index 6e48ea9afc29..a607242c5f1a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/osys.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "osys_local.h"
#ifndef __INLINE_OSYS__
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_OSYS_H extern
#define STORAGE_CLASS_OSYS_C
#include "osys_public.h"
#else /* __INLINE_OSYS__ */
-#define STORAGE_CLASS_OSYS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_OSYS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_OSYS_H static inline
+#define STORAGE_CLASS_OSYS_C static inline
#include "osys_private.h"
#endif /* __INLINE_OSYS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
index 67f7f3a14231..418d02382d76 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/pixelgen.h
@@ -31,18 +31,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "pixelgen_local.h"
#ifndef __INLINE_PIXELGEN__
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_PIXELGEN_H extern
#define STORAGE_CLASS_PIXELGEN_C
#include "pixelgen_public.h"
#else /* __INLINE_PIXELGEN__ */
-#define STORAGE_CLASS_PIXELGEN_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_PIXELGEN_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_PIXELGEN_H static inline
+#define STORAGE_CLASS_PIXELGEN_C static inline
#include "pixelgen_private.h"
#endif /* __INLINE_PIXELGEN__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
index 02f9eee67ff3..39a125ba563d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/platform_support.h
@@ -20,7 +20,6 @@
* Platform specific includes and functionality.
*/
-#include "storage_class.h"
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
index cfbc222ea0c1..ca0fbbb57788 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/print_support.h
@@ -15,7 +15,6 @@
#ifndef __PRINT_SUPPORT_H_INCLUDED__
#define __PRINT_SUPPORT_H_INCLUDED__
-#include "storage_class.h"
#include <stdarg.h>
#if !defined(__KERNEL__)
@@ -24,7 +23,7 @@
extern int (*sh_css_printf) (const char *fmt, va_list args);
/* depends on host supplied print function in ia_css_init() */
-STORAGE_CLASS_INLINE void ia_css_print(const char *fmt, ...)
+static inline void ia_css_print(const char *fmt, ...)
{
va_list ap;
if (sh_css_printf) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
index a3d874b9516a..aa5fadf5aadb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/queue.h
@@ -29,18 +29,17 @@
*
*/
-#include <storage_class.h>
#include "queue_local.h"
#ifndef __INLINE_QUEUE__
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_QUEUE_H extern
#define STORAGE_CLASS_QUEUE_C
/* #include "queue_public.h" */
#include "ia_css_queue.h"
#else /* __INLINE_QUEUE__ */
-#define STORAGE_CLASS_QUEUE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_QUEUE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_QUEUE_H static inline
+#define STORAGE_CLASS_QUEUE_C static inline
#include "queue_private.h"
#endif /* __INLINE_QUEUE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
index 82c55acd0380..bd9f53e6b680 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/resource.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "resource_local.h"
#ifndef __INLINE_RESOURCE__
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RESOURCE_H extern
#define STORAGE_CLASS_RESOURCE_C
#include "resource_public.h"
#else /* __INLINE_RESOURCE__ */
-#define STORAGE_CLASS_RESOURCE_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RESOURCE_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RESOURCE_H static inline
+#define STORAGE_CLASS_RESOURCE_C static inline
#include "resource_private.h"
#endif /* __INLINE_RESOURCE__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
index c34c2e75c51f..43cfb0cb4aa8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/socket.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "socket_local.h"
#ifndef __INLINE_SOCKET__
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SOCKET_H extern
#define STORAGE_CLASS_SOCKET_C
#include "socket_public.h"
#else /* __INLINE_SOCKET__ */
-#define STORAGE_CLASS_SOCKET_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SOCKET_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SOCKET_H static inline
+#define STORAGE_CLASS_SOCKET_C static inline
#include "socket_private.h"
#endif /* __INLINE_SOCKET__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
index 150fc2f6129b..8f57f2060791 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/sp.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "sp_local.h"
#ifndef __INLINE_SP__
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_SP_H extern
#define STORAGE_CLASS_SP_C
#include "sp_public.h"
#else /* __INLINE_SP__ */
-#define STORAGE_CLASS_SP_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_SP_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_SP_H static inline
+#define STORAGE_CLASS_SP_C static inline
#include "sp_private.h"
#endif /* __INLINE_SP__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
deleted file mode 100644
index 3908e668dacd..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/storage_class.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __STORAGE_CLASS_H_INCLUDED__
-#define __STORAGE_CLASS_H_INCLUDED__
-
-/**
-* @file
-* Platform specific includes and functionality.
-*/
-
-#define STORAGE_CLASS_EXTERN extern
-
-#if defined(_MSC_VER)
-#define STORAGE_CLASS_INLINE static __inline
-#else
-#define STORAGE_CLASS_INLINE static inline
-#endif
-
-#define STORAGE_CLASS_EXTERN_DATA extern const
-#define STORAGE_CLASS_INLINE_DATA static const
-
-#endif /* __STORAGE_CLASS_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
index 8e41f60b5d39..53d535e4f2ae 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/stream_buffer.h
@@ -30,18 +30,17 @@
*
*/
-#include "storage_class.h"
#include "system_local.h"
#include "stream_buffer_local.h"
#ifndef __INLINE_STREAM_BUFFER__
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_STREAM_BUFFER_H extern
#define STORAGE_CLASS_STREAM_BUFFER_C
#include "stream_buffer_public.h"
#else /* __INLINE_STREAM_BUFFER__ */
-#define STORAGE_CLASS_STREAM_BUFFER_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_STREAM_BUFFER_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_STREAM_BUFFER_H static inline
+#define STORAGE_CLASS_STREAM_BUFFER_C static inline
#include "stream_buffer_private.h"
#endif /* __INLINE_STREAM_BUFFER__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
index c53241a7a281..d80437c58bde 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -16,7 +16,6 @@
#define __STRING_SUPPORT_H_INCLUDED__
#include <platform_support.h>
#include <type_support.h>
-#include <storage_class.h>
#if !defined(_MSC_VER)
/*
@@ -34,7 +33,7 @@
* @return EINVAL on Invalid arguments
* @return ERANGE on Destination size too small
*/
-STORAGE_CLASS_INLINE int memcpy_s(
+static inline int memcpy_s(
void* dest_buf,
size_t dest_size,
const void* src_buf,
@@ -89,7 +88,7 @@ static size_t strnlen_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strncpy_s(
+static inline int strncpy_s(
char* dest_str,
size_t dest_size,
const char* src_str,
@@ -130,7 +129,7 @@ STORAGE_CLASS_INLINE int strncpy_s(
* @return Returns EINVAL on invalid arguments
* @return Returns ERANGE on destination size too small
*/
-STORAGE_CLASS_INLINE int strcpy_s(
+static inline int strcpy_s(
char* dest_str,
size_t dest_size,
const char* src_str)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
index 7385fd11c95f..ace695643369 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/tag.h
@@ -29,17 +29,16 @@
*
*/
-#include "storage_class.h"
#include "tag_local.h"
#ifndef __INLINE_TAG__
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TAG_H extern
#define STORAGE_CLASS_TAG_C
#include "tag_public.h"
#else /* __INLINE_TAG__ */
-#define STORAGE_CLASS_TAG_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TAG_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TAG_H static inline
+#define STORAGE_CLASS_TAG_C static inline
#include "tag_private.h"
#endif /* __INLINE_TAG__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
index ed13451c9261..f6bc1c47553f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/timed_ctrl.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "timed_ctrl_local.h"
#ifndef __INLINE_TIMED_CTRL__
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_TIMED_CTRL_H extern
#define STORAGE_CLASS_TIMED_CTRL_C
#include "timed_ctrl_public.h"
#else /* __INLINE_TIMED_CTRL__ */
-#define STORAGE_CLASS_TIMED_CTRL_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_TIMED_CTRL_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_TIMED_CTRL_H static inline
+#define STORAGE_CLASS_TIMED_CTRL_C static inline
#include "timed_ctrl_private.h"
#endif /* __INLINE_TIMED_CTRL__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
index b82fa3eba79f..bc77537fa73a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/type_support.h
@@ -30,27 +30,6 @@
#define IA_CSS_INT32_T_BITS 32
#define IA_CSS_UINT64_T_BITS 64
-#if defined(_MSC_VER)
-#include <stdint.h>
-/* For ATE compilation define the bool */
-#if defined(_ATE_)
-#define bool int
-#define true 1
-#define false 0
-#else
-#include <stdbool.h>
-#endif
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#if defined(_M_X64)
-#define HOST_ADDRESS(x) (unsigned long long)(x)
-#else
-#define HOST_ADDRESS(x) (unsigned long)(x)
-#endif
-
-#elif defined(__KERNEL__)
-
#define CHAR_BIT (8)
#include <linux/types.h>
@@ -58,25 +37,4 @@
#include <linux/errno.h>
#define HOST_ADDRESS(x) (unsigned long)(x)
-#elif defined(__GNUC__)
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS 1
-#endif
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#else /* default is for the FIST environment */
-#include <stdint.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <limits.h>
-#include <errno.h>
-#define HOST_ADDRESS(x) (unsigned long)(x)
-
-#endif
-
#endif /* __TYPE_SUPPORT_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
index acf932e1f563..82d447bf9704 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vamem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vamem_local.h"
#ifndef __INLINE_VAMEM__
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VAMEM_H extern
#define STORAGE_CLASS_VAMEM_C
#include "vamem_public.h"
#else /* __INLINE_VAMEM__ */
-#define STORAGE_CLASS_VAMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VAMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VAMEM_H static inline
+#define STORAGE_CLASS_VAMEM_C static inline
#include "vamem_private.h"
#endif /* __INLINE_VAMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
index 5d3be31759e4..5368b9062897 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_func.h
@@ -15,7 +15,6 @@
#ifndef __VECTOR_FUNC_H_INCLUDED__
#define __VECTOR_FUNC_H_INCLUDED__
-#include "storage_class.h"
/* TODO: Later filters will be moved to types directory,
* and we should only include matrix_MxN types */
@@ -27,12 +26,12 @@
#include "vector_func_local.h"
#ifndef __INLINE_VECTOR_FUNC__
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_FUNC_H extern
#define STORAGE_CLASS_VECTOR_FUNC_C
#include "vector_func_public.h"
#else /* __INLINE_VECTOR_FUNC__ */
-#define STORAGE_CLASS_VECTOR_FUNC_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_FUNC_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_FUNC_H static inline
+#define STORAGE_CLASS_VECTOR_FUNC_C static inline
#include "vector_func_private.h"
#endif /* __INLINE_VECTOR_FUNC__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
index 261f87378ce5..4923f2d5518b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vector_ops.h
@@ -15,17 +15,16 @@
#ifndef __VECTOR_OPS_H_INCLUDED__
#define __VECTOR_OPS_H_INCLUDED__
-#include "storage_class.h"
#include "vector_ops_local.h"
#ifndef __INLINE_VECTOR_OPS__
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VECTOR_OPS_H extern
#define STORAGE_CLASS_VECTOR_OPS_C
#include "vector_ops_public.h"
#else /* __INLINE_VECTOR_OPS__ */
-#define STORAGE_CLASS_VECTOR_OPS_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VECTOR_OPS_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VECTOR_OPS_H static inline
+#define STORAGE_CLASS_VECTOR_OPS_C static inline
#include "vector_ops_private.h"
#endif /* __INLINE_VECTOR_OPS__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
index 79a36755bfd9..d3375729c441 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/vmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "vmem_local.h"
#ifndef __INLINE_VMEM__
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_VMEM_H extern
#define STORAGE_CLASS_VMEM_C
#include "vmem_public.h"
#else /* __INLINE_VMEM__ */
-#define STORAGE_CLASS_VMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_VMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_VMEM_H static inline
+#define STORAGE_CLASS_VMEM_C static inline
#include "vmem_private.h"
#endif /* __INLINE_VMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
index 9169e04f9b4b..13083fe55141 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/xmem.h
@@ -29,18 +29,17 @@
* - local: system and cell specific constants and identifiers
*/
-#include "storage_class.h"
#include "system_local.h"
#include "xmem_local.h"
#ifndef __INLINE_XMEM__
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_XMEM_H extern
#define STORAGE_CLASS_XMEM_C
#include "xmem_public.h"
#else /* __INLINE_XMEM__ */
-#define STORAGE_CLASS_XMEM_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_XMEM_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_XMEM_H static inline
+#define STORAGE_CLASS_XMEM_C static inline
#include "xmem_private.h"
#endif /* __INLINE_XMEM__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
index 8ef6c54ee813..aa733674f42b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
@@ -321,7 +321,7 @@ ia_css_s3a_dmem_decode(
}
/* MW: this is an ISP function */
-STORAGE_CLASS_INLINE int
+static inline int
merge_hi_lo_14(unsigned short hi, unsigned short lo)
{
int val = (int) ((((unsigned int) hi << 14) & 0xfffc000) |
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
index 9f8a125f0d74..e028e460ae4c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -1697,11 +1697,11 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
}
#endif
if (xcandidate->num_output_pins > 1 && /* in case we have a second output pin, */
- req_vf_info && /* and we need vf output. */
+ req_vf_info && /* and we need vf output. */
/* check if the required vf format
is supported. */
- !binary_supports_output_format(xcandidate, req_vf_info->format)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ !binary_supports_output_format(xcandidate, req_vf_info->format)) {
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
req_vf_info,
@@ -1711,8 +1711,8 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf format */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven &&
- !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
+ req_vf_info && candidate->enable.vf_veceven &&
+ !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
__LINE__, xcandidate->num_output_pins, 1,
@@ -1723,7 +1723,7 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
/* Check if vf_veceven supports the requested vf width */
if (xcandidate->num_output_pins == 1 &&
- req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
+ req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
if (req_vf_info->res.width > candidate->output.max_width) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: (%d < %d)\n",
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
index 5d40afd482f5..42d9a8508858 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -280,7 +280,7 @@ static ia_css_queue_t *bufq_get_qhandle(
/* Local function to initialize a buffer queue. This reduces
* the chances of copy-paste errors or typos.
*/
-STORAGE_CLASS_INLINE void
+static inline void
init_bufq(unsigned int desc_offset,
unsigned int elems_offset,
ia_css_queue_t *handle)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
index 91c105cc6204..3c8dcfd4bbc6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -130,7 +130,7 @@ enum ia_css_debug_enable_param_dump {
* @param[in] fmt printf like format string
* @param[in] args arguments for the format string
*/
-STORAGE_CLASS_INLINE void
+static inline void
ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
{
if (ia_css_debug_trace_level >= level)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
index a7c6bba7e094..11d3995ba0db 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/ifmtr/src/ifmtr.c
@@ -29,6 +29,7 @@ more details.
#endif
#include "system_global.h"
+#include <linux/kernel.h>
#ifdef USE_INPUT_SYSTEM_VERSION_2
@@ -487,7 +488,7 @@ static void ifmtr_set_if_blocking_mode(
{
int i;
bool block[] = { false, false, false, false };
- assert(N_INPUT_FORMATTER_ID <= (sizeof(block) / sizeof(block[0])));
+ assert(N_INPUT_FORMATTER_ID <= (ARRAY_SIZE(block)));
#if !defined(IS_ISP_2400_SYSTEM)
#error "ifmtr_set_if_blocking_mode: ISP_SYSTEM must be one of {IS_ISP_2400_SYSTEM}"
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
index cf02970d4f59..d9a5f3e9283a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/inputfifo/src/inputfifo.c
@@ -105,7 +105,7 @@ static struct inputfifo_instance
/* Streaming to MIPI */
static unsigned inputfifo_wrap_marker(
-/* STORAGE_CLASS_INLINE unsigned inputfifo_wrap_marker( */
+/* static inline unsigned inputfifo_wrap_marker( */
unsigned marker)
{
return marker |
@@ -113,7 +113,7 @@ static unsigned inputfifo_wrap_marker(
(inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
-STORAGE_CLASS_INLINE void
+static inline void
_sh_css_fifo_snd(unsigned token)
{
while (!can_event_send_token(STR2MIPI_EVENT_ID))
@@ -123,7 +123,7 @@ _sh_css_fifo_snd(unsigned token)
}
static void inputfifo_send_data_a(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_a( */
+/* static inline void inputfifo_send_data_a( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
@@ -135,7 +135,7 @@ unsigned int data)
static void inputfifo_send_data_b(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data_b( */
+/* static inline void inputfifo_send_data_b( */
unsigned int data)
{
unsigned int token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
@@ -147,7 +147,7 @@ static void inputfifo_send_data_b(
static void inputfifo_send_data(
-/* STORAGE_CLASS_INLINE void inputfifo_send_data( */
+/* static inline void inputfifo_send_data( */
unsigned int a,
unsigned int b)
{
@@ -162,7 +162,7 @@ static void inputfifo_send_data(
static void inputfifo_send_sol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sol(void) */
+/* static inline void inputfifo_send_sol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOL_BIT);
@@ -174,7 +174,7 @@ static void inputfifo_send_sol(void)
static void inputfifo_send_eol(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eol(void) */
+/* static inline void inputfifo_send_eol(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOL_BIT);
@@ -185,7 +185,7 @@ static void inputfifo_send_eol(void)
static void inputfifo_send_sof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_sof(void) */
+/* static inline void inputfifo_send_sof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_SOF_BIT);
@@ -197,7 +197,7 @@ static void inputfifo_send_sof(void)
static void inputfifo_send_eof(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_eof(void) */
+/* static inline void inputfifo_send_eof(void) */
{
hrt_data token = inputfifo_wrap_marker(
1 << HIVE_STR_TO_MIPI_EOF_BIT);
@@ -209,7 +209,7 @@ static void inputfifo_send_eof(void)
#ifdef __ON__
static void inputfifo_send_ch_id(
-/* STORAGE_CLASS_INLINE void inputfifo_send_ch_id( */
+/* static inline void inputfifo_send_ch_id( */
unsigned int ch_id)
{
hrt_data token;
@@ -223,7 +223,7 @@ static void inputfifo_send_ch_id(
}
static void inputfifo_send_fmt_type(
-/* STORAGE_CLASS_INLINE void inputfifo_send_fmt_type( */
+/* static inline void inputfifo_send_fmt_type( */
unsigned int fmt_type)
{
hrt_data token;
@@ -240,7 +240,7 @@ static void inputfifo_send_fmt_type(
static void inputfifo_send_ch_id_and_fmt_type(
-/* STORAGE_CLASS_INLINE
+/* static inline
void inputfifo_send_ch_id_and_fmt_type( */
unsigned int ch_id,
unsigned int fmt_type)
@@ -259,7 +259,7 @@ void inputfifo_send_ch_id_and_fmt_type( */
static void inputfifo_send_empty_token(void)
-/* STORAGE_CLASS_INLINE void inputfifo_send_empty_token(void) */
+/* static inline void inputfifo_send_empty_token(void) */
{
hrt_data token = inputfifo_wrap_marker(0);
_sh_css_fifo_snd(token);
@@ -269,7 +269,7 @@ static void inputfifo_send_empty_token(void)
static void inputfifo_start_frame(
-/* STORAGE_CLASS_INLINE void inputfifo_start_frame( */
+/* static inline void inputfifo_start_frame( */
unsigned int ch_id,
unsigned int fmt_type)
{
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
index 95542fc82217..62d13978475d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/pipeline/src/pipeline.c
@@ -603,7 +603,7 @@ static enum ia_css_err pipeline_stage_create(
/* Verify input parameters*/
if (!(stage_desc->in_frame) && !(stage_desc->firmware)
&& (stage_desc->binary) && !(stage_desc->binary->online)) {
- err = IA_CSS_ERR_INTERNAL_ERROR;
+ err = IA_CSS_ERR_INTERNAL_ERROR;
goto ERR;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
index a0bb9f663ce6..9f78e709b3d0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/interface/ia_css_rmgr.h
@@ -31,15 +31,14 @@ more details.
#ifndef _IA_CSS_RMGR_H
#define _IA_CSS_RMGR_H
-#include "storage_class.h"
#include <ia_css_err.h>
#ifndef __INLINE_RMGR__
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_EXTERN
+#define STORAGE_CLASS_RMGR_H extern
#define STORAGE_CLASS_RMGR_C
#else /* __INLINE_RMGR__ */
-#define STORAGE_CLASS_RMGR_H STORAGE_CLASS_INLINE
-#define STORAGE_CLASS_RMGR_C STORAGE_CLASS_INLINE
+#define STORAGE_CLASS_RMGR_H static inline
+#define STORAGE_CLASS_RMGR_C static inline
#endif /* __INLINE_RMGR__ */
/**
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
index fa92d8da8f1c..e56006c07ee8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/rmgr_vbuf.c
@@ -174,7 +174,7 @@ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
if (pool == NULL) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
- return;
+ return;
}
if (pool->handles != NULL) {
/* free the hmm buffers */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
index d9178e80dab2..6d9bceb60196 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -37,7 +37,7 @@ more details.
#include "ia_css_spctrl.h"
#include "ia_css_debug.h"
-typedef struct {
+struct spctrl_context_info {
struct ia_css_sp_init_dmem_cfg dmem_config;
uint32_t spctrl_config_dmem_addr; /** location of dmem_cfg in SP dmem */
uint32_t spctrl_state_dmem_addr;
@@ -45,9 +45,9 @@ typedef struct {
hrt_vaddress code_addr; /* sp firmware location in host mem-DDR*/
uint32_t code_size;
char *program_name; /* used in case of PLATFORM_SIM */
-} spctrl_context_info;
+};
-static spctrl_context_info spctrl_cofig_info[N_SP_ID];
+static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
static bool spctrl_loaded[N_SP_ID] = {0};
/* Load firmware */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
index e882b5596813..f92b6a9f77eb 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -451,8 +451,6 @@ static enum ia_css_frame_format yuv422_copy_formats[] = {
IA_CSS_FRAME_FORMAT_YUYV
};
-#define array_length(array) (sizeof(array)/sizeof(array[0]))
-
/* Verify whether the selected output format is can be produced
* by the copy binary given the stream format.
* */
@@ -468,7 +466,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
switch (pipe->stream->config.input_config.format) {
case IA_CSS_STREAM_FORMAT_YUV420_8_LEGACY:
case IA_CSS_STREAM_FORMAT_YUV420_8:
- for (i=0; i<array_length(yuv420_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv420_copy_formats) && !found; i++)
found = (out_fmt == yuv420_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV420_10:
@@ -476,7 +474,7 @@ verify_copy_out_frame_format(struct ia_css_pipe *pipe)
found = (out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16);
break;
case IA_CSS_STREAM_FORMAT_YUV422_8:
- for (i=0; i<array_length(yuv422_copy_formats) && !found; i++)
+ for (i=0; i<ARRAY_SIZE(yuv422_copy_formats) && !found; i++)
found = (out_fmt == yuv422_copy_formats[i]);
break;
case IA_CSS_STREAM_FORMAT_YUV422_10:
@@ -3781,6 +3779,7 @@ static enum ia_css_err
create_host_acc_pipeline(struct ia_css_pipe *pipe)
{
enum ia_css_err err = IA_CSS_SUCCESS;
+ const struct ia_css_fw_info *fw;
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
@@ -3794,14 +3793,12 @@ create_host_acc_pipeline(struct ia_css_pipe *pipe)
if (pipe->config.acc_extension)
pipe->pipeline.pipe_qos_config = 0;
-{
- const struct ia_css_fw_info *fw = pipe->vf_stage;
+ fw = pipe->vf_stage;
for (i = 0; fw; fw = fw->next){
err = sh_css_pipeline_add_acc_stage(&pipe->pipeline, fw);
if (err != IA_CSS_SUCCESS)
goto ERR;
}
-}
for (i=0; i<pipe->config.num_acc_stages; i++) {
struct ia_css_fw_info *fw = pipe->config.acc_stages[i];
@@ -4333,12 +4330,13 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
}
} else if ((buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
- || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
- return_err = ia_css_bufq_enqueue_buffer(thread_id,
+ || (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME)
+ || (buf_type == IA_CSS_BUFFER_TYPE_METADATA)) {
+
+ return_err = ia_css_bufq_enqueue_buffer(thread_id,
queue_id,
(uint32_t)h_vbuf->vptr);
#if defined(SH_CSS_ENABLE_PER_FRAME_PARAMS)
@@ -5607,13 +5605,13 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage
* sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return err;
}
@@ -5797,13 +5795,15 @@ static enum ia_css_err load_video_binaries(struct ia_css_pipe *pipe)
#endif
/* Make tnr reference buffers output block height align */
- tnr_info.res.height =
#ifndef ISP2401
+ tnr_info.res.height =
CEIL_MUL(tnr_info.res.height,
+ mycs->video_binary.info->sp.block.output_block_height);
#else
+ tnr_info.res.height =
CEIL_MUL(tnr_height,
+ mycs->video_binary.info->sp.block.output_block_height);
#endif
- mycs->video_binary.info->sp.block.output_block_height);
} else {
tnr_info = mycs->video_binary.internal_frame_info;
}
@@ -6027,7 +6027,7 @@ sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
err = ia_css_util_check_res(width, height);
if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE_ERR_PRIVATE(err);
+ IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->vf_output_info[idx].res.width != width ||
@@ -6258,14 +6258,14 @@ static enum ia_css_err load_primary_binaries(
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
@@ -6982,27 +6982,27 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc_single_output(
}
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool), GFP_KERNEL);
- if (descr->is_output_stage == NULL) {
+ if (!descr->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7118,22 +7118,22 @@ static enum ia_css_err ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pi
descr->num_stage = num_stages;
descr->in_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->in_info == NULL) {
+ if (!descr->in_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->internal_out_info == NULL) {
+ if (!descr->internal_out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->out_info == NULL) {
+ if (!descr->out_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage * sizeof(struct ia_css_frame_info), GFP_KERNEL);
- if (descr->vf_info == NULL) {
+ if (!descr->vf_info) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7276,13 +7276,13 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
mycs->yuv_scaler_binary = kzalloc(cas_scaler_descr.num_stage *
sizeof(struct ia_css_binary), GFP_KERNEL);
- if (mycs->yuv_scaler_binary == NULL) {
+ if (!mycs->yuv_scaler_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
mycs->is_output_stage = kzalloc(cas_scaler_descr.num_stage *
sizeof(bool), GFP_KERNEL);
- if (mycs->is_output_stage == NULL) {
+ if (!mycs->is_output_stage) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -7383,7 +7383,7 @@ load_yuvpp_binaries(struct ia_css_pipe *pipe)
}
mycs->vf_pp_binary = kzalloc(mycs->num_vf_pp * sizeof(struct ia_css_binary),
GFP_KERNEL);
- if (mycs->vf_pp_binary == NULL) {
+ if (!mycs->vf_pp_binary) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
goto ERR;
}
@@ -8689,9 +8689,9 @@ enum ia_css_err ia_css_stream_capture(
/* Check if the tag descriptor is valid */
if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_stream_capture() leave: return_err=%d\n",
- IA_CSS_ERR_INVALID_ARGUMENTS);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
+ "ia_css_stream_capture() leave: return_err=%d\n",
+ IA_CSS_ERR_INVALID_ARGUMENTS);
return IA_CSS_ERR_INVALID_ARGUMENTS;
}
@@ -9445,7 +9445,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate the stream instance */
curr_stream = kmalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
- if (curr_stream == NULL) {
+ if (!curr_stream) {
err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
IA_CSS_LEAVE_ERR(err);
return err;
@@ -9457,7 +9457,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* allocate pipes */
curr_stream->num_pipes = num_pipes;
curr_stream->pipes = kzalloc(num_pipes * sizeof(struct ia_css_pipe *), GFP_KERNEL);
- if (curr_stream->pipes == NULL) {
+ if (!curr_stream->pipes) {
curr_stream->num_pipes = 0;
kfree(curr_stream);
curr_stream = NULL;
@@ -9780,23 +9780,22 @@ ERR:
if (err == IA_CSS_SUCCESS)
{
/* working mode: enter into the seed list */
- if (my_css_save.mode == sh_css_mode_working)
- for(i = 0; i < MAX_ACTIVE_STREAMS; i++)
- if (my_css_save.stream_seeds[i].stream == NULL)
- {
- IA_CSS_LOG("entered stream into loc=%d", i);
- my_css_save.stream_seeds[i].orig_stream = stream;
- my_css_save.stream_seeds[i].stream = curr_stream;
- my_css_save.stream_seeds[i].num_pipes = num_pipes;
- my_css_save.stream_seeds[i].stream_config = *stream_config;
- for(j = 0; j < num_pipes; j++)
- {
- my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
- my_css_save.stream_seeds[i].pipes[j] = pipes[j];
- my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
- }
- break;
+ if (my_css_save.mode == sh_css_mode_working) {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
+ if (!my_css_save.stream_seeds[i].stream) {
+ IA_CSS_LOG("entered stream into loc=%d", i);
+ my_css_save.stream_seeds[i].orig_stream = stream;
+ my_css_save.stream_seeds[i].stream = curr_stream;
+ my_css_save.stream_seeds[i].num_pipes = num_pipes;
+ my_css_save.stream_seeds[i].stream_config = *stream_config;
+ for (j = 0; j < num_pipes; j++) {
+ my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
+ my_css_save.stream_seeds[i].pipes[j] = pipes[j];
+ my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
+ }
+ break;
}
+ }
#else
if (err == IA_CSS_SUCCESS) {
err = ia_css_save_stream(curr_stream);
@@ -9970,32 +9969,32 @@ ia_css_stream_load(struct ia_css_stream *stream)
enum ia_css_err err;
assert(stream != NULL);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() enter, \n");
- for(i=0;i<MAX_ACTIVE_STREAMS;i++)
- if (my_css_save.stream_seeds[i].stream == stream)
- {
+ for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
+ if (my_css_save.stream_seeds[i].stream == stream) {
int j;
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
- if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS)
- {
- if (j)
- {
+ for ( j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++) {
+ if ((err = ia_css_pipe_create(&(my_css_save.stream_seeds[i].pipe_config[j]), &my_css_save.stream_seeds[i].pipes[j])) != IA_CSS_SUCCESS) {
+ if (j) {
int k;
for(k=0;k<j;k++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[k]);
}
return err;
}
- err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config), my_css_save.stream_seeds[i].num_pipes,
- my_css_save.stream_seeds[i].pipes, &(my_css_save.stream_seeds[i].stream));
- if (err != IA_CSS_SUCCESS)
- {
+ }
+ err = ia_css_stream_create(&(my_css_save.stream_seeds[i].stream_config),
+ my_css_save.stream_seeds[i].num_pipes,
+ my_css_save.stream_seeds[i].pipes,
+ &(my_css_save.stream_seeds[i].stream));
+ if (err != IA_CSS_SUCCESS) {
ia_css_stream_destroy(stream);
- for(j=0;j<my_css_save.stream_seeds[i].num_pipes;j++)
+ for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
return err;
}
break;
}
+ }
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_load() exit, \n");
return IA_CSS_SUCCESS;
#else
@@ -10524,7 +10523,7 @@ ia_css_update_continuous_frames(struct ia_css_stream *stream)
ia_css_debug_dtrace(
IA_CSS_DEBUG_TRACE,
"sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
- return IA_CSS_ERR_INVALID_ARGUMENTS;
+ return IA_CSS_ERR_INVALID_ARGUMENTS;
}
pipe = stream->continuous_pipe;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
index 63582161050a..8158ea40d069 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
@@ -145,9 +145,9 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia
size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);
- char *parambuf = (char *)kmalloc(paramstruct_size + configstruct_size + statestruct_size,
- GFP_KERNEL);
- if (parambuf == NULL)
+ char *parambuf = kmalloc(paramstruct_size + configstruct_size + statestruct_size,
+ GFP_KERNEL);
+ if (!parambuf)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
@@ -229,14 +229,15 @@ sh_css_load_firmware(const char *fw_data,
sh_css_blob_info = kmalloc(
(sh_css_num_binaries - NUM_OF_SPS) *
sizeof(*sh_css_blob_info), GFP_KERNEL);
- if (sh_css_blob_info == NULL)
+ if (!sh_css_blob_info)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
} else {
sh_css_blob_info = NULL;
}
- fw_minibuffer = kzalloc(sh_css_num_binaries * sizeof(struct fw_param), GFP_KERNEL);
- if (fw_minibuffer == NULL)
+ fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
+ GFP_KERNEL);
+ if (!fw_minibuffer)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
for (i = 0; i < sh_css_num_binaries; i++) {
@@ -295,10 +296,8 @@ void sh_css_unload_firmware(void)
}
memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
- if (sh_css_blob_info) {
- kfree(sh_css_blob_info);
- sh_css_blob_info = NULL;
- }
+ kfree(sh_css_blob_info);
+ sh_css_blob_info = NULL;
sh_css_num_binaries = 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
index 0bfebced63af..716d808d56db 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_hrt.c
@@ -80,5 +80,5 @@ enum ia_css_err sh_css_hrt_sp_wait(void)
hrt_sleep();
}
-return IA_CSS_SUCCESS;
+ return IA_CSS_SUCCESS;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
index 5b2b78f96dc5..0910021286a4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -961,7 +961,7 @@ struct host_sp_queues {
extern int (*sh_css_printf)(const char *fmt, va_list args);
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_print(const char *fmt, ...)
{
va_list ap;
@@ -973,7 +973,7 @@ sh_css_print(const char *fmt, ...)
}
}
-STORAGE_CLASS_INLINE void
+static inline void
sh_css_vprint(const char *fmt, va_list args)
{
if (sh_css_printf)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
index eaf60e7b2dac..e6ebd1b08f0d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_param_shading.c
@@ -365,10 +365,8 @@ ia_css_shading_table_alloc(
IA_CSS_ENTER("");
me = kmalloc(sizeof(*me), GFP_KERNEL);
- if (me == NULL) {
- IA_CSS_ERROR("out of memory");
+ if (!me)
return me;
- }
me->width = width;
me->height = height;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
index 48224370b8bf..fbb36112fe3c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
@@ -4266,33 +4266,33 @@ sh_css_params_write_to_ddr_internal(
size_t *virt_size_tetra_y[
IA_CSS_MORPH_TABLE_NUM_PLANES];
- virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
- virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
- virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
- virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
- virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
- virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
-
- virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
- virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
- virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
- virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
- virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
- virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
-
- virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
- virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
- virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
- virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
- virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
- virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
-
- virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
- virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
- virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
- virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
- virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
- virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
+ virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
+ virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
+ virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
+ virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
+ virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
+ virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;
+
+ virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
+ virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
+ virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
+ virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
+ virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
+ virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;
+
+ virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
+ virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
+ virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
+ virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
+ virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
+ virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;
+
+ virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
+ virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
+ virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
+ virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
+ virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
+ virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;
buff_realloced = false;
for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
index b8aae4ba5a78..a1c81c12718c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
index 11162f595fc7..79bd540d7882 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -58,7 +54,7 @@ static unsigned int nr_to_order_bottom(unsigned int nr)
return fls(nr) - 1;
}
-struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
+static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
struct hmm_buffer_object *bo;
@@ -99,7 +95,7 @@ static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
return 0;
}
-struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
+static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
struct rb_node *node, unsigned int pgnr)
{
struct hmm_buffer_object *this, *ret_bo, *temp_bo;
@@ -150,7 +146,7 @@ remove_bo_and_return:
return temp_bo;
}
-struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
+static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
ia_css_ptr start)
{
struct rb_node *n = root->rb_node;
@@ -175,8 +171,8 @@ struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
return NULL;
}
-struct hmm_buffer_object *__bo_search_by_addr_in_range(struct rb_root *root,
- unsigned int start)
+static struct hmm_buffer_object *__bo_search_by_addr_in_range(
+ struct rb_root *root, unsigned int start)
{
struct rb_node *n = root->rb_node;
struct hmm_buffer_object *bo;
@@ -258,7 +254,7 @@ static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
rb_insert_color(&bo->node, root);
}
-struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
+static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
struct hmm_buffer_object *bo,
unsigned int pgnr)
{
@@ -331,7 +327,7 @@ static void __bo_take_off_handling(struct hmm_buffer_object *bo)
}
}
-struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
+static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
struct hmm_buffer_object *next_bo)
{
struct hmm_bo_device *bdev;
@@ -725,12 +721,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo,
pgnr = bo->pgnr;
- bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
+ bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
- if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj\n");
+ if (unlikely(!bo->page_obj))
return -ENOMEM;
- }
i = 0;
alloc_pgnr = 0;
@@ -990,16 +984,13 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
struct vm_area_struct *vma;
struct page **pages;
- pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
- if (unlikely(!pages)) {
- dev_err(atomisp_dev, "out of memory for pages...\n");
+ pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(!pages))
return -ENOMEM;
- }
- bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
+ bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
GFP_KERNEL);
if (unlikely(!bo->page_obj)) {
- dev_err(atomisp_dev, "out of memory for bo->page_obj...\n");
kfree(pages);
return -ENOMEM;
}
@@ -1029,10 +1020,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
} else {
/*Handle frame buffer allocated in user space*/
mutex_unlock(&bo->mutex);
- down_read(&current->mm->mmap_sem);
- page_nr = get_user_pages((unsigned long)userptr,
- (int)(bo->pgnr), 1, pages, NULL);
- up_read(&current->mm->mmap_sem);
+ page_nr = get_user_pages_fast((unsigned long)userptr,
+ (int)(bo->pgnr), 1, pages);
mutex_lock(&bo->mutex);
bo->mem_type = HMM_BO_MEM_TYPE_USER;
}
@@ -1168,13 +1157,9 @@ status_err2:
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
- int ret;
-
check_bo_null_return(bo, 0);
- ret = bo->status & HMM_BO_PAGE_ALLOCED;
-
- return ret;
+ return bo->status & HMM_BO_PAGE_ALLOCED;
}
/*
@@ -1363,10 +1348,9 @@ void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
}
- pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
+ pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
if (unlikely(!pages)) {
mutex_unlock(&bo->mutex);
- dev_err(atomisp_dev, "out of memory for pages...\n");
return NULL;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
index 19e0e9ee37de..f59fd9908257 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_dynamic_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -116,8 +112,6 @@ static void free_pages_to_dynamic_pool(void *pool,
hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
GFP_KERNEL);
if (!hmm_page) {
- dev_err(atomisp_dev, "out of memory for hmm_page.\n");
-
/* free page directly */
ret = set_pages_wb(page_obj->page, 1);
if (ret)
@@ -151,10 +145,8 @@ static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
GFP_KERNEL);
- if (unlikely(!dypool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!dypool_info))
return -ENOMEM;
- }
dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
sizeof(struct hmm_page), 0,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
index bf6586805f7f..f300e7547997 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_reserved_pool.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -92,15 +88,12 @@ static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
pool_info = kmalloc(sizeof(struct hmm_reserved_pool_info),
GFP_KERNEL);
- if (unlikely(!pool_info)) {
- dev_err(atomisp_dev, "out of memory for repool_info.\n");
+ if (unlikely(!pool_info))
return -ENOMEM;
- }
pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,
GFP_KERNEL);
if (unlikely(!pool_info->pages)) {
- dev_err(atomisp_dev, "out of memory for repool_info->pages.\n");
kfree(pool_info);
return -ENOMEM;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
index 0722a68a49e7..0df96e661983 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_vm.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
@@ -89,10 +85,8 @@ static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
struct hmm_vm_node *node;
node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
- if (!node) {
- dev_err(atomisp_dev, "out of memory.\n");
+ if (!node)
return NULL;
- }
INIT_LIST_HEAD(&node->list);
node->pgnr = pgnr;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
index 46a5d29e2d3a..fb38fc540b81 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_custom_host_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef _hive_isp_css_custom_host_hrt_h_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
index 2e78976bb2ac..a94958bde718 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
index 1328944a7afd..15c2dfb6794e 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
index 6b9fb1b2caaf..1e135c7c6d9b 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
index dffd6e9cf693..bd44ebbc427c 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
index a9446adb4c70..9e51a657ece4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_bo_dev.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
index f1593aa38ce1..00885203fb14 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_common.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
index 1ba360433d88..bf24e44462bc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_pool.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef __HMM_POOL_H__
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
index 07d40662de32..52098161082d 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/hmm/hmm_vm.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
index 6b4eefc929e2..560014add005 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/isp_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
index 06041e94cbb2..031c0398bf65 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#ifndef SH_MMU_H_
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
index b9bad9f06235..662e98f41da2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/include/mmu/sh_mmu_mrfld.h
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
index 706bd43e8b1b..e36c2a33b41a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/isp_mmu.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
/*
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
index 97546bd124cd..c59bcc982966 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/mmu/sh_mmu_mrfld.c
@@ -14,10 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*
*/
#include "type_support.h"
diff --git a/drivers/staging/media/atomisp/platform/Makefile b/drivers/staging/media/atomisp/platform/Makefile
index df157630bda9..0e3b7e1c81c6 100644
--- a/drivers/staging/media/atomisp/platform/Makefile
+++ b/drivers/staging/media/atomisp/platform/Makefile
@@ -2,5 +2,4 @@
# Makefile for camera drivers.
#
-obj-$(CONFIG_INTEL_ATOMISP) += clock/
obj-$(CONFIG_INTEL_ATOMISP) += intel-mid/
diff --git a/drivers/staging/media/atomisp/platform/clock/Makefile b/drivers/staging/media/atomisp/platform/clock/Makefile
deleted file mode 100644
index 82fbe8b6968a..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for clock devices.
-#
-
-obj-$(CONFIG_INTEL_ATOMISP) += vlv2_plat_clock.o
-obj-$(CONFIG_INTEL_ATOMISP) += platform_vlv2_plat_clk.o
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
deleted file mode 100644
index 0aae9b0283bb..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * platform_vlv2_plat_clk.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/printk.h>
-
-static int __init vlv2_plat_clk_init(void)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_simple("vlv2_plat_clk", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("platform_vlv2_plat_clk:register failed: %ld\n",
- PTR_ERR(pdev));
- return PTR_ERR(pdev);
- }
-
- return 0;
-}
-
-device_initcall(vlv2_plat_clk_init);
diff --git a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h b/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
deleted file mode 100644
index b730ab0e8223..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/platform_vlv2_plat_clk.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * platform_vlv2_plat_clk.h: platform clock driver library header file
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- * Author: Sergio Aguirre <sergio.a.aguirre.rodriguez@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-#ifndef _PLATFORM_VLV2_PLAT_CLK_H_
-#define _PLATFORM_VLV2_PLAT_CLK_H_
-
-#include <linux/sfi.h>
-#include <asm/intel-mid.h>
-
-extern void __init *vlv2_plat_clk_device_platform_data(
- void *info) __attribute__((weak));
-#endif
diff --git a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c b/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
deleted file mode 100644
index f96789a31819..000000000000
--- a/drivers/staging/media/atomisp/platform/clock/vlv2_plat_clock.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * vlv2_plat_clock.c - VLV2 platform clock driver
- * Copyright (C) 2013 Intel Corporation
- *
- * Author: Asutosh Pathak <asutosh.pathak@intel.com>
- * Author: Chandra Sekhar Anagani <chandra.sekhar.anagani@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include "../../include/linux/vlv2_plat_clock.h"
-
-/* NOTE: Most of below constants could come from platform data.
- * To be fixed when appropriate ACPI support comes.
- */
-#define VLV2_PMC_CLK_BASE_ADDRESS 0xfed03060
-#define PLT_CLK_CTL_OFFSET(x) (0x04 * (x))
-
-#define CLK_CONFG_BIT_POS 0
-#define CLK_CONFG_BIT_LEN 2
-#define CLK_CONFG_D3_GATED 0
-#define CLK_CONFG_FORCE_ON 1
-#define CLK_CONFG_FORCE_OFF 2
-
-#define CLK_FREQ_TYPE_BIT_POS 2
-#define CLK_FREQ_TYPE_BIT_LEN 1
-#define CLK_FREQ_TYPE_XTAL 0 /* 25 MHz */
-#define CLK_FREQ_TYPE_PLL 1 /* 19.2 MHz */
-
-#define MAX_CLK_COUNT 5
-
-/* Helper macros to manipulate bitfields */
-#define REG_MASK(n) (((1 << (n##_BIT_LEN)) - 1) << (n##_BIT_POS))
-#define REG_SET_FIELD(r, n, v) (((r) & ~REG_MASK(n)) | \
- (((v) << (n##_BIT_POS)) & REG_MASK(n)))
-#define REG_GET_FIELD(r, n) (((r) & REG_MASK(n)) >> n##_BIT_POS)
-/*
- * vlv2 platform has 6 platform clocks, controlled by 4 byte registers
- * Total size required for mapping is 6*4 = 24 bytes
- */
-#define PMC_MAP_SIZE 24
-
-static DEFINE_MUTEX(clk_mutex);
-static void __iomem *pmc_base;
-
-/*
- * vlv2_plat_set_clock_freq - Set clock frequency to a specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @freq_type: Clock frequency (0-25 MHz(XTAL), 1-19.2 MHz(PLL) )
- */
-int vlv2_plat_set_clock_freq(int clk_num, int freq_type)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (freq_type != CLK_FREQ_TYPE_XTAL &&
- freq_type != CLK_FREQ_TYPE_PLL) {
- pr_err("wrong clock type\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_FREQ_TYPE, freq_type), addr);
- mutex_unlock(&clk_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_set_clock_freq);
-
-/*
- * vlv2_plat_get_clock_freq - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 0 for 25 MHz(XTAL) and 1 for 19.2 MHz(PLL)
- */
-int vlv2_plat_get_clock_freq(int clk_num)
-{
- u32 ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_FREQ_TYPE);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_freq);
-
-/*
- * vlv2_plat_configure_clock - Configure the specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- * @conf: Clock gating:
- * 0 - Clock gated on D3 state
- * 1 - Force on
- * 2,3 - Force off
- */
-int vlv2_plat_configure_clock(int clk_num, u32 conf)
-{
- void __iomem *addr;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (conf != CLK_CONFG_D3_GATED &&
- conf != CLK_CONFG_FORCE_ON &&
- conf != CLK_CONFG_FORCE_OFF) {
- pr_err("Invalid clock configuration requested\n");
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- addr = pmc_base + PLT_CLK_CTL_OFFSET(clk_num);
-
- mutex_lock(&clk_mutex);
- writel(REG_SET_FIELD(readl(addr), CLK_CONFG, conf), addr);
- mutex_unlock(&clk_mutex);
- return 0;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_configure_clock);
-
-/*
- * vlv2_plat_get_clock_status - Get the status of specified platform clock
- * @clk_num: Platform clock number (i.e. 0, 1, 2, ...,5)
- *
- * Returns 1 - On, 0 - Off
- */
-int vlv2_plat_get_clock_status(int clk_num)
-{
- int ret;
-
- if (clk_num < 0 || clk_num >= MAX_CLK_COUNT) {
- pr_err("Clock number out of range (%d)\n", clk_num);
- return -EINVAL;
- }
-
- if (!pmc_base) {
- pr_err("memio map is not set\n");
- return -EINVAL;
- }
-
- mutex_lock(&clk_mutex);
- ret = (int)REG_GET_FIELD(readl(pmc_base + PLT_CLK_CTL_OFFSET(clk_num)),
- CLK_CONFG);
- mutex_unlock(&clk_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(vlv2_plat_get_clock_status);
-
-static int vlv2_plat_clk_probe(struct platform_device *pdev)
-{
- int i = 0;
-
- pmc_base = ioremap_nocache(VLV2_PMC_CLK_BASE_ADDRESS, PMC_MAP_SIZE);
- if (!pmc_base) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- return -ENOMEM;
- }
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- dev_info(&pdev->dev, "vlv2_plat_clk initialized\n");
- return 0;
-}
-
-static const struct platform_device_id vlv2_plat_clk_id[] = {
- {"vlv2_plat_clk", 0},
- {}
-};
-
-static int vlv2_resume(struct device *device)
-{
- int i;
-
- /* Initialize all clocks as disabled */
- for (i = 0; i < MAX_CLK_COUNT; i++)
- vlv2_plat_configure_clock(i, CLK_CONFG_FORCE_OFF);
-
- return 0;
-}
-
-static int vlv2_suspend(struct device *device)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vlv2_pm_ops = {
- .suspend = vlv2_suspend,
- .resume = vlv2_resume,
-};
-
-static struct platform_driver vlv2_plat_clk_driver = {
- .probe = vlv2_plat_clk_probe,
- .id_table = vlv2_plat_clk_id,
- .driver = {
- .name = "vlv2_plat_clk",
- .pm = &vlv2_pm_ops,
- },
-};
-
-static int __init vlv2_plat_clk_init(void)
-{
- return platform_driver_register(&vlv2_plat_clk_driver);
-}
-arch_initcall(vlv2_plat_clk_init);
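The deleted vlv2_plat_clock.c driver drives the two-bit clock-configuration field and the one-bit frequency-select field of each PMC clock register through the token-pasting REG_MASK/REG_SET_FIELD/REG_GET_FIELD helpers shown above. The following is a minimal, self-contained sketch of how those macros compose; it is plain user-space C for illustration only (constants copied from the removed file, no MMIO involved):

#include <stdio.h>
#include <stdint.h>

#define CLK_CONFG_BIT_POS      0
#define CLK_CONFG_BIT_LEN      2
#define CLK_FREQ_TYPE_BIT_POS  2
#define CLK_FREQ_TYPE_BIT_LEN  1

/* Same helpers as the removed driver: n##_BIT_POS / n##_BIT_LEN are pasted */
#define REG_MASK(n)            (((1 << (n##_BIT_LEN)) - 1) << (n##_BIT_POS))
#define REG_SET_FIELD(r, n, v) (((r) & ~REG_MASK(n)) | \
                                (((v) << (n##_BIT_POS)) & REG_MASK(n)))
#define REG_GET_FIELD(r, n)    (((r) & REG_MASK(n)) >> n##_BIT_POS)

int main(void)
{
	uint32_t reg = 0;

	reg = REG_SET_FIELD(reg, CLK_CONFG, 1);     /* force the clock on */
	reg = REG_SET_FIELD(reg, CLK_FREQ_TYPE, 1); /* select the 19.2 MHz PLL */

	/* prints reg=0x5 confg=1 freq=1 */
	printf("reg=0x%x confg=%u freq=%u\n", reg,
	       REG_GET_FIELD(reg, CLK_CONFG),
	       REG_GET_FIELD(reg, CLK_FREQ_TYPE));
	return 0;
}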
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/Makefile b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
index 4621261c35db..c53db1364e21 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/Makefile
+++ b/drivers/staging/media/atomisp/platform/intel-mid/Makefile
@@ -1,5 +1,4 @@
#
# Makefile for intel-mid devices.
#
-obj-$(CONFIG_INTEL_ATOMISP) += intel_mid_pcihelpers.o
obj-$(CONFIG_INTEL_ATOMISP) += atomisp_gmin_platform.o
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
index edaae93af8f9..bf9f34b7ad72 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
@@ -4,10 +4,10 @@
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <media/v4l2-subdev.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include "../../include/linux/vlv2_plat_clock.h"
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
@@ -17,11 +17,7 @@
#define MAX_SUBDEVS 8
-/* Should be defined in vlv2_plat_clock API, isn't: */
-#define VLV2_CLK_PLL_19P2MHZ 1
-#define VLV2_CLK_XTAL_19P2MHZ 0
-#define VLV2_CLK_ON 1
-#define VLV2_CLK_OFF 2
+#define VLV2_CLK_PLL_19P2MHZ 1 /* XTAL on CHT */
#define ELDO1_SEL_REG 0x19
#define ELDO1_1P8V 0x16
#define ELDO1_CTRL_SHIFT 0x00
@@ -33,6 +29,8 @@ struct gmin_subdev {
struct v4l2_subdev *subdev;
int clock_num;
int clock_src;
+ bool clock_on;
+ struct clk *pmc_clk;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
struct regulator *v1p8_reg;
@@ -108,49 +106,6 @@ const struct atomisp_platform_data *atomisp_get_platform_data(void)
}
EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
-static int af_power_ctrl(struct v4l2_subdev *subdev, int flag)
-{
- struct gmin_subdev *gs = find_gmin_subdev(subdev);
-
- if (gs && gs->v2p8_vcm_on == flag)
- return 0;
- gs->v2p8_vcm_on = flag;
-
- /*
- * The power here is used for dw9817,
- * regulator is from rear sensor
- */
- if (gs->v2p8_vcm_reg) {
- if (flag)
- return regulator_enable(gs->v2p8_vcm_reg);
- else
- return regulator_disable(gs->v2p8_vcm_reg);
- }
- return 0;
-}
-
-/*
- * Used in a handful of modules. Focus motor control, I think. Note
- * that there is no configurability in the API, so this needs to be
- * fixed where it is used.
- *
- * struct camera_af_platform_data {
- * int (*power_ctrl)(struct v4l2_subdev *subdev, int flag);
- * };
- *
- * Note that the implementation in MCG platform_camera.c is stubbed
- * out anyway (i.e. returns zero from the callback) on BYT. So
- * neither needed on gmin platforms or supported upstream.
- */
-const struct camera_af_platform_data *camera_get_af_platform_data(void)
-{
- static struct camera_af_platform_data afpd = {
- .power_ctrl = af_power_ctrl,
- };
- return &afpd;
-}
-EXPORT_SYMBOL_GPL(camera_get_af_platform_data);
-
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
struct camera_sensor_platform_data *plat_data,
enum intel_v4l2_subdev_type type)
@@ -334,15 +289,8 @@ static const struct {
#define CFG_VAR_NAME_MAX 64
-static int gmin_platform_init(struct i2c_client *client)
-{
- return 0;
-}
-
-static int gmin_platform_deinit(void)
-{
- return 0;
-}
+#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
+static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
{
@@ -377,21 +325,42 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
- if (!IS_ERR(gmin_subdevs[i].gpio0)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio0, 0);
- if (ret)
- dev_err(dev, "gpio0 set output failed: %d\n", ret);
- } else {
- gmin_subdevs[i].gpio0 = NULL;
+ /* get PMC clock with clock framework */
+ snprintf(gmin_pmc_clk_name,
+ sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
+
+ gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
+ ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
+
+ dev_err(dev,
+ "Failed to get clk from %s : %d\n",
+ gmin_pmc_clk_name,
+ ret);
+
+ return NULL;
}
- if (!IS_ERR(gmin_subdevs[i].gpio1)) {
- ret = gpiod_direction_output(gmin_subdevs[i].gpio1, 0);
- if (ret)
- dev_err(dev, "gpio1 set output failed: %d\n", ret);
- } else {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * to disable a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ if (!ret)
+ clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
+
+ if (IS_ERR(gmin_subdevs[i].gpio0))
+ gmin_subdevs[i].gpio0 = NULL;
+
+ if (IS_ERR(gmin_subdevs[i].gpio1))
gmin_subdevs[i].gpio1 = NULL;
- }
if (pmic_id == PMIC_REGULATOR) {
gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
@@ -539,13 +508,27 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+
+ if (gs->clock_on == !!on)
+ return 0;
+
+ if (on) {
+ ret = clk_set_rate(gs->pmc_clk, gs->clock_src);
+
+ if (ret)
+ dev_err(&client->dev, "unable to set PMC rate %d\n",
+ gs->clock_src);
+
+ ret = clk_prepare_enable(gs->pmc_clk);
+ if (ret == 0)
+ gs->clock_on = true;
+ } else {
+ clk_disable_unprepare(gs->pmc_clk);
+ gs->clock_on = false;
+ }
- if (on)
- ret = vlv2_plat_set_clock_freq(gs->clock_num, gs->clock_src);
- if (ret)
- return ret;
- return vlv2_plat_configure_clock(gs->clock_num,
- on ? VLV2_CLK_ON : VLV2_CLK_OFF);
+ return ret;
}
static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
@@ -592,8 +575,6 @@ static struct camera_sensor_platform_data gmin_plat = {
.v2p8_ctrl = gmin_v2p8_ctrl,
.v1p2_ctrl = gmin_v1p2_ctrl,
.flisclk_ctrl = gmin_flisclk_ctrl,
- .platform_init = gmin_platform_init,
- .platform_deinit = gmin_platform_deinit,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
@@ -739,10 +720,8 @@ int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
if (flag) {
csi = kzalloc(sizeof(*csi), GFP_KERNEL);
- if (!csi) {
- dev_err(&client->dev, "out of memory\n");
+ if (!csi)
return -ENOMEM;
- }
csi->port = port;
csi->num_lanes = lanes;
csi->input_format = format;
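The atomisp_gmin_platform.c hunks above replace the ad-hoc vlv2_plat_clock calls with the common clock framework: the subdev's PMC clock is looked up by name, enabled once and immediately disabled so the framework's enable count matches whatever state the firmware left the hardware in, and gmin_flisclk_ctrl() then sets the rate and gates the clock per sensor power state. A hedged sketch of that consumer pattern follows; the clock name "pmc_plt_clk_2" and the 19.2 MHz rate are illustrative assumptions, not taken from the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_pmc_clk_setup(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "pmc_plt_clk_2");	/* name assumed for illustration */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * Enable then disable once: the framework refuses to disable a clock
	 * it has not enabled, and the firmware may have left it running, so
	 * this is the only safe way to reach a known-gated state before
	 * changing the rate.
	 */
	ret = clk_prepare_enable(clk);
	if (!ret)
		clk_disable_unprepare(clk);

	ret = clk_set_rate(clk, 19200000);
	if (ret)
		return ret;

	/* ungate for streaming; pair with clk_disable_unprepare() on power-off */
	return clk_prepare_enable(clk);
}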
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
deleted file mode 100644
index cd452cc20fea..000000000000
--- a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
+++ /dev/null
@@ -1,297 +0,0 @@
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/pm_qos.h>
-#include <linux/delay.h>
-
-/* G-Min addition: "platform_is()" lives in intel_mid_pm.h in the MCG
- * tree, but it's just platform ID info and we don't want to pull in
- * the whole SFI-based PM architecture.
- */
-#define INTEL_ATOM_MRST 0x26
-#define INTEL_ATOM_MFLD 0x27
-#define INTEL_ATOM_CLV 0x35
-#define INTEL_ATOM_MRFLD 0x4a
-#define INTEL_ATOM_BYT 0x37
-#define INTEL_ATOM_MOORFLD 0x5a
-#define INTEL_ATOM_CHT 0x4c
-/* synchronization for sharing the I2C controller */
-#define PUNIT_PORT 0x04
-#define PUNIT_DOORBELL_OPCODE (0xE0)
-#define PUNIT_DOORBELL_REG (0x0)
-#ifndef CSTATE_EXIT_LATENCY
-#define CSTATE_EXIT_LATENCY_C1 1
-#endif
-static inline int platform_is(u8 model)
-{
- return (boot_cpu_data.x86_model == model);
-}
-
-#include "../../include/asm/intel_mid_pcihelpers.h"
-
-/* Unified message bus read/write operation */
-static DEFINE_SPINLOCK(msgbus_lock);
-
-static struct pci_dev *pci_root;
-static struct pm_qos_request pm_qos;
-
-#define DW_I2C_NEED_QOS (platform_is(INTEL_ATOM_BYT))
-
-static int intel_mid_msgbus_init(void)
-{
- pci_root = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
- if (!pci_root) {
- pr_err("%s: Error: msgbus PCI handle NULL\n", __func__);
- return -ENODEV;
- }
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_add_request(&pm_qos,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
- }
- return 0;
-}
-fs_initcall(intel_mid_msgbus_init);
-
-u32 intel_mid_msgbus_read32_raw(u32 cmd)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-u32 intel_mid_msgbus_read32_raw_ext(u32 cmd, u32 cmd_ext)
-{
- unsigned long irq_flags;
- u32 data;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32_raw_ext);
-
-void intel_mid_msgbus_write32_raw(u32 cmd, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw);
-
-/*
- * GU: this function is only used by the VISA and 'VXD' drivers.
- */
-void intel_mid_msgbus_write32_raw_ext(u32 cmd, u32 cmd_ext, u32 data)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG, cmd_ext);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32_raw_ext);
-
-u32 intel_mid_msgbus_read32(u8 port, u32 addr)
-{
- unsigned long irq_flags;
- u32 data;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_READ << 24) | (port << 16) |
- ((addr & 0xff) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- pci_read_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, &data);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-
- return data;
-}
-EXPORT_SYMBOL(intel_mid_msgbus_read32);
-
-void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
-{
- unsigned long irq_flags;
- u32 cmd;
- u32 cmdext;
-
- cmd = (PCI_ROOT_MSGBUS_WRITE << 24) | (port << 16) |
- ((addr & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = addr & 0xffffff00;
-
- spin_lock_irqsave(&msgbus_lock, irq_flags);
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_DATA_REG, data);
-
- if (cmdext) {
- /* This resets to 0 automatically, no need to write 0 */
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_EXT_REG,
- cmdext);
- }
-
- pci_write_config_dword(pci_root, PCI_ROOT_MSGBUS_CTRL_REG, cmd);
- spin_unlock_irqrestore(&msgbus_lock, irq_flags);
-}
-EXPORT_SYMBOL(intel_mid_msgbus_write32);
-
-/* called only from where is later then fs_initcall */
-u32 intel_mid_soc_stepping(void)
-{
- return pci_root->revision;
-}
-EXPORT_SYMBOL(intel_mid_soc_stepping);
-
-static bool is_south_complex_device(struct pci_dev *dev)
-{
- unsigned int base_class = dev->class >> 16;
- unsigned int sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
-
- /* other than camera, pci bridges and display,
- * everything else are south complex devices.
- */
- if (((base_class == PCI_BASE_CLASS_MULTIMEDIA) &&
- (sub_class == ISP_SUB_CLASS)) ||
- (base_class == PCI_BASE_CLASS_BRIDGE) ||
- ((base_class == PCI_BASE_CLASS_DISPLAY) && !sub_class))
- return false;
- else
- return true;
-}
-
-/* In BYT platform, d3_delay for internal south complex devices,
- * they are not subject to 10 ms d3 to d0 delay required by pci spec.
- */
-static void pci_d3_delay_fixup(struct pci_dev *dev)
-{
- if (platform_is(INTEL_ATOM_BYT) ||
- platform_is(INTEL_ATOM_CHT)) {
- /* All internal devices are in bus 0. */
- if (dev->bus->number == 0 && is_south_complex_device(dev)) {
- dev->d3_delay = INTERNAL_PCI_PM_D3_WAIT;
- dev->d3cold_delay = INTERNAL_PCI_PM_D3_WAIT;
- }
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3_delay_fixup);
-
-#define PUNIT_SEMAPHORE (platform_is(INTEL_ATOM_BYT) ? 0x7 : 0x10E)
-#define GET_SEM() (intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE) & 0x1)
-
-static void reset_semaphore(void)
-{
- u32 data;
-
- data = intel_mid_msgbus_read32(PUNIT_PORT, PUNIT_SEMAPHORE);
- smp_mb();
- data = data & 0xfffffffc;
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, data);
- smp_mb();
-
-}
-
-int intel_mid_dw_i2c_acquire_ownership(void)
-{
- u32 ret = 0;
- u32 data = 0; /* data sent to PUNIT */
- u32 cmd;
- u32 cmdext;
- int timeout = 1000;
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, CSTATE_EXIT_LATENCY_C1 - 1);
-
- /*
- * We need disable irq. Otherwise, the main thread
- * might be preempted and the other thread jumps to
- * disable irq for a long time. Another case is
- * some irq handlers might trigger power voltage change
- */
- BUG_ON(irqs_disabled());
- local_irq_disable();
-
- /* host driver writes 0x2 to side band register 0x7 */
- intel_mid_msgbus_write32(PUNIT_PORT, PUNIT_SEMAPHORE, 0x2);
- smp_mb();
-
- /* host driver sends 0xE0 opcode to PUNIT and writes 0 register */
- cmd = (PUNIT_DOORBELL_OPCODE << 24) | (PUNIT_PORT << 16) |
- ((PUNIT_DOORBELL_REG & 0xFF) << 8) | PCI_ROOT_MSGBUS_DWORD_ENABLE;
- cmdext = PUNIT_DOORBELL_REG & 0xffffff00;
-
- if (cmdext)
- intel_mid_msgbus_write32_raw_ext(cmd, cmdext, data);
- else
- intel_mid_msgbus_write32_raw(cmd, data);
-
- /* host driver waits for bit 0 to be set in side band 0x7 */
- while (GET_SEM() != 0x1) {
- udelay(100);
- timeout--;
- if (timeout <= 0) {
- pr_err("Timeout: semaphore timed out, reset sem\n");
- ret = -ETIMEDOUT;
- reset_semaphore();
- /*Delay 1ms in case race with punit*/
- udelay(1000);
- if (GET_SEM() != 0) {
- /*Reset again as kernel might race with punit*/
- reset_semaphore();
- }
- pr_err("PUNIT SEM: %d\n",
- intel_mid_msgbus_read32(PUNIT_PORT,
- PUNIT_SEMAPHORE));
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS) {
- pm_qos_update_request(&pm_qos,
- PM_QOS_DEFAULT_VALUE);
- }
-
- return ret;
- }
- }
- smp_mb();
-
- return ret;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_acquire_ownership);
-
-int intel_mid_dw_i2c_release_ownership(void)
-{
- reset_semaphore();
- local_irq_enable();
-
- if (DW_I2C_NEED_QOS)
- pm_qos_update_request(&pm_qos, PM_QOS_DEFAULT_VALUE);
-
- return 0;
-}
-EXPORT_SYMBOL(intel_mid_dw_i2c_release_ownership);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 58adaea44eb5..5d3b0e5a1283 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -1964,7 +1964,7 @@ static ssize_t bcm2048_##prop##_write(struct device *dev, \
return err < 0 ? err : count; \
}
-#define property_read(prop, size, mask) \
+#define property_read(prop, mask) \
static ssize_t bcm2048_##prop##_read(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
@@ -1999,9 +1999,9 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
return sprintf(buf, mask "\n", value); \
}
-#define DEFINE_SYSFS_PROPERTY(prop, signal, size, mask, check) \
-property_write(prop, signal size, mask, check) \
-property_read(prop, size, mask)
+#define DEFINE_SYSFS_PROPERTY(prop, prop_type, mask, check) \
+property_write(prop, prop_type, mask, check) \
+property_read(prop, mask) \
#define property_str_read(prop, size) \
static ssize_t bcm2048_##prop##_read(struct device *dev, \
@@ -2027,39 +2027,39 @@ static ssize_t bcm2048_##prop##_read(struct device *dev, \
return count; \
}
-DEFINE_SYSFS_PROPERTY(power_state, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(mute, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(audio_route, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(dac_output, unsigned, int, "%u", 0)
-
-DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned, int, "%u", value > 3)
-
-DEFINE_SYSFS_PROPERTY(rds, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned, int, "%u", 0)
-DEFINE_SYSFS_PROPERTY(rds_wline, unsigned, int, "%u", 0)
-property_read(rds_pi, unsigned int, "%x")
+DEFINE_SYSFS_PROPERTY(power_state, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(mute, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(audio_route, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(dac_output, unsigned int, "%u", 0)
+
+DEFINE_SYSFS_PROPERTY(fm_hi_lo_injection, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_frequency, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_af_frequency, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_deemphasis, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_rds_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_best_tune_mode, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_rssi_threshold, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_mode_direction, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(fm_search_tune_mode, unsigned int, "%u", value > 3)
+
+DEFINE_SYSFS_PROPERTY(rds, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_b_block_match, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_mask, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_pi_match, unsigned int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(rds_wline, unsigned int, "%u", 0)
+property_read(rds_pi, "%x")
property_str_read(rds_rt, (BCM2048_MAX_RDS_RT + 1))
property_str_read(rds_ps, (BCM2048_MAX_RDS_PS + 1))
-property_read(fm_rds_flags, unsigned int, "%u")
+property_read(fm_rds_flags, "%u")
property_str_read(rds_data, BCM2048_MAX_RDS_RADIO_TEXT * 5)
-property_read(region_bottom_frequency, unsigned int, "%u")
-property_read(region_top_frequency, unsigned int, "%u")
+property_read(region_bottom_frequency, "%u")
+property_read(region_top_frequency, "%u")
property_signed_read(fm_carrier_error, int, "%d")
property_signed_read(fm_rssi, int, "%d")
-DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
+DEFINE_SYSFS_PROPERTY(region, unsigned int, "%u", 0)
static struct device_attribute attrs[] = {
__ATTR(power_state, 0644, bcm2048_power_state_read,
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 3569625b6305..698a4210316e 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
imx-media-objs := imx-media-dev.o imx-media-internal-sd.o imx-media-of.o
imx-media-common-objs := imx-media-utils.o imx-media-fim.o
imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
index c2bb5ef2acb4..9e41987f9884 100644
--- a/drivers/staging/media/imx/imx-ic-prp.c
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -320,9 +320,10 @@ static int prp_link_validate(struct v4l2_subdev *sd,
* the ->PRPENC link cannot be enabled if the source
* is the VDIC
*/
- if (priv->sink_sd_prpenc)
+ if (priv->sink_sd_prpenc) {
ret = -EINVAL;
- goto out;
+ goto out;
+ }
} else {
/* the source is a CSI */
if (!csi) {
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 0790b3d9e255..143038c6c403 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -293,9 +293,9 @@ static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
{
- struct prp_priv *priv = (struct prp_priv *)data;
+ struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
struct imx_ic_priv *ic_priv = priv->ic_priv;
@@ -1292,8 +1292,7 @@ static int prp_init(struct imx_ic_priv *ic_priv)
priv->ic_priv = ic_priv;
spin_lock_init(&priv->irqlock);
- setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
PRPENCVF_SRC_PAD);
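The imx-ic-prpencvf.c hunk above (like the imx-media-csi.c and hdm_usb.c hunks that follow) converts from setup_timer() with an unsigned-long cookie to the timer_list-pointer API: the callback now receives the struct timer_list itself and recovers its containing structure with from_timer(), so no cast of an opaque data value is needed. A minimal sketch of the pattern with hypothetical names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list eof_timer;
	/* ... driver state ... */
};

/* New-style callback: gets the timer pointer, not an unsigned long cookie */
static void demo_eof_timeout(struct timer_list *t)
{
	struct demo_priv *priv = from_timer(priv, t, eof_timer);

	/* handle the timeout using priv */
}

static void demo_timer_init(struct demo_priv *priv)
{
	timer_setup(&priv->eof_timer, demo_eof_timeout, 0);
	mod_timer(&priv->eof_timer, jiffies + msecs_to_jiffies(100));
}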
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 6d856118c223..bb1d6dafca83 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -254,9 +254,9 @@ static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
{
- struct csi_priv *priv = (struct csi_priv *)data;
+ struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
v4l2_err(&priv->sd, "EOF timeout\n");
@@ -1739,8 +1739,7 @@ static int imx_csi_probe(struct platform_device *pdev)
priv->csi_id = pdata->csi;
priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
- setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
spin_lock_init(&priv->irqlock);
v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index b55e5ebba8b4..47c4c954fed5 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -440,6 +440,11 @@ unlock:
return media_device_register(&imxmd->md);
}
+static const struct v4l2_async_notifier_operations imx_media_subdev_ops = {
+ .bound = imx_media_subdev_bound,
+ .complete = imx_media_probe_complete,
+};
+
/*
* adds controls to a video device from an entity subdevice.
* Continues upstream from the entity's sink pads.
@@ -608,8 +613,7 @@ static int imx_media_probe(struct platform_device *pdev)
/* prepare the async subdev notifier and register it */
imxmd->subdev_notifier.subdevs = imxmd->async_ptrs;
- imxmd->subdev_notifier.bound = imx_media_subdev_bound;
- imxmd->subdev_notifier.complete = imx_media_probe_complete;
+ imxmd->subdev_notifier.ops = &imx_media_subdev_ops;
ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
&imxmd->subdev_notifier);
if (ret) {
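The imx-media-dev.c change tracks the v4l2-async move to a const v4l2_async_notifier_operations table: the .bound and .complete callbacks are no longer assigned directly on the notifier but grouped in an ops structure referenced through notifier->ops. A hedged sketch of the registration flow, with hypothetical callback names and assuming the notifier's subdev list is already populated:

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

static int demo_subdev_bound(struct v4l2_async_notifier *notifier,
			     struct v4l2_subdev *sd,
			     struct v4l2_async_subdev *asd)
{
	/* match the newly probed subdevice to the bridge driver's state */
	return 0;
}

static int demo_probe_complete(struct v4l2_async_notifier *notifier)
{
	/* all awaited subdevices are bound; create video nodes and links */
	return 0;
}

static const struct v4l2_async_notifier_operations demo_notifier_ops = {
	.bound = demo_subdev_bound,
	.complete = demo_probe_complete,
};

static int demo_register_notifier(struct v4l2_device *v4l2_dev,
				  struct v4l2_async_notifier *notifier)
{
	notifier->ops = &demo_notifier_ops;
	return v4l2_async_notifier_register(v4l2_dev, notifier);
}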
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 71af13bd0ebd..6bd0717bf76e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -99,13 +99,14 @@ struct IR {
struct kref ref;
struct list_head list;
- /* FIXME spinlock access to l.features */
- struct lirc_driver l;
+ /* FIXME spinlock access to l->features */
+ struct lirc_dev *l;
struct lirc_buffer rbuf;
struct mutex ir_lock;
atomic_t open_count;
+ struct device *dev;
struct i2c_adapter *adapter;
spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
@@ -183,10 +184,8 @@ static void release_ir_device(struct kref *ref)
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
- if (ir->l.minor >= 0) {
- lirc_unregister_driver(ir->l.minor);
- ir->l.minor = -1;
- }
+ if (ir->l)
+ lirc_unregister_device(ir->l);
if (kfifo_initialized(&ir->rbuf.fifo))
lirc_buffer_free(&ir->rbuf);
@@ -243,7 +242,7 @@ static void release_ir_rx(struct kref *ref)
* and releasing the ir reference can cause a sleep. That work is
* performed by put_ir_rx()
*/
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
/* Don't put_ir_device(rx->ir) here; lock can't be freed yet */
ir->rx = NULL;
/* Don't do the kfree(rx) here; we still need to kill the poll thread */
@@ -288,7 +287,7 @@ static void release_ir_tx(struct kref *ref)
struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
struct IR *ir = tx->ir;
- ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_SEND_LIRCCODE;
/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
ir->tx = NULL;
kfree(tx);
@@ -317,12 +316,12 @@ static int add_to_buf(struct IR *ir)
int ret;
int failures = 0;
unsigned char sendbuf[1] = { 0 };
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
struct IR_rx *rx;
struct IR_tx *tx;
if (lirc_buffer_full(rbuf)) {
- dev_dbg(ir->l.dev, "buffer overflow\n");
+ dev_dbg(ir->dev, "buffer overflow\n");
return -EOVERFLOW;
}
@@ -368,17 +367,17 @@ static int add_to_buf(struct IR *ir)
*/
ret = i2c_master_send(rx->c, sendbuf, 1);
if (ret != 1) {
- dev_err(ir->l.dev, "i2c_master_send failed with %d\n",
+ dev_err(ir->dev, "i2c_master_send failed with %d\n",
ret);
if (failures >= 3) {
mutex_unlock(&ir->ir_lock);
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"unable to read from the IR chip after 3 resets, giving up\n");
break;
}
/* Looks like the chip crashed, reset it */
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"polling the IR receiver chip failed, trying reset\n");
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -405,14 +404,14 @@ static int add_to_buf(struct IR *ir)
ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
mutex_unlock(&ir->ir_lock);
if (ret != sizeof(keybuf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"i2c_master_recv failed with %d -- keeping last read buffer\n",
ret);
} else {
rx->b[0] = keybuf[3];
rx->b[1] = keybuf[4];
rx->b[2] = keybuf[5];
- dev_dbg(ir->l.dev,
+ dev_dbg(ir->dev,
"key (0x%02x/0x%02x)\n",
rx->b[0], rx->b[1]);
}
@@ -463,9 +462,9 @@ static int add_to_buf(struct IR *ir)
static int lirc_thread(void *arg)
{
struct IR *ir = arg;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
- dev_dbg(ir->l.dev, "poll thread started\n");
+ dev_dbg(ir->dev, "poll thread started\n");
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -493,7 +492,7 @@ static int lirc_thread(void *arg)
wake_up_interruptible(&rbuf->wait_poll);
}
- dev_dbg(ir->l.dev, "poll thread ended\n");
+ dev_dbg(ir->dev, "poll thread ended\n");
return 0;
}
@@ -646,10 +645,10 @@ static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
buf[0] = (unsigned char)(i + 1);
for (j = 0; j < tosend; ++j)
buf[1 + j] = data_block[i + j];
- dev_dbg(tx->ir->l.dev, "%*ph", 5, buf);
+ dev_dbg(tx->ir->dev, "%*ph", 5, buf);
ret = i2c_master_send(tx->c, buf, tosend + 1);
if (ret != tosend + 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -674,7 +673,7 @@ static int send_boot_data(struct IR_tx *tx)
buf[1] = 0x20;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -691,22 +690,22 @@ static int send_boot_data(struct IR_tx *tx)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Here comes the firmware version... (hopefully) */
ret = i2c_master_recv(tx->c, buf, 4);
if (ret != 4) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return 0;
}
if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
- dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX init response: %02x\n",
buf[0]);
return 0;
}
- dev_notice(tx->ir->l.dev,
+ dev_notice(tx->ir->dev,
"Zilog/Hauppauge IR blaster firmware version %d.%d.%d loaded\n",
buf[1], buf[2], buf[3]);
@@ -751,15 +750,15 @@ static int fw_load(struct IR_tx *tx)
}
/* Request codeset data file */
- ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
+ ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->dev);
if (ret != 0) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"firmware haup-ir-blaster.bin not available (%d)\n",
ret);
ret = ret < 0 ? ret : -EFAULT;
goto out;
}
- dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size);
+ dev_dbg(tx->ir->dev, "firmware of size %zu loaded\n", fw_entry->size);
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
@@ -787,7 +786,7 @@ static int fw_load(struct IR_tx *tx)
if (!read_uint8(&data, tx_data->endp, &version))
goto corrupt;
if (version != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unsupported code set file version (%u, expected 1) -- please upgrade to a newer driver\n",
version);
fw_unload_locked();
@@ -804,7 +803,7 @@ static int fw_load(struct IR_tx *tx)
&tx_data->num_code_sets))
goto corrupt;
- dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
+ dev_dbg(tx->ir->dev, "%u IR blaster codesets loaded\n",
tx_data->num_code_sets);
tx_data->code_sets = vmalloc(
@@ -869,7 +868,7 @@ static int fw_load(struct IR_tx *tx)
goto out;
corrupt:
- dev_err(tx->ir->l.dev, "firmware is corrupt\n");
+ dev_err(tx->ir->dev, "firmware is corrupt\n");
fw_unload_locked();
ret = -EFAULT;
@@ -882,16 +881,16 @@ out:
static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
int ret = 0, written = 0, retries = 0;
unsigned int m;
DECLARE_WAITQUEUE(wait, current);
- dev_dbg(ir->l.dev, "read called\n");
+ dev_dbg(ir->dev, "read called\n");
if (n % rbuf->chunk_size) {
- dev_dbg(ir->l.dev, "read result = -EINVAL\n");
+ dev_dbg(ir->dev, "read result = -EINVAL\n");
return -EINVAL;
}
@@ -935,7 +934,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
unsigned char buf[MAX_XFER_SIZE];
if (rbuf->chunk_size > sizeof(buf)) {
- dev_err(ir->l.dev,
+ dev_err(ir->dev,
"chunk_size is too big (%d)!\n",
rbuf->chunk_size);
ret = -EINVAL;
@@ -950,7 +949,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
retries++;
}
if (retries >= 5) {
- dev_err(ir->l.dev, "Buffer read failed!\n");
+ dev_err(ir->dev, "Buffer read failed!\n");
ret = -EIO;
}
}
@@ -960,7 +959,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
put_ir_rx(rx, false);
set_current_state(TASK_RUNNING);
- dev_dbg(ir->l.dev, "read result = %d (%s)\n", ret,
+ dev_dbg(ir->dev, "read result = %d (%s)\n", ret,
ret ? "Error" : "OK");
return ret ? ret : written;
@@ -977,7 +976,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = get_key_data(data_block, code, key);
if (ret == -EPROTO) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"failed to get data for code %u, key %u -- check lircd.conf entries\n",
code, key);
return ret;
@@ -995,7 +994,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x40;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1008,18 +1007,18 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
}
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
/* Send finished download? */
ret = i2c_master_recv(tx->c, buf, 1);
if (ret != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
if (buf[0] != 0xA0) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #1: %02x\n",
buf[0]);
return -EFAULT;
}
@@ -1029,7 +1028,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
buf[1] = 0x80;
ret = i2c_master_send(tx->c, buf, 2);
if (ret != 2) {
- dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_send failed with %d\n", ret);
return ret < 0 ? ret : -EFAULT;
}
@@ -1039,7 +1038,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
* going to skip this whole mess and say we're done on the HD PVR
*/
if (!tx->post_tx_ready_poll) {
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1055,12 +1054,12 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
ret = i2c_master_send(tx->c, buf, 1);
if (ret == 1)
break;
- dev_dbg(tx->ir->l.dev,
+ dev_dbg(tx->ir->dev,
"NAK expected: i2c_master_send failed with %d (try %d)\n",
ret, i + 1);
}
if (ret != 1) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"IR TX chip never got ready: last i2c_master_send failed with %d\n",
ret);
return ret < 0 ? ret : -EFAULT;
@@ -1069,17 +1068,17 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
/* Seems to be an 'ok' response */
i = i2c_master_recv(tx->c, buf, 1);
if (i != 1) {
- dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret);
+ dev_err(tx->ir->dev, "i2c_master_recv failed with %d\n", ret);
return -EFAULT;
}
if (buf[0] != 0x80) {
- dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n",
+ dev_err(tx->ir->dev, "unexpected IR TX response #2: %02x\n",
buf[0]);
return -EFAULT;
}
/* Oh good, it worked */
- dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key);
+ dev_dbg(tx->ir->dev, "sent code %u, key %u\n", code, key);
return 0;
}
@@ -1092,7 +1091,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
static ssize_t write(struct file *filep, const char __user *buf, size_t n,
loff_t *ppos)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_tx *tx;
size_t i;
int failures = 0;
@@ -1165,11 +1164,11 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
*/
if (ret != 0) {
/* Looks like the chip crashed, reset it */
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"sending to the IR transmitter chip failed, trying reset\n");
if (failures >= 3) {
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"unable to send to the IR chip after 3 resets, giving up\n");
mutex_unlock(&ir->ir_lock);
mutex_unlock(&tx->client_lock);
@@ -1200,12 +1199,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* copied from lirc_dev */
static unsigned int poll(struct file *filep, poll_table *wait)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
- struct lirc_buffer *rbuf = ir->l.rbuf;
+ struct lirc_buffer *rbuf = ir->l->buf;
unsigned int ret;
- dev_dbg(ir->l.dev, "%s called\n", __func__);
+ dev_dbg(ir->dev, "%s called\n", __func__);
rx = get_ir_rx(ir);
if (!rx) {
@@ -1213,7 +1212,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dev_dbg(ir->l.dev, "%s result = POLLERR\n", __func__);
+ dev_dbg(ir->dev, "%s result = POLLERR\n", __func__);
return POLLERR;
}
@@ -1226,19 +1225,19 @@ static unsigned int poll(struct file *filep, poll_table *wait)
/* Indicate what ops could happen immediately without blocking */
ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN | POLLRDNORM);
- dev_dbg(ir->l.dev, "%s result = %s\n", __func__,
+ dev_dbg(ir->dev, "%s result = %s\n", __func__,
ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
- struct IR *ir = filep->private_data;
+ struct IR *ir = lirc_get_pdata(filep);
unsigned long __user *uptr = (unsigned long __user *)arg;
int result;
unsigned long mode, features;
- features = ir->l.features;
+ features = ir->l->features;
switch (cmd) {
case LIRC_GET_LENGTH:
@@ -1283,46 +1282,18 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
return result;
}
-static struct IR *get_ir_device_by_minor(unsigned int minor)
-{
- struct IR *ir;
- struct IR *ret = NULL;
-
- mutex_lock(&ir_devices_lock);
-
- if (!list_empty(&ir_devices_list)) {
- list_for_each_entry(ir, &ir_devices_list, list) {
- if (ir->l.minor == minor) {
- ret = get_ir_device(ir, true);
- break;
- }
- }
- }
-
- mutex_unlock(&ir_devices_lock);
- return ret;
-}
-
/*
- * Open the IR device. Get hold of our IR structure and
- * stash it in private_data for the file
+ * Open the IR device.
*/
static int open(struct inode *node, struct file *filep)
{
struct IR *ir;
- unsigned int minor = MINOR(node->i_rdev);
- /* find our IR struct */
- ir = get_ir_device_by_minor(minor);
-
- if (!ir)
- return -ENODEV;
+ lirc_init_pdata(node, filep);
+ ir = lirc_get_pdata(filep);
atomic_inc(&ir->open_count);
- /* stash our IR struct */
- filep->private_data = ir;
-
nonseekable_open(node, filep);
return 0;
}
@@ -1330,14 +1301,7 @@ static int open(struct inode *node, struct file *filep)
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
- /* find our IR struct */
- struct IR *ir = filep->private_data;
-
- if (!ir) {
- pr_err("ir: %s: no private_data attached to the file!\n",
- __func__);
- return -ENODEV;
- }
+ struct IR *ir = lirc_get_pdata(filep);
atomic_dec(&ir->open_count);
@@ -1383,16 +1347,6 @@ static const struct file_operations lirc_fops = {
.release = close
};
-static struct lirc_driver lirc_template = {
- .name = "lirc_zilog",
- .minor = -1,
- .code_length = 13,
- .buffer_size = BUFLEN / 2,
- .chunk_size = 2,
- .fops = &lirc_fops,
- .owner = THIS_MODULE,
-};
-
static int ir_remove(struct i2c_client *client)
{
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
@@ -1476,27 +1430,42 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
list_add_tail(&ir->list, &ir_devices_list);
ir->adapter = adap;
+ ir->dev = &adap->dev;
mutex_init(&ir->ir_lock);
atomic_set(&ir->open_count, 0);
spin_lock_init(&ir->tx_ref_lock);
spin_lock_init(&ir->rx_ref_lock);
/* set lirc_dev stuff */
- memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+ ir->l = lirc_allocate_device();
+ if (!ir->l) {
+ ret = -ENOMEM;
+ goto out_put_ir;
+ }
+
+ snprintf(ir->l->name, sizeof(ir->l->name), "lirc_zilog");
+ ir->l->code_length = 13;
+ ir->l->fops = &lirc_fops;
+ ir->l->owner = THIS_MODULE;
+ ir->l->dev.parent = &adap->dev;
+
/*
* FIXME this is a pointer reference to us, but no refcount.
*
* This OK for now, since lirc_dev currently won't touch this
* buffer as we provide our own lirc_fops.
*
- * Currently our own lirc_fops rely on this ir->l.rbuf pointer
+ * Currently our own lirc_fops rely on this ir->l->buf pointer
*/
- ir->l.rbuf = &ir->rbuf;
- ir->l.dev = &adap->dev;
- ret = lirc_buffer_init(ir->l.rbuf,
- ir->l.chunk_size, ir->l.buffer_size);
- if (ret)
+ ir->l->buf = &ir->rbuf;
+ /* This will be returned by lirc_get_pdata() */
+ ir->l->data = ir;
+ ret = lirc_buffer_init(ir->l->buf, 2, BUFLEN / 2);
+ if (ret) {
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_ir;
+ }
}
if (tx_probe) {
@@ -1512,7 +1481,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&tx->ref);
ir->tx = tx;
- ir->l.features |= LIRC_CAN_SEND_LIRCCODE;
+ ir->l->features |= LIRC_CAN_SEND_LIRCCODE;
mutex_init(&tx->client_lock);
tx->c = client;
tx->need_boot = 1;
@@ -1538,7 +1507,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Proceed only if the Rx client is also ready or not needed */
if (!rx && !tx_only) {
- dev_info(tx->ir->l.dev,
+ dev_info(tx->ir->dev,
"probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
adap->name, adap->nr);
goto out_ok;
@@ -1556,7 +1525,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
kref_init(&rx->ref);
ir->rx = rx;
- ir->l.features |= LIRC_CAN_REC_LIRCCODE;
+ ir->l->features |= LIRC_CAN_REC_LIRCCODE;
mutex_init(&rx->client_lock);
rx->c = client;
rx->hdpvr_data_fmt =
@@ -1578,7 +1547,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
"zilog-rx-i2c-%d", adap->nr);
if (IS_ERR(rx->task)) {
ret = PTR_ERR(rx->task);
- dev_err(tx->ir->l.dev,
+ dev_err(tx->ir->dev,
"%s: could not start IR Rx polling thread\n",
__func__);
/* Failed kthread, so put back the ir ref */
@@ -1586,7 +1555,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Failure exit, so put back rx ref from i2c_client */
i2c_set_clientdata(client, NULL);
put_ir_rx(rx, true);
- ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
+ ir->l->features &= ~LIRC_CAN_REC_LIRCCODE;
goto out_put_tx;
}
@@ -1599,17 +1568,19 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* register with lirc */
- ir->l.minor = lirc_register_driver(&ir->l);
- if (ir->l.minor < 0) {
- dev_err(tx->ir->l.dev,
- "%s: lirc_register_driver() failed: %i\n",
- __func__, ir->l.minor);
- ret = -EBADRQC;
+ ret = lirc_register_device(ir->l);
+ if (ret < 0) {
+ dev_err(tx->ir->dev,
+ "%s: lirc_register_device() failed: %i\n",
+ __func__, ret);
+ lirc_free_device(ir->l);
+ ir->l = NULL;
goto out_put_xx;
}
- dev_info(ir->l.dev,
+
+ dev_info(ir->dev,
"IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
- adap->name, adap->nr, ir->l.minor);
+ adap->name, adap->nr, ir->l->minor);
out_ok:
if (rx)
@@ -1617,7 +1588,7 @@ out_ok:
if (tx)
put_ir_tx(tx, true);
put_ir_device(ir, true);
- dev_info(ir->l.dev,
+ dev_info(ir->dev,
"probe of IR %s on %s (i2c-%d) done\n",
tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_unlock(&ir_devices_lock);
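The lirc_zilog conversion above drops the static lirc_driver template and the lirc_register_driver() minor-number handling in favour of a dynamically allocated struct lirc_dev: allocate, fill in name, code_length, fops and buffer, stash the driver's private data for lirc_get_pdata(), then call lirc_register_device(). A compressed sketch of that sequence as used in the hunk, with error handling trimmed and "demo" names as placeholders rather than anything from the patch:

#include <linux/module.h>
#include <media/lirc_dev.h>

struct demo_state {
	struct lirc_dev *l;
	struct lirc_buffer rbuf;
};

static const struct file_operations demo_lirc_fops;	/* per-driver fops, as above */

static int demo_lirc_register(struct demo_state *st, struct device *parent)
{
	int ret;

	st->l = lirc_allocate_device();
	if (!st->l)
		return -ENOMEM;

	snprintf(st->l->name, sizeof(st->l->name), "lirc_demo");
	st->l->code_length = 13;
	st->l->fops = &demo_lirc_fops;
	st->l->owner = THIS_MODULE;
	st->l->dev.parent = parent;
	st->l->buf = &st->rbuf;
	st->l->data = st;		/* later returned by lirc_get_pdata() */

	ret = lirc_register_device(st->l);
	if (ret) {
		lirc_free_device(st->l);
		st->l = NULL;
	}
	return ret;
}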
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
index 9ee981c7786b..f5bbb9deaab5 100644
--- a/drivers/staging/most/Makefile
+++ b/drivers/staging/most/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOSTCORE) += mostcore/
obj-$(CONFIG_AIM_CDEV) += aim-cdev/
obj-$(CONFIG_AIM_NETWORK) += aim-network/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 85775da293fb..667dacac81f0 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -744,9 +744,9 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
*/
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
{
- struct most_dev *mdev = (struct most_dev *)data;
+ struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
@@ -1138,8 +1138,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
- setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
- (unsigned long)mdev);
+ timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev->usb_device = usb_dev;
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index e05ae4645d91..30532d8c310b 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -364,39 +364,39 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
stats->multicast = xlr_nae_rdreg(priv->base_addr,
- RX_MULTICAST_PACKET_COUNTER);
+ RX_MULTICAST_PACKET_COUNTER);
stats->collisions = xlr_nae_rdreg(priv->base_addr,
- TX_TOTAL_COLLISION_COUNTER);
+ TX_TOTAL_COLLISION_COUNTER);
stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FRAME_LENGTH_ERROR_COUNTER);
+ RX_FRAME_LENGTH_ERROR_COUNTER);
stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FCS_ERROR_COUNTER);
+ RX_FCS_ERROR_COUNTER);
stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
- RX_ALIGNMENT_ERROR_COUNTER);
+ RX_ALIGNMENT_ERROR_COUNTER);
stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
+ RX_DROP_PACKET_COUNTER);
stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
- RX_CARRIER_SENSE_ERROR_COUNTER);
+ RX_CARRIER_SENSE_ERROR_COUNTER);
stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_fifo_errors +
- stats->rx_missed_errors);
+ stats->rx_frame_errors + stats->rx_fifo_errors +
+ stats->rx_missed_errors);
stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
+ TX_DROP_FRAME_COUNTER);
}
static const struct net_device_ops xlr_netdev_ops = {
@@ -448,41 +448,35 @@ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
{
priv->frin_spill = xlr_config_spill(priv,
- R_REG_FRIN_SPILL_MEM_START_0,
- R_REG_FRIN_SPILL_MEM_START_1,
- R_REG_FRIN_SPILL_MEM_SIZE,
- MAX_FRIN_SPILL *
- sizeof(u64));
+ R_REG_FRIN_SPILL_MEM_START_0,
+ R_REG_FRIN_SPILL_MEM_START_1,
+ R_REG_FRIN_SPILL_MEM_SIZE,
+ MAX_FRIN_SPILL * sizeof(u64));
priv->frout_spill = xlr_config_spill(priv,
- R_FROUT_SPILL_MEM_START_0,
- R_FROUT_SPILL_MEM_START_1,
- R_FROUT_SPILL_MEM_SIZE,
- MAX_FROUT_SPILL *
- sizeof(u64));
+ R_FROUT_SPILL_MEM_START_0,
+ R_FROUT_SPILL_MEM_START_1,
+ R_FROUT_SPILL_MEM_SIZE,
+ MAX_FROUT_SPILL * sizeof(u64));
priv->class_0_spill = xlr_config_spill(priv,
- R_CLASS0_SPILL_MEM_START_0,
- R_CLASS0_SPILL_MEM_START_1,
- R_CLASS0_SPILL_MEM_SIZE,
- MAX_CLASS_0_SPILL *
- sizeof(u64));
+ R_CLASS0_SPILL_MEM_START_0,
+ R_CLASS0_SPILL_MEM_START_1,
+ R_CLASS0_SPILL_MEM_SIZE,
+ MAX_CLASS_0_SPILL * sizeof(u64));
priv->class_1_spill = xlr_config_spill(priv,
- R_CLASS1_SPILL_MEM_START_0,
- R_CLASS1_SPILL_MEM_START_1,
- R_CLASS1_SPILL_MEM_SIZE,
- MAX_CLASS_1_SPILL *
- sizeof(u64));
+ R_CLASS1_SPILL_MEM_START_0,
+ R_CLASS1_SPILL_MEM_START_1,
+ R_CLASS1_SPILL_MEM_SIZE,
+ MAX_CLASS_1_SPILL * sizeof(u64));
priv->class_2_spill = xlr_config_spill(priv,
- R_CLASS2_SPILL_MEM_START_0,
- R_CLASS2_SPILL_MEM_START_1,
- R_CLASS2_SPILL_MEM_SIZE,
- MAX_CLASS_2_SPILL *
- sizeof(u64));
+ R_CLASS2_SPILL_MEM_START_0,
+ R_CLASS2_SPILL_MEM_START_1,
+ R_CLASS2_SPILL_MEM_SIZE,
+ MAX_CLASS_2_SPILL * sizeof(u64));
priv->class_3_spill = xlr_config_spill(priv,
- R_CLASS3_SPILL_MEM_START_0,
- R_CLASS3_SPILL_MEM_START_1,
- R_CLASS3_SPILL_MEM_SIZE,
- MAX_CLASS_3_SPILL *
- sizeof(u64));
+ R_CLASS3_SPILL_MEM_START_0,
+ R_CLASS3_SPILL_MEM_START_1,
+ R_CLASS3_SPILL_MEM_SIZE,
+ MAX_CLASS_3_SPILL * sizeof(u64));
}
/*
diff --git a/drivers/staging/nvec/Makefile b/drivers/staging/nvec/Makefile
index 0db0e1f43337..f0cff8f9fdf6 100644
--- a/drivers/staging/nvec/Makefile
+++ b/drivers/staging/nvec/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SERIO_NVEC_PS2) += nvec_ps2.o
obj-$(CONFIG_MFD_NVEC) += nvec.o
obj-$(CONFIG_NVEC_POWER) += nvec_power.o
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 215e7ec4dea2..8fbde5d3b4a6 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef OLPC_DCON_H_
#define OLPC_DCON_H_
diff --git a/drivers/staging/pi433/Documentation/pi433.txt b/drivers/staging/pi433/Documentation/pi433.txt
index 38b83b86c334..245fef33d688 100644
--- a/drivers/staging/pi433/Documentation/pi433.txt
+++ b/drivers/staging/pi433/Documentation/pi433.txt
@@ -20,7 +20,7 @@ Description of driver operation
a) transmission
Each transmission can take place with a different configuration of the rf
-module. Therfore each application can set its own set of parameters. The driver
+module. Therefore each application can set its own set of parameters. The driver
takes care, that each transmission takes place with the parameterset of the
application, that requests the transmission. To allow the transmission to take
place in the background, a tx thread is introduced.
@@ -33,7 +33,7 @@ there is no receive request or the receiver is still waiting for something in
the air, the rf module is set to standby, the parameters for transmission gets
set, the hardware fifo of the rf chip gets preloaded and the transmission gets
started. Upon hardware fifo threshold interrupt it gets reloaded, thus enabling
-much longer telegrams then hardware fifo size. If the telegram is send and there
+much longer telegrams than the hardware fifo size. If the telegram is sent and there
is more data available in the kfifo, the procedure is repeated. If not the
transmission cycle ends.
@@ -41,7 +41,7 @@ b) reception
Since there is only one application allowed to receive data at a time, for
reception there is only one configuration set.
-As soon as an application sets an request for receiving a telegram, the reception
+As soon as an application sets a request for receiving a telegram, the reception
configuration set is written to the rf module and it gets set into receiving mode.
Now the driver is waiting, that a predefined RSSI level (signal strength at the
receiver) is reached. Until this hasn't happened, the reception can be
@@ -123,7 +123,7 @@ packet format:
optionOff - no preamble will be generated
enable_sync
optionOn - a sync word will be automatically added to
- the telegram after preamble
+ the telegram after the preamble
optionOff - no sync word will be added
Attention: While possible to generate sync without preamble, the
receiver won't be able to detect the sync without preamble.
@@ -136,7 +136,7 @@ packet format:
Attention: should be used in combination with sync, only
enable_address_byte
optionOn - the address byte will be automatically added to the
- telgram. It's part of the payload
+ telegram. It's part of the payload
optionOff - the address byte will not be added to the telegram.
The address byte can be used for address filtering, so the receiver
will only receive telegrams with a given address byte.
@@ -161,7 +161,7 @@ packet format:
one byte, used as address byte on address byte option.
-The rx configuration is transfered via struct pi433_rx_cfg, the parameterset for receiving. It is devided into two sections: rf parameters and packet format.
+The rx configuration is transferred via struct pi433_rx_cfg, the parameterset for receiving. It is divided into two sections: rf parameters and packet format.
rf params:
frequency
@@ -178,7 +178,7 @@ rf params:
OOK - on off key
rssi_threshold
threshold value for the signal strength on the receiver input.
- If this value is exeeded, a reception cycle starts
+ If this value is exceeded, a reception cycle starts
Allowed values: 0...255
thresholdDecrement
in order to adapt to different levels of singnal strength, over
@@ -198,7 +198,7 @@ rf params:
twohundretOhm - for antennas with an impedance of 200Ohm
lnaGain
sets the gain of the low noise amp
- automatic - lna gain is determed by an agc
+ automatic - lna gain is determined by an agc
max - lna gain is set to maximum
maxMinus6 - lna gain is set to 6db below max
maxMinus12 - lna gain is set to 12db below max
@@ -232,7 +232,7 @@ rf params:
amount of bytes that were requested by the read request.
Attention: should be used in combination with sync, only
enable_address_filtering;
- filteringOff - no adress filtering will take place
+ filteringOff - no address filtering will take place
nodeAddress - all telegrams, not matching the node
address will be internally discarded
nodeOrBroadcastAddress - all telegrams, neither matching the
@@ -245,7 +245,7 @@ rf params:
calculated crc doesn't match to two bytes,
that follow the payload, the telegram will be
internally discarded.
- Attention: This option is only operational, if sync on and fixed length
+ Attention: This option is only operational if sync on and fixed length
or length byte is used
sync_length
Gives the length of the payload.
@@ -255,9 +255,9 @@ rf params:
Overrides the telegram length either given by the first byte of
payload or by the read request.
bytes_to_drop
- gives the number of bytes, that will be dropped before transfering
+ gives the number of bytes, that will be dropped before transferring
data to the read buffer
- This option is only usefull, if all packet helper are switched
+ This option is only useful if all packet helpers are switched
off and the rf chip is used in raw receiving mode. This may be
needed, if a telegram of a third party device should be received,
using a protocol not compatible with the packet engine of the rf69 chip.
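
The tx and rx configuration structures described above are exchanged with the driver through its character device via the PI433_IOC_* ioctls (see the pi433_if.c diff that follows). A hedged userspace sketch of writing an rx configuration; only the frequency and rssi_threshold parameters are taken from this documentation, the full layout of struct pi433_rx_cfg is defined in pi433_if.h, and the device node name is illustrative:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include "pi433_if.h"

static int configure_rx(int fd)
{
	struct pi433_rx_cfg rx_cfg;

	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.frequency = 433920000;	/* 433.92 MHz carrier */
	rx_cfg.rssi_threshold = 100;	/* reception starts above this level */

	return ioctl(fd, PI433_IOC_WR_RX_CFG, &rx_cfg);
}

/* usage: fd = open("/dev/pi433.0", O_RDWR); configure_rx(fd); then read() blocks until a telegram arrives */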
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 93c01680f016..2a205c6173dc 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -136,17 +136,17 @@ static irqreturn_t DIO0_irq_handler(int irq, void *dev_id)
if (device->irq_state[DIO0] == DIO_PacketSent)
{
device->free_in_fifo = FIFO_SIZE;
- printk("DIO0 irq: Packet sent\n"); // TODO: printk() should include KERN_ facility level
+ dev_dbg(device->dev, "DIO0 irq: Packet sent\n");
wake_up_interruptible(&device->fifo_wait_queue);
}
else if (device->irq_state[DIO0] == DIO_Rssi_DIO0)
{
- printk("DIO0 irq: RSSI level over threshold\n");
+ dev_dbg(device->dev, "DIO0 irq: RSSI level over threshold\n");
wake_up_interruptible(&device->rx_wait_queue);
}
else if (device->irq_state[DIO0] == DIO_PayloadReady)
{
- printk("DIO0 irq: PayloadReady\n");
+ dev_dbg(device->dev, "DIO0 irq: PayloadReady\n");
device->free_in_fifo = 0;
wake_up_interruptible(&device->fifo_wait_queue);
}
@@ -167,7 +167,8 @@ static irqreturn_t DIO1_irq_handler(int irq, void *dev_id)
if (device->rx_active) device->free_in_fifo = FIFO_THRESHOLD - 1;
else device->free_in_fifo = FIFO_SIZE - FIFO_THRESHOLD - 1;
}
- printk("DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo); // TODO: printk() should include KERN_ facility level
+ dev_dbg(device->dev,
+ "DIO1 irq: %d bytes free in fifo\n", device->free_in_fifo);
wake_up_interruptible(&device->fifo_wait_queue);
return IRQ_HANDLED;
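
Replacing the bare printk() calls with dev_dbg(), as in the two interrupt handlers above, both supplies the missing KERN_ level and prefixes the message with the device name; the output can then be enabled per call site through dynamic debug instead of always being emitted. A minimal sketch of the pattern with illustrative names (not the pi433 structures):

#include <linux/device.h>
#include <linux/interrupt.h>

struct my_priv {
	struct device *dev;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* compiled out unless DEBUG or dynamic debug enables it */
	dev_dbg(priv->dev, "irq %d: work deferred\n", irq);

	return IRQ_HANDLED;
}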
@@ -284,8 +285,7 @@ rf69_set_tx_cfg(struct pi433_device *dev, struct pi433_tx_cfg *tx_cfg)
SET_CHECKED(rf69_set_crc_enable (dev->spi, tx_cfg->enable_crc));
/* configure sync, if enabled */
- if (tx_cfg->enable_sync == optionOn)
- {
+ if (tx_cfg->enable_sync == optionOn) {
SET_CHECKED(rf69_set_sync_size(dev->spi, tx_cfg->sync_length));
SET_CHECKED(rf69_set_sync_values(dev->spi, tx_cfg->sync_pattern));
}
@@ -407,8 +407,7 @@ pi433_receive(void *data)
if (retval) goto abort; /* wait was interrupted */
rf69_read_fifo(spi, (u8 *)&bytes_total, 1);
- if (bytes_total > dev->rx_buffer_size)
- {
+ if (bytes_total > dev->rx_buffer_size) {
retval = -1;
goto abort;
}
@@ -466,7 +465,7 @@ pi433_receive(void *data)
}
- /* rx done, wait was interrupted or error occured */
+ /* rx done, wait was interrupted or error occurred */
abort:
dev->interrupt_rx_allowed = true;
SET_CHECKED(rf69_set_mode(dev->spi, standby));
@@ -508,16 +507,14 @@ pi433_tx_thread(void *data)
mutex_lock(&device->tx_fifo_lock);
retval = kfifo_out(&device->tx_fifo, &tx_cfg, sizeof(tx_cfg));
- if (retval != sizeof(tx_cfg))
- {
+ if (retval != sizeof(tx_cfg)) {
dev_dbg(device->dev, "reading tx_cfg from fifo failed: got %d byte(s), expected %d", retval, (unsigned int)sizeof(tx_cfg) );
mutex_unlock(&device->tx_fifo_lock);
continue;
}
retval = kfifo_out(&device->tx_fifo, &size, sizeof(size_t));
- if (retval != sizeof(size_t))
- {
+ if (retval != sizeof(size_t)) {
dev_dbg(device->dev, "reading msg size from fifo failed: got %d, expected %d", retval, (unsigned int)sizeof(size_t) );
mutex_unlock(&device->tx_fifo_lock);
continue;
@@ -649,8 +646,7 @@ pi433_tx_thread(void *data)
SET_CHECKED(rf69_set_mode(spi, standby));
/* everything sent? */
- if ( kfifo_is_empty(&device->tx_fifo) )
- {
+ if (kfifo_is_empty(&device->tx_fifo)) {
abort:
if (rx_interrupted)
{
@@ -704,8 +700,7 @@ pi433_read(struct file *filp, char __user *buf, size_t size, loff_t *f_pos)
mutex_unlock(&device->rx_lock);
/* if read was successful copy to user space*/
- if (bytes_received > 0)
- {
+ if (bytes_received > 0) {
retval = copy_to_user(buf, device->rx_buffer, bytes_received);
if (retval)
return -EFAULT;
@@ -767,32 +762,15 @@ abort:
static long
pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- int err = 0;
int retval = 0;
struct pi433_instance *instance;
struct pi433_device *device;
- u32 tmp;
+ void __user *argp = (void __user *)arg;
/* Check type and command number */
if (_IOC_TYPE(cmd) != PI433_IOC_MAGIC)
return -ENOTTY;
- /* Check access direction once here; don't repeat below.
- * IOC_DIR is from the user perspective, while access_ok is
- * from the kernel perspective; so they look reversed.
- */
- if (_IOC_DIR(cmd) & _IOC_READ)
- err = !access_ok(VERIFY_WRITE,
- (void __user *)arg,
- _IOC_SIZE(cmd));
-
- if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
- err = !access_ok(VERIFY_READ,
- (void __user *)arg,
- _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
/* TODO? guard against device removal before, or while,
* we issue this ioctl. --> device_get()
*/
@@ -804,80 +782,33 @@ pi433_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case PI433_IOC_RD_TX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
- {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_to_user((void __user *)arg,
- &instance->tx_cfg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_to_user(argp, &instance->tx_cfg,
+ sizeof(struct pi433_tx_cfg)))
+ return -EFAULT;
break;
case PI433_IOC_WR_TX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_tx_cfg)) != 0) )
- {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_from_user(&instance->tx_cfg,
- (void __user *)arg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_from_user(&instance->tx_cfg, argp,
+ sizeof(struct pi433_tx_cfg)))
+ return -EFAULT;
break;
-
case PI433_IOC_RD_RX_CFG:
- tmp = _IOC_SIZE(cmd);
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
- retval = -EINVAL;
- break;
- }
-
- if (__copy_to_user((void __user *)arg,
- &device->rx_cfg,
- tmp))
- {
- retval = -EFAULT;
- break;
- }
-
+ if (copy_to_user(argp, &device->rx_cfg,
+ sizeof(struct pi433_rx_cfg)))
+ return -EFAULT;
break;
case PI433_IOC_WR_RX_CFG:
- tmp = _IOC_SIZE(cmd);
mutex_lock(&device->rx_lock);
/* during pendig read request, change of config not allowed */
if (device->rx_active) {
- retval = -EAGAIN;
- mutex_unlock(&device->rx_lock);
- break;
- }
-
- if ( (tmp == 0) || ((tmp % sizeof(struct pi433_rx_cfg)) != 0) ) {
- retval = -EINVAL;
mutex_unlock(&device->rx_lock);
- break;
+ return -EAGAIN;
}
- if (__copy_from_user(&device->rx_cfg,
- (void __user *)arg,
- tmp))
- {
- retval = -EFAULT;
+ if (copy_from_user(&device->rx_cfg, argp,
+ sizeof(struct pi433_rx_cfg))) {
mutex_unlock(&device->rx_lock);
- break;
+ return -EFAULT;
}
mutex_unlock(&device->rx_lock);
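
The ioctl rework above drops the open-coded access_ok()/__copy_*_user() sequence and the _IOC_SIZE() plausibility checks: copy_to_user() and copy_from_user() already perform the access check themselves, and the size to copy is simply sizeof() the configuration structure. The resulting shape, sketched with a placeholder struct and placeholder command numbers rather than the real pi433 definitions:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_cfg { u32 value; };

#define MY_IOC_MAGIC	'x'	/* placeholder magic */
#define MY_IOC_RD_CFG	_IOR(MY_IOC_MAGIC, 1, struct my_cfg)
#define MY_IOC_WR_CFG	_IOW(MY_IOC_MAGIC, 2, struct my_cfg)

static struct my_cfg my_cfg_state;	/* stands in for per-instance state */

static long my_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case MY_IOC_RD_CFG:
		/* copy_to_user() performs the access_ok() check itself */
		if (copy_to_user(argp, &my_cfg_state, sizeof(my_cfg_state)))
			return -EFAULT;
		return 0;
	case MY_IOC_WR_CFG:
		if (copy_from_user(&my_cfg_state, argp, sizeof(my_cfg_state)))
			return -EFAULT;
		return 0;
	default:
		return -ENOTTY;
	}
}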
@@ -916,8 +847,7 @@ static int pi433_open(struct inode *inode, struct file *filp)
if (!device->rx_buffer) {
device->rx_buffer = kmalloc(MAX_MSG_SIZE, GFP_KERNEL);
- if (!device->rx_buffer)
- {
+ if (!device->rx_buffer) {
dev_dbg(device->dev, "open/ENOMEM\n");
return -ENOMEM;
}
@@ -925,8 +855,7 @@ static int pi433_open(struct inode *inode, struct file *filp)
device->users++;
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
- if (!instance)
- {
+ if (!instance) {
kfree(device->rx_buffer);
device->rx_buffer = NULL;
return -ENOMEM;
@@ -986,8 +915,7 @@ static int setup_GPIOs(struct pi433_device *device)
snprintf(name, sizeof(name), "DIO%d", i);
device->gpiod[i] = gpiod_get(&device->spi->dev, name, 0 /*GPIOD_IN*/);
- if (device->gpiod[i] == ERR_PTR(-ENOENT))
- {
+ if (device->gpiod[i] == ERR_PTR(-ENOENT)) {
dev_dbg(&device->spi->dev, "Could not find entry for %s. Ignoring.", name);
continue;
}
@@ -1016,8 +944,7 @@ static int setup_GPIOs(struct pi433_device *device)
/* configure irq */
device->irq_num[i] = gpiod_to_irq(device->gpiod[i]);
- if (device->irq_num[i] < 0)
- {
+ if (device->irq_num[i] < 0) {
device->gpiod[i] = ERR_PTR(-EINVAL);//(struct gpio_desc *)device->irq_num[i];
return device->irq_num[i];
}
@@ -1030,7 +957,7 @@ static int setup_GPIOs(struct pi433_device *device)
if (retval)
return retval;
- dev_dbg(&device->spi->dev, "%s succesfully configured", name);
+ dev_dbg(&device->spi->dev, "%s successfully configured", name);
}
return 0;
@@ -1156,8 +1083,7 @@ static int pi433_probe(struct spi_device *spi)
/* setup GPIO (including irq_handler) for the different DIOs */
retval = setup_GPIOs(device);
- if (retval)
- {
+ if (retval) {
dev_dbg(&spi->dev, "setup of GPIOs failed");
goto GPIO_failed;
}
@@ -1175,16 +1101,14 @@ static int pi433_probe(struct spi_device *spi)
device->tx_task_struct = kthread_run(pi433_tx_thread,
device,
"pi433_tx_task");
- if (IS_ERR(device->tx_task_struct))
- {
+ if (IS_ERR(device->tx_task_struct)) {
dev_dbg(device->dev, "start of send thread failed");
goto send_thread_failed;
}
/* determ minor number */
retval = pi433_get_minor(device);
- if (retval)
- {
+ if (retval) {
dev_dbg(device->dev, "get of minor number failed");
goto minor_failed;
}
@@ -1213,8 +1137,7 @@ static int pi433_probe(struct spi_device *spi)
device->cdev->owner = THIS_MODULE;
cdev_init(device->cdev, &pi433_fops);
retval = cdev_add(device->cdev, device->devt, 1);
- if (retval)
- {
+ if (retval) {
dev_dbg(device->dev, "register of cdev failed");
goto cdev_failed;
}
@@ -1306,15 +1229,13 @@ static int __init pi433_init(void)
return status;
pi433_class = class_create(THIS_MODULE, "pi433");
- if (IS_ERR(pi433_class))
- {
+ if (IS_ERR(pi433_class)) {
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
return PTR_ERR(pi433_class);
}
status = spi_register_driver(&pi433_spi_driver);
- if (status < 0)
- {
+ if (status < 0) {
class_destroy(pi433_class);
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
}
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 290b419aa9dd..e69a2153c999 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -34,7 +34,7 @@
/*-------------------------------------------------------------------------*/
#define READ_REG(x) rf69_read_reg (spi, x)
-#define WRITE_REG(x,y) rf69_write_reg(spi, x, y)
+#define WRITE_REG(x, y) rf69_write_reg(spi, x, y)
/*-------------------------------------------------------------------------*/
@@ -164,9 +164,12 @@ int rf69_set_bit_rate(struct spi_device *spi, u16 bitRate)
// transmit to RF 69
retval = WRITE_REG(REG_BITRATE_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_BITRATE_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
@@ -196,7 +199,7 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
// calculate register settings
f_reg = deviation * factor;
- do_div(f_reg , f_step);
+ do_div(f_reg, f_step);
msb = (f_reg&0xff00) >> 8;
lsb = (f_reg&0xff);
@@ -209,9 +212,12 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
// write to chip
retval = WRITE_REG(REG_FDEV_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FDEV_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
@@ -244,7 +250,7 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
// calculate reg settings
f_reg = frequency * factor;
- do_div(f_reg , f_step);
+ do_div(f_reg, f_step);
msb = (f_reg&0xff0000) >> 16;
mid = (f_reg&0xff00) >> 8;
@@ -252,11 +258,16 @@ int rf69_set_frequency(struct spi_device *spi, u32 frequency)
// write to chip
retval = WRITE_REG(REG_FRF_MSB, msb);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FRF_MID, mid);
- if (retval) return retval;
+ if (retval)
+ return retval;
+
retval = WRITE_REG(REG_FRF_LSB, lsb);
- if (retval) return retval;
+ if (retval)
+ return retval;
return 0;
}
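
The rf69.c hunks here and below split the one-line "if (retval) return retval;" checks onto separate lines, kernel style, while keeping the property that every SPI register write is checked before the next one is issued. The same shape as a small helper, assuming the rf69_write_reg() accessor behind the WRITE_REG() macro returns 0 on success or a negative errno:

static int write_reg_pair(struct spi_device *spi, u8 reg_msb, u8 reg_lsb,
			  u16 value)
{
	int retval;

	retval = rf69_write_reg(spi, reg_msb, value >> 8);
	if (retval)
		return retval;

	return rf69_write_reg(spi, reg_lsb, value & 0xff);
}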
@@ -267,9 +278,9 @@ int rf69_set_amplifier_0(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #0");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA0));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA0));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -282,9 +293,9 @@ int rf69_set_amplifier_1(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #1");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA1));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA1));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -297,9 +308,9 @@ int rf69_set_amplifier_2(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: amp #2");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2) );
- case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) | MASK_PALEVEL_PA2));
+ case optionOff: return WRITE_REG(REG_PALEVEL, (READ_REG(REG_PALEVEL) & ~MASK_PALEVEL_PA2));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -312,7 +323,7 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 powerLevel)
dev_dbg(&spi->dev, "set: power level");
#endif
- powerLevel +=18; // TODO Abhängigkeit von PA0,1,2 setting
+ powerLevel += 18; // TODO Abhängigkeit von PA0,1,2 setting
// check input value
if (powerLevel > 0x1f) {
@@ -330,7 +341,7 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum paRamp paRamp)
dev_dbg(&spi->dev, "set: pa ramp");
#endif
- switch(paRamp) {
+ switch (paRamp) {
case ramp3400: return WRITE_REG(REG_PARAMP, PARAMP_3400);
case ramp2000: return WRITE_REG(REG_PARAMP, PARAMP_2000);
case ramp1000: return WRITE_REG(REG_PARAMP, PARAMP_1000);
@@ -359,9 +370,9 @@ int rf69_set_antenna_impedance(struct spi_device *spi, enum antennaImpedance ant
dev_dbg(&spi->dev, "set: antenna impedance");
#endif
- switch(antennaImpedance) {
- case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN) );
- case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN) );
+ switch (antennaImpedance) {
+ case fiftyOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) & ~MASK_LNA_ZIN));
+ case twohundretOhm: return WRITE_REG(REG_LNA, (READ_REG(REG_LNA) | MASK_LNA_ZIN));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -374,14 +385,14 @@ int rf69_set_lna_gain(struct spi_device *spi, enum lnaGain lnaGain)
dev_dbg(&spi->dev, "set: lna gain");
#endif
- switch(lnaGain) {
- case automatic: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO) );
- case max: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX) );
- case maxMinus6: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6) );
- case maxMinus12: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12) );
- case maxMinus24: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24) );
- case maxMinus36: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36) );
- case maxMinus48: return WRITE_REG(REG_LNA, ( (READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48) );
+ switch (lnaGain) {
+ case automatic: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_AUTO));
+ case max: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX));
+ case maxMinus6: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_6));
+ case maxMinus12: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_12));
+ case maxMinus24: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_24));
+ case maxMinus36: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_36));
+ case maxMinus48: return WRITE_REG(REG_LNA, ((READ_REG(REG_LNA) & ~MASK_LNA_GAIN) & LNA_GAIN_MAX_MINUS_48));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -410,17 +421,17 @@ enum lnaGain rf69_get_lna_gain(struct spi_device *spi)
}
}
-int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi ,u8 reg, enum dccPercent dccPercent)
+int rf69_set_dc_cut_off_frequency_intern(struct spi_device *spi, u8 reg, enum dccPercent dccPercent)
{
switch (dccPercent) {
- case dcc16Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT) );
- case dcc8Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT) );
- case dcc4Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT) );
- case dcc2Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT) );
- case dcc1Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT) );
- case dcc0_5Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT) );
- case dcc0_25Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT) );
- case dcc0_125Percent: return WRITE_REG(reg, ( (READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT) );
+ case dcc16Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_16_PERCENT));
+ case dcc8Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_8_PERCENT));
+ case dcc4Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_4_PERCENT));
+ case dcc2Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_2_PERCENT));
+ case dcc1Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_1_PERCENT));
+ case dcc0_5Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_5_PERCENT));
+ case dcc0_25Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_25_PERCENT));
+ case dcc0_125Percent: return WRITE_REG(reg, ((READ_REG(reg) & ~MASK_BW_DCC_FREQ) | BW_DCC_0_125_PERCENT));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -470,10 +481,16 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
newValue = newValue & MASK_BW_DCC_FREQ;
// add new mantisse
- switch(mantisse) {
- case mantisse16: newValue = newValue | BW_MANT_16; break;
- case mantisse20: newValue = newValue | BW_MANT_20; break;
- case mantisse24: newValue = newValue | BW_MANT_24; break;
+ switch (mantisse) {
+ case mantisse16:
+ newValue = newValue | BW_MANT_16;
+ break;
+ case mantisse20:
+ newValue = newValue | BW_MANT_20;
+ break;
+ case mantisse24:
+ newValue = newValue | BW_MANT_24;
+ break;
}
// add new exponent
@@ -508,9 +525,9 @@ int rf69_set_ook_threshold_type(struct spi_device *spi, enum thresholdType thres
#endif
switch (thresholdType) {
- case fixed: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED) );
- case peak: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK) );
- case average: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE) );
+ case fixed: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_FIXED));
+ case peak: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_PEAK));
+ case average: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESTYPE) | OOKPEAK_THRESHTYPE_AVERAGE));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -524,14 +541,14 @@ int rf69_set_ook_threshold_step(struct spi_device *spi, enum thresholdStep thres
#endif
switch (thresholdStep) {
- case step_0_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB) );
- case step_1_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB) );
- case step_1_5db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB) );
- case step_2_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB) );
- case step_3_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB) );
- case step_4_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB) );
- case step_5_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB) );
- case step_6_0db: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB) );
+ case step_0_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_0_5_DB));
+ case step_1_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_0_DB));
+ case step_1_5db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_1_5_DB));
+ case step_2_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_2_0_DB));
+ case step_3_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_3_0_DB));
+ case step_4_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_4_0_DB));
+ case step_5_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_5_0_DB));
+ case step_6_0db: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESSTEP) | OOKPEAK_THRESHSTEP_6_0_DB));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -545,14 +562,14 @@ int rf69_set_ook_threshold_dec(struct spi_device *spi, enum thresholdDecrement t
#endif
switch (thresholdDecrement) {
- case dec_every8th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH) );
- case dec_every4th: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH) );
- case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND) );
- case dec_once: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE) );
- case dec_twice: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE) );
- case dec_4times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES) );
- case dec_8times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES) );
- case dec_16times: return WRITE_REG(REG_OOKPEAK, ( (READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES) );
+ case dec_every8th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_8TH));
+ case dec_every4th: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_4TH));
+ case dec_every2nd: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_EVERY_2ND));
+ case dec_once: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_ONCE));
+ case dec_twice: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_TWICE));
+ case dec_4times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_4_TIMES));
+ case dec_8times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_8_TIMES));
+ case dec_16times: return WRITE_REG(REG_OOKPEAK, ((READ_REG(REG_OOKPEAK) & ~MASK_OOKPEAK_THRESDEC) | OOKPEAK_THRESHDEC_16_TIMES));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -571,25 +588,37 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
#endif
switch (DIONumber) {
- case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
- case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
- case 2: mask=MASK_DIO2; shift=SHIFT_DIO2; regaddr=REG_DIOMAPPING1; break;
- case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
- case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
- case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
+ case 0:
+ mask = MASK_DIO0; shift = SHIFT_DIO0; regaddr = REG_DIOMAPPING1;
+ break;
+ case 1:
+ mask = MASK_DIO1; shift = SHIFT_DIO1; regaddr = REG_DIOMAPPING1;
+ break;
+ case 2:
+ mask = MASK_DIO2; shift = SHIFT_DIO2; regaddr = REG_DIOMAPPING1;
+ break;
+ case 3:
+ mask = MASK_DIO3; shift = SHIFT_DIO3; regaddr = REG_DIOMAPPING1;
+ break;
+ case 4:
+ mask = MASK_DIO4; shift = SHIFT_DIO4; regaddr = REG_DIOMAPPING2;
+ break;
+ case 5:
+ mask = MASK_DIO5; shift = SHIFT_DIO5; regaddr = REG_DIOMAPPING2;
+ break;
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
}
// read reg
- regValue=READ_REG(regaddr);
+ regValue = READ_REG(regaddr);
// delete old value
regValue = regValue & ~mask;
// add new value
regValue = regValue | value << shift;
// write back
- return WRITE_REG(regaddr,regValue);
+ return WRITE_REG(regaddr, regValue);
}
bool rf69_get_flag(struct spi_device *spi, enum flag flag)
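
rf69_set_dio_mapping() above is a read-modify-write of a chip register: read the current value, clear the field selected by the mask, or in the new value at the right shift, and write the result back. The same sequence as a small helper, assuming the rf69_read_reg()/rf69_write_reg() accessors wrapped by READ_REG()/WRITE_REG():

static int update_reg_field(struct spi_device *spi, u8 reg,
			    u8 mask, u8 shift, u8 value)
{
	u8 tmp;

	tmp = rf69_read_reg(spi, reg);	/* current register contents */
	tmp &= ~mask;			/* clear the field */
	tmp |= value << shift;		/* insert the new value */

	return rf69_write_reg(spi, reg, tmp);
}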
@@ -598,7 +627,7 @@ bool rf69_get_flag(struct spi_device *spi, enum flag flag)
dev_dbg(&spi->dev, "get: flag");
#endif
- switch(flag) {
+ switch (flag) {
case modeSwitchCompleted: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_MODE_READY);
case readyToReceive: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_RX_READY);
case readyToSend: return (READ_REG(REG_IRQFLAGS1) & MASK_IRQFLAGS1_TX_READY);
@@ -626,7 +655,7 @@ int rf69_reset_flag(struct spi_device *spi, enum flag flag)
dev_dbg(&spi->dev, "reset: flag");
#endif
- switch(flag) {
+ switch (flag) {
case rssiExceededThreshold: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_RSSI);
case syncAddressMatch: return WRITE_REG(REG_IRQFLAGS1, MASK_IRQFLAGS1_SYNC_ADDRESS_MATCH);
case fifoOverrun: return WRITE_REG(REG_IRQFLAGS2, MASK_IRQFLAGS2_FIFO_OVERRUN);
@@ -686,10 +715,9 @@ int rf69_set_preamble_length(struct spi_device *spi, u16 preambleLength)
/* transmit to chip */
retval = WRITE_REG(REG_PREAMBLE_MSB, msb);
- if (retval) return retval;
- retval = WRITE_REG(REG_PREAMBLE_LSB, lsb);
-
- return retval;
+ if (retval)
+ return retval;
+ return WRITE_REG(REG_PREAMBLE_LSB, lsb);
}
int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
@@ -698,9 +726,9 @@ int rf69_set_sync_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: sync enable");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON) );
- case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_SYNC_ON));
+ case optionOff: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_ON));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -713,9 +741,9 @@ int rf69_set_fifo_fill_condition(struct spi_device *spi, enum fifoFillCondition
dev_dbg(&spi->dev, "set: fifo fill condition");
#endif
- switch(fifoFillCondition) {
- case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
- case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION) );
+ switch (fifoFillCondition) {
+ case always: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) | MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
+ case afterSyncInterrupt: return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_FIFO_FILL_CONDITION));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -735,7 +763,7 @@ int rf69_set_sync_size(struct spi_device *spi, u8 syncSize)
}
// write value
- return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3) );
+ return WRITE_REG(REG_SYNC_CONFIG, (READ_REG(REG_SYNC_CONFIG) & ~MASK_SYNC_CONFIG_SYNC_SIZE) | (syncSize << 3));
}
int rf69_set_sync_tolerance(struct spi_device *spi, u8 syncTolerance)
@@ -780,9 +808,9 @@ int rf69_set_packet_format(struct spi_device *spi, enum packetFormat packetForma
dev_dbg(&spi->dev, "set: packet format");
#endif
- switch(packetFormat) {
- case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
- case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE) );
+ switch (packetFormat) {
+ case packetLengthVar: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
+ case packetLengthFix: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_PAKET_FORMAT_VARIABLE));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -795,9 +823,9 @@ int rf69_set_crc_enable(struct spi_device *spi, enum optionOnOff optionOnOff)
dev_dbg(&spi->dev, "set: crc enable");
#endif
- switch(optionOnOff) {
- case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON) );
- case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON) );
+ switch (optionOnOff) {
+ case optionOn: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) | MASK_PACKETCONFIG1_CRC_ON));
+ case optionOff: return WRITE_REG(REG_PACKETCONFIG1, (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_CRC_ON));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -811,9 +839,9 @@ int rf69_set_adressFiltering(struct spi_device *spi, enum addressFiltering addre
#endif
switch (addressFiltering) {
- case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF) );
- case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE) );
- case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ( (READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST) );
+ case filteringOff: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_OFF));
+ case nodeAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODE));
+ case nodeOrBroadcastAddress: return WRITE_REG(REG_PACKETCONFIG1, ((READ_REG(REG_PACKETCONFIG1) & ~MASK_PACKETCONFIG1_ADDRESSFILTERING) | PACKETCONFIG1_ADDRESSFILTERING_NODEBROADCAST));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -862,9 +890,9 @@ int rf69_set_tx_start_condition(struct spi_device *spi, enum txStartCondition tx
dev_dbg(&spi->dev, "set: start condition");
#endif
- switch(txStartCondition) {
- case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART) );
- case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART) );
+ switch (txStartCondition) {
+ case fifoLevel: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) & ~MASK_FIFO_THRESH_TXSTART));
+ case fifoNotEmpty: return WRITE_REG(REG_FIFO_THRESH, (READ_REG(REG_FIFO_THRESH) | MASK_FIFO_THRESH_TXSTART));
default:
dev_dbg(&spi->dev, "set: illegal input param");
return -EINVAL;
@@ -891,7 +919,7 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
return retval;
// access the fifo to activate new threshold
- return rf69_read_fifo (spi, (u8*) &retval, 1); // retval used as buffer
+ return rf69_read_fifo(spi, (u8 *)&retval, 1); // retval used as buffer
}
int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
@@ -900,7 +928,7 @@ int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
dev_dbg(&spi->dev, "set: dagc");
#endif
- switch(dagc) {
+ switch (dagc) {
case normalMode: return WRITE_REG(REG_TESTDAGC, DAGC_NORMAL);
case improve: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA0);
case improve4LowModulationIndex: return WRITE_REG(REG_TESTDAGC, DAGC_IMPROVED_LOWBETA1);
@@ -931,14 +959,14 @@ int rf69_read_fifo (struct spi_device *spi, u8 *buffer, unsigned int size)
/* prepare a bidirectional transfer */
local_buffer[0] = REG_FIFO;
memset(&transfer, 0, sizeof(transfer));
- transfer.tx_buf = local_buffer;
- transfer.rx_buf = local_buffer;
+ transfer.tx_buf = local_buffer;
+ transfer.rx_buf = local_buffer;
transfer.len = size+1;
retval = spi_sync_transfer(spi, &transfer, 1);
#ifdef DEBUG_FIFO_ACCESS
- for (i=0; i<size; i++)
+ for (i = 0; i < size; i++)
dev_dbg(&spi->dev, "%d - 0x%x\n", i, local_buffer[i+1]);
#endif
@@ -966,8 +994,8 @@ int rf69_write_fifo(struct spi_device *spi, u8 *buffer, unsigned int size)
memcpy(&local_buffer[1], buffer, size); // TODO: ohne memcopy wäre schöner
#ifdef DEBUG_FIFO_ACCESS
- for (i=0; i<size; i++)
- dev_dbg(&spi->dev, "0x%x\n",buffer[i]);
+ for (i = 0; i < size; i++)
+ dev_dbg(&spi->dev, "0x%x\n", buffer[i]);
#endif
return spi_write (spi, local_buffer, size + 1);
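
rf69_write_fifo() above builds a single SPI write buffer in which the first byte addresses the FIFO register and the payload follows. A generic sketch of that framing; the helper name, the "addr" parameter and the 64-byte bound are illustrative, not taken from rf69.h:

#include <linux/errno.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

static int spi_write_block(struct spi_device *spi, u8 addr,
			   const u8 *data, unsigned int size)
{
	u8 buf[64 + 1];

	if (size > 64)
		return -EMSGSIZE;

	buf[0] = addr;			/* register/FIFO address byte */
	memcpy(&buf[1], data, size);	/* payload follows the address */

	return spi_write(spi, buf, size + 1);
}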
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 27af86e05098..033fb2e6950d 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
r8188eu-y := \
core/rtw_ap.o \
core/rtw_cmd.o \
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 32a483769975..fa611455109a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -754,7 +754,7 @@ static void start_bss_network(struct adapter *padapter, u8 *pbuf)
}
/* setting only at first time */
- if (!(pmlmepriv->cur_network.join_res)) {
+ if (pmlmepriv->cur_network.join_res != true) {
/* WEP Key will be set before this function, do not
* clear CAM.
*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9461bce883ea..be8542676adf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -333,7 +333,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
else
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
if (!pcmd) {
res = _FAIL;
goto exit;
@@ -508,7 +508,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ cmdobj = kzalloc(sizeof(*cmdobj), GFP_ATOMIC);
if (!cmdobj) {
res = _FAIL;
kfree(param);
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index b9bdff0490ca..2c4c8c43b1ad 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -48,7 +48,7 @@ void Efuse_PowerSwitch(
if (PwrState) {
usb_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
- /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid */
+ /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
tmpV16 = usb_read16(pAdapter, REG_SYS_ISO_CTRL);
if (!(tmpV16 & PWC_EV12V)) {
tmpV16 |= PWC_EV12V;
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 1b9bc9817a57..c4335893d8f6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -22,9 +22,9 @@
/* Callback function of LED BlinkTimer, */
/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl */
/* */
-void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
struct adapter *padapter = pLed->padapter;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
@@ -73,8 +73,7 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
ResetLedStatus(pLed);
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index f663e6c41f8a..1cd49e292804 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -106,10 +106,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
- rtw_free_mlme_priv_ie_data(pmlmepriv);
-
- if (pmlmepriv)
+ if (pmlmepriv) {
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
vfree(pmlmepriv->free_bss_buf);
+ }
}
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
@@ -1135,7 +1135,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
+ for (aid = pstapriv->max_num_sta; aid > 0; aid--) {
if (pstapriv->sta_aid[aid-1])
break;
}
@@ -1143,7 +1143,7 @@ static u8 search_max_mac_id(struct adapter *padapter)
} else
#endif
{/* adhoc id = 31~2 */
- for (mac_id = (NUM_STA-1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
+ for (mac_id = NUM_STA-1; mac_id >= IBSS_START_MAC_ID; mac_id--) {
if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
break;
}
@@ -1329,12 +1329,13 @@ void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
}
/*
- * _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss
+ * _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
-void _rtw_join_timeout_handler (unsigned long data)
+void _rtw_join_timeout_handler (struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.assoc_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
int do_join_r;
@@ -1373,9 +1374,10 @@ void _rtw_join_timeout_handler (unsigned long data)
* rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
-void rtw_scan_timeout_handler (unsigned long data)
+void rtw_scan_timeout_handler (struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1400,9 +1402,10 @@ static void rtw_auto_scan_handler(struct adapter *padapter)
}
}
-void rtw_dynamic_check_timer_handlder(unsigned long data)
+void rtw_dynamic_check_timer_handlder(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
struct registry_priv *pregistrypriv = &adapter->registrypriv;
if (!adapter)
@@ -1569,7 +1572,7 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
pcmd->cmdcode = _SetAuth_CMD_;
pcmd->parmbuf = (unsigned char *)psetauthparm;
- pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->cmdsz = sizeof(struct setauth_parm);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
@@ -1648,7 +1651,7 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
}
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *)psetkeyparm;
- pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->cmdsz = sizeof(struct setkey_parm);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
INIT_LIST_HEAD(&pcmd->list);
@@ -1814,45 +1817,45 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter)
struct security_priv *psecuritypriv = &adapter->securitypriv;
struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
- pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
+ pdev_network->Privacy = psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0; /* adhoc no 802.1x */
pdev_network->Rssi = 0;
switch (pregistrypriv->wireless_mode) {
case WIRELESS_11B:
- pdev_network->NetworkTypeInUse = (Ndis802_11DS);
+ pdev_network->NetworkTypeInUse = Ndis802_11DS;
break;
case WIRELESS_11G:
case WIRELESS_11BG:
case WIRELESS_11_24N:
case WIRELESS_11G_24N:
case WIRELESS_11BG_24N:
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
case WIRELESS_11A:
case WIRELESS_11A_5N:
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
break;
case WIRELESS_11ABGN:
if (pregistrypriv->channel > 14)
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM5;
else
- pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ pdev_network->NetworkTypeInUse = Ndis802_11OFDM24;
break;
default:
/* TODO */
break;
}
- pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
+ pdev_network->Configuration.DSConfig = pregistrypriv->channel;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n",
pregistrypriv->channel, pdev_network->Configuration.DSConfig));
if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
- pdev_network->Configuration.ATIMWindow = (0);
+ pdev_network->Configuration.ATIMWindow = 0;
- pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
+ pdev_network->InfrastructureMode = cur_network->network.InfrastructureMode;
/* 1. Supported rates */
/* 2. IE */
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 611c9409bb98..d73e9bdc80cc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -605,7 +605,9 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
dump_mgntframe(padapter, pmgntframe);
}
-static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
+static int issue_probereq(struct adapter *padapter,
+ struct ndis_802_11_ssid *pssid, u8 *da,
+ bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -707,7 +709,7 @@ static int issue_probereq_ex(struct adapter *padapter,
unsigned long start = jiffies;
do {
- ret = issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
+ ret = issue_probereq(padapter, pssid, da, wait_ms > 0);
i++;
@@ -1196,7 +1198,8 @@ exit:
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -1269,7 +1272,8 @@ exit:
/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
-int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
+int issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms)
{
int ret;
int i = 0;
@@ -1283,7 +1287,7 @@ int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int pow
da = pnetwork->MacAddress;
do {
- ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0 ? true : false);
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0);
i++;
@@ -1316,7 +1320,8 @@ exit:
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -1396,7 +1401,8 @@ exit:
/* when wait_ms > 0 , this function should be called at process context */
/* da == NULL for station mode */
-int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, int try_cnt, int wait_ms)
{
int ret;
int i = 0;
@@ -1410,7 +1416,7 @@ int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int
da = pnetwork->MacAddress;
do {
- ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0 ? true : false);
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0);
i++;
@@ -1442,7 +1448,8 @@ exit:
return ret;
}
-static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+static int _issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason, bool wait_ack)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -1502,7 +1509,8 @@ exit:
return ret;
}
-int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
+int issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason)
{
DBG_88E("%s to %pM\n", __func__, da);
return _issue_deauth(padapter, da, reason, false);
@@ -1517,7 +1525,7 @@ static int issue_deauth_ex(struct adapter *padapter, u8 *da,
unsigned long start = jiffies;
do {
- ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0);
i++;
@@ -3413,7 +3421,7 @@ static unsigned int OnAssocRsp(struct adapter *padapter,
/* following are moved to join event callback function */
/* to handle HT, WMM, rate adaptive, update MAC reg */
/* for not to handle the synchronous IO in the tasklet */
- for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ for (i = 6 + WLAN_HDR_A3_LEN; i < pkt_len;) {
pIE = (struct ndis_802_11_var_ie *)(pframe + i);
switch (pIE->ElementID) {
@@ -3757,7 +3765,8 @@ static unsigned int on_action_public_vendor(struct recv_frame *precv_frame)
return ret;
}
-static unsigned int on_action_public_default(struct recv_frame *precv_frame, u8 action)
+static unsigned int on_action_public_default(struct recv_frame *precv_frame,
+ u8 action)
{
unsigned int ret = _FAIL;
u8 *pframe = precv_frame->pkt->data;
@@ -3972,9 +3981,10 @@ static int has_channel(struct rt_channel_info *channel_set,
return 0;
}
-static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
- u8 chanset_size,
- struct p2p_channels *channel_list)
+static void init_channel_list(struct adapter *padapter,
+ struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list)
{
struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
@@ -3999,7 +4009,7 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
continue;
if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
- ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ ((o->bw == BW40MINUS) || (o->bw == BW40PLUS)))
continue;
if (reg == NULL) {
@@ -4015,7 +4025,8 @@ static void init_channel_list(struct adapter *padapter, struct rt_channel_info *
channel_list->reg_classes = cla;
}
-static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
+static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan,
+ struct rt_channel_info *channel_set)
{
u8 index, chanset_size = 0;
u8 b2_4GBand = false;
@@ -4030,7 +4041,7 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
b2_4GBand = true;
- if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ if (ChannelPlan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
else
Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
@@ -4040,14 +4051,14 @@ static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_c
for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
- if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) ||/* Channel 1~11 is active, and 12~14 is passive */
- (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if ((ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN) ||/* Channel 1~11 is active, and 12~14 is passive */
+ (ChannelPlan == RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G)) {
if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
channel_set[chanset_size].ScanType = SCAN_PASSIVE;
- } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
- RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ } else if (ChannelPlan == RT_CHANNEL_DOMAIN_WORLD_WIDE_13 ||
+ Index2G == RT_CHANNEL_DOMAIN_2G_WORLD) {/* channel 12~13, passive scan */
if (channel_set[chanset_size].ChannelNum <= 11)
channel_set[chanset_size].ScanType = SCAN_ACTIVE;
else
@@ -4105,7 +4116,9 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
}
}
-static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
+static void _mgt_dispatcher(struct adapter *padapter,
+ struct mlme_handler *ptable,
+ struct recv_frame *precv_frame)
{
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->pkt->data;
@@ -4351,7 +4364,8 @@ void report_join_res(struct adapter *padapter, int res)
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
-void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr,
+ unsigned short reason)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -4406,7 +4420,8 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
}
-void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
+void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr,
+ int cam_idx)
{
struct cmd_obj *pcmd_obj;
u8 *pevtcmd;
@@ -4767,7 +4782,7 @@ void linked_status_chk(struct adapter *padapter)
if (pmlmeinfo->FW_sta_info[i].status == 1) {
psta = pmlmeinfo->FW_sta_info[i].psta;
- if (NULL == psta)
+ if (psta == NULL)
continue;
if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
if (pmlmeinfo->FW_sta_info[i].retry < 3) {
@@ -4788,9 +4803,10 @@ void linked_status_chk(struct adapter *padapter)
}
}
-void survey_timer_hdl(unsigned long data)
+void survey_timer_hdl(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter = from_timer(padapter, t,
+ mlmeextpriv.survey_timer);
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -4828,9 +4844,10 @@ exit_survey_timer_hdl:
return;
}
-void link_timer_hdl(unsigned long data)
+void link_timer_hdl(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter = from_timer(padapter, t,
+ mlmeextpriv.link_timer);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
@@ -4864,9 +4881,9 @@ void link_timer_hdl(unsigned long data)
}
}
-void addba_timer_hdl(unsigned long data)
+void addba_timer_hdl(struct timer_list *t)
{
- struct sta_info *psta = (struct sta_info *)data;
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
struct ht_priv *phtpriv;
if (!psta)
@@ -5125,8 +5142,10 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
return H2C_SUCCESS;
}
-static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
- u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+static int rtw_scan_ch_decision(struct adapter *padapter,
+ struct rtw_ieee80211_channel *out,
+ u32 out_num,
+ struct rtw_ieee80211_channel *in, u32 in_num)
{
int i, j;
int set_idx;
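The survey_timer_hdl/link_timer_hdl/addba_timer_hdl conversions above follow the timer API that replaced setup_timer(): the callback now receives the struct timer_list itself and recovers its containing object with from_timer() (a container_of() keyed on the timer member), and the timer is initialised with timer_setup(), so no cast cookie is passed any more. A minimal sketch of the pattern, using a hypothetical my_priv structure and field names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_priv {
		struct timer_list poll_timer;
		int polls;
	};

	static void poll_timer_fn(struct timer_list *t)
	{
		/* recover the enclosing object from the timer_list pointer */
		struct my_priv *priv = from_timer(priv, t, poll_timer);

		priv->polls++;
		mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(100));
	}

	static void my_priv_init(struct my_priv *priv)
	{
		/* was: setup_timer(&priv->poll_timer, fn, (unsigned long)priv) */
		timer_setup(&priv->poll_timer, poll_timer_fn, 0);
	}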
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index f86c9cebf09a..ac27f9a023bc 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -37,7 +37,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
/* system suspend */
LeaveAllPowerSaveMode(padapter);
- DBG_88E("==> rtw_hw_suspend\n");
+ DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
/* s1. */
@@ -89,7 +89,7 @@ static int rtw_hw_resume(struct adapter *padapter)
/* system resume */
- DBG_88E("==> rtw_hw_resume\n");
+ DBG_88E("==> %s\n", __func__);
mutex_lock(&pwrpriv->mutex_lock);
pwrpriv->bips_processing = true;
rtw_reset_drv_sw(padapter);
@@ -146,7 +146,7 @@ void ips_enter(struct adapter *padapter)
pwrpriv->ips_mode = pwrpriv->ips_mode_req;
pwrpriv->ips_enter_cnts++;
- DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
+ DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_enter_cnts);
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
@@ -177,7 +177,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->bips_processing = true;
pwrpriv->change_rfpwrstate = rf_on;
pwrpriv->ips_leave_cnts++;
- DBG_88E("==>ips_leave cnts:%d\n", pwrpriv->ips_leave_cnts);
+ DBG_88E("==>%s:%d\n", __func__, pwrpriv->ips_leave_cnts);
result = rtw_ips_pwr_up(padapter);
if (result == _SUCCESS)
@@ -198,7 +198,7 @@ int ips_leave(struct adapter *padapter)
}
}
- DBG_88E("==> ips_leave.....LED(0x%08x)...\n", usb_read32(padapter, 0x4c));
+ DBG_88E("==> %s.....LED(0x%08x)...\n", __func__, usb_read32(padapter, 0x4c));
pwrpriv->bips_processing = false;
pwrpriv->bkeepfwalive = false;
@@ -276,9 +276,11 @@ exit:
pwrpriv->ps_processing = false;
}
-static void pwr_state_check_handler(unsigned long data)
+static void pwr_state_check_handler(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)data;
+ struct adapter *padapter =
+ from_timer(padapter, t,
+ pwrctrlpriv.pwr_state_check_timer);
rtw_ps_cmd(padapter);
}
@@ -332,7 +334,7 @@ void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
rpwm = pslv | pwrpriv->tog;
RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
- ("rtw_set_rpwm: rpwm=0x%02x cpwm=0x%02x\n", rpwm, pwrpriv->cpwm));
+ ("%s: rpwm=0x%02x cpwm=0x%02x\n", __func__, rpwm, pwrpriv->cpwm));
pwrpriv->rpwm = pslv;
@@ -525,7 +527,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
else
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -540,9 +542,8 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->btcoex_rfon = false;
- setup_timer(&pwrctrlpriv->pwr_state_check_timer,
- pwr_state_check_handler,
- (unsigned long)padapter);
+ timer_setup(&pwrctrlpriv->pwr_state_check_timer,
+ pwr_state_check_handler, 0);
}
/*
@@ -569,7 +570,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
DBG_88E("%s wait ps_processing...\n", __func__);
while (pwrpriv->ps_processing &&
jiffies_to_msecs(jiffies - start) <= 3000)
- usleep_range(1000, 3000);
+ udelay(1500);
if (pwrpriv->ps_processing)
DBG_88E("%s wait ps_processing timeout\n", __func__);
else
@@ -595,7 +596,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
if (rf_off == pwrpriv->rf_pwrstate) {
DBG_88E("%s call ips_leave....\n", __func__);
- if (_FAIL == ips_leave(padapter)) {
+ if (ips_leave(padapter) == _FAIL) {
DBG_88E("======> ips_leave fail.............\n");
ret = _FAIL;
goto exit;
@@ -628,12 +629,12 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
if (mode < PS_MODE_NUM) {
if (pwrctrlpriv->power_mgnt != mode) {
- if (PS_MODE_ACTIVE == mode)
+ if (mode == PS_MODE_ACTIVE)
LeaveAllPowerSaveMode(padapter);
else
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ pwrctrlpriv->bLeisurePs = (pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE) ? true : false;
}
} else {
ret = -EINVAL;
@@ -653,7 +654,7 @@ int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
} else if (mode == IPS_NONE) {
rtw_ips_mode_req(pwrctrlpriv, mode);
DBG_88E("%s %s\n", __func__, "IPS_NONE");
- if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ if ((padapter->bSurpriseRemoved == 0) && (rtw_pwr_wakeup(padapter) == _FAIL))
return -EFAULT;
} else {
return -EINVAL;
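The DBG_88E and RT_TRACE rewrites in this file replace hard-coded function names inside format strings with __func__, which checkpatch prefers because the printed name can never go stale after a rename. A small illustration, with a made-up print macro standing in for DBG_88E:

	#include <linux/printk.h>

	#define DBG_PRINT(fmt, ...)	pr_info(fmt, ##__VA_ARGS__)	/* stand-in */

	static void demo_ips_enter(int enter_cnts)
	{
		/* __func__ expands to "demo_ips_enter" at compile time */
		DBG_PRINT("==>%s:%d\n", __func__, enter_cnts);
	}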
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 3fd5f4102b36..6506a1587df0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -39,7 +39,7 @@ static u8 rtw_rfc1042_header[] = {
0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
};
-static void rtw_signal_stat_timer_hdl(unsigned long data);
+static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -86,9 +86,8 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
}
res = rtw_hal_init_recv_priv(padapter);
- setup_timer(&precvpriv->signal_stat_timer,
- rtw_signal_stat_timer_hdl,
- (unsigned long)padapter);
+ timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl,
+ 0);
precvpriv->signal_stat_sampling_interval = 1000; /* ms */
@@ -193,7 +192,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
plist = phead->next;
while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
+ hdr = list_entry(plist, struct recv_frame, list);
plist = plist->next;
@@ -237,32 +236,40 @@ static int recvframe_chkmic(struct adapter *adapter,
stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
if (prxattrib->encrypt == _TKIP_) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:prxattrib->encrypt==_TKIP_\n"));
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
/* calculate mic code */
if (stainfo != NULL) {
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv) {
res = _FAIL;
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
- DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: didn't install group key!!!!!!!!!!\n", __func__));
+ DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: bcmc key\n", __func__));
} else {
mickey = &stainfo->dot11tkiprxmickey.skey[0];
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: unicast key\n", __func__));
}
/* icv_len included the mic code */
- datalen = precvframe->pkt->len-prxattrib->hdrlen - 8;
+ datalen = precvframe->pkt->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
pframe = precvframe->pkt->data;
- payload = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
(unsigned char)prxattrib->priority); /* care the length of the data */
@@ -273,8 +280,8 @@ static int recvframe_chkmic(struct adapter *adapter,
for (i = 0; i < 8; i++) {
if (miccode[i] != *(pframemic+i)) {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- ("recvframe_chkmic:miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
- i, miccode[i], i, *(pframemic+i)));
+ ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ __func__, i, miccode[i], i, *(pframemic + i)));
bmic_err = true;
}
}
@@ -346,7 +353,8 @@ static int recvframe_chkmic(struct adapter *adapter,
}
}
} else {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic: rtw_get_stainfo==NULL!!!\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
}
skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
@@ -407,15 +415,9 @@ static struct recv_frame *decryptor(struct adapter *padapter,
default:
break;
}
- if (res != _FAIL) {
- memmove(precv_frame->pkt->data + precv_frame->attrib.iv_len, precv_frame->pkt->data, precv_frame->attrib.hdrlen);
- skb_pull(precv_frame->pkt, precv_frame->attrib.iv_len);
- skb_trim(precv_frame->pkt, precv_frame->pkt->len - precv_frame->attrib.icv_len);
- }
} else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
- (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_)) {
- psecuritypriv->hw_decrypted = true;
- }
+ (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
+ psecuritypriv->hw_decrypted = true;
if (res == _FAIL) {
rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
@@ -456,7 +458,7 @@ static struct recv_frame *portctrl(struct adapter *adapter,
if (auth_alg == 2) {
/* get ether_type */
- ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
+ ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE + pfhdr->attrib.iv_len;
memcpy(&be_tmp, ptr, 2);
ether_type = ntohs(be_tmp);
@@ -943,7 +945,7 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
xmitframe_plist = xmitframe_phead->next;
if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
+ pxmitframe = list_entry(xmitframe_plist, struct xmit_frame, list);
xmitframe_plist = xmitframe_plist->next;
@@ -1011,7 +1013,8 @@ static int validate_recv_mgnt_frame(struct adapter *padapter,
precv_frame = recvframe_chk_defrag(padapter, precv_frame);
if (precv_frame == NULL) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s: fragment packet\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("%s: fragment packet\n", __func__));
return _SUCCESS;
}
@@ -1138,8 +1141,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
}
if (pattrib->privacy) {
- struct sk_buff *skb = precv_frame->pkt;
-
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
@@ -1148,13 +1149,6 @@ static int validate_recv_data_frame(struct adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
-
- if (pattrib->bdecrypted == 1 && pattrib->encrypt > 0) {
- memmove(skb->data + pattrib->iv_len,
- skb->data, pattrib->hdrlen);
- skb_pull(skb, pattrib->iv_len);
- skb_trim(skb, skb->len - pattrib->icv_len);
- }
} else {
pattrib->encrypt = 0;
pattrib->iv_len = 0;
@@ -1274,7 +1268,6 @@ static int validate_recv_frame(struct adapter *adapter,
* Hence forward the frame to the monitor anyway to preserve the order
* in which frames were received.
*/
-
rtl88eu_mon_recv_hook(adapter->pmondev, precv_frame);
exit:
@@ -1296,8 +1289,11 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
u8 *ptr = precvframe->pkt->data;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen);
- psnap_type = ptr+pattrib->hdrlen + SNAP_SIZE;
+ if (pattrib->encrypt)
+ skb_trim(precvframe->pkt, precvframe->pkt->len - pattrib->icv_len);
+
+ psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
+ psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
/* convert hdr + possible LLC headers into Ethernet header */
if ((!memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
(!memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
@@ -1310,9 +1306,12 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
bsnaphdr = false;
}
- rmv_len = pattrib->hdrlen + (bsnaphdr ? SNAP_SIZE : 0);
+ rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
len = precvframe->pkt->len - rmv_len;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
+
memcpy(&be_tmp, ptr+rmv_len, 2);
eth_type = ntohs(be_tmp); /* pattrib->ether_type */
pattrib->eth_type = eth_type;
@@ -1337,6 +1336,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
struct __queue *defrag_q)
{
struct list_head *plist, *phead;
+ u8 wlanhdr_offset;
u8 curfragnum;
struct recv_frame *pfhdr, *pnfhdr;
struct recv_frame *prframe, *pnextrframe;
@@ -1347,7 +1347,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = phead->next;
- pfhdr = container_of(plist, struct recv_frame, list);
+ pfhdr = list_entry(plist, struct recv_frame, list);
prframe = pfhdr;
list_del_init(&(prframe->list));
@@ -1367,7 +1367,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
plist = plist->next;
while (phead != plist) {
- pnfhdr = container_of(plist, struct recv_frame, list);
+ pnfhdr = list_entry(plist, struct recv_frame, list);
pnextrframe = pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1385,7 +1385,12 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
/* copy the 2nd~n fragment frame's payload to the first fragment */
/* get the 2nd~last fragment frame's payload */
- skb_pull(pnextrframe->pkt, pnfhdr->attrib.hdrlen);
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ skb_pull(pnextrframe->pkt, wlanhdr_offset);
+
+ /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
+ skb_trim(prframe->pkt, prframe->pkt->len - pfhdr->attrib.icv_len);
/* memcpy */
memcpy(skb_tail_pointer(pfhdr->pkt), pnfhdr->pkt->data,
@@ -1393,7 +1398,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
skb_put(prframe->pkt, pnfhdr->pkt->len);
- pfhdr->attrib.icv_len = 0;
+ pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
plist = plist->next;
}
@@ -1519,6 +1524,11 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
nr_subframes = 0;
pattrib = &prframe->attrib;
+ skb_pull(prframe->pkt, prframe->attrib.hdrlen);
+
+ if (prframe->attrib.iv_len > 0)
+ skb_pull(prframe->pkt, prframe->attrib.iv_len);
+
a_len = prframe->pkt->len;
pdata = prframe->pkt->data;
@@ -1655,7 +1665,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
plist = phead->next;
while (phead != plist) {
- hdr = container_of(plist, struct recv_frame, list);
+ hdr = list_entry(plist, struct recv_frame, list);
pnextattrib = &hdr->attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
@@ -1690,7 +1700,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
if (list_empty(phead))
return true;
- prhdr = container_of(plist, struct recv_frame, list);
+ prhdr = list_entry(plist, struct recv_frame, list);
pattrib = &prhdr->attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
@@ -1698,7 +1708,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
/* Prepare indication list and indication. */
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prhdr = container_of(plist, struct recv_frame, list);
+ prhdr = list_entry(plist, struct recv_frame, list);
prframe = prhdr;
pattrib = &prframe->attrib;
@@ -1829,9 +1839,10 @@ _err_exit:
return _FAIL;
}
-void rtw_reordering_ctrl_timeout_handler(unsigned long data)
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
- struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)data;
+ struct recv_reorder_ctrl *preorder_ctrl = from_timer(preorder_ctrl, t,
+ reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -1887,6 +1898,24 @@ static int process_recv_indicatepkts(struct adapter *padapter,
return retval;
}
+static int recv_func_prehandle(struct adapter *padapter,
+ struct recv_frame *rframe)
+{
+ int ret = _SUCCESS;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* check the frame crtl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
static int recv_func_posthandle(struct adapter *padapter,
struct recv_frame *prframe)
{
@@ -1939,7 +1968,6 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
struct rx_pkt_attrib *prxattrib = &rframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
- struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
/* check if need to handle uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
@@ -1951,12 +1979,9 @@ static int recv_func(struct adapter *padapter, struct recv_frame *rframe)
}
}
- /* check the frame crtl field and decache */
- ret = validate_recv_frame(padapter, rframe);
- if (ret != _SUCCESS) {
- RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
- rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
- } else {
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
/* check if need to enqueue into uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
!IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
@@ -1999,9 +2024,10 @@ _recv_entry_drop:
return ret;
}
-static void rtw_signal_stat_timer_hdl(unsigned long data)
+static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)data;
+ struct adapter *adapter =
+ from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
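The recvframe_chkmic, wlanhdr_to_ethhdr and recvframe_defrag hunks above all restore the same bookkeeping for frames the hardware hands up unstripped: the buffer is 802.11 header, IV, payload, the 8-byte TKIP MIC, then the ICV, so the payload starts at hdrlen + iv_len and the MIC-covered length excludes the header, IV, MIC and ICV. A sketch of that arithmetic with illustrative names (iv_len is 8 and icv_len is 4 for TKIP):

	struct rx_layout {
		unsigned int pkt_len;	/* total bytes in the receive buffer */
		unsigned int hdrlen;	/* 802.11 header length */
		unsigned int iv_len;	/* 8 for TKIP (IV + extended IV) */
		unsigned int icv_len;	/* 4 for TKIP */
	};

	static unsigned int payload_offset(const struct rx_layout *l)
	{
		return l->hdrlen + l->iv_len;	/* first byte covered by the MIC */
	}

	static unsigned int mic_datalen(const struct rx_layout *l)
	{
		/* everything between the IV and the trailing MIC + ICV */
		return l->pkt_len - l->hdrlen - l->iv_len - l->icv_len - 8;
	}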
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b283a4903369..5b1ef229df2a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1690,4 +1690,4 @@ do { \
d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
-} while (0);
+} while (0)
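Dropping the semicolon after } while (0) is what makes a multi-statement macro like ROUND safe to use as an ordinary statement: the caller supplies the terminating semicolon, so the macro expands to exactly one statement and still pairs correctly inside an unbraced if/else. A hedged illustration with a made-up macro:

	#define SWAP_INTS(a, b) do {		\
		int tmp_ = (a);			\
		(a) = (b);			\
		(b) = tmp_;			\
	} while (0)	/* no trailing ';' */

	static void order(int *x, int *y)
	{
		if (*x > *y)
			SWAP_INTS(*x, *y);	/* caller's ';' closes the statement */
		else
			*y += 1;	/* a ';' baked into the macro would break this else */
	}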
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 22cf362b8528..2fd2a9e2416e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -239,8 +239,8 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
}
/* init for DM */
- psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
- psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+ psta->rssi_stat.UndecoratedSmoothedPWDB = -1;
+ psta->rssi_stat.UndecoratedSmoothedCCK = -1;
/* init for the sequence number of received management frame */
psta->RxMgmtFrameSeqNum = 0xffff;
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index be2f46eb9f78..e8d9858f2942 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -93,7 +93,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
for (i = 0; i < NR_XMITFRAME; i++) {
- INIT_LIST_HEAD(&(pxframe->list));
+ INIT_LIST_HEAD(&pxframe->list);
pxframe->padapter = padapter;
pxframe->frame_tag = NULL_FRAMETAG;
@@ -103,7 +103,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
- list_add_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+ list_add_tail(&pxframe->list, &pxmitpriv->free_xmit_queue.queue);
pxframe++;
}
@@ -148,7 +148,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitbuf->flags = XMIT_VO_QUEUE;
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmitbuf_queue.queue);
pxmitbuf++;
}
@@ -182,7 +182,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto exit;
}
- list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
pxmitbuf++;
}
@@ -258,8 +258,8 @@ static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *
u32 sz;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct sta_info *psta = pattrib->psta;
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (pattrib->nr_frags != 1)
sz = padapter->xmitpriv.frag_len;
@@ -697,7 +697,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len));
}
}
- rtw_secgetmic(&micdata, &(mic[0]));
+ rtw_secgetmic(&micdata, &mic[0]);
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n", pattrib->last_txcmdsz));
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\
@@ -705,18 +705,18 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
/* add mic code and add the mic code length in last_txcmdsz */
- memcpy(payload, &(mic[0]), 8);
+ memcpy(payload, &mic[0], 8);
pattrib->last_txcmdsz += 8;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n"));
payload = payload-pattrib->last_txcmdsz+8;
for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8)
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
- (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
- *(payload+curfragnum), *(payload+curfragnum+1),
- *(payload+curfragnum+2), *(payload+curfragnum+3),
- *(payload+curfragnum+4), *(payload+curfragnum+5),
- *(payload+curfragnum+6), *(payload+curfragnum+7)));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
+ *(payload + curfragnum), *(payload + curfragnum + 1),
+ *(payload + curfragnum + 2), *(payload + curfragnum + 3),
+ *(payload + curfragnum + 4), *(payload + curfragnum + 5),
+ *(payload + curfragnum + 6), *(payload + curfragnum + 7)));
} else {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n"));
}
@@ -786,7 +786,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
if (pattrib->subtype & WIFI_DATA_TYPE) {
- if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
/* to_ds = 1, fr_ds = 0; */
/* Data transfer to AP */
SetToDs(fctrl);
@@ -899,20 +899,20 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
switch (priority) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
break;
}
@@ -1229,7 +1229,7 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_queue));
pxmitpriv->free_xmit_extbuf_cnt++;
spin_unlock_irqrestore(&pfree_queue->lock, irql);
@@ -1283,7 +1283,7 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
list_del_init(&pxmitbuf->list);
- list_add_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+ list_add_tail(&pxmitbuf->list, get_list_head(pfree_xmitbuf_queue));
pxmitpriv->free_xmitbuf_cnt++;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
@@ -1395,7 +1395,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct xmit_frame *pxmitframe;
- spin_lock_bh(&(pframequeue->lock));
+ spin_lock_bh(&pframequeue->lock);
phead = get_list_head(pframequeue);
plist = phead->next;
@@ -1407,7 +1407,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
- spin_unlock_bh(&(pframequeue->lock));
+ spin_unlock_bh(&pframequeue->lock);
}
@@ -1503,26 +1503,26 @@ struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *
switch (up) {
case 1:
case 2:
- ptxservq = &(psta->sta_xmitpriv.bk_q);
+ ptxservq = &psta->sta_xmitpriv.bk_q;
*(ac) = 3;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
break;
case 4:
case 5:
- ptxservq = &(psta->sta_xmitpriv.vi_q);
+ ptxservq = &psta->sta_xmitpriv.vi_q;
*(ac) = 1;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
break;
case 6:
case 7:
- ptxservq = &(psta->sta_xmitpriv.vo_q);
+ ptxservq = &psta->sta_xmitpriv.vo_q;
*(ac) = 0;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
break;
case 0:
case 3:
default:
- ptxservq = &(psta->sta_xmitpriv.be_q);
+ ptxservq = &psta->sta_xmitpriv.be_q;
*(ac) = 2;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
break;
@@ -1845,21 +1845,21 @@ void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
pstapriv->sta_dz_bitmap |= BIT(psta->aid);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vo_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
- list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
+ list_del_init(&pstaxmitpriv->vi_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
- list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
+ list_del_init(&pstaxmitpriv->bk_q.tx_pending);
/* for BC/MC Frames */
pstaxmitpriv = &psta_bmc->sta_xmitpriv;
dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
- list_del_init(&(pstaxmitpriv->be_q.tx_pending));
+ list_del_init(&pstaxmitpriv->be_q.tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
}
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 81bf4944ef44..bbb981c6bcec 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*++
Copyright (c) Realtek Semiconductor Corp. All rights reserved.
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index ec8aae76bf40..001d6267b56e 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -382,7 +382,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
}
/* add by Neil Chen to avoid PSD is processing */
- if (pDM_Odm->bDMInitialGainEnable == false) {
+ if (!pDM_Odm->bDMInitialGainEnable) {
ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: PSD is Processing\n"));
return;
}
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 0555e42a3787..5fcbe5639e99 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -109,7 +109,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
cck_highpwr = dm_odm->bCckHighPower;
@@ -223,7 +223,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
}
- /* (2)PWDB, Average PWDB cacluated by hardware (for rate adaptive) */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 3039bbe44a25..20253b5b6679 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -820,9 +820,8 @@ static void save_adda_registers(struct adapter *adapt, u32 *addareg,
{
u32 i;
- for (i = 0; i < register_num; i++) {
+ for (i = 0; i < register_num; i++)
backup[i] = phy_query_bb_reg(adapt, addareg[i], bMaskDWord);
- }
}
static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
@@ -830,9 +829,9 @@ static void save_mac_registers(struct adapter *adapt, u32 *mac_reg,
{
u32 i;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
backup[i] = usb_read8(adapt, mac_reg[i]);
- }
+
backup[i] = usb_read32(adapt, mac_reg[i]);
}
@@ -850,9 +849,9 @@ static void reload_mac_registers(struct adapter *adapt,
{
u32 i;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
usb_write8(adapt, mac_reg[i], (u8)backup[i]);
- }
+
usb_write32(adapt, mac_reg[i], backup[i]);
}
@@ -880,9 +879,9 @@ static void mac_setting_calibration(struct adapter *adapt, u32 *mac_reg, u32 *ba
usb_write8(adapt, mac_reg[i], 0x3F);
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(3))));
- }
+
usb_write8(adapt, mac_reg[i], (u8)(backup[i]&(~BIT(5))));
}
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 674ac5396d00..17967c944946 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -1745,6 +1745,7 @@ void rtw_hal_get_hwreg(struct adapter *Adapter, u8 variable, u8 *val)
switch (variable) {
case HW_VAR_BASIC_RATE:
*((u16 *)(val)) = Adapter->HalData->BasicRateSet;
+ /* fall through */
case HW_VAR_TXPAUSE:
val[0] = usb_read8(Adapter, REG_TXPAUSE);
break;
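The /* fall through */ comment added above is the annotation GCC's -Wimplicit-fallthrough looks for when a case label intentionally continues into the next one (later kernels switched to the fallthrough pseudo-keyword). A small illustration with hypothetical register numbers:

	static unsigned int apply_reg_flags(int reg, unsigned int val)
	{
		switch (reg) {
		case 1:			/* e.g. a control register */
			val |= 0x1;
			/* fall through */
		case 2:			/* e.g. a status register */
			val |= 0x2;
			break;
		default:
			break;
		}
		return val;
	}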
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 550ad62e7064..4e5d7fc6de07 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -75,7 +75,8 @@ enum rf_radio_path {
#define CHANNEL_MAX_NUMBER 14 /* 14 is the max chnl number */
#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
*ch9~11, ch12~13, CH 14
- * total three groups */
+ * total three groups
+ */
#define CHANNEL_GROUP_MAX_88E 6
enum wireless_mode {
@@ -116,35 +117,45 @@ struct bb_reg_def {
/* 0x80c~0x80f [4 bytes] */
u32 rfHSSIPara1; /* wire parameter control1 : */
/* 0x820~0x823,0x828~0x82b,
- * 0x830~0x833, 0x838~0x83b [16 bytes] */
+ * 0x830~0x833, 0x838~0x83b [16 bytes]
+ */
u32 rfHSSIPara2; /* wire parameter control2 : */
/* 0x824~0x827,0x82c~0x82f, 0x834~0x837,
- * 0x83c~0x83f [16 bytes] */
+ * 0x83c~0x83f [16 bytes]
+ */
u32 rfSwitchControl; /* Tx Rx antenna control : */
/* 0x858~0x85f [16 bytes] */
u32 rfAGCControl1; /* AGC parameter control1 : */
/* 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63,
- * 0xc68~0xc6b [16 bytes] */
+ * 0xc68~0xc6b [16 bytes]
+ */
u32 rfAGCControl2; /* AGC parameter control2 : */
/* 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67,
- * 0xc6c~0xc6f [16 bytes] */
+ * 0xc6c~0xc6f [16 bytes]
+ */
u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
/* 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27,
- * 0xc2c~0xc2f [16 bytes] */
+ * 0xc2c~0xc2f [16 bytes]
+ */
u32 rfRxAFE; /* Rx IQ DC ofset and Rx digital filter,
- * Rx DC notch filter : */
+ * Rx DC notch filter :
+ */
/* 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23,
- * 0xc28~0xc2b [16 bytes] */
+ * 0xc28~0xc2b [16 bytes]
+ */
u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
/* 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93,
- * 0xc98~0xc9b [16 bytes] */
+ * 0xc98~0xc9b [16 bytes]
+ */
u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
/* 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97,
- * 0xc9c~0xc9f [16 bytes] */
+ * 0xc9c~0xc9f [16 bytes]
+ */
u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
/* 0x8a0~0x8af [16 bytes] */
u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for
- * Path A and B */
+ * Path A and B
+ */
};
/*------------------------------Define structure----------------------------*/
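The comment churn in this header and the ones that follow is a single checkpatch rule: block comments should end with the trailing */ on a line of its own rather than tacked onto the last text line. The shape being converged on looks like:

	/* explanatory text may start on the opening line,
	 * each continuation line is aligned on '*',
	 * and the terminator closes the block by itself
	 */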
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 04159a9f90d3..8cbba85e1587 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -136,7 +136,8 @@
#define rCCK0_CCA 0xa08
/* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold,
- * RX LNA Threshold useless now. Not the same as 90 series */
+ * RX LNA Threshold useless now. Not the same as 90 series
+ */
#define rCCK0_RxAGC1 0xa0c
#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
index 21996a1173ef..96ebda93b4ee 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
@@ -1,19 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INC_RA_H
#define __INC_RA_H
-/*++
-Copyright (c) Realtek Semiconductor Corp. All rights reserved.
-
-Module Name:
- RateAdaptive.h
-
-Abstract:
- Prototype of RA and related data structure.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-08-12 Page Create.
---*/
+/*
+ * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+ *
+ * Module Name:
+ * RateAdaptive.h
+ *
+ * Abstract:
+ * Prototype of RA and related data structure.
+ *
+ * Major Change History:
+ * When Who What
+ * ---------- --------------- -------------------------------
+ * 2011-08-12 Page Create.
+ */
/* Rate adaptive define */
#define PERENTRY 23
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 69c4d49f43ab..73cc86705cf3 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -1,4 +1,4 @@
-/******************************************************************************
+ /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
@@ -20,14 +20,15 @@
/* port from fw */
/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
- * to check correctness */
+ * to check correctness
+ */
/*
* Call endian free function when
* 1. Read/write packet content.
* 2. Before write integer to IO.
* 3. After read integer from IO.
-*/
+ */
/* Convert little data endian to host ordering */
#define EF1BYTE(_val) \
@@ -74,9 +75,10 @@
#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
(EF1BYTE(*((u8 *)(__pstart))))
-/*Description:
-Translate subfield (continuous bits in little-endian) of 4-byte
-value to host byte ordering.*/
+/* Description:
+ * Translate subfield (continuous bits in little-endian) of 4-byte
+ * value to host byte ordering.
+ */
#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
( \
(LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index c3517c0903ca..2734565ce802 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -128,7 +128,8 @@ struct dvobj_priv {
static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
{
/* todo: get interface type from dvobj and the return
- * the dev accordingly */
+ * the dev accordingly
+ */
return &dvobj->pusbintf->dev;
};
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index dfdbd0254886..da4ee1561c36 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -96,9 +96,11 @@ enum hw_variables {
HW_VAR_FIFO_CLEARN_UP,
HW_VAR_CHECK_TXBUF,
HW_VAR_APFM_ON_MAC, /* Auto FSM to Turn On, include clock, isolation,
- * power control for MAC only */
+ * power control for MAC only
+ */
/* The valid upper nav range for the HW updating, if the true value is
- * larger than the upper range, the HW won't update it. */
+ * larger than the upper range, the HW won't update it.
+ */
/* Unit in microsecond. 0 means disable this function. */
HW_VAR_NAV_UPPER,
HW_VAR_RPT_TIMER_SETTING,
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index 284db7d00f50..9f480ccec531 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -202,9 +202,9 @@ enum NETWORK_TYPE {
#define IsSupportedTxCCK(NetType) \
((NetType) & (WIRELESS_11B) ? true : false)
#define IsSupportedTxOFDM(NetType) \
- ((NetType) & (WIRELESS_11G|WIRELESS_11A) ? true : false)
+ ((NetType) & (WIRELESS_11G | WIRELESS_11A) ? true : false)
#define IsSupportedTxMCS(NetType) \
- ((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
+ ((NetType) & (WIRELESS_11_24N | WIRELESS_11_5N) ? true : false)
struct ieee_param {
@@ -276,12 +276,13 @@ struct sta_data {
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
- 6.2.1.1.2.
+ * 6.2.1.1.2.
- The figure in section 7.1.2 suggests a body size of up to 2312
- bytes is allowed, which is a bit confusing, I suspect this
- represents the 2304 bytes of real data, plus a possible 8 bytes of
- WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
+ * The figure in section 7.1.2 suggests a body size of up to 2312
+ * bytes is allowed, which is a bit confusing, I suspect this
+ * represents the 2304 bytes of real data, plus a possible 8 bytes of
+ * WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro)
+ */
#define IEEE80211_HLEN 30
@@ -358,11 +359,11 @@ struct ieee80211_snap_hdr {
#define IEEE80211_DATA_HDR3_LEN 24
#define IEEE80211_DATA_HDR4_LEN 30
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
+#define IEEE80211_CCK_MODULATION BIT(0)
+#define IEEE80211_OFDM_MODULATION BIT(1)
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
+#define IEEE80211_24GHZ_BAND BIT(0)
+#define IEEE80211_52GHZ_BAND BIT(1)
#define IEEE80211_CCK_RATE_LEN 4
#define IEEE80211_NUM_OFDM_RATESLEN 8
@@ -383,18 +384,18 @@ struct ieee80211_snap_hdr {
#define IEEE80211_OFDM_RATE_54MB 0x6C
#define IEEE80211_BASIC_RATE_MASK 0x80
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+#define IEEE80211_CCK_RATE_1MB_MASK BIT(0)
+#define IEEE80211_CCK_RATE_2MB_MASK BIT(1)
+#define IEEE80211_CCK_RATE_5MB_MASK BIT(2)
+#define IEEE80211_CCK_RATE_11MB_MASK BIT(3)
+#define IEEE80211_OFDM_RATE_6MB_MASK BIT(4)
+#define IEEE80211_OFDM_RATE_9MB_MASK BIT(5)
+#define IEEE80211_OFDM_RATE_12MB_MASK BIT(6)
+#define IEEE80211_OFDM_RATE_18MB_MASK BIT(7)
+#define IEEE80211_OFDM_RATE_24MB_MASK BIT(8)
+#define IEEE80211_OFDM_RATE_36MB_MASK BIT(9)
+#define IEEE80211_OFDM_RATE_48MB_MASK BIT(10)
+#define IEEE80211_OFDM_RATE_54MB_MASK BIT(11)
#define IEEE80211_CCK_RATES_MASK 0x0000000F
#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
@@ -423,18 +424,19 @@ struct ieee80211_snap_hdr {
/* IEEE 802.11 requires that STA supports concurrent reception of at least
* three fragmented frames. This define can be increased to support more
* concurrent frames, but it should be noted that each entry can consume about
- * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly.
+ */
#define IEEE80211_FRAG_CACHE_LEN 4
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
+#define SEC_KEY_1 BIT(0)
+#define SEC_KEY_2 BIT(1)
+#define SEC_KEY_3 BIT(2)
+#define SEC_KEY_4 BIT(3)
+#define SEC_ACTIVE_KEY BIT(4)
+#define SEC_AUTH_MODE BIT(5)
+#define SEC_UNICAST_GROUP BIT(6)
+#define SEC_LEVEL BIT(7)
+#define SEC_ENABLED BIT(8)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -451,7 +453,8 @@ struct ieee80211_snap_hdr {
/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
* only use 8, and then use extended rates for the remaining supported
* rates. Other APs, however, stick all of their supported rates on the
- * main rates information element... */
+ * main rates information element...
+ */
#define MAX_RATES_LENGTH ((u8)12)
#define MAX_RATES_EX_LENGTH ((u8)16)
#define MAX_NETWORK_COUNT 128
@@ -467,17 +470,17 @@ struct ieee80211_snap_hdr {
#define MAX_P2P_IE_LEN (256)
#define MAX_WFD_IE_LEN (128)
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID BIT(0)
+#define NETWORK_HAS_OFDM BIT(1)
+#define NETWORK_HAS_CCK BIT(2)
#define IW_ESSID_MAX_SIZE 32
/*
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-*/
+ * join_res:
+ * -1: authentication fail
+ * -2: association fail
+ * > 0: TID
+ */
enum ieee80211_state {
/* the card is not linked at all */
@@ -531,15 +534,15 @@ static inline int is_broadcast_mac_addr(const u8 *addr)
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
}
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RESERVE_FCS BIT(0)
+#define CFG_IEEE80211_COMPUTE_FCS BIT(1)
#define MAXTID 16
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_A BIT(0)
+#define IEEE_B BIT(1)
+#define IEEE_G BIT(2)
+#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Action category code */
enum rtw_ieee80211_category {
@@ -615,7 +618,8 @@ enum rtw_ieee80211_back_parties {
};
#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
- * 00:50:F2 */
+ * 00:50:F2
+ */
#define WME_OUI_TYPE 2
#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
@@ -655,12 +659,12 @@ enum rtw_ieee80211_back_parties {
* is not permitted.
*/
enum rtw_ieee80211_channel_flags {
- RTW_IEEE80211_CHAN_DISABLED = 1<<0,
- RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
- RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
- RTW_IEEE80211_CHAN_RADAR = 1<<3,
- RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
- RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ RTW_IEEE80211_CHAN_DISABLED = BIT(0),
+ RTW_IEEE80211_CHAN_PASSIVE_SCAN = BIT(1),
+ RTW_IEEE80211_CHAN_NO_IBSS = BIT(2),
+ RTW_IEEE80211_CHAN_RADAR = BIT(3),
+ RTW_IEEE80211_CHAN_NO_HT40PLUS = BIT(4),
+ RTW_IEEE80211_CHAN_NO_HT40MINUS = BIT(5),
};
#define RTW_IEEE80211_CHAN_NO_HT40 \
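The (1<<n)-to-BIT(n) conversions throughout this header use the kernel's BIT() helper, which is equivalent to (1UL << (n)): besides reading more clearly, it yields an unsigned long constant, so high bit positions never shift into the sign bit of a plain int. A sketch of the definition and a typical flags test, with hypothetical flag names:

	#define BIT(nr)		(1UL << (nr))	/* as provided by the kernel's bitops headers */

	#define CHAN_DISABLED	BIT(0)
	#define CHAN_PASSIVE	BIT(1)

	static int chan_is_usable(unsigned long flags)
	{
		return !(flags & (CHAN_DISABLED | CHAN_PASSIVE));
	}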
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 4fb3bb07ceaa..95426b7c6dbf 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -36,7 +36,8 @@
/* Mainly, it just retains last scan result and scan again. */
/* After that, it compares the scan result to see which one gets better
* RSSI. It selects antenna with better receiving power and returns better
- * scan result. */
+ * scan result.
+ */
#define TP_MODE 0
#define RSSI_MODE 1
@@ -173,10 +174,12 @@ struct rx_hpc {
/* This indicates two different steps. */
/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to
- * the signal on the air. */
+ * the signal on the air.
+ */
/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
* SWAW_STEP_PEAK with original RSSI to determine if it is necessary to
- * switch antenna. */
+ * switch antenna.
+ */
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
@@ -265,7 +268,8 @@ struct odm_phy_status_info {
s8 RxPower; /* in dBm Translate from PWdB */
s8 RecvSignalPower;/* Real power in dBm for this packet, no
* beautification and aggregation. Keep this raw
- * info to be used for the other procedures. */
+ * info to be used for the other procedures.
+ */
u8 BTRxRSSIPercentage;
u8 SignalStrength; /* in 0-100 index. */
u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
@@ -478,7 +482,7 @@ enum odm_operation_mode {
/* ODM_CMNINFO_WM_MODE */
enum odm_wireless_mode {
- ODM_WM_UNKNOW = 0x0,
+ ODM_WM_UNKNOWN = 0x0,
ODM_WM_B = BIT(0),
ODM_WM_G = BIT(1),
ODM_WM_A = BIT(2),
@@ -509,7 +513,7 @@ enum odm_security {
ODM_SEC_RESERVE = 3,
ODM_SEC_AESCCMP = 4,
ODM_SEC_WEP104 = 5,
- ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
ODM_SEC_SMS4 = 7,
};
@@ -567,7 +571,8 @@ struct odm_ra_info {
u8 PTPreRssi; /* if RSSI change 5% do PT */
u8 PTModeSS; /* decide whitch rate should do PT */
u8 RAstage; /* StageRA, decide how many times RA will be done
- * between PT */
+ * between PT
+ */
u8 PTSmoothFactor;
};
@@ -587,12 +592,14 @@ struct odm_rf_cal {
u8 TXPowercount;
bool bTXPowerTracking;
u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
- * as default */
+ * as default
+ */
u8 TM_Trigger;
u8 InternalPA5G[2]; /* pathA / pathB */
u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0,
- * and 1 for RFIC1 */
+ * and 1 for RFIC1
+ */
u8 ThermalValue;
u8 ThermalValue_LCK;
u8 ThermalValue_IQK;
@@ -688,7 +695,7 @@ enum ant_div_type {
/* Copy from SD4 defined structure. We use to support PHY DM integration. */
struct odm_dm_struct {
- /* Add for different team use temporarily */
+ /* Add for different team use temporarily */
struct adapter *Adapter; /* For CE/NIC team */
struct rtl8192cd_priv *priv; /* For AP/ADSL team */
/* WHen you use above pointers, they must be initialized. */
@@ -714,7 +721,8 @@ struct odm_dm_struct {
/* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
u8 SupportInterface;
/* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any
- * other type = 1/2/3/... */
+ * other type = 1/2/3/...
+ */
u32 SupportICType;
/* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
u8 CutVersion;
@@ -788,19 +796,21 @@ struct odm_dm_struct {
bool bBtHsOperation; /* BT HS mode is under progress */
u8 btHsDigVal; /* use BT rssi to decide the DIG value */
bool bBtDisableEdcaTurbo;/* Under some condition, don't enable the
- * EDCA Turbo */
+ * EDCA Turbo
+ */
bool bBtBusy; /* BT is busy. */
/* CALL BY VALUE------------- */
/* 2 Define STA info. */
/* _ODM_STA_INFO */
- /* For MP, we need to reduce one array pointer for default port.?? */
+ /* For MP, we need to reduce one array pointer for default port.??*/
struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
u16 CurrminRptTime;
struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
- * array index. STA MacID=0,
- * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
+ * array index. STA MacID=0,
+ * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1}
+ */
/* */
/* 2012/02/14 MH Add to share 88E ra with other SW team. */
/* We need to colelct all support abilit to a proper area. */
@@ -1029,9 +1039,11 @@ extern u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8];
/* 20100514 Joseph: Add definition for antenna switching test after link. */
/* This indicates two different the steps. */
/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the
- * signal on the air. */
+ * signal on the air.
+ */
/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
- * SWAW_STEP_PEAK */
+ * SWAW_STEP_PEAK
+ */
/* with original RSSI to determine if it is necessary to switch antenna. */
#define SWAW_STEP_PEAK 0
#define SWAW_STEP_DETERMINE 1
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 97d3d8504184..f1fb3d511a45 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -26,7 +26,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter);
u8 rtw_free_drv_sw(struct adapter *padapter);
u8 rtw_reset_drv_sw(struct adapter *padapter);
-void rtw_stop_drv_threads (struct adapter *padapter);
+void rtw_stop_drv_threads(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
diff --git a/drivers/staging/rtl8188eu/include/phy.h b/drivers/staging/rtl8188eu/include/phy.h
index cd387e998574..e99ac3910787 100644
--- a/drivers/staging/rtl8188eu/include/phy.h
+++ b/drivers/staging/rtl8188eu/include/phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <odm.h>
#define IQK_DELAY_TIME_88E 10
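The SPDX identifiers added in these files follow the kernel's documented placement convention: a .c file carries the tag as a // comment on its first line, while a header wraps it in a /* */ block, e.g.:

	// SPDX-License-Identifier: GPL-2.0	<- first line of a .c file
	/* SPDX-License-Identifier: GPL-2.0 */	/* first line of a .h file */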
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index addf90b60ce9..bd77a50c0d41 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -20,28 +20,28 @@
#include "pwrseqcmd.h"
/*
- Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
- There are 6 HW Power States:
- 0: POFF--Power Off
- 1: PDN--Power Down
- 2: CARDEMU--Card Emulation
- 3: ACT--Active Mode
- 4: LPS--Low Power State
- 5: SUS--Suspend
-
- The transition from different states are defined below
- TRANS_CARDEMU_TO_ACT
- TRANS_ACT_TO_CARDEMU
- TRANS_CARDEMU_TO_SUS
- TRANS_SUS_TO_CARDEMU
- TRANS_CARDEMU_TO_PDN
- TRANS_ACT_TO_LPS
- TRANS_LPS_TO_ACT
-
- TRANS_END
-
- PWR SEQ Version: rtl8188E_PwrSeq_V09.h
-*/
+ * Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
+ * There are 6 HW Power States:
+ * 0: POFF--Power Off
+ * 1: PDN--Power Down
+ * 2: CARDEMU--Card Emulation
+ * 3: ACT--Active Mode
+ * 4: LPS--Low Power State
+ * 5: SUS--Suspend
+ *
+ * The transition from different states are defined below
+ * TRANS_CARDEMU_TO_ACT
+ * TRANS_ACT_TO_CARDEMU
+ * TRANS_CARDEMU_TO_SUS
+ * TRANS_SUS_TO_CARDEMU
+ * TRANS_CARDEMU_TO_PDN
+ * TRANS_ACT_TO_LPS
+ * TRANS_LPS_TO_ACT
+ *
+ * TRANS_END
+ *
+ * PWR SEQ Version: rtl8188E_PwrSeq_V09.h
+ */
#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
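The six states and TRANS_* transitions listed in the comment above can be read as a small state machine. A minimal sketch with invented names follows; the driver itself drives these transitions through pwrseqcmd tables rather than code like this.

enum hw_pwr_state { PWR_POFF, PWR_PDN, PWR_CARDEMU, PWR_ACT, PWR_LPS, PWR_SUS };

/* Mirrors the TRANS_* list above: CARDEMU<->ACT, CARDEMU<->SUS,
 * CARDEMU->PDN and ACT<->LPS are the only legal moves.
 */
static int pwr_transition_allowed(enum hw_pwr_state from, enum hw_pwr_state to)
{
        return (from == PWR_CARDEMU && (to == PWR_ACT || to == PWR_SUS ||
                                        to == PWR_PDN)) ||
               (from == PWR_ACT && (to == PWR_CARDEMU || to == PWR_LPS)) ||
               (from == PWR_SUS && to == PWR_CARDEMU) ||
               (from == PWR_LPS && to == PWR_ACT);
}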
diff --git a/drivers/staging/rtl8188eu/include/rf.h b/drivers/staging/rtl8188eu/include/rf.h
index 98a5551f5ae7..ed3241c020ad 100644
--- a/drivers/staging/rtl8188eu/include/rf.h
+++ b/drivers/staging/rtl8188eu/include/rf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define RF6052_MAX_TX_PWR 0x3F
#define RF6052_MAX_REG 0x3F
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 9330361da4ad..b4b5e217105a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -42,7 +42,7 @@
#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
-/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
+/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
@@ -81,7 +81,8 @@ enum usb_rx_agg_mode {
#define MAX_RX_DMA_BUFFER_SIZE_88E \
0x2400 /* 9k for 88E nornal chip , MaxRxBuff=10k-max(TxReportSize(64*8),
- * WOLPattern(16*24)) */
+ * WOLPattern(16*24))
+ */
#define MAX_TX_REPORT_BUFFER_SIZE 0x0400 /* 1k */
@@ -94,11 +95,13 @@ enum usb_rx_agg_mode {
#define TX_SELE_NQ BIT(2) /* Normal Queue */
/* Note: We will divide number of page equally for each queue other
- * than public queue! */
+ * than public queue!
+ */
/* 22k = 22528 bytes = 176 pages (@page = 128 bytes) */
/* must reserved about 7 pages for LPS => 176-7 = 169 (0xA9) */
/* 2*BCN / 1*ps-poll / 1*null-data /1*prob_rsp /1*QOS null-data /1*BT QOS
- * null-data */
+ * null-data
+ */
#define TX_TOTAL_PAGE_NUMBER_88E 0xA9/* 169 (21632=> 21k) */
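The page accounting quoted in the comments above checks out: 22528 bytes at 128 bytes per page gives 176 pages, and reserving 7 of them for LPS leaves 169, i.e. 0xA9. A quick standalone check, with invented macro names and C11 static asserts:

#define PAGE_SIZE_88E_BYTES   128
#define TX_BUF_88E_BYTES      22528     /* "22k = 22528 bytes" */
#define LPS_RESERVED_PAGES    7

_Static_assert(TX_BUF_88E_BYTES / PAGE_SIZE_88E_BYTES == 176,
               "176 pages total");
_Static_assert(176 - LPS_RESERVED_PAGES == 0xA9, "169 pages => 0xA9");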
@@ -110,7 +113,7 @@ enum usb_rx_agg_mode {
#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
(WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
-/* Chip specific */
+/* Chip specific */
#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
#define CHIP_BONDING_92C_1T2R 0x1
#define CHIP_BONDING_88C_USB_MCARD 0x2
@@ -118,7 +121,7 @@ enum usb_rx_agg_mode {
#include "HalVerDef.h"
#include "hal_com.h"
-/* Channel Plan */
+/* Channel Plan */
enum ChannelPlan {
CHPL_FCC = 0,
CHPL_IC = 1,
@@ -168,7 +171,8 @@ struct txpowerinfo24g {
#define AVAILABLE_EFUSE_ADDR_88E(addr) \
(addr < EFUSE_REAL_CONTENT_LEN_88E)
/* To prevent out of boundary programming case, leave 1byte and program
- * full section */
+ * full section
+ */
/* 9bytes + 1byt + 5bytes and pre 1byte. */
/* For worst case: */
/* | 2byte|----8bytes----|1byte|--7bytes--| 92D */
@@ -176,7 +180,7 @@ struct txpowerinfo24g {
#define EFUSE_OOB_PROTECT_BYTES_88E 18
#define EFUSE_PROTECT_BYTES_BANK_88E 16
-/* EFUSE for BT definition */
+/* EFUSE for BT definition */
#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
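Putting the constants above together: with EFUSE_OOB_PROTECT_BYTES_88E (18) kept free at the end of the map for the worst case sketched in the comment, a hypothetical bounds check layered on AVAILABLE_EFUSE_ADDR_88E() could look like this (illustration only, not the driver's own helper):

static int efuse_addr_programmable(unsigned int addr)
{
        /* stay clear of the protected tail of the efuse map */
        return addr < (EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
}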
@@ -293,7 +297,8 @@ struct hal_data_8188e {
u8 bDumpRxPkt;/* for debug */
u8 bDumpTxPkt;/* for debug */
u8 FwRsvdPageStartOffset; /* Reserve page start offset except
- * beacon in TxQ. */
+ * beacon in TxQ.
+ */
/* 2010/08/09 MH Add CU power down mode. */
bool pwrdown;
@@ -307,7 +312,8 @@ struct hal_data_8188e {
u16 EfuseUsedBytes;
/* Auto FSM to Turn On, include clock, isolation, power control
- * for MAC only */
+ * for MAC only
+ */
u8 bMacPwrCtrlOn;
u32 UsbBulkOutSize;
@@ -324,7 +330,8 @@ struct hal_data_8188e {
enum usb_rx_agg_mode UsbRxAggMode;
u8 UsbRxAggBlockCount; /* USB Block count. Block size is
* 512-byte in high speed and 64-byte
- * in full speed */
+ * in full speed
+ */
u8 UsbRxAggBlockTimeout;
u8 UsbRxAggPageCount; /* 8192C DMA page count */
u8 UsbRxAggPageTimeout;
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index c93e19d1c50f..71e2b817e20a 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -15,7 +15,7 @@
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
-/* 8192C Regsiter offset definition */
+/* 8192C Register offset definition */
#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
@@ -62,12 +62,15 @@
#define REG_HSIMR 0x0058
#define REG_HSISR 0x005c
#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Pin Control. */
+ * Multi-Function GPIO Pin Control.
+ */
#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
- * Multi-Function GPIO Select. */
+ * Multi-Function GPIO Select.
+ */
#define REG_BB_PAD_CTRL 0x0064
#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
- * Multi-Function control source. */
+ * Multi-Function control source.
+ */
#define REG_GPIO_OUTPUT 0x006c
#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
@@ -87,7 +90,8 @@
#define REG_HIMRE_88E 0x00B8
#define REG_HISRE_88E 0x00BC
#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection
- * for RTL8723 */
+ * for RTL8723
+ */
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
#define REG_BIST_ROM_RPT 0x00D8
@@ -119,9 +123,9 @@
#define REG_FWISR 0x0134
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_PKTBUF_DBG_ADDR (REG_PKTBUF_DBG_CTRL)
-#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+2)
-#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+3)
-#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL + 2)
+#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL + 3)
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2)
#define REG_PKTBUF_DBG_DATA_L 0x0144
#define REG_PKTBUF_DBG_DATA_H 0x0148
@@ -252,21 +256,24 @@
#define REG_TXPAUSE 0x0522
#define REG_DIS_TXREQ_CLR 0x0523
#define REG_RD_CTRL 0x0524
-/* Format for offset 540h-542h: */
-/* [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
- * beacon content before TBTT. */
-/* [7:4]: Reserved. */
-/* [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
- * to send the beacon packet. */
-/* [23:20]: Reserved */
-/* Description: */
-/* | */
-/* |<--Setup--|--Hold------------>| */
-/* --------------|---------------------- */
-/* | */
-/* TBTT */
-/* Note: We cannot update beacon content to HW or send any AC packets during
- * the time between Setup and Hold. */
+/* Format for offset 540h-542h:
+ * [3:0]: TBTT prohibit setup in unit of 32us. The time for HW getting
+ * beacon content before TBTT.
+ *
+ * [7:4]: Reserved.
+ * [19:8]: TBTT prohibit hold in unit of 32us. The time for HW holding
+ * to send the beacon packet.
+ *
+ * [23:20]: Reserved
+ * Description:
+ * |
+ * |<--Setup--|--Hold------------>|
+ * --------------|----------------------
+ * |
+ * TBTT
+ * Note: We cannot update beacon content to HW or send any AC packets during
+ * the time between Setup and Hold.
+ */
#define REG_TBTT_PROHIBIT 0x0540
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
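Given the field layout described above ([3:0] setup, [19:8] hold, both in 32 us units), a hypothetical helper for composing the 24-bit value programmed at offset 0x540 could look like this (not the driver's own API; plain C types used for the sketch):

static unsigned int tbtt_prohibit_val(unsigned int setup_us, unsigned int hold_us)
{
        unsigned int setup = (setup_us / 32) & 0xF;     /* bits [3:0]  */
        unsigned int hold  = (hold_us / 32) & 0xFFF;    /* bits [19:8] */

        return setup | (hold << 8);
}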
@@ -340,13 +347,14 @@
#define RXERR_RPT_RST BIT(27)
#define _RXERR_RPT_SEL(type) ((type) << 28)
-/* Note: */
-/* The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
+/* Note:
+ * The NAV upper value is very important to WiFi 11n 5.2.3 NAV test.
* The default value is always too small, but the WiFi TestPlan test
* by 25,000 microseconds of NAV through sending CTS in the air.
* We must update this value greater than 25,000 microseconds to pass
* the item. The offset of NAV_UPPER in 8192C Spec is incorrect, and
- * the offset should be 0x0652. */
+ * the offset should be 0x0652.
+ */
#define REG_NAV_UPPER 0x0652 /* unit of 128 */
/* WMA, BA, CCX */
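With the 128 us granularity noted earlier in this header (HAL_92C_NAV_UPPER_UNIT), the 30 ms bound the driver uses (WiFiNavUpperUs in wifi.h, below) rounds up to 235 units, comfortably past the 25,000 us the test plan requires. A sketch of the conversion, assuming only the constants quoted here:

#define NAV_UPPER_UNIT_US 128   /* local name mirroring HAL_92C_NAV_UPPER_UNIT */

static unsigned char nav_upper_units(unsigned int nav_us)
{
        /* round up so the programmed value never falls short of nav_us */
        return (nav_us + NAV_UPPER_UNIT_US - 1) / NAV_UPPER_UNIT_US;
}

/* nav_upper_units(30000) == 235; 235 * 128 us = 30,080 us > 25,000 us */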
@@ -453,11 +461,12 @@
/* GPIO pins input value */
#define GPIO_IN REG_GPIO_PIN_CTRL
/* GPIO pins output value */
-#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1)
/* GPIO pins output enable when a bit is set to "1"; otherwise,
- * input is configured. */
-#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
-#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+ * input is configured.
+ */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3)
/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */
#define HSIMR_GPIO12_0_INT_EN BIT(0)
@@ -475,13 +484,13 @@
/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
/*
-Network Type
-00: No link
-01: Link in ad hoc network
-10: Link in infrastructure network
-11: AP mode
-Default: 00b.
-*/
+ * Network Type
+ * 00: No link
+ * 01: Link in ad hoc network
+ * 10: Link in infrastructure network
+ * 11: AP mode
+ * Default: 00b.
+ */
#define MSR_NOLINK 0x00
#define MSR_ADHOC 0x01
#define MSR_INFRA 0x02
@@ -635,26 +644,27 @@ So the following defines for 92C is not entire!!!!!!
=====================================================================
=====================================================================*/
/*
-Based on Datasheet V33---090401
-Register Summary
-Current IOREG MAP
-0x0000h ~ 0x00FFh System Configuration (256 Bytes)
-0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
-0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
-0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
-0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
-0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
-0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
-0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
-0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
-*/
+ * Based on Datasheet V33---090401
+ * Register Summary
+ * Current IOREG MAP
+ * 0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+ * 0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+ * 0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+ * 0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+ * 0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+ * 0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+ * 0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+ * 0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+ * 0x2000h ~ 0x3FFFh 8051 FW Download Region (8196 Bytes)
+ */
/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
-/* Note: */
-/* The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
- * RTL8192S/RTL8192C are wrong, */
-/* the correct arragement is VO - Bit0, VI - Bit1, BE - Bit2,
- * and BK - Bit3. */
-/* 8723 and 88E may be not correct either in the earlier version. */
+/* Note:
+ * The bits of stopping AC(VO/VI/BE/BK) queue in datasheet
+ * RTL8192S/RTL8192C are wrong,
+ * the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2,
+ * and BK - Bit3.
+ * 8723 and 88E may not be correct either in the earlier version.
+ */
#define StopBecon BIT(6)
#define StopHigh BIT(5)
#define StopMgt BIT(4)
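Following the corrected mapping in the comment above (VO=bit0, VI=bit1, BE=bit2, BK=bit3), pausing individual AC queues through REG_TXPAUSE would use masks like the hypothetical ones below; the header itself only defines the beacon/high/management bits at this point.

#define TXPAUSE_VO BIT(0)   /* hypothetical names, per the comment above */
#define TXPAUSE_VI BIT(1)
#define TXPAUSE_BE BIT(2)
#define TXPAUSE_BK BIT(3)

/* e.g. pause the best-effort and background queues by writing
 * (TXPAUSE_BE | TXPAUSE_BK) to REG_TXPAUSE (0x522) with the driver's
 * 8-bit register write helper.
 */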
@@ -680,7 +690,8 @@ Current IOREG MAP
#define RCR_AICV BIT(9) /* Accept ICV error packet */
#define RCR_ACRC32 BIT(8) /* Accept CRC32 error packet */
#define RCR_CBSSID_BCN BIT(7) /* Accept BSSID match packet
- * (Rx beacon, probe rsp) */
+ * (Rx beacon, probe rsp)
+ */
#define RCR_CBSSID_DATA BIT(6) /* Accept BSSID match (Data)*/
#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match */
#define RCR_APWRMGT BIT(5) /* Accept power management pkt*/
@@ -701,7 +712,7 @@ Current IOREG MAP
#define REG_USB_HRPWM 0xFE58
#define REG_USB_HCPWM 0xFE57
-/* 8192C Regsiter Bit and Content definition */
+/* 8192C Register Bit and Content definition */
/* 0x0000h ~ 0x00FFh System Configuration */
/* 2 SYS_ISO_CTRL */
@@ -798,7 +809,7 @@ Current IOREG MAP
/* 2 EFUSE_TEST (For RTL8723 partially) */
#define EF_TRPT BIT(7)
/* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
-#define EF_CELL_SEL (BIT(8)|BIT(9))
+#define EF_CELL_SEL (BIT(8) | BIT(9))
#define LDOE25_EN BIT(31)
#define EFUSE_SEL(x) (((x) & 0x3) << 8)
#define EFUSE_SEL_MASK 0x300
@@ -835,7 +846,7 @@ Current IOREG MAP
#define BD_MAC2 BIT(9)
#define BD_MAC1 BIT(10)
#define IC_MACPHY_MODE BIT(11)
-#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15))
#define BT_FUNC BIT(16)
#define VENDOR_ID BIT(19)
#define PAD_HWPD_IDN BIT(22)
@@ -849,9 +860,9 @@ Current IOREG MAP
#define CHIP_VER_RTL_SHIFT 12
/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
-#define EFS_HCI_SEL (BIT(0)|BIT(1))
-#define PAD_HCI_SEL (BIT(2)|BIT(3))
-#define HCI_SEL (BIT(4)|BIT(5))
+#define EFS_HCI_SEL (BIT(0) | BIT(1))
+#define PAD_HCI_SEL (BIT(2) | BIT(3))
+#define HCI_SEL (BIT(4) | BIT(5))
#define PKG_SEL_HCI BIT(6)
#define FEN_GPS BIT(7)
#define FEN_BT BIT(8)
@@ -868,7 +879,7 @@ Current IOREG MAP
#define UPHY_SUSB BIT(21)
#define PCI_SUSEN BIT(22)
#define USB_SUSEN BIT(23)
-#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
/* 2SYS_CFG */
#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
@@ -931,12 +942,12 @@ Current IOREG MAP
#define HQSEL_HIQ BIT(5)
/* For normal driver, 0x10C */
-#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
-#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
-#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
-#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
-#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
-#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
+#define _TXDMA_HIQ_MAP(x) (((x) & 0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x) & 0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x) & 0x3) << 10)
+#define _TXDMA_BEQ_MAP(x) (((x) & 0x3) << 8)
+#define _TXDMA_VIQ_MAP(x) (((x) & 0x3) << 6)
+#define _TXDMA_VOQ_MAP(x) (((x) & 0x3) << 4)
#define QUEUE_LOW 1
#define QUEUE_NORMAL 2
@@ -1242,10 +1253,12 @@ Current IOREG MAP
/* 2REG_C2HEVT_CLEAR */
/* Set by driver and notify FW that the driver has read
- * the C2H command message */
+ * the C2H command message
+ */
#define C2H_EVT_HOST_CLOSE 0x00
/* Set by FW indicating that FW had set the C2H command
- * message and it's not yet read by driver. */
+ * message and it's not yet read by driver.
+ */
#define C2H_EVT_FW_CLOSE 0xFF
/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index f79feeb4e38f..2c026bf6fecb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -97,13 +97,13 @@ enum RFINTFS {
};
/*
-Caller Mode: Infra, Ad-HoC(C)
-
-Notes: To disconnect the current associated BSS
-
-Command Mode
-
-*/
+ * Caller Mode: Infra, Ad-HoC(C)
+ *
+ * Notes: To disconnect the current associated BSS
+ *
+ * Command Mode
+ *
+ */
struct disconnect_parm {
u32 deauth_timeout_ms;
};
@@ -114,13 +114,13 @@ struct setopmode_parm {
};
/*
-Caller Mode: AP, Ad-HoC, Infra
-
-Notes: To ask RTL8711 performing site-survey
-
-Command-Event Mode
-
-*/
+ * Caller Mode: AP, Ad-HoC, Infra
+ *
+ * Notes: To ask RTL8711 performing site-survey
+ *
+ * Command-Event Mode
+ *
+ */
#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
@@ -133,13 +133,13 @@ struct sitesurvey_parm {
};
/*
-Caller Mode: Any
-
-Notes: To set the auth type of RTL8711. open/shared/802.1x
-
-Command Mode
-
-*/
+ * Caller Mode: Any
+ *
+ * Notes: To set the auth type of RTL8711. open/shared/802.1x
+ *
+ * Command Mode
+ *
+ */
struct setauth_parm {
u8 mode; /* 0: legacy open, 1: legacy shared 2: 802.1x */
u8 _1x; /* 0: PSK, 1: TLS */
@@ -147,40 +147,42 @@ struct setauth_parm {
};
/*
-Caller Mode: Infra
-
-a. algorithm: wep40, wep104, tkip & aes
-b. keytype: grp key/unicast key
-c. key contents
-
-when shared key ==> keyid is the camid
-when 802.1x ==> keyid [0:1] ==> grp key
-when 802.1x ==> keyid > 2 ==> unicast key
-
-*/
+ * Caller Mode: Infra
+ *
+ * a. algorithm: wep40, wep104, tkip & aes
+ * b. keytype: grp key/unicast key
+ * c. key contents
+ *
+ * when shared key ==> keyid is the camid
+ * when 802.1x ==> keyid [0:1] ==> grp key
+ * when 802.1x ==> keyid > 2 ==> unicast key
+ *
+ */
struct setkey_parm {
u8 algorithm; /* could be none, wep40, TKIP, CCMP, wep104 */
u8 keyid;
u8 grpkey; /* 1: this is the grpkey for 802.1x.
- * 0: this is the unicast key for 802.1x */
+ * 0: this is the unicast key for 802.1x
+ */
u8 set_tx; /* 1: main tx key for wep. 0: other key. */
u8 key[16]; /* this could be 40 or 104 */
};
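As a worked example of the keyid rules above (keyid 0 or 1 selects a group key under 802.1x), a group-key request could be filled as shown below. This is only an illustration; the cipher constant is assumed to come from rtw_security.h.

struct setkey_parm grp_key_example = {
        .algorithm = _TKIP_,    /* assumed constant from rtw_security.h */
        .keyid     = 1,         /* 802.1x keyid [0:1] ==> group key */
        .grpkey    = 1,
        .set_tx    = 0,         /* not the WEP default tx key */
        /* .key[] would carry the 16-byte group temporal key */
};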
/*
-When in AP or Ad-Hoc mode, this is used to
-allocate an sw/hw entry for a newly associated sta.
-
-Command
-
-when shared key ==> algorithm/keyid
-
-*/
+ * When in AP or Ad-Hoc mode, this is used to
+ * allocate an sw/hw entry for a newly associated sta.
+ *
+ * Command
+ *
+ * when shared key ==> algorithm/keyid
+ *
+ */
struct set_stakey_parm {
u8 addr[ETH_ALEN];
u8 algorithm;
u8 id;/* currently for erasing cam entry if
- * algorithm == _NO_PRIVACY_ */
+ * algorithm == _NO_PRIVACY_
+ */
u8 key[16];
};
@@ -191,15 +193,15 @@ struct set_stakey_rsp {
};
/*
-Caller Ad-Hoc/AP
-
-Command -Rsp(AID == CAMID) mode
-
-This is to force fw to add an sta_data entry per driver's request.
-
-FW will write an cam entry associated with it.
-
-*/
+ * Caller Ad-Hoc/AP
+ *
+ * Command -Rsp(AID == CAMID) mode
+ *
+ * This is to force fw to add an sta_data entry per driver's request.
+ *
+ * FW will write an cam entry associated with it.
+ *
+ */
struct set_assocsta_parm {
u8 addr[ETH_ALEN];
};
@@ -210,55 +212,57 @@ struct set_assocsta_rsp {
};
/*
- Notes: This command is used for H2C/C2H loopback testing
-
- mac[0] == 0
- ==> CMD mode, return H2C_SUCCESS.
- The following condition must be true under CMD mode
- mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
- s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
- s2 == (b1 << 8 | b0);
-
- mac[0] == 1
- ==> CMD_RSP mode, return H2C_SUCCESS_RSP
-
- The rsp layout shall be:
- rsp: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = mac[3];
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = s1;
- s1 = swap16(s0);
- w0 = swap32(w1);
- b0 = b1
- s2 = s0 + s1
- b1 = b0
- w1 = w0
-
- mac[0] == 2
- ==> CMD_EVENT mode, return H2C_SUCCESS
- The event layout shall be:
- event: parm:
- mac[0] = mac[5];
- mac[1] = mac[4];
- mac[2] = event's seq no, starting from 1 to parm's marc[3]
- mac[3] = mac[2];
- mac[4] = mac[1];
- mac[5] = mac[0];
- s0 = swap16(s0) - event.mac[2];
- s1 = s1 + event.mac[2];
- w0 = swap32(w0);
- b0 = b1
- s2 = s0 + event.mac[2]
- b1 = b0
- w1 = swap32(w1) - event.mac[2];
-
- parm->mac[3] is the total event counts that host requested.
- event will be the same with the cmd's param.
-*/
+ * Notes: This command is used for H2C/C2H loopback testing
+ *
+ * mac[0] == 0
+ * ==> CMD mode, return H2C_SUCCESS.
+ * The following condition must be true under CMD mode
+ * mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ * s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ * s2 == (b1 << 8 | b0);
+ *
+ * mac[0] == 1
+ * ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+ *
+ * The rsp layout shall be:
+ * rsp: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = mac[3];
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = s1;
+ * s1 = swap16(s0);
+ * w0 = swap32(w1);
+ * b0 = b1
+ * s2 = s0 + s1
+ * b1 = b0
+ * w1 = w0
+ *
+ * mac[0] == 2
+ * ==> CMD_EVENT mode, return H2C_SUCCESS
+ * The event layout shall be:
+ * event: parm:
+ * mac[0] = mac[5];
+ * mac[1] = mac[4];
+ * mac[2] = event's seq no, starting from 1 to parm's marc[3]
+ * mac[3] = mac[2];
+ * mac[4] = mac[1];
+ * mac[5] = mac[0];
+ * s0 = swap16(s0) - event.mac[2];
+ * s1 = s1 + event.mac[2];
+ * w0 = swap32(w0);
+ * b0 = b1
+ * s2 = s0 + event.mac[2]
+ * b1 = b0
+ * w1 = swap32(w1) - event.mac[2];
+ *
+ * parm->mac[3] is the total event counts that host requested.
+ * event will be the same with the cmd's param.
+ */
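Purely to make the CMD-mode conditions above concrete, here is a check reconstructed from the comment alone. The driver's real loopback parameter layout is not part of this header excerpt, so the struct below is hypothetical (fixed-width types assumed from <linux/types.h>):

struct loopback_parm {          /* reconstructed from the comment, not real */
        u8  mac[6];
        u16 s0, s1, s2;
        u32 w0, w1;
        u8  b0, b1;
};

static int loopback_cmd_mode_ok(const struct loopback_parm *p)
{
        return p->mac[0] == 0 && p->mac[5] == 0 &&
               p->mac[1] == p->mac[4] && p->mac[2] == p->mac[3] &&
               p->s0 == 0x1234 && p->s1 == 0xabcd &&
               p->w0 == 0x78563412 && p->w1 == 0x5aa5def7 &&
               p->s2 == (u16)((p->b1 << 8) | p->b0);
}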
/* CMD param Format for driver extra cmd handler */
struct drvextra_cmd_parm {
@@ -285,15 +289,15 @@ struct SetChannelPlan_param {
};
/*
-
-Result:
-0x00: success
-0x01: success, and check Response.
-0x02: cmd ignored due to duplicated sequcne number
-0x03: cmd dropped due to invalid cmd code
-0x04: reserved.
-
-*/
+ *
+ * Result:
+ * 0x00: success
+ * 0x01: success, and check Response.
+ * 0x02: cmd ignored due to duplicated sequence number
+ * 0x03: cmd dropped due to invalid cmd code
+ * 0x04: reserved.
+ *
+ */
#define H2C_SUCCESS 0x00
#define H2C_SUCCESS_RSP 0x01
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 9cc4b8c7c166..4873ba49900c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -106,9 +106,9 @@ extern u32 GlobalDebugLevel;
(((__i + 1) % 4) == 0) ? \
" " : " "); \
if (((__i + 1) % 16) == 0) \
- printk("\n"); \
+ pr_cont("\n"); \
} \
- printk("\n"); \
+ pr_cont("\n"); \
} \
} while (0)
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 5dd73841dd9e..11d1cb6de506 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -55,7 +55,8 @@
/* This variable is initiailzed through EEPROM or registry, */
/* however, its definition may be different with that in EEPROM for */
/* EEPROM size consideration. So, we have to perform proper translation
- * between them. */
+ * between them.
+ */
/* Besides, CustomerID of registry has precedence of that of EEPROM. */
/* defined below. 060703, by rcnjko. */
enum RT_CUSTOMER_ID {
@@ -79,7 +80,8 @@ enum RT_CUSTOMER_ID {
RT_CID_819x_Sitecom = 17,
RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded
* for CCX functions, but for test behavior like retry
- * limit and tx report. By Bruce, 2009-02-17. */
+ * limit and tx report. By Bruce, 2009-02-17.
+ */
RT_CID_819x_Lenovo = 19,
RT_CID_819x_QMI = 20,
RT_CID_819x_Edimax_Belkin = 21,
@@ -89,7 +91,8 @@ enum RT_CUSTOMER_ID {
RT_CID_819x_Acer = 25,
RT_CID_819x_AzWave_ASUS = 26,
RT_CID_819x_AzWave = 27, /* For AzWave in PCIe,i
- * The ID is AzWave use and not only Asus */
+ * The ID is AzWave use and not only Asus
+ */
RT_CID_819x_HP = 28,
RT_CID_819x_WNC_COREGA = 29,
RT_CID_819x_Arcadyan_Belkin = 30,
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 1c5ebde97091..e798e794d962 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -22,42 +22,42 @@
#include <linux/sem.h>
/*
-Used to report a bss has been scanned
-*/
+ * Used to report a bss has been scanned
+ */
struct survey_event {
struct wlan_bssid_ex bss;
};
/*
-Used to report that the requested site survey has been done.
-
-bss_cnt indicates the number of bss that has been reported.
-
-
-*/
+ * Used to report that the requested site survey has been done.
+ *
+ * bss_cnt indicates the number of bss that have been reported.
+ *
+ *
+ */
struct surveydone_event {
unsigned int bss_cnt;
};
/*
-Used to report the link result of joinning the given bss
-
-
-join_res:
--1: authentication fail
--2: association fail
-> 0: TID
-
-*/
+ * Used to report the link result of joining the given bss
+ *
+ *
+ * join_res:
+ * -1: authentication fail
+ * -2: association fail
+ * > 0: TID
+ *
+ */
struct joinbss_event {
struct wlan_network network;
};
/*
-Used to report a given STA has joinned the created BSS.
-It is used in AP/Ad-HoC(M) mode.
-*/
+ * Used to report a given STA has joined the created BSS.
+ * It is used in AP/Ad-HoC(M) mode.
+ */
struct stassoc_event {
unsigned char macaddr[6];
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index 607d1ba56a46..884e1397755a 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -58,7 +58,8 @@ struct LED_871x {
enum LED_STATE_871x CurrLedState; /* Current LED state. */
enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
- * either RTW_LED_ON or RTW_LED_OFF are. */
+ * either RTW_LED_ON or RTW_LED_OFF are.
+ */
u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
@@ -75,7 +76,8 @@ struct LED_871x {
u8 bLedLinkBlinkInProgress;
u8 bLedScanBlinkInProgress;
struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
- * manipulate H/W to blink LED. */
+ * manipulate H/W to blink LED.
+ */
};
#define IS_LED_WPS_BLINKING(_LED_871x) \
@@ -91,7 +93,6 @@ struct led_priv {
/* add for led control */
};
-void BlinkTimerCallback(unsigned long data);
void BlinkWorkItemCallback(struct work_struct *work);
void ResetLedStatus(struct LED_871x *pLed);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 5c5d0ae8bdd1..e6d4175af3a2 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -70,25 +70,28 @@ enum rt_scan_type {
enum SCAN_RESULT_TYPE {
SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
SCAN_RESULT_ALL = 1, /* Will return all the scanned device,
- * include AP. */
+ * include AP.
+ */
SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
- * device. */
+ * device.
+ */
/* If this device is Miracast sink
* device, it will just return all the
- * Miracast source devices. */
+ * Miracast source devices.
+ */
};
/*
-there are several "locks" in mlme_priv,
-since mlme_priv is a shared resource between many threads,
-like ISR/Call-Back functions, the OID handlers, and even timer functions.
-
-Each _queue has its own locks, already.
-Other items are protected by mlme_priv.lock.
-
-To avoid possible dead lock, any thread trying to modifiying mlme_priv
-SHALL not lock up more than one lock at a time!
-*/
+ * there are several "locks" in mlme_priv,
+ * since mlme_priv is a shared resource between many threads,
+ * like ISR/Call-Back functions, the OID handlers, and even timer functions.
+ *
+ * Each _queue has its own locks, already.
+ * Other items are protected by mlme_priv.lock.
+ *
+ * To avoid possible deadlock, any thread trying to modify mlme_priv
+ * SHALL not lock up more than one lock at a time!
+ */
#define traffic_threshold 10
#define traffic_scan_period 500
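A sketch of the locking rule spelled out above: copy what is needed under mlme_priv.lock, release it, and only then take a queue's own lock. The field names follow this driver, but the helper itself is invented.

static int example_peek_state_then_scan_queue(struct mlme_priv *pmlmepriv)
{
        int fw_state;

        spin_lock_bh(&pmlmepriv->lock);
        fw_state = pmlmepriv->fw_state;         /* copied under mlme_priv.lock */
        spin_unlock_bh(&pmlmepriv->lock);

        /* only after the first lock is dropped is a _queue lock taken */
        spin_lock_bh(&pmlmepriv->scanned_queue.lock);
        /* ... walk scanned_queue ... */
        spin_unlock_bh(&pmlmepriv->scanned_queue.lock);

        return fw_state;
}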
@@ -102,9 +105,11 @@ struct rt_link_detect {
bool bRxBusyTraffic;
bool bHigherBusyTraffic; /* For interrupt migration purpose. */
bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according
- * to Rx traffic. */
+ * to Rx traffic.
+ */
bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according
- * to Tx traffic. */
+ * to Tx traffic.
+ */
};
struct mlme_priv {
@@ -164,7 +169,8 @@ struct mlme_priv {
#if defined(CONFIG_88EU_AP_MODE)
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
- * in 802.11g BSS) */
+ * in 802.11g BSS)
+ */
int num_sta_non_erp;
/* Number of associated stations that do not support Short Slot Time */
@@ -325,10 +331,10 @@ void rtw_update_registrypriv_dev_network(struct adapter *adapter);
void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
-void _rtw_join_timeout_handler(unsigned long data);
-void rtw_scan_timeout_handler(unsigned long data);
+void _rtw_join_timeout_handler(struct timer_list *t);
+void rtw_scan_timeout_handler(struct timer_list *t);
-void rtw_dynamic_check_timer_handlder(unsigned long data);
+void rtw_dynamic_check_timer_handlder(struct timer_list *t);
#define rtw_is_scan_deny(adapter) false
#define rtw_clear_scan_deny(adapter) do {} while (0)
#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
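These prototype changes are part of the timer API conversion visible elsewhere in this diff: callbacks now receive the struct timer_list itself and recover their context with from_timer() instead of casting an unsigned long. A condensed sketch of the new pattern for the association timer; this is one possible way to get back to the adapter, and the driver may equally use a nested from_timer().

/* setup side (see mlme_linux.c below):
 *      timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
 */
void _rtw_join_timeout_handler(struct timer_list *t)
{
        struct mlme_priv *pmlmepriv = from_timer(pmlmepriv, t, assoc_timer);
        struct adapter *adapter = container_of(pmlmepriv, struct adapter,
                                               mlmepriv);
        /* ... timeout handling, with the adapter recovered from the timer ... */
}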
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 1b1caaf583c9..118bf5509d97 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -101,9 +101,11 @@ extern unsigned char WMM_PARA_OUI[];
/* Channel Plan Type. */
/* Note: */
/* We just add new channel plan when the new channel plan is different
- * from any of the following channel plan. */
+ * from any of the following channel plan.
+ */
/* If you just want to customize the actions(scan period or join actions)
- * about one of the channel plan, */
+ * about one of the channel plan,
+ */
/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
enum RT_CHANNEL_DOMAIN {
/* old channel plan mapping ===== */
@@ -319,7 +321,8 @@ struct mlme_ext_info {
u32 authModeToggle;
u32 enc_algo;/* encrypt algorithm; */
u32 key_index; /* this is only valid for legacy wep,
- * 0~3 for key id. */
+ * 0~3 for key id.
+ */
u32 iv;
u8 chg_txt[128];
u16 aid;
@@ -353,16 +356,19 @@ struct mlme_ext_info {
struct HT_info_element HT_info;
struct wlan_bssid_ex network;/* join network or bss_network,
* if in ap mode, it is the same
- * as cur_network.network */
+ * as cur_network.network
+ */
struct FW_Sta_Info FW_sta_info[NUM_STA];
};
/* The channel information about this channel including joining,
- * scanning, and power constraints. */
+ * scanning, and power constraints.
+ */
struct rt_channel_info {
u8 ChannelNum; /* The channel number. */
enum rt_scan_type ScanType; /* Scan type such as passive
- * or active scan. */
+ * or active scan.
+ */
u32 rx_count;
};
@@ -413,7 +419,8 @@ struct mlme_ext_priv {
unsigned char cur_wireless_mode; /* NETWORK_TYPE */
unsigned char oper_channel; /* saved chan info when call
- * set_channel_bw */
+ * set_channel_bw
+ */
unsigned char oper_bwmode;
unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
@@ -427,7 +434,8 @@ struct mlme_ext_priv {
struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including
* current scan/connecting/connected
* related info. For ap mode,
- * network includes ap's cap_info*/
+ * network includes ap's cap_info
+ */
struct timer_list survey_timer;
struct timer_list link_timer;
u16 chan_scan_time;
@@ -572,9 +580,9 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter,
void linked_status_chk(struct adapter *padapter);
-void survey_timer_hdl(unsigned long data);
-void link_timer_hdl(unsigned long data);
-void addba_timer_hdl(unsigned long data);
+void survey_timer_hdl(struct timer_list *t);
+void link_timer_hdl(struct timer_list *t);
+void addba_timer_hdl(struct timer_list *t);
#define set_survey_timer(mlmeext, ms) \
mod_timer(&mlmeext->survey_timer, jiffies + \
@@ -690,7 +698,8 @@ enum rtw_c2h_event {
_C2HBCN_EVT_,
_ReportPwrState_EVT_, /* filen: only for PCIE, USB */
_CloseRF_EVT_, /* filen: only for PCIE,
- * work around ASPM */
+ * work around ASPM
+ */
MAX_C2HEVT
};
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 4872a21b3103..aa353aefed3d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -521,14 +521,16 @@
#define bCCKRxPhase 0x4
#if (RTL92SE_FPGA_VERIFY == 1)
#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
- Reg 0x824 rFPGA0_XA_HSSIParameter2 */
+ * Reg 0x824 rFPGA0_XA_HSSIParameter2
+ */
#else
#define bLSSIReadAddress 0x7f800000 /* T65 RF */
#endif
#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
#if (RTL92SE_FPGA_VERIFY == 1)
#define bLSSIReadBackData 0xfff /* Reg 0x8a0
- rFPGA0_XA_LSSIReadBack */
+ * rFPGA0_XA_LSSIReadBack
+ */
#else
#define bLSSIReadBackData 0xfffff /* T65 RF */
#endif
@@ -548,13 +550,16 @@
#define bDA6Swing 0x380000
#define bADClkPhase 0x4000000 /* Reg 0x880
- rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+ * rFPGA0_AnalogParameter1 20/40 CCK
+ * support switch 40/80 BB MHZ
+ */
#define b80MClkDelay 0x18000000 /* Useless */
#define bAFEWatchDogEnable 0x20000000
#define bXtalCap01 0xc0000000 /* Reg 0x884
- rFPGA0_AnalogParameter2 Crystal cap */
+ * rFPGA0_AnalogParameter2 Crystal cap
+ */
#define bXtalCap23 0x3
#define bXtalCap92x 0x0f000000
#define bXtalCap 0x0f000000
@@ -598,7 +603,8 @@
#define bCCKTxOn 0x1
#define bOFDMTxOn 0x2
#define bDebugPage 0xfff /* reset debug page and HWord,
- * LWord */
+ * LWord
+ */
#define bDebugItem 0xff /* reset debug page and LWord */
#define bAntL 0x10
#define bAntNonHT 0x100
@@ -1071,7 +1077,8 @@
#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
- * packet size greater than 1536 */
+ * packet size greater than 1536
+ */
/*--------------------------Define Parameters-------------------------------*/
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index 18a9e744fcbe..f39e90cfc031 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -51,11 +51,11 @@ enum power_mgnt {
};
/*
- BIT[2:0] = HW state
- BIT[3] = Protocol PS state, 0: register active state,
- 1: register sleep state
- BIT[4] = sub-state
-*/
+ * BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 0: register active state,
+ * 1: register sleep state
+ * BIT[4] = sub-state
+ */
#define PS_DPS BIT(0)
#define PS_LCLK (PS_DPS)
@@ -115,9 +115,11 @@ enum rt_rf_power_state {
#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW*/
#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock
- * Req in initialization. */
+ * Req in initialization.
+ */
#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R
- * if no packet is RX or TX. */
+ * if no packet is RX or TX.
+ */
#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
@@ -145,7 +147,8 @@ struct pwrctrl_priv {
struct mutex mutex_lock;
volatile u8 rpwm; /* requested power state for fw */
volatile u8 cpwm; /* fw current power state. updated when
- * 1. read from HCPWM 2. driver lowers power level */
+ * 1. read from HCPWM 2. driver lowers power level
+ */
volatile u8 tog; /* toggling */
volatile u8 cpwm_tog; /* toggling */
@@ -170,7 +173,8 @@ struct pwrctrl_priv {
u8 ips_mode;
u8 ips_mode_req; /* used to accept the mode setting request,
- * will update to ipsmode later */
+ * will update to ipsmode later
+ */
uint bips_processing;
unsigned long ips_deny_time; /* will deny IPS when system time less than this */
u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index 45a77f6f8427..576dff68d0dc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -19,7 +19,8 @@
struct qos_priv {
unsigned int qos_option; /* bit mask option: u-apsd,
- * s-apsd, ts, block ack... */
+ * s-apsd, ts, block ack...
+ */
};
#endif /* _RTL871X_QOS_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 121150860450..7e85f700acb3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -46,23 +46,23 @@ struct recv_reorder_ctrl {
struct stainfo_rxcache {
u16 tid_rxseq[16];
/*
- unsigned short tid0_rxseq;
- unsigned short tid1_rxseq;
- unsigned short tid2_rxseq;
- unsigned short tid3_rxseq;
- unsigned short tid4_rxseq;
- unsigned short tid5_rxseq;
- unsigned short tid6_rxseq;
- unsigned short tid7_rxseq;
- unsigned short tid8_rxseq;
- unsigned short tid9_rxseq;
- unsigned short tid10_rxseq;
- unsigned short tid11_rxseq;
- unsigned short tid12_rxseq;
- unsigned short tid13_rxseq;
- unsigned short tid14_rxseq;
- unsigned short tid15_rxseq;
-*/
+ * unsigned short tid0_rxseq;
+ * unsigned short tid1_rxseq;
+ * unsigned short tid2_rxseq;
+ * unsigned short tid3_rxseq;
+ * unsigned short tid4_rxseq;
+ * unsigned short tid5_rxseq;
+ * unsigned short tid6_rxseq;
+ * unsigned short tid7_rxseq;
+ * unsigned short tid8_rxseq;
+ * unsigned short tid9_rxseq;
+ * unsigned short tid10_rxseq;
+ * unsigned short tid11_rxseq;
+ * unsigned short tid12_rxseq;
+ * unsigned short tid13_rxseq;
+ * unsigned short tid14_rxseq;
+ * unsigned short tid15_rxseq;
+ */
};
struct signal_stat {
@@ -79,7 +79,8 @@ struct phy_info {
u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
s8 RxPower; /* in dBm Translate from PWdB */
/* Real power in dBm for this packet, no beautification and aggregation.
- * Keep this raw info to be used for the other procedures. */
+ * Keep this raw info to be used for the other procedures.
+ */
s8 recvpower;
u8 BTRxRSSIPercentage;
u8 SignalStrength; /* in 0-100 index. */
@@ -106,7 +107,8 @@ struct rx_pkt_attrib {
u8 privacy; /* in frame_ctrl field */
u8 bdecrypted;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorithm */
+ * indicate the encrypt algorithm
+ */
u8 iv_len;
u8 icv_len;
u8 crc_err;
@@ -152,12 +154,12 @@ struct recv_stat {
};
/*
-accesser of recv_priv: rtw_recv_entry(dispatch / passive level);
-recv_thread(passive) ; returnpkt(dispatch)
-; halt(passive) ;
-
-using enter_critical section to protect
-*/
+ * accessor of recv_priv: rtw_recv_entry(dispatch / passive level);
+ * recv_thread(passive) ; returnpkt(dispatch)
+ * ; halt(passive) ;
+ *
+ * using enter_critical section to protect
+ */
struct recv_priv {
struct __queue free_recv_queue;
struct __queue recv_pending_queue;
@@ -209,20 +211,20 @@ struct recv_buf {
};
/*
- head ----->
-
- data ----->
-
- payload
-
- tail ----->
-
-
- end ----->
-
- len = (unsigned int )(tail - data);
-
-*/
+ * head ----->
+ *
+ * data ----->
+ *
+ * payload
+ *
+ * tail ----->
+ *
+ *
+ * end ----->
+ *
+ * len = (unsigned int )(tail - data);
+ *
+ */
struct recv_frame {
struct list_head list;
struct sk_buff *pkt;
@@ -247,7 +249,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
-void rtw_reordering_ctrl_timeout_handler(unsigned long data);
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
{
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 66896af02042..0718a29e7c9d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -31,7 +31,8 @@
#define RTL8711_RF_DEF_SENS 4
/* We now define the following channels as the max channels in each
- * channel plan. */
+ * channel plan.
+ */
/* 2G, total 14 chnls */
/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
#define MAX_CHANNEL_NUM_2G 14
@@ -65,7 +66,8 @@ enum capability {
cChannelAgility = 0x0080,
cSpectrumMgnt = 0x0100,
cQos = 0x0200, /* For HCCA, use with CF-Pollable
- * and CF-PollReq */
+ * and CF-PollReq
+ */
cShortSlotTime = 0x0400,
cAPSD = 0x0800,
cRM = 0x1000, /* RRM (Radio Request Measurement) */
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index 74fe664787e5..a0c6cf706218 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -100,20 +100,26 @@ struct rt_pmkid_list {
struct security_priv {
u32 dot11AuthAlgrthm; /* 802.11 auth, could be open,
- * shared, 8021x and authswitch */
+ * shared, 8021x and authswitch
+ */
u32 dot11PrivacyAlgrthm; /* This specify the privacy for
- * shared auth. algorithm. */
+ * shared auth. algorithm.
+ */
/* WEP */
u32 dot11PrivacyKeyIndex; /* this is only valid for legendary
- * wep, 0~3 for key id.(tx key index) */
+ * wep, 0~3 for key id.(tx key index)
+ */
union Keytype dot11DefKey[4]; /* this is only valid for def. key */
u32 dot11DefKeylen[4];
u32 dot118021XGrpPrivacy; /* This specify the privacy algthm.
- * used for Grp key */
+ * used for Grp key
+ */
u32 dot118021XGrpKeyid; /* key id used for Grp Key
- * ( tx key index) */
+ * ( tx key index)
+ */
union Keytype dot118021XGrpKey[4]; /* 802.1x Group Key,
- * for inx0 and inx1 */
+ * for inx0 and inx1
+ */
union Keytype dot118021XGrptxmickey[4];
union Keytype dot118021XGrprxmickey[4];
union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
@@ -134,10 +140,12 @@ struct security_priv {
u8 bcheck_grpkey;
u8 bgrpkey_handshake;
s32 hw_decrypted;/* if the rx packets is hw_decrypted==false,i
- * it means the hw has not been ready. */
+ * it means the hw has not been ready.
+ */
/* keeps the auth_type & enc_status from upper layer
- * ioctl(wpa_supplicant or wzc) */
+ * ioctl(wpa_supplicant or wzc)
+ */
u32 ndisauthtype; /* NDIS_802_11_AUTHENTICATION_MODE */
u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
@@ -256,7 +264,8 @@ static inline u32 rotr(u32 val, int bits)
/* ===== start - public domain SHA256 implementation ===== */
/* This is based on SHA256 implementation in LibTomCrypt that was released into
- * public domain by Tom St Denis. */
+ * public domain by Tom St Denis.
+ */
/* the K array */
static const unsigned long K[64] = {
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index dd6b7a9a8d4a..b4b3d13ace9e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -115,11 +115,13 @@ struct pkt_attrib {
u16 seqnum;
u16 hdrlen; /* the WLAN Header Len */
u32 pktlen; /* the original 802.3 pkt raw_data len (not include
- * ether_hdr data) */
+ * ether_hdr data)
+ */
u32 last_txcmdsz;
u8 nr_frags;
u8 encrypt; /* when 0 indicate no encrypt. when non-zero,
- * indicate the encrypt algorith */
+ * indicate the encrypt algorithm
+ */
u8 iv_len;
u8 icv_len;
u8 iv[18];
@@ -234,7 +236,8 @@ struct sta_xmit_priv {
spinlock_t lock;
int option;
int apsd_setting; /* When bit mask is on, the associated edca
- * queue supports APSD. */
+ * queue supports APSD.
+ */
struct tx_servq be_q; /* priority == 0,3 */
struct tx_servq bk_q; /* priority == 1,2 */
struct tx_servq vi_q; /* priority == 4,5 */
@@ -280,7 +283,8 @@ struct xmit_priv {
u8 hwxmit_entry;
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength
* from large to small. it's value is 0->vo,
- * 1->vi, 2->be, 3->bk. */
+ * 1->vi, 2->be, 3->bk.
+ */
u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
/* per AC pending irp */
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 42a035123365..8f01deed6e4a 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -126,7 +126,8 @@ struct sta_info {
/* Notes: */
/* STA_Mode: */
/* curr_network(mlme_priv/security_priv/qos/ht) +
- * sta_info: (STA & AP) CAP/INFO */
+ * sta_info: (STA & AP) CAP/INFO
+ */
/* scan_q: AP CAP/INFO */
/* AP_Mode: */
@@ -184,7 +185,8 @@ struct sta_info {
/* ================ODM Relative Info======================= */
/* Please be careful, don't declare too much structure here.
- * It will cost memory * STA support num. */
+ * It will cost memory * STA support num.
+ */
/* 2011/10/20 MH Add for ODM STA info. */
/* Driver Write */
u8 bValid; /* record the sta status link or not? */
@@ -318,9 +320,11 @@ struct sta_priv {
struct sta_info *sta_aid[NUM_STA];
u16 sta_dz_bitmap;/* only support 15 stations, station aid bitmap
- * for sleeping sta. */
+ * for sleeping sta.
+ */
u16 tim_bitmap; /* only support 15 stations, aid=0~15 mapping
- * bit0~bit15 */
+ * bit0~bit15
+ */
u16 max_num_sta;
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index cb46d353327b..084a246eec19 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -35,7 +35,8 @@
/* This value is tested by WiFi 11n Test Plan 5.2.3. */
/* This test verifies the WLAN NIC can update the NAV through sending
- * the CTS with large duration. */
+ * the CTS with large duration.
+ */
#define WiFiNavUpperUs 30000 /* 30 ms */
enum WIFI_FRAME_TYPE {
@@ -459,14 +460,14 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
/*
-#define _NO_PRIVACY_ 0
-#define _WEP_40_PRIVACY_ 1
-#define _TKIP_PRIVACY_ 2
-#define _WRAP_PRIVACY_ 3
-#define _CCMP_PRIVACY_ 4
-#define _WEP_104_PRIVACY_ 5
-#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
-*/
+ * #define _NO_PRIVACY_ 0
+ * #define _WEP_40_PRIVACY_ 1
+ * #define _TKIP_PRIVACY_ 2
+ * #define _WRAP_PRIVACY_ 3
+ * #define _CCMP_PRIVACY_ 4
+ * #define _WEP_104_PRIVACY_ 5
+ * #define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
+ */
/*-----------------------------------------------------------------------------
Below is the definition for WMM
@@ -771,10 +772,12 @@ enum ht_cap_ampdu_factor {
#define P2P_PROVISIONING_SCAN_CNT 3
/* default value, used when: (1)p2p disabled or (2)p2p enabled
- * but only do 1 scan phase */
+ * but only do 1 scan phase
+ */
#define P2P_FINDPHASE_EX_NONE 0
/* used when p2p enabled and want to do 1 scan phase and
- * P2P_FINDPHASE_EX_MAX-1 find phase */
+ * P2P_FINDPHASE_EX_MAX-1 find phase
+ */
#define P2P_FINDPHASE_EX_FULL 1
#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
#define P2P_FINDPHASE_EX_MAX 4
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index e1931dd04da0..d7b25d2f933a 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -86,7 +86,8 @@ struct ndis_802_11_var_ie {
* + NDIS_802_11_LENGTH_RATES_EX + IELength
*
* Except the IELength, all other fields are fixed length.
- * Therefore, we can define a macro to represent the partial sum. */
+ * Therefore, we can define a macro to represent the partial sum.
+ */
enum ndis_802_11_auth_mode {
Ndis802_11AuthModeOpen,
@@ -130,7 +131,8 @@ enum ndis_802_11_reload_def {
struct ndis_802_11_wep {
u32 Length; /* Length of this structure */
u32 KeyIndex; /* 0 is the per-client key,
- * 1-N are the global keys */
+ * 1-N are the global keys
+ */
u32 KeyLength; /* length of key in bytes */
u8 KeyMaterial[16];/* variable len depending on above field */
};
@@ -140,7 +142,8 @@ enum ndis_802_11_status_type {
Ndis802_11StatusType_MediaStreamMode,
Ndis802_11StatusType_PMKID_CandidateList,
Ndis802_11StatusTypeMax /* not a real type, defined as
- * an upper bound */
+ * an upper bound
+ */
};
/* mask for authentication/integrity fields */
@@ -166,7 +169,8 @@ struct wlan_phy_info {
struct wlan_bcn_info {
/* these infor get from rtw_get_encrypt_info when
- * * translate scan to UI */
+ * * translate scan to UI
+ */
u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
int group_cipher; /* WPA/WPA2 group cipher */
int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
@@ -178,8 +182,8 @@ struct wlan_bcn_info {
};
/* temporally add #pragma pack for structure alignment issue of
-* struct wlan_bssid_ex and get_struct wlan_bssid_ex_sz()
-*/
+ * struct wlan_bssid_ex and get_struct wlan_bssid_ex_sz()
+ */
struct wlan_bssid_ex {
u32 Length;
unsigned char MacAddress[ETH_ALEN];
@@ -194,7 +198,8 @@ struct wlan_bssid_ex {
struct wlan_phy_info PhyInfo;
u32 IELength;
u8 IEs[MAX_IE_SZ]; /* timestamp, beacon interval, and
- * capability information) */
+ * capability information)
+ */
} __packed;
static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
@@ -206,7 +211,8 @@ struct wlan_network {
struct list_head list;
int network_type; /* refer to ieee80211.h for WIRELESS_11A/B/G */
int fixed; /* set fixed when not to be removed
- * in site-surveying */
+ * in site-surveying
+ */
unsigned long last_scanned; /* timestamp for the network */
int aid; /* will only be valid when a BSS is joinned. */
int join_res;
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index bc756267c7fc..831c1ecc5e28 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -24,12 +24,10 @@ void rtw_init_mlme_timer(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- setup_timer(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->dynamic_chk_timer,
- rtw_dynamic_check_timer_handlder, (unsigned long)padapter);
+ timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer,
+ rtw_dynamic_check_timer_handlder, 0);
}
void rtw_os_indicate_connect(struct adapter *adapter)
@@ -125,18 +123,15 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
{
- setup_timer(&psta->addba_retry_timer, addba_timer_hdl,
- (unsigned long)psta);
+ timer_setup(&psta->addba_retry_timer, addba_timer_hdl, 0);
}
void init_mlme_ext_timer(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- setup_timer(&pmlmeext->survey_timer, survey_timer_hdl,
- (unsigned long)padapter);
- setup_timer(&pmlmeext->link_timer, link_timer_hdl,
- (unsigned long)padapter);
+ timer_setup(&pmlmeext->survey_timer, survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, link_timer_hdl, 0);
}
#ifdef CONFIG_88EU_AP_MODE
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 37fd52d7364f..225c23fc69dc 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -66,34 +66,6 @@ static void mon_recv_decrypted(struct net_device *dev, const u8 *data,
netif_rx(skb);
}
-static void mon_recv_decrypted_recv(struct net_device *dev, const u8 *data,
- int data_len)
-{
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- int hdr_len;
-
- skb = netdev_alloc_skb(dev, data_len);
- if (!skb)
- return;
- memcpy(skb_put(skb, data_len), data, data_len);
-
- /*
- * Frame data is not encrypted. Strip off protection so
- * userspace doesn't think that it is.
- */
-
- hdr = (struct ieee80211_hdr *)skb->data;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- if (ieee80211_has_protected(hdr->frame_control))
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_PROTECTED);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-}
-
static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
int data_len)
{
@@ -110,6 +82,7 @@ static void mon_recv_encrypted(struct net_device *dev, const u8 *data,
void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
{
struct rx_pkt_attrib *attr;
+ int iv_len, icv_len;
int data_len;
u8 *data;
@@ -122,8 +95,11 @@ void rtl88eu_mon_recv_hook(struct net_device *dev, struct recv_frame *frame)
data = frame->pkt->data;
data_len = frame->pkt->len;
+ /* Broadcast and multicast frames don't have attr->{iv,icv}_len set */
+ SET_ICE_IV_LEN(iv_len, icv_len, attr->encrypt);
+
if (attr->bdecrypted)
- mon_recv_decrypted_recv(dev, data, data_len);
+ mon_recv_decrypted(dev, data, data_len, iv_len, icv_len);
else
mon_recv_encrypted(dev, data, data_len);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d14bc2b68d98..bda4ab879f58 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -155,7 +155,6 @@ _recv_indicatepkt_drop:
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- rtw_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ rtw_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 64397b6f1248..7e75030475f7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -407,6 +407,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -ENODEV:
case -ESHUTDOWN:
adapt->bSurpriseRemoved = true;
+ /* fall through */
case -ENOENT:
adapt->bDriverStopped = true;
RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped=true\n"));
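The /* fall through */ annotation added above documents that the missing break after -ESHUTDOWN is intentional, so static checkers (-Wimplicit-fallthrough) stay quiet while both flags still get set. A generic, self-contained illustration of the same pattern; this is not driver code and assumes kernel (or <stdbool.h>/<errno.h>) definitions for bool and the errno constants.

static void example_handle_urb_status(int status, bool *removed, bool *stopped)
{
        switch (status) {
        case -ENODEV:
        case -ESHUTDOWN:
                *removed = true;
                /* fall through, -ESHUTDOWN must also stop the driver */
        case -ENOENT:
                *stopped = true;
                break;
        default:
                break;
        }
}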
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index 7101fcc8871b..6af519938868 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rtllib-objs := \
dot11d.o \
rtllib_module.o \
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 017fe04ebe2d..88f89d77b511 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -128,12 +128,16 @@ void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
pTriple = (struct chnl_txpow_triple *)(pCoutryIe + 3);
for (i = 0; i < NumTriples; i++) {
if (MaxChnlNum >= pTriple->FirstChnl) {
- netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
+ netdev_info(dev->dev,
+ "%s: Invalid country IE, skip it......1\n",
+ __func__);
return;
}
if (MAX_CHANNEL_NUMBER < (pTriple->FirstChnl +
pTriple->NumChnls)) {
- netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
+ netdev_info(dev->dev,
+ "%s: Invalid country IE, skip it......2\n",
+ __func__);
return;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
index 176a4a2b8b20..75e6ec510555 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
r8192e_pci-objs := \
r8192E_dev.o \
r8192E_phy.o \
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index aca52654825b..d2605158546b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -85,7 +85,7 @@ static struct pci_driver rtl8192_pci_driver = {
static short _rtl92e_is_tx_queue_empty(struct net_device *dev);
static void _rtl92e_watchdog_wq_cb(void *data);
-static void _rtl92e_watchdog_timer_cb(unsigned long data);
+static void _rtl92e_watchdog_timer_cb(struct timer_list *t);
static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
int rate);
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -766,12 +766,12 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
priv->bfirst_init = false;
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
if (priv->rtllib->state != RTLLIB_LINKED)
rtllib_softmac_start_protocol(priv->rtllib, 0);
rtllib_reset_queue(priv->rtllib);
- _rtl92e_watchdog_timer_cb((unsigned long)dev);
+ _rtl92e_watchdog_timer_cb(&priv->watch_dog_timer);
if (!netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1075,13 +1075,10 @@ static short _rtl92e_init(struct net_device *dev)
rtl92e_dm_init(dev);
- setup_timer(&priv->watch_dog_timer,
- _rtl92e_watchdog_timer_cb,
- (unsigned long)dev);
+ timer_setup(&priv->watch_dog_timer, _rtl92e_watchdog_timer_cb, 0);
- setup_timer(&priv->gpio_polling_timer,
- rtl92e_check_rfctrl_gpio_timer,
- (unsigned long)dev);
+ timer_setup(&priv->gpio_polling_timer, rtl92e_check_rfctrl_gpio_timer,
+ 0);
rtl92e_irq_disable(dev);
if (request_irq(dev->irq, _rtl92e_irq, IRQF_SHARED, dev->name, dev)) {
@@ -1531,9 +1528,9 @@ static void _rtl92e_watchdog_wq_cb(void *data)
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
-static void _rtl92e_watchdog_timer_cb(unsigned long data)
+static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
{
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer, jiffies +
@@ -2535,7 +2532,7 @@ static int _rtl92e_pci_probe(struct pci_dev *pdev,
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
RT_TRACE(COMP_INIT, "Driver probe completed\n");
return 0;
@@ -2648,9 +2645,9 @@ bool rtl92e_disable_nic(struct net_device *dev)
module_pci_driver(rtl8192_pci_driver);
-void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
+void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t)
{
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, gpio_polling_timer);
priv->polling_timer_on = 1;
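
The hunks above, and most of the remaining ones in this series, apply the same mechanical conversion from the old setup_timer()/(unsigned long) cookie API to timer_setup()/from_timer(). A minimal standalone sketch of the pattern, using a hypothetical my_priv structure rather than any structure from this patch:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical container; the drivers here embed their timers in
 * r8192_priv, rtllib_device, tx_ts_record, mlme_priv, and so on.
 */
struct my_priv {
	struct timer_list poll_timer;
};

/* New-style callback: it receives the timer itself instead of an
 * (unsigned long) cookie that had to be cast back to the owner.
 */
static void my_poll_cb(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the embedded member. */
	struct my_priv *priv = from_timer(priv, t, poll_timer);

	/* ... work on priv ..., then re-arm if the timer is periodic. */
	mod_timer(&priv->poll_timer, jiffies + msecs_to_jiffies(100));
}

static void my_init(struct my_priv *priv)
{
	/* Replaces setup_timer(&priv->poll_timer, cb, (unsigned long)priv). */
	timer_setup(&priv->poll_timer, my_poll_cb, 0);
}
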
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 9d3089cb6a5a..866fe4d4cb28 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -587,7 +587,7 @@ void rtl92e_tx_enable(struct net_device *);
void rtl92e_hw_sleep_wq(void *data);
void rtl92e_commit(struct net_device *dev);
-void rtl92e_check_rfctrl_gpio_timer(unsigned long data);
+void rtl92e_check_rfctrl_gpio_timer(struct timer_list *t);
void rtl92e_hw_wakeup_wq(void *data);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index b8205ebafd72..9bf95bd0ad13 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -196,7 +196,7 @@ static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
-static void _rtl92e_dm_fsync_timer_callback(unsigned long data);
+static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
/*---------------------Define local function prototype-----------------------*/
@@ -2125,8 +2125,7 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
priv->rtllib->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1;
- setup_timer(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
@@ -2137,10 +2136,10 @@ static void _rtl92e_dm_deinit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-static void _rtl92e_dm_fsync_timer_callback(unsigned long data)
+static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct net_device *dev = priv->rtllib->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 3e3273d3e043..81a68b0b4a7f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -91,7 +91,7 @@ int rtl92e_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
if (priv->polling_timer_on == 0)
- rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
+ rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
if (!netif_running(dev)) {
netdev_info(dev,
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 1720e1b6ae04..eb6d841f7c45 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -528,18 +528,20 @@ void TsInitDelBA(struct rtllib_device *ieee,
}
}
-void BaSetupTimeOut(unsigned long data)
+void BaSetupTimeOut(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
+ TxPendingBARecord.Timer);
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = true;
pTxTs->TxPendingBARecord.bValid = false;
}
-void TxBaInactTimeout(unsigned long data)
+void TxBaInactTimeout(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
+ TxAdmittedBARecord.Timer);
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[pTxTs->num]);
TxTsDeleteBA(ieee, pTxTs);
@@ -548,9 +550,10 @@ void TxBaInactTimeout(unsigned long data)
DELBA_REASON_TIMEOUT);
}
-void RxBaInactTimeout(unsigned long data)
+void RxBaInactTimeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
+ RxAdmittedBARecord.Timer);
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index dcc4eb691889..f839d2447b85 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -16,17 +16,18 @@
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
-static void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(struct timer_list *unused)
{
}
-static void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(struct timer_list *unused)
{
}
-static void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(struct timer_list *t)
{
- struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
+ struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
+ RxPktPendingTimer);
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
@@ -96,9 +97,9 @@ static void RxPktPendingTimeout(unsigned long data)
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
-static void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(struct timer_list *t)
{
- struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
+ struct tx_ts_record *pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
u8 num = pTxTs->num;
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[num]);
@@ -150,24 +151,18 @@ void TSInitialize(struct rtllib_device *ieee)
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
- setup_timer(&pTxTS->TsCommonInfo.SetupTimer,
- TsSetupTimeOut,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
- setup_timer(&pTxTS->TsCommonInfo.InactTimer,
- TsInactTimeout,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
- setup_timer(&pTxTS->TsAddBaTimer,
- TsAddBaProcess,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
- setup_timer(&pTxTS->TxPendingBARecord.Timer,
- BaSetupTimeOut,
- (unsigned long) pTxTS);
- setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
- TxBaInactTimeout,
- (unsigned long) pTxTS);
+ timer_setup(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TxAdmittedBARecord.Timer,
+ TxBaInactTimeout, 0);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List,
@@ -182,21 +177,16 @@ void TSInitialize(struct rtllib_device *ieee)
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
- setup_timer(&pRxTS->TsCommonInfo.SetupTimer,
- TsSetupTimeOut,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
- setup_timer(&pRxTS->TsCommonInfo.InactTimer,
- TsInactTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
- setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
- RxBaInactTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->RxAdmittedBARecord.Timer,
+ RxBaInactTimeout, 0);
- setup_timer(&pRxTS->RxPktPendingTimer,
- RxPktPendingTimeout,
- (unsigned long) pRxTS);
+ timer_setup(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, 0);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List,
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 0042a0f6cf79..c01474a6db1e 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -2113,9 +2113,9 @@ void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
void TsInitDelBA(struct rtllib_device *ieee,
struct ts_common_info *pTsCommonInfo,
enum tr_select TxRxSelect);
-void BaSetupTimeOut(unsigned long data);
-void TxBaInactTimeout(unsigned long data);
-void RxBaInactTimeout(unsigned long data);
+void BaSetupTimeOut(struct timer_list *t);
+void TxBaInactTimeout(struct timer_list *t);
+void RxBaInactTimeout(struct timer_list *t);
void ResetBaEntry(struct ba_record *pBA);
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index e4be85af31e7..c2b9ffba354a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -393,10 +393,10 @@ static void rtllib_send_beacon(struct rtllib_device *ieee)
}
-static void rtllib_send_beacon_cb(unsigned long _ieee)
+static void rtllib_send_beacon_cb(struct timer_list *t)
{
struct rtllib_device *ieee =
- (struct rtllib_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1427,9 +1427,11 @@ static void rtllib_associate_abort(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void rtllib_associate_abort_cb(unsigned long dev)
+static void rtllib_associate_abort_cb(struct timer_list *t)
{
- rtllib_associate_abort((struct rtllib_device *) dev);
+ struct rtllib_device *dev = from_timer(dev, t, associate_timer);
+
+ rtllib_associate_abort(dev);
}
static void rtllib_associate_step1(struct rtllib_device *ieee, u8 *daddr)
@@ -2811,8 +2813,9 @@ exit:
static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
{
- const u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
+ static const u8 broadcast_addr[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
struct sk_buff *skb;
struct rtllib_probe_response *b;
@@ -3011,13 +3014,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer,
- rtllib_associate_abort_cb,
- (unsigned long) ieee);
+ timer_setup(&ieee->associate_timer, rtllib_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer,
- rtllib_send_beacon_cb,
- (unsigned long) ieee);
+ timer_setup(&ieee->beacon_timer, rtllib_send_beacon_cb, 0);
INIT_DELAYED_WORK_RSL(&ieee->link_change_wq,
(void *)rtllib_link_change_wq, ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index f7eba01b5d15..03fbff067fa4 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -694,8 +694,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
deauth = true;
- /* leave break out intentionly */
-
+ /* fall through */
case IW_MLME_DISASSOC:
if (deauth)
netdev_info(ieee->dev, "disauth packet !\n");
diff --git a/drivers/staging/rtl8192u/Makefile b/drivers/staging/rtl8192u/Makefile
index 703c1505ea5f..3022728a364c 100644
--- a/drivers/staging/rtl8192u/Makefile
+++ b/drivers/staging/rtl8192u/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
NIC_SELECT = RTL8192U
ccflags-y := -std=gnu89
diff --git a/drivers/staging/rtl8192u/ieee80211/Makefile b/drivers/staging/rtl8192u/ieee80211/Makefile
index 9e3f432e5355..0d4d6489f767 100644
--- a/drivers/staging/rtl8192u/ieee80211/Makefile
+++ b/drivers/staging/rtl8192u/ieee80211/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
NIC_SELECT = RTL8192U
ccflags-y := -O2
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 00b6052fbbac..64b13a5da3cb 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Implement 802.11d. */
#include "dot11d.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.h b/drivers/staging/rtl8192u/ieee80211/dot11d.h
index 8ae673b217d8..88bc298305bd 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.h
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INC_DOT11D_H
#define __INC_DOT11D_H
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index b062cad052b9..3addaa65085a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -323,7 +323,7 @@ typedef struct ieee_param {
u8 key[0];
} crypt;
} u;
-}ieee_param;
+} ieee_param;
// linux under 2.6.9 release may not support it, so modify it for common use
@@ -412,15 +412,15 @@ typedef struct ieee_param {
#define IEEE80211_QCTL_TID 0x000F
#define FC_QOS_BIT BIT(7)
-#define IsDataFrame(pdu) ( ((pdu[0] & 0x0C)==0x08) ? true : false )
-#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0]&FC_QOS_BIT)) )
+#define IsDataFrame(pdu) (((pdu[0] & 0x0C) == 0x08) ? true : false)
+#define IsLegacyDataFrame(pdu) (IsDataFrame(pdu) && (!(pdu[0] & FC_QOS_BIT)))
//added by wb. Is this right?
-#define IsQoSDataFrame(pframe) ((*(u16 *)pframe&(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA))
-#define Frame_Order(pframe) (*(u16 *)pframe&IEEE80211_FCTL_ORDER)
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
+#define IsQoSDataFrame(pframe) ((*(u16 *)pframe & (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
+#define Frame_Order(pframe) (*(u16 *)pframe & IEEE80211_FCTL_ORDER)
+#define SN_LESS(a, b) (((a - b) & 0x800) != 0)
#define SN_EQUAL(a, b) (a == b)
#define MAX_DEV_ADDR_SIZE 8
-typedef enum _ACT_CATEGORY{
+typedef enum _ACT_CATEGORY {
ACT_CAT_QOS = 1,
ACT_CAT_DLS = 2,
ACT_CAT_BA = 3,
@@ -428,24 +428,24 @@ typedef enum _ACT_CATEGORY{
ACT_CAT_WMM = 17,
} ACT_CATEGORY, *PACT_CATEGORY;
-typedef enum _TS_ACTION{
+typedef enum _TS_ACTION {
ACT_ADDTSREQ = 0,
ACT_ADDTSRSP = 1,
ACT_DELTS = 2,
ACT_SCHEDULE = 3,
} TS_ACTION, *PTS_ACTION;
-typedef enum _BA_ACTION{
+typedef enum _BA_ACTION {
ACT_ADDBAREQ = 0,
ACT_ADDBARSP = 1,
ACT_DELBA = 2,
} BA_ACTION, *PBA_ACTION;
-typedef enum _InitialGainOpType{
- IG_Backup=0,
+typedef enum _InitialGainOpType {
+ IG_Backup = 0,
IG_Restore,
IG_Max
-}InitialGainOpType;
+} InitialGainOpType;
/* debug macros */
#define CONFIG_IEEE80211_DEBUG
@@ -457,22 +457,22 @@ do { if (ieee80211_debug_level & (level)) \
//wb added to debug out data buf
//if you want print DATA buffer related BA, please set ieee80211_debug_level to DATA|BA
#define IEEE80211_DEBUG_DATA(level, data, datalen) \
- do{ if ((ieee80211_debug_level & (level)) == (level)) \
+ do { if ((ieee80211_debug_level & (level)) == (level)) \
{ \
int i; \
u8 *pdata = (u8 *) data; \
printk(KERN_DEBUG "ieee80211: %s()\n", __func__); \
- for(i=0; i<(int)(datalen); i++) \
+ for (i = 0; i < (int)(datalen); i++) \
{ \
printk("%2x ", pdata[i]); \
- if ((i+1)%16 == 0) printk("\n"); \
+ if ((i + 1) % 16 == 0) printk("\n"); \
} \
printk("\n"); \
} \
} while (0)
#else
-#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
-#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while(0)
+#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
+#define IEEE80211_DEBUG_DATA(level, data, datalen) do {} while (0)
#endif /* CONFIG_IEEE80211_DEBUG */
/* debug macros not dependent on CONFIG_IEEE80211_DEBUG */
@@ -876,9 +876,9 @@ enum ieee80211_mfie {
MFIE_TYPE_ERP = 42,
MFIE_TYPE_RSN = 48,
MFIE_TYPE_RATES_EX = 50,
- MFIE_TYPE_HT_CAP= 45,
- MFIE_TYPE_HT_INFO= 61,
- MFIE_TYPE_AIRONET=133,
+ MFIE_TYPE_HT_CAP = 45,
+ MFIE_TYPE_HT_INFO = 61,
+ MFIE_TYPE_AIRONET = 133,
MFIE_TYPE_GENERIC = 221,
MFIE_TYPE_QOS_PARAMETER = 222,
};
@@ -1051,7 +1051,7 @@ typedef union _frameqos {
u16 ack_policy:2;
u16 reserved:1;
u16 txop:8;
- }field;
+ } field;
} frameqos, *pframeqos;
/* SWEEP TABLE ENTRIES NUMBER*/
@@ -1196,7 +1196,7 @@ static inline u8 Frame_QoSTID(u8 *buf)
u16 fc;
hdr = (struct rtl_80211_hdr_3addr *)buf;
fc = le16_to_cpu(hdr->frame_ctl);
- return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS)&&(fc & IEEE80211_FCTL_FROMDS))? 30 : 24)))->field.tid;
+ return (u8)((frameqos *)(buf + (((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) ? 30 : 24)))->field.tid;
}
//added by amy for reorder
@@ -1209,7 +1209,7 @@ struct eapol {
u16 length;
} __packed;
-struct ieee80211_softmac_stats{
+struct ieee80211_softmac_stats {
unsigned int rx_ass_ok;
unsigned int rx_ass_err;
unsigned int rx_probe_rq;
@@ -1320,7 +1320,7 @@ struct ether_header {
#define ETHERTYPE_IP 0x0800 /* IP protocol */
#endif
-typedef enum _erp_t{
+typedef enum _erp_t {
ERP_NonERPpresent = 0x01,
ERP_UseProtection = 0x02,
ERP_BarkerPreambleMode = 0x04,
@@ -1479,37 +1479,35 @@ typedef struct _RX_REORDER_ENTRY {
struct ieee80211_rxb *prxb;
} RX_REORDER_ENTRY, *PRX_REORDER_ENTRY;
//added by amy for order
-typedef enum _Fsync_State{
+typedef enum _Fsync_State {
Default_Fsync,
HW_Fsync,
SW_Fsync
-}Fsync_State;
+} Fsync_State;
// Power save mode configured.
-typedef enum _RT_PS_MODE
-{
+typedef enum _RT_PS_MODE {
eActive, // Active/Continuous access.
eMaxPs, // Max power save mode.
eFastPs // Fast power save mode.
-}RT_PS_MODE;
+} RT_PS_MODE;
-typedef enum _IPS_CALLBACK_FUNCION
-{
+typedef enum _IPS_CALLBACK_FUNCION {
IPS_CALLBACK_NONE = 0,
IPS_CALLBACK_MGNT_LINK_REQUEST = 1,
IPS_CALLBACK_JOIN_REQUEST = 2,
-}IPS_CALLBACK_FUNCION;
+} IPS_CALLBACK_FUNCION;
-typedef enum _RT_JOIN_ACTION{
+typedef enum _RT_JOIN_ACTION {
RT_JOIN_INFRA = 1,
RT_JOIN_IBSS = 2,
RT_START_IBSS = 3,
RT_NO_ACTION = 4,
-}RT_JOIN_ACTION;
+} RT_JOIN_ACTION;
-typedef struct _IbssParms{
+typedef struct _IbssParms {
u16 atimWin;
-}IbssParms, *PIbssParms;
+} IbssParms, *PIbssParms;
#define MAX_NUM_RATES 264 // Max num of support rates element: 8, Max num of ext. support rate: 255. 061122, by rcnjko.
// RF state.
@@ -1517,7 +1515,7 @@ typedef enum _RT_RF_POWER_STATE {
eRfOn,
eRfSleep,
eRfOff
-}RT_RF_POWER_STATE;
+} RT_RF_POWER_STATE;
typedef struct _RT_POWER_SAVE_CONTROL {
@@ -1572,8 +1570,7 @@ typedef u32 RT_RF_CHANGE_SOURCE;
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_INIT 0 // Do not change the RFOff reason. Defined by Bruce, 2008-01-17.
-typedef enum
-{
+typedef enum {
COUNTRY_CODE_FCC = 0,
COUNTRY_CODE_IC = 1,
COUNTRY_CODE_ETSI = 2,
@@ -1585,10 +1582,10 @@ typedef enum
COUNTRY_CODE_TELEC,
COUNTRY_CODE_MIC,
COUNTRY_CODE_GLOBAL_DOMAIN
-}country_code_type_t;
+} country_code_type_t;
#define RT_MAX_LD_SLOT_NUM 10
-typedef struct _RT_LINK_DETECT_T{
+typedef struct _RT_LINK_DETECT_T {
u32 NumRecvBcnInPeriod;
u32 NumRecvDataInPeriod;
@@ -1601,7 +1598,7 @@ typedef struct _RT_LINK_DETECT_T{
u32 NumTxOkInPeriod;
u32 NumRxOkInPeriod;
bool bBusyTraffic;
-}RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
+} RT_LINK_DETECT_T, *PRT_LINK_DETECT_T;
struct ieee80211_device {
@@ -1917,11 +1914,11 @@ struct ieee80211_device {
struct net_device *dev);
int (*reset_port)(struct net_device *dev);
- int (*is_queue_full) (struct net_device *dev, int pri);
+ int (*is_queue_full)(struct net_device *dev, int pri);
- int (*handle_management) (struct net_device *dev,
+ int (*handle_management)(struct net_device *dev,
struct ieee80211_network *network, u16 type);
- int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
+ int (*is_qos_active)(struct net_device *dev, struct sk_buff *skb);
/* Softmac-generated frames (management) are TXed via this
* callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
@@ -1989,16 +1986,16 @@ struct ieee80211_device {
* stop_send_bacons is NOT guaranteed to be called only
* after start_send_beacons.
*/
- void (*start_send_beacons) (struct net_device *dev,u16 tx_rate);
- void (*stop_send_beacons) (struct net_device *dev);
+ void (*start_send_beacons)(struct net_device *dev, u16 tx_rate);
+ void (*stop_send_beacons)(struct net_device *dev);
/* power save mode related */
- void (*sta_wake_up) (struct net_device *dev);
- void (*ps_request_tx_ack) (struct net_device *dev);
- void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
- short (*ps_is_queue_empty) (struct net_device *dev);
- int (*handle_beacon) (struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
- int (*handle_assoc_response) (struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
+ void (*sta_wake_up)(struct net_device *dev);
+ void (*ps_request_tx_ack)(struct net_device *dev);
+ void (*enter_sleep_state)(struct net_device *dev, u32 th, u32 tl);
+ short (*ps_is_queue_empty)(struct net_device *dev);
+ int (*handle_beacon)(struct net_device *dev, struct ieee80211_beacon *beacon, struct ieee80211_network *network);
+ int (*handle_assoc_response)(struct net_device *dev, struct ieee80211_assoc_response_frame *resp, struct ieee80211_network *network);
/* check whether Tx hw resource available */
@@ -2023,7 +2020,7 @@ struct ieee80211_device {
#define IEEE_G (1<<2)
#define IEEE_N_24G (1<<4)
#define IEEE_N_5G (1<<5)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
/* Generate a 802.11 header */
@@ -2112,7 +2109,7 @@ static inline int ieee80211_get_hdrlen(u16 fc)
case IEEE80211_FTYPE_DATA:
if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
hdrlen = IEEE80211_4ADDR_LEN; /* Addr4 */
- if(IEEE80211_QOS_HAS_SEQ(fc))
+ if (IEEE80211_QOS_HAS_SEQ(fc))
hdrlen += 2; /* QOS ctrl*/
break;
case IEEE80211_FTYPE_CTL:
@@ -2379,7 +2376,7 @@ void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,
u8 HTGetHighestMCSRate(struct ieee80211_device *ieee,
u8 *pMCSRateSet, u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
-extern u16 MCS_DATA_RATE[2][2][77] ;
+extern u16 MCS_DATA_RATE[2][2][77];
u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
//extern void HTSetConnectBwModeCallback(unsigned long data);
void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
@@ -2395,9 +2392,9 @@ void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
u8 Policy, u8 bOverwritePending);
void TsInitDelBA(struct ieee80211_device *ieee,
PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-void BaSetupTimeOut(unsigned long data);
-void TxBaInactTimeout(unsigned long data);
-void RxBaInactTimeout(unsigned long data);
+void BaSetupTimeOut(struct timer_list *t);
+void TxBaInactTimeout(struct timer_list *t);
+void RxBaInactTimeout(struct timer_list *t);
void ResetBaEntry(PBA_RECORD pBA);
//function in TS.c
bool GetTs(
@@ -2426,7 +2423,8 @@ static inline int ieee80211_get_scans(struct ieee80211_device *ieee)
return ieee->scans;
}
-static inline const char *escape_essid(const char *essid, u8 essid_len) {
+static inline const char *escape_essid(const char *essid, u8 essid_len)
+{
static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
if (ieee80211_is_empty_essid(essid, essid_len)) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 48e80be90ba5..6f457812e5a3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -57,9 +57,9 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
}
}
-void ieee80211_crypt_deinit_handler(unsigned long data)
+void ieee80211_crypt_deinit_handler(struct timer_list *t)
{
- struct ieee80211_device *ieee = (struct ieee80211_device *)data;
+ struct ieee80211_device *ieee = from_timer(ieee, t, crypt_deinit_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->lock, flags);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
index a0aa0f5be63a..1f2aea7e0e55 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h
@@ -83,7 +83,7 @@ int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force);
-void ieee80211_crypt_deinit_handler(unsigned long data);
+void ieee80211_crypt_deinit_handler(struct timer_list *t);
void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
struct ieee80211_crypt_data **crypt);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 8f236b332a47..90a097f2cd4e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -133,8 +133,8 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- setup_timer(&ieee->crypt_deinit_timer,
- ieee80211_crypt_deinit_handler, (unsigned long)ieee);
+ timer_setup(&ieee->crypt_deinit_timer, ieee80211_crypt_deinit_handler,
+ 0);
spin_lock_init(&ieee->lock);
spin_lock_init(&ieee->wpax_suitlist_lock);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index fe6f38b7ec35..f56fdc7a4b61 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -391,10 +391,10 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
}
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
{
struct ieee80211_device *ieee =
- (struct ieee80211_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
@@ -1251,9 +1251,11 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
{
- ieee80211_associate_abort((struct ieee80211_device *) dev);
+ struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+ ieee80211_associate_abort(dev);
}
@@ -2718,11 +2720,9 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->enable_rx_imm_BA = true;
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
@@ -2948,8 +2948,9 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
struct ieee_param *param, int param_len)
{
int ret = 0;
+ const char *module = NULL;
- struct ieee80211_crypto_ops *ops;
+ struct ieee80211_crypto_ops *ops = NULL;
struct ieee80211_crypt_data **crypt;
struct ieee80211_security sec = {
@@ -2995,19 +2996,17 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
strcmp(param->u.crypt.alg, "TKIP"))
goto skip_host_crypt;
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
- request_module("ieee80211_crypt_wep");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
- request_module("ieee80211_crypt_tkip");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
- request_module("ieee80211_crypt_ccmp");
- ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
- }
- if (ops == NULL) {
+ //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place
+ if (!strcmp(param->u.crypt.alg, "WEP"))
+ module = "ieee80211_crypt_wep";
+ else if (!strcmp(param->u.crypt.alg, "TKIP"))
+ module = "ieee80211_crypt_tkip";
+ else if (!strcmp(param->u.crypt.alg, "CCMP"))
+ module = "ieee80211_crypt_ccmp";
+ if (module)
+ ops = try_then_request_module(ieee80211_get_crypto_ops(param->u.crypt.alg),
+ module);
+ if (!ops) {
printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
ret = -EINVAL;
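
The two-step lookup removed above (call the lookup, request_module() on failure, look up again) is exactly what the try_then_request_module() helper from <linux/kmod.h> encapsulates. A minimal sketch, assuming a hypothetical my_get_ops() lookup and module name that are not part of this patch:

#include <linux/kmod.h>

struct my_ops;					/* hypothetical ops table */
struct my_ops *my_get_ops(const char *alg);	/* hypothetical registry lookup */

/*
 * try_then_request_module(x, mod...) evaluates x; only when the result
 * is NULL does it request the named module and evaluate x a second time.
 */
static struct my_ops *find_ops(const char *alg)
{
	return try_then_request_module(my_get_ops(alg), "my_crypt_mod");
}
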
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index f58971a4a2e3..9a1a84548bc6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -302,7 +302,6 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
}
}
-#define SN_LESS(a, b) (((a-b)&0x800)!=0)
static void ieee80211_tx_query_agg_cap(struct ieee80211_device *ieee,
struct sk_buff *skb, struct cb_desc *tcb_desc)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
index c925e53bf013..f2fcdec9bd17 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
@@ -364,11 +364,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
GFP_KERNEL);
if (!new_crypt)
return -ENOMEM;
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- if (!new_crypt->ops) {
- request_module("ieee80211_crypt_wep");
- new_crypt->ops = ieee80211_get_crypto_ops("WEP");
- }
+ new_crypt->ops = try_then_request_module(ieee80211_get_crypto_ops("WEP"),
+ "ieee80211_crypt_wep");
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(key);
@@ -591,12 +588,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
}
printk("alg name:%s\n",alg);
- ops = ieee80211_get_crypto_ops(alg);
- if (ops == NULL) {
- request_module(module);
- ops = ieee80211_get_crypto_ops(alg);
- }
- if (ops == NULL) {
+ ops = try_then_request_module(ieee80211_get_crypto_ops(alg), module);
+ if (!ops) {
IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
dev->name, ext->alg);
printk("========>unknown crypto alg %d\n", ext->alg);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
index 7abedc27d7c1..b6a76aae4832 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BA.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BATYPE_H_
#define _BATYPE_H_
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 8aa38dcf0dfd..21b55fd5b717 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/********************************************************************************************************************************
* This file is created to process BA Action Frame. According to 802.11 spec, there are 3 BA action types at all. And as BA is
* related to TS, this part need some structure defined in QOS side code. Also TX RX is going to be resturctured, so how to send
@@ -143,7 +144,7 @@ static struct sk_buff *ieee80211_ADDBA(struct ieee80211_device *ieee, u8 *Dst, P
if (ACT_ADDBARSP == type) {
// Status Code
- printk(KERN_INFO "=====>to send ADDBARSP\n");
+ netdev_info(ieee->dev, "=====>to send ADDBARSP\n");
put_unaligned_le16(StatusCode, tag);
tag += 2;
@@ -345,7 +346,7 @@ int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb)
pBaTimeoutVal = (u16 *)(tag + 5);
pBaStartSeqCtrl = (PSEQUENCE_CONTROL)(req + 7);
- printk(KERN_INFO "====================>rx ADDBAREQ from :%pM\n", dst);
+ netdev_info(ieee->dev, "====================>rx ADDBAREQ from :%pM\n", dst);
//some other capability is not ready now.
if ((ieee->current_network.qos_data.active == 0) ||
(!ieee->pHTInfo->bCurrentHTSupport)) //||
@@ -672,18 +673,18 @@ TsInitDelBA( struct ieee80211_device *ieee, PTS_COMMON_INFO pTsCommonInfo, TR_SE
* return: NULL
* notice:
********************************************************************************************************************/
-void BaSetupTimeOut(unsigned long data)
+void BaSetupTimeOut(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxPendingBARecord.Timer);
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = true;
pTxTs->TxPendingBARecord.bValid = false;
}
-void TxBaInactTimeout(unsigned long data)
+void TxBaInactTimeout(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TxAdmittedBARecord.Timer);
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[pTxTs->num]);
TxTsDeleteBA(ieee, pTxTs);
ieee80211_send_DELBA(
@@ -694,9 +695,9 @@ void TxBaInactTimeout(unsigned long data)
DELBA_REASON_TIMEOUT);
}
-void RxBaInactTimeout(unsigned long data)
+void RxBaInactTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
+ PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxAdmittedBARecord.Timer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
RxTsDeleteBA(ieee, pRxTs);
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
index 5f54d93dfb66..a85036022aa8 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RTL819XU_HTTYPE_H_
#define _RTL819XU_HTTYPE_H_
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index 9248dbcf3370..bf7b7122d042 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
//As this function is mainly ported from Windows driver, so leave the name little changed. If any confusion caused, tell me. Created by WB. 2008.05.08
#include "ieee80211.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
index 49c23c720f78..71df9d9e2e99 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_Qos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INC_QOS_TYPE_H
#define __INC_QOS_TYPE_H
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
index e25b69777ee7..3a0ff08c687a 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TS.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TSTYPE_H_
#define _TSTYPE_H_
#include "rtl819x_Qos.h"
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index f98bb03aa293..e60a26250682 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -1,15 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
#include "ieee80211.h"
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include "rtl819x_TS.h"
-static void TsSetupTimeOut(unsigned long data)
+static void TsSetupTimeOut(struct timer_list *unused)
{
// Not implement yet
// This is used for WMMSA and ACM , that would send ADDTSReq frame.
}
-static void TsInactTimeout(unsigned long data)
+static void TsInactTimeout(struct timer_list *unused)
{
// Not implement yet
// This is used for WMMSA and ACM.
@@ -22,9 +23,9 @@ static void TsInactTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-static void RxPktPendingTimeout(unsigned long data)
+static void RxPktPendingTimeout(struct timer_list *t)
{
- PRX_TS_RECORD pRxTs = (PRX_TS_RECORD)data;
+ PRX_TS_RECORD pRxTs = from_timer(pRxTs, t, RxPktPendingTimer);
struct ieee80211_device *ieee = container_of(pRxTs, struct ieee80211_device, RxTsRecord[pRxTs->num]);
PRX_REORDER_ENTRY pReorderEntry = NULL;
@@ -89,9 +90,9 @@ static void RxPktPendingTimeout(unsigned long data)
* return: NULL
* notice:
********************************************************************************************************************/
-static void TsAddBaProcess(unsigned long data)
+static void TsAddBaProcess(struct timer_list *t)
{
- PTX_TS_RECORD pTxTs = (PTX_TS_RECORD)data;
+ PTX_TS_RECORD pTxTs = from_timer(pTxTs, t, TsAddBaTimer);
u8 num = pTxTs->num;
struct ieee80211_device *ieee = container_of(pTxTs, struct ieee80211_device, TxTsRecord[num]);
@@ -145,16 +146,15 @@ void TSInitialize(struct ieee80211_device *ieee)
pTxTS->num = count;
// The timers for the operation of Traffic Stream and Block Ack.
// DLS related timer will be add here in the future!!
- setup_timer(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TsAddBaTimer, TsAddBaProcess,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
- (unsigned long)pTxTS);
- setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
- TxBaInactTimeout, (unsigned long)pTxTS);
+ timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
+ timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
+ timer_setup(&pTxTS->TxPendingBARecord.Timer, BaSetupTimeOut,
+ 0);
+ timer_setup(&pTxTS->TxAdmittedBARecord.Timer,
+ TxBaInactTimeout, 0);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List, &ieee->Tx_TS_Unused_List);
pTxTS++;
@@ -167,14 +167,13 @@ void TSInitialize(struct ieee80211_device *ieee)
for(count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
- setup_timer(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
- (unsigned long)pRxTS);
- setup_timer(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
- (unsigned long)pRxTS);
- setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
- RxBaInactTimeout, (unsigned long)pRxTS);
- setup_timer(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout,
- (unsigned long)pRxTS);
+ timer_setup(&pRxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
+ 0);
+ timer_setup(&pRxTS->TsCommonInfo.InactTimer, TsInactTimeout,
+ 0);
+ timer_setup(&pRxTS->RxAdmittedBARecord.Timer,
+ RxBaInactTimeout, 0);
+ timer_setup(&pRxTS->RxPktPendingTimer, RxPktPendingTimeout, 0);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List, &ieee->Rx_TS_Unused_List);
pRxTS++;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 46b3f19e0878..09f66b386e44 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -497,7 +497,7 @@ inline void force_pci_posting(struct net_device *dev)
static struct net_device_stats *rtl8192_stats(struct net_device *dev);
static void rtl8192_restart(struct work_struct *work);
-static void watch_dog_timer_callback(unsigned long data);
+static void watch_dog_timer_callback(struct timer_list *t);
/****************************************************************************
* -----------------------------PROCFS STUFF-------------------------
@@ -1687,9 +1687,13 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
for (i = 0; i < (MAX_RX_URB + 1); i++) {
priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!priv->rx_urb[i])
+ return -ENOMEM;
priv->rx_urb[i]->transfer_buffer =
kmalloc(RX_URB_SIZE, GFP_KERNEL);
+ if (!priv->rx_urb[i]->transfer_buffer)
+ return -ENOMEM;
priv->rx_urb[i]->transfer_buffer_length = RX_URB_SIZE;
}
@@ -2690,15 +2694,11 @@ static short rtl8192_init(struct net_device *dev)
err = rtl8192_read_eeprom_info(dev);
if (err) {
DMESG("Reading EEPROM info failed");
- kfree(priv->pFirmware);
- priv->pFirmware = NULL;
- free_ieee80211(dev);
return err;
}
rtl8192_get_channel_map(dev);
init_hal_dm(dev);
- setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->watch_dog_timer, watch_dog_timer_callback, 0);
if (rtl8192_usb_initendpoints(dev) != 0) {
DMESG("Endopoints initialization failed");
return -ENOMEM;
@@ -3499,9 +3499,9 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
}
-static void watch_dog_timer_callback(unsigned long data)
+static void watch_dog_timer_callback(struct timer_list *t)
{
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, watch_dog_timer);
schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer,
@@ -3528,7 +3528,7 @@ static int _rtl8192_up(struct net_device *dev)
if (priv->ieee80211->state != IEEE80211_LINKED)
ieee80211_softmac_start_protocol(priv->ieee80211);
ieee80211_reset_queue(priv->ieee80211);
- watch_dog_timer_callback((unsigned long)dev);
+ watch_dog_timer_callback(&priv->watch_dog_timer);
if (!netif_queue_stopped(dev))
netif_start_queue(dev);
else
@@ -4994,11 +4994,11 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
fail2:
rtl8192_down(dev);
+fail:
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
mdelay(10);
-fail:
free_ieee80211(dev);
RT_TRACE(COMP_ERR, "wlan driver load failed\n");
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index e6f8d1da65d9..e1b81d34f1ad 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*++
Copyright-c Realtek Semiconductor Corp. All rights reserved.
@@ -2729,8 +2730,7 @@ static void dm_init_fsync(struct net_device *dev)
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- setup_timer(&priv->fsync_timer, dm_fsync_timer_callback,
- (unsigned long)dev);
+ timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
}
static void dm_deInit_fsync(struct net_device *dev)
@@ -2740,10 +2740,10 @@ static void dm_deInit_fsync(struct net_device *dev)
del_timer_sync(&priv->fsync_timer);
}
-void dm_fsync_timer_callback(unsigned long data)
+void dm_fsync_timer_callback(struct timer_list *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
+ struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
bool bDoubleTimeInterval = false;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 2d0232fb3f9b..8f3d618dcfdb 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*****************************************************************************
* Copyright(c) 2007, RealTEK Technology Inc. All Right Reserved.
*
@@ -226,7 +227,7 @@ void dm_force_tx_fw_info(struct net_device *dev,
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(unsigned long data);
+void dm_fsync_timer_callback(struct timer_list *t);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index a9545386fbc5..e4e6c979bedf 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -964,7 +964,7 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
return wstats;
}
-struct iw_handler_def r8192_wx_handlers_def = {
+const struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
.private = r8192_private_handler,
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index fb5f808433d1..a6c2b95e2e69 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -17,7 +17,7 @@
#ifndef R8180_WX_H
#define R8180_WX_H
-extern struct iw_handler_def r8192_wx_handlers_def;
+extern const struct iw_handler_def r8192_wx_handlers_def;
/* Enable the rtl819x_core.c to share this function, david 2008.9.22 */
struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index ae9a4f1ac8fd..80672100ea26 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index ad0f6003570d..85fb49ca7bc8 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef R819XUSB_CMDPKT_H
#define R819XUSB_CMDPKT_H
/* Different command packet have dedicated message length and definition. */
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 35d1786703a7..9c7e19aedff1 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**************************************************************************************************
* Procedure: Init boot code/firmware code/data session
*
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.h b/drivers/staging/rtl8192u/r819xU_firmware.h
index 24b63f2ec509..cccd1c82ffe0 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INC_FIRMWARE_H
#define __INC_FIRMWARE_H
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.c b/drivers/staging/rtl8192u/r819xU_firmware_img.c
index 4eb43cfe5690..0af062036688 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware_img.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*Created on 2008/ 7/16, 5:31*/
#include <linux/types.h>
#include "r819xU_firmware_img.h"
diff --git a/drivers/staging/rtl8192u/r819xU_firmware_img.h b/drivers/staging/rtl8192u/r819xU_firmware_img.h
index 18d0a6b5cbae..355da9157be1 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware_img.h
+++ b/drivers/staging/rtl8192u/r819xU_firmware_img.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IMG_H
#define IMG_H
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index 3874f8307117..12750671c860 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "r8192U.h"
#include "r8192U_hw.h"
#include "r819xU_phy.h"
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index e672126330f3..0a42a6092ea9 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _R819XU_PHY_H
#define _R819XU_PHY_H
diff --git a/drivers/staging/rtl8192u/r819xU_phyreg.h b/drivers/staging/rtl8192u/r819xU_phyreg.h
index b855627e9816..c058a9537526 100644
--- a/drivers/staging/rtl8192u/r819xU_phyreg.h
+++ b/drivers/staging/rtl8192u/r819xU_phyreg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _R819XU_PHYREG_H
#define _R819XU_PHYREG_H
diff --git a/drivers/staging/rtl8712/Makefile b/drivers/staging/rtl8712/Makefile
index 6f8500c2d922..3ae216b6621b 100644
--- a/drivers/staging/rtl8712/Makefile
+++ b/drivers/staging/rtl8712/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
r8712u-y := \
rtl871x_cmd.o \
rtl8712_cmd.o \
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index c83d7ebb164f..de832b0b5eec 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -216,9 +216,9 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
emem_sz = fwhdr.img_SRAM_size;
do {
memset(ptx_desc, 0, TXDESC_SIZE);
- if (emem_sz > MAX_DUMP_FWSZ) /* max=48k */
+ if (emem_sz > MAX_DUMP_FWSZ) { /* max=48k */
dump_emem_sz = MAX_DUMP_FWSZ;
- else {
+ } else {
dump_emem_sz = emem_sz;
ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
}
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index a077069d6227..3c7c4a4faeb2 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -32,39 +32,45 @@
#include "drv_types.h"
#include "mlme_osdep.h"
-static void sitesurvey_ctrl_handler(unsigned long data)
+static void sitesurvey_ctrl_handler(struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t,
+ mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer);
_r8712_sitesurvey_ctrl_handler(adapter);
mod_timer(&adapter->mlmepriv.sitesurveyctrl.sitesurvey_ctrl_timer,
jiffies + msecs_to_jiffies(3000));
}
-static void join_timeout_handler (unsigned long data)
+static void join_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.assoc_timer);
_r8712_join_timeout_handler(adapter);
}
-static void _scan_timeout_handler (unsigned long data)
+static void _scan_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.scan_to_timer);
r8712_scan_timeout_handler(adapter);
}
-static void dhcp_timeout_handler (unsigned long data)
+static void dhcp_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dhcp_timer);
_r8712_dhcp_timeout_handler(adapter);
}
-static void wdg_timeout_handler (unsigned long data)
+static void wdg_timeout_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, mlmepriv.wdg_timer);
_r8712_wdg_timeout_handler(adapter);
@@ -76,17 +82,12 @@ void r8712_init_mlme_timer(struct _adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- setup_timer(&pmlmepriv->assoc_timer, join_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer,
- sitesurvey_ctrl_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->scan_to_timer, _scan_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->dhcp_timer, dhcp_timeout_handler,
- (unsigned long)padapter);
- setup_timer(&pmlmepriv->wdg_timer, wdg_timeout_handler,
- (unsigned long)padapter);
+ timer_setup(&pmlmepriv->assoc_timer, join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer,
+ sitesurvey_ctrl_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, _scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dhcp_timer, dhcp_timeout_handler, 0);
+ timer_setup(&pmlmepriv->wdg_timer, wdg_timeout_handler, 0);
}
void r8712_os_indicate_connect(struct _adapter *adapter)
@@ -118,9 +119,8 @@ void r8712_os_indicate_disconnect(struct _adapter *adapter)
adapter->securitypriv.btkip_countermeasure;
memset((unsigned char *)&adapter->securitypriv, 0,
sizeof(struct security_priv));
- setup_timer(&adapter->securitypriv.tkip_timer,
- r8712_use_tkipkey_handler,
- (unsigned long)adapter);
+ timer_setup(&adapter->securitypriv.tkip_timer,
+ r8712_use_tkipkey_handler, 0);
/* Restore the PMK information to securitypriv structure
* for the following connection.
*/
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index e698f6ede449..95caf8df9a13 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -313,8 +313,8 @@ u8 r8712_init_drv_sw(struct _adapter *padapter)
_r8712_init_recv_priv(&padapter->recvpriv, padapter);
memset((unsigned char *)&padapter->securitypriv, 0,
sizeof(struct security_priv));
- setup_timer(&padapter->securitypriv.tkip_timer,
- r8712_use_tkipkey_handler, (unsigned long)padapter);
+ timer_setup(&padapter->securitypriv.tkip_timer,
+ r8712_use_tkipkey_handler, 0);
_r8712_init_sta_priv(&padapter->stapriv);
padapter->stapriv.padapter = padapter;
r8712_init_bcmc_stainfo(padapter);
@@ -385,11 +385,11 @@ static int netdev_open(struct net_device *pnetdev)
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
- if (!r8712_initmac)
+ if (!r8712_initmac) {
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
padapter->eeprompriv.mac_addr, ETH_ALEN);
- else {
+ } else {
/* We have to inform f/w to use user-supplied MAC
* address.
*/
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 576c15d25a0f..986a55bb9877 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -138,17 +138,16 @@ _recv_indicatepkt_drop:
precvpriv->rx_drop++;
}
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler (struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
- (struct recv_reorder_ctrl *)data;
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
}
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- _r8712_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ _r8712_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 0104aced113e..3c88994fdfcd 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -356,11 +356,11 @@ _next:
if ((wr_sz % 64) == 0)
blnPending = 1;
}
- if (blnPending) /* 32 bytes for TX Desc - 8 offset */
+ if (blnPending) { /* 32 bytes for TX Desc - 8 offset */
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ + 8) << OFFSET_SHT) &
0x00ff0000);
- else {
+ } else {
pdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE +
OFFSET_SZ) <<
OFFSET_SHT) &
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.h b/drivers/staging/rtl8712/rtl8712_efuse.h
index 6a64f91ad75f..dbba51cd40fb 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.h
+++ b/drivers/staging/rtl8712/rtl8712_efuse.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RTL8712_EFUSE_H__
#define __RTL8712_EFUSE_H__
diff --git a/drivers/staging/rtl8712/rtl8712_event.h b/drivers/staging/rtl8712/rtl8712_event.h
index b38374025c93..cad7085c3f8a 100644
--- a/drivers/staging/rtl8712/rtl8712_event.h
+++ b/drivers/staging/rtl8712/rtl8712_event.h
@@ -60,7 +60,6 @@ enum rtl8712_c2h_event {
MAX_C2HEVT
};
-
#ifdef _RTL8712_CMD_C_
static struct fwevent wlanevents[] = {
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index da1d4a641dcd..455fba721135 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -74,7 +74,7 @@ enum _LED_STATE_871x {
* Prototype of protected function.
*===========================================================================
*/
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
static void BlinkWorkItemCallback(struct work_struct *work);
/*===========================================================================
@@ -99,8 +99,7 @@ static void InitLed871x(struct _adapter *padapter, struct LED_871x *pLed,
pLed->bLedBlinkInProgress = false;
pLed->BlinkTimes = 0;
pLed->BlinkingLedState = LED_UNKNOWN;
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
@@ -825,9 +824,9 @@ static void SwLedBlink6(struct LED_871x *pLed)
* Callback function of LED BlinkTimer,
* it just schedules to corresponding BlinkWorkItem.
*/
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
/* This fixed the crash problem on Fedora 12 when trying to do the
* insmod;ifconfig up;rmmod commands.
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index ea3eb94b28b3..8f555e6e1b3f 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -149,7 +149,7 @@ int r8712_free_recvframe(union recv_frame *precvframe,
list_add_tail(&(precvframe->u.hdr.list), &pfree_recv_queue->queue);
if (padapter != NULL) {
if (pfree_recv_queue == &precvpriv->free_recv_queue)
- precvpriv->free_recvframe_cnt++;
+ precvpriv->free_recvframe_cnt++;
}
spin_unlock_irqrestore(&pfree_recv_queue->lock, irqL);
return _SUCCESS;
@@ -883,10 +883,10 @@ static void query_rx_phy_status(struct _adapter *padapter,
* from 0~100. It is assigned to the BSS List in
* GetValueFromBeaconOrProbeRsp().
*/
- if (bcck_rate)
+ if (bcck_rate) {
prframe->u.hdr.attrib.signal_strength =
(u8)r8712_signal_scale_mapping(pwdb_all);
- else {
+ } else {
if (rf_rx_num != 0)
prframe->u.hdr.attrib.signal_strength =
(u8)(r8712_signal_scale_mapping(total_rssi /=
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 04638f1e4e88..a424f447a725 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -899,9 +899,10 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
if (!pwlan)
goto createbss_cmd_fail;
pwlan->last_scanned = jiffies;
- } else
+ } else {
list_add_tail(&(pwlan->list),
&pmlmepriv->scanned_queue.queue);
+ }
pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
pwlan->fixed = true;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index 08bcb3b41bbd..634e67461712 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IOCTL_H
#define __IOCTL_H
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 01a150446f5a..8a5ced4fa9d3 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -78,10 +78,10 @@ static u8 do_join(struct _adapter *padapter)
int ret;
ret = r8712_select_and_join_from_scan(pmlmepriv);
- if (ret == _SUCCESS)
+ if (ret == _SUCCESS) {
mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
- else {
+ } else {
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
/* submit r8712_createbss_cmd to change to an
* ADHOC_MASTER pmlmepriv->lock has been
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index bf1ac22bae1c..111c809afc51 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -574,10 +574,10 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
if (r8712_select_and_join_from_scan(pmlmepriv)
- == _SUCCESS)
+ == _SUCCESS) {
mod_timer(&pmlmepriv->assoc_timer, jiffies +
msecs_to_jiffies(MAX_JOIN_TIMEOUT));
- else {
+ } else {
struct wlan_bssid_ex *pdev_network =
&(adapter->registrypriv.dev_network);
u8 *pibss =
diff --git a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
index 11bcfb7bf77c..d479f739ff08 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_phy_regdef.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*****************************************************************************
* Copyright(c) 2008, RealTEK Technology Inc. All Right Reserved.
*
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index e42fc1404c35..ae4c9567bb55 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -164,9 +164,10 @@ static void rpwm_workitem_callback(struct work_struct *work)
}
}
-static void rpwm_check_handler (unsigned long data)
+static void rpwm_check_handler (struct timer_list *t)
{
- struct _adapter *adapter = (struct _adapter *)data;
+ struct _adapter *adapter =
+ from_timer(adapter, t, pwrctrlpriv.rpwm_check_timer);
_rpwm_check_handler(adapter);
}
@@ -185,8 +186,7 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
r8712_write8(padapter, 0x1025FE58, 0);
INIT_WORK(&pwrctrlpriv->SetPSModeWorkItem, SetPSModeWorkItemCallback);
INIT_WORK(&pwrctrlpriv->rpwm_workitem, rpwm_workitem_callback);
- setup_timer(&pwrctrlpriv->rpwm_check_timer, rpwm_check_handler,
- (unsigned long)padapter);
+ timer_setup(&pwrctrlpriv->rpwm_check_timer, rpwm_check_handler, 0);
}
/*
diff --git a/drivers/staging/rtl8712/rtl871x_recv.h b/drivers/staging/rtl8712/rtl871x_recv.h
index 9de06c5fe620..f87b2ff5de1c 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.h
+++ b/drivers/staging/rtl8712/rtl871x_recv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RTL871X_RECV_H_
#define _RTL871X_RECV_H_
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index bd83fb492c45..56d36f6f9c46 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -1402,9 +1402,10 @@ u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
return _SUCCESS;
}
-void r8712_use_tkipkey_handler(unsigned long data)
+void r8712_use_tkipkey_handler(struct timer_list *t)
{
- struct _adapter *padapter = (struct _adapter *)data;
+ struct _adapter *padapter =
+ from_timer(padapter, t, securitypriv.tkip_timer);
padapter->securitypriv.busetkipkey = true;
}
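
Note: from_timer() also works through a nested member path, as in securitypriv.tkip_timer above, because it is container_of() underneath and the offset is computed from the outermost struct. A tiny sketch under made-up demo_* names:

#include <linux/timer.h>
#include <linux/types.h>

struct demo_security {
	struct timer_list tkip_timer;
	bool busetkipkey;
};

struct demo_adapter {
	struct demo_security securitypriv;
};

static void demo_use_tkipkey_handler(struct timer_list *t)
{
	/* the nested member path resolves to the outer struct demo_adapter */
	struct demo_adapter *padapter =
		from_timer(padapter, t, securitypriv.tkip_timer);

	padapter->securitypriv.busetkipkey = true;
}
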
diff --git a/drivers/staging/rtl8712/rtl871x_security.h b/drivers/staging/rtl8712/rtl871x_security.h
index fa952e17975b..46b88a41d236 100644
--- a/drivers/staging/rtl8712/rtl871x_security.h
+++ b/drivers/staging/rtl8712/rtl871x_security.h
@@ -224,7 +224,7 @@ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe);
u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe);
u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe);
void r8712_wep_decrypt(struct _adapter *padapter, u8 *precvframe);
-void r8712_use_tkipkey_handler(unsigned long data);
+void r8712_use_tkipkey_handler(struct timer_list *t);
#endif /*__RTL871X_SECURITY_H_ */
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index b3e266bd57ab..85eadddfaf06 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -590,9 +590,10 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
mac[0] &= 0xFE;
dev_info(&udev->dev,
"r8712u: MAC Address from user = %pM\n", mac);
- } else
+ } else {
dev_info(&udev->dev,
"r8712u: MAC Address from efuse = %pM\n", mac);
+ }
ether_addr_copy(pnetdev->dev_addr, mac);
}
/* step 6. Load the firmware asynchronously */
diff --git a/drivers/staging/rtl8723bs/Makefile b/drivers/staging/rtl8723bs/Makefile
index 4e7b460a9c73..f236acfd3afa 100644
--- a/drivers/staging/rtl8723bs/Makefile
+++ b/drivers/staging/rtl8723bs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
r8723bs-y = \
core/rtw_ap.o \
core/rtw_btcoex.o \
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index d3007c1c45e3..0b530ea7fd81 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1581,15 +1581,13 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(
- sizeof(struct set_stakey_parm)
- );
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
kfree((u8 *) ph2c);
res = _FAIL;
@@ -1630,12 +1628,12 @@ static int rtw_ap_set_key(
/* DBG_871X("%s\n", __func__); */
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
}
- psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
kfree((unsigned char *)pcmd);
res = _FAIL;
diff --git a/drivers/staging/rtl8723bs/core/rtw_btcoex.c b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
index 01f78d1671de..79aa02afad01 100644
--- a/drivers/staging/rtl8723bs/core/rtw_btcoex.c
+++ b/drivers/staging/rtl8723bs/core/rtw_btcoex.c
@@ -207,11 +207,11 @@ void rtw_btcoex_RejectApAggregatedPacket(struct adapter *padapter, u8 enable)
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
if (true == enable) {
- pmlmeinfo->bAcceptAddbaReq = false;
+ pmlmeinfo->accept_addba_req = false;
if (psta)
send_delba(padapter, 0, psta->hwaddr);
} else{
- pmlmeinfo->bAcceptAddbaReq = true;
+ pmlmeinfo->accept_addba_req = true;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index d381827dba3b..9ac2dea6dff1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -408,7 +408,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
}
/* free cmd_obj */
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
}
@@ -613,13 +613,13 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
}
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL)
return _FAIL;
- psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (psurveyPara == NULL) {
- kfree((unsigned char *) ph2c);
+ kfree(ph2c);
return _FAIL;
}
@@ -681,15 +681,15 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pbsetdataratepara = (struct setdatarate_parm *)rtw_zmalloc(sizeof(struct setdatarate_parm));
+ pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm));
if (pbsetdataratepara == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -706,8 +706,8 @@ exit:
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
/* rtw_free_cmd_obj(pcmd); */
- kfree((unsigned char *) pcmd->parmbuf);
- kfree((unsigned char *) pcmd);
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
}
u8 rtw_createbss_cmd(struct adapter *padapter)
@@ -724,7 +724,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
}
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
@@ -757,7 +757,7 @@ u8 rtw_startbss_cmd(struct adapter *padapter, int flags)
start_bss_network(padapter, (u8 *)&(padapter->mlmepriv.cur_network.network));
} else {
/* need enqueue, prepare cmd_obj and enqueue */
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
goto exit;
@@ -815,7 +815,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
}
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL;
RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -847,7 +847,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
if (psecnetwork == NULL) {
if (pcmd != NULL)
- kfree((unsigned char *)pcmd);
+ kfree(pcmd);
res = _FAIL;
@@ -943,7 +943,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
/* prepare cmd parameter */
- param = (struct disconnect_parm *)rtw_zmalloc(sizeof(*param));
+ param = rtw_zmalloc(sizeof(*param));
if (param == NULL) {
res = _FAIL;
goto exit;
@@ -952,10 +952,10 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- cmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(*cmdobj));
+ cmdobj = rtw_zmalloc(sizeof(*cmdobj));
if (cmdobj == NULL) {
res = _FAIL;
- kfree((u8 *)param);
+ kfree(param);
goto exit;
}
init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
@@ -964,7 +964,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* no need to enqueue, do the cmd hdl directly and free cmd parameter */
if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
res = _FAIL;
- kfree((u8 *)param);
+ kfree(param);
}
exit:
@@ -979,7 +979,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- psetop = (struct setopmode_parm *)rtw_zmalloc(sizeof(struct setopmode_parm));
+ psetop = rtw_zmalloc(sizeof(struct setopmode_parm));
if (psetop == NULL) {
res = _FAIL;
@@ -988,9 +988,9 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
psetop->mode = (u8)networktype;
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
- kfree((u8 *)psetop);
+ kfree(psetop);
res = _FAIL;
goto exit;
}
@@ -999,7 +999,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum NDIS_802_11_NETWORK_INFRAST
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else{
setopmode_hdl(padapter, (u8 *)psetop);
- kfree((u8 *)psetop);
+ kfree(psetop);
}
exit:
return res;
@@ -1016,7 +1016,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
struct security_priv *psecuritypriv = &padapter->securitypriv;
u8 res = _SUCCESS;
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
res = _FAIL;
goto exit;
@@ -1040,17 +1040,17 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
padapter->securitypriv.busetkipkey = true;
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
- kfree((u8 *) psetstakey_para);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
@@ -1061,7 +1061,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else{
set_stakey_hdl(padapter, (u8 *)psetstakey_para);
- kfree((u8 *) psetstakey_para);
+ kfree(psetstakey_para);
}
exit:
return res;
@@ -1083,23 +1083,23 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueu
rtw_camid_free(padapter, cam_id);
}
} else{
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
if (psetstakey_para == NULL) {
- kfree((u8 *) ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
- psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
if (psetstakey_rsp == NULL) {
- kfree((u8 *) ph2c);
- kfree((u8 *) psetstakey_para);
+ kfree(ph2c);
+ kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
@@ -1128,15 +1128,15 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- paddbareq_parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ paddbareq_parm = rtw_zmalloc(sizeof(struct addBaReq_parm));
if (paddbareq_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1162,15 +1162,15 @@ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1197,15 +1197,15 @@ u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1233,15 +1233,15 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
/* only primary padapter does this cmd */
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1283,7 +1283,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
}
/* prepare cmd parameter */
- setChannelPlan_param = (struct SetChannelPlan_param *)rtw_zmalloc(sizeof(struct SetChannelPlan_param));
+ setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param));
if (setChannelPlan_param == NULL) {
res = _FAIL;
goto exit;
@@ -1292,9 +1292,9 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmdobj == NULL) {
- kfree((u8 *)setChannelPlan_param);
+ kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
}
@@ -1306,7 +1306,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
if (H2C_SUCCESS != set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param))
res = _FAIL;
- kfree((u8 *)setChannelPlan_param);
+ kfree(setChannelPlan_param);
}
/* do something based on res... */
@@ -1553,15 +1553,15 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
/* return res; */
if (enqueue) {
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1595,15 +1595,15 @@ u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1668,15 +1668,15 @@ u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta)
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1719,15 +1719,15 @@ u8 rtw_ps_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ppscmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (ppscmd == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ppscmd);
+ kfree(ppscmd);
res = _FAIL;
goto exit;
}
@@ -1791,15 +1791,15 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((unsigned char *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1890,15 +1890,15 @@ u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1925,15 +1925,15 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
if (pdrvextra_cmd_parm == NULL) {
- kfree((u8 *)ph2c);
+ kfree(ph2c);
res = _FAIL;
goto exit;
}
@@ -1967,7 +1967,7 @@ static void c2h_wk_callback(_workitem *work)
/* This C2H event is read, clear it */
c2h_evt_clear(adapter);
} else{
- c2h_evt = (u8 *)rtw_malloc(16);
+ c2h_evt = rtw_malloc(16);
if (c2h_evt != NULL) {
/* This C2H event is not read, read & clear now */
if (rtw_hal_c2h_evt_read(adapter, c2h_evt) != _SUCCESS) {
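
Note: most of the rtw_cmd.c changes above drop redundant casts. rtw_zmalloc()/rtw_malloc() return void * and kfree() takes a void pointer, so neither side needs a cast. A condensed sketch of the resulting shape, modelled on rtw_disassoc_cmd()'s enqueue path from the hunks above; it leans on the driver's own headers and the demo_ function name is illustrative only.

#include <drv_types.h>

static u8 demo_disassoc_enqueue(struct adapter *padapter)
{
	struct disconnect_parm *param;
	struct cmd_obj *cmdobj;

	param = rtw_zmalloc(sizeof(*param));	/* was (struct disconnect_parm *)rtw_zmalloc(...) */
	if (!param)
		return _FAIL;

	cmdobj = rtw_zmalloc(sizeof(*cmdobj));
	if (!cmdobj) {
		kfree(param);			/* was kfree((u8 *)param) */
		return _FAIL;
	}

	init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
	return rtw_enqueue_cmd(&padapter->cmdpriv, cmdobj);
}
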
diff --git a/drivers/staging/rtl8723bs/core/rtw_debug.c b/drivers/staging/rtl8723bs/core/rtw_debug.c
index 3db02e9f27ab..b5dd244fee8f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_debug.c
+++ b/drivers/staging/rtl8723bs/core/rtw_debug.c
@@ -1122,7 +1122,8 @@ int proc_get_rx_ampdu(struct seq_file *m, void *v)
if (pregpriv)
DBG_871X_SEL_NL(m,
- "bAcceptAddbaReq = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n", pmlmeinfo->bAcceptAddbaReq
+ "accept_addba_req = %d , 0:Reject AP's Add BA req, 1:Accept AP's Add BA req.\n",
+ pmlmeinfo->accept_addba_req
);
return 0;
@@ -1146,8 +1147,9 @@ ssize_t proc_set_rx_ampdu(struct file *file, const char __user *buffer, size_t c
sscanf(tmp, "%d ", &mode);
if (pregpriv && mode < 2) {
- pmlmeinfo->bAcceptAddbaReq = mode;
- DBG_871X("pmlmeinfo->bAcceptAddbaReq =%d\n", pmlmeinfo->bAcceptAddbaReq);
+ pmlmeinfo->accept_addba_req = mode;
+ DBG_871X("pmlmeinfo->accept_addba_req =%d\n",
+ pmlmeinfo->accept_addba_req);
if (mode == 0) {
/*tear down Rx AMPDU*/
send_delba(padapter, 0, get_my_bssid(&(pmlmeinfo->network)));/* recipient*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 7b37e085b793..9167900b5f7d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -1189,9 +1189,9 @@ void rtw_macaddr_cfg(struct device *dev, u8 *mac_addr)
(mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) && (mac[2] == 0x00) &&
(mac[3] == 0x00) && (mac[4] == 0x00) && (mac[5] == 0x00))) {
- if (np &&
- (addr = of_get_property(np, "local-mac-address", &len)) &&
- len == ETH_ALEN) {
+ if (np &&
+ (addr = of_get_property(np, "local-mac-address", &len)) &&
+ len == ETH_ALEN) {
memcpy(mac_addr, addr, ETH_ALEN);
} else {
mac[0] = 0x00;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index d815a693fa64..e5354cec8dd5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -590,14 +590,10 @@ u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum NDIS_802_11
u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
{
- u8 bdefaultkey;
- u8 btransmitkey;
sint keyid, res;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
u8 ret = _SUCCESS;
- bdefaultkey = (wep->KeyIndex & 0x40000000) > 0 ? false : true; /* for ??? */
- btransmitkey = (wep->KeyIndex & 0x80000000) > 0 ? true : false; /* for ??? */
keyid = wep->KeyIndex & 0x3fffffff;
if (keyid >= 4) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index cb8a95aabd6c..fe739eb2cf7d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -28,9 +28,6 @@ sint _rtw_init_mlme_priv(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((u8 *)pmlmepriv, 0, sizeof(struct mlme_priv)); */
-
pmlmepriv->nic_hdl = (u8 *)padapter;
pmlmepriv->pscanned = NULL;
@@ -1817,8 +1814,10 @@ void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf)
* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
* @adapter: pointer to struct adapter structure
*/
-void _rtw_join_timeout_handler (struct adapter *adapter)
+void _rtw_join_timeout_handler(struct timer_list *t)
{
+ struct adapter *adapter = from_timer(adapter, t,
+ mlmepriv.assoc_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_871X("%s, fw_state =%x\n", __func__, get_fwstate(pmlmepriv));
@@ -1870,8 +1869,10 @@ void _rtw_join_timeout_handler (struct adapter *adapter)
* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
* @adapter: pointer to struct adapter structure
*/
-void rtw_scan_timeout_handler (struct adapter *adapter)
+void rtw_scan_timeout_handler(struct timer_list *t)
{
+ struct adapter *adapter = from_timer(adapter, t,
+ mlmepriv.scan_to_timer);
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
DBG_871X(FUNC_ADPT_FMT" fw_state =%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
@@ -1934,7 +1935,7 @@ exit:
return;
}
-void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
+void rtw_dynamic_check_timer_handler(struct adapter *adapter)
{
if (!adapter)
return;
@@ -2271,13 +2272,13 @@ sint rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
sint res = _SUCCESS;
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
res = _FAIL; /* try again */
goto exit;
}
- psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm));
+ psetauthparm = rtw_zmalloc(sizeof(struct setauth_parm));
if (psetauthparm == NULL) {
kfree((unsigned char *)pcmd);
res = _FAIL;
@@ -2312,7 +2313,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
sint res = _SUCCESS;
- psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
if (psetkeyparm == NULL) {
res = _FAIL;
goto exit;
@@ -2364,7 +2365,7 @@ sint rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, s
if (enqueue) {
- pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd == NULL) {
kfree((unsigned char *)psetkeyparm);
res = _FAIL; /* try again */
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index b6d137f505e1..7d7756e40bcb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -17,6 +17,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <rtw_wifi_regd.h>
+#include <linux/kernel.h>
static struct mlme_handler mlme_sta_tbl[] = {
@@ -474,15 +475,12 @@ int init_mlme_ext_priv(struct adapter *padapter)
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((u8 *)pmlmeext, 0, sizeof(struct mlme_ext_priv)); */
-
pmlmeext->padapter = padapter;
/* fill_fwpriv(padapter, &(pmlmeext->fwpriv)); */
init_mlme_ext_priv_value(padapter);
- pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+ pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;
init_mlme_ext_timer(padapter);
@@ -510,7 +508,7 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
if (!padapter)
return;
- if (padapter->bDriverStopped == true) {
+ if (padapter->bDriverStopped) {
del_timer_sync(&pmlmeext->survey_timer);
del_timer_sync(&pmlmeext->link_timer);
/* del_timer_sync(&pmlmeext->ADDBA_timer); */
@@ -562,7 +560,7 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
index = GetFrameSubType(pframe) >> 4;
- if (index >= (sizeof(mlme_sta_tbl) / sizeof(struct mlme_handler))) {
+ if (index >= ARRAY_SIZE(mlme_sta_tbl)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type =%d\n", index));
return;
}
@@ -582,11 +580,11 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
switch (GetFrameSubType(pframe)) {
case WIFI_AUTH:
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
ptable->func = &OnAuth;
else
ptable->func = &OnAuthClient;
- /* pass through */
+ /* fall through */
case WIFI_ASSOCREQ:
case WIFI_REASSOCREQ:
_mgt_dispatcher(padapter, ptable, precv_frame);
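
Note: the comment change from "pass through" to "fall through" in the hunk above matters if the build enables a fallthrough warning that matches comments (for example gcc's -Wimplicit-fallthrough at its comment-matching levels); that is an assumption about the build flags, not something the diff states. A small self-contained sketch of the idiom, with made-up names:

static int demo_dispatch(int subtype)
{
	int handled = 0;

	switch (subtype) {
	case 0:
		handled |= 1;	/* subtype-specific work, like OnAuth vs OnAuthClient */
		/* fall through */
	case 1:
	case 2:
		handled |= 2;	/* shared handling, like _mgt_dispatcher() */
		break;
	default:
		break;
	}

	return handled;
}
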
@@ -637,8 +635,8 @@ unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
/* DBG_871X("+OnProbeReq\n"); */
#ifdef CONFIG_AUTO_AP_MODE
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- pmlmepriv->cur_network.join_res == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res) {
struct sta_info *psta;
u8 *mac_addr, *peer_addr;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -752,7 +750,7 @@ _non_rc_device:
/* check (wildcard) SSID */
if (p != NULL) {
- if (is_valid_p2p_probereq == true)
+ if (is_valid_p2p_probereq)
goto _issue_probersp;
if ((ielen != 0 && false == !memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength))
@@ -761,8 +759,8 @@ _non_rc_device:
return _SUCCESS;
_issue_probersp:
- if (((check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- pmlmepriv->cur_network.join_res == true)) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res) || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
/* DBG_871X("+issue_probersp during ap mode\n"); */
issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
}
@@ -818,7 +816,7 @@ unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
if (!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
/* we should update current network before auth, or some IE is wrong */
- pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
+ pbss = rtw_malloc(sizeof(struct wlan_bssid_ex));
if (pbss) {
if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
@@ -1773,7 +1771,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
DBG_871X("%s Reason code(%d)\n", __func__, reason);
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1848,7 +1846,7 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
DBG_871X("%s Reason code(%d)\n", __func__, reason);
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1976,7 +1974,7 @@ unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_fra
/* process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), GetAddr3Ptr(pframe)); */
process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
- if (pmlmeinfo->bAcceptAddbaReq == true) {
+ if (pmlmeinfo->accept_addba_req) {
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
} else{
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
@@ -2227,7 +2225,7 @@ unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
category = frame_body[0];
- for (i = 0; i < sizeof(OnAction_tbl)/sizeof(struct action_handler); i++) {
+ for (i = 0; i < ARRAY_SIZE(OnAction_tbl); i++) {
ptable = &OnAction_tbl[i];
if (category == ptable->num)
@@ -2350,8 +2348,8 @@ void update_mgntframe_attrib_addr(struct adapter *padapter, struct xmit_frame *p
void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
{
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return;
@@ -2368,8 +2366,8 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
struct submit_ctx sctx;
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return ret;
@@ -2397,8 +2395,8 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg
u32 timeout_ms = 500;/* 500ms */
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- if (padapter->bSurpriseRemoved == true ||
- padapter->bDriverStopped == true) {
+ if (padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
rtw_free_xmitbuf(&padapter->xmitpriv, pmgntframe->pxmitbuf);
rtw_free_xmitframe(&padapter->xmitpriv, pmgntframe);
return -1;
@@ -2833,7 +2831,9 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
}
-static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, u8 ch, bool append_wps, int wait_ack)
+static int _issue_probereq(struct adapter *padapter,
+ struct ndis_802_11_ssid *pssid,
+ u8 *da, u8 ch, bool append_wps, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3394,7 +3394,7 @@ void issue_assocreq(struct adapter *padapter)
pframe = rtw_set_ie(pframe, EID_WPA2, pIE->Length, pIE->data, &(pattrib->pktlen));
break;
case EID_HTCapability:
- if (padapter->mlmepriv.htpriv.ht_option == true) {
+ if (padapter->mlmepriv.htpriv.ht_option) {
if (!(is_ap_in_tkip(padapter))) {
memcpy(&(pmlmeinfo->HT_caps), pIE->data, sizeof(struct HT_caps_element));
pframe = rtw_set_ie(pframe, EID_HTCapability, pIE->Length, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
@@ -3403,7 +3403,7 @@ void issue_assocreq(struct adapter *padapter)
break;
case EID_EXTCapability:
- if (padapter->mlmepriv.htpriv.ht_option == true)
+ if (padapter->mlmepriv.htpriv.ht_option)
pframe = rtw_set_ie(pframe, EID_EXTCapability, pIE->Length, pIE->data, &(pattrib->pktlen));
break;
default:
@@ -3432,7 +3432,8 @@ exit:
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3593,7 +3594,8 @@ s32 issue_nulldata_in_interrupt(struct adapter *padapter, u8 *da)
}
/* when wait_ack is true, this function should be called at process context */
-static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, bool wait_ack)
{
int ret = _FAIL;
struct xmit_frame *pmgntframe;
@@ -3717,7 +3719,8 @@ exit:
return ret;
}
-static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+static int _issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason, bool wait_ack)
{
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -4219,7 +4222,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
if (initiator == 0) {/* recipient */
for (tid = 0; tid < MAXTID; tid++) {
- if (psta->recvreorder_ctrl[tid].enable == true) {
+ if (psta->recvreorder_ctrl[tid].enable) {
DBG_871X("rx agg disable tid(%d)\n", tid);
issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
psta->recvreorder_ctrl[tid].enable = false;
@@ -4408,7 +4411,7 @@ void site_survey(struct adapter *padapter)
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
- if (is_client_associated_to_ap(padapter) == true)
+ if (is_client_associated_to_ap(padapter))
issue_nulldata(padapter, NULL, 0, 3, 500);
val8 = 0; /* survey done */
@@ -5049,12 +5052,12 @@ void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame
pmlmeext = &padapter->mlmeextpriv;
pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5102,12 +5105,12 @@ void report_surveydone_event(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5149,12 +5152,12 @@ void report_join_res(struct adapter *padapter, int res)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5200,12 +5203,12 @@ void report_wmm_edca_update(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct wmm_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5246,13 +5249,13 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL) {
return;
}
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5302,12 +5305,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
if (pcmd_obj == NULL)
return;
cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ pevtcmd = rtw_zmalloc(cmdsz);
if (pevtcmd == NULL) {
kfree((u8 *)pcmd_obj);
return;
@@ -5445,7 +5448,7 @@ static void rtw_mlmeext_disconnect(struct adapter *padapter)
pmlmeinfo->state = WIFI_FW_NULL_STATE;
if (state_backup == WIFI_FW_STATION_STATE) {
- if (rtw_port_switch_chk(padapter) == true) {
+ if (rtw_port_switch_chk(padapter)) {
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
{
struct adapter *port0_iface = dvobj_get_port0_adapter(adapter_to_dvobj(padapter));
@@ -5534,7 +5537,7 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
rtw_hal_macid_wakeup(padapter, psta->mac_id);
}
- if (rtw_port_switch_chk(padapter) == true)
+ if (rtw_port_switch_chk(padapter))
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
join_type = 2;
@@ -5659,7 +5662,7 @@ void _linked_info_dump(struct adapter *padapter)
}
for (i = 0; i < NUM_STA; i++) {
- if (pdvobj->macid[i] == true) {
+ if (pdvobj->macid[i]) {
if (i != 1) /* skip bc/mc sta */
/* tx info ============ */
rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &i);
@@ -5827,8 +5830,10 @@ void linked_status_chk(struct adapter *padapter)
}
-void survey_timer_hdl(struct adapter *padapter)
+void survey_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.survey_timer);
struct cmd_obj *ph2c;
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -5842,7 +5847,7 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->sitesurvey_res.channel_idx++;
}
- if (pmlmeext->scan_abort == true) {
+ if (pmlmeext->scan_abort) {
{
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
DBG_871X("%s idx:%d\n", __func__
@@ -5853,12 +5858,12 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->scan_abort = false;/* reset */
}
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
goto exit_survey_timer_hdl;
}
- psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
if (psurveyPara == NULL) {
kfree((unsigned char *)ph2c);
goto exit_survey_timer_hdl;
@@ -5874,8 +5879,10 @@ exit_survey_timer_hdl:
return;
}
-void link_timer_hdl(struct adapter *padapter)
+void link_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.link_timer);
/* static unsigned int rx_pkt = 0; */
/* static u64 tx_cnt = 0; */
/* struct xmit_priv *pxmitpriv = &(padapter->xmitpriv); */
@@ -5924,8 +5931,9 @@ void link_timer_hdl(struct adapter *padapter)
return;
}
-void addba_timer_hdl(struct sta_info *psta)
+void addba_timer_hdl(struct timer_list *t)
{
+ struct sta_info *psta = from_timer(psta, t, addba_retry_timer);
struct ht_priv *phtpriv;
if (!psta)
@@ -5933,20 +5941,22 @@ void addba_timer_hdl(struct sta_info *psta)
phtpriv = &psta->htpriv;
- if ((phtpriv->ht_option == true) && (phtpriv->ampdu_enable == true)) {
+ if (phtpriv->ht_option && phtpriv->ampdu_enable) {
if (phtpriv->candidate_tid_bitmap)
phtpriv->candidate_tid_bitmap = 0x0;
}
}
-void sa_query_timer_hdl(struct adapter *padapter)
+void sa_query_timer_hdl(struct timer_list *t)
{
+ struct adapter *padapter =
+ from_timer(padapter, t, mlmeextpriv.sa_query_timer);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
/* disconnect */
spin_lock_bh(&pmlmepriv->lock);
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
rtw_disassoc_cmd(padapter, 0, true);
rtw_indicate_disconnect(padapter);
rtw_free_assoc_resources(padapter, 1);
@@ -6084,7 +6094,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
rtw_auto_ap_start_beacon(padapter);
#endif
- if (rtw_port_switch_chk(padapter) == true) {
+ if (rtw_port_switch_chk(padapter)) {
rtw_hal_set_hwreg(padapter, HW_VAR_PORT_SWITCH, NULL);
if (psetop->mode == Ndis802_11APMode)
@@ -6358,7 +6368,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED)
&& set_idx >= 0
- && rtw_mlme_band_check(padapter, in[i].hw_value) == true
+ && rtw_mlme_band_check(padapter, in[i].hw_value)
) {
if (j >= out_num) {
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
@@ -6383,7 +6393,7 @@ static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_c
DBG_871X(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter), pmlmeext->channel_set[i].ChannelNum);
- if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum) == true) {
+ if (rtw_mlme_band_check(padapter, pmlmeext->channel_set[i].ChannelNum)) {
if (j >= out_num) {
DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT" out_num:%u not enough\n",
@@ -6435,7 +6445,7 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
/* issue null data if associating to the AP */
- if (is_client_associated_to_ap(padapter) == true) {
+ if (is_client_associated_to_ap(padapter)) {
pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
issue_nulldata(padapter, NULL, 1, 3, 500);
@@ -6602,7 +6612,7 @@ u8 chk_bmc_sleepq_cmd(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
u8 res = _SUCCESS;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
@@ -6626,13 +6636,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
int len_diff = 0;
- ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
if (ph2c == NULL) {
res = _FAIL;
goto exit;
}
- ptxBeacon_parm = (struct Tx_Beacon_param *)rtw_zmalloc(sizeof(struct Tx_Beacon_param));
+ ptxBeacon_parm = rtw_zmalloc(sizeof(struct Tx_Beacon_param));
if (ptxBeacon_parm == NULL) {
kfree((unsigned char *)ph2c);
res = _FAIL;
@@ -6767,7 +6777,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
pxmitframe->attrib.triggered = 1;
- if (xmitframe_hiq_filter(pxmitframe) == true)
+ if (xmitframe_hiq_filter(pxmitframe))
pxmitframe->attrib.qsel = 0x11;/* HIQ */
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
@@ -6809,7 +6819,7 @@ int rtw_chk_start_clnt_join(struct adapter *padapter, u8 *ch, u8 *bw, u8 *offset
connect_allow = false;
}
- if (connect_allow == true) {
+ if (connect_allow) {
DBG_871X("start_join_set_ch_bw: ch =%d, bwmode =%d, ch_offset =%d\n", cur_ch, cur_bw, cur_ch_offset);
*ch = cur_ch;
*bw = cur_bw;
diff --git a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
index aabdaafcbdd3..4a6af72013fa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8723bs/core/rtw_pwrctrl.c
@@ -34,7 +34,7 @@ void _ips_enter(struct adapter *padapter)
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
- DBG_871X_LEVEL(_drv_always_, "nolinked power save enter\n");
+ DBG_871X("nolinked power save enter\n");
if (pwrpriv->ips_mode == IPS_LEVEL_2)
pwrpriv->bkeepfwalive = true;
@@ -73,7 +73,7 @@ int _ips_leave(struct adapter *padapter)
if (result == _SUCCESS) {
pwrpriv->rf_pwrstate = rf_on;
}
- DBG_871X_LEVEL(_drv_always_, "nolinked power save leave\n");
+ DBG_871X("nolinked power save leave\n");
DBG_871X("==> ips_leave.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
pwrpriv->bips_processing = false;
@@ -201,10 +201,12 @@ exit:
return;
}
-void pwr_state_check_handler(RTW_TIMER_HDL_ARGS);
-void pwr_state_check_handler(RTW_TIMER_HDL_ARGS)
+static void pwr_state_check_handler(struct timer_list *t)
{
- struct adapter *padapter = (struct adapter *)FunctionContext;
+ struct pwrctrl_priv *pwrctrlpriv =
+ from_timer(pwrctrlpriv, t, pwr_state_check_timer);
+ struct adapter *padapter = pwrctrlpriv->adapter;
+
rtw_ps_cmd(padapter);
}
@@ -823,14 +825,10 @@ exit:
/*
* This function is a timer handler, can't do any IO in it.
*/
-static void pwr_rpwm_timeout_handler(void *FunctionContext)
+static void pwr_rpwm_timeout_handler(struct timer_list *t)
{
- struct adapter *padapter;
- struct pwrctrl_priv *pwrpriv;
-
+ struct pwrctrl_priv *pwrpriv = from_timer(pwrpriv, t, pwr_rpwm_timer);
- padapter = FunctionContext;
- pwrpriv = adapter_to_pwrctl(padapter);
DBG_871X("+%s: rpwm = 0x%02X cpwm = 0x%02X\n", __func__, pwrpriv->rpwm, pwrpriv->cpwm);
if ((pwrpriv->rpwm == pwrpriv->cpwm) || (pwrpriv->cpwm >= PS_STATE_S2)) {
@@ -1154,7 +1152,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->LpsIdleCount = 0;
pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+ pwrctrlpriv->bLeisurePs = pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
pwrctrlpriv->bFwCurrentInPSMode = false;
@@ -1173,10 +1171,11 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
_init_workitem(&pwrctrlpriv->cpwm_event, cpwm_event_callback, NULL);
pwrctrlpriv->brpwmtimeout = false;
+ pwrctrlpriv->adapter = padapter;
_init_workitem(&pwrctrlpriv->rpwmtimeoutwi, rpwmtimeout_workitem_callback, NULL);
- _init_timer(&pwrctrlpriv->pwr_rpwm_timer, padapter->pnetdev, pwr_rpwm_timeout_handler, padapter);
-
- rtw_init_timer(&pwrctrlpriv->pwr_state_check_timer, padapter, pwr_state_check_handler);
+ timer_setup(&pwrctrlpriv->pwr_rpwm_timer, pwr_rpwm_timeout_handler, 0);
+ timer_setup(&pwrctrlpriv->pwr_state_check_timer,
+ pwr_state_check_handler, 0);
pwrctrlpriv->wowlan_mode = false;
pwrctrlpriv->wowlan_ap_mode = false;
@@ -1193,8 +1192,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
void rtw_free_pwrctrl_priv(struct adapter *adapter)
{
- /* memset((unsigned char *)pwrctrlpriv, 0, sizeof(struct pwrctrl_priv)); */
-
#ifdef CONFIG_PNO_SUPPORT
if (pwrctrlpriv->pnlo_info != NULL)
printk("****** pnlo_info memory leak********\n");
@@ -1327,7 +1324,8 @@ int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
pwrctrlpriv->LpsIdleCount = 2;
pwrctrlpriv->power_mgnt = mode;
- pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt)?true:false;
+ pwrctrlpriv->bLeisurePs =
+ pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE;
}
} else
ret = -EINVAL;
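
Note: the two rtw_pwrctrl.c conversions above recover different containers. pwr_rpwm_timeout_handler() reaches its pwrctrl_priv directly via from_timer(), while pwr_state_check_handler() still needs the adapter, so the patch adds an adapter back-pointer to pwrctrl_priv and fills it in rtw_init_pwrctrl_priv(). A sketch of that back-pointer pattern; the demo_* types and field layout are illustrative only.

#include <linux/timer.h>

struct demo_adapter {
	int id;				/* placeholder; stands in for struct adapter */
};

struct demo_pwrctrl_priv {
	struct timer_list pwr_state_check_timer;
	struct demo_adapter *adapter;	/* back-pointer filled in at init time */
};

static void demo_ps_cmd(struct demo_adapter *adapter)
{
	/* stands in for rtw_ps_cmd(); the real handler enqueues a PS command */
	(void)adapter;
}

static void demo_pwr_state_check_handler(struct timer_list *t)
{
	struct demo_pwrctrl_priv *pwrctl =
		from_timer(pwrctl, t, pwr_state_check_timer);

	/* the callback reaches the adapter through the stored back-pointer */
	demo_ps_cmd(pwrctl->adapter);
}

static void demo_init_pwrctrl(struct demo_adapter *adapter,
			      struct demo_pwrctrl_priv *pwrctl)
{
	pwrctl->adapter = adapter;
	timer_setup(&pwrctl->pwr_state_check_timer,
		    demo_pwr_state_check_handler, 0);
}
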
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 68a6303e2754..9c7c3be0553a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -26,7 +26,7 @@ u8 rtw_rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
u8 rtw_bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
-void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
+static void rtw_signal_stat_timer_hdl(struct timer_list *t);
void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
{
@@ -46,9 +46,6 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
union recv_frame *precvframe;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((unsigned char *)precvpriv, 0, sizeof (struct recv_priv)); */
-
spin_lock_init(&precvpriv->lock);
_rtw_init_queue(&precvpriv->free_recv_queue);
@@ -65,7 +62,6 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
res = _FAIL;
goto exit;
}
- /* memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ); */
precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
/* precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf + RXFRAME_ALIGN_SZ - */
@@ -90,7 +86,8 @@ sint _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
res = rtw_hal_init_recv_priv(padapter);
- rtw_init_timer(&precvpriv->signal_stat_timer, padapter, rtw_signal_stat_timer_hdl);
+ timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl,
+ 0);
precvpriv->signal_stat_sampling_interval = 2000; /* ms */
@@ -129,7 +126,7 @@ union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
plist = get_next(phead);
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = (union recv_frame *)plist;
list_del_init(&precvframe->u.hdr.list);
padapter = precvframe->u.hdr.adapter;
@@ -243,7 +240,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
plist = get_next(phead);
while (phead != plist) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = (union recv_frame *)plist;
plist = get_next(plist);
@@ -1732,7 +1729,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = get_next(phead);
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pfhdr = &prframe->u.hdr;
list_del_init(&(prframe->u.list));
@@ -1754,7 +1751,7 @@ static union recv_frame *recvframe_defrag(struct adapter *adapter,
data = get_recvframe_data(prframe);
while (phead != plist) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = (union recv_frame *)plist;
pnfhdr = &pnextrframe->u.hdr;
@@ -2071,7 +2068,7 @@ int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union rec
plist = get_next(phead);
while (phead != plist) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = (union recv_frame *)plist;
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
@@ -2146,7 +2143,7 @@ int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctr
return true;
}
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
#ifdef DBG_RX_SEQ
@@ -2162,7 +2159,7 @@ int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctr
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
@@ -2358,9 +2355,10 @@ _err_exit:
}
-void rtw_reordering_ctrl_timeout_handler(void *pcontext)
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
- struct recv_reorder_ctrl *preorder_ctrl = pcontext;
+ struct recv_reorder_ctrl *preorder_ctrl =
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
struct adapter *padapter = preorder_ctrl->padapter;
struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
@@ -2601,9 +2599,10 @@ _recv_entry_drop:
return ret;
}
-void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
+static void rtw_signal_stat_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, recvpriv.signal_stat_timer);
struct recv_priv *recvpriv = &adapter->recvpriv;
u32 tmp_s, tmp_q;
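The conversions above from the driver's private timer wrappers to timer_setup()/from_timer() follow the standard kernel pattern: the callback receives the timer pointer and recovers its containing structure by member name. A minimal sketch of that pattern, with hypothetical struct and field names rather than the driver's own:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list my_timer;
	int counter;
};

static void my_timer_fn(struct timer_list *t)
{
	/* Recover the structure that embeds the firing timer. */
	struct my_priv *priv = from_timer(priv, t, my_timer);

	priv->counter++;
}

static void my_priv_init(struct my_priv *priv)
{
	/*
	 * The old _init_timer()/setup_timer() callers passed an opaque
	 * context pointer; timer_setup() drops it because from_timer()
	 * in the callback derives the context from the timer itself.
	 */
	timer_setup(&priv->my_timer, my_timer_fn, 0);
	mod_timer(&priv->my_timer, jiffies + msecs_to_jiffies(2000));
}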
diff --git a/drivers/staging/rtl8723bs/core/rtw_rf.c b/drivers/staging/rtl8723bs/core/rtw_rf.c
index b87ea4e388c0..07f5577cc073 100644
--- a/drivers/staging/rtl8723bs/core/rtw_rf.c
+++ b/drivers/staging/rtl8723bs/core/rtw_rf.c
@@ -15,6 +15,7 @@
#define _RTW_RF_C_
#include <drv_types.h>
+#include <linux/kernel.h>
struct ch_freq {
@@ -44,20 +45,18 @@ static struct ch_freq ch_freq_map[] = {
{216, 5080},/* Japan, means J16 */
};
-static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
-
u32 rtw_ch2freq(u32 channel)
{
u8 i;
u32 freq = 0;
- for (i = 0; i < ch_freq_map_num; i++) {
+ for (i = 0; i < ARRAY_SIZE(ch_freq_map); i++) {
if (channel == ch_freq_map[i].channel) {
freq = ch_freq_map[i].frequency;
break;
}
}
- if (i == ch_freq_map_num)
+ if (i == ARRAY_SIZE(ch_freq_map))
freq = 2412;
return freq;
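The ARRAY_SIZE() conversions here and in the HAL table loops further down replace open-coded sizeof divisions with the helper from <linux/kernel.h>. A minimal sketch of the idiom, using an illustrative table rather than the driver's channel map:

#include <linux/kernel.h>

static const u32 example_tab[] = { 2412, 2417, 2422, 2427 };

static u32 example_lookup(u32 idx)
{
	/*
	 * ARRAY_SIZE() expands to sizeof(arr) / sizeof((arr)[0]) plus a
	 * build-time check that the argument really is an array, so the
	 * count stays correct if the element type or length changes.
	 */
	if (idx >= ARRAY_SIZE(example_tab))
		return example_tab[0];

	return example_tab[idx];
}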
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index 06a7e4059fbb..aadf67bd0559 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -2272,7 +2272,7 @@ static void *aes_encrypt_init(u8 *key, size_t len)
u32 *rk;
if (len != 16)
return NULL;
- rk = (u32 *)rtw_malloc(AES_PRIV_SIZE);
+ rk = rtw_malloc(AES_PRIV_SIZE);
if (rk == NULL)
return NULL;
rijndaelKeySetupEnc(rk, key);
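The cast removals on rtw_malloc()/rtw_zmalloc() in this and later hunks pair with the prototype change in osdep_service.h further down, where the wrappers now return void *. A minimal sketch of why that makes the casts redundant; the wrapper body here is an assumption for illustration, not the driver's implementation:

#include <linux/slab.h>

/*
 * Assumed shape after the prototype change: a void * return lets any
 * object pointer take the result without a cast, as with kzalloc().
 */
static void *example_zmalloc(u32 sz)
{
	return kzalloc(sz, GFP_ATOMIC);
}

struct example_ctx {
	u32 len;
};

static struct example_ctx *example_ctx_alloc(void)
{
	struct example_ctx *ctx;

	ctx = example_zmalloc(sizeof(*ctx));	/* no cast needed */
	if (!ctx)
		return NULL;

	ctx->len = 0;
	return ctx;
}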
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index cb43ec90a648..03dd6848daa1 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -429,7 +429,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
plist = get_next(phead);
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = (union recv_frame *)plist;
plist = get_next(plist);
@@ -604,10 +604,10 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
- u8 res = true;
+ bool res = true;
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
- u8 match = false;
+ bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
@@ -630,10 +630,10 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
if (pacl_list->mode == 1) /* accept unless in deny list */
- res = (match == true) ? false:true;
+ res = !match;
else if (pacl_list->mode == 2)/* deny unless in accept list */
- res = (match == true) ? true:false;
+ res = match;
else
res = true;
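Several hunks in this series, like the one above, collapse '(x == true) ? true : false' into a plain boolean expression. A one-function sketch of the simplification, with hypothetical names and values:

#include <linux/types.h>

static bool example_is_cck_rate(u8 rate)
{
	/*
	 * The comparison already yields a bool; the old
	 * '(rate <= 11) ? true : false' form said the same thing twice.
	 */
	return rate <= 11;
}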
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index f485f541e36d..f6dc26c8bd3d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1359,7 +1359,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
return true;
}
- bssid = (struct wlan_bssid_ex *)rtw_zmalloc(sizeof(struct wlan_bssid_ex));
+ bssid = rtw_zmalloc(sizeof(struct wlan_bssid_ex));
if (bssid == NULL) {
DBG_871X("%s rtw_zmalloc fail !!!\n", __func__);
return true;
@@ -1946,7 +1946,7 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
preorder_ctrl->indicate_seq = 0xffff;
#endif
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq == true) ? true : false;
+ preorder_ctrl->enable = pmlmeinfo->accept_addba_req;
}
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 022f654419e4..be54186fb223 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -51,9 +51,6 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
struct xmit_frame *pxframe;
sint res = _SUCCESS;
- /* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
- /* memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
-
spin_lock_init(&pxmitpriv->lock);
spin_lock_init(&pxmitpriv->lock_sctx);
sema_init(&pxmitpriv->xmit_sema, 0);
@@ -2166,7 +2163,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
pxmitpriv->hwxmits = NULL;
- pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
+ pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
if (pxmitpriv->hwxmits == NULL) {
DBG_871X("alloc hwxmits fail!...\n");
diff --git a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
index 152a198c8f17..ce02457922b7 100644
--- a/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
+++ b/drivers/staging/rtl8723bs/hal/Hal8723BReg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*****************************************************************************
*Copyright(c) 2009, RealTEK Technology Inc. All Right Reserved.
*
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
index 51d4219177d3..951585467ab1 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_BB.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -268,7 +268,7 @@ static u32 Array_MP_8723B_AGC_TAB[] = {
void ODM_ReadAndConfig_MP_8723B_AGC_TAB(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_AGC_TAB)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_AGC_TAB);
u32 *Array = Array_MP_8723B_AGC_TAB;
ODM_RT_TRACE(
@@ -537,7 +537,7 @@ static u32 Array_MP_8723B_PHY_REG[] = {
void ODM_ReadAndConfig_MP_8723B_PHY_REG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_PHY_REG);
u32 *Array = Array_MP_8723B_PHY_REG;
ODM_RT_TRACE(
@@ -617,7 +617,6 @@ static u32 Array_MP_8723B_PHY_REG_PG[] = {
void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_PHY_REG_PG)/sizeof(u32);
u32 *Array = Array_MP_8723B_PHY_REG_PG;
ODM_RT_TRACE(
@@ -630,7 +629,7 @@ void ODM_ReadAndConfig_MP_8723B_PHY_REG_PG(PDM_ODM_T pDM_Odm)
pDM_Odm->PhyRegPgVersion = 1;
pDM_Odm->PhyRegPgValueType = PHY_REG_PG_EXACT_VALUE;
- for (i = 0; i < ArrayLen; i += 6) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_PHY_REG_PG); i += 6) {
u32 v1 = Array[i];
u32 v2 = Array[i+1];
u32 v3 = Array[i+2];
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
index b868e26f20ac..7f8afa1be1ca 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_MAC.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -239,7 +239,7 @@ static u32 Array_MP_8723B_MAC_REG[] = {
void ODM_ReadAndConfig_MP_8723B_MAC_REG(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_MAC_REG)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_MAC_REG);
u32 *Array = Array_MP_8723B_MAC_REG;
ODM_RT_TRACE(
diff --git a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
index 84a0be7ba697..fadfcbd91858 100644
--- a/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
+++ b/drivers/staging/rtl8723bs/hal/HalHWImg8723B_RF.c
@@ -13,7 +13,7 @@
*
******************************************************************************/
-
+#include <linux/kernel.h>
#include "odm_precomp.h"
static bool CheckPositive(
@@ -270,7 +270,7 @@ static u32 Array_MP_8723B_RadioA[] = {
void ODM_ReadAndConfig_MP_8723B_RadioA(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_RadioA)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MP_8723B_RadioA);
u32 *Array = Array_MP_8723B_RadioA;
ODM_RT_TRACE(
@@ -766,7 +766,6 @@ static u8 *Array_MP_8723B_TXPWR_LMT[] = {
void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(PDM_ODM_T pDM_Odm)
{
u32 i = 0;
- u32 ArrayLen = sizeof(Array_MP_8723B_TXPWR_LMT)/sizeof(u8 *);
u8 **Array = Array_MP_8723B_TXPWR_LMT;
ODM_RT_TRACE(
@@ -776,7 +775,7 @@ void ODM_ReadAndConfig_MP_8723B_TXPWR_LMT(PDM_ODM_T pDM_Odm)
("===> ODM_ReadAndConfig_MP_8723B_TXPWR_LMT\n")
);
- for (i = 0; i < ArrayLen; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(Array_MP_8723B_TXPWR_LMT); i += 7) {
u8 *regulation = Array[i];
u8 *band = Array[i+1];
u8 *bandwidth = Array[i+2];
diff --git a/drivers/staging/rtl8723bs/hal/hal_btcoex.c b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
index 86fee109e42d..7d4df5a8832e 100644
--- a/drivers/staging/rtl8723bs/hal/hal_btcoex.c
+++ b/drivers/staging/rtl8723bs/hal/hal_btcoex.c
@@ -463,7 +463,7 @@ static u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
break;
case BTC_GET_BL_WIFI_UNDER_5G:
- *pu8 = (pHalData->CurrentBandType == 1) ? true : false;
+ *pu8 = pHalData->CurrentBandType == 1;
break;
case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
@@ -1411,15 +1411,8 @@ void hal_btcoex_SetSingleAntPath(struct adapter *padapter, u8 singleAntPath)
u8 hal_btcoex_Initialize(struct adapter *padapter)
{
- u8 ret1;
- u8 ret2;
-
-
memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));
- ret1 = EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
- ret2 = (ret1 == true) ? true : false;
-
- return ret2;
+ return EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
}
void hal_btcoex_PowerOnSetting(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/hal/hal_com.c b/drivers/staging/rtl8723bs/hal/hal_com.c
index 3e63b6d9c097..dec887a5b338 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com.c
@@ -14,6 +14,7 @@
******************************************************************************/
#define _HAL_COM_C_
+#include <linux/kernel.h>
#include <drv_types.h>
#include <rtw_debug.h>
#include "hal_com_h2c.h"
@@ -1622,7 +1623,7 @@ void rtw_get_raw_rssi_info(void *sel, struct adapter *padapter)
psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all
);
- isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
@@ -1655,7 +1656,7 @@ void rtw_dump_raw_rssi_info(struct adapter *padapter)
DBG_871X("RxRate = %s, PWDBALL = %d(%%), rx_pwr_all = %d(dBm)\n",
HDATA_RATE(psample_pkt_rssi->data_rate), psample_pkt_rssi->pwdball, psample_pkt_rssi->pwr_all);
- isCCKrate = (psample_pkt_rssi->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = psample_pkt_rssi->data_rate <= DESC_RATE11M;
if (isCCKrate)
psample_pkt_rssi->mimo_singal_strength[0] = psample_pkt_rssi->pwdball;
@@ -1683,7 +1684,7 @@ void rtw_store_phy_info(struct adapter *padapter, union recv_frame *prframe)
struct rx_raw_rssi *psample_pkt_rssi = &padapter->recvpriv.raw_rssi_info;
psample_pkt_rssi->data_rate = pattrib->data_rate;
- isCCKrate = (pattrib->data_rate <= DESC_RATE11M) ? true : false;
+ isCCKrate = pattrib->data_rate <= DESC_RATE11M;
psample_pkt_rssi->pwdball = pPhyInfo->RxPWDBAll;
psample_pkt_rssi->pwr_all = pPhyInfo->RecvSignalPower;
@@ -1716,7 +1717,6 @@ void rtw_bb_rf_gain_offset(struct adapter *padapter)
{
u8 value = padapter->eeprompriv.EEPROMRFGainOffset;
u32 res, i = 0;
- u32 ArrayLen = sizeof(Array_kfreemap)/sizeof(u32);
u32 *Array = Array_kfreemap;
u32 v1 = 0, v2 = 0, target = 0;
/* DBG_871X("+%s value: 0x%02x+\n", __func__, value); */
@@ -1729,7 +1729,7 @@ void rtw_bb_rf_gain_offset(struct adapter *padapter)
res &= 0xfff87fff;
DBG_871X("Offset RF Gain. before reg 0x7f = 0x%08x\n", res);
/* res &= 0xfff87fff; */
- for (i = 0; i < ArrayLen; i += 2) {
+ for (i = 0; i < ARRAY_SIZE(Array_kfreemap); i += 2) {
v1 = Array[i];
v2 = Array[i+1];
if (v1 == padapter->eeprompriv.EEPROMRFGainVal) {
diff --git a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
index 566b6f0997da..e6787c22e00b 100644
--- a/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
+++ b/drivers/staging/rtl8723bs/hal/hal_com_phycfg.c
@@ -17,6 +17,7 @@
#include <drv_types.h>
#include <rtw_debug.h>
#include <hal_data.h>
+#include <linux/kernel.h>
u8 PHY_GetTxPowerByRateBase(struct adapter *Adapter, u8 Band, u8 RfPath,
u8 TxNum, enum RATE_SECTION RateSection)
@@ -860,7 +861,7 @@ struct adapter *padapter
for (txNum = RF_1TX; txNum < RF_MAX_TX_NUM; ++txNum) {
/* CCK */
base = PHY_GetTxPowerByRate(padapter, band, path, txNum, MGN_11M);
- for (i = 0; i < sizeof(cckRates); ++i) {
+ for (i = 0; i < ARRAY_SIZE(cckRates); ++i) {
value = PHY_GetTxPowerByRate(padapter, band, path, txNum, cckRates[i]);
PHY_SetTxPowerByRate(padapter, band, path, txNum, cckRates[i], value - base);
}
@@ -939,58 +940,78 @@ void PHY_SetTxPowerIndexByRateSection(
if (RateSection == CCK) {
u8 cckRates[] = {MGN_1M, MGN_2M, MGN_5_5M, MGN_11M};
if (pHalData->CurrentBandType == BAND_ON_2_4G)
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- cckRates, sizeof(cckRates)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, cckRates,
+ ARRAY_SIZE(cckRates));
} else if (RateSection == OFDM) {
u8 ofdmRates[] = {MGN_6M, MGN_9M, MGN_12M, MGN_18M, MGN_24M, MGN_36M, MGN_48M, MGN_54M};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- ofdmRates, sizeof(ofdmRates)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, ofdmRates,
+ ARRAY_SIZE(ofdmRates));
} else if (RateSection == HT_MCS0_MCS7) {
u8 htRates1T[] = {MGN_MCS0, MGN_MCS1, MGN_MCS2, MGN_MCS3, MGN_MCS4, MGN_MCS5, MGN_MCS6, MGN_MCS7};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates1T, sizeof(htRates1T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates1T,
+ ARRAY_SIZE(htRates1T));
} else if (RateSection == HT_MCS8_MCS15) {
u8 htRates2T[] = {MGN_MCS8, MGN_MCS9, MGN_MCS10, MGN_MCS11, MGN_MCS12, MGN_MCS13, MGN_MCS14, MGN_MCS15};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates2T, sizeof(htRates2T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates2T,
+ ARRAY_SIZE(htRates2T));
} else if (RateSection == HT_MCS16_MCS23) {
u8 htRates3T[] = {MGN_MCS16, MGN_MCS17, MGN_MCS18, MGN_MCS19, MGN_MCS20, MGN_MCS21, MGN_MCS22, MGN_MCS23};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates3T, sizeof(htRates3T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates3T,
+ ARRAY_SIZE(htRates3T));
} else if (RateSection == HT_MCS24_MCS31) {
u8 htRates4T[] = {MGN_MCS24, MGN_MCS25, MGN_MCS26, MGN_MCS27, MGN_MCS28, MGN_MCS29, MGN_MCS30, MGN_MCS31};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- htRates4T, sizeof(htRates4T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, htRates4T,
+ ARRAY_SIZE(htRates4T));
} else if (RateSection == VHT_1SSMCS0_1SSMCS9) {
u8 vhtRates1T[] = {MGN_VHT1SS_MCS0, MGN_VHT1SS_MCS1, MGN_VHT1SS_MCS2, MGN_VHT1SS_MCS3, MGN_VHT1SS_MCS4,
MGN_VHT1SS_MCS5, MGN_VHT1SS_MCS6, MGN_VHT1SS_MCS7, MGN_VHT1SS_MCS8, MGN_VHT1SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates1T, sizeof(vhtRates1T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates1T,
+ ARRAY_SIZE(vhtRates1T));
} else if (RateSection == VHT_2SSMCS0_2SSMCS9) {
u8 vhtRates2T[] = {MGN_VHT2SS_MCS0, MGN_VHT2SS_MCS1, MGN_VHT2SS_MCS2, MGN_VHT2SS_MCS3, MGN_VHT2SS_MCS4,
MGN_VHT2SS_MCS5, MGN_VHT2SS_MCS6, MGN_VHT2SS_MCS7, MGN_VHT2SS_MCS8, MGN_VHT2SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates2T, sizeof(vhtRates2T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates2T,
+ ARRAY_SIZE(vhtRates2T));
} else if (RateSection == VHT_3SSMCS0_3SSMCS9) {
u8 vhtRates3T[] = {MGN_VHT3SS_MCS0, MGN_VHT3SS_MCS1, MGN_VHT3SS_MCS2, MGN_VHT3SS_MCS3, MGN_VHT3SS_MCS4,
MGN_VHT3SS_MCS5, MGN_VHT3SS_MCS6, MGN_VHT3SS_MCS7, MGN_VHT3SS_MCS8, MGN_VHT3SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates3T, sizeof(vhtRates3T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates3T,
+ ARRAY_SIZE(vhtRates3T));
} else if (RateSection == VHT_4SSMCS0_4SSMCS9) {
u8 vhtRates4T[] = {MGN_VHT4SS_MCS0, MGN_VHT4SS_MCS1, MGN_VHT4SS_MCS2, MGN_VHT4SS_MCS3, MGN_VHT4SS_MCS4,
MGN_VHT4SS_MCS5, MGN_VHT4SS_MCS6, MGN_VHT4SS_MCS7, MGN_VHT4SS_MCS8, MGN_VHT4SS_MCS9};
- PHY_SetTxPowerIndexByRateArray(padapter, RFPath, pHalData->CurrentChannelBW, Channel,
- vhtRates4T, sizeof(vhtRates4T)/sizeof(u8));
+ PHY_SetTxPowerIndexByRateArray(padapter, RFPath,
+ pHalData->CurrentChannelBW,
+ Channel, vhtRates4T,
+ ARRAY_SIZE(vhtRates4T));
} else
DBG_871X("Invalid RateSection %d in %s", RateSection, __func__);
}
@@ -1012,7 +1033,7 @@ static bool phy_GetChnlIndex(u8 Channel, u8 *ChannelIdx)
} else {
bIn24G = false;
- for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
if (channel5G[i] == Channel) {
*ChannelIdx = i;
return bIn24G;
@@ -1149,7 +1170,7 @@ u8 PHY_GetTxPowerIndexBase(
} else if (BandWidth == CHANNEL_WIDTH_80) { /* BW80-1S, BW80-2S */
/* <20121220, Kordan> Get the index of array "Index5G_BW80_Base". */
u8 channel5G_80M[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
- for (i = 0; i < sizeof(channel5G_80M)/sizeof(u8); ++i)
+ for (i = 0; i < ARRAY_SIZE(channel5G_80M); ++i)
if (channel5G_80M[i] == Channel)
chnlIdx = i;
@@ -1588,7 +1609,7 @@ static s8 phy_GetChannelIndexOfTxPowerLimit(u8 Band, u8 Channel)
if (Band == BAND_ON_2_4G)
channelIndex = Channel - 1;
else if (Band == BAND_ON_5G) {
- for (i = 0; i < sizeof(channel5G)/sizeof(u8); ++i) {
+ for (i = 0; i < ARRAY_SIZE(channel5G); ++i) {
if (channel5G[i] == Channel)
channelIndex = i;
}
diff --git a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
index 9cde6c66235b..71853e6f7106 100644
--- a/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
+++ b/drivers/staging/rtl8723bs/hal/odm_CfoTracking.c
@@ -23,7 +23,7 @@ static void odm_SetCrystalCap(void *pDM_VOID, u8 CrystalCap)
struct adapter *Adapter = pDM_Odm->Adapter;
struct hal_com_data *pHalData = GET_HAL_DATA(Adapter);
- bEEPROMCheck = (pHalData->EEPROMVersion >= 0x01) ? true : false;
+ bEEPROMCheck = pHalData->EEPROMVersion >= 0x01;
if (pCfoTrack->CrystalCap == CrystalCap)
return;
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index 0bde9444471d..f02eb63a45ce 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -553,7 +553,7 @@ void odm_DIG(void *pDM_VOID)
dm_dig_min = DM_DIG_MIN_NIC;
DIG_MaxOfMin = DM_DIG_MAX_AP;
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolutly upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Absolutely upper bound = 0x%x, lower bound = 0x%x\n", dm_dig_max, dm_dig_min));
/* 1 Adjust boundary by RSSI */
if (pDM_Odm->bLinked && bPerformance) {
diff --git a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
index ba2700135b60..8dd6da8a4e26 100644
--- a/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723bs/hal/odm_HWConfig.c
@@ -106,7 +106,7 @@ static void odm_RxPhyStatus92CSeries_Parsing(
u8 LNA_idx, VGA_idx;
PPHY_STATUS_RPT_8192CD_T pPhyStaRpt = (PPHY_STATUS_RPT_8192CD_T)pPhyStatus;
- isCCKrate = (pPktinfo->DataRate <= DESC_RATE11M) ? true : false;
+ isCCKrate = pPktinfo->DataRate <= DESC_RATE11M;
pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 1565f2d67ea4..d6cef9e8378d 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -891,7 +891,7 @@ static void hal_ReadEFuse_WiFi(
return;
}
- efuseTbl = (u8 *)rtw_malloc(EFUSE_MAX_MAP_LEN);
+ efuseTbl = rtw_malloc(EFUSE_MAX_MAP_LEN);
if (efuseTbl == NULL) {
DBG_8192C("%s: alloc efuseTbl fail!\n", __func__);
return;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index d0b317077511..6281dfa1a3ca 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -113,7 +113,7 @@ query_free_page:
RT_TRACE(
_module_hal_xmit_c_,
_drv_notice_,
- ("%s: bSurpriseRemoved(wirte port)\n", __func__)
+ ("%s: bSurpriseRemoved(write port)\n", __func__)
);
goto free_xmitbuf;
}
diff --git a/drivers/staging/rtl8723bs/hal/sdio_halinit.c b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
index 6dfb06a49d41..1af77add6af4 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_halinit.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_halinit.c
@@ -1019,8 +1019,8 @@ static u32 rtl8723bs_hal_init(struct adapter *padapter)
rtw_btcoex_IQKNotify(padapter, true);
- restore_iqk_rst = (pwrpriv->bips_processing == true) ? true : false;
- b2Ant = pHalData->EEPROMBluetoothAntNum == Ant_x2 ? true : false;
+ restore_iqk_rst = pwrpriv->bips_processing;
+ b2Ant = pHalData->EEPROMBluetoothAntNum == Ant_x2;
PHY_IQCalibrate_8723B(padapter, false, restore_iqk_rst, b2Ant, pHalData->ant_path);
pHalData->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
diff --git a/drivers/staging/rtl8723bs/hal/sdio_ops.c b/drivers/staging/rtl8723bs/hal/sdio_ops.c
index 1d1b14dedd35..9a4c24861947 100644
--- a/drivers/staging/rtl8723bs/hal/sdio_ops.c
+++ b/drivers/staging/rtl8723bs/hal/sdio_ops.c
@@ -218,7 +218,7 @@ static u32 sdio_read32(struct intf_hdl *pintfhdl, u32 addr)
} else {
u8 *ptmpbuf;
- ptmpbuf = (u8 *)rtw_malloc(8);
+ ptmpbuf = rtw_malloc(8);
if (NULL == ptmpbuf) {
DBG_8192C(KERN_ERR "%s: Allocate memory FAIL!(size =8) addr = 0x%x\n", __func__, addr);
return SDIO_ERR_VAL32;
@@ -594,7 +594,7 @@ static s32 _sdio_local_read(
}
n = RND4(cnt);
- ptmpbuf = (u8 *)rtw_malloc(n);
+ ptmpbuf = rtw_malloc(n);
if (!ptmpbuf)
return (-1);
@@ -637,7 +637,7 @@ s32 sdio_local_read(
}
n = RND4(cnt);
- ptmpbuf = (u8 *)rtw_malloc(n);
+ ptmpbuf = rtw_malloc(n);
if (!ptmpbuf)
return (-1);
@@ -684,7 +684,7 @@ s32 sdio_local_write(
return err;
}
- ptmpbuf = (u8 *)rtw_malloc(cnt);
+ ptmpbuf = rtw_malloc(cnt);
if (!ptmpbuf)
return (-1);
@@ -1108,7 +1108,7 @@ void sd_int_dpc(struct adapter *padapter)
struct c2h_evt_hdr_88xx *c2h_evt;
DBG_8192C("%s: C2H Command\n", __func__);
- c2h_evt = (struct c2h_evt_hdr_88xx *)rtw_zmalloc(16);
+ c2h_evt = rtw_zmalloc(16);
if (c2h_evt != NULL) {
if (rtw_hal_c2h_evt_read(padapter, (u8 *)c2h_evt) == _SUCCESS) {
if (c2h_id_filter_ccx_8723b((u8 *)c2h_evt)) {
diff --git a/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h b/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
index 796449c3f430..130a94879805 100644
--- a/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
+++ b/drivers/staging/rtl8723bs/include/Hal8723BPwrSeq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef REALTEK_POWER_SEQUENCE_8723B
#define REALTEK_POWER_SEQUENCE_8723B
diff --git a/drivers/staging/rtl8723bs/include/drv_types.h b/drivers/staging/rtl8723bs/include/drv_types.h
index 4d14fbc5a1fe..32129ac8e169 100644
--- a/drivers/staging/rtl8723bs/include/drv_types.h
+++ b/drivers/staging/rtl8723bs/include/drv_types.h
@@ -177,7 +177,8 @@ struct registry_priv
u8 bt_ampdu;
s8 ant_num;
- bool bAcceptAddbaReq;
+ /* false:Reject AP's Add BA req, true:accept AP's Add BA req */
+ bool accept_addba_req;
u8 antdiv_cfg;
u8 antdiv_type;
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index ac9ffe0e3b84..e62ed71e1d80 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -96,8 +96,8 @@ typedef enum mstat_status{
#define rtw_mstat_update(flag, status, sz) do {} while (0)
#define rtw_mstat_dump(sel) do {} while (0)
-u8*_rtw_zmalloc(u32 sz);
-u8*_rtw_malloc(u32 sz);
+void *_rtw_zmalloc(u32 sz);
+void *_rtw_malloc(u32 sz);
void _kfree(u8 *pbuf, u32 sz);
struct sk_buff *_rtw_skb_alloc(u32 sz);
@@ -118,8 +118,6 @@ int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb);
extern void _rtw_init_queue(struct __queue *pqueue);
-extern void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc);
-
static __inline void thread_enter(char *name)
{
allow_signal(SIGTERM);
diff --git a/drivers/staging/rtl8723bs/include/osdep_service_linux.h b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
index 0c9b4f622fee..711863d74a01 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service_linux.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service_linux.h
@@ -86,17 +86,7 @@ __inline static struct list_head *get_list_head(struct __queue *queue)
#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(__kernel_size_t)(&((type *)0)->member)))
-
-#define RTW_TIMER_HDL_ARGS void *FunctionContext
-
-__inline static void _init_timer(_timer *ptimer, _nic_hdl nic_hdl, void *pfunc, void* cntx)
-{
- /* setup_timer(ptimer, pfunc, (u32)cntx); */
- ptimer->function = pfunc;
- ptimer->data = (unsigned long)cntx;
- init_timer(ptimer);
-}
+ container_of(ptr, type, member)
__inline static void _set_timer(_timer *ptimer, u32 delay_time)
{
@@ -109,7 +99,6 @@ __inline static void _cancel_timer(_timer *ptimer, u8 *bcancelled)
*bcancelled = true;/* true == 1; false == 0 */
}
-
__inline static void _init_workitem(_workitem *pwork, void *pfunc, void *cntx)
{
INIT_WORK(pwork, pfunc);
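The osdep_service_linux.h hunk above reduces LIST_CONTAINOR to the kernel's container_of(), which performs the same pointer arithmetic with an added type check. A minimal sketch of recovering an enclosing structure from its embedded list_head, with hypothetical struct names:

#include <linux/kernel.h>
#include <linux/list.h>

struct example_frame {
	struct list_head list;
	u16 seq;
};

static struct example_frame *example_first_frame(struct list_head *head)
{
	struct list_head *pos = head->next;

	/*
	 * container_of() subtracts offsetof(struct example_frame, list)
	 * from 'pos'; the old hand-rolled macro did the same arithmetic
	 * via a null-pointer cast, without the type checking.
	 */
	return container_of(pos, struct example_frame, list);
}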
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme.h b/drivers/staging/rtl8723bs/include/rtw_mlme.h
index d88ef67ce8d6..00b3d92c9f51 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme.h
@@ -518,8 +518,8 @@ extern void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
extern void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
extern void rtw_wmm_event_callback(struct adapter *padapter, u8 *pbuf);
-extern void rtw_join_timeout_handler(RTW_TIMER_HDL_ARGS);
-extern void _rtw_scan_timeout_handler(RTW_TIMER_HDL_ARGS);
+extern void rtw_join_timeout_handler(struct timer_list *t);
+extern void _rtw_scan_timeout_handler(struct timer_list *t);
int event_thread(void *context);
@@ -618,10 +618,10 @@ extern void rtw_update_registrypriv_dev_network(struct adapter *adapter);
extern void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
-extern void _rtw_join_timeout_handler(struct adapter *adapter);
-extern void rtw_scan_timeout_handler(struct adapter *adapter);
+extern void _rtw_join_timeout_handler(struct timer_list *t);
+extern void rtw_scan_timeout_handler(struct timer_list *t);
-extern void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+extern void rtw_dynamic_check_timer_handler(struct adapter *adapter);
bool rtw_is_scan_deny(struct adapter *adapter);
void rtw_clear_scan_deny(struct adapter *adapter);
void rtw_set_scan_deny_timer_hdl(struct adapter *adapter);
diff --git a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
index f3952463697e..6613dea2b283 100644
--- a/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723bs/include/rtw_mlme_ext.h
@@ -424,7 +424,7 @@ struct mlme_ext_info
u8 candidate_tid_bitmap;
u8 dialogToken;
/* Accept ADDBA Request */
- bool bAcceptAddbaReq;
+ bool accept_addba_req;
u8 bwmode_updated;
u8 hidden_ssid_mode;
u8 VHT_enable;
@@ -719,10 +719,10 @@ void linked_status_chk(struct adapter *padapter);
void _linked_info_dump(struct adapter *padapter);
-void survey_timer_hdl (struct adapter *padapter);
-void link_timer_hdl (struct adapter *padapter);
-void addba_timer_hdl(struct sta_info *psta);
-void sa_query_timer_hdl(struct adapter *padapter);
+void survey_timer_hdl (struct timer_list *t);
+void link_timer_hdl (struct timer_list *t);
+void addba_timer_hdl(struct timer_list *t);
+void sa_query_timer_hdl(struct timer_list *t);
/* void reauth_timer_hdl(struct adapter *padapter); */
/* void reassoc_timer_hdl(struct adapter *padapter); */
diff --git a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
index cf8e766a27a8..faf91022f54a 100644
--- a/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8723bs/include/rtw_pwrctrl.h
@@ -250,7 +250,7 @@ struct pwrctrl_priv
u8 ips_mode;
u8 ips_org_mode;
u8 ips_mode_req; /* used to accept the mode setting request, will update to ipsmode later */
- uint bips_processing;
+ bool bips_processing;
unsigned long ips_deny_time; /* will deny IPS when system time is smaller than this */
u8 pre_ips_type;/* 0: default flow, 1: carddisbale flow */
@@ -300,6 +300,7 @@ struct pwrctrl_priv
u64 wowlan_fw_iv;
#endif /* CONFIG_WOWLAN */
_timer pwr_state_check_timer;
+ struct adapter *adapter;
int pwr_state_check_interval;
u8 pwr_state_check_cnts;
diff --git a/drivers/staging/rtl8723bs/include/rtw_recv.h b/drivers/staging/rtl8723bs/include/rtw_recv.h
index 570a3c333aa0..71039ca79e4b 100644
--- a/drivers/staging/rtl8723bs/include/rtw_recv.h
+++ b/drivers/staging/rtl8723bs/include/rtw_recv.h
@@ -411,7 +411,7 @@ sint rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queu
sint rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
struct recv_buf *rtw_dequeue_recvbuf (struct __queue *queue);
-void rtw_reordering_ctrl_timeout_handler(void *pcontext);
+void rtw_reordering_ctrl_timeout_handler(struct timer_list *t);
__inline static u8 *get_rxmem(union recv_frame *precvframe)
{
diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
index d97ca1630bd4..ab5a8627d371 100644
--- a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
+++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
*
* Copyright(c) 2009-2010 Realtek Corporation.
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index bd4352fe2de3..51d48de24a24 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -136,11 +136,9 @@ static struct ieee80211_supported_band *rtw_spt_band_alloc(
goto exit;
}
- spt_band = (struct ieee80211_supported_band *)rtw_zmalloc(
- sizeof(struct ieee80211_supported_band)
- + sizeof(struct ieee80211_channel)*n_channels
- + sizeof(struct ieee80211_rate)*n_bitrates
- );
+ spt_band = rtw_zmalloc(sizeof(struct ieee80211_supported_band) +
+ sizeof(struct ieee80211_channel) * n_channels +
+ sizeof(struct ieee80211_rate) * n_bitrates);
if (!spt_band)
goto exit;
@@ -1094,7 +1092,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
DBG_871X("pairwise =%d\n", pairwise);
param_len = sizeof(struct ieee_param) + params->key_len;
- param = (struct ieee_param *)rtw_malloc(param_len);
+ param = rtw_malloc(param_len);
if (param == NULL)
return -1;
@@ -2183,7 +2181,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
{
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *) rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
DBG_871X(" wpa_set_encryption: pwep allocate fail !!!\n");
ret = -ENOMEM;
@@ -2677,7 +2675,7 @@ static int rtw_cfg80211_add_monitor_if (struct adapter *padapter, char *name, st
pnpi->sizeof_priv = sizeof(struct adapter);
/* wdev */
- mon_wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ mon_wdev = rtw_zmalloc(sizeof(struct wireless_dev));
if (!mon_wdev) {
DBG_871X(FUNC_ADPT_FMT" allocate mon_wdev fail\n", FUNC_ADPT_ARG(padapter));
ret = -ENOMEM;
@@ -3497,7 +3495,7 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
}
/* wdev */
- wdev = (struct wireless_dev *)rtw_zmalloc(sizeof(struct wireless_dev));
+ wdev = rtw_zmalloc(sizeof(struct wireless_dev));
if (!wdev) {
DBG_8192C("Couldn't allocate wireless device\n");
ret = -ENOMEM;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index d5e5f830f2a1..3fca0c2d4c8d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -18,6 +18,7 @@
#include <rtw_debug.h>
#include <rtw_mp.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV+30)
@@ -557,7 +558,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *) rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
goto exit;
@@ -2123,12 +2124,9 @@ static int rtw_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
-
- return ret;
+ return rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
}
static int rtw_wx_set_auth(struct net_device *dev,
@@ -2238,7 +2236,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
int ret = 0;
param_len = sizeof(struct ieee_param) + pext->key_len;
- param = (struct ieee_param *)rtw_malloc(param_len);
+ param = rtw_malloc(param_len);
if (param == NULL)
return -1;
@@ -2347,7 +2345,7 @@ static int rtw_wx_read32(struct net_device *dev,
if (0 == len)
return -EINVAL;
- ptmp = (u8 *)rtw_malloc(len);
+ ptmp = rtw_malloc(len);
if (NULL == ptmp)
return -ENOMEM;
@@ -3500,7 +3498,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
- param = (struct ieee_param *)rtw_malloc(p->length);
+ param = rtw_malloc(p->length);
if (param == NULL) {
ret = -ENOMEM;
goto out;
@@ -3621,7 +3619,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep =(struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ pwep = rtw_malloc(wep_total_len);
if (pwep == NULL) {
DBG_871X(" r871x_set_encryption: pwep allocate fail !!!\n");
goto exit;
@@ -3857,7 +3855,6 @@ static int rtw_hostapd_sta_flush(struct net_device *dev)
{
/* _irqL irqL; */
/* struct list_head *phead, *plist; */
- int ret = 0;
/* struct sta_info *psta = NULL; */
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
/* struct sta_priv *pstapriv = &padapter->stapriv; */
@@ -3866,9 +3863,7 @@ static int rtw_hostapd_sta_flush(struct net_device *dev)
flush_all_cam_entry(padapter); /* clear CAM */
- ret = rtw_sta_flush(padapter);
-
- return ret;
+ return rtw_sta_flush(padapter);
}
@@ -4266,7 +4261,6 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -4279,15 +4273,12 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
return -EINVAL;
}
- ret = rtw_acl_remove_sta(padapter, param->sta_addr);
-
- return ret;
+ return rtw_acl_remove_sta(padapter, param->sta_addr);
}
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
- int ret = 0;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
@@ -4300,9 +4291,7 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
return -EINVAL;
}
- ret = rtw_acl_add_sta(padapter, param->sta_addr);
-
- return ret;
+ return rtw_acl_add_sta(padapter, param->sta_addr);
}
@@ -4345,7 +4334,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
goto out;
}
- param = (struct ieee_param *)rtw_malloc(p->length);
+ param = rtw_malloc(p->length);
if (param == NULL) {
ret = -ENOMEM;
goto out;
@@ -4673,7 +4662,7 @@ static int rtw_test(
DBG_871X("+%s\n", __func__);
len = wrqu->data.length;
- pbuf = (u8 *)rtw_zmalloc(len);
+ pbuf = rtw_zmalloc(len);
if (pbuf == NULL) {
DBG_871X("%s: no memory!\n", __func__);
return -ENOMEM;
@@ -5029,12 +5018,12 @@ static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
struct iw_handler_def rtw_handlers_def = {
.standard = rtw_handlers,
- .num_standard = sizeof(rtw_handlers) / sizeof(iw_handler),
+ .num_standard = ARRAY_SIZE(rtw_handlers),
#if defined(CONFIG_WEXT_PRIV)
.private = rtw_private_handler,
.private_args = (struct iw_priv_args *)rtw_private_args,
- .num_private = sizeof(rtw_private_handler) / sizeof(iw_handler),
- .num_private_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args),
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
#endif
.get_wireless_stats = rtw_get_wireless_stats,
};
@@ -5121,8 +5110,8 @@ static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_
priv = rtw_private_handler;
priv_args = rtw_private_args;
- num_priv = sizeof(rtw_private_handler) / sizeof(iw_handler);
- num_priv_args = sizeof(rtw_private_args) / sizeof(struct iw_priv_args);
+ num_priv = ARRAY_SIZE(rtw_private_handler);
+ num_priv_args = ARRAY_SIZE(rtw_private_args);
if (num_priv_args == 0) {
err = -EOPNOTSUPP;
diff --git a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
index 80ca2d781c5d..a4ef5789d794 100644
--- a/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/mlme_linux.c
@@ -19,18 +19,21 @@
#include <drv_types.h>
#include <rtw_debug.h>
-static void _dynamic_check_timer_handlder (void *FunctionContext)
+static void _dynamic_check_timer_handler(struct timer_list *t)
{
- struct adapter *adapter = FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
- rtw_dynamic_check_timer_handlder(adapter);
+ rtw_dynamic_check_timer_handler(adapter);
_set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
}
-static void _rtw_set_scan_deny_timer_hdl(void *FunctionContext)
+static void _rtw_set_scan_deny_timer_hdl(struct timer_list *t)
{
- struct adapter *adapter = FunctionContext;
+ struct adapter *adapter =
+ from_timer(adapter, t, mlmepriv.set_scan_deny_timer);
+
rtw_set_scan_deny_timer_hdl(adapter);
}
@@ -38,21 +41,20 @@ void rtw_init_mlme_timer(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- _init_timer(&(pmlmepriv->assoc_timer), padapter->pnetdev, _rtw_join_timeout_handler, padapter);
- /* _init_timer(&(pmlmepriv->sitesurveyctrl.sitesurvey_ctrl_timer), padapter->pnetdev, sitesurvey_ctrl_handler, padapter); */
- _init_timer(&(pmlmepriv->scan_to_timer), padapter->pnetdev, rtw_scan_timeout_handler, padapter);
-
- _init_timer(&(pmlmepriv->dynamic_chk_timer), padapter->pnetdev, _dynamic_check_timer_handlder, padapter);
-
- _init_timer(&(pmlmepriv->set_scan_deny_timer), padapter->pnetdev, _rtw_set_scan_deny_timer_hdl, padapter);
+ timer_setup(&pmlmepriv->assoc_timer, _rtw_join_timeout_handler, 0);
+ timer_setup(&pmlmepriv->scan_to_timer, rtw_scan_timeout_handler, 0);
+ timer_setup(&pmlmepriv->dynamic_chk_timer,
+ _dynamic_check_timer_handler, 0);
+ timer_setup(&pmlmepriv->set_scan_deny_timer,
+ _rtw_set_scan_deny_timer_hdl, 0);
}
void rtw_os_indicate_connect(struct adapter *adapter)
{
struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ==true) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ==true))
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
{
rtw_cfg80211_ibss_indicate_connect(adapter);
}
@@ -99,7 +101,7 @@ void rtw_reset_securitypriv(struct adapter *adapter)
/* reset RX BIP packet number */
pmlmeext->mgnt_80211w_IPN_rx = 0;
- memset((unsigned char *)&adapter->securitypriv, 0, sizeof (struct security_priv));
+ memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
/* Added by Albert 2009/02/18 */
/* Restore the PMK information to securitypriv structure for the following connection. */
@@ -116,9 +118,9 @@ void rtw_reset_securitypriv(struct adapter *adapter)
{
/* if (adapter->mlmepriv.fw_state & WIFI_STATION_STATE) */
/* */
- struct security_priv *psec_priv =&adapter->securitypriv;
+ struct security_priv *psec_priv = &adapter->securitypriv;
- psec_priv->dot11AuthAlgrthm =dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
psec_priv->dot11PrivacyKeyIndex = 0;
@@ -150,7 +152,7 @@ void rtw_os_indicate_disconnect(struct adapter *adapter)
void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
{
uint len;
- u8 *buff,*p, i;
+ u8 *buff, *p, i;
union iwreq_data wrqu;
RT_TRACE(_module_mlme_osdep_c_, _drv_info_, ("+rtw_report_sec_ie, authmode =%d\n", authmode));
@@ -168,22 +170,22 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
}
p = buff;
- p+=sprintf(p,"ASSOCINFO(ReqIEs =");
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
- len = sec_ie[1]+2;
- len = (len < IW_CUSTOM_MAX) ? len:IW_CUSTOM_MAX;
+ len = sec_ie[1] + 2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
- for (i = 0;i<len;i++) {
- p+=sprintf(p,"%02x", sec_ie[i]);
+ for (i = 0; i < len; i++) {
+ p += sprintf(p, "%02x", sec_ie[i]);
}
- p+=sprintf(p,")");
+ p += sprintf(p, ")");
memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length =p-buff;
+ wrqu.data.length = p - buff;
- wrqu.data.length = (wrqu.data.length<IW_CUSTOM_MAX) ? wrqu.data.length:IW_CUSTOM_MAX;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ? wrqu.data.length : IW_CUSTOM_MAX;
kfree(buff);
}
@@ -191,14 +193,14 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
{
- _init_timer(&psta->addba_retry_timer, padapter->pnetdev, addba_timer_hdl, psta);
+ timer_setup(&psta->addba_retry_timer, addba_timer_hdl, 0);
}
void init_mlme_ext_timer(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- _init_timer(&pmlmeext->survey_timer, padapter->pnetdev, survey_timer_hdl, padapter);
- _init_timer(&pmlmeext->link_timer, padapter->pnetdev, link_timer_hdl, padapter);
- _init_timer(&pmlmeext->sa_query_timer, padapter->pnetdev, sa_query_timer_hdl, padapter);
+ timer_setup(&pmlmeext->survey_timer, survey_timer_hdl, 0);
+ timer_setup(&pmlmeext->link_timer, link_timer_hdl, 0);
+ timer_setup(&pmlmeext->sa_query_timer, sa_query_timer_hdl, 0);
}
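Note that from_timer() also handles timers embedded in a nested member, as in the mlme handlers above, because the member argument is a path relative to the outer structure. A minimal sketch with hypothetical names:

#include <linux/timer.h>

struct example_mlme {
	struct timer_list chk_timer;
};

struct example_adapter {
	struct example_mlme mlme;
};

static void example_chk_timer_fn(struct timer_list *t)
{
	/*
	 * The third argument names the full path from the outer struct
	 * down to the timer, so container_of() resolves to the adapter
	 * even though the timer lives inside an embedded struct.
	 */
	struct example_adapter *adapter =
		from_timer(adapter, t, mlme.chk_timer);

	(void)adapter;	/* use the adapter here */
}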
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 021589913681..fc5e3d4739c0 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -24,9 +24,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_VERSION(DRIVERVERSION);
/* module param defaults */
-static int rtw_chip_version = 0x00;
+static int rtw_chip_version;
static int rtw_rfintfs = HWPI;
-static int rtw_lbkmode = 0;/* RTL8712_AIR_TRX; */
+static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure;infra, ad-hoc, auto */
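The '= 0' removals in this file follow the kernel convention that static and global variables are implicitly zero-initialized (they live in .bss), so the explicit initializer is noise that checkpatch.pl flags. A two-line sketch with hypothetical names:

/* Both forms start at zero; the first draws a checkpatch warning. */
static int example_param_verbose = 0;	/* "do not initialise statics to 0" */
static int example_param;		/* preferred */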
@@ -40,12 +40,12 @@ static int rtw_frag_thresh = 2346;/* */
static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
static int rtw_scan_mode = 1;/* active, passive */
static int rtw_adhoc_tx_pwr = 1;
-static int rtw_soft_ap = 0;
+static int rtw_soft_ap;
/* int smart_ps = 1; */
static int rtw_power_mgnt = 1;
static int rtw_ips_mode = IPS_NORMAL;
module_param(rtw_ips_mode, int, 0644);
-MODULE_PARM_DESC(rtw_ips_mode,"The default IPS mode");
+MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
static int rtw_smart_ps = 2;
@@ -61,18 +61,18 @@ static int rtw_busy_thresh = 40;
/* int qos_enable = 0; */
static int rtw_ack_policy = NORMAL_ACK;
-static int rtw_software_encrypt = 0;
-static int rtw_software_decrypt = 0;
+static int rtw_software_encrypt;
+static int rtw_software_decrypt;
-static int rtw_acm_method = 0;/* 0:By SW 1:By HW. */
+static int rtw_acm_method;/* 0:By SW 1:By HW. */
static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
-static int rtw_uapsd_enable = 0;
+static int rtw_uapsd_enable;
static int rtw_uapsd_max_sp = NO_LIMIT;
-static int rtw_uapsd_acbk_en = 0;
-static int rtw_uapsd_acbe_en = 0;
-static int rtw_uapsd_acvi_en = 0;
-static int rtw_uapsd_acvo_en = 0;
+static int rtw_uapsd_acbk_en;
+static int rtw_uapsd_acbe_en;
+static int rtw_uapsd_acvi_en;
+static int rtw_uapsd_acvo_en;
int rtw_ht_enable = 1;
/* 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160MHz, 4: 80+80MHz */
@@ -81,7 +81,7 @@ int rtw_ht_enable = 1;
static int rtw_bw_mode = 0x21;
static int rtw_ampdu_enable = 1;/* for enable tx_ampdu ,0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
static int rtw_rx_stbc = 1;/* 0: disable, 1:enable 2.4g */
-static int rtw_ampdu_amsdu = 0;/* 0: disabled, 1:enabled, 2:auto . There is an IOT issu with DLINK DIR-629 when the flag turn on */
+static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto . There is an IOT issu with DLINK DIR-629 when the flag turn on */
/* Short GI support Bit Map */
/* BIT0 - 20MHz, 0: non-support, 1: support */
/* BIT1 - 40MHz, 0: non-support, 1: support */
@@ -99,8 +99,8 @@ static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and lega
/* int rf_config = RF_1T2R; 1T2R */
static int rtw_rf_config = RF_MAX_TYPE; /* auto */
-static int rtw_low_power = 0;
-static int rtw_wifi_spec = 0;
+static int rtw_low_power;
+static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
static int rtw_btcoex_enable = 1;
@@ -113,40 +113,38 @@ static int rtw_ant_num = -1; /* <0: undefined, >0: Antenna number */
module_param(rtw_ant_num, int, 0644);
MODULE_PARM_DESC(rtw_ant_num, "Antenna number setting");
-static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-
static int rtw_antdiv_cfg = 1; /* 0:OFF , 1:ON, 2:decide by Efuse config */
-static int rtw_antdiv_type = 0 ; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
+static int rtw_antdiv_type; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
-static int rtw_enusbss = 0;/* 0:disable, 1:enable */
+static int rtw_enusbss;/* 0:disable, 1:enable */
-static int rtw_hwpdn_mode =2;/* 0:disable, 1:enable, 2: by EFUSE config */
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
#ifdef CONFIG_HW_PWRP_DETECTION
static int rtw_hwpwrp_detect = 1;
#else
-static int rtw_hwpwrp_detect = 0; /* HW power ping detect 0:disable , 1:enable */
+static int rtw_hwpwrp_detect; /* HW power ping detect 0:disable , 1:enable */
#endif
-static int rtw_hw_wps_pbc = 0;
+static int rtw_hw_wps_pbc;
int rtw_mc2u_disable = 0;
-static int rtw_80211d = 0;
+static int rtw_80211d;
#ifdef CONFIG_QOS_OPTIMIZATION
static int rtw_qos_opt_enable = 1;/* 0: disable, 1:enable */
#else
-static int rtw_qos_opt_enable = 0;/* 0: disable, 1:enable */
+static int rtw_qos_opt_enable;/* 0: disable, 1:enable */
#endif
module_param(rtw_qos_opt_enable, int, 0644);
-static char* ifname = "wlan%d";
+static char *ifname = "wlan%d";
module_param(ifname, charp, 0644);
MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
-char* rtw_initmac = NULL; /* temp mac address if users want to use instead of the mac address in Efuse */
+char *rtw_initmac = NULL; /* temp mac address if users want to use instead of the mac address in Efuse */
module_param(rtw_initmac, charp, 0644);
module_param(rtw_channel_plan, int, 0644);
@@ -183,16 +181,16 @@ module_param(rtw_hwpwrp_detect, int, 0644);
module_param(rtw_hw_wps_pbc, int, 0644);
-static uint rtw_max_roaming_times =2;
+static uint rtw_max_roaming_times = 2;
module_param(rtw_max_roaming_times, uint, 0644);
-MODULE_PARM_DESC(rtw_max_roaming_times,"The max roaming times to try");
+MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
module_param(rtw_mc2u_disable, int, 0644);
module_param(rtw_80211d, int, 0644);
MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
-static uint rtw_notch_filter = 0;
+static uint rtw_notch_filter;
module_param(rtw_notch_filter, uint, 0644);
MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
@@ -202,14 +200,14 @@ static uint rtw_hiq_filter = CONFIG_RTW_HIQ_FILTER;
module_param(rtw_hiq_filter, uint, 0644);
MODULE_PARM_DESC(rtw_hiq_filter, "0:allow all, 1:allow special, 2:deny all");
-static int rtw_tx_pwr_lmt_enable = 0;
-static int rtw_tx_pwr_by_rate = 0;
+static int rtw_tx_pwr_lmt_enable;
+static int rtw_tx_pwr_by_rate;
module_param(rtw_tx_pwr_lmt_enable, int, 0644);
-MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable,"0:Disable, 1:Enable, 2: Depend on efuse");
+MODULE_PARM_DESC(rtw_tx_pwr_lmt_enable, "0:Disable, 1:Enable, 2: Depend on efuse");
module_param(rtw_tx_pwr_by_rate, int, 0644);
-MODULE_PARM_DESC(rtw_tx_pwr_by_rate,"0:Disable, 1:Enable, 2: Depend on efuse");
+MODULE_PARM_DESC(rtw_tx_pwr_by_rate, "0:Disable, 1:Enable, 2: Depend on efuse");
char *rtw_phy_file_path = "";
module_param(rtw_phy_file_path, charp, 0644);
@@ -222,12 +220,12 @@ MODULE_PARM_DESC(rtw_phy_file_path, "The path of phy parameter");
/* BIT4 - RF, 0: non-support, 1: support */
/* BIT5 - RF_TXPWR_TRACK, 0: non-support, 1: support */
/* BIT6 - RF_TXPWR_LMT, 0: non-support, 1: support */
-static int rtw_load_phy_file = (BIT2|BIT6);
+static int rtw_load_phy_file = (BIT2 | BIT6);
module_param(rtw_load_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_load_phy_file,"PHY File Bit Map");
-static int rtw_decrypt_phy_file = 0;
+MODULE_PARM_DESC(rtw_load_phy_file, "PHY File Bit Map");
+static int rtw_decrypt_phy_file;
module_param(rtw_decrypt_phy_file, int, 0644);
-MODULE_PARM_DESC(rtw_decrypt_phy_file,"Enable Decrypt PHY File");
+MODULE_PARM_DESC(rtw_decrypt_phy_file, "Enable Decrypt PHY File");
int _netdev_open(struct net_device *pnetdev);
int netdev_open (struct net_device *pnetdev);
@@ -255,8 +253,8 @@ static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense ;
registry_par->vcs_type = (u8)rtw_vcs_type;
- registry_par->rts_thresh =(u16)rtw_rts_thresh;
- registry_par->frag_thresh =(u16)rtw_frag_thresh;
+ registry_par->rts_thresh = (u16)rtw_rts_thresh;
+ registry_par->frag_thresh = (u16)rtw_frag_thresh;
registry_par->preamble = (u8)rtw_preamble;
registry_par->scan_mode = (u8)rtw_scan_mode;
registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
@@ -311,7 +309,7 @@ static uint loadparam(struct adapter *padapter, _nic_hdl pnetdev)
registry_par->bt_ampdu = (u8)rtw_bt_ampdu;
registry_par->ant_num = (s8)rtw_ant_num;
- registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->accept_addba_req = true;
registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
registry_par->antdiv_type = (u8)rtw_antdiv_type;
@@ -351,8 +349,7 @@ static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct sockaddr *addr = p;
- if (padapter->bup == false)
- {
+ if (padapter->bup == false) {
/* DBG_871X("r8711_net_set_mac_address(), MAC =%x:%x:%x:%x:%x:%x\n", addr->sa_data[0], addr->sa_data[1], addr->sa_data[2], addr->sa_data[3], */
/* addr->sa_data[4], addr->sa_data[5]); */
memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
@@ -425,9 +422,7 @@ static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb
skb->priority = rtw_classify8021d(skb);
if (pmlmepriv->acm_mask != 0)
- {
skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
- }
return rtw_1d_to_queue[skb->priority];
}
@@ -440,12 +435,12 @@ u16 rtw_recv_select_queue(struct sk_buff *skb)
u32 priority;
u8 *pdata = skb->data;
- memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+ memcpy(&eth_type, pdata + (ETH_ALEN << 1), 2);
switch (be16_to_cpu(eth_type)) {
case ETH_P_IP:
- piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+ piphdr = (struct iphdr *)(pdata + ETH_HLEN);
dscp = piphdr->tos & 0xfc;
@@ -457,17 +452,16 @@ u16 rtw_recv_select_queue(struct sk_buff *skb)
}
return rtw_1d_to_queue[priority];
-
}
-static int rtw_ndev_notifier_call(struct notifier_block * nb, unsigned long state, void *ptr)
+static int rtw_ndev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev->netdev_ops->ndo_do_ioctl != rtw_ioctl)
return NOTIFY_DONE;
- DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT" state:%lu\n", FUNC_NDEV_ARG(dev), state);
+ DBG_871X_LEVEL(_drv_info_, FUNC_NDEV_FMT " state:%lu\n", FUNC_NDEV_ARG(dev), state);
switch (state) {
case NETDEV_CHANGENAME:
@@ -497,7 +491,7 @@ static int rtw_ndev_init(struct net_device *dev)
{
struct adapter *adapter = rtw_netdev_priv(dev);
- DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
strncpy(adapter->old_ifname, dev->name, IFNAMSIZ);
rtw_adapter_proc_init(dev);
@@ -508,7 +502,7 @@ static void rtw_ndev_uninit(struct net_device *dev)
{
struct adapter *adapter = rtw_netdev_priv(dev);
- DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(adapter));
+ DBG_871X_LEVEL(_drv_always_, FUNC_ADPT_FMT "\n", FUNC_ADPT_ARG(adapter));
rtw_adapter_proc_deinit(dev);
}
@@ -561,7 +555,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
pnetdev->netdev_ops = &rtw_netdev_ops;
/* pnetdev->tx_timeout = NULL; */
- pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->watchdog_timeo = HZ * 3; /* 3 second timeout */
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
/* step 2. */
@@ -597,7 +591,7 @@ u32 rtw_start_drv_threads(struct adapter *padapter)
_status = _FAIL;
padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
- if (IS_ERR(padapter->cmdThread))
+ if (IS_ERR(padapter->cmdThread))
_status = _FAIL;
else
down(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
@@ -623,7 +617,7 @@ void rtw_stop_drv_threads (struct adapter *padapter)
static u8 rtw_init_default_value(struct adapter *padapter)
{
u8 ret = _SUCCESS;
- struct registry_priv* pregistrypriv = &padapter->registrypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -652,8 +646,8 @@ static u8 rtw_init_default_value(struct adapter *padapter)
#ifdef CONFIG_GTK_OL
psecuritypriv->binstallKCK_KEK = _FAIL;
#endif /* CONFIG_GTK_OL */
- psecuritypriv->sw_encrypt =pregistrypriv->software_encrypt;
- psecuritypriv->sw_decrypt =pregistrypriv->software_decrypt;
+ psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
+ psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
@@ -691,7 +685,8 @@ struct dvobj_priv *devobj_init(void)
{
struct dvobj_priv *pdvobj = NULL;
- if ((pdvobj = (struct dvobj_priv*)rtw_zmalloc(sizeof(*pdvobj))) == NULL)
+ pdvobj = rtw_zmalloc(sizeof(*pdvobj));
+ if (pdvobj == NULL)
return NULL;
mutex_init(&pdvobj->hw_init_mutex);
@@ -722,7 +717,7 @@ void devobj_deinit(struct dvobj_priv *pdvobj)
mutex_destroy(&pdvobj->setch_mutex);
mutex_destroy(&pdvobj->setbw_mutex);
- kfree((u8 *)pdvobj);
+ kfree(pdvobj);
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
@@ -748,7 +743,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0;
pmlmepriv->LinkDetectInfo.LowPowerTransitionCount = 0;
- _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY |_FW_UNDER_LINKING);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
pwrctrlpriv->pwr_state_check_cnts = 0;
@@ -777,7 +772,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
goto exit;
}
- padapter->cmdpriv.padapter =padapter;
+ padapter->cmdpriv.padapter = padapter;
if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
@@ -935,7 +930,7 @@ static int _rtw_drv_register_netdev(struct adapter *padapter, char *name)
goto error_register_netdev;
}
- DBG_871X("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id+1), MAC_ARG(pnetdev->dev_addr));
+ DBG_871X("%s, MAC Address (if%d) = " MAC_FMT "\n", __func__, (padapter->iface_id + 1), MAC_ARG(pnetdev->dev_addr));
return ret;
@@ -984,9 +979,9 @@ int _netdev_open(struct net_device *pnetdev)
goto netdev_open_error;
}
- DBG_871X("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr));
+ DBG_871X("MAC Address = " MAC_FMT "\n", MAC_ARG(pnetdev->dev_addr));
- status =rtw_start_drv_threads(padapter);
+ status = rtw_start_drv_threads(padapter);
if (status == _FAIL) {
DBG_871X("Initialize driver software resource Failed!\n");
goto netdev_open_error;
@@ -1027,7 +1022,6 @@ netdev_open_error:
DBG_871X("-871x_drv - drv_open fail, bup =%d\n", padapter->bup);
return (-1);
-
}
int netdev_open(struct net_device *pnetdev)
@@ -1036,8 +1030,7 @@ int netdev_open(struct net_device *pnetdev)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter);
- if (pwrctrlpriv->bInSuspend == true)
- {
+ if (pwrctrlpriv->bInSuspend == true) {
DBG_871X("+871x_drv - drv_open, bInSuspend =%d\n", pwrctrlpriv->bInSuspend);
return 0;
}
@@ -1066,16 +1059,13 @@ static int ips_netdrv_open(struct adapter *padapter)
/* padapter->bup = true; */
status = rtw_hal_init(padapter);
- if (status == _FAIL)
- {
+ if (status == _FAIL) {
RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
goto netdev_open_error;
}
if (padapter->intf_start)
- {
padapter->intf_start(padapter);
- }
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
@@ -1098,7 +1088,6 @@ int rtw_ips_pwr_up(struct adapter *padapter)
DBG_871X("<=== rtw_ips_pwr_up..............\n");
return result;
-
}
void rtw_ips_pwr_down(struct adapter *padapter)
@@ -1119,10 +1108,7 @@ void rtw_ips_dev_unload(struct adapter *padapter)
if (padapter->bSurpriseRemoved == false)
- {
rtw_hal_deinit(padapter);
- }
-
}
@@ -1132,15 +1118,14 @@ static int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- if (true == bnormal)
- {
+ if (true == bnormal) {
if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->hw_init_mutex)) == 0) {
status = _netdev_open(pnetdev);
mutex_unlock(&(adapter_to_dvobj(padapter)->hw_init_mutex));
}
}
else
- status = (_SUCCESS == ips_netdrv_open(padapter))?(0):(-1);
+ status = (_SUCCESS == ips_netdrv_open(padapter)) ? (0) : (-1);
return status;
}
@@ -1152,8 +1137,7 @@ static int netdev_close(struct net_device *pnetdev)
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+871x_drv - drv_close\n"));
- if (pwrctl->bInternalAutoSuspend == true)
- {
+ if (pwrctl->bInternalAutoSuspend == true) {
/* rtw_pwr_wakeup(padapter); */
if (pwrctl->rf_pwrstate == rf_off)
pwrctl->ps_flag = true;
@@ -1174,8 +1158,7 @@ static int netdev_close(struct net_device *pnetdev)
DBG_871X("(2)871x_drv - drv_close, bup =%d, hw_init_completed =%d\n", padapter->bup, padapter->hw_init_completed);
/* s1. */
- if (pnetdev)
- {
+ if (pnetdev) {
if (!rtw_netif_queue_stopped(pnetdev))
rtw_netif_stop_queue(pnetdev);
}
@@ -1198,12 +1181,11 @@ static int netdev_close(struct net_device *pnetdev)
DBG_871X("-871x_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
-
}
void rtw_ndev_destructor(struct net_device *ndev)
{
- DBG_871X(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(ndev));
+ DBG_871X(FUNC_NDEV_FMT "\n", FUNC_NDEV_ARG(ndev));
if (ndev->ieee80211_ptr)
kfree((u8 *)ndev->ieee80211_ptr);
@@ -1219,8 +1201,7 @@ void rtw_dev_unload(struct adapter *padapter)
RT_TRACE(_module_hci_intfs_c_, _drv_notice_, ("+%s\n", __func__));
- if (padapter->bup == true)
- {
+ if (padapter->bup == true) {
DBG_871X("===> %s\n", __func__);
padapter->bDriverStopped = true;
@@ -1257,12 +1238,11 @@ void rtw_dev_unload(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: driver not in IPS\n", __func__);
}
- if (padapter->bSurpriseRemoved == false)
- {
+ if (padapter->bSurpriseRemoved == false) {
rtw_btcoex_IpsNotify(padapter, pwrctl->ips_mode_req);
#ifdef CONFIG_WOWLAN
if (pwrctl->bSupportRemoteWakeup == true &&
- pwrctl->wowlan_mode ==true) {
+ pwrctl->wowlan_mode == true) {
DBG_871X_LEVEL(_drv_always_, "%s bSupportRemoteWakeup ==true do not run rtw_hal_deinit()\n", __func__);
}
else
@@ -1292,12 +1272,11 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
- && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
pmlmepriv->cur_network.network.Ssid.Ssid,
MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
@@ -1307,14 +1286,12 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
}
}
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED)) {
rtw_disassoc_cmd(padapter, 0, false);
/* s2-2. indicate disconnect to os */
rtw_indicate_disconnect(padapter);
}
- else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- {
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
rtw_sta_flush(padapter);
}
@@ -1327,13 +1304,12 @@ static int rtw_suspend_free_assoc_resource(struct adapter *padapter)
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
rtw_indicate_scan_done(padapter, 1);
- if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true)
- {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
DBG_871X_LEVEL(_drv_always_, "%s: fw_under_linking\n", __func__);
rtw_indicate_disconnect(padapter);
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return _SUCCESS;
}
@@ -1347,7 +1323,7 @@ int rtw_suspend_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
DBG_871X("wowlan_mode: %d\n", pwrpriv->wowlan_mode);
@@ -1379,8 +1355,7 @@ int rtw_suspend_wow(struct adapter *padapter)
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME)) {
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
- && check_fwstate(pmlmepriv, _FW_LINKED))
- {
+ && check_fwstate(pmlmepriv, _FW_LINKED)) {
DBG_871X("%s %s(" MAC_FMT "), length:%d assoc_ssid.length:%d\n", __func__,
pmlmepriv->cur_network.network.Ssid.Ssid,
MAC_ARG(pmlmepriv->cur_network.network.MacAddress),
@@ -1393,15 +1368,14 @@ int rtw_suspend_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
- {
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
DBG_871X_LEVEL(_drv_always_, "%s: fw_under_survey\n", __func__);
rtw_indicate_scan_done(padapter, 1);
clr_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
}
if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
- DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ DBG_871X(FUNC_ADPT_FMT " back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
FUNC_ADPT_ARG(padapter), ch, bw, offset);
set_channel_bwmode(padapter, ch, offset, bw);
}
@@ -1410,13 +1384,11 @@ int rtw_suspend_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: pno: %d\n", __func__, pwrpriv->wowlan_pno_enable);
else
rtw_set_ps_mode(padapter, PS_MODE_DTIM, 0, 0, "WOWLAN");
-
}
- else
- {
+ else {
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_WOWLAN */
@@ -1430,7 +1402,7 @@ int rtw_suspend_ap_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
pwrpriv->wowlan_ap_mode = true;
@@ -1462,14 +1434,14 @@ int rtw_suspend_ap_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "%s: wowmode suspending\n", __func__);
if (rtw_get_ch_setting_union(padapter, &ch, &bw, &offset) != 0) {
- DBG_871X(FUNC_ADPT_FMT" back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
+ DBG_871X(FUNC_ADPT_FMT " back to linked/linking union - ch:%u, bw:%u, offset:%u\n",
FUNC_ADPT_ARG(padapter), ch, bw, offset);
set_channel_bwmode(padapter, ch, offset, bw);
}
rtw_set_ps_mode(padapter, PS_MODE_MIN, 0, 0, "AP-WOWLAN");
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_AP_WOWLAN */
@@ -1480,7 +1452,7 @@ static int rtw_suspend_normal(struct adapter *padapter)
struct net_device *pnetdev = padapter->pnetdev;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (pnetdev) {
netif_carrier_off(pnetdev);
rtw_netif_stop_queue(pnetdev);
@@ -1489,10 +1461,8 @@ static int rtw_suspend_normal(struct adapter *padapter)
rtw_suspend_free_assoc_resource(padapter);
if ((rtw_hal_check_ips_status(padapter) == true)
- || (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off))
- {
+ || (adapter_to_pwrctl(padapter)->rf_pwrstate == rf_off)) {
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR #### driver in IPS ####ERROR###!!!\n", __func__);
-
}
rtw_dev_unload(padapter);
@@ -1501,7 +1471,7 @@ static int rtw_suspend_normal(struct adapter *padapter)
if (padapter->intf_deinit)
padapter->intf_deinit(adapter_to_dvobj(padapter));
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
@@ -1524,8 +1494,7 @@ int rtw_suspend_common(struct adapter *padapter)
while (pwrpriv->bips_processing == true)
msleep(1);
- if ((!padapter->bup) || (padapter->bDriverStopped)||(padapter->bSurpriseRemoved))
- {
+ if ((!padapter->bup) || (padapter->bDriverStopped) || (padapter->bSurpriseRemoved)) {
DBG_871X("%s bup =%d bDriverStopped =%d bSurpriseRemoved = %d\n", __func__
, padapter->bup, padapter->bDriverStopped, padapter->bSurpriseRemoved);
pdbgpriv->dbg_suspend_error_cnt++;
@@ -1599,7 +1568,7 @@ int rtw_resume_process_wow(struct adapter *padapter)
struct sta_info *psta = NULL;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (padapter) {
pnetdev = padapter->pnetdev;
@@ -1641,7 +1610,7 @@ int rtw_resume_process_wow(struct adapter *padapter)
}
/* Disable WOW, set H2C command */
- poidparam.subcode =WOWLAN_DISABLE;
+ poidparam.subcode = WOWLAN_DISABLE;
padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_WOWLAN, (u8 *)&poidparam);
psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
@@ -1667,11 +1636,10 @@ int rtw_resume_process_wow(struct adapter *padapter)
}
}
else {
-
DBG_871X_LEVEL(_drv_always_, "%s: ### ERROR ### wowlan_mode =%d\n", __func__, pwrpriv->wowlan_mode);
}
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
@@ -1680,7 +1648,6 @@ int rtw_resume_process_wow(struct adapter *padapter)
if (pwrpriv->wowlan_wake_reason == FWDecisionDisconnect ||
pwrpriv->wowlan_wake_reason == Rx_DisAssoc ||
pwrpriv->wowlan_wake_reason == Rx_DeAuth) {
-
DBG_871X("%s: disconnect reason: %02x\n", __func__,
pwrpriv->wowlan_wake_reason);
rtw_indicate_disconnect(padapter);
@@ -1705,12 +1672,12 @@ int rtw_resume_process_wow(struct adapter *padapter)
DBG_871X_LEVEL(_drv_always_, "do not reset timer\n");
}
- pwrpriv->wowlan_mode =false;
+ pwrpriv->wowlan_mode = false;
/* clean driver side wake up reason. */
pwrpriv->wowlan_wake_reason = 0;
exit:
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_WOWLAN */
@@ -1725,7 +1692,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
struct wowlan_ioctl_param poidparam;
int ret = _SUCCESS;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
if (padapter) {
pnetdev = padapter->pnetdev;
@@ -1774,7 +1741,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
rtw_netif_wake_queue(pnetdev);
}
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
@@ -1785,7 +1752,7 @@ int rtw_resume_process_ap_wow(struct adapter *padapter)
/* clean driver side wake up reason. */
pwrpriv->wowlan_wake_reason = 0;
exit:
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
return ret;
}
#endif /* ifdef CONFIG_APWOWLAN */
@@ -1811,19 +1778,17 @@ static int rtw_resume_process_normal(struct adapter *padapter)
psdpriv = padapter->dvobj;
pdbgpriv = &psdpriv->drv_dbg;
- DBG_871X("==> "FUNC_ADPT_FMT" entry....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("==> " FUNC_ADPT_FMT " entry....\n", FUNC_ADPT_ARG(padapter));
/* interface init */
/* if (sdio_init(adapter_to_dvobj(padapter)) != _SUCCESS) */
- if ((padapter->intf_init) && (padapter->intf_init(adapter_to_dvobj(padapter)) != _SUCCESS))
- {
+ if ((padapter->intf_init) && (padapter->intf_init(adapter_to_dvobj(padapter)) != _SUCCESS)) {
ret = -1;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: initialize SDIO Failed!!\n", __func__));
goto exit;
}
rtw_hal_disable_interrupt(padapter);
/* if (sdio_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS) */
- if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS))
- {
+ if ((padapter->intf_alloc_irq) && (padapter->intf_alloc_irq(adapter_to_dvobj(padapter)) != _SUCCESS)) {
ret = -1;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("%s: sdio_alloc_irq Failed!!\n", __func__));
goto exit;
@@ -1842,28 +1807,28 @@ static int rtw_resume_process_normal(struct adapter *padapter)
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
- if (padapter->pid[1]!= 0) {
+ if (padapter->pid[1] != 0) {
DBG_871X("pid[1]:%d\n", padapter->pid[1]);
rtw_signal_process(padapter->pid[1], SIGUSR2);
}
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_STATION_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
if (rtw_chk_roam_flags(padapter, RTW_ROAM_ON_RESUME))
rtw_roaming(padapter, NULL);
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_AP_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
rtw_ap_restore_network(padapter);
} else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - WIFI_ADHOC_STATE\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
} else {
- DBG_871X(FUNC_ADPT_FMT" fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
+ DBG_871X(FUNC_ADPT_FMT " fwstate:0x%08x - ???\n", FUNC_ADPT_ARG(padapter), get_fwstate(pmlmepriv));
}
- DBG_871X("<== "FUNC_ADPT_FMT" exit....\n", FUNC_ADPT_ARG(padapter));
+ DBG_871X("<== " FUNC_ADPT_FMT " exit....\n", FUNC_ADPT_ARG(padapter));
exit:
return ret;
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index a05daf06a870..f4221952dd1b 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -30,22 +30,17 @@ inline int RTW_STATUS_CODE(int error_code)
return _FAIL;
}
-u8 *_rtw_malloc(u32 sz)
+void *_rtw_malloc(u32 sz)
{
- u8 *pbuf = NULL;
-
- pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
-
- return pbuf;
+ return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
-u8 *_rtw_zmalloc(u32 sz)
+void *_rtw_zmalloc(u32 sz)
{
- u8 *pbuf = _rtw_malloc(sz);
+ void *pbuf = _rtw_malloc(sz);
- if (pbuf != NULL) {
+ if (pbuf)
memset(pbuf, 0, sz);
- }
return pbuf;
}
@@ -71,13 +66,6 @@ inline int _rtw_netif_rx(_nic_hdl ndev, struct sk_buff *skb)
return netif_rx(skb);
}
-void rtw_init_timer(_timer *ptimer, void *padapter, void *pfunc)
-{
- struct adapter *adapter = padapter;
-
- _init_timer(ptimer, adapter->pnetdev, pfunc, adapter);
-}
-
void _rtw_init_queue(struct __queue *pqueue)
{
INIT_LIST_HEAD(&(pqueue->queue));
@@ -470,7 +458,7 @@ struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
struct rtw_cbuf *cbuf;
- cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) + sizeof(void*)*size);
+ cbuf = rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
if (cbuf) {
cbuf->write = cbuf->read = 0;
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index f42e00081e0e..e804b430931c 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -356,8 +356,7 @@ _recv_indicatepkt_drop:
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- struct adapter *padapter = preorder_ctrl->padapter;
-
- _init_timer(&(preorder_ctrl->reordering_ctrl_timer), padapter->pnetdev, rtw_reordering_ctrl_timeout_handler, preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ rtw_reordering_ctrl_timeout_handler, 0);
}
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
index ce1dd6f9036f..9a885e626d1c 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -87,7 +87,7 @@ static ssize_t proc_set_log_level(struct file *file, const char __user *buffer,
* rtw_drv_proc:
* init/deinit when register/unregister driver
*/
-static const struct rtw_proc_hdl drv_proc_hdls [] = {
+static const struct rtw_proc_hdl drv_proc_hdls[] = {
{"ver_info", proc_get_drv_version, NULL},
{"log_level", proc_get_log_level, proc_set_log_level},
};
@@ -365,7 +365,7 @@ static int proc_get_cam_cache(struct seq_file *m, void *v)
* rtw_adapter_proc:
* init/deinit when register/unregister net_device
*/
-static const struct rtw_proc_hdl adapter_proc_hdls [] = {
+static const struct rtw_proc_hdl adapter_proc_hdls[] = {
{"write_reg", proc_get_dummy, proc_set_write_reg},
{"read_reg", proc_get_read_reg, proc_set_read_reg},
{"fwstate", proc_get_fwstate, NULL},
@@ -600,7 +600,7 @@ ssize_t proc_set_odm_adaptivity(struct file *file, const char __user *buffer, si
* rtw_odm_proc:
* init/deinit when register/unregister net_device, along with rtw_adapter_proc
*/
-static const struct rtw_proc_hdl odm_proc_hdls [] = {
+static const struct rtw_proc_hdl odm_proc_hdls[] = {
{"dbg_comp", proc_get_odm_dbg_comp, proc_set_odm_dbg_comp},
{"dbg_level", proc_get_odm_dbg_level, proc_set_odm_dbg_level},
{"ability", proc_get_odm_ability, proc_set_odm_ability},
diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
index 305e88a6b2ca..aa2f62acc994 100644
--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
*
* Copyright(c) 2009-2010 Realtek Corporation.
diff --git a/drivers/staging/rtlwifi/base.c b/drivers/staging/rtlwifi/base.c
index b88b0e8edd3d..c947def37d31 100644
--- a/drivers/staging/rtlwifi/base.c
+++ b/drivers/staging/rtlwifi/base.c
@@ -465,10 +465,10 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* <1> timer */
- setup_timer(&rtlpriv->works.watchdog_timer,
- rtl_watch_dog_timer_callback, (unsigned long)hw);
- setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
- rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
+ timer_setup(&rtlpriv->works.watchdog_timer,
+ rtl_watch_dog_timer_callback, 0);
+ timer_setup(&rtlpriv->works.dualmac_easyconcurrent_retrytimer,
+ rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
@@ -637,7 +637,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20;
sgi_80 = sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80;
- if ((!sta->ht_cap.ht_supported) && (!sta->vht_cap.vht_supported))
+ if (!sta->ht_cap.ht_supported && !sta->vht_cap.vht_supported)
return;
if (!sgi_40 && !sgi_20)
@@ -734,10 +734,10 @@ u8 rtl_mrate_idx_to_arfr_id(
ret = RATEID_IDX_B;
break;
case RATR_INX_WIRELESS_MC:
- if ((wirelessmode == WIRELESS_MODE_B) ||
- (wirelessmode == WIRELESS_MODE_G) ||
- (wirelessmode == WIRELESS_MODE_N_24G) ||
- (wirelessmode == WIRELESS_MODE_AC_24G))
+ if (wirelessmode == WIRELESS_MODE_B ||
+ wirelessmode == WIRELESS_MODE_G ||
+ wirelessmode == WIRELESS_MODE_N_24G ||
+ wirelessmode == WIRELESS_MODE_AC_24G)
ret = RATEID_IDX_BG;
else
ret = RATEID_IDX_G;
@@ -920,7 +920,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x000c) >> 2 ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_2SS_MCS9];
@@ -932,7 +932,7 @@ static u8 _rtl_get_vht_highest_n_rate(struct ieee80211_hw *hw,
else if ((tx_mcs_map & 0x0003) ==
IEEE80211_VHT_MCS_SUPPORT_0_8)
hw_rate =
- rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
+ rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS8];
else
hw_rate =
rtlpriv->cfg->maps[RTL_RC_VHT_RATE_1SS_MCS9];
@@ -948,8 +948,8 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 hw_rate;
- if ((get_rf_type(rtlphy) == RF_2T2R) &&
- (sta->ht_cap.mcs.rx_mask[1] != 0))
+ if (get_rf_type(rtlphy) == RF_2T2R &&
+ sta->ht_cap.mcs.rx_mask[1] != 0)
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
else
hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
@@ -1277,7 +1277,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
tcb_desc->hw_rate =
_rtl_get_vht_highest_n_rate(hw, sta);
} else {
- if (sta && (sta->ht_cap.ht_supported)) {
+ if (sta && sta->ht_cap.ht_supported) {
tcb_desc->hw_rate =
_rtl_get_highest_n_rate(hw, sta);
} else {
@@ -2080,9 +2080,9 @@ void rtl_watchdog_wq_callback(void *data)
rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
goto label_lps_done;
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2))
+ if (rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod > 8 ||
+ rtlpriv->link_info.num_rx_inperiod > 2)
rtl_lps_leave(hw);
else
rtl_lps_enter(hw);
@@ -2161,10 +2161,9 @@ label_lps_done:
rtl_scan_list_expire(hw);
}
-void rtl_watch_dog_timer_callback(unsigned long data)
+void rtl_watch_dog_timer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv = from_timer(rtlpriv, t, works.watchdog_timer);
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.watchdog_wq, 0);
@@ -2270,10 +2269,11 @@ void rtl_c2hcmd_wq_callback(void *data)
rtl_c2hcmd_launcher(hw, 1);
}
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv =
+ from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer);
+ struct ieee80211_hw *hw = rtlpriv->hw;
struct rtl_priv *buddy_priv = rtlpriv->buddy_priv;
if (!buddy_priv)
@@ -2334,9 +2334,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* Here will get a 'MISSING_BREAK' in Coverity Test, just ignore it.
- * According to Kernel Code, here is right.
- */
+ /* fall through */
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2552,8 +2550,8 @@ bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data, unsigned int len)
bcn_key.valid = true;
/* update cur_beacon_keys or compare beacon key */
- if ((rtlpriv->mac80211.link_state != MAC80211_LINKED) &&
- (rtlpriv->mac80211.link_state != MAC80211_LINKED_SCANNING))
+ if (rtlpriv->mac80211.link_state != MAC80211_LINKED &&
+ rtlpriv->mac80211.link_state != MAC80211_LINKED_SCANNING)
return true;
if (!cur_bcn_key->valid) {
@@ -2576,8 +2574,8 @@ bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data, unsigned int len)
goto chk_exit;
}
- if ((cur_bcn_key->bcn_channel == bcn_key.bcn_channel) &&
- (cur_bcn_key->ht_cap_info == bcn_key.ht_cap_info)) {
+ if (cur_bcn_key->bcn_channel == bcn_key.bcn_channel &&
+ cur_bcn_key->ht_cap_info == bcn_key.ht_cap_info) {
/* Beacon HT info IE, secondary channel offset check */
/* 40M -> 20M */
if (cur_bcn_key->ht_info_infos_0_sco >
diff --git a/drivers/staging/rtlwifi/base.h b/drivers/staging/rtlwifi/base.h
index 1829712dc4e2..b7f92b32978e 100644
--- a/drivers/staging/rtlwifi/base.h
+++ b/drivers/staging/rtlwifi/base.h
@@ -120,7 +120,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw);
void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
-void rtl_watch_dog_timer_callback(unsigned long data);
+void rtl_watch_dog_timer_callback(struct timer_list *t);
void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -176,7 +176,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
-void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
+void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t);
extern struct rtl_global_var rtl_global_var;
void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
bool rtl_check_beacon_key(struct ieee80211_hw *hw, void *data,
diff --git a/drivers/staging/rtlwifi/core.c b/drivers/staging/rtlwifi/core.c
index d33847d0550d..b00e51df984f 100644
--- a/drivers/staging/rtlwifi/core.c
+++ b/drivers/staging/rtlwifi/core.c
@@ -49,43 +49,6 @@ u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
42, 58, 106, 122, 138, 155, 171
};
-void rtl_addr_delay(u32 addr)
-{
- if (addr == 0xfe)
- mdelay(50);
- else if (addr == 0xfd)
- msleep(5);
- else if (addr == 0xfc)
- msleep(1);
- else if (addr == 0xfb)
- usleep_range(50, 100);
- else if (addr == 0xfa)
- usleep_range(5, 10);
- else if (addr == 0xf9)
- usleep_range(1, 2);
-}
-
-void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
- u32 mask, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_rfreg(hw, rfpath, addr, mask, data);
- udelay(1);
- }
-}
-
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
-{
- if (addr >= 0xf9 && addr <= 0xfe) {
- rtl_addr_delay(addr);
- } else {
- rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
- }
-}
-
static void rtl_fw_do_work(const struct firmware *firmware, void *context,
bool is_wow)
{
@@ -153,7 +116,7 @@ static int rtl_op_start(struct ieee80211_hw *hw)
mutex_lock(&rtlpriv->locks.conf_mutex);
err = rtlpriv->intf_ops->adapter_start(hw);
if (!err)
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
mutex_unlock(&rtlpriv->locks.conf_mutex);
return err;
}
@@ -339,9 +302,9 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&rtlpriv->locks.conf_mutex);
/* Free beacon resources */
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
if (mac->beacon_enabled == 1) {
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -449,7 +412,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
for (i = 0; i < wow->n_patterns; i++) {
memset(&rtl_pattern, 0, sizeof(struct rtl_wow_pattern));
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
- if (patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
+ if (patterns[i].pattern_len < 0 ||
+ patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
"Pattern[%d] is too long\n", i);
continue;
@@ -856,8 +820,8 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
* here just used for linked scanning, & linked
* and nolink check bssid is set in set network_type
*/
- if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
- (mac->link_state >= MAC80211_LINKED)) {
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC &&
+ mac->link_state >= MAC80211_LINKED) {
if (mac->opmode != NL80211_IFTYPE_AP &&
mac->opmode != NL80211_IFTYPE_MESH_POINT) {
if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
@@ -1078,10 +1042,10 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
mutex_lock(&rtlpriv->locks.conf_mutex);
- if ((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
- if ((changed & BSS_CHANGED_BEACON) ||
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) {
+ if (changed & BSS_CHANGED_BEACON ||
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
@@ -1160,7 +1124,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
- IEEE80211_SMPS_STATIC);
+ IEEE80211_SMPS_STATIC);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
@@ -1224,7 +1188,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1621,8 +1585,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
- if (((vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
@@ -1697,7 +1661,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
} else {
- if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) ||
+ if (!group_key || vif->type == NL80211_IFTYPE_ADHOC ||
rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION &&
@@ -1885,7 +1849,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
break;
case PWR_CMD_WRITE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
diff --git a/drivers/staging/rtlwifi/core.h b/drivers/staging/rtlwifi/core.h
index 782ac2fc4b28..4c2b69412621 100644
--- a/drivers/staging/rtlwifi/core.h
+++ b/drivers/staging/rtlwifi/core.h
@@ -75,10 +75,6 @@ enum dm_dig_connect_e {
extern const struct ieee80211_ops rtl_ops;
void rtl_fw_cb(const struct firmware *firmware, void *context);
void rtl_wowlan_fw_cb(const struct firmware *firmware, void *context);
-void rtl_addr_delay(u32 addr);
-void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
- u32 mask, u32 data);
-void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
bool rtl_btc_status_false(void);
void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval);
diff --git a/drivers/staging/rtlwifi/debug.c b/drivers/staging/rtlwifi/debug.c
index 7446d71c41d1..be8d72cb63db 100644
--- a/drivers/staging/rtlwifi/debug.c
+++ b/drivers/staging/rtlwifi/debug.c
@@ -33,7 +33,7 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -52,7 +52,7 @@ void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- (level <= rtlpriv->cfg->mod_params->debug_level))) {
+ level <= rtlpriv->cfg->mod_params->debug_level)) {
struct va_format vaf;
va_list args;
@@ -127,10 +127,10 @@ static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
- .cb_read = rtl_debug_get_mac_page, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_MAC_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_mac_ ##page = { \
+ .cb_read = rtl_debug_get_mac_page, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_MAC_SERIES(0, 0x0000);
@@ -169,10 +169,10 @@ static int rtl_debug_get_bb_page(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
- .cb_read = rtl_debug_get_bb_page, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_BB_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_bb_ ##page = { \
+ .cb_read = rtl_debug_get_bb_page, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_BB_SERIES(8, 0x0800);
@@ -216,10 +216,10 @@ static int rtl_debug_get_reg_rf(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
- .cb_read = rtl_debug_get_reg_rf, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_RF_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_rf_ ##page = { \
+ .cb_read = rtl_debug_get_reg_rf, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_RF_SERIES(a, RF90_PATH_A);
@@ -271,10 +271,10 @@ static int rtl_debug_get_cam_register(struct seq_file *m, void *v)
return 0;
}
-#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
-struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
- .cb_read = rtl_debug_get_cam_register, \
- .cb_data = addr, \
+#define RTL_DEBUG_IMPL_CAM_SERIES(page, addr) \
+static struct rtl_debugfs_priv rtl_debug_priv_cam_ ##page = { \
+ .cb_read = rtl_debug_get_cam_register, \
+ .cb_data = addr, \
}
RTL_DEBUG_IMPL_CAM_SERIES(1, 0);
diff --git a/drivers/staging/rtlwifi/efuse.c b/drivers/staging/rtlwifi/efuse.c
index 6d5e657017c6..d74c80d512c9 100644
--- a/drivers/staging/rtlwifi/efuse.c
+++ b/drivers/staging/rtlwifi/efuse.c
@@ -252,12 +252,11 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
sizeof(u8), GFP_ATOMIC);
if (!efuse_tbl)
return;
- efuse_word = kzalloc(EFUSE_MAX_WORD_UNIT * sizeof(u16 *), GFP_ATOMIC);
+ efuse_word = kcalloc(EFUSE_MAX_WORD_UNIT, sizeof(u16 *), GFP_ATOMIC);
if (!efuse_word)
goto out;
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- efuse_word[i] = kzalloc(efuse_max_section * sizeof(u16),
- GFP_ATOMIC);
+ efuse_word[i] = kcalloc(efuse_max_section, sizeof(u16), GFP_ATOMIC);
if (!efuse_word[i])
goto done;
}
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
index edbf6af1c8b7..448b1379d220 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
@@ -3391,8 +3391,10 @@ halmac_cfg_txbf_88xx(struct halmac_adapter *halmac_adapter, u8 userid,
switch (bw) {
case HALMAC_BW_80:
temp42C |= BIT_R_TXBF0_80M;
+ /* fall through */
case HALMAC_BW_40:
temp42C |= BIT_R_TXBF0_40M;
+ /* fall through */
case HALMAC_BW_20:
temp42C |= BIT_R_TXBF0_20M;
break;
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index 544f638ed3ef..c4cb217d3d1f 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -276,17 +276,13 @@ halmac_dump_efuse_drv_88xx(struct halmac_adapter *halmac_adapter)
if (!halmac_adapter->hal_efuse_map) {
halmac_adapter->hal_efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map) {
- pr_err("[ERR]halmac allocate efuse map Fail!!\n");
+ if (!halmac_adapter->hal_efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
}
efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map) {
- /* out of memory */
+ if (!efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
if (halmac_read_hw_efuse_88xx(halmac_adapter, 0, efuse_size,
efuse_map) != HALMAC_RET_SUCCESS) {
@@ -325,10 +321,8 @@ halmac_dump_efuse_fw_88xx(struct halmac_adapter *halmac_adapter)
if (!halmac_adapter->hal_efuse_map) {
halmac_adapter->hal_efuse_map = kzalloc(
halmac_adapter->hw_config_info.efuse_size, GFP_KERNEL);
- if (!halmac_adapter->hal_efuse_map) {
- /* out of memory */
+ if (!halmac_adapter->hal_efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
}
if (!halmac_adapter->hal_efuse_map_valid) {
@@ -537,10 +531,8 @@ halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
if (!halmac_adapter->hal_efuse_map_valid) {
efuse_map = kzalloc(efuse_size, GFP_KERNEL);
- if (!efuse_map) {
- pr_err("[ERR]halmac allocate local efuse map Fail!!\n");
+ if (!efuse_map)
return HALMAC_RET_MALLOC_FAIL;
- }
status = halmac_func_read_efuse_88xx(halmac_adapter, 0,
efuse_size, efuse_map);
@@ -554,7 +546,6 @@ halmac_read_logical_efuse_map_88xx(struct halmac_adapter *halmac_adapter,
halmac_adapter->hal_efuse_map =
kzalloc(efuse_size, GFP_KERNEL);
if (!halmac_adapter->hal_efuse_map) {
- pr_err("[ERR]halmac allocate efuse map Fail!!\n");
kfree(efuse_map);
return HALMAC_RET_MALLOC_FAIL;
}
@@ -592,10 +583,8 @@ halmac_func_write_logical_efuse_88xx(struct halmac_adapter *halmac_adapter,
driver_adapter = halmac_adapter->driver_adapter;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
@@ -687,10 +676,8 @@ halmac_func_pg_efuse_by_map_88xx(struct halmac_adapter *halmac_adapter,
enum halmac_ret_status status = HALMAC_RET_SUCCESS;
eeprom_mask_updated = kzalloc(eeprom_mask_size, GFP_KERNEL);
- if (!eeprom_mask_updated) {
- /* out of memory */
+ if (!eeprom_mask_updated)
return HALMAC_RET_MALLOC_FAIL;
- }
status = halmac_update_eeprom_mask_88xx(halmac_adapter, pg_efuse_info,
eeprom_mask_updated);
@@ -743,12 +730,10 @@ halmac_update_eeprom_mask_88xx(struct halmac_adapter *halmac_adapter,
driver_adapter = halmac_adapter->driver_adapter;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
- memset(eeprom_map, 0xFF, eeprom_size);
+ memset(eeprom_map, 0xFF, eeprom_size);
memset(eeprom_mask_updated, 0x00, pg_efuse_info->efuse_mask_size);
status = halmac_read_logical_efuse_map_88xx(halmac_adapter, eeprom_map);
@@ -1036,7 +1021,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
if (halmac_send_fwpkt_88xx(
halmac_adapter, code_ptr + mem_offset,
send_pkt_size) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_send_fwpkt_88xx fail!!");
+ pr_err("halmac_send_fwpkt_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -1046,7 +1031,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
halmac_adapter->hw_config_info.txdesc_size,
dest + mem_offset, send_pkt_size,
first_part) != HALMAC_RET_SUCCESS) {
- pr_err("halmac_iddma_dlfw_88xx fail!!");
+ pr_err("halmac_iddma_dlfw_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -1057,7 +1042,7 @@ halmac_dlfw_to_mem_88xx(struct halmac_adapter *halmac_adapter, u8 *ram_code,
if (halmac_check_fw_chksum_88xx(halmac_adapter, dest) !=
HALMAC_RET_SUCCESS) {
- pr_err("halmac_check_fw_chksum_88xx fail!!");
+ pr_err("halmac_check_fw_chksum_88xx fail!!\n");
return HALMAC_RET_DLFW_FAIL;
}
@@ -2549,10 +2534,8 @@ halmac_parse_efuse_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
halmac_adapter->efuse_segment_size = segment_size;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
spin_lock(&halmac_adapter->efuse_lock);
@@ -3355,10 +3338,8 @@ enum halmac_ret_status halmac_query_dump_logical_efuse_status_88xx(
*size = eeprom_size;
eeprom_map = kzalloc(eeprom_size, GFP_KERNEL);
- if (!eeprom_map) {
- /* out of memory */
+ if (!eeprom_map)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(eeprom_map, 0xFF, eeprom_size);
if (halmac_eeprom_parser_88xx(
@@ -3579,10 +3560,8 @@ halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter)
rsvd_buf = kzalloc(h2c_pkt_verify_size, GFP_KERNEL);
- if (!rsvd_buf) {
- /*pr_err("[ERR]rsvd buffer malloc fail!!\n");*/
+ if (!rsvd_buf)
return HALMAC_RET_MALLOC_FAIL;
- }
memset(rsvd_buf, (u8)h2c_pkt_verify_payload, h2c_pkt_verify_size);
@@ -3599,7 +3578,6 @@ halmac_verify_send_rsvd_page_88xx(struct halmac_adapter *halmac_adapter)
GFP_KERNEL);
if (!rsvd_page) {
- pr_err("[ERR]rsvd page malloc fail!!\n");
kfree(rsvd_buf);
return HALMAC_RET_MALLOC_FAIL;
}
diff --git a/drivers/staging/rtlwifi/halmac/rtl_halmac.c b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
index 6448a8bfc14b..66f0a6dfc52c 100644
--- a/drivers/staging/rtlwifi/halmac/rtl_halmac.c
+++ b/drivers/staging/rtlwifi/halmac/rtl_halmac.c
@@ -617,7 +617,7 @@ static int _send_general_info(struct rtl_priv *rtlpriv)
RT_TRACE(rtlpriv, COMP_HALMAC, DBG_WARNING,
"%s: halmac_send_general_info() fail because fw not dl!\n",
__func__);
- /* fallthrough here */
+ /* fall through */
default:
return -1;
}
diff --git a/drivers/staging/rtlwifi/pci.c b/drivers/staging/rtlwifi/pci.c
index 4035b8835bd1..70a64a5f564a 100644
--- a/drivers/staging/rtlwifi/pci.c
+++ b/drivers/staging/rtlwifi/pci.c
@@ -649,7 +649,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
prio, ring->idx,
skb_queue_len(&ring->queue));
- ieee80211_wake_queue(hw, skb_get_queue_mapping (skb));
+ ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
tx_status_ok:
skb = NULL;
diff --git a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
index 684e383201d6..5986892e767e 100644
--- a/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
+++ b/drivers/staging/rtlwifi/phydm/halphyrf_ce.c
@@ -798,7 +798,7 @@ void odm_txpowertracking_callback_thermal_meter(void *dm_void)
if (xtal_offset_eanble != 0 &&
cali_info->txpowertrack_control &&
- (rtlefu->eeprom_thermalmeter != 0xff)) {
+ rtlefu->eeprom_thermalmeter != 0xff) {
ODM_RT_TRACE(
dm, ODM_COMP_TX_PWR_TRACK,
"**********Enter Xtal Tracking**********\n");
diff --git a/drivers/staging/rtlwifi/phydm/phydm.c b/drivers/staging/rtlwifi/phydm/phydm.c
index 37888c3087a4..8b2a180cc13c 100644
--- a/drivers/staging/rtlwifi/phydm/phydm.c
+++ b/drivers/staging/rtlwifi/phydm/phydm.c
@@ -1338,7 +1338,7 @@ static void odm_update_power_training_state(struct phy_dm_struct *dm)
return;
/* First connect */
- if ((dm->is_linked) && !dig_tab->is_media_connect_0) {
+ if (dm->is_linked && !dig_tab->is_media_connect_0) {
dm->PT_score = 0;
dm->is_change_state = true;
dm->phy_dbg_info.num_qry_phy_status_ofdm = 0;
@@ -1360,7 +1360,7 @@ static void odm_update_power_training_state(struct phy_dm_struct *dm)
(u32)(dm->phy_dbg_info.num_qry_phy_status_cck);
if ((false_alm_cnt->cnt_cca_all > 31 && rx_pkt_cnt > 31) &&
- (false_alm_cnt->cnt_cca_all >= rx_pkt_cnt)) {
+ false_alm_cnt->cnt_cca_all >= rx_pkt_cnt) {
if ((rx_pkt_cnt + (rx_pkt_cnt >> 1)) <=
false_alm_cnt->cnt_cca_all)
score = 0;
@@ -1697,7 +1697,7 @@ static u8 phydm_calculate_fc(void *dm_void, u32 channel, u32 bw, u32 second_ch,
fc = 2412 + (channel - 1) * 5;
- if (bw == 40 && (second_ch == PHYDM_ABOVE)) {
+ if (bw == 40 && second_ch == PHYDM_ABOVE) {
if (channel >= 10) {
ODM_RT_TRACE(
dm, ODM_COMP_API,
@@ -1774,7 +1774,7 @@ static u8 phydm_calculate_intf_distance(void *dm_void, u32 bw, u32 fc,
"[f_l, fc, fh] = [ %d, %d, %d ], f_int = ((%d))\n", bw_low,
fc, bw_up, f_interference);
- if ((f_interference >= bw_low) && (f_interference <= bw_up)) {
+ if (f_interference >= bw_low && f_interference <= bw_up) {
int_distance = (fc >= f_interference) ? (fc - f_interference) :
(f_interference - fc);
tone_idx_tmp =
diff --git a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
index 4f9e267409f6..103a774f9c8f 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_adaptivity.c
@@ -762,7 +762,7 @@ void phydm_adaptivity(void *dm_void)
dm->rssi_min, adaptivity->adajust_igi_level,
dm->adaptivity_flag, dm->adaptivity_enable);
- if (adaptivity->dynamic_link_adaptivity && (!dm->is_linked) &&
+ if (adaptivity->dynamic_link_adaptivity && !dm->is_linked &&
!dm->adaptivity_enable) {
phydm_set_edcca_threshold(dm, 0x7f, 0x7f);
ODM_RT_TRACE(
@@ -773,7 +773,7 @@ void phydm_adaptivity(void *dm_void)
if (dm->support_ic_type &
(ODM_IC_11AC_GAIN_IDX_EDCCA | ODM_IC_11N_GAIN_IDX_EDCCA)) {
- if ((adaptivity->adajust_igi_level > IGI) &&
+ if (adaptivity->adajust_igi_level > IGI &&
dm->adaptivity_enable)
diff = adaptivity->adajust_igi_level - IGI;
diff --git a/drivers/staging/rtlwifi/phydm/phydm_debug.c b/drivers/staging/rtlwifi/phydm/phydm_debug.c
index a5f90afdae9b..e18ba2cca2bd 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_debug.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_debug.c
@@ -29,6 +29,7 @@
#include "mp_precomp.h"
#include "phydm_precomp.h"
+#include <linux/kernel.h>
bool phydm_api_set_txagc(struct phy_dm_struct *, u32, enum odm_rf_radio_path,
u8, bool);
@@ -1441,9 +1442,9 @@ static void phydm_get_per_path_txagc(void *dm_void, u8 path, u32 *_used,
u32 out_len = *_out_len;
if (((dm->support_ic_type & (ODM_RTL8822B | ODM_RTL8197F)) &&
- (path <= ODM_RF_PATH_B)) ||
+ path <= ODM_RF_PATH_B) ||
((dm->support_ic_type & (ODM_RTL8821C)) &&
- (path <= ODM_RF_PATH_A))) {
+ path <= ODM_RF_PATH_A)) {
for (rate_idx = 0; rate_idx <= 0x53; rate_idx++) {
if (rate_idx == ODM_RATE1M)
PHYDM_SNPRINTF(output + used, out_len - used,
@@ -2107,8 +2108,7 @@ void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][MAX_ARGV],
/* Parsing Cmd ID */
if (input_num) {
- phydm_ary_size =
- sizeof(phy_dm_ary) / sizeof(struct phydm_command);
+ phydm_ary_size = ARRAY_SIZE(phy_dm_ary);
for (i = 0; i < phydm_ary_size; i++) {
if (strcmp(phy_dm_ary[i].name, input[0]) == 0) {
id = phy_dm_ary[i].id;
@@ -2530,7 +2530,7 @@ void phydm_cmd_parser(struct phy_dm_struct *dm, char input[][MAX_ARGV],
}
/* NMH trigger */
- if ((var1[0] <= 2) && (var1[0] != 0)) {
+ if (var1[0] <= 2 && var1[0] != 0) {
ccx_info->echo_NHM_en = true;
ccx_info->echo_IGI =
(u8)odm_get_bb_reg(dm, 0xC50, MASKBYTE0);
@@ -2808,7 +2808,7 @@ void phydm_fw_trace_handler(void *dm_void, u8 *cmd_buf, u8 cmd_len)
freg_num = (buf_0 & 0xf);
c2h_seq = (buf_0 & 0xf0) >> 4;
- if ((c2h_seq != dm->pre_c2h_seq) && !dm->fw_buff_is_enpty) {
+ if (c2h_seq != dm->pre_c2h_seq && !dm->fw_buff_is_enpty) {
dm->fw_debug_trace[dm->c2h_cmd_start] = '\0';
ODM_RT_TRACE(dm, ODM_FW_DEBUG_TRACE,
"[FW Dbg Queue Overflow] %s\n",
diff --git a/drivers/staging/rtlwifi/phydm/phydm_dig.c b/drivers/staging/rtlwifi/phydm/phydm_dig.c
index 31a4f3fcad19..f851ff12dc35 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_dig.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_dig.c
@@ -198,7 +198,7 @@ static u8 odm_forbidden_igi_check(void *dm_void, u8 dig_dynamic_min,
if ((fa_cnt->cnt_all >
(fa_cnt->cnt_all_pre + (fa_cnt->cnt_all_pre >> 3) +
(fa_cnt->cnt_all_pre >> 4))) &&
- (current_igi < dig_tab->pre_ig_value)) {
+ current_igi < dig_tab->pre_ig_value) {
if (dig_tab->large_fa_hit != 3)
dig_tab->large_fa_hit++;
@@ -319,7 +319,7 @@ void odm_write_dig(void *dm_void, u8 current_igi)
__func__, ODM_REG(IGI_A, dm), ODM_BIT(IGI, dm));
/* 1 Check initial gain by upper bound */
- if ((!dig_tab->is_psd_in_progress) && dm->is_linked) {
+ if (!dig_tab->is_psd_in_progress && dm->is_linked) {
if (current_igi > dig_tab->rx_gain_range_max) {
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -353,7 +353,7 @@ void odm_write_dig(void *dm_void, u8 current_igi)
/*Add by YuChen for USB IO too slow issue*/
if ((dm->support_ability & ODM_BB_ADAPTIVITY) &&
- (current_igi > dig_tab->cur_ig_value)) {
+ current_igi > dig_tab->cur_ig_value) {
dig_tab->cur_ig_value = current_igi;
phydm_adaptivity(dm);
}
@@ -388,7 +388,7 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
pause_level);
- if ((dig_tab->pause_dig_level == 0) &&
+ if (dig_tab->pause_dig_level == 0 &&
(!(dm->support_ability & ODM_BB_DIG) ||
!(dm->support_ability & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(
@@ -490,6 +490,8 @@ void odm_pause_dig(void *dm_void, enum phydm_pause_type pause_type,
break;
}
+ /* pin max_level to be >= 0 */
+ max_level = max_t(s8, 0, max_level);
/* write IGI of lower level */
odm_write_dig(dm, dig_tab->pause_dig_value[max_level]);
ODM_RT_TRACE(dm, ODM_COMP_DIG,
@@ -718,7 +720,7 @@ void odm_DIG(void *dm_void)
/* 4 Modify DIG upper bound for 92E, 8723A\B, 8821 & 8812 BT */
if ((dm->support_ic_type & (ODM_RTL8192E | ODM_RTL8723B |
ODM_RTL8812 | ODM_RTL8821)) &&
- (dm->is_bt_limited_dig == 1)) {
+ dm->is_bt_limited_dig == 1) {
offset = 10;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -817,8 +819,8 @@ void odm_DIG(void *dm_void)
if (dm->is_linked && !first_connect) {
ODM_RT_TRACE(dm, ODM_COMP_DIG, "Beacon Num (%d)\n",
dm->phy_dbg_info.num_qry_beacon_pkt);
- if ((dm->phy_dbg_info.num_qry_beacon_pkt < 5) &&
- (dm->bsta_state)) {
+ if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
+ dm->bsta_state) {
dig_tab->rx_gain_range_min = 0x1c;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -880,9 +882,9 @@ void odm_DIG(void *dm_void)
current_igi = current_igi - 2;
/* 4 Abnormal # beacon case */
- if ((dm->phy_dbg_info.num_qry_beacon_pkt < 5) &&
- (fa_cnt->cnt_all < DM_DIG_FA_TH1) &&
- (dm->bsta_state)) {
+ if (dm->phy_dbg_info.num_qry_beacon_pkt < 5 &&
+ fa_cnt->cnt_all < DM_DIG_FA_TH1 &&
+ dm->bsta_state) {
current_igi = dig_tab->rx_gain_range_min;
ODM_RT_TRACE(
dm, ODM_COMP_DIG,
@@ -1319,7 +1321,7 @@ void odm_pause_cck_packet_detection(void *dm_void,
ODM_RT_TRACE(dm, ODM_COMP_DIG, "%s()=========> level = %d\n", __func__,
pause_level);
- if ((dig_tab->pause_cckpd_level == 0) &&
+ if (dig_tab->pause_cckpd_level == 0 &&
(!(dm->support_ability & ODM_BB_CCK_PD) ||
!(dm->support_ability & ODM_BB_FA_CNT))) {
ODM_RT_TRACE(
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.c b/drivers/staging/rtlwifi/phydm/phydm_interface.c
index 102576a46c04..2f9bf6708c54 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.c
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.c
@@ -179,29 +179,6 @@ void ODM_sleep_ms(u32 ms) { msleep(ms); }
void ODM_sleep_us(u32 us) { usleep_range(us, us + 1); }
-void odm_set_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- u32 ms_delay)
-{
- mod_timer(timer, jiffies + msecs_to_jiffies(ms_delay));
-}
-
-void odm_initialize_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- void *call_back_func, void *context,
- const char *sz_id)
-{
- init_timer(timer);
- timer->function = call_back_func;
- timer->data = (unsigned long)dm;
- /*mod_timer(timer, jiffies+RTL_MILISECONDS_TO_JIFFIES(10)); */
-}
-
-void odm_cancel_timer(struct phy_dm_struct *dm, struct timer_list *timer)
-{
- del_timer(timer);
-}
-
-void odm_release_timer(struct phy_dm_struct *dm, struct timer_list *timer) {}
-
static u8 phydm_trans_h2c_id(struct phy_dm_struct *dm, u8 phydm_h2c_id)
{
u8 platform_h2c_id = phydm_h2c_id;
diff --git a/drivers/staging/rtlwifi/phydm/phydm_interface.h b/drivers/staging/rtlwifi/phydm/phydm_interface.h
index d315c79c962a..53ba5585bf33 100644
--- a/drivers/staging/rtlwifi/phydm/phydm_interface.h
+++ b/drivers/staging/rtlwifi/phydm/phydm_interface.h
@@ -172,17 +172,6 @@ void ODM_sleep_ms(u32 ms);
void ODM_sleep_us(u32 us);
-void odm_set_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- u32 ms_delay);
-
-void odm_initialize_timer(struct phy_dm_struct *dm, struct timer_list *timer,
- void *call_back_func, void *context,
- const char *sz_id);
-
-void odm_cancel_timer(struct phy_dm_struct *dm, struct timer_list *timer);
-
-void odm_release_timer(struct phy_dm_struct *dm, struct timer_list *timer);
-
/*
* ODM FW relative API.
*/
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
index 4e7946019fcb..29d19f2b300e 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -1350,7 +1351,6 @@ void odm_read_and_config_mp_8822b_agc_tab(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_agc_tab) / sizeof(u32);
u32 *array = array_mp_8822b_agc_tab;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1358,7 +1358,7 @@ void odm_read_and_config_mp_8822b_agc_tab(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_agc_tab); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -1843,7 +1843,6 @@ void odm_read_and_config_mp_8822b_phy_reg(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_phy_reg) / sizeof(u32);
u32 *array = array_mp_8822b_phy_reg;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1851,7 +1850,7 @@ void odm_read_and_config_mp_8822b_phy_reg(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_phy_reg); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -1947,7 +1946,6 @@ static u32 array_mp_8822b_phy_reg_pg[] = {
void odm_read_and_config_mp_8822b_phy_reg_pg(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_phy_reg_pg) / sizeof(u32);
u32 *array = array_mp_8822b_phy_reg_pg;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
@@ -1956,7 +1954,7 @@ void odm_read_and_config_mp_8822b_phy_reg_pg(struct phy_dm_struct *dm)
dm->phy_reg_pg_version = 1;
dm->phy_reg_pg_value_type = PHY_REG_PG_EXACT_VALUE;
- for (i = 0; i < array_len; i += 6) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_phy_reg_pg); i += 6) {
u32 v1 = array[i];
u32 v2 = array[i + 1];
u32 v3 = array[i + 2];
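Note: these hunks replace the open-coded sizeof(array)/sizeof(u32) length with ARRAY_SIZE(), which is why <linux/kernel.h> is now included. A minimal sketch of the idiom follows; the table contents are made up, not taken from the driver.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/printk.h>
#include <linux/types.h>

static const u32 example_tab[] = { 0x800, 0x83045700, 0x804, 0x2d721600 };

static void walk_example_tab(void)
{
	u32 i;

	/* ARRAY_SIZE() expands to sizeof(arr) / sizeof((arr)[0]) and fails
	 * to build if 'arr' is a pointer rather than an array, so the
	 * element count can never drift from the table definition.
	 */
	for (i = 0; (i + 1) < ARRAY_SIZE(example_tab); i += 2)
		pr_debug("addr 0x%x data 0x%x\n",
			 example_tab[i], example_tab[i + 1]);
}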
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
index 1a9daed2e609..70924f002541 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -173,7 +174,6 @@ void odm_read_and_config_mp_8822b_mac_reg(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_mac_reg) / sizeof(u32);
u32 *array = array_mp_8822b_mac_reg;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -181,7 +181,7 @@ void odm_read_and_config_mp_8822b_mac_reg(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_mac_reg); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
diff --git a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
index 84cdc0644207..0ff3a9a712d6 100644
--- a/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
+++ b/drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c
@@ -26,6 +26,7 @@
/*Image2HeaderVersion: 3.2*/
#include "../mp_precomp.h"
#include "../phydm_precomp.h"
+#include <linux/kernel.h>
static bool check_positive(struct phy_dm_struct *dm, const u32 condition1,
const u32 condition2, const u32 condition3,
@@ -1346,7 +1347,6 @@ void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_radioa) / sizeof(u32);
u32 *array = array_mp_8822b_radioa;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -1354,7 +1354,7 @@ void odm_read_and_config_mp_8822b_radioa(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radioa); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -2506,7 +2506,6 @@ void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm)
u32 i = 0;
u8 c_cond;
bool is_matched = true, is_skipped = false;
- u32 array_len = sizeof(array_mp_8822b_radiob) / sizeof(u32);
u32 *array = array_mp_8822b_radiob;
u32 v1 = 0, v2 = 0, pre_v1 = 0, pre_v2 = 0;
@@ -2514,7 +2513,7 @@ void odm_read_and_config_mp_8822b_radiob(struct phy_dm_struct *dm)
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (; (i + 1) < array_len; i = i + 2) {
+ for (; (i + 1) < ARRAY_SIZE(array_mp_8822b_radiob); i = i + 2) {
v1 = array[i];
v2 = array[i + 1];
@@ -4239,13 +4238,12 @@ static const char *const array_mp_8822b_txpwr_lmt[] = {
void odm_read_and_config_mp_8822b_txpwr_lmt(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_txpwr_lmt) / sizeof(u8 *);
u8 **array = (u8 **)array_mp_8822b_txpwr_lmt;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> %s\n", __func__);
- for (i = 0; i < array_len; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt); i += 7) {
u8 *regulation = array[i];
u8 *band = array[i + 1];
u8 *bandwidth = array[i + 2];
@@ -4723,13 +4721,12 @@ static const char *const array_mp_8822b_txpwr_lmt_type5[] = {
void odm_read_and_config_mp_8822b_txpwr_lmt_type5(struct phy_dm_struct *dm)
{
u32 i = 0;
- u32 array_len = sizeof(array_mp_8822b_txpwr_lmt_type5) / sizeof(u8 *);
u8 **array = (u8 **)array_mp_8822b_txpwr_lmt_type5;
ODM_RT_TRACE(dm, ODM_COMP_INIT,
"===> odm_read_and_config_mp_8822b_txpwr_lmt_type5\n");
- for (i = 0; i < array_len; i += 7) {
+ for (i = 0; i < ARRAY_SIZE(array_mp_8822b_txpwr_lmt_type5); i += 7) {
u8 *regulation = array[i];
u8 *band = array[i + 1];
u8 *bandwidth = array[i + 2];
diff --git a/drivers/staging/rtlwifi/ps.c b/drivers/staging/rtlwifi/ps.c
index 9172cee45f74..7856fc5d10bd 100644
--- a/drivers/staging/rtlwifi/ps.c
+++ b/drivers/staging/rtlwifi/ps.c
@@ -61,7 +61,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->enable_interrupt(hw);
/*<enable timer> */
- rtl_watch_dog_timer_callback((unsigned long)hw);
+ rtl_watch_dog_timer_callback(&rtlpriv->works.watchdog_timer);
return true;
}
diff --git a/drivers/staging/rtlwifi/rc.c b/drivers/staging/rtlwifi/rc.c
index 65de0c7b5a67..c835be91f398 100644
--- a/drivers/staging/rtlwifi/rc.c
+++ b/drivers/staging/rtlwifi/rc.c
@@ -125,8 +125,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
}
rate->count = tries;
rate->idx = rix >= 0x00 ? rix : 0x00;
- if (((rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE) ||
- (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8822BE)) &&
+ if ((rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8812AE ||
+ rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8822BE) &&
wireless_mode == WIRELESS_MODE_AC_5G)
rate->idx |= 0x10;/*2NSS for 8812AE, 8822BE*/
@@ -138,7 +138,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sta && (sta->ht_cap.cap &
IEEE80211_HT_CAP_SUP_WIDTH_20_40))
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (sta && (sta->vht_cap.vht_supported))
+ if (sta && sta->vht_cap.vht_supported)
rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
} else {
if (mac->bw_80)
@@ -150,8 +150,8 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
if (sgi_20 || sgi_40 || sgi_80)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (sta && sta->ht_cap.ht_supported &&
- ((wireless_mode == WIRELESS_MODE_N_5G) ||
- (wireless_mode == WIRELESS_MODE_N_24G)))
+ (wireless_mode == WIRELESS_MODE_N_5G ||
+ wireless_mode == WIRELESS_MODE_N_24G))
rate->flags |= IEEE80211_TX_RC_MCS;
if (sta && sta->vht_cap.vht_supported &&
(wireless_mode == WIRELESS_MODE_AC_5G ||
@@ -232,7 +232,7 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- if ((sta->ht_cap.ht_supported) &&
+ if (sta->ht_cap.ht_supported &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
@@ -281,10 +281,8 @@ static void *rtl_rate_alloc_sta(void *ppriv,
struct rtl_rate_priv *rate_priv;
rate_priv = kzalloc(sizeof(*rate_priv), gfp);
- if (!rate_priv) {
- pr_err("Unable to allocate private rc structure\n");
+ if (!rate_priv)
return NULL;
- }
rtlpriv->rate_priv = rate_priv;
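Note: the dropped pr_err() follows the general rule that kmalloc()/kzalloc() without __GFP_NOWARN already emit a warning and backtrace on failure, so per-call-site OOM messages add nothing. A generic sketch of the preferred shape (struct foo and foo_alloc are illustrative names):

#include <linux/slab.h>

struct foo {
	int x;
};

static struct foo *foo_alloc(gfp_t gfp)
{
	/* No error print here: the allocator itself warns on failure
	 * unless __GFP_NOWARN was passed.
	 */
	return kzalloc(sizeof(struct foo), gfp);
}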
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index 8e24da16752c..f45487122517 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -330,7 +330,7 @@ void rtl8822be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
byte5 = btc_ops->btc_get_lps_val(rtlpriv);
power_state = btc_ops->btc_get_rpwm_val(rtlpriv);
- if ((rlbm == 2) && (byte5 & BIT(4))) {
+ if (rlbm == 2 && (byte5 & BIT(4))) {
/* Keep awake interval to 1 to prevent from
* decreasing coex performance
*/
@@ -419,7 +419,7 @@ static bool _rtl8822be_send_bcn_or_cmd_packet(struct ieee80211_hw *hw,
dma_addr = rtlpriv->cfg->ops->get_desc(
hw, (u8 *)pbd_desc, true, HW_DESC_TXBUFF_ADDR);
- pci_unmap_single(rtlpci->pdev, dma_addr, skb->len,
+ pci_unmap_single(rtlpci->pdev, dma_addr, pskb->len,
PCI_DMA_TODEVICE);
kfree_skb(pskb);
@@ -766,9 +766,10 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
rtl8822be_fill_h2c_cmd(hw, H2C_8822B_RSVDPAGE,
sizeof(u1_rsvd_page_loc),
u1_rsvd_page_loc);
- } else
+ } else {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Set RSVD page location to Fw FAIL!!!!!!.\n");
+ }
}
/* Should check FW support p2p or not. */
@@ -834,7 +835,7 @@ void rtl8822be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
rtl_write_dword(rtlpriv, 0x5EC,
p2pinfo->noa_count_type[i]);
}
- if ((p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0)) {
+ if (p2pinfo->opp_ps == 1 || p2pinfo->noa_num > 0) {
/* rst p2p circuit */
rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST_8822B, BIT(4));
p2p_ps_offload->offload_en = 1;
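Note: the pskb->len change above restores the rule that a streaming DMA unmap must use the same buffer length (and direction) that was mapped; the buffer handed to the hardware here is pskb, not the caller's skb. A self-contained sketch of the pairing, under the assumption of the legacy PCI DMA API used by this driver (send_and_free is a made-up helper, not driver code):

#include <linux/pci.h>
#include <linux/skbuff.h>

static void send_and_free(struct pci_dev *pdev, struct sk_buff *pskb)
{
	dma_addr_t dma_addr;

	dma_addr = pci_map_single(pdev, pskb->data, pskb->len,
				  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, dma_addr))
		return;

	/* ... point the hardware at dma_addr and wait for completion ... */

	/* Unmap with exactly the length and direction used for the map. */
	pci_unmap_single(pdev, dma_addr, pskb->len, PCI_DMA_TODEVICE);
	kfree_skb(pskb);
}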
diff --git a/drivers/staging/rtlwifi/rtl8822be/led.c b/drivers/staging/rtlwifi/rtl8822be/led.c
index f4b5af8ab116..0054c892dce6 100644
--- a/drivers/staging/rtlwifi/rtl8822be/led.c
+++ b/drivers/staging/rtlwifi/rtl8822be/led.c
@@ -114,7 +114,7 @@ void rtl8822be_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ if (ppsc->rfoff_reason > RF_CHANGE_BY_PS &&
(ledaction == LED_CTL_TX || ledaction == LED_CTL_RX ||
ledaction == LED_CTL_SITE_SURVEY || ledaction == LED_CTL_LINK ||
ledaction == LED_CTL_NO_LINK ||
diff --git a/drivers/staging/rtlwifi/rtl8822be/phy.c b/drivers/staging/rtlwifi/rtl8822be/phy.c
index 4cba2adc3165..ef37ae98c803 100644
--- a/drivers/staging/rtlwifi/rtl8822be/phy.c
+++ b/drivers/staging/rtlwifi/rtl8822be/phy.c
@@ -890,7 +890,7 @@ bool rtl8822be_load_txpower_by_rate(struct ieee80211_hw *hw)
rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_by_rate(rtlpriv);
if (!rtstatus) {
- pr_err("BB_PG Reg Fail!!");
+ pr_err("BB_PG Reg Fail!!\n");
return false;
}
@@ -915,7 +915,7 @@ bool rtl8822be_load_txpower_limit(struct ieee80211_hw *hw)
rtstatus = rtlpriv->phydm.ops->phydm_load_txpower_limit(rtlpriv);
if (!rtstatus) {
- pr_err("RF TxPwr Limit Fail!!");
+ pr_err("RF TxPwr Limit Fail!!\n");
return false;
}
@@ -1562,9 +1562,10 @@ static char _rtl8822be_phy_get_txpower_limit(struct ieee80211_hw *hw, u8 band,
[channel_index]
[rate_section]
[channel_index][rf_path];
- } else
+ } else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"No power limit table of the specified band\n");
+ }
return power_limit;
}
@@ -1609,9 +1610,9 @@ u8 rtl8822be_get_txpower_index(struct ieee80211_hw *hw, u8 path, u8 rate,
char limit;
char powerdiff_byrate = 0;
- if (((rtlhal->current_bandtype == BAND_ON_2_4G) &&
+ if ((rtlhal->current_bandtype == BAND_ON_2_4G &&
(channel > 14 || channel < 1)) ||
- ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
+ (rtlhal->current_bandtype == BAND_ON_5G && channel <= 14)) {
index = 0;
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Illegal channel!!\n");
@@ -1755,9 +1756,9 @@ static void _rtl8822be_phy_set_txpower_index(struct ieee80211_hw *hw,
static u32 index;
/*
- * For 8822B, phydm api use 4 bytes txagc value
- * driver must combine every four 1 byte to one 4 byte and send to phydm
- */
+	 * For 8822B, the phydm API uses a 4-byte txagc value, so the driver
+	 * must combine every four 1-byte values into one 4-byte value for phydm
+ */
shift = rate & 0x03;
index |= ((u32)power_index << (shift * 8));
@@ -1912,8 +1913,8 @@ static u8 _rtl8822be_phy_get_pri_ch_id(struct rtl_priv *rtlpriv)
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
/* primary channel is at lower subband of 80MHz & 40MHz */
- if ((mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) {
+ if (mac->cur_40_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER &&
+ mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER) {
pri_ch_idx = VHT_DATA_SC_20_LOWEST_OF_80MHZ;
/* primary channel is at
* lower subband of 80MHz & upper subband of 40MHz
@@ -2141,7 +2142,7 @@ static bool _rtl8822be_phy_set_rf_power_state(struct ieee80211_hw *hw,
switch (rfpwr_state) {
case ERFON:
- if ((ppsc->rfpwr_state == ERFOFF) &&
+ if (ppsc->rfpwr_state == ERFOFF &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
bool rtstatus = false;
u32 initialize_count = 0;
diff --git a/drivers/staging/rtlwifi/rtl8822be/trx.c b/drivers/staging/rtlwifi/rtl8822be/trx.c
index 38f80e48a399..87e15e419252 100644
--- a/drivers/staging/rtlwifi/rtl8822be/trx.c
+++ b/drivers/staging/rtlwifi/rtl8822be/trx.c
@@ -165,7 +165,7 @@ static bool rtl8822be_get_rxdesc_is_ht(struct ieee80211_hw *hw, u8 *pdesc)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
- if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15))
+ if (rx_rate >= DESC_RATEMCS0 && rx_rate <= DESC_RATEMCS15)
return true;
else
return false;
@@ -193,8 +193,8 @@ static u8 rtl8822be_get_rx_vht_nss(struct ieee80211_hw *hw, u8 *pdesc)
rx_rate = GET_RX_DESC_RX_RATE(pdesc);
- if ((rx_rate >= DESC_RATEVHT1SS_MCS0) &&
- (rx_rate <= DESC_RATEVHT1SS_MCS9))
+ if (rx_rate >= DESC_RATEVHT1SS_MCS0 &&
+ rx_rate <= DESC_RATEVHT1SS_MCS9)
vht_nss = 1;
else if ((rx_rate >= DESC_RATEVHT2SS_MCS0) &&
(rx_rate <= DESC_RATEVHT2SS_MCS9))
@@ -510,8 +510,8 @@ static u8 rtl8822be_bw_mapping(struct ieee80211_hw *hw,
else
bw_setting_of_desc = 0;
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- if ((ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) ||
- (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80))
+ if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40 ||
+ ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
bw_setting_of_desc = 1;
else
bw_setting_of_desc = 0;
@@ -546,10 +546,10 @@ static u8 rtl8822be_sc_mapping(struct ieee80211_hw *hw,
"%s: Not Correct Primary40MHz Setting\n",
__func__);
} else {
- if ((mac->cur_40_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER) &&
- (mac->cur_80_prime_sc ==
- HAL_PRIME_CHNL_OFFSET_LOWER))
+ if (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER &&
+ mac->cur_80_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER)
sc_setting_of_desc =
VHT_DATA_SC_20_LOWEST_OF_80MHZ;
else if ((mac->cur_40_prime_sc ==
@@ -571,9 +571,9 @@ static u8 rtl8822be_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_20_UPPERST_OF_80MHZ;
else
- RT_TRACE(
- rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8822be_sc_mapping: Not Correct Primary40MHz Setting\n");
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
}
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 4033a2cf7ac9..d548bc695f9e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4125,12 +4125,6 @@ RTY_SEND_CMD:
rtsx_trace(chip);
return STATUS_FAIL;
}
-
- } else if (rsp_type == SD_RSP_TYPE_R0) {
- if ((ptr[3] & 0x1E) != 0x03) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
}
}
}
diff --git a/drivers/staging/rts5208/trace.c b/drivers/staging/rts5208/trace.c
index 1bddbdf3454b..c878e75293f7 100644
--- a/drivers/staging/rts5208/trace.c
+++ b/drivers/staging/rts5208/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/staging/skein/Makefile b/drivers/staging/skein/Makefile
index b7f947fb98f0..86b7966d694e 100644
--- a/drivers/staging/skein/Makefile
+++ b/drivers/staging/skein/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the skein secure hash algorithm
#
diff --git a/drivers/staging/skein/skein_iv.h b/drivers/staging/skein/skein_iv.h
index 509d464c65a3..916f029da726 100644
--- a/drivers/staging/skein/skein_iv.h
+++ b/drivers/staging/skein/skein_iv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SKEIN_IV_H_
#define _SKEIN_IV_H_
diff --git a/drivers/staging/skein/threefish_api.c b/drivers/staging/skein/threefish_api.c
index 2b649abb78c7..e69cefa6b16a 100644
--- a/drivers/staging/skein/threefish_api.c
+++ b/drivers/staging/skein/threefish_api.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include "threefish_api.h"
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 615e467579ee..21539c3cc7a0 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef THREEFISHAPI_H
#define THREEFISHAPI_H
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index 50640656c10d..87f055890544 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include "threefish_api.h"
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 4754f7a20684..313b99104398 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/sizes.h>
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 2c7a9b9a7c8a..aee82fcaf669 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_CHIP_H__
#define DDK750_CHIP_H__
#define DEFAULT_INPUT_CLOCK 14318181 /* Default reference clock */
@@ -17,7 +18,7 @@ static inline u32 peek32(u32 addr)
return readl(addr + mmio750);
}
-static inline void poke32(u32 data, u32 addr)
+static inline void poke32(u32 addr, u32 data)
{
writel(data, addr + mmio750);
}
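Note: swapping poke32() to (addr, data) makes the helper read in register-then-value order, matching how callers elsewhere in this series use it (for example the setDAC macro in ddk750_power.h below writes poke32(MISC_CTRL, ...)). A typical read-modify-write with the corrected signature might look like the sketch below; sm750_set_reg_bits is an illustrative helper, not part of the driver, and it assumes the peek32()/poke32() inlines from the hunk above.

static void sm750_set_reg_bits(u32 addr, u32 mask)
{
	poke32(addr, peek32(addr) | mask);
}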
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 9b116ed6ecc7..c6fd90191530 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "ddk750_reg.h"
#include "ddk750_chip.h"
#include "ddk750_display.h"
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index 609bf742efff..523bbf33521c 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_DISPLAY_H__
#define DDK750_DISPLAY_H__
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 87a199d6cdaf..b20d16198c17 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define USE_DVICHIP
#ifdef USE_DVICHIP
#include "ddk750_chip.h"
@@ -10,7 +11,7 @@
* function API. Please set the function pointer to NULL whenever the function
* is not supported.
*/
-static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
+static struct dvi_ctrl_device g_dcftSupportedDviController[] = {
#ifdef DVI_CTRL_SII164
{
.pfnInit = sii164InitChip,
@@ -40,7 +41,7 @@ int dviInit(unsigned char edgeSelect,
unsigned char pllFilterEnable,
unsigned char pllFilterValue)
{
- dvi_ctrl_device_t *pCurrentDviCtrl;
+ struct dvi_ctrl_device *pCurrentDviCtrl;
pCurrentDviCtrl = g_dcftSupportedDviController;
if (pCurrentDviCtrl->pfnInit) {
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index 4a8394561f76..1c7a565b617a 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_DVI_H__
#define DDK750_DVI_H__
@@ -25,7 +26,7 @@ typedef unsigned char (*PFN_DVICTRL_CHECKINTERRUPT)(void);
typedef void (*PFN_DVICTRL_CLEARINTERRUPT)(void);
/* Structure to hold all the function pointer to the DVI Controller. */
-typedef struct _dvi_ctrl_device_t {
+struct dvi_ctrl_device {
PFN_DVICTRL_INIT pfnInit;
PFN_DVICTRL_RESETCHIP pfnResetChip;
PFN_DVICTRL_GETCHIPSTRING pfnGetChipString;
@@ -36,7 +37,7 @@ typedef struct _dvi_ctrl_device_t {
PFN_DVICTRL_ISCONNECTED pfnIsConnected;
PFN_DVICTRL_CHECKINTERRUPT pfnCheckInterrupt;
PFN_DVICTRL_CLEARINTERRUPT pfnClearInterrupt;
-} dvi_ctrl_device_t;
+};
#define DVI_CTRL_SII164
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index ec556a978a98..8482689b665b 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define USE_HW_I2C
#ifdef USE_HW_I2C
#include "ddk750_chip.h"
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.h b/drivers/staging/sm750fb/ddk750_hwi2c.h
index 46e22dce2570..337c6493ca61 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.h
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_HWI2C_H__
#define DDK750_HWI2C_H__
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index bb673e18999b..2cdd87b78e58 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "ddk750_reg.h"
#include "ddk750_mode.h"
diff --git a/drivers/staging/sm750fb/ddk750_mode.h b/drivers/staging/sm750fb/ddk750_mode.h
index d5eae36d85cb..259a9d6a4eb2 100644
--- a/drivers/staging/sm750fb/ddk750_mode.h
+++ b/drivers/staging/sm750fb/ddk750_mode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_MODE_H__
#define DDK750_MODE_H__
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 222ae1a06feb..12834f78eef7 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
#include "ddk750_chip.h"
#include "ddk750_reg.h"
#include "ddk750_power.h"
-void ddk750_set_dpms(DPMS_t state)
+void ddk750_set_dpms(enum dpms state)
{
unsigned int value;
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 44c4fc587e96..e48c74ecbbd1 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -1,20 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_POWER_H__
#define DDK750_POWER_H__
-typedef enum _DPMS_t {
+enum dpms {
crtDPMS_ON = 0x0,
crtDPMS_STANDBY = 0x1,
crtDPMS_SUSPEND = 0x2,
crtDPMS_OFF = 0x3,
-}
-DPMS_t;
+};
#define setDAC(off) { \
poke32(MISC_CTRL, \
(peek32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
}
-void ddk750_set_dpms(DPMS_t state);
+void ddk750_set_dpms(enum dpms state);
void sm750_set_power_mode(unsigned int powerMode);
void sm750_set_current_gate(unsigned int gate);
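Note: these renames follow the kernel style rule against typedef'ing structs and enums: the tag is used directly at every site, so DPMS_t becomes enum dpms and the prototypes change with it. The general pattern, with illustrative names:

/* Discouraged: typedef enum _panel_state_t { ... } panel_state_t;
 * Preferred:   use the tag directly, as below.
 */
enum panel_state {
	PANEL_ON,
	PANEL_OFF,
};

void panel_set_state(enum panel_state state);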
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index f9b989b7a152..fe412ead72e5 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_REG_H__
#define DDK750_REG_H__
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 0431833de781..c787a74c4f9c 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define USE_DVICHIP
#ifdef USE_DVICHIP
@@ -296,7 +297,8 @@ void sii164SetPower(unsigned char powerUp)
* sii164SelectHotPlugDetectionMode
* This function selects the mode of the hot plug detection.
*/
-static void sii164SelectHotPlugDetectionMode(sii164_hot_plug_mode_t hotPlugMode)
+static
+void sii164SelectHotPlugDetectionMode(enum sii164_hot_plug_mode hotPlugMode)
{
unsigned char detectReg;
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 6968cf532f16..2e9a88cd6af3 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -1,15 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DDK750_SII164_H__
#define DDK750_SII164_H__
#define USE_DVICHIP
/* Hot Plug detection mode structure */
-typedef enum _sii164_hot_plug_mode_t {
+enum sii164_hot_plug_mode {
SII164_HOTPLUG_DISABLE = 0, /* Disable Hot Plug output bit (always high). */
SII164_HOTPLUG_USE_MDI, /* Use Monitor Detect Interrupt bit. */
SII164_HOTPLUG_USE_RSEN, /* Use Receiver Sense detect bit. */
SII164_HOTPLUG_USE_HTPLG /* Use Hot Plug detect bit. */
-} sii164_hot_plug_mode_t;
+};
/* Silicon Image SiI164 chip prototype */
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index 4386122799b2..ce90adcb449d 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LYNXDRV_H_
#define LYNXDRV_H_
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 4b720cfa05de..1035e91e7cd3 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index 4b0ff8feb9a0..c4f42002a50f 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ACCEL_H__
#define ACCEL_H__
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index aa47a16ac75c..bbbef27cb329 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -18,8 +19,6 @@
#include "sm750.h"
#include "sm750_cursor.h"
-
-
#define poke32(addr, data) \
writel((data), cursor->mmio + (addr))
@@ -45,7 +44,6 @@ writel((data), cursor->mmio + (addr))
#define HWC_COLOR_3 0xC
#define HWC_COLOR_3_RGB565_MASK 0xffff
-
/* hw_cursor_xxx works for voyager,718 and 750 */
void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
{
@@ -134,7 +132,6 @@ void sm750_hw_cursor_setData(struct lynx_cursor *cursor, u16 rop,
}
}
-
void sm750_hw_cursor_setData2(struct lynx_cursor *cursor, u16 rop,
const u8 *pcol, const u8 *pmsk)
{
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index c7b86ae235b4..16ac07eb58d6 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LYNX_CURSOR_H__
#define LYNX_CURSOR_H__
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index baf1bbdc92ff..ffd114a6d09b 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/staging/speakup/Makefile b/drivers/staging/speakup/Makefile
index c864ea69c40d..5befb4933b85 100644
--- a/drivers/staging/speakup/Makefile
+++ b/drivers/staging/speakup/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SPEAKUP_SYNTH_ACNTSA) += speakup_acntsa.o
obj-$(CONFIG_SPEAKUP_SYNTH_ACNTPC) += speakup_acntpc.o
obj-$(CONFIG_SPEAKUP_SYNTH_APOLLO) += speakup_apollo.o
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index f459e4004bfa..6137fa83c609 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -27,7 +27,7 @@ void speakup_start_ttys(void)
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (speakup_console[i] && speakup_console[i]->tty_stopped)
continue;
- if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
+ if (vc_cons[i].d && vc_cons[i].d->port.tty)
start_tty(vc_cons[i].d->port.tty);
}
}
@@ -38,7 +38,7 @@ static void speakup_stop_ttys(void)
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if ((vc_cons[i].d && (vc_cons[i].d->port.tty)))
+ if (vc_cons[i].d && vc_cons[i].d->port.tty)
stop_tty(vc_cons[i].d->port.tty);
}
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 58abd1d85105..d920256328c3 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
#include <linux/types.h>
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 7809867f5d28..cea8707653f5 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Internationalization implementation. Includes definitions of English
* string arrays, and the i18n pointer.
*/
diff --git a/drivers/staging/speakup/i18n.h b/drivers/staging/speakup/i18n.h
index 8fcce566653f..2ec6e659d02b 100644
--- a/drivers/staging/speakup/i18n.h
+++ b/drivers/staging/speakup/i18n.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef I18N_H
#define I18N_H
/* Internationalization declarations */
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 56f7be6af1f6..aae868509e13 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -447,7 +447,7 @@ static void speak_char(u16 ch)
cp = spk_characters[ch];
if (!cp) {
- pr_info("speak_char: cp == NULL!\n");
+ pr_info("%s: cp == NULL!\n", __func__);
return;
}
if (IS_CHAR(ch, B_CAP)) {
@@ -1164,8 +1164,8 @@ static void spkup_write(const u16 *in_buf, int count)
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
-static DEFINE_TIMER(cursor_timer, cursor_done, 0, 0);
+static void cursor_done(struct timer_list *unused);
+static DEFINE_TIMER(cursor_timer, cursor_done);
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
{
@@ -1682,7 +1682,7 @@ static int speak_highlight(struct vc_data *vc)
return 0;
}
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
{
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
@@ -2101,7 +2101,7 @@ speakup_key(struct vc_data *vc, int shift_state, int keycode, u_short keysym,
u_char shift_info, offset;
int ret = 0;
- if (synth == NULL)
+ if (!synth)
return 0;
spin_lock_irqsave(&speakup_info.spinlock, flags);
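Note: the cursor timer hunks above show the static-timer side of the 4.15 timer rework: DEFINE_TIMER() now takes only the name and the callback, and the callback receives the timer itself instead of a u_long cookie. A minimal sketch, with example_timer/example_fired/example_arm as made-up names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

static void example_fired(struct timer_list *unused);
static DEFINE_TIMER(example_timer, example_fired);

static void example_fired(struct timer_list *unused)
{
	pr_debug("example_timer expired\n");
}

/* arm it for 500 ms from now */
static void example_arm(void)
{
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(500));
}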
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 08f68fc2864e..66061b5c3427 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h> /* for kmalloc */
#include <linux/consolemap.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 89de6fff9cb2..aa691e4a6916 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPEAKUP_SERIAL_H
#define _SPEAKUP_SERIAL_H
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a654334c98b9..3d8bda8b9620 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPEAKUP_H
#define _SPEAKUP_H
diff --git a/drivers/staging/speakup/speakup_acnt.h b/drivers/staging/speakup/speakup_acnt.h
index 107ec1155f51..cffa938ae580 100644
--- a/drivers/staging/speakup/speakup_acnt.h
+++ b/drivers/staging/speakup/speakup_acnt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* speakup_acntpc.h - header file for speakups Accent-PC driver. */
#define SYNTH_IO_EXTENT 0x02
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index 0e10404e2e8c..43315849b7b6 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -136,7 +136,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_acntsa.ser, int, 0444);
-module_param_named(dev, synth_acntsa.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_acntsa.dev_name, charp, 0444);
module_param_named(start, synth_acntsa.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
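Note: the S_IRUGO -> 0444 conversions in these speakup drivers apply checkpatch's preference for literal octal permissions over the symbolic S_I* macros; both mean a world-readable, read-only sysfs attribute. For example (debug is a made-up parameter):

#include <linux/moduleparam.h>

static int debug;
/* 0444 == S_IRUGO: readable by everyone, writable by no one */
module_param(debug, int, 0444);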
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 2edb56c8a559..dcf0c3b59fdd 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -200,7 +200,7 @@ static void do_catch_up(struct spk_synth *synth)
}
module_param_named(ser, synth_apollo.ser, int, 0444);
-module_param_named(dev, synth_apollo.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_apollo.dev_name, charp, 0444);
module_param_named(start, synth_apollo.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index 8ae826eba71c..45b5721441ba 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -163,7 +163,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_audptr.ser, int, 0444);
-module_param_named(dev, synth_audptr.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_audptr.dev_name, charp, 0444);
module_param_named(start, synth_audptr.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 60bcf0df8123..402b0fbfb94d 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -120,7 +120,7 @@ static struct spk_synth synth_bns = {
};
module_param_named(ser, synth_bns.ser, int, 0444);
-module_param_named(dev, synth_bns.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_bns.dev_name, charp, 0444);
module_param_named(start, synth_bns.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 95f4b2116d0c..4310c2c276c4 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -227,7 +227,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_decext.ser, int, 0444);
-module_param_named(dev, synth_decext.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_decext.dev_name, charp, 0444);
module_param_named(start, synth_decext.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index f06995480022..5d6a861c9b1e 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -299,7 +299,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_dectlk.ser, int, 0444);
-module_param_named(dev, synth_dectlk.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_dectlk.dev_name, charp, 0444);
module_param_named(start, synth_dectlk.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_dtlk.h b/drivers/staging/speakup/speakup_dtlk.h
index 51ac0f2fcded..9c378b58066e 100644
--- a/drivers/staging/speakup/speakup_dtlk.h
+++ b/drivers/staging/speakup/speakup_dtlk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* speakup_dtlk.h - header file for speakups DoubleTalk driver. */
#define SYNTH_IO_EXTENT 0x02
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index 851953d5eefb..ea3b2911cab9 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -122,7 +122,7 @@ static struct spk_synth synth_dummy = {
};
module_param_named(ser, synth_dummy.ser, int, 0444);
-module_param_named(dev, synth_dummy.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_dummy.dev_name, charp, 0444);
module_param_named(start, synth_dummy.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index 423795f88f53..95efaab73813 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -167,7 +167,7 @@ static int synth_probe(struct spk_synth *synth)
}
module_param_named(ser, synth_ltlk.ser, int, 0444);
-module_param_named(dev, synth_ltlk.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_ltlk.dev_name, charp, 0444);
module_param_named(start, synth_ltlk.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 9ca21edc42ce..1037aa0d085a 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -131,7 +131,7 @@ static void synth_flush(struct spk_synth *synth)
}
module_param_named(ser, synth_spkout.ser, int, 0444);
-module_param_named(dev, synth_spkout.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_spkout.dev_name, charp, 0444);
module_param_named(start, synth_spkout.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 831ee404e7a1..e160034e4a68 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -119,7 +119,7 @@ static struct spk_synth synth_txprt = {
};
module_param_named(ser, synth_txprt.ser, int, 0444);
-module_param_named(dev, synth_txprt.dev_name, charp, S_IRUGO);
+module_param_named(dev, synth_txprt.dev_name, charp, 0444);
module_param_named(start, synth_txprt.startup, short, 0444);
MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
diff --git a/drivers/staging/speakup/speakupmap.h b/drivers/staging/speakup/speakupmap.h
index f1c0dd3b2c3a..c60d7339b89a 100644
--- a/drivers/staging/speakup/speakupmap.h
+++ b/drivers/staging/speakup/speakupmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
119, 62, 6,
0, 16, 20, 17, 32, 48, 0,
2, 0, 78, 0, 0, 0, 0,
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index 4d7d8f2f66ea..513cebbd161c 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -51,10 +51,8 @@ static int spk_ttyio_ldisc_open(struct tty_struct *tty)
speakup_tty = tty;
ldisc_data = kmalloc(sizeof(struct spk_ldisc_data), GFP_KERNEL);
- if (!ldisc_data) {
- pr_err("speakup: Failed to allocate ldisc_data.\n");
+ if (!ldisc_data)
return -ENOMEM;
- }
sema_init(&ldisc_data->sem, 0);
ldisc_data->buf_free = true;
@@ -90,7 +88,8 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
return 0;
/* Make sure the consumer has read buf before we have seen
- * buf_free == true and overwrite buf */
+ * buf_free == true and overwrite buf
+ */
mb();
ldisc_data->buf = cp[0];
@@ -276,7 +275,8 @@ static unsigned char ttyio_in(int timeout)
rv = ldisc_data->buf;
/* Make sure we have read buf before we set buf_free to let
- * the producer overwrite it */
+ * the producer overwrite it
+ */
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index 22f657d45e46..c50de6035a9a 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPEAKUP_TYPES_H
#define SPEAKUP_TYPES_H
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index a1ca68c76579..aac29c816d09 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -153,12 +153,12 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
{
wake_up_interruptible_all(&speakup_event);
}
-static DEFINE_TIMER(thread_timer, thread_wake_up, 0, 0);
+static DEFINE_TIMER(thread_timer, thread_wake_up);
void synth_start(void)
{
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
index 37a0781b0d0c..5359f556d203 100644
--- a/drivers/staging/typec/Kconfig
+++ b/drivers/staging/typec/Kconfig
@@ -1,13 +1,5 @@
menu "USB Power Delivery and Type-C drivers"
-config TYPEC_TCPM
- tristate "USB Type-C Port Controller Manager"
- depends on USB
- select TYPEC
- help
- The Type-C Port Controller Manager provides a USB PD and USB Type-C
- state machine for use with Type-C Port Controllers.
-
if TYPEC_TCPM
config TYPEC_TCPCI
@@ -17,8 +9,6 @@ config TYPEC_TCPCI
help
Type-C Port Controller driver for TCPCI-compliant controller.
-source "drivers/staging/typec/fusb302/Kconfig"
-
endif
endmenu
diff --git a/drivers/staging/typec/Makefile b/drivers/staging/typec/Makefile
index 30a7e29cbc9e..53d649abcb53 100644
--- a/drivers/staging/typec/Makefile
+++ b/drivers/staging/typec/Makefile
@@ -1,3 +1 @@
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-y += fusb302/
diff --git a/drivers/staging/typec/TODO b/drivers/staging/typec/TODO
index bc1f97a2d1bf..53fe2f726c88 100644
--- a/drivers/staging/typec/TODO
+++ b/drivers/staging/typec/TODO
@@ -1,13 +1,3 @@
-tcpm:
-- Add documentation (at the very least for the API to low level drivers)
-- Split PD code into separate file
-- Check if it makes sense to use tracepoints instead of debugfs for debug logs
-- Implement Alternate Mode handling
-- Address "#if 0" code if not addressed with the above
-- Validate all comments marked with "XXX"; either address or remove comments
-- Add support for USB PD 3.0. While not mandatory, at least fast role swap
- as well as authentication support would be very desirable.
-
tcpci:
- Test with real hardware
diff --git a/drivers/staging/typec/fusb302/TODO b/drivers/staging/typec/fusb302/TODO
deleted file mode 100644
index 19b466eb585d..000000000000
--- a/drivers/staging/typec/fusb302/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-fusb302:
-- Find a better logging scheme, at least not having the same debugging/logging
- code replicated here and in tcpm
-- Find a non-hacky way to coordinate between PM and I2C access
-- Documentation? The FUSB302 datasheet provides information on the chip to help
- understand the code. But it may still be helpful to have a documentation.
-- We may want to replace the "fcs,max-snk-microvolt", "fcs,max-snk-microamp",
- "fcs,max-snk-microwatt" and "fcs,operating-snk-microwatt" device(tree)
- properties with properties which are part of a generic type-c controller
- devicetree binding.
diff --git a/drivers/staging/typec/pd.h b/drivers/staging/typec/pd.h
deleted file mode 100644
index 30b32ad72acd..000000000000
--- a/drivers/staging/typec/pd.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015-2017 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_USB_PD_H
-#define __LINUX_USB_PD_H
-
-#include <linux/types.h>
-#include <linux/usb/typec.h>
-
-/* USB PD Messages */
-enum pd_ctrl_msg_type {
- /* 0 Reserved */
- PD_CTRL_GOOD_CRC = 1,
- PD_CTRL_GOTO_MIN = 2,
- PD_CTRL_ACCEPT = 3,
- PD_CTRL_REJECT = 4,
- PD_CTRL_PING = 5,
- PD_CTRL_PS_RDY = 6,
- PD_CTRL_GET_SOURCE_CAP = 7,
- PD_CTRL_GET_SINK_CAP = 8,
- PD_CTRL_DR_SWAP = 9,
- PD_CTRL_PR_SWAP = 10,
- PD_CTRL_VCONN_SWAP = 11,
- PD_CTRL_WAIT = 12,
- PD_CTRL_SOFT_RESET = 13,
- /* 14-15 Reserved */
-};
-
-enum pd_data_msg_type {
- /* 0 Reserved */
- PD_DATA_SOURCE_CAP = 1,
- PD_DATA_REQUEST = 2,
- PD_DATA_BIST = 3,
- PD_DATA_SINK_CAP = 4,
- /* 5-14 Reserved */
- PD_DATA_VENDOR_DEF = 15,
-};
-
-#define PD_REV10 0x0
-#define PD_REV20 0x1
-
-#define PD_HEADER_CNT_SHIFT 12
-#define PD_HEADER_CNT_MASK 0x7
-#define PD_HEADER_ID_SHIFT 9
-#define PD_HEADER_ID_MASK 0x7
-#define PD_HEADER_PWR_ROLE BIT(8)
-#define PD_HEADER_REV_SHIFT 6
-#define PD_HEADER_REV_MASK 0x3
-#define PD_HEADER_DATA_ROLE BIT(5)
-#define PD_HEADER_TYPE_SHIFT 0
-#define PD_HEADER_TYPE_MASK 0xf
-
-#define PD_HEADER(type, pwr, data, id, cnt) \
- ((((type) & PD_HEADER_TYPE_MASK) << PD_HEADER_TYPE_SHIFT) | \
- ((pwr) == TYPEC_SOURCE ? PD_HEADER_PWR_ROLE : 0) | \
- ((data) == TYPEC_HOST ? PD_HEADER_DATA_ROLE : 0) | \
- (PD_REV20 << PD_HEADER_REV_SHIFT) | \
- (((id) & PD_HEADER_ID_MASK) << PD_HEADER_ID_SHIFT) | \
- (((cnt) & PD_HEADER_CNT_MASK) << PD_HEADER_CNT_SHIFT))
-
-#define PD_HEADER_LE(type, pwr, data, id, cnt) \
- cpu_to_le16(PD_HEADER((type), (pwr), (data), (id), (cnt)))
-
-static inline unsigned int pd_header_cnt(u16 header)
-{
- return (header >> PD_HEADER_CNT_SHIFT) & PD_HEADER_CNT_MASK;
-}
-
-static inline unsigned int pd_header_cnt_le(__le16 header)
-{
- return pd_header_cnt(le16_to_cpu(header));
-}
-
-static inline unsigned int pd_header_type(u16 header)
-{
- return (header >> PD_HEADER_TYPE_SHIFT) & PD_HEADER_TYPE_MASK;
-}
-
-static inline unsigned int pd_header_type_le(__le16 header)
-{
- return pd_header_type(le16_to_cpu(header));
-}
-
-static inline unsigned int pd_header_msgid(u16 header)
-{
- return (header >> PD_HEADER_ID_SHIFT) & PD_HEADER_ID_MASK;
-}
-
-static inline unsigned int pd_header_msgid_le(__le16 header)
-{
- return pd_header_msgid(le16_to_cpu(header));
-}
-
-#define PD_MAX_PAYLOAD 7
-
-struct pd_message {
- __le16 header;
- __le32 payload[PD_MAX_PAYLOAD];
-} __packed;
-
-/* PDO: Power Data Object */
-#define PDO_MAX_OBJECTS 7
-
-enum pd_pdo_type {
- PDO_TYPE_FIXED = 0,
- PDO_TYPE_BATT = 1,
- PDO_TYPE_VAR = 2,
-};
-
-#define PDO_TYPE_SHIFT 30
-#define PDO_TYPE_MASK 0x3
-
-#define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT)
-
-#define PDO_VOLT_MASK 0x3ff
-#define PDO_CURR_MASK 0x3ff
-#define PDO_PWR_MASK 0x3ff
-
-#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */
-#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */
-#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */
-#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
-#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
-#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
-#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
-#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
-
-#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT)
-#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT)
-
-#define PDO_FIXED(mv, ma, flags) \
- (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
- PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
-
-#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
-#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
-#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
-
-#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT)
-#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT)
-#define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
-
-#define PDO_BATT(min_mv, max_mv, max_mw) \
- (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \
- PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw))
-
-#define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */
-#define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */
-#define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */
-
-#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT)
-#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT)
-#define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT)
-
-#define PDO_VAR(min_mv, max_mv, max_ma) \
- (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
- PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
-
-static inline enum pd_pdo_type pdo_type(u32 pdo)
-{
- return (pdo >> PDO_TYPE_SHIFT) & PDO_TYPE_MASK;
-}
-
-static inline unsigned int pdo_fixed_voltage(u32 pdo)
-{
- return ((pdo >> PDO_FIXED_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
-}
-
-static inline unsigned int pdo_min_voltage(u32 pdo)
-{
- return ((pdo >> PDO_VAR_MIN_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
-}
-
-static inline unsigned int pdo_max_voltage(u32 pdo)
-{
- return ((pdo >> PDO_VAR_MAX_VOLT_SHIFT) & PDO_VOLT_MASK) * 50;
-}
-
-static inline unsigned int pdo_max_current(u32 pdo)
-{
- return ((pdo >> PDO_VAR_MAX_CURR_SHIFT) & PDO_CURR_MASK) * 10;
-}
-
-static inline unsigned int pdo_max_power(u32 pdo)
-{
- return ((pdo >> PDO_BATT_MAX_PWR_SHIFT) & PDO_PWR_MASK) * 250;
-}
-
-/* RDO: Request Data Object */
-#define RDO_OBJ_POS_SHIFT 28
-#define RDO_OBJ_POS_MASK 0x7
-#define RDO_GIVE_BACK BIT(27) /* Supports reduced operating current */
-#define RDO_CAP_MISMATCH BIT(26) /* Not satisfied by source caps */
-#define RDO_USB_COMM BIT(25) /* USB communications capable */
-#define RDO_NO_SUSPEND BIT(24) /* USB Suspend not supported */
-
-#define RDO_PWR_MASK 0x3ff
-#define RDO_CURR_MASK 0x3ff
-
-#define RDO_FIXED_OP_CURR_SHIFT 10
-#define RDO_FIXED_MAX_CURR_SHIFT 0
-
-#define RDO_OBJ(idx) (((idx) & RDO_OBJ_POS_MASK) << RDO_OBJ_POS_SHIFT)
-
-#define PDO_FIXED_OP_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_OP_CURR_SHIFT)
-#define PDO_FIXED_MAX_CURR(ma) ((((ma) / 10) & RDO_CURR_MASK) << RDO_FIXED_MAX_CURR_SHIFT)
-
-#define RDO_FIXED(idx, op_ma, max_ma, flags) \
- (RDO_OBJ(idx) | (flags) | \
- PDO_FIXED_OP_CURR(op_ma) | PDO_FIXED_MAX_CURR(max_ma))
-
-#define RDO_BATT_OP_PWR_SHIFT 10 /* 250mW units */
-#define RDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
-
-#define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)
-#define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)
-
-#define RDO_BATT(idx, op_mw, max_mw, flags) \
- (RDO_OBJ(idx) | (flags) | \
- RDO_BATT_OP_PWR(op_mw) | RDO_BATT_MAX_PWR(max_mw))
-
-static inline unsigned int rdo_index(u32 rdo)
-{
- return (rdo >> RDO_OBJ_POS_SHIFT) & RDO_OBJ_POS_MASK;
-}
-
-static inline unsigned int rdo_op_current(u32 rdo)
-{
- return ((rdo >> RDO_FIXED_OP_CURR_SHIFT) & RDO_CURR_MASK) * 10;
-}
-
-static inline unsigned int rdo_max_current(u32 rdo)
-{
- return ((rdo >> RDO_FIXED_MAX_CURR_SHIFT) &
- RDO_CURR_MASK) * 10;
-}
-
-static inline unsigned int rdo_op_power(u32 rdo)
-{
- return ((rdo >> RDO_BATT_OP_PWR_SHIFT) & RDO_PWR_MASK) * 250;
-}
-
-static inline unsigned int rdo_max_power(u32 rdo)
-{
- return ((rdo >> RDO_BATT_MAX_PWR_SHIFT) & RDO_PWR_MASK) * 250;
-}
-
-/* USB PD timers and counters */
-#define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */
-#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
-#define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */
-#define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */
-#define PD_T_SOURCE_ACTIVITY 45
-#define PD_T_SINK_ACTIVITY 135
-#define PD_T_SINK_WAIT_CAP 240
-#define PD_T_PS_TRANSITION 500
-#define PD_T_SRC_TRANSITION 35
-#define PD_T_DRP_SNK 40
-#define PD_T_DRP_SRC 30
-#define PD_T_PS_SOURCE_OFF 920
-#define PD_T_PS_SOURCE_ON 480
-#define PD_T_PS_HARD_RESET 30
-#define PD_T_SRC_RECOVER 760
-#define PD_T_SRC_RECOVER_MAX 1000
-#define PD_T_SRC_TURN_ON 275
-#define PD_T_SAFE_0V 650
-#define PD_T_VCONN_SOURCE_ON 100
-#define PD_T_SINK_REQUEST 100 /* 100 ms minimum */
-#define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */
-#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */
-#define PD_T_NEWSRC 250 /* Maximum of 275ms */
-
-#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
-#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
-
-#define PD_T_CC_DEBOUNCE 200 /* 100 - 200 ms */
-#define PD_T_PD_DEBOUNCE 20 /* 10 - 20 ms */
-
-#define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
-#define PD_N_HARD_RESET_COUNT 2
-
-#endif /* __LINUX_USB_PD_H */
diff --git a/drivers/staging/typec/pd_bdo.h b/drivers/staging/typec/pd_bdo.h
deleted file mode 100644
index 90b94d9fea5d..000000000000
--- a/drivers/staging/typec/pd_bdo.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2015-2017 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_USB_PD_BDO_H
-#define __LINUX_USB_PD_BDO_H
-
-/* BDO : BIST Data Object */
-#define BDO_MODE_RECV (0 << 28)
-#define BDO_MODE_TRANSMIT (1 << 28)
-#define BDO_MODE_COUNTERS (2 << 28)
-#define BDO_MODE_CARRIER0 (3 << 28)
-#define BDO_MODE_CARRIER1 (4 << 28)
-#define BDO_MODE_CARRIER2 (5 << 28)
-#define BDO_MODE_CARRIER3 (6 << 28)
-#define BDO_MODE_EYE (7 << 28)
-#define BDO_MODE_TESTDATA (8 << 28)
-
-#define BDO_MODE_MASK(mode) ((mode) & 0xf0000000)
-
-#endif
diff --git a/drivers/staging/typec/pd_vdo.h b/drivers/staging/typec/pd_vdo.h
deleted file mode 100644
index d92259f8de0a..000000000000
--- a/drivers/staging/typec/pd_vdo.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Copyright 2015-2017 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_USB_PD_VDO_H
-#define __LINUX_USB_PD_VDO_H
-
-#include "pd.h"
-
-/*
- * VDO : Vendor Defined Message Object
- * VDM object is minimum of VDM header + 6 additional data objects.
- */
-
-#define VDO_MAX_OBJECTS 6
-#define VDO_MAX_SIZE (VDO_MAX_OBJECTS + 1)
-
-/*
- * VDM header
- * ----------
- * <31:16> :: SVID
- * <15> :: VDM type ( 1b == structured, 0b == unstructured )
- * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
- * <12:11> :: reserved
- * <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
- * <7:6> :: command type (SVDM only?)
- * <5> :: reserved (SVDM), command type (UVDM)
- * <4:0> :: command
- */
-#define VDO(vid, type, custom) \
- (((vid) << 16) | \
- ((type) << 15) | \
- ((custom) & 0x7FFF))
-
-#define VDO_SVDM_TYPE (1 << 15)
-#define VDO_SVDM_VERS(x) ((x) << 13)
-#define VDO_OPOS(x) ((x) << 8)
-#define VDO_CMDT(x) ((x) << 6)
-#define VDO_OPOS_MASK VDO_OPOS(0x7)
-#define VDO_CMDT_MASK VDO_CMDT(0x3)
-
-#define CMDT_INIT 0
-#define CMDT_RSP_ACK 1
-#define CMDT_RSP_NAK 2
-#define CMDT_RSP_BUSY 3
-
-/* reserved for SVDM ... for Google UVDM */
-#define VDO_SRC_INITIATOR (0 << 5)
-#define VDO_SRC_RESPONDER (1 << 5)
-
-#define CMD_DISCOVER_IDENT 1
-#define CMD_DISCOVER_SVID 2
-#define CMD_DISCOVER_MODES 3
-#define CMD_ENTER_MODE 4
-#define CMD_EXIT_MODE 5
-#define CMD_ATTENTION 6
-
-#define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f))
-
-/* ChromeOS specific commands */
-#define VDO_CMD_VERSION VDO_CMD_VENDOR(0)
-#define VDO_CMD_SEND_INFO VDO_CMD_VENDOR(1)
-#define VDO_CMD_READ_INFO VDO_CMD_VENDOR(2)
-#define VDO_CMD_REBOOT VDO_CMD_VENDOR(5)
-#define VDO_CMD_FLASH_ERASE VDO_CMD_VENDOR(6)
-#define VDO_CMD_FLASH_WRITE VDO_CMD_VENDOR(7)
-#define VDO_CMD_ERASE_SIG VDO_CMD_VENDOR(8)
-#define VDO_CMD_PING_ENABLE VDO_CMD_VENDOR(10)
-#define VDO_CMD_CURRENT VDO_CMD_VENDOR(11)
-#define VDO_CMD_FLIP VDO_CMD_VENDOR(12)
-#define VDO_CMD_GET_LOG VDO_CMD_VENDOR(13)
-#define VDO_CMD_CCD_EN VDO_CMD_VENDOR(14)
-
-#define PD_VDO_VID(vdo) ((vdo) >> 16)
-#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
-#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
-#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
-#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
-
-/*
- * SVDM Identity request -> response
- *
- * Request is simply properly formatted SVDM header
- *
- * Response is 4 data objects:
- * [0] :: SVDM header
- * [1] :: Identitiy header
- * [2] :: Cert Stat VDO
- * [3] :: (Product | Cable) VDO
- * [4] :: AMA VDO
- *
- */
-#define VDO_INDEX_HDR 0
-#define VDO_INDEX_IDH 1
-#define VDO_INDEX_CSTAT 2
-#define VDO_INDEX_CABLE 3
-#define VDO_INDEX_PRODUCT 3
-#define VDO_INDEX_AMA 4
-
-/*
- * SVDM Identity Header
- * --------------------
- * <31> :: data capable as a USB host
- * <30> :: data capable as a USB device
- * <29:27> :: product type
- * <26> :: modal operation supported (1b == yes)
- * <25:16> :: Reserved, Shall be set to zero
- * <15:0> :: USB-IF assigned VID for this cable vendor
- */
-#define IDH_PTYPE_UNDEF 0
-#define IDH_PTYPE_HUB 1
-#define IDH_PTYPE_PERIPH 2
-#define IDH_PTYPE_PCABLE 3
-#define IDH_PTYPE_ACABLE 4
-#define IDH_PTYPE_AMA 5
-
-#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
- ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
- | (is_modal) << 26 | ((vid) & 0xffff))
-
-#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
-#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
-#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
-
-/*
- * Cert Stat VDO
- * -------------
- * <31:0> : USB-IF assigned XID for this cable
- */
-#define PD_CSTAT_XID(vdo) (vdo)
-
-/*
- * Product VDO
- * -----------
- * <31:16> : USB Product ID
- * <15:0> : USB bcdDevice
- */
-#define VDO_PRODUCT(pid, bcd) (((pid) & 0xffff) << 16 | ((bcd) & 0xffff))
-#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
-
-/*
- * Cable VDO
- * ---------
- * <31:28> :: Cable HW version
- * <27:24> :: Cable FW version
- * <23:20> :: Reserved, Shall be set to zero
- * <19:18> :: type-C to Type-A/B/C (00b == A, 01 == B, 10 == C)
- * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
- * <16:13> :: cable latency (0001 == <10ns(~1m length))
- * <12:11> :: cable termination type (11b == both ends active VCONN req)
- * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
- * <9> :: SSTX2 Directionality support
- * <8> :: SSRX1 Directionality support
- * <7> :: SSRX2 Directionality support
- * <6:5> :: Vbus current handling capability
- * <4> :: Vbus through cable (0b == no, 1b == yes)
- * <3> :: SOP" controller present? (0b == no, 1b == yes)
- * <2:0> :: USB SS Signaling support
- */
-#define CABLE_ATYPE 0
-#define CABLE_BTYPE 1
-#define CABLE_CTYPE 2
-#define CABLE_PLUG 0
-#define CABLE_RECEPTACLE 1
-#define CABLE_CURR_1A5 0
-#define CABLE_CURR_3A 1
-#define CABLE_CURR_5A 2
-#define CABLE_USBSS_U2_ONLY 0
-#define CABLE_USBSS_U31_GEN1 1
-#define CABLE_USBSS_U31_GEN2 2
-#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
- vps, sopp, usbss) \
- (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
- | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
- | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
- | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
- | ((usbss) & 0x7))
-
-/*
- * AMA VDO
- * ---------
- * <31:28> :: Cable HW version
- * <27:24> :: Cable FW version
- * <23:12> :: Reserved, Shall be set to zero
- * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
- * <10> :: SSTX2 Directionality support
- * <9> :: SSRX1 Directionality support
- * <8> :: SSRX2 Directionality support
- * <7:5> :: Vconn power
- * <4> :: Vconn power required
- * <3> :: Vbus power required
- * <2:0> :: USB SS Signaling support
- */
-#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
- (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
- | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
- | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
- | ((usbss) & 0x7))
-
-#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
-#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
-
-#define AMA_VCONN_PWR_1W 0
-#define AMA_VCONN_PWR_1W5 1
-#define AMA_VCONN_PWR_2W 2
-#define AMA_VCONN_PWR_3W 3
-#define AMA_VCONN_PWR_4W 4
-#define AMA_VCONN_PWR_5W 5
-#define AMA_VCONN_PWR_6W 6
-#define AMA_USBSS_U2_ONLY 0
-#define AMA_USBSS_U31_GEN1 1
-#define AMA_USBSS_U31_GEN2 2
-#define AMA_USBSS_BBONLY 3
-
-/*
- * SVDM Discover SVIDs request -> response
- *
- * Request is properly formatted VDM Header with discover SVIDs command.
- * Response is a set of SVIDs of all all supported SVIDs with all zero's to
- * mark the end of SVIDs. If more than 12 SVIDs are supported command SHOULD be
- * repeated.
- */
-#define VDO_SVID(svid0, svid1) (((svid0) & 0xffff) << 16 | ((svid1) & 0xffff))
-#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
-#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
-
-/* USB-IF SIDs */
-#define USB_SID_PD 0xff00 /* power delivery */
-#define USB_SID_DISPLAYPORT 0xff01
-#define USB_SID_MHL 0xff02 /* Mobile High-Definition Link */
-
-/* VDM command timeouts (in ms) */
-
-#define PD_T_VDM_UNSTRUCTURED 500
-#define PD_T_VDM_BUSY 100
-#define PD_T_VDM_WAIT_MODE_E 100
-#define PD_T_VDM_SNDR_RSP 30
-#define PD_T_VDM_E_MODE 25
-#define PD_T_VDM_RCVR_RSP 15
-
-#endif /* __LINUX_USB_PD_VDO_H */
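(For illustration only, not part of the patch: composing and decoding a structured VDM header with the macros above; the SVID, object position, and helper names are example values.)

static u32 build_enter_mode_hdr(u32 svid, int opos)
{
	/* Structured VDM, initiator command type, Enter Mode request. */
	return VDO(svid, 1, VDO_SVDM_VERS(0) | VDO_OPOS(opos) |
		   VDO_CMDT(CMDT_INIT) | CMD_ENTER_MODE);
}

static bool hdr_is_dp_ack(u32 vdo)
{
	/* Check the SVID in <31:16> and the command type in <7:6>. */
	return PD_VDO_VID(vdo) == USB_SID_DISPLAYPORT &&
	       PD_VDO_CMDT(vdo) == CMDT_RSP_ACK;
}
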
diff --git a/drivers/staging/typec/tcpci.c b/drivers/staging/typec/tcpci.c
index df72d8b01e73..b6abaf79ef0b 100644
--- a/drivers/staging/typec/tcpci.c
+++ b/drivers/staging/typec/tcpci.c
@@ -20,11 +20,11 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
-#include "pd.h"
#include "tcpci.h"
-#include "tcpm.h"
#define PD_RETRY_COUNT 3
@@ -139,6 +139,7 @@ static enum typec_cc_status tcpci_to_typec_cc(unsigned int cc, bool sink)
case 0x3:
if (sink)
return TYPEC_CC_RP_3_0;
+ /* fall through */
case 0x0:
default:
return TYPEC_CC_OPEN;
diff --git a/drivers/staging/typec/tcpm.h b/drivers/staging/typec/tcpm.h
deleted file mode 100644
index 7e9a6b7b5cd6..000000000000
--- a/drivers/staging/typec/tcpm.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2015-2017 Google, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_USB_TCPM_H
-#define __LINUX_USB_TCPM_H
-
-#include <linux/bitops.h>
-#include <linux/usb/typec.h>
-#include "pd.h"
-
-enum typec_cc_status {
- TYPEC_CC_OPEN,
- TYPEC_CC_RA,
- TYPEC_CC_RD,
- TYPEC_CC_RP_DEF,
- TYPEC_CC_RP_1_5,
- TYPEC_CC_RP_3_0,
-};
-
-enum typec_cc_polarity {
- TYPEC_POLARITY_CC1,
- TYPEC_POLARITY_CC2,
-};
-
-/* Time to wait for TCPC to complete transmit */
-#define PD_T_TCPC_TX_TIMEOUT 100 /* in ms */
-#define PD_ROLE_SWAP_TIMEOUT (MSEC_PER_SEC * 10)
-
-enum tcpm_transmit_status {
- TCPC_TX_SUCCESS = 0,
- TCPC_TX_DISCARDED = 1,
- TCPC_TX_FAILED = 2,
-};
-
-enum tcpm_transmit_type {
- TCPC_TX_SOP = 0,
- TCPC_TX_SOP_PRIME = 1,
- TCPC_TX_SOP_PRIME_PRIME = 2,
- TCPC_TX_SOP_DEBUG_PRIME = 3,
- TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4,
- TCPC_TX_HARD_RESET = 5,
- TCPC_TX_CABLE_RESET = 6,
- TCPC_TX_BIST_MODE_2 = 7
-};
-
-struct tcpc_config {
- const u32 *src_pdo;
- unsigned int nr_src_pdo;
-
- const u32 *snk_pdo;
- unsigned int nr_snk_pdo;
-
- const u32 *snk_vdo;
- unsigned int nr_snk_vdo;
-
- unsigned int max_snk_mv;
- unsigned int max_snk_ma;
- unsigned int max_snk_mw;
- unsigned int operating_snk_mw;
-
- enum typec_port_type type;
- enum typec_role default_role;
- bool try_role_hw; /* try.{src,snk} implemented in hardware */
-
- const struct typec_altmode_desc *alt_modes;
-};
-
-enum tcpc_usb_switch {
- TCPC_USB_SWITCH_CONNECT,
- TCPC_USB_SWITCH_DISCONNECT,
- TCPC_USB_SWITCH_RESTORE, /* TODO FIXME */
-};
-
-/* Mux state attributes */
-#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
-#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
-#define TCPC_MUX_POLARITY_INVERTED BIT(2) /* Polarity inverted */
-
-/* Mux modes, decoded to attributes */
-enum tcpc_mux_mode {
- TYPEC_MUX_NONE = 0, /* Open switch */
- TYPEC_MUX_USB = TCPC_MUX_USB_ENABLED, /* USB only */
- TYPEC_MUX_DP = TCPC_MUX_DP_ENABLED, /* DP only */
- TYPEC_MUX_DOCK = TCPC_MUX_USB_ENABLED | /* Both USB and DP */
- TCPC_MUX_DP_ENABLED,
-};
-
-struct tcpc_mux_dev {
- int (*set)(struct tcpc_mux_dev *dev, enum tcpc_mux_mode mux_mode,
- enum tcpc_usb_switch usb_config,
- enum typec_cc_polarity polarity);
- bool dfp_only;
- void *priv_data;
-};
-
-struct tcpc_dev {
- const struct tcpc_config *config;
-
- int (*init)(struct tcpc_dev *dev);
- int (*get_vbus)(struct tcpc_dev *dev);
- /*
- * This optional callback gets called by the tcpm core when configured
- * as a snk and cc=Rp-def. This allows the tcpm to provide a fallback
- * current-limit detection method for the cc=Rp-def case. E.g. some
- * tcpcs may include BC1.2 charger detection and use that in this case.
- */
- int (*get_current_limit)(struct tcpc_dev *dev);
- int (*set_cc)(struct tcpc_dev *dev, enum typec_cc_status cc);
- int (*get_cc)(struct tcpc_dev *dev, enum typec_cc_status *cc1,
- enum typec_cc_status *cc2);
- int (*set_polarity)(struct tcpc_dev *dev,
- enum typec_cc_polarity polarity);
- int (*set_vconn)(struct tcpc_dev *dev, bool on);
- int (*set_vbus)(struct tcpc_dev *dev, bool on, bool charge);
- int (*set_current_limit)(struct tcpc_dev *dev, u32 max_ma, u32 mv);
- int (*set_pd_rx)(struct tcpc_dev *dev, bool on);
- int (*set_roles)(struct tcpc_dev *dev, bool attached,
- enum typec_role role, enum typec_data_role data);
- int (*start_drp_toggling)(struct tcpc_dev *dev,
- enum typec_cc_status cc);
- int (*try_role)(struct tcpc_dev *dev, int role);
- int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
- const struct pd_message *msg);
- struct tcpc_mux_dev *mux;
-};
-
-struct tcpm_port;
-
-struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
-void tcpm_unregister_port(struct tcpm_port *port);
-
-void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo);
-void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
- unsigned int nr_pdo,
- unsigned int max_snk_mv,
- unsigned int max_snk_ma,
- unsigned int max_snk_mw,
- unsigned int operating_snk_mw);
-
-void tcpm_vbus_change(struct tcpm_port *port);
-void tcpm_cc_change(struct tcpm_port *port);
-void tcpm_pd_receive(struct tcpm_port *port,
- const struct pd_message *msg);
-void tcpm_pd_transmit_complete(struct tcpm_port *port,
- enum tcpm_transmit_status status);
-void tcpm_pd_hard_reset(struct tcpm_port *port);
-void tcpm_tcpc_reset(struct tcpm_port *port);
-
-#endif /* __LINUX_USB_TCPM_H */
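(For illustration only, not part of the patch: a hypothetical low-level TCPC driver skeleton showing how the ops table above feeds tcpm_register_port(); the my_* names are placeholders and only a subset of the callbacks is filled in.)

static int my_get_vbus(struct tcpc_dev *dev)
{
	return 0;			/* stub: report VBUS absent */
}

static struct tcpc_dev my_tcpc = {
	.get_vbus = my_get_vbus,
	/* .init, .get_cc, .set_cc, .set_polarity, .pd_transmit, ... */
};

/* The I2C/platform probe() would then do something like:
 *
 *	port = tcpm_register_port(&client->dev, &my_tcpc);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 */
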
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index 1f0425bf3583..aaddc619c329 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -1,5 +1,5 @@
Unisys s-Par drivers
M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
-F: Documentation/s-Par/overview.txt
+F: drivers/staging/unisys/Documentation/overview.txt
F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index a70760f48566..5cd407ca2251 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -44,7 +44,7 @@
#include <linux/uuid.h>
#include <linux/skbuff.h>
-#include "channel.h"
+#include "visorchannel.h"
/*
* Must increment these whenever you insert or delete fields within this channel
@@ -348,10 +348,9 @@ struct sense_data {
* the start of the NETWORK LAYER HEADER.
*
* NOTE:
- * The full packet is described in frags but the ethernet header is
- * separately kept in ethhdr so that uisnic doesn't have "MAP" the
- * guest memory to get to the header. uisnic needs ethhdr to
- * determine how to route the packet.
+ * The full packet is described in frags but the ethernet header is separately
+ * kept in ethhdr so that uisnic doesn't have to "MAP" the guest memory to get
+ * to the header. uisnic needs ethhdr to determine how to route the packet.
*/
struct net_pkt_xmt {
int len;
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index e4ee38c3dbe4..1a0986ba3d24 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -28,87 +28,20 @@
#define __VISORBUS_H__
#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "channel.h"
-
-struct visor_device;
-extern struct bus_type visorbus_type;
-
-typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
- int status);
+#include "visorchannel.h"
struct visorchipset_state {
u32 created:1;
u32 attached:1;
u32 configured:1;
u32 running:1;
- /* Add new fields above. */
- /* Remaining bits in this 32-bit word are unused. */
-};
-
-/*
- * This struct describes a specific Supervisor channel, by providing its
- * GUID, name, and sizes.
- */
-struct visor_channeltype_descriptor {
- const guid_t guid;
- const char *name;
+ /* Remaining bits in this 32-bit word are reserved. */
};
/**
- * struct visor_driver - Information provided by each visor driver when it
- * registers with the visorbus driver.
- * @name: Name of the visor driver.
- * @owner: The module owner.
- * @channel_types: Types of channels handled by this driver, ending with
- * a zero GUID. Our specialized BUS.match() method knows
- * about this list, and uses it to determine whether this
- * driver will in fact handle a new device that it has
- * detected.
- * @probe: Called when a new device comes online, by our probe()
- * function specified by driver.probe() (triggered
- * ultimately by some call to driver_register(),
- * bus_add_driver(), or driver_attach()).
- * @remove: Called when a new device is removed, by our remove()
- * function specified by driver.remove() (triggered
- * ultimately by some call to device_release_driver()).
- * @channel_interrupt: Called periodically, whenever there is a possiblity
- * that "something interesting" may have happened to the
- * channel.
- * @pause: Called to initiate a change of the device's state. If
- * the return valu`e is < 0, there was an error and the
- * state transition will NOT occur. If the return value
- * is >= 0, then the state transition was INITIATED
- * successfully, and complete_func() will be called (or
- * was just called) with the final status when either the
- * state transition fails or completes successfully.
- * @resume: Behaves similar to pause.
- * @driver: Private reference to the device driver. For use by bus
- * driver only.
- */
-struct visor_driver {
- const char *name;
- struct module *owner;
- struct visor_channeltype_descriptor *channel_types;
- int (*probe)(struct visor_device *dev);
- void (*remove)(struct visor_device *dev);
- void (*channel_interrupt)(struct visor_device *dev);
- int (*pause)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
- int (*resume)(struct visor_device *dev,
- visorbus_state_complete_func complete_func);
-
- /* These fields are for private use by the bus driver only. */
- struct device_driver driver;
-};
-
-#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
-
-/**
* struct visor_device - A device type for things "plugged" into the visorbus
- * bus
+ * bus
* @visorchannel: Points to the channel that the device is
* associated with.
* @channel_type_guid: Identifies the channel type to the bus driver.
@@ -139,7 +72,6 @@ struct visor_driver {
* same across all visor_devices in the current
* guest. Private use by bus driver only.
*/
-
struct visor_device {
struct visorchannel *visorchannel;
guid_t channel_type_guid;
@@ -161,11 +93,74 @@ struct visor_device {
void *vbus_hdr_info;
guid_t partition_guid;
struct dentry *debugfs_dir;
- struct dentry *debugfs_client_bus_info;
+ struct dentry *debugfs_bus_info;
};
#define to_visor_device(x) container_of(x, struct visor_device, device)
+typedef void (*visorbus_state_complete_func) (struct visor_device *dev,
+ int status);
+
+/*
+ * This struct describes a specific visor channel, by providing its GUID, name,
+ * and sizes.
+ */
+struct visor_channeltype_descriptor {
+ const guid_t guid;
+ const char *name;
+ u64 min_bytes;
+ u32 version;
+};
+
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver
+ * @name: Name of the visor driver.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove: Called when a new device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ *			the return value is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume:		Behaves similarly to @pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ */
+struct visor_driver {
+ const char *name;
+ struct module *owner;
+ struct visor_channeltype_descriptor *channel_types;
+ int (*probe)(struct visor_device *dev);
+ void (*remove)(struct visor_device *dev);
+ void (*channel_interrupt)(struct visor_device *dev);
+ int (*pause)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+ int (*resume)(struct visor_device *dev,
+ visorbus_state_complete_func complete_func);
+
+ /* These fields are for private use by the bus driver only. */
+ struct device_driver driver;
+};
+
+#define to_visor_driver(x) (container_of(x, struct visor_driver, driver))
+
int visor_check_channel(struct channel_header *ch, struct device *dev,
const guid_t *expected_uuid, char *chname,
u64 expected_min_bytes, u32 expected_version,
@@ -182,26 +177,6 @@ int visorbus_write_channel(struct visor_device *dev,
int visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
-/*
- * Levels of severity for diagnostic events, in order from lowest severity to
- * highest (i.e. fatal errors are the most severe, and should always be logged,
- * but info events rarely need to be logged except during debugging). The
- * values DIAG_SEVERITY_ENUM_BEGIN and DIAG_SEVERITY_ENUM_END are not valid
- * severity values. They exist merely to dilineate the list, so that future
- * additions won't require changes to the driver (i.e. when checking for
- * out-of-range severities in SetSeverity). The values DIAG_SEVERITY_OVERRIDE
- * and DIAG_SEVERITY_SHUTOFF are not valid severity values for logging events
- * but they are valid for controlling the amount of event data. Changes made
- * to the enum, need to be reflected in s-Par.
- */
-enum diag_severity {
- DIAG_SEVERITY_VERBOSE = 0,
- DIAG_SEVERITY_INFO = 1,
- DIAG_SEVERITY_WARNING = 2,
- DIAG_SEVERITY_ERR = 3,
- DIAG_SEVERITY_PRINT = 4,
-};
-
int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
void *msg);
int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/visorchannel.h
index 2babe93631f3..33945749c8b6 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/visorchannel.h
@@ -14,17 +14,13 @@
* details.
*/
-#ifndef __CHANNEL_H__
-#define __CHANNEL_H__
+#ifndef __VISORCHANNEL_H__
+#define __VISORCHANNEL_H__
#include <linux/types.h>
-#include <linux/io.h>
#include <linux/uuid.h>
-#define SIGNATURE_16(A, B) ((A) | ((B) << 8))
-#define SIGNATURE_32(A, B, C, D) \
- (SIGNATURE_16(A, B) | (SIGNATURE_16(C, D) << 16))
-#define VISOR_CHANNEL_SIGNATURE SIGNATURE_32('E', 'C', 'N', 'L')
+#define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E')
/*
* enum channel_serverstate
@@ -183,7 +179,7 @@ struct signal_queue_header {
u8 filler[12];
} __packed;
-/* CHANNEL Guids */
+/* VISORCHANNEL Guids */
/* {414815ed-c58c-11da-95a9-00e08161165f} */
#define VISOR_VHBA_CHANNEL_GUID \
GUID_INIT(0x414815ed, 0xc58c, 0x11da, \
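(Side note on the open-coded signature above: it expands to the same value as the SIGNATURE_32('E', 'C', 'N', 'L') helper it replaces.)

	'E' = 0x45, 'C' = 0x43, 'N' = 0x4e, 'L' = 0x4c

	'L' << 24 | 'N' << 16 | 'C' << 8 | 'E'                = 0x4c4e4345
	SIGNATURE_16('E', 'C') | SIGNATURE_16('N', 'L') << 16
		= 0x4345 | 0x4c4e0000                         = 0x4c4e4345
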
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index f3730d8c953e..784cdc1f9d6a 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Unisys visorbus
#
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index 32ff5c1bb6ba..9ee9886a9aed 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -17,7 +17,8 @@
#define __CONTROLVMCHANNEL_H__
#include <linux/uuid.h>
-#include "channel.h"
+
+#include "visorchannel.h"
/* {2B3C2D10-7EF5-4ad8-B966-3448B7386B3D} */
#define VISOR_CONTROLVM_CHANNEL_GUID \
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 27e04de14818..981b180f3c4b 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -26,8 +26,7 @@
*/
#include <linux/uuid.h>
-#include <linux/ctype.h>
-#include "channel.h"
+#include "visorchannel.h"
/* {193b331b-c58f-11da-95a9-00e08161165f} */
#define VISOR_VBUS_CHANNEL_GUID \
@@ -50,9 +49,9 @@
 * @infostrs:  Kernel version.
* @reserved: Pad size to 256 bytes.
*
- * An array of this struct is present in the channel area for each vbus.
- * (See vbuschannel.h.). It is filled in by the client side to provide info
- * about the device and driver from the client's perspective.
+ * An array of this struct is present in the channel area for each vbus. It is
+ * filled in by the client side to provide info about the device and driver from
+ * the client's perspective.
*/
struct visor_vbus_deviceinfo {
u8 devtype[16];
@@ -73,7 +72,7 @@ struct visor_vbus_deviceinfo {
* BusInfo struct.
* @dev_info_offset: Byte offset from beginning of this struct to the
* DevInfo array.
- * @reserved: Natural Alignment
+ * @reserved: Natural alignment.
*/
struct visor_vbus_headerinfo {
u32 struct_bytes;
@@ -97,7 +96,6 @@ struct visor_vbus_headerinfo {
struct visor_vbus_channel {
struct channel_header channel_header;
struct visor_vbus_headerinfo hdr_info;
- /* The remainder of this channel is filled in by the client */
struct visor_vbus_deviceinfo chp_info;
struct visor_vbus_deviceinfo bus_info;
struct visor_vbus_deviceinfo dev_info[0];
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2bc7ff7bb96a..6cb6eb0673c6 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -13,7 +13,10 @@
* details.
*/
+#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -69,12 +72,9 @@ static LIST_HEAD(list_all_device_instances);
* Note that <logCtx> is only needed for callers in the EFI environment, and
* is used to pass the EFI_DIAG_CAPTURE_PROTOCOL needed to log messages.
*/
-int visor_check_channel(struct channel_header *ch,
- struct device *dev,
- const guid_t *expected_guid,
- char *chname,
- u64 expected_min_bytes,
- u32 expected_version,
+int visor_check_channel(struct channel_header *ch, struct device *dev,
+ const guid_t *expected_guid, char *chname,
+ u64 expected_min_bytes, u32 expected_version,
u64 expected_signature)
{
if (!guid_is_null(expected_guid)) {
@@ -125,7 +125,6 @@ static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
dev = to_visor_device(xdev);
guid = visorchannel_get_guid(dev->visorchannel);
-
return add_uevent_var(env, "MODALIAS=visorbus:%pUl", guid);
}
@@ -144,17 +143,24 @@ static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
int i;
struct visor_device *dev;
struct visor_driver *drv;
+ struct visorchannel *chan;
dev = to_visor_device(xdev);
channel_type = visorchannel_get_guid(dev->visorchannel);
drv = to_visor_driver(xdrv);
+ chan = dev->visorchannel;
if (!drv->channel_types)
return 0;
-
for (i = 0; !guid_is_null(&drv->channel_types[i].guid); i++)
- if (guid_equal(&drv->channel_types[i].guid, channel_type))
+ if (guid_equal(&drv->channel_types[i].guid, channel_type) &&
+ visor_check_channel(visorchannel_get_header(chan),
+ xdev,
+ &drv->channel_types[i].guid,
+ (char *)drv->channel_types[i].name,
+ drv->channel_types[i].min_bytes,
+ drv->channel_types[i].version,
+ VISOR_CHANNEL_SIGNATURE))
return i + 1;
-
return 0;
}
@@ -162,13 +168,48 @@ static int visorbus_match(struct device *xdev, struct device_driver *xdrv)
* This describes the TYPE of bus.
* (Don't confuse this with an INSTANCE of the bus.)
*/
-struct bus_type visorbus_type = {
+static struct bus_type visorbus_type = {
.name = "visorbus",
.match = visorbus_match,
.uevent = visorbus_uevent,
.dev_groups = visorbus_dev_groups,
};
+struct visor_busdev {
+ u32 bus_no;
+ u32 dev_no;
+};
+
+static int match_visorbus_dev_by_id(struct device *dev, void *data)
+{
+ struct visor_device *vdev = to_visor_device(dev);
+ struct visor_busdev *id = data;
+
+ if (vdev->chipset_bus_no == id->bus_no &&
+ vdev->chipset_dev_no == id->dev_no)
+ return 1;
+ return 0;
+}
+
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct visor_busdev id = {
+ .bus_no = bus_no,
+ .dev_no = dev_no
+ };
+
+ if (from)
+ dev_start = &from->device;
+ dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
+ match_visorbus_dev_by_id);
+ if (!dev)
+ return NULL;
+ return to_visor_device(dev);
+}
+
/*
* visorbus_release_busdevice() - called when device_unregister() is called for
* the bus device instance, after all other tasks
@@ -179,8 +220,9 @@ static void visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
- debugfs_remove(dev->debugfs_client_bus_info);
+ debugfs_remove(dev->debugfs_bus_info);
debugfs_remove_recursive(dev->debugfs_dir);
+ visorchannel_destroy(dev->visorchannel);
kfree(dev);
}
@@ -198,7 +240,7 @@ static void visorbus_release_device(struct device *xdev)
}
/*
- * begin implementation of specific channel attributes to appear under
+ * BUS specific channel attributes to appear under
* /sys/bus/visorbus<x>/dev<y>/channel
*/
@@ -218,7 +260,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
struct visor_device *vdev = to_visor_device(dev);
return sprintf(buf, "0x%lx\n",
- visorchannel_get_nbytes(vdev->visorchannel));
+ visorchannel_get_nbytes(vdev->visorchannel));
}
static DEVICE_ATTR_RO(nbytes);
@@ -284,18 +326,14 @@ static struct attribute *channel_attrs[] = {
ATTRIBUTE_GROUPS(channel);
-/* end implementation of specific channel attributes */
-
/*
* BUS instance attributes
*
* define & implement display of bus attributes under
* /sys/bus/visorbus/devices/visorbus<n>.
*/
-
static ssize_t partition_handle_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
@@ -305,8 +343,7 @@ static ssize_t partition_handle_show(struct device *dev,
static DEVICE_ATTR_RO(partition_handle);
static ssize_t partition_guid_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
@@ -315,8 +352,7 @@ static ssize_t partition_guid_show(struct device *dev,
static DEVICE_ATTR_RO(partition_guid);
static ssize_t partition_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
@@ -325,8 +361,7 @@ static ssize_t partition_name_show(struct device *dev,
static DEVICE_ATTR_RO(partition_name);
static ssize_t channel_addr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
@@ -336,8 +371,7 @@ static ssize_t channel_addr_show(struct device *dev,
static DEVICE_ATTR_RO(channel_addr);
static ssize_t channel_bytes_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
@@ -347,8 +381,7 @@ static ssize_t channel_bytes_show(struct device *dev,
static DEVICE_ATTR_RO(channel_bytes);
static ssize_t channel_id_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
int len = 0;
@@ -356,7 +389,6 @@ static ssize_t channel_id_show(struct device *dev,
visorchannel_id(vdev->visorchannel, buf);
len = strlen(buf);
buf[len++] = '\n';
-
return len;
}
static DEVICE_ATTR_RO(channel_id);
@@ -396,13 +428,11 @@ static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
/* uninitialized vbus device entry */
if (!isprint(devinfo->devtype[0]))
return;
-
if (devix >= 0)
seq_printf(seq, "[%d]", devix);
else
/* vbus device entry is for bus or chipset */
seq_puts(seq, " ");
-
/*
* Note: because the s-Par back-end is free to scribble in this area,
* we never assume '\0'-termination.
@@ -415,7 +445,7 @@ static void vbuschannel_print_devinfo(struct visor_vbus_deviceinfo *devinfo,
devinfo->infostrs);
}
-static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
+static int bus_info_debugfs_show(struct seq_file *seq, void *v)
{
int i = 0;
unsigned long off;
@@ -427,10 +457,9 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
return 0;
seq_printf(seq,
- "Client device / client driver info for %s partition (vbus #%u):\n",
+ "Client device/driver info for %s partition (vbus #%u):\n",
((vdev->name) ? (char *)(vdev->name) : ""),
vdev->chipset_bus_no);
-
if (visorchannel_read(channel,
offsetof(struct visor_vbus_channel, chp_info),
&dev_info, sizeof(dev_info)) >= 0)
@@ -448,27 +477,25 @@ static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
off += sizeof(dev_info);
i++;
}
-
return 0;
}
-static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
+static int bus_info_debugfs_open(struct inode *inode, struct file *file)
{
- return single_open(file, client_bus_info_debugfs_show,
- inode->i_private);
+ return single_open(file, bus_info_debugfs_show, inode->i_private);
}
-static const struct file_operations client_bus_info_debugfs_fops = {
+static const struct file_operations bus_info_debugfs_fops = {
.owner = THIS_MODULE,
- .open = client_bus_info_debugfs_open,
+ .open = bus_info_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
{
- struct visor_device *dev = (struct visor_device *)__opaque;
+ struct visor_device *dev = from_timer(dev, t, timer);
struct visor_driver *drv = to_visor_driver(dev->device.driver);
drv->channel_interrupt(dev);
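(For illustration only, not part of the patch: the hunk above is the setup_timer() to timer_setup()/from_timer() conversion; a self-contained sketch of the same idiom with a hypothetical container struct.)

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;	/* embedded member named "timer" */
};

static void my_periodic_work(struct timer_list *t)
{
	/* Recover the container from the embedded timer_list member. */
	struct my_dev *dev = from_timer(dev, t, timer);

	/* ... do the periodic work on dev, then optionally re-arm ... */
	mod_timer(&dev->timer, jiffies + HZ);
}

/* Setup: the callback now receives the timer_list pointer, so no
 * cast-to-unsigned-long cookie is needed (the third argument is flags):
 *
 *	timer_setup(&dev->timer, my_periodic_work, 0);
 */
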
@@ -479,6 +506,7 @@ static int dev_start_periodic_work(struct visor_device *dev)
{
if (dev->being_removed || dev->timer_active)
return -EINVAL;
+
/* now up by at least 2 */
get_device(&dev->device);
dev->timer.expires = jiffies + POLLJIFFIES_NORMALCHANNEL;
@@ -491,6 +519,7 @@ static void dev_stop_periodic_work(struct visor_device *dev)
{
if (!dev->timer_active)
return;
+
del_timer_sync(&dev->timer);
dev->timer_active = false;
put_device(&dev->device);
@@ -508,20 +537,15 @@ static void dev_stop_periodic_work(struct visor_device *dev)
*/
static int visordriver_remove_device(struct device *xdev)
{
- struct visor_device *dev;
- struct visor_driver *drv;
-
- dev = to_visor_device(xdev);
- drv = to_visor_driver(xdev->driver);
+ struct visor_device *dev = to_visor_device(xdev);
+ struct visor_driver *drv = to_visor_driver(xdev->driver);
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = true;
drv->remove(dev);
mutex_unlock(&dev->visordriver_callback_lock);
-
dev_stop_periodic_work(dev);
put_device(&dev->device);
-
return 0;
}
@@ -546,8 +570,7 @@ EXPORT_SYMBOL_GPL(visorbus_unregister_visor_driver);
* @dest: the destination buffer that is written into from the channel
* @nbytes: the number of bytes to read from the channel
*
- * If receiving a message, use the visorchannel_signalremove()
- * function instead.
+ * If receiving a message, use the visorchannel_signalremove() function instead.
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
@@ -566,8 +589,7 @@ EXPORT_SYMBOL_GPL(visorbus_read_channel);
* @src: the source buffer that is written into the channel
* @nbytes: the number of bytes to write into the channel
*
- * If sending a message, use the visorchannel_signalinsert()
- * function instead.
+ * If sending a message, use the visorchannel_signalinsert() function instead.
*
* Return: integer indicating success (zero) or failure (non-zero)
*/
@@ -618,17 +640,16 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
*
* This is how everything starts from the device end.
* This function is called when a channel first appears via a ControlVM
- * message. In response, this function allocates a visor_device to
- * correspond to the new channel, and attempts to connect it the appropriate
- * driver. If the appropriate driver is found, the visor_driver.probe()
- * function for that driver will be called, and will be passed the new
- * visor_device that we just created.
+ * message. In response, this function allocates a visor_device to correspond
+ * to the new channel, and attempts to connect it to the appropriate driver.
+ * If the appropriate driver is found, the visor_driver.probe() function for
+ * that driver will be called, and will be passed the new visor_device that we
+ * just created.
*
* It's ok if the appropriate driver is not yet loaded, because in that case
* the new device struct will just stick around in the bus' list of devices.
* When the appropriate driver calls visorbus_register_visor_driver(), the
- * visor_driver.probe() for the new driver will be called with the new
- * device.
+ * visor_driver.probe() for the new driver will be called with the new device.
*
* Return: 0 if successful, otherwise the negative value returned by
* device_add() indicating the reason for failure
@@ -646,18 +667,16 @@ int create_visor_device(struct visor_device *dev)
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
get_device(&dev->device);
- setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
-
+ timer_setup(&dev->timer, dev_periodic_work, 0);
/*
- * bus_id must be a unique name with respect to this bus TYPE
- * (NOT bus instance). That's why we need to include the bus
- * number within the name.
+ * bus_id must be a unique name with respect to this bus TYPE (NOT bus
+ * instance). That's why we need to include the bus number within the
+ * name.
*/
err = dev_set_name(&dev->device, "vbus%u:dev%u",
chipset_bus_no, chipset_dev_no);
if (err)
goto err_put;
-
/*
* device_add does this:
* bus_add_device(dev)
@@ -671,14 +690,13 @@ int create_visor_device(struct visor_device *dev)
* if (!drv.probe(dev)) [visordriver_probe_device]
* dev.drv = NULL
*
- * Note that device_add does NOT fail if no driver failed to
- * claim the device. The device will be linked onto
- * bus_type.klist_devices regardless (use bus_for_each_dev).
+	 * Note that device_add does NOT fail even if no driver claims the
+	 * device. The device will be linked onto bus_type.klist_devices
+	 * regardless (use bus_for_each_dev).
*/
err = device_add(&dev->device);
if (err < 0)
goto err_put;
-
list_add_tail(&dev->list_all, &list_all_device_instances);
dev->state.created = 1;
visorbus_response(dev, err, CONTROLVM_DEVICE_CREATE);
@@ -695,8 +713,9 @@ void remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
put_device(&dev->device);
+ if (dev->pending_msg_hdr)
+ visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
device_unregister(&dev->device);
- visorbus_response(dev, 0, CONTROLVM_DEVICE_DESTROY);
}
static int get_vbus_header_info(struct visorchannel *chan,
@@ -718,14 +737,11 @@ static int get_vbus_header_info(struct visorchannel *chan,
sizeof(*hdr_info));
if (err < 0)
return err;
-
if (hdr_info->struct_bytes < sizeof(struct visor_vbus_headerinfo))
return -EINVAL;
-
if (hdr_info->device_info_struct_bytes <
sizeof(struct visor_vbus_deviceinfo))
return -EINVAL;
-
return 0;
}
@@ -746,11 +762,12 @@ static void write_vbus_chp_info(struct visorchannel *chan,
struct visor_vbus_headerinfo *hdr_info,
struct visor_vbus_deviceinfo *info)
{
- int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
+ int off;
if (hdr_info->chp_info_offset == 0)
return;
+ off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -771,11 +788,12 @@ static void write_vbus_bus_info(struct visorchannel *chan,
struct visor_vbus_headerinfo *hdr_info,
struct visor_vbus_deviceinfo *info)
{
- int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
+ int off;
if (hdr_info->bus_info_offset == 0)
return;
+ off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -798,13 +816,12 @@ static void write_vbus_dev_info(struct visorchannel *chan,
struct visor_vbus_deviceinfo *info,
unsigned int devix)
{
- int off =
- (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
- (hdr_info->device_info_struct_bytes * devix);
+ int off;
if (hdr_info->dev_info_offset == 0)
return;
-
+ off = (sizeof(struct channel_header) + hdr_info->dev_info_offset) +
+ (hdr_info->device_info_struct_bytes * devix);
visorchannel_write(chan, off, info, sizeof(*info));
}
@@ -844,7 +861,6 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
if (!visordev->device.driver)
return;
-
bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
if (!bdev)
return;
@@ -860,14 +876,12 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
* type name
*/
for (i = 0; visordrv->channel_types[i].name; i++) {
- if (memcmp(&visordrv->channel_types[i].guid,
- &visordev->channel_type_guid,
- sizeof(visordrv->channel_types[i].guid)) == 0) {
+ if (guid_equal(&visordrv->channel_types[i].guid,
+ &visordev->channel_type_guid)) {
chan_type_name = visordrv->channel_types[i].name;
break;
}
}
-
bus_device_info_init(&dev_info, chan_type_name, visordrv->name);
write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
@@ -892,36 +906,32 @@ static void publish_vbus_dev_info(struct visor_device *visordev)
*/
static int visordriver_probe_device(struct device *xdev)
{
- int res;
- struct visor_driver *drv;
- struct visor_device *dev;
-
- dev = to_visor_device(xdev);
- drv = to_visor_driver(xdev->driver);
+ int err;
+ struct visor_driver *drv = to_visor_driver(xdev->driver);
+ struct visor_device *dev = to_visor_device(xdev);
mutex_lock(&dev->visordriver_callback_lock);
dev->being_removed = false;
-
- res = drv->probe(dev);
- if (res >= 0) {
- /* success: reference kept via unmatched get_device() */
- get_device(&dev->device);
- publish_vbus_dev_info(dev);
+ err = drv->probe(dev);
+ if (err) {
+ mutex_unlock(&dev->visordriver_callback_lock);
+ return err;
}
-
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ publish_vbus_dev_info(dev);
mutex_unlock(&dev->visordriver_callback_lock);
- return res;
+ return 0;
}
/*
- * visorbus_register_visor_driver() - registers the provided visor driver
- * for handling one or more visor device
+ * visorbus_register_visor_driver() - registers the provided visor driver for
+ * handling one or more visor device
* types (channel_types)
* @drv: the driver to register
*
- * A visor function driver calls this function to register
- * the driver. The caller MUST fill in the following fields within the
- * #drv structure:
+ * A visor function driver calls this function to register the driver. The
+ * caller MUST fill in the following fields within the #drv structure:
* name, version, owner, channel_types, probe, remove
*
* Here's how the whole Linux bus / driver / device model works.
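(For illustration only, not part of the patch: a hypothetical function-driver skeleton making the registration contract above concrete; it assumes visorbus.h and linux/module.h are included, and every my_* identifier plus the min_bytes/version values are placeholders.)

static int my_probe(struct visor_device *dev)
{
	return 0;			/* claim the device */
}

static void my_remove(struct visor_device *dev)
{
}

static int my_pause(struct visor_device *dev,
		    visorbus_state_complete_func complete_func)
{
	complete_func(dev, 0);		/* nothing to quiesce in this stub */
	return 0;
}

static int my_resume(struct visor_device *dev,
		     visorbus_state_complete_func complete_func)
{
	complete_func(dev, 0);
	return 0;
}

static struct visor_channeltype_descriptor my_channel_types[] = {
	/* GUID, name, minimum channel size, expected channel version */
	{ VISOR_VHBA_CHANNEL_GUID, "my_vhba", sizeof(struct channel_header), 1 },
	{}				/* zero GUID terminates the table */
};

static struct visor_driver my_driver = {
	.name		= "my_visor_driver",
	.owner		= THIS_MODULE,
	.channel_types	= my_channel_types,
	.probe		= my_probe,
	.remove		= my_remove,
	.pause		= my_pause,
	.resume		= my_resume,
};

/* module_init() would then call visorbus_register_visor_driver(&my_driver). */
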
@@ -967,16 +977,12 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
/* can't register on a nonexistent bus */
if (!initialized)
return -ENODEV;
-
if (!drv->probe)
return -EINVAL;
-
if (!drv->remove)
return -EINVAL;
-
if (!drv->pause)
return -EINVAL;
-
if (!drv->resume)
return -EINVAL;
@@ -985,7 +991,6 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
drv->driver.probe = visordriver_probe_device;
drv->driver.remove = visordriver_remove_device;
drv->driver.owner = drv->owner;
-
/*
* driver_register does this:
* bus_add_driver(drv)
@@ -998,7 +1003,6 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
* if (!drv.probe(dev)) [visordriver_probe_device]
* dev.drv = NULL
*/
-
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1019,39 +1023,28 @@ int visorbus_create_instance(struct visor_device *dev)
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
if (!hdr_info)
return -ENOMEM;
-
dev_set_name(&dev->device, "visorbus%d", id);
dev->device.bus = &visorbus_type;
dev->device.groups = visorbus_groups;
dev->device.release = visorbus_release_busdevice;
-
dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
visorbus_debugfs_dir);
- dev->debugfs_client_bus_info =
- debugfs_create_file("client_bus_info", 0440,
- dev->debugfs_dir, dev,
- &client_bus_info_debugfs_fops);
-
+ dev->debugfs_bus_info = debugfs_create_file("client_bus_info", 0440,
+ dev->debugfs_dir, dev,
+ &bus_info_debugfs_fops);
dev_set_drvdata(&dev->device, dev);
err = get_vbus_header_info(dev->visorchannel, &dev->device, hdr_info);
if (err < 0)
goto err_debugfs_dir;
-
err = device_register(&dev->device);
if (err < 0)
goto err_debugfs_dir;
-
list_add_tail(&dev->list_all, &list_all_bus_instances);
-
dev->state.created = 1;
dev->vbus_hdr_info = (void *)hdr_info;
- write_vbus_chp_info(dev->visorchannel, hdr_info,
- &chipset_driverinfo);
- write_vbus_bus_info(dev->visorchannel, hdr_info,
- &clientbus_driverinfo);
-
+ write_vbus_chp_info(dev->visorchannel, hdr_info, &chipset_driverinfo);
+ write_vbus_bus_info(dev->visorchannel, hdr_info, &clientbus_driverinfo);
visorbus_response(dev, err, CONTROLVM_BUS_CREATE);
-
return 0;
err_debugfs_dir:
@@ -1075,11 +1068,11 @@ void visorbus_remove_instance(struct visor_device *dev)
* successfully been able to trace thru the code to see where/how
* release() gets called. But I know it does.
*/
- visorchannel_destroy(dev->visorchannel);
kfree(dev->vbus_hdr_info);
list_del(&dev->list_all);
+ if (dev->pending_msg_hdr)
+ visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
device_unregister(&dev->device);
- visorbus_response(dev, 0, CONTROLVM_BUS_DESTROY);
}
/*
@@ -1090,9 +1083,9 @@ static void remove_all_visor_devices(void)
struct list_head *listentry, *listtmp;
list_for_each_safe(listentry, listtmp, &list_all_device_instances) {
- struct visor_device *dev = list_entry(listentry,
- struct visor_device,
- list_all);
+ struct visor_device *dev;
+
+ dev = list_entry(listentry, struct visor_device, list_all);
remove_visor_device(dev);
}
}
@@ -1131,7 +1124,6 @@ static void resume_state_change_complete(struct visor_device *dev, int status)
return;
dev->resuming = false;
-
/*
* Notify the chipset driver that the resume is complete,
* which will presumably want to send some sort of response to
@@ -1156,7 +1148,7 @@ static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
bool is_pause)
{
int err;
- struct visor_driver *drv = NULL;
+ struct visor_driver *drv;
/* If no driver associated with the device nothing to pause/resume */
if (!dev->device.driver)
@@ -1177,7 +1169,6 @@ static int visorchipset_initiate_device_pause_resume(struct visor_device *dev,
dev->resuming = true;
err = drv->resume(dev, resume_state_change_complete);
}
-
return err;
}
@@ -1198,7 +1189,6 @@ int visorchipset_device_pause(struct visor_device *dev_info)
dev_info->pausing = false;
return err;
}
-
return 0;
}
@@ -1219,7 +1209,6 @@ int visorchipset_device_resume(struct visor_device *dev_info)
dev_info->resuming = false;
return err;
}
-
return 0;
}
@@ -1228,18 +1217,12 @@ int visorbus_init(void)
int err;
visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
- if (!visorbus_debugfs_dir)
- return -ENOMEM;
-
bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
-
err = bus_register(&visorbus_type);
if (err < 0)
return err;
-
initialized = true;
bus_device_info_init(&chipset_driverinfo, "chipset", "visorchipset");
-
return 0;
}
@@ -1248,14 +1231,12 @@ void visorbus_exit(void)
struct list_head *listentry, *listtmp;
remove_all_visor_devices();
-
list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
- struct visor_device *dev = list_entry(listentry,
- struct visor_device,
- list_all);
+ struct visor_device *dev;
+
+ dev = list_entry(listentry, struct visor_device, list_all);
visorbus_remove_instance(dev);
}
-
bus_unregister(&visorbus_type);
initialized = false;
debugfs_remove_recursive(visorbus_debugfs_dir);
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index e878d65ab668..4a8b12d7cfaa 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -23,25 +23,23 @@
#include "vbuschannel.h"
#include "visorbus.h"
+struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
+ struct visor_device *from);
int visorbus_create_instance(struct visor_device *dev);
void visorbus_remove_instance(struct visor_device *bus_info);
int create_visor_device(struct visor_device *dev_info);
void remove_visor_device(struct visor_device *dev_info);
int visorchipset_device_pause(struct visor_device *dev_info);
int visorchipset_device_resume(struct visor_device *dev_info);
-
void visorbus_response(struct visor_device *p, int response, int controlvm_id);
void visorbus_device_changestate_response(struct visor_device *p, int response,
struct visor_segment_state state);
-
int visorbus_init(void);
void visorbus_exit(void);
/* visorchannel access functions */
struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid);
-struct visorchannel *visorchannel_create_with_lock(u64 physaddr, gfp_t gfp,
- const guid_t *guid);
+ const guid_t *guid, bool needs_lock);
void visorchannel_destroy(struct visorchannel *channel);
int visorchannel_read(struct visorchannel *channel, ulong offset,
void *dest, ulong nbytes);
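(For illustration only, not part of the patch: hypothetical call sites for the consolidated constructor above, which now takes the behaviour of the former _with_lock variant as an explicit flag; paddr and chan_guid are placeholders.)

	/* Only one context inserts/removes signals: skip the spinlocks. */
	chan = visorchannel_create(paddr, GFP_KERNEL, &chan_guid, false);

	/* Several contexts may touch the queues: serialize with the locks. */
	chan = visorchannel_create(paddr, GFP_KERNEL, &chan_guid, true);
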
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 2a000fee3119..aae16073ba03 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -20,6 +20,7 @@
#include <linux/uuid.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include "visorbus.h"
#include "visorbus_private.h"
@@ -41,8 +42,8 @@ struct visorchannel {
struct channel_header chan_hdr;
guid_t guid;
/*
- * channel creator knows if more than one
- * thread will be inserting or removing
+ * channel creator knows if more than one thread will be inserting or
+ * removing
*/
bool needs_lock;
/* protect head writes in chan_hdr */
@@ -57,6 +58,7 @@ void visorchannel_destroy(struct visorchannel *channel)
{
if (!channel)
return;
+
if (channel->mapped) {
memunmap(channel->mapped);
if (channel->requested)
@@ -122,7 +124,6 @@ int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
return -EIO;
memcpy(dest, channel->mapped + offset, nbytes);
-
return 0;
}
@@ -140,9 +141,7 @@ int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
memcpy(((char *)(&channel->chan_hdr)) + offset,
dest, copy_size);
}
-
memcpy(channel->mapped + offset, dest, nbytes);
-
return 0;
}
@@ -173,8 +172,8 @@ static int sig_data_offset(struct channel_header *chan_hdr, int q,
}
/*
- * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
- * into host memory
+ * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back into
+ * host memory
*/
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
visorchannel_write(channel, \
@@ -226,32 +225,25 @@ static int signalremove_inner(struct visorchannel *channel, u32 queue,
error = sig_read_header(channel, queue, &sig_hdr);
if (error)
return error;
-
/* No signals to remove; have caller try again. */
if (sig_hdr.head == sig_hdr.tail)
return -EAGAIN;
-
sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
-
error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
if (error)
return error;
-
sig_hdr.num_received++;
-
/*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory. Required for channel sync.
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+ * host memory. Required for channel sync.
*/
mb();
-
error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
if (error)
return error;
error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
if (error)
return error;
-
return 0;
}
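(Worked example of the slot arithmetic in signalremove_inner() above: with max_slots = 4, head = 1 and tail = 3, slots 0 and 1 hold pending signals; the first remove advances tail to (3 + 1) % 4 = 0 and reads slot 0, the next reads slot 1, and once tail == head == 1 the queue reports empty again.)
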
@@ -288,13 +280,12 @@ static bool queue_empty(struct visorchannel *channel, u32 queue)
if (sig_read_header(channel, queue, &sig_hdr))
return true;
-
return (sig_hdr.head == sig_hdr.tail);
}
/**
- * visorchannel_signalempty() - checks if the designated channel/queue
- * contains any messages
+ * visorchannel_signalempty() - checks if the designated channel/queue contains
+ * any messages
* @channel: the channel to query
* @queue: the queue in the channel to query
*
@@ -308,11 +299,9 @@ bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
if (!channel->needs_lock)
return queue_empty(channel, queue);
-
spin_lock_irqsave(&channel->remove_lock, flags);
rc = queue_empty(channel, queue);
spin_unlock_irqrestore(&channel->remove_lock, flags);
-
return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);
@@ -326,7 +315,6 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
err = sig_read_header(channel, queue, &sig_hdr);
if (err)
return err;
-
sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
if (sig_hdr.head == sig_hdr.tail) {
sig_hdr.num_overflows++;
@@ -335,33 +323,28 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
return err;
return -EIO;
}
-
err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
if (err)
return err;
-
sig_hdr.num_sent++;
-
/*
- * For each data field in SIGNAL_QUEUE_HEADER that was modified,
- * update host memory. Required for channel sync.
+ * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
+ * host memory. Required for channel sync.
*/
mb();
-
err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
if (err)
return err;
err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
if (err)
return err;
-
return 0;
}
/*
- * visorchannel_create_guts() - creates the struct visorchannel abstraction
- * for a data area in memory, but does NOT modify
- * this data area
+ * visorchannel_create() - creates the struct visorchannel abstraction for a
+ * data area in memory, but does NOT modify this data
+ * area
* @physaddr: physical address of start of channel
* @gfp: gfp_t to use when allocating memory for the data struct
* @guid: GUID that identifies channel type;
@@ -372,9 +355,8 @@ static int signalinsert_inner(struct visorchannel *channel, u32 queue,
* Return: pointer to visorchannel that was created if successful,
* otherwise NULL
*/
-static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
- const guid_t *guid,
- bool needs_lock)
+struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
+ const guid_t *guid, bool needs_lock)
{
struct visorchannel *channel;
int err;
@@ -386,37 +368,30 @@ static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
channel = kzalloc(sizeof(*channel), gfp);
if (!channel)
return NULL;
-
channel->needs_lock = needs_lock;
spin_lock_init(&channel->insert_lock);
spin_lock_init(&channel->remove_lock);
-
/*
- * Video driver constains the efi framebuffer so it will get a
- * conflict resource when requesting its full mem region. Since
- * we are only using the efi framebuffer for video we can ignore
- * this. Remember that we haven't requested it so we don't try to
- * release later on.
+	 * Video driver contains the efi framebuffer so it will get a conflict
+ * resource when requesting its full mem region. Since we are only
+ * using the efi framebuffer for video we can ignore this. Remember that
+ * we haven't requested it so we don't try to release later on.
*/
channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
if (!channel->requested && !guid_equal(guid, &visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
-
channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(physaddr, size);
goto err_destroy_channel;
}
-
channel->physaddr = physaddr;
channel->nbytes = size;
-
err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
if (err)
goto err_destroy_channel;
size = (ulong)channel->chan_hdr.size;
-
memunmap(channel->mapped);
if (channel->requested)
release_mem_region(channel->physaddr, channel->nbytes);
@@ -426,13 +401,11 @@ static struct visorchannel *visorchannel_create_guts(u64 physaddr, gfp_t gfp,
if (!channel->requested && !guid_equal(guid, &visor_video_guid))
/* we only care about errors if this is not the video channel */
goto err_destroy_channel;
-
channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(channel->physaddr, size);
goto err_destroy_channel;
}
-
channel->nbytes = size;
guid_copy(&channel->guid, guid);
return channel;
@@ -442,18 +415,6 @@ err_destroy_channel:
return NULL;
}
-struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
- const guid_t *guid)
-{
- return visorchannel_create_guts(physaddr, gfp, guid, false);
-}
-
-struct visorchannel *visorchannel_create_with_lock(u64 physaddr, gfp_t gfp,
- const guid_t *guid)
-{
- return visorchannel_create_guts(physaddr, gfp, guid, true);
-}
-
/**
* visorchannel_signalinsert() - inserts a message into the designated
* channel/queue
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 27ecf6fb49fd..fed554a43151 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -27,8 +27,8 @@ static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
-#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
-#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
+#define POLLJIFFIES_CONTROLVM_FAST 1
+#define POLLJIFFIES_CONTROLVM_SLOW 100
#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
@@ -128,7 +128,6 @@ static ssize_t toolaction_show(struct device *dev,
&tool_action, sizeof(u8));
if (err)
return err;
-
return sprintf(buf, "%u\n", tool_action);
}
@@ -141,7 +140,6 @@ static ssize_t toolaction_store(struct device *dev,
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
tool_action),
@@ -178,7 +176,6 @@ static ssize_t boottotool_store(struct device *dev,
if (kstrtoint(buf, 10, &val))
return -EINVAL;
-
efi_visor_indication.boot_to_tool = val;
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -214,7 +211,6 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &error))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_error),
@@ -237,7 +233,6 @@ static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
&text_id, sizeof(u32));
if (err)
return err;
-
return sprintf(buf, "%u\n", text_id);
}
@@ -249,7 +244,6 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_text_id),
@@ -272,7 +266,6 @@ static ssize_t remaining_steps_show(struct device *dev,
&remaining_steps, sizeof(u16));
if (err)
return err;
-
return sprintf(buf, "%hu\n", remaining_steps);
}
@@ -285,7 +278,6 @@ static ssize_t remaining_steps_store(struct device *dev,
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
-
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_remaining_steps),
@@ -296,43 +288,6 @@ static ssize_t remaining_steps_store(struct device *dev,
}
static DEVICE_ATTR_RW(remaining_steps);
-struct visor_busdev {
- u32 bus_no;
- u32 dev_no;
-};
-
-static int match_visorbus_dev_by_id(struct device *dev, void *data)
-{
- struct visor_device *vdev = to_visor_device(dev);
- struct visor_busdev *id = data;
-
- if ((vdev->chipset_bus_no == id->bus_no) &&
- (vdev->chipset_dev_no == id->dev_no))
- return 1;
-
- return 0;
-}
-
-struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
- struct visor_device *from)
-{
- struct device *dev;
- struct device *dev_start = NULL;
- struct visor_device *vdev = NULL;
- struct visor_busdev id = {
- .bus_no = bus_no,
- .dev_no = dev_no
- };
-
- if (from)
- dev_start = &from->device;
- dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
- match_visorbus_dev_by_id);
- if (dev)
- vdev = to_visor_device(dev);
- return vdev;
-}
-
static void controlvm_init_response(struct controlvm_message *msg,
struct controlvm_message_header *msg_hdr,
int response)
@@ -374,18 +329,13 @@ static int chipset_init(struct controlvm_message *inmsg)
goto out_respond;
}
chipset_inited = 1;
-
/*
* Set features to indicate we support parahotplug (if Command also
- * supports it).
+ * supports it). Set the "reply" bit so Command knows this is a
+ * features-aware driver.
*/
features = inmsg->cmd.init_chipset.features &
VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
-
- /*
- * Set the "reply" bit so Command knows this is a features-aware
- * driver.
- */
features |= VISOR_CHIPSET_FEATURE_REPLY;
out_respond:
@@ -396,20 +346,17 @@ out_respond:
}
static int controlvm_respond(struct controlvm_message_header *msg_hdr,
- int response,
- struct visor_segment_state *state)
+ int response, struct visor_segment_state *state)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
-
if (state) {
outmsg.cmd.device_change_state.state = *state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
}
-
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -435,13 +382,11 @@ static int save_crash_message(struct controlvm_message *msg,
"failed to read message count\n");
return err;
}
-
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
dev_err(&chipset_dev->acpi_device->dev,
"invalid number of messages\n");
return -EIO;
}
-
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_offset),
@@ -451,13 +396,11 @@ static int save_crash_message(struct controlvm_message *msg,
"failed to read offset\n");
return err;
}
-
switch (cr_type) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- msg,
+ local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -467,8 +410,7 @@ static int save_crash_message(struct controlvm_message *msg,
break;
case CRASH_BUS:
err = visorchannel_write(chipset_dev->controlvm_channel,
- local_crash_msg_offset,
- msg,
+ local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -488,33 +430,25 @@ static int controlvm_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
- if (!pending_msg_hdr)
- return -EIO;
-
if (pending_msg_hdr->id != (u32)cmd_id)
return -EINVAL;
return controlvm_respond(pending_msg_hdr, response, NULL);
}
-static int device_changestate_responder(
- enum controlvm_id cmd_id,
- struct visor_device *p, int response,
- struct visor_segment_state response_state)
+static int device_changestate_responder(enum controlvm_id cmd_id,
+ struct visor_device *p, int response,
+ struct visor_segment_state state)
{
struct controlvm_message outmsg;
- if (!p->pending_msg_hdr)
- return -EIO;
if (p->pending_msg_hdr->id != cmd_id)
return -EINVAL;
controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
-
outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
- outmsg.cmd.device_change_state.state = response_state;
-
+ outmsg.cmd.device_change_state.state = state;
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
@@ -522,64 +456,55 @@ static int device_changestate_responder(
static int visorbus_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->create_bus.bus_no;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
int err;
bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
- if (bus_info && (bus_info->state.created == 1)) {
+ if (bus_info && bus_info->state.created == 1) {
dev_err(&chipset_dev->acpi_device->dev,
"failed %s: already exists\n", __func__);
err = -EEXIST;
goto err_respond;
}
-
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
err = -ENOMEM;
goto err_respond;
}
-
INIT_LIST_HEAD(&bus_info->list_all);
bus_info->chipset_bus_no = bus_no;
bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
-
if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
err = save_crash_message(inmsg, CRASH_BUS);
if (err)
goto err_free_bus_info;
}
-
if (inmsg->hdr.flags.response_expected == 1) {
- pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
- GFP_KERNEL);
+ pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_free_bus_info;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
bus_info->pending_msg_hdr = pmsg_hdr;
}
-
visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
GFP_KERNEL,
- &cmd->create_bus.bus_data_type_guid);
+ &cmd->create_bus.bus_data_type_guid,
+ false);
if (!visorchannel) {
err = -ENOMEM;
goto err_free_pending_msg;
}
-
bus_info->visorchannel = visorchannel;
-
/* Response will be handled by visorbus_create_instance on success */
err = visorbus_create_instance(bus_info);
if (err)
goto err_destroy_channel;
-
return 0;
err_destroy_channel:
@@ -599,9 +524,8 @@ err_respond:
static int visorbus_destroy(struct controlvm_message *inmsg)
{
- struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
- u32 bus_no = cmd->destroy_bus.bus_no;
+ struct controlvm_message_header *pmsg_hdr;
+ u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
struct visor_device *bus_info;
int err;
@@ -625,12 +549,10 @@ static int visorbus_destroy(struct controlvm_message *inmsg)
err = -ENOMEM;
goto err_respond;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
bus_info->pending_msg_hdr = pmsg_hdr;
}
-
/* Response will be handled by visorbus_remove_instance */
visorbus_remove_instance(bus_info);
return 0;
@@ -646,51 +568,33 @@ static const guid_t *parser_id_get(struct parser_context *ctx)
return &ctx->data.id;
}
-static void *parser_string_get(struct parser_context *ctx)
+static void *parser_string_get(u8 *pscan, int nscan)
{
- u8 *pscan;
- unsigned long nscan;
int value_length;
void *value;
- int i;
- pscan = ctx->curr;
- if (!pscan)
- return NULL;
- nscan = ctx->bytes_remaining;
if (nscan == 0)
return NULL;
- for (i = 0, value_length = -1; i < nscan; i++)
- if (pscan[i] == '\0') {
- value_length = i;
- break;
- }
- /* '\0' was not included in the length */
- if (value_length < 0)
- value_length = nscan;
-
- value = kmalloc(value_length + 1, GFP_KERNEL);
+ value_length = strnlen(pscan, nscan);
+ value = kzalloc(value_length + 1, GFP_KERNEL);
if (!value)
return NULL;
if (value_length > 0)
memcpy(value, pscan, value_length);
- ((u8 *)(value))[value_length] = '\0';
return value;
}
static void *parser_name_get(struct parser_context *ctx)
{
- struct visor_controlvm_parameters_header *phdr = NULL;
+ struct visor_controlvm_parameters_header *phdr;
phdr = &ctx->data;
-
if (phdr->name_offset + phdr->name_length > ctx->param_bytes)
return NULL;
-
ctx->curr = (char *)&phdr + phdr->name_offset;
ctx->bytes_remaining = phdr->name_length;
- return parser_string_get(ctx);
+ return parser_string_get(ctx->curr, phdr->name_length);
}
static int visorbus_configure(struct controlvm_message *inmsg,
@@ -715,20 +619,16 @@ static int visorbus_configure(struct controlvm_message *inmsg,
err = -EIO;
goto err_respond;
}
-
- err = visorchannel_set_clientpartition
- (bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
+ err = visorchannel_set_clientpartition(bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
if (err)
goto err_respond;
-
if (parser_ctx) {
const guid_t *partition_guid = parser_id_get(parser_ctx);
guid_copy(&bus_info->partition_guid, partition_guid);
bus_info->name = parser_name_get(parser_ctx);
}
-
if (inmsg->hdr.flags.response_expected == 1)
controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
return 0;
@@ -744,10 +644,10 @@ err_respond:
static int visorbus_device_create(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->create_device.bus_no;
u32 dev_no = cmd->create_device.dev_no;
- struct visor_device *dev_info = NULL;
+ struct visor_device *dev_info;
struct visor_device *bus_info;
struct visorchannel *visorchannel;
int err;
@@ -765,9 +665,8 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
err = -EINVAL;
goto err_respond;
}
-
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
- if (dev_info && (dev_info->state.created == 1)) {
+ if (dev_info && dev_info->state.created == 1) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to get bus by id: %d/%d\n", bus_no, dev_no);
err = -EEXIST;
@@ -779,16 +678,14 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
err = -ENOMEM;
goto err_respond;
}
-
dev_info->chipset_bus_no = bus_no;
dev_info->chipset_dev_no = dev_no;
guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
dev_info->device.parent = &bus_info->device;
-
- visorchannel =
- visorchannel_create_with_lock(cmd->create_device.channel_addr,
- GFP_KERNEL,
- &cmd->create_device.data_type_guid);
+ visorchannel = visorchannel_create(cmd->create_device.channel_addr,
+ GFP_KERNEL,
+ &cmd->create_device.data_type_guid,
+ true);
if (!visorchannel) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to create visorchannel: %d/%d\n",
@@ -797,20 +694,20 @@ static int visorbus_device_create(struct controlvm_message *inmsg)
goto err_free_dev_info;
}
dev_info->visorchannel = visorchannel;
- guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid);
- if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) {
+ guid_copy(&dev_info->channel_type_guid,
+ &cmd->create_device.data_type_guid);
+ if (guid_equal(&cmd->create_device.data_type_guid,
+ &visor_vhba_channel_guid)) {
err = save_crash_message(inmsg, CRASH_DEV);
if (err)
goto err_destroy_visorchannel;
}
-
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_destroy_visorchannel;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
@@ -837,7 +734,7 @@ err_respond:
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->device_change_state.bus_no;
u32 dev_no = cmd->device_change_state.dev_no;
struct visor_segment_state state = cmd->device_change_state.state;
@@ -858,18 +755,17 @@ static int visorbus_device_changestate(struct controlvm_message *inmsg)
err = -EIO;
goto err_respond;
}
+
if (inmsg->hdr.flags.response_expected == 1) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
err = -ENOMEM;
goto err_respond;
}
-
memcpy(pmsg_hdr, &inmsg->hdr,
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
}
-
if (state.alive == segment_state_running.alive &&
state.operating == segment_state_running.operating)
/* Response will be sent from visorchipset_device_resume */
@@ -884,7 +780,6 @@ static int visorbus_device_changestate(struct controlvm_message *inmsg)
err = visorchipset_device_pause(dev_info);
if (err)
goto err_respond;
-
return 0;
err_respond:
@@ -897,7 +792,7 @@ err_respond:
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
struct controlvm_message_packet *cmd = &inmsg->cmd;
- struct controlvm_message_header *pmsg_hdr = NULL;
+ struct controlvm_message_header *pmsg_hdr;
u32 bus_no = cmd->destroy_device.bus_no;
u32 dev_no = cmd->destroy_device.dev_no;
struct visor_device *dev_info;
@@ -928,7 +823,6 @@ static int visorbus_device_destroy(struct controlvm_message *inmsg)
sizeof(struct controlvm_message_header));
dev_info->pending_msg_hdr = pmsg_hdr;
}
-
kfree(dev_info->name);
remove_visor_device(dev_info);
return 0;
@@ -995,11 +889,9 @@ static struct parahotplug_request *parahotplug_request_create(
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
-
req->id = parahotplug_next_id();
req->expiration = parahotplug_next_expiration();
req->msg = *msg;
-
return req;
}
@@ -1031,13 +923,12 @@ static int parahotplug_request_complete(int id, u16 active)
{
struct list_head *pos;
struct list_head *tmp;
+ struct parahotplug_request *req;
spin_lock(&parahotplug_request_list_lock);
-
/* Look for a request matching "id". */
list_for_each_safe(pos, tmp, &parahotplug_request_list) {
- struct parahotplug_request *req =
- list_entry(pos, struct parahotplug_request, list);
+ req = list_entry(pos, struct parahotplug_request, list);
if (req->id == id) {
/*
* Found a match. Remove it from the list and
@@ -1054,7 +945,6 @@ static int parahotplug_request_complete(int id, u16 active)
return 0;
}
}
-
spin_unlock(&parahotplug_request_list_lock);
return -EINVAL;
}
@@ -1081,7 +971,6 @@ static ssize_t devicedisabled_store(struct device *dev,
if (kstrtouint(buf, 10, &id))
return -EINVAL;
-
err = parahotplug_request_complete(id, 0);
if (err < 0)
return err;
@@ -1110,7 +999,6 @@ static ssize_t deviceenabled_store(struct device *dev,
if (kstrtouint(buf, 10, &id))
return -EINVAL;
-
parahotplug_request_complete(id, 1);
return count;
}
@@ -1158,9 +1046,9 @@ static int parahotplug_request_kickoff(struct parahotplug_request *req)
{
struct controlvm_message_packet *cmd = &req->msg.cmd;
char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
- env_func[40];
- char *envp[] = {
- env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
+ env_func[40];
+ char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
+ env_func, NULL
};
sprintf(env_cmd, "VISOR_PARAHOTPLUG=1");
@@ -1173,7 +1061,6 @@ static int parahotplug_request_kickoff(struct parahotplug_request *req)
cmd->device_change_state.dev_no >> 3);
sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d",
cmd->device_change_state.dev_no & 0x7);
-
return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
KOBJ_CHANGE, envp);
}
@@ -1191,7 +1078,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
req = parahotplug_request_create(inmsg);
if (!req)
return -ENOMEM;
-
/*
* For enable messages, just respond with success right away, we don't
* need to wait to see if the enable was successful.
@@ -1205,7 +1091,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
parahotplug_request_destroy(req);
return 0;
}
-
/*
* For disable messages, add the request to the request list before
* kicking off the udev script. It won't get responded to until the
@@ -1214,7 +1099,6 @@ static int parahotplug_process_message(struct controlvm_message *inmsg)
spin_lock(&parahotplug_request_list_lock);
list_add_tail(&req->list, &parahotplug_request_list);
spin_unlock(&parahotplug_request_list_lock);
-
err = parahotplug_request_kickoff(req);
if (err)
goto err_respond;
@@ -1237,12 +1121,9 @@ static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
int res;
- res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_ONLINE);
-
+ res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1262,10 +1143,8 @@ static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
KOBJ_CHANGE, envp);
-
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1279,11 +1158,10 @@ static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
- KOBJ_OFFLINE);
+ KOBJ_OFFLINE);
if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, res, NULL);
-
return res;
}
@@ -1296,17 +1174,15 @@ static int unisys_vmcall(unsigned long tuple, unsigned long param)
reg_ebx = param & 0xFFFFFFFF;
reg_ecx = param >> 32;
-
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
return -EPERM;
-
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
- "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
+ "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
if (result)
goto error;
-
return 0;
+
/* Need to convert from VMCALL error codes to Linux */
error:
switch (result) {
@@ -1330,8 +1206,8 @@ static int controlvm_channel_create(struct visorchipset_device *dev)
if (err)
return err;
addr = dev->controlvm_params.address;
- chan = visorchannel_create_with_lock(addr, GFP_KERNEL,
- &visor_controlvm_channel_guid);
+ chan = visorchannel_create(addr, GFP_KERNEL,
+ &visor_controlvm_channel_guid, true);
if (!chan)
return -ENOMEM;
dev->controlvm_channel = chan;
@@ -1350,9 +1226,7 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
msg.cmd.init_chipset.bus_count = 23;
msg.cmd.init_chipset.switch_count = 0;
-
chipset_init(&msg);
-
/* get saved message count */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -1362,13 +1236,10 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
- dev_err(&chipset_dev->acpi_device->dev,
- "invalid count\n");
+ dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
return;
}
-
/* get saved crash message offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
@@ -1378,7 +1249,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* read create device message for storage bus offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
@@ -1388,7 +1258,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* read create device message for storage device */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
@@ -1399,7 +1268,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
"failed to read channel\n");
return;
}
-
/* reuse IOVM create bus message */
if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -1407,7 +1275,6 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
return;
}
visorbus_create(&local_crash_bus_msg);
-
/* reuse create device message for storage device */
if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
@@ -1420,8 +1287,10 @@ static void setup_crash_devices_work_queue(struct work_struct *work)
void visorbus_response(struct visor_device *bus_info, int response,
int controlvm_id)
{
- controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
+ if (!bus_info->pending_msg_hdr)
+ return;
+ controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
kfree(bus_info->pending_msg_hdr);
bus_info->pending_msg_hdr = NULL;
}
@@ -1430,9 +1299,11 @@ void visorbus_device_changestate_response(struct visor_device *dev_info,
int response,
struct visor_segment_state state)
{
- device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
- dev_info, response, state);
+ if (!dev_info->pending_msg_hdr)
+ return;
+ device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
+ response, state);
kfree(dev_info->pending_msg_hdr);
dev_info->pending_msg_hdr = NULL;
}
@@ -1451,12 +1322,11 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
void *mapping;
*retry = false;
-
/* alloc an extra byte to ensure payload is \0 terminated */
allocbytes = bytes + 1 + (sizeof(struct parser_context) -
sizeof(struct visor_controlvm_parameters_header));
- if ((chipset_dev->controlvm_payload_bytes_buffered + bytes)
- > MAX_CONTROLVM_PAYLOAD_BYTES) {
+ if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
+ MAX_CONTROLVM_PAYLOAD_BYTES) {
*retry = true;
return NULL;
}
@@ -1465,7 +1335,6 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
*retry = true;
return NULL;
}
-
ctx->allocbytes = allocbytes;
ctx->param_bytes = bytes;
mapping = memremap(addr, bytes, MEMREMAP_WB);
@@ -1475,7 +1344,6 @@ static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
memunmap(mapping);
ctx->byte_stream = true;
chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
-
return ctx;
err_finish_ctx:
@@ -1508,14 +1376,13 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
/* create parsing context if necessary */
parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
parm_bytes = inmsg.hdr.payload_bytes;
-
/*
* Parameter and channel addresses within test messages actually lie
* within our OS-controlled memory. We need to know that, because it
* makes a difference in how we compute the virtual address.
*/
if (parm_bytes) {
- bool retry = false;
+ bool retry;
parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
if (!parser_ctx && retry)
@@ -1526,7 +1393,6 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
CONTROLVM_QUEUE_ACK, &ackmsg);
if (err)
return err;
-
switch (inmsg.hdr.id) {
case CONTROLVM_CHIPSET_INIT:
err = chipset_init(&inmsg);
@@ -1580,7 +1446,6 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
-CONTROLVM_RESP_ID_UNKNOWN, NULL);
break;
}
-
if (parser_ctx) {
parser_done(parser_ctx);
parser_ctx = NULL;
@@ -1599,14 +1464,13 @@ static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
static int read_controlvm_event(struct controlvm_message *msg)
{
int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
- CONTROLVM_QUEUE_EVENT, msg);
+ CONTROLVM_QUEUE_EVENT, msg);
+
if (err)
return err;
-
/* got a message */
if (msg->hdr.flags.test_message == 1)
return -EINVAL;
-
return 0;
}
@@ -1620,14 +1484,12 @@ static void parahotplug_process_list(void)
struct list_head *tmp;
spin_lock(&parahotplug_request_list_lock);
-
list_for_each_safe(pos, tmp, &parahotplug_request_list) {
struct parahotplug_request *req =
list_entry(pos, struct parahotplug_request, list);
if (!time_after_eq(jiffies, req->expiration))
continue;
-
list_del(pos);
if (req->msg.hdr.flags.response_expected)
controlvm_respond(
@@ -1636,7 +1498,6 @@ static void parahotplug_process_list(void)
&req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
-
spin_unlock(&parahotplug_request_list_lock);
}
@@ -1652,10 +1513,8 @@ static void controlvm_periodic_work(struct work_struct *work)
CONTROLVM_QUEUE_RESPONSE,
&inmsg);
} while ((!err) && (++count < CONTROLVM_MESSAGE_MAX));
-
if (err != -EAGAIN)
goto schedule_out;
-
if (chipset_dev->controlvm_pending_msg_valid) {
/*
* we throttled processing of a prior msg, so try to process
@@ -1667,7 +1526,6 @@ static void controlvm_periodic_work(struct work_struct *work)
} else {
err = read_controlvm_event(&inmsg);
}
-
while (!err) {
chipset_dev->most_recent_message_jiffies = jiffies;
err = handle_command(inmsg,
@@ -1681,7 +1539,6 @@ static void controlvm_periodic_work(struct work_struct *work)
err = read_controlvm_event(&inmsg);
}
-
/* parahotplug_worker */
parahotplug_process_list();
@@ -1697,17 +1554,12 @@ schedule_out:
* it's been longer than MIN_IDLE_SECONDS since we processed
* our last controlvm message; slow down the polling
*/
- if (chipset_dev->poll_jiffies !=
- POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
- chipset_dev->poll_jiffies =
- POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW)
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
} else {
- if (chipset_dev->poll_jiffies !=
- POLLJIFFIES_CONTROLVMCHANNEL_FAST)
- chipset_dev->poll_jiffies =
- POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST)
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
}
-
schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
chipset_dev->poll_jiffies);
}
@@ -1720,20 +1572,16 @@ static int visorchipset_init(struct acpi_device *acpi_device)
chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
if (!chipset_dev)
goto error;
-
err = controlvm_channel_create(chipset_dev);
if (err)
goto error_free_chipset_dev;
-
acpi_device->driver_data = chipset_dev;
chipset_dev->acpi_device = acpi_device;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
visorchipset_dev_groups);
if (err < 0)
goto error_destroy_channel;
-
controlvm_channel = chipset_dev->controlvm_channel;
if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
&chipset_dev->acpi_device->dev,
@@ -1743,7 +1591,6 @@ static int visorchipset_init(struct acpi_device *acpi_device)
VISOR_CONTROLVM_CHANNEL_VERSIONID,
VISOR_CHANNEL_SIGNATURE))
goto error_delete_groups;
-
/* if booting in a crash kernel */
if (is_kdump_kernel())
INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
@@ -1751,16 +1598,13 @@ static int visorchipset_init(struct acpi_device *acpi_device)
else
INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
controlvm_periodic_work);
-
chipset_dev->most_recent_message_jiffies = jiffies;
- chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
+ chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
chipset_dev->poll_jiffies);
-
err = visorbus_init();
if (err < 0)
goto error_cancel_work;
-
return 0;
error_cancel_work:
@@ -1787,10 +1631,8 @@ static int visorchipset_exit(struct acpi_device *acpi_device)
cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
visorchipset_dev_groups);
-
visorchannel_destroy(chipset_dev->controlvm_channel);
kfree(chipset_dev);
-
return 0;
}
@@ -1832,11 +1674,9 @@ static int __init init_unisys(void)
if (!visorutil_spar_detect())
return -ENODEV;
-
result = acpi_bus_register_driver(&unisys_acpi_driver);
if (result)
return -ENODEV;
-
pr_info("Unisys Visorchipset Driver Loaded.\n");
return 0;
};
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 419dba89af06..0bcd3acb7b0c 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/idr.h>
+#include <linux/module.h>
#include <linux/seq_file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -39,7 +40,8 @@ static struct visor_channeltype_descriptor visorhba_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
* bus driver is the VISOR_VHBA channel.
*/
- { VISOR_VHBA_CHANNEL_GUID, "sparvhba" },
+ { VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
+ VISOR_VHBA_CHANNEL_VERSIONID },
{}
};
@@ -818,9 +820,9 @@ static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
/* Do not log errors for disk-not-present inquiries */
- if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+ if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
(host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
- (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
+ cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
return;
/* Okay see what our error_count is here.... */
vdisk = scsidev->hostdata;
@@ -868,8 +870,8 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
struct visordisk_info *vdisk;
scsidev = scsicmd->device;
- if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
- (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+ if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
+ cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
if (cmdrsp->scsi.no_disk_result == 0)
return;
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 9d8cbc52de8b..450f003743c0 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -23,6 +23,7 @@
#include <linux/fb.h>
#include <linux/input.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/uuid.h>
#include "visorbus.h"
@@ -711,8 +712,9 @@ out:
/* GUIDS for all channel types supported by this driver. */
static struct visor_channeltype_descriptor visorinput_channel_types[] = {
- { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard"},
- { VISOR_MOUSE_CHANNEL_GUID, "mouse"},
+ { VISOR_KEYBOARD_CHANNEL_GUID, "keyboard",
+ sizeof(struct channel_header), 0 },
+ { VISOR_MOUSE_CHANNEL_GUID, "mouse", sizeof(struct channel_header), 0 },
{}
};
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index dc390eae2960..6d8239163ba5 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
@@ -48,7 +49,8 @@ static struct visor_channeltype_descriptor visornic_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
* bus driver is the VISOR_VNIC channel.
*/
- { VISOR_VNIC_CHANNEL_GUID, "ultravnic" },
+ { VISOR_VNIC_CHANNEL_GUID, "ultravnic", sizeof(struct channel_header),
+ VISOR_VNIC_CHANNEL_VERSIONID },
{}
};
MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
@@ -899,7 +901,7 @@ static int visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if ((len < ETH_MIN_PACKET_SIZE) &&
+ if (len < ETH_MIN_PACKET_SIZE &&
((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
/* pad the packet out to minimum size */
padlen = ETH_MIN_PACKET_SIZE - len;
@@ -1450,7 +1452,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
rcu_read_lock();
for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
/* Only consider netdevs that are visornic, and are open */
- if ((dev->netdev_ops != &visornic_dev_ops) ||
+ if (dev->netdev_ops != &visornic_dev_ops ||
(!netif_queue_stopped(dev)))
continue;
@@ -1680,7 +1682,7 @@ static void service_resp_queue(struct uiscmdrsp *cmdrsp,
/* only call queue wake if we stopped it */
netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
/* ASSERT netdev == vnicinfo->netdev; */
- if ((netdev == devdata->netdev) &&
+ if (netdev == devdata->netdev &&
netif_queue_stopped(netdev)) {
/* check if we have crossed the lower watermark
* for netif_wake_queue()
@@ -1764,9 +1766,10 @@ static int visornic_poll(struct napi_struct *napi, int budget)
* Main function of the vnic_incoming thread. Periodically check the response
* queue and drain it if needed.
*/
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
{
- struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+ struct visornic_devdata *devdata = from_timer(devdata, t,
+ irq_poll_timer);
if (!visorchannel_signalempty(
devdata->dev->visorchannel,
@@ -1897,8 +1900,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- setup_timer(&devdata->irq_poll_timer, poll_for_irq,
- (unsigned long)devdata);
+ timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
/* Note: This time has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
index 2d0b3bc7ad73..3f6094aa9cdf 100644
--- a/drivers/staging/vboxvideo/Makefile
+++ b/drivers/staging/vboxvideo/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Iinclude/drm
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 4b9302703b36..eeac4f0cb2c6 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -137,8 +137,8 @@ struct vbox_connector {
char name[32];
struct vbox_crtc *vbox_crtc;
struct {
- u16 width;
- u16 height;
+ u32 width;
+ u32 height;
bool disconnected;
} mode_hint;
};
@@ -150,8 +150,8 @@ struct vbox_crtc {
unsigned int crtc_id;
u32 fb_offset;
bool cursor_enabled;
- u16 x_hint;
- u16 y_hint;
+ u32 x_hint;
+ u32 y_hint;
};
struct vbox_encoder {
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
index 3ca8bec62ac4..74abdf02d9fd 100644
--- a/drivers/staging/vboxvideo/vbox_irq.c
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -150,8 +150,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
disconnected = !(hints->enabled);
crtc_id = vbox_conn->vbox_crtc->crtc_id;
- vbox_conn->mode_hint.width = hints->cx & 0x8fff;
- vbox_conn->mode_hint.height = hints->cy & 0x8fff;
+ vbox_conn->mode_hint.width = hints->cx;
+ vbox_conn->mode_hint.height = hints->cy;
vbox_conn->vbox_crtc->x_hint = hints->dx;
vbox_conn->vbox_crtc->y_hint = hints->dy;
vbox_conn->mode_hint.disconnected = disconnected;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index c745a0402c68..b265fe924556 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -553,12 +553,22 @@ static int vbox_get_modes(struct drm_connector *connector)
++num_modes;
}
vbox_set_edid(connector, preferred_width, preferred_height);
- drm_object_property_set_value(
- &connector->base, vbox->dev->mode_config.suggested_x_property,
- vbox_connector->vbox_crtc->x_hint);
- drm_object_property_set_value(
- &connector->base, vbox->dev->mode_config.suggested_y_property,
- vbox_connector->vbox_crtc->y_hint);
+
+ if (vbox_connector->vbox_crtc->x_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_x_property, 0);
+
+ if (vbox_connector->vbox_crtc->y_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->dev->mode_config.suggested_y_property, 0);
return num_modes;
}
@@ -640,9 +650,9 @@ static int vbox_connector_init(struct drm_device *dev,
drm_mode_create_suggested_offset_properties(dev);
drm_object_attach_property(&connector->base,
- dev->mode_config.suggested_x_property, -1);
+ dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
- dev->mode_config.suggested_y_property, -1);
+ dev->mode_config.suggested_y_property, 0);
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9e2763663ab8..f5aaf7d629f0 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -19,18 +19,6 @@ config BCM2835_VCHIQ
Defaults to Y when the Broadcom Videocore services
are included in the build, N otherwise.
-if BCM2835_VCHIQ
-
-config BCM2835_VCHIQ_SUPPORT_MEMDUMP
- bool "Support dumping memory contents to debug log"
- help
- BCM2835 VCHIQ supports the ability to dump the
- contents of memory to the debug log. This
- is typically only needed by diagnostic tools used
- to debug issues with VideoCore.
-
-endif
-
source "drivers/staging/vc04_services/bcm2835-audio/Kconfig"
source "drivers/staging/vc04_services/bcm2835-camera/Kconfig"
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index e9a8e1343cbb..1ecb261e04ae 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o
vchiq-objs := \
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index 94654c0c7bba..7e68b3e28246 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -65,7 +65,6 @@ void bcm2835_playback_fifo(struct bcm2835_alsa_stream *alsa_stream)
unsigned int consumed = 0;
int new_period = 0;
-
audio_info("alsa_stream=%p substream=%p\n", alsa_stream,
alsa_stream ? alsa_stream->substream : 0);
@@ -111,7 +110,6 @@ static int snd_bcm2835_playback_open_generic(
int idx;
int err;
-
if (mutex_lock_interruptible(&chip->audio_mutex)) {
audio_error("Interrupted whilst waiting for lock\n");
return -EINTR;
@@ -184,7 +182,6 @@ static int snd_bcm2835_playback_open_generic(
out:
mutex_unlock(&chip->audio_mutex);
-
return err;
}
@@ -207,7 +204,6 @@ static int snd_bcm2835_playback_close(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
struct bcm2835_alsa_stream *alsa_stream;
-
chip = snd_pcm_substream_chip(substream);
if (mutex_lock_interruptible(&chip->audio_mutex)) {
audio_error("Interrupted whilst waiting for lock\n");
@@ -259,7 +255,6 @@ static int snd_bcm2835_pcm_hw_params(struct snd_pcm_substream *substream,
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
int err;
-
err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
if (err < 0) {
audio_error
@@ -289,7 +284,6 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
int channels;
int err;
-
if (mutex_lock_interruptible(&chip->audio_mutex))
return -EINTR;
@@ -307,13 +301,11 @@ static int snd_bcm2835_pcm_prepare(struct snd_pcm_substream *substream)
if (err < 0)
audio_error(" error setting hw params\n");
-
bcm2835_audio_setup(alsa_stream);
/* in preparation of the stream, set the controls (volume level) of the stream */
bcm2835_audio_set_ctls(alsa_stream->chip);
-
memset(&alsa_stream->pcm_indirect, 0, sizeof(alsa_stream->pcm_indirect));
alsa_stream->pcm_indirect.hw_buffer_size =
@@ -364,7 +356,6 @@ static int snd_bcm2835_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
int err = 0;
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
audio_debug("bcm2835_AUDIO_TRIGGER_START running=%d\n",
@@ -416,7 +407,6 @@ snd_bcm2835_pcm_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct bcm2835_alsa_stream *alsa_stream = runtime->private_data;
-
audio_debug("pcm_pointer... (%d) hwptr=%d appl=%d pos=%d\n", 0,
frames_to_bytes(runtime, runtime->status->hw_ptr),
frames_to_bytes(runtime, runtime->control->appl_ptr),
@@ -493,7 +483,6 @@ int snd_bcm2835_new_pcm(struct bcm2835_chip *chip, u32 numchannels)
snd_bcm2835_playback_hw.buffer_bytes_max,
snd_bcm2835_playback_hw.buffer_bytes_max);
-
out:
mutex_unlock(&chip->audio_mutex);
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 4be864dbd41c..3c6f1d91d22d 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -337,7 +337,6 @@ static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
{
unsigned int i;
-
if (!instance) {
LOG_ERR("%s: invalid handle %p\n", __func__, instance);
@@ -369,7 +368,6 @@ static int vc_vchi_audio_deinit(struct bcm2835_audio_instance *instance)
kfree(instance);
-
return 0;
}
@@ -382,7 +380,6 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
(struct bcm2835_audio_instance *)alsa_stream->instance;
int ret;
-
LOG_INFO("%s: start\n", __func__);
BUG_ON(instance);
if (instance) {
@@ -438,7 +435,6 @@ int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
my_workqueue_init(alsa_stream);
ret = bcm2835_audio_open_connection(alsa_stream);
@@ -486,7 +482,6 @@ static int bcm2835_audio_set_ctls_chan(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Setting ALSA dest(%d), volume(%d)\n",
chip->dest, chip->volume);
@@ -570,7 +565,6 @@ int bcm2835_audio_set_params(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Setting ALSA channels(%d), samplerate(%d), bits-per-sample(%d)\n",
channels, samplerate, bps);
@@ -631,7 +625,6 @@ unlock:
int bcm2835_audio_setup(struct bcm2835_alsa_stream *alsa_stream)
{
-
return 0;
}
@@ -642,7 +635,6 @@ static int bcm2835_audio_start_worker(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
instance->num_connections);
@@ -679,7 +671,6 @@ static int bcm2835_audio_stop_worker(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
LOG_DBG("Interrupted whilst waiting for lock on (%d)\n",
instance->num_connections);
@@ -717,7 +708,6 @@ int bcm2835_audio_close(struct bcm2835_alsa_stream *alsa_stream)
int status;
int ret;
-
my_workqueue_quit(alsa_stream);
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
@@ -775,7 +765,6 @@ static int bcm2835_audio_write_worker(struct bcm2835_alsa_stream *alsa_stream,
int status;
int ret;
-
LOG_INFO(" Writing %d bytes from %p\n", count, src);
if (mutex_lock_interruptible(&instance->vchi_mutex)) {
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 379604d3554e..f1e43e45fd67 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -123,8 +123,6 @@ struct bcm2835_alsa_stream {
struct snd_pcm_indirect pcm_indirect;
spinlock_t lock;
- volatile unsigned int control;
- volatile unsigned int status;
int open;
int running;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Makefile b/drivers/staging/vc04_services/bcm2835-camera/Makefile
index 8307f30517d5..2a4565e682d8 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-camera/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
bcm2835-v4l2-$(CONFIG_VIDEO_BCM2835) := \
bcm2835-camera.o \
controls.o \
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
index 52cdf4da1b47..5a1b2a7d8eb0 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-msg.h
@@ -70,7 +70,7 @@ enum mmal_msg_type {
/* port action request messages differ depending on the action type */
enum mmal_msg_port_action_type {
- MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unkown action */
+ MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unknown action */
MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */
MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */
MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */
@@ -217,36 +217,36 @@ struct mmal_msg_port_action_reply {
#define MMAL_VC_SHORT_DATA 128
/** Signals that the current payload is the end of the stream of data */
-#define MMAL_BUFFER_HEADER_FLAG_EOS (1<<0)
+#define MMAL_BUFFER_HEADER_FLAG_EOS BIT(0)
/** Signals that the start of the current payload starts a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_START (1<<1)
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_START BIT(1)
/** Signals that the end of the current payload ends a frame */
-#define MMAL_BUFFER_HEADER_FLAG_FRAME_END (1<<2)
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_END BIT(2)
/** Signals that the current payload contains only complete frames (>1) */
#define MMAL_BUFFER_HEADER_FLAG_FRAME \
(MMAL_BUFFER_HEADER_FLAG_FRAME_START|MMAL_BUFFER_HEADER_FLAG_FRAME_END)
/** Signals that the current payload is a keyframe (i.e. self decodable) */
-#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME (1<<3)
+#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME BIT(3)
/** Signals a discontinuity in the stream of data (e.g. after a seek).
* Can be used for instance by a decoder to reset its state
*/
-#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY (1<<4)
+#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY BIT(4)
/** Signals a buffer containing some kind of config data for the component
* (e.g. codec config data)
*/
-#define MMAL_BUFFER_HEADER_FLAG_CONFIG (1<<5)
+#define MMAL_BUFFER_HEADER_FLAG_CONFIG BIT(5)
/** Signals an encrypted payload */
-#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED (1<<6)
+#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED BIT(6)
/** Signals a buffer containing side information */
-#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO (1<<7)
+#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO BIT(7)
/** Signals a buffer which is the snapshot/postview image from a stills
* capture
*/
-#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT (1<<8)
+#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT BIT(8)
/** Signals a buffer which contains data known to be corrupted */
-#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED (1<<9)
+#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED BIT(9)
/** Signals that a buffer failed to be transmitted */
-#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED (1<<10)
+#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED BIT(10)
struct mmal_driver_buffer {
u32 magic;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 4360db6d4392..6ea7fb0ea50e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -1963,7 +1963,7 @@ int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
status = vchi_service_close(instance->handle);
if (status != 0)
- pr_err("mmal-vchiq: VCHIQ close failed");
+ pr_err("mmal-vchiq: VCHIQ close failed\n");
mutex_unlock(&instance->vchiq_mutex);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index 63db053532bf..db39900c9d91 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -130,7 +130,7 @@ int vchiq_mmal_component_disable(
/* enable a mmal port
*
* enables a port and if a buffer callback provided enque buffer
- * headers as apropriate for the port.
+ * headers as appropriate for the port.
*/
int vchiq_mmal_port_enable(
struct vchiq_mmal_instance *instance,
diff --git a/drivers/staging/vc04_services/interface/vchi/connections/connection.h b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
index e793cdf2847c..67c84386c65a 100644
--- a/drivers/staging/vc04_services/interface/vchi/connections/connection.h
+++ b/drivers/staging/vc04_services/interface/vchi/connections/connection.h
@@ -54,7 +54,6 @@ typedef struct opaque_vchi_connection_info_t VCHI_CONNECTION_STATE_T;
typedef struct vchi_connection_t VCHI_CONNECTION_T;
-
/******************************************************************************
API
*****************************************************************************/
@@ -212,7 +211,6 @@ typedef void * (*VCHI_BUFFER_ALLOCATE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_
// free memory allocated by buffer_allocate
typedef void (*VCHI_BUFFER_FREE)(VCHI_CONNECTION_SERVICE_HANDLE_T service_handle, void * address);
-
/******************************************************************************
System driver struct
*****************************************************************************/
@@ -321,7 +319,6 @@ struct vchi_connection_t {
#endif
};
-
#endif /* CONNECTION_H_ */
/****************************** End of file **********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
index a7740a425388..834263f278cf 100644
--- a/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
+++ b/drivers/staging/vc04_services/interface/vchi/message_drivers/message.h
@@ -41,7 +41,6 @@
#include "interface/vchi/vchi_cfg_internal.h"
#include "interface/vchi/vchi_common.h"
-
typedef enum message_event_type {
MESSAGE_EVENT_NONE,
MESSAGE_EVENT_NOP,
@@ -111,7 +110,6 @@ typedef struct rx_bulk_slotinfo_t {
VCHI_FLAGS_T flags;
} RX_BULK_SLOTINFO_T;
-
/* ----------------------------------------------------------------------
* each connection driver will have a pool of the following struct.
*
@@ -155,7 +153,6 @@ typedef struct {
} MESSAGE_EVENT_T;
-
// callbacks
typedef void VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T( void *state );
@@ -163,7 +160,6 @@ typedef struct {
VCHI_MESSAGE_DRIVER_EVENT_CALLBACK_T *event_callback;
} VCHI_MESSAGE_DRIVER_OPEN_T;
-
// handle to this instance of message driver (as returned by ->open)
typedef struct opaque_mhandle_t *VCHI_MDRIVER_HANDLE_T;
@@ -195,7 +191,6 @@ struct opaque_vchi_message_driver_t {
void (*debug)( VCHI_MDRIVER_HANDLE_T *handle );
};
-
#endif // _VCHI_MESSAGE_H_
/****************************** End of file ***********************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index addb7b00b688..66a3a060fad2 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -39,7 +39,6 @@
#include "interface/vchi/connections/connection.h"
#include "vchi_mh.h"
-
/******************************************************************************
Global defs
*****************************************************************************/
@@ -92,7 +91,6 @@ typedef struct vchi_msg_vector_ex {
} u;
} VCHI_MSG_VECTOR_EX_T;
-
// Construct an entry in a msg vector for a pointer (p) of length (l)
#define VCHI_VEC_POINTER(p,l) VCHI_VEC_POINTER, { { (VCHI_MEM_HANDLE_T)(p), (l) } }
@@ -103,7 +101,6 @@ typedef struct vchi_msg_vector_ex {
#define MAKE_FOURCC(x) ((int32_t)( (x[0] << 24) | (x[1] << 16) | (x[2] << 8) | x[3] ))
#define FOURCC_TO_CHAR(x) (x >> 24) & 0xFF,(x >> 16) & 0xFF,(x >> 8) & 0xFF, x & 0xFF
-
// Opaque service information
struct opaque_vchi_service_t;
@@ -114,8 +111,6 @@ typedef struct {
void *message;
} VCHI_HELD_MSG_T;
-
-
// structure used to provide the information needed to open a server or a client
typedef struct {
struct vchi_version version;
@@ -162,7 +157,6 @@ extern "C" {
extern /*@observer@*/ VCHI_CONNECTION_T * vchi_create_connection( const VCHI_CONNECTION_API_T * function_table,
const VCHI_MESSAGE_DRIVER_T * low_level);
-
// Routine used to initialise the vchi on both local + remote connections
extern int32_t vchi_initialise( VCHI_INSTANCE_T *instance_handle );
@@ -185,7 +179,6 @@ extern void * vchi_allocate_buffer(VCHI_SERVICE_HANDLE_T handle, uint32_t *lengt
extern void vchi_free_buffer(VCHI_SERVICE_HANDLE_T handle, void *address);
extern uint32_t vchi_current_time(VCHI_INSTANCE_T instance_handle);
-
/******************************************************************************
Global service API
*****************************************************************************/
@@ -194,7 +187,7 @@ extern int32_t vchi_service_create( VCHI_INSTANCE_T instance_handle,
SERVICE_CREATION_T *setup,
VCHI_SERVICE_HANDLE_T *handle );
-// Routine to destory a service
+// Routine to destroy a service
extern int32_t vchi_service_destroy( const VCHI_SERVICE_HANDLE_T handle );
// Routine to open a named service
@@ -307,7 +300,6 @@ extern int32_t vchi_msg_iter_hold_next( VCHI_MSG_ITER_T *iter,
uint32_t *msg_size, // }
VCHI_HELD_MSG_T *message );
-
/******************************************************************************
Global bulk API
*****************************************************************************/
@@ -319,7 +311,6 @@ extern int32_t vchi_bulk_queue_receive( VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *transfer_handle );
-
// Prepare interface for a transfer from the other side into relocatable memory.
int32_t vchi_bulk_queue_receive_reloc( const VCHI_SERVICE_HANDLE_T handle,
VCHI_MEM_HANDLE_T h_dst,
@@ -335,7 +326,6 @@ extern int32_t vchi_bulk_queue_transmit( VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *transfer_handle );
-
/******************************************************************************
Configuration plumbing
*****************************************************************************/
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_common.h b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
index 45c2070d46b0..76e10fe65d9b 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_common.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_common.h
@@ -34,7 +34,6 @@
#ifndef VCHI_COMMON_H_
#define VCHI_COMMON_H_
-
//flags used when sending messages (must be bitmapped)
typedef enum {
VCHI_FLAGS_NONE = 0x0,
@@ -118,14 +117,11 @@ typedef enum {
VCHI_SERVICE_OPTION_MAX
} VCHI_SERVICE_OPTION_T;
-
//Callback used by all services / bulk transfers
typedef void (*VCHI_CALLBACK_T)(void *callback_param, //my service local param
VCHI_CALLBACK_REASON_T reason,
void *handle); //for transmitting msg's only
-
-
/*
* Define vector struct for scatter-gather (vector) operations
* Vectors can be nested - if a vector element has negative length, then
@@ -154,7 +150,6 @@ typedef struct opaque_vchi_connection_api_t VCHI_CONNECTION_API_T;
// Opaque type for a message driver
typedef struct opaque_vchi_message_driver_t VCHI_MESSAGE_DRIVER_T;
-
// Iterator structure for reading ahead through received message queue. Allocated by client,
// initialised by vchi_msg_look_ahead. Fields are for internal VCHI use only.
// Iterates over messages in queue at the instant of the call to vchi_msg_lookahead -
@@ -168,5 +163,4 @@ typedef struct {
void *remove;
} VCHI_MSG_ITER_T;
-
#endif // VCHI_COMMON_H_
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index be08849175ea..315b49c1de3b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -59,10 +59,10 @@
#define BELL0 0x00
#define BELL2 0x08
-typedef struct vchiq_2835_state_struct {
+struct vchiq_2835_state {
int inited;
VCHIQ_ARM_STATE_T arm_state;
-} VCHIQ_2835_ARM_STATE_T;
+};
struct vchiq_pagelist_info {
PAGELIST_T *pagelist;
@@ -84,16 +84,13 @@ static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;
-extern int vchiq_arm_log_level;
-
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task);
+create_pagelist(char __user *buf, size_t count, unsigned short type);
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
@@ -206,25 +203,31 @@ VCHIQ_STATUS_T
vchiq_platform_init_state(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
+ struct vchiq_2835_state *platform_state;
+
+ state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
+ platform_state = (struct vchiq_2835_state *)state->platform_state;
+
+ platform_state->inited = 1;
+ status = vchiq_arm_init_state(state, &platform_state->arm_state);
- state->platform_state = kzalloc(sizeof(VCHIQ_2835_ARM_STATE_T), GFP_KERNEL);
- ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 1;
- status = vchiq_arm_init_state(state, &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state);
if (status != VCHIQ_SUCCESS)
- {
- ((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited = 0;
- }
+ platform_state->inited = 0;
+
return status;
}
VCHIQ_ARM_STATE_T*
vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
{
- if (!((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->inited)
- {
+ struct vchiq_2835_state *platform_state;
+
+ platform_state = (struct vchiq_2835_state *)state->platform_state;
+
+ if (!platform_state->inited)
BUG();
- }
- return &((VCHIQ_2835_ARM_STATE_T *)state->platform_state)->arm_state;
+
+ return &platform_state->arm_state;
}
void
@@ -251,8 +254,7 @@ vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
pagelistinfo = create_pagelist((char __user *)offset, size,
(dir == VCHIQ_BULK_RECEIVE)
? PAGELIST_READ
- : PAGELIST_WRITE,
- current);
+ : PAGELIST_WRITE);
if (!pagelistinfo)
return VCHIQ_ERROR;
@@ -383,16 +385,15 @@ cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
}
/* There is a potential problem with partial cache lines (pages?)
-** at the ends of the block when reading. If the CPU accessed anything in
-** the same line (page?) then it may have pulled old data into the cache,
-** obscuring the new data underneath. We can solve this by transferring the
-** partial cache lines separately, and allowing the ARM to copy into the
-** cached area.
-*/
+ * at the ends of the block when reading. If the CPU accessed anything in
+ * the same line (page?) then it may have pulled old data into the cache,
+ * obscuring the new data underneath. We can solve this by transferring the
+ * partial cache lines separately, and allowing the ARM to copy into the
+ * cached area.
+ */
static struct vchiq_pagelist_info *
-create_pagelist(char __user *buf, size_t count, unsigned short type,
- struct task_struct *task)
+create_pagelist(char __user *buf, size_t count, unsigned short type)
{
PAGELIST_T *pagelist;
struct vchiq_pagelist_info *pagelistinfo;
@@ -415,15 +416,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
sizeof(struct vchiq_pagelist_info);
/* Allocate enough storage to hold the page pointers and the page
- ** list
- */
+ * list
+ */
pagelist = dma_zalloc_coherent(g_dev,
pagelist_size,
&dma_addr,
GFP_KERNEL);
- vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
- pagelist);
+ vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
+
if (!pagelist)
return NULL;
@@ -472,20 +473,16 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
}
/* do not try and release vmalloc pages */
} else {
- down_read(&task->mm->mmap_sem);
- actual_pages = get_user_pages(
- (unsigned long)buf & PAGE_MASK,
+ actual_pages = get_user_pages_fast(
+ (unsigned long)buf & PAGE_MASK,
num_pages,
- (type == PAGELIST_READ) ? FOLL_WRITE : 0,
- pages,
- NULL /*vmas */);
- up_read(&task->mm->mmap_sem);
+ type == PAGELIST_READ,
+ pages);
if (actual_pages != num_pages) {
vchiq_log_info(vchiq_arm_log_level,
- "create_pagelist - only %d/%d pages locked",
- actual_pages,
- num_pages);
+ "%s - only %d/%d pages locked",
+ __func__, actual_pages, num_pages);
/* This is probably due to the process being killed */
while (actual_pages > 0)
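The create_pagelist() hunk above switches from get_user_pages() under mmap_sem to get_user_pages_fast(), which handles the mm locking internally. A hedged sketch of that calling pattern, assuming the signature of this kernel generation, get_user_pages_fast(start, nr_pages, write, pages); every name other than the API calls is hypothetical:

	/* Sketch only: pin a user buffer without taking mmap_sem in the caller. */
	#include <linux/mm.h>
	#include <linux/slab.h>

	static int pin_user_buffer(char __user *buf, size_t count, int writable,
				   struct page ***pagesp)
	{
		unsigned long first = (unsigned long)buf & PAGE_MASK;
		unsigned long last = ((unsigned long)buf + count - 1) & PAGE_MASK;
		int num_pages = (last - first) / PAGE_SIZE + 1;
		struct page **pages;
		int pinned;

		pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		/* No down_read(&mm->mmap_sem) needed around this call. */
		pinned = get_user_pages_fast(first, num_pages, writable, pages);
		if (pinned < 0) {
			kfree(pages);
			return pinned;
		}

		*pagesp = pages;
		return pinned;	/* caller must put_page() each pinned page when done */
	}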
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 314ffac50bb8..411539f8ff8c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -109,9 +109,7 @@ static const char *const resume_state_names[] = {
* requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200
-
-static void suspend_timer_callback(unsigned long context);
-
+static void suspend_timer_callback(struct timer_list *t);
typedef struct user_service_struct {
VCHIQ_SERVICE_T *service;
@@ -195,11 +193,6 @@ static const char *const ioctl_names[] = {
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
(VCHIQ_IOC_MAX + 1));
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-static void
-dump_phys_mem(void *virt_addr, u32 num_bytes);
-#endif
-
/****************************************************************************
*
* add_completion
@@ -1161,20 +1154,6 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
args.handle, args.option, args.value);
} break;
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
- case VCHIQ_IOC_DUMP_PHYS_MEM: {
- VCHIQ_DUMP_MEM_T args;
-
- if (copy_from_user
- (&args, (const void __user *)arg,
- sizeof(args)) != 0) {
- ret = -EFAULT;
- break;
- }
- dump_phys_mem(args.virt_addr, args.num_bytes);
- } break;
-#endif
-
case VCHIQ_IOC_LIB_VERSION: {
unsigned int lib_version = (unsigned int)arg;
@@ -1654,42 +1633,6 @@ vchiq_compat_ioctl_get_config(struct file *file,
return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
}
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-
-struct vchiq_dump_mem32 {
- compat_uptr_t virt_addr;
- u32 num_bytes;
-};
-
-#define VCHIQ_IOC_DUMP_PHYS_MEM32 \
- _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem32)
-
-static long
-vchiq_compat_ioctl_dump_phys_mem(struct file *file,
- unsigned int cmd,
- unsigned long arg)
-{
- VCHIQ_DUMP_MEM_T *args;
- struct vchiq_dump_mem32 args32;
-
- args = compat_alloc_user_space(sizeof(*args));
- if (!args)
- return -EFAULT;
-
- if (copy_from_user(&args32,
- (struct vchiq_dump_mem32 *)arg,
- sizeof(args32)))
- return -EFAULT;
-
- if (put_user(compat_ptr(args32.virt_addr), &args->virt_addr) ||
- put_user(args32.num_bytes, &args->num_bytes))
- return -EFAULT;
-
- return vchiq_ioctl(file, VCHIQ_IOC_DUMP_PHYS_MEM, (unsigned long)args);
-}
-
-#endif
-
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
@@ -1707,10 +1650,6 @@ vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
case VCHIQ_IOC_GET_CONFIG32:
return vchiq_compat_ioctl_get_config(file, cmd, arg);
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
- case VCHIQ_IOC_DUMP_PHYS_MEM32:
- return vchiq_compat_ioctl_dump_phys_mem(file, cmd, arg);
-#endif
default:
return vchiq_ioctl(file, cmd, arg);
}
@@ -2050,98 +1989,6 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
/****************************************************************************
*
-* dump_user_mem
-*
-***************************************************************************/
-
-#if defined(CONFIG_BCM2835_VCHIQ_SUPPORT_MEMDUMP)
-
-static void
-dump_phys_mem(void *virt_addr, u32 num_bytes)
-{
- int rc;
- u8 *end_virt_addr = virt_addr + num_bytes;
- int num_pages;
- int offset;
- int end_offset;
- int page_idx;
- int prev_idx;
- struct page *page;
- struct page **pages;
- u8 *kmapped_virt_ptr;
-
- /* Align virt_addr and end_virt_addr to 16 byte boundaries. */
-
- virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
- end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
- ~0x0fuL);
-
- offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
- end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
-
- num_pages = DIV_ROUND_UP(offset + num_bytes, PAGE_SIZE);
-
- pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
- if (!pages) {
- vchiq_log_error(vchiq_arm_log_level,
- "Unable to allocation memory for %d pages\n",
- num_pages);
- return;
- }
-
- down_read(&current->mm->mmap_sem);
- rc = get_user_pages(
- (unsigned long)virt_addr, /* start */
- num_pages, /* len */
- 0, /* gup_flags */
- pages, /* pages (array of page pointers) */
- NULL); /* vmas */
- up_read(&current->mm->mmap_sem);
-
- prev_idx = -1;
- page = NULL;
-
- if (rc < 0) {
- vchiq_log_error(vchiq_arm_log_level,
- "Failed to get user pages: %d\n", rc);
- goto out;
- }
-
- while (offset < end_offset) {
- int page_offset = offset % PAGE_SIZE;
-
- page_idx = offset / PAGE_SIZE;
- if (page_idx != prev_idx) {
- if (page != NULL)
- kunmap(page);
- page = pages[page_idx];
- kmapped_virt_ptr = kmap(page);
- prev_idx = page_idx;
- }
-
- if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
- vchiq_log_dump_mem("ph",
- (u32)(unsigned long)&kmapped_virt_ptr[
- page_offset],
- &kmapped_virt_ptr[page_offset], 16);
-
- offset += 16;
- }
-
-out:
- if (page != NULL)
- kunmap(page);
-
- for (page_idx = 0; page_idx < num_pages; page_idx++)
- put_page(pages[page_idx]);
-
- kfree(pages);
-}
-
-#endif
-
-/****************************************************************************
-*
* vchiq_read
*
***************************************************************************/
@@ -2307,8 +2154,6 @@ exit:
return 0;
}
-
-
VCHIQ_STATUS_T
vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
{
@@ -2339,8 +2184,9 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
arm_state->suspend_timer_running = 0;
- setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
- (unsigned long)(state));
+ arm_state->state = state;
+ timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
+ 0);
arm_state->first_connect = 0;
@@ -2469,7 +2315,6 @@ set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
}
}
-
/* should be called with the write lock held */
inline void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
@@ -2589,7 +2434,6 @@ vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
status = VCHIQ_SUCCESS;
-
switch (arm_state->vc_suspend_state) {
case VC_SUSPEND_REQUESTED:
vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
@@ -2654,7 +2498,6 @@ out:
return;
}
-
static void
output_timeout_error(VCHIQ_STATE_T *state)
{
@@ -2834,7 +2677,6 @@ out:
return;
}
-
int
vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
{
@@ -2996,7 +2838,6 @@ vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
"%s %s count %d, state count %d",
__func__, entity, *entity_uc, local_uc);
-
write_unlock_bh(&arm_state->susp_res_lock);
/* Completion is in a done state when we're not suspended, so this won't
@@ -3177,18 +3018,14 @@ vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
instance->trace = (trace != 0);
}
-static void suspend_timer_callback(unsigned long context)
+static void suspend_timer_callback(struct timer_list *t)
{
- VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
- VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
+ VCHIQ_ARM_STATE_T *arm_state = from_timer(arm_state, t, suspend_timer);
+ VCHIQ_STATE_T *state = arm_state->state;
- if (!arm_state)
- goto out;
vchiq_log_info(vchiq_susp_log_level,
"%s - suspend timer expired - check suspend", __func__);
vchiq_check_suspend(state);
-out:
- return;
}
VCHIQ_STATUS_T
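The suspend_timer changes above follow the timer_setup()/from_timer() API. A small sketch of the pattern, with struct foo_state and its fields invented for illustration: the callback now receives the struct timer_list pointer itself, and from_timer() recovers the enclosing structure, which carries whatever context the old "unsigned long data" argument used to hold:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct foo_state {			/* hypothetical container object */
		struct timer_list poll_timer;
		int pending;
	};

	static void foo_timer_fn(struct timer_list *t)
	{
		struct foo_state *st = from_timer(st, t, poll_timer);

		st->pending = 0;		/* use the container, not a casted context value */
	}

	static void foo_init(struct foo_state *st)
	{
		timer_setup(&st->poll_timer, foo_timer_fn, 0);
		mod_timer(&st->poll_timer, jiffies + msecs_to_jiffies(200));
	}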
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index bfbd81d9db33..40bb0c63b1a9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -42,7 +42,6 @@
#include "vchiq_core.h"
#include "vchiq_debugfs.h"
-
enum vc_suspend_status {
VC_SUSPEND_FORCE_CANCELED = -3, /* Force suspend canceled, too busy */
VC_SUSPEND_REJECTED = -2, /* Videocore rejected suspend request */
@@ -61,15 +60,12 @@ enum vc_resume_status {
VC_RESUME_RESUMED /* Videocore resumed successfully (active) */
};
-
enum USE_TYPE_E {
USE_TYPE_SERVICE,
USE_TYPE_SERVICE_NO_RESUME,
USE_TYPE_VCHIQ
};
-
-
typedef struct vchiq_arm_state_struct {
/* Keepalive-related data */
struct task_struct *ka_thread;
@@ -87,6 +83,7 @@ typedef struct vchiq_arm_state_struct {
unsigned int wake_address;
+ VCHIQ_STATE_T *state;
struct timer_list suspend_timer;
int suspend_timer_timeout;
int suspend_timer_running;
@@ -216,5 +213,4 @@ set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
extern void
start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state);
-
#endif /* VCHIQ_ARM_H */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 486be990d7fc..ecff92bae200 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -130,7 +130,6 @@ static const char *const conn_state_names[] = {
"RESUME_TIMEOUT"
};
-
static void
release_message_sync(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header);
@@ -2168,7 +2167,6 @@ slot_handler_func(void *v)
break;
}
-
}
DEBUG_TRACE(SLOT_HANDLER_LINE);
@@ -2177,7 +2175,6 @@ slot_handler_func(void *v)
return 0;
}
-
/* Called by the recycle thread */
static int
recycle_func(void *v)
@@ -2193,7 +2190,6 @@ recycle_func(void *v)
return 0;
}
-
/* Called by the sync thread */
static int
sync_func(void *v)
@@ -2301,7 +2297,6 @@ sync_func(void *v)
return 0;
}
-
static void
init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
{
@@ -2312,14 +2307,12 @@ init_bulk_queue(VCHIQ_BULK_QUEUE_T *queue)
queue->remove = 0;
}
-
inline const char *
get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
{
return conn_state_names[conn_state];
}
-
VCHIQ_SLOT_ZERO_T *
vchiq_init_slots(void *mem_base, int mem_size)
{
@@ -2958,8 +2951,7 @@ vchiq_close_service_internal(VCHIQ_SERVICE_T *service, int close_recvd)
case VCHIQ_SRVSTATE_OPENSYNC:
mutex_lock(&state->sync_mutex);
- /* Drop through */
-
+ /* fall through */
case VCHIQ_SRVSTATE_OPEN:
if (state->is_master || close_recvd) {
if (!do_abort_bulks(service))
@@ -3296,7 +3288,6 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
return status;
}
-
/* This function may be called by kernel threads or user threads.
* User threads may receive VCHIQ_RETRY to indicate that a signal has been
* received and the call should be retried after being returned to user
@@ -3876,7 +3867,6 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
vchiq_dump_platform_service_state(dump_context, service);
}
-
void
vchiq_loud_error_header(void)
{
@@ -3901,7 +3891,6 @@ vchiq_loud_error_footer(void)
"================");
}
-
VCHIQ_STATUS_T vchiq_send_remote_use(VCHIQ_STATE_T *state)
{
VCHIQ_STATUS_T status = VCHIQ_RETRY;
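On the "/* fall through */" annotation adopted in the vchiq_close_service_internal() hunk above: placed directly before the next case label, it marks a deliberately missing break for compilers warning with -Wimplicit-fallthrough. A minimal illustration with made-up cases:

	static int classify(int state)
	{
		int flags = 0;

		switch (state) {
		case 2:
			flags |= 0x2;
			/* fall through */
		case 1:
			flags |= 0x1;
			break;
		default:
			break;
		}
		return flags;
	}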
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9e164652548a..afc1d8144a84 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -702,7 +702,6 @@ vchiq_platform_handle_timeout(VCHIQ_STATE_T *state);
extern void
vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate);
-
extern void
vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
size_t numBytes);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 9367a9a5aa3c..766b4fe5f32c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -32,7 +32,6 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <linux/debugfs.h>
#include "vchiq_core.h"
#include "vchiq_arm.h"
@@ -52,7 +51,6 @@
#define VCHIQ_LOG_INFO_STR "info"
#define VCHIQ_LOG_TRACE_STR "trace"
-
/* Top-level debug info */
struct vchiq_debugfs_info {
/* Global 'vchiq' debugfs entry used by all instances */
@@ -316,7 +314,6 @@ void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance)
debugfs_remove_recursive(node->dentry);
}
-
int vchiq_debugfs_init(void)
{
BUG_ON(debugfs_info.vchiq_cfg_dir != NULL);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion
index 9f5b6344b9b7..dd1f324a8654 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_genversion
@@ -1,4 +1,5 @@
#!/usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0
use strict;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8af95fc361ed..d465e1cf5db9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -41,14 +41,14 @@
#define vchiq_status_to_vchi(status) ((int32_t)status)
-typedef struct {
+struct shim_service {
VCHIQ_SERVICE_HANDLE_T handle;
VCHIU_QUEUE_T queue;
VCHI_CALLBACK_T callback;
void *callback_param;
-} SHIM_SERVICE_T;
+};
/* ----------------------------------------------------------------------
* return pointer to the mphi message driver function table
@@ -84,7 +84,6 @@ VCHI_CONNECTION_T *vchi_create_connection(
* void **data,
* uint32_t *msg_size,
-
* VCHI_FLAGS_T flags
*
* Description: Routine to return a pointer to the current message (to allow in
@@ -99,7 +98,7 @@ int32_t vchi_msg_peek(VCHI_SERVICE_HANDLE_T handle,
uint32_t *msg_size,
VCHI_FLAGS_T flags)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -131,7 +130,7 @@ EXPORT_SYMBOL(vchi_msg_peek);
***********************************************************/
int32_t vchi_msg_remove(VCHI_SERVICE_HANDLE_T handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
header = vchiu_queue_pop(&service->queue);
@@ -163,7 +162,7 @@ int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
void *context,
uint32_t data_size)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_STATUS_T status;
while (1) {
@@ -262,7 +261,7 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *bulk_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_BULK_MODE_T mode;
VCHIQ_STATUS_T status;
@@ -322,7 +321,7 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
void *bulk_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_BULK_MODE_T mode;
VCHIQ_STATUS_T status;
@@ -384,7 +383,7 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
uint32_t *actual_msg_size,
VCHI_FLAGS_T flags)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -458,7 +457,7 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
VCHI_FLAGS_T flags,
VCHI_HELD_MSG_T *message_handle)
{
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_HEADER_T *header;
WARN_ON((flags != VCHI_FLAGS_NONE) &&
@@ -541,7 +540,6 @@ int32_t vchi_connect(VCHI_CONNECTION_T **connections,
}
EXPORT_SYMBOL(vchi_connect);
-
/***********************************************************
* Name: vchi_disconnect
*
@@ -561,7 +559,6 @@ int32_t vchi_disconnect(VCHI_INSTANCE_T instance_handle)
}
EXPORT_SYMBOL(vchi_disconnect);
-
/***********************************************************
* Name: vchi_service_open
* Name: vchi_service_create
@@ -579,8 +576,8 @@ EXPORT_SYMBOL(vchi_disconnect);
static VCHIQ_STATUS_T shim_callback(VCHIQ_REASON_T reason,
VCHIQ_HEADER_T *header, VCHIQ_SERVICE_HANDLE_T handle, void *bulk_user)
{
- SHIM_SERVICE_T *service =
- (SHIM_SERVICE_T *)VCHIQ_GET_SERVICE_USERDATA(handle);
+ struct shim_service *service =
+ (struct shim_service *)VCHIQ_GET_SERVICE_USERDATA(handle);
if (!service->callback)
goto release;
@@ -637,10 +634,10 @@ done:
return VCHIQ_SUCCESS;
}
-static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
+static struct shim_service *service_alloc(VCHIQ_INSTANCE_T instance,
SERVICE_CREATION_T *setup)
{
- SHIM_SERVICE_T *service = kzalloc(sizeof(SHIM_SERVICE_T), GFP_KERNEL);
+ struct shim_service *service = kzalloc(sizeof(struct shim_service), GFP_KERNEL);
(void)instance;
@@ -657,7 +654,7 @@ static SHIM_SERVICE_T *service_alloc(VCHIQ_INSTANCE_T instance,
return service;
}
-static void service_free(SHIM_SERVICE_T *service)
+static void service_free(struct shim_service *service)
{
if (service) {
vchiu_queue_delete(&service->queue);
@@ -670,7 +667,7 @@ int32_t vchi_service_open(VCHI_INSTANCE_T instance_handle,
VCHI_SERVICE_HANDLE_T *handle)
{
VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
- SHIM_SERVICE_T *service = service_alloc(instance, setup);
+ struct shim_service *service = service_alloc(instance, setup);
*handle = (VCHI_SERVICE_HANDLE_T)service;
@@ -703,7 +700,7 @@ int32_t vchi_service_create(VCHI_INSTANCE_T instance_handle,
VCHI_SERVICE_HANDLE_T *handle)
{
VCHIQ_INSTANCE_T instance = (VCHIQ_INSTANCE_T)instance_handle;
- SHIM_SERVICE_T *service = service_alloc(instance, setup);
+ struct shim_service *service = service_alloc(instance, setup);
*handle = (VCHI_SERVICE_HANDLE_T)service;
@@ -733,7 +730,7 @@ EXPORT_SYMBOL(vchi_service_create);
int32_t vchi_service_close(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service) {
VCHIQ_STATUS_T status = vchiq_close_service(service->handle);
@@ -751,7 +748,7 @@ EXPORT_SYMBOL(vchi_service_close);
int32_t vchi_service_destroy(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service) {
VCHIQ_STATUS_T status = vchiq_remove_service(service->handle);
@@ -772,7 +769,7 @@ int32_t vchi_service_set_option(const VCHI_SERVICE_HANDLE_T handle,
int value)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
VCHIQ_SERVICE_OPTION_T vchiq_option;
switch (option) {
@@ -801,7 +798,7 @@ EXPORT_SYMBOL(vchi_service_set_option);
int32_t vchi_get_peer_version(const VCHI_SERVICE_HANDLE_T handle, short *peer_version)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
{
@@ -828,7 +825,7 @@ int32_t vchi_service_use(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
ret = vchiq_status_to_vchi(vchiq_use_service(service->handle));
return ret;
@@ -849,7 +846,7 @@ int32_t vchi_service_release(const VCHI_SERVICE_HANDLE_T handle)
{
int32_t ret = -1;
- SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
+ struct shim_service *service = (struct shim_service *)handle;
if (service)
ret = vchiq_status_to_vchi(
vchiq_release_service(service->handle));
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index 1d2ff0cc41f1..c548dd8c91e1 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -10,16 +10,3 @@ config VME_USER
To compile this driver as a module, choose M here. The module will
be called vme_user. If unsure, say N.
-
-config VME_PIO2
- tristate "GE PIO2 VME"
- depends on STAGING && GPIOLIB
- help
- Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
- slave card, implementing 32 solid-state relay switched IO lines, in
- 4 groups of 8. Each bank of IO lines is built to function as input,
- output or both depending on the variant of the card.
-
- To compile this driver as a module, choose M here. The module will
- be called vme_pio2. If unsure, say N.
-
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 172512cb5dbf..459742a75283 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -3,6 +3,3 @@
#
obj-$(CONFIG_VME_USER) += vme_user.o
-
-vme_pio2-objs := vme_pio2_cntr.o vme_pio2_gpio.o vme_pio2_core.o
-obj-$(CONFIG_VME_PIO2) += vme_pio2.o
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
deleted file mode 100644
index ac4a4bad4091..000000000000
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ /dev/null
@@ -1,244 +0,0 @@
-#ifndef _VME_PIO2_H_
-#define _VME_PIO2_H_
-
-#define PIO2_CARDS_MAX 32
-
-#define PIO2_VARIANT_LENGTH 5
-
-#define PIO2_NUM_CHANNELS 32
-#define PIO2_NUM_IRQS 11
-#define PIO2_NUM_CNTRS 6
-
-#define PIO2_REGS_SIZE 0x40
-
-#define PIO2_REGS_DATA0 0x0
-#define PIO2_REGS_DATA1 0x1
-#define PIO2_REGS_DATA2 0x2
-#define PIO2_REGS_DATA3 0x3
-
-static const int PIO2_REGS_DATA[4] = { PIO2_REGS_DATA0, PIO2_REGS_DATA1,
- PIO2_REGS_DATA2, PIO2_REGS_DATA3 };
-
-#define PIO2_REGS_INT_STAT0 0x8
-#define PIO2_REGS_INT_STAT1 0x9
-#define PIO2_REGS_INT_STAT2 0xa
-#define PIO2_REGS_INT_STAT3 0xb
-
-static const int PIO2_REGS_INT_STAT[4] = { PIO2_REGS_INT_STAT0,
- PIO2_REGS_INT_STAT1,
- PIO2_REGS_INT_STAT2,
- PIO2_REGS_INT_STAT3 };
-
-#define PIO2_REGS_INT_STAT_CNTR 0xc
-#define PIO2_REGS_INT_MASK0 0x10
-#define PIO2_REGS_INT_MASK1 0x11
-#define PIO2_REGS_INT_MASK2 0x12
-#define PIO2_REGS_INT_MASK3 0x13
-#define PIO2_REGS_INT_MASK4 0x14
-#define PIO2_REGS_INT_MASK5 0x15
-#define PIO2_REGS_INT_MASK6 0x16
-#define PIO2_REGS_INT_MASK7 0x17
-
-static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
- PIO2_REGS_INT_MASK1,
- PIO2_REGS_INT_MASK2,
- PIO2_REGS_INT_MASK3,
- PIO2_REGS_INT_MASK4,
- PIO2_REGS_INT_MASK5,
- PIO2_REGS_INT_MASK6,
- PIO2_REGS_INT_MASK7 };
-
-#define PIO2_REGS_CTRL 0x18
-#define PIO2_REGS_VME_VECTOR 0x19
-#define PIO2_REGS_CNTR0 0x20
-#define PIO2_REGS_CNTR1 0x22
-#define PIO2_REGS_CNTR2 0x24
-#define PIO2_REGS_CTRL_WRD0 0x26
-#define PIO2_REGS_CNTR3 0x28
-#define PIO2_REGS_CNTR4 0x2a
-#define PIO2_REGS_CNTR5 0x2c
-#define PIO2_REGS_CTRL_WRD1 0x2e
-
-#define PIO2_REGS_ID 0x30
-
-/* PIO2_REGS_DATAx (0x0 - 0x3) */
-
-static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3 };
-
-#define PIO2_CHANNEL0_BIT BIT(0)
-#define PIO2_CHANNEL1_BIT BIT(1)
-#define PIO2_CHANNEL2_BIT BIT(2)
-#define PIO2_CHANNEL3_BIT BIT(3)
-#define PIO2_CHANNEL4_BIT BIT(4)
-#define PIO2_CHANNEL5_BIT BIT(5)
-#define PIO2_CHANNEL6_BIT BIT(6)
-#define PIO2_CHANNEL7_BIT BIT(7)
-#define PIO2_CHANNEL8_BIT BIT(0)
-#define PIO2_CHANNEL9_BIT BIT(1)
-#define PIO2_CHANNEL10_BIT BIT(2)
-#define PIO2_CHANNEL11_BIT BIT(3)
-#define PIO2_CHANNEL12_BIT BIT(4)
-#define PIO2_CHANNEL13_BIT BIT(5)
-#define PIO2_CHANNEL14_BIT BIT(6)
-#define PIO2_CHANNEL15_BIT BIT(7)
-#define PIO2_CHANNEL16_BIT BIT(0)
-#define PIO2_CHANNEL17_BIT BIT(1)
-#define PIO2_CHANNEL18_BIT BIT(2)
-#define PIO2_CHANNEL19_BIT BIT(3)
-#define PIO2_CHANNEL20_BIT BIT(4)
-#define PIO2_CHANNEL21_BIT BIT(5)
-#define PIO2_CHANNEL22_BIT BIT(6)
-#define PIO2_CHANNEL23_BIT BIT(7)
-#define PIO2_CHANNEL24_BIT BIT(0)
-#define PIO2_CHANNEL25_BIT BIT(1)
-#define PIO2_CHANNEL26_BIT BIT(2)
-#define PIO2_CHANNEL27_BIT BIT(3)
-#define PIO2_CHANNEL28_BIT BIT(4)
-#define PIO2_CHANNEL29_BIT BIT(5)
-#define PIO2_CHANNEL30_BIT BIT(6)
-#define PIO2_CHANNEL31_BIT BIT(7)
-
-static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
- PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
- PIO2_CHANNEL4_BIT, PIO2_CHANNEL5_BIT,
- PIO2_CHANNEL6_BIT, PIO2_CHANNEL7_BIT,
- PIO2_CHANNEL8_BIT, PIO2_CHANNEL9_BIT,
- PIO2_CHANNEL10_BIT, PIO2_CHANNEL11_BIT,
- PIO2_CHANNEL12_BIT, PIO2_CHANNEL13_BIT,
- PIO2_CHANNEL14_BIT, PIO2_CHANNEL15_BIT,
- PIO2_CHANNEL16_BIT, PIO2_CHANNEL17_BIT,
- PIO2_CHANNEL18_BIT, PIO2_CHANNEL19_BIT,
- PIO2_CHANNEL20_BIT, PIO2_CHANNEL21_BIT,
- PIO2_CHANNEL22_BIT, PIO2_CHANNEL23_BIT,
- PIO2_CHANNEL24_BIT, PIO2_CHANNEL25_BIT,
- PIO2_CHANNEL26_BIT, PIO2_CHANNEL27_BIT,
- PIO2_CHANNEL28_BIT, PIO2_CHANNEL29_BIT,
- PIO2_CHANNEL30_BIT, PIO2_CHANNEL31_BIT
- };
-
-/* PIO2_REGS_INT_STAT_CNTR (0xc) */
-#define PIO2_COUNTER0 BIT(0)
-#define PIO2_COUNTER1 BIT(1)
-#define PIO2_COUNTER2 BIT(2)
-#define PIO2_COUNTER3 BIT(3)
-#define PIO2_COUNTER4 BIT(4)
-#define PIO2_COUNTER5 BIT(5)
-
-static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
- PIO2_COUNTER2, PIO2_COUNTER3,
- PIO2_COUNTER4, PIO2_COUNTER5 };
-
-/* PIO2_REGS_CTRL (0x18) */
-#define PIO2_VME_INT_MASK 0x7
-#define PIO2_LED BIT(6)
-#define PIO2_LOOP BIT(7)
-
-/* PIO2_REGS_VME_VECTOR (0x19) */
-#define PIO2_VME_VECTOR_SPUR 0x0
-#define PIO2_VME_VECTOR_BANK0 0x1
-#define PIO2_VME_VECTOR_BANK1 0x2
-#define PIO2_VME_VECTOR_BANK2 0x3
-#define PIO2_VME_VECTOR_BANK3 0x4
-#define PIO2_VME_VECTOR_CNTR0 0x5
-#define PIO2_VME_VECTOR_CNTR1 0x6
-#define PIO2_VME_VECTOR_CNTR2 0x7
-#define PIO2_VME_VECTOR_CNTR3 0x8
-#define PIO2_VME_VECTOR_CNTR4 0x9
-#define PIO2_VME_VECTOR_CNTR5 0xa
-
-#define PIO2_VME_VECTOR_MASK 0xf0
-
-static const int PIO2_VECTOR_BANK[4] = { PIO2_VME_VECTOR_BANK0,
- PIO2_VME_VECTOR_BANK1,
- PIO2_VME_VECTOR_BANK2,
- PIO2_VME_VECTOR_BANK3 };
-
-static const int PIO2_VECTOR_CNTR[6] = { PIO2_VME_VECTOR_CNTR0,
- PIO2_VME_VECTOR_CNTR1,
- PIO2_VME_VECTOR_CNTR2,
- PIO2_VME_VECTOR_CNTR3,
- PIO2_VME_VECTOR_CNTR4,
- PIO2_VME_VECTOR_CNTR5 };
-
-/* PIO2_REGS_CNTRx (0x20 - 0x24 & 0x28 - 0x2c) */
-
-static const int PIO2_CNTR_DATA[6] = { PIO2_REGS_CNTR0, PIO2_REGS_CNTR1,
- PIO2_REGS_CNTR2, PIO2_REGS_CNTR3,
- PIO2_REGS_CNTR4, PIO2_REGS_CNTR5 };
-
-/* PIO2_REGS_CTRL_WRDx (0x26 & 0x2e) */
-
-static const int PIO2_CNTR_CTRL[6] = { PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD0,
- PIO2_REGS_CTRL_WRD1,
- PIO2_REGS_CTRL_WRD1,
- PIO2_REGS_CTRL_WRD1 };
-
-#define PIO2_CNTR_SC_DEV0 0
-#define PIO2_CNTR_SC_DEV1 (1 << 6)
-#define PIO2_CNTR_SC_DEV2 (2 << 6)
-#define PIO2_CNTR_SC_RDBACK (3 << 6)
-
-static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
- PIO2_CNTR_SC_DEV2, PIO2_CNTR_SC_DEV0,
- PIO2_CNTR_SC_DEV1, PIO2_CNTR_SC_DEV2 };
-
-#define PIO2_CNTR_RW_LATCH 0
-#define PIO2_CNTR_RW_LSB (1 << 4)
-#define PIO2_CNTR_RW_MSB (2 << 4)
-#define PIO2_CNTR_RW_BOTH (3 << 4)
-
-#define PIO2_CNTR_MODE0 0
-#define PIO2_CNTR_MODE1 (1 << 1)
-#define PIO2_CNTR_MODE2 (2 << 1)
-#define PIO2_CNTR_MODE3 (3 << 1)
-#define PIO2_CNTR_MODE4 (4 << 1)
-#define PIO2_CNTR_MODE5 (5 << 1)
-
-#define PIO2_CNTR_BCD 1
-
-enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
-enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
-
-/* Bank configuration structure */
-struct pio2_io_bank {
- enum pio2_bank_config config;
- u8 value;
- enum pio2_int_config irq[8];
-};
-
-/* Counter configuration structure */
-struct pio2_cntr {
- int mode;
- int count;
-};
-
-struct pio2_card {
- int id;
- int bus;
- long base;
- int irq_vector;
- int irq_level;
- char variant[6];
- int led;
-
- struct vme_dev *vdev;
- struct vme_resource *window;
-
- struct gpio_chip gc;
- struct pio2_io_bank bank[4];
-
- struct pio2_cntr cntr[6];
-};
-
-int pio2_cntr_reset(struct pio2_card *card);
-
-int pio2_gpio_reset(struct pio2_card *card);
-int pio2_gpio_init(struct pio2_card *card);
-void pio2_gpio_exit(struct pio2_card *card);
-
-#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_cntr.c b/drivers/staging/vme/devices/vme_pio2_cntr.c
deleted file mode 100644
index 486c30c4956f..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_cntr.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * GE PIO2 Counter Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The PIO-2 has 6 counters, currently this code just disables the interrupts
- * and leaves them alone.
- *
- */
-
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/gpio.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static int pio2_cntr_irq_set(struct pio2_card *card, int id)
-{
- int retval;
- u8 data;
-
- data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]);
- if (retval < 0)
- return retval;
-
- data = card->cntr[id].count & 0xFF;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
- if (retval < 0)
- return retval;
-
- data = (card->cntr[id].count >> 8) & 0xFF;
- retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-int pio2_cntr_reset(struct pio2_card *card)
-{
- int i, retval = 0;
- u8 reg;
-
- /* Clear down all timers */
- for (i = 0; i < 6; i++) {
- card->cntr[i].mode = PIO2_CNTR_MODE5;
- card->cntr[i].count = 0;
- retval = pio2_cntr_irq_set(card, i);
- if (retval < 0)
- return retval;
- }
-
- /* Ensure all counter interrupts are cleared */
- do {
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_INT_STAT_CNTR);
- if (retval < 0)
- return retval;
- } while (reg != 0);
-
- return retval;
-}
-
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
deleted file mode 100644
index 367535b4b77f..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * GE PIO2 6U VME I/O Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/ctype.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static const char driver_name[] = "pio2";
-
-static int bus[PIO2_CARDS_MAX];
-static int bus_num;
-static long base[PIO2_CARDS_MAX];
-static int base_num;
-static int vector[PIO2_CARDS_MAX];
-static int vector_num;
-static int level[PIO2_CARDS_MAX];
-static int level_num;
-static char *variant[PIO2_CARDS_MAX];
-static int variant_num;
-
-static bool loopback;
-
-static int pio2_match(struct vme_dev *);
-static int pio2_probe(struct vme_dev *);
-static int pio2_remove(struct vme_dev *);
-
-static int pio2_get_led(struct pio2_card *card)
-{
- /* Can't read hardware, state saved in structure */
- return card->led;
-}
-
-static int pio2_set_led(struct pio2_card *card, int state)
-{
- u8 reg;
- int retval;
-
- reg = card->irq_level;
-
- /* Register state inverse of led state */
- if (!state)
- reg |= PIO2_LED;
-
- if (loopback)
- reg |= PIO2_LOOP;
-
- retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- card->led = state ? 1 : 0;
-
- return 0;
-}
-
-static void pio2_int(int level, int vector, void *ptr)
-{
- int vec, i, channel, retval;
- u8 reg;
- struct pio2_card *card = ptr;
-
- vec = vector & ~PIO2_VME_VECTOR_MASK;
-
- switch (vec) {
- case 0:
- dev_warn(&card->vdev->dev, "Spurious Interrupt\n");
- break;
- case 1:
- case 2:
- case 3:
- case 4:
- /* Channels 0 to 7 */
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_INT_STAT[vec - 1]);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to read IRQ status register\n");
- return;
- }
- for (i = 0; i < 8; i++) {
- channel = ((vec - 1) * 8) + i;
- if (reg & PIO2_CHANNEL_BIT[channel])
- dev_info(&card->vdev->dev,
- "Interrupt on I/O channel %d\n",
- channel);
- }
- break;
- case 5:
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- /* Counters are dealt with by their own handler */
- dev_err(&card->vdev->dev,
- "Counter interrupt\n");
- break;
- }
-}
-
-/*
- * We return whether this has been successful - this is used in the probe to
- * ensure we have a valid card.
- */
-static int pio2_reset_card(struct pio2_card *card)
-{
- int retval = 0;
- u8 data = 0;
-
- /* Clear main register*/
- retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- /* Clear VME vector */
- retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR);
- if (retval < 0)
- return retval;
-
- /* Reset GPIO */
- retval = pio2_gpio_reset(card);
- if (retval < 0)
- return retval;
-
- /* Reset counters */
- retval = pio2_cntr_reset(card);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static struct vme_driver pio2_driver = {
- .name = driver_name,
- .match = pio2_match,
- .probe = pio2_probe,
- .remove = pio2_remove,
-};
-
-static int __init pio2_init(void)
-{
- if (bus_num == 0) {
- pr_err("No cards, skipping registration\n");
- return -ENODEV;
- }
-
- if (bus_num > PIO2_CARDS_MAX) {
- pr_err("Driver only able to handle %d PIO2 Cards\n",
- PIO2_CARDS_MAX);
- bus_num = PIO2_CARDS_MAX;
- }
-
- /* Register the PIO2 driver */
- return vme_register_driver(&pio2_driver, bus_num);
-}
-
-static int pio2_match(struct vme_dev *vdev)
-{
- if (vdev->num >= bus_num) {
- dev_err(&vdev->dev,
- "The enumeration of the VMEbus to which the board is connected must be specified\n");
- return 0;
- }
-
- if (vdev->num >= base_num) {
- dev_err(&vdev->dev,
- "The VME address for the cards registers must be specified\n");
- return 0;
- }
-
- if (vdev->num >= vector_num) {
- dev_err(&vdev->dev,
- "The IRQ vector used by the card must be specified\n");
- return 0;
- }
-
- if (vdev->num >= level_num) {
- dev_err(&vdev->dev,
- "The IRQ level used by the card must be specified\n");
- return 0;
- }
-
- if (vdev->num >= variant_num) {
- dev_err(&vdev->dev, "The variant of the card must be specified\n");
- return 0;
- }
-
- return 1;
-}
-
-static int pio2_probe(struct vme_dev *vdev)
-{
- struct pio2_card *card;
- int retval;
- int i;
- u8 reg;
- int vec;
-
- card = devm_kzalloc(&vdev->dev, sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->id = vdev->num;
- card->bus = bus[card->id];
- card->base = base[card->id];
- card->irq_vector = vector[card->id];
- card->irq_level = level[card->id] & PIO2_VME_INT_MASK;
- strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH);
- card->vdev = vdev;
-
- for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
- if (!isdigit(card->variant[i])) {
- dev_err(&card->vdev->dev, "Variant invalid\n");
- return -EINVAL;
- }
- }
-
- /*
- * Bottom 4 bits of VME interrupt vector used to determine source,
- * provided vector should only use upper 4 bits.
- */
- if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
- dev_err(&card->vdev->dev,
- "Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
- return -EINVAL;
- }
-
- /*
- * There is no way to determine the build variant or whether each bank
- * is input, output or both at run time. The inputs are also inverted
- * if configured as both.
- *
- * We pass in the board variant and use that to determine the
- * configuration of the banks.
- */
- for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
- switch (card->variant[i]) {
- case '0':
- card->bank[i - 1].config = NOFIT;
- break;
- case '1':
- case '2':
- case '3':
- case '4':
- card->bank[i - 1].config = INPUT;
- break;
- case '5':
- card->bank[i - 1].config = OUTPUT;
- break;
- case '6':
- case '7':
- case '8':
- case '9':
- card->bank[i - 1].config = BOTH;
- break;
- }
- }
-
- /* Get a master window and position over regs */
- card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
- if (!card->window) {
- dev_err(&card->vdev->dev,
- "Unable to assign VME master resource\n");
- return -EIO;
- }
-
- retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
- VME_SCT | VME_USER | VME_DATA, VME_D16);
- if (retval) {
- dev_err(&card->vdev->dev,
- "Unable to configure VME master resource\n");
- goto err_set;
- }
-
- /*
- * There is also no obvious register which we can probe to determine
- * whether the provided base is valid. If we can read the "ID Register"
- * offset and the reset function doesn't error, assume we have a valid
- * location.
- */
- retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to read from device\n");
- goto err_read;
- }
-
- dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg);
-
- /*
- * Ensure all the I/O is cleared. We can't read back the states, so
- * this is the only method we have to ensure that the I/O is in a known
- * state.
- */
- retval = pio2_reset_card(card);
- if (retval) {
- dev_err(&card->vdev->dev,
- "Failed to reset card, is location valid?\n");
- retval = -ENODEV;
- goto err_reset;
- }
-
- /* Configure VME Interrupts */
- reg = card->irq_level;
- if (pio2_get_led(card))
- reg |= PIO2_LED;
- if (loopback)
- reg |= PIO2_LOOP;
- retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
- if (retval < 0)
- return retval;
-
- /* Set VME vector */
- retval = vme_master_write(card->window, &card->irq_vector, 1,
- PIO2_REGS_VME_VECTOR);
- if (retval < 0)
- return retval;
-
- /* Attach spurious interrupt handler. */
- vec = card->irq_vector | PIO2_VME_VECTOR_SPUR;
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_irq;
- }
-
- /* Attach GPIO interrupt handlers. */
- for (i = 0; i < 4; i++) {
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_gpio_irq;
- }
- }
-
- /* Attach counter interrupt handlers. */
- for (i = 0; i < 6; i++) {
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
-
- retval = vme_irq_request(vdev, card->irq_level, vec,
- &pio2_int, card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
- vec, card->irq_level);
- goto err_cntr_irq;
- }
- }
-
- /* Register IO */
- retval = pio2_gpio_init(card);
- if (retval < 0) {
- dev_err(&card->vdev->dev,
- "Unable to register with GPIO framework\n");
- goto err_gpio;
- }
-
- /* Set LED - This also sets interrupt level */
- retval = pio2_set_led(card, 0);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to set LED\n");
- goto err_led;
- }
-
- dev_set_drvdata(&card->vdev->dev, card);
-
- dev_info(&card->vdev->dev,
- "PIO2 (variant %s) configured at 0x%lx\n", card->variant,
- card->base);
-
- return 0;
-
-err_led:
- pio2_gpio_exit(card);
-err_gpio:
- i = 6;
-err_cntr_irq:
- while (i > 0) {
- i--;
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- i = 4;
-err_gpio_irq:
- while (i > 0) {
- i--;
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
- vme_irq_free(vdev, card->irq_level, vec);
-err_irq:
- pio2_reset_card(card);
-err_reset:
-err_read:
- vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
-err_set:
- vme_master_free(card->window);
- return retval;
-}
-
-static int pio2_remove(struct vme_dev *vdev)
-{
- int vec;
- int i;
-
- struct pio2_card *card = dev_get_drvdata(&vdev->dev);
-
- pio2_gpio_exit(card);
-
- for (i = 0; i < 6; i++) {
- vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- for (i = 0; i < 4; i++) {
- vec = card->irq_vector | PIO2_VECTOR_BANK[i];
- vme_irq_free(vdev, card->irq_level, vec);
- }
-
- vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
- vme_irq_free(vdev, card->irq_level, vec);
-
- pio2_reset_card(card);
-
- vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
-
- vme_master_free(card->window);
-
- return 0;
-}
-
-static void __exit pio2_exit(void)
-{
- vme_unregister_driver(&pio2_driver);
-}
-
-/* These are required for each board */
-MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
-module_param_hw_array(bus, int, other, &bus_num, 0444);
-
-MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
-module_param_hw_array(base, long, other, &base_num, 0444);
-
-MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
-module_param_hw_array(vector, int, other, &vector_num, 0444);
-
-MODULE_PARM_DESC(level, "VME IRQ Level");
-module_param_hw_array(level, int, other, &level_num, 0444);
-
-MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
-module_param_array(variant, charp, &variant_num, 0444);
-
-/* This is for debugging */
-MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
-module_param(loopback, bool, 0444);
-
-MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
-MODULE_LICENSE("GPL");
-
-module_init(pio2_init);
-module_exit(pio2_exit);
-
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
deleted file mode 100644
index ba9fe3bc2642..000000000000
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * GE PIO2 GPIO Driver
- *
- * Author: Martyn Welch <martyn.welch@ge.com>
- * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/ctype.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/vme.h>
-
-#include "vme_pio2.h"
-
-static const char driver_name[] = "pio2_gpio";
-
-static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- u8 reg;
- int retval;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev, "Channel not available as input\n");
- return 0;
- }
-
- retval = vme_master_read(card->window, &reg, 1,
- PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to read from GPIO\n");
- return 0;
- }
-
- /*
- * Remember, input on channels configured as both input and output
- * are inverted!
- */
- if (reg & PIO2_CHANNEL_BIT[offset]) {
- if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
- return 0;
-
- return 1;
- }
-
- if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
- return 1;
-
- return 0;
-}
-
-static void pio2_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- u8 reg;
- int retval;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev, "Channel not available as output\n");
- return;
- }
-
- if (value)
- reg = card->bank[PIO2_CHANNEL_BANK[offset]].value |
- PIO2_CHANNEL_BIT[offset];
- else
- reg = card->bank[PIO2_CHANNEL_BANK[offset]].value &
- ~PIO2_CHANNEL_BIT[offset];
-
- retval = vme_master_write(card->window, &reg, 1,
- PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
- if (retval < 0) {
- dev_err(&card->vdev->dev, "Unable to write to GPIO\n");
- return;
- }
-
- card->bank[PIO2_CHANNEL_BANK[offset]].value = reg;
-}
-
-/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
-{
- int data;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev,
- "Channel directionality not configurable at runtime\n");
-
- data = -EINVAL;
- } else {
- data = 0;
- }
-
- return data;
-}
-
-/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_out(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- int data;
- struct pio2_card *card = gpiochip_get_data(chip);
-
- if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
- (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
- dev_err(&card->vdev->dev,
- "Channel directionality not configurable at runtime\n");
-
- data = -EINVAL;
- } else {
- data = 0;
- }
-
- return data;
-}
-
-/*
- * We return whether this has been successful - this is used in the probe to
- * ensure we have a valid card.
- */
-int pio2_gpio_reset(struct pio2_card *card)
-{
- int retval = 0;
- int i, j;
-
- u8 data = 0;
-
- /* Zero output registers */
- for (i = 0; i < 4; i++) {
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_DATA[i]);
- if (retval < 0)
- return retval;
- card->bank[i].value = 0;
- }
-
- /* Set input interrupt masks */
- for (i = 0; i < 4; i++) {
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_INT_MASK[i * 2]);
- if (retval < 0)
- return retval;
-
- retval = vme_master_write(card->window, &data, 1,
- PIO2_REGS_INT_MASK[(i * 2) + 1]);
- if (retval < 0)
- return retval;
-
- for (j = 0; j < 8; j++)
- card->bank[i].irq[j] = NONE;
- }
-
- /* Ensure all I/O interrupts are cleared */
- for (i = 0; i < 4; i++) {
- do {
- retval = vme_master_read(card->window, &data, 1,
- PIO2_REGS_INT_STAT[i]);
- if (retval < 0)
- return retval;
- } while (data != 0);
- }
-
- return 0;
-}
-
-int pio2_gpio_init(struct pio2_card *card)
-{
- int retval = 0;
- char *label;
-
- label = kasprintf(GFP_KERNEL,
- "%s@%s", driver_name, dev_name(&card->vdev->dev));
- if (!label)
- return -ENOMEM;
-
- card->gc.label = label;
-
- card->gc.ngpio = PIO2_NUM_CHANNELS;
- /* Dynamic allocation of base */
- card->gc.base = -1;
- /* Setup pointers to chip functions */
- card->gc.direction_input = pio2_gpio_dir_in;
- card->gc.direction_output = pio2_gpio_dir_out;
- card->gc.get = pio2_gpio_get;
- card->gc.set = pio2_gpio_set;
-
- /* This function adds a memory mapped GPIO chip */
- retval = gpiochip_add_data(&card->gc, card);
- if (retval) {
- dev_err(&card->vdev->dev, "Unable to register GPIO\n");
- kfree(card->gc.label);
- }
-
- return retval;
-};
-
-void pio2_gpio_exit(struct pio2_card *card)
-{
- const char *label = card->gc.label;
-
- gpiochip_remove(&card->gc);
- kfree(label);
-}
-
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index a6cb75686fa4..19ecb05781cc 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VME_USER_H_
#define _VME_USER_H_
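The SPDX identifiers added throughout this series use the comment syntax of each file type, roughly as follows (illustrative only):

	// SPDX-License-Identifier: GPL-2.0        first line of a C source file
	/* SPDX-License-Identifier: GPL-2.0 */     first line of a header file
	# SPDX-License-Identifier: GPL-2.0         first line of a Makefile or script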
diff --git a/drivers/staging/vt6655/Makefile b/drivers/staging/vt6655/Makefile
index d55c3baade53..a151f30fc46f 100644
--- a/drivers/staging/vt6655/Makefile
+++ b/drivers/staging/vt6655/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# TODO: all of these should be removed
ccflags-y := -DLINUX -D__KERNEL__ -D__NO_VERSION__
ccflags-y += -DHOSTAP
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 9fcf2e223f71..1123b4f1e1d6 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1693,10 +1693,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
MACbShutdown(priv);
pci_disable_device(pcid);
- pci_set_power_state(pcid, pci_choose_state(pcid, state));
spin_unlock_irqrestore(&priv->lock, flags);
+ pci_set_power_state(pcid, pci_choose_state(pcid, state));
+
return 0;
}
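The vt6655_suspend() reordering above keeps potentially sleeping work out of the spinlocked region. A hedged sketch of the general pattern, with struct foo_priv and foo_stop_hw() invented for illustration: only non-sleeping operations run under spin_lock_irqsave(), and pci_set_power_state(), which may sleep while the device settles, runs after the unlock:

	#include <linux/pci.h>
	#include <linux/spinlock.h>

	struct foo_priv {				/* hypothetical driver state */
		spinlock_t lock;
	};

	static void foo_stop_hw(struct foo_priv *priv)
	{
		/* hypothetical register writes to quiesce the MAC; must not sleep */
	}

	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
	{
		struct foo_priv *priv = pci_get_drvdata(pdev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		foo_stop_hw(priv);
		pci_disable_device(pdev);
		spin_unlock_irqrestore(&priv->lock, flags);

		/* may sleep, so it runs outside the atomic section */
		pci_set_power_state(pdev, pci_choose_state(pdev, state));
		return 0;
	}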
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index d7ede73a1a01..d891993b20cf 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -56,17 +56,19 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
}
switch (key_type) {
- /* fallthrough */
case VNT_KEY_DEFAULTKEY:
/* default key last entry */
entry = MAX_KEY_TABLE - 1;
key->hw_key_idx = entry;
+ /* fall through */
case VNT_KEY_ALLGROUP:
key_mode |= VNT_KEY_ALLGROUP;
if (onfly_latch)
key_mode |= VNT_KEY_ONFLY_ALL;
+ /* fall through */
case VNT_KEY_GROUP_ADDRESS:
key_mode |= mode;
+ /* fall through */
case VNT_KEY_GROUP:
key_mode |= (mode << 4);
key_mode |= VNT_KEY_GROUP;
diff --git a/drivers/staging/vt6656/Makefile b/drivers/staging/vt6656/Makefile
index 3dbe1f89dd25..b64c0d87f612 100644
--- a/drivers/staging/vt6656/Makefile
+++ b/drivers/staging/vt6656/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# TODO: all of these should be removed
ccflags-y := -DLINUX -D__KERNEL__ -DEXPORT_SYMTAB -D__NO_VERSION__
ccflags-y += -DHOSTAP
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index c61422ea8846..4fd9cd64c6e8 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -382,11 +382,13 @@ void vnt_update_ifs(struct vnt_private *priv)
priv->difs -= 1;
break;
}
+ /* fall through */
case RF_AIROHA7230:
case RF_AL2230:
case RF_AL2230S:
if (priv->bb_type != BB_TYPE_11B)
break;
+ /* fall through */
case RF_RFMD2959:
case RF_VT3226:
case RF_VT3342A0:
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index d22628314305..ee7e26b886a5 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 622994795222..8cf886d32afb 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "coreconfigurator.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 5256f40524bf..3f5da8c58815 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*!
* @file coreconfigurator.h
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 7b620658ec38..d69248a8c7b5 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/kthread.h>
@@ -238,6 +239,7 @@ static struct completion hif_driver_comp;
static struct completion hif_wait_response;
static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
+static struct wilc_vif *periodic_rssi_vif;
u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
@@ -2272,7 +2274,7 @@ static int Handle_RemainOnChan(struct wilc_vif *vif,
ERRORHANDLER:
{
P2P_LISTEN_STATE = 1;
- hif_drv->remain_on_ch_timer.data = (unsigned long)vif;
+ hif_drv->remain_on_ch_timer_vif = vif;
mod_timer(&hif_drv->remain_on_ch_timer,
jiffies +
msecs_to_jiffies(pstrHostIfRemainOnChan->duration));
@@ -2360,11 +2362,13 @@ _done_:
return result;
}
-static void ListenTimerCB(unsigned long arg)
+static void ListenTimerCB(struct timer_list *t)
{
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t,
+ remain_on_ch_timer);
+ struct wilc_vif *vif = hif_drv->remain_on_ch_timer_vif;
s32 result = 0;
struct host_if_msg msg;
- struct wilc_vif *vif = (struct wilc_vif *)arg;
del_timer(&vif->hif_drv->remain_on_ch_timer);
@@ -2417,9 +2421,9 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
pu8CurrByte = wid.val;
*pu8CurrByte++ = (strHostIfSetMulti->enabled & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 8) & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 16) & 0xFF);
- *pu8CurrByte++ = ((strHostIfSetMulti->enabled >> 24) & 0xFF);
+ *pu8CurrByte++ = 0;
+ *pu8CurrByte++ = 0;
+ *pu8CurrByte++ = 0;
*pu8CurrByte++ = (strHostIfSetMulti->cnt & 0xFF);
*pu8CurrByte++ = ((strHostIfSetMulti->cnt >> 8) & 0xFF);
@@ -2643,9 +2647,10 @@ free_msg:
complete(&hif_thread_comp);
}
-static void TimerCB_Scan(unsigned long arg)
+static void TimerCB_Scan(struct timer_list *t)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer);
+ struct wilc_vif *vif = hif_drv->scan_timer_vif;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -2655,9 +2660,11 @@ static void TimerCB_Scan(unsigned long arg)
wilc_enqueue_cmd(&msg);
}
-static void TimerCB_Connect(unsigned long arg)
+static void TimerCB_Connect(struct timer_list *t)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct host_if_drv *hif_drv = from_timer(hif_drv, t,
+ connect_timer);
+ struct wilc_vif *vif = hif_drv->connect_timer_vif;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3040,7 +3047,7 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
return -EFAULT;
}
- hif_drv->connect_timer.data = (unsigned long)vif;
+ hif_drv->connect_timer_vif = vif;
mod_timer(&hif_drv->connect_timer,
jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
@@ -3283,7 +3290,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
return -EINVAL;
}
- hif_drv->scan_timer.data = (unsigned long)vif;
+ hif_drv->scan_timer_vif = vif;
mod_timer(&hif_drv->scan_timer,
jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
@@ -3309,9 +3316,9 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
return wilc_enqueue_cmd(&msg);
}
-static void GetPeriodicRSSI(unsigned long arg)
+static void GetPeriodicRSSI(struct timer_list *unused)
{
- struct wilc_vif *vif = (struct wilc_vif *)arg;
+ struct wilc_vif *vif = periodic_rssi_vif;
if (!vif->hif_drv) {
netdev_err(vif->ndev, "Driver handler is NULL\n");
@@ -3321,7 +3328,6 @@ static void GetPeriodicRSSI(unsigned long arg)
if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
- periodic_rssi.data = (unsigned long)vif;
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
@@ -3374,14 +3380,14 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
goto _fail_;
}
- setup_timer(&periodic_rssi, GetPeriodicRSSI,
- (unsigned long)vif);
+ periodic_rssi_vif = vif;
+ timer_setup(&periodic_rssi, GetPeriodicRSSI, 0);
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
- setup_timer(&hif_drv->scan_timer, TimerCB_Scan, 0);
- setup_timer(&hif_drv->connect_timer, TimerCB_Connect, 0);
- setup_timer(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0);
+ timer_setup(&hif_drv->scan_timer, TimerCB_Scan, 0);
+ timer_setup(&hif_drv->connect_timer, TimerCB_Connect, 0);
+ timer_setup(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0);
mutex_init(&hif_drv->cfg_values_lock);
mutex_lock(&hif_drv->cfg_values_lock);
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 1ce5ead318c7..aa914d69ab0d 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef HOST_INT_H
#define HOST_INT_H
#include <linux/ieee80211.h>
@@ -279,8 +280,13 @@ struct host_if_drv {
struct completion comp_inactive_time;
struct timer_list scan_timer;
+ struct wilc_vif *scan_timer_vif;
+
struct timer_list connect_timer;
+ struct wilc_vif *connect_timer_vif;
+
struct timer_list remain_on_ch_timer;
+ struct wilc_vif *remain_on_ch_timer_vif;
bool IFC_UP;
int driver_handler_id;
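
The host_interface.c/.h hunks are an instance of the tree-wide timer API conversion: callbacks now receive a struct timer_list * and recover their state with from_timer(), and where the old code passed a second pointer through the removed .data field (the vif), that pointer now lives in a new field next to the timer. A minimal sketch of the shape, assuming only the <linux/timer.h> interfaces used in the hunks (timer_setup(), from_timer(), mod_timer()); the my_* names are hypothetical:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct my_vif;

    struct my_drv {
        struct timer_list scan_timer;
        struct my_vif *scan_timer_vif;    /* context the old .data field used to carry */
    };

    static void my_scan_timeout(struct timer_list *t)
    {
        /* Recover the containing structure from the timer pointer. */
        struct my_drv *drv = from_timer(drv, t, scan_timer);

        pr_debug("scan timed out, vif %p\n", drv->scan_timer_vif);
        /* ... handle the timeout using drv and drv->scan_timer_vif ... */
    }

    void my_start_scan(struct my_drv *drv, struct my_vif *vif)
    {
        drv->scan_timer_vif = vif;    /* replaces scan_timer.data = (unsigned long)vif */
        timer_setup(&drv->scan_timer, my_scan_timeout, 0);
        mod_timer(&drv->scan_timer, jiffies + msecs_to_jiffies(5000));
    }
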
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 01efa80b4f88..91d49c4738dc 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*!
* @file linux_mon.c
* @brief File Operations OS wrapper functionality
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index ac5aaafa461c..028da1dc1b81 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "wilc_wfi_cfgoperations.h"
#include "host_interface.h"
#include <linux/errno.h>
@@ -266,7 +267,7 @@ static void update_scan_time(void)
last_scanned_shadow[i].time_scan = jiffies;
}
-static void remove_network_from_shadow(unsigned long arg)
+static void remove_network_from_shadow(struct timer_list *unused)
{
unsigned long now = jiffies;
int i, j;
@@ -287,12 +288,11 @@ static void remove_network_from_shadow(unsigned long arg)
}
if (last_scanned_cnt != 0) {
- hAgingTimer.data = arg;
mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
}
}
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
{
wilc_optaining_ip = false;
}
@@ -304,7 +304,6 @@ static int is_network_in_shadow(struct network_info *pstrNetworkInfo,
int i;
if (last_scanned_cnt == 0) {
- hAgingTimer.data = (unsigned long)user_void;
mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
state = -1;
} else {
@@ -1111,7 +1110,6 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
g_key_gtk_params.key = NULL;
kfree(g_key_gtk_params.seq);
g_key_gtk_params.seq = NULL;
-
}
if (key_index >= 0 && key_index <= 3) {
@@ -1617,7 +1615,7 @@ static int mgmt_tx(struct wiphy *wiphy,
*cookie = (unsigned long)buf;
priv->u64tx_cookie = *cookie;
- mgmt = (const struct ieee80211_mgmt *) buf;
+ mgmt = (const struct ieee80211_mgmt *)buf;
if (ieee80211_is_mgmt(mgmt->frame_control)) {
mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL);
@@ -2280,8 +2278,8 @@ int wilc_init_host_int(struct net_device *net)
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
- setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+ timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+ timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
}
op_ifcs++;
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index 85a3810d7bb5..dfb7ec272935 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*!
* @file wilc_wfi_cfgoperations.h
* @brief Definitions for the network module
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 7a36561a599e..e6f4d84971c3 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*!
* @file wilc_wfi_netdevice.h
* @brief Definitions for the network module
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 9addef1f1e12..f49dfa82f1b8 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid;
buffer_offset = ETH_ETHERNET_HDR_OFFSET;
- memcpy(&txb[offset + 4], bssid, 6);
+ memcpy(&txb[offset + 8], bssid, 6);
} else {
buffer_offset = HOST_HDR_OFFSET;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 7a5eba9b5f47..da7173105497 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WILC_WLAN_H
#define WILC_WLAN_H
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index d3e5b1b302f4..19e4f85fdd27 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ////////////////////////////////////////////////////////////////////////// */
/* */
/* Copyright (c) Atmel Corporation. All rights reserved. */
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h
index b8641a273547..08092a551840 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ////////////////////////////////////////////////////////////////////////// */
/* */
/* Copyright (c) Atmel Corporation. All rights reserved. */
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index f4d60057a06e..c1693cfc076d 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ///////////////////////////////////////////////////////////////////////// */
/* */
/* Copyright (c) Atmel Corporation. All rights reserved. */
diff --git a/drivers/staging/wlan-ng/Makefile b/drivers/staging/wlan-ng/Makefile
index 32b69f238c69..1d24b0f86eee 100644
--- a/drivers/staging/wlan-ng/Makefile
+++ b/drivers/staging/wlan-ng/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PRISM2_USB) += prism2_usb.o
prism2_usb-y := prism2usb.o \
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 178f6f5d4613..42912257e2b9 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* cfg80211 Interface for prism2_usb module */
#include "hfa384x.h"
#include "prism2mgmt.h"
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index d1e8218f96fb..197f5a914e8f 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -184,11 +184,11 @@ static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
static void hfa384x_usbctlxq_run(struct hfa384x *hw);
-static void hfa384x_usbctlx_reqtimerfn(unsigned long data);
+static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t);
-static void hfa384x_usbctlx_resptimerfn(unsigned long data);
+static void hfa384x_usbctlx_resptimerfn(struct timer_list *t);
-static void hfa384x_usb_throttlefn(unsigned long data);
+static void hfa384x_usb_throttlefn(struct timer_list *t);
static void hfa384x_usbctlx_completion_task(unsigned long data);
@@ -558,13 +558,11 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
INIT_WORK(&hw->link_bh, prism2sta_processing_defer);
INIT_WORK(&hw->usb_work, hfa384x_usb_defer);
- setup_timer(&hw->throttle, hfa384x_usb_throttlefn, (unsigned long)hw);
+ timer_setup(&hw->throttle, hfa384x_usb_throttlefn, 0);
- setup_timer(&hw->resptimer, hfa384x_usbctlx_resptimerfn,
- (unsigned long)hw);
+ timer_setup(&hw->resptimer, hfa384x_usbctlx_resptimerfn, 0);
- setup_timer(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn,
- (unsigned long)hw);
+ timer_setup(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn, 0);
usb_init_urb(&hw->rx_urb);
usb_init_urb(&hw->tx_urb);
@@ -574,8 +572,7 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
hw->state = HFA384x_STATE_INIT;
INIT_WORK(&hw->commsqual_bh, prism2sta_commsqual_defer);
- setup_timer(&hw->commsqual_timer, prism2sta_commsqual_timer,
- (unsigned long)hw);
+ timer_setup(&hw->commsqual_timer, prism2sta_commsqual_timer, 0);
}
/*----------------------------------------------------------------
@@ -2460,7 +2457,7 @@ int hfa384x_drvr_start(struct hfa384x *hw)
* ok
*/
result =
- usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_in, &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk in endpoint status.\n");
goto done;
@@ -2469,7 +2466,7 @@ int hfa384x_drvr_start(struct hfa384x *hw)
netdev_err(hw->wlandev->netdev, "Failed to reset bulk in endpoint.\n");
result =
- usb_get_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
+ usb_get_std_status(hw->usb, USB_RECIP_ENDPOINT, hw->endp_out, &status);
if (result < 0) {
netdev_err(hw->wlandev->netdev, "Cannot get bulk out endpoint status.\n");
goto done;
@@ -3800,9 +3797,9 @@ delresp:
* interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
+static void hfa384x_usbctlx_reqtimerfn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, reqtimer);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3859,9 +3856,9 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
* interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usbctlx_resptimerfn(unsigned long data)
+static void hfa384x_usbctlx_resptimerfn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, resptimer);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3899,9 +3896,9 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
* Interrupt
*----------------------------------------------------------------
*/
-static void hfa384x_usb_throttlefn(unsigned long data)
+static void hfa384x_usb_throttlefn(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, throttle);
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index c4aa9e7e7003..72070593394a 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -394,8 +394,9 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
count = HFA384x_SCANRESULT_MAX;
if (req->bssindex.data >= count) {
- pr_debug("requested index (%d) out of range (%d)\n",
- req->bssindex.data, count);
+ netdev_dbg(wlandev->netdev,
+ "requested index (%d) out of range (%d)\n",
+ req->bssindex.data, count);
result = 2;
req->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
goto exit;
@@ -684,7 +685,8 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
goto done;
failed:
- pr_debug("Failed to set a config option, result=%d\n", result);
+ netdev_dbg(wlandev->netdev,
+ "Failed to set a config option, result=%d\n", result);
msg->resultcode.data = P80211ENUM_resultcode_invalid_parameters;
done:
@@ -1120,15 +1122,17 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Disable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_DISABLE);
if (result) {
- pr_debug("failed to disable monitor mode, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to disable monitor mode, result=%d\n",
+ result);
goto failed;
}
/* Disable port 0 */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
- pr_debug
- ("failed to disable port 0 after sniffing, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to disable port 0 after sniffing, result=%d\n",
result);
goto failed;
}
@@ -1140,8 +1144,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFWEPFLAGS,
hw->presniff_wepflags);
if (result) {
- pr_debug
- ("failed to restore wepflags=0x%04x, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to restore wepflags=0x%04x, result=%d\n",
hw->presniff_wepflags, result);
goto failed;
}
@@ -1153,8 +1158,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
- pr_debug
- ("failed to restore porttype, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to restore porttype, result=%d\n",
result);
goto failed;
}
@@ -1162,8 +1168,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
- pr_debug("failed to enable port to presniff setting, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable port to presniff setting, result=%d\n",
+ result);
goto failed;
}
} else {
@@ -1182,8 +1189,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
&(hw->presniff_port_type));
if (result) {
- pr_debug
- ("failed to read porttype, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to read porttype, result=%d\n",
result);
goto failed;
}
@@ -1192,24 +1200,27 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFWEPFLAGS,
&(hw->presniff_wepflags));
if (result) {
- pr_debug
- ("failed to read wepflags, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to read wepflags, result=%d\n",
result);
goto failed;
}
hfa384x_drvr_stop(hw);
result = hfa384x_drvr_start(hw);
if (result) {
- pr_debug("failed to restart the card for sniffing, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to restart the card for sniffing, result=%d\n",
+ result);
goto failed;
}
} else {
/* Disable the port */
result = hfa384x_drvr_disable(hw, 0);
if (result) {
- pr_debug("failed to enable port for sniffing, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable port for sniffing, result=%d\n",
+ result);
goto failed;
}
}
@@ -1225,8 +1236,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
hw->sniff_channel = word;
if (result) {
- pr_debug("failed to set channel %d, result=%d\n",
- word, result);
+ netdev_dbg(wlandev->netdev,
+ "failed to set channel %d, result=%d\n",
+ word, result);
goto failed;
}
@@ -1238,8 +1250,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
HFA384x_RID_CNFPORTTYPE,
word);
if (result) {
- pr_debug
- ("failed to set porttype %d, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to set porttype %d, result=%d\n",
word, result);
goto failed;
}
@@ -1257,8 +1270,9 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
}
if (result) {
- pr_debug
- ("failed to set wepflags=0x%04x, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to set wepflags=0x%04x, result=%d\n",
word, result);
goto failed;
}
@@ -1283,16 +1297,18 @@ int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
/* Enable the port */
result = hfa384x_drvr_enable(hw, 0);
if (result) {
- pr_debug
- ("failed to enable port for sniffing, result=%d\n",
+ netdev_dbg
+ (wlandev->netdev,
+ "failed to enable port for sniffing, result=%d\n",
result);
goto failed;
}
/* Enable monitor mode */
result = hfa384x_cmd_monitor(hw, HFA384x_MONITOR_ENABLE);
if (result) {
- pr_debug("failed to enable monitor mode, result=%d\n",
- result);
+ netdev_dbg(wlandev->netdev,
+ "failed to enable monitor mode, result=%d\n",
+ result);
goto failed;
}
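
The prism2mgmt.c hunks swap bare pr_debug() calls for netdev_dbg(), which routes the message through the net_device so the dynamic-debug output carries the driver and interface name rather than being anonymous. A rough sketch of the difference (hypothetical helper, same wlandev->netdev style of pointer as above):

    #include <linux/netdevice.h>

    void report_result(struct net_device *ndev, int result)
    {
        /* Anonymous: only the format string identifies the source. */
        pr_debug("failed to set a config option, result=%d\n", result);

        /* Bound to the device: output is prefixed with driver and interface name. */
        netdev_dbg(ndev, "failed to set a config option, result=%d\n", result);
    }
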
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 88b979ff68b3..c062418f1202 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -109,7 +109,7 @@ int prism2mgmt_get_grpaddr_index(u32 did);
void prism2sta_processing_defer(struct work_struct *data);
void prism2sta_commsqual_defer(struct work_struct *data);
-void prism2sta_commsqual_timer(unsigned long data);
+void prism2sta_commsqual_timer(struct timer_list *t);
/* Interface callback functions, passing data back up to the cfg80211 layer */
void prism2_connect_result(struct wlandevice *wlandev, u8 failed);
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index c9df45063ab3..99316b9a4e49 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1447,7 +1447,7 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
{
struct hfa384x *hw = wlandev->priv;
- hw->link_status_new = inf->info.linkstatus.linkstatus;
+ hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus);
schedule_work(&hw->link_bh);
}
@@ -2004,9 +2004,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
mod_timer(&hw->commsqual_timer, jiffies + HZ);
}
-void prism2sta_commsqual_timer(unsigned long data)
+void prism2sta_commsqual_timer(struct timer_list *t)
{
- struct hfa384x *hw = (struct hfa384x *)data;
+ struct hfa384x *hw = from_timer(hw, t, commsqual_timer);
schedule_work(&hw->commsqual_bh);
}
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index bfb6b0a6528d..b5ba176004c1 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "hfa384x_usb.c"
#include "prism2mgmt.c"
#include "prism2mib.c"
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 7a80a90f229f..a3af1cbbf8ee 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XGIFB_MAIN
#define _XGIFB_MAIN
/* ------------------- Constant Definitions ------------------------- */
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index b450c740f626..b813f1d460ce 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1697,7 +1697,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (XGIfb_get_dram_size(xgifb_info)) {
xgifb_info->video_size = min_t(unsigned long, video_size_max,
- SZ_16M);
+ SZ_16M);
} else if (xgifb_info->video_size > video_size_max) {
xgifb_info->video_size = video_size_max;
}
@@ -1736,7 +1736,7 @@ static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw_info->pjVideoMemoryAddress =
ioremap_wc(xgifb_info->video_base, xgifb_info->video_size);
xgifb_info->mmio_vbase = ioremap(xgifb_info->mmio_base,
- xgifb_info->mmio_size);
+ xgifb_info->mmio_size);
dev_info(&pdev->dev,
"Framebuffer at 0x%llx, mapped to 0x%p, size %dk\n",
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index af50362395d5..982c676c16c6 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_XGIFB
#define _LINUX_XGIFB
#include "vgatypes.h"
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 94e2e3c7c264..0311e2682d27 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VB_DEF_
#define _VB_DEF_
#include "../../video/fbdev/sis/initdef.h"
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 591a3c9babf5..ac1c815a3c5e 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include <linux/vmalloc.h>
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index e835054b87bf..2f8a70133ebd 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VBINIT_
#define _VBINIT_
unsigned char XGIInitNew(struct pci_dev *pdev);
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index cea128bede52..e9d930f150cb 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>
#include "XGIfb.h"
@@ -5046,7 +5047,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- const u8 LCDARefreshIndex[] = {
+ static const u8 LCDARefreshIndex[] = {
0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
unsigned short RefreshRateTableIndex, i, index, temp;
@@ -5479,8 +5480,9 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
ModeIdIndex))
return 0;
- pVBInfo->ModeType = XGI330_EModeIDTable[ModeIdIndex].
- Ext_ModeFlag & ModeTypeMask;
+ pVBInfo->ModeType =
+ XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag
+ & ModeTypeMask;
pVBInfo->SetFlag = 0;
pVBInfo->VBInfo = DisableCRT2Display;
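
Marking LCDARefreshIndex[] as static const in vb_setmode.c lets the lookup table be emitted once in read-only data; as a plain const local it would typically be rebuilt on the stack on every call. Tiny illustration (hypothetical function name, table values taken from the hunk):

    #include <linux/types.h>

    u8 lookup_refresh(unsigned int idx)
    {
        /* static const: stored once in .rodata instead of being copied to
         * the stack each call, as a non-static "const u8 tbl[]" typically is. */
        static const u8 tbl[] = {
            0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };

        return tbl[idx & 7];
    }
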
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index c6317ab00474..5904ed1f2686 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VBSETMODE_
#define _VBSETMODE_
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 2fd1a5935e1d..e256f72f6d8a 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VB_STRUCT_
#define _VB_STRUCT_
#include "../../video/fbdev/sis/vstruct.h"
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 31dd52c513df..0da63e1da32f 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VB_TABLE_
#define _VB_TABLE_
static const struct SiS_MCLKData XGI340New_MCLKData[] = {
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 052694e75053..0f6d5aac04f6 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VBUTIL_
#define _VBUTIL_
static inline void xgifb_reg_set(unsigned long port, u8 index, u8 data)
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index de80e5c108dc..22919f2368d5 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VGATYPES_
#define _VGATYPES_
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index e619c0266a79..45634747377e 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
target_core_mod-y := target_core_configfs.o \
target_core_device.o \
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 0f18295e05bc..8c9ae96b760d 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_seq_pdu_list.o \
iscsi_target_auth.o \
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
index 4893ec29b6b3..d16aaae7ba2a 100644
--- a/drivers/target/iscsi/cxgbit/Makefile
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
ccflags-y += -Idrivers/target/iscsi
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 90388698c222..417b9e66b0cd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
CSK_LOGIN_PDU_DONE,
CSK_LOGIN_DONE,
CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
};
struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index d4fa41be80f9..92eb57e2adaf 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
void cxgbit_free_conn(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
cxgbit_put_csk(csk);
break;
default:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5fdb57cac968..768cce0ccb80 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+ * the cmd.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 4fd775ace541..f3f8856bfb68 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
+ /* fall through */
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 5001261f5d69..9eb10d34682c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -372,6 +372,8 @@ struct iscsi_np *iscsit_add_np(
init_completion(&np->np_restart_comp);
INIT_LIST_HEAD(&np->np_list);
+ timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);
+
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
@@ -500,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, u8 *, u8 *);
+ u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
static int
@@ -521,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -548,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
- padding,
- (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
+ padding, &cmd->pad_bytes,
+ &cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -595,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -834,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
unsigned char *buf)
{
struct iscsi_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -864,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
@@ -1408,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
return data_crc;
}
-static void iscsit_do_crypto_hash_buf(
- struct ahash_request *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+ const void *buf, u32 payload_length, u32 padding,
+ const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
@@ -1460,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
@@ -1876,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+ payload_length, padding,
+ cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
@@ -1960,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
@@ -1993,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req)
+ if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
+ }
+
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(&cmd->se_cmd, true);
- sess_ref = true;
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
@@ -2099,12 +2096,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -2124,12 +2123,8 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(&cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2285,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+ payload_length, padding,
+ &pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
@@ -3976,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
return;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+ ISCSI_HDR_LEN, 0, NULL,
+ &checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index e0db2ceb0f87..42de1843aa40 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 1b91c13cc965..d5600ac30b53 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0dd4c45f7575..0ebc4818e132 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
index 16edeeeb7777..a420fbd37969 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.h
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index 06dbff5cd520..ab2166f17785 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 7fe2aa73cff6..718fe9a1b709 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -749,9 +749,9 @@ int iscsit_check_post_dataout(
}
}
-static void iscsit_handle_time2retain_timeout(unsigned long data)
+void iscsit_handle_time2retain_timeout(struct timer_list *t)
{
- struct iscsi_session *sess = (struct iscsi_session *) data;
+ struct iscsi_session *sess = from_timer(sess, t, time2retain_timer);
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -809,14 +809,10 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
pr_debug("Starting Time2Retain timer for %u seconds on"
" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
- init_timer(&sess->time2retain_timer);
- sess->time2retain_timer.expires =
- (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
- sess->time2retain_timer.data = (unsigned long)sess;
- sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&sess->time2retain_timer);
+ mod_timer(&sess->time2retain_timer,
+ jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
}
/*
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 3822d9cd1230..883ebf6d36cf 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
@@ -11,6 +12,7 @@ extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern void iscsit_handle_time2retain_timeout(struct timer_list *t);
extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index fe9b7f1e44ac..5efa42b939a1 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -34,7 +34,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
@@ -1148,11 +1147,11 @@ static int iscsit_set_dataout_timeout_values(
/*
* NOTE: Called from interrupt (timer) context.
*/
-static void iscsit_handle_dataout_timeout(unsigned long data)
+void iscsit_handle_dataout_timeout(struct timer_list *t)
{
u32 pdu_length = 0, pdu_offset = 0;
u32 r2t_length = 0, r2t_offset = 0;
- struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+ struct iscsi_cmd *cmd = from_timer(cmd, t, dataout_timer);
struct iscsi_conn *conn = cmd->conn;
struct iscsi_session *sess = NULL;
struct iscsi_node_attrib *na;
@@ -1264,13 +1263,9 @@ void iscsit_start_dataout_timer(
pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
" CID: %hu.\n", cmd->init_task_tag, conn->cid);
- init_timer(&cmd->dataout_timer);
- cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
- cmd->dataout_timer.data = (unsigned long)cmd;
- cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&cmd->dataout_timer);
+ mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ);
}
void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
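
The OFFLOAD_BUF_SIZE change to 32768U goes hand in hand with replacing the open-coded ternaries by min(): the kernel's min() macro type-checks both operands at build time, so comparing a u32 length against a plain signed 32768 would trigger the mismatched-type warning; the U suffix keeps both sides unsigned. Sketch under that assumption (min()/min_t() historically live in <linux/kernel.h>); the names are hypothetical:

    #include <linux/kernel.h>
    #include <linux/types.h>

    #define MY_BUF_SIZE 32768U    /* unsigned, so it matches a u32 operand */

    u32 clamp_len(u32 buf_len)
    {
        /* min() warns on mixed signedness; min_t(u32, buf_len, 32768) would be
         * the alternative when an explicit cast is preferred over a suffix. */
        return min(buf_len, MY_BUF_SIZE);
    }
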
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 54d36bd25bea..1f6973f87fea 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
@@ -29,6 +30,7 @@ extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_handle_dataout_timeout(struct timer_list *t);
extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 634d01e13652..93e180d68d07 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index dc13afbd4c88..64c5a57b92e4 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -333,6 +333,9 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
+ timer_setup(&sess->time2retain_timer,
+ iscsit_handle_time2retain_timeout, 0);
+
idr_preload(GFP_KERNEL);
spin_lock_bh(&sess_idr_lock);
ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
@@ -839,9 +842,9 @@ void iscsi_post_login_handler(
iscsit_dec_conn_usage_count(conn);
}
-static void iscsi_handle_login_thread_timeout(unsigned long data)
+void iscsi_handle_login_thread_timeout(struct timer_list *t)
{
- struct iscsi_np *np = (struct iscsi_np *) data;
+ struct iscsi_np *np = from_timer(np, t, np_login_timer);
spin_lock_bh(&np->np_thread_lock);
pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
@@ -866,13 +869,9 @@ static void iscsi_start_login_thread_timer(struct iscsi_np *np)
* point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
*/
spin_lock_bh(&np->np_thread_lock);
- init_timer(&np->np_login_timer);
- np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
- np->np_login_timer.data = (unsigned long)np;
- np->np_login_timer.function = iscsi_handle_login_thread_timeout;
np->np_login_timer_flags &= ~ISCSI_TF_STOP;
np->np_login_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&np->np_login_timer);
+ mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
pr_debug("Added timeout timer to iSCSI login request for"
" %u seconds.\n", TA_LOGIN_TIMEOUT);
@@ -1266,6 +1265,10 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
+ timer_setup(&conn->nopin_response_timer,
+ iscsit_handle_nopin_response_timeout, 0);
+ timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
kfree(conn);
return 1;
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 0e1fd6cedd54..74ac3abc44a0 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
@@ -24,5 +25,6 @@ extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8)
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
bool, bool);
extern int iscsi_target_login_thread(void *);
+extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7a6751fecd32..b686e2ce9c0e 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -559,9 +559,15 @@ static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login
iscsi_target_login_sess_out(conn, np, zero_tsih, true);
}
-static void iscsi_target_login_timeout(unsigned long data)
+struct conn_timeout {
+ struct timer_list timer;
+ struct iscsi_conn *conn;
+};
+
+static void iscsi_target_login_timeout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+ struct conn_timeout *timeout = from_timer(timeout, t, timer);
+ struct iscsi_conn *conn = timeout->conn;
pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
@@ -580,7 +586,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
struct iscsi_np *np = login->np;
struct iscsi_portal_group *tpg = conn->tpg;
struct iscsi_tpg_np *tpg_np = conn->tpg_np;
- struct timer_list login_timer;
+ struct conn_timeout timeout;
int rc, zero_tsih = login->zero_tsih;
bool state;
@@ -618,15 +624,14 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
conn->login_kworker = current;
allow_signal(SIGINT);
- init_timer(&login_timer);
- login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
- login_timer.data = (unsigned long)conn;
- login_timer.function = iscsi_target_login_timeout;
- add_timer(&login_timer);
- pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid);
+ timeout.conn = conn;
+ timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0);
+ mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
+ pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid);
rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
- del_timer_sync(&login_timer);
+ del_timer_sync(&timeout.timer);
+ destroy_timer_on_stack(&timeout.timer);
flush_signals(current);
conn->login_kworker = NULL;
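
iscsi_target_nego.c shows the on-stack variant of the same timer conversion: because the callback can only get back to the timer_list itself, the timer is wrapped in a small stack struct that carries the extra pointer, from_timer() recovers the wrapper, and timer_setup_on_stack()/destroy_timer_on_stack() bracket its lifetime. Minimal sketch with hypothetical names, assuming only the interfaces used in the hunk:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct my_conn;

    struct login_timeout {
        struct timer_list timer;
        struct my_conn *conn;    /* extra context the callback needs */
    };

    static void login_timed_out(struct timer_list *t)
    {
        struct login_timeout *to = from_timer(to, t, timer);

        pr_debug("login timed out for conn %p\n", to->conn);
        /* ... signal to->conn that the login attempt timed out ... */
    }

    int wait_for_login(struct my_conn *conn, unsigned long timeout_jiffies)
    {
        struct login_timeout to = { .conn = conn };

        timer_setup_on_stack(&to.timer, login_timed_out, 0);
        mod_timer(&to.timer, jiffies + timeout_jiffies);

        /* ... blocking receive guarded by the timer ... */

        del_timer_sync(&to.timer);
        destroy_timer_on_stack(&to.timer);    /* required for on-stack timers */
        return 0;
    }
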
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 53438bfca4c6..835e1b769b3f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_NEGO_H
#define ISCSI_TARGET_NEGO_H
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index 79cdf06ade48..ce074cb54579 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index caab1045742d..29a37b242d30 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
int iscsi_encode_text_output(
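
The iscsi_decode_text_input() hunk above collapses four copies of "kfree(tmpbuf); return -1;" into one free_buffer exit label, the usual single-cleanup-path pattern. A small self-contained sketch of the same shape in user-space C (hypothetical parser, not the target code):

    #include <stdlib.h>
    #include <string.h>

    /* Parse a buffer in several steps; every failure path frees it exactly once. */
    static int parse_text(const char *in, size_t len)
    {
        char *tmp = malloc(len + 1);

        if (!tmp)
            return -1;
        memcpy(tmp, in, len);
        tmp[len] = '\0';

        if (strchr(tmp, '=') == NULL)    /* step 1: must contain a key=value pair */
            goto free_buffer;
        if (strlen(tmp) < 3)             /* step 2: must be long enough to be valid */
            goto free_buffer;

        free(tmp);
        return 0;

    free_buffer:
        free(tmp);
        return -1;
    }

    int main(void)
    {
        return parse_text("key=value", 9) ? 1 : 0;
    }
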
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index c47b73f57528..daf47f38e081 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index e446a09c886b..f65e5e584212 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -25,8 +25,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index be1234362271..5a0907027973 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 411cb266a47d..df0a39811dc2 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -187,7 +187,7 @@ static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_instance_cit = {
+const struct config_item_type iscsi_stat_instance_cit = {
.ct_attrs = iscsi_stat_instance_attrs,
.ct_owner = THIS_MODULE,
};
@@ -249,7 +249,7 @@ static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_sess_err_cit = {
+const struct config_item_type iscsi_stat_sess_err_cit = {
.ct_attrs = iscsi_stat_sess_err_attrs,
.ct_owner = THIS_MODULE,
};
@@ -390,7 +390,7 @@ static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_tgt_attr_cit = {
+const struct config_item_type iscsi_stat_tgt_attr_cit = {
.ct_attrs = iscsi_stat_tgt_attr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -522,7 +522,7 @@ static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_login_cit = {
+const struct config_item_type iscsi_stat_login_cit = {
.ct_attrs = iscsi_stat_login_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -579,7 +579,7 @@ static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_logout_cit = {
+const struct config_item_type iscsi_stat_logout_cit = {
.ct_attrs = iscsi_stat_logout_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -801,7 +801,7 @@ static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
NULL,
};
-struct config_item_type iscsi_stat_sess_cit = {
+const struct config_item_type iscsi_stat_sess_cit = {
.ct_attrs = iscsi_stat_sess_stats_attrs,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
index 64cc5c07e47c..301f0936bd8e 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.h
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 594d07a1e995..4b34f71547c6 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 59fd3cabe89d..88576f5d0ca4 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index c4eb141c6435..036940518bfe 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1e36f83b5961..4435bf374d2d 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -176,6 +176,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
spin_lock_init(&cmd->istate_lock);
spin_lock_init(&cmd->error_lock);
spin_lock_init(&cmd->r2t_lock);
+ timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
return cmd;
}
@@ -694,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
struct iscsi_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->conn)
sess = cmd->conn->sess;
else
@@ -716,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
@@ -880,9 +885,9 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
return 0;
}
-static void iscsit_handle_nopin_response_timeout(unsigned long data)
+void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *) data;
+ struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
iscsit_inc_conn_usage_count(conn);
@@ -949,14 +954,10 @@ void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
return;
}
- init_timer(&conn->nopin_response_timer);
- conn->nopin_response_timer.expires =
- (get_jiffies_64() + na->nopin_response_timeout * HZ);
- conn->nopin_response_timer.data = (unsigned long)conn;
- conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_response_timer);
+ mod_timer(&conn->nopin_response_timer,
+ jiffies + na->nopin_response_timeout * HZ);
pr_debug("Started NOPIN Response Timer on CID: %d to %u"
" seconds\n", conn->cid, na->nopin_response_timeout);
@@ -980,9 +981,9 @@ void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
spin_unlock_bh(&conn->nopin_timer_lock);
}
-static void iscsit_handle_nopin_timeout(unsigned long data)
+void iscsit_handle_nopin_timeout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *) data;
+ struct iscsi_conn *conn = from_timer(conn, t, nopin_timer);
iscsit_inc_conn_usage_count(conn);
@@ -1015,13 +1016,9 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
return;
- init_timer(&conn->nopin_timer);
- conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
- conn->nopin_timer.data = (unsigned long)conn;
- conn->nopin_timer.function = iscsit_handle_nopin_timeout;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_timer);
+ mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
@@ -1043,13 +1040,9 @@ void iscsit_start_nopin_timer(struct iscsi_conn *conn)
return;
}
- init_timer(&conn->nopin_timer);
- conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
- conn->nopin_timer.data = (unsigned long)conn;
- conn->nopin_timer.function = iscsit_handle_nopin_timeout;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_timer);
+ mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 425160565d0c..d66dfc212624 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
@@ -47,9 +48,11 @@ extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *,
extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_handle_nopin_response_timeout(struct timer_list *t);
extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_handle_nopin_timeout(struct timer_list *t);
extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
extern void iscsit_start_nopin_timer(struct iscsi_conn *);
extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
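The iscsi_target_util.c hunks above convert the NOPIN timers from init_timer()/add_timer() with a cast ->data pointer to the timer_setup()/from_timer()/mod_timer() API. As a hedged sketch of the same pattern (hypothetical structure and field names, compile-in-kernel only, not part of the patch):

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    /* Hypothetical container; mirrors how iscsi_conn embeds its timers. */
    struct demo_conn {
            struct timer_list nop_timer;
            unsigned int timeout_secs;
    };

    static void demo_nop_timeout(struct timer_list *t)
    {
            /* from_timer() recovers the container; no (unsigned long) cast of ->data */
            struct demo_conn *conn = from_timer(conn, t, nop_timer);

            pr_debug("NOPIN timeout after %u seconds\n", conn->timeout_secs);
    }

    static void demo_arm_nop_timer(struct demo_conn *conn)
    {
            /* Bind the callback once, typically where the object is allocated */
            timer_setup(&conn->nop_timer, demo_nop_timeout, 0);
            /* (Re)arming needs only mod_timer(); no expires/data/function fields */
            mod_timer(&conn->nop_timer, jiffies + conn->timeout_secs * HZ);
    }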
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 3acc43c05117..d3110909a213 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/types.h>
#include <linux/device.h>
#include <target/target_core_base.h> /* struct se_cmd */
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
index 73bcb1208832..1d101ac86527 100644
--- a/drivers/target/sbp/sbp_target.h
+++ b/drivers/target/sbp/sbp_target.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SBP_BASE_H
#define _SBP_BASE_H
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 928127642574..e46ca968009c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
+ char *path;
int len, rc;
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
return -ENOMEM;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
kfree(md_buf);
return rc;
}
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
goto out_unlock;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- lun->unpacked_lun);
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
kfree(md_buf);
-
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
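The target_core_alua.c hunks above replace fixed 512/256-byte on-stack path buffers with kasprintf(), which sizes the allocation to the formatted string. A hedged, compile-in-kernel sketch of that pattern (hypothetical names; demo_write_file() stands in for the real metadata writer):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static int demo_write_file(const char *path, const char *buf, size_t len)
    {
            return 0;       /* stand-in for the real metadata write helper */
    }

    static int demo_update_metadata(const char *db_root, const char *serial,
                                    const char *payload, size_t len)
    {
            char *path;
            int ret = -ENOMEM;

            /* No fixed-size array, no snprintf() truncation concerns */
            path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s", db_root, serial);
            if (!path)
                    return ret;

            ret = demo_write_file(path, payload, len);
            kfree(path);
            return ret;
    }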
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index c69c11baf07f..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
@@ -71,15 +72,6 @@
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 7e87d952bb7a..72b1cd1bf9d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -307,7 +307,7 @@ static struct configfs_attribute *target_core_fabric_item_attrs[] = {
/*
* Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
*/
-static struct config_item_type target_core_fabrics_item = {
+static const struct config_item_type target_core_fabrics_item = {
.ct_group_ops = &target_core_fabric_group_ops,
.ct_attrs = target_core_fabric_item_attrs,
.ct_owner = THIS_MODULE,
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%lld"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%lld"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
}
break;
case Opt_sa_res_key:
- ret = kstrtoull(args->from, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- mapped_lun = (u64)arg;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
goto out;
break;
case Opt_target_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- target_lun = (u64)arg;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
@@ -2376,7 +2376,7 @@ static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.release = target_core_alua_lu_gp_release,
};
-static struct config_item_type target_core_alua_lu_gp_cit = {
+static const struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -2434,7 +2434,7 @@ static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.drop_item = &target_core_alua_drop_lu_gp,
};
-static struct config_item_type target_core_alua_lu_gps_cit = {
+static const struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
@@ -2813,7 +2813,7 @@ static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
.release = target_core_alua_tg_pt_gp_release,
};
-static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
.ct_attrs = target_core_alua_tg_pt_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -2884,7 +2884,7 @@ TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NU
* core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
* target_core_alua_cit in target_core_init_configfs() below.
*/
-static struct config_item_type target_core_alua_cit = {
+static const struct config_item_type target_core_alua_cit = {
.ct_item_ops = NULL,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
@@ -3105,7 +3105,7 @@ static struct configfs_item_operations target_core_hba_item_ops = {
.release = target_core_hba_release,
};
-static struct config_item_type target_core_hba_cit = {
+static const struct config_item_type target_core_hba_cit = {
.ct_item_ops = &target_core_hba_item_ops,
.ct_group_ops = &target_core_hba_group_ops,
.ct_attrs = target_core_hba_attrs,
@@ -3188,7 +3188,7 @@ static struct configfs_group_operations target_core_group_ops = {
.drop_item = target_core_call_delhbafromtarget,
};
-static struct config_item_type target_core_cit = {
+static const struct config_item_type target_core_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_group_ops,
.ct_attrs = NULL,
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e9e917cc6441..e1416b007aa4 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
NULL,
};
-extern struct configfs_item_operations target_core_dev_item_ops;
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c629817a8854..9b2c0c773022 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (!nolb) {
+ return 0;
+ }
+
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 526595a072de..53be5ffd3261 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index f2a5797217d4..b4aeb2584ad4 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index f30e8ac13386..9384d19a7326 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
@@ -88,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data);
/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);
/* target_core_fabric_configfs.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index dd2cd8048582..b024613f9217 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
return (ret < 0) ? -EIO : 0;
}
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
* Size of full desctipor header minus TransportID
* containing $FABRIC_MOD specific) initiator device/port
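The last target_core_pr.c hunk fixes READ FULL STATUS descriptor emission: after the 4-byte ADDITIONAL DESCRIPTOR LENGTH field is written, the buffer offset must advance past it so the following bytes do not overwrite it. A minimal sketch of that cursor discipline (hypothetical function name, not part of the patch):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Write a big-endian 32-bit length field at buf[off] and return the
     * advanced offset; forgetting the "+ 4" is exactly the bug being fixed. */
    static u32 demo_emit_desc_len(unsigned char *buf, u32 off, u32 desc_len)
    {
            put_unaligned_be32(desc_len, &buf[off]);
            return off + 4;
    }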

diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 772f9148e75c..198fad5c89dc 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index b86fb0e1b783..e8458b5e85c9 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_PSCSI_H
#define TARGET_CORE_PSCSI_H
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 91fc1a34791d..8b88f9b14c3f 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 8038255b21e8..f0db91ebd735 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -96,7 +96,7 @@ static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_dev_cit = {
+static const struct config_item_type target_stat_scsi_dev_cit = {
.ct_attrs = target_stat_scsi_dev_attrs,
.ct_owner = THIS_MODULE,
};
@@ -193,7 +193,7 @@ static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_tgt_dev_cit = {
+static const struct config_item_type target_stat_scsi_tgt_dev_cit = {
.ct_attrs = target_stat_scsi_tgt_dev_attrs,
.ct_owner = THIS_MODULE,
};
@@ -414,7 +414,7 @@ static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_lu_cit = {
+static const struct config_item_type target_stat_scsi_lu_cit = {
.ct_attrs = target_stat_scsi_lu_attrs,
.ct_owner = THIS_MODULE,
};
@@ -540,7 +540,7 @@ static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_port_cit = {
+static const struct config_item_type target_stat_scsi_port_cit = {
.ct_attrs = target_stat_scsi_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -724,7 +724,7 @@ static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_tgt_port_cit = {
+static const struct config_item_type target_stat_scsi_tgt_port_cit = {
.ct_attrs = target_stat_scsi_tgt_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -844,7 +844,7 @@ static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_transport_cit = {
+static const struct config_item_type target_stat_scsi_transport_cit = {
.ct_attrs = target_stat_scsi_transport_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1206,7 +1206,7 @@ static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_auth_intr_cit = {
+static const struct config_item_type target_stat_scsi_auth_intr_cit = {
.ct_attrs = target_stat_scsi_auth_intr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1378,7 +1378,7 @@ static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = {
NULL,
};
-static struct config_item_type target_stat_scsi_att_intr_port_cit = {
+static const struct config_item_type target_stat_scsi_att_intr_port_cit = {
.ct_attrs = target_stat_scsi_ath_intr_port_attrs,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e22847bd79b9..9c7bc1ca341a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
+ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+ if (se_cmd->scsi_status) {
+ pr_debug("Attempted to abort io tag: %llu early failure"
+ " status: 0x%02x\n", se_cmd->tag,
+ se_cmd->scsi_status);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ }
if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
+ if (tmr)
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
cmd = tmr_p->task_cmd;
if (!cmd) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 836d552b0385..58caacd54a3b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
if (remove && ack_kref)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
return ret;
}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0, post_ret = 0;
- if (transport_check_aborted_status(cmd, 1))
- return;
-
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
* callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto queue_full;
- goto check_stop;
+
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
} else {
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
transport_lun_remove_cmd(cmd);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
}
/*
* If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
ret = -ESHUTDOWN;
goto out;
}
+ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 sections 5.7.7 and 5.7.8:
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+ * or a REGISTER AND IGNORE EXISTING KEY service action or
+ * REGISTER AND MOVE service action is attempted,
+ * but there are insufficient device server resources to complete the
+ * operation, then the command shall be terminated with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
};
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
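target_core_transport.c above adds a TCM_INSUFFICIENT_REGISTRATION_RESOURCES entry to sense_info_table, mapping the new sense_reason_t to ILLEGAL REQUEST with ASC/ASCQ 0x55/0x04. A hedged standalone sketch of that table-driven mapping (hypothetical enum and struct names, not part of the patch):

    #include <linux/types.h>

    enum demo_reason {
            DEMO_OUT_OF_RESOURCES,
            DEMO_INSUFFICIENT_REG_RESOURCES,
            DEMO_NR_REASONS,
    };

    struct demo_sense_info {
            u8 key;
            u8 asc;
            u8 ascq;
    };

    static const struct demo_sense_info demo_sense_table[DEMO_NR_REASONS] = {
            [DEMO_INSUFFICIENT_REG_RESOURCES] = {
                    .key  = 0x05,   /* ILLEGAL REQUEST */
                    .asc  = 0x55,
                    .ascq = 0x04,   /* INSUFFICIENT REGISTRATION RESOURCES */
            },
    };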
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 97402856a8f0..b0f4205a96cd 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 942d094269fb..a415d87f22d2 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -150,6 +150,8 @@ struct tcmu_dev {
wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int cmd_id;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- if (udev->cmd_time_out)
- tcmu_cmd->deadline = jiffies +
- msecs_to_jiffies(udev->cmd_time_out);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
return NULL;
}
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&udev->commands_lock);
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
- USHRT_MAX, GFP_NOWAIT);
- spin_unlock_irq(&udev->commands_lock);
- idr_preload_end();
-
- if (cmd_id < 0) {
- tcmu_free_cmd(tcmu_cmd);
- return NULL;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
return tcmu_cmd;
}
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned long tmo = udev->cmd_time_out;
+ int cmd_id;
+
+ if (tcmu_cmd->cmd_id)
+ return 0;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ return cmd_id;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ if (!tmo)
+ return 0;
+
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
}
entry->req.iov_bidi_cnt = iov_cnt;
+ ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ if (ret) {
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ mutex_unlock(&udev->cmdr_lock);
+ return TCM_OUT_OF_RESOURCES;
+ }
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
/*
* Recalaulate the command's base size and size according
* to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
- struct se_device *se_dev = se_cmd->se_dev;
- struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t ret;
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
ret = tcmu_queue_cmd_ring(tcmu_cmd);
if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
- spin_lock_irq(&udev->commands_lock);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
- spin_unlock_irq(&udev->commands_lock);
tcmu_free_cmd(tcmu_cmd);
}
@@ -985,7 +996,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
- while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
@@ -1044,9 +1055,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
return 0;
}
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = (struct tcmu_dev *)data;
+ struct tcmu_dev *udev = from_timer(udev, t, timeout);
unsigned long flags;
spin_lock_irqsave(&udev->commands_lock, flags);
@@ -1106,12 +1117,13 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
idr_init(&udev->commands);
spin_lock_init(&udev->commands_lock);
- setup_timer(&udev->timeout, tcmu_device_timedout,
- (unsigned long)udev);
+ timer_setup(&udev->timeout, tcmu_device_timedout, 0);
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
+ INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
return &udev->se_dev;
}
@@ -1280,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+ int i;
+ struct page *page;
+
+ /* Try to release all block pages */
+ mutex_lock(&udev->cmdr_lock);
+ for (i = 0; i <= udev->dbi_max; i++) {
+ page = radix_tree_delete(&udev->data_blocks, i);
+ if (page) {
+ __free_page(page);
+ atomic_dec(&global_db_count);
+ }
+ }
+ mutex_unlock(&udev->cmdr_lock);
+}
+
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ /* Upper layer should drain all requests before calling this */
+ spin_lock_irq(&udev->commands_lock);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ idr_destroy(&udev->commands);
+ spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1306,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
if (!tcmu_kern_cmd_reply_supported)
return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
relock:
spin_lock(&udev->nl_cmd_lock);
@@ -1332,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
if (!tcmu_kern_cmd_reply_supported)
return 0;
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
@@ -1476,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
@@ -1506,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
+ /* If the user didn't explicitly disable netlink reply support, use
+ * the module-scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
/*
* Get a ref incase userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
@@ -1527,6 +1594,7 @@ err_netlink:
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
err_vzalloc:
kfree(info->name);
info->name = NULL;
@@ -1534,37 +1602,11 @@ err_vzalloc:
return ret;
}
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
return udev->uio_info.uio_dev ? true : false;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
- int i;
- struct page *page;
-
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1576,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
del_timer_sync(&udev->timeout);
@@ -1586,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- vfree(udev->mb_addr);
-
- /* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
- }
- idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
- WARN_ON(!all_expired);
-
- tcmu_blocks_release(udev);
-
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
uio_unregister_device(&udev->uio_info);
@@ -1610,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_err,
+ Opt_nl_reply_supported, Opt_err,
};
static match_table_t tokens = {
@@ -1618,6 +1643,7 @@ static match_table_t tokens = {
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_err, NULL}
};
@@ -1692,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
+ case Opt_nl_reply_supported:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+ kfree(arg_p);
+ if (ret < 0)
+ pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ break;
default:
break;
}
@@ -1734,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
- struct tcmu_dev *udev = container_of(da->da_dev,
- struct tcmu_dev, se_dev);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
@@ -1842,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR(tcmu_, dev_size);
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
@@ -1884,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_nl_reply_supported,
NULL,
};
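The target_core_user.c changes defer command-id allocation and the timeout deadline from tcmu_alloc_cmd() to the point where the command is actually queued to the ring (tcmu_setup_cmd_timer()). A hedged compile-in-kernel sketch of that deferred-setup pattern (hypothetical types, locking omitted, not part of the patch):

    #include <linux/idr.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/gfp.h>

    struct demo_dev {
            struct idr commands;
            struct timer_list timeout;
            unsigned long cmd_time_out;     /* in ms; 0 means no per-command timeout */
    };

    struct demo_cmd {
            int cmd_id;
            unsigned long deadline;
    };

    static int demo_setup_cmd_timer(struct demo_dev *udev, struct demo_cmd *cmd)
    {
            int id;

            if (cmd->cmd_id)
                    return 0;               /* id already assigned on an earlier attempt */

            id = idr_alloc(&udev->commands, cmd, 1, USHRT_MAX, GFP_NOWAIT);
            if (id < 0)
                    return id;              /* caller can back out; nothing committed yet */
            cmd->cmd_id = id;

            if (!udev->cmd_time_out)
                    return 0;

            cmd->deadline = round_jiffies_up(jiffies +
                                             msecs_to_jiffies(udev->cmd_time_out));
            mod_timer(&udev->timeout, cmd->deadline);
            return 0;
    }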
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 7c0b105cbe1b..26ba4c3c9cff 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <target/target_core_base.h>
#define XCOPY_HDR_LEN 16
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 20b14bb087c9..a7d1593ab5af 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
tcm_fc-y += tfc_cmd.o \
tfc_conf.o \
tfc_io.o \
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
index 7a4e4a1ac39c..21f51fd88b07 100644
--- a/drivers/tee/Makefile
+++ b/drivers/tee/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEE) += tee.o
tee-objs += tee_core.o
tee-objs += tee_shm.o
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 92fe5789bcce..d526fb88d9c5 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OPTEE) += optee.o
optee-objs += core.o
optee-objs += call.o
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 07002df4f83a..315ae2926e20 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -206,6 +206,7 @@ config HISI_THERMAL
config IMX_THERMAL
tristate "Temperature sensor driver for Freescale i.MX SoCs"
depends on (ARCH_MXC && CPU_THERMAL) || COMPILE_TEST
+ depends on NVMEM || !NVMEM
depends on MFD_SYSCON
depends on OF
help
@@ -408,7 +409,7 @@ config MTK_THERMAL
controller present in Mediatek SoCs
menu "Broadcom thermal drivers"
-depends on ARCH_BCM || COMPILE_TEST
+depends on ARCH_BCM || ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
source "drivers/thermal/broadcom/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 8b79bca23536..610344eb3e03 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for sensor chip drivers.
#
@@ -54,7 +55,7 @@ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_QCOM_TSENS) += qcom/
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
+obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index ae75328945f7..706d74798cbe 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -58,7 +58,7 @@ struct armada_thermal_data {
/* Test for a valid sensor value (optional) */
bool (*is_valid)(struct armada_thermal_priv *);
- /* Formula coeficients: temp = (b + m * reg) / div */
+ /* Formula coefficients: temp = (b - m * reg) / div */
unsigned long coef_b;
unsigned long coef_m;
unsigned long coef_div;
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig
index 42c098e86f84..c106a15bf7f9 100644
--- a/drivers/thermal/broadcom/Kconfig
+++ b/drivers/thermal/broadcom/Kconfig
@@ -6,6 +6,13 @@ config BCM2835_THERMAL
help
Support for thermal sensors on Broadcom bcm2835 SoCs.
+config BRCMSTB_THERMAL
+ tristate "Broadcom STB AVS TMON thermal driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ Enable this driver if you have a Broadcom STB SoC and would like
+ thermal framework support.
+
config BCM_NS_THERMAL
tristate "Northstar thermal driver"
depends on ARCH_BCM_IPROC || COMPILE_TEST
diff --git a/drivers/thermal/broadcom/Makefile b/drivers/thermal/broadcom/Makefile
index c6f62e4fd0ee..fae10ecafaef 100644
--- a/drivers/thermal/broadcom/Makefile
+++ b/drivers/thermal/broadcom/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_BCM2835_THERMAL) += bcm2835_thermal.o
+obj-$(CONFIG_BRCMSTB_THERMAL) += brcmstb_thermal.o
obj-$(CONFIG_BCM_NS_THERMAL) += ns-thermal.o
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
new file mode 100644
index 000000000000..1919f91fa756
--- /dev/null
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -0,0 +1,387 @@
+/*
+ * Broadcom STB AVS TMON thermal sensor driver
+ *
+ * Copyright (c) 2015-2017 Broadcom
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRV_NAME "brcmstb_thermal"
+
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+
+#define AVS_TMON_STATUS 0x00
+ #define AVS_TMON_STATUS_valid_msk BIT(11)
+ #define AVS_TMON_STATUS_data_msk GENMASK(10, 1)
+ #define AVS_TMON_STATUS_data_shift 1
+
+#define AVS_TMON_EN_OVERTEMP_RESET 0x04
+ #define AVS_TMON_EN_OVERTEMP_RESET_msk BIT(0)
+
+#define AVS_TMON_RESET_THRESH 0x08
+ #define AVS_TMON_RESET_THRESH_msk GENMASK(10, 1)
+ #define AVS_TMON_RESET_THRESH_shift 1
+
+#define AVS_TMON_INT_IDLE_TIME 0x10
+
+#define AVS_TMON_EN_TEMP_INT_SRCS 0x14
+ #define AVS_TMON_EN_TEMP_INT_SRCS_high BIT(1)
+ #define AVS_TMON_EN_TEMP_INT_SRCS_low BIT(0)
+
+#define AVS_TMON_INT_THRESH 0x18
+ #define AVS_TMON_INT_THRESH_high_msk GENMASK(26, 17)
+ #define AVS_TMON_INT_THRESH_high_shift 17
+ #define AVS_TMON_INT_THRESH_low_msk GENMASK(10, 1)
+ #define AVS_TMON_INT_THRESH_low_shift 1
+
+#define AVS_TMON_TEMP_INT_CODE 0x1c
+#define AVS_TMON_TP_TEST_ENABLE 0x20
+
+/* Default coefficients */
+#define AVS_TMON_TEMP_SLOPE -487
+#define AVS_TMON_TEMP_OFFSET 410040
+
+/* HW related temperature constants */
+#define AVS_TMON_TEMP_MAX 0x3ff
+#define AVS_TMON_TEMP_MIN -88161
+#define AVS_TMON_TEMP_MASK AVS_TMON_TEMP_MAX
+
+enum avs_tmon_trip_type {
+ TMON_TRIP_TYPE_LOW = 0,
+ TMON_TRIP_TYPE_HIGH,
+ TMON_TRIP_TYPE_RESET,
+ TMON_TRIP_TYPE_MAX,
+};
+
+struct avs_tmon_trip {
+ /* HW bit to enable the trip */
+ u32 enable_offs;
+ u32 enable_mask;
+
+ /* HW field to read the trip temperature */
+ u32 reg_offs;
+ u32 reg_msk;
+ int reg_shift;
+};
+
+static struct avs_tmon_trip avs_tmon_trips[] = {
+ /* Trips when temperature is below threshold */
+ [TMON_TRIP_TYPE_LOW] = {
+ .enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
+ .enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_low,
+ .reg_offs = AVS_TMON_INT_THRESH,
+ .reg_msk = AVS_TMON_INT_THRESH_low_msk,
+ .reg_shift = AVS_TMON_INT_THRESH_low_shift,
+ },
+ /* Trips when temperature is above threshold */
+ [TMON_TRIP_TYPE_HIGH] = {
+ .enable_offs = AVS_TMON_EN_TEMP_INT_SRCS,
+ .enable_mask = AVS_TMON_EN_TEMP_INT_SRCS_high,
+ .reg_offs = AVS_TMON_INT_THRESH,
+ .reg_msk = AVS_TMON_INT_THRESH_high_msk,
+ .reg_shift = AVS_TMON_INT_THRESH_high_shift,
+ },
+ /* Automatically resets chip when above threshold */
+ [TMON_TRIP_TYPE_RESET] = {
+ .enable_offs = AVS_TMON_EN_OVERTEMP_RESET,
+ .enable_mask = AVS_TMON_EN_OVERTEMP_RESET_msk,
+ .reg_offs = AVS_TMON_RESET_THRESH,
+ .reg_msk = AVS_TMON_RESET_THRESH_msk,
+ .reg_shift = AVS_TMON_RESET_THRESH_shift,
+ },
+};
+
+struct brcmstb_thermal_priv {
+ void __iomem *tmon_base;
+ struct device *dev;
+ struct thermal_zone_device *thermal;
+};
+
+static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
+ int *offset)
+{
+ *slope = thermal_zone_get_slope(tz);
+ *offset = thermal_zone_get_offset(tz);
+}
+
+/* Convert a HW code to a temperature reading (millidegree celsius) */
+static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
+ u32 code)
+{
+ const int val = code & AVS_TMON_TEMP_MASK;
+ int slope, offset;
+
+ avs_tmon_get_coeffs(tz, &slope, &offset);
+
+ return slope * val + offset;
+}
+
+/*
+ * Convert a temperature value (millidegree celsius) to a HW code
+ *
+ * @temp: temperature to convert
+ * @low: if true, round toward the low side
+ */
+static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
+ int temp, bool low)
+{
+ int slope, offset;
+
+ if (temp < AVS_TMON_TEMP_MIN)
+ return AVS_TMON_TEMP_MAX; /* Maximum code value */
+
+ avs_tmon_get_coeffs(tz, &slope, &offset);
+
+ if (temp >= offset)
+ return 0; /* Minimum code value */
+
+ if (low)
+ return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
+ else
+ return (u32)((offset - temp) / abs(slope));
+}
+
+static int brcmstb_get_temp(void *data, int *temp)
+{
+ struct brcmstb_thermal_priv *priv = data;
+ u32 val;
+ long t;
+
+ val = __raw_readl(priv->tmon_base + AVS_TMON_STATUS);
+
+ if (!(val & AVS_TMON_STATUS_valid_msk)) {
+ dev_err(priv->dev, "reading not valid\n");
+ return -EIO;
+ }
+
+ val = (val & AVS_TMON_STATUS_data_msk) >> AVS_TMON_STATUS_data_shift;
+
+ t = avs_tmon_code_to_temp(priv->thermal, val);
+ if (t < 0)
+ *temp = 0;
+ else
+ *temp = t;
+
+ return 0;
+}
+
+static void avs_tmon_trip_enable(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type, int en)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val = __raw_readl(priv->tmon_base + trip->enable_offs);
+
+ dev_dbg(priv->dev, "%sable trip, type %d\n", en ? "en" : "dis", type);
+
+ if (en)
+ val |= trip->enable_mask;
+ else
+ val &= ~trip->enable_mask;
+
+ __raw_writel(val, priv->tmon_base + trip->enable_offs);
+}
+
+static int avs_tmon_get_trip_temp(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val = __raw_readl(priv->tmon_base + trip->reg_offs);
+
+ val &= trip->reg_msk;
+ val >>= trip->reg_shift;
+
+ return avs_tmon_code_to_temp(priv->thermal, val);
+}
+
+static void avs_tmon_set_trip_temp(struct brcmstb_thermal_priv *priv,
+ enum avs_tmon_trip_type type,
+ int temp)
+{
+ struct avs_tmon_trip *trip = &avs_tmon_trips[type];
+ u32 val, orig;
+
+ dev_dbg(priv->dev, "set temp %d to %d\n", type, temp);
+
+ /* round toward low temp for the low interrupt */
+ val = avs_tmon_temp_to_code(priv->thermal, temp,
+ type == TMON_TRIP_TYPE_LOW);
+
+ val <<= trip->reg_shift;
+ val &= trip->reg_msk;
+
+ orig = __raw_readl(priv->tmon_base + trip->reg_offs);
+ orig &= ~trip->reg_msk;
+ orig |= val;
+ __raw_writel(orig, priv->tmon_base + trip->reg_offs);
+}
+
+static int avs_tmon_get_intr_temp(struct brcmstb_thermal_priv *priv)
+{
+ u32 val;
+
+ val = __raw_readl(priv->tmon_base + AVS_TMON_TEMP_INT_CODE);
+ return avs_tmon_code_to_temp(priv->thermal, val);
+}
+
+static irqreturn_t brcmstb_tmon_irq_thread(int irq, void *data)
+{
+ struct brcmstb_thermal_priv *priv = data;
+ int low, high, intr;
+
+ low = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_LOW);
+ high = avs_tmon_get_trip_temp(priv, TMON_TRIP_TYPE_HIGH);
+ intr = avs_tmon_get_intr_temp(priv);
+
+ dev_dbg(priv->dev, "low/intr/high: %d/%d/%d\n",
+ low, intr, high);
+
+ /* Disable high-temp until next threshold shift */
+ if (intr >= high)
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
+ /* Disable low-temp until next threshold shift */
+ if (intr <= low)
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
+
+ /*
+ * Notify using the interrupt temperature, in case the temperature
+ * changes before it can next be read out
+ */
+ of_thermal_handle_trip_temp(priv->thermal, intr);
+
+ return IRQ_HANDLED;
+}
+
+static int brcmstb_set_trips(void *data, int low, int high)
+{
+ struct brcmstb_thermal_priv *priv = data;
+
+ dev_dbg(priv->dev, "set trips %d <--> %d\n", low, high);
+
+ /*
+ * Disable low-temp if "low" is too small. As per thermal framework
+ * API, we use -INT_MAX rather than INT_MIN.
+ */
+ if (low <= -INT_MAX) {
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 0);
+ } else {
+ avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_LOW, low);
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_LOW, 1);
+ }
+
+ /* Disable high-temp if "high" is too big. */
+ if (high == INT_MAX) {
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 0);
+ } else {
+ avs_tmon_set_trip_temp(priv, TMON_TRIP_TYPE_HIGH, high);
+ avs_tmon_trip_enable(priv, TMON_TRIP_TYPE_HIGH, 1);
+ }
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops of_ops = {
+ .get_temp = brcmstb_get_temp,
+ .set_trips = brcmstb_set_trips,
+};
+
+static const struct of_device_id brcmstb_thermal_id_table[] = {
+ { .compatible = "brcm,avs-tmon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_thermal_id_table);
+
+static int brcmstb_thermal_probe(struct platform_device *pdev)
+{
+ struct thermal_zone_device *thermal;
+ struct brcmstb_thermal_priv *priv;
+ struct resource *res;
+ int irq, ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->tmon_base))
+ return PTR_ERR(priv->tmon_base);
+
+ priv->dev = &pdev->dev;
+ platform_set_drvdata(pdev, priv);
+
+ thermal = thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &of_ops);
+ if (IS_ERR(thermal)) {
+ ret = PTR_ERR(thermal);
+ dev_err(&pdev->dev, "could not register sensor: %d\n", ret);
+ return ret;
+ }
+
+ priv->thermal = thermal;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "could not get IRQ\n");
+ ret = irq;
+ goto err;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ brcmstb_tmon_irq_thread, IRQF_ONESHOT,
+ DRV_NAME, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "registered AVS TMON of-sensor driver\n");
+
+ return 0;
+
+err:
+ thermal_zone_of_sensor_unregister(&pdev->dev, thermal);
+ return ret;
+}
+
+static int brcmstb_thermal_exit(struct platform_device *pdev)
+{
+ struct brcmstb_thermal_priv *priv = platform_get_drvdata(pdev);
+ struct thermal_zone_device *thermal = priv->thermal;
+
+ if (thermal)
+ thermal_zone_of_sensor_unregister(&pdev->dev, priv->thermal);
+
+ return 0;
+}
+
+static struct platform_driver brcmstb_thermal_driver = {
+ .probe = brcmstb_thermal_probe,
+ .remove = brcmstb_thermal_exit,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = brcmstb_thermal_id_table,
+ },
+};
+module_platform_driver(brcmstb_thermal_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("Broadcom STB AVS TMON thermal driver");
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 908a8014cf76..dc63aba092e4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -696,7 +696,7 @@ __cpufreq_cooling_register(struct device_node *np,
bool first;
if (IS_ERR_OR_NULL(policy)) {
- pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy);
+ pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index bd3572c41585..2d855a96cdd9 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -23,222 +23,450 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/of_device.h>
#include "thermal_core.h"
-#define TEMP0_TH (0x4)
-#define TEMP0_RST_TH (0x8)
-#define TEMP0_CFG (0xC)
-#define TEMP0_EN (0x10)
-#define TEMP0_INT_EN (0x14)
-#define TEMP0_INT_CLR (0x18)
-#define TEMP0_RST_MSK (0x1C)
-#define TEMP0_VALUE (0x28)
-
-#define HISI_TEMP_BASE (-60)
-#define HISI_TEMP_RESET (100000)
-
-#define HISI_MAX_SENSORS 4
+#define HI6220_TEMP0_LAG (0x0)
+#define HI6220_TEMP0_TH (0x4)
+#define HI6220_TEMP0_RST_TH (0x8)
+#define HI6220_TEMP0_CFG (0xC)
+#define HI6220_TEMP0_CFG_SS_MSK (0xF000)
+#define HI6220_TEMP0_CFG_HDAK_MSK (0x30)
+#define HI6220_TEMP0_EN (0x10)
+#define HI6220_TEMP0_INT_EN (0x14)
+#define HI6220_TEMP0_INT_CLR (0x18)
+#define HI6220_TEMP0_RST_MSK (0x1C)
+#define HI6220_TEMP0_VALUE (0x28)
+
+#define HI3660_OFFSET(chan) ((chan) * 0x40)
+#define HI3660_TEMP(chan) (HI3660_OFFSET(chan) + 0x1C)
+#define HI3660_TH(chan) (HI3660_OFFSET(chan) + 0x20)
+#define HI3660_LAG(chan) (HI3660_OFFSET(chan) + 0x28)
+#define HI3660_INT_EN(chan) (HI3660_OFFSET(chan) + 0x2C)
+#define HI3660_INT_CLR(chan) (HI3660_OFFSET(chan) + 0x30)
+
+#define HI6220_TEMP_BASE (-60000)
+#define HI6220_TEMP_RESET (100000)
+#define HI6220_TEMP_STEP (785)
+#define HI6220_TEMP_LAG (3500)
+
+#define HI3660_TEMP_BASE (-63780)
+#define HI3660_TEMP_STEP (205)
+#define HI3660_TEMP_LAG (4000)
+
+#define HI6220_DEFAULT_SENSOR 2
+#define HI3660_DEFAULT_SENSOR 1
struct hisi_thermal_sensor {
- struct hisi_thermal_data *thermal;
struct thermal_zone_device *tzd;
-
- long sensor_temp;
uint32_t id;
uint32_t thres_temp;
};
struct hisi_thermal_data {
- struct mutex thermal_lock; /* protects register data */
+ int (*get_temp)(struct hisi_thermal_data *data);
+ int (*enable_sensor)(struct hisi_thermal_data *data);
+ int (*disable_sensor)(struct hisi_thermal_data *data);
+ int (*irq_handler)(struct hisi_thermal_data *data);
struct platform_device *pdev;
struct clk *clk;
- struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS];
-
- int irq, irq_bind_sensor;
- bool irq_enabled;
-
+ struct hisi_thermal_sensor sensor;
void __iomem *regs;
+ int irq;
};
-/* in millicelsius */
-static inline int _step_to_temp(int step)
+/*
+ * The temperature computation on the tsensor is as follows:
+ * Unit: millidegree Celsius
+ * Step: 200/255 (0.7843)
+ * Temperature base: -60°C
+ *
+ * The register is programmed in temperature steps, every step is 785
+ * millidegrees and the scale begins at -60 000 m°C
+ *
+ * The temperature from the steps:
+ *
+ * Temp = TempBase + (steps x 785)
+ *
+ * and the steps from the temperature:
+ *
+ * steps = (Temp - TempBase) / 785
+ *
+ */
+static inline int hi6220_thermal_step_to_temp(int step)
{
- /*
- * Every step equals (1 * 200) / 255 celsius, and finally
- * need convert to millicelsius.
- */
- return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
+ return HI6220_TEMP_BASE + (step * HI6220_TEMP_STEP);
}
-static inline long _temp_to_step(long temp)
+static inline int hi6220_thermal_temp_to_step(int temp)
{
- return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
+ return DIV_ROUND_UP(temp - HI6220_TEMP_BASE, HI6220_TEMP_STEP);
}
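To make the new step arithmetic concrete, here is a standalone sketch that is not part of the patch (the 65 °C threshold is an arbitrary example value):

#include <stdio.h>

#define TEMP_BASE	(-60000)	/* HI6220_TEMP_BASE */
#define TEMP_STEP	785		/* HI6220_TEMP_STEP */

int main(void)
{
	int temp = 65000;						/* requested threshold, mC */
	int step = (temp - TEMP_BASE + TEMP_STEP - 1) / TEMP_STEP;	/* DIV_ROUND_UP -> 160 */
	int prog = TEMP_BASE + step * TEMP_STEP;			/* 65600 mC actually programmed */

	printf("%d mC -> step %d -> %d mC\n", temp, step, prog);
	return 0;
}

Because the step is rounded up, the programmed threshold can sit up to one step (785 m°C) above the requested value.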
-static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
- struct hisi_thermal_sensor *sensor)
+/*
+ * for Hi3660,
+ * Step: 189/922 (0.205)
+ * Temperature base: -63.780°C
+ *
+ * The register is programmed in temperature steps, every step is 205
+ * millidegrees and the scale begins at -63 780 m°C
+ */
+static inline int hi3660_thermal_step_to_temp(int step)
{
- long val;
+ return HI3660_TEMP_BASE + step * HI3660_TEMP_STEP;
+}
- mutex_lock(&data->thermal_lock);
+static inline int hi3660_thermal_temp_to_step(int temp)
+{
+ return DIV_ROUND_UP(temp - HI3660_TEMP_BASE, HI3660_TEMP_STEP);
+}
- /* disable interrupt */
- writel(0x0, data->regs + TEMP0_INT_EN);
- writel(0x1, data->regs + TEMP0_INT_CLR);
+/*
+ * The lag register contains 5 bits encoding the temperature in steps.
+ *
+ * Each time the temperature crosses the threshold boundary, an
+ * interrupt is raised. It could be when the temperature is going
+ * above the threshold or below. However, if the temperature is
+ * fluctuating around this value due to the load, we can receive
+ * several interrupts, which may not be desired.
+ *
+ * We can set up a temperature representing the delta between the
+ * threshold and the current temperature when the temperature is
+ * decreasing.
+ *
+ * For instance: the lag register is 5°C, the threshold is 65°C, when
+ * the temperature reaches 65°C an interrupt is raised and when the
+ * temperature decreases to 65°C - 5°C another interrupt is raised.
+ *
+ * A very short lag can lead to an interrupt storm, while a long lag
+ * increases the latency to react to the temperature changes. In our
+ * case, that is not really a problem as we are polling the
+ * temperature.
+ *
+ * [0:4] : lag register
+ *
+ * The temperature is coded in steps, cf. HI6220_TEMP_STEP.
+ *
+ * Min : 0x00 : 0.0 °C
+ * Max : 0x1F : 24.3 °C
+ *
+ * The 'value' parameter is in milliCelsius.
+ */
+static inline void hi6220_thermal_set_lag(void __iomem *addr, int value)
+{
+ writel(DIV_ROUND_UP(value, HI6220_TEMP_STEP) & 0x1F,
+ addr + HI6220_TEMP0_LAG);
+}
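For the default lag used later in this file, the quantization works out as follows (sketch only, not part of the patch):

#include <stdio.h>

#define TEMP_STEP	785	/* HI6220_TEMP_STEP */
#define TEMP_LAG	3500	/* HI6220_TEMP_LAG, millidegrees C */

int main(void)
{
	int steps = (TEMP_LAG + TEMP_STEP - 1) / TEMP_STEP;	/* DIV_ROUND_UP -> 5 */

	/* about 3925 mC of hysteresis below the threshold before the alarm re-arms */
	printf("lag %d mC -> %d steps (%d mC)\n", TEMP_LAG, steps, steps * TEMP_STEP);
	return 0;
}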
- /* disable module firstly */
- writel(0x0, data->regs + TEMP0_EN);
+static inline void hi6220_thermal_alarm_clear(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_INT_CLR);
+}
- /* select sensor id */
- writel((sensor->id << 12), data->regs + TEMP0_CFG);
+static inline void hi6220_thermal_alarm_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_INT_EN);
+}
- /* enable module */
- writel(0x1, data->regs + TEMP0_EN);
+static inline void hi6220_thermal_alarm_set(void __iomem *addr, int temp)
+{
+ writel(hi6220_thermal_temp_to_step(temp) | 0x0FFFFFF00,
+ addr + HI6220_TEMP0_TH);
+}
- usleep_range(3000, 5000);
+static inline void hi6220_thermal_reset_set(void __iomem *addr, int temp)
+{
+ writel(hi6220_thermal_temp_to_step(temp), addr + HI6220_TEMP0_RST_TH);
+}
- val = readl(data->regs + TEMP0_VALUE);
- val = _step_to_temp(val);
+static inline void hi6220_thermal_reset_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_RST_MSK);
+}
- mutex_unlock(&data->thermal_lock);
+static inline void hi6220_thermal_enable(void __iomem *addr, int value)
+{
+ writel(value, addr + HI6220_TEMP0_EN);
+}
- return val;
+static inline int hi6220_thermal_get_temperature(void __iomem *addr)
+{
+ return hi6220_thermal_step_to_temp(readl(addr + HI6220_TEMP0_VALUE));
}
-static void hisi_thermal_enable_bind_irq_sensor
- (struct hisi_thermal_data *data)
+/*
+ * [0:6] lag register
+ *
+ * The temperature is coded in steps, cf. HI3660_TEMP_STEP.
+ *
+ * Min : 0x00 : 0.0 °C
+ * Max : 0x7F : 26.0 °C
+ *
+ */
+static inline void hi3660_thermal_set_lag(void __iomem *addr,
+ int id, int value)
{
- struct hisi_thermal_sensor *sensor;
+ writel(DIV_ROUND_UP(value, HI3660_TEMP_STEP) & 0x7F,
+ addr + HI3660_LAG(id));
+}
- mutex_lock(&data->thermal_lock);
+static inline void hi3660_thermal_alarm_clear(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_INT_CLR(id));
+}
- sensor = &data->sensors[data->irq_bind_sensor];
+static inline void hi3660_thermal_alarm_enable(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_INT_EN(id));
+}
- /* setting the hdak time */
- writel(0x0, data->regs + TEMP0_CFG);
+static inline void hi3660_thermal_alarm_set(void __iomem *addr,
+ int id, int value)
+{
+ writel(value, addr + HI3660_TH(id));
+}
+
+static inline int hi3660_thermal_get_temperature(void __iomem *addr, int id)
+{
+ return hi3660_thermal_step_to_temp(readl(addr + HI3660_TEMP(id)));
+}
+
+/*
+ * Temperature configuration register - Sensor selection
+ *
+ * Bits [19:12]
+ *
+ * 0x0: local sensor (default)
+ * 0x1: remote sensor 1 (ACPU cluster 1)
+ * 0x2: remote sensor 2 (ACPU cluster 0)
+ * 0x3: remote sensor 3 (G3D)
+ */
+static inline void hi6220_thermal_sensor_select(void __iomem *addr, int sensor)
+{
+ writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_SS_MSK) |
+ (sensor << 12), addr + HI6220_TEMP0_CFG);
+}
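The read-modify-write above only touches the sensor-select field covered by HI6220_TEMP0_CFG_SS_MSK; here is a standalone sketch of the same field update, with a hypothetical starting register value (not part of the patch):

#include <stdio.h>

#define CFG_SS_MSK	0xF000	/* HI6220_TEMP0_CFG_SS_MSK */

int main(void)
{
	unsigned int cfg = 0x0023;	/* hypothetical current TEMP0_CFG value */
	unsigned int sensor = 2;	/* HI6220_DEFAULT_SENSOR: remote sensor 2 */

	cfg = (cfg & ~CFG_SS_MSK) | (sensor << 12);
	printf("TEMP0_CFG = 0x%04x\n", cfg);	/* 0x2023: other bits untouched */
	return 0;
}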
+
+/*
+ * Temperature configuration register - Hdak conversion polling interval
+ *
+ * Bits [5:4]
+ *
+ * 0x0 : 0.768 ms
+ * 0x1 : 6.144 ms
+ * 0x2 : 49.152 ms
+ * 0x3 : 393.216 ms
+ */
+static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
+{
+ writel((readl(addr + HI6220_TEMP0_CFG) & ~HI6220_TEMP0_CFG_HDAK_MSK) |
+ (value << 4), addr + HI6220_TEMP0_CFG);
+}
+
+static int hi6220_thermal_irq_handler(struct hisi_thermal_data *data)
+{
+ hi6220_thermal_alarm_clear(data->regs, 1);
+ return 0;
+}
+
+static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data)
+{
+ hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1);
+ return 0;
+}
+
+static int hi6220_thermal_get_temp(struct hisi_thermal_data *data)
+{
+ return hi6220_thermal_get_temperature(data->regs);
+}
+
+static int hi3660_thermal_get_temp(struct hisi_thermal_data *data)
+{
+ return hi3660_thermal_get_temperature(data->regs, data->sensor.id);
+}
+
+static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
+{
+ /* disable sensor module */
+ hi6220_thermal_enable(data->regs, 0);
+ hi6220_thermal_alarm_enable(data->regs, 0);
+ hi6220_thermal_reset_enable(data->regs, 0);
+
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data)
+{
+ /* disable sensor module */
+ hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0);
+ return 0;
+}
+
+static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
+{
+ struct hisi_thermal_sensor *sensor = &data->sensor;
+ int ret;
+
+ /* enable clock for tsensor */
+ ret = clk_prepare_enable(data->clk);
+ if (ret)
+ return ret;
/* disable module firstly */
- writel(0x0, data->regs + TEMP0_RST_MSK);
- writel(0x0, data->regs + TEMP0_EN);
+ hi6220_thermal_reset_enable(data->regs, 0);
+ hi6220_thermal_enable(data->regs, 0);
/* select sensor id */
- writel((sensor->id << 12), data->regs + TEMP0_CFG);
+ hi6220_thermal_sensor_select(data->regs, sensor->id);
+
+ /* setting the hdak time */
+ hi6220_thermal_hdak_set(data->regs, 0);
+
+ /* setting lag value between current temp and the threshold */
+ hi6220_thermal_set_lag(data->regs, HI6220_TEMP_LAG);
/* enable for interrupt */
- writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00,
- data->regs + TEMP0_TH);
+ hi6220_thermal_alarm_set(data->regs, sensor->thres_temp);
- writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH);
+ hi6220_thermal_reset_set(data->regs, HI6220_TEMP_RESET);
/* enable module */
- writel(0x1, data->regs + TEMP0_RST_MSK);
- writel(0x1, data->regs + TEMP0_EN);
-
- writel(0x0, data->regs + TEMP0_INT_CLR);
- writel(0x1, data->regs + TEMP0_INT_EN);
+ hi6220_thermal_reset_enable(data->regs, 1);
+ hi6220_thermal_enable(data->regs, 1);
- usleep_range(3000, 5000);
+ hi6220_thermal_alarm_clear(data->regs, 0);
+ hi6220_thermal_alarm_enable(data->regs, 1);
- mutex_unlock(&data->thermal_lock);
+ return 0;
}
-static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data)
{
- mutex_lock(&data->thermal_lock);
+ unsigned int value;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- /* disable sensor module */
- writel(0x0, data->regs + TEMP0_INT_EN);
- writel(0x0, data->regs + TEMP0_RST_MSK);
- writel(0x0, data->regs + TEMP0_EN);
+ /* disable interrupt */
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
- mutex_unlock(&data->thermal_lock);
-}
+ /* setting lag value between current temp and the threshold */
+ hi3660_thermal_set_lag(data->regs, sensor->id, HI3660_TEMP_LAG);
-static int hisi_thermal_get_temp(void *_sensor, int *temp)
-{
- struct hisi_thermal_sensor *sensor = _sensor;
- struct hisi_thermal_data *data = sensor->thermal;
+ /* set interrupt threshold */
+ value = hi3660_thermal_temp_to_step(sensor->thres_temp);
+ hi3660_thermal_alarm_set(data->regs, sensor->id, value);
- int sensor_id = -1, i;
- long max_temp = 0;
+ /* enable interrupt */
+ hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 1);
- *temp = hisi_thermal_get_sensor_temp(data, sensor);
+ return 0;
+}
- sensor->sensor_temp = *temp;
+static int hi6220_thermal_probe(struct hisi_thermal_data *data)
+{
+ struct platform_device *pdev = data->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- if (!data->sensors[i].tzd)
- continue;
+ data->get_temp = hi6220_thermal_get_temp;
+ data->enable_sensor = hi6220_thermal_enable_sensor;
+ data->disable_sensor = hi6220_thermal_disable_sensor;
+ data->irq_handler = hi6220_thermal_irq_handler;
- if (data->sensors[i].sensor_temp >= max_temp) {
- max_temp = data->sensors[i].sensor_temp;
- sensor_id = i;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
}
- /* If no sensor has been enabled, then skip to enable irq */
- if (sensor_id == -1)
- return 0;
-
- mutex_lock(&data->thermal_lock);
- data->irq_bind_sensor = sensor_id;
- mutex_unlock(&data->thermal_lock);
-
- dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%d, thres=%d\n",
- sensor->id, data->irq_enabled, *temp, sensor->thres_temp);
- /*
- * Bind irq to sensor for two cases:
- * Reenable alarm IRQ if temperature below threshold;
- * if irq has been enabled, always set it;
- */
- if (data->irq_enabled) {
- hisi_thermal_enable_bind_irq_sensor(data);
- return 0;
+ data->clk = devm_clk_get(dev, "thermal_clk");
+ if (IS_ERR(data->clk)) {
+ ret = PTR_ERR(data->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get thermal clk: %d\n", ret);
+ return ret;
}
- if (max_temp < sensor->thres_temp) {
- data->irq_enabled = true;
- hisi_thermal_enable_bind_irq_sensor(data);
- enable_irq(data->irq);
- }
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->sensor.id = HI6220_DEFAULT_SENSOR;
return 0;
}
-static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
- .get_temp = hisi_thermal_get_temp,
-};
+static int hi3660_thermal_probe(struct hisi_thermal_data *data)
+{
+ struct platform_device *pdev = data->pdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ data->get_temp = hi3660_thermal_get_temp;
+ data->enable_sensor = hi3660_thermal_enable_sensor;
+ data->disable_sensor = hi3660_thermal_disable_sensor;
+ data->irq_handler = hi3660_thermal_irq_handler;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
+ }
-static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev)
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->sensor.id = HI3660_DEFAULT_SENSOR;
+
+ return 0;
+}
+
+static int hisi_thermal_get_temp(void *__data, int *temp)
{
- struct hisi_thermal_data *data = dev;
+ struct hisi_thermal_data *data = __data;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- disable_irq_nosync(irq);
- data->irq_enabled = false;
+ *temp = data->get_temp(data);
- return IRQ_WAKE_THREAD;
+ dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n",
+ sensor->id, *temp, sensor->thres_temp);
+
+ return 0;
}
+static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
+ .get_temp = hisi_thermal_get_temp,
+};
+
static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
struct hisi_thermal_data *data = dev;
- struct hisi_thermal_sensor *sensor;
- int i;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
+ int temp = 0;
- mutex_lock(&data->thermal_lock);
- sensor = &data->sensors[data->irq_bind_sensor];
+ data->irq_handler(data);
- dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n",
- sensor->thres_temp / 1000);
- mutex_unlock(&data->thermal_lock);
+ hisi_thermal_get_temp(data, &temp);
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- if (!data->sensors[i].tzd)
- continue;
+ if (temp >= sensor->thres_temp) {
+ dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
+ temp, sensor->thres_temp);
- thermal_zone_device_update(data->sensors[i].tzd,
+ thermal_zone_device_update(data->sensor.tzd,
THERMAL_EVENT_UNSPECIFIED);
+
+ } else {
+ dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
+ temp, sensor->thres_temp);
}
return IRQ_HANDLED;
@@ -246,17 +474,14 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
static int hisi_thermal_register_sensor(struct platform_device *pdev,
struct hisi_thermal_data *data,
- struct hisi_thermal_sensor *sensor,
- int index)
+ struct hisi_thermal_sensor *sensor)
{
int ret, i;
const struct thermal_trip *trip;
- sensor->id = index;
- sensor->thermal = data;
-
sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, sensor, &hisi_of_thermal_ops);
+ sensor->id, data,
+ &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
sensor->tzd = NULL;
@@ -278,7 +503,14 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
}
static const struct of_device_id of_hisi_thermal_match[] = {
- { .compatible = "hisilicon,tsensor" },
+ {
+ .compatible = "hisilicon,tsensor",
+ .data = hi6220_thermal_probe
+ },
+ {
+ .compatible = "hisilicon,hi3660-tsensor",
+ .data = hi3660_thermal_probe
+ },
{ /* end */ }
};
MODULE_DEVICE_TABLE(of, of_hisi_thermal_match);
@@ -295,88 +527,63 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;
- struct resource *res;
- int i;
+ int const (*platform_probe)(struct hisi_thermal_data *);
+ struct device *dev = &pdev->dev;
int ret;
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- mutex_init(&data->thermal_lock);
data->pdev = pdev;
+ platform_set_drvdata(pdev, data);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(&pdev->dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
+ platform_probe = of_device_get_match_data(dev);
+ if (!platform_probe) {
+ dev_err(dev, "failed to get probe func\n");
+ return -EINVAL;
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
-
- ret = devm_request_threaded_irq(&pdev->dev, data->irq,
- hisi_thermal_alarm_irq,
- hisi_thermal_alarm_irq_thread,
- 0, "hisi_thermal", data);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
+ ret = platform_probe(data);
+ if (ret)
return ret;
- }
- platform_set_drvdata(pdev, data);
-
- data->clk = devm_clk_get(&pdev->dev, "thermal_clk");
- if (IS_ERR(data->clk)) {
- ret = PTR_ERR(data->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get thermal clk: %d\n", ret);
+ ret = hisi_thermal_register_sensor(pdev, data,
+ &data->sensor);
+ if (ret) {
+ dev_err(dev, "failed to register thermal sensor: %d\n", ret);
return ret;
}
- /* enable clock for thermal */
- ret = clk_prepare_enable(data->clk);
+ ret = data->enable_sensor(data);
if (ret) {
- dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ dev_err(dev, "Failed to setup the sensor: %d\n", ret);
return ret;
}
- hisi_thermal_enable_bind_irq_sensor(data);
- irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
- &data->irq_enabled);
-
- for (i = 0; i < HISI_MAX_SENSORS; ++i) {
- ret = hisi_thermal_register_sensor(pdev, data,
- &data->sensors[i], i);
- if (ret)
- dev_err(&pdev->dev,
- "failed to register thermal sensor: %d\n", ret);
- else
- hisi_thermal_toggle_sensor(&data->sensors[i], true);
+ if (data->irq) {
+ ret = devm_request_threaded_irq(dev, data->irq, NULL,
+ hisi_thermal_alarm_irq_thread,
+ IRQF_ONESHOT, "hisi_thermal", data);
+ if (ret < 0) {
+ dev_err(dev, "failed to request alarm irq: %d\n", ret);
+ return ret;
+ }
}
+ hisi_thermal_toggle_sensor(&data->sensor, true);
+
return 0;
}
static int hisi_thermal_remove(struct platform_device *pdev)
{
struct hisi_thermal_data *data = platform_get_drvdata(pdev);
- int i;
+ struct hisi_thermal_sensor *sensor = &data->sensor;
- for (i = 0; i < HISI_MAX_SENSORS; i++) {
- struct hisi_thermal_sensor *sensor = &data->sensors[i];
+ hisi_thermal_toggle_sensor(sensor, false);
- if (!sensor->tzd)
- continue;
-
- hisi_thermal_toggle_sensor(sensor, false);
- }
-
- hisi_thermal_disable_sensor(data);
- clk_disable_unprepare(data->clk);
+ data->disable_sensor(data);
return 0;
}
@@ -386,10 +593,7 @@ static int hisi_thermal_suspend(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
- hisi_thermal_disable_sensor(data);
- data->irq_enabled = false;
-
- clk_disable_unprepare(data->clk);
+ data->disable_sensor(data);
return 0;
}
@@ -397,16 +601,8 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(data->clk);
- if (ret)
- return ret;
-
- data->irq_enabled = true;
- hisi_thermal_enable_bind_irq_sensor(data);
-
- return 0;
+ return data->enable_sensor(data);
}
#endif
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 4798b4b1fd77..e7d4ffc3de7f 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
+#include <linux/nvmem-consumer.h>
#define REG_SET 0x4
#define REG_CLR 0x8
@@ -94,7 +95,7 @@ struct imx_thermal_data {
struct thermal_cooling_device *cdev;
enum thermal_device_mode mode;
struct regmap *tempmon;
- u32 c1, c2; /* See formula in imx_get_sensor_data() */
+ u32 c1, c2; /* See formula in imx_init_calib() */
int temp_passive;
int temp_critical;
int temp_max;
@@ -177,7 +178,7 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
- /* See imx_get_sensor_data() for formula derivation */
+ /* See imx_init_calib() for formula derivation */
*temp = data->c2 - n_meas * data->c1;
/* Update alarm value to next higher trip point for TEMPMON_IMX6Q */
@@ -346,29 +347,12 @@ static struct thermal_zone_device_ops imx_tz_ops = {
.set_trip_temp = imx_set_trip_temp,
};
-static int imx_get_sensor_data(struct platform_device *pdev)
+static int imx_init_calib(struct platform_device *pdev, u32 val)
{
struct imx_thermal_data *data = platform_get_drvdata(pdev);
- struct regmap *map;
int t1, n1;
- int ret;
- u32 val;
u64 temp64;
- map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "fsl,tempmon-data");
- if (IS_ERR(map)) {
- ret = PTR_ERR(map);
- dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
- return ret;
- }
-
- ret = regmap_read(map, OCOTP_ANA1, &val);
- if (ret) {
- dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
- return ret;
- }
-
if (val == 0 || val == ~0) {
dev_err(&pdev->dev, "invalid sensor calibration data\n");
return -EINVAL;
@@ -405,12 +389,12 @@ static int imx_get_sensor_data(struct platform_device *pdev)
data->c1 = temp64;
data->c2 = n1 * data->c1 + 1000 * t1;
- /* use OTP for thermal grade */
- ret = regmap_read(map, OCOTP_MEM0, &val);
- if (ret) {
- dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
- return ret;
- }
+ return 0;
+}
+
+static void imx_init_temp_grade(struct platform_device *pdev, u32 val)
+{
+ struct imx_thermal_data *data = platform_get_drvdata(pdev);
/* The maximum die temp is specified by the Temperature Grade */
switch ((val >> 6) & 0x3) {
@@ -438,6 +422,55 @@ static int imx_get_sensor_data(struct platform_device *pdev)
*/
data->temp_critical = data->temp_max - (1000 * 5);
data->temp_passive = data->temp_max - (1000 * 10);
+}
+
+static int imx_init_from_tempmon_data(struct platform_device *pdev)
+{
+ struct regmap *map;
+ int ret;
+ u32 val;
+
+ map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,tempmon-data");
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(&pdev->dev, "failed to get sensor regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(map, OCOTP_ANA1, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+ return ret;
+ }
+ ret = imx_init_calib(pdev, val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, OCOTP_MEM0, &val);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to read sensor data: %d\n", ret);
+ return ret;
+ }
+ imx_init_temp_grade(pdev, val);
+
+ return 0;
+}
+
+static int imx_init_from_nvmem_cells(struct platform_device *pdev)
+{
+ int ret;
+ u32 val;
+
+ ret = nvmem_cell_read_u32(&pdev->dev, "calib", &val);
+ if (ret)
+ return ret;
+ imx_init_calib(pdev, val);
+
+ ret = nvmem_cell_read_u32(&pdev->dev, "temp_grade", &val);
+ if (ret)
+ return ret;
+ imx_init_temp_grade(pdev, val);
return 0;
}
@@ -514,10 +547,21 @@ static int imx_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- ret = imx_get_sensor_data(pdev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get sensor data\n");
- return ret;
+ if (of_find_property(pdev->dev.of_node, "nvmem-cells", NULL)) {
+ ret = imx_init_from_nvmem_cells(pdev);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init from nvmem: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = imx_init_from_tempmon_data(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init from from fsl,tempmon-data\n");
+ return ret;
+ }
}
/* Make sure sensor is in known good state for measurements */
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index df0df055e7ff..287eb0a1476d 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
index 65075b174329..58822575fd54 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ACPI_ACPI_THERMAL_H
#define __ACPI_ACPI_THERMAL_H
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index f02341f7134d..80bbf9ce2fb6 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -30,6 +30,10 @@
/* Skylake thermal reporting device */
#define PCI_DEVICE_ID_PROC_SKL_THERMAL 0x1903
+/* CannonLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_CNL_THERMAL 0x5a03
+#define PCI_DEVICE_ID_PROC_CFL_THERMAL 0x3E83
+
/* Braswell thermal reporting device */
#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
@@ -461,6 +465,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)},
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CNL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_CFL_THERMAL)},
{ 0, },
};
diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c
index ef6b32242ccb..94cfd0064c43 100644
--- a/drivers/thermal/intel_bxt_pmic_thermal.c
+++ b/drivers/thermal/intel_bxt_pmic_thermal.c
@@ -166,7 +166,7 @@ static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
struct pmic_thermal_data *td;
struct intel_soc_pmic *pmic;
struct regmap *regmap;
- u8 reg_val, mask, irq_stat, trip;
+ u8 reg_val, mask, irq_stat;
u16 reg, evt_stat_reg;
int i, j, ret;
@@ -201,7 +201,6 @@ static irqreturn_t pmic_thermal_irq_handler(int irq, void *data)
if (regmap_read(regmap, evt_stat_reg, &ret))
return IRQ_HANDLED;
- trip = td->maps[i].trip_config[j].trip_num;
tzd = thermal_zone_get_zone_by_name(td->maps[i].handle);
if (!IS_ERR(tzd))
thermal_zone_device_update(tzd,
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index c60b1cfcc64e..8a7f69b4b022 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -30,6 +30,8 @@
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
#define PCH_THERMAL_DID_SKL_H 0xA131 /* Skylake PCH 100 series */
+#define PCH_THERMAL_DID_CNL 0x9Df9 /* CNL PCH */
+#define PCH_THERMAL_DID_CNL_H 0xA379 /* CNL-H PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -278,6 +280,7 @@ enum board_ids {
board_hsw,
board_wpt,
board_skl,
+ board_cnl,
};
static const struct board_info {
@@ -296,6 +299,10 @@ static const struct board_info {
.name = "pch_skylake",
.ops = &pch_dev_ops_wpt,
},
+ [board_cnl] = {
+ .name = "pch_cannonlake",
+ .ops = &pch_dev_ops_wpt,
+ },
};
static int intel_pch_thermal_probe(struct pci_dev *pdev,
@@ -398,6 +405,10 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
.driver_data = board_skl, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H),
.driver_data = board_skl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL),
+ .driver_data = board_cnl, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
+ .driver_data = board_cnl, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index d718cd179ddb..4540e892b61d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -675,13 +675,13 @@ static int __init powerclamp_probe(void)
{
if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("CPU does not support MWAIT");
+ pr_err("CPU does not support MWAIT\n");
return -ENODEV;
}
/* The goal for idle time alignment is to achieve package cstate. */
if (!has_pkg_state_counter()) {
- pr_info("No package C-state available");
+ pr_info("No package C-state available\n");
return -ENODEV;
}
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index f50241962ad2..95f987d5aa71 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -125,7 +125,7 @@ static int qpnp_tm_get_temp(void *data, int *temp)
if (!temp)
return -EINVAL;
- if (IS_ERR(chip->adc)) {
+ if (!chip->adc) {
ret = qpnp_tm_update_temp_no_adc(chip);
if (ret < 0)
return ret;
@@ -224,67 +224,53 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return irq;
/* ADC based measurements are optional */
- chip->adc = iio_channel_get(&pdev->dev, "thermal");
- if (PTR_ERR(chip->adc) == -EPROBE_DEFER)
- return PTR_ERR(chip->adc);
+ chip->adc = devm_iio_channel_get(&pdev->dev, "thermal");
+ if (IS_ERR(chip->adc)) {
+ ret = PTR_ERR(chip->adc);
+ chip->adc = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ }
chip->base = res;
ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type);
if (ret < 0) {
dev_err(&pdev->dev, "could not read type\n");
- goto fail;
+ return ret;
}
ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype);
if (ret < 0) {
dev_err(&pdev->dev, "could not read subtype\n");
- goto fail;
+ return ret;
}
if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
type, subtype);
- ret = -ENODEV;
- goto fail;
+ return -ENODEV;
}
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
- goto fail;
+ return ret;
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr,
IRQF_ONESHOT, node->name, chip);
if (ret < 0)
- goto fail;
+ return ret;
chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
&qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
- ret = PTR_ERR(chip->tz_dev);
- goto fail;
+ return PTR_ERR(chip->tz_dev);
}
return 0;
-
-fail:
- if (!IS_ERR(chip->adc))
- iio_channel_release(chip->adc);
-
- return ret;
-}
-
-static int qpnp_tm_remove(struct platform_device *pdev)
-{
- struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
-
- if (!IS_ERR(chip->adc))
- iio_channel_release(chip->adc);
-
- return 0;
}
static const struct of_device_id qpnp_tm_match_table[] = {
@@ -299,7 +285,6 @@ static struct platform_driver qpnp_tm_driver = {
.of_match_table = qpnp_tm_match_table,
},
.probe = qpnp_tm_probe,
- .remove = qpnp_tm_remove,
};
module_platform_driver(qpnp_tm_driver);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 203aca44a2bb..561a0a332208 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <linux/thermal.h>
#include "thermal_core.h"
@@ -90,10 +91,6 @@ struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
spinlock_t lock; /* Protect interrupts on and off */
- const struct rcar_gen3_thermal_data *data;
-};
-
-struct rcar_gen3_thermal_data {
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
};
@@ -278,7 +275,12 @@ static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
+static const struct soc_device_attribute r8a7795es1[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+};
+
+static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
{
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, 0x0);
@@ -303,7 +305,7 @@ static void r8a7795_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
}
-static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
+static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
{
u32 reg_val;
@@ -324,17 +326,9 @@ static void r8a7796_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
}
-static const struct rcar_gen3_thermal_data r8a7795_data = {
- .thermal_init = r8a7795_thermal_init,
-};
-
-static const struct rcar_gen3_thermal_data r8a7796_data = {
- .thermal_init = r8a7796_thermal_init,
-};
-
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
- { .compatible = "renesas,r8a7795-thermal", .data = &r8a7795_data},
- { .compatible = "renesas,r8a7796-thermal", .data = &r8a7796_data},
+ { .compatible = "renesas,r8a7795-thermal", },
+ { .compatible = "renesas,r8a7796-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
@@ -371,7 +365,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->data = of_device_get_match_data(dev);
+ priv->thermal_init = rcar_gen3_thermal_init;
+ if (soc_device_match(r8a7795es1))
+ priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
spin_lock_init(&priv->lock);
@@ -423,7 +419,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
priv->tscs[i] = tsc;
- priv->data->thermal_init(tsc);
+ priv->thermal_init(tsc);
rcar_gen3_thermal_calc_coefs(&tsc->coef, ptat, thcode[i]);
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
@@ -476,7 +472,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- priv->data->thermal_init(tsc);
+ priv->thermal_init(tsc);
rcar_gen3_thermal_set_trips(tsc, tsc->low, tsc->high);
}
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 206035139110..f36375d5a16c 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -242,6 +242,45 @@ struct tsadc_table {
int temp;
};
+static const struct tsadc_table rv1108_table[] = {
+ {0, -40000},
+ {374, -40000},
+ {382, -35000},
+ {389, -30000},
+ {397, -25000},
+ {405, -20000},
+ {413, -15000},
+ {421, -10000},
+ {429, -5000},
+ {436, 0},
+ {444, 5000},
+ {452, 10000},
+ {460, 15000},
+ {468, 20000},
+ {476, 25000},
+ {483, 30000},
+ {491, 35000},
+ {499, 40000},
+ {507, 45000},
+ {515, 50000},
+ {523, 55000},
+ {531, 60000},
+ {539, 65000},
+ {547, 70000},
+ {555, 75000},
+ {562, 80000},
+ {570, 85000},
+ {578, 90000},
+ {586, 95000},
+ {594, 100000},
+ {602, 105000},
+ {610, 110000},
+ {618, 115000},
+ {626, 120000},
+ {634, 125000},
+ {TSADCV2_DATA_MASK, 125000},
+};
+
static const struct tsadc_table rk3228_code_table[] = {
{0, -40000},
{588, -40000},
@@ -779,6 +818,30 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
+static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_num = 1, /* one channel for tsadc */
+
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv2_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rv1108_table,
+ .length = ARRAY_SIZE(rv1108_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_num = 1, /* one channel for tsadc */
@@ -928,6 +991,10 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
static const struct of_device_id of_rockchip_thermal_match[] = {
{
+ .compatible = "rockchip,rv1108-tsadc",
+ .data = (void *)&rv1108_tsadc_data,
+ },
+ {
.compatible = "rockchip,rk3228-tsadc",
.data = (void *)&rk3228_tsadc_data,
},
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index be95826631b7..ee047ca43084 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -31,8 +31,7 @@
* If the temperature is higher than a trip point,
* a. if the trend is THERMAL_TREND_RAISING, use higher cooling
* state for this trip point
- * b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
- * state for this trip point
+ * b. if the trend is THERMAL_TREND_DROPPING, do nothing
* c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
* for this trip point
* d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
@@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
if (!throttle)
next_target = THERMAL_NO_TARGET;
} else {
- next_target = cur_state - 1;
- if (next_target > instance->upper)
- next_target = instance->upper;
+ if (!throttle) {
+ next_target = cur_state - 1;
+ if (next_target > instance->upper)
+ next_target = instance->upper;
+ }
}
break;
case THERMAL_TREND_DROP_FULL:
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index cec586ec7e4b..f8740f7852e3 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -10,4 +10,11 @@ config TEGRA_SOCTHERM
zones to manage temperatures. This option is also required for the
emergency thermal reset (thermtrip) feature to function.
+config TEGRA_BPMP_THERMAL
+ tristate "Tegra BPMP thermal sensing"
+ depends on TEGRA_BPMP || COMPILE_TEST
+ help
+ Enable this option to support sensing the system temperature of NVIDIA
+ Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+
endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
index 1ce1af2cf0f5..0f2b66edf0d2 100644
--- a/drivers/thermal/tegra/Makefile
+++ b/drivers/thermal/tegra/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+obj-$(CONFIG_TEGRA_BPMP_THERMAL) += tegra-bpmp-thermal.o
tegra-soctherm-y := soctherm.o soctherm-fuse.o
tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 7d2db23d71a3..075db1de5e53 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -483,7 +483,7 @@ static int throttrip_program(struct device *dev,
unsigned int throt;
u32 r, reg_off;
- if (!dev || !sg || !stc || !stc->init)
+ if (!sg || !stc || !stc->init)
return -EINVAL;
temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
new file mode 100644
index 000000000000..b0980dbca3b3
--- /dev/null
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ * Aapo Vienamo <avienamo@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/workqueue.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_bpmp_thermal_zone {
+ struct tegra_bpmp_thermal *tegra;
+ struct thermal_zone_device *tzd;
+ struct work_struct tz_device_update_work;
+ unsigned int idx;
+};
+
+struct tegra_bpmp_thermal {
+ struct device *dev;
+ struct tegra_bpmp *bpmp;
+ unsigned int num_zones;
+ struct tegra_bpmp_thermal_zone **zones;
+};
+
+static int tegra_bpmp_thermal_get_temp(void *data, int *out_temp)
+{
+ struct tegra_bpmp_thermal_zone *zone = data;
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_GET_TEMP;
+ req.get_temp.zone = zone->idx;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+ if (err)
+ return err;
+
+ *out_temp = reply.get_temp.temp;
+
+ return 0;
+}
+
+static int tegra_bpmp_thermal_set_trips(void *data, int low, int high)
+{
+ struct tegra_bpmp_thermal_zone *zone = data;
+ struct mrq_thermal_host_to_bpmp_request req;
+ struct tegra_bpmp_message msg;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_SET_TRIP;
+ req.set_trip.zone = zone->idx;
+ req.set_trip.enabled = true;
+ req.set_trip.low = low;
+ req.set_trip.high = high;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+
+ return tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
+}
+
+static void tz_device_update_work_fn(struct work_struct *work)
+{
+ struct tegra_bpmp_thermal_zone *zone;
+
+ zone = container_of(work, struct tegra_bpmp_thermal_zone,
+ tz_device_update_work);
+
+ thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED);
+}
+
+static void bpmp_mrq_thermal(unsigned int mrq, struct tegra_bpmp_channel *ch,
+ void *data)
+{
+ struct mrq_thermal_bpmp_to_host_request *req;
+ struct tegra_bpmp_thermal *tegra = data;
+ int i;
+
+ req = (struct mrq_thermal_bpmp_to_host_request *)ch->ib->data;
+
+ if (req->type != CMD_THERMAL_HOST_TRIP_REACHED) {
+ dev_err(tegra->dev, "%s: invalid request type: %d\n",
+ __func__, req->type);
+ tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
+ return;
+ }
+
+ for (i = 0; i < tegra->num_zones; ++i) {
+ if (tegra->zones[i]->idx != req->host_trip_reached.zone)
+ continue;
+
+ schedule_work(&tegra->zones[i]->tz_device_update_work);
+ tegra_bpmp_mrq_return(ch, 0, NULL, 0);
+ return;
+ }
+
+ dev_err(tegra->dev, "%s: invalid thermal zone: %d\n", __func__,
+ req->host_trip_reached.zone);
+ tegra_bpmp_mrq_return(ch, -EINVAL, NULL, 0);
+}
+
+static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
+ int *num_zones)
+{
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_GET_NUM_ZONES;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+
+ *num_zones = reply.get_num_zones.num;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops tegra_bpmp_of_thermal_ops = {
+ .get_temp = tegra_bpmp_thermal_get_temp,
+ .set_trips = tegra_bpmp_thermal_set_trips,
+};
+
+static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent);
+ struct tegra_bpmp_thermal *tegra;
+ struct thermal_zone_device *tzd;
+ unsigned int i, max_num_zones;
+ int err;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->dev = &pdev->dev;
+ tegra->bpmp = bpmp;
+
+ err = tegra_bpmp_thermal_get_num_zones(bpmp, &max_num_zones);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get the number of zones: %d\n",
+ err);
+ return err;
+ }
+
+ tegra->zones = devm_kcalloc(&pdev->dev, max_num_zones,
+ sizeof(*tegra->zones), GFP_KERNEL);
+ if (!tegra->zones)
+ return -ENOMEM;
+
+ for (i = 0; i < max_num_zones; ++i) {
+ struct tegra_bpmp_thermal_zone *zone;
+ int temp;
+
+ zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone)
+ return -ENOMEM;
+
+ zone->idx = i;
+ zone->tegra = tegra;
+
+ err = tegra_bpmp_thermal_get_temp(zone, &temp);
+ if (err < 0) {
+ devm_kfree(&pdev->dev, zone);
+ continue;
+ }
+
+ tzd = devm_thermal_zone_of_sensor_register(
+ &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
+ if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ devm_kfree(&pdev->dev, zone);
+ continue;
+ }
+
+ zone->tzd = tzd;
+ INIT_WORK(&zone->tz_device_update_work,
+ tz_device_update_work_fn);
+
+ tegra->zones[tegra->num_zones++] = zone;
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_THERMAL, bpmp_mrq_thermal,
+ tegra);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mrq handler: %d\n",
+ err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, tegra);
+
+ return 0;
+}
+
+static int tegra_bpmp_thermal_remove(struct platform_device *pdev)
+{
+ struct tegra_bpmp_thermal *tegra = platform_get_drvdata(pdev);
+
+ tegra_bpmp_free_mrq(tegra->bpmp, MRQ_THERMAL, tegra);
+
+ return 0;
+}
+
+static const struct of_device_id tegra_bpmp_thermal_of_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp-thermal" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_bpmp_thermal_of_match);
+
+static struct platform_driver tegra_bpmp_thermal_driver = {
+ .probe = tegra_bpmp_thermal_probe,
+ .remove = tegra_bpmp_thermal_remove,
+ .driver = {
+ .name = "tegra-bpmp-thermal",
+ .of_match_table = tegra_bpmp_thermal_of_match,
+ },
+};
+module_platform_driver(tegra_bpmp_thermal_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra BPMP thermal sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index 73f55d6a1721..46d3005335c7 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -126,38 +126,23 @@ static int gadc_thermal_probe(struct platform_device *pdev)
gti->dev = &pdev->dev;
platform_set_drvdata(pdev, gti);
- gti->channel = iio_channel_get(&pdev->dev, "sensor-channel");
+ gti->channel = devm_iio_channel_get(&pdev->dev, "sensor-channel");
if (IS_ERR(gti->channel)) {
ret = PTR_ERR(gti->channel);
dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
return ret;
}
- gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0,
- gti, &gadc_thermal_ops);
+ gti->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, gti,
+ &gadc_thermal_ops);
if (IS_ERR(gti->tz_dev)) {
ret = PTR_ERR(gti->tz_dev);
dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
ret);
- goto sensor_fail;
+ return ret;
}
return 0;
-
-sensor_fail:
- iio_channel_release(gti->channel);
-
- return ret;
-}
-
-static int gadc_thermal_remove(struct platform_device *pdev)
-{
- struct gadc_thermal_info *gti = platform_get_drvdata(pdev);
-
- thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev);
- iio_channel_release(gti->channel);
-
- return 0;
}
static const struct of_device_id of_adc_thermal_match[] = {
@@ -172,7 +157,6 @@ static struct platform_driver gadc_thermal_driver = {
.of_match_table = of_adc_thermal_match,
},
.probe = gadc_thermal_probe,
- .remove = gadc_thermal_remove,
};
module_platform_driver(gadc_thermal_driver);
diff --git a/drivers/thermal/ti-soc-thermal/Makefile b/drivers/thermal/ti-soc-thermal/Makefile
index 0f89bdf03790..f180ebead858 100644
--- a/drivers/thermal/ti-soc-thermal/Makefile
+++ b/drivers/thermal/ti-soc-thermal/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal.o
ti-soc-thermal-y := ti-bandgap.o
ti-soc-thermal-$(CONFIG_TI_THERMAL) += ti-thermal-common.o
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index c211a8e4a210..b4f981daeaf2 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -278,7 +278,8 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id)
if (data) {
cpufreq_cooling_unregister(data->cool_dev);
- cpufreq_cpu_put(data->policy);
+ if (data->policy)
+ cpufreq_cpu_put(data->policy);
}
return 0;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 4900febc6c8a..f2f0de27252b 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 38bc27a5ce4f..c2277b8ee88d 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - capabilities lookup
*
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index fb40dd0588b9..37a7f4c735d0 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
@@ -289,20 +290,6 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
}
}
-static void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
-{
- int i;
- for (i = 0; i < len; i++)
- dst[i] = cpu_to_be32(src[i]);
-}
-
-static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
-{
- int i;
- for (i = 0; i < len; i++)
- dst[i] = be32_to_cpu(src[i]);
-}
-
static __be32 tb_crc(const void *data, size_t len)
{
return cpu_to_be32(~__crc32c_le(~0, data, len));
@@ -373,7 +360,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
- res = ring_tx(ctl->tx, &pkg->frame);
+ res = tb_ring_tx(ctl->tx, &pkg->frame);
if (res) /* ring is stopped */
tb_ctl_pkg_free(pkg);
return res;
@@ -382,15 +369,15 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
/**
* tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
*/
-static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
- ctl->callback(ctl->callback_data, type, pkg->buffer, size);
+ return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
- ring_rx(pkg->ctl->rx, &pkg->frame); /*
+ tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
* We ignore failures during stop.
* All rx packets are referenced
* from ctl->rx_packets, so we do
@@ -458,6 +445,8 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
break;
case TB_CFG_PKG_EVENT:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ case TB_CFG_PKG_XDOMAIN_REQ:
if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl,
"RX: checksum mismatch, dropping packet\n");
@@ -465,8 +454,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
}
/* Fall through */
case TB_CFG_PKG_ICM_EVENT:
- tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
- goto rx;
+ if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
+ goto rx;
+ break;
default:
break;
@@ -625,11 +615,12 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
if (!ctl->frame_pool)
goto err;
- ctl->tx = ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
if (!ctl->tx)
goto err;
- ctl->rx = ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
+ 0xffff, NULL, NULL);
if (!ctl->rx)
goto err;
@@ -662,9 +653,9 @@ void tb_ctl_free(struct tb_ctl *ctl)
return;
if (ctl->rx)
- ring_free(ctl->rx);
+ tb_ring_free(ctl->rx);
if (ctl->tx)
- ring_free(ctl->tx);
+ tb_ring_free(ctl->tx);
/* free RX packets */
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -683,8 +674,8 @@ void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
tb_ctl_info(ctl, "control channel starting...\n");
- ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
- ring_start(ctl->rx);
+ tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
+ tb_ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_rx_submit(ctl->rx_packets[i]);
@@ -705,8 +696,8 @@ void tb_ctl_stop(struct tb_ctl *ctl)
ctl->running = false;
mutex_unlock(&ctl->request_queue_lock);
- ring_stop(ctl->rx);
- ring_stop(ctl->tx);
+ tb_ring_stop(ctl->rx);
+ tb_ring_stop(ctl->tx);
if (!list_empty(&ctl->request_queue))
tb_ctl_WARN(ctl, "dangling request in request_queue\n");
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 36fd28b1c1c5..3062e0b5f71e 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
@@ -8,6 +9,7 @@
#define _TB_CFG
#include <linux/kref.h>
+#include <linux/thunderbolt.h>
#include "nhi.h"
#include "tb_msgs.h"
@@ -15,7 +17,7 @@
/* control channel */
struct tb_ctl;
-typedef void (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9f2dcd48974d..9b90115319ce 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -20,6 +20,98 @@
static DEFINE_IDA(tb_domain_ida);
+static bool match_service_id(const struct tb_service_id *id,
+ const struct tb_service *svc)
+{
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
+ if (strcmp(id->protocol_key, svc->key))
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
+ if (id->protocol_id != svc->prtcid)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->protocol_version != svc->prtcvers)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
+ if (id->protocol_revision != svc->prtcrevs)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct tb_service_id *__tb_service_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct tb_service_driver *driver;
+ const struct tb_service_id *ids;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return NULL;
+
+ driver = container_of(drv, struct tb_service_driver, driver);
+ if (!driver->id_table)
+ return NULL;
+
+ for (ids = driver->id_table; ids->match_flags != 0; ids++) {
+ if (match_service_id(ids, svc))
+ return ids;
+ }
+
+ return NULL;
+}
+
+static int tb_service_match(struct device *dev, struct device_driver *drv)
+{
+ return !!__tb_service_match(dev, drv);
+}
+
+static int tb_service_probe(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+ const struct tb_service_id *id;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ id = __tb_service_match(dev, &driver->driver);
+
+ return driver->probe(svc, id);
+}
+
+static int tb_service_remove(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->remove)
+ driver->remove(svc);
+
+ return 0;
+}
+
+static void tb_service_shutdown(struct device *dev)
+{
+ struct tb_service_driver *driver;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc || !dev->driver)
+ return;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->shutdown)
+ driver->shutdown(svc);
+}
+
static const char * const tb_security_names[] = {
[TB_SECURITY_NONE] = "none",
[TB_SECURITY_USER] = "user",
@@ -52,6 +144,10 @@ static const struct attribute_group *domain_attr_groups[] = {
struct bus_type tb_bus_type = {
.name = "thunderbolt",
+ .match = tb_service_match,
+ .probe = tb_service_probe,
+ .remove = tb_service_remove,
+ .shutdown = tb_service_shutdown,
};
static void tb_domain_release(struct device *dev)
@@ -128,17 +224,26 @@ err_free:
return NULL;
}
-static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct tb *tb = data;
if (!tb->cm_ops->handle_event) {
tb_warn(tb, "domain does not have event handler\n");
- return;
+ return true;
}
- tb->cm_ops->handle_event(tb, type, buf, size);
+ switch (type) {
+ case TB_CFG_PKG_XDOMAIN_REQ:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ return tb_xdomain_handle_request(tb, type, buf, size);
+
+ default:
+ tb->cm_ops->handle_event(tb, type, buf, size);
+ }
+
+ return true;
}
/**
@@ -443,9 +548,92 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
return tb->cm_ops->disconnect_pcie_paths(tb);
}
+/**
+ * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
+ * @tb: Domain enabling the DMA paths
+ * @xd: XDomain DMA paths are created to
+ *
+ * Calls connection manager specific method to enable DMA paths to the
+ * XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->approve_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->approve_xdomain_paths(tb, xd);
+}
+
+/**
+ * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
+ * @tb: Domain disabling the DMA paths
+ * @xd: XDomain whose DMA paths are disconnected
+ *
+ * Calls connection manager specific method to disconnect DMA paths to
+ * the XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->disconnect_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+}
+
+static int disconnect_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+ struct tb *tb = data;
+ int ret = 0;
+
+ xd = tb_to_xdomain(dev);
+ if (xd && xd->tb == tb)
+ ret = tb_xdomain_disable_paths(xd);
+
+ return ret;
+}
+
+/**
+ * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
+ * @tb: Domain whose paths are disconnected
+ *
+ * This function can be used to disconnect all paths (PCIe, XDomain) for
+ * example in preparation for host NVM firmware upgrade. After this is
+ * called the paths cannot be established without resetting the switch.
+ *
+ * Return: %0 in case of success and negative errno otherwise.
+ */
+int tb_domain_disconnect_all_paths(struct tb *tb)
+{
+ int ret;
+
+ ret = tb_domain_disconnect_pcie_paths(tb);
+ if (ret)
+ return ret;
+
+ return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
+}
+
int tb_domain_init(void)
{
- return bus_register(&tb_bus_type);
+ int ret;
+
+ ret = tb_xdomain_init();
+ if (ret)
+ return ret;
+ ret = bus_register(&tb_bus_type);
+ if (ret)
+ tb_xdomain_exit();
+
+ return ret;
}
void tb_domain_exit(void)
@@ -453,4 +641,5 @@ void tb_domain_exit(void)
bus_unregister(&tb_bus_type);
ida_destroy(&tb_domain_ida);
tb_switch_exit();
+ tb_xdomain_exit();
}
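Editor's note: with tb_bus_type now carrying match/probe/remove/shutdown, protocol drivers can bind to XDomain services by protocol key and ID. A hypothetical service driver is sketched below; tb_register_service_driver()/tb_unregister_service_driver() and the TBSVC_MATCH_* flags come from other patches in this series and are assumed here, and the "demo" key/ID pair is invented purely for illustration.

#include <linux/module.h>
#include <linux/thunderbolt.h>

static int demo_svc_probe(struct tb_service *svc,
                          const struct tb_service_id *id)
{
        dev_info(&svc->dev, "bound to service %s (prtcid %u)\n",
                 svc->key, svc->prtcid);
        return 0;
}

static void demo_svc_remove(struct tb_service *svc)
{
        dev_info(&svc->dev, "service removed\n");
}

/* "demo"/1 is a placeholder protocol key/ID, not a real service */
static const struct tb_service_id demo_svc_ids[] = {
        {
                .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
                               TBSVC_MATCH_PROTOCOL_ID,
                .protocol_key = "demo",
                .protocol_id = 1,
        },
        { },
};

static struct tb_service_driver demo_svc_driver = {
        .driver = {
                .name = "demo-svc",
        },
        .probe = demo_svc_probe,
        .remove = demo_svc_remove,
        .id_table = demo_svc_ids,
};

static int __init demo_svc_init(void)
{
        /* registration helper added elsewhere in this series */
        return tb_register_service_driver(&demo_svc_driver);
}
module_init(demo_svc_init);

static void __exit demo_svc_exit(void)
{
        tb_unregister_service_driver(&demo_svc_driver);
}
module_exit(demo_svc_exit);

MODULE_LICENSE("GPL v2");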
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index fe2f00ceafc5..3e8caf22c294 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - eeprom access
*
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 53250fc057e1..ab02d13f40b7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -60,6 +60,8 @@
* @get_route: Find a route string for given switch
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
*/
struct icm {
struct mutex request_lock;
@@ -74,6 +76,10 @@ struct icm {
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
+ void (*xdomain_connected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*xdomain_disconnected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
};
struct icm_notification {
@@ -89,7 +95,10 @@ static inline struct tb *icm_to_tb(struct icm *icm)
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
- return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
+ u8 link;
+
+ link = depth ? route >> ((depth - 1) * 8) : route;
+ return tb_phy_port_from_link(link);
}
static inline u8 dual_link_from_link(u8 link)
@@ -320,6 +329,51 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
return 0;
}
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct icm_fr_pkg_approve_xdomain_response reply;
+ struct icm_fr_pkg_approve_xdomain request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.hdr.code = ICM_APPROVE_XDOMAIN;
+ request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
+ memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+ request.transmit_path = xd->transmit_path;
+ request.transmit_ring = xd->transmit_ring;
+ request.receive_path = xd->receive_path;
+ request.receive_ring = xd->receive_ring;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ u8 phy_port;
+ u8 cmd;
+
+ phy_port = tb_phy_port_from_link(xd->link);
+ if (phy_port == 0)
+ cmd = NHI_MAILBOX_DISCONNECT_PA;
+ else
+ cmd = NHI_MAILBOX_DISCONNECT_PB;
+
+ nhi_mailbox_cmd(tb->nhi, cmd, 1);
+ usleep_range(10, 50);
+ nhi_mailbox_cmd(tb->nhi, cmd, 2);
+ return 0;
+}
+
static void remove_switch(struct tb_switch *sw)
{
struct tb_switch *parent_sw;
@@ -475,6 +529,141 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
tb_switch_put(sw);
}
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ tb_port_at(xd->route, sw)->xdomain = NULL;
+ tb_xdomain_remove(xd);
+}
+
+static void
+icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_connected *pkg =
+ (const struct icm_fr_event_xdomain_connected *)hdr;
+ struct tb_xdomain *xd;
+ struct tb_switch *sw;
+ u8 link, depth;
+ bool approved;
+ u64 route;
+
+ /*
+ * After an NVM upgrade adding the root switch device fails because
+ * we initiated a reset. During that time ICM might still send
+ * XDomain connected messages, which we ignore here.
+ */
+ if (!tb->root_switch)
+ return;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+ approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+ if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+ tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+ return;
+ }
+
+ route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ u8 xd_phy_port, phy_port;
+
+ xd_phy_port = phy_port_from_route(xd->route, xd->depth);
+ phy_port = phy_port_from_route(route, depth);
+
+ if (xd->depth == depth && xd_phy_port == phy_port) {
+ xd->link = link;
+ xd->route = route;
+ xd->is_unplugged = false;
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ /*
+ * If we find an existing XDomain connection remove it
+ * now. We need to go through login handshake and
+ * everything anyway to be able to re-establish the
+ * connection.
+ */
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * Check if an XDomain already exists at the same position as the
+ * new one and in that case remove it because it is most likely
+ * another host that got disconnected.
+ */
+ xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+ if (!xd) {
+ u8 dual_link;
+
+ dual_link = dual_link_from_link(link);
+ if (dual_link)
+ xd = tb_xdomain_find_by_link_depth(tb, dual_link,
+ depth);
+ }
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * If the user disconnected a switch during suspend and
+ * connected another host to the same port, remove the switch
+ * first.
+ */
+ sw = get_switch_at_route(tb->root_switch, route);
+ if (sw)
+ remove_switch(sw);
+
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+ depth);
+ return;
+ }
+
+ xd = tb_xdomain_alloc(sw->tb, &sw->dev, route,
+ &pkg->local_uuid, &pkg->remote_uuid);
+ if (!xd) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ xd->link = link;
+ xd->depth = depth;
+
+ tb_port_at(route, sw)->xdomain = xd;
+
+ tb_xdomain_add(xd);
+ tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_disconnected *pkg =
+ (const struct icm_fr_event_xdomain_disconnected *)hdr;
+ struct tb_xdomain *xd;
+
+ /*
+ * If the connection is through one or multiple devices, the
+ * XDomain device is removed along with them so it is fine if we
+ * cannot find it here.
+ */
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+}
+
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
struct pci_dev *parent;
@@ -594,6 +783,12 @@ static void icm_handle_notification(struct work_struct *work)
case ICM_EVENT_DEVICE_DISCONNECTED:
icm->device_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
}
mutex_unlock(&tb->lock);
@@ -927,6 +1122,10 @@ static void icm_unplug_children(struct tb_switch *sw)
if (tb_is_upstream_port(port))
continue;
+ if (port->xdomain) {
+ port->xdomain->is_unplugged = true;
+ continue;
+ }
if (!port->remote)
continue;
@@ -943,6 +1142,13 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
if (tb_is_upstream_port(port))
continue;
+
+ if (port->xdomain && port->xdomain->is_unplugged) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ continue;
+ }
+
if (!port->remote)
continue;
@@ -1009,8 +1215,10 @@ static int icm_start(struct tb *tb)
tb->root_switch->no_nvm_upgrade = x86_apple_machine;
ret = tb_switch_add(tb->root_switch);
- if (ret)
+ if (ret) {
tb_switch_put(tb->root_switch);
+ tb->root_switch = NULL;
+ }
return ret;
}
@@ -1042,6 +1250,8 @@ static const struct tb_cm_ops icm_fr_ops = {
.add_switch_key = icm_fr_add_switch_key,
.challenge_switch_key = icm_fr_challenge_switch_key,
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+ .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
struct tb *icm_probe(struct tb_nhi *nhi)
@@ -1064,6 +1274,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->get_route = icm_fr_get_route;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_fr_ops;
break;
@@ -1077,6 +1289,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
icm->get_route = icm_ar_get_route;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_fr_ops;
break;
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 05af126a2435..419a7a90bce0 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -22,6 +22,14 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
/*
+ * Used to enable end-to-end workaround for missing RX packets. Do not
+ * use this ring for anything else.
+ */
+#define RING_E2E_UNUSED_HOPID 2
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define RING_FIRST_USABLE_HOPID 8
+
+/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
*/
@@ -206,8 +214,10 @@ static void ring_work(struct work_struct *work)
struct tb_ring *ring = container_of(work, typeof(*ring), work);
struct ring_frame *frame;
bool canceled = false;
+ unsigned long flags;
LIST_HEAD(done);
- mutex_lock(&ring->lock);
+
+ spin_lock_irqsave(&ring->lock, flags);
if (!ring->running) {
/* Move all frames to done and mark them as canceled. */
@@ -229,30 +239,14 @@ static void ring_work(struct work_struct *work)
frame->eof = ring->descriptors[ring->tail].eof;
frame->sof = ring->descriptors[ring->tail].sof;
frame->flags = ring->descriptors[ring->tail].flags;
- if (frame->sof != 0)
- dev_WARN(&ring->nhi->pdev->dev,
- "%s %d got unexpected SOF: %#x\n",
- RING_TYPE(ring), ring->hop,
- frame->sof);
- /*
- * known flags:
- * raw not enabled, interupt not set: 0x2=0010
- * raw enabled: 0xa=1010
- * raw not enabled: 0xb=1011
- * partial frame (>MAX_FRAME_SIZE): 0xe=1110
- */
- if (frame->flags != 0xa)
- dev_WARN(&ring->nhi->pdev->dev,
- "%s %d got unexpected flags: %#x\n",
- RING_TYPE(ring), ring->hop,
- frame->flags);
}
ring->tail = (ring->tail + 1) % ring->size;
}
ring_write_descriptors(ring);
invoke_callback:
- mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
+ /* allow callbacks to schedule new work */
+ spin_unlock_irqrestore(&ring->lock, flags);
while (!list_empty(&done)) {
frame = list_first_entry(&done, typeof(*frame), list);
/*
@@ -260,29 +254,128 @@ invoke_callback:
* Do not hold on to it.
*/
list_del_init(&frame->list);
- frame->callback(ring, frame, canceled);
+ if (frame->callback)
+ frame->callback(ring, frame, canceled);
}
}
-int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
+int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
+ unsigned long flags;
int ret = 0;
- mutex_lock(&ring->lock);
+
+ spin_lock_irqsave(&ring->lock, flags);
if (ring->running) {
list_add_tail(&frame->list, &ring->queue);
ring_write_descriptors(ring);
} else {
ret = -ESHUTDOWN;
}
- mutex_unlock(&ring->lock);
+ spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
+EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
+
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called when @start_poll callback of the @ring
+ * has been called. It will read one completed frame from the ring and
+ * return it to the caller. Returns %NULL if there is no more completed
+ * frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+ struct ring_frame *frame = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (!ring->running)
+ goto unlock;
+ if (ring_empty(ring))
+ goto unlock;
+
+ if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+ frame = list_first_entry(&ring->in_flight, typeof(*frame),
+ list);
+ list_del_init(&frame->list);
+
+ if (!ring->is_tx) {
+ frame->size = ring->descriptors[ring->tail].length;
+ frame->eof = ring->descriptors[ring->tail].eof;
+ frame->sof = ring->descriptors[ring->tail].sof;
+ frame->flags = ring->descriptors[ring->tail].flags;
+ }
+
+ ring->tail = (ring->tail + 1) % ring->size;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+ int idx = ring_interrupt_index(ring);
+ int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+ int bit = idx % 32;
+ u32 val;
+
+ val = ioread32(ring->nhi->iobase + reg);
+ if (mask)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+ if (!ring->running)
+ return;
+
+ if (ring->start_poll) {
+ /* Keep the interrupt masked while the user polls the ring */
+ __ring_interrupt_mask(ring, true);
+ ring->start_poll(ring->poll_data);
+ } else {
+ schedule_work(&ring->work);
+ }
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->nhi->lock, flags);
+ spin_lock(&ring->lock);
+ if (ring->start_poll)
+ __ring_interrupt_mask(ring, false);
+ spin_unlock(&ring->lock);
+ spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static irqreturn_t ring_msix(int irq, void *data)
{
struct tb_ring *ring = data;
- schedule_work(&ring->work);
+ spin_lock(&ring->nhi->lock);
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
+ spin_unlock(&ring->nhi->lock);
+
return IRQ_HANDLED;
}
@@ -320,30 +413,81 @@ static void ring_release_msix(struct tb_ring *ring)
ring->irq = 0;
}
-static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
- bool transmit, unsigned int flags)
+static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
+{
+ int ret = 0;
+
+ spin_lock_irq(&nhi->lock);
+
+ if (ring->hop < 0) {
+ unsigned int i;
+
+ /*
+ * Automatically allocate HopID from the non-reserved
+ * range 8 .. hop_count - 1.
+ */
+ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ if (ring->is_tx) {
+ if (!nhi->tx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ } else {
+ if (!nhi->rx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ if (ring->is_tx && nhi->tx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ if (ring->is_tx)
+ nhi->tx_rings[ring->hop] = ring;
+ else
+ nhi->rx_rings[ring->hop] = ring;
+
+err_unlock:
+ spin_unlock_irq(&nhi->lock);
+
+ return ret;
+}
+
+static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+ bool transmit, unsigned int flags,
+ u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *),
+ void *poll_data)
{
struct tb_ring *ring = NULL;
dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
transmit ? "TX" : "RX", hop, size);
- mutex_lock(&nhi->lock);
- if (hop >= nhi->hop_count) {
- dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
- goto err;
- }
- if (transmit && nhi->tx_rings[hop]) {
- dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
- goto err;
- } else if (!transmit && nhi->rx_rings[hop]) {
- dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
- goto err;
- }
+ /* Tx Ring 2 is reserved for E2E workaround */
+ if (transmit && hop == RING_E2E_UNUSED_HOPID)
+ return NULL;
+
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
- goto err;
+ return NULL;
- mutex_init(&ring->lock);
+ spin_lock_init(&ring->lock);
INIT_LIST_HEAD(&ring->queue);
INIT_LIST_HEAD(&ring->in_flight);
INIT_WORK(&ring->work, ring_work);
@@ -353,55 +497,88 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->is_tx = transmit;
ring->size = size;
ring->flags = flags;
+ ring->sof_mask = sof_mask;
+ ring->eof_mask = eof_mask;
ring->head = 0;
ring->tail = 0;
ring->running = false;
-
- if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
- goto err;
+ ring->start_poll = start_poll;
+ ring->poll_data = poll_data;
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
if (!ring->descriptors)
- goto err;
+ goto err_free_ring;
+
+ if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+ goto err_free_descs;
+
+ if (nhi_alloc_hop(nhi, ring))
+ goto err_release_msix;
- if (transmit)
- nhi->tx_rings[hop] = ring;
- else
- nhi->rx_rings[hop] = ring;
- mutex_unlock(&nhi->lock);
return ring;
-err:
- if (ring)
- mutex_destroy(&ring->lock);
+err_release_msix:
+ ring_release_msix(ring);
+err_free_descs:
+ dma_free_coherent(&ring->nhi->pdev->dev,
+ ring->size * sizeof(*ring->descriptors),
+ ring->descriptors, ring->descriptors_dma);
+err_free_ring:
kfree(ring);
- mutex_unlock(&nhi->lock);
+
return NULL;
}
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags)
+/**
+ * tb_ring_alloc_tx() - Allocate DMA ring for transmit
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ */
+struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
{
- return ring_alloc(nhi, hop, size, true, flags);
+ return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags)
+/**
+ * tb_ring_alloc_rx() - Allocate DMA ring for receive
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ * @sof_mask: Mask of PDF values that start a frame
+ * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ * interrupt is triggered and masked, instead of invoking the
+ * callback of each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
+ */
+struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags, u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *), void *poll_data)
{
- return ring_alloc(nhi, hop, size, false, flags);
+ return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+ start_poll, poll_data);
}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
/**
- * ring_start() - enable a ring
+ * tb_ring_start() - enable a ring
*
- * Must not be invoked in parallel with ring_stop().
+ * Must not be invoked in parallel with tb_ring_stop().
*/
-void ring_start(struct tb_ring *ring)
+void tb_ring_start(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
- mutex_lock(&ring->lock);
+ u16 frame_size;
+ u32 flags;
+
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
if (ring->nhi->going_away)
goto err;
if (ring->running) {
@@ -411,43 +588,65 @@ void ring_start(struct tb_ring *ring)
dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
RING_TYPE(ring), ring->hop);
+ if (ring->flags & RING_FLAG_FRAME) {
+ /* A frame size of 0 means 4096 bytes */
+ frame_size = 0;
+ flags = RING_FLAG_ENABLE;
+ } else {
+ frame_size = TB_FRAME_SIZE;
+ flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
+ }
+
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ u32 hop;
+
+ /*
+ * In order not to lose Rx packets we enable end-to-end
+ * workaround which transfers Rx credits to an unused Tx
+ * HopID.
+ */
+ hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
+ hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
+ flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
+ }
+
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
ring_iowrite32options(ring, 0, 4); /* time releated ? */
- ring_iowrite32options(ring,
- RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+ ring_iowrite32options(ring, flags, 0);
} else {
- ring_iowrite32desc(ring,
- (TB_FRAME_SIZE << 16) | ring->size, 12);
- ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
- ring_iowrite32options(ring,
- RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
+ u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
+
+ ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
+ ring_iowrite32options(ring, sof_eof_mask, 4);
+ ring_iowrite32options(ring, flags, 0);
}
ring_interrupt_active(ring, true);
ring->running = true;
err:
- mutex_unlock(&ring->lock);
- mutex_unlock(&ring->nhi->lock);
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
}
-
+EXPORT_SYMBOL_GPL(tb_ring_start);
/**
- * ring_stop() - shutdown a ring
+ * tb_ring_stop() - shutdown a ring
*
* Must not be invoked from a callback.
*
- * This method will disable the ring. Further calls to ring_tx/ring_rx will
- * return -ESHUTDOWN until ring_stop has been called.
+ * This method will disable the ring. Further calls to
+ * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been
+ * called.
*
* All enqueued frames will be canceled and their callbacks will be executed
* with frame->canceled set to true (on the callback thread). This method
* returns only after all callback invocations have finished.
*/
-void ring_stop(struct tb_ring *ring)
+void tb_ring_stop(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
- mutex_lock(&ring->lock);
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
if (ring->nhi->going_away)
@@ -468,8 +667,8 @@ void ring_stop(struct tb_ring *ring)
ring->running = false;
err:
- mutex_unlock(&ring->lock);
- mutex_unlock(&ring->nhi->lock);
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
/*
* schedule ring->work to invoke callbacks on all remaining frames.
@@ -477,9 +676,10 @@ err:
schedule_work(&ring->work);
flush_work(&ring->work);
}
+EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
- * ring_free() - free ring
+ * tb_ring_free() - free ring
*
* When this method returns all invocations of ring->callback will have
* finished.
@@ -488,9 +688,9 @@ err:
*
* Must NOT be called from ring_frame->callback!
*/
-void ring_free(struct tb_ring *ring)
+void tb_ring_free(struct tb_ring *ring)
{
- mutex_lock(&ring->nhi->lock);
+ spin_lock_irq(&ring->nhi->lock);
/*
* Dissociate the ring from the NHI. This also ensures that
* nhi_interrupt_work cannot reschedule ring->work.
@@ -504,6 +704,7 @@ void ring_free(struct tb_ring *ring)
dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
RING_TYPE(ring), ring->hop);
}
+ spin_unlock_irq(&ring->nhi->lock);
ring_release_msix(ring);
@@ -520,16 +721,15 @@ void ring_free(struct tb_ring *ring)
RING_TYPE(ring),
ring->hop);
- mutex_unlock(&ring->nhi->lock);
/**
* ring->work can no longer be scheduled (it is scheduled only
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
* to finish before freeing the ring.
*/
flush_work(&ring->work);
- mutex_destroy(&ring->lock);
kfree(ring);
}
+EXPORT_SYMBOL_GPL(tb_ring_free);
/**
* nhi_mailbox_cmd() - Send a command through NHI mailbox
@@ -595,7 +795,7 @@ static void nhi_interrupt_work(struct work_struct *work)
int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
struct tb_ring *ring;
- mutex_lock(&nhi->lock);
+ spin_lock_irq(&nhi->lock);
/*
* Starting at REG_RING_NOTIFY_BASE there are three status bitfields
@@ -630,10 +830,12 @@ static void nhi_interrupt_work(struct work_struct *work)
hop);
continue;
}
- /* we do not check ring->running, this is done in ring->work */
- schedule_work(&ring->work);
+
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
}
- mutex_unlock(&nhi->lock);
+ spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
@@ -651,6 +853,22 @@ static int nhi_suspend_noirq(struct device *dev)
return tb_domain_suspend_noirq(tb);
}
+static void nhi_enable_int_throttling(struct tb_nhi *nhi)
+{
+ /* Throttling is specified in 256ns increments */
+ u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
+ unsigned int i;
+
+ /*
+ * Configure interrupt throttling for all vectors even if we
+ * only use few.
+ */
+ for (i = 0; i < MSIX_MAX_VECS; i++) {
+ u32 reg = REG_INT_THROTTLING_RATE + i * 4;
+ iowrite32(throttle, nhi->iobase + reg);
+ }
+}
+
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -663,6 +881,8 @@ static int nhi_resume_noirq(struct device *dev)
*/
if (!pci_device_is_present(pdev))
tb->nhi->going_away = true;
+ else
+ nhi_enable_int_throttling(tb->nhi);
return tb_domain_resume_noirq(tb);
}
@@ -705,7 +925,6 @@ static void nhi_shutdown(struct tb_nhi *nhi)
devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
flush_work(&nhi->interrupt_work);
}
- mutex_destroy(&nhi->lock);
ida_destroy(&nhi->msix_ida);
}
@@ -717,6 +936,8 @@ static int nhi_init_msi(struct tb_nhi *nhi)
/* In case someone left them on. */
nhi_disable_interrupts(nhi);
+ nhi_enable_int_throttling(nhi);
+
ida_init(&nhi->msix_ida);
/*
@@ -792,13 +1013,10 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return res;
}
- mutex_init(&nhi->lock);
+ spin_lock_init(&nhi->lock);
pci_set_master(pdev);
- /* magic value - clock related? */
- iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
-
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
@@ -926,5 +1144,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-module_init(nhi_init);
+fs_initcall(nhi_init);
module_exit(nhi_unload);
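Editor's note: beyond the ring_*() to tb_ring_*() rename, the reworked API adds a polling mode — if tb_ring_alloc_rx() is given a @start_poll callback, the interrupt handler masks the ring interrupt and calls that callback instead of scheduling the completion work; the consumer then drains frames with tb_ring_poll() and re-enables the interrupt with tb_ring_poll_complete(). A rough sketch of that flow follows; the consumer structure, ring size and PDF masks are illustrative, not taken from this patch.

#include <linux/thunderbolt.h>
#include <linux/workqueue.h>

struct demo_consumer {
        struct tb_ring *rx_ring;
        struct work_struct rx_work;
};

/* Called in hard-irq context with the ring interrupt already masked */
static void demo_start_poll(void *data)
{
        struct demo_consumer *dc = data;

        schedule_work(&dc->rx_work);
}

static void demo_rx_work(struct work_struct *work)
{
        struct demo_consumer *dc = container_of(work, typeof(*dc), rx_work);
        struct ring_frame *frame;

        /* Drain everything the hardware has completed so far */
        while ((frame = tb_ring_poll(dc->rx_ring)) != NULL) {
                /* process frame->size bytes, then resubmit the buffer */
                tb_ring_rx(dc->rx_ring, frame);
        }

        /* Unmask the ring interrupt again */
        tb_ring_poll_complete(dc->rx_ring);
}

static int demo_open_ring(struct tb_nhi *nhi, struct demo_consumer *dc)
{
        INIT_WORK(&dc->rx_work, demo_rx_work);

        /* -1 asks the NHI to pick a free HopID from the usable range */
        dc->rx_ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME,
                                       0xffff, 0xffff, demo_start_poll, dc);
        if (!dc->rx_ring)
                return -ENOMEM;

        tb_ring_start(dc->rx_ring);
        return 0;
}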
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 5b5bb2c436be..4476ab4cfd0c 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - NHI driver
*
@@ -7,144 +8,7 @@
#ifndef DSL3510_H_
#define DSL3510_H_
-#include <linux/idr.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
-
-/**
- * struct tb_nhi - thunderbolt native host interface
- * @lock: Must be held during ring creation/destruction. Is acquired by
- * interrupt_work when dispatching interrupts to individual rings.
- * @pdev: Pointer to the PCI device
- * @iobase: MMIO space of the NHI
- * @tx_rings: All Tx rings available on this host controller
- * @rx_rings: All Rx rings available on this host controller
- * @msix_ida: Used to allocate MSI-X vectors for rings
- * @going_away: The host controller device is about to disappear so when
- * this flag is set, avoid touching the hardware anymore.
- * @interrupt_work: Work scheduled to handle ring interrupt when no
- * MSI-X is used.
- * @hop_count: Number of rings (end point hops) supported by NHI.
- */
-struct tb_nhi {
- struct mutex lock;
- struct pci_dev *pdev;
- void __iomem *iobase;
- struct tb_ring **tx_rings;
- struct tb_ring **rx_rings;
- struct ida msix_ida;
- bool going_away;
- struct work_struct interrupt_work;
- u32 hop_count;
-};
-
-/**
- * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
- * @lock: Lock serializing actions to this ring. Must be acquired after
- * nhi->lock.
- * @nhi: Pointer to the native host controller interface
- * @size: Size of the ring
- * @hop: Hop (DMA channel) associated with this ring
- * @head: Head of the ring (write next descriptor here)
- * @tail: Tail of the ring (complete next descriptor here)
- * @descriptors: Allocated descriptors for this ring
- * @queue: Queue holding frames to be transferred over this ring
- * @in_flight: Queue holding frames that are currently in flight
- * @work: Interrupt work structure
- * @is_tx: Is the ring Tx or Rx
- * @running: Is the ring running
- * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
- * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
- * @flags: Ring specific flags
- */
-struct tb_ring {
- struct mutex lock;
- struct tb_nhi *nhi;
- int size;
- int hop;
- int head;
- int tail;
- struct ring_desc *descriptors;
- dma_addr_t descriptors_dma;
- struct list_head queue;
- struct list_head in_flight;
- struct work_struct work;
- bool is_tx:1;
- bool running:1;
- int irq;
- u8 vector;
- unsigned int flags;
-};
-
-/* Leave ring interrupt enabled on suspend */
-#define RING_FLAG_NO_SUSPEND BIT(0)
-
-struct ring_frame;
-typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
-
-/**
- * struct ring_frame - for use with ring_rx/ring_tx
- */
-struct ring_frame {
- dma_addr_t buffer_phy;
- ring_cb callback;
- struct list_head list;
- u32 size:12; /* TX: in, RX: out*/
- u32 flags:12; /* RX: out */
- u32 eof:4; /* TX:in, RX: out */
- u32 sof:4; /* TX:in, RX: out */
-};
-
-#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
-
-struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags);
-struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags);
-void ring_start(struct tb_ring *ring);
-void ring_stop(struct tb_ring *ring);
-void ring_free(struct tb_ring *ring);
-
-int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
-
-/**
- * ring_rx() - enqueue a frame on an RX ring
- *
- * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
- * buffer must contain at least TB_FRAME_SIZE bytes.
- *
- * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
- * frame->sof set once the frame has been received.
- *
- * If ring_stop is called after the packet has been enqueued frame->callback
- * will be called with canceled set to true.
- *
- * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
- */
-static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
-{
- WARN_ON(ring->is_tx);
- return __ring_enqueue(ring, frame);
-}
-
-/**
- * ring_tx() - enqueue a frame on an TX ring
- *
- * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
- * and frame->sof have to be set.
- *
- * frame->callback will be invoked with once the frame has been transmitted.
- *
- * If ring_stop is called after the packet has been enqueued frame->callback
- * will be called with canceled set to true.
- *
- * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise.
- */
-static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
-{
- WARN_ON(!ring->is_tx);
- return __ring_enqueue(ring, frame);
-}
+#include <linux/thunderbolt.h>
enum nhi_fw_mode {
NHI_FW_SAFE_MODE,
@@ -157,6 +21,8 @@ enum nhi_mailbox_cmd {
NHI_MAILBOX_SAVE_DEVS = 0x05,
NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
NHI_MAILBOX_DRV_UNLOADS = 0x07,
+ NHI_MAILBOX_DISCONNECT_PA = 0x10,
+ NHI_MAILBOX_DISCONNECT_PB = 0x11,
NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 09ed574e92ff..b3e49d19c01e 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt driver - NHI registers
*
@@ -17,13 +18,6 @@ enum ring_flags {
RING_FLAG_ENABLE = 1 << 31,
};
-enum ring_desc_flags {
- RING_DESC_ISOCH = 0x1, /* TX only? */
- RING_DESC_COMPLETED = 0x2, /* set by NHI */
- RING_DESC_POSTED = 0x4, /* always set this */
- RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
-};
-
/**
* struct ring_desc - TX/RX ring entry
*
@@ -77,6 +71,8 @@ struct ring_desc {
* ..: unknown
*/
#define REG_RX_OPTIONS_BASE 0x29800
+#define REG_RX_OPTIONS_E2E_HOP_MASK GENMASK(22, 12)
+#define REG_RX_OPTIONS_E2E_HOP_SHIFT 12
/*
* three bitfields: tx, rx, rx overflow
@@ -95,6 +91,8 @@ struct ring_desc {
#define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+#define REG_INT_THROTTLING_RATE 0x38c00
+
/* Interrupt Vector Allocation */
#define REG_INT_VEC_ALLOC_BASE 0x38c40
#define REG_INT_VEC_ALLOC_BITS 4
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 9562cd026dc0..ff49ad880bfd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - path/tunnel functionality
*
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
new file mode 100644
index 000000000000..8fe913a95b4a
--- /dev/null
+++ b/drivers/thunderbolt/property.c
@@ -0,0 +1,670 @@
+/*
+ * Thunderbolt XDomain property support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+#include <linux/thunderbolt.h>
+
+struct tb_property_entry {
+ u32 key_hi;
+ u32 key_lo;
+ u16 length;
+ u8 reserved;
+ u8 type;
+ u32 value;
+};
+
+struct tb_property_rootdir_entry {
+ u32 magic;
+ u32 length;
+ struct tb_property_entry entries[];
+};
+
+struct tb_property_dir_entry {
+ u32 uuid[4];
+ struct tb_property_entry entries[];
+};
+
+#define TB_PROPERTY_ROOTDIR_MAGIC 0x55584401
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len,
+ bool is_root);
+
+static inline void parse_dwdata(void *dst, const void *src, size_t dwords)
+{
+ be32_to_cpu_array(dst, src, dwords);
+}
+
+static inline void format_dwdata(void *dst, const void *src, size_t dwords)
+{
+ cpu_to_be32_array(dst, src, dwords);
+}
+
+static bool tb_property_entry_valid(const struct tb_property_entry *entry,
+ size_t block_len)
+{
+ switch (entry->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (entry->length > block_len)
+ return false;
+ if (entry->value + entry->length > block_len)
+ return false;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ if (entry->length != 1)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool tb_property_key_valid(const char *key)
+{
+ return key && strlen(key) <= TB_PROPERTY_KEY_SIZE;
+}
+
+static struct tb_property *
+tb_property_alloc(const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ property = kzalloc(sizeof(*property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ strcpy(property->key, key);
+ property->type = type;
+ INIT_LIST_HEAD(&property->list);
+
+ return property;
+}
+
+static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
+ const struct tb_property_entry *entry)
+{
+ char key[TB_PROPERTY_KEY_SIZE + 1];
+ struct tb_property *property;
+ struct tb_property_dir *dir;
+
+ if (!tb_property_entry_valid(entry, block_len))
+ return NULL;
+
+ parse_dwdata(key, entry, 2);
+ key[TB_PROPERTY_KEY_SIZE] = '\0';
+
+ property = tb_property_alloc(key, entry->type);
+ if (!property)
+ return NULL;
+
+ property->length = entry->length;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ dir = __tb_property_parse_dir(block, block_len, entry->value,
+ entry->length, false);
+ if (!dir) {
+ kfree(property);
+ return NULL;
+ }
+ property->value.dir = dir;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ property->value.data = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.data) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.data, block + entry->value,
+ entry->length);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ property->value.text = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.text) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.text, block + entry->value,
+ entry->length);
+ /* Force null termination */
+ property->value.text[property->length * 4 - 1] = '\0';
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ property->value.immediate = entry->value;
+ break;
+
+ default:
+ property->type = TB_PROPERTY_TYPE_UNKNOWN;
+ break;
+ }
+
+ return property;
+}
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root)
+{
+ const struct tb_property_entry *entries;
+ size_t i, content_len, nentries;
+ unsigned int content_offset;
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ if (is_root) {
+ content_offset = dir_offset + 2;
+ content_len = dir_len;
+ } else {
+ dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
+ GFP_KERNEL);
+ content_offset = dir_offset + 4;
+ content_len = dir_len - 4; /* Length includes UUID */
+ }
+
+ entries = (const struct tb_property_entry *)&block[content_offset];
+ nentries = content_len / (sizeof(*entries) / 4);
+
+ INIT_LIST_HEAD(&dir->properties);
+
+ for (i = 0; i < nentries; i++) {
+ struct tb_property *property;
+
+ property = tb_property_parse(block, block_len, &entries[i]);
+ if (!property) {
+ tb_property_free_dir(dir);
+ return NULL;
+ }
+
+ list_add_tail(&property->list, &dir->properties);
+ }
+
+ return dir;
+}
+
+/**
+ * tb_property_parse_dir() - Parses properties from given property block
+ * @block: Property block to parse
+ * @block_len: Number of dword elements in the property block
+ *
+ * This function parses the XDomain properties data block into a format that
+ * can be traversed using the helper functions provided by this module.
+ * Upon success returns the parsed directory. In case of error returns
+ * %NULL. The resulting &struct tb_property_dir needs to be released by
+ * calling tb_property_free_dir() when not needed anymore.
+ *
+ * The @block is expected to contain a root directory.
+ */
+struct tb_property_dir *tb_property_parse_dir(const u32 *block,
+ size_t block_len)
+{
+ const struct tb_property_rootdir_entry *rootdir =
+ (const struct tb_property_rootdir_entry *)block;
+
+ if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC)
+ return NULL;
+ if (rootdir->length > block_len)
+ return NULL;
+
+ return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
+ true);
+}
+
+/**
+ * tb_property_create_dir() - Creates new property directory
+ * @uuid: UUID used to identify the particular directory
+ *
+ * Creates new, empty property directory. If @uuid is %NULL then the
+ * directory is assumed to be root directory.
+ */
+struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
+{
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ INIT_LIST_HEAD(&dir->properties);
+ if (uuid) {
+ dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL);
+ if (!dir->uuid) {
+ kfree(dir);
+ return NULL;
+ }
+ }
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(tb_property_create_dir);
+
+static void tb_property_free(struct tb_property *property)
+{
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ tb_property_free_dir(property->value.dir);
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ kfree(property->value.data);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ kfree(property->value.text);
+ break;
+
+ default:
+ break;
+ }
+
+ kfree(property);
+}
+
+/**
+ * tb_property_free_dir() - Release memory allocated for property directory
+ * @dir: Directory to release
+ *
+ * This will release all the memory the directory occupies including all
+ * descendants. It is OK to pass %NULL @dir, in which case the function
+ * does nothing.
+ */
+void tb_property_free_dir(struct tb_property_dir *dir)
+{
+ struct tb_property *property, *tmp;
+
+ if (!dir)
+ return;
+
+ list_for_each_entry_safe(property, tmp, &dir->properties, list) {
+ list_del(&property->list);
+ tb_property_free(property);
+ }
+ kfree(dir->uuid);
+ kfree(dir);
+}
+EXPORT_SYMBOL_GPL(tb_property_free_dir);
+
+static size_t tb_property_dir_length(const struct tb_property_dir *dir,
+ bool recurse, size_t *data_len)
+{
+ const struct tb_property *property;
+ size_t len = 0;
+
+ if (dir->uuid)
+ len += sizeof(*dir->uuid) / 4;
+ else
+ len += sizeof(struct tb_property_rootdir_entry) / 4;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ len += sizeof(struct tb_property_entry) / 4;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ if (recurse) {
+ len += tb_property_dir_length(
+ property->value.dir, recurse, data_len);
+ }
+ /* Reserve dword padding after each directory */
+ if (data_len)
+ *data_len += 1;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (data_len)
+ *data_len += property->length;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
+ u32 *block, unsigned int start_offset, size_t block_len)
+{
+ unsigned int data_offset, dir_end;
+ const struct tb_property *property;
+ struct tb_property_entry *entry;
+ size_t dir_len, data_len = 0;
+ int ret;
+
+ /*
+ * The structure of the property block is as follows. Leaf
+ * data/text is included right after the directory and each
+ * directory follows each other (even nested ones).
+ *
+ * +----------+ <-- start_offset
+ * | header | <-- root directory header
+ * +----------+ ---
+ * | entry 0 | -^--------------------.
+ * +----------+ | |
+ * | entry 1 | -|--------------------|--.
+ * +----------+ | | |
+ * | entry 2 | -|-----------------. | |
+ * +----------+ | | | |
+ * : : | dir_len | | |
+ * . . | | | |
+ * : : | | | |
+ * +----------+ | | | |
+ * | entry n | v | | |
+ * +----------+ <-- data_offset | | |
+ * | data 0 | <------------------|--' |
+ * +----------+ | |
+ * | data 1 | <------------------|-----'
+ * +----------+ |
+ * | 00000000 | padding |
+ * +----------+ <-- dir_end <------'
+ * | UUID | <-- directory UUID (child directory)
+ * +----------+
+ * | entry 0 |
+ * +----------+
+ * | entry 1 |
+ * +----------+
+ * : :
+ * . .
+ * : :
+ * +----------+
+ * | entry n |
+ * +----------+
+ * | data 0 |
+ * +----------+
+ *
+ * We use dir_end to hold a pointer to the end of the directory. It
+ * will increase as we add directories and each directory should be
+ * added starting from the previous dir_end.
+ */
+ dir_len = tb_property_dir_length(dir, false, &data_len);
+ data_offset = start_offset + dir_len;
+ dir_end = start_offset + data_len + dir_len;
+
+ if (data_offset > dir_end)
+ return -EINVAL;
+ if (dir_end > block_len)
+ return -EINVAL;
+
+ /* Write headers first */
+ if (dir->uuid) {
+ struct tb_property_dir_entry *pe;
+
+ pe = (struct tb_property_dir_entry *)&block[start_offset];
+ memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid));
+ entry = pe->entries;
+ } else {
+ struct tb_property_rootdir_entry *re;
+
+ re = (struct tb_property_rootdir_entry *)&block[start_offset];
+ re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
+ re->length = dir_len - sizeof(*re) / 4;
+ entry = re->entries;
+ }
+
+ list_for_each_entry(property, &dir->properties, list) {
+ const struct tb_property_dir *child;
+
+ format_dwdata(entry, property->key, 2);
+ entry->type = property->type;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ child = property->value.dir;
+ ret = __tb_property_format_dir(child, block, dir_end,
+ block_len);
+ if (ret < 0)
+ return ret;
+ entry->length = tb_property_dir_length(child, false,
+ NULL);
+ entry->value = dir_end;
+ dir_end = ret;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ format_dwdata(&block[data_offset], property->value.data,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ format_dwdata(&block[data_offset], property->value.text,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ entry->length = property->length;
+ entry->value = property->value.immediate;
+ break;
+
+ default:
+ break;
+ }
+
+ entry++;
+ }
+
+ return dir_end;
+}
+
+/**
+ * tb_property_format_dir() - Formats directory to the packed XDomain format
+ * @dir: Directory to format
+ * @block: Property block where the packed data is placed
+ * @block_len: Length of the property block
+ *
+ * This function formats the directory to the packed format that can
+ * then be sent over the thunderbolt fabric to the receiving host.
+ * Returns %0 in case of success and negative errno on failure. Passing
+ * %NULL in @block returns the number of dwords the property block takes.
+ */
+ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
+ size_t block_len)
+{
+ ssize_t ret;
+
+ if (!block) {
+ size_t dir_len, data_len = 0;
+
+ dir_len = tb_property_dir_length(dir, true, &data_len);
+ return dir_len + data_len;
+ }
+
+ ret = __tb_property_format_dir(dir, block, 0, block_len);
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * tb_property_add_immediate() - Add immediate property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @value: Immediate value to store with the property
+ */
+int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
+ u32 value)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = 1;
+ property->value.immediate = value;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_immediate);
+
+/**
+ * tb_property_add_data() - Adds arbitrary data property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @buf: Data buffer to add
+ * @buflen: Number of bytes in the data buffer
+ *
+ * Function takes a copy of @buf and adds it to the directory.
+ */
+int tb_property_add_data(struct tb_property_dir *parent, const char *key,
+ const void *buf, size_t buflen)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(buflen, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA);
+ if (!property)
+ return -ENOMEM;
+
+	property->value.data = kzalloc(size, GFP_KERNEL);
+	if (!property->value.data) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
+	property->length = size / 4;
+	memcpy(property->value.data, buf, buflen);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_data);
+
+/**
+ * tb_property_add_text() - Adds string property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @text: String to add
+ *
+ * Function takes a copy of @text and adds it to the directory.
+ */
+int tb_property_add_text(struct tb_property_dir *parent, const char *key,
+ const char *text)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(strlen(text) + 1, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT);
+ if (!property)
+ return -ENOMEM;
+
+	property->value.text = kzalloc(size, GFP_KERNEL);
+	if (!property->value.text) {
+		kfree(property);
+		return -ENOMEM;
+	}
+
+	property->length = size / 4;
+	strcpy(property->value.text, text);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_text);
+
+/**
+ * tb_property_add_dir() - Adds a directory to the parent directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @dir: Directory to add
+ */
+int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
+ struct tb_property_dir *dir)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY);
+ if (!property)
+ return -ENOMEM;
+
+ property->value.dir = dir;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_dir);
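Taken together, the add helpers let a service driver build up its directory before publishing it; a hedged sketch, where the key names mirror the standard ones read back by populate_service() in xdomain.c and the values are placeholders:

	struct tb_property_dir *dir;

	dir = tb_property_create_dir(NULL);		/* no directory UUID */
	if (!dir)
		return -ENOMEM;

	tb_property_add_immediate(dir, "prtcid", 1);	/* protocol ID */
	tb_property_add_immediate(dir, "prtcvers", 1);	/* protocol version */
	tb_property_add_immediate(dir, "prtcrevs", 1);	/* protocol revision */
	tb_property_add_immediate(dir, "prtcstns", 0);	/* protocol settings */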
+
+/**
+ * tb_property_remove() - Removes property from a parent directory
+ * @property: Property to remove
+ *
+ * Note memory for @property is released as well so it is not allowed to
+ * touch the object after call to this function.
+ */
+void tb_property_remove(struct tb_property *property)
+{
+ list_del(&property->list);
+ kfree(property);
+}
+EXPORT_SYMBOL_GPL(tb_property_remove);
+
+/**
+ * tb_property_find() - Find a property from a directory
+ * @dir: Directory where the property is searched
+ * @key: Key to look for
+ * @type: Type of the property
+ *
+ * Finds and returns property from the given directory. Does not recurse
+ * into sub-directories. Returns %NULL if the property was not found.
+ */
+struct tb_property *tb_property_find(struct tb_property_dir *dir,
+ const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ if (property->type == type && !strcmp(property->key, key))
+ return property;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_property_find);
+
+/**
+ * tb_property_get_next() - Get next property from directory
+ * @dir: Directory holding properties
+ * @prev: Previous property in the directory (%NULL returns the first)
+ */
+struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
+ struct tb_property *prev)
+{
+ if (prev) {
+ if (list_is_last(&prev->list, &dir->properties))
+ return NULL;
+ return list_next_entry(prev, list);
+ }
+ return list_first_entry_or_null(&dir->properties, struct tb_property,
+ list);
+}
+EXPORT_SYMBOL_GPL(tb_property_get_next);
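A directory can then be walked without touching the list internals; a short sketch using the accessor above:

	struct tb_property *p = NULL;

	while ((p = tb_property_get_next(dir, p))) {
		if (p->type == TB_PROPERTY_TYPE_VALUE)
			pr_info("%s = %u\n", p->key, p->value.immediate);
	}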
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 53f40c57df59..da54ace4dd2f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - switch/port utility functions
*
@@ -171,11 +172,11 @@ static int nvm_authenticate_host(struct tb_switch *sw)
/*
* Root switch NVM upgrade requires that we disconnect the
- * existing PCIe paths first (in case it is not in safe mode
+ * existing paths first (in case it is not in safe mode
* already).
*/
if (!sw->safe_mode) {
- ret = tb_domain_disconnect_pcie_paths(sw->tb);
+ ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
/*
@@ -1363,6 +1364,9 @@ void tb_switch_remove(struct tb_switch *sw)
if (sw->ports[i].remote)
tb_switch_remove(sw->ports[i].remote->sw);
sw->ports[i].remote = NULL;
+ if (sw->ports[i].xdomain)
+ tb_xdomain_remove(sw->ports[i].xdomain);
+ sw->ports[i].xdomain = NULL;
}
if (!sw->is_unplugged)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 0b22ad9d68b4..1424581fd9af 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
@@ -224,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_port_info(up_port,
"PCIe tunnel activation failed, aborting\n");
tb_pci_free(tunnel);
+ continue;
}
list_add(&tunnel->list, &tcm->tunnel_list);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e0deee4f1eb0..895c57a0a090 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
@@ -9,6 +10,7 @@
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
+#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include "tb_regs.h"
@@ -39,23 +41,7 @@ struct tb_switch_nvm {
bool authenticating;
};
-/**
- * enum tb_security_level - Thunderbolt security level
- * @TB_SECURITY_NONE: No security, legacy mode
- * @TB_SECURITY_USER: User approval required at minimum
- * @TB_SECURITY_SECURE: One time saved key required at minimum
- * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
- */
-enum tb_security_level {
- TB_SECURITY_NONE,
- TB_SECURITY_USER,
- TB_SECURITY_SECURE,
- TB_SECURITY_DPONLY,
-};
-
#define TB_SWITCH_KEY_SIZE 32
-/* Each physical port contains 2 links on modern controllers */
-#define TB_SWITCH_LINKS_PER_PHY_PORT 2
/**
* struct tb_switch - a thunderbolt switch
@@ -125,14 +111,25 @@ struct tb_switch {
/**
* struct tb_port - a thunderbolt port, part of a tb_switch
+ * @config: Cached port configuration read from registers
+ * @sw: Switch the port belongs to
+ * @remote: Remote port (%NULL if not connected)
+ * @xdomain: Remote host (%NULL if not connected)
+ * @cap_phy: Offset, zero if not found
+ * @port: Port number on switch
+ * @disabled: Disabled by eeprom
+ * @dual_link_port: If the switch is connected using two ports, points
+ * to the other port.
+ * @link_nr: Is this primary or secondary port on the dual_link.
*/
struct tb_port {
struct tb_regs_port_header config;
struct tb_switch *sw;
- struct tb_port *remote; /* remote port, NULL if not connected */
- int cap_phy; /* offset, zero if not found */
- u8 port; /* port number on switch */
- bool disabled; /* disabled by eeprom */
+ struct tb_port *remote;
+ struct tb_xdomain *xdomain;
+ int cap_phy;
+ u8 port;
+ bool disabled;
struct tb_port *dual_link_port;
u8 link_nr:1;
};
@@ -205,6 +202,8 @@ struct tb_path {
* @add_switch_key: Add key to switch
* @challenge_switch_key: Challenge switch using key
* @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
+ * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
@@ -221,33 +220,8 @@ struct tb_cm_ops {
int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
const u8 *challenge, u8 *response);
int (*disconnect_pcie_paths)(struct tb *tb);
-};
-
-/**
- * struct tb - main thunderbolt bus structure
- * @dev: Domain device
- * @lock: Big lock. Must be held when accessing any struct
- * tb_switch / struct tb_port.
- * @nhi: Pointer to the NHI structure
- * @ctl: Control channel for this domain
- * @wq: Ordered workqueue for all domain specific work
- * @root_switch: Root switch of this domain
- * @cm_ops: Connection manager specific operations vector
- * @index: Linux assigned domain number
- * @security_level: Current security level
- * @privdata: Private connection manager specific data
- */
-struct tb {
- struct device dev;
- struct mutex lock;
- struct tb_nhi *nhi;
- struct tb_ctl *ctl;
- struct workqueue_struct *wq;
- struct tb_switch *root_switch;
- const struct tb_cm_ops *cm_ops;
- int index;
- enum tb_security_level security_level;
- unsigned long privdata[0];
+ int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+ int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
};
static inline void *tb_priv(struct tb *tb)
@@ -368,13 +342,14 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct bus_type tb_bus_type;
extern struct device_type tb_domain_type;
extern struct device_type tb_switch_type;
int tb_domain_init(void);
void tb_domain_exit(void);
void tb_switch_exit(void);
+int tb_xdomain_init(void);
+void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
int tb_domain_add(struct tb *tb);
@@ -387,6 +362,9 @@ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
int tb_domain_disconnect_pcie_paths(struct tb *tb);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_all_paths(struct tb *tb);
static inline void tb_domain_put(struct tb *tb)
{
@@ -409,11 +387,6 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
u8 depth);
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
-static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
-{
- return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
-}
-
static inline void tb_switch_put(struct tb_switch *sw)
{
put_device(&sw->dev);
@@ -471,4 +444,14 @@ static inline u64 tb_downstream_route(struct tb_port *port)
| ((u64) port->port << (port->sw->config.depth * 8));
}
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size);
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid);
+void tb_xdomain_add(struct tb_xdomain *xd);
+void tb_xdomain_remove(struct tb_xdomain *xd);
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth);
+
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index de6441e4a060..b0a092baa605 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -15,23 +15,6 @@
#include <linux/types.h>
#include <linux/uuid.h>
-enum tb_cfg_pkg_type {
- TB_CFG_PKG_READ = 1,
- TB_CFG_PKG_WRITE = 2,
- TB_CFG_PKG_ERROR = 3,
- TB_CFG_PKG_NOTIFY_ACK = 4,
- TB_CFG_PKG_EVENT = 5,
- TB_CFG_PKG_XDOMAIN_REQ = 6,
- TB_CFG_PKG_XDOMAIN_RESP = 7,
- TB_CFG_PKG_OVERRIDE = 8,
- TB_CFG_PKG_RESET = 9,
- TB_CFG_PKG_ICM_EVENT = 10,
- TB_CFG_PKG_ICM_CMD = 11,
- TB_CFG_PKG_ICM_RESP = 12,
- TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
-
-};
-
enum tb_cfg_space {
TB_CFG_HOPS = 0,
TB_CFG_PORT = 1,
@@ -118,11 +101,14 @@ enum icm_pkg_code {
ICM_CHALLENGE_DEVICE = 0x5,
ICM_ADD_DEVICE_KEY = 0x6,
ICM_GET_ROUTE = 0xa,
+ ICM_APPROVE_XDOMAIN = 0x10,
};
enum icm_event_code {
ICM_EVENT_DEVICE_CONNECTED = 3,
ICM_EVENT_DEVICE_DISCONNECTED = 4,
+ ICM_EVENT_XDOMAIN_CONNECTED = 6,
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
};
struct icm_pkg_header {
@@ -130,7 +116,7 @@ struct icm_pkg_header {
u8 flags;
u8 packet_id;
u8 total_packets;
-} __packed;
+};
#define ICM_FLAGS_ERROR BIT(0)
#define ICM_FLAGS_NO_KEY BIT(1)
@@ -139,20 +125,20 @@ struct icm_pkg_header {
struct icm_pkg_driver_ready {
struct icm_pkg_header hdr;
-} __packed;
+};
struct icm_pkg_driver_ready_response {
struct icm_pkg_header hdr;
u8 romver;
u8 ramver;
u16 security_level;
-} __packed;
+};
/* Falcon Ridge & Alpine Ridge common messages */
struct icm_fr_pkg_get_topology {
struct icm_pkg_header hdr;
-} __packed;
+};
#define ICM_GET_TOPOLOGY_PACKETS 14
@@ -167,7 +153,7 @@ struct icm_fr_pkg_get_topology_response {
u32 reserved[2];
u32 ports[16];
u32 port_hop_info[16];
-} __packed;
+};
#define ICM_SWITCH_USED BIT(0)
#define ICM_SWITCH_UPSTREAM_PORT_MASK GENMASK(7, 1)
@@ -184,7 +170,7 @@ struct icm_fr_event_device_connected {
u8 connection_id;
u16 link_info;
u32 ep_name[55];
-} __packed;
+};
#define ICM_LINK_INFO_LINK_MASK 0x7
#define ICM_LINK_INFO_DEPTH_SHIFT 4
@@ -197,13 +183,32 @@ struct icm_fr_pkg_approve_device {
u8 connection_key;
u8 connection_id;
u16 reserved;
-} __packed;
+};
struct icm_fr_event_device_disconnected {
struct icm_pkg_header hdr;
u16 reserved;
u16 link_info;
-} __packed;
+};
+
+struct icm_fr_event_xdomain_connected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ uuid_t local_uuid;
+ u32 local_route_hi;
+ u32 local_route_lo;
+ u32 remote_route_hi;
+ u32 remote_route_lo;
+};
+
+struct icm_fr_event_xdomain_disconnected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+};
struct icm_fr_pkg_add_device_key {
struct icm_pkg_header hdr;
@@ -212,7 +217,7 @@ struct icm_fr_pkg_add_device_key {
u8 connection_id;
u16 reserved;
u32 key[8];
-} __packed;
+};
struct icm_fr_pkg_add_device_key_response {
struct icm_pkg_header hdr;
@@ -220,7 +225,7 @@ struct icm_fr_pkg_add_device_key_response {
u8 connection_key;
u8 connection_id;
u16 reserved;
-} __packed;
+};
struct icm_fr_pkg_challenge_device {
struct icm_pkg_header hdr;
@@ -229,7 +234,7 @@ struct icm_fr_pkg_challenge_device {
u8 connection_id;
u16 reserved;
u32 challenge[8];
-} __packed;
+};
struct icm_fr_pkg_challenge_device_response {
struct icm_pkg_header hdr;
@@ -239,7 +244,29 @@ struct icm_fr_pkg_challenge_device_response {
u16 reserved;
u32 challenge[8];
u32 response[8];
-} __packed;
+};
+
+struct icm_fr_pkg_approve_xdomain {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+struct icm_fr_pkg_approve_xdomain_response {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
/* Alpine Ridge only messages */
@@ -247,7 +274,7 @@ struct icm_ar_pkg_get_route {
struct icm_pkg_header hdr;
u16 reserved;
u16 link_info;
-} __packed;
+};
struct icm_ar_pkg_get_route_response {
struct icm_pkg_header hdr;
@@ -255,6 +282,85 @@ struct icm_ar_pkg_get_route_response {
u16 link_info;
u32 route_hi;
u32 route_lo;
-} __packed;
+};
+
+/* XDomain messages */
+
+struct tb_xdomain_header {
+ u32 route_hi;
+ u32 route_lo;
+ u32 length_sn;
+};
+
+#define TB_XDOMAIN_LENGTH_MASK GENMASK(5, 0)
+#define TB_XDOMAIN_SN_MASK GENMASK(28, 27)
+#define TB_XDOMAIN_SN_SHIFT 27
+
+enum tb_xdp_type {
+ UUID_REQUEST_OLD = 1,
+ UUID_RESPONSE = 2,
+ PROPERTIES_REQUEST,
+ PROPERTIES_RESPONSE,
+ PROPERTIES_CHANGED_REQUEST,
+ PROPERTIES_CHANGED_RESPONSE,
+ ERROR_RESPONSE,
+ UUID_REQUEST = 12,
+};
+
+struct tb_xdp_header {
+ struct tb_xdomain_header xd_hdr;
+ uuid_t uuid;
+ u32 type;
+};
+
+struct tb_xdp_properties {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 reserved;
+};
+
+struct tb_xdp_properties_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 data_length;
+ u32 generation;
+ u32 data[0];
+};
+
+/*
+ * Max length of the data array a single XDomain property response is
+ * allowed to carry.
+ */
+#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH \
+ (((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4)
+
+/* Maximum size of the total property block in dwords we allow */
+#define TB_XDP_PROPERTIES_MAX_LENGTH 500
+
+struct tb_xdp_properties_changed {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+};
+
+struct tb_xdp_properties_changed_response {
+ struct tb_xdp_header hdr;
+};
+
+enum tb_xdp_error {
+ ERROR_SUCCESS,
+ ERROR_UNKNOWN_PACKET,
+ ERROR_UNKNOWN_DOMAIN,
+ ERROR_NOT_SUPPORTED,
+ ERROR_NOT_READY,
+};
+
+struct tb_xdp_error_response {
+ struct tb_xdp_header hdr;
+ u32 error;
+};
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 582bd1f156dc..5d94142afda6 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - Port/Switch config area registers
*
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
index ca4475907d7a..0637537ea53f 100644
--- a/drivers/thunderbolt/tunnel_pci.c
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
index a67f93c140fa..f9b65fa1fd4d 100644
--- a/drivers/thunderbolt/tunnel_pci.h
+++ b/drivers/thunderbolt/tunnel_pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
new file mode 100644
index 000000000000..f25d88d4552b
--- /dev/null
+++ b/drivers/thunderbolt/xdomain.c
@@ -0,0 +1,1570 @@
+/*
+ * Thunderbolt XDomain discovery protocol support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include "tb.h"
+
+#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
+#define XDOMAIN_PROPERTIES_RETRIES 60
+#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
+
+struct xdomain_request_work {
+ struct work_struct work;
+ struct tb_xdp_header *pkg;
+ struct tb *tb;
+};
+
+/* Serializes access to the properties and protocol handlers below */
+static DEFINE_MUTEX(xdomain_lock);
+
+/* Properties exposed to the remote domains */
+static struct tb_property_dir *xdomain_property_dir;
+static u32 *xdomain_property_block;
+static u32 xdomain_property_block_len;
+static u32 xdomain_property_block_gen;
+
+/* Additional protocol handlers */
+static LIST_HEAD(protocol_handlers);
+
+/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
+static const uuid_t tb_xdp_uuid =
+ UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
+ 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+
+static bool tb_xdomain_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ switch (pkg->frame.eof) {
+ case TB_CFG_PKG_ERROR:
+ return true;
+
+ case TB_CFG_PKG_XDOMAIN_RESP: {
+ const struct tb_xdp_header *res_hdr = pkg->buffer;
+ const struct tb_xdp_header *req_hdr = req->request;
+
+ if (pkg->frame.size < req->response_size / 4)
+ return false;
+
+ /* Make sure route matches */
+ if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
+ req_hdr->xd_hdr.route_hi)
+ return false;
+ if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
+ return false;
+
+ /* Check that the XDomain protocol matches */
+ if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
+ return false;
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static bool tb_xdomain_copy(struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ memcpy(req->response, pkg->buffer, req->response_size);
+ req->result.err = 0;
+ return true;
+}
+
+static void response_ready(void *data)
+{
+ tb_cfg_request_put(data);
+}
+
+static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = response;
+ req->request_size = size;
+ req->request_type = type;
+
+ return tb_cfg_request(ctl, req, response_ready, req);
+}
+
+/**
+ * tb_xdomain_response() - Send an XDomain response message
+ * @xd: XDomain to send the message
+ * @response: Response to send
+ * @size: Size of the response
+ * @type: PDF type of the response
+ *
+ * This can be used to send an XDomain response message to the other
+ * domain. No response for the message is expected.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ return __tb_xdomain_response(xd->tb->ctl, response, size, type);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_response);
+
+static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
+ size_t response_size, enum tb_cfg_pkg_type response_type,
+ unsigned int timeout_msec)
+{
+ struct tb_cfg_request *req;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = request;
+ req->request_size = request_size;
+ req->request_type = request_type;
+ req->response = response;
+ req->response_size = response_size;
+ req->response_type = response_type;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res.err == 1 ? -EIO : res.err;
+}
+
+/**
+ * tb_xdomain_request() - Send an XDomain request
+ * @xd: XDomain to send the request
+ * @request: Request to send
+ * @request_size: Size of the request in bytes
+ * @request_type: PDF type of the request
+ * @response: Response is copied here
+ * @response_size: Expected size of the response in bytes
+ * @response_type: Expected PDF type of the response
+ * @timeout_msec: Timeout in milliseconds to wait for the response
+ *
+ * This function can be used to send XDomain control channel messages to
+ * the other domain. The function waits until the response is received
+ * or the timeout expires, whichever comes first.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type,
+ void *response, size_t response_size,
+ enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
+{
+ return __tb_xdomain_request(xd->tb->ctl, request, request_size,
+ request_type, response, response_size,
+ response_type, timeout_msec);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_request);
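A hedged usage sketch from a service driver: req and res below stand for protocol-specific packets whose layout is not defined by this patch, and the timeout value is illustrative:

	ret = tb_xdomain_request(xd, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
				 &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
				 5000 /* ms */);
	if (ret)
		return ret;	/* timeout or other failure */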
+
+static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
+ u8 sequence, enum tb_xdp_type type, size_t size)
+{
+ u32 length_sn;
+
+ length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
+ length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
+
+ hdr->xd_hdr.route_hi = upper_32_bits(route);
+ hdr->xd_hdr.route_lo = lower_32_bits(route);
+ hdr->xd_hdr.length_sn = length_sn;
+ hdr->type = type;
+ memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
+}
+
+static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+{
+ const struct tb_xdp_error_response *error;
+
+ if (hdr->type != ERROR_RESPONSE)
+ return 0;
+
+ error = (const struct tb_xdp_error_response *)hdr;
+
+ switch (error->error) {
+ case ERROR_UNKNOWN_PACKET:
+ case ERROR_UNKNOWN_DOMAIN:
+ return -EIO;
+ case ERROR_NOT_SUPPORTED:
+ return -ENOTSUPP;
+ case ERROR_NOT_READY:
+ return -EAGAIN;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ enum tb_xdp_error error)
+{
+ struct tb_xdp_error_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
+ sizeof(res));
+ res.error = error;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
+ const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
+ u32 **block, u32 *generation)
+{
+ struct tb_xdp_properties_response *res;
+ struct tb_xdp_properties req;
+ u16 data_len, len;
+ size_t total_size;
+ u32 *data = NULL;
+ int ret;
+
+ total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
+ sizeof(req));
+ memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
+ memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
+
+ len = 0;
+ data_len = 0;
+
+ do {
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, res,
+ total_size, TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ goto err;
+
+ ret = tb_xdp_handle_error(&res->hdr);
+ if (ret)
+ goto err;
+
+ /*
+ * Package length includes the whole payload without the
+		 * XDomain header. Validate first that the package is at
+		 * least the size of the response structure.
+ */
+ len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (len < sizeof(*res) / 4) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len += sizeof(res->hdr.xd_hdr) / 4;
+ len -= sizeof(*res) / 4;
+
+ if (res->offset != req.offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+		 * On the first pass, allocate a block that has enough
+		 * space for the whole properties block.
+ */
+ if (!data) {
+ data_len = res->data_length;
+ if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ memcpy(data + req.offset, res->data, len * 4);
+ req.offset += len;
+ } while (!data_len || req.offset < data_len);
+
+ *block = data;
+ *generation = res->generation;
+
+ kfree(res);
+
+ return data_len;
+
+err:
+ kfree(data);
+ kfree(res);
+
+ return ret;
+}
+
+static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
+ u64 route, u8 sequence, const uuid_t *src_uuid,
+ const struct tb_xdp_properties *req)
+{
+ struct tb_xdp_properties_response *res;
+ size_t total_size;
+ u16 len;
+ int ret;
+
+ /*
+ * Currently we expect all requests to be directed to us. The
+	 * protocol supports forwarding, though, for which we might add
+ * support later on.
+ */
+ if (!uuid_equal(src_uuid, &req->dst_uuid)) {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_UNKNOWN_DOMAIN);
+ return 0;
+ }
+
+ mutex_lock(&xdomain_lock);
+
+ if (req->offset >= xdomain_property_block_len) {
+ mutex_unlock(&xdomain_lock);
+ return -EINVAL;
+ }
+
+ len = xdomain_property_block_len - req->offset;
+ len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
+ total_size = sizeof(*res) + len * 4;
+
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res) {
+ mutex_unlock(&xdomain_lock);
+ return -ENOMEM;
+	/* We expect the packet to be at least the size of the header */
+
+ tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+ total_size);
+ res->generation = xdomain_property_block_gen;
+ res->data_length = xdomain_property_block_len;
+ res->offset = req->offset;
+ uuid_copy(&res->src_uuid, src_uuid);
+ uuid_copy(&res->dst_uuid, &req->src_uuid);
+ memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+
+ mutex_unlock(&xdomain_lock);
+
+ ret = __tb_xdomain_response(ctl, res, total_size,
+ TB_CFG_PKG_XDOMAIN_RESP);
+
+ kfree(res);
+ return ret;
+}
+
+static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
+ int retry, const uuid_t *uuid)
+{
+ struct tb_xdp_properties_changed_response res;
+ struct tb_xdp_properties_changed req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4,
+ PROPERTIES_CHANGED_REQUEST, sizeof(req));
+ uuid_copy(&req.src_uuid, uuid);
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return tb_xdp_handle_error(&res.hdr);
+}
+
+static int
+tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
+{
+ struct tb_xdp_properties_changed_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence,
+ PROPERTIES_CHANGED_RESPONSE, sizeof(res));
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+/**
+ * tb_register_protocol_handler() - Register protocol handler
+ * @handler: Handler to register
+ *
+ * This allows XDomain service drivers to hook into incoming XDomain
+ * messages. After this function is called the service driver needs to
+ * be able to handle calls to the callback whenever a package with the
+ * registered protocol is received.
+ */
+int tb_register_protocol_handler(struct tb_protocol_handler *handler)
+{
+ if (!handler->uuid || !handler->callback)
+ return -EINVAL;
+ if (uuid_equal(handler->uuid, &tb_xdp_uuid))
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ list_add_tail(&handler->list, &protocol_handlers);
+ mutex_unlock(&xdomain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
+
+/**
+ * tb_unregister_protocol_handler() - Unregister protocol handler
+ * @handler: Handler to unregister
+ *
+ * Removes the previously registered protocol handler.
+ */
+void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
+{
+ mutex_lock(&xdomain_lock);
+ list_del_init(&handler->list);
+ mutex_unlock(&xdomain_lock);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
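A hedged registration sketch; the UUID is made up for illustration and the callback semantics follow the dispatch loop in tb_xdomain_handle_request() below (a positive return value reports the packet as handled):

	static int my_proto_cb(const void *buf, size_t size, void *data)
	{
		/* parse the incoming packet here */
		return 1;	/* handled, stop further dispatch */
	}

	/* hypothetical protocol UUID */
	static const uuid_t my_proto_uuid =
		UUID_INIT(0x12345678, 0x1234, 0x1234,
			  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);

	static struct tb_protocol_handler my_handler = {
		.uuid = &my_proto_uuid,
		.callback = my_proto_cb,
	};

	tb_register_protocol_handler(&my_handler);
	/* ... */
	tb_unregister_protocol_handler(&my_handler);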
+
+static void tb_xdp_handle_request(struct work_struct *work)
+{
+ struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+ const struct tb_xdp_header *pkg = xw->pkg;
+ const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
+ struct tb *tb = xw->tb;
+ struct tb_ctl *ctl = tb->ctl;
+ const uuid_t *uuid;
+ int ret = 0;
+ u32 sequence;
+ u64 route;
+
+ route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
+ sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
+ sequence >>= TB_XDOMAIN_SN_SHIFT;
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch)
+ uuid = tb->root_switch->uuid;
+ else
+ uuid = NULL;
+ mutex_unlock(&tb->lock);
+
+ if (!uuid) {
+ tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
+ goto out;
+ }
+
+ switch (pkg->type) {
+ case PROPERTIES_REQUEST:
+ ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
+ (const struct tb_xdp_properties *)pkg);
+ break;
+
+ case PROPERTIES_CHANGED_REQUEST: {
+ const struct tb_xdp_properties_changed *xchg =
+ (const struct tb_xdp_properties_changed *)pkg;
+ struct tb_xdomain *xd;
+
+ ret = tb_xdp_properties_changed_response(ctl, route, sequence);
+
+ /*
+ * Since the properties have been changed, let's update
+ * the xdomain related to this connection as well in
+ * case there is a change in services it offers.
+ */
+ xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+ if (xd) {
+ queue_delayed_work(tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(50));
+ tb_xdomain_put(xd);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (ret) {
+ tb_warn(tb, "failed to send XDomain response for %#x\n",
+ pkg->type);
+ }
+
+out:
+ kfree(xw->pkg);
+ kfree(xw);
+}
+
+static void
+tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
+ size_t size)
+{
+ struct xdomain_request_work *xw;
+
+ xw = kmalloc(sizeof(*xw), GFP_KERNEL);
+ if (!xw)
+ return;
+
+ INIT_WORK(&xw->work, tb_xdp_handle_request);
+	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
+	if (!xw->pkg) {
+		kfree(xw);
+		return;
+	}
+
+ xw->tb = tb;
+
+ queue_work(tb->wq, &xw->work);
+}
+
+/**
+ * tb_register_service_driver() - Register XDomain service driver
+ * @drv: Driver to register
+ *
+ * Registers new service driver from @drv to the bus.
+ */
+int tb_register_service_driver(struct tb_service_driver *drv)
+{
+ drv->driver.bus = &tb_bus_type;
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_register_service_driver);
+
+/**
+ * tb_unregister_service_driver() - Unregister XDomain service driver
+ * @drv: Driver to unregister
+ *
+ * Unregisters XDomain service driver from the bus.
+ */
+void tb_unregister_service_driver(struct tb_service_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
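A minimal registration sketch, assuming only what is visible in this patch; the probe/remove hooks and id_table a real driver would also fill belong to the struct tb_service_driver definition not shown here:

	static struct tb_service_driver my_service_driver = {
		.driver = {
			.name = "my-tb-service",	/* hypothetical name */
			.owner = THIS_MODULE,
		},
	};

	static int __init my_service_init(void)
	{
		return tb_register_service_driver(&my_service_driver);
	}

	static void __exit my_service_exit(void)
	{
		tb_unregister_service_driver(&my_service_driver);
	}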
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /*
+ * It should be null terminated but anything else is pretty much
+ * allowed.
+ */
+ return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+}
+static DEVICE_ATTR_RO(key);
+
+static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+{
+ return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
+ svc->prtcid, svc->prtcvers, svc->prtcrevs);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /* Full buffer size except new line and null termination */
+ get_modalias(svc, buf, PAGE_SIZE - 2);
+	return strlen(strcat(buf, "\n"));
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcid);
+}
+static DEVICE_ATTR_RO(prtcid);
+
+static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcvers);
+}
+static DEVICE_ATTR_RO(prtcvers);
+
+static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcrevs);
+}
+static DEVICE_ATTR_RO(prtcrevs);
+
+static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "0x%08x\n", svc->prtcstns);
+}
+static DEVICE_ATTR_RO(prtcstns);
+
+static struct attribute *tb_service_attrs[] = {
+ &dev_attr_key.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_prtcid.attr,
+ &dev_attr_prtcvers.attr,
+ &dev_attr_prtcrevs.attr,
+ &dev_attr_prtcstns.attr,
+ NULL,
+};
+
+static struct attribute_group tb_service_attr_group = {
+ .attrs = tb_service_attrs,
+};
+
+static const struct attribute_group *tb_service_attr_groups[] = {
+ &tb_service_attr_group,
+ NULL,
+};
+
+static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ char modalias[64];
+
+ get_modalias(svc, modalias, sizeof(modalias));
+ return add_uevent_var(env, "MODALIAS=%s", modalias);
+}
+
+static void tb_service_release(struct device *dev)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ struct tb_xdomain *xd = tb_service_parent(svc);
+
+ ida_simple_remove(&xd->service_ids, svc->id);
+ kfree(svc->key);
+ kfree(svc);
+}
+
+struct device_type tb_service_type = {
+ .name = "thunderbolt_service",
+ .groups = tb_service_attr_groups,
+ .uevent = tb_service_uevent,
+ .release = tb_service_release,
+};
+EXPORT_SYMBOL_GPL(tb_service_type);
+
+static int remove_missing_service(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ if (!tb_property_find(xd->properties, svc->key,
+ TB_PROPERTY_TYPE_DIRECTORY))
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int find_service(struct device *dev, void *data)
+{
+ const struct tb_property *p = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ return !strcmp(svc->key, p->key);
+}
+
+static int populate_service(struct tb_service *svc,
+ struct tb_property *property)
+{
+ struct tb_property_dir *dir = property->value.dir;
+ struct tb_property *p;
+
+ /* Fill in standard properties */
+ p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcid = p->value.immediate;
+ p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcvers = p->value.immediate;
+ p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcrevs = p->value.immediate;
+ p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcstns = p->value.immediate;
+
+ svc->key = kstrdup(property->key, GFP_KERNEL);
+ if (!svc->key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void enumerate_services(struct tb_xdomain *xd)
+{
+ struct tb_service *svc;
+ struct tb_property *p;
+ struct device *dev;
+
+ /*
+ * First remove all services that are not available anymore in
+ * the updated property block.
+ */
+ device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
+
+ /* Then re-enumerate properties creating new services as we go */
+ tb_property_for_each(xd->properties, p) {
+ if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
+ continue;
+
+ /* If the service exists already we are fine */
+ dev = device_find_child(&xd->dev, p, find_service);
+ if (dev) {
+ put_device(dev);
+ continue;
+ }
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ break;
+
+ if (populate_service(svc, p)) {
+ kfree(svc);
+ break;
+ }
+
+		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		if (svc->id < 0) {
+			kfree(svc->key);
+			kfree(svc);
+			break;
+		}
+
+ svc->dev.bus = &tb_bus_type;
+ svc->dev.type = &tb_service_type;
+ svc->dev.parent = &xd->dev;
+ dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+
+ if (device_register(&svc->dev)) {
+ put_device(&svc->dev);
+ break;
+ }
+ }
+}
+
+static int populate_properties(struct tb_xdomain *xd,
+ struct tb_property_dir *dir)
+{
+ const struct tb_property *p;
+
+ /* Required properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->device = p->value.immediate;
+
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->vendor = p->value.immediate;
+
+ kfree(xd->device_name);
+ xd->device_name = NULL;
+ kfree(xd->vendor_name);
+ xd->vendor_name = NULL;
+
+ /* Optional properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
+
+ return 0;
+}
+
+/* Called with @xd->lock held */
+static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+{
+ if (!xd->resume)
+ return;
+
+ xd->resume = false;
+ if (xd->transmit_path) {
+ dev_dbg(&xd->dev, "re-establishing DMA path\n");
+ tb_domain_approve_xdomain_paths(xd->tb, xd);
+ }
+}
+
+static void tb_xdomain_get_properties(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_properties_work.work);
+ struct tb_property_dir *dir;
+ struct tb *tb = xd->tb;
+ bool update = false;
+ u32 *block = NULL;
+ u32 gen = 0;
+ int ret;
+
+ ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
+ xd->remote_uuid, xd->properties_retries,
+ &block, &gen);
+ if (ret < 0) {
+ if (xd->properties_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ } else {
+ /* Give up now */
+ dev_err(&xd->dev,
+				"failed to read XDomain properties from %pUb\n",
+ xd->remote_uuid);
+ }
+ return;
+ }
+
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+
+ mutex_lock(&xd->lock);
+
+ /* Only accept newer generation properties */
+ if (xd->properties && gen <= xd->property_block_gen) {
+ /*
+ * On resume it is likely that the properties block is
+ * not changed (unless the other end added or removed
+ * services). However, we need to make sure the existing
+ * DMA paths are restored properly.
+ */
+ tb_xdomain_restore_paths(xd);
+ goto err_free_block;
+ }
+
+ dir = tb_property_parse_dir(block, ret);
+ if (!dir) {
+ dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ goto err_free_block;
+ }
+
+ ret = populate_properties(xd, dir);
+ if (ret) {
+ dev_err(&xd->dev, "missing XDomain properties in response\n");
+ goto err_free_dir;
+ }
+
+ /* Release the existing one */
+ if (xd->properties) {
+ tb_property_free_dir(xd->properties);
+ update = true;
+ }
+
+ xd->properties = dir;
+ xd->property_block_gen = gen;
+
+ tb_xdomain_restore_paths(xd);
+
+ mutex_unlock(&xd->lock);
+
+ kfree(block);
+
+ /*
+ * Now the device should be ready enough so we can add it to the
+ * bus and let userspace know about it. If the device is already
+	 * registered, we notify userspace that it has changed.
+ */
+ if (!update) {
+ if (device_add(&xd->dev)) {
+ dev_err(&xd->dev, "failed to add XDomain device\n");
+ return;
+ }
+ } else {
+ kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+ }
+
+ enumerate_services(xd);
+ return;
+
+err_free_dir:
+ tb_property_free_dir(dir);
+err_free_block:
+ kfree(block);
+ mutex_unlock(&xd->lock);
+}
+
+static void tb_xdomain_properties_changed(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ properties_changed_work.work);
+ int ret;
+
+ ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
+ xd->properties_changed_retries, xd->local_uuid);
+ if (ret) {
+ if (xd->properties_changed_retries-- > 0)
+ queue_delayed_work(xd->tb->wq,
+ &xd->properties_changed_work,
+ msecs_to_jiffies(1000));
+ return;
+ }
+
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%pUb\n", xd->remote_uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *xdomain_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_vendor_name.attr,
+ NULL,
+};
+
+static struct attribute_group xdomain_attr_group = {
+ .attrs = xdomain_attrs,
+};
+
+static const struct attribute_group *xdomain_attr_groups[] = {
+ &xdomain_attr_group,
+ NULL,
+};
+
+static void tb_xdomain_release(struct device *dev)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ put_device(xd->dev.parent);
+
+ tb_property_free_dir(xd->properties);
+ ida_destroy(&xd->service_ids);
+
+ kfree(xd->local_uuid);
+ kfree(xd->remote_uuid);
+ kfree(xd->device_name);
+ kfree(xd->vendor_name);
+ kfree(xd);
+}
+
+static void start_handshake(struct tb_xdomain *xd)
+{
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+
+ /* Start exchanging properties with the other host */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+ xd->properties_retries = 0;
+ xd->properties_changed_retries = 0;
+
+ cancel_delayed_work_sync(&xd->get_properties_work);
+ cancel_delayed_work_sync(&xd->properties_changed_work);
+}
+
+static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+{
+ stop_handshake(tb_to_xdomain(dev));
+ return 0;
+}
+
+static int __maybe_unused tb_xdomain_resume(struct device *dev)
+{
+ struct tb_xdomain *xd = tb_to_xdomain(dev);
+
+ /*
+	 * Ask tb_xdomain_get_properties() to restore any existing DMA
+	 * paths after the properties are re-read.
+ */
+ xd->resume = true;
+ start_handshake(xd);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tb_xdomain_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
+};
+
+struct device_type tb_xdomain_type = {
+ .name = "thunderbolt_xdomain",
+ .release = tb_xdomain_release,
+ .pm = &tb_xdomain_pm_ops,
+};
+EXPORT_SYMBOL_GPL(tb_xdomain_type);
+
+/**
+ * tb_xdomain_alloc() - Allocate new XDomain object
+ * @tb: Domain where the XDomain belongs
+ * @parent: Parent device (the switch through which the connection to the
+ * other domain is reached).
+ * @route: Route string used to reach the other domain
+ * @local_uuid: Our local domain UUID
+ * @remote_uuid: UUID of the other domain
+ *
+ * Allocates new XDomain structure and returns pointer to that. The
+ * object must be released by calling tb_xdomain_put().
+ */
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid)
+{
+ struct tb_xdomain *xd;
+
+ xd = kzalloc(sizeof(*xd), GFP_KERNEL);
+ if (!xd)
+ return NULL;
+
+ xd->tb = tb;
+ xd->route = route;
+ ida_init(&xd->service_ids);
+ mutex_init(&xd->lock);
+ INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->properties_changed_work,
+ tb_xdomain_properties_changed);
+
+ xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->local_uuid)
+ goto err_free;
+
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+
+ device_initialize(&xd->dev);
+ xd->dev.parent = get_device(parent);
+ xd->dev.bus = &tb_bus_type;
+ xd->dev.type = &tb_xdomain_type;
+ xd->dev.groups = xdomain_attr_groups;
+ dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+
+ return xd;
+
+err_free_local_uuid:
+ kfree(xd->local_uuid);
+err_free:
+ kfree(xd);
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_add() - Add XDomain to the bus
+ * @xd: XDomain to add
+ *
+ * This function starts XDomain discovery protocol handshake and
+ * eventually adds the XDomain to the bus. After calling this function
+ * the caller needs to call tb_xdomain_remove() in order to remove and
+ * release the object regardless of whether the handshake succeeded or not.
+ */
+void tb_xdomain_add(struct tb_xdomain *xd)
+{
+ /* Start exchanging properties with the other host */
+ start_handshake(xd);
+}
+
+static int unregister_service(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+/**
+ * tb_xdomain_remove() - Remove XDomain from the bus
+ * @xd: XDomain to remove
+ *
+ * This will stop all ongoing configuration work and remove the XDomain
+ * along with any services from the bus. When the last reference to @xd
+ * is released the object will be released as well.
+ */
+void tb_xdomain_remove(struct tb_xdomain *xd)
+{
+ stop_handshake(xd);
+
+ device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+
+ if (!device_is_registered(&xd->dev))
+ put_device(&xd->dev);
+ else
+ device_unregister(&xd->dev);
+}
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID of the transmit path the other end is using to
+ * send packets
+ * @transmit_ring: DMA ring used to receive packets from the other end
+ * @receive_path: HopID of the receive path the other end is using to
+ * receive packets
+ * @receive_ring: DMA ring used to send packets to the other end
+ *
+ * The function enables DMA paths accordingly so that after successful
+ * return the caller can send and receive packets using the high-speed
+ * DMA paths.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
+ u16 transmit_ring, u16 receive_path,
+ u16 receive_ring)
+{
+ int ret;
+
+ mutex_lock(&xd->lock);
+
+ if (xd->transmit_path) {
+ ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
+ goto exit_unlock;
+ }
+
+ xd->transmit_path = transmit_path;
+ xd->transmit_ring = transmit_ring;
+ xd->receive_path = receive_path;
+ xd->receive_ring = receive_ring;
+
+ ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+
+exit_unlock:
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After a call to
+ * this function the caller is not expected to use the rings anymore.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+{
+ int ret = 0;
+
+ mutex_lock(&xd->lock);
+ if (xd->transmit_path) {
+ xd->transmit_path = 0;
+ xd->transmit_ring = 0;
+ xd->receive_path = 0;
+ xd->receive_ring = 0;
+
+ ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
+ }
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
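A hedged pairing sketch from a service driver's point of view; the HopIDs and ring numbers below are placeholders that in practice come from ring allocation and protocol negotiation:

	ret = tb_xdomain_enable_paths(xd, 15 /* transmit path */,
				      1 /* transmit ring */,
				      15 /* receive path */,
				      1 /* receive ring */);
	if (ret)
		return ret;

	/* ... exchange packets over the DMA rings ... */

	tb_xdomain_disable_paths(xd);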
+
+struct tb_xdomain_lookup {
+ const uuid_t *uuid;
+ u8 link;
+ u8 depth;
+};
+
+static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
+ const struct tb_xdomain_lookup *lookup)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_xdomain *xd;
+
+ if (tb_is_upstream_port(port))
+ continue;
+
+ if (port->xdomain) {
+ xd = port->xdomain;
+
+ if (lookup->uuid) {
+ if (uuid_equal(xd->remote_uuid, lookup->uuid))
+ return xd;
+ } else if (lookup->link == xd->link &&
+ lookup->depth == xd->depth) {
+ return xd;
+ }
+ } else if (port->remote) {
+ xd = switch_find_xdomain(port->remote->sw, lookup);
+ if (xd)
+ return xd;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
+ * @tb: Domain where the XDomain belongs
+ * @uuid: UUID to look for
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.uuid = uuid;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ if (xd) {
+ get_device(&xd->dev);
+ return xd;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
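A short lookup sketch (remote_uuid is a placeholder); note the locking requirement stated above and the matching put once done:

	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, remote_uuid);
	mutex_unlock(&tb->lock);

	if (xd) {
		/* use xd ... */
		tb_xdomain_put(xd);
	}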
+
+/**
+ * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
+ * @tb: Domain where the XDomain belongs
+ * @link: Root switch link number
+ * @depth: Depth in the link
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.link = link;
+ lookup.depth = depth;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ if (xd) {
+ get_device(&xd->dev);
+ return xd;
+ }
+
+ return NULL;
+}
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ const struct tb_protocol_handler *handler, *tmp;
+ const struct tb_xdp_header *hdr = buf;
+ unsigned int length;
+ int ret = 0;
+
+ /* We expect the packet is at least size of the header */
+ length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+ if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+
+ /*
+ * Handle XDomain discovery protocol packets directly here. For
+ * other protocols (based on their UUID) we call registered
+ * handlers in turn.
+ */
+ if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
+ if (type == TB_CFG_PKG_XDOMAIN_REQ) {
+ tb_xdp_schedule_request(tb, hdr, size);
+ return true;
+ }
+ return false;
+ }
+
+ mutex_lock(&xdomain_lock);
+ list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
+ if (!uuid_equal(&hdr->uuid, handler->uuid))
+ continue;
+
+ mutex_unlock(&xdomain_lock);
+ ret = handler->callback(buf, size, handler->data);
+ mutex_lock(&xdomain_lock);
+
+ if (ret)
+ break;
+ }
+ mutex_unlock(&xdomain_lock);
+
+ return ret > 0;
+}
+
+static int rebuild_property_block(void)
+{
+ u32 *block, len;
+ int ret;
+
+ ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ len = ret;
+
+ block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ ret = tb_property_format_dir(xdomain_property_dir, block, len);
+ if (ret) {
+ kfree(block);
+ return ret;
+ }
+
+ kfree(xdomain_property_block);
+ xdomain_property_block = block;
+ xdomain_property_block_len = len;
+ xdomain_property_block_gen++;
+
+ return 0;
+}
+
+static int update_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+
+ xd = tb_to_xdomain(dev);
+ if (xd) {
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(50));
+ }
+
+ return 0;
+}
+
+static void update_all_xdomains(void)
+{
+ bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
+}
+
+static bool remove_directory(const char *key, const struct tb_property_dir *dir)
+{
+ struct tb_property *p;
+
+ p = tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY);
+ if (p && p->value.dir == dir) {
+ tb_property_remove(p);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tb_register_property_dir() - Register property directory to the host
+ * @key: Key (name) of the directory to add
+ * @dir: Directory to add
+ *
+ * Service drivers can use this function to add a new property directory
+ * to the properties this host exposes. The other connected hosts are
+ * notified so they can re-read the properties of this host if they are
+ * interested.
+ *
+ * Return: %0 on success and negative errno on failure
+ */
+int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret;
+
+ if (WARN_ON(!xdomain_property_dir))
+ return -EAGAIN;
+
+ if (!key || strlen(key) > 8)
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ if (tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY)) {
+ ret = -EEXIST;
+ goto err_unlock;
+ }
+
+ ret = tb_property_add_dir(xdomain_property_dir, key, dir);
+ if (ret)
+ goto err_unlock;
+
+ ret = rebuild_property_block();
+ if (ret) {
+ remove_directory(key, dir);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&xdomain_lock);
+ update_all_xdomains();
+ return 0;
+
+err_unlock:
+ mutex_unlock(&xdomain_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_register_property_dir);
+
+/**
+ * tb_unregister_property_dir() - Removes property directory from host
+ * @key: Key (name) of the directory
+ * @dir: Directory to remove
+ *
+ * This will remove the existing directory from this host and notify the
+ * connected hosts about the change.
+ */
+void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret = 0;
+
+ mutex_lock(&xdomain_lock);
+ if (remove_directory(key, dir))
+ ret = rebuild_property_block();
+ mutex_unlock(&xdomain_lock);
+
+ if (!ret)
+ update_all_xdomains();
+}
+EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
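A minimal sketch of how a service driver might use the two exports above from its module init and exit paths. Only helpers visible in this file are used; the "mysvc" key (keys must be at most 8 characters, as enforced above), the property name and the <linux/thunderbolt.h> include are illustrative assumptions.

	#include <linux/module.h>
	#include <linux/thunderbolt.h>	/* assumed home of the tb_property_* declarations */

	static struct tb_property_dir *my_svc_dir;

	static int __init my_svc_init(void)
	{
		int ret;

		my_svc_dir = tb_property_create_dir(NULL);
		if (!my_svc_dir)
			return -ENOMEM;

		/* Property key and value below are purely illustrative */
		tb_property_add_immediate(my_svc_dir, "prtcid", 1);

		ret = tb_register_property_dir("mysvc", my_svc_dir);
		if (ret)
			tb_property_free_dir(my_svc_dir);
		return ret;
	}
	module_init(my_svc_init);

	static void __exit my_svc_exit(void)
	{
		tb_unregister_property_dir("mysvc", my_svc_dir);
		tb_property_free_dir(my_svc_dir);
	}
	module_exit(my_svc_exit);

	MODULE_LICENSE("GPL");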
+
+int tb_xdomain_init(void)
+{
+ int ret;
+
+ xdomain_property_dir = tb_property_create_dir(NULL);
+ if (!xdomain_property_dir)
+ return -ENOMEM;
+
+ /*
+ * Initialize standard set of properties without any service
+ * directories. Those will be added by service drivers
+ * themselves when they are loaded.
+ */
+ tb_property_add_immediate(xdomain_property_dir, "vendorid",
+ PCI_VENDOR_ID_INTEL);
+ tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
+ tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+ tb_property_add_text(xdomain_property_dir, "deviceid",
+ utsname()->nodename);
+ tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
+
+ ret = rebuild_property_block();
+ if (ret) {
+ tb_property_free_dir(xdomain_property_dir);
+ xdomain_property_dir = NULL;
+ }
+
+ return ret;
+}
+
+void tb_xdomain_exit(void)
+{
+ kfree(xdomain_property_block);
+ tb_property_free_dir(xdomain_property_dir);
+}
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 16330a819685..8ce3a8661b31 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 9820e20993db..32d7ce430b02 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial driver for the amiga builtin port.
*
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index ce24182f8514..c369bf27a67b 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TTY over Blackfin JTAG Communication
*
* Copyright 2008-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#define DRV_NAME "bfin-jtag-comm"
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index d272bc4e7fb5..cf0bde3bb927 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#undef BLOCKMOVE
#define Z_WAKE
#undef Z_EXT_CHARS_IN_BUFFER
@@ -278,16 +279,15 @@ static unsigned detect_isa_irq(void __iomem *);
#endif /* CONFIG_ISA */
#ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;
-static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0);
+static DEFINE_TIMER(cyz_timerlist, cyz_poll);
#else /* CONFIG_CYZ_INTR */
-static void cyz_rx_restart(unsigned long);
-static struct timer_list cyz_rx_full_timer[NR_PORTS];
+static void cyz_rx_restart(struct timer_list *);
#endif /* CONFIG_CYZ_INTR */
static void cyy_writeb(struct cyclades_port *port, u32 reg, u8 val)
@@ -992,10 +992,8 @@ static void cyz_handle_rx(struct cyclades_port *info)
else
char_count = rx_put - rx_get + rx_bufsize;
if (char_count >= readl(&buf_ctrl->rx_threshold) &&
- !timer_pending(&cyz_rx_full_timer[
- info->line]))
- mod_timer(&cyz_rx_full_timer[info->line],
- jiffies + 1);
+ !timer_pending(&info->rx_full_timer))
+ mod_timer(&info->rx_full_timer, jiffies + 1);
#endif
info->idle_stats.recv_idle = jiffies;
tty_schedule_flip(&info->port);
@@ -1197,9 +1195,9 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
} /* cyz_interrupt */
-static void cyz_rx_restart(unsigned long arg)
+static void cyz_rx_restart(struct timer_list *t)
{
- struct cyclades_port *info = (struct cyclades_port *)arg;
+ struct cyclades_port *info = from_timer(info, t, rx_full_timer);
struct cyclades_card *card = info->card;
int retval;
__u32 channel = info->line - card->first_line;
@@ -1216,7 +1214,7 @@ static void cyz_rx_restart(unsigned long arg)
#else /* CONFIG_CYZ_INTR */
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
@@ -3097,8 +3095,7 @@ static int cy_init_card(struct cyclades_card *cinfo)
else
info->xmit_fifo_size = 4 * CYZ_FIFO_SIZE;
#ifdef CONFIG_CYZ_INTR
- setup_timer(&cyz_rx_full_timer[port],
- cyz_rx_restart, (unsigned long)info);
+ timer_setup(&info->rx_full_timer, cyz_rx_restart, 0);
#endif
} else {
unsigned short chip_number;
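The cyclades changes above follow the timer API conversion used throughout this series: callbacks take a struct timer_list pointer, from_timer() recovers the enclosing structure, timer_setup() replaces setup_timer()/setup_pinned_timer(), and DEFINE_TIMER() loses its two trailing arguments. A generic sketch, with struct foo and its field names invented for illustration:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct foo {
		struct timer_list poll_timer;
	};

	static void foo_poll(struct timer_list *t)
	{
		/* from_timer() maps the timer back to its containing structure */
		struct foo *foo = from_timer(foo, t, poll_timer);

		/* ... poll the hardware ..., then re-arm */
		mod_timer(&foo->poll_timer, jiffies + HZ);
	}

	static void foo_start(struct foo *foo)
	{
		/* No more casting the object to an unsigned long cookie */
		timer_setup(&foo->poll_timer, foo_poll, 0);
		mod_timer(&foo->poll_timer, jiffies + HZ);
	}

	/* Static timers drop the trailing data/flags arguments as well */
	static void bar_poll(struct timer_list *unused)
	{
	}
	static DEFINE_TIMER(bar_timer, bar_poll);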
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index a1c7125cb968..47ac56817c43 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* ePAPR hypervisor byte channel device driver
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*
* Author: Timur Tabi <timur@freescale.com>
*
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
* This driver supports three distinct interfaces, all of which are related to
* ePAPR hypervisor byte channels.
*
@@ -328,7 +325,7 @@ console_initcall(ehv_bc_console_init);
/******************************** TTY DRIVER ********************************/
/*
- * byte channel receive interupt handler
+ * byte channel receive interrupt handler
*
* This ISR is called whenever data is available on a byte channel.
*/
@@ -428,7 +425,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc)
}
/*
- * byte channel transmit interupt handler
+ * byte channel transmit interrupt handler
*
* This ISR is called whenever space becomes available for transmitting
* characters on a byte channel.
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 381e981dee06..7f657bb5113c 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2017 Imagination Technologies Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index 6a2702be76d1..0b02ec7f1dfd 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi_lib.o
obj-$(CONFIG_HVC_OPAL) += hvc_opal.o hvsi_lib.o
obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
diff --git a/drivers/tty/hvc/hvc_bfin_jtag.c b/drivers/tty/hvc/hvc_bfin_jtag.c
index 31d6cc6a77af..dd7cae4c195b 100644
--- a/drivers/tty/hvc/hvc_bfin_jtag.c
+++ b/drivers/tty/hvc/hvc_bfin_jtag.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Console via Blackfin JTAG Communication
*
* Copyright 2008-2011 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index a8d399188242..7709fcc707f4 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
* Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
@@ -6,20 +7,6 @@
*
* Additional Author(s):
* Ryan S. Arnold <rsa@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 798c48d0d32c..ea63090e013f 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* hvc_console.h
* Copyright (C) 2005 IBM Corporation
@@ -8,20 +9,6 @@
* hvc_console header information:
* moved here from arch/powerpc/include/asm/hvconsole.h
* and drivers/char/hvc_console.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef HVC_CONSOLE_H
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 82f240fb98f0..02629a1f193d 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,14 +1,5 @@
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved. */
#include <linux/init.h>
diff --git a/drivers/tty/hvc/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
index bc7a96874637..4b255dfef2cc 100644
--- a/drivers/tty/hvc/hvc_irq.c
+++ b/drivers/tty/hvc/hvc_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001,2008
*
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 8b70a1627356..a74680729825 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* z/VM IUCV hypervisor console (HVC) device driver
*
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 16331a90c1e8..2ed07ca6389e 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* opal driver interface to hvc_console.c
*
* Copyright 2011 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#undef DEBUG
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 08c87920b74a..e8b8c645482b 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IBM RTAS driver interface to hvc_console.c
*
@@ -11,20 +12,6 @@
*
* inspired by drivers/char/hvc_console.c
* written by Anton Blanchard and Paul Mackerras
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
index 9da1e842bbe9..b517c0661abb 100644
--- a/drivers/tty/hvc/hvc_tile.c
+++ b/drivers/tty/hvc/hvc_tile.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
* Tilera TILE Processor hypervisor console
*/
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 9cf573d06a29..a4c9913f76a0 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* udbg interface to hvc_console.c
*
* (C) Copyright David Gibson, IBM Corporation 2008.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index a1d272ac82bb..59eaa620bf13 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* vio driver interface to hvc_console.c
*
@@ -14,20 +15,6 @@
* Additional Author(s):
* Ryan S. Arnold <rsa@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* TODO:
*
* - handle error in sending hvsi protocol packets
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 5e87e4866bcb..dc43fa96c3de 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* xen console driver interface to hvc_console.c
*
* (c) 2007 Gerd Hoffmann <kraxel@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 63c29fe9d21f..1db1d97e72e7 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IBM eServer Hypervisor Virtual Console Server Device Driver
* Copyright (C) 2003, 2004 IBM Corp.
* Ryan S. Arnold (rsa@us.ibm.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
*
* This is the device driver for the IBM Hypervisor Virtual Console Server,
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 2e578d6433af..66f95f758be0 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004 Hollis Blanchard <hollisb@us.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* Host Virtual Serial Interface (HVSI) is a protocol between the hosted OS
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index a270f04588d7..09289c8154ae 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index df0204b6148f..b0baa4ce10f9 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
@@ -32,7 +33,7 @@ static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
unsigned int address,
const unsigned char *data, int len,
int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx, const unsigned char *data, int len);
@@ -1634,8 +1635,7 @@ struct ipw_hardware *ipwireless_hardware_create(void)
spin_lock_init(&hw->lock);
tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
- setup_timer(&hw->setup_timer, ipwireless_setup_timer,
- (unsigned long) hw);
+ timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
return hw;
}
@@ -1669,12 +1669,12 @@ void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
hw->init_loops = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": waiting for card to start up...\n");
- ipwireless_setup_timer((unsigned long) hw);
+ ipwireless_setup_timer(&hw->setup_timer);
}
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
{
- struct ipw_hardware *hw = (struct ipw_hardware *) data;
+ struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
hw->init_loops++;
diff --git a/drivers/tty/ipwireless/hardware.h b/drivers/tty/ipwireless/hardware.h
index 90a8590e43b0..e524a8fcc2ad 100644
--- a/drivers/tty/ipwireless/hardware.h
+++ b/drivers/tty/ipwireless/hardware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
index 655c7948261c..3475e841ef5c 100644
--- a/drivers/tty/ipwireless/main.c
+++ b/drivers/tty/ipwireless/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/main.h b/drivers/tty/ipwireless/main.h
index f2cbb116bccb..73818bb64416 100644
--- a/drivers/tty/ipwireless/main.h
+++ b/drivers/tty/ipwireless/main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
index c2f9a3263b37..695439c03147 100644
--- a/drivers/tty/ipwireless/network.c
+++ b/drivers/tty/ipwireless/network.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/network.h b/drivers/tty/ipwireless/network.h
index 561f765b3334..784932a59a73 100644
--- a/drivers/tty/ipwireless/network.h
+++ b/drivers/tty/ipwireless/network.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/setup_protocol.h b/drivers/tty/ipwireless/setup_protocol.h
index 002c34e72521..d4a7ae257ca5 100644
--- a/drivers/tty/ipwireless/setup_protocol.h
+++ b/drivers/tty/ipwireless/setup_protocol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 2685d59d2724..1ef751c27ac6 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/ipwireless/tty.h b/drivers/tty/ipwireless/tty.h
index 747b2d637860..ec698d9f338b 100644
--- a/drivers/tty/ipwireless/tty.h
+++ b/drivers/tty/ipwireless/tty.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* IPWireless 3G PCMCIA Network Driver
*
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 61ecdd6b2fc2..015686ff4825 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1,9 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Original driver code supplied by Multi-Tech
*
* Changes
@@ -174,10 +170,10 @@ static struct pci_driver isicom_driver = {
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
static void isicom_start(struct tty_struct *tty);
-static DEFINE_TIMER(tx, isicom_tx, 0, 0);
+static DEFINE_TIMER(tx, isicom_tx);
/* baud index mappings from linux defns to isi */
@@ -398,7 +394,7 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
* will do the rest of the work for us.
*/
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
{
unsigned long flags, base;
unsigned int retries;
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
index 25ccef2fe748..99eaed4b2dbc 100644
--- a/drivers/tty/metag_da.c
+++ b/drivers/tty/metag_da.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dashtty.c - tty driver for Dash channels interface.
*
* Copyright (C) 2007,2008,2012 Imagination Technologies
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
*/
#include <linux/atomic.h>
@@ -309,7 +305,7 @@ static int put_data(void *arg)
/*
* This gets called every DA_TTY_POLL and polls the channels for data
*/
-static void dashtty_timer(unsigned long ignored)
+static void dashtty_timer(struct timer_list *poll_timer)
{
int channel;
@@ -323,12 +319,12 @@ static void dashtty_timer(unsigned long ignored)
if (channel >= 0)
fetch_data(channel);
- mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
+ mod_timer(poll_timer, jiffies + DA_TTY_POLL);
}
static void add_poll_timer(struct timer_list *poll_timer)
{
- setup_pinned_timer(poll_timer, dashtty_timer, 0);
+ timer_setup(poll_timer, dashtty_timer, TIMER_PINNED);
poll_timer->expires = jiffies + DA_TTY_POLL;
/*
@@ -461,7 +457,7 @@ static void dashtty_hangup(struct tty_struct *tty)
* buffers. It is used to delay the expensive writeout until the writer has
* stopped writing.
*/
-static void dashtty_put_timer(unsigned long ignored)
+static void dashtty_put_timer(struct timer_list *unused)
{
if (atomic_read(&dashtty_xmit_cnt))
wake_up_interruptible(&dashtty_waitqueue);
@@ -603,7 +599,7 @@ static int __init dashtty_init(void)
complete(&dport->xmit_empty);
}
- setup_timer(&put_timer, dashtty_put_timer, 0);
+ timer_setup(&put_timer, dashtty_put_timer, 0);
init_waitqueue_head(&dashtty_waitqueue);
dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index a2dab3fb8751..4c1cd49ae95b 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TTY driver for MIPS EJTAG Fast Debug Channels.
*
* Copyright (C) 2007-2015 Imagination Technologies Ltd
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for more
- * details.
*/
#include <linux/atomic.h>
@@ -683,9 +680,9 @@ static irqreturn_t mips_ejtag_fdc_isr(int irq, void *dev_id)
* It simply triggers the common FDC handler code and arranges for further
* polling.
*/
-static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
+static void mips_ejtag_fdc_tty_timer(struct timer_list *t)
{
- struct mips_ejtag_fdc_tty *priv = (void *)opaque;
+ struct mips_ejtag_fdc_tty *priv = from_timer(priv, t, poll_timer);
mips_ejtag_fdc_handle(priv);
if (!priv->removing)
@@ -1002,8 +999,8 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
raw_spin_unlock_irq(&priv->lock);
} else {
/* If we didn't get a usable IRQ, poll instead */
- setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
- (unsigned long)priv);
+ timer_setup(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+ TIMER_PINNED);
priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
/*
* Always attach the timer to the right CPU. The channels are
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 7f3d4cb0341b..68cbc03aab4b 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
* moxa.c -- MOXA Intellio family multiport serial driver.
@@ -7,11 +8,6 @@
*
* This code is loosely based on the Linux serial driver, written by
* Linus Torvalds, Theodore T'so and others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -202,7 +198,7 @@ static void moxa_hangup(struct tty_struct *);
static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
@@ -428,7 +424,7 @@ static const struct tty_port_operations moxa_port_ops = {
};
static struct tty_driver *moxaDriver;
-static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
+static DEFINE_TIMER(moxaTimer, moxa_poll);
/*
* HW init
@@ -1433,7 +1429,7 @@ put:
return 0;
}
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
{
struct moxa_board_conf *brd;
u16 __iomem *ip;
diff --git a/drivers/tty/moxa.h b/drivers/tty/moxa.h
index 87d16ce57be7..8ce89fd36c7b 100644
--- a/drivers/tty/moxa.h
+++ b/drivers/tty/moxa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MOXA_H_FILE
#define MOXA_H_FILE
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 7dd38047ba23..8bc15cb67a58 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mxser.c -- MOXA Smartio/Industio family multiport serial driver.
*
@@ -8,11 +9,6 @@
* Linux serial driver, written by Linus Torvalds, Theodore T'so and
* others.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
* <alan@lxorguk.ukuu.org.uk>. The original 1.8 code is available on
* www.moxa.com.
@@ -642,8 +638,7 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
* This routine is called to set the UART divisor registers to match
* the specified baud rate for a serial port.
*/
-static int mxser_change_speed(struct tty_struct *tty,
- struct ktermios *old_termios)
+static int mxser_change_speed(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
unsigned cflag, cval, fcr;
@@ -945,7 +940,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
/*
* and set the speed of the serial port
*/
- mxser_change_speed(tty, NULL);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, flags);
return 0;
@@ -1288,7 +1283,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
if (tty_port_initialized(port)) {
if (flags != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
- mxser_change_speed(tty, NULL);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, sl_flags);
}
} else {
@@ -1946,7 +1941,7 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(tty, old_termios);
+ mxser_change_speed(tty);
spin_unlock_irqrestore(&info->slock, flags);
if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
@@ -2375,8 +2370,7 @@ static void mxser_release_ISA_res(struct mxser_board *brd)
mxser_release_vector(brd);
}
-static int mxser_initbrd(struct mxser_board *brd,
- struct pci_dev *pdev)
+static int mxser_initbrd(struct mxser_board *brd)
{
struct mxser_port *info;
unsigned int i;
@@ -2640,7 +2634,7 @@ static int mxser_probe(struct pci_dev *pdev,
}
/* mxser_initbrd will hook ISR. */
- retval = mxser_initbrd(brd, pdev);
+ retval = mxser_initbrd(brd);
if (retval)
goto err_rel3;
@@ -2746,7 +2740,7 @@ static int __init mxser_module_init(void)
brd->info->name, ioaddr[b]);
/* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(brd, NULL) < 0) {
+ if (mxser_initbrd(brd) < 0) {
mxser_release_ISA_res(brd);
brd->info = NULL;
continue;
diff --git a/drivers/tty/mxser.h b/drivers/tty/mxser.h
index 0bf794313ffd..e6cb15626567 100644
--- a/drivers/tty/mxser.h
+++ b/drivers/tty/mxser.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MXSER_H
#define _MXSER_H
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0a3c9665e015..5131bdc9e765 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_gsm.c GSM 0710 tty multiplexor
* Copyright (c) 2009/10 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
*
* TO DO:
@@ -1322,9 +1310,9 @@ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
* gsm->pending_cmd will be NULL and we just let the timer expire.
*/
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
{
- struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
@@ -1465,9 +1453,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* end will get a DM response)
*/
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
{
- struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_dlci *dlci = from_timer(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
@@ -1646,9 +1634,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
}
skb_queue_head_init(&dlci->skb_list);
- init_timer(&dlci->t1);
- dlci->t1.function = gsm_dlci_t1;
- dlci->t1.data = (unsigned long)dlci;
+ timer_setup(&dlci->t1, gsm_dlci_t1, 0);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
@@ -2102,7 +2088,7 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
struct gsm_dlci *dlci;
int i = 0;
- setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 7b2a466616d6..eea7b6cb3cc4 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* generic HDLC line discipline for Linux
*
* Written by Paul Fulghum paulkf@microgate.com
@@ -11,8 +12,6 @@
*
* Original release 01/11/99
*
- * This code is released under the GNU General Public License (GPL)
- *
* This module implements the tty line discipline N_HDLC for use with
* tty device drivers that support bit-synchronous HDLC communications.
*
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
index d63261c36e42..96feabae4740 100644
--- a/drivers/tty/n_null.c
+++ b/drivers/tty/n_null.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
@@ -7,19 +8,6 @@
* n_null.c - Null line discipline used in the failure path
*
* Copyright (C) Intel 2017
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
static int n_null_open(struct tty_struct *tty)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 305b6490d405..30bb0900cd2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/* r3964 linediscipline for linux
*
* -----------------------------------------------------------
@@ -5,9 +6,6 @@
* Philips Automation Projects
* Kassel (Germany)
* -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
* Author:
* L. Haag
*
@@ -117,7 +115,7 @@ static void retry_transmit(struct r3964_info *pInfo);
static void transmit_block(struct r3964_info *pInfo);
static void receive_char(struct r3964_info *pInfo, const unsigned char c);
static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
unsigned char __user * buf);
@@ -690,9 +688,9 @@ static void receive_error(struct r3964_info *pInfo, const char flag)
}
}
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
{
- struct r3964_info *pInfo = (void *)priv;
+ struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
switch (pInfo->state) {
case R3964_TX_REQUEST:
@@ -995,7 +993,7 @@ static int r3964_open(struct tty_struct *tty)
tty->disc_data = pInfo;
tty->receive_room = 65536;
- setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+ timer_setup(&pInfo->tmr, on_timeout, 0);
return 0;
}
diff --git a/drivers/tty/n_tracerouter.c b/drivers/tty/n_tracerouter.c
index ac5716979bc1..4479af4d2fa5 100644
--- a/drivers/tty/n_tracerouter.c
+++ b/drivers/tty/n_tracerouter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracerouter.c - Trace data router through tty space
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* This trace router uses the Linux line discipline framework to route
* trace data coming from a HW Modem to a PTI (Parallel Trace Module) port.
* The solution is not specific to a HW modem and this line disciple can
diff --git a/drivers/tty/n_tracesink.c b/drivers/tty/n_tracesink.c
index 4616870a6b1b..d96ba82cc356 100644
--- a/drivers/tty/n_tracesink.c
+++ b/drivers/tty/n_tracesink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracesink.c - Trace data router and sink path through tty space.
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* The trace sink uses the Linux line discipline framework to receive
* trace data coming from the PTI source line discipline driver
* to a user-desired tty port, like USB.
diff --git a/drivers/tty/n_tracesink.h b/drivers/tty/n_tracesink.h
index a68bb44f1ef5..1b846330c855 100644
--- a/drivers/tty/n_tracesink.h
+++ b/drivers/tty/n_tracesink.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* n_tracesink.h - Kernel driver API to route trace data in kernel space.
*
@@ -5,17 +6,6 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
* The PTI (Parallel Trace Interface) driver directs trace data routed from
* various parts in the system out through the Intel Penwell PTI port and
* out of the mobile device for analysis with a debugging tool
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index bdf0e6e89991..427e0d5d8f13 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* n_tty.c --- implements the N_TTY line discipline.
*
@@ -15,9 +16,6 @@
* This file also contains code originally written by Linus Torvalds,
* Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994.
*
- * This file may be redistributed under the terms of the GNU General Public
- * License.
- *
* Reduced memory usage for older ARM systems - Russell King.
*
* 2000/01/20 Fixed SMP locking on put_tty_queue using bits of
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 39b3723a32a6..b57b35066ebe 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
@@ -21,20 +22,6 @@
* Copyright (c) 2006 Option Wireless n/v
* All rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
* --------------------------------------------------------------------------
*/
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 26dcb3b60fb9..64338442050e 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 20d79a6007d5..bdd17d2aaafd 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* RocketPort device driver for Linux
*
* Written by Theodore Ts'o, 1995, 1996, 1997, 1998, 1999, 2000.
*
* Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2003 by Comtrol, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -99,7 +86,7 @@
/****** RocketPort Local Variables ******/
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
static struct tty_driver *rocket_driver;
@@ -111,7 +98,7 @@ static struct r_port *rp_table[MAX_RP_PORTS]; /* The main repository of
static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */
/* eg. Bit 0 indicates port 0 has xmit data, ... */
static atomic_t rp_num_ports_open; /* Number of serial ports open */
-static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0);
+static DEFINE_TIMER(rocket_timer, rp_do_poll);
static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */
static unsigned long board2;
@@ -538,7 +525,7 @@ static void rp_handle_port(struct r_port *info)
/*
* The top level polling routine. Repeats every 1/100 HZ (10ms).
*/
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
{
CONTROLLER_t *ctlp;
int ctrl, aiop, ch, line;
diff --git a/drivers/tty/rocket.h b/drivers/tty/rocket.h
index c11a9392f219..d0560203f215 100644
--- a/drivers/tty/rocket.h
+++ b/drivers/tty/rocket.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rocket.h --- the exported interface of the rocket driver to its configuration program.
*
diff --git a/drivers/tty/rocket_int.h b/drivers/tty/rocket_int.h
index ef1e1be6b26d..727e50dbb92f 100644
--- a/drivers/tty/rocket_int.h
+++ b/drivers/tty/rocket_int.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* rocket_int.h --- internal header file for rocket.c
*
diff --git a/drivers/tty/serdev/Kconfig b/drivers/tty/serdev/Kconfig
index cdc6b820cf93..1dbc8352e027 100644
--- a/drivers/tty/serdev/Kconfig
+++ b/drivers/tty/serdev/Kconfig
@@ -6,11 +6,19 @@ menuconfig SERIAL_DEV_BUS
help
Core support for devices connected via a serial port.
+ Note that you typically also want to enable TTY port controller support.
+
if SERIAL_DEV_BUS
config SERIAL_DEV_CTRL_TTYPORT
bool "Serial device TTY port controller"
+ help
+ Say Y here if you want to use the Serial device bus with common TTY
+ drivers (e.g. serial drivers).
+
+ If unsure, say Y.
depends on TTY
depends on SERIAL_DEV_BUS != m
+ default y
endif
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index c68fb3a8ea1c..1bef39828ca7 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -1,19 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
*
* Based on drivers/spmi/spmi.c:
* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/kernel.h>
@@ -49,13 +42,22 @@ static const struct device_type serdev_ctrl_type = {
static int serdev_device_match(struct device *dev, struct device_driver *drv)
{
- /* TODO: ACPI and platform matching */
+ /* TODO: platform matching */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
return of_driver_match_device(dev, drv);
}
static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- /* TODO: ACPI and platform modalias */
+ int rc;
+
+ /* TODO: platform modalias */
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
return of_device_uevent_modalias(dev, env);
}
@@ -65,21 +67,32 @@ static int serdev_uevent(struct device *dev, struct kobj_uevent_env *env)
*/
int serdev_device_add(struct serdev_device *serdev)
{
+ struct serdev_controller *ctrl = serdev->ctrl;
struct device *parent = serdev->dev.parent;
int err;
dev_set_name(&serdev->dev, "%s-%d", dev_name(parent), serdev->nr);
+ /* Only a single slave device is currently supported. */
+ if (ctrl->serdev) {
+ dev_err(&serdev->dev, "controller busy\n");
+ return -EBUSY;
+ }
+ ctrl->serdev = serdev;
+
err = device_add(&serdev->dev);
if (err < 0) {
dev_err(&serdev->dev, "Can't add %s, status %d\n",
dev_name(&serdev->dev), err);
- goto err_device_add;
+ goto err_clear_serdev;
}
dev_dbg(&serdev->dev, "device %s registered\n", dev_name(&serdev->dev));
-err_device_add:
+ return 0;
+
+err_clear_serdev:
+ ctrl->serdev = NULL;
return err;
}
EXPORT_SYMBOL_GPL(serdev_device_add);
@@ -90,7 +103,10 @@ EXPORT_SYMBOL_GPL(serdev_device_add);
*/
void serdev_device_remove(struct serdev_device *serdev)
{
+ struct serdev_controller *ctrl = serdev->ctrl;
+
device_unregister(&serdev->dev);
+ ctrl->serdev = NULL;
}
EXPORT_SYMBOL_GPL(serdev_device_remove);
@@ -260,6 +276,12 @@ static int serdev_drv_remove(struct device *dev)
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
return of_device_modalias(dev, buf, PAGE_SIZE);
}
DEVICE_ATTR_RO(modalias);
@@ -295,7 +317,6 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *ctrl)
return NULL;
serdev->ctrl = ctrl;
- ctrl->serdev = serdev;
device_initialize(&serdev->dev);
serdev->dev.parent = &ctrl->dev;
serdev->dev.bus = &serdev_bus_type;
@@ -329,26 +350,31 @@ struct serdev_controller *serdev_controller_alloc(struct device *parent,
if (!ctrl)
return NULL;
- device_initialize(&ctrl->dev);
- ctrl->dev.type = &serdev_ctrl_type;
- ctrl->dev.bus = &serdev_bus_type;
- ctrl->dev.parent = parent;
- ctrl->dev.of_node = parent->of_node;
- serdev_controller_set_drvdata(ctrl, &ctrl[1]);
-
id = ida_simple_get(&ctrl_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(parent,
"unable to allocate serdev controller identifier.\n");
- serdev_controller_put(ctrl);
- return NULL;
+ goto err_free;
}
ctrl->nr = id;
+
+ device_initialize(&ctrl->dev);
+ ctrl->dev.type = &serdev_ctrl_type;
+ ctrl->dev.bus = &serdev_bus_type;
+ ctrl->dev.parent = parent;
+ ctrl->dev.of_node = parent->of_node;
+ serdev_controller_set_drvdata(ctrl, &ctrl[1]);
+
dev_set_name(&ctrl->dev, "serial%d", id);
dev_dbg(&ctrl->dev, "allocated controller 0x%p id %d\n", ctrl, id);
return ctrl;
+
+err_free:
+ kfree(ctrl);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(serdev_controller_alloc);
@@ -385,6 +411,75 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
return 0;
}
+#ifdef CONFIG_ACPI
+static acpi_status acpi_serdev_register_device(struct serdev_controller *ctrl,
+ struct acpi_device *adev)
+{
+ struct serdev_device *serdev = NULL;
+ int err;
+
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return AE_OK;
+
+ serdev = serdev_device_alloc(ctrl);
+ if (!serdev) {
+ dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_COMPANION_SET(&serdev->dev, adev);
+ acpi_device_set_enumerated(adev);
+
+ err = serdev_device_add(serdev);
+ if (err) {
+ dev_err(&serdev->dev,
+ "failure adding ACPI serdev device. status %d\n", err);
+ serdev_device_put(serdev);
+ }
+
+ return AE_OK;
+}
+
+static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct serdev_controller *ctrl = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ return acpi_serdev_register_device(ctrl, adev);
+}
+
+static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(ctrl->dev.parent);
+ if (!handle)
+ return -ENODEV;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_serdev_add_device, NULL, ctrl, NULL);
+ if (ACPI_FAILURE(status))
+ dev_dbg(&ctrl->dev, "failed to enumerate serdev slaves\n");
+
+ if (!ctrl->serdev)
+ return -ENODEV;
+
+ return 0;
+}
+#else
+static inline int acpi_serdev_register_devices(struct serdev_controller *ctrl)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_ACPI */
+
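With the match and uevent paths above extended to ACPI, a serdev client can now be bound through an ACPI HID. A hedged sketch of such a client driver follows; struct serdev_device_driver and module_serdev_device_driver() are assumed to come from <linux/serdev.h>, and the "ABCD0000" HID plus all my_* names are invented for illustration.

	#include <linux/acpi.h>
	#include <linux/module.h>
	#include <linux/serdev.h>

	static const struct acpi_device_id my_serdev_acpi_match[] = {
		{ "ABCD0000", 0 },	/* illustrative HID only */
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, my_serdev_acpi_match);

	static int my_serdev_probe(struct serdev_device *serdev)
	{
		/* open the port, set the line settings, etc. */
		return 0;
	}

	static void my_serdev_remove(struct serdev_device *serdev)
	{
	}

	static struct serdev_device_driver my_serdev_driver = {
		.probe	= my_serdev_probe,
		.remove	= my_serdev_remove,
		.driver	= {
			.name			= "my-serdev",
			.acpi_match_table	= ACPI_PTR(my_serdev_acpi_match),
		},
	};
	module_serdev_device_driver(my_serdev_driver);

	MODULE_LICENSE("GPL");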
/**
* serdev_controller_add() - Add an serdev controller
* @ctrl: controller to be registered.
@@ -394,7 +489,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl)
*/
int serdev_controller_add(struct serdev_controller *ctrl)
{
- int ret;
+ int ret_of, ret_acpi, ret;
/* Can't register until after driver model init */
if (WARN_ON(!is_registered))
@@ -404,9 +499,14 @@ int serdev_controller_add(struct serdev_controller *ctrl)
if (ret)
return ret;
- ret = of_serdev_register_devices(ctrl);
- if (ret)
+ ret_of = of_serdev_register_devices(ctrl);
+ ret_acpi = acpi_serdev_register_devices(ctrl);
+ if (ret_of && ret_acpi) {
+ dev_dbg(&ctrl->dev, "no devices registered: of:%d acpi:%d\n",
+ ret_of, ret_acpi);
+ ret = -ENODEV;
goto out_dev_del;
+ }
dev_dbg(&ctrl->dev, "serdev%d registered: dev:%p\n",
ctrl->nr, &ctrl->dev);
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 302018d67efa..ce7ad0acee7a 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016-2017 Linaro Ltd., Rob Herring <robh@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/serdev.h>
@@ -96,16 +88,21 @@ static int ttyport_open(struct serdev_controller *ctrl)
struct serport *serport = serdev_controller_get_drvdata(ctrl);
struct tty_struct *tty;
struct ktermios ktermios;
+ int ret;
tty = tty_init_dev(serport->tty_drv, serport->tty_idx);
if (IS_ERR(tty))
return PTR_ERR(tty);
serport->tty = tty;
- if (tty->ops->open)
- tty->ops->open(serport->tty, NULL);
- else
- tty_port_open(serport->port, tty, NULL);
+ if (!tty->ops->open || !tty->ops->close) {
+ ret = -ENODEV;
+ goto err_unlock;
+ }
+
+ ret = tty->ops->open(serport->tty, NULL);
+ if (ret)
+ goto err_close;
/* Bring the UART into a known 8 bits no parity hw fc state */
ktermios = tty->termios;
@@ -122,6 +119,14 @@ static int ttyport_open(struct serdev_controller *ctrl)
tty_unlock(serport->tty);
return 0;
+
+err_close:
+ tty->ops->close(tty, NULL);
+err_unlock:
+ tty_unlock(tty);
+ tty_release_struct(tty, serport->tty_idx);
+
+ return ret;
}
static void ttyport_close(struct serdev_controller *ctrl)
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 804632b4a929..32b3acf8150a 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the serial port on the 21285 StrongArm-110 core logic chip.
*
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index b2bdc35f7495..ebfb0bd5bef5 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c
index 522aeae05192..1691f1a57f89 100644
--- a/drivers/tty/serial/8250/8250_accent.c
+++ b/drivers/tty/serial/8250/8250_accent.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c
index 402dfdd4940e..758c4aa203ab 100644
--- a/drivers/tty/serial/8250/8250_acorn.c
+++ b/drivers/tty/serial/8250/8250_acorn.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/serial/acorn.c
*
* Copyright (C) 1996-2003 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 33a801353114..74a408d9db24 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Port driver for Aspeed VUART device
*
* Copyright (C) 2016 Jeremy Kerr <jk@ozlabs.org>, IBM Corp.
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#include <linux/device.h>
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index a23c7da42ea8..bd53661103eb 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial port driver for BCM2835AUX UART
*
@@ -5,11 +6,6 @@
*
* Based on 8250_lpc18xx.c:
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c
index a63b5998e383..a9b97c034653 100644
--- a/drivers/tty/serial/8250/8250_boca.c
+++ b/drivers/tty/serial/8250/8250_boca.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index d29b512a7d9f..9342fc2ee7df 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Universal/legacy driver for 8250/16550-type serial ports
*
@@ -11,11 +12,6 @@
* userspace-configurable "phantom" ports
* "serial8250" platform devices
* serial8250_register_8250_port() ports
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
@@ -262,17 +258,17 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
* barely passable results for a 16550A. (Although at the expense
* of much CPU overhead).
*/
-static void serial8250_timeout(unsigned long data)
+static void serial8250_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = (struct uart_8250_port *)data;
+ struct uart_8250_port *up = from_timer(up, t, timer);
up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
-static void serial8250_backup_timeout(unsigned long data)
+static void serial8250_backup_timeout(struct timer_list *t)
{
- struct uart_8250_port *up = (struct uart_8250_port *)data;
+ struct uart_8250_port *up = from_timer(up, t, timer);
unsigned int iir, ier = 0, lsr;
unsigned long flags;
@@ -330,7 +326,6 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
pr_debug("ttyS%d - using backup timer\n", serial_index(port));
up->timer.function = serial8250_backup_timeout;
- up->timer.data = (unsigned long)up;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
@@ -341,7 +336,6 @@ static int univ8250_setup_irq(struct uart_8250_port *up)
* driver used to do this with IRQ0.
*/
if (!port->irq) {
- up->timer.data = (unsigned long)up;
mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
} else
retval = serial_link_irq_chain(up);
@@ -525,8 +519,7 @@ static void __init serial8250_isa_init_ports(void)
base_ops = port->ops;
port->ops = &univ8250_port_ops;
- init_timer(&up->timer);
- up->timer.function = serial8250_timeout;
+ timer_setup(&up->timer, serial8250_timeout, 0);
up->ops = &univ8250_driver_ops;
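The 8250 core hunks above convert the poll timers to the timer_list API: timer_setup() replaces init_timer() plus the unsigned long data cookie, and the callback recovers its containing structure with from_timer(). A minimal sketch of that pattern, assuming a hypothetical demo_port structure (not from the patch):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_port {
	struct timer_list timer;
	int poll_count;
};

static void demo_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() keyed on the timer field */
	struct demo_port *dp = from_timer(dp, t, timer);

	dp->poll_count++;
	mod_timer(&dp->timer, jiffies + HZ);	/* re-arm one second out */
}

static void demo_start(struct demo_port *dp)
{
	timer_setup(&dp->timer, demo_timeout, 0);
	mod_timer(&dp->timer, jiffies + HZ);
}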
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 26f17456b0d7..bfa1a857f3ff 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* 8250_dma.c - DMA Engine API support for 8250.c
*
* Copyright (C) 2013 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/tty.h>
#include <linux/tty_flip.h>
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 7e638997bfc2..5bb0c42c88dd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Synopsys DesignWare 8250 driver.
*
* Copyright 2011 Picochip, Jamie Iles.
* Copyright 2013 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* The Synopsys DesignWare 8250 has an extra feature whereby it detects if the
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
@@ -256,25 +252,31 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
struct ktermios *old)
{
unsigned int baud = tty_termios_baud_rate(termios);
+ unsigned int target_rate, min_rate, max_rate;
struct dw8250_data *d = p->private_data;
long rate;
- int ret;
+ int i, ret;
if (IS_ERR(d->clk) || !old)
goto out;
- clk_disable_unprepare(d->clk);
- rate = clk_round_rate(d->clk, baud * 16);
- if (rate < 0)
- ret = rate;
- else if (rate == 0)
- ret = -ENOENT;
- else
- ret = clk_set_rate(d->clk, rate);
- clk_prepare_enable(d->clk);
+ /* Find a clk rate within +/-1.6% of an integer multiple of baud * 16 */
+ target_rate = baud * 16;
+ min_rate = target_rate - (target_rate >> 6);
+ max_rate = target_rate + (target_rate >> 6);
- if (!ret)
- p->uartclk = rate;
+ for (i = 1; i <= UART_DIV_MAX; i++) {
+ rate = clk_round_rate(d->clk, i * target_rate);
+ if (rate >= i * min_rate && rate <= i * max_rate)
+ break;
+ }
+ if (i <= UART_DIV_MAX) {
+ clk_disable_unprepare(d->clk);
+ ret = clk_set_rate(d->clk, rate);
+ clk_prepare_enable(d->clk);
+ if (!ret)
+ p->uartclk = rate;
+ }
out:
p->status &= ~UPSTAT_AUTOCTS;
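The reworked dw8250_set_termios() no longer asks for baud * 16 directly; it searches for a parent clock rate within roughly +/-1.6% (target >> 6) of an integer multiple of baud * 16 and only reprograms the clock when such a multiple exists. A standalone sketch of the same search, with a made-up round_rate() standing in for clk_round_rate() (all names here are illustrative, not from the driver):

#include <stdio.h>

#define UART_DIV_MAX 0xffff

/* Toy stand-in for clk_round_rate(): an integer divider of a 100 MHz PLL. */
static long long round_rate(long long requested)
{
	long long div = 100000000LL / requested;

	return div ? 100000000LL / div : -1;	/* -1: out of range, like a clk error */
}

int main(void)
{
	unsigned int baud = 115200;
	unsigned int target = baud * 16;		/* 1843200 Hz */
	unsigned int min = target - (target >> 6);	/* ~1.6% below */
	unsigned int max = target + (target >> 6);	/* ~1.6% above */
	unsigned int i;

	for (i = 1; i <= UART_DIV_MAX; i++) {
		long long rate = round_rate((long long)i * target);

		if (rate >= (long long)i * min && rate <= (long long)i * max) {
			printf("multiple %u: clk %lld Hz\n", i, rate);
			return 0;
		}
	}
	printf("no rate within tolerance\n");
	return 1;
}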
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index af72ec32e404..362c25ff188a 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Early serial console for 8250/16550 devices
*
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Based on the 8250.c serial driver, Copyright (C) 2001 Russell King,
* and on early_printk.c by Andi Kleen.
*
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 0b6381214917..f6a86f2bc4e5 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Emma Mobile 8250 driver
*
* Copyright (C) 2012 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index c55624703fdf..a402878c9f30 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe module for 8250/16550-type Exar chips PCI serial ports.
*
* Based on drivers/tty/serial/8250/8250_pci.c,
*
* Copyright (C) 2017 Sudip Mukherjee, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/acpi.h>
#include <linux/dmi.h>
diff --git a/drivers/tty/serial/8250/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c
index 3a7cb8262bb9..933811ebfaac 100644
--- a/drivers/tty/serial/8250/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250/8250_exar_st16c554.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Written by Paul B Schroeder < pschroeder "at" uplogix "dot" com >
* Based on 8250_boca.
*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index e500f7dd2470..79a4958b3f5c 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe for F81216A LPC to 4 UART
*
* Copyright (C) 2014-2016 Ricardo Ribalda, Qtechnology A/S
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/pci.h>
@@ -40,6 +36,16 @@
#define IRQ_LEVEL_LOW 0
#define IRQ_EDGE_HIGH BIT(5)
+/*
+ * F81216H clock source register: the value and mask are the same as on the
+ * F81866, but the register is at F0h.
+ *
+ * Clock speeds for UART (register F0h)
+ * 00: 1.8432MHz.
+ * 01: 18.432MHz.
+ * 10: 24MHz.
+ * 11: 14.769MHz.
+ */
#define RS485 0xF0
#define RTS_INVERT BIT(5)
#define RS485_URA BIT(4)
@@ -118,6 +124,9 @@ static int fintek_8250_enter_key(u16 base_port, u8 key)
if (!request_muxed_region(base_port, 2, "8250_fintek"))
return -EBUSY;
+ /* Force deactivation of all SuperIO devices on this base_port */
+ outb(EXIT_KEY, base_port + ADDR_PORT);
+
outb(key, base_port + ADDR_PORT);
outb(key, base_port + ADDR_PORT);
return 0;
@@ -188,14 +197,27 @@ static int fintek_8250_rs485_config(struct uart_port *port,
if (!pdata)
return -EINVAL;
- if (rs485->flags & SER_RS485_ENABLED)
+ /* The hardware does not support the same RTS level on send and receive */
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND) ==
+ !(rs485->flags & SER_RS485_RTS_AFTER_SEND))
+ return -EINVAL;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
memset(rs485->padding, 0, sizeof(rs485->padding));
- else
+ config |= RS485_URA;
+ } else {
memset(rs485, 0, sizeof(*rs485));
+ }
rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND;
+ /* Only the first port supports delays */
+ if (pdata->index) {
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ }
+
if (rs485->delay_rts_before_send) {
rs485->delay_rts_before_send = 1;
config |= TXW4C_IRA;
@@ -206,12 +228,6 @@ static int fintek_8250_rs485_config(struct uart_port *port,
config |= RXW4C_IRA;
}
- if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
- (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
- rs485->flags &= SER_RS485_ENABLED;
- else
- config |= RS485_URA;
-
if (rs485->flags & SER_RS485_RTS_ON_SEND)
config |= RTS_INVERT;
@@ -280,13 +296,91 @@ static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
F81866_UART_CLK_MASK,
F81866_UART_CLK_14_769MHZ);
- uart->port.uartclk = 921600 * 16;
+ uart->port.uartclk = 921600 * 16;
break;
default: /* leave clock speed untouched */
break;
}
}
+void fintek_8250_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct fintek_8250 *pdata = port->private_data;
+ unsigned int baud = tty_termios_baud_rate(termios);
+ int i;
+ u8 reg;
+ static u32 baudrate_table[] = {115200, 921600, 1152000, 1500000};
+ static u8 clock_table[] = { F81866_UART_CLK_1_8432MHZ,
+ F81866_UART_CLK_14_769MHZ, F81866_UART_CLK_18_432MHZ,
+ F81866_UART_CLK_24MHZ };
+
+ /*
+ * Use serial8250_do_set_termios() when baud = 0; otherwise
+ * baudrate_table[i] % baud would crash with a division by zero.
+ */
+ if (!baud)
+ goto exit;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216H:
+ reg = RS485;
+ break;
+ case CHIP_ID_F81866:
+ reg = F81866_UART_CLK;
+ break;
+ default:
+ /* Don't change clocksource with unknown PID */
+ dev_warn(port->dev,
+ "%s: pid: %x Not support. use default set_termios.\n",
+ __func__, pdata->pid);
+ goto exit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(baudrate_table); ++i) {
+ if (baud > baudrate_table[i] || baudrate_table[i] % baud != 0)
+ continue;
+
+ if (port->uartclk == baudrate_table[i] * 16)
+ break;
+
+ if (fintek_8250_enter_key(pdata->base_port, pdata->key))
+ continue;
+
+ port->uartclk = baudrate_table[i] * 16;
+
+ sio_write_reg(pdata, LDN, pdata->index);
+ sio_write_mask_reg(pdata, reg, F81866_UART_CLK_MASK,
+ clock_table[i]);
+
+ fintek_8250_exit_key(pdata->base_port);
+ break;
+ }
+
+ if (i == ARRAY_SIZE(baudrate_table)) {
+ baud = tty_termios_baud_rate(old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+
+exit:
+ serial8250_do_set_termios(port, termios, old);
+}
+
+static void fintek_8250_set_termios_handler(struct uart_8250_port *uart)
+{
+ struct fintek_8250 *pdata = uart->port.private_data;
+
+ switch (pdata->pid) {
+ case CHIP_ID_F81216H:
+ case CHIP_ID_F81866:
+ uart->port.set_termios = fintek_8250_set_termios;
+ break;
+
+ default:
+ break;
+ }
+}
+
static int probe_setup_port(struct fintek_8250 *pdata,
struct uart_8250_port *uart)
{
@@ -373,6 +467,7 @@ int fintek_8250_probe(struct uart_8250_port *uart)
memcpy(pdata, &probe_data, sizeof(probe_data));
uart->port.private_data = pdata;
fintek_8250_set_rs485_handler(uart);
+ fintek_8250_set_termios_handler(uart);
return 0;
}
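fintek_8250_set_termios() above picks a SuperIO clock source by scanning a small table: a requested baud is accepted for an entry only if it does not exceed the entry and divides it exactly, so the 16x UART divisor stays an integer; otherwise the old baud is re-encoded. A standalone sketch of that lookup (hypothetical main(), same table values):

#include <stdio.h>

int main(void)
{
	static const unsigned int baudrate_table[] = { 115200, 921600, 1152000, 1500000 };
	unsigned int baud = 460800;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/* must not exceed the entry and must divide it exactly */
		if (baud > baudrate_table[i] || baudrate_table[i] % baud)
			continue;
		printf("clock base %u Hz, uartclk %u\n",
		       baudrate_table[i], baudrate_table[i] * 16);
		return 0;
	}
	printf("unsupported baud, keeping the previous rate\n");
	return 1;
}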
diff --git a/drivers/tty/serial/8250/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c
index 4045180a8cfc..3215b9b7afde 100644
--- a/drivers/tty/serial/8250/8250_fourport.c
+++ b/drivers/tty/serial/8250/8250_fourport.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index 910bfee5a88b..6640a4c7ddd1 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
@@ -6,10 +7,6 @@
/*
* Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This isn't a full driver; it just provides an alternate IRQ
* handler to deal with an erratum. Everything else is just
* using the bog standard 8250 support.
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index df2931e1e086..0809ae2aa9b1 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Device Initialisation for Lasi/Asp/Wax/Dino
*
* (c) Copyright Matthew Wilcox <willy@debian.org> 2001-2002
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index 115190b7962a..3012ea03d22c 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the 98626/98644/internal serial interface on hp300/hp400
* (based on the National Semiconductor INS8250/NS16550AF/WD16C552 UARTs)
diff --git a/drivers/tty/serial/8250/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c
index 27124e21eb96..273f59b9bca5 100644
--- a/drivers/tty/serial/8250/8250_hub6.c
+++ b/drivers/tty/serial/8250/8250_hub6.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Russell King.
* Data taken from include/asm-i386/serial.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 464389b28900..6af84900870e 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2015 Imagination Technologies
*
* Ingenic SoC UART support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 99cd478851ff..eddf119374e1 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial port driver for NXP LPC18xx/43xx UART
*
@@ -6,11 +7,6 @@
* Based on 8250_mtk.c:
* Copyright (c) 2014 MundoReader S.L.
* Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 7dddd7e6a01c..98dbc796353f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_lpss.c - Driver for UART on Intel Braswell and various other Intel SoCs
*
* Copyright (C) 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index ec957cce8c9a..efa0515139f8 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_mid.c - Driver for UART on Intel Penwell and various other Intel SOCs
*
* Copyright (C) 2015 Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
@@ -23,10 +20,11 @@
#define PCI_DEVICE_ID_INTEL_PNW_UART2 0x081c
#define PCI_DEVICE_ID_INTEL_PNW_UART3 0x081d
#define PCI_DEVICE_ID_INTEL_TNG_UART 0x1191
+#define PCI_DEVICE_ID_INTEL_CDF_UART 0x18d8
#define PCI_DEVICE_ID_INTEL_DNV_UART 0x19d8
/* Intel MID Specific registers */
-#define INTEL_MID_UART_DNV_FISR 0x08
+#define INTEL_MID_UART_FISR 0x08
#define INTEL_MID_UART_PS 0x30
#define INTEL_MID_UART_MUL 0x34
#define INTEL_MID_UART_DIV 0x38
@@ -130,7 +128,7 @@ static int dnv_handle_irq(struct uart_port *p)
{
struct mid8250 *mid = p->private_data;
struct uart_8250_port *up = up_to_u8250p(p);
- unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
+ unsigned int fisr = serial_port_in(p, INTEL_MID_UART_FISR);
u32 status;
int ret = 0;
int err;
@@ -377,6 +375,7 @@ static const struct pci_device_id pci_ids[] = {
MID_DEVICE(PCI_DEVICE_ID_INTEL_PNW_UART2, pnw_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_PNW_UART3, pnw_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_TNG_UART, tng_board),
+ MID_DEVICE(PCI_DEVICE_ID_INTEL_CDF_UART, dnv_board),
MID_DEVICE(PCI_DEVICE_ID_INTEL_DNV_UART, dnv_board),
{ },
};
diff --git a/drivers/tty/serial/8250/8250_moxa.c b/drivers/tty/serial/8250/8250_moxa.c
index d5069b2d4d79..1ee4cd94d4fa 100644
--- a/drivers/tty/serial/8250/8250_moxa.c
+++ b/drivers/tty/serial/8250/8250_moxa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250_moxa.c - MOXA Smartio/Industio MUE multiport serial driver.
*
* Author: Mathieu OTHACEHE <m.othacehe@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index fb45770d47aa..dd5e1cede2b5 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Mediatek 8250 driver.
*
* Copyright (c) 2014 MundoReader S.L.
* Author: Matthias Brugger <matthias.bgg@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/io.h>
@@ -61,7 +52,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
* registers to their default values.
*/
baud = uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk);
if (baud <= 115200) {
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 1222c005fb98..1e67a7e4a5fd 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Serial Port driver for Open Firmware platform devices
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
*/
#include <linux/console.h>
#include <linux/module.h>
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 833771bca0a5..bd40ba402410 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* 8250-core based driver for the OMAP internal UART
*
@@ -199,7 +200,7 @@ static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
* Old custom speed handling.
*/
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) {
- priv->quot = port->custom_divisor & 0xffff;
+ priv->quot = port->custom_divisor & UART_DIV_MAX;
/*
* I assume that nobody is using this. But hey, if somebody
* would like to specify the divisor _and_ the mode then the
@@ -358,7 +359,7 @@ static void omap_8250_set_termios(struct uart_port *port,
* Ask the core to calculate the divisor for us.
*/
baud = uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk / 13);
omap_8250_get_divisor(port, baud, priv);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 0c101a7470b0..b7e0e3416641 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe module for 8250/16550-type PCI serial ports.
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2001 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#undef DEBUG
#include <linux/module.h>
@@ -3368,6 +3365,7 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(INTEL, 0x081c), },
{ PCI_VDEVICE(INTEL, 0x081d), },
{ PCI_VDEVICE(INTEL, 0x1191), },
+ { PCI_VDEVICE(INTEL, 0x18d8), },
{ PCI_VDEVICE(INTEL, 0x19d8), },
/* Intel platforms with DesignWare UART */
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 34f05ed78b68..431e69a5a6a0 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Probe for 8250/16550-type ISAPNP serial ports.
*
@@ -6,10 +7,6 @@
* Copyright (C) 2001 Russell King, All Rights Reserved.
*
* Ported to the Linux PnP Layer - (C) Adam Belay.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f0cc04f62b67..11434551ac0a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Base port operations for 8250/16550-type serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Split from 8250_core.c, Copyright (C) 2001 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* A note about mapbase / membase
*
* mapbase is the physical address of the IO port.
@@ -1516,7 +1512,6 @@ static inline void __stop_tx(struct uart_8250_port *p)
return;
em485->active_timer = NULL;
- hrtimer_cancel(&em485->start_tx_timer);
__stop_tx_rs485(p);
}
@@ -1580,8 +1575,6 @@ static inline void start_tx_rs485(struct uart_port *port)
serial8250_stop_rx(&up->port);
em485->active_timer = NULL;
- if (hrtimer_is_queued(&em485->stop_tx_timer))
- hrtimer_cancel(&em485->stop_tx_timer);
mcr = serial8250_in_MCR(up);
if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
@@ -2586,8 +2579,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_dl_write(up, quot);
/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
- if (up->port.type == PORT_XR17V35X)
+ if (up->port.type == PORT_XR17V35X) {
+ /* Preserve bits not related to baudrate; DLD[7:4]. */
+ quot_frac |= serial_port_in(port, 0x2) & 0xf0;
serial_port_out(port, 0x2, quot_frac);
+ }
}
static unsigned int serial8250_get_baud_rate(struct uart_port *port,
@@ -2601,7 +2597,7 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
* causing transmission errors.
*/
return uart_get_baud_rate(port, termios, old,
- port->uartclk / 16 / 0xffff,
+ port->uartclk / 16 / UART_DIV_MAX,
port->uartclk);
}
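Several hunks replace the literal 0xffff with UART_DIV_MAX when computing the minimum baud rate passed to uart_get_baud_rate(). The arithmetic is simple: with a 16-bit divisor latch the lowest representable rate is uartclk / 16 / UART_DIV_MAX. A quick standalone check (sketch, not driver code):

#include <stdio.h>

#define UART_DIV_MAX 0xffff

int main(void)
{
	unsigned int uartclk = 1843200;			/* classic 1.8432 MHz input */
	unsigned int min_baud = uartclk / 16 / UART_DIV_MAX;

	/* 1843200 / 16 = 115200; 115200 / 65535 = 1, so anything below
	 * ~2 baud gets clamped instead of overflowing the divisor.
	 */
	printf("min baud with a 16-bit divisor: %u\n", min_baud);
	return 0;
}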
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 4d68731af534..b9bcbe20a2be 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTS
* Copyright: (C) 2013 Sergei Ianovich <ynvich@gmail.com>
@@ -7,12 +8,6 @@
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
* Based on drivers/serial/8250.c by Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 8a10b10e27aa..45ef506293ae 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 6a18d2d768fe..18751bc63a84 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 8250 serial device drivers.
#
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 933c2688dd7e..9963a766dcfb 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
/*======================================================================
A driver for PCMCIA serial devices
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index fe88a75d9a59..842d185d697e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel serial device drivers.
#
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 0475f5d261ce..c90e503d6b57 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* altera_jtaguart.c -- Altera JTAG UART driver
*
@@ -6,11 +7,6 @@
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
* (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 3e4b717670d7..b88b05f8e81e 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* altera_uart.c -- Altera UART driver
*
@@ -6,11 +7,6 @@
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
* (C) Copyright 2008, Thomas Chou <thomas@wytron.com.tw>
* (C) Copyright 2010, Tobias Klauser <tklauser@distanz.ch>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -288,10 +284,10 @@ static irqreturn_t altera_uart_interrupt(int irq, void *data)
return IRQ_RETVAL(isr);
}
-static void altera_uart_timer(unsigned long data)
+static void altera_uart_timer(struct timer_list *t)
{
- struct uart_port *port = (void *)data;
- struct altera_uart *pp = container_of(port, struct altera_uart, port);
+ struct altera_uart *pp = from_timer(pp, t, tmr);
+ struct uart_port *port = &pp->port;
altera_uart_interrupt(0, port);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
@@ -314,7 +310,7 @@ static int altera_uart_startup(struct uart_port *port)
int ret;
if (!port->irq) {
- setup_timer(&pp->tmr, altera_uart_timer, (unsigned long)port);
+ timer_setup(&pp->tmr, altera_uart_timer, 0);
mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port));
return 0;
}
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index 9ec4b8d2879f..2c37d11726ab 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMBA serial ports
*
@@ -6,20 +7,6 @@
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This is a generic driver for ARM AMBA-type serial ports. They
* have a lot of 16550-like features, but are not register compatible.
* Note that although they do have CTS, DCD and DSR inputs, they do
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 111e6a950779..04af8de8617e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for AMBA serial ports
*
@@ -7,20 +8,6 @@
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Copyright (C) 2010 ST-Ericsson SA
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This is a generic driver for ARM AMBA-type serial ports. They
* have a lot of 16550-like features, but are not register compatible.
* Note that although they do have CTS, DCD and DSR inputs, they do
@@ -281,7 +268,6 @@ struct uart_amba_port {
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
- bool autorts;
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
#ifdef CONFIG_DMA_ENGINE
@@ -1078,9 +1064,9 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
* On every poll, it checks the residue in the DMA buffer and transfers
* data to the tty. last_residue is also updated for the next poll.
*/
-static void pl011_dma_rx_poll(unsigned long args)
+static void pl011_dma_rx_poll(struct timer_list *t)
{
- struct uart_amba_port *uap = (struct uart_amba_port *)args;
+ struct uart_amba_port *uap = from_timer(uap, t, dmarx.timer);
struct tty_port *port = &uap->port.state->port;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = uap->dmarx.chan;
@@ -1192,9 +1178,7 @@ skip_rx:
dev_dbg(uap->port.dev, "could not trigger initial "
"RX DMA job, fall back to interrupt mode\n");
if (uap->dmarx.poll_rate) {
- init_timer(&(uap->dmarx.timer));
- uap->dmarx.timer.function = pl011_dma_rx_poll;
- uap->dmarx.timer.data = (unsigned long)uap;
+ timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
mod_timer(&uap->dmarx.timer,
jiffies +
msecs_to_jiffies(uap->dmarx.poll_rate));
@@ -1588,7 +1572,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
- if (uap->autorts) {
+ if (port->status & UPSTAT_AUTORTS) {
/* We need to disable auto-RTS if we want to turn RTS off */
TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
}
@@ -1842,7 +1826,7 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
{
unsigned int cr;
- uap->autorts = false;
+ uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
spin_lock_irq(&uap->port.lock);
cr = pl011_read(uap, REG_CR);
uap->old_cr = cr;
@@ -2028,10 +2012,10 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
old_cr |= UART011_CR_RTSEN;
old_cr |= UART011_CR_CTSEN;
- uap->autorts = true;
+ port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
} else {
old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
- uap->autorts = false;
+ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
}
if (uap->vendor->oversampling) {
diff --git a/drivers/tty/serial/amba-pl011.h b/drivers/tty/serial/amba-pl011.h
index 411c60e1f9a4..077eb12a3472 100644
--- a/drivers/tty/serial/amba-pl011.h
+++ b/drivers/tty/serial/amba-pl011.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef AMBA_PL011_H
#define AMBA_PL011_H
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index dd60ed96a0ad..60cd133ffbbc 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for GRLIB serial ports (APBUART)
*
diff --git a/drivers/tty/serial/apbuart.h b/drivers/tty/serial/apbuart.h
index 5faf87c8d2bc..81baf007694f 100644
--- a/drivers/tty/serial/apbuart.h
+++ b/drivers/tty/serial/apbuart.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GRLIB_APBUART_H__
#define __GRLIB_APBUART_H__
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index decc7f3c1ab2..db5df3d54818 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Atheros AR933X SoC built-in UART driver
*
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 77fe306690c4..2599f9ecccfe 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARC On-Chip(fpga) UART Driver
*
* Copyright (C) 2010-2012 Synopsys, Inc. (www.synopsys.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* vineetg: July 10th 2012
* -Decoupled the driver from arch/arc
* +Using platform_get_resource() for irq/membase (thx to bfin_uart.c)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 7551cab438ff..efa25611ca0c 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Atmel AT91 Serial ports
* Copyright (C) 2003 Rick Bronson
@@ -6,21 +7,6 @@
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* DMA support added by Chip Coldwell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/tty.h>
#include <linux/ioport.h>
@@ -171,6 +157,7 @@ struct atmel_uart_port {
bool has_hw_timer;
struct timer_list uart_timer;
+ bool tx_stopped;
bool suspended;
unsigned int pending;
unsigned int pending_status;
@@ -380,6 +367,10 @@ static int atmel_config_rs485(struct uart_port *port,
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_port->tx_stopped)
+ return TIOCSER_TEMT;
return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
TIOCSER_TEMT :
0;
@@ -485,6 +476,7 @@ static void atmel_stop_tx(struct uart_port *port)
* is fully transmitted.
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+ atmel_port->tx_stopped = true;
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@@ -521,6 +513,7 @@ static void atmel_start_tx(struct uart_port *port)
/* re-enable the transmitter */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+ atmel_port->tx_stopped = false;
}
/*
@@ -1178,10 +1171,11 @@ chan_err:
return -EINVAL;
}
-static void atmel_uart_timer_callback(unsigned long data)
+static void atmel_uart_timer_callback(struct timer_list *t)
{
- struct uart_port *port = (void *)data;
- struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_uart_port *atmel_port = from_timer(atmel_port, t,
+ uart_timer);
+ struct uart_port *port = &atmel_port->uart;
if (!atomic_read(&atmel_port->tasklet_shutdown)) {
tasklet_schedule(&atmel_port->tasklet_rx);
@@ -1667,29 +1661,6 @@ static void atmel_init_property(struct atmel_uart_port *atmel_port,
}
}
-static void atmel_init_rs485(struct uart_port *port,
- struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
-
- struct serial_rs485 *rs485conf = &port->rs485;
- u32 rs485_delay[2];
-
- /* rs485 properties */
- if (of_property_read_u32_array(np, "rs485-rts-delay",
- rs485_delay, 2) == 0) {
- rs485conf->delay_rts_before_send = rs485_delay[0];
- rs485conf->delay_rts_after_send = rs485_delay[1];
- rs485conf->flags = 0;
- }
-
- if (of_get_property(np, "rs485-rx-during-tx", NULL))
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
- if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
- rs485conf->flags |= SER_RS485_ENABLED;
-}
-
static void atmel_set_ops(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -1866,10 +1837,9 @@ static int atmel_startup(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
- setup_timer(&atmel_port->uart_timer,
- atmel_uart_timer_callback,
- (unsigned long)port);
+ timer_setup(&atmel_port->uart_timer, atmel_uart_timer_callback, 0);
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
@@ -2122,6 +2092,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* disable receiver and transmitter */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ atmel_port->tx_stopped = true;
/* mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
@@ -2207,6 +2178,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
/* restore interrupts */
atmel_uart_writel(port, ATMEL_US_IER, imr);
@@ -2373,7 +2345,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- atmel_init_rs485(port, pdev);
+ of_get_rs485_mode(pdev->dev.of_node, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
@@ -2450,6 +2422,7 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
/* Make sure that tx path is actually able to send characters */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+ atmel_port->tx_stopped = false;
uart_console_write(port, s, count, atmel_console_putchar);
@@ -2511,6 +2484,7 @@ static int __init atmel_console_setup(struct console *co, char *options)
{
int ret;
struct uart_port *port = &atmel_ports[co->index].uart;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int baud = 115200;
int bits = 8;
int parity = 'n';
@@ -2528,6 +2502,7 @@ static int __init atmel_console_setup(struct console *co, char *options)
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ atmel_port->tx_stopped = false;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
diff --git a/drivers/tty/serial/atmel_serial.h b/drivers/tty/serial/atmel_serial.h
index bd2560502f3c..ba3a2437cde4 100644
--- a/drivers/tty/serial/atmel_serial.h
+++ b/drivers/tty/serial/atmel_serial.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* include/linux/atmel_serial.h
*
@@ -6,11 +7,6 @@
*
* USART registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef ATMEL_SERIAL_H
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 8c48c3784831..b7adc6127b3d 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Derived from many drivers using generic_serial interface.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
@@ -846,8 +843,10 @@ static int bcm_uart_probe(struct platform_device *pdev)
if (!res_irq)
return -ENODEV;
- clk = pdev->dev.of_node ? of_clk_get(pdev->dev.of_node, 0) :
- clk_get(&pdev->dev, "periph");
+ clk = clk_get(&pdev->dev, "refclk");
+ if (IS_ERR(clk) && pdev->dev.of_node)
+ clk = of_clk_get(pdev->dev.of_node, 0);
+
if (IS_ERR(clk))
return -ENODEV;
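The bcm63xx probe change above reorders the clock lookup: try the named "refclk" first and fall back to the first DT clock only when that lookup fails and a device node exists. A hedged sketch of the same ordering as a helper (the function name is illustrative):

#include <linux/clk.h>
#include <linux/device.h>

static struct clk *demo_get_uart_clk(struct device *dev)
{
	/* named lookup works for both DT and non-DT platforms */
	struct clk *clk = clk_get(dev, "refclk");

	if (IS_ERR(clk) && dev->of_node)
		clk = of_clk_get(dev->of_node, 0);	/* legacy DT fallback */

	return clk;		/* caller still checks IS_ERR() */
}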
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 6b03fb12cd19..4ccca5d22f4f 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Sport Emulated UART Driver
*
* Copyright 2006-2009 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/*
@@ -584,7 +583,7 @@ static void sport_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&up->port.lock, flags);
}
-struct uart_ops sport_uart_ops = {
+static const struct uart_ops sport_uart_ops = {
.tx_empty = sport_tx_empty,
.set_mctrl = sport_set_mctrl,
.get_mctrl = sport_get_mctrl,
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index e4510ea135ce..4b12f45d6580 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Sport Emulated UART Driver
*
* Copyright 2006-2008 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/*
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 293ecbb00684..4755fa696321 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Blackfin On-Chip Serial Driver
*
* Copyright 2006-2011 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -456,8 +455,9 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
tty_flip_buffer_push(&uart->port.state->port);
}
-void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
+void bfin_serial_rx_dma_timeout(struct timer_list *t)
{
+ struct bfin_serial_port *uart = from_timer(uart, t, rx_dma_timer);
int x_pos, pos;
unsigned long flags;
@@ -624,8 +624,6 @@ static int bfin_serial_startup(struct uart_port *port)
set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf);
enable_dma(uart->rx_dma_channel);
- uart->rx_dma_timer.data = (unsigned long)(uart);
- uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout;
uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
add_timer(&(uart->rx_dma_timer));
#else
@@ -1316,7 +1314,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
}
uart->rx_dma_channel = res->start;
- init_timer(&(uart->rx_dma_timer));
+ timer_setup(&uart->rx_dma_timer, bfin_serial_rx_dma_timeout, 0);
#endif
#if defined(SERIAL_BFIN_CTSRTS) || \
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index ac1328629baa..98f193a83392 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CLPS711x serial ports
*
@@ -5,11 +6,6 @@
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_CLPS711X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
index 0ad027b95873..9f175a92fb5d 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for CPM (SCC/SMC) serial ports
*
@@ -5,11 +6,6 @@
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
*/
#ifndef CPM_UART_H
#define CPM_UART_H
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 9ac142cfc1f1..24a5f05e769b 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; core driver
*
@@ -12,21 +13,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2005-2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
index 6d3b22e93246..4eba17f3d293 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; CPM1 definitions
*
@@ -8,21 +9,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
index 60c7e94cde1e..18ec0849918a 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for CPM (SCC/SMC) serial ports
*
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
index f46d2ca87209..e3bff068dc3c 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CPM (SCC/SMC) serial ports; CPM2 definitions
*
@@ -8,21 +9,6 @@
* (C) 2004 Intracom, S.A.
* (C) 2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
index 51e651a69938..051a8509c3e5 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for CPM (SCC/SMC) serial ports
*
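For reference, the SPDX tags being added across these files use the kernel's two comment styles: C99-style // comments for .c sources and C-style /* */ comments for headers. A minimal sketch of the placement, with placeholder file names not taken from this patch:

// SPDX-License-Identifier: GPL-2.0+
/* example_uart.c - tag on the first line of a C source file */

/* SPDX-License-Identifier: GPL-2.0 */
/* example_uart.h - tag on the first line of a header file */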
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 59a2a7e18b5a..c9458a033e3c 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Serial port driver for the ETRAX 100LX chip
*
@@ -2058,7 +2059,7 @@ static void flush_timeout_function(unsigned long data)
static struct timer_list flush_timer;
static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
{
struct e100_serial *info;
int i;
@@ -4136,7 +4137,7 @@ static int __init rs_init(void)
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
- setup_timer(&flush_timer, timed_flush_handler, 0);
+ timer_setup(&flush_timer, timed_flush_handler, 0);
mod_timer(&flush_timer, jiffies + 5);
#endif
diff --git a/drivers/tty/serial/crisv10.h b/drivers/tty/serial/crisv10.h
index 15a52ee58251..79ba2bc95d3d 100644
--- a/drivers/tty/serial/crisv10.h
+++ b/drivers/tty/serial/crisv10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* serial.h: Arch-dep definitions for the Etrax100 serial driver.
*
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index 02ad6953b167..f460cca139e2 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Conexant Digicolor serial ports (USART)
*
* Author: Baruch Siach <baruch@tkos.co.il>
*
* Copyright (C) 2014 Paradox Innovation Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index ff465ff43577..7b57e840e255 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dz.c: Serial port driver for DECstations equipped
* with the DZ chipset.
diff --git a/drivers/tty/serial/dz.h b/drivers/tty/serial/dz.h
index faf169ed27b3..3b3e31954f24 100644
--- a/drivers/tty/serial/dz.h
+++ b/drivers/tty/serial/dz.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* dz.h: Serial port driver for DECstations equipped
* with the DZ chipset.
diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-arm-semihost.c
index 6bbeb699777c..fa096c10b591 100644
--- a/drivers/tty/serial/earlycon-arm-semihost.c
+++ b/drivers/tty/serial/earlycon-arm-semihost.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 ARM Ltd.
* Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -5,18 +6,6 @@
* Adapted for ARM and earlycon:
* Copyright (C) 2014 Linaro Ltd.
* Author: Rob Herring <robh@kernel.org>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/console.h>
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 98928f082d87..4c8b80f1c688 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Linaro Ltd.
* Author: Rob Herring <robh@kernel.org>
@@ -5,10 +6,6 @@
* Based on 8250 earlycon:
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 9fff25be87f9..d6b5e5463746 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#if defined(CONFIG_SERIAL_EFM32_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 2f80bc7e44fb..24bf6bfb29b4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/init.h>
#include <linux/console.h>
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f0252184291e..1c4d3f387138 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale lpuart serial port driver
*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_FSL_LPUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -970,9 +966,9 @@ static void lpuart_dma_rx_complete(void *arg)
lpuart_copy_rx_to_tty(sport);
}
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
{
- struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
lpuart_copy_rx_to_tty(sport);
}
@@ -1267,8 +1263,7 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
static void rx_dma_timer_init(struct lpuart_port *sport)
{
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
}
@@ -1632,12 +1627,11 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
{
struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
unsigned long flags;
- unsigned long ctrl, old_ctrl, bd, modem;
+ unsigned long ctrl, old_ctrl, modem;
unsigned int baud;
unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
ctrl = old_ctrl = lpuart32_read(&sport->port, UARTCTRL);
- bd = lpuart32_read(&sport->port, UARTBAUD);
modem = lpuart32_read(&sport->port, UARTMODIR);
/*
* only support CS8 and CS7, and for CS7 must enable PE.
@@ -2212,6 +2206,24 @@ static int lpuart_probe(struct platform_device *pdev)
if (ret)
goto failed_attach_port;
+ of_get_rs485_mode(np, &sport->port.rs485);
+
+ if (sport->port.rs485.flags & SER_RS485_RX_DURING_TX) {
+ dev_err(&pdev->dev, "driver doesn't support RX during TX\n");
+ return -ENOSYS;
+ }
+
+ if (sport->port.rs485.delay_rts_before_send ||
+ sport->port.rs485.delay_rts_after_send) {
+ dev_err(&pdev->dev, "driver doesn't support RTS delays\n");
+ return -ENOSYS;
+ }
+
+ if (sport->port.rs485.flags & SER_RS485_ENABLED) {
+ sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
+ writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
+ }
+
sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
if (!sport->dma_tx_chan)
dev_info(sport->port.dev, "DMA tx channel request failed, "
@@ -2222,12 +2234,6 @@ static int lpuart_probe(struct platform_device *pdev)
dev_info(sport->port.dev, "DMA rx channel request failed, "
"operating without rx DMA\n");
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time")) {
- sport->port.rs485.flags |= SER_RS485_ENABLED;
- sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
- writeb(UARTMODEM_TXRTSE, sport->port.membase + UARTMODEM);
- }
-
return 0;
failed_attach_port:
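The lpuart hunks above replace the open-coded "linux,rs485-enabled-at-boot-time" check with of_get_rs485_mode() and then reject RS485 features the hardware cannot provide. A minimal sketch of that probe-time flow, assuming placeholder names (my_rs485_probe) and an include set that is not spelled out in this patch:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/serial_core.h>

static int my_rs485_probe(struct device_node *np, struct serial_rs485 *rs485)
{
	/* Fill flags and delays from the generic RS485 DT properties. */
	of_get_rs485_mode(np, rs485);

	/* Reject features this UART cannot implement. */
	if (rs485->flags & SER_RS485_RX_DURING_TX)
		return -ENOSYS;
	if (rs485->delay_rts_before_send || rs485->delay_rts_after_send)
		return -ENOSYS;

	/* The hardware drives RTS active while sending. */
	if (rs485->flags & SER_RS485_ENABLED)
		rs485->flags |= SER_RS485_RTS_ON_SEND;

	return 0;
}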
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index fe92d74f4ea5..ad374f7c476d 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* icom.c
*
@@ -6,21 +7,6 @@
* Serial device driver.
*
* Based on code from serial.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/icom.h b/drivers/tty/serial/icom.h
index c8029e0025c9..8a77e739b333 100644
--- a/drivers/tty/serial/icom.h
+++ b/drivers/tty/serial/icom.h
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* icom.h
*
* Copyright (C) 2001 Michael Anderson, IBM Corporation
*
* Serial device driver include file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index f190a84a0246..ffefd218761e 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/****************************************************************************
*
* Driver for the IFX 6x60 spi modem.
@@ -10,20 +11,6 @@
* Copyright (C) 2009, 2010 Intel Corp
* Russ Gorby <russ.gorby@intel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA
- *
* Driver modified by Intel from Option gtm501l_spi.c
*
* Notes
@@ -276,9 +263,9 @@ static void mrdy_assert(struct ifx_spi_device *ifx_dev)
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
*/
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
{
- struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+ struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
tty_port_tty_hangup(&ifx_dev->tty_port, false);
@@ -1029,9 +1016,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
- init_timer(&ifx_dev->spi_timer);
- ifx_dev->spi_timer.function = ifx_spi_timeout;
- ifx_dev->spi_timer.data = (unsigned long)ifx_dev;
+ timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
diff --git a/drivers/tty/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
index 4fbddc297839..c5a2514212ff 100644
--- a/drivers/tty/serial/ifx6x60.h
+++ b/drivers/tty/serial/ifx6x60.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/****************************************************************************
*
* Driver for the IFX spi modem.
@@ -5,23 +6,6 @@
* Copyright (C) 2009, 2010 Intel Corp
* Jim Stanley <jim.stanley@intel.com>
*
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA
- *
- *
- *
*****************************************************************************/
#ifndef _IFX6X60_H
#define _IFX6X60_H
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index dfeff3951f93..e4b3d9123a03 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Motorola/Freescale IMX serial ports
*
@@ -5,16 +6,6 @@
*
* Author: Sascha Hauer <sascha@saschahauer.de>
* Copyright (C) 2004 Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -334,7 +325,8 @@ static void imx_port_rts_active(struct imx_port *sport, unsigned long *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
- mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
+ sport->port.mctrl |= TIOCM_RTS;
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
@@ -342,7 +334,8 @@ static void imx_port_rts_inactive(struct imx_port *sport, unsigned long *ucr2)
*ucr2 &= ~UCR2_CTSC;
*ucr2 |= UCR2_CTS;
- mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
+ sport->port.mctrl &= ~TIOCM_RTS;
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2)
@@ -714,8 +707,6 @@ static void imx_disable_rx_int(struct imx_port *sport)
{
unsigned long temp;
- sport->dma_is_rxing = 1;
-
/* disable the receiver ready and aging timer interrupts */
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_RRDYEN);
@@ -732,29 +723,6 @@ static void imx_disable_rx_int(struct imx_port *sport)
}
static void clear_rx_errors(struct imx_port *sport);
-static int start_rx_dma(struct imx_port *sport);
-/*
- * If the RXFIFO is filled with some data, and then we
- * arise a DMA operation to receive them.
- */
-static void imx_dma_rxint(struct imx_port *sport)
-{
- unsigned long temp;
- unsigned long flags;
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
- temp = readl(sport->port.membase + USR2);
- if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
-
- imx_disable_rx_int(sport);
-
- /* tell the DMA to receive the data. */
- start_rx_dma(sport);
- }
-
- spin_unlock_irqrestore(&sport->port.lock, flags);
-}
/*
* We have a modem side uart, so the meanings of RTS and CTS are inverted.
@@ -816,11 +784,8 @@ static irqreturn_t imx_int(int irq, void *dev_id)
sts = readl(sport->port.membase + USR1);
sts2 = readl(sport->port.membase + USR2);
- if (sts & (USR1_RRDY | USR1_AGTIM)) {
- if (sport->dma_is_enabled)
- imx_dma_rxint(sport);
- else
- imx_rxint(irq, dev_id);
+ if (!sport->dma_is_enabled && (sts & (USR1_RRDY | USR1_AGTIM))) {
+ imx_rxint(irq, dev_id);
ret = IRQ_HANDLED;
}
@@ -941,9 +906,9 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
{
- struct imx_port *sport = (struct imx_port *)data;
+ struct imx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -1074,6 +1039,7 @@ static int start_rx_dma(struct imx_port *sport)
desc->callback_param = sport;
dev_dbg(dev, "RX: prepare for the DMA.\n");
+ sport->dma_is_rxing = 1;
sport->rx_cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
return 0;
@@ -1165,7 +1131,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
goto err;
}
- sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ sport->rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
if (!sport->rx_buf) {
ret = -ENOMEM;
goto err;
@@ -1207,10 +1173,6 @@ static void imx_enable_dma(struct imx_port *sport)
temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
writel(temp, sport->port.membase + UCR1);
- temp = readl(sport->port.membase + UCR2);
- temp |= UCR2_ATEN;
- writel(temp, sport->port.membase + UCR2);
-
imx_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);
sport->dma_is_enabled = 1;
@@ -1411,15 +1373,19 @@ static void imx_flush_buffer(struct uart_port *port)
temp = readl(sport->port.membase + UCR1);
temp &= ~UCR1_TDMAEN;
writel(temp, sport->port.membase + UCR1);
- sport->dma_is_txing = false;
+ sport->dma_is_txing = 0;
}
/*
* According to the Reference Manual description of the UART SRST bit:
+ *
* "Reset the transmit and receive state machines,
* all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
- * and UTS[6-3]". As we don't need to restore the old values from
- * USR1, USR2, URXD, UTXD, only save/restore the other four registers
+ * and UTS[6-3]".
+ *
+ * We don't need to restore the old values from USR1, USR2, URXD and
+ * UTXD. UBRC is read only, so only save/restore the other three
+ * registers.
*/
ubir = readl(sport->port.membase + UBIR);
ubmr = readl(sport->port.membase + UBMR);
@@ -2051,6 +2017,8 @@ static int serial_imx_probe_dt(struct imx_port *sport,
if (of_get_property(np, "rts-gpios", NULL))
sport->have_rtsgpio = 1;
+ of_get_rs485_mode(np, &sport->port.rs485);
+
return 0;
}
#else
@@ -2112,12 +2080,9 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
sport->port.rs485_config = imx_rs485_config;
- sport->port.rs485.flags =
- SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX;
+ sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
- init_timer(&sport->timer);
- sport->timer.function = imx_timeout;
- sport->timer.data = (unsigned long)sport;
+ timer_setup(&sport->timer, imx_timeout, 0);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
@@ -2346,11 +2311,39 @@ static int imx_serial_port_resume(struct device *dev)
return 0;
}
+static int imx_serial_port_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ uart_suspend_port(&imx_reg, &sport->port);
+
+ /* Needed to enable clock in suspend_noirq */
+ return clk_prepare(sport->clk_ipg);
+}
+
+static int imx_serial_port_thaw(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx_port *sport = platform_get_drvdata(pdev);
+
+ uart_resume_port(&imx_reg, &sport->port);
+
+ clk_unprepare(sport->clk_ipg);
+
+ return 0;
+}
+
static const struct dev_pm_ops imx_serial_port_pm_ops = {
.suspend_noirq = imx_serial_port_suspend_noirq,
.resume_noirq = imx_serial_port_resume_noirq,
+ .freeze_noirq = imx_serial_port_suspend_noirq,
+ .restore_noirq = imx_serial_port_resume_noirq,
.suspend = imx_serial_port_suspend,
.resume = imx_serial_port_resume,
+ .freeze = imx_serial_port_freeze,
+ .thaw = imx_serial_port_thaw,
+ .restore = imx_serial_port_thaw,
};
static struct platform_driver serial_imx_driver = {
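The new imx freeze/thaw callbacks above split clock handling across hibernation phases: clk_prepare(), which may sleep, runs in .freeze so that the noirq callbacks reused for .freeze_noirq/.restore_noirq only need the atomic clk_enable()/clk_disable(). A minimal sketch of that split, with a placeholder structure (my_port) that is an assumption rather than part of this patch:

#include <linux/clk.h>
#include <linux/device.h>

struct my_port {
	struct clk *clk_ipg;			/* placeholder clock handle */
};

static int my_port_freeze(struct device *dev)
{
	struct my_port *sport = dev_get_drvdata(dev);

	/* May sleep, so keep it out of the noirq phase. */
	return clk_prepare(sport->clk_ipg);
}

static int my_port_thaw(struct device *dev)
{
	struct my_port *sport = dev_get_drvdata(dev);

	clk_unprepare(sport->clk_ipg);
	return 0;
}

The dev_pm_ops block then points .freeze_noirq and .restore_noirq at the existing suspend/resume noirq handlers, as the hunk above shows.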
diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
index 906ee770ff4a..d8a1cdd6a53d 100644
--- a/drivers/tty/serial/ioc3_serial.c
+++ b/drivers/tty/serial/ioc3_serial.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2005 Silicon Graphics, Inc. All Rights Reserved.
*/
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index 43d7d32eb150..db5b979e5a0c 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
* Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 7ddddb4c3844..8c810733df3d 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Zilog serial chips found on SGI workstations and
* servers. This driver could actually be made more generic.
diff --git a/drivers/tty/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
index a59a9a8341d2..b52801fe2d0d 100644
--- a/drivers/tty/serial/ip22zilog.h
+++ b/drivers/tty/serial/ip22zilog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IP22_ZILOG_H
#define _IP22_ZILOG_H
diff --git a/drivers/tty/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
index 0b79b87df47d..7a128aaa3a66 100644
--- a/drivers/tty/serial/jsm/jsm.h
+++ b/drivers/tty/serial/jsm/jsm.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 4eb12a9cae76..c061a7b7bd23 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
*
* This is shared code between Digi's CVS archive and the
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 102d499814ac..592e51d8944e 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
index c6fdd6369534..4718560b8fdc 100644
--- a/drivers/tty/serial/jsm/jsm_neo.c
+++ b/drivers/tty/serial/jsm/jsm_neo.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ibm.com>
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index ec7d8383900f..469927d37b41 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
* Copyright 2003 Digi International (www.digi.com)
*
* Copyright (C) 2004 IBM Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
* Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com>
* Ananda Venkatarman <mansarov@us.ibm.com>
@@ -36,7 +27,7 @@ static void jsm_carrier(struct jsm_channel *ch);
static inline int jsm_get_mstat(struct jsm_channel *ch)
{
unsigned char mstat;
- unsigned result;
+ int result;
jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");
@@ -124,6 +115,7 @@ static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void jsm_tty_write(struct uart_port *port)
{
struct jsm_channel *channel;
+
channel = container_of(port, struct jsm_channel, uart_port);
channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}
@@ -275,14 +267,12 @@ static int jsm_tty_open(struct uart_port *port)
static void jsm_tty_close(struct uart_port *port)
{
struct jsm_board *bd;
- struct ktermios *ts;
struct jsm_channel *channel =
container_of(port, struct jsm_channel, uart_port);
jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");
bd = channel->ch_bd;
- ts = &port->state->port.tty->termios;
channel->ch_flags &= ~(CH_STOPI);
@@ -473,12 +463,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
} else
set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
- rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port);
- if (rc){
+ rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
+ if (rc) {
printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
return rc;
- }
- else
+ } else
printk(KERN_INFO "jsm: Port %d added\n", i);
}
@@ -541,7 +530,7 @@ void jsm_input(struct jsm_channel *ch)
tp = port->tty;
bd = ch->ch_bd;
- if(!bd)
+ if (!bd)
return;
spin_lock_irqsave(&ch->ch_lock, lock_flags);
@@ -781,7 +770,7 @@ void jsm_check_queue_flow_control(struct jsm_channel *ch)
if (qleft < 256) {
/* HWFLOW */
if (ch->ch_c_cflag & CRTSCTS) {
- if(!(ch->ch_flags & CH_RECEIVER_OFF)) {
+ if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
bd_ops->disable_receiver(ch);
ch->ch_flags |= (CH_RECEIVER_OFF);
jsm_dbg(READ, &ch->ch_bd->pci_dev,
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 117df151627d..4029272891f9 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* KGDB NMI serial console
*
@@ -6,10 +7,6 @@
* Colin Cross <ccross@android.com>
* Copyright 2012 Linaro Ltd.
* Anton Vorontsov <anton.vorontsov@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -191,9 +188,9 @@ bool kgdb_nmi_poll_knock(void)
* The tasklet is cheap, it does not cause wakeups when reschedules itself,
* instead it waits for the next tick.
*/
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
{
- struct kgdb_nmi_tty_priv *priv = (void *)data;
+ struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
char ch;
priv->timer.expires = jiffies + (HZ/100);
@@ -244,7 +241,7 @@ static int kgdb_nmi_tty_install(struct tty_driver *drv, struct tty_struct *tty)
return -ENOMEM;
INIT_KFIFO(priv->fifo);
- setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+ timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
tty_port_init(&priv->port);
priv->port.ops = &kgdb_nmi_tty_port_ops;
tty->driver_data = priv;
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index a260cde743e2..b4ba2b1dab76 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on the same principle as kgdboe using the NETPOLL api, this
* driver uses a console polling api to implement a gdb serial inteface
@@ -6,10 +7,6 @@
* Maintainer: Jason Wessel <jason.wessel@windriver.com>
*
* 2007-2008 (c) Jason Wessel - Wind River Systems, Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
@@ -245,7 +242,8 @@ static void kgdboc_put_char(u8 chr)
kgdb_tty_line, chr);
}
-static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
+static int param_set_kgdboc_var(const char *kmessage,
+ const struct kernel_param *kp)
{
int len = strlen(kmessage);
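The kgdboc change above tracks the constification of the module parameter API: custom set/get hooks now take a const struct kernel_param pointer. A minimal sketch of a setter with validation wired up through kernel_param_ops, using placeholder names (example_threshold, 1000 as the range limit) that are not from this patch:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>

static unsigned int example_threshold;

static int param_set_threshold(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret = kstrtouint(val, 10, &n);

	if (ret)
		return ret;
	if (n > 1000)				/* placeholder range check */
		return -EINVAL;
	example_threshold = n;
	return 0;
}

static const struct kernel_param_ops threshold_ops = {
	.set = param_set_threshold,
	.get = param_get_uint,
};

module_param_cb(example_threshold, &threshold_ops, &example_threshold, 0644);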
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 22df94f107e5..044128277248 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Copyright (C) 2004 Infineon IFAP DC COM CPE
* Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2007 John Crispin <john@phrozen.org>
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index cea57ff32c33..d1d73261575b 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* High Speed Serial Ports on NXP LPC32xx SoC
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2010 NXP Semiconductors
* Copyright (C) 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 5b3bd9511993..7b83a8aab495 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* m32r_sio.c
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2001 Russell King.
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -511,9 +507,9 @@ static void serial_unlink_irq_chain(struct uart_sio_port *up)
/*
* This function is used to handle ports that do not have an interrupt.
*/
-static void m32r_sio_timeout(unsigned long data)
+static void m32r_sio_timeout(struct timer_list *t)
{
- struct uart_sio_port *up = (struct uart_sio_port *)data;
+ struct uart_sio_port *up = from_timer(up, t, timer);
unsigned int timeout;
unsigned int sts;
@@ -576,7 +572,6 @@ static int m32r_sio_startup(struct uart_port *port)
timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
- up->timer.data = (unsigned long)up;
mod_timer(&up->timer, jiffies + timeout);
} else {
retval = serial_link_irq_chain(up);
@@ -907,8 +902,7 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
up->port.line = i;
up->port.ops = &m32r_sio_pops;
- init_timer(&up->timer);
- up->timer.function = m32r_sio_timeout;
+ timer_setup(&up->timer, m32r_sio_timeout, 0);
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
index 4671473793e3..6eed48828f94 100644
--- a/drivers/tty/serial/m32r_sio_reg.h
+++ b/drivers/tty/serial/m32r_sio_reg.h
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* m32r_sio_reg.h
*
* Copyright (C) 1992, 1994 by Theodore Ts'o.
* Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
*
- * Redistribution of this file is permitted under the terms of the GNU
- * Public License (GPL)
- *
* These are the UART port assignments, expressed as offsets from the base
* register. These assignments should hold for any serial port based on
* a 8250, 16450, or 16550(A).
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index ace82645b123..371569a0fd00 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
*
* Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
* Notes: the MAX3100 doesn't provide an interrupt on CTS so we have
* to use polling for flow control. TX empty IRQ is unusable, since
* writing conf clears FIFO buffer and we cannot have this interrupt
@@ -183,9 +178,9 @@ static void max3100_dowork(struct max3100_port *s)
queue_work(s->workqueue, &s->work);
}
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
{
- struct max3100_port *s = (struct max3100_port *)data;
+ struct max3100_port *s = from_timer(s, t, timer);
if (s->port.state) {
max3100_dowork(s);
@@ -263,7 +258,7 @@ static void max3100_work(struct work_struct *w)
struct max3100_port *s = container_of(w, struct max3100_port, work);
int rxchars;
u16 tx, rx;
- int conf, cconf, rts, crts;
+ int conf, cconf, crts;
struct circ_buf *xmit = &s->port.state->xmit;
dev_dbg(&s->spi->dev, "%s\n", __func__);
@@ -274,7 +269,6 @@ static void max3100_work(struct work_struct *w)
conf = s->conf;
cconf = s->conf_commit;
s->conf_commit = 0;
- rts = s->rts;
crts = s->rts_commit;
s->rts_commit = 0;
spin_unlock(&s->conf_lock);
@@ -436,7 +430,6 @@ max3100_set_termios(struct uart_port *port, struct ktermios *termios,
dev_dbg(&s->spi->dev, "%s\n", __func__);
cflag = termios->c_cflag;
- param_new = 0;
param_mask = 0;
baud = tty_termios_baud_rate(termios);
@@ -787,9 +780,7 @@ static int max3100_probe(struct spi_device *spi)
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
- init_timer(&max3100s[i]->timer);
- max3100s[i]->timer.function = max3100_timeout;
- max3100s[i]->timer.data = (unsigned long) max3100s[i];
+ timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 9dfedbe6c071..ecb6513a6505 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
@@ -6,11 +7,6 @@
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
* Based on max3107.c, by Aavamobile
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 02eb32217685..7dbfb4cde124 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/****************************************************************************/
/*
* mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <gerg@uclinux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/****************************************************************************/
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index e72ea61c70db..ef89534dd760 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MEN 16z135 High Speed UART
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <johannes.thumshirn@men.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; version 2 of the License.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 07c0f98be3ac..daafe60175da 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -1,19 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on meson_uart.c, by AMLOGIC, INC.
*
* Copyright (C) 2014 Carlo Caione <carlo@caione.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#if defined(CONFIG_SERIAL_MESON_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -183,12 +178,12 @@ static void meson_receive_chars(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
char flag;
- u32 status, ch, mode;
+ u32 ostatus, status, ch, mode;
do {
flag = TTY_NORMAL;
port->icount.rx++;
- status = readl(port->membase + AML_UART_STATUS);
+ ostatus = status = readl(port->membase + AML_UART_STATUS);
if (status & AML_UART_ERR) {
if (status & AML_UART_TX_FIFO_WERR)
@@ -216,6 +211,16 @@ static void meson_receive_chars(struct uart_port *port)
ch = readl(port->membase + AML_UART_RFIFO);
ch &= 0xff;
+ if ((ostatus & AML_UART_FRAME_ERR) && (ch == 0)) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
if ((status & port->ignore_status_mask) == 0)
tty_insert_flip_char(tport, ch, flag);
@@ -362,7 +367,7 @@ static void meson_uart_set_termios(struct uart_port *port,
writel(val, port->membase + AML_UART_CONTROL);
- baud = uart_get_baud_rate(port, termios, old, 9600, 4000000);
+ baud = uart_get_baud_rate(port, termios, old, 50, 4000000);
meson_uart_change_speed(port, baud);
port->read_status_mask = AML_UART_TX_FIFO_WERR;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 791c4c74f6d6..3a75ee08d619 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the PSC of the Freescale MPC52xx PSCs configured as UARTs.
*
@@ -23,10 +24,6 @@
* Grant Likely <grant.likely@secretlab.ca>
* Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#undef DEBUG
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 492ec4b375a0..9f8f63719126 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MPS2 UART driver
*
@@ -5,10 +6,6 @@
*
* Author: Vladimir Murzin <vladimir.murzin@arm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* TODO: support for SysRq
*/
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index 67ffecc50e42..1f60d6fe4ff2 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
* GT64260, MV64340, MV64360, GT96100, ... ).
@@ -10,10 +11,7 @@
* taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
* by Russell King.
*
- * 2004 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2004 (c) MontaVista, Software, Inc.
*/
/*
* The MPSC interface is much like a typical network controller's interface.
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 1db79ee8a886..ee96cf0d0057 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for msm7k serial device and console
*
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/mux.c b/drivers/tty/serial/mux.c
index 2bff69e70e4b..00ce31e8d19a 100644
--- a/drivers/tty/serial/mux.c
+++ b/drivers/tty/serial/mux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
** mux.c:
** serial driver for the Mux console found in some PA-RISC servers.
@@ -5,11 +6,6 @@
** (c) Copyright 2002 Ryan Bradetich
** (c) Copyright 2002 Hewlett-Packard Company
**
-** This program is free software; you can redistribute it and/or modify
-** it under the terms of the GNU General Public License as published by
-** the Free Software Foundation; either version 2 of the License, or
-** (at your option) any later version.
-**
** This Driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
@@ -375,7 +371,7 @@ static int mux_verify_port(struct uart_port *port, struct serial_struct *ser)
*
* This function periodically polls the Serial MUX to check for new data.
*/
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
{
int i;
@@ -576,8 +572,7 @@ static int __init mux_init(void)
if(port_cnt > 0) {
/* Start the Mux timer */
- init_timer(&mux_timer);
- mux_timer.function = mux_poll;
+ timer_setup(&mux_timer, mux_poll, 0);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 45b57c294d13..a100e98259d7 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ***************************************************************************
* Marvell Armada-3700 Serial Driver
* Author: Wilson Ding <dingwei@marvell.com>
* Copyright (C) 2015 Marvell International Ltd.
* ***************************************************************************
-* This program is free software: you can redistribute it and/or modify it
-* under the terms of the GNU General Public License as published by the Free
-* Software Foundation, either version 2 of the License, or any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program. If not, see <http://www.gnu.org/licenses/>.
-* ***************************************************************************
*/
#include <linux/clk.h>
@@ -38,46 +27,40 @@
#include <linux/tty_flip.h>
/* Register Map */
-#define UART_RBR 0x00
-#define RBR_BRK_DET BIT(15)
-#define RBR_FRM_ERR_DET BIT(14)
-#define RBR_PAR_ERR_DET BIT(13)
-#define RBR_OVR_ERR_DET BIT(12)
+#define UART_STD_RBR 0x00
+#define UART_EXT_RBR 0x18
-#define UART_TSH 0x04
+#define UART_STD_TSH 0x04
+#define UART_EXT_TSH 0x1C
-#define UART_CTRL 0x08
+#define UART_STD_CTRL1 0x08
+#define UART_EXT_CTRL1 0x04
#define CTRL_SOFT_RST BIT(31)
#define CTRL_TXFIFO_RST BIT(15)
#define CTRL_RXFIFO_RST BIT(14)
-#define CTRL_ST_MIRR_EN BIT(13)
-#define CTRL_LPBK_EN BIT(12)
#define CTRL_SND_BRK_SEQ BIT(11)
-#define CTRL_PAR_EN BIT(10)
-#define CTRL_TWO_STOP BIT(9)
-#define CTRL_TX_HFL_INT BIT(8)
-#define CTRL_RX_HFL_INT BIT(7)
-#define CTRL_TX_EMP_INT BIT(6)
-#define CTRL_TX_RDY_INT BIT(5)
-#define CTRL_RX_RDY_INT BIT(4)
#define CTRL_BRK_DET_INT BIT(3)
#define CTRL_FRM_ERR_INT BIT(2)
#define CTRL_PAR_ERR_INT BIT(1)
#define CTRL_OVR_ERR_INT BIT(0)
-#define CTRL_RX_INT (CTRL_RX_RDY_INT | CTRL_BRK_DET_INT |\
- CTRL_FRM_ERR_INT | CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
+#define CTRL_BRK_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
+ CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
+
+#define UART_STD_CTRL2 UART_STD_CTRL1
+#define UART_EXT_CTRL2 0x20
+#define CTRL_STD_TX_RDY_INT BIT(5)
+#define CTRL_EXT_TX_RDY_INT BIT(6)
+#define CTRL_STD_RX_RDY_INT BIT(4)
+#define CTRL_EXT_RX_RDY_INT BIT(5)
-#define UART_STAT 0x0c
+#define UART_STAT 0x0C
#define STAT_TX_FIFO_EMP BIT(13)
-#define STAT_RX_FIFO_EMP BIT(12)
#define STAT_TX_FIFO_FUL BIT(11)
-#define STAT_TX_FIFO_HFL BIT(10)
-#define STAT_RX_TOGL BIT(9)
-#define STAT_RX_FIFO_FUL BIT(8)
-#define STAT_RX_FIFO_HFL BIT(7)
#define STAT_TX_EMP BIT(6)
-#define STAT_TX_RDY BIT(5)
-#define STAT_RX_RDY BIT(4)
+#define STAT_STD_TX_RDY BIT(5)
+#define STAT_EXT_TX_RDY BIT(15)
+#define STAT_STD_RX_RDY BIT(4)
+#define STAT_EXT_RX_RDY BIT(14)
#define STAT_BRK_DET BIT(3)
#define STAT_FRM_ERR BIT(2)
#define STAT_PAR_ERR BIT(1)
@@ -86,18 +69,73 @@
| STAT_PAR_ERR | STAT_OVR_ERR)
#define UART_BRDV 0x10
+#define BRDV_BAUD_MASK 0x3FF
-#define MVEBU_NR_UARTS 1
+#define MVEBU_NR_UARTS 2
#define MVEBU_UART_TYPE "mvebu-uart"
+#define DRIVER_NAME "mvebu_serial"
+
+enum {
+ /* Either there is only one summed IRQ... */
+ UART_IRQ_SUM = 0,
+ /* ...or there are two separate IRQ for RX and TX */
+ UART_RX_IRQ = 0,
+ UART_TX_IRQ,
+ UART_IRQ_COUNT
+};
-static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
+/* Diverging register offsets */
+struct uart_regs_layout {
+ unsigned int rbr;
+ unsigned int tsh;
+ unsigned int ctrl;
+ unsigned int intr;
+};
+
+/* Diverging flags */
+struct uart_flags {
+ unsigned int ctrl_tx_rdy_int;
+ unsigned int ctrl_rx_rdy_int;
+ unsigned int stat_tx_rdy;
+ unsigned int stat_rx_rdy;
+};
+
+/* Driver data, a structure for each UART port */
+struct mvebu_uart_driver_data {
+ bool is_ext;
+ struct uart_regs_layout regs;
+ struct uart_flags flags;
+};
-struct mvebu_uart_data {
+/* MVEBU UART driver structure */
+struct mvebu_uart {
struct uart_port *port;
- struct clk *clk;
+ struct clk *clk;
+ int irq[UART_IRQ_COUNT];
+ unsigned char __iomem *nb;
+ struct mvebu_uart_driver_data *data;
};
+static struct mvebu_uart *to_mvuart(struct uart_port *port)
+{
+ return (struct mvebu_uart *)port->private_data;
+}
+
+#define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)
+
+#define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
+#define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
+#define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
+#define UART_INTR(port) (to_mvuart(port)->data->regs.intr)
+
+#define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
+#define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
+#define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
+#define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)
+
+static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
+
/* Core UART Driver Operations */
static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
{
@@ -127,26 +165,39 @@ static void mvebu_uart_set_mctrl(struct uart_port *port,
static void mvebu_uart_stop_tx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl = readl(port->membase + UART_INTR(port));
- ctl &= ~CTRL_TX_RDY_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl &= ~CTRL_TX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_start_tx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl;
+ struct circ_buf *xmit = &port->state->xmit;
+
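+	/*
+	 * On the extended UART, prime the TX holding register with one
+	 * pending character before enabling the TX-ready interrupt
+	 * (presumably so the interrupt fires and the regular TX path
+	 * takes over).
+	 */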
+ if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
+ writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
- ctl |= CTRL_TX_RDY_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl |= CTRL_TX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_stop_rx(struct uart_port *port)
{
- unsigned int ctl = readl(port->membase + UART_CTRL);
+ unsigned int ctl;
+
+ ctl = readl(port->membase + UART_CTRL(port));
+ ctl &= ~CTRL_BRK_INT;
+ writel(ctl, port->membase + UART_CTRL(port));
- ctl &= ~CTRL_RX_INT;
- writel(ctl, port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl &= ~CTRL_RX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
}
static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
@@ -155,12 +206,12 @@ static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
- ctl = readl(port->membase + UART_CTRL);
+ ctl = readl(port->membase + UART_CTRL(port));
if (brk == -1)
ctl |= CTRL_SND_BRK_SEQ;
else
ctl &= ~CTRL_SND_BRK_SEQ;
- writel(ctl, port->membase + UART_CTRL);
+ writel(ctl, port->membase + UART_CTRL(port));
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -171,8 +222,8 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
char flag = 0;
do {
- if (status & STAT_RX_RDY) {
- ch = readl(port->membase + UART_RBR);
+ if (status & STAT_RX_RDY(port)) {
+ ch = readl(port->membase + UART_RBR(port));
ch &= 0xff;
flag = TTY_NORMAL;
port->icount.rx++;
@@ -198,7 +249,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
goto ignore_char;
if (status & port->ignore_status_mask & STAT_PAR_ERR)
- status &= ~STAT_RX_RDY;
+ status &= ~STAT_RX_RDY(port);
status &= port->read_status_mask;
@@ -207,7 +258,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
status &= ~port->ignore_status_mask;
- if (status & STAT_RX_RDY)
+ if (status & STAT_RX_RDY(port))
tty_insert_flip_char(tport, ch, flag);
if (status & STAT_BRK_DET)
@@ -221,7 +272,7 @@ static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
ignore_char:
status = readl(port->membase + UART_STAT);
- } while (status & (STAT_RX_RDY | STAT_BRK_DET));
+ } while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));
tty_flip_buffer_push(tport);
}
@@ -233,7 +284,7 @@ static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
unsigned int st;
if (port->x_char) {
- writel(port->x_char, port->membase + UART_TSH);
+ writel(port->x_char, port->membase + UART_TSH(port));
port->icount.tx++;
port->x_char = 0;
return;
@@ -245,7 +296,7 @@ static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
}
for (count = 0; count < port->fifosize; count++) {
- writel(xmit->buf[xmit->tail], port->membase + UART_TSH);
+ writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
@@ -269,10 +320,34 @@ static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
struct uart_port *port = (struct uart_port *)dev_id;
unsigned int st = readl(port->membase + UART_STAT);
- if (st & (STAT_RX_RDY | STAT_OVR_ERR | STAT_FRM_ERR | STAT_BRK_DET))
+ if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
+ STAT_BRK_DET))
mvebu_uart_rx_chars(port, st);
- if (st & STAT_TX_RDY)
+ if (st & STAT_TX_RDY(port))
+ mvebu_uart_tx_chars(port, st);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
+ STAT_BRK_DET))
+ mvebu_uart_rx_chars(port, st);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (st & STAT_TX_RDY(port))
mvebu_uart_tx_chars(port, st);
return IRQ_HANDLED;
@@ -280,18 +355,57 @@ static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
static int mvebu_uart_startup(struct uart_port *port)
{
+ struct mvebu_uart *mvuart = to_mvuart(port);
+ unsigned int ctl;
int ret;
writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
- port->membase + UART_CTRL);
+ port->membase + UART_CTRL(port));
udelay(1);
- writel(CTRL_RX_INT, port->membase + UART_CTRL);
- ret = request_irq(port->irq, mvebu_uart_isr, port->irqflags, "serial",
- port);
- if (ret) {
- dev_err(port->dev, "failed to request irq\n");
- return ret;
+	/* Clear the error bits of the status register before requesting the IRQ */
+ ret = readl(port->membase + UART_STAT);
+ ret |= STAT_BRK_ERR;
+ writel(ret, port->membase + UART_STAT);
+
+ writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));
+
+ ctl = readl(port->membase + UART_INTR(port));
+ ctl |= CTRL_RX_RDY_INT(port);
+ writel(ctl, port->membase + UART_INTR(port));
+
+ if (!mvuart->irq[UART_TX_IRQ]) {
+ /* Old bindings with just one interrupt (UART0 only) */
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
+ mvebu_uart_isr, port->irqflags,
+ dev_name(port->dev), port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_IRQ_SUM]);
+ return ret;
+ }
+ } else {
+		/* New bindings with separate RX and TX IRQs (both UARTs) */
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
+ mvebu_uart_rx_isr, port->irqflags,
+ dev_name(port->dev), port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_RX_IRQ]);
+ return ret;
+ }
+
+ ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
+ mvebu_uart_tx_isr, port->irqflags,
+ dev_name(port->dev),
+ port);
+ if (ret) {
+ dev_err(port->dev, "unable to request IRQ %d\n",
+ mvuart->irq[UART_TX_IRQ]);
+ devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
+ port);
+ return ret;
+ }
}
return 0;
@@ -299,9 +413,41 @@ static int mvebu_uart_startup(struct uart_port *port)
static void mvebu_uart_shutdown(struct uart_port *port)
{
- writel(0, port->membase + UART_CTRL);
+ struct mvebu_uart *mvuart = to_mvuart(port);
+
+ writel(0, port->membase + UART_INTR(port));
+
+ if (!mvuart->irq[UART_TX_IRQ]) {
+ devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
+ } else {
+ devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
+ devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
+ }
+}
+
+static int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
+{
+ struct mvebu_uart *mvuart = to_mvuart(port);
+ unsigned int baud_rate_div;
+ u32 brdv;
+
+ if (IS_ERR(mvuart->clk))
+ return -PTR_ERR(mvuart->clk);
+
+ /*
+	 * The UART clock is divided by the divisor value to generate the
+	 * UCLK_OUT clock, which runs at 16 times the baudrate.
+	 * This prescaler can achieve all standard baudrates up to 230400.
+	 * Higher baudrates could be reached on the extended UART by using the
+	 * programmable oversampling stack (also called fractional divisor).
+ */
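+	/*
+	 * Worked example, assuming a 25 MHz UART clock (illustrative value):
+	 * 115200 baud -> baud_rate_div = DIV_ROUND_UP(25000000, 115200 * 16) = 14
+	 */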
+ baud_rate_div = DIV_ROUND_UP(port->uartclk, baud * 16);
+ brdv = readl(port->membase + UART_BRDV);
+ brdv &= ~BRDV_BAUD_MASK;
+ brdv |= baud_rate_div;
+ writel(brdv, port->membase + UART_BRDV);
- free_irq(port->irq, port);
+ return 0;
}
static void mvebu_uart_set_termios(struct uart_port *port,
@@ -313,8 +459,8 @@ static void mvebu_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
- port->read_status_mask = STAT_RX_RDY | STAT_OVR_ERR |
- STAT_TX_RDY | STAT_TX_FIFO_FUL;
+ port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
+ STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
if (termios->c_iflag & INPCK)
port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
@@ -325,13 +471,32 @@ static void mvebu_uart_set_termios(struct uart_port *port,
STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
if ((termios->c_cflag & CREAD) == 0)
- port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
-
- if (old)
- tty_termios_copy_hw(termios, old);
+ port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
+
+ /*
+	 * The maximum baudrate achievable with the simple divisor is 230400.
+	 * Beyond that the per-bit-frame error exceeds 15%, so higher rates
+	 * would require implementing the fractional divisor feature.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 0, 230400);
+ if (mvebu_uart_baud_rate_set(port, baud)) {
+ /* No clock available, baudrate cannot be changed */
+ if (old)
+ baud = uart_get_baud_rate(port, old, NULL, 0, 230400);
+ } else {
+ tty_termios_encode_baud_rate(termios, baud, baud);
+ uart_update_timeout(port, termios->c_cflag, baud);
+ }
- baud = uart_get_baud_rate(port, termios, old, 0, 460800);
- uart_update_timeout(port, termios->c_cflag, baud);
+ /* Only the following flag changes are supported */
+ if (old) {
+ termios->c_iflag &= INPCK | IGNPAR;
+ termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
+ termios->c_cflag &= CREAD | CBAUD;
+ termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+ termios->c_lflag = old->c_lflag;
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -356,10 +521,10 @@ static int mvebu_uart_get_poll_char(struct uart_port *port)
{
unsigned int st = readl(port->membase + UART_STAT);
- if (!(st & STAT_RX_RDY))
+ if (!(st & STAT_RX_RDY(port)))
return NO_POLL_CHAR;
- return readl(port->membase + UART_RBR);
+ return readl(port->membase + UART_RBR(port));
}
static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
@@ -375,7 +540,7 @@ static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
udelay(1);
}
- writel(c, port->membase + UART_TSH);
+ writel(c, port->membase + UART_TSH(port));
}
#endif
@@ -413,7 +578,8 @@ static void mvebu_uart_putc(struct uart_port *port, int c)
break;
}
- writel(c, port->membase + UART_TSH);
+	/* At this early stage the DT is not parsed yet, so only UART0 is used */
+ writel(c, port->membase + UART_STD_TSH);
for (;;) {
st = readl(port->membase + UART_STAT);
@@ -458,7 +624,7 @@ static void wait_for_xmitr(struct uart_port *port)
static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
{
wait_for_xmitr(port);
- writel(ch, port->membase + UART_TSH);
+ writel(ch, port->membase + UART_TSH(port));
}
static void mvebu_uart_console_write(struct console *co, const char *s,
@@ -466,7 +632,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
{
struct uart_port *port = &mvebu_uart_ports[co->index];
unsigned long flags;
- unsigned int ier;
+ unsigned int ier, intr, ctl;
int locked = 1;
if (oops_in_progress)
@@ -474,16 +640,23 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
else
spin_lock_irqsave(&port->lock, flags);
- ier = readl(port->membase + UART_CTRL) &
- (CTRL_RX_INT | CTRL_TX_RDY_INT);
- writel(0, port->membase + UART_CTRL);
+ ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
+ intr = readl(port->membase + UART_INTR(port)) &
+ (CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
+ writel(0, port->membase + UART_CTRL(port));
+ writel(0, port->membase + UART_INTR(port));
uart_console_write(port, s, count, mvebu_uart_console_putchar);
wait_for_xmitr(port);
if (ier)
- writel(ier, port->membase + UART_CTRL);
+ writel(ier, port->membase + UART_CTRL(port));
+
+ if (intr) {
+ ctl = intr | readl(port->membase + UART_INTR(port));
+ writel(ctl, port->membase + UART_INTR(port));
+ }
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -538,7 +711,7 @@ console_initcall(mvebu_uart_console_init);
static struct uart_driver mvebu_uart_driver = {
.owner = THIS_MODULE,
- .driver_name = "mvebu_serial",
+ .driver_name = DRIVER_NAME,
.dev_name = "ttyMV",
.nr = MVEBU_NR_UARTS,
#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
@@ -546,20 +719,39 @@ static struct uart_driver mvebu_uart_driver = {
#endif
};
+static const struct of_device_id mvebu_uart_of_match[];
+
+/* Counter to keep track of each UART port id when not using CONFIG_OF */
+static int uart_num_counter;
+
static int mvebu_uart_probe(struct platform_device *pdev)
{
struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
+ &pdev->dev);
struct uart_port *port;
- struct mvebu_uart_data *data;
- int ret;
+ struct mvebu_uart *mvuart;
+ int ret, id, irq;
+
+ if (!reg) {
+ dev_err(&pdev->dev, "no registers defined\n");
+ return -EINVAL;
+ }
+
+	/* Assume that either all UART ports have a DT alias or none does */
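+	/* e.g. aliases { serial0 = &uart0; serial1 = &uart1; }; (illustrative node names) */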
+ id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (!pdev->dev.of_node || id < 0)
+ pdev->id = uart_num_counter++;
+ else
+ pdev->id = id;
- if (!reg || !irq) {
- dev_err(&pdev->dev, "no registers/irq defined\n");
+ if (pdev->id >= MVEBU_NR_UARTS) {
+ dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
+ MVEBU_NR_UARTS);
return -EINVAL;
}
- port = &mvebu_uart_ports[0];
+ port = &mvebu_uart_ports[pdev->id];
spin_lock_init(&port->lock);
@@ -571,9 +763,14 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->fifosize = 32;
port->iotype = UPIO_MEM32;
port->flags = UPF_FIXED_PORT;
- port->line = 0; /* single port: force line number to 0 */
-
- port->irq = irq->start;
+ port->line = pdev->id;
+
+ /*
+	 * The IRQ number is not stored in this structure because there may be
+	 * two interrupts per port (RX and TX). They are kept instead in the
+	 * ->irq[] array of the driver's struct mvebu_uart.
+ */
+ port->irq = 0;
port->irqflags = 0;
port->mapbase = reg->start;
@@ -581,15 +778,70 @@ static int mvebu_uart_probe(struct platform_device *pdev)
if (IS_ERR(port->membase))
return -PTR_ERR(port->membase);
- data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
- GFP_KERNEL);
- if (!data)
+ mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
+ GFP_KERNEL);
+ if (!mvuart)
return -ENOMEM;
- data->port = port;
+ /* Get controller data depending on the compatible string */
+ mvuart->data = (struct mvebu_uart_driver_data *)match->data;
+ mvuart->port = port;
+
+ port->private_data = mvuart;
+ platform_set_drvdata(pdev, mvuart);
+
+ /* Get fixed clock frequency */
+ mvuart->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mvuart->clk)) {
+ if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
+ return PTR_ERR(mvuart->clk);
+
+ if (IS_EXTENDED(port)) {
+ dev_err(&pdev->dev, "unable to get UART clock\n");
+ return PTR_ERR(mvuart->clk);
+ }
+ } else {
+ if (!clk_prepare_enable(mvuart->clk))
+ port->uartclk = clk_get_rate(mvuart->clk);
+ }
+
+ /* Manage interrupts */
+ if (platform_irq_count(pdev) == 1) {
+		/* Old bindings: a single, unnamed IRQ (UART0 only) */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get UART IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_IRQ_SUM] = irq;
+ } else {
+ /*
+		 * New bindings: named interrupts (RX, TX) for both UARTs. Only the
+		 * uart-rx and uart-tx interrupts are used; the uart-sum interrupt
+		 * of the UART0 port is not.
+ */
+ irq = platform_get_irq_byname(pdev, "uart-rx");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get 'uart-rx' IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_RX_IRQ] = irq;
- port->private_data = data;
- platform_set_drvdata(pdev, data);
+ irq = platform_get_irq_byname(pdev, "uart-tx");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to get 'uart-tx' IRQ\n");
+ return irq;
+ }
+
+ mvuart->irq[UART_TX_IRQ] = irq;
+ }
+
+	/* UART Soft Reset */
+ writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
+ udelay(1);
+ writel(0, port->membase + UART_CTRL(port));
ret = uart_add_one_port(&mvebu_uart_driver, port);
if (ret)
@@ -597,9 +849,40 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return 0;
}
+static struct mvebu_uart_driver_data uart_std_driver_data = {
+ .is_ext = false,
+ .regs.rbr = UART_STD_RBR,
+ .regs.tsh = UART_STD_TSH,
+ .regs.ctrl = UART_STD_CTRL1,
+ .regs.intr = UART_STD_CTRL2,
+ .flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
+ .flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
+ .flags.stat_tx_rdy = STAT_STD_TX_RDY,
+ .flags.stat_rx_rdy = STAT_STD_RX_RDY,
+};
+
+static struct mvebu_uart_driver_data uart_ext_driver_data = {
+ .is_ext = true,
+ .regs.rbr = UART_EXT_RBR,
+ .regs.tsh = UART_EXT_TSH,
+ .regs.ctrl = UART_EXT_CTRL1,
+ .regs.intr = UART_EXT_CTRL2,
+ .flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
+ .flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
+ .flags.stat_tx_rdy = STAT_EXT_TX_RDY,
+ .flags.stat_rx_rdy = STAT_EXT_RX_RDY,
+};
+
/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
- { .compatible = "marvell,armada-3700-uart", },
+ {
+ .compatible = "marvell,armada-3700-uart",
+ .data = (void *)&uart_std_driver_data,
+ },
+ {
+ .compatible = "marvell,armada-3700-uart-ext",
+ .data = (void *)&uart_ext_driver_data,
+ },
{}
};
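+
+/*
+ * Illustrative device tree node for the extended (second) UART using the new
+ * compatible string and per-direction interrupts. The unit address, register
+ * size, clock and interrupt specifiers below are illustrative assumptions;
+ * only the compatible string and the "uart-rx"/"uart-tx" interrupt names come
+ * from this driver:
+ *
+ *	uart1: serial@12200 {
+ *		compatible = "marvell,armada-3700-uart-ext";
+ *		reg = <0x12200 0x30>;
+ *		interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ *			     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ *		interrupt-names = "uart-rx", "uart-tx";
+ *		clocks = <&xtalclk>;
+ *	};
+ */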
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index be94246b6fcc..efb4fd3784ed 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Application UART driver for:
* Freescale STMP37XX/STMP378X
@@ -9,10 +10,6 @@
* Provide Alphascale ASM9260 support.
* Copyright 2008-2010 Freescale Semiconductor, Inc.
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
*/
#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index 207a0a032ed1..b3556863491f 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 7754053deeda..53d59e9b944a 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for OMAP-UART controller.
* Based on drivers/serial/8250.c
@@ -8,11 +9,6 @@
* Govindraj R <govindraj.raja@ti.com>
* Thara Gopinath <thara@ti.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Note: This driver is made separate from 8250 driver as we cannot
* over load 8250 driver with omap platform specific configuration for
* features like DMA, it makes easier to implement features like DMA and
@@ -610,7 +606,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
default:
break;
}
- } while (!(iir & UART_IIR_NO_INT) && max_count--);
+ } while (max_count--);
spin_unlock(&up->port.lock);
@@ -693,7 +689,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
up->efr |= UART_EFR_RTS;
else
- up->efr &= UART_EFR_RTS;
+ up->efr &= ~UART_EFR_RTS;
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, lcr);
@@ -1606,7 +1602,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
struct device_node *np)
{
struct serial_rs485 *rs485conf = &up->port.rs485;
- u32 rs485_delay[2];
enum of_gpio_flags flags;
int ret;
@@ -1637,17 +1632,7 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
up->rts_gpio = -EINVAL;
}
- if (of_property_read_u32_array(np, "rs485-rts-delay",
- rs485_delay, 2) == 0) {
- rs485conf->delay_rts_before_send = rs485_delay[0];
- rs485conf->delay_rts_after_send = rs485_delay[1];
- }
-
- if (of_property_read_bool(np, "rs485-rx-during-tx"))
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
- if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
- rs485conf->flags |= SER_RS485_ENABLED;
+ of_get_rs485_mode(np, rs485conf);
return 0;
}
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index b9c859365334..29a6dc6a8d23 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Actions Semi Owl family serial console
*
@@ -5,19 +6,6 @@
* Author: Actions Semi, Inc.
*
* Copyright (c) 2016-2017 Andreas Färber
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d9123f995705..760d5dd0aada 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
*Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
- *
- *This program is free software; you can redistribute it and/or modify
- *it under the terms of the GNU General Public License as published by
- *the Free Software Foundation; version 2 of the License.
- *
- *This program is distributed in the hope that it will be useful,
- *but WITHOUT ANY WARRANTY; without even the implied warranty of
- *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- *GNU General Public License for more details.
- *
- *You should have received a copy of the GNU General Public License
- *along with this program; if not, write to the Free Software
- *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#if defined(CONFIG_SERIAL_PCH_UART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 00a33eb859d3..fd80d999308d 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PIC32 Integrated Serial Driver.
*
@@ -5,8 +6,6 @@
*
* Authors:
* Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/kernel.h>
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
index ec379da55ebb..2f2b56927dc6 100644
--- a/drivers/tty/serial/pic32_uart.h
+++ b/drivers/tty/serial/pic32_uart.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PIC32 Integrated Serial Driver.
*
@@ -5,8 +6,6 @@
*
* Authors:
* Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
- *
- * Licensed under GPLv2 or later.
*/
#ifndef __DT_PIC32_UART_H__
#define __DT_PIC32_UART_H__
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 6ccdd018fb45..3d21790d961e 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for PowerMac Z85c30 based ESCC cell found in the
* "macio" ASICs of various PowerMac models
@@ -13,20 +14,6 @@
* and once done, I expect that driver to remain fairly stable in
* the long term, unless we change the driver model again...
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* 2004-08-06 Harald Welte <laforge@gnumonks.org>
* - Enable BREAK interrupt
* - Add support for sysreq
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index 3483242ee3e0..bb874e76810e 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PMAC_ZILOG_H__
#define __PMAC_ZILOG_H__
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index dab2668d3879..223a9499104e 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UART driver for PNX8XXX SoCs
*
@@ -7,11 +8,6 @@
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of
- * any kind, whether express or implied.
- *
*/
#if defined(CONFIG_SERIAL_PNX8XXX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -107,9 +103,9 @@ static void pnx8xxx_mctrl_check(struct pnx8xxx_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+ struct pnx8xxx_port *sport = from_timer(sport, t, timer);
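+	/* from_timer() maps the timer_list back to its containing pnx8xxx_port */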
unsigned long flags;
if (sport->port.state) {
@@ -666,9 +662,7 @@ static void __init pnx8xxx_init_ports(void)
first = 0;
for (i = 0; i < NR_PORTS; i++) {
- init_timer(&pnx8xxx_ports[i].timer);
- pnx8xxx_ports[i].timer.function = pnx8xxx_timeout;
- pnx8xxx_ports[i].timer.data = (unsigned long)&pnx8xxx_ports[i];
+ timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 905631df1f8b..baf552944d56 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Based on drivers/serial/8250.c by Russell King.
*
@@ -5,11 +6,6 @@
* Created: Feb 20, 2003
* Copyright: (C) 2003 Monta Vista Software, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Note 1: This driver is made separate from the already too overloaded
 * 8250.c because it needs some quirks of its own and that'll make it
* easier to add DMA support.
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 056f91b3a4ca..520b43b23543 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Comtrol RocketPort EXPRESS/INFINITY cards
*
@@ -10,10 +11,6 @@
*
* rocketport_infinity_express-linux-1.20.tar.gz
* Copyright (C) 2004-2011 Comtrol, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index fd3d1329d48c..a399772be3fc 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SA11x0 serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -123,9 +110,9 @@ static void sa1100_mctrl_check(struct sa1100_port *sport)
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
{
- struct sa1100_port *sport = (struct sa1100_port *)data;
+ struct sa1100_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
@@ -640,9 +627,7 @@ static void __init sa1100_init_ports(void)
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
- init_timer(&sa1100_ports[i].timer);
- sa1100_ports[i].timer.function = sa1100_timeout;
- sa1100_ports[i].timer.data = (unsigned long)&sa1100_ports[i];
+ timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
}
/*
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 8aca18c4cdea..f9fecc5ed0ce 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver core for Samsung SoC onboard UARTs.
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/* Note on 2410 error handling
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 965199b6c16f..f93022113f59 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __SAMSUNG_H
#define __SAMSUNG_H
@@ -6,10 +7,6 @@
*
* Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 041625cc24bb..329aced26bd8 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the asynchronous serial interface (DUART) included
* in the BCM1250 and derived System-On-a-Chip (SOC) devices.
@@ -9,11 +10,6 @@
*
* Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* References:
*
* "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index ca54ce074a5f..65792a3539d0 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SC16IS7xx tty serial driver - Copyright (C) 2014 GridPoint
* Author: Jon Ringle <jringle@gridpoint.com>
*
* Based on max310x.c, by Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index b9c7a904c1ea..d6ae3086c2a2 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NXP (Philips) SCC+++(SCN+++) serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
* Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#if defined(CONFIG_SERIAL_SCCNXP_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -465,9 +461,9 @@ static void sccnxp_handle_events(struct sccnxp_port *s)
} while (1);
}
-static void sccnxp_timer(unsigned long data)
+static void sccnxp_timer(struct timer_list *t)
{
- struct sccnxp_port *s = (struct sccnxp_port *)data;
+ struct sccnxp_port *s = from_timer(s, t, timer);
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
@@ -987,8 +983,7 @@ static int sccnxp_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Unable to reguest IRQ %i\n", s->irq);
} else {
- init_timer(&s->timer);
- setup_timer(&s->timer, sccnxp_timer, (unsigned long)s);
+ timer_setup(&s->timer, sccnxp_timer, 0);
mod_timer(&s->timer, jiffies +
usecs_to_jiffies(s->pdata.poll_time_us));
return 0;
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index cf9b736f26f8..af2a29cfbbe9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* serial_tegra.c
*
@@ -6,18 +7,6 @@
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 3a14cccbd7ff..854995e1cae7 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver core for serial ports
*
@@ -5,20 +6,6 @@
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/tty.h>
@@ -1482,10 +1469,10 @@ out:
static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
- struct tty_port *port;
if (!state) {
struct uart_driver *drv = tty->driver->driver_state;
+ struct tty_port *port;
state = drv->state + tty->index;
port = &state->port;
@@ -1495,7 +1482,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
return;
}
- port = &state->port;
pr_debug("uart_close(%d) called\n", tty->index);
tty_port_close(tty->port, tty, filp);
@@ -3026,5 +3012,41 @@ EXPORT_SYMBOL(uart_resume_port);
EXPORT_SYMBOL(uart_add_one_port);
EXPORT_SYMBOL(uart_remove_one_port);
+/**
+ * of_get_rs485_mode() - Implement parsing rs485 properties
+ * @np: uart node
+ * @rs485conf: output parameter
+ *
+ * This function implements the device tree binding described in
+ * Documentation/devicetree/bindings/serial/rs485.txt.
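+ *
+ * Example of the properties handled (illustrative values, delays in
+ * milliseconds):
+ *   rs485-rts-delay = <0 200>;
+ *   rs485-rx-during-tx;
+ *   linux,rs485-enabled-at-boot-time;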
+ */
+void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf)
+{
+ u32 rs485_delay[2];
+ int ret;
+
+ ret = of_property_read_u32_array(np, "rs485-rts-delay", rs485_delay, 2);
+ if (!ret) {
+ rs485conf->delay_rts_before_send = rs485_delay[0];
+ rs485conf->delay_rts_after_send = rs485_delay[1];
+ } else {
+ rs485conf->delay_rts_before_send = 0;
+ rs485conf->delay_rts_after_send = 0;
+ }
+
+ /*
+	 * Clear the full-duplex and enabled flags so that the two properties
+	 * below start from a defined state.
+ */
+ rs485conf->flags &= ~(SER_RS485_RX_DURING_TX | SER_RS485_ENABLED);
+
+ if (of_property_read_bool(np, "rs485-rx-during-tx"))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
+ rs485conf->flags |= SER_RS485_ENABLED;
+}
+EXPORT_SYMBOL_GPL(of_get_rs485_mode);
+
MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index 57f152394af5..b461d791188c 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for KS8695 serial ports
*
* Based on drivers/serial/serial_amba.c, by Kam Lee.
*
* Copyright 2002-2005 Micrel Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/tty.h>
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index d2da6aa7f27d..1c06325beaca 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Helpers for controlling modem lines via GPIO
*
* Copyright (C) 2014 Paratronic S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/err.h>
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index fa000bcff217..b7d3cca48ede 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -1,18 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Helpers for controlling modem lines via GPIO
*
* Copyright (C) 2014 Paratronic S.A.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __SERIAL_MCTRL_GPIO__
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index f80fead6c5fc..1b4008d022bf 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Derived from many drivers using generic_serial interface,
* especially serial_tx3912.c by Steven J. Hill and r39xx_serial.c
@@ -8,10 +9,6 @@
* Copyright (C) 2001 Steven J. Hill (sjhill@realitydiluted.com)
* Copyright (C) 2000-2002 Toshiba Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Serial driver for TX3927/TX4927/TX4925/TX4938 internal SIO controller
*/
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 784dd42002ea..d9f399c4e90c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
*
@@ -13,10 +14,6 @@
* Modified to support SecureEdge. David McCullough (2002)
* Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
* Removed SH7300 support (Jul 2007).
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
@@ -40,6 +37,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
@@ -152,6 +150,7 @@ struct sci_port {
int rx_trigger;
struct timer_list rx_fifo_timer;
int rx_fifo_timeout;
+ u16 hscif_tot;
bool has_rtscts;
bool autorts;
@@ -1059,9 +1058,9 @@ static int scif_rtrg_enabled(struct uart_port *port)
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_fifo_timer);
struct uart_port *port = &s->port;
dev_dbg(port->dev, "Rx timed out\n");
@@ -1107,8 +1106,14 @@ static ssize_t rx_fifo_timeout_show(struct device *dev,
{
struct uart_port *port = dev_get_drvdata(dev);
struct sci_port *sci = to_sci_port(port);
+ int v;
+
+ if (port->type == PORT_HSCIF)
+ v = sci->hscif_tot >> HSSCR_TOT_SHIFT;
+ else
+ v = sci->rx_fifo_timeout;
- return sprintf(buf, "%d\n", sci->rx_fifo_timeout);
+ return sprintf(buf, "%d\n", v);
}
static ssize_t rx_fifo_timeout_store(struct device *dev,
@@ -1124,11 +1129,18 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
ret = kstrtol(buf, 0, &r);
if (ret)
return ret;
- sci->rx_fifo_timeout = r;
- scif_set_rtrg(port, 1);
- if (r > 0)
- setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)sci);
+
+ if (port->type == PORT_HSCIF) {
+ if (r < 0 || r > 3)
+ return -EINVAL;
+ sci->hscif_tot = r << HSSCR_TOT_SHIFT;
+ } else {
+ sci->rx_fifo_timeout = r;
+ scif_set_rtrg(port, 1);
+ if (r > 0)
+ timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
+ }
+
return count;
}
@@ -1210,8 +1222,11 @@ static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
dma_release_channel(chan);
- if (enable_pio)
+ if (enable_pio) {
+ spin_lock_irqsave(&port->lock, flags);
sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
}
static void sci_dma_rx_complete(void *arg)
@@ -1278,8 +1293,11 @@ static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
DMA_TO_DEVICE);
dma_release_channel(chan);
- if (enable_pio)
+ if (enable_pio) {
+ spin_lock_irqsave(&port->lock, flags);
sci_start_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
}
static void sci_submit_rx(struct sci_port *s)
@@ -1373,9 +1391,9 @@ static void work_fn_tx(struct work_struct *work)
dma_async_issue_pending(chan);
}
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_timer);
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
struct dma_tx_state state;
@@ -1491,6 +1509,14 @@ static void sci_request_dma(struct uart_port *port)
return;
s->cookie_tx = -EINVAL;
+
+ /*
+ * Don't request a dma channel if no channel was specified
+ * in the device tree.
+ */
+ if (!of_find_property(port->dev->of_node, "dmas", NULL))
+ return;
+
chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
if (chan) {
@@ -1545,7 +1571,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+ timer_setup(&s->rx_timer, rx_timer_fn, 0);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
@@ -1980,6 +2006,7 @@ static void sci_enable_ms(struct uart_port *port)
static void sci_break_ctl(struct uart_port *port, int break_state)
{
unsigned short scscr, scsptr;
+ unsigned long flags;
	/* check whether the port has SCSPTR */
if (!sci_getreg(port, SCSPTR)->size) {
@@ -1990,6 +2017,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
return;
}
+ spin_lock_irqsave(&port->lock, flags);
scsptr = serial_port_in(port, SCSPTR);
scscr = serial_port_in(port, SCSCR);
@@ -2003,6 +2031,7 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
serial_port_out(port, SCSPTR, scsptr);
serial_port_out(port, SCSCR, scscr);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int sci_startup(struct uart_port *port)
@@ -2037,9 +2066,13 @@ static void sci_shutdown(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
sci_stop_rx(port);
sci_stop_tx(port);
- /* Stop RX and TX, disable related interrupts, keep clock source */
+ /*
+ * Stop RX and TX, disable related interrupts, keep clock source
+ * and HSCIF TOT bits
+ */
scr = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
+ serial_port_out(port, SCSCR, scr &
+ (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
spin_unlock_irqrestore(&port->lock, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2186,7 +2219,7 @@ static void sci_reset(struct uart_port *port)
unsigned int status;
struct sci_port *s = to_sci_port(port);
- serial_port_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
+ serial_port_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
reg = sci_getreg(port, SCFCR);
if (reg->size)
@@ -2204,8 +2237,7 @@ static void sci_reset(struct uart_port *port)
if (s->rx_trigger > 1) {
if (s->rx_fifo_timeout) {
scif_set_rtrg(port, 1);
- setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)s);
+ timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
} else {
if (port->type == PORT_SCIFA ||
port->type == PORT_SCIFB)
@@ -2227,6 +2259,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
int min_err = INT_MAX, err;
unsigned long max_freq = 0;
int best_clk = -1;
+ unsigned long flags;
if ((termios->c_cflag & CSIZE) == CS7)
smr_val |= SCSMR_CHR;
@@ -2336,6 +2369,8 @@ done:
serial_port_out(port, SCCKS, sccks);
}
+ spin_lock_irqsave(&port->lock, flags);
+
sci_reset(port);
uart_update_timeout(port, termios->c_cflag, baud);
@@ -2353,10 +2388,7 @@ done:
case 27: smr_val |= SCSMR_SRC_27; break;
}
smr_val |= cks;
- dev_dbg(port->dev,
- "SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
- scr_val, smr_val, brr, sccks, dl, srr);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
serial_port_out(port, SCSMR, smr_val);
serial_port_out(port, SCBRR, brr);
if (sci_getreg(port, HSSRR)->size)
@@ -2369,8 +2401,7 @@ done:
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
smr_val |= serial_port_in(port, SCSMR) &
(SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
- dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
serial_port_out(port, SCSMR, smr_val);
}
@@ -2406,8 +2437,7 @@ done:
scr_val |= SCSCR_RE | SCSCR_TE |
(s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
- dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
- serial_port_out(port, SCSCR, scr_val);
+ serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
if ((srr + 1 == 5) &&
(port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
/*
@@ -2453,8 +2483,6 @@ done:
s->rx_frame = (100 * bits * HZ) / (baud / 10);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
s->rx_timeout = DIV_ROUND_UP(s->buf_len_rx * 2 * s->rx_frame, 1000);
- dev_dbg(port->dev, "DMA Rx t-out %ums, tty t-out %u jiffies\n",
- s->rx_timeout * 1000 / HZ, port->timeout);
if (s->rx_timeout < msecs_to_jiffies(20))
s->rx_timeout = msecs_to_jiffies(20);
#endif
@@ -2462,6 +2490,8 @@ done:
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
sci_port_disable(s);
if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -2773,6 +2803,7 @@ static int sci_init_single(struct platform_device *dev,
}
sci_port->rx_fifo_timeout = 0;
+ sci_port->hscif_tot = 0;
/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
* match the SoC datasheet, this should be investigated. Let platform
@@ -2860,7 +2891,7 @@ static void serial_console_write(struct console *co, const char *s,
ctrl_temp = SCSCR_RE | SCSCR_TE |
(sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
(ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, ctrl_temp);
+ serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
uart_console_write(port, s, count, serial_console_putchar);
@@ -2988,7 +3019,8 @@ static int sci_remove(struct platform_device *dev)
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB) {
+ if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
+ port->port.type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
@@ -3044,17 +3076,15 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
unsigned int *dev_id)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct plat_sci_port *p;
struct sci_port *sp;
+ const void *data;
int id;
if (!IS_ENABLED(CONFIG_OF) || !np)
return NULL;
- match = of_match_node(of_sci_match, np);
- if (!match)
- return NULL;
+ data = of_device_get_match_data(&pdev->dev);
p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
if (!p)
@@ -3070,8 +3100,8 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
sp = &sci_ports[id];
*dev_id = id;
- p->type = SCI_OF_TYPE(match->data);
- p->regtype = SCI_OF_REGTYPE(match->data);
+ p->type = SCI_OF_TYPE(data);
+ p->regtype = SCI_OF_REGTYPE(data);
sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");
@@ -3173,7 +3203,8 @@ static int sci_probe(struct platform_device *dev)
if (ret)
return ret;
}
- if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB) {
+ if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
+ sp->port.type == PORT_HSCIF) {
ret = sysfs_create_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
if (ret) {
@@ -3244,7 +3275,7 @@ early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
-static struct __init plat_sci_port port_cfg;
+static struct plat_sci_port port_cfg __initdata;
static int __init early_console_setup(struct earlycon_device *device,
int type)
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index 971b2ab088d8..a5f792fd48d9 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/io.h>
@@ -62,6 +63,9 @@ enum {
#define SCSCR_TDRQE BIT(15) /* Tx Data Transfer Request Enable */
#define SCSCR_RDRQE BIT(14) /* Rx Data Transfer Request Enable */
+/* Serial Control Register, HSCIF-only bits */
+#define HSSCR_TOT_SHIFT 14
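+/* TOT is a two-bit field (bits 15:14); rx_fifo_timeout_store() accepts values 0..3 */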
+
/* SCxSR (Serial Status Register) on SCI */
#define SCI_TDRE BIT(7) /* Transmit Data Register Empty */
#define SCI_RDRF BIT(6) /* Receive Data Register Full */
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 684cb8dd8050..9925b00a9777 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for CSR SiRFprimaII onboard UARTs.
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/module.h>
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 43756bd9111c..004ca684d3ae 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Drivers for CSR SiRFprimaII onboard UARTs.
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- *
- * Licensed under GPLv2 or later.
*/
#include <linux/bitops.h>
#include <linux/log2.h>
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 9e0e6586c698..42b9aded4eb1 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -8,25 +8,6 @@
*
* Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
* Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
* Mountain View, CA 94043, or:
*
@@ -631,9 +612,9 @@ static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
* Obviously not used in interrupt mode
*
*/
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
{
- struct sn_cons_port *port = (struct sn_cons_port *)data;
+ struct sn_cons_port *port = from_timer(port, t, sc_timer);
unsigned long flags;
if (!port)
@@ -687,9 +668,7 @@ static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
* timer to poll for input and push data from the console
* buffer.
*/
- init_timer(&port->sc_timer);
- port->sc_timer.function = sn_sal_timer_poll;
- port->sc_timer.data = (unsigned long)port;
+ timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
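The timer conversions throughout this series all follow the same shape: the callback takes a struct timer_list * instead of an unsigned long, and the owning structure is recovered with from_timer() (a container_of() on the named member) rather than a cast of the old .data field. A condensed sketch, with my_port standing in for the various driver structures:

#include <linux/timer.h>

struct my_port {
	struct timer_list sc_timer;
	/* ... */
};

static void my_poll(struct timer_list *t)
{
	struct my_port *port = from_timer(port, t, sc_timer);

	/* poll the hardware, then rearm if still needed */
	mod_timer(&port->sc_timer, jiffies + HZ);
}

static void my_port_init(struct my_port *port)
{
	/* replaces init_timer() plus the .function/.data assignments */
	timer_setup(&port->sc_timer, my_poll, 0);
}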
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index e902494ebbd5..828f1143859c 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012-2015 Spreadtrum Communications Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_SPRD_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index b313a792b149..c763253514e9 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* st-asc.c: ST Asynchronous serial controller (ASC) driver
*
* Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 03a583264d9e..0fa735b60f2d 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
* Gerald Baeza <gerald.baeza@st.com>
- * License terms: GNU General Public License (GPL), version 2
*
* Inspired by st-asc.c from STMicroelectronics (c)
*/
@@ -736,11 +736,8 @@ static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
- { .compatible = "st,stm32-usart", .data = &stm32f4_info},
{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
- { .compatible = "st,stm32f7-usart", .data = &stm32f7_info},
{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
- { .compatible = "st,stm32h7-usart", .data = &stm32h7_info},
{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
{},
};
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index ffc0c5285e51..8a5ff54d0f42 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -1,9 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Maxime Coquelin 2015
* Copyright (C) STMicroelectronics SA 2017
* Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
* Gerald Baeza <gerald_baeza@yahoo.fr>
- * License terms: GNU General Public License (GPL), version 2
*/
#define DRIVER_NAME "stm32-usart"
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 127472bd6a7c..70a4ea4eaa6e 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* suncore.c
*
* Common SUN serial routines. Based entirely
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 46e46894e918..63e34d868de8 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunhv.c: Serial driver for SUN4V hypervisor console.
*
* Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 653a076d89d3..b93d0225f8c9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
diff --git a/drivers/tty/serial/sunsab.h b/drivers/tty/serial/sunsab.h
index b78e1f7b8050..1644031aacda 100644
--- a/drivers/tty/serial/sunsab.h
+++ b/drivers/tty/serial/sunsab.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* sunsab.h: Register Definitions for the Siemens SAB82532 DUSCC
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 95d34d7565c9..6cf3e9b0728f 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* su.c: Small serial driver for keyboard/mouse interface on sparc32/PCI
*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 252cea49c068..bc7af8b08a72 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunzilog.c: Zilog serial driver for Sparc systems.
*
* Driver for Zilog serial chips found on Sun workstations and
diff --git a/drivers/tty/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
index 5dec7b47cc38..6d6764f0ac98 100644
--- a/drivers/tty/serial/sunzilog.h
+++ b/drivers/tty/serial/sunzilog.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SUNZILOG_H
#define _SUNZILOG_H
diff --git a/drivers/tty/serial/tilegx.c b/drivers/tty/serial/tilegx.c
index 453215f5420d..f0a3ae57f881 100644
--- a/drivers/tty/serial/tilegx.c
+++ b/drivers/tty/serial/tilegx.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
* TILEGx UART driver.
*/
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 5da7fe40e391..19d38b504e27 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* timbuart.c timberdale FPGA UART driver
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/drivers/tty/serial/timbuart.h b/drivers/tty/serial/timbuart.h
index 7e566766bc43..fb00b172117d 100644
--- a/drivers/tty/serial/timbuart.h
+++ b/drivers/tty/serial/timbuart.h
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* timbuart.c timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9b8d702dadc..c47db7826189 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uartlite.c: Serial driver for Xilinx uartlite serial controller
*
* Copyright (C) 2006 Peter Korsgaard <jacmet@sunsite.dk>
* Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/platform_device.h>
@@ -739,7 +736,7 @@ static int __init ulite_init(void)
err_plat:
uart_unregister_driver(&ulite_uart_driver);
err_uart:
- pr_err("registering uartlite driver failed: err=%i", ret);
+ pr_err("registering uartlite driver failed: err=%i\n", ret);
return ret;
}
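The single added character above is not cosmetic: a printk format string without a trailing "\n" leaves the record unterminated, so the message can be merged with the next line of console output. Log calls should close their format strings with a newline, e.g. (hypothetical message text):

pr_err("probe failed: err=%d\n", ret);	/* emitted as a complete line */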
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 55b702775786..2b6376e6e5ad 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Freescale QUICC Engine UART device driver
*
* Author: Timur Tabi <timur@freescale.com>
*
- * Copyright 2007 Freescale Semiconductor, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * Copyright 2007 Freescale Semiconductor, Inc.
*
* This driver adds support for UART devices via Freescale's QUICC Engine
* found on some Freescale SOCs.
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 439057e8107a..6d106e33f842 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
* Copyright (C) 2004-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* Based on drivers/serial/8250.c, by Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 435a6f3260be..3d58e9b34553 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* Based on msm_serial.c, which is:
* Copyright (C) 2007 Google, Inc.
* Author: Robert Love <rlove@google.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#if defined(CONFIG_SERIAL_VT8500_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 31a630ae0870..b9b2bc76bcac 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cadence UART driver (found in Xilinx Zynq)
*
* 2011 - 2014 (C) Xilinx Inc.
*
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2 of the License, or (at your option) any
- * later version.
- *
* This driver has originally been pushed by Xilinx using a Zynq-branding. This
* still shows in the naming of this file, the kconfig symbols and some symbols
* in the code.
@@ -1673,7 +1668,7 @@ static void __exit cdns_uart_exit(void)
uart_unregister_driver(&cdns_uart_uart_driver);
}
-module_init(cdns_uart_init);
+arch_initcall(cdns_uart_init);
module_exit(cdns_uart_exit);
MODULE_DESCRIPTION("Driver for Cadence UART");
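Switching from module_init() to arch_initcall() only changes when the built-in registration runs: arch_initcall functions execute at initcall level 3, ahead of the device-level initcalls that module_init() expands to for built-in code, so the UART and its console can come up earlier in boot; for modular builds the two macros behave the same. The idiom, sketched with the body elided:

static int __init cdns_uart_init(void)
{
	/* register the uart_driver and platform driver here */
	return 0;
}

static void __exit cdns_uart_exit(void)
{
	/* unregister in reverse order */
}

arch_initcall(cdns_uart_init);	/* was: module_init(cdns_uart_init); */
module_exit(cdns_uart_exit);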
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index d32bd499d684..b03d3e458ea2 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* zs.c: Serial port driver for IOASIC DECstations.
*
diff --git a/drivers/tty/serial/zs.h b/drivers/tty/serial/zs.h
index aa921b57d827..26ef8eafa1c1 100644
--- a/drivers/tty/serial/zs.h
+++ b/drivers/tty/serial/zs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* zs.h: Definitions for the DECstation Z85C30 serial driver.
*
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 3be981101297..3c4ad71f261d 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
*
@@ -13,8 +14,6 @@
*
* Original release 01/11/99
*
- * This code is released under the GNU General Public License (GPL)
- *
* This driver is primarily intended for use in synchronous
* HDLC mode. Asynchronous mode is also provided.
*
@@ -701,7 +700,7 @@ static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_loopback_frame( struct mgsl_struct *info );
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
@@ -1769,7 +1768,7 @@ static int startup(struct mgsl_struct * info)
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = mgsl_claim_resources(info);
@@ -4098,8 +4097,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
if (request_dma(info->dma_level,info->device_name) < 0){
printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
__FILE__,__LINE__,info->device_name, info->dma_level );
- mgsl_release_resources( info );
- return -ENODEV;
+ goto errout;
}
info->dma_requested = true;
@@ -7519,9 +7517,9 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
* Arguments: context pointer to device instance data
* Return Value: None
*/
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
{
- struct mgsl_struct *info = (struct mgsl_struct*)context;
+ struct mgsl_struct *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
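The mgsl_claim_resources() hunk above folds the DMA failure path into the shared error label so every failure releases whatever was already claimed in one place instead of open-coding the release before each return. The general shape of the pattern, sketched with placeholder helpers rather than the driver's own:

static int claim_resources(struct dev_info *info)
{
	int retval;

	retval = claim_irq_and_memory(info);	/* illustrative helper */
	if (retval)
		goto errout;

	if (request_dma(info->dma_level, info->device_name) < 0) {
		retval = -ENODEV;
		goto errout;		/* instead of release + return here */
	}
	info->dma_requested = true;
	return 0;

errout:
	release_resources(info);	/* single cleanup point */
	return retval;
}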
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 636b8ae29b46..255c49687877 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Device driver for Microgate SyncLink GT serial adapters.
*
@@ -6,8 +7,6 @@
*
* Microgate and SyncLink are trademarks of Microgate Corporation
*
- * This code is released under the GNU General Public License (GPL)
- *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@@ -494,8 +493,8 @@ static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
static int alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
/*
* ioctl handlers
@@ -3598,8 +3597,8 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev
info->adapter_num = adapter_num;
info->port_num = port_num;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->rx_timer, rx_timeout, 0);
/* Copy configuration info to device instance data */
info->pdev = pdev;
@@ -5113,9 +5112,9 @@ static int adapter_test(struct slgt_info *info)
/*
* transmit timeout handler
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, tx_timer);
unsigned long flags;
DBGINFO(("%s tx_timeout\n", info->device_name));
@@ -5137,9 +5136,9 @@ static void tx_timeout(unsigned long context)
/*
* receive buffer polling timer
*/
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, rx_timer);
unsigned long flags;
DBGINFO(("%s rx_timeout\n", info->device_name));
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4fed9e7b281f..75f11ce1f0a1 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* $Id: synclinkmp.c,v 4.38 2005/07/15 13:29:44 paulkf Exp $
*
@@ -10,7 +11,6 @@
* Microgate and SyncLink are trademarks of Microgate Corporation
*
* Derived from serial.c written by Theodore Ts'o and Linus Torvalds
- * This code is released under the GNU General Public License (GPL)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
@@ -615,8 +615,8 @@ static void free_tmp_rx_buf(SLMP_INFO *info);
static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
@@ -3782,9 +3782,8 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
info->bus_type = MGSL_BUS_TYPE_PCI;
info->irq_flags = IRQF_SHARED;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->status_timer, status_timeout,
- (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->status_timer, status_timeout, 0);
/* Store the PCI9050 misc control register value because a flaw
* in the PCI9050 prevents LCR registers from being read if
@@ -5468,9 +5467,9 @@ static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5495,10 +5494,10 @@ static void tx_timeout(unsigned long context)
/* called to periodically check the DSR/RI modem signal input status
*/
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
{
u16 status = 0;
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, status_timer);
unsigned long flags;
unsigned char delta;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 3ffc1ce29023..b674793be478 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux Magic System Request Key Hacks
*
@@ -245,8 +246,10 @@ static void sysrq_handle_showallcpus(int key)
* architecture has no support for it:
*/
if (!trigger_all_cpu_backtrace()) {
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+ if (in_irq())
+ regs = get_irq_regs();
if (regs) {
pr_info("CPU%d:\n", smp_processor_id());
show_regs(regs);
@@ -265,7 +268,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
static void sysrq_handle_showregs(int key)
{
- struct pt_regs *regs = get_irq_regs();
+ struct pt_regs *regs = NULL;
+
+ if (in_irq())
+ regs = get_irq_regs();
if (regs)
show_regs(regs);
perf_event_print_debug();
@@ -648,9 +654,9 @@ static void sysrq_parse_reset_sequence(struct sysrq_state *state)
state->reset_seq_version = sysrq_reset_seq_version;
}
-static void sysrq_do_reset(unsigned long _state)
+static void sysrq_do_reset(struct timer_list *t)
{
- struct sysrq_state *state = (struct sysrq_state *) _state;
+ struct sysrq_state *state = from_timer(state, t, keyreset_timer);
state->reset_requested = true;
@@ -667,7 +673,7 @@ static void sysrq_handle_reset_request(struct sysrq_state *state)
mod_timer(&state->keyreset_timer,
jiffies + msecs_to_jiffies(sysrq_reset_downtime_ms));
else
- sysrq_do_reset((unsigned long)state);
+ sysrq_do_reset(&state->keyreset_timer);
}
static void sysrq_detect_reset_sequence(struct sysrq_state *state,
@@ -903,8 +909,7 @@ static int sysrq_connect(struct input_handler *handler,
sysrq->handle.handler = handler;
sysrq->handle.name = "sysrq";
sysrq->handle.private = sysrq;
- setup_timer(&sysrq->keyreset_timer,
- sysrq_do_reset, (unsigned long)sysrq);
+ timer_setup(&sysrq->keyreset_timer, sysrq_do_reset, 0);
error = input_register_handle(&sysrq->handle);
if (error) {
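get_irq_regs() is only meaningful while a hardware interrupt is actually being serviced; when sysrq is triggered from process context (for example via /proc/sysrq-trigger) the per-CPU pointer can be stale, so the handlers now ask in_irq() first and simply skip the register dump otherwise. A sketch of the guard on its own:

static void show_regs_if_available(void)
{
	struct pt_regs *regs = NULL;

	if (in_irq())			/* only trust get_irq_regs() in hardirq context */
		regs = get_irq_regs();

	if (regs)
		show_regs(regs);
	else
		pr_info("no interrupt register snapshot available\n");
}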
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index df2d735338e2..e30aa6bf9ff9 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Creating audit events from TTY input.
*
- * Copyright (C) 2007 Red Hat, Inc. All rights reserved. This copyrighted
- * material is made available to anyone wishing to use, modify, copy, or
- * redistribute it subject to the terms and conditions of the GNU General
- * Public License v.2.
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
* Authors: Miloslav Trmac <mitr@redhat.com>
*/
diff --git a/drivers/tty/tty_baudrate.c b/drivers/tty/tty_baudrate.c
index 5c33fd25676d..6ff8cdfc9d2a 100644
--- a/drivers/tty/tty_baudrate.c
+++ b/drivers/tty/tty_baudrate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f8eba1c5412f..c996b6859c5e 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tty buffer allocation management
*/
@@ -446,7 +447,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* Callers other than flush_to_ldisc() need to exclude the kworker
* from concurrent use of the line discipline, see paste_selection().
*
- * Returns the number of bytes not processed
+ * Returns the number of bytes processed
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
char *f, int count)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 94cccb6efa32..dc60aeea87d8 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index efa96e6c4c1b..d9b561d89432 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
diff --git a/drivers/tty/tty_jobctrl.c b/drivers/tty/tty_jobctrl.c
index e7032309ee87..c4ecd66fafef 100644
--- a/drivers/tty/tty_jobctrl.c
+++ b/drivers/tty/tty_jobctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 84a8ac2a779f..24ec5c7e6b20 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kmod.h>
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 52b7baef4f7a..37a91b3df980 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ldisc rw semaphore
*
@@ -22,9 +23,6 @@
* Michel Lespinasse <walken@google.com>.
*
* Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
- *
- * This file may be redistributed under the terms of the GNU General Public
- * License v2.
*/
#include <linux/list.h>
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d8bae67a6174..2640635ee177 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 6b137194069f..25d736880013 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tty port functions
*/
@@ -78,7 +79,7 @@ EXPORT_SYMBOL(tty_port_init);
* @driver: tty_driver for this device
* @index: index of the tty
*
- * Provide the tty layer wit ha link from a tty (specified by @index) to a
+ * Provide the tty layer with a link from a tty (specified by @index) to a
* tty_port (@port). Use this only if neither tty_port_register_device nor
* tty_port_install is used in the driver. If used, this has to be called before
* tty_register_driver.
@@ -235,7 +236,7 @@ EXPORT_SYMBOL(tty_port_free_xmit_buf);
/**
* tty_port_destroy -- destroy inited port
- * @port: tty port to be doestroyed
+ * @port: tty port to be destroyed
*
* When a port was initialized using tty_port_init, one has to destroy the
* port by this function. Either indirectly by using tty_port refcounting
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index ef01d24858cd..58b454c34560 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -361,17 +361,13 @@ done:
return rv;
}
-static void vcc_rx_timer(unsigned long index)
+static void vcc_rx_timer(struct timer_list *t)
{
+ struct vcc_port *port = from_timer(port, t, rx_timer);
struct vio_driver_state *vio;
- struct vcc_port *port;
unsigned long flags;
int rv;
- port = vcc_get_ne(index);
- if (!port)
- return;
-
spin_lock_irqsave(&port->lock, flags);
port->rx_timer.expires = 0;
@@ -391,18 +387,14 @@ done:
vcc_put(port, false);
}
-static void vcc_tx_timer(unsigned long index)
+static void vcc_tx_timer(struct timer_list *t)
{
- struct vcc_port *port;
+ struct vcc_port *port = from_timer(port, t, tx_timer);
struct vio_vcc *pkt;
unsigned long flags;
int tosend = 0;
int rv;
- port = vcc_get_ne(index);
- if (!port)
- return;
-
spin_lock_irqsave(&port->lock, flags);
port->tx_timer.expires = 0;
@@ -645,13 +637,8 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (rv)
goto free_domain;
- init_timer(&port->rx_timer);
- port->rx_timer.function = vcc_rx_timer;
- port->rx_timer.data = port->index;
-
- init_timer(&port->tx_timer);
- port->tx_timer.function = vcc_tx_timer;
- port->tx_timer.data = port->index;
+ timer_setup(&port->rx_timer, vcc_rx_timer, 0);
+ timer_setup(&port->tx_timer, vcc_tx_timer, 0);
dev_set_drvdata(&vdev->dev, port);
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index 17ae94cb29f8..edbbe0ccdb83 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# This file contains the font map for the default (hardware) font
#
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index a5f88cf0f61d..722a6690c70d 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* consolemap.c
*
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index f4166263bb3a..5d412df8e943 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Written for linux by Johan Myreen as a translation from
* the assembly version by Linus (with diacriticals added)
@@ -243,14 +244,14 @@ static int kd_sound_helper(struct input_handle *handle, void *data)
return 0;
}
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
{
static unsigned int zero;
input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper);
}
-static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0);
+static DEFINE_TIMER(kd_mksound_timer, kd_nosound);
void kd_mksound(unsigned int hz, unsigned int ticks)
{
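For statically defined timers the conversion drops the data argument entirely: DEFINE_TIMER() now takes only the name and the callback, and the callback receives the timer pointer, which a single-instance timer like the keyboard beeper can ignore. Roughly, with hypothetical names:

#include <linux/timer.h>

static void beep_off(struct timer_list *unused)
{
	/* silence the beeper */
}

/* old form: static DEFINE_TIMER(beep_timer, beep_off, 0, 0); */
static DEFINE_TIMER(beep_timer, beep_off);

/* arm it for 100 ms from now:
 * mod_timer(&beep_timer, jiffies + msecs_to_jiffies(100));
 */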
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index accbd1257bc4..af4da9507180 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This module exports the functions:
*
@@ -155,42 +156,34 @@ static int store_utf8(u16 c, char *p)
int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
- int sel_mode, new_sel_start, new_sel_end, spc;
+ int new_sel_start, new_sel_end, spc;
+ struct tiocl_selection v;
char *bp, *obp;
int i, ps, pe, multiplier;
u16 c;
int mode;
poke_blanked_console();
+ if (copy_from_user(&v, sel, sizeof(*sel)))
+ return -EFAULT;
- { unsigned short xs, ys, xe, ye;
+ v.xs = limit(v.xs - 1, vc->vc_cols - 1);
+ v.ys = limit(v.ys - 1, vc->vc_rows - 1);
+ v.xe = limit(v.xe - 1, vc->vc_cols - 1);
+ v.ye = limit(v.ye - 1, vc->vc_rows - 1);
+ ps = v.ys * vc->vc_size_row + (v.xs << 1);
+ pe = v.ye * vc->vc_size_row + (v.xe << 1);
- if (!access_ok(VERIFY_READ, sel, sizeof(*sel)))
- return -EFAULT;
- __get_user(xs, &sel->xs);
- __get_user(ys, &sel->ys);
- __get_user(xe, &sel->xe);
- __get_user(ye, &sel->ye);
- __get_user(sel_mode, &sel->sel_mode);
- xs--; ys--; xe--; ye--;
- xs = limit(xs, vc->vc_cols - 1);
- ys = limit(ys, vc->vc_rows - 1);
- xe = limit(xe, vc->vc_cols - 1);
- ye = limit(ye, vc->vc_rows - 1);
- ps = ys * vc->vc_size_row + (xs << 1);
- pe = ye * vc->vc_size_row + (xe << 1);
-
- if (sel_mode == TIOCL_SELCLEAR) {
- /* useful for screendump without selection highlights */
- clear_selection();
- return 0;
- }
-
- if (mouse_reporting() && (sel_mode & TIOCL_SELMOUSEREPORT)) {
- mouse_report(tty, sel_mode & TIOCL_SELBUTTONMASK, xs, ys);
- return 0;
- }
- }
+ if (v.sel_mode == TIOCL_SELCLEAR) {
+ /* useful for screendump without selection highlights */
+ clear_selection();
+ return 0;
+ }
+
+ if (mouse_reporting() && (v.sel_mode & TIOCL_SELMOUSEREPORT)) {
+ mouse_report(tty, v.sel_mode & TIOCL_SELBUTTONMASK, v.xs, v.ys);
+ return 0;
+ }
if (ps > pe) /* make sel_start <= sel_end */
{
@@ -209,7 +202,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
else
use_unicode = 0;
- switch (sel_mode)
+ switch (v.sel_mode)
{
case TIOCL_SELCHAR: /* character-by-character selection */
new_sel_start = ps;
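The set_selection() rewrite replaces the access_ok()/__get_user() sequence with a single copy_from_user() of the whole struct, then clamps the copied values; copy_from_user() returns the number of bytes it could not copy, so any non-zero result maps to -EFAULT. A minimal sketch of the idiom (struct layout and limits are illustrative, not the console's):

struct sel_args {
	unsigned short xs, ys, xe, ye;
	unsigned short sel_mode;
};

static int get_sel_args(const struct sel_args __user *usel,
			struct sel_args *v, int max_col, int max_row)
{
	if (copy_from_user(v, usel, sizeof(*v)))
		return -EFAULT;

	/* normalise the 1-based user coordinates and clamp to the console size */
	v->xs = clamp_t(int, v->xs - 1, 0, max_col);
	v->ys = clamp_t(int, v->ys - 1, 0, max_row);
	v->xe = clamp_t(int, v->xe - 1, 0, max_col);
	v->ye = clamp_t(int, v->ye - 1, 0, max_row);
	return 0;
}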
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 56dcff6059d3..85b6634f518a 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Provide access to virtual console memory.
* /dev/vcs0: the screen as it is being viewed right now (possibly scrolled)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 2ebaba16f785..88b902c525d7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
@@ -102,6 +103,7 @@
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/ctype.h>
+#include <linux/bsearch.h>
#define MAX_NR_CON_DRIVER 16
@@ -156,7 +158,7 @@ static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
@@ -228,7 +230,7 @@ static int scrollback_delta;
*/
int (*console_blank_hook)(int);
-static DEFINE_TIMER(console_timer, blank_screen_t, 0, 0);
+static DEFINE_TIMER(console_timer, blank_screen_t);
static int blank_state;
static int blank_timer_expired;
enum {
@@ -2142,22 +2144,15 @@ struct interval {
uint32_t last;
};
-static int bisearch(uint32_t ucs, const struct interval *table, int max)
+static int ucs_cmp(const void *key, const void *elt)
{
- int min = 0;
- int mid;
+ uint32_t ucs = *(uint32_t *)key;
+ struct interval e = *(struct interval *) elt;
- if (ucs < table[0].first || ucs > table[max].last)
- return 0;
- while (max >= min) {
- mid = (min + max) / 2;
- if (ucs > table[mid].last)
- min = mid + 1;
- else if (ucs < table[mid].first)
- max = mid - 1;
- else
- return 1;
- }
+ if (ucs > e.last)
+ return 1;
+ else if (ucs < e.first)
+ return -1;
return 0;
}
@@ -2169,7 +2164,12 @@ static int is_double_width(uint32_t ucs)
{ 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
{ 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
};
- return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
+ if (ucs < double_width[0].first ||
+ ucs > double_width[ARRAY_SIZE(double_width) - 1].last)
+ return 0;
+
+ return bsearch(&ucs, double_width, ARRAY_SIZE(double_width),
+ sizeof(struct interval), ucs_cmp) != NULL;
}
static void con_flush(struct vc_data *vc, unsigned long draw_from,
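The open-coded binary search above is replaced with the library bsearch() from <linux/bsearch.h>; the comparison callback returns negative, zero, or positive for "key before / inside / after" the interval, and a non-NULL result means the code point falls in a double-width range. The same API exists in userspace, so the logic can be sketched as a small standalone program using a few representative ranges from the table:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct interval { uint32_t first, last; };

static int ucs_cmp(const void *key, const void *elt)
{
	uint32_t ucs = *(const uint32_t *)key;
	const struct interval *e = elt;

	if (ucs > e->last)
		return 1;
	if (ucs < e->first)
		return -1;
	return 0;
}

int main(void)
{
	/* must be sorted and non-overlapping for bsearch() to work */
	static const struct interval double_width[] = {
		{ 0x1100, 0x115F }, { 0x2E80, 0x303E }, { 0x3041, 0x33FF },
		{ 0xFF00, 0xFF60 }, { 0x20000, 0x2FFFD },
	};
	uint32_t ucs = 0x3042;	/* HIRAGANA LETTER A */
	int wide = bsearch(&ucs, double_width,
			   sizeof(double_width) / sizeof(double_width[0]),
			   sizeof(double_width[0]), ucs_cmp) != NULL;

	printf("U+%04X is %s width\n", (unsigned)ucs, wide ? "double" : "single");
	return 0;
}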
@@ -2205,7 +2205,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
console_lock();
vc = tty->driver_data;
if (vc == NULL) {
- printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
+ pr_err("vt: argh, driver_data is NULL !\n");
console_unlock();
return 0;
}
@@ -3190,20 +3190,21 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
pr_info("Console: switching ");
if (!deflt)
- printk(KERN_CONT "consoles %d-%d ", first+1, last+1);
+ pr_cont("consoles %d-%d ", first + 1, last + 1);
if (j >= 0) {
struct vc_data *vc = vc_cons[j].d;
- printk(KERN_CONT "to %s %s %dx%d\n",
- vc->vc_can_do_color ? "colour" : "mono",
- desc, vc->vc_cols, vc->vc_rows);
+ pr_cont("to %s %s %dx%d\n",
+ vc->vc_can_do_color ? "colour" : "mono",
+ desc, vc->vc_cols, vc->vc_rows);
if (k >= 0) {
vc = vc_cons[k].d;
update_screen(vc);
}
- } else
- printk(KERN_CONT "to %s\n", desc);
+ } else {
+ pr_cont("to %s\n", desc);
+ }
retval = 0;
err:
@@ -3622,9 +3623,8 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
con_driver, con_dev_groups,
"vtcon%i", con_driver->node);
if (IS_ERR(con_driver->dev)) {
- printk(KERN_WARNING "Unable to create device for %s; "
- "errno = %ld\n", con_driver->desc,
- PTR_ERR(con_driver->dev));
+ pr_warn("Unable to create device for %s; errno = %ld\n",
+ con_driver->desc, PTR_ERR(con_driver->dev));
con_driver->dev = NULL;
} else {
vtconsole_init_device(con_driver);
@@ -3761,8 +3761,8 @@ static int __init vtconsole_class_init(void)
vtconsole_class = class_create(THIS_MODULE, "vtconsole");
if (IS_ERR(vtconsole_class)) {
- printk(KERN_WARNING "Unable to create vt console class; "
- "errno = %ld\n", PTR_ERR(vtconsole_class));
+ pr_warn("Unable to create vt console class; errno = %ld\n",
+ PTR_ERR(vtconsole_class));
vtconsole_class = NULL;
}
@@ -3778,9 +3778,8 @@ static int __init vtconsole_class_init(void)
"vtcon%i", con->node);
if (IS_ERR(con->dev)) {
- printk(KERN_WARNING "Unable to create "
- "device for %s; errno = %ld\n",
- con->desc, PTR_ERR(con->dev));
+ pr_warn("Unable to create device for %s; errno = %ld\n",
+ con->desc, PTR_ERR(con->dev));
con->dev = NULL;
} else {
vtconsole_init_device(con);
@@ -3930,7 +3929,7 @@ void unblank_screen(void)
* (console operations can still happen at irq time, but only from printk which
* has the console mutex. Not perfect yet, but better than no locking
*/
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
{
blank_timer_expired = 1;
schedule_work(&console_work);
@@ -4121,37 +4120,45 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
return -EINVAL;
if (op->charcount > 512)
return -EINVAL;
+ if (op->width <= 0 || op->width > 32 || op->height > 32)
+ return -EINVAL;
+ size = (op->width+7)/8 * 32 * op->charcount;
+ if (size > max_font_size)
+ return -ENOSPC;
+
+ font.data = memdup_user(op->data, size);
+ if (IS_ERR(font.data))
+ return PTR_ERR(font.data);
+
if (!op->height) { /* Need to guess font height [compat] */
int h, i;
- u8 __user *charmap = op->data;
- u8 tmp;
-
- /* If from KDFONTOP ioctl, don't allow things which can be done in userland,
- so that we can get rid of this soon */
- if (!(op->flags & KD_FONT_FLAG_OLD))
+ u8 *charmap = font.data;
+
+ /*
+ * If from KDFONTOP ioctl, don't allow things which can be done
+	 * in userland, so that we can get rid of this soon
+ */
+ if (!(op->flags & KD_FONT_FLAG_OLD)) {
+ kfree(font.data);
return -EINVAL;
+ }
+
for (h = 32; h > 0; h--)
- for (i = 0; i < op->charcount; i++) {
- if (get_user(tmp, &charmap[32*i+h-1]))
- return -EFAULT;
- if (tmp)
+ for (i = 0; i < op->charcount; i++)
+ if (charmap[32*i+h-1])
goto nonzero;
- }
+
+ kfree(font.data);
return -EINVAL;
+
nonzero:
op->height = h;
}
- if (op->width <= 0 || op->width > 32 || op->height > 32)
- return -EINVAL;
- size = (op->width+7)/8 * 32 * op->charcount;
- if (size > max_font_size)
- return -ENOSPC;
+
font.charcount = op->charcount;
- font.height = op->height;
font.width = op->width;
- font.data = memdup_user(op->data, size);
- if (IS_ERR(font.data))
- return PTR_ERR(font.data);
+ font.height = op->height;
+
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
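con_font_set() now validates the width/height limits and computes the buffer size before pulling the font data in with one memdup_user() call, instead of probing the user buffer with get_user() in a loop; memdup_user() allocates the kernel copy, fills it from userspace, and returns an ERR_PTR on failure, so every later exit path has to kfree() the copy. An illustrative helper, not the function itself:

static int copy_font_from_user(const u8 __user *udata,
			       unsigned int width, unsigned int height,
			       unsigned int charcount, u8 **out)
{
	size_t size;
	u8 *data;

	if (!width || width > 32 || height > 32 || charcount > 512)
		return -EINVAL;

	size = DIV_ROUND_UP(width, 8) * 32 * charcount;
	data = memdup_user(udata, size);
	if (IS_ERR(data))
		return PTR_ERR(data);	/* -EFAULT or -ENOMEM */

	*out = data;			/* caller owns it and must kfree() */
	return 0;
}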
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 96d389cb506c..d61be307256a 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1992 obz under the linux copyright
*
@@ -842,58 +843,44 @@ int vt_ioctl(struct tty_struct *tty,
case VT_RESIZEX:
{
- struct vt_consize __user *vtconsize = up;
- ushort ll,cc,vlin,clin,vcol,ccol;
+ struct vt_consize v;
if (!perm)
return -EPERM;
- if (!access_ok(VERIFY_READ, vtconsize,
- sizeof(struct vt_consize))) {
- ret = -EFAULT;
- break;
- }
+ if (copy_from_user(&v, up, sizeof(struct vt_consize)))
+ return -EFAULT;
/* FIXME: Should check the copies properly */
- __get_user(ll, &vtconsize->v_rows);
- __get_user(cc, &vtconsize->v_cols);
- __get_user(vlin, &vtconsize->v_vlin);
- __get_user(clin, &vtconsize->v_clin);
- __get_user(vcol, &vtconsize->v_vcol);
- __get_user(ccol, &vtconsize->v_ccol);
- vlin = vlin ? vlin : vc->vc_scan_lines;
- if (clin) {
- if (ll) {
- if (ll != vlin/clin) {
- /* Parameters don't add up */
- ret = -EINVAL;
- break;
- }
- } else
- ll = vlin/clin;
+ if (!v.v_vlin)
+ v.v_vlin = vc->vc_scan_lines;
+ if (v.v_clin) {
+ int rows = v.v_vlin/v.v_clin;
+ if (v.v_rows != rows) {
+ if (v.v_rows) /* Parameters don't add up */
+ return -EINVAL;
+ v.v_rows = rows;
+ }
}
- if (vcol && ccol) {
- if (cc) {
- if (cc != vcol/ccol) {
- ret = -EINVAL;
- break;
- }
- } else
- cc = vcol/ccol;
+ if (v.v_vcol && v.v_ccol) {
+ int cols = v.v_vcol/v.v_ccol;
+ if (v.v_cols != cols) {
+ if (v.v_cols)
+ return -EINVAL;
+ v.v_cols = cols;
+ }
}
- if (clin > 32) {
- ret = -EINVAL;
- break;
- }
-
+ if (v.v_clin > 32)
+ return -EINVAL;
+
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (!vc_cons[i].d)
continue;
console_lock();
- if (vlin)
- vc_cons[i].d->vc_scan_lines = vlin;
- if (clin)
- vc_cons[i].d->vc_font.height = clin;
+ if (v.v_vlin)
+ vc_cons[i].d->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vc_cons[i].d->vc_font.height = v.v_clin;
vc_cons[i].d->vc_resize_user = 1;
- vc_resize(vc_cons[i].d, cc, ll);
+ vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
console_unlock();
}
break;
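VT_RESIZEX follows the same pattern as the selection ioctl: copy the whole struct vt_consize in one go, then fill in defaults and cross-check the redundant fields, since the row count must agree with scan lines divided by character-cell height when both are supplied. The core consistency check, sketched against the fields shown above:

/* illustrative helper: derive and verify rows from the scan-line geometry */
static int check_rows(struct vt_consize *v, unsigned int default_vlin)
{
	if (!v->v_vlin)
		v->v_vlin = default_vlin;

	if (v->v_clin) {
		int rows = v->v_vlin / v->v_clin;

		if (v->v_rows && v->v_rows != rows)
			return -EINVAL;	/* parameters don't add up */
		v->v_rows = rows;
	}
	return 0;
}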
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index e9663bb8a4c7..c285dd2a4539 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_UIO) += uio.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 939a63bca82f..f699abab1787 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -77,11 +77,12 @@ config USB_PCI
depends on PCI
default y
---help---
- A lot of embeded system SOC (e.g. freescale T2080) have both
- PCI and USB modules. But USB module is controlled by registers
- directly, it have no relationship with PCI module.
+ Many embedded system SOCs (e.g. freescale T2080) have both
+ PCI and USB modules with the USB module directly controlled by
+ registers and having no relationship to the PCI module.
- When say N here it will not build PCI related code in USB driver.
+ If you have such a device you may say N here and PCI related code
+ will not be built in the USB driver.
if USB
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 9650b351c26c..060643a1b5c8 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the kernel USB device drivers.
#
diff --git a/drivers/usb/atm/Makefile b/drivers/usb/atm/Makefile
index ac278946b06c..7ac65ce1aa8a 100644
--- a/drivers/usb/atm/Makefile
+++ b/drivers/usb/atm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB ATM/xDSL drivers
#
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 5160a4a966b3..8af797252af2 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* cxacru.c - driver for USB ADSL modems based on
* Conexant AccessRunner chipset
@@ -6,21 +7,6 @@
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
* Copyright (C) 2009 Simon Arlott
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
/*
@@ -424,6 +410,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
case CXPOLL_STOPPING:
/* abort stop request */
instance->poll_state = CXPOLL_POLLING;
+ /* fall through */
case CXPOLL_POLLING:
case CXPOLL_SHUTDOWN:
/* don't start polling */
@@ -560,23 +547,30 @@ static void cxacru_blocking_completion(struct urb *urb)
complete(urb->context);
}
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+ struct timer_list timer;
+ struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
{
- usb_unlink_urb((struct urb *) data);
+ struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+ usb_unlink_urb(timer->urb);
}
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
int *actual_length)
{
- struct timer_list timer;
+ struct cxacru_timer timer = {
+ .urb = urb,
+ };
- init_timer(&timer);
- timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
- timer.data = (unsigned long) urb;
- timer.function = cxacru_timeout_kill;
- add_timer(&timer);
+ timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+ mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
wait_for_completion(done);
- del_timer_sync(&timer);
+ del_timer_sync(&timer.timer);
+ destroy_timer_on_stack(&timer.timer);
if (actual_length)
*actual_length = urb->actual_length;
@@ -797,6 +791,7 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
case CXPOLL_STOPPING:
/* abort stop request */
instance->poll_state = CXPOLL_POLLING;
+ /* fall through */
case CXPOLL_POLLING:
case CXPOLL_SHUTDOWN:
/* don't start polling */
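Because the cxacru timeout can no longer smuggle the urb pointer through .data, the patch wraps the timer and the urb in a small struct that lives on the caller's stack; stack-resident timers must be set up with timer_setup_on_stack() and torn down with destroy_timer_on_stack() so the debug-objects infrastructure can track them. In outline, with hypothetical names:

struct urb_timer {
	struct timer_list timer;
	struct urb *urb;
};

static void urb_timeout_kill(struct timer_list *t)
{
	struct urb_timer *ut = from_timer(ut, t, timer);

	usb_unlink_urb(ut->urb);	/* abort the transfer on timeout */
}

static void wait_urb_with_timeout(struct urb *urb, struct completion *done,
				  unsigned long timeout_ms)
{
	struct urb_timer ut = { .urb = urb };

	timer_setup_on_stack(&ut.timer, urb_timeout_kill, 0);
	mod_timer(&ut.timer, jiffies + msecs_to_jiffies(timeout_ms));

	wait_for_completion(done);

	del_timer_sync(&ut.timer);
	destroy_timer_on_stack(&ut.timer);
}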
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 3676adb40d89..973548b5c15c 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
*
@@ -6,21 +7,6 @@
* Copyright (C) 2004, David Woodhouse
*
* Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <asm/page.h>
@@ -571,9 +557,10 @@ static void speedtch_check_status(struct work_struct *work)
}
}
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ status_check_timer);
schedule_work(&instance->status_check_work);
@@ -584,9 +571,10 @@ static void speedtch_status_poll(unsigned long data)
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ resubmit_timer);
struct urb *int_urb = instance->int_urb;
int ret;
@@ -874,16 +862,11 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
- init_timer(&instance->status_check_timer);
-
- instance->status_check_timer.function = speedtch_status_poll;
- instance->status_check_timer.data = (unsigned long)instance;
+ timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
- init_timer(&instance->resubmit_timer);
- instance->resubmit_timer.function = speedtch_resubmit_int;
- instance->resubmit_timer.data = (unsigned long)instance;
+ timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index ba7616395db2..ab75690044bb 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*-
* Copyright (c) 2003, 2004
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 8607af758bbd..dbea28495e1d 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* usbatm.c - Generic USB xDSL driver core
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
* Copyright (C) 2004, David Woodhouse, Roman Kagan
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
/*
@@ -1003,18 +989,18 @@ static int usbatm_heavy_init(struct usbatm_data *instance)
return 0;
}
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
{
- tasklet_schedule((struct tasklet_struct *) data);
+ struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+ tasklet_schedule(&channel->tasklet);
}
static void usbatm_init_channel(struct usbatm_channel *channel)
{
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->list);
- channel->delay.function = usbatm_tasklet_schedule;
- channel->delay.data = (unsigned long) &channel->tasklet;
- init_timer(&channel->delay);
+ timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
}
int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f3eecd967a8a..d3bdc4cc47aa 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* usbatm.h - Generic USB xDSL driver core
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands, SolNegro, Josep Comas
* Copyright (C) 2004, David Woodhouse
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#ifndef _USBATM_H_
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index c73c1ec3005e..ffc9810070a3 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
* xusbatm.c - dumb usbatm-based driver for modems initialized in userspace
*
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/usb/c67x00/Makefile b/drivers/usb/c67x00/Makefile
index da5f314a5de0..0cde62d06e5d 100644
--- a/drivers/usb/c67x00/Makefile
+++ b/drivers/usb/c67x00/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Cypress C67X00 USB Controller
#
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5796c8820514..53838e7d4eef 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-drv.c: Cypress C67X00 USB Common infrastructure
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
/*
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index 30d3f346686e..c39eee17c0e4 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-hcd.c: Cypress C67X00 USB Host Controller Driver
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index cf8a455a6403..3b181d4c7a03 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-hcd.h: Cypress C67X00 USB HCD
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _USB_C67X00_HCD_H
diff --git a/drivers/usb/c67x00/c67x00-ll-hpi.c b/drivers/usb/c67x00/c67x00-ll-hpi.c
index b58151841e10..e1fe3603140a 100644
--- a/drivers/usb/c67x00/c67x00-ll-hpi.c
+++ b/drivers/usb/c67x00/c67x00-ll-hpi.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 7311ed61e99a..633c52de3bb3 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#include <linux/kthread.h>
@@ -966,13 +952,11 @@ static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
struct urb *urb = td->urb;
- struct c67x00_urb_priv *urbp;
int cnt;
if (!urb)
return;
- urbp = urb->hcpriv;
cnt = td->privdata;
if (td->status & TD_ERROR_MASK)
diff --git a/drivers/usb/c67x00/c67x00.h b/drivers/usb/c67x00/c67x00.h
index a26e9ded0f32..7ce10928b037 100644
--- a/drivers/usb/c67x00/c67x00.h
+++ b/drivers/usb/c67x00/c67x00.h
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* c67x00.h: Cypress C67X00 USB register and field definitions
*
* Copyright (C) 2006-2008 Barco N.V.
* Derived from the Cypress cy7c67200/300 ezusb linux driver and
* based on multiple host controller drivers inside the linux kernel.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301 USA.
*/
#ifndef _USB_C67X00_H
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 51f4157bbecf..785f0ed037f7 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -9,7 +9,7 @@ config USB_CHIPIDEA
Dual-role switch (ID, OTG FSM, sysfs), Host-only, and
Peripheral-only.
- When compiled dynamically, the module will be called ci-hdrc.ko.
+ When compiled dynamically, the module will be called ci_hdrc.ko.
if USB_CHIPIDEA
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index ddcbddf8361a..e3d5e728fa53 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
ci_hdrc-y := core.o otg.o debug.o
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index e462f55c8b99..98da99510be7 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bits.h - register bits of the ChipIdea USB IP core
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_BITS_H
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6743f85b1b7a..98b7cb3d0064 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ci.h - common structures, functions, and macros of the ChipIdea driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_CI_H
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 5f4a8157fad8..3b45c25f296e 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index d666c9f036ba..204275f47573 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -1,12 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#ifndef __DRIVER_USB_CHIPIDEA_CI_HDRC_IMX_H
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index bb626120296f..3593ce0ec641 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -1,9 +1,5 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. */
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index 39414e4b2d81..49a61549cee6 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ci_hdrc_pci.c - MIPS USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index bfcee2702d50..7b65a1040d2c 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016, NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 99425db9ba62..c044fba463e4 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
* Antoine Tenart <antoine.tenart@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/chipidea/ci_hdrc_zevio.c b/drivers/usb/chipidea/ci_hdrc_zevio.c
index 1264de505527..e1634da4a4b1 100644
--- a/drivers/usb/chipidea/ci_hdrc_zevio.c
+++ b/drivers/usb/chipidea/ci_hdrc_zevio.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
* Based off drivers/usb/chipidea/ci_hdrc_msm.c
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 43ea5fb87b9a..dd2dd9391bb7 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core.c - ChipIdea USB IP core family device controller
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 1c31e8a08810..c9e1a165ed82 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 18cb8e46262d..19d60ed7e41f 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* host.c - ChipIdea USB host controller driver
*
* Copyright (c) 2012 Intel Corporation
*
* Author: Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 0f12f131bdd3..70112cf0f195 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRIVERS_USB_CHIPIDEA_HOST_H
#define __DRIVERS_USB_CHIPIDEA_HOST_H
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 10236fe71522..db4ceffcf2a6 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* otg.c - ChipIdea USB IP core OTG driver
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 9ecb598e48f0..7e7428e48bfa 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
*
* Author: Peter Chen
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_OTG_H
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 5ea0246f650d..9e2d300060bc 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* otg_fsm.c - ChipIdea USB IP core OTG FSM driver
*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Jun Li
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 6366fe398ba6..2b49d29bf2fb 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
*
* Author: Jun Li
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_OTG_FSM_H
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index fe8a90543ea3..9852ec5e6e01 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udc.c - ChipIdea UDC driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
@@ -1526,6 +1523,10 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
+ if (ci->usb_phy)
+ usb_phy_set_charger_state(ci->usb_phy, is_active ?
+ USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
+
if (gadget_ready) {
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 2ecd1174d66c..e023735d94b7 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udc.h - ChipIdea UDC structures
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DRIVERS_USB_CHIPIDEA_UDC_H
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 1219583dc1b2..6da42dcd2888 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Linaro Ltd.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 9f4a0185dd60..8cdf0af156c6 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1,12 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Freescale Semiconductor, Inc.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
diff --git a/drivers/usb/class/Makefile b/drivers/usb/class/Makefile
index 32e85277b5cf..5d393a28f7f2 100644
--- a/drivers/usb/class/Makefile
+++ b/drivers/usb/class/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Class drivers
# (one step up from the misc category)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 18c923a4c16e..8e0636c963a7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cdc-acm.c
*
@@ -12,20 +13,6 @@
* USB Abstract Control Model driver for USB modems and ISDN adapters
*
* Sponsored by SuSE
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef DEBUG
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 7a2b3deafc90..eacc116e83da 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* Includes for cdc-acm.c
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e865dbf878c..6c181a625daf 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cdc-wdm.c
*
@@ -483,7 +484,7 @@ static ssize_t wdm_read
if (rv < 0)
return -ERESTARTSYS;
- cntr = ACCESS_ONCE(desc->length);
+ cntr = READ_ONCE(desc->length);
if (cntr == 0) {
desc->read = 0;
retry:
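The ACCESS_ONCE() -> READ_ONCE() conversions in this series are mechanical; as a rough, hypothetical illustration of the semantics (names invented here, not taken from this patch):

	/* READ_ONCE() forces a single, non-torn load of a value that another
	 * context may be updating concurrently (e.g. a sysfs-writable limit). */
	static unsigned int limit_mb;

	static bool over_limit(u64 usage_bytes)
	{
		u64 lim = READ_ONCE(limit_mb);	/* snapshot the limit once */

		lim <<= 20;			/* megabytes -> bytes */
		return usage_bytes > lim;
	}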
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index fb87c17ed6fa..c454885ef4a0 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usblp.c
*
@@ -31,22 +32,6 @@
* none - Maintained in Linux kernel after v0.13
*/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 6ebfabfa0dc7..0b8b0f3bdd2f 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* drivers/usb/class/usbtmc.c - USB Test & Measurement class driver
*
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * The GNU General Public License is available at
- * http://www.gnu.org/copyleft/gpl.html.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1343,6 +1331,7 @@ static void usbtmc_interrupt(struct urb *urb)
case -EOVERFLOW:
dev_err(dev, "overflow with length %d, actual length is %d\n",
data->iin_wMaxPacketSize, urb->actual_length);
+ /* fall through */
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index 6bbb3ec17018..0a7c45e85481 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the usb common parts.
#
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 552ff7ac5a6b..50a2362ed3ea 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Provides code common for host and device side USB.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
* If either host side (ie. CONFIG_USB=y) or device side USB stack
* (ie. CONFIG_USB_GADGET=y) is compiled in the kernel, this module is
* compiled-in as well. Otherwise, if either of the two stacks is
diff --git a/drivers/usb/common/led.c b/drivers/usb/common/led.c
index df23da00a901..7bd81166b77d 100644
--- a/drivers/usb/common/led.c
+++ b/drivers/usb/common/led.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* LED Triggers for USB Activity
*
* Copyright 2014 Michal Sojka <sojka@merica.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 4aa5195db8ea..8b351444cc40 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* ulpi.c - USB ULPI PHY bus
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ulpi/interface.h>
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index b8fe31e409a5..3740cf95560e 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* OTG Finite State Machine from OTG spec
*
@@ -5,20 +6,6 @@
*
* Author: Li Yang <LeoLi@freescale.com>
* Jerry Huang <Chang-Ming.Huang@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 250ec1d662d9..92c9cefb4317 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Core files and filesystem
#
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index b64568cf572c..77eef8acff94 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* DMA memory management for framework level HCD code (hc_driver)
*
@@ -5,7 +6,6 @@
* and should work with all USB controllers, regardless of bus type.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 883549ee946c..da8acd980fc6 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 55dea2e7828f..c2cf62b7043a 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* devices.c
* (C) Copyright 1999 Randy Dunlap.
@@ -5,20 +6,6 @@
* (proc file per device)
* (C) Copyright 1999 Deti Fliegl (new USB architecture)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*************************************************************
*
* <mountpoint>/devices contains USB topology, device, config, class,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9326f31db8d..705c573d0257 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
@@ -5,20 +6,6 @@
*
* Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* This file implements the usbfs/x/y files, where
* x is the bus number and y the device number.
*
@@ -150,7 +137,7 @@ static int usbfs_increase_memory_usage(u64 amount)
{
u64 lim;
- lim = ACCESS_ONCE(usbfs_memory_mb);
+ lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
atomic64_add(amount, &usbfs_memory_usage);
@@ -1833,6 +1820,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
return 0;
}
+static void compute_isochronous_actual_length(struct urb *urb)
+{
+ unsigned int i;
+
+ if (urb->number_of_packets > 0) {
+ urb->actual_length = 0;
+ for (i = 0; i < urb->number_of_packets; i++)
+ urb->actual_length +=
+ urb->iso_frame_desc[i].actual_length;
+ }
+}
+
static int processcompl(struct async *as, void __user * __user *arg)
{
struct urb *urb = as->urb;
@@ -1840,6 +1839,7 @@ static int processcompl(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
goto err_out;
@@ -2008,6 +2008,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
+ compute_isochronous_actual_length(urb);
if (as->userbuffer && urb->actual_length) {
if (copy_urb_data_to_user(as->userbuffer, urb))
return -EFAULT;
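For context on the compute_isochronous_actual_length() helper above: an isochronous URB carries one descriptor per packet, and the overall payload is the sum of the per-packet actual_length values filled in by the HCD. A hedged sketch of how such an URB is typically prepared (identifiers below are illustrative, not from this patch):

	struct urb *urb = usb_alloc_urb(npackets, GFP_KERNEL);
	int i;

	if (!urb)
		return -ENOMEM;

	urb->dev = udev;
	urb->pipe = usb_rcvisocpipe(udev, ep_addr);
	urb->interval = 1;
	urb->transfer_flags = URB_ISO_ASAP;
	urb->transfer_buffer = buf;
	urb->transfer_buffer_length = npackets * pkt_len;
	urb->number_of_packets = npackets;
	for (i = 0; i < npackets; i++) {
		urb->iso_frame_desc[i].offset = i * pkt_len;
		urb->iso_frame_desc[i].length = pkt_len;
		/* the HCD fills .actual_length and .status per packet */
	}
	urb->complete = my_iso_complete;
	urb->context = priv;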
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index eb87a259d55c..64262a9a8829 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/driver.c - most of the driver model stuff for usb
*
@@ -16,7 +17,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
@@ -1340,8 +1340,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
int err;
u16 devstat;
- err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
- &devstat);
+ err = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
+ &devstat);
if (err) {
dev_err(&udev->dev,
"Failed to suspend device, error %d\n",
@@ -1461,6 +1461,7 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
+ int r;
unbind_no_pm_drivers_interfaces(udev);
@@ -1469,7 +1470,14 @@ int usb_suspend(struct device *dev, pm_message_t msg)
* so we may still need to unbind and rebind upon resume
*/
choose_wakeup(udev, msg);
- return usb_suspend_both(udev, msg);
+ r = usb_suspend_both(udev, msg);
+ if (r)
+ return r;
+
+ if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND)
+ usb_port_disable(udev);
+
+ return 0;
}
/* The device lock is held by the PM core */
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index a60bc830a056..1c2c04079676 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/endpoint.c
*
@@ -6,7 +7,6 @@
* (C) Copyright 2006 Novell Inc.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* Endpoint sysfs stuff
*/
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 87ad6b6bfee8..65de6f73b672 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/file.c
*
@@ -14,7 +15,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bd3e0c5a6db2..83c14dda6300 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/generic.c - generic driver for USB devices (not interfaces)
*
@@ -16,7 +17,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index ea829ad798c0..66fe1b78d952 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright David Brownell 2000-2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 75ad6718858c..fc32391a34d5 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
@@ -6,20 +7,6 @@
* (C) Copyright Deti Fliegl 1999
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2002
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bcd.h>
@@ -801,9 +788,11 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
{
- usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+ struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+ usb_hcd_poll_rh_status(_hcd);
}
/*-------------------------------------------------------------------------*/
@@ -2558,9 +2547,7 @@ struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
hcd->self.bus_name = bus_name;
hcd->self.uses_dma = (sysdev->dma_mask != NULL);
- init_timer(&hcd->rh_timer);
- hcd->rh_timer.function = rh_timer_func;
- hcd->rh_timer.data = (unsigned long) hcd;
+ timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
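The rh_timer change above follows the kernel-wide timer API migration: the callback now receives the timer_list pointer and recovers its container with from_timer(). A generic sketch of the pattern (struct and field names are illustrative only):

	struct foo_hcd {
		struct timer_list poll_timer;
		/* ... */
	};

	static void foo_poll(struct timer_list *t)
	{
		struct foo_hcd *foo = from_timer(foo, t, poll_timer);

		/* poll the hardware using foo */
	}

	/* replaces init_timer() plus ->function/->data assignments */
	timer_setup(&foo->poll_timer, foo_poll, 0);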
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e9ce6bb0b22d..7ccdd3d4db84 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB hub driver.
*
@@ -7,7 +8,6 @@
* (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/kernel.h>
@@ -1482,7 +1482,7 @@ static int hub_configure(struct usb_hub *hub,
/* power budgeting mostly matters with bus-powered hubs,
* and battery-powered root hubs (may provide just 8 mA).
*/
- ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
+ ret = usb_get_std_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
if (ret) {
message = "can't get hub status";
goto fail;
@@ -3279,7 +3279,7 @@ static int finish_port_resume(struct usb_device *udev)
*/
if (status == 0) {
devstatus = 0;
- status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
+ status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
/* If a normal resume failed, try doing a reset-resume */
if (status && !udev->reset_resume && udev->persist_enabled) {
@@ -3303,7 +3303,7 @@ static int finish_port_resume(struct usb_device *udev)
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
status = usb_disable_remote_wakeup(udev);
} else {
- status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+ status = usb_get_std_status(udev, USB_RECIP_INTERFACE, 0,
&devstatus);
if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
| USB_INTRF_STAT_FUNC_RW))
@@ -4183,6 +4183,19 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
return ret;
}
+/*
+ * usb_port_disable - disable a usb device's upstream port
+ * @udev: device to disable
+ * Context: @udev locked, must be able to sleep.
+ *
+ * Disables a USB device that isn't in active use.
+ */
+int usb_port_disable(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+ return hub_port_disable(hub, udev->portnum, 0);
+}
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
@@ -4853,7 +4866,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
&& udev->bus_mA <= unit_load) {
u16 devstat;
- status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
+ status = usb_get_std_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
if (status) {
dev_dbg(&udev->dev, "get status %d ?\n", status);
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 34c1a7e22aae..2a700ccc868c 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb hub driver head file
*
@@ -8,15 +9,6 @@
* Copyright (C) 2012 Intel Corp (tianyu.lan@intel.com)
*
* move struct usb_hub to this file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
*/
#include <linux/usb.h>
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 1af877942110..9dbb429cd471 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB port LED trigger
*
* Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 371a07d874a3..77001bcfc504 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1,8 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* message.c - synchronous message handling
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pci.h> /* for scatterlist macros */
@@ -918,7 +918,8 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
/**
* usb_get_status - issues a GET_STATUS call
* @dev: the device whose status is being checked
- * @type: USB_RECIP_*; for device, interface, or endpoint
+ * @recip: USB_RECIP_*; for device, interface, or endpoint
+ * @type: USB_STATUS_TYPE_*; for standard or PTM status types
* @target: zero (for device), else interface or endpoint number
* @data: pointer to two bytes of bitmap data
* Context: !in_interrupt ()
@@ -937,24 +938,58 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
* Returns 0 and the status value in *@data (in host byte order) on success,
* or else the status code from the underlying usb_control_msg() call.
*/
-int usb_get_status(struct usb_device *dev, int type, int target, void *data)
+int usb_get_status(struct usb_device *dev, int recip, int type, int target,
+ void *data)
{
int ret;
- __le16 *status = kmalloc(sizeof(*status), GFP_KERNEL);
+ void *status;
+ int length;
+
+ switch (type) {
+ case USB_STATUS_TYPE_STANDARD:
+ length = 2;
+ break;
+ case USB_STATUS_TYPE_PTM:
+ if (recip != USB_RECIP_DEVICE)
+ return -EINVAL;
+ length = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = kmalloc(length, GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- USB_REQ_GET_STATUS, USB_DIR_IN | type, 0, target, status,
- sizeof(*status), USB_CTRL_GET_TIMEOUT);
+ USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD,
+ target, status, length, USB_CTRL_GET_TIMEOUT);
- if (ret == 2) {
- *(u16 *) data = le16_to_cpu(*status);
+ switch (ret) {
+ case 4:
+ if (type != USB_STATUS_TYPE_PTM) {
+ ret = -EIO;
+ break;
+ }
+
+ *(u32 *) data = le32_to_cpu(*(__le32 *) status);
+ ret = 0;
+ break;
+ case 2:
+ if (type != USB_STATUS_TYPE_STANDARD) {
+ ret = -EIO;
+ break;
+ }
+
+ *(u16 *) data = le16_to_cpu(*(__le16 *) status);
ret = 0;
- } else if (ret >= 0) {
+ break;
+ default:
ret = -EIO;
}
+
kfree(status);
return ret;
}
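Callers elsewhere in this series use usb_get_std_status(); the wrappers themselves are not part of the hunks shown here, so the following is only a sketch of what they presumably look like (assumed to live in include/linux/usb.h):

	static inline int usb_get_std_status(struct usb_device *dev, int recip,
					     int target, void *data)
	{
		return usb_get_status(dev, recip, USB_STATUS_TYPE_STANDARD,
				      target, data);
	}

	static inline int usb_get_ptm_status(struct usb_device *dev, void *data)
	{
		return usb_get_status(dev, USB_RECIP_DEVICE, USB_STATUS_TYPE_PTM,
				      0, data);
	}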
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index b12a463a3e22..ab474b11523e 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* All the USB notify logic
*
@@ -7,7 +8,6 @@
* but fixed up to not be so broken.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 3863bb1ce8c5..2be968353257 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* of.c The helpers for hcd device tree support
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Author: Peter Chen <peter.chen@freescale.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/of.h>
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index 085049d37d7a..2ae90158ded7 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/core/otg_whitelist.h
*
* Copyright (C) 2004 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 460c855be0d0..1a01e9ad3804 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb port device code
*
* Copyright (C) 2012 Intel Corp
*
* Author: Lan Tianyu <tianyu.lan@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
#include <linux/slab.h>
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a6aaf2f193a4..f1dbab6f798f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB device quirk handling logic and table
*
* Copyright (c) 2007 Oliver Neukum
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- *
*/
#include <linux/usb.h>
@@ -203,6 +198,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Huawei 4G LTE module */
+ { USB_DEVICE(0x12d1, 0x15bb), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c3), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -221,6 +222,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair K70 LUX */
+ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d930bfda4010..27bb34043053 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/sysfs.c
*
@@ -8,7 +9,6 @@
* All of the sysfs file attributes for usb devices and interfaces.
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
@@ -654,7 +654,8 @@ static int add_power_attributes(struct device *dev)
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
- if (udev->speed == USB_SPEED_SUPER &&
+ if ((udev->speed == USB_SPEED_SUPER ||
+ udev->speed == USB_SPEED_SUPER_PLUS) &&
udev->lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb3_hardware_lpm_attr_group);
@@ -973,7 +974,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *string;
intf = to_usb_interface(dev);
- string = ACCESS_ONCE(intf->cur_altsetting->string);
+ string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
@@ -989,7 +990,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = ACCESS_ONCE(intf->cur_altsetting);
+ alt = READ_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 47903d510955..9fdf137c4865 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/module.h>
@@ -187,6 +187,31 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
/*-------------------------------------------------------------------*/
+static const int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+};
+
+/**
+ * usb_urb_ep_type_check - sanity check of endpoint in the given urb
+ * @urb: urb to be checked
+ *
+ * This performs a light-weight sanity check for the endpoint in the
+ * given urb. It returns 0 if the urb contains a valid endpoint, otherwise
+ * a negative error code.
+ */
+int usb_urb_ep_type_check(const struct urb *urb)
+{
+ const struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(urb->dev, urb->pipe);
+ if (!ep)
+ return -EINVAL;
+ if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
+
/**
* usb_submit_urb - issue an asynchronous transfer request for an endpoint
* @urb: pointer to the urb describing the request
@@ -326,9 +351,6 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
*/
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
- static int pipetypes[4] = {
- PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
- };
int xfertype, max;
struct usb_device *dev;
struct usb_host_endpoint *ep;
@@ -444,7 +466,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
*/
/* Check that the pipe's type matches the endpoint's type */
- if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+ if (usb_urb_ep_type_check(urb))
dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
usb_pipetype(urb->pipe), pipetypes[xfertype]);
@@ -492,6 +514,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if ((urb->interval < 6)
&& (xfertype == USB_ENDPOINT_XFER_INT))
return -EINVAL;
+ /* fall through */
default:
if (urb->interval <= 0)
return -EINVAL;
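A hedged sketch of how a driver might use the newly exported helper to validate a caller-supplied URB before submission (function name hypothetical; in-tree users typically check once when the URB is set up rather than on every submit):

	static int my_submit(struct urb *urb)
	{
		/* reject URBs whose pipe type does not match the endpoint */
		if (usb_urb_ep_type_check(urb))
			return -EINVAL;

		return usb_submit_urb(urb, GFP_KERNEL);
	}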
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index ef9cf4a21afe..84da17460568 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB-ACPI glue code
*
* Copyright 2012 Red Hat <mjg@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
*/
#include <linux/module.h>
#include <linux/usb.h>
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 17681d5638ac..845286f08ab0 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/core/usb.c
*
@@ -13,7 +14,6 @@
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index dc6949248823..2bee08d084ae 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Released under the GPLv2 only.
- * SPDX-License-Identifier: GPL-2.0
*/
#include <linux/pm.h>
@@ -73,6 +73,7 @@ extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
extern int usb_device_supports_lpm(struct usb_device *udev);
+extern int usb_port_disable(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index b9237e1e45d0..440320cc20a4 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG
ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 1b6612c2cdda..82a7d98c3436 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core.c - DesignWare HS OTG Controller common routines
*
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 8367d4f985c1..f66c94130cac 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core.h - DesignWare HS OTG Controller common declarations
*
@@ -395,6 +396,9 @@ enum dwc2_ep0_state {
* (default when phy_type is UTMI+ or ULPI)
* 1 - 6 MHz
* (default when phy_type is Full Speed)
+ * @oc_disable: Flag to disable overcurrent condition.
+ * 0 - Allow overcurrent condition to get detected
+ * 1 - Disable overcurrent condtion to get detected
* @ts_dline: Enable Term Select Dline pulsing
* 0 - No (default)
* 1 - Yes
@@ -492,6 +496,7 @@ struct dwc2_core_params {
bool dma_desc_fs_enable;
bool host_support_fs_ls_low_power;
bool host_ls_low_power_phy_clk;
+ bool oc_disable;
u8 host_channels;
u16 host_rx_fifo_size;
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index b8bcb007c92a..ab3fa1630853 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* core_intr.c - DesignWare HS OTG Controller common interrupt handling
*
diff --git a/drivers/usb/dwc2/debug.h b/drivers/usb/dwc2/debug.h
index 8222783e6822..6f23219c13cb 100644
--- a/drivers/usb/dwc2/debug.h
+++ b/drivers/usb/dwc2/debug.h
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debug.h - Designware USB2 DRD controller debug header
*
* Copyright (C) 2015 Intel Corporation
* Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "core.h"
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 794b959a7c8c..f4650a88be78 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debugfs.c - Designware USB2 DRD controller debugfs
*
* Copyright (C) 2015 Intel Corporation
* Mian Yousaf Kaukab <yousaf.kaukab@intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/spinlock.h>
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0d8e09ccb59c..88529d092503 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -8,10 +9,6 @@
* http://armlinux.simtec.co.uk/
*
* S3C USB2.0 High-speed / OtG driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -3202,6 +3199,8 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
call_gadget(hsotg, disconnect);
hsotg->lx_state = DWC2_L3;
+
+ usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
}
/**
@@ -4004,6 +4003,11 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
+ if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
+ dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
+ return -EINVAL;
+ }
+
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index c2631145f404..7b6eb0ad513b 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd.c - DesignWare HS OTG Controller host-mode routines
*
@@ -213,6 +214,11 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
if (hsotg->params.phy_ulpi_ddr)
usbcfg |= GUSBCFG_DDRSEL;
+
+ /* Set external VBUS indicator as needed. */
+ if (hsotg->params.oc_disable)
+ usbcfg |= (GUSBCFG_ULPI_INT_VBUS_IND |
+ GUSBCFG_INDICATORPASSTHROUGH);
break;
case DWC2_PHY_TYPE_PARAM_UTMI:
/* UTMI+ interface */
@@ -3277,7 +3283,6 @@ static void dwc2_conn_id_status_change(struct work_struct *work)
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
spin_lock_irqsave(&hsotg->lock, flags);
- dwc2_hsotg_disconnect(hsotg);
dwc2_hsotg_core_init_disconnected(hsotg, false);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hsotg_core_connect(hsotg);
@@ -3296,8 +3301,12 @@ host:
if (count > 250)
dev_err(hsotg->dev,
"Connection id status change timed out\n");
- hsotg->op_state = OTG_STATE_A_HOST;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_hsotg_disconnect(hsotg);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ hsotg->op_state = OTG_STATE_A_HOST;
/* Initialize the Core for Host mode */
dwc2_core_init(hsotg, false);
dwc2_enable_global_interrupts(hsotg);
@@ -3305,9 +3314,9 @@ host:
}
}
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
{
- struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+ struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
@@ -5146,8 +5155,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
- (unsigned long)hsotg);
+ timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
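The two hunks above are part of the 4.15-era timer API conversion: the callback now takes a struct timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long cookie. A minimal sketch of the pattern, with hypothetical names (my_dev, my_wakeup_detected) and a made-up 71 ms timeout:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical driver state; embedding the timer lets the callback recover
 * the containing structure with from_timer() instead of a cast. */
struct my_dev {
	struct timer_list wkp_timer;
	int wakeups;
};

static void my_wakeup_detected(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, wkp_timer);

	dev->wakeups++;
}

static void my_dev_start(struct my_dev *dev)
{
	/* Previously: setup_timer(&dev->wkp_timer, cb, (unsigned long)dev); */
	timer_setup(&dev->wkp_timer, my_wakeup_detected, 0);
	mod_timer(&dev->wkp_timer, jiffies + msecs_to_jiffies(71));
}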
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 11c3c145b793..78e9e01051b5 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd.h - DesignWare HS OTG Controller host-mode declarations
*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index b8bdf545c3a7..28c8898b3b66 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 28a8210710b1..916d991b96b8 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
*
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 3ae8b1bbaa55..fcd1676c7f0b 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hcd_queue.c - DesignWare HS OTG Controller host queuing routines
*
@@ -1274,9 +1275,9 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
*
* @work: Pointer to a qh unreserve_work.
*/
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
- struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
@@ -1466,8 +1467,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
- setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
- (unsigned long)qh);
+ timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 4592012c4743..2c906d8ee465 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* hw.h - DesignWare HS OTG Controller hardware definitions
*
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a3ffe97170ff..ef73af6e03a9 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright (C) 2004-2016 Synopsys, Inc.
*
@@ -136,6 +137,15 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
p->activate_stm_fs_transceiver = true;
}
+static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->host_rx_fifo_size = 622;
+ p->host_nperio_tx_fifo_size = 128;
+ p->host_perio_tx_fifo_size = 256;
+}
+
const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params },
{ .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params },
@@ -154,6 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "st,stm32f4x9-fsotg",
.data = dwc2_set_stm32f4x9_fsotg_params },
{ .compatible = "st,stm32f4x9-hsotg" },
+ { .compatible = "st,stm32f7xx-hsotg",
+ .data = dwc2_set_stm32f7xx_hsotg_params },
{},
};
MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
@@ -335,6 +347,9 @@ static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
num);
}
}
+
+ if (of_find_property(hsotg->dev->of_node, "disable-over-current", NULL))
+ p->oc_disable = true;
}
static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index fdeb8c7bf30a..3ecc951a1aea 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* pci.c - DesignWare HS OTG Controller PCI driver
*
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index daf0d37acb37..3e26550d13dd 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* platform.c - DesignWare HS OTG Controller platform driver
*
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index f15fabbd1e59..7ac725038f8d 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# define_trace.h needs to know how to find our header
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 03474d3575ab..07832509584f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* core.c - DesignWare USB3 DRD Controller Core file
*
@@ -5,18 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/version.h>
@@ -156,9 +145,8 @@ static void __dwc3_set_mode(struct work_struct *work)
} else {
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
-
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
}
break;
case DWC3_GCTL_PRTCAP_DEVICE:
@@ -166,8 +154,8 @@ static void __dwc3_set_mode(struct work_struct *work)
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret)
@@ -927,12 +915,13 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
+ dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_DEVICE);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
@@ -942,12 +931,13 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
+ dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
- if (dwc->usb2_generic_phy)
- phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST);
+ phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
@@ -1293,21 +1283,19 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
{
unsigned long flags;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_suspend(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
+ dwc3_core_exit(dwc);
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
}
- dwc3_core_exit(dwc);
-
return 0;
}
@@ -1316,18 +1304,17 @@ static int dwc3_resume_common(struct dwc3 *dwc)
unsigned long flags;
int ret;
- ret = dwc3_core_init(dwc);
- if (ret)
- return ret;
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
- /* FALLTHROUGH */
- case USB_DR_MODE_HOST:
+ break;
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
@@ -1338,7 +1325,7 @@ static int dwc3_resume_common(struct dwc3 *dwc)
static int dwc3_runtime_checks(struct dwc3 *dwc)
{
- switch (dwc->dr_mode) {
+ switch (dwc->current_dr_role) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
if (dwc->connected)
@@ -1381,19 +1368,17 @@ static int dwc3_runtime_resume(struct device *dev)
if (ret)
return ret;
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
dwc3_gadget_process_pending_events(dwc);
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
}
pm_runtime_mark_last_busy(dev);
- pm_runtime_put(dev);
return 0;
}
@@ -1402,13 +1387,12 @@ static int dwc3_runtime_idle(struct device *dev)
{
struct dwc3 *dwc = dev_get_drvdata(dev);
- switch (dwc->dr_mode) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
if (dwc3_runtime_checks(dwc))
return -EBUSY;
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
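A likely motivation for dropping the NULL checks around the generic PHY calls in the hunks above is that phy_set_mode() is expected to return 0 when handed a NULL phy, so callers need no guard of their own. A hedged sketch, assuming that behaviour (example_set_host_mode is a hypothetical helper, not driver code):

#include <linux/phy/phy.h>

/* Sketch only: relies on phy_set_mode() tolerating a NULL phy pointer. */
static void example_set_host_mode(struct phy *usb2_phy, struct phy *usb3_phy)
{
	phy_set_mode(usb2_phy, PHY_MODE_USB_HOST);	/* no-op if usb2_phy is NULL */
	phy_set_mode(usb3_phy, PHY_MODE_USB_HOST);
}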
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ea910acb4bb0..4a4a4c98508c 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* core.h - DesignWare USB3 DRD Core Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_CORE_H
@@ -529,6 +521,7 @@ struct dwc3_event_buffer {
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
+ * @frame_number: set to the frame number on which this transfer should start (ISOC)
* @interval: the interval on which the ISOC transfer is started
* @allocated_requests: number of requests allocated
* @queued_requests: number of requests queued for transfer
@@ -581,6 +574,7 @@ struct dwc3_ep {
u8 resource_index;
u32 allocated_requests;
u32 queued_requests;
+ u32 frame_number;
u32 interval;
char name[20];
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 5e9c070ec874..368f8e59219a 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debug.h - DesignWare USB3 DRD Controller Debug Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DWC3_DEBUG_H
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4e09be80e59f..00e65530c81e 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* debugfs.c - DesignWare USB3 DRD Controller DebugFS file
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 2765c51c7ef5..cc8ab9a8e9d2 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -1,21 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* drd.c - DesignWare USB3 DRD Controller Dual-role support
*
* Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Roger Quadros <rogerq@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/extcon.h>
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e089df72f766..a94fb1ba8f2c 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-exynos.c - Samsung EXYNOS DWC3 Specific Glue layer
*
@@ -5,15 +6,6 @@
* http://www.samsung.com
*
* Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index d2ed9523e77c..193a9a88222a 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-keystone.c - Keystone Specific Glue layer
*
* Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: WingMan Kwok <w-kwok2@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index a26d1fde0f5e..c4a4d7bd2766 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-of-simple.c - OF glue layer for simple integrations
*
@@ -5,15 +6,6 @@
*
* Author: Felipe Balbi <balbi@ti.com>
*
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This is a combination of the old dwc3-qcom.c by Ivan T. Ivanov
* <iivanov@mm-sol.com> and the original patch adding support for Xilinx' SoC
* by Subbaraya Sundeep Bhatta <subbaraya.sundeep.bhatta@xilinx.com>
@@ -28,11 +20,13 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
struct dwc3_of_simple {
struct device *dev;
struct clk **clks;
int num_clocks;
+ struct reset_control *resets;
};
static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
@@ -95,10 +89,21 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, simple);
simple->dev = dev;
+ simple->resets = of_reset_control_array_get_optional_exclusive(np);
+ if (IS_ERR(simple->resets)) {
+ ret = PTR_ERR(simple->resets);
+ dev_err(dev, "failed to get device resets, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(simple->resets);
+ if (ret)
+ goto err_resetc_put;
+
ret = dwc3_of_simple_clk_init(simple, of_count_phandle_with_args(np,
"clocks", "#clock-cells"));
if (ret)
- return ret;
+ goto err_resetc_assert;
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
@@ -107,7 +112,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
clk_put(simple->clks[i]);
}
- return ret;
+ goto err_resetc_assert;
}
pm_runtime_set_active(dev);
@@ -115,6 +120,13 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
pm_runtime_get_sync(dev);
return 0;
+
+err_resetc_assert:
+ reset_control_assert(simple->resets);
+
+err_resetc_put:
+ reset_control_put(simple->resets);
+ return ret;
}
static int dwc3_of_simple_remove(struct platform_device *pdev)
@@ -123,12 +135,15 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
+ of_platform_depopulate(dev);
+
for (i = 0; i < simple->num_clocks; i++) {
clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
}
- of_platform_depopulate(dev);
+ reset_control_assert(simple->resets);
+ reset_control_put(simple->resets);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
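The probe and remove changes above follow the usual optional reset-array pattern: acquire the resets, deassert them for the lifetime of the device, and reassert and release them on error or removal. A minimal sketch of that flow, with hypothetical function names and the error handling reduced to the essentials:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/reset.h>

static int example_resets_init(struct device_node *np,
			       struct reset_control **resets)
{
	int ret;

	*resets = of_reset_control_array_get_optional_exclusive(np);
	if (IS_ERR(*resets))
		return PTR_ERR(*resets);	/* may be -EPROBE_DEFER */

	ret = reset_control_deassert(*resets);
	if (ret)
		reset_control_put(*resets);
	return ret;
}

static void example_resets_exit(struct reset_control *resets)
{
	reset_control_assert(resets);
	reset_control_put(resets);
}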
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 3530795bbb8f..a4719e853b85 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-omap.c - OMAP Specific Glue layer
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 54343fbd85ee..3ba11136ebf0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* dwc3-pci.c - PCI Specific glue layer
*
@@ -5,21 +6,13 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
+#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
@@ -61,6 +54,7 @@ struct dwc3_pci {
guid_t guid;
unsigned int has_dsm_for_pm:1;
+ struct work_struct wakeup_work;
};
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -174,6 +168,22 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
return 0;
}
+#ifdef CONFIG_PM
+static void dwc3_pci_resume_work(struct work_struct *work)
+{
+ struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
+ struct platform_device *dwc3 = dwc->dwc3;
+ int ret;
+
+ ret = pm_runtime_get_sync(&dwc3->dev);
+ if (ret)
+ return;
+
+ pm_runtime_mark_last_busy(&dwc3->dev);
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
+}
+#endif
+
static int dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -232,6 +242,9 @@ static int dwc3_pci_probe(struct pci_dev *pci,
device_init_wakeup(dev, true);
pci_set_drvdata(pci, dwc);
pm_runtime_put(dev);
+#ifdef CONFIG_PM
+ INIT_WORK(&dwc->wakeup_work, dwc3_pci_resume_work);
+#endif
return 0;
err:
@@ -243,6 +256,9 @@ static void dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *dwc = pci_get_drvdata(pci);
+#ifdef CONFIG_PM
+ cancel_work_sync(&dwc->wakeup_work);
+#endif
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
platform_device_unregister(dwc->dwc3);
@@ -318,14 +334,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
static int dwc3_pci_runtime_resume(struct device *dev)
{
struct dwc3_pci *dwc = dev_get_drvdata(dev);
- struct platform_device *dwc3 = dwc->dwc3;
int ret;
ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
if (ret)
return ret;
- return pm_runtime_get(&dwc3->dev);
+ queue_work(pm_wq, &dwc->wakeup_work);
+
+ return 0;
}
#endif /* CONFIG_PM */
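The dwc3-pci change above stops resuming the child platform device directly from the parent's ->runtime_resume() callback and instead queues a work item on pm_wq that performs the synchronous resume. A hedged sketch of that pattern, using hypothetical names rather than the driver's own:

#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct example_parent {
	struct device *child;
	struct work_struct wakeup_work;
};

static void example_resume_work(struct work_struct *work)
{
	struct example_parent *p =
		container_of(work, struct example_parent, wakeup_work);

	/* pm_runtime_get_sync() may return 1 if the child is already active. */
	if (pm_runtime_get_sync(p->child) < 0) {
		pm_runtime_put_noidle(p->child);
		return;
	}

	pm_runtime_mark_last_busy(p->child);
	pm_runtime_put_sync_autosuspend(p->child);
}

static int example_runtime_resume(struct example_parent *p)
{
	/* The child cannot be resumed synchronously here; let pm_wq do it. */
	queue_work(pm_wq, &p->wakeup_work);
	return 0;
}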
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 505676fd3ba4..16081383c401 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* dwc3-st.c Support for dwc3 platform devices on ST Microelectronics platforms
*
@@ -10,11 +11,6 @@
* Contributors: Aymen Bouattay <aymen.bouattay@st.com>
* Peter Griffin <peter.griffin@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Inspired by dwc3-omap.c and dwc3-exynos.c.
*/
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 75e6cb044eb2..fd3e7ad2eb0e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -487,14 +479,10 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc,
static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
- enum usb_device_state state;
u32 wValue;
- u32 wIndex;
int ret = 0;
wValue = le16_to_cpu(ctrl->wValue);
- wIndex = le16_to_cpu(ctrl->wIndex);
- state = dwc->gadget.state;
switch (wValue) {
case USB_INTRF_FUNC_SUSPEND:
@@ -517,14 +505,10 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
struct usb_ctrlrequest *ctrl, int set)
{
struct dwc3_ep *dep;
- enum usb_device_state state;
u32 wValue;
- u32 wIndex;
int ret;
wValue = le16_to_cpu(ctrl->wValue);
- wIndex = le16_to_cpu(ctrl->wIndex);
- state = dwc->gadget.state;
switch (wValue) {
case USB_ENDPOINT_HALT:
@@ -551,10 +535,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
{
u32 recip;
int ret;
- enum usb_device_state state;
recip = ctrl->bRequestType & USB_RECIP_MASK;
- state = dwc->gadget.state;
switch (recip) {
case USB_RECIP_DEVICE:
@@ -712,12 +694,10 @@ static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
struct dwc3_ep *dep;
enum usb_device_state state = dwc->gadget.state;
u16 wLength;
- u16 wValue;
if (state == USB_STATE_DEFAULT)
return -EINVAL;
- wValue = le16_to_cpu(ctrl->wValue);
wLength = le16_to_cpu(ctrl->wLength);
if (wLength != 6) {
@@ -842,9 +822,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct usb_request *ur;
struct dwc3_trb *trb;
struct dwc3_ep *ep0;
- unsigned maxp;
- unsigned remaining_ur_length;
- void *buf;
u32 transferred = 0;
u32 status;
u32 length;
@@ -871,11 +848,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
}
ur = &r->request;
- buf = ur->buf;
- remaining_ur_length = ur->length;
length = trb->size & DWC3_TRB_SIZE_MASK;
- maxp = ep0->endpoint.maxpacket;
transferred = ur->length - length;
ur->actual += transferred;
@@ -1001,7 +975,6 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
req->request.length && req->request.zero) {
u32 maxpacket;
- u32 rem;
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
@@ -1009,7 +982,6 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
return;
maxpacket = dep->endpoint.maxpacket;
- rem = req->request.length % maxpacket;
/* prepare normal TRB */
dwc3_ep0_prepare_one_trb(dep, req->request.dma,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f064f1549333..981fd986cf82 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -1151,9 +1143,6 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
- if (!dwc3_calc_trbs_left(dep))
- return;
-
/*
* We can get in a situation where there's a request in the started list
* but there weren't enough TRBs to fully kick it in the first time
@@ -1194,7 +1183,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
}
}
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
struct dwc3_request *req;
@@ -1202,6 +1191,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
int ret;
u32 cmd;
+ if (!dwc3_calc_trbs_left(dep))
+ return 0;
+
starting = !(dep->flags & DWC3_EP_BUSY);
dwc3_prepare_trbs(dep);
@@ -1216,8 +1208,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
if (starting) {
params.param0 = upper_32_bits(req->trb_dma);
params.param1 = lower_32_bits(req->trb_dma);
- cmd = DWC3_DEPCMD_STARTTRANSFER |
- DWC3_DEPCMD_PARAM(cmd_param);
+ cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
} else {
cmd = DWC3_DEPCMD_UPDATETRANSFER |
DWC3_DEPCMD_PARAM(dep->resource_index);
@@ -1258,8 +1252,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
struct dwc3_ep *dep, u32 cur_uf)
{
- u32 uf;
-
if (list_empty(&dep->pending_list)) {
dev_info(dwc->dev, "%s: ran out of requests\n",
dep->name);
@@ -1271,9 +1263,8 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
* Schedule the first trb for one interval in the future or at
* least 4 microframes.
*/
- uf = cur_uf + max_t(u32, 4, dep->interval);
-
- __dwc3_gadget_kick_transfer(dep, uf);
+ dep->frame_number = cur_uf + max_t(u32, 4, dep->interval);
+ __dwc3_gadget_kick_transfer(dep);
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1290,7 +1281,6 @@ static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3 *dwc = dep->dwc;
- int ret = 0;
if (!dep->endpoint.desc) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
@@ -1337,24 +1327,14 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
}
if ((dep->flags & DWC3_EP_BUSY) &&
- !(dep->flags & DWC3_EP_MISSED_ISOC)) {
- WARN_ON_ONCE(!dep->resource_index);
- ret = __dwc3_gadget_kick_transfer(dep,
- dep->resource_index);
- }
-
- goto out;
- }
+ !(dep->flags & DWC3_EP_MISSED_ISOC))
+ goto out;
- if (!dwc3_calc_trbs_left(dep))
return 0;
+ }
- ret = __dwc3_gadget_kick_transfer(dep, 0);
out:
- if (ret == -EBUSY)
- ret = 0;
-
- return ret;
+ return __dwc3_gadget_kick_transfer(dep);
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -2347,7 +2327,7 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
req->request.actual = length - req->remaining;
if ((req->request.actual < length) && req->num_pending_sgs)
- return __dwc3_gadget_kick_transfer(dep, 0);
+ return __dwc3_gadget_kick_transfer(dep);
dwc3_gadget_giveback(dep, req, status);
@@ -2440,13 +2420,8 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
if (!dep->endpoint.desc)
return;
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- int ret;
-
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (!ret || ret == -EBUSY)
- return;
- }
+ if (!usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ __dwc3_gadget_kick_transfer(dep);
}
static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
@@ -2487,15 +2462,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dwc3_endpoint_transfer_complete(dwc, dep, event);
break;
case DWC3_DEPEVT_XFERNOTREADY:
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_start_isoc(dwc, dep, event);
- } else {
- int ret;
-
- ret = __dwc3_gadget_kick_transfer(dep, 0);
- if (!ret || ret == -EBUSY)
- return;
- }
+ else
+ __dwc3_gadget_kick_transfer(dep);
break;
case DWC3_DEPEVT_STREAMEVT:
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 4a3227543255..578aa856f986 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gadget.h - DesignWare USB3 DRD Gadget Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_GADGET_H
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 76f0b0df37c1..1a3878a3be78 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* host.c - DesignWare USB3 DRD Controller Host Glue
*
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Felipe Balbi <balbi@ti.com>,
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index c69b06696824..70acdf94a0bf 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* io.h - DesignWare USB3 DRD IO Header
*
@@ -5,15 +6,6 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_IO_H
diff --git a/drivers/usb/dwc3/trace.c b/drivers/usb/dwc3/trace.c
index 6cd166412ad0..f8886f3f3c9e 100644
--- a/drivers/usb/dwc3/trace.c
+++ b/drivers/usb/dwc3/trace.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.c - DesignWare USB3 DRD Controller Trace Support
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 6504b116da04..a9dd5c64e6c7 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.h - DesignWare USB3 DRD Controller Trace Support
*
* Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index e87ce8e9edee..f62b5f3c2d67 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* ulpi.c - DesignWare USB3 Controller's ULPI PHY interface
*
* Copyright (C) 2015 Intel Corporation
*
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/ulpi/regs.h>
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
index fcde2286da1c..7b77b49d3b8c 100644
--- a/drivers/usb/early/Makefile
+++ b/drivers/usb/early/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for early USB devices
#
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index e2654443e8eb..d633c2abe5a4 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Standalone EHCI usb debug driver
*
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 12fe70beae69..8a700b45b9a9 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* xhci-dbc.c - xHCI debug capability early driver
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
index 2df0f6e613fe..673686eeddd7 100644
--- a/drivers/usb/early/xhci-dbc.h
+++ b/drivers/usb/early/xhci-dbc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-dbc.h - xHCI debug capability early driver
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_XHCI_DBC_H
@@ -90,8 +87,8 @@ struct xdbc_context {
#define XDBC_INFO_CONTEXT_SIZE 48
#define XDBC_MAX_STRING_LENGTH 64
-#define XDBC_STRING_MANUFACTURER "Linux"
-#define XDBC_STRING_PRODUCT "Remote GDB"
+#define XDBC_STRING_MANUFACTURER "Linux Foundation"
+#define XDBC_STRING_PRODUCT "Linux USB GDB Target"
#define XDBC_STRING_SERIAL "0001"
struct xdbc_strings {
@@ -103,7 +100,7 @@ struct xdbc_strings {
#define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */
#define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */
-#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */
+#define XDBC_PRODUCT_ID 0x0011 /* __le16 idProduct; device 0011 */
#define XDBC_DEVICE_REV 0x0010 /* 0.10 */
/*
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 598a67d6ba05..130dad7130b6 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB peripheral controller drivers
#
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5d061b3d8224..eec14e6ed20b 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* composite.c - infrastructure for Composite USB Gadgets
*
* Copyright (C) 2006-2008 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -170,20 +166,20 @@ int config_ep_by_speed(struct usb_gadget *g,
want_comp_desc = 1;
break;
}
- /* else: Fall trough */
+ /* fall through */
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
speed_desc = f->ss_descriptors;
want_comp_desc = 1;
break;
}
- /* else: Fall trough */
+ /* fall through */
case USB_SPEED_HIGH:
if (gadget_is_dualspeed(g)) {
speed_desc = f->hs_descriptors;
break;
}
- /* else: fall through */
+ /* fall through */
default:
speed_desc = f->fs_descriptors;
}
@@ -224,6 +220,7 @@ ep_found:
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
+ /* fall through */
case USB_ENDPOINT_XFER_BULK:
case USB_ENDPOINT_XFER_INT:
_ep->maxburst = comp_desc->bMaxBurst + 1;
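The corrected fall-through comments above mark an intentional descriptor-selection ladder: if the gadget cannot run at the requested speed, the next lower speed's descriptor set is used instead. A simplified sketch of that idea (example_pick_descriptors is a hypothetical helper, not the composite.c implementation):

#include <linux/usb/composite.h>

static struct usb_descriptor_header **
example_pick_descriptors(struct usb_gadget *g, struct usb_function *f)
{
	switch (g->speed) {
	case USB_SPEED_SUPER:
		if (gadget_is_superspeed(g) && f->ss_descriptors)
			return f->ss_descriptors;
		/* fall through */
	case USB_SPEED_HIGH:
		if (gadget_is_dualspeed(g) && f->hs_descriptors)
			return f->hs_descriptors;
		/* fall through */
	default:
		return f->fs_descriptors;
	}
}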
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 17a6077b89a4..2d115353424c 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* usb/gadget/config.c -- simplify building config descriptors
*
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index aeb9f3c40521..efba66ca0719 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -505,13 +506,13 @@ static struct configfs_attribute *gadget_config_attrs[] = {
NULL,
};
-static struct config_item_type gadget_config_type = {
+static const struct config_item_type gadget_config_type = {
.ct_item_ops = &gadget_config_item_ops,
.ct_attrs = gadget_config_attrs,
.ct_owner = THIS_MODULE,
};
-static struct config_item_type gadget_root_type = {
+static const struct config_item_type gadget_root_type = {
.ct_item_ops = &gadget_root_item_ops,
.ct_attrs = gadget_root_attrs,
.ct_owner = THIS_MODULE,
@@ -593,7 +594,7 @@ static struct configfs_group_operations functions_ops = {
.drop_item = &function_drop,
};
-static struct config_item_type functions_type = {
+static const struct config_item_type functions_type = {
.ct_group_ops = &functions_ops,
.ct_owner = THIS_MODULE,
};
@@ -694,7 +695,7 @@ static struct configfs_group_operations config_desc_ops = {
.drop_item = &config_desc_drop,
};
-static struct config_item_type config_desc_type = {
+static const struct config_item_type config_desc_type = {
.ct_group_ops = &config_desc_ops,
.ct_owner = THIS_MODULE,
};
@@ -1476,7 +1477,7 @@ static struct configfs_group_operations gadgets_ops = {
.drop_item = &gadgets_drop,
};
-static struct config_item_type gadgets_type = {
+static const struct config_item_type gadgets_type = {
.ct_group_ops = &gadgets_ops,
.ct_owner = THIS_MODULE,
};
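The repeated configfs changes in this file, and in the gadget function drivers below, simply constify the item-type descriptors that configfs no longer needs to modify at runtime. A minimal sketch with hypothetical names:

#include <linux/configfs.h>
#include <linux/module.h>

static struct configfs_attribute *example_attrs[] = {
	NULL,	/* attribute list terminator */
};

/* Declared const so the descriptor can live in read-only memory. */
static const struct config_item_type example_func_type = {
	.ct_attrs = example_attrs,
	.ct_owner = THIS_MODULE,
};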
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 540d5e92ed22..3b6f5298b2e8 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef USB__GADGET__CONFIGFS__H
#define USB__GADGET__CONFIGFS__H
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 30fdab0ae383..71b15c65b90f 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
*
* Copyright (C) 2004 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 86e825269947..5d3a6cf02218 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB peripheral controller drivers
#
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 5e3828d9dac7..9fc98de83624 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_acm.c -- USB CDC serial (ACM) function driver
*
@@ -6,10 +7,6 @@
* Copyright (C) 2008 by Nokia Corporation
* Copyright (C) 2009 by Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -786,7 +783,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type acm_func_type = {
+static const struct config_item_type acm_func_type = {
.ct_item_ops = &acm_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 4c488d15b6f6..b104ed0c1ab5 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_ecm.c -- USB CDC Ethernet (ECM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -845,7 +841,7 @@ static struct configfs_attribute *ecm_attrs[] = {
NULL,
};
-static struct config_item_type ecm_func_type = {
+static const struct config_item_type ecm_func_type = {
.ct_item_ops = &ecm_item_ops,
.ct_attrs = ecm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index 007ec6e4a5d4..37557651b600 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_eem.c -- USB CDC Ethernet (EEM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 EF Johnson Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -556,7 +552,7 @@ static struct configfs_attribute *eem_attrs[] = {
NULL,
};
-static struct config_item_type eem_func_type = {
+static const struct config_item_type eem_func_type = {
.ct_item_ops = &eem_item_ops,
.ct_attrs = eem_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8b342587f8ad..97ea059a7aa4 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_fs.c -- user mode file system API for USB composite function controllers
*
@@ -7,11 +8,6 @@
* Based on inode.c (GadgetFS) which was:
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
@@ -3385,7 +3381,7 @@ static struct configfs_item_operations ffs_item_ops = {
.release = ffs_attr_release,
};
-static struct config_item_type ffs_func_type = {
+static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -3677,6 +3673,7 @@ static void ffs_closed(struct ffs_data *ffs)
goto done;
ffs_obj->desc_ready = false;
+ ffs_obj->ffs_data = NULL;
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
ffs_obj->ffs_closed_callback)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index d8e359ef6eb1..daae35318a3a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_hid.c -- USB HID function driver
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -992,7 +988,7 @@ static struct configfs_attribute *hid_attrs[] = {
NULL,
};
-static struct config_item_type hid_func_type = {
+static const struct config_item_type hid_func_type = {
.ct_item_ops = &hidg_item_ops,
.ct_attrs = hid_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index e70093835e14..1803646b3678 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_loopback.c - USB peripheral loopback configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -556,7 +552,7 @@ static struct configfs_attribute *lb_attrs[] = {
NULL,
};
-static struct config_item_type lb_func_type = {
+static const struct config_item_type lb_func_type = {
.ct_item_ops = &lb_item_ops,
.ct_attrs = lb_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 5153e29870c3..acecd13dcbd9 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* f_mass_storage.c -- Mass Storage USB Composite Function
*
@@ -3140,7 +3141,7 @@ static struct configfs_attribute *fsg_lun_attrs[] = {
NULL,
};
-static struct config_item_type fsg_lun_type = {
+static const struct config_item_type fsg_lun_type = {
.ct_item_ops = &fsg_lun_item_ops,
.ct_attrs = fsg_lun_attrs,
.ct_owner = THIS_MODULE,
@@ -3331,7 +3332,7 @@ static struct configfs_group_operations fsg_group_ops = {
.drop_item = fsg_lun_drop,
};
-static struct config_item_type fsg_func_type = {
+static const struct config_item_type fsg_func_type = {
.ct_item_ops = &fsg_item_ops,
.ct_group_ops = &fsg_group_ops,
.ct_attrs = fsg_attrs,
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index dc05ca0c4359..58857fcf199f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef USB_F_MASS_STORAGE_H
#define USB_F_MASS_STORAGE_H
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 5d3d7941d2c2..4eb96b91cc40 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_midi.c -- USB MIDI class function driver
*
@@ -15,8 +16,6 @@
* and drivers/usb/gadget/midi.c,
* Copyright (C) 2006 Thumtronics Pty Ltd.
* Ben Williamson <ben.williamson@greyinnovation.com>
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
@@ -1189,7 +1188,7 @@ static struct configfs_attribute *midi_attrs[] = {
NULL,
};
-static struct config_item_type midi_func_type = {
+static const struct config_item_type midi_func_type = {
.ct_item_ops = &midi_item_ops,
.ct_attrs = midi_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 45b334ceaf2e..c5bce8e22983 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_ncm.c -- USB CDC Network (NCM) link function driver
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -1568,7 +1564,7 @@ static struct configfs_attribute *ncm_attrs[] = {
NULL,
};
-static struct config_item_type ncm_func_type = {
+static const struct config_item_type ncm_func_type = {
.ct_item_ops = &ncm_item_ops,
.ct_attrs = ncm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d43e86cea74f..55b7f57d2dc7 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_obex.c -- USB CDC OBEX function driver
*
@@ -5,11 +6,6 @@
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* Based on f_acm.c by Al Borchers and David Brownell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -411,7 +407,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type obex_func_type = {
+static const struct config_item_type obex_func_type = {
.ct_item_ops = &obex_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 9c4c58e4a1a2..7889bcc0509a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* f_phonet.c -- USB CDC Phonet function
*
* Copyright (C) 2007-2008 Nokia Corporation. All rights reserved.
*
* Author: Rémi Denis-Courmont
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/mm.h>
@@ -215,6 +212,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
case -ESHUTDOWN: /* disconnected */
case -ECONNRESET: /* disabled */
dev->stats.tx_aborted_errors++;
+ /* fall through */
default:
dev->stats.tx_errors++;
}
@@ -362,6 +360,7 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
/* Do resubmit in these cases: */
case -EOVERFLOW: /* request buffer overflow */
dev->stats.rx_over_errors++;
+ /* fall through */
default:
dev->stats.rx_errors++;
break;
@@ -599,7 +598,7 @@ static struct configfs_attribute *phonet_attrs[] = {
NULL,
};
-static struct config_item_type phonet_func_type = {
+static const struct config_item_type phonet_func_type = {
.ct_item_ops = &phonet_item_ops,
.ct_attrs = phonet_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index ea0da35a44e2..dd607b99eb1d 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_printer.c - USB printer function driver
*
@@ -8,11 +9,6 @@
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
@@ -1261,7 +1257,7 @@ static struct configfs_attribute *printer_attrs[] = {
NULL,
};
-static struct config_item_type printer_func_type = {
+static const struct config_item_type printer_func_type = {
.ct_item_ops = &printer_item_ops,
.ct_attrs = printer_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c7c5b3ce1d98..d48df36622b7 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_rndis.c -- RNDIS link function driver
*
@@ -6,11 +7,6 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -890,7 +886,7 @@ static struct configfs_attribute *rndis_attrs[] = {
NULL,
};
-static struct config_item_type rndis_func_type = {
+static const struct config_item_type rndis_func_type = {
.ct_item_ops = &rndis_item_ops,
.ct_attrs = rndis_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index cb00ada21d9c..c860f30a0ea2 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_serial.c - generic USB serial function driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#include <linux/slab.h>
@@ -281,7 +278,7 @@ static struct configfs_attribute *acm_attrs[] = {
NULL,
};
-static struct config_item_type serial_func_type = {
+static const struct config_item_type serial_func_type = {
.ct_item_ops = &serial_item_ops,
.ct_attrs = acm_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 8784fa12ea2c..9cdef108fb1b 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_sourcesink.c - USB peripheral source/sink configuration driver
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -1230,7 +1226,7 @@ static struct configfs_attribute *ss_attrs[] = {
NULL,
};
-static struct config_item_type ss_func_type = {
+static const struct config_item_type ss_func_type = {
.ct_item_ops = &ss_item_ops,
.ct_attrs = ss_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 434b983f3b4c..4d945254905d 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_subset.c -- "CDC Subset" Ethernet link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/slab.h>
@@ -412,7 +408,7 @@ static struct configfs_attribute *gether_attrs[] = {
NULL,
};
-static struct config_item_type gether_func_type = {
+static const struct config_item_type gether_func_type = {
.ct_item_ops = &gether_item_ops,
.ct_attrs = gether_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index a82e2bd5ea34..da81cf16b850 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
- * License: GPLv2 as published by FSF.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -1145,6 +1145,7 @@ static int usbg_submit_command(struct f_uas *fu,
default:
pr_debug_once("Unsupported prio_attr: %02x.\n",
cmd_iu->prio_attr);
+ /* fall through */
case UAS_SIMPLE_TAG:
cmd->prio_attr = TCM_SIMPLE_TAG;
break;
@@ -2166,7 +2167,7 @@ static struct configfs_item_operations tcm_item_ops = {
.release = tcm_attr_release,
};
-static struct config_item_type tcm_func_type = {
+static const struct config_item_type tcm_func_type = {
.ct_item_ops = &tcm_item_ops,
.ct_owner = THIS_MODULE,
};
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 29efbedc91f9..2746a926a8d9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac1.c -- USB Audio Class 1.0 Function (using u_audio API)
*
@@ -10,11 +11,6 @@
* This file is based on f_uac1.c which is
* Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
* Copyright (C) 2008 Analog Devices, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/audio.h>
@@ -709,7 +705,7 @@ static struct configfs_attribute *f_uac1_attrs[] = {
NULL,
};
-static struct config_item_type f_uac1_func_type = {
+static const struct config_item_type f_uac1_func_type = {
.ct_item_ops = &f_uac1_item_ops,
.ct_attrs = f_uac1_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 5d229e72912e..04f4b2862256 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_audio.c -- USB Audio class function driver
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/slab.h>
@@ -921,7 +920,7 @@ static struct configfs_attribute *f_uac1_attrs[] = {
NULL,
};
-static struct config_item_type f_uac1_func_type = {
+static const struct config_item_type f_uac1_func_type = {
.ct_item_ops = &f_uac1_item_ops,
.ct_attrs = f_uac1_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index f05c3f3e6103..11fe788b4308 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uac2.c -- USB Audio Class 2.0 Function
*
* Copyright (C) 2011
* Yadwinder Singh (yadi.brar01@gmail.com)
* Jaswinder Singh (jaswinder.singh@linaro.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/audio.h>
@@ -921,7 +917,7 @@ static struct configfs_attribute *f_uac2_attrs[] = {
NULL,
};
-static struct config_item_type f_uac2_func_type = {
+static const struct config_item_type f_uac2_func_type = {
.ct_item_ops = &f_uac2_item_ops,
.ct_attrs = f_uac2_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index f8a1881609a2..439eba660e95 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_gadget.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h
index d0a73bdcbba1..81defe4557fe 100644
--- a/drivers/usb/gadget/function/f_uvc.h
+++ b/drivers/usb/gadget/function/f_uvc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* f_uvc.h -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _F_UVC_H_
diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
index 492924d0d599..98b8462ad538 100644
--- a/drivers/usb/gadget/function/g_zero.h
+++ b/drivers/usb/gadget/function/g_zero.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header declares the utility functions used by "Gadget Zero", plus
* interfaces to its two single-configuration function drivers.
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index d6341045c631..51dd3e90b06c 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RNDIS MSG parser
*
* Authors: Benedikt Spranger, Pengutronix
* Robert Schwebel, Pengutronix
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
* This software was originally developed in conformance with
* Microsoft's Remote NDIS Specification License Agreement.
*
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 21e0430ffb98..c7e3a70ce6c1 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* RNDIS Definitions for Remote NDIS
*
* Authors: Benedikt Spranger, Pengutronix
* Robert Schwebel, Pengutronix
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
* This software was originally developed in conformance with
* Microsoft's Remote NDIS Specification License Agreement.
*/
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 8fbf6861690d..f7e6c42558eb 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* storage_common.c -- Common definitions for mass storage functionality
*
* Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index e0814a960132..e5e3a2553aaa 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef USB_STORAGE_COMMON_H
#define USB_STORAGE_COMMON_H
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index a27e6e34db0b..3cd565794ad7 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TARGET_USB_GADGET_H__
#define __TARGET_USB_GADGET_H__
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 3971bbab88bd..a72295c953bb 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_audio.c -- interface to USB gadget "ALSA sound card" utilities
*
@@ -9,16 +10,6 @@
* Copyright (C) 2011
* Yadwinder Singh (yadi.brar01@gmail.com)
* Jaswinder Singh (jaswinder.singh@linaro.org)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 07e13784cbb8..81d3d4ed6dfb 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_audio.h -- interface to USB gadget "ALSA sound card" utilities
*
* Copyright (C) 2016
* Author: Ruslan Bilovol <ruslan.bilovol@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __U_AUDIO_H
diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h
index 262cc03cc2c0..050aa672ee7f 100644
--- a/drivers/usb/gadget/function/u_ecm.h
+++ b/drivers/usb/gadget/function/u_ecm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ecm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_ECM_H
diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h
index e3ae97874c4f..de3828d3e8f0 100644
--- a/drivers/usb/gadget/function/u_eem.h
+++ b/drivers/usb/gadget/function/u_eem.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_eem.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_EEM_H
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index bdbc3fdc7c4f..6fcda62f55ea 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index c77145bd6b5b..332307d54292 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_ether.h -- interface to USB gadget "ethernet link" utilities
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __U_ETHER_H
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index e4c3f84af4c3..cd33cee4d78b 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ether_configfs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_ETHER_CONFIGFS_H
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 79f70ebf85dc..c3aba4dfa958 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_fs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_FFS_H
diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h
index d4078426ba5d..5b7e2eb90336 100644
--- a/drivers/usb/gadget/function/u_gether.h
+++ b/drivers/usb/gadget/function/u_gether.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_gether.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_GETHER_H
diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h
index aaa0e368a159..2f5ca4bfa7ff 100644
--- a/drivers/usb/gadget/function/u_hid.h
+++ b/drivers/usb/gadget/function/u_hid.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_hid.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_HID_H
diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h
index 22510189758e..5599aa5fc977 100644
--- a/drivers/usb/gadget/function/u_midi.h
+++ b/drivers/usb/gadget/function/u_midi.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_midi.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_MIDI_H
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index ce0f3a78ca13..67324f983343 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_ncm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_NCM_H
diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h
index 98ced18779ea..12fb613f85d1 100644
--- a/drivers/usb/gadget/function/u_phonet.h
+++ b/drivers/usb/gadget/function/u_phonet.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_phonet.h - interface to Phonet
*
* Copyright (C) 2007-2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#ifndef __U_PHONET_H
diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h
index 8d30b7577f87..6088ff744194 100644
--- a/drivers/usb/gadget/function/u_printer.h
+++ b/drivers/usb/gadget/function/u_printer.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_printer.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_PRINTER_H
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index efdb7ac381d9..d65fb4ebac3c 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_rndis.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_RNDIS_H
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 4176216d54be..4d653d2960d4 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_serial.c - utilities for USB gadget "serial port"/TTY support
*
@@ -9,10 +10,6 @@
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2000 Peter Berger (pberger@brimson.com)
* Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -1078,6 +1075,7 @@ static void gs_complete_out(struct usb_ep *ep, struct usb_request *req)
default:
pr_warn("%s: unexpected %s status %d\n",
__func__, ep->name, req->status);
+ /* fall through */
case 0:
/* normal completion */
spin_lock(&info->con_lock);
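The /* fall through */ comments added in f_phonet.c, f_tcm.c and u_serial.c above mark places where one switch case is meant to continue into the next label, so compilers that warn about implicit fall-through (-Wimplicit-fallthrough) treat it as intentional. A minimal sketch of the pattern, with hypothetical names (demo_stats, demo_account) rather than anything from the patches:

	#include <linux/errno.h>

	struct demo_stats {
		unsigned long aborted;
		unsigned long errors;
	};

	static void demo_account(struct demo_stats *s, int status)
	{
		switch (status) {
		case -ECONNRESET:
			/* an aborted request is also counted as an error */
			s->aborted++;
			/* fall through */
		default:
			s->errors++;
			break;
		case 0:
			/* normal completion, nothing to count */
			break;
		}
	}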
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index c20210c0babd..9acaac1cbb75 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_serial.h - interface to USB gadget "serial port"/TTY utilities
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#ifndef __U_SERIAL_H
diff --git a/drivers/usb/gadget/function/u_tcm.h b/drivers/usb/gadget/function/u_tcm.h
index 0bd751e0483f..3f7ccecb0f9b 100644
--- a/drivers/usb/gadget/function/u_tcm.h
+++ b/drivers/usb/gadget/function/u_tcm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_tcm.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@xxxxxxxxxxx>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_TCM_H
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index 6f188fd8633f..6f1a9d73defe 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uac1.h - Utility definitions for UAC1 function
*
* Copyright (C) 2016 Ruslan Bilovol <ruslan.bilovol@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_UAC1_H
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index fa4684a1c54c..cbc868d117af 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_uac1.c -- ALSA audio utilities for Gadget stack
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.h b/drivers/usb/gadget/function/u_uac1_legacy.h
index d715b1af56a4..dd69e408a3d9 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.h
+++ b/drivers/usb/gadget/function/u_uac1_legacy.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#ifndef __U_UAC1_LEGACY_H
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index 19eeb83538a5..8362ee572e1e 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uac2.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_UAC2_H
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 4676b60a5063..d00d3ded71c0 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_uvc.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef U_UVC_H
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 11d70dead32b..a64e07e61f8c 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_gadget.h -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _UVC_GADGET_H_
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 844cb738bafd..c9b8cc4aae5a 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_configfs.c
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "u_uvc.h"
#include "uvc_configfs.h"
@@ -127,7 +124,7 @@ static struct configfs_attribute *uvcg_control_header_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_control_header_type = {
+static const struct config_item_type uvcg_control_header_type = {
.ct_attrs = uvcg_control_header_attrs,
.ct_owner = THIS_MODULE,
};
@@ -170,7 +167,7 @@ static struct configfs_group_operations uvcg_control_header_grp_ops = {
.drop_item = uvcg_control_header_drop,
};
-static struct config_item_type uvcg_control_header_grp_type = {
+static const struct config_item_type uvcg_control_header_grp_type = {
.ct_group_ops = &uvcg_control_header_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -265,7 +262,7 @@ static struct configfs_attribute *uvcg_default_processing_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_processing_type = {
+static const struct config_item_type uvcg_default_processing_type = {
.ct_attrs = uvcg_default_processing_attrs,
.ct_owner = THIS_MODULE,
};
@@ -277,7 +274,7 @@ static struct uvcg_processing_grp {
struct config_group group;
} uvcg_processing_grp;
-static struct config_item_type uvcg_processing_grp_type = {
+static const struct config_item_type uvcg_processing_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -382,7 +379,7 @@ static struct configfs_attribute *uvcg_default_camera_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_camera_type = {
+static const struct config_item_type uvcg_default_camera_type = {
.ct_attrs = uvcg_default_camera_attrs,
.ct_owner = THIS_MODULE,
};
@@ -394,7 +391,7 @@ static struct uvcg_camera_grp {
struct config_group group;
} uvcg_camera_grp;
-static struct config_item_type uvcg_camera_grp_type = {
+static const struct config_item_type uvcg_camera_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -460,7 +457,7 @@ static struct configfs_attribute *uvcg_default_output_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_output_type = {
+static const struct config_item_type uvcg_default_output_type = {
.ct_attrs = uvcg_default_output_attrs,
.ct_owner = THIS_MODULE,
};
@@ -472,7 +469,7 @@ static struct uvcg_output_grp {
struct config_group group;
} uvcg_output_grp;
-static struct config_item_type uvcg_output_grp_type = {
+static const struct config_item_type uvcg_output_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -481,7 +478,7 @@ static struct uvcg_terminal_grp {
struct config_group group;
} uvcg_terminal_grp;
-static struct config_item_type uvcg_terminal_grp_type = {
+static const struct config_item_type uvcg_terminal_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -586,7 +583,7 @@ static struct configfs_item_operations uvcg_control_class_item_ops = {
.drop_link = uvcg_control_class_drop_link,
};
-static struct config_item_type uvcg_control_class_type = {
+static const struct config_item_type uvcg_control_class_type = {
.ct_item_ops = &uvcg_control_class_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -596,7 +593,7 @@ static struct uvcg_control_class_grp {
struct config_group group;
} uvcg_control_class_grp;
-static struct config_item_type uvcg_control_class_grp_type = {
+static const struct config_item_type uvcg_control_class_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -605,7 +602,7 @@ static struct uvcg_control_grp {
struct config_group group;
} uvcg_control_grp;
-static struct config_item_type uvcg_control_grp_type = {
+static const struct config_item_type uvcg_control_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -857,7 +854,7 @@ static struct configfs_attribute *uvcg_streaming_header_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_streaming_header_type = {
+static const struct config_item_type uvcg_streaming_header_type = {
.ct_item_ops = &uvcg_streaming_header_item_ops,
.ct_attrs = uvcg_streaming_header_attrs,
.ct_owner = THIS_MODULE,
@@ -901,7 +898,7 @@ static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
.drop_item = uvcg_streaming_header_drop,
};
-static struct config_item_type uvcg_streaming_header_grp_type = {
+static const struct config_item_type uvcg_streaming_header_grp_type = {
.ct_group_ops = &uvcg_streaming_header_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1150,7 +1147,7 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_frame_type = {
+static const struct config_item_type uvcg_frame_type = {
.ct_attrs = uvcg_frame_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1419,7 +1416,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_uncompressed_type = {
+static const struct config_item_type uvcg_uncompressed_type = {
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1469,7 +1466,7 @@ static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
.drop_item = uvcg_uncompressed_drop,
};
-static struct config_item_type uvcg_uncompressed_grp_type = {
+static const struct config_item_type uvcg_uncompressed_grp_type = {
.ct_group_ops = &uvcg_uncompressed_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1619,7 +1616,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_mjpeg_type = {
+static const struct config_item_type uvcg_mjpeg_type = {
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1663,7 +1660,7 @@ static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
.drop_item = uvcg_mjpeg_drop,
};
-static struct config_item_type uvcg_mjpeg_grp_type = {
+static const struct config_item_type uvcg_mjpeg_grp_type = {
.ct_group_ops = &uvcg_mjpeg_grp_ops,
.ct_owner = THIS_MODULE,
};
@@ -1728,7 +1725,7 @@ static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
NULL,
};
-static struct config_item_type uvcg_default_color_matching_type = {
+static const struct config_item_type uvcg_default_color_matching_type = {
.ct_attrs = uvcg_default_color_matching_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1740,7 +1737,7 @@ static struct uvcg_color_matching_grp {
struct config_group group;
} uvcg_color_matching_grp;
-static struct config_item_type uvcg_color_matching_grp_type = {
+static const struct config_item_type uvcg_color_matching_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2085,7 +2082,7 @@ static struct configfs_item_operations uvcg_streaming_class_item_ops = {
.drop_link = uvcg_streaming_class_drop_link,
};
-static struct config_item_type uvcg_streaming_class_type = {
+static const struct config_item_type uvcg_streaming_class_type = {
.ct_item_ops = &uvcg_streaming_class_item_ops,
.ct_owner = THIS_MODULE,
};
@@ -2095,7 +2092,7 @@ static struct uvcg_streaming_class_grp {
struct config_group group;
} uvcg_streaming_class_grp;
-static struct config_item_type uvcg_streaming_class_grp_type = {
+static const struct config_item_type uvcg_streaming_class_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2104,7 +2101,7 @@ static struct uvcg_streaming_grp {
struct config_group group;
} uvcg_streaming_grp;
-static struct config_item_type uvcg_streaming_grp_type = {
+static const struct config_item_type uvcg_streaming_grp_type = {
.ct_owner = THIS_MODULE,
};
@@ -2190,7 +2187,7 @@ static struct configfs_attribute *uvc_attrs[] = {
NULL,
};
-static struct config_item_type uvc_func_type = {
+static const struct config_item_type uvc_func_type = {
.ct_item_ops = &uvc_item_ops,
.ct_attrs = uvc_attrs,
.ct_owner = THIS_MODULE,
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index 085e67be7c71..8549c0b27b9d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_configfs.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef UVC_CONFIGFS_H
#define UVC_CONFIGFS_H
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 6377e9fee6e5..278d50ff1eea 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_queue.c -- USB Video Class driver - Buffers management
*
* Copyright (C) 2005-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/atomic.h>
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index ac461a9a1a70..51ee94e5cf2b 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _UVC_QUEUE_H_
#define _UVC_QUEUE_H_
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 3e22b45687d3..f3069db6f08e 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_v4l2.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
@@ -354,7 +350,7 @@ static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file,
}
#endif
-struct v4l2_file_operations uvc_v4l2_fops = {
+const struct v4l2_file_operations uvc_v4l2_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h
index 2683b92fda65..a75e9c397446 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.h
+++ b/drivers/usb/gadget/function/uvc_v4l2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_v4l2.h -- USB Video Class Gadget driver
*
@@ -7,16 +8,12 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __UVC_V4L2_H__
#define __UVC_V4L2_H__
extern const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops;
-extern struct v4l2_file_operations uvc_v4l2_fops;
+extern const struct v4l2_file_operations uvc_v4l2_fops;
#endif /* __UVC_V4L2_H__ */
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 0f01c04d7cbd..d3567b90343a 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* uvc_video.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index ef00f06fa00b..6c20aa75f966 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* uvc_video.h -- USB Video Class Gadget driver
*
@@ -7,10 +8,6 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __UVC_VIDEO_H__
#define __UVC_VIDEO_H__
diff --git a/drivers/usb/gadget/functions.c b/drivers/usb/gadget/functions.c
index b13f839e7368..203361a64212 100644
--- a/drivers/usb/gadget/functions.c
+++ b/drivers/usb/gadget/functions.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/Makefile b/drivers/usb/gadget/legacy/Makefile
index 7f485f25705e..abd0c3e66a05 100644
--- a/drivers/usb/gadget/legacy/Makefile
+++ b/drivers/usb/gadget/legacy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# USB gadget drivers
#
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index c39de65a448b..af16672d5118 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* acm_ms.c -- Composite driver, with ACM and mass storage support
*
@@ -7,11 +8,6 @@
* Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de>
*
* Heavily based on multi.c and cdc2.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index 1f5cdbe162df..7b11dce98b94 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* audio.c -- Audio gadget driver
*
@@ -5,8 +6,6 @@
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index 51c08682de84..da1c37933ca1 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* cdc2.c -- CDC Composite driver, with ECM and ACM support
*
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 99ca3dabc4f3..e1d566c9918a 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dbgp.c -- EHCI Debug Port device gadget
*
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 25a2c2e48592..30313b233680 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ether.c -- Ethernet gadget driver, with CDC and non-CDC options
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index 6da7316f8e87..b640ed3fcf70 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* g_ffs.c -- user mode file system API for USB composite function controllers
*
* Copyright (C) 2010 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#define pr_fmt(fmt) "g_ffs: " fmt
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index 0bf39c3ccdb1..9eea2d18f2bf 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gmidi.c -- USB MIDI Gadget Driver
*
@@ -5,9 +6,6 @@
* Developed for Thumtronics by Grey Innovation
* Ben Williamson <ben.williamson@greyinnovation.com>
*
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
* This code is based in part on:
*
* Gadget Zero driver, Copyright (C) 2003-2004 David Brownell.
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index a71a884f79fc..c4eda7fe7ab4 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* hid.c -- HID Composite driver
*
* Based on multi.c
*
* Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 5c28bee327e1..9343ec436485 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* inode.c -- user mode filesystem api for usb gadget controllers
*
* Copyright (C) 2003-2004 David Brownell
* Copyright (C) 2003 Agilent Technologies
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index fcba59782f26..ef3d25259b0e 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mass_storage.c -- Mass Storage USB Gadget
*
@@ -5,11 +6,6 @@
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz <mina86@mina86.com>
* All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index a70a406580ea..50515f9e1022 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* multi.c -- Multifunction Composite driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 Samsung Electronics
* Author: Michal Nazarewicz (mina86@mina86.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 0aba68253e3d..fcee1ee0bf66 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ncm.c -- NCM gadget driver
*
@@ -9,11 +10,6 @@
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2008 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define DEBUG */
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index b1e535f4022e..978c1a34a932 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nokia.c -- Nokia Composite Gadget Driver
*
@@ -9,10 +10,6 @@
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * version 2 of that License.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 4c9cfff34a03..57858f0c2b6c 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* printer.c -- Printer gadget driver
*
* Copyright (C) 2003-2005 David Brownell
* Copyright (C) 2006 Craig W. Nadler
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 9d89adce756d..de30d7628eef 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* serial.c -- USB gadget serial driver
*
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 by David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This software is distributed under the terms of the GNU General
- * Public License ("GPL") as published by the Free Software Foundation,
- * either version 2 of that License or (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 0b0bb98319cd..682bf99dcf76 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1,10 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Target based USB-Gadget
*
* UAS protocol handling, target callbacks, configfs handling,
* BBB (USB Mass Storage Class Bulk-Only (BBB) and Transport protocol handling.
*
* Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
- * License: GPLv2 as published by FSF.
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
index 82c13fce9232..6b86568c9157 100644
--- a/drivers/usb/gadget/legacy/webcam.c
+++ b/drivers/usb/gadget/legacy/webcam.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* webcam.c -- USB webcam gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index d02e2ce73ea5..6e84b44c8a3b 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* zero.c -- Gadget Zero, for USB development
*
* Copyright (C) 2003-2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -154,10 +150,11 @@ static struct usb_gadget_strings *dev_strings[] = {
/*-------------------------------------------------------------------------*/
static struct timer_list autoresume_timer;
+static struct usb_composite_dev *autoresume_cdev;
-static void zero_autoresume(unsigned long _c)
+static void zero_autoresume(struct timer_list *unused)
{
- struct usb_composite_dev *cdev = (void *)_c;
+ struct usb_composite_dev *cdev = autoresume_cdev;
struct usb_gadget *g = cdev->gadget;
/* unconfigured devices can't issue wakeups */
@@ -282,7 +279,8 @@ static int zero_bind(struct usb_composite_dev *cdev)
device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
device_desc.iSerialNumber = strings_dev[USB_GADGET_SERIAL_IDX].id;
- setup_timer(&autoresume_timer, zero_autoresume, (unsigned long) cdev);
+ autoresume_cdev = cdev;
+ timer_setup(&autoresume_timer, zero_autoresume, 0);
func_inst_ss = usb_get_function_instance("SourceSink");
if (IS_ERR(func_inst_ss))
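
The zero.c hunk above, and several hunks below (at91_udc, dummy_hcd, m66592, omap_udc, pxa25x, r8a66597), apply the same timer API conversion: setup_timer() with an unsigned long cookie becomes timer_setup(), and the callback receives the timer pointer and recovers its containing structure with from_timer(). A minimal sketch of the pattern, assuming a hypothetical struct my_dev that embeds the timer (not code from this series):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {				/* hypothetical container */
	struct timer_list timer;
	unsigned int ticks;
};

static void my_dev_timeout(struct timer_list *t)
{
	/* the callback gets the timer itself; recover the container */
	struct my_dev *dev = from_timer(dev, t, timer);

	dev->ticks++;
}

static void my_dev_start(struct my_dev *dev)
{
	/* was: setup_timer(&dev->timer, my_dev_timeout, (unsigned long)dev); */
	timer_setup(&dev->timer, my_dev_timeout, 0);
	mod_timer(&dev->timer, jiffies + HZ);
}

zero.c is the odd one out: the composite device does not embed the timer, so that hunk keeps a static pointer instead of using from_timer().
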
diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
index 18839732c840..dbaa46eee853 100644
--- a/drivers/usb/gadget/u_f.c
+++ b/drivers/usb/gadget/u_f.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_f.c -- USB function utilities for Gadget stack
*
@@ -5,10 +6,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "u_f.h"
diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
index 7d53a4773d1a..c3fbef2bb5db 100644
--- a/drivers/usb/gadget/u_f.h
+++ b/drivers/usb/gadget/u_f.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_f.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_F_H__
diff --git a/drivers/usb/gadget/u_os_desc.h b/drivers/usb/gadget/u_os_desc.h
index 947b7ddff691..8acd21779ac8 100644
--- a/drivers/usb/gadget/u_os_desc.h
+++ b/drivers/usb/gadget/u_os_desc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* u_os_desc.h
*
@@ -7,10 +8,6 @@
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __U_OS_DESC_H__
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index ea9e1c7f1923..ce865b129fd6 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# define_trace.h needs to know how to find our header
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index 4fe22d432af2..dfdef6a28904 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536.h -- header for AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AMD5536UDC_H
diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
index 57a13f080a79..57b6f66331cf 100644
--- a/drivers/usb/gadget/udc/amd5536udc_pci.c
+++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536udc_pci.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index 8bc78418d40e..ad743a8493be 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* at91_udc -- driver for at91-series USB peripheral controller
*
* Copyright (C) 2004 by Thomas Rathbone
* Copyright (C) 2005 by HP Labs
* Copyright (C) 2005 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#undef VERBOSE_DEBUG
@@ -1554,9 +1550,9 @@ static void at91_vbus_timer_work(struct work_struct *work)
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
}
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
{
- struct at91_udc *udc = (struct at91_udc *)data;
+ struct at91_udc *udc = from_timer(udc, t, vbus_timer);
/*
* If we are polling vbus it is likely that the gpio is on an
@@ -1922,8 +1918,7 @@ static int at91udc_probe(struct platform_device *pdev)
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
- setup_timer(&udc->vbus_timer, at91_vbus_timer,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
mod_timer(&udc->vbus_timer,
jiffies + VBUS_POLL_TIMEOUT);
} else {
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 9bbe72764f31..fd58c5b81826 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004 by Thomas Rathbone, HP Labs
* Copyright (C) 2005 by Ivan Kokshaysky
* Copyright (C) 2006 by SAN People
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef AT91_UDC_H
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index a884c022df7a..075eaaa8a408 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index f8ebe0389bd4..860a00a6fdd0 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Atmel USBA high speed USB device controller
*
* Copyright (C) 2005-2007 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_USB_GADGET_USBA_UDC_H__
#define __LINUX_USB_GADGET_USBA_UDC_H__
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index f78503203f42..29f254793592 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
*
* Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
* Copyright (C) 2012 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/bitops.h>
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
index 5cf6a3bcdf0f..52cb5ea48bbe 100644
--- a/drivers/usb/gadget/udc/bdc/Makefile
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_BDC_UDC) += bdc.o
bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index 6df0352cdc50..6e1e881dc51e 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc.h - header for the BRCM BDC USB3.0 device controller
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 6e920f1dce02..6305bf2c8b59 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_cmd.c - BRCM BDC USB3.0 device controller
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
index 61d0e3bf9853..29cc988a671a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_cmd.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_CMD_H__
#define __LINUX_BDC_CMD_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 7a8af4b916cf..d39f070acbd7 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_core.c - BRCM BDC USB3.0 device controller core operations
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
index ac98f6f681b7..7ba7448ad743 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include "bdc.h"
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
index 338a6c701315..373d5abffbb8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_dbg.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_DBG_H__
#define __LINUX_BDC_DBG_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index bfd8f7ade935..f40d4c13cfa4 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
index 8a6b36cbf2ea..a37ff8033b4f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_ep.h - header for the BDC debug functions
*
* Copyright (C) 2014 Broadcom Corporation
*
* Author: Ashwini Pahuja
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_BDC_EP_H__
#define __LINUX_BDC_EP_H__
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 02968842b359..1e940f054cb8 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index c84346146456..7bfd58c846f7 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops
*
@@ -6,12 +7,6 @@
* Author: Ashwini Pahuja
*
* Based on drivers under drivers/usb/gadget/udc/
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d41d07aae0ce..61422d624ad0 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* udc.c - Core UDC Framework
*
* Copyright (C) 2010 Texas Instruments
* Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -912,7 +901,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
return 0;
type = usb_endpoint_type(desc);
- max = 0x7ff & usb_endpoint_maxp(desc);
+ max = usb_endpoint_maxp(desc);
if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
return 0;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index f04e91ef9e7c..d0128f92ec5a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
@@ -23,6 +19,8 @@
*
* Having this all in one kernel can help some stages of development,
* bypassing some hardware (and driver) issues. UML could help too.
+ *
+ * Note: The emulation does not include isochronous transfers!
*/
#include <linux/module.h>
@@ -137,6 +135,9 @@ static const struct {
.caps = _caps, \
}
+/* we don't provide isochronous endpoints since we don't support them */
+#define TYPE_BULK_OR_INT (USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+
/* everyone has ep0 */
EP_INFO(ep0name,
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
@@ -145,64 +146,72 @@ static const struct {
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep2out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep3in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep4out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep5in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep6in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep7out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep8in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep9out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep10in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep11in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep12out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+/*
EP_INFO("ep13in-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep14out-iso",
USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+*/
EP_INFO("ep15in-int",
USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+
/* or like sa1100: two fixed function endpoints */
EP_INFO("ep1out-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep2in-bulk",
USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+
/* and now some generic EPs so we have enough in multi config */
EP_INFO("ep3out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep4in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep5out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep6out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep7in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep8out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep9in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep10out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep11out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep12in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep13out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
EP_INFO("ep14in",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_IN)),
EP_INFO("ep15out",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+ USB_EP_CAPS(TYPE_BULK_OR_INT, USB_EP_CAPS_DIR_OUT)),
#undef EP_INFO
};
@@ -556,10 +565,12 @@ static int dummy_enable(struct usb_ep *_ep,
if (max <= 1024)
break;
/* save a return statement */
+ /* fall through */
case USB_SPEED_FULL:
if (max <= 64)
break;
/* save a return statement */
+ /* fall through */
default:
if (max <= 8)
break;
@@ -577,6 +588,7 @@ static int dummy_enable(struct usb_ep *_ep,
if (max <= 1024)
break;
/* save a return statement */
+ /* fall through */
case USB_SPEED_FULL:
if (max <= 1023)
break;
@@ -1759,9 +1771,9 @@ static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
{
- struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+ struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
@@ -1769,6 +1781,7 @@ static void dummy_timer(unsigned long _dum_hcd)
int i;
/* simplistic model for one frame's bandwidth */
+ /* FIXME: account for transaction and packet overhead */
switch (dum->gadget.speed) {
case USB_SPEED_LOW:
total = 8/*bytes*/ * 12/*packets*/;
@@ -1813,7 +1826,6 @@ restart:
struct dummy_request *req;
u8 address;
struct dummy_ep *ep = NULL;
- int type;
int status = -EINPROGRESS;
/* stop when we reach URBs queued after the timer interrupt */
@@ -1825,14 +1837,10 @@ restart:
goto return_urb;
else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
continue;
- type = usb_pipetype(urb->pipe);
- /* used up this frame's non-periodic bandwidth?
- * FIXME there's infinite bandwidth for control and
- * periodic transfers ... unrealistic.
- */
- if (total <= 0 && type == PIPE_BULK)
- continue;
+ /* Used up this frame's bandwidth? */
+ if (total <= 0)
+ break;
/* find the gadget's ep for this request (if configured) */
address = usb_pipeendpoint (urb->pipe);
@@ -1930,13 +1938,17 @@ restart:
limit = total;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
- /* FIXME is it urb->interval since the last xfer?
- * use urb->iso_frame_desc[i].
- * complete whether or not ep has requests queued.
- * report random errors, to debug drivers.
+ /*
+ * We don't support isochronous. But if we did,
+ * here are some of the issues we'd have to face:
+ *
+ * Is it urb->interval since the last xfer?
+ * Use urb->iso_frame_desc[i].
+ * Complete whether or not ep has requests queued.
+ * Report random errors, to debug drivers.
*/
limit = max(limit, periodic_bytes(dum, ep));
- status = -ENOSYS;
+ status = -EINVAL; /* fail all xfers */
break;
case PIPE_INTERRUPT:
@@ -2433,9 +2445,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- init_timer(&dum_hcd->timer);
- dum_hcd->timer.function = dummy_timer;
- dum_hcd->timer.data = (unsigned long)dum_hcd;
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2464,9 +2474,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- init_timer(&dum_hcd->timer);
- dum_hcd->timer.function = dummy_timer;
- dum_hcd->timer.data = (unsigned long)dum_hcd;
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 78d0204e3e20..53a48f561458 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* FOTG210 UDC Driver supports Bulk transfer so far
*
* Copyright (C) 2013 Faraday Technology Corporation
*
* Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/gadget/udc/fotg210.h b/drivers/usb/gadget/udc/fotg210.h
index bbf991bcbe7c..08c32957503b 100644
--- a/drivers/usb/gadget/udc/fotg210.h
+++ b/drivers/usb/gadget/udc/fotg210.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Faraday FOTG210 USB OTG controller
*
* Copyright (C) 2013 Faraday Technology Corporation
* Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
index f16e149c5b3e..f29cf5c6160c 100644
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/udc/fsl_mxc_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009
* Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
@@ -5,11 +6,6 @@
* Description:
* Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
* driver to function correctly on these systems.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index a3e72d690eef..2707be628298 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* driver/usb/gadget/fsl_qe_udc.c
*
@@ -11,11 +12,6 @@
* Freescale QE/CPM USB Peripheral Controller Driver
* The controller can be found on MPC8360, MPC8272, etc.
* MPC8360 Rev 1.1 may need a QE microcode update
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#undef USB_TRACE
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.h b/drivers/usb/gadget/udc/fsl_qe_udc.h
index 7026919fc901..2c537a904ee7 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.h
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/gadget/qe_udc.h
*
@@ -8,11 +9,6 @@
*
* Description:
* Freescale USB device/endpoint management registers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
*/
#ifndef __FSL_QE_UDC_H
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 6f2f71c054be..d606d4f13098 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004-2007,2011-2012 Freescale Semiconductor, Inc.
* All rights reserved.
@@ -10,11 +11,6 @@
* This can be found on MPC8349E/MPC8313E/MPC5121E cpus.
* The driver was previously named mpc_udc. Based on bare board
* code from Dave Liu and Shlomi Gridish.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#undef VERBOSE
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index e92b8408b6f6..4ba651ae9048 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2004,2012 Freescale Semiconductor, Inc
* All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Freescale USB device/endpoint management registers
*/
#ifndef __FSL_USB2_UDC_H
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index e0c1b0099265..263804d154a7 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fusb300 UDC (USB gadget)
*
* Copyright (C) 2010 Faraday Technology Corp.
*
* Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h
index ad39f892d200..eb3d6d379ba7 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.h
+++ b/drivers/usb/gadget/udc/fusb300_udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fusb300 UDC (USB gadget)
*
* Copyright (C) 2010 Faraday Technology Corp.
*
* Author : Yuan-hsin Chen <yhchen@faraday-tech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 8433c22900dc..4504d0b202db 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
@@ -5,10 +6,6 @@
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
@@ -127,11 +124,15 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
mode = 0;
max = get_unaligned_le16(&desc->wMaxPacketSize);
switch (max) {
- case 64: mode++;
- case 32: mode++;
- case 16: mode++;
- case 8: mode <<= 3;
- break;
+ case 64:
+ mode++; /* fall through */
+ case 32:
+ mode++; /* fall through */
+ case 16:
+ mode++; /* fall through */
+ case 8:
+ mode <<= 3;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
index 86d2adafe149..26601bf4e7a9 100644
--- a/drivers/usb/gadget/udc/goku_udc.h
+++ b/drivers/usb/gadget/udc/goku_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
@@ -5,10 +6,6 @@
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
/*
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1f9941145746..b3fb1bbdb854 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
*
@@ -9,11 +10,6 @@
* Full documentation of the GRUSBDC core can be found here:
* http://www.gaisler.com/products/grlib/grip.pdf
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Contributors:
* - Andreas Larsson <andreas@gaisler.com>
* - Marko Isomaki
@@ -1261,7 +1257,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
if (!req->last_desc)
return 0;
- if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
return 0; /* Not put in hardware buffers yet */
if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1286,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
if (!req->curr_desc)
return 0;
- ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
if (ctrl & GR_DESC_OUT_CTRL_EN)
return 0; /* Not received yet */
@@ -1538,7 +1534,7 @@ static int gr_ep_enable(struct usb_ep *_ep,
* Bits 10-0 set the max payload. 12-11 set the number of
* additional transactions.
*/
- max = 0x7ff & usb_endpoint_maxp(desc);
+ max = usb_endpoint_maxp(desc);
nt = usb_endpoint_maxp_mult(desc) - 1;
buffer_size = GR_BUFFER_SIZE(epctrl);
if (nt && (mode == 0 || mode == 2)) {
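
The gr_udc.c and core.c hunks drop the explicit "0x7ff &" mask because usb_endpoint_maxp() now returns only the packet-size bits of wMaxPacketSize, while the additional-transaction count is exposed separately through usb_endpoint_maxp_mult(). A small sketch of how the two helpers decode a descriptor (hypothetical function name, not part of this patch):

#include <linux/usb/ch9.h>

/* bytes an endpoint can move per (micro)frame, per its descriptor */
static unsigned int ep_bytes_per_uframe(const struct usb_endpoint_descriptor *desc)
{
	unsigned int maxp = usb_endpoint_maxp(desc);	  /* bits 10:0 only */
	unsigned int mult = usb_endpoint_maxp_mult(desc); /* 1..3 transactions */

	return maxp * mult;
}
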
diff --git a/drivers/usb/gadget/udc/gr_udc.h b/drivers/usb/gadget/udc/gr_udc.h
index 4297c4e8021f..3e913268c8c5 100644
--- a/drivers/usb/gadget/udc/gr_udc.h
+++ b/drivers/usb/gadget/udc/gr_udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
*
@@ -9,11 +10,6 @@
* Full documentation of the GRUSBDC core can be found here:
* http://www.gaisler.com/products/grlib/grip.pdf
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* Contributors:
* - Andreas Larsson <andreas@gaisler.com>
* - Marko Isomaki
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 8f32b5ee7734..b0781771704e 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Gadget driver for LPC32xx
*
@@ -12,20 +13,6 @@
*
* Note: This driver is based on original work done by Mike James for
* the LPC3180.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 46ce7bc15f2b..a8288df6aadf 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* M66592 UDC (USB gadget)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
@@ -1262,9 +1259,9 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
return IRQ_HANDLED;
}
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
{
- struct m66592 *m66592 = (struct m66592 *)_m66592;
+ struct m66592 *m66592 = from_timer(m66592, t, timer);
unsigned long flags;
u16 tmp;
@@ -1592,9 +1589,7 @@ static int m66592_probe(struct platform_device *pdev)
m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.name = udc_name;
- init_timer(&m66592->timer);
- m66592->timer.function = m66592_timer;
- m66592->timer.data = (unsigned long)m66592;
+ timer_setup(&m66592->timer, m66592_timer, 0);
m66592->reg = reg;
ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
diff --git a/drivers/usb/gadget/udc/m66592-udc.h b/drivers/usb/gadget/udc/m66592-udc.h
index 96d49d7bfb6b..01a64685b8a3 100644
--- a/drivers/usb/gadget/udc/m66592-udc.h
+++ b/drivers/usb/gadget/udc/m66592-udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* M66592 UDC (USB gadget)
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __M66592_UDC_H__
diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h
index e32a787ac373..982625b7197a 100644
--- a/drivers/usb/gadget/udc/mv_u3d.h
+++ b/drivers/usb/gadget/udc/mv_u3d.h
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#ifndef __MV_U3D_H
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 772049afe166..35e02a8d0091 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/mv_udc.h b/drivers/usb/gadget/udc/mv_udc.h
index be77f207dbaf..b3f759c0962c 100644
--- a/drivers/usb/gadget/udc/mv_udc.h
+++ b/drivers/usb/gadget/udc/mv_udc.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __MV_UDC_H
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 4103bf7cf52a..95f52232493b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 8f85a51bd2b3..660878a19505 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for PLX NET2272 USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h
index 69bc9c3c6ce4..8e644627992d 100644
--- a/drivers/usb/gadget/udc/net2272.h
+++ b/drivers/usb/gadget/udc/net2272.h
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PLX NET2272 high/full speed USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __NET2272_H__
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f608c1f85e61..318246d8b2e2 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for the PLX NET2280 USB device controller.
* Specs and errata are available from <http://www.plxtech.com>.
@@ -31,11 +32,6 @@
*
* Modified Ricardo Ribalda Qtechnology AS to provide compatibility
* with usb 338x chip. Based on PLX driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 1088c3745999..b65a797544d7 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NetChip 2280 high/full speed USB device controller.
* Unlike many such controllers, this one talks PCI.
@@ -7,11 +8,6 @@
* Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
* Copyright (C) 2003 David Brownell
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/usb/net2280.h>
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index f05ba6825bfe..dc35a54bad90 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* omap_udc.c -- for OMAP full speed udc; most chips support OTG.
*
@@ -5,11 +6,6 @@
* Copyright (C) 2004-2005 David Brownell
*
* OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#undef DEBUG
@@ -1858,9 +1854,9 @@ static irqreturn_t omap_udc_irq(int irq, void *_udc)
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
{
- struct omap_ep *ep = (void *) _ep;
+ struct omap_ep *ep = from_timer(ep, t, timer);
unsigned long flags;
u16 stat_flg;
@@ -2546,9 +2542,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
}
if (dbuf && addr)
epn_rxtx |= UDC_EPN_RX_DB;
- init_timer(&ep->timer);
- ep->timer.function = pio_out_timer;
- ep->timer.data = (unsigned long) ep;
+ timer_setup(&ep->timer, pio_out_timer, 0);
}
if (addr)
epn_rxtx |= UDC_EPN_RX_VALID;
diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h
index 26974196cf44..00f9e608e755 100644
--- a/drivers/usb/gadget/udc/omap_udc.h
+++ b/drivers/usb/gadget/udc/omap_udc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* omap_udc.h -- for omap 3.2 udc, with OTG support
*
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 84dcbcd756f0..afaea11ec771 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index a238da906115..0e3f5faa000e 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Intel PXA25x and IXP4xx on-chip full speed USB device controllers
*
@@ -6,11 +7,6 @@
* Copyright (C) 2003 Benedikt Spranger, Pengutronix
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003 Joshua Wise
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/* #define VERBOSE_DEBUG */
@@ -1628,9 +1624,9 @@ static inline void clear_ep_state (struct pxa25x_udc *dev)
nuke(&dev->ep[i], -ECONNABORTED);
}
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
{
- struct pxa25x_udc *dev = (void *)_dev;
+ struct pxa25x_udc *dev = from_timer(dev, t, timer);
local_irq_disable();
if (dev->ep0state == EP0_STALL
@@ -2417,9 +2413,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
gpio_direction_output(dev->mach->gpio_pullup, 0);
}
- init_timer(&dev->timer);
- dev->timer.function = udc_watchdog;
- dev->timer.data = (unsigned long) dev;
+ timer_setup(&dev->timer, udc_watchdog, 0);
the_controller = dev;
platform_set_drvdata(pdev, dev);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index a458bec2536d..ccc6b921f067 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Intel PXA25x on-chip full speed USB device controller
*
* Copyright (C) 2003 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix
* Copyright (C) 2003 David Brownell
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_GADGET_PXA25X_H
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index d48e239660c3..be2761f1b3f5 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Handles the Intel 27x USB Device Controller (UDC)
*
* Inspired by original driver by Frank Becker, David Brownell, and others.
* Copyright (C) 2008 Robert Jarzmik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index cea2cb79b30c..1128d39a4255 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/pxa27x_udc.h
* Intel PXA27x on-chip full speed USB device controller
*
* Inspired by original driver by Frank Becker, David Brownell, and others.
* Copyright (C) 2008 Robert Jarzmik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_GADGET_PXA27X_H
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 118ad70f1af0..a3ecce62662b 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 UDC (USB gadget)
*
* Copyright (C) 2006-2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
@@ -1517,9 +1514,9 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
return IRQ_HANDLED;
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
unsigned long flags;
u16 tmp;
@@ -1877,9 +1874,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.name = udc_name;
- init_timer(&r8a66597->timer);
- r8a66597->timer.function = r8a66597_timer;
- r8a66597->timer.data = (unsigned long)r8a66597;
+ timer_setup(&r8a66597->timer, r8a66597_timer, 0);
r8a66597->reg = reg;
if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.h b/drivers/usb/gadget/udc/r8a66597-udc.h
index 45c4b2df1785..9a115caba661 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.h
+++ b/drivers/usb/gadget/udc/r8a66597-udc.h
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 UDC
*
* Copyright (C) 2007-2009 Renesas Solutions Corp.
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#ifndef __R8A66597_H__
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 63a206122058..bc37f40baacf 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1,22 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB3.0 Peripheral driver (USB gadget)
*
- * Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Copyright (C) 2015-2017 Renesas Electronics Corporation
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
@@ -334,6 +332,7 @@ struct renesas_usb3 {
struct usb_gadget_driver *driver;
struct extcon_dev *extcon;
struct work_struct extcon_work;
+ struct phy *phy;
struct renesas_usb3_ep *usb3_ep;
int num_usb3_eps;
@@ -2056,7 +2055,7 @@ static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
const struct usb_endpoint_descriptor *desc)
{
int i;
- const u32 max_packet_array[] = {8, 16, 32, 64, 512};
+ static const u32 max_packet_array[] = {8, 16, 32, 64, 512};
u32 mpkt = PN_RAMMAP_MPKT(1024);
for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
@@ -2239,7 +2238,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
/* hook up the driver */
usb3->driver = driver;
- pm_runtime_enable(usb3_to_dev(usb3));
+ if (usb3->phy)
+ phy_init(usb3->phy);
+
pm_runtime_get_sync(usb3_to_dev(usb3));
renesas_usb3_init_controller(usb3);
@@ -2256,8 +2257,10 @@ static int renesas_usb3_stop(struct usb_gadget *gadget)
usb3->driver = NULL;
renesas_usb3_stop_controller(usb3);
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+
pm_runtime_put(usb3_to_dev(usb3));
- pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -2405,6 +2408,9 @@ static int renesas_usb3_remove(struct platform_device *pdev)
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
+ if (usb3->phy)
+ phy_put(usb3->phy);
+ pm_runtime_disable(usb3_to_dev(usb3));
return 0;
}
@@ -2560,20 +2566,15 @@ static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
struct resource *res;
- const struct of_device_id *match;
int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
- match = of_match_node(usb3_of_match, pdev->dev.of_node);
- if (!match)
- return -ENODEV;
-
attr = soc_device_match(renesas_usb3_quirks_match);
if (attr)
priv = attr->data;
else
- priv = match->data;
+ priv = of_device_get_match_data(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -2635,11 +2636,20 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (ret < 0)
goto err_dev_create;
+ /*
+ * The phy is optional. If this driver cannot get one, it simply
+ * runs without a phy.
+ */
+ usb3->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(usb3->phy))
+ usb3->phy = NULL;
+
usb3->workaround_for_vbus = priv->workaround_for_vbus;
renesas_usb3_debugfs_init(usb3, &pdev->dev);
- dev_info(&pdev->dev, "probed\n");
+ dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
+ pm_runtime_enable(usb3_to_dev(usb3));
return 0;
@@ -2655,11 +2665,49 @@ err_alloc_prd:
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int renesas_usb3_suspend(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ renesas_usb3_stop_controller(usb3);
+ if (usb3->phy)
+ phy_exit(usb3->phy);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int renesas_usb3_resume(struct device *dev)
+{
+ struct renesas_usb3 *usb3 = dev_get_drvdata(dev);
+
+ /* Not started */
+ if (!usb3->driver)
+ return 0;
+
+ if (usb3->phy)
+ phy_init(usb3->phy);
+ pm_runtime_get_sync(dev);
+ renesas_usb3_init_controller(usb3);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(renesas_usb3_pm_ops, renesas_usb3_suspend,
+ renesas_usb3_resume);
+
static struct platform_driver renesas_usb3_driver = {
.probe = renesas_usb3_probe,
.remove = renesas_usb3_remove,
.driver = {
.name = (char *)udc_name,
+ .pm = &renesas_usb3_pm_ops,
.of_match_table = of_match_ptr(usb3_of_match),
},
};
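
The renesas_usb3.c hunks above make the generic PHY optional and bracket controller start/stop (and the new system sleep callbacks) with phy_init()/phy_exit(). For reference, a minimal sketch of that optional-PHY pattern; the my_udc names below are invented for illustration and are not part of this driver:

#include <linux/err.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

struct my_udc {
	struct phy *phy;	/* NULL when no "usb" PHY is available */
};

static int my_udc_probe(struct platform_device *pdev, struct my_udc *udc)
{
	/*
	 * The PHY is optional: as in the diff, any failure to get it
	 * (including probe deferral) is treated as "no PHY".
	 */
	udc->phy = devm_phy_get(&pdev->dev, "usb");
	if (IS_ERR(udc->phy))
		udc->phy = NULL;

	return 0;
}

static void my_udc_start(struct my_udc *udc)
{
	if (udc->phy)
		phy_init(udc->phy);
	/* ... runtime-resume and initialize the controller ... */
}

static void my_udc_stop(struct my_udc *udc)
{
	/* ... stop the controller ... */
	if (udc->phy)
		phy_exit(udc->phy);
}

The suspend/resume callbacks added above follow the same bracketing: exit the PHY after stopping the controller on suspend, and init it again before re-initializing the controller on resume.
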
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 42587b738a1f..31c7c5587cf9 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* linux/drivers/usb/gadget/s3c-hsudc.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
@@ -8,11 +9,7 @@
* The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints.
* Each endpoint can be configured as either in or out endpoint. Endpoints
* can be configured for Bulk or Interrupt transfer mode.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 394abd5d65c0..f154f49e98c8 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/s3c2410_udc.c
*
@@ -5,11 +6,6 @@
*
* Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#define pr_fmt(fmt) "s3c2410_udc: " fmt
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
index 93bf225f1969..bdcaa8dd300f 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ b/drivers/usb/gadget/udc/s3c2410_udc.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* linux/drivers/usb/gadget/s3c2410_udc.h
* Samsung on-chip full speed USB device controllers
*
* Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _S3C2410_UDC_H
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 38a165dbf924..d4da47f4f6f4 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* amd5536.c -- AMD 5536 UDC high/full speed USB device controller
*
* Copyright (C) 2005-2007 AMD (http://www.amd.com)
* Author: Thomas Dahlmann
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
/*
@@ -1733,7 +1729,7 @@ static void udc_soft_reset(struct udc *dev)
}
/* RDE timer callback to set RDE bit */
-static void udc_timer_function(unsigned long v)
+static void udc_timer_function(struct timer_list *unused)
{
u32 tmp;
@@ -1813,7 +1809,7 @@ static void udc_handle_halt_state(struct udc_ep *ep)
}
/* Stall timer callback to poll S bit and set it again after */
-static void udc_pollstall_timer_function(unsigned long v)
+static void udc_pollstall_timer_function(struct timer_list *unused)
{
struct udc_ep *ep;
int halted = 0;
@@ -3067,14 +3063,12 @@ void udc_remove(struct udc *dev)
stop_timer++;
if (timer_pending(&udc_timer))
wait_for_completion(&on_exit);
- if (udc_timer.data)
- del_timer_sync(&udc_timer);
+ del_timer_sync(&udc_timer);
/* remove pollstall timer */
stop_pollstall_timer++;
if (timer_pending(&udc_pollstall_timer))
wait_for_completion(&on_pollstall_exit);
- if (udc_pollstall_timer.data)
- del_timer_sync(&udc_pollstall_timer);
+ del_timer_sync(&udc_pollstall_timer);
udc = NULL;
}
EXPORT_SYMBOL_GPL(udc_remove);
@@ -3164,10 +3158,6 @@ int udc_probe(struct udc *dev)
u32 reg;
int retval;
- /* mark timer as not initialized */
- udc_timer.data = 0;
- udc_pollstall_timer.data = 0;
-
/* device struct setup */
dev->gadget.ops = &udc_ops;
@@ -3207,13 +3197,8 @@ int udc_probe(struct udc *dev)
goto finished;
/* timer init */
- init_timer(&udc_timer);
- udc_timer.function = udc_timer_function;
- udc_timer.data = 1;
- /* timer pollstall init */
- init_timer(&udc_pollstall_timer);
- udc_pollstall_timer.function = udc_pollstall_timer_function;
- udc_pollstall_timer.data = 1;
+ timer_setup(&udc_timer, udc_timer_function, 0);
+ timer_setup(&udc_pollstall_timer, udc_pollstall_timer_function, 0);
/* set SD */
reg = readl(&dev->regs->ctl);
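
The snps_udc_core.c hunks above convert the two file-scope timers from init_timer() plus manual .function/.data assignment to timer_setup(), with callbacks that take a struct timer_list pointer (unused here, since the timers are global). When the timer is instead embedded in a per-device structure, the usual companion is from_timer(); a generic sketch with made-up names (my_dev, my_dev_poll) to show the idiom:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_dev {
	struct timer_list poll_timer;
	int pending;
};

/* The callback receives the timer itself; recover the container. */
static void my_dev_poll(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	dev->pending = 0;
}

static void my_dev_timer_init(struct my_dev *dev)
{
	timer_setup(&dev->poll_timer, my_dev_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
}

This is also why the udc_remove() hunk can drop the "if (udc_timer.data)" checks: the .data field is gone with the new API, and del_timer_sync() is safe on an initialized but unarmed timer.
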
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index e8a5fdaee37d..32f1d3e90c26 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* snps_udc_plat.c - Synopsys UDC Platform Driver
*
* Copyright (C) 2016 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/extcon.h>
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
index 8c551ab91ad8..7430624c0bd7 100644
--- a/drivers/usb/gadget/udc/trace.c
+++ b/drivers/usb/gadget/udc/trace.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* trace.c - USB Gadget Framework Trace Support
*
* Copyright (C) 2016 Intel Corporation
* Author: Felipe Balbi <felipe.balbi@linux.intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
index da29874b5366..f07ddb3f4bb9 100644
--- a/drivers/usb/gadget/udc/trace.h
+++ b/drivers/usb/gadget/udc/trace.h
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* udc.c - Core UDC Framework
*
* Copyright (C) 2016 Intel Corporation
* Author: Felipe Balbi <felipe.balbi@linux.intel.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index de207a90571e..7da2b9ce8cb3 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Xilinx USB peripheral controller driver
*
@@ -8,12 +9,6 @@
*
* Some parts of this driver code is based on the driver for at91-series
* USB peripheral controller (at91_udc.c).
- *
- * This program is free software; you can redistribute it
- * and/or modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation;
- * either version 2 of the License, or (at your option) any
- * later version.
*/
#include <linux/delay.h>
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 73a4dfba0edb..566ab261e8b7 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1+
/*
* Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index fa5692dec832..b80a94e632af 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -45,12 +45,12 @@ config USB_XHCI_PLATFORM
If unsure, say N.
config USB_XHCI_MTK
- tristate "xHCI support for Mediatek MT65xx/MT7621"
+ tristate "xHCI support for MediaTek SoCs"
select MFD_SYSCON
depends on (MIPS && SOC_MT7621) || ARCH_MEDIATEK || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
- found in Mediatek MT65xx SoCs.
+ found in MediaTek SoCs.
If unsure, say N.
config USB_XHCI_MVEBU
@@ -222,18 +222,6 @@ config USB_EHCI_HCD_AT91
Enables support for the on-chip EHCI controller on
Atmel chips.
-config USB_EHCI_MSM
- tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
- depends on ARCH_QCOM
- select USB_EHCI_ROOT_HUB_TT
- ---help---
- Enables support for the USB Host controller present on the
- Qualcomm chipsets. Root Hub has inbuilt TT.
- This driver depends on OTG driver for PHY initialization,
- clock management, powering up VBUS, and power management.
- This driver is not supported on boards like trout which
- has an external PHY.
-
config USB_EHCI_TEGRA
tristate "NVIDIA Tegra HCD support"
depends on ARCH_TEGRA
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index cf2691fffcc0..32b036e2ffef 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Host Controller Drivers
#
@@ -25,6 +26,10 @@ ifneq ($(CONFIG_USB_XHCI_RCAR), )
xhci-plat-hcd-y += xhci-rcar.o
endif
+ifneq ($(CONFIG_DEBUG_FS),)
+ xhci-hcd-y += xhci-debugfs.o
+endif
+
obj-$(CONFIG_USB_WHCI_HCD) += whci/
obj-$(CONFIG_USB_PCI) += pci-quirks.o
@@ -39,7 +44,6 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
-obj-$(CONFIG_USB_EHCI_MSM) += ehci-msm.o
obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
obj-$(CONFIG_USB_W90X900_EHCI) += ehci-w90x900.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 5f425c89faf1..2400a826397a 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Broadcom specific Advanced Microcontroller Bus
* Broadcom USB-core driver (BCMA bus glue)
@@ -16,8 +17,6 @@
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 7440722bfbf0..3ba140ceaf52 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI UHP on Atmel chips
*
@@ -5,10 +6,6 @@
* Nicolas Ferre <nicolas.ferre@atmel.com>
*
* Based on various ehci-*.c drivers
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
*/
#include <linux/clk.h>
@@ -205,7 +202,8 @@ static int __maybe_unused ehci_atmel_drv_resume(struct device *dev)
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
atmel_start_clock(atmel_ehci);
- return ehci_resume(hcd, false);
+ ehci_resume(hcd, false);
+ return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index cbb9b8e12c3c..19f00424f53e 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -1,16 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 26b641100639..d9145a8f35d2 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SAMSUNG EXYNOS USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index d025cc06dda7..c5094cb88cd5 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2005-2009 MontaVista Software, Inc.
* Copyright 2008,2012,2015 Freescale Semiconductor, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
* Power Management support by Dave Liu <daveliu@freescale.com>,
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 1a8a60a57cf2..cbc422032e50 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2005-2010,2012 Freescale Semiconductor, Inc.
* Copyright (c) 2005 MontaVista Software
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _EHCI_FSL_H
#define _EHCI_FSL_H
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 21650044b09e..656b8c08efc8 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Aeroflex Gaisler GRLIB GRUSBHC EHCI host controller
*
@@ -9,20 +10,6 @@
* (c) Valentine Barshak <vbarshak@ru.mvista.com>
* and in turn based on "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
* and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/err.h>
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6e834b83a104..7f0737449df7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Enhanced Host Controller Interface (EHCI) driver for USB.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* Copyright (c) 2000-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -1012,7 +999,7 @@ idle_timeout:
qh_destroy(ehci, qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index df169c8e7225..facafdf8fb95 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 9b7e63977215..21307d862af6 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
deleted file mode 100644
index 2f8d3af811ce..000000000000
--- a/drivers/usb/host/ehci-msm.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/* ehci-msm.c - HSUSB Host Controller Driver Implementation
- *
- * Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
- *
- * Partly derived from ehci-fsl.c and ehci-hcd.c
- * Copyright (c) 2000-2004 by David Brownell
- * Copyright (c) 2005 MontaVista Software
- *
- * All source code in this file is licensed under the following license except
- * where indicated.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * See the GNU General Public License for more details.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can find it at http://www.fsf.org
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/acpi.h>
-
-#include "ehci.h"
-
-#define MSM_USB_BASE (hcd->regs)
-
-#define DRIVER_DESC "Qualcomm On-Chip EHCI Host Controller"
-
-static const char hcd_name[] = "ehci-msm";
-static struct hc_driver __read_mostly msm_hc_driver;
-
-static int ehci_msm_reset(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
-
- ehci->caps = USB_CAPLENGTH;
- hcd->has_tt = 1;
-
- retval = ehci_setup(hcd);
- if (retval)
- return retval;
-
- /* select ULPI phy and clear other status/control bits in PORTSC */
- writel(PORTSC_PTS_ULPI, USB_PORTSC);
- /* bursts of unspecified length. */
- writel(0, USB_AHBBURST);
- /* Use the AHB transactor, allow posted data writes */
- writel(0x8, USB_AHBMODE);
- /* Disable streaming mode and select host mode */
- writel(0x13, USB_USBMODE);
- /* Disable ULPI_TX_PKT_EN_CLR_FIX which is valid only for HSIC */
- writel(readl(USB_GENCONFIG_2) & ~ULPI_TX_PKT_EN_CLR_FIX, USB_GENCONFIG_2);
-
- return 0;
-}
-
-static int ehci_msm_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- struct usb_phy *phy;
- int ret;
-
- dev_dbg(&pdev->dev, "ehci_msm proble\n");
-
- hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "Unable to create HCD\n");
- return -ENOMEM;
- }
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to get IRQ resource\n");
- goto put_hcd;
- }
- hcd->irq = ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get memory resource\n");
- ret = -ENODEV;
- goto put_hcd;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
- hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -ENOMEM;
- goto put_hcd;
- }
-
- /*
- * If there is an OTG driver, let it take care of PHY initialization,
- * clock management, powering up VBUS, mapping of registers address
- * space and power management.
- */
- if (pdev->dev.of_node)
- phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
- else
- phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
-
- if (IS_ERR(phy)) {
- if (PTR_ERR(phy) == -EPROBE_DEFER) {
- dev_err(&pdev->dev, "unable to find transceiver\n");
- ret = -EPROBE_DEFER;
- goto put_hcd;
- }
- phy = NULL;
- }
-
- hcd->usb_phy = phy;
- device_init_wakeup(&pdev->dev, 1);
-
- if (phy && phy->otg) {
- /*
- * MSM OTG driver takes care of adding the HCD and
- * placing hardware into low power mode via runtime PM.
- */
- ret = otg_set_host(phy->otg, &hcd->self);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to register with transceiver\n");
- goto put_hcd;
- }
-
- pm_runtime_no_callbacks(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- } else {
- ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- if (ret)
- goto put_hcd;
- }
-
- return 0;
-
-put_hcd:
- usb_put_hcd(hcd);
-
- return ret;
-}
-
-static int ehci_msm_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- device_init_wakeup(&pdev->dev, 0);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
-
- if (hcd->usb_phy && hcd->usb_phy->otg)
- otg_set_host(hcd->usb_phy->otg, NULL);
- else
- usb_remove_hcd(hcd);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_msm_pm_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- bool do_wakeup = device_may_wakeup(dev);
-
- dev_dbg(dev, "ehci-msm PM suspend\n");
-
- /* Only call ehci_suspend if ehci_setup has been done */
- if (ehci->sbrn)
- return ehci_suspend(hcd, do_wakeup);
-
- return 0;
-}
-
-static int ehci_msm_pm_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- dev_dbg(dev, "ehci-msm PM resume\n");
-
- /* Only call ehci_resume if ehci_setup has been done */
- if (ehci->sbrn)
- ehci_resume(hcd, false);
-
- return 0;
-}
-
-#else
-#define ehci_msm_pm_suspend NULL
-#define ehci_msm_pm_resume NULL
-#endif
-
-static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
- .suspend = ehci_msm_pm_suspend,
- .resume = ehci_msm_pm_resume,
-};
-
-static const struct acpi_device_id msm_ehci_acpi_ids[] = {
- { "QCOM8040", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, msm_ehci_acpi_ids);
-
-static const struct of_device_id msm_ehci_dt_match[] = {
- { .compatible = "qcom,ehci-host", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
-
-static struct platform_driver ehci_msm_driver = {
- .probe = ehci_msm_probe,
- .remove = ehci_msm_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "msm_hsusb_host",
- .pm = &ehci_msm_dev_pm_ops,
- .of_match_table = msm_ehci_dt_match,
- .acpi_match_table = ACPI_PTR(msm_ehci_acpi_ids),
- },
-};
-
-static const struct ehci_driver_overrides msm_overrides __initconst = {
- .reset = ehci_msm_reset,
-};
-
-static int __init ehci_msm_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- pr_info("%s: " DRIVER_DESC "\n", hcd_name);
- ehci_init_driver(&msm_hc_driver, &msm_overrides);
- return platform_driver_register(&ehci_msm_driver);
-}
-module_init(ehci_msm_init);
-
-static void __exit ehci_msm_cleanup(void)
-{
- platform_driver_unregister(&ehci_msm_driver);
-}
-module_exit(ehci_msm_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_ALIAS("platform:msm-ehci");
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 849806a75f1c..de764459e05a 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index c7a9b31eeaef..c9f91e6c72b6 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
* Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 4d308533bc83..854b146a457d 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* ehci-omap.c - driver for USBHOST on OMAP3/4 processors
*
@@ -14,21 +15,6 @@
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
*
* Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1aec87ec68df..1ad72647a069 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/host/ehci-orion.c
*
* Tzachi Perelstein <tzachi@marvell.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 93326974ff4b..fe9422d3bcdc 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) PCI Bus Glue.
*
* Copyright (c) 2000-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index f1908ea9fbd8..b065a960adc2 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ehci driver
*
@@ -16,8 +17,6 @@
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/acpi.h>
#include <linux/clk.h>
@@ -40,12 +39,11 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
struct clk *clks[EHCI_MAX_CLKS];
- struct reset_control *rsts[EHCI_MAX_RSTS];
+ struct reset_control *rsts;
struct phy **phys;
int num_phys;
bool reset_on_resume;
@@ -151,7 +149,7 @@ static int ehci_platform_probe(struct platform_device *dev)
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv;
struct ehci_hcd *ehci;
- int err, irq, phy_num, clk = 0, rst;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -239,22 +237,16 @@ static int ehci_platform_probe(struct platform_device *dev)
}
}
- for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
- priv->rsts[rst] = devm_reset_control_get_shared_by_index(
- &dev->dev, rst);
- if (IS_ERR(priv->rsts[rst])) {
- err = PTR_ERR(priv->rsts[rst]);
- if (err == -EPROBE_DEFER)
- goto err_reset;
- priv->rsts[rst] = NULL;
- break;
- }
-
- err = reset_control_deassert(priv->rsts[rst]);
- if (err)
- goto err_reset;
+ priv->rsts = devm_reset_control_array_get_optional_shared(&dev->dev);
+ if (IS_ERR(priv->rsts)) {
+ err = PTR_ERR(priv->rsts);
+ goto err_put_clks;
}
+ err = reset_control_deassert(priv->rsts);
+ if (err)
+ goto err_put_clks;
+
if (pdata->big_endian_desc)
ehci->big_endian_desc = 1;
if (pdata->big_endian_mmio)
@@ -310,8 +302,7 @@ err_power:
if (pdata->power_off)
pdata->power_off(dev);
err_reset:
- while (--rst >= 0)
- reset_control_assert(priv->rsts[rst]);
+ reset_control_assert(priv->rsts);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -329,15 +320,14 @@ static int ehci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
- int clk, rst;
+ int clk;
usb_remove_hcd(hcd);
if (pdata->power_off)
pdata->power_off(dev);
- for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
- reset_control_assert(priv->rsts[rst]);
+ reset_control_assert(priv->rsts);
for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
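
The ehci-platform.c hunks above drop the open-coded loop over up to EHCI_MAX_RSTS individually requested reset lines in favour of a single reset_control handle covering every reset described for the device. A rough consumer-side sketch (the my_hcd_* names are placeholders); the optional array variant returns NULL when no resets are specified, and assert/deassert accept NULL as a no-op:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int my_hcd_resets_on(struct platform_device *pdev,
			    struct reset_control **rsts)
{
	/* One handle for every reset line listed for this device. */
	*rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
	if (IS_ERR(*rsts))
		return PTR_ERR(*rsts);

	/* Deassert them all together. */
	return reset_control_deassert(*rsts);
}

static void my_hcd_resets_off(struct reset_control *rsts)
{
	reset_control_assert(rsts);
}

One behavioural difference visible in the diff: the old loop ignored errors other than -EPROBE_DEFER and kept probing, while the array call now fails the probe on any error.
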
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 342816a7f8b1..46e160370d6e 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PMC MSP EHCI (Host Controller Driver) for USB.
*
* (C) Copyright 2006-2010 PMC-Sierra Inc
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
*/
/* includes */
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 1a10c8d542ca..576f7d79ad4e 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 7934ff9b35e1..8c733492d8fe 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PS3 EHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/firmware.h>
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 8f3f055c05fa..88158324dcae 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2001-2004 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 6bc6304672bc..e56db44708bc 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2004 by David Brownell
* Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 5caf88d679e4..a9ee767952c1 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH EHCI host controller driver
*
* Copyright (C) 2010 Paul Mundt
*
* Based on ohci-sh.c and ehci-atmel.c.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1f25c7985f5b..add796c78561 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for EHCI HCD on SPEAr SOC
*
@@ -5,10 +6,6 @@
* Deepak Sikri <deepak.sikri@st.com>
*
* Based on various ehci-*.c drivers
-*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file COPYING in the main directory of this archive for
-* more details.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index be4a2788fc58..3c1362ab70be 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST EHCI driver
*
@@ -6,10 +7,6 @@
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* Derived from ehci-platform.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 5216f2b09d63..71fb61dd4a87 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007 by Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 9a3d7db5be57..c809f7d2f08f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2009 - 2013 NVIDIA Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index bdb93b6a356f..610ed437ed2c 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 0b6cdb723192..4fcebda4b79d 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2012 by Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
*/
/* This file is part of ehci-hcd.c */
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 63b9d0c67963..6d77ace1697b 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/driver/usb/host/ehci-w90x900.c
*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;version 2 of the License.
- *
*/
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index f54480850bb8..d2a27578e440 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EHCI HCD (Host Controller Driver) for USB.
*
@@ -8,21 +9,6 @@
* Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
* and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
* and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/err.h>
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index a8e36170d8b8..c8e9a48e1d51 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2001-2002 by David Brownell
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_EHCI_HCD_H
diff --git a/drivers/usb/host/fhci-dbg.c b/drivers/usb/host/fhci-dbg.c
index b58e7a60913a..fafa91189e45 100644
--- a/drivers/usb/host/fhci-dbg.c
+++ b/drivers/usb/host/fhci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 55a0ae6f2d74..48fe9e6c2465 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/usb/host/fhci-hub.c b/drivers/usb/host/fhci-hub.c
index 60d55eb3de0d..c359dcdb9b13 100644
--- a/drivers/usb/host/fhci-hub.c
+++ b/drivers/usb/host/fhci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-mem.c b/drivers/usb/host/fhci-mem.c
index b0b88f57a5ac..658aedc6adc1 100644
--- a/drivers/usb/host/fhci-mem.c
+++ b/drivers/usb/host/fhci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-q.c b/drivers/usb/host/fhci-q.c
index 03be7494a476..669c240523fe 100644
--- a/drivers/usb/host/fhci-q.c
+++ b/drivers/usb/host/fhci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index 2f162faabbca..3d12cdd5f999 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index f82ad5df1b0d..3a4e8f616751 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h
index 3fc82c1c3c73..e7ec41d62410 100644
--- a/drivers/usb/host/fhci.h
+++ b/drivers/usb/host/fhci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QUICC Engine USB Host Controller Driver
*
@@ -8,11 +9,6 @@
* Peter Barada <peterb@logicpd.com>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __FHCI_H
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 457cc6525abd..62fc955085a1 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Faraday FOTG210 EHCI-like driver
*
* Copyright (c) 2013 Faraday Technology Corporation
@@ -7,20 +8,6 @@
* Po-Yu Chuang <ratbert.chuang@gmail.com>
*
* Most of code borrowed from the Linux-3.7 EHCI driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -5449,7 +5436,7 @@ idle_timeout:
qh_destroy(fotg210, qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index b5cfa7aeb277..7fcd785c7bc8 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_FOTG210_H
#define __LINUX_FOTG210_H
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index ba557cdba8ef..677f9d592109 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Setup platform devices needed by the Freescale multi-port host
* and/or dual-role USB controller modules based on the description
* in flat device tree.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index da3b18038d23..684d6f074c3a 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host Wire Adapter:
* Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* The HWA driver is a simple layer that forwards requests to the WAHC
 * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
 * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
* Controller) layers.
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
index 4f320d050da7..b964f9a51d87 100644
--- a/drivers/usb/host/imx21-dbg.c
+++ b/drivers/usb/host/imx21-dbg.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2009 by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of imx21-hcd.c */
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 39ae7fb64b6f..3a8bbfe43a8e 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Host Controller Driver for IMX21
*
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Martin Fuzzey
* Originally written by Jay Monkman <jtm@lopingdog.com>
* Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
index 05122f8a6983..7b9cf0a38d6e 100644
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Macros and prototypes for i.MX21
*
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Martin Fuzzey
* Originally written by Jay Monkman <jtm@lopingdog.com>
* Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_IMX21_HCD_H__
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 73fec38754f9..5f9234b9cf7b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ISP116x HCD (Host Controller Driver) for USB.
*
@@ -1018,6 +1019,7 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
spin_lock_irqsave(&isp116x->lock, flags);
isp116x_write_reg32(isp116x, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp116x->lock, flags);
+ /* fall through */
case C_HUB_LOCAL_POWER:
DBG("C_HUB_LOCAL_POWER\n");
break;
@@ -1433,8 +1435,10 @@ static int isp116x_bus_suspend(struct usb_hcd *hcd)
isp116x_write_reg32(isp116x, HCCONTROL,
(val & ~HCCONTROL_HCFS) |
HCCONTROL_USB_RESET);
+ /* fall through */
case HCCONTROL_USB_RESET:
ret = -EBUSY;
+ /* fall through */
default: /* HCCONTROL_USB_SUSPEND */
spin_unlock_irqrestore(&isp116x->lock, flags);
break;
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index dd34b7a33965..a5e929c10d53 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ISP116x register declarations and HCD data structures
*
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 9b7e307e2d54..b21c386e6a46 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ISP1362 HCD (Host Controller Driver) for USB.
*
@@ -712,7 +713,7 @@ static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int fl
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
- int index = epq->free_ptd;
+ int index;
prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
index = claim_ptd_buffers(epq, ep, ep->length);
@@ -1578,6 +1579,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
+ break;
case C_HUB_LOCAL_POWER:
DBG(0, "C_HUB_LOCAL_POWER\n");
break;
@@ -2251,7 +2253,6 @@ static int isp1362_mem_config(struct usb_hcd *hcd)
return -ENOMEM;
}
- total = istl_size + intl_size + atl_size;
spin_lock_irqsave(&isp1362_hcd->lock, flags);
for (i = 0; i < 2; i++) {
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 3b0b4847c3a9..da79e36ced89 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ISP1362 HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 0ece9a9341e5..afa321ab55fc 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MAX3421 Host Controller driver for USB.
*
@@ -60,6 +61,7 @@
#include <linux/spi/spi.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of.h>
#include <linux/platform_data/max3421-hcd.h>
@@ -85,6 +87,8 @@
USB_PORT_STAT_C_OVERCURRENT | \
USB_PORT_STAT_C_RESET) << 16)
+#define MAX3421_GPOUT_COUNT 8
+
enum max3421_rh_state {
MAX3421_RH_RESET,
MAX3421_RH_SUSPENDED,
@@ -1672,7 +1676,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value)
u8 mask, idx;
--pin_number;
- if (pin_number > 7)
+ if (pin_number >= MAX3421_GPOUT_COUNT)
return;
mask = 1u << (pin_number % 4);
@@ -1696,10 +1700,10 @@ max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
unsigned long flags;
int retval = 0;
- spin_lock_irqsave(&max3421_hcd->lock, flags);
-
pdata = spi->dev.platform_data;
+ spin_lock_irqsave(&max3421_hcd->lock, flags);
+
switch (type_req) {
case ClearHubFeature:
break;
@@ -1832,10 +1836,34 @@ static const struct hc_driver max3421_hcd_desc = {
};
static int
+max3421_of_vbus_en_pin(struct device *dev, struct max3421_hcd_platform_data *pdata)
+{
+ int retval;
+ uint32_t value[2];
+
+ if (!pdata)
+ return -EINVAL;
+
+ retval = of_property_read_u32_array(dev->of_node, "maxim,vbus-en-pin", value, 2);
+ if (retval) {
+ dev_err(dev, "device tree node property 'maxim,vbus-en-pin' is missing\n");
+ return retval;
+ }
+ dev_info(dev, "property 'maxim,vbus-en-pin' value is <%d %d>\n", value[0], value[1]);
+
+ pdata->vbus_gpout = value[0];
+ pdata->vbus_active_level = value[1];
+
+ return 0;
+}
+
+static int
max3421_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct max3421_hcd *max3421_hcd;
struct usb_hcd *hcd = NULL;
+ struct max3421_hcd_platform_data *pdata = NULL;
int retval = -ENOMEM;
if (spi_setup(spi) < 0) {
@@ -1843,6 +1871,41 @@ max3421_probe(struct spi_device *spi)
return -EFAULT;
}
+ if (!spi->irq) {
+ dev_err(dev, "Failed to get SPI IRQ");
+ return -EFAULT;
+ }
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ retval = -ENOMEM;
+ goto error;
+ }
+ retval = max3421_of_vbus_en_pin(dev, pdata);
+ if (retval)
+ goto error;
+
+ spi->dev.platform_data = pdata;
+ }
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ dev_err(&spi->dev, "driver configuration data is not provided\n");
+ retval = -EFAULT;
+ goto error;
+ }
+ if (pdata->vbus_active_level > 1) {
+ dev_err(&spi->dev, "vbus active level value %d is out of range (0/1)\n", pdata->vbus_active_level);
+ retval = -EINVAL;
+ goto error;
+ }
+ if (pdata->vbus_gpout < 1 || pdata->vbus_gpout > MAX3421_GPOUT_COUNT) {
+ dev_err(&spi->dev, "vbus gpout value %d is out of range (1..8)\n", pdata->vbus_gpout);
+ retval = -EINVAL;
+ goto error;
+ }
+
hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
dev_name(&spi->dev));
if (!hcd) {
@@ -1885,6 +1948,11 @@ max3421_probe(struct spi_device *spi)
return 0;
error:
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node && pdata) {
+ devm_kfree(&spi->dev, pdata);
+ spi->dev.platform_data = NULL;
+ }
+
if (hcd) {
kfree(max3421_hcd->tx);
kfree(max3421_hcd->rx);
@@ -1929,11 +1997,18 @@ max3421_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id max3421_of_match_table[] = {
+ { .compatible = "maxim,max3421", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, max3421_of_match_table);
+
static struct spi_driver max3421_driver = {
.probe = max3421_probe,
.remove = max3421_remove,
.driver = {
.name = "max3421-hcd",
+ .of_match_table = of_match_ptr(max3421_of_match_table),
},
};
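
The new of_match_table entry is wrapped in of_match_ptr(), so the reference is only wired up when device tree support is compiled in; simplified from <linux/of.h>:

        #ifdef CONFIG_OF
        #define of_match_ptr(ptr)       (ptr)
        #else
        #define of_match_ptr(ptr)       NULL
        #endif

In !CONFIG_OF builds the table itself is then unused, which some configurations may warn about unless it is marked __maybe_unused.
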
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5302f988e7e6..5ad9e9bdc8ee 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 05da2cb59612..0c507a0cfe1f 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -5,10 +6,6 @@
*
* Derived from: ohci-omap.c and ohci-s3c2410.c
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index c3eded317495..ac7d4ac34b02 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 6865b919403f..a39fae41bc70 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* SAMSUNG EXYNOS USB HOST OHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 44924824fa41..ee9676349333 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Open Host Controller Interface (OHCI) driver for USB.
*
@@ -79,7 +80,7 @@ static const char hcd_name [] = "ohci_hcd";
static void ohci_dump(struct ohci_hcd *ohci);
static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
#include "ohci-hub.c"
#include "ohci-dbg.c"
@@ -382,7 +383,7 @@ sanitize:
ed_free (ohci, ed);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
/* caller was supposed to have unlinked any requests;
* that's not our job. can't recover; must leak ed.
@@ -499,8 +500,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (ohci->hcca)
return 0;
- setup_timer(&ohci->io_watchdog, io_watchdog_func,
- (unsigned long) ohci);
+ timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -722,9 +722,9 @@ static int ohci_start(struct usb_hcd *hcd)
* the unlink list. As a result, URBs could never be dequeued and
* endpoints could never be released.
*/
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
{
- struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+ struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
/* find the last TD processed by the controller. */
- head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+ head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
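
The setup_timer()/unsigned-long-cookie pattern above is converted to the timer_setup() API, where the callback receives the struct timer_list itself and recovers its container with from_timer(); the same conversion appears in oxu210hp-hcd.c, r8a66597-hcd.c, sl811-hcd.c and uhci-hcd.c below. A self-contained sketch of the new pattern, names hypothetical:

        #include <linux/timer.h>

        struct my_hcd {
                struct timer_list io_watchdog;  /* timer embedded in the driver state */
        };

        /* New-style callback: gets the timer, derives the containing structure. */
        static void my_watchdog_func(struct timer_list *t)
        {
                struct my_hcd *hcd = from_timer(hcd, t, io_watchdog);

                /* use hcd ... */
        }

        static void my_hcd_init(struct my_hcd *hcd)
        {
                timer_setup(&hcd->io_watchdog, my_watchdog_func, 0);
        }
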
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 248eb7702463..fb7aaa3b9d06 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index ed8a762b8670..b3da3f12e5b1 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index 6df8e2ed40fd..f5f532601092 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for NXP USB Host devices
*
@@ -13,10 +14,7 @@
* NOTE: This driver does not have suspend/resume functionality
* This driver is intended for engineering development purposes only
*
- * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2005-2006 (c) MontaVista Software, Inc.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 91393ec7d850..0201c49bc4fc 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a84aebe9b0a9..fbcd34911025 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 61fe2b985070..1e6c954f4b3f 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic platform ohci driver
*
@@ -11,8 +12,6 @@
* Copyright 2000-2002 David Brownell
* Copyright 1999 Linus Torvalds
* Copyright 1999 Gregory P. Smith
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/clk.h>
@@ -34,12 +33,11 @@
#define DRIVER_DESC "OHCI generic platform driver"
#define OHCI_MAX_CLKS 3
-#define OHCI_MAX_RESETS 2
#define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
struct ohci_platform_priv {
struct clk *clks[OHCI_MAX_CLKS];
- struct reset_control *resets[OHCI_MAX_RESETS];
+ struct reset_control *resets;
struct phy **phys;
int num_phys;
};
@@ -119,7 +117,7 @@ static int ohci_platform_probe(struct platform_device *dev)
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv;
struct ohci_hcd *ohci;
- int err, irq, phy_num, clk = 0, rst = 0;
+ int err, irq, phy_num, clk = 0;
if (usb_disabled())
return -ENODEV;
@@ -204,21 +202,17 @@ static int ohci_platform_probe(struct platform_device *dev)
break;
}
}
- for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
- priv->resets[rst] =
- devm_reset_control_get_shared_by_index(
- &dev->dev, rst);
- if (IS_ERR(priv->resets[rst])) {
- err = PTR_ERR(priv->resets[rst]);
- if (err == -EPROBE_DEFER)
- goto err_reset;
- priv->resets[rst] = NULL;
- break;
- }
- err = reset_control_deassert(priv->resets[rst]);
- if (err)
- goto err_reset;
+
+ priv->resets = devm_reset_control_array_get_optional_shared(
+ &dev->dev);
+ if (IS_ERR(priv->resets)) {
+ err = PTR_ERR(priv->resets);
+ goto err_put_clks;
}
+
+ err = reset_control_deassert(priv->resets);
+ if (err)
+ goto err_put_clks;
}
if (pdata->big_endian_desc)
@@ -279,8 +273,7 @@ err_power:
pdata->power_off(dev);
err_reset:
pm_runtime_disable(&dev->dev);
- while (--rst >= 0)
- reset_control_assert(priv->resets[rst]);
+ reset_control_assert(priv->resets);
err_put_clks:
while (--clk >= 0)
clk_put(priv->clks[clk]);
@@ -298,7 +291,7 @@ static int ohci_platform_remove(struct platform_device *dev)
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
- int clk, rst;
+ int clk;
pm_runtime_get_sync(&dev->dev);
usb_remove_hcd(hcd);
@@ -306,8 +299,7 @@ static int ohci_platform_remove(struct platform_device *dev)
if (pdata->power_off)
pdata->power_off(dev);
- for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
- reset_control_assert(priv->resets[rst]);
+ reset_control_assert(priv->resets);
for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
clk_put(priv->clks[clk]);
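
The open-coded loop over OHCI_MAX_RESETS is replaced by the reset-controller array API: devm_reset_control_array_get_optional_shared() hands back one handle covering every entry in the device's "resets" property (or NULL when the property is absent), and a single deassert/assert call then operates on all of them. A hedged sketch of the consolidated pattern:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/reset.h>

        static int example_enable_resets(struct device *dev,
                                         struct reset_control **resets)
        {
                *resets = devm_reset_control_array_get_optional_shared(dev);
                if (IS_ERR(*resets))
                        return PTR_ERR(*resets);

                /* NULL (no "resets" property) is a no-op for the call below. */
                return reset_control_deassert(*resets);
        }
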
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 4f87a5c61b08..76a9b40b08f1 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 71d8bc4c27f6..20a23d795adf 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PS3 OHCI Host Controller driver
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/firmware.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 21c010ffb03c..3e2474959735 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 641fed609911..b2ec8c399363 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index b006b93126f7..4511e27e9da8 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 3a9ea32508df..ebec9a7699e3 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -42,7 +43,7 @@
#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
- unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
+ unsigned long status = readl_relaxed(hcd->regs + USB_STATUS);
printk(KERN_DEBUG "%s USB_STATUS = { %s%s%s%s%s}\n", label,
((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
@@ -134,7 +135,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
* Configure the power sense and control lines. Place the USB
* host controller in reset.
*/
- sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+ writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
@@ -144,7 +145,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
ret = sa1111_enable_device(dev);
if (ret == 0) {
udelay(11);
- sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
+ writel_relaxed(usb_rst, dev->mapbase + USB_RESET);
}
return ret;
@@ -159,8 +160,8 @@ static void sa1111_stop_hc(struct sa1111_dev *dev)
/*
* Put the USB host controller into reset.
*/
- usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
- sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
+ usb_rst = readl_relaxed(dev->mapbase + USB_RESET);
+ writel_relaxed(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
@@ -178,7 +179,7 @@ static void sa1111_stop_hc(struct sa1111_dev *dev)
static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
- int ret;
+ int ret, irq;
if (usb_disabled())
return -ENODEV;
@@ -196,6 +197,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = resource_size(&dev->res);
+ irq = sa1111_get_irq(dev, 1);
+ if (irq <= 0) {
+ ret = irq ? : -ENXIO;
+ goto err1;
+ }
+
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&dev->dev, "request_mem_region failed\n");
ret = -EBUSY;
@@ -208,7 +215,7 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
if (ret)
goto err2;
- ret = usb_add_hcd(hcd, dev->irq[1], 0);
+ ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0) {
device_wakeup_enable(hcd->self.controller);
return ret;
@@ -241,8 +248,9 @@ static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
return 0;
}
-static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
+static void ohci_hcd_sa1111_shutdown(struct device *_dev)
{
+ struct sa1111_dev *dev = to_sa1111_device(_dev);
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
@@ -255,9 +263,9 @@ static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
.owner = THIS_MODULE,
+ .shutdown = ohci_hcd_sa1111_shutdown,
},
.devid = SA1111_DEVID_USB,
.probe = ohci_hcd_sa1111_probe,
.remove = ohci_hcd_sa1111_remove,
- .shutdown = ohci_hcd_sa1111_shutdown,
};
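
Two things change above: the interrupt is now looked up with sa1111_get_irq() and validated before any resources are claimed, and the shutdown hook moves onto the embedded struct device_driver, so the driver core calls it with a plain struct device that is converted back via to_sa1111_device(). The "ret = irq ? : -ENXIO;" line uses the GNU ?: extension, which re-uses the tested value when it is non-zero; spelled out, the check is equivalent to:

        irq = sa1111_get_irq(dev, 1);
        if (irq <= 0) {
                /* forward a negative errno, map "no IRQ found" (0) to -ENXIO */
                ret = irq ? irq : -ENXIO;
                goto err1;
        }
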
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index d4e0f7cd96fa..c9233cddf9a2 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 56478ed2f932..69fa04697793 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD (Host Controller Driver) for USB.
*
@@ -5,10 +6,6 @@
* Deepak Sikri<deepak.sikri@st.com>
*
* Based on various ohci-*.c drivers
-*
-* This file is licensed under the terms of the GNU General Public
-* License version 2. This program is licensed "as is" without any
-* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 02816a1515a1..992807c9850a 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST OHCI driver
*
@@ -6,10 +7,6 @@
* Author: Peter Griffin <peter.griffin@linaro.org>
*
* Derived from ohci-platform.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index e1b208da460a..d21ca3ce9a30 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
*/
/*
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 16d081a093bb..a631dbb369d7 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OHCI HCD(Host Controller Driver) for USB.
*
@@ -18,10 +19,6 @@
* Written from sparse documentation from Toshiba and Sharp's driver
* for the 2.4 kernel,
* usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*#include <linux/fs.h>
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 12742d002d2d..508a803139dd 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* OHCI HCD (Host Controller Driver) for USB.
*
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index ed20fb34c897..c5e6e8d0b5ef 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
@@ -2552,9 +2539,9 @@ static irqreturn_t oxu_irq(struct usb_hcd *hcd)
return ret;
}
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
{
- struct oxu_hcd *oxu = (struct oxu_hcd *) param;
+ struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
@@ -2590,7 +2577,7 @@ static int oxu_hcd_init(struct usb_hcd *hcd)
spin_lock_init(&oxu->lock);
- setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+ timer_setup(&oxu->watchdog, oxu_watchdog, 0);
/*
* hw default: 1K periodic list heads, one per frame.
@@ -3040,7 +3027,7 @@ idle_timeout:
qh_put(qh);
break;
}
- /* else FALL THROUGH */
+ /* fall through */
default:
nogood:
/* caller was supposed to have unlinked any requests;
diff --git a/drivers/usb/host/oxu210hp.h b/drivers/usb/host/oxu210hp.h
index 1c216ad9aad2..437044147862 100644
--- a/drivers/usb/host/oxu210hp.h
+++ b/drivers/usb/host/oxu210hp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Host interface registers
*/
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 6dda3623a276..161536717025 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* This file contains code to reset and initialize USB host controllers.
* Some of it includes work-arounds for PCI hardware and BIOS quirks.
@@ -841,7 +842,7 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
ehci_bios_handoff(pdev, op_reg_base, cap, offset);
break;
case 0: /* Illegal reserved cap, set cap=0 so we exit */
- cap = 0; /* then fallthrough... */
+ cap = 0; /* fall through */
default:
dev_warn(&pdev->dev,
"EHCI: unrecognized capability %02x\n",
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 5582cbafecd4..b68dcb5dd0fd 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_USB_PCI_QUIRKS_H
#define __LINUX_USB_PCI_QUIRKS_H
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 5e5fc9d7d533..984892dd72f5 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 HCD (Host Controller Driver)
*
@@ -7,20 +8,6 @@
* Portions Copyright (C) 1999 Roman Weissgaerber
*
* Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -1273,7 +1260,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
break;
}
- mod_timer(&r8a66597->td_timer[td->pipenum],
+ mod_timer(&r8a66597->timers[td->pipenum].td,
jiffies + msecs_to_jiffies(time));
}
}
@@ -1733,9 +1720,10 @@ static void r8a66597_root_hub_control(struct r8a66597 *r8a66597, int port)
}
}
-static void r8a66597_interval_timer(unsigned long _r8a66597)
+static void r8a66597_interval_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597_timers *timers = from_timer(timers, t, interval);
+ struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td;
@@ -1745,7 +1733,7 @@ static void r8a66597_interval_timer(unsigned long _r8a66597)
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->interval_map & (1 << pipenum)))
continue;
- if (timer_pending(&r8a66597->interval_timer[pipenum]))
+ if (timer_pending(&r8a66597->timers[pipenum].interval))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
@@ -1756,9 +1744,10 @@ static void r8a66597_interval_timer(unsigned long _r8a66597)
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_td_timer(unsigned long _r8a66597)
+static void r8a66597_td_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597_timers *timers = from_timer(timers, t, td);
+ struct r8a66597 *r8a66597 = timers->r8a66597;
unsigned long flags;
u16 pipenum;
struct r8a66597_td *td, *new_td = NULL;
@@ -1768,7 +1757,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
for (pipenum = 0; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
if (!(r8a66597->timeout_map & (1 << pipenum)))
continue;
- if (timer_pending(&r8a66597->td_timer[pipenum]))
+ if (timer_pending(&r8a66597->timers[pipenum].td))
continue;
td = r8a66597_get_td(r8a66597, pipenum);
@@ -1809,9 +1798,9 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
unsigned long flags;
int port;
@@ -1942,7 +1931,7 @@ static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
if (request) {
if (td->pipe->info.timer_interval) {
r8a66597->interval_map |= 1 << td->pipenum;
- mod_timer(&r8a66597->interval_timer[td->pipenum],
+ mod_timer(&r8a66597->timers[td->pipenum].interval,
jiffies + msecs_to_jiffies(
td->pipe->info.timer_interval));
} else {
@@ -2483,8 +2472,7 @@ static int r8a66597_probe(struct platform_device *pdev)
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
- setup_timer(&r8a66597->rh_timer, r8a66597_timer,
- (unsigned long)r8a66597);
+ timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
@@ -2495,11 +2483,10 @@ static int r8a66597_probe(struct platform_device *pdev)
for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
INIT_LIST_HEAD(&r8a66597->pipe_queue[i]);
- setup_timer(&r8a66597->td_timer[i], r8a66597_td_timer,
- (unsigned long)r8a66597);
- setup_timer(&r8a66597->interval_timer[i],
- r8a66597_interval_timer,
- (unsigned long)r8a66597);
+ r8a66597->timers[i].r8a66597 = r8a66597;
+ timer_setup(&r8a66597->timers[i].td, r8a66597_td_timer, 0);
+ timer_setup(&r8a66597->timers[i].interval,
+ r8a66597_interval_timer, 0);
}
INIT_LIST_HEAD(&r8a66597->child_device);
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 672cea307abb..51973a923526 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* R8A66597 HCD (Host Controller Driver)
*
@@ -7,20 +8,6 @@
* Portions Copyright (C) 1999 Roman Weissgaerber
*
* Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef __R8A66597_H__
@@ -107,6 +94,14 @@ struct r8a66597_root_hub {
struct r8a66597_device *dev;
};
+struct r8a66597;
+
+struct r8a66597_timers {
+ struct timer_list td;
+ struct timer_list interval;
+ struct r8a66597 *r8a66597;
+};
+
struct r8a66597 {
spinlock_t lock;
void __iomem *reg;
@@ -117,8 +112,7 @@ struct r8a66597 {
struct list_head pipe_queue[R8A66597_MAX_NUM_PIPE];
struct timer_list rh_timer;
- struct timer_list td_timer[R8A66597_MAX_NUM_PIPE];
- struct timer_list interval_timer[R8A66597_MAX_NUM_PIPE];
+ struct r8a66597_timers timers[R8A66597_MAX_NUM_PIPE];
unsigned short address_map;
unsigned short timeout_map;
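
Because from_timer() resolves only the structure that directly embeds the timer, the two parallel per-pipe timer arrays are folded into a small struct r8a66597_timers that also carries a back-pointer to the parent controller, filled in once at probe time. A generic sketch of that pattern, with hypothetical names:

        #include <linux/jiffies.h>
        #include <linux/timer.h>

        struct child {
                struct timer_list td;
                struct parent *parent;          /* back-pointer set at init */
        };

        struct parent {
                struct child children[4];
        };

        static void child_td_timer(struct timer_list *t)
        {
                struct child *c = from_timer(c, t, td);

                /* per-child work; c->parent reaches the shared controller state */
                mod_timer(&c->td, jiffies + HZ);        /* e.g. re-arm */
        }

        static void parent_init(struct parent *p)
        {
                int i;

                for (i = 0; i < 4; i++) {
                        p->children[i].parent = p;
                        timer_setup(&p->children[i].td, child_td_timer, 0);
                }
        }
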
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 24ad1d6cec25..fa88a903fa2e 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SL811HS HCD (Host Controller Driver) for USB.
*
@@ -1118,9 +1119,9 @@ sl811h_hub_descriptor (
}
static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
{
- struct sl811 *sl811 = (void *) _sl811;
+ struct sl811 *sl811 = from_timer(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
@@ -1691,7 +1692,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
- setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+ timer_setup(&sl811->timer, sl811h_timer, 0);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
index 1e23ef49bec1..2abe51a5db44 100644
--- a/drivers/usb/host/sl811.h
+++ b/drivers/usb/host/sl811.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* SL811HS register declarations and HCD data structures
*
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 88a9bffe93df..72136373ffab 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCMCIA driver for SL811HS (as found in REX-CFU1U)
* Filename: sl811_cs.c
diff --git a/drivers/usb/host/ssb-hcd.c b/drivers/usb/host/ssb-hcd.c
index 62b6b7804c66..016987764afb 100644
--- a/drivers/usb/host/ssb-hcd.c
+++ b/drivers/usb/host/ssb-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Sonics Silicon Backplane
* Broadcom USB-core driver (SSB bus glue)
@@ -15,8 +16,6 @@
*
* Derived from the USBcore related parts of Broadcom-SB
* Copyright 2005-2011 Broadcom Corporation
- *
- * Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index c38855aed62c..032b8652910a 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Host Controller Driver for the Elan Digital Systems U132 adapter
*
@@ -7,11 +8,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
* tony.olech@elandigitalsystems.com
*
-* This program is free software;you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
-*
-*
* This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB host drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c
index 9c6635d43db0..c4e67c4b51f6 100644
--- a/drivers/usb/host/uhci-debug.c
+++ b/drivers/usb/host/uhci-debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UHCI-specific debugging code. Invaluable when something
* goes wrong, but don't get in my face.
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index 0342991c9507..2103b1ed0f8f 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC
*
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c3267a78c94e..f5c90217777a 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
@@ -584,8 +585,7 @@ static int uhci_start(struct usb_hcd *hcd)
hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
- setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
- (unsigned long) uhci);
+ timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index 91b22b2ea3aa..f1cc47292a59 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UHCI_HCD_H
#define __LINUX_UHCI_HCD_H
@@ -186,7 +187,7 @@ struct uhci_qh {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-#define qh_element(qh) ACCESS_ONCE((qh)->element)
+#define qh_element(qh) READ_ONCE((qh)->element)
#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -274,7 +275,7 @@ struct uhci_td {
* subject to asynchronous updates by the controller.
*/
#define td_status(uhci, td) hc32_to_cpu((uhci), \
- ACCESS_ONCE((td)->status))
+ READ_ONCE((td)->status))
#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
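
The ACCESS_ONCE() conversions above (and the earlier one in ohci-hcd.c) move to READ_ONCE(), the replacement that also works for non-scalar types and makes the read-once intent explicit for fields the controller updates asynchronously via DMA. A minimal sketch, with a hypothetical descriptor type:

        #include <linux/compiler.h>
        #include <linux/types.h>

        struct example_desc {
                u32 status;             /* updated by the host controller hardware */
        };

        static u32 example_read_status(struct example_desc *desc)
        {
                /* one load, no compiler re-reads or tearing of the access */
                return READ_ONCE(desc->status);
        }
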
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index ece9e37e89fe..47106dd8ca7c 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 49effdc0d857..0dd944277c99 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UHCI HCD (Host Controller Driver) PCI Bus Glue.
*
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 1b4e086c33a0..6cb16d4b2257 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic UHCI HCD (Host Controller Driver) for Platform Devices
*
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index c17ea1589b83..d40438238938 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Universal Host Controller Interface driver for USB.
*
@@ -89,9 +90,9 @@ static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
}
}
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
{
- struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+ struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
index 773249306031..c5ac9efb076a 100644
--- a/drivers/usb/host/whci/asl.c
+++ b/drivers/usb/host/whci/asl.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) asynchronous schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 774b89d28fae..f154e5791bfd 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) debug.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index cf84269c3e6d..8af9dcfea127 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) driver.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
index 6afa2e379160..22b3b7f7419d 100644
--- a/drivers/usb/host/whci/hw.c
+++ b/drivers/usb/host/whci/hw.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) hardware access helpers.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index ad8eb575c30a..82416973f773 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) initialization.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
index 0c086b2790d1..7e4ad1b8f3e3 100644
--- a/drivers/usb/host/whci/int.c
+++ b/drivers/usb/host/whci/int.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) interrupt handling.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb/umc.h>
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
index 33c5580b4d25..bb84366f7bd3 100644
--- a/drivers/usb/host/whci/pzl.c
+++ b/drivers/usb/host/whci/pzl.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) periodic schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index c0e6812426b3..925166a207aa 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) qset management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index c80c7d93bc4a..139476997e7c 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) private header.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef __WHCD_H
#define __WHCD_H
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index 4d4cbc0730bf..5a86a57a80cc 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) data structures.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
#ifndef _WHCI_WHCI_HC_H
#define _WHCI_WHCI_HC_H
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
index 8d2762682869..8a4d805ff63a 100644
--- a/drivers/usb/host/whci/wusb.c
+++ b/drivers/usb/host/whci/wusb.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless Host Controller (WHC) WUSB operations.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb/umc.h>
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2c83b37ae8f2..584d7b9a3683 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "xhci.h"
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
new file mode 100644
index 000000000000..4f7895dbcf88
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-debugfs.c - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#include <linux/slab.h>
+
+#include "xhci.h"
+#include "xhci-debugfs.h"
+
+static const struct debugfs_reg32 xhci_cap_regs[] = {
+ dump_register(CAPLENGTH),
+ dump_register(HCSPARAMS1),
+ dump_register(HCSPARAMS2),
+ dump_register(HCSPARAMS3),
+ dump_register(HCCPARAMS1),
+ dump_register(DOORBELLOFF),
+ dump_register(RUNTIMEOFF),
+ dump_register(HCCPARAMS2),
+};
+
+static const struct debugfs_reg32 xhci_op_regs[] = {
+ dump_register(USBCMD),
+ dump_register(USBSTS),
+ dump_register(PAGESIZE),
+ dump_register(DNCTRL),
+ dump_register(CRCR),
+ dump_register(DCBAAP_LOW),
+ dump_register(DCBAAP_HIGH),
+ dump_register(CONFIG),
+};
+
+static const struct debugfs_reg32 xhci_runtime_regs[] = {
+ dump_register(MFINDEX),
+ dump_register(IR0_IMAN),
+ dump_register(IR0_IMOD),
+ dump_register(IR0_ERSTSZ),
+ dump_register(IR0_ERSTBA_LOW),
+ dump_register(IR0_ERSTBA_HIGH),
+ dump_register(IR0_ERDP_LOW),
+ dump_register(IR0_ERDP_HIGH),
+};
+
+static const struct debugfs_reg32 xhci_extcap_legsup[] = {
+ dump_register(EXTCAP_USBLEGSUP),
+ dump_register(EXTCAP_USBLEGCTLSTS),
+};
+
+static const struct debugfs_reg32 xhci_extcap_protocol[] = {
+ dump_register(EXTCAP_REVISION),
+ dump_register(EXTCAP_NAME),
+ dump_register(EXTCAP_PORTINFO),
+ dump_register(EXTCAP_PORTTYPE),
+ dump_register(EXTCAP_MANTISSA1),
+ dump_register(EXTCAP_MANTISSA2),
+ dump_register(EXTCAP_MANTISSA3),
+ dump_register(EXTCAP_MANTISSA4),
+ dump_register(EXTCAP_MANTISSA5),
+ dump_register(EXTCAP_MANTISSA6),
+};
+
+static const struct debugfs_reg32 xhci_extcap_dbc[] = {
+ dump_register(EXTCAP_DBC_CAPABILITY),
+ dump_register(EXTCAP_DBC_DOORBELL),
+ dump_register(EXTCAP_DBC_ERSTSIZE),
+ dump_register(EXTCAP_DBC_ERST_LOW),
+ dump_register(EXTCAP_DBC_ERST_HIGH),
+ dump_register(EXTCAP_DBC_ERDP_LOW),
+ dump_register(EXTCAP_DBC_ERDP_HIGH),
+ dump_register(EXTCAP_DBC_CONTROL),
+ dump_register(EXTCAP_DBC_STATUS),
+ dump_register(EXTCAP_DBC_PORTSC),
+ dump_register(EXTCAP_DBC_CONT_LOW),
+ dump_register(EXTCAP_DBC_CONT_HIGH),
+ dump_register(EXTCAP_DBC_DEVINFO1),
+ dump_register(EXTCAP_DBC_DEVINFO2),
+};
+
+static struct dentry *xhci_debugfs_root;
+
+static struct xhci_regset *xhci_debugfs_alloc_regset(struct xhci_hcd *xhci)
+{
+ struct xhci_regset *regset;
+
+ regset = kzalloc(sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return NULL;
+
+	/*
+	 * Regsets are allocated and freed strictly in order, so no
+	 * locking is needed here.
+	 */
+ INIT_LIST_HEAD(&regset->list);
+ list_add_tail(&regset->list, &xhci->regset_list);
+
+ return regset;
+}
+
+static void xhci_debugfs_free_regset(struct xhci_regset *regset)
+{
+ if (!regset)
+ return;
+
+ list_del(&regset->list);
+ kfree(regset);
+}
+
+static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
+ const struct debugfs_reg32 *regs,
+ size_t nregs, struct dentry *parent,
+ const char *fmt, ...)
+{
+ struct xhci_regset *rgs;
+ va_list args;
+ struct debugfs_regset32 *regset;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
+ rgs = xhci_debugfs_alloc_regset(xhci);
+ if (!rgs)
+ return;
+
+ va_start(args, fmt);
+ vsnprintf(rgs->name, sizeof(rgs->name), fmt, args);
+ va_end(args);
+
+ regset = &rgs->regset;
+ regset->regs = regs;
+ regset->nregs = nregs;
+ regset->base = hcd->regs + base;
+
+ debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
+}
+
+static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
+ const struct debugfs_reg32 *regs,
+ size_t n, const char *cap_name)
+{
+ u32 offset;
+ int index = 0;
+ size_t psic, nregs = n;
+ void __iomem *base = &xhci->cap_regs->hc_capbase;
+
+ offset = xhci_find_next_ext_cap(base, 0, cap_id);
+ while (offset) {
+ if (cap_id == XHCI_EXT_CAPS_PROTOCOL) {
+ psic = XHCI_EXT_PORT_PSIC(readl(base + offset + 8));
+ nregs = min(4 + psic, n);
+ }
+
+ xhci_debugfs_regset(xhci, offset, regs, nregs,
+ xhci->debugfs_root, "%s:%02d",
+ cap_name, index);
+ offset = xhci_find_next_ext_cap(base, offset, cap_id);
+ index++;
+ }
+}
+
+static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
+{
+ dma_addr_t dma;
+ struct xhci_ring *ring = s->private;
+
+ dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+ seq_printf(s, "%pad\n", &dma);
+
+ return 0;
+}
+
+static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
+{
+ dma_addr_t dma;
+ struct xhci_ring *ring = s->private;
+
+ dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ seq_printf(s, "%pad\n", &dma);
+
+ return 0;
+}
+
+static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
+{
+ struct xhci_ring *ring = s->private;
+
+ seq_printf(s, "%d\n", ring->cycle_state);
+
+ return 0;
+}
+
+static void xhci_ring_dump_segment(struct seq_file *s,
+ struct xhci_segment *seg)
+{
+ int i;
+ dma_addr_t dma;
+ union xhci_trb *trb;
+
+ for (i = 0; i < TRBS_PER_SEGMENT; i++) {
+ trb = &seg->trbs[i];
+ dma = seg->dma + i * sizeof(*trb);
+ seq_printf(s, "%pad: %s\n", &dma,
+ xhci_decode_trb(trb->generic.field[0],
+ trb->generic.field[1],
+ trb->generic.field[2],
+ trb->generic.field[3]));
+ }
+}
+
+static int xhci_ring_trb_show(struct seq_file *s, void *unused)
+{
+ int i;
+ struct xhci_ring *ring = s->private;
+ struct xhci_segment *seg = ring->first_seg;
+
+ for (i = 0; i < ring->num_segs; i++) {
+ xhci_ring_dump_segment(s, seg);
+ seg = seg->next;
+ }
+
+ return 0;
+}
+
+static struct xhci_file_map ring_files[] = {
+ {"enqueue", xhci_ring_enqueue_show, },
+ {"dequeue", xhci_ring_dequeue_show, },
+ {"cycle", xhci_ring_cycle_show, },
+ {"trbs", xhci_ring_trb_show, },
+};
+
+static int xhci_ring_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(ring_files); i++) {
+ f_map = &ring_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_ring_fops = {
+ .open = xhci_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int xhci_slot_context_show(struct seq_file *s, void *unused)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_slot_ctx *slot_ctx;
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+ slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+ seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
+ xhci_decode_slot_context(slot_ctx->dev_info,
+ slot_ctx->dev_info2,
+ slot_ctx->tt_info,
+ slot_ctx->dev_state));
+
+ return 0;
+}
+
+static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
+{
+ int dci;
+ dma_addr_t dma;
+ struct xhci_hcd *xhci;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
+
+ for (dci = 1; dci < 32; dci++) {
+ ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
+ dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
+ seq_printf(s, "%pad: %s\n", &dma,
+ xhci_decode_ep_context(ep_ctx->ep_info,
+ ep_ctx->ep_info2,
+ ep_ctx->deq,
+ ep_ctx->tx_info));
+ }
+
+ return 0;
+}
+
+static int xhci_device_name_show(struct seq_file *s, void *unused)
+{
+ struct xhci_slot_priv *priv = s->private;
+ struct xhci_virt_device *dev = priv->dev;
+
+ seq_printf(s, "%s\n", dev_name(&dev->udev->dev));
+
+ return 0;
+}
+
+static struct xhci_file_map context_files[] = {
+ {"name", xhci_device_name_show, },
+ {"slot-context", xhci_slot_context_show, },
+ {"ep-context", xhci_endpoint_context_show, },
+};
+
+static int xhci_context_open(struct inode *inode, struct file *file)
+{
+ int i;
+ struct xhci_file_map *f_map;
+ const char *file_name = file_dentry(file)->d_iname;
+
+ for (i = 0; i < ARRAY_SIZE(context_files); i++) {
+ f_map = &context_files[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations xhci_context_fops = {
+ .open = xhci_context_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
+ struct xhci_file_map *files,
+ size_t nentries, void *data,
+ struct dentry *parent,
+ const struct file_operations *fops)
+{
+ int i;
+
+ for (i = 0; i < nentries; i++)
+ debugfs_create_file(files[i].name, 0444, parent, data, fops);
+}
+
+static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
+ struct xhci_ring *ring,
+ const char *name,
+ struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(name, parent);
+ xhci_debugfs_create_files(xhci, ring_files, ARRAY_SIZE(ring_files),
+ ring, dir, &xhci_ring_fops);
+
+ return dir;
+}
+
+static void xhci_debugfs_create_context_files(struct xhci_hcd *xhci,
+ struct dentry *parent,
+ int slot_id)
+{
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ xhci_debugfs_create_files(xhci, context_files,
+ ARRAY_SIZE(context_files),
+ dev->debugfs_private,
+ parent, &xhci_context_fops);
+}
+
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ int ep_index)
+{
+ struct xhci_ep_priv *epriv;
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+
+ if (spriv->eps[ep_index])
+ return;
+
+ epriv = kzalloc(sizeof(*epriv), GFP_KERNEL);
+ if (!epriv)
+ return;
+
+ snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
+ epriv->root = xhci_debugfs_create_ring_dir(xhci,
+ dev->eps[ep_index].new_ring,
+ epriv->name,
+ spriv->root);
+ spriv->eps[ep_index] = epriv;
+}
+
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ int ep_index)
+{
+ struct xhci_ep_priv *epriv;
+ struct xhci_slot_priv *spriv = dev->debugfs_private;
+
+ if (!spriv || !spriv->eps[ep_index])
+ return;
+
+ epriv = spriv->eps[ep_index];
+ debugfs_remove_recursive(epriv->root);
+ spriv->eps[ep_index] = NULL;
+ kfree(epriv);
+}
+
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ struct xhci_slot_priv *priv;
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return;
+
+ snprintf(priv->name, sizeof(priv->name), "%02d", slot_id);
+ priv->root = debugfs_create_dir(priv->name, xhci->debugfs_slots);
+ priv->dev = dev;
+ dev->debugfs_private = priv;
+
+ xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+ "ep00", priv->root);
+
+ xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
+}
+
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id)
+{
+ int i;
+ struct xhci_slot_priv *priv;
+ struct xhci_virt_device *dev = xhci->devs[slot_id];
+
+ if (!dev || !dev->debugfs_private)
+ return;
+
+ priv = dev->debugfs_private;
+
+ debugfs_remove_recursive(priv->root);
+
+ for (i = 0; i < 31; i++)
+ kfree(priv->eps[i]);
+
+ kfree(priv);
+ dev->debugfs_private = NULL;
+}
+
+void xhci_debugfs_init(struct xhci_hcd *xhci)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+ xhci->debugfs_root = debugfs_create_dir(dev_name(dev),
+ xhci_debugfs_root);
+
+ INIT_LIST_HEAD(&xhci->regset_list);
+
+ xhci_debugfs_regset(xhci,
+ 0,
+ xhci_cap_regs, ARRAY_SIZE(xhci_cap_regs),
+ xhci->debugfs_root, "reg-cap");
+
+ xhci_debugfs_regset(xhci,
+ HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)),
+ xhci_op_regs, ARRAY_SIZE(xhci_op_regs),
+ xhci->debugfs_root, "reg-op");
+
+ xhci_debugfs_regset(xhci,
+ readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK,
+ xhci_runtime_regs, ARRAY_SIZE(xhci_runtime_regs),
+ xhci->debugfs_root, "reg-runtime");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_LEGACY,
+ xhci_extcap_legsup,
+ ARRAY_SIZE(xhci_extcap_legsup),
+ "reg-ext-legsup");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_PROTOCOL,
+ xhci_extcap_protocol,
+ ARRAY_SIZE(xhci_extcap_protocol),
+ "reg-ext-protocol");
+
+ xhci_debugfs_extcap_regset(xhci, XHCI_EXT_CAPS_DEBUG,
+ xhci_extcap_dbc,
+ ARRAY_SIZE(xhci_extcap_dbc),
+ "reg-ext-dbc");
+
+ xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+ "command-ring",
+ xhci->debugfs_root);
+
+ xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+ "event-ring",
+ xhci->debugfs_root);
+
+ xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root);
+}
+
+void xhci_debugfs_exit(struct xhci_hcd *xhci)
+{
+ struct xhci_regset *rgs, *tmp;
+
+ debugfs_remove_recursive(xhci->debugfs_root);
+ xhci->debugfs_root = NULL;
+ xhci->debugfs_slots = NULL;
+
+ list_for_each_entry_safe(rgs, tmp, &xhci->regset_list, list)
+ xhci_debugfs_free_regset(rgs);
+}
+
+void __init xhci_debugfs_create_root(void)
+{
+ xhci_debugfs_root = debugfs_create_dir("xhci", usb_debug_root);
+}
+
+void __exit xhci_debugfs_remove_root(void)
+{
+ debugfs_remove_recursive(xhci_debugfs_root);
+ xhci_debugfs_root = NULL;
+}
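
The ring and context files created above rely on the standard seq_file/single_open pattern. Here is a minimal, self-contained sketch of that pattern — not part of the patch, all names are illustrative — showing how data handed to debugfs_create_file() reaches the show() callback:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Data passed to debugfs_create_file() arrives as inode->i_private and,
 * after single_open(), as seq_file->private in the show() callback.
 */
static int example_show(struct seq_file *s, void *unused)
{
	unsigned int *value = s->private;

	seq_printf(s, "%u\n", *value);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static unsigned int example_value = 42;

static void example_create(struct dentry *parent)
{
	/* Read-only file: reading it prints "42" */
	debugfs_create_file("example", 0444, parent, &example_value,
			    &example_fops);
}

xhci_ring_open() and xhci_context_open() add one indirection on top of this: they look up the show() callback by file name in a small table, so a single file_operations serves several files per directory.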
diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h
new file mode 100644
index 000000000000..ac5bc40f5c3a
--- /dev/null
+++ b/drivers/usb/host/xhci-debugfs.h
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-debugfs.h - xHCI debugfs interface
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ */
+
+#ifndef __LINUX_XHCI_DEBUGFS_H
+#define __LINUX_XHCI_DEBUGFS_H
+
+#include <linux/debugfs.h>
+
+#define DEBUGFS_NAMELEN 32
+
+#define REG_CAPLENGTH 0x00
+#define REG_HCSPARAMS1 0x04
+#define REG_HCSPARAMS2 0x08
+#define REG_HCSPARAMS3 0x0c
+#define REG_HCCPARAMS1 0x10
+#define REG_DOORBELLOFF 0x14
+#define REG_RUNTIMEOFF 0x18
+#define REG_HCCPARAMS2 0x1c
+
+#define REG_USBCMD 0x00
+#define REG_USBSTS 0x04
+#define REG_PAGESIZE 0x08
+#define REG_DNCTRL 0x14
+#define REG_CRCR 0x18
+#define REG_DCBAAP_LOW 0x30
+#define REG_DCBAAP_HIGH 0x34
+#define REG_CONFIG 0x38
+
+#define REG_MFINDEX 0x00
+#define REG_IR0_IMAN 0x20
+#define REG_IR0_IMOD 0x24
+#define REG_IR0_ERSTSZ 0x28
+#define REG_IR0_ERSTBA_LOW 0x30
+#define REG_IR0_ERSTBA_HIGH 0x34
+#define REG_IR0_ERDP_LOW 0x38
+#define REG_IR0_ERDP_HIGH 0x3c
+
+#define REG_EXTCAP_USBLEGSUP 0x00
+#define REG_EXTCAP_USBLEGCTLSTS 0x04
+
+#define REG_EXTCAP_REVISION 0x00
+#define REG_EXTCAP_NAME 0x04
+#define REG_EXTCAP_PORTINFO 0x08
+#define REG_EXTCAP_PORTTYPE 0x0c
+#define REG_EXTCAP_MANTISSA1 0x10
+#define REG_EXTCAP_MANTISSA2 0x14
+#define REG_EXTCAP_MANTISSA3 0x18
+#define REG_EXTCAP_MANTISSA4 0x1c
+#define REG_EXTCAP_MANTISSA5 0x20
+#define REG_EXTCAP_MANTISSA6 0x24
+
+#define REG_EXTCAP_DBC_CAPABILITY 0x00
+#define REG_EXTCAP_DBC_DOORBELL 0x04
+#define REG_EXTCAP_DBC_ERSTSIZE 0x08
+#define REG_EXTCAP_DBC_ERST_LOW 0x10
+#define REG_EXTCAP_DBC_ERST_HIGH 0x14
+#define REG_EXTCAP_DBC_ERDP_LOW 0x18
+#define REG_EXTCAP_DBC_ERDP_HIGH 0x1c
+#define REG_EXTCAP_DBC_CONTROL 0x20
+#define REG_EXTCAP_DBC_STATUS 0x24
+#define REG_EXTCAP_DBC_PORTSC 0x28
+#define REG_EXTCAP_DBC_CONT_LOW 0x30
+#define REG_EXTCAP_DBC_CONT_HIGH 0x34
+#define REG_EXTCAP_DBC_DEVINFO1 0x38
+#define REG_EXTCAP_DBC_DEVINFO2 0x3c
+
+#define dump_register(nm) \
+{ \
+ .name = __stringify(nm), \
+ .offset = REG_ ##nm, \
+}
+
+struct xhci_regset {
+ char name[DEBUGFS_NAMELEN];
+ struct debugfs_regset32 regset;
+ size_t nregs;
+ struct dentry *parent;
+ struct list_head list;
+};
+
+struct xhci_file_map {
+ const char *name;
+ int (*show)(struct seq_file *s, void *unused);
+};
+
+struct xhci_ep_priv {
+ char name[DEBUGFS_NAMELEN];
+ struct dentry *root;
+};
+
+struct xhci_slot_priv {
+ char name[DEBUGFS_NAMELEN];
+ struct dentry *root;
+ struct xhci_ep_priv *eps[31];
+ struct xhci_virt_device *dev;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void xhci_debugfs_init(struct xhci_hcd *xhci);
+void xhci_debugfs_exit(struct xhci_hcd *xhci);
+void __init xhci_debugfs_create_root(void);
+void __exit xhci_debugfs_remove_root(void);
+void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_remove_slot(struct xhci_hcd *xhci, int slot_id);
+void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index);
+void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index);
+#else
+static inline void xhci_debugfs_init(struct xhci_hcd *xhci) { }
+static inline void xhci_debugfs_exit(struct xhci_hcd *xhci) { }
+static inline void __init xhci_debugfs_create_root(void) { }
+static inline void __exit xhci_debugfs_remove_root(void) { }
+static inline void xhci_debugfs_create_slot(struct xhci_hcd *x, int s) { }
+static inline void xhci_debugfs_remove_slot(struct xhci_hcd *x, int s) { }
+static inline void
+xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index) { }
+static inline void
+xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ int ep_index) { }
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* __LINUX_XHCI_DEBUGFS_H */
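
The dump_register() macro above only fills in one struct debugfs_reg32 entry; the registers themselves are exported through the kernel's regset helper. A hypothetical sketch (illustrative names, not taken from the patch) of how such an array feeds debugfs_create_regset32():

#include <linux/debugfs.h>
#include <linux/kernel.h>

/* Equivalent to dump_register(USBCMD) and dump_register(USBSTS) with
 * REG_USBCMD == 0x00 and REG_USBSTS == 0x04.
 */
static const struct debugfs_reg32 example_regs[] = {
	{ .name = "USBCMD", .offset = 0x00 },
	{ .name = "USBSTS", .offset = 0x04 },
};

static struct debugfs_regset32 example_regset;

static void example_create_regset(void __iomem *mmio, struct dentry *parent)
{
	example_regset.regs  = example_regs;
	example_regset.nregs = ARRAY_SIZE(example_regs);
	example_regset.base  = mmio;	/* register offsets are relative to this base */

	/* Reading the file dumps one "NAME = 0x%08x" line per register */
	debugfs_create_regset32("reg-op", 0444, parent, &example_regset);
}

xhci_debugfs_regset() wraps exactly this, allocating each regset on a per-controller list so it can be freed again in xhci_debugfs_exit().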
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 28deea584884..bf7316e130d3 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Up to 16 ms to halt an HC */
#define XHCI_MAX_HALT_USEC (16*1000)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a2336deb5e36..2a90229be7a6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -634,7 +622,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all slots\n");
spin_unlock_irqrestore(&xhci->lock, *flags);
for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
- retval = xhci_disable_slot(xhci, NULL, i);
+ if (!xhci->devs[i])
+ continue;
+
+ retval = xhci_disable_slot(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
@@ -1374,6 +1365,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
+ /* fall through */
case USB_PORT_FEAT_C_RESET:
case USB_PORT_FEAT_C_BH_PORT_RESET:
case USB_PORT_FEAT_C_CONNECTION:
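
Several hunks in this patch add "/* fall through */" annotations. As a small illustrative sketch (invented names, not from the driver): the comment documents, for both readers and GCC's -Wimplicit-fallthrough checking, that the missing break is intentional.

#include <linux/errno.h>

static int example_clear_feature(unsigned int feature,
				 unsigned int *suspend_change,
				 unsigned int *change_bits)
{
	switch (feature) {
	case 1:	/* e.g. a "clear suspend change" request */
		*suspend_change = 0;
		/* fall through */	/* the generic change bit is cleared too */
	case 2:	/* e.g. a "clear reset change" request */
		*change_bits = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}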
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 2a82c927ded2..e1fba4688509 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
@@ -28,6 +16,7 @@
#include "xhci.h"
#include "xhci-trace.h"
+#include "xhci-debugfs.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -465,8 +454,6 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return 0;
}
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
@@ -801,8 +788,8 @@ void xhci_free_stream_info(struct xhci_hcd *xhci,
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
- setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
- (unsigned long)ep);
+ timer_setup(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
+ 0);
ep->xhci = xhci;
}
@@ -961,6 +948,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
}
}
/* we are now at a leaf device */
+ xhci_debugfs_remove_slot(xhci, slot_id);
xhci_free_virt_device(xhci, slot_id);
}
@@ -1311,6 +1299,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* since it uses the same rules as low speed interrupt
* endpoints.
*/
+ /* fall through */
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
@@ -1496,7 +1485,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
EP_AVG_TRB_LENGTH(avg_trb_len));
- /* FIXME Debug endpoint context */
return 0;
}
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 6e7ddf6cafae..eea7360a18fc 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Zhigang.Wei <zhigang.wei@mediatek.com>
* Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/kernel.h>
@@ -287,12 +278,13 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(mtk->hcd);
struct mu3h_sch_bw_info *sch_array;
int num_usb_bus;
int i;
/* ss IN and OUT are separated */
- num_usb_bus = mtk->num_u3_ports * 2 + mtk->num_u2_ports;
+ num_usb_bus = xhci->num_usb3_ports * 2 + xhci->num_usb2_ports;
sch_array = kcalloc(num_usb_bus, sizeof(*sch_array), GFP_KERNEL);
if (sch_array == NULL)
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 8fb60657ed4f..b62a1d23244b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MediaTek xHCI Host Controller Driver
*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -43,6 +34,7 @@
/* ip_pw_sts1 register */
#define STS1_IP_SLEEP_STS BIT(30)
+#define STS1_U3_MAC_RST BIT(16)
#define STS1_XHCI_RST BIT(11)
#define STS1_SYS125_RST BIT(10)
#define STS1_REF_RST BIT(8)
@@ -91,6 +83,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
{
struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
u32 value, check_val;
+	int u3_ports_disabled = 0;
int ret;
int i;
@@ -102,8 +95,13 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
value &= ~CTRL1_IP_HOST_PDN;
writel(value, &ippc->ip_pw_ctr1);
- /* power on and enable all u3 ports */
+ /* power on and enable u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
+ if ((0x1 << i) & mtk->u3p_dis_msk) {
+			u3_ports_disabled++;
+ continue;
+ }
+
value = readl(&ippc->u3_ctrl_p[i]);
value &= ~(CTRL_U3_PORT_PDN | CTRL_U3_PORT_DIS);
value |= CTRL_U3_PORT_HOST_SEL;
@@ -125,6 +123,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
check_val = STS1_SYSPLL_STABLE | STS1_REF_RST |
STS1_SYS125_RST | STS1_XHCI_RST;
+	if (mtk->num_u3_ports > u3_ports_disabled)
+ check_val |= STS1_U3_MAC_RST;
+
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(check_val == (value & check_val)), 100, 20000);
if (ret) {
@@ -145,8 +146,11 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
if (!mtk->has_ippc)
return 0;
- /* power down all u3 ports */
+ /* power down u3 ports except skipped ones */
for (i = 0; i < mtk->num_u3_ports; i++) {
+ if ((0x1 << i) & mtk->u3p_dis_msk)
+ continue;
+
value = readl(&ippc->u3_ctrl_p[i]);
value |= CTRL_U3_PORT_PDN;
writel(value, &ippc->u3_ctrl_p[i]);
@@ -208,6 +212,41 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
return xhci_mtk_host_enable(mtk);
}
+/* ignore the error if the clock does not exist */
+static struct clk *optional_clk_get(struct device *dev, const char *id)
+{
+ struct clk *opt_clk;
+
+ opt_clk = devm_clk_get(dev, id);
+	/* ignore any error other than EPROBE_DEFER */
+ if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
+ opt_clk = NULL;
+
+ return opt_clk;
+}
+
+static int xhci_mtk_clks_get(struct xhci_hcd_mtk *mtk)
+{
+ struct device *dev = mtk->dev;
+
+ mtk->sys_clk = devm_clk_get(dev, "sys_ck");
+ if (IS_ERR(mtk->sys_clk)) {
+ dev_err(dev, "fail to get sys_ck\n");
+ return PTR_ERR(mtk->sys_clk);
+ }
+
+ mtk->ref_clk = optional_clk_get(dev, "ref_ck");
+ if (IS_ERR(mtk->ref_clk))
+ return PTR_ERR(mtk->ref_clk);
+
+ mtk->mcu_clk = optional_clk_get(dev, "mcu_ck");
+ if (IS_ERR(mtk->mcu_clk))
+ return PTR_ERR(mtk->mcu_clk);
+
+ mtk->dma_clk = optional_clk_get(dev, "dma_ck");
+ return PTR_ERR_OR_ZERO(mtk->dma_clk);
+}
+
static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
{
int ret;
@@ -224,37 +263,34 @@ static int xhci_mtk_clks_enable(struct xhci_hcd_mtk *mtk)
goto sys_clk_err;
}
- if (mtk->wakeup_src) {
- ret = clk_prepare_enable(mtk->wk_deb_p0);
- if (ret) {
- dev_err(mtk->dev, "failed to enable wk_deb_p0\n");
- goto usb_p0_err;
- }
+ ret = clk_prepare_enable(mtk->mcu_clk);
+ if (ret) {
+ dev_err(mtk->dev, "failed to enable mcu_clk\n");
+ goto mcu_clk_err;
+ }
- ret = clk_prepare_enable(mtk->wk_deb_p1);
- if (ret) {
- dev_err(mtk->dev, "failed to enable wk_deb_p1\n");
- goto usb_p1_err;
- }
+ ret = clk_prepare_enable(mtk->dma_clk);
+ if (ret) {
+ dev_err(mtk->dev, "failed to enable dma_clk\n");
+ goto dma_clk_err;
}
+
return 0;
-usb_p1_err:
- clk_disable_unprepare(mtk->wk_deb_p0);
-usb_p0_err:
+dma_clk_err:
+ clk_disable_unprepare(mtk->mcu_clk);
+mcu_clk_err:
clk_disable_unprepare(mtk->sys_clk);
sys_clk_err:
clk_disable_unprepare(mtk->ref_clk);
ref_clk_err:
- return -EINVAL;
+ return ret;
}
static void xhci_mtk_clks_disable(struct xhci_hcd_mtk *mtk)
{
- if (mtk->wakeup_src) {
- clk_disable_unprepare(mtk->wk_deb_p1);
- clk_disable_unprepare(mtk->wk_deb_p0);
- }
+ clk_disable_unprepare(mtk->dma_clk);
+ clk_disable_unprepare(mtk->mcu_clk);
clk_disable_unprepare(mtk->sys_clk);
clk_disable_unprepare(mtk->ref_clk);
}
@@ -358,18 +394,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
if (!mtk->wakeup_src)
return 0;
- mtk->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
- if (IS_ERR(mtk->wk_deb_p0)) {
- dev_err(dev, "fail to get wakeup_deb_p0\n");
- return PTR_ERR(mtk->wk_deb_p0);
- }
-
- mtk->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
- if (IS_ERR(mtk->wk_deb_p1)) {
- dev_err(dev, "fail to get wakeup_deb_p1\n");
- return PTR_ERR(mtk->wk_deb_p1);
- }
-
mtk->pericfg = syscon_regmap_lookup_by_phandle(dn,
"mediatek,syscon-wakeup");
if (IS_ERR(mtk->pericfg)) {
@@ -492,7 +516,6 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
/* called during probe() after chip reset completes */
static int xhci_mtk_setup(struct usb_hcd *hcd)
{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
int ret;
@@ -507,8 +530,6 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
return ret;
if (usb_hcd_is_primary_hcd(hcd)) {
- mtk->num_u3_ports = xhci->num_usb3_ports;
- mtk->num_u2_ports = xhci->num_usb2_ports;
ret = xhci_mtk_sch_init(mtk);
if (ret)
return ret;
@@ -552,26 +573,14 @@ static int xhci_mtk_probe(struct platform_device *pdev)
return PTR_ERR(mtk->vusb33);
}
- mtk->sys_clk = devm_clk_get(dev, "sys_ck");
- if (IS_ERR(mtk->sys_clk)) {
- dev_err(dev, "fail to get sys_ck\n");
- return PTR_ERR(mtk->sys_clk);
- }
-
- /*
- * reference clock is usually a "fixed-clock", make it optional
- * for backward compatibility and ignore the error if it does
- * not exist.
- */
- mtk->ref_clk = devm_clk_get(dev, "ref_ck");
- if (IS_ERR(mtk->ref_clk)) {
- if (PTR_ERR(mtk->ref_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- mtk->ref_clk = NULL;
- }
+ ret = xhci_mtk_clks_get(mtk);
+ if (ret)
+ return ret;
mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
+	/* optional property; ignore any error if it is absent */
+ of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ &mtk->u3p_dis_msk);
ret = usb_wakeup_of_property_parse(mtk, node);
if (ret)
@@ -606,15 +615,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
}
/* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret)
goto disable_clk;
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- else
- dma_set_mask(dev, DMA_BIT_MASK(32));
-
hcd = usb_create_hcd(driver, dev, dev_name(dev));
if (!hcd) {
ret = -ENOMEM;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 3aa5e1d25064..6b74ce5b7564 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015 MediaTek Inc.
* Author:
* Zhigang.Wei <zhigang.wei@mediatek.com>
* Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _XHCI_MTK_H_
@@ -121,12 +112,13 @@ struct xhci_hcd_mtk {
bool has_ippc;
int num_u2_ports;
int num_u3_ports;
+ int u3p_dis_msk;
struct regulator *vusb33;
struct regulator *vbus;
struct clk *sys_clk; /* sys and mac clock */
struct clk *ref_clk;
- struct clk *wk_deb_p0; /* port0's wakeup debounce clock */
- struct clk *wk_deb_p1;
+ struct clk *mcu_clk;
+ struct clk *dma_clk;
struct regmap *pericfg;
struct phy **phys;
int num_phys;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 85908a3ecb8f..32e158568788 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell
* Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/io.h>
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 301fc984cae6..09791df2cec0 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Marvell
*
* Gregory Clement <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef __LINUX_XHCI_MVEBU_H
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 76f392954733..7ef1274ef7f7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver PCI Bus Glue.
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1cb6eaef4ae1..09f164f8cf8c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-plat.c - xHCI host controller driver platform Bus Glue.
*
@@ -5,10 +6,6 @@
* Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A lot of code borrowed from the Linux xHCI driver.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/clk.h>
@@ -16,6 +13,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
@@ -152,7 +150,7 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static int xhci_plat_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
+ const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
struct device *sysdev;
struct xhci_hcd *xhci;
@@ -242,9 +240,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
}
xhci = hcd_to_xhci(hcd);
- match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
- if (match) {
- const struct xhci_plat_priv *priv_match = match->data;
+ priv_match = of_device_get_match_data(&pdev->dev);
+ if (priv_match) {
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
@@ -263,6 +260,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
+ if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
+ xhci->quirks |= XHCI_HW_LPM_DISABLE;
+
if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
xhci->quirks |= XHCI_LPM_SUPPORT;
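
xhci_plat_probe() above switches from of_match_node() to of_device_get_match_data(). A minimal sketch of the pattern, assuming a hypothetical driver and compatible string (none of these names appear in the patch):

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_priv {
	unsigned int quirks;
};

static const struct example_priv example_soc_priv = {
	.quirks = 0x1,
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-xhci", .data = &example_soc_priv },
	{ /* sentinel */ },
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_priv *priv;

	/* One call replaces of_match_node() plus the match->data lookup */
	priv = of_device_get_match_data(&pdev->dev);
	if (priv)
		dev_info(&pdev->dev, "quirks: 0x%x\n", priv->quirks);

	return 0;
}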
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 29b227895b07..ae29f22ff5bd 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xhci-plat.h - xHCI host controller driver platform Bus Glue.
*
* Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _XHCI_PLAT_H
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 198bc188ab25..f0b559660007 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver for R-Car SoCs
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/firmware.h>
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index d247951147a1..804b6ab4246f 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/host/xhci-rcar.h
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _XHCI_RCAR_H
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 82c746e2d85c..c239c688076c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -171,13 +159,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
ring->dequeue++;
- return;
+ goto out;
}
if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
ring->cycle_state ^= 1;
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- return;
+ goto out;
}
/* All other rings have link trbs */
@@ -190,6 +178,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
ring->dequeue = ring->deq_seg->trbs;
}
+out:
trace_xhci_inc_deq(ring);
return;
@@ -946,15 +935,12 @@ void xhci_hc_died(struct xhci_hcd *xhci)
* Instead we use a combination of that flag and checking if a new timer is
* pending.
*/
-void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
{
- struct xhci_hcd *xhci;
- struct xhci_virt_ep *ep;
+ struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
+ struct xhci_hcd *xhci = ep->xhci;
unsigned long flags;
- ep = (struct xhci_virt_ep *) arg;
- xhci = ep->xhci;
-
spin_lock_irqsave(&xhci->lock, flags);
/* bail out if cmd completed but raced with stop ep watchdog timer.*/
@@ -1680,9 +1666,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
+			/* Do the rest in GetPortStatus after the resume time delay.
+			 * Avoid polling the roothub status before then, so that
+			 * device auto-resume latency stays around ~40 ms.
+			 */
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
- /* Do the rest in GetPortStatus */
+ bogus_port_status = true;
}
}
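
The watchdog changes above (and the timer_setup() hunk in xhci-mem.c) follow the timer_list API conversion: the callback now receives the timer itself and recovers its containing structure with from_timer() instead of casting an unsigned long argument. A self-contained sketch with invented names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_ep {
	struct timer_list stop_cmd_timer;
	int timed_out;
};

static void example_watchdog(struct timer_list *t)
{
	/* container_of() behind the scenes: timer -> owning structure */
	struct example_ep *ep = from_timer(ep, t, stop_cmd_timer);

	ep->timed_out = 1;
}

static void example_arm(struct example_ep *ep)
{
	/* The structure is no longer passed as a cast unsigned long */
	timer_setup(&ep->stop_cmd_timer, example_watchdog, 0);
	mod_timer(&ep->stop_cmd_timer, jiffies + msecs_to_jiffies(5000));
}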
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 74436f8ca538..2c076ea80522 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NVIDIA Tegra xHCI host controller driver
*
* Copyright (C) 2014 NVIDIA Corporation
* Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/clk.h>
@@ -771,7 +768,7 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
struct device *dev = tegra->dev;
const struct firmware *fw;
unsigned long timeout;
- time_t timestamp;
+ time64_t timestamp;
struct tm time;
u64 address;
u32 value;
@@ -877,7 +874,7 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
}
timestamp = le32_to_cpu(header->fwimg_created_time);
- time_to_tm(timestamp, 0, &time);
+ time64_to_tm(timestamp, 0, &time);
dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
diff --git a/drivers/usb/host/xhci-trace.c b/drivers/usb/host/xhci-trace.c
index 367b630bdb3c..d0070814d1ea 100644
--- a/drivers/usb/host/xhci-trace.c
+++ b/drivers/usb/host/xhci-trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,10 +6,6 @@
*
* Author: Xenia Ragiadakou
* Email : burzalodowa@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index f20753b99624..a3b57c781db1 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,10 +6,6 @@
*
* Author: Xenia Ragiadakou
* Email : burzalodowa@gmail.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#undef TRACE_SYSTEM
@@ -388,6 +385,11 @@ DEFINE_EVENT(xhci_log_slot_ctx, xhci_handle_cmd_set_deq,
TP_ARGS(ctx)
);
+DEFINE_EVENT(xhci_log_slot_ctx, xhci_configure_endpoint,
+ TP_PROTO(struct xhci_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
DECLARE_EVENT_CLASS(xhci_log_ring,
TP_PROTO(struct xhci_ring *ring),
TP_ARGS(ring),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 51535ba2bcd4..2424d3020ca3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
*
@@ -5,19 +6,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pci.h>
@@ -32,6 +20,7 @@
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-mtk.h"
+#include "xhci-debugfs.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -406,14 +395,14 @@ static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
#endif
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
u32 temp;
int i;
- xhci = (struct xhci_hcd *)arg;
+ xhci = from_timer(xhci, t, comp_mode_recovery_timer);
for (i = 0; i < xhci->num_usb3_ports; i++) {
temp = readl(xhci->usb3_ports[i]);
@@ -454,8 +443,8 @@ static void compliance_mode_recovery(unsigned long arg)
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
- setup_timer(&xhci->comp_mode_recovery_timer,
- compliance_mode_recovery, (unsigned long)xhci);
+ timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+ 0);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
@@ -632,6 +621,9 @@ int xhci_run(struct usb_hcd *hcd)
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
+
+ xhci_debugfs_init(xhci);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);
@@ -660,6 +652,8 @@ static void xhci_stop(struct usb_hcd *hcd)
return;
}
+ xhci_debugfs_exit(xhci);
+
spin_lock_irq(&xhci->lock);
xhci->xhc_state |= XHCI_STATE_HALTED;
xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
@@ -1600,6 +1594,8 @@ static int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+ xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
+
xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
if (xhci->quirks & XHCI_MTK_HOST)
@@ -1723,6 +1719,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* Store the usb_device pointer for later use */
ep->hcpriv = udev;
+ xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index);
+
xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
(unsigned int) ep->desc.bEndpointAddress,
udev->slot_id,
@@ -2555,6 +2553,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
unsigned long flags;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
if (!command)
return -EINVAL;
@@ -2593,6 +2592,9 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
return -ENOMEM;
}
+ slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
+ trace_xhci_configure_endpoint(slot_ctx);
+
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, command,
command->in_ctx->dma,
@@ -2773,6 +2775,7 @@ static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; i++) {
if (virt_dev->eps[i].new_ring) {
+ xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->eps[i].new_ring = NULL;
}
@@ -3488,6 +3491,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
}
if (ep->ring) {
+ xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
xhci_free_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
@@ -3520,11 +3524,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
int i, ret;
- struct xhci_command *command;
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
- if (!command)
- return;
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
@@ -3540,10 +3541,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
/* If the host is halted due to driver unload, we still need to free the
* device.
*/
- if (ret <= 0 && ret != -ENODEV) {
- kfree(command);
+ if (ret <= 0 && ret != -ENODEV)
return;
- }
virt_dev = xhci->devs[udev->slot_id];
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
@@ -3555,26 +3554,19 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
- xhci_disable_slot(xhci, command, udev->slot_id);
- /*
- * Event command completion handler will free any data structures
- * associated with the slot. XXX Can free sleep?
- */
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (ret)
+ xhci_free_virt_device(xhci, udev->slot_id);
}
-int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
- u32 slot_id)
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
{
+ struct xhci_command *command;
unsigned long flags;
u32 state;
int ret = 0;
- struct xhci_virt_device *virt_dev;
- virt_dev = xhci->devs[slot_id];
- if (!virt_dev)
- return -EINVAL;
- if (!command)
- command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3583,17 +3575,16 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_free_virt_device(xhci, slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
kfree(command);
- return ret;
+ return -ENODEV;
}
ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ kfree(command);
return ret;
}
xhci_ring_cmd_db(xhci);
@@ -3641,13 +3632,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
if (!command)
return 0;
- /* xhci->slot_id and xhci->addr_dev are not thread-safe */
- mutex_lock(&xhci->mutex);
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- mutex_unlock(&xhci->mutex);
xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
xhci_free_command(xhci, command);
return 0;
@@ -3657,7 +3645,6 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
wait_for_completion(command->completion);
slot_id = command->slot_id;
- mutex_unlock(&xhci->mutex);
if (!slot_id || command->status != COMP_SUCCESS) {
xhci_err(xhci, "Error while assigning device slot ID\n");
@@ -3668,6 +3655,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
+ xhci_free_command(xhci, command);
+
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_reserve_host_control_ep_resources(xhci);
@@ -3694,6 +3683,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
udev->slot_id = slot_id;
+ xhci_debugfs_create_slot(xhci, slot_id);
+
#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
@@ -3703,18 +3694,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
pm_runtime_get_noresume(hcd->self.controller);
#endif
-
- xhci_free_command(xhci, command);
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
disable_slot:
- /* Disable slot, if we can do it without mem alloc */
- kfree(command->completion);
- command->completion = NULL;
- command->status = 0;
- return xhci_disable_slot(xhci, command, udev->slot_id);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (ret)
+ xhci_free_virt_device(xhci, udev->slot_id);
+
+ return 0;
}
/*
@@ -3838,8 +3827,14 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
break;
case COMP_USB_TRANSACTION_ERROR:
dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
- ret = -EPROTO;
- break;
+
+ mutex_unlock(&xhci->mutex);
+ ret = xhci_disable_slot(xhci, udev->slot_id);
+ if (!ret)
+ xhci_alloc_dev(hcd, udev);
+ kfree(command->completion);
+ kfree(command);
+ return -EPROTO;
case COMP_INCOMPATIBLE_DEVICE_ERROR:
dev_warn(&udev->dev,
"ERROR: Incompatible device for setup %s command\n", act);
@@ -4088,7 +4083,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
- if (enable) {
+ if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
@@ -4287,6 +4282,7 @@ static unsigned long long xhci_calculate_intel_u1_timeout(
break;
}
/* Otherwise the calculation is the same as isoc eps */
+ /* fall through */
case USB_ENDPOINT_XFER_ISOC:
timeout_ns = xhci_service_interval_to_ns(desc);
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
@@ -5005,6 +5001,8 @@ static int __init xhci_hcd_init(void)
if (usb_disabled())
return -ENODEV;
+ xhci_debugfs_create_root();
+
return 0;
}
@@ -5012,7 +5010,10 @@ static int __init xhci_hcd_init(void)
* If an init function is provided, an exit function must also be provided
* to allow module unload.
*/
-static void __exit xhci_hcd_fini(void) { }
+static void __exit xhci_hcd_fini(void)
+{
+ xhci_debugfs_remove_root();
+}
module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2b48aa4f6b76..99a014a920d3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* xHCI host controller driver
@@ -6,19 +7,6 @@
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_XHCI_HCD_H
@@ -131,6 +119,8 @@ struct xhci_cap_regs {
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
/* db_off bitmask - bits 0:1 reserved */
#define DBOFF_MASK (~0x3)
@@ -1007,6 +997,8 @@ struct xhci_virt_device {
struct xhci_tt_bw_info *tt_info;
/* The current max exit latency for the enabled USB3 link states. */
u16 current_mel;
+ /* Used for the debugfs interfaces. */
+ void *debugfs_private;
};
/*
@@ -1830,6 +1822,7 @@ struct xhci_hcd {
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
/* Reserved. It was XHCI_U2_DISABLE_WAKE */
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
+#define XHCI_HW_LPM_DISABLE (1 << 29)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1859,6 +1852,10 @@ struct xhci_hcd {
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_slots;
+ struct list_head regset_list;
+
/* platform-specific data -- must come last */
unsigned long priv[0] __aligned(sizeof(s64));
};
@@ -2012,8 +2009,7 @@ int xhci_run(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
-int xhci_disable_slot(struct xhci_hcd *xhci,
- struct xhci_command *command, u32 slot_id);
+int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
@@ -2068,7 +2064,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
unsigned int stream_id, struct xhci_td *td);
-void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
@@ -2107,6 +2103,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id);
+
static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
struct urb *urb)
{
@@ -2442,11 +2439,12 @@ static inline const char *xhci_decode_portsc(u32 portsc)
static char str[256];
int ret;
- ret = sprintf(str, "%s %s %s Link:%s ",
+ ret = sprintf(str, "%s %s %s Link:%s PortSpeed:%d ",
portsc & PORT_POWER ? "Powered" : "Powered-off",
portsc & PORT_CONNECT ? "Connected" : "Not-connected",
portsc & PORT_PE ? "Enabled" : "Disabled",
- xhci_portsc_link_state_string(portsc));
+ xhci_portsc_link_state_string(portsc),
+ DEV_PORT_SPEED(portsc));
if (portsc & PORT_OC)
ret += sprintf(str + ret, "OverCurrent ");
diff --git a/drivers/usb/image/Makefile b/drivers/usb/image/Makefile
index 4148ae306352..8997c81ba86b 100644
--- a/drivers/usb/image/Makefile
+++ b/drivers/usb/image/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB Image drivers
#
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index e92540a21b6b..2388674042a9 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* copyright (C) 1999/2000 by Henning Zabel <henning@uni-paderborn.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
@@ -893,6 +880,7 @@ static ssize_t mdc800_device_write (struct file *file, const char __user *buf, s
return -EIO;
}
mdc800->pic_len=-1;
+ /* fall through */
case 0x09: /* Download Thumbnail */
mdc800->download_left=answersize+64;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 0b21ba757bba..9f2f563c82ed 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Driver for Microtek Scanmaker X6 USB scanner, and possibly others.
*
* (C) Copyright 2000 John Fremlin <vii@penguinpowered.com>
diff --git a/drivers/usb/image/microtek.h b/drivers/usb/image/microtek.h
index 7e32ae787136..66685e59241a 100644
--- a/drivers/usb/image/microtek.h
+++ b/drivers/usb/image/microtek.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver for Microtek Scanmaker X6 USB scanner and possibly others.
*
diff --git a/drivers/usb/isp1760/Makefile b/drivers/usb/isp1760/Makefile
index 2b741074ad2b..9bb09e8290c4 100644
--- a/drivers/usb/isp1760/Makefile
+++ b/drivers/usb/isp1760/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
isp1760-y := isp1760-core.o isp1760-if.o
isp1760-$(CONFIG_USB_ISP1760_HCD) += isp1760-hcd.o
isp1760-$(CONFIG_USB_ISP1761_UDC) += isp1760-udc.o
diff --git a/drivers/usb/isp1760/isp1760-core.c b/drivers/usb/isp1760/isp1760-core.c
index bfa402cf3a27..05d22589b5cc 100644
--- a/drivers/usb/isp1760/isp1760-core.c
+++ b/drivers/usb/isp1760/isp1760-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/usb/isp1760/isp1760-core.h b/drivers/usb/isp1760/isp1760-core.h
index c70f8368a794..97cb4d7a3e1c 100644
--- a/drivers/usb/isp1760/isp1760-core.h
+++ b/drivers/usb/isp1760/isp1760-core.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_CORE_H_
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 8e59e0c02b8a..42672d6ec525 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -1258,10 +1259,11 @@ leave:
#define SLOT_TIMEOUT 300
#define SLOT_CHECK_PERIOD 200
static struct timer_list errata2_timer;
+static struct usb_hcd *errata2_timer_hcd;
-static void errata2_function(unsigned long data)
+static void errata2_function(struct timer_list *unused)
{
- struct usb_hcd *hcd = (struct usb_hcd *) data;
+ struct usb_hcd *hcd = errata2_timer_hcd;
struct isp1760_hcd *priv = hcd_to_priv(hcd);
int slot;
struct ptd ptd;
@@ -1333,7 +1335,8 @@ static int isp1760_run(struct usb_hcd *hcd)
if (retval)
return retval;
- setup_timer(&errata2_timer, errata2_function, (unsigned long)hcd);
+ errata2_timer_hcd = hcd;
+ timer_setup(&errata2_timer, errata2_function, 0);
errata2_timer.expires = jiffies + msecs_to_jiffies(SLOT_CHECK_PERIOD);
add_timer(&errata2_timer);
diff --git a/drivers/usb/isp1760/isp1760-hcd.h b/drivers/usb/isp1760/isp1760-hcd.h
index 0c1c98d6ea08..f1bb2deb1ccf 100644
--- a/drivers/usb/isp1760/isp1760-hcd.h
+++ b/drivers/usb/isp1760/isp1760-hcd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ISP1760_HCD_H_
#define _ISP1760_HCD_H_
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index bc68bbab7fa1..241a00d75027 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Glue code for the ISP1760 driver and bus
* Currently there is support for
diff --git a/drivers/usb/isp1760/isp1760-regs.h b/drivers/usb/isp1760/isp1760-regs.h
index b67095c9a9d4..1f00c3850cf7 100644
--- a/drivers/usb/isp1760/isp1760-regs.h
+++ b/drivers/usb/isp1760/isp1760-regs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1760 chip
*
@@ -7,10 +8,6 @@
* Contacts:
* Sebastian Siewior <bigeasy@linutronix.de>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_REGS_H_
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 69400f3da886..bac4ef5d9512 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1761 device controller
*
@@ -5,10 +6,6 @@
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
@@ -1331,9 +1328,9 @@ static irqreturn_t isp1760_udc_irq(int irq, void *dev)
return status ? IRQ_HANDLED : IRQ_NONE;
}
-static void isp1760_udc_vbus_poll(unsigned long data)
+static void isp1760_udc_vbus_poll(struct timer_list *t)
{
- struct isp1760_udc *udc = (struct isp1760_udc *)data;
+ struct isp1760_udc *udc = from_timer(udc, t, vbus_timer);
unsigned long flags;
spin_lock_irqsave(&udc->lock, flags);
@@ -1452,8 +1449,7 @@ int isp1760_udc_register(struct isp1760_device *isp, int irq,
udc->regs = isp->regs;
spin_lock_init(&udc->lock);
- setup_timer(&udc->vbus_timer, isp1760_udc_vbus_poll,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, isp1760_udc_vbus_poll, 0);
ret = isp1760_udc_init(udc);
if (ret < 0)
diff --git a/drivers/usb/isp1760/isp1760-udc.h b/drivers/usb/isp1760/isp1760-udc.h
index 26899ed81145..2d0b88747701 100644
--- a/drivers/usb/isp1760/isp1760-udc.h
+++ b/drivers/usb/isp1760/isp1760-udc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the NXP ISP1761 device controller
*
@@ -5,10 +6,6 @@
*
* Contacts:
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
*/
#ifndef _ISP1760_UDC_H_
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 0f9f25db9163..68d2f2cd17dd 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -238,8 +238,8 @@ config USB_HUB_USB251XB
depends on I2C
help
This option enables support for configuration via SMBus of the
- Microchip USB251xB/xBi USB 2.0 Hub Controller series.
- Configuration parameters may be set in devicetree or platform data.
+ Microchip USB251x/xBi USB 2.0 Hub Controller series. Configuration
+ parameters may be set in devicetree or platform data.
Say Y or M here if you need to configure such a device via SMBus.
config USB_HSIC_USB3503
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 7fdb45fc976f..109f54f5b9aa 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the rest of the USB drivers
# (the ones that don't fit into any other categories)
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 1c0ada75c35d..4b8712733fc7 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* adutux - driver for ADU devices from Ontrak Control Systems
* This is an experimental driver. Use at your own risk.
@@ -5,11 +6,6 @@
*
* Copyright (c) 2003 John Homppi (SCO, leave this notice here)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* derived from the Lego USB Tower driver 0.56:
* Copyright (c) 2003 David Glance <davidgsf@sourceforge.net>
* 2001 Juergen Stuber <stuber@loria.fr>
@@ -761,13 +757,11 @@ error:
static void adu_disconnect(struct usb_interface *interface)
{
struct adu_device *dev;
- int minor;
dev = usb_get_intfdata(interface);
mutex_lock(&dev->mtx); /* not interruptible */
dev->udev = NULL; /* poison */
- minor = dev->minor;
usb_deregister_dev(interface, &adu_class);
mutex_unlock(&dev->mtx);
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 8efdc500e790..b3eb8b989bd4 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Apple Cinema Display driver
*
* Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
*
* Thanks to Caskey L. Dickson for his work with acdctl.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index abec6e604a62..b6a1b9331aa0 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* chaoskey - driver for ChaosKey device from Altus Metrum.
*
@@ -11,15 +12,6 @@
* bit stream.
*
* Copyright © 2015 Keith Packard <keithp@keithp.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5c93a888c40e..bf4d2778907e 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* cypress_cy7c63.c
*
@@ -23,10 +24,6 @@
*
* For up-to-date information please visit:
* http://www.obock.de/kernel/cypress
-*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
*/
#include <linux/module.h>
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 63207c42acf6..66be86077292 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* -*- linux-c -*-
* Cypress USB Thermometer driver
*
@@ -6,11 +7,6 @@
* This driver works with Elektor magazine USB Interface as published in
* issue #291. It should also work with the original starter kit/demo board
* from Cypress.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index c31b4a33e6bb..7895d61e733b 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 8950fa5e973d..24d841850e05 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Emagic EMI 2|6 usb audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström (tapio.laxstrom@iptime.fi)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
- *
* emi26.c,v 1.13 2002/03/08 13:10:26 tapio Exp
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 1d9be4431b72..3eea60437f56 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Emagic EMI 2|6 usb audio interface firmware loader.
* Copyright (C) 2002
* Tapio Laxström (tapio.laxstrom@iptime.fi)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/misc/ezusb.c b/drivers/usb/misc/ezusb.c
index 837208f14f86..f058d8029761 100644
--- a/drivers/usb/misc/ezusb.c
+++ b/drivers/usb/misc/ezusb.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EZ-USB specific functions used by some of the USB to Serial drivers.
*
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 424ff12f3b51..76c718ac8c78 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB FTDI client driver for Elan Digital Systems's Uxxx adapters
*
@@ -7,11 +8,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
* tony.olech@elandigitalsystems.com
*
- * This program is free software;you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- *
* This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB client drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 39d8fedfaf3b..20b0f91a5d9b 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Siemens ID Mouse driver v0.6
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2 of
- the License, or (at your option) any later version.
-
Copyright (C) 2004-5 by Florian 'Floe' Echtler <echtler@fs.tum.de>
and Andreas 'ad' Deresch <aderesch@fs.tum.de>
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index be5881303681..ad3109490c0f 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Native support for the I/O-Warrior USB devices
*
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index 1c61830e96f9..4d30095d6ad2 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for loading USB isight firmware
*
* Copyright (C) 2008 Matthew Garrett <mjg@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
* The USB isight cameras in recent Apples are roughly compatible with the USB
* video class specification, and can be driven by uvcvideo. However, they
* need firmware to be loaded beforehand. After firmware loading, the device
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 680bddb3ce05..5c1a3b852453 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/**
* Generic USB driver for report based interrupt in/out devices
* like LD Didactic's USB devices. LD Didactic's USB devices are
@@ -12,11 +13,6 @@
*
* Copyright (C) 2005 Michael Hund <mhund@ld-didactic.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* Derived from Lego USB Tower driver
* Copyright (C) 2003 David Glance <advidgsf@sourceforge.net>
* 2001-2004 Juergen Stuber <starblue@users.sourceforge.net>
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 5628f678ab59..c5be6e9e24a5 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* LEGO USB Tower driver
*
* Copyright (C) 2003 David Glance <davidgsf@sourceforge.net>
* 2001-2004 Juergen Stuber <starblue@users.sourceforge.net>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
* derived from USB Skeleton driver - 0.5
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
*
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index ddddd6387f66..e5c03c6d16e9 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/usb/misc/lvstest.c
*
@@ -5,10 +6,6 @@
*
* Copyright (C) 2014 ST Microelectronics
* Pratyush Anand <pratyush.anand@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/init.h>
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index ddfebb144aaa..7b9adeb3e7aa 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* -*- linux-c -*- */
/*
@@ -6,20 +7,6 @@
* Cesar Miquel (miquel@df.uba.ar)
*
* based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
*
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
index 359abc98e706..6db7a5863496 100644
--- a/drivers/usb/misc/rio500_usb.h
+++ b/drivers/usb/misc/rio500_usb.h
@@ -1,25 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* ----------------------------------------------------------------------
-
Copyright (C) 2000 Cesar Miquel (miquel@df.uba.ar)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
---------------------------------------------------------------------- */
-
-
#define RIO_SEND_COMMAND 0x1
#define RIO_RECV_COMMAND 0x2
diff --git a/drivers/usb/misc/sisusbvga/Makefile b/drivers/usb/misc/sisusbvga/Makefile
index 3142476ccc8e..6ed3a638261a 100644
--- a/drivers/usb/misc/sisusbvga/Makefile
+++ b/drivers/usb/misc/sisusbvga/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the sisusb driver (if driver is inside kernel tree).
#
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 30774e0aeadd..3e65bdc2615c 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb.h b/drivers/usb/misc/sisusbvga/sisusb.h
index 55492a5930bd..20f03ad0ea16 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.h
+++ b/drivers/usb/misc/sisusbvga/sisusb.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for Net2280/SiS315 based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index f019d80ca9e4..73f7bde78e11 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index bf0032ca35ed..6a30e8bd9221 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.h b/drivers/usb/misc/sisusbvga/sisusb_init.h
index e79a616f0d26..1782c759c4ad 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* $XFree86$ */
/* $XdotOrg$ */
/*
diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h
index 1c4240e802c1..706d77090e00 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_struct.h
+++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* General structure definitions for universal mode switching modules
*
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 1862ed15ce28..d83af2a332e4 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* PlayStation 2 Trance Vibrator driver
*
* Copyright (C) 2006 Sam Hocevar <sam@zoy.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Standard include files */
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 135c91c434bf..a6efb9a72939 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Microchip USB251xB USB 2.0 Hi-Speed Hub Controller
* Configuration via SMBus.
@@ -7,25 +8,14 @@
* This work is based on the USB3503 driver by Dongjin Kim and
* a not-accepted patch by Fabien Lahoudere, see:
* https://patchwork.kernel.org/patch/9257715/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
/* Internal Register Set Addresses & Default Values acc. to DS00001692C */
@@ -38,6 +28,7 @@
#define USB251XB_DEF_PRODUCT_ID_12 0x2512 /* USB2512B/12Bi */
#define USB251XB_DEF_PRODUCT_ID_13 0x2513 /* USB2513B/13Bi */
#define USB251XB_DEF_PRODUCT_ID_14 0x2514 /* USB2514B/14Bi */
+#define USB251XB_DEF_PRODUCT_ID_17 0x2517 /* USB2517/17i */
#define USB251XB_ADDR_DEVICE_ID_LSB 0x04
#define USB251XB_ADDR_DEVICE_ID_MSB 0x05
@@ -82,7 +73,7 @@
#define USB251XB_ADDR_PRODUCT_STRING_LEN 0x14
#define USB251XB_ADDR_PRODUCT_STRING 0x54
-#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi"
+#define USB251XB_DEF_PRODUCT_STRING "USB251xB/xBi/7i"
#define USB251XB_ADDR_SERIAL_STRING_LEN 0x15
#define USB251XB_ADDR_SERIAL_STRING 0x92
@@ -93,8 +84,10 @@
#define USB251XB_ADDR_BOOST_UP 0xF6
#define USB251XB_DEF_BOOST_UP 0x00
-#define USB251XB_ADDR_BOOST_X 0xF8
-#define USB251XB_DEF_BOOST_X 0x00
+#define USB251XB_ADDR_BOOST_57 0xF7
+#define USB251XB_DEF_BOOST_57 0x00
+#define USB251XB_ADDR_BOOST_14 0xF8
+#define USB251XB_DEF_BOOST_14 0x00
#define USB251XB_ADDR_PORT_SWAP 0xFA
#define USB251XB_DEF_PORT_SWAP 0x00
@@ -102,7 +95,11 @@
#define USB251XB_ADDR_PORT_MAP_12 0xFB
#define USB251XB_DEF_PORT_MAP_12 0x00
#define USB251XB_ADDR_PORT_MAP_34 0xFC
-#define USB251XB_DEF_PORT_MAP_34 0x00 /* USB2513B/i & USB2514B/i only */
+#define USB251XB_DEF_PORT_MAP_34 0x00 /* USB251{3B/i,4B/i,7/i} only */
+#define USB251XB_ADDR_PORT_MAP_56 0xFD
+#define USB251XB_DEF_PORT_MAP_56 0x00 /* USB2517/i only */
+#define USB251XB_ADDR_PORT_MAP_7 0xFE
+#define USB251XB_DEF_PORT_MAP_7 0x00 /* USB2517/i only */
#define USB251XB_ADDR_STATUS_COMMAND 0xFF
#define USB251XB_STATUS_COMMAND_SMBUS_DOWN 0x04
@@ -119,7 +116,7 @@ struct usb251xb {
struct device *dev;
struct i2c_client *i2c;
u8 skip_config;
- int gpio_reset;
+ struct gpio_desc *gpio_reset;
u16 vendor_id;
u16 product_id;
u16 device_id;
@@ -143,57 +140,97 @@ struct usb251xb {
char serial[USB251XB_STRING_BUFSIZE];
u8 bat_charge_en;
u8 boost_up;
- u8 boost_x;
+ u8 boost_57;
+ u8 boost_14;
u8 port_swap;
u8 port_map12;
u8 port_map34;
+ u8 port_map56;
+ u8 port_map7;
u8 status;
};
struct usb251xb_data {
u16 product_id;
+ u8 port_cnt;
+ bool led_support;
+ bool bat_support;
char product_str[USB251XB_STRING_BUFSIZE / 2]; /* ASCII string */
};
static const struct usb251xb_data usb2512b_data = {
.product_id = 0x2512,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2512B",
};
static const struct usb251xb_data usb2512bi_data = {
.product_id = 0x2512,
+ .port_cnt = 2,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2512Bi",
};
static const struct usb251xb_data usb2513b_data = {
.product_id = 0x2513,
+ .port_cnt = 3,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2513B",
};
static const struct usb251xb_data usb2513bi_data = {
.product_id = 0x2513,
+ .port_cnt = 3,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2513Bi",
};
static const struct usb251xb_data usb2514b_data = {
.product_id = 0x2514,
+ .port_cnt = 4,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2514B",
};
static const struct usb251xb_data usb2514bi_data = {
.product_id = 0x2514,
+ .port_cnt = 4,
+ .led_support = false,
+ .bat_support = true,
.product_str = "USB2514Bi",
};
+static const struct usb251xb_data usb2517_data = {
+ .product_id = 0x2517,
+ .port_cnt = 7,
+ .led_support = true,
+ .bat_support = false,
+ .product_str = "USB2517",
+};
+
+static const struct usb251xb_data usb2517i_data = {
+ .product_id = 0x2517,
+ .port_cnt = 7,
+ .led_support = true,
+ .bat_support = false,
+ .product_str = "USB2517i",
+};
+
static void usb251xb_reset(struct usb251xb *hub, int state)
{
- if (!gpio_is_valid(hub->gpio_reset))
+ if (!hub->gpio_reset)
return;
- gpio_set_value_cansleep(hub->gpio_reset, state);
+ gpiod_set_value_cansleep(hub->gpio_reset, state);
/* wait for hub recovery/stabilization */
- if (state)
+ if (!state)
usleep_range(500, 750); /* >=500us at power on */
else
usleep_range(1, 10); /* >=1us at power down */
@@ -212,7 +249,7 @@ static int usb251xb_connect(struct usb251xb *hub)
i2c_wb[0] = 0x01;
i2c_wb[1] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 1);
+ usb251xb_reset(hub, 0);
err = i2c_smbus_write_i2c_block_data(hub->i2c,
USB251XB_ADDR_STATUS_COMMAND, 2, i2c_wb);
@@ -253,13 +290,16 @@ static int usb251xb_connect(struct usb251xb *hub)
USB251XB_STRING_BUFSIZE);
i2c_wb[USB251XB_ADDR_BATTERY_CHARGING_ENABLE] = hub->bat_charge_en;
i2c_wb[USB251XB_ADDR_BOOST_UP] = hub->boost_up;
- i2c_wb[USB251XB_ADDR_BOOST_X] = hub->boost_x;
+ i2c_wb[USB251XB_ADDR_BOOST_57] = hub->boost_57;
+ i2c_wb[USB251XB_ADDR_BOOST_14] = hub->boost_14;
i2c_wb[USB251XB_ADDR_PORT_SWAP] = hub->port_swap;
i2c_wb[USB251XB_ADDR_PORT_MAP_12] = hub->port_map12;
i2c_wb[USB251XB_ADDR_PORT_MAP_34] = hub->port_map34;
+ i2c_wb[USB251XB_ADDR_PORT_MAP_56] = hub->port_map56;
+ i2c_wb[USB251XB_ADDR_PORT_MAP_7] = hub->port_map7;
i2c_wb[USB251XB_ADDR_STATUS_COMMAND] = USB251XB_STATUS_COMMAND_ATTACH;
- usb251xb_reset(hub, 1);
+ usb251xb_reset(hub, 0);
/* write registers */
for (i = 0; i < (USB251XB_I2C_REG_SZ / USB251XB_I2C_WRITE_SZ); i++) {
@@ -297,7 +337,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
struct device *dev = hub->dev;
struct device_node *np = dev->of_node;
int len, err, i;
- u32 *property_u32 = NULL;
+ u32 property_u32 = 0;
const u32 *cproperty_u32;
const char *cproperty_char;
char str[USB251XB_STRING_BUFSIZE / 2];
@@ -312,19 +352,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
else
hub->skip_config = 0;
- hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
- if (hub->gpio_reset == -EPROBE_DEFER)
+ hub->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (PTR_ERR(hub->gpio_reset) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
- if (gpio_is_valid(hub->gpio_reset)) {
- err = devm_gpio_request_one(dev, hub->gpio_reset,
- GPIOF_OUT_INIT_LOW,
- "usb251xb reset");
- if (err) {
- dev_err(dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- return err;
- }
+ } else if (IS_ERR(hub->gpio_reset)) {
+ err = PTR_ERR(hub->gpio_reset);
+ dev_err(dev, "unable to request GPIO reset pin (%d)\n", err);
+ return err;
}
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
@@ -374,16 +408,16 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "dynamic-power-switching", NULL))
hub->conf_data2 |= BIT(7);
- if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
- if (*property_u32 == 100) {
+ if (!of_property_read_u32(np, "oc-delay-us", &property_u32)) {
+ if (property_u32 == 100) {
/* 100 us*/
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 &= ~BIT(4);
- } else if (*property_u32 == 4000) {
+ } else if (property_u32 == 4000) {
/* 4 ms */
hub->conf_data2 &= ~BIT(5);
hub->conf_data2 |= BIT(4);
- } else if (*property_u32 == 16000) {
+ } else if (property_u32 == 16000) {
/* 16 ms */
hub->conf_data2 |= BIT(5);
hub->conf_data2 |= BIT(4);
@@ -401,6 +435,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "port-mapping-mode", NULL))
hub->conf_data3 |= BIT(3);
+ if (data->led_support && of_get_property(np, "led-usb-mode", NULL))
+ hub->conf_data3 &= ~BIT(1);
+
if (of_get_property(np, "string-support", NULL))
hub->conf_data3 |= BIT(0);
@@ -410,8 +447,11 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->non_rem_dev |= BIT(port);
+ else
+ dev_warn(dev, "NRD port %u doesn't exist\n",
+ port);
}
}
@@ -421,8 +461,11 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->port_disable_sp |= BIT(port);
+ else
+ dev_warn(dev, "PDS port %u doesn't exist\n",
+ port);
}
}
@@ -432,14 +475,37 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
for (i = 0; i < len / sizeof(u32); i++) {
u32 port = be32_to_cpu(cproperty_u32[i]);
- if ((port >= 1) && (port <= 4))
+ if ((port >= 1) && (port <= data->port_cnt))
hub->port_disable_bp |= BIT(port);
+ else
+ dev_warn(dev, "PDB port %u doesn't exist\n",
+ port);
}
}
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+ if (!of_property_read_u32(np, "sp-max-total-current-microamp",
+ &property_u32))
+ hub->max_power_sp = min_t(u8, property_u32 / 2000, 50);
+
+ hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
+ if (!of_property_read_u32(np, "bp-max-total-current-microamp",
+ &property_u32))
+ hub->max_power_bp = min_t(u8, property_u32 / 2000, 255);
+
+ hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
+ if (!of_property_read_u32(np, "sp-max-removable-current-microamp",
+ &property_u32))
+ hub->max_current_sp = min_t(u8, property_u32 / 2000, 50);
+
+ hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
+ if (!of_property_read_u32(np, "bp-max-removable-current-microamp",
+ &property_u32))
+ hub->max_current_bp = min_t(u8, property_u32 / 2000, 255);
+
hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
- if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
- hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
+ if (!of_property_read_u32(np, "power-on-time-ms", &property_u32))
+ hub->power_on_time = min_t(u8, property_u32 / 2, 255);
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -476,16 +542,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
*/
- hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
- hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
- hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
- hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
hub->boost_up = USB251XB_DEF_BOOST_UP;
- hub->boost_x = USB251XB_DEF_BOOST_X;
+ hub->boost_57 = USB251XB_DEF_BOOST_57;
+ hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_swap = USB251XB_DEF_PORT_SWAP;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
hub->port_map34 = USB251XB_DEF_PORT_MAP_34;
+ hub->port_map56 = USB251XB_DEF_PORT_MAP_56;
+ hub->port_map7 = USB251XB_DEF_PORT_MAP_7;
return 0;
}
@@ -510,6 +575,12 @@ static const struct of_device_id usb251xb_of_match[] = {
.compatible = "microchip,usb2514bi",
.data = &usb2514bi_data,
}, {
+ .compatible = "microchip,usb2517",
+ .data = &usb2517_data,
+ }, {
+ .compatible = "microchip,usb2517i",
+ .data = &usb2517i_data,
+ }, {
/* sentinel */
}
};
@@ -573,6 +644,8 @@ static const struct i2c_device_id usb251xb_id[] = {
{ "usb2513bi", 0 },
{ "usb2514b", 0 },
{ "usb2514bi", 0 },
+ { "usb2517", 0 },
+ { "usb2517i", 0 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, usb251xb_id);
@@ -589,5 +662,5 @@ static struct i2c_driver usb251xb_i2c_driver = {
module_i2c_driver(usb251xb_i2c_driver);
MODULE_AUTHOR("Richard Leitner <richard.leitner@skidata.com>");
-MODULE_DESCRIPTION("USB251xB/xBi USB 2.0 Hub Controller Driver");
+MODULE_DESCRIPTION("USB251x/xBi USB 2.0 Hub Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index 8e7737d7ac0a..465dbf68b463 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB3503 USB 2.0 hub controller driver
*
* Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c
index e9f37fb746ac..1b4de651e697 100644
--- a/drivers/usb/misc/usb4604.c
+++ b/drivers/usb/misc/usb4604.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SMSC USB4604 USB HSIC 4-port 2.0 hub controller driver
* Based on usb3503 driver
*
* Copyright (c) 2012-2013 Dongjin Kim (tobetter@gmail.com)
* Copyright (c) 2016 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/i2c.h>
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
index dc2e5a31caec..4bf77736914f 100644
--- a/drivers/usb/misc/usb_u132.h
+++ b/drivers/usb/misc/usb_u132.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common Header File for the Elan Digital Systems U132 adapter
* this file should be included by both the "ftdi-u132" and
@@ -9,11 +10,6 @@
* Author and Maintainer - Tony Olech - Elan Digital Systems
*(tony.olech@elandigitalsystems.com)
*
-* This program is free software;you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation, version 2.
-*
-*
* The driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
* based on various USB client drivers in the 2.6.15 linux kernel
* with constant reference to the 3rd Edition of Linux Device Drivers
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 0f5ad896c7e3..9ba4a4e68d91 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*****************************************************************************
* USBLCD Kernel Driver *
* Version 1.05 *
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3f6a28045b53..12f7e94695a2 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB 7 Segment Driver
*
* Copyright (C) 2008 Harrison Metzger <harrisonmetz@gmail.com>
* Based on usbled.c by Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b3fc602b2e24..aedc9a7f149e 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -576,11 +577,16 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
return sg;
}
-static void sg_timeout(unsigned long _req)
+struct sg_timeout {
+ struct timer_list timer;
+ struct usb_sg_request *req;
+};
+
+static void sg_timeout(struct timer_list *t)
{
- struct usb_sg_request *req = (struct usb_sg_request *) _req;
+ struct sg_timeout *timeout = from_timer(timeout, t, timer);
- usb_sg_cancel(req);
+ usb_sg_cancel(timeout->req);
}
static int perform_sglist(
@@ -594,9 +600,11 @@ static int perform_sglist(
{
struct usb_device *udev = testdev_to_usbdev(tdev);
int retval = 0;
- struct timer_list sg_timer;
+ struct sg_timeout timeout = {
+ .req = req,
+ };
- setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
+ timer_setup_on_stack(&timeout.timer, sg_timeout, 0);
while (retval == 0 && iterations-- > 0) {
retval = usb_sg_init(req, udev, pipe,
@@ -607,13 +615,14 @@ static int perform_sglist(
if (retval)
break;
- mod_timer(&sg_timer, jiffies +
+ mod_timer(&timeout.timer, jiffies +
msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
usb_sg_wait(req);
- if (!del_timer_sync(&sg_timer))
+ if (!del_timer_sync(&timeout.timer))
retval = -ETIMEDOUT;
else
retval = req->status;
+ destroy_timer_on_stack(&timeout.timer);
/* FIXME check resulting data pattern */
@@ -1015,7 +1024,7 @@ static int ch9_postconfig(struct usbtest_dev *dev)
/* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
- retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
+ retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
return retval;
@@ -1025,7 +1034,7 @@ static int ch9_postconfig(struct usbtest_dev *dev)
* the device's remote wakeup feature ... if we can, test that here
*/
- retval = usb_get_status(udev, USB_RECIP_INTERFACE,
+ retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
@@ -1614,7 +1623,7 @@ static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
u16 status;
/* shouldn't look or act halted */
- retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
+ retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
ep, retval);
@@ -1636,7 +1645,7 @@ static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
u16 status;
/* should look and act halted */
- retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
+ retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval);
@@ -1909,7 +1918,7 @@ static struct urb *iso_alloc_urb(
if (bytes < 0 || !desc)
return NULL;
- maxp = 0x7ff & usb_endpoint_maxp(desc);
+ maxp = usb_endpoint_maxp(desc);
maxp *= usb_endpoint_maxp_mult(desc);
packets = DIV_ROUND_UP(bytes, maxp);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 8a13b2fcf3e1..263c97fec708 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************/
/*
@@ -6,20 +7,6 @@
* Copyright (C) 1999, 2005, 2010
* Thomas Sailer (t.sailer@alumni.ethz.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Based on parport_pc.c
*
* History:
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 58abdf28620a..8abb6cbbd98a 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Meywa-Denki & KAYAC YUREX
*
* Copyright (C) 2010 Tomoki Sekiyama (tomoki.sekiyama@gmail.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/mon/Makefile b/drivers/usb/mon/Makefile
index 8ed24ab08698..09f43e89633c 100644
--- a/drivers/usb/mon/Makefile
+++ b/drivers/usb/mon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for USB monitor
#
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index b6d8bf475c92..f6ae753ab99b 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index 46847340b819..9812d102a005 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c
index 5bdf73a57498..98ab0cc473d6 100644
--- a/drivers/usb/mon/mon_stat.c
+++ b/drivers/usb/mon/mon_stat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 19c416d69eb9..f5e1bb5e5217 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mon/usb_mon.h b/drivers/usb/mon/usb_mon.h
index df9a4df342c7..aa64efaba366 100644
--- a/drivers/usb/mon/usb_mon.h
+++ b/drivers/usb/mon/usb_mon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
index 60e0fff7a847..4a9715812bf9 100644
--- a/drivers/usb/mtu3/Makefile
+++ b/drivers/usb/mtu3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_USB_MTU3_DEBUG) += -DDEBUG
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index b26fffc58446..3c888d942a9f 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3.h - MediaTek USB3 DRD header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __MTU3_H__
@@ -46,6 +37,9 @@ struct mtu3_request;
#define MU3D_EP_RXCR1(epnum) (U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
#define MU3D_EP_RXCR2(epnum) (U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQHIAR(epnum) (U3D_TXQHIAR1 + (((epnum) - 1) * 0x4))
+#define USB_QMU_RQHIAR(epnum) (U3D_RXQHIAR1 + (((epnum) - 1) * 0x4))
+
#define USB_QMU_RQCSR(epnum) (U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
#define USB_QMU_RQSAR(epnum) (U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
#define USB_QMU_RQCPR(epnum) (U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
@@ -91,6 +85,7 @@ enum mtu3_speed {
MTU3_SPEED_FULL = 1,
MTU3_SPEED_HIGH = 3,
MTU3_SPEED_SUPER = 4,
+ MTU3_SPEED_SUPER_PLUS = 5,
};
/**
@@ -112,6 +107,19 @@ enum mtu3_g_ep0_state {
};
/**
+ * MTU3_DR_FORCE_NONE: automatically switch host and peripheral mode
+ * by IDPIN signal.
+ * MTU3_DR_FORCE_HOST: force host mode, overriding the OTG
+ * IDPIN signal.
+ * MTU3_DR_FORCE_DEVICE: force peripheral mode.
+ */
+enum mtu3_dr_force_mode {
+ MTU3_DR_FORCE_NONE = 0,
+ MTU3_DR_FORCE_HOST,
+ MTU3_DR_FORCE_DEVICE,
+};
+
+/**
* @base: the base address of fifo
* @limit: the bitmap size in bits
* @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
@@ -138,23 +146,33 @@ struct mtu3_fifo_info {
* Checksum value is calculated over the 16 bytes of the GPD by default;
* @data_buf_len (RX ONLY): This value indicates the length of
* the assigned data buffer
+ * @tx_ext_addr (TX ONLY): [3:0] are 4 extension bits of @buffer,
+ * [7:4] are 4 extension bits of @next_gpd
* @next_gpd: Physical address of the next GPD
* @buffer: Physical address of the data buffer
* @buf_len:
* (TX): This value indicates the length of the assigned data buffer
* (RX): The total length of data received
* @ext_len: reserved
+ * @rx_ext_addr(RX ONLY): [3:0] are 4 extension bits of @buffer,
+ * [7:4] are 4 extension bits of @next_gpd
* @ext_flag:
* bit5 (TX ONLY): Zero Length Packet (ZLP),
*/
struct qmu_gpd {
__u8 flag;
__u8 chksum;
- __le16 data_buf_len;
+ union {
+ __le16 data_buf_len;
+ __le16 tx_ext_addr;
+ };
__le32 next_gpd;
__le32 buffer;
__le16 buf_len;
- __u8 ext_len;
+ union {
+ __u8 ext_len;
+ __u8 rx_ext_addr;
+ };
__u8 ext_flag;
} __packed;
@@ -183,7 +201,6 @@ struct mtu3_gpd_ring {
* xHCI driver initialization, it's necessary for system bootup
* as device.
* @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
-* @id_*: used to maually switch between host and device modes by idpin
* @manual_drd_enabled: it's true when supports dual-role device by debugfs
* to switch host/device modes depending on user input.
*/
@@ -194,10 +211,6 @@ struct otg_switch_mtk {
struct notifier_block id_nb;
struct delayed_work extcon_reg_dwork;
bool is_u3_drd;
- /* dual-role switch by debugfs */
- struct pinctrl *id_pinctrl;
- struct pinctrl_state *id_float;
- struct pinctrl_state *id_ground;
bool manual_drd_enabled;
};
@@ -206,14 +219,17 @@ struct otg_switch_mtk {
* @ippc_base: register base address of IP Power and Clock interface (IPPC)
* @vusb33: usb3.3V shared by device/host IP
* @sys_clk: system clock of mtu3, shared by device/host IP
+ * @ref_clk: reference clock
+ * @mcu_clk: mcu_bus_ck clock for AHB bus etc
+ * @dma_clk: dma_bus_ck clock for AXI bus etc
* @dr_mode: works in which mode:
* host only, device only or dual-role mode
* @u2_ports: number of usb2.0 host ports
* @u3_ports: number of usb3.0 host ports
+ * @u3p_dis_msk: mask for disabling usb3 ports, for example, bit0==1 to
+ * disable u3port0, bit1==1 to disable u3port1,... etc
* @dbgfs_root: only used when supports manual dual-role switch via debugfs
* @wakeup_en: it's true when supports remote wakeup in host mode
- * @wk_deb_p0: port0's wakeup debounce clock
- * @wk_deb_p1: it's optional, and depends on port1 is supported or not
*/
struct ssusb_mtk {
struct device *dev;
@@ -226,17 +242,18 @@ struct ssusb_mtk {
struct regulator *vusb33;
struct clk *sys_clk;
struct clk *ref_clk;
+ struct clk *mcu_clk;
+ struct clk *dma_clk;
/* otg */
struct otg_switch_mtk otg_switch;
enum usb_dr_mode dr_mode;
bool is_host;
int u2_ports;
int u3_ports;
+ int u3p_dis_msk;
struct dentry *dbgfs_root;
/* usb wakeup for host mode */
bool wakeup_en;
- struct clk *wk_deb_p0;
- struct clk *wk_deb_p1;
struct regmap *pericfg;
};
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index 99c65b0788ff..b1b99a8f6a7a 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_core.c - hardware access layer and gadget init/exit of
* MediaTek usb3 Dual-Role Controller Driver
@@ -5,18 +6,9 @@
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -114,7 +106,9 @@ static int mtu3_device_enable(struct mtu3 *mtu)
mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
SSUSB_U2_PORT_HOST_SEL));
- mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
return ssusb_check_clocks(mtu->ssusb, check_clk);
}
@@ -129,7 +123,10 @@ static void mtu3_device_disable(struct mtu3 *mtu)
mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
- mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}
@@ -236,7 +233,7 @@ void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
{
- if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER))
+ if (mtu->is_u3_ip && mtu->max_speed >= USB_SPEED_SUPER)
mtu3_ss_func_set(mtu, is_on);
else
mtu3_hs_softconn_set(mtu, is_on);
@@ -546,6 +543,9 @@ static void mtu3_set_speed(struct mtu3 *mtu)
mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
/* HS/FS detected by HW */
mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+ } else if (mtu->max_speed == USB_SPEED_SUPER) {
+ mtu3_clrbits(mtu->ippc_base, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_SSP_SPEED);
}
dev_info(mtu->dev, "max_speed: %s\n",
@@ -623,6 +623,10 @@ static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
udev_speed = USB_SPEED_SUPER;
maxpkt = 512;
break;
+ case MTU3_SPEED_SUPER_PLUS:
+ udev_speed = USB_SPEED_SUPER_PLUS;
+ maxpkt = 512;
+ break;
default:
udev_speed = USB_SPEED_UNKNOWN;
break;
@@ -759,7 +763,31 @@ static void mtu3_hw_exit(struct mtu3 *mtu)
mtu3_mem_free(mtu);
}
-/*-------------------------------------------------------------------------*/
+/*
+ * A 32-bit DMA mask is set by default; check whether the controller
+ * supports 36-bit DMA and, if it does, switch to a 36-bit DMA mask.
+ */
+static int mtu3_set_dma_mask(struct mtu3 *mtu)
+{
+ struct device *dev = mtu->dev;
+ bool is_36bit = false;
+ int ret = 0;
+ u32 value;
+
+ value = mtu3_readl(mtu->mac_base, U3D_MISC_CTRL);
+ if (value & DMA_ADDR_36BIT) {
+ is_36bit = true;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
+ /* if setting the 36-bit DMA mask fails, fall back to a 32-bit mask */
+ if (ret) {
+ is_36bit = false;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ }
+ }
+ dev_info(dev, "dma mask: %s bits\n", is_36bit ? "36" : "32");
+
+ return ret;
+}
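For reference, a standalone sketch (not driver code) of what the requested mask evaluates to, assuming the standard DMA_BIT_MASK() definition from <linux/dma-mapping.h>: 36 set bits, i.e. up to 64 GiB of addressable DMA memory.

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* prints DMA_BIT_MASK(36) = 0xfffffffff (2^36 bytes == 64 GiB) */
	printf("DMA_BIT_MASK(36) = 0x%llx\n",
	       (unsigned long long)DMA_BIT_MASK(36));
	return 0;
}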
int ssusb_gadget_init(struct ssusb_mtk *ssusb)
{
@@ -774,9 +802,9 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return -ENOMEM;
mtu->irq = platform_get_irq(pdev, 0);
- if (mtu->irq <= 0) {
+ if (mtu->irq < 0) {
dev_err(dev, "fail to get irq number\n");
- return -ENODEV;
+ return mtu->irq;
}
dev_info(dev, "irq %d\n", mtu->irq);
@@ -800,14 +828,15 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
break;
default:
dev_err(dev, "invalid max_speed: %s\n",
usb_speed_string(mtu->max_speed));
/* fall through */
case USB_SPEED_UNKNOWN:
- /* default as SS */
- mtu->max_speed = USB_SPEED_SUPER;
+ /* default as SSP */
+ mtu->max_speed = USB_SPEED_SUPER_PLUS;
break;
}
@@ -820,6 +849,12 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
return ret;
}
+ ret = mtu3_set_dma_mask(mtu);
+ if (ret) {
+ dev_err(dev, "mtu3 set dma_mask failed:%d\n", ret);
+ goto dma_mask_err;
+ }
+
ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
if (ret) {
dev_err(dev, "request irq %d failed!\n", mtu->irq);
@@ -845,6 +880,7 @@ int ssusb_gadget_init(struct ssusb_mtk *ssusb)
gadget_err:
device_init_wakeup(dev, false);
+dma_mask_err:
irq_err:
mtu3_hw_exit(mtu);
ssusb->u3d = NULL;
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index 560256115b23..db7562d99b95 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/debugfs.h>
@@ -261,21 +252,22 @@ static void extcon_register_dwork(struct work_struct *work)
* depending on user input.
* This is useful in special cases, for example when a Type-A receptacle is
* used but dual-role mode is still wanted.
- * It generates cable state changes by pulling up/down IDPIN and
- * notifies driver to switch mode by "extcon-usb-gpio".
- * NOTE: when use MICRO receptacle, should not enable this interface.
*/
static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- if (to_host)
- pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground);
- else
- pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float);
+ if (to_host) {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+ } else {
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_DEVICE);
+ ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+ ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+ }
}
-
static int ssusb_mode_show(struct seq_file *sf, void *unused)
{
struct ssusb_mtk *ssusb = sf->private;
@@ -388,17 +380,45 @@ static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
debugfs_remove_recursive(ssusb->dbgfs_root);
}
+void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
+ enum mtu3_dr_force_mode mode)
+{
+ u32 value;
+
+ value = mtu3_readl(ssusb->ippc_base, SSUSB_U2_CTRL(0));
+ switch (mode) {
+ case MTU3_DR_FORCE_DEVICE:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_HOST:
+ value |= SSUSB_U2_PORT_FORCE_IDDIG;
+ value &= ~SSUSB_U2_PORT_RG_IDDIG;
+ break;
+ case MTU3_DR_FORCE_NONE:
+ value &= ~(SSUSB_U2_PORT_FORCE_IDDIG | SSUSB_U2_PORT_RG_IDDIG);
+ break;
+ default:
+ return;
+ }
+ mtu3_writel(ssusb->ippc_base, SSUSB_U2_CTRL(0), value);
+}
+
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork);
-
- if (otg_sx->manual_drd_enabled)
+ if (otg_sx->manual_drd_enabled) {
ssusb_debugfs_init(ssusb);
-
- /* It is enough to delay 1s for waiting for host initialization */
- schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+ } else {
+ INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork,
+ extcon_register_dwork);
+
+ /*
+ * A delay of 1s is enough to wait for
+ * host initialization
+ */
+ schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+ }
return 0;
}
@@ -407,8 +427,8 @@ void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
{
struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
- cancel_delayed_work(&otg_sx->extcon_reg_dwork);
-
if (otg_sx->manual_drd_enabled)
ssusb_debugfs_exit(ssusb);
+ else
+ cancel_delayed_work(&otg_sx->extcon_reg_dwork);
}
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
index 9b228b5811b0..c179192408ba 100644
--- a/drivers/usb/mtu3/mtu3_dr.h
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.h - dual role switch and host glue layer header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _MTU3_DR_H_
@@ -87,6 +78,8 @@ static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
+void ssusb_set_force_mode(struct ssusb_mtk *ssusb,
+ enum mtu3_dr_force_mode mode);
#else
@@ -103,6 +96,10 @@ static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
return 0;
}
+static inline void
+ssusb_set_force_mode(struct ssusb_mtk *ssusb, enum mtu3_dr_force_mode mode)
+{}
+
#endif
#endif /* _MTU3_DR_H_ */
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 434fca58143c..f05f10f5c171 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget.c - MediaTek usb3 DRD peripheral support
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include "mtu3.h"
@@ -89,6 +80,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
switch (mtu->g.speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = desc->bInterval;
@@ -456,7 +448,7 @@ static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
return -EOPNOTSUPP;
spin_lock_irqsave(&mtu->lock, flags);
- if (mtu->g.speed == USB_SPEED_SUPER) {
+ if (mtu->g.speed >= USB_SPEED_SUPER) {
mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
} else {
mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 958d74dd2b78..ebdcf7a38c29 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
*
* Copyright (c) 2016 MediaTek Inc.
*
* Author: Chunfeng.Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/usb/composite.h>
@@ -212,8 +203,8 @@ ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
case USB_RECIP_DEVICE:
result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
- /* superspeed only */
- if (mtu->g.speed == USB_SPEED_SUPER) {
+
+ if (mtu->g.speed >= USB_SPEED_SUPER) {
result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
}
@@ -329,8 +320,8 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
handled = handle_test_mode(mtu, setup);
break;
case USB_DEVICE_U1_ENABLE:
- if (mtu->g.speed != USB_SPEED_SUPER ||
- mtu->g.state != USB_STATE_CONFIGURED)
+ if (mtu->g.speed < USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
@@ -344,8 +335,8 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
handled = 1;
break;
case USB_DEVICE_U2_ENABLE:
- if (mtu->g.speed != USB_SPEED_SUPER ||
- mtu->g.state != USB_STATE_CONFIGURED)
+ if (mtu->g.speed < USB_SPEED_SUPER ||
+ mtu->g.state != USB_STATE_CONFIGURED)
break;
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
@@ -384,8 +375,8 @@ static int ep0_handle_feature(struct mtu3 *mtu,
break;
case USB_RECIP_INTERFACE:
/* superspeed only */
- if ((value == USB_INTRF_FUNC_SUSPEND)
- && (mtu->g.speed == USB_SPEED_SUPER)) {
+ if (value == USB_INTRF_FUNC_SUSPEND &&
+ mtu->g.speed >= USB_SPEED_SUPER) {
/*
* forward the request because function drivers
* should handle it
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index e42d308b8dc2..d237d7e65c44 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_dr.c - dual role switch and host glue layer
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -79,20 +70,6 @@ int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
if (!ssusb->wakeup_en)
return 0;
- ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
- if (IS_ERR(ssusb->wk_deb_p0)) {
- dev_err(dev, "fail to get wakeup_deb_p0\n");
- return PTR_ERR(ssusb->wk_deb_p0);
- }
-
- if (of_property_read_bool(dn, "wakeup_deb_p1")) {
- ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
- if (IS_ERR(ssusb->wk_deb_p1)) {
- dev_err(dev, "fail to get wakeup_deb_p1\n");
- return PTR_ERR(ssusb->wk_deb_p1);
- }
- }
-
ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
"mediatek,syscon-wakeup");
if (IS_ERR(ssusb->pericfg)) {
@@ -103,36 +80,6 @@ int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
return 0;
}
-static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb)
-{
- int ret;
-
- ret = clk_prepare_enable(ssusb->wk_deb_p0);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable wk_deb_p0\n");
- goto usb_p0_err;
- }
-
- ret = clk_prepare_enable(ssusb->wk_deb_p1);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable wk_deb_p1\n");
- goto usb_p1_err;
- }
-
- return 0;
-
-usb_p1_err:
- clk_disable_unprepare(ssusb->wk_deb_p0);
-usb_p0_err:
- return -EINVAL;
-}
-
-static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb)
-{
- clk_disable_unprepare(ssusb->wk_deb_p1);
- clk_disable_unprepare(ssusb->wk_deb_p0);
-}
-
static void host_ports_num_get(struct ssusb_mtk *ssusb)
{
u32 xhci_cap;
@@ -151,6 +98,7 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
void __iomem *ibase = ssusb->ippc_base;
int num_u3p = ssusb->u3_ports;
int num_u2p = ssusb->u2_ports;
+ int u3_ports_disabled;
u32 check_clk;
u32 value;
int i;
@@ -158,8 +106,14 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
/* power on host ip */
mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
- /* power on and enable all u3 ports */
+ /* power on and enable u3 ports except skipped ones */
+ u3_ports_disabled = 0;
for (i = 0; i < num_u3p; i++) {
+ if ((0x1 << i) & ssusb->u3p_dis_msk) {
+ u3_ports_disabled++;
+ continue;
+ }
+
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
value |= SSUSB_U3_PORT_HOST_SEL;
@@ -175,7 +129,7 @@ int ssusb_host_enable(struct ssusb_mtk *ssusb)
}
check_clk = SSUSB_XHCI_RST_B_STS;
- if (num_u3p)
+ if (num_u3p > u3_ports_disabled)
check_clk = SSUSB_U3_MAC_RST_B_STS;
return ssusb_check_clocks(ssusb, check_clk);
@@ -190,8 +144,11 @@ int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
int ret;
int i;
- /* power down and disable all u3 ports */
+ /* power down and disable u3 ports except skipped ones */
for (i = 0; i < num_u3p; i++) {
+ if ((0x1 << i) & ssusb->u3p_dis_msk)
+ continue;
+
value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
value |= SSUSB_U3_PORT_PDN;
value |= suspend ? 0 : SSUSB_U3_PORT_DIS;
@@ -223,6 +180,8 @@ int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
static void ssusb_host_setup(struct ssusb_mtk *ssusb)
{
+ struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
host_ports_num_get(ssusb);
/*
@@ -231,6 +190,9 @@ static void ssusb_host_setup(struct ssusb_mtk *ssusb)
*/
ssusb_host_enable(ssusb);
+ if (otg_sx->manual_drd_enabled)
+ ssusb_set_force_mode(ssusb, MTU3_DR_FORCE_HOST);
+
/* if port0 supports dual-role, works as host mode by default */
ssusb_set_vbus(&ssusb->otg_switch, 1);
}
@@ -276,19 +238,14 @@ void ssusb_host_exit(struct ssusb_mtk *ssusb)
int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
{
- int ret = 0;
-
- if (ssusb->wakeup_en) {
- ret = ssusb_wakeup_clks_enable(ssusb);
+ if (ssusb->wakeup_en)
ssusb_wakeup_ip_sleep_en(ssusb);
- }
- return ret;
+
+ return 0;
}
void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
{
- if (ssusb->wakeup_en) {
+ if (ssusb->wakeup_en)
ssusb_wakeup_ip_sleep_dis(ssusb);
- ssusb_wakeup_clks_disable(ssusb);
- }
}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 06b29664470f..6ee371478d89 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _SSUSB_HW_REGS_H_
@@ -58,6 +49,8 @@
#define U3D_QCR1 (SSUSB_DEV_BASE + 0x0404)
#define U3D_QCR2 (SSUSB_DEV_BASE + 0x0408)
#define U3D_QCR3 (SSUSB_DEV_BASE + 0x040C)
+#define U3D_TXQHIAR1 (SSUSB_DEV_BASE + 0x0484)
+#define U3D_RXQHIAR1 (SSUSB_DEV_BASE + 0x04C4)
#define U3D_TXQCSR1 (SSUSB_DEV_BASE + 0x0510)
#define U3D_TXQSAR1 (SSUSB_DEV_BASE + 0x0514)
@@ -189,6 +182,13 @@
#define QMU_RX_COZ(x) (BIT(16) << (x))
#define QMU_RX_ZLP(x) (BIT(0) << (x))
+/* U3D_TXQHIAR1 */
+/* U3D_RXQHIAR1 */
+#define QMU_LAST_DONE_PTR_HI(x) (((x) >> 16) & 0xf)
+#define QMU_CUR_GPD_ADDR_HI(x) (((x) >> 8) & 0xf)
+#define QMU_START_ADDR_HI_MSK GENMASK(3, 0)
+#define QMU_START_ADDR_HI(x) (((x) & 0xf) << 0)
+
/* U3D_TXQCSR1 */
/* U3D_RXQCSR1 */
#define QMU_Q_ACTIVE BIT(15)
@@ -225,6 +225,7 @@
#define CAP_TX_EP_NUM(x) ((x) & 0x1f)
/* U3D_MISC_CTRL */
+#define DMA_ADDR_36BIT BIT(31)
#define VBUS_ON BIT(1)
#define VBUS_FRC_EN BIT(0)
@@ -457,11 +458,14 @@
#define SSUSB_VBUS_CHG_INT_B_EN BIT(6)
/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
#define SSUSB_U3_PORT_HOST_SEL BIT(2)
#define SSUSB_U3_PORT_PDN BIT(1)
#define SSUSB_U3_PORT_DIS BIT(0)
/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_RG_IDDIG BIT(12)
+#define SSUSB_U2_PORT_FORCE_IDDIG BIT(11)
#define SSUSB_U2_PORT_VBUSVALID BIT(9)
#define SSUSB_U2_PORT_OTG_SEL BIT(7)
#define SSUSB_U2_PORT_HOST BIT(2)
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index 088e3e685c4f..3650fd11fc49 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/clk.h>
@@ -21,7 +12,6 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include "mtu3.h"
@@ -110,15 +100,9 @@ static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
phy_power_off(ssusb->phys[i]);
}
-static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+static int ssusb_clks_enable(struct ssusb_mtk *ssusb)
{
- int ret = 0;
-
- ret = regulator_enable(ssusb->vusb33);
- if (ret) {
- dev_err(ssusb->dev, "failed to enable vusb33\n");
- goto vusb33_err;
- }
+ int ret;
ret = clk_prepare_enable(ssusb->sys_clk);
if (ret) {
@@ -132,6 +116,52 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
goto ref_clk_err;
}
+ ret = clk_prepare_enable(ssusb->mcu_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable mcu_clk\n");
+ goto mcu_clk_err;
+ }
+
+ ret = clk_prepare_enable(ssusb->dma_clk);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable dma_clk\n");
+ goto dma_clk_err;
+ }
+
+ return 0;
+
+dma_clk_err:
+ clk_disable_unprepare(ssusb->mcu_clk);
+mcu_clk_err:
+ clk_disable_unprepare(ssusb->ref_clk);
+ref_clk_err:
+ clk_disable_unprepare(ssusb->sys_clk);
+sys_clk_err:
+ return ret;
+}
+
+static void ssusb_clks_disable(struct ssusb_mtk *ssusb)
+{
+ clk_disable_unprepare(ssusb->dma_clk);
+ clk_disable_unprepare(ssusb->mcu_clk);
+ clk_disable_unprepare(ssusb->ref_clk);
+ clk_disable_unprepare(ssusb->sys_clk);
+}
+
+static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+{
+ int ret = 0;
+
+ ret = regulator_enable(ssusb->vusb33);
+ if (ret) {
+ dev_err(ssusb->dev, "failed to enable vusb33\n");
+ goto vusb33_err;
+ }
+
+ ret = ssusb_clks_enable(ssusb);
+ if (ret)
+ goto clks_err;
+
ret = ssusb_phy_init(ssusb);
if (ret) {
dev_err(ssusb->dev, "failed to init phy\n");
@@ -149,20 +179,16 @@ static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
phy_err:
ssusb_phy_exit(ssusb);
phy_init_err:
- clk_disable_unprepare(ssusb->ref_clk);
-ref_clk_err:
- clk_disable_unprepare(ssusb->sys_clk);
-sys_clk_err:
+ ssusb_clks_disable(ssusb);
+clks_err:
regulator_disable(ssusb->vusb33);
vusb33_err:
-
return ret;
}
static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
{
- clk_disable_unprepare(ssusb->sys_clk);
- clk_disable_unprepare(ssusb->ref_clk);
+ ssusb_clks_disable(ssusb);
regulator_disable(ssusb->vusb33);
ssusb_phy_power_off(ssusb);
ssusb_phy_exit(ssusb);
@@ -176,31 +202,17 @@ static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
}
-static int get_iddig_pinctrl(struct ssusb_mtk *ssusb)
+/* ignore the error if the clock does not exist */
+static struct clk *get_optional_clk(struct device *dev, const char *id)
{
- struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
-
- otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev);
- if (IS_ERR(otg_sx->id_pinctrl)) {
- dev_err(ssusb->dev, "Cannot find id pinctrl!\n");
- return PTR_ERR(otg_sx->id_pinctrl);
- }
-
- otg_sx->id_float =
- pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float");
- if (IS_ERR(otg_sx->id_float)) {
- dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n");
- return PTR_ERR(otg_sx->id_float);
- }
+ struct clk *opt_clk;
- otg_sx->id_ground =
- pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground");
- if (IS_ERR(otg_sx->id_ground)) {
- dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n");
- return PTR_ERR(otg_sx->id_ground);
- }
+ opt_clk = devm_clk_get(dev, id);
+ /* ignore any error except EPROBE_DEFER */
+ if (IS_ERR(opt_clk) && (PTR_ERR(opt_clk) != -EPROBE_DEFER))
+ opt_clk = NULL;
- return 0;
+ return opt_clk;
}
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
@@ -225,18 +237,17 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->sys_clk);
}
- /*
- * reference clock is usually a "fixed-clock", make it optional
- * for backward compatibility and ignore the error if it does
- * not exist.
- */
- ssusb->ref_clk = devm_clk_get(dev, "ref_ck");
- if (IS_ERR(ssusb->ref_clk)) {
- if (PTR_ERR(ssusb->ref_clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ ssusb->ref_clk = get_optional_clk(dev, "ref_ck");
+ if (IS_ERR(ssusb->ref_clk))
+ return PTR_ERR(ssusb->ref_clk);
- ssusb->ref_clk = NULL;
- }
+ ssusb->mcu_clk = get_optional_clk(dev, "mcu_ck");
+ if (IS_ERR(ssusb->mcu_clk))
+ return PTR_ERR(ssusb->mcu_clk);
+
+ ssusb->dma_clk = get_optional_clk(dev, "dma_ck");
+ if (IS_ERR(ssusb->dma_clk))
+ return PTR_ERR(ssusb->dma_clk);
ssusb->num_phys = of_count_phandle_with_args(node,
"phys", "#phy-cells");
@@ -263,10 +274,8 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
return PTR_ERR(ssusb->ippc_base);
ssusb->dr_mode = usb_get_dr_mode(dev);
- if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) {
- dev_err(dev, "dr_mode is error\n");
- return -EINVAL;
- }
+ if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
+ ssusb->dr_mode = USB_DR_MODE_OTG;
if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
return 0;
@@ -276,10 +285,10 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
if (ret)
return ret;
- if (ssusb->dr_mode != USB_DR_MODE_OTG)
- return 0;
+ /* optional property, ignore the error if it does not exist */
+ of_property_read_u32(node, "mediatek,u3p-dis-msk",
+ &ssusb->u3p_dis_msk);
- /* if dual-role mode is supported */
vbus = devm_regulator_get(&pdev->dev, "vbus");
if (IS_ERR(vbus)) {
dev_err(dev, "failed to get vbus\n");
@@ -287,6 +296,10 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
}
otg_sx->vbus = vbus;
+ if (ssusb->dr_mode == USB_DR_MODE_HOST)
+ return 0;
+
+ /* if dual-role mode is supported */
otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
otg_sx->manual_drd_enabled =
of_property_read_bool(node, "enable-manual-drd");
@@ -297,15 +310,11 @@ static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
dev_err(ssusb->dev, "couldn't get extcon device\n");
return -EPROBE_DEFER;
}
- if (otg_sx->manual_drd_enabled) {
- ret = get_iddig_pinctrl(ssusb);
- if (ret)
- return ret;
- }
}
- dev_info(dev, "dr_mode: %d, is_u3_dr: %d\n",
- ssusb->dr_mode, otg_sx->is_u3_drd);
+ dev_info(dev, "dr_mode: %d, is_u3_dr: %d, u3p_dis_msk: %x, drd: %s\n",
+ ssusb->dr_mode, otg_sx->is_u3_drd, ssusb->u3p_dis_msk,
+ otg_sx->manual_drd_enabled ? "manual" : "auto");
return 0;
}
@@ -447,8 +456,7 @@ static int __maybe_unused mtu3_suspend(struct device *dev)
ssusb_host_disable(ssusb, true);
ssusb_phy_power_off(ssusb);
- clk_disable_unprepare(ssusb->sys_clk);
- clk_disable_unprepare(ssusb->ref_clk);
+ ssusb_clks_disable(ssusb);
ssusb_wakeup_enable(ssusb);
return 0;
@@ -466,27 +474,21 @@ static int __maybe_unused mtu3_resume(struct device *dev)
return 0;
ssusb_wakeup_disable(ssusb);
- ret = clk_prepare_enable(ssusb->sys_clk);
- if (ret)
- goto err_sys_clk;
-
- ret = clk_prepare_enable(ssusb->ref_clk);
+ ret = ssusb_clks_enable(ssusb);
if (ret)
- goto err_ref_clk;
+ goto clks_err;
ret = ssusb_phy_power_on(ssusb);
if (ret)
- goto err_power_on;
+ goto phy_err;
ssusb_host_enable(ssusb);
return 0;
-err_power_on:
- clk_disable_unprepare(ssusb->ref_clk);
-err_ref_clk:
- clk_disable_unprepare(ssusb->sys_clk);
-err_sys_clk:
+phy_err:
+ ssusb_clks_disable(ssusb);
+clks_err:
return ret;
}
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 7d9ba8a52368..ff62ba232177 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_qmu.c - Queue Management Unit driver for device controller
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
/*
@@ -40,7 +31,58 @@
#define GPD_FLAGS_IOC BIT(7)
#define GPD_EXT_FLAG_ZLP BIT(5)
+#define GPD_EXT_NGP(x) (((x) & 0xf) << 4)
+#define GPD_EXT_BUF(x) (((x) & 0xf) << 0)
+
+#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
+#define HILO_DMA(hi, lo) \
+ ((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
+
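A worked example of how these helpers recombine a QMU current-pointer register with the extra 4 address bits read from the corresponding *QHIAR register (standalone sketch with illustrative values; le32_to_cpu() is elided by assuming a little-endian host):

#include <stdint.h>
#include <stdio.h>

#define HILO_GEN64(hi, lo)	(((uint64_t)(hi) << 32) + (lo))
#define CUR_GPD_ADDR_HI(x)	(((x) >> 8) & 0xf)	/* as QMU_CUR_GPD_ADDR_HI() */

int main(void)
{
	uint32_t txcpr = 0x12345678;	/* read from USB_QMU_TQCPR(epnum) */
	uint32_t txhiar = 0x00000800;	/* bits [11:8] hold address bits [35:32] */
	uint64_t cur_gpd_dma = HILO_GEN64(CUR_GPD_ADDR_HI(txhiar), txcpr);

	/* prints: current GPD dma = 0x812345678 */
	printf("current GPD dma = 0x%llx\n", (unsigned long long)cur_gpd_dma);
	return 0;
}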
+static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
+{
+ u32 txcpr;
+ u32 txhiar;
+
+ txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+ txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
+ return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
+}
+
+static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
+{
+ u32 rxcpr;
+ u32 rxhiar;
+
+ rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
+ rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
+
+ return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
+}
+
+static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
+{
+ u32 tqhiar;
+
+ mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
+ cpu_to_le32(lower_32_bits(dma)));
+ tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
+ tqhiar &= ~QMU_START_ADDR_HI_MSK;
+ tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
+ mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
+}
+
+static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
+{
+ u32 rqhiar;
+
+ mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
+ cpu_to_le32(lower_32_bits(dma)));
+ rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
+ rqhiar &= ~QMU_START_ADDR_HI_MSK;
+ rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
+ mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
+}
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
dma_addr_t dma_addr)
@@ -193,21 +235,27 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ dma_addr_t enq_dma;
+ u16 ext_addr;
/* set all fields to zero as default value */
memset(gpd, 0, sizeof(*gpd));
- gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
+ ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
gpd->buf_len = cpu_to_le16(req->length);
gpd->flag |= GPD_FLAGS_IOC;
/* get the next GPD */
enq = advance_enq_gpd(ring);
- dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
- mep->epnum, gpd, enq);
+ enq_dma = gpd_virt_to_dma(ring, enq);
+ dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
+ mep->epnum, gpd, enq, &enq_dma);
enq->flag &= ~GPD_FLAGS_HWO;
- gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
+ ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
+ gpd->tx_ext_addr = cpu_to_le16(ext_addr);
if (req->zero)
gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
@@ -226,21 +274,27 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
struct qmu_gpd *gpd = ring->enqueue;
struct usb_request *req = &mreq->request;
+ dma_addr_t enq_dma;
+ u16 ext_addr;
/* set all fields to zero as default value */
memset(gpd, 0, sizeof(*gpd));
- gpd->buffer = cpu_to_le32((u32)req->dma);
+ gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
+ ext_addr = GPD_EXT_BUF(upper_32_bits(req->dma));
gpd->data_buf_len = cpu_to_le16(req->length);
gpd->flag |= GPD_FLAGS_IOC;
/* get the next GPD */
enq = advance_enq_gpd(ring);
- dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
- mep->epnum, gpd, enq);
+ enq_dma = gpd_virt_to_dma(ring, enq);
+ dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
+ mep->epnum, gpd, enq, &enq_dma);
enq->flag &= ~GPD_FLAGS_HWO;
- gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+ gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
+ ext_addr |= GPD_EXT_NGP(upper_32_bits(enq_dma));
+ gpd->rx_ext_addr = cpu_to_le16(ext_addr);
gpd->chksum = qmu_calc_checksum((u8 *)gpd);
gpd->flag |= GPD_FLAGS_HWO;
@@ -267,8 +321,8 @@ int mtu3_qmu_start(struct mtu3_ep *mep)
if (mep->is_in) {
/* set QMU start address */
- mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
- mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+ write_txq_start_addr(mbase, epnum, ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
/* send zero length packet according to ZLP flag in GPD */
mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
@@ -282,8 +336,8 @@ int mtu3_qmu_start(struct mtu3_ep *mep)
mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
} else {
- mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
- mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
+ write_rxq_start_addr(mbase, epnum, ring->dma);
+ mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
/* don't expect ZLP */
mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
@@ -353,9 +407,9 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
struct mtu3_gpd_ring *ring = &mep->gpd_ring;
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
struct usb_request *req = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
u32 txcsr = 0;
int ret;
@@ -365,7 +419,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
else
return;
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
if (le16_to_cpu(gpd_current->buf_len) != 0) {
dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
@@ -408,12 +463,13 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
struct usb_request *request = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
/* translate the physical address read from the QMU register into a virtual address */
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
@@ -446,11 +502,12 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
void __iomem *mbase = mtu->mac_base;
struct qmu_gpd *gpd = ring->dequeue;
struct qmu_gpd *gpd_current = NULL;
- dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
struct usb_request *req = NULL;
struct mtu3_request *mreq;
+ dma_addr_t cur_gpd_dma;
- gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+ cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
+ gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
__func__, epnum, gpd, gpd_current, ring->enqueue);
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
index 4dafa16bf120..81f5151a55ed 100644
--- a/drivers/usb/mtu3/mtu3_qmu.h
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mtu3_qmu.h - Queue Management Unit driver header
*
* Copyright (C) 2016 MediaTek Inc.
*
* Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __MTK_QMU_H__
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 689d42aba8a9..79d4d5439164 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# for USB OTG silicon based on Mentor Graphics INVENTRA designs
#
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 02fbb4fe3745..0ad664efda6b 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments AM35x "glue layer"
@@ -8,23 +9,6 @@
* Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -133,11 +117,9 @@ static void am35x_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -173,7 +155,7 @@ static void otg_timer(unsigned long _musb)
case OTG_STATE_B_IDLE:
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -195,12 +177,12 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
- if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
return;
}
@@ -209,7 +191,7 @@ static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
- mod_timer(&otg_workaround, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
@@ -278,14 +260,14 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -324,7 +306,7 @@ eoi:
/* Poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -365,7 +347,7 @@ static int am35x_musb_init(struct musb *musb)
if (IS_ERR_OR_NULL(musb->xceiv))
return -EPROBE_DEFER;
- setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the musb */
if (data->reset)
@@ -395,7 +377,7 @@ static int am35x_musb_exit(struct musb *musb)
struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
/* Shutdown the on-chip PHY and its PLL. */
if (data->set_phy_power)
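The remaining musb glue layers below (blackfin, da8xx, davinci, musb_core) follow the same timer API migration as am35x above: the file-scope static timer becomes a per-device field, setup_timer() with a cast cookie becomes timer_setup(), and the callback recovers its device with from_timer(). A minimal kernel-style sketch of the pattern, using illustrative names rather than musb's own:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_glue {
	struct timer_list dev_timer;	/* timer embedded in the device struct */
	int polls;
};

/* new-style callback: gets the timer itself, from_timer() recovers the device */
static void my_glue_poll(struct timer_list *t)
{
	struct my_glue *glue = from_timer(glue, t, dev_timer);

	glue->polls++;
	mod_timer(&glue->dev_timer, jiffies + 2 * HZ);	/* re-arm, 2 s poll */
}

static void my_glue_start_polling(struct my_glue *glue)
{
	/* replaces setup_timer(&timer, fn, (unsigned long)glue) */
	timer_setup(&glue->dev_timer, my_glue_poll, 0);
	mod_timer(&glue->dev_timer, jiffies + 2 * HZ);
}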
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 4418574a36a1..0a98dcd66d19 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* MUSB OTG controller driver for Blackfin Processors
*
* Copyright 2006-2008 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
@@ -223,7 +222,7 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
if ((musb->xceiv->otg->state == OTG_STATE_B_IDLE
|| musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON) ||
(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY);
musb->a_wait_bcon = TIMER_DELAY;
}
@@ -232,9 +231,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
return retval;
}
-static void musb_conn_timer_handler(unsigned long _musb)
+static void musb_conn_timer_handler(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
unsigned long flags;
u16 val;
static u8 toggle;
@@ -266,7 +265,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
musb_writeb(musb->mregs, MUSB_INTRUSB, val);
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
}
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY);
break;
case OTG_STATE_B_IDLE:
/*
@@ -310,7 +309,7 @@ static void musb_conn_timer_handler(unsigned long _musb)
* shortening it, if accelerating A-plug detection
* is needed in OTG mode.
*/
- mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
+ mod_timer(&musb->dev_timer, jiffies + TIMER_DELAY / 4);
}
break;
default:
@@ -445,8 +444,7 @@ static int bfin_musb_init(struct musb *musb)
bfin_musb_reg_init(musb);
- setup_timer(&musb_conn_timer, musb_conn_timer_handler,
- (unsigned long) musb);
+ timer_setup(&musb->dev_timer, musb_conn_timer_handler, 0);
musb->xceiv->set_power = bfin_musb_set_power;
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index c84dae546dc6..5b149915b0f8 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 by Analog Devices, Inc.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_BLACKFIN_H__
@@ -82,6 +78,4 @@ static void dump_fifo_data(u8 *buf, u16 len)
/* Almost 1 second */
#define TIMER_DELAY (1 * HZ)
-static struct timer_list musb_conn_timer;
-
#endif /* __MUSB_BLACKFIN_H__ */
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a13bd3625043..b4d6d9bb3239 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
*
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 9bb7c5e45c85..16dd1ed44bb5 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2005-2006 by Texas Instruments */
#ifndef _CPPI_DMA_H_
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index df88123274ca..0397606a211b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DA8xx/OMAP-L1x "glue layer"
*
@@ -10,23 +11,6 @@
* Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -50,10 +34,7 @@
#define DA8XX_USB_CTRL_REG 0x04
#define DA8XX_USB_STAT_REG 0x08
#define DA8XX_USB_EMULATION_REG 0x0c
-#define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */
-#define DA8XX_USB_AUTOREQ_REG 0x14
#define DA8XX_USB_SRP_FIX_TIME_REG 0x18
-#define DA8XX_USB_TEARDOWN_REG 0x1c
#define DA8XX_USB_INTR_SRC_REG 0x20
#define DA8XX_USB_INTR_SRC_SET_REG 0x24
#define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
@@ -138,11 +119,9 @@ static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -178,7 +157,7 @@ static void otg_timer(unsigned long _musb)
* VBUSERR got reported during enumeration" cases.
*/
if (devctl & MUSB_DEVCTL_VBUS) {
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
break;
}
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
@@ -201,7 +180,7 @@ static void otg_timer(unsigned long _musb)
musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -223,12 +202,12 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
- if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
+ if (time_after(last_timer, timeout) && timer_pending(&musb->dev_timer)) {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
return;
}
@@ -237,7 +216,7 @@ static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
jiffies_to_msecs(timeout - jiffies));
- mod_timer(&otg_workaround, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
@@ -297,14 +276,14 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -331,7 +310,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
/* Poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -393,7 +372,7 @@ static int da8xx_musb_init(struct musb *musb)
goto fail;
}
- setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the controller */
musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
@@ -431,7 +410,7 @@ static int da8xx_musb_exit(struct musb *musb)
{
struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
phy_power_off(glue->phy);
phy_exit(glue->phy);
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 52b491d3d5d8..2ad39dcd2f4c 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -1,24 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
@@ -199,11 +183,9 @@ static void davinci_musb_set_vbus(struct musb *musb, int is_on)
#define POLL_SECONDS 2
-static struct timer_list otg_workaround;
-
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
void __iomem *mregs = musb->mregs;
u8 devctl;
unsigned long flags;
@@ -224,7 +206,7 @@ static void otg_timer(unsigned long _musb)
* VBUSERR got reported during enumeration" cases.
*/
if (devctl & MUSB_DEVCTL_VBUS) {
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
break;
}
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
@@ -248,7 +230,7 @@ static void otg_timer(unsigned long _musb)
devctl | MUSB_DEVCTL_SESSION);
devctl = musb_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
else
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
break;
@@ -325,14 +307,14 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
MUSB_HST_MODE(musb);
otg->default_a = 1;
musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
portstate(musb->port1_status |= USB_PORT_STAT_POWER);
- del_timer(&otg_workaround);
+ del_timer(&musb->dev_timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -361,7 +343,7 @@ static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
/* poll for ID change */
if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
- mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ mod_timer(&musb->dev_timer, jiffies + POLL_SECONDS * HZ);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -393,7 +375,7 @@ static int davinci_musb_init(struct musb *musb)
if (revision == 0)
goto fail;
- setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
davinci_musb_source_power(musb, 0, 1);
@@ -443,7 +425,7 @@ unregister:
static int davinci_musb_exit(struct musb *musb)
{
- del_timer_sync(&otg_workaround);
+ del_timer_sync(&musb->dev_timer);
/* force VBUS off */
if (cpu_is_davinci_dm355()) {
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 371baa0ee509..e021485c83ae 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_HDRDF_H__
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 40c68c23d553..04d8b2bc205a 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Ingenic JZ4740 "glue layer"
*
* Copyright (C) 2013, Apelete Seketeli <apelete@seketeli.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index 1e58ed2361cc..5f04f8e3a640 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ff5a1a8989d5..ea5013aa69e2 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver core code
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
/*
@@ -485,9 +460,9 @@ void musb_load_testpacket(struct musb *musb)
/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
-static void musb_otg_timer_func(unsigned long data)
+static void musb_otg_timer_func(struct timer_list *t)
{
- struct musb *musb = (struct musb *)data;
+ struct musb *musb = from_timer(musb, t, otg_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
@@ -767,6 +742,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
+ /* fall through */
case OTG_STATE_B_PERIPHERAL:
musb_g_suspend(musb);
musb->is_active = musb->g.b_hnp_enable;
@@ -2330,7 +2306,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
- setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
+ timer_setup(&musb->otg_timer, musb_otg_timer_func, 0);
/* attach to the IRQ */
if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
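
The new "/* fall through */" comment in the musb_stage0_irq() hunk is functional, not cosmetic: with -Wimplicit-fallthrough enabled, GCC treats such a comment as an annotation that the missing break is intentional and suppresses the warning for that case. A small self-contained sketch of the idiom, with made-up state names:

enum demo_state { DEMO_IDLE, DEMO_PERIPHERAL };

static void demo_suspend(enum demo_state state, int active)
{
	switch (state) {
	case DEMO_IDLE:
		if (!active)
			break;
		/* fall through */
	case DEMO_PERIPHERAL:
		/* suspend handling shared with the idle-but-active case */
		break;
	}
}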
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 20f4614178d9..385841ee6f46 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_CORE_H__
@@ -345,6 +320,7 @@ struct musb {
struct list_head pending_list; /* pending work list */
struct timer_list otg_timer;
+ struct timer_list dev_timer;
struct notifier_block nb;
struct dma_controller *dma_controller;
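
The new dev_timer member is what makes the glue-layer conversions in this series possible: from_timer() expands to container_of(), so the timer must be embedded in the structure the callback wants to recover, and struct musb is the one object every glue layer already has. A simplified illustration of the relationship (struct name and fields abbreviated, not the real definition):

#include <linux/timer.h>
#include <linux/spinlock.h>

struct musb_like {			/* stand-in for struct musb */
	spinlock_t lock;
	struct timer_list dev_timer;	/* shared slot for the glue-specific timer */
};

static void glue_poll(struct timer_list *t)
{
	/* expands to container_of(t, struct musb_like, dev_timer) */
	struct musb_like *musb = from_timer(musb, t, dev_timer);
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);
	/* ... OTG state polling specific to one glue layer ... */
	spin_unlock_irqrestore(&musb->lock, flags);
}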
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 1ec0a4947b6b..d0dd4f470bbe 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 9a78877a8afe..5e0f079dde21 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver debug defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_LINUX_DEBUG_H__
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 952733ceaac8..7cf5a1bbdaff 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver debugfs support
*
* Copyright 2010 Nokia Corporation
* Contact: Felipe Balbi <felipe.balbi@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 04c3bd86bd62..a4241f4d430e 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver DMA controller abstraction
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_DMA_H__
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index f6b526606ad1..05a679d5e3a2 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments DSPS platforms "glue layer"
*
@@ -7,22 +8,6 @@
*
* This file is part of the Inventra Controller Driver for Linux.
*
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
* musb_dsps.c will be a common file for all the TI DSPS platforms
* such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
* For now only ti81x is using this and in future davinci.c, am35x.c
@@ -119,7 +104,6 @@ struct dsps_glue {
struct platform_device *musb; /* child musb pdev */
const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
int vbus_irq; /* optional vbus irq */
- struct timer_list timer; /* otg_workaround timer */
unsigned long last_timer; /* last timer data for each instance */
bool sw_babble_enabled;
void __iomem *usbss_base;
@@ -149,6 +133,7 @@ static const struct debugfs_reg32 dsps_musb_regs[] = {
static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
{
+ struct musb *musb = platform_get_drvdata(glue->musb);
int wait;
if (wait_ms < 0)
@@ -156,7 +141,7 @@ static void dsps_mod_timer(struct dsps_glue *glue, int wait_ms)
else
wait = msecs_to_jiffies(wait_ms);
- mod_timer(&glue->timer, jiffies + wait);
+ mod_timer(&musb->dev_timer, jiffies + wait);
}
/*
@@ -216,7 +201,7 @@ static void dsps_musb_disable(struct musb *musb)
musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
}
/* Caller must take musb->lock */
@@ -230,7 +215,7 @@ static int dsps_check_status(struct musb *musb, void *unused)
int skip_session = 0;
if (glue->vbus_irq)
- del_timer(&glue->timer);
+ del_timer(&musb->dev_timer);
/*
* We poll because DSPS IP's won't expose several OTG-critical
@@ -282,9 +267,9 @@ static int dsps_check_status(struct musb *musb, void *unused)
return 0;
}
-static void otg_timer(unsigned long _musb)
+static void otg_timer(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
struct device *dev = musb->controller;
unsigned long flags;
int err;
@@ -480,7 +465,7 @@ static int dsps_musb_init(struct musb *musb)
}
}
- setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, otg_timer, 0);
/* Reset the musb */
musb_writel(reg_base, wrp->control, (1 << wrp->reset));
@@ -515,7 +500,7 @@ static int dsps_musb_exit(struct musb *musb)
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
usb_phy_shutdown(musb->xceiv);
phy_power_off(musb->phy);
phy_exit(musb->phy);
@@ -1027,7 +1012,7 @@ static int dsps_suspend(struct device *dev)
return ret;
}
- del_timer_sync(&glue->timer);
+ del_timer_sync(&musb->dev_timer);
mbase = musb->ctrl_base;
glue->context.control = musb_readl(mbase, wrp->control);
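
With the timer moved out of struct dsps_glue, dsps_mod_timer() first has to reach the struct musb that now owns it: glue->musb is the child platform device created for the core, and its drvdata is the struct musb instance. A hedged sketch of that lookup with simplified types (the default poll interval below is a placeholder, not the driver's actual value):

#include <linux/platform_device.h>
#include <linux/timer.h>
#include "musb_core.h"			/* for struct musb and its dev_timer member */

struct glue_like {
	struct platform_device *musb;	/* child "musb-hdrc" platform device */
};

#define DEMO_DEFAULT_POLL_MS	100	/* placeholder for the wrapper's poll timeout */

static void glue_mod_timer(struct glue_like *glue, int wait_ms)
{
	struct musb *musb = platform_get_drvdata(glue->musb);
	unsigned long wait;

	wait = msecs_to_jiffies(wait_ms < 0 ? DEMO_DEFAULT_POLL_MS : wait_ms);
	mod_timer(&musb->dev_timer, jiffies + wait);
}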
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bc6d1717c9ec..293e5b8da565 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver peripheral support
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 0314dfc770c7..9c34aca06db6 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver peripheral defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_GADGET_H
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 844a309fe895..18da4873e52e 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG peripheral driver ep0 handling
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index b17450a59882..2627363fb4fe 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver host support
*
@@ -5,32 +6,6 @@
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 7bbf01bf4bb0..72392bbcd0a4 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver host defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef _MUSB_HOST_H
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 17a80ae20674..b7025b2e6e00 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver register I/O
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index cff5bcf0d00f..a4beba184798 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver register defines
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef __MUSB_REGS_H__
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
index 70973d901a21..476872adce80 100644
--- a/drivers/usb/musb/musb_trace.c
+++ b/drivers/usb/musb/musb_trace.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* musb_trace.c - MUSB Controller Trace Support
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Bin Liu <b-liu@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define CREATE_TRACE_POINTS
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
index f031c9e74322..a97d618fe8ff 100644
--- a/drivers/usb/musb/musb_trace.h
+++ b/drivers/usb/musb/musb_trace.h
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* musb_trace.h - MUSB Controller Trace Support
*
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
*
* Author: Bin Liu <b-liu@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 0b4595439d51..5165d2b07ade 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -1,35 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver virtual root hub support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 3620073da58c..21fb9e6622f3 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index a3dcbd55e436..44f7983df0a1 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MUSB OTG driver - support for Mentor's DMA controller
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2007 by Texas Instruments
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#ifndef CONFIG_BLACKFIN
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 456f3e6ecf03..5d705930ef47 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2007 by Texas Instruments
* Some code has been taken from tusb6010.c
@@ -6,23 +7,6 @@
* Tony Lindgren <tony@atomide.com>
*
* This file is part of the Inventra Controller Driver for Linux.
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- *
- * The Inventra Controller Driver for Linux is distributed in
- * the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
- * License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with The Inventra Controller Driver for Linux ; if not,
- * write to the Free Software Foundation, Inc., 59 Temple Place,
- * Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index 1b5e83a9840e..859008fa0e3c 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005-2006 by Texas Instruments
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
*/
#ifndef __MUSB_OMAP243X_H__
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index dc353e24d53c..2d201219ecff 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Allwinner sun4i MUSB Glue Layer
*
@@ -5,16 +6,6 @@
*
* Based on code from
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 4eb640c54f2c..39453287b5c3 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Notes:
* - Driver assumes that interface to external host (main CPU) is
* configured for NOR FLASH interface instead of VLYNQ serial
@@ -452,11 +449,9 @@ static int tusb_musb_vbus_status(struct musb *musb)
return ret;
}
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
+static void musb_do_idle(struct timer_list *t)
{
- struct musb *musb = (void *)_musb;
+ struct musb *musb = from_timer(musb, t, dev_timer);
unsigned long flags;
spin_lock_irqsave(&musb->lock, flags);
@@ -523,13 +518,13 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
&& (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
last_timer = jiffies;
return;
}
if (time_after(last_timer, timeout)) {
- if (!timer_pending(&musb_idle_timer))
+ if (!timer_pending(&musb->dev_timer))
last_timer = timeout;
else {
dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
@@ -541,7 +536,7 @@ static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
usb_otg_state_string(musb->xceiv->otg->state),
(unsigned long)jiffies_to_msecs(timeout - jiffies));
- mod_timer(&musb_idle_timer, timeout);
+ mod_timer(&musb->dev_timer, timeout);
}
/* ticks of 60 MHz clock */
@@ -873,7 +868,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
}
if (int_src & TUSB_INT_SRC_USB_IP_CONN)
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
/* OTG state change reports (annoyingly) not issued by Mentor core */
if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
@@ -982,7 +977,7 @@ static void tusb_musb_disable(struct musb *musb)
musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
- del_timer(&musb_idle_timer);
+ del_timer(&musb->dev_timer);
if (is_dma_capable() && !dma_off) {
printk(KERN_WARNING "%s %s: dma still active\n",
@@ -1142,7 +1137,7 @@ static int tusb_musb_init(struct musb *musb)
musb->xceiv->set_power = tusb_draw_power;
the_musb = musb;
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+ timer_setup(&musb->dev_timer, musb_do_idle, 0);
done:
if (ret < 0) {
@@ -1156,7 +1151,7 @@ done:
static int tusb_musb_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
+ del_timer_sync(&musb->dev_timer);
the_musb = NULL;
if (musb->board_set_power)
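
tusb6010 differs from the other glue layers in one respect: musb_idle_timer was a file-scope static, so its state was shared by every controller instance rather than being per-device. Embedding the timer in struct musb removes that limitation; the general shape of the change, with hypothetical names:

#include <linux/timer.h>

/* Before: one timer for the whole driver; a second controller would corrupt it. */
static struct timer_list idle_timer;

/* After: each controller instance carries its own timer. */
struct my_ctrl {
	struct timer_list idle_timer;
};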
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
index 72cdad23ced9..fd8025bbece7 100644
--- a/drivers/usb/musb/tusb6010.h
+++ b/drivers/usb/musb/tusb6010.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __TUSB6010_H__
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e8060e49b0f4..60a93b8bbe3c 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
*
* Copyright (C) 2006 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 5a572500c418..27b4a77a9e23 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010 ST-Ericsson AB
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
*
* Based on omap2430.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index c92a295049ad..d19bb3e89da6 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* drivers/usb/musb/ux500_dma.c
*
@@ -9,19 +10,6 @@
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
* Praveena Nadahally <praveen.nadahally@stericsson.com>
* Rajaram Regupathy <ragupathy.rajaram@stericsson.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index aff702c0eb9f..0f8ab981d572 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -137,35 +137,6 @@ config USB_ISP1301
To compile this driver as a module, choose M here: the
module will be called phy-isp1301.
-config USB_MSM_OTG
- tristate "Qualcomm on-chip USB OTG controller support"
- depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
- depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
- depends on RESET_CONTROLLER
- select USB_PHY
- help
- Enable this to support the USB OTG transceiver on Qualcomm chips. It
- handles PHY initialization, clock management, and workarounds
- required after resetting the hardware and power management.
- This driver is required even for peripheral only or host only
- mode configurations.
- This driver is not supported on boards like trout which
- has an external PHY.
-
-config USB_QCOM_8X16_PHY
- tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
- depends on ARCH_QCOM || COMPILE_TEST
- depends on RESET_CONTROLLER
- select USB_PHY
- select USB_ULPI_VIEWPORT
- help
- Enable this to support the USB transceiver on Qualcomm 8x16 chipsets.
- It handles PHY initialization, clock management, power management,
- and workarounds required after resetting the hardware.
-
- To compile this driver as a module, choose M here: the
- module will be called phy-qcom-8x16-usb.
-
config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index e7c9ca8cafb0..25e579fb92b8 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for physical layer USB drivers
#
@@ -18,8 +19,6 @@ obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o
obj-$(CONFIG_USB_EHCI_TEGRA) += phy-tegra-usb.o
obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o
obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o
-obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o
-obj-$(CONFIG_USB_QCOM_8X16_PHY) += phy-qcom-8x16-usb.o
obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o
obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
diff --git a/drivers/usb/phy/of.c b/drivers/usb/phy/of.c
index 66ffa82457a8..1ab134f45d67 100644
--- a/drivers/usb/phy/of.c
+++ b/drivers/usb/phy/of.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB of helper code
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 61bf2285d5b1..87295313a10c 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB transceiver driver for AB8500 family chips
*
@@ -5,21 +6,6 @@
* Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
* Avinash Kumar <avinash.kumar@stericsson.com>
* Thirupathi Chippakurthy <thirupathi.chippakurthy@stericsson.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 5f5f19813fde..a3cb25cb74f8 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
diff --git a/drivers/usb/phy/phy-am335x-control.h b/drivers/usb/phy/phy-am335x-control.h
index e86b3165d69d..cd4acfc6e360 100644
--- a/drivers/usb/phy/phy-am335x-control.h
+++ b/drivers/usb/phy/phy-am335x-control.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AM335x_PHY_CONTROL_H_
#define _AM335x_PHY_CONTROL_H_
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 7e5aece769da..b36fa8b953d0 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index cf8f40ae6e01..900875f326d7 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2007,2008 Freescale semiconductor, Inc.
*
@@ -5,20 +6,6 @@
* Jerry Huang <Chang-Ming.Huang@freescale.com>
*
* Initialization based on code from Shlomi Gridish.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index 23149954a09c..43d410f6641b 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -1,19 +1,5 @@
-/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. */
#include <linux/usb/otg-fsm.h>
#include <linux/usb/otg.h>
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 89d6e7a5fdb7..74ba88297991 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NOP USB transceiver for all USB transceiver which are either built-in
* into USB IP or which are mostly autonomous.
@@ -5,20 +6,6 @@
* Copyright (C) 2009 Texas Instruments Inc
* Author: Ajay Kumar Gupta <ajay.gupta@ti.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* Current status:
* This provides a "nop" transceiver for PHYs which are
* autonomous such as isp1504, isp1707, etc.
@@ -224,7 +211,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
int err = 0;
u32 clk_rate = 0;
- bool needs_vcc = false;
+ bool needs_vcc = false, needs_clk = false;
if (dev->of_node) {
struct device_node *node = dev->of_node;
@@ -233,6 +220,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
clk_rate = 0;
needs_vcc = of_property_read_bool(node, "vcc-supply");
+ needs_clk = of_property_read_bool(node, "clocks");
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
@@ -275,6 +263,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
if (IS_ERR(nop->clk)) {
dev_dbg(dev, "Can't get phy clock: %ld\n",
PTR_ERR(nop->clk));
+ if (needs_clk)
+ return PTR_ERR(nop->clk);
}
if (!IS_ERR(nop->clk) && clk_rate) {
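
The phy-generic hunks also change how a missing clock is treated: previously any devm_clk_get() failure was only logged, whereas now it is fatal when the device-tree node actually declares a "clocks" property, i.e. when the board says a clock is required. A sketch of the pattern; the "main_clk" clock name is assumed here purely for illustration:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

static int demo_get_phy_clk(struct device *dev, struct clk **out)
{
	bool needs_clk = false;
	struct clk *clk;

	if (dev->of_node)
		needs_clk = of_property_read_bool(dev->of_node, "clocks");

	clk = devm_clk_get(dev, "main_clk");	/* clock name assumed for this sketch */
	if (IS_ERR(clk)) {
		dev_dbg(dev, "Can't get phy clock: %ld\n", PTR_ERR(clk));
		if (needs_clk)			/* DT promised a clock: propagate the error */
			return PTR_ERR(clk);
		clk = NULL;			/* optional clock: continue without it */
	}

	*out = clk;
	return 0;
}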
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
index 0d0eadd54ed9..97289627561d 100644
--- a/drivers/usb/phy/phy-generic.h
+++ b/drivers/usb/phy/phy-generic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PHY_GENERIC_H_
#define _PHY_GENERIC_H_
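
One formatting detail worth noting across all of these SPDX additions: .c source files take the C++-style "//" comment on line 1, while headers keep a traditional block comment, per the kernel's license-rules convention for files that may be parsed outside plain C contexts. For example:

/* first line of a .c source file: */
// SPDX-License-Identifier: GPL-2.0

/* first line of a .h header file: */
/* SPDX-License-Identifier: GPL-2.0 */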
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index f66120db8a41..553e2573c74f 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gpio-vbus.c - simple GPIO VBUS sensing driver for B peripheral devices
*
* Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index c6052c814bcc..7041ba030052 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* isp1301_omap - ISP 1301 USB transceiver, talking to OMAP OTG controller
*
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
@@ -1183,9 +1170,11 @@ static irqreturn_t isp1301_irq(int irq, void *isp)
return IRQ_HANDLED;
}
-static void isp1301_timer(unsigned long _isp)
+static void isp1301_timer(struct timer_list *t)
{
- isp1301_defer_work((void *)_isp, WORK_TIMER);
+ struct isp1301 *isp = from_timer(isp, t, timer);
+
+ isp1301_defer_work(isp, WORK_TIMER);
}
/*-------------------------------------------------------------------------*/
@@ -1222,7 +1211,6 @@ static int isp1301_remove(struct i2c_client *i2c)
if (machine_is_omap_h2())
gpio_free(2);
- isp->timer.data = 0;
set_bit(WORK_STOP, &isp->todo);
del_timer_sync(&isp->timer);
flush_work(&isp->work);
@@ -1507,9 +1495,7 @@ isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
}
INIT_WORK(&isp->work, isp1301_work);
- init_timer(&isp->timer);
- isp->timer.function = isp1301_timer;
- isp->timer.data = (unsigned long) isp;
+ timer_setup(&isp->timer, isp1301_timer, 0);
i2c_set_clientdata(i2c, isp);
isp->client = i2c;
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index f333024660b4..93b7d6a30aad 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* NXP ISP1301 USB transceiver driver
*
* Copyright (C) 2012 Roland Stigge
*
* Author: Roland Stigge <stigge@antcom.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index 01d4e4cdbc79..19871266312d 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* phy-keystone - USB PHY, talking to dwc3 controller in Keystone.
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
* Author: WingMan Kwok <w-kwok2@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
deleted file mode 100644
index 3d0dd2f97415..000000000000
--- a/drivers/usb/phy/phy-msm-usb.c
+++ /dev/null
@@ -1,2085 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/extcon.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/reboot.h>
-#include <linux/reset.h>
-#include <linux/types.h>
-#include <linux/usb/otg.h>
-
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/of.h>
-#include <linux/usb/ulpi.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb_hw.h>
-#include <linux/regulator/consumer.h>
-
-/**
- * OTG control
- *
- * OTG_NO_CONTROL Id/VBUS notifications not required. Useful in host
- * only configuration.
- * OTG_PHY_CONTROL Id/VBUS notifications come from USB PHY.
- * OTG_PMIC_CONTROL Id/VBUS notifications come from PMIC hardware.
- * OTG_USER_CONTROL Id/VBUS notifications come from user via sysfs.
- *
- */
-enum otg_control_type {
- OTG_NO_CONTROL = 0,
- OTG_PHY_CONTROL,
- OTG_PMIC_CONTROL,
- OTG_USER_CONTROL,
-};
-
-/**
- * PHY type used
- *
- * INVALID_PHY Unsupported PHY
- * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
- * SNPS_28NM_INTEGRATED_PHY Synopsys 28nm integrated PHY
- *
- */
-enum msm_usb_phy_type {
- INVALID_PHY = 0,
- CI_45NM_INTEGRATED_PHY,
- SNPS_28NM_INTEGRATED_PHY,
-};
-
-#define IDEV_CHG_MAX 1500
-#define IUNIT 100
-
-/**
- * Different states involved in USB charger detection.
- *
- * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
- * process is not yet started.
- * USB_CHG_STATE_WAIT_FOR_DCD Waiting for data pin contact.
- * USB_CHG_STATE_DCD_DONE Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
- * between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
- * between DCP and CDP).
- * USB_CHG_STATE_DETECTED USB charger type is determined.
- *
- */
-enum usb_chg_state {
- USB_CHG_STATE_UNDEFINED = 0,
- USB_CHG_STATE_WAIT_FOR_DCD,
- USB_CHG_STATE_DCD_DONE,
- USB_CHG_STATE_PRIMARY_DONE,
- USB_CHG_STATE_SECONDARY_DONE,
- USB_CHG_STATE_DETECTED,
-};
-
-/**
- * USB charger types
- *
- * USB_INVALID_CHARGER Invalid USB charger.
- * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
- * on USB2.0 compliant host/hub.
- * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
- * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
- * IDEV_CHG_MAX can be drawn irrespective of USB state.
- *
- */
-enum usb_chg_type {
- USB_INVALID_CHARGER = 0,
- USB_SDP_CHARGER,
- USB_DCP_CHARGER,
- USB_CDP_CHARGER,
-};
-
-/**
- * struct msm_otg_platform_data - platform device data
- * for msm_otg driver.
- * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
- * "do not overwrite default vaule at this address".
- * @phy_init_sz: PHY configuration sequence size.
- * @vbus_power: VBUS power on/off routine.
- * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
- * @mode: Supported mode (OTG/peripheral/host).
- * @otg_control: OTG switch controlled by user/Id pin
- */
-struct msm_otg_platform_data {
- int *phy_init_seq;
- int phy_init_sz;
- void (*vbus_power)(bool on);
- unsigned power_budget;
- enum usb_dr_mode mode;
- enum otg_control_type otg_control;
- enum msm_usb_phy_type phy_type;
- void (*setup_gpio)(enum usb_otg_state state);
-};
-
-/**
- * struct msm_otg: OTG driver data. Shared by HCD and DCD.
- * @otg: USB OTG Transceiver structure.
- * @pdata: otg device platform data.
- * @irq: IRQ number assigned for HSUSB controller.
- * @clk: clock struct of usb_hs_clk.
- * @pclk: clock struct of usb_hs_pclk.
- * @core_clk: clock struct of usb_hs_core_clk.
- * @regs: ioremapped register base address.
- * @inputs: OTG state machine inputs(Id, SessValid etc).
- * @sm_work: OTG state machine work.
- * @in_lpm: indicates low power mode (LPM) state.
- * @async_int: Async interrupt arrived.
- * @cur_power: The amount of mA available from downstream port.
- * @chg_work: Charger detection work.
- * @chg_state: The state of charger detection process.
- * @chg_type: The type of charger attached.
- * @dcd_retries: The retry count used to track Data contact
- * detection process.
- * @manual_pullup: true if VBUS is not routed to USB controller/phy
- * and controller driver therefore enables pull-up explicitly before
- * starting controller using usbcmd run/stop bit.
- * @vbus: VBUS signal state tracking, using extcon framework
- * @id: ID signal state tracking, using extcon framework
- * @switch_gpio: Descriptor for GPIO used to control external Dual
- * SPDT USB Switch.
- * @reboot: Used to inform the driver to route USB D+/D- line to Device
- * connector
- */
-struct msm_otg {
- struct usb_phy phy;
- struct msm_otg_platform_data *pdata;
- int irq;
- struct clk *clk;
- struct clk *pclk;
- struct clk *core_clk;
- void __iomem *regs;
-#define ID 0
-#define B_SESS_VLD 1
- unsigned long inputs;
- struct work_struct sm_work;
- atomic_t in_lpm;
- int async_int;
- unsigned cur_power;
- int phy_number;
- struct delayed_work chg_work;
- enum usb_chg_state chg_state;
- enum usb_chg_type chg_type;
- u8 dcd_retries;
- struct regulator *v3p3;
- struct regulator *v1p8;
- struct regulator *vddcx;
- struct regulator_bulk_data supplies[3];
-
- struct reset_control *phy_rst;
- struct reset_control *link_rst;
- int vdd_levels[3];
-
- bool manual_pullup;
-
- struct gpio_desc *switch_gpio;
- struct notifier_block reboot;
-};
-
-#define MSM_USB_BASE (motg->regs)
-#define DRIVER_NAME "msm_otg"
-
-#define ULPI_IO_TIMEOUT_USEC (10 * 1000)
-#define LINK_RESET_TIMEOUT_USEC (250 * 1000)
-
-#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */
-#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */
-#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */
-#define USB_PHY_3P3_LPM_LOAD 4000 /* uA */
-
-#define USB_PHY_1P8_VOL_MIN 1800000 /* uV */
-#define USB_PHY_1P8_VOL_MAX 1800000 /* uV */
-#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */
-#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */
-
-#define USB_PHY_VDD_DIG_VOL_MIN 1000000 /* uV */
-#define USB_PHY_VDD_DIG_VOL_MAX 1320000 /* uV */
-#define USB_PHY_SUSP_DIG_VOL 500000 /* uV */
-
-enum vdd_levels {
- VDD_LEVEL_NONE = 0,
- VDD_LEVEL_MIN,
- VDD_LEVEL_MAX,
-};
-
-static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
-{
- int ret = 0;
-
- if (init) {
- ret = regulator_set_voltage(motg->vddcx,
- motg->vdd_levels[VDD_LEVEL_MIN],
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret) {
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- return ret;
- }
-
- ret = regulator_enable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to enable hsusb vddcx\n");
- } else {
- ret = regulator_set_voltage(motg->vddcx, 0,
- motg->vdd_levels[VDD_LEVEL_MAX]);
- if (ret)
- dev_err(motg->phy.dev, "Cannot set vddcx voltage\n");
- ret = regulator_disable(motg->vddcx);
- if (ret)
- dev_err(motg->phy.dev, "unable to disable hsusb vddcx\n");
- }
-
- return ret;
-}
-
-static int msm_hsusb_ldo_init(struct msm_otg *motg, int init)
-{
- int rc = 0;
-
- if (init) {
- rc = regulator_set_voltage(motg->v3p3, USB_PHY_3P3_VOL_MIN,
- USB_PHY_3P3_VOL_MAX);
- if (rc) {
- dev_err(motg->phy.dev, "Cannot set v3p3 voltage\n");
- goto exit;
- }
- rc = regulator_enable(motg->v3p3);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 3p3\n");
- goto exit;
- }
- rc = regulator_set_voltage(motg->v1p8, USB_PHY_1P8_VOL_MIN,
- USB_PHY_1P8_VOL_MAX);
- if (rc) {
- dev_err(motg->phy.dev, "Cannot set v1p8 voltage\n");
- goto disable_3p3;
- }
- rc = regulator_enable(motg->v1p8);
- if (rc) {
- dev_err(motg->phy.dev, "unable to enable the hsusb 1p8\n");
- goto disable_3p3;
- }
-
- return 0;
- }
-
- regulator_disable(motg->v1p8);
-disable_3p3:
- regulator_disable(motg->v3p3);
-exit:
- return rc;
-}
-
-static int msm_hsusb_ldo_set_mode(struct msm_otg *motg, int on)
-{
- int ret = 0;
-
- if (on) {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v1p8\n");
- return ret;
- }
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_HPM_LOAD);
- if (ret < 0) {
- pr_err("Could not set HPM for v3p3\n");
- regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- return ret;
- }
- } else {
- ret = regulator_set_load(motg->v1p8, USB_PHY_1P8_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v1p8\n");
- ret = regulator_set_load(motg->v3p3, USB_PHY_3P3_LPM_LOAD);
- if (ret < 0)
- pr_err("Could not set LPM for v3p3\n");
- }
-
- pr_debug("reg (%s)\n", on ? "HPM" : "LPM");
- return ret < 0 ? ret : 0;
-}
-
-static int ulpi_read(struct usb_phy *phy, u32 reg)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- /* initiate read operation */
- writel(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
- USB_ULPI_VIEWPORT);
-
- /* wait for completion */
- while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= ULPI_IO_TIMEOUT_USEC) {
- dev_err(phy->dev, "ulpi_read: timeout %08x\n",
- readl(USB_ULPI_VIEWPORT));
- return -ETIMEDOUT;
- }
- return ULPI_DATA_READ(readl(USB_ULPI_VIEWPORT));
-}
-
-static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- /* initiate write operation */
- writel(ULPI_RUN | ULPI_WRITE |
- ULPI_ADDR(reg) | ULPI_DATA(val),
- USB_ULPI_VIEWPORT);
-
- /* wait for completion */
- while (cnt < ULPI_IO_TIMEOUT_USEC) {
- if (!(readl(USB_ULPI_VIEWPORT) & ULPI_RUN))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= ULPI_IO_TIMEOUT_USEC) {
- dev_err(phy->dev, "ulpi_write: timeout\n");
- return -ETIMEDOUT;
- }
- return 0;
-}
-
-static struct usb_phy_io_ops msm_otg_io_ops = {
- .read = ulpi_read,
- .write = ulpi_write,
-};
-
-static void ulpi_init(struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata = motg->pdata;
- int *seq = pdata->phy_init_seq, idx;
- u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
-
- for (idx = 0; idx < pdata->phy_init_sz; idx++) {
- if (seq[idx] == -1)
- continue;
-
- dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n",
- seq[idx], addr + idx);
- ulpi_write(&motg->phy, seq[idx], addr + idx);
- }
-}
-
-static int msm_phy_notify_disconnect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int val;
-
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
- usb_phy_io_write(phy, val, ULPI_CLR(ULPI_MISC_A));
- }
-
- /*
- * Put the transceiver in non-driving mode. Otherwise host
- * may not detect soft-disconnection.
- */
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
-
- return 0;
-}
-
-static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert)
-{
- int ret;
-
- if (assert)
- ret = reset_control_assert(motg->link_rst);
- else
- ret = reset_control_deassert(motg->link_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb link clk reset %s failed\n",
- assert ? "assert" : "deassert");
-
- return ret;
-}
-
-static int msm_otg_phy_clk_reset(struct msm_otg *motg)
-{
- int ret = 0;
-
- if (motg->phy_rst)
- ret = reset_control_reset(motg->phy_rst);
-
- if (ret)
- dev_err(motg->phy.dev, "usb phy clk reset failed\n");
-
- return ret;
-}
-
-static int msm_link_reset(struct msm_otg *motg)
-{
- u32 val;
- int ret;
-
- ret = msm_otg_link_clk_reset(motg, 1);
- if (ret)
- return ret;
-
- /* wait for 1ms delay as suggested in HPG. */
- usleep_range(1000, 1200);
-
- ret = msm_otg_link_clk_reset(motg, 0);
- if (ret)
- return ret;
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
-
- /* put transceiver in serial mode as part of reset */
- val = readl(USB_PORTSC) & ~PORTSC_PTS_MASK;
- writel(val | PORTSC_PTS_SERIAL, USB_PORTSC);
-
- return 0;
-}
-
-static int msm_otg_reset(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int cnt = 0;
-
- writel(USBCMD_RESET, USB_USBCMD);
- while (cnt < LINK_RESET_TIMEOUT_USEC) {
- if (!(readl(USB_USBCMD) & USBCMD_RESET))
- break;
- udelay(1);
- cnt++;
- }
- if (cnt >= LINK_RESET_TIMEOUT_USEC)
- return -ETIMEDOUT;
-
- /* select ULPI phy and clear other status/control bits in PORTSC */
- writel(PORTSC_PTS_ULPI, USB_PORTSC);
-
- writel(0x0, USB_AHBBURST);
- writel(0x08, USB_AHBMODE);
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
- return 0;
-}
-
-static void msm_phy_reset(struct msm_otg *motg)
-{
- void __iomem *addr;
-
- if (motg->pdata->phy_type != SNPS_28NM_INTEGRATED_PHY) {
- msm_otg_phy_clk_reset(motg);
- return;
- }
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- /* Assert USB PHY_POR */
- writel(readl(addr) | PHY_POR_ASSERT, addr);
-
- /*
- * wait for minimum 10 microseconds as suggested in HPG.
- * Use a slightly larger value since the exact value didn't
- * work 100% of the time.
- */
- udelay(12);
-
- /* Deassert USB PHY_POR */
- writel(readl(addr) & ~PHY_POR_ASSERT, addr);
-}
-
-static int msm_usb_reset(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- int ret;
-
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- ret = msm_link_reset(motg);
- if (ret) {
- dev_err(phy->dev, "phy_reset failed\n");
- return ret;
- }
-
- ret = msm_otg_reset(&motg->phy);
- if (ret) {
- dev_err(phy->dev, "link reset failed\n");
- return ret;
- }
-
- msleep(100);
-
- /* Reset USB PHY after performing USB Link RESET */
- msm_phy_reset(motg);
-
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- return 0;
-}
-
-static int msm_phy_init(struct usb_phy *phy)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
- u32 val, ulpi_val = 0;
-
- /* Program USB PHY Override registers. */
- ulpi_init(motg);
-
- /*
- * It is recommended in HPG to reset USB PHY after programming
- * USB PHY Override registers.
- */
- msm_phy_reset(motg);
-
- if (pdata->otg_control == OTG_PHY_CONTROL) {
- val = readl(USB_OTGSC);
- if (pdata->mode == USB_DR_MODE_OTG) {
- ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID;
- val |= OTGSC_IDIE | OTGSC_BSVIE;
- } else if (pdata->mode == USB_DR_MODE_PERIPHERAL) {
- ulpi_val = ULPI_INT_SESS_VALID;
- val |= OTGSC_BSVIE;
- }
- writel(val, USB_OTGSC);
- ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE);
- ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL);
- }
-
- if (motg->manual_pullup) {
- val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
- ulpi_write(phy, val, ULPI_SET(ULPI_MISC_A));
-
- val = readl(USB_GENCONFIG_2);
- val |= GENCONFIG_2_SESS_VLD_CTRL_EN;
- writel(val, USB_GENCONFIG_2);
-
- val = readl(USB_USBCMD);
- val |= USBCMD_SESS_VLD_CTRL;
- writel(val, USB_USBCMD);
-
- val = ulpi_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- ulpi_write(phy, val, ULPI_FUNC_CTRL);
- }
-
- if (motg->phy_number)
- writel(readl(USB_PHY_CTRL2) | BIT(16), USB_PHY_CTRL2);
-
- return 0;
-}
-
-#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
-#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-
-#ifdef CONFIG_PM
-
-static int msm_hsusb_config_vddcx(struct msm_otg *motg, int high)
-{
- int max_vol = motg->vdd_levels[VDD_LEVEL_MAX];
- int min_vol;
- int ret;
-
- if (high)
- min_vol = motg->vdd_levels[VDD_LEVEL_MIN];
- else
- min_vol = motg->vdd_levels[VDD_LEVEL_NONE];
-
- ret = regulator_set_voltage(motg->vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("Cannot set vddcx voltage\n");
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-
-static int msm_otg_suspend(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- struct usb_bus *bus = phy->otg->host;
- struct msm_otg_platform_data *pdata = motg->pdata;
- void __iomem *addr;
- int cnt = 0;
-
- if (atomic_read(&motg->in_lpm))
- return 0;
-
- disable_irq(motg->irq);
- /*
- * Chipidea 45-nm PHY suspend sequence:
- *
- * Interrupt Latch Register auto-clear feature is not present
- * in all PHY versions. Latch register is clear on read type.
- * Clear latch register to avoid spurious wakeup from
- * low power mode (LPM).
- *
- * PHY comparators are disabled when PHY enters into low power
- * mode (LPM). Keep PHY comparators ON in LPM only when we expect
- * VBUS/Id notifications from USB PHY. Otherwise turn off USB
- * PHY comparators. This saves a significant amount of power.
- *
- * PLL is not turned off when PHY enters into low power mode (LPM).
- * Disable PLL for maximum power savings.
- */
-
- if (motg->pdata->phy_type == CI_45NM_INTEGRATED_PHY) {
- ulpi_read(phy, 0x14);
- if (pdata->otg_control == OTG_PHY_CONTROL)
- ulpi_write(phy, 0x01, 0x30);
- ulpi_write(phy, 0x08, 0x09);
- }
-
- /*
- * PHY may take some time or even fail to enter into low power
- * mode (LPM). Hence poll for 500 msec and reset the PHY and link
- * in failure case.
- */
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
- dev_err(phy->dev, "Unable to suspend PHY\n");
- msm_otg_reset(phy);
- enable_irq(motg->irq);
- return -ETIMEDOUT;
- }
-
- /*
- * PHY has capability to generate interrupt asynchronously in low
- * power mode (LPM). This interrupt is level triggered. So USB IRQ
- * line must be disabled till async interrupt enable bit is cleared
- * in USBCMD register. Assert STP (ULPI interface STOP signal) to
- * block data communication from PHY.
- */
- writel(readl(USB_USBCMD) | ASYNC_INTR_CTRL | ULPI_STP_CTRL, USB_USBCMD);
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL)
- writel(readl(addr) | PHY_RETEN, addr);
-
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
- msm_hsusb_ldo_set_mode(motg, 0);
- msm_hsusb_config_vddcx(motg, 0);
- }
-
- if (device_may_wakeup(phy->dev))
- enable_irq_wake(motg->irq);
- if (bus)
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
- atomic_set(&motg->in_lpm, 1);
- enable_irq(motg->irq);
-
- dev_info(phy->dev, "USB in low power mode\n");
-
- return 0;
-}
-
-static int msm_otg_resume(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- struct usb_bus *bus = phy->otg->host;
- void __iomem *addr;
- int cnt = 0;
- unsigned temp;
-
- if (!atomic_read(&motg->in_lpm))
- return 0;
-
- clk_prepare_enable(motg->pclk);
- clk_prepare_enable(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- if (motg->pdata->phy_type == SNPS_28NM_INTEGRATED_PHY &&
- motg->pdata->otg_control == OTG_PMIC_CONTROL) {
-
- addr = USB_PHY_CTRL;
- if (motg->phy_number)
- addr = USB_PHY_CTRL2;
-
- msm_hsusb_ldo_set_mode(motg, 1);
- msm_hsusb_config_vddcx(motg, 1);
- writel(readl(addr) & ~PHY_RETEN, addr);
- }
-
- temp = readl(USB_USBCMD);
- temp &= ~ASYNC_INTR_CTRL;
- temp &= ~ULPI_STP_CTRL;
- writel(temp, USB_USBCMD);
-
- /*
- * PHY comes out of low power mode (LPM) in case of wakeup
- * from asynchronous interrupt.
- */
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
- goto skip_phy_resume;
-
- writel(readl(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_RESUME_TIMEOUT_USEC) {
- if (!(readl(USB_PORTSC) & PORTSC_PHCD))
- break;
- udelay(1);
- cnt++;
- }
-
- if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
- /*
- * This is a fatal error. Reset the link and
- * PHY. USB state can not be restored. Re-insertion
- * of USB cable is the only way to get USB working.
- */
- dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n");
- msm_otg_reset(phy);
- }
-
-skip_phy_resume:
- if (device_may_wakeup(phy->dev))
- disable_irq_wake(motg->irq);
- if (bus)
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags);
-
- atomic_set(&motg->in_lpm, 0);
-
- if (motg->async_int) {
- motg->async_int = 0;
- pm_runtime_put(phy->dev);
- enable_irq(motg->irq);
- }
-
- dev_info(phy->dev, "USB exited from low power mode\n");
-
- return 0;
-}
-#endif
-
-static void msm_otg_notify_charger(struct msm_otg *motg, unsigned mA)
-{
- if (motg->cur_power == mA)
- return;
-
- /* TODO: Notify PMIC about available current */
- dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA);
- motg->cur_power = mA;
-}
-
-static void msm_otg_start_host(struct usb_phy *phy, int on)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
- struct usb_hcd *hcd;
-
- if (!phy->otg->host)
- return;
-
- hcd = bus_to_hcd(phy->otg->host);
-
- if (on) {
- dev_dbg(phy->dev, "host on\n");
-
- if (pdata->vbus_power)
- pdata->vbus_power(1);
- /*
- * Some boards have a switch controlled by gpio
- * to enable/disable internal HUB. Enable internal
- * HUB before kicking the host.
- */
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_A_HOST);
-#ifdef CONFIG_USB
- usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
- device_wakeup_enable(hcd->self.controller);
-#endif
- } else {
- dev_dbg(phy->dev, "host off\n");
-
-#ifdef CONFIG_USB
- usb_remove_hcd(hcd);
-#endif
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_UNDEFINED);
- if (pdata->vbus_power)
- pdata->vbus_power(0);
- }
-}
-
-static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
-{
- struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
- struct usb_hcd *hcd;
-
- /*
- * Fail host registration if this board can support
- * only peripheral configuration.
- */
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL) {
- dev_info(otg->usb_phy->dev, "Host mode is not supported\n");
- return -ENODEV;
- }
-
- if (!host) {
- if (otg->state == OTG_STATE_A_HOST) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_host(otg->usb_phy, 0);
- otg->host = NULL;
- otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
- } else {
- otg->host = NULL;
- }
-
- return 0;
- }
-
- hcd = bus_to_hcd(host);
- hcd->power_budget = motg->pdata->power_budget;
-
- otg->host = host;
- dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-
- return 0;
-}
-
-static void msm_otg_start_peripheral(struct usb_phy *phy, int on)
-{
- struct msm_otg *motg = container_of(phy, struct msm_otg, phy);
- struct msm_otg_platform_data *pdata = motg->pdata;
-
- if (!phy->otg->gadget)
- return;
-
- if (on) {
- dev_dbg(phy->dev, "gadget on\n");
- /*
- * Some boards have a switch controlled by gpio
- * to enable/disable internal HUB. Disable internal
- * HUB before kicking the gadget.
- */
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_B_PERIPHERAL);
- usb_gadget_vbus_connect(phy->otg->gadget);
- } else {
- dev_dbg(phy->dev, "gadget off\n");
- usb_gadget_vbus_disconnect(phy->otg->gadget);
- if (pdata->setup_gpio)
- pdata->setup_gpio(OTG_STATE_UNDEFINED);
- }
-
-}
-
-static int msm_otg_set_peripheral(struct usb_otg *otg,
- struct usb_gadget *gadget)
-{
- struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
-
- /*
- * Fail peripheral registration if this board can support
- * only host configuration.
- */
- if (motg->pdata->mode == USB_DR_MODE_HOST) {
- dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n");
- return -ENODEV;
- }
-
- if (!gadget) {
- if (otg->state == OTG_STATE_B_PERIPHERAL) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- msm_otg_start_peripheral(otg->usb_phy, 0);
- otg->gadget = NULL;
- otg->state = OTG_STATE_UNDEFINED;
- schedule_work(&motg->sm_work);
- } else {
- otg->gadget = NULL;
- }
-
- return 0;
- }
- otg->gadget = gadget;
- dev_dbg(otg->usb_phy->dev,
- "peripheral driver registered w/ tranceiver\n");
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-
- return 0;
-}
-
-static bool msm_chg_check_secondary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x87);
- ret = chg_det & 1;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_enable_secondary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* put it in host mode for enabling D- source */
- chg_det &= ~(1 << 2);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /*
- * Configure DM as current source, DP as current sink
- * and enable battery charging comparators.
- */
- ulpi_write(phy, 0x8, 0x85);
- ulpi_write(phy, 0x2, 0x85);
- ulpi_write(phy, 0x1, 0x85);
- break;
- default:
- break;
- }
-}
-
-static bool msm_chg_check_primary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- ret = chg_det & (1 << 4);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x87);
- ret = chg_det & 1;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_enable_primary_det(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* enable chg detection */
- chg_det &= ~(1 << 0);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /*
- * Configure DP as current source, DM as current sink
- * and enable battery charging comparators.
- */
- ulpi_write(phy, 0x2, 0x85);
- ulpi_write(phy, 0x1, 0x85);
- break;
- default:
- break;
- }
-}
-
-static bool msm_chg_check_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 line_state;
- bool ret = false;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- line_state = ulpi_read(phy, 0x15);
- ret = !(line_state & 1);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- line_state = ulpi_read(phy, 0x87);
- ret = line_state & 2;
- break;
- default:
- break;
- }
- return ret;
-}
-
-static void msm_chg_disable_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- chg_det &= ~(1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- ulpi_write(phy, 0x10, 0x86);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_enable_dcd(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn on D+ current source */
- chg_det |= (1 << 5);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Data contact detection enable */
- ulpi_write(phy, 0x10, 0x85);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_block_on(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
-
- /* put the controller in non-driving mode */
- func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
- func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* control chg block via ULPI */
- chg_det &= ~(1 << 3);
- ulpi_write(phy, chg_det, 0x34);
- /* Turn on chg detect block */
- chg_det &= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- udelay(20);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Clear charger detecting control bits */
- ulpi_write(phy, 0x3F, 0x86);
- /* Clear alt interrupt latch and enable bits */
- ulpi_write(phy, 0x1F, 0x92);
- ulpi_write(phy, 0x1F, 0x95);
- udelay(100);
- break;
- default:
- break;
- }
-}
-
-static void msm_chg_block_off(struct msm_otg *motg)
-{
- struct usb_phy *phy = &motg->phy;
- u32 func_ctrl, chg_det;
-
- switch (motg->pdata->phy_type) {
- case CI_45NM_INTEGRATED_PHY:
- chg_det = ulpi_read(phy, 0x34);
- /* Turn off charger block */
- chg_det |= ~(1 << 1);
- ulpi_write(phy, chg_det, 0x34);
- break;
- case SNPS_28NM_INTEGRATED_PHY:
- /* Clear charger detecting control bits */
- ulpi_write(phy, 0x3F, 0x86);
- /* Clear alt interrupt latch and enable bits */
- ulpi_write(phy, 0x1F, 0x92);
- ulpi_write(phy, 0x1F, 0x95);
- break;
- default:
- break;
- }
-
- /* put the controller in normal mode */
- func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL);
- func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL);
-}
-
-#define MSM_CHG_DCD_POLL_TIME (100 * HZ/1000) /* 100 msec */
-#define MSM_CHG_DCD_MAX_RETRIES 6 /* Tdcd_tmout = 6 * 100 msec */
-#define MSM_CHG_PRIMARY_DET_TIME (40 * HZ/1000) /* TVDPSRC_ON */
-#define MSM_CHG_SECONDARY_DET_TIME (40 * HZ/1000) /* TVDMSRC_ON */
-static void msm_chg_detect_work(struct work_struct *w)
-{
- struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work);
- struct usb_phy *phy = &motg->phy;
- bool is_dcd, tmout, vout;
- unsigned long delay;
-
- dev_dbg(phy->dev, "chg detection work\n");
- switch (motg->chg_state) {
- case USB_CHG_STATE_UNDEFINED:
- pm_runtime_get_sync(phy->dev);
- msm_chg_block_on(motg);
- msm_chg_enable_dcd(motg);
- motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
- motg->dcd_retries = 0;
- delay = MSM_CHG_DCD_POLL_TIME;
- break;
- case USB_CHG_STATE_WAIT_FOR_DCD:
- is_dcd = msm_chg_check_dcd(motg);
- tmout = ++motg->dcd_retries == MSM_CHG_DCD_MAX_RETRIES;
- if (is_dcd || tmout) {
- msm_chg_disable_dcd(motg);
- msm_chg_enable_primary_det(motg);
- delay = MSM_CHG_PRIMARY_DET_TIME;
- motg->chg_state = USB_CHG_STATE_DCD_DONE;
- } else {
- delay = MSM_CHG_DCD_POLL_TIME;
- }
- break;
- case USB_CHG_STATE_DCD_DONE:
- vout = msm_chg_check_primary_det(motg);
- if (vout) {
- msm_chg_enable_secondary_det(motg);
- delay = MSM_CHG_SECONDARY_DET_TIME;
- motg->chg_state = USB_CHG_STATE_PRIMARY_DONE;
- } else {
- motg->chg_type = USB_SDP_CHARGER;
- motg->chg_state = USB_CHG_STATE_DETECTED;
- delay = 0;
- }
- break;
- case USB_CHG_STATE_PRIMARY_DONE:
- vout = msm_chg_check_secondary_det(motg);
- if (vout)
- motg->chg_type = USB_DCP_CHARGER;
- else
- motg->chg_type = USB_CDP_CHARGER;
- motg->chg_state = USB_CHG_STATE_SECONDARY_DONE;
- /* fall through */
- case USB_CHG_STATE_SECONDARY_DONE:
- motg->chg_state = USB_CHG_STATE_DETECTED;
- case USB_CHG_STATE_DETECTED:
- msm_chg_block_off(motg);
- dev_dbg(phy->dev, "charger = %d\n", motg->chg_type);
- schedule_work(&motg->sm_work);
- return;
- default:
- return;
- }
-
- schedule_delayed_work(&motg->chg_work, delay);
-}
-
-/*
- * We support OTG, Peripheral only and Host only configurations. In case
- * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen
- * via Id pin status or user request (debugfs). Id/BSV interrupts are not
- * enabled when switch is controlled by user and default mode is supplied
- * by board file, which can be changed by userspace later.
- */
-static void msm_otg_init_sm(struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata = motg->pdata;
- u32 otgsc = readl(USB_OTGSC);
-
- switch (pdata->mode) {
- case USB_DR_MODE_OTG:
- if (pdata->otg_control == OTG_PHY_CONTROL) {
- if (otgsc & OTGSC_ID)
- set_bit(ID, &motg->inputs);
- else
- clear_bit(ID, &motg->inputs);
-
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- } else if (pdata->otg_control == OTG_USER_CONTROL) {
- set_bit(ID, &motg->inputs);
- clear_bit(B_SESS_VLD, &motg->inputs);
- }
- break;
- case USB_DR_MODE_HOST:
- clear_bit(ID, &motg->inputs);
- break;
- case USB_DR_MODE_PERIPHERAL:
- set_bit(ID, &motg->inputs);
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- break;
- }
-}
-
-static void msm_otg_sm_work(struct work_struct *w)
-{
- struct msm_otg *motg = container_of(w, struct msm_otg, sm_work);
- struct usb_otg *otg = motg->phy.otg;
-
- switch (otg->state) {
- case OTG_STATE_UNDEFINED:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_UNDEFINED state\n");
- msm_otg_reset(otg->usb_phy);
- msm_otg_init_sm(motg);
- otg->state = OTG_STATE_B_IDLE;
- /* FALL THROUGH */
- case OTG_STATE_B_IDLE:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_IDLE state\n");
- if (!test_bit(ID, &motg->inputs) && otg->host) {
- /* disable BSV bit */
- writel(readl(USB_OTGSC) & ~OTGSC_BSVIE, USB_OTGSC);
- msm_otg_start_host(otg->usb_phy, 1);
- otg->state = OTG_STATE_A_HOST;
- } else if (test_bit(B_SESS_VLD, &motg->inputs)) {
- switch (motg->chg_state) {
- case USB_CHG_STATE_UNDEFINED:
- msm_chg_detect_work(&motg->chg_work.work);
- break;
- case USB_CHG_STATE_DETECTED:
- switch (motg->chg_type) {
- case USB_DCP_CHARGER:
- msm_otg_notify_charger(motg,
- IDEV_CHG_MAX);
- break;
- case USB_CDP_CHARGER:
- msm_otg_notify_charger(motg,
- IDEV_CHG_MAX);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
- break;
- case USB_SDP_CHARGER:
- msm_otg_notify_charger(motg, IUNIT);
- msm_otg_start_peripheral(otg->usb_phy,
- 1);
- otg->state
- = OTG_STATE_B_PERIPHERAL;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
- } else {
- /*
- * If charger detection work is pending, decrement
- * the pm usage counter to balance with the one that
- * is incremented in charger detection work.
- */
- if (cancel_delayed_work_sync(&motg->chg_work)) {
- pm_runtime_put_sync(otg->usb_phy->dev);
- msm_otg_reset(otg->usb_phy);
- }
- msm_otg_notify_charger(motg, 0);
- motg->chg_state = USB_CHG_STATE_UNDEFINED;
- motg->chg_type = USB_INVALID_CHARGER;
- }
-
- if (otg->state == OTG_STATE_B_IDLE)
- pm_runtime_put_sync(otg->usb_phy->dev);
- break;
- case OTG_STATE_B_PERIPHERAL:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
- if (!test_bit(B_SESS_VLD, &motg->inputs) ||
- !test_bit(ID, &motg->inputs)) {
- msm_otg_notify_charger(motg, 0);
- msm_otg_start_peripheral(otg->usb_phy, 0);
- motg->chg_state = USB_CHG_STATE_UNDEFINED;
- motg->chg_type = USB_INVALID_CHARGER;
- otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
- }
- break;
- case OTG_STATE_A_HOST:
- dev_dbg(otg->usb_phy->dev, "OTG_STATE_A_HOST state\n");
- if (test_bit(ID, &motg->inputs)) {
- msm_otg_start_host(otg->usb_phy, 0);
- otg->state = OTG_STATE_B_IDLE;
- msm_otg_reset(otg->usb_phy);
- schedule_work(w);
- }
- break;
- default:
- break;
- }
-}
-
-static irqreturn_t msm_otg_irq(int irq, void *data)
-{
- struct msm_otg *motg = data;
- struct usb_phy *phy = &motg->phy;
- u32 otgsc = 0;
-
- if (atomic_read(&motg->in_lpm)) {
- disable_irq_nosync(irq);
- motg->async_int = 1;
- pm_runtime_get(phy->dev);
- return IRQ_HANDLED;
- }
-
- otgsc = readl(USB_OTGSC);
- if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS)))
- return IRQ_NONE;
-
- if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) {
- if (otgsc & OTGSC_ID)
- set_bit(ID, &motg->inputs);
- else
- clear_bit(ID, &motg->inputs);
- dev_dbg(phy->dev, "ID set/clear\n");
- pm_runtime_get_noresume(phy->dev);
- } else if ((otgsc & OTGSC_BSVIS) && (otgsc & OTGSC_BSVIE)) {
- if (otgsc & OTGSC_BSV)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
- dev_dbg(phy->dev, "BSV set/clear\n");
- pm_runtime_get_noresume(phy->dev);
- }
-
- writel(otgsc, USB_OTGSC);
- schedule_work(&motg->sm_work);
- return IRQ_HANDLED;
-}
-
-static int msm_otg_mode_show(struct seq_file *s, void *unused)
-{
- struct msm_otg *motg = s->private;
- struct usb_otg *otg = motg->phy.otg;
-
- switch (otg->state) {
- case OTG_STATE_A_HOST:
- seq_puts(s, "host\n");
- break;
- case OTG_STATE_B_PERIPHERAL:
- seq_puts(s, "peripheral\n");
- break;
- default:
- seq_puts(s, "none\n");
- break;
- }
-
- return 0;
-}
-
-static int msm_otg_mode_open(struct inode *inode, struct file *file)
-{
- return single_open(file, msm_otg_mode_show, inode->i_private);
-}
-
-static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct seq_file *s = file->private_data;
- struct msm_otg *motg = s->private;
- char buf[16];
- struct usb_otg *otg = motg->phy.otg;
- int status = count;
- enum usb_dr_mode req_mode;
-
- memset(buf, 0x00, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
- status = -EFAULT;
- goto out;
- }
-
- if (!strncmp(buf, "host", 4)) {
- req_mode = USB_DR_MODE_HOST;
- } else if (!strncmp(buf, "peripheral", 10)) {
- req_mode = USB_DR_MODE_PERIPHERAL;
- } else if (!strncmp(buf, "none", 4)) {
- req_mode = USB_DR_MODE_UNKNOWN;
- } else {
- status = -EINVAL;
- goto out;
- }
-
- switch (req_mode) {
- case USB_DR_MODE_UNKNOWN:
- switch (otg->state) {
- case OTG_STATE_A_HOST:
- case OTG_STATE_B_PERIPHERAL:
- set_bit(ID, &motg->inputs);
- clear_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- case USB_DR_MODE_PERIPHERAL:
- switch (otg->state) {
- case OTG_STATE_B_IDLE:
- case OTG_STATE_A_HOST:
- set_bit(ID, &motg->inputs);
- set_bit(B_SESS_VLD, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- case USB_DR_MODE_HOST:
- switch (otg->state) {
- case OTG_STATE_B_IDLE:
- case OTG_STATE_B_PERIPHERAL:
- clear_bit(ID, &motg->inputs);
- break;
- default:
- goto out;
- }
- break;
- default:
- goto out;
- }
-
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
-out:
- return status;
-}
-
-static const struct file_operations msm_otg_mode_fops = {
- .open = msm_otg_mode_open,
- .read = seq_read,
- .write = msm_otg_mode_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct dentry *msm_otg_dbg_root;
-static struct dentry *msm_otg_dbg_mode;
-
-static int msm_otg_debugfs_init(struct msm_otg *motg)
-{
- msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
-
- if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
- return -ENODEV;
-
- msm_otg_dbg_mode = debugfs_create_file("mode", S_IRUGO | S_IWUSR,
- msm_otg_dbg_root, motg, &msm_otg_mode_fops);
- if (!msm_otg_dbg_mode) {
- debugfs_remove(msm_otg_dbg_root);
- msm_otg_dbg_root = NULL;
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void msm_otg_debugfs_cleanup(void)
-{
- debugfs_remove(msm_otg_dbg_mode);
- debugfs_remove(msm_otg_dbg_root);
-}
-
-static const struct of_device_id msm_otg_dt_match[] = {
- {
- .compatible = "qcom,usb-otg-ci",
- .data = (void *) CI_45NM_INTEGRATED_PHY
- },
- {
- .compatible = "qcom,usb-otg-snps",
- .data = (void *) SNPS_28NM_INTEGRATED_PHY
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, msm_otg_dt_match);
-
-static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
- struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
-
- if (event)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
-
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
-
- schedule_work(&motg->sm_work);
-
- return NOTIFY_DONE;
-}
-
-static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, id_nb);
- struct msm_otg *motg = container_of(usb_phy, struct msm_otg, phy);
-
- if (event)
- clear_bit(ID, &motg->inputs);
- else
- set_bit(ID, &motg->inputs);
-
- schedule_work(&motg->sm_work);
-
- return NOTIFY_DONE;
-}
-
-static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
-{
- struct msm_otg_platform_data *pdata;
- struct device_node *node = pdev->dev.of_node;
- struct property *prop;
- int len, ret, words;
- u32 val, tmp[3];
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- motg->pdata = pdata;
-
- pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
- if (!pdata->phy_type)
- return 1;
-
- motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
- if (IS_ERR(motg->link_rst))
- return PTR_ERR(motg->link_rst);
-
- motg->phy_rst = devm_reset_control_get(&pdev->dev, "phy");
- if (IS_ERR(motg->phy_rst))
- motg->phy_rst = NULL;
-
- pdata->mode = usb_get_dr_mode(&pdev->dev);
- if (pdata->mode == USB_DR_MODE_UNKNOWN)
- pdata->mode = USB_DR_MODE_OTG;
-
- pdata->otg_control = OTG_PHY_CONTROL;
- if (!of_property_read_u32(node, "qcom,otg-control", &val))
- if (val == OTG_PMIC_CONTROL)
- pdata->otg_control = val;
-
- if (!of_property_read_u32(node, "qcom,phy-num", &val) && val < 2)
- motg->phy_number = val;
-
- motg->vdd_levels[VDD_LEVEL_NONE] = USB_PHY_SUSP_DIG_VOL;
- motg->vdd_levels[VDD_LEVEL_MIN] = USB_PHY_VDD_DIG_VOL_MIN;
- motg->vdd_levels[VDD_LEVEL_MAX] = USB_PHY_VDD_DIG_VOL_MAX;
-
- if (of_get_property(node, "qcom,vdd-levels", &len) &&
- len == sizeof(tmp)) {
- of_property_read_u32_array(node, "qcom,vdd-levels",
- tmp, len / sizeof(*tmp));
- motg->vdd_levels[VDD_LEVEL_NONE] = tmp[VDD_LEVEL_NONE];
- motg->vdd_levels[VDD_LEVEL_MIN] = tmp[VDD_LEVEL_MIN];
- motg->vdd_levels[VDD_LEVEL_MAX] = tmp[VDD_LEVEL_MAX];
- }
-
- motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
-
- motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
- GPIOD_OUT_LOW);
- if (IS_ERR(motg->switch_gpio))
- return PTR_ERR(motg->switch_gpio);
-
- prop = of_find_property(node, "qcom,phy-init-sequence", &len);
- if (!prop || !len)
- return 0;
-
- words = len / sizeof(u32);
-
- if (words >= ULPI_EXT_VENDOR_SPECIFIC) {
- dev_warn(&pdev->dev, "Too big PHY init sequence %d\n", words);
- return 0;
- }
-
- pdata->phy_init_seq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
- if (!pdata->phy_init_seq)
- return 0;
-
- ret = of_property_read_u32_array(node, "qcom,phy-init-sequence",
- pdata->phy_init_seq, words);
- if (!ret)
- pdata->phy_init_sz = words;
-
- return 0;
-}
-
-static int msm_otg_reboot_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
-
- /*
- * Ensure that D+/D- lines are routed to the uB connector, so
- * we can load the bootloader/kernel at the next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- return NOTIFY_DONE;
-}
-
-static int msm_otg_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct device_node *np = pdev->dev.of_node;
- struct msm_otg_platform_data *pdata;
- struct resource *res;
- struct msm_otg *motg;
- struct usb_phy *phy;
- void __iomem *phy_select;
-
- motg = devm_kzalloc(&pdev->dev, sizeof(struct msm_otg), GFP_KERNEL);
- if (!motg)
- return -ENOMEM;
-
- motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
- GFP_KERNEL);
- if (!motg->phy.otg)
- return -ENOMEM;
-
- phy = &motg->phy;
- phy->dev = &pdev->dev;
-
- motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
- if (IS_ERR(motg->clk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
- return PTR_ERR(motg->clk);
- }
-
- /*
- * If USB Core is running its protocol engine based on CORE CLK,
- * CORE CLK must be running at >55 MHz for correct HSUSB
- * operation and USB core cannot tolerate frequency changes on
- * CORE CLK.
- */
- motg->pclk = devm_clk_get(&pdev->dev, np ? "iface" : "usb_hs_pclk");
- if (IS_ERR(motg->pclk)) {
- dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
- return PTR_ERR(motg->pclk);
- }
-
- /*
- * USB core clock is not present on all MSM chips. This
- * clock is introduced to remove the dependency on AXI
- * bus frequency.
- */
- motg->core_clk = devm_clk_get(&pdev->dev,
- np ? "alt_core" : "usb_hs_core_clk");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- motg->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!motg->regs)
- return -ENOMEM;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
- /*
- * NOTE: The PHYs can be multiplexed between the chipidea controller
- * and the dwc3 controller, using a single bit. It is important that
- * the dwc3 driver does not set this bit in an incompatible way.
- */
- if (motg->phy_number) {
- phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select)
- return -ENOMEM;
-
- /* Enable second PHY with the OTG port */
- writel(0x1, phy_select);
- }
-
- dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);
-
- motg->irq = platform_get_irq(pdev, 0);
- if (motg->irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq failed\n");
- ret = motg->irq;
- return motg->irq;
- }
-
- motg->supplies[0].supply = "vddcx";
- motg->supplies[1].supply = "v3p3";
- motg->supplies[2].supply = "v1p8";
-
- ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
- motg->supplies);
- if (ret)
- return ret;
-
- motg->vddcx = motg->supplies[0].consumer;
- motg->v3p3 = motg->supplies[1].consumer;
- motg->v1p8 = motg->supplies[2].consumer;
-
- clk_set_rate(motg->clk, 60000000);
-
- clk_prepare_enable(motg->clk);
- clk_prepare_enable(motg->pclk);
-
- if (!IS_ERR(motg->core_clk))
- clk_prepare_enable(motg->core_clk);
-
- ret = msm_hsusb_init_vddcx(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vddcx configuration failed\n");
- goto disable_clks;
- }
-
- ret = msm_hsusb_ldo_init(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vreg configuration failed\n");
- goto disable_vddcx;
- }
- ret = msm_hsusb_ldo_set_mode(motg, 1);
- if (ret) {
- dev_err(&pdev->dev, "hsusb vreg enable failed\n");
- goto disable_ldo;
- }
-
- writel(0, USB_USBINTR);
- writel(0, USB_OTGSC);
-
- INIT_WORK(&motg->sm_work, msm_otg_sm_work);
- INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
- ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
- "msm_otg", motg);
- if (ret) {
- dev_err(&pdev->dev, "request irq failed\n");
- goto disable_ldo;
- }
-
- phy->init = msm_phy_init;
- phy->notify_disconnect = msm_phy_notify_disconnect;
- phy->type = USB_PHY_TYPE_USB2;
- phy->vbus_nb.notifier_call = msm_otg_vbus_notifier;
- phy->id_nb.notifier_call = msm_otg_id_notifier;
-
- phy->io_ops = &msm_otg_io_ops;
-
- phy->otg->usb_phy = &motg->phy;
- phy->otg->set_host = msm_otg_set_host;
- phy->otg->set_peripheral = msm_otg_set_peripheral;
-
- msm_usb_reset(phy);
-
- ret = usb_add_phy_dev(&motg->phy);
- if (ret) {
- dev_err(&pdev->dev, "usb_add_phy failed\n");
- goto disable_ldo;
- }
-
- ret = extcon_get_state(phy->edev, EXTCON_USB);
- if (ret)
- set_bit(B_SESS_VLD, &motg->inputs);
- else
- clear_bit(B_SESS_VLD, &motg->inputs);
-
- ret = extcon_get_state(phy->id_edev, EXTCON_USB_HOST);
- if (ret)
- clear_bit(ID, &motg->inputs);
- else
- set_bit(ID, &motg->inputs);
-
- platform_set_drvdata(pdev, motg);
- device_init_wakeup(&pdev->dev, 1);
-
- if (motg->pdata->mode == USB_DR_MODE_OTG &&
- motg->pdata->otg_control == OTG_USER_CONTROL) {
- ret = msm_otg_debugfs_init(motg);
- if (ret)
- dev_dbg(&pdev->dev, "Can not create mode change file\n");
- }
-
- if (test_bit(B_SESS_VLD, &motg->inputs)) {
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
- } else {
- /* Switch D+/D- lines to Hub */
- gpiod_set_value_cansleep(motg->switch_gpio, 1);
- }
-
- motg->reboot.notifier_call = msm_otg_reboot_notify;
- register_reboot_notifier(&motg->reboot);
-
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-disable_ldo:
- msm_hsusb_ldo_init(motg, 0);
-disable_vddcx:
- msm_hsusb_init_vddcx(motg, 0);
-disable_clks:
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
-
- return ret;
-}
-
-static int msm_otg_remove(struct platform_device *pdev)
-{
- struct msm_otg *motg = platform_get_drvdata(pdev);
- struct usb_phy *phy = &motg->phy;
- int cnt = 0;
-
- if (phy->otg->host || phy->otg->gadget)
- return -EBUSY;
-
- unregister_reboot_notifier(&motg->reboot);
-
- /*
- * Ensure that D+/D- lines are routed to the uB connector, so
- * we can load the bootloader/kernel at the next reboot
- */
- gpiod_set_value_cansleep(motg->switch_gpio, 0);
-
- msm_otg_debugfs_cleanup();
- cancel_delayed_work_sync(&motg->chg_work);
- cancel_work_sync(&motg->sm_work);
-
- pm_runtime_resume(&pdev->dev);
-
- device_init_wakeup(&pdev->dev, 0);
- pm_runtime_disable(&pdev->dev);
-
- usb_remove_phy(phy);
- disable_irq(motg->irq);
-
- /*
- * Put PHY in low power mode.
- */
- ulpi_read(phy, 0x14);
- ulpi_write(phy, 0x08, 0x09);
-
- writel(readl(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC);
- while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
- if (readl(USB_PORTSC) & PORTSC_PHCD)
- break;
- udelay(1);
- cnt++;
- }
- if (cnt >= PHY_SUSPEND_TIMEOUT_USEC)
- dev_err(phy->dev, "Unable to suspend PHY\n");
-
- clk_disable_unprepare(motg->pclk);
- clk_disable_unprepare(motg->clk);
- if (!IS_ERR(motg->core_clk))
- clk_disable_unprepare(motg->core_clk);
- msm_hsusb_ldo_init(motg, 0);
-
- pm_runtime_set_suspended(&pdev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int msm_otg_runtime_idle(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
- struct usb_otg *otg = motg->phy.otg;
-
- dev_dbg(dev, "OTG runtime idle\n");
-
- /*
- * It is sometimes observed that a spurious interrupt
- * comes when PHY is put into LPM immediately after PHY reset.
- * This 1 sec delay also prevents entering into LPM immediately
- * after asynchronous interrupt.
- */
- if (otg->state != OTG_STATE_UNDEFINED)
- pm_schedule_suspend(dev, 1000);
-
- return -EAGAIN;
-}
-
-static int msm_otg_runtime_suspend(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG runtime suspend\n");
- return msm_otg_suspend(motg);
-}
-
-static int msm_otg_runtime_resume(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG runtime resume\n");
- return msm_otg_resume(motg);
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int msm_otg_pm_suspend(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
-
- dev_dbg(dev, "OTG PM suspend\n");
- return msm_otg_suspend(motg);
-}
-
-static int msm_otg_pm_resume(struct device *dev)
-{
- struct msm_otg *motg = dev_get_drvdata(dev);
- int ret;
-
- dev_dbg(dev, "OTG PM resume\n");
-
- ret = msm_otg_resume(motg);
- if (ret)
- return ret;
-
- /*
- * Runtime PM Documentation recommends bringing the
- * device to a fully powered state upon resume.
- */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops msm_otg_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
- SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
- msm_otg_runtime_idle)
-};
-
-static struct platform_driver msm_otg_driver = {
- .probe = msm_otg_probe,
- .remove = msm_otg_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &msm_otg_dev_pm_ops,
- .of_match_table = msm_otg_dt_match,
- },
-};
-
-module_platform_driver(msm_otg_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MSM USB transceiver driver");
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 0e315694adc9..554b72282276 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
* Author: Chao Xie <chao.xie@marvell.com>
* Neil Zhang <zhangwm@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
@@ -87,9 +83,10 @@ static void mv_otg_run_state_machine(struct mv_otg *mvotg,
queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
}
-static void mv_otg_timer_await_bcon(unsigned long data)
+static void mv_otg_timer_await_bcon(struct timer_list *t)
{
- struct mv_otg *mvotg = (struct mv_otg *) data;
+ struct mv_otg *mvotg = from_timer(mvotg, t,
+ otg_ctrl.timer[A_WAIT_BCON_TIMER]);
mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
@@ -117,8 +114,7 @@ static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
}
static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
- unsigned long interval,
- void (*callback) (unsigned long))
+ unsigned long interval)
{
struct timer_list *timer;
@@ -131,9 +127,6 @@ static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
return -EBUSY;
}
- init_timer(timer);
- timer->data = (unsigned long) mvotg;
- timer->function = callback;
timer->expires = jiffies + interval;
add_timer(timer);
@@ -459,8 +452,7 @@ run:
if (old_state != OTG_STATE_A_HOST)
mv_otg_start_host(mvotg, 1);
mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
- T_A_WAIT_BCON,
- mv_otg_timer_await_bcon);
+ T_A_WAIT_BCON);
/*
* Now, we directly enter A_HOST. So set b_conn = 1
 * here. In fact, it needs the host driver to notify us.
@@ -722,7 +714,8 @@ static int mv_otg_probe(struct platform_device *pdev)
otg->set_vbus = mv_otg_set_vbus;
for (i = 0; i < OTG_TIMER_NUM; i++)
- init_timer(&mvotg->otg_ctrl.timer[i]);
+ timer_setup(&mvotg->otg_ctrl.timer[i],
+ mv_otg_timer_await_bcon, 0);
r = platform_get_resource_byname(mvotg->pdev,
IORESOURCE_MEM, "phyregs");
diff --git a/drivers/usb/phy/phy-mv-usb.h b/drivers/usb/phy/phy-mv-usb.h
index 551da6eb0ba8..96701a1229ad 100644
--- a/drivers/usb/phy/phy-mv-usb.h
+++ b/drivers/usb/phy/phy-mv-usb.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2011 Marvell International Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#ifndef __MV_USB_OTG_CONTROLLER__
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 0e2f1a36d315..da031c45395a 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -1,14 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012-2014 Freescale Semiconductor, Inc.
* Copyright (C) 2012 Marek Vasut <marex@denx.de>
* on behalf of DENX Software Engineering GmbH
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
@@ -67,11 +61,26 @@
#define ANADIG_ANA_MISC0_SET 0x154
#define ANADIG_ANA_MISC0_CLR 0x158
+#define ANADIG_USB1_CHRG_DETECT_SET 0x1b4
+#define ANADIG_USB1_CHRG_DETECT_CLR 0x1b8
+#define ANADIG_USB1_CHRG_DETECT_EN_B BIT(20)
+#define ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B BIT(19)
+#define ANADIG_USB1_CHRG_DETECT_CHK_CONTACT BIT(18)
+
#define ANADIG_USB1_VBUS_DET_STAT 0x1c0
+#define ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID BIT(3)
+
+#define ANADIG_USB1_CHRG_DET_STAT 0x1d0
+#define ANADIG_USB1_CHRG_DET_STAT_DM_STATE BIT(2)
+#define ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED BIT(1)
+#define ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT BIT(0)
+
#define ANADIG_USB2_VBUS_DET_STAT 0x220
#define ANADIG_USB1_LOOPBACK_SET 0x1e4
#define ANADIG_USB1_LOOPBACK_CLR 0x1e8
+#define ANADIG_USB1_LOOPBACK_UTMI_TESTSTART BIT(0)
+
#define ANADIG_USB2_LOOPBACK_SET 0x244
#define ANADIG_USB2_LOOPBACK_CLR 0x248
@@ -479,6 +488,144 @@ static int mxs_phy_on_disconnect(struct usb_phy *phy,
return 0;
}
+#define MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT 100
+static int mxs_charger_data_contact_detect(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ int i, stable_contact_count = 0;
+ u32 val;
+
+ /* Check if vbus is valid */
+ regmap_read(regmap, ANADIG_USB1_VBUS_DET_STAT, &val);
+ if (!(val & ANADIG_USB1_VBUS_DET_STAT_VBUS_VALID)) {
+ dev_err(x->phy.dev, "vbus is not valid\n");
+ return -EINVAL;
+ }
+
+ /* Enable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
+ ANADIG_USB1_CHRG_DETECT_EN_B);
+ /*
+ * - Do not check whether a charger is connected to the USB port
+ * - Check whether the USB data lines have made contact with each other
+ */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ /* Check if plug is connected */
+ for (i = 0; i < MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT; i++) {
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (val & ANADIG_USB1_CHRG_DET_STAT_PLUG_CONTACT) {
+ stable_contact_count++;
+ if (stable_contact_count > 5)
+ /* Data pin makes contact */
+ break;
+ else
+ usleep_range(5000, 10000);
+ } else {
+ stable_contact_count = 0;
+ usleep_range(5000, 6000);
+ }
+ }
+
+ if (i == MXS_USB_CHARGER_DATA_CONTACT_TIMEOUT) {
+ dev_err(x->phy.dev,
+ "Data pin can't make good contact.\n");
+ /* Disable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_EN_B |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ enum usb_charger_type chgr_type = UNKNOWN_TYPE;
+ u32 val;
+
+ /*
+ * - Do check whether a charger is connected to the USB port
+ * - Do not check whether the USB data lines have made contact
+ * with each other
+ */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_CLR,
+ ANADIG_USB1_CHRG_DETECT_CHK_CONTACT |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ msleep(100);
+
+ /* Check if it is a charger */
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
+ chgr_type = SDP_TYPE;
+ dev_dbg(x->phy.dev, "It is a stardard downstream port\n");
+ }
+
+ /* Disable charger detector */
+ regmap_write(regmap, ANADIG_USB1_CHRG_DETECT_SET,
+ ANADIG_USB1_CHRG_DETECT_EN_B |
+ ANADIG_USB1_CHRG_DETECT_CHK_CHRG_B);
+
+ return chgr_type;
+}
+
+/*
+ * Must be called after D+ has been pulled up; the D- state is then
+ * used to differentiate DCP from CDP.
+ */
+enum usb_charger_type mxs_charger_secondary_detection(struct mxs_phy *x)
+{
+ struct regmap *regmap = x->regmap_anatop;
+ u32 val;
+
+ msleep(80);
+
+ regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
+ if (val & ANADIG_USB1_CHRG_DET_STAT_DM_STATE) {
+ dev_dbg(x->phy.dev, "It is a dedicate charging port\n");
+ return DCP_TYPE;
+ } else {
+ dev_dbg(x->phy.dev, "It is a charging downstream port\n");
+ return CDP_TYPE;
+ }
+}
+
+static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
+{
+ struct mxs_phy *mxs_phy = to_mxs_phy(phy);
+ struct regmap *regmap = mxs_phy->regmap_anatop;
+ void __iomem *base = phy->io_priv;
+ enum usb_charger_type chgr_type = UNKNOWN_TYPE;
+
+ if (mxs_charger_data_contact_detect(mxs_phy))
+ return chgr_type;
+
+ chgr_type = mxs_charger_primary_detection(mxs_phy);
+
+ if (chgr_type != SDP_TYPE) {
+ /* Pull up D+ via the UTMI test mode */
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_CLR);
+ regmap_write(regmap, ANADIG_USB1_LOOPBACK_SET,
+ ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
+
+ chgr_type = mxs_charger_secondary_detection(mxs_phy);
+
+ /* Stop the test */
+ regmap_write(regmap, ANADIG_USB1_LOOPBACK_CLR,
+ ANADIG_USB1_LOOPBACK_UTMI_TESTSTART);
+ writel_relaxed(BM_USBPHY_DEBUG_CLKGATE,
+ base + HW_USBPHY_DEBUG_SET);
+ }
+
+ return chgr_type;
+}
+
static int mxs_phy_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -567,6 +714,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
mxs_phy->phy.type = USB_PHY_TYPE_USB2;
mxs_phy->phy.set_wakeup = mxs_phy_set_wakeup;
+ mxs_phy->phy.charger_detect = mxs_phy_charger_detect;
mxs_phy->clk = clk;
mxs_phy->data = of_id->data;
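The new charger_detect hook added above follows the usual BC1.2-style sequence: data contact detect first, then primary detection to separate a standard downstream port from a charging port, then, only for charging ports, secondary detection (with D+ pulled up via UTMI test mode) to separate DCP from CDP. Condensed, and omitting the D+ pull-up/teardown writes, the control flow is roughly the sketch below; the wrapper name is hypothetical, the helpers are the ones introduced by the patch:

/* Condensed view of the detection flow added above. */
static enum usb_charger_type mxs_detect_flow(struct mxs_phy *x)
{
        enum usb_charger_type type;

        if (mxs_charger_data_contact_detect(x))        /* DCD: wait for D+/D- contact */
                return UNKNOWN_TYPE;

        type = mxs_charger_primary_detection(x);       /* SDP vs. charging port */
        if (type != SDP_TYPE)                          /* DCP vs. CDP, D+ pulled up */
                type = mxs_charger_secondary_detection(x);

        return type;
}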
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 800d1d90753d..ee0863c6553e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OMAP OTG controller driver
*
@@ -6,15 +7,6 @@
* Copyright (C) 2005-2006 Nokia Corporation
* Copyright (C) 2004 Texas Instruments
* Copyright (C) 2004 David Brownell
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
deleted file mode 100644
index 679afeaaa9a8..000000000000
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright (c) 2015, Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/extcon.h>
-#include <linux/gpio/consumer.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/usb.h>
-#include <linux/usb/ulpi.h>
-
-#define HSPHY_AHBBURST 0x0090
-#define HSPHY_AHBMODE 0x0098
-#define HSPHY_GENCONFIG 0x009c
-#define HSPHY_GENCONFIG_2 0x00a0
-
-#define HSPHY_USBCMD 0x0140
-#define HSPHY_ULPI_VIEWPORT 0x0170
-#define HSPHY_CTRL 0x0240
-
-#define HSPHY_TXFIFO_IDLE_FORCE_DIS BIT(4)
-#define HSPHY_SESS_VLD_CTRL_EN BIT(7)
-#define HSPHY_POR_ASSERT BIT(0)
-#define HSPHY_RETEN BIT(1)
-
-#define HSPHY_SESS_VLD_CTRL BIT(25)
-
-#define ULPI_PWR_CLK_MNG_REG 0x88
-#define ULPI_PWR_OTG_COMP_DISABLE BIT(0)
-
-#define ULPI_MISC_A 0x96
-#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
-#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
-
-#define HSPHY_3P3_MIN 3050000 /* uV */
-#define HSPHY_3P3_MAX 3300000 /* uV */
-
-#define HSPHY_1P8_MIN 1800000 /* uV */
-#define HSPHY_1P8_MAX 1800000 /* uV */
-
-#define HSPHY_VDD_MIN 5
-#define HSPHY_VDD_MAX 7
-
-struct phy_8x16 {
- struct usb_phy phy;
- void __iomem *regs;
- struct clk *core_clk;
- struct clk *iface_clk;
- struct regulator_bulk_data regulator[3];
-
- struct reset_control *phy_reset;
-
- struct gpio_desc *switch_gpio;
- struct notifier_block reboot_notify;
-};
-
-static int phy_8x16_notify_connect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val;
-
- val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
- usb_phy_io_write(&qphy->phy, val, ULPI_SET(ULPI_MISC_A));
-
- val = readl(qphy->regs + HSPHY_USBCMD);
- val |= HSPHY_SESS_VLD_CTRL;
- writel(val, qphy->regs + HSPHY_USBCMD);
-
- return 0;
-}
-
-static int phy_8x16_notify_disconnect(struct usb_phy *phy,
- enum usb_device_speed speed)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val;
-
- val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
- usb_phy_io_write(&qphy->phy, val, ULPI_CLR(ULPI_MISC_A));
-
- val = readl(qphy->regs + HSPHY_USBCMD);
- val &= ~HSPHY_SESS_VLD_CTRL;
- writel(val, qphy->regs + HSPHY_USBCMD);
-
- return 0;
-}
-
-static int phy_8x16_vbus_on(struct phy_8x16 *qphy)
-{
- phy_8x16_notify_connect(&qphy->phy, USB_SPEED_UNKNOWN);
-
- /* Switch D+/D- lines to Device connector */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
-
- return 0;
-}
-
-static int phy_8x16_vbus_off(struct phy_8x16 *qphy)
-{
- phy_8x16_notify_disconnect(&qphy->phy, USB_SPEED_UNKNOWN);
-
- /* Switch D+/D- lines to USB HUB */
- gpiod_set_value_cansleep(qphy->switch_gpio, 1);
-
- return 0;
-}
-
-static int phy_8x16_vbus_notify(struct notifier_block *nb, unsigned long event,
- void *ptr)
-{
- struct usb_phy *usb_phy = container_of(nb, struct usb_phy, vbus_nb);
- struct phy_8x16 *qphy = container_of(usb_phy, struct phy_8x16, phy);
-
- if (event)
- phy_8x16_vbus_on(qphy);
- else
- phy_8x16_vbus_off(qphy);
-
- return NOTIFY_DONE;
-}
-
-static int phy_8x16_init(struct usb_phy *phy)
-{
- struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
- u32 val, init[] = {0x44, 0x6B, 0x24, 0x13};
- u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
- int idx, state;
-
- for (idx = 0; idx < ARRAY_SIZE(init); idx++)
- usb_phy_io_write(phy, init[idx], addr + idx);
-
- reset_control_reset(qphy->phy_reset);
-
- /* Assert USB HSPHY_POR */
- val = readl(qphy->regs + HSPHY_CTRL);
- val |= HSPHY_POR_ASSERT;
- writel(val, qphy->regs + HSPHY_CTRL);
-
- /*
- * wait for minimum 10 microseconds as suggested in HPG.
- * Use a slightly larger value since the exact value didn't
- * work 100% of the time.
- */
- usleep_range(12, 15);
-
- /* Deassert USB HSPHY_POR */
- val = readl(qphy->regs + HSPHY_CTRL);
- val &= ~HSPHY_POR_ASSERT;
- writel(val, qphy->regs + HSPHY_CTRL);
-
- usleep_range(10, 15);
-
- writel(0x00, qphy->regs + HSPHY_AHBBURST);
- writel(0x08, qphy->regs + HSPHY_AHBMODE);
-
- /* workaround for rx buffer collision issue */
- val = readl(qphy->regs + HSPHY_GENCONFIG);
- val &= ~HSPHY_TXFIFO_IDLE_FORCE_DIS;
- writel(val, qphy->regs + HSPHY_GENCONFIG);
-
- val = readl(qphy->regs + HSPHY_GENCONFIG_2);
- val |= HSPHY_SESS_VLD_CTRL_EN;
- writel(val, qphy->regs + HSPHY_GENCONFIG_2);
-
- val = ULPI_PWR_OTG_COMP_DISABLE;
- usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG));
-
- state = extcon_get_state(qphy->phy.edev, EXTCON_USB);
- if (state)
- phy_8x16_vbus_on(qphy);
- else
- phy_8x16_vbus_off(qphy);
-
- val = usb_phy_io_read(&qphy->phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
- usb_phy_io_write(&qphy->phy, val, ULPI_FUNC_CTRL);
-
- return 0;
-}
-
-static void phy_8x16_shutdown(struct usb_phy *phy)
-{
- u32 val;
-
- /* Put the controller in non-driving mode */
- val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);
- val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
- val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
- usb_phy_io_write(phy, val, ULPI_FUNC_CTRL);
-}
-
-static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
-{
- struct device *dev = qphy->phy.dev;
- int ret;
-
- qphy->core_clk = devm_clk_get(dev, "core");
- if (IS_ERR(qphy->core_clk))
- return PTR_ERR(qphy->core_clk);
-
- qphy->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(qphy->iface_clk))
- return PTR_ERR(qphy->iface_clk);
-
- qphy->regulator[0].supply = "v3p3";
- qphy->regulator[1].supply = "v1p8";
- qphy->regulator[2].supply = "vddcx";
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
- qphy->regulator);
- if (ret)
- return ret;
-
- qphy->phy_reset = devm_reset_control_get(dev, "phy");
- if (IS_ERR(qphy->phy_reset))
- return PTR_ERR(qphy->phy_reset);
-
- qphy->switch_gpio = devm_gpiod_get_optional(dev, "switch",
- GPIOD_OUT_LOW);
- return PTR_ERR_OR_ZERO(qphy->switch_gpio);
-}
-
-static int phy_8x16_reboot_notify(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- struct phy_8x16 *qphy;
-
- qphy = container_of(this, struct phy_8x16, reboot_notify);
-
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot_notify
- */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
- return NOTIFY_DONE;
-}
-
-static int phy_8x16_probe(struct platform_device *pdev)
-{
- struct phy_8x16 *qphy;
- struct resource *res;
- struct usb_phy *phy;
- int ret;
-
- qphy = devm_kzalloc(&pdev->dev, sizeof(*qphy), GFP_KERNEL);
- if (!qphy)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, qphy);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- qphy->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(qphy->regs))
- return PTR_ERR(qphy->regs);
-
- phy = &qphy->phy;
- phy->dev = &pdev->dev;
- phy->label = dev_name(&pdev->dev);
- phy->init = phy_8x16_init;
- phy->shutdown = phy_8x16_shutdown;
- phy->notify_connect = phy_8x16_notify_connect;
- phy->notify_disconnect = phy_8x16_notify_disconnect;
- phy->io_priv = qphy->regs + HSPHY_ULPI_VIEWPORT;
- phy->io_ops = &ulpi_viewport_access_ops;
- phy->type = USB_PHY_TYPE_USB2;
- phy->vbus_nb.notifier_call = phy_8x16_vbus_notify;
- phy->id_nb.notifier_call = NULL;
-
- ret = phy_8x16_read_devicetree(qphy);
- if (ret < 0)
- return ret;
-
- ret = clk_set_rate(qphy->core_clk, INT_MAX);
- if (ret < 0)
- dev_dbg(phy->dev, "Can't boost core clock\n");
-
- ret = clk_prepare_enable(qphy->core_clk);
- if (ret < 0)
- return ret;
-
- ret = clk_prepare_enable(qphy->iface_clk);
- if (ret < 0)
- goto off_core;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
- qphy->regulator);
- if (WARN_ON(ret))
- goto off_clks;
-
- ret = usb_add_phy_dev(&qphy->phy);
- if (ret)
- goto off_power;
-
- qphy->reboot_notify.notifier_call = phy_8x16_reboot_notify;
- register_reboot_notifier(&qphy->reboot_notify);
-
- return 0;
-
-off_power:
- regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
-off_clks:
- clk_disable_unprepare(qphy->iface_clk);
-off_core:
- clk_disable_unprepare(qphy->core_clk);
- return ret;
-}
-
-static int phy_8x16_remove(struct platform_device *pdev)
-{
- struct phy_8x16 *qphy = platform_get_drvdata(pdev);
-
- unregister_reboot_notifier(&qphy->reboot_notify);
-
- /*
- * Ensure that D+/D- lines are routed to uB connector, so
- * we could load bootloader/kernel at next reboot_notify
- */
- gpiod_set_value_cansleep(qphy->switch_gpio, 0);
-
- usb_remove_phy(&qphy->phy);
-
- clk_disable_unprepare(qphy->iface_clk);
- clk_disable_unprepare(qphy->core_clk);
- regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
- return 0;
-}
-
-static const struct of_device_id phy_8x16_dt_match[] = {
- { .compatible = "qcom,usb-8x16-phy" },
- { }
-};
-MODULE_DEVICE_TABLE(of, phy_8x16_dt_match);
-
-static struct platform_driver phy_8x16_driver = {
- .probe = phy_8x16_probe,
- .remove = phy_8x16_remove,
- .driver = {
- .name = "phy-qcom-8x16-usb",
- .of_match_table = phy_8x16_dt_match,
- },
-};
-module_platform_driver(phy_8x16_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm APQ8016/MSM8916 chipsets USB transceiver driver");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index 8babd318c0ed..b3ce42edb373 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tahvo USB transceiver driver
*
@@ -9,21 +10,12 @@
*
* Original driver written by Juha Yrjölä, Tony Lindgren and Timo Teräs.
* Modified for Retu/Tahvo MFD by Aaro Koskinen.
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/usb.h>
-#include <linux/extcon.h>
+#include <linux/extcon-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/otg.h>
@@ -368,7 +360,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
if (IS_ERR(tu->extcon)) {
dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
- return -ENOMEM;
+ ret = PTR_ERR(tu->extcon);
+ goto err_disable_clk;
}
ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
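The tahvo fix above addresses two issues at once: devm_extcon_dev_allocate() returns an ERR_PTR whose value should be propagated rather than a hard-coded -ENOMEM, and returning directly skipped the clock cleanup performed by the err_disable_clk label. A minimal sketch of that probe error-handling shape, with hypothetical "foo" names, might be:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
#include <linux/platform_device.h>

static const unsigned int foo_cables[] = { EXTCON_USB, EXTCON_NONE };

struct foo {
        struct clk *clk;
        struct extcon_dev *edev;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo *foo;
        int ret;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        foo->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(foo->clk))
                return PTR_ERR(foo->clk);

        ret = clk_prepare_enable(foo->clk);
        if (ret)
                return ret;

        foo->edev = devm_extcon_dev_allocate(&pdev->dev, foo_cables);
        if (IS_ERR(foo->edev)) {
                ret = PTR_ERR(foo->edev);       /* not a hard-coded -ENOMEM */
                goto err_disable_clk;
        }

        ret = devm_extcon_dev_register(&pdev->dev, foo->edev);
        if (ret)
                goto err_disable_clk;

        return 0;

err_disable_clk:
        clk_disable_unprepare(foo->clk);        /* undo what probe enabled */
        return ret;
}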
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index ccc2bf5274b4..f668bfb708d3 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2013 NVIDIA Corporation
@@ -6,16 +7,6 @@
* Erik Gilling <konkers@google.com>
* Benoit Goby <benoit@android.com>
* Venu Byravarasu <vbyravarasu@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/resource.h>
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index b5dc077ed7d3..e78ed52339e6 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* twl6030_usb - TWL6030 USB transceiver, talking to OMAP OTG driver.
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
* Author: Hema HK <hemahk@ti.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/module.h>
diff --git a/drivers/usb/phy/phy-ulpi-viewport.c b/drivers/usb/phy/phy-ulpi-viewport.c
index 18bb8264b5a0..7a14e0e3b635 100644
--- a/drivers/usb/phy/phy-ulpi-viewport.c
+++ b/drivers/usb/phy/phy-ulpi-viewport.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/export.h>
diff --git a/drivers/usb/phy/phy-ulpi.c b/drivers/usb/phy/phy-ulpi.c
index f48a7a21e3c2..a43c49369a60 100644
--- a/drivers/usb/phy/phy-ulpi.c
+++ b/drivers/usb/phy/phy-ulpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Generic ULPI USB transceiver support
*
@@ -7,20 +8,6 @@
*
* Sascha Hauer <s.hauer@pengutronix.de>
* Freescale Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 89f4ac4cd93e..f97cb47577fc 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* phy.c -- USB phy handling
*
* Copyright (C) 2004-2013 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/export.h>
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index d787d05f6546..fac147a3ad23 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# for Renesas USB
#
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index f0ce304c5aaf..56079bb6759a 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/err.h>
#include <linux/gpio.h>
@@ -486,6 +477,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a77995",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,rcar-gen2-usbhs",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
@@ -501,7 +496,6 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
{
struct renesas_usbhs_platform_info *info;
struct renesas_usbhs_driver_param *dparam;
- const struct of_device_id *of_id = of_match_device(usbhs_of_match, dev);
u32 tmp;
int gpio;
@@ -510,7 +504,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
return NULL;
dparam = &info->driver_param;
- dparam->type = of_id ? (uintptr_t)of_id->data : 0;
+ dparam->type = (uintptr_t)of_device_get_match_data(dev);
if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
dparam->buswait_bwait = tmp;
gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
@@ -519,15 +513,19 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
dparam->enable_gpio = gpio;
if (dparam->type == USBHS_TYPE_RCAR_GEN2 ||
- dparam->type == USBHS_TYPE_RCAR_GEN3)
+ dparam->type == USBHS_TYPE_RCAR_GEN3 ||
+ dparam->type == USBHS_TYPE_RCAR_GEN3_WITH_PLL) {
dparam->has_usb_dmac = 1;
+ dparam->pipe_configs = usbhsc_new_pipe;
+ dparam->pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
+ }
return info;
}
static int usbhs_probe(struct platform_device *pdev)
{
- struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
+ struct renesas_usbhs_platform_info *info = renesas_usbhs_get_info(pdev);
struct renesas_usbhs_driver_callback *dfunc;
struct usbhs_priv *priv;
struct resource *res, *irq_res;
@@ -577,17 +575,12 @@ static int usbhs_probe(struct platform_device *pdev)
switch (priv->dparam.type) {
case USBHS_TYPE_RCAR_GEN2:
priv->pfunc = usbhs_rcar2_ops;
- if (!priv->dparam.pipe_configs) {
- priv->dparam.pipe_configs = usbhsc_new_pipe;
- priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
- }
break;
case USBHS_TYPE_RCAR_GEN3:
priv->pfunc = usbhs_rcar3_ops;
- if (!priv->dparam.pipe_configs) {
- priv->dparam.pipe_configs = usbhsc_new_pipe;
- priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
- }
+ break;
+ case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
+ priv->pfunc = usbhs_rcar3_with_pll_ops;
break;
default:
if (!info->platform_callback.get_id) {
@@ -710,7 +703,7 @@ probe_end_pipe_exit:
static int usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
+ struct renesas_usbhs_platform_info *info = renesas_usbhs_get_info(pdev);
struct renesas_usbhs_driver_callback *dfunc = &info->driver_callback;
dev_dbg(&pdev->dev, "usb remove\n");
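The common.c hunks above replace the of_match_device() plus of_id->data lookup with a single of_device_get_match_data() call, which returns NULL (and therefore a type of 0) when there is no match, preserving the old behaviour. A short sketch of that pattern, with hypothetical compatible strings and data values, is:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum bar_type { BAR_TYPE_A = 1, BAR_TYPE_B };

static const struct of_device_id bar_of_match[] = {
        { .compatible = "vendor,bar-a", .data = (void *)(uintptr_t)BAR_TYPE_A },
        { .compatible = "vendor,bar-b", .data = (void *)(uintptr_t)BAR_TYPE_B },
        { /* sentinel */ }
};

static int bar_probe(struct platform_device *pdev)
{
        /* One call replaces of_match_device() plus the of_id NULL check. */
        enum bar_type type = (uintptr_t)of_device_get_match_data(&pdev->dev);

        return type ? 0 : -ENODEV;
}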
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8c5fc12ad778..64797784a6df 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 50285b01da92..2d24ef3076ef 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/io.h>
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 8b98507d7abc..88d1816bcda2 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_FIFO_H
#define RENESAS_USB_FIFO_H
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 28965ef4f824..7475c4f64724 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/interrupt.h>
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 1ef5bf604070..a4a61d6b82a1 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_MOD_H
#define RENESAS_USB_MOD_H
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index c068b673420b..34ee9ebe12a3 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index e256351cb72d..4e59c649db81 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/io.h>
#include <linux/list.h>
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index d811f0550c04..093cd8e87335 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 95185fdb29b1..d3d002244891 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_PIPE_H
#define RENESAS_USB_PIPE_H
diff --git a/drivers/usb/renesas_usbhs/rcar2.c b/drivers/usb/renesas_usbhs/rcar2.c
index 277160bc6f25..85a0e0933917 100644
--- a/drivers/usb/renesas_usbhs/rcar2.c
+++ b/drivers/usb/renesas_usbhs/rcar2.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-1.0+
/*
* Renesas USB driver R-Car Gen. 2 initialization and power control
*
* Copyright (C) 2014 Ulrich Hecht
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/gpio.h>
diff --git a/drivers/usb/renesas_usbhs/rcar2.h b/drivers/usb/renesas_usbhs/rcar2.h
index f07f10d9b3b2..45e3526cedeb 100644
--- a/drivers/usb/renesas_usbhs/rcar2.h
+++ b/drivers/usb/renesas_usbhs/rcar2.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "common.h"
extern const struct renesas_usbhs_platform_callback
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 02b67abfc2a1..c929d296c77b 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB driver R-Car Gen. 3 initialization and power control
*
* Copyright (C) 2016 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/delay.h>
@@ -15,24 +11,39 @@
#include "rcar3.h"
#define LPSTS 0x102
+#define UGCTRL 0x180 /* 32-bit register */
#define UGCTRL2 0x184 /* 32-bit register */
+#define UGSTS 0x188 /* 32-bit register */
/* Low Power Status register (LPSTS) */
#define LPSTS_SUSPM 0x4000
+/* R-Car D3 only: USB General control register (UGCTRL) */
+#define UGCTRL_PLLRESET 0x00000001
+#define UGCTRL_CONNECT 0x00000004
+
/*
* USB General control register 2 (UGCTRL2)
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
+#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
+/* R-Car D3 only: USB General status register (UGSTS) */
+#define UGSTS_LOCK 0x00000100
+
static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
{
iowrite32(data, priv->base + reg);
}
+static u32 usbhs_read32(struct usbhs_priv *priv, u32 reg)
+{
+ return ioread32(priv->base + reg);
+}
+
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
@@ -52,6 +63,34 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
return 0;
}
+/* R-Car D3 needs to release UGCTRL.PLLRESET */
+static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
+ void __iomem *base, int enable)
+{
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+ u32 val;
+ int timeout = 1000;
+
+ if (enable) {
+ usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 |
+ UGCTRL2_USB0SEL_HSUSB);
+
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+ do {
+ val = usbhs_read32(priv, UGSTS);
+ udelay(1);
+ } while (!(val & UGSTS_LOCK) && timeout--);
+ usbhs_write32(priv, UGCTRL, UGCTRL_CONNECT);
+ } else {
+ usbhs_write32(priv, UGCTRL, 0);
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ usbhs_write32(priv, UGCTRL, UGCTRL_PLLRESET);
+ }
+
+ return 0;
+}
+
static int usbhs_rcar3_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
@@ -61,3 +100,8 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
};
+
+const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
+ .power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
+ .get_id = usbhs_rcar3_get_id,
+};
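The D3-specific power_ctrl callback added above releases PLLRESET, selects the HSUSB channel in UGCTRL2, sets SUSPM, and then busy-waits (bounded to roughly 1 ms) for UGSTS.LOCK before asserting CONNECT. A bounded register poll like that is often written with the iopoll helpers instead; a sketch against a hypothetical MMIO wrapper is below. The offset and bit mirror UGSTS/UGSTS_LOCK from the patch, but unlike the loop above this variant reports -ETIMEDOUT if the PLL never locks:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define FOO_STS         0x188
#define FOO_STS_LOCK    BIT(8)

static int foo_wait_pll_lock(void __iomem *base)
{
        u32 val;

        /* Poll every 1 us, give up after 1000 us, as the loop above does. */
        return readl_poll_timeout_atomic(base + FOO_STS, val,
                                         val & FOO_STS_LOCK, 1, 1000);
}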
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
index 5f850b23ff18..49e535a31771 100644
--- a/drivers/usb/renesas_usbhs/rcar3.h
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
#include "common.h"
extern const struct renesas_usbhs_platform_callback usbhs_rcar3_ops;
+extern const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops;
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 5a21a82390e1..2d491e434f11 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the USB serial device drivers.
#
diff --git a/drivers/usb/serial/Makefile-keyspan_pda_fw b/drivers/usb/serial/Makefile-keyspan_pda_fw
index c20baf728011..503b472d85f2 100644
--- a/drivers/usb/serial/Makefile-keyspan_pda_fw
+++ b/drivers/usb/serial/Makefile-keyspan_pda_fw
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# some rules to handle the quirks of the 'as31' assembler, like
# insisting upon fixed suffixes for the input and output files,
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index 569c2200ba42..84d52953dd0a 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AIRcable USB Bluetooth Dongle Driver.
*
* Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2006 Manuel Francisco Naranjo (naranjo.manuel@gmail.com)
*
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
- *
* The device works as a standard CDC device; it has 2 interfaces, the first
* one is for firmware access and the second is the serial one.
* The protocol is very simple: there are two possibilities, reading or writing.
@@ -161,4 +158,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 0adbd38b4eea..3c544782f60b 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
* Original version:
@@ -15,11 +16,6 @@
* into the old ark3116.c driver and suddenly realized the ark3116 is
* a 16450 with a USB interface glued to it. See comments at the
* bottom of this file.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index 15bc71853db5..c1235d5b9fba 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Belkin USB Serial Adapter Driver
*
@@ -9,11 +10,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/belkin_sa.h b/drivers/usb/serial/belkin_sa.h
index c74b58ab56f9..51bc06287603 100644
--- a/drivers/usb/serial/belkin_sa.h
+++ b/drivers/usb/serial/belkin_sa.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Definitions for Belkin USB Serial Adapter Driver
*
@@ -8,11 +9,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 8936a83c96cd..9e265eb92611 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Bus specific functions
*
* Copyright (C) 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 351745aec0e1..bdd7a5ad3bf1 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2007, Frank A Kingswood <frank@kingswood-consulting.co.uk>
* Copyright 2007, Werner Cornelius <werner@cornelius-consult.de>
@@ -9,10 +10,6 @@
* serial port, an IEEE-1284 parallel printer port or a memory-like
* interface. In all cases the CH341 supports an I2C interface as well.
* This driver only supports the asynchronous serial interface.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -644,4 +641,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 43a862a90a77..17940589c647 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Console driver
*
* Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* Thanks to Randy Dunlap for the original version of this code.
*
*/
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 412f812522ee..7c6273bf5beb 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Silicon Laboratories CP210x USB to RS232 serial adaptor driver
*
* Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* Support to set flow control line levels using TIOCMGET and TIOCMSET
* thanks to Karl Hiramoto karl@hiramoto.org. RTSCTS hardware flow
* control thanks to Munir Nassar nassarmu@real-time.com
@@ -1529,4 +1526,4 @@ static void cp210x_release(struct usb_serial *serial)
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 47fbd9f0c0c7..dc67a2eb98d7 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver
*
@@ -10,11 +11,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and
* patience.
*
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 90110de715e0..e0035c023120 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Cypress M8 driver
*
@@ -6,11 +7,6 @@
* Copyright (C) 2003,2004
* Neil Whelchel (koyama@firstlight.net)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 119d2e17077b..35e223751c0e 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CYPRESS_M8_H
#define CYPRESS_M8_H
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 2ce39af32cfa..b0526786fb02 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Digi AccelePort USB-4 and USB-2 Serial Converters
*
* Copyright 2000 by Digi International
*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
* Shamelessly based on Brian Warner's keyspan_pda.c and Greg Kroah-Hartman's
* usb-serial driver.
*
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 90e603d5f660..d680bec62547 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Empeg empeg-car player driver
*
@@ -7,10 +8,6 @@
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, as published by
- * the Free Software Foundation, version 2.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
@@ -126,4 +123,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/ezusb_convert.pl b/drivers/usb/serial/ezusb_convert.pl
index 13f11469116e..40d23f21ec74 100644
--- a/drivers/usb/serial/ezusb_convert.pl
+++ b/drivers/usb/serial/ezusb_convert.pl
@@ -1,4 +1,5 @@
#! /usr/bin/perl -w
+# SPDX-License-Identifier: GPL-2.0
# convert an Intel HEX file into a set of C records usable by the firmware
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 972f5a5fe577..96036f87b1de 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Fintek F81232 USB to serial adaptor driver
*
* Copyright (C) 2012 Greg Kroah-Hartman (gregkh@linuxfoundation.org)
* Copyright (C) 2012 Linux Foundation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 3d616a2a9f96..e4573b4c8935 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* F81532/F81534 USB to Serial Ports Bridge
*
* F81532 => 2 Serial Ports
* F81534 => 4 Serial Ports
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
* Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
* Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw)
@@ -39,9 +35,11 @@
#define F81534_UART_OFFSET 0x10
#define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS)
#define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS)
+#define F81534_INTERRUPT_ENABLE_REG (0x01 + F81534_UART_BASE_ADDRESS)
#define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS)
#define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS)
+#define F81534_LINE_STATUS_REG (0x05 + F81534_UART_BASE_ADDRESS)
#define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS)
#define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS)
@@ -126,9 +124,13 @@ struct f81534_serial_private {
struct f81534_port_private {
struct mutex mcr_mutex;
+ struct mutex lcr_mutex;
+ struct work_struct lsr_work;
+ struct usb_serial_port *port;
unsigned long tx_empty;
spinlock_t msr_lock;
u8 shadow_mcr;
+ u8 shadow_lcr;
u8 shadow_msr;
u8 phy_num;
};
@@ -461,6 +463,7 @@ static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
u8 lcr)
{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
u32 divisor;
int status;
u8 value;
@@ -489,35 +492,65 @@ static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
}
divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+
+ mutex_lock(&port_priv->lcr_mutex);
+
value = UART_LCR_DLAB;
status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
- return status;
+ goto out_unlock;
}
value = divisor & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
- return status;
+ goto out_unlock;
}
value = (divisor >> 8) & 0xff;
status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
if (status) {
dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
- return status;
+ goto out_unlock;
}
- status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr);
+ value = lcr | (port_priv->shadow_lcr & UART_LCR_SBC);
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ value);
if (status) {
dev_err(&port->dev, "%s: set LCR failed\n", __func__);
- return status;
+ goto out_unlock;
}
- return 0;
+ port_priv->shadow_lcr = value;
+out_unlock:
+ mutex_unlock(&port_priv->lcr_mutex);
+
+ return status;
+}
+
+static void f81534_break_ctl(struct tty_struct *tty, int break_state)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+ int status;
+
+ mutex_lock(&port_priv->lcr_mutex);
+
+ if (break_state)
+ port_priv->shadow_lcr |= UART_LCR_SBC;
+ else
+ port_priv->shadow_lcr &= ~UART_LCR_SBC;
+
+ status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+ port_priv->shadow_lcr);
+ if (status)
+ dev_err(&port->dev, "set break failed: %d\n", status);
+
+ mutex_unlock(&port_priv->lcr_mutex);
}
static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
@@ -1015,6 +1048,8 @@ static void f81534_process_per_serial_block(struct usb_serial_port *port,
tty_insert_flip_char(&port->port, 0,
TTY_OVERRUN);
}
+
+ schedule_work(&port_priv->lsr_work);
}
if (port->port.console && port->sysrq) {
@@ -1162,6 +1197,21 @@ static int f81534_attach(struct usb_serial *serial)
return 0;
}
+static void f81534_lsr_worker(struct work_struct *work)
+{
+ struct f81534_port_private *port_priv;
+ struct usb_serial_port *port;
+ int status;
+ u8 tmp;
+
+ port_priv = container_of(work, struct f81534_port_private, lsr_work);
+ port = port_priv->port;
+
+ status = f81534_get_port_register(port, F81534_LINE_STATUS_REG, &tmp);
+ if (status)
+ dev_warn(&port->dev, "read LSR failed: %d\n", status);
+}
+
static int f81534_port_probe(struct usb_serial_port *port)
{
struct f81534_port_private *port_priv;
@@ -1173,6 +1223,8 @@ static int f81534_port_probe(struct usb_serial_port *port)
spin_lock_init(&port_priv->msr_lock);
mutex_init(&port_priv->mcr_mutex);
+ mutex_init(&port_priv->lcr_mutex);
+ INIT_WORK(&port_priv->lsr_work, f81534_lsr_worker);
/* Assign logic-to-phy mapping */
ret = f81534_logic_to_phy_port(port->serial, port);
@@ -1180,10 +1232,30 @@ static int f81534_port_probe(struct usb_serial_port *port)
return ret;
port_priv->phy_num = ret;
+ port_priv->port = port;
usb_set_serial_port_data(port, port_priv);
dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
port->port_number, port_priv->phy_num);
+ /*
+ * The F81532/534 will hang up if the LSR interrupt is enabled in the IER
+ * and a data overrun occurs. So we disable the LSR interrupt in probe()
+ * and instead schedule the LSR worker to clear the LSR state whenever an
+ * LSR error bit is reported with bulk-in data in
+ * f81534_process_per_serial_block().
+ */
+ ret = f81534_set_port_register(port, F81534_INTERRUPT_ENABLE_REG,
+ UART_IER_RDI | UART_IER_THRI | UART_IER_MSI);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int f81534_port_remove(struct usb_serial_port *port)
+{
+ struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+ flush_work(&port_priv->lsr_work);
return 0;
}
@@ -1317,6 +1389,8 @@ static struct usb_serial_driver f81534_device = {
.calc_num_ports = f81534_calc_num_ports,
.attach = f81534_attach,
.port_probe = f81534_port_probe,
+ .port_remove = f81534_port_remove,
+ .break_ctl = f81534_break_ctl,
.dtr_rts = f81534_dtr_rts,
.process_read_urb = f81534_process_read_urb,
.ioctl = f81534_ioctl,
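The f81534 break support above relies on a shadow copy of the LCR: both break_ctl() and set_port_config() write LCR over USB, so the last value is cached in shadow_lcr and every update is serialized by lcr_mutex, preventing a later termios change from clobbering the break bit (and vice versa). A sketch of that shadow-register pattern follows; the "foo_" names are hypothetical and foo_write_reg() merely stands in for the USB control transfer that f81534_set_port_register() performs:

#include <linux/mutex.h>
#include <linux/serial_reg.h>
#include <linux/types.h>

#define FOO_LCR         0x03

struct foo_port {
        struct mutex lcr_mutex;
        u8 shadow_lcr;
};

/* Hypothetical register writer; imagine a USB control transfer here. */
static int foo_write_reg(struct foo_port *p, unsigned int reg, u8 val);

static int foo_set_break(struct foo_port *p, bool on)
{
        int ret;

        mutex_lock(&p->lcr_mutex);
        if (on)
                p->shadow_lcr |= UART_LCR_SBC;
        else
                p->shadow_lcr &= ~UART_LCR_SBC;
        /* Write the cached value so no other LCR bits are lost. */
        ret = foo_write_reg(p, FOO_LCR, p->shadow_lcr);
        mutex_unlock(&p->lcr_mutex);

        return ret;
}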
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 49d1b2d4606d..1aba9105b369 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB FTDI SIO driver
*
@@ -9,11 +10,6 @@
* Copyright (C) 2002
* Kuba Ober (kuba@mareimbrium.org)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index bbcc13df11ac..dcd0b6e05baf 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Driver definitions for the FTDI USB Single Port Serial Converter -
* known as FTDI_SIO (Serial Input/Output application of the chipset)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f9d15bd62785..4faa09fe308c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
* Please keep numerically sorted within individual areas, thanks!
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b2f2e87aed94..633550ec3025 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Garmin GPS driver
*
@@ -7,20 +8,6 @@
* http://sourceforge.net/projects/garmin-gps/
*
* This driver has been derived from v2.1 of the visor driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111 USA
*/
#include <linux/kernel.h>
@@ -138,6 +125,7 @@ struct garmin_data {
__u8 privpkt[4*6];
spinlock_t lock;
struct list_head pktlist;
+ struct usb_anchor write_urbs;
};
@@ -875,42 +863,38 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
static int garmin_init_session(struct usb_serial_port *port)
{
- struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
- int status = 0;
- int i = 0;
+ int status;
+ int i;
- if (status == 0) {
- usb_kill_urb(port->interrupt_in_urb);
+ usb_kill_urb(port->interrupt_in_urb);
- dev_dbg(&serial->dev->dev, "%s - adding interrupt input\n", __func__);
- status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
- if (status)
- dev_err(&serial->dev->dev,
- "%s - failed submitting interrupt urb, error %d\n",
- __func__, status);
+ status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+ if (status) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ status);
+ return status;
}
/*
* using the initialization method from gpsbabel. See comments in
* gpsbabel/jeeps/gpslibusb.c gusb_reset_toggles()
*/
- if (status == 0) {
- dev_dbg(&serial->dev->dev, "%s - starting session ...\n", __func__);
- garmin_data_p->state = STATE_ACTIVE;
+ dev_dbg(&port->dev, "%s - starting session ...\n", __func__);
+ garmin_data_p->state = STATE_ACTIVE;
- for (i = 0; i < 3; i++) {
- status = garmin_write_bulk(port,
- GARMIN_START_SESSION_REQ,
- sizeof(GARMIN_START_SESSION_REQ), 0);
+ for (i = 0; i < 3; i++) {
+ status = garmin_write_bulk(port, GARMIN_START_SESSION_REQ,
+ sizeof(GARMIN_START_SESSION_REQ), 0);
+ if (status < 0)
+ goto err_kill_urbs;
+ }
- if (status < 0)
- break;
- }
+ return 0;
- if (status > 0)
- status = 0;
- }
+err_kill_urbs:
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ usb_kill_urb(port->interrupt_in_urb);
return status;
}
@@ -930,7 +914,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
/* shutdown any bulk reads that might be going on */
- usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
if (garmin_data_p->state == STATE_RESET)
@@ -953,7 +936,7 @@ static void garmin_close(struct usb_serial_port *port)
/* shutdown our urbs */
usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
/* keep reset state so we know that we must start a new session */
if (garmin_data_p->state != STATE_RESET)
@@ -1037,12 +1020,14 @@ static int garmin_write_bulk(struct usb_serial_port *port,
}
/* send it down the pipe */
+ usb_anchor_urb(urb, &garmin_data_p->write_urbs);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb(write bulk) failed with status = %d\n",
__func__, status);
count = status;
+ usb_unanchor_urb(urb);
kfree(buffer);
}
@@ -1370,9 +1355,9 @@ static void garmin_unthrottle(struct tty_struct *tty)
* the tty in cases where the protocol provides no own handshaking
* to initiate the transfer.
*/
-static void timeout_handler(unsigned long data)
+static void timeout_handler(struct timer_list *t)
{
- struct garmin_data *garmin_data_p = (struct garmin_data *) data;
+ struct garmin_data *garmin_data_p = from_timer(garmin_data_p, t, timer);
/* send the next queued packet to the tty port */
if (garmin_data_p->mode == MODE_NATIVE)
@@ -1391,19 +1376,23 @@ static int garmin_port_probe(struct usb_serial_port *port)
if (!garmin_data_p)
return -ENOMEM;
- init_timer(&garmin_data_p->timer);
+ timer_setup(&garmin_data_p->timer, timeout_handler, 0);
spin_lock_init(&garmin_data_p->lock);
INIT_LIST_HEAD(&garmin_data_p->pktlist);
- /* garmin_data_p->timer.expires = jiffies + session_timeout; */
- garmin_data_p->timer.data = (unsigned long)garmin_data_p;
- garmin_data_p->timer.function = timeout_handler;
garmin_data_p->port = port;
garmin_data_p->state = 0;
garmin_data_p->flags = 0;
garmin_data_p->count = 0;
+ init_usb_anchor(&garmin_data_p->write_urbs);
usb_set_serial_port_data(port, garmin_data_p);
status = garmin_init_session(port);
+ if (status)
+ goto err_free;
+
+ return 0;
+err_free:
+ kfree(garmin_data_p);
return status;
}
@@ -1413,6 +1402,7 @@ static int garmin_port_remove(struct usb_serial_port *port)
{
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
usb_kill_urb(port->interrupt_in_urb);
del_timer_sync(&garmin_data_p->timer);
kfree(garmin_data_p);
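
The garmin_gps changes above replace the single write_urb kill with a USB anchor, so every in-flight bulk write can be cancelled at close and port-remove time. Below is a minimal sketch of the anchoring pattern only, with hypothetical demo_* names and error handling trimmed; it is not part of the patch.

	/* Sketch of anchoring asynchronous writes for later cancellation. */
	#include <linux/usb.h>

	struct demo_dev {
		struct usb_anchor write_urbs;
	};

	static void demo_init(struct demo_dev *d)
	{
		init_usb_anchor(&d->write_urbs);
	}

	static int demo_submit_write(struct demo_dev *d, struct urb *urb)
	{
		int ret;

		usb_anchor_urb(urb, &d->write_urbs);
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		if (ret)
			usb_unanchor_urb(urb);	/* drop the anchor on failure */

		return ret;
	}

	static void demo_shutdown(struct demo_dev *d)
	{
		/* Cancels every anchored URB that is still in flight. */
		usb_kill_anchored_urbs(&d->write_urbs);
	}
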
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 35cb8c0e584f..2274d9625f63 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter Generic functions
*
* Copyright (C) 2010 - 2013 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/io_16654.h b/drivers/usb/serial/io_16654.h
index a53abc9530ff..4980f72dc56f 100644
--- a/drivers/usb/serial/io_16654.h
+++ b/drivers/usb/serial/io_16654.h
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* 16654.H Definitions for 16C654 UART used on EdgePorts
*
* Copyright (C) 1998 Inside Out Networks, Inc.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
************************************************************************/
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index bdf8bd814a9a..219265ce3711 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following devices:
* Edgeport/4
* Edgeport/4t
diff --git a/drivers/usb/serial/io_edgeport.h b/drivers/usb/serial/io_edgeport.h
index ad9c1d47a619..2e7fedbaf2ff 100644
--- a/drivers/usb/serial/io_edgeport.h
+++ b/drivers/usb/serial/io_edgeport.h
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* io_edgeport.h Edgeport Linux Interface definitions
*
* Copyright (C) 2000 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
************************************************************************/
#if !defined(_IO_EDGEPORT_H_)
diff --git a/drivers/usb/serial/io_ionsp.h b/drivers/usb/serial/io_ionsp.h
index 5cc591bae54d..4b8e4823bd45 100644
--- a/drivers/usb/serial/io_ionsp.h
+++ b/drivers/usb/serial/io_ionsp.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* IONSP.H Definitions for I/O Networks Serial Protocol
*
* Copyright (C) 1997-1998 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* These definitions are used by both kernel-mode driver and the
* peripheral firmware and MUST be kept in sync.
*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 6cefb9cb133d..0fbadb37c104 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Edgeport USB Serial Converter driver
*
* Copyright (C) 2000-2002 Inside Out Networks, All rights reserved.
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following devices:
* EP/1 EP/2 EP/4 EP/21 EP/22 EP/221 EP/42 EP/421 WATCHPORT
*
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 1bd67b24f916..e53c68261017 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*****************************************************************************
*
* Copyright (C) 1997-2002 Inside Out Networks, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
* Feb-16-2001 DMI Added I2C structure definitions
* May-29-2002 gkh Ported to Linux
*
diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h
index 6f6a856bc37c..c38e87ac5ea9 100644
--- a/drivers/usb/serial/io_usbvend.h
+++ b/drivers/usb/serial/io_usbvend.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
*
* USBVEND.H Vendor-specific USB definitions
@@ -8,10 +9,6 @@
************************************************************************
*
* Copyright (C) 1998 Inside Out Networks, Inc.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*
************************************************************************/
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cde0dcdce9c4..f81746c3c26c 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Compaq iPAQ driver
*
* Copyright (C) 2001 - 2002
* Ganesh Varadarajan <ganesh@veritas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 8b1cf18a668b..d04c7cc5c1c2 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* IPWireless 3G UMTS TDD Modem driver (USB connected)
*
* Copyright (C) 2004 Roelf Diedericks <roelfd@inet.co.za>
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* All information about the device was acquired using SnoopyPro
* on MSFT's O/S, and examing the MSFT drivers' debug output
* (insanely left _on_ in the enduser version)
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index f9734a96d516..24b06c7e5e2d 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB IR Dongle driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2002 Gary Brubaker (xavyer@ix.netcom.com)
* Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This driver allows a USB IrDA device to be used as a "dumb" serial device.
* This can be useful if you do not have access to a full IrDA stack on the
* other side of the connection. If you do have an IrDA stack on both devices,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 18fc992a245f..397a8012ffa3 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Infinity Unlimited USB Phoenix driver
*
@@ -7,13 +8,7 @@
*
* Original code taken from iuutool (Copyright (C) 2006 Juan Carlos Borrás)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* And tested with help of WB Electronics
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/serial/iuu_phoenix.h b/drivers/usb/serial/iuu_phoenix.h
index b82630a3b8fd..b400b262f72e 100644
--- a/drivers/usb/serial/iuu_phoenix.h
+++ b/drivers/usb/serial/iuu_phoenix.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Infinity Unlimited USB Phoenix driver
*
@@ -6,13 +7,7 @@
*
* Original code taken from iuutool ( Copyright (C) 2006 Juan Carlos Borrás )
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* And tested with help of WB Electronics
- *
*/
#define IUU_USB_VENDOR_ID 0x104f
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5662d324edd2..d34779fe4a8d 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
Keyspan USB to Serial Converter driver
(C) Copyright (C) 2000-2001 Hugh Blemings <hugh@blemings.org>
(C) Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
See http://blemings.org/hugh/keyspan.html for more information.
Code in this driver inspired by and in a number of places taken
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 196908dd25a1..5169624d8b11 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Keyspan PDA / Xircom / Entrega Converter driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 1999, 2000 Brian Warner <warner@lothar.com>
* Copyright (C) 2000 Al Borchers <borchers@steinerpoint.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 595415e59d5d..5046ffd53cde 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* KLSI KL5KUSB105 chip RS232 converter driver
*
* Copyright (C) 2010 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2001 Utz-Uwe Haus <haus@uuhaus.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* All information about the device was acquired using SniffUSB ans snoopUSB
* on Windows98.
* It was written out of frustration with the PalmConnect USB Serial adapter
diff --git a/drivers/usb/serial/kl5kusb105.h b/drivers/usb/serial/kl5kusb105.h
index 22a90badc86b..41c9bf60fbf0 100644
--- a/drivers/usb/serial/kl5kusb105.h
+++ b/drivers/usb/serial/kl5kusb105.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for the KLSI KL5KUSB105 serial port adapter
*/
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 3024b9b25360..a31ea7e194dd 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* KOBIL USB Smart Card Terminal Driver
*
@@ -10,11 +11,6 @@
* and associated source files. Please see the usb/serial files for
* individual credits and copyrights.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and
* patience.
*
@@ -491,6 +487,7 @@ static void kobil_set_termios(struct tty_struct *tty,
break;
default:
speed = 9600;
+ /* fall through */
case 9600:
urb_val = SUSBCR_SBR_9600;
break;
diff --git a/drivers/usb/serial/kobil_sct.h b/drivers/usb/serial/kobil_sct.h
index be207f7156fe..030c1b426611 100644
--- a/drivers/usb/serial/kobil_sct.h
+++ b/drivers/usb/serial/kobil_sct.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define SUSBCRequest_SetBaudRateParityAndStopBits 1
#define SUSBCR_SBR_MASK 0xFF00
#define SUSBCR_SBR_1200 0x0100
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 70f346f1aa86..7887c312d9a9 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* MCT (Magic Control Technology Corp.) USB RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This program is largely derived from the Belkin USB Serial Adapter Driver
* (see belkin_sa.[ch]). All of the information about the device was acquired
* by using SniffUSB on Windows98. For technical details see mct_u232.h.
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index d325bb8cb583..0084edf518e8 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Definitions for MCT (Magic Control Technology) USB-RS232 Converter Driver
*
* Copyright (C) 2000 Wolfgang Grandegger (wolfgang@ces.ch)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* This driver is for the device MCT USB-RS232 Converter (25 pin, Model No.
* U232-P25) from Magic Control Technology Corp. (there is also a 9 pin
* Model No. U232-P9). See http://www.mct.com.tw/products/product_us232.html
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 14511d6a7d44..e63cea02cfd8 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
Some of this code is credited to Linux USB open source files that are
distributed with Linux.
@@ -54,21 +55,33 @@ MODULE_DEVICE_TABLE(usb, id_table);
#define UNI_CMD_OPEN 0x80
#define UNI_CMD_CLOSE 0xFF
-static inline int metrousb_is_unidirectional_mode(struct usb_serial_port *port)
+static int metrousb_is_unidirectional_mode(struct usb_serial *serial)
{
- __u16 product_id = le16_to_cpu(
- port->serial->dev->descriptor.idProduct);
+ u16 product_id = le16_to_cpu(serial->dev->descriptor.idProduct);
return product_id == FOCUS_PRODUCT_ID_UNI;
}
+static int metrousb_calc_num_ports(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds)
+{
+ if (metrousb_is_unidirectional_mode(serial)) {
+ if (epds->num_interrupt_out == 0) {
+ dev_err(&serial->interface->dev, "interrupt-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
+
+ return 1;
+}
+
static int metrousb_send_unidirectional_cmd(u8 cmd, struct usb_serial_port *port)
{
int ret;
int actual_len;
u8 *buffer_cmd = NULL;
- if (!metrousb_is_unidirectional_mode(port))
+ if (!metrousb_is_unidirectional_mode(port->serial))
return 0;
buffer_cmd = kzalloc(sizeof(cmd), GFP_KERNEL);
@@ -161,13 +174,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
unsigned long flags = 0;
int result = 0;
- /* Make sure the urb is initialized. */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt urb not initialized\n",
- __func__);
- return -ENODEV;
- }
-
/* Set the private data information for the port. */
spin_lock_irqsave(&metro_priv->lock, flags);
metro_priv->control_state = 0;
@@ -189,7 +195,7 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"%s - failed submitting interrupt in urb, error code=%d\n",
__func__, result);
- goto exit;
+ return result;
}
/* Send activate cmd to device */
@@ -198,9 +204,14 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
dev_err(&port->dev,
"%s - failed to configure device, error code=%d\n",
__func__, result);
- goto exit;
+ goto err_kill_urb;
}
-exit:
+
+ return 0;
+
+err_kill_urb:
+ usb_kill_urb(port->interrupt_in_urb);
+
return result;
}
@@ -337,7 +348,8 @@ static struct usb_serial_driver metrousb_device = {
},
.description = "Metrologic USB to Serial",
.id_table = id_table,
- .num_ports = 1,
+ .num_interrupt_in = 1,
+ .calc_num_ports = metrousb_calc_num_ports,
.open = metrousb_open,
.close = metrousb_cleanup,
.read_int_callback = metrousb_read_int_callback,
@@ -356,7 +368,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Philip Nicastro");
MODULE_AUTHOR("Aleksey Babahin <tamerlan311@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index a453965f9e9a..f4df3d5bf69c 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mos7720.c
* Controls the Moschip 7720 usb to dual port serial converter
*
* Copyright 2006 Moschip Semiconductor Tech. Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
* Developed by:
* Vijaya Kumar <vijaykumar.gn@gmail.com>
* Ajay Kumar <naanuajay@yahoo.com>
@@ -2043,4 +2040,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index e8669aae14b3..fdceb46d9fc6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1,18 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Clean ups from Moschip version and a few ioctl implementations by:
* Paul B Schroeder <pschroeder "at" uplogix "dot" com>
*
@@ -568,9 +555,9 @@ static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
@@ -578,9 +565,9 @@ static void mos7840_led_off(unsigned long arg)
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
@@ -2302,12 +2289,11 @@ static int mos7840_port_probe(struct usb_serial_port *port)
goto error;
}
- setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
- setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+ 0);
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
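
Both the garmin_gps and mos7840 hunks convert timers from the old init_timer()/setup_timer() style, which passed an unsigned long data cookie, to timer_setup() with from_timer(), which recovers the enclosing structure from the timer_list pointer handed to the callback. A minimal sketch of that conversion pattern follows; the demo_* names are hypothetical and this is not part of the patch.

	/* Sketch of the timer_setup()/from_timer() pattern used above. */
	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct demo_data {
		struct timer_list timer;
		int pending;
	};

	static void demo_timeout(struct timer_list *t)
	{
		/* Recover the enclosing structure from the embedded timer. */
		struct demo_data *d = from_timer(d, t, timer);

		d->pending = 0;
	}

	static void demo_start(struct demo_data *d)
	{
		/* Replaces init_timer() plus manual .function/.data assignment. */
		timer_setup(&d->timer, demo_timeout, 0);
		mod_timer(&d->timer, jiffies + msecs_to_jiffies(100));
	}
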
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 3aef091fe88b..2513ee902779 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* mxuport.c - MOXA UPort series driver
*
* Copyright (c) 2006 Moxa Technologies Co., Ltd.
* Copyright (c) 2013 Andrew Lunn <andrew@lunn.ch>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Supports the following Moxa USB to serial converters:
* 2 ports : UPort 1250, UPort 1250I
* 4 ports : UPort 1410, UPort 1450, UPort 1450I
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 2a97cdc078d5..20277c52dded 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Navman Serial USB driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <gregkh@suse.de>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
* TODO:
* Add termios method that uses copy_hw but also kills all echo
* flags as the navman is rx only so cannot echo.
@@ -115,4 +112,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index efcd7feed6f4..e51c9464ea42 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB ZyXEL omni.net LCD PLUS driver
*
* Copyright (C) 2013,2017 Johan Hovold <johan@kernel.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -179,4 +176,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 58657d64678b..caa0746326fd 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Opticon USB barcode to serial driver
*
@@ -5,10 +6,6 @@
* Copyright (C) 2011 Martin Jansen <martin.jansen@opticon.com>
* Copyright (C) 2008 - 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2008 - 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -420,4 +417,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ba672cf4e888..aaa7d901a06d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for GSM modems
Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
History: see the git log.
@@ -2206,4 +2203,4 @@ static void option_instat_callback(struct urb *urb)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index b11eead469ee..ae9cb15ee02d 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
*
@@ -21,10 +22,6 @@
* So, THIS CODE CAN DESTROY OTi-6858 AND ANY OTHER DEVICES, THAT ARE
* CONNECTED TO IT!
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -847,4 +844,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(OTI6858_DESCRIPTION);
MODULE_AUTHOR(OTI6858_AUTHOR);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/oti6858.h b/drivers/usb/serial/oti6858.h
index 704ac3a532b3..1226bf2347eb 100644
--- a/drivers/usb/serial/oti6858.h
+++ b/drivers/usb/serial/oti6858.h
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Ours Technology Inc. OTi-6858 USB to serial adapter driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_USB_SERIAL_OTI6858_H
#define __LINUX_USB_SERIAL_OTI6858_H
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a585b477415d..57ae832a49ff 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Prolific PL2303 USB to serial adaptor driver
*
@@ -6,10 +7,6 @@
*
* Original driver for 2.2.x by anonymous
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
@@ -1025,4 +1022,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION("Prolific PL2303 USB to serial adaptor driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 3b5a15d1dc0d..f98fd84890de 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -1,11 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Prolific PL2303 USB to serial adaptor driver header file
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#define BENQ_VENDOR_ID 0x04a5
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 6e9f8af96959..929ffba663f2 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm USB Auxiliary Serial Port driver
*
* Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Devices listed here usually provide a CDC ACM port on which normal modem
* AT commands and PPP can be used. But when that port is in-use by PPP it
* cannot be used simultaneously for status or signal strength. Instead, the
@@ -87,4 +84,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
};
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index eb9928963a53..e3892541a489 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm Serial USB driver
*
* Copyright (c) 2008 QUALCOMM Incorporated.
* Copyright (c) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
*/
#include <linux/tty.h>
@@ -148,6 +144,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
{DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
+ {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 60e17d1444c3..958e12e1e7c7 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech USB 2 devices
*
* Copyright (C) 2012 Bill Pemberton (wfp5p@virginia.edu)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- *
* These devices all have only 1 bulk in and 1 bulk out that is shared
* for all serial ports.
*
@@ -1027,4 +1023,4 @@ static struct usb_serial_driver *const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 27d7a7016298..6accbecb6318 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Safe Encapsulated USB Serial Driver
*
@@ -5,11 +6,6 @@
* Copyright (C) 2001 Lineo
* Copyright (C) 2001 Hewlett-Packard
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* By:
* Stuart Lynne <sl@lineo.com>, Tom Rushworth <tbr@lineo.com>
*/
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 4c4ac4705ac0..d189f953c891 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver for Sierra Wireless
@@ -9,10 +10,6 @@
IMPORTANT DISCLAIMER: This driver is not commercially supported by
Sierra Wireless. Use at your own risk.
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
@@ -1078,7 +1075,7 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
module_param(nmea, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nmea, "NMEA streaming");
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 5167b6564c8b..b42714855364 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* spcp8x5 USB to serial adaptor driver
*
@@ -8,11 +9,6 @@
* Original driver for 2.6.10 pl2303 driver by
* Greg Kroah-Hartman (greg@kroah.com)
* Changes for 2.6.20 by Harald Klein <hari@vt100.at>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 5aa7bbbeba3d..2083c267787b 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* usb-serial driver for Quatech SSU-100
*
@@ -576,4 +577,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 0d1727232d0c..cd2f8dc8b58c 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Symbol USB barcode to serial driver
*
* Copyright (C) 2013 Johan Hovold <jhovold@gmail.com>
* Copyright (C) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2009 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -194,4 +191,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 8fc3854e5e69..6b22857f6e52 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI 3410/5052 USB Serial Driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 2000-2002 Inside Out Networks
* Copyright (C) 2001-2002 Greg Kroah-Hartman
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* For questions or problems with this driver, contact Texas Instruments
* technical support, or Al Borchers <alborchers@steinerpoint.com>, or
* Peter Berger <pberger@brimson.com>.
diff --git a/drivers/usb/serial/upd78f0730.c b/drivers/usb/serial/upd78f0730.c
index 6819a3486e5d..1ba1401d27d7 100644
--- a/drivers/usb/serial/upd78f0730.c
+++ b/drivers/usb/serial/upd78f0730.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Electronics uPD78F0730 USB to serial converter driver
*
* Copyright (C) 2014,2016 Maksim Salau <maksim.salau@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
* Protocol of the adaptor is described in the application note U19660EJ1V0AN00
* μPD78F0730 8-bit Single-Chip Microcontroller
* USB-to-Serial Conversion Software
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index e98b6e57b703..74172fe158df 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial "Simple" driver
*
@@ -8,10 +9,6 @@
* Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
* Copyright (C) 2013 Wei Shuai <cpuwolf@gmail.com>
* Copyright (C) 2013 Linux Foundation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -134,4 +131,4 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index bb34f9f7eaf4..790e0cbe3da9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Serial Converter driver
*
@@ -6,10 +7,6 @@
* Copyright (C) 2000 Peter Berger (pberger@brimson.com)
* Copyright (C) 2000 Al Borchers (borchers@steinerpoint.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* This driver was originally based on the ACM driver by Armin Fuerst (which was
* based on a driver by Brad Keryan)
*
@@ -1201,17 +1198,6 @@ static const struct tty_operations serial_ops = {
struct tty_driver *usb_serial_tty_driver;
-/* Driver structure we register with the USB core */
-static struct usb_driver usb_serial_driver = {
- .name = "usbserial",
- .probe = usb_serial_probe,
- .disconnect = usb_serial_disconnect,
- .suspend = usb_serial_suspend,
- .resume = usb_serial_resume,
- .no_dynamic_id = 1,
- .supports_autosuspend = 1,
-};
-
static int __init usb_serial_init(void)
{
int result;
@@ -1247,13 +1233,6 @@ static int __init usb_serial_init(void)
goto exit_reg_driver;
}
- /* register the USB driver */
- result = usb_register(&usb_serial_driver);
- if (result < 0) {
- pr_err("%s - usb_register failed\n", __func__);
- goto exit_tty;
- }
-
/* register the generic driver, if we should */
result = usb_serial_generic_register();
if (result < 0) {
@@ -1264,9 +1243,6 @@ static int __init usb_serial_init(void)
return result;
exit_generic:
- usb_deregister(&usb_serial_driver);
-
-exit_tty:
tty_unregister_driver(usb_serial_tty_driver);
exit_reg_driver:
@@ -1285,7 +1261,6 @@ static void __exit usb_serial_exit(void)
usb_serial_generic_deregister();
- usb_deregister(&usb_serial_driver);
tty_unregister_driver(usb_serial_tty_driver);
put_tty_driver(usb_serial_tty_driver);
bus_unregister(&usb_serial_bus_type);
@@ -1460,4 +1435,4 @@ EXPORT_SYMBOL_GPL(usb_serial_deregister_drivers);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index 44b25c08c68a..d28dab4b9eff 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Definitions for USB serial mobile broadband cards
*/
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 12f4c5a91e62..ab5a2ac4993a 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Debug cable driver
*
* Copyright (C) 2006 Greg Kroah-Hartman <greg@kroah.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/gfp.h>
@@ -34,13 +31,13 @@ static const struct usb_device_id id_table[] = {
};
static const struct usb_device_id dbc_id_table[] = {
- { USB_DEVICE(0x1d6b, 0x0004) },
+ { USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(0x0525, 0x127a) },
- { USB_DEVICE(0x1d6b, 0x0004) },
+ { USB_DEVICE(0x1d6b, 0x0011) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
@@ -98,4 +95,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
};
module_usb_serial_driver(serial_drivers, id_table_combined);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 59bfcb3da116..107e64c42e94 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
USB Driver layer for GSM modems
Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de>
- This driver is free software; you can redistribute it and/or modify
- it under the terms of Version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org>
History: see the git log.
@@ -723,4 +720,4 @@ EXPORT_SYMBOL(usb_wwan_resume);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 9f3317a940ef..f5373ed2cd45 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB HandSpring Visor, Palm m50x, and Sony Clie driver
* (supports all of the Palm OS USB devices)
@@ -5,10 +6,6 @@
* Copyright (C) 1999 - 2004
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
@@ -579,4 +576,4 @@ module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/serial/visor.h b/drivers/usb/serial/visor.h
index 4c456dd69ce5..fe290243f1ce 100644
--- a/drivers/usb/serial/visor.h
+++ b/drivers/usb/serial/visor.h
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB HandSpring Visor driver
*
* Copyright (C) 1999 - 2003
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver.
*
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 55cebc1e6fec..1c7b46a8620c 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB ConnectTech WhiteHEAT driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 1999 - 2001
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*/
diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h
index 38065df4d2d8..72c1b0cf4063 100644
--- a/drivers/usb/serial/whiteheat.h
+++ b/drivers/usb/serial/whiteheat.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB ConnectTech WhiteHEAT driver
*
@@ -7,11 +8,6 @@
* Copyright (C) 1999, 2000
* Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
*
diff --git a/drivers/usb/serial/wishbone-serial.c b/drivers/usb/serial/wishbone-serial.c
index 4fed4a0bd702..ff4092f9b33c 100644
--- a/drivers/usb/serial/wishbone-serial.c
+++ b/drivers/usb/serial/wishbone-serial.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* USB Wishbone-Serial adapter driver
*
* Copyright (C) 2013 Wesley W. Terpstra <w.terpstra@gsi.de>
* Copyright (C) 2013 GSI Helmholtz Centre for Heavy Ion Research GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/serial/xsens_mt.c b/drivers/usb/serial/xsens_mt.c
index 3837d5113bb2..cf262c9a9638 100644
--- a/drivers/usb/serial/xsens_mt.c
+++ b/drivers/usb/serial/xsens_mt.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xsens MT USB driver
*
* Copyright (C) 2013 Xsens <info@xsens.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
@@ -69,4 +66,4 @@ module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Frans Klaver <frans.klaver@xsens.com>");
MODULE_DESCRIPTION("USB-serial driver for Xsens motion trackers");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4cd55481b309..c5126a4cd954 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the USB Mass Storage device drivers.
#
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 878b4b8761f5..900591df8bb2 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Alauda-based card readers
*
@@ -15,20 +16,6 @@
* (very old) vendor-supplied GPL sma03 driver.
*
* For protocol info, see http://alauda.sourceforge.net
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 5e4af44d7d9f..4825902377eb 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for emulating SAT (ata pass through) on devices based
* on the Cypress USB/ATA bridge supporting ATACB.
*
* Copyright (c) 2008 Matthieu Castet (castet.matthieu@free.fr)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index 723197af6ec5..09353be199be 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Datafab USB Compact Flash reader
*
@@ -18,20 +19,6 @@
*
* Other contributors:
* (c) 2002 Alan Stern <stern@rowland.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 8d20804a59e6..e5a4969d15ae 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Source Code File
@@ -27,20 +28,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 8ab73299b650..8833cd4f78b6 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
@@ -24,20 +25,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _DEBUG_H_
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 28100374f7bd..93cf57ac47d6 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1,19 +1,4 @@
-/*
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/module.h>
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index c0a5d954414b..ec4d92c92762 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Freecom USB/IDE adaptor
*
@@ -8,20 +9,6 @@
* Current development and maintenance by:
* (C) 2000 David Brown <usb-storage@davidb.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* This driver was developed with information provided in FREECOM's USB
* Programmers Reference Guide. For further information contact Freecom
* (http://www.freecom.de/)
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index d9d8c17e05d1..93a6bcf77806 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Special Initializers for certain USB Mass Storage devices
*
@@ -20,20 +21,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 039abf4d1cb7..e4cf28efb4a7 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Header file for Special Initializers for certain USB Mass Storage devices
*
@@ -20,20 +21,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "usb.h"
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 6a7720e66595..f5e4500d9970 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
*
@@ -14,20 +15,6 @@
* does implement an interface, the ATA Command Block (ATACB) which provides
* a means of passing ATA commands and ATA register accesses to a device.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* History:
*
* 2002-10-19: Removed the specialized transfer routines.
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 011e5270690a..917f170c4124 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Lexar "Jumpshot" Compact Flash reader
*
@@ -19,20 +20,6 @@
* Developed with the assistance of:
*
* (C) 2002 Alan Stern <stern@rowland.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index b05ba4929f00..edcf2be0e0eb 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Rio Karma
*
* (c) 2006 Bob Copeland <me@bobcopeland.com>
* (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index acc3d03d8c1e..39a5009a41a6 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Support for the Maxtor OneTouch USB hard drive's button
*
@@ -10,24 +11,6 @@
* Based on usbmouse.c (Vojtech Pavlik) and xpad.c (Marko Friedemann)
*
*/
-
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index 57282f12317b..7c0b05a36554 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Option High Speed Mobile Devices.
*
* (c) 2008 Dan Williams <dcbw@redhat.com>
*
* Inspiration taken from sierra_ms.c by Kevin Lloyd <klloyd@sierrawireless.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
@@ -41,7 +28,7 @@ MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
static int option_rezero(struct us_data *us)
{
- const unsigned char rezero_msg[] = {
+ static const unsigned char rezero_msg[] = {
0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -87,7 +74,7 @@ out:
static int option_inquiry(struct us_data *us)
{
- const unsigned char inquiry_msg[] = {
+ static const unsigned char inquiry_msg[] = {
0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
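Marking the fixed CBW byte templates static const moves them into read-only data, so the arrays are no longer rebuilt on the stack each time the helpers run. A rough sketch of the same pattern with illustrative names (send_fixed_command and cmd_template are hypothetical, not from this driver):

#include <linux/errno.h>
#include <linux/string.h>

static int send_fixed_command(unsigned char *buf, unsigned int buflen)
{
	/* Placed in .rodata once; nothing is copied onto the stack per call. */
	static const unsigned char cmd_template[8] = {
		0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
	};

	if (buflen < sizeof(cmd_template))
		return -EINVAL;
	memcpy(buf, cmd_template, sizeof(cmd_template));
	return 0;
}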
diff --git a/drivers/usb/storage/option_ms.h b/drivers/usb/storage/option_ms.h
index b6e448cab039..6439992184fa 100644
--- a/drivers/usb/storage/option_ms.h
+++ b/drivers/usb/storage/option_ms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _OPTION_MS_H_
#define _OPTION_MS_H_
extern int option_ms_init(struct us_data *us);
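Two spellings of the SPDX tag appear in this series. The kernel's licensing convention is that C sources carry the C++-style comment on their first line while headers carry the C-style form, which is why option_ms.h and sierra_ms.h read differently from the .c files; the headers tagged with // in other hunks here were brought into line by later cleanups. For reference:

// SPDX-License-Identifier: GPL-2.0

/* SPDX-License-Identifier: GPL-2.0 */

The first form is used at the top of .c files, the second at the top of .h files.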
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 74c38870a17e..f3f2a93f52e1 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -27,20 +28,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/highmem.h>
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index a55666880b7b..9198396e8c6e 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Protocol Functions Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _PROTOCOL_H_
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index ec83b3b5efa9..31b024441938 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
@@ -763,9 +751,9 @@ static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
}
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
{
- struct rts51x_chip *chip = (struct rts51x_chip *)data;
+ struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
struct us_data *us = chip->us;
switch (rts51x_get_stat(chip)) {
@@ -929,8 +917,7 @@ static int realtek_cr_autosuspend_setup(struct us_data *us)
us->proto_handler = rts51x_invoke_transport;
chip->timer_expires = 0;
- setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
- (unsigned long)chip);
+ timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
fw5895_init(us);
/* enable autosuspend function of the usb device */
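This hunk is the stock conversion to the timer_list-based callback API: timer_setup() replaces setup_timer(), the callback receives a struct timer_list * instead of an unsigned long cookie, and from_timer() (a container_of() wrapper) recovers the enclosing structure. A minimal sketch of the same pattern outside this driver (my_chip and friends are made-up names):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_chip {
	struct timer_list suspend_timer;
	int state;
};

static void my_suspend_timer_fn(struct timer_list *t)
{
	/* Map the timer_list back to the structure that embeds it. */
	struct my_chip *chip = from_timer(chip, t, suspend_timer);

	chip->state = 0;
}

static void my_chip_start(struct my_chip *chip)
{
	timer_setup(&chip->suspend_timer, my_suspend_timer_fn, 0);
	mod_timer(&chip->suspend_timer, jiffies + HZ);
}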
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 8cd2926fb1fe..585efd120193 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* SCSI layer glue code
@@ -28,20 +29,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index d0a331dd9bc5..bf99c6201331 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* SCSI Connecting Glue Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _SCSIGLUE_H_
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 44f8ffccd031..1cf7dbfe277c 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-09 SmartMedia reader
*
@@ -11,20 +12,6 @@
* been programmed to obey a certain limited set of SCSI commands.
* This driver translates the "real" SCSI commands to the SDDR-09 SCSI
* commands.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 147c50b3e00f..8c814b2ec9b2 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SanDisk SDDR-55 SmartMedia reader
*
@@ -7,20 +8,6 @@
*
* Current development and maintenance by:
* (c) 2002 Simon Munton
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/jiffies.h>
@@ -604,6 +591,7 @@ static unsigned long sddr55_get_capacity(struct us_data *us) {
case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
+ /* fall through */
case 0x5d: // 5d is a ROM card with pagesize 512.
return 0x00200000;
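The added /* fall through */ comment documents that the missing break is deliberate, so GCC's -Wimplicit-fallthrough (and static checkers) stay quiet about the 0x64 case dropping into the 0x5d case. A hedged illustration of the idiom, unrelated to the card-size table above:

static int decode_speed(int id)
{
	int speed = 0;

	switch (id) {
	case 2:
		speed += 100;	/* id 2 gets the bonus ... */
		/* fall through */
	case 1:
		speed += 100;	/* ... and then the common amount, like id 1 */
		break;
	default:
		speed = -1;
		break;
	}
	return speed;
}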
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 3b0294e4df93..854498e1012c 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
@@ -26,20 +27,6 @@
*
* See the Kconfig help text for a list of devices known to be supported by
* this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 9a51019ac7b2..daf62448483f 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
index bb48634ac1fc..3e9da537d54a 100644
--- a/drivers/usb/storage/sierra_ms.h
+++ b/drivers/usb/storage/sierra_ms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SIERRA_MS_H_
#define _SIERRA_MS_H_
extern int sierra_ms_init(struct us_data *us);
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index a3ccb899df60..d947957f3635 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -28,20 +29,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/sched.h>
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index dae3ecd2e6cf..f559dc575f4f 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Transport Functions Header File
@@ -21,20 +22,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _TRANSPORT_H_
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index a155cd02bce2..1fcd758a961f 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include "usb.h"
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 63cf981ed81c..5d04c40ee40a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
@@ -5,8 +6,6 @@
* Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
- *
- * Distributed under the terms of the GNU GPL, version two.
*/
#include <linux/blkdev.h>
@@ -668,6 +667,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
+ /* fall through */
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
case DMA_NONE:
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index 763bc03032a1..0ec8c99a4976 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Alauda-based card readers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ALAUDA) || \
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index e9a2eb88869a..fb99e526cd48 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for devices based on the Cypress USB/ATA bridge
* with support for ATACB
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || \
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index 5049b6bbe5d5..fdab5e7d68ca 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Datafab USB Compact Flash reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_DATAFAB) || \
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index eb06d88b41d6..2968046e7c05 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Unusual Devices File
@@ -10,20 +11,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/unusual_ene_ub6250.h b/drivers/usb/storage/unusual_ene_ub6250.h
index 5667f5d365c6..9134b91fbd73 100644
--- a/drivers/usb/storage/unusual_ene_ub6250.h
+++ b/drivers/usb/storage/unusual_ene_ub6250.h
@@ -1,20 +1,4 @@
-/*
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
+// SPDX-License-Identifier: GPL-2.0+
#if defined(CONFIG_USB_STORAGE_ENE_UB6250) || \
defined(CONFIG_USB_STORAGE_ENE_UB6250_MODULE)
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 1f5aab42ece2..949231c7a36b 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Freecom USB/IDE adaptor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_FREECOM) || \
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index 9b6862ec3d4f..d03a02cc904e 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for In-System Design, Inc. ISD200 ASIC
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ISD200) || \
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index 413e64fa6b95..c323338881ef 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_JUMPSHOT) || \
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index e6fad3aeae20..8f1eebd71d2c 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Rio Karma
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_KARMA) || \
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index 425dc22f345a..c76d4e990f7b 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for the Maxtor OneTouch USB hard drive's button
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_ONETOUCH) || \
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index 8fe624ad302a..d17cd95b55bb 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Author:
* wwang (wei_wang@realsil.com.cn)
* No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index d9d38ac4abf9..650cf2862754 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_SDDR09) || \
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index ebb1d1c6c467..e89df2cea7bd 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_SDDR55) || \
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cde115359793..d520374a824e 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Attached SCSI devices - Unusual Devices File
*
@@ -6,20 +7,6 @@
* Based on the same file for the usb-storage driver, which is:
* (c) 2000-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* (c) 2000 Adam J. Richter (adam@yggdrasil.com), Yggdrasil Computing, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 2044ad5ef5e4..05abf6870b8f 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if defined(CONFIG_USB_STORAGE_USBAT) || \
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 0dceb9fa3a06..a0c07e05a8f1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
*
@@ -30,20 +31,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef CONFIG_USB_STORAGE_DEBUG
@@ -332,7 +319,7 @@ static int usb_stor_control_thread(void * __us)
/* When we are called with no command pending, we're done */
srb = us->srb;
- if (us->srb == NULL) {
+ if (srb == NULL) {
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);
usb_stor_dbg(us, "-- exiting\n");
@@ -341,7 +328,7 @@ static int usb_stor_control_thread(void * __us)
/* has the command timed out *already* ? */
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
- us->srb->result = DID_ABORT << 16;
+ srb->result = DID_ABORT << 16;
goto SkipForAbort;
}
@@ -351,35 +338,35 @@ static int usb_stor_control_thread(void * __us)
* reject the command if the direction indicator
* is UNKNOWN
*/
- if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+ if (srb->sc_data_direction == DMA_BIDIRECTIONAL) {
usb_stor_dbg(us, "UNKNOWN data direction\n");
- us->srb->result = DID_ERROR << 16;
+ srb->result = DID_ERROR << 16;
}
/*
* reject if target != 0 or if LUN is higher than
* the maximum known LUN
*/
- else if (us->srb->device->id &&
+ else if (srb->device->id &&
!(us->fflags & US_FL_SCM_MULT_TARG)) {
usb_stor_dbg(us, "Bad target number (%d:%llu)\n",
- us->srb->device->id,
- us->srb->device->lun);
- us->srb->result = DID_BAD_TARGET << 16;
+ srb->device->id,
+ srb->device->lun);
+ srb->result = DID_BAD_TARGET << 16;
}
- else if (us->srb->device->lun > us->max_lun) {
+ else if (srb->device->lun > us->max_lun) {
usb_stor_dbg(us, "Bad LUN (%d:%llu)\n",
- us->srb->device->id,
- us->srb->device->lun);
- us->srb->result = DID_BAD_TARGET << 16;
+ srb->device->id,
+ srb->device->lun);
+ srb->result = DID_BAD_TARGET << 16;
}
/*
* Handle those devices which need us to fake
* their inquiry data
*/
- else if ((us->srb->cmnd[0] == INQUIRY) &&
+ else if ((srb->cmnd[0] == INQUIRY) &&
(us->fflags & US_FL_FIX_INQUIRY)) {
unsigned char data_ptr[36] = {
0x00, 0x80, 0x02, 0x02,
@@ -387,13 +374,13 @@ static int usb_stor_control_thread(void * __us)
usb_stor_dbg(us, "Faking INQUIRY command\n");
fill_inquiry_response(us, data_ptr, 36);
- us->srb->result = SAM_STAT_GOOD;
+ srb->result = SAM_STAT_GOOD;
}
/* we've got a command, let's do it! */
else {
- US_DEBUG(usb_stor_show_command(us, us->srb));
- us->proto_handler(us->srb, us);
+ US_DEBUG(usb_stor_show_command(us, srb));
+ us->proto_handler(srb, us);
usb_mark_last_busy(us->pusb_dev);
}
@@ -401,7 +388,7 @@ static int usb_stor_control_thread(void * __us)
scsi_lock(host);
/* was the command aborted? */
- if (us->srb->result == DID_ABORT << 16) {
+ if (srb->result == DID_ABORT << 16) {
SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
srb = NULL; /* Don't call srb->scsi_done() */
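The control-thread changes above are a readability refactor: us->srb is read once into a local srb pointer while the host lock is held, and all later accesses go through that local copy, making it explicit that the command being processed does not change underneath the thread. The shape of the refactor, reduced to a hedged sketch with invented types:

struct example_cmd { int result; };
struct example_dev { struct example_cmd *srb; };

static void handle_one_command(struct example_dev *us)
{
	struct example_cmd *srb = us->srb;	/* snapshot the shared field once */

	if (!srb)
		return;				/* nothing pending */
	srb->result = 0;			/* work only through the local pointer */
}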
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 8fae28b40bb4..90133e16bec5 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage compliant devices
* Main Header File
@@ -24,20 +25,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _USB_H_
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 499669bcf700..83ad01747eed 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for USB Mass Storage devices
* Usual Tables File for usb-storage and libusual
@@ -6,20 +7,6 @@
*
* Please see http://www.one-eyed-alien.net/~mdharm/linux-usb for more
* information about this driver.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index bc1b7745f1d4..465d7da849c3 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -4,6 +4,18 @@ menu "USB Power Delivery and Type-C drivers"
config TYPEC
tristate
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select TYPEC
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+source "drivers/usb/typec/fusb302/Kconfig"
+
config TYPEC_WCOVE
tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
depends on ACPI
@@ -19,6 +31,19 @@ config TYPEC_WCOVE
To compile this driver as module, choose M here: the module will be
called typec_wcove
+endif
+
source "drivers/usb/typec/ucsi/Kconfig"
+config TYPEC_TPS6598X
+ tristate "TI TPS6598x USB Power Delivery controller driver"
+ depends on I2C
+ select TYPEC
+ help
+ Say Y or M here if your system has a TI TPS65982 or TPS65983 USB Power
+ Delivery controller.
+
+ If you choose to build this driver as a dynamically linked module, the
+ module will be called tps6598x.ko.
+
endmenu
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index bc214f15f1b5..bb3138a6eaac 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-y += fusb302/
obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
+obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
diff --git a/drivers/staging/typec/fusb302/Kconfig b/drivers/usb/typec/fusb302/Kconfig
index 48a4f2fcee03..48a4f2fcee03 100644
--- a/drivers/staging/typec/fusb302/Kconfig
+++ b/drivers/usb/typec/fusb302/Kconfig
diff --git a/drivers/staging/typec/fusb302/Makefile b/drivers/usb/typec/fusb302/Makefile
index 207efa5fbab8..3b51b33631a0 100644
--- a/drivers/staging/typec/fusb302/Makefile
+++ b/drivers/usb/typec/fusb302/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/staging/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index fc6a3cf74eb3..72cb060b3fca 100644
--- a/drivers/staging/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2016-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Fairchild FUSB302 Type-C Chip Driver
*/
@@ -37,11 +28,11 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/pd.h>
#include <linux/workqueue.h>
#include "fusb302_reg.h"
-#include "../tcpm.h"
-#include "../pd.h"
/*
* When the device is SNK, BC_LVL interrupt is used to monitor cc pins
diff --git a/drivers/staging/typec/fusb302/fusb302_reg.h b/drivers/usb/typec/fusb302/fusb302_reg.h
index 0682e63de773..00b39d365478 100644
--- a/drivers/staging/typec/fusb302/fusb302_reg.h
+++ b/drivers/usb/typec/fusb302/fusb302_reg.h
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2016-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Fairchild FUSB302 Type-C Chip Driver
*/
diff --git a/drivers/staging/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 8af62e74d54c..c166fc77dfb8 100644
--- a/drivers/staging/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -1,16 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2015-2017 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* USB Power Delivery protocol stack.
*/
@@ -26,14 +17,13 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/pd_bdo.h>
+#include <linux/usb/pd_vdo.h>
+#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
#include <linux/workqueue.h>
-#include "pd.h"
-#include "pd_vdo.h"
-#include "pd_bdo.h"
-#include "tcpm.h"
-
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
S(DRP_TOGGLING), \
@@ -908,27 +898,6 @@ static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
memset(&port->mode_data, 0, sizeof(port->mode_data));
-#if 0 /* Not really a match */
- switch (PD_IDH_PTYPE(vdo)) {
- case IDH_PTYPE_UNDEF:
- port->partner.type = TYPEC_PARTNER_NONE; /* no longer exists */
- break;
- case IDH_PTYPE_HUB:
- break;
- case IDH_PTYPE_PERIPH:
- break;
- case IDH_PTYPE_PCABLE:
- break;
- case IDH_PTYPE_ACABLE:
- break;
- case IDH_PTYPE_AMA:
- port->partner.type = TYPEC_PARTNER_ALTMODE;
- break;
- default:
- break;
- }
-#endif
-
port->partner_ident.id_header = vdo;
port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
port->partner_ident.product = product;
@@ -1008,7 +977,7 @@ static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
}
port->partner_altmode[pmdata->altmodes] =
typec_partner_register_altmode(port->partner, paltmode);
- if (port->partner_altmode[pmdata->altmodes] == NULL) {
+ if (!port->partner_altmode[pmdata->altmodes]) {
tcpm_log(port,
"Failed to register alternate modes for SVID 0x%04x",
paltmode->svid);
@@ -1103,11 +1072,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
rlen = 1;
} else {
-#if 0
- response[0] = pd_dfp_enter_mode(port, 0, 0);
- if (response[0])
- rlen = 1;
-#endif
+ /* enter alternate mode if/when implemented */
}
break;
case CMD_ENTER_MODE:
@@ -1145,10 +1110,6 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
if (PD_VDO_SVDM(p0))
rlen = tcpm_pd_svdm(port, payload, cnt, response);
-#if 0
- else
- rlen = tcpm_pd_custom_vdm(port, cnt, payload, response);
-#endif
if (rlen > 0) {
tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
@@ -2442,7 +2403,6 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_STARTUP, 0);
break;
case SNK_STARTUP:
- /* XXX: callback into infrastructure */
opmode = tcpm_get_pwr_opmode(port->polarity ?
port->cc2 : port->cc1);
typec_set_pwr_opmode(port->typec_port, opmode);
@@ -3589,11 +3549,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
port->port_type = tcpc->config->type;
- /*
- * TODO:
- * - alt_modes, set_alt_mode
- * - {debug,audio}_accessory
- */
port->typec_port = typec_register_port(port->dev, &port->typec_caps);
if (!port->typec_port) {
@@ -3638,6 +3593,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
typec_unregister_port(port->typec_port);
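
Note on the relocation: with tcpm.c promoted out of staging, low-level Type-C port
controller drivers now build against the public <linux/usb/tcpm.h> and <linux/usb/pd.h>
headers instead of the old relative includes. A condensed sketch of the registration
pattern that both the relocated fusb302 driver and the reworked typec_wcove driver
below follow (my_get_vbus and my_tcpc are placeholder names; most mandatory tcpc_dev
operations are elided here, see <linux/usb/tcpm.h> for the full set):

    #include <linux/usb/tcpm.h>

    static int my_get_vbus(struct tcpc_dev *tcpc)
    {
            return 0;       /* report the current VBUS state from the hardware */
    }

    static struct tcpc_dev my_tcpc = {
            .get_vbus = my_get_vbus,
            /* .init, .set_cc, .get_cc, .set_polarity, .set_vconn, .set_vbus,
             * .set_pd_rx, .set_roles, .pd_transmit, .start_drp_toggling,
             * .config, ...                                                   */
    };

    /* struct tcpm_port *port = tcpm_register_port(dev, &my_tcpc);
     * ...
     * tcpm_unregister_port(port);    <- now resets the port state first */
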
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
new file mode 100644
index 000000000000..2719f5d382f7
--- /dev/null
+++ b/drivers/usb/typec/tps6598x.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for TI TPS6598x USB Power Delivery controller family
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/usb/typec.h>
+
+/* Register offsets */
+#define TPS_REG_CMD1 0x08
+#define TPS_REG_DATA1 0x09
+#define TPS_REG_INT_EVENT1 0x14
+#define TPS_REG_INT_EVENT2 0x15
+#define TPS_REG_INT_MASK1 0x16
+#define TPS_REG_INT_MASK2 0x17
+#define TPS_REG_INT_CLEAR1 0x18
+#define TPS_REG_INT_CLEAR2 0x19
+#define TPS_REG_STATUS 0x1a
+#define TPS_REG_SYSTEM_CONF 0x28
+#define TPS_REG_CTRL_CONF 0x29
+#define TPS_REG_POWER_STATUS 0x3f
+#define TPS_REG_RX_IDENTITY_SOP 0x48
+
+/* TPS_REG_INT_* bits */
+#define TPS_REG_INT_PLUG_EVENT BIT(3)
+
+/* TPS_REG_STATUS bits */
+#define TPS_STATUS_PLUG_PRESENT BIT(0)
+#define TPS_STATUS_ORIENTATION BIT(4)
+#define TPS_STATUS_PORTROLE(s) (!!((s) & BIT(5)))
+#define TPS_STATUS_DATAROLE(s) (!!((s) & BIT(6)))
+#define TPS_STATUS_VCONN(s) (!!((s) & BIT(7)))
+
+/* TPS_REG_SYSTEM_CONF bits */
+#define TPS_SYSCONF_PORTINFO(c) ((c) & 3)
+
+enum {
+ TPS_PORTINFO_SINK,
+ TPS_PORTINFO_SINK_ACCESSORY,
+ TPS_PORTINFO_DRP_UFP,
+ TPS_PORTINFO_DRP_UFP_DRD,
+ TPS_PORTINFO_DRP_DFP,
+ TPS_PORTINFO_DRP_DFP_DRD,
+ TPS_PORTINFO_SOURCE,
+};
+
+/* TPS_REG_POWER_STATUS bits */
+#define TPS_POWER_STATUS_SOURCESINK BIT(1)
+#define TPS_POWER_STATUS_PWROPMODE(p) (((p) & GENMASK(3, 2)) >> 2)
+
+/* TPS_REG_RX_IDENTITY_SOP */
+struct tps6598x_rx_identity_reg {
+ u8 status;
+ struct usb_pd_identity identity;
+ u32 vdo[3];
+} __packed;
+
+/* Standard Task return codes */
+#define TPS_TASK_TIMEOUT 1
+#define TPS_TASK_REJECTED 3
+
+/* Unrecognized commands will be replaced with "!CMD" */
+#define INVALID_CMD(_cmd_) (_cmd_ == 0x444d4321)
+
+struct tps6598x {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock; /* device lock */
+
+ struct typec_port *port;
+ struct typec_partner *partner;
+ struct usb_pd_identity partner_identity;
+ struct typec_capability typec_cap;
+};
+
+static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u16));
+}
+
+static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u32));
+}
+
+static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
+{
+ return regmap_raw_read(tps->regmap, reg, val, sizeof(u64));
+}
+
+static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u16));
+}
+
+static inline int tps6598x_write32(struct tps6598x *tps, u8 reg, u32 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u32));
+}
+
+static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
+{
+ return regmap_raw_write(tps->regmap, reg, &val, sizeof(u64));
+}
+
+static inline int
+tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
+{
+ return regmap_raw_write(tps->regmap, reg, val, sizeof(u32));
+}
+
+static int tps6598x_read_partner_identity(struct tps6598x *tps)
+{
+ struct tps6598x_rx_identity_reg id;
+ int ret;
+
+ ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP,
+ &id, sizeof(id));
+ if (ret)
+ return ret;
+
+ tps->partner_identity = id.identity;
+
+ return 0;
+}
+
+static int tps6598x_connect(struct tps6598x *tps, u32 status)
+{
+ struct typec_partner_desc desc;
+ enum typec_pwr_opmode mode;
+ u16 pwr_status;
+ int ret;
+
+ if (tps->partner)
+ return 0;
+
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
+ if (ret < 0)
+ return ret;
+
+ mode = TPS_POWER_STATUS_PWROPMODE(pwr_status);
+
+ desc.usb_pd = mode == TYPEC_PWR_MODE_PD;
+ desc.accessory = TYPEC_ACCESSORY_NONE; /* XXX: handle accessories */
+ desc.identity = NULL;
+
+ if (desc.usb_pd) {
+ ret = tps6598x_read_partner_identity(tps);
+ if (ret)
+ return ret;
+ desc.identity = &tps->partner_identity;
+ }
+
+ tps->partner = typec_register_partner(tps->port, &desc);
+ if (!tps->partner)
+ return -ENODEV;
+
+ typec_set_pwr_opmode(tps->port, mode);
+ typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
+ typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
+ typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+
+ if (desc.identity)
+ typec_partner_set_identity(tps->partner);
+
+ return 0;
+}
+
+static void tps6598x_disconnect(struct tps6598x *tps, u32 status)
+{
+ typec_unregister_partner(tps->partner);
+ tps->partner = NULL;
+ typec_set_pwr_opmode(tps->port, TYPEC_PWR_MODE_USB);
+ typec_set_pwr_role(tps->port, TPS_STATUS_PORTROLE(status));
+ typec_set_vconn_role(tps->port, TPS_STATUS_VCONN(status));
+ typec_set_data_role(tps->port, TPS_STATUS_DATAROLE(status));
+}
+
+static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
+ size_t in_len, u8 *in_data,
+ size_t out_len, u8 *out_data)
+{
+ unsigned long timeout;
+ u32 val;
+ int ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
+ if (ret)
+ return ret;
+ if (val && !INVALID_CMD(val))
+ return -EBUSY;
+
+ if (in_len) {
+ ret = regmap_raw_write(tps->regmap, TPS_REG_DATA1,
+ in_data, in_len);
+ if (ret)
+ return ret;
+ }
+
+ ret = tps6598x_write_4cc(tps, TPS_REG_CMD1, cmd);
+ if (ret < 0)
+ return ret;
+
+ /* XXX: Using 1s for now, but it may not be enough for every command. */
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ do {
+ ret = tps6598x_read32(tps, TPS_REG_CMD1, &val);
+ if (ret)
+ return ret;
+ if (INVALID_CMD(val))
+ return -EINVAL;
+
+ if (time_is_before_jiffies(timeout))
+ return -ETIMEDOUT;
+ } while (val);
+
+ if (out_len) {
+ ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1,
+ out_data, out_len);
+ if (ret)
+ return ret;
+ val = out_data[0];
+ } else {
+ ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val);
+ if (ret)
+ return ret;
+ }
+
+ switch (val) {
+ case TPS_TASK_TIMEOUT:
+ return -ETIMEDOUT;
+ case TPS_TASK_REJECTED:
+ return -EPERM;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+tps6598x_dr_set(const struct typec_capability *cap, enum typec_data_role role)
+{
+ struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
+ const char *cmd = (role == TYPEC_DEVICE) ? "SWUF" : "SWDF";
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
+ if (ret)
+ goto out_unlock;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret)
+ goto out_unlock;
+
+ if (role != TPS_STATUS_DATAROLE(status)) {
+ ret = -EPROTO;
+ goto out_unlock;
+ }
+
+ typec_set_data_role(tps->port, role);
+
+out_unlock:
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+
+static int
+tps6598x_pr_set(const struct typec_capability *cap, enum typec_role role)
+{
+ struct tps6598x *tps = container_of(cap, struct tps6598x, typec_cap);
+ const char *cmd = (role == TYPEC_SINK) ? "SWSk" : "SWSr";
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_exec_cmd(tps, cmd, 0, NULL, 0, NULL);
+ if (ret)
+ goto out_unlock;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret)
+ goto out_unlock;
+
+ if (role != TPS_STATUS_PORTROLE(status)) {
+ ret = -EPROTO;
+ goto out_unlock;
+ }
+
+ typec_set_pwr_role(tps->port, role);
+
+out_unlock:
+ mutex_unlock(&tps->lock);
+
+ return ret;
+}
+
+static irqreturn_t tps6598x_interrupt(int irq, void *data)
+{
+ struct tps6598x *tps = data;
+ u64 event1;
+ u64 event2;
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1);
+ ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read events\n", __func__);
+ goto err_unlock;
+ }
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read status\n", __func__);
+ goto err_clear_ints;
+ }
+
+ /* Handle plug insert or removal */
+ if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT) {
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+ dev_err(tps->dev,
+ "failed to register partner\n");
+ } else {
+ tps6598x_disconnect(tps, status);
+ }
+ }
+
+err_clear_ints:
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2);
+
+err_unlock:
+ mutex_unlock(&tps->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config tps6598x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7F,
+};
+
+static int tps6598x_probe(struct i2c_client *client)
+{
+ struct tps6598x *tps;
+ u32 status;
+ u32 conf;
+ u32 vid;
+ int ret;
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ mutex_init(&tps->lock);
+ tps->dev = &client->dev;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps6598x_regmap_config);
+ if (IS_ERR(tps->regmap))
+ return PTR_ERR(tps->regmap);
+
+ ret = tps6598x_read32(tps, 0, &vid);
+ if (ret < 0)
+ return ret;
+ if (!vid)
+ return -ENODEV;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
+ if (ret < 0)
+ return ret;
+
+ switch (TPS_SYSCONF_PORTINFO(conf)) {
+ case TPS_PORTINFO_SINK_ACCESSORY:
+ case TPS_PORTINFO_SINK:
+ tps->typec_cap.type = TYPEC_PORT_UFP;
+ break;
+ case TPS_PORTINFO_DRP_UFP_DRD:
+ case TPS_PORTINFO_DRP_DFP_DRD:
+ tps->typec_cap.dr_set = tps6598x_dr_set;
+ /* fall through */
+ case TPS_PORTINFO_DRP_UFP:
+ case TPS_PORTINFO_DRP_DFP:
+ tps->typec_cap.pr_set = tps6598x_pr_set;
+ tps->typec_cap.type = TYPEC_PORT_DRP;
+ break;
+ case TPS_PORTINFO_SOURCE:
+ tps->typec_cap.type = TYPEC_PORT_DFP;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ tps->typec_cap.revision = USB_TYPEC_REV_1_2;
+ tps->typec_cap.pd_revision = 0x200;
+ tps->typec_cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+
+ tps->port = typec_register_port(&client->dev, &tps->typec_cap);
+ if (!tps->port)
+ return -ENODEV;
+
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+ dev_err(&client->dev, "failed to register partner\n");
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ tps6598x_interrupt,
+ IRQF_SHARED | IRQF_ONESHOT,
+ dev_name(&client->dev), tps);
+ if (ret) {
+ tps6598x_disconnect(tps, 0);
+ typec_unregister_port(tps->port);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, tps);
+
+ return 0;
+}
+
+static int tps6598x_remove(struct i2c_client *client)
+{
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ tps6598x_disconnect(tps, 0);
+ typec_unregister_port(tps->port);
+
+ return 0;
+}
+
+static const struct acpi_device_id tps6598x_acpi_match[] = {
+ { "INT3515", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tps6598x_acpi_match);
+
+static struct i2c_driver tps6598x_i2c_driver = {
+ .driver = {
+ .name = "tps6598x",
+ .acpi_match_table = tps6598x_acpi_match,
+ },
+ .probe_new = tps6598x_probe,
+ .remove = tps6598x_remove,
+};
+module_i2c_driver(tps6598x_i2c_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI TPS6598x USB Power Delivery Controller Driver");
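
The TPS6598x task interface above is driven by 4-character ASCII command codes
written to TPS_REG_CMD1; the controller overwrites an unrecognized code with the
string "!CMD", which is what INVALID_CMD() matches when the register is read back
as a little-endian 32-bit value. A small illustration of the call pattern (the
SWSk invocation simply mirrors tps6598x_pr_set() in the new driver):

    #include <asm/unaligned.h>

    static const u8 invalid_cmd[4] = { '!', 'C', 'M', 'D' };
    /* get_unaligned_le32(invalid_cmd) == 0x444d4321, the INVALID_CMD() value */

    /* A power-role swap to sink goes through the same register:
     *
     *         ret = tps6598x_exec_cmd(tps, "SWSk", 0, NULL, 0, NULL);
     *
     * which polls TPS_REG_CMD1 (capped at 1 s) until the command word reads
     * back as zero, then checks the task result code in TPS_REG_DATA1.      */
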
diff --git a/drivers/usb/typec/typec.c b/drivers/usb/typec/typec.c
index 24e355ba109d..735726ced602 100644
--- a/drivers/usb/typec/typec.c
+++ b/drivers/usb/typec/typec.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector Class
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index e9c4e784a9cb..a8d479eb221a 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -1,24 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
*
* Copyright (C) 2017 Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/usb/tcpm.h>
#include <linux/interrupt.h>
-#include <linux/usb/typec.h>
#include <linux/platform_device.h>
#include <linux/mfd/intel_soc_pmic.h>
/* Register offsets */
#define WCOVE_CHGRIRQ0 0x4e09
-#define WCOVE_PHYCTRL 0x5e07
#define USBC_CONTROL1 0x7001
#define USBC_CONTROL2 0x7002
@@ -28,22 +24,57 @@
#define USBC_STATUS1 0x7007
#define USBC_STATUS2 0x7008
#define USBC_STATUS3 0x7009
+#define USBC_CC1 0x700a
+#define USBC_CC2 0x700b
+#define USBC_CC1_STATUS 0x700c
+#define USBC_CC2_STATUS 0x700d
#define USBC_IRQ1 0x7015
#define USBC_IRQ2 0x7016
#define USBC_IRQMASK1 0x7017
#define USBC_IRQMASK2 0x7018
+#define USBC_PDCFG2 0x701a
+#define USBC_PDCFG3 0x701b
+#define USBC_PDSTATUS 0x701c
+#define USBC_RXSTATUS 0x701d
+#define USBC_RXINFO 0x701e
+#define USBC_TXCMD 0x701f
+#define USBC_TXINFO 0x7020
+#define USBC_RX_DATA 0x7028
+#define USBC_TX_DATA 0x7047
/* Register bits */
-#define USBC_CONTROL1_MODE_DRP(r) (((r) & ~0x7) | 4)
+#define USBC_CONTROL1_MODE_MASK 0x3
+#define USBC_CONTROL1_MODE_SNK 0
+#define USBC_CONTROL1_MODE_SNKACC 1
+#define USBC_CONTROL1_MODE_SRC 2
+#define USBC_CONTROL1_MODE_SRCACC 3
+#define USBC_CONTROL1_MODE_DRP 4
+#define USBC_CONTROL1_MODE_DRPACC 5
+#define USBC_CONTROL1_MODE_TEST 7
+#define USBC_CONTROL1_CURSRC_MASK 0xc
+#define USBC_CONTROL1_CURSRC_UA_0 (0 << 3)
+#define USBC_CONTROL1_CURSRC_UA_80 (1 << 3)
+#define USBC_CONTROL1_CURSRC_UA_180 (2 << 3)
+#define USBC_CONTROL1_CURSRC_UA_330 (3 << 3)
+#define USBC_CONTROL1_DRPTOGGLE_RANDOM 0xe0
#define USBC_CONTROL2_UNATT_SNK BIT(0)
#define USBC_CONTROL2_UNATT_SRC BIT(1)
#define USBC_CONTROL2_DIS_ST BIT(2)
+#define USBC_CONTROL3_DET_DIS BIT(0)
#define USBC_CONTROL3_PD_DIS BIT(1)
+#define USBC_CONTROL3_RESETPHY BIT(2)
+#define USBC_CC_CTRL_PU_EN BIT(0)
#define USBC_CC_CTRL_VCONN_EN BIT(1)
+#define USBC_CC_CTRL_TX_EN BIT(2)
+#define USBC_CC_CTRL_PD_EN BIT(3)
+#define USBC_CC_CTRL_CDET_EN BIT(4)
+#define USBC_CC_CTRL_RDET_EN BIT(5)
+#define USBC_CC_CTRL_ADC_EN BIT(6)
+#define USBC_CC_CTRL_VBUSOK BIT(7)
#define USBC_STATUS1_DET_ONGOING BIT(6)
#define USBC_STATUS1_RSLT(r) ((r) & 0xf)
@@ -61,6 +92,15 @@
#define USBC_STATUS2_VBUS_REQ BIT(5)
+#define UCSC_CC_STATUS_SNK_RP BIT(0)
+#define UCSC_CC_STATUS_PWRDEFSNK BIT(1)
+#define UCSC_CC_STATUS_PWR_1P5A_SNK BIT(2)
+#define UCSC_CC_STATUS_PWR_3A_SNK BIT(3)
+#define UCSC_CC_STATUS_SRC_RP BIT(4)
+#define UCSC_CC_STATUS_RX(r) (((r) >> 5) & 0x3)
+#define USBC_CC_STATUS_RD 1
+#define USBC_CC_STATUS_RA 2
+
#define USBC_IRQ1_ADCDONE1 BIT(2)
#define USBC_IRQ1_OVERTEMP BIT(1)
#define USBC_IRQ1_SHORT BIT(0)
@@ -79,15 +119,44 @@
USBC_IRQ2_RX_HR | USBC_IRQ2_RX_CR | \
USBC_IRQ2_TX_SUCCESS | USBC_IRQ2_TX_FAIL)
+#define USBC_PDCFG2_SOP BIT(0)
+#define USBC_PDCFG2_SOP_P BIT(1)
+#define USBC_PDCFG2_SOP_PP BIT(2)
+#define USBC_PDCFG2_SOP_P_DEBUG BIT(3)
+#define USBC_PDCFG2_SOP_PP_DEBUG BIT(4)
+
+#define USBC_PDCFG3_DATAROLE_SHIFT 1
+#define USBC_PDCFG3_SOP_SHIFT 2
+
+#define USBC_RXSTATUS_RXCLEAR BIT(0)
+#define USBC_RXSTATUS_RXDATA BIT(7)
+
+#define USBC_RXINFO_RXBYTES(i) (((i) >> 3) & 0x1f)
+
+#define USBC_TXCMD_BUF_RDY BIT(0)
+#define USBC_TXCMD_START BIT(1)
+#define USBC_TXCMD_NOP (0 << 5)
+#define USBC_TXCMD_MSG (1 << 5)
+#define USBC_TXCMD_CR (2 << 5)
+#define USBC_TXCMD_HR (3 << 5)
+#define USBC_TXCMD_BIST (4 << 5)
+
+#define USBC_TXINFO_RETRIES(d) (d << 3)
+
struct wcove_typec {
struct mutex lock; /* device lock */
struct device *dev;
struct regmap *regmap;
- struct typec_port *port;
- struct typec_capability cap;
- struct typec_partner *partner;
+ guid_t guid;
+
+ bool vbus;
+
+ struct tcpc_dev tcpc;
+ struct tcpm_port *tcpm;
};
+#define tcpc_to_wcove(_tcpc_) container_of(_tcpc_, struct wcove_typec, tcpc)
+
enum wcove_typec_func {
WCOVE_FUNC_DRIVE_VBUS = 1,
WCOVE_FUNC_ORIENTATION,
@@ -105,8 +174,7 @@ enum wcove_typec_role {
WCOVE_ROLE_DEVICE,
};
-static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
- 0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+#define WCOVE_DSM_UUID "482383f0-2876-4e49-8685-db66211af037"
static int wcove_typec_func(struct wcove_typec *wcove,
enum wcove_typec_func func, int param)
@@ -118,7 +186,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
tmp.type = ACPI_TYPE_INTEGER;
tmp.integer.value = param;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &wcove->guid, 1, func,
&argv4);
if (!obj) {
dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@@ -129,158 +197,349 @@ static int wcove_typec_func(struct wcove_typec *wcove,
return 0;
}
-static irqreturn_t wcove_typec_irq(int irq, void *data)
+static int wcove_init(struct tcpc_dev *tcpc)
{
- enum typec_role role = TYPEC_SINK;
- struct typec_partner_desc partner;
- struct wcove_typec *wcove = data;
- unsigned int cc1_ctrl;
- unsigned int cc2_ctrl;
- unsigned int cc_irq1;
- unsigned int cc_irq2;
- unsigned int status1;
- unsigned int status2;
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
int ret;
- mutex_lock(&wcove->lock);
-
- ret = regmap_read(wcove->regmap, USBC_IRQ1, &cc_irq1);
+ /* Unmask everything */
+ ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
if (ret)
- goto err;
+ return ret;
- ret = regmap_read(wcove->regmap, USBC_IRQ2, &cc_irq2);
- if (ret)
- goto err;
+ return regmap_write(wcove->regmap, USBC_IRQMASK2, 0);
+}
- ret = regmap_read(wcove->regmap, USBC_STATUS1, &status1);
- if (ret)
- goto err;
+static int wcove_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int cc1ctrl;
+ int ret;
- ret = regmap_read(wcove->regmap, USBC_STATUS2, &status2);
+ ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
if (ret)
- goto err;
+ return ret;
- ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1_ctrl);
- if (ret)
- goto err;
+ wcove->vbus = !!(cc1ctrl & USBC_CC_CTRL_VBUSOK);
- ret = regmap_read(wcove->regmap, USBC_CC2_CTRL, &cc2_ctrl);
- if (ret)
- goto err;
+ return wcove->vbus;
+}
- if (cc_irq1) {
- if (cc_irq1 & USBC_IRQ1_OVERTEMP)
- dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
- if (cc_irq1 & USBC_IRQ1_SHORT)
- dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
- ret = regmap_write(wcove->regmap, USBC_IRQ1, cc_irq1);
- if (ret)
- goto err;
- }
+static int wcove_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
- if (cc_irq2) {
- ret = regmap_write(wcove->regmap, USBC_IRQ2, cc_irq2);
- if (ret)
- goto err;
- /*
- * Ignoring any PD communication interrupts until the PD support
- * is available
- */
- if (cc_irq2 & ~USBC_IRQ2_CC_CHANGE) {
- dev_WARN(wcove->dev, "USB PD handling missing\n");
- goto err;
+ return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS, on);
+}
+
+static int wcove_set_vconn(struct tcpc_dev *tcpc, bool on)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+
+ return wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, on);
+}
+
+static enum typec_cc_status wcove_to_typec_cc(unsigned int cc)
+{
+ if (cc & UCSC_CC_STATUS_SNK_RP) {
+ if (cc & UCSC_CC_STATUS_PWRDEFSNK)
+ return TYPEC_CC_RP_DEF;
+ else if (cc & UCSC_CC_STATUS_PWR_1P5A_SNK)
+ return TYPEC_CC_RP_1_5;
+ else if (cc & UCSC_CC_STATUS_PWR_3A_SNK)
+ return TYPEC_CC_RP_3_0;
+ } else {
+ switch (UCSC_CC_STATUS_RX(cc)) {
+ case USBC_CC_STATUS_RD:
+ return TYPEC_CC_RD;
+ case USBC_CC_STATUS_RA:
+ return TYPEC_CC_RA;
+ default:
+ break;
}
}
+ return TYPEC_CC_OPEN;
+}
- if (status1 & USBC_STATUS1_DET_ONGOING)
- goto out;
+static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int cc1_status;
+ unsigned int cc2_status;
+ int ret;
- if (USBC_STATUS1_RSLT(status1) == USBC_RSLT_NOTHING) {
- if (wcove->partner) {
- typec_unregister_partner(wcove->partner);
- wcove->partner = NULL;
- }
+ ret = regmap_read(wcove->regmap, USBC_CC1_STATUS, &cc1_status);
+ if (ret)
+ return ret;
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_NORMAL);
+ ret = regmap_read(wcove->regmap, USBC_CC2_STATUS, &cc2_status);
+ if (ret)
+ return ret;
- /* This makes sure the device controller is disconnected */
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
+ *cc1 = wcove_to_typec_cc(cc1_status);
+ *cc2 = wcove_to_typec_cc(cc2_status);
- /* Port to default role */
- typec_set_data_role(wcove->port, TYPEC_DEVICE);
- typec_set_pwr_role(wcove->port, TYPEC_SINK);
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+ return 0;
+}
- goto out;
- }
+static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+{
+ /* XXX: Relying on the HW FSM to configure things correctly for now */
+ return 0;
+}
- if (wcove->partner)
- goto out;
+static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
- switch (USBC_STATUS1_ORIENT(status1)) {
- case USBC_ORIENT_NORMAL:
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_NORMAL);
- break;
- case USBC_ORIENT_REVERSE:
- wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION,
- WCOVE_ORIENTATION_REVERSE);
- default:
- break;
+ return wcove_typec_func(wcove, WCOVE_FUNC_ORIENTATION, pol);
+}
+
+static int wcove_set_current_limit(struct tcpc_dev *tcpc, u32 max_ma, u32 mv)
+{
+ return 0;
+}
+
+static int wcove_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role role, enum typec_data_role data)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int val;
+ int ret;
+
+ ret = wcove_typec_func(wcove, WCOVE_FUNC_ROLE, data == TYPEC_HOST ?
+ WCOVE_ROLE_HOST : WCOVE_ROLE_DEVICE);
+ if (ret)
+ return ret;
+
+ val = role;
+ val |= data << USBC_PDCFG3_DATAROLE_SHIFT;
+ val |= PD_REV20 << USBC_PDCFG3_SOP_SHIFT;
+
+ return regmap_write(wcove->regmap, USBC_PDCFG3, val);
+}
+
+static int wcove_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+
+ return regmap_write(wcove->regmap, USBC_PDCFG2,
+ on ? USBC_PDCFG2_SOP : 0);
+}
+
+static int wcove_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int info = 0;
+ unsigned int cmd;
+ int ret;
+
+ ret = regmap_read(wcove->regmap, USBC_TXCMD, &cmd);
+ if (ret)
+ return ret;
+
+ if (!(cmd & USBC_TXCMD_BUF_RDY)) {
+ dev_warn(wcove->dev, "%s: Last transmission still ongoing!",
+ __func__);
+ return -EBUSY;
}
- memset(&partner, 0, sizeof(partner));
+ if (msg) {
+ const u8 *data = (void *)msg;
+ int i;
+
+ for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
+ data[i]);
+ if (ret)
+ return ret;
+ }
+ }
- switch (USBC_STATUS1_RSLT(status1)) {
- case USBC_RSLT_SRC_DEFAULT:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_USB);
+ switch (type) {
+ case TCPC_TX_SOP:
+ case TCPC_TX_SOP_PRIME:
+ case TCPC_TX_SOP_PRIME_PRIME:
+ case TCPC_TX_SOP_DEBUG_PRIME:
+ case TCPC_TX_SOP_DEBUG_PRIME_PRIME:
+ info = type + 1;
+ cmd = USBC_TXCMD_MSG;
break;
- case USBC_RSLT_SRC_1_5A:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_1_5A);
+ case TCPC_TX_HARD_RESET:
+ cmd = USBC_TXCMD_HR;
break;
- case USBC_RSLT_SRC_3_0A:
- typec_set_pwr_opmode(wcove->port, TYPEC_PWR_MODE_3_0A);
+ case TCPC_TX_CABLE_RESET:
+ cmd = USBC_TXCMD_CR;
break;
- case USBC_RSLT_SNK:
- role = TYPEC_SOURCE;
+ case TCPC_TX_BIST_MODE_2:
+ cmd = USBC_TXCMD_BIST;
break;
- case USBC_RSLT_DEBUG_ACC:
- partner.accessory = TYPEC_ACCESSORY_DEBUG;
+ default:
+ return -EINVAL;
+ }
+
+ /* NOTE Setting maximum number of retries (7) */
+ ret = regmap_write(wcove->regmap, USBC_TXINFO,
+ info | USBC_TXINFO_RETRIES(7));
+ if (ret)
+ return ret;
+
+ return regmap_write(wcove->regmap, USBC_TXCMD, cmd | USBC_TXCMD_START);
+}
+
+static int wcove_start_drp_toggling(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
+{
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int usbc_ctrl;
+
+ usbc_ctrl = USBC_CONTROL1_MODE_DRP | USBC_CONTROL1_DRPTOGGLE_RANDOM;
+
+ switch (cc) {
+ case TYPEC_CC_RP_1_5:
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_180;
break;
- case USBC_RSLT_AUDIO_ACC:
- partner.accessory = TYPEC_ACCESSORY_AUDIO;
+ case TYPEC_CC_RP_3_0:
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_330;
break;
default:
- dev_WARN(wcove->dev, "%s Undefined result\n", __func__);
- goto err;
+ usbc_ctrl |= USBC_CONTROL1_CURSRC_UA_80;
+ break;
}
- if (role == TYPEC_SINK) {
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_DEVICE);
- typec_set_data_role(wcove->port, TYPEC_DEVICE);
- typec_set_pwr_role(wcove->port, TYPEC_SINK);
- } else {
- wcove_typec_func(wcove, WCOVE_FUNC_ROLE, WCOVE_ROLE_HOST);
- typec_set_pwr_role(wcove->port, TYPEC_SOURCE);
- typec_set_data_role(wcove->port, TYPEC_HOST);
+ return regmap_write(wcove->regmap, USBC_CONTROL1, usbc_ctrl);
+}
+
+static int wcove_read_rx_buffer(struct wcove_typec *wcove, void *msg)
+{
+ unsigned int info;
+ int ret;
+ int i;
+
+ ret = regmap_read(wcove->regmap, USBC_RXINFO, &info);
+ if (ret)
+ return ret;
+
+ /* FIXME: Check that USBC_RXINFO_RXBYTES(info) matches the header */
+
+ for (i = 0; i < USBC_RXINFO_RXBYTES(info); i++) {
+ ret = regmap_read(wcove->regmap, USBC_RX_DATA + i, msg + i);
+ if (ret)
+ return ret;
}
- wcove->partner = typec_register_partner(wcove->port, &partner);
- if (!wcove->partner)
- dev_err(wcove->dev, "failed register partner\n");
-out:
- /* If either CC pins is requesting VCONN, we turn it on */
- if ((cc1_ctrl & USBC_CC_CTRL_VCONN_EN) ||
- (cc2_ctrl & USBC_CC_CTRL_VCONN_EN))
- wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, true);
- else
+ return regmap_write(wcove->regmap, USBC_RXSTATUS,
+ USBC_RXSTATUS_RXCLEAR);
+}
+
+static irqreturn_t wcove_typec_irq(int irq, void *data)
+{
+ struct wcove_typec *wcove = data;
+ unsigned int usbc_irq1 = 0;
+ unsigned int usbc_irq2 = 0;
+ unsigned int cc1ctrl;
+ int ret;
+
+ mutex_lock(&wcove->lock);
+
+ /* Read.. */
+ ret = regmap_read(wcove->regmap, USBC_IRQ1, &usbc_irq1);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_IRQ2, &usbc_irq2);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(wcove->regmap, USBC_CC1_CTRL, &cc1ctrl);
+ if (ret)
+ goto err;
+
+ if (!wcove->tcpm)
+ goto err;
+
+ /* ..check.. */
+ if (usbc_irq1 & USBC_IRQ1_OVERTEMP) {
+ dev_err(wcove->dev, "VCONN Switch Over Temperature!\n");
+ wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
+ /* REVISIT: Report an error? */
+ }
+
+ if (usbc_irq1 & USBC_IRQ1_SHORT) {
+ dev_err(wcove->dev, "VCONN Switch Short Circuit!\n");
wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VCONN, false);
+ /* REVISIT: Report an error? */
+ }
+
+ if (wcove->vbus != !!(cc1ctrl & USBC_CC_CTRL_VBUSOK))
+ tcpm_vbus_change(wcove->tcpm);
+
+ /* REVISIT: See if tcpm code can be made to consider Type-C HW FSMs */
+ if (usbc_irq2 & USBC_IRQ2_CC_CHANGE)
+ tcpm_cc_change(wcove->tcpm);
+
+ if (usbc_irq2 & USBC_IRQ2_RX_PD) {
+ unsigned int status;
+
+ /*
+ * FIXME: Need to check if TX is ongoing and report
+ * TCPC_TX_DISCARDED if needed?
+ */
+
+ ret = regmap_read(wcove->regmap, USBC_RXSTATUS, &status);
+ if (ret)
+ goto err;
+
+ /* Flush all buffers */
+ while (status & USBC_RXSTATUS_RXDATA) {
+ struct pd_message msg;
+
+ ret = wcove_read_rx_buffer(wcove, &msg);
+ if (ret) {
+ dev_err(wcove->dev, "%s: RX read failed\n",
+ __func__);
+ goto err;
+ }
+
+ tcpm_pd_receive(wcove->tcpm, &msg);
+
+ ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
+ &status);
+ if (ret)
+ goto err;
+ }
+ }
+
+ if (usbc_irq2 & USBC_IRQ2_RX_HR)
+ tcpm_pd_hard_reset(wcove->tcpm);
+
+ /* REVISIT: if (usbc_irq2 & USBC_IRQ2_RX_CR) */
+
+ if (usbc_irq2 & USBC_IRQ2_TX_SUCCESS)
+ tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_SUCCESS);
+
+ if (usbc_irq2 & USBC_IRQ2_TX_FAIL)
+ tcpm_pd_transmit_complete(wcove->tcpm, TCPC_TX_FAILED);
- /* Relying on the FSM to know when we need to drive VBUS. */
- wcove_typec_func(wcove, WCOVE_FUNC_DRIVE_VBUS,
- !!(status2 & USBC_STATUS2_VBUS_REQ));
err:
+ /* ..and clear. */
+ if (usbc_irq1) {
+ ret = regmap_write(wcove->regmap, USBC_IRQ1, usbc_irq1);
+ if (ret)
+ dev_WARN(wcove->dev, "%s failed to clear IRQ1\n",
+ __func__);
+ }
+
+ if (usbc_irq2) {
+ ret = regmap_write(wcove->regmap, USBC_IRQ2, usbc_irq2);
+ if (ret)
+ dev_WARN(wcove->dev, "%s failed to clear IRQ2\n",
+ __func__);
+ }
+
/* REVISIT: Clear WhiskeyCove CHGR Type-C interrupt */
regmap_write(wcove->regmap, WCOVE_CHGRIRQ0, BIT(5));
@@ -288,11 +547,41 @@ err:
return IRQ_HANDLED;
}
+/*
+ * The following power levels should be safe to use with Joule board.
+ */
+static const u32 src_pdo[] = {
+ PDO_FIXED(5000, 1500, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
+ PDO_FIXED_USB_COMM),
+};
+
+static const u32 snk_pdo[] = {
+ PDO_FIXED(12000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP |
+ PDO_FIXED_USB_COMM),
+ PDO_BATT(4750, 12000, 15000),
+ PDO_VAR(4750, 12000, 3000),
+};
+
+static struct tcpc_config wcove_typec_config = {
+ .src_pdo = src_pdo,
+ .nr_src_pdo = ARRAY_SIZE(src_pdo),
+ .snk_pdo = snk_pdo,
+ .nr_snk_pdo = ARRAY_SIZE(snk_pdo),
+
+ .max_snk_mv = 12000,
+ .max_snk_ma = 3000,
+ .max_snk_mw = 36000,
+ .operating_snk_mw = 15000,
+
+ .type = TYPEC_PORT_DRP,
+ .default_role = TYPEC_SINK,
+};
+
static int wcove_typec_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct wcove_typec *wcove;
- unsigned int val;
+ int irq;
int ret;
wcove = devm_kzalloc(&pdev->dev, sizeof(*wcove), GFP_KERNEL);
@@ -303,43 +592,47 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- ret = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
+ irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
platform_get_irq(pdev, 0));
- if (ret < 0)
- return ret;
+ if (irq < 0)
+ return irq;
- ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
- wcove_typec_irq, IRQF_ONESHOT,
- "wcove_typec", wcove);
+ ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid);
if (ret)
return ret;
- if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
+ if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &wcove->guid, 0, 0x1f)) {
dev_err(&pdev->dev, "Missing _DSM functions\n");
return -ENODEV;
}
- wcove->cap.type = TYPEC_PORT_DRP;
- wcove->cap.revision = USB_TYPEC_REV_1_1;
- wcove->cap.prefer_role = TYPEC_NO_PREFERRED_ROLE;
+ wcove->tcpc.init = wcove_init;
+ wcove->tcpc.get_vbus = wcove_get_vbus;
+ wcove->tcpc.set_vbus = wcove_set_vbus;
+ wcove->tcpc.set_cc = wcove_set_cc;
+ wcove->tcpc.get_cc = wcove_get_cc;
+ wcove->tcpc.set_polarity = wcove_set_polarity;
+ wcove->tcpc.set_vconn = wcove_set_vconn;
+ wcove->tcpc.set_current_limit = wcove_set_current_limit;
+ wcove->tcpc.start_drp_toggling = wcove_start_drp_toggling;
- /* Make sure the PD PHY is disabled until USB PD is available */
- regmap_read(wcove->regmap, USBC_CONTROL3, &val);
- regmap_write(wcove->regmap, USBC_CONTROL3, val | USBC_CONTROL3_PD_DIS);
+ wcove->tcpc.set_pd_rx = wcove_set_pd_rx;
+ wcove->tcpc.set_roles = wcove_set_roles;
+ wcove->tcpc.pd_transmit = wcove_pd_transmit;
- /* DRP mode without accessory support */
- regmap_read(wcove->regmap, USBC_CONTROL1, &val);
- regmap_write(wcove->regmap, USBC_CONTROL1, USBC_CONTROL1_MODE_DRP(val));
+ wcove->tcpc.config = &wcove_typec_config;
- wcove->port = typec_register_port(&pdev->dev, &wcove->cap);
- if (!wcove->port)
- return -ENODEV;
+ wcove->tcpm = tcpm_register_port(wcove->dev, &wcove->tcpc);
+ if (IS_ERR(wcove->tcpm))
+ return PTR_ERR(wcove->tcpm);
- /* Unmask everything */
- regmap_read(wcove->regmap, USBC_IRQMASK1, &val);
- regmap_write(wcove->regmap, USBC_IRQMASK1, val & ~USBC_IRQMASK1_ALL);
- regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
- regmap_write(wcove->regmap, USBC_IRQMASK2, val & ~USBC_IRQMASK2_ALL);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ wcove_typec_irq, IRQF_ONESHOT,
+ "wcove_typec", wcove);
+ if (ret) {
+ tcpm_unregister_port(wcove->tcpm);
+ return ret;
+ }
platform_set_drvdata(pdev, wcove);
return 0;
@@ -356,8 +649,8 @@ static int wcove_typec_remove(struct platform_device *pdev)
regmap_read(wcove->regmap, USBC_IRQMASK2, &val);
regmap_write(wcove->regmap, USBC_IRQMASK2, val | USBC_IRQMASK2_ALL);
- typec_unregister_partner(wcove->partner);
- typec_unregister_port(wcove->port);
+ tcpm_unregister_port(wcove->tcpm);
+
return 0;
}
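
In wcove_pd_transmit() above, the number of bytes copied into USBC_TX_DATA is the
on-wire size of a USB PD message: a 2-byte header plus 4 bytes per data object,
with the object count taken from the header itself. The loop bound can be read as
the following helper (wcove_pd_msg_len is a hypothetical name, not part of the patch):

    /* Matches the loop bound pd_header_cnt(msg->header) * 4 + 2 */
    static size_t wcove_pd_msg_len(const struct pd_message *msg)
    {
            return pd_header_cnt(msg->header) * 4 + 2;
    }
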
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 8372fc22f9b3..b57891c1fd31 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
CFLAGS_trace.o := -I$(src)
obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
diff --git a/drivers/usb/typec/ucsi/debug.h b/drivers/usb/typec/ucsi/debug.h
index e4d8fc763e6c..fdeff39df120 100644
--- a/drivers/usb/typec/ucsi/debug.h
+++ b/drivers/usb/typec/ucsi/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UCSI_DEBUG_H
#define __UCSI_DEBUG_H
diff --git a/drivers/usb/typec/ucsi/trace.c b/drivers/usb/typec/ucsi/trace.c
index 006f65c72a34..d9a6ff6e673c 100644
--- a/drivers/usb/typec/ucsi/trace.c
+++ b/drivers/usb/typec/ucsi/trace.c
@@ -1,2 +1,3 @@
+// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/usb/typec/ucsi/trace.h b/drivers/usb/typec/ucsi/trace.h
index 98b404404834..d5092446ecc6 100644
--- a/drivers/usb/typec/ucsi/trace.h
+++ b/drivers/usb/typec/ucsi/trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ucsi
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 714c5bcedf2b..79046fe66426 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Type-C Connector System Software Interface driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/completion.h>
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 8a88f45822e3..53b80f40a908 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRIVER_USB_TYPEC_UCSI_H
#define __DRIVER_USB_TYPEC_UCSI_H
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index cabd47612b0a..44eb4e1ea817 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UCSI ACPI driver
*
* Copyright (C) 2017, Intel Corporation
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index bb0bd732e29a..26ca0ec01fd5 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* USB Skeleton driver - 2.2
*
* Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
* This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
* but has been rewritten to be easier to read and use.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/usbip/Makefile b/drivers/usb/usbip/Makefile
index d843a9e68852..f4c8f3840262 100644
--- a/drivers/usb/usbip/Makefile
+++ b/drivers/usb/usbip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_USBIP_DEBUG) := -DDEBUG
obj-$(CONFIG_USBIP_CORE) += usbip-core.o
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 910f027773aa..14a72357800a 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#ifndef __USBIP_STUB_H
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index c653ce533430..a3df8ee82faf 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/device.h>
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 7170404e8979..4f48b306713f 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/string.h>
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 191b176ffedf..536e037f541f 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index be50cef645d8..b18bce96c212 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index 2281f3562870..f7978933b402 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <asm/byteorder.h>
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 3050fc99a417..e5de35c8c505 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -1,22 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Samsung Electronics
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#ifndef __USBIP_COMMON_H
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index f1635662c299..5b4c0864ad92 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
index 5cfb59e98e44..5659dce1526e 100644
--- a/drivers/usb/usbip/vhci.h
+++ b/drivers/usb/usbip/vhci.h
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __USBIP_VHCI_H
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 11b9a22799cc..713e94170963 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/init.h>
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index ef2f2d5ca6b2..90577e8b2282 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 1b9f60a22e0b..e78f7472cac4 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
* Copyright (C) 2015-2016 Nobuo Iwata
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 3e7878fe2fd4..d625a2ff4b71 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vudc.h b/drivers/usb/usbip/vudc.h
index 25e01b09c4c3..cf968192e59f 100644
--- a/drivers/usb/usbip/vudc.h
+++ b/drivers/usb/usbip/vudc.h
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __USBIP_VUDC_H
@@ -104,7 +92,7 @@ struct vudc {
struct usbip_device ud;
struct transfer_timer tr_timer;
- struct timeval start_time;
+ struct timespec64 start_time;
struct list_head urb_queue;
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 968471b62cbc..1b9a4f87db59 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -135,16 +123,15 @@ struct vep *vudc_find_endpoint(struct vudc *udc, u8 address)
/* gadget ops */
-/* FIXME - this will probably misbehave when suspend/resume is added */
static int vgadget_get_frame(struct usb_gadget *_gadget)
{
- struct timeval now;
+ struct timespec64 now;
struct vudc *udc = usb_gadget_to_vudc(_gadget);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
return ((now.tv_sec - udc->start_time.tv_sec) * 1000 +
- (now.tv_usec - udc->start_time.tv_usec) / 1000)
- % 0x7FF;
+ (now.tv_nsec - udc->start_time.tv_nsec) / NSEC_PER_MSEC)
+ & 0x7FF;
}
static int vgadget_set_selfpowered(struct usb_gadget *_gadget, int value)
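
The vudc conversion from struct timeval/do_gettimeofday() to struct
timespec64/ktime_get_ts64() keeps the same frame-number arithmetic: elapsed
milliseconds since the transfer timer was started, folded into the 11-bit USB
frame counter (the fold also changes from "% 0x7FF" to "& 0x7FF", which wraps at
2048 and is arguably the more correct behaviour). As a standalone sketch,
frame_from_start is a hypothetical name mirroring the reworked vgadget_get_frame():

    static int frame_from_start(const struct timespec64 *start)
    {
            struct timespec64 now;

            ktime_get_ts64(&now);
            return ((now.tv_sec - start->tv_sec) * 1000 +
                    (now.tv_nsec - start->tv_nsec) / NSEC_PER_MSEC) & 0x7FF;
    }
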
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 9e655714e389..3fc22037a82f 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
index e429b59f6f8a..df1e30989148 100644
--- a/drivers/usb/usbip/vudc_rx.c
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <net/sock.h>
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index 0f98f2c7475f..1adc8af292ec 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
* Krzysztof Opasiak <k.opasiak@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/device.h>
@@ -161,7 +149,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
udc->ud.status = SDEV_ST_USED;
spin_unlock_irq(&udc->ud.lock);
- do_gettimeofday(&udc->start_time);
+ ktime_get_ts64(&udc->start_time);
v_start_timer(udc);
udc->connected = 1;
} else {
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index 4cfd475ee865..c9db846ee4f6 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
@@ -6,19 +7,6 @@
* Based on dummy_hcd.c, which is:
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/usb.h>
@@ -311,9 +299,9 @@ top:
return sent;
}
-static void v_timer(unsigned long _vudc)
+static void v_timer(struct timer_list *t)
{
- struct vudc *udc = (struct vudc *) _vudc;
+ struct vudc *udc = from_timer(udc, t, tr_timer.timer);
struct transfer_timer *timer = &udc->tr_timer;
struct urbp *urb_p, *tmp;
unsigned long flags;
@@ -459,7 +447,7 @@ void v_init_timer(struct vudc *udc)
{
struct transfer_timer *t = &udc->tr_timer;
- setup_timer(&t->timer, v_timer, (unsigned long) udc);
+ timer_setup(&t->timer, v_timer, 0);
t->state = VUDC_TR_STOPPED;
}
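Editor's note: these hunks follow the tree-wide timer API conversion. The callback now receives a struct timer_list * and recovers its containing structure with from_timer() (a container_of() wrapper), so the old unsigned long data cookie and setup_timer() disappear. A hedged, minimal sketch of the pattern with illustrative names, not taken from vudc:

#include <linux/timer.h>

struct my_dev {
	struct timer_list timer;
	int pending;
};

/* New-style callback: container recovered from the timer_list pointer. */
static void my_dev_timeout(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, timer);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	/* Replaces setup_timer(&dev->timer, my_dev_timeout, (unsigned long)dev). */
	timer_setup(&dev->timer, my_dev_timeout, 0);
}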
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
index 234661782fa0..1440ae0919ec 100644
--- a/drivers/usb/usbip/vudc_tx.c
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
* Copyright (C) 2015-2016 Samsung Electronics
* Igor Kotrasinski <i.kotrasinsk@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <net/sock.h>
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
index b3bd313032b1..d604ccdd916f 100644
--- a/drivers/usb/wusbcore/Makefile
+++ b/drivers/usb/wusbcore/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_WUSB) += wusbcore.o
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index aa4e440e9975..222228c5c1e1 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB - Cable Based Association
*
@@ -6,21 +7,6 @@
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* WUSB devices have to be paired (associated in WUSB lingo) so
* that they can connect to the system.
*
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 062c205f0046..4c00be2d1993 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Ultra Wide Band
* AES-128 CCM Encryption
@@ -5,21 +6,6 @@
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* We don't do any encryption here; we use the Linux Kernel's AES-128
* crypto modules to construct keys and payload blocks in a way
* defined by WUSB1.0[6]. Check the errata, as typos are patched
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 78212f8180ce..85a1acf3a729 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB devices
* sysfs bindings
@@ -5,21 +6,6 @@
* Copyright (C) 2007 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* Get them out of the way...
*/
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index bf9551735938..fcb06aef2675 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* Device Connect handling
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
* FIXME: this file needs to be broken up, it's grown too big
*
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3f485df96226..acce0d551eb2 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* MMC (Microscheduled Management Command) handling
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* WUIEs and MMC IEs...well, they are almost the same at the end. MMC
* IEs are Wireless USB IEs that go into the MMC period...[what is
* that? look in Design-overview.txt].
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
index 090f27371a8f..30f569131471 100644
--- a/drivers/usb/wusbcore/pal.c
+++ b/drivers/usb/wusbcore/pal.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* UWB Protocol Adaptation Layer (PAL) glue.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "wusbhc.h"
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
index 7b1b2e2fb673..6dcfc6825f55 100644
--- a/drivers/usb/wusbcore/reservation.c
+++ b/drivers/usb/wusbcore/reservation.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB cluster reservation management
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb.h>
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index a082fe62b1f0..20c08cd9dcbf 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Root Hub operations
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* We fake a root hub that has fake ports (as many as simultaneous
* devices the Wireless USB Host Controller can deal with). For each
* port we keep a state in @wusbhc->port[index] identical to the one
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 170f2c38de9b..33d2f5d7f33b 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Security support: encryption enablement, etc
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*/
#include <linux/types.h>
@@ -28,6 +14,7 @@
#include <linux/random.h>
#include <linux/export.h>
#include "wusbhc.h"
+#include <asm/unaligned.h>
static void wusbhc_gtk_rekey_work(struct work_struct *work);
@@ -367,7 +354,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct usb_device *usb_dev = wusb_dev->usb_dev;
struct device *dev = &usb_dev->dev;
u32 tkid;
- __le32 tkid_le;
struct usb_handshake *hs;
struct aes_ccm_nonce ccm_n;
u8 mic[8];
@@ -385,11 +371,10 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
goto error_dev_set_encryption;
tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
- tkid_le = cpu_to_le32(tkid);
hs[0].bMessageNumber = 1;
hs[0].bStatus = 0;
- memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
+ put_unaligned_le32(tkid, hs[0].tTKID);
hs[0].bReserved = 0;
memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
@@ -441,7 +426,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
/* Setup the CCM nonce */
memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
- memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
+ put_unaligned_le32(tkid, ccm_n.tkid);
ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
ccm_n.dest_addr.data[0] = wusb_dev->addr;
ccm_n.dest_addr.data[1] = 0;
@@ -472,7 +457,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
/* Send Handshake3 */
hs[2].bMessageNumber = 3;
hs[2].bStatus = 0;
- memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
+ put_unaligned_le32(tkid, hs[2].tTKID);
hs[2].bReserved = 0;
memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
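Editor's note: the security.c hunks drop the intermediate __le32 variable plus memcpy() in favour of put_unaligned_le32(), which byte-swaps on big-endian hosts and stores the value into a possibly unaligned buffer in one step. A small sketch of the equivalence; the helper name and buffer are illustrative only:

#include <linux/types.h>
#include <asm/unaligned.h>

static void write_le32(u8 *dst, u32 val)
{
	/* Before: __le32 tmp = cpu_to_le32(val); memcpy(dst, &tmp, sizeof(tmp)); */
	put_unaligned_le32(val, dst);
}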
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index d01496fd27fe..6827075fb8a1 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wire Adapter Host Controller Driver
* Common items to HWA and DWA based HCDs
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*/
#include <linux/slab.h>
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index edc7267157f3..ec90fff21deb 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* HWA Host Controller Driver
* Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This driver implements a USB Host Controller (struct usb_hcd) for a
* Wireless USB Host Controller based on the Wireless USB 1.0
* Host-Wire-Adapter specification (in layman terms, a USB-dongle that
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index e3819fc182b0..9fdcb6b84abf 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
* Notification EndPoint support
@@ -5,21 +6,6 @@
* Copyright (C) 2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This part takes care of getting the notification from the hw
* only and dispatching through wusbwad into
* wa_notif_dispatch. Handling is done there.
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7ecdbe19a32..d0f1a6698460 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter
* rpipe management
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* FIXME: docs
*
* RPIPE
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index e70322b1dd02..7fca4e7e556d 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* WUSB Wire Adapter
* Data transfer and URB enqueing
@@ -5,21 +6,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* How transfers work: get a buffer, break it up in segments (segment
* size is a multiple of the maxpacket size). For each segment issue a
* segment request (struct wa_xfer_*), then send the data buffer if
@@ -2156,6 +2142,7 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
* do not increment RPIPE avail for the WA_SEG_DELAYED case
* since it has not been submitted to the RPIPE.
*/
+ /* fall through */
case WA_SEG_DELAYED:
xfer->segs_done++;
current_seg->status = status;
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 5338e42533c8..e5ba6140c1ba 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* sysfs glue, wusbcore module support and life cycle management
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* Creation/destruction of wusbhc is split in two parts; the part that
* doesn't require the HCD to be added (wusbhc_{create,destroy}) and
* the one that requires (phase B, wusbhc_b_{create,destroy}).
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 8c5bd000739b..7681d796ca5b 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Wireless USB Host Controller
* Common infrastructure for WHCI and HWA WUSB-HC drivers
@@ -6,21 +7,6 @@
* Copyright (C) 2005-2006 Intel Corporation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
* This driver implements parts common to all Wireless USB Host
* Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
* by:
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
index d47dd6e2942c..32f4de7afbd6 100644
--- a/drivers/uwb/Makefile
+++ b/drivers/uwb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_UWB) += uwb.o
obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
obj-$(CONFIG_UWB_HWA) += hwa-rc.o
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 38d0504a1bbc..625f706b8160 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -603,9 +603,9 @@ static void uwb_cnflt_update_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
{
- struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+ struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}
@@ -642,7 +642,7 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
}
INIT_LIST_HEAD(&cnflt->rc_node);
- setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+ timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
cnflt->rc = rc;
INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile
index bd1b9f25424c..4739fdac5922 100644
--- a/drivers/uwb/i1480/dfu/Makefile
+++ b/drivers/uwb/i1480/dfu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
i1480-dfu-usb-objs := \
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index 36b5cb62c15d..fbdca728bd9f 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -115,7 +115,7 @@ struct uwb_rc_neh {
struct list_head list_node;
};
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
static void uwb_rc_neh_release(struct kref *kref)
{
@@ -223,7 +223,7 @@ struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
kref_init(&neh->kref);
INIT_LIST_HEAD(&neh->list_node);
- setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+ timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
neh->rc = rc;
neh->evt_type = expected_type;
@@ -565,9 +565,9 @@ void uwb_rc_neh_error(struct uwb_rc *rc, int error)
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
{
- struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+ struct uwb_rc_neh *neh = from_timer(neh, t, timer);
struct uwb_rc *rc = neh->rc;
unsigned long flags;
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
index f5e27247a38f..fe25a8cc6fa1 100644
--- a/drivers/uwb/rsv.c
+++ b/drivers/uwb/rsv.c
@@ -23,7 +23,7 @@
#include "uwb-internal.h"
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
static const char *rsv_states[] = {
[UWB_RSV_STATE_NONE] = "none ",
@@ -198,9 +198,9 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
dev_dbg(dev, "put stream %d\n", rsv->stream);
}
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
{
- struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+ struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
struct device *dev = &rc->uwb_dev.dev;
@@ -470,7 +470,7 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
INIT_LIST_HEAD(&rsv->rc_node);
INIT_LIST_HEAD(&rsv->pal_node);
kref_init(&rsv->kref);
- setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+ timer_setup(&rsv->timer, uwb_rsv_timer, 0);
rsv->rc = rc;
INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
@@ -939,9 +939,9 @@ static void uwb_rsv_alien_bp_work(struct work_struct *work)
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
{
- struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+ struct uwb_rsv *rsv = from_timer(rsv, t, timer);
queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
@@ -987,8 +987,7 @@ void uwb_rsv_init(struct uwb_rc *rc)
rc->bow.can_reserve_extra_mases = true;
rc->bow.total_expired = 0;
rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
- (unsigned long)&rc->bow);
+ timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
index 353c0555a1f5..91326ce093a7 100644
--- a/drivers/uwb/uwb-internal.h
+++ b/drivers/uwb/uwb-internal.h
@@ -329,7 +329,7 @@ void uwb_rsv_put(struct uwb_rsv *rsv);
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 4a23c13b6be4..de67c4725cce 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vfio_virqfd-y := virqfd.o
obj-$(CONFIG_VFIO) += vfio.o
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 5628fe114347..115a36f6f403 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -808,6 +808,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
{
__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
offset + PCI_EXP_DEVCTL);
+ int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
if (count < 0)
@@ -833,6 +834,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
pci_try_reset_function(vdev->pdev);
}
+ /*
+ * MPS is virtualized to the user, writes do not change the physical
+ * register since determining a proper MPS value requires a system wide
+ * device view. The MRRS is largely independent of MPS, but since the
+ * user does not have that system-wide view, they might set a safe, but
+ * inefficiently low value. Here we allow writes through to hardware,
+ * but we set the floor to the physical device MPS setting, so that
+ * we can at least use full TLPs, as defined by the MPS value.
+ *
+ * NB, if any devices actually depend on an artificially low MRRS
+ * setting, this will need to be revisited, perhaps with a quirk
+ * through pcie_set_readrq().
+ */
+ if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
+ readrq = 128 <<
+ ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
+ readrq = max(readrq, pcie_get_mps(vdev->pdev));
+
+ pcie_set_readrq(vdev->pdev, readrq);
+ }
+
return count;
}
@@ -849,11 +871,14 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
/*
* Allow writes to device control fields, except devctl_phantom,
- * which could confuse IOMMU, and the ARI bit in devctl2, which
- * is set at probe time. FLR gets virtualized via our writefn.
+ * which could confuse IOMMU, MPS, which can break communication
+ * with other physical devices, and the ARI bit in devctl2, which
+ * is set at probe time. FLR and MRRS get virtualized via our
+ * writefn.
*/
p_setw(perm, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR, ~PCI_EXP_DEVCTL_PHANTOM);
+ PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
return 0;
}
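Editor's note: the comment and write path above stop user MRRS writes from dropping below the device's negotiated MPS. The DEVCTL readrq field (bits 14:12) encodes 128 << n bytes; the write handler decodes it and clamps it with pcie_get_mps() before calling pcie_set_readrq(). A hedged sketch of just the decode-and-floor step, with an illustrative helper name:

#include <linux/pci_regs.h>

/* DEVCTL[14:12] encodes MRRS as 128 << field; floor it at the device MPS. */
static int clamp_readrq(u16 devctl, int mps)
{
	int readrq = 128 << ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);

	return readrq > mps ? readrq : mps;	/* max(readrq, mps) */
}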
diff --git a/drivers/vfio/platform/Makefile b/drivers/vfio/platform/Makefile
index 41a6224f5e6b..3f3a24e7c4ef 100644
--- a/drivers/vfio/platform/Makefile
+++ b/drivers/vfio/platform/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vfio-platform-base-y := vfio_platform_common.o vfio_platform_irq.o
vfio-platform-y := vfio_platform.o
diff --git a/drivers/vfio/platform/reset/Kconfig b/drivers/vfio/platform/reset/Kconfig
index 70cccc582bee..392e3c09def0 100644
--- a/drivers/vfio/platform/reset/Kconfig
+++ b/drivers/vfio/platform/reset/Kconfig
@@ -13,3 +13,12 @@ config VFIO_PLATFORM_AMDXGBE_RESET
Enables the VFIO platform driver to handle reset for AMD XGBE
If you don't know what to do here, say N.
+
+config VFIO_PLATFORM_BCMFLEXRM_RESET
+ tristate "VFIO support for Broadcom FlexRM reset"
+ depends on VFIO_PLATFORM && (ARCH_BCM_IPROC || COMPILE_TEST)
+ default ARCH_BCM_IPROC
+ help
+ Enables the VFIO platform driver to handle reset for Broadcom FlexRM
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/platform/reset/Makefile b/drivers/vfio/platform/reset/Makefile
index 93f4e232697b..57abd4f0ac5b 100644
--- a/drivers/vfio/platform/reset/Makefile
+++ b/drivers/vfio/platform/reset/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vfio-platform-calxedaxgmac-y := vfio_platform_calxedaxgmac.o
vfio-platform-amdxgbe-y := vfio_platform_amdxgbe.o
@@ -5,3 +6,4 @@ ccflags-y += -Idrivers/vfio/platform
obj-$(CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET) += vfio-platform-calxedaxgmac.o
obj-$(CONFIG_VFIO_PLATFORM_AMDXGBE_RESET) += vfio-platform-amdxgbe.o
+obj-$(CONFIG_VFIO_PLATFORM_BCMFLEXRM_RESET) += vfio_platform_bcmflexrm.o
diff --git a/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
new file mode 100644
index 000000000000..d45c3be71198
--- /dev/null
+++ b/drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This driver provides reset support for Broadcom FlexRM ring manager
+ * to VFIO platform.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "vfio_platform_private.h"
+
+/* FlexRM configuration */
+#define RING_REGS_SIZE 0x10000
+#define RING_VER_MAGIC 0x76303031
+
+/* Per-Ring register offsets */
+#define RING_VER 0x000
+#define RING_CONTROL 0x034
+#define RING_FLUSH_DONE 0x038
+
+/* Register RING_CONTROL fields */
+#define CONTROL_FLUSH_SHIFT 5
+
+/* Register RING_FLUSH_DONE fields */
+#define FLUSH_DONE_MASK 0x1
+
+static int vfio_platform_bcmflexrm_shutdown(void __iomem *ring)
+{
+ unsigned int timeout;
+
+ /* Disable/inactivate ring */
+ writel_relaxed(0x0, ring + RING_CONTROL);
+
+ /* Set ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), ring + RING_CONTROL);
+ do {
+ if (readl_relaxed(ring + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK)
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ /* Clear ring flush state */
+ timeout = 1000; /* timeout of 1s */
+ writel_relaxed(0x0, ring + RING_CONTROL);
+ do {
+ if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+ FLUSH_DONE_MASK))
+ break;
+ mdelay(1);
+ } while (--timeout);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int vfio_platform_bcmflexrm_reset(struct vfio_platform_device *vdev)
+{
+ void __iomem *ring;
+ int rc = 0, ret = 0, ring_num = 0;
+ struct vfio_platform_region *reg = &vdev->regions[0];
+
+ /* Map FlexRM ring registers if not mapped */
+ if (!reg->ioaddr) {
+ reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
+ if (!reg->ioaddr)
+ return -ENOMEM;
+ }
+
+ /* Discover and shutdown each FlexRM ring */
+ for (ring = reg->ioaddr;
+ ring < (reg->ioaddr + reg->size); ring += RING_REGS_SIZE) {
+ if (readl_relaxed(ring + RING_VER) == RING_VER_MAGIC) {
+ rc = vfio_platform_bcmflexrm_shutdown(ring);
+ if (rc) {
+ dev_warn(vdev->device,
+ "FlexRM ring%d shutdown error %d\n",
+ ring_num, rc);
+ ret |= rc;
+ }
+ ring_num++;
+ }
+ }
+
+ return ret;
+}
+
+module_vfio_reset_handler("brcm,iproc-flexrm-mbox",
+ vfio_platform_bcmflexrm_reset);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
+MODULE_DESCRIPTION("Reset support for Broadcom FlexRM VFIO platform device");
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f5a86f651f38..2bc3705a99bd 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
- struct device_driver *drv = ACCESS_ONCE(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct vfio_unbound_dev *unbound;
int ret = -EINVAL;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 63112c36ab2d..759a5bdd40e1 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -507,6 +507,8 @@ static int tce_iommu_clear(struct tce_container *container,
enum dma_data_direction direction;
for ( ; pages; --pages, ++entry) {
+ cond_resched();
+
direction = DMA_NONE;
oldhpa = 0;
ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 92155cce926d..e30e29ae4819 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -767,6 +767,9 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
return -EINVAL;
if (!unmap->size || unmap->size & mask)
return -EINVAL;
+ if (unmap->iova + unmap->size < unmap->iova ||
+ unmap->size > SIZE_MAX)
+ return -EINVAL;
WARN_ON(mask & PAGE_MASK);
again:
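Editor's note: the added check rejects unmap requests whose iova + size wraps past the end of the address space, using the standard unsigned-overflow idiom (a + b < a). A minimal, self-contained sketch of the idiom with made-up values:

#include <stdbool.h>
#include <stdint.h>

static bool range_wraps(uint64_t iova, uint64_t size)
{
	return iova + size < iova;	/* true only if the addition overflowed */
}

/* range_wraps(0xfffffffffffff000ULL, 0x2000) == true */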
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 6b012b986b57..6c6df24f770c 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := net.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 58585ec8699e..8d626d7c2e7e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -436,8 +436,8 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net)
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
- return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
- == nvq->done_idx;
+ return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
+ min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
/* Expects to be always run from workqueue - which acts as
@@ -471,6 +471,7 @@ static void handle_tx(struct vhost_net *net)
goto out;
vhost_disable_notify(&net->dev, vq);
+ vhost_net_disable_vq(net, vq);
hdr_size = nvq->vhost_hlen;
zcopy = nvq->ubufs;
@@ -480,11 +481,6 @@ static void handle_tx(struct vhost_net *net)
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
- */
- if (unlikely(vhost_exceeds_maxpend(net)))
- break;
head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
ARRAY_SIZE(vq->iov),
@@ -519,8 +515,7 @@ static void handle_tx(struct vhost_net *net)
len = msg_data_left(&msg);
zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
- && (nvq->upend_idx + 1) % UIO_MAXIOV !=
- nvq->done_idx
+ && !vhost_exceeds_maxpend(net)
&& vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
@@ -562,6 +557,7 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV;
}
vhost_discard_vq_desc(vq, 1);
+ vhost_net_enable_vq(net, vq);
break;
}
if (err != len)
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 046f6d280af5..71517b3c5558 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -210,12 +210,6 @@ static struct workqueue_struct *vhost_scsi_workqueue;
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);
-static int iov_num_pages(void __user *iov_base, size_t iov_len)
-{
- return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
- ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
-}
-
static void vhost_scsi_done_inflight(struct kref *kref)
{
struct vhost_scsi_inflight *inflight;
@@ -519,7 +513,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
vs_completion_work);
DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
struct virtio_scsi_cmd_resp v_rsp;
- struct vhost_scsi_cmd *cmd;
+ struct vhost_scsi_cmd *cmd, *t;
struct llist_node *llnode;
struct se_cmd *se_cmd;
struct iov_iter iov_iter;
@@ -527,7 +521,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
llnode = llist_del_all(&vs->vs_completion_list);
- llist_for_each_entry(cmd, llnode, tvc_completion_list) {
+ llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
se_cmd = &cmd->tvc_se_cmd;
pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
@@ -618,48 +612,31 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
*/
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
- void __user *ptr,
- size_t len,
+ struct iov_iter *iter,
struct scatterlist *sgl,
bool write)
{
- unsigned int npages = 0, offset, nbytes;
- unsigned int pages_nr = iov_num_pages(ptr, len);
- struct scatterlist *sg = sgl;
struct page **pages = cmd->tvc_upages;
- int ret, i;
-
- if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
- pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
- " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
- pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
- return -ENOBUFS;
- }
+ struct scatterlist *sg = sgl;
+ ssize_t bytes;
+ size_t offset;
+ unsigned int npages = 0;
- ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
+ bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+ VHOST_SCSI_PREALLOC_UPAGES, &offset);
/* No pages were pinned */
- if (ret < 0)
- goto out;
- /* Less pages pinned than wanted */
- if (ret != pages_nr) {
- for (i = 0; i < ret; i++)
- put_page(pages[i]);
- ret = -EFAULT;
- goto out;
- }
+ if (bytes <= 0)
+ return bytes < 0 ? bytes : -EFAULT;
- while (len > 0) {
- offset = (uintptr_t)ptr & ~PAGE_MASK;
- nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
- sg_set_page(sg, pages[npages], nbytes, offset);
- ptr += nbytes;
- len -= nbytes;
- sg++;
- npages++;
- }
+ iov_iter_advance(iter, bytes);
-out:
- return ret;
+ while (bytes) {
+ unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
+ sg_set_page(sg++, pages[npages++], n, offset);
+ bytes -= n;
+ offset = 0;
+ }
+ return npages;
}
static int
@@ -687,24 +664,20 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
struct iov_iter *iter,
struct scatterlist *sg, int sg_count)
{
- size_t off = iter->iov_offset;
- int i, ret;
-
- for (i = 0; i < iter->nr_segs; i++) {
- void __user *base = iter->iov[i].iov_base + off;
- size_t len = iter->iov[i].iov_len - off;
+ struct scatterlist *p = sg;
+ int ret;
- ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
+ while (iov_iter_count(iter)) {
+ ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
if (ret < 0) {
- for (i = 0; i < sg_count; i++) {
- struct page *page = sg_page(&sg[i]);
+ while (p < sg) {
+ struct page *page = sg_page(p++);
if (page)
put_page(page);
}
return ret;
}
sg += ret;
- off = 0;
}
return 0;
}
@@ -929,7 +902,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- tpg = ACCESS_ONCE(vs_tpg[*target]);
+ tpg = READ_ONCE(vs_tpg[*target]);
if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out);
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
index 1fef5df82153..7dd265bfdf81 100644
--- a/drivers/vhost/test.h
+++ b/drivers/vhost/test.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_VHOST_TEST_H
#define LINUX_VHOST_TEST_H
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6dbb28245e6..33ac2b186b85 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1175,7 +1175,7 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq,
{
const struct vhost_umem_node *node;
struct vhost_umem *umem = vq->iotlb;
- u64 s = 0, size, orig_addr = addr;
+ u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
if (vhost_vq_meta_fetch(vq, addr, len, type))
return true;
@@ -1183,7 +1183,7 @@ static int iotlb_access_ok(struct vhost_virtqueue *vq,
while (len > s) {
node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
addr,
- addr + len - 1);
+ last);
if (node == NULL || node->start > addr) {
vhost_iotlb_miss(vq, addr, access);
return false;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d59a9cc65f9d..79c6e7a60a5e 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c9de9c41aa97..5a5e981bd8e4 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -518,6 +518,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
goto out;
}
+ vsock->guest_cid = 0; /* no CID assigned yet */
+
atomic_set(&vsock->queued_replies, 0);
vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 445b2c230b56..df7650adede9 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VGASTATE) += vgastate.o
obj-$(CONFIG_HDMI) += hdmi.o
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 8905129691e8..5e28f01c8391 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Backlight & LCD drivers
obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c
index a9e9cef20ed6..2b6c6aaf4e14 100644
--- a/drivers/video/backlight/ili922x.c
+++ b/drivers/video/backlight/ili922x.c
@@ -251,7 +251,7 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
struct spi_transfer xfer_regindex, xfer_regvalue;
unsigned char tbuf[CMD_BUFSIZE];
unsigned char rbuf[CMD_BUFSIZE];
- int ret, len = 0;
+ int ret;
memset(&xfer_regindex, 0, sizeof(struct spi_transfer));
memset(&xfer_regvalue, 0, sizeof(struct spi_transfer));
@@ -273,7 +273,6 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value)
ret = spi_sync(spi, &msg);
spi_message_init(&msg);
- len = 0;
tbuf[0] = set_tx_byte(START_BYTE(ili922x_id, START_RS_REG,
START_RW_WRITE));
tbuf[1] = set_tx_byte((value & 0xFF00) >> 8);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 9bd17682655a..1c2289ddd555 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
- int duty_cycle;
+ u64 duty_cycle;
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
+ duty_cycle *= pb->period - lth;
+ do_div(duty_cycle, pb->scale);
+
+ return duty_cycle + lth;
}
static int pwm_backlight_update_status(struct backlight_device *bl)
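Editor's note: the pwm_bl hunk widens the duty-cycle intermediate to u64 so that duty_cycle * (period - lth) cannot overflow 32 bits, and uses do_div() because a 64-by-32 division must not be open-coded with '/' on 32-bit architectures. A hedged sketch of the pattern; parameter names are illustrative, not the driver's:

#include <linux/types.h>
#include <asm/div64.h>

static u32 scale_duty(u32 level, u32 period, u32 lth, u32 scale)
{
	u64 duty = level;

	duty *= period - lth;	/* may exceed 32 bits for large periods */
	do_div(duty, scale);	/* 64-by-32 divide; quotient left in duty */

	return (u32)duty + lth;
}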
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index fd524ad860a5..380917c86276 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -239,8 +239,7 @@ tps65217_bl_parse_dt(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "default-brightness", &val)) {
- if (val < 0 ||
- val > 100) {
+ if (val > 100) {
dev_err(&pdev->dev,
"invalid 'default-brightness' value in the device tree\n");
err = ERR_PTR(-EINVAL);
@@ -275,17 +274,9 @@ static int tps65217_bl_probe(struct platform_device *pdev)
struct tps65217_bl_pdata *pdata;
struct backlight_properties bl_props;
- if (tps->dev->of_node) {
- pdata = tps65217_bl_parse_dt(pdev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data provided\n");
- return -EINVAL;
- }
- }
+ pdata = tps65217_bl_parse_dt(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
GFP_KERNEL);
diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
index eb2cbec52643..db07b784bd2c 100644
--- a/drivers/video/console/Makefile
+++ b/drivers/video/console/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux graphics to console drivers.
# 5 Aug 1999, James Simmons, <mailto:jsimmons@users.sf.net>
# Rewritten to use lists instead of if-statements.
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5e58f5ec0a28..2f615b7f1c9f 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -905,16 +905,6 @@ config FB_LEO
This is the frame buffer device driver for the SBUS-based Sun ZX
(leo) frame buffer cards.
-config FB_IGA
- bool "IGA 168x display support"
- depends on (FB = y) && SPARC32
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- This is the framebuffer device for the INTERGRAPHICS 1680 and
- successor frame buffer cards.
-
config FB_XVR500
bool "Sun XVR-500 3DLABS Wildcat support"
depends on (FB = y) && PCI && SPARC64
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index ee8c81405a7f..115961e0721b 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux video drivers.
# 5 Aug 1999, James Simmons, <mailto:jsimmons@users.sf.net>
# Rewritten to use lists instead of if-statements.
@@ -64,7 +65,6 @@ obj-$(CONFIG_FB_HGA) += hgafb.o
obj-$(CONFIG_FB_XVR500) += sunxvr500.o
obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
-obj-$(CONFIG_FB_IGA) += igafb.o
obj-$(CONFIG_FB_APOLLO) += dnfb.o
obj-$(CONFIG_FB_Q40) += q40fb.o
obj-$(CONFIG_FB_TGA) += tgafb.o
diff --git a/drivers/video/fbdev/amba-clcd-nomadik.h b/drivers/video/fbdev/amba-clcd-nomadik.h
index a24032c8156e..462c31381fa1 100644
--- a/drivers/video/fbdev/amba-clcd-nomadik.h
+++ b/drivers/video/fbdev/amba-clcd-nomadik.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _AMBA_CLCD_NOMADIK_H
#define _AMBA_CLCD_NOMADIK_H
diff --git a/drivers/video/fbdev/amba-clcd-versatile.h b/drivers/video/fbdev/amba-clcd-versatile.h
index 4692c3092823..b20baa47e6ad 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.h
+++ b/drivers/video/fbdev/amba-clcd-versatile.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Special local versatile callbacks
*/
diff --git a/drivers/video/fbdev/atafb.h b/drivers/video/fbdev/atafb.h
index 014e05906cb1..2b2675980087 100644
--- a/drivers/video/fbdev/atafb.h
+++ b/drivers/video/fbdev/atafb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VIDEO_ATAFB_H
#define _VIDEO_ATAFB_H
diff --git a/drivers/video/fbdev/atafb_utils.h b/drivers/video/fbdev/atafb_utils.h
index ac9e19dc5057..8f3396ea8ae5 100644
--- a/drivers/video/fbdev/atafb_utils.h
+++ b/drivers/video/fbdev/atafb_utils.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VIDEO_ATAFB_UTILS_H
#define _VIDEO_ATAFB_UTILS_H
diff --git a/drivers/video/fbdev/aty/Makefile b/drivers/video/fbdev/aty/Makefile
index a6cc0e9ec790..069465c82d6d 100644
--- a/drivers/video/fbdev/aty/Makefile
+++ b/drivers/video/fbdev/aty/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_ATY) += atyfb.o
obj-$(CONFIG_FB_ATY128) += aty128fb.o
obj-$(CONFIG_FB_RADEON) += radeonfb.o
diff --git a/drivers/video/fbdev/aty/ati_ids.h b/drivers/video/fbdev/aty/ati_ids.h
index 3e9d28bcd9f8..2b589d687b4c 100644
--- a/drivers/video/fbdev/aty/ati_ids.h
+++ b/drivers/video/fbdev/aty/ati_ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ATI PCI IDs from XFree86, kept here to make sync'ing with
* XFree much simpler. Currently, this list is only used by
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 63c4842eb224..8235b285dbb2 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ATI Frame Buffer Device Driver Core Definitions
*/
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 3ec72f19114b..a9a8272f7a6e 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2272,10 +2272,10 @@ static void aty_bl_exit(struct backlight_device *bd)
static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
- const int ragepro_tbl[] = {
+ static const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
};
- const int ragexl_tbl[] = {
+ static const int ragexl_tbl[] = {
50, 66, 75, 83, 90, 95, 100, 105,
110, 115, 120, 125, 133, 143, 166
};
diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index 182bd680141f..2541a0e0de76 100644
--- a/drivers/video/fbdev/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ATI Mach64 Hardware Acceleration
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 51f29d627ceb..7d3bd723d3d5 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ATI Mach64 CT/VT/GT/LT Support
diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index 2fa0317ab3c7..4cde25eab8e8 100644
--- a/drivers/video/fbdev/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ATI Mach64 CT/VT/GT/LT Cursor Support
*/
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 10c988aef58e..27cb65fa2ba2 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ATI Mach64 GX Support
diff --git a/drivers/video/fbdev/aty/radeon_accel.c b/drivers/video/fbdev/aty/radeon_accel.c
index a469a3d6edcb..bb147d8bf82d 100644
--- a/drivers/video/fbdev/aty/radeon_accel.c
+++ b/drivers/video/fbdev/aty/radeon_accel.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "radeonfb.h"
/* the accelerated functions here are patterned after the
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 1e2ec360f8c1..4d77daeecf99 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -1454,9 +1454,9 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg
/*
* Timer function for delayed LVDS panel power up/down
*/
-static void radeon_lvds_timer_func(unsigned long data)
+static void radeon_lvds_timer_func(struct timer_list *t)
{
- struct radeonfb_info *rinfo = (struct radeonfb_info *)data;
+ struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer);
radeon_engine_idle();
@@ -1534,7 +1534,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode,
static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs *regs,
unsigned long freq)
{
- const struct {
+ static const struct {
int divider;
int bitvalue;
} *post_div,
@@ -2291,9 +2291,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev,
rinfo->pdev = pdev;
spin_lock_init(&rinfo->reg_lock);
- init_timer(&rinfo->lvds_timer);
- rinfo->lvds_timer.function = radeon_lvds_timer_func;
- rinfo->lvds_timer.data = (unsigned long)rinfo;
+ timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0);
c1 = ent->device >> 8;
c2 = ent->device & 0xff;
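The timer conversions in this series all follow the same shape as the radeon hunk above; a minimal sketch of the pattern with hypothetical demo_* names, not the driver's own:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_state {                     /* hypothetical driver state */
        struct timer_list lvds_timer;   /* timer embedded in the state */
        int panel_on;
};

static void demo_timer_fn(struct timer_list *t)
{
        /* from_timer() is container_of(): recover the enclosing structure
         * from the embedded timer instead of casting the old ->data field. */
        struct demo_state *st = from_timer(st, t, lvds_timer);

        st->panel_on = 1;
}

static void demo_timer_init(struct demo_state *st)
{
        /* replaces init_timer() plus manual .function/.data assignment */
        timer_setup(&st->lvds_timer, demo_timer_fn, 0);
        mod_timer(&st->lvds_timer, jiffies + HZ);
}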
diff --git a/drivers/video/fbdev/aty/radeon_i2c.c b/drivers/video/fbdev/aty/radeon_i2c.c
index ab1d0fd76316..269b12ebb673 100644
--- a/drivers/video/fbdev/aty/radeon_i2c.c
+++ b/drivers/video/fbdev/aty/radeon_i2c.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "radeonfb.h"
#include <linux/module.h>
diff --git a/drivers/video/fbdev/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index dd823f5fe4c9..9966c58aa26c 100644
--- a/drivers/video/fbdev/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "radeonfb.h"
#include <linux/slab.h>
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 1417542738fc..7137c12cbcee 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/video/aty/radeon_pm.c
*
@@ -1207,9 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (mc & 0x4)
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET;
+ /* fall through */
case 0:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET;
@@ -1218,6 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo)
case 1:
if (!(mc & 0x4))
break;
+ /* fall through */
case 2:
dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP;
dll_reset_mask |= MDLL_R300_RDCK__MRDCKD_RESET;
diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index 962e31263225..131b34dd65af 100644
--- a/drivers/video/fbdev/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RADEONFB_H__
#define __RADEONFB_H__
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 5f04b4096c42..87d5a62bf6ca 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1518,7 +1518,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
{
struct fb_info *fbi = fbdev->fb_info;
- int bpp;
+ int bpp, ret;
fbi->fbops = &au1200fb_fb_ops;
@@ -1546,15 +1546,14 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
}
fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL);
- if (!fbi->pseudo_palette) {
+ if (!fbi->pseudo_palette)
return -ENOMEM;
- }
- if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
+ ret = fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0);
+ if (ret < 0) {
print_err("Fail to allocate colormap (%d entries)",
- AU1200_LCD_NBR_PALETTE_ENTRIES);
- kfree(fbi->pseudo_palette);
- return -EFAULT;
+ AU1200_LCD_NBR_PALETTE_ENTRIES);
+ return ret;
}
strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id));
@@ -1668,10 +1667,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
- /* shut gcc up */
- ret = 0;
- fbdev = NULL;
-
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
@@ -1681,8 +1676,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
&dev->dev);
- if (!fbi)
+ if (!fbi) {
+ ret = -ENOMEM;
goto failed;
+ }
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
@@ -1701,7 +1698,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto failed;
}
/*
@@ -1718,7 +1716,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
/* Init FB data */
- if ((ret = au1200fb_init_fbinfo(fbdev)) < 0)
+ ret = au1200fb_init_fbinfo(fbdev);
+ if (ret < 0)
goto failed;
/* Register new framebuffer */
@@ -1758,21 +1757,26 @@ static int au1200fb_drv_probe(struct platform_device *dev)
return 0;
failed:
- /* NOTE: This only does the current plane/window that failed; others are still active */
- if (fbi) {
+ for (plane = 0; plane < device_count; ++plane) {
+ fbi = _au1200fb_infos[plane];
+ if (!fbi)
+ break;
+
+ /* Clean up all probe data */
+ unregister_framebuffer(fbi);
if (fbi->cmap.len != 0)
fb_dealloc_cmap(&fbi->cmap);
kfree(fbi->pseudo_palette);
+
+ framebuffer_release(fbi);
+ _au1200fb_infos[plane] = NULL;
}
- if (plane == 0)
- free_irq(AU1200_LCD_INT, (void*)dev);
return ret;
}
static int au1200fb_drv_remove(struct platform_device *dev)
{
struct au1200fb_platdata *pd = platform_get_drvdata(dev);
- struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
@@ -1781,7 +1785,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
- fbdev = fbi->par;
/* Clean up all probe data */
unregister_framebuffer(fbi);
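The au1200fb probe rework above funnels every failure through a single unwind path instead of returning early with resources still held. A rough sketch of that goto pattern, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/platform_device.h>

struct demo_par { int plane; };         /* hypothetical per-device data */

static int demo_probe(struct platform_device *pdev)
{
        struct fb_info *fbi;
        int ret;

        fbi = framebuffer_alloc(sizeof(struct demo_par), &pdev->dev);
        if (!fbi) {
                ret = -ENOMEM;          /* always set ret before jumping */
                goto failed;
        }

        ret = register_framebuffer(fbi);
        if (ret < 0)
                goto release;

        platform_set_drvdata(pdev, fbi);
        return 0;

release:
        framebuffer_release(fbi);       /* undo only what already succeeded */
failed:
        return ret;
}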
diff --git a/drivers/video/fbdev/carminefb.h b/drivers/video/fbdev/carminefb.h
index 05306de0c6b6..297688eba469 100644
--- a/drivers/video/fbdev/carminefb.h
+++ b/drivers/video/fbdev/carminefb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef CARMINE_CARMINE_H
#define CARMINE_CARMINE_H
diff --git a/drivers/video/fbdev/carminefb_regs.h b/drivers/video/fbdev/carminefb_regs.h
index 045215600b73..ae18318d2fc2 100644
--- a/drivers/video/fbdev/carminefb_regs.h
+++ b/drivers/video/fbdev/carminefb_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _CARMINEFB_REGS_H
#define _CARMINEFB_REGS_H
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index d992aa5eb3f0..b3be06dd2908 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -1477,10 +1477,12 @@ static void init_vgachip(struct fb_info *info)
mdelay(100);
/* mode */
vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
- case BT_GD5480: /* fall through */
+ /* fall through */
+ case BT_GD5480:
/* from Klaus' NetBSD driver: */
vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
- case BT_ALPINE: /* fall through */
+ /* fall through */
+ case BT_ALPINE:
/* put blitter into 542x compat */
vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
break;
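The /* fall through */ annotations added above (and in sisfb, sm501fb and radeon_pm elsewhere in this series) mark deliberate case fall-through for -Wimplicit-fallthrough. A minimal sketch with made-up values; the comment has to sit directly before the next case label:

static unsigned int demo_init_flags(int chip)
{
        unsigned int flags = 0;

        switch (chip) {
        case 1:
                flags |= 0x01;          /* chip 1 also needs the chip 2 setup */
                /* fall through */
        case 2:
                flags |= 0x02;
                break;
        default:
                break;
        }
        return flags;
}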
diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60fc100..261522fabdac 100644
--- a/drivers/video/fbdev/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
{{ 1, 2}}, /* 1152x870, 75Hz */
{{ 0, 1}}, /* 1280x960, 75Hz */
{{ 0, 1}}, /* 1280x1024, 75Hz */
+ {{ 1, 2}}, /* 1152x768, 60Hz */
+ {{ 0, 1}}, /* 1600x1024, 60Hz */
};
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 73493bbd7a15..d34fd182ca68 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_CMDLINE) += fb_cmdline.o
obj-$(CONFIG_FB_NOTIFY) += fb_notify.o
obj-$(CONFIG_FB) += fb.o
diff --git a/drivers/video/fbdev/core/fb_draw.h b/drivers/video/fbdev/core/fb_draw.h
index 624ee115f129..e0d829873930 100644
--- a/drivers/video/fbdev/core/fb_draw.h
+++ b/drivers/video/fbdev/core/fb_draw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FB_DRAW_H
#define _FB_DRAW_H
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 04612f938bab..929ca472c524 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -395,10 +395,10 @@ static void fb_flashcursor(struct work_struct *work)
console_unlock();
}
-static void cursor_timer_handler(unsigned long dev_addr)
+static void cursor_timer_handler(struct timer_list *t)
{
- struct fb_info *info = (struct fb_info *) dev_addr;
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
+ struct fb_info *info = ops->info;
queue_work(system_power_efficient_wq, &info->queue);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
@@ -414,8 +414,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
if (!info->queue.func)
INIT_WORK(&info->queue, fb_flashcursor);
- setup_timer(&ops->cursor_timer, cursor_timer_handler,
- (unsigned long) info);
+ timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
}
@@ -714,6 +713,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
if (!err) {
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (vc)
@@ -962,6 +962,7 @@ static const char *fbcon_startup(void)
ops->graphics = 1;
ops->cur_rotate = -1;
ops->cur_blink_jiffies = HZ / 5;
+ ops->info = info;
info->fbcon_par = ops;
if (initial_rotation != -1)
p->con_rotate = initial_rotation;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 18f3ac144237..9f7744fbc962 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -69,6 +69,7 @@ struct fbcon_ops {
struct timer_list cursor_timer; /* Cursor timer */
struct fb_cursor cursor_state;
struct display *p;
+ struct fb_info *info;
int currcon; /* Current VC. */
int cur_blink_jiffies;
int cursor_flash;
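from_timer() only recovers the structure that embeds the timer, which is why the fbcon hunks above add a struct fb_info back-pointer to fbcon_ops. A rough sketch of the idea with stand-in demo_* types:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_info;                       /* stands in for struct fb_info */

struct demo_ops {                       /* stands in for struct fbcon_ops */
        struct timer_list cursor_timer; /* the timer is embedded here ... */
        struct demo_info *info;         /* ... so anything else the callback
                                         * needs must be reachable from the
                                         * container, hence the back-pointer */
};

static void demo_cursor_tick(struct timer_list *t)
{
        struct demo_ops *ops = from_timer(ops, t, cursor_timer);
        struct demo_info *info = ops->info;    /* old code cast ->data instead */

        (void)info;                     /* ... queue work against info ... */
        mod_timer(&ops->cursor_timer, jiffies + HZ / 5);
}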
diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c
index 7b1492d34e98..5505fa00c634 100644
--- a/drivers/video/fbdev/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
@@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-struct fb_var_screeninfo dnfb_var = {
+static const struct fb_var_screeninfo dnfb_var = {
.xres = 1280,
.yres = 1024,
.xres_virtual = 2048,
@@ -242,16 +242,13 @@ static int dnfb_probe(struct platform_device *dev)
info->screen_base = (u_char *) info->fix.smem_start;
err = fb_alloc_cmap(&info->cmap, 2, 0);
- if (err < 0) {
- framebuffer_release(info);
- return err;
- }
+ if (err < 0)
+ goto release_framebuffer;
err = register_framebuffer(info);
if (err < 0) {
fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
- return err;
+ goto release_framebuffer;
}
platform_set_drvdata(dev, info);
@@ -265,6 +262,10 @@ static int dnfb_probe(struct platform_device *dev)
printk("apollo frame buffer alive and kicking !\n");
return err;
+
+release_framebuffer:
+ framebuffer_release(info);
+ return err;
}
static struct platform_driver dnfb_driver = {
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3a010641f630..d1bf9c2a78a7 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Framebuffer driver for EFI/UEFI based system
*
diff --git a/drivers/video/fbdev/geode/Makefile b/drivers/video/fbdev/geode/Makefile
index 5c98da126883..16ba8a71dc52 100644
--- a/drivers/video/fbdev/geode/Makefile
+++ b/drivers/video/fbdev/geode/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Geode family framebuffer drivers
obj-$(CONFIG_FB_GEODE_GX1) += gx1fb.o
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 7f6c9e6cfc6c..3b70044773b6 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -304,12 +304,18 @@ static int goldfish_fb_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_fb_of_match[] = {
+ { .compatible = "google,goldfish-fb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
.remove = goldfish_fb_remove,
.driver = {
- .name = "goldfish_fb"
+ .name = "goldfish_fb",
+ .of_match_table = goldfish_fb_of_match,
}
};
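The goldfish change above lets the driver bind by device-tree compatible string and autoload as a module. A minimal sketch of the pattern for a hypothetical platform driver (compatible string and names are placeholders):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_fb_of_match[] = {
        { .compatible = "vendor,demo-fb" },     /* hypothetical compatible */
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_fb_of_match);      /* enables module autoload */

static struct platform_driver demo_fb_driver = {
        .driver = {
                .name           = "demo-fb",
                .of_match_table = demo_fb_of_match,     /* enables DT probing */
        },
};
module_platform_driver(demo_fb_driver);

MODULE_LICENSE("GPL");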
diff --git a/drivers/video/fbdev/i810/Makefile b/drivers/video/fbdev/i810/Makefile
index 96e08c8ded97..3e466510fe05 100644
--- a/drivers/video/fbdev/i810/Makefile
+++ b/drivers/video/fbdev/i810/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Intel 810/815 framebuffer driver
#
diff --git a/drivers/video/fbdev/igafb.c b/drivers/video/fbdev/igafb.c
deleted file mode 100644
index 486f18897414..000000000000
--- a/drivers/video/fbdev/igafb.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682
- *
- * Copyright (C) 1998 Vladimir Roganov and Gleb Raiko
- *
- * This driver is partly based on the Frame buffer device for ATI Mach64
- * and partially on VESA-related code.
- *
- * Copyright (C) 1997-1998 Geert Uytterhoeven
- * Copyright (C) 1998 Bernd Harries
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-/******************************************************************************
-
- TODO:
- Despite of IGA Card has advanced graphic acceleration,
- initial version is almost dummy and does not support it.
- Support for video modes and acceleration must be added
- together with accelerated X-Windows driver implementation.
-
- Most important thing at this moment is that we have working
- JavaEngine1 console & X with new console interface.
-
-******************************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/nvram.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_SPARC
-#include <asm/prom.h>
-#include <asm/pcic.h>
-#endif
-
-#include <video/iga.h>
-
-struct pci_mmap_map {
- unsigned long voff;
- unsigned long poff;
- unsigned long size;
- unsigned long prot_flag;
- unsigned long prot_mask;
-};
-
-struct iga_par {
- struct pci_mmap_map *mmap_map;
- unsigned long frame_buffer_phys;
- unsigned long io_base;
-};
-
-struct fb_info fb_info;
-
-struct fb_fix_screeninfo igafb_fix __initdata = {
- .id = "IGA 1682",
- .type = FB_TYPE_PACKED_PIXELS,
- .mmio_len = 1000
-};
-
-struct fb_var_screeninfo default_var = {
- /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
- .xres = 640,
- .yres = 480,
- .xres_virtual = 640,
- .yres_virtual = 480,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 39722,
- .left_margin = 48,
- .right_margin = 16,
- .upper_margin = 33,
- .lower_margin = 10,
- .hsync_len = 96,
- .vsync_len = 2,
- .vmode = FB_VMODE_NONINTERLACED
-};
-
-#ifdef CONFIG_SPARC
-struct fb_var_screeninfo default_var_1024x768 __initdata = {
- /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */
- .xres = 1024,
- .yres = 768,
- .xres_virtual = 1024,
- .yres_virtual = 768,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 12699,
- .left_margin = 176,
- .right_margin = 16,
- .upper_margin = 28,
- .lower_margin = 1,
- .hsync_len = 96,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1152x900 __initdata = {
- /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */
- .xres = 1152,
- .yres = 900,
- .xres_virtual = 1152,
- .yres_virtual = 900,
- .bits_per_pixel = 8,
- .red = { 0, 8, 0 },
- .green = { 0, 8, 0 },
- .blue = { 0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = FB_ACCEL_NONE,
- .pixclock = 9091,
- .left_margin = 234,
- .right_margin = 24,
- .upper_margin = 34,
- .lower_margin = 3,
- .hsync_len = 100,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-struct fb_var_screeninfo default_var_1280x1024 __initdata = {
- /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */
- .xres = 1280,
- .yres = 1024,
- .xres_virtual = 1280,
- .yres_virtual = 1024,
- .bits_per_pixel = 8,
- .red = {0, 8, 0 },
- .green = {0, 8, 0 },
- .blue = {0, 8, 0 },
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .pixclock = 7408,
- .left_margin = 248,
- .right_margin = 16,
- .upper_margin = 38,
- .lower_margin = 1,
- .hsync_len = 144,
- .vsync_len = 3,
- .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-};
-
-/*
- * Memory-mapped I/O functions for Sparc PCI
- *
- * On sparc we happen to access I/O with memory mapped functions too.
- */
-#define pci_inb(par, reg) readb(par->io_base+(reg))
-#define pci_outb(par, val, reg) writeb(val, par->io_base+(reg))
-
-static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg,
- unsigned int idx)
-{
- pci_outb(par, idx, reg);
- return pci_inb(par, reg + 1);
-}
-
-static inline void iga_outb(struct iga_par *par, unsigned char val,
- unsigned int reg, unsigned int idx )
-{
- pci_outb(par, idx, reg);
- pci_outb(par, val, reg+1);
-}
-
-#endif /* CONFIG_SPARC */
-
-/*
- * Very important functionality for the JavaEngine1 computer:
- * make screen border black (usign special IGA registers)
- */
-static void iga_blank_border(struct iga_par *par)
-{
- int i;
-#if 0
- /*
- * PROM does this for us, so keep this code as a reminder
- * about required read from 0x3DA and writing of 0x20 in the end.
- */
- (void) pci_inb(par, 0x3DA); /* required for every access */
- pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL);
- (void) pci_inb(par, IGA_ATTR_CTL+1);
- pci_outb(par, 0x38, IGA_ATTR_CTL);
- pci_outb(par, 0x20, IGA_ATTR_CTL); /* re-enable visual */
-#endif
- /*
- * This does not work as it was designed because the overscan
- * color is looked up in the palette. Therefore, under X11
- * overscan changes color.
- */
- for (i=0; i < 3; i++)
- iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i);
-}
-
-#ifdef CONFIG_SPARC
-static int igafb_mmap(struct fb_info *info,
- struct vm_area_struct *vma)
-{
- struct iga_par *par = (struct iga_par *)info->par;
- unsigned int size, page, map_size = 0;
- unsigned long map_offset = 0;
- int i;
-
- if (!par->mmap_map)
- return -ENXIO;
-
- size = vma->vm_end - vma->vm_start;
-
- /* Each page, see which map applies */
- for (page = 0; page < size; ) {
- map_size = 0;
- for (i = 0; par->mmap_map[i].size; i++) {
- unsigned long start = par->mmap_map[i].voff;
- unsigned long end = start + par->mmap_map[i].size;
- unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page;
-
- if (start > offset)
- continue;
- if (offset >= end)
- continue;
-
- map_size = par->mmap_map[i].size - (offset - start);
- map_offset = par->mmap_map[i].poff + (offset - start);
- break;
- }
- if (!map_size) {
- page += PAGE_SIZE;
- continue;
- }
- if (page + map_size > size)
- map_size = size - page;
-
- pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
- pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
-
- if (remap_pfn_range(vma, vma->vm_start + page,
- map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
- return -EAGAIN;
-
- page += map_size;
- }
-
- if (!map_size)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO;
- return 0;
-}
-#endif /* CONFIG_SPARC */
-
-static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- /*
- * Set a single color register. The values supplied are
- * already rounded down to the hardware's capabilities
- * (according to the entries in the `var' structure). Return
- * != 0 for invalid regno.
- */
- struct iga_par *par = (struct iga_par *)info->par;
-
- if (regno >= info->cmap.len)
- return 1;
-
- pci_outb(par, regno, DAC_W_INDEX);
- pci_outb(par, red, DAC_DATA);
- pci_outb(par, green, DAC_DATA);
- pci_outb(par, blue, DAC_DATA);
-
- if (regno < 16) {
- switch (info->var.bits_per_pixel) {
- case 16:
- ((u16*)(info->pseudo_palette))[regno] =
- (regno << 10) | (regno << 5) | regno;
- break;
- case 24:
- ((u32*)(info->pseudo_palette))[regno] =
- (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- { int i;
- i = (regno << 8) | regno;
- ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i;
- }
- break;
- }
- }
- return 0;
-}
-
-/*
- * Framebuffer option structure
- */
-static struct fb_ops igafb_ops = {
- .owner = THIS_MODULE,
- .fb_setcolreg = igafb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-#ifdef CONFIG_SPARC
- .fb_mmap = igafb_mmap,
-#endif
-};
-
-static int __init iga_init(struct fb_info *info, struct iga_par *par)
-{
- char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL)
- & MEM_SIZE_ALIAS;
- int video_cmap_len;
-
- switch (vramsz) {
- case MEM_SIZE_1M:
- info->fix.smem_len = 0x100000;
- break;
- case MEM_SIZE_2M:
- info->fix.smem_len = 0x200000;
- break;
- case MEM_SIZE_4M:
- case MEM_SIZE_RESERVED:
- info->fix.smem_len = 0x400000;
- break;
- }
-
- if (info->var.bits_per_pixel > 8)
- video_cmap_len = 16;
- else
- video_cmap_len = 256;
-
- info->fbops = &igafb_ops;
- info->flags = FBINFO_DEFAULT;
-
- fb_alloc_cmap(&info->cmap, video_cmap_len, 0);
-
- if (register_framebuffer(info) < 0)
- return 0;
-
- fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n",
- info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20);
-
- iga_blank_border(par);
- return 1;
-}
-
-static int __init igafb_init(void)
-{
- struct fb_info *info;
- struct pci_dev *pdev;
- struct iga_par *par;
- unsigned long addr;
- int size, iga2000 = 0;
-
- if (fb_get_options("igafb", NULL))
- return -ENODEV;
-
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG,
- PCI_DEVICE_ID_INTERG_1682, 0);
- if (pdev == NULL) {
- /*
- * XXX We tried to use cyber2000fb.c for IGS 2000.
- * But it does not initialize the chip in JavaStation-E, alas.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0);
- if(pdev == NULL) {
- return -ENXIO;
- }
- iga2000 = 1;
- }
- /* We leak a reference here but as it cannot be unloaded this is
- fine. If you write unload code remember to free it in unload */
-
- size = sizeof(struct iga_par) + sizeof(u32)*16;
-
- info = framebuffer_alloc(size, &pdev->dev);
- if (!info) {
- printk("igafb_init: can't alloc fb_info\n");
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- par = info->par;
-
- if ((addr = pdev->resource[0].start) == 0) {
- printk("igafb_init: no memory start\n");
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
- printk("igafb_init: can't remap %lx[2M]\n", addr);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK;
-
-#ifdef CONFIG_SPARC
- /*
- * The following is sparc specific and this is why:
- *
- * IGS2000 has its I/O memory mapped and we want
- * to generate memory cycles on PCI, e.g. do ioremap(),
- * then readb/writeb() as in Documentation/io-mapping.txt.
- *
- * IGS1682 is more traditional, it responds to PCI I/O
- * cycles, so we want to access it with inb()/outb().
- *
- * On sparc, PCIC converts CPU memory access within
- * phys window 0x3000xxxx into PCI I/O cycles. Therefore
- * we may use readb/writeb to access them with IGS1682.
- *
- * We do not take io_base_phys from resource[n].start
- * on IGS1682 because that chip is BROKEN. It does not
- * have a base register for I/O. We just "know" what its
- * I/O addresses are.
- */
- if (iga2000) {
- igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000;
- } else {
- igafb_fix.mmio_start = 0x30000000; /* XXX */
- }
- if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) {
- printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
- iounmap((void *)info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENXIO;
- }
-
- /*
- * Figure mmap addresses from PCI config space.
- * We need two regions: for video memory and for I/O ports.
- * Later one can add region for video coprocessor registers.
- * However, mmap routine loops until size != 0, so we put
- * one additional region with size == 0.
- */
-
- par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC);
- if (!par->mmap_map) {
- printk("igafb_init: can't alloc mmap_map\n");
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(info);
- pci_dev_put(pdev);
- return -ENOMEM;
- }
-
- /*
- * Set default vmode and cmode from PROM properties.
- */
- {
- struct device_node *dp = pci_device_to_OF_node(pdev);
- int node = dp->node;
- int width = prom_getintdefault(node, "width", 1024);
- int height = prom_getintdefault(node, "height", 768);
- int depth = prom_getintdefault(node, "depth", 8);
- switch (width) {
- case 1024:
- if (height == 768)
- default_var = default_var_1024x768;
- break;
- case 1152:
- if (height == 900)
- default_var = default_var_1152x900;
- break;
- case 1280:
- if (height == 1024)
- default_var = default_var_1280x1024;
- break;
- default:
- break;
- }
-
- switch (depth) {
- case 8:
- default_var.bits_per_pixel = 8;
- break;
- case 16:
- default_var.bits_per_pixel = 16;
- break;
- case 24:
- default_var.bits_per_pixel = 24;
- break;
- case 32:
- default_var.bits_per_pixel = 32;
- break;
- default:
- break;
- }
- }
-
-#endif
- igafb_fix.smem_start = (unsigned long) info->screen_base;
- igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8);
- igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
-
- info->var = default_var;
- info->fix = igafb_fix;
- info->pseudo_palette = (void *)(par + 1);
-
- if (!iga_init(info, par)) {
- iounmap((void *)par->io_base);
- iounmap(info->screen_base);
- kfree(par->mmap_map);
- kfree(info);
- return -ENODEV;
- }
-
-#ifdef CONFIG_SPARC
- /*
- * Add /dev/fb mmap values.
- */
-
- /* First region is for video memory */
- par->mmap_map[0].voff = 0x0;
- par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK;
- par->mmap_map[0].prot_mask = SRMMU_CACHE;
- par->mmap_map[0].prot_flag = SRMMU_WRITE;
-
- /* Second region is for I/O ports */
- par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK;
- par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK;
- par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */
- par->mmap_map[1].prot_mask = SRMMU_CACHE;
- par->mmap_map[1].prot_flag = SRMMU_WRITE;
-#endif /* CONFIG_SPARC */
-
- return 0;
-}
-
-static int __init igafb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- }
- return 0;
-}
-
-module_init(igafb_init);
-MODULE_LICENSE("GPL");
-static struct pci_device_id igafb_pci_tbl[] = {
- { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, igafb_pci_tbl);
diff --git a/drivers/video/fbdev/intelfb/Makefile b/drivers/video/fbdev/intelfb/Makefile
index f7d631ebee8e..7ff2debb31af 100644
--- a/drivers/video/fbdev/intelfb/Makefile
+++ b/drivers/video/fbdev/intelfb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_INTEL) += intelfb.o
intelfb-y := intelfbdrv.o intelfbhw.o
diff --git a/drivers/video/fbdev/intelfb/intelfb.h b/drivers/video/fbdev/intelfb/intelfb.h
index 37f8339ea88c..b54db05f028d 100644
--- a/drivers/video/fbdev/intelfb/intelfb.h
+++ b/drivers/video/fbdev/intelfb/intelfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTELFB_H
#define _INTELFB_H
diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index d31ed4e2c46f..83fec573cceb 100644
--- a/drivers/video/fbdev/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
@@ -937,15 +937,11 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
{
u32 m1, m2, n, p1, p2, n1, testm;
u32 f_vco, p, p_best = 0, m, f_out = 0;
- u32 err_max, err_target, err_best = 10000000;
- u32 n_best = 0, m_best = 0, f_best, f_err;
+ u32 err_best = 10000000;
+ u32 n_best = 0, m_best = 0, f_err;
u32 p_min, p_max, p_inc, div_max;
struct pll_min_max *pll = &plls[index];
- /* Accept 0.5% difference, but aim for 0.1% */
- err_max = 5 * clock / 1000;
- err_target = clock / 1000;
-
DBG_MSG("Clock is %d\n", clock);
div_max = pll->max_vco / clock;
@@ -992,7 +988,6 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2,
m_best = testm;
n_best = n;
p_best = p;
- f_best = f_out;
err_best = f_err;
}
}
diff --git a/drivers/video/fbdev/matrox/g450_pll.h b/drivers/video/fbdev/matrox/g450_pll.h
index aac615d18440..5303336c6547 100644
--- a/drivers/video/fbdev/matrox/g450_pll.h
+++ b/drivers/video/fbdev/matrox/g450_pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __G450_PLL_H__
#define __G450_PLL_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.h b/drivers/video/fbdev/matrox/matroxfb_DAC1064.h
index 1e6e45b57b78..3b2a6fd35fff 100644
--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.h
+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_DAC1064_H__
#define __MATROXFB_DAC1064_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.h b/drivers/video/fbdev/matrox/matroxfb_Ti3026.h
index 27872aaa0a17..faee149d0ba0 100644
--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.h
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_TI3026_H__
#define __MATROXFB_TI3026_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_accel.h b/drivers/video/fbdev/matrox/matroxfb_accel.h
index 1e418e62c22d..a7aa9a1d26c0 100644
--- a/drivers/video/fbdev/matrox/matroxfb_accel.h
+++ b/drivers/video/fbdev/matrox/matroxfb_accel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_ACCEL_H__
#define __MATROXFB_ACCEL_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index b9b284d79631..838869c6490c 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -2056,7 +2056,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo)
- return -1;
+ return -ENOMEM;
minfo->pcidev = pdev;
minfo->dead = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index 7a90ea2c4613..f85ad25659e5 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
*
* Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450
diff --git a/drivers/video/fbdev/matrox/matroxfb_crtc2.h b/drivers/video/fbdev/matrox/matroxfb_crtc2.h
index 1005582e843e..23e90e210905 100644
--- a/drivers/video/fbdev/matrox/matroxfb_crtc2.h
+++ b/drivers/video/fbdev/matrox/matroxfb_crtc2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_CRTC2_H__
#define __MATROXFB_CRTC2_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_g450.h b/drivers/video/fbdev/matrox/matroxfb_g450.h
index 3a3e654444b8..b5f17b86eae5 100644
--- a/drivers/video/fbdev/matrox/matroxfb_g450.h
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_G450_H__
#define __MATROXFB_G450_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.h b/drivers/video/fbdev/matrox/matroxfb_maven.h
index 99eddec9f30c..f896b78836b1 100644
--- a/drivers/video/fbdev/matrox/matroxfb_maven.h
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_MAVEN_H__
#define __MATROXFB_MAVEN_H__
diff --git a/drivers/video/fbdev/matrox/matroxfb_misc.h b/drivers/video/fbdev/matrox/matroxfb_misc.h
index 351c823f1f74..9cb6686d309e 100644
--- a/drivers/video/fbdev/matrox/matroxfb_misc.h
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MATROXFB_MISC_H__
#define __MATROXFB_MISC_H__
diff --git a/drivers/video/fbdev/mb862xx/mb862xx_reg.h b/drivers/video/fbdev/mb862xx/mb862xx_reg.h
index 9df48b8edc94..e6d0513958c2 100644
--- a/drivers/video/fbdev/mb862xx/mb862xx_reg.h
+++ b/drivers/video/fbdev/mb862xx/mb862xx_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Fujitsu MB862xx Graphics Controller Registers/Bits
*/
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb.h b/drivers/video/fbdev/mb862xx/mb862xxfb.h
index 8550630c1e01..50bc9b584ca1 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MB862XX_H__
#define __MB862XX_H__
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h
index 96a2dfef0f60..3a2549f9a48f 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MB826XXFB_ACCEL_H__
#define __MB826XXFB_ACCEL_H__
diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c
index 2528d3e609a4..2bd328883178 100644
--- a/drivers/video/fbdev/mbx/mbxdebugfs.c
+++ b/drivers/video/fbdev/mbx/mbxdebugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/slab.h>
diff --git a/drivers/video/fbdev/mbx/reg_bits.h b/drivers/video/fbdev/mbx/reg_bits.h
index 5f14b4befd71..6607f353639b 100644
--- a/drivers/video/fbdev/mbx/reg_bits.h
+++ b/drivers/video/fbdev/mbx/reg_bits.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __REG_BITS_2700G_
#define __REG_BITS_2700G_
diff --git a/drivers/video/fbdev/mbx/regs.h b/drivers/video/fbdev/mbx/regs.h
index 063099d48839..591fc9d26084 100644
--- a/drivers/video/fbdev/mbx/regs.h
+++ b/drivers/video/fbdev/mbx/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __REGS_2700G_
#define __REGS_2700G_
diff --git a/drivers/video/fbdev/mmp/panel/Kconfig b/drivers/video/fbdev/mmp/panel/Kconfig
index 4b2c4f457b11..808890f7064b 100644
--- a/drivers/video/fbdev/mmp/panel/Kconfig
+++ b/drivers/video/fbdev/mmp/panel/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config MMP_PANEL_TPOHVGA
bool "tpohvga panel TJ032MD01BW support"
depends on SPI_MASTER
diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index 7846f0e8bbbb..79b1dc7f042b 100644
--- a/drivers/video/fbdev/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
@@ -150,7 +150,7 @@
#define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6)
-#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */
+#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */
enum mxsfb_devtype {
MXSFB_V3,
@@ -788,7 +788,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+
+ /*
+ * The PIXDATA flags of the display_flags enum are controller
+ * centric, e.g. NEGEDGE means drive data on negative edge.
+ * However, the drivers flag is display centric: Sample the
+ * data on negative (falling) edge. Therefore, check for the
+ * POSEDGE flag:
+ * drive on positive edge => sample on negative edge
+ */
+ if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT;
put_display_node:
diff --git a/drivers/video/fbdev/nvidia/Makefile b/drivers/video/fbdev/nvidia/Makefile
index 917d3eb05feb..cdd6e8ac454a 100644
--- a/drivers/video/fbdev/nvidia/Makefile
+++ b/drivers/video/fbdev/nvidia/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the nVidia framebuffer driver
#
diff --git a/drivers/video/fbdev/nvidia/nv_proto.h b/drivers/video/fbdev/nvidia/nv_proto.h
index 878a5ce02299..fb9c5ebf2958 100644
--- a/drivers/video/fbdev/nvidia/nv_proto.h
+++ b/drivers/video/fbdev/nvidia/nv_proto.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_proto.h,v 1.10 2003/07/31 20:24:29 mvojkovi Exp $ */
#ifndef __NV_PROTO_H__
diff --git a/drivers/video/fbdev/nvidia/nv_type.h b/drivers/video/fbdev/nvidia/nv_type.h
index 6ff321a36813..d7a1d4363d5f 100644
--- a/drivers/video/fbdev/nvidia/nv_type.h
+++ b/drivers/video/fbdev/nvidia/nv_type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV_TYPE_H__
#define __NV_TYPE_H__
diff --git a/drivers/video/fbdev/omap/Makefile b/drivers/video/fbdev/omap/Makefile
index 732e0718be53..daaa73a94e7f 100644
--- a/drivers/video/fbdev/omap/Makefile
+++ b/drivers/video/fbdev/omap/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the OMAP1 framebuffer device driver
#
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index a4ee65b8f918..6199d4806193 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -474,7 +474,7 @@ static void auto_update_complete(void *data)
jiffies + HWA742_AUTO_UPDATE_TIME);
}
-static void hwa742_update_window_auto(unsigned long arg)
+static void hwa742_update_window_auto(struct timer_list *unused)
{
LIST_HEAD(req_list);
struct hwa742_request *last;
@@ -1002,9 +1002,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
hwa742.auto_update_window.height = fbdev->panel->y_res;
hwa742.auto_update_window.format = 0;
- init_timer(&hwa742.auto_update_timer);
- hwa742.auto_update_timer.function = hwa742_update_window_auto;
- hwa742.auto_update_timer.data = 0;
+ timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0);
hwa742.prev_color_mode = -1;
hwa742.prev_flags = 0;
diff --git a/drivers/video/fbdev/omap/lcdc.h b/drivers/video/fbdev/omap/lcdc.h
index 845222270db3..8a7607d861c1 100644
--- a/drivers/video/fbdev/omap/lcdc.h
+++ b/drivers/video/fbdev/omap/lcdc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LCDC_H
#define LCDC_H
diff --git a/drivers/video/fbdev/omap2/omapfb/Makefile b/drivers/video/fbdev/omap2/omapfb/Makefile
index ad68ecf141af..602edfed09df 100644
--- a/drivers/video/fbdev/omap2/omapfb/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-y += dss/
obj-y += displays/
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/Makefile b/drivers/video/fbdev/omap2/omapfb/displays/Makefile
index 4f7459272256..f801762ce455 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/displays/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_OMAP2_ENCODER_OPA362) += encoder-opa362.o
obj-$(CONFIG_FB_OMAP2_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_FB_OMAP2_ENCODER_TPD12S015) += encoder-tpd12s015.o
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
index 27d220212870..6d0bb27e4f85 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config FB_OMAP2_DSS_INIT
bool
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/Makefile b/drivers/video/fbdev/omap2/omapfb/dss/Makefile
index 02308e24f3ef..7318d5260e8d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/dss/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_FB_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_FB_OMAP2_DSS) += omapdss.o
# Core DSS files
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 30d49f3800b3..8e1d60d48dbb 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -3988,7 +3988,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5298,9 +5298,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 189a5ad125a3..f13d7639826d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define DSS_SUBSYS_NAME "HDMI"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 1d7c012f09db..e08e5664e330 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1477,7 +1477,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
static int omapfb_parse_vram_param(const char *param, int max_entries,
unsigned long *sizes, unsigned long *paddrs)
{
- int fbnum;
+ unsigned int fbnum;
unsigned long size;
unsigned long paddr = 0;
char *p, *start;
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 867c5218968f..a582d3ae7ac1 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,9 +686,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
- FOLL_WRITE);
-
+ ret = get_user_pages_fast((unsigned long)buf, nr_pages, true, pages);
if (ret < nr_pages) {
nr_pages = ret;
ret = -EINVAL;
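pvr2fb above (and fsl_hypervisor later in this series) switch to get_user_pages_fast(), whose third argument in this kernel generation is the write flag and whose return value may be a short count. A hedged sketch of handling that, with hypothetical names:

#include <linux/errno.h>
#include <linux/mm.h>

/* Pin nr_pages of user memory starting at uaddr for write access. */
static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                struct page **pages)
{
        int pinned = get_user_pages_fast(uaddr, nr_pages, true, pages);

        if (pinned < 0)
                return pinned;                  /* hard fault */
        if (pinned < nr_pages) {
                while (pinned--)                /* short pin: drop what we got */
                        put_page(pages[pinned]);
                return -EFAULT;
        }
        return 0;
}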
diff --git a/drivers/video/fbdev/pxa168fb.h b/drivers/video/fbdev/pxa168fb.h
index eee09279c524..2aba90de1360 100644
--- a/drivers/video/fbdev/pxa168fb.h
+++ b/drivers/video/fbdev/pxa168fb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PXA168FB_H__
#define __PXA168FB_H__
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 933619da1a94..55fbb432c053 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -512,28 +512,26 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef PXA3XX_GCU_DEBUG_TIMER
static struct timer_list pxa3xx_gcu_debug_timer;
+static struct pxa3xx_gcu_priv *debug_timer_priv;
-static void pxa3xx_gcu_debug_timedout(unsigned long ptr)
+static void pxa3xx_gcu_debug_timedout(struct timer_list *unused)
{
- struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr;
+ struct pxa3xx_gcu_priv *priv = debug_timer_priv;
QERROR("Timer DUMP");
- /* init the timer structure */
- init_timer(&pxa3xx_gcu_debug_timer);
- pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout;
- pxa3xx_gcu_debug_timer.data = ptr;
- pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* one second */
-
- add_timer(&pxa3xx_gcu_debug_timer);
+ mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ);
}
-static void pxa3xx_gcu_init_debug_timer(void)
+static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv)
{
- pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer);
+ /* init the timer structure */
+ debug_timer_priv = priv;
+ timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0);
+ pxa3xx_gcu_debug_timedout(NULL);
}
#else
-static inline void pxa3xx_gcu_init_debug_timer(void) {}
+static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {}
#endif
static int
@@ -670,7 +668,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
pxa3xx_gcu_reset(priv);
- pxa3xx_gcu_init_debug_timer();
+ pxa3xx_gcu_init_debug_timer(priv);
dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n",
(void *) r->start, (void *) priv->shared_phys,
diff --git a/drivers/video/fbdev/pxa3xx-gcu.h b/drivers/video/fbdev/pxa3xx-gcu.h
index 0428ed03dc49..ea9489d16de5 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.h
+++ b/drivers/video/fbdev/pxa3xx-gcu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PXA3XX_GCU_H__
#define __PXA3XX_GCU_H__
diff --git a/drivers/video/fbdev/riva/Makefile b/drivers/video/fbdev/riva/Makefile
index 8898c9915b02..bdbdd6eb80ec 100644
--- a/drivers/video/fbdev/riva/Makefile
+++ b/drivers/video/fbdev/riva/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Riva framebuffer driver
#
diff --git a/drivers/video/fbdev/riva/nv_type.h b/drivers/video/fbdev/riva/nv_type.h
index a69480c9a67c..51937a0ae0a4 100644
--- a/drivers/video/fbdev/riva/nv_type.h
+++ b/drivers/video/fbdev/riva/nv_type.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nv_type.h,v 1.35 2002/08/05 20:47:06 mvojkovi Exp $ */
#ifndef __NV_STRUCT_H__
diff --git a/drivers/video/fbdev/riva/rivafb.h b/drivers/video/fbdev/riva/rivafb.h
index 61fd37ca490a..593297ca2b20 100644
--- a/drivers/video/fbdev/riva/rivafb.h
+++ b/drivers/video/fbdev/riva/rivafb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RIVAFB_H
#define __RIVAFB_H
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index fc2aaa5aca23..15ae50063296 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -323,13 +323,11 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
* according to the RGB bitfield information.
*/
if (regno < 16) {
- u32 *pal = fbi->fb.pseudo_palette;
-
val = chan_to_field(red, &fbi->fb.var.red);
val |= chan_to_field(green, &fbi->fb.var.green);
val |= chan_to_field(blue, &fbi->fb.var.blue);
- pal[regno] = val;
+ fbi->pseudo_palette[regno] = val;
ret = 0;
}
break;
@@ -1132,12 +1130,10 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
struct sa1100fb_info *fbi;
unsigned i;
- fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
- GFP_KERNEL);
+ fbi = devm_kzalloc(dev, sizeof(struct sa1100fb_info), GFP_KERNEL);
if (!fbi)
return NULL;
- memset(fbi, 0, sizeof(struct sa1100fb_info));
fbi->dev = dev;
strcpy(fbi->fb.fix.id, SA1100_NAME);
@@ -1159,7 +1155,7 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
fbi->fb.fbops = &sa1100fb_ops;
fbi->fb.flags = FBINFO_DEFAULT;
fbi->fb.monspecs = monspecs;
- fbi->fb.pseudo_palette = (fbi + 1);
+ fbi->fb.pseudo_palette = fbi->pseudo_palette;
fbi->rgb[RGB_4] = &rgb_4;
fbi->rgb[RGB_8] = &rgb_8;
@@ -1218,48 +1214,42 @@ static int sa1100fb_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (irq < 0 || !res)
+ if (irq < 0)
return -EINVAL;
- if (!request_mem_region(res->start, resource_size(res), "LCD"))
- return -EBUSY;
-
fbi = sa1100fb_init_fbinfo(&pdev->dev);
- ret = -ENOMEM;
if (!fbi)
- goto failed;
-
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- fbi->clk = NULL;
- goto failed;
- }
+ return -ENOMEM;
- fbi->base = ioremap(res->start, resource_size(res));
- if (!fbi->base)
- goto failed;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fbi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fbi->base))
+ return PTR_ERR(fbi->base);
- /* Initialize video memory */
- ret = sa1100fb_map_video_memory(fbi);
- if (ret)
- goto failed;
+ fbi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fbi->clk))
+ return PTR_ERR(fbi->clk);
- ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
+ ret = devm_request_irq(&pdev->dev, irq, sa1100fb_handle_irq, 0,
+ "LCD", fbi);
if (ret) {
dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
- goto failed;
+ return ret;
}
if (machine_is_shannon()) {
- ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
+ ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN,
GPIOF_OUT_INIT_LOW, "display enable");
if (ret)
- goto err_free_irq;
+ return ret;
}
+ /* Initialize video memory */
+ ret = sa1100fb_map_video_memory(fbi);
+ if (ret)
+ return ret;
+
/*
* This makes sure that our colour bitfield
* descriptors are correctly initialised.
@@ -1269,8 +1259,11 @@ static int sa1100fb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fbi);
ret = register_framebuffer(&fbi->fb);
- if (ret < 0)
- goto err_reg_fb;
+ if (ret < 0) {
+ dma_free_wc(fbi->dev, fbi->map_size, fbi->map_cpu,
+ fbi->map_dma);
+ return ret;
+ }
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1281,20 +1274,6 @@ static int sa1100fb_probe(struct platform_device *pdev)
/* This driver cannot be unloaded at the moment */
return 0;
-
- err_reg_fb:
- if (machine_is_shannon())
- gpio_free(SHANNON_GPIO_DISP_EN);
- err_free_irq:
- free_irq(irq, fbi);
- failed:
- if (fbi)
- iounmap(fbi->base);
- if (fbi->clk)
- clk_put(fbi->clk);
- kfree(fbi);
- release_mem_region(res->start, resource_size(res));
- return ret;
}
static struct platform_driver sa1100fb_driver = {
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index 0139d13377a5..7a1a9ca33cec 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -69,6 +69,8 @@ struct sa1100fb_info {
const struct sa1100fb_mach_info *inf;
struct clk *clk;
+
+ u32 pseudo_palette[16];
};
#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
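The sa1100fb rework above moves to device-managed (devm_) resources, which the driver core releases automatically on probe failure or unbind, so most of the hand-written unwind code disappears. A hedged sketch of the resulting probe shape for a hypothetical driver, not sa1100fb itself:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;
        struct clk *clk;
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);  /* request + map */
        if (IS_ERR(base))
                return PTR_ERR(base);

        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo-fb", NULL);
        if (ret)
                return ret;

        /* No goto ladder: devm_* resources are released automatically. */
        return 0;
}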
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209ffbd3..af6fc97f4ba4 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sbuslib.c: Helper library for SBUS framebuffer drivers.
*
* Copyright (C) 2003 David S. Miller (davem@redhat.com)
diff --git a/drivers/video/fbdev/sbuslib.h b/drivers/video/fbdev/sbuslib.h
index 7ba3250236bd..a6b9a2467646 100644
--- a/drivers/video/fbdev/sbuslib.h
+++ b/drivers/video/fbdev/sbuslib.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* sbuslib.h: SBUS fb helper library interfaces */
#ifndef _SBUSLIB_H
#define _SBUSLIB_H
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.h b/drivers/video/fbdev/sh_mobile_lcdcfb.h
index f839adef1d90..cc52c74721fe 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.h
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SH_MOBILE_LCDCFB_H
#define SH_MOBILE_LCDCFB_H
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 1ec9c3e0e1d8..02ee752d5000 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -6486,7 +6486,7 @@ SiS_SetTVSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
if(!(SiS_Pr->SiS_TVMode & TVSetPAL)) {
if(SiS_Pr->SiS_TVMode & TVSetNTSC1024) {
- const unsigned char specialtv[] = {
+ static const unsigned char specialtv[] = {
0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53,
0x13,0x40,0x34,0xf4,0x63,0xbb,0xcc,0x7a,
0x58,0xe4,0x73,0xda,0x13
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index e92303823a4b..ecdd054d8951 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1702,6 +1702,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_INFO: /* For communication with X driver */
ivideo->sisfb_infoblock.sisfb_id = SISFB_ID;
ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR;
@@ -1755,6 +1756,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_VBRSTATUS:
if(sisfb_CheckVBRetrace(ivideo))
return put_user((u32)1, argp);
@@ -1765,6 +1767,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_GET_AUTOMAXIMIZE:
if(ivideo->sisfb_max)
return put_user((u32)1, argp);
@@ -1775,6 +1778,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
if(ivideo->warncount++ < 10)
printk(KERN_INFO
"sisfb: Deprecated ioctl call received - update your application!\n");
+ /* fall through */
case SISFB_SET_AUTOMAXIMIZE:
if(get_user(gpu32, argp))
return -EFAULT;
diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 076dd2711630..6f0a19501c6a 100644
--- a/drivers/video/fbdev/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
@@ -1008,6 +1008,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info)
case FB_BLANK_POWERDOWN:
ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE;
sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0);
+ /* fall through */
case FB_BLANK_NORMAL:
ctrl |= SM501_DC_CRT_CONTROL_BLANK;
@@ -1889,6 +1890,9 @@ static void sm501_free_init_fb(struct sm501fb_info *info,
{
struct fb_info *fbi = info->fb[head];
+ if (!fbi)
+ return;
+
fb_dealloc_cmap(&fbi->cmap);
}
@@ -2076,8 +2080,10 @@ static int sm501fb_remove(struct platform_device *pdev)
sm501_free_init_fb(info, HEAD_CRT);
sm501_free_init_fb(info, HEAD_PANEL);
- unregister_framebuffer(fbinfo_crt);
- unregister_framebuffer(fbinfo_pnl);
+ if (fbinfo_crt)
+ unregister_framebuffer(fbinfo_crt);
+ if (fbinfo_pnl)
+ unregister_framebuffer(fbinfo_pnl);
sm501fb_stop(info);
kfree(info);
@@ -2094,8 +2100,12 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return 0;
+ par = fbi->par;
if (par->screen.size == 0)
return 0;
@@ -2141,8 +2151,12 @@ static void sm501fb_resume_fb(struct sm501fb_info *info,
enum sm501_controller head)
{
struct fb_info *fbi = info->fb[head];
- struct sm501fb_par *par = fbi->par;
+ struct sm501fb_par *par;
+
+ if (!fbi)
+ return;
+ par = fbi->par;
if (par->screen.size == 0)
return;
diff --git a/drivers/video/fbdev/sticore.h b/drivers/video/fbdev/sticore.h
index af1619536ac8..fb8f58f9867a 100644
--- a/drivers/video/fbdev/sticore.h
+++ b/drivers/video/fbdev/sticore.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STICORE_H
#define STICORE_H
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index ef08a104fb42..d44f14242016 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
for (i = 0; i < len; i++) {
ret = usb_control_msg(dev->udev,
- usb_rcvctrlpipe(dev->udev, 0), (0x02),
- (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
- HZ);
- if (ret < 1) {
- pr_err("Read EDID byte %d failed err %x\n", i, ret);
+ usb_rcvctrlpipe(dev->udev, 0), 0x02,
+ (0x80 | (0x02 << 5)), i << 8, 0xA1,
+ rbuf, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret < 2) {
+ pr_err("Read EDID byte %d failed: %d\n", i, ret);
i--;
break;
}
diff --git a/drivers/video/fbdev/via/Makefile b/drivers/video/fbdev/via/Makefile
index 159f26e6adb5..771a0eee7fb4 100644
--- a/drivers/video/fbdev/via/Makefile
+++ b/drivers/video/fbdev/via/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the VIA framebuffer driver (for Linux Kernel 2.6)
#
diff --git a/drivers/video/fbdev/wmt_ge_rops.h b/drivers/video/fbdev/wmt_ge_rops.h
index f73ec6377a46..8d9ed8a51a79 100644
--- a/drivers/video/fbdev/wmt_ge_rops.h
+++ b/drivers/video/fbdev/wmt_ge_rops.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_FB_WMT_GE_ROPS
extern void wmt_ge_fillrect(struct fb_info *info,
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 3b437813584c..6194373ee424 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the Linux logos
obj-$(CONFIG_LOGO) += logo.o
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d993df5586c0..d70ad6d38879 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -243,8 +243,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
/* Get the physical addresses of the source buffer */
- num_pinned = get_user_pages_unlocked(param.local_vaddr - lb_offset,
- num_pages, pages, (param.source == -1) ? 0 : FOLL_WRITE);
+ num_pinned = get_user_pages_fast(param.local_vaddr - lb_offset,
+ num_pages, param.source != -1, pages);
if (num_pinned != num_pages) {
/* get_user_pages() failed */
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 41e30e3dc842..3a2b5c5dcf46 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index f0b3a0b9d42f..7960746f7597 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -143,16 +143,17 @@ static void set_page_pfns(struct virtio_balloon *vb,
static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
- struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
unsigned num_allocated_pages;
+ unsigned num_pfns;
+ struct page *page;
+ LIST_HEAD(pages);
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
- mutex_lock(&vb->balloon_lock);
- for (vb->num_pfns = 0; vb->num_pfns < num;
- vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
- struct page *page = balloon_page_enqueue(vb_dev_info);
+ for (num_pfns = 0; num_pfns < num;
+ num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ struct page *page = balloon_page_alloc();
if (!page) {
dev_info_ratelimited(&vb->vdev->dev,
@@ -162,6 +163,19 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
msleep(200);
break;
}
+
+ balloon_page_push(&pages, page);
+ }
+
+ mutex_lock(&vb->balloon_lock);
+
+ vb->num_pfns = 0;
+
+ while ((page = balloon_page_pop(&pages))) {
+ balloon_page_enqueue(&vb->vb_dev_info, page);
+
+ vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
+
set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
if (!virtio_has_feature(vb->vdev,
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 7cc51223db1c..5dd284008630 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -511,7 +511,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
- if (ca91cx42_bridge->parent == NULL) {
+ if (!ca91cx42_bridge->parent) {
dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
@@ -529,14 +529,12 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(ca91cx42_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -562,7 +560,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -574,7 +572,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -588,7 +586,7 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
@@ -1036,10 +1034,8 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(dev, "Failed to allocate memory for dma resource "
- "structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1052,7 +1048,7 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
goto err_align;
}
- memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
@@ -1323,7 +1319,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
@@ -1432,7 +1428,7 @@ static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
@@ -1567,7 +1563,7 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
@@ -1618,21 +1614,15 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
- ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
-
- if (ca91cx42_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
+ if (!ca91cx42_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(ca91cx42_bridge);
- ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
-
- if (ca91cx42_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
+ if (!ca91cx42_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1688,11 +1678,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add master windows to list */
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1706,7 +1693,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&ca91cx42_bridge->master_resources);
@@ -1714,11 +1701,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1741,11 +1725,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < CA91C142_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -1762,10 +1743,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_fake.c b/drivers/vme/bridges/vme_fake.c
index 30b3acc93833..7d83691047f4 100644
--- a/drivers/vme/bridges/vme_fake.c
+++ b/drivers/vme/bridges/vme_fake.c
@@ -409,7 +409,7 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
/* Each location monitor covers 8 bytes */
if (((lm_base + (8 * i)) <= addr) &&
((lm_base + (8 * i) + 8) > addr)) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
bridge->lm_callback[i](
bridge->lm_data[i]);
}
@@ -866,7 +866,7 @@ static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
pr_err("Location monitor callback attached, can't reset\n");
return -EBUSY;
@@ -940,7 +940,7 @@ static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
pr_err("Existing callback attached\n");
return -EBUSY;
@@ -978,7 +978,7 @@ static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
/* If all location monitors disabled, disable global Location Monitor */
tmp = 0;
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL)
+ if (bridge->lm_callback[i])
tmp = 1;
}
@@ -1003,7 +1003,7 @@ static void *fake_alloc_consistent(struct device *parent, size_t size,
{
void *alloc = kmalloc(size, GFP_KERNEL);
- if (alloc != NULL)
+ if (alloc)
*dma = fake_ptr_to_pci(alloc);
return alloc;
@@ -1039,7 +1039,7 @@ static int fake_crcsr_init(struct vme_bridge *fake_bridge)
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
- if (bridge->crcsr_kernel == NULL)
+ if (!bridge->crcsr_kernel)
return -ENOMEM;
vstat = fake_slot_get(fake_bridge);
@@ -1075,14 +1075,14 @@ static int __init fake_init(void)
/* If we want to support more than one bridge at some point, we need to
* dynamically allocate this so we get one per device.
*/
- fake_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (fake_bridge == NULL) {
+ fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL);
+ if (!fake_bridge) {
retval = -ENOMEM;
goto err_struct;
}
- fake_device = kzalloc(sizeof(struct fake_driver), GFP_KERNEL);
- if (fake_device == NULL) {
+ fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL);
+ if (!fake_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -1104,9 +1104,8 @@ static int __init fake_init(void)
/* Add master windows to list */
INIT_LIST_HEAD(&fake_bridge->master_resources);
for (i = 0; i < FAKE_MAX_MASTER; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -1131,9 +1130,8 @@ static int __init fake_init(void)
/* Add slave windows to list */
INIT_LIST_HEAD(&fake_bridge->slave_resources);
for (i = 0; i < FAKE_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -1154,9 +1152,8 @@ static int __init fake_init(void)
/* Add location monitor to list */
INIT_LIST_HEAD(&fake_bridge->lm_resources);
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- pr_err("Failed to allocate memory for location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index fc1b634b969a..647d231d4422 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -741,18 +741,16 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
- if (image->bus_resource.name == NULL) {
+ if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
- if (image->bus_resource.name == NULL) {
- dev_err(tsi148_bridge->parent, "Unable to allocate "
- "memory for resource name\n");
+ if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
@@ -778,7 +776,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
- if (image->kern_base == NULL) {
+ if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
@@ -790,7 +788,7 @@ err_remap:
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
@@ -804,7 +802,7 @@ static void tsi148_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
- memset(&image->bus_resource, 0, sizeof(struct resource));
+ memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/*
@@ -1641,10 +1639,8 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
- entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
- if (entry == NULL) {
- dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
- "dma resource structure\n");
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
@@ -1661,7 +1657,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
- memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
+ memset(&entry->descriptor, 0, sizeof(entry->descriptor));
/* Fill out source part */
switch (src->type) {
@@ -1756,8 +1752,9 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
list_add_tail(&entry->list, &list->entries);
entry->dma_handle = dma_map_single(tsi148_bridge->parent,
- &entry->descriptor,
- sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
+ &entry->descriptor,
+ sizeof(entry->descriptor),
+ DMA_TO_DEVICE);
if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
dev_err(tsi148_bridge->parent, "DMA mapping error\n");
retval = -EINVAL;
@@ -1946,7 +1943,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
- if (bridge->lm_callback[i] != NULL) {
+ if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
@@ -2071,7 +2068,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
- if (bridge->lm_callback[monitor] != NULL) {
+ if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
@@ -2208,7 +2205,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
- if (bridge->crcsr_kernel == NULL) {
+ if (!bridge->crcsr_kernel) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
@@ -2294,19 +2291,15 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
- tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
- if (tsi148_bridge == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
+ if (!tsi148_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(tsi148_bridge);
- tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
- if (tsi148_device == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device "
- "structure\n");
+ tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
+ if (!tsi148_device) {
retval = -ENOMEM;
goto err_driver;
}
@@ -2371,10 +2364,9 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
master_num--;
tsi148_device->flush_image =
- kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
- if (tsi148_device->flush_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "flush resource structure\n");
+ kmalloc(sizeof(*tsi148_device->flush_image),
+ GFP_KERNEL);
+ if (!tsi148_device->flush_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2383,17 +2375,14 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
memset(&tsi148_device->flush_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(tsi148_device->flush_image->bus_resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
for (i = 0; i < master_num; i++) {
- master_image = kmalloc(sizeof(struct vme_master_resource),
- GFP_KERNEL);
- if (master_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "master resource structure\n");
+ master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
+ if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
@@ -2410,7 +2399,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&master_image->bus_resource, 0,
- sizeof(struct resource));
+ sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&tsi148_bridge->master_resources);
@@ -2418,11 +2407,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
- slave_image = kmalloc(sizeof(struct vme_slave_resource),
- GFP_KERNEL);
- if (slave_image == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "slave resource structure\n");
+ slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
+ if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
@@ -2442,11 +2428,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < TSI148_MAX_DMA; i++) {
- dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
- GFP_KERNEL);
- if (dma_ctrlr == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "dma resource structure\n");
+ dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
+ if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
@@ -2465,10 +2448,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
- if (lm == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for "
- "location monitor resource structure\n");
+ lm = kmalloc(sizeof(*lm), GFP_KERNEL);
+ if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 6a3ead42aba8..81246221a13b 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -92,23 +92,23 @@ void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return NULL;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return NULL;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return NULL;
}
- if (bridge->alloc_consistent == NULL) {
+ if (!bridge->alloc_consistent) {
printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
bridge->name);
return NULL;
@@ -132,23 +132,23 @@ void vme_free_consistent(struct vme_resource *resource, size_t size,
{
struct vme_bridge *bridge;
- if (resource == NULL) {
+ if (!resource) {
printk(KERN_ERR "No resource\n");
return;
}
bridge = find_bridge(resource);
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find bridge\n");
return;
}
- if (bridge->parent == NULL) {
+ if (!bridge->parent) {
printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
return;
}
- if (bridge->free_consistent == NULL) {
+ if (!bridge->free_consistent) {
printk(KERN_ERR "free_consistent not supported by bridge %s\n",
bridge->name);
return;
@@ -208,29 +208,27 @@ int vme_check_window(u32 aspace, unsigned long long vme_base,
{
int retval = 0;
+ if (vme_base + size < size)
+ return -EINVAL;
+
switch (aspace) {
case VME_A16:
- if (((vme_base + size) > VME_A16_MAX) ||
- (vme_base > VME_A16_MAX))
+ if (vme_base + size > VME_A16_MAX)
retval = -EFAULT;
break;
case VME_A24:
- if (((vme_base + size) > VME_A24_MAX) ||
- (vme_base > VME_A24_MAX))
+ if (vme_base + size > VME_A24_MAX)
retval = -EFAULT;
break;
case VME_A32:
- if (((vme_base + size) > VME_A32_MAX) ||
- (vme_base > VME_A32_MAX))
+ if (vme_base + size > VME_A32_MAX)
retval = -EFAULT;
break;
case VME_A64:
- if ((size != 0) && (vme_base > U64_MAX + 1 - size))
- retval = -EFAULT;
+ /* The VME_A64_MAX limit is actually U64_MAX + 1 */
break;
case VME_CRCSR:
- if (((vme_base + size) > VME_CRCSR_MAX) ||
- (vme_base > VME_CRCSR_MAX))
+ if (vme_base + size > VME_CRCSR_MAX)
retval = -EFAULT;
break;
case VME_USER1:
@@ -303,7 +301,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -313,7 +311,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
slave_image = list_entry(slave_pos,
struct vme_slave_resource, list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Registered NULL Slave resource\n");
continue;
}
@@ -333,14 +331,13 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
}
/* No free image */
- if (allocated_image == NULL)
+ if (!allocated_image)
goto err_image;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_SLAVE;
resource->entry = &allocated_image->list;
@@ -389,7 +386,7 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_set == NULL) {
+ if (!bridge->slave_set) {
printk(KERN_ERR "Function not supported\n");
return -ENOSYS;
}
@@ -438,7 +435,7 @@ int vme_slave_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_slave_resource, list);
- if (bridge->slave_get == NULL) {
+ if (!bridge->slave_get) {
printk(KERN_ERR "vme_slave_get not supported\n");
return -EINVAL;
}
@@ -465,7 +462,7 @@ void vme_slave_free(struct vme_resource *resource)
slave_image = list_entry(resource->entry, struct vme_slave_resource,
list);
- if (slave_image == NULL) {
+ if (!slave_image) {
printk(KERN_ERR "Can't find slave resource\n");
return;
}
@@ -505,7 +502,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -515,7 +512,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
master_image = list_entry(master_pos,
struct vme_master_resource, list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_WARNING "Registered NULL master resource\n");
continue;
}
@@ -536,16 +533,15 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
}
/* Check to see if we found a resource */
- if (allocated_image == NULL) {
+ if (!allocated_image) {
printk(KERN_ERR "Can't find a suitable resource\n");
goto err_image;
}
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_MASTER;
resource->entry = &allocated_image->list;
@@ -594,7 +590,7 @@ int vme_master_set(struct vme_resource *resource, int enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_set == NULL) {
+ if (!bridge->master_set) {
printk(KERN_WARNING "vme_master_set not supported\n");
return -EINVAL;
}
@@ -644,7 +640,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled,
image = list_entry(resource->entry, struct vme_master_resource, list);
- if (bridge->master_get == NULL) {
+ if (!bridge->master_get) {
printk(KERN_WARNING "%s not supported\n", __func__);
return -EINVAL;
}
@@ -676,7 +672,7 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_read == NULL) {
+ if (!bridge->master_read) {
printk(KERN_WARNING "Reading from resource not supported\n");
return -EINVAL;
}
@@ -725,7 +721,7 @@ ssize_t vme_master_write(struct vme_resource *resource, void *buf,
struct vme_master_resource *image;
size_t length;
- if (bridge->master_write == NULL) {
+ if (!bridge->master_write) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -776,7 +772,7 @@ unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
- if (bridge->master_rmw == NULL) {
+ if (!bridge->master_rmw) {
printk(KERN_WARNING "Writing to resource not supported\n");
return -EINVAL;
}
@@ -846,7 +842,7 @@ void vme_master_free(struct vme_resource *resource)
master_image = list_entry(resource->entry, struct vme_master_resource,
list);
- if (master_image == NULL) {
+ if (!master_image) {
printk(KERN_ERR "Can't find master resource\n");
return;
}
@@ -886,7 +882,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
printk(KERN_ERR "No VME resource Attribute tests done\n");
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -895,8 +891,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
list_for_each(dma_pos, &bridge->dma_resources) {
dma_ctrlr = list_entry(dma_pos,
struct vme_dma_resource, list);
-
- if (dma_ctrlr == NULL) {
+ if (!dma_ctrlr) {
printk(KERN_ERR "Registered NULL DMA resource\n");
continue;
}
@@ -915,14 +910,13 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
}
/* Check to see if we found a resource */
- if (allocated_ctrlr == NULL)
+ if (!allocated_ctrlr)
goto err_ctrlr;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_WARNING "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_DMA;
resource->entry = &allocated_ctrlr->list;
@@ -951,7 +945,6 @@ EXPORT_SYMBOL(vme_dma_request);
*/
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
- struct vme_dma_resource *ctrlr;
struct vme_dma_list *dma_list;
if (resource->type != VME_DMA) {
@@ -959,15 +952,14 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
return NULL;
}
- ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
-
- dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
- if (dma_list == NULL) {
- printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
+ dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
+ if (!dma_list)
return NULL;
- }
+
INIT_LIST_HEAD(&dma_list->entries);
- dma_list->parent = ctrlr;
+ dma_list->parent = list_entry(resource->entry,
+ struct vme_dma_resource,
+ list);
mutex_init(&dma_list->mtx);
return dma_list;
@@ -990,17 +982,13 @@ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
- if (pattern_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
+ pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
+ if (!pattern_attr)
goto err_pat;
- }
attributes->type = VME_DMA_PATTERN;
attributes->private = (void *)pattern_attr;
@@ -1034,19 +1022,13 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
/* XXX Run some sanity checks here */
- attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
- if (pci_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
+ pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
+ if (!pci_attr)
goto err_pci;
- }
-
-
attributes->type = VME_DMA_PCI;
attributes->private = (void *)pci_attr;
@@ -1081,18 +1063,13 @@ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
- attributes = kmalloc(
- sizeof(struct vme_dma_attr), GFP_KERNEL);
- if (attributes == NULL) {
- printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
+ attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
+ if (!attributes)
goto err_attr;
- }
- vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
- if (vme_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
+ vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
+ if (!vme_attr)
goto err_vme;
- }
attributes->type = VME_DMA_VME;
attributes->private = (void *)vme_attr;
@@ -1148,7 +1125,7 @@ int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_add == NULL) {
+ if (!bridge->dma_list_add) {
printk(KERN_WARNING "Link List DMA generation not supported\n");
return -EINVAL;
}
@@ -1181,7 +1158,7 @@ int vme_dma_list_exec(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_exec == NULL) {
+ if (!bridge->dma_list_exec) {
printk(KERN_ERR "Link List DMA execution not supported\n");
return -EINVAL;
}
@@ -1210,14 +1187,14 @@ int vme_dma_list_free(struct vme_dma_list *list)
struct vme_bridge *bridge = list->parent->parent;
int retval;
- if (bridge->dma_list_empty == NULL) {
+ if (!bridge->dma_list_empty) {
printk(KERN_WARNING "Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List in use\n");
- return -EINVAL;
+ return -EBUSY;
}
/*
@@ -1342,8 +1319,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
call = bridge->irq[level - 1].callback[statid].func;
priv_data = bridge->irq[level - 1].callback[statid].priv_data;
-
- if (call != NULL)
+ if (call)
call(level, statid, priv_data);
else
printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
@@ -1374,7 +1350,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1384,7 +1360,7 @@ int vme_irq_request(struct vme_dev *vdev, int level, int statid,
return -EINVAL;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return -EINVAL;
}
@@ -1423,7 +1399,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return;
}
@@ -1433,7 +1409,7 @@ void vme_irq_free(struct vme_dev *vdev, int level, int statid)
return;
}
- if (bridge->irq_set == NULL) {
+ if (!bridge->irq_set) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return;
}
@@ -1470,7 +1446,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
@@ -1480,7 +1456,7 @@ int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
return -EINVAL;
}
- if (bridge->irq_generate == NULL) {
+ if (!bridge->irq_generate) {
printk(KERN_WARNING "Interrupt generation not supported\n");
return -EINVAL;
}
@@ -1508,7 +1484,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
@@ -1517,8 +1493,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
-
- if (lm == NULL) {
+ if (!lm) {
printk(KERN_ERR "Registered NULL Location Monitor resource\n");
continue;
}
@@ -1535,14 +1510,13 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
}
/* Check to see if we found a resource */
- if (allocated_lm == NULL)
+ if (!allocated_lm)
goto err_lm;
- resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
- if (resource == NULL) {
- printk(KERN_ERR "Unable to allocate resource structure\n");
+ resource = kmalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
goto err_alloc;
- }
+
resource->type = VME_LM;
resource->entry = &allocated_lm->list;
@@ -1612,7 +1586,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_set == NULL) {
+ if (!bridge->lm_set) {
printk(KERN_ERR "vme_lm_set not supported\n");
return -EINVAL;
}
@@ -1648,7 +1622,7 @@ int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_get == NULL) {
+ if (!bridge->lm_get) {
printk(KERN_ERR "vme_lm_get not supported\n");
return -EINVAL;
}
@@ -1685,7 +1659,7 @@ int vme_lm_attach(struct vme_resource *resource, int monitor,
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_attach == NULL) {
+ if (!bridge->lm_attach) {
printk(KERN_ERR "vme_lm_attach not supported\n");
return -EINVAL;
}
@@ -1718,7 +1692,7 @@ int vme_lm_detach(struct vme_resource *resource, int monitor)
lm = list_entry(resource->entry, struct vme_lm_resource, list);
- if (bridge->lm_detach == NULL) {
+ if (!bridge->lm_detach) {
printk(KERN_ERR "vme_lm_detach not supported\n");
return -EINVAL;
}
@@ -1780,12 +1754,12 @@ int vme_slot_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
- if (bridge->slot_get == NULL) {
+ if (!bridge->slot_get) {
printk(KERN_WARNING "vme_slot_num not supported\n");
return -EINVAL;
}
@@ -1808,7 +1782,7 @@ int vme_bus_num(struct vme_dev *vdev)
struct vme_bridge *bridge;
bridge = vdev->bridge;
- if (bridge == NULL) {
+ if (!bridge) {
pr_err("Can't find VME bus\n");
return -EINVAL;
}
@@ -1888,7 +1862,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
struct vme_dev *tmp;
for (i = 0; i < ndevs; i++) {
- vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) {
err = -ENOMEM;
goto err_devalloc;
@@ -2020,30 +1994,26 @@ static int vme_bus_match(struct device *dev, struct device_driver *drv)
static int vme_bus_probe(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->probe)
+ return driver->probe(vdev);
- if (driver->probe != NULL)
- retval = driver->probe(vdev);
-
- return retval;
+ return -ENODEV;
}
static int vme_bus_remove(struct device *dev)
{
- int retval = -ENODEV;
struct vme_driver *driver;
struct vme_dev *vdev = dev_to_vme_dev(dev);
driver = dev->platform_data;
+ if (driver->remove)
+ return driver->remove(vdev);
- if (driver->remove != NULL)
- retval = driver->remove(vdev);
-
- return retval;
+ return -ENODEV;
}
struct bus_type vme_bus_type = {
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h
index 2662e916b96a..42ecf961004e 100644
--- a/drivers/vme/vme_bridge.h
+++ b/drivers/vme/vme_bridge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VME_BRIDGE_H_
#define _VME_BRIDGE_H_
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index c5a3e96fcbab..18954cae4256 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for 1-wire bus master drivers.
#
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 3c945f9f5f0f..7931231d8e80 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -148,4 +148,19 @@ config W1_SLAVE_DS28E04
If you are unsure, say N.
+config W1_SLAVE_DS28E17
+ tristate "1-wire-to-I2C master bridge (DS28E17)"
+ select CRC16
+ depends on I2C
+ help
+ Say Y here if you want to use the DS28E17 1-wire-to-I2C master bridge.
+ For each DS28E17 detected, a new I2C adapter is created within the
+ kernel. I2C devices on that bus can be configured to be used by the
+ kernel and userspace tools as on any other "native" I2C bus.
+
+ This driver is also available as a module. If so, the module
+ will be called w1_ds28e17.
+
+ If you are unsure, say N.
+
endmenu
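As a rough sketch of what the help text above means for userspace (the adapter number and slave address below are made-up values for illustration only), an I2C client sitting behind the DS28E17 bridge can be reached through the standard i2c-dev interface like any other adapter:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	/* Adapter number depends on enumeration order; 3 is hypothetical. */
	int fd = open("/dev/i2c-3", O_RDWR);
	unsigned char reg = 0x00, val;

	if (fd < 0)
		return 1;
	/* 0x68 is a hypothetical 7-bit slave address on the bridged bus. */
	if (ioctl(fd, I2C_SLAVE, 0x68) < 0)
		return 1;
	/* Plain write of a register index, then a one-byte read back. */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1)
		return 1;
	close(fd);
	return 0;
}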
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index 36b22fb2d3a1..d5f4f4d5b9e5 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Dallas's 1-wire slaves.
#
@@ -17,3 +18,4 @@ obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
+obj-$(CONFIG_W1_SLAVE_DS28E17) += w1_ds28e17.o
diff --git a/drivers/w1/slaves/w1_ds28e17.c b/drivers/w1/slaves/w1_ds28e17.c
new file mode 100644
index 000000000000..e78b63ea4daf
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds28e17.c
@@ -0,0 +1,771 @@
+/*
+ * w1_ds28e17.c - w1 family 19 (DS28E17) driver
+ *
+ * Copyright (c) 2016 Jan Kandziora <jjj@gmx.de>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#define CRC16_INIT 0
+
+#include <linux/w1.h>
+
+#define W1_FAMILY_DS28E17 0x19
+
+/* Module setup. */
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jan Kandziora <jjj@gmx.de>");
+MODULE_DESCRIPTION("w1 family 19 driver for DS28E17, 1-wire to I2C master bridge");
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E17));
+
+
+/* Default I2C speed to be set when a DS28E17 is detected. */
+static int i2c_speed = 100;
+module_param_named(speed, i2c_speed, int, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(speed, "Default I2C speed to be set when a DS28E17 is detected");
+
+/* Default I2C stretch value to be set when a DS28E17 is detected. */
+static char i2c_stretch = 1;
+module_param_named(stretch, i2c_stretch, byte, (S_IRUSR | S_IWUSR));
+MODULE_PARM_DESC(stretch, "Default I2C stretch value to be set when a DS28E17 is detected");
+
+/* DS28E17 device command codes. */
+#define W1_F19_WRITE_DATA_WITH_STOP 0x4B
+#define W1_F19_WRITE_DATA_NO_STOP 0x5A
+#define W1_F19_WRITE_DATA_ONLY 0x69
+#define W1_F19_WRITE_DATA_ONLY_WITH_STOP 0x78
+#define W1_F19_READ_DATA_WITH_STOP 0x87
+#define W1_F19_WRITE_READ_DATA_WITH_STOP 0x2D
+#define W1_F19_WRITE_CONFIGURATION 0xD2
+#define W1_F19_READ_CONFIGURATION 0xE1
+#define W1_F19_ENABLE_SLEEP_MODE 0x1E
+#define W1_F19_READ_DEVICE_REVISION 0xC4
+
+/* DS28E17 status bits */
+#define W1_F19_STATUS_CRC 0x01
+#define W1_F19_STATUS_ADDRESS 0x02
+#define W1_F19_STATUS_START 0x08
+
+/*
+ * Maximum number of I2C bytes to transfer within one CRC16 protected onewire
+ * command.
+ */
+#define W1_F19_WRITE_DATA_LIMIT 255
+
+/* Maximum number of I2C bytes to read with one onewire command. */
+#define W1_F19_READ_DATA_LIMIT 255
+
+/* Constants for calculating the busy sleep. */
+#define W1_F19_BUSY_TIMEBASES { 90, 23, 10 }
+#define W1_F19_BUSY_GRATUITY 1000
+
+/* Number of checks for the busy flag before timeout. */
+#define W1_F19_BUSY_CHECKS 1000
+
+
+/* Slave specific data. */
+struct w1_f19_data {
+ u8 speed;
+ u8 stretch;
+ struct i2c_adapter adapter;
+};
+
+
+/* Wait a while until the busy flag clears. */
+static int w1_f19_i2c_busy_wait(struct w1_slave *sl, size_t count)
+{
+ const unsigned long timebases[3] = W1_F19_BUSY_TIMEBASES;
+ struct w1_f19_data *data = sl->family_data;
+ unsigned int checks;
+
+ /* Check the busy flag first in any case. */
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /*
+ * Do a generously long sleep in the beginning,
+ * as we have to wait at least this time for all
+ * the I2C bytes at the given speed to be transferred.
+ */
+ usleep_range(timebases[data->speed] * (data->stretch) * count,
+ timebases[data->speed] * (data->stretch) * count
+ + W1_F19_BUSY_GRATUITY);
+
+ /* Now continuously check the busy flag sent by the DS28E17. */
+ checks = W1_F19_BUSY_CHECKS;
+ while ((checks--) > 0) {
+ /* Return success if the busy flag is cleared. */
+ if (w1_touch_bit(sl->master, 1) == 0)
+ return 0;
+
+ /* Wait one non-stretched byte timeslot. */
+ udelay(timebases[data->speed]);
+ }
+
+ /* Timeout. */
+ dev_warn(&sl->dev, "busy timeout\n");
+ return -ETIMEDOUT;
+}
+
+
+/* Utility function: result. */
+static int w1_f19_error(struct w1_slave *sl, u8 w1_buf[])
+{
+ /* Warnings. */
+ if (w1_buf[0] & W1_F19_STATUS_CRC)
+ dev_warn(&sl->dev, "crc16 mismatch\n");
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ dev_warn(&sl->dev, "i2c device not responding\n");
+ if ((w1_buf[0] & (W1_F19_STATUS_CRC | W1_F19_STATUS_ADDRESS)) == 0
+ && w1_buf[1] != 0) {
+ dev_warn(&sl->dev, "i2c short write, %d bytes not acknowledged\n",
+ w1_buf[1]);
+ }
+
+ /* Check error conditions. */
+ if (w1_buf[0] & W1_F19_STATUS_ADDRESS)
+ return -ENXIO;
+ if (w1_buf[0] & W1_F19_STATUS_START)
+ return -EAGAIN;
+ if (w1_buf[0] != 0 || w1_buf[1] != 0)
+ return -EIO;
+
+ /* All ok. */
+ return 0;
+}
+
+
+/* Utility function: write data to I2C slave, single chunk. */
+static int __w1_f19_i2c_write(struct w1_slave *sl,
+ const u8 *command, size_t command_count,
+ const u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[2];
+
+ /* Send command and I2C data to DS28E17. */
+ crc = crc16(CRC16_INIT, command, command_count);
+ w1_write_block(sl->master, command, command_count);
+
+ w1_buf[0] = count;
+ crc = crc16(crc, w1_buf, 1);
+ w1_write_8(sl->master, w1_buf[0]);
+
+ crc = crc16(crc, buffer, count);
+ w1_write_block(sl->master, buffer, count);
+
+ w1_buf[0] = ~(crc & 0xFF);
+ w1_buf[1] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Return number of bytes written. */
+ return count;
+}
+
+
+/* Write data to I2C slave. */
+static int w1_f19_i2c_write(struct w1_slave *sl, u16 i2c_address,
+ const u8 *buffer, size_t count, bool stop)
+{
+ int result;
+ int remaining = count;
+ const u8 *p;
+ u8 command[2];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Check whether we need multiple commands. */
+ if (count <= W1_F19_WRITE_DATA_LIMIT) {
+ /*
+ * Small data amount. Data can be sent with
+ * a single onewire command.
+ */
+
+ /* Send all data to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_WITH_STOP
+ : W1_F19_WRITE_DATA_NO_STOP);
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, buffer, count);
+ } else {
+ /* Large data amount. Data has to be sent in multiple chunks. */
+
+ /* Send first chunk to DS28E17. */
+ p = buffer;
+ command[0] = W1_F19_WRITE_DATA_NO_STOP;
+ command[1] = i2c_address << 1;
+ result = __w1_f19_i2c_write(sl, command, 2, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+
+ while (remaining > W1_F19_WRITE_DATA_LIMIT) {
+ /* Send intermediate chunk to DS28E17. */
+ command[0] = W1_F19_WRITE_DATA_ONLY;
+ result = __w1_f19_i2c_write(sl, command, 1, p,
+ W1_F19_WRITE_DATA_LIMIT);
+ if (result < 0)
+ return result;
+
+ /* Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master))
+ return -EIO;
+
+ /* Next data chunk. */
+ p += W1_F19_WRITE_DATA_LIMIT;
+ remaining -= W1_F19_WRITE_DATA_LIMIT;
+ }
+
+ /* Send final chunk to DS28E17. */
+ command[0] = (stop ? W1_F19_WRITE_DATA_ONLY_WITH_STOP
+ : W1_F19_WRITE_DATA_ONLY);
+ result = __w1_f19_i2c_write(sl, command, 1, p, remaining);
+ }
+
+ return result;
+}
+
+
+/* Read data from I2C slave. */
+static int w1_f19_i2c_read(struct w1_slave *sl, u16 i2c_address,
+ u8 *buffer, size_t count)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[5];
+
+ /* Check input. */
+ if (count == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command to DS28E17. */
+ w1_buf[0] = W1_F19_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1 | 0x01;
+ w1_buf[2] = count;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_buf[3] = ~(crc & 0xFF);
+ w1_buf[4] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 5);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, count + 1) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_buf[0] = w1_read_8(sl->master);
+ w1_buf[1] = 0;
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, buffer, count);
+}
+
+
+/* Write to, then read data from I2C slave. */
+static int w1_f19_i2c_write_read(struct w1_slave *sl, u16 i2c_address,
+ const u8 *wbuffer, size_t wcount, u8 *rbuffer, size_t rcount)
+{
+ u16 crc;
+ int error;
+ u8 w1_buf[3];
+
+ /* Check input. */
+ if (wcount == 0 || rcount == 0)
+ return -EOPNOTSUPP;
+
+ /* Send command and I2C data to DS28E17. */
+ w1_buf[0] = W1_F19_WRITE_READ_DATA_WITH_STOP;
+ w1_buf[1] = i2c_address << 1;
+ w1_buf[2] = wcount;
+ crc = crc16(CRC16_INIT, w1_buf, 3);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ crc = crc16(crc, wbuffer, wcount);
+ w1_write_block(sl->master, wbuffer, wcount);
+
+ w1_buf[0] = rcount;
+ crc = crc16(crc, w1_buf, 1);
+ w1_buf[1] = ~(crc & 0xFF);
+ w1_buf[2] = ~((crc >> 8) & 0xFF);
+ w1_write_block(sl->master, w1_buf, 3);
+
+ /* Wait until busy flag clears (or timeout). */
+ if (w1_f19_i2c_busy_wait(sl, wcount + rcount + 2) < 0)
+ return -ETIMEDOUT;
+
+ /* Read status from DS28E17. */
+ w1_read_block(sl->master, w1_buf, 2);
+
+ /* Check error conditions. */
+ error = w1_f19_error(sl, w1_buf);
+ if (error < 0)
+ return error;
+
+ /* Read received I2C data from DS28E17. */
+ return w1_read_block(sl->master, rbuffer, rcount);
+}
+
+
+/* Do an I2C master transfer. */
+static int w1_f19_i2c_master_transfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ struct w1_slave *sl = (struct w1_slave *) adapter->algo_data;
+ int i = 0;
+ int result = 0;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select DS28E17. */
+ if (w1_reset_select_slave(sl)) {
+ i = -EIO;
+ goto error;
+ }
+
+ /* Loop while there are still messages to transfer. */
+ while (i < num) {
+ /*
+ * Check for special case: Small write followed
+ * by read to same I2C device.
+ */
+ if (i < (num-1)
+ && msgs[i].addr == msgs[i+1].addr
+ && !(msgs[i].flags & I2C_M_RD)
+ && (msgs[i+1].flags & I2C_M_RD)
+ && (msgs[i].len <= W1_F19_WRITE_DATA_LIMIT)) {
+ /*
+ * The DS28E17 has a combined transfer
+ * for small write+read.
+ */
+ result = w1_f19_i2c_write_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len,
+ msgs[i+1].buf, msgs[i+1].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i+1].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl, msgs[i+1].addr,
+ &(msgs[i+1].buf[1]), msgs[i+1].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Eat up read message, too. */
+ i++;
+ } else if (msgs[i].flags & I2C_M_RD) {
+ /* Read transfer. */
+ result = w1_f19_i2c_read(sl, msgs[i].addr,
+ msgs[i].buf, msgs[i].len);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+
+ /*
+ * Check if we should interpret the read data
+ * as a length byte. The DS28E17 unfortunately
+ * has no read without stop, so we can just do
+ * another simple read in that case.
+ */
+ if (msgs[i].flags & I2C_M_RECV_LEN) {
+ result = w1_f19_i2c_read(sl,
+ msgs[i].addr,
+ &(msgs[i].buf[1]),
+ msgs[i].buf[0]);
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+ } else {
+ /*
+ * Write transfer.
+ * Stop condition only for last
+ * transfer.
+ */
+ result = w1_f19_i2c_write(sl,
+ msgs[i].addr,
+ msgs[i].buf,
+ msgs[i].len,
+ i == (num-1));
+ if (result < 0) {
+ i = result;
+ goto error;
+ }
+ }
+
+ /* Next message. */
+ i++;
+
+ /* Are there still messages to send/receive? */
+ if (i < num) {
+ /* Yes. Resume to same DS28E17. */
+ if (w1_reset_resume_command(sl->master)) {
+ i = -EIO;
+ goto error;
+ }
+ }
+ }
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ /* Return number of messages processed or error. */
+ return i;
+}
+
+
+/* Get I2C adapter functionality. */
+static u32 w1_f19_i2c_functionality(struct i2c_adapter *adapter)
+{
+ /*
+ * Plain I2C functions only.
+ * SMBus is emulated by the kernel's I2C layer.
+ * No "I2C_FUNC_SMBUS_QUICK"
+ * No "I2C_FUNC_SMBUS_READ_BLOCK_DATA"
+ * No "I2C_FUNC_SMBUS_BLOCK_PROC_CALL"
+ */
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_PROC_CALL |
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_PEC;
+}
+
+
+/* I2C adapter quirks. */
+static const struct i2c_adapter_quirks w1_f19_i2c_adapter_quirks = {
+ .max_read_len = W1_F19_READ_DATA_LIMIT,
+};
+
+/* I2C algorithm. */
+static const struct i2c_algorithm w1_f19_i2c_algorithm = {
+ .master_xfer = w1_f19_i2c_master_transfer,
+ .functionality = w1_f19_i2c_functionality,
+};
+
+
+/* Read I2C speed from DS28E17. */
+static int w1_f19_get_i2c_speed(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = sl->family_data;
+ int result = -EIO;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ goto error;
+
+ /* Read slave configuration byte. */
+ w1_write_8(sl->master, W1_F19_READ_CONFIGURATION);
+ result = w1_read_8(sl->master);
+ if (result < 0 || result > 2) {
+ result = -EIO;
+ goto error;
+ }
+
+ /* Update speed in slave specific data. */
+ data->speed = result;
+
+error:
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Set I2C speed on DS28E17. */
+static int __w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ struct w1_f19_data *data = sl->family_data;
+ const int i2c_speeds[3] = { 100, 400, 900 };
+ u8 w1_buf[2];
+
+ /* Select slave. */
+ if (w1_reset_select_slave(sl))
+ return -EIO;
+
+ w1_buf[0] = W1_F19_WRITE_CONFIGURATION;
+ w1_buf[1] = speed;
+ w1_write_block(sl->master, w1_buf, 2);
+
+ /* Update speed in slave specific data. */
+ data->speed = speed;
+
+ dev_info(&sl->dev, "i2c speed set to %d kBaud\n", i2c_speeds[speed]);
+
+ return 0;
+}
+
+static int w1_f19_set_i2c_speed(struct w1_slave *sl, u8 speed)
+{
+ int result;
+
+ /* Start onewire transaction. */
+ mutex_lock(&sl->master->bus_mutex);
+
+ /* Set I2C speed on DS28E17. */
+ result = __w1_f19_set_i2c_speed(sl, speed);
+
+ /* End onewire transaction. */
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return result;
+}
+
+
+/* Sysfs attributes. */
+
+/* I2C speed attribute for a single chip. */
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int result;
+
+ /* Read current speed from slave. Updates data->speed. */
+ result = w1_f19_get_i2c_speed(sl);
+ if (result < 0)
+ return result;
+
+ /* Return current speed value. */
+ return sprintf(buf, "%d\n", result);
+}
+
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ int error;
+
+ /* Valid values are: "100", "400", "900" */
+ if (count < 3 || count > 4 || !buf)
+ return -EINVAL;
+ if (count == 4 && buf[3] != '\n')
+ return -EINVAL;
+ if (buf[1] != '0' || buf[2] != '0')
+ return -EINVAL;
+
+ /* Set speed on slave. */
+ switch (buf[0]) {
+ case '1':
+ error = w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case '4':
+ error = w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case '9':
+ error = w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (error < 0)
+ return error;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(speed);
+
+
+/* Busy stretch attribute for a single chip. */
+static ssize_t stretch_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Return current stretch value. */
+ return sprintf(buf, "%d\n", data->stretch);
+}
+
+static ssize_t stretch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct w1_slave *sl = dev_to_w1_slave(dev);
+ struct w1_f19_data *data = sl->family_data;
+
+ /* Valid values are '1' to '9' */
+ if (count < 1 || count > 2 || !buf)
+ return -EINVAL;
+ if (count == 2 && buf[1] != '\n')
+ return -EINVAL;
+ if (buf[0] < '1' || buf[0] > '9')
+ return -EINVAL;
+
+ /* Set busy stretch value. */
+ data->stretch = buf[0] & 0x0F;
+
+ /* Return bytes written. */
+ return count;
+}
+
+static DEVICE_ATTR_RW(stretch);
+
+
+/* All attributes. */
+static struct attribute *w1_f19_attrs[] = {
+ &dev_attr_speed.attr,
+ &dev_attr_stretch.attr,
+ NULL,
+};
+
+static const struct attribute_group w1_f19_group = {
+ .attrs = w1_f19_attrs,
+};
+
+static const struct attribute_group *w1_f19_groups[] = {
+ &w1_f19_group,
+ NULL,
+};
+
+
+/* Slave add and remove functions. */
+static int w1_f19_add_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *data = NULL;
+
+ /* Allocate memory for slave specific data. */
+ data = devm_kzalloc(&sl->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ sl->family_data = data;
+
+ /* Setup default I2C speed on slave. */
+ switch (i2c_speed) {
+ case 100:
+ __w1_f19_set_i2c_speed(sl, 0);
+ break;
+ case 400:
+ __w1_f19_set_i2c_speed(sl, 1);
+ break;
+ case 900:
+ __w1_f19_set_i2c_speed(sl, 2);
+ break;
+ default:
+ /*
+ * An i2c_speed module parameter value other than
+ * 100, 400, or 900 means the speed of the
+ * DS28E17 is left untouched.
+ * We assume 400 kBaud, the power-on default.
+ */
+ data->speed = 1;
+ }
+
+ /*
+ * Setup default busy stretch
+ * configuration for the DS28E17.
+ */
+ data->stretch = i2c_stretch;
+
+ /* Setup I2C adapter. */
+ data->adapter.owner = THIS_MODULE;
+ data->adapter.algo = &w1_f19_i2c_algorithm;
+ data->adapter.algo_data = sl;
+ strcpy(data->adapter.name, "w1-");
+ strcat(data->adapter.name, sl->name);
+ data->adapter.dev.parent = &sl->dev;
+ data->adapter.quirks = &w1_f19_i2c_adapter_quirks;
+
+ return i2c_add_adapter(&data->adapter);
+}
+
+static void w1_f19_remove_slave(struct w1_slave *sl)
+{
+ struct w1_f19_data *family_data = sl->family_data;
+
+ /* Delete I2C adapter. */
+ i2c_del_adapter(&family_data->adapter);
+
+ /* Free slave specific data. */
+ devm_kfree(&sl->dev, family_data);
+ sl->family_data = NULL;
+}
+
+
+/* Declarations within the w1 subsystem. */
+static struct w1_family_ops w1_f19_fops = {
+ .add_slave = w1_f19_add_slave,
+ .remove_slave = w1_f19_remove_slave,
+ .groups = w1_f19_groups,
+};
+
+static struct w1_family w1_family_19 = {
+ .fid = W1_FAMILY_DS28E17,
+ .fops = &w1_f19_fops,
+};
+
+
+/* Module init and remove functions. */
+static int __init w1_f19_init(void)
+{
+ return w1_register_family(&w1_family_19);
+}
+
+static void __exit w1_f19_fini(void)
+{
+ w1_unregister_family(&w1_family_19);
+}
+
+module_init(w1_f19_init);
+module_exit(w1_f19_fini);
+
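The master_xfer loop above folds a small write that is immediately followed by a read to the same address into the DS28E17's combined write+read command. The sketch below is not part of the patch; it only shows the two-message shape an I2C client would submit to take that path, and the 0x50 address and helper name are hypothetical.

/* Hypothetical register read: write one byte, then read one byte from
 * the same address. w1_f19_i2c_master_transfer() turns this pair into
 * a single DS28E17 write+read transaction.
 */
#include <linux/i2c.h>

static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };
        int ret;

        /* i2c_transfer() hands both messages to master_xfer at once. */
        ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
        if (ret < 0)
                return ret;
        return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}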
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 259525c3382a..3c350dfbcd0b 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -268,17 +268,18 @@ static inline int w1_therm_eeprom(struct device *device)
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
while (max_trying--) {
@@ -306,17 +307,17 @@ static inline int w1_therm_eeprom(struct device *device)
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto post_unlock;
+ goto dec_refcnt;
}
ret = mutex_lock_interruptible(&dev->bus_mutex);
if (ret != 0)
- goto post_unlock;
+ goto dec_refcnt;
} else if (!w1_strong_pullup) {
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto pre_unlock;
+ goto mt_unlock;
}
}
@@ -324,11 +325,11 @@ static inline int w1_therm_eeprom(struct device *device)
}
}
-pre_unlock:
+mt_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -350,20 +351,22 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
if (val > 12 || val < 9) {
pr_warn("Unsupported precision\n");
- return -1;
+ ret = -EINVAL;
+ goto error;
}
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto post_unlock;
-
if (!sl->family_data) {
ret = -ENODEV;
- goto pre_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(rom, 0, sizeof(rom));
/* translate precision to bitmask (see datasheet page 9) */
@@ -411,11 +414,10 @@ static inline int w1_DS18B20_precision(struct device *device, int val)
}
}
-pre_unlock:
mutex_unlock(&dev->bus_mutex);
-
-post_unlock:
+dec_refcnt:
atomic_dec(THERM_REFCNT(family_data));
+error:
return ret;
}
@@ -490,17 +492,18 @@ static ssize_t read_therm(struct device *device,
int ret, max_trying = 10;
u8 *family_data = sl->family_data;
- ret = mutex_lock_interruptible(&dev->bus_mutex);
- if (ret != 0)
- goto error;
-
if (!family_data) {
ret = -ENODEV;
- goto mt_unlock;
+ goto error;
}
/* prevent the slave from going away in sleep */
atomic_inc(THERM_REFCNT(family_data));
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto dec_refcnt;
+
memset(info->rom, 0, sizeof(info->rom));
while (max_trying--) {
@@ -542,7 +545,7 @@ static ssize_t read_therm(struct device *device,
sleep_rem = msleep_interruptible(tm);
if (sleep_rem != 0) {
ret = -EINTR;
- goto dec_refcnt;
+ goto mt_unlock;
}
}
@@ -567,10 +570,10 @@ static ssize_t read_therm(struct device *device,
break;
}
-dec_refcnt:
- atomic_dec(THERM_REFCNT(family_data));
mt_unlock:
mutex_unlock(&dev->bus_mutex);
+dec_refcnt:
+ atomic_dec(THERM_REFCNT(family_data));
error:
return ret;
}
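Every w1_therm hunk above applies the same reordering: pin the slave with its refcount before taking the bus mutex, and unwind in the opposite order, so an interrupted mutex_lock_interruptible() no longer skips the refcount decrement. A condensed sketch of that ordering, with hypothetical type and helper names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct example_slave {
        void *family_data;
        atomic_t refcnt;
        struct mutex bus_mutex;
};

static int example_bus_io(struct example_slave *sl) { return 0; } /* stand-in */

static int example_therm_op(struct example_slave *sl)
{
        int ret;

        if (!sl->family_data)
                return -ENODEV;                 /* nothing acquired yet */

        atomic_inc(&sl->refcnt);                /* 1. pin the slave */

        ret = mutex_lock_interruptible(&sl->bus_mutex); /* 2. take the bus */
        if (ret)
                goto dec_refcnt;                /* drop only the refcount */

        ret = example_bus_io(sl);

        mutex_unlock(&sl->bus_mutex);           /* release in reverse order */
dec_refcnt:
        atomic_dec(&sl->refcnt);
        return ret;
}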
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index d191e1f80579..075d120e7b88 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -58,7 +58,7 @@ static u8 w1_read_bit(struct w1_master *dev);
* @dev: the master device
* @bit: 0 - write a 0, 1 - write a 0 read the level
*/
-static u8 w1_touch_bit(struct w1_master *dev, int bit)
+u8 w1_touch_bit(struct w1_master *dev, int bit)
{
if (dev->bus_master->touch_bit)
return dev->bus_master->touch_bit(dev->bus_master->data, bit);
@@ -69,6 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
return 0;
}
}
+EXPORT_SYMBOL_GPL(w1_touch_bit);
/**
* w1_write_bit() - Generates a write-0 or write-1 cycle.
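Making w1_touch_bit() non-static and exporting it lets slave drivers generate single bit slots on the bus themselves; the DS28E17 support added in this series is the expected user. A minimal usage sketch, assuming the declaration is made visible through linux/w1.h as part of the same series:

#include <linux/w1.h>

/* Sample one read slot: touching with 1 releases the line and returns
 * the level driven by the slave (sketch only, not driver code).
 */
static u8 example_sample_bit(struct w1_slave *sl)
{
        return w1_touch_bit(sl->master, 1);
}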
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c722cbfdc7e6..ca200d1f310a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1460,7 +1460,7 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || MACH_JZ4780
select WATCHDOG_CORE
help
Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 56adf9fa67d0..715a21078e0c 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the WatchDog device drivers.
#
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 665e0e7dfe1e..12f7ea62dddd 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -70,8 +70,8 @@ module_param(use_gpio, int, 0);
MODULE_PARM_DESC(use_gpio,
"Use the gpio watchdog (required by old cobalt boards).");
-static void wdt_timer_ping(unsigned long);
-static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1);
+static void wdt_timer_ping(struct timer_list *);
+static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout,
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
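The watchdog conversions that follow all apply the same timer-API change: the callback now receives the struct timer_list itself instead of an unsigned long cookie, DEFINE_TIMER() and timer_setup() lose the data argument, and per-device timers recover their container with from_timer(). A driver-independent sketch of the new pattern (names are hypothetical):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_wdt {
        struct timer_list timer;
        unsigned long next_heartbeat;
};

/* New-style callback: the timer itself is passed in. */
static void example_wdt_ping(struct timer_list *t)
{
        struct example_wdt *wdt = from_timer(wdt, t, timer);

        if (time_before(jiffies, wdt->next_heartbeat))
                mod_timer(&wdt->timer, jiffies + HZ);
}

static void example_wdt_start(struct example_wdt *wdt)
{
        /* Replaces setup_timer(&wdt->timer, cb, (unsigned long)wdt). */
        timer_setup(&wdt->timer, example_wdt_ping, 0);
        wdt->next_heartbeat = jiffies + 30 * HZ;
        mod_timer(&wdt->timer, jiffies + HZ);
}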
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 7e6acaf3ece4..88c05d0448b2 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -120,9 +120,9 @@ static inline void at91_wdt_reset(struct at91wdt *wdt)
/*
* Timer tick
*/
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
{
- struct at91wdt *wdt = (struct at91wdt *)data;
+ struct at91wdt *wdt = from_timer(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
@@ -222,7 +222,7 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
"watchdog already configured differently (mr = %x expecting %x)\n",
tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
- setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, at91_ping, 0);
/*
* Use min_heartbeat the first time to avoid spurious watchdog reset:
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 236582809336..f41b756d6dd5 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -106,9 +106,9 @@ static const struct watchdog_ops bcm47xx_wdt_hard_ops = {
.restart = bcm47xx_wdt_restart,
};
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
{
- struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+ struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
if (!atomic_dec_and_test(&wdt->soft_ticks)) {
@@ -133,7 +133,7 @@ static int bcm47xx_wdt_soft_start(struct watchdog_device *wdd)
struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
bcm47xx_wdt_soft_keepalive(wdd);
- bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+ bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
return 0;
}
@@ -190,8 +190,7 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev)
if (soft) {
wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
- setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
- (long unsigned int)wdt);
+ timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
} else {
wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
}
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index ab26fd90729e..8555afc70f9b 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -77,7 +77,7 @@ static void bcm63xx_wdt_isr(void *data)
die(PFX " fire", regs);
}
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
{
if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
bcm63xx_wdt_hw_start();
@@ -240,7 +240,7 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
int ret;
struct resource *r;
- setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+ timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 6c3f78e45c26..6cfb102c397c 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -69,7 +69,7 @@ static struct {
/* generic helper functions */
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
@@ -224,7 +224,7 @@ static int cpu5wdt_init(void)
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
- setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+ timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 3d43775548e5..aee0b25cf10d 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -230,9 +230,9 @@ static void cpwd_resetbrokentimer(struct cpwd *p, int index)
 * interrupts within the PLD so we must continually
* reset the timers ad infinitum.
*/
-static void cpwd_brokentimer(unsigned long data)
+static void cpwd_brokentimer(struct timer_list *unused)
{
- struct cpwd *p = (struct cpwd *) data;
+ struct cpwd *p = cpwd_device;
int id, tripped = 0;
/* kill a running timer instance, in case we
@@ -275,7 +275,7 @@ static void cpwd_stoptimer(struct cpwd *p, int index)
if (p->broken) {
p->devs[index].runstatus |= WD_STAT_BSTOP;
- cpwd_brokentimer((unsigned long) p);
+ cpwd_brokentimer(NULL);
}
}
}
@@ -608,7 +608,7 @@ static int cpwd_probe(struct platform_device *op)
}
if (p->broken) {
- setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p);
+ timer_setup(&cpwd_timer, cpwd_brokentimer, 0);
cpwd_timer.expires = WD_BTIMEOUT;
pr_info("PLD defect workaround enabled for model %s\n",
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 3c57b45537a2..7b82a7c6e7c3 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* iTCO Vendor Specific Support hooks */
#ifdef CONFIG_ITCO_VENDOR_SUPPORT
extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 3b8bb59adf02..b4221f43cd94 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -78,10 +78,10 @@ static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev)
return 0;
}
-static void lpc18xx_wdt_timer_feed(unsigned long data)
+static void lpc18xx_wdt_timer_feed(struct timer_list *t)
{
- struct watchdog_device *wdt_dev = (struct watchdog_device *)data;
- struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer);
+ struct watchdog_device *wdt_dev = &lpc18xx_wdt->wdt_dev;
lpc18xx_wdt_feed(wdt_dev);
@@ -96,7 +96,9 @@ static void lpc18xx_wdt_timer_feed(unsigned long data)
*/
static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev)
{
- lpc18xx_wdt_timer_feed((unsigned long)wdt_dev);
+ struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
+
+ lpc18xx_wdt_timer_feed(&lpc18xx_wdt->timer);
return 0;
}
@@ -267,8 +269,7 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev)
__lpc18xx_wdt_set_timeout(lpc18xx_wdt);
- setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed,
- (unsigned long)&lpc18xx_wdt->wdt_dev);
+ timer_setup(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, 0);
watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout);
watchdog_set_restart_priority(&lpc18xx_wdt->wdt_dev, 128);
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 9826b59ef734..88d823d87a4b 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -121,13 +121,13 @@ module_param(action, int, 0);
MODULE_PARM_DESC(action, "after watchdog resets, generate: "
"0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
static int zf_action = GEN_RESET;
static unsigned long zf_is_open;
static char zf_expect_close;
static DEFINE_SPINLOCK(zf_port_lock);
-static DEFINE_TIMER(zf_timer, zf_ping, 0, 0);
+static DEFINE_TIMER(zf_timer, zf_ping);
static unsigned long next_heartbeat;
@@ -237,7 +237,7 @@ static void zf_timer_on(void)
}
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
{
unsigned int ctrl_reg = 0;
unsigned long flags;
diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c
index be86ea359eee..3cc07447c655 100644
--- a/drivers/watchdog/mixcomwd.c
+++ b/drivers/watchdog/mixcomwd.c
@@ -99,13 +99,13 @@ static struct {
{0x0000, 0},
};
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
static int watchdog_port;
static int mixcomwd_timer_alive;
-static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun, 0, 0);
+static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun);
static char expect_close;
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -120,7 +120,7 @@ static void mixcomwd_ping(void)
return;
}
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 366e5c7e650b..6610e9217dbc 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -80,9 +80,9 @@ static void mpc8xxx_wdt_keepalive(struct mpc8xxx_wdt_ddata *ddata)
spin_unlock(&ddata->lock);
}
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
{
- struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+ struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
mpc8xxx_wdt_keepalive(ddata);
/* We're pinging it twice faster than needed, just to be sure. */
@@ -173,8 +173,7 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
}
spin_lock_init(&ddata->lock);
- setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
- (unsigned long)ddata);
+ timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
ddata->wdd.info = &mpc8xxx_wdt_info,
ddata->wdd.ops = &mpc8xxx_wdt_ops,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ff27c4ac96e4..ca360d204548 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -68,7 +68,7 @@ static struct {
unsigned int gstate;
} mtx1_wdt_device;
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
{
spin_lock(&mtx1_wdt_device.lock);
if (mtx1_wdt_device.running)
@@ -219,7 +219,7 @@ static int mtx1_wdt_probe(struct platform_device *pdev)
init_completion(&mtx1_wdt_device.stop);
mtx1_wdt_device.queue = 0;
clear_bit(0, &mtx1_wdt_device.inuse);
- setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+ timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
mtx1_wdt_device.default_ticks = ticks;
ret = misc_register(&mtx1_wdt_misc);
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index d5bed78c4d9f..830bd04ff911 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -216,7 +216,7 @@ static ssize_t nuc900_wdt_write(struct file *file, const char __user *data,
return len;
}
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
{
if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
nuc900_wdt_keepalive();
@@ -267,7 +267,7 @@ static int nuc900wdt_probe(struct platform_device *pdev)
clk_enable(nuc900_wdt->wdt_clock);
- setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+ timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
ret = misc_register(&nuc900wdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 3ad5206d7935..b72ce68eacd3 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -367,7 +367,7 @@ static void pcwd_show_card_info(void)
pr_info("No previous trip detected - Cold boot or reset\n");
}
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
{
int wdrst_stat;
@@ -893,7 +893,7 @@ static int pcwd_isa_probe(struct device *dev, unsigned int id)
/* clear the "card caused reboot" flag */
pcwd_clear_status();
- setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+ timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
/* Disable the board */
pcwd_stop();
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index e35cf5e87907..e0a6f8c0f03c 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -85,7 +85,7 @@ static inline void pikawdt_reset(void)
/*
* Timer tick
*/
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
{
if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
(!nowayout && !pikawdt_private.open)) {
@@ -269,7 +269,7 @@ static int __init pikawdt_init(void)
iounmap(fpga);
- setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+ timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
ret = misc_register(&pikawdt_miscdev);
if (ret) {
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 47a8f1b1087d..a281aa84bfb1 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -67,7 +67,7 @@ static struct {
/* generic helper functions */
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
{
unsigned long flags;
u32 val;
@@ -262,7 +262,7 @@ static int rdc321x_wdt_probe(struct platform_device *pdev)
clear_bit(0, &rdc321x_wdt_device.inuse);
- setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+ timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
rdc321x_wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c
index 2eef58a0cf05..87333a41f753 100644
--- a/drivers/watchdog/sbc60xxwdt.c
+++ b/drivers/watchdog/sbc60xxwdt.c
@@ -112,8 +112,8 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
-static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
+static void wdt_timer_ping(struct timer_list *);
+static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
@@ -122,7 +122,7 @@ static char wdt_expect_close;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c
index 1cfd3f6a13d5..6aadb56e7faa 100644
--- a/drivers/watchdog/sc520_wdt.c
+++ b/drivers/watchdog/sc520_wdt.c
@@ -123,8 +123,8 @@ MODULE_PARM_DESC(nowayout,
static __u16 __iomem *wdtmrctl;
-static void wdt_timer_ping(unsigned long);
-static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
+static void wdt_timer_ping(struct timer_list *);
+static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
@@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 517a733175ef..a7d6425db807 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -175,9 +175,9 @@ static int sh_wdt_set_heartbeat(struct watchdog_device *wdt_dev, unsigned t)
return 0;
}
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
{
- struct sh_wdt *wdt = (struct sh_wdt *)data;
+ struct sh_wdt *wdt = from_timer(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
@@ -275,7 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
return rc;
}
- setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, sh_wdt_ping, 0);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
index 2b28c00da0df..1af4dee71337 100644
--- a/drivers/watchdog/sp5100_tco.h
+++ b/drivers/watchdog/sp5100_tco.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* sp5100_tco: TCO timer driver for sp5100 chipsets.
*
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 5f9cbc37520d..b085ef1084ec 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -67,8 +67,8 @@ static struct watchdog_device wdt_dev;
static struct resource wdt_res;
static void __iomem *wdt_mem;
static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
-static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0);
+static void wdt_timer_tick(struct timer_list *unused);
+static DEFINE_TIMER(timer, wdt_timer_tick);
/* The timer that pings the watchdog */
static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
@@ -88,7 +88,7 @@ static inline void wdt_reset(void)
* then the external/userspace heartbeat).
* 2) the watchdog timer has been stopped by userspace.
*/
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
{
if (time_before(jiffies, next_heartbeat) ||
(!watchdog_active(&wdt_dev))) {
diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c
index f0483c75ed32..05658ecc0aa4 100644
--- a/drivers/watchdog/w83877f_wdt.c
+++ b/drivers/watchdog/w83877f_wdt.c
@@ -97,8 +97,8 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
-static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0);
+static void wdt_timer_ping(struct timer_list *);
+static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
static char wdt_expect_close;
@@ -108,7 +108,7 @@ static DEFINE_SPINLOCK(wdt_spinlock);
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 74265b2f806c..8a8d952f8df9 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -137,25 +137,6 @@ int watchdog_init_timeout(struct watchdog_device *wdd,
}
EXPORT_SYMBOL_GPL(watchdog_init_timeout);
-static int watchdog_reboot_notifier(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- struct watchdog_device *wdd = container_of(nb, struct watchdog_device,
- reboot_nb);
-
- if (code == SYS_DOWN || code == SYS_HALT) {
- if (watchdog_active(wdd)) {
- int ret;
-
- ret = wdd->ops->stop(wdd);
- if (ret)
- return NOTIFY_BAD;
- }
- }
-
- return NOTIFY_DONE;
-}
-
static int watchdog_restart_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -244,19 +225,6 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
- if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = register_reboot_notifier(&wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, wdd->id);
- return ret;
- }
- }
-
if (wdd->ops->restart) {
wdd->restart_nb.notifier_call = watchdog_restart_notifier;
@@ -302,9 +270,6 @@ static void __watchdog_unregister_device(struct watchdog_device *wdd)
if (wdd->ops->restart)
unregister_restart_handler(&wdd->restart_nb);
- if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status))
- unregister_reboot_notifier(&wdd->reboot_nb);
-
watchdog_dev_unregister(wdd);
ida_simple_remove(&watchdog_ida, wdd->id);
}
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 0826e663bd5a..1e971a50d7fb 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,6 +42,7 @@
#include <linux/miscdevice.h> /* For handling misc devices */
#include <linux/module.h> /* For module stuff/... */
#include <linux/mutex.h> /* For mutexes */
+#include <linux/reboot.h> /* For reboot notifier */
#include <linux/slab.h> /* For memory functions */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/watchdog.h> /* For watchdog specific items */
@@ -1016,6 +1017,25 @@ static struct class watchdog_class = {
.dev_groups = wdt_groups,
};
+static int watchdog_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ struct watchdog_device *wdd;
+
+ wdd = container_of(nb, struct watchdog_device, reboot_nb);
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ if (watchdog_active(wdd)) {
+ int ret;
+
+ ret = wdd->ops->stop(wdd);
+ if (ret)
+ return NOTIFY_BAD;
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
/*
* watchdog_dev_register: register a watchdog device
* @wdd: watchdog device
@@ -1049,6 +1069,18 @@ int watchdog_dev_register(struct watchdog_device *wdd)
if (ret) {
device_destroy(&watchdog_class, devno);
watchdog_cdev_unregister(wdd);
+ return ret;
+ }
+
+ if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = devm_register_reboot_notifier(dev, &wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ }
}
return ret;
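With the notifier moved here, stop-on-reboot handling is registered per device from watchdog_dev_register() through devm_register_reboot_notifier(), so it is also torn down automatically with the device. Drivers opt in as before by setting WDOG_STOP_ON_REBOOT prior to registration; a brief sketch, assuming the usual watchdog_stop_on_reboot() helper from linux/watchdog.h:

#include <linux/watchdog.h>

static int example_wdt_register(struct watchdog_device *wdd)
{
        /* Ask the core to stop the watchdog on SYS_DOWN/SYS_HALT. */
        watchdog_stop_on_reboot(wdd);

        /* The reboot notifier is now registered inside this call. */
        return watchdog_register_device(wdd);
}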
diff --git a/drivers/watchdog/watchdog_pretimeout.h b/drivers/watchdog/watchdog_pretimeout.h
index a5a32b39c56d..a3f1abc68839 100644
--- a/drivers/watchdog/watchdog_pretimeout.h
+++ b/drivers/watchdog/watchdog_pretimeout.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __WATCHDOG_PRETIMEOUT_H
#define __WATCHDOG_PRETIMEOUT_H
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 4545561954ee..d8dd54678ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,6 +196,17 @@ config XEN_PCIDEV_BACKEND
If in doubt, say m.
+config XEN_PVCALLS_FRONTEND
+ tristate "XEN PV Calls frontend driver"
+ depends on INET && XEN
+ default n
+ select XEN_XENBUS_FRONTEND
+ help
+ Experimental frontend for the Xen PV Calls protocol
+ (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
+ sends a small set of POSIX calls to the backend, which
+ implements them.
+
config XEN_PVCALLS_BACKEND
bool "XEN PV Calls backend driver"
depends on INET && XEN && XEN_BACKEND
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index caaa15dc37bc..451e833f5931 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_X86) += fallback.o
obj-y += grant-table.o features.o balloon.o manage.o preempt.o time.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_XEN_EFI) += efi.o
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
+obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 1bdd02a6d6ac..30d7f52eb7ca 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bio.h>
#include <linux/io.h>
#include <linux/export.h>
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 0003912a8111..d4265c8ebb22 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/notifier.h>
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index bdff01095f54..8edef51c92e5 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xen event channels (2-level ABI)
*
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 2c6a9114d332..f45114fd8e1e 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,6 +33,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -43,6 +44,7 @@
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
+#include <linux/moduleparam.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -52,6 +54,9 @@
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
+#ifdef CONFIG_X86
+#include <asm/xen/cpuid.h>
+#endif
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -68,15 +73,26 @@ static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
+static unsigned int xen_gnttab_version;
+module_param_named(version, xen_gnttab_version, uint, 0);
static union {
struct grant_entry_v1 *v1;
+ union grant_entry_v2 *v2;
void *addr;
} gnttab_shared;
/*This is a structure of function pointers for grant table*/
struct gnttab_ops {
/*
+ * Version of the grant interface.
+ */
+ unsigned int version;
+ /*
+ * Grant refs per grant frame.
+ */
+ unsigned int grefs_per_grant_frame;
+ /*
* Mapping a list of frames for storing grant entries. Frames parameter
* is used to store grant table address when grant table being setup,
* nr_gframes is the number of frames to map grant table. Returning
@@ -130,14 +146,15 @@ struct unmap_refs_callback_data {
static const struct gnttab_ops *gnttab_interface;
-static int grant_table_version;
-static int grefs_per_grant_frame;
+/* This reflects the status of grant entries, so it acts as a global value. */
+static grant_status_t *grstatus;
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
@@ -210,7 +227,7 @@ static void put_free_entry(grant_ref_t ref)
}
/*
- * Following applies to gnttab_update_entry_v1.
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
@@ -229,6 +246,15 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
gnttab_shared.v1[ref].flags = flags;
}
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned int flags)
+{
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ gnttab_shared.v2[ref].full_page.frame = frame;
+ wmb(); /* Hypervisor concurrent accesses. */
+ gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
+}
+
/*
* Public grant-issuing interface functions
*/
@@ -260,6 +286,11 @@ static int gnttab_query_foreign_access_v1(grant_ref_t ref)
return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+ return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
int gnttab_query_foreign_access(grant_ref_t ref)
{
return gnttab_interface->query_foreign_access(ref);
@@ -282,6 +313,29 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
return 1;
}
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+ gnttab_shared.v2[ref].hdr.flags = 0;
+ mb(); /* Concurrent access by hypervisor. */
+ if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+ return 0;
+ } else {
+ /*
+ * The read of grstatus needs to have acquire semantics.
+ * On x86, reads already have that, and we just need to
+ * protect against compiler reorderings.
+ * On other architectures we may need a full barrier.
+ */
+#ifdef CONFIG_X86
+ barrier();
+#else
+ mb();
+#endif
+ }
+
+ return 1;
+}
+
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
@@ -304,10 +358,10 @@ struct deferred_entry {
struct page *page;
};
static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
-static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+static void gnttab_handle_deferred(struct timer_list *);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
{
unsigned int nr = 10;
struct deferred_entry *first = NULL;
@@ -442,6 +496,37 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
return frame;
}
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+ /*
+ * If a transfer has not even started yet, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = *pflags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = gnttab_shared.v2[ref].full_page.frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_transfer_ref(ref);
@@ -563,19 +648,26 @@ void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
+static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
+{
+ return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
+ align;
+}
+
static int grow_gnttab_list(unsigned int more_frames)
{
unsigned int new_nr_grant_frames, extra_entries, i;
unsigned int nr_glist_frames, new_nr_glist_frames;
+ unsigned int grefs_per_frame;
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
+ grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
new_nr_grant_frames = nr_grant_frames + more_frames;
- extra_entries = more_frames * grefs_per_grant_frame;
+ extra_entries = more_frames * grefs_per_frame;
- nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
- new_nr_glist_frames =
- (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
+ new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
if (!gnttab_list[i])
@@ -583,12 +675,12 @@ static int grow_gnttab_list(unsigned int more_frames)
}
- for (i = grefs_per_grant_frame * nr_grant_frames;
- i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
+ for (i = grefs_per_frame * nr_grant_frames;
+ i < grefs_per_frame * new_nr_grant_frames - 1; i++)
gnttab_entry(i) = i + 1;
gnttab_entry(i) = gnttab_free_head;
- gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
+ gnttab_free_head = grefs_per_frame * nr_grant_frames;
gnttab_free_count += extra_entries;
nr_grant_frames = new_nr_grant_frames;
@@ -938,6 +1030,12 @@ int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+static unsigned int nr_status_frames(unsigned int nr_grant_frames)
+{
+ BUG_ON(gnttab_interface == NULL);
+ return gnttab_frames(nr_grant_frames, SPP);
+}
+
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
@@ -955,6 +1053,55 @@ static void gnttab_unmap_frames_v1(void)
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
+static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
+{
+ uint64_t *sframes;
+ unsigned int nr_sframes;
+ struct gnttab_get_status_frames getframes;
+ int rc;
+
+ nr_sframes = nr_status_frames(nr_gframes);
+
+ /* No need for kzalloc as it is initialized by the following hypercall
+ * GNTTABOP_get_status_frames.
+ */
+ sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
+ if (!sframes)
+ return -ENOMEM;
+
+ getframes.dom = DOMID_SELF;
+ getframes.nr_frames = nr_sframes;
+ set_xen_guest_handle(getframes.frame_list, sframes);
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+ &getframes, 1);
+ if (rc == -ENOSYS) {
+ kfree(sframes);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || getframes.status);
+
+ rc = arch_gnttab_map_status(sframes, nr_sframes,
+ nr_status_frames(gnttab_max_grant_frames()),
+ &grstatus);
+ BUG_ON(rc);
+ kfree(sframes);
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+ arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -1014,6 +1161,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
}
static const struct gnttab_ops gnttab_v1_ops = {
+ .version = 1,
+ .grefs_per_grant_frame = XEN_PAGE_SIZE /
+ sizeof(struct grant_entry_v1),
.map_frames = gnttab_map_frames_v1,
.unmap_frames = gnttab_unmap_frames_v1,
.update_entry = gnttab_update_entry_v1,
@@ -1022,14 +1172,56 @@ static const struct gnttab_ops gnttab_v1_ops = {
.query_foreign_access = gnttab_query_foreign_access_v1,
};
-static void gnttab_request_version(void)
+static const struct gnttab_ops gnttab_v2_ops = {
+ .version = 2,
+ .grefs_per_grant_frame = XEN_PAGE_SIZE /
+ sizeof(union grant_entry_v2),
+ .map_frames = gnttab_map_frames_v2,
+ .unmap_frames = gnttab_unmap_frames_v2,
+ .update_entry = gnttab_update_entry_v2,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
+ .query_foreign_access = gnttab_query_foreign_access_v2,
+};
+
+static bool gnttab_need_v2(void)
{
- /* Only version 1 is used, which will always be available. */
- grant_table_version = 1;
- grefs_per_grant_frame = XEN_PAGE_SIZE / sizeof(struct grant_entry_v1);
- gnttab_interface = &gnttab_v1_ops;
+#ifdef CONFIG_X86
+ uint32_t base, width;
+
+ if (xen_pv_domain()) {
+ base = xen_cpuid_base();
+ if (cpuid_eax(base) < 5)
+ return false; /* Information not available, use V1. */
+ width = cpuid_ebx(base + 5) &
+ XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
+ return width > 32 + PAGE_SHIFT;
+ }
+#endif
+ return !!(max_possible_pfn >> 32);
+}
- pr_info("Grant tables using version %d layout\n", grant_table_version);
+static void gnttab_request_version(void)
+{
+ long rc;
+ struct gnttab_set_version gsv;
+
+ if (gnttab_need_v2())
+ gsv.version = 2;
+ else
+ gsv.version = 1;
+
+ /* Boot parameter overrides automatic selection. */
+ if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
+ gsv.version = xen_gnttab_version;
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0 && gsv.version == 2)
+ gnttab_interface = &gnttab_v2_ops;
+ else
+ gnttab_interface = &gnttab_v1_ops;
+ pr_info("Grant tables using version %d layout\n",
+ gnttab_interface->version);
}
static int gnttab_setup(void)
@@ -1069,10 +1261,10 @@ static int gnttab_expand(unsigned int req_entries)
int rc;
unsigned int cur, extra;
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
cur = nr_grant_frames;
- extra = ((req_entries + (grefs_per_grant_frame-1)) /
- grefs_per_grant_frame);
+ extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
+ gnttab_interface->grefs_per_grant_frame);
if (cur + extra > gnttab_max_grant_frames()) {
pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
" cur=%u extra=%u limit=%u"
@@ -1104,16 +1296,16 @@ int gnttab_init(void)
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
- BUG_ON(grefs_per_grant_frame == 0);
+ BUG_ON(gnttab_interface == NULL);
max_nr_glist_frames = (max_nr_grant_frames *
- grefs_per_grant_frame / RPP);
+ gnttab_interface->grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
GFP_KERNEL);
if (gnttab_list == NULL)
return -ENOMEM;
- nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
for (i = 0; i < nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
if (gnttab_list[i] == NULL) {
@@ -1122,7 +1314,8 @@ int gnttab_init(void)
}
}
- ret = arch_gnttab_init(max_nr_grant_frames);
+ ret = arch_gnttab_init(max_nr_grant_frames,
+ nr_status_frames(max_nr_grant_frames));
if (ret < 0)
goto ini_nomem;
@@ -1131,7 +1324,8 @@ int gnttab_init(void)
goto ini_nomem;
}
- nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
+ nr_init_grefs = nr_grant_frames *
+ gnttab_interface->grefs_per_grant_frame;
for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
gnttab_entry(i) = i + 1;
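All of the sizing changes above hang off the new grefs_per_grant_frame field: a v1 grant entry is 8 bytes, a v2 entry 16 bytes, and a v2 status entry (grant_status_t) 2 bytes, so gnttab_frames() and nr_status_frames() just round those capacities up. A standalone arithmetic sketch (userspace C; the 4 KiB Xen page size and the frame count are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SIZE 4096u

int main(void)
{
        unsigned int v1_per_frame = XEN_PAGE_SIZE / 8;       /* 512 grant refs */
        unsigned int v2_per_frame = XEN_PAGE_SIZE / 16;      /* 256 grant refs */
        unsigned int spp = XEN_PAGE_SIZE / sizeof(uint16_t); /* 2048 status entries */
        unsigned int nr_gframes = 32;

        /* Same rounding as gnttab_frames(frames, align). */
        unsigned int nr_sframes = (nr_gframes * v2_per_frame + spp - 1) / spp;

        printf("v1: %u refs/frame, v2: %u refs/frame, %u status frame(s) for %u v2 frames\n",
               v1_per_frame, v2_per_frame, nr_sframes, nr_gframes);
        return 0;
}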
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c425d03d37d2..8835065029d3 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -72,18 +72,15 @@ static int xen_suspend(void *data)
}
gnttab_suspend();
+ xen_manage_runstate_time(-1);
xen_arch_pre_suspend();
- /*
- * This hypercall returns 1 if suspend was cancelled
- * or the domain was merely checkpointed, and 0 if it
- * is resuming in a new domain.
- */
si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
? virt_to_gfn(xen_start_info)
: 0);
xen_arch_post_suspend(si->cancelled);
+ xen_manage_runstate_time(si->cancelled ? 1 : 0);
gnttab_resume();
if (!si->cancelled) {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index feca75b07fdd..1c909183c42a 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -191,13 +191,10 @@ static int traverse_pages_block(unsigned nelem, size_t size,
void *state)
{
void *pagedata;
- unsigned pageidx;
int ret = 0;
BUG_ON(size > PAGE_SIZE);
- pageidx = PAGE_SIZE;
-
while (nelem) {
int nr = (PAGE_SIZE/size);
struct page *page;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b209cd44bb8d..c7822d8078b9 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -134,20 +134,16 @@ static void pvcalls_conn_back_read(void *opaque)
masked_cons = pvcalls_mask(cons, array_size);
memset(&msg, 0, sizeof(msg));
- msg.msg_iter.type = ITER_KVEC|WRITE;
- msg.msg_iter.count = wanted;
if (masked_prod < masked_cons) {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = wanted;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 1;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted);
} else {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = array_size - masked_prod;
vec[1].iov_base = data->in;
vec[1].iov_len = wanted - vec[0].iov_len;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 2;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted);
}
atomic_set(&map->read, 0);
@@ -196,20 +192,16 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT;
- msg.msg_iter.type = ITER_KVEC|READ;
- msg.msg_iter.count = size;
if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = size;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 1;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size);
} else {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
vec[1].iov_base = data->out;
vec[1].iov_len = size - vec[0].iov_len;
- msg.msg_iter.kvec = vec;
- msg.msg_iter.nr_segs = 2;
+ iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size);
}
atomic_set(&map->write, 0);
@@ -1238,3 +1230,7 @@ static void __exit pvcalls_back_fin(void)
}
module_exit(pvcalls_back_fin);
+
+MODULE_DESCRIPTION("Xen PV Calls backend driver");
+MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
+MODULE_LICENSE("GPL");
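The pvcalls-back hunks above replace hand-rolled msg_iter field assignments with iov_iter_kvec(), which sets the iterator type, kvec array, segment count, and byte count in one call and keeps the internal fields consistent. A minimal sketch of the same call shape, with hypothetical buffer names:

#include <linux/socket.h>
#include <linux/uio.h>

static void example_init_iter(struct msghdr *msg, struct kvec *vec,
                              void *buf, size_t len)
{
        vec[0].iov_base = buf;
        vec[0].iov_len  = len;

        /* One call initializes type, kvec, nr_segs and count together. */
        iov_iter_kvec(&msg->msg_iter, ITER_KVEC | READ, vec, 1, len);
}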
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
new file mode 100644
index 000000000000..40caa92bff33
--- /dev/null
+++ b/drivers/xen/pvcalls-front.c
@@ -0,0 +1,1278 @@
+/*
+ * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
+
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/pvcalls.h>
+
+#include "pvcalls-front.h"
+
+#define PVCALLS_INVALID_ID UINT_MAX
+#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
+#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
+#define PVCALLS_FRONT_MAX_SPIN 5000
+
+struct pvcalls_bedata {
+ struct xen_pvcalls_front_ring ring;
+ grant_ref_t ref;
+ int irq;
+
+ struct list_head socket_mappings;
+ spinlock_t socket_lock;
+
+ wait_queue_head_t inflight_req;
+ struct xen_pvcalls_response rsp[PVCALLS_NR_RSP_PER_RING];
+};
+/* Only one front/back connection supported. */
+static struct xenbus_device *pvcalls_front_dev;
+static atomic_t pvcalls_refcount;
+
+/* first increment refcount, then proceed */
+#define pvcalls_enter() { \
+ atomic_inc(&pvcalls_refcount); \
+}
+
+/* first complete other operations, then decrement refcount */
+#define pvcalls_exit() { \
+ atomic_dec(&pvcalls_refcount); \
+}
+
+struct sock_mapping {
+ bool active_socket;
+ struct list_head list;
+ struct socket *sock;
+ union {
+ struct {
+ int irq;
+ grant_ref_t ref;
+ struct pvcalls_data_intf *ring;
+ struct pvcalls_data data;
+ struct mutex in_mutex;
+ struct mutex out_mutex;
+
+ wait_queue_head_t inflight_conn_req;
+ } active;
+ struct {
+ /* Socket status */
+#define PVCALLS_STATUS_UNINITALIZED 0
+#define PVCALLS_STATUS_BIND 1
+#define PVCALLS_STATUS_LISTEN 2
+ uint8_t status;
+ /*
+ * Internal state-machine flags.
+ * Only one accept operation can be inflight for a socket.
+ * Only one poll operation can be inflight for a given socket.
+ */
+#define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
+#define PVCALLS_FLAG_POLL_INFLIGHT 1
+#define PVCALLS_FLAG_POLL_RET 2
+ uint8_t flags;
+ uint32_t inflight_req_id;
+ struct sock_mapping *accept_map;
+ wait_queue_head_t inflight_accept_req;
+ } passive;
+ };
+};
+
+static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
+{
+ *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
+ if (RING_FULL(&bedata->ring) ||
+ bedata->rsp[*req_id].req_id != PVCALLS_INVALID_ID)
+ return -EAGAIN;
+ return 0;
+}
+
+static bool pvcalls_front_write_todo(struct sock_mapping *map)
+{
+ struct pvcalls_data_intf *intf = map->active.ring;
+ RING_IDX cons, prod, size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ error = intf->out_error;
+ if (error == -ENOTCONN)
+ return false;
+ if (error != 0)
+ return true;
+
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ return !!(size - pvcalls_queued(prod, cons, size));
+}
+
+static bool pvcalls_front_read_todo(struct sock_mapping *map)
+{
+ struct pvcalls_data_intf *intf = map->active.ring;
+ RING_IDX cons, prod;
+ int32_t error;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ error = intf->in_error;
+ return (error != 0 ||
+ pvcalls_queued(prod, cons,
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) != 0);
+}
+
+static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
+{
+ struct xenbus_device *dev = dev_id;
+ struct pvcalls_bedata *bedata;
+ struct xen_pvcalls_response *rsp;
+ uint8_t *src, *dst;
+ int req_id = 0, more = 0, done = 0;
+
+ if (dev == NULL)
+ return IRQ_HANDLED;
+
+ pvcalls_enter();
+ bedata = dev_get_drvdata(&dev->dev);
+ if (bedata == NULL) {
+ pvcalls_exit();
+ return IRQ_HANDLED;
+ }
+
+again:
+ while (RING_HAS_UNCONSUMED_RESPONSES(&bedata->ring)) {
+ rsp = RING_GET_RESPONSE(&bedata->ring, bedata->ring.rsp_cons);
+
+ req_id = rsp->req_id;
+ if (rsp->cmd == PVCALLS_POLL) {
+ struct sock_mapping *map = (struct sock_mapping *)(uintptr_t)
+ rsp->u.poll.id;
+
+ clear_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+ (void *)&map->passive.flags);
+ /*
+ * clear INFLIGHT, then set RET. It pairs with
+ * the checks at the beginning of
+ * pvcalls_front_poll_passive.
+ */
+ smp_wmb();
+ set_bit(PVCALLS_FLAG_POLL_RET,
+ (void *)&map->passive.flags);
+ } else {
+ dst = (uint8_t *)&bedata->rsp[req_id] +
+ sizeof(rsp->req_id);
+ src = (uint8_t *)rsp + sizeof(rsp->req_id);
+ memcpy(dst, src, sizeof(*rsp) - sizeof(rsp->req_id));
+ /*
+ * First copy the rest of the data, then req_id. It is
+ * paired with the barrier when accessing bedata->rsp.
+ */
+ smp_wmb();
+ bedata->rsp[req_id].req_id = req_id;
+ }
+
+ done = 1;
+ bedata->ring.rsp_cons++;
+ }
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&bedata->ring, more);
+ if (more)
+ goto again;
+ if (done)
+ wake_up(&bedata->inflight_req);
+ pvcalls_exit();
+ return IRQ_HANDLED;
+}
+
+static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+ struct sock_mapping *map)
+{
+ int i;
+
+ unbind_from_irqhandler(map->active.irq, map);
+
+ spin_lock(&bedata->socket_lock);
+ if (!list_empty(&map->list))
+ list_del_init(&map->list);
+ spin_unlock(&bedata->socket_lock);
+
+ for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0);
+ gnttab_end_foreign_access(map->active.ref, 0, 0);
+ free_page((unsigned long)map->active.ring);
+
+ kfree(map);
+}
+
+static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
+{
+ struct sock_mapping *map = sock_map;
+
+ if (map == NULL)
+ return IRQ_HANDLED;
+
+ wake_up_interruptible(&map->active.inflight_conn_req);
+
+ return IRQ_HANDLED;
+}
+
+int pvcalls_front_socket(struct socket *sock)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ /*
+ * PVCalls only supports domain AF_INET,
+ * type SOCK_STREAM and protocol 0 sockets for now.
+ *
+ * Check the socket type here; the AF_INET and protocol checks
+ * are done by the caller.
+ */
+ if (sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -EACCES;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+
+ spin_lock(&bedata->socket_lock);
+
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ kfree(map);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+
+ /*
+ * sock->sk->sk_send_head is not used for ip sockets: reuse the
+ * field to store a pointer to the struct sock_mapping
+ * corresponding to the socket. This way, we can easily get the
+ * struct sock_mapping from the struct socket.
+ */
+ sock->sk->sk_send_head = (void *)map;
+ list_add_tail(&map->list, &bedata->socket_mappings);
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_SOCKET;
+ req->u.socket.id = (uintptr_t) map;
+ req->u.socket.domain = AF_INET;
+ req->u.socket.type = SOCK_STREAM;
+ req->u.socket.protocol = IPPROTO_IP;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ pvcalls_exit();
+ return ret;
+}
+
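+/*
+ * Set up the data ring of an active socket: allocate the indirect page
+ * and the data pages, grant them to the backend, then allocate an event
+ * channel and bind pvcalls_front_conn_handler to it.
+ */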
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+ void *bytes;
+ int ret = -ENOMEM, irq = -1, i;
+
+ *evtchn = -1;
+ init_waitqueue_head(&map->active.inflight_conn_req);
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (map->active.ring == NULL)
+ goto out_error;
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (bytes == NULL)
+ goto out_error;
+ for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+ map->active.ring->ref[i] = gnttab_grant_foreign_access(
+ pvcalls_front_dev->otherend_id,
+ pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
+
+ map->active.ref = gnttab_grant_foreign_access(
+ pvcalls_front_dev->otherend_id,
+ pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
+ if (ret)
+ goto out_error;
+ irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
+ 0, "pvcalls-frontend", map);
+ if (irq < 0) {
+ ret = irq;
+ goto out_error;
+ }
+
+ map->active.irq = irq;
+ map->active_socket = true;
+ mutex_init(&map->active.in_mutex);
+ mutex_init(&map->active.out_mutex);
+
+ return 0;
+
+out_error:
+ if (*evtchn >= 0)
+ xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
+ /* these were page allocations, so they must not be freed with kfree() */
+ free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+ free_page((unsigned long)map->active.ring);
+ return ret;
+}
+
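+/*
+ * Connect an active socket: reserve a request slot, build the data ring
+ * with create_active(), then send a PVCALLS_CONNECT request carrying the
+ * grant reference and event channel, and wait for the backend's reply.
+ */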
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len, int flags)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret, evtchn;
+
+ if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ ret = create_active(map, &evtchn);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_CONNECT;
+ req->u.connect.id = (uintptr_t)map;
+ req->u.connect.len = addr_len;
+ req->u.connect.flags = flags;
+ req->u.connect.ref = map->active.ref;
+ req->u.connect.evtchn = evtchn;
+ memcpy(req->u.connect.addr, addr, sizeof(*addr));
+
+ map->sock = sock;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ pvcalls_exit();
+ return ret;
+}
+
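+/*
+ * Copy up to len bytes from msg_iter into the out ring, handling the
+ * wrap-around at the end of the ring buffer. The producer index is only
+ * updated after a write barrier. Returns the number of bytes copied, or
+ * a negative error reported by the backend.
+ */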
+static int __write_ring(struct pvcalls_data_intf *intf,
+ struct pvcalls_data *data,
+ struct iov_iter *msg_iter,
+ int len)
+{
+ RING_IDX cons, prod, size, masked_prod, masked_cons;
+ RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ error = intf->out_error;
+ if (error < 0)
+ return error;
+ cons = intf->out_cons;
+ prod = intf->out_prod;
+ /* read indexes before continuing */
+ virt_mb();
+
+ size = pvcalls_queued(prod, cons, array_size);
+ if (size >= array_size)
+ return -EINVAL;
+ if (len > array_size - size)
+ len = array_size - size;
+
+ masked_prod = pvcalls_mask(prod, array_size);
+ masked_cons = pvcalls_mask(cons, array_size);
+
+ if (masked_prod < masked_cons) {
+ len = copy_from_iter(data->out + masked_prod, len, msg_iter);
+ } else {
+ if (len > array_size - masked_prod) {
+ int ret = copy_from_iter(data->out + masked_prod,
+ array_size - masked_prod, msg_iter);
+ if (ret != array_size - masked_prod) {
+ len = ret;
+ goto out;
+ }
+ len = ret + copy_from_iter(data->out, len - ret, msg_iter);
+ } else {
+ len = copy_from_iter(data->out + masked_prod, len, msg_iter);
+ }
+ }
+out:
+ /* write to ring before updating pointer */
+ virt_wmb();
+ intf->out_prod += len;
+
+ return len;
+}
+
+int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
+ size_t len)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int sent, tot_sent = 0;
+ int count = 0, flags;
+
+ flags = msg->msg_flags;
+ if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ mutex_lock(&map->active.out_mutex);
+ if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
+ mutex_unlock(&map->active.out_mutex);
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+ if (len > INT_MAX)
+ len = INT_MAX;
+
+again:
+ count++;
+ sent = __write_ring(map->active.ring,
+ &map->active.data, &msg->msg_iter,
+ len);
+ if (sent > 0) {
+ len -= sent;
+ tot_sent += sent;
+ notify_remote_via_irq(map->active.irq);
+ }
+ if (sent >= 0 && len > 0 && count < PVCALLS_FRONT_MAX_SPIN)
+ goto again;
+ if (sent < 0)
+ tot_sent = sent;
+
+ mutex_unlock(&map->active.out_mutex);
+ pvcalls_exit();
+ return tot_sent;
+}
+
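+/*
+ * Copy up to len bytes from the in ring into msg_iter, handling the
+ * wrap-around at the end of the ring buffer. The consumer index is only
+ * advanced when MSG_PEEK is not set. Returns the number of bytes copied,
+ * or a negative error reported by the backend.
+ */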
+static int __read_ring(struct pvcalls_data_intf *intf,
+ struct pvcalls_data *data,
+ struct iov_iter *msg_iter,
+ size_t len, int flags)
+{
+ RING_IDX cons, prod, size, masked_prod, masked_cons;
+ RING_IDX array_size = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+ int32_t error;
+
+ cons = intf->in_cons;
+ prod = intf->in_prod;
+ error = intf->in_error;
+ /* get pointers before reading from the ring */
+ virt_rmb();
+ if (error < 0)
+ return error;
+
+ size = pvcalls_queued(prod, cons, array_size);
+ masked_prod = pvcalls_mask(prod, array_size);
+ masked_cons = pvcalls_mask(cons, array_size);
+
+ if (size == 0)
+ return 0;
+
+ if (len > size)
+ len = size;
+
+ if (masked_prod > masked_cons) {
+ len = copy_to_iter(data->in + masked_cons, len, msg_iter);
+ } else {
+ if (len > (array_size - masked_cons)) {
+ int ret = copy_to_iter(data->in + masked_cons,
+ array_size - masked_cons, msg_iter);
+ if (ret != array_size - masked_cons) {
+ len = ret;
+ goto out;
+ }
+ len = ret + copy_to_iter(data->in, len - ret, msg_iter);
+ } else {
+ len = copy_to_iter(data->in + masked_cons, len, msg_iter);
+ }
+ }
+out:
+ /* read data from the ring before increasing the index */
+ virt_mb();
+ if (!(flags & MSG_PEEK))
+ intf->in_cons += len;
+
+ return len;
+}
+
+int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
+{
+ struct pvcalls_bedata *bedata;
+ int ret;
+ struct sock_mapping *map;
+
+ if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ mutex_lock(&map->active.in_mutex);
+ if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
+ len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) {
+ wait_event_interruptible(map->active.inflight_conn_req,
+ pvcalls_front_read_todo(map));
+ }
+ ret = __read_ring(map->active.ring, &map->active.data,
+ &msg->msg_iter, len, flags);
+
+ if (ret > 0)
+ notify_remote_via_irq(map->active.irq);
+ if (ret == 0)
+ ret = (flags & MSG_DONTWAIT) ? -EAGAIN : 0;
+ if (ret == -ENOTCONN)
+ ret = 0;
+
+ mutex_unlock(&map->active.in_mutex);
+ pvcalls_exit();
+ return ret;
+}
+
+int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
+ return -EOPNOTSUPP;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (map == NULL) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ map->sock = sock;
+ req->cmd = PVCALLS_BIND;
+ req->u.bind.id = (uintptr_t)map;
+ memcpy(req->u.bind.addr, addr, sizeof(*addr));
+ req->u.bind.len = addr_len;
+
+ init_waitqueue_head(&map->passive.inflight_accept_req);
+
+ map->active_socket = false;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ map->passive.status = PVCALLS_STATUS_BIND;
+ pvcalls_exit();
+ return 0;
+}
+
+int pvcalls_front_listen(struct socket *sock, int backlog)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ if (map->passive.status != PVCALLS_STATUS_BIND) {
+ pvcalls_exit();
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_LISTEN;
+ req->u.listen.id = (uintptr_t) map;
+ req->u.listen.backlog = backlog;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ /* read req_id, then the content */
+ smp_rmb();
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+
+ map->passive.status = PVCALLS_STATUS_LISTEN;
+ pvcalls_exit();
+ return ret;
+}
+
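+/*
+ * Accept a connection on a listening socket. Only one accept request can
+ * be in flight at a time; a nonblocking accept that has just queued a
+ * request returns -EAGAIN and records the request id, so that a later
+ * call (or a poll) can pick up the response when it arrives.
+ */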
+int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ struct sock_mapping *map2 = NULL;
+ struct xen_pvcalls_request *req;
+ int notify, req_id, ret, evtchn, nonblock;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -ENOTCONN;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return -ENOTSOCK;
+ }
+
+ if (map->passive.status != PVCALLS_STATUS_LISTEN) {
+ pvcalls_exit();
+ return -EINVAL;
+ }
+
+ nonblock = flags & SOCK_NONBLOCK;
+ /*
+ * The backend only supports one inflight accept request and
+ * returns errors for any others.
+ */
+ if (test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ req_id = READ_ONCE(map->passive.inflight_req_id);
+ if (req_id != PVCALLS_INVALID_ID &&
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id) {
+ map2 = map->passive.accept_map;
+ goto received;
+ }
+ if (nonblock) {
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+ if (wait_event_interruptible(map->passive.inflight_accept_req,
+ !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags))) {
+ pvcalls_exit();
+ return -EINTR;
+ }
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+ ret = create_active(map2, &evtchn);
+ if (ret < 0) {
+ kfree(map2);
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ list_add_tail(&map2->list, &bedata->socket_mappings);
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_ACCEPT;
+ req->u.accept.id = (uintptr_t) map;
+ req->u.accept.ref = map2->active.ref;
+ req->u.accept.id_new = (uintptr_t) map2;
+ req->u.accept.evtchn = evtchn;
+ map->passive.accept_map = map2;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+ /* We could check if we have received a response before returning. */
+ if (nonblock) {
+ WRITE_ONCE(map->passive.inflight_req_id, req_id);
+ pvcalls_exit();
+ return -EAGAIN;
+ }
+
+ if (wait_event_interruptible(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
+ pvcalls_exit();
+ return -EINTR;
+ }
+ /* read req_id, then the content */
+ smp_rmb();
+
+received:
+ map2->sock = newsock;
+ newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ if (!newsock->sk) {
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ map->passive.inflight_req_id = PVCALLS_INVALID_ID;
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ pvcalls_front_free_map(bedata, map2);
+ pvcalls_exit();
+ return -ENOMEM;
+ }
+ newsock->sk->sk_send_head = (void *)map2;
+
+ ret = bedata->rsp[req_id].ret;
+ bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+ map->passive.inflight_req_id = PVCALLS_INVALID_ID;
+
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
+ wake_up(&map->passive.inflight_accept_req);
+
+ pvcalls_exit();
+ return ret;
+}
+
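+/*
+ * Poll on a passive (listening) socket: report POLLIN when a previously
+ * issued accept or poll request has completed; otherwise send a
+ * PVCALLS_POLL request to the backend, allowing only one in flight.
+ */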
+static unsigned int pvcalls_front_poll_passive(struct file *file,
+ struct pvcalls_bedata *bedata,
+ struct sock_mapping *map,
+ poll_table *wait)
+{
+ int notify, req_id, ret;
+ struct xen_pvcalls_request *req;
+
+ if (test_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ uint32_t req_id = READ_ONCE(map->passive.inflight_req_id);
+
+ if (req_id != PVCALLS_INVALID_ID &&
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id)
+ return POLLIN | POLLRDNORM;
+
+ poll_wait(file, &map->passive.inflight_accept_req, wait);
+ return 0;
+ }
+
+ if (test_and_clear_bit(PVCALLS_FLAG_POLL_RET,
+ (void *)&map->passive.flags))
+ return POLLIN | POLLRDNORM;
+
+ /*
+ * First check RET, then INFLIGHT. No barriers necessary to
+ * ensure execution ordering because of the conditional
+ * instructions creating control dependencies.
+ */
+
+ if (test_and_set_bit(PVCALLS_FLAG_POLL_INFLIGHT,
+ (void *)&map->passive.flags)) {
+ poll_wait(file, &bedata->inflight_req, wait);
+ return 0;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ return ret;
+ }
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_POLL;
+ req->u.poll.id = (uintptr_t) map;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ poll_wait(file, &bedata->inflight_req, wait);
+ return 0;
+}
+
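+/*
+ * Poll on a connected socket: derive the mask from the state of the data
+ * ring (space to write, data to read, or a pending error).
+ */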
+static unsigned int pvcalls_front_poll_active(struct file *file,
+ struct pvcalls_bedata *bedata,
+ struct sock_mapping *map,
+ poll_table *wait)
+{
+ unsigned int mask = 0;
+ int32_t in_error, out_error;
+ struct pvcalls_data_intf *intf = map->active.ring;
+
+ out_error = intf->out_error;
+ in_error = intf->in_error;
+
+ poll_wait(file, &map->active.inflight_conn_req, wait);
+ if (pvcalls_front_write_todo(map))
+ mask |= POLLOUT | POLLWRNORM;
+ if (pvcalls_front_read_todo(map))
+ mask |= POLLIN | POLLRDNORM;
+ if (in_error != 0 || out_error != 0)
+ mask |= POLLERR;
+
+ return mask;
+}
+
+unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int ret;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return POLLNVAL;
+ }
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (!map) {
+ pvcalls_exit();
+ return POLLNVAL;
+ }
+ if (map->active_socket)
+ ret = pvcalls_front_poll_active(file, bedata, map, wait);
+ else
+ ret = pvcalls_front_poll_passive(file, bedata, map, wait);
+ pvcalls_exit();
+ return ret;
+}
+
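+/*
+ * Release a socket: send PVCALLS_RELEASE and wait for the response, then
+ * free the active data ring (after waiting for concurrent pvcalls users
+ * to drain) or the passive mapping and any outstanding accept mapping.
+ */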
+int pvcalls_front_release(struct socket *sock)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map;
+ int req_id, notify, ret;
+ struct xen_pvcalls_request *req;
+
+ if (sock->sk == NULL)
+ return 0;
+
+ pvcalls_enter();
+ if (!pvcalls_front_dev) {
+ pvcalls_exit();
+ return -EIO;
+ }
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+ map = (struct sock_mapping *) sock->sk->sk_send_head;
+ if (map == NULL) {
+ pvcalls_exit();
+ return 0;
+ }
+
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
+ spin_unlock(&bedata->socket_lock);
+ pvcalls_exit();
+ return ret;
+ }
+ sock->sk->sk_send_head = NULL;
+
+ req = RING_GET_REQUEST(&bedata->ring, req_id);
+ req->req_id = req_id;
+ req->cmd = PVCALLS_RELEASE;
+ req->u.release.id = (uintptr_t)map;
+
+ bedata->ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+ spin_unlock(&bedata->socket_lock);
+ if (notify)
+ notify_remote_via_irq(bedata->irq);
+
+ wait_event(bedata->inflight_req,
+ READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+ if (map->active_socket) {
+ /*
+ * Set in_error and wake up inflight_conn_req to force
+ * recvmsg waiters to exit.
+ */
+ map->active.ring->in_error = -EBADF;
+ wake_up_interruptible(&map->active.inflight_conn_req);
+
+ /*
+ * We need to make sure that sendmsg/recvmsg on this socket have
+ * not started before we've cleared sk_send_head here. The
+ * easiest (though not optimal) way to guarantee this is to see
+ * that no pvcall (other than us) is in progress.
+ */
+ while (atomic_read(&pvcalls_refcount) > 1)
+ cpu_relax();
+
+ pvcalls_front_free_map(bedata, map);
+ } else {
+ spin_lock(&bedata->socket_lock);
+ list_del(&map->list);
+ spin_unlock(&bedata->socket_lock);
+ if (READ_ONCE(map->passive.inflight_req_id) !=
+ PVCALLS_INVALID_ID) {
+ pvcalls_front_free_map(bedata,
+ map->passive.accept_map);
+ }
+ kfree(map);
+ }
+ WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+
+ pvcalls_exit();
+ return 0;
+}
+
+static const struct xenbus_device_id pvcalls_front_ids[] = {
+ { "pvcalls" },
+ { "" }
+};
+
+static int pvcalls_front_remove(struct xenbus_device *dev)
+{
+ struct pvcalls_bedata *bedata;
+ struct sock_mapping *map = NULL, *n;
+
+ bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ dev_set_drvdata(&dev->dev, NULL);
+ pvcalls_front_dev = NULL;
+ if (bedata->irq >= 0)
+ unbind_from_irqhandler(bedata->irq, dev);
+
+ list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+ map->sock->sk->sk_send_head = NULL;
+ if (map->active_socket) {
+ map->active.ring->in_error = -EBADF;
+ wake_up_interruptible(&map->active.inflight_conn_req);
+ }
+ }
+
+ smp_mb();
+ while (atomic_read(&pvcalls_refcount) > 0)
+ cpu_relax();
+ list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+ if (map->active_socket) {
+ /* No need to lock, refcount is 0 */
+ pvcalls_front_free_map(bedata, map);
+ } else {
+ list_del(&map->list);
+ kfree(map);
+ }
+ }
+ if (bedata->ref >= 0)
+ gnttab_end_foreign_access(bedata->ref, 0, 0);
+ kfree(bedata->ring.sring);
+ kfree(bedata);
+ xenbus_switch_state(dev, XenbusStateClosed);
+ return 0;
+}
+
+static int pvcalls_front_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int ret = -ENOMEM, evtchn, i;
+ unsigned int max_page_order, function_calls, len;
+ char *versions;
+ grant_ref_t gref_head = 0;
+ struct xenbus_transaction xbt;
+ struct pvcalls_bedata *bedata = NULL;
+ struct xen_pvcalls_sring *sring;
+
+ if (pvcalls_front_dev != NULL) {
+ dev_err(&dev->dev, "only one PV Calls connection supported\n");
+ return -EINVAL;
+ }
+
+ versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+ if (IS_ERR(versions))
+ return PTR_ERR(versions);
+ if (!len) {
+ kfree(versions);
+ return -EINVAL;
+ }
+ if (strcmp(versions, "1")) {
+ kfree(versions);
+ return -EINVAL;
+ }
+ kfree(versions);
+ max_page_order = xenbus_read_unsigned(dev->otherend,
+ "max-page-order", 0);
+ if (max_page_order < PVCALLS_RING_ORDER)
+ return -ENODEV;
+ function_calls = xenbus_read_unsigned(dev->otherend,
+ "function-calls", 0);
+ /* See XENBUS_FUNCTIONS_CALLS in pvcalls.h */
+ if (function_calls != 1)
+ return -ENODEV;
+ pr_info("%s max-page-order is %u\n", __func__, max_page_order);
+
+ bedata = kzalloc(sizeof(struct pvcalls_bedata), GFP_KERNEL);
+ if (!bedata)
+ return -ENOMEM;
+
+ dev_set_drvdata(&dev->dev, bedata);
+ pvcalls_front_dev = dev;
+ init_waitqueue_head(&bedata->inflight_req);
+ INIT_LIST_HEAD(&bedata->socket_mappings);
+ spin_lock_init(&bedata->socket_lock);
+ bedata->irq = -1;
+ bedata->ref = -1;
+
+ for (i = 0; i < PVCALLS_NR_RSP_PER_RING; i++)
+ bedata->rsp[i].req_id = PVCALLS_INVALID_ID;
+
+ sring = (struct xen_pvcalls_sring *) __get_free_page(GFP_KERNEL |
+ __GFP_ZERO);
+ if (!sring)
+ goto error;
+ SHARED_RING_INIT(sring);
+ FRONT_RING_INIT(&bedata->ring, sring, XEN_PAGE_SIZE);
+
+ ret = xenbus_alloc_evtchn(dev, &evtchn);
+ if (ret)
+ goto error;
+
+ bedata->irq = bind_evtchn_to_irqhandler(evtchn,
+ pvcalls_front_event_handler,
+ 0, "pvcalls-frontend", dev);
+ if (bedata->irq < 0) {
+ ret = bedata->irq;
+ goto error;
+ }
+
+ ret = gnttab_alloc_grant_references(1, &gref_head);
+ if (ret < 0)
+ goto error;
+ ret = gnttab_claim_grant_reference(&gref_head);
+ if (ret < 0)
+ goto error;
+ bedata->ref = ret;
+ gnttab_grant_foreign_access_ref(bedata->ref, dev->otherend_id,
+ virt_to_gfn((void *)sring), 0);
+
+ again:
+ ret = xenbus_transaction_start(&xbt);
+ if (ret) {
+ xenbus_dev_fatal(dev, ret, "starting transaction");
+ goto error;
+ }
+ ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "ring-ref", "%d", bedata->ref);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_printf(xbt, dev->nodename, "port", "%u",
+ evtchn);
+ if (ret)
+ goto error_xenbus;
+ ret = xenbus_transaction_end(xbt, 0);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, ret, "completing transaction");
+ goto error;
+ }
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ error_xenbus:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, ret, "writing xenstore");
+ error:
+ pvcalls_front_remove(dev);
+ return ret;
+}
+
+static void pvcalls_front_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ case XenbusStateUnknown:
+ break;
+
+ case XenbusStateInitWait:
+ break;
+
+ case XenbusStateConnected:
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state */
+ /* fall through */
+ case XenbusStateClosing:
+ xenbus_frontend_closed(dev);
+ break;
+ }
+}
+
+static struct xenbus_driver pvcalls_front_driver = {
+ .ids = pvcalls_front_ids,
+ .probe = pvcalls_front_probe,
+ .remove = pvcalls_front_remove,
+ .otherend_changed = pvcalls_front_changed,
+};
+
+static int __init pvcalls_frontend_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("Initialising Xen pvcalls frontend driver\n");
+
+ return xenbus_register_frontend(&pvcalls_front_driver);
+}
+
+module_init(pvcalls_frontend_init);
+
+MODULE_DESCRIPTION("Xen PV Calls frontend driver");
+MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
new file mode 100644
index 000000000000..3332978f4fcd
--- /dev/null
+++ b/drivers/xen/pvcalls-front.h
@@ -0,0 +1,28 @@
+#ifndef __PVCALLS_FRONT_H__
+#define __PVCALLS_FRONT_H__
+
+#include <linux/net.h>
+
+int pvcalls_front_socket(struct socket *sock);
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len, int flags);
+int pvcalls_front_bind(struct socket *sock,
+ struct sockaddr *addr,
+ int addr_len);
+int pvcalls_front_listen(struct socket *sock, int backlog);
+int pvcalls_front_accept(struct socket *sock,
+ struct socket *newsock,
+ int flags);
+int pvcalls_front_sendmsg(struct socket *sock,
+ struct msghdr *msg,
+ size_t len);
+int pvcalls_front_recvmsg(struct socket *sock,
+ struct msghdr *msg,
+ size_t len,
+ int flags);
+unsigned int pvcalls_front_poll(struct file *file,
+ struct socket *sock,
+ poll_table *wait);
+int pvcalls_front_release(struct socket *sock);
+
+#endif
diff --git a/drivers/xen/time.c b/drivers/xen/time.c
index ac5f23fcafc2..3e741cd1409c 100644
--- a/drivers/xen/time.c
+++ b/drivers/xen/time.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xen stolen ticks accounting.
*/
@@ -5,6 +6,7 @@
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
@@ -19,6 +21,8 @@
/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(u64[4], old_runstate_time);
+
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
@@ -47,8 +51,8 @@ static u64 get64(const u64 *p)
return ret;
}
-static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
- unsigned int cpu)
+static void xen_get_runstate_snapshot_cpu_delta(
+ struct vcpu_runstate_info *res, unsigned int cpu)
{
u64 state_time;
struct vcpu_runstate_info *state;
@@ -66,6 +70,71 @@ static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
(state_time & XEN_RUNSTATE_UPDATE));
}
+static void xen_get_runstate_snapshot_cpu(struct vcpu_runstate_info *res,
+ unsigned int cpu)
+{
+ int i;
+
+ xen_get_runstate_snapshot_cpu_delta(res, cpu);
+
+ for (i = 0; i < 4; i++)
+ res->time[i] += per_cpu(old_runstate_time, cpu)[i];
+}
+
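+/*
+ * Save the per-cpu runstate deltas before suspend (action == -1) and
+ * fold them into old_runstate_time after resume (action == 0), so that
+ * the times returned by xen_get_runstate_snapshot_cpu() keep
+ * accumulating across a save/restore cycle. Any other action discards
+ * the saved deltas.
+ */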
+void xen_manage_runstate_time(int action)
+{
+ static struct vcpu_runstate_info *runstate_delta;
+ struct vcpu_runstate_info state;
+ int cpu, i;
+
+ switch (action) {
+ case -1: /* backup runstate time before suspend */
+ if (unlikely(runstate_delta))
+ pr_warn_once("%s: memory leak as runstate_delta is not NULL\n",
+ __func__);
+
+ runstate_delta = kmalloc_array(num_possible_cpus(),
+ sizeof(*runstate_delta),
+ GFP_ATOMIC);
+ if (unlikely(!runstate_delta)) {
+ pr_warn("%s: failed to allocate runstate_delta\n",
+ __func__);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ xen_get_runstate_snapshot_cpu_delta(&state, cpu);
+ memcpy(runstate_delta[cpu].time, state.time,
+ sizeof(runstate_delta[cpu].time));
+ }
+
+ break;
+
+ case 0: /* accumulate the saved runstate time after resume */
+ if (unlikely(!runstate_delta)) {
+ pr_warn("%s: cannot accumulate runstate time as runstate_delta is NULL\n",
+ __func__);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < 4; i++)
+ per_cpu(old_runstate_time, cpu)[i] +=
+ runstate_delta[cpu].time[i];
+ }
+
+ break;
+
+ default: /* do not accumulate runstate time for checkpointing */
+ break;
+ }
+
+ if (action != -1 && runstate_delta) {
+ kfree(runstate_delta);
+ runstate_delta = NULL;
+ }
+}
+
/*
* Runstate accounting
*/
diff --git a/drivers/xen/xen-pciback/Makefile b/drivers/xen/xen-pciback/Makefile
index ffe0ad3438bd..e8d981d43235 100644
--- a/drivers/xen/xen-pciback/Makefile
+++ b/drivers/xen/xen-pciback/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 9e9286d0872e..60111719b01f 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Functions for creating a virtual configuration space for
* exported PCI Devices.
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 62461a8ba1d6..22db630717ea 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PCI Backend - Common data structures for overriding the configuration space
*
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 7f83e9083e9d..73427d8e0116 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Handles the virtual fields found on the capability lists
* in the configuration space.
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 5b3d57fc82d3..10ae24b5a76e 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Handles the virtual fields in the configuration space headers.
*
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
index 7476791cab40..89d9744ece61 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.c
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Handle special overlays for broken devices.
*
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.h b/drivers/xen/xen-pciback/conf_space_quirks.h
index cfcc517e4570..d873abe35bf6 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.h
+++ b/drivers/xen/xen-pciback/conf_space_quirks.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PCI Backend - Data structures for special overlays for broken devices.
*
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index f16a30e2a110..66e9b814cc86 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Provides restricted access to the real PCI bus topology
* to the frontend
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 7af369b6aaa2..263c059bff90 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PCI Backend Common Data Structures & Function Declarations
*
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index f8c77751f330..ee2c891b55c6 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend Operations - respond to PCI requests from Frontend
*
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index c99f8bb1c56c..f6ba18191c0f 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend - Provides a Virtual PCI bus (with real devices)
* to the frontend
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 3814b44bf1f7..581c4e1a8b82 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* PCI Backend Xenbus Setup - handles setup with frontend and xend
*
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index a67e955cacd1..55988b8418ee 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
* Xen selfballoon driver (and optional frontswap self-shrinking driver)
*
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 31e2e9050c7a..0c7532110815 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-y += xenbus.o
obj-y += xenbus_dev_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 1126701e212e..edba5fecde4d 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 19e45ce21f89..07896f4b2736 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -379,10 +379,12 @@ static void xenbus_reset_frontend(char *fe, char *be, int be_state)
case XenbusStateConnected:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosing);
xenbus_reset_wait_for_backend(be, XenbusStateClosing);
+ /* fall through */
case XenbusStateClosing:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateClosed);
xenbus_reset_wait_for_backend(be, XenbusStateClosed);
+ /* fall through */
case XenbusStateClosed:
xenbus_printf(XBT_NIL, fe, "state", "%d", XenbusStateInitialising);
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index 2c5934ea9b1e..cfe4874b83a7 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _XENFS_XENBUS_H
#define _XENFS_XENBUS_H
diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
index 82fd2a396d96..f59235f9f8a2 100644
--- a/drivers/xen/xenfs/xenstored.c
+++ b/drivers/xen/xenfs/xenstored.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index c6e2b4a542ea..c6c73a33c44d 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
diff --git a/drivers/zorro/Makefile b/drivers/zorro/Makefile
index 7dc5332ff984..b360ac4ea846 100644
--- a/drivers/zorro/Makefile
+++ b/drivers/zorro/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Zorro bus specific drivers.
#
diff --git a/drivers/zorro/gen-devlist.c b/drivers/zorro/gen-devlist.c
index 16fe206f9998..e325c5ce995b 100644
--- a/drivers/zorro/gen-devlist.c
+++ b/drivers/zorro/gen-devlist.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generate devlist.h from the Zorro ID file.
*
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index 83eedddbb794..fa3c83dbe843 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Zorro Device Name Tables
*
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 05397305fccd..df05a26ab8d8 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Procfs interface for the Zorro bus.
*
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index 4f805c01cfbc..ac0bab3412d9 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_ZORRO_NAMES
extern void zorro_name_device(struct zorro_dev *z);